Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accessibility/speakup/main.c | 5
-rw-r--r--  drivers/accessibility/speakup/selection.c | 11
-rw-r--r--  drivers/accessibility/speakup/speakup.h | 1
-rw-r--r--  drivers/accessibility/speakup/spk_ttyio.c | 10
-rw-r--r--  drivers/accessibility/speakup/spk_types.h | 8
-rw-r--r--  drivers/acpi/Kconfig | 65
-rw-r--r--  drivers/acpi/Makefile | 10
-rw-r--r--  drivers/acpi/acpi_apd.c | 32
-rw-r--r--  drivers/acpi/acpi_cmos_rtc.c | 2
-rw-r--r--  drivers/acpi/acpi_configfs.c | 1
-rw-r--r--  drivers/acpi/acpi_dbg.c | 3
-rw-r--r--  drivers/acpi/acpi_extlog.c | 6
-rw-r--r--  drivers/acpi/acpi_lpss.c | 24
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 22
-rw-r--r--  drivers/acpi/acpi_platform.c | 2
-rw-r--r--  drivers/acpi/acpi_pnp.c | 2
-rw-r--r--  drivers/acpi/acpi_processor.c | 35
-rw-r--r--  drivers/acpi/acpi_video.c | 6
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 6
-rw-r--r--  drivers/acpi/acpica/achware.h | 6
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 11
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 33
-rw-r--r--  drivers/acpi/acpica/dbexec.c | 39
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 14
-rw-r--r--  drivers/acpi/acpica/dbmethod.c | 167
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 4
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 27
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 23
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 102
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 30
-rw-r--r--  drivers/acpi/acpica/nsalloc.c | 2
-rw-r--r--  drivers/acpi/acpica/nsarguments.c | 4
-rw-r--r--  drivers/acpi/acpica/nsxfobj.c | 3
-rw-r--r--  drivers/acpi/acpica/psparse.c | 4
-rw-r--r--  drivers/acpi/acpica/utpredef.c | 5
-rw-r--r--  drivers/acpi/acpica/utstrsuppt.c | 33
-rw-r--r--  drivers/acpi/apei/apei-base.c | 6
-rw-r--r--  drivers/acpi/apei/ghes.c | 65
-rw-r--r--  drivers/acpi/arm64/iort.c | 18
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/bus.c | 5
-rw-r--r--  drivers/acpi/button.c | 39
-rw-r--r--  drivers/acpi/container.c | 3
-rw-r--r--  drivers/acpi/custom_method.c | 2
-rw-r--r--  drivers/acpi/debugfs.c | 3
-rw-r--r--  drivers/acpi/dock.c | 5
-rw-r--r--  drivers/acpi/dptf/Kconfig | 37
-rw-r--r--  drivers/acpi/dptf/Makefile | 1
-rw-r--r--  drivers/acpi/dptf/dptf_pch_fivr.c | 127
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 4
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 7
-rw-r--r--  drivers/acpi/ec.c | 10
-rw-r--r--  drivers/acpi/event.c | 5
-rw-r--r--  drivers/acpi/evged.c | 2
-rw-r--r--  drivers/acpi/fan.c | 1
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 22
-rw-r--r--  drivers/acpi/numa/hmat.c | 167
-rw-r--r--  drivers/acpi/numa/srat.c | 82
-rw-r--r--  drivers/acpi/osl.c | 30
-rw-r--r--  drivers/acpi/pci_irq.c | 2
-rw-r--r--  drivers/acpi/pci_link.c | 12
-rw-r--r--  drivers/acpi/pci_mcfg.c | 24
-rw-r--r--  drivers/acpi/pci_root.c | 4
-rw-r--r--  drivers/acpi/pci_slot.c | 3
-rw-r--r--  drivers/acpi/pmic/Kconfig | 67
-rw-r--r--  drivers/acpi/pmic/Makefile | 10
-rw-r--r--  drivers/acpi/power.c | 6
-rw-r--r--  drivers/acpi/proc.c | 4
-rw-r--r--  drivers/acpi/processor_core.c | 3
-rw-r--r--  drivers/acpi/processor_perflib.c | 6
-rw-r--r--  drivers/acpi/processor_thermal.c | 2
-rw-r--r--  drivers/acpi/reboot.c | 11
-rw-r--r--  drivers/acpi/sbs.c | 2
-rw-r--r--  drivers/acpi/sbshc.c | 2
-rw-r--r--  drivers/acpi/sbshc.h | 6
-rw-r--r--  drivers/acpi/scan.c | 7
-rw-r--r--  drivers/acpi/tiny-power-button.c | 1
-rw-r--r--  drivers/acpi/utils.c | 4
-rw-r--r--  drivers/acpi/video_detect.c | 28
-rw-r--r--  drivers/acpi/wakeup.c | 6
-rw-r--r--  drivers/android/binder.c | 59
-rw-r--r--  drivers/android/binder_alloc.c | 57
-rw-r--r--  drivers/android/binder_alloc.h | 5
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 2
-rw-r--r--  drivers/android/binderfs.c | 2
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/ahci.h | 2
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/ahci_qoriq.c | 20
-rw-r--r--  drivers/ata/libahci_platform.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 2
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/pata_cmd64x.c | 2
-rw-r--r--  drivers/ata/pata_ns87415.c | 3
-rw-r--r--  drivers/ata/sata_highbank.c | 7
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/ata/sata_rcar.c | 2
-rw-r--r--  drivers/atm/atmtcp.c | 2
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/arch_topology.c | 17
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/base/cacheinfo.c | 49
-rw-r--r--  drivers/base/class.c | 2
-rw-r--r--  drivers/base/core.c | 130
-rw-r--r--  drivers/base/cpu.c | 84
-rw-r--r--  drivers/base/dd.c | 19
-rw-r--r--  drivers/base/devcon.c | 231
-rw-r--r--  drivers/base/devcoredump.c | 2
-rw-r--r--  drivers/base/devres.c | 105
-rw-r--r--  drivers/base/firmware_loader/fallback.c | 25
-rw-r--r--  drivers/base/firmware_loader/fallback.h | 5
-rw-r--r--  drivers/base/firmware_loader/fallback_platform.c | 12
-rw-r--r--  drivers/base/firmware_loader/firmware.h | 7
-rw-r--r--  drivers/base/firmware_loader/main.c | 135
-rw-r--r--  drivers/base/memory.c | 65
-rw-r--r--  drivers/base/node.c | 342
-rw-r--r--  drivers/base/platform.c | 37
-rw-r--r--  drivers/base/power/domain.c | 234
-rw-r--r--  drivers/base/power/main.c | 1
-rw-r--r--  drivers/base/power/runtime.c | 62
-rw-r--r--  drivers/base/power/sysfs.c | 160
-rw-r--r--  drivers/base/power/wakeup_stats.c | 17
-rw-r--r--  drivers/base/property.c | 73
-rw-r--r--  drivers/base/regmap/Kconfig | 6
-rw-r--r--  drivers/base/regmap/Makefile | 1
-rw-r--r--  drivers/base/regmap/internal.h | 3
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 2
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 33
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 1
-rw-r--r--  drivers/base/regmap/regmap-spi-avmm.c | 719
-rw-r--r--  drivers/base/regmap/regmap.c | 145
-rw-r--r--  drivers/base/soc.c | 64
-rw-r--r--  drivers/base/syscore.c | 8
-rw-r--r--  drivers/base/topology.c | 10
-rw-r--r--  drivers/bcma/driver_pci_host.c | 4
-rw-r--r--  drivers/block/amiflop.c | 2
-rw-r--r--  drivers/block/aoe/aoeblk.c | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 4
-rw-r--r--  drivers/block/ataflop.c | 7
-rw-r--r--  drivers/block/brd.c | 1
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_int.h | 1
-rw-r--r--  drivers/block/drbd/drbd_main.c | 31
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 26
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 12
-rw-r--r--  drivers/block/drbd/drbd_req.c | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 6
-rw-r--r--  drivers/block/floppy.c | 8
-rw-r--r--  drivers/block/loop.c | 7
-rw-r--r--  drivers/block/nbd.c | 33
-rw-r--r--  drivers/block/null_blk.h | 7
-rw-r--r--  drivers/block/null_blk_main.c | 22
-rw-r--r--  drivers/block/null_blk_zoned.c | 454
-rw-r--r--  drivers/block/paride/pcd.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 94
-rw-r--r--  drivers/block/rbd.c | 12
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c | 31
-rw-r--r--  drivers/block/rsxx/core.c | 2
-rw-r--r--  drivers/block/skd_main.c | 1
-rw-r--r--  drivers/block/swim.c | 22
-rw-r--r--  drivers/block/swim3.c | 4
-rw-r--r--  drivers/block/virtio_blk.c | 4
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 22
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 27
-rw-r--r--  drivers/block/xen-blkfront.c | 20
-rw-r--r--  drivers/block/xsysace.c | 75
-rw-r--r--  drivers/block/zram/zram_drv.c | 40
-rw-r--r--  drivers/bluetooth/ath3k.c | 93
-rw-r--r--  drivers/bluetooth/btintel.c | 291
-rw-r--r--  drivers/bluetooth/btintel.h | 91
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 54
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 129
-rw-r--r--  drivers/bluetooth/hci_h5.c | 2
-rw-r--r--  drivers/bluetooth/hci_intel.c | 54
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 1
-rw-r--r--  drivers/bluetooth/hci_qca.c | 8
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 36
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 100
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c | 190
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 141
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c | 12
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 75
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 31
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c | 7
-rw-r--r--  drivers/bus/mhi/Kconfig | 20
-rw-r--r--  drivers/bus/mhi/core/Makefile | 3
-rw-r--r--  drivers/bus/mhi/core/boot.c | 17
-rw-r--r--  drivers/bus/mhi/core/debugfs.c | 411
-rw-r--r--  drivers/bus/mhi/core/init.c | 87
-rw-r--r--  drivers/bus/mhi/core/internal.h | 37
-rw-r--r--  drivers/bus/mhi/core/main.c | 27
-rw-r--r--  drivers/bus/mhi/core/pm.c | 28
-rw-r--r--  drivers/bus/mvebu-mbus.c | 12
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/char/Kconfig | 3
-rw-r--r--  drivers/char/agp/amd-k7-agp.c | 2
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 2
-rw-r--r--  drivers/char/agp/sworks-agp.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 24
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/cctrng.c | 9
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 1
-rw-r--r--  drivers/char/hw_random/ingenic-trng.c | 161
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 2
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 8
-rw-r--r--  drivers/char/hw_random/mxc-rnga.c | 6
-rw-r--r--  drivers/char/hw_random/npcm-rng.c | 14
-rw-r--r--  drivers/char/hw_random/optee-rng.c | 6
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 8
-rw-r--r--  drivers/char/hw_random/xiphera-trng.c | 150
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c | 15
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 52
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 19
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 35
-rw-r--r--  drivers/char/lp.c | 6
-rw-r--r--  drivers/char/mem.c | 28
-rw-r--r--  drivers/char/mspec.c | 5
-rw-r--r--  drivers/char/random.c | 1
-rw-r--r--  drivers/char/raw.c | 56
-rw-r--r--  drivers/char/tpm/Kconfig | 12
-rw-r--r--  drivers/char/tpm/Makefile | 1
-rw-r--r--  drivers/char/tpm/eventlog/efi.c | 5
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 31
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 29
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 11
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 1
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c | 208
-rw-r--r--  drivers/char/virtio_console.c | 8
-rw-r--r--  drivers/clk/Kconfig | 3
-rw-r--r--  drivers/clk/at91/at91sam9g45.c | 7
-rw-r--r--  drivers/clk/at91/clk-main.c | 11
-rw-r--r--  drivers/clk/at91/clk-peripheral.c | 4
-rw-r--r--  drivers/clk/at91/clk-sam9x60-pll.c | 3
-rw-r--r--  drivers/clk/at91/sam9x60.c | 2
-rw-r--r--  drivers/clk/baikal-t1/clk-ccu-pll.c | 14
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 4
-rw-r--r--  drivers/clk/bcm/clk-raspberrypi.c | 1
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 187
-rw-r--r--  drivers/clk/clk-composite.c | 1
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 1
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 1
-rw-r--r--  drivers/clk/clk-qoriq.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 13
-rw-r--r--  drivers/clk/clk-si5341.c | 4
-rw-r--r--  drivers/clk/clk.c | 2
-rw-r--r--  drivers/clk/davinci/da8xx-cfgchip.c | 1
-rw-r--r--  drivers/clk/imx/Kconfig | 90
-rw-r--r--  drivers/clk/imx/Makefile | 78
-rw-r--r--  drivers/clk/imx/clk-busy.c | 1
-rw-r--r--  drivers/clk/imx/clk-composite-7ulp.c | 1
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c | 3
-rw-r--r--  drivers/clk/imx/clk-cpu.c | 2
-rw-r--r--  drivers/clk/imx/clk-fixup-mux.c | 1
-rw-r--r--  drivers/clk/imx/clk-frac-pll.c | 2
-rw-r--r--  drivers/clk/imx/clk-gate2.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx21.c | 171
-rw-r--r--  drivers/clk/imx/clk-imx27.c | 73
-rw-r--r--  drivers/clk/imx/clk-imx31.c | 71
-rw-r--r--  drivers/clk/imx/clk-imx35.c | 68
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx6sl.c | 15
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 1
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 132
-rw-r--r--  drivers/clk/imx/clk-imx8mm.c | 14
-rw-r--r--  drivers/clk/imx/clk-imx8mn.c | 10
-rw-r--r--  drivers/clk/imx/clk-imx8mp.c | 38
-rw-r--r--  drivers/clk/imx/clk-imx8mq.c | 16
-rw-r--r--  drivers/clk/imx/clk-imx8qxp-lpcg.c | 4
-rw-r--r--  drivers/clk/imx/clk-imx8qxp.c | 4
-rw-r--r--  drivers/clk/imx/clk-lpcg-scu.c | 1
-rw-r--r--  drivers/clk/imx/clk-pfd.c | 2
-rw-r--r--  drivers/clk/imx/clk-pfdv2.c | 2
-rw-r--r--  drivers/clk/imx/clk-pll14xx.c | 7
-rw-r--r--  drivers/clk/imx/clk-pllv1.c | 1
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 5
-rw-r--r--  drivers/clk/imx/clk-pllv4.c | 1
-rw-r--r--  drivers/clk/imx/clk-sscg-pll.c | 2
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 2
-rw-r--r--  drivers/clk/imx/clk.c | 18
-rw-r--r--  drivers/clk/imx/clk.h | 12
-rw-r--r--  drivers/clk/ingenic/cgu.c | 134
-rw-r--r--  drivers/clk/keystone/sci-clk.c | 19
-rw-r--r--  drivers/clk/mediatek/Kconfig | 48
-rw-r--r--  drivers/clk/mediatek/Makefile | 6
-rw-r--r--  drivers/clk/mediatek/clk-mt6765.c | 1
-rw-r--r--  drivers/clk/mediatek/clk-mt6779.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt6797.c | 8
-rw-r--r--  drivers/clk/mediatek/clk-mt7629.c | 9
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-aud.c | 66
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-img.c | 60
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mfgcfg.c | 58
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mm.c | 132
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-vdec.c | 73
-rw-r--r--  drivers/clk/mediatek/clk-mt8167.c | 1062
-rw-r--r--  drivers/clk/meson/Kconfig | 26
-rw-r--r--  drivers/clk/meson/axg-audio.c | 214
-rw-r--r--  drivers/clk/meson/clk-phase.c | 56
-rw-r--r--  drivers/clk/meson/clk-phase.h | 6
-rw-r--r--  drivers/clk/meson/clk-regmap.h | 5
-rw-r--r--  drivers/clk/meson/g12a.c | 11
-rw-r--r--  drivers/clk/meson/meson-aoclk.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 4
-rw-r--r--  drivers/clk/mmp/clk-of-pxa1928.c | 3
-rw-r--r--  drivers/clk/mvebu/ap-cpu-clk.c | 2
-rw-r--r--  drivers/clk/pxa/clk-pxa.h | 8
-rw-r--r--  drivers/clk/qcom/Kconfig | 27
-rw-r--r--  drivers/clk/qcom/Makefile | 3
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 10
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 19
-rw-r--r--  drivers/clk/qcom/clk-regmap.h | 6
-rw-r--r--  drivers/clk/qcom/dispcc-sc7180.c | 3
-rw-r--r--  drivers/clk/qcom/dispcc-sm8250.c | 1107
-rw-r--r--  drivers/clk/qcom/gcc-ipq8074.c | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8939.c | 12
-rw-r--r--  drivers/clk/qcom/gcc-msm8994.c | 388
-rw-r--r--  drivers/clk/qcom/gcc-sdm660.c | 2
-rw-r--r--  drivers/clk/qcom/gdsc.c | 8
-rw-r--r--  drivers/clk/qcom/videocc-sm8150.c | 276
-rw-r--r--  drivers/clk/qcom/videocc-sm8250.c | 369
-rw-r--r--  drivers/clk/renesas/Kconfig | 7
-rw-r--r--  drivers/clk/renesas/Makefile | 1
-rw-r--r--  drivers/clk/renesas/r7s9210-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a7742-cpg-mssr.c | 3
-rw-r--r--  drivers/clk/renesas/r8a7743-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a7745-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a77470-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a7790-cpg-mssr.c | 4
-rw-r--r--  drivers/clk/renesas/r8a7791-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a7792-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a7794-cpg-mssr.c | 2
-rw-r--r--  drivers/clk/renesas/r8a779a0-cpg-mssr.c | 276
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 136
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h | 14
-rw-r--r--  drivers/clk/rockchip/Kconfig | 78
-rw-r--r--  drivers/clk/rockchip/Makefile | 42
-rw-r--r--  drivers/clk/rockchip/clk-ddr.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-half-divider.c | 18
-rw-r--r--  drivers/clk/rockchip/clk-rk3308.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 56
-rw-r--r--  drivers/clk/rockchip/clk.c | 113
-rw-r--r--  drivers/clk/rockchip/softrst.c | 7
-rw-r--r--  drivers/clk/samsung/clk-cpu.c | 37
-rw-r--r--  drivers/clk/samsung/clk-cpu.h | 6
-rw-r--r--  drivers/clk/samsung/clk-exynos3250.c | 6
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 7
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 8
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 27
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 10
-rw-r--r--  drivers/clk/samsung/clk-s3c2410-dclk.c | 15
-rw-r--r--  drivers/clk/samsung/clk-s3c2410.c | 1
-rw-r--r--  drivers/clk/samsung/clk-s3c2412.c | 1
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c | 1
-rw-r--r--  drivers/clk/samsung/clk-s3c64xx.c | 1
-rw-r--r--  drivers/clk/sirf/clk-prima2.c | 2
-rw-r--r--  drivers/clk/socfpga/clk-agilex.c | 13
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 10
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 2
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c | 214
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100-r.h | 21
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100.c | 1276
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a100.h | 56
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 37
-rw-r--r--  drivers/clk/tegra/clk-tegra210-emc.c | 2
-rw-r--r--  drivers/clk/ti/autoidle.c | 14
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 1
-rw-r--r--  drivers/clk/ti/clockdomain.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-cpugear.c | 2
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-mux.c | 2
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 4
-rw-r--r--  drivers/clocksource/mps2-timer.c | 6
-rw-r--r--  drivers/clocksource/timer-armada-370-xp.c | 8
-rw-r--r--  drivers/clocksource/timer-probe.c | 2
-rw-r--r--  drivers/clocksource/timer-sp.h | 32
-rw-r--r--  drivers/clocksource/timer-sp804.c | 210
-rw-r--r--  drivers/connector/connector.c | 7
-rw-r--r--  drivers/counter/microchip-tcb-capture.c | 2
-rw-r--r--  drivers/counter/ti-eqep.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 4
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 296
-rw-r--r--  drivers/cpufreq/cpufreq.c | 79
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 2
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 105
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 1
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 49
-rw-r--r--  drivers/cpufreq/longhaul.c | 1
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-hw.c | 144
-rw-r--r--  drivers/cpufreq/s3c2410-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/s3c2412-cpufreq.c | 44
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c | 29
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq-debugfs.c | 2
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 31
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 2
-rw-r--r--  drivers/cpufreq/sti-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/tegra186-cpufreq.c | 30
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 12
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c | 59
-rw-r--r--  drivers/cpuidle/cpuidle-tegra.c | 34
-rw-r--r--  drivers/cpuidle/cpuidle.c | 1
-rw-r--r--  drivers/cpuidle/sysfs.c | 3
-rw-r--r--  drivers/crypto/Kconfig | 25
-rw-r--r--  drivers/crypto/allwinner/Kconfig | 43
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 17
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/Makefile | 3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 131
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 405
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 413
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 164
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 127
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 139
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/Makefile | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 16
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 229
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 444
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 173
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 89
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 2
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 1
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 12
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 16
-rw-r--r--  drivers/crypto/atmel-aes.c | 2
-rw-r--r--  drivers/crypto/atmel-tdes.c | 2
-rw-r--r--  drivers/crypto/bcm/cipher.c | 111
-rw-r--r--  drivers/crypto/bcm/cipher.h | 1
-rw-r--r--  drivers/crypto/bcm/spu.c | 23
-rw-r--r--  drivers/crypto/bcm/spu.h | 1
-rw-r--r--  drivers/crypto/bcm/spu2.c | 12
-rw-r--r--  drivers/crypto/bcm/spu2.h | 1
-rw-r--r--  drivers/crypto/caam/Kconfig | 3
-rw-r--r--  drivers/crypto/caam/Makefile | 2
-rw-r--r--  drivers/crypto/caam/caamalg.c | 94
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 28
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 94
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 118
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 88
-rw-r--r--  drivers/crypto/caam/debugfs.c | 96
-rw-r--r--  drivers/crypto/caam/debugfs.h | 26
-rw-r--r--  drivers/crypto/caam/dpseci-debugfs.c | 23
-rw-r--r--  drivers/crypto/caam/intern.h | 17
-rw-r--r--  drivers/crypto/caam/jr.c | 10
-rw-r--r--  drivers/crypto/caam/qi.c | 20
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 8
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 1
-rw-r--r--  drivers/crypto/cavium/zip/zip_main.c | 44
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 3
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 282
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h | 1
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 7
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 1
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 6
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 32
-rw-r--r--  drivers/crypto/chelsio/Makefile | 5
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 33
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 64
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 98
-rw-r--r--  drivers/crypto/hifn_795x.c | 28
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h | 1
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 59
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 33
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 237
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 31
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 51
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 39
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 15
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 140
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 195
-rw-r--r--  drivers/crypto/img-hash.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 44
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 28
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 90
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 153
-rw-r--r--  drivers/crypto/inside-secure/safexcel_ring.c | 9
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 2
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c | 4
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h | 20
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c | 5
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c | 24
-rw-r--r--  drivers/crypto/marvell/cesa/tdma.c | 16
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 8
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 57
-rw-r--r--  drivers/crypto/mediatek/mtk-platform.c | 16
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 2
-rw-r--r--  drivers/crypto/n2_core.c | 60
-rw-r--r--  drivers/crypto/omap-sham.c | 189
-rw-r--r--  drivers/crypto/padlock-aes.c | 1
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 9
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 19
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 17
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 19
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 17
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 19
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 42
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 10
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 7
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 9
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 19
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 17
-rw-r--r--  drivers/crypto/qce/core.c | 1
-rw-r--r--  drivers/crypto/qce/sha.c | 1
-rw-r--r--  drivers/crypto/qce/skcipher.c | 1
-rw-r--r--  drivers/crypto/qcom-rng.c | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 1
-rw-r--r--  drivers/crypto/s5p-sss.c | 28
-rw-r--r--  drivers/crypto/sa2ul.c | 235
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/crypto/stm32/Kconfig | 1
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 22
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 47
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 16
-rw-r--r--  drivers/crypto/talitos.c | 8
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 28
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 31
-rw-r--r--  drivers/crypto/virtio/Kconfig | 1
-rw-r--r--  drivers/crypto/xilinx/zynqmp-aes-gcm.c | 1
-rw-r--r--  drivers/dax/Kconfig | 6
-rw-r--r--  drivers/dax/Makefile | 3
-rw-r--r--  drivers/dax/bus.c | 1049
-rw-r--r--  drivers/dax/bus.h | 28
-rw-r--r--  drivers/dax/dax-private.h | 60
-rw-r--r--  drivers/dax/device.c | 138
-rw-r--r--  drivers/dax/hmem/Makefile | 6
-rw-r--r--  drivers/dax/hmem/device.c | 100
-rw-r--r--  drivers/dax/hmem/hmem.c (renamed from drivers/dax/hmem.c) | 23
-rw-r--r--  drivers/dax/kmem.c | 198
-rw-r--r--  drivers/dax/pmem/compat.c | 2
-rw-r--r--  drivers/dax/pmem/core.c | 22
-rw-r--r--  drivers/dax/super.c | 3
-rw-r--r--  drivers/devfreq/devfreq-event.c | 14
-rw-r--r--  drivers/devfreq/devfreq.c | 57
-rw-r--r--  drivers/devfreq/exynos-bus.c | 7
-rw-r--r--  drivers/devfreq/rk3399_dmc.c | 2
-rw-r--r--  drivers/devfreq/tegra30-devfreq.c | 8
-rw-r--r--  drivers/dma-buf/dma-fence.c | 1
-rw-r--r--  drivers/dma-buf/dma-resv.c | 5
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c | 2
-rw-r--r--  drivers/dma-buf/heaps/heap-helpers.c | 13
-rw-r--r--  drivers/dma-buf/udmabuf.c | 10
-rw-r--r--  drivers/dma/Kconfig | 18
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/altera-msgdma.c | 8
-rw-r--r--  drivers/dma/at_hdmac.c | 7
-rw-r--r--  drivers/dma/at_xdmac.c | 7
-rw-r--r--  drivers/dma/bcm2835-dma.c | 3
-rw-r--r--  drivers/dma/coh901318.c | 7
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 141
-rw-r--r--  drivers/dma/dma-jz4780.c | 7
-rw-r--r--  drivers/dma/dmaengine.c | 24
-rw-r--r--  drivers/dma/dmatest.c | 23
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 2
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-regs.h | 2
-rw-r--r--  drivers/dma/dw/core.c | 12
-rw-r--r--  drivers/dma/dw/dw.c | 7
-rw-r--r--  drivers/dma/dw/idma32.c | 5
-rw-r--r--  drivers/dma/dw/of.c | 7
-rw-r--r--  drivers/dma/ep93xx_dma.c | 7
-rw-r--r--  drivers/dma/fsl_raid.c | 8
-rw-r--r--  drivers/dma/fsldma.c | 6
-rw-r--r--  drivers/dma/idxd/device.c | 10
-rw-r--r--  drivers/dma/idxd/idxd.h | 3
-rw-r--r--  drivers/dma/idxd/init.c | 2
-rw-r--r--  drivers/dma/idxd/irq.c | 2
-rw-r--r--  drivers/dma/idxd/sysfs.c | 95
-rw-r--r--  drivers/dma/imx-dma.c | 9
-rw-r--r--  drivers/dma/imx-sdma.c | 2
-rw-r--r--  drivers/dma/ioat/dma.c | 12
-rw-r--r--  drivers/dma/ioat/dma.h | 2
-rw-r--r--  drivers/dma/ioat/init.c | 4
-rw-r--r--  drivers/dma/iop-adma.c | 19
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c | 6
-rw-r--r--  drivers/dma/k3dma.c | 6
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c | 7
-rw-r--r--  drivers/dma/mediatek/mtk-uart-apdma.c | 7
-rw-r--r--  drivers/dma/mic_x100_dma.c | 770
-rw-r--r--  drivers/dma/mic_x100_dma.h | 275
-rw-r--r--  drivers/dma/mmp_pdma.c | 6
-rw-r--r--  drivers/dma/mmp_tdma.c | 6
-rw-r--r--  drivers/dma/mpc512x_dma.c | 6
-rw-r--r--  drivers/dma/mv_xor.c | 7
-rw-r--r--  drivers/dma/mv_xor_v2.c | 8
-rw-r--r--  drivers/dma/mxs-dma.c | 9
-rw-r--r--  drivers/dma/nbpfaxi.c | 6
-rw-r--r--  drivers/dma/owl-dma.c | 3
-rw-r--r--  drivers/dma/pch_dma.c | 42
-rw-r--r--  drivers/dma/pl330.c | 30
-rw-r--r--  drivers/dma/plx_dma.c | 7
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 7
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 10
-rw-r--r--  drivers/dma/qcom/hidma.c | 6
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 6
-rw-r--r--  drivers/dma/sa11x0-dma.c | 6
-rw-r--r--  drivers/dma/sf-pdma/sf-pdma.c | 25
-rw-r--r--  drivers/dma/sh/Kconfig | 4
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 4
-rw-r--r--  drivers/dma/sh/shdma-base.c | 2
-rw-r--r--  drivers/dma/sirf-dma.c | 6
-rw-r--r--  drivers/dma/ste_dma40.c | 10
-rw-r--r--  drivers/dma/stm32-dma.c | 8
-rw-r--r--  drivers/dma/stm32-dmamux.c | 9
-rw-r--r--  drivers/dma/stm32-mdma.c | 9
-rw-r--r--  drivers/dma/sun6i-dma.c | 6
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 7
-rw-r--r--  drivers/dma/ti/Makefile | 5
-rw-r--r--  drivers/dma/ti/k3-psil-j7200.c | 175
-rw-r--r--  drivers/dma/ti/k3-psil-j721e.c | 3
-rw-r--r--  drivers/dma/ti/k3-psil-priv.h | 1
-rw-r--r--  drivers/dma/ti/k3-psil.c | 19
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c | 19
-rw-r--r--  drivers/dma/ti/k3-udma.c | 64
-rw-r--r--  drivers/dma/ti/omap-dma.c | 2
-rw-r--r--  drivers/dma/timb_dma.c | 6
-rw-r--r--  drivers/dma/txx9dmac.c | 14
-rw-r--r--  drivers/dma/virt-dma.c | 6
-rw-r--r--  drivers/dma/xgene-dma.c | 7
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 45
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 218
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 8
-rw-r--r--  drivers/dma/zx_dma.c | 6
-rw-r--r--  drivers/edac/Kconfig | 7
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/al_mc_edac.c | 354
-rw-r--r--  drivers/edac/amd64_edac.c | 6
-rw-r--r--  drivers/edac/aspeed_edac.c | 22
-rw-r--r--  drivers/edac/e752x_edac.c | 2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 22
-rw-r--r--  drivers/edac/ghes_edac.c | 19
-rw-r--r--  drivers/edac/i5100_edac.c | 11
-rw-r--r--  drivers/edac/i5400_edac.c | 4
-rw-r--r--  drivers/edac/i7300_edac.c | 4
-rw-r--r--  drivers/edac/i7core_edac.c | 4
-rw-r--r--  drivers/edac/ie31200_edac.c | 6
-rw-r--r--  drivers/edac/mce_amd.c | 15
-rw-r--r--  drivers/edac/sb_edac.c | 7
-rw-r--r--  drivers/edac/thunderx_edac.c | 2
-rw-r--r--  drivers/edac/ti_edac.c | 5
-rw-r--r--  drivers/extcon/extcon-axp288.c | 13
-rw-r--r--  drivers/extcon/extcon-max14577.c | 2
-rw-r--r--  drivers/extcon/extcon-max77693.c | 2
-rw-r--r--  drivers/extcon/extcon-max77843.c | 2
-rw-r--r--  drivers/extcon/extcon-max8997.c | 2
-rw-r--r--  drivers/extcon/extcon-palmas.c | 20
-rw-r--r--  drivers/extcon/extcon-ptn5150.c | 226
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 2
-rw-r--r--  drivers/firewire/ohci.c | 26
-rw-r--r--  drivers/firmware/Kconfig | 7
-rw-r--r--  drivers/firmware/Makefile | 2
-rw-r--r--  drivers/firmware/arm_scmi/Makefile | 6
-rw-r--r--  drivers/firmware/arm_scmi/base.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 6
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 11
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 28
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 39
-rw-r--r--  drivers/firmware/arm_scmi/mailbox.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/notify.c | 24
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 11
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 9
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 13
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 11
-rw-r--r--  drivers/firmware/arm_scmi/smc.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/system.c | 131
-rw-r--r--  drivers/firmware/arm_sdei.c | 305
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_sprom.c | 1
-rw-r--r--  drivers/firmware/dmi_scan.c | 2
-rw-r--r--  drivers/firmware/efi/Kconfig | 21
-rw-r--r--  drivers/firmware/efi/Makefile | 5
-rw-r--r--  drivers/firmware/efi/cper.c | 18
-rw-r--r--  drivers/firmware/efi/efi-init.c (renamed from drivers/firmware/efi/arm-init.c) | 1
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 83
-rw-r--r--  drivers/firmware/efi/efi.c | 53
-rw-r--r--  drivers/firmware/efi/efivars.c | 45
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 22
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 178
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 15
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 101
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub.c | 59
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 61
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/file.c | 5
-rw-r--r--  drivers/firmware/efi/libstub/hidden.h | 6
-rw-r--r--  drivers/firmware/efi/libstub/relocate.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/riscv-stub.c | 109
-rw-r--r--  drivers/firmware/efi/libstub/string.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/vsprintf.c | 2
-rw-r--r--  drivers/firmware/efi/mokvar-table.c | 359
-rw-r--r--  drivers/firmware/efi/riscv-runtime.c | 143
-rw-r--r--  drivers/firmware/efi/vars.c | 22
-rw-r--r--  drivers/firmware/efi/x86_fake_mem.c | 12
-rw-r--r--  drivers/firmware/google/Kconfig | 2
-rw-r--r--  drivers/firmware/google/gsmi.c | 8
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 4
-rw-r--r--  drivers/firmware/psci/psci.c | 12
-rw-r--r--  drivers/firmware/qcom_scm.c | 24
-rw-r--r--  drivers/firmware/qcom_scm.h | 1
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 3
-rw-r--r--  drivers/firmware/raspberrypi.c | 61
-rw-r--r--  drivers/firmware/smccc/smccc.c | 2
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 3
-rw-r--r--  drivers/firmware/ti_sci.c | 6
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 3
-rw-r--r--  drivers/fpga/dfl-fme-perf.c | 2
-rw-r--r--  drivers/fpga/dfl-pci.c | 24
-rw-r--r--  drivers/fpga/dfl.c | 477
-rw-r--r--  drivers/fpga/dfl.h | 103
-rw-r--r--  drivers/fpga/fpga-region.c | 2
-rw-r--r--  drivers/fpga/stratix10-soc.c | 23
-rw-r--r--  drivers/fpga/xilinx-spi.c | 77
-rw-r--r--  drivers/fsi/fsi-core.c | 31
-rw-r--r--  drivers/fsi/fsi-master-aspeed.c | 134
-rw-r--r--  drivers/fsi/fsi-master-ast-cf.c | 7
-rw-r--r--  drivers/fsi/fsi-master-gpio.c | 5
-rw-r--r--  drivers/fsi/fsi-master-hub.c | 15
-rw-r--r--  drivers/fsi/fsi-master.h | 3
-rw-r--r--  drivers/fsi/fsi-occ.c | 2
-rw-r--r--  drivers/fsi/fsi-sbefifo.c | 2
-rw-r--r--  drivers/fsi/fsi-scom.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 45
-rw-r--r--  drivers/gpio/Makefile | 4
-rw-r--r--  drivers/gpio/gpio-aggregator.c | 70
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 1
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 5
-rw-r--r--  drivers/gpio/gpio-davinci.c | 8
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 356
-rw-r--r--  drivers/gpio/gpio-mockup.c | 158
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 45
-rw-r--r--  drivers/gpio/gpio-mxc.c | 6
-rw-r--r--  drivers/gpio/gpio-omap.c | 17
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 11
-rw-r--r--  drivers/gpio/gpio-pcie-idio-24.c | 62
-rw-r--r--  drivers/gpio/gpio-pisosr.c | 9
-rw-r--r--  drivers/gpio/gpio-sifive.c | 2
-rw-r--r--  drivers/gpio/gpio-sl28cpld.c | 161
-rw-r--r--  drivers/gpio/gpio-stp-xway.c | 54
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 18
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 15
-rw-r--r--  drivers/gpio/gpio-zynq.c | 8
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 3
-rw-r--r--  drivers/gpio/gpiolib-cdev.c | 1314
-rw-r--r--  drivers/gpio/gpiolib-cdev.h | 4
-rw-r--r--  drivers/gpio/gpiolib-devprop.c | 63
-rw-r--r--  drivers/gpio/gpiolib-of.c | 5
-rw-r--r--  drivers/gpio/gpiolib.c | 107
-rw-r--r--  drivers/gpio/gpiolib.h | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 190
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 216
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 841
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_df.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 100
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 107
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 229
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c | 127
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 313
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 381
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 251
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 221
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 276
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 217
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 75
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | 6
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 117
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 296
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 531
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 7
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 170
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v12_0.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ta_rap_if.h | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 331
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v8_7.h | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 174
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 79
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 195
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 53
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 987
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 668
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/Makefile | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 71
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c | 354
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c | 174
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 169
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 98
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dsc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 131
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 369
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 176
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 103
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.c | 205
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 330
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_transform.h | 178
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/Makefile | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c | 432
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c | 1527
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c | 266
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 156
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 83
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 261
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c | 175
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c | 411
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c | 395
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 156
-rw-r--r--  drivers/gpu/drm/amd/display/include/bios_parser_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 44
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/link_service_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 16
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 116
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h | 76
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h | 104
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h | 0
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h | 22
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h | 0
-rw-r--r--[-rwxr-xr-x]  drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h | 0
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h | 25
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h | 4
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h | 33
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h | 79
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h | 20
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 17
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 110
-rw-r--r--  drivers/gpu/drm/amd/pm/Makefile | 46
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c) | 479
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c) | 957
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amd_powerplay.h (renamed from drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h) | 37
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h) | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h (renamed from drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h) | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/hardwaremanager.h (renamed from drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/inc/hwmgr.h) | 7
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h (renamed from drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/power_state.h (renamed from drivers/gpu/drm/amd/powerplay/inc/power_state.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/pp_debug.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_debug.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/pp_endian.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_endian.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/ppinterrupt.h (renamed from drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu10.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu10.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu10_driver_if.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h) | 39
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h) | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu7.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu71.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu71.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu71_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu72.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu72.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu72_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu73.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu73.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu73_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu74.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu74.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu74_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu75.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu75.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu75_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_common.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu7_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu7_fusion.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu8.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu8.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu8_fusion.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu9.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu9.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h | 194
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_types.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_types.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h) | 30
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h) | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v12_0.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smumgr.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/Makefile (renamed from drivers/gpu/drm/amd/powerplay/Makefile) | 23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c (renamed from drivers/gpu/drm/amd/powerplay/amd_powerplay.c)35
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/cik_dpm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/Makefile)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c)7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c)5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h)9
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c)95
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c)28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c)2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c)140
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h)1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h)1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c)2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c)154
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h)1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c)8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/kv_dpm.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/kv_dpm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/kv_smc.c (renamed from drivers/gpu/drm/amd/amdgpu/kv_smc.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/ppsmc.h (renamed from drivers/gpu/drm/amd/amdgpu/ppsmc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/r600_dpm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/si_dpm.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/si_dpm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_smc.c (renamed from drivers/gpu/drm/amd/amdgpu/si_smc.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/sislands_smc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile (renamed from drivers/gpu/drm/amd/powerplay/smumgr/Makefile)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c)29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c)7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c)8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c)12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/Makefile36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c (renamed from drivers/gpu/drm/amd/powerplay/amdgpu_smu.c)212
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c (renamed from drivers/gpu/drm/amd/powerplay/arcturus_ppt.c)146
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/arcturus_ppt.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c (renamed from drivers/gpu/drm/amd/powerplay/navi10_ppt.c)479
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/navi10_ppt.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c (renamed from drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c)236
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c (renamed from drivers/gpu/drm/amd/powerplay/smu_v11_0.c)329
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c (renamed from drivers/gpu/drm/amd/powerplay/renoir_ppt.c)262
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/renoir_ppt.h)1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c (renamed from drivers/gpu/drm/amd/powerplay/smu_v12_0.c)12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c (renamed from drivers/gpu/drm/amd/powerplay/smu_cmn.c)83
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h (renamed from drivers/gpu/drm/amd/powerplay/smu_cmn.h)12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h (renamed from drivers/gpu/drm/amd/powerplay/smu_internal.h)6
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c4
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c2
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h2
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c30
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c28
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c15
-rw-r--r--drivers/gpu/drm/ast/ast_cursor.c11
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c23
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c82
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h45
-rw-r--r--drivers/gpu/drm/ast/ast_main.c74
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c143
-rw-r--r--drivers/gpu/drm/ast/ast_post.c6
-rw-r--r--drivers/gpu/drm/bridge/Kconfig33
-rw-r--r--drivers/gpu/drm/bridge/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c12
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c9
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig24
-rw-r--r--drivers/gpu/drm/bridge/cadence/Makefile4
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c2532
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h400
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c78
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h19
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c1230
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c29
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c109
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c101
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c100
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c91
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c155
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c280
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c109
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c123
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c749
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c3
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c24
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c1
-rw-r--r--drivers/gpu/drm/drm_cache.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c94
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c4
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c642
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c278
-rw-r--r--drivers/gpu/drm/drm_drv.c119
-rw-r--r--drivers/gpu/drm/drm_edid.c28
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c9
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c23
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c23
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c5
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c169
-rw-r--r--drivers/gpu/drm/drm_internal.h1
-rw-r--r--drivers/gpu/drm/drm_managed.c15
-rw-r--r--drivers/gpu/drm/drm_panel.c85
-rw-r--r--drivers/gpu/drm/drm_prime.c126
-rw-r--r--drivers/gpu/drm/drm_syncobj.c2
-rw-r--r--drivers/gpu/drm/drm_vblank.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c25
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c9
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c20
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c55
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_output.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c34
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c70
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c33
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c2
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c6
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c281
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c270
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c1210
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c703
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c87
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c208
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c117
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c106
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c112
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h10
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c89
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c130
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c108
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c1635
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c51
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h40
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c152
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c128
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c55
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c67
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c12
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c11
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c50
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c146
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c75
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c45
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c112
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.h5
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c184
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c305
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c319
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h84
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c106
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h33
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c97
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c105
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c300
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h142
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c258
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c150
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c73
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.h9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c42
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c183
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c22
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c196
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c34
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c36
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c76
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h44
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c79
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c32
-rw-r--r--drivers/gpu/drm/i915/i915_active.c246
-rw-r--r--drivers/gpu/drm/i915/i915_active.h31
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c162
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h150
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c139
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h12
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c228
-rw-r--r--drivers/gpu/drm/i915/i915_params.c5
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c57
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h70
-rw-r--r--drivers/gpu/drm/i915/i915_request.c223
-rw-r--r--drivers/gpu/drm/i915/i915_request.h8
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c46
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c73
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h13
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c27
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c16
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c16
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c30
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c41
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c77
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c22
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c87
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c44
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c26
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c2
-rw-r--r--drivers/gpu/drm/imx/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig9
-rw-r--r--drivers/gpu/drm/imx/dcss/Makefile6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-blkctl.c70
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-crtc.c219
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-ctxld.c424
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c325
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h177
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dpr.c562
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c138
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dtg.c409
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c198
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.h44
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c405
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-scaler.c826
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-ss.c180
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c17
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c20
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c40
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c26
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c41
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c38
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c11
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c5
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c218
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h67
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c89
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c276
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/Makefile5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c80
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c38
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c37
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c21
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h1
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig12
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c227
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h19
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c153
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h4
-rw-r--r--drivers/gpu/drm/msm/Kconfig19
-rw-r--r--drivers/gpu/drm/msm/Makefile18
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c65
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c77
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c82
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c120
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h12
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c182
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c105
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h82
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c109
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c132
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c55
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c145
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c84
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c51
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c47
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c24
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c68
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c638
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.h72
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c535
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h30
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c1019
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h131
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c1869
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h36
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c485
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h74
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c1463
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h39
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c164
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h18
-rw-r--r--drivers/gpu/drm/msm/dp/dp_hpd.c69
-rw-r--r--drivers/gpu/drm/msm/dp/dp_hpd.h80
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c1210
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h155
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c463
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h100
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c293
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h136
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c372
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.h107
-rw-r--r--drivers/gpu/drm/msm/dp/dp_reg.h306
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h423
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c102
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c255
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h10
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c904
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c28
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h97
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c76
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h10
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c14
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c56
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h22
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h83
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c17
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c208
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h16
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c7
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig8
-rw-r--r--drivers/gpu/drm/mxsfb/Makefile2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c343
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c273
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h42
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c571
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c99
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_regs.h107
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c29
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c41
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core907d.c36
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core917d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c336
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo0039.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo5039.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo74c1.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo85b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo9039.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo90b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_boa0b5.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c174
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c217
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h48
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c66
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c193
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h9
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c17
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c14
-rw-r--r--drivers/gpu/drm/panel/Kconfig34
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c4
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c6
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c48
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c12
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c51
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c23
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c25
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c7
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c277
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c35
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c4
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c37
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c58
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c49
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c13
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c337
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c60
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c46
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c4
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c13
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c4
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c6
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c40
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c18
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c20
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c7
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c27
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c9
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c9
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c139
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c101
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c174
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.h13
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c7
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c232
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c14
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c61
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c85
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c7
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c6
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c42
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c63
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c41
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c51
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c175
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h30
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c61
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c32
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c37
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c7
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c22
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c34
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c112
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c333
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c6
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c4
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c2
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c37
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c54
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c47
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c9
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c4
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c17
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c38
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c29
-rw-r--r--drivers/gpu/drm/tegra/output.c34
-rw-r--r--drivers/gpu/drm/tegra/plane.c15
-rw-r--r--drivers/gpu/drm/tegra/rgb.c102
-rw-r--r--drivers/gpu/drm/tegra/sor.c7
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c32
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h4
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c45
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c515
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c402
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c46
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c (renamed from drivers/gpu/drm/ttm/ttm_bo_manager.c)84
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c146
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c93
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c13
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c13
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c10
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c368
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c45
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h66
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c1652
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h184
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_phy.c521
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h442
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c273
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c316
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c224
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h177
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c12
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c57
-rw-r--r--drivers/gpu/drm/virtio/Kconfig3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c14
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c96
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c120
-rw-r--r--drivers/gpu/drm/vkms/Makefile9
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c171
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c5
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c56
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h10
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c4
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c142
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c70
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c73
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c60
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c268
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c5
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c6
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c27
-rw-r--r--drivers/gpu/host1x/job.c22
-rw-r--r--drivers/gpu/host1x/mipi.c22
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c67
-rw-r--r--drivers/greybus/interface.c6
-rw-r--r--drivers/hid/Kconfig9
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-core.c15
-rw-r--r--drivers/hid/hid-cp2112.c19
-rw-r--r--drivers/hid/hid-debug.c10
-rw-r--r--drivers/hid/hid-hyperv.c4
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c4
-rw-r--r--drivers/hid/hid-ite.c4
-rw-r--r--drivers/hid/hid-logitech-dj.c2
-rw-r--r--drivers/hid/hid-multitouch.c12
-rw-r--r--drivers/hid/hid-picolcd_cir.c10
-rw-r--r--drivers/hid/hid-rmi.c2
-rw-r--r--drivers/hid/hid-roccat-kone.c23
-rw-r--r--drivers/hid/hid-vivaldi.c144
-rw-r--r--drivers/hid/hid-wiimote-core.c10
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c15
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c8
-rw-r--r--drivers/hid/wacom_wac.c4
-rw-r--r--drivers/hv/channel.c461
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_util.c11
-rw-r--r--drivers/hv/vmbus_drv.c32
-rw-r--r--drivers/hwmon/Kconfig33
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ad7414.c5
-rw-r--r--drivers/hwmon/ad7418.c9
-rw-r--r--drivers/hwmon/adc128d818.c5
-rw-r--r--drivers/hwmon/adm1021.c9
-rw-r--r--drivers/hwmon/adm1025.c5
-rw-r--r--drivers/hwmon/adm1026.c5
-rw-r--r--drivers/hwmon/adm1029.c5
-rw-r--r--drivers/hwmon/adm1031.c9
-rw-r--r--drivers/hwmon/adm1177.c5
-rw-r--r--drivers/hwmon/adm9240.c356
-rw-r--r--drivers/hwmon/ads7828.c9
-rw-r--r--drivers/hwmon/adt7410.c5
-rw-r--r--drivers/hwmon/adt7411.c5
-rw-r--r--drivers/hwmon/adt7462.c5
-rw-r--r--drivers/hwmon/adt7470.c5
-rw-r--r--drivers/hwmon/adt7475.c6
-rw-r--r--drivers/hwmon/amc6821.c5
-rw-r--r--drivers/hwmon/amd_energy.c166
-rw-r--r--drivers/hwmon/applesmc.c130
-rw-r--r--drivers/hwmon/asb100.c8
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/atxp1.c5
-rw-r--r--drivers/hwmon/bt1-pvt.c138
-rw-r--r--drivers/hwmon/bt1-pvt.h3
-rw-r--r--drivers/hwmon/dme1737.c9
-rw-r--r--drivers/hwmon/ds1621.c9
-rw-r--r--drivers/hwmon/ds620.c5
-rw-r--r--drivers/hwmon/emc1403.c8
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc6w201.c5
-rw-r--r--drivers/hwmon/f75375s.c10
-rw-r--r--drivers/hwmon/fschmd.c10
-rw-r--r--drivers/hwmon/ftsteutates.c4
-rw-r--r--drivers/hwmon/g760a.c5
-rw-r--r--drivers/hwmon/g762.c4
-rw-r--r--drivers/hwmon/gl518sm.c5
-rw-r--r--drivers/hwmon/gl520sm.c5
-rw-r--r--drivers/hwmon/gsc-hwmon.c32
-rw-r--r--drivers/hwmon/hih6130.c5
-rw-r--r--drivers/hwmon/hwmon.c10
-rw-r--r--drivers/hwmon/ina209.c5
-rw-r--r--drivers/hwmon/ina2xx.c9
-rw-r--r--drivers/hwmon/ina3221.c5
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c334
-rw-r--r--drivers/hwmon/jc42.c4
-rw-r--r--drivers/hwmon/k10temp.c162
-rw-r--r--drivers/hwmon/lineage-pem.c5
-rw-r--r--drivers/hwmon/lm63.c9
-rw-r--r--drivers/hwmon/lm73.c4
-rw-r--r--drivers/hwmon/lm75.c33
-rw-r--r--drivers/hwmon/lm77.c4
-rw-r--r--drivers/hwmon/lm78.c9
-rw-r--r--drivers/hwmon/lm80.c5
-rw-r--r--drivers/hwmon/lm83.c9
-rw-r--r--drivers/hwmon/lm85.c8
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm90.c7
-rw-r--r--drivers/hwmon/lm92.c5
-rw-r--r--drivers/hwmon/lm93.c5
-rw-r--r--drivers/hwmon/lm95234.c9
-rw-r--r--drivers/hwmon/lm95241.c5
-rw-r--r--drivers/hwmon/lm95245.c5
-rw-r--r--drivers/hwmon/ltc2945.c5
-rw-r--r--drivers/hwmon/ltc2947-i2c.c5
-rw-r--r--drivers/hwmon/ltc2990.c5
-rw-r--r--drivers/hwmon/ltc4151.c5
-rw-r--r--drivers/hwmon/ltc4215.c5
-rw-r--r--drivers/hwmon/ltc4222.c5
-rw-r--r--drivers/hwmon/ltc4245.c5
-rw-r--r--drivers/hwmon/ltc4260.c5
-rw-r--r--drivers/hwmon/ltc4261.c5
-rw-r--r--drivers/hwmon/max16065.c8
-rw-r--r--drivers/hwmon/max1619.c5
-rw-r--r--drivers/hwmon/max1668.c9
-rw-r--r--drivers/hwmon/max31730.c4
-rw-r--r--drivers/hwmon/max31790.c5
-rw-r--r--drivers/hwmon/max6621.c5
-rw-r--r--drivers/hwmon/max6639.c5
-rw-r--r--drivers/hwmon/max6642.c5
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/max6697.c9
-rw-r--r--drivers/hwmon/mcp3021.c9
-rw-r--r--drivers/hwmon/mr75203.c656
-rw-r--r--drivers/hwmon/nct7802.c5
-rw-r--r--drivers/hwmon/nct7904.c5
-rw-r--r--drivers/hwmon/occ/p8_i2c.c5
-rw-r--r--drivers/hwmon/pcf8591.c5
-rw-r--r--drivers/hwmon/pmbus/Kconfig20
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/adm1266.c513
-rw-r--r--drivers/hwmon/pmbus/adm1275.c11
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c11
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c19
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c7
-rw-r--r--drivers/hwmon/pmbus/ir35221.c7
-rw-r--r--drivers/hwmon/pmbus/ir38064.c7
-rw-r--r--drivers/hwmon/pmbus/irps5401.c7
-rw-r--r--drivers/hwmon/pmbus/isl68137.c11
-rw-r--r--drivers/hwmon/pmbus/lm25066.c11
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c14
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c7
-rw-r--r--drivers/hwmon/pmbus/max16064.c7
-rw-r--r--drivers/hwmon/pmbus/max16601.c7
-rw-r--r--drivers/hwmon/pmbus/max20730.c390
-rw-r--r--drivers/hwmon/pmbus/max20751.c7
-rw-r--r--drivers/hwmon/pmbus/max31785.c9
-rw-r--r--drivers/hwmon/pmbus/max34440.c39
-rw-r--r--drivers/hwmon/pmbus/max8688.c7
-rw-r--r--drivers/hwmon/pmbus/mp2975.c769
-rw-r--r--drivers/hwmon/pmbus/pmbus.c11
-rw-r--r--drivers/hwmon/pmbus/pmbus.h16
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c349
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c7
-rw-r--r--drivers/hwmon/pmbus/tps40422.c7
-rw-r--r--drivers/hwmon/pmbus/tps53679.c11
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c13
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c13
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c7
-rw-r--r--drivers/hwmon/pmbus/zl6100.c11
-rw-r--r--drivers/hwmon/powr1220.c5
-rw-r--r--drivers/hwmon/pwm-fan.c26
-rw-r--r--drivers/hwmon/s3c-hwmon.c2
-rw-r--r--drivers/hwmon/scmi-hwmon.c6
-rw-r--r--drivers/hwmon/sht21.c5
-rw-r--r--drivers/hwmon/sht3x.c9
-rw-r--r--drivers/hwmon/shtc1.c34
-rw-r--r--drivers/hwmon/sl28cpld-hwmon.c142
-rw-r--r--drivers/hwmon/smm665.c9
-rw-r--r--drivers/hwmon/smsc47m192.c5
-rw-r--r--drivers/hwmon/sparx5-temp.c2
-rw-r--r--drivers/hwmon/stts751.c5
-rw-r--r--drivers/hwmon/tc654.c5
-rw-r--r--drivers/hwmon/tc74.c5
-rw-r--r--drivers/hwmon/thmc50.c9
-rw-r--r--drivers/hwmon/tmp102.c5
-rw-r--r--drivers/hwmon/tmp103.c5
-rw-r--r--drivers/hwmon/tmp108.c5
-rw-r--r--drivers/hwmon/tmp401.c7
-rw-r--r--drivers/hwmon/tmp421.c7
-rw-r--r--drivers/hwmon/tmp513.c11
-rw-r--r--drivers/hwmon/w83627ehf.c6
-rw-r--r--drivers/hwmon/w83773g.c5
-rw-r--r--drivers/hwmon/w83781d.c9
-rw-r--r--drivers/hwmon/w83791d.c8
-rw-r--r--drivers/hwmon/w83792d.c7
-rw-r--r--drivers/hwmon/w83793.c8
-rw-r--r--drivers/hwmon/w83795.c9
-rw-r--r--drivers/hwmon/w83l785ts.c8
-rw-r--r--drivers/hwmon/w83l786ng.c4
-rw-r--r--drivers/hwtracing/coresight/Kconfig54
-rw-r--r--drivers/hwtracing/coresight/Makefile26
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c (renamed from drivers/hwtracing/coresight/coresight.c)217
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c (renamed from drivers/hwtracing/coresight/coresight-cti.c)97
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c (renamed from drivers/hwtracing/coresight/coresight-etm3x.c)154
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c (renamed from drivers/hwtracing/coresight/coresight-etm4x.c)193
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c65
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h26
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c65
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c (renamed from drivers/hwtracing/coresight/coresight-tmc.c)25
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c20
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/Kconfig2
-rw-r--r--drivers/hwtracing/stm/ftrace.c7
-rw-r--r--drivers/i2c/Kconfig9
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig20
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c52
-rw-r--r--drivers/i2c/busses/i2c-efm32.c12
-rw-r--r--drivers/i2c/busses/i2c-i801.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c32
-rw-r--r--drivers/i2c/busses/i2c-ismt.c12
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c3
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c2474
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c8
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c5
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c3
-rw-r--r--drivers/i2c/busses/i2c-owl.c5
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c5
-rw-r--r--drivers/i2c/busses/i2c-rcar.c65
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c19
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c86
-rw-r--r--drivers/i2c/busses/i2c-stm32.c12
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c6
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c145
-rw-r--r--drivers/i2c/busses/i2c-tegra.c1420
-rw-r--r--drivers/i2c/busses/i2c-xiic.c62
-rw-r--r--drivers/i2c/i2c-core-acpi.c11
-rw-r--r--drivers/i2c/i2c-slave-testunit.c175
-rw-r--r--drivers/i2c/i2c-smbus.c107
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c16
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c10
-rw-r--r--drivers/i3c/master.c144
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/ide/Kconfig7
-rw-r--r--drivers/ide/ide-cd.c16
-rw-r--r--drivers/ide/ide-disk.c5
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-gd.c48
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/macide.c66
-rw-r--r--drivers/idle/intel_idle.c17
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/accel/adis16201.c26
-rw-r--r--drivers/iio/accel/adis16209.c25
-rw-r--r--drivers/iio/accel/adxl372.c311
-rw-r--r--drivers/iio/accel/adxl372_i2c.c8
-rw-r--r--drivers/iio/accel/adxl372_spi.c4
-rw-r--r--drivers/iio/accel/bma180.c22
-rw-r--r--drivers/iio/accel/bma220_spi.c85
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c2
-rw-r--r--drivers/iio/accel/mma8452.c20
-rw-r--r--drivers/iio/adc/Kconfig4
-rw-r--r--drivers/iio/adc/ad7291.c35
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/ad7949.c2
-rw-r--r--drivers/iio/adc/ad9467.c124
-rw-r--r--drivers/iio/adc/adi-axi-adc.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c26
-rw-r--r--drivers/iio/adc/axp20x_adc.c14
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c4
-rw-r--r--drivers/iio/adc/envelope-detector.c16
-rw-r--r--drivers/iio/adc/exynos_adc.c30
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c15
-rw-r--r--drivers/iio/adc/ltc2497-core.c10
-rw-r--r--drivers/iio/adc/meson_saradc.c18
-rw-r--r--drivers/iio/adc/palmas_gpadc.c13
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c30
-rw-r--r--drivers/iio/adc/stm32-adc-core.c80
-rw-r--r--drivers/iio/adc/stm32-adc.c10
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c14
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c20
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h2
-rw-r--r--drivers/iio/adc/ti-adc081c.c24
-rw-r--r--drivers/iio/adc/ti-adc0832.c11
-rw-r--r--drivers/iio/adc/ti-adc108s102.c5
-rw-r--r--drivers/iio/adc/ti-adc12138.c13
-rw-r--r--drivers/iio/adc/ti-adc128s052.c3
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c16
-rw-r--r--drivers/iio/afe/iio-rescale.c8
-rw-r--r--drivers/iio/amplifiers/Kconfig1
-rw-r--r--drivers/iio/amplifiers/hmc425a.c9
-rw-r--r--drivers/iio/buffer/Kconfig10
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c12
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c3
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c88
-rw-r--r--drivers/iio/chemical/atlas-sensor.c10
-rw-r--r--drivers/iio/chemical/scd30_core.c9
-rw-r--r--drivers/iio/chemical/sgp30.c29
-rw-r--r--drivers/iio/chemical/vz89x.c18
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c3
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c7
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c11
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c3
-rw-r--r--drivers/iio/dac/ad5064.c6
-rw-r--r--drivers/iio/dac/ad5446.c5
-rw-r--r--drivers/iio/dac/ad5592r-base.c56
-rw-r--r--drivers/iio/dac/ad5592r.c7
-rw-r--r--drivers/iio/dac/ad5593r.c7
-rw-r--r--drivers/iio/dac/ad5686.c8
-rw-r--r--drivers/iio/dac/ad5686.h2
-rw-r--r--drivers/iio/dac/ad7303.c6
-rw-r--r--drivers/iio/dac/dpot-dac.c16
-rw-r--r--drivers/iio/dac/mcp4725.c29
-rw-r--r--drivers/iio/dac/stm32-dac-core.c5
-rw-r--r--drivers/iio/dac/stm32-dac.c13
-rw-r--r--drivers/iio/dac/ti-dac082s085.c5
-rw-r--r--drivers/iio/dac/ti-dac5571.c36
-rw-r--r--drivers/iio/dac/ti-dac7612.c14
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c4
-rw-r--r--drivers/iio/frequency/ad9523.c60
-rw-r--r--drivers/iio/frequency/adf4350.c21
-rw-r--r--drivers/iio/gyro/Kconfig12
-rw-r--r--drivers/iio/gyro/Makefile1
-rw-r--r--drivers/iio/gyro/adis16080.c2
-rw-r--r--drivers/iio/gyro/adis16136.c37
-rw-r--r--drivers/iio/gyro/adis16260.c33
-rw-r--r--drivers/iio/gyro/adxrs290.c710
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c15
-rw-r--r--drivers/iio/health/max30102.c15
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/hdc100x.c3
-rw-r--r--drivers/iio/humidity/hdc2010.c353
-rw-r--r--drivers/iio/humidity/htu21.c3
-rw-r--r--drivers/iio/humidity/si7020.c3
-rw-r--r--drivers/iio/iio_core_trigger.h4
-rw-r--r--drivers/iio/imu/adis16400.c72
-rw-r--r--drivers/iio/imu/adis16460.c25
-rw-r--r--drivers/iio/imu/adis16475.c18
-rw-r--r--drivers/iio/imu/adis16480.c55
-rw-r--r--drivers/iio/imu/adis_buffer.c76
-rw-r--r--drivers/iio/imu/adis_trigger.c60
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c20
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h12
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c14
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c42
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c134
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c2
-rw-r--r--drivers/iio/industrialio-buffer.c46
-rw-r--r--drivers/iio/industrialio-core.c5
-rw-r--r--drivers/iio/industrialio-event.c51
-rw-r--r--drivers/iio/industrialio-trigger.c3
-rw-r--r--drivers/iio/light/Kconfig15
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/as73211.c800
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c5
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/isl29018.c9
-rw-r--r--drivers/iio/light/si1145.c19
-rw-r--r--drivers/iio/light/tsl2772.c10
-rw-r--r--drivers/iio/magnetometer/ak8974.c14
-rw-r--r--drivers/iio/magnetometer/ak8975.c8
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c4
-rw-r--r--drivers/iio/magnetometer/mag3110.c20
-rw-r--r--drivers/iio/multiplexer/iio-mux.c8
-rw-r--r--drivers/iio/potentiometer/ad5272.c5
-rw-r--r--drivers/iio/potentiometer/ds1803.c6
-rw-r--r--drivers/iio/potentiometer/max5432.c8
-rw-r--r--drivers/iio/potentiometer/max5481.c23
-rw-r--r--drivers/iio/potentiometer/mcp4018.c12
-rw-r--r--drivers/iio/potentiometer/mcp4131.c8
-rw-r--r--drivers/iio/potentiometer/mcp4531.c11
-rw-r--r--drivers/iio/potentiostat/lmp91000.c11
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c5
-rw-r--r--drivers/iio/pressure/icp10100.c3
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c6
-rw-r--r--drivers/iio/pressure/ms5611_spi.c6
-rw-r--r--drivers/iio/pressure/ms5637.c3
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c6
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c6
-rw-r--r--drivers/iio/proximity/as3935.c44
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c3
-rw-r--r--drivers/iio/proximity/sx9310.c427
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c104
-rw-r--r--drivers/iio/resolver/ad2s1200.c3
-rw-r--r--drivers/iio/temperature/ltc2983.c19
-rw-r--r--drivers/iio/temperature/mlx90632.c283
-rw-r--r--drivers/iio/temperature/tmp007.c4
-rw-r--r--drivers/iio/temperature/tsys01.c3
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/addr.c11
-rw-r--r--drivers/infiniband/core/cache.c72
-rw-r--r--drivers/infiniband/core/cm.c126
-rw-r--r--drivers/infiniband/core/cm_trace.c15
-rw-r--r--drivers/infiniband/core/cm_trace.h414
-rw-r--r--drivers/infiniband/core/cma.c661
-rw-r--r--drivers/infiniband/core/cma_configfs.c9
-rw-r--r--drivers/infiniband/core/cma_trace.h40
-rw-r--r--drivers/infiniband/core/core_priv.h13
-rw-r--r--drivers/infiniband/core/counters.c15
-rw-r--r--drivers/infiniband/core/cq.c39
-rw-r--r--drivers/infiniband/core/device.c77
-rw-r--r--drivers/infiniband/core/rdma_core.c34
-rw-r--r--drivers/infiniband/core/restrack.c161
-rw-r--r--drivers/infiniband/core/restrack.h10
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucma.c542
-rw-r--r--drivers/infiniband/core/umem.c139
-rw-r--r--drivers/infiniband/core/umem_odp.c291
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c93
-rw-r--r--drivers/infiniband/core/uverbs_main.c10
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c15
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c4
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c8
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c196
-rw-r--r--drivers/infiniband/core/uverbs_std_types_wq.c2
-rw-r--r--drivers/infiniband/core/verbs.c114
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c90
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c30
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h7
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c40
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c11
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c3
-rw-r--r--drivers/infiniband/hw/efa/efa.h14
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h69
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c28
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h18
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c4
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c258
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c34
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c22
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c23
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c27
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h74
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c51
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c534
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c81
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c80
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c16
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c63
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c64
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c152
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c158
-rw-r--r--drivers/infiniband/hw/mlx4/main.c45
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h62
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c35
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c345
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c9
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c8
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h4
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c7
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c16
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c148
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c154
-rw-r--r--drivers/infiniband/hw/mlx5/main.c76
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h100
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c189
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c56
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c182
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h4
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c5
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c23
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h2
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c22
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c27
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c39
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h27
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c75
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c38
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h6
-rw-r--r--drivers/infiniband/hw/qedr/main.c31
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h33
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c7
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c438
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h11
-rw-r--r--drivers/infiniband/hw/qib/qib.h6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c52
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c10
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c18
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c9
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c9
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h8
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c43
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c64
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c54
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c39
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c89
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c68
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c33
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c37
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h33
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c56
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h48
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c11
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c9
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c50
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h1
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c76
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h7
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c13
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h1
-rw-r--r--drivers/input/evdev.c19
-rw-r--r--drivers/input/input-mt.c11
-rw-r--r--drivers/input/joystick/Kconfig10
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/adc-joystick.c264
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c6
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c8
-rw-r--r--drivers/input/misc/soc_button_array.c100
-rw-r--r--drivers/input/mouse/synaptics.c6
-rw-r--r--drivers/input/rmi4/Kconfig8
-rw-r--r--drivers/input/rmi4/Makefile1
-rw-r--r--drivers/input/rmi4/rmi_bus.c3
-rw-r--r--drivers/input/rmi4/rmi_driver.h1
-rw-r--r--drivers/input/rmi4/rmi_f30.c14
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c9
-rw-r--r--drivers/input/rmi4/rmi_f3a.c241
-rw-r--r--drivers/input/serio/hil_mlc.c21
-rw-r--r--drivers/input/serio/hp_sdc_mlc.c8
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/sun4i-ps2.c9
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/elants_i2c.c8
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c47
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c131
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c37
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/input/touchscreen/zinitix.c581
-rw-r--r--drivers/interconnect/Makefile2
-rw-r--r--drivers/interconnect/bulk.c117
-rw-r--r--drivers/interconnect/core.c143
-rw-r--r--drivers/interconnect/imx/imx.c13
-rw-r--r--drivers/interconnect/qcom/Kconfig20
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c36
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c37
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h21
-rw-r--r--drivers/interconnect/qcom/osm-l3.c91
-rw-r--r--drivers/interconnect/qcom/sc7180.c9
-rw-r--r--drivers/interconnect/qcom/sdm845.c11
-rw-r--r--drivers/interconnect/qcom/sm8150.c636
-rw-r--r--drivers/interconnect/qcom/sm8150.h154
-rw-r--r--drivers/interconnect/qcom/sm8250.c652
-rw-r--r--drivers/interconnect/qcom/sm8250.h164
-rw-r--r--drivers/iommu/Kconfig12
-rw-r--r--drivers/iommu/amd/amd_iommu.h19
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h12
-rw-r--r--drivers/iommu/amd/init.c48
-rw-r--r--drivers/iommu/amd/iommu.c243
-rw-r--r--drivers/iommu/amd/iommu_v2.c20
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile5
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c248
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c843
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h723
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c102
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h84
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c12
-rw-r--r--drivers/iommu/dma-iommu.c68
-rw-r--r--drivers/iommu/fsl_pamu.c2
-rw-r--r--drivers/iommu/hyperv-iommu.c8
-rw-r--r--drivers/iommu/intel/dmar.c79
-rw-r--r--drivers/iommu/intel/iommu.c82
-rw-r--r--drivers/iommu/intel/irq_remapping.c119
-rw-r--r--drivers/iommu/intel/pasid.c31
-rw-r--r--drivers/iommu/intel/pasid.h24
-rw-r--r--drivers/iommu/intel/svm.c68
-rw-r--r--drivers/iommu/io-pgtable-arm.c32
-rw-r--r--drivers/iommu/io-pgtable-arm.h30
-rw-r--r--drivers/iommu/iommu.c202
-rw-r--r--drivers/iommu/iova.c2
-rw-r--r--drivers/iommu/irq_remapping.c23
-rw-r--r--drivers/iommu/irq_remapping.h5
-rw-r--r--drivers/iommu/mtk_iommu.c49
-rw-r--r--drivers/iommu/mtk_iommu.h1
-rw-r--r--drivers/iommu/sun50i-iommu.c15
-rw-r--r--drivers/iommu/tegra-smmu.c138
-rw-r--r--drivers/irqchip/Kconfig35
-rw-r--r--drivers/irqchip/Makefile5
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c262
-rw-r--r--drivers/irqchip/irq-bcm2836.c153
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c81
-rw-r--r--drivers/irqchip/irq-gic-common.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c15
-rw-r--r--drivers/irqchip/irq-gic-v3.c184
-rw-r--r--drivers/irqchip/irq-gic.c247
-rw-r--r--drivers/irqchip/irq-hip04.c89
-rw-r--r--drivers/irqchip/irq-imx-intmux.c9
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c9
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c4
-rw-r--r--drivers/irqchip/irq-mst-intc.c199
-rw-r--r--drivers/irqchip/irq-owl-sirq.c359
-rw-r--r--drivers/irqchip/irq-pruss-intc.c664
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c8
-rw-r--r--drivers/irqchip/irq-s3c24xx.c1330
-rw-r--r--drivers/irqchip/irq-sifive-plic.c10
-rw-r--r--drivers/irqchip/irq-sl28cpld.c96
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c97
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c14
-rw-r--r--drivers/irqchip/irqchip.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c14
-rw-r--r--drivers/leds/Kconfig31
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/TODO75
-rw-r--r--drivers/leds/led-class.c5
-rw-r--r--drivers/leds/leds-88pm860x.c6
-rw-r--r--drivers/leds/leds-aat1290.c2
-rw-r--r--drivers/leds/leds-acer-a500.c129
-rw-r--r--drivers/leds/leds-an30259a.c7
-rw-r--r--drivers/leds/leds-aw2013.c11
-rw-r--r--drivers/leds/leds-bcm6328.c11
-rw-r--r--drivers/leds/leds-bcm6358.c11
-rw-r--r--drivers/leds/leds-cpcap.c7
-rw-r--r--drivers/leds/leds-cr0014114.c3
-rw-r--r--drivers/leds/leds-el15203000.c3
-rw-r--r--drivers/leds/leds-gpio.c3
-rw-r--r--drivers/leds/leds-ip30.c1
-rw-r--r--drivers/leds/leds-is31fl319x.c32
-rw-r--r--drivers/leds/leds-is31fl32xx.c33
-rw-r--r--drivers/leds/leds-ktd2692.c4
-rw-r--r--drivers/leds/leds-lm3532.c65
-rw-r--r--drivers/leds/leds-lm36274.c133
-rw-r--r--drivers/leds/leds-lm3692x.c14
-rw-r--r--drivers/leds/leds-lm3697.c100
-rw-r--r--drivers/leds/leds-lp50xx.c631
-rw-r--r--drivers/leds/leds-lp5521.c2
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-lp5562.c2
-rw-r--r--drivers/leds/leds-lp55xx-common.c14
-rw-r--r--drivers/leds/leds-lp8501.c2
-rw-r--r--drivers/leds/leds-lp8860.c6
-rw-r--r--drivers/leds/leds-lt3593.c6
-rw-r--r--drivers/leds/leds-max77650.c24
-rw-r--r--drivers/leds/leds-max77693.c2
-rw-r--r--drivers/leds/leds-mc13783.c8
-rw-r--r--drivers/leds/leds-mt6323.c38
-rw-r--r--drivers/leds/leds-netxbig.c6
-rw-r--r--drivers/leds/leds-ns2.c346
-rw-r--r--drivers/leds/leds-pca9532.c24
-rw-r--r--drivers/leds/leds-pca955x.c8
-rw-r--r--drivers/leds/leds-pca963x.c399
-rw-r--r--drivers/leds/leds-pm8058.c33
-rw-r--r--drivers/leds/leds-powernv.c2
-rw-r--r--drivers/leds/leds-pwm.c49
-rw-r--r--drivers/leds/leds-s3c24xx.c2
-rw-r--r--drivers/leds/leds-sc27xx-bltc.c6
-rw-r--r--drivers/leds/leds-sgm3140.c29
-rw-r--r--drivers/leds/leds-spi-byte.c11
-rw-r--r--drivers/leds/leds-syscon.c13
-rw-r--r--drivers/leds/leds-tca6507.c116
-rw-r--r--drivers/leds/leds-tlc591xx.c24
-rw-r--r--drivers/leds/leds-turris-omnia.c8
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c13
-rw-r--r--drivers/lightnvm/core.c5
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c3
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c3
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/arm_mhu.c3
-rw-r--r--drivers/mailbox/arm_mhu_db.c354
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c6
-rw-r--r--drivers/mailbox/mailbox.c12
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c8
-rw-r--r--drivers/mailbox/tegra-hsp.c9
-rw-r--r--drivers/md/bcache/alloc.c60
-rw-r--r--drivers/md/bcache/bcache.h29
-rw-r--r--drivers/md/bcache/btree.c146
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/closure.c16
-rw-r--r--drivers/md/bcache/debug.c10
-rw-r--r--drivers/md/bcache/extents.c6
-rw-r--r--drivers/md/bcache/features.c4
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c246
-rw-r--r--drivers/md/bcache/movinggc.c58
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/bcache/super.c249
-rw-r--r--drivers/md/bcache/sysfs.c10
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-core.h56
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-linear.c5
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/md/dm-raid.c11
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-snap-persistent.c11
-rw-r--r--drivers/md/dm-table.c127
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-writecache.c15
-rw-r--r--drivers/md/dm.c428
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md-bitmap.c16
-rw-r--r--drivers/md/md-cluster.c7
-rw-r--r--drivers/md/md-linear.c2
-rw-r--r--drivers/md/md.c51
-rw-r--r--drivers/md/md.h6
-rw-r--r--drivers/md/persistent-data/dm-btree.c3
-rw-r--r--drivers/md/raid0.c47
-rw-r--r--drivers/md/raid10.c477
-rw-r--r--drivers/md/raid10.h1
-rw-r--r--drivers/md/raid5.c309
-rw-r--r--drivers/md/raid5.h29
-rw-r--r--drivers/media/cec/core/cec-adap.c8
-rw-r--r--drivers/media/cec/core/cec-core.c31
-rw-r--r--drivers/media/cec/core/cec-pin.c6
-rw-r--r--drivers/media/cec/platform/seco/seco-cec.c2
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c2
-rw-r--r--drivers/media/common/siano/sms-cards.c2
-rw-r--r--drivers/media/common/siano/smsir.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c34
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c32
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c53
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c12
-rw-r--r--drivers/media/dvb-frontends/af9013.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c3
-rw-r--r--drivers/media/dvb-frontends/lg2160.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c7
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c1
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c1
-rw-r--r--drivers/media/dvb-frontends/tda10021.c40
-rw-r--r--drivers/media/dvb-frontends/tda10086.c22
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c41
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd_maps.h22
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.h6
-rw-r--r--drivers/media/firewire/firedtv-fw.c6
-rw-r--r--drivers/media/i2c/Kconfig2
-rw-r--r--drivers/media/i2c/adv7180.c9
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c31
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c31
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h1
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c4
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c2
-rw-r--r--drivers/media/i2c/dw9807-vcm.c2
-rw-r--r--drivers/media/i2c/imx219.c2
-rw-r--r--drivers/media/i2c/imx258.c2
-rw-r--r--drivers/media/i2c/imx274.c8
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c3
-rw-r--r--drivers/media/i2c/max9286.c43
-rw-r--r--drivers/media/i2c/ml86v7667.c7
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c2
-rw-r--r--drivers/media/i2c/mt9m001.c7
-rw-r--r--drivers/media/i2c/mt9m111.c7
-rw-r--r--drivers/media/i2c/ov2740.c24
-rw-r--r--drivers/media/i2c/ov5640.c340
-rw-r--r--drivers/media/i2c/ov5675.c15
-rw-r--r--drivers/media/i2c/ov6650.c57
-rw-r--r--drivers/media/i2c/ov7740.c10
-rw-r--r--drivers/media/i2c/ov8856.c430
-rw-r--r--drivers/media/i2c/ov9640.c9
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c5
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c6
-rw-r--r--drivers/media/i2c/tc358743.c21
-rw-r--r--drivers/media/i2c/tda1997x.c16
-rw-r--r--drivers/media/i2c/tvp5150.c9
-rw-r--r--drivers/media/i2c/tvp7002.c4
-rw-r--r--drivers/media/mc/mc-device.c7
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c15
-rw-r--r--drivers/media/pci/bt8xx/dvb-bt8xx.c7
-rw-r--r--drivers/media/pci/cobalt/cobalt-i2c.c8
-rw-r--r--drivers/media/pci/cobalt/cobalt-omnitek.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c11
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885.h4
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c15
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c19
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c4
-rw-r--r--drivers/media/pci/cx88/cx88-video.c2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c3
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c159
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c4
-rw-r--r--drivers/media/pci/mantis/mantis_dma.h2
-rw-r--r--drivers/media/pci/mantis/mantis_dvb.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c12
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c47
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-tvaudio.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134.h2
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c12
-rw-r--r--drivers/media/pci/saa7164/saa7164-dvb.c7
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c7
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c6
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-i2c.c2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c6
-rw-r--r--drivers/media/pci/ttpci/av7110.c20
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c4
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c15
-rw-r--r--drivers/media/pci/ttpci/budget-core.c6
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c19
-rw-r--r--drivers/media/platform/Kconfig9
-rw-r--r--drivers/media/platform/aspeed-video.c5
-rw-r--r--drivers/media/platform/coda/coda-bit.c4
-rw-r--r--drivers/media/platform/coda/coda-common.c17
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c27
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c67
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c18
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-reg.c9
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c89
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.h11
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/fsl-viu.c2
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c10
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c40
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c133
-rw-r--r--drivers/media/platform/mtk-jpeg/Makefile5
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c930
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h106
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c)10
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h)12
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c)2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h)2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h (renamed from drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h)19
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c154
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h91
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c11
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c62
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h40
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c211
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c180
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c231
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h38
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_base.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c12
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h11
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c68
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c11
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.h13
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_ipi_msg.h27
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c141
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.h8
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c5
-rw-r--r--drivers/media/platform/mx2_emmaprp.c7
-rw-r--r--drivers/media/platform/omap3isp/isp.c8
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c2
-rw-r--r--drivers/media/platform/pxa_camera.c195
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c124
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss.c5
-rw-r--r--drivers/media/platform/qcom/venus/Makefile2
-rw-r--r--drivers/media/platform/qcom/venus/core.c56
-rw-r--r--drivers/media/platform/qcom/venus/core.h34
-rw-r--r--drivers/media/platform/qcom/venus/dbgfs.c19
-rw-r--r--drivers/media/platform/qcom/venus/dbgfs.h12
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c19
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c241
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c5
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h1
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c37
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h28
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c18
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c72
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.h1
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c94
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c26
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c31
-rw-r--r--drivers/media/platform/qcom/venus/venc.c199
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c35
-rw-r--r--drivers/media/platform/rcar-fcp.c8
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c71
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c106
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c49
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c31
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h5
-rw-r--r--drivers/media/platform/rcar_drif.c30
-rw-r--r--drivers/media/platform/renesas-ceu.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c1
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c4
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c29
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c10
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp.h2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c10
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c4
-rw-r--r--drivers/media/platform/sti/hva/hva-debugfs.c22
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c10
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c6
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c11
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c7
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c11
-rw-r--r--drivers/media/radio/radio-si476x.c66
-rw-r--r--drivers/media/radio/si4713/si4713.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c28
-rw-r--r--drivers/media/rc/ati_remote.c4
-rw-r--r--drivers/media/rc/ene_ir.c18
-rw-r--r--drivers/media/rc/fintek-cir.c8
-rw-r--r--drivers/media/rc/gpio-ir-recv.c53
-rw-r--r--drivers/media/rc/igorplugusb.c6
-rw-r--r--drivers/media/rc/iguanair.c6
-rw-r--r--drivers/media/rc/imon_raw.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c8
-rw-r--r--drivers/media/rc/ir-imon-decoder.c10
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c6
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c8
-rw-r--r--drivers/media/rc/ir-nec-decoder.c6
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c6
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c6
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c18
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c6
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c6
-rw-r--r--drivers/media/rc/ir-sony-decoder.c6
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c15
-rw-r--r--drivers/media/rc/ir_toy.c14
-rw-r--r--drivers/media/rc/ite-cir.c10
-rw-r--r--drivers/media/rc/ite-cir.h4
-rw-r--r--drivers/media/rc/lirc_dev.c95
-rw-r--r--drivers/media/rc/mceusb.c12
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c4
-rw-r--r--drivers/media/rc/nuvoton-cir.c10
-rw-r--r--drivers/media/rc/nuvoton-cir.h2
-rw-r--r--drivers/media/rc/rc-core-priv.h21
-rw-r--r--drivers/media/rc/rc-ir-raw.c16
-rw-r--r--drivers/media/rc/rc-loopback.c8
-rw-r--r--drivers/media/rc/rc-main.c10
-rw-r--r--drivers/media/rc/redrat3.c17
-rw-r--r--drivers/media/rc/serial_ir.c12
-rw-r--r--drivers/media/rc/sir_ir.c2
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c10
-rw-r--r--drivers/media/rc/sunxi-cir.c4
-rw-r--r--drivers/media/rc/ttusbir.c18
-rw-r--r--drivers/media/rc/winbond-cir.c10
-rw-r--r--drivers/media/rc/xbox_remote.c2
-rw-r--r--drivers/media/test-drivers/Kconfig16
-rw-r--r--drivers/media/test-drivers/Makefile1
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c6
-rw-r--r--drivers/media/test-drivers/vidtv/Kconfig11
-rw-r--r--drivers/media/test-drivers/vidtv/Makefile9
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c566
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.h63
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c310
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.h76
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_common.c89
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_common.h33
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c464
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.h69
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_encoder.h166
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_mux.c474
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_mux.h167
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_pes.c438
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_pes.h191
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.c1322
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_psi.h577
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_s302m.c502
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_s302m.h92
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_ts.c137
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_ts.h108
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c438
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.h43
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c7
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c674
-rw-r--r--drivers/media/test-drivers/vivid/vivid-meta-out.c9
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-gen.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c38
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c25
-rw-r--r--drivers/media/tuners/fc0011.c2
-rw-r--r--drivers/media/tuners/qt1010.c25
-rw-r--r--drivers/media/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/tuners/tuner-simple.c5
-rw-r--r--drivers/media/usb/au0828/au0828-input.c8
-rw-r--r--drivers/media/usb/au0828/au0828-video.c12
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c9
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c4
-rw-r--r--drivers/media/usb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c13
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c11
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c26
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c8
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c22
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c2
-rw-r--r--drivers/media/usb/gspca/mr97310a.c10
-rw-r--r--drivers/media/usb/gspca/nw80x.c2
-rw-r--r--drivers/media/usb/gspca/ov519.c6
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c4
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c4
-rw-r--r--drivers/media/usb/gspca/zc3xx.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c5
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c8
-rw-r--r--drivers/media/usb/tm6000/tm6000-core.c24
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c197
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c7
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c3
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c46
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c20
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c71
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c35
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c34
-rw-r--r--drivers/media/usb/uvc/uvc_video.c6
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h8
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c61
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-h264.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c68
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c16
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c22
-rw-r--r--drivers/memory/Kconfig52
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/brcmstb_dpfe.c46
-rw-r--r--drivers/memory/emif.c55
-rw-r--r--drivers/memory/fsl-corenet-cf.c6
-rw-r--r--drivers/memory/mtk-smi.c23
-rw-r--r--drivers/memory/omap-gpmc.c272
-rw-r--r--drivers/memory/renesas-rpc-if.c4
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c114
-rw-r--r--drivers/memory/tegra/tegra124-emc.c14
-rw-r--r--drivers/memory/tegra/tegra124.c1
-rw-r--r--drivers/memory/tegra/tegra186-emc.c10
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c3
-rw-r--r--drivers/memory/tegra/tegra210.c4
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/message/fusion/mptctl.c5
-rw-r--r--drivers/message/fusion/mptfc.c6
-rw-r--r--drivers/message/fusion/mptscsih.c16
-rw-r--r--drivers/mfd/Kconfig52
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/dm355evm_msp.c76
-rw-r--r--drivers/mfd/ene-kb3930.c212
-rw-r--r--drivers/mfd/intel-lpss-pci.c4
-rw-r--r--drivers/mfd/intel-m10-bmc.c164
-rw-r--r--drivers/mfd/kempld-core.c117
-rw-r--r--drivers/mfd/khadas-mcu.c2
-rw-r--r--drivers/mfd/lp87565.c4
-rw-r--r--drivers/mfd/madera-core.c11
-rw-r--r--drivers/mfd/mt6360-core.c1
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/simple-mfd-i2c.c57
-rw-r--r--drivers/mfd/sm501.c8
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c28
-rw-r--r--drivers/mfd/stmfx.c8
-rw-r--r--drivers/mfd/syscon.c2
-rw-r--r--drivers/mfd/wcd934x.c9
-rw-r--r--drivers/misc/Kconfig11
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5227.c117
-rw-r--r--drivers/misc/cardreader/rts5228.c5
-rw-r--r--drivers/misc/cardreader/rts5249.c162
-rw-r--r--drivers/misc/cardreader/rts5260.c44
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c24
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h17
-rw-r--r--drivers/misc/cxl/pci.c4
-rw-r--r--drivers/misc/eeprom/at24.c71
-rw-r--r--drivers/misc/eeprom/at25.c5
-rw-r--r--drivers/misc/eeprom/ee1004.c13
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/fastrpc.c20
-rw-r--r--drivers/misc/habanalabs/Kconfig1
-rw-r--r--drivers/misc/habanalabs/common/Makefile4
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c234
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c107
-rw-r--r--drivers/misc/habanalabs/common/context.c38
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c92
-rw-r--r--drivers/misc/habanalabs/common/device.c31
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c229
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h214
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c76
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c105
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c29
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c60
-rw-r--r--drivers/misc/habanalabs/common/irq.c19
-rw-r--r--drivers/misc/habanalabs/common/memory.c90
-rw-r--r--drivers/misc/habanalabs/common/mmu.c812
-rw-r--r--drivers/misc/habanalabs/common/mmu_v1.c863
-rw-r--r--drivers/misc/habanalabs/common/pci.c17
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c60
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c292
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h61
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c5
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c12351
-rw-r--r--drivers/misc/habanalabs/goya/goya.c115
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h4
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h (renamed from drivers/misc/habanalabs/include/common/armcp_if.h)298
-rw-r--r--drivers/misc/habanalabs/include/common/qman_if.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h274
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h1
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h2
-rw-r--r--drivers/misc/hisi_hikey_usb.c273
-rw-r--r--drivers/misc/kgdbts.c48
-rw-r--r--drivers/misc/lkdtm/bugs.c10
-rw-r--r--drivers/misc/lkdtm/core.c2
-rw-r--r--drivers/misc/lkdtm/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm/usercopy.c15
-rw-r--r--drivers/misc/mei/Kconfig10
-rw-r--r--drivers/misc/mei/Makefile3
-rw-r--r--drivers/misc/mei/bus-fixup.c12
-rw-r--r--drivers/misc/mei/bus.c89
-rw-r--r--drivers/misc/mei/client.c423
-rw-r--r--drivers/misc/mei/client.h26
-rw-r--r--drivers/misc/mei/debugfs.c9
-rw-r--r--drivers/misc/mei/hbm.c101
-rw-r--r--drivers/misc/mei/hbm.h2
-rw-r--r--drivers/misc/mei/hw-virtio.c874
-rw-r--r--drivers/misc/mei/hw.h152
-rw-r--r--drivers/misc/mei/interrupt.c113
-rw-r--r--drivers/misc/mei/main.c284
-rw-r--r--drivers/misc/mei/mei_dev.h34
-rw-r--r--drivers/misc/mic/Kconfig140
-rw-r--r--drivers/misc/mic/Makefile12
-rw-r--r--drivers/misc/mic/bus/Makefile9
-rw-r--r--drivers/misc/mic/bus/cosm_bus.c130
-rw-r--r--drivers/misc/mic/bus/cosm_bus.h125
-rw-r--r--drivers/misc/mic/bus/mic_bus.c193
-rw-r--r--drivers/misc/mic/bus/scif_bus.c201
-rw-r--r--drivers/misc/mic/bus/scif_bus.h125
-rw-r--r--drivers/misc/mic/bus/vop_bus.c194
-rw-r--r--drivers/misc/mic/bus/vop_bus.h129
-rw-r--r--drivers/misc/mic/card/Makefile11
-rw-r--r--drivers/misc/mic/card/mic_debugfs.c85
-rw-r--r--drivers/misc/mic/card/mic_device.c417
-rw-r--r--drivers/misc/mic/card/mic_device.h137
-rw-r--r--drivers/misc/mic/card/mic_x100.c347
-rw-r--r--drivers/misc/mic/card/mic_x100.h37
-rw-r--r--drivers/misc/mic/common/mic_dev.h55
-rw-r--r--drivers/misc/mic/cosm/Makefile11
-rw-r--r--drivers/misc/mic/cosm/cosm_debugfs.c116
-rw-r--r--drivers/misc/mic/cosm/cosm_main.c382
-rw-r--r--drivers/misc/mic/cosm/cosm_main.h61
-rw-r--r--drivers/misc/mic/cosm/cosm_scif_server.c399
-rw-r--r--drivers/misc/mic/cosm/cosm_sysfs.c449
-rw-r--r--drivers/misc/mic/cosm_client/Makefile8
-rw-r--r--drivers/misc/mic/cosm_client/cosm_scif_client.c269
-rw-r--r--drivers/misc/mic/host/Makefile12
-rw-r--r--drivers/misc/mic/host/mic_boot.c587
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c149
-rw-r--r--drivers/misc/mic/host/mic_device.h157
-rw-r--r--drivers/misc/mic/host/mic_intr.c635
-rw-r--r--drivers/misc/mic/host/mic_intr.h137
-rw-r--r--drivers/misc/mic/host/mic_main.c335
-rw-r--r--drivers/misc/mic/host/mic_smpt.c427
-rw-r--r--drivers/misc/mic/host/mic_smpt.h87
-rw-r--r--drivers/misc/mic/host/mic_x100.c585
-rw-r--r--drivers/misc/mic/host/mic_x100.h77
-rw-r--r--drivers/misc/mic/scif/Makefile21
-rw-r--r--drivers/misc/mic/scif/scif_api.c1485
-rw-r--r--drivers/misc/mic/scif/scif_debugfs.c116
-rw-r--r--drivers/misc/mic/scif/scif_dma.c1940
-rw-r--r--drivers/misc/mic/scif/scif_epd.c357
-rw-r--r--drivers/misc/mic/scif/scif_epd.h200
-rw-r--r--drivers/misc/mic/scif/scif_fd.c462
-rw-r--r--drivers/misc/mic/scif/scif_fence.c783
-rw-r--r--drivers/misc/mic/scif/scif_main.c351
-rw-r--r--drivers/misc/mic/scif/scif_main.h274
-rw-r--r--drivers/misc/mic/scif/scif_map.h127
-rw-r--r--drivers/misc/mic/scif/scif_mmap.c690
-rw-r--r--drivers/misc/mic/scif/scif_nm.c229
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c1349
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.h221
-rw-r--r--drivers/misc/mic/scif/scif_peer_bus.c175
-rw-r--r--drivers/misc/mic/scif/scif_peer_bus.h23
-rw-r--r--drivers/misc/mic/scif/scif_ports.c116
-rw-r--r--drivers/misc/mic/scif/scif_rb.c240
-rw-r--r--drivers/misc/mic/scif/scif_rb.h100
-rw-r--r--drivers/misc/mic/scif/scif_rma.c1760
-rw-r--r--drivers/misc/mic/scif/scif_rma.h477
-rw-r--r--drivers/misc/mic/scif/scif_rma_list.c282
-rw-r--r--drivers/misc/mic/scif/scif_rma_list.h48
-rw-r--r--drivers/misc/mic/vop/Makefile10
-rw-r--r--drivers/misc/mic/vop/vop_debugfs.c184
-rw-r--r--drivers/misc/mic/vop/vop_main.c783
-rw-r--r--drivers/misc/mic/vop/vop_main.h158
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c1158
-rw-r--r--drivers/misc/ocxl/Kconfig3
-rw-r--r--drivers/misc/ocxl/afu_irq.c12
-rw-r--r--drivers/misc/ocxl/core.c7
-rw-r--r--drivers/misc/ocxl/link.c15
-rw-r--r--drivers/misc/pci_endpoint_test.c17
-rw-r--r--drivers/misc/pvpanic.c8
-rw-r--r--drivers/misc/sgi-gru/grufile.c3
-rw-r--r--drivers/misc/sgi-xp/xp.h8
-rw-r--r--drivers/misc/sgi-xp/xp_main.c5
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c7
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c7
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c3
-rw-r--r--drivers/misc/sgi-xp/xpnet.c3
-rw-r--r--drivers/misc/uacce/uacce.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/core/block.c2
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/core.c10
-rw-r--r--drivers/mmc/core/host.c26
-rw-r--r--drivers/mmc/core/mmc.c22
-rw-r--r--drivers/mmc/core/mmc_test.c8
-rw-r--r--drivers/mmc/core/queue.c3
-rw-r--r--drivers/mmc/core/sd.c38
-rw-r--r--drivers/mmc/core/sdio.c24
-rw-r--r--drivers/mmc/core/sdio_bus.c54
-rw-r--r--drivers/mmc/core/sdio_cis.c11
-rw-r--r--drivers/mmc/host/Kconfig53
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/alcor.c1
-rw-r--r--drivers/mmc/host/android-goldfish.c1
-rw-r--r--drivers/mmc/host/atmel-mci.c1
-rw-r--r--drivers/mmc/host/au1xmmc.c1
-rw-r--r--drivers/mmc/host/bcm2835.c5
-rw-r--r--drivers/mmc/host/cavium-octeon.c1
-rw-r--r--drivers/mmc/host/cqhci.c6
-rw-r--r--drivers/mmc/host/cqhci.h2
-rw-r--r--drivers/mmc/host/davinci_mmc.c8
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c1
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c1
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c1
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c1
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c1
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c1
-rw-r--r--drivers/mmc/host/dw_mmc-zx.c12
-rw-r--r--drivers/mmc/host/dw_mmc.c9
-rw-r--r--drivers/mmc/host/jz4740_mmc.c5
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c19
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c1
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c1
-rw-r--r--drivers/mmc/host/mmc_spi.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c24
-rw-r--r--drivers/mmc/host/mtk-sd.c68
-rw-r--r--drivers/mmc/host/mvsdio.c1
-rw-r--r--drivers/mmc/host/mxcmmc.c1
-rw-r--r--drivers/mmc/host/mxs-mmc.c1
-rw-r--r--drivers/mmc/host/omap.c1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c19
-rw-r--r--drivers/mmc/host/owl-mmc.c1
-rw-r--r--drivers/mmc/host/pxamci.c1
-rw-r--r--drivers/mmc/host/renesas_sdhi.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c223
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c5
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c1
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c1
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c8
-rw-r--r--drivers/mmc/host/s3cmci.c86
-rw-r--r--drivers/mmc/host/sdhci-acpi.c38
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c1
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c13
-rw-r--r--drivers/mmc/host/sdhci-cadence.c1
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c1
-rw-r--r--drivers/mmc/host/sdhci-dove.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c1
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-iproc.c2
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c1
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c8
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c2
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c49
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c1
-rw-r--r--drivers/mmc/host/sdhci-of-sparx5.c270
-rw-r--r--drivers/mmc/host/sdhci-omap.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c154
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c150
-rw-r--r--drivers/mmc/host/sdhci-pic32.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-s3c.c3
-rw-r--r--drivers/mmc/host/sdhci-sirf.c1
-rw-r--r--drivers/mmc/host/sdhci-spear.c1
-rw-r--r--drivers/mmc/host/sdhci-sprd.c5
-rw-r--r--drivers/mmc/host/sdhci-st.c1
-rw-r--r--drivers/mmc/host/sdhci-tegra.c8
-rw-r--r--drivers/mmc/host/sdhci-xenon.c1
-rw-r--r--drivers/mmc/host/sdhci.c6
-rw-r--r--drivers/mmc/host/sdhci_am654.c207
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c1
-rw-r--r--drivers/mmc/host/sh_mmcif.c1
-rw-r--r--drivers/mmc/host/sunxi-mmc.c1
-rw-r--r--drivers/mmc/host/tmio_mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h8
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c53
-rw-r--r--drivers/mmc/host/uniphier-sd.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c1
-rw-r--r--drivers/mmc/host/via-sdmmc.c3
-rw-r--r--drivers/mmc/host/wbsd.c1
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c1
-rw-r--r--drivers/most/Kconfig9
-rw-r--r--drivers/most/Makefile1
-rw-r--r--drivers/most/most_cdev.c (renamed from drivers/staging/most/cdev/cdev.c)0
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/lart.c10
-rw-r--r--drivers/mtd/devices/spear_smi.c4
-rw-r--r--drivers/mtd/hyperbus/Kconfig7
-rw-r--r--drivers/mtd/hyperbus/Makefile1
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c144
-rw-r--r--drivers/mtd/hyperbus/rpc-if.c170
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c35
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c28
-rw-r--r--drivers/mtd/maps/Kconfig11
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/physmap-bt1-rom.c126
-rw-r--r--drivers/mtd/maps/physmap-bt1-rom.h17
-rw-r--r--drivers/mtd/maps/physmap-core.c8
-rw-r--r--drivers/mtd/maps/vmu-flash.c11
-rw-r--r--drivers/mtd/mtdconcat.c43
-rw-r--r--drivers/mtd/mtdcore.c30
-rw-r--r--drivers/mtd/mtdoops.c11
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/ecc.c484
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c9
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c5
-rw-r--r--drivers/mtd/nand/raw/Kconfig1
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c6
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c457
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c4
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c3
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c28
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c12
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c3
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c2
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c38
-rw-r--r--drivers/mtd/nand/raw/denali.c3
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c3
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c20
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c49
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c4
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c14
-rw-r--r--drivers/mtd/nand/raw/gpio.c4
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c21
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c6
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c20
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c3
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c101
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c12
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c36
-rw-r--r--drivers/mtd/nand/raw/nand_base.c554
-rw-r--r--drivers/mtd/nand/raw/nand_bch.c1
-rw-r--r--drivers/mtd/nand/raw/nand_esmt.c15
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c44
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c9
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c23
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c17
-rw-r--r--drivers/mtd/nand/raw/nand_samsung.c22
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c19
-rw-r--r--drivers/mtd/nand/raw/nandsim.c8
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/mtd/nand/raw/omap2.c22
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c4
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c3
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c6
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c26
-rw-r--r--drivers/mtd/nand/raw/r852.c3
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c20
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c2
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c5
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c24
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c27
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c4
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c37
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c2
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c17
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c4
-rw-r--r--drivers/mtd/nand/spi/core.c12
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c63
-rw-r--r--drivers/mtd/nand/spi/macronix.c27
-rw-r--r--drivers/mtd/nand/spi/toshiba.c6
-rw-r--r--drivers/mtd/parsers/Kconfig2
-rw-r--r--drivers/mtd/spi-nor/controllers/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/core.c13
-rw-r--r--drivers/mtd/spi-nor/macronix.c2
-rw-r--r--drivers/mtd/spi-nor/winbond.c9
-rw-r--r--drivers/mtd/ubi/wl.c13
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/appletalk/Kconfig2
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/bareudp.c11
-rw-r--r--drivers/net/caif/Kconfig19
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_hsi.c19
-rw-r--r--drivers/net/caif/caif_spi.c874
-rw-r--r--drivers/net/caif/caif_spi_slave.c254
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/c_can/c_can.c9
-rw-r--r--drivers/net/can/c_can/c_can.h4
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/dev.c72
-rw-r--r--drivers/net/can/flexcan.c616
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/m_can/Kconfig2
-rw-r--r--drivers/net/can/m_can/m_can_platform.c2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c29
-rw-r--r--drivers/net/can/pch_can.c67
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c11
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c2
-rw-r--r--drivers/net/can/rx-offload.c15
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/softing/Kconfig6
-rw-r--r--drivers/net/can/softing/softing_fw.c8
-rw-r--r--drivers/net/can/softing/softing_main.c11
-rw-r--r--drivers/net/can/softing/softing_platform.h2
-rw-r--r--drivers/net/can/spi/Kconfig4
-rw-r--r--drivers/net/can/spi/Makefile1
-rw-r--r--drivers/net/can/spi/mcp251x.c345
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig17
-rw-r--r--drivers/net/can/spi/mcp251xfd/Makefile8
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c2927
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c89
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c556
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h835
-rw-r--r--drivers/net/can/ti_hecc.c37
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c166
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c51
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c52
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/ucan.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/xilinx_can.c22
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/b53/b53_common.c99
-rw-r--r--drivers/net/dsa/b53/b53_priv.h5
-rw-r--r--drivers/net/dsa/bcm_sf2.c136
-rw-r--r--drivers/net/dsa/bcm_sf2.h4
-rw-r--r--drivers/net/dsa/dsa_loop.c59
-rw-r--r--drivers/net/dsa/lantiq_gswip.c26
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c6
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c32
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c19
-rw-r--r--drivers/net/dsa/mt7530.c1197
-rw-r--r--drivers/net/dsa/mt7530.h259
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c308
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c635
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.h21
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c59
-rw-r--r--drivers/net/dsa/ocelot/Kconfig23
-rw-r--r--drivers/net/dsa/ocelot/Makefile6
-rw-r--r--drivers/net/dsa/ocelot/felix.c124
-rw-r--r--drivers/net/dsa/ocelot/felix.h32
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c639
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c286
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/dsa/realtek-smi-core.c3
-rw-r--r--drivers/net/dsa/realtek-smi-core.h9
-rw-r--r--drivers/net/dsa/rtl8366.c291
-rw-r--r--drivers/net/dsa/rtl8366rb.c115
-rw-r--r--drivers/net/dsa/sja1105/Makefile1
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h20
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c262
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c326
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c5
-rw-r--r--drivers/net/ethernet/3com/typhoon.c64
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c17
-rw-r--r--drivers/net/ethernet/8390/lib8390.c32
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c77
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c6
-rw-r--r--drivers/net/ethernet/alteon/acenic.c9
-rw-r--r--drivers/net/ethernet/alteon/acenic.h3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h128
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c247
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h42
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c203
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c178
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h40
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h31
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c19
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c11
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c51
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c37
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c13
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c160
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c55
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c66
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c50
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c19
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c40
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c98
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c784
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h163
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c173
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c336
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h397
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c18
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c20
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c7
-rw-r--r--drivers/net/ethernet/cadence/macb.h21
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c83
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c3
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c92
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c363
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c158
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c1
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c76
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/ael1002.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c91
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c205
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c175
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c143
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c92
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Kconfig53
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Makefile4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c (renamed from drivers/crypto/chelsio/chcr_ipsec.c)225
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h58
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h (renamed from drivers/crypto/chelsio/chcr_common.h)24
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c (renamed from drivers/crypto/chelsio/chcr_ktls.c)1045
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h (renamed from drivers/crypto/chelsio/chcr_ktls.h)44
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile (renamed from drivers/crypto/chelsio/chtls/Makefile)0
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h (renamed from drivers/crypto/chelsio/chtls/chtls.h)88
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c (renamed from drivers/crypto/chelsio/chtls/chtls_cm.c)50
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h (renamed from drivers/crypto/chelsio/chtls/chtls_cm.h)0
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c (renamed from drivers/crypto/chelsio/chtls/chtls_hw.c)3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c (renamed from drivers/crypto/chelsio/chtls/chtls_io.c)12
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c (renamed from drivers/crypto/chelsio/chtls/chtls_main.c)2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c115
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c66
-rw-r--r--drivers/net/ethernet/cortina/gemini.c40
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c62
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c44
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c56
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c5
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c65
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c44
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c80
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c81
-rw-r--r--drivers/net/ethernet/dlink/sundance.c21
-rw-r--r--drivers/net/ethernet/dnet.c13
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c5
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c30
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c63
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c309
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c746
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h125
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c98
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c79
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h35
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c53
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c26
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c335
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c11
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c67
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c10
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c14
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c6
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c23
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c11
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h106
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c315
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h62
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c365
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c300
-rw-r--r--drivers/net/ethernet/google/gve/gve_register.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c148
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h90
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c77
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c352
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h35
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c45
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h38
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c180
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c105
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c62
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h34
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c176
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.c318
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h114
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c92
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c55
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c37
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c148
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c23
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c464
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/intel/e100.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c159
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c40
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c56
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c349
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c50
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c103
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c233
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c127
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c138
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h80
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c472
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c17
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h16
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c66
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c62
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c135
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c49
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c16
-rw-r--r--drivers/net/ethernet/jme.c40
-rw-r--r--drivers/net/ethernet/korina.c3
-rw-r--r--drivers/net/ethernet/marvell/Kconfig7
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c47
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h203
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c878
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c457
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h47
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h541
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c275
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c36
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c41
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c239
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h103
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c98
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c180
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c212
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c112
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c5
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig26
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile7
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h206
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c112
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h23
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.c104
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.h35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c780
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h11
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c1253
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h182
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c667
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c769
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c820
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.h19
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c1277
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.h13
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c527
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c)112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c883
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c663
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c944
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c505
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c463
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c911
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c647
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c173
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h239
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c603
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c377
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c234
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c17
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c76
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c114
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c565
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c17
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c61
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_s2.h64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c856
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h99
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c195
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c63
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c77
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c24
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c91
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h7
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c12
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c72
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c73
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c85
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c7
-rw-r--r--drivers/net/ethernet/ni/nixge.c7
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c5
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c14
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c47
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c31
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c91
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h75
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c203
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c206
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h34
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c1103
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h115
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c103
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c211
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.h1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h3
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c259
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c130
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c38
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c17
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c3
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c98
-rw-r--r--drivers/net/ethernet/renesas/ravb.h5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c65
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c12
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c83
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c17
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c28
-rw-r--r--drivers/net/ethernet/sfc/ef10.c152
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c41
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c23
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c44
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c21
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c15
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c125
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c47
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c29
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/selftest.c2
-rw-r--r--drivers/net/ethernet/sfc/farch.c33
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.c24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_functions.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c593
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c605
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.h15
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h131
-rw-r--r--drivers/net/ethernet/sfc/nic.h4
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h47
-rw-r--r--drivers/net/ethernet/sfc/ptp.c12
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/selftest.c18
-rw-r--r--drivers/net/ethernet/sfc/selftest.h4
-rw-r--r--drivers/net/ethernet/sfc/siena.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c136
-rw-r--r--drivers/net/ethernet/sfc/tx.h26
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c19
-rw-r--r--drivers/net/ethernet/silan/sc92031.c40
-rw-r--r--drivers/net/ethernet/sis/sis900.c8
-rw-r--r--drivers/net/ethernet/smsc/epic100.c71
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c51
-rw-r--r--drivers/net/ethernet/socionext/netsec.c24
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c196
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c298
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c18
-rw-r--r--drivers/net/ethernet/sun/sungem.c5
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c70
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c10
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c16
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h1
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c43
-rw-r--r--drivers/net/ethernet/ti/cpsw.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c421
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h7
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpts.c42
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c18
-rw-r--r--drivers/net/ethernet/ti/tlan.c67
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c48
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c40
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c26
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c3
-rw-r--r--drivers/net/fddi/skfp/h/smc.h2
-rw-r--r--drivers/net/geneve.c11
-rw-r--r--drivers/net/gtp.c90
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hippi/rrunner.c117
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c46
-rw-r--r--drivers/net/hyperv/rndis_filter.c13
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c6
-rw-r--r--drivers/net/ipa/gsi.c32
-rw-r--r--drivers/net/ipa/gsi.h1
-rw-r--r--drivers/net/ipa/gsi_reg.h59
-rw-r--r--drivers/net/ipa/gsi_trans.c22
-rw-r--r--drivers/net/ipa/ipa.h17
-rw-r--r--drivers/net/ipa/ipa_clock.c28
-rw-r--r--drivers/net/ipa/ipa_endpoint.c53
-rw-r--r--drivers/net/ipa/ipa_interrupt.c14
-rw-r--r--drivers/net/ipa/ipa_main.c72
-rw-r--r--drivers/net/ipa/ipa_reg.h2
-rw-r--r--drivers/net/ipa/ipa_uc.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c8
-rw-r--r--drivers/net/macsec.c30
-rw-r--r--drivers/net/mdio/Kconfig251
-rw-r--r--drivers/net/mdio/Makefile29
-rw-r--r--drivers/net/mdio/mdio-aspeed.c (renamed from drivers/net/phy/mdio-aspeed.c)0
-rw-r--r--drivers/net/mdio/mdio-bcm-iproc.c (renamed from drivers/net/phy/mdio-bcm-iproc.c)0
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c (renamed from drivers/net/phy/mdio-bcm-unimac.c)0
-rw-r--r--drivers/net/mdio/mdio-bitbang.c (renamed from drivers/net/phy/mdio-bitbang.c)0
-rw-r--r--drivers/net/mdio/mdio-cavium.c (renamed from drivers/net/phy/mdio-cavium.c)0
-rw-r--r--drivers/net/mdio/mdio-cavium.h (renamed from drivers/net/phy/mdio-cavium.h)0
-rw-r--r--drivers/net/mdio/mdio-gpio.c (renamed from drivers/net/phy/mdio-gpio.c)0
-rw-r--r--drivers/net/mdio/mdio-hisi-femac.c (renamed from drivers/net/phy/mdio-hisi-femac.c)0
-rw-r--r--drivers/net/mdio/mdio-i2c.c (renamed from drivers/net/phy/mdio-i2c.c)3
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c (renamed from drivers/net/phy/mdio-ipq4019.c)109
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c (renamed from drivers/net/phy/mdio-ipq8064.c)0
-rw-r--r--drivers/net/mdio/mdio-moxart.c (renamed from drivers/net/phy/mdio-moxart.c)0
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c (renamed from drivers/net/phy/mdio-mscc-miim.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c (renamed from drivers/net/phy/mdio-mux-bcm-iproc.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c (renamed from drivers/net/phy/mdio-mux-gpio.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c (renamed from drivers/net/phy/mdio-mux-meson-g12a.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c (renamed from drivers/net/phy/mdio-mux-mmioreg.c)0
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c (renamed from drivers/net/phy/mdio-mux-multiplexer.c)0
-rw-r--r--drivers/net/mdio/mdio-mux.c (renamed from drivers/net/phy/mdio-mux.c)0
-rw-r--r--drivers/net/mdio/mdio-mvusb.c (renamed from drivers/net/phy/mdio-mvusb.c)0
-rw-r--r--drivers/net/mdio/mdio-octeon.c (renamed from drivers/net/phy/mdio-octeon.c)0
-rw-r--r--drivers/net/mdio/mdio-sun4i.c (renamed from drivers/net/phy/mdio-sun4i.c)0
-rw-r--r--drivers/net/mdio/mdio-thunder.c (renamed from drivers/net/phy/mdio-thunder.c)0
-rw-r--r--drivers/net/mdio/mdio-xgene.c (renamed from drivers/net/phy/mdio-xgene.c)2
-rw-r--r--drivers/net/mdio/of_mdio.c (renamed from drivers/of/of_mdio.c)38
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/dev.c35
-rw-r--r--drivers/net/netdevsim/ethtool.c64
-rw-r--r--drivers/net/netdevsim/netdev.c1
-rw-r--r--drivers/net/netdevsim/netdevsim.h20
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c34
-rw-r--r--drivers/net/pcs/Kconfig21
-rw-r--r--drivers/net/pcs/Makefile5
-rw-r--r--drivers/net/pcs/pcs-lynx.c318
-rw-r--r--drivers/net/pcs/pcs-xpcs.c (renamed from drivers/net/phy/mdio-xpcs.c)2
-rw-r--r--drivers/net/phy/Kconfig405
-rw-r--r--drivers/net/phy/Makefile37
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c32
-rw-r--r--drivers/net/phy/dp83640.c70
-rw-r--r--drivers/net/phy/dp83822.c232
-rw-r--r--drivers/net/phy/dp83867.c45
-rw-r--r--drivers/net/phy/dp83869.c365
-rw-r--r--drivers/net/phy/marvell.c14
-rw-r--r--drivers/net/phy/mdio-i2c.h16
-rw-r--r--drivers/net/phy/mdio-xgene.h130
-rw-r--r--drivers/net/phy/mdio_bus.c15
-rw-r--r--drivers/net/phy/micrel.c14
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c2
-rw-r--r--drivers/net/phy/phy-core.c36
-rw-r--r--drivers/net/phy/phy.c69
-rw-r--r--drivers/net/phy/phylink.c48
-rw-r--r--drivers/net/phy/realtek.c49
-rw-r--r--drivers/net/phy/sfp.c5
-rw-r--r--drivers/net/phy/smsc.c126
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/team/team.c6
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/cx82310_eth.c78
-rw-r--r--drivers/net/usb/kaweth.c261
-rw-r--r--drivers/net/usb/net1080.c1
-rw-r--r--drivers/net/usb/pegasus.c61
-rw-r--r--drivers/net/usb/qmi_wwan.c25
-rw-r--r--drivers/net/usb/rtl8150.c34
-rw-r--r--drivers/net/usb/smsc75xx.c13
-rw-r--r--drivers/net/usb/smsc95xx.c488
-rw-r--r--drivers/net/usb/usbnet.c30
-rw-r--r--drivers/net/veth.c18
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vrf.c92
-rw-r--r--drivers/net/vxlan.c22
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c1
-rw-r--r--drivers/net/wan/hdlc.c10
-rw-r--r--drivers/net/wan/hdlc_fr.c172
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c1
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c18
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c105
-rw-r--r--drivers/net/wan/lmc/lmc_media.c4
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c20
-rw-r--r--drivers/net/wan/sbni.c101
-rw-r--r--drivers/net/wan/slic_ds26522.c2
-rw-r--r--drivers/net/wan/x25_asy.c5
-rw-r--r--drivers/net/wan/x25_asy.h1
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireless/admtek/adm8211.c83
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c81
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c349
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c929
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c331
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h19
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c73
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h76
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile12
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c455
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c224
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h15
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c291
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c1104
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h247
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c1097
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h217
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c (renamed from drivers/net/wireless/ath/ath11k/debug_htt_stats.c)56
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h (renamed from drivers/net/wireless/ath/ath11k/debug_htt_stats.h)27
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.h44
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c316
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h40
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c375
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c200
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c306
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h198
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h30
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c894
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h152
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c412
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c467
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h39
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1062
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h72
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c357
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h29
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c36
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c154
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c26
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/rfkill.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h68
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h37
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h5
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c7
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c15
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c57
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h222
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c288
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c757
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h12
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c279
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h18
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c36
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c11
-rw-r--r--drivers/net/wireless/atmel/atmel.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c14
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c3
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c21
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c15
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/pio.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c62
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c39
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c31
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c31
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c99
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c112
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c268
-rw-r--r--drivers/net/wireless/cisco/airo.c913
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c52
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c34
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c46
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-calib.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c67
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c25
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c76
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/debug.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h231
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h471
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c274
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c459
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c1089
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c530
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1529
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h230
-rw-r--r--drivers/net/wireless/intersil/hostap/Kconfig4
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap.h6
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c33
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c11
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c14
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c12
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_ioctl.c5
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c108
-rw-r--r--drivers/net/wireless/marvell/libertas/defs.h3
-rw-r--r--drivers/net/wireless/marvell/libertas/firmware.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c11
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c22
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/deb_defs.h3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c37
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c323
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h149
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c429
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h427
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c162
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c200
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h145
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h159
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c146
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c257
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c140
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c330
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h76
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c34
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c24
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c16
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c16
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c42
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c23
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c70
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c193
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c712
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c354
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c720
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c668
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c756
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c82
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c269
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c423
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c125
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c192
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c90
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c215
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c224
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c88
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c271
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c184
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c121
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c154
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c134
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c58
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c312
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c116
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c214
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c423
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c102
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c210
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c366
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c159
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c220
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c64
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c150
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c647
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c232
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c365
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c124
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c66
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c213
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c310
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c827
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c134
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c467
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c553
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c86
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c81
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c205
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h32
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c33
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1
-rw-r--r--drivers/net/wireless/wl3501_cs.c26
-rw-r--r--drivers/net/wireless/zydas/zd1201.c6
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c15
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/xen-netback/common.h15
-rw-r--r--drivers/net/xen-netback/interface.c61
-rw-r--r--drivers/net/xen-netback/netback.c11
-rw-r--r--drivers/net/xen-netback/rx.c13
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig1
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c4
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c24
-rw-r--r--drivers/nfc/st-nci/se.c3
-rw-r--r--drivers/nfc/st21nfca/core.c1
-rw-r--r--drivers/nfc/st21nfca/se.c3
-rw-r--r--drivers/nfc/trf7970a.c1
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c2
-rw-r--r--drivers/ntb/test/ntb_msi_test.c5
-rw-r--r--drivers/nvdimm/badrange.c26
-rw-r--r--drivers/nvdimm/blk.c3
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/bus.c9
-rw-r--r--drivers/nvdimm/claim.c15
-rw-r--r--drivers/nvdimm/nd.h5
-rw-r--r--drivers/nvdimm/pfn_devs.c13
-rw-r--r--drivers/nvdimm/pmem.c37
-rw-r--r--drivers/nvdimm/region.c21
-rw-r--r--drivers/nvme/host/core.c560
-rw-r--r--drivers/nvme/host/fc.c312
-rw-r--r--drivers/nvme/host/multipath.c10
-rw-r--r--drivers/nvme/host/nvme.h29
-rw-r--r--drivers/nvme/host/pci.c60
-rw-r--r--drivers/nvme/host/rdma.c32
-rw-r--r--drivers/nvme/host/tcp.c16
-rw-r--r--drivers/nvme/host/zns.c57
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/core.c11
-rw-r--r--drivers/nvme/target/fc.c2
-rw-r--r--drivers/nvme/target/loop.c4
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/nvme/target/passthru.c61
-rw-r--r--drivers/nvme/target/tcp.c21
-rw-r--r--drivers/nvme/target/trace.h21
-rw-r--r--drivers/nvmem/core.c50
-rw-r--r--drivers/nvmem/mtk-efuse.c14
-rw-r--r--drivers/of/Kconfig7
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c77
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/of/device.c45
-rw-r--r--drivers/of/of_private.h11
-rw-r--r--drivers/of/of_reserved_mem.c15
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/of/unittest.c34
-rw-r--r--drivers/opp/core.c238
-rw-r--r--drivers/opp/cpu.c2
-rw-r--r--drivers/opp/of.c114
-rw-r--r--drivers/opp/opp.h5
-rw-r--r--drivers/oprofile/buffer_sync.c4
-rw-r--r--drivers/parisc/ccio-dma.c6
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/pci/Kconfig65
-rw-r--r--drivers/pci/controller/Kconfig15
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c1
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c8
-rw-r--r--drivers/pci/controller/dwc/Kconfig3
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c46
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c45
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c87
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c146
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c100
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c164
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c70
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c48
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c257
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c366
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c170
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h110
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c45
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c65
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c49
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c46
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c39
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c140
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c3
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c7
-rw-r--r--drivers/pci/controller/pci-aardvark.c108
-rw-r--r--drivers/pci/controller/pci-hyperv.c63
-rw-r--r--drivers/pci/controller/pci-loongson.c7
-rw-r--r--drivers/pci/controller/pci-mvebu.c26
-rw-r--r--drivers/pci/controller/pci-tegra.c51
-rw-r--r--drivers/pci/controller/pci-v3-semi.c1
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c4
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c461
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c327
-rw-r--r--drivers/pci/controller/pcie-iproc-bcma.c13
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c13
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c2
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c4
-rw-r--r--drivers/pci/controller/vmd.c309
-rw-r--r--drivers/pci/ecam.c10
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c15
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c8
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c1
-rw-r--r--drivers/pci/iov.c1
-rw-r--r--drivers/pci/msi.c38
-rw-r--r--drivers/pci/p2pdma.c22
-rw-r--r--drivers/pci/pci-acpi.c16
-rw-r--r--drivers/pci/pci-bridge-emul.c4
-rw-r--r--drivers/pci/pci-driver.c27
-rw-r--r--drivers/pci/pci-pf-stub.c14
-rw-r--r--drivers/pci/pci-sysfs.c7
-rw-r--r--drivers/pci/pci.c63
-rw-r--r--drivers/pci/pci.h9
-rw-r--r--drivers/pci/pcie/aspm.c294
-rw-r--r--drivers/pci/pcie/bw_notification.c3
-rw-r--r--drivers/pci/pcie/dpc.c7
-rw-r--r--drivers/pci/probe.c17
-rw-r--r--drivers/pci/quirks.c135
-rw-r--r--drivers/pci/xen-pcifront.c1
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-cmn.c1641
-rw-r--r--drivers/perf/arm_dsu_pmu.c63
-rw-r--r--drivers/perf/arm_pmu.c155
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h1
-rw-r--r--drivers/perf/thunderx2_pmu.c7
-rw-r--r--drivers/perf/xgene_pmu.c32
-rw-r--r--drivers/phy/Kconfig11
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c22
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c13
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c19
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c8
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c24
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c2119
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c79
-rw-r--r--drivers/phy/hisilicon/phy-hi3660-usb3.c2
-rw-r--r--drivers/phy/intel/Kconfig22
-rw-r--r--drivers/phy/intel/Makefile5
-rw-r--r--drivers/phy/intel/phy-intel-keembay-emmc.c307
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c (renamed from drivers/phy/intel/phy-intel-combo.c)0
-rw-r--r--drivers/phy/intel/phy-intel-lgm-emmc.c (renamed from drivers/phy/intel/phy-intel-emmc.c)0
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c2
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c14
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c14
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-hsic.c40
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-usb2.c33
-rw-r--r--drivers/phy/mediatek/Kconfig7
-rw-r--r--drivers/phy/mediatek/Makefile5
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c (renamed from drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c)4
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c (renamed from drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c)2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c (renamed from drivers/gpu/drm/mediatek/mtk_hdmi_phy.c)6
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h (renamed from drivers/gpu/drm/mediatek/mtk_hdmi_phy.h)3
-rw-r--r--drivers/phy/phy-lgm-usb.c284
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c21
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c1053
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h80
-rw-r--r--drivers/phy/ralink/phy-ralink-usb.c2
-rw-r--r--drivers/phy/rockchip/Kconfig12
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-dphy-rx0.c (renamed from drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c)1
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c39
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c2
-rw-r--r--drivers/phy/socionext/Kconfig10
-rw-r--r--drivers/phy/socionext/Makefile1
-rw-r--r--drivers/phy/socionext/phy-uniphier-ahci.c321
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c325
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c159
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c36
-rw-r--r--drivers/pinctrl/Kconfig37
-rw-r--r--drivers/pinctrl/Makefile6
-rw-r--r--drivers/pinctrl/actions/Kconfig6
-rw-r--r--drivers/pinctrl/actions/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c4
-rw-r--r--drivers/pinctrl/actions/pinctrl-s500.c1727
-rw-r--r--drivers/pinctrl/actions/pinctrl-s700.c2
-rw-r--r--drivers/pinctrl/actions/pinctrl-s900.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c17
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c15
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/devicetree.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig5
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c13
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h57
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qm.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8qxp.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-scu.c5
-rw-r--r--drivers/pinctrl/intel/Kconfig12
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c24
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c170
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c64
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h7
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c60
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c42
-rw-r--r--drivers/pinctrl/mediatek/Kconfig14
-rw-r--r--drivers/pinctrl/mediatek/Makefile2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c11
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c103
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8167.c362
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c1409
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c31
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h1248
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8192.h2275
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c11
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c16
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c6
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-amd.h69
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c7
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c349
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c47
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c4
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c8
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c30
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c17
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c43
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c630
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c18
-rw-r--r--drivers/pinctrl/renesas/Kconfig (renamed from drivers/pinctrl/sh-pfc/Kconfig)238
-rw-r--r--drivers/pinctrl/renesas/Makefile (renamed from drivers/pinctrl/sh-pfc/Makefile)8
-rw-r--r--drivers/pinctrl/renesas/core.c (renamed from drivers/pinctrl/sh-pfc/core.c)0
-rw-r--r--drivers/pinctrl/renesas/core.h (renamed from drivers/pinctrl/sh-pfc/core.h)0
-rw-r--r--drivers/pinctrl/renesas/gpio.c (renamed from drivers/pinctrl/sh-pfc/gpio.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c (renamed from drivers/pinctrl/sh-pfc/pfc-emev2.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a73a4.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7740.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7740.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77470.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77470.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7778.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7779.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7779.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7790.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7790.c)121
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7791.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7791.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7792.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7792.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7794.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7794.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77950.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77951.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a7796.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77965.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77970.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77980.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77990.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77990.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c (renamed from drivers/pinctrl/sh-pfc/pfc-r8a77995.c)2
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7203.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7203.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7264.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7264.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7269.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7269.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh73a0.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh73a0.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7720.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7720.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7722.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7722.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7723.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7724.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7734.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7757.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7757.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7785.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7785.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7786.c (renamed from drivers/pinctrl/sh-pfc/pfc-sh7786.c)0
-rw-r--r--drivers/pinctrl/renesas/pfc-shx3.c (renamed from drivers/pinctrl/sh-pfc/pfc-shx3.c)0
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c (renamed from drivers/pinctrl/pinctrl-rza1.c)11
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c (renamed from drivers/pinctrl/pinctrl-rza2.c)4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzn1.c (renamed from drivers/pinctrl/pinctrl-rzn1.c)6
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c (renamed from drivers/pinctrl/sh-pfc/pinctrl.c)0
-rw-r--r--drivers/pinctrl/renesas/sh_pfc.h (renamed from drivers/pinctrl/sh-pfc/sh_pfc.h)0
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c8
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c8
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c13
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c105
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c708
-rw-r--r--drivers/pinctrl/visconti/Kconfig14
-rw-r--r--drivers/pinctrl/visconti/Makefile3
-rw-r--r--drivers/pinctrl/visconti/pinctrl-common.c305
-rw-r--r--drivers/pinctrl/visconti/pinctrl-common.h96
-rw-r--r--drivers/pinctrl/visconti/pinctrl-tmpv7700.c355
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c12
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c106
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c26
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h27
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c42
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c23
-rw-r--r--drivers/platform/x86/intel_pmc_core.c121
-rw-r--r--drivers/platform/x86/intel_pmc_core.h5
-rw-r--r--drivers/platform/x86/mlx-platform.c16
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c18
-rw-r--r--drivers/pnp/core.c4
-rw-r--r--drivers/pnp/isapnp/compat.c23
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/avs/Kconfig37
-rw-r--r--drivers/power/avs/Makefile4
-rw-r--r--drivers/power/reset/Kconfig4
-rw-r--r--drivers/power/reset/ocelot-reset.c55
-rw-r--r--drivers/power/supply/Kconfig36
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_fg.c4
-rw-r--r--drivers/power/supply/bq24257_charger.c2
-rw-r--r--drivers/power/supply/bq2515x_charger.c14
-rw-r--r--drivers/power/supply/bq25890_charger.c17
-rw-r--r--drivers/power/supply/bq25980_charger.c1314
-rw-r--r--drivers/power/supply/bq25980_charger.h178
-rw-r--r--drivers/power/supply/bq27xxx_battery.c102
-rw-r--r--drivers/power/supply/bq27xxx_battery_hdq.c11
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c12
-rw-r--r--drivers/power/supply/charger-manager.c578
-rw-r--r--drivers/power/supply/cpcap-battery.c7
-rw-r--r--drivers/power/supply/ds2760_battery.c2
-rw-r--r--drivers/power/supply/ds2780_battery.c6
-rw-r--r--drivers/power/supply/ds2781_battery.c6
-rw-r--r--drivers/power/supply/goldfish_battery.c2
-rw-r--r--drivers/power/supply/gpio-charger.c172
-rw-r--r--drivers/power/supply/ingenic-battery.c8
-rw-r--r--drivers/power/supply/lego_ev3_battery.c24
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c3
-rw-r--r--drivers/power/supply/max17040_battery.c489
-rw-r--r--drivers/power/supply/max1721x_battery.c2
-rw-r--r--drivers/power/supply/pm2301_charger.c7
-rw-r--r--drivers/power/supply/power_supply_core.c19
-rw-r--r--drivers/power/supply/power_supply_sysfs.c1
-rw-r--r--drivers/power/supply/rn5t618_power.c556
-rw-r--r--drivers/power/supply/rt9455_charger.c2
-rw-r--r--drivers/power/supply/s3c_adc_battery.c2
-rw-r--r--drivers/power/supply/sbs-battery.c125
-rw-r--r--drivers/power/supply/smb347-charger.c692
-rw-r--r--drivers/power/supply/test_power.c26
-rw-r--r--drivers/power/supply/ucs1002_power.c75
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/idle_inject.c1
-rw-r--r--drivers/powercap/intel_rapl_common.c84
-rw-r--r--drivers/powercap/intel_rapl_msr.c5
-rw-r--r--drivers/powercap/powercap_sys.c4
-rw-r--r--drivers/ptp/ptp_ines.c91
-rw-r--r--drivers/ptp/ptp_qoriq.c20
-rw-r--r--drivers/pwm/Kconfig12
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c17
-rw-r--r--drivers/pwm/pwm-bcm2835.c10
-rw-r--r--drivers/pwm/pwm-crc.c128
-rw-r--r--drivers/pwm/pwm-cros-ec.c37
-rw-r--r--drivers/pwm/pwm-img.c3
-rw-r--r--drivers/pwm/pwm-jz4740.c9
-rw-r--r--drivers/pwm/pwm-lpss-platform.c1
-rw-r--r--drivers/pwm/pwm-lpss.c85
-rw-r--r--drivers/pwm/pwm-lpss.h3
-rw-r--r--drivers/pwm/pwm-pca9685.c45
-rw-r--r--drivers/pwm/pwm-rockchip.c15
-rw-r--r--drivers/pwm/pwm-sifive.c8
-rw-r--r--drivers/pwm/pwm-sl28cpld.c270
-rw-r--r--drivers/pwm/pwm-sprd.c7
-rw-r--r--drivers/pwm/pwm-sun4i.c36
-rw-r--r--drivers/pwm/sysfs.c4
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c18
-rw-r--r--drivers/ras/cec.c26
-rw-r--r--drivers/regulator/88pg86x.c2
-rw-r--r--drivers/regulator/Kconfig64
-rw-r--r--drivers/regulator/Makefile5
-rw-r--r--drivers/regulator/bd718x7-regulator.c422
-rw-r--r--drivers/regulator/bd9576-regulator.c337
-rw-r--r--drivers/regulator/core.c241
-rw-r--r--drivers/regulator/da9055-regulator.c2
-rw-r--r--drivers/regulator/da9062-regulator.c2
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/da9210-regulator.c6
-rw-r--r--drivers/regulator/da9211-regulator.c18
-rw-r--r--drivers/regulator/dbx500-prcmu.c26
-rw-r--r--drivers/regulator/dummy.c4
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed.c20
-rw-r--r--drivers/regulator/lochnagar-regulator.c1
-rw-r--r--drivers/regulator/lp8755.c88
-rw-r--r--drivers/regulator/ltc3589.c12
-rw-r--r--drivers/regulator/ltc3676.c12
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max77826-regulator.c2
-rw-r--r--drivers/regulator/mp886x.c109
-rw-r--r--drivers/regulator/mt6360-regulator.c459
-rw-r--r--drivers/regulator/pca9450-regulator.c6
-rw-r--r--drivers/regulator/pv88060-regulator.c10
-rw-r--r--drivers/regulator/pv88080-regulator.c10
-rw-r--r--drivers/regulator/pv88090-regulator.c10
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c8
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/regulator/qcom_smd-regulator.c167
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c177
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c1
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c214
-rw-r--r--drivers/regulator/rt4801-regulator.c223
-rw-r--r--drivers/regulator/rtmv20-regulator.c397
-rw-r--r--drivers/regulator/s5m8767.c13
-rw-r--r--drivers/regulator/slg51000-regulator.c4
-rw-r--r--drivers/regulator/stm32-booster.c2
-rw-r--r--drivers/regulator/stm32-pwr.c2
-rw-r--r--drivers/regulator/stm32-vrefbuf.c2
-rw-r--r--drivers/regulator/stpmic1_regulator.c4
-rw-r--r--drivers/regulator/stw481x-vmmc.c4
-rw-r--r--drivers/regulator/sy8106a-regulator.c2
-rw-r--r--drivers/regulator/sy8827n.c2
-rw-r--r--drivers/regulator/ti-abb-regulator.c2
-rw-r--r--drivers/regulator/tps51632-regulator.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c2
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps65086-regulator.c4
-rw-r--r--drivers/regulator/tps65090-regulator.c8
-rw-r--r--drivers/regulator/tps6586x-regulator.c8
-rw-r--r--drivers/regulator/tps65910-regulator.c135
-rw-r--r--drivers/regulator/tps65912-regulator.c4
-rw-r--r--drivers/regulator/wm831x-dcdc.c4
-rw-r--r--drivers/regulator/wm831x-isink.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig15
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/mtk_common.h32
-rw-r--r--drivers/remoteproc/mtk_scp.c199
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c9
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c16
-rw-r--r--drivers/remoteproc/remoteproc_core.c25
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c6
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c23
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c119
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c1395
-rw-r--r--drivers/reset/Kconfig16
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c3
-rw-r--r--drivers/reset/reset-imx7.c13
-rw-r--r--drivers/reset/reset-raspberrypi.c122
-rw-r--r--drivers/reset/reset-zynqmp.c50
-rw-r--r--drivers/reset/sti/reset-syscfg.c7
-rw-r--r--drivers/rpmsg/mtk_rpmsg.c9
-rw-r--r--drivers/rpmsg/qcom_glink_native.c70
-rw-r--r--drivers/rpmsg/qcom_smd.c32
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c7
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c76
-rw-r--r--drivers/rtc/rtc-ds1685.c8
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c4
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c7
-rw-r--r--drivers/rtc/rtc-mt6397.c3
-rw-r--r--drivers/rtc/rtc-pcf2127.c4
-rw-r--r--drivers/rtc/rtc-r9701.c43
-rw-r--r--drivers/rtc/rtc-rs5c313.c34
-rw-r--r--drivers/rtc/rtc-rv3028.c213
-rw-r--r--drivers/rtc/rtc-rv3032.c925
-rw-r--r--drivers/rtc/rtc-rv8803.c8
-rw-r--r--drivers/rtc/rtc-rx8010.c332
-rw-r--r--drivers/rtc/rtc-s3c.c9
-rw-r--r--drivers/rtc/rtc-st-lpc.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c15
-rw-r--r--drivers/s390/block/dasd_ioctl.c17
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/con3215.c7
-rw-r--r--drivers/s390/char/raw3270.h1
-rw-r--r--drivers/s390/char/sclp.h4
-rw-r--r--drivers/s390/char/sclp_ap.c63
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_early_core.c15
-rw-r--r--drivers/s390/char/sclp_rw.c18
-rw-r--r--drivers/s390/char/sclp_rw.h2
-rw-r--r--drivers/s390/char/sclp_sdias.c8
-rw-r--r--drivers/s390/char/tape.h3
-rw-r--r--drivers/s390/char/tape_std.h12
-rw-r--r--drivers/s390/char/zcore.c17
-rw-r--r--drivers/s390/cio/chsc.c43
-rw-r--r--drivers/s390/cio/chsc.h8
-rw-r--r--drivers/s390/cio/css.c25
-rw-r--r--drivers/s390/cio/css.h4
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_ops.c93
-rw-r--r--drivers/s390/cio/qdio_main.c43
-rw-r--r--drivers/s390/cio/qdio_setup.c38
-rw-r--r--drivers/s390/crypto/ap_bus.c425
-rw-r--r--drivers/s390/crypto/ap_bus.h54
-rw-r--r--drivers/s390/crypto/ap_card.c34
-rw-r--r--drivers/s390/crypto/ap_debug.h8
-rw-r--r--drivers/s390/crypto/ap_queue.c252
-rw-r--r--drivers/s390/crypto/pkey_api.c292
-rw-r--r--drivers/s390/crypto/zcrypt_api.c416
-rw-r--r--drivers/s390/crypto/zcrypt_api.h49
-rw-r--r--drivers/s390/crypto/zcrypt_card.c25
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c411
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h74
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c6
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c45
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c97
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h8
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c312
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h63
-rw-r--r--drivers/s390/crypto/zcrypt_error.h88
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c131
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c264
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.h4
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c17
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_mpc.h1
-rw-r--r--drivers/s390/net/ism.h7
-rw-r--r--drivers/s390/net/ism_drv.c47
-rw-r--r--drivers/s390/net/qeth_core.h102
-rw-r--r--drivers/s390/net/qeth_core_main.c359
-rw-r--r--drivers/s390/net/qeth_core_mpc.h14
-rw-r--r--drivers/s390/net/qeth_core_sys.c71
-rw-r--r--drivers/s390/net/qeth_ethtool.c16
-rw-r--r--drivers/s390/net/qeth_l2.h9
-rw-r--r--drivers/s390/net/qeth_l2_main.c888
-rw-r--r--drivers/s390/net/qeth_l2_sys.c17
-rw-r--r--drivers/s390/net/qeth_l3.h4
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c72
-rw-r--r--drivers/s390/scsi/zfcp_erp.c8
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c10
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/scsi/53c700.c125
-rw-r--r--drivers/scsi/53c700.h17
-rw-r--r--drivers/scsi/aacraid/aachba.c11
-rw-r--r--drivers/scsi/aacraid/commctrl.c20
-rw-r--r--drivers/scsi/aacraid/commsup.c9
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c7
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h8
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h102
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c377
-rw-r--r--drivers/scsi/arm/cumana_2.c19
-rw-r--r--drivers/scsi/arm/eesox.c9
-rw-r--r--drivers/scsi/arm/oak.c2
-rw-r--r--drivers/scsi/arm/powertec.c9
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/bfa/bfad.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h16
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c21
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h1
-rw-r--r--drivers/scsi/dc395x.c41
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c9
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c28
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c8
-rw-r--r--drivers/scsi/fdomain_isa.c5
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c6
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c9
-rw-r--r--drivers/scsi/fnic/fnic_main.c5
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_wq_copy.c8
-rw-r--r--drivers/scsi/gdth.c153
-rw-r--r--drivers/scsi/hisi_sas/Kconfig1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h40
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c159
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c24
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c338
-rw-r--r--drivers/scsi/hosts.c1
-rw-r--r--drivers/scsi/hpsa.c21
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c229
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h160
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c36
-rw-r--r--drivers/scsi/initio.c14
-rw-r--r--drivers/scsi/isci/host.c2
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/remote_node_table.h2
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/scsi/jazz_esp.c14
-rw-r--r--drivers/scsi/libfc/fc_disc.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c8
-rw-r--r--drivers/scsi/libsas/sas_discover.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c5
-rw-r--r--drivers/scsi/mac_esp.c14
-rw-r--r--drivers/scsi/megaraid.c192
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c41
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c74
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c16
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c366
-rw-r--r--drivers/scsi/mvsas/mv_init.c4
-rw-r--r--drivers/scsi/mvumi.c1
-rw-r--r--drivers/scsi/myrb.c11
-rw-r--r--drivers/scsi/myrs.c8
-rw-r--r--drivers/scsi/nsp32.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h27
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c38
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c221
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h15
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c109
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qedf/qedf.h9
-rw-r--r--drivers/scsi/qedf/qedf_els.c34
-rw-r--r--drivers/scsi/qedf/qedf_io.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c151
-rw-r--r--drivers/scsi/qedi/qedi.h6
-rw-r--r--drivers/scsi/qedi/qedi_fw.c30
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c7
-rw-r--r--drivers/scsi/qedi/qedi_main.c131
-rw-r--r--drivers/scsi/qla1280.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c100
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h69
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c300
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c102
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h101
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c60
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c74
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c150
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c60
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h9
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/qlogicpti.c14
-rw-r--r--drivers/scsi/scsi_debug.c111
-rw-r--r--drivers/scsi/scsi_error.c37
-rw-r--r--drivers/scsi/scsi_lib.c196
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_sysfs.c11
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/sd.c141
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sense_codes.h54
-rw-r--r--drivers/scsi/sg.c9
-rw-r--r--drivers/scsi/sgiwd93.c14
-rw-r--r--drivers/scsi/smartpqi/Kconfig4
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h7
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c476
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h2
-rw-r--r--drivers/scsi/sni_53c710.c14
-rw-r--r--drivers/scsi/snic/snic_debugfs.c16
-rw-r--r--drivers/scsi/snic/snic_scsi.c8
-rw-r--r--drivers/scsi/snic/vnic_cq.c8
-rw-r--r--drivers/scsi/sr.c53
-rw-r--r--drivers/scsi/storvsc_drv.c56
-rw-r--r--drivers/scsi/sun3x_esp.c14
-rw-r--r--drivers/scsi/sun_esp.c14
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_fw.c6
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c6
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c13
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c266
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h29
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c262
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h11
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c10
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c4
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c127
-rw-r--r--drivers/scsi/ufs/ufshcd.c880
-rw-r--r--drivers/scsi/ufs/ufshcd.h24
-rw-r--r--drivers/scsi/ufs/ufshci.h1
-rw-r--r--drivers/scsi/ufs/unipro.h3
-rw-r--r--drivers/scsi/virtio_scsi.c7
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c4
-rw-r--r--drivers/soc/actions/owl-sps-helper.c1
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c30
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c4
-rw-r--r--drivers/soc/bcm/Kconfig10
-rw-r--r--drivers/soc/bcm/Makefile1
-rw-r--r--drivers/soc/bcm/bcm63xx/Kconfig12
-rw-r--r--drivers/soc/bcm/bcm63xx/Makefile2
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm63xx-power.c378
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c105
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c2
-rw-r--r--drivers/soc/fsl/qbman/bman.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c2
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/imx/gpcv2.c15
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c113
-rw-r--r--drivers/soc/mediatek/mtk-infracfg.c4
-rw-r--r--drivers/soc/qcom/Kconfig16
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/apr.c2
-rw-r--r--drivers/soc/qcom/cpr.c (renamed from drivers/power/avs/qcom-cpr.c)8
-rw-r--r--drivers/soc/qcom/llcc-qcom.c7
-rw-r--r--drivers/soc/qcom/pdr_internal.h2
-rw-r--r--drivers/soc/qcom/rpmh-internal.h4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c115
-rw-r--r--drivers/soc/qcom/socinfo.c5
-rw-r--r--drivers/soc/renesas/Kconfig354
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a779a0-sysc.c448
-rw-r--r--drivers/soc/renesas/rcar-rst.c6
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/rockchip/Kconfig8
-rw-r--r--drivers/soc/rockchip/Makefile1
-rw-r--r--drivers/soc/rockchip/io-domain.c (renamed from drivers/power/avs/rockchip-io-domain.c)0
-rw-r--r--drivers/soc/samsung/Kconfig49
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-asv.c2
-rw-r--r--drivers/soc/samsung/s3c-pm-check.c233
-rw-r--r--drivers/soc/samsung/s3c-pm-debug.c79
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c2
-rw-r--r--drivers/soc/tegra/Kconfig10
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c8
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c30
-rw-r--r--drivers/soc/tegra/fuse/fuse.h10
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c26
-rw-r--r--drivers/soc/tegra/pmc.c237
-rw-r--r--drivers/soc/ti/Kconfig11
-rw-r--r--drivers/soc/ti/Makefile2
-rw-r--r--drivers/soc/ti/k3-ringacc.c33
-rw-r--r--drivers/soc/ti/k3-socinfo.c1
-rw-r--r--drivers/soc/ti/knav_dma.c16
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c14
-rw-r--r--drivers/soc/ti/omap_prm.c274
-rw-r--r--drivers/soc/ti/pm33xx.c47
-rw-r--r--drivers/soc/ti/pruss.c354
-rw-r--r--drivers/soc/ti/smartreflex.c (renamed from drivers/power/avs/smartreflex.c)0
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c251
-rw-r--r--drivers/soc/versatile/soc-integrator.c2
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c2
-rw-r--r--drivers/soundwire/Kconfig7
-rw-r--r--drivers/soundwire/Makefile3
-rw-r--r--drivers/soundwire/bus.c120
-rw-r--r--drivers/soundwire/bus.h52
-rw-r--r--drivers/soundwire/bus_type.c9
-rw-r--r--drivers/soundwire/cadence_master.c199
-rw-r--r--drivers/soundwire/cadence_master.h5
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c425
-rw-r--r--drivers/soundwire/intel.c803
-rw-r--r--drivers/soundwire/intel.h4
-rw-r--r--drivers/soundwire/intel_init.c22
-rw-r--r--drivers/soundwire/master.c2
-rw-r--r--drivers/soundwire/mipi_disco.c18
-rw-r--r--drivers/soundwire/qcom.c118
-rw-r--r--drivers/soundwire/slave.c13
-rw-r--r--drivers/soundwire/stream.c45
-rw-r--r--drivers/soundwire/sysfs_local.h4
-rw-r--r--drivers/soundwire/sysfs_slave.c58
-rw-r--r--drivers/spi/Kconfig33
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-armada-3700.c1
-rw-r--r--drivers/spi/spi-atmel.c7
-rw-r--r--drivers/spi/spi-bcm-qspi.c13
-rw-r--r--drivers/spi/spi-bcm2835.c22
-rw-r--r--drivers/spi/spi-cadence-quadspi.c5
-rw-r--r--drivers/spi/spi-cadence.c2
-rw-r--r--drivers/spi/spi-dw-bt1.c339
-rw-r--r--drivers/spi/spi-dw-core.c679
-rw-r--r--drivers/spi/spi-dw-dma.c332
-rw-r--r--drivers/spi/spi-dw-mmio.c98
-rw-r--r--drivers/spi/spi-dw-pci.c22
-rw-r--r--drivers/spi/spi-dw.h89
-rw-r--r--drivers/spi/spi-fsi.c139
-rw-r--r--drivers/spi/spi-fsl-dspi.c68
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c6
-rw-r--r--drivers/spi/spi-geni-qcom.c194
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c261
-rw-r--r--drivers/spi/spi-imx.c34
-rw-r--r--drivers/spi/spi-lantiq-ssc.c12
-rw-r--r--drivers/spi/spi-mtk-nor.c409
-rw-r--r--drivers/spi/spi-mux.c5
-rw-r--r--drivers/spi/spi-npcm-fiu.c7
-rw-r--r--drivers/spi/spi-nxp-fspi.c69
-rw-r--r--drivers/spi/spi-omap2-mcspi.c17
-rw-r--r--drivers/spi/spi-qcom-qspi.c25
-rw-r--r--drivers/spi/spi-qup.c2
-rw-r--r--drivers/spi/spi-rspi.c81
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.S113
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.h23
-rw-r--r--drivers/spi/spi-s3c24xx-regs.h41
-rw-r--r--drivers/spi/spi-s3c24xx.c30
-rw-r--r--drivers/spi/spi-s3c64xx.c111
-rw-r--r--drivers/spi/spi-sprd-adi.c5
-rw-r--r--drivers/spi/spi-sprd.c17
-rw-r--r--drivers/spi/spi-stm32.c13
-rw-r--r--drivers/spi/spi-synquacer.c5
-rw-r--r--drivers/spi/spi-tegra114.c11
-rw-r--r--drivers/spi/spi-tegra20-sflash.c5
-rw-r--r--drivers/spi/spi-tegra20-slink.c10
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/spi/spi-xilinx.c3
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c720
-rw-r--r--drivers/spi/spidev.c4
-rw-r--r--drivers/ssb/pci.c7
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/comedi/comedi.h4
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c4
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c1
-rw-r--r--drivers/staging/comedi/drivers/comedi_8255.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c2
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c2
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c2
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c3
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c19
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h461
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c2
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c55
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c441
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h2
-rw-r--r--drivers/staging/fwserial/fwserial.c2
-rw-r--r--drivers/staging/greybus/audio_codec.c4
-rw-r--r--drivers/staging/greybus/audio_module.c6
-rw-r--r--drivers/staging/greybus/audio_topology.c20
-rw-r--r--drivers/staging/greybus/gbphy.h4
-rw-r--r--drivers/staging/hikey9xx/Kconfig49
-rw-r--r--drivers/staging/hikey9xx/Makefile7
-rw-r--r--drivers/staging/hikey9xx/TODO5
-rw-r--r--drivers/staging/hikey9xx/hi6421-spmi-pmic.c342
-rw-r--r--drivers/staging/hikey9xx/hi6421v600-regulator.c478
-rw-r--r--drivers/staging/hikey9xx/hisi-spmi-controller.c358
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml159
-rw-r--r--drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml62
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.c671
-rw-r--r--drivers/staging/hikey9xx/phy-hi3670-usb3.yaml72
-rw-r--r--drivers/staging/iio/Documentation/dac/max51741
-rw-r--r--drivers/staging/iio/Documentation/device.txt74
-rw-r--r--drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x13
-rw-r--r--drivers/staging/iio/Documentation/overview.txt57
-rw-r--r--drivers/staging/iio/Documentation/ring.txt47
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light79
-rw-r--r--drivers/staging/iio/Documentation/trigger.txt31
-rw-r--r--drivers/staging/iio/accel/adis16203.c26
-rw-r--r--drivers/staging/iio/accel/adis16240.c25
-rw-r--r--drivers/staging/iio/frequency/ad9834.c1
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c4
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c6
-rw-r--r--drivers/staging/ks7010/ks_hostif.c6
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile3
-rw-r--r--drivers/staging/media/atomisp/Makefile12
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig74
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c68
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c51
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c11
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c1
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c1
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h1
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h268
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c10
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h21
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c3
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h4
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h73
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h6
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h4
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h8
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c6
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c70
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h94
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h29
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h47
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h47
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h16
-rw-r--r--drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h2
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c6
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_env.h4
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_mipi.h2
-rw-r--r--drivers/staging/media/atomisp/pci/ia_css_stream.h4
-rw-r--r--drivers/staging/media/atomisp/pci/input_system_global.h29
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h7
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c145
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c144
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c53
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c4
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c3
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c7
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h7
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c31
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_global.h21
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h17
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_public.h26
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_global.h16
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_global.h30
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_local.h3
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_input_system_private.h224
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_global.h19
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c863
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h10
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c26
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h15
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c196
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c10
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h18
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c20
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c24
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c58
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c4
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c2
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c4
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c379
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_defs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_firmware.c64
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_hrt.c4
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_internal.h44
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c46
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_shading.c25
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_params.c547
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_properties.c10
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c39
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.h8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_struct.h2
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h23
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c5
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c26
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c14
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h2
-rw-r--r--drivers/staging/media/hantro/hantro_postproc.c4
-rw-r--r--drivers/staging/media/imx/TODO4
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h14
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c274
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c8
-rw-r--r--drivers/staging/media/omap4iss/iss.c2
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig13
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/Makefile2
-rw-r--r--drivers/staging/media/phy-rockchip-dphy-rx0/TODO6
-rw-r--r--drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst23
-rw-r--r--drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst22
-rw-r--r--drivers/staging/media/rkisp1/TODO4
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-capture.c230
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-common.h281
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-dev.c17
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-isp.c59
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-params.c211
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-regs.h1
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-resizer.c94
-rw-r--r--drivers/staging/media/rkisp1/rkisp1-stats.c29
-rw-r--r--drivers/staging/media/rkisp1/uapi/rkisp1-config.h289
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c37
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c14
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.h1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c61
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c10
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c29
-rw-r--r--drivers/staging/media/tegra-vde/iommu.c4
-rw-r--r--drivers/staging/media/tegra-vde/vde.c4
-rw-r--r--drivers/staging/media/tegra-video/Kconfig7
-rw-r--r--drivers/staging/media/tegra-video/TODO6
-rw-r--r--drivers/staging/media/tegra-video/csi.c314
-rw-r--r--drivers/staging/media/tegra-video/csi.h8
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c25
-rw-r--r--drivers/staging/media/tegra-video/vi.c850
-rw-r--r--drivers/staging/media/tegra-video/vi.h25
-rw-r--r--drivers/staging/media/tegra-video/video.c23
-rw-r--r--drivers/staging/media/usbvision/Kconfig18
-rw-r--r--drivers/staging/media/usbvision/Makefile4
-rw-r--r--drivers/staging/media/usbvision/TODO11
-rw-r--r--drivers/staging/media/usbvision/usbvision-cards.c1120
-rw-r--r--drivers/staging/media/usbvision/usbvision-cards.h70
-rw-r--r--drivers/staging/media/usbvision/usbvision-core.c2428
-rw-r--r--drivers/staging/media/usbvision/usbvision-i2c.c438
-rw-r--r--drivers/staging/media/usbvision/usbvision-video.c1643
-rw-r--r--drivers/staging/media/usbvision/usbvision.h500
-rw-r--r--drivers/staging/media/zoran/Kconfig76
-rw-r--r--drivers/staging/media/zoran/Makefile7
-rw-r--r--drivers/staging/media/zoran/TODO19
-rw-r--r--drivers/staging/media/zoran/videocodec.c330
-rw-r--r--drivers/staging/media/zoran/videocodec.h308
-rw-r--r--drivers/staging/media/zoran/zoran.h319
-rw-r--r--drivers/staging/media/zoran/zoran_card.c1333
-rw-r--r--drivers/staging/media/zoran/zoran_card.h30
-rw-r--r--drivers/staging/media/zoran/zoran_device.c1013
-rw-r--r--drivers/staging/media/zoran/zoran_device.h64
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c1037
-rw-r--r--drivers/staging/media/zoran/zr36016.c433
-rw-r--r--drivers/staging/media/zoran/zr36016.h92
-rw-r--r--drivers/staging/media/zoran/zr36050.c842
-rw-r--r--drivers/staging/media/zoran/zr36050.h163
-rw-r--r--drivers/staging/media/zoran/zr36057.h154
-rw-r--r--drivers/staging/media/zoran/zr36060.c872
-rw-r--r--drivers/staging/media/zoran/zr36060.h201
-rw-r--r--drivers/staging/most/Kconfig2
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/cdev/Kconfig13
-rw-r--r--drivers/staging/most/cdev/Makefile4
-rw-r--r--drivers/staging/most/dim2/dim2.c6
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c6
-rw-r--r--drivers/staging/mt7621-pci/TODO2
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet-rx.c34
-rw-r--r--drivers/staging/octeon/ethernet.c9
-rw-r--r--drivers/staging/pi433/pi433_if.h26
-rw-r--r--drivers/staging/qlge/qlge.h23
-rw-r--r--drivers/staging/qlge/qlge_dbg.c28
-rw-r--r--drivers/staging/qlge/qlge_main.c22
-rw-r--r--drivers/staging/qlge/qlge_mpi.c15
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c78
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c8
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c58
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c12
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c790
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c92
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c65
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c49
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c25
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c13
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h10
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h62
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h34
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c329
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c46
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c10
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c52
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/Kconfig5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c27
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c70
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c72
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c81
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c64
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c6
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c9
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c12
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h1
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c41
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c5
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c6
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.h3
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c30
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c19
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c20
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c34
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c16
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c74
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c11
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h87
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c42
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c24
-rw-r--r--drivers/staging/sm750fb/sm750.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h11
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c24
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c1257
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c25
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h29
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c21
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/mac.h2
-rw-r--r--drivers/staging/vt6655/rxtx.c24
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml125
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt98
-rw-r--r--drivers/staging/wfx/TODO19
-rw-r--r--drivers/staging/wfx/bh.c75
-rw-r--r--drivers/staging/wfx/bh.h2
-rw-r--r--drivers/staging/wfx/bus.h2
-rw-r--r--drivers/staging/wfx/bus_sdio.c2
-rw-r--r--drivers/staging/wfx/bus_spi.c2
-rw-r--r--drivers/staging/wfx/data_rx.c11
-rw-r--r--drivers/staging/wfx/data_rx.h2
-rw-r--r--drivers/staging/wfx/data_tx.c74
-rw-r--r--drivers/staging/wfx/data_tx.h5
-rw-r--r--drivers/staging/wfx/debug.c27
-rw-r--r--drivers/staging/wfx/fwio.c4
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h256
-rw-r--r--drivers/staging/wfx/hif_api_general.h131
-rw-r--r--drivers/staging/wfx/hif_api_mib.h50
-rw-r--r--drivers/staging/wfx/hif_rx.c91
-rw-r--r--drivers/staging/wfx/hif_tx.c116
-rw-r--r--drivers/staging/wfx/hif_tx.h12
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c124
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h13
-rw-r--r--drivers/staging/wfx/hwio.c2
-rw-r--r--drivers/staging/wfx/hwio.h2
-rw-r--r--drivers/staging/wfx/key.c12
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c33
-rw-r--r--drivers/staging/wfx/main.h4
-rw-r--r--drivers/staging/wfx/queue.c16
-rw-r--r--drivers/staging/wfx/queue.h3
-rw-r--r--drivers/staging/wfx/scan.c6
-rw-r--r--drivers/staging/wfx/scan.h2
-rw-r--r--drivers/staging/wfx/secure_link.h59
-rw-r--r--drivers/staging/wfx/sta.c348
-rw-r--r--drivers/staging/wfx/sta.h4
-rw-r--r--drivers/staging/wfx/traces.h2
-rw-r--r--drivers/staging/wfx/wfx.h7
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c18
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c11
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c24
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_iblock.c5
-rw-r--r--drivers/target/target_core_rd.c2
-rw-r--r--drivers/target/target_core_user.c352
-rw-r--r--drivers/tee/optee/core.c7
-rw-r--r--drivers/tee/optee/optee_msg.h21
-rw-r--r--drivers/tee/optee/optee_private.h1
-rw-r--r--drivers/tee/optee/optee_smc.h3
-rw-r--r--drivers/tee/optee/rpc.c95
-rw-r--r--drivers/tee/tee_core.c92
-rw-r--r--drivers/tee/tee_shm.c32
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/cpufreq_cooling.c8
-rw-r--r--drivers/thermal/cpuidle_cooling.c2
-rw-r--r--drivers/thermal/devfreq_cooling.c3
-rw-r--r--drivers/thermal/gov_power_allocator.c6
-rw-r--r--drivers/thermal/imx8mm_thermal.c10
-rw-r--r--drivers/thermal/imx_thermal.c22
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c51
-rw-r--r--drivers/thermal/rcar_thermal.c4
-rw-r--r--drivers/thermal/st/Kconfig2
-rw-r--r--drivers/thermal/st/stm_thermal.c7
-rw-r--r--drivers/thermal/sun8i_thermal.c16
-rw-r--r--drivers/thermal/thermal_core.c13
-rw-r--r--drivers/thermal/thermal_core.h6
-rw-r--r--drivers/thermal/thermal_netlink.c11
-rw-r--r--drivers/thermal/thermal_sysfs.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c54
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.h6
-rw-r--r--drivers/thunderbolt/Kconfig14
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/acpi.c117
-rw-r--r--drivers/thunderbolt/cap.c136
-rw-r--r--drivers/thunderbolt/ctl.c23
-rw-r--r--drivers/thunderbolt/debugfs.c702
-rw-r--r--drivers/thunderbolt/domain.c48
-rw-r--r--drivers/thunderbolt/icm.c7
-rw-r--r--drivers/thunderbolt/lc.c151
-rw-r--r--drivers/thunderbolt/nhi.c113
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/nhi_ops.c31
-rw-r--r--drivers/thunderbolt/quirks.c2
-rw-r--r--drivers/thunderbolt/switch.c216
-rw-r--r--drivers/thunderbolt/tb.c207
-rw-r--r--drivers/thunderbolt/tb.h162
-rw-r--r--drivers/thunderbolt/tb_msgs.h1
-rw-r--r--drivers/thunderbolt/tb_regs.h34
-rw-r--r--drivers/thunderbolt/test.c13
-rw-r--r--drivers/thunderbolt/usb4.c255
-rw-r--r--drivers/thunderbolt/xdomain.c1
-rw-r--r--drivers/tty/hvc/Kconfig1
-rw-r--r--drivers/tty/hvc/hvcs.c14
-rw-r--r--drivers/tty/ipwireless/hardware.c6
-rw-r--r--drivers/tty/ipwireless/network.c4
-rw-r--r--drivers/tty/ipwireless/tty.c2
-rw-r--r--drivers/tty/n_gsm.c38
-rw-r--r--drivers/tty/n_hdlc.c72
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/pty.c4
-rw-r--r--drivers/tty/serial/21285.c12
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c12
-rw-r--r--drivers/tty/serial/8250/8250_dw.c54
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c110
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c20
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c64
-rw-r--r--drivers/tty/serial/8250/8250_port.c5
-rw-r--r--drivers/tty/serial/Kconfig5
-rw-r--r--drivers/tty/serial/amba-pl011.c11
-rw-r--r--drivers/tty/serial/atmel_serial.c20
-rw-r--r--drivers/tty/serial/earlycon.c9
-rw-r--r--drivers/tty/serial/fsl_lpuart.c90
-rw-r--r--drivers/tty/serial/icom.c32
-rw-r--r--drivers/tty/serial/ifx6x60.c15
-rw-r--r--drivers/tty/serial/imx.c14
-rw-r--r--drivers/tty/serial/max310x.c29
-rw-r--r--drivers/tty/serial/mcf.c1
-rw-r--r--drivers/tty/serial/men_z135_uart.c8
-rw-r--r--drivers/tty/serial/mvebu-uart.c7
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c19
-rw-r--r--drivers/tty/serial/sa1100.c22
-rw-r--r--drivers/tty/serial/sb1250-duart.c9
-rw-r--r--drivers/tty/serial/sc16is7xx.c1
-rw-r--r--drivers/tty/serial/serial_core.c30
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/stm32-usart.c33
-rw-r--r--drivers/tty/serial/timbuart.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/synclink.c82
-rw-r--r--drivers/tty/synclink_gt.c95
-rw-r--r--drivers/tty/synclinkmp.c83
-rw-r--r--drivers/tty/sysrq.c49
-rw-r--r--drivers/tty/tty_baudrate.c6
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/tty_io.c32
-rw-r--r--drivers/tty/tty_jobctrl.c4
-rw-r--r--drivers/tty/tty_ldisc.c3
-rw-r--r--drivers/tty/vt/consolemap.c4
-rw-r--r--drivers/tty/vt/keyboard.c39
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vc_screen.c532
-rw-r--r--drivers/tty/vt/vt.c66
-rw-r--r--drivers/tty/vt/vt_ioctl.c107
-rw-r--r--drivers/uio/uio.c12
-rw-r--r--drivers/usb/atm/cxacru.c27
-rw-r--r--drivers/usb/atm/usbatm.c14
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c7
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c191
-rw-r--r--drivers/usb/cdns3/core.c202
-rw-r--r--drivers/usb/cdns3/core.h17
-rw-r--r--drivers/usb/cdns3/drd.c20
-rw-r--r--drivers/usb/cdns3/drd.h5
-rw-r--r--drivers/usb/cdns3/ep0.c75
-rw-r--r--drivers/usb/cdns3/gadget.c391
-rw-r--r--drivers/usb/cdns3/gadget.h16
-rw-r--r--drivers/usb/cdns3/host.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c21
-rw-r--r--drivers/usb/class/cdc-acm.c100
-rw-r--r--drivers/usb/class/cdc-acm.h22
-rw-r--r--drivers/usb/class/cdc-wdm.c72
-rw-r--r--drivers/usb/common/usb-conn-gpio.c32
-rw-r--r--drivers/usb/core/Kconfig14
-rw-r--r--drivers/usb/core/devices.c41
-rw-r--r--drivers/usb/core/driver.c38
-rw-r--r--drivers/usb/core/generic.c8
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/core/hub.c62
-rw-r--r--drivers/usb/core/message.c186
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/urb.c120
-rw-r--r--drivers/usb/core/usb.c12
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/Makefile2
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/drd.c180
-rw-r--r--drivers/usb/dwc2/gadget.c42
-rw-r--r--drivers/usb/dwc2/params.c5
-rw-r--r--drivers/usb/dwc2/platform.c46
-rw-r--r--drivers/usb/dwc3/core.c92
-rw-r--r--drivers/usb/dwc3/core.h49
-rw-r--r--drivers/usb/dwc3/debug.h8
-rw-r--r--drivers/usb/dwc3/debugfs.c59
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c41
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c2
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c7
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c128
-rw-r--r--drivers/usb/dwc3/ep0.c62
-rw-r--r--drivers/usb/dwc3/gadget.c593
-rw-r--r--drivers/usb/dwc3/gadget.h3
-rw-r--r--drivers/usb/dwc3/trace.h17
-rw-r--r--drivers/usb/dwc3/ulpi.c2
-rw-r--r--drivers/usb/early/ehci-dbgp.c15
-rw-r--r--drivers/usb/early/xhci-dbc.c14
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/f_acm.c8
-rw-r--r--drivers/usb/gadget/function/f_midi.c6
-rw-r--r--drivers/usb/gadget/function/f_ncm.c10
-rw-r--r--drivers/usb/gadget/function/f_printer.c16
-rw-r--r--drivers/usb/gadget/function/f_tcm.c12
-rw-r--r--drivers/usb/gadget/function/f_uvc.c8
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c1
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c5
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c10
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h3
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c68
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c9
-rw-r--r--drivers/usb/gadget/udc/core.c82
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c11
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c17
-rw-r--r--drivers/usb/gadget/udc/net2272.c24
-rw-r--r--drivers/usb/gadget/udc/net2272.h1
-rw-r--r--drivers/usb/gadget/udc/net2280.c32
-rw-r--r--drivers/usb/gadget/udc/net2280.h1
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c55
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c55
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c34
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h1
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc_regs.h146
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c60
-rw-r--r--drivers/usb/host/bcma-hcd.c13
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c8
-rw-r--r--drivers/usb/host/ehci-platform.c16
-rw-r--r--drivers/usb/host/ehci-sched.c20
-rw-r--r--drivers/usb/host/ehci-spear.c8
-rw-r--r--drivers/usb/host/ehci-tegra.c4
-rw-r--r--drivers/usb/host/fotg210-hcd.c20
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c9
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c21
-rw-r--r--drivers/usb/host/pci-quirks.c35
-rw-r--r--drivers/usb/host/xhci-dbgtty.c6
-rw-r--r--drivers/usb/host/xhci-debugfs.c109
-rw-r--r--drivers/usb/host/xhci-debugfs.h10
-rw-r--r--drivers/usb/host/xhci-histb.c2
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-mtk.c6
-rw-r--r--drivers/usb/host/xhci-pci.c27
-rw-r--r--drivers/usb/host/xhci-plat.c44
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-rcar.c43
-rw-r--r--drivers/usb/host/xhci-ring.c23
-rw-r--r--drivers/usb/host/xhci-tegra.c1
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/image/microtek.c14
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c20
-rw-r--r--drivers/usb/misc/adutux.c1
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c21
-rw-r--r--drivers/usb/misc/appledisplay.c14
-rw-r--r--drivers/usb/misc/legousbtower.c61
-rw-r--r--drivers/usb/misc/usb3503.c18
-rw-r--r--drivers/usb/misc/usb4604.c8
-rw-r--r--drivers/usb/misc/usblcd.c1
-rw-r--r--drivers/usb/misc/yurex.c6
-rw-r--r--drivers/usb/mtu3/mtu3.h6
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c1
-rw-r--r--drivers/usb/musb/musb_dsps.c4
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c18
-rw-r--r--drivers/usb/phy/phy-ulpi-viewport.c12
-rw-r--r--drivers/usb/roles/class.c12
-rw-r--r--drivers/usb/serial/cyberjack.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.c37
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/mos7720.c8
-rw-r--r--drivers/usb/serial/option.c15
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h1
-rw-r--r--drivers/usb/serial/qcserial.c4
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/uas.c31
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/typec/Kconfig24
-rw-r--r--drivers/usb/typec/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c2
-rw-r--r--drivers/usb/typec/class.c15
-rw-r--r--drivers/usb/typec/hd3ss3220.c18
-rw-r--r--drivers/usb/typec/mux.c21
-rw-r--r--drivers/usb/typec/mux/Kconfig1
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c207
-rw-r--r--drivers/usb/typec/qcom-pmic-typec.c262
-rw-r--r--drivers/usb/typec/stusb160x.c873
-rw-r--r--drivers/usb/typec/tcpm/Kconfig14
-rw-r--r--drivers/usb/typec/tcpm/Makefile14
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c113
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h25
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c503
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c212
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c368
-rw-r--r--drivers/usb/typec/ucsi/psy.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h2
-rw-r--r--drivers/usb/usbip/usbip_common.c8
-rw-r--r--drivers/usb/usbip/vhci_hcd.c8
-rw-r--r--drivers/vdpa/Kconfig1
-rw-r--r--drivers/vdpa/mlx5/core/mr.c5
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c12
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c35
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/fsl-mc/Kconfig9
-rw-r--r--drivers/vfio/fsl-mc/Makefile4
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c687
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c194
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_private.h55
-rw-r--r--drivers/vfio/pci/Kconfig12
-rw-r--r--drivers/vfio/pci/Makefile1
-rw-r--r--drivers/vfio/pci/vfio_pci.c78
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c27
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h12
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c43
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c143
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c3
-rw-r--r--drivers/vfio/vfio.c9
-rw-r--r--drivers/vfio/vfio_iommu_type1.c42
-rw-r--r--drivers/vhost/scsi.c397
-rw-r--r--drivers/vhost/vdpa.c182
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/vhost/vringh.c9
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/ktd253-backlight.c198
-rw-r--r--drivers/video/backlight/sky81452-backlight.c1
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/console/newport_con.c22
-rw-r--r--drivers/video/console/sticon.c304
-rw-r--r--drivers/video/console/sticore.c284
-rw-r--r--drivers/video/fbdev/Kconfig19
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/arkfb.c41
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c51
-rw-r--r--drivers/video/fbdev/aty/atyfb.h4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c50
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c12
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c38
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h3
-rw-r--r--drivers/video/fbdev/core/fbcon.c12
-rw-r--r--drivers/video/fbdev/core/fbmem.c14
-rw-r--r--drivers/video/fbdev/cyber2000fb.c13
-rw-r--r--drivers/video/fbdev/geode/gxfb.h5
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c36
-rw-r--r--drivers/video/fbdev/geode/lxfb.h5
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c37
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c4
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c4
-rw-r--r--drivers/video/fbdev/hyperv_fb.c10
-rw-r--r--drivers/video/fbdev/i740fb.c40
-rw-r--r--drivers/video/fbdev/kyro/STG4000InitDevice.c7
-rw-r--r--drivers/video/fbdev/mbx/Makefile4
-rw-r--r--drivers/video/fbdev/mbx/mbxdebugfs.c232
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c1053
-rw-r--r--drivers/video/fbdev/mbx/reg_bits.h614
-rw-r--r--drivers/video/fbdev/mbx/regs.h196
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c64
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/s3c2410fb-regs-lcd.h143
-rw-r--r--drivers/video/fbdev/s3c2410fb.c16
-rw-r--r--drivers/video/fbdev/s3fb.c39
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c53
-rw-r--r--drivers/video/fbdev/sis/init.c11
-rw-r--r--drivers/video/fbdev/sm712fb.c8
-rw-r--r--drivers/video/fbdev/ssd1307fb.c8
-rw-r--r--drivers/video/fbdev/sstfb.c2
-rw-r--r--drivers/video/fbdev/sticore.h27
-rw-r--r--drivers/video/fbdev/tgafb.c12
-rw-r--r--drivers/video/fbdev/udlfb.c4
-rw-r--r--drivers/video/fbdev/vga16fb.c14
-rw-r--r--drivers/video/fbdev/via/via-core.c39
-rw-r--r--drivers/video/fbdev/vt8623fb.c41
-rw-r--r--drivers/virt/Kconfig2
-rw-r--r--drivers/virt/Makefile2
-rw-r--r--drivers/virt/fsl_hypervisor.c17
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig20
-rw-r--r--drivers/virt/nitro_enclaves/Makefile9
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c1731
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.h109
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.c625
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.h327
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c9
-rw-r--r--drivers/virtio/Kconfig13
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio.c21
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_dma_buf.c88
-rw-r--r--drivers/virtio/virtio_input.c2
-rw-r--r--drivers/virtio/virtio_mem.c52
-rw-r--r--drivers/virtio/virtio_mmio.c31
-rw-r--r--drivers/virtio/virtio_pci_modern.c95
-rw-r--r--drivers/w1/masters/mxc_w1.c14
-rw-r--r--drivers/w1/slaves/w1_ds2405.c2
-rw-r--r--drivers/w1/slaves/w1_ds2406.c2
-rw-r--r--drivers/w1/slaves/w1_ds2408.c2
-rw-r--r--drivers/w1/slaves/w1_ds2413.c2
-rw-r--r--drivers/w1/slaves/w1_ds2423.c2
-rw-r--r--drivers/w1/slaves/w1_ds2430.c2
-rw-r--r--drivers/w1/slaves/w1_ds2431.c2
-rw-r--r--drivers/w1/slaves/w1_ds2433.c2
-rw-r--r--drivers/w1/slaves/w1_ds2438.c2
-rw-r--r--drivers/w1/slaves/w1_ds250x.c2
-rw-r--r--drivers/w1/slaves/w1_ds2780.c2
-rw-r--r--drivers/w1/slaves/w1_ds2781.c2
-rw-r--r--drivers/w1/slaves/w1_ds2805.c2
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c2
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c459
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/Kconfig29
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/cadence_wdt.c9
-rw-r--r--drivers/watchdog/davinci_wdt.c9
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c79
-rw-r--r--drivers/watchdog/it87_wdt.c6
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c5
-rw-r--r--drivers/watchdog/renesas_wdt.c12
-rw-r--r--drivers/watchdog/rti_wdt.c14
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c229
-rw-r--r--drivers/watchdog/sp5100_tco.c18
-rw-r--r--drivers/watchdog/sp5100_tco.h2
-rw-r--r--drivers/watchdog/visconti_wdt.c195
-rw-r--r--drivers/watchdog/watchdog_dev.c8
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_2l.c16
-rw-r--r--drivers/xen/events/events_base.c477
-rw-r--r--drivers/xen/events/events_fifo.c92
-rw-r--r--drivers/xen/events/events_internal.h76
-rw-r--r--drivers/xen/evtchn.c7
-rw-r--r--drivers/xen/gntdev-dmabuf.c13
-rw-r--r--drivers/xen/gntdev.c17
-rw-r--r--drivers/xen/pvcalls-back.c76
-rw-r--r--drivers/xen/pvcalls-front.c2
-rw-r--r--drivers/xen/swiotlb-xen.c7
-rw-r--r--drivers/xen/unpopulated-alloc.c45
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c13
-rw-r--r--drivers/xen/xen-pciback/pciback.h12
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c48
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c23
-rw-r--r--drivers/xen/xenbus/xenbus_client.c30
-rw-r--r--drivers/zorro/zorro.c2
6556 files changed, 296423 insertions, 146402 deletions
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index ddfd12afe3b9..48019660a096 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -257,7 +257,7 @@ static struct notifier_block vt_notifier_block = {
static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
- pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
+ pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, true);
return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
}
@@ -357,7 +357,6 @@ static void speakup_cut(struct vc_data *vc)
mark_cut_flag = 0;
synth_printf("%s\n", spk_msg_get(MSG_CUT));
- speakup_clear_selection();
ret = speakup_set_selection(tty);
switch (ret) {
@@ -465,7 +464,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
u16 w;
u16 c;
- pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
+ pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, true);
w = scr_readw(pos);
c = w & 0xff;
diff --git a/drivers/accessibility/speakup/selection.c b/drivers/accessibility/speakup/selection.c
index 032f3264fba1..7df7afad5ab4 100644
--- a/drivers/accessibility/speakup/selection.c
+++ b/drivers/accessibility/speakup/selection.c
@@ -22,13 +22,6 @@ struct speakup_selection_work {
struct tty_struct *tty;
};
-void speakup_clear_selection(void)
-{
- console_lock();
- clear_selection();
- console_unlock();
-}
-
static void __speakup_set_selection(struct work_struct *work)
{
struct speakup_selection_work *ssw =
@@ -51,6 +44,10 @@ static void __speakup_set_selection(struct work_struct *work)
goto unref;
}
+ console_lock();
+ clear_selection();
+ console_unlock();
+
set_selection_kernel(&sel, tty);
unref:
diff --git a/drivers/accessibility/speakup/speakup.h b/drivers/accessibility/speakup/speakup.h
index 74fe49c2c511..33594f5a7983 100644
--- a/drivers/accessibility/speakup/speakup.h
+++ b/drivers/accessibility/speakup/speakup.h
@@ -70,7 +70,6 @@ void spk_do_flush(void);
void speakup_start_ttys(void);
void synth_buffer_add(u16 ch);
void synth_buffer_clear(void);
-void speakup_clear_selection(void);
int speakup_set_selection(struct tty_struct *tty);
void speakup_cancel_selection(void);
int speakup_paste_selection(struct tty_struct *tty);
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index a831ff64f8ba..ecc39983e946 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -298,11 +298,13 @@ static unsigned char ttyio_in(int timeout)
struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data;
char rv;
- if (wait_for_completion_timeout(&ldisc_data->completion,
+ if (!timeout) {
+ if (!try_wait_for_completion(&ldisc_data->completion))
+ return 0xff;
+ } else if (wait_for_completion_timeout(&ldisc_data->completion,
usecs_to_jiffies(timeout)) == 0) {
- if (timeout)
- pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
- timeout);
+ pr_warn("spk_ttyio: timeout (%d) while waiting for input\n",
+ timeout);
return 0xff;
}
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index 7398f1196e10..91fca3033a45 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -32,6 +32,10 @@ enum {
E_NEW_DEFAULT,
};
+/*
+ * Note: add new members at the end, speakupmap.h depends on the values of the
+ * enum starting from SPELL_DELAY (see inc_dec_var)
+ */
enum var_id_t {
VERSION = 0, SYNTH, SILENT, SYNTH_DIRECT,
KEYMAP, CHARS,
@@ -42,9 +46,9 @@ enum var_id_t {
SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
ATTRIB_BLEEP, BLEEPS,
- RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
+ RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
DIRECT, PAUSE,
- CAPS_START, CAPS_STOP, CHARTAB,
+ CAPS_START, CAPS_STOP, CHARTAB, INFLECTION,
MAXVARS
};
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7540a5179a47..edf1558c1105 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -504,55 +504,6 @@ config ACPI_EXTLOG
config ACPI_ADXL
bool
-menuconfig PMIC_OPREGION
- bool "PMIC (Power Management Integrated Circuit) operation region support"
- help
- Select this option to enable support for ACPI operation
- region of the PMIC chip. The operation region can be used
- to control power rails and sensor reading/writing on the
- PMIC chip.
-
-if PMIC_OPREGION
-config BYTCRC_PMIC_OPREGION
- bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
- depends on INTEL_SOC_PMIC
- help
- This config adds ACPI operation region support for the Bay Trail
- version of the Crystal Cove PMIC.
-
-config CHTCRC_PMIC_OPREGION
- bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
- depends on INTEL_SOC_PMIC
- help
- This config adds ACPI operation region support for the Cherry Trail
- version of the Crystal Cove PMIC.
-
-config XPOWER_PMIC_OPREGION
- bool "ACPI operation region support for XPower AXP288 PMIC"
- depends on MFD_AXP20X_I2C && IOSF_MBI=y
- help
- This config adds ACPI operation region support for XPower AXP288 PMIC.
-
-config BXT_WC_PMIC_OPREGION
- bool "ACPI operation region support for BXT WhiskeyCove PMIC"
- depends on INTEL_SOC_PMIC_BXTWC
- help
- This config adds ACPI operation region support for BXT WhiskeyCove PMIC.
-
-config CHT_WC_PMIC_OPREGION
- bool "ACPI operation region support for CHT Whiskey Cove PMIC"
- depends on INTEL_SOC_PMIC_CHTWC
- help
- This config adds ACPI operation region support for CHT Whiskey Cove PMIC.
-
-config CHT_DC_TI_PMIC_OPREGION
- bool "ACPI operation region support for Dollar Cove TI PMIC"
- depends on INTEL_SOC_PMIC_CHTDC_TI
- help
- This config adds ACPI operation region support for Dollar Cove TI PMIC.
-
-endif
-
config ACPI_CONFIGFS
tristate "ACPI configfs support"
select CONFIGFS_FS
@@ -568,21 +519,7 @@ config ACPI_PPTT
bool
endif
-config TPS68470_PMIC_OPREGION
- bool "ACPI operation region support for TPS68470 PMIC"
- depends on MFD_TPS68470
- help
- This config adds ACPI operation region support for TI TPS68470 PMIC.
- TPS68470 device is an advanced power management unit that powers
- a Compact Camera Module (CCM), generates clocks for image sensors,
- drives a dual LED for flash and incorporates two LED drivers for
- general purpose indicators.
- This driver enables ACPI operation region support control voltage
- regulators and clocks.
-
- This option is a bool as it provides an ACPI operation
- region, which must be available before any of the devices
- using this, are probed.
+source "drivers/acpi/pmic/Kconfig"
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 9a957544e357..44e412506317 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -107,17 +107,9 @@ obj-$(CONFIG_ACPI_APEI) += apei/
obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
-obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
-obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += pmic/intel_pmic_bytcrc.o
-obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += pmic/intel_pmic_chtcrc.o
-obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
-obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
-obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
-obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += pmic/intel_pmic_chtdc_ti.o
-
obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
-obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o
+obj-y += pmic/
video-objs += acpi_video.o video_detect.o
obj-y += dptf/
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 806b8ce05624..39359ce0eb2c 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -7,39 +7,28 @@
* Wu, Jeff <Jeff.Wu@amd.com>
*/
-#include <linux/clk-provider.h>
-#include <linux/platform_data/clk-fch.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-#include <linux/clkdev.h>
#include <linux/acpi.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/pm.h>
+#include <linux/platform_data/clk-fch.h>
+#include <linux/platform_device.h>
#include "internal.h"
-ACPI_MODULE_NAME("acpi_apd");
struct apd_private_data;
/**
- * ACPI_APD_SYSFS : add device attributes in sysfs
- * ACPI_APD_PM : attach power domain to device
- */
-#define ACPI_APD_SYSFS BIT(0)
-#define ACPI_APD_PM BIT(1)
-
-/**
* struct apd_device_desc - a descriptor for apd device
- * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM
* @fixed_clk_rate: fixed rate input clock source for acpi device;
* 0 means no fixed rate input clock source
+ * @properties: built-in properties of the device such as UART
* @setup: a hook routine to set device resource during create platform device
*
* Device description defined as acpi_device_id.driver_data
*/
struct apd_device_desc {
- unsigned int flags;
unsigned int fixed_clk_rate;
struct property_entry *properties;
int (*setup)(struct apd_private_data *pdata);
@@ -71,7 +60,6 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
}
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
-
static int misc_check_res(struct acpi_resource *ares, void *data)
{
struct resource res;
@@ -142,7 +130,7 @@ static const struct apd_device_desc cz_uart_desc = {
static const struct apd_device_desc fch_misc_desc = {
.setup = fch_misc_setup,
};
-#endif
+#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
#ifdef CONFIG_ARM64
static const struct apd_device_desc xgene_i2c_desc = {
@@ -184,13 +172,9 @@ static const struct apd_device_desc hip08_spi_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
-#endif
+#endif /* CONFIG_ARM64 */
-#else
-
-#define APD_ADDR(desc) (0UL)
-
-#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
+#endif
/**
* Create platform device during acpi scan attach handle.
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 33ac6cb428fe..67f1d33d15c4 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -15,8 +15,6 @@
#include "internal.h"
-ACPI_MODULE_NAME("cmos rtc");
-
static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
{ "PNP0B00" },
{ "PNP0B01" },
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 88c8af455ea3..cf91f49101ea 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -228,6 +228,7 @@ static void acpi_table_drop_item(struct config_group *group,
ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
acpi_unload_table(table->index);
+ config_item_put(cfg);
}
static struct configfs_group_operations acpi_table_group_ops = {
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 6041974c7627..fb7290338593 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -749,6 +749,9 @@ static int __init acpi_aml_init(void)
{
int ret;
+ if (acpi_disabled)
+ return -ENODEV;
+
/* Initialize AML IO interface */
mutex_init(&acpi_aml_io.lock);
init_waitqueue_head(&acpi_aml_io.wait);
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index f138e12b7b82..72f1fb77abcd 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -222,9 +222,9 @@ static int __init extlog_init(void)
u64 cap;
int rc;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
-
- if (!(cap & MCG_ELOG_P) || !extlog_get_l1addr())
+ if (rdmsrl_safe(MSR_IA32_MCG_CAP, &cap) ||
+ !(cap & MCG_ELOG_P) ||
+ !extlog_get_l1addr())
return -ENODEV;
rc = -EINVAL;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 5e2bfbcf526f..be73974ce449 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -26,8 +26,6 @@
#include "internal.h"
-ACPI_MODULE_NAME("acpi_lpss");
-
#ifdef CONFIG_X86_INTEL_LPSS
#include <asm/cpu_device_id.h>
@@ -67,7 +65,15 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
-#define LPSS_NO_D3_DELAY BIT(5)
+/*
+ * For some devices the DSDT AML code for another device turns off the device
+ * before our suspend handler runs, causing us to read/save all 1-s (0xffffffff)
+ * as ctx register values.
+ * Luckily these devices always use the same ctx register values, so we can
+ * work around this by saving the ctx registers once on activation.
+ */
+#define LPSS_SAVE_CTX_ONCE BIT(5)
+#define LPSS_NO_D3_DELAY BIT(6)
struct lpss_private_data;
@@ -254,9 +260,10 @@ static const struct lpss_device_desc byt_pwm_dev_desc = {
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
- .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+ .flags = LPSS_SAVE_CTX_ONCE | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = bsw_pwm_setup,
+ .resume_from_noirq = true,
};
static const struct lpss_device_desc byt_uart_dev_desc = {
@@ -884,9 +891,14 @@ static int acpi_lpss_activate(struct device *dev)
* we have to deassert reset line to be sure that ->probe() will
* recognize the device.
*/
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+ if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
lpss_deassert_reset(pdata);
+#ifdef CONFIG_PM
+ if (pdata->dev_desc->flags & LPSS_SAVE_CTX_ONCE)
+ acpi_lpss_save_ctx(dev, pdata);
+#endif
+
return 0;
}
@@ -1030,7 +1042,7 @@ static int acpi_lpss_resume(struct device *dev)
acpi_lpss_d3_to_d0_delay(pdata);
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+ if (pdata->dev_desc->flags & (LPSS_SAVE_CTX | LPSS_SAVE_CTX_ONCE))
acpi_lpss_restore_ctx(dev, pdata);
return 0;
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index e294f44a7850..b02fd51e5589 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -22,13 +22,6 @@
#define ACPI_MEMORY_DEVICE_HID "PNP0C80"
#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device"
-#define _COMPONENT ACPI_MEMORY_DEVICE_COMPONENT
-
-#undef PREFIX
-#define PREFIX "ACPI:memory_hp:"
-
-ACPI_MODULE_NAME("acpi_memhotplug");
-
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
{"", 0},
@@ -36,11 +29,6 @@ static const struct acpi_device_id memory_device_ids[] = {
#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
-/* Memory Device States */
-#define MEMORY_INVALID_STATE 0
-#define MEMORY_POWER_ON_STATE 1
-#define MEMORY_POWER_OFF_STATE 2
-
static int acpi_memory_device_add(struct acpi_device *device,
const struct acpi_device_id *not_used);
static void acpi_memory_device_remove(struct acpi_device *device);
@@ -64,8 +52,7 @@ struct acpi_memory_info {
};
struct acpi_memory_device {
- struct acpi_device * device;
- unsigned int state; /* State of the memory device */
+ struct acpi_device *device;
struct list_head res_list;
};
@@ -207,7 +194,8 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
- result = __add_memory(node, info->start_addr, info->length);
+ result = __add_memory(node, info->start_addr, info->length,
+ MHP_NONE);
/*
* If the memory block has been used by the kernel, add_memory()
@@ -233,7 +221,6 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
}
if (!num_enabled) {
dev_err(&mem_device->device->dev, "add_memory failed\n");
- mem_device->state = MEMORY_INVALID_STATE;
return -EINVAL;
}
/*
@@ -304,9 +291,6 @@ static int acpi_memory_device_add(struct acpi_device *device,
return result;
}
- /* Set the device state */
- mem_device->state = MEMORY_POWER_ON_STATE;
-
result = acpi_memory_check_device(mem_device);
if (result) {
acpi_memory_device_free(mem_device);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index c05050f474cd..78d621290a35 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -19,8 +19,6 @@
#include "internal.h"
-ACPI_MODULE_NAME("platform");
-
static const struct acpi_device_id forbidden_id_list[] = {
{"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index f3039b93ff61..4ed755a963aa 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/ctype.h>
+#include "internal.h"
+
static const struct acpi_device_id acpi_pnp_device_ids[] = {
/* pata_isapnp */
{"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b51ddf3bb616..2ee5e05a0d69 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -264,7 +264,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
} else {
/*
* Declared with "Device" statement; match _UID.
- * Note that we don't handle string _UIDs yet.
*/
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
@@ -798,22 +797,34 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
memset(&cx, 0, sizeof(cx));
element = &cst->package.elements[i];
- if (element->type != ACPI_TYPE_PACKAGE)
+ if (element->type != ACPI_TYPE_PACKAGE) {
+ acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
+ i, element->type);
continue;
+ }
- if (element->package.count != 4)
+ if (element->package.count != 4) {
+ acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
+ i, element->package.count);
continue;
+ }
obj = &element->package.elements[0];
- if (obj->type != ACPI_TYPE_BUFFER)
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
+ i, obj->type);
continue;
+ }
reg = (struct acpi_power_register *)obj->buffer.pointer;
obj = &element->package.elements[1];
- if (obj->type != ACPI_TYPE_INTEGER)
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
+ i, obj->type);
continue;
+ }
cx.type = obj->integer.value;
/*
@@ -850,6 +861,8 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
} else {
+ acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
+ i);
continue;
}
} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
@@ -857,6 +870,8 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
cx.address);
} else {
+ acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
+ i, reg->space_id);
continue;
}
@@ -864,14 +879,20 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
cx.valid = 1;
obj = &element->package.elements[2];
- if (obj->type != ACPI_TYPE_INTEGER)
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
+ i, obj->type);
continue;
+ }
cx.latency = obj->integer.value;
obj = &element->package.elements[3];
- if (obj->type != ACPI_TYPE_INTEGER)
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
+ i, obj->type);
continue;
+ }
memcpy(&info->states[++last_index], &cx, sizeof(cx));
}
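For reference, the fields validated by the hunks above follow the _CST entry layout defined in the ACPI specification; the summary below restates spec facts rather than anything introduced by this patch:

/* Each _CST entry checked above is a 4-element package, in this order:
 *   [0] Buffer  : struct acpi_power_register (entry method and address)
 *   [1] Integer : C-state type (1 = C1, 2 = C2, 3 = C3)
 *   [2] Integer : worst-case entry/exit latency, in microseconds
 *   [3] Integer : average power consumption, in milliwatts
 */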
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index bc96457c9e25..a322a7bd286b 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -578,7 +578,7 @@ acpi_video_bqc_value_to_level(struct acpi_video_device *device,
ACPI_VIDEO_FIRST_LEVEL - 1 - bqc_value;
level = device->brightness->levels[bqc_value +
- ACPI_VIDEO_FIRST_LEVEL];
+ ACPI_VIDEO_FIRST_LEVEL];
} else {
level = bqc_value;
}
@@ -990,8 +990,8 @@ set_level:
goto out_free_levels;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n",
- br->count - ACPI_VIDEO_FIRST_LEVEL));
+ "found %d brightness levels\n",
+ br->count - ACPI_VIDEO_FIRST_LEVEL));
return 0;
out_free_levels:
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index a676daaa2da5..f8a3abdfe250 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -37,12 +37,14 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
+ char name_seg[ACPI_NAMESEG_SIZE + 1];
};
#define PARAM_LIST(pl) pl
#define EX_NO_SINGLE_STEP 1
#define EX_SINGLE_STEP 2
+#define EX_ALL 4
/*
* dbxface - external debugger interfaces
@@ -124,6 +126,8 @@ void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
void acpi_db_evaluate_predefined_names(void);
+void acpi_db_evaluate_all(char *name_seg);
+
/*
* dbnames - namespace commands
*/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 1030a0ce1599..2fee91f57b21 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -42,6 +42,12 @@ ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable);
ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status);
ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable);
+#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
+ACPI_GLOBAL(unsigned long, acpi_gbl_xgpe0_block_logical_address);
+ACPI_GLOBAL(unsigned long, acpi_gbl_xgpe1_block_logical_address);
+
+#endif /* ACPI_GPE_USE_LOGICAL_ADDRESSES */
+
/*
* Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index ebf6453d0e21..6ab92e28330d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -73,9 +73,15 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width);
acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width);
+acpi_status acpi_hw_validate_io_block(u64 address, u32 bit_width, u32 count);
+
/*
* hwgpe - GPE support
*/
+acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg);
+
+acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg);
+
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info);
acpi_status
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index af58cd2dc9d3..f83b98fa13ac 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -454,11 +454,18 @@ struct acpi_gpe_event_info {
u8 disable_for_dispatch; /* Masked during dispatching */
};
+/* GPE register address */
+
+struct acpi_gpe_address {
+ u8 space_id; /* Address space where the register exists */
+ u64 address; /* 64-bit address of the register */
+};
+
/* Information about a GPE register pair, one per each status/enable pair in an array */
struct acpi_gpe_register_info {
- struct acpi_generic_address status_address; /* Address of status reg */
- struct acpi_generic_address enable_address; /* Address of enable reg */
+ struct acpi_gpe_address status_address; /* Address of status reg */
+ struct acpi_gpe_address enable_address; /* Address of enable reg */
u16 base_gpe_number; /* Base GPE number for this register */
u8 enable_for_wake; /* GPEs to keep enabled when sleeping */
u8 enable_for_run; /* GPEs to keep enabled when running */
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 2cbb56652f1c..57ea2276790f 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -101,7 +101,7 @@ enum acpi_return_package_types {
/* Support macros for users of the predefined info table */
-#define METHOD_PREDEF_ARGS_MAX 4
+#define METHOD_PREDEF_ARGS_MAX 5
#define METHOD_ARG_BIT_WIDTH 3
#define METHOD_ARG_MASK 0x0007
#define ARG_COUNT_IS_MINIMUM 0x8000
@@ -117,6 +117,7 @@ enum acpi_return_package_types {
#define METHOD_2ARGS(a1,a2) (2 | (a1 << 3) | (a2 << 6))
#define METHOD_3ARGS(a1,a2,a3) (3 | (a1 << 3) | (a2 << 6) | (a3 << 9))
#define METHOD_4ARGS(a1,a2,a3,a4) (4 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12))
+#define METHOD_5ARGS(a1,a2,a3,a4,a5) (5 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12) | (a5 << 15))
#define METHOD_RETURNS(type) (type)
#define METHOD_NO_RETURN_VALUE 0
@@ -902,9 +903,39 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_S4W", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_SBA", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+ {{"_SBI", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Int, 1 Buf) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 1,
+ ACPI_RTYPE_BUFFER, 1, 0),
+
+ {{"_SBR",
+ METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_INTEGER),
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,
+ ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER, 1, 0),
+
{{"_SBS", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+ {{"_SBT",
+ METHOD_4ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_ANY),
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (2 Int, 1 Buf | Int) */
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,
+ ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER, 1, 0),
+
+ {{"_SBW",
+ METHOD_5ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+ ACPI_TYPE_INTEGER, ACPI_TYPE_ANY),
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},
+ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER | ACPI_RTYPE_INTEGER,
+ 1, 0, 0, 0),
+
{{"_SCP", METHOD_1ARGS(ACPI_TYPE_INTEGER) | ARG_COUNT_IS_MINIMUM,
METHOD_NO_RETURN_VALUE}}, /* Acpi 1.0 allowed 1 integer arg. Acpi 3.0 expanded to 3 args. Allow both. */
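The METHOD_nARGS macros pack the argument count into the low 3 bits and each argument's ACPI type into successive METHOD_ARG_BIT_WIDTH (3-bit) fields above it; raising METHOD_PREDEF_ARGS_MAX to 5 simply makes room for a fifth field at bits 15-17. A worked expansion, purely for illustration (ACPI_TYPE_INTEGER is 1 and ACPI_TYPE_ANY is 0):

/* Illustrative expansion of the new macro, using the _SBW entry above:
 *
 *   METHOD_5ARGS(1, 1, 1, 1, ACPI_TYPE_ANY)
 *     == 5 | (1 << 3) | (1 << 6) | (1 << 9) | (1 << 12) | (0 << 15)
 *     == 0x124D
 *
 * METHOD_GET_NEXT_TYPE() (used in nsarguments.c) then walks these 3-bit
 * fields, masking with METHOD_ARG_MASK (0x0007) to recover each type.
 */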
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 4027eaab18a4..d3a9521e2dc8 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -86,7 +86,8 @@ void acpi_db_delete_objects(u32 count, union acpi_object *objects)
*
* RETURN: Status
*
- * DESCRIPTION: Execute a control method.
+ * DESCRIPTION: Execute a control method. Used to evaluate objects via the
+ * "EXECUTE" or "EVALUATE" commands.
*
******************************************************************************/
@@ -314,11 +315,12 @@ acpi_db_execution_walk(acpi_handle obj_handle,
status = acpi_evaluate_object(node, NULL, NULL, &return_obj);
+ acpi_gbl_method_executing = FALSE;
+
acpi_os_printf("Evaluation of [%4.4s] returned %s\n",
acpi_ut_get_node_name(node),
acpi_format_exception(status));
- acpi_gbl_method_executing = FALSE;
return (AE_OK);
}
@@ -334,7 +336,8 @@ acpi_db_execution_walk(acpi_handle obj_handle,
* RETURN: None
*
* DESCRIPTION: Execute a control method. Name is relative to the current
- * scope.
+ * scope. Function used for the "EXECUTE", "EVALUATE", and
+ * "ALL" commands
*
******************************************************************************/
@@ -372,6 +375,12 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
return;
}
+ if ((flags & EX_ALL) && (strlen(name) > 4)) {
+ acpi_os_printf("Input name (%s) must be a 4-char NameSeg\n",
+ name);
+ return;
+ }
+
name_string = ACPI_ALLOCATE(strlen(name) + 1);
if (!name_string) {
return;
@@ -389,13 +398,24 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
return;
}
- acpi_gbl_db_method_info.name = name_string;
- acpi_gbl_db_method_info.args = args;
- acpi_gbl_db_method_info.types = types;
- acpi_gbl_db_method_info.flags = flags;
+ /* Command (ALL <nameseg>) to execute all methods of a particular name */
- return_obj.pointer = NULL;
- return_obj.length = ACPI_ALLOCATE_BUFFER;
+ else if (flags & EX_ALL) {
+ acpi_gbl_db_method_info.name = name_string;
+ return_obj.pointer = NULL;
+ return_obj.length = ACPI_ALLOCATE_BUFFER;
+ acpi_db_evaluate_all(name_string);
+ ACPI_FREE(name_string);
+ return;
+ } else {
+ acpi_gbl_db_method_info.name = name_string;
+ acpi_gbl_db_method_info.args = args;
+ acpi_gbl_db_method_info.types = types;
+ acpi_gbl_db_method_info.flags = flags;
+
+ return_obj.pointer = NULL;
+ return_obj.length = ACPI_ALLOCATE_BUFFER;
+ }
status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
if (ACPI_FAILURE(status)) {
@@ -450,6 +470,7 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
(u32)return_obj.length);
acpi_db_dump_external_object(return_obj.pointer, 1);
+ acpi_os_printf("\n");
/* Dump a _PLD buffer if present */
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index ee6a1b77af3f..2952856b8a67 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -37,6 +37,7 @@ acpi_db_match_command_help(const char *command,
enum acpi_ex_debugger_commands {
CMD_NOT_FOUND = 0,
CMD_NULL,
+ CMD_ALL,
CMD_ALLOCATIONS,
CMD_ARGS,
CMD_ARGUMENTS,
@@ -115,6 +116,7 @@ enum acpi_ex_debugger_commands {
static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"<NOT FOUND>", 0},
{"<NULL>", 0},
+ {"ALL", 1},
{"ALLOCATIONS", 0},
{"ARGS", 0},
{"ARGUMENTS", 0},
@@ -222,6 +224,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Type <Object>", "Display object type\n"},
{0, "\nControl Method Execution:", "\n"},
+ {1, " All <NameSeg>", "Evaluate all objects named NameSeg\n"},
{1, " Evaluate <Namepath> [Arguments]",
"Evaluate object or control method\n"},
{1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
@@ -436,7 +439,7 @@ static void acpi_db_display_help(char *command)
acpi_os_printf("\n");
} else {
- /* Display help for all commands that match the subtring */
+ /* Display help for all commands that match the substring */
acpi_db_display_command_info(command, TRUE);
}
@@ -740,6 +743,15 @@ acpi_db_command_dispatch(char *input_buffer,
}
break;
+ case CMD_ALL:
+
+ acpi_os_printf("Executing all objects with NameSeg: %s\n",
+ acpi_gbl_db_args[1]);
+ acpi_db_execute(acpi_gbl_db_args[1], &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2],
+ EX_NO_SINGLE_STEP | EX_ALL);
+ break;
+
case CMD_ALLOCATIONS:
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 4e48a7de7413..889d13828e49 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -21,6 +21,8 @@ static acpi_status
acpi_db_walk_for_execute(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_set_method_breakpoint
@@ -346,42 +348,26 @@ acpi_status acpi_db_disassemble_method(char *name)
/*******************************************************************************
*
- * FUNCTION: acpi_db_walk_for_execute
+ * FUNCTION: acpi_db_evaluate_object
*
- * PARAMETERS: Callback from walk_namespace
+ * PARAMETERS: node - Namespace node for the object
*
* RETURN: Status
*
- * DESCRIPTION: Batch execution module. Currently only executes predefined
- * ACPI names.
+ * DESCRIPTION: Main execution function for the Evaluate/Execute/All debugger
+ * commands.
*
******************************************************************************/
-static acpi_status
-acpi_db_walk_for_execute(acpi_handle obj_handle,
- u32 nesting_level, void *context, void **return_value)
+static acpi_status acpi_db_evaluate_object(struct acpi_namespace_node *node)
{
- struct acpi_namespace_node *node =
- (struct acpi_namespace_node *)obj_handle;
- struct acpi_db_execute_walk *info =
- (struct acpi_db_execute_walk *)context;
- struct acpi_buffer return_obj;
- acpi_status status;
char *pathname;
u32 i;
struct acpi_device_info *obj_info;
struct acpi_object_list param_objects;
union acpi_object params[ACPI_METHOD_NUM_ARGS];
- const union acpi_predefined_info *predefined;
-
- predefined = acpi_ut_match_predefined_method(node->name.ascii);
- if (!predefined) {
- return (AE_OK);
- }
-
- if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
- return (AE_OK);
- }
+ struct acpi_buffer return_obj;
+ acpi_status status;
pathname = acpi_ns_get_external_pathname(node);
if (!pathname) {
@@ -390,7 +376,7 @@ acpi_db_walk_for_execute(acpi_handle obj_handle,
/* Get the object info for number of method parameters */
- status = acpi_get_object_info(obj_handle, &obj_info);
+ status = acpi_get_object_info(node, &obj_info);
if (ACPI_FAILURE(status)) {
ACPI_FREE(pathname);
return (status);
@@ -421,14 +407,67 @@ acpi_db_walk_for_execute(acpi_handle obj_handle,
acpi_gbl_method_executing = TRUE;
status = acpi_evaluate_object(node, NULL, &param_objects, &return_obj);
+ acpi_gbl_method_executing = FALSE;
acpi_os_printf("%-32s returned %s\n", pathname,
acpi_format_exception(status));
- acpi_gbl_method_executing = FALSE;
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ pathname, return_obj.pointer,
+ (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ acpi_os_printf("\n");
+ }
+
ACPI_FREE(pathname);
/* Ignore status from method execution */
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_walk_for_execute
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Batch execution function. Evaluates all "predefined" objects --
+ * the nameseg begins with an underscore.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_walk_for_execute(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+ struct acpi_db_execute_walk *info =
+ (struct acpi_db_execute_walk *)context;
+ acpi_status status;
+ const union acpi_predefined_info *predefined;
+
+ predefined = acpi_ut_match_predefined_method(node->name.ascii);
+ if (!predefined) {
+ return (AE_OK);
+ }
+
+ if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
+ return (AE_OK);
+ }
+
+ acpi_db_evaluate_object(node);
+
+ /* Ignore status from object evaluation */
+
status = AE_OK;
/* Update count, check if we have executed enough methods */
@@ -443,6 +482,52 @@ acpi_db_walk_for_execute(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_db_walk_for_execute_all
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Batch execution function. Evaluates all objects whose path ends
+ * with the nameseg "Info->NameSeg". Used for the "ALL" command.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_walk_for_execute_all(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_namespace_node *node =
+ (struct acpi_namespace_node *)obj_handle;
+ struct acpi_db_execute_walk *info =
+ (struct acpi_db_execute_walk *)context;
+ acpi_status status;
+
+ if (!ACPI_COMPARE_NAMESEG(node->name.ascii, info->name_seg)) {
+ return (AE_OK);
+ }
+
+ if (node->type == ACPI_TYPE_LOCAL_SCOPE) {
+ return (AE_OK);
+ }
+
+ /* Now evaluate the input object (node) */
+
+ acpi_db_evaluate_object(node);
+
+ /* Ignore status from method execution */
+
+ status = AE_OK;
+
+ /* Update count of executed methods/objects */
+
+ info->count++;
+ return (status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_evaluate_predefined_names
*
* PARAMETERS: None
@@ -470,3 +555,35 @@ void acpi_db_evaluate_predefined_names(void)
acpi_os_printf("Evaluated %u predefined names in the namespace\n",
info.count);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_evaluate_all
+ *
+ * PARAMETERS: name_seg - Name segment (4 chars) of the objects to evaluate
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Namespace batch execution. Implements the "ALL" command.
+ * Execute all namepaths whose final nameseg matches the
+ * input nameseg.
+ *
+ ******************************************************************************/
+
+void acpi_db_evaluate_all(char *name_seg)
+{
+ struct acpi_db_execute_walk info;
+
+ info.count = 0;
+ info.max_count = ACPI_UINT32_MAX;
+ ACPI_COPY_NAMESEG(info.name_seg, name_seg);
+ info.name_seg[ACPI_NAMESEG_SIZE] = 0;
+
+ /* Search all nodes in namespace */
+
+ (void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_db_walk_for_execute_all,
+ NULL, (void *)&info, NULL);
+
+ acpi_os_printf("Evaluated %u names in the namespace\n", info.count);
+}
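Putting the pieces together: the new "ALL" debugger command is dispatched to acpi_db_execute() with EX_ALL set, which hands the NameSeg to acpi_db_evaluate_all() above. A hypothetical acpiexec session (the paths and count are invented; the message formats match the printf strings in this patch) might look like:

- all _STA
Executing all objects with NameSeg: _STA
\_SB.PCI0._STA                   returned AE_OK
\_SB.PCI0.LPCB._STA              returned AE_OK
Evaluated 2 names in the namespace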
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 3e39907fedd9..06b9c8dd11c9 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -656,14 +656,14 @@ acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
/* GPE currently enabled (enable bit == 1)? */
- status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_read(&enable_reg, &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
/* GPE currently active (status bit == 1)? */
- status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
+ status = acpi_hw_gpe_read(&status_reg, &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 132adff1e131..f5298be4273a 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -233,12 +233,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_register->status_address.space_id = gpe_block->space_id;
this_register->enable_address.space_id = gpe_block->space_id;
- this_register->status_address.bit_width =
- ACPI_GPE_REGISTER_WIDTH;
- this_register->enable_address.bit_width =
- ACPI_GPE_REGISTER_WIDTH;
- this_register->status_address.bit_offset = 0;
- this_register->enable_address.bit_offset = 0;
/* Init the event_info for each GPE within this register */
@@ -251,14 +245,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
/* Disable all GPEs within this register */
- status = acpi_hw_write(0x00, &this_register->enable_address);
+ status = acpi_hw_gpe_write(0x00, &this_register->enable_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
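The rewritten ttyio_in() now distinguishes a pure poll (timeout == 0) from a timed wait: try_wait_for_completion() consumes the completion only if it is already done and never sleeps, while wait_for_completion_timeout() blocks for up to the given number of jiffies. A minimal sketch of the same pattern outside of speakup (function name invented):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch of the poll-vs-wait pattern introduced above (not speakup code). */
static int read_one(struct completion *done, int timeout_us)
{
        if (!timeout_us) {
                /* Non-blocking poll: succeed only if already completed. */
                if (!try_wait_for_completion(done))
                        return -EAGAIN;
        } else if (!wait_for_completion_timeout(done,
                                                usecs_to_jiffies(timeout_us))) {
                /* Timed wait: a zero return means the timeout elapsed. */
                return -ETIMEDOUT;
        }
        return 0;       /* Completion was consumed. */
}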
/* Clear any pending GPE events within this register */
- status = acpi_hw_write(0xFF, &this_register->status_address);
+ status = acpi_hw_gpe_write(0xFF, &this_register->status_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
@@ -317,6 +311,23 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(AE_OK);
}
+ /* Validate the space_ID */
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ ACPI_ERROR((AE_INFO,
+ "Unsupported address space: 0x%X", space_id));
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
+ if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ status = acpi_hw_validate_io_block(address,
+ ACPI_GPE_REGISTER_WIDTH,
+ register_count);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+ }
+
/* Allocate a new GPE block */
gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 6effd8076dcc..6d82d30d8f7b 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -32,6 +32,16 @@ ACPI_MODULE_NAME("evgpeinit")
* kernel boot time as well.
*/
+#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
+#define ACPI_FADT_GPE_BLOCK_ADDRESS(N) \
+ acpi_gbl_FADT.xgpe##N##_block.space_id == \
+ ACPI_ADR_SPACE_SYSTEM_MEMORY ? \
+ (u64)acpi_gbl_xgpe##N##_block_logical_address : \
+ acpi_gbl_FADT.xgpe##N##_block.address
+#else
+#define ACPI_FADT_GPE_BLOCK_ADDRESS(N) acpi_gbl_FADT.xgpe##N##_block.address
+#endif /* ACPI_GPE_USE_LOGICAL_ADDRESSES */
+
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_initialize
@@ -49,6 +59,7 @@ acpi_status acpi_ev_gpe_initialize(void)
u32 register_count1 = 0;
u32 gpe_number_max = 0;
acpi_status status;
+ u64 address;
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
@@ -85,8 +96,9 @@ acpi_status acpi_ev_gpe_initialize(void)
* If EITHER the register length OR the block address are zero, then that
* particular block is not supported.
*/
- if (acpi_gbl_FADT.gpe0_block_length &&
- acpi_gbl_FADT.xgpe0_block.address) {
+ address = ACPI_FADT_GPE_BLOCK_ADDRESS(0);
+
+ if (acpi_gbl_FADT.gpe0_block_length && address) {
/* GPE block 0 exists (has both length and address > 0) */
@@ -97,7 +109,6 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Install GPE Block 0 */
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_FADT.xgpe0_block.
address,
acpi_gbl_FADT.xgpe0_block.
space_id, register_count0, 0,
@@ -110,8 +121,9 @@ acpi_status acpi_ev_gpe_initialize(void)
}
}
- if (acpi_gbl_FADT.gpe1_block_length &&
- acpi_gbl_FADT.xgpe1_block.address) {
+ address = ACPI_FADT_GPE_BLOCK_ADDRESS(1);
+
+ if (acpi_gbl_FADT.gpe1_block_length && address) {
/* GPE block 1 exists (has both length and address > 0) */
@@ -137,7 +149,6 @@ acpi_status acpi_ev_gpe_initialize(void)
status =
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- acpi_gbl_FADT.xgpe1_block.
address,
acpi_gbl_FADT.xgpe1_block.
space_id, register_count1,
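For reference, with ACPI_GPE_USE_LOGICAL_ADDRESSES defined the macro call expands as written out by hand below, so memory-mapped GPE blocks are addressed through the pre-mapped logical address while port-I/O blocks keep using the raw FADT address:

/* Hand-expanded form of ACPI_FADT_GPE_BLOCK_ADDRESS(0), for illustration: */
address = acpi_gbl_FADT.xgpe0_block.space_id ==
                        ACPI_ADR_SPACE_SYSTEM_MEMORY ?
                (u64)acpi_gbl_xgpe0_block_logical_address :
                acpi_gbl_FADT.xgpe0_block.address;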
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 49c46d4dd070..b13a4ed5bc63 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -26,6 +26,76 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
/******************************************************************************
*
+ * FUNCTION: acpi_hw_gpe_read
+ *
+ * PARAMETERS: value - Where the value is returned
+ * reg - GPE register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Read from a GPE register in either memory or IO space.
+ *
+ * LIMITATIONS: <These limitations also apply to acpi_hw_gpe_write>
+ * space_ID must be system_memory or system_IO.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg)
+{
+ acpi_status status;
+ u32 value32;
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
+ *value = (u64)ACPI_GET8((unsigned long)reg->address);
+ return_ACPI_STATUS(AE_OK);
+#else
+ return acpi_os_read_memory((acpi_physical_address)reg->address,
+ value, ACPI_GPE_REGISTER_WIDTH);
+#endif
+ }
+
+ status = acpi_os_read_port((acpi_io_address)reg->address,
+ &value32, ACPI_GPE_REGISTER_WIDTH);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ *value = (u64)value32;
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_gpe_write
+ *
+ * PARAMETERS: value - Value to be written
+ * reg - GPE register structure
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Write to a GPE register in either memory or IO space.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg)
+{
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
+ ACPI_SET8((unsigned long)reg->address, value);
+ return_ACPI_STATUS(AE_OK);
+#else
+ return acpi_os_write_memory((acpi_physical_address)reg->address,
+ value, ACPI_GPE_REGISTER_WIDTH);
+#endif
+ }
+
+ return acpi_os_write_port((acpi_io_address)reg->address, (u32)value,
+ ACPI_GPE_REGISTER_WIDTH);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
@@ -79,7 +149,8 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Get current value of the enable register that contains this GPE */
- status = acpi_hw_read(&enable_mask, &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_read(&enable_mask,
+ &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -118,9 +189,8 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Write the updated enable mask */
- status =
- acpi_hw_write(enable_mask,
- &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_write(enable_mask,
+ &gpe_register_info->enable_address);
}
return (status);
}
@@ -158,8 +228,8 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
*/
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
- status =
- acpi_hw_write(register_bit, &gpe_register_info->status_address);
+ status = acpi_hw_gpe_write(register_bit,
+ &gpe_register_info->status_address);
return (status);
}
@@ -227,7 +297,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
/* GPE currently enabled (enable bit == 1)? */
- status = acpi_hw_read(&in_byte, &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -238,7 +308,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
/* GPE currently active (status bit == 1)? */
- status = acpi_hw_read(&in_byte, &gpe_register_info->status_address);
+ status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -274,7 +344,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
gpe_register_info->enable_mask = enable_mask;
- status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_write(enable_mask,
+ &gpe_register_info->enable_address);
return (status);
}
@@ -341,9 +412,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Clear status on all GPEs in this register */
- status =
- acpi_hw_write(0xFF,
- &gpe_block->register_info[i].status_address);
+ status = acpi_hw_gpe_write(0xFF,
+ &gpe_block->register_info[i].status_address);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -481,14 +551,14 @@ acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
for (i = 0; i < gpe_block->register_count; i++) {
gpe_register_info = &gpe_block->register_info[i];
- status = acpi_hw_read(&in_enable,
- &gpe_register_info->enable_address);
+ status = acpi_hw_gpe_read(&in_enable,
+ &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
continue;
}
- status = acpi_hw_read(&in_status,
- &gpe_register_info->status_address);
+ status = acpi_hw_gpe_read(&in_status,
+ &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
continue;
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 4d94861e6093..b2ca7dfd3fc9 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -292,3 +292,33 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
return (AE_OK);
}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_validate_io_block
+ *
+ * PARAMETERS: Address Address of I/O port/register block
+ * bit_width Number of bits (8,16,32) in each register
+ * count Number of registers in the block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Validates a block of I/O ports/registers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_hw_validate_io_block(u64 address, u32 bit_width, u32 count)
+{
+ acpi_status status;
+
+ while (count--) {
+ status = acpi_hw_validate_io_request((acpi_io_address)address,
+ bit_width);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ address += ACPI_DIV_8(bit_width);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index fe9b3639a87d..83d26abcf448 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -294,7 +294,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
node_to_delete = next_node;
next_node = next_node->peer;
acpi_ns_delete_node(node_to_delete);
- };
+ }
/* Clear the parent's child pointer */
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index d5e8405e9d8f..6bbc7d350a16 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -55,7 +55,9 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
arg_type = METHOD_GET_NEXT_TYPE(arg_type_list);
user_arg_type = info->parameters[i]->common.type;
- if (user_arg_type != arg_type) {
+ /* No typechecking for ACPI_TYPE_ANY */
+
+ if ((user_arg_type != arg_type) && (arg_type != ACPI_TYPE_ANY)) {
ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
ACPI_WARN_ALWAYS,
"Argument #%u type mismatch - "
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c022bef263e5..324269481160 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -24,7 +24,8 @@ ACPI_MODULE_NAME("nsxfobj")
*
* RETURN: Status
*
- * DESCRIPTION: This routine returns the type associatd with a particular handle
+ * DESCRIPTION: This routine returns the type associated with a particular
+ * handle
*
******************************************************************************/
acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type)
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index c780046bf294..bd3caf735be3 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -508,8 +508,8 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
}
/*
- * If the transfer to the new method method call worked
- *, a new walk state was created -- get it
+ * If the transfer to the new method call worked,
+ * a new walk state was created -- get it
*/
walk_state = acpi_ds_get_current_walk_state(thread);
continue;
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 05fe3470fb93..dd277f7e9f10 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -151,7 +151,7 @@ static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types);
static const char *ut_external_type_names[] = /* Indexed by ACPI_TYPE_* */
{
- ", UNSUPPORTED-TYPE",
+ ", Type_ANY",
", Integer",
", String",
", Buffer",
@@ -311,8 +311,7 @@ static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types)
for (i = 0; i < arg_count; i++) {
this_argument_type = METHOD_GET_NEXT_TYPE(argument_types);
- if (!this_argument_type
- || (this_argument_type > METHOD_MAX_ARG_TYPE)) {
+ if (this_argument_type > METHOD_MAX_ARG_TYPE) {
printf("**** Invalid argument type (%u) "
"in predefined info structure\n",
this_argument_type);
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 05ff20049b87..2d91003fcf26 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -45,10 +45,15 @@ acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr)
/* Convert each ASCII byte in the input string */
while (*string) {
-
- /* Character must be ASCII 0-7, otherwise terminate with no error */
-
+ /*
+ * Character must be ASCII 0-7, otherwise:
+ * 1) Runtime: terminate with no error, per the ACPI spec
+ * 2) Compiler: return an error
+ */
if (!(ACPI_IS_OCTAL_DIGIT(*string))) {
+#ifdef ACPI_ASL_COMPILER
+ status = AE_BAD_OCTAL_CONSTANT;
+#endif
break;
}
@@ -94,10 +99,15 @@ acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
/* Convert each ASCII byte in the input string */
while (*string) {
-
- /* Character must be ASCII 0-9, otherwise terminate with no error */
-
+ /*
+ * Character must be ASCII 0-9, otherwise:
+ * 1) Runtime: terminate with no error, per the ACPI spec
+ * 2) Compiler: return an error
+ */
if (!isdigit(*string)) {
+#ifdef ACPI_ASL_COMPILER
+ status = AE_BAD_DECIMAL_CONSTANT;
+#endif
break;
}
@@ -143,10 +153,15 @@ acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
/* Convert each ASCII byte in the input string */
while (*string) {
-
- /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */
-
+ /*
+ * Character must be ASCII A-F, a-f, or 0-9, otherwise:
+ * 1) Runtime: terminate with no error, per the ACPI spec
+ * 2) Compiler: return an error
+ */
if (!isxdigit(*string)) {
+#ifdef ACPI_ASL_COMPILER
+ status = AE_BAD_HEX_CONSTANT;
+#endif
break;
}
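The practical effect is that the converters stay spec-compliant at runtime but become strict when built into the iASL compiler. Assuming the usual tail of these functions (store the accumulated value, return status), the behavior would differ as sketched here:

/* Illustration only: expected behavior of the converters above.
 *
 * acpi_ut_convert_decimal_string("129x", &val);
 *   Runtime build:      stops at 'x', val == 129, returns AE_OK
 *   ACPI_ASL_COMPILER:  stops at 'x', returns AE_BAD_DECIMAL_CONSTANT
 */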
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e358d0046494..552fd9ffaca4 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -632,7 +632,11 @@ int apei_map_generic_address(struct acpi_generic_address *reg)
rc = apei_check_gar(reg, &address, &access_bit_width);
if (rc)
return rc;
- return acpi_os_map_generic_address(reg);
+
+ if (!acpi_os_map_generic_address(reg))
+ return -ENXIO;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 81bf71b10d44..fce7ade2aba9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -79,6 +79,12 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
+#define GHES_VENDOR_ENTRY_LEN(gdata_len) \
+ (sizeof(struct ghes_vendor_record_entry) + (gdata_len))
+#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry) \
+ ((struct acpi_hest_generic_data *) \
+ ((struct ghes_vendor_record_entry *)(vendor_entry) + 1))
+
/*
* NMI-like notifications vary by architecture, before the compiler can prune
* unused static functions it needs a value for these enums.
@@ -123,6 +129,12 @@ static DEFINE_MUTEX(ghes_list_mutex);
*/
static DEFINE_SPINLOCK(ghes_notify_lock_irq);
+struct ghes_vendor_record_entry {
+ struct work_struct work;
+ int error_severity;
+ char vendor_record[];
+};
+
static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;
@@ -511,6 +523,56 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
#endif
}
+static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);
+
+int ghes_register_vendor_record_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);
+
+void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
+}
+EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);
+
+static void ghes_vendor_record_work_func(struct work_struct *work)
+{
+ struct ghes_vendor_record_entry *entry;
+ struct acpi_hest_generic_data *gdata;
+ u32 len;
+
+ entry = container_of(work, struct ghes_vendor_record_entry, work);
+ gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
+
+ blocking_notifier_call_chain(&vendor_record_notify_list,
+ entry->error_severity, gdata);
+
+ len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
+ gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
+}
+
+static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
+ int sev)
+{
+ struct acpi_hest_generic_data *copied_gdata;
+ struct ghes_vendor_record_entry *entry;
+ u32 len;
+
+ len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
+ entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
+ if (!entry)
+ return;
+
+ copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
+ memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
+ entry->error_severity = sev;
+
+ INIT_WORK(&entry->work, ghes_vendor_record_work_func);
+ schedule_work(&entry->work);
+}
+
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -549,6 +611,7 @@ static bool ghes_do_proc(struct ghes *ghes,
} else {
void *err = acpi_hest_get_payload(gdata);
+ ghes_defer_non_standard_event(gdata, sev);
log_non_standard_event(sec_type, fru_id, fru_text,
sec_sev, err,
gdata->error_data_length);
@@ -879,7 +942,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,
- true);
+ TWA_RESUME);
if (ret)
estatus_node->task_work.func = NULL;
}
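A platform driver interested in these deferred vendor records registers a blocking notifier; the chain runs from a workqueue (process context) with entry->error_severity as the action and the copied acpi_hest_generic_data as the data pointer. A hypothetical consumer (names invented; the register/unregister prototypes and the <acpi/ghes.h> location are assumed to be declared alongside the rest of the GHES API) could look like:

#include <acpi/ghes.h>
#include <linux/notifier.h>

/* Hypothetical consumer of deferred non-standard GHES records. */
static int my_vendor_record_cb(struct notifier_block *nb,
                               unsigned long severity, void *data)
{
        struct acpi_hest_generic_data *gdata = data;

        /* Match gdata->section_type against the vendor GUID here. */
        return NOTIFY_OK;
}

static struct notifier_block my_vendor_nb = {
        .notifier_call = my_vendor_record_cb,
};

/* At probe:  ghes_register_vendor_record_notifier(&my_vendor_nb);
 * At remove: ghes_unregister_vendor_record_notifier(&my_vendor_nb);
 */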
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index ec782e4a0fe4..9929ff50c0c0 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/dma-map-ops.h>
#define IORT_TYPE_MASK(type) (1 << (type))
#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
@@ -811,8 +812,7 @@ static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}
-static inline int iort_add_device_replay(const struct iommu_ops *ops,
- struct device *dev)
+static inline int iort_add_device_replay(struct device *dev)
{
int err = 0;
@@ -1072,7 +1072,7 @@ const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
*/
if (!err) {
ops = iort_fwspec_iommu_ops(dev);
- err = iort_add_device_replay(ops, dev);
+ err = iort_add_device_replay(dev);
}
/* Ignore all other errors apart from EPROBE_DEFER */
@@ -1087,11 +1087,6 @@ const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
}
#else
-static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
-{ return NULL; }
-static inline int iort_add_device_replay(const struct iommu_ops *ops,
- struct device *dev)
-{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
@@ -1184,8 +1179,9 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
*dma_addr = dmaaddr;
*dma_size = size;
- dev->dma_pfn_offset = PFN_DOWN(offset);
- dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
+ ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
+
+ dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
@@ -1335,7 +1331,7 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev,
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- int dev_node = acpi_map_pxm_to_node(smmu->pxm);
+ int dev_node = pxm_to_node(smmu->pxm);
if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
return -EINVAL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index cab4af532f36..08ee1c7b12e0 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -987,7 +987,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
*/
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
- (battery->capacity_now <= battery->alarm)))
+ (battery->capacity_now <= battery->alarm)))
acpi_pm_wakeup_event(&battery->device->dev);
return result;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 54002670cb7a..1682f8b454a2 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -303,7 +303,11 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+#ifdef CONFIG_ARM64
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
+#endif
#ifdef CONFIG_X86
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
if (boot_cpu_has(X86_FEATURE_HWP)) {
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
@@ -551,6 +555,7 @@ struct device *acpi_get_first_physical_node(struct acpi_device *adev)
mutex_unlock(physical_node_lock);
return phys_dev;
}
+EXPORT_SYMBOL_GPL(acpi_get_first_physical_node);
static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
const struct device *dev)
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a4eda7fe50d3..0d93a5ef4d07 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -74,19 +74,6 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id dmi_lid_quirks[] = {
{
- /*
- * Acer Switch 10 SW5-012. _LID method messes with home and
- * power button GPIO IRQ settings causing an interrupt storm on
- * both GPIOs. This is unfixable without a DSDT override, so we
- * have to disable the lid-switch functionality altogether :|
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
- },
- .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
- },
- {
/* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
@@ -102,7 +89,18 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
- DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
+ {
+ /*
+ * Medion Akoya E2228T, notification of the LID device only
+ * happens on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2228T"),
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
@@ -153,6 +151,7 @@ struct acpi_button {
int last_state;
ktime_t last_time;
bool suspended;
+ bool lid_state_initialized;
};
static struct acpi_device *lid_device;
@@ -383,6 +382,8 @@ static int acpi_lid_update_state(struct acpi_device *device,
static void acpi_lid_initialize_state(struct acpi_device *device)
{
+ struct acpi_button *button = acpi_driver_data(device);
+
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
(void)acpi_lid_notify_state(device, 1);
@@ -394,13 +395,14 @@ static void acpi_lid_initialize_state(struct acpi_device *device)
default:
break;
}
+
+ button->lid_state_initialized = true;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
{
struct acpi_button *button = acpi_driver_data(device);
struct input_dev *input;
- int users;
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
@@ -409,10 +411,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- mutex_lock(&button->input->mutex);
- users = button->input->users;
- mutex_unlock(&button->input->mutex);
- if (users)
+ if (button->lid_state_initialized)
acpi_lid_update_state(device, true);
} else {
int keycode;
@@ -457,7 +456,7 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
+ if (button->type == ACPI_BUTTON_TYPE_LID) {
button->last_state = !!acpi_lid_evaluate_state(device);
button->last_time = ktime_get();
acpi_lid_initialize_state(device);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 9ea5f55d97e3..ccaa647ac3d4 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -14,9 +14,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_CONTAINER_COMPONENT
-ACPI_MODULE_NAME("container");
-
static const struct acpi_device_id container_device_ids[] = {
{"ACPI0004", 0},
{"PNP0A05", 0},
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index b097ef209313..7b54dc95d36b 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -13,8 +13,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("custom_method");
MODULE_LICENSE("GPL");
static struct dentry *cm_dentry;
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index d5ecea3715f8..074eb98d213e 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -10,9 +10,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("debugfs");
-
struct dentry *acpi_debugfs_dir;
EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 9bd72c26ef46..24e076f44d23 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -20,8 +20,6 @@
#include "internal.h"
-ACPI_MODULE_NAME("dock");
-
static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
@@ -233,7 +231,8 @@ static void hot_remove_dock_devices(struct dock_station *ds)
* between them).
*/
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
+ dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST,
+ DOCK_CALL_HANDLER);
list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
acpi_bus_trim(dd->adev);
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
index 90a2fd979282..1e8c7ce89bf1 100644
--- a/drivers/acpi/dptf/Kconfig
+++ b/drivers/acpi/dptf/Kconfig
@@ -1,8 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
-config DPTF_POWER
- tristate "DPTF Platform Power Participant"
+
+menuconfig ACPI_DPTF
+ bool "Intel DPTF (Dynamic Platform and Thermal Framework) Support"
depends on X86
help
+ Intel Dynamic Platform and Thermal Framework (DPTF) is a platform
+ level hardware/software solution for power and thermal management.
+
+ As a container for multiple power/thermal technologies, DPTF provides
+ a coordinated approach for different policies to affect the hardware
+ state of a system.
+
+ For more information see:
+ <https://01.org/intel%C2%AE-dynamic-platform-and-thermal-framework-dptf-chromium-os/overview>
+
+if ACPI_DPTF
+
+config DPTF_POWER
+ tristate "Platform Power DPTF Participant"
+ default m
+ help
This driver adds support for Dynamic Platform and Thermal Framework
(DPTF) Platform Power Participant device (INT3407) support.
This participant is responsible for exposing platform telemetry:
@@ -14,3 +31,19 @@ config DPTF_POWER
To compile this driver as a module, choose M here:
the module will be called dptf_power.
+
+config DPTF_PCH_FIVR
+ tristate "PCH FIVR DPTF Participant"
+ default m
+ help
+ This driver adds support for Dynamic Platform and Thermal Framework
+ (DPTF) PCH FIVR Participant device support. This driver allows
+ switching the PCH FIVR (Fully Integrated Voltage Regulator) frequency.
+ This participant is responsible for exposing:
+ freq_mhz_low_clock
+ freq_mhz_high_clock
+
+ To compile this driver as a module, choose M here:
+ the module will be called dptf_pch_fivr.
+
+endif
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
index 1a9b0a2b25bf..297340682f66 100644
--- a/drivers/acpi/dptf/Makefile
+++ b/drivers/acpi/dptf/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ACPI) += int340x_thermal.o
obj-$(CONFIG_DPTF_POWER) += dptf_power.o
+obj-$(CONFIG_DPTF_PCH_FIVR) += dptf_pch_fivr.o
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
new file mode 100644
index 000000000000..5fca18296bf6
--- /dev/null
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dptf_pch_fivr: DPTF PCH FIVR Participant driver
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * Presentation of attributes which are defined for INT1045
+ * They are:
+ * freq_mhz_low_clock : Set PCH FIVR switching freq for
+ * FIVR clock 19.2MHz and 24MHz
+ * freq_mhz_high_clock : Set PCH FIVR switching freq for
+ * FIVR clock 38.4MHz
+ */
+#define PCH_FIVR_SHOW(name, method) \
+static ssize_t name##_show(struct device *dev,\
+ struct device_attribute *attr,\
+ char *buf)\
+{\
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
+ unsigned long long val;\
+ acpi_status status;\
+\
+ status = acpi_evaluate_integer(acpi_dev->handle, #method,\
+ NULL, &val);\
+ if (ACPI_SUCCESS(status))\
+ return sprintf(buf, "%d\n", (int)val);\
+ else\
+ return -EINVAL;\
+}
+
+#define PCH_FIVR_STORE(name, method) \
+static ssize_t name##_store(struct device *dev,\
+ struct device_attribute *attr,\
+ const char *buf, size_t count)\
+{\
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
+ acpi_status status;\
+ u32 val;\
+\
+ if (kstrtouint(buf, 0, &val) < 0)\
+ return -EINVAL;\
+\
+ status = acpi_execute_simple_method(acpi_dev->handle, #method, val);\
+ if (ACPI_SUCCESS(status))\
+ return count;\
+\
+ return -EINVAL;\
+}
+
+PCH_FIVR_SHOW(freq_mhz_low_clock, GFC0)
+PCH_FIVR_SHOW(freq_mhz_high_clock, GFC1)
+PCH_FIVR_STORE(freq_mhz_low_clock, RFC0)
+PCH_FIVR_STORE(freq_mhz_high_clock, RFC1)
+
+static DEVICE_ATTR_RW(freq_mhz_low_clock);
+static DEVICE_ATTR_RW(freq_mhz_high_clock);
+
+static struct attribute *fivr_attrs[] = {
+ &dev_attr_freq_mhz_low_clock.attr,
+ &dev_attr_freq_mhz_high_clock.attr,
+ NULL
+};
+
+static const struct attribute_group pch_fivr_attribute_group = {
+ .attrs = fivr_attrs,
+ .name = "pch_fivr_switch_frequency"
+};
+
+static int pch_fivr_add(struct platform_device *pdev)
+{
+ struct acpi_device *acpi_dev;
+ unsigned long long ptype;
+ acpi_status status;
+ int result;
+
+ acpi_dev = ACPI_COMPANION(&(pdev->dev));
+ if (!acpi_dev)
+ return -ENODEV;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
+ if (ACPI_FAILURE(status) || ptype != 0x05)
+ return -ENODEV;
+
+ result = sysfs_create_group(&pdev->dev.kobj,
+ &pch_fivr_attribute_group);
+ if (result)
+ return result;
+
+ platform_set_drvdata(pdev, acpi_dev);
+
+ return 0;
+}
+
+static int pch_fivr_remove(struct platform_device *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &pch_fivr_attribute_group);
+
+ return 0;
+}
+
+static const struct acpi_device_id pch_fivr_device_ids[] = {
+ {"INTC1045", 0},
+ {"INTC1049", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
+
+static struct platform_driver pch_fivr_driver = {
+ .probe = pch_fivr_add,
+ .remove = pch_fivr_remove,
+ .driver = {
+ .name = "dptf_pch_fivr",
+ .acpi_match_table = pch_fivr_device_ids,
+ },
+};
+
+module_platform_driver(pch_fivr_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI DPTF PCH FIVR driver");
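
The attribute group registered by pch_fivr_add() surfaces the two frequency
knobs under sysfs. A hypothetical userspace reader follows; the device path
shown (INTC1045:00) is an assumption and depends on the platform's ACPI
enumeration:

/* cc -o fivr_read fivr_read.c */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/platform/devices/INTC1045:00/"
			   "pch_fivr_switch_frequency/freq_mhz_low_clock";
	FILE *f = fopen(path, "r");
	int mhz;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &mhz) == 1)
		printf("PCH FIVR low-clock switching frequency: %d MHz\n", mhz);
	fclose(f);
	return 0;
}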
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 92b996a564d0..a24d5d7aa117 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -229,6 +229,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INT3532", 0},
{"INTC1047", 0},
{"INTC1050", 0},
+ {"INTC1060", 0},
+ {"INTC1061", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
@@ -237,7 +239,7 @@ static struct platform_driver dptf_power_driver = {
.probe = dptf_power_add,
.remove = dptf_power_remove,
.driver = {
- .name = "DPTF Platform Power",
+ .name = "dptf_power",
.acpi_match_table = int3407_device_ids,
},
};
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index bc71a6a60334..d14025a85ce8 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -25,9 +25,16 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INT340A"},
{"INT340B"},
{"INTC1040"},
+ {"INTC1041"},
{"INTC1043"},
{"INTC1044"},
+ {"INTC1045"},
+ {"INTC1046"},
{"INTC1047"},
+ {"INTC1048"},
+ {"INTC1049"},
+ {"INTC1060"},
+ {"INTC1061"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fcddda3d6712..e0cb1bcfffb2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2011,20 +2011,16 @@ bool acpi_ec_dispatch_gpe(void)
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;
- if (ec_no_wakeup)
- return false;
-
/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
- if (ret == ACPI_INTERRUPT_HANDLED) {
+ if (ret == ACPI_INTERRUPT_HANDLED)
pm_pr_dbg("ACPI EC GPE dispatched\n");
- /* Flush the event and query workqueues. */
- acpi_ec_flush_work();
- }
+ /* Flush the event and query workqueues. */
+ acpi_ec_flush_work();
return false;
}
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 47f21599f2ab..92e59f45329b 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -19,9 +19,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("event");
-
/* ACPI notifier chain */
static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
@@ -34,7 +31,7 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ == NOTIFY_BAD) ? -EINVAL : 0;
}
EXPORT_SYMBOL(acpi_notifier_call_chain);
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index b1a7f8d6965e..fe6b6792c8bb 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -101,7 +101,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
switch (gsi) {
case 0 ... 255:
- sprintf(ev_name, "_%c%02hhX",
+ sprintf(ev_name, "_%c%02X",
trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 62873388b24f..48354f82fba6 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,6 +27,7 @@ static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
{"INT3404", 0},
{"INTC1044", 0},
+ {"INTC1048", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 43411a7457cd..e3638bafb941 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -134,7 +134,7 @@ int acpi_add_power_resource(acpi_handle handle);
void acpi_power_add_remove_device(struct acpi_device *adev, bool add);
int acpi_power_wakeup_list_init(struct list_head *list, int *system_level);
int acpi_device_sleep_wake(struct acpi_device *dev,
- int enable, int sleep_state, int dev_state);
+ int enable, int sleep_state, int dev_state);
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 26dd208a0d63..442608220b5c 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1389,7 +1389,7 @@ static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
if (a == &dev_attr_scrub.attr)
@@ -1564,7 +1564,7 @@ static ssize_t format1_show(struct device *dev,
le16_to_cpu(nfit_dcr->dcr->code));
break;
}
- if (rc != ENXIO)
+ if (rc != -ENXIO)
break;
}
mutex_unlock(&acpi_desc->init_mutex);
@@ -1679,7 +1679,7 @@ static struct attribute *acpi_nfit_dimm_attributes[] = {
static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -2175,10 +2175,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
* these commands.
*/
enum nfit_aux_cmds {
- NFIT_CMD_TRANSLATE_SPA = 5,
- NFIT_CMD_ARS_INJECT_SET = 7,
- NFIT_CMD_ARS_INJECT_CLEAR = 8,
- NFIT_CMD_ARS_INJECT_GET = 9,
+ NFIT_CMD_TRANSLATE_SPA = 5,
+ NFIT_CMD_ARS_INJECT_SET = 7,
+ NFIT_CMD_ARS_INJECT_CLEAR = 8,
+ NFIT_CMD_ARS_INJECT_GET = 9,
};
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -2632,7 +2632,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
- nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
+ nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
if (!mmio->addr.base) {
dev_dbg(dev, "%s failed to map bdw\n",
nvdimm_name(nvdimm));
@@ -3006,10 +3006,8 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc->provider_data = nfit_spa;
ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
- ndr_desc->numa_node = acpi_map_pxm_to_online_node(
- spa->proximity_domain);
- ndr_desc->target_node = acpi_map_pxm_to_node(
- spa->proximity_domain);
+ ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
+ ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
} else {
ndr_desc->numa_node = NUMA_NO_NODE;
ndr_desc->target_node = NUMA_NO_NODE;
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 2c32cfb72370..cb73a5d6ea76 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -24,8 +24,15 @@
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
+#include <linux/dax.h>
static u8 hmat_revision;
+static int hmat_disable __initdata;
+
+void __init disable_hmat(void)
+{
+ hmat_disable = 1;
+}
static LIST_HEAD(targets);
static LIST_HEAD(initiators);
@@ -56,7 +63,7 @@ struct memory_target {
unsigned int memory_pxm;
unsigned int processor_pxm;
struct resource memregions;
- struct node_hmem_attrs hmem_attrs;
+ struct node_hmem_attrs hmem_attrs[2];
struct list_head caches;
struct node_cache_attrs cache_attrs;
bool registered;
@@ -65,6 +72,7 @@ struct memory_target {
struct memory_initiator {
struct list_head node;
unsigned int processor_pxm;
+ bool has_cpu;
};
struct memory_locality {
@@ -108,6 +116,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
return;
initiator->processor_pxm = cpu_pxm;
+ initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
list_add_tail(&initiator->node, &initiators);
}
@@ -215,28 +224,28 @@ static u32 hmat_normalize(u16 entry, u64 base, u8 type)
}
static void hmat_update_target_access(struct memory_target *target,
- u8 type, u32 value)
+ u8 type, u32 value, int access)
{
switch (type) {
case ACPI_HMAT_ACCESS_LATENCY:
- target->hmem_attrs.read_latency = value;
- target->hmem_attrs.write_latency = value;
+ target->hmem_attrs[access].read_latency = value;
+ target->hmem_attrs[access].write_latency = value;
break;
case ACPI_HMAT_READ_LATENCY:
- target->hmem_attrs.read_latency = value;
+ target->hmem_attrs[access].read_latency = value;
break;
case ACPI_HMAT_WRITE_LATENCY:
- target->hmem_attrs.write_latency = value;
+ target->hmem_attrs[access].write_latency = value;
break;
case ACPI_HMAT_ACCESS_BANDWIDTH:
- target->hmem_attrs.read_bandwidth = value;
- target->hmem_attrs.write_bandwidth = value;
+ target->hmem_attrs[access].read_bandwidth = value;
+ target->hmem_attrs[access].write_bandwidth = value;
break;
case ACPI_HMAT_READ_BANDWIDTH:
- target->hmem_attrs.read_bandwidth = value;
+ target->hmem_attrs[access].read_bandwidth = value;
break;
case ACPI_HMAT_WRITE_BANDWIDTH:
- target->hmem_attrs.write_bandwidth = value;
+ target->hmem_attrs[access].write_bandwidth = value;
break;
default:
break;
@@ -329,8 +338,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
if (mem_hier == ACPI_HMAT_MEMORY) {
target = find_mem_target(targs[targ]);
- if (target && target->processor_pxm == inits[init])
- hmat_update_target_access(target, type, value);
+ if (target && target->processor_pxm == inits[init]) {
+ hmat_update_target_access(target, type, value, 0);
+ /* If the node has a CPU, update access 1 */
+ if (node_state(pxm_to_node(inits[init]), N_CPU))
+ hmat_update_target_access(target, type, value, 1);
+ }
}
}
}
@@ -424,7 +437,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
- if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
+ if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
+ hmat_revision > 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n");
@@ -566,6 +580,7 @@ static void hmat_register_target_initiators(struct memory_target *target)
unsigned int mem_nid, cpu_nid;
struct memory_locality *loc = NULL;
u32 best = 0;
+ bool access0done = false;
int i;
mem_nid = pxm_to_node(target->memory_pxm);
@@ -577,7 +592,11 @@ static void hmat_register_target_initiators(struct memory_target *target)
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
- return;
+ access0done = true;
+ if (node_state(cpu_nid, N_CPU)) {
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+ return;
+ }
}
if (list_empty(&localities))
@@ -591,6 +610,41 @@ static void hmat_register_target_initiators(struct memory_target *target)
*/
bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(p_nodes, &initiators, initiator_cmp);
+ if (!access0done) {
+ for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
+ loc = localities_types[i];
+ if (!loc)
+ continue;
+
+ best = 0;
+ list_for_each_entry(initiator, &initiators, node) {
+ u32 value;
+
+ if (!test_bit(initiator->processor_pxm, p_nodes))
+ continue;
+
+ value = hmat_initiator_perf(target, initiator,
+ loc->hmat_loc);
+ if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
+ bitmap_clear(p_nodes, 0, initiator->processor_pxm);
+ if (value != best)
+ clear_bit(initiator->processor_pxm, p_nodes);
+ }
+ if (best)
+ hmat_update_target_access(target, loc->hmat_loc->data_type,
+ best, 0);
+ }
+
+ for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
+ cpu_nid = pxm_to_node(i);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ }
+ }
+
+ /* Access 1 ignores Generic Initiators */
+ bitmap_zero(p_nodes, MAX_NUMNODES);
+ list_sort(p_nodes, &initiators, initiator_cmp);
+ best = 0;
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i];
if (!loc)
@@ -600,6 +654,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
list_for_each_entry(initiator, &initiators, node) {
u32 value;
+ if (!initiator->has_cpu) {
+ clear_bit(initiator->processor_pxm, p_nodes);
+ continue;
+ }
if (!test_bit(initiator->processor_pxm, p_nodes))
continue;
@@ -610,12 +668,11 @@ static void hmat_register_target_initiators(struct memory_target *target)
clear_bit(initiator->processor_pxm, p_nodes);
}
if (best)
- hmat_update_target_access(target, loc->hmat_loc->data_type, best);
+ hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
}
-
for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
cpu_nid = pxm_to_node(i);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
}
}
@@ -628,70 +685,10 @@ static void hmat_register_target_cache(struct memory_target *target)
node_add_cache(mem_nid, &tcache->cache_attrs);
}
-static void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_perf(struct memory_target *target, int access)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
- node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
-}
-
-static void hmat_register_target_device(struct memory_target *target,
- struct resource *r)
-{
- /* define a clean / non-busy resource for the platform device */
- struct resource res = {
- .start = r->start,
- .end = r->end,
- .flags = IORESOURCE_MEM,
- };
- struct platform_device *pdev;
- struct memregion_info info;
- int rc, id;
-
- rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
- IORES_DESC_SOFT_RESERVED);
- if (rc != REGION_INTERSECTS)
- return;
-
- id = memregion_alloc(GFP_KERNEL);
- if (id < 0) {
- pr_err("memregion allocation failure for %pr\n", &res);
- return;
- }
-
- pdev = platform_device_alloc("hmem", id);
- if (!pdev) {
- pr_err("hmem device allocation failure for %pr\n", &res);
- goto out_pdev;
- }
-
- pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
- info = (struct memregion_info) {
- .target_node = acpi_map_pxm_to_node(target->memory_pxm),
- };
- rc = platform_device_add_data(pdev, &info, sizeof(info));
- if (rc < 0) {
- pr_err("hmem memregion_info allocation failure for %pr\n", &res);
- goto out_pdev;
- }
-
- rc = platform_device_add_resources(pdev, &res, 1);
- if (rc < 0) {
- pr_err("hmem resource allocation failure for %pr\n", &res);
- goto out_resource;
- }
-
- rc = platform_device_add(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "device add failed for %pr\n", &res);
- goto out_resource;
- }
-
- return;
-
-out_resource:
- put_device(&pdev->dev);
-out_pdev:
- memregion_free(id);
+ node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}
static void hmat_register_target_devices(struct memory_target *target)
@@ -705,8 +702,11 @@ static void hmat_register_target_devices(struct memory_target *target)
if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
return;
- for (res = target->memregions.child; res; res = res->sibling)
- hmat_register_target_device(target, res);
+ for (res = target->memregions.child; res; res = res->sibling) {
+ int target_nid = pxm_to_node(target->memory_pxm);
+
+ hmem_register_device(target_nid, res);
+ }
}
static void hmat_register_target(struct memory_target *target)
@@ -733,7 +733,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
- hmat_register_target_perf(target);
+ hmat_register_target_perf(target, 0);
+ hmat_register_target_perf(target, 1);
target->registered = true;
}
mutex_unlock(&target_lock);
@@ -814,7 +815,7 @@ static __init int hmat_init(void)
enum acpi_hmat_type i;
acpi_status status;
- if (srat_disabled())
+ if (srat_disabled() || hmat_disable)
return 0;
status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
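
With hmem_attrs split into two access classes above, class 0 describes the
best initiator of any kind (Generic Initiators included) and class 1 the best
CPU-attached initiator; each class lands under
/sys/devices/system/node/nodeN/accessM/. A hypothetical read of the CPU-only
bandwidth (node number and MiB/s units are assumptions):

#include <stdio.h>

int main(void)
{
	unsigned int bw;
	FILE *f = fopen("/sys/devices/system/node/node0/"
			"access1/initiators/read_bandwidth", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &bw) == 1)
		printf("node0 access1 read bandwidth: %u MiB/s\n", bw);
	fclose(f);
	return 0;
}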
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 15bbaab8500b..6021a1013442 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -27,11 +27,16 @@ static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
unsigned char acpi_srat_revision __initdata;
-int acpi_numa __initdata;
+static int acpi_numa __initdata;
+
+void __init disable_srat(void)
+{
+ acpi_numa = -1;
+}
int pxm_to_node(int pxm)
{
- if (pxm < 0)
+ if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
@@ -130,6 +135,36 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
+ {
+ struct acpi_srat_generic_affinity *p =
+ (struct acpi_srat_generic_affinity *)header;
+
+ if (p->device_handle_type == 0) {
+ /*
+ * For PCI devices this may be the only place they
+ * are assigned a proximity domain.
+ */
+ pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
+ *(u16 *)(&p->device_handle[0]),
+ *(u16 *)(&p->device_handle[2]),
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
+ "enabled" : "disabled");
+ } else {
+ /*
+ * In this case we can rely on the device having a
+ * proximity domain reference.
+ */
+ pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
+ (char *)(&p->device_handle[0]),
+ (char *)(&p->device_handle[8]),
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
+ "enabled" : "disabled");
+ }
+ }
+ break;
default:
pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
header->type);
@@ -163,7 +198,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
void __init bad_srat(void)
{
pr_err("SRAT: SRAT not used.\n");
- acpi_numa = -1;
+ disable_srat();
}
int __init srat_disabled(void)
@@ -332,6 +367,41 @@ acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
return 0;
}
+#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
+static int __init
+acpi_parse_gi_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_generic_affinity *gi_affinity;
+ int node;
+
+ gi_affinity = (struct acpi_srat_generic_affinity *)header;
+ if (!gi_affinity)
+ return -EINVAL;
+ acpi_table_print_srat_entry(&header->common);
+
+ if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
+ return -EINVAL;
+
+ node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Too many proximity domains.\n");
+ return -EINVAL;
+ }
+ node_set(node, numa_nodes_parsed);
+ node_set_state(node, N_GENERIC_INITIATOR);
+
+ return 0;
+}
+#else
+static int __init
+acpi_parse_gi_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
+
static int __initdata parsed_numa_memblks;
static int __init
@@ -385,7 +455,7 @@ int __init acpi_numa_init(void)
/* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[3];
+ struct acpi_subtable_proc srat_proc[4];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
@@ -394,6 +464,8 @@ int __init acpi_numa_init(void)
srat_proc[1].handler = acpi_parse_x2apic_affinity;
srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
srat_proc[2].handler = acpi_parse_gicc_affinity;
+ srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
+ srat_proc[3].handler = acpi_parse_gi_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
@@ -436,6 +508,6 @@ int acpi_get_node(acpi_handle handle)
pxm = acpi_get_pxm(handle);
- return acpi_map_pxm_to_node(pxm);
+ return pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);
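
The pr_debug added above unpacks the ACPI 6.3 Generic Initiator device handle
in place. A standalone sketch of the same PCI-form decode (handle layout
assumed little-endian, as ACPI tables specify; the helper name is
illustrative):

#include <linux/printk.h>
#include <linux/types.h>

/* device_handle_type == 0: bytes 0-1 hold the PCI segment number,
 * bytes 2-3 the BDF, matching the SRAT print helper above. */
static void decode_pci_gi_handle(const u8 *device_handle)
{
	u16 seg = *(u16 *)&device_handle[0];
	u16 bdf = *(u16 *)&device_handle[2];

	pr_info("GI: PCI segment %u, BDF %#06x\n", seg, bdf);
}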
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4a0b07792233..0418febc5cf2 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -447,24 +447,19 @@ void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
-int acpi_os_map_generic_address(struct acpi_generic_address *gas)
+void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
- void __iomem *virt;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
- return 0;
+ return NULL;
/* Handle possible alignment issues */
memcpy(&addr, &gas->address, sizeof(addr));
if (!addr || !gas->bit_width)
- return -EINVAL;
-
- virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
- if (!virt)
- return -EIO;
+ return NULL;
- return 0;
+ return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
@@ -1749,17 +1744,22 @@ acpi_status __init acpi_os_initialize(void)
{
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
- acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
- acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
+ acpi_gbl_xgpe0_block_logical_address =
+ (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_gbl_xgpe1_block_logical_address =
+ (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+
if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
/*
* Use acpi_os_map_generic_address to pre-map the reset
* register if it's in system memory.
*/
- int rv;
+ void *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
- pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+ pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
+ rv ? "successful" : "failed");
}
acpi_os_initialized = true;
@@ -1787,8 +1787,12 @@ acpi_status acpi_os_terminate(void)
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
+ acpi_gbl_xgpe0_block_logical_address = 0UL;
+ acpi_gbl_xgpe1_block_logical_address = 0UL;
+
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+
if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
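
With the reworked return type, acpi_os_map_generic_address() hands callers the
mapping itself rather than an int status, so the success check reduces to a
NULL test. A minimal caller sketch (the wrapper function is illustrative, not
part of the patch):

#include <linux/acpi.h>

static void __iomem *map_pm1a_block(void)
{
	void __iomem *pm1a;

	pm1a = acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (!pm1a)
		pr_debug("PM1a block not in system memory, or mapping failed\n");
	return pm1a;
}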
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index dea8a60e18a4..14ee631cb7cf 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -175,7 +175,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
* configure the IRQ assigned to this slot|dev|pin. The 'source_index'
* indicates which resource descriptor in the resource template (of
* the link device) this interrupt is allocated from.
- *
+ *
* NOTE: Don't query the Link Device for IRQ information at this time
* because Link Device enumeration may not have occurred yet
* (e.g. exists somewhere 'below' this _PRT entry in the ACPI
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 606da5d77ad3..fb4c5632a232 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -6,8 +6,8 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
*
- * TBD:
- * 1. Support more than one IRQ resource entry per link device (index).
+ * TBD:
+ * 1. Support more than one IRQ resource entry per link device (index).
* 2. Implement start/stop mechanism and use ACPI Bus Driver facilities
* for IRQ management (e.g. start()->_SRS).
*/
@@ -249,8 +249,8 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
}
}
- /*
- * Query and parse _CRS to get the current IRQ assignment.
+ /*
+ * Query and parse _CRS to get the current IRQ assignment.
*/
status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
@@ -396,7 +396,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
/*
* "acpi_irq_balance" (default in APIC mode) enables ACPI to use PIC Interrupt
* Link Devices to move the PIRQs around to minimize sharing.
- *
+ *
* "acpi_irq_nobalance" (default in PIC mode) tells ACPI not to move any PIC IRQs
* that the BIOS has already set to active. This is necessary because
* ACPI has no automatic means of knowing what ISA IRQs are used. Note that
@@ -414,7 +414,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
*
* Note that PCI IRQ routers have a list of possible IRQs,
* which may not include the IRQs this table says are available.
- *
+ *
* Since this heuristic can't tell the difference between a link
* that no device will attach to, vs. a link which may be shared
* by multiple active devices -- it is not optimal.
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 54b36b7ad47d..95f23acd5b80 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -142,6 +142,26 @@ static struct mcfg_fixup mcfg_quirks[] = {
XGENE_V2_ECAM_MCFG(4, 0),
XGENE_V2_ECAM_MCFG(4, 1),
XGENE_V2_ECAM_MCFG(4, 2),
+
+#define ALTRA_ECAM_QUIRK(rev, seg) \
+ { "Ampere", "Altra ", rev, seg, MCFG_BUS_ANY, &pci_32b_read_ops }
+
+ ALTRA_ECAM_QUIRK(1, 0),
+ ALTRA_ECAM_QUIRK(1, 1),
+ ALTRA_ECAM_QUIRK(1, 2),
+ ALTRA_ECAM_QUIRK(1, 3),
+ ALTRA_ECAM_QUIRK(1, 4),
+ ALTRA_ECAM_QUIRK(1, 5),
+ ALTRA_ECAM_QUIRK(1, 6),
+ ALTRA_ECAM_QUIRK(1, 7),
+ ALTRA_ECAM_QUIRK(1, 8),
+ ALTRA_ECAM_QUIRK(1, 9),
+ ALTRA_ECAM_QUIRK(1, 10),
+ ALTRA_ECAM_QUIRK(1, 11),
+ ALTRA_ECAM_QUIRK(1, 12),
+ ALTRA_ECAM_QUIRK(1, 13),
+ ALTRA_ECAM_QUIRK(1, 14),
+ ALTRA_ECAM_QUIRK(1, 15),
};
static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
@@ -153,7 +173,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
{
if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(f->oem_table_id, mcfg_oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE) &&
+ ACPI_OEM_TABLE_ID_SIZE) &&
f->oem_revision == mcfg_oem_revision &&
f->segment == segment &&
resource_contains(&f->bus_range, bus_range))
@@ -280,5 +300,5 @@ void __init pci_mmcfg_late_init(void)
{
int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse);
if (err)
- pr_err("Failed to parse MCFG (%d)\n", err);
+ pr_debug("Failed to parse MCFG (%d)\n", err);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index f90e841c59f5..c12b5fb3e8fb 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -24,8 +24,6 @@
#include "internal.h"
-#define _COMPONENT ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
static int acpi_pci_root_add(struct acpi_device *device,
@@ -62,7 +60,7 @@ static DEFINE_MUTEX(osc_lock);
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
- * @handle - the ACPI CA node in question.
+ * @handle: the ACPI CA node in question.
*
* Note: we could make this API take a struct acpi_device * instead, but
* for now, it's more convenient to operate on an acpi_handle.
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index ca2461d1bf14..d6cb2c27a23b 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -28,9 +28,6 @@
static int check_sta_before_sun;
-#define _COMPONENT ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_slot");
-
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
diff --git a/drivers/acpi/pmic/Kconfig b/drivers/acpi/pmic/Kconfig
new file mode 100644
index 000000000000..56bbcb2ce61b
--- /dev/null
+++ b/drivers/acpi/pmic/Kconfig
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig PMIC_OPREGION
+ bool "PMIC (Power Management Integrated Circuit) operation region support"
+ help
+ Select this option to enable support for the ACPI operation
+ region of the PMIC chip. The operation region can be used
+ to control power rails and sensor reading/writing on the
+ PMIC chip.
+
+if PMIC_OPREGION
+
+config BYTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for the Bay Trail
+ version of the Crystal Cove PMIC.
+
+config CHTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for the Cherry Trail
+ version of the Crystal Cove PMIC.
+
+config XPOWER_PMIC_OPREGION
+ bool "ACPI operation region support for XPower AXP288 PMIC"
+ depends on MFD_AXP20X_I2C && IOSF_MBI=y
+ help
+ This config adds ACPI operation region support for XPower AXP288 PMIC.
+
+config BXT_WC_PMIC_OPREGION
+ bool "ACPI operation region support for BXT WhiskeyCove PMIC"
+ depends on INTEL_SOC_PMIC_BXTWC
+ help
+ This config adds ACPI operation region support for BXT WhiskeyCove PMIC.
+
+config CHT_WC_PMIC_OPREGION
+ bool "ACPI operation region support for CHT Whiskey Cove PMIC"
+ depends on INTEL_SOC_PMIC_CHTWC
+ help
+ This config adds ACPI operation region support for CHT Whiskey Cove PMIC.
+
+config CHT_DC_TI_PMIC_OPREGION
+ bool "ACPI operation region support for Dollar Cove TI PMIC"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ This config adds ACPI operation region support for Dollar Cove TI PMIC.
+
+endif # PMIC_OPREGION
+
+config TPS68470_PMIC_OPREGION
+ bool "ACPI operation region support for TPS68470 PMIC"
+ depends on MFD_TPS68470
+ help
+ This config adds ACPI operation region support for TI TPS68470 PMIC.
+ TPS68470 device is an advanced power management unit that powers
+ a Compact Camera Module (CCM), generates clocks for image sensors,
+ drives a dual LED for flash and incorporates two LED drivers for
+ general purpose indicators.
+ This driver enables ACPI operation region support to control
+ voltage regulators and clocks.
+
+ This option is a bool as it provides an ACPI operation
+ region, which must be available before any of the devices
+ using it are probed.
diff --git a/drivers/acpi/pmic/Makefile b/drivers/acpi/pmic/Makefile
new file mode 100644
index 000000000000..cd072c64920c
--- /dev/null
+++ b/drivers/acpi/pmic/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PMIC_OPREGION) += intel_pmic.o
+obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += intel_pmic_bytcrc.o
+obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += intel_pmic_chtcrc.o
+obj-$(CONFIG_XPOWER_PMIC_OPREGION) += intel_pmic_xpower.o
+obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += intel_pmic_bxtwc.o
+obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += intel_pmic_chtwc.o
+obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += intel_pmic_chtdc_ti.o
+obj-$(CONFIG_TPS68470_PMIC_OPREGION) += tps68470_pmic.o
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 837b875d075e..8048da85b7e0 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -13,7 +13,7 @@
* 1. via "Device Specific (D-State) Control"
* 2. via "Power Resource Control".
* The code below deals with ACPI Power Resources control.
- *
+ *
* An ACPI "power resource object" represents a software controllable power
* plane, clock plane, or other resource depended on by a device.
*
@@ -645,7 +645,7 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
* -ENODEV if the execution of either _DSW or _PSW has failed
*/
int acpi_device_sleep_wake(struct acpi_device *dev,
- int enable, int sleep_state, int dev_state)
+ int enable, int sleep_state, int dev_state)
{
union acpi_object in_arg[3];
struct acpi_object_list arg_list = { 3, in_arg };
@@ -690,7 +690,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/*
* Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
- * 1. Power on the power resources required for the wakeup device
+ * 1. Power on the power resources required for the wakeup device
* 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present
*/
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 7892980b3ce4..0cca7991f186 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -10,15 +10,11 @@
#include "sleep.h"
#include "internal.h"
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-
/*
* this file provides support for:
* /proc/acpi/wakeup
*/
-ACPI_MODULE_NAME("sleep")
-
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index f32beb7d7882..2ac48cda5b20 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -14,9 +14,6 @@
#include <linux/acpi.h>
#include <acpi/processor.h>
-#define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("processor_core");
-
static struct acpi_table_madt *get_madt_table(void)
{
static struct acpi_table_madt *madt;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 5909e8fa4013..b04a68950ff1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -354,7 +354,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
(u32) px->control, (u32) px->status));
/*
- * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
+ * Check that ACPI's u64 MHz will be valid as u32 KHz in cpufreq
*/
if (!px->core_frequency ||
((u32)(px->core_frequency * 1000) !=
@@ -627,7 +627,7 @@ int acpi_processor_preregister_performance(
goto err_ret;
/*
- * Now that we have _PSD data from all CPUs, lets setup P-state
+ * Now that we have _PSD data from all CPUs, lets setup P-state
* domain info.
*/
for_each_possible_cpu(i) {
@@ -693,7 +693,7 @@ int acpi_processor_preregister_performance(
if (match_pdomain->domain != pdomain->domain)
continue;
- match_pr->performance->shared_type =
+ match_pr->performance->shared_type =
pr->performance->shared_type;
cpumask_copy(match_pr->performance->shared_cpu_map,
pr->performance->shared_cpu_map);
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 41feb88ee92d..6c7d05b37c98 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -20,8 +20,6 @@
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
-#define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("processor_thermal");
#ifdef CONFIG_CPU_FREQ
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index ca707f5b521d..2a61f884e222 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -3,6 +3,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/reboot.h>
+#include <linux/delay.h>
#ifdef CONFIG_PCI
static void acpi_pci_reboot(struct acpi_generic_address *rr, u8 reset_value)
@@ -66,4 +67,14 @@ void acpi_reboot(void)
acpi_reset();
break;
}
+
+ /*
+ * Some platforms do not shut down immediately after writing to the
+ * ACPI reset register, and this results in racing with the
+ * subsequent reboot mechanism.
+ *
+ * The 15ms delay has been found to be long enough for the system
+ * to reboot on the affected platforms.
+ */
+ mdelay(15);
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index f158b8c30113..e6d9f4de2800 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -366,7 +366,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
state_readers[i].mode,
ACPI_SBS_BATTERY,
state_readers[i].command,
- (u8 *)battery +
+ (u8 *)battery +
state_readers[i].offset);
if (result)
goto end;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 87b74e9015e5..53c2862c4c75 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -176,7 +176,7 @@ int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
EXPORT_SYMBOL_GPL(acpi_smbus_write);
int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
- smbus_alarm_callback callback, void *context)
+ smbus_alarm_callback callback, void *context)
{
mutex_lock(&hc->lock);
hc->callback = callback;
diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h
index c3522bb82792..695c390e2884 100644
--- a/drivers/acpi/sbshc.h
+++ b/drivers/acpi/sbshc.h
@@ -24,9 +24,9 @@ enum acpi_sbs_device_addr {
typedef void (*smbus_alarm_callback)(void *context);
extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
- u8 command, u8 * data);
+ u8 command, u8 *data);
extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
- u8 command, u8 * data, u8 length);
+ u8 command, u8 *data, u8 length);
extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
- smbus_alarm_callback callback, void *context);
+ smbus_alarm_callback callback, void *context);
extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2142f1554761..bc6a79e33220 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -13,7 +13,7 @@
#include <linux/kthread.h>
#include <linux/dmi.h>
#include <linux/nls.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
@@ -898,8 +898,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
*/
err = acpi_device_sleep_wake(device, 0, 0, 0);
if (err)
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "error in _DSW or _PSW evaluation\n"));
+ pr_debug("error in _DSW or _PSW evaluation\n");
}
static void acpi_bus_init_power_state(struct acpi_device *device, int state)
@@ -1454,7 +1453,7 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
}
/**
- * acpi_dma_configure - Set-up DMA configuration for the device.
+ * acpi_dma_configure_id - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
* @input_id: input device id const value pointer
diff --git a/drivers/acpi/tiny-power-button.c b/drivers/acpi/tiny-power-button.c
index 6273d73c0b59..420e61b8eaae 100644
--- a/drivers/acpi/tiny-power-button.c
+++ b/drivers/acpi/tiny-power-button.c
@@ -4,7 +4,6 @@
#include <linux/acpi.h>
#include <acpi/button.h>
-ACPI_MODULE_NAME("tiny-power-button");
MODULE_AUTHOR("Josh Triplett");
MODULE_DESCRIPTION("ACPI Tiny Power Button Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 838b719ec7ce..d5411a166685 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -104,7 +104,6 @@ acpi_extract_package(union acpi_object *package,
" [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
- break;
}
break;
@@ -129,7 +128,6 @@ acpi_extract_package(union acpi_object *package,
" expecting [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
- break;
}
break;
case ACPI_TYPE_LOCAL_REFERENCE:
@@ -144,7 +142,6 @@ acpi_extract_package(union acpi_object *package,
" expecting [%c]\n",
i, format_string[i]);
return AE_BAD_DATA;
- break;
}
break;
@@ -155,7 +152,6 @@ acpi_extract_package(union acpi_object *package,
i));
/* TBD: handle nested packages... */
return AE_SUPPORT;
- break;
}
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 2499d7e3c710..4f5463b2a217 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -35,9 +35,6 @@
#include <linux/workqueue.h>
#include <acpi/video.h>
-ACPI_MODULE_NAME("video");
-#define _COMPONENT ACPI_VIDEO_COMPONENT
-
void acpi_video_unregister_backlight(void);
static bool backlight_notifier_registered;
@@ -181,14 +178,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
},
},
- {
- .callback = video_detect_force_video,
- .ident = "ThinkPad X201T",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
- },
- },
+ {
+ .callback = video_detect_force_video,
+ .ident = "ThinkPad X201T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
+ },
+ },
/* The native backlight controls do not work on some older machines */
{
@@ -282,6 +279,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "530U4E/540U4E"),
},
},
+ /* https://bugs.launchpad.net/bugs/1894667 */
+ {
+ .callback = video_detect_force_video,
+ .ident = "HP 635 Notebook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP 635 Notebook PC"),
+ },
+ },
/* Non win8 machines which need native backlight nevertheless */
{
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 0b2e42530adf..b02bf770aead 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -26,8 +26,6 @@ static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
* suspend/resume and isn't really required as this is called in S-state. At
* that time, there is no device hotplug
**/
-#define _COMPONENT ACPI_SYSTEM_COMPONENT
-ACPI_MODULE_NAME("wakeup_devices")
/**
* acpi_enable_wakeup_devices - Enable wake-up device GPEs.
@@ -46,7 +44,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
- || dev->wakeup.prepare_count))
+ || dev->wakeup.prepare_count))
continue;
if (device_may_wakeup(&dev->dev))
@@ -71,7 +69,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
if (!dev->wakeup.flags.valid
|| sleep_state > (u32) dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
- || dev->wakeup.prepare_count))
+ || dev->wakeup.prepare_count))
continue;
acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f936530a19b0..b5117576792b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -223,7 +223,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_work {
struct list_head entry;
- enum {
+ enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
BINDER_WORK_RETURN_ERROR,
@@ -885,27 +885,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked(
return w;
}
-/**
- * binder_dequeue_work_head() - Dequeues the item at head of list
- * @proc: binder_proc associated with list
- * @list: list to dequeue head
- *
- * Removes the head of the list if there are items on the list
- *
- * Return: pointer dequeued binder_work, NULL if list was empty
- */
-static struct binder_work *binder_dequeue_work_head(
- struct binder_proc *proc,
- struct list_head *list)
-{
- struct binder_work *w;
-
- binder_inner_proc_lock(proc);
- w = binder_dequeue_work_head_ilocked(list);
- binder_inner_proc_unlock(proc);
- return w;
-}
-
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
@@ -2250,7 +2229,7 @@ static void binder_deferred_fd_close(int fd)
__close_fd_get_file(fd, &twcb->file);
if (twcb->file) {
filp_close(twcb->file, current->files);
- task_work_add(current, &twcb->twork, true);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
} else {
kfree(twcb);
}
@@ -2344,8 +2323,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* file is done when the transaction is torn
* down.
*/
- WARN_ON(failed_at &&
- proc->tsk == current->group_leader);
} break;
case BINDER_TYPE_PTR:
/*
@@ -3136,7 +3113,7 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
- !reply && (t->flags & TF_ONE_WAY));
+ !reply && (t->flags & TF_ONE_WAY), current->tgid);
if (IS_ERR(t->buffer)) {
/*
* -ESRCH indicates VMA cleared. The target is dying.
@@ -4587,13 +4564,17 @@ static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
+ enum binder_work_type wtype;
while (1) {
- w = binder_dequeue_work_head(proc, list);
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ wtype = w ? w->type : 0;
+ binder_inner_proc_unlock(proc);
if (!w)
return;
- switch (w->type) {
+ switch (wtype) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -4627,9 +4608,11 @@ static void binder_release_work(struct binder_proc *proc,
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
+ case BINDER_WORK_NODE:
+ break;
default:
pr_err("unexpected work type, %d, not freed\n",
- w->type);
+ wtype);
break;
}
}
@@ -5182,9 +5165,7 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
- int ret;
struct binder_proc *proc = filp->private_data;
- const char *failure_string;
if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -5196,9 +5177,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
(unsigned long)pgprot_val(vma->vm_page_prot));
if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
- ret = -EPERM;
- failure_string = "bad vm_flags";
- goto err_bad_arg;
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
+ return -EPERM;
}
vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
vma->vm_flags &= ~VM_MAYWRITE;
@@ -5206,15 +5187,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- ret = binder_alloc_mmap_handler(&proc->alloc, vma);
- if (ret)
- return ret;
- return 0;
-
-err_bad_arg:
- pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
- proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
- return ret;
+ return binder_alloc_mmap_handler(&proc->alloc, vma);
}
static int binder_open(struct inode *nodp, struct file *filp)
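For context on the binder_release_work() hunks above: the removed binder_dequeue_work_head() returned a pointer whose pointee could be freed by another thread the moment the inner lock was dropped, making the later w->type read a use-after-free. The rewrite snapshots the type while the lock still pins the item. A minimal sketch of that pattern, with hypothetical names (my_work, drain_list) rather than the driver's own:

struct my_work {
	struct list_head entry;
	int type;
};

static void drain_list(spinlock_t *lock, struct list_head *list)
{
	struct my_work *w;
	int type;

	for (;;) {
		spin_lock(lock);
		w = list_first_entry_or_null(list, struct my_work, entry);
		type = w ? w->type : 0;	/* snapshot while the lock still pins w */
		if (w)
			list_del_init(&w->entry);
		spin_unlock(lock);
		if (!w)
			return;
		/* dispatch on 'type', never on 'w->type': once the lock is
		 * dropped, the item's owner may free it at any time */
	}
}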
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 69609696a843..2f846b7ae8b8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -338,12 +338,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
return vma;
}
+static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+{
+ /*
+ * Find the amount and size of buffers allocated by the current caller;
+ * The idea is that once we cross the threshold, whoever is responsible
+ * for the low async space is likely to try to send another async txn,
+ * and at some point we'll catch them in the act. This is more efficient
+ * than keeping a map per pid.
+ */
+ struct rb_node *n;
+ struct binder_buffer *buffer;
+ size_t total_alloc_size = 0;
+ size_t num_buffers = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ if (buffer->pid != pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ + sizeof(struct binder_buffer);
+ num_buffers++;
+ }
+
+ /*
+ * Warn if this pid has more than 50 transactions, or more than 50% of
+ * async space (which is 25% of total buffer size).
+ */
+ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+ alloc->pid, pid, num_buffers, total_alloc_size);
+ }
+}
+
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -486,11 +524,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = pid;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
+ if (alloc->free_async_space < alloc->buffer_size / 10) {
+ /*
+ * Start detecting spammers once we have less than 20%
+ * of async space left (which is less than 10% of total
+ * buffer size).
+ */
+ debug_low_async_space_locked(alloc, pid);
+ }
}
return buffer;
@@ -508,6 +555,7 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
+ * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -520,13 +568,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
+ extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
@@ -652,7 +701,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
* @alloc: binder_alloc for this proc
* @buffer: kernel pointer to buffer
*
- * Free the buffer allocated via binder_alloc_new_buffer()
+ * Free the buffer allocated via binder_alloc_new_buf()
*/
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
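The thresholds in the new code compose as follows: the async pool starts at half of the mmap'ed buffer, detection arms once free async space drops below buffer_size/10 (i.e. under 20% of that pool remains), and a sender is then flagged above 50 outstanding buffers or buffer_size/4 bytes (half the pool). A self-contained restatement of the predicate — illustrative only, not the driver's code:

#include <stdbool.h>
#include <stddef.h>

/* Mirrors debug_low_async_space_locked()'s decision, assuming (as the
 * comments above state) that async space is buffer_size / 2. */
static bool oneway_spamming(size_t buffer_size, size_t free_async_space,
			    size_t pid_buffers, size_t pid_bytes)
{
	if (free_async_space >= buffer_size / 10)
		return false;		/* detection not armed yet */
	return pid_buffers > 50 || pid_bytes > buffer_size / 4;
}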
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index db9c1b984695..55d8b4106766 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -32,6 +32,7 @@ struct binder_transaction;
* @offsets_size: size of array of offsets
* @extra_buffers_size: size of space for other objects (like sg lists)
* @user_data: user pointer to base of buffer space
+ * @pid: pid to attribute the buffer to (caller)
*
* Bookkeeping structure for binder transaction buffers
*/
@@ -51,6 +52,7 @@ struct binder_buffer {
size_t offsets_size;
size_t extra_buffers_size;
void __user *user_data;
+ int pid;
};
/**
@@ -117,7 +119,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async);
+ int is_async,
+ int pid);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 4151d9938255..c2b323bc3b3a 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -119,7 +119,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
int i;
for (i = 0; i < BUFFER_NUM; i++) {
- buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
if (IS_ERR(buffers[i]) ||
!check_buffer_pages_allocated(alloc, buffers[i],
sizes[i])) {
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7b76fefde3f8..7b4f154f07e6 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -63,7 +63,7 @@ static const struct constant_table binderfs_param_stats[] = {
{}
};
-const struct fs_parameter_spec binderfs_fs_parameters[] = {
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
fsparam_u32("max", Opt_max),
fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
{}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fbd8eaa32d32..00ba8e5a1ccc 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -360,6 +360,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d7), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index d991dd46e89c..98b8baa47dc5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,6 +240,8 @@ enum {
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
suspend/resume */
+ AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
+ from phy_power_on() */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index d4bba3ace45d..3ad46d26d9d5 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -227,7 +227,7 @@ static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
.plat_config = ahci_mvebu_armada_3700_config,
- .flags = AHCI_HFLAG_SUSPEND_PHYS,
+ .flags = AHCI_HFLAG_SUSPEND_PHYS | AHCI_HFLAG_IGN_NOTSUPP_POWER_ON,
};
static const struct of_device_id ahci_mvebu_of_match[] = {
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index a330307d3201..5b46fc9aeb4a 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -6,6 +6,7 @@
* Tang Yuantian <Yuantian.Tang@freescale.com>
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm.h>
@@ -80,6 +81,12 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
+static const struct acpi_device_id ahci_qoriq_acpi_match[] = {
+ {"NXP0004", .driver_data = (kernel_ulong_t)AHCI_LX2160A},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ahci_qoriq_acpi_match);
+
static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -255,6 +262,7 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
static int ahci_qoriq_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct acpi_device_id *acpi_id;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ahci_qoriq_priv *qoriq_priv;
@@ -267,14 +275,18 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
return PTR_ERR(hpriv);
of_id = of_match_node(ahci_qoriq_of_match, np);
- if (!of_id)
+ acpi_id = acpi_match_device(ahci_qoriq_acpi_match, &pdev->dev);
+ if (!(of_id || acpi_id))
return -ENODEV;
qoriq_priv = devm_kzalloc(dev, sizeof(*qoriq_priv), GFP_KERNEL);
if (!qoriq_priv)
return -ENOMEM;
- qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+ if (of_id)
+ qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+ else
+ qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;
if (unlikely(!ecc_initialized)) {
res = platform_get_resource_byname(pdev,
@@ -288,7 +300,8 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
}
}
- qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
+ if (device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT)
+ qoriq_priv->is_dmacoherent = true;
rc = ahci_platform_enable_resources(hpriv);
if (rc)
@@ -354,6 +367,7 @@ static struct platform_driver ahci_qoriq_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
+ .acpi_match_table = ahci_qoriq_acpi_match,
.pm = &ahci_qoriq_pm_ops,
},
};
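The qoriq changes follow the usual recipe for making a DT-only platform driver ACPI-capable: register both match tables, accept the probe if either firmware interface matched, take the private type from whichever table hit, and replace DT-specific helpers (of_dma_is_coherent()) with firmware-neutral ones (device_get_dma_attr()). A condensed sketch — my_of_match, my_acpi_match and my_type are placeholders:

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_node(my_of_match, pdev->dev.of_node);
	const struct acpi_device_id *acpi_id =
		acpi_match_device(my_acpi_match, &pdev->dev);
	enum my_type type;
	bool coherent;

	if (!of_id && !acpi_id)
		return -ENODEV;		/* enumerated by neither DT nor ACPI */

	type = of_id ? (enum my_type)of_id->data
		     : (enum my_type)acpi_id->driver_data;

	/* firmware-agnostic replacement for of_dma_is_coherent(np) */
	coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* ... set up the device using 'type' and 'coherent' ... */
	return 0;
}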
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 86261deeb4c5..de638dafce21 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -59,7 +59,7 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
}
rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
+ if (rc && !(rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))) {
phy_exit(hpriv->phys[i]);
goto disable_phys;
}
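The new hflag narrows the tolerance to exactly one error code: -EOPNOTSUPP from phy_power_on() is treated as benign (on Armada 3700 the COMPHY may be under firmware control), while every other failure still unwinds the PHY. An equivalent, arguably easier to read, formulation of the condition added above:

	rc = phy_power_on(hpriv->phys[i]);
	if (rc == -EOPNOTSUPP && (hpriv->flags & AHCI_HFLAG_IGN_NOTSUPP_POWER_ON))
		rc = 0;		/* "not supported" is acceptable on these SoCs */
	if (rc) {
		phy_exit(hpriv->phys[i]);
		goto disable_phys;
	}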
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f546a5761c4f..61c762961ca8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5616,7 +5616,7 @@ int ata_host_start(struct ata_host *host)
EXPORT_SYMBOL_GPL(ata_host_start);
/**
- * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
+ * ata_host_init - Initialize a host struct for sas (ipr, libsas)
* @host: host to initialize
* @dev: device host is attached to
* @ops: port_ops
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d912eaa65c94..b6f92050e60c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1115,7 +1115,7 @@ void ata_eh_freeze_port(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
/**
- * ata_port_thaw_port - EH helper to thaw port
+ * ata_eh_thaw_port - EH helper to thaw port
* @ap: ATA port to thaw
*
* Thaw frozen port @ap.
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 70431912dc63..48b8934970f3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1003,7 +1003,7 @@ void ata_scsi_sdev_config(struct scsi_device *sdev)
}
/**
- * atapi_drain_needed - Check whether data transfer may overflow
+ * ata_scsi_dma_need_drain - Check whether data transfer may overflow
* @rq: request to be checked
*
* ATAPI commands which transfer variable length data to host
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 3134eaec9e3d..1d74d89b5bed 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -461,7 +461,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
case 1:
ppi[0] = &cmd_info[4];
ppi[1] = &cmd_info[4];
- /* FALL THRU */
+ fallthrough;
/* Early revs have no CNTRL_CH0 */
case 2:
case 0:
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 4b2ba813dcab..1532b2e3c672 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * pata_ns87415.c - NS87415 (non PARISC) PATA
+ * pata_ns87415.c - NS87415 (and PARISC SUPERIO 87560) PATA
*
* (C) 2005 Red Hat <alan@lxorguk.ukuu.org.uk>
*
@@ -16,7 +16,6 @@
* systems. This has its own special mountain of errata.
*
* TODO:
- * Test PARISC SuperIO
* Get someone to test on SPARC
* Implement lazy pio/dma switching for better performance
* 8bit shared timing.
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index ad3893c62572..64b2ef15ec19 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -571,7 +571,6 @@ static int ahci_highbank_suspend(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
- int rc;
if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
dev_err(dev, "firmware update required for suspend/resume\n");
@@ -588,11 +587,7 @@ static int ahci_highbank_suspend(struct device *dev)
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
- rc = ata_host_suspend(host, PMSG_SUSPEND);
- if (rc)
- return rc;
-
- return 0;
+ return ata_host_suspend(host, PMSG_SUSPEND);
}
static int ahci_highbank_resume(struct device *dev)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index eb9dc14e5147..20190f66ced9 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2100,7 +2100,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
pp->dhfis_bits &= ~done_mask;
pp->dmafis_bits &= ~done_mask;
pp->sdbfis_bits |= done_mask;
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
DPRINTK("over\n");
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 141ac600b64c..44b0ed8f6bb8 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -120,7 +120,7 @@
/* Descriptor table word 0 bit (when DTA32M = 1) */
#define SATA_RCAR_DTEND BIT(0)
-#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFEUL
+#define SATA_RCAR_DMA_BOUNDARY 0x1FFFFFFFUL
/* Gen2 Physical Layer Control Registers */
#define RCAR_GEN2_PHY_CTL1_REG 0x1704
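The one-bit change above is not cosmetic: dma_boundary values are masks handed to the block layer and must have the form 2^n - 1. 0x1FFFFFFE (low bit clear) is not such a mask and breaks the segment-boundary arithmetic, whereas 0x1FFFFFFF correctly describes a 512 MiB boundary. A quick predicate for sanity-checking this class of constant, as a sketch:

#include <stdbool.h>

/* true iff m is one less than a power of two (a valid boundary mask) */
static bool valid_boundary_mask(unsigned long m)
{
	return (m & (m + 1)) == 0;	/* 0x1FFFFFFF -> true, 0x1FFFFFFE -> false */
}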
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 7f814da3c2d0..96bea1ab1ecc 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -327,7 +327,7 @@ done:
*/
-static struct atmdev_ops atmtcp_v_dev_ops = {
+static const struct atmdev_ops atmtcp_v_dev_ops = {
.dev_close = atmtcp_v_dev_close,
.open = atmtcp_v_open,
.close = atmtcp_v_close,
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 157452080f3d..41369fc7004f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- devcon.o swnode.o
+ swnode.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_ISA_BUS_API) += isa.o
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 75f72d684294..de8587cc119e 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -21,18 +21,27 @@
#include <linux/sched.h>
#include <linux/smp.h>
-__weak bool arch_freq_counters_available(struct cpumask *cpus)
+bool topology_scale_freq_invariant(void)
+{
+ return cpufreq_supports_freq_invariance() ||
+ arch_freq_counters_available(cpu_online_mask);
+}
+
+__weak bool arch_freq_counters_available(const struct cpumask *cpus)
{
return false;
}
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
-void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
- unsigned long max_freq)
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
{
unsigned long scale;
int i;
+ if (WARN_ON_ONCE(!cur_freq || !max_freq))
+ return;
+
/*
* If the use of counters for FIE is enabled, just return as we don't
* want to update the scale factor with information from CPUFREQ.
@@ -71,7 +80,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);
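For reference, the body of topology_set_freq_scale() — unchanged by this hunk and reproduced here from memory as a sketch — derives a per-CPU fixed-point capacity ratio; the new WARN_ON_ONCE() above guards its division:

	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/* cur/max in fixed point; SCHED_CAPACITY_SCALE == 1024 */
	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;

A CPU running at 1.2 GHz out of a 2.4 GHz maximum thus reports a freq_scale of 512, i.e. half capacity.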
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 886e9054999a..a9c23ecebc7c 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -229,7 +229,7 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
{
- return sprintf(buf, "%d\n", bus->p->drivers_autoprobe);
+ return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe);
}
static ssize_t drivers_autoprobe_store(struct bus_type *bus,
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 8d553c92cd32..bfc095956dd1 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -362,7 +362,7 @@ static ssize_t file_name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
- return sprintf(buf, "%u\n", this_leaf->object); \
+ return sysfs_emit(buf, "%u\n", this_leaf->object); \
}
show_one(id, id);
@@ -377,44 +377,48 @@ static ssize_t size_show(struct device *dev,
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
- return sprintf(buf, "%uK\n", this_leaf->size >> 10);
+ return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}
-static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
+static ssize_t shared_cpu_map_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
const struct cpumask *mask = &this_leaf->shared_cpu_map;
- return cpumap_print_to_pagebuf(list, buf, mask);
-}
-
-static ssize_t shared_cpu_map_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return shared_cpumap_show_func(dev, false, buf);
+ return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}
static ssize_t shared_cpu_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return shared_cpumap_show_func(dev, true, buf);
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+ return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const char *output;
switch (this_leaf->type) {
case CACHE_TYPE_DATA:
- return sprintf(buf, "Data\n");
+ output = "Data";
+ break;
case CACHE_TYPE_INST:
- return sprintf(buf, "Instruction\n");
+ output = "Instruction";
+ break;
case CACHE_TYPE_UNIFIED:
- return sprintf(buf, "Unified\n");
+ output = "Unified";
+ break;
default:
return -EINVAL;
}
+
+ return sysfs_emit(buf, "%s\n", output);
}
static ssize_t allocation_policy_show(struct device *dev,
@@ -422,15 +426,18 @@ static ssize_t allocation_policy_show(struct device *dev,
{
struct cacheinfo *this_leaf = dev_get_drvdata(dev);
unsigned int ci_attr = this_leaf->attributes;
- int n = 0;
+ const char *output;
if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
- n = sprintf(buf, "ReadWriteAllocate\n");
+ output = "ReadWriteAllocate";
else if (ci_attr & CACHE_READ_ALLOCATE)
- n = sprintf(buf, "ReadAllocate\n");
+ output = "ReadAllocate";
else if (ci_attr & CACHE_WRITE_ALLOCATE)
- n = sprintf(buf, "WriteAllocate\n");
- return n;
+ output = "WriteAllocate";
+ else
+ return 0;
+
+ return sysfs_emit(buf, "%s\n", output);
}
static ssize_t write_policy_show(struct device *dev,
@@ -441,9 +448,9 @@ static ssize_t write_policy_show(struct device *dev,
int n = 0;
if (ci_attr & CACHE_WRITE_THROUGH)
- n = sprintf(buf, "WriteThrough\n");
+ n = sysfs_emit(buf, "WriteThrough\n");
else if (ci_attr & CACHE_WRITE_BACK)
- n = sprintf(buf, "WriteBack\n");
+ n = sysfs_emit(buf, "WriteBack\n");
return n;
}
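This series converts show() callbacks wholesale from sprintf()/snprintf(buf, PAGE_SIZE, ...) to sysfs_emit()/sysfs_emit_at(). The point is that the helpers know buf is a PAGE_SIZE sysfs buffer, warn if it is not page-aligned, and clamp output so a callback can never overrun the page. The two shapes of the conversion, shown on a hypothetical attribute (val, items and n are placeholders):

static ssize_t val_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", val);	/* was: sprintf(buf, ...) */
}

static ssize_t items_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int i, len = 0;

	for (i = 0; i < n; i++)		/* was: len += scnprintf(&buf[len], ...) */
		len += sysfs_emit_at(buf, len, "%u,", items[i]);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}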
diff --git a/drivers/base/class.c b/drivers/base/class.c
index bcd410e6d70a..c3451481194e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -478,7 +478,7 @@ ssize_t show_class_attr_string(struct class *class,
struct class_attribute_string *cs;
cs = container_of(attr, struct class_attribute_string, attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", cs->str);
+ return sysfs_emit(buf, "%s\n", cs->str);
}
EXPORT_SYMBOL_GPL(show_class_attr_string);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bb5806a2bd4c..d661ada1518f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include <linux/sysfs.h>
#include "base.h"
@@ -239,27 +240,35 @@ void device_pm_move_to_tail(struct device *dev)
#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
static ssize_t status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- char *status;
+ const char *output;
switch (to_devlink(dev)->status) {
case DL_STATE_NONE:
- status = "not tracked"; break;
+ output = "not tracked";
+ break;
case DL_STATE_DORMANT:
- status = "dormant"; break;
+ output = "dormant";
+ break;
case DL_STATE_AVAILABLE:
- status = "available"; break;
+ output = "available";
+ break;
case DL_STATE_CONSUMER_PROBE:
- status = "consumer probing"; break;
+ output = "consumer probing";
+ break;
case DL_STATE_ACTIVE:
- status = "active"; break;
+ output = "active";
+ break;
case DL_STATE_SUPPLIER_UNBIND:
- status = "supplier unbinding"; break;
+ output = "supplier unbinding";
+ break;
default:
- status = "unknown"; break;
+ output = "unknown";
+ break;
}
- return sprintf(buf, "%s\n", status);
+
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(status);
@@ -267,16 +276,16 @@ static ssize_t auto_remove_on_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_link *link = to_devlink(dev);
- char *str;
+ const char *output;
if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
- str = "supplier unbind";
+ output = "supplier unbind";
else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
- str = "consumer unbind";
+ output = "consumer unbind";
else
- str = "never";
+ output = "never";
- return sprintf(buf, "%s\n", str);
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(auto_remove_on);
@@ -285,7 +294,7 @@ static ssize_t runtime_pm_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+ return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
}
static DEVICE_ATTR_RO(runtime_pm);
@@ -294,7 +303,8 @@ static ssize_t sync_state_only_show(struct device *dev,
{
struct device_link *link = to_devlink(dev);
- return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ return sysfs_emit(buf, "%d\n",
+ !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
}
static DEVICE_ATTR_RO(sync_state_only);
@@ -763,8 +773,7 @@ static void __device_link_del(struct kref *kref)
dev_dbg(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
@@ -778,8 +787,7 @@ static void __device_link_del(struct kref *kref)
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
- if (link->flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_drop_link(link->consumer);
+ pm_runtime_drop_link(link);
list_del(&link->s_node);
list_del(&link->c_node);
@@ -1059,7 +1067,7 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
&& dev->links.need_for_probe;
mutex_unlock(&wfs_lock);
device_unlock(dev);
- return sprintf(buf, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(waiting_for_supplier);
@@ -1709,7 +1717,7 @@ ssize_t device_show_ulong(struct device *dev,
char *buf)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
+ return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_ulong);
@@ -1739,7 +1747,7 @@ ssize_t device_show_int(struct device *dev,
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
+ return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_int);
@@ -1760,7 +1768,7 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+ return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
}
EXPORT_SYMBOL_GPL(device_show_bool);
@@ -1788,6 +1796,8 @@ static void device_release(struct kobject *kobj)
*/
devres_release_all(dev);
+ kfree(dev->dma_range_map);
+
if (dev->release)
dev->release(dev);
else if (dev->type && dev->type->release)
@@ -1932,7 +1942,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
struct kset *kset;
struct kobj_uevent_env *env = NULL;
int i;
- size_t count = 0;
+ int len = 0;
int retval;
/* search the kset, the device belongs to */
@@ -1962,10 +1972,10 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
/* copy keys to file */
for (i = 0; i < env->envp_idx; i++)
- count += sprintf(&buf[count], "%s\n", env->envp[i]);
+ len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
out:
kfree(env);
- return count;
+ return len;
}
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
@@ -1992,7 +2002,7 @@ static ssize_t online_show(struct device *dev, struct device_attribute *attr,
device_lock(dev);
val = !dev->offline;
device_unlock(dev);
- return sprintf(buf, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
@@ -3062,6 +3072,7 @@ void device_del(struct device *dev)
struct device *parent = dev->parent;
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
+ unsigned int noio_flag;
device_lock(dev);
kill_device(dev);
@@ -3073,6 +3084,7 @@ void device_del(struct device *dev)
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
+ noio_flag = memalloc_noio_save();
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
@@ -3114,6 +3126,7 @@ void device_del(struct device *dev)
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
cleanup_glue_dir(dev, glue_dir);
+ memalloc_noio_restore(noio_flag);
put_device(parent);
}
EXPORT_SYMBOL_GPL(device_del);
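The device_del() hunks above bracket the removal notifiers and sysfs teardown in a no-I/O allocation scope: any GFP_KERNEL allocation made on this path is implicitly degraded to GFP_NOIO, so memory reclaim cannot issue I/O to, say, a block device that is in the middle of being deleted. The scoping idiom in isolation:

	unsigned int noio_flag;

	noio_flag = memalloc_noio_save();	/* enter: allocations below avoid I/O */
	/* ... bus notifiers, dpm_sysfs_remove(), kobject_del(), ... */
	memalloc_noio_restore(noio_flag);	/* leave: restore caller's context */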
@@ -3324,7 +3337,7 @@ struct device *device_find_child_by_name(struct device *parent,
klist_iter_init(&parent->p->klist_children, &i);
while ((child = next_device(&i)))
- if (!strcmp(dev_name(child), name) && get_device(child))
+ if (sysfs_streq(dev_name(child), name) && get_device(child))
break;
klist_iter_exit(&i);
return child;
@@ -4061,22 +4074,21 @@ void device_shutdown(void)
*/
#ifdef CONFIG_PRINTK
-static int
-create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
+static void
+set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
{
const char *subsys;
- size_t pos = 0;
+
+ memset(dev_info, 0, sizeof(*dev_info));
if (dev->class)
subsys = dev->class->name;
else if (dev->bus)
subsys = dev->bus->name;
else
- return 0;
+ return;
- pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
- if (pos >= hdrlen)
- goto overflow;
+ strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
/*
* Add device identifier DEVICE=:
@@ -4092,41 +4104,28 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
c = 'b';
else
c = 'c';
- pos++;
- pos += snprintf(hdr + pos, hdrlen - pos,
- "DEVICE=%c%u:%u",
- c, MAJOR(dev->devt), MINOR(dev->devt));
+
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
} else if (strcmp(subsys, "net") == 0) {
struct net_device *net = to_net_dev(dev);
- pos++;
- pos += snprintf(hdr + pos, hdrlen - pos,
- "DEVICE=n%u", net->ifindex);
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "n%u", net->ifindex);
} else {
- pos++;
- pos += snprintf(hdr + pos, hdrlen - pos,
- "DEVICE=+%s:%s", subsys, dev_name(dev));
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "+%s:%s", subsys, dev_name(dev));
}
-
- if (pos >= hdrlen)
- goto overflow;
-
- return pos;
-
-overflow:
- dev_WARN(dev, "device/subsystem name too long");
- return 0;
}
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
{
- char hdr[128];
- size_t hdrlen;
+ struct dev_printk_info dev_info;
- hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
+ set_dev_info(dev, &dev_info);
- return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
+ return vprintk_emit(0, level, &dev_info, fmt, args);
}
EXPORT_SYMBOL(dev_vprintk_emit);
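Instead of rendering "SUBSYSTEM=..." / "DEVICE=..." into a 128-byte header string (and truncating with a dev_WARN on overflow), the metadata now travels to printk in fixed-size fields that vprintk_emit() consumes directly. The receiving structure looks roughly like the following — the field sizes are from memory of this series and should be treated as indicative:

struct dev_printk_info {
	char subsystem[16];	/* e.g. "block", "net", "pci" */
	char device[48];	/* e.g. "b8:0", "n2", "+usb:1-1" */
};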
@@ -4211,13 +4210,16 @@ define_dev_printk_level(_dev_info, KERN_INFO);
* -EPROBE_DEFER and propagate error upwards.
* In case of -EPROBE_DEFER it sets also defer probe reason, which can be
* checked later by reading devices_deferred debugfs attribute.
- * It replaces code sequence:
+ * It replaces code sequence::
+ *
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
* else
* dev_dbg(dev, ...);
* return err;
- * with
+ *
+ * with::
+ *
* return dev_err_probe(dev, err, ...);
*
* Returns @err.
@@ -4260,6 +4262,7 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
+ struct device *parent = dev->parent;
struct fwnode_handle *fn = dev->fwnode;
if (fwnode) {
@@ -4274,7 +4277,8 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
- fn->secondary = NULL;
+ if (!(parent && fn == parent->fwnode))
+ fn->secondary = ERR_PTR(-ENODEV);
} else {
dev->fwnode = NULL;
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index d2136ab9b14a..8f1d6569564c 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -139,11 +139,11 @@ EXPORT_SYMBOL_GPL(cpu_subsys);
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
-static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
+static ssize_t crash_notes_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t rc;
unsigned long long addr;
int cpunum;
@@ -156,21 +156,18 @@ static ssize_t show_crash_notes(struct device *dev, struct device_attribute *att
* operation should be safe. No locking required.
*/
addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
- rc = sprintf(buf, "%Lx\n", addr);
- return rc;
+
+ return sysfs_emit(buf, "%llx\n", addr);
}
-static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+static DEVICE_ATTR_ADMIN_RO(crash_notes);
-static ssize_t show_crash_notes_size(struct device *dev,
+static ssize_t crash_notes_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ssize_t rc;
-
- rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
- return rc;
+ return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
}
-static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
+static DEVICE_ATTR_ADMIN_RO(crash_notes_size);
static struct attribute *crash_note_cpu_attrs[] = {
&dev_attr_crash_notes.attr,
@@ -231,7 +228,7 @@ static struct cpu_attr cpu_attrs[] = {
static ssize_t print_cpus_kernel_max(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", NR_CPUS - 1);
+ return sysfs_emit(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
@@ -241,37 +238,37 @@ unsigned int total_cpus;
static ssize_t print_cpus_offline(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n = 0, len = PAGE_SIZE-2;
+ int len = 0;
cpumask_var_t offline;
/* display offline cpus < nr_cpu_ids */
if (!alloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
- n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
+ len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline));
free_cpumask_var(offline);
/* display offline cpus >= nr_cpu_ids */
if (total_cpus && nr_cpu_ids < total_cpus) {
- if (n && n < len)
- buf[n++] = ',';
+ len += sysfs_emit_at(buf, len, ",");
if (nr_cpu_ids == total_cpus-1)
- n += scnprintf(&buf[n], len - n, "%u", nr_cpu_ids);
+ len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);
else
- n += scnprintf(&buf[n], len - n, "%u-%d",
- nr_cpu_ids, total_cpus-1);
+ len += sysfs_emit_at(buf, len, "%u-%d",
+ nr_cpu_ids, total_cpus - 1);
}
- n += scnprintf(&buf[n], len - n, "\n");
- return n;
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n;
+ int len;
cpumask_var_t isolated;
if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
@@ -279,19 +276,19 @@ static ssize_t print_cpus_isolated(struct device *dev,
cpumask_andnot(isolated, cpu_possible_mask,
housekeeping_cpumask(HK_FLAG_DOMAIN));
- n = sprintf(buf, "%*pbl\n", cpumask_pr_args(isolated));
+ len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
- return n;
+ return len;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif
@@ -320,22 +317,23 @@ static ssize_t print_cpu_modalias(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- ssize_t n;
+ int len = 0;
u32 i;
- n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
- CPU_FEATURE_TYPEVAL);
+ len += sysfs_emit_at(buf, len,
+ "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
for (i = 0; i < MAX_CPU_FEATURES; i++)
if (cpu_have_feature(i)) {
- if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
WARN(1, "CPU features overflow page\n");
break;
}
- n += sprintf(&buf[n], ",%04X", i);
+ len += sysfs_emit_at(buf, len, ",%04X", i);
}
- buf[n++] = '\n';
- return n;
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
}
static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -516,56 +514,56 @@ static void __init cpu_dev_register_generic(void)
ssize_t __weak cpu_show_meltdown(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_mds(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_srbds(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "Not affected\n");
+ return sysfs_emit(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 857b0a928e8d..148e81969e04 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -19,7 +19,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
@@ -486,7 +486,8 @@ static ssize_t state_synced_show(struct device *dev,
device_lock(dev);
val = dev->state_synced;
device_unlock(dev);
- return sprintf(buf, "%u\n", val);
+
+ return sysfs_emit(buf, "%u\n", val);
}
static DEVICE_ATTR_RO(state_synced);
@@ -658,15 +659,14 @@ done:
*/
static int really_probe_debug(struct device *dev, struct device_driver *drv)
{
- ktime_t calltime, delta, rettime;
+ ktime_t calltime, rettime;
int ret;
calltime = ktime_get();
ret = really_probe(dev, drv);
rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
pr_debug("probe of %s returned %d after %lld usecs\n",
- dev_name(dev), ret, (s64) ktime_to_us(delta));
+ dev_name(dev), ret, ktime_us_delta(rettime, calltime));
return ret;
}
@@ -1117,6 +1117,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
+ pm_runtime_get_sync(dev);
+
while (device_links_busy(dev)) {
__device_driver_unlock(dev, parent);
@@ -1128,13 +1130,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
* have released the driver successfully while this one
* was waiting, so check for that.
*/
- if (dev->driver != drv)
+ if (dev->driver != drv) {
+ pm_runtime_put(dev);
return;
+ }
}
- pm_runtime_get_sync(dev);
- pm_runtime_clean_up_links(dev);
-
driver_sysfs_remove(dev);
if (dev->bus)
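The reordering above closes a gap: the runtime-PM reference is now taken before the device_links_busy() wait loop, so the device cannot runtime-suspend while its consumers are being unbound, and the race exit (another thread released the driver while we slept) must drop that reference on its way out. The invariant, compressed into a sketch of the relevant region:

	pm_runtime_get_sync(dev);		/* before any waiting/relocking */

	while (device_links_busy(dev)) {
		__device_driver_unlock(dev, parent);
		device_links_unbind_consumers(dev);
		__device_driver_lock(dev, parent);

		if (dev->driver != drv) {	/* lost the race while unlocked */
			pm_runtime_put(dev);	/* every early return pairs the get */
			return;
		}
	}
	/* ... unbind proper; the matching put happens later in the function */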
diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c
deleted file mode 100644
index 14e2178e09f8..000000000000
--- a/drivers/base/devcon.c
+++ /dev/null
@@ -1,231 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/**
- * Device connections
- *
- * Copyright (C) 2018 Intel Corporation
- * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- */
-
-#include <linux/device.h>
-#include <linux/property.h>
-
-static DEFINE_MUTEX(devcon_lock);
-static LIST_HEAD(devcon_list);
-
-static void *
-fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
-{
- struct device_connection con = { .id = con_id };
- struct fwnode_handle *ep;
- void *ret;
-
- fwnode_graph_for_each_endpoint(fwnode, ep) {
- con.fwnode = fwnode_graph_get_remote_port_parent(ep);
- if (!fwnode_device_is_available(con.fwnode))
- continue;
-
- ret = match(&con, -1, data);
- fwnode_handle_put(con.fwnode);
- if (ret) {
- fwnode_handle_put(ep);
- return ret;
- }
- }
- return NULL;
-}
-
-static void *
-fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
- void *data, devcon_match_fn_t match)
-{
- struct device_connection con = { };
- void *ret;
- int i;
-
- for (i = 0; ; i++) {
- con.fwnode = fwnode_find_reference(fwnode, con_id, i);
- if (IS_ERR(con.fwnode))
- break;
-
- ret = match(&con, -1, data);
- fwnode_handle_put(con.fwnode);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
-/**
- * fwnode_connection_find_match - Find connection from a device node
- * @fwnode: Device node with the connection
- * @con_id: Identifier for the connection
- * @data: Data for the match function
- * @match: Function to check and convert the connection description
- *
- * Find a connection with unique identifier @con_id between @fwnode and another
- * device node. @match will be used to convert the connection description to
- * data the caller is expecting to be returned.
- */
-void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
- const char *con_id, void *data,
- devcon_match_fn_t match)
-{
- void *ret;
-
- if (!fwnode || !match)
- return NULL;
-
- ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
- if (ret)
- return ret;
-
- return fwnode_devcon_match(fwnode, con_id, data, match);
-}
-EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
-
-/**
- * device_connection_find_match - Find physical connection to a device
- * @dev: Device with the connection
- * @con_id: Identifier for the connection
- * @data: Data for the match function
- * @match: Function to check and convert the connection description
- *
- * Find a connection with unique identifier @con_id between @dev and another
- * device. @match will be used to convert the connection description to data the
- * caller is expecting to be returned.
- */
-void *device_connection_find_match(struct device *dev, const char *con_id,
- void *data, devcon_match_fn_t match)
-{
- struct fwnode_handle *fwnode = dev_fwnode(dev);
- const char *devname = dev_name(dev);
- struct device_connection *con;
- void *ret = NULL;
- int ep;
-
- if (!match)
- return NULL;
-
- ret = fwnode_connection_find_match(fwnode, con_id, data, match);
- if (ret)
- return ret;
-
- mutex_lock(&devcon_lock);
-
- list_for_each_entry(con, &devcon_list, list) {
- ep = match_string(con->endpoint, 2, devname);
- if (ep < 0)
- continue;
-
- if (con_id && strcmp(con->id, con_id))
- continue;
-
- ret = match(con, !ep, data);
- if (ret)
- break;
- }
-
- mutex_unlock(&devcon_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(device_connection_find_match);
-
-extern struct bus_type platform_bus_type;
-extern struct bus_type pci_bus_type;
-extern struct bus_type i2c_bus_type;
-extern struct bus_type spi_bus_type;
-
-static struct bus_type *generic_match_buses[] = {
- &platform_bus_type,
-#ifdef CONFIG_PCI
- &pci_bus_type,
-#endif
-#ifdef CONFIG_I2C
- &i2c_bus_type,
-#endif
-#ifdef CONFIG_SPI_MASTER
- &spi_bus_type,
-#endif
- NULL,
-};
-
-static void *device_connection_fwnode_match(struct device_connection *con)
-{
- struct bus_type *bus;
- struct device *dev;
-
- for (bus = generic_match_buses[0]; bus; bus++) {
- dev = bus_find_device_by_fwnode(bus, con->fwnode);
- if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id)))
- return dev;
-
- put_device(dev);
- }
- return NULL;
-}
-
-/* This tries to find the device from the most common bus types by name. */
-static void *generic_match(struct device_connection *con, int ep, void *data)
-{
- struct bus_type *bus;
- struct device *dev;
-
- if (con->fwnode)
- return device_connection_fwnode_match(con);
-
- for (bus = generic_match_buses[0]; bus; bus++) {
- dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]);
- if (dev)
- return dev;
- }
-
- /*
- * We only get called if a connection was found, tell the caller to
- * wait for the other device to show up.
- */
- return ERR_PTR(-EPROBE_DEFER);
-}
-
-/**
- * device_connection_find - Find two devices connected together
- * @dev: Device with the connection
- * @con_id: Identifier for the connection
- *
- * Find a connection with unique identifier @con_id between @dev and
- * another device. On success returns handle to the device that is connected
- * to @dev, with the reference count for the found device incremented. Returns
- * NULL if no matching connection was found, or ERR_PTR(-EPROBE_DEFER) when a
- * connection was found but the other device has not been enumerated yet.
- */
-struct device *device_connection_find(struct device *dev, const char *con_id)
-{
- return device_connection_find_match(dev, con_id, NULL, generic_match);
-}
-EXPORT_SYMBOL_GPL(device_connection_find);
-
-/**
- * device_connection_add - Register a connection description
- * @con: The connection description to be registered
- */
-void device_connection_add(struct device_connection *con)
-{
- mutex_lock(&devcon_lock);
- list_add_tail(&con->list, &devcon_list);
- mutex_unlock(&devcon_lock);
-}
-EXPORT_SYMBOL_GPL(device_connection_add);
-
-/**
- * device_connections_remove - Unregister connection description
- * @con: The connection description to be unregistered
- */
-void device_connection_remove(struct device_connection *con)
-{
- mutex_lock(&devcon_lock);
- list_del(&con->list);
- mutex_unlock(&devcon_lock);
-}
-EXPORT_SYMBOL_GPL(device_connection_remove);
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index e42d0b514384..9243468e2c99 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -123,7 +123,7 @@ static int devcd_free(struct device *dev, void *data)
static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", devcd_disabled);
+ return sysfs_emit(buf, "%d\n", devcd_disabled);
}
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index ed615d3b9cf1..586e9a75c840 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -126,6 +126,14 @@ static void add_dr(struct device *dev, struct devres_node *node)
list_add_tail(&node->entry, &dev->devres_head);
}
+static void replace_dr(struct device *dev,
+ struct devres_node *old, struct devres_node *new)
+{
+ devres_log(dev, old, "REPLACE");
+ BUG_ON(!list_empty(&new->entry));
+ list_replace(&old->entry, &new->entry);
+}
+
#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
const char *name)
@@ -838,6 +846,103 @@ void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kmalloc);
/**
+ * devm_krealloc - Resource-managed krealloc()
+ * @dev: Device to re-allocate memory for
+ * @ptr: Pointer to the memory chunk to re-allocate
+ * @new_size: New allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
+ * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
+ * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
+ * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
+ * change the order in which the release callback for the re-alloc'ed devres
+ * will be called (except when falling back to devm_kmalloc() or when freeing
+ * resources when new_size is zero). The contents of the memory are preserved
+ * up to the lesser of new and old sizes.
+ */
+void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
+{
+ size_t total_new_size, total_old_size;
+ struct devres *old_dr, *new_dr;
+ unsigned long flags;
+
+ if (unlikely(!new_size)) {
+ devm_kfree(dev, ptr);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (unlikely(ZERO_OR_NULL_PTR(ptr)))
+ return devm_kmalloc(dev, new_size, gfp);
+
+ if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
+ /*
+ * We cannot reliably realloc a const string returned by
+ * devm_kstrdup_const().
+ */
+ return NULL;
+
+ if (!check_dr_size(new_size, &total_new_size))
+ return NULL;
+
+ total_old_size = ksize(container_of(ptr, struct devres, data));
+ if (total_old_size == 0) {
+ WARN(1, "Pointer doesn't point to dynamically allocated memory.");
+ return NULL;
+ }
+
+ /*
+ * If new size is smaller or equal to the actual number of bytes
+ * allocated previously - just return the same pointer.
+ */
+ if (total_new_size <= total_old_size)
+ return ptr;
+
+ /*
+ * Otherwise: allocate new, larger chunk. We need to allocate before
+ * taking the lock as most probably the caller uses GFP_KERNEL.
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+ total_new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
+ /*
+ * The spinlock protects the linked list against concurrent
+ * modifications but not the resource itself.
+ */
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
+ if (!old_dr) {
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ kfree(new_dr);
+ WARN(1, "Memory chunk not managed or managed by a different device.");
+ return NULL;
+ }
+
+ replace_dr(dev, &old_dr->node, &new_dr->node);
+
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ /*
+ * We can copy the memory contents after releasing the lock as we're
+	 * no longer modifying the list links.
+ */
+ memcpy(new_dr->data, old_dr->data,
+ total_old_size - offsetof(struct devres, data));
+ /*
+ * Same for releasing the old devres - it's now been removed from the
+ * list. This is also the reason why we must not use devm_kfree() - the
+ * links are no longer valid.
+ */
+ kfree(old_dr);
+
+ return new_dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_krealloc);
+
+/**
* devm_kstrdup - Allocate resource managed space and
* copy an existing string into that.
* @dev: Device to allocate memory for
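A short usage sketch for the new helper (grow_rx_buf and its parameters are hypothetical): like plain krealloc(), a failed devm_krealloc() leaves the old chunk intact and still device-managed, so the caller only swaps pointers on success.

static int grow_rx_buf(struct device *dev, u8 **buf, size_t *len, size_t need)
{
	u8 *tmp;

	if (need <= *len)
		return 0;	/* devm_krealloc() would hand back the same pointer */

	tmp = devm_krealloc(dev, *buf, need, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* *buf is unchanged and still auto-freed */

	*buf = tmp;
	*len = need;
	return 0;
}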
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 283ca2de76d4..4dec4b79ae06 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -124,7 +124,7 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", __firmware_loading_timeout());
+ return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}
/**
@@ -219,7 +219,7 @@ static ssize_t firmware_loading_show(struct device *dev,
loading = fw_sysfs_loading(fw_sysfs->fw_priv);
mutex_unlock(&fw_lock);
- return sprintf(buf, "%d\n", loading);
+ return sysfs_emit(buf, "%d\n", loading);
}
/**
@@ -272,9 +272,9 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: map pages failed\n",
__func__);
else
- rc = security_kernel_post_read_file(NULL,
- fw_priv->data, fw_priv->size,
- READING_FIRMWARE);
+ rc = security_kernel_post_load_data(fw_priv->data,
+ fw_priv->size,
+ LOADING_FIRMWARE, "blob");
/*
* Same logic as fw_load_abort, only the DONE bit
@@ -490,13 +490,11 @@ exit:
/**
* fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
* @fw_sysfs: firmware sysfs information for the firmware to load
- * @opt_flags: flags of options, FW_OPT_*
* @timeout: timeout to wait for the load
*
* In charge of constructing a sysfs fallback interface for firmware loading.
**/
-static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
- u32 opt_flags, long timeout)
+static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_sysfs->dev;
@@ -518,7 +516,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
list_add(&fw_priv->pending_list, &pending_fw_head);
mutex_unlock(&fw_lock);
- if (opt_flags & FW_OPT_UEVENT) {
+ if (fw_priv->opt_flags & FW_OPT_UEVENT) {
fw_priv->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
@@ -580,10 +578,10 @@ static int fw_load_from_user_helper(struct firmware *firmware,
}
fw_sysfs->fw_priv = firmware->priv;
- ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);
+ ret = fw_load_sysfs_fallback(fw_sysfs, timeout);
if (!ret)
- ret = assign_fw(firmware, device, opt_flags);
+ ret = assign_fw(firmware, device);
out_unlock:
usermodehelper_read_unlock();
@@ -613,7 +611,7 @@ static bool fw_run_sysfs_fallback(u32 opt_flags)
return false;
/* Also permit LSMs and IMA to fail firmware sysfs fallback */
- ret = security_kernel_load_data(LOADING_FIRMWARE);
+ ret = security_kernel_load_data(LOADING_FIRMWARE, true);
if (ret < 0)
return false;
@@ -625,7 +623,8 @@ static bool fw_run_sysfs_fallback(u32 opt_flags)
* @fw: pointer to firmware image
* @name: name of firmware file to look for
* @device: device for which firmware is being loaded
- * @opt_flags: options to control firmware loading behaviour
+ * @opt_flags: options to control firmware loading behaviour, as defined by
+ * &enum fw_opt
* @ret: return value from direct lookup which triggered the fallback mechanism
*
* This function is called if direct lookup for the firmware failed, it enables
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 2afdb6adb23f..3af7205b302f 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -67,10 +67,9 @@ static inline void unregister_sysfs_loader(void)
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
-int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags);
+int firmware_fallback_platform(struct fw_priv *fw_priv);
#else
-static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
- u32 opt_flags)
+static inline int firmware_fallback_platform(struct fw_priv *fw_priv)
{
return -ENOENT;
}
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
index 685edb7dd05a..00af99f0aff2 100644
--- a/drivers/base/firmware_loader/fallback_platform.c
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -8,16 +8,16 @@
#include "fallback.h"
#include "firmware.h"
-int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
+int firmware_fallback_platform(struct fw_priv *fw_priv)
{
const u8 *data;
size_t size;
int rc;
- if (!(opt_flags & FW_OPT_FALLBACK_PLATFORM))
+ if (!(fw_priv->opt_flags & FW_OPT_FALLBACK_PLATFORM))
return -ENOENT;
- rc = security_kernel_load_data(LOADING_FIRMWARE_EFI_EMBEDDED);
+ rc = security_kernel_load_data(LOADING_FIRMWARE, true);
if (rc)
return rc;
@@ -27,6 +27,12 @@ int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
if (fw_priv->data && size > fw_priv->allocated_size)
return -ENOMEM;
+
+ rc = security_kernel_post_load_data((u8 *)data, size, LOADING_FIRMWARE,
+ "platform");
+ if (rc)
+ return rc;
+
if (!fw_priv->data)
fw_priv->data = vmalloc(size);
if (!fw_priv->data)
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index d08efc77cf16..63bd29fdcb9c 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -32,6 +32,8 @@
* @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in
* the platform's main firmware. If both this fallback and the sysfs
* fallback are enabled, then this fallback will be tried first.
+ * @FW_OPT_PARTIAL: Allow partial read of firmware instead of needing to read
+ * entire file.
*/
enum fw_opt {
FW_OPT_UEVENT = BIT(0),
@@ -41,6 +43,7 @@ enum fw_opt {
FW_OPT_NOCACHE = BIT(4),
FW_OPT_NOFALLBACK_SYSFS = BIT(5),
FW_OPT_FALLBACK_PLATFORM = BIT(6),
+ FW_OPT_PARTIAL = BIT(7),
};
enum fw_status {
@@ -68,6 +71,8 @@ struct fw_priv {
void *data;
size_t size;
size_t allocated_size;
+ size_t offset;
+ u32 opt_flags;
#ifdef CONFIG_FW_LOADER_PAGED_BUF
bool is_paged_buf;
struct page **pages;
@@ -136,7 +141,7 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
-int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
+int assign_fw(struct firmware *fw, struct device *device);
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 63b9714a0154..78355095e00d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -12,6 +12,7 @@
#include <linux/capability.h>
#include <linux/device.h>
+#include <linux/kernel_read_file.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
@@ -167,10 +168,21 @@ static int fw_cache_piggyback_on_request(const char *name);
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
- void *dbuf, size_t size)
+ void *dbuf,
+ size_t size,
+ size_t offset,
+ u32 opt_flags)
{
struct fw_priv *fw_priv;
+ /* For a partial read, the buffer must be preallocated. */
+ if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
+ return NULL;
+
+ /* Only partial reads are allowed to use an offset. */
+ if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
+ return NULL;
+
fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
if (!fw_priv)
return NULL;
@@ -185,6 +197,8 @@ static struct fw_priv *__allocate_fw_priv(const char *fw_name,
fw_priv->fwc = fwc;
fw_priv->data = dbuf;
fw_priv->allocated_size = size;
+ fw_priv->offset = offset;
+ fw_priv->opt_flags = opt_flags;
fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&fw_priv->pending_list);
@@ -209,13 +223,20 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
/* Returns 1 for batching firmware requests with the same name */
static int alloc_lookup_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
- struct fw_priv **fw_priv, void *dbuf,
- size_t size, u32 opt_flags)
+ struct fw_priv **fw_priv,
+ void *dbuf,
+ size_t size,
+ size_t offset,
+ u32 opt_flags)
{
struct fw_priv *tmp;
spin_lock(&fwc->lock);
- if (!(opt_flags & FW_OPT_NOCACHE)) {
+ /*
+ * Do not merge requests that are marked to be non-cached or
+ * are performing partial reads.
+ */
+ if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
tmp = __lookup_fw_priv(fw_name);
if (tmp) {
kref_get(&tmp->ref);
@@ -226,7 +247,7 @@ static int alloc_lookup_fw_priv(const char *fw_name,
}
}
- tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
+ tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
if (tmp) {
INIT_LIST_HEAD(&tmp->list);
if (!(opt_flags & FW_OPT_NOCACHE))
@@ -466,18 +487,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
size_t in_size,
const void *in_buffer))
{
- loff_t size;
+ size_t size;
int i, len;
int rc = -ENOENT;
char *path;
- enum kernel_read_file_id id = READING_FIRMWARE;
size_t msize = INT_MAX;
void *buffer = NULL;
/* Already populated data member means we're loading into a buffer */
if (!decompress && fw_priv->data) {
buffer = fw_priv->data;
- id = READING_FIRMWARE_PREALLOC_BUFFER;
msize = fw_priv->allocated_size;
}
@@ -486,6 +505,9 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+ size_t file_size = 0;
+ size_t *file_size_ptr = NULL;
+
/* skip the unset customized path */
if (!fw_path[i][0])
continue;
@@ -499,10 +521,20 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
fw_priv->size = 0;
+ /*
+ * The total file size is only examined when doing a partial
+ * read; the "full read" case needs to fail if the whole
+ * firmware was not completely loaded.
+ */
+ if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
+ file_size_ptr = &file_size;
+
/* load firmware files from the mount namespace of init */
- rc = kernel_read_file_from_path_initns(path, &buffer,
- &size, msize, id);
- if (rc) {
+ rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
+ &buffer, msize,
+ file_size_ptr,
+ READING_FIRMWARE);
+ if (rc < 0) {
if (rc != -ENOENT)
dev_warn(device, "loading %s failed with error %d\n",
path, rc);
@@ -511,6 +543,9 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
path);
continue;
}
+ size = rc;
+ rc = 0;
+
dev_dbg(device, "Loading firmware from %s\n", path);
if (decompress) {
dev_dbg(device, "f/w decompressing %s\n",
@@ -637,7 +672,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
}
#endif
-int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
+int assign_fw(struct firmware *fw, struct device *device)
{
struct fw_priv *fw_priv = fw->priv;
int ret;
@@ -656,8 +691,8 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
* should be fixed in devres or driver core.
*/
/* don't cache firmware handled without uevent */
- if (device && (opt_flags & FW_OPT_UEVENT) &&
- !(opt_flags & FW_OPT_NOCACHE)) {
+ if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
+ !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
ret = fw_add_devm_name(device, fw_priv->fw_name);
if (ret) {
mutex_unlock(&fw_lock);
@@ -669,7 +704,7 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (!(opt_flags & FW_OPT_NOCACHE) &&
+ if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
fw_priv->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(fw_priv->fw_name))
kref_get(&fw_priv->ref);
@@ -688,7 +723,7 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size,
- u32 opt_flags)
+ size_t offset, u32 opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
@@ -707,7 +742,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
}
ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
- opt_flags);
+ offset, opt_flags);
/*
* bind with 'priv' now to avoid warning in failure path
@@ -754,9 +789,10 @@ static void fw_abort_batch_reqs(struct firmware *fw)
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size,
- u32 opt_flags)
+ size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
+ bool nondirect = false;
int ret;
if (!firmware_p)
@@ -768,28 +804,34 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
}
ret = _request_firmware_prepare(&fw, name, device, buf, size,
- opt_flags);
+ offset, opt_flags);
if (ret <= 0) /* error or already assigned */
goto out;
ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
+
+ /* Only full reads can support decompression, platform, and sysfs. */
+ if (!(opt_flags & FW_OPT_PARTIAL))
+ nondirect = true;
+
#ifdef CONFIG_FW_LOADER_COMPRESS
- if (ret == -ENOENT)
+ if (ret == -ENOENT && nondirect)
ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
fw_decompress_xz);
#endif
-
- if (ret == -ENOENT)
- ret = firmware_fallback_platform(fw->priv, opt_flags);
+ if (ret == -ENOENT && nondirect)
+ ret = firmware_fallback_platform(fw->priv);
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
dev_warn(device,
"Direct firmware load for %s failed with error %d\n",
name, ret);
- ret = firmware_fallback_sysfs(fw, name, device, opt_flags, ret);
+ if (nondirect)
+ ret = firmware_fallback_sysfs(fw, name, device,
+ opt_flags, ret);
} else
- ret = assign_fw(fw, device, opt_flags);
+ ret = assign_fw(fw, device);
out:
if (ret < 0) {
@@ -830,7 +872,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device, NULL, 0,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
FW_OPT_UEVENT);
module_put(THIS_MODULE);
return ret;
@@ -857,7 +899,7 @@ int firmware_request_nowarn(const struct firmware **firmware, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware, name, device, NULL, 0,
+ ret = _request_firmware(firmware, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN);
module_put(THIS_MODULE);
return ret;
@@ -881,7 +923,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
int ret;
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device, NULL, 0,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN |
FW_OPT_NOFALLBACK_SYSFS);
module_put(THIS_MODULE);
@@ -906,7 +948,7 @@ int firmware_request_platform(const struct firmware **firmware,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware, name, device, NULL, 0,
+ ret = _request_firmware(firmware, name, device, NULL, 0, 0,
FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
module_put(THIS_MODULE);
return ret;
@@ -962,7 +1004,7 @@ request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
return -EOPNOTSUPP;
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device, buf, size,
+ ret = _request_firmware(firmware_p, name, device, buf, size, 0,
FW_OPT_UEVENT | FW_OPT_NOCACHE);
module_put(THIS_MODULE);
return ret;
@@ -970,6 +1012,37 @@ request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
EXPORT_SYMBOL(request_firmware_into_buf);
/**
+ * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ * @offset: offset into file to read
+ *
+ * This function works like request_firmware_into_buf(), except that it
+ * allows a partial read of the file, starting at @offset.
+ */
+int
+request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset)
+{
+ int ret;
+
+ if (fw_cache_is_setup(device, name))
+ return -EOPNOTSUPP;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, buf, size, offset,
+ FW_OPT_UEVENT | FW_OPT_NOCACHE |
+ FW_OPT_PARTIAL);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_partial_firmware_into_buf);
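
A caller-side sketch of the new export, with an invented driver, firmware name, and sizes; the destination buffer must be preallocated, and @offset selects where in the file the read starts:

    /* Hypothetical caller: read 4 KiB starting 64 KiB into the image. */
    static int example_load_chunk(struct device *dev, void *buf)
    {
            const struct firmware *fw;
            int ret;

            ret = request_partial_firmware_into_buf(&fw, "example-fw.bin", dev,
                                                    buf, SZ_4K, SZ_64K);
            if (ret)
                    return ret;

            /* fw->data points into buf; fw->size is the number of bytes read. */
            release_firmware(fw);
            return 0;
    }
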
+
+/**
* release_firmware() - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
@@ -1001,7 +1074,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
- _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
+ _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4db3c660de83..eef4ffb6122c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -119,7 +119,8 @@ static ssize_t phys_index_show(struct device *dev,
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
- return sprintf(buf, "%08lx\n", phys_index);
+
+ return sysfs_emit(buf, "%08lx\n", phys_index);
}
/*
@@ -129,7 +130,7 @@ static ssize_t phys_index_show(struct device *dev,
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
+ return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}
/*
@@ -139,7 +140,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct memory_block *mem = to_memory_block(dev);
- ssize_t len = 0;
+ const char *output;
/*
* We can probably put these states in a nice little array
@@ -147,22 +148,20 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
*/
switch (mem->state) {
case MEM_ONLINE:
- len = sprintf(buf, "online\n");
+ output = "online";
break;
case MEM_OFFLINE:
- len = sprintf(buf, "offline\n");
+ output = "offline";
break;
case MEM_GOING_OFFLINE:
- len = sprintf(buf, "going-offline\n");
+ output = "going-offline";
break;
default:
- len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
- mem->state);
WARN_ON(1);
- break;
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
}
- return len;
+ return sysfs_emit(buf, "%s\n", output);
}
int memory_notify(unsigned long val, void *v)
@@ -303,21 +302,22 @@ static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
- return sprintf(buf, "%d\n", mem->phys_device);
+
+ return sysfs_emit(buf, "%d\n", mem->phys_device);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
- unsigned long nr_pages, int online_type,
- struct zone *default_zone)
+static int print_allowed_zone(char *buf, int len, int nid,
+ unsigned long start_pfn, unsigned long nr_pages,
+ int online_type, struct zone *default_zone)
{
struct zone *zone;
zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
- if (zone != default_zone) {
- strcat(buf, " ");
- strcat(buf, zone->name);
- }
+ if (zone == default_zone)
+ return 0;
+
+ return sysfs_emit_at(buf, len, " %s", zone->name);
}
static ssize_t valid_zones_show(struct device *dev,
@@ -327,6 +327,7 @@ static ssize_t valid_zones_show(struct device *dev,
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct zone *default_zone;
+ int len = 0;
int nid;
/*
@@ -341,24 +342,23 @@ static ssize_t valid_zones_show(struct device *dev,
default_zone = test_pages_in_a_zone(start_pfn,
start_pfn + nr_pages);
if (!default_zone)
- return sprintf(buf, "none\n");
- strcat(buf, default_zone->name);
+ return sysfs_emit(buf, "%s\n", "none");
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
goto out;
}
nid = mem->nid;
default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
nr_pages);
- strcat(buf, default_zone->name);
- print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
- default_zone);
- print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
- default_zone);
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ MMOP_ONLINE_KERNEL, default_zone);
+ len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
+ MMOP_ONLINE_MOVABLE, default_zone);
out:
- strcat(buf, "\n");
-
- return strlen(buf);
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif
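
sysfs_emit() and sysfs_emit_at() are PAGE_SIZE-aware replacements for sprintf() in show() callbacks: they never write past the sysfs page and warn on a buffer that is not page aligned. A minimal sketch with an invented attribute:

    /* Illustrative only; "things" is a made-up attribute. */
    static ssize_t things_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
    {
            int len = 0;
            int i;

            for (i = 0; i < 3; i++)
                    len += sysfs_emit_at(buf, len, "thing%d ", i);
            len += sysfs_emit_at(buf, len, "\n");

            return len;
    }
    static DEVICE_ATTR_RO(things);
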
@@ -374,7 +374,7 @@ static DEVICE_ATTR_RO(removable);
static ssize_t block_size_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%lx\n", memory_block_size_bytes());
+ return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}
static DEVICE_ATTR_RO(block_size_bytes);
@@ -386,8 +386,8 @@ static DEVICE_ATTR_RO(block_size_bytes);
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%s\n",
- online_type_to_str[memhp_default_online_type]);
+ return sysfs_emit(buf, "%s\n",
+ online_type_to_str[memhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -432,7 +432,8 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
nid = memory_add_physaddr_to_nid(phys_addr);
ret = __add_memory(nid, phys_addr,
- MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block,
+ MHP_NONE);
if (ret)
goto out;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 50af16e68d98..6ffa470e2984 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -46,19 +46,23 @@ static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
return n;
}
-static inline ssize_t node_read_cpumask(struct device *dev,
- struct device_attribute *attr, char *buf)
+static inline ssize_t cpumap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return node_read_cpumap(dev, false, buf);
}
-static inline ssize_t node_read_cpulist(struct device *dev,
- struct device_attribute *attr, char *buf)
+
+static DEVICE_ATTR_RO(cpumap);
+
+static inline ssize_t cpulist_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return node_read_cpumap(dev, true, buf);
}
-static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
-static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR_RO(cpulist);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -153,19 +157,20 @@ free:
}
#ifdef CONFIG_HMEM_REPORTING
-#define ACCESS_ATTR(name) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
-} \
-static DEVICE_ATTR_RO(name);
-
-ACCESS_ATTR(read_bandwidth)
-ACCESS_ATTR(read_latency)
-ACCESS_ATTR(write_bandwidth)
-ACCESS_ATTR(write_latency)
+#define ACCESS_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sysfs_emit(buf, "%u\n", \
+ to_access_nodes(dev)->hmem_attrs.name); \
+} \
+static DEVICE_ATTR_RO(name)
+
+ACCESS_ATTR(read_bandwidth);
+ACCESS_ATTR(read_latency);
+ACCESS_ATTR(write_bandwidth);
+ACCESS_ATTR(write_latency);
static struct attribute *access_attrs[] = {
&dev_attr_read_bandwidth.attr,
@@ -225,7 +230,8 @@ static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
+ return sysfs_emit(buf, fmt "\n", \
+ to_cache_info(dev)->cache_attrs.name); \
} \
DEVICE_ATTR_RO(name);
@@ -361,7 +367,7 @@ static void node_remove_caches(struct node *node) { }
static ssize_t node_read_meminfo(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int n;
+ int len = 0;
int nid = dev->id;
struct pglist_data *pgdat = NODE_DATA(nid);
struct sysinfo i;
@@ -370,128 +376,128 @@ static ssize_t node_read_meminfo(struct device *dev,
si_meminfo_node(&i, nid);
sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
- n = sprintf(buf,
- "Node %d MemTotal: %8lu kB\n"
- "Node %d MemFree: %8lu kB\n"
- "Node %d MemUsed: %8lu kB\n"
- "Node %d Active: %8lu kB\n"
- "Node %d Inactive: %8lu kB\n"
- "Node %d Active(anon): %8lu kB\n"
- "Node %d Inactive(anon): %8lu kB\n"
- "Node %d Active(file): %8lu kB\n"
- "Node %d Inactive(file): %8lu kB\n"
- "Node %d Unevictable: %8lu kB\n"
- "Node %d Mlocked: %8lu kB\n",
- nid, K(i.totalram),
- nid, K(i.freeram),
- nid, K(i.totalram - i.freeram),
- nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
- node_page_state(pgdat, NR_ACTIVE_FILE)),
- nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
- node_page_state(pgdat, NR_INACTIVE_FILE)),
- nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
- nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
- nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
- nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
- nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
- nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
+ len = sysfs_emit_at(buf, len,
+ "Node %d MemTotal: %8lu kB\n"
+ "Node %d MemFree: %8lu kB\n"
+ "Node %d MemUsed: %8lu kB\n"
+ "Node %d Active: %8lu kB\n"
+ "Node %d Inactive: %8lu kB\n"
+ "Node %d Active(anon): %8lu kB\n"
+ "Node %d Inactive(anon): %8lu kB\n"
+ "Node %d Active(file): %8lu kB\n"
+ "Node %d Inactive(file): %8lu kB\n"
+ "Node %d Unevictable: %8lu kB\n"
+ "Node %d Mlocked: %8lu kB\n",
+ nid, K(i.totalram),
+ nid, K(i.freeram),
+ nid, K(i.totalram - i.freeram),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
+ node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
+ node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
+ nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
#ifdef CONFIG_HIGHMEM
- n += sprintf(buf + n,
- "Node %d HighTotal: %8lu kB\n"
- "Node %d HighFree: %8lu kB\n"
- "Node %d LowTotal: %8lu kB\n"
- "Node %d LowFree: %8lu kB\n",
- nid, K(i.totalhigh),
- nid, K(i.freehigh),
- nid, K(i.totalram - i.totalhigh),
- nid, K(i.freeram - i.freehigh));
+ len += sysfs_emit_at(buf, len,
+ "Node %d HighTotal: %8lu kB\n"
+ "Node %d HighFree: %8lu kB\n"
+ "Node %d LowTotal: %8lu kB\n"
+ "Node %d LowFree: %8lu kB\n",
+ nid, K(i.totalhigh),
+ nid, K(i.freehigh),
+ nid, K(i.totalram - i.totalhigh),
+ nid, K(i.freeram - i.freehigh));
#endif
- n += sprintf(buf + n,
- "Node %d Dirty: %8lu kB\n"
- "Node %d Writeback: %8lu kB\n"
- "Node %d FilePages: %8lu kB\n"
- "Node %d Mapped: %8lu kB\n"
- "Node %d AnonPages: %8lu kB\n"
- "Node %d Shmem: %8lu kB\n"
- "Node %d KernelStack: %8lu kB\n"
+ len += sysfs_emit_at(buf, len,
+ "Node %d Dirty: %8lu kB\n"
+ "Node %d Writeback: %8lu kB\n"
+ "Node %d FilePages: %8lu kB\n"
+ "Node %d Mapped: %8lu kB\n"
+ "Node %d AnonPages: %8lu kB\n"
+ "Node %d Shmem: %8lu kB\n"
+ "Node %d KernelStack: %8lu kB\n"
#ifdef CONFIG_SHADOW_CALL_STACK
- "Node %d ShadowCallStack:%8lu kB\n"
+ "Node %d ShadowCallStack:%8lu kB\n"
#endif
- "Node %d PageTables: %8lu kB\n"
- "Node %d NFS_Unstable: %8lu kB\n"
- "Node %d Bounce: %8lu kB\n"
- "Node %d WritebackTmp: %8lu kB\n"
- "Node %d KReclaimable: %8lu kB\n"
- "Node %d Slab: %8lu kB\n"
- "Node %d SReclaimable: %8lu kB\n"
- "Node %d SUnreclaim: %8lu kB\n"
+ "Node %d PageTables: %8lu kB\n"
+ "Node %d NFS_Unstable: %8lu kB\n"
+ "Node %d Bounce: %8lu kB\n"
+ "Node %d WritebackTmp: %8lu kB\n"
+ "Node %d KReclaimable: %8lu kB\n"
+ "Node %d Slab: %8lu kB\n"
+ "Node %d SReclaimable: %8lu kB\n"
+ "Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- "Node %d AnonHugePages: %8lu kB\n"
- "Node %d ShmemHugePages: %8lu kB\n"
- "Node %d ShmemPmdMapped: %8lu kB\n"
- "Node %d FileHugePages: %8lu kB\n"
- "Node %d FilePmdMapped: %8lu kB\n"
+ "Node %d AnonHugePages: %8lu kB\n"
+ "Node %d ShmemHugePages: %8lu kB\n"
+ "Node %d ShmemPmdMapped: %8lu kB\n"
+ "Node %d FileHugePages: %8lu kB\n"
+ "Node %d FilePmdMapped: %8lu kB\n"
#endif
- ,
- nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
- nid, K(node_page_state(pgdat, NR_WRITEBACK)),
- nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
- nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
- nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
- nid, K(i.sharedram),
- nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
+ ,
+ nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+ nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
+ nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
+ nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
+ nid, K(i.sharedram),
+ nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
- nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
+ nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
- nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
- nid, 0UL,
- nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
- nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
- nid, K(sreclaimable +
- node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
- nid, K(sreclaimable + sunreclaimable),
- nid, K(sreclaimable),
- nid, K(sunreclaimable)
+ nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
+ nid, 0UL,
+ nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, K(sreclaimable +
+ node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
+ nid, K(sreclaimable + sunreclaimable),
+ nid, K(sreclaimable),
+ nid, K(sunreclaimable)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ,
- nid, K(node_page_state(pgdat, NR_ANON_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_THPS) *
- HPAGE_PMD_NR),
- nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
- HPAGE_PMD_NR)
+ ,
+ nid, K(node_page_state(pgdat, NR_ANON_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_FILE_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
+ HPAGE_PMD_NR)
#endif
- );
- n += hugetlb_report_node_meminfo(nid, buf + n);
- return n;
+ );
+ len += hugetlb_report_node_meminfo(buf, len, nid);
+ return len;
}
#undef K
-static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
static ssize_t node_read_numastat(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf,
- "numa_hit %lu\n"
- "numa_miss %lu\n"
- "numa_foreign %lu\n"
- "interleave_hit %lu\n"
- "local_node %lu\n"
- "other_node %lu\n",
- sum_zone_numa_state(dev->id, NUMA_HIT),
- sum_zone_numa_state(dev->id, NUMA_MISS),
- sum_zone_numa_state(dev->id, NUMA_FOREIGN),
- sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
- sum_zone_numa_state(dev->id, NUMA_LOCAL),
- sum_zone_numa_state(dev->id, NUMA_OTHER));
+ return sysfs_emit(buf,
+ "numa_hit %lu\n"
+ "numa_miss %lu\n"
+ "numa_foreign %lu\n"
+ "interleave_hit %lu\n"
+ "local_node %lu\n"
+ "other_node %lu\n",
+ sum_zone_numa_state(dev->id, NUMA_HIT),
+ sum_zone_numa_state(dev->id, NUMA_MISS),
+ sum_zone_numa_state(dev->id, NUMA_FOREIGN),
+ sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_numa_state(dev->id, NUMA_LOCAL),
+ sum_zone_numa_state(dev->id, NUMA_OTHER));
}
-static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
static ssize_t node_read_vmstat(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -499,28 +505,31 @@ static ssize_t node_read_vmstat(struct device *dev,
int nid = dev->id;
struct pglist_data *pgdat = NODE_DATA(nid);
int i;
- int n = 0;
+ int len = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n", zone_stat_name(i),
- sum_zone_node_page_state(nid, i));
+ len += sysfs_emit_at(buf, len, "%s %lu\n",
+ zone_stat_name(i),
+ sum_zone_node_page_state(nid, i));
#ifdef CONFIG_NUMA
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n", numa_stat_name(i),
- sum_zone_numa_state(nid, i));
-#endif
+ len += sysfs_emit_at(buf, len, "%s %lu\n",
+ numa_stat_name(i),
+ sum_zone_numa_state(nid, i));
+#endif
for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
- n += sprintf(buf+n, "%s %lu\n", node_stat_name(i),
- node_page_state_pages(pgdat, i));
+ len += sysfs_emit_at(buf, len, "%s %lu\n",
+ node_stat_name(i),
+ node_page_state_pages(pgdat, i));
- return n;
+ return len;
}
-static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);
static ssize_t node_read_distance(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
int nid = dev->id;
int len = 0;
@@ -532,13 +541,15 @@ static ssize_t node_read_distance(struct device *dev,
*/
BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
- for_each_online_node(i)
- len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
+ for_each_online_node(i) {
+ len += sysfs_emit_at(buf, len, "%s%d",
+ i ? " " : "", node_distance(nid, i));
+ }
- len += sprintf(buf + len, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
return len;
}
-static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
static struct attribute *node_dev_attrs[] = {
&dev_attr_cpumap.attr,
@@ -761,8 +772,8 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
return pfn_to_nid(pfn);
}
-static int do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk)
+static void do_register_memory_block_under_node(int nid,
+ struct memory_block *mem_blk)
{
int ret;
@@ -775,12 +786,19 @@ static int do_register_memory_block_under_node(int nid,
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
- if (ret)
- return ret;
+ if (ret && ret != -EEXIST)
+ dev_err_ratelimited(&node_devices[nid]->dev,
+ "can't create link to %s in sysfs (%d)\n",
+ kobject_name(&mem_blk->dev.kobj), ret);
- return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
&node_devices[nid]->dev.kobj,
kobject_name(&node_devices[nid]->dev.kobj));
+ if (ret && ret != -EEXIST)
+ dev_err_ratelimited(&mem_blk->dev,
+ "can't create link to %s in sysfs (%d)\n",
+ kobject_name(&node_devices[nid]->dev.kobj),
+ ret);
}
/* register memory section under specified node if it spans that node */
@@ -816,7 +834,8 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
- return do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk);
+ return 0;
}
/* mem section does not span the specified node */
return 0;
@@ -831,7 +850,8 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- return do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk);
+ return 0;
}
/*
@@ -849,8 +869,8 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
- enum meminit_context context)
+void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
+ enum meminit_context context)
{
walk_memory_blocks_func_t func;
@@ -859,9 +879,9 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
else
func = register_mem_block_under_node_early;
- return walk_memory_blocks(PFN_PHYS(start_pfn),
- PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
- func);
+ walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
+ (void *)&nid, func);
}
#ifdef CONFIG_HUGETLBFS
@@ -970,17 +990,6 @@ void unregister_one_node(int nid)
* node states attributes
*/
-static ssize_t print_nodes_state(enum node_states state, char *buf)
-{
- int n;
-
- n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- nodemask_pr_args(&node_states[state]));
- buf[n++] = '\n';
- buf[n] = '\0';
- return n;
-}
-
struct node_attr {
struct device_attribute attr;
enum node_states state;
@@ -990,7 +999,9 @@ static ssize_t show_node_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
- return print_nodes_state(na->state, buf);
+
+ return sysfs_emit(buf, "%*pbl\n",
+ nodemask_pr_args(&node_states[na->state]));
}
#define _NODE_ATTR(name, state) \
@@ -1005,6 +1016,8 @@ static struct node_attr node_state_attr[] = {
#endif
[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
+ [N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
+ N_GENERIC_INITIATOR),
};
static struct attribute *node_state_attrs[] = {
@@ -1016,6 +1029,7 @@ static struct attribute *node_state_attrs[] = {
#endif
&node_state_attr[N_MEMORY].attr.attr,
&node_state_attr[N_CPU].attr.attr,
+ &node_state_attr[N_GENERIC_INITIATOR].attr.attr,
NULL
};
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e5d8a0503b4f..88aef93eb4dd 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -45,6 +45,8 @@ EXPORT_SYMBOL_GPL(platform_bus);
* @dev: platform device
* @type: resource type
* @num: resource index
+ *
+ * Return: a pointer to the resource or NULL on failure.
*/
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
@@ -70,6 +72,9 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* resource management
* @index: resource index
* @res: optional output parameter to store a pointer to the obtained resource.
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
@@ -91,6 +96,9 @@ EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
* @pdev: platform device to use both for memory resource lookup as well as
* resource management
* @index: resource index
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
@@ -106,6 +114,9 @@ EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
* @pdev: platform device to use both for memory resource lookup as well as
* resource management
* @index: resource index
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
unsigned int index)
@@ -124,6 +135,9 @@ void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
* @pdev: platform device to use both for memory resource lookup as well as
* resource management
* @name: name of the resource
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
*/
void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
@@ -559,7 +573,7 @@ int platform_device_add(struct platform_device *pdev)
* that we remember it must be freed, and we append a suffix
* to avoid namespace collision with explicit IDs.
*/
- ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
if (ret < 0)
goto err_out;
pdev->id = ret;
@@ -600,7 +614,7 @@ int platform_device_add(struct platform_device *pdev)
failed:
if (pdev->id_auto) {
- ida_simple_remove(&platform_devid_ida, pdev->id);
+ ida_free(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
@@ -631,7 +645,7 @@ void platform_device_del(struct platform_device *pdev)
device_del(&pdev->dev);
if (pdev->id_auto) {
- ida_simple_remove(&platform_devid_ida, pdev->id);
+ ida_free(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
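
ida_simple_get(&ida, 0, 0, gfp) and ida_alloc(&ida, gfp) are equivalent: both return the smallest free ID from 0 upwards, or a negative errno. A minimal sketch of the modern pairing, with an illustrative IDA:

    static DEFINE_IDA(example_ida);

    static int example_get_id(void)
    {
            int id = ida_alloc(&example_ida, GFP_KERNEL);

            if (id < 0)
                    return id;      /* -ENOMEM or -ENOSPC */
            /* ... use id ... */
            ida_free(&example_ida, id);
            return 0;
    }
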
@@ -1009,10 +1023,10 @@ EXPORT_SYMBOL_GPL(platform_unregister_drivers);
* (b) sysfs attribute lets new-style coldplug recover from hotplug events
* mishandled before system is fully running: "modprobe $(cat modalias)"
*/
-static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
- char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev);
int len;
len = of_device_modalias(dev, buf, PAGE_SIZE);
@@ -1023,9 +1037,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
if (len != -ENODEV)
return len;
- len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+ return sysfs_emit(buf, "platform:%s\n", pdev->name);
}
static DEVICE_ATTR_RO(modalias);
@@ -1070,16 +1082,17 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = sprintf(buf, "%s\n", pdev->driver_override);
+ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
device_unlock(dev);
+
return len;
}
static DEVICE_ATTR_RW(driver_override);
static ssize_t numa_node_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_node(dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2cb5e04cf86c..743268996336 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -123,7 +123,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p) p->lock_ops->unlock(p)
-#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
+#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
@@ -222,7 +222,7 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
* out of off and so update the idle time and vice
* versa.
*/
- if (genpd->status == GPD_STATE_ACTIVE) {
+ if (genpd->status == GENPD_STATE_ON) {
int state_idx = genpd->state_idx;
genpd->states[state_idx].idle_time =
@@ -415,26 +415,45 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
s64 elapsed_ns;
int ret;
+ /* Notify consumers that we are about to power on. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_OFF, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
if (!genpd->power_on)
- return 0;
+ goto out;
+
+ if (!timed) {
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto err;
- if (!timed)
- return genpd->power_on(genpd);
+ goto out;
+ }
time_start = ktime_get();
ret = genpd->power_on(genpd);
if (ret)
- return ret;
+ goto err;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
- return ret;
+ goto out;
genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return 0;
+err:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
return ret;
}
@@ -445,27 +464,46 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
s64 elapsed_ns;
int ret;
+ /* Notify consumers that we are about to power off. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_OFF,
+ GENPD_NOTIFY_ON, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
if (!genpd->power_off)
- return 0;
+ goto out;
- if (!timed)
- return genpd->power_off(genpd);
+ if (!timed) {
+ ret = genpd->power_off(genpd);
+ if (ret)
+ goto busy;
+
+ goto out;
+ }
time_start = ktime_get();
ret = genpd->power_off(genpd);
if (ret)
- return ret;
+ goto busy;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
- return 0;
+ goto out;
genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
return 0;
+busy:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return ret;
}
/**
@@ -497,6 +535,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
struct pm_domain_data *pdd;
struct gpd_link *link;
unsigned int not_suspended = 0;
+ int ret;
/*
* Do not try to power off the domain in the following situations:
@@ -544,27 +583,19 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
if (!genpd->gov)
genpd->state_idx = 0;
- if (genpd->power_off) {
- int ret;
-
- if (atomic_read(&genpd->sd_count) > 0)
- return -EBUSY;
+ /* Don't power off if a child domain is waiting to power on. */
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
- /*
- * If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call genpd_power_on() for the parent yet after
- * incrementing it. In that case genpd_power_on() will wait
- * for us to drop the lock, so we can call .power_off() and let
- * the genpd_power_on() restore power for us (this shouldn't
- * happen very often).
- */
- ret = _genpd_power_off(genpd, true);
- if (ret)
- return ret;
+ ret = _genpd_power_off(genpd, true);
+ if (ret) {
+ genpd->states[genpd->state_idx].rejected++;
+ return ret;
}
- genpd->status = GPD_STATE_POWER_OFF;
+ genpd->status = GENPD_STATE_OFF;
genpd_update_accounting(genpd);
+ genpd->states[genpd->state_idx].usage++;
list_for_each_entry(link, &genpd->child_links, child_node) {
genpd_sd_counter_dec(link->parent);
@@ -616,7 +647,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
if (ret)
goto err;
- genpd->status = GPD_STATE_ACTIVE;
+ genpd->status = GENPD_STATE_ON;
genpd_update_accounting(genpd);
return 0;
@@ -961,7 +992,7 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
if (_genpd_power_off(genpd, false))
return;
- genpd->status = GPD_STATE_POWER_OFF;
+ genpd->status = GENPD_STATE_OFF;
list_for_each_entry(link, &genpd->child_links, child_node) {
genpd_sd_counter_dec(link->parent);
@@ -1007,8 +1038,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
}
_genpd_power_on(genpd, false);
-
- genpd->status = GPD_STATE_ACTIVE;
+ genpd->status = GENPD_STATE_ON;
}
/**
@@ -1281,13 +1311,14 @@ static int genpd_restore_noirq(struct device *dev)
* first time for the given domain in the present cycle.
*/
genpd_lock(genpd);
- if (genpd->suspended_count++ == 0)
+ if (genpd->suspended_count++ == 0) {
/*
* The boot kernel might put the domain into arbitrary state,
* so make it appear as powered off to genpd_sync_power_on(),
* so that it tries to power it on in case it was really off.
*/
- genpd->status = GPD_STATE_POWER_OFF;
+ genpd->status = GENPD_STATE_OFF;
+ }
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
@@ -1603,6 +1634,101 @@ int pm_genpd_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
+/**
+ * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that should be associated with the notifier
+ * @nb: The notifier block to register
+ *
+ * Users may call this function to add a genpd power on/off notifier for an
+ * attached @dev. Only one notifier per device is allowed. The notifier is
+ * sent when genpd is powering on/off the PM domain.
+ *
+ * It is assumed that the caller guarantees that the genpd will not be
+ * detached while this routine is called.
+ *
+ * Returns 0 on success and a negative error value on failure.
+ */
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (gpd_data->power_nb)
+ return -EEXIST;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to add notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = nb;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
+
+/**
+ * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that is associated with the notifier
+ *
+ * Users may call this function to remove a genpd power on/off notifier for an
+ * attached @dev.
+ *
+ * It is assumed that the caller guarantees that the genpd will not be
+ * detached while this routine is called.
+ *
+ * Returns 0 on success and a negative error value on failure.
+ */
+int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (!gpd_data->power_nb)
+ return -ENODEV;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
+ gpd_data->power_nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to remove notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
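
A consumer-side sketch of the new notifier API, with invented names; a GENPD_NOTIFY_PRE_OFF handler may veto the power off by returning NOTIFY_BAD, in which case the robust call chain rolls already-notified callbacks back with the opposite event:

    static int example_genpd_cb(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            switch (action) {
            case GENPD_NOTIFY_PRE_OFF:
                    /* Save context here; return NOTIFY_BAD to veto power off. */
                    break;
            case GENPD_NOTIFY_ON:
                    /* Domain powered on again: restore context. */
                    break;
            default:
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_genpd_cb,
    };

    /* In probe, once the device is attached to its PM domain: */
    static int example_probe(struct device *dev)
    {
            return dev_pm_genpd_add_notifier(dev, &example_nb);
    }
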
+
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
@@ -1773,11 +1899,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
INIT_LIST_HEAD(&genpd->parent_links);
INIT_LIST_HEAD(&genpd->child_links);
INIT_LIST_HEAD(&genpd->dev_list);
+ RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
- genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
@@ -2044,8 +2171,9 @@ int of_genpd_add_provider_simple(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
if (ret) {
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+ ret);
goto unlock;
}
@@ -2054,7 +2182,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
* state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
- WARN_ON(!genpd->opp_table);
+ WARN_ON(IS_ERR(genpd->opp_table));
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
@@ -2111,8 +2239,9 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+ i, ret);
goto error;
}
@@ -2121,7 +2250,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
* performance state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
- WARN_ON(!genpd->opp_table);
+ WARN_ON(IS_ERR(genpd->opp_table));
}
genpd->provider = &np->fwnode;
@@ -2802,8 +2931,8 @@ static int genpd_summary_one(struct seq_file *s,
struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
- [GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_POWER_OFF] = "off"
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
};
struct pm_domain_data *pm_data;
const char *kobj_path;
@@ -2881,8 +3010,8 @@ static int summary_show(struct seq_file *s, void *data)
static int status_show(struct seq_file *s, void *data)
{
static const char * const status_lookup[] = {
- [GPD_STATE_ACTIVE] = "on",
- [GPD_STATE_POWER_OFF] = "off"
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
};
struct generic_pm_domain *genpd = s->private;
@@ -2895,7 +3024,7 @@ static int status_show(struct seq_file *s, void *data)
if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- if (genpd->status == GPD_STATE_POWER_OFF)
+ if (genpd->status == GENPD_STATE_OFF)
seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
genpd->state_idx);
else
@@ -2932,19 +3061,20 @@ static int idle_states_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- seq_puts(s, "State Time Spent(ms)\n");
+ seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
for (i = 0; i < genpd->state_count; i++) {
ktime_t delta = 0;
s64 msecs;
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ if ((genpd->status == GENPD_STATE_OFF) &&
(genpd->state_idx == i))
delta = ktime_sub(ktime_get(), genpd->accounting_time);
msecs = ktime_to_ms(
ktime_add(genpd->states[i].idle_time, delta));
- seq_printf(s, "S%-13i %lld\n", i, msecs);
+ seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
+ genpd->states[i].usage, genpd->states[i].rejected);
}
genpd_unlock(genpd);
@@ -2961,7 +3091,7 @@ static int active_time_show(struct seq_file *s, void *data)
if (ret)
return -ERESTARTSYS;
- if (genpd->status == GPD_STATE_ACTIVE)
+ if (genpd->status == GENPD_STATE_ON)
delta = ktime_sub(ktime_get(), genpd->accounting_time);
seq_printf(s, "%lld ms\n", ktime_to_ms(
@@ -2984,7 +3114,7 @@ static int total_idle_time_show(struct seq_file *s, void *data)
for (i = 0; i < genpd->state_count; i++) {
- if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ if ((genpd->status == GENPD_STATE_OFF) &&
(genpd->state_idx == i))
delta = ktime_sub(ktime_get(), genpd->accounting_time);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 205a06752ca9..c7ac49042cee 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -363,7 +363,6 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw;
- break;
case PM_EVENT_RESTORE:
return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8143210a5c54..bfda153b1a41 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
device_links_read_lock_held()) {
int retval;
- if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
- READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
retval = pm_runtime_get_sync(link->supplier);
@@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
device_links_read_lock_held()) {
- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
- continue;
while (refcount_dec_not_one(&link->rpm_active))
pm_runtime_put(link->supplier);
@@ -1646,42 +1643,6 @@ void pm_runtime_remove(struct device *dev)
}
/**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
- struct device_link *link;
- int idx;
-
- idx = device_links_read_lock();
-
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
- device_links_read_lock_held()) {
- if (!(link->flags & DL_FLAG_MANAGED))
- continue;
-
- while (refcount_dec_not_one(&link->rpm_active))
- pm_runtime_put_noidle(dev);
- }
-
- device_links_read_unlock(idx);
-}
-
-/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
@@ -1732,7 +1693,7 @@ void pm_runtime_new_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
@@ -1740,6 +1701,25 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the
+ * supplier device's runtime PM usage counter as many times as needed to
+ * drop all of the runtime PM references to it taken on behalf of the
+ * consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+
+ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+}
+
static bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index c7b24812523c..a1474fb67db9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -101,8 +101,8 @@ static const char ctrl_on[] = "on";
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n",
- dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+ return sysfs_emit(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
@@ -122,67 +122,71 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(control);
static ssize_t runtime_active_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- int ret;
u64 tmp = pm_runtime_active_time(dev);
+
do_div(tmp, NSEC_PER_MSEC);
- ret = sprintf(buf, "%llu\n", tmp);
- return ret;
+
+ return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_active_time);
static ssize_t runtime_suspended_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- int ret;
u64 tmp = pm_runtime_suspended_time(dev);
+
do_div(tmp, NSEC_PER_MSEC);
- ret = sprintf(buf, "%llu\n", tmp);
- return ret;
+
+ return sysfs_emit(buf, "%llu\n", tmp);
}
static DEVICE_ATTR_RO(runtime_suspended_time);
static ssize_t runtime_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- const char *p;
+ const char *output;
if (dev->power.runtime_error) {
- p = "error\n";
+ output = "error";
} else if (dev->power.disable_depth) {
- p = "unsupported\n";
+ output = "unsupported";
} else {
switch (dev->power.runtime_status) {
case RPM_SUSPENDED:
- p = "suspended\n";
+ output = "suspended";
break;
case RPM_SUSPENDING:
- p = "suspending\n";
+ output = "suspending";
break;
case RPM_RESUMING:
- p = "resuming\n";
+ output = "resuming";
break;
case RPM_ACTIVE:
- p = "active\n";
+ output = "active";
break;
default:
return -EIO;
}
}
- return sprintf(buf, p);
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
if (!dev->power.use_autosuspend)
return -EIO;
- return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
+
+ return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}
static ssize_t autosuspend_delay_ms_store(struct device *dev,
@@ -211,11 +215,11 @@ static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
- return sprintf(buf, "n/a\n");
+ return sysfs_emit(buf, "n/a\n");
if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
@@ -255,11 +259,11 @@ static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
- return sprintf(buf, "auto\n");
+ return sysfs_emit(buf, "%s\n", "auto");
if (value == PM_QOS_LATENCY_ANY)
- return sprintf(buf, "any\n");
+ return sysfs_emit(buf, "%s\n", "any");
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
@@ -291,8 +295,8 @@ static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
- & PM_QOS_FLAG_NO_POWER_OFF));
+ return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
}
static ssize_t pm_qos_no_power_off_store(struct device *dev,
@@ -320,9 +324,9 @@ static const char _disabled[] = "disabled";
static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", device_can_wakeup(dev)
- ? (device_may_wakeup(dev) ? _enabled : _disabled)
- : "");
+ return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
+ : "");
}
static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
@@ -345,7 +349,7 @@ static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -354,7 +358,10 @@ static ssize_t wakeup_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_count);
@@ -363,7 +370,7 @@ static ssize_t wakeup_active_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -372,7 +379,10 @@ static ssize_t wakeup_active_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_active_count);
@@ -381,7 +391,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -390,7 +400,10 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_abort_count);
@@ -399,7 +412,7 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long count = 0;
+ unsigned long count;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -408,7 +421,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
}
static DEVICE_ATTR_RO(wakeup_expire_count);
@@ -416,7 +432,7 @@ static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned int active = 0;
+ unsigned int active;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -425,7 +441,10 @@ static ssize_t wakeup_active_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%u\n", active);
}
static DEVICE_ATTR_RO(wakeup_active);
@@ -434,7 +453,7 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -443,7 +462,10 @@ static ssize_t wakeup_total_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_total_time_ms);
@@ -451,7 +473,7 @@ static DEVICE_ATTR_RO(wakeup_total_time_ms);
static ssize_t wakeup_max_time_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -460,7 +482,10 @@ static ssize_t wakeup_max_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_max_time_ms);
@@ -469,7 +494,7 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -478,7 +503,10 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
@@ -496,7 +524,7 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- s64 msec = 0;
+ s64 msec;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
@@ -505,7 +533,10 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
- return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
}
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
@@ -522,7 +553,7 @@ static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
static ssize_t runtime_usage_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);
@@ -530,21 +561,26 @@ static ssize_t runtime_active_kids_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev->power.ignore_children ?
- 0 : atomic_read(&dev->power.child_count));
+ return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);
static ssize_t runtime_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (dev->power.disable_depth && (dev->power.runtime_auto == false))
- return sprintf(buf, "disabled & forbidden\n");
- if (dev->power.disable_depth)
- return sprintf(buf, "disabled\n");
- if (dev->power.runtime_auto == false)
- return sprintf(buf, "forbidden\n");
- return sprintf(buf, "enabled\n");
+ const char *output;
+
+ if (dev->power.disable_depth && !dev->power.runtime_auto)
+ output = "disabled & forbidden";
+ else if (dev->power.disable_depth)
+ output = "disabled";
+ else if (!dev->power.runtime_auto)
+ output = "forbidden";
+ else
+ output = "enabled";
+
+ return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);
@@ -552,9 +588,9 @@ static DEVICE_ATTR_RO(runtime_enabled);
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n",
- device_async_suspend_enabled(dev) ?
- _enabled : _disabled);
+ return sysfs_emit(buf, "%s\n",
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
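The conversions above all follow one pattern; a minimal sketch of a show() callback using sysfs_emit() (attribute name invented): unlike sprintf(), sysfs_emit() knows the buffer is a PAGE_SIZE sysfs buffer and refuses to write past it.

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct device *dev, struct device_attribute *attr,
				    char *buf)
	{
		/* sysfs_emit() clamps output to the PAGE_SIZE sysfs buffer */
		return sysfs_emit(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);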
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index c7734914d914..d638259b829a 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -26,7 +26,7 @@ static ssize_t _name##_show(struct device *dev, \
{ \
struct wakeup_source *ws = dev_get_drvdata(dev); \
\
- return sprintf(buf, "%lu\n", ws->_name); \
+ return sysfs_emit(buf, "%lu\n", ws->_name); \
} \
static DEVICE_ATTR_RO(_name)
@@ -42,7 +42,7 @@ static ssize_t active_time_ms_show(struct device *dev,
ktime_t active_time =
ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
- return sprintf(buf, "%lld\n", ktime_to_ms(active_time));
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time));
}
static DEVICE_ATTR_RO(active_time_ms);
@@ -57,7 +57,8 @@ static ssize_t total_time_ms_show(struct device *dev,
active_time = ktime_sub(ktime_get(), ws->last_time);
total_time = ktime_add(total_time, active_time);
}
- return sprintf(buf, "%lld\n", ktime_to_ms(total_time));
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time));
}
static DEVICE_ATTR_RO(total_time_ms);
@@ -73,7 +74,8 @@ static ssize_t max_time_ms_show(struct device *dev,
if (active_time > max_time)
max_time = active_time;
}
- return sprintf(buf, "%lld\n", ktime_to_ms(max_time));
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time));
}
static DEVICE_ATTR_RO(max_time_ms);
@@ -82,7 +84,7 @@ static ssize_t last_change_ms_show(struct device *dev,
{
struct wakeup_source *ws = dev_get_drvdata(dev);
- return sprintf(buf, "%lld\n", ktime_to_ms(ws->last_time));
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time));
}
static DEVICE_ATTR_RO(last_change_ms);
@@ -91,7 +93,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct wakeup_source *ws = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ws->name);
+ return sysfs_emit(buf, "%s\n", ws->name);
}
static DEVICE_ATTR_RO(name);
@@ -106,7 +108,8 @@ static ssize_t prevent_suspend_time_ms_show(struct device *dev,
prevent_sleep_time = ktime_add(prevent_sleep_time,
ktime_sub(ktime_get(), ws->start_prevent_time));
}
- return sprintf(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
}
static DEVICE_ATTR_RO(prevent_suspend_time_ms);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d58aa98fe964..4c43d30145c6 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1184,3 +1184,76 @@ const void *device_get_match_data(struct device *dev)
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
+
+static void *
+fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+ void *data, devcon_match_fn_t match)
+{
+ struct fwnode_handle *node;
+ struct fwnode_handle *ep;
+ void *ret;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ node = fwnode_graph_get_remote_port_parent(ep);
+ if (!fwnode_device_is_available(node)) {
+ fwnode_handle_put(node);
+ continue;
+ }
+
+ ret = match(node, con_id, data);
+ fwnode_handle_put(node);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return ret;
+ }
+ }
+ return NULL;
+}
+
+static void *
+fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id,
+ void *data, devcon_match_fn_t match)
+{
+ struct fwnode_handle *node;
+ void *ret;
+ int i;
+
+ for (i = 0; ; i++) {
+ node = fwnode_find_reference(fwnode, con_id, i);
+ if (IS_ERR(node))
+ break;
+
+ ret = match(node, NULL, data);
+ fwnode_handle_put(node);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
+/**
+ * fwnode_connection_find_match - Find connection from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @fwnode and another
+ * device node. @match will be used to convert the connection description to
+ * data the caller is expecting to be returned.
+ */
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ void *ret;
+
+ if (!fwnode || !match)
+ return NULL;
+
+ ret = fwnode_graph_devcon_match(fwnode, con_id, data, match);
+ if (ret)
+ return ret;
+
+ return fwnode_devcon_match(fwnode, con_id, data, match);
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
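A rough sketch of how a caller might use the new helper; the match callback, connection name and cookie are illustrative only:

	/* Hypothetical match callback conforming to devcon_match_fn_t. */
	static void *example_match(struct fwnode_handle *fwnode, const char *con_id,
				   void *data)
	{
		/* Return a non-NULL cookie (e.g. a device) when @fwnode matches. */
		return NULL;
	}

	static void *example_find_connection(struct device *dev)
	{
		return fwnode_connection_find_match(dev_fwnode(dev), "example-conn",
						    NULL, example_match);
	}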
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 1d1d26b0d279..bcb90d8c3960 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM)
select IRQ_DOMAIN if REGMAP_IRQ
bool
@@ -53,3 +53,7 @@ config REGMAP_SCCB
config REGMAP_I3C
tristate
depends on I3C
+
+config REGMAP_SPI_AVMM
+ tristate
+ depends on SPI
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index ff6c7d8ec1cd..ac1b69ee4051 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o
+obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 7be2fcfeea52..0097696c31de 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -161,6 +161,9 @@ struct regmap {
void *selector_work_buf; /* Scratch buffer used for selector */
struct hwspinlock *hwlock;
+
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index b6d63ef16b44..8dfac7f3ed7a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -183,7 +183,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
{
/* Calculate the length of a fixed format */
if (!map->debugfs_tot_len) {
- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register),
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
map->debugfs_val_len = 2 * map->format.val_bytes;
map->debugfs_tot_len = map->debugfs_reg_len +
map->debugfs_val_len + 3; /* : \n */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 369a57e6f89d..ad5c2de395d1 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -168,6 +168,14 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret = regmap_write(map, reg, ~d->mask_buf[i]);
else
ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (d->chip->clear_ack) {
+ if (d->chip->ack_invert && !ret)
+ ret = regmap_write(map, reg,
+ d->mask_buf[i]);
+ else if (!ret)
+ ret = regmap_write(map, reg,
+ ~d->mask_buf[i]);
+ }
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
@@ -493,7 +501,20 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * data->irq_reg_stride);
- ret = regmap_write(map, reg, data->status_buf[i]);
+ if (chip->ack_invert)
+ ret = regmap_write(map, reg,
+ ~data->status_buf[i]);
+ else
+ ret = regmap_write(map, reg,
+ data->status_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+ ret = regmap_write(map, reg,
+ data->status_buf[i]);
+ else if (!ret)
+ ret = regmap_write(map, reg,
+ ~data->status_buf[i]);
+ }
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
@@ -722,6 +743,16 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
else
ret = regmap_write(map, reg,
d->status_buf[i] & d->mask_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+ ret = regmap_write(map, reg,
+ (d->status_buf[i] &
+ d->mask_buf[i]));
+ else if (!ret)
+ ret = regmap_write(map, reg,
+ ~(d->status_buf[i] &
+ d->mask_buf[i]));
+ }
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
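The new clear_ack handling writes the ack register a second time, with the inverse pattern, so chips that latch the ack bits see them cleared again. A sketch of a chip description using it; the register addresses are invented, and the clear_ack/ack_invert fields themselves come from the companion include/linux/regmap.h change, which is not shown in this diff:

	static const struct regmap_irq example_irqs[] = {
		REGMAP_IRQ_REG(0, 0, BIT(0)),
	};

	static const struct regmap_irq_chip example_irq_chip = {
		.name = "example",
		.status_base = 0x10,
		.mask_base = 0x14,
		.ack_base = 0x18,
		.use_ack = true,
		.clear_ack = true,	/* write the inverse back to clear the ack */
		.irqs = example_irqs,
		.num_irqs = ARRAY_SIZE(example_irqs),
		.num_regs = 1,
	};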
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 50a66382d87d..c92d614b4943 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -2,7 +2,6 @@
// Copyright(c) 2015-17 Intel Corporation.
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/soundwire/sdw.h>
#include "internal.h"
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
new file mode 100644
index 000000000000..ad1da83e849f
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI AVMM support
+//
+// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+/*
+ * This driver implements the regmap operations for a generic SPI
+ * master to access the registers of an SPI slave chip that has an
+ * Avalon bus in it.
+ *
+ * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
+ * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
+ * bytes from the host to the internal register read/write on Avalon bus. In
+ * order to issue register access requests to the slave chip, the host should
+ * send formatted bytes that conform to the transfer protocol.
+ * The transfer protocol contains 3 layers: transaction layer, packet layer
+ * and physical layer.
+ *
+ * Reference documents can be found at:
+ * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
+ *
+ * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
+ * introduction to the protocol.
+ *
+ * Chapter "Avalon Packets to Transactions Converter Core" describes
+ * the transaction layer.
+ *
+ * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
+ * describes the packet layer.
+ *
+ * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
+ * physical layer.
+ *
+ *
+ * When the host issues a regmap read/write, the driver transforms the request
+ * into a byte stream layer by layer. It formats the register address, value
+ * and length into a transaction layer request, converts that request to a
+ * packet layer byte stream and then to a physical layer byte stream. Finally
+ * the driver sends the formatted byte stream over the SPI bus to the slave
+ * chip.
+ *
+ * The spi-avmm IP on the slave chip decodes the byte stream, initiates the
+ * register read/write on its internal Avalon bus, then encodes the response
+ * into a byte stream and sends it back to the host.
+ *
+ * The driver receives the byte stream, reverses the 3-layer transformation,
+ * and finally gets the response value (the read-out data for a register read,
+ * the successfully written size for a register write).
+ */
+
+#define PKT_SOP 0x7a
+#define PKT_EOP 0x7b
+#define PKT_CHANNEL 0x7c
+#define PKT_ESC 0x7d
+
+#define PHY_IDLE 0x4a
+#define PHY_ESC 0x4d
+
+#define TRANS_CODE_WRITE 0x0
+#define TRANS_CODE_SEQ_WRITE 0x4
+#define TRANS_CODE_READ 0x10
+#define TRANS_CODE_SEQ_READ 0x14
+#define TRANS_CODE_NO_TRANS 0x7f
+
+#define SPI_AVMM_XFER_TIMEOUT (msecs_to_jiffies(200))
+
+/* slave's register addr is 32 bits */
+#define SPI_AVMM_REG_SIZE 4UL
+/* slave's register value is 32 bits */
+#define SPI_AVMM_VAL_SIZE 4UL
+
+/*
+ * The max rx size could be larger, but to limit buffer consumption we cap
+ * a transfer at 1KB.
+ */
+#define MAX_READ_CNT 256UL
+#define MAX_WRITE_CNT 1UL
+
+struct trans_req_header {
+ u8 code;
+ u8 rsvd;
+ __be16 size;
+ __be32 addr;
+} __packed;
+
+struct trans_resp_header {
+ u8 r_code;
+ u8 rsvd;
+ __be16 size;
+} __packed;
+
+#define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header))
+#define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header))
+
+/*
+ * In transaction layer,
+ * the write request format is: Transaction request header + data
+ * the read request format is: Transaction request header
+ * the write response format is: Transaction response header
+ * the read response format is: pure data, no Transaction response header
+ */
+#define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
+#define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE
+#define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT)
+
+#define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
+#define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE
+#define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT)
+
+/* tx & rx share one transaction layer buffer */
+#define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \
+ TRANS_TX_MAX : TRANS_RX_MAX)
+
+/*
+ * In tx phase, the host prepares all the phy layer bytes of a request in the
+ * phy buffer and sends them in a batch.
+ *
+ * The packet layer and physical layer define several special chars for
+ * various purposes; when a transaction layer byte matches one of these
+ * special chars, it must be escaped. The escape rule is: escape char first,
+ * followed by the byte XOR'ed with 0x20.
+ *
+ * This macro defines the max possible length of the phy data. In the worst
+ * case, all transaction layer bytes need to be escaped (so the data length
+ * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
+ * we should make sure the length is aligned to SPI BPW.
+ */
+#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4)
+
+/*
+ * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave, so
+ * the max length of the rx byte stream is unpredictable. The driver therefore
+ * reads words one by one and parses each word into the transaction layer
+ * buffer immediately. Only one word's length of phy buffer is used for rx.
+ */
+#define PHY_BUF_SIZE PHY_TX_MAX
+
+/**
+ * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
+ *
+ * @spi: spi slave associated with this bridge.
+ * @word_len: bytes of word for spi transfer.
+ * @trans_len: length of valid data in trans_buf.
+ * @phy_len: length of valid data in phy_buf.
+ * @trans_buf: the bridge buffer for transaction layer data.
+ * @phy_buf: the bridge buffer for physical layer data.
+ * @swap_words: the word swapping cb for phy data. NULL if not needed.
+ *
+ * As a device's registers are implemented in the AVMM bus address space, the
+ * driver must issue formatted requests to the SPI slave to AVMM bus master
+ * bridge to perform register access.
+ */
+struct spi_avmm_bridge {
+ struct spi_device *spi;
+ unsigned char word_len;
+ unsigned int trans_len;
+ unsigned int phy_len;
+ /* bridge buffer used in translation between protocol layers */
+ char trans_buf[TRANS_BUF_SIZE];
+ char phy_buf[PHY_BUF_SIZE];
+ void (*swap_words)(char *buf, unsigned int len);
+};
+
+static void br_swap_words_32(char *buf, unsigned int len)
+{
+ u32 *p = (u32 *)buf;
+ unsigned int count;
+
+ count = len / 4;
+ while (count--) {
+ *p = swab32p(p);
+ p++;
+ }
+}
+
+/*
+ * Format transaction layer data in br->trans_buf according to the register
+ * access request, and store the valid transaction layer data length in
+ * br->trans_len.
+ */
+static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
+ u32 *wr_val, u32 count)
+{
+ struct trans_req_header *header;
+ unsigned int trans_len;
+ u8 code;
+ __le32 *data;
+ int i;
+
+ if (is_read) {
+ if (count == 1)
+ code = TRANS_CODE_READ;
+ else
+ code = TRANS_CODE_SEQ_READ;
+ } else {
+ if (count == 1)
+ code = TRANS_CODE_WRITE;
+ else
+ code = TRANS_CODE_SEQ_WRITE;
+ }
+
+ header = (struct trans_req_header *)br->trans_buf;
+ header->code = code;
+ header->rsvd = 0;
+ header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
+ header->addr = cpu_to_be32(reg);
+
+ trans_len = TRANS_REQ_HD_SIZE;
+
+ if (!is_read) {
+ trans_len += SPI_AVMM_VAL_SIZE * count;
+ if (trans_len > sizeof(br->trans_buf))
+ return -ENOMEM;
+
+ data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);
+
+ for (i = 0; i < count; i++)
+ *data++ = cpu_to_le32(*wr_val++);
+ }
+
+ /* Store valid trans data length for next layer */
+ br->trans_len = trans_len;
+
+ return 0;
+}
+
+/*
+ * Convert transaction layer data (in br->trans_buf) to phy layer data, store
+ * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy
+ * layer data length in br->phy_len.
+ *
+ * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded
+ * with PHY_IDLE, then the slave will just drop them.
+ *
+ * The driver does not simply pad 4a at the tail. The concern is that the
+ * driver does not store MISO data during the tx phase; if it padded 4a at the
+ * tail, the slave might be fast enough to respond while the padding is still
+ * being sent, and those rx bytes would be lost. In the following case,
+ * 7a,7c,00 would be lost.
+ * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
+ * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
+ *
+ * So the driver moves the EOP and the bytes after it to the end of the
+ * aligned size, then fills the hole with PHY_IDLE, as follows:
+ * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
+ * after pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
+ * The slave then cannot get the entire packet before the tx phase is over,
+ * so it cannot respond during the padding either.
+ */
+static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
+{
+ char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
+ unsigned int aligned_phy_len, move_size;
+ bool need_esc = false;
+
+ tb = br->trans_buf;
+ tb_end = tb + br->trans_len;
+ pb = br->phy_buf;
+ pb_limit = pb + ARRAY_SIZE(br->phy_buf);
+
+ *pb++ = PKT_SOP;
+
+ /*
+ * The driver doesn't support multiple channels so the channel number
+ * is always 0.
+ */
+ *pb++ = PKT_CHANNEL;
+ *pb++ = 0x0;
+
+ for (; pb < pb_limit && tb < tb_end; pb++) {
+ if (need_esc) {
+ *pb = *tb++ ^ 0x20;
+ need_esc = false;
+ continue;
+ }
+
+ /* EOP should be inserted before the last valid char */
+ if (tb == tb_end - 1 && !pb_eop) {
+ *pb = PKT_EOP;
+ pb_eop = pb;
+ continue;
+ }
+
+ /*
+ * insert an ESCAPE char if the data value equals any special
+ * char.
+ */
+ switch (*tb) {
+ case PKT_SOP:
+ case PKT_EOP:
+ case PKT_CHANNEL:
+ case PKT_ESC:
+ *pb = PKT_ESC;
+ need_esc = true;
+ break;
+ case PHY_IDLE:
+ case PHY_ESC:
+ *pb = PHY_ESC;
+ need_esc = true;
+ break;
+ default:
+ *pb = *tb++;
+ break;
+ }
+ }
+
+ /* The phy buffer is used up but transaction layer data remains */
+ if (tb < tb_end)
+ return -ENOMEM;
+
+ /* Store valid phy data length for spi transfer */
+ br->phy_len = pb - br->phy_buf;
+
+ if (br->word_len == 1)
+ return 0;
+
+ /* Do phy buf padding if word_len > 1 byte. */
+ aligned_phy_len = ALIGN(br->phy_len, br->word_len);
+ if (aligned_phy_len > sizeof(br->phy_buf))
+ return -ENOMEM;
+
+ if (aligned_phy_len == br->phy_len)
+ return 0;
+
+ /* move EOP and bytes after EOP to the end of aligned size */
+ move_size = pb - pb_eop;
+ memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);
+
+ /* fill the hole with PHY_IDLEs */
+ memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);
+
+ /* update the phy data length */
+ br->phy_len = aligned_phy_len;
+
+ return 0;
+}
+
+/*
+ * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
+ * ignore rx in tx phase.
+ */
+static int br_do_tx(struct spi_avmm_bridge *br)
+{
+ /* reorder words for spi transfer */
+ if (br->swap_words)
+ br->swap_words(br->phy_buf, br->phy_len);
+
+ /* send all data in phy_buf */
+ return spi_write(br->spi, br->phy_buf, br->phy_len);
+}
+
+/*
+ * This function reads the rx byte stream from SPI word by word and converts
+ * it to transaction layer data in br->trans_buf. It also stores the length of
+ * the rx transaction layer data in br->trans_len.
+ *
+ * The slave may send an unknown number of PHY_IDLEs in the rx phase, so we
+ * cannot prepare a fixed-length buffer to receive all of the rx data in a
+ * batch. We have to read word by word and convert to transaction layer data
+ * on the fly.
+ */
+static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
+{
+ bool eop_found = false, channel_found = false, esc_found = false;
+ bool valid_word = false, last_try = false;
+ struct device *dev = &br->spi->dev;
+ char *pb, *tb_limit, *tb = NULL;
+ unsigned long poll_timeout;
+ int ret, i;
+
+ tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
+ pb = br->phy_buf;
+ poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
+ while (tb < tb_limit) {
+ ret = spi_read(br->spi, pb, br->word_len);
+ if (ret)
+ return ret;
+
+ /* reorder the word back */
+ if (br->swap_words)
+ br->swap_words(pb, br->word_len);
+
+ valid_word = false;
+ for (i = 0; i < br->word_len; i++) {
+ /* drop everything before first SOP */
+ if (!tb && pb[i] != PKT_SOP)
+ continue;
+
+ /* drop PHY_IDLE */
+ if (pb[i] == PHY_IDLE)
+ continue;
+
+ valid_word = true;
+
+ /*
+ * We don't support multiple channels, so error out if
+ * a non-zero channel number is found.
+ */
+ if (channel_found) {
+ if (pb[i] != 0) {
+ dev_err(dev, "%s channel num != 0\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ channel_found = false;
+ continue;
+ }
+
+ switch (pb[i]) {
+ case PKT_SOP:
+ /*
+ * reset the parsing if a second SOP appears.
+ */
+ tb = br->trans_buf;
+ eop_found = false;
+ channel_found = false;
+ esc_found = false;
+ break;
+ case PKT_EOP:
+ /*
+ * No special char is expected after ESC char.
+ * No special char (except ESC & PHY_IDLE) is
+ * expected after EOP char.
+ *
+ * The special chars are all dropped.
+ */
+ if (esc_found || eop_found)
+ return -EFAULT;
+
+ eop_found = true;
+ break;
+ case PKT_CHANNEL:
+ if (esc_found || eop_found)
+ return -EFAULT;
+
+ channel_found = true;
+ break;
+ case PKT_ESC:
+ case PHY_ESC:
+ if (esc_found)
+ return -EFAULT;
+
+ esc_found = true;
+ break;
+ default:
+ /* Record the normal byte in trans_buf. */
+ if (esc_found) {
+ *tb++ = pb[i] ^ 0x20;
+ esc_found = false;
+ } else {
+ *tb++ = pb[i];
+ }
+
+ /*
+ * We got the last normal byte after EOP; it is
+ * time to finish. Normally the function should
+ * return here.
+ */
+ if (eop_found) {
+ br->trans_len = tb - br->trans_buf;
+ return 0;
+ }
+ }
+ }
+
+ if (valid_word) {
+ /* update poll timeout when we get valid word */
+ poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
+ last_try = false;
+ } else {
+ /*
+ * We time out when rx stays invalid for some time. But
+ * it is possible we were scheduled out for a long time
+ * after a spi_read, so when we are scheduled back in, a
+ * SW timeout fires even though the HW may have worked
+ * fine and been ready long ago. So do one extra read:
+ * if we get a valid word, continue the rx; otherwise a
+ * real HW issue has occurred.
+ */
+ if (last_try)
+ return -ETIMEDOUT;
+
+ if (time_after(jiffies, poll_timeout))
+ last_try = true;
+ }
+ }
+
+ /*
+ * We have used up the entire transaction layer buffer but haven't found
+ * the end of the byte stream.
+ */
+ dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
+ __func__);
+
+ return -EFAULT;
+}
+
+/*
+ * For read transactions, the avmm bus returns the register values directly,
+ * without a transaction response header.
+ */
+static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
+ u32 *val, unsigned int expected_count)
+{
+ unsigned int i, trans_len = br->trans_len;
+ __le32 *data;
+
+ if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
+ return -EFAULT;
+
+ data = (__le32 *)br->trans_buf;
+ for (i = 0; i < expected_count; i++)
+ *val++ = le32_to_cpu(*data++);
+
+ return 0;
+}
+
+/*
+ * For write transactions, the slave will return a transaction response
+ * header.
+ */
+static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
+ unsigned int expected_count)
+{
+ unsigned int trans_len = br->trans_len;
+ struct trans_resp_header *resp;
+ u8 code;
+ u16 val_len;
+
+ if (trans_len != TRANS_RESP_HD_SIZE)
+ return -EFAULT;
+
+ resp = (struct trans_resp_header *)br->trans_buf;
+
+ code = resp->r_code ^ 0x80;
+ val_len = be16_to_cpu(resp->size);
+ if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
+ return -EFAULT;
+
+ /* error out if the trans code doesn't align with the val size */
+ if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
+ (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_reg_access(void *context, bool is_read, unsigned int reg,
+ unsigned int *value, unsigned int count)
+{
+ struct spi_avmm_bridge *br = context;
+ int ret;
+
+ /* invalidate bridge buffers first */
+ br->trans_len = 0;
+ br->phy_len = 0;
+
+ ret = br_trans_tx_prepare(br, is_read, reg, value, count);
+ if (ret)
+ return ret;
+
+ ret = br_pkt_phy_tx_prepare(br);
+ if (ret)
+ return ret;
+
+ ret = br_do_tx(br);
+ if (ret)
+ return ret;
+
+ ret = br_do_rx_and_pkt_phy_parse(br);
+ if (ret)
+ return ret;
+
+ if (is_read)
+ return br_rd_trans_rx_parse(br, value, count);
+ else
+ return br_wr_trans_rx_parse(br, count);
+}
+
+static int regmap_spi_avmm_gather_write(void *context,
+ const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
+{
+ if (reg_len != SPI_AVMM_REG_SIZE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
+ return -EINVAL;
+
+ return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
+ val_len / SPI_AVMM_VAL_SIZE);
+}
+
+static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
+{
+ if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
+ return -EINVAL;
+
+ return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
+ data + SPI_AVMM_REG_SIZE,
+ bytes - SPI_AVMM_REG_SIZE);
+}
+
+static int regmap_spi_avmm_read(void *context,
+ const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ if (reg_len != SPI_AVMM_REG_SIZE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
+ return -EINVAL;
+
+ return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
+ (val_len / SPI_AVMM_VAL_SIZE));
+}
+
+static struct spi_avmm_bridge *
+spi_avmm_bridge_ctx_gen(struct spi_device *spi)
+{
+ struct spi_avmm_bridge *br;
+
+ if (!spi)
+ return ERR_PTR(-ENODEV);
+
+ /* Only support BPW == 8 or 32 now. Try 32 BPW first. */
+ spi->mode = SPI_MODE_1;
+ spi->bits_per_word = 32;
+ if (spi_setup(spi)) {
+ spi->bits_per_word = 8;
+ if (spi_setup(spi))
+ return ERR_PTR(-EINVAL);
+ }
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br)
+ return ERR_PTR(-ENOMEM);
+
+ br->spi = spi;
+ br->word_len = spi->bits_per_word / 8;
+ if (br->word_len == 4) {
+ /*
+ * The protocol requires little endian byte order but MSB
+ * first. So the driver needs to swap the byte order word by word
+ * if word length > 1.
+ */
+ br->swap_words = br_swap_words_32;
+ }
+
+ return br;
+}
+
+static void spi_avmm_bridge_ctx_free(void *context)
+{
+ kfree(context);
+}
+
+static const struct regmap_bus regmap_spi_avmm_bus = {
+ .write = regmap_spi_avmm_write,
+ .gather_write = regmap_spi_avmm_gather_write,
+ .read = regmap_spi_avmm_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
+ .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+ .free_context = spi_avmm_bridge_ctx_free,
+};
+
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct spi_avmm_bridge *bridge;
+ struct regmap *map;
+
+ bridge = spi_avmm_bridge_ctx_gen(spi);
+ if (IS_ERR(bridge))
+ return ERR_CAST(bridge);
+
+ map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
+ bridge, config, lock_key, lock_name);
+ if (IS_ERR(map)) {
+ spi_avmm_bridge_ctx_free(bridge);
+ return ERR_CAST(map);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);
+
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct spi_avmm_bridge *bridge;
+ struct regmap *map;
+
+ bridge = spi_avmm_bridge_ctx_gen(spi);
+ if (IS_ERR(bridge))
+ return ERR_CAST(bridge);
+
+ map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
+ bridge, config, lock_key, lock_name);
+ if (IS_ERR(map)) {
+ spi_avmm_bridge_ctx_free(bridge);
+ return ERR_CAST(map);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
+
+MODULE_LICENSE("GPL v2");
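A sketch of how an SPI client driver could sit on top of this bus; the config values are illustrative, and devm_regmap_init_spi_avmm() is the wrapper macro added by the companion include/linux/regmap.h change (not shown in this diff):

	static const struct regmap_config example_avmm_config = {
		.reg_bits = 32,		/* AVMM register addresses are 32 bits */
		.val_bits = 32,		/* AVMM register values are 32 bits */
		.reg_stride = 4,
	};

	static int example_spi_probe(struct spi_device *spi)
	{
		struct regmap *map;

		map = devm_regmap_init_spi_avmm(spi, &example_avmm_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* Register accesses are now encoded/decoded by the bridge. */
		return regmap_write(map, 0x0, 0x1);
	}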
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b71f9ecddff5..5db536ccfcd6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -209,6 +209,18 @@ static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
return true;
}
+static void regmap_format_12_20_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[0] = reg >> 4;
+ out[1] = (reg << 4) | (val >> 16);
+ out[2] = val >> 8;
+ out[3] = val;
+}
+
static void regmap_format_2_6_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -711,13 +723,17 @@ struct regmap *__regmap_init(struct device *dev,
if (ret)
goto err_map;
+ ret = -EINVAL; /* Later error paths rely on this */
+
if (config->disable_locking) {
map->lock = map->unlock = regmap_lock_unlock_none;
+ map->can_sleep = config->can_sleep;
regmap_debugfs_disable(map);
} else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
+ map->can_sleep = config->can_sleep;
} else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
@@ -753,6 +769,7 @@ struct regmap *__regmap_init(struct device *dev,
mutex_init(&map->mutex);
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
+ map->can_sleep = true;
lockdep_set_class_and_name(&map->mutex,
lock_key, lock_name);
}
@@ -883,6 +900,16 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
+ case 12:
+ switch (config->val_bits) {
+ case 20:
+ map->format.format_write = regmap_format_12_20_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
case 8:
map->format.format_reg = regmap_format_8;
break;
@@ -1243,6 +1270,106 @@ struct regmap_field *devm_regmap_field_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
+
+/**
+ * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
+ *
+ * @regmap: regmap bank in which the register fields are located.
+ * @rm_field: array to be filled with the allocated register fields.
+ * @reg_field: register field definitions within the bank.
+ * @num_fields: number of register fields.
+ *
+ * The return value will be -ENOMEM on error or zero for success.
+ * Newly allocated regmap_fields should be freed by calling
+ * regmap_field_bulk_free()
+ */
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ struct reg_field *reg_field,
+ int num_fields)
+{
+ struct regmap_field *rf;
+ int i;
+
+ rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_fields; i++) {
+ regmap_field_init(&rf[i], regmap, reg_field[i]);
+ rm_field[i] = &rf[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
+
+/**
+ * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
+ * fields.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which the register fields are located.
+ * @rm_field: array to be filled with the allocated register fields.
+ * @reg_field: register field definitions within the bank.
+ * @num_fields: number of register fields.
+ *
+ * The return value will be -ENOMEM on error or zero for success.
+ * Newly allocated regmap_fields will be automatically freed by the
+ * device management code.
+ */
+int devm_regmap_field_bulk_alloc(struct device *dev,
+ struct regmap *regmap,
+ struct regmap_field **rm_field,
+ struct reg_field *reg_field,
+ int num_fields)
+{
+ struct regmap_field *rf;
+ int i;
+
+ rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_fields; i++) {
+ regmap_field_init(&rf[i], regmap, reg_field[i]);
+ rm_field[i] = &rf[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
+
+/**
+ * regmap_field_bulk_free() - Free register field allocated using
+ * regmap_field_bulk_alloc.
+ *
+ * @field: regmap fields which should be freed.
+ */
+void regmap_field_bulk_free(struct regmap_field *field)
+{
+ kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
+
+/**
+ * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
+ * devm_regmap_field_bulk_alloc.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per device-driver life-cycle.
+ */
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field)
+{
+ devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
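A minimal usage sketch of the bulk field API; the register layout and names are invented (a real driver would keep the field pointers in its driver data rather than on the stack):

	static struct reg_field example_fields[] = {
		REG_FIELD(0x00, 0, 3),	/* bits [3:0] of register 0x00 */
		REG_FIELD(0x04, 4, 7),	/* bits [7:4] of register 0x04 */
	};

	static int example_fields_init(struct device *dev, struct regmap *map)
	{
		struct regmap_field *fields[ARRAY_SIZE(example_fields)];
		int ret;

		ret = devm_regmap_field_bulk_alloc(dev, map, fields, example_fields,
						   ARRAY_SIZE(example_fields));
		if (ret)
			return ret;

		return regmap_field_write(fields[0], 0xa);
	}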
+
/**
* devm_regmap_field_free() - Free a register field allocated using
* devm_regmap_field_alloc.
@@ -1365,6 +1492,8 @@ void regmap_exit(struct regmap *map)
}
if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ if (map->lock == regmap_lock_mutex)
+ mutex_destroy(&map->mutex);
kfree_const(map->name);
kfree(map->patch);
kfree(map);
@@ -2253,8 +2382,12 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
if (ret != 0)
return ret;
- if (regs[i].delay_us)
- udelay(regs[i].delay_us);
+ if (regs[i].delay_us) {
+ if (map->can_sleep)
+ fsleep(regs[i].delay_us);
+ else
+ udelay(regs[i].delay_us);
+ }
base += n;
n = 0;
@@ -2290,8 +2423,12 @@ static int _regmap_multi_reg_write(struct regmap *map,
if (ret != 0)
return ret;
- if (regs[i].delay_us)
- udelay(regs[i].delay_us);
+ if (regs[i].delay_us) {
+ if (map->can_sleep)
+ fsleep(regs[i].delay_us);
+ else
+ udelay(regs[i].delay_us);
+ }
}
return 0;
}
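The delay handling above keys off the new can_sleep flag in struct regmap_config (the header change is not part of this diff). A sketch of a config and sequence that would now use fsleep() rather than udelay() for the 100us delay:

	static const struct regmap_config example_sleeping_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.can_sleep = true,	/* bus accesses may sleep, so delays may too */
	};

	static const struct reg_sequence example_seq[] = {
		{ .reg = 0x01, .def = 0x80, .delay_us = 100 },
		{ .reg = 0x02, .def = 0x01 },
	};

	/* regmap_multi_reg_write(map, example_seq, ARRAY_SIZE(example_seq)); */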
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index a5bae551167d..d34609bb7386 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -17,9 +17,9 @@
static DEFINE_IDA(soc_ida);
-static ssize_t soc_info_get(struct device *dev,
- struct device_attribute *attr,
- char *buf);
+/* Prototype to allow declarations of DEVICE_ATTR(<foo>) before soc_info_show */
+static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
struct soc_device {
struct device dev;
@@ -31,11 +31,11 @@ static struct bus_type soc_bus_type = {
.name = "soc",
};
-static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(serial_number, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
-static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(machine, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(family, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(soc_id, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(revision, 0444, soc_info_show, NULL);
struct device *soc_device_to_device(struct soc_device *soc_dev)
{
@@ -49,45 +49,41 @@ static umode_t soc_attribute_mode(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
- if ((attr == &dev_attr_machine.attr)
- && (soc_dev->attr->machine != NULL))
+ if ((attr == &dev_attr_machine.attr) && soc_dev->attr->machine)
return attr->mode;
- if ((attr == &dev_attr_family.attr)
- && (soc_dev->attr->family != NULL))
+ if ((attr == &dev_attr_family.attr) && soc_dev->attr->family)
return attr->mode;
- if ((attr == &dev_attr_revision.attr)
- && (soc_dev->attr->revision != NULL))
+ if ((attr == &dev_attr_revision.attr) && soc_dev->attr->revision)
return attr->mode;
- if ((attr == &dev_attr_serial_number.attr)
- && (soc_dev->attr->serial_number != NULL))
+ if ((attr == &dev_attr_serial_number.attr) && soc_dev->attr->serial_number)
return attr->mode;
- if ((attr == &dev_attr_soc_id.attr)
- && (soc_dev->attr->soc_id != NULL))
+ if ((attr == &dev_attr_soc_id.attr) && soc_dev->attr->soc_id)
return attr->mode;
- /* Unknown or unfilled attribute. */
+ /* Unknown or unfilled attribute */
return 0;
}
-static ssize_t soc_info_get(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const char *output;
if (attr == &dev_attr_machine)
- return sprintf(buf, "%s\n", soc_dev->attr->machine);
- if (attr == &dev_attr_family)
- return sprintf(buf, "%s\n", soc_dev->attr->family);
- if (attr == &dev_attr_revision)
- return sprintf(buf, "%s\n", soc_dev->attr->revision);
- if (attr == &dev_attr_serial_number)
- return sprintf(buf, "%s\n", soc_dev->attr->serial_number);
- if (attr == &dev_attr_soc_id)
- return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
-
- return -EINVAL;
-
+ output = soc_dev->attr->machine;
+ else if (attr == &dev_attr_family)
+ output = soc_dev->attr->family;
+ else if (attr == &dev_attr_revision)
+ output = soc_dev->attr->revision;
+ else if (attr == &dev_attr_serial_number)
+ output = soc_dev->attr->serial_number;
+ else if (attr == &dev_attr_soc_id)
+ output = soc_dev->attr->soc_id;
+ else
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", output);
}
static struct attribute *soc_attr[] = {
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 0d346a307140..13db1f78d2ce 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -50,7 +50,7 @@ int syscore_suspend(void)
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
- pr_debug("Checking wakeup interrupts\n");
+ pm_pr_dbg("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
if (pm_wakeup_pending())
@@ -61,8 +61,7 @@ int syscore_suspend(void)
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->suspend) {
- if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->suspend);
+ pm_pr_dbg("Calling %pS\n", ops->suspend);
ret = ops->suspend();
if (ret)
goto err_out;
@@ -99,8 +98,7 @@ void syscore_resume(void)
list_for_each_entry(ops, &syscore_ops_list, node)
if (ops->resume) {
- if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->resume);
+ pm_pr_dbg("Calling %pS\n", ops->resume);
ops->resume();
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pS\n", ops->resume);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ad8d33c6077b..4d254fcc93d1 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -14,11 +14,11 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_id_show_func(name) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%d\n", topology_##name(dev->id)); \
+#define define_id_show_func(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \
}
#define define_siblings_show_map(name, mask) \
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 88a93c266c19..6f8fc5f587fe 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -419,12 +419,12 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;
- pc_host->mem_resource.name = "BCMA PCIcore external memory",
+ pc_host->mem_resource.name = "BCMA PCIcore external memory";
pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;
- pc_host->io_resource.name = "BCMA PCIcore external I/O",
+ pc_host->io_resource.name = "BCMA PCIcore external I/O";
pc_host->io_resource.start = 0x100;
pc_host->io_resource.end = 0x7FF;
pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 226219da3da6..71c2b1564558 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1670,7 +1670,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
}
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
if (mode & FMODE_WRITE) {
int wrprot;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 5ca7216e9e01..c34e71b0c4a9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -347,7 +347,6 @@ aoeblk_gdalloc(void *vp)
mempool_t *mp;
struct request_queue *q;
struct blk_mq_tag_set *set;
- enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
int err;
@@ -407,7 +406,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
+ blk_queue_io_opt(q, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6dba41395155..313f0b946fe2 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -900,9 +900,7 @@ aoecmd_sleepwork(struct work_struct *work)
ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);
if (bd) {
- inode_lock(bd->bd_inode);
- i_size_write(bd->bd_inode, (loff_t)ssize<<9);
- inode_unlock(bd->bd_inode);
+ bd_set_nr_sectors(bd, ssize);
bdput(bd);
}
spin_lock_irq(&d->lock);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a50e13af0305..3e881fdb06e0 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1732,7 +1732,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
return 0;
default:
return -EINVAL;
@@ -1909,7 +1910,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (mode & FMODE_WRITE) {
if (p->wpstat) {
if (p->ref < 0)
@@ -1953,7 +1955,6 @@ static const struct block_device_operations floppy_fops = {
.release = floppy_release,
.ioctl = fd_ioctl,
.check_events = floppy_check_events,
- .revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops ataflop_mq_ops = {
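The ataflop conversion above shows the new pattern in general: bdev_check_media_change() only reports whether a media change event was pending, and the caller revalidates explicitly instead of relying on the removed ->revalidate_disk hook. A generic sketch (the driver functions are hypothetical):

	static int example_open(struct block_device *bdev, fmode_t mode)
	{
		if (mode & (FMODE_READ | FMODE_WRITE)) {
			/* returns true if a media change event was pending */
			if (bdev_check_media_change(bdev))
				example_revalidate(bdev->bd_disk); /* driver-specific */
		}
		return 0;
	}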
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2723a70eb855..cc49a921339f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -403,7 +403,6 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
- brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index b41897dceb2b..7227fc7ab8ed 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -865,7 +865,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
if (!get_ldev(device))
return 0; /* no disk, no metadata, no bitmap to manipulate bits in */
- nr_sectors = drbd_get_capacity(device->this_bdev);
+ nr_sectors = get_capacity(device->vdisk);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 740e93bad21f..8f879e5c2f67 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -841,7 +841,6 @@ struct drbd_device {
sector_t p_size; /* partner's disk size */
struct request_queue *rq_queue;
- struct block_device *this_bdev;
struct gendisk *vdisk;
unsigned long last_reattach_jif;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 573dbf6f0c31..65b95aef8dbc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -984,7 +984,10 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
- p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
+ if (trigger_reply)
+ p->c_size = 0;
+ else
+ p->c_size = cpu_to_be64(get_capacity(device->vdisk));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
@@ -2029,17 +2032,13 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
-static void _drbd_set_my_capacity(struct drbd_device *device, sector_t size)
-{
- /* set_capacity(device->this_bdev->bd_disk, size); */
- set_capacity(device->vdisk, size);
- device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
-}
-
void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
char ppb[10];
- _drbd_set_my_capacity(device, size);
+
+ set_capacity(device->vdisk, size);
+ revalidate_disk_size(device->vdisk, false);
+
drbd_info(device, "size = %s (%llu KB)\n",
ppsize(ppb, size>>1), (unsigned long long)size>>1);
}
@@ -2069,7 +2068,8 @@ void drbd_device_cleanup(struct drbd_device *device)
}
D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- _drbd_set_my_capacity(device, 0);
+ set_capacity(device->vdisk, 0);
+ revalidate_disk_size(device->vdisk, false);
if (device->bitmap) {
/* maybe never allocated. */
drbd_bm_resize(device, 0, 1);
@@ -2236,9 +2236,6 @@ void drbd_destroy_device(struct kref *kref)
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
- if (device->this_bdev)
- bdput(device->this_bdev);
-
drbd_backing_dev_free(device, device->ldev);
device->ldev = NULL;
@@ -2765,10 +2762,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
- device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
- /* we have no partitions. we contain only ourselves. */
- device->this_bdev->bd_contains = device->this_bdev;
-
blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
@@ -3044,7 +3037,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
memset(buffer, 0, sizeof(*buffer));
- buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(device->ldev->md.flags);
@@ -3102,7 +3095,7 @@ void drbd_md_sync(struct drbd_device *device)
/* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
- device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
+ device->ldev->md.la_size_sect = get_capacity(device->vdisk);
drbd_md_put_buffer(device);
out:
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 43c8ae4d9fca..bf7de4c7b96c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -996,7 +996,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
goto err_out;
}
- if (drbd_get_capacity(device->this_bdev) != size ||
+ if (get_capacity(device->vdisk) != size ||
drbd_bm_capacity(device) != size) {
int err;
err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
@@ -1362,15 +1362,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
-
- if (q->backing_dev_info->ra_pages !=
- b->backing_dev_info->ra_pages) {
- drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info->ra_pages,
- b->backing_dev_info->ra_pages);
- q->backing_dev_info->ra_pages =
- b->backing_dev_info->ra_pages;
- }
+ blk_queue_update_readahead(q);
}
fixup_discard_if_not_supported(q);
fixup_write_zeroes(device, q);
@@ -1941,8 +1933,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Make sure the new disk is big enough
* (we may currently be R_PRIMARY with no local disk...) */
- if (drbd_get_max_capacity(nbc) <
- drbd_get_capacity(device->this_bdev)) {
+ if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -3370,7 +3361,6 @@ static void device_to_statistics(struct device_statistics *s,
if (get_ldev(device)) {
struct drbd_md *md = &device->ldev->md;
u64 *history_uuids = (u64 *)s->history_uuids;
- struct request_queue *q;
int n;
spin_lock_irq(&md->uuid_lock);
@@ -3384,14 +3374,9 @@ static void device_to_statistics(struct device_statistics *s,
spin_unlock_irq(&md->uuid_lock);
s->dev_disk_flags = md->flags;
- q = bdev_get_queue(device->ldev->backing_bdev);
- s->dev_lower_blocked =
- bdi_congested(q->backing_dev_info,
- (1 << WB_async_congested) |
- (1 << WB_sync_congested));
put_ldev(device);
}
- s->dev_size = drbd_get_capacity(device->this_bdev);
+ s->dev_size = get_capacity(device->vdisk);
s->dev_read = device->read_cnt;
s->dev_write = device->writ_cnt;
s->dev_al_writes = device->al_writ_cnt;
@@ -3831,8 +3816,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
nla_put_u32(skb, T_current_state, device->state.i) ||
nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
- nla_put_u64_0pad(skb, T_capacity,
- drbd_get_capacity(device->this_bdev)) ||
+ nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 422363daa618..dc333dbe5232 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1860,7 +1860,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct packet_info *pi) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
struct drbd_peer_request *peer_req;
struct page *page;
int digest_size, err;
@@ -2789,7 +2789,7 @@ bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
{
- struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
unsigned long db, dt, dbdt;
unsigned int c_min_rate;
int curr_events;
@@ -2849,7 +2849,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
if (!peer_device)
return -EIO;
device = peer_device->device;
- capacity = drbd_get_capacity(device->this_bdev);
+ capacity = get_capacity(device->vdisk);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -4117,7 +4117,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
- cur_size = drbd_get_capacity(device->this_bdev);
+ cur_size = get_capacity(device->vdisk);
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
@@ -4252,8 +4252,8 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
}
if (device->state.conn > C_WF_REPORT_PARAMS) {
- if (be64_to_cpu(p->c_size) !=
- drbd_get_capacity(device->this_bdev) || ldsc) {
+ if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
+ ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
drbd_send_sizes(peer_device, 0, ddsf);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5c975af9c15f..330f851cb8f0 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -888,7 +888,7 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
- nr_sectors = drbd_get_capacity(device->this_bdev);
+ nr_sectors = get_capacity(device->vdisk);
D_ASSERT(device, sector < nr_sectors);
D_ASSERT(device, esector < nr_sectors);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 7c903de5c4e1..ba56f3f05312 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -591,7 +591,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
unsigned long bit;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
int max_bio_size;
int number, rollback_i, size;
int align, requeue = 0;
@@ -769,7 +769,7 @@ static int make_ov_request(struct drbd_device *device, int cancel)
{
int number, i, size;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
bool stop_sector_reached = false;
if (unlikely(cancel))
@@ -1672,7 +1672,7 @@ void drbd_resync_after_changed(struct drbd_device *device)
void drbd_rs_controller_reset(struct drbd_device *device)
{
- struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
struct fifo_buffer *plan;
atomic_set(&device->rs_sect_in, 0);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a563b023458a..7df79ae6b0a1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -561,6 +561,7 @@ static void floppy_release_irq_and_dma(void);
* output_byte is automatically disabled when reset is set.
*/
static void reset_fdc(void);
+static int floppy_revalidate(struct gendisk *disk);
/*
* These are global variables, as that's the easiest way to give
@@ -3275,7 +3276,8 @@ static int invalidate_drive(struct block_device *bdev)
/* invalidate the buffer track to force a reread */
set_bit((long)bdev->bd_disk->private_data, &fake_change);
process_fd_request();
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
return 0;
}
@@ -4123,7 +4125,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
@@ -4291,7 +4294,6 @@ static const struct block_device_operations floppy_fops = {
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
- .revalidate_disk = floppy_revalidate,
#ifdef CONFIG_COMPAT
.compat_ioctl = fd_compat_ioctl,
#endif
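
The conversion here recurs throughout this series: bdev_check_media_change() replaces check_disk_change(), returning true when a media change was seen so the driver runs its revalidation itself instead of through the now-removed ->revalidate_disk hook. A minimal sketch of the open path, reusing the driver-private revalidate function named above:

	static int example_open(struct block_device *bdev, fmode_t mode)
	{
		/* true iff a media-changed event was seen since the last check */
		if (bdev_check_media_change(bdev))
			floppy_revalidate(bdev->bd_disk);	/* driver-private, as above */
		return 0;
	}
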
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d3394191e168..a58084c2ed7c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -253,9 +253,10 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
{
struct block_device *bdev = lo->lo_device;
- bd_set_size(bdev, size << SECTOR_SHIFT);
+ bd_set_nr_sectors(bdev, size);
- set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+ if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
static inline int
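
The extra branch exists because set_capacity_revalidate_and_notify() only returns true when it has itself emitted a RESIZE=1 uevent, which it skips when the size is unchanged or crosses zero (an assumption about the helper's contract); loop falls back to a plain change event so user space always hears about the new size. Sketch:

	if (!set_capacity_revalidate_and_notify(disk, new_sectors, false))
		/* no RESIZE uevent was sent; emit a generic change event */
		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
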
@@ -1251,7 +1252,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
if (bdev) {
- bd_set_size(bdev, 0);
+ bd_set_nr_sectors(bdev, 0);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index edf8b632e3d2..aaae9220f3a0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -296,10 +296,11 @@ static void nbd_size_clear(struct nbd_device *nbd)
}
}
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
{
struct nbd_config *config = nbd->config;
struct block_device *bdev = bdget_disk(nbd->disk, 0);
+ sector_t nr_sectors = config->bytesize >> 9;
if (config->flags & NBD_FLAG_SEND_TRIM) {
nbd->disk->queue->limits.discard_granularity = config->blksize;
@@ -308,13 +309,14 @@ static void nbd_size_update(struct nbd_device *nbd)
}
blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
- set_capacity(nbd->disk, config->bytesize >> 9);
+ set_capacity(nbd->disk, nr_sectors);
if (bdev) {
if (bdev->bd_disk) {
- bd_set_size(bdev, config->bytesize);
- set_blocksize(bdev, config->blksize);
+ bd_set_nr_sectors(bdev, nr_sectors);
+ if (start)
+ set_blocksize(bdev, config->blksize);
} else
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
bdput(bdev);
}
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
@@ -327,7 +329,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
if (nbd->task_recv != NULL)
- nbd_size_update(nbd);
+ nbd_size_update(nbd, false);
}
static void nbd_complete_rq(struct request *req)
@@ -801,9 +803,9 @@ static void recv_work(struct work_struct *work)
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
+ nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
- nbd_config_put(nbd);
kfree(args);
}
@@ -1138,7 +1140,7 @@ static void nbd_bdev_reset(struct block_device *bdev)
{
if (bdev->bd_openers > 1)
return;
- bd_set_size(bdev, 0);
+ bd_set_nr_sectors(bdev, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1307,7 +1309,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- nbd_size_update(nbd);
+ nbd_size_update(nbd, true);
return error;
}
@@ -1321,7 +1323,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
return ret;
if (max_part)
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
@@ -1499,9 +1501,9 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
} else if (nbd_disconnected(nbd->config)) {
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
@@ -1516,6 +1518,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd);
+ bdput(bdev);
nbd_config_put(nbd);
nbd_put(nbd);
@@ -2183,7 +2186,7 @@ out:
return ret;
}
-static const struct genl_ops nbd_connect_genl_ops[] = {
+static const struct genl_small_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2215,8 +2218,8 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.name = NBD_GENL_FAMILY_NAME,
.version = NBD_GENL_VERSION,
.module = THIS_MODULE,
- .ops = nbd_connect_genl_ops,
- .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .small_ops = nbd_connect_genl_ops,
+ .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
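
A hedged sketch of the slimmer registration this hunk converts to (the op shown is illustrative; nbd_genl_status is the existing handler elsewhere in this file): struct genl_small_ops omits the rarely used callbacks of struct genl_ops, so families with simple commands register through .small_ops/.n_small_ops and save memory per family.

	static const struct genl_small_ops example_small_ops[] = {
		{
			.cmd	  = NBD_CMD_STATUS,
			.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
			.doit	  = nbd_genl_status,	/* existing nbd handler */
		},
	};

	static struct genl_family example_family __ro_after_init = {
		.name	     = "example",
		.small_ops   = example_small_ops,
		.n_small_ops = ARRAY_SIZE(example_small_ops),
	};
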
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index daed4a9c3436..c24d9b5ad81a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -42,8 +42,13 @@ struct nullb_device {
struct badblocks badblocks;
unsigned int nr_zones;
+ unsigned int nr_zones_imp_open;
+ unsigned int nr_zones_exp_open;
+ unsigned int nr_zones_closed;
struct blk_zone *zones;
sector_t zone_size_sects;
+ spinlock_t zone_lock;
+ unsigned long *zone_locks;
unsigned long size; /* device size in MB */
unsigned long completion_nsec; /* time in ns to complete a request */
@@ -51,6 +56,8 @@ struct nullb_device {
unsigned long zone_size; /* zone size in MB if device is zoned */
unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
unsigned int zone_nr_conv; /* number of conventional zones */
+ unsigned int zone_max_open; /* max number of open zones */
+ unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index d74443a9c8fa..4685ea401d5b 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -164,6 +164,10 @@ static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+static bool g_shared_tag_bitmap;
+module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
+MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
+
static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -208,6 +212,14 @@ static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
+static unsigned int g_zone_max_open;
+module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
+
+static unsigned int g_zone_max_active;
+module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
+MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
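
Usage note (illustrative invocation; both new parameters are 0444, so they can only be set at load time): modprobe null_blk zoned=1 zone_size=256 zone_max_open=4 zone_max_active=6 creates a zoned device on which the accounting added below enforces at most 4 open and 6 active zones; leaving a parameter at 0 keeps that limit disabled.
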
@@ -347,6 +359,8 @@ NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -464,6 +478,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_size,
&nullb_device_attr_zone_capacity,
&nullb_device_attr_zone_nr_conv,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_max_active,
NULL,
};
@@ -517,7 +533,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv\n");
+ "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -580,6 +596,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_size = g_zone_size;
dev->zone_capacity = g_zone_capacity;
dev->zone_nr_conv = g_zone_nr_conv;
+ dev->zone_max_open = g_zone_max_open;
+ dev->zone_max_active = g_zone_max_active;
return dev;
}
@@ -1692,6 +1710,8 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
set->flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = NULL;
if ((nullb && nullb->dev->blocking) || g_blocking)
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d25c9ad2383..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "null_blk.h"
#define CREATE_TRACE_POINTS
@@ -45,12 +46,44 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (!dev->zones)
return -ENOMEM;
+	/*
+	 * With memory backing, the zone_lock spinlock needs to be temporarily
+	 * released to avoid scheduling in atomic context. To keep the zone
+	 * information consistent while the spinlock is dropped, use a bitmap
+	 * to lock zones with wait_on_bit_lock_io(). Sleeping on the lock is
+	 * OK as memory backing implies that the queue is marked with
+	 * BLK_MQ_F_BLOCKING.
+	 */
+ spin_lock_init(&dev->zone_lock);
+ if (dev->memory_backed) {
+ dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+ if (!dev->zone_locks) {
+ kvfree(dev->zones);
+ return -ENOMEM;
+ }
+ }
+
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
pr_info("changed the number of conventional zones to %u",
dev->zone_nr_conv);
}
+	/* Max active zones has to be < the number of sequential zones to be enforceable */
+ if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
+ dev->zone_max_active = 0;
+		pr_info("zone_max_active limit disabled, limit >= number of sequential zones\n");
+ }
+
+ /* Max open zones has to be <= max active zones */
+ if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+ dev->zone_max_open = dev->zone_max_active;
+ pr_info("changed the maximum number of open zones to %u\n",
+			dev->zone_max_open);
+ } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+ dev->zone_max_open = 0;
+		pr_info("zone_max_open limit disabled, limit >= number of sequential zones\n");
+ }
+
for (i = 0; i < dev->zone_nr_conv; i++) {
struct blk_zone *zone = &dev->zones[i];
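
The bit-lock half of the scheme described in the comment above is built from generic primitives; a condensed sketch (illustrative, error handling elided):

	unsigned long *zone_locks = bitmap_zalloc(nr_zones, GFP_KERNEL);	/* one bit per zone */

	/* take the per-zone lock; may sleep, which the spinlock cannot */
	wait_on_bit_lock_io(zone_locks, zno, TASK_UNINTERRUPTIBLE);
	/* ... zone zno is exclusively held, even across sleeping allocations ... */
	clear_and_wake_up_bit(zno, zone_locks);		/* release and wake waiters */
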
@@ -99,21 +132,39 @@ int null_register_zoned_dev(struct nullb *nullb)
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
+ blk_queue_max_open_zones(q, dev->zone_max_open);
+ blk_queue_max_active_zones(q, dev->zone_max_active);
return 0;
}
void null_free_zoned_dev(struct nullb_device *dev)
{
+ bitmap_free(dev->zone_locks);
kvfree(dev->zones);
}
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ if (dev->memory_backed)
+ wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+ spin_lock_irq(&dev->zone_lock);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ spin_unlock_irq(&dev->zone_lock);
+
+ if (dev->memory_backed)
+ clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int first_zone, i;
+ unsigned int first_zone, i, zno;
struct blk_zone zone;
int error;
@@ -124,15 +175,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
- for (i = 0; i < nr_zones; i++) {
+ zno = first_zone;
+ for (i = 0; i < nr_zones; i++, zno++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
- memcpy(&zone, &dev->zones[first_zone + i],
- sizeof(struct blk_zone));
+ null_lock_zone(dev, zno);
+ memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+ null_unlock_zone(dev, zno);
+
error = cb(&zone, i, data);
if (error)
return error;
@@ -141,6 +195,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
return nr_zones;
}
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
@@ -159,6 +217,111 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
return (zone->wp - sector) << SECTOR_SHIFT;
}
+static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ /* close operation on closed is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ if (zone->wp == zone->start) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ dev->nr_zones_closed++;
+ }
+
+ return BLK_STS_OK;
+}
+
+static void null_close_first_imp_zone(struct nullb_device *dev)
+{
+ unsigned int i;
+
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
+ null_close_zone(dev, &dev->zones[i]);
+ return;
+ }
+ }
+}
+
+static blk_status_t null_check_active(struct nullb_device *dev)
+{
+ if (!dev->zone_max_active)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
+ dev->nr_zones_closed < dev->zone_max_active)
+ return BLK_STS_OK;
+
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+}
+
+static blk_status_t null_check_open(struct nullb_device *dev)
+{
+ if (!dev->zone_max_open)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_imp_open) {
+ if (null_check_active(dev) == BLK_STS_OK) {
+ null_close_first_imp_zone(dev);
+ return BLK_STS_OK;
+ }
+ }
+
+ return BLK_STS_ZONE_OPEN_RESOURCE;
+}
+
+/*
+ * This function mirrors the "manage open zone resources" operation of the
+ * ZBC standard, extended with support for max active zones (added in the
+ * ZNS standard).
+ *
+ * It determines whether a zone can transition to implicit open or explicit
+ * open while maintaining the max open zone (and max active zone) limit(s),
+ * and may close an implicitly open zone in order to make additional zone
+ * resources available.
+ *
+ * ZBC states that an implicitly open zone shall be closed only if there is
+ * no room within the open limit. With the addition of an active limit,
+ * however, closing an implicitly open zone no longer guarantees that a new
+ * zone can be opened, since the active limit may already be reached.
+ */
+static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_active(dev);
+ if (ret != BLK_STS_OK)
+ return ret;
+ fallthrough;
+ case BLK_ZONE_COND_CLOSED:
+ return null_check_open(dev);
+ default:
+ /* Should never be called for other states */
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+}
+
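
To make the limit interaction concrete (numbers are illustrative): with zone_max_open = 2 and zone_max_active = 3, two implicitly open zones and one closed zone, a write to the closed zone reaches null_check_open(). The open limit is exhausted (2 + 0 >= 2), and the fallback of closing an implicitly open zone is skipped because null_check_active() also reports the active limit exhausted (2 + 0 + 1 >= 3), so the write fails with BLK_STS_ZONE_OPEN_RESOURCE. This is deliberately conservative: the target zone is already active, so swapping it for a closed implicit-open zone would in fact have stayed within both limits.
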
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors, bool append)
{
@@ -172,123 +335,272 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ goto unlock;
+ break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ /* Invalid zone condition */
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ /*
+ * Regular writes must be at the write pointer position.
+ * Zone append writes are automatically issued at the write
+ * pointer and the position returned using the request or BIO
+ * sector.
+ */
+ if (append) {
+ sector = zone->wp;
+ if (cmd->bio)
+ cmd->bio->bi_iter.bi_sector = sector;
+ else
+ cmd->rq->__sector = sector;
+ } else if (sector != zone->wp) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ if (zone->cond == BLK_ZONE_COND_CLOSED) {
+ dev->nr_zones_closed--;
+ dev->nr_zones_imp_open++;
+ } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+ dev->nr_zones_imp_open++;
+ }
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Memory backing allocation may sleep: release the zone_lock spinlock
+ * to avoid scheduling in atomic context. Zone operation atomicity is
+ * still guaranteed through the zone_locks bitmap.
+ */
+ if (dev->memory_backed)
+ spin_unlock_irq(&dev->zone_lock);
+ ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ if (dev->memory_backed)
+ spin_lock_irq(&dev->zone_lock);
+
+ if (ret != BLK_STS_OK)
+ goto unlock;
+
+ zone->wp += nr_sectors;
+ if (zone->wp == zone->start + zone->capacity) {
+ if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
+ dev->nr_zones_exp_open--;
+ else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
+ dev->nr_zones_imp_open--;
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+ ret = BLK_STS_OK;
+
+unlock:
+ null_unlock_zone(dev, zno);
+
+ return ret;
+}
+
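
A short sketch of how a submitter consumes the zone-append contract implemented above (assumed inputs bdev, page, len, zone_start; illustrative, error handling elided): the device picks the write position at the zone's write pointer and reports it back through the BIO sector.

	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	sector_t written;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC;
	bio->bi_iter.bi_sector = zone_start;	/* target zone, not the final position */
	bio_add_page(bio, page, len, 0);
	submit_bio_wait(bio);
	/* the driver (see above) stored the actual write position here: */
	written = bio->bi_iter.bi_sector;
	bio_put(bio);
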
+static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ /* open operation on exp open is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
case BLK_ZONE_COND_CLOSED:
- /*
- * Regular writes must be at the write pointer position.
- * Zone append writes are automatically issued at the write
- * pointer and the position returned using the request or BIO
- * sector.
- */
- if (append) {
- sector = zone->wp;
- if (cmd->bio)
- cmd->bio->bi_iter.bi_sector = sector;
- else
- cmd->rq->__sector = sector;
- } else if (sector != zone->wp) {
- return BLK_STS_IOERR;
- }
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ dev->nr_zones_exp_open++;
- if (zone->wp + nr_sectors > zone->start + zone->capacity)
- return BLK_STS_IOERR;
+ return BLK_STS_OK;
+}
- if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
- zone->cond = BLK_ZONE_COND_IMP_OPEN;
+static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* finish operation on full is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
return ret;
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ dev->nr_zones_closed--;
+ break;
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->len;
- zone->wp += nr_sectors;
- if (zone->wp == zone->start + zone->capacity)
- zone->cond = BLK_ZONE_COND_FULL;
+ return BLK_STS_OK;
+}
+
+static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ /* reset operation on empty is not an error */
return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_FULL:
+ break;
default:
- /* Invalid zone condition */
return BLK_STS_IOERR;
}
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+
+ return BLK_STS_OK;
}
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zone_no = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zone_no];
+ unsigned int zone_no;
+ struct blk_zone *zone;
+ blk_status_t ret;
size_t i;
- switch (op) {
- case REQ_OP_ZONE_RESET_ALL:
- for (i = 0; i < dev->nr_zones; i++) {
- if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
- continue;
- zone[i].cond = BLK_ZONE_COND_EMPTY;
- zone[i].wp = zone[i].start;
+ if (op == REQ_OP_ZONE_RESET_ALL) {
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ null_lock_zone(dev, i);
+ zone = &dev->zones[i];
+ if (zone->cond != BLK_ZONE_COND_EMPTY) {
+ null_reset_zone(dev, zone);
+ trace_nullb_zone_op(cmd, i, zone->cond);
+ }
+ null_unlock_zone(dev, i);
}
- break;
- case REQ_OP_ZONE_RESET:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
+ return BLK_STS_OK;
+ }
- zone->cond = BLK_ZONE_COND_EMPTY;
- zone->wp = zone->start;
+ zone_no = null_zone_no(dev, sector);
+ zone = &dev->zones[zone_no];
+
+ null_lock_zone(dev, zone_no);
+
+ switch (op) {
+ case REQ_OP_ZONE_RESET:
+ ret = null_reset_zone(dev, zone);
break;
case REQ_OP_ZONE_OPEN:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_FULL)
- return BLK_STS_IOERR;
-
- zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ ret = null_open_zone(dev, zone);
break;
case REQ_OP_ZONE_CLOSE:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_FULL)
- return BLK_STS_IOERR;
-
- if (zone->wp == zone->start)
- zone->cond = BLK_ZONE_COND_EMPTY;
- else
- zone->cond = BLK_ZONE_COND_CLOSED;
+ ret = null_close_zone(dev, zone);
break;
case REQ_OP_ZONE_FINISH:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
-
- zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zone->len;
+ ret = null_finish_zone(dev, zone);
break;
default:
- return BLK_STS_NOTSUPP;
+ ret = BLK_STS_NOTSUPP;
+ break;
}
- trace_nullb_zone_op(cmd, zone_no, zone->cond);
- return BLK_STS_OK;
+ if (ret == BLK_STS_OK)
+ trace_nullb_zone_op(cmd, zone_no, zone->cond);
+
+ null_unlock_zone(dev, zone_no);
+
+ return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector, sector_t nr_sectors)
{
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int zno = null_zone_no(dev, sector);
+ blk_status_t sts;
+
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors, false);
+ sts = null_zone_write(cmd, sector, nr_sectors, false);
+ break;
case REQ_OP_ZONE_APPEND:
- return null_zone_write(cmd, sector, nr_sectors, true);
+ sts = null_zone_write(cmd, sector, nr_sectors, true);
+ break;
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- return null_zone_mgmt(cmd, op, sector);
+ sts = null_zone_mgmt(cmd, op, sector);
+ break;
default:
- return null_process_cmd(cmd, op, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+ sts = null_process_cmd(cmd, op, sector, nr_sectors);
+ null_unlock_zone(dev, zno);
}
+
+ return sts;
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 5124eca90e83..70da8b86ce58 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -233,7 +233,7 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
struct pcd_unit *cd = bdev->bd_disk->private_data;
int ret;
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
mutex_lock(&pcd_mutex);
ret = cdrom_open(&cd->info, bdev, mode);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1034e445680c..467dbd06b7cd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1082,65 +1082,6 @@ static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *p
}
}
-/*
- * recover a failed write, query for relocation if possible
- *
- * returns 1 if recovery is possible, or 0 if not
- *
- */
-static int pkt_start_recovery(struct packet_data *pkt)
-{
- /*
- * FIXME. We need help from the file system to implement
- * recovery handling.
- */
- return 0;
-#if 0
- struct request *rq = pkt->rq;
- struct pktcdvd_device *pd = rq->rq_disk->private_data;
- struct block_device *pkt_bdev;
- struct super_block *sb = NULL;
- unsigned long old_block, new_block;
- sector_t new_sector;
-
- pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
- if (pkt_bdev) {
- sb = get_super(pkt_bdev);
- bdput(pkt_bdev);
- }
-
- if (!sb)
- return 0;
-
- if (!sb->s_op->relocate_blocks)
- goto out;
-
- old_block = pkt->sector / (CD_FRAMESIZE >> 9);
- if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
- goto out;
-
- new_sector = new_block * (CD_FRAMESIZE >> 9);
- pkt->sector = new_sector;
-
- bio_reset(pkt->bio);
- bio_set_dev(pkt->bio, pd->bdev);
- bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
- pkt->bio->bi_iter.bi_sector = new_sector;
- pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
- pkt->bio->bi_vcnt = pkt->frames;
-
- pkt->bio->bi_end_io = pkt_end_io_packet_write;
- pkt->bio->bi_private = pkt;
-
- drop_super(sb);
- return 1;
-
-out:
- drop_super(sb);
- return 0;
-#endif
-}
-
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
@@ -1357,12 +1298,8 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
break;
case PACKET_RECOVERY_STATE:
- if (pkt_start_recovery(pkt)) {
- pkt_start_write(pd, pkt);
- } else {
- pkt_dbg(2, pd, "No recovery possible\n");
- pkt_set_state(pkt, PACKET_FINISHED_STATE);
- }
+ pkt_dbg(2, pd, "No recovery possible\n");
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
break;
case PACKET_FINISHED_STATE:
@@ -2173,16 +2110,18 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
int ret;
long lba;
struct request_queue *q;
+ struct block_device *bdev;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
- * so bdget() can't fail.
+ * so open should not fail.
*/
- bdget(pd->bdev->bd_dev);
- ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
- if (ret)
+ bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
goto out;
+ }
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2192,7 +2131,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
set_capacity(pd->disk, lba << 2);
set_capacity(pd->bdev->bd_disk, lba << 2);
- bd_set_size(pd->bdev, (loff_t)lba << 11);
+ bd_set_nr_sectors(pd->bdev, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
@@ -2226,7 +2165,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
return 0;
out_putdev:
- blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+ blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
return ret;
}
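
blkdev_get_by_dev() is the one-step replacement for the bdget() + blkdev_get() pairs removed in this file; it looks up and opens the device together and returns an ERR_PTR() on failure. Sketch of the pattern (devt and holder as used in the hunks above):

	struct block_device *bdev;

	bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use bdev ... */
	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
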
@@ -2563,7 +2502,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
- int ret = 0;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
@@ -2586,12 +2524,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev = bdget(dev);
- if (!bdev)
- return -ENOMEM;
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
+ bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
@@ -2609,7 +2544,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
pkt_err(pd, "can't start kernel thread\n");
- ret = -ENOMEM;
goto out_mem;
}
@@ -2621,7 +2555,7 @@ out_mem:
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- return ret;
+ return -ENOMEM;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e77eaab5cf23..f84128abade3 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4010,10 +4010,10 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
ENTITY_NAME(lockers[0].id.name));
- ret = ceph_monc_blacklist_add(&client->monc,
+ ret = ceph_monc_blocklist_add(&client->monc,
&lockers[0].info.addr);
if (ret) {
- rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
+ rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
ENTITY_NAME(lockers[0].id.name), ret);
goto out;
}
@@ -4077,7 +4077,7 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
- if (ret == -EBLACKLISTED)
+ if (ret == -EBLOCKLISTED)
goto out;
ret = 1; /* request lock anyway */
@@ -4613,7 +4613,7 @@ static void rbd_reregister_watch(struct work_struct *work)
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED && ret != -ENOENT) {
+ if (ret != -EBLOCKLISTED && ret != -ENOENT) {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
@@ -4921,7 +4921,7 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
- revalidate_disk(rbd_dev->disk);
+ revalidate_disk_size(rbd_dev->disk, true);
}
}
@@ -5022,7 +5022,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
/*
* disk_release() expects a queue ref from add_disk() and will
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index cc6a4e2587ae..8b2411ccbda9 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -91,29 +91,18 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
dev->max_segments = BMAX_SEGMENTS;
- dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
- le32_to_cpu(rsp->max_hw_sectors));
- dev->max_segments = min_t(u16, dev->max_segments,
- le16_to_cpu(rsp->max_segments));
-
return 0;
}
static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
size_t new_nsectors)
{
- int err = 0;
-
rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
dev->nsectors, new_nsectors);
dev->nsectors = new_nsectors;
set_capacity(dev->gd, dev->nsectors);
- err = revalidate_disk(dev->gd);
- if (err)
- rnbd_clt_err(dev,
- "Failed to change device size from %zu to %zu, err: %d\n",
- dev->nsectors, new_nsectors, err);
- return err;
+ revalidate_disk_size(dev->gd, true);
+ return 0;
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -433,7 +422,7 @@ enum wait_type {
};
static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
- struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+ struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
int *errno, enum wait_type wait)
@@ -447,7 +436,7 @@ static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
.conf_fn = msg_conf,
};
err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
- vec, nr, len, sg, sg_len);
+ vec, 1, len, sg, sg_len);
if (!err && wait) {
wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
*errno = iu->comp.errno;
@@ -492,7 +481,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
msg.device_id = cpu_to_le32(device_id);
WARN_ON(!rnbd_clt_get_dev(dev));
- err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+ err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
msg_close_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -581,7 +570,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sglist, 1,
msg_open_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -635,7 +624,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
goto put_iu;
}
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sglist, 1,
msg_sess_info_conf, &errno, wait);
if (err) {
rnbd_clt_put_sess(sess);
@@ -1180,7 +1169,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_SHARED;
+ BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu);
tag_set->nr_hw_queues = num_online_cpus();
@@ -1520,7 +1509,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
"map_device: Failed to configure device, err: %d\n",
ret);
mutex_unlock(&dev->lock);
- goto del_dev;
+ goto send_close;
}
rnbd_clt_info(dev,
@@ -1539,6 +1528,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
return dev;
+send_close:
+ send_msg_close(dev, dev->device_id, WAIT);
del_dev:
delete_dev(dev);
put_dev:
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 8799e3bab067..63f549889f87 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -439,7 +439,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
case CARD_STATE_FAULT:
dev_crit(CARD_TO_DEV(card),
"Hardware Fault reported!\n");
- /* Fall through. */
+ fallthrough;
/* Everything else, detach DMA interface if it's attached. */
case CARD_STATE_SHUTDOWN:
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index ae6454c24594..a962b4551bed 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
-#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index dd34504382e5..52dd1efa00f9 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -638,7 +638,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev) && fs->disk_in)
+ fs->ejected = 0;
if ((mode & FMODE_WRITE) && fs->write_protected) {
err = -EROFS;
goto out;
@@ -735,24 +736,6 @@ static unsigned int floppy_check_events(struct gendisk *disk,
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
-static int floppy_revalidate(struct gendisk *disk)
-{
- struct floppy_state *fs = disk->private_data;
- struct swim __iomem *base = fs->swd->base;
-
- swim_drive(base, fs->location);
-
- if (fs->ejected)
- setup_medium(fs);
-
- if (!fs->disk_in)
- swim_motor(base, OFF);
- else
- fs->ejected = 0;
-
- return !fs->disk_in;
-}
-
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_unlocked_open,
@@ -760,7 +743,6 @@ static const struct block_device_operations floppy_fops = {
.ioctl = floppy_ioctl,
.getgeo = floppy_getgeo,
.check_events = floppy_check_events,
- .revalidate_disk = floppy_revalidate,
};
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index aa77eb5fb7de..c2d922d125e2 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -945,7 +945,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (err == 0 && (mode & FMODE_NDELAY) == 0
&& (mode & (FMODE_READ|FMODE_WRITE))) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (fs->ejected)
err = -ENXIO;
}
@@ -1055,7 +1056,6 @@ static const struct block_device_operations floppy_fops = {
.release = floppy_release,
.ioctl = floppy_ioctl,
.check_events = floppy_check_events,
- .revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops swim3_mq_ops = {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b2e48dac1ebd..a314b9382442 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -598,7 +598,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
blk_queue_write_cache(vblk->disk->queue, writeback, false);
- revalidate_disk(vblk->disk);
+ revalidate_disk_size(vblk->disk, true);
}
static const char *const virtblk_cache_types[] = {
@@ -646,7 +646,7 @@ static struct attribute *virtblk_attrs[] = {
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index adfc9352351d..501e9dacfff9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -201,7 +201,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
@@ -612,6 +612,8 @@ int xen_blkif_schedule(void *arg)
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
+ bool do_eoi;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
set_freezable();
while (!kthread_should_stop()) {
@@ -636,16 +638,23 @@ int xen_blkif_schedule(void *arg)
if (timeout == 0)
goto purge_gnt_list;
+ do_eoi = ring->waiting_reqs;
+
ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- ret = do_block_io_op(ring);
+ ret = do_block_io_op(ring, &eoi_flags);
if (ret > 0)
ring->waiting_reqs = 1;
if (ret == -EACCES)
wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
+ if (do_eoi && !ring->waiting_reqs) {
+ xen_irq_lateeoi(ring->irq, eoi_flags);
+ eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+ }
+
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, ring->next_lru)) {
@@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio)
* and transmute it to the block API to hand it over to the proper block disk.
*/
static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
@@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
+ /* We've seen a request, so clear spurious eoi flag. */
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (kthread_should_stop()) {
more_to_do = 1;
break;
@@ -1202,13 +1214,13 @@ done:
}
static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
- more_to_do = __do_block_io_op(ring);
+ more_to_do = __do_block_io_op(ring, eoi_flags);
if (more_to_do)
break;
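
The lateeoi discipline introduced above follows one pattern across Xen backends: bind the event channel with the _lateeoi variant, then only signal end-of-interrupt once the ring has been drained, flagging wakeups that produced no work so Xen can throttle a misbehaving frontend. A condensed sketch (ring_has_requests() and process_one() are hypothetical helpers):

	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (ring_has_requests(ring)) {
		/* real work seen: this wakeup was not spurious */
		eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
		process_one(ring);
	}
	xen_irq_lateeoi(irq, eoi_flags);
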
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index b9aa5d1ac10b..f5705569e2a7 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -246,9 +246,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (req_prod - rsp_prod > size)
goto fail;
- err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
- xen_blkif_be_int, 0,
- "blkif-backend", ring);
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+ evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0)
goto fail;
ring->irq = err;
@@ -474,6 +473,12 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -519,6 +524,8 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
+ vbd->feature_gnt_persistent = feature_persistent;
+
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -906,7 +913,8 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
- err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+ be->blkif->vbd.feature_gnt_persistent);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1067,7 +1075,6 @@ static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
- unsigned int pers_grants;
char protocol[64] = "";
int err, i;
char *xspath;
@@ -1093,9 +1100,11 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
- 0);
- blkif->vbd.feature_gnt_persistent = pers_grants;
+ if (blkif->vbd.feature_gnt_persistent)
+ blkif->vbd.feature_gnt_persistent =
+ xenbus_read_unsigned(dev->otherend,
+ "feature-persistent", 0);
+
blkif->vbd.overflow_max_grants = 0;
/*
@@ -1118,7 +1127,7 @@ static int connect_ring(struct backend_info *be)
pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
blkif->nr_rings, blkif->blk_protocol, protocol,
- pers_grants ? "persistent grants" : "");
+ blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
ring_page_order = xenbus_read_unsigned(dev->otherend,
"ring-page-order", 0);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 91de2e0755ae..48629d3433b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1866,8 +1866,8 @@ again:
message = "writing protocol";
goto abort_transaction;
}
- err = xenbus_printf(xbt, dev->nodename,
- "feature-persistent", "%u", 1);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+ info->feature_persistent);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1941,6 +1941,13 @@ static int negotiate_mq(struct blkfront_info *info)
}
return 0;
}
+
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2007,6 +2014,8 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
+ info->feature_persistent = feature_persistent;
+
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2316,9 +2325,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- info->feature_persistent =
- !!xenbus_read_unsigned(info->xbdev->otherend,
- "feature-persistent", 0);
+ if (info->feature_persistent)
+ info->feature_persistent =
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 5d8e0ab3f054..eb8ef65778c3 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -443,22 +443,27 @@ static void ace_fix_driveid(u16 *id)
#define ACE_FSM_NUM_STATES 11
/* Set flag to exit FSM loop and reschedule tasklet */
-static inline void ace_fsm_yield(struct ace_device *ace)
+static inline void ace_fsm_yieldpoll(struct ace_device *ace)
{
- dev_dbg(ace->dev, "ace_fsm_yield()\n");
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
}
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "%s()\n", __func__);
+ ace_fsm_yieldpoll(ace);
+}
+
/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (!ace->irq)
- /* No IRQ assigned, so need to poll */
- tasklet_schedule(&ace->fsm_tasklet);
- ace->fsm_continue_flag = 0;
+ if (ace->irq > 0)
+ ace->fsm_continue_flag = 0;
+ else
+ ace_fsm_yieldpoll(ace);
}
static bool ace_has_next_request(struct request_queue *q)
@@ -888,26 +893,20 @@ static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
}
-static int ace_revalidate_disk(struct gendisk *gd)
+static void ace_media_changed(struct ace_device *ace)
{
- struct ace_device *ace = gd->private_data;
unsigned long flags;
- dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+ dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
- if (ace->media_change) {
- dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
-
- spin_lock_irqsave(&ace->lock, flags);
- ace->id_req_count++;
- spin_unlock_irqrestore(&ace->lock, flags);
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->id_req_count++;
+ spin_unlock_irqrestore(&ace->lock, flags);
- tasklet_schedule(&ace->fsm_tasklet);
- wait_for_completion(&ace->id_completion);
- }
+ tasklet_schedule(&ace->fsm_tasklet);
+ wait_for_completion(&ace->id_completion);
dev_dbg(ace->dev, "revalidate complete\n");
- return ace->id_result;
}
static int ace_open(struct block_device *bdev, fmode_t mode)
@@ -922,7 +921,8 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
ace->users++;
spin_unlock_irqrestore(&ace->lock, flags);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev) && ace->media_change)
+ ace_media_changed(ace);
mutex_unlock(&xsysace_mutex);
return 0;
@@ -966,7 +966,6 @@ static const struct block_device_operations ace_fops = {
.open = ace_open,
.release = ace_release,
.check_events = ace_check_events,
- .revalidate_disk = ace_revalidate_disk,
.getgeo = ace_getgeo,
};
@@ -1059,12 +1058,12 @@ static int ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq) {
+ if (ace->irq > 0) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = 0;
+ ace->irq = rc;
}
}
@@ -1080,7 +1079,7 @@ static int ace_setup(struct ace_device *ace)
(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);
ace->media_change = 1;
- ace_revalidate_disk(ace->gd);
+ ace_media_changed(ace);
/* Make the sysace device 'live' */
add_disk(ace->gd);
@@ -1116,7 +1115,7 @@ static void ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq)
+ if (ace->irq > 0)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1129,11 +1128,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
int rc;
dev_dbg(dev, "ace_alloc(%p)\n", dev);
- if (!physaddr) {
- rc = -ENODEV;
- goto err_noreg;
- }
-
/* Allocate and initialize the ace device structure */
ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
if (!ace) {
@@ -1159,7 +1153,6 @@ err_setup:
dev_set_drvdata(dev, NULL);
kfree(ace);
err_alloc:
-err_noreg:
dev_err(dev, "could not initialize device, err=%i\n", rc);
return rc;
}
@@ -1182,10 +1175,11 @@ static void ace_free(struct device *dev)
static int ace_probe(struct platform_device *dev)
{
- resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
+ resource_size_t physaddr;
+ struct resource *res;
u32 id = dev->id;
- int irq = 0;
+ int irq;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
@@ -1196,12 +1190,15 @@ static int ace_probe(struct platform_device *dev)
if (of_find_property(dev->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
- for (i = 0; i < dev->num_resources; i++) {
- if (dev->resource[i].flags & IORESOURCE_MEM)
- physaddr = dev->resource[i].start;
- if (dev->resource[i].flags & IORESOURCE_IRQ)
- irq = dev->resource[i].start;
- }
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ physaddr = res->start;
+ if (!physaddr)
+ return -ENODEV;
+
+ irq = platform_get_irq_optional(dev, 0);
/* Call the bus-independent setup code */
return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
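
The probe rework above swaps the open-coded resource loop for the core helpers: platform_get_resource() fetches the first MEM resource, and platform_get_irq_optional() returns a negative errno instead of logging an error when no IRQ is wired up, which is why the driver now tests "irq > 0" everywhere. A minimal sketch of the same pattern, with hypothetical foo_* names:

/* Illustrative sketch only; "foo" is a hypothetical driver, not xsysace. */
#include <linux/platform_device.h>
#include <linux/ioport.h>

int foo_alloc(struct device *dev, resource_size_t physaddr, int irq);

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	int irq;

	/* First (and only) MEM resource; bail out if the board gave us none. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/* A missing IRQ is acceptable: the device falls back to polled mode.
	 * A negative value is remembered and later checked as irq > 0.
	 */
	irq = platform_get_irq_optional(pdev, 0);

	return foo_alloc(&pdev->dev, res->start, irq);
}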
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9100ac36670a..1b697208d661 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,6 +52,9 @@ static unsigned int num_devices = 1;
*/
static size_t huge_class_size;
+static const struct block_device_operations zram_devops;
+static const struct block_device_operations zram_wb_devops;
+
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio);
@@ -408,8 +411,7 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -491,9 +493,10 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev = bdgrab(I_BDEV(inode));
- err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
- if (err < 0) {
+ bdev = blkdev_get_by_dev(inode->i_rdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (IS_ERR(bdev)) {
+ err = PTR_ERR(bdev);
bdev = NULL;
goto out;
}
@@ -528,8 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
* freely but in fact, IO is going on so finally could cause
* use-after-free when the IO is really done.
*/
- zram->disk->queue->backing_dev_info->capabilities &=
- ~BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_wb_devops;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1216,10 +1218,11 @@ out:
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio *bio, bool partial_io)
{
- int ret;
+ struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
+ int ret;
zram_slot_lock(zram, index);
if (zram_test_flag(zram, index, ZRAM_WB)) {
@@ -1250,6 +1253,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
size = zram_get_obj_size(zram, index);
+ if (size != PAGE_SIZE)
+ zstrm = zcomp_stream_get(zram->comp);
+
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
@@ -1257,8 +1263,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
ret = 0;
} else {
- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
-
dst = kmap_atomic(page);
ret = zcomp_decompress(zstrm, src, size, dst);
kunmap_atomic(dst);
@@ -1268,7 +1272,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
zram_slot_unlock(zram, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret))
+ if (WARN_ON(ret))
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
return ret;
@@ -1739,7 +1743,7 @@ static ssize_t disksize_store(struct device *dev,
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- revalidate_disk(zram->disk);
+ revalidate_disk_size(zram->disk, true);
up_write(&zram->init_lock);
return len;
@@ -1786,7 +1790,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk(zram->disk);
+ revalidate_disk_size(zram->disk, true);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
@@ -1819,6 +1823,13 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static const struct block_device_operations zram_wb_devops = {
+ .open = zram_open,
+ .submit_bio = zram_submit_bio,
+ .swap_slot_free_notify = zram_slot_free_notify,
+ .owner = THIS_MODULE
+};
+
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
@@ -1946,8 +1957,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
- zram->disk->queue->backing_dev_info->capabilities |=
- (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
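
The zram changes replace the BDI_CAP_SYNCHRONOUS_IO bit-twiddling with two block_device_operations tables and simply swap disk->fops when a writeback device is attached or torn down, so the capability is implied by which operations the disk exposes. The shape of that technique, as a sketch with hypothetical names:

/* Sketch of swapping fops tables at runtime; foo_* are hypothetical. */
#include <linux/module.h>
#include <linux/blkdev.h>

static const struct block_device_operations foo_devops = {
	.owner = THIS_MODULE,
	/* full set of optional methods goes here */
};

static const struct block_device_operations foo_wb_devops = {
	.owner = THIS_MODULE,
	/* a reduced table: omitting an optional method disables the
	 * corresponding fast path, replacing a capability flag
	 */
};

static void foo_set_writeback(struct gendisk *disk, bool writeback)
{
	disk->fops = writeback ? &foo_wb_devops : &foo_devops;
}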
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 4ce270513695..759d7828931d 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -212,19 +212,16 @@ static int ath3k_load_firmware(struct usb_device *udev,
BT_DBG("udev %p", udev);
- pipe = usb_sndctrlpipe(udev, 0);
-
send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
if (!send_buf) {
BT_ERR("Can't allocate memory chunk for firmware");
return -ENOMEM;
}
- memcpy(send_buf, firmware->data, FW_HDR_SIZE);
- err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
- 0, 0, send_buf, FW_HDR_SIZE,
- USB_CTRL_SET_TIMEOUT);
- if (err < 0) {
+ err = usb_control_msg_send(udev, 0, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
+ 0, 0, firmware->data, FW_HDR_SIZE,
+ USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
+ if (err) {
BT_ERR("Can't change to loading configuration err");
goto error;
}
@@ -259,44 +256,19 @@ error:
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
- int ret, pipe = 0;
- char *buf;
-
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pipe = usb_rcvctrlpipe(udev, 0);
- ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
- buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
-
- *state = *buf;
- kfree(buf);
-
- return ret;
+ return usb_control_msg_recv(udev, 0, ATH3K_GETSTATE,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ state, 1, USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
- int ret, pipe = 0;
- struct ath3k_version *buf;
- const int size = sizeof(*buf);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pipe = usb_rcvctrlpipe(udev, 0);
- ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
- buf, size, USB_CTRL_SET_TIMEOUT);
-
- memcpy(version, buf, size);
- kfree(buf);
-
- return ret;
+ return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ version, sizeof(*version), USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
}
static int ath3k_load_fwfile(struct usb_device *udev,
@@ -316,13 +288,11 @@ static int ath3k_load_fwfile(struct usb_device *udev,
}
size = min_t(uint, count, FW_HDR_SIZE);
- memcpy(send_buf, firmware->data, size);
- pipe = usb_sndctrlpipe(udev, 0);
- ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD,
- USB_TYPE_VENDOR, 0, 0, send_buf,
- size, USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ ret = usb_control_msg_send(udev, 0, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0,
+ firmware->data, size, USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+ if (ret) {
BT_ERR("Can't change to loading configuration err");
kfree(send_buf);
return ret;
@@ -355,23 +325,19 @@ static int ath3k_load_fwfile(struct usb_device *udev,
return 0;
}
-static int ath3k_switch_pid(struct usb_device *udev)
+static void ath3k_switch_pid(struct usb_device *udev)
{
- int pipe = 0;
-
- pipe = usb_sndctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID,
- USB_TYPE_VENDOR, 0, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
+ usb_control_msg_send(udev, 0, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
}
static int ath3k_set_normal_mode(struct usb_device *udev)
{
unsigned char fw_state;
- int pipe = 0, ret;
+ int ret;
ret = ath3k_get_state(udev, &fw_state);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Can't get state to change to normal mode err");
return ret;
}
@@ -381,10 +347,9 @@ static int ath3k_set_normal_mode(struct usb_device *udev)
return 0;
}
- pipe = usb_sndctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE,
- USB_TYPE_VENDOR, 0, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
+ return usb_control_msg_send(udev, 0, ATH3K_SET_NORMAL_MODE,
+ USB_TYPE_VENDOR, 0, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
}
static int ath3k_load_patch(struct usb_device *udev)
@@ -397,7 +362,7 @@ static int ath3k_load_patch(struct usb_device *udev)
int ret;
ret = ath3k_get_state(udev, &fw_state);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Can't get state to change to load ram patch err");
return ret;
}
@@ -408,7 +373,7 @@ static int ath3k_load_patch(struct usb_device *udev)
}
ret = ath3k_get_version(udev, &fw_version);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Can't get version to change to load ram patch err");
return ret;
}
@@ -449,13 +414,13 @@ static int ath3k_load_syscfg(struct usb_device *udev)
int clk_value, ret;
ret = ath3k_get_state(udev, &fw_state);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Can't get state to change to load configuration err");
return -EBUSY;
}
ret = ath3k_get_version(udev, &fw_version);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Can't get version to change to load ram patch err");
return ret;
}
@@ -529,7 +494,7 @@ static int ath3k_probe(struct usb_interface *intf,
return ret;
}
ret = ath3k_set_normal_mode(udev);
- if (ret < 0) {
+ if (ret) {
BT_ERR("Set normal mode failed");
return ret;
}
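
Throughout ath3k the usb_control_msg() call sites shrink because usb_control_msg_send()/usb_control_msg_recv() allocate a DMA-safe bounce buffer internally, accept data on the caller's stack, and return 0 on success or a negative errno (short transfers count as errors), hence the switch from "if (ret < 0)" to "if (ret)". A sketch of a read helper in the same style, with an invented request code:

/* Sketch; FOO_GETSTATE is a made-up vendor request, not an ath3k one. */
#include <linux/usb.h>

#define FOO_GETSTATE 0x05

static int foo_get_state(struct usb_device *udev, unsigned char *state)
{
	/* state may live on the caller's stack: the core copies the
	 * result through its own heap buffer, unlike usb_control_msg().
	 */
	return usb_control_msg_recv(udev, 0, FOO_GETSTATE,
				    USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
				    state, 1, USB_CTRL_SET_TIMEOUT,
				    GFP_KERNEL);
}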
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 5fa5be3c5598..88ce5f0ffc4b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -18,7 +18,11 @@
#define VERSION "0.1"
-#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+#define BDADDR_INTEL (&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+#define RSA_HEADER_LEN 644
+#define CSS_HEADER_OFFSET 8
+#define ECDSA_OFFSET 644
+#define ECDSA_HEADER_LEN 320
int btintel_check_bdaddr(struct hci_dev *hdev)
{
@@ -360,6 +364,144 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
}
EXPORT_SYMBOL_GPL(btintel_read_version);
+void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
+{
+ const char *variant;
+
+ switch (version->img_type) {
+ case 0x01:
+ variant = "Bootloader";
+ bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
+ bt_dev_info(hdev, "Secure boot is %s",
+ version->secure_boot ? "enabled" : "disabled");
+ bt_dev_info(hdev, "OTP lock is %s",
+ version->otp_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "API lock is %s",
+ version->api_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Debug lock is %s",
+ version->debug_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+ version->min_fw_build_nn, version->min_fw_build_cw,
+ 2000 + version->min_fw_build_yy);
+ break;
+ case 0x03:
+ variant = "Firmware";
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
+ goto done;
+ }
+
+ bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
+ 2000 + (version->timestamp >> 8), version->timestamp & 0xff,
+ version->build_type, version->build_num);
+
+done:
+ return;
+}
+EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
+
+int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
+{
+ struct sk_buff *skb;
+ const u8 param[1] = { 0xFF };
+
+ if (!version)
+ return -EINVAL;
+
+ skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->data[0]) {
+ bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
+ skb->data[0]);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ /* Consume Command Complete Status field */
+ skb_pull(skb, 1);
+
+ /* Event parameters contain multiple TLVs. Read each of them
+ * and only keep the required data. Also, use the existing legacy
+ * version fields like hw_platform, hw_variant, and fw_variant
+ * to keep the existing setup flow.
+ */
+ while (skb->len) {
+ struct intel_tlv *tlv;
+
+ tlv = (struct intel_tlv *)skb->data;
+ switch (tlv->type) {
+ case INTEL_TLV_CNVI_TOP:
+ version->cnvi_top = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVR_TOP:
+ version->cnvr_top = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVI_BT:
+ version->cnvi_bt = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVR_BT:
+ version->cnvr_bt = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_DEV_REV_ID:
+ version->dev_rev_id = get_unaligned_le16(tlv->val);
+ break;
+ case INTEL_TLV_IMAGE_TYPE:
+ version->img_type = tlv->val[0];
+ break;
+ case INTEL_TLV_TIME_STAMP:
+ version->timestamp = get_unaligned_le16(tlv->val);
+ break;
+ case INTEL_TLV_BUILD_TYPE:
+ version->build_type = tlv->val[0];
+ break;
+ case INTEL_TLV_BUILD_NUM:
+ version->build_num = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_SECURE_BOOT:
+ version->secure_boot = tlv->val[0];
+ break;
+ case INTEL_TLV_OTP_LOCK:
+ version->otp_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_API_LOCK:
+ version->api_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_DEBUG_LOCK:
+ version->debug_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_MIN_FW:
+ version->min_fw_build_nn = tlv->val[0];
+ version->min_fw_build_cw = tlv->val[1];
+ version->min_fw_build_yy = tlv->val[2];
+ break;
+ case INTEL_TLV_LIMITED_CCE:
+ version->limited_cce = tlv->val[0];
+ break;
+ case INTEL_TLV_SBE_TYPE:
+ version->sbe_type = tlv->val[0];
+ break;
+ case INTEL_TLV_OTP_BDADDR:
+ memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
+ break;
+ default:
+ /* Ignore rest of information */
+ break;
+ }
+ /* Consume the current tlv and move to the next */
+ skb_pull(skb, tlv->len + sizeof(*tlv));
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_version_tlv);
+
/* ------- REGMAP IBT SUPPORT ------- */
#define IBT_REG_MODE_8BIT 0x00
@@ -626,12 +768,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
-int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
- u32 *boot_param)
+static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
+ const struct firmware *fw)
{
int err;
- const u8 *fw_ptr;
- u32 frag_len;
/* Start the firmware download transaction with the Init fragment
* represented by the 128 bytes of CSS header.
@@ -660,8 +800,56 @@ int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
goto done;
}
- fw_ptr = fw->data + 644;
+done:
+ return err;
+}
+
+static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
+ const struct firmware *fw)
+{
+ int err;
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of the ECDSA CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
+ return err;
+ }
+
+ /* Send the 96 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
+ return err;
+ }
+
+ /* Send the 96 bytes of signature information from the firmware
+ * as the Sign fragment
+ */
+ err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware signature (%d)",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+static int btintel_download_firmware_payload(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param, size_t offset)
+{
+ int err;
+ const u8 *fw_ptr;
+ u32 frag_len;
+
+ fw_ptr = fw->data + offset;
frag_len = 0;
+ err = -EINVAL;
while (fw_ptr - fw->data < fw->size) {
struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
@@ -707,8 +895,99 @@ int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
done:
return err;
}
+
+int btintel_download_firmware(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param)
+{
+ int err;
+
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ return btintel_download_firmware_payload(hdev, fw, boot_param,
+ RSA_HEADER_LEN);
+}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
+int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw, u32 *boot_param,
+ u8 hw_variant, u8 sbe_type)
+{
+ int err;
+ u32 css_header_ver;
+
+ /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
+ * only the RSA secure boot engine. Hence, the corresponding sfi file
+ * will have an RSA header of 644 bytes followed by the Command Buffer.
+ *
+ * iBT hardware variants 0x17, 0x18 onwards support both the RSA and
+ * ECDSA secure boot engines. As a result, the corresponding sfi file
+ * will have an RSA header of 644 bytes and an ECDSA header of 320
+ * bytes, followed by the Command Buffer.
+ *
+ * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
+ * version: RSA (0x00010000), ECDSA (0x00020000).
+ */
+ css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
+ if (css_header_ver != 0x00010000) {
+ bt_dev_err(hdev, "Invalid CSS Header version");
+ return -EINVAL;
+ }
+
+ if (hw_variant <= 0x14) {
+ if (sbe_type != 0x00) {
+ bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
+ hw_variant);
+ return -EINVAL;
+ }
+
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw, boot_param, RSA_HEADER_LEN);
+ if (err)
+ return err;
+ } else if (hw_variant >= 0x17) {
+ /* Check if CSS header for ECDSA follows the RSA header */
+ if (fw->data[ECDSA_OFFSET] != 0x06)
+ return -EINVAL;
+
+ /* Check if the CSS Header version is ECDSA(0x00020000) */
+ css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET);
+ if (css_header_ver != 0x00020000) {
+ bt_dev_err(hdev, "Invalid CSS Header version");
+ return -EINVAL;
+ }
+
+ if (sbe_type == 0x00) {
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw,
+ boot_param,
+ RSA_HEADER_LEN + ECDSA_HEADER_LEN);
+ if (err)
+ return err;
+ } else if (sbe_type == 0x01) {
+ err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw,
+ boot_param,
+ RSA_HEADER_LEN + ECDSA_HEADER_LEN);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);
+
void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
struct intel_reset params;
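
btintel_read_version_tlv() walks the event payload as a packed type/length/value stream, consuming tlv->len + sizeof(*tlv) bytes per record and ignoring unknown types so newer firmware can add fields safely. A standalone sketch of the same walk over a byte buffer (types and values invented):

/* Standalone TLV-walk sketch; the types and payload are fabricated. */
#include <stdint.h>
#include <stdio.h>

struct tlv {
	uint8_t type;
	uint8_t len;
	uint8_t val[];
} __attribute__((packed));

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* one 4-byte LE record (type 0x10), one 1-byte record (type 0x1d) */
	const uint8_t buf[] = { 0x10, 4, 0x78, 0x56, 0x34, 0x12,
				0x1d, 1, 0x02 };
	size_t off = 0;

	while (off + sizeof(struct tlv) <= sizeof(buf)) {
		const struct tlv *t = (const void *)(buf + off);

		if (off + sizeof(*t) + t->len > sizeof(buf))
			break;	/* truncated record: stop parsing */
		switch (t->type) {
		case 0x10:
			printf("cnvi_top: 0x%08x\n", (unsigned)get_le32(t->val));
			break;
		case 0x1d:
			printf("sbe_type: %u\n", t->val[0]);
			break;
		default:
			break;	/* skip unknown types, as the driver does */
		}
		off += sizeof(*t) + t->len;
	}
	return 0;
}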
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 08e20606fb58..78cc64b42b30 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -6,6 +6,72 @@
* Copyright (C) 2015 Intel Corporation
*/
+/* List of tlv type */
+enum {
+ INTEL_TLV_CNVI_TOP = 0x10,
+ INTEL_TLV_CNVR_TOP,
+ INTEL_TLV_CNVI_BT,
+ INTEL_TLV_CNVR_BT,
+ INTEL_TLV_CNVI_OTP,
+ INTEL_TLV_CNVR_OTP,
+ INTEL_TLV_DEV_REV_ID,
+ INTEL_TLV_USB_VENDOR_ID,
+ INTEL_TLV_USB_PRODUCT_ID,
+ INTEL_TLV_PCIE_VENDOR_ID,
+ INTEL_TLV_PCIE_DEVICE_ID,
+ INTEL_TLV_PCIE_SUBSYSTEM_ID,
+ INTEL_TLV_IMAGE_TYPE,
+ INTEL_TLV_TIME_STAMP,
+ INTEL_TLV_BUILD_TYPE,
+ INTEL_TLV_BUILD_NUM,
+ INTEL_TLV_FW_BUILD_PRODUCT,
+ INTEL_TLV_FW_BUILD_HW,
+ INTEL_TLV_FW_STEP,
+ INTEL_TLV_BT_SPEC,
+ INTEL_TLV_MFG_NAME,
+ INTEL_TLV_HCI_REV,
+ INTEL_TLV_LMP_SUBVER,
+ INTEL_TLV_OTP_PATCH_VER,
+ INTEL_TLV_SECURE_BOOT,
+ INTEL_TLV_KEY_FROM_HDR,
+ INTEL_TLV_OTP_LOCK,
+ INTEL_TLV_API_LOCK,
+ INTEL_TLV_DEBUG_LOCK,
+ INTEL_TLV_MIN_FW,
+ INTEL_TLV_LIMITED_CCE,
+ INTEL_TLV_SBE_TYPE,
+ INTEL_TLV_OTP_BDADDR,
+ INTEL_TLV_UNLOCKED_STATE
+};
+
+struct intel_tlv {
+ u8 type;
+ u8 len;
+ u8 val[];
+} __packed;
+
+struct intel_version_tlv {
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u32 cnvi_bt;
+ u32 cnvr_bt;
+ u16 dev_rev_id;
+ u8 img_type;
+ u16 timestamp;
+ u8 build_type;
+ u32 build_num;
+ u8 secure_boot;
+ u8 otp_lock;
+ u8 api_lock;
+ u8 debug_lock;
+ u8 min_fw_build_nn;
+ u8 min_fw_build_cw;
+ u8 min_fw_build_yy;
+ u8 limited_cce;
+ u8 sbe_type;
+ bdaddr_t otp_bd_addr;
+};
+
struct intel_version {
u8 status;
u8 hw_platform;
@@ -77,12 +143,14 @@ int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version);
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
@@ -91,6 +159,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
+int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param, u8 hw_variant,
+ u8 sbe_type);
void btintel_reset_to_bootloader(struct hci_dev *hdev);
int btintel_read_debug_features(struct hci_dev *hdev,
struct intel_debug_features *features);
@@ -137,6 +209,11 @@ static inline void btintel_version_info(struct hci_dev *hdev,
{
}
+static inline void btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version)
+{
+}
+
static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
u32 plen, const void *param)
{
@@ -165,6 +242,12 @@ static inline int btintel_read_version(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
+static inline int btintel_read_version_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
u16 opcode_read,
u16 opcode_write)
@@ -191,6 +274,14 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
return -EOPNOTSUPP;
}
+static inline int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param,
+ u8 hw_variant, u8 sbe_type)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
}
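
The header additions follow the usual pattern for optional drivers: real prototypes when the feature is built in, static inline stubs returning -EOPNOTSUPP otherwise, so call sites need no #ifdef. Reduced to a sketch with a hypothetical symbol:

/* Sketch of the stub pattern; CONFIG_FOO and foo_dev are hypothetical. */
#include <linux/errno.h>

struct foo_dev;

#if IS_ENABLED(CONFIG_FOO)
int foo_read_version_tlv(struct foo_dev *fdev);
#else
static inline int foo_read_version_tlv(struct foo_dev *fdev)
{
	return -EOPNOTSUPP;	/* compiled out: callers get a clean error */
}
#endif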
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d15fd5be0216..33d58b30c5ac 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -215,30 +215,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = {
.fw_dump_end = 0xea,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
- .cfg = 0x00,
- .host_int_mask = 0x08,
- .host_intstatus = 0x0c,
- .card_status = 0x5c,
- .sq_read_base_addr_a0 = 0xf8,
- .sq_read_base_addr_a1 = 0xf9,
- .card_revision = 0xc8,
- .card_fw_status0 = 0xe8,
- .card_fw_status1 = 0xe9,
- .card_rx_len = 0xea,
- .card_rx_unit = 0xeb,
- .io_port_0 = 0xe4,
- .io_port_1 = 0xe5,
- .io_port_2 = 0xe6,
- .int_read_to_clear = true,
- .host_int_rsr = 0x04,
- .card_misc_cfg = 0xD8,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
-};
-
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_89xx = {
.cfg = 0x00,
.host_int_mask = 0x08,
.host_intstatus = 0x0c,
@@ -261,29 +238,6 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
.fw_dump_end = 0xf8,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
- .cfg = 0x00,
- .host_int_mask = 0x08,
- .host_intstatus = 0x0c,
- .card_status = 0x5c,
- .sq_read_base_addr_a0 = 0xf8,
- .sq_read_base_addr_a1 = 0xf9,
- .card_revision = 0xc8,
- .card_fw_status0 = 0xe8,
- .card_fw_status1 = 0xe9,
- .card_rx_len = 0xea,
- .card_rx_unit = 0xeb,
- .io_port_0 = 0xe4,
- .io_port_1 = 0xe5,
- .io_port_2 = 0xe6,
- .int_read_to_clear = true,
- .host_int_rsr = 0x04,
- .card_misc_cfg = 0xD8,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
-};
-
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@@ -332,7 +286,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.helper = NULL,
.firmware = "mrvl/sdsd8977_combo_v2.bin",
- .reg = &btmrvl_reg_8977,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
@@ -341,7 +295,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
.helper = NULL,
.firmware = "mrvl/sd8987_uapsta.bin",
- .reg = &btmrvl_reg_8987,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
@@ -350,7 +304,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sdsd8997_combo_v4.bin",
- .reg = &btmrvl_reg_8997,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index c7ab7a23bd67..ba45c59bd9f3 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -496,7 +496,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
sdio_claim_host(bdev->func);
/* Disable interrupt */
- sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+ sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
@@ -530,7 +530,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
}
/* Enable interrupt */
- sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
+ sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8d2608ddfd08..1005b6e8ff74 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,6 +59,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000
#define BTUSB_VALID_LE_STATES 0x800000
+#define BTUSB_QCA_WCN6855 0x1000000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -254,24 +255,46 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
- { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+
+ /* QCA WCN6855 chipset */
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -2338,10 +2361,10 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
static int btusb_intel_download_firmware(struct hci_dev *hdev,
struct intel_version *ver,
- struct intel_boot_params *params)
+ struct intel_boot_params *params,
+ u32 *boot_param)
{
const struct firmware *fw;
- u32 boot_param;
char fwname[64];
int err;
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2479,7 +2502,7 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
set_bit(BTUSB_DOWNLOADING, &data->flags);
/* Start firmware downloading and get boot parameter */
- err = btintel_download_firmware(hdev, fw, &boot_param);
+ err = btintel_download_firmware(hdev, fw, boot_param);
if (err < 0) {
/* When FW download fails, send Intel Reset to retry
* FW download.
@@ -2561,7 +2584,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return err;
}
- err = btusb_intel_download_firmware(hdev, &ver, &params);
+ err = btusb_intel_download_firmware(hdev, &ver, &params, &boot_param);
if (err)
return err;
@@ -2896,6 +2919,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
kfree(dr);
+ usb_free_urb(urb);
return -ENOMEM;
}
@@ -3390,6 +3414,27 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
return 0;
}
+static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 buf[6];
+ long ret;
+
+ memcpy(buf, bdaddr, sizeof(bdaddr_t));
+
+ skb = __hci_cmd_sync_ev(hdev, 0xfc14, sizeof(buf), buf,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "Change address command failed (%ld)", ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
#define QCA_DFU_PACKET_LEN 4096
#define QCA_GET_TARGET_VERSION 0x09
@@ -3409,7 +3454,8 @@ struct qca_version {
} __packed;
struct qca_rampatch_version {
- __le16 rom_version;
+ __le16 rom_version_high;
+ __le16 rom_version_low;
__le16 patch_version;
} __packed;
@@ -3421,12 +3467,14 @@ struct qca_device_info {
};
static const struct qca_device_info qca_devices_table[] = {
- { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
- { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
- { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
- { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
- { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
- { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
+ { 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
+ { 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
+ { 0x00000200, 28, 4, 16 }, /* Rome 2.0 */
+ { 0x00000201, 28, 4, 16 }, /* Rome 2.1 */
+ { 0x00000300, 28, 4, 16 }, /* Rome 3.0 */
+ { 0x00000302, 28, 4, 16 }, /* Rome 3.2 */
+ { 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */
+ { 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */
};
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
@@ -3528,8 +3576,8 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
- u32 ver_rom, ver_patch;
- u16 rver_rom, rver_patch;
+ u32 ver_rom, ver_patch, rver_rom;
+ u16 rver_rom_low, rver_rom_high, rver_patch;
char fwname[64];
int err;
@@ -3548,9 +3596,16 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
bt_dev_info(hdev, "using rampatch file: %s", fwname);
rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
- rver_rom = le16_to_cpu(rver->rom_version);
+ rver_rom_low = le16_to_cpu(rver->rom_version_low);
rver_patch = le16_to_cpu(rver->patch_version);
+ if (ver_rom & ~0xffffU) {
+ rver_rom_high = le16_to_cpu(rver->rom_version_high);
+ rver_rom = le32_to_cpu(rver_rom_high << 16 | rver_rom_low);
+ } else {
+ rver_rom = rver_rom_low;
+ }
+
bt_dev_info(hdev, "QCA: patch rome 0x%x build 0x%x, "
"firmware rome 0x%x build 0x%x",
rver_rom, rver_patch, ver_rom, ver_patch);
@@ -3624,9 +3679,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
return err;
ver_rom = le32_to_cpu(ver.rom_version);
- /* Don't care about high ROM versions */
- if (ver_rom & ~0xffffU)
- return 0;
for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
if (ver_rom == qca_devices_table[i].rom_version)
@@ -4062,6 +4114,13 @@ static int btusb_probe(struct usb_interface *intf,
btusb_check_needs_reset_resume(intf);
}
+ if (id->driver_info & BTUSB_QCA_WCN6855) {
+ data->setup_on_usb = btusb_setup_qca;
+ hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
+ hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ }
+
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;
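
WCN6855 ROM versions no longer fit in 16 bits, so the rampatch header gains a high half and btusb recombines the two halves only when the controller itself reported a high ROM word. The recombination in isolation, as a standalone sketch with invented values:

/* Standalone sketch; the version numbers are fabricated. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ver_rom = 0x00130200;	/* ROM version from the controller */
	uint16_t rver_rom_high = 0x0013; /* from the rampatch header */
	uint16_t rver_rom_low = 0x0200;
	uint32_t rver_rom;

	/* Only controllers with a nonzero high word carry the extra field. */
	if (ver_rom & ~0xffffU)
		rver_rom = (uint32_t)rver_rom_high << 16 | rver_rom_low;
	else
		rver_rom = rver_rom_low;

	printf("rampatch rom version: 0x%08x\n", (unsigned)rver_rom);
	return 0;
}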
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index e41854e0d79a..981d96cc7695 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -793,8 +793,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
- set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
-
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, h5);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index f1299da6eed8..b20a40fab83e 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -288,7 +288,7 @@ static irqreturn_t intel_irq(int irq, void *dev_id)
static int intel_set_power(struct hci_uart *hu, bool powered)
{
- struct list_head *p;
+ struct intel_device *idev;
int err = -ENODEV;
if (!hu->tty->dev)
@@ -296,10 +296,7 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
/* tty device and pdev device should share the same parent
* which is the UART port.
*/
@@ -362,19 +359,16 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
static void intel_busy_work(struct work_struct *work)
{
- struct list_head *p;
struct intel_data *intel = container_of(work, struct intel_data,
busy_work);
+ struct intel_device *idev;
if (!intel->hu->tty->dev)
return;
/* Link is busy, delay the suspend */
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get(&idev->pdev->dev);
pm_runtime_mark_last_busy(&idev->pdev->dev);
@@ -533,7 +527,7 @@ static int intel_setup(struct hci_uart *hu)
struct sk_buff *skb;
struct intel_version ver;
struct intel_boot_params params;
- struct list_head *p;
+ struct intel_device *idev;
const struct firmware *fw;
char fwname[64];
u32 boot_param;
@@ -693,14 +687,11 @@ static int intel_setup(struct hci_uart *hu)
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
+ ver.hw_variant, le16_to_cpu(params.dev_revid));
break;
case 0x12: /* ThP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
+ ver.hw_variant, ver.hw_revision, ver.fw_revision);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -722,14 +713,11 @@ static int intel_setup(struct hci_uart *hu)
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
+ ver.hw_variant, le16_to_cpu(params.dev_revid));
break;
case 0x12: /* ThP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
+ ver.hw_variant, ver.hw_revision, ver.fw_revision);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -839,13 +827,11 @@ done:
* until further LPM TX notification.
*/
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *dev = list_entry(p, struct intel_device,
- list);
+ list_for_each_entry(idev, &intel_device_list, list) {
if (!hu->tty->dev)
break;
- if (hu->tty->dev->parent == dev->pdev->dev.parent) {
- if (device_may_wakeup(&dev->pdev->dev)) {
+ if (hu->tty->dev->parent == idev->pdev->dev.parent) {
+ if (device_may_wakeup(&idev->pdev->dev)) {
set_bit(STATE_LPM_ENABLED, &intel->flags);
set_bit(STATE_TX_ACTIVE, &intel->flags);
}
@@ -999,7 +985,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct intel_data *intel = hu->priv;
- struct list_head *p;
+ struct intel_device *idev;
BT_DBG("hu %p skb %p", hu, skb);
@@ -1010,10 +996,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
* completed before enqueuing any packet.
*/
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
if (hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get_sync(&idev->pdev->dev);
pm_runtime_mark_last_busy(&idev->pdev->dev);
@@ -1076,7 +1059,8 @@ static const struct hci_uart_proto intel_proto = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id intel_acpi_match[] = {
{ "INT33E1", 0 },
- { },
+ { "INT33E3", 0 },
+ { }
};
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
#endif
@@ -1138,9 +1122,9 @@ static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };
static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
- { "reset-gpios", &reset_gpios, 1 },
- { "host-wake-gpios", &host_wake_gpios, 1 },
- { },
+ { "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { }
};
static int intel_probe(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 85a30fb9177b..f83d67eafc9f 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
percpu_up_write(&hu->proto_lock);
+ cancel_work_sync(&hu->init_ready);
cancel_work_sync(&hu->write_work);
if (hdev) {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 20e1dedbc58c..244b8feba523 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -693,8 +693,6 @@ static int qca_close(struct hci_uart *hu)
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
- qca_power_shutdown(hu);
-
kfree_skb(qca->rx_skb);
hu->priv = NULL;
@@ -2007,8 +2005,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("Rome serdev registration failed");
- if (qcadev->susclk)
- clk_disable_unprepare(qcadev->susclk);
+ clk_disable_unprepare(qcadev->susclk);
return err;
}
}
@@ -2032,8 +2029,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct qca_power *power = qcadev->bt_power;
- if (qca_is_wcn399x(qcadev->btsoc_type))
+ if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 7b233312e723..ef96ad06fa54 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -113,8 +113,22 @@ static int hci_uart_flush(struct hci_dev *hdev)
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ int err;
+
BT_DBG("%s %p", hdev->name, hdev);
+ /* When the HCI_QUIRK_NON_PERSISTENT_SETUP quirk is set by the
+ * driver, the BT SoC is completely turned off during BT off, so
+ * the UART port must be opened again on the next BT on.
+ */
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ err = serdev_device_open(hu->serdev);
+ if (err)
+ return err;
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ }
+
/* Undo clearing this from hci_uart_close() */
hdev->flush = hci_uart_flush;
@@ -124,11 +138,25 @@ static int hci_uart_open(struct hci_dev *hdev)
/* Close device */
static int hci_uart_close(struct hci_dev *hdev)
{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
BT_DBG("hdev %p", hdev);
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ return 0;
+
hci_uart_flush(hdev);
hdev->flush = NULL;
+ /* When the HCI_QUIRK_NON_PERSISTENT_SETUP quirk is set by the
+ * driver, the BT SoC is completely powered off during BT off;
+ * holding the port open may drain the battery.
+ */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ serdev_device_close(hu->serdev);
+ }
+
return 0;
}
@@ -354,7 +382,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ cancel_work_sync(&hu->init_ready);
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
hci_free_dev(hdev);
@@ -362,6 +390,10 @@ void hci_uart_unregister_device(struct hci_uart *hu)
cancel_work_sync(&hu->write_work);
hu->proto->close(hu);
- serdev_device_close(hu->serdev);
+
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ serdev_device_close(hu->serdev);
+ }
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
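
hci_serdev now opens the underlying serdev lazily and remembers that in HCI_UART_PROTO_READY, so a SoC with HCI_QUIRK_NON_PERSISTENT_SETUP can have its port genuinely closed while BT is off instead of draining the battery. The open/close pairing reduces to a flag-guarded toggle, sketched here with hypothetical names:

/* Sketch of the flag-guarded open/close pairing; foo_* are hypothetical. */
#include <linux/bitops.h>
#include <linux/serdev.h>

#define FOO_PROTO_READY 0

struct foo_uart {
	struct serdev_device *serdev;
	unsigned long flags;
};

static int foo_open(struct foo_uart *fu)
{
	int err;

	if (!test_bit(FOO_PROTO_READY, &fu->flags)) {
		err = serdev_device_open(fu->serdev);
		if (err)
			return err;
		set_bit(FOO_PROTO_READY, &fu->flags);
	}
	return 0;
}

static void foo_close(struct foo_uart *fu, bool non_persistent)
{
	/* Close the port only for SoCs that power off completely;
	 * otherwise keep it open across BT off/on cycles.
	 */
	if (non_persistent && test_and_clear_bit(FOO_PROTO_READY, &fu->flags))
		serdev_device_close(fu->serdev);
}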
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index ec1004c858b8..7355fa2cb439 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -30,8 +30,22 @@
#define ARB_ERR_CAP_STATUS_WRITE (1 << 1)
#define ARB_ERR_CAP_STATUS_VALID (1 << 0)
+#define ARB_BP_CAP_CLEAR (1 << 0)
+#define ARB_BP_CAP_STATUS_PROT_SHIFT 14
+#define ARB_BP_CAP_STATUS_TYPE (1 << 13)
+#define ARB_BP_CAP_STATUS_RSP_SHIFT 10
+#define ARB_BP_CAP_STATUS_MASK GENMASK(1, 0)
+#define ARB_BP_CAP_STATUS_BS_SHIFT 2
+#define ARB_BP_CAP_STATUS_WRITE (1 << 1)
+#define ARB_BP_CAP_STATUS_VALID (1 << 0)
+
enum {
ARB_TIMER,
+ ARB_BP_CAP_CLR,
+ ARB_BP_CAP_HI_ADDR,
+ ARB_BP_CAP_ADDR,
+ ARB_BP_CAP_STATUS,
+ ARB_BP_CAP_MASTER,
ARB_ERR_CAP_CLR,
ARB_ERR_CAP_HI_ADDR,
ARB_ERR_CAP_ADDR,
@@ -41,6 +55,11 @@ enum {
static const int gisb_offsets_bcm7038[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = -1,
[ARB_ERR_CAP_CLR] = 0x0c4,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0c8,
@@ -50,6 +69,11 @@ static const int gisb_offsets_bcm7038[] = {
static const int gisb_offsets_bcm7278[] = {
[ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x01c,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x220,
+ [ARB_BP_CAP_STATUS] = 0x230,
+ [ARB_BP_CAP_MASTER] = 0x234,
[ARB_ERR_CAP_CLR] = 0x7f8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x7e0,
@@ -59,6 +83,11 @@ static const int gisb_offsets_bcm7278[] = {
static const int gisb_offsets_bcm7400[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x0b8,
+ [ARB_BP_CAP_STATUS] = 0x0c0,
+ [ARB_BP_CAP_MASTER] = 0x0c4,
[ARB_ERR_CAP_CLR] = 0x0c8,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x0cc,
@@ -68,6 +97,11 @@ static const int gisb_offsets_bcm7400[] = {
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
+ [ARB_BP_CAP_CLR] = 0x014,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x158,
+ [ARB_BP_CAP_STATUS] = 0x160,
+ [ARB_BP_CAP_MASTER] = 0x164,
[ARB_ERR_CAP_CLR] = 0x168,
[ARB_ERR_CAP_HI_ADDR] = -1,
[ARB_ERR_CAP_ADDR] = 0x16c,
@@ -77,6 +111,11 @@ static const int gisb_offsets_bcm7435[] = {
static const int gisb_offsets_bcm7445[] = {
[ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x010,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x1d8,
+ [ARB_BP_CAP_STATUS] = 0x1e0,
+ [ARB_BP_CAP_MASTER] = 0x1e4,
[ARB_ERR_CAP_CLR] = 0x7e4,
[ARB_ERR_CAP_HI_ADDR] = 0x7e8,
[ARB_ERR_CAP_ADDR] = 0x7ec,
@@ -125,6 +164,16 @@ static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
return value;
}
+static u64 gisb_read_bp_address(struct brcmstb_gisb_arb_device *gdev)
+{
+ u64 value;
+
+ value = gisb_read(gdev, ARB_BP_CAP_ADDR);
+ value |= (u64)gisb_read(gdev, ARB_BP_CAP_HI_ADDR) << 32;
+
+ return value;
+}
+
static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
{
int offset = gdev->gisb_offsets[reg];
@@ -210,8 +259,8 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
m_name = m_fmt;
}
- pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
- __func__, reason, arb_addr,
+ pr_crit("GISB: %s at 0x%llx [%c %s], core: %s\n",
+ reason, arb_addr,
cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
m_name);
@@ -259,6 +308,41 @@ static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t brcmstb_gisb_bp_handler(int irq, void *dev_id)
+{
+ struct brcmstb_gisb_arb_device *gdev = dev_id;
+ const char *m_name;
+ u32 bp_status;
+ u64 arb_addr;
+ u32 master;
+ char m_fmt[11];
+
+ bp_status = gisb_read(gdev, ARB_BP_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(bp_status & ARB_BP_CAP_STATUS_VALID))
+ return IRQ_HANDLED;
+
+ /* Read the address and master */
+ arb_addr = gisb_read_bp_address(gdev);
+ master = gisb_read(gdev, ARB_BP_CAP_MASTER);
+
+ m_name = brcmstb_gisb_master_to_str(gdev, master);
+ if (!m_name) {
+ snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master);
+ m_name = m_fmt;
+ }
+
+ pr_crit("GISB: breakpoint at 0x%llx [%c], core: %s\n",
+ arb_addr, bp_status & ARB_BP_CAP_STATUS_WRITE ? 'W' : 'R',
+ m_name);
+
+ /* clear the GISB error */
+ gisb_write(gdev, ARB_ERR_CAP_CLEAR, ARB_ERR_CAP_CLR);
+
+ return IRQ_HANDLED;
+}
+
/*
* Dump out gisb errors on die or panic.
*/
@@ -317,13 +401,14 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
struct brcmstb_gisb_arb_device *gdev;
const struct of_device_id *of_id;
struct resource *r;
- int err, timeout_irq, tea_irq;
+ int err, timeout_irq, tea_irq, bp_irq;
unsigned int num_masters, j = 0;
int i, first, last;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
timeout_irq = platform_get_irq(pdev, 0);
tea_irq = platform_get_irq(pdev, 1);
+ bp_irq = platform_get_irq(pdev, 2);
gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
if (!gdev)
@@ -356,6 +441,15 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
if (err < 0)
return err;
+ /* Interrupt is optional */
+ if (bp_irq > 0) {
+ err = devm_request_irq(&pdev->dev, bp_irq,
+ brcmstb_gisb_bp_handler, 0, pdev->name,
+ gdev);
+ if (err < 0)
+ return err;
+ }
+
/* If we do not have a valid mask, assume all masters are enabled */
if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
&gdev->valid_mask))
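
Each SoC variant in the GISB driver supplies a register-offset table indexed by one shared enum, with -1 marking registers that variant lacks; the accessors check the sentinel before touching hardware, so the common code never needs per-chip branches. The technique in isolation (hypothetical names):

/* Sketch of per-SoC offset tables with a -1 "absent" sentinel. */
#include <linux/io.h>

enum { FOO_REG_CTRL, FOO_REG_STATUS, FOO_REG_MAX };

struct foo_dev {
	void __iomem *base;
	const int *offsets;
};

static const int foo_offsets_chip_a[FOO_REG_MAX] = {
	[FOO_REG_CTRL]   = 0x008,
	[FOO_REG_STATUS] = -1,	/* register not present on this SoC */
};

static u32 foo_read(struct foo_dev *fdev, int reg)
{
	int offset = fdev->offsets[reg];

	if (offset < 0)
		return 0;	/* absent registers read back as zero */
	return ioread32(fdev->base + offset);
}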
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 2a473c09bc33..91dc015963a8 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -3,6 +3,7 @@
* Freescale data path resource container (DPRC) driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -80,9 +81,9 @@ static int __fsl_mc_device_remove(struct device *dev, void *data)
* the MC by removing devices that represent MC objects that have
* been dynamically removed in the physical DPRC.
*/
-static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
- struct fsl_mc_obj_desc *obj_desc_array,
- int num_child_objects_in_mc)
+void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc_array,
+ int num_child_objects_in_mc)
{
if (num_child_objects_in_mc != 0) {
/*
@@ -104,6 +105,7 @@ static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
__fsl_mc_device_remove);
}
}
+EXPORT_SYMBOL_GPL(dprc_remove_devices);
static int __fsl_mc_device_match(struct device *dev, void *data)
{
@@ -220,8 +222,8 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* dprc_scan_objects - Discover objects in a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
- * @total_irq_count: If argument is provided the function populates the
- * total number of IRQs created by objects in the DPRC.
+ * @alloc_interrupts: if true the function allocates the interrupt pool,
+ * otherwise the interrupt allocation is delayed
*
* Detects objects added and removed from a DPRC and synchronizes the
* state of the Linux bus driver, MC by adding and removing
@@ -236,7 +238,7 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
* of the device drivers for the non-allocatable devices.
*/
static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
- unsigned int *total_irq_count)
+ bool alloc_interrupts)
{
int num_child_objects;
int dprc_get_obj_failures;
@@ -317,22 +319,21 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
* Allocate IRQ's before binding the scanned devices with their
* respective drivers.
*/
- if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+ if (dev_get_msi_domain(&mc_bus_dev->dev)) {
if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
dev_warn(&mc_bus_dev->dev,
"IRQs needed (%u) exceed IRQs preallocated (%u)\n",
irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
}
- error = fsl_mc_populate_irq_pool(mc_bus,
- FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- if (error < 0)
- return error;
+ if (alloc_interrupts && !mc_bus->irq_resources) {
+ error = fsl_mc_populate_irq_pool(mc_bus_dev,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ return error;
+ }
}
- if (total_irq_count)
- *total_irq_count = irq_count;
-
dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
@@ -354,9 +355,10 @@ static int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
* bus driver with the actual state of the MC by adding and removing
* devices as appropriate.
*/
-static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
+int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
+ bool alloc_interrupts)
{
- int error;
+ int error = 0;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
fsl_mc_init_all_resource_pools(mc_bus_dev);
@@ -365,16 +367,12 @@ static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
* Discover objects in the DPRC:
*/
mutex_lock(&mc_bus->scan_mutex);
- error = dprc_scan_objects(mc_bus_dev, NULL);
+ error = dprc_scan_objects(mc_bus_dev, alloc_interrupts);
mutex_unlock(&mc_bus->scan_mutex);
- if (error < 0) {
- fsl_mc_cleanup_all_resource_pools(mc_bus_dev);
- return error;
- }
- return 0;
+ return error;
}
-
+EXPORT_SYMBOL_GPL(dprc_scan_container);
/**
* dprc_irq0_handler - Regular ISR for DPRC interrupt 0
*
@@ -434,9 +432,8 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
DPRC_IRQ_EVENT_OBJ_DESTROYED |
DPRC_IRQ_EVENT_OBJ_CREATED)) {
- unsigned int irq_count;
- error = dprc_scan_objects(mc_dev, &irq_count);
+ error = dprc_scan_objects(mc_dev, true);
if (error < 0) {
/*
* If the error is -ENXIO, we ignore it, as it indicates
@@ -451,12 +448,6 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
goto out;
}
-
- if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
- dev_warn(dev,
- "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
- irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
- }
}
out:
@@ -597,25 +588,24 @@ error_free_irqs:
}
/**
- * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ * dprc_setup - opens and creates a mc_io for DPRC
*
* @mc_dev: Pointer to fsl-mc device representing a DPRC
*
* It opens the physical DPRC in the MC.
- * It scans the DPRC to discover the MC objects contained in it.
- * It creates the interrupt pool for the MC bus associated with the DPRC.
- * It configures the interrupts for the DPRC device itself.
+ * It configures the DPRC portal used to communicate with the MC.
*/
-static int dprc_probe(struct fsl_mc_device *mc_dev)
+
+int dprc_setup(struct fsl_mc_device *mc_dev)
{
- int error;
- size_t region_size;
struct device *parent_dev = mc_dev->dev.parent;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct irq_domain *mc_msi_domain;
bool mc_io_created = false;
bool msi_domain_set = false;
u16 major_ver, minor_ver;
- struct irq_domain *mc_msi_domain;
+ size_t region_size;
+ int error;
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
@@ -690,37 +680,63 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_open;
}
- mutex_init(&mc_bus->scan_mutex);
+ return 0;
+
+error_cleanup_open:
+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dprc_setup);
+
+/**
+ * dprc_probe - callback invoked when a DPRC is being bound to this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing a DPRC
+ *
+ * It opens the physical DPRC in the MC.
+ * It scans the DPRC to discover the MC objects contained in it.
+ * It creates the interrupt pool for the MC bus associated with the DPRC.
+ * It configures the interrupts for the DPRC device itself.
+ */
+static int dprc_probe(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = dprc_setup(mc_dev);
+ if (error < 0)
+ return error;
/*
* Discover MC objects in DPRC object:
*/
- error = dprc_scan_container(mc_dev);
+ error = dprc_scan_container(mc_dev, true);
if (error < 0)
- goto error_cleanup_open;
+ goto dprc_cleanup;
/*
* Configure interrupt for the DPRC object associated with this MC bus:
*/
error = dprc_setup_irq(mc_dev);
if (error < 0)
- goto error_cleanup_open;
+ goto scan_cleanup;
dev_info(&mc_dev->dev, "DPRC device bound to driver");
return 0;
-error_cleanup_open:
- (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-
-error_cleanup_msi_domain:
- if (msi_domain_set)
- dev_set_msi_domain(&mc_dev->dev, NULL);
-
- if (mc_io_created) {
- fsl_destroy_mc_io(mc_dev->mc_io);
- mc_dev->mc_io = NULL;
- }
-
+scan_cleanup:
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+dprc_cleanup:
+ dprc_cleanup(mc_dev);
return error;
}
@@ -739,40 +755,39 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
}
/**
- * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ * dprc_cleanup - function that cleans up a DPRC
*
* @mc_dev: Pointer to fsl-mc device representing the DPRC
*
- * It removes the DPRC's child objects from Linux (not from the MC) and
- * closes the DPRC device in the MC.
- * It tears down the interrupts that were configured for the DPRC device.
+ * It closes the DPRC device in the MC.
* It destroys the interrupt pool associated with this MC bus.
*/
-static int dprc_remove(struct fsl_mc_device *mc_dev)
+int dprc_cleanup(struct fsl_mc_device *mc_dev)
{
int error;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ /* This function should be called only for DPRCs; it
+ * is an error to call it for regular objects.
+ */
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
- if (!mc_dev->mc_io)
- return -EINVAL;
-
- if (!mc_bus->irq_resources)
- return -EINVAL;
-
- if (dev_get_msi_domain(&mc_dev->dev))
- dprc_teardown_irq(mc_dev);
-
- device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
if (dev_get_msi_domain(&mc_dev->dev)) {
- fsl_mc_cleanup_irq_pool(mc_bus);
+ fsl_mc_cleanup_irq_pool(mc_dev);
dev_set_msi_domain(&mc_dev->dev, NULL);
}
fsl_mc_cleanup_all_resource_pools(mc_dev);
+ /* If this step fails, we cannot proceed with the cleanup, as there
+ * is no way of communicating with the firmware.
+ */
+ if (!mc_dev->mc_io) {
+ dev_err(&mc_dev->dev, "mc_io is NULL, teardown cannot be performed in firmware\n");
+ return -EINVAL;
+ }
+
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
@@ -782,6 +797,37 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
mc_dev->mc_io = NULL;
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dprc_cleanup);
+
+/**
+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver
+ *
+ * @mc_dev: Pointer to fsl-mc device representing the DPRC
+ *
+ * It removes the DPRC's child objects from Linux (not from the MC) and
+ * closes the DPRC device in the MC.
+ * It tears down the interrupts that were configured for the DPRC device.
+ * It destroys the interrupt pool associated with this MC bus.
+ */
+static int dprc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -EINVAL;
+
+ if (!mc_bus->irq_resources)
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ dprc_cleanup(mc_dev);
+
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
return 0;
}
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 602f030d84eb..57b097caf255 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
*
*/
#include <linux/kernel.h>
@@ -8,6 +9,13 @@
#include "fsl-mc-private.h"
+/*
+ * cache the DPRC version to reduce the number of commands
+ * towards the mc firmware
+ */
+static u16 dprc_major_ver;
+static u16 dprc_minor_ver;
+
/**
* dprc_open() - Open DPRC object for use
* @mc_io: Pointer to MC portal's I/O object
@@ -73,6 +81,77 @@ int dprc_close(struct fsl_mc_io *mc_io,
EXPORT_SYMBOL_GPL(dprc_close);
/**
+ * dprc_reset_container - Reset child container.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @child_container_id: ID of the container to reset
+ * @options: 32 bit options:
+ * - 0 (no bits set) - all the objects inside the container are
+ * reset. The child containers are entered recursively and the
+ * objects reset. All the objects (including the child containers)
+ * are closed.
+ * - bit 0 set - all the objects inside the container are reset.
+ * However, the child containers are not entered recursively.
+ * This option is supported for API versions >= 6.5.
+ *
+ * In case a software context crashes or becomes non-responsive, the parent
+ * may wish to reset its resources container before the software context is
+ * restarted.
+ *
+ * This routine informs all objects assigned to the child container that the
+ * container is being reset, so they may perform any cleanup operations that are
+ * needed. All objects handles that were owned by the child container shall be
+ * closed.
+ *
+ * Note that such request may be submitted even if the child software context
+ * has not crashed, but the resulting object cleanup operations will not be
+ * aware of that.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_reset_container(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int child_container_id,
+ u32 options)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
+ u32 cmdid = DPRC_CMDID_RESET_CONT;
+ int err;
+
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ /*
+ * MC API 6.5 introduced a new field in the command used to pass
+ * some flags.
+ * Bit 0 indicates that the child containers are not recursively reset.
+ */
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 5))
+ cmdid = DPRC_CMDID_RESET_CONT_V2;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmdid, cmd_flags, token);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(options);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dprc_reset_container);
+
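As a quick illustration, a bus-level caller that already holds an open DPRC handle could request a non-recursive reset of one child like this. This is a minimal sketch; the wrapper name and child id are hypothetical:

	/* Hypothetical wrapper: reset one child container without entering
	 * its own children (bit 0 of options, honored on MC API >= 6.5).
	 */
	static int reset_one_child(struct fsl_mc_device *mc_bus_dev, int child_id)
	{
		return dprc_reset_container(mc_bus_dev->mc_io, 0,
					    mc_bus_dev->mc_handle,
					    child_id, BIT(0));
	}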
+/**
* dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
@@ -281,7 +360,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
attr->container_id = le32_to_cpu(rsp_params->container_id);
- attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->icid = le32_to_cpu(rsp_params->icid);
attr->options = le32_to_cpu(rsp_params->options);
attr->portal_id = le32_to_cpu(rsp_params->portal_id);
@@ -443,30 +522,44 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct fsl_mc_command cmd = { 0 };
struct dprc_cmd_get_obj_region *cmd_params;
struct dprc_rsp_get_obj_region *rsp_params;
- u16 major_ver, minor_ver;
int err;
- /* prepare command */
- err = dprc_get_api_version(mc_io, 0,
- &major_ver,
- &minor_ver);
- if (err)
- return err;
-
- /**
- * MC API version 6.3 introduced a new field to the region
- * descriptor: base_address. If the older API is in use then the base
- * address is set to zero to indicate it needs to be obtained elsewhere
- * (typically the device tree).
- */
- if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
- cmd.header =
- mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
- cmd_flags, token);
- else
- cmd.header =
- mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
- cmd_flags, token);
+ /*
+ * If the DPRC object version was not yet cached, cache it now.
+ * Otherwise use the already cached value.
+ */
+ if (!dprc_major_ver && !dprc_minor_ver) {
+ err = dprc_get_api_version(mc_io, 0,
+ &dprc_major_ver,
+ &dprc_minor_ver);
+ if (err)
+ return err;
+ }
+
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 6)) {
+ /*
+ * MC API version 6.6 changed the size of the MC portals and software
+ * portals to 64K (as implemented by hardware). If an older API is in
+ * use, the size reported is smaller (64 bytes for MC portals and 4K
+ * for software portals).
+ */
+
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V3,
+ cmd_flags, token);
+
+ } else if (dprc_major_ver == 6 && dprc_minor_ver >= 3) {
+ /*
+ * MC API version 6.3 introduced a new field to the region
+ * descriptor: base_address. If the older API is in use then the base
+ * address is set to zero to indicate it needs to be obtained elsewhere
+ * (typically the device tree).
+ */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+ cmd_flags, token);
+ } else {
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+ cmd_flags, token);
+ }
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
@@ -483,7 +576,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
region_desc->size = le32_to_cpu(rsp_params->size);
- if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+ if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3))
region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
else
region_desc->base_address = 0;
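Callers consuming the region descriptor still need to handle the pre-6.3 case where base_address reads back as zero. A minimal sketch of that fallback, in which the device-tree lookup is an assumed helper rather than part of this patch:

	/* Hypothetical consumer of dprc_get_obj_region(): prefer the
	 * MC-reported base address, otherwise fall back to an address
	 * obtained elsewhere (typically the device tree).
	 * dt_region_base() is an assumed helper, not a real API.
	 */
	if (region_desc.base_address)
		regions[i].start = region_desc.base_address +
				   region_desc.base_offset;
	else
		regions[i].start = dt_region_base(mc_dev) +
				   region_desc.base_offset;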
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index cc7bb900f524..e71a6f52ea0c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_object_free);
* Initialize the interrupt pool associated with an fsl-mc bus.
* It allocates a block of IRQs from the GIC-ITS.
*/
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
unsigned int irq_count)
{
unsigned int i;
@@ -352,10 +352,14 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
struct fsl_mc_device_irq *irq_resources;
struct fsl_mc_device_irq *mc_dev_irq;
int error;
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ /* do nothing if the IRQ pool is already populated */
+ if (mc_bus->irq_resources)
+ return 0;
+
if (irq_count == 0 ||
irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)
return -EINVAL;
@@ -407,9 +411,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
* Teardown the interrupt pool associated with an fsl-mc bus.
* It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
*/
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_device *mc_bus_dev)
{
- struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_resource_pool *res_pool =
&mc_bus->resource_pools[FSL_MC_POOL_IRQ];
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index b69794e7364d..76a6ee505d33 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -3,6 +3,7 @@
* Freescale Management Complex (MC) bus driver
*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2019-2020 NXP
* Author: German Rivera <German.Rivera@freescale.com>
*
*/
@@ -78,6 +79,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
+ /* When driver_override is set, only bind to the matching driver */
+ if (mc_dev->driver_override) {
+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name);
+ goto out;
+ }
+
if (!mc_drv->match_id_table)
goto out;
@@ -147,8 +154,52 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ char *driver_override, *old = mc_dev->driver_override;
+ char *cp;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ mc_dev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ mc_dev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
+
static struct attribute *fsl_mc_dev_attrs[] = {
&dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
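For reference, the new attribute is driven from user space. A minimal sketch, where the device node and driver name are assumptions:

	/* Hypothetical user-space use of the new driver_override attribute:
	 * steer one fsl-mc object (dpni.1, assumed) to an alternate driver
	 * (vfio-fsl-mc, assumed). An empty write clears the override.
	 */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/fsl-mc/devices/dpni.1/driver_override", "w");

		if (!f)
			return 1;
		fputs("vfio-fsl-mc\n", f);
		return fclose(f) ? 1 : 0;
	}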
@@ -452,7 +503,7 @@ common_cleanup:
}
static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
+ int container_id, u32 *icid)
{
struct dprc_attributes attr;
int error;
@@ -564,11 +615,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
- regions[i].flags = IORESOURCE_IO;
- if (region_desc.flags & DPRC_REGION_CACHEABLE)
- regions[i].flags |= IORESOURCE_CACHEABLE;
- if (region_desc.flags & DPRC_REGION_SHAREABLE)
- regions[i].flags |= IORESOURCE_MEM;
+ regions[i].flags = region_desc.flags & IORESOURCE_BITS;
+ regions[i].flags |= IORESOURCE_MEM;
}
mc_dev->regions = regions;
@@ -630,6 +678,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
if (!mc_bus)
return -ENOMEM;
+ mutex_init(&mc_bus->scan_mutex);
mc_dev = &mc_bus->mc_dev;
} else {
/*
@@ -748,6 +797,9 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_add);
*/
void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
{
+ kfree(mc_dev->driver_override);
+ mc_dev->driver_override = NULL;
+
/*
* The device-specific remove callback will get invoked by device_del()
*/
@@ -908,9 +960,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
u32 mc_portal_size, mc_stream_id;
struct resource *plat_res;
- if (!iommu_present(&fsl_mc_bus_type))
- return -EPROBE_DEFER;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
@@ -918,11 +967,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
- if (IS_ERR(mc->fsl_mc_regs))
- return PTR_ERR(mc->fsl_mc_regs);
+ if (plat_res)
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
- if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) &&
+ !dev_of_node(&pdev->dev)) {
mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
/*
* HW ORs the PL and BMT bit, places the result in bit 15 of
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 7a46a12eb747..85ca5fdee581 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -80,10 +80,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command versioning */
#define DPRC_CMD_BASE_VERSION 1
#define DPRC_CMD_2ND_VERSION 2
+#define DPRC_CMD_3RD_VERSION 3
#define DPRC_CMD_ID_OFFSET 4
#define DPRC_CMD(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
#define DPRC_CMD_V2(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
+#define DPRC_CMD_V3(id) (((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_3RD_VERSION)
/* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
@@ -91,6 +93,8 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT DPRC_CMD(0x005)
+#define DPRC_CMDID_RESET_CONT_V2 DPRC_CMD_V2(0x005)
#define DPRC_CMDID_SET_IRQ DPRC_CMD(0x010)
#define DPRC_CMDID_SET_IRQ_ENABLE DPRC_CMD(0x012)
@@ -103,6 +107,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
#define DPRC_CMDID_GET_OBJ DPRC_CMD(0x15A)
#define DPRC_CMDID_GET_OBJ_REG DPRC_CMD(0x15E)
#define DPRC_CMDID_GET_OBJ_REG_V2 DPRC_CMD_V2(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V3 DPRC_CMD_V3(0x15E)
#define DPRC_CMDID_SET_OBJ_IRQ DPRC_CMD(0x15F)
#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
@@ -111,6 +116,11 @@ struct dprc_cmd_open {
__le32 container_id;
};
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+ __le32 options;
+};
+
struct dprc_cmd_set_irq {
/* cmd word 0 */
__le32 irq_val;
@@ -152,8 +162,7 @@ struct dprc_cmd_clear_irq_status {
struct dprc_rsp_get_attributes {
/* response word 0 */
__le32 container_id;
- __le16 icid;
- __le16 pad;
+ __le32 icid;
/* response word 1 */
__le32 options;
__le32 portal_id;
@@ -330,7 +339,7 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
*/
struct dprc_attributes {
int container_id;
- u16 icid;
+ u32 icid;
int portal_id;
u64 options;
};
@@ -358,12 +367,6 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
int obj_id,
u8 irq_index,
struct dprc_irq_cfg *irq_cfg);
-
-/* Region flags */
-/* Cacheable - Indicates that region should be mapped as cacheable */
-#define DPRC_REGION_CACHEABLE 0x00000001
-#define DPRC_REGION_SHAREABLE 0x00000002
-
/**
* enum dprc_region_type - Region type
* @DPRC_REGION_TYPE_MC_PORTAL: MC portal region
@@ -518,11 +521,6 @@ struct dpcon_cmd_set_notification {
__le64 user_ctx;
};
-/**
- * Maximum number of total IRQs that can be pre-allocated for an MC bus'
- * IRQ pool
- */
-#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
/**
* struct fsl_mc_resource_pool - Pool of MC resources of a given
@@ -597,11 +595,6 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev);
struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
-int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
- unsigned int irq_count);
-
-void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
-
int __must_check fsl_create_mc_io(struct device *dev,
phys_addr_t mc_portal_phys_addr,
u32 mc_portal_size,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index a30b53f1d87d..305015486b91 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -129,7 +129,12 @@ error_destroy_mc_io:
*/
void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
{
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
+ struct fsl_mc_device *dpmcp_dev;
+
+ if (!mc_io)
+ return;
+
+ dpmcp_dev = mc_io->dpmcp_dev;
if (dpmcp_dev)
fsl_mc_io_unset_dpmcp(mc_io);
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index a8bd9bd7db7c..e841c1097fb4 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -6,9 +6,17 @@
#
config MHI_BUS
- tristate "Modem Host Interface (MHI) bus"
- help
- Bus driver for MHI protocol. Modem Host Interface (MHI) is a
- communication protocol used by the host processors to control
- and communicate with modem devices over a high speed peripheral
- bus or shared memory.
+ tristate "Modem Host Interface (MHI) bus"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
+
+config MHI_BUS_DEBUG
+ bool "Debugfs support for the MHI bus"
+ depends on MHI_BUS && DEBUG_FS
+ help
+ Enable debugfs support for use with the MHI transport. Allows
+ reading and/or modifying some values within the MHI controller
+ for debug and test purposes.
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
index 66e2700c9032..c3feb4130aa3 100644
--- a/drivers/bus/mhi/core/Makefile
+++ b/drivers/bus/mhi/core/Makefile
@@ -1,3 +1,4 @@
-obj-$(CONFIG_MHI_BUS) := mhi.o
+obj-$(CONFIG_MHI_BUS) += mhi.o
mhi-y := init.o main.o pm.o boot.o
+mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 0b38014d040e..24422f5c3d80 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -392,13 +392,28 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
void *buf;
dma_addr_t dma_addr;
size_t size;
- int ret;
+ int i, ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
+ /* save hardware info from BHI */
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_SERIALNU,
+ &mhi_cntrl->serial_number);
+ if (ret)
+ dev_err(dev, "Could not capture serial number via BHI\n");
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
+ &mhi_cntrl->oem_pk_hash[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
+ break;
+ }
+ }
+
/* If device is in pass through, do reset to ready state transition */
if (mhi_cntrl->ee == MHI_EE_PTHRU)
goto fw_load_ee_pthru;
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c
new file mode 100644
index 000000000000..3a48801e01f4
--- /dev/null
+++ b/drivers/bus/mhi/core/debugfs.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mhi.h>
+#include <linux/module.h>
+#include "internal.h"
+
+static int mhi_debugfs_states_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ /* states */
+ seq_printf(m, "PM state: %s Device: %s MHI state: %s EE: %s wake: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ mhi_is_active(mhi_cntrl) ? "Active" : "Inactive",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee),
+ mhi_cntrl->wake_set ? "true" : "false");
+
+ /* counters */
+ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2,
+ mhi_cntrl->M3);
+
+ seq_printf(m, " device wake: %u pending packets: %u\n",
+ atomic_read(&mhi_cntrl->dev_wake),
+ atomic_read(&mhi_cntrl->pending_pkts));
+
+ return 0;
+}
+
+static int mhi_debugfs_events_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_event *mhi_event;
+ struct mhi_event_ctxt *er_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+ mhi_event = mhi_cntrl->mhi_event;
+ for (i = 0; i < mhi_cntrl->total_ev_rings;
+ i++, er_ctxt++, mhi_event++) {
+ struct mhi_ring *ring = &mhi_event->ring;
+
+ if (mhi_event->offload_ev) {
+ seq_printf(m, "Index: %d is an offload event ring\n",
+ i);
+ continue;
+ }
+
+ seq_printf(m, "Index: %d intmod count: %lu time: %lu",
+ i, (er_ctxt->intmod & EV_CTX_INTMODC_MASK) >>
+ EV_CTX_INTMODC_SHIFT,
+ (er_ctxt->intmod & EV_CTX_INTMODT_MASK) >>
+ EV_CTX_INTMODT_SHIFT);
+
+ seq_printf(m, " base: 0x%0llx len: 0x%llx", er_ctxt->rbase,
+ er_ctxt->rlen);
+
+ seq_printf(m, " rp: 0x%llx wp: 0x%llx", er_ctxt->rp,
+ er_ctxt->wp);
+
+ seq_printf(m, " local rp: 0x%pK db: 0x%pad\n", ring->rp,
+ &mhi_event->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_channels_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_chan *mhi_chan;
+ struct mhi_chan_ctxt *chan_ctxt;
+ int i;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ mhi_chan = mhi_cntrl->mhi_chan;
+ chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+ for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+ struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+ if (mhi_chan->offload_ch) {
+ seq_printf(m, "%s(%u) is an offload channel\n",
+ mhi_chan->name, mhi_chan->chan);
+ continue;
+ }
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ seq_printf(m,
+ "%s(%u) state: 0x%lx brstmode: 0x%lx pollcfg: 0x%lx",
+ mhi_chan->name, mhi_chan->chan, (chan_ctxt->chcfg &
+ CHAN_CTX_CHSTATE_MASK) >> CHAN_CTX_CHSTATE_SHIFT,
+ (chan_ctxt->chcfg & CHAN_CTX_BRSTMODE_MASK) >>
+ CHAN_CTX_BRSTMODE_SHIFT, (chan_ctxt->chcfg &
+ CHAN_CTX_POLLCFG_MASK) >> CHAN_CTX_POLLCFG_SHIFT);
+
+ seq_printf(m, " type: 0x%x event ring: %u", chan_ctxt->chtype,
+ chan_ctxt->erindex);
+
+ seq_printf(m, " base: 0x%llx len: 0x%llx rp: 0x%llx wp: 0x%llx",
+ chan_ctxt->rbase, chan_ctxt->rlen, chan_ctxt->rp,
+ chan_ctxt->wp);
+
+ seq_printf(m, " local rp: 0x%pK local wp: 0x%pK db: 0x%pad\n",
+ ring->rp, ring->wp,
+ &mhi_chan->db_cfg.db_val);
+ }
+
+ return 0;
+}
+
+static int mhi_device_info_show(struct device *dev, void *data)
+{
+ struct mhi_device *mhi_dev;
+
+ if (dev->bus != &mhi_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_device(dev);
+
+ seq_printf((struct seq_file *)data, "%s: type: %s dev_wake: %u",
+ mhi_dev->name, mhi_dev->dev_type ? "Controller" : "Transfer",
+ mhi_dev->dev_wake);
+
+ /* for transfer device types only */
+ if (mhi_dev->dev_type == MHI_DEVICE_XFER)
+ seq_printf((struct seq_file *)data, " channels: %u(UL)/%u(DL)",
+ mhi_dev->ul_chan_id, mhi_dev->dl_chan_id);
+
+ seq_puts((struct seq_file *)data, "\n");
+
+ return 0;
+}
+
+static int mhi_debugfs_devices_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child(mhi_cntrl->cntrl_dev, m, mhi_device_info_show);
+
+ return 0;
+}
+
+static int mhi_debugfs_regdump_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ enum mhi_state state;
+ enum mhi_ee_type ee;
+ int i, ret = -EIO;
+ u32 val;
+ void __iomem *mhi_base = mhi_cntrl->regs;
+ void __iomem *bhi_base = mhi_cntrl->bhi;
+ void __iomem *bhie_base = mhi_cntrl->bhie;
+ void __iomem *wake_db = mhi_cntrl->wake_db;
+ struct {
+ const char *name;
+ int offset;
+ void __iomem *base;
+ } regs[] = {
+ { "MHI_REGLEN", MHIREGLEN, mhi_base},
+ { "MHI_VER", MHIVER, mhi_base},
+ { "MHI_CFG", MHICFG, mhi_base},
+ { "MHI_CTRL", MHICTRL, mhi_base},
+ { "MHI_STATUS", MHISTATUS, mhi_base},
+ { "MHI_WAKE_DB", 0, wake_db},
+ { "BHI_EXECENV", BHI_EXECENV, bhi_base},
+ { "BHI_STATUS", BHI_STATUS, bhi_base},
+ { "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+ { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+ { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+ { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+ { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+ { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+ { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+ { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+ { NULL },
+ };
+
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+ return ret;
+
+ seq_printf(m, "Host PM state: %s Device state: %s EE: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_get_exec_env(mhi_cntrl);
+ seq_printf(m, "Device EE: %s state: %s\n", TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
+ for (i = 0; regs[i].name; i++) {
+ if (!regs[i].base)
+ continue;
+ ret = mhi_read_reg(mhi_cntrl, regs[i].base, regs[i].offset,
+ &val);
+ if (ret)
+ continue;
+
+ seq_printf(m, "%s: 0x%x\n", regs[i].name, val);
+ }
+
+ return 0;
+}
+
+static int mhi_debugfs_device_wake_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ if (!mhi_is_active(mhi_cntrl)) {
+ seq_puts(m, "Device not ready\n");
+ return -ENODEV;
+ }
+
+ seq_printf(m,
+ "Wake count: %d\n%s\n", mhi_dev->dev_wake,
+ "Usage: echo get/put > device_wake to vote/unvote for M0");
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_device_wake_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+ char buf[16];
+ int ret = -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "get", 3)) {
+ ret = mhi_device_get_sync(mhi_dev);
+ } else if (!strncmp(buf, "put", 3)) {
+ mhi_device_put(mhi_dev);
+ ret = 0;
+ }
+
+ return ret ? ret : count;
+}
+
+static int mhi_debugfs_timeout_ms_show(struct seq_file *m, void *d)
+{
+ struct mhi_controller *mhi_cntrl = m->private;
+
+ seq_printf(m, "%u ms\n", mhi_cntrl->timeout_ms);
+
+ return 0;
+}
+
+static ssize_t mhi_debugfs_timeout_ms_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct mhi_controller *mhi_cntrl = m->private;
+ u32 timeout_ms;
+
+ if (kstrtou32_from_user(ubuf, count, 0, &timeout_ms))
+ return -EINVAL;
+
+ mhi_cntrl->timeout_ms = timeout_ms;
+
+ return count;
+}
+
+static int mhi_debugfs_states_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_states_show, inode->i_private);
+}
+
+static int mhi_debugfs_events_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_events_show, inode->i_private);
+}
+
+static int mhi_debugfs_channels_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_channels_show, inode->i_private);
+}
+
+static int mhi_debugfs_devices_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_devices_show, inode->i_private);
+}
+
+static int mhi_debugfs_regdump_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_regdump_show, inode->i_private);
+}
+
+static int mhi_debugfs_device_wake_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_device_wake_show, inode->i_private);
+}
+
+static int mhi_debugfs_timeout_ms_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_debugfs_timeout_ms_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_states_fops = {
+ .open = mhi_debugfs_states_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_events_fops = {
+ .open = mhi_debugfs_events_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_channels_fops = {
+ .open = mhi_debugfs_channels_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_devices_fops = {
+ .open = mhi_debugfs_devices_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_regdump_fops = {
+ .open = mhi_debugfs_regdump_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_device_wake_fops = {
+ .open = mhi_debugfs_device_wake_open,
+ .write = mhi_debugfs_device_wake_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static const struct file_operations debugfs_timeout_ms_fops = {
+ .open = mhi_debugfs_timeout_ms_open,
+ .write = mhi_debugfs_timeout_ms_write,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static struct dentry *mhi_debugfs_root;
+
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ mhi_cntrl->debugfs_dentry =
+ debugfs_create_dir(dev_name(mhi_cntrl->cntrl_dev),
+ mhi_debugfs_root);
+
+ debugfs_create_file("states", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_states_fops);
+ debugfs_create_file("events", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_events_fops);
+ debugfs_create_file("channels", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_channels_fops);
+ debugfs_create_file("devices", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_devices_fops);
+ debugfs_create_file("regdump", 0444, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_regdump_fops);
+ debugfs_create_file("device_wake", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_device_wake_fops);
+ debugfs_create_file("timeout_ms", 0644, mhi_cntrl->debugfs_dentry,
+ mhi_cntrl, &debugfs_timeout_ms_fops);
+}
+
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+ debugfs_remove_recursive(mhi_cntrl->debugfs_dentry);
+ mhi_cntrl->debugfs_dentry = NULL;
+}
+
+void mhi_debugfs_init(void)
+{
+ mhi_debugfs_root = debugfs_create_dir(mhi_bus_type.name, NULL);
+}
+
+void mhi_debugfs_exit(void)
+{
+ debugfs_remove_recursive(mhi_debugfs_root);
+}
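The files created above land under the bus directory in debugfs. A minimal user-space sketch that dumps the states file, assuming debugfs is mounted at /sys/kernel/debug and a controller named mhi0 (the real directory name comes from dev_name() of the controller device):

	/* Hypothetical reader of the new "states" debugfs file. */
	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/mhi/mhi0/states", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		return fclose(f);
	}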
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index e43a190a7a36..0ffdebde8265 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -4,6 +4,7 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -75,6 +76,42 @@ const char *to_mhi_pm_state_str(enum mhi_pm_state state)
return mhi_pm_state_str[index];
}
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
+ mhi_cntrl->serial_number);
+}
+static DEVICE_ATTR_RO(serial_number);
+
+static ssize_t oem_pk_hash_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ int i, cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "OEMPKHASH[%d]: 0x%x\n", i,
+ mhi_cntrl->oem_pk_hash[i]);
+
+ return cnt;
+}
+static DEVICE_ATTR_RO(oem_pk_hash);
+
+static struct attribute *mhi_dev_attrs[] = {
+ &dev_attr_serial_number.attr,
+ &dev_attr_oem_pk_hash.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mhi_dev);
+
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring,
@@ -125,6 +162,13 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
if (mhi_event->offload_ev)
continue;
+ if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
+ dev_err(dev, "irq %d not available for event ring\n",
+ mhi_event->irq);
+ ret = -EINVAL;
+ goto error_request;
+ }
+
ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
IRQF_SHARED | IRQF_NO_SUSPEND,
@@ -562,10 +606,10 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
}
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
- struct mhi_controller_config *config)
+ const struct mhi_controller_config *config)
{
struct mhi_event *mhi_event;
- struct mhi_event_config *event_cfg;
+ const struct mhi_event_config *event_cfg;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int i, num;
@@ -636,9 +680,6 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
mhi_event++;
}
- /* We need IRQ for each event ring + additional one for BHI */
- mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;
-
return 0;
error_ev_cfg:
@@ -648,9 +689,9 @@ error_ev_cfg:
}
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
- struct mhi_controller_config *config)
+ const struct mhi_controller_config *config)
{
- struct mhi_channel_config *ch_cfg;
+ const struct mhi_channel_config *ch_cfg;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int i;
u32 chan;
@@ -766,7 +807,7 @@ error_chan_cfg:
}
static int parse_config(struct mhi_controller *mhi_cntrl,
- struct mhi_controller_config *config)
+ const struct mhi_controller_config *config)
{
int ret;
@@ -803,7 +844,7 @@ error_ev_cfg:
}
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
- struct mhi_controller_config *config)
+ const struct mhi_controller_config *config)
{
struct mhi_event *mhi_event;
struct mhi_chan *mhi_chan;
@@ -904,6 +945,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
mhi_dev->mhi_cntrl = mhi_cntrl;
dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
+ mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
/* Init wakeup source */
device_init_wakeup(&mhi_dev->dev, true);
@@ -914,6 +956,8 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_cntrl->mhi_dev = mhi_dev;
+ mhi_create_debugfs(mhi_cntrl);
+
return 0;
error_add_dev:
@@ -936,6 +980,8 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
unsigned int i;
+ mhi_destroy_debugfs(mhi_cntrl);
+
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_event);
@@ -953,6 +999,22 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
+struct mhi_controller *mhi_alloc_controller(void)
+{
+ struct mhi_controller *mhi_cntrl;
+
+ mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
+
+ return mhi_cntrl;
+}
+EXPORT_SYMBOL_GPL(mhi_alloc_controller);
+
+void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+ kfree(mhi_cntrl);
+}
+EXPORT_SYMBOL_GPL(mhi_free_controller);
+
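A minimal sketch of how a controller driver might pair the new allocator with registration; struct my_dev, my_probe() and my_mhi_config are assumptions standing in for controller-specific pieces not shown here:

	/* Hypothetical probe fragment using the new allocator API. */
	static int my_probe(struct my_dev *mdev)
	{
		struct mhi_controller *mhi_cntrl;
		int ret;

		mhi_cntrl = mhi_alloc_controller();
		if (!mhi_cntrl)
			return -ENOMEM;

		ret = mhi_register_controller(mhi_cntrl, &my_mhi_config);
		if (ret)
			mhi_free_controller(mhi_cntrl);

		return ret;
	}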
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -1249,7 +1311,7 @@ static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
struct mhi_device *mhi_dev = to_mhi_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
- mhi_dev->chan_name);
+ mhi_dev->name);
}
static int mhi_match(struct device *dev, struct device_driver *drv)
@@ -1266,7 +1328,7 @@ static int mhi_match(struct device *dev, struct device_driver *drv)
return 0;
for (id = mhi_drv->id_table; id->chan[0]; id++)
- if (!strcmp(mhi_dev->chan_name, id->chan)) {
+ if (!strcmp(mhi_dev->name, id->chan)) {
mhi_dev->id = id;
return 1;
}
@@ -1279,15 +1341,18 @@ struct bus_type mhi_bus_type = {
.dev_name = "mhi",
.match = mhi_match,
.uevent = mhi_uevent,
+ .dev_groups = mhi_dev_groups,
};
static int __init mhi_init(void)
{
+ mhi_debugfs_init();
return bus_register(&mhi_bus_type);
}
static void __exit mhi_exit(void)
{
+ mhi_debugfs_exit();
bus_unregister(&mhi_bus_type);
}
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index b1f640b75a94..7989269ddd96 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -570,6 +570,30 @@ struct mhi_chan {
/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)
+/* debugfs related functions */
+#ifdef CONFIG_MHI_BUS_DEBUG
+void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_debugfs_init(void);
+void mhi_debugfs_exit(void);
+#else
+static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline void mhi_debugfs_init(void)
+{
+}
+
+static inline void mhi_debugfs_exit(void)
+{
+}
+#endif
+
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
int mhi_destroy_device(struct device *dev, void *data);
@@ -592,13 +616,24 @@ void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
-void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd);
+static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
+{
+ return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+ mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
+{
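+ /* A paired runtime get()/put() is enough to kick runtime PM into
+ * resuming the device without taking a long-term wake vote.
+ */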
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+ mhi_cntrl->runtime_put(mhi_cntrl);
+}
/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 1f622ce6be8b..2cff5ddff225 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -249,7 +249,7 @@ int mhi_destroy_device(struct device *dev, void *data)
put_device(&mhi_dev->dl_chan->mhi_dev->dev);
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
- mhi_dev->chan_name);
+ mhi_dev->name);
/* Notify the client and remove the device from MHI bus */
device_del(dev);
@@ -327,10 +327,10 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
}
/* Channel name is same for both UL and DL */
- mhi_dev->chan_name = mhi_chan->name;
+ mhi_dev->name = mhi_chan->name;
dev_set_name(&mhi_dev->dev, "%s_%s",
dev_name(mhi_cntrl->cntrl_dev),
- mhi_dev->chan_name);
+ mhi_dev->name);
/* Init wakeup source if available */
if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
@@ -909,8 +909,7 @@ void mhi_ctrl_ev_task(unsigned long data)
* process it since we are probably in a suspended state,
* so trigger a resume.
*/
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
+ mhi_trigger_resume(mhi_cntrl);
return;
}
@@ -971,10 +970,8 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
/* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1032,10 +1029,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
/* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1147,10 +1142,8 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* we're in M3 or transitioning to M3 */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 796098078083..3de7b1639ec6 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -256,6 +256,7 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
dev_err(dev, "Unable to transition to M0 state\n");
return -EIO;
}
+ mhi_cntrl->M0++;
/* Wake up the device */
read_lock_bh(&mhi_cntrl->pm_lock);
@@ -326,6 +327,8 @@ void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
mhi_cntrl->dev_state = MHI_STATE_M2;
write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ mhi_cntrl->M2++;
wake_up_all(&mhi_cntrl->state_event);
/* If there are any pending resources, exit M2 immediately */
@@ -362,6 +365,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
return -EIO;
}
+ mhi_cntrl->M3++;
wake_up_all(&mhi_cntrl->state_event);
return 0;
@@ -686,7 +690,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
return -EIO;
/* Return busy if there are any pending resources */
- if (atomic_read(&mhi_cntrl->dev_wake))
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts))
return -EBUSY;
/* Take MHI out of M2 state */
@@ -712,7 +717,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
write_lock_irq(&mhi_cntrl->pm_lock);
- if (atomic_read(&mhi_cntrl->dev_wake)) {
+ if (atomic_read(&mhi_cntrl->dev_wake) ||
+ atomic_read(&mhi_cntrl->pending_pkts)) {
write_unlock_irq(&mhi_cntrl->pm_lock);
return -EBUSY;
}
@@ -822,11 +828,8 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
/* Wake up the device */
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_get(mhi_cntrl, true);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
read_unlock_bh(&mhi_cntrl->pm_lock);
ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -915,7 +918,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
dev_info(dev, "Requested to power ON\n");
- if (mhi_cntrl->nr_irqs < mhi_cntrl->total_ev_rings)
+ if (mhi_cntrl->nr_irqs < 1)
return -EINVAL;
/* Supply default wake routines if not provided by controller driver */
@@ -1113,6 +1116,9 @@ void mhi_device_get(struct mhi_device *mhi_dev)
mhi_dev->dev_wake++;
read_lock_bh(&mhi_cntrl->pm_lock);
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
+
mhi_cntrl->wake_get(mhi_cntrl, true);
read_unlock_bh(&mhi_cntrl->pm_lock);
}
@@ -1137,10 +1143,8 @@ void mhi_device_put(struct mhi_device *mhi_dev)
mhi_dev->dev_wake--;
read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- mhi_cntrl->runtime_get(mhi_cntrl);
- mhi_cntrl->runtime_put(mhi_cntrl);
- }
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
+ mhi_trigger_resume(mhi_cntrl);
mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 5b2a11a88951..2519ceede64b 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -610,23 +610,23 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
static void __init
mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
{
- struct memblock_region *r;
- uint64_t s = 0;
+ phys_addr_t reg_start, reg_end;
+ uint64_t i, s = 0;
- for_each_memblock(memory, r) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
/*
* This part of the memory is above 4 GB, so we don't
* care for the MBus bridge hole.
*/
- if (r->base >= 0x100000000ULL)
+ if (reg_start >= 0x100000000ULL)
continue;
/*
* The MBus bridge hole is at the end of the RAM under
* the 4 GB limit.
*/
- if (r->base + r->size > s)
- s = r->base + r->size;
+ if (reg_end > s)
+ s = reg_end;
}
*start = s;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 09b0cd292720..9874fc1c815b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -479,7 +479,7 @@ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
{
int ret;
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
mutex_lock(&gdrom_mutex);
ret = cdrom_open(gd.cd_info, bdev, mode);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b1bd336761b1..d229a2d0c017 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -93,8 +93,9 @@ config PPDEV
config VIRTIO_CONSOLE
tristate "Virtio console"
- depends on VIRTIO && TTY
+ depends on TTY
select HVC_DRIVER
+ select VIRTIO
help
Virtio console for use with hypervisors.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 6914e4f0ce98..2b2095542816 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -425,7 +425,7 @@ static int agp_amdk7_probe(struct pci_dev *pdev,
return -ENOMEM;
bridge->driver = &amd_irongate_driver;
- bridge->dev_private_data = &amd_irongate_private,
+ bridge->dev_private_data = &amd_irongate_private;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 623205bcd04a..f78e756157db 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -382,7 +382,7 @@ static int agp_nvidia_probe(struct pci_dev *pdev,
return -ENOMEM;
bridge->driver = &nvidia_driver;
- bridge->dev_private_data = &nvidia_private,
+ bridge->dev_private_data = &nvidia_private;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 7729414100ff..f875970bda65 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -513,7 +513,7 @@ static int agp_serverworks_probe(struct pci_dev *pdev,
return -ENOMEM;
bridge->driver = &sworks_driver;
- bridge->dev_private_data = &serverworks_private,
+ bridge->dev_private_data = &serverworks_private;
bridge->dev = pci_dev_get(pdev);
pci_set_drvdata(pdev, bridge);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f976a49e1fb5..e92c4d9469d8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -282,6 +282,20 @@ config HW_RANDOM_INGENIC_RNG
If unsure, say Y.
+config HW_RANDOM_INGENIC_TRNG
+ tristate "Ingenic True Random Number Generator support"
+ depends on HW_RANDOM
+ depends on MACH_X1830
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found in the Ingenic X1830 SoC (used, for example,
+ by the YSH & ATIL CU1830-Neo board).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ingenic-trng.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
@@ -512,6 +526,16 @@ config HW_RANDOM_CCTRNG
will be called cctrng.
If unsure, say 'N'.
+config HW_RANDOM_XIPHERA
+ tristate "Xiphera FPGA based True Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for Xiphera True Random
+ Number Generator Intellectual Property Core.
+
+ To compile this driver as a module, choose M here: the
+ module will be called xiphera-trng.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 26ae06844f09..5da344509a4d 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
+obj-$(CONFIG_HW_RANDOM_INGENIC_TRNG) += ingenic-trng.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
@@ -44,3 +45,4 @@ obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
+obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 619148fb2dc9..7a293f2147a0 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -463,11 +463,10 @@ static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
int rc = 0;
clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Error getting clock: %pe\n", clk);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Error getting clock\n");
+
drvdata->clk = clk;
rc = clk_prepare_enable(drvdata->clk);
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 9c47e431ce90..61c844baf26e 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -285,6 +285,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
rngc->rng.init = imx_rngc_init;
rngc->rng.read = imx_rngc_read;
rngc->rng.cleanup = imx_rngc_cleanup;
+ rngc->rng.quality = 19;
rngc->dev = &pdev->dev;
platform_set_drvdata(pdev, rngc);
diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
new file mode 100644
index 000000000000..954a8411d67d
--- /dev/null
+++ b/drivers/char/hw_random/ingenic-trng.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic True Random Number Generator driver
+ * Copyright (c) 2019 漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* DTRNG register offsets */
+#define TRNG_REG_CFG_OFFSET 0x00
+#define TRNG_REG_RANDOMNUM_OFFSET 0x04
+#define TRNG_REG_STATUS_OFFSET 0x08
+
+/* bits within the CFG register */
+#define CFG_RDY_CLR BIT(12)
+#define CFG_INT_MASK BIT(11)
+#define CFG_GEN_EN BIT(0)
+
+/* bits within the STATUS register */
+#define STATUS_RANDOM_RDY BIT(0)
+
+struct ingenic_trng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int ingenic_trng_init(struct hwrng *rng)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ unsigned int ctrl;
+
+ ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
+ ctrl |= CFG_GEN_EN;
+ writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
+
+ return 0;
+}
+
+static void ingenic_trng_cleanup(struct hwrng *rng)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ unsigned int ctrl;
+
+ ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
+ ctrl &= ~CFG_GEN_EN;
+ writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
+}
+
+static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ingenic_trng *trng = container_of(rng, struct ingenic_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_REG_STATUS_OFFSET, status,
+ status & STATUS_RANDOM_RDY, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Timed out waiting for DTRNG data ready\n", __func__);
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_REG_RANDOMNUM_OFFSET);
+
+ return 4;
+}
+
+static int ingenic_trng_probe(struct platform_device *pdev)
+{
+ struct ingenic_trng *trng;
+ int ret;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base)) {
+ pr_err("%s: Failed to map DTRNG registers\n", __func__);
+ return PTR_ERR(trng->base);
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(trng->clk)) {
+ pr_crit("%s: Cannot get DTRNG clock\n", __func__);
+ return PTR_ERR(trng->clk);
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ pr_crit("%s: Unable to enable DTRNG clock\n", __func__);
+ return ret;
+ }
+
+ trng->rng.name = pdev->name;
+ trng->rng.init = ingenic_trng_init;
+ trng->rng.cleanup = ingenic_trng_cleanup;
+ trng->rng.read = ingenic_trng_read;
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hwrng\n");
+ goto err_unprepare_clk;
+ }
+
+ platform_set_drvdata(pdev, trng);
+
+ dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
+ return 0;
+
+err_unprepare_clk:
+ clk_disable_unprepare(trng->clk);
+ return ret;
+}
+
+static int ingenic_trng_remove(struct platform_device *pdev)
+{
+ struct ingenic_trng *trng = platform_get_drvdata(pdev);
+ unsigned int ctrl;
+
+ hwrng_unregister(&trng->rng);
+
+ ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
+ ctrl &= ~CFG_GEN_EN;
+ writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
+
+ clk_disable_unprepare(trng->clk);
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_trng_of_match[] = {
+ { .compatible = "ingenic,x1830-dtrng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_trng_of_match);
+
+static struct platform_driver ingenic_trng_driver = {
+ .probe = ingenic_trng_probe,
+ .remove = ingenic_trng_remove,
+ .driver = {
+ .name = "ingenic-trng",
+ .of_match_table = ingenic_trng_of_match,
+ },
+};
+
+module_platform_driver(ingenic_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) <aric.pzqi@ingenic.com>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic True Random Number Generator driver");
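Once the driver is bound, entropy is consumed through the hwrng core. A minimal user-space sketch reading from the character device, assuming the standard /dev/hwrng node provided by the hw_random framework:

	/* Hypothetical consumer: pull 16 bytes via the hwrng core, which
	 * the DTRNG feeds 4 bytes at a time through ingenic_trng_read().
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		int fd = open("/dev/hwrng", O_RDONLY);

		if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf))
			return 1;
		for (size_t i = 0; i < sizeof(buf); i++)
			printf("%02x", buf[i]);
		putchar('\n');
		return close(fd);
	}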
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 9f205bd1acc0..eb7db27f9f19 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -330,7 +330,7 @@ static int __init mod_init(void)
int err = -ENODEV;
int i;
struct pci_dev *dev = NULL;
- void __iomem *mem = mem;
+ void __iomem *mem;
u8 hw_status;
struct intel_rng_hw *intel_rng_hw;
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 32d9fe61a225..01583faf9893 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -195,10 +195,10 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
- priv->rng.name = "iproc-rng200",
- priv->rng.read = iproc_rng200_read,
- priv->rng.init = iproc_rng200_init,
- priv->rng.cleanup = iproc_rng200_cleanup,
+ priv->rng.name = "iproc-rng200";
+ priv->rng.read = iproc_rng200_read;
+ priv->rng.init = iproc_rng200_init;
+ priv->rng.cleanup = iproc_rng200_cleanup;
/* Register driver */
ret = devm_hwrng_register(dev, &priv->rng);
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 025083c838f5..008763c988ed 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -143,9 +143,9 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
mxc_rng->dev = &pdev->dev;
mxc_rng->rng.name = "mxc-rnga";
mxc_rng->rng.init = mxc_rnga_init;
- mxc_rng->rng.cleanup = mxc_rnga_cleanup,
- mxc_rng->rng.data_present = mxc_rnga_data_present,
- mxc_rng->rng.data_read = mxc_rnga_data_read,
+ mxc_rng->rng.cleanup = mxc_rnga_cleanup;
+ mxc_rng->rng.data_present = mxc_rnga_data_present;
+ mxc_rng->rng.data_read = mxc_rnga_data_read;
mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(mxc_rng->clk)) {
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 5d0d13f891b7..1ec5f267a656 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -58,24 +58,24 @@ static int npcm_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
pm_runtime_get_sync((struct device *)priv->rng.priv);
- while (max >= sizeof(u32)) {
+ while (max) {
if (wait) {
- if (readl_poll_timeout(priv->base + NPCM_RNGCS_REG,
+ if (readb_poll_timeout(priv->base + NPCM_RNGCS_REG,
ready,
ready & NPCM_RNG_DATA_VALID,
NPCM_RNG_POLL_USEC,
NPCM_RNG_TIMEOUT_USEC))
break;
} else {
- if ((readl(priv->base + NPCM_RNGCS_REG) &
+ if ((readb(priv->base + NPCM_RNGCS_REG) &
NPCM_RNG_DATA_VALID) == 0)
break;
}
- *(u32 *)buf = readl(priv->base + NPCM_RNGD_REG);
- retval += sizeof(u32);
- buf += sizeof(u32);
- max -= sizeof(u32);
+ *(u8 *)buf = readb(priv->base + NPCM_RNGD_REG);
+ retval++;
+ buf++;
+ max--;
}
pm_runtime_mark_last_busy((struct device *)priv->rng.priv);
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index 49b2e02537dd..a99d82949981 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -122,14 +122,14 @@ static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
if (max > MAX_ENTROPY_REQ_SZ)
max = MAX_ENTROPY_REQ_SZ;
- while (read == 0) {
+ while (read < max) {
rng_size = get_optee_rng_data(pvt_data, data, (max - read));
data += rng_size;
read += rng_size;
- if (wait) {
- if (timeout-- == 0)
+ if (wait && pvt_data->data_rate) {
+ if ((timeout-- == 0) || (read == max))
return read;
msleep((1000 * (max - read)) / pvt_data->data_rate);
} else {
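As a worked example of the pacing above (numbers chosen for illustration): the expression implies data_rate is in bytes per second, so with data_rate = 10000 and 4096 bytes still outstanding, the driver sleeps (1000 * 4096) / 10000 = 409 ms — roughly the time the TEE needs to produce the remaining entropy — before polling again.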
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 38324c2ddda1..bc22178f83e8 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -145,12 +145,12 @@ static int stm32_rng_probe(struct platform_device *ofdev)
dev_set_drvdata(dev, priv);
- priv->rng.name = dev_driver_string(dev),
+ priv->rng.name = dev_driver_string(dev);
#ifndef CONFIG_PM
- priv->rng.init = stm32_rng_init,
- priv->rng.cleanup = stm32_rng_cleanup,
+ priv->rng.init = stm32_rng_init;
+ priv->rng.cleanup = stm32_rng_cleanup;
#endif
- priv->rng.read = stm32_rng_read,
+ priv->rng.read = stm32_rng_read;
priv->rng.priv = (unsigned long) dev;
priv->rng.quality = 900;
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
new file mode 100644
index 000000000000..7bdab8c8a6a8
--- /dev/null
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Xiphera Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define CONTROL_REG 0x00000000
+#define STATUS_REG 0x00000004
+#define RAND_REG 0x00000000
+
+#define HOST_TO_TRNG_RESET 0x00000001
+#define HOST_TO_TRNG_RELEASE_RESET 0x00000002
+#define HOST_TO_TRNG_ENABLE 0x80000000
+#define HOST_TO_TRNG_ZEROIZE 0x80000004
+#define HOST_TO_TRNG_ACK_ZEROIZE 0x80000008
+#define HOST_TO_TRNG_READ 0x8000000F
+
+/* trng statuses */
+#define TRNG_ACK_RESET 0x000000AC
+#define TRNG_SUCCESSFUL_STARTUP 0x00000057
+#define TRNG_FAILED_STARTUP 0x000000FA
+#define TRNG_NEW_RAND_AVAILABLE 0x000000ED
+
+struct xiphera_trng {
+ void __iomem *mem;
+ struct hwrng rng;
+};
+
+static int xiphera_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct xiphera_trng *trng = container_of(rng, struct xiphera_trng, rng);
+ int ret = 0;
+
+ while (max >= sizeof(u32)) {
+ /* check for data */
+ if (readl(trng->mem + STATUS_REG) == TRNG_NEW_RAND_AVAILABLE) {
+ *(u32 *)buf = readl(trng->mem + RAND_REG);
+ /*
+ * Inform the trng of the read
+ * and re-enable it to produce a new random number
+ */
+ writel(HOST_TO_TRNG_READ, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG);
+ ret += sizeof(u32);
+ buf += sizeof(u32);
+ max -= sizeof(u32);
+ } else {
+ break;
+ }
+ }
+ return ret;
+}
+
+static int xiphera_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct xiphera_trng *trng;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ /*
+ * the trng needs to be reset first, which might not complete in time,
+ * hence we add a small delay to ensure proper behaviour
+ */
+ writel(HOST_TO_TRNG_RESET, trng->mem + CONTROL_REG);
+ usleep_range(100, 200);
+
+ if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) {
+ /*
+ * there is a small chance the trng is just not ready yet,
+ * so we try one more time. If the second time fails, we give up
+ */
+ usleep_range(100, 200);
+ if (readl(trng->mem + STATUS_REG) != TRNG_ACK_RESET) {
+ dev_err(dev, "failed to reset the trng ip\n");
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * once again, to ensure proper behaviour we sleep
+ * for a while after zeroizing the trng
+ */
+ writel(HOST_TO_TRNG_RELEASE_RESET, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ENABLE, trng->mem + CONTROL_REG);
+ writel(HOST_TO_TRNG_ZEROIZE, trng->mem + CONTROL_REG);
+ msleep(20);
+
+ if (readl(trng->mem + STATUS_REG) != TRNG_SUCCESSFUL_STARTUP) {
+ /* diagnose the reason for the failure */
+ if (readl(trng->mem + STATUS_REG) == TRNG_FAILED_STARTUP) {
+ dev_err(dev, "trng ip startup-tests failed\n");
+ return -ENODEV;
+ }
+ dev_err(dev, "startup-tests yielded no response\n");
+ return -ENODEV;
+ }
+
+ writel(HOST_TO_TRNG_ACK_ZEROIZE, trng->mem + CONTROL_REG);
+
+ trng->rng.name = pdev->name;
+ trng->rng.read = xiphera_trng_read;
+ trng->rng.quality = 900;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, trng);
+
+ return 0;
+}
+
+static const struct of_device_id xiphera_trng_of_match[] = {
+ { .compatible = "xiphera,xip8001b-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiphera_trng_of_match);
+
+static struct platform_driver xiphera_trng_driver = {
+ .driver = {
+ .name = "xiphera-trng",
+ .of_match_table = xiphera_trng_of_match,
+ },
+ .probe = xiphera_trng_probe,
+};
+
+module_platform_driver(xiphera_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Atte Tommiska");
+MODULE_DESCRIPTION("Xiphera FPGA-based true random number generator driver");
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index f3f216cdf686..f41f78972b9c 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -213,8 +213,10 @@ static int bt_start_transaction(struct si_sm_data *bt,
if (bt->state == BT_STATE_LONG_BUSY)
return IPMI_NODE_BUSY_ERR;
- if (bt->state != BT_STATE_IDLE)
+ if (bt->state != BT_STATE_IDLE) {
+ dev_warn(bt->io->dev, "BT in invalid state %d\n", bt->state);
return IPMI_NOT_IN_MY_STATE_ERR;
+ }
if (bt_debug & BT_DEBUG_MSG) {
dev_dbg(bt->io->dev, "+++++++++++++++++ New command\n");
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 2e7cda08b079..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -17,6 +17,8 @@
* that document.
*/
+#define DEBUG /* So dev_dbg() is always available. */
+
#include <linux/kernel.h> /* For printk. */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -187,8 +189,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
(kcs->error_retries)++;
if (kcs->error_retries > MAX_ERROR_RETRIES) {
if (kcs_debug & KCS_DEBUG_ENABLE)
- printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
- reason);
+ dev_dbg(kcs->io->dev, "ipmi_kcs_sm: kcs hosed: %s\n",
+ reason);
kcs->state = KCS_HOSED;
} else {
kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
@@ -268,11 +270,13 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
+ dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
+ }
if (kcs_debug & KCS_DEBUG_MSG) {
- printk(KERN_DEBUG "start_kcs_transaction -");
+ dev_dbg(kcs->io->dev, "%s -", __func__);
for (i = 0; i < size; i++)
pr_cont(" %02x", data[i]);
pr_cont("\n");
@@ -331,7 +335,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
status = read_status(kcs);
if (kcs_debug & KCS_DEBUG_STATES)
- printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);
+ dev_dbg(kcs->io->dev,
+ "KCS: State = %d, %x\n", kcs->state, status);
/* All states wait for ibf, so just do it here. */
if (!check_ibf(kcs, status, time))
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 737c0b6b24ea..8774a3b8ff95 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -34,12 +34,13 @@
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
+#include <linux/delay.h>
#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(unsigned long);
+static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -60,6 +61,7 @@ enum ipmi_panic_event_op {
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
+
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
static int panic_op_write_handler(const char *val,
@@ -89,19 +91,19 @@ static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
switch (ipmi_send_panic_event) {
case IPMI_SEND_PANIC_EVENT_NONE:
- strcpy(buffer, "none");
+ strcpy(buffer, "none\n");
break;
case IPMI_SEND_PANIC_EVENT:
- strcpy(buffer, "event");
+ strcpy(buffer, "event\n");
break;
case IPMI_SEND_PANIC_EVENT_STRING:
- strcpy(buffer, "string");
+ strcpy(buffer, "string\n");
break;
default:
- strcpy(buffer, "???");
+ strcpy(buffer, "???\n");
break;
}
@@ -317,6 +319,7 @@ struct bmc_device {
int dyn_guid_set;
struct kref usecount;
struct work_struct remove_work;
+ unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2381,6 +2384,8 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
+ /* Record the completion code on error. */
+ intf->bmc->cc = msg->msg.data[0];
intf->bmc->dyn_id_set = 0;
} else {
/*
@@ -2426,23 +2431,39 @@ send_get_device_id_cmd(struct ipmi_smi *intf)
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
int rv;
-
- bmc->dyn_id_set = 2;
+ unsigned int retry_count = 0;
intf->null_user_handler = bmc_device_id_handler;
+retry:
+ bmc->cc = 0;
+ bmc->dyn_id_set = 2;
+
rv = send_get_device_id_cmd(intf);
if (rv)
- return rv;
+ goto out_reset_handler;
wait_event(intf->waitq, bmc->dyn_id_set != 2);
- if (!bmc->dyn_id_set)
+ if (!bmc->dyn_id_set) {
+ if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR
+ || bmc->cc == IPMI_DEVICE_IN_INIT_ERR
+ || bmc->cc == IPMI_NOT_IN_MY_STATE_ERR)
+ && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ msleep(500);
+ dev_warn(intf->si_dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ bmc->cc);
+ goto retry;
+ }
+
rv = -EIO; /* Something went wrong in the fetch. */
+ }
/* dyn_id_set makes the id data available. */
smp_rmb();
+out_reset_handler:
intf->null_user_handler = NULL;
return rv;
@@ -3245,7 +3266,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/* It's the one we want */
if (msg->msg.data[0] != 0) {
/* Got an error from the channel, just go on. */
-
if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
/*
* If the MC does not support this
@@ -3329,6 +3349,7 @@ static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
dev_warn(intf->si_dev,
"Error sending channel information for channel 0, %d\n",
rv);
+ intf->null_user_handler = NULL;
return -EIO;
}
@@ -3430,9 +3451,8 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- tasklet_init(&intf->recv_tasklet,
- smi_recv_tasklet,
- (unsigned long) intf);
+ tasklet_setup(&intf->recv_tasklet,
+ smi_recv_tasklet);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4467,10 +4487,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
}
-static void smi_recv_tasklet(unsigned long val)
+static void smi_recv_tasklet(struct tasklet_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = (struct ipmi_smi *) val;
+ struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
int run_to_completion = intf->run_to_completion;
struct ipmi_smi_msg *newmsg = NULL;
@@ -4542,7 +4562,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_tasklet((unsigned long) intf);
+ smi_recv_tasklet(&intf->recv_tasklet);
else
tasklet_schedule(&intf->recv_tasklet);
}
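The tasklet changes above follow the kernel-wide tasklet_setup()/from_tasklet() conversion, in which the callback receives the tasklet_struct itself instead of an opaque unsigned long. A minimal sketch of the pattern (struct and field names are placeholders, not from this driver):

struct foo {
    struct tasklet_struct tl;
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
    /* Recover the containing object from the embedded tasklet. */
    struct foo *foo = from_tasklet(foo, t, tl);

    /* ... deferred work on foo ... */
}

static void foo_init(struct foo *foo)
{
    /* Replaces tasklet_init(&foo->tl, fn, (unsigned long)foo). */
    tasklet_setup(&foo->tl, foo_tasklet_fn);
}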
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 77b8d551ae7f..5eac94cf4ff8 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1316,6 +1316,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
+ unsigned int retry_count = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
@@ -1327,6 +1328,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
*/
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
+
+retry:
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
@@ -1339,6 +1342,20 @@ static int try_get_dev_id(struct smi_info *smi_info)
/* Check and record info from the get device id, in case we need it. */
rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
resp + 2, resp_len - 2, &smi_info->device_id);
+ if (rv) {
+ /* record completion code */
+ unsigned char cc = *(resp + 2);
+
+ if ((cc == IPMI_DEVICE_IN_FW_UPDATE_ERR
+ || cc == IPMI_DEVICE_IN_INIT_ERR
+ || cc == IPMI_NOT_IN_MY_STATE_ERR)
+ && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+ dev_warn(smi_info->io.dev,
+ "BMC returned 0x%2.2x, retry get bmc device id\n",
+ cc);
+ goto retry;
+ }
+ }
out:
kfree(resp);
@@ -1963,7 +1980,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Do this early so it's available for logs. */
if (!new_smi->io.dev) {
pr_err("IPMI interface added with no device\n");
- rv = EIO;
+ rv = -EIO;
goto out_err;
}
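Both retry paths above (ipmi_msghandler.c and ipmi_si_intf.c) gate the retry on the same three completion codes and the same GET_DEVICE_ID_MAX_RETRY bound. A compact sketch of the shared predicate (a hypothetical helper, not introduced by the patch):

static bool ipmi_cc_is_transient(unsigned char cc)
{
    /* BMC is busy or mid-initialization; worth retrying after a delay. */
    return cc == IPMI_DEVICE_IN_FW_UPDATE_ERR ||
           cc == IPMI_DEVICE_IN_INIT_ERR ||
           cc == IPMI_NOT_IN_MY_STATE_ERR;
}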
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index b6225bba2532..bfea500d6f5f 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -21,6 +21,8 @@
* 2001 Hewlett-Packard Company
*/
+#define DEBUG /* So dev_dbg() is always available. */
+
#include <linux/kernel.h> /* For printk. */
#include <linux/string.h>
#include <linux/module.h>
@@ -126,11 +128,14 @@ static int start_smic_transaction(struct si_sm_data *smic,
if (size > MAX_SMIC_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+ if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
+ dev_warn(smic->io->dev,
+ "SMIC in invalid state %d\n", smic->state);
return IPMI_NOT_IN_MY_STATE_ERR;
+ }
if (smic_debug & SMIC_DEBUG_MSG) {
- printk(KERN_DEBUG "start_smic_transaction -");
+ dev_dbg(smic->io->dev, "%s -", __func__);
for (i = 0; i < size; i++)
pr_cont(" %02x", data[i]);
pr_cont("\n");
@@ -152,7 +157,7 @@ static int smic_get_result(struct si_sm_data *smic,
int i;
if (smic_debug & SMIC_DEBUG_MSG) {
- printk(KERN_DEBUG "smic_get result -");
+ dev_dbg(smic->io->dev, "smic_get result -");
for (i = 0; i < smic->read_pos; i++)
pr_cont(" %02x", smic->read_data[i]);
pr_cont("\n");
@@ -324,9 +329,9 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
}
if (smic->state != SMIC_IDLE) {
if (smic_debug & SMIC_DEBUG_STATES)
- printk(KERN_DEBUG
- "smic_event - smic->smic_timeout = %ld, time = %ld\n",
- smic->smic_timeout, time);
+ dev_dbg(smic->io->dev,
+ "%s - smic->smic_timeout = %ld, time = %ld\n",
+ __func__, smic->smic_timeout, time);
/*
* FIXME: smic_event is sometimes called with time >
* SMIC_RETRY_TIMEOUT
@@ -345,8 +350,9 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
status = read_smic_status(smic);
if (smic_debug & SMIC_DEBUG_STATES)
- printk(KERN_DEBUG "smic_event - state = %d, flags = 0x%02x, status = 0x%02x\n",
- smic->state, flags, status);
+ dev_dbg(smic->io->dev,
+ "%s - state = %d, flags = 0x%02x, status = 0x%02x\n",
+ __func__, smic->state, flags, status);
switch (smic->state) {
case SMIC_IDLE:
@@ -436,8 +442,9 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
data = read_smic_data(smic);
if (data != 0) {
if (smic_debug & SMIC_DEBUG_ENABLE)
- printk(KERN_DEBUG "SMIC_WRITE_END: data = %02x\n",
- data);
+ dev_dbg(smic->io->dev,
+ "SMIC_WRITE_END: data = %02x\n",
+ data);
start_error_recovery(smic,
"state = SMIC_WRITE_END, "
"data != SUCCESS");
@@ -516,8 +523,9 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
/* data register holds an error code */
if (data != 0) {
if (smic_debug & SMIC_DEBUG_ENABLE)
- printk(KERN_DEBUG "SMIC_READ_END: data = %02x\n",
- data);
+ dev_dbg(smic->io->dev,
+ "SMIC_READ_END: data = %02x\n",
+ data);
start_error_recovery(smic,
"state = SMIC_READ_END, "
"data != SUCCESS");
@@ -533,7 +541,8 @@ static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
default:
if (smic_debug & SMIC_DEBUG_ENABLE) {
- printk(KERN_DEBUG "smic->state = %d\n", smic->state);
+ dev_dbg(smic->io->dev,
+ "smic->state = %d\n", smic->state);
start_error_recovery(smic, "state = UNKNOWN");
return SI_SM_CALL_WITH_DELAY;
}
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 45932f05fd67..0ec73917d8dd 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -853,8 +853,10 @@ static void lp_console_write(struct console *co, const char *s,
count--;
do {
written = parport_write(port, crlf, i);
- if (written > 0)
- i -= written, crlf += written;
+ if (written > 0) {
+ i -= written;
+ crlf += written;
+ }
} while (i > 0 && (CONSOLE_LP_STRICT || written > 0));
}
} while (count > 0 && (CONSOLE_LP_STRICT || written > 0));
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index abd4ffdc8cde..94c2b556cf97 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -726,6 +726,33 @@ static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
return written;
}
+static ssize_t read_zero(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t cleared = 0;
+
+ while (count) {
+ size_t chunk = min_t(size_t, count, PAGE_SIZE);
+ size_t left;
+
+ left = clear_user(buf + cleared, chunk);
+ if (unlikely(left)) {
+ cleared += (chunk - left);
+ if (!cleared)
+ return -EFAULT;
+ break;
+ }
+ cleared += chunk;
+ count -= chunk;
+
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
+
+ return cleared;
+}
+
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
@@ -921,6 +948,7 @@ static const struct file_operations zero_fops = {
.llseek = zero_lseek,
.write = write_zero,
.read_iter = read_iter_zero,
+ .read = read_zero,
.write_iter = write_iter_zero,
.mmap = mmap_zero,
.get_unmapped_area = get_unmapped_area_zero,
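The new read_zero() restores a plain ->read for /dev/zero alongside ->read_iter, chunking clear_user() at PAGE_SIZE so the loop stays preemptible and can be cut short by a pending signal. A hypothetical userspace check (not part of the patch):

#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char buf[8192];
    int fd = open("/dev/zero", O_RDONLY);

    assert(fd >= 0);
    memset(buf, 0xff, sizeof(buf));
    /* A plain read(2) should fill the whole buffer with zeros. */
    assert(read(fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf));
    assert(buf[0] == 0 && buf[sizeof(buf) - 1] == 0);
    close(fd);
    return 0;
}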
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 0fae33319d2e..f8231e2e84be 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -195,10 +195,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
pages = vma_pages(vma);
vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
- if (vdata_size <= PAGE_SIZE)
- vdata = kzalloc(vdata_size, GFP_KERNEL);
- else
- vdata = vzalloc(vdata_size);
+ vdata = kvzalloc(vdata_size, GFP_KERNEL);
if (!vdata)
return -ENOMEM;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d20ba1b104ca..2a41b21623ae 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 380bf518338e..5d52a1f4738c 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -28,7 +28,8 @@
#include <linux/uaccess.h>
struct raw_device_data {
- struct block_device *binding;
+ dev_t binding;
+ struct block_device *bdev;
int inuse;
};
@@ -63,19 +64,25 @@ static int raw_open(struct inode *inode, struct file *filp)
return 0;
}
+ pr_warn_ratelimited(
+ "process %s (pid %d) is using the deprecated raw device\n"
+ "support will be removed in Linux 5.14.\n",
+ current->comm, current->pid);
+
mutex_lock(&raw_mutex);
/*
* All we need to do on open is check that the device is bound.
*/
- bdev = raw_devices[minor].binding;
err = -ENODEV;
- if (!bdev)
+ if (!raw_devices[minor].binding)
goto out;
- bdgrab(bdev);
- err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
- if (err)
+ bdev = blkdev_get_by_dev(raw_devices[minor].binding,
+ filp->f_mode | FMODE_EXCL, raw_open);
+ if (IS_ERR(bdev)) {
+ err = PTR_ERR(bdev);
goto out;
+ }
err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out1;
@@ -85,6 +92,7 @@ static int raw_open(struct inode *inode, struct file *filp)
file_inode(filp)->i_mapping =
bdev->bd_inode->i_mapping;
filp->private_data = bdev;
+ raw_devices[minor].bdev = bdev;
mutex_unlock(&raw_mutex);
return 0;
@@ -105,7 +113,7 @@ static int raw_release(struct inode *inode, struct file *filp)
struct block_device *bdev;
mutex_lock(&raw_mutex);
- bdev = raw_devices[minor].binding;
+ bdev = raw_devices[minor].bdev;
if (--raw_devices[minor].inuse == 0)
/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
inode->i_mapping = &inode->i_data;
@@ -128,6 +136,7 @@ raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
static int bind_set(int number, u64 major, u64 minor)
{
dev_t dev = MKDEV(major, minor);
+ dev_t raw = MKDEV(RAW_MAJOR, number);
struct raw_device_data *rawdev;
int err = 0;
@@ -161,25 +170,17 @@ static int bind_set(int number, u64 major, u64 minor)
mutex_unlock(&raw_mutex);
return -EBUSY;
}
- if (rawdev->binding) {
- bdput(rawdev->binding);
+ if (rawdev->binding)
module_put(THIS_MODULE);
- }
+
+ rawdev->binding = dev;
if (!dev) {
/* unbind */
- rawdev->binding = NULL;
- device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
+ device_destroy(raw_class, raw);
} else {
- rawdev->binding = bdget(dev);
- if (rawdev->binding == NULL) {
- err = -ENOMEM;
- } else {
- dev_t raw = MKDEV(RAW_MAJOR, number);
- __module_get(THIS_MODULE);
- device_destroy(raw_class, raw);
- device_create(raw_class, NULL, raw, NULL,
- "raw%d", number);
- }
+ __module_get(THIS_MODULE);
+ device_destroy(raw_class, raw);
+ device_create(raw_class, NULL, raw, NULL, "raw%d", number);
}
mutex_unlock(&raw_mutex);
return err;
@@ -187,18 +188,9 @@ static int bind_set(int number, u64 major, u64 minor)
static int bind_get(int number, dev_t *dev)
{
- struct raw_device_data *rawdev;
- struct block_device *bdev;
-
if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
-
- rawdev = &raw_devices[number];
-
- mutex_lock(&raw_mutex);
- bdev = rawdev->binding;
- *dev = bdev ? bdev->bd_dev : 0;
- mutex_unlock(&raw_mutex);
+ *dev = raw_devices[number].binding;
return 0;
}
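The rework above keeps the binding as a bare dev_t rather than pinning a struct block_device across the bind, resolving the block device only at open time via blkdev_get_by_dev(). A dev_t is just the packed major/minor pair from <linux/kdev_t.h>; a short sketch of the helpers involved (device numbers chosen for illustration):

dev_t dev = MKDEV(8, 1);                /* major 8 is conventionally the sd driver */

unsigned int major = MAJOR(dev);        /* 8 */
unsigned int minor = MINOR(dev);        /* 1 */

/* bind_set() above uses a zero dev_t to mean "unbound". */
bool bound = (dev != 0);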
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 58b4c573d176..a18c314da211 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_SYNQUACER
+ tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
+ depends on ARCH_SYNQUACER
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip that is compliant with the
+ TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0), say Yes and it will be accessible from
+ within Linux on the Socionext SynQuacer platform.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_synquacer.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9567e5197f74..84db4fb3a9c9 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -21,6 +21,7 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SYNQUACER) += tpm_tis_synquacer.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
tpm_tis_spi-y := tpm_tis_spi_main.o
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index 6bb023de17f1..35229e5143ca 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log_size = log_tbl->size;
memunmap(log_tbl);
+ if (!log_size) {
+ pr_warn("UEFI TPM log area empty\n");
+ return -EIO;
+ }
+
log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
MEMREMAP_WB);
if (!log_tbl) {
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index d52bf4df0bca..e2ff0b273a0f 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -56,31 +56,20 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
sprintf(str,
- "Algorithm: %02X %02X %02X %02X\n"
- "Encscheme: %02X %02X\n"
- "Sigscheme: %02X %02X\n"
- "Parameters: %02X %02X %02X %02X "
- "%02X %02X %02X %02X "
- "%02X %02X %02X %02X\n"
+ "Algorithm: %4ph\n"
+ "Encscheme: %2ph\n"
+ "Sigscheme: %2ph\n"
+ "Parameters: %12ph\n"
"Modulus length: %d\n"
"Modulus:\n",
- out->algorithm[0], out->algorithm[1], out->algorithm[2],
- out->algorithm[3],
- out->encscheme[0], out->encscheme[1],
- out->sigscheme[0], out->sigscheme[1],
- out->parameters[0], out->parameters[1],
- out->parameters[2], out->parameters[3],
- out->parameters[4], out->parameters[5],
- out->parameters[6], out->parameters[7],
- out->parameters[8], out->parameters[9],
- out->parameters[10], out->parameters[11],
+ out->algorithm,
+ out->encscheme,
+ out->sigscheme,
+ out->parameters,
be32_to_cpu(out->keysize));
- for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", out->modulus[i]);
- if ((i + 1) % 16 == 0)
- str += sprintf(str, "\n");
- }
+ for (i = 0; i < 256; i += 16)
+ str += sprintf(str, "%16ph\n", &out->modulus[i]);
out_buf:
tpm_buf_destroy(&tpm_buf);
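The sysfs conversion above leans on the kernel's %ph printf extension (see Documentation/core-api/printk-formats.rst): %Nph prints N bytes from the given pointer as space-separated hex, for N up to 64. A small sketch of what the new format produces (byte values assumed for illustration):

u8 alg[4] = { 0x00, 0x00, 0x00, 0x01 };

/* Prints: Algorithm: 00 00 00 01 */
pr_info("Algorithm: %4ph\n", alg);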
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0b214963539d..4ed6e660273a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
+#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}
-static bool interrupts = true;
-module_param(interrupts, bool, 0444);
+static int interrupts = -1;
+module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
static bool itpm;
@@ -63,6 +64,28 @@ module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif
+static int tpm_tis_disable_irq(const struct dmi_system_id *d)
+{
+ if (interrupts == -1) {
+ pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
+ interrupts = 0;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id tpm_tis_dmi_table[] = {
+ {
+ .callback = tpm_tis_disable_irq,
+ .ident = "ThinkPad T490s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
+ },
+ },
+ {}
+};
+
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
int irq = -1;
int rc;
+ dmi_check_system(tpm_tis_dmi_table);
+
rc = check_acpi_tpm2(dev);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 65ab1b027949..92c51c6cfd1b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -239,6 +239,17 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
if (rc < 0)
return 0;
+ if (unlikely((status & TPM_STS_READ_ZERO) != 0)) {
+ /*
+ * If this trips, the chances are the read is
+ * returning 0xff because the locality hasn't been
+ * acquired. Usually because tpm_try_get_ops() hasn't
+ * been called before doing a TPM operation.
+ */
+ WARN_ONCE(1, "TPM returned invalid status\n");
+ return 0;
+ }
+
return status;
}
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 7337819f5d7b..9b2d32a59f67 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -34,6 +34,7 @@ enum tis_status {
TPM_STS_GO = 0x20,
TPM_STS_DATA_AVAIL = 0x10,
TPM_STS_DATA_EXPECT = 0x08,
+ TPM_STS_READ_ZERO = 0x23, /* bits that must be zero on read */
};
enum tis_int_flags {
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
new file mode 100644
index 000000000000..e47bdd272704
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * This device driver implements MMIO TPM on SynQuacer Platform.
+ */
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+/*
+ * irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+struct tpm_tis_synquacer_info {
+ struct resource res;
+ int irq;
+};
+
+struct tpm_tis_synquacer_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
+};
+
+static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_synquacer_phy, priv);
+}
+
+static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, const u8 *value)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_read16_bw(struct tpm_tis_data *data,
+ u32 addr, u16 *result)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ /*
+ * Due to a limitation of the SPI controller on SynQuacer,
+ * 16/32-bit accesses must be done byte-wise, in descending order.
+ */
+ *result = (ioread8(phy->iobase + addr + 1) << 8) |
+ (ioread8(phy->iobase + addr));
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_read32_bw(struct tpm_tis_data *data,
+ u32 addr, u32 *result)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ /*
+ * Due to a limitation of the SPI controller on SynQuacer,
+ * 16/32-bit accesses must be done byte-wise, in descending order.
+ */
+ *result = (ioread8(phy->iobase + addr + 3) << 24) |
+ (ioread8(phy->iobase + addr + 2) << 16) |
+ (ioread8(phy->iobase + addr + 1) << 8) |
+ (ioread8(phy->iobase + addr));
+
+ return 0;
+}
+
+static int tpm_tis_synquacer_write32_bw(struct tpm_tis_data *data,
+ u32 addr, u32 value)
+{
+ struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ /*
+ * Due to a limitation of the SPI controller on SynQuacer,
+ * 16/32-bit accesses must be done byte-wise, in descending order.
+ */
+ iowrite8(value >> 24, phy->iobase + addr + 3);
+ iowrite8(value >> 16, phy->iobase + addr + 2);
+ iowrite8(value >> 8, phy->iobase + addr + 1);
+ iowrite8(value, phy->iobase + addr);
+
+ return 0;
+}
+
+static const struct tpm_tis_phy_ops tpm_tcg_bw = {
+ .read_bytes = tpm_tis_synquacer_read_bytes,
+ .write_bytes = tpm_tis_synquacer_write_bytes,
+ .read16 = tpm_tis_synquacer_read16_bw,
+ .read32 = tpm_tis_synquacer_read32_bw,
+ .write32 = tpm_tis_synquacer_write32_bw,
+};
+
+static int tpm_tis_synquacer_init(struct device *dev,
+ struct tpm_tis_synquacer_info *tpm_info)
+{
+ struct tpm_tis_synquacer_phy *phy;
+
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_synquacer_phy), GFP_KERNEL);
+ if (phy == NULL)
+ return -ENOMEM;
+
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
+
+ return tpm_tis_core_init(dev, &phy->priv, tpm_info->irq, &tpm_tcg_bw,
+ ACPI_HANDLE(dev));
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_synquacer_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_synquacer_probe(struct platform_device *pdev)
+{
+ struct tpm_tis_synquacer_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ tpm_info.irq = -1;
+
+ return tpm_tis_synquacer_init(&pdev->dev, &tpm_info);
+}
+
+static int tpm_tis_synquacer_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tis_synquacer_of_platform_match[] = {
+ {.compatible = "socionext,synquacer-tpm-mmio"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, tis_synquacer_of_platform_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tpm_synquacer_acpi_tbl[] = {
+ { "SCX0009" },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
+#endif
+
+static struct platform_driver tis_synquacer_drv = {
+ .probe = tpm_tis_synquacer_probe,
+ .remove = tpm_tis_synquacer_remove,
+ .driver = {
+ .name = "tpm_tis_synquacer",
+ .pm = &tpm_tis_synquacer_pm,
+ .of_match_table = of_match_ptr(tis_synquacer_of_platform_match),
+ .acpi_match_table = ACPI_PTR(tpm_synquacer_acpi_tbl),
+ },
+};
+
+static int __init tpm_tis_synquacer_module_init(void)
+{
+ return platform_driver_register(&tis_synquacer_drv);
+}
+
+static void __exit tpm_tis_synquacer_module_exit(void)
+{
+ platform_driver_unregister(&tis_synquacer_drv);
+}
+
+module_init(tpm_tis_synquacer_module_init);
+module_exit(tpm_tis_synquacer_module_exit);
+MODULE_DESCRIPTION("TPM MMIO Driver for Socionext SynQuacer platform");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index a2da8f768b94..1836cc56e357 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size
/*
* Allocate DMA memory from ancestor. When a virtio
* device is created by remoteproc, the DMA memory is
- * associated with the grandparent device:
- * vdev => rproc => platform-dev.
+ * associated with the parent device:
+ * virtioY => remoteprocX#vdevYbuffer.
*/
- if (!vdev->dev.parent || !vdev->dev.parent->parent)
+ buf->dev = vdev->dev.parent;
+ if (!buf->dev)
goto free_buf;
- buf->dev = vdev->dev.parent->parent;
/* Increase device refcnt to avoid freeing it */
get_device(buf->dev);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4026fac9fac3..c715d4681a0b 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -49,7 +49,7 @@ source "drivers/clk/versatile/Kconfig"
config CLK_HSDK
bool "PLL Driver for HSDK platform"
- depends on OF || COMPILE_TEST
+ depends on ARC_SOC_HSDK || COMPILE_TEST
depends on HAS_IOMEM
help
This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
@@ -373,6 +373,7 @@ source "drivers/clk/meson/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
+source "drivers/clk/rockchip/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sifive/Kconfig"
source "drivers/clk/sprd/Kconfig"
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index c88ee20bee31..cb4a406ed15d 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -46,13 +46,6 @@ static const struct {
{ .n = "pck1", .p = "prog1", .id = 9 },
};
-static const struct clk_pcr_layout at91sam9g45_pcr_layout = {
- .offset = 0x10c,
- .cmd = BIT(12),
- .pid_mask = GENMASK(5, 0),
- .div_mask = GENMASK(17, 16),
-};
-
struct pck {
char *n;
u8 id;
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 5c83e899084f..cfae2f59df66 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
return -EINVAL;
regmap_read(regmap, AT91_CKGR_MOR, &tmp);
- tmp &= ~MOR_KEY_MASK;
if (index && !(tmp & AT91_PMC_MOSCSEL))
- regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ tmp = AT91_PMC_MOSCSEL;
else if (!index && (tmp & AT91_PMC_MOSCSEL))
- regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+ tmp = 0;
+ else
+ return 0;
+
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ AT91_PMC_MOSCSEL | MOR_KEY_MASK,
+ tmp | AT91_PMC_KEY);
while (!clk_sam9x5_main_ready(regmap))
cpu_relax();
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 7867eaf0447f..7a27ba8e0577 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -112,8 +112,8 @@ at91_clk_register_peripheral(struct regmap *regmap, const char *name,
init.name = name;
init.ops = &peripheral_ops;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
init.flags = 0;
periph->id = id;
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index b473298ef7e6..78f458a7b2ef 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -331,7 +331,7 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
struct clk_hw *parent = clk_hw_get_parent(&core->hw);
unsigned long tmp_rate, tmp_parent_rate, tmp_diff;
long best_diff = -1, best_rate = -EINVAL;
- u32 divid, best_div;
+ u32 divid;
if (!rate)
return 0;
@@ -352,7 +352,6 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
*parent_rate = tmp_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
- best_div = divid;
}
if (!best_diff)
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index ab6318c0589e..3c4c95603595 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[3] = "masterck";
parent_names[4] = "pllack_divck";
parent_names[5] = "upllck_divck";
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 2; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index 1eec8c0b8f50..2445d4b12baf 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -51,11 +51,13 @@ struct ccu_pll_info {
};
/*
- * Mark as critical all PLLs except Ethernet one. CPU and DDR PLLs are sources
- * of CPU cores and DDR controller reference clocks, due to which they
- * obviously shouldn't be ever gated. SATA and PCIe PLLs are the parents of
- * APB-bus and DDR controller AXI-bus clocks. If they are gated the system will
- * be unusable.
+ * Alas, we have to mark all PLLs as critical. The CPU and DDR PLLs are the
+ * sources of the CPU cores and DDR controller reference clocks, so they
+ * obviously must never be gated. The SATA and PCIe PLLs are the parents of
+ * the APB-bus and DDR controller AXI-bus clocks; if they are gated the
+ * system will be unusable. Moreover, disabling the SATA and Ethernet PLLs
+ * causes an automatic reset of the corresponding subsystems. So until we
+ * are ready to re-initialize all the devices consuming those PLLs, they
+ * will be marked as critical too.
*/
static const struct ccu_pll_info pll_info[] = {
CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
@@ -67,7 +69,7 @@ static const struct ccu_pll_info pll_info[] = {
CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
CLK_IS_CRITICAL),
CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
- CLK_SET_RATE_GATE)
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE)
};
struct ccu_pll_data {
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 3439bc65bb4e..1ac803e14fa3 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1338,8 +1338,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
pll->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &pll->hw);
- if (ret)
+ if (ret) {
+ kfree(pll);
return NULL;
+ }
return &pll->hw;
}
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 5cc82954e1ce..f89b9cfc4309 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -271,6 +271,7 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
case RPI_FIRMWARE_CORE_CLK_ID:
case RPI_FIRMWARE_M2MC_CLK_ID:
case RPI_FIRMWARE_V3D_CLK_ID:
+ case RPI_FIRMWARE_PIXEL_BVB_CLK_ID:
hw = raspberrypi_clk_register(rpi, clks->parent,
clks->id);
if (IS_ERR(hw))
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 96f351785b41..14d803e6af62 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -27,19 +27,23 @@
#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16)
+#define MMCM_REG_CLKOUT5_2 0x07
#define MMCM_REG_CLKOUT0_1 0x08
#define MMCM_REG_CLKOUT0_2 0x09
+#define MMCM_REG_CLKOUT6_2 0x13
#define MMCM_REG_CLK_FB1 0x14
#define MMCM_REG_CLK_FB2 0x15
#define MMCM_REG_CLK_DIV 0x16
#define MMCM_REG_LOCK1 0x18
#define MMCM_REG_LOCK2 0x19
#define MMCM_REG_LOCK3 0x1a
+#define MMCM_REG_POWER 0x28
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
#define MMCM_CLKOUT_NOCOUNT BIT(6)
+#define MMCM_CLK_DIV_DIVIDE BIT(11)
#define MMCM_CLK_DIV_NOCOUNT BIT(12)
struct axi_clkgen {
@@ -107,6 +111,8 @@ static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
unsigned long d, d_min, d_max, _d_min, _d_max;
unsigned long m, m_min, m_max;
unsigned long f, dout, best_f, fvco;
+ unsigned long fract_shift = 0;
+ unsigned long fvco_min_fract, fvco_max_fract;
fin /= 1000;
fout /= 1000;
@@ -119,42 +125,89 @@ static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
d_max = min_t(unsigned long, fin / fpfd_min, 80);
- m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min, fin) * d_min, 1);
- m_max = min_t(unsigned long, fvco_max * d_max / fin, 64);
+again:
+ fvco_min_fract = fvco_min << fract_shift;
+ fvco_max_fract = fvco_max << fract_shift;
+
+ m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
+ m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
for (m = m_min; m <= m_max; m++) {
- _d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max));
- _d_max = min(d_max, fin * m / fvco_min);
+ _d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max_fract));
+ _d_max = min(d_max, fin * m / fvco_min_fract);
for (d = _d_min; d <= _d_max; d++) {
fvco = fin * m / d;
dout = DIV_ROUND_CLOSEST(fvco, fout);
- dout = clamp_t(unsigned long, dout, 1, 128);
+ dout = clamp_t(unsigned long, dout, 1, 128 << fract_shift);
f = fvco / dout;
if (abs(f - fout) < abs(best_f - fout)) {
best_f = f;
*best_d = d;
- *best_m = m;
- *best_dout = dout;
+ *best_m = m << (3 - fract_shift);
+ *best_dout = dout << (3 - fract_shift);
if (best_f == fout)
return;
}
}
}
+
+ /* Lets see if we find a better setting in fractional mode */
+ if (fract_shift == 0) {
+ fract_shift = 3;
+ goto again;
+ }
}
-static void axi_clkgen_calc_clk_params(unsigned int divider, unsigned int *low,
- unsigned int *high, unsigned int *edge, unsigned int *nocount)
+struct axi_clkgen_div_params {
+ unsigned int low;
+ unsigned int high;
+ unsigned int edge;
+ unsigned int nocount;
+ unsigned int frac_en;
+ unsigned int frac;
+ unsigned int frac_wf_f;
+ unsigned int frac_wf_r;
+ unsigned int frac_phase;
+};
+
+static void axi_clkgen_calc_clk_params(unsigned int divider,
+ unsigned int frac_divider, struct axi_clkgen_div_params *params)
{
- if (divider == 1)
- *nocount = 1;
- else
- *nocount = 0;
- *high = divider / 2;
- *edge = divider % 2;
- *low = divider - *high;
+ memset(params, 0x0, sizeof(*params));
+
+ if (divider == 1) {
+ params->nocount = 1;
+ return;
+ }
+
+ if (frac_divider == 0) {
+ params->high = divider / 2;
+ params->edge = divider % 2;
+ params->low = divider - params->high;
+ } else {
+ params->frac_en = 1;
+ params->frac = frac_divider;
+
+ params->high = divider / 2;
+ params->edge = divider % 2;
+ params->low = params->high;
+
+ if (params->edge == 0) {
+ params->high--;
+ params->frac_wf_r = 1;
+ }
+
+ if (params->edge == 0 || frac_divider == 1)
+ params->low--;
+ if (((params->edge == 0) ^ (frac_divider == 1)) ||
+ (divider == 2 && frac_divider == 1))
+ params->frac_wf_f = 1;
+
+ params->frac_phase = params->edge * 4 + frac_divider / 2;
+ }
}
static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
@@ -246,15 +299,29 @@ static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
return container_of(clk_hw, struct axi_clkgen, clk_hw);
}
+static void axi_clkgen_set_div(struct axi_clkgen *axi_clkgen,
+ unsigned int reg1, unsigned int reg2, unsigned int reg3,
+ struct axi_clkgen_div_params *params)
+{
+ axi_clkgen_mmcm_write(axi_clkgen, reg1,
+ (params->high << 6) | params->low, 0xefff);
+ axi_clkgen_mmcm_write(axi_clkgen, reg2,
+ (params->frac << 12) | (params->frac_en << 11) |
+ (params->frac_wf_r << 10) | (params->edge << 7) |
+ (params->nocount << 6), 0x7fff);
+ if (reg3 != 0) {
+ axi_clkgen_mmcm_write(axi_clkgen, reg3,
+ (params->frac_phase << 11) | (params->frac_wf_f << 10), 0x3c00);
+ }
+}
+
static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
unsigned long rate, unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
- unsigned int nocount;
- unsigned int high;
- unsigned int edge;
- unsigned int low;
+ struct axi_clkgen_div_params params;
+ uint32_t power = 0;
uint32_t filter;
uint32_t lock;
@@ -266,24 +333,26 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
+ if ((dout & 0x7) != 0 || (m & 0x7) != 0)
+ power |= 0x9800;
+
+ axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_POWER, power, 0x9800);
+
filter = axi_clkgen_lookup_filter(m - 1);
lock = axi_clkgen_lookup_lock(m - 1);
- axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
- axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_1,
- (high << 6) | low, 0xefff);
- axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_2,
- (edge << 7) | (nocount << 6), 0x03ff);
+ axi_clkgen_calc_clk_params(dout >> 3, dout & 0x7, &params);
+ axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLKOUT0_1, MMCM_REG_CLKOUT0_2,
+ MMCM_REG_CLKOUT5_2, &params);
- axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
+ axi_clkgen_calc_clk_params(d, 0, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV,
- (edge << 13) | (nocount << 12) | (high << 6) | low, 0x3fff);
+ (params.edge << 13) | (params.nocount << 12) |
+ (params.high << 6) | params.low, 0x3fff);
- axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
- axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB1,
- (high << 6) | low, 0xefff);
- axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB2,
- (edge << 7) | (nocount << 6), 0x03ff);
+ axi_clkgen_calc_clk_params(m >> 3, m & 0x7, &params);
+ axi_clkgen_set_div(axi_clkgen, MMCM_REG_CLK_FB1, MMCM_REG_CLK_FB2,
+ MMCM_REG_CLKOUT6_2, &params);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff);
axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2,
@@ -313,35 +382,51 @@ static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
return min_t(unsigned long long, tmp, LONG_MAX);
}
+static unsigned int axi_clkgen_get_div(struct axi_clkgen *axi_clkgen,
+ unsigned int reg1, unsigned int reg2)
+{
+ unsigned int val1, val2;
+ unsigned int div;
+
+ axi_clkgen_mmcm_read(axi_clkgen, reg2, &val2);
+ if (val2 & MMCM_CLKOUT_NOCOUNT)
+ return 8;
+
+ axi_clkgen_mmcm_read(axi_clkgen, reg1, &val1);
+
+ div = (val1 & 0x3f) + ((val1 >> 6) & 0x3f);
+ div <<= 3;
+
+ if (val2 & MMCM_CLK_DIV_DIVIDE) {
+ if ((val2 & BIT(7)) && (val2 & 0x7000) != 0x1000)
+ div += 8;
+ else
+ div += 16;
+
+ div += (val2 >> 12) & 0x7;
+ }
+
+ return div;
+}
+
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
unsigned int d, m, dout;
- unsigned int reg;
unsigned long long tmp;
+ unsigned int val;
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_2, &reg);
- if (reg & MMCM_CLKOUT_NOCOUNT) {
- dout = 1;
- } else {
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
- dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
- }
+ dout = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLKOUT0_1,
+ MMCM_REG_CLKOUT0_2);
+ m = axi_clkgen_get_div(axi_clkgen, MMCM_REG_CLK_FB1,
+ MMCM_REG_CLK_FB2);
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
- if (reg & MMCM_CLK_DIV_NOCOUNT)
+ axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &val);
+ if (val & MMCM_CLK_DIV_NOCOUNT)
d = 1;
else
- d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
-
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB2, &reg);
- if (reg & MMCM_CLKOUT_NOCOUNT) {
- m = 1;
- } else {
- axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
- m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
- }
+ d = (val & 0x3f) + ((val >> 6) & 0x3f);
if (d == 0 || dout == 0)
return 0;
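After this change the m and dout dividers are carried in 3-bit fixed point, i.e. in units of 1/8, which is what the << (3 - fract_shift) scaling above and the dout >> 3 / dout & 0x7 split in set_rate express. A short worked sketch (values chosen for illustration):

/* dout stored in eighths: integer part in the high bits, fractional
 * eighths in the low three bits. 65 therefore encodes 65 / 8 = 8.125.
 */
unsigned int dout = 65;
unsigned int int_part = dout >> 3;      /* 8 */
unsigned int frac_part = dout & 0x7;    /* 1, i.e. 0.125 */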
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 7376f573bfdb..2ddb54f7d3ab 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -328,6 +328,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
rate_hw, rate_ops, gate_hw,
gate_ops, flags);
}
+EXPORT_SYMBOL_GPL(clk_hw_register_composite);
struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
const char *name,
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8b343e59dc61..910e6e74ae90 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -206,6 +206,7 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
+ * @node: device node for the clock
*/
void __init of_fixed_factor_clk_setup(struct device_node *node)
{
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 77499a27c8fb..45501637705c 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -168,6 +168,7 @@ static struct clk_hw *_of_fixed_clk_setup(struct device_node *node)
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
+ * @node: device node for the clock
*/
void __init of_fixed_clk_setup(struct device_node *node)
{
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 5942e9874bc0..46101c6a20f2 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -31,7 +31,7 @@
#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1 4
#define CGB_PLL2 5
-#define MAX_PLL_DIV 16
+#define MAX_PLL_DIV 32
struct clockgen_pll_div {
struct clk *clk;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 2ce370c804aa..aa21371f9104 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -267,18 +267,7 @@ static struct platform_driver s2mps11_clk_driver = {
.remove = s2mps11_clk_remove,
.id_table = s2mps11_clk_id,
};
-
-static int __init s2mps11_clk_init(void)
-{
- return platform_driver_register(&s2mps11_clk_driver);
-}
-subsys_initcall(s2mps11_clk_init);
-
-static void __exit s2mps11_clk_cleanup(void)
-{
- platform_driver_unregister(&s2mps11_clk_driver);
-}
-module_exit(s2mps11_clk_cleanup);
+module_platform_driver(s2mps11_clk_driver);
MODULE_DESCRIPTION("S2MPS11 Clock Driver");
MODULE_AUTHOR("Yadwinder Singh Brar <yadi.brar@samsung.com>");
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 3d7acab9d280..e0446e66fa64 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -883,11 +883,9 @@ static int si5341_output_set_parent(struct clk_hw *hw, u8 index)
static u8 si5341_output_get_parent(struct clk_hw *hw)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
- int err;
u32 val;
- err = regmap_read(output->data->regmap,
- SI5341_OUT_MUX_SEL(output), &val);
+ regmap_read(output->data->regmap, SI5341_OUT_MUX_SEL(output), &val);
return val & 0x7;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0a9261a099bd..f83dac54ed85 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4363,7 +4363,7 @@ struct of_clk_provider {
extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
- __used __section(__clk_of_table_end);
+ __used __section("__clk_of_table_end");
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index bdc52364b421..77d18276bfe8 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -571,6 +571,7 @@ static const struct clk_ops da8xx_usb1_clk48_ops = {
/**
* da8xx_cfgchip_register_usb1_clk48 - Register a new USB 1.1 PHY clock
+ * @dev: The device
* @regmap: The CFGCHIP regmap
*/
static struct da8xx_usb1_clk48 *
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index db0253fa3d64..3b393cb07295 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -1,40 +1,102 @@
# SPDX-License-Identifier: GPL-2.0
# common clock support for NXP i.MX SoC family.
config MXC_CLK
- bool
- def_bool ARCH_MXC
+ tristate "IMX clock"
+ depends on ARCH_MXC || COMPILE_TEST
config MXC_CLK_SCU
- bool
- depends on IMX_SCU
+ tristate "IMX SCU clock"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+
+config CLK_IMX1
+ def_bool SOC_IMX1
+ select MXC_CLK
+
+config CLK_IMX25
+ def_bool SOC_IMX25
+ select MXC_CLK
+
+config CLK_IMX27
+ def_bool SOC_IMX27
+ select MXC_CLK
+
+config CLK_IMX31
+ def_bool SOC_IMX31
+ select MXC_CLK
+
+config CLK_IMX35
+ def_bool SOC_IMX35
+ select MXC_CLK
+
+config CLK_IMX5
+ def_bool SOC_IMX5
+ select MXC_CLK
+
+config CLK_IMX6Q
+ def_bool SOC_IMX6Q
+ select MXC_CLK
+
+config CLK_IMX6SL
+ def_bool SOC_IMX6SL
+ select MXC_CLK
+
+config CLK_IMX6SLL
+ def_bool SOC_IMX6SLL
+ select MXC_CLK
+
+config CLK_IMX6SX
+ def_bool SOC_IMX6SX
+ select MXC_CLK
+
+config CLK_IMX6UL
+ def_bool SOC_IMX6UL
+ select MXC_CLK
+
+config CLK_IMX7D
+ def_bool SOC_IMX7D
+ select MXC_CLK
+
+config CLK_IMX7ULP
+ def_bool SOC_IMX7ULP
+ select MXC_CLK
+
+config CLK_VF610
+ def_bool SOC_VF610
+ select MXC_CLK
config CLK_IMX8MM
- bool "IMX8MM CCM Clock Driver"
- depends on ARCH_MXC
+ tristate "IMX8MM CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
help
Build the driver for i.MX8MM CCM Clock Driver
config CLK_IMX8MN
- bool "IMX8MN CCM Clock Driver"
- depends on ARCH_MXC
+ tristate "IMX8MN CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
help
Build the driver for i.MX8MN CCM Clock Driver
config CLK_IMX8MP
- bool "IMX8MP CCM Clock Driver"
- depends on ARCH_MXC
+ tristate "IMX8MP CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
help
Build the driver for i.MX8MP CCM Clock Driver
config CLK_IMX8MQ
- bool "IMX8MQ CCM Clock Driver"
- depends on ARCH_MXC
+ tristate "IMX8MQ CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
help
Build the driver for i.MX8MQ CCM Clock Driver
config CLK_IMX8QXP
- bool "IMX8QXP SCU Clock"
- depends on ARCH_MXC && IMX_SCU && ARM64
+ tristate "IMX8QXP SCU Clock"
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ depends on IMX_SCU && HAVE_ARM_SMCCC
select MXC_CLK_SCU
help
Build the driver for IMX8QXP SCU based clocks.
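The core change in this Kconfig rework is bool to tristate: MXC_CLK, MXC_CLK_SCU and the i.MX8M/8QXP drivers can now be built as loadable modules, with COMPILE_TEST added for build coverage, while the older SoCs gain per-SoC def_bool symbols that simply select MXC_CLK. A driver that becomes tristate needs module boilerplate and metadata; a minimal sketch with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_clk_driver = {
	.driver = {
		.name = "example-clk",
	},
};
module_platform_driver(example_clk_driver);

MODULE_DESCRIPTION("Example SoC clock driver, buildable as a module");
MODULE_LICENSE("GPL v2");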
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 928f874c73d2..dd6a737d060b 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -1,48 +1,46 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MXC_CLK) += \
- clk.o \
- clk-busy.o \
- clk-composite-8m.o \
- clk-cpu.o \
- clk-composite-7ulp.o \
- clk-divider-gate.o \
- clk-fixup-div.o \
- clk-fixup-mux.o \
- clk-frac-pll.o \
- clk-gate-exclusive.o \
- clk-gate2.o \
- clk-pfd.o \
- clk-pfdv2.o \
- clk-pllv1.o \
- clk-pllv2.o \
- clk-pllv3.o \
- clk-pllv4.o \
- clk-sscg-pll.o \
- clk-pll14xx.o
-
-obj-$(CONFIG_MXC_CLK_SCU) += \
- clk-scu.o \
- clk-lpcg-scu.o
+mxc-clk-objs += clk.o
+mxc-clk-objs += clk-busy.o
+mxc-clk-objs += clk-composite-7ulp.o
+mxc-clk-objs += clk-composite-8m.o
+mxc-clk-objs += clk-cpu.o
+mxc-clk-objs += clk-divider-gate.o
+mxc-clk-objs += clk-fixup-div.o
+mxc-clk-objs += clk-fixup-mux.o
+mxc-clk-objs += clk-frac-pll.o
+mxc-clk-objs += clk-gate2.o
+mxc-clk-objs += clk-gate-exclusive.o
+mxc-clk-objs += clk-pfd.o
+mxc-clk-objs += clk-pfdv2.o
+mxc-clk-objs += clk-pllv1.o
+mxc-clk-objs += clk-pllv2.o
+mxc-clk-objs += clk-pllv3.o
+mxc-clk-objs += clk-pllv4.o
+mxc-clk-objs += clk-pll14xx.o
+mxc-clk-objs += clk-sscg-pll.o
+obj-$(CONFIG_MXC_CLK) += mxc-clk.o
obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o
obj-$(CONFIG_CLK_IMX8MN) += clk-imx8mn.o
obj-$(CONFIG_CLK_IMX8MP) += clk-imx8mp.o
obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
-obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
-obj-$(CONFIG_SOC_IMX1) += clk-imx1.o
-obj-$(CONFIG_SOC_IMX21) += clk-imx21.o
-obj-$(CONFIG_SOC_IMX25) += clk-imx25.o
-obj-$(CONFIG_SOC_IMX27) += clk-imx27.o
-obj-$(CONFIG_SOC_IMX31) += clk-imx31.o
-obj-$(CONFIG_SOC_IMX35) += clk-imx35.o
-obj-$(CONFIG_SOC_IMX5) += clk-imx5.o
-obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
-obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
-obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
-obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
-obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
-obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o
-obj-$(CONFIG_SOC_IMX7ULP) += clk-imx7ulp.o
-obj-$(CONFIG_SOC_VF610) += clk-vf610.o
+obj-$(CONFIG_MXC_CLK_SCU) += clk-imx-scu.o clk-imx-lpcg-scu.o
+clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o
+clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
+
+obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
+obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
+obj-$(CONFIG_CLK_IMX27) += clk-imx27.o
+obj-$(CONFIG_CLK_IMX31) += clk-imx31.o
+obj-$(CONFIG_CLK_IMX35) += clk-imx35.o
+obj-$(CONFIG_CLK_IMX5) += clk-imx5.o
+obj-$(CONFIG_CLK_IMX6Q) += clk-imx6q.o
+obj-$(CONFIG_CLK_IMX6SL) += clk-imx6sl.o
+obj-$(CONFIG_CLK_IMX6SLL) += clk-imx6sll.o
+obj-$(CONFIG_CLK_IMX6SX) += clk-imx6sx.o
+obj-$(CONFIG_CLK_IMX6UL) += clk-imx6ul.o
+obj-$(CONFIG_CLK_IMX7D) += clk-imx7d.o
+obj-$(CONFIG_CLK_IMX7ULP) += clk-imx7ulp.o
+obj-$(CONFIG_CLK_VF610) += clk-vf610.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 25c863da32c7..6f17311647f3 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -4,6 +4,7 @@
* Copyright 2012 Linaro Ltd.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index b9efcc8a855d..7c4f31b31eb0 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index d2b5af826f2c..2c309e3dc8e3 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -215,6 +216,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div->width = PCG_PREDIV_WIDTH;
divider_ops = &imx8m_clk_composite_divider_ops;
mux_ops = &clk_mux_ops;
+ flags |= CLK_SET_PARENT_GATE;
}
div->lock = &imx_ccm_lock;
@@ -243,3 +245,4 @@ fail:
kfree(mux);
return ERR_CAST(hw);
}
+EXPORT_SYMBOL_GPL(imx8m_clk_hw_composite_flags);
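Two related changes land in this file: the i.MX8M composite path now ORs in CLK_SET_PARENT_GATE, so the clk core refuses parent switches while the clock is prepared, and the helper is exported so SoC drivers built as separate modules can link against the shared mxc-clk module. From a consumer's point of view, the flag means reparenting must happen across a gate; a sketch of that pattern (the -EBUSY value is the core's usual response here, stated as an assumption rather than API documentation):

#include <linux/clk.h>

static int reparent_gated(struct clk *clk, struct clk *new_parent)
{
	int ret;

	/*
	 * clk_set_parent() on a prepared CLK_SET_PARENT_GATE clock
	 * fails (typically -EBUSY), so gate the clock first.
	 */
	clk_disable_unprepare(clk);
	ret = clk_set_parent(clk, new_parent);
	if (ret)
		return ret;
	return clk_prepare_enable(clk);
}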
diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c
index cb182bec79ba..cb6ca4cf0535 100644
--- a/drivers/clk/imx/clk-cpu.c
+++ b/drivers/clk/imx/clk-cpu.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include "clk.h"
@@ -104,3 +105,4 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_cpu);
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index 58a67630bb6a..c82401570c84 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*/
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 101e0a300376..c703056fae85 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -233,3 +234,4 @@ struct clk_hw *imx_clk_hw_frac_pll(const char *name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_frac_pll);
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index b87ab3c3ba1e..7eed7083f46e 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -15,7 +16,7 @@
#include "clk.h"
/**
- * DOC: basic gatable clock which can gate and ungate it's ouput
+ * DOC: basic gateable clock which can gate and ungate its output
*
* Traits of this clock:
* prepare - clk_(un)prepare only ensures parent is (un)prepared
@@ -177,3 +178,4 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
return hw;
}
+EXPORT_SYMBOL_GPL(clk_hw_register_gate2);
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
deleted file mode 100644
index 077b4a7123ce..000000000000
--- a/drivers/clk/imx/clk-imx21.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <dt-bindings/clock/imx21-clock.h>
-#include <soc/imx/timer.h>
-#include <asm/irq.h>
-
-#include "clk.h"
-
-#define MX21_CCM_BASE_ADDR 0x10027000
-#define MX21_GPT1_BASE_ADDR 0x10003000
-#define MX21_INT_GPT1 (NR_IRQS_LEGACY + 26)
-
-static void __iomem *ccm __initdata;
-
-/* Register offsets */
-#define CCM_CSCR (ccm + 0x00)
-#define CCM_MPCTL0 (ccm + 0x04)
-#define CCM_SPCTL0 (ccm + 0x0c)
-#define CCM_PCDR0 (ccm + 0x18)
-#define CCM_PCDR1 (ccm + 0x1c)
-#define CCM_PCCR0 (ccm + 0x20)
-#define CCM_PCCR1 (ccm + 0x24)
-
-static const char *mpll_osc_sel_clks[] = { "ckih_gate", "ckih_div1p5", };
-static const char *mpll_sel_clks[] = { "fpm_gate", "mpll_osc_sel", };
-static const char *spll_sel_clks[] = { "fpm_gate", "mpll_osc_sel", };
-static const char *ssi_sel_clks[] = { "spll_gate", "mpll_gate", };
-
-static struct clk *clk[IMX21_CLK_MAX];
-static struct clk_onecell_data clk_data;
-
-static void __init _mx21_clocks_init(unsigned long lref, unsigned long href)
-{
- BUG_ON(!ccm);
-
- clk[IMX21_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clk[IMX21_CLK_CKIL] = imx_obtain_fixed_clock("ckil", lref);
- clk[IMX21_CLK_CKIH] = imx_obtain_fixed_clock("ckih", href);
- clk[IMX21_CLK_FPM] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
- clk[IMX21_CLK_CKIH_DIV1P5] = imx_clk_fixed_factor("ckih_div1p5", "ckih_gate", 2, 3);
-
- clk[IMX21_CLK_MPLL_GATE] = imx_clk_gate("mpll_gate", "mpll", CCM_CSCR, 0);
- clk[IMX21_CLK_SPLL_GATE] = imx_clk_gate("spll_gate", "spll", CCM_CSCR, 1);
- clk[IMX21_CLK_FPM_GATE] = imx_clk_gate("fpm_gate", "fpm", CCM_CSCR, 2);
- clk[IMX21_CLK_CKIH_GATE] = imx_clk_gate_dis("ckih_gate", "ckih", CCM_CSCR, 3);
- clk[IMX21_CLK_MPLL_OSC_SEL] = imx_clk_mux("mpll_osc_sel", CCM_CSCR, 4, 1, mpll_osc_sel_clks, ARRAY_SIZE(mpll_osc_sel_clks));
- clk[IMX21_CLK_IPG] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
- clk[IMX21_CLK_HCLK] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
- clk[IMX21_CLK_MPLL_SEL] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks, ARRAY_SIZE(mpll_sel_clks));
- clk[IMX21_CLK_SPLL_SEL] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks, ARRAY_SIZE(spll_sel_clks));
- clk[IMX21_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", CCM_CSCR, 19, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
- clk[IMX21_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", CCM_CSCR, 20, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
- clk[IMX21_CLK_USB_DIV] = imx_clk_divider("usb_div", "spll_gate", CCM_CSCR, 26, 3);
- clk[IMX21_CLK_FCLK] = imx_clk_divider("fclk", "mpll_gate", CCM_CSCR, 29, 3);
-
- clk[IMX21_CLK_MPLL] = imx_clk_pllv1(IMX_PLLV1_IMX21, "mpll", "mpll_sel", CCM_MPCTL0);
-
- clk[IMX21_CLK_SPLL] = imx_clk_pllv1(IMX_PLLV1_IMX21, "spll", "spll_sel", CCM_SPCTL0);
-
- clk[IMX21_CLK_NFC_DIV] = imx_clk_divider("nfc_div", "fclk", CCM_PCDR0, 12, 4);
- clk[IMX21_CLK_SSI1_DIV] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
- clk[IMX21_CLK_SSI2_DIV] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 6);
-
- clk[IMX21_CLK_PER1] = imx_clk_divider("per1", "mpll_gate", CCM_PCDR1, 0, 6);
- clk[IMX21_CLK_PER2] = imx_clk_divider("per2", "mpll_gate", CCM_PCDR1, 8, 6);
- clk[IMX21_CLK_PER3] = imx_clk_divider("per3", "mpll_gate", CCM_PCDR1, 16, 6);
- clk[IMX21_CLK_PER4] = imx_clk_divider("per4", "mpll_gate", CCM_PCDR1, 24, 6);
-
- clk[IMX21_CLK_UART1_IPG_GATE] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
- clk[IMX21_CLK_UART2_IPG_GATE] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
- clk[IMX21_CLK_UART3_IPG_GATE] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
- clk[IMX21_CLK_UART4_IPG_GATE] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
- clk[IMX21_CLK_CSPI1_IPG_GATE] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
- clk[IMX21_CLK_CSPI2_IPG_GATE] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
- clk[IMX21_CLK_SSI1_GATE] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
- clk[IMX21_CLK_SSI2_GATE] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
- clk[IMX21_CLK_SDHC1_IPG_GATE] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
- clk[IMX21_CLK_SDHC2_IPG_GATE] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
- clk[IMX21_CLK_GPIO_GATE] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
- clk[IMX21_CLK_I2C_GATE] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
- clk[IMX21_CLK_DMA_GATE] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
- clk[IMX21_CLK_USB_GATE] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
- clk[IMX21_CLK_EMMA_GATE] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
- clk[IMX21_CLK_SSI2_BAUD_GATE] = imx_clk_gate("ssi2_baud_gate", "ipg", CCM_PCCR0, 16);
- clk[IMX21_CLK_SSI1_BAUD_GATE] = imx_clk_gate("ssi1_baud_gate", "ipg", CCM_PCCR0, 17);
- clk[IMX21_CLK_LCDC_IPG_GATE] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
- clk[IMX21_CLK_NFC_GATE] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
- clk[IMX21_CLK_SLCDC_HCLK_GATE] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
- clk[IMX21_CLK_PER4_GATE] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
- clk[IMX21_CLK_BMI_GATE] = imx_clk_gate("bmi_gate", "hclk", CCM_PCCR0, 23);
- clk[IMX21_CLK_USB_HCLK_GATE] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
- clk[IMX21_CLK_SLCDC_GATE] = imx_clk_gate("slcdc_gate", "hclk", CCM_PCCR0, 25);
- clk[IMX21_CLK_LCDC_HCLK_GATE] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
- clk[IMX21_CLK_EMMA_HCLK_GATE] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
- clk[IMX21_CLK_BROM_GATE] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
- clk[IMX21_CLK_DMA_HCLK_GATE] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
- clk[IMX21_CLK_CSI_HCLK_GATE] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
-
- clk[IMX21_CLK_CSPI3_IPG_GATE] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
- clk[IMX21_CLK_WDOG_GATE] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
- clk[IMX21_CLK_GPT1_IPG_GATE] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
- clk[IMX21_CLK_GPT2_IPG_GATE] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
- clk[IMX21_CLK_GPT3_IPG_GATE] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
- clk[IMX21_CLK_PWM_IPG_GATE] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
- clk[IMX21_CLK_RTC_GATE] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
- clk[IMX21_CLK_KPP_GATE] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
- clk[IMX21_CLK_OWIRE_GATE] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
-
- imx_check_clocks(clk, ARRAY_SIZE(clk));
-}
-
-int __init mx21_clocks_init(unsigned long lref, unsigned long href)
-{
- ccm = ioremap(MX21_CCM_BASE_ADDR, SZ_2K);
-
- _mx21_clocks_init(lref, href);
-
- clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.0");
- clk_register_clkdev(clk[IMX21_CLK_UART1_IPG_GATE], "ipg", "imx21-uart.0");
- clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.1");
- clk_register_clkdev(clk[IMX21_CLK_UART2_IPG_GATE], "ipg", "imx21-uart.1");
- clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.2");
- clk_register_clkdev(clk[IMX21_CLK_UART3_IPG_GATE], "ipg", "imx21-uart.2");
- clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx21-uart.3");
- clk_register_clkdev(clk[IMX21_CLK_UART4_IPG_GATE], "ipg", "imx21-uart.3");
- clk_register_clkdev(clk[IMX21_CLK_GPT1_IPG_GATE], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[IMX21_CLK_PER1], "per", "imx-gpt.0");
- clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.0");
- clk_register_clkdev(clk[IMX21_CLK_CSPI1_IPG_GATE], "ipg", "imx21-cspi.0");
- clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.1");
- clk_register_clkdev(clk[IMX21_CLK_CSPI2_IPG_GATE], "ipg", "imx21-cspi.1");
- clk_register_clkdev(clk[IMX21_CLK_PER2], "per", "imx21-cspi.2");
- clk_register_clkdev(clk[IMX21_CLK_CSPI3_IPG_GATE], "ipg", "imx21-cspi.2");
- clk_register_clkdev(clk[IMX21_CLK_PER3], "per", "imx21-fb.0");
- clk_register_clkdev(clk[IMX21_CLK_LCDC_IPG_GATE], "ipg", "imx21-fb.0");
- clk_register_clkdev(clk[IMX21_CLK_LCDC_HCLK_GATE], "ahb", "imx21-fb.0");
- clk_register_clkdev(clk[IMX21_CLK_USB_GATE], "per", "imx21-hcd.0");
- clk_register_clkdev(clk[IMX21_CLK_USB_HCLK_GATE], "ahb", "imx21-hcd.0");
- clk_register_clkdev(clk[IMX21_CLK_NFC_GATE], NULL, "imx21-nand.0");
- clk_register_clkdev(clk[IMX21_CLK_DMA_HCLK_GATE], "ahb", "imx21-dma");
- clk_register_clkdev(clk[IMX21_CLK_DMA_GATE], "ipg", "imx21-dma");
- clk_register_clkdev(clk[IMX21_CLK_WDOG_GATE], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[IMX21_CLK_I2C_GATE], NULL, "imx21-i2c.0");
- clk_register_clkdev(clk[IMX21_CLK_OWIRE_GATE], NULL, "mxc_w1.0");
-
- mxc_timer_init(MX21_GPT1_BASE_ADDR, MX21_INT_GPT1, GPT_TYPE_IMX21);
-
- return 0;
-}
-
-static void __init mx21_clocks_init_dt(struct device_node *np)
-{
- ccm = of_iomap(np, 0);
-
- _mx21_clocks_init(32768, 26000000);
-
- clk_data.clks = clk;
- clk_data.clk_num = ARRAY_SIZE(clk);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-}
-CLK_OF_DECLARE(imx27_ccm, "fsl,imx21-ccm", mx21_clocks_init_dt);
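Since no CLK_IMX21 entry is added to the Kconfig above, this deletion retires i.MX21 clock support outright rather than converting it; both the board-file entry point (mx21_clocks_init()) and the DT hook go away. The SoCs that remain keep the DT-only probe shape sketched below (placeholder names, assumed to mirror the surviving drivers):

#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void __init example_clocks_init_dt(struct device_node *np)
{
	void __iomem *ccm = of_iomap(np, 0);

	if (!ccm)
		return;
	/*
	 * register clocks against ccm, then publish the provider
	 * with of_clk_add_provider() as the deleted code did
	 */
}
CLK_OF_DECLARE(example_ccm, "vendor,example-ccm", example_clocks_init_dt);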
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index a3753067fc12..5585ded8b8c6 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -181,79 +181,6 @@ static void __init _mx27_clocks_init(unsigned long fref)
imx_print_silicon_rev("i.MX27", mx27_revision());
}
-int __init mx27_clocks_init(unsigned long fref)
-{
- ccm = ioremap(MX27_CCM_BASE_ADDR, SZ_4K);
-
- _mx27_clocks_init(fref);
-
- clk_register_clkdev(clk[IMX27_CLK_UART1_IPG_GATE], "ipg", "imx21-uart.0");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.0");
- clk_register_clkdev(clk[IMX27_CLK_UART2_IPG_GATE], "ipg", "imx21-uart.1");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.1");
- clk_register_clkdev(clk[IMX27_CLK_UART3_IPG_GATE], "ipg", "imx21-uart.2");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.2");
- clk_register_clkdev(clk[IMX27_CLK_UART4_IPG_GATE], "ipg", "imx21-uart.3");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.3");
- clk_register_clkdev(clk[IMX27_CLK_UART5_IPG_GATE], "ipg", "imx21-uart.4");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.4");
- clk_register_clkdev(clk[IMX27_CLK_UART6_IPG_GATE], "ipg", "imx21-uart.5");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx21-uart.5");
- clk_register_clkdev(clk[IMX27_CLK_GPT1_IPG_GATE], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[IMX27_CLK_PER1_GATE], "per", "imx-gpt.0");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx21-mmc.0");
- clk_register_clkdev(clk[IMX27_CLK_SDHC1_IPG_GATE], "ipg", "imx21-mmc.0");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx21-mmc.1");
- clk_register_clkdev(clk[IMX27_CLK_SDHC2_IPG_GATE], "ipg", "imx21-mmc.1");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx21-mmc.2");
- clk_register_clkdev(clk[IMX27_CLK_SDHC2_IPG_GATE], "ipg", "imx21-mmc.2");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx27-cspi.0");
- clk_register_clkdev(clk[IMX27_CLK_CSPI1_IPG_GATE], "ipg", "imx27-cspi.0");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx27-cspi.1");
- clk_register_clkdev(clk[IMX27_CLK_CSPI2_IPG_GATE], "ipg", "imx27-cspi.1");
- clk_register_clkdev(clk[IMX27_CLK_PER2_GATE], "per", "imx27-cspi.2");
- clk_register_clkdev(clk[IMX27_CLK_CSPI3_IPG_GATE], "ipg", "imx27-cspi.2");
- clk_register_clkdev(clk[IMX27_CLK_PER3_GATE], "per", "imx21-fb.0");
- clk_register_clkdev(clk[IMX27_CLK_LCDC_IPG_GATE], "ipg", "imx21-fb.0");
- clk_register_clkdev(clk[IMX27_CLK_LCDC_AHB_GATE], "ahb", "imx21-fb.0");
- clk_register_clkdev(clk[IMX27_CLK_CSI_AHB_GATE], "ahb", "imx27-camera.0");
- clk_register_clkdev(clk[IMX27_CLK_PER4_GATE], "per", "imx27-camera.0");
- clk_register_clkdev(clk[IMX27_CLK_USB_DIV], "per", "imx-udc-mx27");
- clk_register_clkdev(clk[IMX27_CLK_USB_IPG_GATE], "ipg", "imx-udc-mx27");
- clk_register_clkdev(clk[IMX27_CLK_USB_AHB_GATE], "ahb", "imx-udc-mx27");
- clk_register_clkdev(clk[IMX27_CLK_USB_DIV], "per", "mxc-ehci.0");
- clk_register_clkdev(clk[IMX27_CLK_USB_IPG_GATE], "ipg", "mxc-ehci.0");
- clk_register_clkdev(clk[IMX27_CLK_USB_AHB_GATE], "ahb", "mxc-ehci.0");
- clk_register_clkdev(clk[IMX27_CLK_USB_DIV], "per", "mxc-ehci.1");
- clk_register_clkdev(clk[IMX27_CLK_USB_IPG_GATE], "ipg", "mxc-ehci.1");
- clk_register_clkdev(clk[IMX27_CLK_USB_AHB_GATE], "ahb", "mxc-ehci.1");
- clk_register_clkdev(clk[IMX27_CLK_USB_DIV], "per", "mxc-ehci.2");
- clk_register_clkdev(clk[IMX27_CLK_USB_IPG_GATE], "ipg", "mxc-ehci.2");
- clk_register_clkdev(clk[IMX27_CLK_USB_AHB_GATE], "ahb", "mxc-ehci.2");
- clk_register_clkdev(clk[IMX27_CLK_SSI1_IPG_GATE], NULL, "imx-ssi.0");
- clk_register_clkdev(clk[IMX27_CLK_SSI2_IPG_GATE], NULL, "imx-ssi.1");
- clk_register_clkdev(clk[IMX27_CLK_NFC_BAUD_GATE], NULL, "imx27-nand.0");
- clk_register_clkdev(clk[IMX27_CLK_VPU_BAUD_GATE], "per", "coda-imx27.0");
- clk_register_clkdev(clk[IMX27_CLK_VPU_AHB_GATE], "ahb", "coda-imx27.0");
- clk_register_clkdev(clk[IMX27_CLK_DMA_AHB_GATE], "ahb", "imx27-dma");
- clk_register_clkdev(clk[IMX27_CLK_DMA_IPG_GATE], "ipg", "imx27-dma");
- clk_register_clkdev(clk[IMX27_CLK_FEC_IPG_GATE], "ipg", "imx27-fec.0");
- clk_register_clkdev(clk[IMX27_CLK_FEC_AHB_GATE], "ahb", "imx27-fec.0");
- clk_register_clkdev(clk[IMX27_CLK_WDOG_IPG_GATE], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[IMX27_CLK_I2C1_IPG_GATE], NULL, "imx21-i2c.0");
- clk_register_clkdev(clk[IMX27_CLK_I2C2_IPG_GATE], NULL, "imx21-i2c.1");
- clk_register_clkdev(clk[IMX27_CLK_OWIRE_IPG_GATE], NULL, "mxc_w1.0");
- clk_register_clkdev(clk[IMX27_CLK_KPP_IPG_GATE], NULL, "imx-keypad");
- clk_register_clkdev(clk[IMX27_CLK_EMMA_AHB_GATE], "emma-ahb", "imx27-camera.0");
- clk_register_clkdev(clk[IMX27_CLK_EMMA_IPG_GATE], "emma-ipg", "imx27-camera.0");
- clk_register_clkdev(clk[IMX27_CLK_EMMA_AHB_GATE], "ahb", "m2m-emmaprp.0");
- clk_register_clkdev(clk[IMX27_CLK_EMMA_IPG_GATE], "ipg", "m2m-emmaprp.0");
-
- mxc_timer_init(MX27_GPT1_BASE_ADDR, MX27_INT_GPT1, GPT_TYPE_IMX21);
-
- return 0;
-}
-
static void __init mx27_clocks_init_dt(struct device_node *np)
{
struct device_node *refnp;
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 4bb05e440cdd..7b13fb57d842 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -132,77 +132,6 @@ static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
clk_disable_unprepare(clk[iim_gate]);
}
-int __init mx31_clocks_init(unsigned long fref)
-{
- void __iomem *base;
-
- base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
- if (!base)
- panic("%s: failed to map registers\n", __func__);
-
- _mx31_clocks_init(base, fref);
-
- clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
- clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
- clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
- clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
- clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
- clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
- clk_register_clkdev(clk[epit1_gate], "epit", NULL);
- clk_register_clkdev(clk[epit2_gate], "epit", NULL);
- clk_register_clkdev(clk[nfc], NULL, "imx27-nand.0");
- clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
- clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
- clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
- clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
- clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
- clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
- clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
- clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
- clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
- clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27");
- clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27");
- clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
- clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
- /* i.mx31 has the i.mx21 type uart */
- clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
- clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
- clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
- clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
- clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
- clk_register_clkdev(clk[i2c1_gate], NULL, "imx21-i2c.0");
- clk_register_clkdev(clk[i2c2_gate], NULL, "imx21-i2c.1");
- clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2");
- clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
- clk_register_clkdev(clk[sdhc1_gate], NULL, "imx31-mmc.0");
- clk_register_clkdev(clk[sdhc2_gate], NULL, "imx31-mmc.1");
- clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
- clk_register_clkdev(clk[firi_gate], "firi", NULL);
- clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
- clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
- clk_register_clkdev(clk[rng_gate], NULL, "mxc_rnga");
- clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
- clk_register_clkdev(clk[iim_gate], "iim", NULL);
-
-
- imx_register_uart_clocks(uart_clks);
- mxc_timer_init(MX31_GPT1_BASE_ADDR, MX31_INT_GPT, GPT_TYPE_IMX31);
-
- return 0;
-}
-
static void __init mx31_clocks_init_dt(struct device_node *np)
{
struct device_node *osc_np;
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index e595f559907f..c1df03665c09 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -248,74 +248,6 @@ static void __init _mx35_clocks_init(void)
imx_print_silicon_rev("i.MX35", mx35_revision());
}
-int __init mx35_clocks_init(void)
-{
- _mx35_clocks_init();
-
- clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
- clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
- clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
- clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
- clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
- clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
- clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
- clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
- clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
- clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
- clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
- clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
- clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
- clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
- clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
- clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
- clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
- clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
- /* i.mx35 has the i.mx27 type fec */
- clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
- clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
- clk_register_clkdev(clk[i2c1_gate], NULL, "imx21-i2c.0");
- clk_register_clkdev(clk[i2c2_gate], NULL, "imx21-i2c.1");
- clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2");
- clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
- clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
- clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
- clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
- clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
- /* i.mx35 has the i.mx21 type uart */
- clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
- clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
- clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
- clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
- /* i.mx35 has the i.mx21 type rtc */
- clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
- clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
- clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
- clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
- clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
- clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
- clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
- clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
- clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
- clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27");
- clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
- clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
- clk_register_clkdev(clk[admux_gate], "audmux", NULL);
-
- mxc_timer_init(MX35_GPT1_BASE_ADDR, MX35_INT_GPT, GPT_TYPE_IMX31);
-
- return 0;
-}
-
static void __init mx35_clocks_init_dt(struct device_node *ccm_node)
{
_mx35_clocks_init();
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ba33c79158de..b2ff187cedab 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 0f647d148abf..2f9361946a0e 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -3,6 +3,7 @@
* Copyright 2013-2014 Freescale Semiconductor, Inc.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
@@ -14,19 +15,19 @@
#include "clk.h"
#define CCSR 0xc
-#define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
+#define BM_CCSR_PLL1_SW_CLK_SEL BIT(2)
#define CACRR 0x10
#define CDHIPR 0x48
-#define BM_CDHIPR_ARM_PODF_BUSY (1 << 16)
+#define BM_CDHIPR_ARM_PODF_BUSY BIT(16)
#define ARM_WAIT_DIV_396M 2
#define ARM_WAIT_DIV_792M 4
#define ARM_WAIT_DIV_996M 6
#define PLL_ARM 0x0
-#define BM_PLL_ARM_DIV_SELECT (0x7f << 0)
-#define BM_PLL_ARM_POWERDOWN (1 << 12)
-#define BM_PLL_ARM_ENABLE (1 << 13)
-#define BM_PLL_ARM_LOCK (1 << 31)
+#define BM_PLL_ARM_DIV_SELECT 0x7f
+#define BM_PLL_ARM_POWERDOWN BIT(12)
+#define BM_PLL_ARM_ENABLE BIT(13)
+#define BM_PLL_ARM_LOCK BIT(31)
#define PLL_ARM_DIV_792M 66
static const char *step_sels[] = { "osc", "pll2_pfd2", };
@@ -145,7 +146,7 @@ static void imx6sl_enable_pll_arm(bool enable)
val |= BM_PLL_ARM_ENABLE;
val &= ~BM_PLL_ARM_POWERDOWN;
writel_relaxed(val, anatop_base + PLL_ARM);
- while (!(__raw_readl(anatop_base + PLL_ARM) & BM_PLL_ARM_LOCK))
+ while (!(readl_relaxed(anatop_base + PLL_ARM) & BM_PLL_ARM_LOCK))
;
} else {
writel_relaxed(saved_pll_arm, anatop_base + PLL_ARM);
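Two cleanups share this hunk: the open-coded (1 << n) masks become BIT(n) from linux/bits.h, which also sidesteps the signed-shift pitfall of (1 << 31), and the __raw_readl() poll becomes readl_relaxed(), which keeps readl's byte-order handling while still omitting the memory barrier. A sketch of both idioms with illustrative values:

#include <linux/bits.h>
#include <linux/io.h>

#define EXAMPLE_LOCK	BIT(31)	/* unsigned long, unlike (1 << 31) */

static void example_wait_for_lock(void __iomem *reg)
{
	/* relaxed read: no barrier, endianness still handled */
	while (!(readl_relaxed(reg) & EXAMPLE_LOCK))
		;
}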
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 89ba71271e5c..20dcce526d07 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/clock/imx6sx-clock.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index b2057bd42e25..22d24a6a05e7 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -4,6 +4,7 @@
*/
#include <dt-bindings/clock/imx7d-clock.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
@@ -505,72 +506,73 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
hws[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_hw_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
hws[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_hw_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
- hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
- hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel));
hws[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_hw_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel));
- hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel));
- hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
- hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel));
- hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
- hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel));
- hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel));
- hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel));
- hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel));
- hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel));
- hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel));
- hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel));
- hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel));
- hws[IMX7D_SAI1_ROOT_SRC] = imx_clk_hw_mux2("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel));
- hws[IMX7D_SAI2_ROOT_SRC] = imx_clk_hw_mux2("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel));
- hws[IMX7D_SAI3_ROOT_SRC] = imx_clk_hw_mux2("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel));
- hws[IMX7D_SPDIF_ROOT_SRC] = imx_clk_hw_mux2("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel));
- hws[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_hw_mux2("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel));
- hws[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_hw_mux2("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel));
- hws[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_hw_mux2("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel));
- hws[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_hw_mux2("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel));
- hws[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_hw_mux2("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel));
- hws[IMX7D_EIM_ROOT_SRC] = imx_clk_hw_mux2("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel));
- hws[IMX7D_NAND_ROOT_SRC] = imx_clk_hw_mux2("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel));
- hws[IMX7D_QSPI_ROOT_SRC] = imx_clk_hw_mux2("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel));
- hws[IMX7D_USDHC1_ROOT_SRC] = imx_clk_hw_mux2("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel));
- hws[IMX7D_USDHC2_ROOT_SRC] = imx_clk_hw_mux2("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel));
- hws[IMX7D_USDHC3_ROOT_SRC] = imx_clk_hw_mux2("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel));
- hws[IMX7D_CAN1_ROOT_SRC] = imx_clk_hw_mux2("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel));
- hws[IMX7D_CAN2_ROOT_SRC] = imx_clk_hw_mux2("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel));
- hws[IMX7D_I2C1_ROOT_SRC] = imx_clk_hw_mux2("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel));
- hws[IMX7D_I2C2_ROOT_SRC] = imx_clk_hw_mux2("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel));
- hws[IMX7D_I2C3_ROOT_SRC] = imx_clk_hw_mux2("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel));
- hws[IMX7D_I2C4_ROOT_SRC] = imx_clk_hw_mux2("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel));
- hws[IMX7D_UART1_ROOT_SRC] = imx_clk_hw_mux2("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel));
- hws[IMX7D_UART2_ROOT_SRC] = imx_clk_hw_mux2("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel));
- hws[IMX7D_UART3_ROOT_SRC] = imx_clk_hw_mux2("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel));
- hws[IMX7D_UART4_ROOT_SRC] = imx_clk_hw_mux2("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel));
- hws[IMX7D_UART5_ROOT_SRC] = imx_clk_hw_mux2("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel));
- hws[IMX7D_UART6_ROOT_SRC] = imx_clk_hw_mux2("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel));
- hws[IMX7D_UART7_ROOT_SRC] = imx_clk_hw_mux2("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel));
- hws[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_hw_mux2("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel));
- hws[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_hw_mux2("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel));
- hws[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_hw_mux2("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel));
- hws[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_hw_mux2("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel));
- hws[IMX7D_PWM1_ROOT_SRC] = imx_clk_hw_mux2("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel));
- hws[IMX7D_PWM2_ROOT_SRC] = imx_clk_hw_mux2("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel));
- hws[IMX7D_PWM3_ROOT_SRC] = imx_clk_hw_mux2("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel));
- hws[IMX7D_PWM4_ROOT_SRC] = imx_clk_hw_mux2("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel));
- hws[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_hw_mux2("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel));
- hws[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_hw_mux2("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel));
- hws[IMX7D_SIM1_ROOT_SRC] = imx_clk_hw_mux2("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel));
- hws[IMX7D_SIM2_ROOT_SRC] = imx_clk_hw_mux2("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel));
- hws[IMX7D_GPT1_ROOT_SRC] = imx_clk_hw_mux2("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel));
- hws[IMX7D_GPT2_ROOT_SRC] = imx_clk_hw_mux2("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel));
- hws[IMX7D_GPT3_ROOT_SRC] = imx_clk_hw_mux2("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel));
- hws[IMX7D_GPT4_ROOT_SRC] = imx_clk_hw_mux2("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel));
- hws[IMX7D_TRACE_ROOT_SRC] = imx_clk_hw_mux2("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel));
- hws[IMX7D_WDOG_ROOT_SRC] = imx_clk_hw_mux2("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel));
- hws[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_hw_mux2("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel));
- hws[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_hw_mux2("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel));
- hws[IMX7D_WRCLK_ROOT_SRC] = imx_clk_hw_mux2("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel));
- hws[IMX7D_CLKO1_ROOT_SRC] = imx_clk_hw_mux2("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel));
- hws[IMX7D_CLKO2_ROOT_SRC] = imx_clk_hw_mux2("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel));
+
+ hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2_flags("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2_flags("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2_flags("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2_flags("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2_flags("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SAI1_ROOT_SRC] = imx_clk_hw_mux2_flags("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SAI2_ROOT_SRC] = imx_clk_hw_mux2_flags("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SAI3_ROOT_SRC] = imx_clk_hw_mux2_flags("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SPDIF_ROOT_SRC] = imx_clk_hw_mux2_flags("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_hw_mux2_flags("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_hw_mux2_flags("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_hw_mux2_flags("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_EIM_ROOT_SRC] = imx_clk_hw_mux2_flags("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_NAND_ROOT_SRC] = imx_clk_hw_mux2_flags("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_QSPI_ROOT_SRC] = imx_clk_hw_mux2_flags("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_USDHC1_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_USDHC2_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_USDHC3_ROOT_SRC] = imx_clk_hw_mux2_flags("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_CAN1_ROOT_SRC] = imx_clk_hw_mux2_flags("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_CAN2_ROOT_SRC] = imx_clk_hw_mux2_flags("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_I2C1_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_I2C2_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_I2C3_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_I2C4_ROOT_SRC] = imx_clk_hw_mux2_flags("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART1_ROOT_SRC] = imx_clk_hw_mux2_flags("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART2_ROOT_SRC] = imx_clk_hw_mux2_flags("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART3_ROOT_SRC] = imx_clk_hw_mux2_flags("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART4_ROOT_SRC] = imx_clk_hw_mux2_flags("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART5_ROOT_SRC] = imx_clk_hw_mux2_flags("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART6_ROOT_SRC] = imx_clk_hw_mux2_flags("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_UART7_ROOT_SRC] = imx_clk_hw_mux2_flags("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_hw_mux2_flags("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PWM1_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PWM2_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PWM3_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_PWM4_ROOT_SRC] = imx_clk_hw_mux2_flags("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_hw_mux2_flags("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_hw_mux2_flags("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SIM1_ROOT_SRC] = imx_clk_hw_mux2_flags("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_SIM2_ROOT_SRC] = imx_clk_hw_mux2_flags("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_GPT1_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_GPT2_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_GPT3_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_GPT4_ROOT_SRC] = imx_clk_hw_mux2_flags("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_TRACE_ROOT_SRC] = imx_clk_hw_mux2_flags("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_WDOG_ROOT_SRC] = imx_clk_hw_mux2_flags("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_WRCLK_ROOT_SRC] = imx_clk_hw_mux2_flags("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_CLKO1_ROOT_SRC] = imx_clk_hw_mux2_flags("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel), CLK_SET_PARENT_GATE);
+ hws[IMX7D_CLKO2_ROOT_SRC] = imx_clk_hw_mux2_flags("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel), CLK_SET_PARENT_GATE);
hws[IMX7D_ARM_A7_ROOT_CG] = imx_clk_hw_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
hws[IMX7D_ARM_M4_ROOT_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index b793264c21c6..f358ad907299 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -443,9 +443,9 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mm_a53_core_sels, ARRAY_SIZE(imx8mm_a53_core_sels));
/* BUS */
- hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
+ hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
- hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
@@ -453,11 +453,11 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
- hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
- hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
+ hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mm_noc_sels, base + 0x8d00);
+ hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_bus_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
/* AHB */
- hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
+ hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
/* IPG */
@@ -657,3 +657,7 @@ static struct platform_driver imx8mm_clk_driver = {
},
};
module_platform_driver(imx8mm_clk_driver);
+
+MODULE_AUTHOR("Bai Ping <ping.bai@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MM clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 213cc37b3173..f3c5e6cf55dd 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -431,7 +431,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mn_a53_core_sels, ARRAY_SIZE(imx8mn_a53_core_sels));
/* BUS */
- hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
+ hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
@@ -439,9 +439,9 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
- hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
+ hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mn_noc_sels, base + 0x8d00);
- hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
+ hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
@@ -608,3 +608,7 @@ static struct platform_driver imx8mn_clk_driver = {
},
};
module_platform_driver(imx8mn_clk_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MN clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index ca747712400f..48e212477f52 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -152,10 +152,6 @@ static const char * const imx8mp_can2_sels[] = {"osc_24m", "sys_pll2_200m", "sys
"sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out",
"sys_pll2_250m", "audio_pll2_out", };
-static const char * const imx8mp_memrepair_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m",
- "sys_pll3_out", "audio_pll1_out", "video_pll1_out",
- "audio_pll2_out", "sys_pll1_133m", };
-
static const char * const imx8mp_pcie_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m",
"clk_ext1", "clk_ext2", "clk_ext3",
"clk_ext4", "sys_pll1_400m", };
@@ -375,15 +371,14 @@ static const char * const imx8mp_media_cam2_pix_sels[] = {"osc_24m", "sys_pll1_2
"sys_pll3_out", "audio_pll2_out",
"video_pll1_out", };
-static const char * const imx8mp_media_mipi_phy2_ref_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
- "sys_pll1_800m", "sys_pll2_1000m",
- "clk_ext2", "audio_pll2_out",
- "video_pll1_out", };
+static const char * const imx8mp_media_ldb_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m",
+ "sys_pll1_800m", "sys_pll2_1000m",
+ "clk_ext2", "audio_pll2_out",
+ "video_pll1_out", };
-static const char * const imx8mp_media_mipi_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
- "sys_pll1_800m", "sys_pll2_1000m",
- "sys_pll3_out", "clk_ext3",
- "audio_pll2_out", };
+static const char * const imx8mp_memrepair_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m",
+ "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out",
+ "clk_ext3", "audio_pll2_out", };
static const char * const imx8mp_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m",
"sys_pll1_266m", "sys_pll1_800m", "sys_pll2_500m",
@@ -562,9 +557,9 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
/* CORE SEL */
hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));
- hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
+ hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
- hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
+ hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
@@ -572,12 +567,12 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite_bus("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
- hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
- hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
+ hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
+ hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_bus_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite_bus("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite_bus("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
- hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
+ hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
@@ -590,7 +585,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_VPU_G2] = imx8m_clk_hw_composite("vpu_g2", imx8mp_vpu_g2_sels, ccm_base + 0xa180);
hws[IMX8MP_CLK_CAN1] = imx8m_clk_hw_composite("can1", imx8mp_can1_sels, ccm_base + 0xa200);
hws[IMX8MP_CLK_CAN2] = imx8m_clk_hw_composite("can2", imx8mp_can2_sels, ccm_base + 0xa280);
- hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite("memrepair", imx8mp_memrepair_sels, ccm_base + 0xa300);
hws[IMX8MP_CLK_PCIE_PHY] = imx8m_clk_hw_composite("pcie_phy", imx8mp_pcie_phy_sels, ccm_base + 0xa380);
hws[IMX8MP_CLK_PCIE_AUX] = imx8m_clk_hw_composite("pcie_aux", imx8mp_pcie_aux_sels, ccm_base + 0xa400);
hws[IMX8MP_CLK_I2C5] = imx8m_clk_hw_composite("i2c5", imx8mp_i2c5_sels, ccm_base + 0xa480);
@@ -647,8 +641,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80);
hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00);
hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80);
- hws[IMX8MP_CLK_MEDIA_MIPI_PHY2_REF] = imx8m_clk_hw_composite("media_mipi_phy2_ref", imx8mp_media_mipi_phy2_ref_sels, ccm_base + 0xbf00);
- hws[IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC] = imx8m_clk_hw_composite("media_mipi_csi2_esc", imx8mp_media_mipi_csi2_esc_sels, ccm_base + 0xbf80);
+ hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00);
+ hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80);
hws[IMX8MP_CLK_PCIE2_CTRL] = imx8m_clk_hw_composite("pcie2_ctrl", imx8mp_pcie2_ctrl_sels, ccm_base + 0xc000);
hws[IMX8MP_CLK_PCIE2_PHY] = imx8m_clk_hw_composite("pcie2_phy", imx8mp_pcie2_phy_sels, ccm_base + 0xc080);
hws[IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE] = imx8m_clk_hw_composite("media_mipi_test_byte", imx8mp_media_mipi_test_byte_sels, ccm_base + 0xc100);
@@ -773,3 +767,7 @@ static struct platform_driver imx8mp_clk_driver = {
},
};
module_platform_driver(imx8mp_clk_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MP clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index a64aace213c2..06292d4a98ff 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
"audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
- "audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
+ "sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
@@ -431,7 +431,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mq_a53_core_sels, ARRAY_SIZE(imx8mq_a53_core_sels));
/* BUS */
- hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+ hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_bus_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
@@ -441,12 +441,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
- hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
- hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+ hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_bus_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+ hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_bus_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
/* AHB */
/* AHB clock is used by the AHB bus therefore marked as critical */
- hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+ hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
/* IPG */
@@ -643,3 +643,7 @@ static struct platform_driver imx8mq_clk_driver = {
},
};
module_platform_driver(imx8mq_clk_driver);
+
+MODULE_AUTHOR("Abel Vesa <abel.vesa@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8MQ clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index 04c8ee35e14c..e947a70054ac 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -232,3 +232,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
};
builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+
+MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index 5e2903efc488..d650ca33cdc8 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -152,3 +152,7 @@ static struct platform_driver imx8qxp_clk_driver = {
.probe = imx8qxp_clk_probe,
};
builtin_platform_driver(imx8qxp_clk_driver);
+
+MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index a73a799fb777..1f0e44f921ae 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -4,6 +4,7 @@
* Dong Aisheng <aisheng.dong@nxp.com>
*/
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 50b7c30296f7..5d2a9a3be95e 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -12,7 +12,7 @@
/**
* struct clk_pfd - IMX PFD clock
- * @clk_hw: clock source
+ * @hw: clock source
* @reg: PFD register address
* @idx: the index of PFD encoded in the register
*
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index 78e1f7641aaa..6b744c84278e 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -17,7 +17,7 @@
/**
* struct clk_pfdv2 - IMX PFD clock
- * @clk_hw: clock source
+ * @hw: clock source
* @reg: PFD register address
* @gate_bit: Gate bit offset
* @vld_bit: Valid bit offset
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index f9eb189b93c0..aba36e4217d2 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -3,9 +3,10 @@
* Copyright 2017-2018 NXP.
*/
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -68,6 +69,7 @@ struct imx_pll14xx_clk imx_1443x_pll = {
.rate_table = imx_pll1443x_tbl,
.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
+EXPORT_SYMBOL_GPL(imx_1443x_pll);
struct imx_pll14xx_clk imx_1443x_dram_pll = {
.type = PLL_1443X,
@@ -75,12 +77,14 @@ struct imx_pll14xx_clk imx_1443x_dram_pll = {
.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
.flags = CLK_GET_RATE_NOCACHE,
};
+EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);
struct imx_pll14xx_clk imx_1416x_pll = {
.type = PLL_1416X,
.rate_table = imx_pll1416x_tbl,
.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
+EXPORT_SYMBOL_GPL(imx_1416x_pll);
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
struct clk_pll14xx *pll, unsigned long rate)
@@ -436,3 +440,4 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index de4f8a41a7d0..36ffb0525735 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index b20cdea3e9cc..20ee9611ba6e 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -30,12 +30,15 @@
/**
* struct clk_pllv3 - IMX PLL clock version 3
- * @clk_hw: clock source
+ * @hw: clock source
* @base: base address of PLL registers
* @power_bit: pll power bit mask
* @powerup_set: set power_bit to power up the PLL
* @div_mask: mask of divider bits
* @div_shift: shift of divider bits
+ * @ref_clock: reference clock rate
+ * @num_offset: numerator (num) register offset
+ * @denom_offset: denominator (denom) register offset
*
* IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
* is actually a multiplier, and always sits at bit 0.
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index a49450431855..8ec703f27417 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index 773d8a545cdf..9d6cdff0537f 100644
--- a/drivers/clk/imx/clk-sscg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -537,3 +538,4 @@ struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_sscg_pll);
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 5129ef8e1d6e..9e11f1c7c397 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -4,6 +4,7 @@
*/
#include <linux/of_address.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/vf610-clock.h>
@@ -328,6 +329,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_DSPI2] = imx_clk_gate2("dspi2", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(12));
clk[VF610_CLK_DSPI3] = imx_clk_gate2("dspi3", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(13));
+ clk[VF610_CLK_CRC] = imx_clk_gate2("crc", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(3));
clk[VF610_CLK_WDT] = imx_clk_gate2("wdt", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(14));
clk[VF610_CLK_ESDHC0_SEL] = imx_clk_mux("esdhc0_sel", CCM_CSCMR1, 16, 2, esdhc_sels, 4);
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index 87ab8db3d282..47882c51cb85 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -13,6 +15,7 @@
#define CCDR_MMDC_CH1_MASK BIT(16)
DEFINE_SPINLOCK(imx_ccm_lock);
+EXPORT_SYMBOL_GPL(imx_ccm_lock);
void imx_unregister_clocks(struct clk *clks[], unsigned int count)
{
@@ -29,8 +32,9 @@ void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count)
for (i = 0; i < count; i++)
clk_hw_unregister(hws[i]);
}
+EXPORT_SYMBOL_GPL(imx_unregister_hw_clocks);
-void __init imx_mmdc_mask_handshake(void __iomem *ccm_base,
+void imx_mmdc_mask_handshake(void __iomem *ccm_base,
unsigned int chn)
{
unsigned int reg;
@@ -59,8 +63,9 @@ void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count)
pr_err("i.MX clk %u: register failed with %ld\n",
i, PTR_ERR(clks[i]));
}
+EXPORT_SYMBOL_GPL(imx_check_clk_hws);
-static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
+static struct clk *imx_obtain_fixed_clock_from_dt(const char *name)
{
struct of_phandle_args phandle;
struct clk *clk = ERR_PTR(-ENODEV);
@@ -80,7 +85,7 @@ static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
return clk;
}
-struct clk * __init imx_obtain_fixed_clock(
+struct clk *imx_obtain_fixed_clock(
const char *name, unsigned long rate)
{
struct clk *clk;
@@ -91,7 +96,7 @@ struct clk * __init imx_obtain_fixed_clock(
return clk;
}
-struct clk_hw * __init imx_obtain_fixed_clock_hw(
+struct clk_hw *imx_obtain_fixed_clock_hw(
const char *name, unsigned long rate)
{
struct clk *clk;
@@ -113,6 +118,7 @@ struct clk_hw * imx_obtain_fixed_clk_hw(struct device_node *np,
return __clk_get_hw(clk);
}
+EXPORT_SYMBOL_GPL(imx_obtain_fixed_clk_hw);
/*
 * This fixes up the CCM_CSCMR1 register write value.
@@ -140,6 +146,7 @@ void imx_cscmr1_fixup(u32 *val)
return;
}
+#ifndef MODULE
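+/*
+ * Keeping UART clocks enabled during early boot (e.g. for an early
+ * console) is only relevant when the clock drivers are built in;
+ * modular builds skip this logic entirely.
+ */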
static int imx_keep_uart_clocks;
static struct clk ** const *imx_uart_clocks;
@@ -177,3 +184,6 @@ static int __init imx_clk_disable_uart(void)
return 0;
}
late_initcall_sync(imx_clk_disable_uart);
+#endif
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 16adbc34e05f..1d7be0c86538 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -2,6 +2,7 @@
#ifndef __MACH_IMX_CLK_H
#define __MACH_IMX_CLK_H
+#include <linux/bits.h>
#include <linux/spinlock.h>
#include <linux/clk-provider.h>
@@ -11,7 +12,13 @@ extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
+#ifndef MODULE
void imx_register_uart_clocks(struct clk ** const clks[]);
+#else
+static inline void imx_register_uart_clocks(struct clk ** const clks[])
+{
+}
+#endif
void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn);
void imx_unregister_clocks(struct clk *clks[], unsigned int count);
void imx_unregister_hw_clocks(struct clk_hw *hws[], unsigned int count);
@@ -542,6 +549,11 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
IMX_COMPOSITE_BUS, \
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
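+/* Same as imx8m_clk_hw_composite_bus() but flagged CLK_IS_CRITICAL so it is never gated */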
+#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
+ imx8m_clk_hw_composite_flags(name, parent_names, ARRAY_SIZE(parent_names), reg, \
+ IMX_COMPOSITE_BUS, \
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE | CLK_IS_CRITICAL)
+
#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
imx8m_clk_hw_composite_flags(name, parent_names, \
ARRAY_SIZE(parent_names), reg, \
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index d7981b670221..dac6edc670cc 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -12,15 +12,24 @@
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/time.h>
+
#include "cgu.h"
#define MHZ (1000 * 1000)
+static inline const struct ingenic_cgu_clk_info *
+to_clk_info(struct ingenic_clk *clk)
+{
+ return &clk->cgu->clock_info[clk->idx];
+}
+
/**
* ingenic_cgu_gate_get() - get the value of clock gate register bit
* @cgu: reference to the CGU whose registers should be read
@@ -71,14 +80,13 @@ static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
const struct ingenic_cgu_pll_info *pll_info;
unsigned m, n, od_enc, od;
bool bypass;
u32 ctl;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
BUG_ON(clk_info->type != CGU_CLK_PLL);
pll_info = &clk_info->pll;
@@ -144,18 +152,6 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static inline const struct ingenic_cgu_clk_info *to_clk_info(
- struct ingenic_clk *ingenic_clk)
-{
- struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
-
- clk_info = &cgu->clock_info[ingenic_clk->idx];
- BUG_ON(clk_info->type != CGU_CLK_PLL);
-
- return clk_info;
-}
-
static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long *prate)
@@ -166,6 +162,16 @@ ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}
+static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
+ const struct ingenic_cgu_pll_info *pll_info)
+{
+ u32 ctl;
+
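+	/* Busy-poll the PLL stable bit, timing out after 100 ms */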
+ return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
+ ctl & BIT(pll_info->stable_bit),
+ 0, 100 * USEC_PER_MSEC);
+}
+
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long parent_rate)
@@ -176,6 +182,7 @@ ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
unsigned long rate, flags;
unsigned int m, n, od;
+ int ret = 0;
u32 ctl;
rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
@@ -197,9 +204,14 @@ ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
writel(ctl, cgu->base + pll_info->reg);
+
+ /* If the PLL is enabled, verify that it's stable */
+ if (ctl & BIT(pll_info->enable_bit))
+ ret = ingenic_pll_check_stable(cgu, pll_info);
+
spin_unlock_irqrestore(&cgu->lock, flags);
- return 0;
+ return ret;
}
static int ingenic_pll_enable(struct clk_hw *hw)
@@ -208,9 +220,8 @@ static int ingenic_pll_enable(struct clk_hw *hw)
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
- const unsigned int timeout = 100;
unsigned long flags;
- unsigned int i;
+ int ret;
u32 ctl;
spin_lock_irqsave(&cgu->lock, flags);
@@ -226,20 +237,10 @@ static int ingenic_pll_enable(struct clk_hw *hw)
writel(ctl, cgu->base + pll_info->reg);
- /* wait for the PLL to stabilise */
- for (i = 0; i < timeout; i++) {
- ctl = readl(cgu->base + pll_info->reg);
- if (ctl & BIT(pll_info->stable_bit))
- break;
- mdelay(1);
- }
-
+ ret = ingenic_pll_check_stable(cgu, pll_info);
spin_unlock_irqrestore(&cgu->lock, flags);
- if (i == timeout)
- return -EBUSY;
-
- return 0;
+ return ret;
}
static void ingenic_pll_disable(struct clk_hw *hw)
@@ -290,13 +291,11 @@ static const struct clk_ops ingenic_pll_ops = {
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
u32 reg;
u8 i, hw_idx, idx = 0;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_MUX) {
reg = readl(cgu->base + clk_info->mux.reg);
hw_idx = (reg >> clk_info->mux.shift) &
@@ -318,14 +317,12 @@ static u8 ingenic_clk_get_parent(struct clk_hw *hw)
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
unsigned long flags;
u8 curr_idx, hw_idx, num_poss;
u32 reg, mask;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_MUX) {
/*
* Convert the parent index to the hardware index by adding
@@ -368,13 +365,11 @@ static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
unsigned long rate = parent_rate;
u32 div_reg, div;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_DIV) {
div_reg = readl(cgu->base + clk_info->div.reg);
div = (div_reg >> clk_info->div.shift) &
@@ -443,35 +438,41 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long *parent_rate)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
- struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
unsigned int div = 1;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_DIV)
div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
div = clk_info->fixdiv.div;
+ else if (clk_hw_can_set_rate_parent(hw))
+ *parent_rate = req_rate;
return DIV_ROUND_UP(*parent_rate, div);
}
+static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
+ const struct ingenic_cgu_clk_info *clk_info)
+{
+ u32 reg;
+
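+	/* Poll until the divider's busy bit clears, timing out after 100 ms */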
+ return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
+ !(reg & BIT(clk_info->div.busy_bit)),
+ 0, 100 * USEC_PER_MSEC);
+}
+
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
unsigned long parent_rate)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
- const unsigned timeout = 100;
unsigned long rate, flags;
- unsigned int hw_div, div, i;
+ unsigned int hw_div, div;
u32 reg, mask;
int ret = 0;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
rate = DIV_ROUND_UP(parent_rate, div);
@@ -504,16 +505,8 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
writel(reg, cgu->base + clk_info->div.reg);
/* wait for the change to take effect */
- if (clk_info->div.busy_bit != -1) {
- for (i = 0; i < timeout; i++) {
- reg = readl(cgu->base + clk_info->div.reg);
- if (!(reg & BIT(clk_info->div.busy_bit)))
- break;
- mdelay(1);
- }
- if (i == timeout)
- ret = -EBUSY;
- }
+ if (clk_info->div.busy_bit != -1)
+ ret = ingenic_clk_check_stable(cgu, clk_info);
spin_unlock_irqrestore(&cgu->lock, flags);
return ret;
@@ -525,12 +518,10 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
static int ingenic_clk_enable(struct clk_hw *hw)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
unsigned long flags;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_GATE) {
/* ungate the clock */
spin_lock_irqsave(&cgu->lock, flags);
@@ -547,12 +538,10 @@ static int ingenic_clk_enable(struct clk_hw *hw)
static void ingenic_clk_disable(struct clk_hw *hw)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
unsigned long flags;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_GATE) {
/* gate the clock */
spin_lock_irqsave(&cgu->lock, flags);
@@ -564,12 +553,10 @@ static void ingenic_clk_disable(struct clk_hw *hw)
static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
+ const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
- const struct ingenic_cgu_clk_info *clk_info;
int enabled = 1;
- clk_info = &cgu->clock_info[ingenic_clk->idx];
-
if (clk_info->type & CGU_CLK_GATE)
enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
@@ -644,6 +631,13 @@ static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
caps = clk_info->type;
+ if (caps & CGU_CLK_DIV) {
+ caps &= ~CGU_CLK_DIV;
+ } else if (!(caps & CGU_CLK_CUSTOM)) {
+ /* pass rate changes to the parent clock */
+ clk_init.flags |= CLK_SET_RATE_PARENT;
+ }
+
if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
clk_init.num_parents = 0;
@@ -683,7 +677,6 @@ static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
}
} else if (caps & CGU_CLK_PLL) {
clk_init.ops = &ingenic_pll_ops;
- clk_init.flags |= CLK_SET_RATE_GATE;
caps &= ~CGU_CLK_PLL;
@@ -706,13 +699,6 @@ static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
}
- if (caps & CGU_CLK_DIV) {
- caps &= ~CGU_CLK_DIV;
- } else {
- /* pass rate changes to the parent clock */
- clk_init.flags |= CLK_SET_RATE_PARENT;
- }
-
if (caps) {
pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
goto out;
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 2ad26cb927fd..aaf31abe1c8f 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -54,6 +54,8 @@ struct sci_clk_provider {
* @provider: Master clock provider
* @flags: Flags for the clock
* @node: Link for handling clocks probed via DT
+ * @cached_req: Cached frequency requested by the last determine_rate call
+ * @cached_res: Cached frequency returned by the last determine_rate call
*/
struct sci_clk {
struct clk_hw hw;
@@ -63,6 +65,8 @@ struct sci_clk {
struct sci_clk_provider *provider;
u8 flags;
struct list_head node;
+ unsigned long cached_req;
+ unsigned long cached_res;
};
#define to_sci_clk(_hw) container_of(_hw, struct sci_clk, hw)
@@ -175,6 +179,11 @@ static int sci_clk_determine_rate(struct clk_hw *hw,
int ret;
u64 new_rate;
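+	/* Serve a repeat of the previous request from the cache */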
+ if (clk->cached_req && clk->cached_req == req->rate) {
+ req->rate = clk->cached_res;
+ return 0;
+ }
+
ret = clk->provider->ops->get_best_match_freq(clk->provider->sci,
clk->dev_id,
clk->clk_id,
@@ -189,6 +198,9 @@ static int sci_clk_determine_rate(struct clk_hw *hw,
return ret;
}
+ clk->cached_req = req->rate;
+ clk->cached_res = new_rate;
+
req->rate = new_rate;
return 0;
@@ -209,7 +221,8 @@ static int sci_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct sci_clk *clk = to_sci_clk(hw);
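+	/* Request a -10%/+10% window around the target rate (min, target, max) */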
return clk->provider->ops->set_freq(clk->provider->sci, clk->dev_id,
- clk->clk_id, rate, rate, rate);
+ clk->clk_id, rate / 10 * 9, rate,
+ rate / 10 * 11);
}
/**
@@ -249,6 +262,8 @@ static int sci_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct sci_clk *clk = to_sci_clk(hw);
+ clk->cached_req = 0;
+
return clk->provider->ops->set_parent(clk->provider->sci, clk->dev_id,
clk->clk_id,
index + 1 + clk->clk_id);
@@ -522,7 +537,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
np = of_find_node_with_property(np, *clk_name);
if (!np) {
clk_name++;
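+			/* No more nodes with this property: move on to the next clock name */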
- break;
+ continue;
}
if (!of_device_is_available(np))
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 89ceb2fbc7c4..ce8475098b31 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -352,6 +352,54 @@ config COMMON_CLK_MT8135
help
This driver supports MediaTek MT8135 clocks.
+config COMMON_CLK_MT8167
+ bool "Clock driver for MediaTek MT8167"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 basic clocks.
+
+config COMMON_CLK_MT8167_AUDSYS
+ bool "Clock driver for MediaTek MT8167 audsys"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 audsys clocks.
+
+config COMMON_CLK_MT8167_IMGSYS
+ bool "Clock driver for MediaTek MT8167 imgsys"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 imgsys clocks.
+
+config COMMON_CLK_MT8167_MFGCFG
+ bool "Clock driver for MediaTek MT8167 mfgcfg"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 mfgcfg clocks.
+
+config COMMON_CLK_MT8167_MMSYS
+ bool "Clock driver for MediaTek MT8167 mmsys"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 mmsys clocks.
+
+config COMMON_CLK_MT8167_VDECSYS
+ bool "Clock driver for MediaTek MT8167 vdecsys"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8167 vdecsys clocks.
+
config COMMON_CLK_MT8173
bool "Clock driver for MediaTek MT8173"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 959b556d32ea..3b0c2be73824 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -47,6 +47,12 @@ obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
+obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167.o
+obj-$(CONFIG_COMMON_CLK_MT8167_AUDSYS) += clk-mt8167-aud.o
+obj-$(CONFIG_COMMON_CLK_MT8167_IMGSYS) += clk-mt8167-img.o
+obj-$(CONFIG_COMMON_CLK_MT8167_MFGCFG) += clk-mt8167-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT8167_MMSYS) += clk-mt8167-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8167_VDECSYS) += clk-mt8167-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
obj-$(CONFIG_COMMON_CLK_MT8173_MMSYS) += clk-mt8173-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index db8db1b3b79d..d77ea5aff292 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -909,7 +909,6 @@ static struct platform_driver clk_mt6765_drv = {
.probe = clk_mt6765_probe,
.driver = {
.name = "clk-mt6765",
- .owner = THIS_MODULE,
.of_match_table = of_match_clk_mt6765,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 9766cccf5844..6e0d3a166729 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
"pwm_sel", 19),
GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
"pwm_sel", 21),
+ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
+ "uart_sel", 22),
GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
"uart_sel", 23),
GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index f35389a11af1..428eb24ffec5 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -582,7 +582,7 @@ CLK_OF_DECLARE_DRIVER(mtk_infra, "mediatek,mt6797-infracfg",
static int mtk_infrasys_init(struct platform_device *pdev)
{
- int r, i;
+ int i;
struct device_node *node = pdev->dev.of_node;
if (!infra_clk_data) {
@@ -599,11 +599,7 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
- if (r)
- return r;
-
- return 0;
+ return of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
}
#define MT6797_PLL_FMAX (3000UL * MHZ)
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index b73bdf152836..a0ee079670c7 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -601,7 +601,6 @@ static int mtk_infrasys_init(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk_onecell_data *clk_data;
- int r;
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
@@ -611,12 +610,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
clk_data);
- r = of_clk_add_provider(node, of_clk_src_onecell_get,
- clk_data);
- if (r)
- return r;
-
- return 0;
+ return of_clk_add_provider(node, of_clk_src_onecell_get,
+ clk_data);
}
static int mtk_pericfg_init(struct platform_device *pdev)
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
new file mode 100644
index 000000000000..3f7bf6485792
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
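+/*
+ * All audsys gates sit in a single plain read/write register, so the
+ * set/clr/sta offsets are identical and the no_setclr gate ops are used.
+ */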
+static const struct mtk_gate_regs aud_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_AUD(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &aud_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate aud_clks[] __initconst = {
+ GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+ GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6),
+ GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8),
+ GATE_AUD(CLK_AUD_24M, "aud_24m", "rg_aud_engen2", 9),
+ GATE_AUD(CLK_AUD_INTDIR, "aud_intdir", "rg_aud_spdif_in", 15),
+ GATE_AUD(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", "rg_aud_engen2", 18),
+ GATE_AUD(CLK_AUD_APLL_TUNER, "aud_apll_tuner", "rg_aud_engen1", 19),
+ GATE_AUD(CLK_AUD_HDMI, "aud_hdmi", "apll12_div4", 20),
+ GATE_AUD(CLK_AUD_SPDF, "aud_spdf", "apll12_div6", 21),
+ GATE_AUD(CLK_AUD_ADC, "aud_adc", "aud_afe", 24),
+ GATE_AUD(CLK_AUD_DAC, "aud_dac", "aud_afe", 25),
+ GATE_AUD(CLK_AUD_DAC_PREDIS, "aud_dac_predis", "aud_afe", 26),
+ GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27),
+};
+
+static void __init mtk_audsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
+
+ mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_audsys, "mediatek,mt8167-audsys", mtk_audsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
new file mode 100644
index 000000000000..3b4ec9eae432
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] __initconst = {
+ GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
+ GATE_IMG(CLK_IMG_CAM_SMI, "img_cam_smi", "smi_mm", 5),
+ GATE_IMG(CLK_IMG_CAM_CAM, "img_cam_cam", "smi_mm", 6),
+ GATE_IMG(CLK_IMG_SEN_TG, "img_sen_tg", "cam_mm", 7),
+ GATE_IMG(CLK_IMG_SEN_CAM, "img_sen_cam", "smi_mm", 8),
+ GATE_IMG(CLK_IMG_VENC, "img_venc", "smi_mm", 9),
+};
+
+static void __init mtk_imgsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8167-imgsys", mtk_imgsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
new file mode 100644
index 000000000000..90b871730f2d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mfg_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mfg_clks[] __initconst = {
+ GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
+ GATE_MFG(CLK_MFG_BMEM, "mfg_bmem", "gfmux_emi1x_sel", 1),
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_mm", 2),
+ GATE_MFG(CLK_MFG_B26M, "mfg_b26m", "clk26m_ck", 3),
+};
+
+static void __init mtk_mfgcfg_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+ mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_mfgcfg, "mediatek,mt8167-mfgcfg", mtk_mfgcfg_init);
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
new file mode 100644
index 000000000000..963b129aade1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "smi_mm", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "smi_mm", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "smi_mm", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA, "mm_mdp_rdma", "smi_mm", 3),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "smi_mm", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "smi_mm", 5),
+ GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "smi_mm", 6),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "smi_mm", 7),
+ GATE_MM0(CLK_MM_MDP_WROT, "mm_mdp_wrot", "smi_mm", 8),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "smi_mm", 9),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "smi_mm", 10),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "smi_mm", 11),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "smi_mm", 12),
+ GATE_MM0(CLK_MM_DISP_WDMA, "mm_disp_wdma", "smi_mm", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR, "mm_disp_color", "smi_mm", 14),
+ GATE_MM0(CLK_MM_DISP_CCORR, "mm_disp_ccorr", "smi_mm", 15),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "smi_mm", 16),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "smi_mm", 17),
+ GATE_MM0(CLK_MM_DISP_DITHER, "mm_disp_dither", "smi_mm", 18),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "smi_mm", 19),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM_MM, "mm_disp_pwm_mm", "smi_mm", 0),
+ GATE_MM1(CLK_MM_DISP_PWM_26M, "mm_disp_pwm_26m", "smi_mm", 1),
+ GATE_MM1(CLK_MM_DSI_ENGINE, "mm_dsi_engine", "smi_mm", 2),
+ GATE_MM1(CLK_MM_DSI_DIGITAL, "mm_dsi_digital", "dsi0_lntc_dsick", 3),
+ GATE_MM1(CLK_MM_DPI0_ENGINE, "mm_dpi0_engine", "smi_mm", 4),
+ GATE_MM1(CLK_MM_DPI0_PXL, "mm_dpi0_pxl", "rg_fdpi0", 5),
+ GATE_MM1(CLK_MM_LVDS_PXL, "mm_lvds_pxl", "vpll_dpix", 14),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvdstx_dig_cts", 15),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "smi_mm", 16),
+ GATE_MM1(CLK_MM_DPI1_PXL, "mm_dpi1_pxl", "rg_fdpi1", 17),
+ GATE_MM1(CLK_MM_HDMI_PXL, "mm_hdmi_pxl", "rg_fdpi1", 18),
+ GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll12_div6", 19),
+ GATE_MM1(CLK_MM_HDMI_ADSP_BCK, "mm_hdmi_adsp_b", "apll12_div4b", 20),
+ GATE_MM1(CLK_MM_HDMI_PLL, "mm_hdmi_pll", "hdmtx_dig_cts", 21),
+};
+
+struct clk_mt8167_mm_driver_data {
+ const struct mtk_gate *gates_clk;
+ int gates_num;
+};
+
+static const struct clk_mt8167_mm_driver_data mt8167_mmsys_driver_data = {
+ .gates_clk = mm_clks,
+ .gates_num = ARRAY_SIZE(mm_clks),
+};
+
+static int clk_mt8167_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ const struct clk_mt8167_mm_driver_data *data;
+ struct clk_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ data = &mt8167_mmsys_driver_data;
+
+ ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+ clk_data);
+ if (ret)
+ return ret;
+
+	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static struct platform_driver clk_mt8167_mm_drv = {
+ .driver = {
+ .name = "clk-mt8167-mm",
+ },
+ .probe = clk_mt8167_mm_probe,
+};
+
+builtin_platform_driver(clk_mt8167_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
new file mode 100644
index 000000000000..910b28355ec0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
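+/* VDEC clock-gate bits are inverted: writing the set register enables the clock */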
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate vdec_clks[] __initconst = {
+ /* VDEC0 */
+ GATE_VDEC0_I(CLK_VDEC_CKEN, "vdec_cken", "rg_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1_I(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "smi_mm", 0),
+};
+
+static void __init mtk_vdecsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8167-vdecsys", mtk_vdecsys_init);
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
new file mode 100644
index 000000000000..e5ea10e31799
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2020 BayLibre, SAS
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8167-clk.h>
+
+static DEFINE_SPINLOCK(mt8167_clk_lock);
+
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+ FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
+ FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
+ FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m", 75000000),
+ FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", "clk26m", 75000000),
+ FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx_dig_cts", "clk26m", 52500000),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11),
+ FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20),
+ FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3),
+ FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1, 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+ FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2),
+ FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_MIPI_26M, "mipi_26m", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_ck", 1, 16),
+ FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2),
+ FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2),
+ FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
+};
+
+static const char * const uart0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const gfmux_emi1x_parents[] __initconst = {
+ "clk26m_ck",
+ "dmpll_ck"
+};
+
+static const char * const emi_ddrphy_parents[] __initconst = {
+ "gfmux_emi1x_sel",
+ "gfmux_emi1x_sel"
+};
+
+static const char * const ahb_infra_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d11",
+ "clk_null",
+ "mainpll_d12",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10"
+};
+
+static const char * const csw_mux_mfg_parents[] __initconst = {
+ "clk_null",
+ "clk_null",
+ "univpll_d3",
+ "univpll_d2",
+ "clk26m_ck",
+ "mainpll_d4",
+ "univpll_d24",
+ "mmpll380m"
+};
+
+static const char * const msdc0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const camtg_mm_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "usb_phy48m_ck",
+ "clk_null",
+ "univpll_d6"
+};
+
+static const char * const pwm_mm_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12"
+};
+
+static const char * const uart1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const msdc1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const spm_52m_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+ "univpll_d20",
+ "usb_phy48m_ck",
+ "univpll_d16",
+ "clk26m_ck"
+};
+
+static const char * const qaxi_aud26m_parents[] __initconst = {
+ "clk26m_ck",
+ "ahb_infra_sel"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d22",
+ "clk_null",
+ "mainpll_d11"
+};
+
+static const char * const nfi2x_pad_parents[] __initconst = {
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk26m_ck",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d12",
+ "mainpll_d8",
+ "clk_null",
+ "mainpll_d6",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d4",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10",
+ "mainpll_d7",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfi1x_pad_parents[] __initconst = {
+ "ahb_infra_sel",
+ "nfi1x_ck"
+};
+
+static const char * const mfg_mm_parents[] __initconst = {
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "csw_mux_mfg_sel",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d3",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d5",
+ "mainpll_d7",
+ "clk_null",
+ "mainpll_d14"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d16"
+};
+
+static const char * const smi_mm_parents[] __initconst = {
+ "clk26m_ck",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "univpll_d4",
+ "mainpll_d7",
+ "clk_null",
+ "mainpll_d14"
+};
+
+static const char * const usb_78m_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "univpll_d16",
+ "clk_null",
+ "mainpll_d20"
+};
+
+static const char * const scam_mm_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d14",
+ "clk_null",
+ "mainpll_d12"
+};
+
+static const char * const spinor_parents[] __initconst = {
+ "clk26m_d2",
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20",
+ "mainpll_d16",
+ "univpll_d12"
+};
+
+static const char * const msdc2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const eth_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20"
+};
+
+static const char * const vdec_mm_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "univpll_d6",
+ "mainpll_d6"
+};
+
+static const char * const dpi0_mm_parents[] __initconst = {
+ "clk26m_ck",
+ "lvdspll_ck",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8"
+};
+
+static const char * const dpi1_mm_parents[] __initconst = {
+ "clk26m_ck",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "tvdpll_d16"
+};
+
+static const char * const axi_mfg_in_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d11",
+ "univpll_d24",
+ "mmpll380m"
+};
+
+static const char * const slow_mfg_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12",
+ "univpll_d24"
+};
+
+static const char * const aud1_parents[] __initconst = {
+ "clk26m_ck",
+ "apll1_ck"
+};
+
+static const char * const aud2_parents[] __initconst = {
+ "clk26m_ck",
+ "apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll1_d2_en",
+ "rg_apll1_d4_en",
+ "rg_apll1_d8_en"
+};
+
+static const char * const aud_engen2_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll2_d2_en",
+ "rg_apll2_d4_en",
+ "rg_apll2_d8_en"
+};
+
+static const char * const i2c_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d20",
+ "univpll_d16",
+ "univpll_d12"
+};
+
+static const char * const aud_i2s0_m_parents[] __initconst = {
+ "rg_aud1",
+ "rg_aud2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12"
+};
+
+static const char * const spi_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12",
+ "univpll_d8",
+ "univpll_d6"
+};
+
+static const char * const aud_spdifin_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d2"
+};
+
+static const char * const uart2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const bsi_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d10",
+ "mainpll_d12",
+ "mainpll_d20"
+};
+
+static const char * const dbg_atclk_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d5",
+ "clk_null",
+ "univpll_d5"
+};
+
+static const char * const csw_nfiecc_parents[] __initconst = {
+ "clk_null",
+ "mainpll_d7",
+ "mainpll_d6",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfiecc_parents[] __initconst = {
+ "clk_null",
+ "nfi2x_pad_sel",
+ "mainpll_d4",
+ "clk_null",
+ "csw_nfiecc_sel"
+};
+
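+/* MUX(id, name, parents, reg, shift, width): a width-bit selector at shift. */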
+static struct mtk_composite top_muxes[] __initdata = {
+ /* CLK_MUX_SEL0 */
+ MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents,
+ 0x000, 0, 1),
+ MUX(CLK_TOP_GFMUX_EMI1X_SEL, "gfmux_emi1x_sel", gfmux_emi1x_parents,
+ 0x000, 1, 1),
+ MUX(CLK_TOP_EMI_DDRPHY_SEL, "emi_ddrphy_sel", emi_ddrphy_parents,
+ 0x000, 2, 1),
+ MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents,
+ 0x000, 4, 4),
+ MUX(CLK_TOP_CSW_MUX_MFG_SEL, "csw_mux_mfg_sel", csw_mux_mfg_parents,
+ 0x000, 8, 3),
+ MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents,
+ 0x000, 11, 3),
+ MUX(CLK_TOP_CAMTG_MM_SEL, "camtg_mm_sel", camtg_mm_parents,
+ 0x000, 15, 3),
+ MUX(CLK_TOP_PWM_MM_SEL, "pwm_mm_sel", pwm_mm_parents,
+ 0x000, 18, 1),
+ MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents,
+ 0x000, 19, 1),
+ MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents,
+ 0x000, 20, 3),
+ MUX(CLK_TOP_SPM_52M_SEL, "spm_52m_sel", spm_52m_parents,
+ 0x000, 23, 1),
+ MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x000, 24, 2),
+ MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents,
+ 0x000, 26, 1),
+ MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x000, 27, 3),
+ /* CLK_MUX_SEL1 */
+ MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents,
+ 0x004, 0, 7),
+ MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents,
+ 0x004, 7, 1),
+ MUX(CLK_TOP_MFG_MM_SEL, "mfg_mm_sel", mfg_mm_parents,
+ 0x004, 8, 6),
+ MUX(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x004, 15, 1),
+ MUX(CLK_TOP_SMI_MM_SEL, "smi_mm_sel", smi_mm_parents,
+ 0x004, 16, 4),
+ MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents,
+ 0x004, 20, 3),
+ MUX(CLK_TOP_SCAM_MM_SEL, "scam_mm_sel", scam_mm_parents,
+ 0x004, 23, 3),
+ /* CLK_MUX_SEL8 */
+ MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents,
+ 0x040, 0, 3),
+ MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents,
+ 0x040, 3, 3),
+ MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 6, 3),
+ MUX(CLK_TOP_VDEC_MM_SEL, "vdec_mm_sel", vdec_mm_parents,
+ 0x040, 9, 3),
+ MUX(CLK_TOP_DPI0_MM_SEL, "dpi0_mm_sel", dpi0_mm_parents,
+ 0x040, 12, 3),
+ MUX(CLK_TOP_DPI1_MM_SEL, "dpi1_mm_sel", dpi1_mm_parents,
+ 0x040, 15, 3),
+ MUX(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents,
+ 0x040, 18, 2),
+ MUX(CLK_TOP_SLOW_MFG_SEL, "slow_mfg_sel", slow_mfg_parents,
+ 0x040, 20, 2),
+ MUX(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x040, 22, 1),
+ MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+ 0x040, 23, 1),
+ MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents,
+ 0x040, 24, 2),
+ MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents,
+ 0x040, 26, 2),
+ MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+ 0x040, 28, 2),
+ /* CLK_SEL_9 */
+ MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents,
+ 0x044, 12, 1),
+ MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents,
+ 0x044, 13, 1),
+ MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents,
+ 0x044, 14, 1),
+ MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents,
+ 0x044, 15, 1),
+ MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents,
+ 0x044, 16, 1),
+ MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents,
+ 0x044, 17, 1),
+ MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents,
+ 0x044, 18, 1),
+ /* CLK_MUX_SEL13 */
+ MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x07c, 0, 1),
+ MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+ 0x07c, 1, 2),
+ MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents,
+ 0x07c, 3, 1),
+ MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents,
+ 0x07c, 4, 1),
+ MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents,
+ 0x07c, 5, 2),
+ MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents,
+ 0x07c, 7, 3),
+ MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents,
+ 0x07c, 10, 3),
+ MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+ 0x07c, 13, 3),
+};
+
+static const char * const ifr_mux1_parents[] __initconst = {
+ "clk26m_ck",
+ "armpll",
+ "univpll",
+ "mainpll_d2"
+};
+
+static const char * const ifr_eth_25m_parents[] __initconst = {
+ "eth_d2_ck",
+ "rg_eth"
+};
+
+static const char * const ifr_i2c0_parents[] __initconst = {
+ "ahb_infra_d2",
+ "rg_i2c"
+};
+
+static const struct mtk_composite ifr_muxes[] __initconst = {
+ MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
+ 2, 2),
+ MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
+ 0, 1),
+ MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080,
+ 1, 1),
+ MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080,
+ 2, 1),
+ MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080,
+ 3, 1),
+};
+
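+/* An adjustable divider: a _width-bit divide field at _shift in register _reg. */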
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel",
+ 0x0048, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel",
+ 0x0048, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel",
+ 0x0048, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel",
+ 0x0048, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel",
+ 0x004c, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4",
+ 0x004c, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel",
+ 0x004c, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5",
+ 0x004c, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel",
+ 0x0078, 0, 8),
+};
+
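+/* As DIV_ADJ(), with explicit clk_divider flags. */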
+#define DIV_ADJ_FLAG(_id, _name, _parent, _reg, _shift, _width, _flag) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .clk_divider_flags = _flag, \
+}
+
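+/* CLK_DIVIDER_POWER_OF_TWO: a field value of n divides "tvdpll" by 2^n. */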
+static const struct mtk_clk_divider apmixed_adj_divs[] = {
+ DIV_ADJ_FLAG(CLK_APMIXED_HDMI_REF, "hdmi_ref", "tvdpll",
+ 0x1c4, 24, 3, CLK_DIVIDER_POWER_OF_TWO),
+};
+
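+/*
+ * Clock gate banks: bits are flipped through the set/clr registers and
+ * read back via sta. TOP5 has no set/clr pair, so all three offsets
+ * point at the same read-modify-write register.
+ */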
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x50,
+ .clr_ofs = 0x80,
+ .sta_ofs = 0x20,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x54,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x24,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x6c,
+ .clr_ofs = 0x9c,
+ .sta_ofs = 0x3c,
+};
+
+static const struct mtk_gate_regs top3_cg_regs = {
+ .set_ofs = 0xa0,
+ .clr_ofs = 0xb0,
+ .sta_ofs = 0x70,
+};
+
+static const struct mtk_gate_regs top4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xb4,
+ .sta_ofs = 0x74,
+};
+
+static const struct mtk_gate_regs top5_cg_regs = {
+ .set_ofs = 0x44,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x44,
+};
+
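+/* The _I variants use inverted (active-low) gate bits. */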
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP0_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] __initconst = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_PWM_MM, "pwm_mm", "pwm_mm_sel", 0),
+ GATE_TOP0(CLK_TOP_CAM_MM, "cam_mm", "camtg_mm_sel", 1),
+ GATE_TOP0(CLK_TOP_MFG_MM, "mfg_mm", "mfg_mm_sel", 2),
+ GATE_TOP0(CLK_TOP_SPM_52M, "spm_52m", "spm_52m_sel", 3),
+ GATE_TOP0_I(CLK_TOP_MIPI_26M_DBG, "mipi_26m_dbg", "mipi_26m", 4),
+ GATE_TOP0(CLK_TOP_SCAM_MM, "scam_mm", "scam_mm_sel", 5),
+ GATE_TOP0(CLK_TOP_SMI_MM, "smi_mm", "smi_mm_sel", 9),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel", 1),
+ GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel", 2),
+ GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel", 3),
+ GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel", 4),
+ GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel", 5),
+ GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel", 6),
+ GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc", 7),
+ GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk", 8),
+ GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel", 9),
+ GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel", 10),
+ GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel", 11),
+ GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel", 12),
+ GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m", 13),
+ GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck", 14),
+ GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel", 15),
+ GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel", 16),
+ GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel", 17),
+ GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel", 18),
+ GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel", 19),
+ GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck", 20),
+ GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel", 21),
+ GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck", 22),
+ GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel", 23),
+ GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck", 24),
+ GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck", 25),
+ GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck", 27),
+ GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck", 28),
+ GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck", 29),
+ GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck", 30),
+ GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck", 31),
+ /* TOP2 */
+ GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel", 0),
+ GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12", 1),
+ GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel", 2),
+ GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel", 4),
+ GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel", 5),
+ GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck", 6),
+ GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel", 7),
+ GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra", 8),
+ GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra", 9),
+ GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra", 10),
+ GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra", 11),
+ GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra", 12),
+ GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra", 13),
+ GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m", 14),
+ GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel",
+ 15),
+ GATE_TOP2(CLK_TOP_26M_HDMI_SIFM, "hdmi_sifm_26m", "clk26m_ck", 16),
+ GATE_TOP2(CLK_TOP_26M_CEC, "cec_26m", "clk26m_ck", 17),
+ GATE_TOP2(CLK_TOP_32K_CEC, "cec_32k", "clk32k", 18),
+ GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2", 19),
+ GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel", 20),
+ GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel", 21),
+ GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth", 22),
+ GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel", 23),
+ GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel", 24),
+ GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2", 25),
+ GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel", 26),
+ GATE_TOP2(CLK_TOP_GCPU_B, "gcpu_b", "ahb_infra_sel", 27),
+ GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0", 28),
+ GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1", 29),
+ GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2", 30),
+ GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel", 31),
+ /* TOP3 */
+ GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel", 0),
+ GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel", 1),
+ GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel", 2),
+ GATE_TOP3(CLK_TOP_RG_VDEC, "rg_vdec", "vdec_mm_sel", 3),
+ GATE_TOP3(CLK_TOP_RG_FDPI0, "rg_fdpi0", "dpi0_mm_sel", 4),
+ GATE_TOP3(CLK_TOP_RG_FDPI1, "rg_fdpi1", "dpi1_mm_sel", 5),
+ GATE_TOP3(CLK_TOP_RG_AXI_MFG, "rg_axi_mfg", "axi_mfg_in_sel", 6),
+ GATE_TOP3(CLK_TOP_RG_SLOW_MFG, "rg_slow_mfg", "slow_mfg_sel", 7),
+ GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel", 8),
+ GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel", 9),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel", 10),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel", 11),
+ GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel", 12),
+ GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel", 13),
+ GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel",
+ 14),
+ GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel", 15),
+ GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel", 16),
+ GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel", 17),
+ GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel", 18),
+ /* TOP4 */
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2", 8),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4", 9),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8", 10),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2", 11),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4", 12),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8", 13),
+ /* TOP5 */
+ GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0", 0),
+ GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1", 1),
+ GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2", 2),
+ GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3", 3),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4", 4),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b", 5),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5", 6),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b", 7),
+ GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(MT8167_CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8167_clk_lock, clk_data);
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt8167_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8167-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infracfg_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+ mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+ &mt8167_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8167-infracfg", mtk_infracfg_init);
+
+#define MT8167_PLL_FMAX (2500UL * MHZ)
+
+#define CON0_MT8167_RST_BAR BIT(27)
+
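+/* PLL_B() takes an optional post-divider table; PLL() passes NULL. */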
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8167_RST_BAR, \
+ .fmax = MT8167_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
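+/* MMPLL post-divider table: frequency thresholds used to pick the div value. */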
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8167_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 604500000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+ 21, 0x0104, 24, 0, 0x0104, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+ HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+ 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+ 31, 0x0180, 1, 0x0194, 0x0184, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+ 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0x00000001, 0,
+ 21, 0x01C4, 24, 0, 0x01C4, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0x00000001, 0,
+ 21, 0x01E4, 24, 0, 0x01E4, 0),
+};
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(MT8167_CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ mtk_clk_register_dividers(apmixed_adj_divs, ARRAY_SIZE(apmixed_adj_divs),
+ base, &mt8167_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8167-apmixedsys",
+ mtk_apmixedsys_init);
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index dabeb435d067..034da203e8e0 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+menu "Clock support for Amlogic platforms"
+ depends on ARCH_MESON || COMPILE_TEST
+
config COMMON_CLK_MESON_REGMAP
tristate
select REGMAP
@@ -41,8 +44,9 @@ config COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_REGMAP
config COMMON_CLK_MESON8B
- bool
- depends on ARCH_MESON
+ bool "Meson8 SoC Clock controller support"
+ depends on ARM
+ default y
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
@@ -54,8 +58,9 @@ config COMMON_CLK_MESON8B
want peripherals and CPU frequency scaling to work.
config COMMON_CLK_GXBB
- bool
- depends on ARCH_MESON
+ bool "GXBB and GXL SoC clock controllers support"
+ depends on ARM64
+ default y
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
@@ -69,8 +74,9 @@ config COMMON_CLK_GXBB
Say Y if you want peripherals and CPU frequency scaling to work.
config COMMON_CLK_AXG
- bool
- depends on ARCH_MESON
+ bool "AXG SoC clock controllers support"
+ depends on ARM64
+ default y
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -84,7 +90,7 @@ config COMMON_CLK_AXG
config COMMON_CLK_AXG_AUDIO
tristate "Meson AXG Audio Clock Controller Driver"
- depends on ARCH_MESON
+ depends on ARM64
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_PHASE
select COMMON_CLK_MESON_SCLK_DIV
@@ -94,8 +100,9 @@ config COMMON_CLK_AXG_AUDIO
aka axg, Say Y if you want audio subsystem to work.
config COMMON_CLK_G12A
- bool
- depends on ARCH_MESON
+ bool "G12 and SM1 SoC clock controllers support"
+ depends on ARM64
+ default y
select COMMON_CLK_MESON_REGMAP
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
@@ -107,3 +114,4 @@ config COMMON_CLK_G12A
help
Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
devices, aka g12a. Say Y if you want peripherals to work.
+endmenu
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 53715e36326c..7c8d02164443 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -147,6 +147,29 @@
}, \
}
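+/* Like AUD_PHASE, but also mirrors the inverted phase bit into a ws bit. */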
+#define AUD_SCLK_WS(_name, _reg, _width, _shift_ph, _shift_ws, _pname, \
+ _iflags) { \
+ .data = &(struct meson_sclk_ws_inv_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = (_shift_ph), \
+ .width = (_width), \
+ }, \
+ .ws = { \
+ .reg_off = (_reg), \
+ .shift = (_shift_ws), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "aud_"#_name, \
+ .ops = &meson_sclk_ws_inv_ops, \
+ .parent_names = (const char *[]){ #_pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
/* Audio Master Clocks */
static const struct clk_parent_data mst_mux_parent_data[] = {
{ .fw_name = "mst_in0", },
@@ -254,6 +277,10 @@ static const struct clk_parent_data tdm_lrclk_parent_data[] = {
AUD_PHASE(tdm##_name##_sclk, _reg, 1, 29, \
aud_tdm##_name##_sclk_post_en, \
CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)
+#define AUD_TDM_SCLK_WS(_name, _reg) \
+ AUD_SCLK_WS(tdm##_name##_sclk, _reg, 1, 29, 28, \
+ aud_tdm##_name##_sclk_post_en, \
+ CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)
#define AUD_TDM_LRLCK(_name, _reg) \
AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
@@ -499,12 +526,6 @@ static struct clk_regmap tdmin_c_sclk =
AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
static struct clk_regmap tdmin_lb_sclk =
AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static struct clk_regmap tdmout_a_sclk =
- AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static struct clk_regmap tdmout_b_sclk =
- AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static struct clk_regmap tdmout_c_sclk =
- AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
static struct clk_regmap tdmin_a_lrclk =
AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
@@ -521,6 +542,14 @@ static struct clk_regmap tdmout_b_lrclk =
static struct clk_regmap tdmout_c_lrclk =
AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+/* AXG Clocks */
+static struct clk_regmap axg_tdmout_a_sclk =
+ AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap axg_tdmout_b_sclk =
+ AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap axg_tdmout_c_sclk =
+ AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
/* AXG/G12A Clocks */
static struct clk_hw axg_aud_top = {
.init = &(struct clk_init_data) {
@@ -591,7 +620,13 @@ static struct clk_regmap g12a_tdm_sclk_pad_1 = AUD_TDM_PAD_CTRL(
static struct clk_regmap g12a_tdm_sclk_pad_2 = AUD_TDM_PAD_CTRL(
sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8, sclk_pad_ctrl_parent_data);
-/* G12a/SM1 clocks */
+static struct clk_regmap g12a_tdmout_a_sclk =
+ AUD_TDM_SCLK_WS(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap g12a_tdmout_b_sclk =
+ AUD_TDM_SCLK_WS(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap g12a_tdmout_c_sclk =
+ AUD_TDM_SCLK_WS(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
static struct clk_regmap toram =
AUD_PCLK_GATE(toram, AUDIO_CLK_GATE_EN, 20);
static struct clk_regmap spdifout_b =
@@ -889,9 +924,9 @@ static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
[AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
[AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
[AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
[AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
[AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
[AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
@@ -1026,9 +1061,9 @@ static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = {
[AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
[AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
[AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &g12a_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &g12a_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &g12a_tdmout_c_sclk.hw,
[AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
[AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
[AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
@@ -1170,9 +1205,9 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
[AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
[AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
[AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &g12a_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &g12a_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &g12a_tdmout_c_sclk.hw,
[AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
[AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
[AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
@@ -1209,12 +1244,7 @@ static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
};
-/* Convenience table to populate regmap in .probe()
- * Note that this table is shared between both AXG and G12A,
- * with spdifout_b clocks being exclusive to G12A. Since those
- * clocks are not declared within the AXG onecell table, we do not
- * feel the need to have separate AXG/G12A regmap tables.
- */
+/* Convenience table to populate regmap in .probe(). */
static struct clk_regmap *const axg_clk_regmaps[] = {
&ddr_arb,
&pdm,
@@ -1236,6 +1266,130 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&spdifout,
&resample,
&power_detect,
+ &mst_a_mclk_sel,
+ &mst_b_mclk_sel,
+ &mst_c_mclk_sel,
+ &mst_d_mclk_sel,
+ &mst_e_mclk_sel,
+ &mst_f_mclk_sel,
+ &mst_a_mclk_div,
+ &mst_b_mclk_div,
+ &mst_c_mclk_div,
+ &mst_d_mclk_div,
+ &mst_e_mclk_div,
+ &mst_f_mclk_div,
+ &mst_a_mclk,
+ &mst_b_mclk,
+ &mst_c_mclk,
+ &mst_d_mclk,
+ &mst_e_mclk,
+ &mst_f_mclk,
+ &spdifout_clk_sel,
+ &spdifout_clk_div,
+ &spdifout_clk,
+ &spdifin_clk_sel,
+ &spdifin_clk_div,
+ &spdifin_clk,
+ &pdm_dclk_sel,
+ &pdm_dclk_div,
+ &pdm_dclk,
+ &pdm_sysclk_sel,
+ &pdm_sysclk_div,
+ &pdm_sysclk,
+ &mst_a_sclk_pre_en,
+ &mst_b_sclk_pre_en,
+ &mst_c_sclk_pre_en,
+ &mst_d_sclk_pre_en,
+ &mst_e_sclk_pre_en,
+ &mst_f_sclk_pre_en,
+ &mst_a_sclk_div,
+ &mst_b_sclk_div,
+ &mst_c_sclk_div,
+ &mst_d_sclk_div,
+ &mst_e_sclk_div,
+ &mst_f_sclk_div,
+ &mst_a_sclk_post_en,
+ &mst_b_sclk_post_en,
+ &mst_c_sclk_post_en,
+ &mst_d_sclk_post_en,
+ &mst_e_sclk_post_en,
+ &mst_f_sclk_post_en,
+ &mst_a_sclk,
+ &mst_b_sclk,
+ &mst_c_sclk,
+ &mst_d_sclk,
+ &mst_e_sclk,
+ &mst_f_sclk,
+ &mst_a_lrclk_div,
+ &mst_b_lrclk_div,
+ &mst_c_lrclk_div,
+ &mst_d_lrclk_div,
+ &mst_e_lrclk_div,
+ &mst_f_lrclk_div,
+ &mst_a_lrclk,
+ &mst_b_lrclk,
+ &mst_c_lrclk,
+ &mst_d_lrclk,
+ &mst_e_lrclk,
+ &mst_f_lrclk,
+ &tdmin_a_sclk_sel,
+ &tdmin_b_sclk_sel,
+ &tdmin_c_sclk_sel,
+ &tdmin_lb_sclk_sel,
+ &tdmout_a_sclk_sel,
+ &tdmout_b_sclk_sel,
+ &tdmout_c_sclk_sel,
+ &tdmin_a_sclk_pre_en,
+ &tdmin_b_sclk_pre_en,
+ &tdmin_c_sclk_pre_en,
+ &tdmin_lb_sclk_pre_en,
+ &tdmout_a_sclk_pre_en,
+ &tdmout_b_sclk_pre_en,
+ &tdmout_c_sclk_pre_en,
+ &tdmin_a_sclk_post_en,
+ &tdmin_b_sclk_post_en,
+ &tdmin_c_sclk_post_en,
+ &tdmin_lb_sclk_post_en,
+ &tdmout_a_sclk_post_en,
+ &tdmout_b_sclk_post_en,
+ &tdmout_c_sclk_post_en,
+ &tdmin_a_sclk,
+ &tdmin_b_sclk,
+ &tdmin_c_sclk,
+ &tdmin_lb_sclk,
+ &axg_tdmout_a_sclk,
+ &axg_tdmout_b_sclk,
+ &axg_tdmout_c_sclk,
+ &tdmin_a_lrclk,
+ &tdmin_b_lrclk,
+ &tdmin_c_lrclk,
+ &tdmin_lb_lrclk,
+ &tdmout_a_lrclk,
+ &tdmout_b_lrclk,
+ &tdmout_c_lrclk,
+};
+
+static struct clk_regmap *const g12a_clk_regmaps[] = {
+ &ddr_arb,
+ &pdm,
+ &tdmin_a,
+ &tdmin_b,
+ &tdmin_c,
+ &tdmin_lb,
+ &tdmout_a,
+ &tdmout_b,
+ &tdmout_c,
+ &frddr_a,
+ &frddr_b,
+ &frddr_c,
+ &toddr_a,
+ &toddr_b,
+ &toddr_c,
+ &loopback,
+ &spdifin,
+ &spdifout,
+ &resample,
+ &power_detect,
&spdifout_b,
&mst_a_mclk_sel,
&mst_b_mclk_sel,
@@ -1328,9 +1482,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&tdmin_b_sclk,
&tdmin_c_sclk,
&tdmin_lb_sclk,
- &tdmout_a_sclk,
- &tdmout_b_sclk,
- &tdmout_c_sclk,
+ &g12a_tdmout_a_sclk,
+ &g12a_tdmout_b_sclk,
+ &g12a_tdmout_c_sclk,
&tdmin_a_lrclk,
&tdmin_b_lrclk,
&tdmin_c_lrclk,
@@ -1465,9 +1619,9 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
&tdmin_b_sclk,
&tdmin_c_sclk,
&tdmin_lb_sclk,
- &tdmout_a_sclk,
- &tdmout_b_sclk,
- &tdmout_c_sclk,
+ &g12a_tdmout_a_sclk,
+ &g12a_tdmout_b_sclk,
+ &g12a_tdmout_c_sclk,
&tdmin_a_lrclk,
&tdmin_b_lrclk,
&tdmin_c_lrclk,
@@ -1713,8 +1867,8 @@ static const struct audioclk_data axg_audioclk_data = {
};
static const struct audioclk_data g12a_audioclk_data = {
- .regmap_clks = axg_clk_regmaps,
- .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
+ .regmap_clks = g12a_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps),
.hw_onecell_data = &g12a_audio_hw_onecell_data,
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index fe22e171121a..a6763439f7d2 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -125,6 +125,62 @@ const struct clk_ops meson_clk_triphase_ops = {
};
EXPORT_SYMBOL_GPL(meson_clk_triphase_ops);
+/*
+ * This is a special clock for the audio controller.
+ * It drives a bit clock inverter for which the
+ * opposite value of the inverter bit needs to be manually
+ * set into another bit.
+ */
+static inline struct meson_sclk_ws_inv_data *
+meson_sclk_ws_inv_data(struct clk_regmap *clk)
+{
+ return (struct meson_sclk_ws_inv_data *)clk->data;
+}
+
+static int meson_sclk_ws_inv_sync(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
+ unsigned int val;
+
+ /* Get phase and sync the inverted value to ws */
+ val = meson_parm_read(clk->map, &tph->ph);
+ meson_parm_write(clk->map, &tph->ws, val ? 0 : 1);
+
+ return 0;
+}
+
+static int meson_sclk_ws_inv_get_phase(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
+ unsigned int val;
+
+ val = meson_parm_read(clk->map, &tph->ph);
+
+ return meson_clk_degrees_from_val(val, tph->ph.width);
+}
+
+static int meson_sclk_ws_inv_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_sclk_ws_inv_data *tph = meson_sclk_ws_inv_data(clk);
+ unsigned int val;
+
+ val = meson_clk_degrees_to_val(degrees, tph->ph.width);
+ meson_parm_write(clk->map, &tph->ph, val);
+ meson_parm_write(clk->map, &tph->ws, val ? 0 : 1);
+ return 0;
+}
+
+const struct clk_ops meson_sclk_ws_inv_ops = {
+ .init = meson_sclk_ws_inv_sync,
+ .get_phase = meson_sclk_ws_inv_get_phase,
+ .set_phase = meson_sclk_ws_inv_set_phase,
+};
+EXPORT_SYMBOL_GPL(meson_sclk_ws_inv_ops);
+
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h
index 5579f9ced142..b637b9b227bc 100644
--- a/drivers/clk/meson/clk-phase.h
+++ b/drivers/clk/meson/clk-phase.h
@@ -20,7 +20,13 @@ struct meson_clk_triphase_data {
struct parm ph2;
};
+struct meson_sclk_ws_inv_data {
+ struct parm ph;
+ struct parm ws;
+};
+
extern const struct clk_ops meson_clk_phase_ops;
extern const struct clk_ops meson_clk_triphase_ops;
+extern const struct clk_ops meson_sclk_ws_inv_ops;
#endif /* __MESON_CLK_PHASE_H */
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index c4a39604cffd..e365312da54e 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -26,7 +26,10 @@ struct clk_regmap {
void *data;
};
-#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
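+/* An inline function (rather than a macro) so that hw is type-checked. */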
+static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_regmap, hw);
+}
/**
* struct clk_regmap_gate_data - regmap backed gate specific data
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 9803d44bb157..b814d44917a5 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
&g12a_fclk_div2_div.hw
},
.num_parents = 1,
+ /*
+ * Similar to fclk_div3, it seems that this clock is used by
+ * the resident firmware and is required by the platform to
+ * operate correctly.
+ * Until the following conditions are met, we need this clock to
+ * be marked as critical:
+ * a) Mark the clock used by a firmware resource, if possible
+ * b) CCF has a clock hand-off mechanism to make sure the
+ *    clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index bf8bea675d24..3a6d84cd6601 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -57,7 +57,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
rstc->data = data;
rstc->regmap = regmap;
rstc->reset.ops = &meson_aoclk_reset_ops;
- rstc->reset.nr_resets = data->num_reset,
+ rstc->reset.nr_resets = data->num_reset;
rstc->reset.of_node = dev->of_node;
ret = devm_reset_controller_register(dev, &rstc->reset);
if (ret) {
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 67208aea94c5..0839fb2049e9 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -347,9 +347,9 @@ static struct mmp_param_mux_clk mmp3_apmu_mux_clks[] = {
};
static struct mmp_param_div_clk apmu_div_clks[] = {
- {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
+ {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, CLK_DIVIDER_ONE_BASED, &disp0_lock},
{0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
- {0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, 0, &disp1_lock},
+ {0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, CLK_DIVIDER_ONE_BASED, &disp1_lock},
{0, "ccic0_sphy_div", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
{0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
};
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index cede7b4ca3b9..998fc4207b0e 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -68,7 +68,6 @@ static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit)
{
- struct clk *clk;
struct mmp_clk_unit *unit = &pxa_unit->unit;
mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
@@ -77,7 +76,7 @@ static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit)
mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
ARRAY_SIZE(fixed_factor_clks));
- clk = mmp_clk_register_factor("uart_pll", "pll1_416",
+ mmp_clk_register_factor("uart_pll", "pll1_416",
CLK_SET_RATE_PARENT,
pxa_unit->mpmu_base + MPMU_UART_PLL,
&uart_factor_masks, uart_factor_tbl,
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 6b394302c76a..b4259b60dcfd 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -197,7 +197,7 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
stable_bit = BIT(clk->pll_regs->ratio_state_offset +
clk->cluster *
- clk->pll_regs->ratio_state_cluster_offset),
+ clk->pll_regs->ratio_state_cluster_offset);
ret = regmap_read_poll_timeout(clk->pll_cr_base,
clk->pll_regs->ratio_state_reg, reg,
reg & stable_bit, STATUS_POLL_PERIOD_US,
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index f131d2834af4..5768e0f728ce 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -19,11 +19,11 @@
#define MUX_RO_RATE_RO_OPS(name, clk_name) \
static struct clk_hw name ## _mux_hw; \
static struct clk_hw name ## _rate_hw; \
- static struct clk_ops name ## _mux_ops = { \
+ static const struct clk_ops name ## _mux_ops = { \
.get_parent = name ## _get_parent, \
.set_parent = dummy_clk_set_parent, \
}; \
- static struct clk_ops name ## _rate_ops = { \
+ static const struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
}; \
static struct clk * __init clk_register_ ## name(void) \
@@ -38,7 +38,7 @@
#define RATE_RO_OPS(name, clk_name) \
static struct clk_hw name ## _rate_hw; \
- static const struct clk_ops name ## _rate_ops = { \
+ static const struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
}; \
static struct clk * __init clk_register_ ## name(void) \
@@ -53,7 +53,7 @@
#define RATE_OPS(name, clk_name) \
static struct clk_hw name ## _rate_hw; \
- static struct clk_ops name ## _rate_ops = { \
+ static const struct clk_ops name ## _rate_ops = { \
.recalc_rate = name ## _get_rate, \
.set_rate = name ## _set_rate, \
.determine_rate = name ## _determine_rate, \
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 058327310c25..3a965bd326d5 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -413,6 +413,15 @@ config SDM_LPASSCC_845
Say Y if you want to use the LPASS branch clocks of the LPASS clock
controller to reset the LPASS subsystem.
+config SM_DISPCC_8250
+ tristate "SM8150 and SM8250 Display Clock Controller"
+ depends on SM_GCC_8150 || SM_GCC_8250
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8150 and SM8250 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
help
@@ -444,6 +453,24 @@ config SM_GPUCC_8250
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_VIDEOCC_8150
+ tristate "SM8150 Video Clock Controller"
+ select SM_GCC_8150
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM8150 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
+config SM_VIDEOCC_8250
+ tristate "SM8250 Video Clock Controller"
+ select SDM_GCC_8250
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on SM8250 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode and decode.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9677e769e7e9..11ae86febe87 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -64,10 +64,13 @@ obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
+obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
+obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 26139ef005e4..564431130a76 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -609,7 +609,7 @@ static unsigned long
alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
{
/*
- * a contains 16 bit alpha_val in two’s compliment number in the range
+ * a contains 16 bit alpha_val in two’s complement number in the range
* of [-0.5, 0.5).
*/
if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
@@ -641,7 +641,7 @@ alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
quotient++;
/*
- * alpha_val should be in two’s compliment number in the range
+ * alpha_val should be in two’s complement number in the range
* of [-0.5, 0.5) so if quotient >= 0.5 then increment the l value
* since alpha value will be subtracted in this case.
*/
@@ -666,7 +666,7 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
/*
* Depending upon alpha_mode, it can be treated as M/N value or
- * as a two’s compliment number. When alpha_mode=1,
+ * as a two’s complement number. When alpha_mode=1,
* pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
*
* Fout=FIN*(L+(M/N))
@@ -674,12 +674,12 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
* M is a signed number (-128 to 127) and N is unsigned
* (0 to 255). M/N has to be within +/-0.5.
*
- * When alpha_mode=0, it is a two’s compliment number in the
+ * When alpha_mode=0, it is a two’s complement number in the
* range [-0.5, 0.5).
*
* Fout=FIN*(L+(alpha_val)/2^16)
*
- * where alpha_val is two’s compliment number.
+ * where alpha_val is two’s complement number.
*/
if (!(ctl & PLL_ALPHA_MODE))
return alpha_huayra_pll_calc_rate(rate, l, alpha);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 357159fe85b5..59a5a0f261f3 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1182,14 +1182,21 @@ static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rate_request parent_req = *req;
- int ret;
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long num, den;
+ u64 tmp;
- ret = __clk_determine_rate(clk_hw_get_parent(hw), &parent_req);
- if (ret)
- return ret;
+ /* Parent rate is a fixed phy link rate */
+ rational_best_approximation(req->best_parent_rate, req->rate,
+ GENMASK(rcg->mnd_width - 1, 0),
+ GENMASK(rcg->mnd_width - 1, 0), &den, &num);
+
+ if (!num || !den)
+ return -EINVAL;
- req->best_parent_rate = parent_req.rate;
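+ /* Resulting rate is best_parent_rate * M / N, rounded down. */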
+ tmp = req->best_parent_rate * num;
+ do_div(tmp, den);
+ req->rate = tmp;
return 0;
}
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 6cfc1bccb255..14ec659a3a77 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -24,7 +24,11 @@ struct clk_regmap {
unsigned int enable_mask;
bool enable_is_inverted;
};
-#define to_clk_regmap(_hw) container_of(_hw, struct clk_regmap, hw)
+
+static inline struct clk_regmap *to_clk_regmap(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_regmap, hw);
+}
int clk_is_enabled_regmap(struct clk_hw *hw);
int clk_enable_regmap(struct clk_hw *hw);
diff --git a/drivers/clk/qcom/dispcc-sc7180.c b/drivers/clk/qcom/dispcc-sc7180.c
index 0a5d395bce93..f487515701e3 100644
--- a/drivers/clk/qcom/dispcc-sc7180.c
+++ b/drivers/clk/qcom/dispcc-sc7180.c
@@ -202,7 +202,6 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
.name = "disp_cc_mdss_dp_crypto_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
};
@@ -216,7 +215,6 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
.name = "disp_cc_mdss_dp_link_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_byte2_ops,
},
};
@@ -230,7 +228,6 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
.name = "disp_cc_mdss_dp_pixel_clk_src",
.parent_data = disp_cc_parent_data_1,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_dp_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
new file mode 100644
index 000000000000..07a98d3f882d
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -0,0 +1,1107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP_PHY_PLL_LINK_CLK,
+ P_DP_PHY_PLL_VCO_DIV_CLK,
+ P_DPTX1_PHY_PLL_LINK_CLK,
+ P_DPTX1_PHY_PLL_VCO_DIV_CLK,
+ P_DPTX2_PHY_PLL_LINK_CLK,
+ P_DPTX2_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_EDP_PHY_PLL_LINK_CLK,
+ P_EDP_PHY_PLL_VCO_DIV_CLK,
+};
+
+static struct pll_vco vco_table[] = {
+ { 249600000, 2000000000, 0 },
+};
+
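+/* With a 19.2 MHz XO: 19.2 MHz * (0x47 + 0xE000 / 2^16) = 1380 MHz. */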
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x47,
+ .alpha = 0xE000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A699C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_init_data disp_cc_pll0_init = {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = vco_table,
+ .num_vco = ARRAY_SIZE(vco_table),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &disp_cc_pll0_init
+};
+
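+/* With a 19.2 MHz XO: 19.2 MHz * (0x1F + 0x4000 / 2^16) = 600 MHz. */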
+static struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1F,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A699C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_init_data disp_cc_pll1_init = {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = vco_table,
+ .num_vco = ARRAY_SIZE(vco_table),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &disp_cc_pll1_init
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP_PHY_PLL_LINK_CLK, 1 },
+ { P_DP_PHY_PLL_VCO_DIV_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dp_phy_pll_link_clk" },
+ { .fw_name = "dp_phy_pll_vco_div_clk" },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_byteclk" },
+ { .fw_name = "dsi1_phy_pll_out_byteclk" },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "dsi0_phy_pll_out_dsiclk" },
+ { .fw_name = "dsi1_phy_pll_out_dsiclk" },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x22bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x2110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x212c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux1_clk_src = {
+ .cmd_rcgr = 0x2240,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
+ .cmd_rcgr = 0x21dc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dp_link1_clk_src[] = {
+ F(162000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(270000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(540000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(810000000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link1_clk_src = {
+ .cmd_rcgr = 0x220c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_link1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
+ .cmd_rcgr = 0x2178,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_dp_link1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
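+/*
+ * The DP pixel RCGs use their 16-bit M/N stage (clk_dp_ops) to derive
+ * the pixel rate from the DP link clock parent.
+ */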
+static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = {
+ .cmd_rcgr = 0x21c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel2_clk_src = {
+ .cmd_rcgr = 0x21f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel2_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dp_pixel_clk_src = {
+ .cmd_rcgr = 0x21ac,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x2148,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
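+/* PLL1 supplies the fixed mid-range MDP rates; the two top bins divide PLL0. */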
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL1_OUT_MAIN, 7, 0, 0),
+ F(100000000, P_DISP_CC_PLL1_OUT_MAIN, 6, 0, 0),
+ F(150000000, P_DISP_CC_PLL1_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x20c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x2098,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x20b0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(345000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(460000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x20e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x2128,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x2144,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
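+/*
+ * The DP link dividers are not programmed by this driver; the read-only
+ * divider ops only report back the value already set in hardware.
+ */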
+static struct clk_regmap_div disp_cc_mdss_dp_link1_div_clk_src = {
+ .reg = 0x2224,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dp_link_div_clk_src = {
+ .reg = 0x2190,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dp_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x2030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux1_clk = {
+ .halt_reg = 0x2068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_aux1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_aux_clk = {
+ .halt_reg = 0x2054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link1_clk = {
+ .halt_reg = 0x205c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = {
+ .halt_reg = 0x2060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link1_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_link_intf_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel1_clk = {
+ .halt_reg = 0x2050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel2_clk = {
+ .halt_reg = 0x2058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_pixel2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dp_pixel_clk = {
+ .halt_reg = 0x204c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_dp_pixel_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_dp_pixel_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x201c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0x400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0x4008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
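+/* The MDSS domain uses HW_CTRL, handing on/off control to hardware once enabled. */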
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct clk_regmap *disp_cc_sm8250_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX1_CLK] = &disp_cc_mdss_dp_aux1_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX1_CLK_SRC] = &disp_cc_mdss_dp_aux1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
+ [DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK1_CLK] = &disp_cc_mdss_dp_link1_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK1_CLK_SRC] = &disp_cc_mdss_dp_link1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC] = &disp_cc_mdss_dp_link1_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK1_INTF_CLK] = &disp_cc_mdss_dp_link1_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK] = &disp_cc_mdss_dp_link_clk.clkr,
+ [DISP_CC_MDSS_DP_LINK_CLK_SRC] = &disp_cc_mdss_dp_link_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dp_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DP_LINK_INTF_CLK] = &disp_cc_mdss_dp_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK] = &disp_cc_mdss_dp_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL1_CLK_SRC] = &disp_cc_mdss_dp_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL2_CLK] = &disp_cc_mdss_dp_pixel2_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL2_CLK_SRC] = &disp_cc_mdss_dp_pixel2_clk_src.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK] = &disp_cc_mdss_dp_pixel_clk.clkr,
+ [DISP_CC_MDSS_DP_PIXEL_CLK_SRC] = &disp_cc_mdss_dp_pixel_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8250_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x2000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x4000 },
+};
+
+static struct gdsc *disp_cc_sm8250_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8250_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm8250_desc = {
+ .config = &disp_cc_sm8250_regmap_config,
+ .clks = disp_cc_sm8250_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8250_clocks),
+ .resets = disp_cc_sm8250_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8250_resets),
+ .gdscs = disp_cc_sm8250_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8250_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8250_match_table[] = {
+ { .compatible = "qcom,sm8150-dispcc" },
+ { .compatible = "qcom,sm8250-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
+
+static int disp_cc_sm8250_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8250_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* note: trion == lucid, except for the prepare() op */
+ BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8150-dispcc")) {
+ disp_cc_pll0_config.config_ctl_hi_val = 0x00002267;
+ disp_cc_pll0_config.config_ctl_hi1_val = 0x00000024;
+ disp_cc_pll0_config.user_ctl_hi1_val = 0x000000D0;
+ disp_cc_pll0_init.ops = &clk_alpha_pll_trion_ops;
+ disp_cc_pll1_config.config_ctl_hi_val = 0x00002267;
+ disp_cc_pll1_config.config_ctl_hi1_val = 0x00000024;
+ disp_cc_pll1_config.user_ctl_hi1_val = 0x000000D0;
+ disp_cc_pll1_init.ops = &clk_alpha_pll_trion_ops;
+ }
+
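+ /* Per the BUILD_BUG_ON above, the Lucid configure sequence also covers Trion. */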
+ clk_lucid_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, 0x8000, 0x10, 0x10);
+
+ /* DISP_CC_XO_CLK always-on */
+ regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
+}
+
+static struct platform_driver disp_cc_sm8250_driver = {
+ .probe = disp_cc_sm8250_probe,
+ .driver = {
+ .name = "disp_cc-sm8250",
+ .of_match_table = disp_cc_sm8250_match_table,
+ },
+};
+
+static int __init disp_cc_sm8250_init(void)
+{
+ return platform_driver_register(&disp_cc_sm8250_driver);
+}
+subsys_initcall(disp_cc_sm8250_init);
+
+static void __exit disp_cc_sm8250_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sm8250_driver);
+}
+module_exit(disp_cc_sm8250_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8250 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index ef2c9c4cf9ab..108fe27bee10 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4322,7 +4322,7 @@ static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
{ }
};
-struct clk_rcg2 pcie0_rchng_clk_src = {
+static struct clk_rcg2 pcie0_rchng_clk_src = {
.cmd_rcgr = 0x75070,
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 778354f82b1e..39ebb443ae3d 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -595,24 +595,12 @@ static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep_parent_data[] = {
{ .fw_name = "sleep_clk", .name = "sleep_clk" },
};
-static const struct parent_map gcc_xo_gpll6_gpll0_map[] = {
- { P_XO, 0 },
- { P_GPLL6, 1 },
- { P_GPLL0, 2 },
-};
-
static const struct clk_parent_data gcc_xo_gpll6_gpll0_parent_data[] = {
{ .fw_name = "xo" },
{ .hw = &gpll6_vote.hw },
{ .hw = &gpll0_vote.hw },
};
-static const struct parent_map gcc_xo_gpll6_gpll0a_map[] = {
- { P_XO, 0 },
- { P_GPLL6, 1 },
- { P_GPLL0_AUX, 2 },
-};
-
static const struct clk_parent_data gcc_xo_gpll6_gpll0a_parent_data[] = {
{ .fw_name = "xo" },
{ .hw = &gpll6_vote.hw },
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index b7fc8c7ba195..144d2ba7a9be 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -20,6 +20,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -1772,6 +1773,32 @@ static struct clk_branch gcc_gp3_clk = {
},
};
+static struct clk_branch gcc_lpass_q6_axi_clk = {
+ .halt_reg = 0x0280,
+ .clkr = {
+ .enable_reg = 0x0280,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_lpass_q6_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x0284,
+ .clkr = {
+ .enable_reg = 0x0284,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_0_aux_clk = {
.halt_reg = 0x1ad4,
.clkr = {
@@ -1790,6 +1817,32 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
},
};
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x1ad0,
+ .clkr = {
+ .enable_reg = 0x1ad0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x1acc,
+ .clkr = {
+ .enable_reg = 0x1acc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_0_pipe_clk = {
.halt_reg = 0x1ad8,
.halt_check = BRANCH_HALT_DELAY,
@@ -1809,6 +1862,20 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
},
};
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x1ac8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1ac8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_1_aux_clk = {
.halt_reg = 0x1b54,
.clkr = {
@@ -1827,6 +1894,32 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
},
};
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x1b54,
+ .clkr = {
+ .enable_reg = 0x1b54,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x1b50,
+ .clkr = {
+ .enable_reg = 0x1b50,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pcie_1_pipe_clk = {
.halt_reg = 0x1b58,
.halt_check = BRANCH_HALT_DELAY,
@@ -1846,6 +1939,19 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
},
};
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x1b48,
+ .clkr = {
+ .enable_reg = 0x1b48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pdm2_clk = {
.halt_reg = 0x0ccc,
.clkr = {
@@ -1864,6 +1970,19 @@ static struct clk_branch gcc_pdm2_clk = {
},
};
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x0cc4,
+ .clkr = {
+ .enable_reg = 0x0cc4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc1_apps_clk = {
.halt_reg = 0x04c4,
.clkr = {
@@ -1899,6 +2018,23 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
},
};
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x0508,
+ .clkr = {
+ .enable_reg = 0x0508,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc2_apps_clk = {
.halt_reg = 0x0504,
.clkr = {
@@ -1917,6 +2053,23 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
},
};
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x0548,
+ .clkr = {
+ .enable_reg = 0x0548,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc3_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc3_apps_clk = {
.halt_reg = 0x0544,
.clkr = {
@@ -1935,6 +2088,23 @@ static struct clk_branch gcc_sdcc3_apps_clk = {
},
};
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x0588,
+ .clkr = {
+ .enable_reg = 0x0588,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_sdcc4_ahb_clk",
+ .parent_names = (const char *[]){
+ "periph_noc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc4_apps_clk = {
.halt_reg = 0x0584,
.clkr = {
@@ -1989,6 +2159,19 @@ static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
},
};
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x0d84,
+ .clkr = {
+ .enable_reg = 0x0d84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_tsif_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_tsif_ref_clk = {
.halt_reg = 0x0d88,
.clkr = {
@@ -2007,6 +2190,19 @@ static struct clk_branch gcc_tsif_ref_clk = {
},
};
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x1d4c,
+ .clkr = {
+ .enable_reg = 0x1d4c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_axi_clk = {
.halt_reg = 0x1d48,
.clkr = {
@@ -2043,6 +2239,34 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
},
};
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .halt_reg = 0x1d60,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1d60,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .halt_reg = 0x1d64,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1d64,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_tx_cfg_clk = {
.halt_reg = 0x1d50,
.clkr = {
@@ -2061,6 +2285,47 @@ static struct clk_branch gcc_ufs_tx_cfg_clk = {
},
};
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .halt_reg = 0x1d58,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1d58,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
+ .halt_reg = 0x1d5c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1d5c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_ufs_tx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_hs_phy_sleep_clk = {
+ .halt_reg = 0x04ac,
+ .clkr = {
+ .enable_reg = 0x04ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb2_hs_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb30_master_clk = {
.halt_reg = 0x03c8,
.clkr = {
@@ -2097,6 +2362,19 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
},
};
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x03cc,
+ .clkr = {
+ .enable_reg = 0x03cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_phy_aux_clk = {
.halt_reg = 0x1408,
.clkr = {
@@ -2115,6 +2393,19 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x0488,
+ .clkr = {
+ .enable_reg = 0x0488,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb_hs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb_hs_system_clk = {
.halt_reg = 0x0484,
.clkr = {
@@ -2133,6 +2424,59 @@ static struct clk_branch gcc_usb_hs_system_clk = {
},
};
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x1a84,
+ .clkr = {
+ .enable_reg = 0x1a84,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data)
+ {
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_gdsc = {
+ .gdscr = 0x1e18,
+ .pd = {
+ .name = "pcie",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x1ac4,
+ .pd = {
+ .name = "pcie_0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x1b44,
+ .pd = {
+ .name = "pcie_1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x3c4,
+ .pd = {
+ .name = "usb30",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x1d44,
+ .pd = {
+ .name = "ufs",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0_EARLY] = &gpll0_early.clkr,
[GPLL0] = &gpll0.clkr,
@@ -2233,26 +2577,64 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_LPASS_Q6_AXI_CLK] = &gcc_lpass_q6_axi_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
[GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
[GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
[GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
[GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
[GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
- [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
[GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
[GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
[GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
[GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
[GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
[GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_1_CLK] = &gcc_ufs_tx_symbol_1_clk.clkr,
+ [GCC_USB2_HS_PHY_SLEEP_CLK] = &gcc_usb2_hs_phy_sleep_clk.clkr,
[GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
[GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
[GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
[GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8994_gdscs[] = {
+ [PCIE_GDSC] = &pcie_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8994_resets[] = {
+ [USB3_PHY_RESET] = { 0x1400 },
+ [USB3PHY_PHY_RESET] = { 0x1404 },
+ [PCIE_PHY_0_RESET] = { 0x1b18 },
+ [PCIE_PHY_1_RESET] = { 0x1b98 },
+ [QUSB2_PHY_RESET] = { 0x04b8 },
};
static const struct regmap_config gcc_msm8994_regmap_config = {
@@ -2267,6 +2649,10 @@ static const struct qcom_cc_desc gcc_msm8994_desc = {
.config = &gcc_msm8994_regmap_config,
.clks = gcc_msm8994_clocks,
.num_clks = ARRAY_SIZE(gcc_msm8994_clocks),
+ .resets = gcc_msm8994_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8994_resets),
+ .gdscs = gcc_msm8994_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8994_gdscs),
};
static const struct of_device_id gcc_msm8994_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index f0b47b7d50ca..31258795e7b8 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.cmd_rcgr = 0x48044,
.mnd_width = 0,
.hid_width = 5,
- .parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
+ .parent_map = gcc_parent_map_xo_gpll0,
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index bfc4ac02f9ea..af26e0695b86 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -358,6 +358,14 @@ static int gdsc_init(struct gdsc *sc)
if ((sc->flags & VOTABLE) && on)
gdsc_enable(&sc->pd);
+ /*
+ * Make sure the retain bit is set if the GDSC is already on, otherwise
+ * we end up turning off the GDSC and destroying all the register
+ * contents that we thought we were saving.
+ */
+ if ((sc->flags & RETAIN_FF_ENABLE) && on)
+ gdsc_retain_ff_on(sc);
+
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
if (sc->flags & ALWAYS_ON) {
if (!on)
diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c
new file mode 100644
index 000000000000..3087e2ec8fd4
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8150.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sm8150.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_VIDEO_PLL0_OUT_EVEN,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL0_OUT_ODD,
+};
+
+static struct pll_vco trion_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct alpha_pll_config video_pll0_config = {
+ .l = 0x14,
+ .alpha = 0xD555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002267,
+ .config_ctl_hi1_val = 0x00000024,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x000000D0,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .vco_table = trion_vco,
+ .num_vco = ARRAY_SIZE(trion_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_trion_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &video_pll0.clkr.hw },
+};
+
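+/*
+ * The iris RCG keeps a fixed divide-by-2; rate changes are made by
+ * reprogramming video_pll0 through CLK_SET_RATE_PARENT.
+ */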
+static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(338000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(365000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(444000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ F(533000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_iris_clk_src = {
+ .cmd_rcgr = 0x7f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_iris_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_iris_clk_src",
+ .parent_data = video_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch video_cc_iris_ahb_clk = {
+ .halt_reg = 0x8f4,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_iris_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_core_clk = {
+ .halt_reg = 0x890,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x890,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_core_clk = {
+ .halt_reg = 0x8d0,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs1_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvsc_core_clk = {
+ .halt_reg = 0x850,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x850,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvsc_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_iris_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x814,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .flags = 0,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x874,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec1_gdsc = {
+ .gdscr = 0x8b4,
+ .pd = {
+ .name = "vcodec1_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sm8150_clocks[] = {
+ [VIDEO_CC_IRIS_AHB_CLK] = &video_cc_iris_ahb_clk.clkr,
+ [VIDEO_CC_IRIS_CLK_SRC] = &video_cc_iris_clk_src.clkr,
+ [VIDEO_CC_MVS0_CORE_CLK] = &video_cc_mvs0_core_clk.clkr,
+ [VIDEO_CC_MVS1_CORE_CLK] = &video_cc_mvs1_core_clk.clkr,
+ [VIDEO_CC_MVSC_CORE_CLK] = &video_cc_mvsc_core_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_pll0.clkr,
+};
+
+static struct gdsc *video_cc_sm8150_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [VCODEC1_GDSC] = &vcodec1_gdsc,
+};
+
+static const struct regmap_config video_cc_sm8150_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb94,
+ .fast_io = true,
+};
+
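+/* Reset map entries give the register and the bit asserted within it. */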
+static const struct qcom_reset_map video_cc_sm8150_resets[] = {
+ [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 },
+};
+
+static const struct qcom_cc_desc video_cc_sm8150_desc = {
+ .config = &video_cc_sm8150_regmap_config,
+ .clks = video_cc_sm8150_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8150_clocks),
+ .resets = video_cc_sm8150_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8150_resets),
+ .gdscs = video_cc_sm8150_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8150_gdscs),
+};
+
+static const struct of_device_id video_cc_sm8150_match_table[] = {
+ { .compatible = "qcom,sm8150-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8150_match_table);
+
+static int video_cc_sm8150_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm8150_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
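+ /* Program video_pll0 before registering the clocks that parent off it. */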
+ clk_trion_pll_configure(&video_pll0, regmap, &video_pll0_config);
+
+ /* Keep VIDEO_CC_XO_CLK ALWAYS-ON */
+ regmap_update_bits(regmap, 0x984, 0x1, 0x1);
+
+ return qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap);
+}
+
+static struct platform_driver video_cc_sm8150_driver = {
+ .probe = video_cc_sm8150_probe,
+ .driver = {
+ .name = "video_cc-sm8150",
+ .of_match_table = video_cc_sm8150_match_table,
+ },
+};
+
+static int __init video_cc_sm8150_init(void)
+{
+ return platform_driver_register(&video_cc_sm8150_driver);
+}
+subsys_initcall(video_cc_sm8150_init);
+
+static void __exit video_cc_sm8150_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8150_driver);
+}
+module_exit(video_cc_sm8150_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI VIDEOCC SM8150 Driver");
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
new file mode 100644
index 000000000000..2797c61f5938
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,videocc-sm8250.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL1_OUT_MAIN,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x25,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A699C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x42c,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_pll1_config = {
+ .l = 0x2B,
+ .alpha = 0xC000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A699C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll video_pll1 = {
+ .offset = 0x7d0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "video_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &video_pll1.clkr.hw },
+};
+
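+/* The MVS tables divide by 1: each rate is reached by reprogramming the source PLL. */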
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(720000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0xb94,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(840000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0xbb4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0xc54,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0xcf4,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0xc34,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc34,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs0c_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_div2_clk = {
+ .halt_reg = 0xdf4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xdf4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs1_div2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0xcd4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xcd4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_cc_mvs1c_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mvs0c_gdsc = {
+ .gdscr = 0xbf8,
+ .pd = {
+ .name = "mvs0c_gdsc",
+ },
+ .flags = 0,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs1c_gdsc = {
+ .gdscr = 0xc98,
+ .pd = {
+ .name = "mvs1c_gdsc",
+ },
+ .flags = 0,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs0_gdsc = {
+ .gdscr = 0xd18,
+ .pd = {
+ .name = "mvs0_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mvs1_gdsc = {
+ .gdscr = 0xd98,
+ .pd = {
+ .name = "mvs1_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *video_cc_sm8250_clocks[] = {
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV2_CLK] = &video_cc_mvs1_div2_clk.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_PLL0] = &video_pll0.clkr,
+ [VIDEO_CC_PLL1] = &video_pll1.clkr,
+};
+
+static const struct qcom_reset_map video_cc_sm8250_resets[] = {
+ [VIDEO_CC_CVP_INTERFACE_BCR] = { 0xe54 },
+ [VIDEO_CC_CVP_MVS0_BCR] = { 0xd14 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0xc34, 2 },
+ [VIDEO_CC_CVP_MVS0C_BCR] = { 0xbf4 },
+ [VIDEO_CC_CVP_MVS1_BCR] = { 0xd94 },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0xcd4, 2 },
+ [VIDEO_CC_CVP_MVS1C_BCR] = { 0xc94 },
+};
+
+static struct gdsc *video_cc_sm8250_gdscs[] = {
+ [MVS0C_GDSC] = &mvs0c_gdsc,
+ [MVS1C_GDSC] = &mvs1c_gdsc,
+ [MVS0_GDSC] = &mvs0_gdsc,
+ [MVS1_GDSC] = &mvs1_gdsc,
+};
+
+static const struct regmap_config video_cc_sm8250_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf4c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc video_cc_sm8250_desc = {
+ .config = &video_cc_sm8250_regmap_config,
+ .clks = video_cc_sm8250_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8250_clocks),
+ .resets = video_cc_sm8250_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8250_resets),
+ .gdscs = video_cc_sm8250_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8250_gdscs),
+};
+
+static const struct of_device_id video_cc_sm8250_match_table[] = {
+ { .compatible = "qcom,sm8250-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8250_match_table);
+
+static int video_cc_sm8250_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sm8250_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
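+ /* Configure both video PLLs before the clocks are registered. */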
+ clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
+ clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
+
+ /* Keep VIDEO_CC_AHB_CLK and VIDEO_CC_XO_CLK ALWAYS-ON */
+ regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
+}
+
+static struct platform_driver video_cc_sm8250_driver = {
+ .probe = video_cc_sm8250_probe,
+ .driver = {
+ .name = "sm8250-videocc",
+ .of_match_table = video_cc_sm8250_match_table,
+ },
+};
+
+static int __init video_cc_sm8250_init(void)
+{
+ return platform_driver_register(&video_cc_sm8250_driver);
+}
+subsys_initcall(video_cc_sm8250_init);
+
+static void __exit video_cc_sm8250_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8250_driver);
+}
+module_exit(video_cc_sm8250_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI VIDEOCC SM8250 Driver");
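
One remark on the probe above: the two regmap_update_bits() calls read-modify-write bit 0 of each branch's enable register so that the AHB and XO branch clocks stay running even though no Linux consumer ever claims them. A minimal sketch of the equivalent open-coded MMIO sequence, assuming a hypothetical ioremapped base instead of the driver's regmap:

#include <linux/bits.h>
#include <linux/io.h>

/* Rough equivalent of regmap_update_bits(regmap, reg, BIT(0), BIT(0));
 * 'base' is a hypothetical ioremapped block, not taken from the patch. */
static void videocc_keep_branch_on(void __iomem *base, unsigned int reg)
{
	u32 val = readl(base + reg);

	val &= ~BIT(0);		/* clear the masked field... */
	val |= BIT(0);		/* ...then set CLK_ENABLE */
	writel(val, base + reg);
}
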
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 28e8730ce263..18915d668a30 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -30,6 +30,7 @@ config CLK_RENESAS
select CLK_R8A77980 if ARCH_R8A77980
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
+ select CLK_R8A779A0 if ARCH_R8A779A0
select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_SH73A0 if ARCH_SH73A0
@@ -145,6 +146,10 @@ config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A779A0
+ bool "R-Car V3U clock support" if COMPILE_TEST
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_R9A06G032
bool "Renesas R9A06G032 clock driver"
help
@@ -162,7 +167,7 @@ config CLK_RCAR_GEN2_CPG
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_GEN3_CPG
- bool "R-Car Gen3 CPG clock support" if COMPILE_TEST
+ bool "R-Car Gen3 and RZ/G2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
config CLK_RCAR_USB2_CLOCK_SEL
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index c7c03ab9a6a3..c803912ef2ce 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o
obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index 443bff08df4c..a85227c248f3 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -214,7 +214,7 @@ const struct cpg_mssr_info r7s9210_cpg_mssr_info __initconst = {
.cpg_clk_register = rza2_cpg_clk_register,
/* RZ/A2 has Standby Control Registers */
- .stbyctrl = true,
+ .reg_layout = CLK_REG_LAYOUT_RZ_A,
};
static void __init r7s9210_cpg_mssr_early_init(struct device_node *np)
diff --git a/drivers/clk/renesas/r8a7742-cpg-mssr.c b/drivers/clk/renesas/r8a7742-cpg-mssr.c
index e919828668a4..e541489bd1cd 100644
--- a/drivers/clk/renesas/r8a7742-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7742-cpg-mssr.c
@@ -97,7 +97,8 @@ static const struct mssr_mod_clk r8a7742_mod_clks[] __initconst = {
DEF_MOD("tmu0", 125, R8A7742_CLK_CP),
DEF_MOD("vsp1du1", 127, R8A7742_CLK_ZS),
DEF_MOD("vsp1du0", 128, R8A7742_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7742_CLK_ZS),
+ DEF_MOD("vspr", 130, R8A7742_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7742_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7742_CLK_MP),
DEF_MOD("scifa1", 203, R8A7742_CLK_MP),
DEF_MOD("scifa0", 204, R8A7742_CLK_MP),
diff --git a/drivers/clk/renesas/r8a7743-cpg-mssr.c b/drivers/clk/renesas/r8a7743-cpg-mssr.c
index c01d9af2525a..0bba12a48d22 100644
--- a/drivers/clk/renesas/r8a7743-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7743-cpg-mssr.c
@@ -92,7 +92,7 @@ static const struct mssr_mod_clk r8a7743_mod_clks[] __initconst = {
DEF_MOD("tmu0", 125, R8A7743_CLK_CP),
DEF_MOD("vsp1du1", 127, R8A7743_CLK_ZS),
DEF_MOD("vsp1du0", 128, R8A7743_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7743_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7743_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7743_CLK_MP),
DEF_MOD("scifa1", 203, R8A7743_CLK_MP),
DEF_MOD("scifa0", 204, R8A7743_CLK_MP),
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 493874e5ebee..dc4a64e8dfb5 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -90,7 +90,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("cmt0", 124, R8A7745_CLK_R),
DEF_MOD("tmu0", 125, R8A7745_CLK_CP),
DEF_MOD("vsp1du0", 128, R8A7745_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7745_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7745_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7745_CLK_MP),
DEF_MOD("scifa1", 203, R8A7745_CLK_MP),
DEF_MOD("scifa0", 204, R8A7745_CLK_MP),
diff --git a/drivers/clk/renesas/r8a77470-cpg-mssr.c b/drivers/clk/renesas/r8a77470-cpg-mssr.c
index d81ae65f0d18..f3d6e65011d7 100644
--- a/drivers/clk/renesas/r8a77470-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77470-cpg-mssr.c
@@ -85,7 +85,7 @@ static const struct mssr_mod_clk r8a77470_mod_clks[] __initconst = {
DEF_MOD("tmu2", 122, R8A77470_CLK_P),
DEF_MOD("cmt0", 124, R8A77470_CLK_R),
DEF_MOD("vsp1du0", 128, R8A77470_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A77470_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A77470_CLK_ZS),
DEF_MOD("msiof2", 205, R8A77470_CLK_MP),
DEF_MOD("msiof1", 208, R8A77470_CLK_MP),
DEF_MOD("sys-dmac1", 218, R8A77470_CLK_ZS),
diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c
index c57cb93f8315..f7d233e0c142 100644
--- a/drivers/clk/renesas/r8a7790-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c
@@ -108,8 +108,8 @@ static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
DEF_MOD("tmu0", 125, R8A7790_CLK_CP),
DEF_MOD("vsp1du1", 127, R8A7790_CLK_ZS),
DEF_MOD("vsp1du0", 128, R8A7790_CLK_ZS),
- DEF_MOD("vsp1-rt", 130, R8A7790_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7790_CLK_ZS),
+ DEF_MOD("vspr", 130, R8A7790_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7790_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7790_CLK_MP),
DEF_MOD("scifa1", 203, R8A7790_CLK_MP),
DEF_MOD("scifa0", 204, R8A7790_CLK_MP),
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
index 65702debcabb..a0de784868da 100644
--- a/drivers/clk/renesas/r8a7791-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -102,7 +102,7 @@ static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
DEF_MOD("tmu0", 125, R8A7791_CLK_CP),
DEF_MOD("vsp1du1", 127, R8A7791_CLK_ZS),
DEF_MOD("vsp1du0", 128, R8A7791_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7791_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7791_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7791_CLK_MP),
DEF_MOD("scifa1", 203, R8A7791_CLK_MP),
DEF_MOD("scifa0", 204, R8A7791_CLK_MP),
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
index cf8b84a3a060..77af250876a5 100644
--- a/drivers/clk/renesas/r8a7792-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -88,7 +88,7 @@ static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
DEF_MOD("tmu0", 125, R8A7792_CLK_CP),
DEF_MOD("vsp1du1", 127, R8A7792_CLK_ZS),
DEF_MOD("vsp1du0", 128, R8A7792_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7792_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7792_CLK_ZS),
DEF_MOD("msiof1", 208, R8A7792_CLK_MP),
DEF_MOD("sys-dmac1", 218, R8A7792_CLK_ZS),
DEF_MOD("sys-dmac0", 219, R8A7792_CLK_ZS),
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
index c1948693c5c1..4d7fa26a72c9 100644
--- a/drivers/clk/renesas/r8a7794-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -97,7 +97,7 @@ static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
DEF_MOD("cmt0", 124, R8A7794_CLK_R),
DEF_MOD("tmu0", 125, R8A7794_CLK_CP),
DEF_MOD("vsp1du0", 128, R8A7794_CLK_ZS),
- DEF_MOD("vsp1-sy", 131, R8A7794_CLK_ZS),
+ DEF_MOD("vsps", 131, R8A7794_CLK_ZS),
DEF_MOD("scifa2", 202, R8A7794_CLK_MP),
DEF_MOD("scifa1", 203, R8A7794_CLK_MP),
DEF_MOD("scifa0", 204, R8A7794_CLK_MP),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
new file mode 100644
index 000000000000..17ebbac7ddfb
--- /dev/null
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a779a0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ */
+
+#include <linux/bug.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum rcar_r8a779a0_clk_types {
+ CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_R8A779A0_PLL1,
+ CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
+ CLK_TYPE_R8A779A0_PLL5,
+ CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
+ CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
+};
+
+struct rcar_r8a779a0_cpg_pll_config {
+ u8 extal_div;
+ u8 pll1_mult;
+ u8 pll1_div;
+ u8 pll5_mult;
+ u8 pll5_div;
+ u8 osc_prediv;
+};
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A779A0_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL20,
+ CLK_PLL21,
+ CLK_PLL30,
+ CLK_PLL31,
+ CLK_PLL5,
+ CLK_PLL1_DIV2,
+ CLK_PLL20_DIV2,
+ CLK_PLL21_DIV2,
+ CLK_PLL30_DIV2,
+ CLK_PLL31_DIV2,
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+#define DEF_PLL(_name, _id, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
+ .offset = _offset)
+
+static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_R8A779A0_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_R8A779A0_PLL1, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_R8A779A0_PLL5, CLK_MAIN),
+ DEF_PLL(".pll20", CLK_PLL20, 0x0834),
+ DEF_PLL(".pll21", CLK_PLL21, 0x0838),
+ DEF_PLL(".pll30", CLK_PLL30, 0x083c),
+ DEF_PLL(".pll31", CLK_PLL31, 0x0840),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1),
+ DEF_FIXED(".pll21_div2", CLK_PLL21_DIV2, CLK_PLL21, 2, 1),
+ DEF_FIXED(".pll30_div2", CLK_PLL30_DIV2, CLK_PLL30, 2, 1),
+ DEF_FIXED(".pll31_div2", CLK_PLL31_DIV2, CLK_PLL31, 2, 1),
+ DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1),
+ DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
+ DEF_RATE(".oco", CLK_OCO, 32768),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
+ DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A779A0_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s1d8", R8A779A0_CLK_S1D8, CLK_S1, 8, 1),
+ DEF_FIXED("s1d12", R8A779A0_CLK_S1D12, CLK_S1, 12, 1),
+ DEF_FIXED("s3d1", R8A779A0_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A779A0_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A779A0_CLK_S3D4, CLK_S3, 4, 1),
+ DEF_FIXED("zs", R8A779A0_CLK_ZS, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zt", R8A779A0_CLK_ZT, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("ztr", R8A779A0_CLK_ZTR, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("zr", R8A779A0_CLK_ZR, CLK_PLL1_DIV2, 1, 1),
+ DEF_FIXED("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 1, 1),
+ DEF_FIXED("cnndsp", R8A779A0_CLK_CNNDSP, CLK_PLL5_DIV4, 1, 1),
+ DEF_FIXED("vip", R8A779A0_CLK_VIP, CLK_PLL5, 5, 1),
+ DEF_FIXED("adgh", R8A779A0_CLK_ADGH, CLK_PLL5_DIV4, 1, 1),
+ DEF_FIXED("icu", R8A779A0_CLK_ICU, CLK_PLL5_DIV4, 2, 1),
+ DEF_FIXED("icud2", R8A779A0_CLK_ICUD2, CLK_PLL5_DIV4, 4, 1),
+ DEF_FIXED("vcbus", R8A779A0_CLK_VCBUS, CLK_PLL5_DIV4, 1, 1),
+ DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_MAIN, 2, 1),
+
+ DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
+ DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
+
+ DEF_GEN3_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN3_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+};
+
+static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
+ DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
+ DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
+ DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
+};
+
+static spinlock_t cpg_lock;
+
+static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
+static unsigned int cpg_clk_extalr __initdata;
+static u32 cpg_mode __initdata;
+
+struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
+{
+ const struct clk *parent;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ u32 value;
+
+ parent = clks[core->parent & 0xffff]; /* some types use high bits */
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ switch (core->type) {
+ case CLK_TYPE_R8A779A0_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_R8A779A0_PLL1:
+ mult = cpg_pll_config->pll1_mult;
+ div = cpg_pll_config->pll1_div;
+ break;
+
+ case CLK_TYPE_R8A779A0_PLL2X_3X:
+ value = readl(base + core->offset);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_R8A779A0_PLL5:
+ mult = cpg_pll_config->pll5_mult;
+ div = cpg_pll_config->pll5_div;
+ break;
+
+ case CLK_TYPE_R8A779A0_MDSEL:
+ /*
+ * Clock selectable between two parents and two fixed dividers
+ * using a mode pin
+ */
+ if (cpg_mode & BIT(core->offset)) {
+ div = core->div & 0xffff;
+ } else {
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div >> 16;
+ }
+ mult = 1;
+ break;
+
+ case CLK_TYPE_R8A779A0_OSC:
+ /*
+ * Clock combining OSC EXTAL predivider and a fixed divider
+ */
+ div = cpg_pll_config->osc_prediv * core->div;
+ break;
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_register_fixed_factor(NULL, core->name,
+ __clk_get_name(parent), 0, mult, div);
+}
+
+/*
+ * CPG Clock Data
+ */
+/*
+ *   MD    EXTAL      PLL1    PLL20   PLL30   PLL4    PLL5    OSC
+ * 14 13   (MHz)              PLL21   PLL31
+ * --------------------------------------------------------
+ * 0  0    16.66 x 1  x128    x216    x128    x144    x192    /16
+ * 0  1    20    x 1  x106    x180    x106    x120    x160    /19
+ * 1  0    Prohibited setting
+ * 1  1    33.33 / 2  x128    x216    x128    x144    x192    /32
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
+static const struct rcar_r8a779a0_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
+ { 1, 128, 1, 192, 1, 16, },
+ { 1, 106, 1, 160, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 192, 1, 32, },
+};
+
+static int __init r8a779a0_cpg_mssr_init(struct device *dev)
+{
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ cpg_clk_extalr = CLK_EXTALR;
+ spin_lock_init(&cpg_lock);
+
+ return 0;
+}
+
+const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a779a0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a779a0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a779a0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a779a0_mod_clks),
+ .num_hw_mod_clks = 15 * 32,
+
+ /* Callbacks */
+ .init = r8a779a0_cpg_mssr_init,
+ .cpg_clk_register = rcar_r8a779a0_cpg_clk_register,
+
+ .reg_layout = CLK_REG_LAYOUT_RCAR_V3U,
+};
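
The MD14/MD13 decoding above is easy to check by hand: ((md) & BIT(14)) >> 13 lands on bit 1 and ((md) & BIT(13)) >> 13 on bit 0, so the two mode pins form a two-bit index straight into cpg_pll_configs[]. A self-contained sketch of the arithmetic (plain userspace C, just to show the values):

#include <stdio.h>

#define BIT(n)				(1u << (n))
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 13) | \
					 (((md) & BIT(13)) >> 13))

int main(void)
{
	/* Boards strapped for a 33.33 MHz EXTAL set MD14 = MD13 = 1 */
	unsigned int mode = BIT(14) | BIT(13);

	/* Prints 3: the { 2, 128, 1, 192, 1, 32 } row, i.e. EXTAL / 2,
	 * PLL1 x128, PLL5 x192 and OSC predivider /32 */
	printf("index = %u\n", CPG_PLL_CONFIG_INDEX(mode));
	return 0;
}
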
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 5a306d28738c..94db88370337 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -57,8 +57,10 @@ static const u16 mstpsr[] = {
0x9A0, 0x9A4, 0x9A8, 0x9AC,
};
-#define MSTPSR(i) mstpsr[i]
-
+static const u16 mstpsr_for_v3u[] = {
+ 0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
+ 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38,
+};
/*
* System Module Stop Control Register offsets
@@ -69,7 +71,10 @@ static const u16 smstpcr[] = {
0x990, 0x994, 0x998, 0x99C,
};
-#define SMSTPCR(i) smstpcr[i]
+static const u16 mstpcr_for_v3u[] = {
+ 0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
+ 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38,
+};
/*
* Standby Control Register offsets (RZ/A)
@@ -81,8 +86,6 @@ static const u16 stbcr[] = {
0x424, 0x428, 0x42C,
};
-#define STBCR(i) stbcr[i]
-
/*
* Software Reset Register offsets
*/
@@ -92,8 +95,10 @@ static const u16 srcr[] = {
0x920, 0x924, 0x928, 0x92C,
};
-#define SRCR(i) srcr[i]
-
+static const u16 srcr_for_v3u[] = {
+ 0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
+ 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38,
+};
/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i) (smstpcr[i] - 0x20)
@@ -102,8 +107,16 @@ static const u16 srcr[] = {
#define MMSTPCR(i) (smstpcr[i] + 0x20)
/* Software Reset Clearing Register offsets */
-#define SRSTCLR(i) (0x940 + (i) * 4)
+static const u16 srstclr[] = {
+ 0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
+ 0x960, 0x964, 0x968, 0x96C,
+};
+
+static const u16 srstclr_for_v3u[] = {
+ 0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
+ 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8,
+};
/**
* Clock Pulse Generator / Module Standby and Software Reset Private Data
@@ -111,13 +124,17 @@ static const u16 srcr[] = {
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
* @base: CPG/MSSR register block base address
+ * @reg_layout: CPG/MSSR register layout
* @rmw_lock: protects RMW register accesses
* @np: Device node in DT for this CPG/MSSR module
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
- * @stbyctrl: This device has Standby Control Registers
* @notifiers: Notifier chain to save/restore clock state for system resume
+ * @status_regs: Pointer to status registers array
+ * @control_regs: Pointer to control registers array
+ * @reset_regs: Pointer to reset registers array
+ * @reset_clear_regs: Pointer to reset clearing registers array
* @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
* @smstpcr_saved[].val: Saved values of SMSTPCR[]
* @clks: Array containing all Core and Module Clocks
@@ -128,19 +145,23 @@ struct cpg_mssr_priv {
#endif
struct device *dev;
void __iomem *base;
+ enum clk_reg_layout reg_layout;
spinlock_t rmw_lock;
struct device_node *np;
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
- bool stbyctrl;
struct raw_notifier_head notifiers;
+ const u16 *status_regs;
+ const u16 *control_regs;
+ const u16 *reset_regs;
+ const u16 *reset_clear_regs;
struct {
u32 mask;
u32 val;
- } smstpcr_saved[ARRAY_SIZE(smstpcr)];
+ } smstpcr_saved[ARRAY_SIZE(mstpsr_for_v3u)];
struct clk *clks[];
};
@@ -177,40 +198,40 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
enable ? "ON" : "OFF");
spin_lock_irqsave(&priv->rmw_lock, flags);
- if (priv->stbyctrl) {
- value = readb(priv->base + STBCR(reg));
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
+ value = readb(priv->base + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writeb(value, priv->base + STBCR(reg));
+ writeb(value, priv->base + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + STBCR(reg));
- barrier_data(priv->base + STBCR(reg));
+ readb(priv->base + priv->control_regs[reg]);
+ barrier_data(priv->base + priv->control_regs[reg]);
} else {
- value = readl(priv->base + SMSTPCR(reg));
+ value = readl(priv->base + priv->control_regs[reg]);
if (enable)
value &= ~bitmask;
else
value |= bitmask;
- writel(value, priv->base + SMSTPCR(reg));
+ writel(value, priv->base + priv->control_regs[reg]);
}
spin_unlock_irqrestore(&priv->rmw_lock, flags);
- if (!enable || priv->stbyctrl)
+ if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
for (i = 1000; i > 0; --i) {
- if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
+ if (!(readl(priv->base + priv->status_regs[reg]) & bitmask))
break;
cpu_relax();
}
if (!i) {
dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
- priv->base + SMSTPCR(reg), bit);
+ priv->base + priv->control_regs[reg], bit);
return -ETIMEDOUT;
}
@@ -233,10 +254,10 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
struct cpg_mssr_priv *priv = clock->priv;
u32 value;
- if (priv->stbyctrl)
- value = readb(priv->base + STBCR(clock->index / 32));
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ value = readb(priv->base + priv->control_regs[clock->index / 32]);
else
- value = readl(priv->base + MSTPSR(clock->index / 32));
+ value = readl(priv->base + priv->status_regs[clock->index / 32]);
return !(value & BIT(clock->index % 32));
}
@@ -272,7 +293,7 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
case CPG_MOD:
type = "module";
- if (priv->stbyctrl) {
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
idx = MOD_CLK_PACK_10(clkidx);
range_check = 7 - (clkidx % 10);
} else {
@@ -578,13 +599,13 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- writel(bitmask, priv->base + SRCR(reg));
+ writel(bitmask, priv->base + priv->reset_regs[reg]);
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
/* Release module from reset state */
- writel(bitmask, priv->base + SRSTCLR(reg));
+ writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
return 0;
}
@@ -598,7 +619,7 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + SRCR(reg));
+ writel(bitmask, priv->base + priv->reset_regs[reg]);
return 0;
}
@@ -612,7 +633,7 @@ static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);
- writel(bitmask, priv->base + SRSTCLR(reg));
+ writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
return 0;
}
@@ -624,7 +645,7 @@ static int cpg_mssr_status(struct reset_controller_dev *rcdev,
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- return !!(readl(priv->base + SRCR(reg)) & bitmask);
+ return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
}
static const struct reset_control_ops cpg_mssr_reset_ops = {
@@ -804,6 +825,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a77995_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A779A0
+ {
+ .compatible = "renesas,r8a779a0-cpg-mssr",
+ .data = &r8a779a0_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -825,9 +852,10 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
/* Save module registers with bits under our control */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
if (priv->smstpcr_saved[reg].mask)
- priv->smstpcr_saved[reg].val = priv->stbyctrl ?
- readb(priv->base + STBCR(reg)) :
- readl(priv->base + SMSTPCR(reg));
+ priv->smstpcr_saved[reg].val =
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
+ readb(priv->base + priv->control_regs[reg]) :
+ readl(priv->base + priv->control_regs[reg]);
}
/* Save core clocks */
@@ -855,23 +883,23 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (!mask)
continue;
- if (priv->stbyctrl)
- oldval = readb(priv->base + STBCR(reg));
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ oldval = readb(priv->base + priv->control_regs[reg]);
else
- oldval = readl(priv->base + SMSTPCR(reg));
+ oldval = readl(priv->base + priv->control_regs[reg]);
newval = oldval & ~mask;
newval |= priv->smstpcr_saved[reg].val & mask;
if (newval == oldval)
continue;
- if (priv->stbyctrl) {
- writeb(newval, priv->base + STBCR(reg));
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
+ writeb(newval, priv->base + priv->control_regs[reg]);
/* dummy read to ensure write has completed */
- readb(priv->base + STBCR(reg));
- barrier_data(priv->base + STBCR(reg));
+ readb(priv->base + priv->control_regs[reg]);
+ barrier_data(priv->base + priv->control_regs[reg]);
continue;
} else
- writel(newval, priv->base + SMSTPCR(reg));
+ writel(newval, priv->base + priv->control_regs[reg]);
/* Wait until enabled clocks are really enabled */
mask &= ~priv->smstpcr_saved[reg].val;
@@ -879,7 +907,7 @@ static int cpg_mssr_resume_noirq(struct device *dev)
continue;
for (i = 1000; i > 0; --i) {
- oldval = readl(priv->base + MSTPSR(reg));
+ oldval = readl(priv->base + priv->status_regs[reg]);
if (!(oldval & mask))
break;
cpu_relax();
@@ -887,8 +915,8 @@ static int cpg_mssr_resume_noirq(struct device *dev)
if (!i)
dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
- priv->stbyctrl ? "STB" : "SMSTP", reg,
- oldval & mask);
+ priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
+ "STB" : "SMSTP", reg, oldval & mask);
}
return 0;
@@ -937,7 +965,23 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
- priv->stbyctrl = info->stbyctrl;
+ priv->reg_layout = info->reg_layout;
+ if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
+ priv->status_regs = mstpsr;
+ priv->control_regs = smstpcr;
+ priv->reset_regs = srcr;
+ priv->reset_clear_regs = srstclr;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
+ priv->control_regs = stbcr;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_V3U) {
+ priv->status_regs = mstpsr_for_v3u;
+ priv->control_regs = mstpcr_for_v3u;
+ priv->reset_regs = srcr_for_v3u;
+ priv->reset_clear_regs = srstclr_for_v3u;
+ } else {
+ error = -EINVAL;
+ goto out_err;
+ }
for (i = 0; i < nclks; i++)
priv->clks[i] = ERR_PTR(-ENOENT);
@@ -1015,7 +1059,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
return error;
/* Reset Controller not supported for Standby Control SoCs */
- if (info->stbyctrl)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
return 0;
error = cpg_mssr_reset_controller_register(priv);
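
For readers cross-checking the new V3U tables: MSTP identifiers keep the usual Renesas convention in which the hundreds digit(s) select the register and the last two digits the bit (always below 32). A small decoding sketch, with the scif0 entry from the r8a779a0 driver as the worked case and the offsets copied from mstpcr_for_v3u above:

/* Decode an MSTP id such as 702 ("scif0" in r8a779a0_mod_clks). */
static const unsigned short mstpcr_v3u[] = {
	0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
	0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38,
};

static unsigned int mstp_control_offset(unsigned int mstp, unsigned int *bit)
{
	*bit = mstp % 100;		/* 702 -> bit 2 */
	return mstpcr_v3u[mstp / 100];	/* 702 -> index 7 -> 0x2D1C */
}
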
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 1cc569484250..6b2a0ade482e 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -85,6 +85,12 @@ struct mssr_mod_clk {
struct device_node;
+enum clk_reg_layout {
+ CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
+ CLK_REG_LAYOUT_RZ_A,
+ CLK_REG_LAYOUT_RCAR_V3U,
+};
+
/**
* SoC-specific CPG/MSSR Description
*
@@ -105,6 +111,7 @@ struct device_node;
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
+ * @reg_layout: CPG/MSSR register layout from enum clk_reg_layout
*
* @core_pm_clks: Array with IDs of Core Clocks that are suitable for Power
* Management, in addition to Module Clocks
@@ -112,10 +119,6 @@ struct device_node;
*
* @init: Optional callback to perform SoC-specific initialization
* @cpg_clk_register: Optional callback to handle special Core Clock types
- *
- * @stbyctrl: This device has Standby Control Registers which are 8-bits
- * wide, no status registers (MSTPSR) and have different address
- * offsets.
*/
struct cpg_mssr_info {
@@ -130,7 +133,7 @@ struct cpg_mssr_info {
unsigned int num_core_clks;
unsigned int last_dt_core_clk;
unsigned int num_total_core_clks;
- bool stbyctrl;
+ enum clk_reg_layout reg_layout;
/* Module Clocks */
const struct mssr_mod_clk *mod_clks;
@@ -174,6 +177,7 @@ extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
new file mode 100644
index 000000000000..47cd6c5de837
--- /dev/null
+++ b/drivers/clk/rockchip/Kconfig
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+# Common clock support for the Rockchip SoC family.
+
+config COMMON_CLK_ROCKCHIP
+ bool "Rockchip clock controller common support"
+ depends on ARCH_ROCKCHIP
+ default ARCH_ROCKCHIP
+ help
+ Say y here to enable support for the common clock controllers on Rockchip platforms.
+
+if COMMON_CLK_ROCKCHIP
+config CLK_PX30
+ bool "Rockchip PX30 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip PX30.
+
+config CLK_RV110X
+ bool "Rockchip RV110x clock controller support"
+ default y
+ help
+ Build the clock controller driver for Rockchip RV110x SoCs.
+
+config CLK_RK3036
+ bool "Rockchip RK3036 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3036.
+
+config CLK_RK312X
+ bool "Rockchip RK312x clock controller support"
+ default y
+ help
+ Build the clock controller driver for Rockchip RK312x SoCs.
+
+config CLK_RK3188
+ bool "Rockchip RK3188 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3188.
+
+config CLK_RK322X
+ bool "Rockchip RK322x clock controller support"
+ default y
+ help
+ Build the clock controller driver for Rockchip RK322x SoCs.
+
+config CLK_RK3288
+ bool "Rockchip RK3288 clock controller support"
+ depends on ARM
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3288.
+
+config CLK_RK3308
+ bool "Rockchip RK3308 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3308.
+
+config CLK_RK3328
+ bool "Rockchip RK3328 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3328.
+
+config CLK_RK3368
+ bool "Rockchip RK3368 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3368.
+
+config CLK_RK3399
+ tristate "Rockchip RK3399 clock controller support"
+ default y
+ help
+ Build the clock controller driver for the Rockchip RK3399.
+endif
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 7c5b5813a87c..a99e4d9bbae1 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -3,24 +3,26 @@
# Rockchip Clock specific Makefile
#
-obj-y += clk.o
-obj-y += clk-pll.o
-obj-y += clk-cpu.o
-obj-y += clk-half-divider.o
-obj-y += clk-inverter.o
-obj-y += clk-mmc-phase.o
-obj-y += clk-muxgrf.o
-obj-y += clk-ddr.o
-obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o
-obj-y += clk-px30.o
-obj-y += clk-rv1108.o
-obj-y += clk-rk3036.o
-obj-y += clk-rk3128.o
-obj-y += clk-rk3188.o
-obj-y += clk-rk3228.o
-obj-y += clk-rk3288.o
-obj-y += clk-rk3308.o
-obj-y += clk-rk3328.o
-obj-y += clk-rk3368.o
-obj-y += clk-rk3399.o
+clk-rockchip-y += clk.o
+clk-rockchip-y += clk-pll.o
+clk-rockchip-y += clk-cpu.o
+clk-rockchip-y += clk-half-divider.o
+clk-rockchip-y += clk-inverter.o
+clk-rockchip-y += clk-mmc-phase.o
+clk-rockchip-y += clk-muxgrf.o
+clk-rockchip-y += clk-ddr.o
+clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
+
+obj-$(CONFIG_CLK_PX30) += clk-px30.o
+obj-$(CONFIG_CLK_RV110X) += clk-rv1108.o
+obj-$(CONFIG_CLK_RK3036) += clk-rk3036.o
+obj-$(CONFIG_CLK_RK312X) += clk-rk3128.o
+obj-$(CONFIG_CLK_RK3188) += clk-rk3188.o
+obj-$(CONFIG_CLK_RK322X) += clk-rk3228.o
+obj-$(CONFIG_CLK_RK3288) += clk-rk3288.o
+obj-$(CONFIG_CLK_RK3308) += clk-rk3308.o
+obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
+obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
+obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 9273bce4d7b6..86718c54e56b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -136,3 +136,4 @@ struct clk *rockchip_clk_register_ddrclk(const char *name, int flags,
return clk;
}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_ddrclk);
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index b333fc28c94b..ccd5c270c213 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -166,7 +166,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
unsigned long flags,
spinlock_t *lock)
{
- struct clk *clk;
+ struct clk_hw *hw = ERR_PTR(-ENOMEM);
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;
@@ -212,16 +212,18 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
div_ops = &clk_half_divider_ops;
}
- clk = clk_register_composite(NULL, name, parent_names, num_parents,
- mux ? &mux->hw : NULL, mux_ops,
- div ? &div->hw : NULL, div_ops,
- gate ? &gate->hw : NULL, gate_ops,
- flags);
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags);
+ if (IS_ERR(hw))
+ goto err_div;
- return clk;
+ return hw->clk;
err_div:
kfree(gate);
err_gate:
kfree(mux);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(hw);
}
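
Beyond switching to the clk_hw API, the hunk above fixes the error path: the old code always returned ERR_PTR(-ENOMEM), discarding whatever clk_register_composite() actually failed with, while pre-seeding hw with ERR_PTR(-ENOMEM) keeps the early allocation-failure gotos correct. The resulting idiom, as a standalone sketch with illustrative names:

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Sketch of the error-propagation idiom; foo_register() is illustrative. */
struct clk *foo_register(const char *name, const char *const *parents,
			 int num_parents, struct clk_hw *gate_hw,
			 const struct clk_ops *gate_ops)
{
	struct clk_hw *hw;

	hw = clk_hw_register_composite(NULL, name, parents, num_parents,
				       NULL, NULL, NULL, NULL,
				       gate_hw, gate_ops, 0);
	if (IS_ERR(hw))
		/* propagate the real error, cast from clk_hw * to clk * */
		return ERR_CAST(hw);

	return hw->clk;
}
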
diff --git a/drivers/clk/rockchip/clk-rk3308.c b/drivers/clk/rockchip/clk-rk3308.c
index b0baf87a283e..5bf15f2a44b7 100644
--- a/drivers/clk/rockchip/clk-rk3308.c
+++ b/drivers/clk/rockchip/clk-rk3308.c
@@ -133,7 +133,6 @@ PNAME(mux_uart1_p) = { "clk_uart1_src", "dummy", "clk_uart1_frac" };
PNAME(mux_uart2_p) = { "clk_uart2_src", "dummy", "clk_uart2_frac" };
PNAME(mux_uart3_p) = { "clk_uart3_src", "dummy", "clk_uart3_frac" };
PNAME(mux_uart4_p) = { "clk_uart4_src", "dummy", "clk_uart4_frac" };
-PNAME(mux_timer_src_p) = { "xin24m", "clk_rtc32k" };
PNAME(mux_dclk_vop_p) = { "dclk_vop_src", "dclk_vop_frac", "xin24m" };
PNAME(mux_nandc_p) = { "clk_nandc_div", "clk_nandc_div50" };
PNAME(mux_sdmmc_p) = { "clk_sdmmc_div", "clk_sdmmc_div50" };
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index ce1d2446f142..7df2f1e00347 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -5,9 +5,11 @@
*/
#include <linux/clk-provider.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/rk3399-cru.h>
@@ -1600,3 +1602,57 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3399_cru_pmu, "rockchip,rk3399-pmucru", rk3399_pmu_clk_init);
+
+struct clk_rk3399_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3399_inits clk_rk3399_pmucru_init = {
+ .inits = rk3399_pmu_clk_init,
+};
+
+static const struct clk_rk3399_inits clk_rk3399_cru_init = {
+ .inits = rk3399_clk_init,
+};
+
+static const struct of_device_id clk_rk3399_match_table[] = {
+ {
+ .compatible = "rockchip,rk3399-cru",
+ .data = &clk_rk3399_cru_init,
+ }, {
+ .compatible = "rockchip,rk3399-pmucru",
+ .data = &clk_rk3399_pmucru_init,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_rk3399_match_table);
+
+static int __init clk_rk3399_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ const struct clk_rk3399_inits *init_data;
+
+ match = of_match_device(clk_rk3399_match_table, &pdev->dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ init_data = match->data;
+ if (init_data->inits)
+ init_data->inits(np);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3399_driver = {
+ .driver = {
+ .name = "clk-rk3399",
+ .of_match_table = clk_rk3399_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
+
+MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:clk-rk3399");
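
One possible simplification for the probe above, since it only ever consumes the match data: of_device_get_match_data() folds the of_match_device() lookup and NULL checks into a single call. A sketch with the same behavior, offered as a suggestion rather than what the patch does:

static int __init clk_rk3399_probe(struct platform_device *pdev)
{
	const struct clk_rk3399_inits *init_data;

	/* Returns NULL if the device did not match or carries no data */
	init_data = of_device_get_match_data(&pdev->dev);
	if (!init_data)
		return -EINVAL;

	if (init_data->inits)
		init_data->inits(pdev->dev.of_node);

	return 0;
}
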
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 546e810c3560..b443169dd408 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -43,7 +43,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
u8 gate_shift, u8 gate_flags, unsigned long flags,
spinlock_t *lock)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;
@@ -100,20 +100,18 @@ static struct clk *rockchip_clk_register_branch(const char *name,
: &clk_divider_ops;
}
- clk = clk_register_composite(NULL, name, parent_names, num_parents,
- mux ? &mux->hw : NULL, mux_ops,
- div ? &div->hw : NULL, div_ops,
- gate ? &gate->hw : NULL, gate_ops,
- flags);
-
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto err_composite;
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags);
+ if (IS_ERR(hw)) {
+ kfree(div);
+ kfree(gate);
+ return ERR_CAST(hw);
}
- return clk;
-err_composite:
- kfree(div);
+ return hw->clk;
err_div:
kfree(gate);
err_gate:
@@ -214,8 +212,8 @@ static struct clk *rockchip_clk_register_frac_branch(
unsigned long flags, struct rockchip_clk_branch *child,
spinlock_t *lock)
{
+ struct clk_hw *hw;
struct rockchip_clk_frac *frac;
- struct clk *clk;
struct clk_gate *gate = NULL;
struct clk_fractional_divider *div = NULL;
const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
@@ -255,14 +253,14 @@ static struct clk *rockchip_clk_register_frac_branch(
div->approximation = rockchip_fractional_approximation;
div_ops = &clk_fractional_divider_ops;
- clk = clk_register_composite(NULL, name, parent_names, num_parents,
- NULL, NULL,
- &div->hw, div_ops,
- gate ? &gate->hw : NULL, gate_ops,
- flags | CLK_SET_RATE_UNGATE);
- if (IS_ERR(clk)) {
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ NULL, NULL,
+ &div->hw, div_ops,
+ gate ? &gate->hw : NULL, gate_ops,
+ flags | CLK_SET_RATE_UNGATE);
+ if (IS_ERR(hw)) {
kfree(frac);
- return clk;
+ return ERR_CAST(hw);
}
if (child) {
@@ -292,7 +290,7 @@ static struct clk *rockchip_clk_register_frac_branch(
mux_clk = clk_register(NULL, &frac_mux->hw);
if (IS_ERR(mux_clk)) {
kfree(frac);
- return clk;
+ return mux_clk;
}
rockchip_clk_add_lookup(ctx, mux_clk, child->id);
@@ -301,7 +299,7 @@ static struct clk *rockchip_clk_register_frac_branch(
if (frac->mux_frac_idx >= 0) {
pr_debug("%s: found fractional parent in mux at pos %d\n",
__func__, frac->mux_frac_idx);
- ret = clk_notifier_register(clk, &frac->clk_nb);
+ ret = clk_notifier_register(hw->clk, &frac->clk_nb);
if (ret)
pr_err("%s: failed to register clock notifier for %s\n",
__func__, name);
@@ -311,7 +309,7 @@ static struct clk *rockchip_clk_register_frac_branch(
}
}
- return clk;
+ return hw->clk;
}
static struct clk *rockchip_clk_register_factor_branch(const char *name,
@@ -320,7 +318,7 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
int gate_offset, u8 gate_shift, u8 gate_flags,
unsigned long flags, spinlock_t *lock)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_gate *gate = NULL;
struct clk_fixed_factor *fix = NULL;
@@ -349,20 +347,22 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
fix->mult = mult;
fix->div = div;
- clk = clk_register_composite(NULL, name, parent_names, num_parents,
- NULL, NULL,
- &fix->hw, &clk_fixed_factor_ops,
- &gate->hw, &clk_gate_ops, flags);
- if (IS_ERR(clk)) {
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ NULL, NULL,
+ &fix->hw, &clk_fixed_factor_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(hw)) {
kfree(fix);
kfree(gate);
+ return ERR_CAST(hw);
}
- return clk;
+ return hw->clk;
}
-struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
- void __iomem *base, unsigned long nr_clks)
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
{
struct rockchip_clk_provider *ctx;
struct clk **clk_table;
@@ -394,14 +394,16 @@ err_free:
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+EXPORT_SYMBOL_GPL(rockchip_clk_init);
-void __init rockchip_clk_of_add_provider(struct device_node *np,
- struct rockchip_clk_provider *ctx)
+void rockchip_clk_of_add_provider(struct device_node *np,
+ struct rockchip_clk_provider *ctx)
{
if (of_clk_add_provider(np, of_clk_src_onecell_get,
&ctx->clk_data))
pr_err("%s: could not register clk provider\n", __func__);
}
+EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider);
void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
struct clk *clk, unsigned int id)
@@ -409,8 +411,9 @@ void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
if (ctx->clk_data.clks && id)
ctx->clk_data.clks[id] = clk;
}
+EXPORT_SYMBOL_GPL(rockchip_clk_add_lookup);
-void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
+void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
struct rockchip_pll_clock *list,
unsigned int nr_pll, int grf_lock_offset)
{
@@ -433,11 +436,11 @@ void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
rockchip_clk_add_lookup(ctx, clk, list->id);
}
}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
-void __init rockchip_clk_register_branches(
- struct rockchip_clk_provider *ctx,
- struct rockchip_clk_branch *list,
- unsigned int nr_clk)
+void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
{
struct clk *clk = NULL;
unsigned int idx;
@@ -566,14 +569,15 @@ void __init rockchip_clk_register_branches(
rockchip_clk_add_lookup(ctx, clk, list->id);
}
}
-
-void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
- unsigned int lookup_id,
- const char *name, const char *const *parent_names,
- u8 num_parents,
- const struct rockchip_cpuclk_reg_data *reg_data,
- const struct rockchip_cpuclk_rate_table *rates,
- int nrates)
+EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
+
+void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
+ unsigned int lookup_id,
+ const char *name, const char *const *parent_names,
+ u8 num_parents,
+ const struct rockchip_cpuclk_reg_data *reg_data,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates)
{
struct clk *clk;
@@ -588,9 +592,10 @@ void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
rockchip_clk_add_lookup(ctx, clk, lookup_id);
}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
-void __init rockchip_clk_protect_critical(const char *const clocks[],
- int nclocks)
+void rockchip_clk_protect_critical(const char *const clocks[],
+ int nclocks)
{
int i;
@@ -602,6 +607,7 @@ void __init rockchip_clk_protect_critical(const char *const clocks[],
clk_prepare_enable(clk);
}
}
+EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
static void __iomem *rst_base;
static unsigned int reg_restart;
@@ -621,10 +627,10 @@ static struct notifier_block rockchip_restart_handler = {
.priority = 128,
};
-void __init
+void
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
- unsigned int reg,
- void (*cb)(void))
+ unsigned int reg,
+ void (*cb)(void))
{
int ret;
@@ -636,3 +642,4 @@ rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
pr_err("%s: cannot register restart handler, %d\n",
__func__, ret);
}
+EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 5f1ff5e47c4f..5d07266745b8 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -77,9 +77,9 @@ static const struct reset_control_ops rockchip_softrst_ops = {
.deassert = rockchip_softrst_deassert,
};
-void __init rockchip_register_softrst(struct device_node *np,
- unsigned int num_regs,
- void __iomem *base, u8 flags)
+void rockchip_register_softrst(struct device_node *np,
+ unsigned int num_regs,
+ void __iomem *base, u8 flags)
{
struct rockchip_softrst *softrst;
int ret;
@@ -107,3 +107,4 @@ void __init rockchip_register_softrst(struct device_node *np,
kfree(softrst);
}
};
+EXPORT_SYMBOL_GPL(rockchip_register_softrst);
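
The repeated pattern in the two files above, drop __init and add EXPORT_SYMBOL_GPL(), is what makes the new tristate CLK_RK3399 safe: __init code is discarded once boot completes, so a module loaded later must not be able to reach it, and only exported symbols are visible to modules in the first place. In miniature:

#include <linux/export.h>
#include <linux/init.h>

/* Kept resident and visible to modules such as clk-rk3399.ko */
int rockchip_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(rockchip_helper);

/* Freed after boot; calling this from a module would be a bug */
static int __init rockchip_boot_only_setup(void)
{
	return 0;
}
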
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index efc4fa61fbaf..00ef4d1b0888 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -401,26 +401,34 @@ static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
/* helper function to register a CPU clock */
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
- unsigned int lookup_id, const char *name, const char *parent,
- const char *alt_parent, unsigned long offset,
- const struct exynos_cpuclk_cfg_data *cfg,
+ unsigned int lookup_id, const char *name,
+ const struct clk_hw *parent, const struct clk_hw *alt_parent,
+ unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
unsigned long num_cfgs, unsigned long flags)
{
struct exynos_cpuclk *cpuclk;
struct clk_init_data init;
- struct clk *parent_clk;
+ const char *parent_name;
int ret = 0;
+ if (IS_ERR(parent) || IS_ERR(alt_parent)) {
+ pr_err("%s: invalid parent clock(s)\n", __func__);
+ return -EINVAL;
+ }
+
cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
if (!cpuclk)
return -ENOMEM;
+ parent_name = clk_hw_get_name(parent);
+
init.name = name;
init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent;
+ init.parent_names = &parent_name;
init.num_parents = 1;
init.ops = &exynos_cpuclk_clk_ops;
+ cpuclk->alt_parent = alt_parent;
cpuclk->hw.init = &init;
cpuclk->ctrl_base = ctx->reg_base + offset;
cpuclk->lock = &ctx->lock;
@@ -430,23 +438,8 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
else
cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
- cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
- if (!cpuclk->alt_parent) {
- pr_err("%s: could not lookup alternate parent %s\n",
- __func__, alt_parent);
- ret = -EINVAL;
- goto free_cpuclk;
- }
-
- parent_clk = __clk_lookup(parent);
- if (!parent_clk) {
- pr_err("%s: could not lookup parent clock %s\n",
- __func__, parent);
- ret = -EINVAL;
- goto free_cpuclk;
- }
- ret = clk_notifier_register(parent_clk, &cpuclk->clk_nb);
+ ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
if (ret) {
pr_err("%s: failed to register clock notifier for %s\n",
__func__, name);
@@ -471,7 +464,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
free_cpuclk_data:
kfree(cpuclk->cfg);
unregister_clk_nb:
- clk_notifier_unregister(parent_clk, &cpuclk->clk_nb);
+ clk_notifier_unregister(parent->clk, &cpuclk->clk_nb);
free_cpuclk:
kfree(cpuclk);
return ret;
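
Passing struct clk_hw pointers instead of clock-name strings removes two global __clk_lookup() scans per CPU clock and moves the failure mode to registration time: as I read the common samsung clk code, samsung_clk_init() pre-fills every provider slot with ERR_PTR(-ENOENT), so the new IS_ERR() guard catches an index that was never registered. A sketch of that guard under this assumption:

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Reject parents that were never registered; provider table slots hold
 * ERR_PTR(-ENOENT) rather than NULL (assumption, per samsung_clk_init()). */
static int check_cpu_clk_parents(const struct clk_hw *parent,
				 const struct clk_hw *alt_parent)
{
	if (IS_ERR(parent) || IS_ERR(alt_parent))
		return -EINVAL;

	return 0;
}
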
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index ad38cc27f3df..af74686db9ef 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -46,7 +46,7 @@ struct exynos_cpuclk_cfg_data {
*/
struct exynos_cpuclk {
struct clk_hw hw;
- struct clk_hw *alt_parent;
+ const struct clk_hw *alt_parent;
void __iomem *ctrl_base;
spinlock_t *lock;
const struct exynos_cpuclk_cfg_data *cfg;
@@ -62,9 +62,9 @@ struct exynos_cpuclk {
#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2)
};
-extern int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
unsigned int lookup_id, const char *name,
- const char *parent, const char *alt_parent,
+ const struct clk_hw *parent, const struct clk_hw *alt_parent,
unsigned long offset,
const struct exynos_cpuclk_cfg_data *cfg,
unsigned long num_cfgs, unsigned long flags);
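
A side note on the header hunk above: dropping the extern keyword changes nothing, because extern is implicit on function declarations; the edit is pure noise reduction. For illustration:

/* These two declarations are equivalent in C; the kernel style is to
 * omit 'extern' on function prototypes. example_register() is made up. */
extern int example_register(int id);
int example_register(int id);
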
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 17897c7a84d4..17df7f9755aa 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -808,14 +808,16 @@ static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
static void __init exynos3250_cmu_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
ctx = samsung_cmu_register_one(np, &cmu_info);
if (!ctx)
return;
+ hws = ctx->clk_data.hws;
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p[0], mout_core_p[1], 0x14200,
- e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
+ hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C],
+ 0x14200, e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
CLK_CPU_HAS_DIV1);
exynos3_core_down_clock(ctx->reg_base);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index f4086287bb71..bf13e29a655c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1233,6 +1233,8 @@ static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
+
exynos4_soc = soc;
reg_base = of_iomap(np, 0);
@@ -1240,6 +1242,7 @@ static void __init exynos4_clk_init(struct device_node *np,
panic("%s: failed to map registers\n", __func__);
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
@@ -1302,7 +1305,7 @@ static void __init exynos4_clk_init(struct device_node *np,
exynos4210_fixed_factor_clks,
ARRAY_SIZE(exynos4210_fixed_factor_clks));
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4210[0], mout_core_p4210[1], 0x14200,
+ hws[CLK_MOUT_APLL], hws[CLK_SCLK_MPLL], 0x14200,
e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
} else {
@@ -1317,7 +1320,7 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C], 0x14200,
e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
}
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 931c70a4da19..06588fab408a 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -253,14 +253,14 @@ static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
/*
* CMU_CPU
*/
- MUX_F(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
/*
* CMU_CORE
*/
- MUX(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+ MUX(CLK_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
/*
* CMU_TOP
@@ -782,6 +782,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
unsigned int tmp;
+ struct clk_hw **hws;
if (np) {
reg_base = of_iomap(np, 0);
@@ -792,6 +793,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
}
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
@@ -821,7 +823,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_cpu_p[0], mout_cpu_p[1], 0x200,
+ hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL], 0x200,
exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d),
CLK_CPU_HAS_DIV1);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index bd620876544d..3ccd4eabd2a6 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -596,13 +596,14 @@ static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p,
SRC_TOP7, 4, 1),
- MUX(0, "mout_mspll_kfc", mout_mspll_cpu_p, SRC_TOP7, 8, 2),
- MUX(0, "mout_mspll_cpu", mout_mspll_cpu_p, SRC_TOP7, 12, 2),
-
- MUX_F(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ MUX(CLK_MOUT_MSPLL_KFC, "mout_mspll_kfc", mout_mspll_cpu_p,
+ SRC_TOP7, 8, 2),
+ MUX(CLK_MOUT_MSPLL_CPU, "mout_mspll_cpu", mout_mspll_cpu_p,
+ SRC_TOP7, 12, 2),
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
- MUX_F(0, "mout_kpll", mout_kpll_p, SRC_KFC, 0, 1,
+ MUX_F(CLK_MOUT_KPLL, "mout_kpll", mout_kpll_p, SRC_KFC, 0, 1,
CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX(0, "mout_kfc", mout_kfc_p, SRC_KFC, 16, 1),
@@ -712,8 +713,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
SRC_TOP12, 8, 1),
MUX(0, "mout_sw_aclk266_g2d", mout_sw_aclk266_g2d_p,
SRC_TOP12, 12, 1),
- MUX_F(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1,
- CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_SW_ACLK_G3D, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p,
+ SRC_TOP12, 16, 1, CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_sw_aclk300_jpeg", mout_sw_aclk300_jpeg_p,
SRC_TOP12, 20, 1),
MUX(CLK_MOUT_SW_ACLK300, "mout_sw_aclk300_disp1",
@@ -1560,6 +1561,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
enum exynos5x_soc soc)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
if (np) {
reg_base = of_iomap(np, 0);
@@ -1572,6 +1574,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_soc = soc;
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+ hws = ctx->clk_data.hws;
samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
@@ -1623,15 +1626,15 @@ static void __init exynos5x_clk_init(struct device_node *np,
if (soc == EXYNOS5420) {
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_cpu_p[0], mout_cpu_p[1], 0x200,
+ hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
exynos5420_eglclk_d, ARRAY_SIZE(exynos5420_eglclk_d), 0);
} else {
exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_cpu_p[0], mout_cpu_p[1], 0x200,
+ hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
exynos5800_eglclk_d, ARRAY_SIZE(exynos5800_eglclk_d), 0);
}
exynos_register_cpu_clock(ctx, CLK_KFC_CLK, "kfcclk",
- mout_kfc_p[0], mout_kfc_p[1], 0x28200,
+ hws[CLK_MOUT_KPLL], hws[CLK_MOUT_MSPLL_KFC], 0x28200,
exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
samsung_clk_extended_sleep_init(reg_base,
@@ -1654,12 +1657,12 @@ static void __init exynos5x_clk_init(struct device_node *np,
* that the internal busses get their clock regardless of the
* main G3D clock enablement status.
*/
- clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d"));
+ clk_prepare_enable(hws[CLK_MOUT_SW_ACLK_G3D]->clk);
/*
* Keep top BPLL mux enabled permanently to ensure that DRAM operates
* properly.
*/
- clk_prepare_enable(__clk_lookup("mout_bpll"));
+ clk_prepare_enable(hws[CLK_MOUT_BPLL]->clk);
samsung_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 6f29ecd0442e..f203074d858b 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -3679,6 +3679,7 @@ static void __init exynos5433_cmu_apollo_init(struct device_node *np)
{
void __iomem *reg_base;
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -3701,8 +3702,10 @@ static void __init exynos5433_cmu_apollo_init(struct device_node *np)
samsung_clk_register_gate(ctx, apollo_gate_clks,
ARRAY_SIZE(apollo_gate_clks));
+ hws = ctx->clk_data.hws;
+
exynos_register_cpu_clock(ctx, CLK_SCLK_APOLLO, "apolloclk",
- mout_apollo_p[0], mout_apollo_p[1], 0x200,
+ hws[CLK_MOUT_APOLLO_PLL], hws[CLK_MOUT_BUS_PLL_APOLLO_USER], 0x200,
exynos5433_apolloclk_d, ARRAY_SIZE(exynos5433_apolloclk_d),
CLK_CPU_HAS_E5433_REGS_LAYOUT);
@@ -3933,6 +3936,7 @@ static void __init exynos5433_cmu_atlas_init(struct device_node *np)
{
void __iomem *reg_base;
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -3955,8 +3959,10 @@ static void __init exynos5433_cmu_atlas_init(struct device_node *np)
samsung_clk_register_gate(ctx, atlas_gate_clks,
ARRAY_SIZE(atlas_gate_clks));
+ hws = ctx->clk_data.hws;
+
exynos_register_cpu_clock(ctx, CLK_SCLK_ATLAS, "atlasclk",
- mout_atlas_p[0], mout_atlas_p[1], 0x200,
+ hws[CLK_MOUT_ATLAS_PLL], hws[CLK_MOUT_BUS_PLL_ATLAS_USER], 0x200,
exynos5433_atlasclk_d, ARRAY_SIZE(exynos5433_atlasclk_d),
CLK_CPU_HAS_E5433_REGS_LAYOUT);
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 7dad9098e897..f5e0a6ba2d12 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -11,13 +11,10 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/clk-s3c2410.h>
#include <linux/module.h>
#include "clk.h"
-/* legacy access to misccr, until dt conversion is finished */
-#include <mach/hardware.h>
-#include <mach/regs-gpio.h>
-
#define MUX_DCLK0 0
#define MUX_DCLK1 1
#define DIV_DCLK0 2
@@ -52,6 +49,7 @@ struct s3c24xx_clkout {
struct clk_hw hw;
u32 mask;
u8 shift;
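+ /* MISCCR read-modify-write helper from platform data; (0, 0) just reads */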
+ unsigned int (*modify_misccr)(unsigned int clr, unsigned int chg);
};
#define to_s3c24xx_clkout(_hw) container_of(_hw, struct s3c24xx_clkout, hw)
@@ -62,7 +60,7 @@ static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
int num_parents = clk_hw_get_num_parents(hw);
u32 val;
- val = readl_relaxed(S3C24XX_MISCCR);
+ val = clkout->modify_misccr(0, 0);
val >>= clkout->shift;
val &= clkout->mask;
@@ -76,7 +74,7 @@ static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
{
struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
- s3c2410_modify_misccr((clkout->mask << clkout->shift),
+ clkout->modify_misccr((clkout->mask << clkout->shift),
(index << clkout->shift));
return 0;
@@ -92,10 +90,14 @@ static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
const char *name, const char **parent_names, u8 num_parents,
u8 shift, u32 mask)
{
+ struct s3c2410_clk_platform_data *pdata = dev_get_platdata(dev);
struct s3c24xx_clkout *clkout;
struct clk_init_data init;
int ret;
+ if (!pdata)
+ return ERR_PTR(-EINVAL);
+
/* allocate the clkout */
clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
if (!clkout)
@@ -110,6 +112,7 @@ static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
clkout->shift = shift;
clkout->mask = mask;
clkout->hw.init = &init;
+ clkout->modify_misccr = pdata->modify_misccr;
ret = clk_hw_register(dev, &clkout->hw);
if (ret)
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index fcf6764693cc..5831d0606077 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/samsung.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index a95ab5f75163..724ef642f048 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/samsung.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index c7aba1e1af70..a827d63766d1 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/samsung.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index b96d33e5eb45..56f95b63f71f 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/clk/samsung.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index 45dcbc9e0302..d17b345f4d2d 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -134,7 +134,7 @@ static void __init prima2_clk_init(struct device_node *np)
for (i = pll1; i < maxclk; i++) {
prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(!prima2_clks[i]);
+ BUG_ON(IS_ERR(prima2_clks[i]));
}
clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
clk_register_clkdev(prima2_clks[io], NULL, "io");
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index 8fb12cbe0208..bb3e80928ebe 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -21,19 +21,6 @@ static const struct clk_parent_data pll_mux[] = {
.name = "f2s-free-clk", },
};
-static const struct clk_parent_data cntr_mux[] = {
- { .fw_name = "main_pll",
- .name = "main_pll", },
- { .fw_name = "periph_pll",
- .name = "periph_pll", },
- { .fw_name = "osc1",
- .name = "osc1", },
- { .fw_name = "cb-intosc-hs-div2-clk",
- .name = "cb-intosc-hs-div2-clk", },
- { .fw_name = "f2s-free-clk",
- .name = "f2s-free-clk", },
-};
-
static const struct clk_parent_data boot_mux[] = {
{ .fw_name = "osc1",
.name = "osc1", },
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index cdf333003c30..ce5f5847d5d3 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -17,6 +17,16 @@ config SUN50I_A64_CCU
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+config SUN50I_A100_CCU
+ bool "Support for the Allwinner A100 CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
+config SUN50I_A100_R_CCU
+ bool "Support for the Allwinner A100 PRCM CCU"
+ default ARM64 && ARCH_SUNXI
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+
config SUN50I_H6_CCU
bool "Support for the Allwinner H6 CCU"
default ARM64 && ARCH_SUNXI
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 4c7bee883f2f..3eb5cff40eac 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -23,6 +23,8 @@ obj-y += ccu_mp.o
# SoC support
obj-$(CONFIG_SUNIV_F1C100S_CCU) += ccu-suniv-f1c100s.o
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
+obj-$(CONFIG_SUN50I_A100_CCU) += ccu-sun50i-a100.o
+obj-$(CONFIG_SUN50I_A100_R_CCU) += ccu-sun50i-a100-r.o
obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
new file mode 100644
index 000000000000..a56142b90993
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-a100-r.h"
+
+static const char * const cpus_r_apb2_parents[] = { "dcxo24M", "osc32k",
+ "iosc", "pll-periph0" };
+static const struct ccu_mux_var_prediv cpus_r_apb2_predivs[] = {
+ { .index = 3, .shift = 0, .width = 5 },
+};
+
+static struct ccu_div r_cpus_clk = {
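+ /* divider field at bits [9:8], interpreted as a power-of-two divisor */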
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .var_predivs = cpus_r_apb2_predivs,
+ .n_var_predivs = ARRAY_SIZE(cpus_r_apb2_predivs),
+ },
+
+ .common = {
+ .reg = 0x000,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("cpus",
+ cpus_r_apb2_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &r_cpus_clk.common.hw, 1, 1, 0);
+
+static struct ccu_div r_apb1_clk = {
+ .div = _SUNXI_CCU_DIV(0, 2),
+
+ .common = {
+ .reg = 0x00c,
+ .hw.init = CLK_HW_INIT("r-apb1",
+ "r-ahb",
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct ccu_div r_apb2_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+
+ .var_predivs = cpus_r_apb2_predivs,
+ .n_var_predivs = ARRAY_SIZE(cpus_r_apb2_predivs),
+ },
+
+ .common = {
+ .reg = 0x010,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("r-apb2",
+ cpus_r_apb2_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static const struct clk_parent_data clk_parent_r_apb1[] = {
+ { .hw = &r_apb1_clk.common.hw },
+};
+
+static const struct clk_parent_data clk_parent_r_apb2[] = {
+ { .hw = &r_apb2_clk.common.hw },
+};
+
+static SUNXI_CCU_GATE_DATA(r_apb1_timer_clk, "r-apb1-timer", clk_parent_r_apb1,
+ 0x11c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb1_twd_clk, "r-apb1-twd", clk_parent_r_apb1,
+ 0x12c, BIT(0), 0);
+
+static const char * const r_apb1_pwm_clk_parents[] = { "dcxo24M", "osc32k",
+ "iosc" };
+static SUNXI_CCU_MUX(r_apb1_pwm_clk, "r-apb1-pwm", r_apb1_pwm_clk_parents,
+ 0x130, 24, 2, 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb1_bus_pwm_clk, "r-apb1-bus-pwm",
+ clk_parent_r_apb1, 0x13c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb1_ppu_clk, "r-apb1-ppu", clk_parent_r_apb1,
+ 0x17c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb2_uart_clk, "r-apb2-uart", clk_parent_r_apb2,
+ 0x18c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb2_i2c0_clk, "r-apb2-i2c0", clk_parent_r_apb2,
+ 0x19c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb2_i2c1_clk, "r-apb2-i2c1", clk_parent_r_apb2,
+ 0x19c, BIT(1), 0);
+
+static const char * const r_apb1_ir_rx_parents[] = { "osc32k", "dcxo24M" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(r_apb1_ir_rx_clk, "r-apb1-ir-rx",
+ r_apb1_ir_rx_parents, 0x1c0,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_DATA(r_apb1_bus_ir_rx_clk, "r-apb1-bus-ir-rx",
+ clk_parent_r_apb1, 0x1cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(r_ahb_bus_rtc_clk, "r-ahb-rtc", "r-ahb",
+ 0x20c, BIT(0), 0);
+
+static struct ccu_common *sun50i_a100_r_ccu_clks[] = {
+ &r_cpus_clk.common,
+ &r_apb1_clk.common,
+ &r_apb2_clk.common,
+ &r_apb1_timer_clk.common,
+ &r_apb1_twd_clk.common,
+ &r_apb1_pwm_clk.common,
+ &r_apb1_bus_pwm_clk.common,
+ &r_apb1_ppu_clk.common,
+ &r_apb2_uart_clk.common,
+ &r_apb2_i2c0_clk.common,
+ &r_apb2_i2c1_clk.common,
+ &r_apb1_ir_rx_clk.common,
+ &r_apb1_bus_ir_rx_clk.common,
+ &r_ahb_bus_rtc_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_a100_r_hw_clks = {
+ .hws = {
+ [CLK_R_CPUS] = &r_cpus_clk.common.hw,
+ [CLK_R_AHB] = &r_ahb_clk.hw,
+ [CLK_R_APB1] = &r_apb1_clk.common.hw,
+ [CLK_R_APB2] = &r_apb2_clk.common.hw,
+ [CLK_R_APB1_TIMER] = &r_apb1_timer_clk.common.hw,
+ [CLK_R_APB1_TWD] = &r_apb1_twd_clk.common.hw,
+ [CLK_R_APB1_PWM] = &r_apb1_pwm_clk.common.hw,
+ [CLK_R_APB1_BUS_PWM] = &r_apb1_bus_pwm_clk.common.hw,
+ [CLK_R_APB1_PPU] = &r_apb1_ppu_clk.common.hw,
+ [CLK_R_APB2_UART] = &r_apb2_uart_clk.common.hw,
+ [CLK_R_APB2_I2C0] = &r_apb2_i2c0_clk.common.hw,
+ [CLK_R_APB2_I2C1] = &r_apb2_i2c1_clk.common.hw,
+ [CLK_R_APB1_IR] = &r_apb1_ir_rx_clk.common.hw,
+ [CLK_R_APB1_BUS_IR] = &r_apb1_bus_ir_rx_clk.common.hw,
+ [CLK_R_AHB_BUS_RTC] = &r_ahb_bus_rtc_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_a100_r_ccu_resets[] = {
+ [RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
+ [RST_R_APB1_BUS_PWM] = { 0x13c, BIT(16) },
+ [RST_R_APB1_PPU] = { 0x17c, BIT(16) },
+ [RST_R_APB2_UART] = { 0x18c, BIT(16) },
+ [RST_R_APB2_I2C0] = { 0x19c, BIT(16) },
+ [RST_R_APB2_I2C1] = { 0x19c, BIT(17) },
+ [RST_R_APB1_BUS_IR] = { 0x1cc, BIT(16) },
+ [RST_R_AHB_BUS_RTC] = { 0x20c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_a100_r_ccu_desc = {
+ .ccu_clks = sun50i_a100_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a100_r_ccu_clks),
+
+ .hw_clks = &sun50i_a100_r_hw_clks,
+
+ .resets = sun50i_a100_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a100_r_ccu_resets),
+};
+
+static int sun50i_a100_r_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_r_ccu_desc);
+}
+
+static const struct of_device_id sun50i_a100_r_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-a100-r-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_a100_r_ccu_driver = {
+ .probe = sun50i_a100_r_ccu_probe,
+ .driver = {
+ .name = "sun50i-a100-r-ccu",
+ .of_match_table = sun50i_a100_r_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_a100_r_ccu_driver);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.h
new file mode 100644
index 000000000000..3a8f187a51b7
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _CCU_SUN50I_A100_R_H
+#define _CCU_SUN50I_A100_R_H
+
+#include <dt-bindings/clock/sun50i-a100-r-ccu.h>
+#include <dt-bindings/reset/sun50i-a100-r-ccu.h>
+
+#define CLK_R_CPUS 0
+#define CLK_R_AHB 1
+
+/* R_APB1 exported for R_PIO */
+
+#define CLK_R_APB2 3
+
+#define CLK_NUMBER (CLK_R_AHB_BUS_RTC + 1)
+
+#endif /* _CCU_SUN50I_A100_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
new file mode 100644
index 000000000000..81b48c73d389
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -0,0 +1,1276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun50i-a100.h"
+
+#define SUN50I_A100_PLL_SDM_ENABLE BIT(24)
+#define SUN50I_A100_PLL_OUTPUT_ENABLE BIT(27)
+#define SUN50I_A100_PLL_LOCK BIT(28)
+#define SUN50I_A100_PLL_LOCK_ENABLE BIT(29)
+#define SUN50I_A100_PLL_ENABLE BIT(31)
+
+#define SUN50I_A100_PLL_PERIPH1_PATTERN0 0xd1303333
+
+/*
+ * The CPU PLL is actually an NP clock, with P being /1, /2 or /4. However,
+ * P should only be used for output frequencies lower than 288 MHz.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ *
+ * The M factor is present in the register's description, but not in the
+ * frequency formula, and it's documented as "M is only used for backdoor
+ * testing", so it's not modelled and then force to 0.
+ */
+#define SUN50I_A100_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpux", "dcxo24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
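+/*
+ * Illustrative example: with N = 50 and both dividers left at /1,
+ * such a PLL would run at 24 MHz * 50 / 1 / 1 = 1200 MHz.
+ */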
+#define SUN50I_A100_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT("pll-ddr0", "dcxo24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE |
+ CLK_IS_CRITICAL),
+ },
+};
+
+#define SUN50I_A100_PLL_PERIPH0_REG 0x020
+static struct ccu_nkmp pll_periph0_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x020,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph0", "dcxo24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_A100_PLL_PERIPH1_REG 0x028
+static struct ccu_nkmp pll_periph1_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .fixed_post_div = 2,
+ .common = {
+ .reg = 0x028,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-periph1", "dcxo24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+#define SUN50I_A100_PLL_PERIPH1_PATTERN0_REG 0x128
+
+#define SUN50I_A100_PLL_GPU_REG 0x030
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x030,
+ .hw.init = CLK_HW_INIT("pll-gpu", "dcxo24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * For Video PLLs, the output divider is described as "used for testing"
+ * in the user manual. So it's not modelled and forced to 0.
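+ *
+ * These PLLs also have a fixed post-divider of 4 in this driver, so,
+ * illustratively, N = 99 would put the 1x output at
+ * 24 MHz * 99 / 4 = 594 MHz.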
+ */
+#define SUN50I_A100_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x040,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video0", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_A100_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x048,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video1", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_A100_PLL_VIDEO2_REG 0x050
+static struct ccu_nm pll_video2_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x050,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video2", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_A100_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT("pll-ve", "dcxo24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The COM PLL has an m0 divider in addition to the usual N, M
+ * factors. Since we only need one frequency from this PLL, 451.584 MHz,
+ * ignore it for now.
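+ *
+ * For reference, the SDM table entry below reaches that rate from
+ * 24 MHz * 37 / 2 = 444 MHz, with the sigma-delta pattern supplying
+ * the remaining fractional multiplier.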
+ */
+#define SUN50I_A100_PLL_COM_REG 0x060
+static struct ccu_sdm_setting pll_com_sdm_table[] = {
+ { .rate = 451584000, .pattern = 0xc0014396, .m = 2, .n = 37 },
+};
+
+static struct ccu_nm pll_com_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(0, 1),
+ .sdm = _SUNXI_CCU_SDM(pll_com_sdm_table, BIT(24),
+ 0x160, BIT(31)),
+ .common = {
+ .reg = 0x060,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT("pll-com", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN50I_A100_PLL_VIDEO3_REG 0x068
+static struct ccu_nm pll_video3_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .fixed_post_div = 4,
+ .common = {
+ .reg = 0x068,
+ .features = CCU_FEATURE_FIXED_POSTDIV,
+ .hw.init = CLK_HW_INIT("pll-video3", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL has m0, m1 dividers in addition to the usual N, M
+ * factors. Since we only need 4 frequencies from this PLL: 22.5792 MHz,
+ * 24.576 MHz, 90.3168 MHz and 98.304 MHz, ignore them for now.
+ * Enforce the default for them, which is m0 = 1, m1 = 0.
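+ *
+ * As an illustration, the 24.576 MHz entry in the table below uses
+ * n = 40 and m = 20: 24 MHz * 40 / 20 = 48 MHz, nudged up to
+ * 49.152 MHz by the sigma-delta pattern and then halved by the
+ * fixed post-divider.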
+ */
+#define SUN50I_A100_PLL_AUDIO_REG 0x078
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 45158400, .pattern = 0xc001bcd3, .m = 18, .n = 33 },
+ { .rate = 49152000, .pattern = 0xc001eb85, .m = 20, .n = 40 },
+ { .rate = 180633600, .pattern = 0xc001288d, .m = 3, .n = 22 },
+ { .rate = 196608000, .pattern = 0xc001eb85, .m = 5, .n = 40 },
+};
+
+static struct ccu_nm pll_audio_clk = {
+ .enable = SUN50I_A100_PLL_OUTPUT_ENABLE,
+ .lock = SUN50I_A100_PLL_LOCK,
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .fixed_post_div = 2,
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, BIT(24),
+ 0x178, BIT(31)),
+ .common = {
+ .reg = 0x078,
+ .features = CCU_FEATURE_FIXED_POSTDIV |
+ CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT("pll-audio", "dcxo24M",
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const cpux_parents[] = { "dcxo24M", "osc32k",
+ "iosc", "pll-cpux",
+ "pll-periph0" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 3, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x500, 0, 2, 0);
+static SUNXI_CCU_M(cpux_apb_clk, "cpux-apb", "cpux", 0x500, 8, 2, 0);
+
+static const char * const psi_ahb1_ahb2_parents[] = { "dcxo24M", "osc32k",
+ "iosc", "pll-periph0",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents, 0x510,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+
+static const char * const ahb3_apb1_apb2_parents[] = { "dcxo24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+
+static const char * const mbus_parents[] = { "dcxo24M", "pll-ddr0",
+ "pll-periph0",
+ "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents, 0x540,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const de_parents[] = { "pll-com", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de0", de_parents, 0x600,
+ 0, 4, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
+ 0x60c, BIT(0), 0);
+
+static const char * const g2d_parents[] = { "pll-com", "pll-periph0-2x",
+ "pll-video0-2x", "pll-video1-2x",
+ "pll-video2-2x"};
+static SUNXI_CCU_M_WITH_MUX_GATE(g2d_clk, "g2d",
+ g2d_parents,
+ 0x630,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_g2d_clk, "bus-g2d", "psi-ahb1-ahb2",
+ 0x63c, BIT(0), 0);
+
+static const char * const gpu_parents[] = { "pll-gpu" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
+ 0, 2, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
+ 0x67c, BIT(0), 0);
+
+static const char * const ce_parents[] = { "dcxo24M", "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "psi-ahb1-ahb2",
+ 0x68c, BIT(0), 0);
+
+static const char * const ve_parents[] = { "pll-ve" };
+static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "psi-ahb1-ahb2",
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "psi-ahb1-ahb2",
+ 0x71c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "psi-ahb1-ahb2",
+ 0x72c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "psi-ahb1-ahb2",
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "dcxo24M", 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "psi-ahb1-ahb2",
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_psi_clk, "bus-psi", "psi-ahb1-ahb2",
+ 0x79c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_pwm_clk, "bus-pwm", "apb1", 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_iommu_clk, "bus-iommu", "apb1", 0x7bc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(mbus_dma_clk, "mbus-dma", "mbus",
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE(mbus_ve_clk, "mbus-ve", "mbus",
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE(mbus_ce_clk, "mbus-ce", "mbus",
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE(mbus_nand_clk, "mbus-nand", "mbus",
+ 0x804, BIT(5), 0);
+static SUNXI_CCU_GATE(mbus_csi_clk, "mbus-csi", "mbus",
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE(mbus_isp_clk, "mbus-isp", "mbus",
+ 0x804, BIT(9), 0);
+static SUNXI_CCU_GATE(mbus_g2d_clk, "mbus-g2d", "mbus",
+ 0x804, BIT(10), 0);
+
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "psi-ahb1-ahb2",
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const char * const nand_spi_parents[] = { "dcxo24M",
+ "pll-periph0",
+ "pll-periph1",
+ "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand0_clk, "nand0", nand_spi_parents, 0x810,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand1_clk, "nand1", nand_spi_parents, 0x814,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb3", 0x82c, BIT(0), 0);
+
+static const char * const mmc_parents[] = { "dcxo24M", "pll-periph0-2x",
+ "pll-periph1-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ CLK_SET_RATE_NO_REPARENT);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ CLK_SET_RATE_NO_REPARENT);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ CLK_SET_RATE_NO_REPARENT);
+
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb3", 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2", 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2", 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2", 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2", 0x90c, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2", 0x90c, BIT(4), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2", 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2", 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2", 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2c3_clk, "bus-i2c3", "apb2", 0x91c, BIT(3), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", nand_spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", nand_spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi2_clk, "spi2", nand_spi_parents, 0x948,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb3", 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb3", 0x96c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_spi2_clk, "bus-spi2", "ahb3", 0x96c, BIT(2), 0);
+
+static SUNXI_CCU_GATE(emac_25m_clk, "emac-25m", "ahb3", 0x970,
+ BIT(31) | BIT(30), 0);
+
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb3", 0x97c, BIT(0), 0);
+
+static const char * const ir_parents[] = { "osc32k", "iosc",
+ "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir_rx_clk, "ir-rx", ir_parents, 0x990,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ir_rx_clk, "bus-ir-rx", "ahb3", 0x99c, BIT(0), 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_parents, 0x9c0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ir_tx_clk, "bus-ir-tx", "apb1", 0x9cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_gpadc_clk, "bus-gpadc", "apb1", 0x9ec, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1", 0x9fc, BIT(0), 0);
+
+static const char * const audio_parents[] = { "pll-audio", "pll-com-audio" };
+static struct ccu_div i2s0_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa10,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s0",
+ audio_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static struct ccu_div i2s1_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa14,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s1",
+ audio_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static struct ccu_div i2s2_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa18,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s2",
+ audio_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static struct ccu_div i2s3_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa1c,
+ .hw.init = CLK_HW_INIT_PARENTS("i2s3",
+ audio_parents,
+ &ccu_div_ops,
+ CLK_SET_RATE_PARENT),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1", 0xa20, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1", 0xa20, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1", 0xa20, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_i2s3_clk, "bus-i2s3", "apb1", 0xa20, BIT(3), 0);
+
+static struct ccu_div spdif_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa24,
+ .hw.init = CLK_HW_INIT_PARENTS("spdif",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1", 0xa2c, BIT(0), 0);
+
+static struct ccu_div dmic_clk = {
+ .enable = BIT(31),
+ .div = _SUNXI_CCU_DIV_FLAGS(8, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0xa40,
+ .hw.init = CLK_HW_INIT_PARENTS("dmic",
+ audio_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_dmic_clk, "bus-dmic", "apb1", 0xa4c, BIT(0), 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_dac_clk, "audio-codec-dac",
+ audio_parents, 0xa50,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_adc_clk, "audio-codec-adc",
+ audio_parents, 0xa54,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_MUX_GATE(audio_codec_4x_clk, "audio-codec-4x",
+ audio_parents, 0xa58,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_audio_codec_clk, "bus-audio-codec", "apb1", 0xa5c,
+ BIT(0), 0);
+
+/*
+ * There are OHCI 12M clock source selection bits for the 2 USB 2.0 ports.
+ * We will force them to 0 (12M divided from 48M).
+ */
+#define SUN50I_A100_USB0_CLK_REG 0xa70
+#define SUN50I_A100_USB1_CLK_REG 0xa74
+
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc12M", 0xa70, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "dcxo24M", 0xa70, BIT(29), 0);
+
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc12M", 0xa74, BIT(31), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "dcxo24M", 0xa74, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb3", 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb3", 0xa8c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb3", 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb3", 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE(bus_lradc_clk, "bus-lradc", "ahb3", 0xa9c, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dpss_top0_clk, "bus-dpss-top0", "ahb3",
+ 0xabc, BIT(0), 0);
+
+static SUNXI_CCU_GATE(bus_dpss_top1_clk, "bus-dpss-top1", "ahb3",
+ 0xacc, BIT(0), 0);
+
+static const char * const mipi_dsi_parents[] = { "dcxo24M", "pll-periph0-2x",
+ "pll-periph0" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mipi_dsi_clk, "mipi-dsi",
+ mipi_dsi_parents,
+ 0xb24,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb3",
+ 0xb4c, BIT(0), 0);
+
+static const char * const tcon_lcd_parents[] = { "pll-video0-4x",
+ "pll-video1-4x",
+ "pll-video2-4x",
+ "pll-video3-4x",
+ "pll-periph0-2x" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_lcd_clk, "tcon-lcd0",
+ tcon_lcd_parents, 0xb60,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_tcon_lcd_clk, "bus-tcon-lcd0", "ahb3",
+ 0xb7c, BIT(0), 0);
+
+static const char * const ledc_parents[] = { "dcxo24M",
+ "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ledc_clk, "ledc",
+ ledc_parents, 0xbf0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_ledc_clk, "bus-ledc", "ahb3", 0xbfc, BIT(0), 0);
+
+static const char * const csi_top_parents[] = { "pll-periph0-2x",
+ "pll-video0-2x",
+ "pll-video1-2x",
+ "pll-video2-2x",
+ "pll-video3-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_top_clk, "csi-top",
+ csi_top_parents, 0xc04,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const csi0_mclk_parents[] = { "dcxo24M", "pll-video2",
+ "pll-video3", "pll-video0",
+ "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk",
+ csi0_mclk_parents, 0xc08,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const csi1_mclk_parents[] = { "dcxo24M", "pll-video3",
+ "pll-video0", "pll-video1",
+ "pll-video2" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk",
+ csi1_mclk_parents, 0xc0c,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb3", 0xc1c, BIT(0), 0);
+
+static const char * const csi_isp_parents[] = { "pll-periph0-2x",
+ "pll-video0-2x",
+ "pll-video1-2x",
+ "pll-video2-2x",
+ "pll-video3-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_isp_clk, "csi-isp",
+ csi_isp_parents, 0xc20,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+/* Fixed factor clocks */
+static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0);
+
+static CLK_FIXED_FACTOR_HW(pll_com_audio_clk, "pll-com-audio",
+ &pll_com_clk.common.hw,
+ 5, 1, CLK_SET_RATE_PARENT);
+
+static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x",
+ &pll_periph0_clk.common.hw,
+ 1, 2, 0);
+
+static CLK_FIXED_FACTOR_HW(pll_periph1_2x_clk, "pll-periph1-2x",
+ &pll_periph1_clk.common.hw,
+ 1, 2, 0);
+
+static const struct clk_hw *pll_video0_parents[] = {
+ &pll_video0_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video0_4x_clk, "pll-video0-4x",
+ pll_video0_parents,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video0_2x_clk, "pll-video0-2x",
+ pll_video0_parents,
+ 1, 2, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *pll_video1_parents[] = {
+ &pll_video1_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video1_4x_clk, "pll-video1-4x",
+ pll_video1_parents,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video1_2x_clk, "pll-video1-2x",
+ pll_video1_parents,
+ 1, 2, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *pll_video2_parents[] = {
+ &pll_video2_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video2_4x_clk, "pll-video2-4x",
+ pll_video2_parents,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video2_2x_clk, "pll-video2-2x",
+ pll_video2_parents,
+ 1, 2, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *pll_video3_parents[] = {
+ &pll_video3_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video3_4x_clk, "pll-video3-4x",
+ pll_video3_parents,
+ 1, 4, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video3_2x_clk, "pll-video3-2x",
+ pll_video3_parents,
+ 1, 2, CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun50i_a100_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_periph1_clk.common,
+ &pll_gpu_clk.common,
+ &pll_video0_clk.common,
+ &pll_video1_clk.common,
+ &pll_video2_clk.common,
+ &pll_video3_clk.common,
+ &pll_ve_clk.common,
+ &pll_com_clk.common,
+ &pll_audio_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb1_ahb2_clk.common,
+ &ahb3_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &mbus_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &gpu_clk.common,
+ &bus_gpu_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_psi_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_nand_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_isp_clk.common,
+ &mbus_g2d_clk.common,
+ &bus_dram_clk.common,
+ &nand0_clk.common,
+ &nand1_clk.common,
+ &bus_nand_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &spi2_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_spi2_clk.common,
+ &emac_25m_clk.common,
+ &bus_emac_clk.common,
+ &ir_rx_clk.common,
+ &bus_ir_rx_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &bus_gpadc_clk.common,
+ &bus_ths_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s3_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_codec_dac_clk.common,
+ &audio_codec_adc_clk.common,
+ &audio_codec_4x_clk.common,
+ &bus_audio_codec_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_phy0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_phy1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_otg_clk.common,
+ &bus_lradc_clk.common,
+ &bus_dpss_top0_clk.common,
+ &bus_dpss_top1_clk.common,
+ &mipi_dsi_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &tcon_lcd_clk.common,
+ &bus_tcon_lcd_clk.common,
+ &ledc_clk.common,
+ &bus_ledc_clk.common,
+ &csi_top_clk.common,
+ &csi0_mclk_clk.common,
+ &csi1_mclk_clk.common,
+ &bus_csi_clk.common,
+ &csi_isp_clk.common,
+};
+
+static struct clk_hw_onecell_data sun50i_a100_hw_clks = {
+ .hws = {
+ [CLK_OSC12M] = &osc12M_clk.hw,
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.hw,
+ [CLK_PLL_VIDEO2] = &pll_video2_clk.common.hw,
+ [CLK_PLL_VIDEO2_2X] = &pll_video2_2x_clk.hw,
+ [CLK_PLL_VIDEO2_4X] = &pll_video2_4x_clk.hw,
+ [CLK_PLL_VIDEO3] = &pll_video3_clk.common.hw,
+ [CLK_PLL_VIDEO3_2X] = &pll_video3_2x_clk.hw,
+ [CLK_PLL_VIDEO3_4X] = &pll_video3_4x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_COM] = &pll_com_clk.common.hw,
+ [CLK_PLL_COM_AUDIO] = &pll_com_audio_clk.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB1_AHB2] = &psi_ahb1_ahb2_clk.common.hw,
+ [CLK_AHB3] = &ahb3_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PSI] = &bus_psi_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_NAND] = &mbus_nand_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_ISP] = &mbus_isp_clk.common.hw,
+ [CLK_MBUS_G2D] = &mbus_g2d_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_NAND0] = &nand0_clk.common.hw,
+ [CLK_NAND1] = &nand1_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_SPI2] = &spi2_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_SPI2] = &bus_spi2_clk.common.hw,
+ [CLK_EMAC_25M] = &emac_25m_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_IR_RX] = &ir_rx_clk.common.hw,
+ [CLK_BUS_IR_RX] = &bus_ir_rx_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_BUS_GPADC] = &bus_gpadc_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_I2S3] = &i2s3_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_DAC] = &audio_codec_dac_clk.common.hw,
+ [CLK_AUDIO_ADC] = &audio_codec_adc_clk.common.hw,
+ [CLK_AUDIO_4X] = &audio_codec_4x_clk.common.hw,
+ [CLK_BUS_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw,
+ [CLK_BUS_DPSS_TOP0] = &bus_dpss_top0_clk.common.hw,
+ [CLK_BUS_DPSS_TOP1] = &bus_dpss_top1_clk.common.hw,
+ [CLK_MIPI_DSI] = &mipi_dsi_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_TCON_LCD] = &tcon_lcd_clk.common.hw,
+ [CLK_BUS_TCON_LCD] = &bus_tcon_lcd_clk.common.hw,
+ [CLK_LEDC] = &ledc_clk.common.hw,
+ [CLK_BUS_LEDC] = &bus_ledc_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
+ [CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_CSI_ISP] = &csi_isp_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun50i_a100_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_G2D] = { 0x63c, BIT(16) },
+ [RST_BUS_GPU] = { 0x67c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX] = { 0x71c, BIT(16) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PSI] = { 0x79c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_NAND] = { 0x82c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_SPI2] = { 0x96c, BIT(18) },
+ [RST_BUS_EMAC] = { 0x97c, BIT(16) },
+ [RST_BUS_IR_RX] = { 0x99c, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_GPADC] = { 0x9ec, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_I2S0] = { 0xa20, BIT(16) },
+ [RST_BUS_I2S1] = { 0xa20, BIT(17) },
+ [RST_BUS_I2S2] = { 0xa20, BIT(18) },
+ [RST_BUS_I2S3] = { 0xa20, BIT(19) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO_CODEC] = { 0xa5c, BIT(16) },
+
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+
+ [RST_BUS_LRADC] = { 0xa9c, BIT(16) },
+ [RST_BUS_DPSS_TOP0] = { 0xabc, BIT(16) },
+ [RST_BUS_DPSS_TOP1] = { 0xacc, BIT(16) },
+ [RST_BUS_MIPI_DSI] = { 0xb4c, BIT(16) },
+ [RST_BUS_TCON_LCD] = { 0xb7c, BIT(16) },
+ [RST_BUS_LVDS] = { 0xbac, BIT(16) },
+ [RST_BUS_LEDC] = { 0xbfc, BIT(16) },
+ [RST_BUS_CSI] = { 0xc1c, BIT(16) },
+ [RST_BUS_CSI_ISP] = { 0xc2c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun50i_a100_ccu_desc = {
+ .ccu_clks = sun50i_a100_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_a100_ccu_clks),
+
+ .hw_clks = &sun50i_a100_hw_clks,
+
+ .resets = sun50i_a100_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a100_ccu_resets),
+};
+
+static const u32 sun50i_a100_pll_regs[] = {
+ SUN50I_A100_PLL_CPUX_REG,
+ SUN50I_A100_PLL_DDR0_REG,
+ SUN50I_A100_PLL_PERIPH0_REG,
+ SUN50I_A100_PLL_PERIPH1_REG,
+ SUN50I_A100_PLL_GPU_REG,
+ SUN50I_A100_PLL_VIDEO0_REG,
+ SUN50I_A100_PLL_VIDEO1_REG,
+ SUN50I_A100_PLL_VIDEO2_REG,
+ SUN50I_A100_PLL_VIDEO3_REG,
+ SUN50I_A100_PLL_VE_REG,
+ SUN50I_A100_PLL_COM_REG,
+ SUN50I_A100_PLL_AUDIO_REG,
+};
+
+static const u32 sun50i_a100_pll_video_regs[] = {
+ SUN50I_A100_PLL_VIDEO0_REG,
+ SUN50I_A100_PLL_VIDEO1_REG,
+ SUN50I_A100_PLL_VIDEO2_REG,
+ SUN50I_A100_PLL_VIDEO3_REG,
+};
+
+static const u32 sun50i_a100_usb2_clk_regs[] = {
+ SUN50I_A100_USB0_CLK_REG,
+ SUN50I_A100_USB1_CLK_REG,
+};
+
+static struct ccu_pll_nb sun50i_a100_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ /* copied from pll_cpux_clk */
+ .enable = BIT(27),
+ .lock = BIT(28),
+};
+
+static struct ccu_mux_nb sun50i_a100_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1,
+ .bypass_index = 4, /* index of pll periph0 */
+};
+
+static int sun50i_a100_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+ int i, ret;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /*
+ * Enable the lock and PLL-enable bits on all PLLs.
+ *
+ * Due to the current design, multiple PLLs share one power switch,
+ * so toggling a PLL's power can easily cause stability problems.
+ * We therefore enable them all by default here; "disabling" a PLL
+ * afterwards only gates its output.
+ */
+ for (i = 0; i < ARRAY_SIZE(sun50i_a100_pll_regs); i++) {
+ val = readl(reg + sun50i_a100_pll_regs[i]);
+ val |= SUN50I_A100_PLL_LOCK_ENABLE | SUN50I_A100_PLL_ENABLE;
+ writel(val, reg + sun50i_a100_pll_regs[i]);
+ }
+
+ /*
+ * In order to pass EMI certification, the SDM function of
+ * PLL-PERIPH1 is enabled, while the frequency is still
+ * calculated using the previous division factors.
+ */
+ writel(SUN50I_A100_PLL_PERIPH1_PATTERN0,
+ reg + SUN50I_A100_PLL_PERIPH1_PATTERN0_REG);
+
+ val = readl(reg + SUN50I_A100_PLL_PERIPH1_REG);
+ val |= SUN50I_A100_PLL_SDM_ENABLE;
+ writel(val, reg + SUN50I_A100_PLL_PERIPH1_REG);
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+ * See the comment before pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(sun50i_a100_pll_video_regs); i++) {
+ val = readl(reg + sun50i_a100_pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + sun50i_a100_pll_video_regs[i]);
+ }
+
+ /*
+ * Enforce m1 = 0, m0 = 1 for Audio PLL
+ *
+ * See the comment before pll-audio definition for the reason.
+ */
+ val = readl(reg + SUN50I_A100_PLL_AUDIO_REG);
+ val &= ~BIT(1);
+ val |= BIT(0);
+ writel(val, reg + SUN50I_A100_PLL_AUDIO_REG);
+
+ /*
+ * Force the OHCI 12M clock sources to 00 (12 MHz divided from 48 MHz).
+ *
+ * This clock mux is still mysterious, and the code just enforces
+ * it to have a valid clock parent.
+ */
+ for (i = 0; i < ARRAY_SIZE(sun50i_a100_usb2_clk_regs); i++) {
+ val = readl(reg + sun50i_a100_usb2_clk_regs[i]);
+ val &= ~GENMASK(25, 24);
+ writel(val, reg + sun50i_a100_usb2_clk_regs[i]);
+ }
+
+ ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Gate then ungate PLL CPU after any rate changes */
+ ccu_pll_notifier_register(&sun50i_a100_pll_cpu_nb);
+
+ /* Reparent CPU during PLL CPU rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_a100_cpu_nb);
+
+ return 0;
+}
+
+static const struct of_device_id sun50i_a100_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-a100-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_a100_ccu_driver = {
+ .probe = sun50i_a100_ccu_probe,
+ .driver = {
+ .name = "sun50i-a100-ccu",
+ .of_match_table = sun50i_a100_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_a100_ccu_driver);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.h b/drivers/clk/sunxi-ng/ccu-sun50i-a100.h
new file mode 100644
index 000000000000..21ce92bb1d5f
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ */
+
+#ifndef _CCU_SUN50I_A100_H_
+#define _CCU_SUN50I_A100_H_
+
+#include <dt-bindings/clock/sun50i-a100-ccu.h>
+#include <dt-bindings/reset/sun50i-a100-ccu.h>
+
+#define CLK_OSC12M 0
+#define CLK_PLL_CPUX 1
+#define CLK_PLL_DDR0 2
+
+/* PLL_PERIPH0 exported for PRCM */
+
+#define CLK_PLL_PERIPH0_2X 4
+#define CLK_PLL_PERIPH1 5
+#define CLK_PLL_PERIPH1_2X 6
+#define CLK_PLL_GPU 7
+#define CLK_PLL_VIDEO0 8
+#define CLK_PLL_VIDEO0_2X 9
+#define CLK_PLL_VIDEO0_4X 10
+#define CLK_PLL_VIDEO1 11
+#define CLK_PLL_VIDEO1_2X 12
+#define CLK_PLL_VIDEO1_4X 13
+#define CLK_PLL_VIDEO2 14
+#define CLK_PLL_VIDEO2_2X 15
+#define CLK_PLL_VIDEO2_4X 16
+#define CLK_PLL_VIDEO3 17
+#define CLK_PLL_VIDEO3_2X 18
+#define CLK_PLL_VIDEO3_4X 19
+#define CLK_PLL_VE 20
+#define CLK_PLL_COM 21
+#define CLK_PLL_COM_AUDIO 22
+#define CLK_PLL_AUDIO 23
+
+/* CPUX clock exported for DVFS */
+
+#define CLK_AXI 25
+#define CLK_CPUX_APB 26
+#define CLK_PSI_AHB1_AHB2 27
+#define CLK_AHB3 28
+
+/* APB1 clock exported for PIO */
+
+#define CLK_APB2 30
+
+/* All module clocks and bus gates are exported except DRAM */
+
+#define CLK_BUS_DRAM 58
+
+#define CLK_NUMBER (CLK_CSI_ISP + 1)
+
+#endif /* _CCU_SUN50I_A100_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 23bfe1d12f21..84153418453f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -45,18 +45,29 @@ static struct ccu_nkmp pll_cpu_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hardcode the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
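+ *
+ * For instance, the 24.576 MHz entry below uses n = 14, m = 14
+ * (24 MHz * 14 / 14 = 24 MHz), with the sigma-delta pattern
+ * contributing the remaining 0.576 MHz.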
*/
#define SUN8I_R40_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0",
"osc24M", 0x0010,
@@ -952,10 +963,10 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
&pll_audio_base_clk.common.hw
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
- 4, 1, CLK_SET_RATE_PARENT);
+ 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
2, 1, CLK_SET_RATE_PARENT);
@@ -1307,10 +1318,10 @@ static int sun8i_r40_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_R40_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_R40_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_R40_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN8I_R40_PLL_MIPI_REG);
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
index 51fd0ec2a2d0..672ca8c184d2 100644
--- a/drivers/clk/tegra/clk-tegra210-emc.c
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -128,7 +128,7 @@ static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned int i;
int err;
- if (!provider || !provider->configs || provider->num_configs == 0)
+ if (!provider->configs || provider->num_configs == 0)
return -EINVAL;
for (i = 0; i < provider->num_configs; i++) {
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index 1cae226759dd..f6f8a409f148 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -82,7 +82,12 @@ static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
*/
int omap2_clk_deny_idle(struct clk *clk)
{
- struct clk_hw *hw = __clk_get_hw(clk);
+ struct clk_hw *hw;
+
+ if (!clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(clk);
if (omap2_clk_is_hw_omap(hw)) {
struct clk_hw_omap *c = to_clk_hw_omap(hw);
@@ -101,7 +106,12 @@ int omap2_clk_deny_idle(struct clk *clk)
*/
int omap2_clk_allow_idle(struct clk *clk)
{
- struct clk_hw *hw = __clk_get_hw(clk);
+ struct clk_hw *hw;
+
+ if (!clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(clk);
if (omap2_clk_is_hw_omap(hw)) {
struct clk_hw_omap *c = to_clk_hw_omap(hw);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index b4cf578a69e1..4e27f88062e7 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -637,6 +637,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst
{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { DRA7_L4SEC_SHAM2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index ee56306f79d5..700b7f44f671 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -148,10 +148,12 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
if (!omap2_clk_is_hw_omap(clk_hw)) {
pr_warn("can't setup clkdm for basic clk %s\n",
__clk_get_name(clk));
+ clk_put(clk);
continue;
}
to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
omap2_init_clk_clkdm(clk_hw);
+ clk_put(clk);
}
}
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
index 1a33a08abf2f..a2f01a4da127 100644
--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
+++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
@@ -90,7 +90,7 @@ struct clk_hw *uniphier_clk_register_cpugear(struct device *dev,
init.ops = &uniphier_clk_cpugear_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = data->parent_names;
- init.num_parents = data->num_parents,
+ init.num_parents = data->num_parents;
gear->regmap = regmap;
gear->regbase = data->regbase;
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index c0f4631601e2..462c84321b2d 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -70,7 +70,7 @@ struct clk_hw *uniphier_clk_register_mux(struct device *dev,
init.ops = &uniphier_clk_mux_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = data->parent_names;
- init.num_parents = data->num_parents,
+ init.num_parents = data->num_parents;
mux->regmap = regmap;
mux->reg = data->reg;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 09aa44cb8a91..ba04cb381cd3 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
return read_hv_clock_tsc();
}
-static u64 read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_sched_clock_tsc(void)
{
return (read_hv_clock_tsc() - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
return read_hv_clock_msr();
}
-static u64 read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_sched_clock_msr(void)
{
return (read_hv_clock_msr() - hv_sched_clock_offset) *
(NSEC_PER_SEC / HV_CLOCK_HZ);
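Both Hyper-V sched clock reads gain notrace here. The usual rationale for the
annotation is that ftrace timestamps events through sched_clock itself, so
letting the function tracer instrument the read path would recurse. A minimal
sketch of the pattern (demo_* names are hypothetical; the registration call is
the same one this file and timer-sp804.c already use):

#include <linux/sched_clock.h>
#include <linux/time64.h>

static u64 demo_cycles;	/* stand-in for a real hardware counter */

/* notrace keeps ftrace from instrumenting its own time source. */
static u64 notrace demo_sched_clock_read(void)
{
	return ++demo_cycles;
}

static void __init demo_sched_clock_init(void)
{
	/* 64-bit counter ticking at 1 GHz (hypothetical). */
	sched_clock_register(demo_sched_clock_read, 64, NSEC_PER_SEC);
}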
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 2e64d984c83a..efe8cad8f2a5 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -149,9 +149,9 @@ static int __init mps2_clockevent_init(struct device_node *np)
ce->clkevt.rating = 200;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.cpumask = cpu_possible_mask;
- ce->clkevt.set_state_shutdown = mps2_timer_shutdown,
- ce->clkevt.set_state_periodic = mps2_timer_set_periodic,
- ce->clkevt.set_state_oneshot = mps2_timer_shutdown,
+ ce->clkevt.set_state_shutdown = mps2_timer_shutdown;
+ ce->clkevt.set_state_periodic = mps2_timer_set_periodic;
+ ce->clkevt.set_state_oneshot = mps2_timer_shutdown;
ce->clkevt.set_next_event = mps2_timer_set_next_event;
/* Ensure timer is disabled */
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index edf1a46269f1..e3acc3c631b7 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -181,12 +181,12 @@ static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
clr = TIMER0_25MHZ;
local_timer_ctrl_clrset(clr, set);
- evt->name = "armada_370_xp_per_cpu_tick",
+ evt->name = "armada_370_xp_per_cpu_tick";
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC;
- evt->shift = 32,
- evt->rating = 300,
- evt->set_next_event = armada_370_xp_clkevt_next_event,
+ evt->shift = 32;
+ evt->rating = 300;
+ evt->set_next_event = armada_370_xp_clkevt_next_event;
evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
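This hunk, like the uniphier and mps2 ones before it, replaces trailing commas
with semicolons. The slip compiles cleanly because the comma operator fuses the
assignments into a single expression statement; a contrived illustration, not
from the patch:

struct demo_evt {
	int shift;
	int rating;
};

static void init_with_commas(struct demo_evt *e)
{
	/* One statement: the comma operator chains both assignments. */
	e->shift = 32,
	e->rating = 300;
}

Behavior is identical here, but the fused statement breaks silently the moment
someone wraps one "line" in an if () or reorders the list, which is why these
are worth fixing even with no functional change.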
diff --git a/drivers/clocksource/timer-probe.c b/drivers/clocksource/timer-probe.c
index ee9574da53c0..b7860bc0db4b 100644
--- a/drivers/clocksource/timer-probe.c
+++ b/drivers/clocksource/timer-probe.c
@@ -11,7 +11,7 @@
extern struct of_device_id __timer_of_table[];
static const struct of_device_id __timer_of_table_sentinel
- __used __section(__timer_of_table_end);
+ __used __section("__timer_of_table_end");
void __init timer_probe(void)
{
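The sentinel keeps its placement; only the __section() argument changes from a
bare identifier to a string literal, following the treewide conversion of that
macro. Post-conversion usage looks like this (hypothetical variable, for
illustration only):

/* Section names are now quoted strings rather than bare tokens. */
static const int demo_entry __used __section(".init.rodata") = 0;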
diff --git a/drivers/clocksource/timer-sp.h b/drivers/clocksource/timer-sp.h
index b2037eb94a41..811f840be0e5 100644
--- a/drivers/clocksource/timer-sp.h
+++ b/drivers/clocksource/timer-sp.h
@@ -10,6 +10,7 @@
*
* Every SP804 contains two identical timers.
*/
+#define NR_TIMERS 2
#define TIMER_1_BASE 0x00
#define TIMER_2_BASE 0x20
@@ -29,3 +30,34 @@
#define TIMER_RIS 0x10 /* CVR ro */
#define TIMER_MIS 0x14 /* CVR ro */
#define TIMER_BGLOAD 0x18 /* CVR rw */
+
+struct sp804_timer {
+ int load;
+ int load_h;
+ int value;
+ int value_h;
+ int ctrl;
+ int intclr;
+ int ris;
+ int mis;
+ int bgload;
+ int bgload_h;
+ int timer_base[NR_TIMERS];
+ int width;
+};
+
+struct sp804_clkevt {
+ void __iomem *base;
+ void __iomem *load;
+ void __iomem *load_h;
+ void __iomem *value;
+ void __iomem *value_h;
+ void __iomem *ctrl;
+ void __iomem *intclr;
+ void __iomem *ris;
+ void __iomem *mis;
+ void __iomem *bgload;
+ void __iomem *bgload_h;
+ unsigned long reload;
+ int width;
+};
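With the register map captured in struct sp804_timer and the derived per-timer
pointers in struct sp804_clkevt, supporting another SP804-compatible block is a
matter of filling in a descriptor. A hypothetical example, mirroring the
descriptors defined in timer-sp804.c below:

/* Hypothetical variant descriptor; the offsets shown are the standard
 * SP804 ones from the #defines above. */
static struct sp804_timer __initdata demo_sp804_timer = {
	.load		= TIMER_LOAD,
	.value		= TIMER_VALUE,
	.ctrl		= TIMER_CTRL,
	.intclr		= TIMER_INTCLR,
	.timer_base	= { TIMER_1_BASE, TIMER_2_BASE },
	.width		= 32,
};

A matching TIMER_OF_DECLARE() wrapper, like the arm/hisi pair at the end of
timer-sp804.c, is all that remains.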
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 5cd0abf9b396..6e8ad4a4ea3c 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -18,15 +18,57 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
-#include <clocksource/timer-sp804.h>
-
#include "timer-sp.h"
-static long __init sp804_get_clock_rate(struct clk *clk)
+/* Hisilicon 64-bit timer (a variant of the ARM SP804) */
+#define HISI_TIMER_1_BASE 0x00
+#define HISI_TIMER_2_BASE 0x40
+#define HISI_TIMER_LOAD 0x00
+#define HISI_TIMER_LOAD_H 0x04
+#define HISI_TIMER_VALUE 0x08
+#define HISI_TIMER_VALUE_H 0x0c
+#define HISI_TIMER_CTRL 0x10
+#define HISI_TIMER_INTCLR 0x14
+#define HISI_TIMER_RIS 0x18
+#define HISI_TIMER_MIS 0x1c
+#define HISI_TIMER_BGLOAD 0x20
+#define HISI_TIMER_BGLOAD_H 0x24
+
+struct sp804_timer __initdata arm_sp804_timer = {
+ .load = TIMER_LOAD,
+ .value = TIMER_VALUE,
+ .ctrl = TIMER_CTRL,
+ .intclr = TIMER_INTCLR,
+ .timer_base = {TIMER_1_BASE, TIMER_2_BASE},
+ .width = 32,
+};
+
+struct sp804_timer __initdata hisi_sp804_timer = {
+ .load = HISI_TIMER_LOAD,
+ .load_h = HISI_TIMER_LOAD_H,
+ .value = HISI_TIMER_VALUE,
+ .value_h = HISI_TIMER_VALUE_H,
+ .ctrl = HISI_TIMER_CTRL,
+ .intclr = HISI_TIMER_INTCLR,
+ .timer_base = {HISI_TIMER_1_BASE, HISI_TIMER_2_BASE},
+ .width = 64,
+};
+
+static struct sp804_clkevt sp804_clkevt[NR_TIMERS];
+
+static long __init sp804_get_clock_rate(struct clk *clk, const char *name)
{
long rate;
int err;
+ if (!clk)
+ clk = clk_get_sys("sp804", name);
+ if (IS_ERR(clk)) {
+ pr_err("sp804: %s clock not found: %ld\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
err = clk_prepare(clk);
if (err) {
pr_err("sp804: clock failed to prepare: %d\n", err);
@@ -53,50 +95,57 @@ static long __init sp804_get_clock_rate(struct clk *clk)
return rate;
}
-static void __iomem *sched_clock_base;
-
-static u64 notrace sp804_read(void)
+static struct sp804_clkevt * __init sp804_clkevt_get(void __iomem *base)
{
- return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
+ int i;
+
+ for (i = 0; i < NR_TIMERS; i++) {
+ if (sp804_clkevt[i].base == base)
+ return &sp804_clkevt[i];
+ }
+
+	/* This should be unreachable: sp804_clkevt_init() fills in every base */
+ WARN_ON(1);
+
+ return NULL;
}
-void __init sp804_timer_disable(void __iomem *base)
+static struct sp804_clkevt *sched_clkevt;
+
+static u64 notrace sp804_read(void)
{
- writel(0, base + TIMER_CTRL);
+ return ~readl_relaxed(sched_clkevt->value);
}
-int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
- const char *name,
- struct clk *clk,
- int use_sched_clock)
+int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
+ const char *name,
+ struct clk *clk,
+ int use_sched_clock)
{
long rate;
+ struct sp804_clkevt *clkevt;
- if (!clk) {
- clk = clk_get_sys("sp804", name);
- if (IS_ERR(clk)) {
- pr_err("sp804: clock not found: %d\n",
- (int)PTR_ERR(clk));
- return PTR_ERR(clk);
- }
- }
-
- rate = sp804_get_clock_rate(clk);
+ rate = sp804_get_clock_rate(clk, name);
if (rate < 0)
return -EINVAL;
- /* setup timer 0 as free-running clocksource */
- writel(0, base + TIMER_CTRL);
- writel(0xffffffff, base + TIMER_LOAD);
- writel(0xffffffff, base + TIMER_VALUE);
+ clkevt = sp804_clkevt_get(base);
+
+ writel(0, clkevt->ctrl);
+ writel(0xffffffff, clkevt->load);
+ writel(0xffffffff, clkevt->value);
+ if (clkevt->width == 64) {
+ writel(0xffffffff, clkevt->load_h);
+ writel(0xffffffff, clkevt->value_h);
+ }
writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
- base + TIMER_CTRL);
+ clkevt->ctrl);
- clocksource_mmio_init(base + TIMER_VALUE, name,
+ clocksource_mmio_init(clkevt->value, name,
rate, 200, 32, clocksource_mmio_readl_down);
if (use_sched_clock) {
- sched_clock_base = base;
+ sched_clkevt = clkevt;
sched_clock_register(sp804_read, 32, rate);
}
@@ -104,8 +153,7 @@ int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
}
-static void __iomem *clkevt_base;
-static unsigned long clkevt_reload;
+static struct sp804_clkevt *common_clkevt;
/*
* IRQ handler for the timer
@@ -115,7 +163,7 @@ static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
struct clock_event_device *evt = dev_id;
/* clear the interrupt */
- writel(1, clkevt_base + TIMER_INTCLR);
+ writel(1, common_clkevt->intclr);
evt->event_handler(evt);
@@ -124,7 +172,7 @@ static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
static inline void timer_shutdown(struct clock_event_device *evt)
{
- writel(0, clkevt_base + TIMER_CTRL);
+ writel(0, common_clkevt->ctrl);
}
static int sp804_shutdown(struct clock_event_device *evt)
@@ -139,8 +187,8 @@ static int sp804_set_periodic(struct clock_event_device *evt)
TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
timer_shutdown(evt);
- writel(clkevt_reload, clkevt_base + TIMER_LOAD);
- writel(ctrl, clkevt_base + TIMER_CTRL);
+ writel(common_clkevt->reload, common_clkevt->load);
+ writel(ctrl, common_clkevt->ctrl);
return 0;
}
@@ -150,8 +198,8 @@ static int sp804_set_next_event(unsigned long next,
unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
TIMER_CTRL_ONESHOT | TIMER_CTRL_ENABLE;
- writel(next, clkevt_base + TIMER_LOAD);
- writel(ctrl, clkevt_base + TIMER_CTRL);
+ writel(next, common_clkevt->load);
+ writel(ctrl, common_clkevt->ctrl);
return 0;
}
@@ -168,30 +216,23 @@ static struct clock_event_device sp804_clockevent = {
.rating = 300,
};
-int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+int __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
+ struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
- if (!clk)
- clk = clk_get_sys("sp804", name);
- if (IS_ERR(clk)) {
- pr_err("sp804: %s clock not found: %d\n", name,
- (int)PTR_ERR(clk));
- return PTR_ERR(clk);
- }
-
- rate = sp804_get_clock_rate(clk);
+ rate = sp804_get_clock_rate(clk, name);
if (rate < 0)
return -EINVAL;
- clkevt_base = base;
- clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
+ common_clkevt = sp804_clkevt_get(base);
+ common_clkevt->reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
evt->cpumask = cpu_possible_mask;
- writel(0, base + TIMER_CTRL);
+ writel(0, common_clkevt->ctrl);
if (request_irq(irq, sp804_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"timer", &sp804_clockevent))
@@ -201,10 +242,33 @@ int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct
return 0;
}
-static int __init sp804_of_init(struct device_node *np)
+static void __init sp804_clkevt_init(struct sp804_timer *timer, void __iomem *base)
+{
+ int i;
+
+ for (i = 0; i < NR_TIMERS; i++) {
+ void __iomem *timer_base;
+ struct sp804_clkevt *clkevt;
+
+ timer_base = base + timer->timer_base[i];
+ clkevt = &sp804_clkevt[i];
+ clkevt->base = timer_base;
+ clkevt->load = timer_base + timer->load;
+ clkevt->load_h = timer_base + timer->load_h;
+ clkevt->value = timer_base + timer->value;
+ clkevt->value_h = timer_base + timer->value_h;
+ clkevt->ctrl = timer_base + timer->ctrl;
+ clkevt->intclr = timer_base + timer->intclr;
+ clkevt->width = timer->width;
+ }
+}
+
+static int __init sp804_of_init(struct device_node *np, struct sp804_timer *timer)
{
static bool initialized = false;
void __iomem *base;
+ void __iomem *timer1_base;
+ void __iomem *timer2_base;
int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
@@ -214,9 +278,12 @@ static int __init sp804_of_init(struct device_node *np)
if (!base)
return -ENXIO;
+ timer1_base = base + timer->timer_base[0];
+ timer2_base = base + timer->timer_base[1];
+
/* Ensure timers are disabled */
- writel(0, base + TIMER_CTRL);
- writel(0, base + TIMER_2_BASE + TIMER_CTRL);
+ writel(0, timer1_base + timer->ctrl);
+ writel(0, timer2_base + timer->ctrl);
if (initialized || !of_device_is_available(np)) {
ret = -EINVAL;
@@ -242,24 +309,27 @@ static int __init sp804_of_init(struct device_node *np)
if (irq <= 0)
goto err;
+ sp804_clkevt_init(timer, base);
+
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
- ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+ ret = sp804_clockevents_init(timer2_base, irq, clk2, name);
if (ret)
goto err;
- ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ ret = sp804_clocksource_and_sched_clock_init(timer1_base,
+ name, clk1, 1);
if (ret)
goto err;
} else {
- ret = __sp804_clockevents_init(base, irq, clk1 , name);
+ ret = sp804_clockevents_init(timer1_base, irq, clk1, name);
if (ret)
goto err;
- ret =__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
- name, clk2, 1);
+ ret = sp804_clocksource_and_sched_clock_init(timer2_base,
+ name, clk2, 1);
if (ret)
goto err;
}
@@ -270,7 +340,18 @@ err:
iounmap(base);
return ret;
}
-TIMER_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
+
+static int __init arm_sp804_of_init(struct device_node *np)
+{
+ return sp804_of_init(np, &arm_sp804_timer);
+}
+TIMER_OF_DECLARE(sp804, "arm,sp804", arm_sp804_of_init);
+
+static int __init hisi_sp804_of_init(struct device_node *np)
+{
+ return sp804_of_init(np, &hisi_sp804_timer);
+}
+TIMER_OF_DECLARE(hisi_sp804, "hisilicon,sp804", hisi_sp804_of_init);
static int __init integrator_cp_of_init(struct device_node *np)
{
@@ -293,13 +374,16 @@ static int __init integrator_cp_of_init(struct device_node *np)
}
/* Ensure timer is disabled */
- writel(0, base + TIMER_CTRL);
+ writel(0, base + arm_sp804_timer.ctrl);
if (init_count == 2 || !of_device_is_available(np))
goto err;
+ sp804_clkevt_init(&arm_sp804_timer, base);
+
if (!init_count) {
- ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+ ret = sp804_clocksource_and_sched_clock_init(base,
+ name, clk, 0);
if (ret)
goto err;
} else {
@@ -307,7 +391,7 @@ static int __init integrator_cp_of_init(struct device_node *np)
if (irq <= 0)
goto err;
- ret = __sp804_clockevents_init(base, irq, clk, name);
+ ret = sp804_clockevents_init(base, irq, clk, name);
if (ret)
goto err;
}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 2d22d6bf52f2..7d59d18c6f26 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -197,17 +197,12 @@ int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
- int err;
struct cn_dev *dev = &cdev;
if (!cn_already_initialized)
return -EAGAIN;
- err = cn_queue_add_callback(dev->cbdev, name, id, callback);
- if (err)
- return err;
-
- return 0;
+ return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index b7b252c5addf..039c54a78aa5 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -253,7 +253,7 @@ static struct counter_count mchp_tc_counts[] = {
},
};
-static struct counter_ops mchp_tc_ops = {
+static const struct counter_ops mchp_tc_ops = {
.signal_read = mchp_tc_count_signal_read,
.count_read = mchp_tc_count_read,
.function_get = mchp_tc_count_function_get,
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 1ff07faef27f..e27771df8e23 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -439,7 +439,7 @@ static int ti_eqep_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
counter_unregister(&priv->counter);
- pm_runtime_put_sync(dev),
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return 0;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2c7171e0b001..85de313ddec2 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,6 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
config CPU_FREQ_DEFAULT_GOV_ONDEMAND
bool "ondemand"
+ depends on !(X86_INTEL_PSTATE && SMP)
select CPU_FREQ_GOV_ONDEMAND
select CPU_FREQ_GOV_PERFORMANCE
help
@@ -83,6 +84,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
bool "conservative"
+ depends on !(X86_INTEL_PSTATE && SMP)
select CPU_FREQ_GOV_CONSERVATIVE
select CPU_FREQ_GOV_PERFORMANCE
help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index cb72fb507d57..015ec0c02835 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -196,7 +196,6 @@ config ARM_S3C24XX_CPUFREQ_DEBUGFS
config ARM_S3C2410_CPUFREQ
bool
depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
- select S3C2410_CPUFREQ_UTILS
help
CPU Frequency scaling support for S3C2410
@@ -233,7 +232,6 @@ config ARM_S3C2416_CPUFREQ_VCORESCALE
config ARM_S3C2440_CPUFREQ
bool "S3C2440/S3C2442 CPU Frequency scaling support"
depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
- select S3C2410_CPUFREQ_UTILS
default y
help
CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
@@ -283,7 +281,7 @@ config ARM_SPEAR_CPUFREQ
config ARM_STI_CPUFREQ
tristate "STi CPUFreq support"
- depends on SOC_STIH407
+ depends on CPUFREQ_DT && SOC_STIH407
help
This driver uses the generic OPP framework to match the running
platform with a predefined set of suitable values. If not provided
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index e4ff681faaaa..1e4fbb002a31 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -691,7 +691,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
}
- if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+ if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
+ !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus,
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index df1c941260d1..b4af4094309b 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -484,6 +484,12 @@ remove_opp:
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+ { .compatible = "marvell,armada-3700-nb-pm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 7d01df7bfa6c..3776d960f405 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -137,6 +137,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
+ { .compatible = "st,stih418", },
{ .compatible = "sigma,tango4", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 944d7b45afe9..e363ae04aac6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -13,6 +13,7 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
@@ -24,32 +25,41 @@
#include "cpufreq-dt.h"
struct private_data {
- struct opp_table *opp_table;
+ struct list_head node;
+
+ cpumask_var_t cpus;
struct device *cpu_dev;
- const char *reg_name;
+ struct opp_table *opp_table;
+ struct opp_table *reg_opp_table;
bool have_static_opps;
};
+static LIST_HEAD(priv_list);
+
static struct freq_attr *cpufreq_dt_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL, /* Extra space for boost-attr if required */
NULL,
};
+static struct private_data *cpufreq_dt_find_data(int cpu)
+{
+ struct private_data *priv;
+
+ list_for_each_entry(priv, &priv_list, node) {
+ if (cpumask_test_cpu(cpu, priv->cpus))
+ return priv;
+ }
+
+ return NULL;
+}
+
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct private_data *priv = policy->driver_data;
unsigned long freq = policy->freq_table[index].frequency;
- int ret;
-
- ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
-
- if (!ret) {
- arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
- }
- return ret;
+ return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
}
/*
@@ -90,83 +100,24 @@ node_put:
return name;
}
-static int resources_available(void)
-{
- struct device *cpu_dev;
- struct regulator *cpu_reg;
- struct clk *cpu_clk;
- int ret = 0;
- const char *name;
-
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev) {
- pr_err("failed to get cpu0 device\n");
- return -ENODEV;
- }
-
- cpu_clk = clk_get(cpu_dev, NULL);
- ret = PTR_ERR_OR_ZERO(cpu_clk);
- if (ret) {
- /*
- * If cpu's clk node is present, but clock is not yet
- * registered, we should try defering probe.
- */
- if (ret == -EPROBE_DEFER)
- dev_dbg(cpu_dev, "clock not ready, retry\n");
- else
- dev_err(cpu_dev, "failed to get clock: %d\n", ret);
-
- return ret;
- }
-
- clk_put(cpu_clk);
-
- ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
- if (ret)
- return ret;
-
- name = find_supply_name(cpu_dev);
- /* Platform doesn't require regulator */
- if (!name)
- return 0;
-
- cpu_reg = regulator_get_optional(cpu_dev, name);
- ret = PTR_ERR_OR_ZERO(cpu_reg);
- if (ret) {
- /*
- * If cpu's regulator supply node is present, but regulator is
- * not yet registered, we should try defering probe.
- */
- if (ret == -EPROBE_DEFER)
- dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
- else
- dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
-
- return ret;
- }
-
- regulator_put(cpu_reg);
- return 0;
-}
-
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
- struct opp_table *opp_table = NULL;
struct private_data *priv;
struct device *cpu_dev;
struct clk *cpu_clk;
unsigned int transition_latency;
- bool fallback = false;
- const char *name;
int ret;
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", policy->cpu);
+ priv = cpufreq_dt_find_data(policy->cpu);
+ if (!priv) {
+ pr_err("failed to find data for cpu%d\n", policy->cpu);
return -ENODEV;
}
+ cpu_dev = priv->cpu_dev;
+ cpumask_copy(policy->cpus, priv->cpus);
+
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
@@ -174,45 +125,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- /* Get OPP-sharing information from "operating-points-v2" bindings */
- ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
- if (ret) {
- if (ret != -ENOENT)
- goto out_put_clk;
-
- /*
- * operating-points-v2 not supported, fallback to old method of
- * finding shared-OPPs for backward compatibility if the
- * platform hasn't set sharing CPUs.
- */
- if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
- fallback = true;
- }
-
- /*
- * OPP layer will be taking care of regulators now, but it needs to know
- * the name of the regulator first.
- */
- name = find_supply_name(cpu_dev);
- if (name) {
- opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
- if (IS_ERR(opp_table)) {
- ret = PTR_ERR(opp_table);
- dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
- policy->cpu, ret);
- goto out_put_clk;
- }
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto out_put_regulator;
- }
-
- priv->reg_name = name;
- priv->opp_table = opp_table;
-
/*
* Initialize OPP tables for all policy->cpus. They will be shared by
* all CPUs which have marked their CPUs shared with OPP bindings.
@@ -232,31 +144,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
- dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
- ret = -EPROBE_DEFER;
+ dev_err(cpu_dev, "OPP table can't be empty\n");
+ ret = -ENODEV;
goto out_free_opp;
}
- if (fallback) {
- cpumask_setall(policy->cpus);
-
- /*
- * OPP tables are initialized only for policy->cpu, do it for
- * others as well.
- */
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
- if (ret)
- dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
- __func__, ret);
- }
-
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_opp;
}
- priv->cpu_dev = cpu_dev;
policy->driver_data = priv;
policy->clk = cpu_clk;
policy->freq_table = freq_table;
@@ -288,11 +186,6 @@ out_free_cpufreq_table:
out_free_opp:
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
- kfree(priv);
-out_put_regulator:
- if (name)
- dev_pm_opp_put_regulators(opp_table);
-out_put_clk:
clk_put(cpu_clk);
return ret;
@@ -320,12 +213,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
if (priv->have_static_opps)
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
- if (priv->reg_name)
- dev_pm_opp_put_regulators(priv->opp_table);
-
clk_put(policy->clk);
- kfree(priv);
-
return 0;
}
@@ -344,21 +232,119 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
-static int dt_cpufreq_probe(struct platform_device *pdev)
+static int dt_cpufreq_early_init(struct device *dev, int cpu)
{
- struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
+ struct private_data *priv;
+ struct device *cpu_dev;
+ const char *reg_name;
int ret;
+ /* Check if this CPU is already covered by some other policy */
+ if (cpufreq_dt_find_data(cpu))
+ return 0;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -EPROBE_DEFER;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ priv->cpu_dev = cpu_dev;
+
+ /* Try to get OPP table early to ensure resources are available */
+ priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
+ if (IS_ERR(priv->opp_table)) {
+ ret = PTR_ERR(priv->opp_table);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
+ goto free_cpumask;
+ }
+
/*
- * All per-cluster (CPUs sharing clock/voltages) initialization is done
- * from ->init(). In probe(), we just need to make sure that clk and
- * regulators are available. Else defer probe and retry.
- *
- * FIXME: Is checking this only for CPU0 sufficient ?
+ * OPP layer will be taking care of regulators now, but it needs to know
+ * the name of the regulator first.
*/
- ret = resources_available();
- if (ret)
- return ret;
+ reg_name = find_supply_name(cpu_dev);
+ if (reg_name) {
+ priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
+ &reg_name, 1);
+ if (IS_ERR(priv->reg_opp_table)) {
+ ret = PTR_ERR(priv->reg_opp_table);
+ if (ret != -EPROBE_DEFER)
+ dev_err(cpu_dev, "failed to set regulators: %d\n",
+ ret);
+ goto put_table;
+ }
+ }
+
+	/* Find OPP sharing information so we can fill priv->cpus here */
+ /* Get OPP-sharing information from "operating-points-v2" bindings */
+ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
+ if (ret) {
+ if (ret != -ENOENT)
+ goto put_reg;
+
+ /*
+		 * operating-points-v2 not supported, fall back to assuming all
+		 * CPUs share one OPP table for backward compatibility if the
+		 * platform hasn't set sharing CPUs.
+ */
+ if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
+ cpumask_setall(priv->cpus);
+
+ /*
+ * OPP tables are initialized only for cpu, do it for
+ * others as well.
+ */
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ }
+ }
+
+ list_add(&priv->node, &priv_list);
+ return 0;
+
+put_reg:
+ if (priv->reg_opp_table)
+ dev_pm_opp_put_regulators(priv->reg_opp_table);
+put_table:
+ dev_pm_opp_put_opp_table(priv->opp_table);
+free_cpumask:
+ free_cpumask_var(priv->cpus);
+ return ret;
+}
+
+static void dt_cpufreq_release(void)
+{
+ struct private_data *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, &priv_list, node) {
+ if (priv->reg_opp_table)
+ dev_pm_opp_put_regulators(priv->reg_opp_table);
+ dev_pm_opp_put_opp_table(priv->opp_table);
+ free_cpumask_var(priv->cpus);
+ list_del(&priv->node);
+ }
+}
+
+static int dt_cpufreq_probe(struct platform_device *pdev)
+{
+ struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
+ int ret, cpu;
+
+ /* Request resources early so we can return in case of -EPROBE_DEFER */
+ for_each_possible_cpu(cpu) {
+ ret = dt_cpufreq_early_init(&pdev->dev, cpu);
+ if (ret)
+ goto err;
+ }
if (data) {
if (data->have_governor_per_policy)
@@ -374,15 +360,21 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
}
ret = cpufreq_register_driver(&dt_cpufreq_driver);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "failed register driver: %d\n", ret);
+ goto err;
+ }
+ return 0;
+err:
+ dt_cpufreq_release();
return ret;
}
static int dt_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&dt_cpufreq_driver);
+ dt_cpufreq_release();
return 0;
}
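Taken together, probe now claims every deferrable resource (OPP table,
regulator, sharing mask) for all possible CPUs before registering the driver,
and unwinds completely on failure. Condensed from dt_cpufreq_probe() and
dt_cpufreq_release() above, the control flow is:

/* Condensed restatement of the hunks above, not new code. */
static int demo_probe(struct platform_device *pdev)
{
	int ret, cpu;

	for_each_possible_cpu(cpu) {
		ret = dt_cpufreq_early_init(&pdev->dev, cpu);
		if (ret)		/* including -EPROBE_DEFER */
			goto err;
	}

	ret = cpufreq_register_driver(&dt_cpufreq_driver);
	if (!ret)
		return 0;
err:
	dt_cpufreq_release();	/* drop OPP tables, regulators, masks */
	return ret;
}

->init() then only looks up the prebuilt private_data via
cpufreq_dt_find_data(), so it no longer needs to return -EPROBE_DEFER from a
context that cannot handle it.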
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47aa90f9a7c2..1e7e3f2ff09f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -61,6 +61,12 @@ static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
+static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
+bool cpufreq_supports_freq_invariance(void)
+{
+ return static_branch_likely(&cpufreq_freq_invariance);
+}
+
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
@@ -154,12 +160,6 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
- unsigned long max_freq)
-{
-}
-EXPORT_SYMBOL_GPL(arch_set_freq_scale);
-
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do following:
@@ -446,6 +446,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
cpufreq_notify_post_transition(policy, freqs, transition_failed);
+ arch_set_freq_scale(policy->related_cpus,
+ policy->cur,
+ policy->cpuinfo.max_freq);
+
policy->transition_ongoing = false;
policy->transition_task = NULL;
@@ -1450,14 +1454,13 @@ static int cpufreq_online(unsigned int cpu)
*/
if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
&& has_target()) {
+ unsigned int old_freq = policy->cur;
+
/* Are we running at unknown frequency ? */
- ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+ ret = cpufreq_frequency_table_get_index(policy, old_freq);
if (ret == -EINVAL) {
- /* Warn user and fix it */
- pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
- __func__, policy->cpu, policy->cur);
- ret = __cpufreq_driver_target(policy, policy->cur - 1,
- CPUFREQ_RELATION_L);
+ ret = __cpufreq_driver_target(policy, old_freq - 1,
+ CPUFREQ_RELATION_L);
/*
* Reaching here after boot in a few seconds may not
@@ -1465,8 +1468,8 @@ static int cpufreq_online(unsigned int cpu)
* frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
- pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
- __func__, policy->cpu, policy->cur);
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ __func__, policy->cpu, old_freq, policy->cur);
}
}
@@ -1905,6 +1908,18 @@ void cpufreq_resume(void)
}
/**
+ * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
+ * @flags: Flags to test against the current cpufreq driver's flags.
+ *
+ * Assumes that the driver is there, so callers must ensure that this is the
+ * case.
+ */
+bool cpufreq_driver_test_flags(u16 flags)
+{
+ return !!(cpufreq_driver->flags & flags);
+}
+
+/**
* cpufreq_get_current_driver - return current driver's name
*
* Return the name string of the currently loaded cpufreq driver
@@ -2056,9 +2071,26 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
+ unsigned int freq;
+ int cpu;
+
target_freq = clamp_val(target_freq, policy->min, policy->max);
+ freq = cpufreq_driver->fast_switch(policy, target_freq);
+
+ if (!freq)
+ return 0;
+
+ policy->cur = freq;
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+ cpufreq_stats_record_transition(policy, freq);
+
+ if (trace_cpu_frequency_enabled()) {
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freq, cpu);
+ }
- return cpufreq_driver->fast_switch(policy, target_freq);
+ return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
@@ -2167,7 +2199,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
* exactly same freq is called again and so we can save on few function
* calls.
*/
- if (target_freq == policy->cur)
+ if (target_freq == policy->cur &&
+ !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
return 0;
/* Save last value to restore later on errors */
@@ -2221,7 +2254,7 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
return -EINVAL;
/* Platform doesn't want dynamic frequency switching ? */
- if (policy->governor->dynamic_switching &&
+ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
struct cpufreq_governor *gov = cpufreq_fallback_governor();
@@ -2247,6 +2280,8 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
}
}
+ policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
+
return 0;
}
@@ -2710,6 +2745,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("supports frequency invariance");
+ }
+
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -2779,6 +2823,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
cpus_read_lock();
subsys_interface_unregister(&cpufreq_interface);
remove_boost_sysfs_file();
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
cpuhp_remove_state_nocalls_cpuslocked(hp_online);
write_lock_irqsave(&cpufreq_driver_lock, flags);
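cpufreq_supports_freq_invariance() is backed by a static key, so hot scheduler
paths pay only a patched branch for the test. A sketch of how a consumer might
gate on it (illustrative only; not code from this patch):

#include <linux/cpufreq.h>

static unsigned long demo_pick_freq(struct cpufreq_policy *policy,
				    unsigned long estimated)
{
	/*
	 * When the driver keeps arch_set_freq_scale() up to date,
	 * policy->cur is trustworthy; otherwise fall back to a
	 * locally derived estimate.
	 */
	if (cpufreq_supports_freq_invariance())
		return policy->cur;

	return estimated;
}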
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c56773c25757..bab8e6140377 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -156,7 +156,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
{ \
.name = _name_, \
- .dynamic_switching = true, \
+ .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING, \
.owner = THIS_MODULE, \
.init = cpufreq_dbs_governor_init, \
.exit = cpufreq_dbs_governor_exit, \
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index 71c1d9aba772..addd93f2a420 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -20,6 +20,7 @@ static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
.limits = cpufreq_gov_performance_limits,
};
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 7749522355b5..8d830d860e91 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -21,6 +21,7 @@ static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.limits = cpufreq_gov_powersave_limits,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 94d959a8e954..6cd5c8ab5d49 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,64 +19,104 @@ struct cpufreq_stats {
unsigned int state_num;
unsigned int last_index;
u64 *time_in_state;
- spinlock_t lock;
unsigned int *freq_table;
unsigned int *trans_table;
+
+ /* Deferred reset */
+ unsigned int reset_pending;
+ unsigned long long reset_time;
};
-static void cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats,
+ unsigned long long time)
{
unsigned long long cur_time = get_jiffies_64();
- stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
+ stats->time_in_state[stats->last_index] += cur_time - time;
stats->last_time = cur_time;
}
-static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
+static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
- spin_lock(&stats->lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
- spin_unlock(&stats->lock);
+
+ /* Adjust for the time elapsed since reset was requested */
+ WRITE_ONCE(stats->reset_pending, 0);
+ /*
+ * Prevent the reset_time read from being reordered before the
+ * reset_pending accesses in cpufreq_stats_record_transition().
+ */
+ smp_rmb();
+ cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%d\n", policy->stats->total_trans);
+ struct cpufreq_stats *stats = policy->stats;
+
+ if (READ_ONCE(stats->reset_pending))
+ return sprintf(buf, "%d\n", 0);
+ else
+ return sprintf(buf, "%u\n", stats->total_trans);
}
cpufreq_freq_attr_ro(total_trans);
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
+ bool pending = READ_ONCE(stats->reset_pending);
+ unsigned long long time;
ssize_t len = 0;
int i;
- if (policy->fast_switch_enabled)
- return 0;
-
- spin_lock(&stats->lock);
- cpufreq_stats_update(stats);
- spin_unlock(&stats->lock);
-
for (i = 0; i < stats->state_num; i++) {
+ if (pending) {
+ if (i == stats->last_index) {
+ /*
+ * Prevent the reset_time read from occurring
+ * before the reset_pending read above.
+ */
+ smp_rmb();
+ time = get_jiffies_64() - READ_ONCE(stats->reset_time);
+ } else {
+ time = 0;
+ }
+ } else {
+ time = stats->time_in_state[i];
+ if (i == stats->last_index)
+ time += get_jiffies_64() - stats->last_time;
+ }
+
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
- (unsigned long long)
- jiffies_64_to_clock_t(stats->time_in_state[i]));
+ jiffies_64_to_clock_t(time));
}
return len;
}
cpufreq_freq_attr_ro(time_in_state);
+/* We don't care what is written to the attribute */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
size_t count)
{
- /* We don't care what is written to the attribute. */
- cpufreq_stats_clear_table(policy->stats);
+ struct cpufreq_stats *stats = policy->stats;
+
+ /*
+ * Defer resetting of stats to cpufreq_stats_record_transition() to
+ * avoid races.
+ */
+ WRITE_ONCE(stats->reset_time, get_jiffies_64());
+ /*
+ * The memory barrier below is to prevent the readers of reset_time from
+ * seeing a stale or partially updated value.
+ */
+ smp_wmb();
+ WRITE_ONCE(stats->reset_pending, 1);
+
return count;
}
cpufreq_freq_attr_wo(reset);
@@ -84,11 +124,9 @@ cpufreq_freq_attr_wo(reset);
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
+ bool pending = READ_ONCE(stats->reset_pending);
ssize_t len = 0;
- int i, j;
-
- if (policy->fast_switch_enabled)
- return 0;
+ int i, j, count;
len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
@@ -113,8 +151,13 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE)
break;
- len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stats->trans_table[i*stats->max_state+j]);
+
+ if (pending)
+ count = 0;
+ else
+ count = stats->trans_table[i * stats->max_state + j];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
}
if (len >= PAGE_SIZE)
break;
@@ -208,7 +251,6 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->state_num = i;
stats->last_time = get_jiffies_64();
stats->last_index = freq_table_get_index(stats, policy->cur);
- spin_lock_init(&stats->lock);
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -228,23 +270,22 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
struct cpufreq_stats *stats = policy->stats;
int old_index, new_index;
- if (!stats) {
- pr_debug("%s: No stats found\n", __func__);
+ if (unlikely(!stats))
return;
- }
+
+ if (unlikely(READ_ONCE(stats->reset_pending)))
+ cpufreq_stats_reset_table(stats);
old_index = stats->last_index;
new_index = freq_table_get_index(stats, new_freq);
/* We can't do stats->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1 || old_index == new_index)
+ if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
return;
- spin_lock(&stats->lock);
- cpufreq_stats_update(stats);
+ cpufreq_stats_update(stats, stats->last_time);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
- spin_unlock(&stats->lock);
}
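The stats spinlock is gone; resets are instead published with a store/load
ordering pair. The writer makes reset_time visible before raising
reset_pending, and every reader that observes the flag issues the matching
read barrier before consuming the timestamp. Distilled from store_reset() and
the readers above (field names as in struct cpufreq_stats):

static void demo_request_reset(struct cpufreq_stats *stats)
{
	WRITE_ONCE(stats->reset_time, get_jiffies_64());
	smp_wmb();	/* publish reset_time before reset_pending */
	WRITE_ONCE(stats->reset_pending, 1);
}

static u64 demo_time_since_reset(struct cpufreq_stats *stats)
{
	if (!READ_ONCE(stats->reset_pending))
		return 0;

	smp_rmb();	/* pairs with the smp_wmb() in the writer */
	return get_jiffies_64() - READ_ONCE(stats->reset_time);
}

The actual clearing of the tables is deferred to
cpufreq_stats_record_transition(), the only writer of the counters, which is
what lets the show_*() readers stay lock-free.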
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 776a58bab0ff..ab93bce8ae77 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -223,7 +223,6 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
case EPS_BRAND_C3:
pr_cont("C3\n");
return -ENODEV;
- break;
}
/* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ef7b34c1fd2b..5bf5fc759881 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -48,7 +48,6 @@ static struct clk_bulk_data clks[] = {
};
static struct device *cpu_dev;
-static bool free_opp;
static struct cpufreq_frequency_table *freq_table;
static unsigned int max_freq;
static unsigned int transition_latency;
@@ -390,9 +389,6 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
-
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
@@ -507,8 +503,7 @@ soc_opp_out:
free_freq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
- if (free_opp)
- dev_pm_opp_of_remove_table(cpu_dev);
+ dev_pm_opp_of_remove_table(cpu_dev);
put_reg:
if (!IS_ERR(arm_reg))
regulator_put(arm_reg);
@@ -528,8 +523,7 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
- if (free_opp)
- dev_pm_opp_of_remove_table(cpu_dev);
+ dev_pm_opp_of_remove_table(cpu_dev);
regulator_put(arm_reg);
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9a515c460a00..36a3ccfe6d3d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1420,6 +1420,24 @@ static void __init intel_pstate_sysfs_expose_params(void)
}
}
+static void __init intel_pstate_sysfs_remove(void)
+{
+ if (!intel_pstate_kobject)
+ return;
+
+ sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
+
+ if (!per_cpu_limits) {
+ sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
+ sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
+
+ if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
+ sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
+ }
+
+ kobject_put(intel_pstate_kobject);
+}
+
static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
{
int rc;
@@ -2509,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
}
static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
- bool fast_switch)
+ bool strict, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
@@ -2521,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
* field in it, so opportunistically update the max too if needed.
*/
value &= ~HWP_MAX_PERF(~0L);
- value |= HWP_MAX_PERF(cpu->max_perf_ratio);
+ value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
if (value == prev)
return;
@@ -2544,20 +2562,20 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
pstate_funcs.get_val(cpu, target_pstate));
}
-static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
- bool fast_switch)
+static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
+ int target_pstate, bool fast_switch)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
int old_pstate = cpu->pstate.current_pstate;
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- if (target_pstate != old_pstate) {
+ if (hwp_active) {
+ intel_cpufreq_adjust_hwp(cpu, target_pstate,
+ policy->strict_target, fast_switch);
+ cpu->pstate.current_pstate = target_pstate;
+ } else if (target_pstate != old_pstate) {
+ intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
cpu->pstate.current_pstate = target_pstate;
- if (hwp_active)
- intel_cpufreq_adjust_hwp(cpu, target_pstate,
- fast_switch);
- else
- intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
- fast_switch);
}
intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
@@ -2593,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
break;
}
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2612,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
- target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
+ target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
return target_pstate * cpu->pstate.scaling;
}
@@ -3014,6 +3032,7 @@ static int __init intel_pstate_init(void)
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
+ intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
if (!default_driver)
default_driver = &intel_pstate;
@@ -3063,8 +3082,10 @@ hwp_cpu_matched:
mutex_lock(&intel_pstate_driver_lock);
rc = intel_pstate_register_driver(default_driver);
mutex_unlock(&intel_pstate_driver_lock);
- if (rc)
+ if (rc) {
+ intel_pstate_sysfs_remove();
return rc;
+ }
if (hwp_active) {
const struct x86_cpu_id *id;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 123fb006810d..182a4dbca095 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -593,7 +593,6 @@ static void longhaul_setup_voltagescaling(void)
break;
default:
return;
- break;
}
if (min_vid_speed >= highest_speed)
return;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index a9af15e994cc..e439b43c19eb 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -885,12 +885,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
- struct cpufreq_policy cpu_policy;
+ struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
- cpufreq_get_policy(&cpu_policy, cpu);
- powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ continue;
+ powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
+ cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 3fb044b907a8..9ed5341dc515 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -19,18 +19,23 @@
#define LUT_L_VAL GENMASK(7, 0)
#define LUT_CORE_COUNT GENMASK(18, 16)
#define LUT_VOLT GENMASK(11, 0)
-#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
-/* Register offsets */
-#define REG_ENABLE 0x0
-#define REG_FREQ_LUT 0x110
-#define REG_VOLT_LUT 0x114
-#define REG_PERF_STATE 0x920
+struct qcom_cpufreq_soc_data {
+ u32 reg_enable;
+ u32 reg_freq_lut;
+ u32 reg_volt_lut;
+ u32 reg_perf_state;
+ u8 lut_row_size;
+};
+
+struct qcom_cpufreq_data {
+ void __iomem *base;
+ const struct qcom_cpufreq_soc_data *soc_data;
+};
static unsigned long cpu_hw_rate, xo_rate;
-static struct platform_device *global_pdev;
static bool icc_scaling_enabled;
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
@@ -77,22 +82,22 @@ static int qcom_cpufreq_update_opp(struct device *cpu_dev,
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- void __iomem *perf_state_reg = policy->driver_data;
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
unsigned long freq = policy->freq_table[index].frequency;
- writel_relaxed(index, perf_state_reg);
+ writel_relaxed(index, data->base + soc_data->reg_perf_state);
if (icc_scaling_enabled)
qcom_cpufreq_set_bw(policy, freq);
- arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
return 0;
}
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
- void __iomem *perf_state_reg;
+ struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -100,9 +105,10 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (!policy)
return 0;
- perf_state_reg = policy->driver_data;
+ data = policy->driver_data;
+ soc_data = data->soc_data;
- index = readl_relaxed(perf_state_reg);
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
index = min(index, LUT_MAX_ENTRIES - 1);
return policy->freq_table[index].frequency;
@@ -111,23 +117,18 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- void __iomem *perf_state_reg = policy->driver_data;
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
unsigned int index;
- unsigned long freq;
index = policy->cached_resolved_idx;
- writel_relaxed(index, perf_state_reg);
+ writel_relaxed(index, data->base + soc_data->reg_perf_state);
- freq = policy->freq_table[index].frequency;
- arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
-
- return freq;
+ return policy->freq_table[index].frequency;
}
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
- struct cpufreq_policy *policy,
- void __iomem *base)
+ struct cpufreq_policy *policy)
{
u32 data, src, lval, i, core_count, prev_freq = 0, freq;
u32 volt;
@@ -135,6 +136,8 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
struct dev_pm_opp *opp;
unsigned long rate;
int ret;
+ struct qcom_cpufreq_data *drv_data = policy->driver_data;
+ const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;
table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
if (!table)
@@ -161,14 +164,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
}
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
- data = readl_relaxed(base + REG_FREQ_LUT +
- i * LUT_ROW_SIZE);
+ data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
+ i * soc_data->lut_row_size);
src = FIELD_GET(LUT_SRC, data);
lval = FIELD_GET(LUT_L_VAL, data);
core_count = FIELD_GET(LUT_CORE_COUNT, data);
- data = readl_relaxed(base + REG_VOLT_LUT +
- i * LUT_ROW_SIZE);
+ data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
+ i * soc_data->lut_row_size);
volt = FIELD_GET(LUT_VOLT, data) * 1000;
if (src)
@@ -177,10 +180,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
freq = cpu_hw_rate / 1000;
if (freq != prev_freq && core_count != LUT_TURBO_IND) {
- table[i].frequency = freq;
- qcom_cpufreq_update_opp(cpu_dev, freq, volt);
- dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
+ if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
+ table[i].frequency = freq;
+ dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
freq, core_count);
+ } else {
+ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
} else if (core_count == LUT_TURBO_IND) {
table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -197,9 +205,13 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
* as the boost frequency
*/
if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
- prev->frequency = prev_freq;
- prev->flags = CPUFREQ_BOOST_FREQ;
- qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt);
+ if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
+ prev->frequency = prev_freq;
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ } else {
+ dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
+ freq);
+ }
}
break;
@@ -238,14 +250,38 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
+static const struct qcom_cpufreq_soc_data qcom_soc_data = {
+ .reg_enable = 0x0,
+ .reg_freq_lut = 0x110,
+ .reg_volt_lut = 0x114,
+ .reg_perf_state = 0x920,
+ .lut_row_size = 32,
+};
+
+static const struct qcom_cpufreq_soc_data epss_soc_data = {
+ .reg_enable = 0x0,
+ .reg_freq_lut = 0x100,
+ .reg_volt_lut = 0x200,
+ .reg_perf_state = 0x320,
+ .lut_row_size = 4,
+};
+
+static const struct of_device_id qcom_cpufreq_hw_match[] = {
+ { .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
+ { .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
- struct device *dev = &global_pdev->dev;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
struct of_phandle_args args;
struct device_node *cpu_np;
struct device *cpu_dev;
- struct resource *res;
void __iomem *base;
+ struct qcom_cpufreq_data *data;
int ret, index;
cpu_dev = get_cpu_device(policy->cpu);
@@ -267,16 +303,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
index = args.args[0];
- res = platform_get_resource(global_pdev, IORESOURCE_MEM, index);
- if (!res)
- return -ENODEV;
+ base = devm_platform_ioremap_resource(pdev, index);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = devm_ioremap(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ data->soc_data = of_device_get_match_data(&pdev->dev);
+ data->base = base;
/* HW should be in enabled state to proceed */
- if (!(readl_relaxed(base + REG_ENABLE) & 0x1)) {
+ if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
ret = -ENODEV;
goto error;
@@ -289,9 +330,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
- policy->driver_data = base + REG_PERF_STATE;
+ policy->driver_data = data;
- ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
+ ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
dev_err(dev, "Domain-%d failed to read LUT\n", index);
goto error;
@@ -315,12 +356,13 @@ error:
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct device *cpu_dev = get_cpu_device(policy->cpu);
- void __iomem *base = policy->driver_data - REG_PERF_STATE;
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
kfree(policy->freq_table);
- devm_iounmap(&global_pdev->dev, base);
+ devm_iounmap(&pdev->dev, data->base);
return 0;
}
@@ -365,7 +407,7 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
clk_put(clk);
- global_pdev = pdev;
+ cpufreq_qcom_hw_driver.driver_data = pdev;
/* Check for optional interconnect paths on CPU0 */
cpu_dev = get_cpu_device(0);
@@ -390,12 +432,6 @@ static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}
-static const struct of_device_id qcom_cpufreq_hw_match[] = {
- { .compatible = "qcom,cpufreq-hw" },
- {}
-};
-MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
-
static struct platform_driver qcom_cpufreq_hw_driver = {
.probe = qcom_cpufreq_hw_driver_probe,
.remove = qcom_cpufreq_hw_driver_remove,
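
The qcom-cpufreq-hw changes above replace hard-coded register offsets with a per-SoC table selected through the OF match data, which is what lets one driver serve both the original cpufreq-hw block and the newer EPSS block. A minimal sketch of the pattern, assuming the struct layout shown in the hunks (the example_init() helper itself is hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_perf_state;
	u8 lut_row_size;	/* stride between LUT rows, in bytes */
};

struct qcom_cpufreq_data {
	void __iomem *base;
	const struct qcom_cpufreq_soc_data *soc_data;
};

/* Pick the register map that matches the compatible string. */
static int example_init(struct platform_device *pdev,
			struct qcom_cpufreq_data *data)
{
	data->soc_data = of_device_get_match_data(&pdev->dev);
	if (!data->soc_data)
		return -EINVAL;
	return 0;
}
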
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
index 0c4f2ccd7e22..5dcfbf0bfb74 100644
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -16,14 +16,14 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <mach/regs-clock.h>
-
-#include <plat/cpu.h>
-#include <plat/cpu-freq-core.h>
+#define S3C2410_CLKDIVN_PDIVN (1<<0)
+#define S3C2410_CLKDIVN_HDIVN (1<<1)
/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
@@ -37,7 +37,7 @@ static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
if (cfg->divs.p_divisor != cfg->divs.h_divisor)
clkdiv |= S3C2410_CLKDIVN_PDIVN;
- __raw_writel(clkdiv, S3C2410_CLKDIVN);
+ s3c24xx_write_clkdivn(clkdiv);
}
static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index 53385a9ab957..5945945ead7c 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -19,15 +19,24 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <mach/regs-clock.h>
-#include <mach/s3c2412.h>
-
-#include <plat/cpu.h>
-#include <plat/cpu-freq-core.h>
+#define S3C2412_CLKDIVN_PDIVN (1<<2)
+#define S3C2412_CLKDIVN_HDIVN_MASK (3<<0)
+#define S3C2412_CLKDIVN_ARMDIVN (1<<3)
+#define S3C2412_CLKDIVN_DVSEN (1<<4)
+#define S3C2412_CLKDIVN_HALFHCLK (1<<5)
+#define S3C2412_CLKDIVN_USB48DIV (1<<6)
+#define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8)
+#define S3C2412_CLKDIVN_UARTDIV_SHIFT (8)
+#define S3C2412_CLKDIVN_I2SDIV_MASK (15<<12)
+#define S3C2412_CLKDIVN_I2SDIV_SHIFT (12)
+#define S3C2412_CLKDIVN_CAMDIV_MASK (15<<16)
+#define S3C2412_CLKDIVN_CAMDIV_SHIFT (16)
/* our clock resources. */
static struct clk *xtal;
@@ -117,7 +126,7 @@ static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
unsigned long clkdiv;
unsigned long olddiv;
- olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN);
+ olddiv = clkdiv = s3c24xx_read_clkdivn();
/* clear off current clock info */
@@ -134,32 +143,11 @@ static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
clkdiv |= S3C2412_CLKDIVN_PDIVN;
s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
- __raw_writel(clkdiv, S3C2410_CLKDIVN);
+ s3c24xx_write_clkdivn(clkdiv);
clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
}
-static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
-{
- struct s3c_cpufreq_board *board = cfg->board;
- unsigned long refresh;
-
- s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__,
- board->refresh, cfg->freq.hclk);
-
- /* Reduce both the refresh time (in ns) and the frequency (in MHz)
- * by 10 each to ensure that we do not overflow 32 bit numbers. This
- * should work for HCLK up to 133MHz and refresh period up to 30usec.
- */
-
- refresh = (board->refresh / 10);
- refresh *= (cfg->freq.hclk / 100);
- refresh /= (1 * 1000 * 1000); /* 10^6 */
-
- s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh);
- __raw_writel(refresh, S3C2412_REFRESH);
-}
-
/* set the default cpu frequency information, based on an 200MHz part
* as we have no other way of detecting the speed rating in software.
*/
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 3f772ba8896e..148e8aedefa9 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -20,14 +20,27 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <mach/regs-clock.h>
+#define S3C2440_CLKDIVN_PDIVN (1<<0)
+#define S3C2440_CLKDIVN_HDIVN_MASK (3<<1)
+#define S3C2440_CLKDIVN_HDIVN_1 (0<<1)
+#define S3C2440_CLKDIVN_HDIVN_2 (1<<1)
+#define S3C2440_CLKDIVN_HDIVN_4_8 (2<<1)
+#define S3C2440_CLKDIVN_HDIVN_3_6 (3<<1)
+#define S3C2440_CLKDIVN_UCLK (1<<3)
-#include <plat/cpu.h>
-#include <plat/cpu-freq-core.h>
+#define S3C2440_CAMDIVN_CAMCLK_MASK (0xf<<0)
+#define S3C2440_CAMDIVN_CAMCLK_SEL (1<<4)
+#define S3C2440_CAMDIVN_HCLK3_HALF (1<<8)
+#define S3C2440_CAMDIVN_HCLK4_HALF (1<<9)
+#define S3C2440_CAMDIVN_DVSEN (1<<12)
+
+#define S3C2442_CAMDIVN_CAMCLK_DIV3 (1<<5)
static struct clk *xtal;
static struct clk *fclk;
@@ -143,8 +156,8 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
cfg->divs.h_divisor, cfg->divs.p_divisor);
- clkdiv = __raw_readl(S3C2410_CLKDIVN);
- camdiv = __raw_readl(S3C2440_CAMDIVN);
+ clkdiv = s3c24xx_read_clkdivn();
+ camdiv = s3c2440_read_camdivn();
clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
camdiv &= ~CAMDIVN_HCLK_HALF;
@@ -184,11 +197,11 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
* then make a short delay and remove the hclk halving if necessary.
*/
- __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN);
- __raw_writel(clkdiv, S3C2410_CLKDIVN);
+ s3c2440_write_camdivn(camdiv | CAMDIVN_HCLK_HALF);
+ s3c24xx_write_clkdivn(clkdiv);
ndelay(20);
- __raw_writel(camdiv, S3C2440_CAMDIVN);
+ s3c2440_write_camdivn(camdiv);
clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
index 290e3539d03e..93971dfe7c75 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -18,7 +18,7 @@
#include <linux/seq_file.h>
#include <linux/err.h>
-#include <plat/cpu-freq-core.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
static struct dentry *dbgfs_root;
static struct dentry *dbgfs_file_io;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index ed0e713b1b57..37efc0dc3f91 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -21,17 +21,13 @@
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/soc/samsung/s3c-cpufreq-core.h>
+#include <linux/soc/samsung/s3c-pm.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat/cpu.h>
-#include <plat/cpu-freq-core.h>
-
-#include <mach/regs-clock.h>
-
/* note, cpufreq support deals in kHz, no Hz */
-
static struct cpufreq_driver s3c24xx_driver;
static struct s3c_cpufreq_config cpu_cur;
static struct s3c_iotimings s3c24xx_iotiming;
@@ -68,7 +64,7 @@ static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
- cfg->pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ cfg->pll.driver_data = s3c24xx_read_mpllcon();
cfg->pll.frequency = fclk;
cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
@@ -386,7 +382,7 @@ static unsigned int suspend_freq;
static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
- suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ suspend_pll.driver_data = s3c24xx_read_mpllcon();
suspend_freq = clk_get_rate(clk_arm);
return 0;
@@ -547,7 +543,7 @@ static void s3c_cpufreq_update_loctkime(void)
val |= calc_locktime(rate, cpu_cur.info->locktime_m);
pr_info("%s: new locktime is 0x%08x\n", __func__, val);
- __raw_writel(val, S3C2410_LOCKTIME);
+ s3c24xx_write_locktime(val);
}
static int s3c_cpufreq_build_freq(void)
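
Across the s3c24xx cpufreq files in this series, direct __raw_readl()/__raw_writel() accesses to <mach/> registers are replaced by accessor helpers from <linux/soc/samsung/s3c-pm.h>, so the drivers no longer depend on machine-private headers. A hedged sketch of the resulting read-modify-write idiom, using the accessor signatures as they appear in the hunks:

#include <linux/soc/samsung/s3c-pm.h>
#include <linux/types.h>

#define S3C2410_CLKDIVN_PDIVN	(1 << 0)	/* as defined locally in the driver above */

static void example_set_pdivn(bool enable)
{
	unsigned long clkdiv = s3c24xx_read_clkdivn();

	if (enable)
		clkdiv |= S3C2410_CLKDIVN_PDIVN;
	else
		clkdiv &= ~S3C2410_CLKDIVN_PDIVN;

	s3c24xx_write_clkdivn(clkdiv);
}
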
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index e84281e2561d..bed496cf8d24 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -590,6 +590,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
static int s5pv210_cpufreq_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct device_node *np;
int id, result = 0;
@@ -602,28 +603,20 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
* cpufreq-dt driver.
*/
arm_regulator = regulator_get(NULL, "vddarm");
- if (IS_ERR(arm_regulator)) {
- if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
- pr_debug("vddarm regulator not ready, defer\n");
- else
- pr_err("failed to get regulator vddarm\n");
- return PTR_ERR(arm_regulator);
- }
+ if (IS_ERR(arm_regulator))
+ return dev_err_probe(dev, PTR_ERR(arm_regulator),
+ "failed to get regulator vddarm\n");
int_regulator = regulator_get(NULL, "vddint");
if (IS_ERR(int_regulator)) {
- if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
- pr_debug("vddint regulator not ready, defer\n");
- else
- pr_err("failed to get regulator vddint\n");
- result = PTR_ERR(int_regulator);
+ result = dev_err_probe(dev, PTR_ERR(int_regulator),
+ "failed to get regulator vddint\n");
goto err_int_regulator;
}
np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
+ dev_err(dev, "failed to find clock controller DT node\n");
result = -ENODEV;
goto err_clock;
}
@@ -631,7 +624,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
clk_base = of_iomap(np, 0);
of_node_put(np);
if (!clk_base) {
- pr_err("%s: failed to map clock registers\n", __func__);
+ dev_err(dev, "failed to map clock registers\n");
result = -EFAULT;
goto err_clock;
}
@@ -639,8 +632,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
id = of_alias_get_id(np, "dmc");
if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
- pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
- __func__, np);
+ dev_err(dev, "failed to get alias of dmc node '%pOFn'\n", np);
of_node_put(np);
result = id;
goto err_clk_base;
@@ -648,8 +640,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
dmc_base[id] = of_iomap(np, 0);
if (!dmc_base[id]) {
- pr_err("%s: failed to map dmc%d registers\n",
- __func__, id);
+ dev_err(dev, "failed to map dmc%d registers\n", id);
of_node_put(np);
result = -EFAULT;
goto err_dmc;
@@ -658,7 +649,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
if (!dmc_base[id]) {
- pr_err("%s: failed to find dmc%d node\n", __func__, id);
+ dev_err(dev, "failed to find dmc%d node\n", id);
result = -ENODEV;
goto err_dmc;
}
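
The s5pv210 probe conversion above leans on dev_err_probe(), which logs the failure, quietly records -EPROBE_DEFER instead of spamming the log, and hands back the error code so acquisition and reporting collapse into one statement. A minimal sketch, assuming only the regulator name from the hunks:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_vddarm(struct device *dev, struct regulator **out)
{
	struct regulator *reg = regulator_get(NULL, "vddarm");

	/* dev_err_probe() returns the error it is given, deferral-aware. */
	if (IS_ERR(reg))
		return dev_err_probe(dev, PTR_ERR(reg),
				     "failed to get regulator vddarm\n");
	*out = reg;
	return 0;
}
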
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index fb42e3390377..e855e8612a67 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -29,7 +29,7 @@ static const struct scmi_handle *handle;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_perf_ops *perf_ops = handle->perf_ops;
+ const struct scmi_perf_ops *perf_ops = handle->perf_ops;
struct scmi_data *priv = policy->driver_data;
unsigned long rate;
int ret;
@@ -48,30 +48,22 @@ static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- int ret;
struct scmi_data *priv = policy->driver_data;
- struct scmi_perf_ops *perf_ops = handle->perf_ops;
+ const struct scmi_perf_ops *perf_ops = handle->perf_ops;
u64 freq = policy->freq_table[index].frequency;
- ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
- if (!ret)
- arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
- return ret;
+ return perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
}
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct scmi_data *priv = policy->driver_data;
- struct scmi_perf_ops *perf_ops = handle->perf_ops;
+ const struct scmi_perf_ops *perf_ops = handle->perf_ops;
if (!perf_ops->freq_set(handle, priv->domain_id,
- target_freq * 1000, true)) {
- arch_set_freq_scale(policy->related_cpus, target_freq,
- policy->cpuinfo.max_freq);
+ target_freq * 1000, true))
return target_freq;
- }
return 0;
}
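
The scmi, scpi, and vexpress-spc hunks in this series all drop their open-coded arch_set_freq_scale() calls because the cpufreq core now drives the frequency-invariance machinery itself; a driver's ->fast_switch() simply reports what it achieved. A sketch of that contract, where example_hw_set_freq() is a hypothetical stand-in for the firmware or hardware frequency request:

#include <linux/cpufreq.h>

/* Hypothetical stand-in for the firmware/hardware frequency request. */
static int example_hw_set_freq(struct cpufreq_policy *policy,
			       unsigned int khz)
{
	return 0;
}

static unsigned int example_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	/* Return the frequency actually set, or 0 on failure. */
	if (example_hw_set_freq(policy, target_freq))
		return 0;
	return target_freq;
}
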
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index b0f5388b8854..43db05b949d9 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -47,9 +47,8 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- unsigned long freq = policy->freq_table[index].frequency;
+ u64 rate = policy->freq_table[index].frequency * 1000;
struct scpi_data *priv = policy->driver_data;
- u64 rate = freq * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -60,9 +59,6 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (clk_get_rate(priv->clk) != rate)
return -EIO;
- arch_set_freq_scale(policy->related_cpus, freq,
- policy->cpuinfo.max_freq);
-
return 0;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index a13a2d1e444e..0b66df4ed513 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -240,7 +240,7 @@ unsigned int speedstep_get_frequency(enum speedstep_processor processor)
return pentium3_get_frequency(processor);
default:
return 0;
- };
+ }
return 0;
}
EXPORT_SYMBOL_GPL(speedstep_get_frequency);
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a5ad96d29adc..4ac6fb23792a 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
static const struct reg_field *sti_cpufreq_match(void)
{
if (of_machine_is_compatible("st,stih407") ||
- of_machine_is_compatible("st,stih410"))
+ of_machine_is_compatible("st,stih410") ||
+ of_machine_is_compatible("st,stih418"))
return sti_stih407_dvfs_regfields;
return NULL;
@@ -258,7 +259,8 @@ static int sti_cpufreq_init(void)
int ret;
if ((!of_machine_is_compatible("st,stih407")) &&
- (!of_machine_is_compatible("st,stih410")))
+ (!of_machine_is_compatible("st,stih410")) &&
+ (!of_machine_is_compatible("st,stih418")))
return -ENODEV;
ddata.cpu = get_cpu_device(0);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 01e1f58ba422..4b4079f51559 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -14,6 +14,7 @@
#define EDVD_CORE_VOLT_FREQ(core) (0x20 + (core) * 0x4)
#define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
+#define EDVD_CORE_VOLT_FREQ_F_MASK 0xffff
#define EDVD_CORE_VOLT_FREQ_V_SHIFT 16
struct tegra186_cpufreq_cluster_info {
@@ -91,10 +92,39 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
return 0;
}
+static unsigned int tegra186_cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_frequency_table *tbl;
+ struct cpufreq_policy *policy;
+ void __iomem *edvd_reg;
+ unsigned int i, freq = 0;
+ u32 ndiv;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
+
+ tbl = policy->freq_table;
+ edvd_reg = policy->driver_data;
+ ndiv = readl(edvd_reg) & EDVD_CORE_VOLT_FREQ_F_MASK;
+
+ for (i = 0; tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if ((tbl[i].driver_data & EDVD_CORE_VOLT_FREQ_F_MASK) == ndiv) {
+ freq = tbl[i].frequency;
+ break;
+ }
+ }
+
+ cpufreq_cpu_put(policy);
+
+ return freq;
+}
+
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .get = tegra186_cpufreq_get,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
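
Design note on the new tegra186_cpufreq_get() above: the driver can reverse-map hardware state because each freq_table entry's driver_data caches the EDVD value programmed for that frequency, so masking the live register with EDVD_CORE_VOLT_FREQ_F_MASK and matching the ndiv field against the table recovers the current frequency without any extra bookkeeping.
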
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 4e8b1dee7c9a..e89b905754d2 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -182,7 +182,6 @@ static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
- int ret;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
@@ -197,15 +196,8 @@ static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
new_cluster = A15_CLUSTER;
}
- ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
- freqs_new);
-
- if (!ret) {
- arch_set_freq_scale(policy->related_cpus, freqs_new,
- policy->cpuinfo.max_freq);
- }
-
- return ret;
+ return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+ freqs_new);
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index addaa6e6718b..c32c600b3cf8 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -141,7 +141,7 @@ static int stop_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- power9_idle_type(stop_psscr_table[index].val,
+ arch300_idle_type(stop_psscr_table[index].val,
stop_psscr_table[index].mask);
return index;
}
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index b6e9649ab0da..4a031c62f92a 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -105,7 +105,7 @@ static void psci_pd_free_states(struct genpd_power_state *states,
kfree(states);
}
-static int psci_pd_init(struct device_node *np)
+static int psci_pd_init(struct device_node *np, bool use_osi)
{
struct generic_pm_domain *pd;
struct psci_pd_provider *pd_provider;
@@ -135,11 +135,16 @@ static int psci_pd_init(struct device_node *np)
pd->free_states = psci_pd_free_states;
pd->name = kbasename(pd->name);
- pd->power_off = psci_pd_power_off;
pd->states = states;
pd->state_count = state_count;
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+ /* Allow power off when OSI has been successfully enabled. */
+ if (use_osi)
+ pd->power_off = psci_pd_power_off;
+ else
+ pd->flags |= GENPD_FLAG_ALWAYS_ON;
+
/* Use governor for CPU PM domains if it has some states to manage. */
pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
@@ -190,7 +195,7 @@ static void psci_pd_remove(void)
}
}
-static int psci_pd_init_topology(struct device_node *np, bool add)
+static int psci_pd_init_topology(struct device_node *np)
{
struct device_node *node;
struct of_phandle_args child, parent;
@@ -203,9 +208,7 @@ static int psci_pd_init_topology(struct device_node *np, bool add)
child.np = node;
child.args_count = 0;
-
- ret = add ? of_genpd_add_subdomain(&parent, &child) :
- of_genpd_remove_subdomain(&parent, &child);
+ ret = of_genpd_add_subdomain(&parent, &child);
of_node_put(parent.np);
if (ret) {
of_node_put(node);
@@ -216,14 +219,20 @@ static int psci_pd_init_topology(struct device_node *np, bool add)
return 0;
}
-static int psci_pd_add_topology(struct device_node *np)
+static bool psci_pd_try_set_osi_mode(void)
{
- return psci_pd_init_topology(np, true);
-}
+ int ret;
-static void psci_pd_remove_topology(struct device_node *np)
-{
- psci_pd_init_topology(np, false);
+ if (!psci_has_osi_support())
+ return false;
+
+ ret = psci_set_osi_mode(true);
+ if (ret) {
+ pr_warn("failed to enable OSI mode: %d\n", ret);
+ return false;
+ }
+
+ return true;
}
static void psci_cpuidle_domain_sync_state(struct device *dev)
@@ -244,14 +253,14 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *node;
+ bool use_osi;
int ret = 0, pd_count = 0;
if (!np)
return -ENODEV;
- /* Currently limit the hierarchical topology to be used in OSI mode. */
- if (!psci_has_osi_support())
- return 0;
+ /* If OSI mode is supported, let's try to enable it. */
+ use_osi = psci_pd_try_set_osi_mode();
/*
* Parse child nodes for the "#power-domain-cells" property and
@@ -261,7 +270,7 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
if (!of_find_property(node, "#power-domain-cells", NULL))
continue;
- ret = psci_pd_init(node);
+ ret = psci_pd_init(node, use_osi);
if (ret)
goto put_node;
@@ -270,30 +279,24 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
/* Bail out if not using the hierarchical CPU topology. */
if (!pd_count)
- return 0;
+ goto no_pd;
/* Link genpd masters/subdomains to model the CPU topology. */
- ret = psci_pd_add_topology(np);
+ ret = psci_pd_init_topology(np);
if (ret)
goto remove_pd;
- /* Try to enable OSI mode. */
- ret = psci_set_osi_mode();
- if (ret) {
- pr_warn("failed to enable OSI mode: %d\n", ret);
- psci_pd_remove_topology(np);
- goto remove_pd;
- }
-
pr_info("Initialized CPU PM domain topology\n");
return 0;
put_node:
of_node_put(node);
remove_pd:
- if (pd_count)
- psci_pd_remove();
+ psci_pd_remove();
pr_err("failed to create CPU PM domains ret=%d\n", ret);
+no_pd:
+ if (use_osi)
+ psci_set_osi_mode(false);
return ret;
}
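
The cpuidle-psci-domain rework above makes OSI a probe-time decision: firmware is asked to switch modes once, and only on success do the PM domains get a ->power_off hook; otherwise each domain is pinned on so Platform-Coordinated mode keeps working. A hedged sketch of just that branch (psci_pd_power_off is the driver-local callback from the file above, so this is not standalone-compilable):

#include <linux/pm_domain.h>

static void example_configure_pd(struct generic_pm_domain *pd, bool use_osi)
{
	if (use_osi)
		pd->power_off = psci_pd_power_off;	/* driver-local callback */
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;
}
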
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index a12fb141875a..e8956706a291 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -172,7 +172,7 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
int index, unsigned int cpu)
{
- int ret;
+ int err;
/*
* CC6 state is the "CPU cluster power-off" state. In order to
@@ -183,9 +183,9 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
* CPU cores, GIC and L2 cache).
*/
if (index == TEGRA_CC6) {
- ret = tegra_cpuidle_coupled_barrier(dev);
- if (ret)
- return ret;
+ err = tegra_cpuidle_coupled_barrier(dev);
+ if (err)
+ return err;
}
local_fiq_disable();
@@ -194,15 +194,15 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
switch (index) {
case TEGRA_C7:
- ret = tegra_cpuidle_c7_enter();
+ err = tegra_cpuidle_c7_enter();
break;
case TEGRA_CC6:
- ret = tegra_cpuidle_cc6_enter(cpu);
+ err = tegra_cpuidle_cc6_enter(cpu);
break;
default:
- ret = -EINVAL;
+ err = -EINVAL;
break;
}
@@ -210,7 +210,7 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
tegra_pm_clear_cpu_in_lp2();
local_fiq_enable();
- return ret;
+ return err ?: index;
}
static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
@@ -236,21 +236,27 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
int index)
{
unsigned int cpu = cpu_logical_map(dev->cpu);
- int err;
+ int ret;
index = tegra_cpuidle_adjust_state_index(index, cpu);
if (dev->states_usage[index].disable)
return -1;
if (index == TEGRA_C1)
- err = arm_cpuidle_simple_enter(dev, drv, index);
+ ret = arm_cpuidle_simple_enter(dev, drv, index);
else
- err = tegra_cpuidle_state_enter(dev, index, cpu);
+ ret = tegra_cpuidle_state_enter(dev, index, cpu);
- if (err && (err != -EINTR || index != TEGRA_CC6))
- pr_err_once("failed to enter state %d err: %d\n", index, err);
+ if (ret < 0) {
+ if (ret != -EINTR || index != TEGRA_CC6)
+ pr_err_once("failed to enter state %d err: %d\n",
+ index, ret);
+ index = -1;
+ } else {
+ index = ret;
+ }
- return err ? -1 : index;
+ return index;
}
static int tegra114_enter_s2idle(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 29e84687f3c3..83af15f77f66 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -297,6 +297,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
} else {
dev->last_residency_ns = 0;
+ dev->states_usage[index].rejected++;
}
return entered_state;
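
Usage note: together with the sysfs hunk below, the new counter is visible per idle state at /sys/devices/system/cpu/cpuN/cpuidle/stateM/rejected, counting attempts where entry into the state was rejected (last_residency_ns reset to 0), complementing the existing usage counter.
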
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 091d1caceb41..53ec9585ccd4 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -256,6 +256,7 @@ define_show_state_time_function(exit_latency)
define_show_state_time_function(target_residency)
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
+define_show_state_ull_function(rejected)
define_show_state_str_function(name)
define_show_state_str_function(desc)
define_show_state_ull_function(above)
@@ -312,6 +313,7 @@ define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(residency, show_state_target_residency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
+define_one_state_ro(rejected, show_state_rejected);
define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
define_one_state_ro(above, show_state_above);
@@ -325,6 +327,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_residency.attr,
&attr_power.attr,
&attr_usage.attr,
+ &attr_rejected.attr,
&attr_time.attr,
&attr_disable.attr,
&attr_above.attr,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 52a9b7cf6576..37da0c070a88 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -71,10 +71,26 @@ config ZCRYPT
help
Select this option if you want to enable support for
s390 cryptographic adapters like:
- + PCI-X Cryptographic Coprocessor (PCIXCC)
- + Crypto Express 2,3,4 or 5 Coprocessor (CEXxC)
- + Crypto Express 2,3,4 or 5 Accelerator (CEXxA)
- + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP)
+ + Crypto Express 2 up to 7 Coprocessor (CEXxC)
+ + Crypto Express 2 up to 7 Accelerator (CEXxA)
+ + Crypto Express 4 up to 7 EP11 Coprocessor (CEXxP)
+
+config ZCRYPT_DEBUG
+ bool "Enable debug features for s390 cryptographic adapters"
+ default n
+ depends on DEBUG_KERNEL
+ depends on ZCRYPT
+ help
+ Say 'Y' here to enable some additional debug features in the
+ s390 cryptographic adapter driver.
+
+ Some additional sysfs attributes will be displayed for AP cards
+ and queues, and some flags on crypto requests are interpreted as
+ debugging messages to force error injection.
+
+ Do not enable this on a production-level kernel build.
+
+ If unsure, say N.
config ZCRYPT_MULTIDEVNODES
bool "Support for multiple zcrypt device nodes"
@@ -873,6 +889,7 @@ config CRYPTO_DEV_SA2UL
select CRYPTO_AES
select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 12e7c6a85a02..0cdfe0e8cc66 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -59,6 +59,32 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
This will create /sys/kernel/debug/sun8i-ce/stats for displaying
the number of requests per flow and per algorithm.
+config CRYPTO_DEV_SUN8I_CE_HASH
+ bool "Enable support for hash on sun8i-ce"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select MD5
+ select SHA1
+ select SHA256
+ select SHA512
+ help
+ Say y to enable support for hash algorithms.
+
+config CRYPTO_DEV_SUN8I_CE_PRNG
+ bool "Support for Allwinner Crypto Engine PRNG"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Crypto Engine.
+
+config CRYPTO_DEV_SUN8I_CE_TRNG
+ bool "Support for Allwinner Crypto Engine TRNG"
+ depends on CRYPTO_DEV_SUN8I_CE
+ select HW_RANDOM
+ help
+ Select this option if you want to provide kernel-side support for
+ the True Random Number Generator found in the Crypto Engine.
+
config CRYPTO_DEV_SUN8I_SS
tristate "Support for Allwinner Security System cryptographic offloader"
select CRYPTO_SKCIPHER
@@ -85,3 +111,20 @@ config CRYPTO_DEV_SUN8I_SS_DEBUG
Say y to enable sun8i-ss debug stats.
This will create /sys/kernel/debug/sun8i-ss/stats for displaying
the number of requests per flow and per algorithm.
+
+config CRYPTO_DEV_SUN8I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN8I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
+config CRYPTO_DEV_SUN8I_SS_HASH
+ bool "Enable support for hash on sun8i-ss"
+ depends on CRYPTO_DEV_SUN8I_SS
+ select MD5
+ select SHA1
+ select SHA256
+ help
+ Say y to enable support for hash algorithms.
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index dc35edd90034..1dff48558f53 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -9,6 +9,7 @@
* You can find the datasheet in Documentation/arm/sunxi.rst
*/
#include "sun4i-ss.h"
+#include <asm/unaligned.h>
#include <linux/scatterlist.h>
/* This is a totally arbitrary value */
@@ -196,7 +197,7 @@ static int sun4i_hash(struct ahash_request *areq)
struct sg_mapping_iter mi;
int in_r, err = 0;
size_t copied = 0;
- __le32 wb = 0;
+ u32 wb = 0;
dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
__func__, crypto_tfm_alg_name(areq->base.tfm),
@@ -408,7 +409,7 @@ hash_final:
nbw = op->len - 4 * nwait;
if (nbw) {
- wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
+ wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4));
wb &= GENMASK((nbw * 8) - 1, 0);
op->byte_count += nbw;
@@ -417,7 +418,7 @@ hash_final:
/* write the remaining bytes of the nbw buffer */
wb |= ((1 << 7) << (nbw * 8));
- bf[j++] = le32_to_cpu(wb);
+ ((__le32 *)bf)[j++] = cpu_to_le32(wb);
/*
* number of bytes to pad to reach 64 bytes, minus 8 (size) minus 4 (final 1)
@@ -479,16 +480,16 @@ hash_final:
/* Get the hash from the device */
if (op->mode == SS_OP_SHA1) {
for (i = 0; i < 5; i++) {
+ v = readl(ss->base + SS_MD0 + i * 4);
if (ss->variant->sha1_in_be)
- v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ put_unaligned_le32(v, areq->result + i * 4);
else
- v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
- memcpy(areq->result + i * 4, &v, 4);
+ put_unaligned_be32(v, areq->result + i * 4);
}
} else {
for (i = 0; i < 4; i++) {
- v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
- memcpy(areq->result + i * 4, &v, 4);
+ v = readl(ss->base + SS_MD0 + i * 4);
+ put_unaligned_le32(v, areq->result + i * 4);
}
}
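
The sun4i-ss hash fix above stops round-tripping digest words through a mislabeled __le32 plus memcpy() and instead stores each CPU-native register read with an explicit byte order via the put_unaligned_*() helpers, which also tolerate an unaligned result buffer. A minimal sketch of the store path, assuming only a generic MMIO register address:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/unaligned.h>

static void example_store_digest_word(void __iomem *reg, u8 *result,
				      bool big_endian)
{
	u32 v = readl(reg);	/* native-endian register read */

	if (big_endian)
		put_unaligned_be32(v, result);
	else
		put_unaligned_le32(v, result);
}
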
diff --git a/drivers/crypto/allwinner/sun8i-ce/Makefile b/drivers/crypto/allwinner/sun8i-ce/Makefile
index 08b68c3c1ca9..0842eb2d9408 100644
--- a/drivers/crypto/allwinner/sun8i-ce/Makefile
+++ b/drivers/crypto/allwinner/sun8i-ce/Makefile
@@ -1,2 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_SUN8I_CE) += sun8i-ce.o
sun8i-ce-y += sun8i-ce-core.o sun8i-ce-cipher.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_HASH) += sun8i-ce-hash.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG) += sun8i-ce-prng.o
+sun8i-ce-$(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) += sun8i-ce-trng.o
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index b4d5fea27d20..33707a2e55ff 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -75,8 +75,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
@@ -87,8 +88,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
- dma_addr_t addr_iv = 0, addr_key = 0;
- void *backup_iv = NULL;
u32 common, sym;
int flow, i;
int nr_sgs = 0;
@@ -119,7 +118,7 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
/* CTS and recent CE (H6) need length in bytes, in word otherwise */
- if (ce->variant->has_t_dlen_in_bytes)
+ if (ce->variant->cipher_t_dlen_in_bytes)
cet->t_dlen = cpu_to_le32(areq->cryptlen);
else
cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
@@ -141,41 +140,41 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
- addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
- cet->t_key = cpu_to_le32(addr_key);
- if (dma_mapping_error(ce->dev, addr_key)) {
+ rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, rctx->addr_key)) {
dev_err(ce->dev, "Cannot DMA MAP KEY\n");
err = -EFAULT;
goto theend;
}
+ cet->t_key = cpu_to_le32(rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
- chan->ivlen = ivsize;
- chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
- if (!chan->bounce_iv) {
+ rctx->ivlen = ivsize;
+ rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
+ if (!rctx->bounce_iv) {
err = -ENOMEM;
goto theend_key;
}
if (rctx->op_dir & CE_DECRYPTION) {
- backup_iv = kzalloc(ivsize, GFP_KERNEL);
- if (!backup_iv) {
+ rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
+ if (!rctx->backup_iv) {
err = -ENOMEM;
goto theend_key;
}
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(backup_iv, areq->src, offset,
- ivsize, 0);
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
+ offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
- DMA_TO_DEVICE);
- cet->t_iv = cpu_to_le32(addr_iv);
- if (dma_mapping_error(ce->dev, addr_iv)) {
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
err = -ENOMEM;
goto theend_iv;
}
+ cet->t_iv = cpu_to_le32(rctx->addr_iv);
}
if (areq->src == areq->dst) {
@@ -235,7 +234,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
}
chan->timeout = areq->cryptlen;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+ rctx->nr_sgs = nr_sgs;
+ rctx->nr_sgd = nr_sgd;
+ return 0;
theend_sgs:
if (areq->src == areq->dst) {
@@ -248,34 +249,83 @@ theend_sgs:
theend_iv:
if (areq->iv && ivsize > 0) {
- if (addr_iv)
- dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
- DMA_TO_DEVICE);
+ if (rctx->addr_iv)
+ dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, backup_iv, ivsize);
- kfree_sensitive(backup_iv);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ kfree_sensitive(rctx->backup_iv);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- kfree(chan->bounce_iv);
+ kfree(rctx->bounce_iv);
}
theend_key:
- dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
theend:
return err;
}
-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
- int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
- err = sun8i_ce_cipher(breq);
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
crypto_finalize_skcipher_request(engine, breq, err);
+ return 0;
+}
+
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ unsigned int ivsize, offset;
+ int nr_sgs = rctx->nr_sgs;
+ int nr_sgd = rctx->nr_sgd;
+ int flow;
+
+ flow = rctx->flow;
+ chan = &ce->chanlist[flow];
+ cet = chan->tl;
+ ivsize = crypto_skcipher_ivsize(tfm);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (cet->t_iv)
+ dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ kfree_sensitive(rctx->backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->bounce_iv);
+ }
+
+ dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
return 0;
}
@@ -347,9 +397,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
- op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
+ op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+ op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+ op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
@@ -366,10 +416,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync_suspend(op->ce->dev);
}
@@ -391,10 +438,7 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
@@ -416,10 +460,7 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (err)
return err;
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
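
The sun8i-ce cipher path above is split along the crypto_engine callbacks: prepare_request now does all DMA mapping and stashes the handles in the per-request context, do_one_request only kicks the hardware, and unprepare_request undoes every mapping. A skeletal sketch of that division of labor (all names here are illustrative; the real state lives in struct sun8i_cipher_req_ctx as shown in the hunks):

#include <crypto/engine.h>

static int example_prepare(struct crypto_engine *engine, void *async_req)
{
	/* Map key/IV/scatterlists; record handles in the request context. */
	return 0;
}

static int example_run(struct crypto_engine *engine, void *async_req)
{
	/* Trigger the hardware and complete the request; no mapping here. */
	return 0;
}

static int example_unprepare(struct crypto_engine *engine, void *async_req)
{
	/* Undo exactly the mappings made in example_prepare(). */
	return 0;
}
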
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 138759dc8190..158422ff5695 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
@@ -35,73 +36,108 @@
static const struct ce_variant ce_h3_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 50000000, 0 },
- }
+ },
+ .esr = ESR_H3,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_h5_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_H5,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_h6_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ALG_SHA384, CE_ALG_SHA512
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
- .has_t_dlen_in_bytes = true,
+ .cipher_t_dlen_in_bytes = true,
+ .hash_t_dlen_in_bits = true,
+ .prng_t_dlen_in_bytes = true,
+ .trng_t_dlen_in_bytes = true,
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
{ "ram", 0, 400000000 },
- }
+ },
+ .esr = ESR_H6,
+ .prng = CE_ALG_PRNG_V2,
+ .trng = CE_ALG_TRNG_V2,
};
static const struct ce_variant ce_a64_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_A64,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
static const struct ce_variant ce_r40_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
+ .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
+ CE_ID_NOTSUPP, CE_ID_NOTSUPP
+ },
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
- }
+ },
+ .esr = ESR_R40,
+ .prng = CE_ALG_PRNG,
+ .trng = CE_ID_NOTSUPP,
};
/*
* sun8i_ce_get_engine_number() get the next channel slot
* This is a simple round-robin way of getting the next channel
+ * Flow 3 is reserved for xRNG operations
*/
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
{
- return atomic_inc_return(&ce->flow) % MAXFLOW;
+ return atomic_inc_return(&ce->flow) % (MAXFLOW - 1);
}
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
+ struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -120,7 +156,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Be sure all data is written before enabling the task */
wmb();
- v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
+ /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored
+ * on older SoCs, we have no reason to complicate things.
+ */
+ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8);
writel(v, ce->base + CE_TLR);
mutex_unlock(&ce->mlock);
@@ -128,19 +167,56 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
msecs_to_jiffies(ce->chanlist[flow].timeout));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s\n", name);
+ dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
+ ce->chanlist[flow].timeout, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
* nothing could modify the error value for this channel
*/
v = readl(ce->base + CE_ESR);
- if (v) {
+ switch (ce->variant->esr) {
+ case ESR_H3:
+ /* Sadly, the error bit is not per flow */
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ break;
+ case ESR_A64:
+ case ESR_H5:
+ case ESR_R40:
v >>= (flow * 4);
+ v &= 0xF;
+ if (v) {
+ dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
+ }
+ if (v & CE_ERR_ALGO_NOTSUP)
+ dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
+ if (v & CE_ERR_DATALEN)
+ dev_err(ce->dev, "CE ERROR: data length error\n");
+ if (v & CE_ERR_KEYSRAM)
+ dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
+ break;
+ case ESR_H6:
+ v >>= (flow * 8);
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
err = -EFAULT;
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -150,7 +226,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
if (v & CE_ERR_ADDR_INVALID)
dev_err(ce->dev, "CE ERROR: address invalid\n");
- }
+ if (v & CE_ERR_KEYLADDER)
+ dev_err(ce->dev, "CE ERROR: key ladder configuration error\n");
+ break;
+ }
return err;
}
@@ -280,13 +359,214 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.decrypt = sun8i_ce_skdecrypt,
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_MD5,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA224,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA256,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA384,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ce_algo_id = CE_ID_HASH_SHA512,
+ .alg.hash = {
+ .init = sun8i_ce_hash_init,
+ .update = sun8i_ce_hash_update,
+ .final = sun8i_ce_hash_final,
+ .finup = sun8i_ce_hash_finup,
+ .digest = sun8i_ce_hash_digest,
+ .export = sun8i_ce_hash_export,
+ .import = sun8i_ce_hash_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-sun8i-ce",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_hash_crainit,
+ .cra_exit = sun8i_ce_hash_craexit,
+ }
+ }
+ }
+},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun8i-ce-prng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct sun8i_ce_rng_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ce_prng_init,
+ .cra_exit = sun8i_ce_prng_exit,
+ },
+ .generate = sun8i_ce_prng_generate,
+ .seed = sun8i_ce_prng_seed,
+ .seedsize = PRNG_SEED_SIZE,
+ }
+},
+#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
+static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ce_dev *ce = seq->private;
- int i;
+ unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
@@ -301,23 +581,28 @@ static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.hash.halg.base.cra_driver_name,
+ ce_algs[i].alg.hash.halg.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_fb);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s %lu %lu\n",
+ ce_algs[i].alg.rng.base.cra_driver_name,
+ ce_algs[i].alg.rng.base.cra_name,
+ ce_algs[i].stat_req, ce_algs[i].stat_bytes);
+ break;
}
}
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ seq_printf(seq, "HWRNG %lu %lu\n",
+ ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+#endif
return 0;
}
-static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun8i_ce_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun8i_ce_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
#endif
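
For reference, DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs) from <linux/seq_file.h> generates roughly the open/fops boilerplate the hunk above deletes by hand:

static int sun8i_ce_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sun8i_ce_debugfs_show, inode->i_private);
}

static const struct file_operations sun8i_ce_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = sun8i_ce_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
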
static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
@@ -482,7 +767,8 @@ static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
{
- int ce_method, err, id, i;
+ int ce_method, err, id;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
ce_algs[i].ce = ce;
@@ -515,6 +801,43 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
return err;
}
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ id = ce_algs[i].ce_algo_id;
+ ce_method = ce->variant->alg_hash[id];
+ if (ce_method == CE_ID_NOTSUPP) {
+ dev_info(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ err = crypto_register_ahash(&ce_algs[i].alg.hash);
+ if (err) {
+ dev_err(ce->dev, "ERROR: Fail to register %s\n",
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].ce = NULL;
+ return err;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ if (ce->variant->prng == CE_ID_NOTSUPP) {
+ dev_info(ce->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ ce_algs[i].ce = NULL;
+ break;
+ }
+ dev_info(ce->dev, "Register %s\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ err = crypto_register_rng(&ce_algs[i].alg.rng);
+ if (err) {
+ dev_err(ce->dev, "Fail to register %s\n",
+ ce_algs[i].alg.rng.base.cra_name);
+ ce_algs[i].ce = NULL;
+ }
+ break;
default:
ce_algs[i].ce = NULL;
dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
@@ -525,7 +848,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
@@ -536,6 +859,16 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
ce_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.hash.halg.base.cra_name);
+ crypto_unregister_ahash(&ce_algs[i].alg.hash);
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ dev_info(ce->dev, "Unregister %d %s\n", i,
+ ce_algs[i].alg.rng.base.cra_name);
+ crypto_unregister_rng(&ce_algs[i].alg.rng);
+ break;
}
}
}
@@ -573,14 +906,12 @@ static int sun8i_ce_probe(struct platform_device *pdev)
return irq;
ce->reset = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(ce->reset)) {
- if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
- return PTR_ERR(ce->reset);
- dev_err(&pdev->dev, "No reset control found\n");
- return PTR_ERR(ce->reset);
- }
+ if (IS_ERR(ce->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
+ "No reset control found\n");
mutex_init(&ce->mlock);
+ mutex_init(&ce->rnglock);
err = sun8i_ce_allocate_chanlist(ce);
if (err)
@@ -605,6 +936,10 @@ static int sun8i_ce_probe(struct platform_device *pdev)
if (err < 0)
goto error_alg;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ sun8i_ce_hwrng_register(ce);
+#endif
+
v = readl(ce->base + CE_CTR);
v >>= CE_DIE_ID_SHIFT;
v &= CE_DIE_ID_MASK;
@@ -634,6 +969,10 @@ static int sun8i_ce_remove(struct platform_device *pdev)
{
struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ sun8i_ce_hwrng_unregister(ce);
+#endif
+
sun8i_ce_unregister_algs(ce);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
new file mode 100644
index 000000000000..a94bf28f858a
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-hash.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi.rst
+ */
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include "sun8i-ce.h"
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ op->ce = algt->ce;
+
+ op->enginectx.op.do_one_request = sun8i_ce_hash_run;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ /* FALLBACK */
+ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
+ algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun8i_ce_hash_reqctx) +
+ crypto_ahash_reqsize(op->fallback_tfm));
+
+ dev_info(op->ce->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(tfm),
+ crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ err = pm_runtime_get_sync(op->ce->dev);
+ if (err < 0)
+ goto error_pm;
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_ahash(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tfmctx->fallback_tfm);
+ pm_runtime_put_sync_suspend(tfmctx->ce->dev);
+}
+
+int sun8i_ce_hash_init(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+int sun8i_ce_hash_final(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = areq->result;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_update(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+int sun8i_ce_hash_finup(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
+{
+ struct scatterlist *sg;
+
+ if (areq->nbytes == 0)
+ return true;
+ /* we need to reserve one SG for the padding one */
+ if (sg_nents(areq->src) > MAX_SG - 1)
+ return true;
+ sg = areq->src;
+ while (sg) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+int sun8i_ce_hash_digest(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ struct crypto_engine *engine;
+ struct scatterlist *sg;
+ int nr_sgs, e, i;
+
+ if (sun8i_ce_hash_need_fallback(areq))
+ return sun8i_ce_hash_digest_fb(areq);
+
+ nr_sgs = sg_nents(areq->src);
+ if (nr_sgs > MAX_SG - 1)
+ return sun8i_ce_hash_digest_fb(areq);
+
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return sun8i_ce_hash_digest_fb(areq);
+ }
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ ce = algt->ce;
+
+ e = sun8i_ce_get_engine_number(ce);
+ rctx->flow = e;
+ engine = ce->chanlist[e].engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, areq);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ struct scatterlist *sg;
+ int nr_sgs, flow, err;
+ unsigned int len;
+ u32 common;
+ u64 byte_count;
+ __le32 *bf;
+ void *buf;
+ int j, i, todo;
+ int nbw = 0;
+ u64 fill, min_fill;
+ __be64 *bebits;
+ __le64 *lebits;
+ void *result;
+ u64 bs;
+ int digestsize;
+ dma_addr_t addr_res, addr_pad;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ ce = algt->ce;
+
+ bs = algt->alg.hash.halg.base.cra_blocksize;
+ digestsize = algt->alg.hash.halg.digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ digestsize = SHA256_DIGEST_SIZE;
+ if (digestsize == SHA384_DIGEST_SIZE)
+ digestsize = SHA512_DIGEST_SIZE;
+
+ /* the padding could be up to two blocks. */
+ buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+ bf = (__le32 *)buf;
+
+ result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+ if (!result) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ flow = rctx->flow;
+ chan = &ce->chanlist[flow];
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+#endif
+ dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
+
+ cet = chan->tl;
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->alg_hash[algt->ce_algo_id];
+ common |= CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ cet->t_sym_ctl = 0;
+ cet->t_asym_ctl = 0;
+
+ nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ len = areq->nbytes;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
+ todo = min(len, sg_dma_len(sg));
+ cet->t_src[i].len = cpu_to_le32(todo / 4);
+ len -= todo;
+ }
+ if (len > 0) {
+ dev_err(ce->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend;
+ }
+ addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = cpu_to_le32(addr_res);
+ cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
+ if (dma_mapping_error(ce->dev, addr_res)) {
+ dev_err(ce->dev, "DMA map dest\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ byte_count = areq->nbytes;
+ j = 0;
+ bf[j++] = cpu_to_le32(0x80);
+
+ if (bs == 64) {
+ fill = 64 - (byte_count % 64);
+ min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+ } else {
+ fill = 128 - (byte_count % 128);
+ min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
+ }
+
+ if (fill < min_fill)
+ fill += bs;
+
+ j += (fill - min_fill) / sizeof(u32);
+
+ switch (algt->ce_algo_id) {
+ case CE_ID_HASH_MD5:
+ lebits = (__le64 *)&bf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ break;
+ case CE_ID_HASH_SHA1:
+ case CE_ID_HASH_SHA224:
+ case CE_ID_HASH_SHA256:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ case CE_ID_HASH_SHA384:
+ case CE_ID_HASH_SHA512:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count >> 61);
+ j += 2;
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ }
+
+ addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
+ cet->t_src[i].addr = cpu_to_le32(addr_pad);
+ cet->t_src[i].len = cpu_to_le32(j);
+ if (dma_mapping_error(ce->dev, addr_pad)) {
+ dev_err(ce->dev, "DMA error on padding SG\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ if (ce->variant->hash_t_dlen_in_bits)
+ cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
+ else
+ cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
+
+ chan->timeout = areq->nbytes;
+
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+
+ dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+theend:
+ kfree(buf);
+ kfree(result);
+ crypto_finalize_hash_request(engine, breq, err);
+ return 0;
+}
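The padding built by sun8i_ce_hash_run() is the usual Merkle-Damgard scheme: a 0x80 marker, zero fill, then the message bit length (little-endian for MD5, big-endian for SHA, 128 bits for SHA384/512). A worked example for a 20-byte SHA-1 message (bs = 64), mirroring the j/fill arithmetic above:

    /* illustrative only: padding word count for a 20-byte SHA-1 message */
    static unsigned int sha1_pad_words_example(void)
    {
    	u64 byte_count = 20;
    	u64 fill = 64 - (byte_count % 64);	/* 44 bytes must be appended */
    	u64 min_fill = 3 * sizeof(u32);		/* 0x80 word + 64-bit length */

    	if (fill < min_fill)
    		fill += 64;
    	/* 1 marker word + 8 zero words + 2 length words = 11 words (44 bytes),
    	 * so 20 data bytes + 44 pad bytes = one full 64-byte block
    	 */
    	return 1 + (fill - min_fill) / sizeof(u32) + 2;
    }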
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
new file mode 100644
index 000000000000..cfde9ee4356b
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-prng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the PRNG
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi.rst
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ce_prng_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, 0, sizeof(struct sun8i_ce_rng_tfm_ctx));
+ return 0;
+}
+
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->seed = NULL;
+ ctx->slen = 0;
+}
+
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+ if (ctx->seed && ctx->slen != slen) {
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->slen = 0;
+ ctx->seed = NULL;
+ }
+ if (!ctx->seed)
+ ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ if (!ctx->seed)
+ return -ENOMEM;
+
+ memcpy(ctx->seed, seed, slen);
+ ctx->slen = slen;
+
+ return 0;
+}
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct sun8i_ce_alg_template *algt;
+ struct sun8i_ce_dev *ce;
+ dma_addr_t dma_iv, dma_dst;
+ int err = 0;
+ int flow = 3;
+ unsigned int todo;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ u32 common, sym;
+ void *d;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.rng);
+ ce = algt->ce;
+
+ if (ctx->slen == 0) {
+ dev_err(ce->dev, "not seeded\n");
+ return -EINVAL;
+ }
+
+ /* we want at least dlen + seed size, rounded up to a multiple of PRNG_DATA_SIZE */
+ todo = dlen + ctx->slen + PRNG_DATA_SIZE * 2;
+ todo -= todo % PRNG_DATA_SIZE;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__,
+ slen, dlen, todo, todo / PRNG_DATA_SIZE);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+#endif
+
+ dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_iv)) {
+ dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ goto err_iv;
+ }
+
+ dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_dst)) {
+ dev_err(ce->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_dst;
+ }
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ goto err_pm;
+ }
+
+ mutex_lock(&ce->rnglock);
+ chan = &ce->chanlist[flow];
+
+ cet = &chan->tl[0];
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->prng | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ /* recent CE (H6) needs the length in bytes, older ones in words */
+ if (ce->variant->prng_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(todo);
+ else
+ cet->t_dlen = cpu_to_le32(todo / 4);
+
+ sym = PRNG_LD;
+ cet->t_sym_ctl = cpu_to_le32(sym);
+ cet->t_asym_ctl = 0;
+
+ cet->t_key = cpu_to_le32(dma_iv);
+ cet->t_iv = cpu_to_le32(dma_iv);
+
+ cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+ cet->t_dst[0].len = cpu_to_le32(todo / 4);
+ ce->chanlist[flow].timeout = 2000;
+
+ err = sun8i_ce_run_task(ce, 3, "PRNG");
+ mutex_unlock(&ce->rnglock);
+
+ pm_runtime_put(ce->dev);
+
+err_pm:
+ dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_dst:
+ dma_unmap_single(ce->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+ if (!err) {
+ memcpy(dst, d, dlen);
+ memcpy(ctx->seed, d + dlen, ctx->slen);
+ }
+ memzero_explicit(d, todo);
+err_iv:
+ kfree(d);
+err_mem:
+ return err;
+}
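Since the PRNG registers as "stdrng", in-kernel users reach it through the generic crypto_rng API; which provider crypto_alloc_rng() picks depends on cra_priority. A minimal consumer sketch (the function name is illustrative):

    #include <crypto/rng.h>

    static int get_rng_bytes_example(u8 *buf, unsigned int len,
    				 const u8 *seed, unsigned int slen)
    {
    	struct crypto_rng *rng;
    	int ret;

    	rng = crypto_alloc_rng("stdrng", 0, 0);
    	if (IS_ERR(rng))
    		return PTR_ERR(rng);
    	ret = crypto_rng_reset(rng, seed, slen);	/* calls ->seed() */
    	if (!ret)
    		ret = crypto_rng_get_bytes(rng, buf, len); /* ->generate() */
    	crypto_free_rng(rng);
    	return ret;
    }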
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
new file mode 100644
index 000000000000..5b7af4498bd5
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ce-trng.c - hardware cryptographic offloader for
+ * Allwinner H3/A64/H5/H2+/H6/R40 SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the TRNG
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi.rst
+ */
+#include "sun8i-ce.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/hw_random.h>
+/*
+ * Note that according to the algorithm ID, 2 versions of the TRNG exist:
+ * the first is present in H3/H5/R40/A64 and the second in H6.
+ * This file adds support for both, but only the second works
+ * reliably according to rngtest.
+ */
+
+static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct sun8i_ce_dev *ce;
+ dma_addr_t dma_dst;
+ int err = 0;
+ int flow = 3;
+ unsigned int todo;
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ u32 common;
+ void *d;
+
+ ce = container_of(rng, struct sun8i_ce_dev, trng);
+
+ /* round the data length to a multiple of 32 */
+ todo = max + 32;
+ todo -= todo % 32;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d)
+ return -ENOMEM;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->hwrng_stat_req++;
+ ce->hwrng_stat_bytes += todo;
+#endif
+
+ dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ce->dev, dma_dst)) {
+ dev_err(ce->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_dst;
+ }
+
+ err = pm_runtime_get_sync(ce->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ce->dev);
+ goto err_pm;
+ }
+
+ mutex_lock(&ce->rnglock);
+ chan = &ce->chanlist[flow];
+
+ cet = &chan->tl[0];
+ memset(cet, 0, sizeof(struct ce_task));
+
+ cet->t_id = cpu_to_le32(flow);
+ common = ce->variant->trng | CE_COMM_INT;
+ cet->t_common_ctl = cpu_to_le32(common);
+
+ /* recent CE (H6) needs the length in bytes, older ones in words */
+ if (ce->variant->trng_t_dlen_in_bytes)
+ cet->t_dlen = cpu_to_le32(todo);
+ else
+ cet->t_dlen = cpu_to_le32(todo / 4);
+
+ cet->t_sym_ctl = 0;
+ cet->t_asym_ctl = 0;
+
+ cet->t_dst[0].addr = cpu_to_le32(dma_dst);
+ cet->t_dst[0].len = cpu_to_le32(todo / 4);
+ ce->chanlist[flow].timeout = todo;
+
+ err = sun8i_ce_run_task(ce, 3, "TRNG");
+ mutex_unlock(&ce->rnglock);
+
+ pm_runtime_put(ce->dev);
+
+err_pm:
+ dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE);
+
+ if (!err) {
+ memcpy(data, d, max);
+ err = max;
+ }
+ memzero_explicit(d, todo);
+err_dst:
+ kfree(d);
+ return err;
+}
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce)
+{
+ int ret;
+
+ if (ce->variant->trng == CE_ID_NOTSUPP) {
+ dev_info(ce->dev, "TRNG not supported\n");
+ return 0;
+ }
+ ce->trng.name = "sun8i Crypto Engine TRNG";
+ ce->trng.read = sun8i_ce_trng_read;
+ ce->trng.quality = 1000;
+
+ ret = hwrng_register(&ce->trng);
+ if (ret)
+ dev_err(ce->dev, "Fail to register the TRNG\n");
+ return ret;
+}
+
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce)
+{
+ if (ce->variant->trng == CE_ID_NOTSUPP)
+ return;
+ hwrng_unregister(&ce->trng);
+}
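Once registered with the hwrng core, the TRNG feeds the kernel entropy pool and is readable from userspace via /dev/hwrng when selected as the current rng. A hypothetical userspace reader:

    #include <fcntl.h>
    #include <unistd.h>

    static int read_hwrng_example(void *buf, size_t len)
    {
    	int fd = open("/dev/hwrng", O_RDONLY);
    	ssize_t r = -1;

    	if (fd >= 0) {
    		r = read(fd, buf, len);
    		close(fd);
    	}
    	return r == (ssize_t)len ? 0 : -1;
    }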
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 963645fe4adb..558027516aed 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -12,6 +12,11 @@
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/rng.h>
+#include <crypto/sha.h>
/* CE Registers */
#define CE_TDQ 0x00
@@ -45,6 +50,16 @@
#define CE_ALG_AES 0
#define CE_ALG_DES 1
#define CE_ALG_3DES 2
+#define CE_ALG_MD5 16
+#define CE_ALG_SHA1 17
+#define CE_ALG_SHA224 18
+#define CE_ALG_SHA256 19
+#define CE_ALG_SHA384 20
+#define CE_ALG_SHA512 21
+#define CE_ALG_TRNG 48
+#define CE_ALG_PRNG 49
+#define CE_ALG_TRNG_V2 0x1c
+#define CE_ALG_PRNG_V2 0x1d
/* Used in ce_variant */
#define CE_ID_NOTSUPP 0xFF
@@ -54,6 +69,14 @@
#define CE_ID_CIPHER_DES3 2
#define CE_ID_CIPHER_MAX 3
+#define CE_ID_HASH_MD5 0
+#define CE_ID_HASH_SHA1 1
+#define CE_ID_HASH_SHA224 2
+#define CE_ID_HASH_SHA256 3
+#define CE_ID_HASH_SHA384 4
+#define CE_ID_HASH_SHA512 5
+#define CE_ID_HASH_MAX 6
+
#define CE_ID_OP_ECB 0
#define CE_ID_OP_CBC 1
#define CE_ID_OP_MAX 2
@@ -65,6 +88,16 @@
#define CE_ERR_ADDR_INVALID BIT(5)
#define CE_ERR_KEYLADDER BIT(6)
+#define ESR_H3 0
+#define ESR_A64 1
+#define ESR_R40 2
+#define ESR_H5 3
+#define ESR_H6 4
+
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+#define PRNG_LD BIT(17)
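+/* PRNG_DATA_SIZE: the PRNG outputs 160 bits (20 bytes) per round;
+ * PRNG_SEED_SIZE: the seed is 175 bits, i.e. 22 bytes rounded up
+ * (bit widths as implied by the definitions above)
+ */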
+
#define CE_DIE_ID_SHIFT 16
#define CE_DIE_ID_MASK 0x07
@@ -90,16 +123,34 @@ struct ce_clock {
* struct ce_variant - Describe CE capability for each variant hardware
* @alg_cipher: list of supported ciphers. for each CE_ID_ this will give the
* corresponding CE_ALG_XXX value
+ * @alg_hash: list of supported hashes. for each CE_ID_ this will give the
+ * corresponding CE_ALG_XXX value
* @op_mode: list of supported block modes
- * @has_t_dlen_in_bytes: Does the request size for cipher is in
+ * @cipher_t_dlen_in_bytes: Is the request size for cipher in
+ * bytes or words
+ * @hash_t_dlen_in_bits: Is the request size for hash in
+ * bits or words
+ * @prng_t_dlen_in_bytes: Is the request size for PRNG in
+ * bytes or words
+ * @trng_t_dlen_in_bytes: Is the request size for TRNG in
* bytes or words
* @ce_clks: list of clocks needed by this variant
+ * @esr: The type of error register
+ * @prng: The CE_ALG_XXX value for the PRNG
+ * @trng: The CE_ALG_XXX value for the TRNG
*/
struct ce_variant {
char alg_cipher[CE_ID_CIPHER_MAX];
+ char alg_hash[CE_ID_HASH_MAX];
u32 op_mode[CE_ID_OP_MAX];
- bool has_t_dlen_in_bytes;
+ bool cipher_t_dlen_in_bytes;
+ bool hash_t_dlen_in_bits;
+ bool prng_t_dlen_in_bytes;
+ bool trng_t_dlen_in_bytes;
struct ce_clock ce_clks[CE_MAX_CLOCKS];
+ int esr;
+ unsigned char prng;
+ unsigned char trng;
};
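With the new fields, a variant description for a PRNG/TRNG-capable SoC would look roughly like the sketch below (values illustrative, not taken from a datasheet; .op_mode and .ce_clks omitted for brevity):

    static const struct ce_variant ce_h6_variant_example = {
    	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, },
    	.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
    		      CE_ALG_SHA384, CE_ALG_SHA512 },
    	.cipher_t_dlen_in_bytes = true,
    	.hash_t_dlen_in_bits = true,
    	.prng_t_dlen_in_bytes = true,
    	.trng_t_dlen_in_bytes = true,
    	.esr = ESR_H6,
    	.prng = CE_ALG_PRNG_V2,
    	.trng = CE_ALG_TRNG_V2,
    };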
struct sginfo {
@@ -129,8 +180,6 @@ struct ce_task {
/*
* struct sun8i_ce_flow - Information used by each flow
* @engine: ptr to the crypto_engine for this flow
- * @bounce_iv: buffer which contain the IV
- * @ivlen: size of bounce_iv
* @complete: completion for the current task on this flow
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
@@ -139,8 +188,6 @@ struct ce_task {
*/
struct sun8i_ce_flow {
struct crypto_engine *engine;
- void *bounce_iv;
- unsigned int ivlen;
struct completion complete;
int status;
dma_addr_t t_phy;
@@ -158,6 +205,7 @@ struct sun8i_ce_flow {
* @reset: pointer to reset controller
* @dev: the platform device
* @mlock: Control access to device registers
+ * @rnglock: Control access to the RNG (dedicated channel 3)
* @chanlist: array of all flow
* @flow: flow to use in next request
* @variant: pointer to variant specific data
@@ -170,6 +218,7 @@ struct sun8i_ce_dev {
struct reset_control *reset;
struct device *dev;
struct mutex mlock;
+ struct mutex rnglock;
struct sun8i_ce_flow *chanlist;
atomic_t flow;
const struct ce_variant *variant;
@@ -177,17 +226,38 @@ struct sun8i_ce_dev {
struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats;
#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+ struct hwrng trng;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ unsigned long hwrng_stat_req;
+ unsigned long hwrng_stat_bytes;
+#endif
+#endif
};
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request
+ * @backup_iv: buffer which contains the next IV to store
+ * @bounce_iv: buffer which contains the IV
+ * @ivlen: size of bounce_iv
+ * @nr_sgs: The number of source SG (as given by dma_map_sg())
+ * @nr_sgd: The number of destination SG (as given by dma_map_sg())
+ * @addr_iv: The IV addr returned by dma_map_single, need to unmap later
+ * @addr_key: The key addr returned by dma_map_single, need to unmap later
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
+ void *backup_iv;
+ void *bounce_iv;
+ unsigned int ivlen;
+ int nr_sgs;
+ int nr_sgd;
+ dma_addr_t addr_iv;
+ dma_addr_t addr_key;
struct skcipher_request fallback_req; // keep at the end
};
@@ -208,6 +278,38 @@ struct sun8i_cipher_tfm_ctx {
};
/*
+ * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @ce: pointer to the private data of driver handling this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ */
+struct sun8i_ce_hash_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct sun8i_ce_dev *ce;
+ struct crypto_ahash *fallback_tfm;
+};
+
+/*
+ * struct sun8i_ce_hash_reqctx - context for an ahash request
+ * @fallback_req: pre-allocated fallback request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_ce_hash_reqctx {
+ struct ahash_request fallback_req;
+ int flow;
+};
+
+/*
+ * struct sun8i_ce_rng_tfm_ctx - context for the PRNG TFM
+ * @seed: The seed to use
+ * @slen: The size of the seed
+ */
+struct sun8i_ce_rng_tfm_ctx {
+ void *seed;
+ unsigned int slen;
+};
+
+/*
* struct sun8i_ce_alg_template - crypto_alg template
* @type: the CRYPTO_ALG_TYPE for this template
* @ce_algo_id: the CE_ID for this template
@@ -217,6 +319,7 @@ struct sun8i_cipher_tfm_ctx {
* @alg: one of sub struct must be used
* @stat_req: number of requests done on this template
* @stat_fb: number of requests which fell back
+ * @stat_bytes: total data size done by this template
*/
struct sun8i_ce_alg_template {
u32 type;
@@ -225,10 +328,13 @@ struct sun8i_ce_alg_template {
struct sun8i_ce_dev *ce;
union {
struct skcipher_alg skcipher;
+ struct ahash_alg hash;
+ struct rng_alg rng;
} alg;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
+ unsigned long stat_bytes;
#endif
};
@@ -246,3 +352,24 @@ int sun8i_ce_skencrypt(struct skcipher_request *areq);
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
+
+int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
+void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init(struct ahash_request *areq);
+int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ce_hash_final(struct ahash_request *areq);
+int sun8i_ce_hash_update(struct ahash_request *areq);
+int sun8i_ce_hash_finup(struct ahash_request *areq);
+int sun8i_ce_hash_digest(struct ahash_request *areq);
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq);
+
+int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+void sun8i_ce_prng_exit(struct crypto_tfm *tfm);
+int sun8i_ce_prng_init(struct crypto_tfm *tfm);
+
+int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce);
+void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce);
diff --git a/drivers/crypto/allwinner/sun8i-ss/Makefile b/drivers/crypto/allwinner/sun8i-ss/Makefile
index add7b0543fd5..aabfd893c817 100644
--- a/drivers/crypto/allwinner/sun8i-ss/Makefile
+++ b/drivers/crypto/allwinner/sun8i-ss/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_SUN8I_SS) += sun8i-ss.o
sun8i-ss-y += sun8i-ss-core.o sun8i-ss-cipher.o
+sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG) += sun8i-ss-prng.o
+sun8i-ss-$(CONFIG_CRYPTO_DEV_SUN8I_SS_HASH) += sun8i-ss-hash.o
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 7b39b4495571..ed2a69f82e1c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -248,7 +248,6 @@ theend_iv:
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & SS_DECRYPTION) {
memcpy(areq->iv, backup_iv, ivsize);
- memzero_explicit(backup_iv, ivsize);
kfree_sensitive(backup_iv);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
@@ -368,10 +367,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync(op->ss->dev);
}
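kfree_sensitive() replaces the open-coded memzero_explicit()+kfree() pair and also tolerates a NULL pointer, which is why the surrounding if disappears in the hunks below. Roughly:

    /* sketch of kfree_sensitive() semantics */
    static void kfree_sensitive_sketch(const void *p)
    {
    	if (p) {
    		memzero_explicit((void *)p, ksize(p)); /* wipe whole allocation */
    		kfree(p);
    	}
    }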
@@ -393,10 +389,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
@@ -419,10 +412,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 9a23515783a6..e0ddc684798d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"
@@ -40,6 +41,8 @@ static const struct ss_variant ss_a80_variant = {
static const struct ss_variant ss_a83t_variant = {
.alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES,
},
+ .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256,
+ },
.op_mode = { SS_OP_ECB, SS_OP_CBC,
},
.ss_clks = {
@@ -61,7 +64,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
const char *name)
{
int flow = rctx->flow;
- u32 v = 1;
+ u32 v = SS_START;
int i;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
@@ -264,13 +267,154 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.decrypt = sun8i_ss_skdecrypt,
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun8i-ss-prng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct sun8i_ss_rng_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_prng_init,
+ .cra_exit = sun8i_ss_prng_exit,
+ },
+ .generate = sun8i_ss_prng_generate,
+ .seed = sun8i_ss_prng_seed,
+ .seedsize = PRNG_SEED_SIZE,
+ }
+},
+#endif
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_MD5,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA1,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA224,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+{ .type = CRYPTO_ALG_TYPE_AHASH,
+ .ss_algo_id = SS_ID_HASH_SHA256,
+ .alg.hash = {
+ .init = sun8i_ss_hash_init,
+ .update = sun8i_ss_hash_update,
+ .final = sun8i_ss_hash_final,
+ .finup = sun8i_ss_hash_finup,
+ .digest = sun8i_ss_hash_digest,
+ .export = sun8i_ss_hash_export,
+ .import = sun8i_ss_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sun8i-ss",
+ .cra_priority = 300,
+ .cra_alignmask = 3,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sun8i_ss_hash_crainit,
+ .cra_exit = sun8i_ss_hash_craexit,
+ }
+ }
+ }
+},
+#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ss_dev *ss = seq->private;
- int i;
+ unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
@@ -280,28 +424,29 @@ static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v)
continue;
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- seq_printf(seq, "%s %s %lu %lu\n",
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ss_algs[i].alg.skcipher.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n",
+ ss_algs[i].alg.rng.base.cra_driver_name,
+ ss_algs[i].alg.rng.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_bytes);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
+ ss_algs[i].alg.hash.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].stat_req, ss_algs[i].stat_fb);
+ break;
}
}
return 0;
}
-static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun8i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun8i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun8i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
#endif
static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
@@ -415,7 +560,8 @@ static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss)
static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
{
- int ss_method, err, id, i;
+ int ss_method, err, id;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
ss_algs[i].ss = ss;
@@ -448,6 +594,34 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
return err;
}
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ err = crypto_register_rng(&ss_algs[i].alg.rng);
+ if (err) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.rng.base.cra_name);
+ ss_algs[i].ss = NULL;
+ }
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ id = ss_algs[i].ss_algo_id;
+ ss_method = ss->variant->alg_hash[id];
+ if (ss_method == SS_ID_NOTSUPP) {
+ dev_info(ss->dev,
+ "DEBUG: Algo of %s not supported\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].ss = NULL;
+ break;
+ }
+ dev_info(ss->dev, "Register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ if (err) {
+ dev_err(ss->dev, "ERROR: Fail to register %s\n",
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].ss = NULL;
+ return err;
+ }
+ break;
default:
ss_algs[i].ss = NULL;
dev_err(ss->dev, "ERROR: tried to register an unknown algo\n");
@@ -458,7 +632,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
if (!ss_algs[i].ss)
@@ -469,6 +643,16 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
ss_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.rng.base.cra_name);
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ dev_info(ss->dev, "Unregister %d %s\n", i,
+ ss_algs[i].alg.hash.halg.base.cra_name);
+ crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ break;
}
}
}
@@ -545,12 +729,9 @@ static int sun8i_ss_probe(struct platform_device *pdev)
return irq;
ss->reset = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
- dev_err(&pdev->dev, "No reset control found\n");
- return PTR_ERR(ss->reset);
- }
+ if (IS_ERR(ss->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ss->reset),
+ "No reset control found\n");
mutex_init(&ss->mlock);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
new file mode 100644
index 000000000000..b6ab2054f217
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-hash.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file adds support for MD5 and SHA1/SHA224/SHA256.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi.rst
+ */
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include "sun8i-ss.h"
+
+int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+ int err;
+
+ memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ op->ss = algt->ss;
+
+ op->enginectx.op.do_one_request = sun8i_ss_hash_run;
+ op->enginectx.op.prepare_request = NULL;
+ op->enginectx.op.unprepare_request = NULL;
+
+ /* FALLBACK */
+ op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
+ return PTR_ERR(op->fallback_tfm);
+ }
+
+ if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
+ algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sun8i_ss_hash_reqctx) +
+ crypto_ahash_reqsize(op->fallback_tfm));
+
+ dev_info(op->ss->dev, "Fallback for %s is %s\n",
+ crypto_tfm_alg_driver_name(tfm),
+ crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
+ err = pm_runtime_get_sync(op->ss->dev);
+ if (err < 0)
+ goto error_pm;
+ return 0;
+error_pm:
+ pm_runtime_put_noidle(op->ss->dev);
+ crypto_free_ahash(op->fallback_tfm);
+ return err;
+}
+
+void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(tfmctx->fallback_tfm);
+ pm_runtime_put_sync_suspend(tfmctx->ss->dev);
+}
+
+int sun8i_ss_hash_init(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+int sun8i_ss_hash_final(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = areq->result;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_update(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+int sun8i_ss_hash_finup(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
+{
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_alg_template *algt;
+#endif
+
+ ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
+ rctx->fallback_req.base.flags = areq->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = areq->nbytes;
+ rctx->fallback_req.src = areq->src;
+ rctx->fallback_req.result = areq->result;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt->stat_fb++;
+#endif
+
+ return crypto_ahash_digest(&rctx->fallback_req);
+}
+
+static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
+ struct sun8i_ss_hash_reqctx *rctx,
+ const char *name)
+{
+ int flow = rctx->flow;
+ u32 v = SS_START;
+ int i;
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[flow].stat_req++;
+#endif
+
+ /* choose between stream0/stream1 */
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ v |= rctx->method;
+
+ for (i = 0; i < MAX_SG; i++) {
+ if (!rctx->t_dst[i].addr)
+ break;
+
+ mutex_lock(&ss->mlock);
+ if (i > 0) {
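+ /*
+ * For SGs after the first, resume from the previous partial
+ * digest: its address is fed back through the KEY/IV
+ * registers, and BIT(17) appears to select this chained
+ * mode (the exact datasheet name for the bit is unclear).
+ */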
+ v |= BIT(17);
+ writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
+ writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
+ }
+
+ dev_dbg(ss->dev,
+ "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
+ i, flow, name, v,
+ rctx->t_src[i].len, rctx->t_dst[i].len,
+ rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);
+
+ writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
+ writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
+ writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
+ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+ mutex_unlock(&ss->mlock);
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(2000));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for %s\n", name);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
+{
+ struct scatterlist *sg;
+
+ if (areq->nbytes == 0)
+ return true;
+ /* we need to reserve one SG for the padding one */
+ if (sg_nents(areq->src) > MAX_SG - 1)
+ return true;
+ sg = areq->src;
+ while (sg) {
+ /* The SS can only hash full blocks: since the SS supports only
+ * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
+ * TODO: handle requests whose last SG length is not a multiple
+ * of 64; this will need copying the data to a new SG of size 64.
+ */
+ if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return true;
+ sg = sg_next(sg);
+ }
+ return false;
+}
+
+int sun8i_ss_hash_digest(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ struct crypto_engine *engine;
+ struct scatterlist *sg;
+ int nr_sgs, e, i;
+
+ if (sun8i_ss_hash_need_fallback(areq))
+ return sun8i_ss_hash_digest_fb(areq);
+
+ nr_sgs = sg_nents(areq->src);
+ if (nr_sgs > MAX_SG - 1)
+ return sun8i_ss_hash_digest_fb(areq);
+
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
+ return sun8i_ss_hash_digest_fb(areq);
+ }
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ ss = algt->ss;
+
+ e = sun8i_ss_get_engine_number(ss);
+ rctx->flow = e;
+ engine = ss->flows[e].engine;
+
+ return crypto_transfer_hash_request_to_engine(engine, areq);
+}
+
+/* sun8i_ss_hash_run - run an ahash request
+ * Send the data of the request to the SS along with an extra SG with padding
+ */
+int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
+{
+ struct ahash_request *areq = container_of(breq, struct ahash_request, base);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ struct scatterlist *sg;
+ int nr_sgs, err, digestsize;
+ unsigned int len;
+ u64 fill, min_fill, byte_count;
+ void *pad, *result;
+ int j, i, todo;
+ __be64 *bebits;
+ __le64 *lebits;
+ dma_addr_t addr_res, addr_pad;
+ __le32 *bf;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ ss = algt->ss;
+
+ digestsize = algt->alg.hash.halg.digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ digestsize = SHA256_DIGEST_SIZE;
+
+ /* the padding could be up to two blocks. */
+ pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
+ if (!pad)
+ return -ENOMEM;
+ bf = (__le32 *)pad;
+
+ result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
+ if (!result) {
+ kfree(pad);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_SG; i++) {
+ rctx->t_dst[i].addr = 0;
+ rctx->t_dst[i].len = 0;
+ }
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+#endif
+
+ rctx->method = ss->variant->alg_hash[algt->ss_algo_id];
+
+ nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
+ dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, addr_res)) {
+ dev_err(ss->dev, "DMA map dest\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ len = areq->nbytes;
+ for_each_sg(areq->src, sg, nr_sgs, i) {
+ rctx->t_src[i].addr = sg_dma_address(sg);
+ todo = min(len, sg_dma_len(sg));
+ rctx->t_src[i].len = todo / 4;
+ len -= todo;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ }
+ if (len > 0) {
+ dev_err(ss->dev, "remaining len %d\n", len);
+ err = -EINVAL;
+ goto theend;
+ }
+
+ byte_count = areq->nbytes;
+ j = 0;
+ bf[j++] = cpu_to_le32(0x80);
+
+ fill = 64 - (byte_count % 64);
+ min_fill = 3 * sizeof(u32);
+
+ if (fill < min_fill)
+ fill += 64;
+
+ j += (fill - min_fill) / sizeof(u32);
+
+ switch (algt->ss_algo_id) {
+ case SS_ID_HASH_MD5:
+ lebits = (__le64 *)&bf[j];
+ *lebits = cpu_to_le64(byte_count << 3);
+ j += 2;
+ break;
+ case SS_ID_HASH_SHA1:
+ case SS_ID_HASH_SHA224:
+ case SS_ID_HASH_SHA256:
+ bebits = (__be64 *)&bf[j];
+ *bebits = cpu_to_be64(byte_count << 3);
+ j += 2;
+ break;
+ }
+
+ addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
+ rctx->t_src[i].addr = addr_pad;
+ rctx->t_src[i].len = j;
+ rctx->t_dst[i].addr = addr_res;
+ rctx->t_dst[i].len = digestsize / 4;
+ if (dma_mapping_error(ss->dev, addr_pad)) {
+ dev_err(ss->dev, "DMA error on padding SG\n");
+ err = -EINVAL;
+ goto theend;
+ }
+
+ err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
+
+ dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
+
+ memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+theend:
+ kfree(pad);
+ kfree(result);
+ crypto_finalize_hash_request(engine, breq, err);
+ return 0;
+}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
new file mode 100644
index 000000000000..08a1473b2145
--- /dev/null
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sun8i-ss-prng.c - hardware cryptographic offloader for
+ * Allwinner A80/A83T SoC
+ *
+ * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
+ *
+ * This file handles the PRNG found in the SS
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi.rst
+ */
+#include "sun8i-ss.h"
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <crypto/internal/rng.h>
+
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+
+ if (ctx->seed && ctx->slen != slen) {
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->slen = 0;
+ ctx->seed = NULL;
+ }
+ if (!ctx->seed)
+ ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ if (!ctx->seed)
+ return -ENOMEM;
+
+ memcpy(ctx->seed, seed, slen);
+ ctx->slen = slen;
+
+ return 0;
+}
+
+int sun8i_ss_prng_init(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
+ return 0;
+}
+
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx->seed, ctx->slen);
+ kfree(ctx->seed);
+ ctx->seed = NULL;
+ ctx->slen = 0;
+}
+
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct sun8i_ss_alg_template *algt;
+ struct sun8i_ss_dev *ss;
+ dma_addr_t dma_iv, dma_dst;
+ unsigned int todo;
+ int err = 0;
+ int flow;
+ void *d;
+ u32 v;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+ if (ctx->slen == 0) {
+ dev_err(ss->dev, "The PRNG is not seeded\n");
+ return -EINVAL;
+ }
+
+ /* The SS does not return an updated seed, so we have to fetch a new
+ * one: ask for an extra PRNG_SEED_SIZE bytes of data.
+ * We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE.
+ */
+ todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
+ todo -= todo % PRNG_DATA_SIZE;
+
+ d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ if (!d)
+ return -ENOMEM;
+
+ flow = sun8i_ss_get_engine_number(ss);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_req++;
+ algt->stat_bytes += todo;
+#endif
+
+ v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
+ if (flow)
+ v |= SS_FLOW1;
+ else
+ v |= SS_FLOW0;
+
+ dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ss->dev, dma_iv)) {
+ dev_err(ss->dev, "Cannot DMA MAP IV\n");
+ return -EFAULT;
+ }
+
+ dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ss->dev, dma_dst)) {
+ dev_err(ss->dev, "Cannot DMA MAP DST\n");
+ err = -EFAULT;
+ goto err_iv;
+ }
+
+ err = pm_runtime_get_sync(ss->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(ss->dev);
+ goto err_pm;
+ }
+ err = 0;
+
+ mutex_lock(&ss->mlock);
+ writel(dma_iv, ss->base + SS_IV_ADR_REG);
+ /* the PRNG acts badly (failing rngtest) without SS_KEY_ADR_REG set */
+ writel(dma_iv, ss->base + SS_KEY_ADR_REG);
+ writel(dma_dst, ss->base + SS_DST_ADR_REG);
+ writel(todo / 4, ss->base + SS_LEN_ADR_REG);
+
+ reinit_completion(&ss->flows[flow].complete);
+ ss->flows[flow].status = 0;
+ /* Be sure all data is written before enabling the task */
+ wmb();
+
+ writel(v, ss->base + SS_CTL_REG);
+
+ wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
+ msecs_to_jiffies(todo));
+ if (ss->flows[flow].status == 0) {
+ dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
+ err = -EFAULT;
+ }
+ /* Since cipher and hash use the linux/cryptoengine and we have one
+ * cryptoengine per flow, we are sure that they will issue only one
+ * request per flow.
+ * Since the cryptoengine waits for completion before submitting a new
+ * request, the mlock could be released just after the final writel.
+ * But the cryptoengine cannot handle crypto_rng, so we need to be sure
+ * that nothing else will use our flow.
+ * The easiest way is to hold mlock until the hardware has finished our
+ * request.
+ * We could have used a per-flow lock, but this would increase
+ * complexity.
+ * The drawback is that no request can be handled for the other flow.
+ */
+ mutex_unlock(&ss->mlock);
+
+ pm_runtime_put(ss->dev);
+
+err_pm:
+ dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
+err_iv:
+ dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);
+
+ if (!err) {
+ memcpy(dst, d, dlen);
+ /* Update seed */
+ memcpy(ctx->seed, d + dlen, ctx->slen);
+ }
+ memzero_explicit(d, todo);
+ kfree(d);
+
+ return err;
+}
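
The buffer sizing above is worth a worked example: because the SS never hands back a refreshed seed, the driver over-requests an extra PRNG_SEED_SIZE bytes and rounds the total up to whole PRNG_DATA_SIZE rounds, then splits the output into caller data and the next seed. A minimal user-space sketch of that arithmetic (the constants mirror the driver's #defines; the split layout is the point being illustrated, not driver code):

#include <stdio.h>

#define PRNG_DATA_SIZE (160 / 8)        /* 20 bytes produced per round */
#define PRNG_SEED_SIZE ((175 + 7) / 8)  /* DIV_ROUND_UP(175, 8) == 22 */

/* Same rounding as sun8i_ss_prng_generate(): always at least one spare
 * round so the trailing PRNG_SEED_SIZE bytes can become the new seed. */
static unsigned int prng_todo(unsigned int dlen)
{
        unsigned int todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;

        return todo - todo % PRNG_DATA_SIZE;
}

int main(void)
{
        unsigned int dlen;

        for (dlen = 0; dlen <= 64; dlen += 16)
                printf("dlen=%2u todo=%3u (%u rounds), next seed = d[%u..%u)\n",
                       dlen, prng_todo(dlen), prng_todo(dlen) / PRNG_DATA_SIZE,
                       dlen, dlen + PRNG_SEED_SIZE);
        return 0;
}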
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 0405767f1f7e..1a66457f4a20 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -8,10 +8,16 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/engine.h>
+#include <crypto/rng.h>
#include <crypto/skcipher.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+
+#define SS_START 1
#define SS_ENCRYPTION 0
#define SS_DECRYPTION BIT(6)
@@ -19,6 +25,11 @@
#define SS_ALG_AES 0
#define SS_ALG_DES (1 << 2)
#define SS_ALG_3DES (2 << 2)
+#define SS_ALG_MD5 (3 << 2)
+#define SS_ALG_PRNG (4 << 2)
+#define SS_ALG_SHA1 (6 << 2)
+#define SS_ALG_SHA224 (7 << 2)
+#define SS_ALG_SHA256 (8 << 2)
#define SS_CTL_REG 0x00
#define SS_INT_CTL_REG 0x04
@@ -47,9 +58,17 @@
#define SS_OP_ECB 0
#define SS_OP_CBC (1 << 13)
+#define SS_ID_HASH_MD5 0
+#define SS_ID_HASH_SHA1 1
+#define SS_ID_HASH_SHA224 2
+#define SS_ID_HASH_SHA256 3
+#define SS_ID_HASH_MAX 4
+
#define SS_FLOW0 BIT(30)
#define SS_FLOW1 BIT(31)
+#define SS_PRNG_CONTINUE BIT(18)
+
#define MAX_SG 8
#define MAXFLOW 2
@@ -59,6 +78,9 @@
#define SS_DIE_ID_SHIFT 20
#define SS_DIE_ID_MASK 0x07
+#define PRNG_DATA_SIZE (160 / 8)
+#define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8)
+
/*
* struct ss_clock - Describe clocks used by sun8i-ss
* @name: Name of clock needed by this variant
@@ -75,11 +97,14 @@ struct ss_clock {
* struct ss_variant - Describe SS capability for each variant hardware
* @alg_cipher: list of supported ciphers. for each SS_ID_ this will give the
* coresponding SS_ALG_XXX value
+ * @alg_hash: list of supported hashes. for each SS_ID_ this will give the
+ * corresponding SS_ALG_XXX value
* @op_mode: list of supported block modes
- * @ss_clks! list of clock needed by this variant
+ * @ss_clks: list of clocks needed by this variant
*/
struct ss_variant {
char alg_cipher[SS_ID_CIPHER_MAX];
+ char alg_hash[SS_ID_HASH_MAX];
u32 op_mode[SS_ID_OP_MAX];
struct ss_clock ss_clks[SS_MAX_CLOCKS];
};
@@ -170,6 +195,8 @@ struct sun8i_cipher_req_ctx {
* @keylen: len of the key
* @ss: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
+ *
+ * enginectx must be the first element
*/
struct sun8i_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
@@ -180,6 +207,46 @@ struct sun8i_cipher_tfm_ctx {
};
/*
+ * struct sun8i_ss_rng_tfm_ctx - context for PRNG TFM
+ * @seed: The seed to use
+ * @slen: The size of the seed
+ */
+struct sun8i_ss_rng_tfm_ctx {
+ void *seed;
+ unsigned int slen;
+};
+
+/*
+ * struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
+ * @enginectx: crypto_engine used by this TFM
+ * @fallback_tfm: pointer to the fallback TFM
+ * @ss: pointer to the private data of driver handling this TFM
+ *
+ * enginectx must be the first element
+ */
+struct sun8i_ss_hash_tfm_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct crypto_ahash *fallback_tfm;
+ struct sun8i_ss_dev *ss;
+};
+
+/*
+ * struct sun8i_ss_hash_reqctx - context for an ahash request
+ * @t_src: list of DMA address and size for source SGs
+ * @t_dst: list of DMA address and size for destination SGs
+ * @fallback_req: pre-allocated fallback request
+ * @method: the register value for the algorithm used by this request
+ * @flow: the flow to use for this request
+ */
+struct sun8i_ss_hash_reqctx {
+ struct sginfo t_src[MAX_SG];
+ struct sginfo t_dst[MAX_SG];
+ struct ahash_request fallback_req;
+ u32 method;
+ int flow;
+};
+
+/*
* struct sun8i_ss_alg_template - crypto_alg template
* @type: the CRYPTO_ALG_TYPE for this template
* @ss_algo_id: the SS_ID for this template
@@ -189,6 +256,7 @@ struct sun8i_cipher_tfm_ctx {
* @alg: one of sub struct must be used
* @stat_req: number of request done on this template
* @stat_fb: number of request which has fallbacked
+ * @stat_bytes: total data size done by this template
*/
struct sun8i_ss_alg_template {
u32 type;
@@ -197,10 +265,13 @@ struct sun8i_ss_alg_template {
struct sun8i_ss_dev *ss;
union {
struct skcipher_alg skcipher;
+ struct rng_alg rng;
+ struct ahash_alg hash;
} alg;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
unsigned long stat_req;
unsigned long stat_fb;
+ unsigned long stat_bytes;
#endif
};
@@ -218,3 +289,19 @@ int sun8i_ss_skencrypt(struct skcipher_request *areq);
int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss);
int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name);
+int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
+int sun8i_ss_prng_init(struct crypto_tfm *tfm);
+void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
+
+int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
+void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ss_hash_init(struct ahash_request *areq);
+int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
+int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
+int sun8i_ss_hash_final(struct ahash_request *areq);
+int sun8i_ss_hash_update(struct ahash_request *areq);
+int sun8i_ss_hash_finup(struct ahash_request *areq);
+int sun8i_ss_hash_digest(struct ahash_request *areq);
+int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq);
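
The new alg_hash[] array mirrors alg_cipher[]: a per-variant capability table, indexed by the driver's SS_ID_HASH_* constants, that yields the hardware SS_ALG_* method bits. A hypothetical variant entry, sketched only to show the shape (the real per-SoC tables live in sun8i-ss-core.c and are not part of this hunk):

static const struct ss_variant ss_variant_sketch = {
        /* indexed by the SS_ID_ cipher constants */
        .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES },
        /* indexed by SS_ID_HASH_MD5 .. SS_ID_HASH_SHA256 */
        .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256 },
        /* indexed by the SS_ID_ op mode constants */
        .op_mode = { SS_OP_ECB, SS_OP_CBC },
};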
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index f7fc0c464125..7729a637fb02 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -55,7 +55,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
- sa->sa_command_1.bf.feedback_mode = cfb,
+ sa->sa_command_1.bf.feedback_mode = cfb;
sa->sa_command_1.bf.sa_rev = 1;
sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 6b6841359190..a4e25b46cd0a 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -15,6 +15,7 @@
#include <linux/ratelimit.h>
#include <linux/mutex.h>
+#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index d93210726697..8b5e07316352 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -99,7 +99,7 @@ static int meson_cipher(struct skcipher_request *areq)
unsigned int keyivlen, ivsize, offset, tloffset;
dma_addr_t phykeyiv;
void *backup_iv = NULL, *bkeyiv;
- __le32 v;
+ u32 v;
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
@@ -340,10 +340,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
{
struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
crypto_free_skcipher(op->fallback_tfm);
}
@@ -367,10 +364,7 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
return -EINVAL;
}
- if (op->key) {
- memzero_explicit(op->key, op->keylen);
- kfree(op->key);
- }
+ kfree_sensitive(op->key);
op->keylen = keylen;
op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!op->key)
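
kfree_sensitive() collapses the zero-then-free idiom and, like kfree(), is NULL-safe, which is why the surrounding if (op->key) guard disappears as well. A user-space stand-in that shows the semantics (the kernel helper derives the allocation size itself; the explicit len parameter is only needed outside the kernel):

#include <stdlib.h>
#include <string.h>

/* Stand-in for kfree_sensitive(): NULL-safe, zeroes key material before
 * freeing so secrets do not linger in the heap. */
static void kfree_sensitive_sketch(void *p, size_t len)
{
        if (!p)
                return;
        memset(p, 0, len);      /* the kernel uses memzero_explicit() */
        free(p);
}

int main(void)
{
        char *key = strdup("secret");

        kfree_sensitive_sketch(key, 6);
        kfree_sensitive_sketch(NULL, 0);        /* no-op, like kfree(NULL) */
        return 0;
}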
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 466552acbbbb..5bbeff433c8c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -98,7 +98,7 @@ static struct meson_alg_template mc_algs[] = {
};
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
-static int meson_dbgfs_read(struct seq_file *seq, void *v)
+static int meson_debugfs_show(struct seq_file *seq, void *v)
{
struct meson_dev *mc = seq->private;
int i;
@@ -118,19 +118,7 @@ static int meson_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int meson_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, meson_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations meson_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = meson_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
#endif
static void meson_free_chanlist(struct meson_dev *mc, int i)
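
DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> generates exactly the single_open()/seq_read boilerplate deleted above; all it needs is a name##_show() function, and it emits name##_fops, hence the rename to meson_debugfs_show. The pattern on a hypothetical driver:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_debugfs_show(struct seq_file *seq, void *v)
{
        /* seq->private is the pointer passed to debugfs_create_file() */
        seq_puts(seq, "example state\n");
        return 0;
}
/* Generates foo_debugfs_fops with open/read/llseek/release wired up. */
DEFINE_SHOW_ATTRIBUTE(foo_debugfs);

/* typical usage:
 * debugfs_create_file("state", 0444, root, priv, &foo_debugfs_fops);
 */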
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index a6e14491e080..b1d286004295 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1539,7 +1539,7 @@ static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
/* Write incr32(J0) into IV. */
j0_lsw = j0[3];
- j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
+ be32_add_cpu(&j0[3], 1);
atmel_aes_write_block(dd, AES_IVR(0), j0);
j0[3] = j0_lsw;
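
be32_add_cpu() folds the open-coded convert, increment, convert-back sequence into one helper; for GCM's incr32(J0) the low word of J0 is a big-endian counter that wraps modulo 2^32. A user-space stand-in (htonl/ntohl standing in for cpu_to_be32/be32_to_cpu is an assumption of this sketch):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Stand-in for the kernel's be32_add_cpu() */
static void be32_add_cpu(uint32_t *var, uint32_t val)
{
        *var = htonl(ntohl(*var) + val);
}

int main(void)
{
        uint32_t j0_3 = htonl(0xffffffff);

        be32_add_cpu(&j0_3, 1);         /* incr32: counter wraps to 0 */
        printf("0x%08x\n", ntohl(j0_3));
        return 0;
}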
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index ed40dbb98c6b..4d63cb13a54f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -912,7 +912,7 @@ static void atmel_tdes_skcipher_alg_init(struct skcipher_alg *alg)
{
alg->base.cra_priority = ATMEL_TDES_PRIORITY;
alg->base.cra_flags = CRYPTO_ALG_ASYNC;
- alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ alg->base.cra_ctxsize = sizeof(struct atmel_tdes_ctx);
alg->base.cra_module = THIS_MODULE;
alg->init = atmel_tdes_init_tfm;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8a7fa1ae1ade..50d169e61b41 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -165,10 +165,6 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg,
return -EFAULT;
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- /* Add buffer to catch 260-byte SUPDT field for RC4 */
- sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
-
if (stat_pad_len)
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
@@ -317,7 +313,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
u8 local_iv_ctr[MAX_IV_SIZE];
u32 stat_pad_len; /* num bytes to align status field */
u32 pad_len; /* total length of all padding */
- bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
/* number of entries in src and dst sg in mailbox message. */
@@ -391,28 +386,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
}
}
- if (ctx->cipher.alg == CIPHER_ALG_RC4) {
- rx_frag_num++;
- if (chunk_start) {
- /*
- * for non-first RC4 chunks, use SUPDT from previous
- * response as key for this chunk.
- */
- cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_UPDT;
- } else if (!rctx->is_encrypt) {
- /*
- * First RC4 chunk. For decrypt, key in pre-built msg
- * header may have been changed if encrypt required
- * multiple chunks. So revert the key to the
- * ctx->enckey value.
- */
- update_key = true;
- cipher_parms.type = CIPHER_TYPE_INIT;
- }
- }
-
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
flow_log("max_payload infinite\n");
else
@@ -425,14 +398,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
sizeof(rctx->msg_buf.bcm_spu_req_hdr));
- /*
- * Pass SUPDT field as key. Key field in finish() call is only used
- * when update_key has been set above for RC4. Will be ignored in
- * all other cases.
- */
spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
ctx->spu_req_hdr_len, !(rctx->is_encrypt),
- &cipher_parms, update_key, chunksize);
+ &cipher_parms, chunksize);
atomic64_add(chunksize, &iproc_priv.bytes_out);
@@ -527,9 +495,6 @@ static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
__func__, rctx->total_received, payload_len);
dump_sg(req->dst, rctx->total_received, payload_len);
- if (ctx->cipher.alg == CIPHER_ALG_RC4)
- packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
- SPU_SUPDT_LEN);
rctx->total_received += payload_len;
if (rctx->total_received == rctx->total_todo) {
@@ -1853,26 +1818,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
return 0;
}
-static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int keylen)
-{
- struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
- int i;
-
- ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
-
- ctx->enckey[0] = 0x00; /* 0x00 */
- ctx->enckey[1] = 0x00; /* i */
- ctx->enckey[2] = 0x00; /* 0x00 */
- ctx->enckey[3] = 0x00; /* j */
- for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
- ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
-
- ctx->cipher_type = CIPHER_TYPE_INIT;
-
- return 0;
-}
-
static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int keylen)
{
@@ -1895,9 +1840,6 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
case CIPHER_ALG_AES:
err = aes_setkey(cipher, key, keylen);
break;
- case CIPHER_ALG_RC4:
- err = rc4_setkey(cipher, key, keylen);
- break;
default:
pr_err("%s() Error: unknown cipher alg\n", __func__);
err = -EINVAL;
@@ -1905,11 +1847,9 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
if (err)
return err;
- /* RC4 already populated ctx->enkey */
- if (ctx->cipher.alg != CIPHER_ALG_RC4) {
- memcpy(ctx->enckey, key, keylen);
- ctx->enckeylen = keylen;
- }
+ memcpy(ctx->enckey, key, keylen);
+ ctx->enckeylen = keylen;
+
/* SPU needs XTS keys in the reverse order the crypto API presents */
if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
(ctx->cipher.mode == CIPHER_MODE_XTS)) {
@@ -2872,9 +2812,6 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
goto badkey;
}
break;
- case CIPHER_ALG_RC4:
- ctx->cipher_type = CIPHER_TYPE_INIT;
- break;
default:
pr_err("%s() Error: Unknown cipher alg\n", __func__);
return -EINVAL;
@@ -2930,7 +2867,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
ctx->enckeylen = keylen;
ctx->authkeylen = 0;
- memcpy(ctx->enckey, key, ctx->enckeylen);
switch (ctx->enckeylen) {
case AES_KEYSIZE_128:
@@ -2946,6 +2882,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
goto badkey;
}
+ memcpy(ctx->enckey, key, ctx->enckeylen);
+
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
ctx->authkeylen);
flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
@@ -3000,6 +2938,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3028,6 +2970,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < GCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = GCM_ESP_SALT_SIZE;
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
@@ -3057,6 +3003,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
flow_log("%s\n", __func__);
+
+ if (keylen < CCM_ESP_SALT_SIZE)
+ return -EINVAL;
+
ctx->salt_len = CCM_ESP_SALT_SIZE;
ctx->salt_offset = CCM_ESP_SALT_OFFSET;
memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
@@ -3606,25 +3556,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ecb(arc4)",
- .base.cra_driver_name = "ecb-arc4-iproc",
- .base.cra_blocksize = ARC4_BLOCK_SIZE,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .ivsize = 0,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_RC4,
- .mode = CIPHER_MODE_NONE,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "ofb(des)",
.base.cra_driver_name = "ofb-des-iproc",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -4526,15 +4457,9 @@ static void spu_counters_init(void)
static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
- struct spu_hw *spu = &iproc_priv.spu;
struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
int err;
- /* SPU2 does not support RC4 */
- if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
- (spu->spu_type == SPU_TYPE_SPU2))
- return 0;
-
crypto->base.cra_module = THIS_MODULE;
crypto->base.cra_priority = cipher_pri;
crypto->base.cra_alignmask = 0;
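
The new keylen checks in the *_esp_setkey() paths matter because the salt is read from the tail of the key: with a key shorter than the salt, key + keylen - GCM_ESP_SALT_SIZE would point before the buffer. A condensed user-space sketch of the guard (the 4-byte salt size is an assumption here, matching RFC 4106-style GCM-ESP):

#include <stdint.h>
#include <string.h>

#define GCM_ESP_SALT_SIZE 4     /* assumed for this sketch */

static int copy_salt(uint8_t *salt, const uint8_t *key, unsigned int keylen)
{
        if (keylen < GCM_ESP_SALT_SIZE)
                return -1;      /* the driver returns -EINVAL */
        memcpy(salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
        return 0;
}

int main(void)
{
        uint8_t salt[GCM_ESP_SALT_SIZE];
        uint8_t key[20] = { 0 };

        copy_salt(salt, key, sizeof(key));      /* ok: salt = key[16..19] */
        return copy_salt(salt, key, 2) == -1 ? 0 : 1;   /* short key rejected */
}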
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index b6d83e3aa46c..035c8389cb3d 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -388,7 +388,6 @@ struct spu_hw {
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
u32 hash_pad_len, enum hash_alg auth_alg,
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index e7562e9bf396..fe126f95c702 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -222,10 +222,6 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
cipher_key_len = 24;
name = "3DES";
break;
- case CIPHER_ALG_RC4:
- cipher_key_len = 260;
- name = "ARC4";
- break;
case CIPHER_ALG_AES:
switch (cipher_type) {
case CIPHER_TYPE_AES128:
@@ -919,21 +915,16 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spum_cipher_req_init().
- * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
- * a request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spum_cipher_req_init() fills in the encryption key.
*/
void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPUHEADER *spuh;
@@ -948,11 +939,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
/*
* In XTS mode, API puts "i" parameter (block tweak) in IV. For
@@ -981,13 +967,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
else
cipher_bits &= ~CIPHER_INBOUND;
- /* update encryption key for RC4 on non-first chunk */
- if (update_key) {
- spuh->sa.cipher_flags |=
- cipher_parms->type << CIPHER_TYPE_SHIFT;
- memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
- }
-
if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
/* cipher iv provided so put it in here */
memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
diff --git a/drivers/crypto/bcm/spu.h b/drivers/crypto/bcm/spu.h
index b247bc5b9354..dd132389bcaa 100644
--- a/drivers/crypto/bcm/spu.h
+++ b/drivers/crypto/bcm/spu.h
@@ -251,7 +251,6 @@ void spum_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spum_request_pad(u8 *pad_start,
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 59abb5ecefa4..c860ffb0b4c3 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -1170,21 +1170,16 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* @spu_req_hdr_len: Length in bytes of the SPU request header
* @isInbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
- * @update_key: If true, rewrite the cipher key in SCTX
* @data_size: Length of the data in the BD field
*
* Assumes much of the header was already filled in at setkey() time in
* spu_cipher_req_init().
- * spu_cipher_req_init() fills in the encryption key. For RC4, when submitting a
- * request for a non-first chunk, we use the 260-byte SUPDT field from the
- * previous response as the key. update_key is true for this case. Unused in all
- * other cases.
+ * spu_cipher_req_init() fills in the encryption key.
*/
void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size)
{
struct SPU2_FMD *fmd;
@@ -1196,11 +1191,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
flow_log(" in: %u\n", is_inbound);
flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
cipher_parms->type);
- if (update_key) {
- flow_log(" cipher key len: %u\n", cipher_parms->key_len);
- flow_dump(" key: ", cipher_parms->key_buf,
- cipher_parms->key_len);
- }
flow_log(" iv len: %d\n", cipher_parms->iv_len);
flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
flow_log(" data_size: %u\n", data_size);
diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h
index 03af6c38df7f..6e666bfb3cfc 100644
--- a/drivers/crypto/bcm/spu2.h
+++ b/drivers/crypto/bcm/spu2.h
@@ -200,7 +200,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
u16 spu_req_hdr_len,
unsigned int is_inbound,
struct spu_cipher_parms *cipher_parms,
- bool update_key,
unsigned int data_size);
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
enum hash_alg auth_alg, enum hash_mode auth_mode,
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index bc35aa0ec07a..84ea7cba5ee5 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -101,6 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
+ select CRYPTO_XTS
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
@@ -114,6 +115,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_DES
+ select CRYPTO_XTS
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance
@@ -165,6 +167,7 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM
select CRYPTO_AEAD
select CRYPTO_HASH
select CRYPTO_DES
+ select CRYPTO_XTS
help
CAAM driver for QorIQ Data Path Acceleration Architecture 2.
It handles DPSECI DPAA2 objects that sit on the Management Complex
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 68d5cc0f28e2..3570286eb9ce 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -27,6 +27,8 @@ ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
endif
+caam-$(CONFIG_DEBUG_FS) += debugfs.o
+
obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 91feda5b63f6..cf5bd7666dfc 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -57,6 +57,8 @@
#include "key_gen.h"
#include "caamalg_desc.h"
#include <crypto/engine.h>
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
/*
* crypto alg
@@ -114,10 +116,13 @@ struct caam_ctx {
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
};
struct caam_skcipher_req_ctx {
struct skcipher_edesc *edesc;
+ struct skcipher_request fallback_req;
};
struct caam_aead_req_ctx {
@@ -829,11 +834,23 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(jrdev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1755,6 +1772,14 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
return ret;
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
@@ -1762,12 +1787,34 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0.
+ * Note that the case of input length < block size will be caught
+ * during HW offloading and will return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
@@ -1905,6 +1952,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -3344,13 +3392,35 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
ctx->enginectx.op.do_one_request = skcipher_do_one_req;
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(fallback));
+ } else {
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+ }
+
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+
+ return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3378,7 +3448,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3412,8 +3486,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
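
The fallback routing above hinges on two independent conditions: a key length the hardware cannot handle natively (anything other than 2 x 128-bit or 2 x 256-bit, flagged once at setkey time) and, on era <= 8 parts, a 16-byte tweak whose upper 8 bytes are nonzero, since older CAAM consumes only an 8-byte sector index. A user-space sketch of the combined predicate (names are illustrative, not from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Upper half of the 16-byte XTS IV, read alignment-safely, as in
 * xts_skcipher_ivsize() above. */
static bool xts_iv_upper_nonzero(const uint8_t iv[16])
{
        uint64_t hi;

        memcpy(&hi, iv + 8, sizeof(hi));
        return hi != 0;
}

static bool need_xts_fallback(int era, bool key_fallback,
                              const uint8_t iv[16])
{
        return key_fallback || (era <= 8 && xts_iv_upper_nonzero(iv));
}

int main(void)
{
        uint8_t iv[16] = { 0 };

        iv[12] = 1;     /* nonzero upper half forces fallback on era <= 8 */
        return need_xts_fallback(8, false, iv) ? 0 : 1;
}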
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index d6c58184bb57..7571e1ac913b 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -373,6 +373,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
+ * @geniv: whether to generate Encrypted Chain IV
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
@@ -1550,13 +1551,14 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
set_jump_tgt_here(desc, key_jump_cmd);
/*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
+ * create sequence for loading the sector index / 16B tweak value
+ * Lower 8B of IV - sector index / tweak lower half
+ * Upper 8B of IV - upper half of 16B tweak
*/
append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
/* Load operation */
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
@@ -1565,9 +1567,11 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
/* Perform operation */
skcipher_append_src_dst(desc);
- /* Store upper 8B of IV */
+ /* Store lower 8B and upper 8B of IV */
append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
": ", DUMP_PREFIX_ADDRESS, 16, 4,
@@ -1609,23 +1613,25 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
set_jump_tgt_here(desc, key_jump_cmd);
/*
- * create sequence for loading the sector index
- * Upper 8B of IV - will be used as sector index
- * Lower 8B of IV - will be discarded
+ * create sequence for loading the sector index / 16B tweak value
+ * Lower 8B of IV - sector index / tweak lower half
+ * Upper 8B of IV - upper half of 16B tweak
*/
append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
- append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
+ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
/* Load operation */
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
skcipher_append_src_dst(desc);
- /* Store upper 8B of IV */
+ /* Store lower 8B and upper 8B of IV */
append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
(0x20 << LDST_OFFSET_SHIFT));
+ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+ (0x30 << LDST_OFFSET_SHIFT));
print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index bb1c0106a95c..66f60d78bdc8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,8 @@
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
/*
* crypto alg
@@ -67,6 +69,12 @@ struct caam_ctx {
struct device *qidev;
spinlock_t lock; /* Protects multiple init of driver context */
struct caam_drv_ctx *drv_ctx[NUM_OP];
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+ struct skcipher_request fallback_req;
};
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -725,11 +733,23 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
int ret = 0;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(jrdev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1373,16 +1393,46 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0.
+ * Note that the case of input length < block size will be caught
+ * during HW offloading and will return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
+ }
+
if (unlikely(caam_congested))
return -EAGAIN;
@@ -1507,6 +1557,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -2440,9 +2491,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
+
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(fallback));
+ }
+
+ ret = caam_init_common(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
- return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
- false);
+ return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2468,7 +2542,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,8 +2580,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 66ae1d581168..98c1ff1744bb 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,8 @@
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
+#include <crypto/xts.h>
+#include <asm/unaligned.h>
#define CAAM_CRA_PRIORITY 2000
@@ -59,7 +61,7 @@ struct caam_skcipher_alg {
};
/**
- * caam_ctx - per-session context
+ * struct caam_ctx - per-session context
* @flc: Flow Contexts array
* @key: [authentication key], encryption key
* @flc_dma: I/O virtual addresses of the Flow Contexts
@@ -80,6 +82,8 @@ struct caam_ctx {
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
+ bool xts_key_fallback;
+ struct crypto_skcipher *fallback;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
@@ -1054,12 +1058,24 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
{
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
struct caam_flc *flc;
u32 *desc;
+ int err;
- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ err = xts_verify_key(skcipher, key, keylen);
+ if (err) {
dev_dbg(dev, "key size mismatch\n");
- return -EINVAL;
+ return err;
+ }
+
+ if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
+ ctx->xts_key_fallback = true;
+
+ if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
+ err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ if (err)
+ return err;
}
ctx->cdata.keylen = keylen;
@@ -1443,17 +1459,44 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
skcipher_request_complete(req, ecode);
}
+static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+}
+
static int skcipher_encrypt(struct skcipher_request *req)
{
struct skcipher_edesc *edesc;
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0.
+ * Note that the case of input length < block size will be caught
+ * during HW offloading and will return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1480,10 +1523,30 @@ static int skcipher_decrypt(struct skcipher_request *req)
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct caam_request *caam_req = skcipher_request_ctx(req);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
int ret;
- if (!req->cryptlen)
+ /*
+ * XTS is expected to return an error even for input length = 0.
+ * Note that the case of input length < block size will be caught
+ * during HW offloading and will return an error.
+ */
+ if (!req->cryptlen && !ctx->fallback)
return 0;
+
+ if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
+ ctx->xts_key_fallback)) {
+ skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&caam_req->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_decrypt(&caam_req->fallback_req);
+ }
+
/* allocate extended descriptor */
edesc = skcipher_edesc_alloc(req);
if (IS_ERR(edesc))
@@ -1537,9 +1600,34 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+ int ret = 0;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
- return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+ if (alg_aai == OP_ALG_AAI_XTS) {
+ const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+ struct crypto_skcipher *fallback;
+
+ fallback = crypto_alloc_skcipher(tfm_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
+ tfm_name, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ ctx->fallback = fallback;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
+ crypto_skcipher_reqsize(fallback));
+ } else {
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ }
+
+ ret = caam_cra_init(ctx, &caam_alg->caam, false);
+ if (ret && ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+
+ return ret;
}
static int caam_cra_init_aead(struct crypto_aead *tfm)
@@ -1562,7 +1650,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_skcipher_ctx(tfm));
+ struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->fallback)
+ crypto_free_skcipher(ctx->fallback);
+ caam_exit_common(ctx);
}
static void caam_cra_exit_aead(struct crypto_aead *tfm)
@@ -1665,6 +1757,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi2",
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@@ -2912,8 +3005,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init_skcipher;
alg->exit = caam_cra_exit;
@@ -2951,7 +3044,7 @@ enum hash_optype {
};
/**
- * caam_hash_ctx - ahash per-session context
+ * struct caam_hash_ctx - ahash per-session context
* @flc: Flow Contexts array
* @key: authentication key
* @flc_dma: I/O virtual addresses of the Flow Contexts
@@ -5115,8 +5208,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
/* DPIO */
err = dpaa2_dpseci_dpio_setup(priv);
if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
goto err_dpio_setup;
}
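
dev_err_probe() absorbs the usual "if (err != -EPROBE_DEFER) dev_err(...)" dance: it logs real failures at error level, demotes -EPROBE_DEFER to a debug message while recording the deferral reason, and returns the error code unchanged. A sketch of the usual call shape (do_setup() is a hypothetical stand-in):

#include <linux/device.h>

static int do_setup(void *priv);        /* hypothetical setup helper */

static int foo_probe_step(struct device *dev, void *priv)
{
        int err = do_setup(priv);

        /* quiet on -EPROBE_DEFER, loud on everything else */
        if (err)
                return dev_err_probe(dev, err, "do_setup() failed\n");
        return 0;
}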
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index f29cb7bd7dd3..d35253407ade 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
+#include <crypto/skcipher.h>
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
@@ -186,6 +187,7 @@ struct caam_request {
void (*cbk)(void *ctx, u32 err);
void *ctx;
void *edesc;
+ struct skcipher_request fallback_req;
};
/**
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 94502f1d4b48..ca0361b2dbb0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -13,6 +13,7 @@
#include <linux/fsl/mc.h>
#include "compat.h"
+#include "debugfs.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
@@ -332,11 +333,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
kfree(desc);
- if (!ret)
- ret = devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng,
- ctrldev);
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
}
/*
@@ -443,7 +443,9 @@ static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
* by u-boot.
* In case this property is not passed an attempt to retrieve the CAAM
* era via register reads will be made.
- **/
+ *
+ * @ctrl: controller region
+ */
static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
struct device_node *caam_node;
@@ -582,12 +584,10 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}
-#ifdef CONFIG_DEBUG_FS
static void caam_remove_debugfs(void *root)
{
debugfs_remove_recursive(root);
}
-#endif
#ifdef CONFIG_FSL_MC_BUS
static bool check_version(struct fsl_mc_version *mc_version, u32 major,
@@ -619,10 +619,7 @@ static int caam_probe(struct platform_device *pdev)
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
-#ifdef CONFIG_DEBUG_FS
- struct caam_perfmon *perfmon;
struct dentry *dfs_root;
-#endif
u32 scfgr, comp_params;
u8 rng_vid;
int pg_size;
@@ -777,21 +774,15 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->era = caam_get_era(ctrl);
ctrlpriv->domain = iommu_get_domain_for_dev(dev);
-#ifdef CONFIG_DEBUG_FS
- /*
- * FIXME: needs better naming distinction, as some amalgamation of
- * "caam" and nprop->full_name. The OF name isn't distinctive,
- * but does separate instances
- */
- perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
-
dfs_root = debugfs_create_dir(dev_name(dev), NULL);
- ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root);
- if (ret)
- return ret;
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
+ dfs_root);
+ if (ret)
+ return ret;
+ }
- ctrlpriv->ctl = debugfs_create_dir("ctl", dfs_root);
-#endif
+ caam_debugfs_init(ctrlpriv, dfs_root);
/* Check to see if (DPAA 1.x) QI present. If so, enable */
if (ctrlpriv->qi_present && !caam_dpaa2) {
@@ -912,57 +903,6 @@ static int caam_probe(struct platform_device *pdev)
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
-#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
-
- /* Controller level - global status values */
- debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
-
- /* Internal covering keys (useful in non-secure mode only) */
- ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
- ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_kek_wrap);
-
- ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
- ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_tkek_wrap);
-
- ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
- ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
- debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl,
- &ctrlpriv->ctl_tdsk_wrap);
-#endif
-
ret = devm_of_platform_populate(dev);
if (ret)
dev_err(dev, "JR platform devices creation error\n");
diff --git a/drivers/crypto/caam/debugfs.c b/drivers/crypto/caam/debugfs.c
new file mode 100644
index 000000000000..8ebf18398166
--- /dev/null
+++ b/drivers/crypto/caam/debugfs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+
+#include <linux/debugfs.h>
+#include "compat.h"
+#include "debugfs.h"
+#include "regs.h"
+#include "intern.h"
+
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+
+#ifdef CONFIG_CAAM_QI
+/*
+ * This is a counter for the number of times the congestion group (where all
+ * the request and response queues are) reached congestion. Incremented
+ * each time the congestion callback is called with congested == true.
+ */
+static u64 times_congested;
+
+void caam_debugfs_qi_congested(void)
+{
+ times_congested++;
+}
+
+void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv)
+{
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
+}
+#endif
+
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root)
+{
+ struct caam_perfmon *perfmon;
+
+ /*
+ * FIXME: needs better naming distinction, as some amalgamation of
+ * "caam" and nprop->full_name. The OF name isn't distinctive,
+ * but does separate instances
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrlpriv->ctrl->perfmon;
+
+ ctrlpriv->ctl = debugfs_create_dir("ctl", root);
+
+ debugfs_create_file("rq_dequeued", 0444, ctrlpriv->ctl,
+ &perfmon->req_dequeued, &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ob_enc_req, &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ib_dec_req, &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ob_enc_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", 0444, ctrlpriv->ctl,
+ &perfmon->ob_prot_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", 0444, ctrlpriv->ctl,
+ &perfmon->ib_dec_bytes, &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", 0444, ctrlpriv->ctl,
+ &perfmon->ib_valid_bytes, &caam_fops_u64_ro);
+
+ /* Controller level - global status values */
+ debugfs_create_file("fault_addr", 0444, ctrlpriv->ctl,
+ &perfmon->faultaddr, &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", 0444, ctrlpriv->ctl,
+ &perfmon->faultdetail, &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", 0444, ctrlpriv->ctl,
+ &perfmon->status, &caam_fops_u32_ro);
+
+ /* Internal covering keys (useful in non-secure mode only) */
+ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
+ ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("kek", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_kek_wrap);
+
+ ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
+ ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("tkek", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tkek_wrap);
+
+ ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
+ ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
+ debugfs_create_blob("tdsk", 0444, ctrlpriv->ctl,
+ &ctrlpriv->ctl_tdsk_wrap);
+}
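
The caam_fops_u32_ro/caam_fops_u64_ro attributes moved into this file follow the standard DEFINE_SIMPLE_ATTRIBUTE pattern: a get callback, a NULL set callback to make the file read-only, and a printf format. In isolation the pattern looks like this (hypothetical counter; the macro itself comes from <linux/fs.h>):

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 example_counter;

static int example_get(void *data, u64 *val)
{
        *val = *(u64 *)data;
        return 0;
}
/* NULL set callback makes the attribute read-only. */
DEFINE_SIMPLE_ATTRIBUTE(example_fops_ro, example_get, NULL, "%llu\n");

/* usage:
 * debugfs_create_file("example", 0444, parent, &example_counter,
 *                     &example_fops_ro);
 */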
diff --git a/drivers/crypto/caam/debugfs.h b/drivers/crypto/caam/debugfs.h
new file mode 100644
index 000000000000..661d768acdbf
--- /dev/null
+++ b/drivers/crypto/caam/debugfs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef CAAM_DEBUGFS_H
+#define CAAM_DEBUGFS_H
+
+struct dentry;
+struct caam_drv_private;
+
+#ifdef CONFIG_DEBUG_FS
+void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct dentry *root);
+#else
+static inline void caam_debugfs_init(struct caam_drv_private *ctrlpriv,
+ struct dentry *root)
+{}
+#endif
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_CAAM_QI)
+void caam_debugfs_qi_congested(void);
+void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv);
+#else
+static inline void caam_debugfs_qi_congested(void) {}
+static inline void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv) {}
+#endif
+
+#endif /* CAAM_DEBUGFS_H */
diff --git a/drivers/crypto/caam/dpseci-debugfs.c b/drivers/crypto/caam/dpseci-debugfs.c
index c5bfc923abd8..0eca8c2fd916 100644
--- a/drivers/crypto/caam/dpseci-debugfs.c
+++ b/drivers/crypto/caam/dpseci-debugfs.c
@@ -44,33 +44,14 @@ static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_caam_priv *priv;
-
- priv = (struct dpaa2_caam_priv *)inode->i_private;
-
- err = single_open(file, dpseci_dbg_fqs_show, priv);
- if (err < 0)
- dev_err(priv->dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpseci_dbg_fq_ops = {
- .open = dpseci_dbg_fqs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpseci_dbg_fqs);
void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv)
{
priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL);
debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv,
- &dpseci_dbg_fq_ops);
+ &dpseci_dbg_fqs_fops);
}
void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv)
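
DEFINE_SHOW_ATTRIBUTE(name) requires the seq_file callback to be named name##_show and generates name##_open plus name##_fops; simplified from include/linux/seq_file.h, the macro expands to roughly:

static int dpseci_dbg_fqs_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the data pointer given to debugfs_create_file() */
	return single_open(file, dpseci_dbg_fqs_show, inode->i_private);
}

static const struct file_operations dpseci_dbg_fqs_fops = {
	.owner   = THIS_MODULE,
	.open    = dpseci_dbg_fqs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

Because the generated open handler uses i_private, the priv pointer passed to debugfs_create_file() above still reaches the show callback. The zip_*_show renames later in this patch exist to satisfy the same naming convention.
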
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 402d6a362e8c..9112279a4de0 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -195,23 +195,6 @@ static inline void caam_qi_algapi_exit(void)
#endif /* CONFIG_CAAM_QI */
-#ifdef CONFIG_DEBUG_FS
-static int caam_debugfs_u64_get(void *data, u64 *val)
-{
- *val = caam64_to_cpu(*(u64 *)data);
- return 0;
-}
-
-static int caam_debugfs_u32_get(void *data, u64 *val)
-{
- *val = caam32_to_cpu(*(u32 *)data);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
-#endif
-
static inline u64 caam_get_dma_mask(struct device *dev)
{
struct device_node *nprop = dev->of_node;
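
For reference, the helpers removed above move into the new caam/debugfs.c, where DEFINE_SIMPLE_ATTRIBUTE(name, get, set, fmt) wraps a getter into a file_operations named "name"; a NULL setter makes the attribute read-only. A sketch of the relocated code:

static int caam_debugfs_u64_get(void *data, u64 *val)
{
	/* Performance-monitor registers may be big-endian; normalize here. */
	*val = caam64_to_cpu(*(u64 *)data);
	return 0;
}

/* Read-only attribute: NULL setter, value printed with "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");

This is what backs the caam_fops_u64_ro references in debugfs.c earlier in the patch.
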
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index bf6b03b17251..6f669966ba2c 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(caam_jr_alloc);
/**
* caam_jr_free() - Free the Job Ring
- * @rdev - points to the dev that identifies the Job ring to
+ * @rdev: points to the dev that identifies the Job ring to
* be released.
**/
void caam_jr_free(struct device *rdev)
@@ -349,15 +349,15 @@ EXPORT_SYMBOL(caam_jr_free);
* of this request. This has the form:
* callback(struct device *dev, u32 *desc, u32 stat, void *arg)
* where:
- * @dev: contains the job ring device that processed this
+ * dev: contains the job ring device that processed this
* response.
- * @desc: descriptor that initiated the request, same as
+ * desc: descriptor that initiated the request, same as
* "desc" being argued to caam_jr_enqueue().
- * @status: untranslated status received from CAAM. See the
+ * status: untranslated status received from CAAM. See the
* reference manual for a detailed description of
* error meaning, or see the JRSTA definitions in the
* register header file
- * @areq: optional pointer to an argument passed with the
+ * areq: optional pointer to an argument passed with the
* original request
* @areq: optional pointer to a user argument for use at callback
* time.
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b390b935db6d..ec53528d8205 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include <soc/fsl/qman.h>
+#include "debugfs.h"
#include "regs.h"
#include "qi.h"
#include "desc.h"
@@ -73,15 +74,6 @@ static struct caam_qi_priv qipriv ____cacheline_aligned;
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
-#ifdef CONFIG_DEBUG_FS
-/*
- * This is a counter for the number of times the congestion group (where all
- * the request and response queueus are) reached congestion. Incremented
- * each time the congestion callback is called with congested == true.
- */
-static u64 times_congested;
-#endif
-
/*
 * This is a cache of buffers, from which users of the CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
@@ -544,9 +536,8 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
caam_congested = congested;
if (congested) {
-#ifdef CONFIG_DEBUG_FS
- times_congested++;
-#endif
+ caam_debugfs_qi_congested();
+
pr_debug_ratelimited("CAAM entered congestion\n");
} else {
@@ -775,10 +766,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
-#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
- &times_congested, &caam_fops_u64_ro);
-#endif
+ caam_debugfs_qi_init(ctrlpriv);
err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
if (err)
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 5af0dc2a8909..ce3b91c612f0 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -451,13 +451,7 @@ static struct skcipher_alg algs[] = { {
static inline int cav_register_algs(void)
{
- int err = 0;
-
- err = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
- if (err)
- return err;
-
- return 0;
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static inline void cav_unregister_algs(void)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index cee2a2713038..9d14be97e381 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -451,6 +451,7 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
index 194624b4855b..d35216e2f6cd 100644
--- a/drivers/crypto/cavium/zip/zip_main.c
+++ b/drivers/crypto/cavium/zip/zip_main.c
@@ -460,7 +460,7 @@ static void zip_unregister_compression_device(void)
#include <linux/debugfs.h>
/* Displays ZIP device statistics */
-static int zip_show_stats(struct seq_file *s, void *unused)
+static int zip_stats_show(struct seq_file *s, void *unused)
{
u64 val = 0ull;
u64 avg_chunk = 0ull, avg_cr = 0ull;
@@ -523,7 +523,7 @@ static int zip_show_stats(struct seq_file *s, void *unused)
}
/* Clears stats data */
-static int zip_clear_stats(struct seq_file *s, void *unused)
+static int zip_clear_show(struct seq_file *s, void *unused)
{
int index = 0;
@@ -558,7 +558,7 @@ static struct zip_registers zipregs[64] = {
};
/* Prints registers' contents */
-static int zip_print_regs(struct seq_file *s, void *unused)
+static int zip_regs_show(struct seq_file *s, void *unused)
{
u64 val = 0;
int i = 0, index = 0;
@@ -584,41 +584,9 @@ static int zip_print_regs(struct seq_file *s, void *unused)
return 0;
}
-static int zip_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_show_stats, NULL);
-}
-
-static const struct file_operations zip_stats_fops = {
- .owner = THIS_MODULE,
- .open = zip_stats_open,
- .read = seq_read,
- .release = single_release,
-};
-
-static int zip_clear_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_clear_stats, NULL);
-}
-
-static const struct file_operations zip_clear_fops = {
- .owner = THIS_MODULE,
- .open = zip_clear_open,
- .read = seq_read,
- .release = single_release,
-};
-
-static int zip_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, zip_print_regs, NULL);
-}
-
-static const struct file_operations zip_regs_fops = {
- .owner = THIS_MODULE,
- .open = zip_regs_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(zip_stats);
+DEFINE_SHOW_ATTRIBUTE(zip_clear);
+DEFINE_SHOW_ATTRIBUTE(zip_regs);
/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index bd270e66185e..d6a8f4e4b14a 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -1744,7 +1745,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
break;
default:
ret = -EINVAL;
- goto e_ctx;
+ goto e_data;
}
} else {
/* Stash the context */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d77ae981b64b..dafa6577a845 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -75,8 +75,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
- if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
- ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
@@ -84,8 +83,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV)
return 0;
break;
default:
@@ -122,7 +120,6 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
case DRV_CIPHER_ECB:
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
@@ -348,8 +345,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
}
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
if (hki.hw_key1 == hki.hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki.hw_key1, hki.hw_key2);
@@ -547,7 +543,6 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
/* IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
@@ -602,7 +597,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
break;
default:
dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
@@ -624,16 +618,8 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
unsigned int key_len = (ctx_p->keylen / 2);
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
- unsigned int du_size = nbytes;
unsigned int key_offset = key_len;
- struct cc_crypto_alg *cc_alg =
- container_of(tfm->__crt_alg, struct cc_crypto_alg,
- skcipher_alg.base);
-
- if (cc_alg->data_unit)
- du_size = cc_alg->data_unit;
-
switch (cipher_mode) {
case DRV_CIPHER_ECB:
break;
@@ -644,7 +630,6 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
if (cipher_mode == DRV_CIPHER_ESSIV)
key_len = SHA256_DIGEST_SIZE;
@@ -661,7 +646,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
(key_dma_addr + key_offset),
key_len, NS_BIT);
}
- set_xex_data_unit_size(&desc[*seq_size], du_size);
+ set_xex_data_unit_size(&desc[*seq_size], nbytes);
set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
set_key_size_aes(&desc[*seq_size], key_len);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
@@ -758,7 +743,6 @@ static void cc_setup_key_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
- case DRV_CIPHER_BITLOCKER:
/* Load AES key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -1039,44 +1023,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "xts512(paes)",
- .driver_name = "xts-paes-du512-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "xts4096(paes)",
- .driver_name = "xts-paes-du4096-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "essiv(cbc(paes),sha256)",
.driver_name = "essiv-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1095,100 +1041,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv512(cbc(paes),sha256)",
- .driver_name = "essiv-paes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "essiv4096(cbc(paes),sha256)",
- .driver_name = "essiv-paes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker(paes)",
- .driver_name = "bitlocker-paes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker512(paes)",
- .driver_name = "bitlocker-paes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
- .name = "bitlocker4096(paes)",
- .driver_name = "bitlocker-paes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "ecb(paes)",
.driver_name = "ecb-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1300,42 +1152,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "xts512(aes)",
- .driver_name = "xts-aes-du512-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "xts4096(aes)",
- .driver_name = "xts-aes-du4096-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_XTS,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
.name = "essiv(cbc(aes),sha256)",
.driver_name = "essiv-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1353,95 +1169,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv512(cbc(aes),sha256)",
- .driver_name = "essiv-aes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "essiv4096(cbc(aes),sha256)",
- .driver_name = "essiv-aes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_ESSIV,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker(aes)",
- .driver_name = "bitlocker-aes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker512(aes)",
- .driver_name = "bitlocker-aes-du512-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 512,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
- .name = "bitlocker4096(aes)",
- .driver_name = "bitlocker-aes-du4096-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_BITLOCKER,
- .flow_mode = S_DIN_to_AES,
- .data_unit = 4096,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- },
- {
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1712,7 +1439,6 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
t_alg->cipher_mode = tmpl->cipher_mode;
t_alg->flow_mode = tmpl->flow_mode;
- t_alg->data_unit = tmpl->data_unit;
return t_alg;
}
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index ccf960a0d989..bd9a1c0896b3 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -108,7 +108,6 @@ enum drv_cipher_mode {
DRV_CIPHER_CBC_CTS = 11,
DRV_CIPHER_GCTR = 12,
DRV_CIPHER_ESSIV = 13,
- DRV_CIPHER_BITLOCKER = 14,
DRV_CIPHER_RESERVE32B = S32_MAX
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 2d50991b9a17..6f519d3e896c 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -300,11 +300,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->plat_dev = plat_dev;
clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Error getting clock: %pe\n", clk);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
new_drvdata->clk = clk;
new_drvdata->coherent = of_dma_is_coherent(np);
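
dev_err_probe(dev, err, fmt, ...) returns err after logging: real errors are logged at error level, while -EPROBE_DEFER is only logged at debug level, with the deferral reason recorded for the devices_deferred debugfs file. A hedged usage sketch (example_get_clk is hypothetical):

static struct clk *example_get_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(clk))
		/* One call replaces the open-coded -EPROBE_DEFER special case. */
		return ERR_PTR(dev_err_probe(dev, PTR_ERR(clk),
					     "Error getting clock\n"));
	return clk;
}
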
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d938886390d2..af77b2020350 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -162,7 +162,6 @@ struct cc_crypto_alg {
int cipher_mode;
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
- unsigned int data_unit;
struct cc_drvdata *drvdata;
struct skcipher_alg skcipher_alg;
struct aead_alg aead_alg;
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d39e1664fc7e..3c65bf070c90 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -65,8 +65,12 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_get(struct device *dev)
{
int rc = pm_runtime_get_sync(dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(dev);
+ return rc;
+ }
- return (rc == 1 ? 0 : rc);
+ return 0;
}
void cc_pm_put_suspend(struct device *dev)
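
pm_runtime_get_sync() increments the device's usage counter even when it fails, so the error path must drop the reference with pm_runtime_put_noidle(); it also returns 1 when the device was already active, which the old "rc == 1 ? 0 : rc" folded to 0. Newer kernels package exactly this pattern as pm_runtime_resume_and_get() (a sketch, assuming that helper exists in your tree):

static int example_pm_get(struct device *dev)
{
	/* get_sync plus put_noidle-on-failure; returns 0 or a negative errno */
	return pm_runtime_resume_and_get(dev);
}
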
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 2984fdf51e85..f886401af13e 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -21,35 +21,3 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
-
-config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
- depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- help
- Enable support for IPSec Tx Inline.
-
-config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- help
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
-
- To compile this driver as a module, choose M here: the module
- will be called chtls.
-
-config CHELSIO_TLS_DEVICE
- bool "Chelsio Inline KTLS Offload"
- depends on CHELSIO_T4
- depends on TLS_DEVICE
- select CRYPTO_DEV_CHELSIO
- default y
- help
- This flag enables support for kernel tls offload over Chelsio T6
- crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE flag can be enabled
- only if CONFIG_TLS and CONFIG_TLS_DEVICE flags are enabled.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index 0e9d035927e9..2e5df484ab01 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -3,8 +3,3 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-chcr-objs += chcr_ktls.o
-#endif
-chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
-obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index d4f6e010dc79..507aafe93f21 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -86,39 +86,6 @@
KEY_CONTEXT_OPAD_PRESENT_M)
#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
-#define TLS_KEYCTX_RXFLIT_CNT_S 24
-#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
-
-#define TLS_KEYCTX_RXPROT_VER_S 20
-#define TLS_KEYCTX_RXPROT_VER_M 0xf
-#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
-
-#define TLS_KEYCTX_RXCIPH_MODE_S 16
-#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
-#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
-
-#define TLS_KEYCTX_RXAUTH_MODE_S 12
-#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
-#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
-
-#define TLS_KEYCTX_RXCIAU_CTRL_S 11
-#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
-
-#define TLS_KEYCTX_RX_SEQCTR_S 9
-#define TLS_KEYCTX_RX_SEQCTR_M 0x3
-#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
-
-#define TLS_KEYCTX_RX_VALID_S 8
-#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
-
-#define TLS_KEYCTX_RXCK_SIZE_S 3
-#define TLS_KEYCTX_RXCK_SIZE_M 0x7
-#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
-
-#define TLS_KEYCTX_RXMK_SIZE_S 0
-#define TLS_KEYCTX_RXMK_SIZE_M 0x7
-#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
-
#define CHCR_HASH_MAX_DIGEST_SIZE 64
#define CHCR_MAX_SHA_DIGEST_SIZE 64
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index bd8dac806e7a..f91f9d762a45 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -33,23 +33,8 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
-static const struct tlsdev_ops chcr_ktls_ops = {
- .tls_dev_add = chcr_ktls_dev_add,
- .tls_dev_del = chcr_ktls_dev_del,
-};
-#endif
-
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-static void update_netdev_features(void);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
- [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
-#endif
};
static struct cxgb4_uld_info chcr_uld_info = {
@@ -60,12 +45,6 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
-#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
- .tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
- .tlsdev_ops = &chcr_ktls_ops,
-#endif
};
static void detach_work_fn(struct work_struct *work)
@@ -148,7 +127,7 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- mutex_lock(&drv_data.drv_mutex);
+ mutex_lock(&drv_data.drv_mutex);
if (drv_data.last_dev == u_ctx) {
if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
drv_data.last_dev = list_first_entry(&drv_data.act_dev,
@@ -241,23 +220,6 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
-#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
-{
- /* In case if skb's decrypted bit is set, it's nic tls packet, else it's
- * ipsec packet.
- */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (skb->decrypted)
- return chcr_ktls_xmit(skb, dev);
-#endif
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
- return chcr_ipsec_xmit(skb, dev);
-#endif
- return 0;
-}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
-
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev = &u_ctx->dev;
@@ -305,24 +267,6 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
return ret;
}
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-static void update_netdev_features(void)
-{
- struct uld_ctx *u_ctx, *tmp;
-
- mutex_lock(&drv_data.drv_mutex);
- list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
- if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(&u_ctx->lldi);
- }
- list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
- if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(&u_ctx->lldi);
- }
- mutex_unlock(&drv_data.drv_mutex);
-}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
static int __init chcr_crypto_init(void)
{
INIT_LIST_HEAD(&drv_data.act_dev);
@@ -332,12 +276,6 @@ static int __init chcr_crypto_init(void)
drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
- #ifdef CONFIG_CHELSIO_IPSEC_INLINE
- rtnl_lock();
- update_netdev_features();
- rtnl_unlock();
- #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 67d77abd6775..b02f981e7c32 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -72,54 +72,6 @@ struct _key_ctx {
unsigned char key[];
};
-#define KEYCTX_TX_WR_IV_S 55
-#define KEYCTX_TX_WR_IV_M 0x1ffULL
-#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
-#define KEYCTX_TX_WR_IV_G(x) \
- (((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
-
-#define KEYCTX_TX_WR_AAD_S 47
-#define KEYCTX_TX_WR_AAD_M 0xffULL
-#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
-#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
- KEYCTX_TX_WR_AAD_M)
-
-#define KEYCTX_TX_WR_AADST_S 39
-#define KEYCTX_TX_WR_AADST_M 0xffULL
-#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
-#define KEYCTX_TX_WR_AADST_G(x) \
- (((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
-
-#define KEYCTX_TX_WR_CIPHER_S 30
-#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
-#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
-#define KEYCTX_TX_WR_CIPHER_G(x) \
- (((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
-
-#define KEYCTX_TX_WR_CIPHERST_S 23
-#define KEYCTX_TX_WR_CIPHERST_M 0x7f
-#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
-#define KEYCTX_TX_WR_CIPHERST_G(x) \
- (((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
-
-#define KEYCTX_TX_WR_AUTH_S 14
-#define KEYCTX_TX_WR_AUTH_M 0x1ff
-#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
-#define KEYCTX_TX_WR_AUTH_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
-
-#define KEYCTX_TX_WR_AUTHST_S 7
-#define KEYCTX_TX_WR_AUTHST_M 0x7f
-#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
-#define KEYCTX_TX_WR_AUTHST_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
-
-#define KEYCTX_TX_WR_AUTHIN_S 0
-#define KEYCTX_TX_WR_AUTHIN_M 0x7f
-#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
-#define KEYCTX_TX_WR_AUTHIN_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
-
#define WQ_RETRY 5
struct chcr_driver_data {
struct list_head act_dev;
@@ -157,42 +109,6 @@ struct uld_ctx {
struct chcr_dev dev;
};
-struct sge_opaque_hdr {
- void *dev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
-struct chcr_ipsec_req {
- struct ulp_txpkt ulptx;
- struct ulptx_idata sc_imm;
- struct cpl_tx_sec_pdu sec_cpl;
- struct _key_ctx key_ctx;
-};
-
-struct chcr_ipsec_wr {
- struct fw_ulptx_wr wreq;
- struct chcr_ipsec_req req;
-};
-
-#define ESN_IV_INSERT_OFFSET 12
-struct chcr_ipsec_aadiv {
- __be32 spi;
- u8 seq_no[8];
- u8 iv[8];
-};
-
-struct ipsec_sa_entry {
- int hmac_ctrl;
- u16 esn;
- u16 resv;
- unsigned int enckey_len;
- unsigned int kctx_len;
- unsigned int authsize;
- __be32 key_ctx_hdr;
- char salt[MAX_SALT];
- char key[2 * AES_MAX_KEY_SIZE];
-};
-
/*
* sgl_len - calculates the size of an SGL of the given capacity
* @n: the number of SGL entries
@@ -221,18 +137,4 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
-int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
-void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
-extern void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction);
-#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 354836468c5d..7e7a8f01ea6b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -780,8 +780,8 @@ static int hifn_register_rng(struct hifn_device *dev)
dev->pk_clk_freq) * 256;
dev->rng.name = dev->name;
- dev->rng.data_present = hifn_rng_data_present,
- dev->rng.data_read = hifn_rng_data_read,
+ dev->rng.data_present = hifn_rng_data_present;
+ dev->rng.data_read = hifn_rng_data_read;
dev->rng.priv = (unsigned long)dev;
return hwrng_register(&dev->rng);
@@ -1235,7 +1235,8 @@ static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
int idx;
dma_addr_t addr;
- addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
+ addr = dma_map_page(&dev->pdev->dev, page, offset, size,
+ DMA_TO_DEVICE);
idx = dma->srci;
@@ -1293,7 +1294,8 @@ static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
int idx;
dma_addr_t addr;
- addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
+ addr = dma_map_page(&dev->pdev->dev, page, offset, size,
+ DMA_FROM_DEVICE);
idx = dma->dsti;
dma->dstr[idx].p = __cpu_to_le32(addr);
@@ -2470,7 +2472,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err)
goto err_out_disable_pci_device;
@@ -2514,8 +2516,9 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
- &dev->desc_dma);
+ dev->desc_virt = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct hifn_dma),
+ &dev->desc_dma, GFP_KERNEL);
if (!dev->desc_virt) {
dev_err(&pdev->dev, "Failed to allocate descriptor rings.\n");
err = -ENOMEM;
@@ -2572,8 +2575,8 @@ err_out_free_irq:
free_irq(dev->irq, dev);
tasklet_kill(&dev->tasklet);
err_out_free_desc:
- pci_free_consistent(pdev, sizeof(struct hifn_dma),
- dev->desc_virt, dev->desc_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct hifn_dma), dev->desc_virt,
+ dev->desc_dma);
err_out_unmap_bars:
for (i = 0; i < 3; ++i)
@@ -2610,8 +2613,8 @@ static void hifn_remove(struct pci_dev *pdev)
hifn_flush(dev);
- pci_free_consistent(pdev, sizeof(struct hifn_dma),
- dev->desc_virt, dev->desc_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct hifn_dma),
+ dev->desc_virt, dev->desc_dma);
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
@@ -2642,9 +2645,6 @@ static int __init hifn_init(void)
unsigned int freq;
int err;
- /* HIFN supports only 32-bit addresses */
- BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
-
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
pr_err("hifn795x: invalid hifn_pll_ref clock, must be pci or ext");
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ed730d173e95..f69252b24671 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -56,7 +56,6 @@ struct hpre_dfx {
* Just relevant for PF.
*/
struct hpre_debug {
- struct dentry *debug_root;
struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 7b5cb27d473d..a87f9904087a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -98,9 +98,6 @@ struct hpre_asym_request {
struct timespec64 req_time;
};
-static DEFINE_MUTEX(hpre_alg_lock);
-static unsigned int hpre_active_devs;
-
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
unsigned long flags;
@@ -191,8 +188,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
hpre_req->dst = NULL;
dma_dir = DMA_FROM_DEVICE;
}
- *tmp = dma_map_single(dev, sg_virt(data),
- len, dma_dir);
+ *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
@@ -242,8 +238,8 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
((is_dh && !is_src) || !is_dh))
ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
else
- ret = hpre_prepare_dma_buf(hpre_req, data, len,
- is_src, &tmp);
+ ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
+
if (unlikely(ret))
return ret;
@@ -270,11 +266,9 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
if (src) {
if (req->src)
- dma_free_coherent(dev, ctx->key_sz,
- req->src, tmp);
+ dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
else
- dma_unmap_single(dev, tmp,
- ctx->key_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
}
tmp = le64_to_cpu(sqe->out);
@@ -477,7 +471,7 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
h_req->areq.dh = kreq;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
- msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
+ msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
}
msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
@@ -534,6 +528,8 @@ static int hpre_dh_compute_value(struct kpp_request *req)
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
if (unlikely(ret))
goto clear_all;
+ } else {
+ msg->in = cpu_to_le64(ctx->dh.dma_g);
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
@@ -743,7 +739,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
if (unlikely(ret))
@@ -791,11 +787,11 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
if (ctx->crt_g2_mode) {
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_CRT);
} else {
- msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
+ msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_NCRT);
}
@@ -1160,36 +1156,25 @@ static struct kpp_alg dh = {
int hpre_algs_register(void)
{
- int ret = 0;
-
- mutex_lock(&hpre_alg_lock);
- if (++hpre_active_devs == 1) {
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
- if (ret)
- goto unlock;
+ int ret;
+
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ return ret;
#ifdef CONFIG_CRYPTO_DH
- ret = crypto_register_kpp(&dh);
- if (ret) {
- crypto_unregister_akcipher(&rsa);
- goto unlock;
- }
+ ret = crypto_register_kpp(&dh);
+ if (ret)
+ crypto_unregister_akcipher(&rsa);
#endif
- }
-unlock:
- mutex_unlock(&hpre_alg_lock);
return ret;
}
void hpre_algs_unregister(void)
{
- mutex_lock(&hpre_alg_lock);
- if (--hpre_active_devs == 0) {
- crypto_unregister_akcipher(&rsa);
+ crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
- crypto_unregister_kpp(&dh);
+ crypto_unregister_kpp(&dh);
#endif
- }
- mutex_unlock(&hpre_alg_lock);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index b135c74fb619..a33394d91bbf 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -90,7 +90,6 @@
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
-static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -106,6 +105,11 @@ struct hpre_hw_error {
const char *msg;
};
+static struct hisi_qm_list hpre_devices = {
+ .register_to_crypto = hpre_algs_register,
+ .unregister_from_crypto = hpre_algs_unregister,
+};
+
static const char * const hpre_debug_file_name[] = {
[HPRE_CURRENT_QM] = "current_qm",
[HPRE_CLEAR_ENABLE] = "rdclr_en",
@@ -186,7 +190,7 @@ static const struct kernel_param_ops hpre_pf_q_num_ops = {
static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -864,9 +868,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
- hisi_qm_add_to_list(qm, &hpre_devices);
-
- ret = hpre_algs_register();
+ ret = hisi_qm_alg_register(qm, &hpre_devices);
if (ret < 0) {
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
@@ -875,18 +877,17 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_with_crypto_register;
+ goto err_with_alg_register;
}
return 0;
-err_with_crypto_register:
- hpre_algs_unregister();
+err_with_alg_register:
+ hisi_qm_alg_unregister(qm, &hpre_devices);
err_with_qm_start:
- hisi_qm_del_from_list(qm, &hpre_devices);
hpre_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
hisi_qm_dev_err_uninit(qm);
@@ -899,14 +900,13 @@ err_with_qm_init:
static void hpre_remove(struct pci_dev *pdev)
{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- hpre_algs_unregister();
- hisi_qm_del_from_list(qm, &hpre_devices);
+ hisi_qm_wait_task_finish(qm, &hpre_devices);
+ hisi_qm_alg_unregister(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
- ret = hisi_qm_sriov_disable(pdev);
+ ret = hisi_qm_sriov_disable(pdev, qm->is_frozen);
if (ret) {
pci_err(pdev, "Disable SRIOV fail!\n");
return;
@@ -918,7 +918,7 @@ static void hpre_remove(struct pci_dev *pdev)
}
hpre_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
@@ -939,6 +939,7 @@ static struct pci_driver hpre_pci_driver = {
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
hisi_qm_sriov_configure : NULL,
.err_handler = &hpre_err_handler,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hpre_register_debugfs(void)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 6527c53b073f..530f23116d7c 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -180,7 +180,10 @@
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
+#define WAIT_PERIOD 20
+#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
+#define QM_EQ_DEPTH (1024 * 2)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
@@ -652,7 +655,7 @@ static void qm_work_process(struct work_struct *work)
qp = qm_to_hisi_qp(qm, eqe);
qm_poll_qp(qp, qm);
- if (qm->status.eq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -661,7 +664,7 @@ static void qm_work_process(struct work_struct *work)
qm->status.eq_head++;
}
- if (eqe_num == QM_Q_DEPTH / 2 - 1) {
+ if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
eqe_num = 0;
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}
@@ -754,7 +757,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_tail = 0;
qp_status->cq_head = 0;
qp_status->cqc_phase = true;
- atomic_set(&qp_status->flags, 0);
+ atomic_set(&qp_status->used, 0);
}
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
@@ -1046,17 +1049,7 @@ static int qm_regs_show(struct seq_file *s, void *unused)
return 0;
}
-static int qm_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, qm_regs_show, inode->i_private);
-}
-
-static const struct file_operations qm_regs_fops = {
- .owner = THIS_MODULE,
- .open = qm_regs_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qm_regs);
static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *pos)
@@ -1370,7 +1363,13 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
return -EINVAL;
ret = kstrtou32(s, 0, &xeqe_id);
- if (ret || xeqe_id >= QM_Q_DEPTH) {
+ if (ret)
+ return -EINVAL;
+
+ if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
+ dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ return -EINVAL;
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
return -EINVAL;
}
@@ -1420,17 +1419,18 @@ static int qm_dbg_help(struct hisi_qm *qm, char *s)
static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
struct device *dev = &qm->pdev->dev;
- char *presult, *s;
+ char *presult, *s, *s_tmp;
int ret;
s = kstrdup(cmd_buf, GFP_KERNEL);
if (!s)
return -ENOMEM;
+ s_tmp = s;
presult = strsep(&s, " ");
if (!presult) {
- kfree(s);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_buffer_free;
}
if (!strcmp(presult, "sqc"))
@@ -1459,7 +1459,8 @@ static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
if (ret)
dev_info(dev, "Please echo help\n");
- kfree(s);
+err_buffer_free:
+ kfree(s_tmp);
return ret;
}
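
The s_tmp addition matters because strsep() advances its argument: after parsing, "s" points past the consumed token (or is NULL once the string is exhausted), so kfree(s) would either leak the buffer or free a mid-buffer pointer. A condensed sketch of the corrected pattern (example_parse is hypothetical):

static int example_parse(const char *cmd_buf)
{
	char *s, *s_tmp, *tok;

	s = kstrdup(cmd_buf, GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	s_tmp = s;			/* remember the allocation for kfree() */

	tok = strsep(&s, " ");		/* moves "s" past the first token */
	pr_debug("first token: %s\n", tok);

	kfree(s_tmp);			/* kfree(s) here would be a bug */
	return 0;
}
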
@@ -1644,7 +1645,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH))
+ if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -1981,7 +1982,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP ||
qp->is_resetting)) {
- dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
+ dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
@@ -2215,6 +2216,82 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
}
/**
+ * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
+ * If there is a user on the QM, return failure without doing anything.
+ * @qm: The qm to be frozen.
+ *
+ * This function freezes the QM so that SRIOV disabling can proceed.
+ */
+static int qm_frozen(struct hisi_qm *qm)
+{
+ down_write(&qm->qps_lock);
+
+ if (qm->is_frozen) {
+ up_write(&qm->qps_lock);
+ return 0;
+ }
+
+ if (!qm->qp_in_used) {
+ qm->qp_in_used = qm->qp_num;
+ qm->is_frozen = true;
+ up_write(&qm->qps_lock);
+ return 0;
+ }
+
+ up_write(&qm->qps_lock);
+
+ return -EBUSY;
+}
+
+static int qm_try_frozen_vfs(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm, *vf_qm;
+ struct pci_dev *dev;
+ int ret = 0;
+
+ if (!qm_list || !pdev)
+ return -EINVAL;
+
+ /* Try to freeze all the VFs before disabling SRIOV */
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+ if (pci_physfn(dev) == pdev) {
+ vf_qm = pci_get_drvdata(dev);
+ ret = qm_frozen(vf_qm);
+ if (ret)
+ goto frozen_fail;
+ }
+ }
+
+frozen_fail:
+ mutex_unlock(&qm_list->lock);
+
+ return ret;
+}
+
+/**
+ * hisi_qm_wait_task_finish() - Wait until the task is finished
+ * when removing the driver.
+ * @qm: The qm whose outstanding tasks need to finish.
+ * @qm_list: The list of all available devices.
+ */
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ while (qm_frozen(qm) ||
+ ((qm->fun_type == QM_HW_PF) &&
+ qm_try_frozen_vfs(qm->pdev, qm_list))) {
+ msleep(WAIT_PERIOD);
+ }
+
+ udelay(REMOVE_WAIT_DELAY);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
+
+/**
 * hisi_qm_get_free_qp_num() - Get the number of free qps in the qm.
 * @qm: The qm to query for free qps.
*
@@ -2282,7 +2359,7 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) +
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
@@ -2292,7 +2369,7 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
if (!qm->qdma.va)
return -ENOMEM;
- QM_INIT_BUF(qm, eqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -2338,6 +2415,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
mutex_init(&qm->mailbox_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
+ qm->is_frozen = false;
}
/**
@@ -2462,7 +2540,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
kfree(eqc);
@@ -2633,18 +2711,20 @@ static void qm_clear_queues(struct hisi_qm *qm)
/**
* hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped.
+ * @r: The reason to stop qm.
*
 * This function stops the qm and its qps; afterwards the qm cannot accept
 * requests. Related resources are not released in this state; hisi_qm_start
 * can be used to start the qm again.
*/
-int hisi_qm_stop(struct hisi_qm *qm)
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
{
struct device *dev = &qm->pdev->dev;
int ret = 0;
down_write(&qm->qps_lock);
+ qm->status.stop_reason = r;
if (!qm_avail_state(qm, QM_STOP)) {
ret = -EPERM;
goto err_unlock;
@@ -3081,11 +3161,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
/**
* hisi_qm_sriov_disable - disable virtual functions
- * @pdev: the PCI device
+ * @pdev: the PCI device.
+ * @is_frozen: true when all the VFs are frozen.
*
- * Return failure if there are VFs assigned already.
+ * Return failure if there are VFs assigned already or a VF is in use.
*/
-int hisi_qm_sriov_disable(struct pci_dev *pdev)
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
@@ -3094,7 +3175,12 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev)
return -EPERM;
}
- /* remove in hpre_pci_driver will be called to free VF resources */
+ /* While a VF is in use, SRIOV cannot be disabled. */
+ if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
+ pci_err(pdev, "Task is using its VF!\n");
+ return -EBUSY;
+ }
+
pci_disable_sriov(pdev);
return qm_clear_vft_config(qm);
}
@@ -3110,7 +3196,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, 0);
else
return hisi_qm_sriov_enable(pdev, num_vfs);
}
@@ -3290,10 +3376,10 @@ static int qm_set_msi(struct hisi_qm *qm, bool set)
return 0;
}
-static int qm_vf_reset_prepare(struct hisi_qm *qm)
+static int qm_vf_reset_prepare(struct hisi_qm *qm,
+ enum qm_stop_reason stop_reason)
{
struct hisi_qm_list *qm_list = qm->qm_list;
- int stop_reason = qm->status.stop_reason;
struct pci_dev *pdev = qm->pdev;
struct pci_dev *virtfn;
struct hisi_qm *vf_qm;
@@ -3306,8 +3392,10 @@ static int qm_vf_reset_prepare(struct hisi_qm *qm)
continue;
if (pci_physfn(virtfn) == pdev) {
- vf_qm->status.stop_reason = stop_reason;
- ret = hisi_qm_stop(vf_qm);
+ /* save VFs PCIE BAR configuration */
+ pci_save_state(virtfn);
+
+ ret = hisi_qm_stop(vf_qm, stop_reason);
if (ret)
goto stop_fail;
}
@@ -3346,15 +3434,14 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
}
if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm);
+ ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop VFs!\n");
return ret;
}
}
- qm->status.stop_reason = QM_SOFT_RESET;
- ret = hisi_qm_stop(qm);
+ ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
pci_err(pdev, "Fails to stop QM!\n");
return ret;
@@ -3471,6 +3558,9 @@ static int qm_vf_reset_done(struct hisi_qm *qm)
continue;
if (pci_physfn(virtfn) == pdev) {
+ /* enable VFs PCIE BAR configuration */
+ pci_restore_state(virtfn);
+
ret = qm_restart(vf_qm);
if (ret)
goto restart_fail;
@@ -3695,7 +3785,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
}
if (qm->vfs_num) {
- ret = qm_vf_reset_prepare(qm);
+ ret = qm_vf_reset_prepare(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
ret);
@@ -3703,7 +3793,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
}
}
- ret = hisi_qm_stop(qm);
+ ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
return;
@@ -3821,6 +3911,23 @@ err_aeq_irq:
return ret;
}
+/**
+ * hisi_qm_dev_shutdown() - Shut down the device.
+ * @pdev: The device to be shut down.
+ *
+ * This function stops the qm when the OS shuts down or reboots.
+ */
+void hisi_qm_dev_shutdown(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
+
static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
@@ -3834,6 +3941,58 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
}
/**
+ * hisi_qm_alg_register() - Register algs to crypto and add the qm to qm_list.
+ * @qm: The qm to be added.
+ * @qm_list: The qm list.
+ *
+ * This function adds the qm to the qm list, and registers the algorithms
+ * with crypto when the list was previously empty.
+ */
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ int flag = 0;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ if (list_empty(&qm_list->list))
+ flag = 1;
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+
+ if (flag) {
+ ret = qm_list->register_to_crypto();
+ if (ret) {
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
+
+/**
+ * hisi_qm_alg_unregister() - Unregister algs from crypto and delete the qm
+ * from the qm list.
+ * @qm: The qm to be deleted.
+ * @qm_list: The qm list.
+ *
+ * This function deletes the qm from the qm list, and unregisters the
+ * algorithms from crypto when the list becomes empty.
+ */
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+
+ if (list_empty(&qm_list->list))
+ qm_list->unregister_from_crypto();
+}
+EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needing init.
*
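
Putting the two helpers together, a driver now only wires its register/unregister callbacks into its hisi_qm_list and calls the qm helpers from probe/remove, as the hpre hunk earlier in this patch does. A sketch with hypothetical example_* names:

static int example_algs_register(void);
static void example_algs_unregister(void);

static struct hisi_qm_list example_devices = {
	.register_to_crypto	= example_algs_register,
	.unregister_from_crypto	= example_algs_unregister,
};

static int example_probe(struct hisi_qm *qm)
{
	/* Registers the algorithms only for the first device in the list. */
	return hisi_qm_alg_register(qm, &example_devices);
}

static void example_remove(struct hisi_qm *qm)
{
	/* Block until users drain, then drop the qm; the last one unregisters. */
	hisi_qm_wait_task_finish(qm, &example_devices);
	hisi_qm_alg_unregister(qm, &example_devices);
}
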
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 6c1d3c7d64ee..0420f4ce7197 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -79,7 +79,7 @@
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
-
+#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
/* page number for queue file region */
@@ -193,6 +193,8 @@ struct hisi_qm_err_ini {
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
+ int (*register_to_crypto)(void);
+ void (*unregister_from_crypto)(void);
};
struct hisi_qm {
@@ -243,6 +245,7 @@ struct hisi_qm {
const char *algs;
bool use_sva;
+ bool is_frozen;
resource_size_t phys_base;
resource_size_t phys_size;
struct uacce_device *uacce;
@@ -306,7 +309,7 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
}
ret = kstrtou32(val, 10, &n);
- if (ret || !n || n > q_num)
+ if (ret || n < QM_MIN_QNUM || n > q_num)
return -EINVAL;
return param_set_int(val, kp);
@@ -336,26 +339,10 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
mutex_init(&qm_list->lock);
}
-static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
- struct hisi_qm_list *qm_list)
-{
- mutex_lock(&qm_list->lock);
- list_add_tail(&qm->list, &qm_list->list);
- mutex_unlock(&qm_list->lock);
-}
-
-static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
- struct hisi_qm_list *qm_list)
-{
- mutex_lock(&qm_list->lock);
- list_del(&qm->list);
- mutex_unlock(&qm_list->lock);
-}
-
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
-int hisi_qm_stop(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
@@ -367,7 +354,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
-int hisi_qm_sriov_disable(struct pci_dev *pdev);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
@@ -390,4 +377,8 @@ void hisi_acc_free_sgl_pool(struct device *dev,
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 497969ae8b23..bb493423668c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -66,8 +66,6 @@
#define SEC_SQE_AEAD_FLAG 3
#define SEC_SQE_DONE 0x1
-static atomic_t sec_active_devs;
-
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -342,11 +340,14 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
ret = sec_alloc_pbuf_resource(dev, res);
if (ret) {
dev_err(dev, "fail to alloc pbuf dma resource!\n");
- goto alloc_fail;
+ goto alloc_pbuf_fail;
}
}
return 0;
+alloc_pbuf_fail:
+ if (ctx->alg_type == SEC_AEAD)
+ sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
sec_free_civ_resource(dev, res);
@@ -457,8 +458,10 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->fake_req_limit = QM_Q_DEPTH >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
- if (!ctx->qp_ctx)
- return -ENOMEM;
+ if (!ctx->qp_ctx) {
+ ret = -ENOMEM;
+ goto err_destroy_qps;
+ }
for (i = 0; i < sec->ctx_q_num; i++) {
ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
@@ -467,12 +470,15 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
}
return 0;
+
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+err_destroy_qps:
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
+
return ret;
}
@@ -1633,33 +1639,24 @@ static struct aead_alg sec_aeads[] = {
int sec_register_to_crypto(void)
{
- int ret = 0;
+ int ret;
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
- if (ret)
- return ret;
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
- if (ret)
- goto reg_aead_fail;
- }
-
- return ret;
-
-reg_aead_fail:
- crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));
+ ret = crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ if (ret)
+ return ret;
+ ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ if (ret)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
- }
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
+ crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}
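
/*
 * [Editor's note] The sec_active_devs counter above is removable because
 * registration is now serialized by qm_list->lock in qm.c:
 * sec_register_to_crypto() runs only when the first qm joins the list,
 * and sec_unregister_from_crypto() only when the last one leaves, so the
 * "repeat register" guard is structural rather than counted.
 */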
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2297425486cb..548896394c4b 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -99,7 +99,11 @@ struct sec_dfx_item {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
-static struct hisi_qm_list sec_devices;
+
+static struct hisi_qm_list sec_devices = {
+ .register_to_crypto = sec_register_to_crypto,
+ .unregister_from_crypto = sec_unregister_from_crypto,
+};
static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
@@ -165,7 +169,7 @@ static const struct kernel_param_ops sec_pf_q_num_ops = {
static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");
static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
@@ -879,29 +883,26 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- hisi_qm_add_to_list(qm, &sec_devices);
-
- ret = sec_register_to_crypto();
+ ret = hisi_qm_alg_register(qm, &sec_devices);
if (ret < 0) {
pr_err("Failed to register driver to crypto.\n");
- goto err_remove_from_list;
+ goto err_qm_stop;
}
if (qm->fun_type == QM_HW_PF && vfs_num) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_crypto_unregister;
+ goto err_alg_unregister;
}
return 0;
-err_crypto_unregister:
- sec_unregister_from_crypto();
+err_alg_unregister:
+ hisi_qm_alg_unregister(qm, &sec_devices);
-err_remove_from_list:
- hisi_qm_del_from_list(qm, &sec_devices);
+err_qm_stop:
sec_debugfs_exit(qm);
- hisi_qm_stop(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
sec_probe_uninit(qm);
@@ -914,19 +915,16 @@ err_qm_uninit:
static void sec_remove(struct pci_dev *pdev)
{
- struct sec_dev *sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &sec->qm;
-
- sec_unregister_from_crypto();
-
- hisi_qm_del_from_list(qm, &sec_devices);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ hisi_qm_wait_task_finish(qm, &sec_devices);
+ hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev);
+ hisi_qm_sriov_disable(pdev, qm->is_frozen);
sec_debugfs_exit(qm);
- (void)hisi_qm_stop(qm);
+ (void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
@@ -950,6 +948,7 @@ static struct pci_driver sec_pci_driver = {
.remove = sec_remove,
.err_handler = &sec_err_handler,
.sriov_configure = hisi_qm_sriov_configure,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void sec_register_debugfs(void)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 4484be13812b..92397f993e23 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -9,20 +9,6 @@
#include <linux/list.h>
#include "../qm.h"
-/* hisi_zip_sqe dw3 */
-#define HZIP_BD_STATUS_M GENMASK(7, 0)
-/* hisi_zip_sqe dw7 */
-#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
-/* hisi_zip_sqe dw8 */
-#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
-/* hisi_zip_sqe dw9 */
-#define HZIP_REQ_TYPE_M GENMASK(7, 0)
-#define HZIP_ALG_TYPE_ZLIB 0x02
-#define HZIP_ALG_TYPE_GZIP 0x03
-#define HZIP_BUF_TYPE_M GENMASK(11, 8)
-#define HZIP_PBUFFER 0x0
-#define HZIP_SGL 0x1
-
enum hisi_zip_error_type {
/* negative compression */
HZIP_NC_ERR = 0x0d,
@@ -39,7 +25,6 @@ struct hisi_zip_ctrl;
struct hisi_zip {
struct hisi_qm qm;
- struct list_head list;
struct hisi_zip_ctrl *ctrl;
struct hisi_zip_dfx dfx;
};
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 01fd6a78111d..08b4660b014c 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -6,6 +6,20 @@
#include <linux/scatterlist.h>
#include "zip.h"
+/* hisi_zip_sqe dw3 */
+#define HZIP_BD_STATUS_M GENMASK(7, 0)
+/* hisi_zip_sqe dw7 */
+#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw8 */
+#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
+/* hisi_zip_sqe dw9 */
+#define HZIP_REQ_TYPE_M GENMASK(7, 0)
+#define HZIP_ALG_TYPE_ZLIB 0x02
+#define HZIP_ALG_TYPE_GZIP 0x03
+#define HZIP_BUF_TYPE_M GENMASK(11, 8)
+#define HZIP_PBUFFER 0x0
+#define HZIP_SGL 0x1
+
#define HZIP_ZLIB_HEAD_SIZE 2
#define HZIP_GZIP_HEAD_SIZE 10
@@ -16,22 +30,29 @@
#define GZIP_HEAD_FLG_SHIFT 3
#define GZIP_HEAD_FEXTRA_SHIFT 10
-#define GZIP_HEAD_FEXTRA_XLEN 2
+#define GZIP_HEAD_FEXTRA_XLEN 2UL
#define GZIP_HEAD_FHCRC_SIZE 2
-#define HZIP_CTX_Q_NUM 2
#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
-static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {0x1f, 0x8b, 0x08, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x03};
+static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
+ 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
+};
+
enum hisi_zip_alg_type {
HZIP_ALG_TYPE_COMP = 0,
HZIP_ALG_TYPE_DECOMP = 1,
};
+enum {
+ HZIP_QPC_COMP,
+ HZIP_QPC_DECOMP,
+ HZIP_CTX_Q_NUM
+};
+
#define COMP_NAME_TO_TYPE(alg_name) \
(!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \
!strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \
@@ -46,13 +67,13 @@ enum hisi_zip_alg_type {
struct hisi_zip_req {
struct acomp_req *req;
- int sskip;
- int dskip;
+ u32 sskip;
+ u32 dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
dma_addr_t dma_dst;
- int req_id;
+ u16 req_id;
};
struct hisi_zip_req_q {
@@ -71,8 +92,6 @@ struct hisi_zip_qp_ctx {
};
struct hisi_zip_ctx {
-#define QPC_COMP 0
-#define QPC_DECOMP 1
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
};
@@ -116,7 +135,7 @@ static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen, int sskip, int dskip)
+ u32 dlen, u32 sskip, u32 dskip)
{
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
@@ -143,7 +162,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- dev_err(dev, "start qp failed!\n");
+ dev_err(dev, "failed to start qp (%d)!\n", ret);
return ret;
}
@@ -166,7 +185,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
if (ret) {
- pr_err("Can not create zip qps!\n");
+ pr_err("failed to create zip qps (%d)!\n", ret);
return -ENODEV;
}
@@ -264,11 +283,11 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
return 0;
err_free_loop1:
- kfree(ctx->qp_ctx[QPC_DECOMP].req_q.req_bitmap);
+ kfree(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_loop0:
- kfree(ctx->qp_ctx[QPC_COMP].req_q.q);
+ kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_bitmap:
- kfree(ctx->qp_ctx[QPC_COMP].req_q.req_bitmap);
+ kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -303,8 +322,8 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
return 0;
err_free_sgl_pool0:
- hisi_acc_free_sgl_pool(&ctx->qp_ctx[QPC_COMP].qp->qm->pdev->dev,
- ctx->qp_ctx[QPC_COMP].sgl_pool);
+ hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
+ ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
return -ENOMEM;
}
@@ -342,7 +361,6 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = sqe->dw3 & HZIP_BD_STATUS_M;
-
if (status != 0 && status != HZIP_NC_ERR) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
@@ -377,19 +395,28 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+ struct device *dev;
int ret;
ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
- if (ret)
+ if (ret) {
+ pr_err("failed to init ctx (%d)!\n", ret);
return ret;
+ }
+
+ dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;
ret = hisi_zip_create_req_q(ctx);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to create request queue (%d)!\n", ret);
goto err_ctx_exit;
+ }
ret = hisi_zip_create_sgl_pool(ctx);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
goto err_release_req_q;
+ }
hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);
@@ -419,13 +446,15 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size)
+ if (ret != head_size) {
+ pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
+ }
return head_size;
}
-static size_t get_gzip_head_size(struct scatterlist *sgl)
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -434,13 +463,20 @@ static size_t get_gzip_head_size(struct scatterlist *sgl)
return __get_gzip_head_size(buf);
}
-static size_t get_comp_head_size(struct scatterlist *src, u8 req_type)
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
+ if (!acomp_req->src || !acomp_req->slen)
+ return -EINVAL;
+
+ if ((req_type == HZIP_ALG_TYPE_GZIP) &&
+ (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
+ return -EINVAL;
+
switch (req_type) {
case HZIP_ALG_TYPE_ZLIB:
return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
case HZIP_ALG_TYPE_GZIP:
- return get_gzip_head_size(src);
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
default:
pr_err("request type does not support!\n");
return -EINVAL;
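
/*
 * [Editor's note] With this rework the gzip head is treated as the fixed
 * GZIP_HEAD_FEXTRA_SHIFT (10 byte) prefix instead of being parsed out of
 * the scatterlist, so e.g. a gzip request with slen == 8 now fails fast
 * with -EINVAL rather than reading beyond the input.
 */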
@@ -462,7 +498,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- return ERR_PTR(-EBUSY);
+ return ERR_PTR(-EAGAIN);
}
set_bit(req_id, req_q->req_bitmap);
@@ -492,8 +528,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_sqe zip_sqe;
- dma_addr_t input;
- dma_addr_t output;
+ dma_addr_t input, output;
int ret;
if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
@@ -501,8 +536,11 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
req->req_id << 1, &input);
- if (IS_ERR(req->hw_src))
+ if (IS_ERR(req->hw_src)) {
+ dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+ PTR_ERR(req->hw_src));
return PTR_ERR(req->hw_src);
+ }
req->dma_src = input;
req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
@@ -510,6 +548,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
&output);
if (IS_ERR(req->hw_dst)) {
ret = PTR_ERR(req->hw_dst);
+ dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
+ ret);
goto err_unmap_input;
}
req->dma_dst = output;
@@ -524,6 +564,8 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
ret = hisi_qp_send(qp, &zip_sqe);
if (ret < 0) {
atomic64_inc(&dfx->send_busy_cnt);
+ ret = -EAGAIN;
+ dev_dbg_ratelimited(dev, "failed to send request!\n");
goto err_unmap_output;
}
@@ -539,23 +581,29 @@ err_unmap_input:
static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_COMP];
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
int head_size;
int ret;
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0)
- return -ENOMEM;
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+ head_size);
+ return head_size;
+ }
- req = hisi_zip_create_req(acomp_req, qp_ctx, (size_t)head_size, true);
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
if (IS_ERR(req))
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS)
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
+ }
return ret;
}
@@ -563,20 +611,28 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[QPC_DECOMP];
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
- size_t head_size;
- int ret;
+ int head_size, ret;
- head_size = get_comp_head_size(acomp_req->src, qp_ctx->qp->req_type);
+ head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+ head_size);
+ return head_size;
+ }
req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
if (IS_ERR(req))
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS)
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+ ret);
hisi_zip_remove_req(qp_ctx, req);
+ }
return ret;
}
@@ -611,17 +667,17 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
int hisi_zip_register_to_crypto(void)
{
- int ret = 0;
+ int ret;
ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
if (ret) {
- pr_err("Zlib acomp algorithm registration failed\n");
+ pr_err("failed to register to zlib (%d)!\n", ret);
return ret;
}
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
if (ret) {
- pr_err("Gzip acomp algorithm registration failed\n");
+ pr_err("failed to register to gzip (%d)!\n", ret);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index e2845b2c963d..4bd2c811abba 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -17,7 +17,6 @@
#define PCI_DEVICE_ID_ZIP_PF 0xa250
#define PCI_DEVICE_ID_ZIP_VF 0xa251
-#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
@@ -30,18 +29,18 @@
#define DECOMP3_ENABLE BIT(5)
#define DECOMP4_ENABLE BIT(6)
#define DECOMP5_ENABLE BIT(7)
-#define ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
+#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
DECOMP0_ENABLE | DECOMP1_ENABLE | \
DECOMP2_ENABLE | DECOMP3_ENABLE | \
DECOMP4_ENABLE | DECOMP5_ENABLE)
-#define DECOMP_CHECK_ENABLE BIT(16)
+#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
#define HZIP_PORT_ARCA_CHE_0 0x301040
#define HZIP_PORT_ARCA_CHE_1 0x301044
#define HZIP_PORT_AWCA_CHE_0 0x301060
#define HZIP_PORT_AWCA_CHE_1 0x301064
-#define CACHE_ALL_EN 0xffffffff
+#define HZIP_CACHE_ALL_EN 0xffffffff
#define HZIP_BD_RUSER_32_63 0x301110
#define HZIP_SGL_RUSER_32_63 0x30111c
@@ -83,7 +82,7 @@
#define HZIP_PF_DEF_Q_BASE 0
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
-#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
@@ -92,9 +91,13 @@
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
+#define HZIP_CNT_CLR_CE_EN BIT(0)
+#define HZIP_RO_CNT_CLR_CE_EN BIT(2)
+#define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
+ HZIP_RO_CNT_CLR_CE_EN)
+
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -106,6 +109,11 @@ struct zip_dfx_item {
u32 offset;
};
+static struct hisi_qm_list zip_devices = {
+ .register_to_crypto = hisi_zip_register_to_crypto,
+ .unregister_from_crypto = hisi_zip_unregister_from_crypto,
+};
+
static struct zip_dfx_item zip_dfx_files[] = {
{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
@@ -153,7 +161,6 @@ struct ctrl_debug_file {
*/
struct hisi_zip_ctrl {
struct hisi_zip *hisi_zip;
- struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
@@ -216,7 +223,7 @@ static const struct kernel_param_ops pf_q_num_ops = {
static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -256,15 +263,16 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
/* qm cache */
writel(AXI_M_CFG, base + QM_AXI_M_CFG);
writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
+
/* disable FLR triggered by BME(bus master enable) */
writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
- writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
- writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
- writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
- writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
@@ -280,10 +288,10 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
+ writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
base + HZIP_CLOCK_GATE_CTRL);
- /* enable sqc writeback */
+ /* enable sqc, cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
@@ -309,7 +317,7 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -356,7 +364,7 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
if (val > qm->vfs_num)
return -EINVAL;
- /* Calculate curr_qm_qp_num and store */
+ /* Calculate curr_qm_qp_num according to the PF or VF Dev ID, and store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
@@ -387,7 +395,7 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
- SOFT_CTRL_CNT_CLR_CE_BIT;
+ HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
@@ -399,14 +407,14 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
- ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
+ ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
char tbuf[HZIP_BUF_SIZE];
@@ -426,12 +434,13 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return -EINVAL;
}
spin_unlock_irq(&file->lock);
- ret = sprintf(tbuf, "%u\n", val);
+ ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *pos)
{
struct ctrl_debug_file *file = filp->private_data;
char tbuf[HZIP_BUF_SIZE];
@@ -480,11 +489,10 @@ err_input:
static const struct file_operations ctrl_debug_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = hisi_zip_ctrl_debug_read,
+ .write = hisi_zip_ctrl_debug_write,
};
-
static int zip_debugfs_atomic64_set(void *data, u64 val)
{
if (val)
@@ -505,10 +513,8 @@ static int zip_debugfs_atomic64_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
zip_debugfs_atomic64_set, "%llu\n");
-static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_zip *hisi_zip = ctrl->hisi_zip;
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
@@ -517,9 +523,10 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
for (i = 0; i < HZIP_CORE_NUM; i++) {
if (i < HZIP_COMP_CORE_NUM)
- sprintf(buf, "comp_core%d", i);
+ scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
- sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);
+ scnprintf(buf, sizeof(buf), "decomp_core%d",
+ i - HZIP_COMP_CORE_NUM);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -529,7 +536,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_regset32("regs", 0444, tmp_d, regset);
}
@@ -548,33 +555,32 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
debugfs_create_file(zip_dfx_files[i].name,
- 0644,
- tmp_dir,
- data,
- &zip_atomic64_ops);
+ 0644, tmp_dir, data,
+ &zip_atomic64_ops);
}
}
-static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&zip->ctrl->files[i].lock);
+ zip->ctrl->files[i].ctrl = zip->ctrl;
+ zip->ctrl->files[i].index = i;
debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ zip->ctrl->files + i,
&ctrl_debug_fops);
}
- return hisi_zip_core_debug_init(ctrl);
+ return hisi_zip_core_debug_init(qm);
}
-static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
+static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -589,8 +595,7 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
if (qm->fun_type == QM_HW_PF) {
- hisi_zip->ctrl->debug_root = dev_d;
- ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
+ ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -604,25 +609,36 @@ failed_to_create:
return ret;
}
-static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
+/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
+static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ int i, j;
+ /* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ /* enable register read_clear bit */
+ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
+ for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
+ for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
+ readl(qm->io_base + core_offsets[i] +
+ hzip_dfx_regs[j].offset);
+
+ /* disable register read_clear bit */
writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
hisi_qm_debug_regs_clear(qm);
}
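
/*
 * [Editor's note] HZIP_SOFT_CTRL_CNT_CLR_CE selects read-to-clear mode:
 * while HZIP_RD_CNT_CLR_CE_EN is set, each readl() of a DFX counter above
 * returns its value and zeroes it, which is why the results of the reads
 * are deliberately discarded.
 */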
-static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
+static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
- if (qm->fun_type == QM_HW_PF)
- hisi_zip_debug_regs_clear(hisi_zip);
+ if (qm->fun_type == QM_HW_PF) {
+ hisi_zip_debug_regs_clear(qm);
+ qm->debug.curr_qm_qp_num = 0;
+ }
}
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -634,7 +650,7 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
while (err->msg) {
if (err->int_msk & err_sts) {
dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
+ err->msg, err->int_msk);
if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
err_val = readl(qm->io_base +
@@ -642,9 +658,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
((err_val >>
HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
- dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
- (err_val >>
- HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
}
}
err++;
@@ -729,7 +742,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
- hisi_zip_debug_regs_clear(hisi_zip);
+ hisi_zip_debug_regs_clear(qm);
return 0;
}
@@ -747,6 +760,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HZIP_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &zip_devices;
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
/*
@@ -803,32 +817,44 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
- goto err_qm_uninit;
+ goto err_dev_err_uninit;
- ret = hisi_zip_debugfs_init(hisi_zip);
+ ret = hisi_zip_debugfs_init(qm);
if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
+ pci_err(pdev, "failed to init debugfs (%d)!\n", ret);
- hisi_qm_add_to_list(qm, &zip_devices);
+ ret = hisi_qm_alg_register(qm, &zip_devices);
+ if (ret < 0) {
+ pci_err(pdev, "failed to register driver to crypto!\n");
+ goto err_qm_stop;
+ }
if (qm->uacce) {
ret = uacce_register(qm->uacce);
- if (ret)
- goto err_qm_uninit;
+ if (ret) {
+ pci_err(pdev, "failed to register uacce (%d)!\n", ret);
+ goto err_qm_alg_unregister;
+ }
}
if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
ret = hisi_qm_sriov_enable(pdev, vfs_num);
if (ret < 0)
- goto err_remove_from_list;
+ goto err_qm_alg_unregister;
}
return 0;
-err_remove_from_list:
- hisi_qm_del_from_list(qm, &zip_devices);
- hisi_zip_debugfs_exit(hisi_zip);
- hisi_qm_stop(qm);
+err_qm_alg_unregister:
+ hisi_qm_alg_unregister(qm, &zip_devices);
+
+err_qm_stop:
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
+
+err_dev_err_uninit:
+ hisi_qm_dev_err_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
@@ -837,18 +863,18 @@ err_qm_uninit:
static void hisi_zip_remove(struct pci_dev *pdev)
{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (qm->fun_type == QM_HW_PF && qm->vfs_num)
- hisi_qm_sriov_disable(pdev);
+ hisi_qm_wait_task_finish(qm, &zip_devices);
+ hisi_qm_alg_unregister(qm, &zip_devices);
- hisi_zip_debugfs_exit(hisi_zip);
- hisi_qm_stop(qm);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, qm->is_frozen);
+ hisi_zip_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
@@ -866,6 +892,7 @@ static struct pci_driver hisi_zip_pci_driver = {
.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
hisi_qm_sriov_configure : NULL,
.err_handler = &hisi_zip_err_handler,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_zip_register_debugfs(void)
@@ -890,29 +917,15 @@ static int __init hisi_zip_init(void)
ret = pci_register_driver(&hisi_zip_pci_driver);
if (ret < 0) {
+ hisi_zip_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
- goto err_pci;
}
- ret = hisi_zip_register_to_crypto();
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_crypto;
- }
-
- return 0;
-
-err_crypto:
- pci_unregister_driver(&hisi_zip_pci_driver);
-err_pci:
- hisi_zip_unregister_debugfs();
-
return ret;
}
static void __exit hisi_zip_exit(void)
{
- hisi_zip_unregister_from_crypto();
pci_unregister_driver(&hisi_zip_pci_driver);
hisi_zip_unregister_debugfs();
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 87226b7c2795..91f555ccbb31 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index fa7398e68858..eb2418450f12 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -304,6 +304,11 @@ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
/* Enable access to all IFPP program memories */
writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
+
+ /* bypass the OCE, if present */
+ if (priv->flags & EIP197_OCE)
+ writel(EIP197_DEBUG_OCE_BYPASS, EIP197_PE(priv) +
+ EIP197_PE_DEBUG(pe));
}
}
@@ -1495,6 +1500,9 @@ static int safexcel_probe_generic(void *pdev,
hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
+ priv->hwconfig.icever = 0;
+ priv->hwconfig.ocever = 0;
+ priv->hwconfig.psever = 0;
if (priv->flags & SAFEXCEL_HW_EIP197) {
/* EIP197 */
peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
@@ -1513,8 +1521,37 @@ static int safexcel_probe_generic(void *pdev,
EIP197_N_RINGS_MASK;
if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
priv->flags |= EIP197_PE_ARB;
- if (EIP206_OPT_ICE_TYPE(peopt) == 1)
+ if (EIP206_OPT_ICE_TYPE(peopt) == 1) {
priv->flags |= EIP197_ICE;
+ /* Detect the ICE EIP207 classification engine and version */
+ version = readl(EIP197_PE(priv) +
+ EIP197_PE_ICE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
+ dev_err(dev, "EIP%d: ICE EIP207 not detected.\n",
+ peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.icever = EIP197_VERSION_MASK(version);
+ }
+ if (EIP206_OPT_OCE_TYPE(peopt) == 1) {
+ priv->flags |= EIP197_OCE;
+ /* Detect EIP96PP packet stream editor and version */
+ version = readl(EIP197_PE(priv) + EIP197_PE_PSE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
+ dev_err(dev, "EIP%d: EIP96PP not detected.\n", peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.psever = EIP197_VERSION_MASK(version);
+ /* Detect the OCE EIP207 classification engine and version */
+ version = readl(EIP197_PE(priv) +
+ EIP197_PE_ICE_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP207_VERSION_LE) {
+ dev_err(dev, "EIP%d: OCE EIP207 not detected.\n",
+ peid);
+ return -ENODEV;
+ }
+ priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
+ }
/* If not a full TRC, then assume simple TRC */
if (!(hwopt & EIP197_OPT_HAS_TRC))
priv->flags |= EIP197_SIMPLE_TRC;
@@ -1552,13 +1589,14 @@ static int safexcel_probe_generic(void *pdev,
EIP197_PE_EIP96_OPTIONS(0));
/* Print single info line describing what we just detected */
- dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
+ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
priv->hwconfig.ppver, priv->hwconfig.pever,
- priv->hwconfig.algo_flags);
+ priv->hwconfig.algo_flags, priv->hwconfig.icever,
+ priv->hwconfig.ocever, priv->hwconfig.psever);
safexcel_configure(priv);
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 7c5fe382d272..9045f2d7f4c6 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -12,7 +12,9 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+#include <crypto/sha3.h>
#include <crypto/skcipher.h>
+#include <linux/types.h>
#define EIP197_HIA_VERSION_BE 0xca35
#define EIP197_HIA_VERSION_LE 0x35ca
@@ -22,6 +24,7 @@
#define EIP96_VERSION_LE 0x9f60
#define EIP201_VERSION_LE 0x36c9
#define EIP206_VERSION_LE 0x31ce
+#define EIP207_VERSION_LE 0x30cf
#define EIP197_REG_LO16(reg) (reg & 0xffff)
#define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
#define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
@@ -34,6 +37,7 @@
/* EIP206 OPTIONS ENCODING */
#define EIP206_OPT_ICE_TYPE(n) ((n>>8)&3)
+#define EIP206_OPT_OCE_TYPE(n) ((n>>10)&3)
/* EIP197 OPTIONS ENCODING */
#define EIP197_OPT_HAS_TRC BIT(31)
@@ -168,6 +172,7 @@
#define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n)))
#define EIP197_PE_ICE_PPTF_CTRL(n) (0x0e00 + (0x2000 * (n)))
#define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n)))
+#define EIP197_PE_ICE_VERSION(n) (0x0ffc + (0x2000 * (n)))
#define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n)))
#define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
#define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
@@ -176,8 +181,11 @@
#define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
#define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
#define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
+#define EIP197_PE_OCE_VERSION(n) (0x1bfc + (0x2000 * (n)))
#define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
#define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
+#define EIP197_PE_PSE_VERSION(n) (0x1efc + (0x2000 * (n)))
+#define EIP197_PE_DEBUG(n) (0x1ff4 + (0x2000 * (n)))
#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
#define EIP197_MST_CTRL 0xfff4
@@ -352,6 +360,9 @@
/* EIP197_PE_EIP96_TOKEN_CTRL2 */
#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
+/* EIP197_PE_DEBUG */
+#define EIP197_DEBUG_OCE_BYPASS BIT(1)
+
/* EIP197_STRC_CONFIG */
#define EIP197_STRC_CONFIG_INIT BIT(31)
#define EIP197_STRC_CONFIG_LARGE_REC(s) (s<<8)
@@ -776,6 +787,7 @@ enum safexcel_flags {
EIP197_PE_ARB = BIT(2),
EIP197_ICE = BIT(3),
EIP197_SIMPLE_TRC = BIT(4),
+ EIP197_OCE = BIT(5),
};
struct safexcel_hwconfig {
@@ -783,7 +795,10 @@ struct safexcel_hwconfig {
int hwver;
int hiaver;
int ppver;
+ int icever;
int pever;
+ int ocever;
+ int psever;
int hwdataw;
int hwcfsize;
int hwrfsize;
@@ -819,8 +834,16 @@ struct safexcel_context {
struct crypto_async_request *req, bool *complete,
int *ret);
struct safexcel_context_record *ctxr;
+ struct safexcel_crypto_priv *priv;
dma_addr_t ctxr_dma;
+ union {
+ __le32 le[SHA3_512_BLOCK_SIZE / 4];
+ __be32 be[SHA3_512_BLOCK_SIZE / 4];
+ u32 word[SHA3_512_BLOCK_SIZE / 4];
+ u8 byte[SHA3_512_BLOCK_SIZE];
+ } ipad, opad;
+
int ring;
bool needs_inv;
bool exit_inv;
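
/*
 * [Editor's note] The union gives every consumer a correctly typed view
 * of the same precompute buffer, replacing the old per-context __be32
 * arrays and casts, e.g.:
 *
 *	ctx->base.ipad.be[i]    big-endian words (GCM hash key)
 *	ctx->base.ipad.word[i]  native u32 words (XCBC K2/K3 fixup)
 *	ctx->base.ipad.byte     raw bytes for memcpy into context records
 *
 * and it is sized for the largest user (SHA3_512_BLOCK_SIZE).
 */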
@@ -898,8 +921,9 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
void safexcel_inv_complete(struct crypto_async_request *req, int error);
-int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
- void *istate, void *ostate);
+int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz);
/* available algorithms */
extern struct safexcel_alg_template safexcel_alg_ecb_des;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 1ac3253b7903..9bcfb79a030f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -61,8 +61,6 @@ struct safexcel_cipher_ctx {
/* All the below is AEAD specific */
u32 hash_alg;
u32 state_sz;
- __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
- __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
struct crypto_cipher *hkaes;
struct crypto_aead *fback;
@@ -375,7 +373,7 @@ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -406,11 +404,11 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_ahash_export_state istate, ostate;
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_authenc_keys keys;
struct crypto_aes_ctx aes;
int err = -EINVAL, i;
+ const char *alg;
if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
goto badkey;
@@ -465,53 +463,37 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
/* Auth key */
switch (ctx->hash_alg) {
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
- if (safexcel_hmac_setkey("safexcel-sha1", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha1";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA224:
- if (safexcel_hmac_setkey("safexcel-sha224", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha224";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA256:
- if (safexcel_hmac_setkey("safexcel-sha256", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha256";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA384:
- if (safexcel_hmac_setkey("safexcel-sha384", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha384";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA512:
- if (safexcel_hmac_setkey("safexcel-sha512", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sha512";
break;
case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
- if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
- keys.authkeylen, &istate, &ostate))
- goto badkey;
+ alg = "safexcel-sm3";
break;
default:
dev_err(priv->dev, "aead: unsupported hash algorithm\n");
goto badkey;
}
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
- (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
- memcmp(ctx->opad, ostate.state, ctx->state_sz)))
- ctx->base.needs_inv = true;
+ if (safexcel_hmac_setkey(&ctx->base, keys.authkey, keys.authkeylen,
+ alg, ctx->state_sz))
+ goto badkey;
/* Now copy the keys into the context */
for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
ctx->key_len = keys.enckeylen;
- memcpy(ctx->ipad, &istate.state, ctx->state_sz);
- memcpy(ctx->opad, &ostate.state, ctx->state_sz);
-
memzero_explicit(&keys, sizeof(keys));
return 0;
@@ -525,7 +507,7 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
struct safexcel_cipher_req *sreq,
struct safexcel_command_desc *cdesc)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ctrl_size = ctx->key_len / sizeof(u32);
cdesc->control_data.control1 = ctx->mode;
@@ -692,7 +674,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
struct skcipher_request *areq = skcipher_request_cast(base);
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc;
struct safexcel_command_desc *first_cdesc = NULL;
struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
@@ -718,10 +700,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
totlen_dst += digestsize;
memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
- ctx->ipad, ctx->state_sz);
+ &ctx->base.ipad, ctx->state_sz);
if (!ctx->xcm)
memcpy(ctx->base.ctxr->data + (ctx->key_len +
- ctx->state_sz) / sizeof(u32), ctx->opad,
+ ctx->state_sz) / sizeof(u32), &ctx->base.opad,
ctx->state_sz);
} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
(sreq->direction == SAFEXCEL_DECRYPT)) {
@@ -1020,7 +1002,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
int ring, int *commands, int *results)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
@@ -1039,7 +1021,7 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
@@ -1072,7 +1054,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
@@ -1094,7 +1076,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
struct safexcel_inv_result *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ring = ctx->base.ring;
init_completion(&result->completion);
@@ -1157,7 +1139,7 @@ static int safexcel_queue_req(struct crypto_async_request *base,
enum safexcel_cipher_direction dir)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
sreq->needs_inv = false;
@@ -1211,7 +1193,7 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct safexcel_cipher_req));
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_skcipher_send;
ctx->base.handle_result = safexcel_skcipher_handle_result;
@@ -1237,7 +1219,7 @@ static int safexcel_cipher_cra_exit(struct crypto_tfm *tfm)
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
@@ -1257,7 +1239,7 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
static void safexcel_aead_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
if (safexcel_cipher_cra_exit(tfm))
@@ -1431,7 +1413,7 @@ static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
@@ -1505,7 +1487,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key,
unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
ret = verify_skcipher_des_key(ctfm, key);
@@ -1604,7 +1586,7 @@ static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int err;
err = verify_skcipher_des3_key(ctfm, key);
@@ -1723,7 +1705,7 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
sizeof(struct safexcel_cipher_req));
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->alg = SAFEXCEL_AES; /* default */
ctx->blocksz = AES_BLOCK_SIZE;
@@ -2466,7 +2448,7 @@ static int safexcel_skcipher_aesxts_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
unsigned int keylen;
@@ -2580,7 +2562,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
u32 hashkey[AES_BLOCK_SIZE >> 2];
int ret, i;
@@ -2618,7 +2600,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
- if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
+ if (be32_to_cpu(ctx->base.ipad.be[i]) != hashkey[i]) {
ctx->base.needs_inv = true;
break;
}
@@ -2626,7 +2608,7 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
}
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] = cpu_to_be32(hashkey[i]);
+ ctx->base.ipad.be[i] = cpu_to_be32(hashkey[i]);
memzero_explicit(hashkey, AES_BLOCK_SIZE);
memzero_explicit(&aes, sizeof(aes));
@@ -2693,7 +2675,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -2714,7 +2696,7 @@ static int safexcel_aead_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
for (i = 0; i < len / sizeof(u32); i++) {
ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
- ctx->ipad[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
+ ctx->base.ipad.be[i + 2 * AES_BLOCK_SIZE / sizeof(u32)] =
cpu_to_be32(aes.key_enc[i]);
}
@@ -2815,7 +2797,7 @@ struct safexcel_alg_template safexcel_alg_ccm = {
static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
const u8 *key)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
@@ -3084,7 +3066,7 @@ static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
if (len != SM4_KEY_SIZE)
return -EINVAL;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 16a467969d8e..56d5ccb5cc00 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -20,7 +20,6 @@
struct safexcel_ahash_ctx {
struct safexcel_context base;
- struct safexcel_crypto_priv *priv;
u32 alg;
u8 key_sz;
@@ -29,9 +28,6 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
- __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
-
struct crypto_cipher *kaes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
@@ -111,7 +107,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
struct safexcel_ahash_req *req,
struct safexcel_command_desc *cdesc)
{
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
u64 count = 0;
cdesc->control_data.control0 = ctx->alg;
@@ -124,7 +120,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
*/
if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
if (req->xcbcmac)
- memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
+ memcpy(ctx->base.ctxr->data, &ctx->base.ipad, ctx->key_sz);
else
memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
@@ -206,7 +202,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
} else { /* HMAC */
/* Need outer digest for HMAC finalization */
memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
- ctx->opad, req->state_sz);
+ &ctx->base.opad, req->state_sz);
/* Single pass HMAC - no digest count */
cdesc->control_data.control0 |=
@@ -275,7 +271,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
memcpy(sreq->cache, sreq->state,
crypto_ahash_digestsize(ahash));
- memcpy(sreq->state, ctx->opad, sreq->digest_sz);
+ memcpy(sreq->state, &ctx->base.opad, sreq->digest_sz);
sreq->len = sreq->block_sz +
crypto_ahash_digestsize(ahash);
@@ -316,7 +312,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct ahash_request *areq = ahash_request_cast(async);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
@@ -379,10 +375,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
// 10- padding for XCBCMAC & CMAC
req->cache[cache_len + skip] = 0x80;
// HW will use K2 instead of K3 - compensate!
- for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ((__be32 *)req->cache)[i] ^=
- cpu_to_be32(le32_to_cpu(
- ctx->ipad[i] ^ ctx->ipad[i + 4]));
+ for (i = 0; i < AES_BLOCK_SIZE / 4; i++) {
+ u32 *cache = (void *)req->cache;
+ u32 *ipad = ctx->base.ipad.word;
+ u32 x;
+
+ x = ipad[i] ^ ipad[i + 4];
+ cache[i] ^= swab(x);
+ }
}
cache_len = AES_BLOCK_SIZE;
queued = queued + extra;
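
/*
 * [Editor's note] swab(x) computes exactly what the removed
 * cpu_to_be32(le32_to_cpu(x)) pair did: on little-endian hosts
 * le32_to_cpu() is a no-op and cpu_to_be32() byte-swaps, while on
 * big-endian hosts the swap happens in le32_to_cpu() instead. Either way
 * the result is one unconditional 32-bit byte swap, now applied to the
 * native-word view of the ipad union.
 */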
@@ -591,7 +591,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ret = safexcel_invalidate_cache(async, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->base.priv,
ctx->base.ctxr_dma, ring);
if (unlikely(ret))
return ret;
@@ -620,7 +620,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async,
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
@@ -688,7 +688,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret, ring;
req->needs_inv = false;
@@ -702,7 +702,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
/* invalidate for HMAC finish with odigest changed */
(req->finish && req->hmac &&
memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
- ctx->opad, req->state_sz))))
+ &ctx->base.opad, req->state_sz))))
/*
* We're still setting needs_inv here, even though it is
* cleared right away, because the needs_inv flag can be
@@ -803,7 +803,7 @@ static int safexcel_ahash_final(struct ahash_request *areq)
ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
req->len == sizeof(u32) && !areq->nbytes)) {
/* Zero length CRC32 */
- memcpy(areq->result, ctx->ipad, sizeof(u32));
+ memcpy(areq->result, &ctx->base.ipad, sizeof(u32));
return 0;
} else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
!areq->nbytes)) {
@@ -815,9 +815,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
/* Zero length (X)CBC/CMAC */
int i;
- for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
- ((__be32 *)areq->result)[i] =
- cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4]));//K3
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
+ u32 *result = (void *)areq->result;
+
+ /* K3 */
+ result[i] = swab(ctx->base.ipad.word[i + 4]);
+ }
areq->result[0] ^= 0x80; // 10- padding
crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
return 0;
@@ -917,7 +920,7 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
container_of(__crypto_ahash_alg(tfm->__crt_alg),
struct safexcel_alg_template, alg.ahash);
- ctx->priv = tmpl->priv;
+ ctx->base.priv = tmpl->priv;
ctx->base.send = safexcel_ahash_send;
ctx->base.handle_result = safexcel_handle_result;
ctx->fb_do_setkey = false;
@@ -956,7 +959,7 @@ static int safexcel_sha1_digest(struct ahash_request *areq)
static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = ctx->base.priv;
int ret;
/* context not allocated, skip invalidation */
@@ -1012,7 +1015,7 @@ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA1_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA1_BLOCK_SIZE;
req->processed = SHA1_BLOCK_SIZE;
@@ -1082,8 +1085,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
}
/* Avoid leaking */
- memzero_explicit(keydup, keylen);
- kfree(keydup);
+ kfree_sensitive(keydup);
if (ret)
return ret;
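For reference: kfree_sensitive() (the v5.9 rename of kzfree()) bundles exactly the zeroize-then-free pair removed above; a sketch of the open-coded equivalent, assuming the helper wipes the buffer before releasing it:

static void free_secret_copy(void *keydup, size_t keylen)
{
	/* what the two removed lines did explicitly; kfree_sensitive()
	 * derives the region size itself, so no length is needed */
	memzero_explicit(keydup, keylen);
	kfree(keydup);
}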
@@ -1135,8 +1137,9 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
return crypto_ahash_export(areq, state);
}
-int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
- void *istate, void *ostate)
+static int __safexcel_hmac_setkey(const char *alg, const u8 *key,
+ unsigned int keylen,
+ void *istate, void *ostate)
{
struct ahash_request *areq;
struct crypto_ahash *tfm;
@@ -1185,30 +1188,38 @@ free_ahash:
return ret;
}
-static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen, const char *alg,
- unsigned int state_sz)
+int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct safexcel_crypto_priv *priv = ctx->priv;
+ struct safexcel_crypto_priv *priv = base->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret;
- ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
+ ret = __safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
if (ret)
return ret;
- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
- (memcmp(ctx->ipad, istate.state, state_sz) ||
- memcmp(ctx->opad, ostate.state, state_sz)))
- ctx->base.needs_inv = true;
+ if (priv->flags & EIP197_TRC_CACHE && base->ctxr &&
+ (memcmp(&base->ipad, istate.state, state_sz) ||
+ memcmp(&base->opad, ostate.state, state_sz)))
+ base->needs_inv = true;
- memcpy(ctx->ipad, &istate.state, state_sz);
- memcpy(ctx->opad, &ostate.state, state_sz);
+ memcpy(&base->ipad, &istate.state, state_sz);
+ memcpy(&base->opad, &ostate.state, state_sz);
return 0;
}
+static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen, const char *alg,
+ unsigned int state_sz)
+{
+ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ return safexcel_hmac_setkey(&ctx->base, key, keylen, alg, state_sz);
+}
+
static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -1377,7 +1388,7 @@ static int safexcel_hmac_sha224_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
@@ -1449,7 +1460,7 @@ static int safexcel_hmac_sha256_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA256_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA256_BLOCK_SIZE;
req->processed = SHA256_BLOCK_SIZE;
@@ -1635,7 +1646,7 @@ static int safexcel_hmac_sha512_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
@@ -1707,7 +1718,7 @@ static int safexcel_hmac_sha384_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SHA512_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SHA512_BLOCK_SIZE;
req->processed = SHA512_BLOCK_SIZE;
@@ -1829,7 +1840,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, MD5_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = MD5_HMAC_BLOCK_SIZE;
req->processed = MD5_HMAC_BLOCK_SIZE;
@@ -1900,7 +1911,7 @@ static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
int ret = safexcel_ahash_cra_init(tfm);
/* Default 'key' is all zeroes */
- memset(ctx->ipad, 0, sizeof(u32));
+ memset(&ctx->base.ipad, 0, sizeof(u32));
return ret;
}
@@ -1912,7 +1923,7 @@ static int safexcel_crc32_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from loaded key */
- req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
+ req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
/* Set processed to non-zero to enable invalidation detection */
req->len = sizeof(u32);
req->processed = sizeof(u32);
@@ -1934,7 +1945,7 @@ static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
if (keylen != sizeof(u32))
return -EINVAL;
- memcpy(ctx->ipad, key, sizeof(u32));
+ memcpy(&ctx->base.ipad, key, sizeof(u32));
return 0;
}
@@ -1984,7 +1995,7 @@ static int safexcel_cbcmac_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from loaded keys */
- memcpy(req->state, ctx->ipad, ctx->key_sz);
+ memcpy(req->state, &ctx->base.ipad, ctx->key_sz);
/* Set processed to non-zero to enable invalidation detection */
req->len = AES_BLOCK_SIZE;
req->processed = AES_BLOCK_SIZE;
@@ -2009,9 +2020,9 @@ static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
+ memset(&ctx->base.ipad, 0, 2 * AES_BLOCK_SIZE);
for (i = 0; i < len / sizeof(u32); i++)
- ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
+ ctx->base.ipad.be[i + 8] = cpu_to_be32(aes.key_enc[i]);
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
@@ -2093,8 +2104,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
"\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] =
- cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
+ ctx->base.ipad.word[i] = swab(key_tmp[i]);
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
@@ -2177,8 +2187,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->ipad[i + 8] =
- cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
+ ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
/* precompute the CMAC key material */
crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
@@ -2209,7 +2218,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
/* end of code borrowed from crypto/cmac.c */
for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
+ ctx->base.ipad.be[i] = cpu_to_be32(((u32 *)consts)[i]);
if (len == AES_KEYSIZE_192) {
ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
@@ -2331,7 +2340,7 @@ static int safexcel_hmac_sm3_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Start from ipad precompute */
- memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
+ memcpy(req->state, &ctx->base.ipad, SM3_DIGEST_SIZE);
/* Already processed the key^ipad part now! */
req->len = SM3_BLOCK_SIZE;
req->processed = SM3_BLOCK_SIZE;
@@ -2424,11 +2433,11 @@ static int safexcel_sha3_fbcheck(struct ahash_request *req)
/* Set fallback cipher HMAC key */
u8 key[SHA3_224_BLOCK_SIZE];
- memcpy(key, ctx->ipad,
+ memcpy(key, &ctx->base.ipad,
crypto_ahash_blocksize(ctx->fback) / 2);
memcpy(key +
crypto_ahash_blocksize(ctx->fback) / 2,
- ctx->opad,
+ &ctx->base.opad,
crypto_ahash_blocksize(ctx->fback) / 2);
ret = crypto_ahash_setkey(ctx->fback, key,
crypto_ahash_blocksize(ctx->fback));
@@ -2801,7 +2810,7 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
* first using our fallback cipher
*/
ret = crypto_shash_digest(ctx->shdesc, key, keylen,
- (u8 *)ctx->ipad);
+ ctx->base.ipad.byte);
keylen = crypto_shash_digestsize(ctx->shpre);
/*
@@ -2810,8 +2819,8 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
*/
if (keylen > crypto_ahash_blocksize(tfm) / 2)
		/* Buffers overlap, need to use memmove instead of memcpy! */
- memmove(ctx->opad,
- (u8 *)ctx->ipad +
+ memmove(&ctx->base.opad,
+ ctx->base.ipad.byte +
crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
} else {
@@ -2821,11 +2830,11 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
* to match the existing HMAC driver infrastructure.
*/
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
- memcpy(ctx->ipad, key, keylen);
+ memcpy(&ctx->base.ipad, key, keylen);
} else {
- memcpy(ctx->ipad, key,
+ memcpy(&ctx->base.ipad, key,
crypto_ahash_blocksize(tfm) / 2);
- memcpy(ctx->opad,
+ memcpy(&ctx->base.opad,
key + crypto_ahash_blocksize(tfm) / 2,
keylen - crypto_ahash_blocksize(tfm) / 2);
}
@@ -2833,11 +2842,11 @@ static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
/* Pad key with zeroes */
if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
- memset((u8 *)ctx->ipad + keylen, 0,
+ memset(ctx->base.ipad.byte + keylen, 0,
crypto_ahash_blocksize(tfm) / 2 - keylen);
- memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
+ memset(&ctx->base.opad, 0, crypto_ahash_blocksize(tfm) / 2);
} else {
- memset((u8 *)ctx->opad + keylen -
+ memset(ctx->base.opad.byte + keylen -
crypto_ahash_blocksize(tfm) / 2, 0,
crypto_ahash_blocksize(tfm) - keylen);
}
@@ -2856,7 +2865,7 @@ static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_224_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_224_BLOCK_SIZE;
req->processed = SHA3_224_BLOCK_SIZE;
@@ -2927,7 +2936,7 @@ static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_256_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_256_BLOCK_SIZE;
req->processed = SHA3_256_BLOCK_SIZE;
@@ -2998,7 +3007,7 @@ static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_384_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_384_BLOCK_SIZE;
req->processed = SHA3_384_BLOCK_SIZE;
@@ -3069,7 +3078,7 @@ static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
memset(req, 0, sizeof(*req));
/* Copy (half of) the key */
- memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
+ memcpy(req->state, &ctx->base.ipad, SHA3_512_BLOCK_SIZE / 2);
/* Start of HMAC should have len == processed == blocksize */
req->len = SHA3_512_BLOCK_SIZE;
req->processed = SHA3_512_BLOCK_SIZE;
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c
index e454c3d44f07..90f15032c8df 100644
--- a/drivers/crypto/inside-secure/safexcel_ring.c
+++ b/drivers/crypto/inside-secure/safexcel_ring.c
@@ -236,8 +236,8 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->particle_size = len;
rdesc->rsvd0 = 0;
- rdesc->descriptor_overflow = 0;
- rdesc->buffer_overflow = 0;
+ rdesc->descriptor_overflow = 1; /* assume error */
+ rdesc->buffer_overflow = 1; /* assume error */
rdesc->last_seg = last;
rdesc->first_seg = first;
rdesc->result_size = EIP197_RD64_RESULT_SIZE;
@@ -245,9 +245,10 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *pri
rdesc->data_lo = lower_32_bits(data);
rdesc->data_hi = upper_32_bits(data);
- /* Clear length & error code in result token */
+ /* Clear length in result token */
rtoken->packet_length = 0;
- rtoken->error_code = 0;
+	/* Assume errors - HW will clear this if no error occurred */
+ rtoken->error_code = 0x7fff;
return rdesc;
}
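For reference: pre-marking the descriptor as erroneous means a result the engine never fully wrote back still reads as failed instead of silently passing. A sketch of the completion-side idea (the struct and helper below are illustrative, not driver API):

struct hypothetical_result_token {
	u32 packet_length;
	u32 error_code;
};

/* a token whose error_code is still 0x7fff was never cleared by the
 * hardware, so treat the descriptor as unprocessed/failed */
static bool rtoken_was_processed(const struct hypothetical_result_token *rtoken)
{
	return rtoken->error_code != 0x7fff;
}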
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f478bb0a566a..276012e7c482 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -528,7 +528,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) {
dma_free_coherent(dev,
- NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
+ NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
}
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index d63bca9718dc..06211858bf2e 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -437,7 +437,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
- struct resource *res;
int irq, ret, i, cpu;
u32 sram_size;
@@ -475,8 +474,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
spin_lock_init(&cesa->lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- cesa->regs = devm_ioremap_resource(dev, res);
+ cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
if (IS_ERR(cesa->regs))
return PTR_ERR(cesa->regs);
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index 0c9cbb681e49..fabfaaccca87 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -2,12 +2,10 @@
#ifndef __MARVELL_CESA_H__
#define __MARVELL_CESA_H__
-#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
+#include <linux/dma-direction.h>
#include <linux/dmapool.h>
#define CESA_ENGINE_OFF(i) (((i) * 0x2000))
@@ -239,7 +237,7 @@ struct mv_cesa_sec_accel_desc {
* Context associated to a cipher operation.
*/
struct mv_cesa_skcipher_op_ctx {
- u32 key[8];
+ __le32 key[8];
u32 iv[4];
};
@@ -252,7 +250,7 @@ struct mv_cesa_skcipher_op_ctx {
*/
struct mv_cesa_hash_op_ctx {
u32 iv[16];
- u32 hash[8];
+ __le32 hash[8];
};
/**
@@ -300,8 +298,14 @@ struct mv_cesa_op_ctx {
*/
struct mv_cesa_tdma_desc {
__le32 byte_cnt;
- __le32 src;
- __le32 dst;
+ union {
+ __le32 src;
+ dma_addr_t src_dma;
+ };
+ union {
+ __le32 dst;
+ dma_addr_t dst_dma;
+ };
__le32 next_dma;
/* Software state */
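For reference: the unions let software stage a full dma_addr_t (which may be 64 bits wide) in the same storage the engine later reads as a 32-bit little-endian word. A sketch of the staging pattern, assuming the SRAM-relative fixup that mv_cesa_dma_prepare() applies:

static void stage_src_sketch(struct mv_cesa_tdma_desc *tdma,
			     struct mv_cesa_engine *engine,
			     dma_addr_t dma_handle)
{
	tdma->src_dma = dma_handle;	/* CPU view, possibly 64-bit */
	/* device view: 32-bit LE, written over the same storage */
	tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);
}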
@@ -506,7 +510,7 @@ struct mv_cesa_hash_ctx {
*/
struct mv_cesa_hmac_ctx {
struct mv_cesa_ctx base;
- u32 iv[16];
+ __be32 iv[16];
};
/**
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 45b4d7a29833..b4a6ff9dd6d5 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -11,6 +11,8 @@
#include <crypto/aes.h>
#include <crypto/internal/des.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include "cesa.h"
@@ -262,8 +264,7 @@ static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
remaining = (ctx->aes.key_length - 16) / 4;
offset = ctx->aes.key_length + 24 - remaining;
for (i = 0; i < remaining; i++)
- ctx->aes.key_dec[4 + i] =
- cpu_to_le32(ctx->aes.key_enc[offset + i]);
+ ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];
return 0;
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index f2a2fc111164..add7ea011c98 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -12,6 +12,8 @@
#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include "cesa.h"
@@ -222,9 +224,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
CESA_SA_DATA_SRAM_OFFSET + len,
new_cache_ptr);
} else {
- len += mv_cesa_ahash_pad_req(creq,
- engine->sram + len +
- CESA_SA_DATA_SRAM_OFFSET);
+ i = mv_cesa_ahash_pad_req(creq, creq->cache);
+ len += i;
+ memcpy_toio(engine->sram + len +
+ CESA_SA_DATA_SRAM_OFFSET,
+ creq->cache, i);
}
if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
@@ -342,7 +346,7 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
*/
data = creq->base.chain.last->op->ctx.hash.hash;
for (i = 0; i < digsize / 4; i++)
- creq->state[i] = cpu_to_le32(data[i]);
+ creq->state[i] = le32_to_cpu(data[i]);
memcpy(ahashreq->result, data, digsize);
} else {
@@ -1265,10 +1269,10 @@ static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
- ctx->iv[i] = be32_to_cpu(istate.hash[i]);
+ ctx->iv[i] = cpu_to_be32(istate.hash[i]);
for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);
return 0;
}
@@ -1336,10 +1340,10 @@ static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
- ctx->iv[i] = be32_to_cpu(istate.state[i]);
+ ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
@@ -1394,10 +1398,10 @@ static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
return ret;
for (i = 0; i < ARRAY_SIZE(istate.state); i++)
- ctx->iv[i] = be32_to_cpu(istate.state[i]);
+ ctx->iv[i] = cpu_to_be32(istate.state[i]);
for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
- ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);
+ ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
return 0;
}
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index b81ee276fe0e..5d9c48fb72b2 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -83,10 +83,10 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
- tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);
+ tdma->dst = cpu_to_le32(tdma->dst_dma + engine->sram_dma);
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
- tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
+ tdma->src = cpu_to_le32(tdma->src_dma + engine->sram_dma);
if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
@@ -114,7 +114,7 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
*/
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
- last->next_dma = dreq->chain.first->cur_dma;
+ last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
}
}
@@ -237,8 +237,8 @@ int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
return -EIO;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
- tdma->src = src;
- tdma->dst = op_desc->src;
+ tdma->src_dma = src;
+ tdma->dst_dma = op_desc->src_dma;
tdma->op = op_desc->op;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
@@ -272,7 +272,7 @@ struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
tdma->op = op;
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
tdma->src = cpu_to_le32(dma_handle);
- tdma->dst = CESA_SA_CFG_SRAM_OFFSET;
+ tdma->dst_dma = CESA_SA_CFG_SRAM_OFFSET;
tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;
return op;
@@ -289,8 +289,8 @@ int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
return PTR_ERR(tdma);
tdma->byte_cnt = cpu_to_le32(size | BIT(31));
- tdma->src = src;
- tdma->dst = dst;
+ tdma->src_dma = src;
+ tdma->dst_dma = dst;
flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
tdma->flags = flags | CESA_TDMA_DATA;
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index cc103b1bc224..40b482198ebc 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -824,18 +824,12 @@ static ssize_t eng_grp_info_show(struct device *dev,
static int create_sysfs_eng_grps_info(struct device *dev,
struct otx_cpt_eng_grp_info *eng_grp)
{
- int ret;
-
eng_grp->info_attr.show = eng_grp_info_show;
eng_grp->info_attr.store = NULL;
eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
eng_grp->info_attr.attr.mode = 0440;
sysfs_attr_init(&eng_grp->info_attr.attr);
- ret = device_create_file(dev, &eng_grp->info_attr);
- if (ret)
- return ret;
-
- return 0;
+ return device_create_file(dev, &eng_grp->info_attr);
}
static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 4ad3571ab6af..7323066724c3 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -126,7 +126,7 @@ struct mtk_aes_ctx {
struct mtk_aes_ctr_ctx {
struct mtk_aes_base_ctx base;
- u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
+ __be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
size_t offset;
struct scatterlist src[2];
struct scatterlist dst[2];
@@ -242,22 +242,6 @@ static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
sg->length += dma->remainder;
}
-static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
-{
- int i;
-
- for (i = 0; i < SIZE_IN_WORDS(size); i++)
- dst[i] = cpu_to_le32(src[i]);
-}
-
-static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
-{
- int i;
-
- for (i = 0; i < SIZE_IN_WORDS(size); i++)
- dst[i] = cpu_to_be32(src[i]);
-}
-
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes,
int err)
@@ -321,7 +305,7 @@ static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
/* Prepare enough space for authenticated tag */
if (aes->flags & AES_FLAGS_GCM)
- res->hdr += AES_BLOCK_SIZE;
+ le32_add_cpu(&res->hdr, AES_BLOCK_SIZE);
/*
* Make sure that all changes to the DMA ring are done before we
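For reference: le32_add_cpu() adds a CPU-order integer to a value stored little-endian; its semantics match this open-coded sketch (per the generic byteorder helpers):

static inline void le32_add_cpu_sketch(__le32 *var, u32 val)
{
	/* read as CPU order, add, store back little-endian */
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}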
@@ -449,10 +433,10 @@ static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
return;
}
- mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
- AES_BLOCK_SIZE);
+ memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE);
ctr:
- info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
+ le32_add_cpu(&info->tfm[0],
+ le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE))));
info->tfm[1] |= AES_TFM_FULL_IV;
info->cmd[cnt++] = AES_CMD2;
ecb:
@@ -601,8 +585,7 @@ static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));
/* Write IVs into transform state buffer. */
- mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
- AES_BLOCK_SIZE);
+ memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE);
if (unlikely(fragmented)) {
/*
@@ -654,7 +637,7 @@ static int mtk_aes_setkey(struct crypto_skcipher *tfm,
}
ctx->keylen = SIZE_IN_WORDS(keylen);
- mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
+ memcpy(ctx->key, key, keylen);
return 0;
}
@@ -848,7 +831,7 @@ mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
struct mtk_aes_rec *aes)
{
- u32 status = cryp->ring[aes->id]->res_prev->ct;
+ __le32 status = cryp->ring[aes->id]->res_prev->ct;
return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
-EBADMSG : 0);
@@ -866,7 +849,7 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
u32 cnt = 0;
- ctx->ct_hdr = AES_CT_CTRL_HDR | len;
+ ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
@@ -889,8 +872,8 @@ static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
AES_TFM_ENC_HASH;
- mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
- AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
+ memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE),
+ req->iv, ivsize);
}
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
@@ -994,9 +977,13 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};
+ union {
+ u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)];
+ u8 x8[AES_BLOCK_SIZE];
+ } hash = {};
struct crypto_aes_ctx aes_ctx;
int err;
+ int i;
switch (keylen) {
case AES_KEYSIZE_128:
@@ -1019,12 +1006,16 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (err)
return err;
- aes_encrypt(&aes_ctx, hash, hash);
+ aes_encrypt(&aes_ctx, hash.x8, hash.x8);
memzero_explicit(&aes_ctx, sizeof(aes_ctx));
- mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
- mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash,
- AES_BLOCK_SIZE);
+ memcpy(ctx->key, key, keylen);
+
+	/* Byte-swap each word, as the removed mtk_aes_write_state_be()
+	 * helper did: the hash subkey is stored as big-endian words. */
+ for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++)
+ hash.x32[i] = swab32(hash.x32[i]);
+
+ memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE);
return 0;
}
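For reference: on a little-endian host (the case for these MediaTek SoCs) the swab32() loop stores the same bytes the removed helper produced, since cpu_to_be32() is a byte swap there. A sketch of what the old helper did:

static void write_state_be_sketch(__be32 *dst, const u32 *src, int words)
{
	int i;

	/* store each word big-endian; on LE this equals swab32() */
	for (i = 0; i < words; i++)
		dst[i] = cpu_to_be32(src[i]);
}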
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 7e3ad085b5bd..9d878620e5c9 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -185,8 +185,6 @@ static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
- int err;
-
/* Reset DSE/DFE and correct system priorities for all rings. */
writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
writel(0, cryp->base + DFE_PRIO_0);
@@ -200,11 +198,7 @@ static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
writel(0, cryp->base + DSE_PRIO_2);
writel(0, cryp->base + DSE_PRIO_3);
- err = mtk_dfe_dse_state_check(cryp);
- if (err)
- return err;
-
- return 0;
+ return mtk_dfe_dse_state_check(cryp);
}
static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
@@ -442,7 +436,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
- int i, err = ENOMEM;
+ int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -469,14 +463,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0;
err_cleanup:
- for (; i--; ) {
+ do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
- }
- return err;
+ } while (i--);
+ return -ENOMEM;
}
static int mtk_crypto_probe(struct platform_device *pdev)
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index da3f0b8814aa..3d5d7d68b03b 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -239,7 +239,7 @@ static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
u32 index, padlen;
- u64 bits[2];
+ __be64 bits[2];
u64 size = ctx->digcnt;
size += ctx->bufcnt;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index d8aec5153b21..3642bf83d809 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -249,7 +249,7 @@ static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
struct n2_ahash_alg {
struct list_head entry;
const u8 *hash_zero;
- const u32 *hash_init;
+ const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
u8 auth_type;
@@ -662,7 +662,6 @@ struct n2_skcipher_context {
u8 aes[AES_MAX_KEY_SIZE];
u8 des[DES_KEY_SIZE];
u8 des3[3 * DES_KEY_SIZE];
- u8 arc4[258]; /* S-box, X, Y */
} key;
};
@@ -789,36 +788,6 @@ static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
-static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- u8 *s = ctx->key.arc4;
- u8 *x = s + 256;
- u8 *y = x + 1;
- int i, j, k;
-
- ctx->enc_type = n2alg->enc_type;
-
- j = k = 0;
- *x = 0;
- *y = 0;
- for (i = 0; i < 256; i++)
- s[i] = i;
- for (i = 0; i < 256; i++) {
- u8 a = s[i];
- j = (j + key[k] + a) & 0xff;
- s[i] = s[j];
- s[j] = a;
- if (++k >= keylen)
- k = 0;
- }
-
- return 0;
-}
-
static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
int this_len = nbytes;
@@ -1122,21 +1091,6 @@ struct n2_skcipher_tmpl {
};
static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* ARC4: only ECB is supported (chaining bits ignored) */
- { .name = "ecb(arc4)",
- .drv_name = "ecb-arc4",
- .block_size = 1,
- .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 1,
- .max_keysize = 256,
- .setkey = n2_arc4_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
-
/* DES: ECB CBC and CFB are supported */
{ .name = "ecb(des)",
.drv_name = "ecb-des",
@@ -1271,7 +1225,7 @@ static LIST_HEAD(skcipher_algs);
struct n2_hash_tmpl {
const char *name;
const u8 *hash_zero;
- const u32 *hash_init;
+ const u8 *hash_init;
u8 hw_op_hashsz;
u8 digest_size;
u8 block_size;
@@ -1279,7 +1233,7 @@ struct n2_hash_tmpl {
u8 hmac_type;
};
-static const u32 n2_md5_init[MD5_HASH_WORDS] = {
+static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
cpu_to_le32(MD5_H0),
cpu_to_le32(MD5_H1),
cpu_to_le32(MD5_H2),
@@ -1300,7 +1254,7 @@ static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
static const struct n2_hash_tmpl hash_tmpls[] = {
{ .name = "md5",
.hash_zero = md5_zero_message_hash,
- .hash_init = n2_md5_init,
+ .hash_init = (u8 *)n2_md5_init,
.auth_type = AUTH_TYPE_MD5,
.hmac_type = AUTH_TYPE_HMAC_MD5,
.hw_op_hashsz = MD5_DIGEST_SIZE,
@@ -1308,7 +1262,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = MD5_HMAC_BLOCK_SIZE },
{ .name = "sha1",
.hash_zero = sha1_zero_message_hash,
- .hash_init = n2_sha1_init,
+ .hash_init = (u8 *)n2_sha1_init,
.auth_type = AUTH_TYPE_SHA1,
.hmac_type = AUTH_TYPE_HMAC_SHA1,
.hw_op_hashsz = SHA1_DIGEST_SIZE,
@@ -1316,7 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = SHA1_BLOCK_SIZE },
{ .name = "sha256",
.hash_zero = sha256_zero_message_hash,
- .hash_init = n2_sha256_init,
+ .hash_init = (u8 *)n2_sha256_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_HMAC_SHA256,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
@@ -1324,7 +1278,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = {
.block_size = SHA256_BLOCK_SIZE },
{ .name = "sha224",
.hash_zero = sha224_zero_message_hash,
- .hash_init = n2_sha224_init,
+ .hash_init = (u8 *)n2_sha224_init,
.auth_type = AUTH_TYPE_SHA256,
.hmac_type = AUTH_TYPE_RESERVED,
.hw_op_hashsz = SHA256_DIGEST_SIZE,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 954d703f2981..a3b38d2c92e7 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -39,6 +39,7 @@
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
+#include <crypto/engine.h>
#define MD5_DIGEST_SIZE 16
@@ -100,7 +101,6 @@
#define DEFAULT_AUTOSUSPEND_DELAY 1000
/* mostly device flags */
-#define FLAGS_BUSY 0
#define FLAGS_FINAL 1
#define FLAGS_DMA_ACTIVE 2
#define FLAGS_OUTPUT_READY 3
@@ -144,7 +144,7 @@ struct omap_sham_dev;
struct omap_sham_reqctx {
struct omap_sham_dev *dd;
unsigned long flags;
- unsigned long op;
+ u8 op;
u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
@@ -168,6 +168,7 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
+ struct crypto_engine_ctx enginectx;
unsigned long flags;
/* fallback stuff */
@@ -219,7 +220,6 @@ struct omap_sham_dev {
struct device *dev;
void __iomem *io_base;
int irq;
- spinlock_t lock;
int err;
struct dma_chan *dma_lch;
struct tasklet_struct done_task;
@@ -230,6 +230,7 @@ struct omap_sham_dev {
int fallback_sz;
struct crypto_queue queue;
struct ahash_request *req;
+ struct crypto_engine *engine;
const struct omap_sham_pdata *pdata;
};
@@ -245,6 +246,9 @@ static struct omap_sham_drv sham = {
.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
+static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
+static void omap_sham_finish_req(struct ahash_request *req, int err);
+
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
return __raw_readl(dd->io_base + offset);
@@ -456,6 +460,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask;
+ if (likely(ctx->digcnt))
+ omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
+
/*
* Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits
@@ -854,13 +861,16 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
return 0;
}
-static int omap_sham_prepare_request(struct ahash_request *req, bool update)
+static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
int bs;
int ret;
unsigned int nbytes;
bool final = rctx->flags & BIT(FLAGS_FINUP);
+ bool update = rctx->op == OP_UPDATE;
int hash_later;
bs = get_block_size(rctx);
@@ -1021,7 +1031,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err;
bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
- !(dd->flags & BIT(FLAGS_HUGE));
+ !(dd->flags & BIT(FLAGS_HUGE));
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
ctx->total, ctx->digcnt, final);
@@ -1069,6 +1079,39 @@ static int omap_sham_final_req(struct omap_sham_dev *dd)
return err;
}
+static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = container_of(areq, struct ahash_request,
+ base);
+ struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+ struct omap_sham_dev *dd = ctx->dd;
+ int err;
+ bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
+ !(dd->flags & BIT(FLAGS_HUGE));
+
+ dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
+ ctx->op, ctx->total, ctx->digcnt, final);
+
+ dd->req = req;
+
+ err = omap_sham_hw_init(dd);
+ if (err)
+ return err;
+
+ if (ctx->digcnt)
+ dd->pdata->copy_hash(req, 0);
+
+ if (ctx->op == OP_UPDATE)
+ err = omap_sham_update_req(dd);
+ else if (ctx->op == OP_FINAL)
+ err = omap_sham_final_req(dd);
+
+ if (err != -EINPROGRESS)
+ omap_sham_finish_req(req, err);
+
+ return 0;
+}
+
static int omap_sham_finish_hmac(struct ahash_request *req)
{
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
@@ -1116,25 +1159,20 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
ctx->sg = NULL;
- dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
+ dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
+ BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
+ BIT(FLAGS_OUTPUT_READY));
+
+ if (!err)
+ dd->pdata->copy_hash(req, 1);
if (dd->flags & BIT(FLAGS_HUGE)) {
- dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
- BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
- omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
- if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
- err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS &&
- (ctx->flags & BIT(FLAGS_FINUP)))
- err = omap_sham_final_req(dd);
- } else if (ctx->op == OP_FINAL) {
- omap_sham_final_req(dd);
- }
+ /* Re-enqueue the request */
+ omap_sham_enqueue(req, ctx->op);
return;
}
if (!err) {
- dd->pdata->copy_hash(req, 1);
if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
@@ -1142,7 +1180,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
}
/* atomic operation is not needed here */
- dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+ dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
pm_runtime_mark_last_busy(dd->dev);
@@ -1150,81 +1188,13 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
ctx->offset = 0;
- if (req->base.complete)
- req->base.complete(&req->base, err);
+ crypto_finalize_hash_request(dd->engine, req, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
struct ahash_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_sham_reqctx *ctx;
- unsigned long flags;
- int err = 0, ret = 0;
-
-retry:
- spin_lock_irqsave(&dd->lock, flags);
- if (req)
- ret = ahash_enqueue_request(&dd->queue, req);
- if (test_bit(FLAGS_BUSY, &dd->flags)) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- set_bit(FLAGS_BUSY, &dd->flags);
- spin_unlock_irqrestore(&dd->lock, flags);
-
- if (!async_req)
- return ret;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- req = ahash_request_cast(async_req);
- dd->req = req;
- ctx = ahash_request_ctx(req);
-
- err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
- if (err || !ctx->total)
- goto err1;
-
- dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
- ctx->op, req->nbytes);
-
- err = omap_sham_hw_init(dd);
- if (err)
- goto err1;
-
- if (ctx->digcnt)
- /* request has changed - restore hash */
- dd->pdata->copy_hash(req, 0);
-
- if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
- err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
- /* no final() after finup() */
- err = omap_sham_final_req(dd);
- } else if (ctx->op == OP_FINAL) {
- err = omap_sham_final_req(dd);
- }
-err1:
- dev_dbg(dd->dev, "exit, err: %d\n", err);
-
- if (err != -EINPROGRESS) {
- /* done_task will not finish it, so do it here */
- omap_sham_finish_req(req, err);
- req = NULL;
-
- /*
- * Execute next request immediately if there is anything
- * in queue.
- */
- goto retry;
- }
-
- return ret;
+ return crypto_transfer_hash_request_to_engine(dd->engine, req);
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -1394,6 +1364,10 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
+ tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
+ tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
return 0;
}
@@ -1757,11 +1731,6 @@ static void omap_sham_done_task(unsigned long data)
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- omap_sham_handle_queue(dd, NULL);
- return;
- }
-
if (test_bit(FLAGS_CPU, &dd->flags)) {
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
goto finish;
@@ -1786,20 +1755,12 @@ finish:
dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
omap_sham_finish_req(dd->req, err);
-
- /* If we are not busy, process next req */
- if (!test_bit(FLAGS_BUSY, &dd->flags))
- omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
- if (!test_bit(FLAGS_BUSY, &dd->flags)) {
- dev_warn(dd->dev, "Interrupt when no active requests.\n");
- } else {
- set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
- }
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
+ tasklet_schedule(&dd->done_task);
return IRQ_HANDLED;
}
@@ -2072,7 +2033,6 @@ static ssize_t queue_len_store(struct device *dev,
struct omap_sham_dev *dd = dev_get_drvdata(dev);
ssize_t status;
long value;
- unsigned long flags;
status = kstrtol(buf, 0, &value);
if (status)
@@ -2086,9 +2046,7 @@ static ssize_t queue_len_store(struct device *dev,
* than current size, it will just not accept new entries until
	 * it has shrunk enough.
*/
- spin_lock_irqsave(&dd->lock, flags);
dd->queue.max_qlen = value;
- spin_unlock_irqrestore(&dd->lock, flags);
return size;
}
@@ -2125,7 +2083,6 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- spin_lock_init(&dd->lock);
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
@@ -2190,6 +2147,16 @@ static int omap_sham_probe(struct platform_device *pdev)
list_add_tail(&dd->list, &sham.dev_list);
spin_unlock(&sham.lock);
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine) {
+ err = -ENOMEM;
+ goto err_engine;
+ }
+
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine_start;
+
for (i = 0; i < dd->pdata->algs_info_size; i++) {
if (dd->pdata->algs_info[i].registered)
break;
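For reference: the conversion relies on the standard crypto_engine lifecycle; a condensed sketch of the setup this probe now performs (rt flag as in the code above):

static int engine_setup_sketch(struct omap_sham_dev *dd, struct device *dev)
{
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine)
		return -ENOMEM;
	return crypto_engine_start(dd->engine);
	/* requests then flow through
	 * crypto_transfer_hash_request_to_engine(dd->engine, req) and
	 * complete via crypto_finalize_hash_request(dd->engine, req, err);
	 * teardown calls crypto_engine_exit(dd->engine). */
}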
@@ -2223,6 +2190,12 @@ err_algs:
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+err_engine_start:
+ crypto_engine_exit(dd->engine);
+err_engine:
+ spin_lock(&sham.lock);
+ list_del(&dd->list);
+ spin_unlock(&sham.lock);
err_pm:
pm_runtime_disable(dev);
if (!dd->polling_mode)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 62c6fe88b212..1be549a07a21 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index dac6eb37fff9..fb34bf92861d 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1685,11 +1685,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put;
}
- ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (ret)
- goto err_clk_disable;
-
-
/*
* Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be
@@ -1697,6 +1692,10 @@ static int spacc_probe(struct platform_device *pdev)
*/
engine->stat_irq_thresh = (engine->fifo_sz / 2);
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
+
/*
* Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 020d099409e5..ed0e8e33fe4b 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c3xxx_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C3XXX_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
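For reference: PCI_VDEVICE() collapses the four match fields into one initializer; its definition in <linux/pci.h> is roughly:

#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID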
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
adf_clean_hw_data_c3xxx(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -203,7 +198,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 11039fe55f61..456979b136a2 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c3xxxvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C3XXXIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C3XXXIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C3XXXIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 4ba9c14383af..d8e7c9c25590 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c62x_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
adf_clean_hw_data_c62x(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -203,7 +198,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b8b021d54bb5..b9810f79eb84 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_c62xvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_C62XIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_C62X_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_C62XIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
adf_clean_hw_data_c62xiov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_C62XIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index c1db8c26afb6..06952ece53d9 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -15,12 +15,6 @@
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
-#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
-#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
-#define ADF_C62X_PCI_DEVICE_ID 0x37c8
-#define ADF_C62XIOV_PCI_DEVICE_ID 0x37c9
-#define ADF_C3XXX_PCI_DEVICE_ID 0x19e2
-#define ADF_C3XXXIOV_PCI_DEVICE_ID 0x19e3
#define ADF_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 32102e27e559..d2ae293d0df6 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -175,7 +175,6 @@ static const struct pci_error_handlers adf_err_handler = {
/**
 * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
* @accel_dev: Pointer to acceleration device.
- * @adf: PCI device driver owning the given acceleration device.
*
 * Function enables PCI Advanced Error Reporting for the
* QAT acceleration device accel_dev.
@@ -183,11 +182,12 @@ static const struct pci_error_handlers adf_err_handler = {
*
* Return: 0 on success, error code otherwise.
*/
-int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+int adf_enable_aer(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ struct pci_driver *pdrv = pdev->driver;
- adf->err_handler = &adf_err_handler;
+ pdrv->err_handler = &adf_err_handler;
pci_enable_pcie_error_reporting(pdev);
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index ac462796cefc..22ae32838113 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -52,24 +52,7 @@ static const struct seq_operations qat_dev_cfg_sops = {
.show = qat_dev_cfg_show
};
-static int qat_dev_cfg_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &qat_dev_cfg_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations qat_dev_cfg_fops = {
- .open = qat_dev_cfg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(qat_dev_cfg);
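For reference: DEFINE_SEQ_ATTRIBUTE() (<linux/seq_file.h>, added in v5.9) generates the open helper and file_operations removed above; it expands to roughly:

static int qat_dev_cfg_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &qat_dev_cfg_sops);

	if (!ret && inode->i_private) {
		struct seq_file *seq_f = file->private_data;

		seq_f->private = inode->i_private;
	}
	return ret;
}

static const struct file_operations qat_dev_cfg_fops = {
	.owner = THIS_MODULE,
	.open = qat_dev_cfg_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};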
/**
* adf_cfg_dev_add() - Create an acceleration device configuration table.
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index ebfcb4ea618d..f22342f612c1 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ void adf_ae_fw_release(struct adf_accel_dev *accel_dev);
int adf_ae_start(struct adf_accel_dev *accel_dev);
int adf_ae_stop(struct adf_accel_dev *accel_dev);
-int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+int adf_enable_aer(struct adf_accel_dev *accel_dev);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
void adf_reset_sbr(struct adf_accel_dev *accel_dev);
void adf_reset_flr(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 71d0c44aacca..eb9b3be9d8eb 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -416,8 +416,6 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
static int __init adf_register_ctl_device_driver(void)
{
- mutex_init(&adf_ctl_lock);
-
if (adf_chr_drv_create())
goto err_chr_dev;
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 72753af056b3..92ec035576df 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -285,7 +285,7 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
- * @accel_dev: Pointer to pci device.
+ * @pci_dev: Pointer to pci device.
*
* Function returns acceleration device associated with the given pci device.
* To be used by QAT device specific drivers.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 8827aa139f96..963b2bea78f2 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -173,10 +173,14 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov);
/**
* adf_sriov_configure() - Enable SRIOV for the device
* @pdev: Pointer to pci device.
+ * @numvfs: Number of virtual functions (VFs) to enable.
+ *
+ * Note that the @numvfs parameter is ignored and all VFs supported by the
+ * device are enabled due to the design of the hardware.
*
* Function enables SRIOV for the pci device.
*
- * Return: 0 on success, error code otherwise.
+ * Return: number of VFs enabled on success, error code otherwise.
*/
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index 2a2eccbf56ec..dac25ba47260 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -77,31 +77,14 @@ static void adf_ring_stop(struct seq_file *sfile, void *v)
mutex_unlock(&ring_read_lock);
}
-static const struct seq_operations adf_ring_sops = {
+static const struct seq_operations adf_ring_debug_sops = {
.start = adf_ring_start,
.next = adf_ring_next,
.stop = adf_ring_stop,
.show = adf_ring_show
};
-static int adf_ring_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &adf_ring_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations adf_ring_debug_fops = {
- .open = adf_ring_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(adf_ring_debug);
int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
{
@@ -188,31 +171,14 @@ static void adf_bank_stop(struct seq_file *sfile, void *v)
mutex_unlock(&bank_read_lock);
}
-static const struct seq_operations adf_bank_sops = {
+static const struct seq_operations adf_bank_debug_sops = {
.start = adf_bank_start,
.next = adf_bank_next,
.stop = adf_bank_stop,
.show = adf_bank_show
};
-static int adf_bank_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &adf_bank_sops);
-
- if (!ret) {
- struct seq_file *seq_f = file->private_data;
-
- seq_f->private = inode->i_private;
- }
- return ret;
-}
-
-static const struct file_operations adf_bank_debug_fops = {
- .open = adf_bank_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(adf_bank_debug);
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
{
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 72753b84dc95..d552dbcfe0a0 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
struct icp_qat_fw_la_bulk_req *msg;
int digst_size = crypto_aead_authsize(aead_tfm);
int ret, ctr = 0;
+ u32 cipher_len;
+
+ cipher_len = areq->cryptlen - digst_size;
+ if (cipher_len % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
@@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = areq->cryptlen - digst_size;
+ cipher_param->cipher_length = cipher_len;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
@@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq)
u8 *iv = areq->iv;
int ret, ctr = 0;
+ if (areq->cryptlen % AES_BLOCK_SIZE != 0)
+ return -EINVAL;
+
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
if (unlikely(ret))
return ret;
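These hunks reject AEAD requests whose cipher text length is not a multiple of the AES block size before any buffers are mapped; for CBC-mode AEAD the payload must be block-aligned, and letting a misaligned request reach the firmware is what the fix avoids. A minimal sketch of the guard, with hypothetical names:

#include <crypto/aes.h>	/* AES_BLOCK_SIZE */

static int example_check_cbc_len(unsigned int cryptlen,
				 unsigned int authsize, bool decrypt)
{
	/* the AEAD API guarantees cryptlen >= authsize on decrypt */
	unsigned int cipher_len = decrypt ? cryptlen - authsize : cryptlen;

	if (cipher_len % AES_BLOCK_SIZE)
		return -EINVAL;	/* reject before any DMA mapping */
	return 0;
}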
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index fa467e0f8285..6b9d47682d04 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -412,7 +413,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
unsigned int csr_val;
int times = 30;
- if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
+ if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
return 0;
csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -672,13 +673,13 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
LOCAL_TO_XFER_REG_OFFSET);
handle->pci_dev = pci_info->pci_dev;
- if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_DH895XCC) {
sram_bar =
&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
handle->hal_sram_addr_v = sram_bar->virt_addr;
}
handle->fw_auth = (handle->pci_dev->device ==
- ADF_DH895XCC_PCI_DEVICE_ID) ? false : true;
+ PCI_DEVICE_ID_INTEL_QAT_DH895XCC) ? false : true;
handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
if (!handle->hal_handle)
goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 00c615f9f9a8..5d1f28cd6680 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -4,6 +4,7 @@
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/pci_ids.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
@@ -711,11 +712,11 @@ static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
switch (handle->pci_dev->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
return ICP_QAT_AC_895XCC_DEV_TYPE;
- case ADF_C62X_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C62X:
return ICP_QAT_AC_C62X_DEV_TYPE;
- case ADF_C3XXX_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
return ICP_QAT_AC_C3XXX_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
@@ -1391,7 +1392,7 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
status = qat_uclo_auth_fw(handle, desc);
qat_uclo_ummap_auth_fw(handle, &desc);
} else {
- if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_C3XXX) {
pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
return -EINVAL;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 4e877b75822b..ecb4f6f20e22 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_dh895xcc_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
break;
default:
@@ -83,7 +80,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_DH895XCC_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -143,10 +140,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
@@ -205,7 +200,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- if (adf_enable_aer(accel_dev, &adf_driver)) {
+ if (adf_enable_aer(accel_dev)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
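PCI_VDEVICE(INTEL, id) fills in PCI_VENDOR_ID_INTEL and wildcards the subsystem IDs, so the local ADF_SYSTEM_DEVICE() wrapper added nothing. A sketch of the equivalence (table name illustrative):

#include <linux/pci.h>
#include <linux/pci_ids.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC), },
	/* equivalent long form:
	 * { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
	 *		PCI_DEVICE_ID_INTEL_QAT_DH895XCC) },
	 */
	{ }	/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);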
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 7d6e1db272c2..404cf9df6922 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -18,12 +18,9 @@
#include <adf_cfg.h>
#include "adf_dh895xccvf_hw_data.h"
-#define ADF_SYSTEM_DEVICE(device_id) \
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
-
static const struct pci_device_id adf_pci_tbl[] = {
- ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF), },
+ { }
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
@@ -58,7 +55,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
- case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
break;
default:
@@ -85,7 +82,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
switch (ent->device) {
- case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+ case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
@@ -127,10 +124,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* Create dev top level debugfs entry */
- snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
- ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
+ hw_data->dev_class->name, pci_name(pdev));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index cb6d61eb7302..ea616b7259ae 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index c230843e2ffb..87be96a0b0bb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5630c5addd28..a2d3da0ad95f 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 4730f84b646d..99ba8d51d102 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/crypto.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index f385587f99af..35d73061d156 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -10,6 +10,7 @@
*/
#include "rk3288_crypto.h"
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 2b49c677afdb..3db595570c9c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -7,6 +7,7 @@
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 6b7ecbec092e..81befe7febaa 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -8,6 +8,7 @@
*
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
+#include <linux/device.h>
#include "rk3288_crypto.h"
/*
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 4a75c8e1fa6c..1cece1a7d3f0 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -8,6 +8,7 @@
*
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
+#include <linux/device.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 341433fbcc4a..88a6c853ffd7 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -260,6 +260,7 @@ struct s5p_aes_ctx {
* struct s5p_aes_dev - Crypto device state container
* @dev: Associated device
* @clk: Clock for accessing hardware
+ * @pclk: APB bus clock necessary to access the hardware
* @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
* @irq_fc: Feed control interrupt line
@@ -342,13 +343,13 @@ struct s5p_aes_dev {
* @engine: Bits for selecting type of HASH in SSS block
* @sg: sg for DMA transfer
* @sg_len: Length of sg for DMA transfer
- * @sgl[]: sg for joining buffer and req->src scatterlist
+ * @sgl: sg for joining buffer and req->src scatterlist
* @skip: Skip offset in req->src for current op
* @total: Total number of bytes for current request
* @finup: Keep state for finup or final.
* @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
- * @buffer[]: For byte(s) from end of req->src in UPDATE op
+ * @buffer: For byte(s) from end of req->src in UPDATE op
*/
struct s5p_hash_reqctx {
struct s5p_aes_dev *dd;
@@ -1125,7 +1126,7 @@ static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
* s5p_hash_prepare_sgs() - prepare sg for processing
* @ctx: request context
* @sg: source scatterlist request
- * @nbytes: number of bytes to process from sg
+ * @new_len: number of bytes to process from sg
* @final: final flag
*
* Check two conditions: (1) if buffers in sg have len aligned data, and (2)
@@ -2200,11 +2201,10 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
- if (IS_ERR(pdata->clk)) {
- dev_err(dev, "failed to find secss clock %s\n",
- variant->clk_names[0]);
- return -ENOENT;
- }
+ if (IS_ERR(pdata->clk))
+ return dev_err_probe(dev, PTR_ERR(pdata->clk),
+ "failed to find secss clock %s\n",
+ variant->clk_names[0]);
err = clk_prepare_enable(pdata->clk);
if (err < 0) {
@@ -2216,9 +2216,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (variant->clk_names[1]) {
pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
if (IS_ERR(pdata->pclk)) {
- dev_err(dev, "failed to find clock %s\n",
- variant->clk_names[1]);
- err = -ENOENT;
+ err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
+ "failed to find clock %s\n",
+ variant->clk_names[1]);
goto err_clk;
}
@@ -2307,8 +2307,7 @@ err_algs:
tasklet_kill(&pdata->tasklet);
err_irq:
- if (pdata->pclk)
- clk_disable_unprepare(pdata->pclk);
+ clk_disable_unprepare(pdata->pclk);
err_clk:
clk_disable_unprepare(pdata->clk);
@@ -2338,8 +2337,7 @@ static int s5p_aes_remove(struct platform_device *pdev)
pdata->use_hash = false;
}
- if (pdata->pclk)
- clk_disable_unprepare(pdata->pclk);
+ clk_disable_unprepare(pdata->pclk);
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
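Two idioms drive the s5p-sss conversion: dev_err_probe() returns the error it is passed, logs at error level for real failures, and records a quiet deferral reason when the error is -EPROBE_DEFER; and the if (pdata->pclk) guards can go because clk_disable_unprepare() is a no-op for a NULL clock. A minimal sketch of the first idiom (clock name hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "secss");
	if (IS_ERR(*clk))
		/* replaces dev_err() + return PTR_ERR(), and stays
		 * silent on probe deferral */
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to find secss clock\n");
	return 0;
}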
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 5bc099052bd2..eda93fab95fe 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -143,33 +143,38 @@ struct sa_alg_tmpl {
};
/**
+ * struct sa_mapped_sg: scatterlist information for tx and rx
+ * @mapped: Set to true if the @sgt is mapped
+ * @dir: mapping direction used for @sgt
+ * @split_sg: Set if the sg is split and needs to be freed up
+ * @static_sg: Static scatterlist entry for overriding data
+ * @sgt: scatterlist table for DMA API use
+ */
+struct sa_mapped_sg {
+ bool mapped;
+ enum dma_data_direction dir;
+ struct scatterlist static_sg;
+ struct scatterlist *split_sg;
+ struct sg_table sgt;
+};
+/**
 * struct sa_rx_data: RX packet miscellaneous data placeholder
* @req: crypto request data pointer
* @ddev: pointer to the DMA device
* @tx_in: dma_async_tx_descriptor pointer for rx channel
- * @split_src_sg: Set if the src sg is split and needs to be freed up
- * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
* @enc: Flag indicating either encryption or decryption
* @enc_iv_size: Initialisation vector size
* @iv_idx: Initialisation vector index
- * @rx_sg: Static scatterlist entry for overriding RX data
- * @tx_sg: Static scatterlist entry for overriding TX data
- * @src: Source data pointer
- * @dst: Destination data pointer
*/
struct sa_rx_data {
void *req;
struct device *ddev;
struct dma_async_tx_descriptor *tx_in;
- struct scatterlist *split_src_sg;
- struct scatterlist *split_dst_sg;
+ struct sa_mapped_sg mapped_sg[2];
u8 enc;
u8 enc_iv_size;
u8 iv_idx;
- struct scatterlist rx_sg;
- struct scatterlist tx_sg;
- struct scatterlist *src;
- struct scatterlist *dst;
};
/**
@@ -976,23 +981,46 @@ static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
return sa_cipher_setkey(tfm, key, keylen, &ad);
}
+static void sa_sync_from_device(struct sa_rx_data *rxd)
+{
+ struct sg_table *sgt;
+
+ if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
+ sgt = &rxd->mapped_sg[0].sgt;
+ else
+ sgt = &rxd->mapped_sg[1].sgt;
+
+ dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
+}
+
+static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
+ struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
+
+ if (mapped_sg->mapped) {
+ dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
+ mapped_sg->dir, 0);
+ kfree(mapped_sg->split_sg);
+ }
+ }
+
+ kfree(rxd);
+}
+
static void sa_aes_dma_in_callback(void *data)
{
struct sa_rx_data *rxd = (struct sa_rx_data *)data;
struct skcipher_request *req;
- int sglen;
u32 *result;
__be32 *mdptr;
size_t ml, pl;
int i;
- enum dma_data_direction dir_src;
- bool diff_dst;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct skcipher_request, base);
- sglen = sg_nents_for_len(req->src, req->cryptlen);
-
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
if (req->iv) {
mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
@@ -1003,18 +1031,7 @@ static void sa_aes_dma_in_callback(void *data)
result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
}
- dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
- kfree(rxd->split_src_sg);
-
- if (diff_dst) {
- sglen = sg_nents_for_len(req->dst, req->cryptlen);
-
- dma_unmap_sg(rxd->ddev, req->dst, sglen,
- DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
skcipher_request_complete(req, 0);
}
@@ -1043,7 +1060,6 @@ static int sa_run(struct sa_req *req)
struct device *ddev;
struct dma_chan *dma_rx;
int sg_nents, src_nents, dst_nents;
- int mapped_src_nents, mapped_dst_nents;
struct scatterlist *src, *dst;
size_t pl, ml, split_size;
struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
@@ -1052,6 +1068,7 @@ static int sa_run(struct sa_req *req)
u32 *mdptr;
bool diff_dst;
enum dma_data_direction dir_src;
+ struct sa_mapped_sg *mapped_sg;
gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
@@ -1082,6 +1099,7 @@ static int sa_run(struct sa_req *req)
dma_rx = pdata->dma_rx1;
ddev = dma_rx->device->dev;
+ rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1109,60 +1127,90 @@ static int sa_run(struct sa_req *req)
split_size = req->size;
+ mapped_sg = &rxd->mapped_sg[0];
if (sg_nents == 1 && split_size <= req->src->length) {
- src = &rxd->rx_sg;
+ src = &mapped_sg->static_sg;
+ src_nents = 1;
sg_init_table(src, 1);
sg_set_page(src, sg_page(req->src), split_size,
req->src->offset);
- src_nents = 1;
- dma_map_sg(ddev, src, sg_nents, dir_src);
+
+ mapped_sg->sgt.sgl = src;
+ mapped_sg->sgt.orig_nents = src_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+ if (ret)
+ return ret;
+
+ mapped_sg->dir = dir_src;
+ mapped_sg->mapped = true;
} else {
- mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
- dir_src);
- ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
- &src, &src_nents, gfp_flags);
+ mapped_sg->sgt.sgl = req->src;
+ mapped_sg->sgt.orig_nents = sg_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+ if (ret)
+ return ret;
+
+ mapped_sg->dir = dir_src;
+ mapped_sg->mapped = true;
+
+ ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
+ &split_size, &src, &src_nents, gfp_flags);
if (ret) {
- src_nents = sg_nents;
- src = req->src;
+ src_nents = mapped_sg->sgt.nents;
+ src = mapped_sg->sgt.sgl;
} else {
- rxd->split_src_sg = src;
+ mapped_sg->split_sg = src;
}
}
+ dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
+
if (!diff_dst) {
dst_nents = src_nents;
dst = src;
} else {
dst_nents = sg_nents_for_len(req->dst, req->size);
+ mapped_sg = &rxd->mapped_sg[1];
if (dst_nents == 1 && split_size <= req->dst->length) {
- dst = &rxd->tx_sg;
+ dst = &mapped_sg->static_sg;
+ dst_nents = 1;
sg_init_table(dst, 1);
sg_set_page(dst, sg_page(req->dst), split_size,
req->dst->offset);
- dst_nents = 1;
- dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+
+ mapped_sg->sgt.sgl = dst;
+ mapped_sg->sgt.orig_nents = dst_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+ DMA_FROM_DEVICE, 0);
+ if (ret)
+ goto err_cleanup;
+
+ mapped_sg->dir = DMA_FROM_DEVICE;
+ mapped_sg->mapped = true;
} else {
- mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
- &split_size, &dst, &dst_nents,
+ mapped_sg->sgt.sgl = req->dst;
+ mapped_sg->sgt.orig_nents = dst_nents;
+ ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+ DMA_FROM_DEVICE, 0);
+ if (ret)
+ goto err_cleanup;
+
+ mapped_sg->dir = DMA_FROM_DEVICE;
+ mapped_sg->mapped = true;
+
+ ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
+ 0, 1, &split_size, &dst, &dst_nents,
gfp_flags);
if (ret) {
- dst_nents = dst_nents;
- dst = req->dst;
+ dst_nents = mapped_sg->sgt.nents;
+ dst = mapped_sg->sgt.sgl;
} else {
- rxd->split_dst_sg = dst;
+ mapped_sg->split_sg = dst;
}
}
}
- if (unlikely(src_nents != sg_nents)) {
- dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
- ret = -EIO;
- goto err_cleanup;
- }
-
rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -1174,9 +1222,6 @@ static int sa_run(struct sa_req *req)
rxd->req = (void *)req->base;
rxd->enc = req->enc;
- rxd->ddev = ddev;
- rxd->src = src;
- rxd->dst = dst;
rxd->iv_idx = req->ctx->iv_idx;
rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
rxd->tx_in->callback = req->callback;
@@ -1214,16 +1259,7 @@ static int sa_run(struct sa_req *req)
return -EINPROGRESS;
err_cleanup:
- dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
- kfree(rxd->split_src_sg);
-
- if (req->src != req->dst) {
- dst_nents = sg_nents_for_len(req->dst, req->size);
- dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
return ret;
}
@@ -1293,11 +1329,12 @@ static void sa_sha_dma_in_callback(void *data)
struct ahash_request *req;
struct crypto_ahash *tfm;
unsigned int authsize;
- int i, sg_nents;
+ int i;
size_t ml, pl;
u32 *result;
__be32 *mdptr;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct ahash_request, base);
tfm = crypto_ahash_reqtfm(req);
authsize = crypto_ahash_digestsize(tfm);
@@ -1308,12 +1345,7 @@ static void sa_sha_dma_in_callback(void *data)
for (i = 0; i < (authsize / 4); i++)
result[i] = be32_to_cpu(mdptr[i + 4]);
- sg_nents = sg_nents_for_len(req->src, req->nbytes);
- dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
-
- kfree(rxd->split_src_sg);
-
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
ahash_request_complete(req, 0);
}
@@ -1482,8 +1514,8 @@ static int sa_sha_init(struct ahash_request *req)
struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
- dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
- crypto_ahash_digestsize(tfm), (u64)rctx);
+ dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
+ crypto_ahash_digestsize(tfm), rctx);
ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
rctx->fallback_req.base.flags =
@@ -1637,43 +1669,28 @@ static void sa_aead_dma_in_callback(void *data)
unsigned int authsize;
u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
size_t pl, ml;
- int i, sglen;
+ int i;
int err = 0;
u16 auth_len;
u32 *mdptr;
- bool diff_dst;
- enum dma_data_direction dir_src;
+ sa_sync_from_device(rxd);
req = container_of(rxd->req, struct aead_request, base);
tfm = crypto_aead_reqtfm(req);
start = req->assoclen + req->cryptlen;
authsize = crypto_aead_authsize(tfm);
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
for (i = 0; i < (authsize / 4); i++)
mdptr[i + 4] = swab32(mdptr[i + 4]);
auth_len = req->assoclen + req->cryptlen;
- if (!rxd->enc)
- auth_len -= authsize;
-
- sglen = sg_nents_for_len(rxd->src, auth_len);
- dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
- kfree(rxd->split_src_sg);
-
- if (diff_dst) {
- sglen = sg_nents_for_len(rxd->dst, auth_len);
- dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
- kfree(rxd->split_dst_sg);
- }
if (rxd->enc) {
scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1);
} else {
+ auth_len -= authsize;
start -= authsize;
scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
0);
@@ -1681,7 +1698,7 @@ static void sa_aead_dma_in_callback(void *data)
err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
}
- kfree(rxd);
+ sa_free_sa_rx_data(rxd);
aead_request_complete(req, err);
}
@@ -2243,25 +2260,21 @@ static int sa_dma_init(struct sa_crypto_data *dd)
return ret;
dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
- if (IS_ERR(dd->dma_rx1)) {
- if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
- return PTR_ERR(dd->dma_rx1);
- }
+ if (IS_ERR(dd->dma_rx1))
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
+ "Unable to request rx1 DMA channel\n");
dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
if (IS_ERR(dd->dma_rx2)) {
dma_release_channel(dd->dma_rx1);
- if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
- return PTR_ERR(dd->dma_rx2);
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
+ "Unable to request rx2 DMA channel\n");
}
dd->dma_tx = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_tx)) {
- if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
- dev_err(dd->dev, "Unable to request tx DMA channel\n");
- ret = PTR_ERR(dd->dma_tx);
+ ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
+ "Unable to request tx DMA channel\n");
goto err_dma_tx;
}
@@ -2333,7 +2346,7 @@ static int sa_ul_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
return ret;
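The recurring pattern in the sa2ul rework is the switch from dma_map_sg()/dma_unmap_sg(), whose return value the old code partly ignored (hence the removed "failed to map tx pkt" check), to the sg_table helpers, which return an error and carry the mapped nents inside the table so the completion callback can unmap exactly what was mapped. A hedged sketch of that lifecycle, mirroring struct sa_mapped_sg:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct sg_table *sgt,
		       struct scatterlist *sgl, unsigned int nents,
		       enum dma_data_direction dir)
{
	sgt->sgl = sgl;
	sgt->orig_nents = nents;
	/* returns 0 on success and records mapped nents in @sgt */
	return dma_map_sgtable(dev, sgt, dir, 0);
}

static void example_unmap(struct device *dev, struct sg_table *sgt,
			  enum dma_data_direction dir)
{
	/* safe to call exactly once per successful example_map() */
	dma_unmap_sgtable(dev, sgt, dir, 0);
}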
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c8cb23ae708..d60679c79822 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -18,7 +18,7 @@
#include <crypto/sha.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4ef3eb11361c..4a4c3284ae1f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -3,6 +3,7 @@ config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
+ select CRC32
help
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 3ba41148c2a4..75867c0b0017 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -6,7 +6,10 @@
#include <linux/bitrev.h>
#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/crc32poly.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -147,7 +150,6 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct stm32_crc *crc;
- unsigned long flags;
crc = stm32_crc_get_next_crc();
if (!crc)
@@ -155,7 +157,15 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
pm_runtime_get_sync(crc->dev);
- spin_lock_irqsave(&crc->lock, flags);
+ if (!spin_trylock(&crc->lock)) {
+	if (!spin_trylock(&crc->lock)) {
+		/* Hardware is busy, calculate crc32 in software */
+ if (mctx->poly == CRC32_POLY_LE)
+ ctx->partial = crc32_le(ctx->partial, d8, length);
+ else
+ ctx->partial = __crc32c_le(ctx->partial, d8, length);
+
+ goto pm_out;
+ }
/*
* Restore previously calculated CRC for this context as init value
@@ -195,8 +205,9 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
/* Store partial result */
ctx->partial = readl_relaxed(crc->regs + CRC_DR);
- spin_unlock_irqrestore(&crc->lock, flags);
+ spin_unlock(&crc->lock);
+pm_out:
pm_runtime_mark_last_busy(crc->dev);
pm_runtime_put_autosuspend(crc->dev);
@@ -216,9 +227,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
return burst_update(desc, d8, length);
/* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min(length,
- burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
- sizeof(u32)));
+ size = min_t(size_t, length, burst_sz + (size_t)d8 -
+ ALIGN_DOWN((size_t)d8, sizeof(u32)));
for (rem_sz = length, cur = d8; rem_sz;
rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
ret = burst_update(desc, cur, size);
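Two changes land in burst_update(): the irqsave variant is dropped because the function is not called from interrupt context, and contention for the single CRC engine now falls back to the software crc32 routines rather than spinning, which is safe because hardware and software produce identical results for the same polynomial. A minimal sketch of the trylock-or-fallback shape (the hardware helper is a hypothetical stand-in):

#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static u32 example_feed_hw(u32 partial, const u8 *d, size_t len)
{
	return partial;	/* hypothetical stand-in for the CRC engine */
}

static u32 example_crc_update(spinlock_t *hw_lock, u32 partial,
			      const u8 *d, size_t len)
{
	if (!spin_trylock(hw_lock))
		return crc32_le(partial, d, len);	/* software path */

	partial = example_feed_hw(partial, d, len);
	spin_unlock(hw_lock);
	return partial;
}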
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index d347a1d6e351..2670c30332fa 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -118,7 +118,7 @@ struct stm32_cryp_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
- u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ __be32 key[AES_KEYSIZE_256 / sizeof(u32)];
unsigned long flags;
};
@@ -380,24 +380,24 @@ static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
return 0;
}
-static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv)
{
if (!iv)
return;
- stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
- stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0LR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, be32_to_cpu(*iv++));
if (is_aes(cryp)) {
- stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
- stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1LR, be32_to_cpu(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, be32_to_cpu(*iv++));
}
}
static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
{
struct skcipher_request *req = cryp->req;
- u32 *tmp = (void *)req->iv;
+ __be32 *tmp = (void *)req->iv;
if (!tmp)
return;
@@ -417,13 +417,13 @@ static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
int r_id;
if (is_des(c)) {
- stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
- stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ stm32_cryp_write(c, CRYP_K1LR, be32_to_cpu(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, be32_to_cpu(c->ctx->key[1]));
} else {
r_id = CRYP_K3RR;
for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
stm32_cryp_write(c, r_id,
- cpu_to_be32(c->ctx->key[i - 1]));
+ be32_to_cpu(c->ctx->key[i - 1]));
}
}
@@ -469,7 +469,7 @@ static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
- u32 iv[4];
+ __be32 iv[4];
/* Phase 1 : init */
memcpy(iv, cryp->areq->iv, 12);
@@ -491,6 +491,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
{
int ret;
u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ __be32 *bd;
u32 *d;
unsigned int i, textlen;
@@ -498,7 +499,7 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
iv[AES_BLOCK_SIZE - 1] = 1;
- stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)iv);
/* Build B0 */
memcpy(b0, iv, AES_BLOCK_SIZE);
@@ -518,11 +519,14 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
/* Write B0 */
d = (u32 *)b0;
+ bd = (__be32 *)b0;
for (i = 0; i < AES_BLOCK_32; i++) {
+ u32 xd = d[i];
+
if (!cryp->caps->padding_wa)
- *d = cpu_to_be32(*d);
- stm32_cryp_write(cryp, CRYP_DIN, *d++);
+ xd = be32_to_cpu(bd[i]);
+ stm32_cryp_write(cryp, CRYP_DIN, xd);
}
/* Wait for end of processing */
@@ -617,7 +621,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
case CR_TDES_CBC:
case CR_AES_CBC:
case CR_AES_CTR:
- stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->iv);
+ stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->req->iv);
break;
default:
@@ -1120,7 +1124,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* GCM: write aad and payload size (in bits) */
size_bit = cryp->areq->assoclen * 8;
if (cryp->caps->swap_final)
- size_bit = cpu_to_be32(size_bit);
+ size_bit = (__force u32)cpu_to_be32(size_bit);
stm32_cryp_write(cryp, CRYP_DIN, 0);
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
@@ -1129,7 +1133,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
cryp->areq->cryptlen - AES_BLOCK_SIZE;
size_bit *= 8;
if (cryp->caps->swap_final)
- size_bit = cpu_to_be32(size_bit);
+ size_bit = (__force u32)cpu_to_be32(size_bit);
stm32_cryp_write(cryp, CRYP_DIN, 0);
stm32_cryp_write(cryp, CRYP_DIN, size_bit);
@@ -1137,14 +1141,19 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
/* CCM: write CTR0 */
u8 iv[AES_BLOCK_SIZE];
u32 *iv32 = (u32 *)iv;
+ __be32 *biv;
+
+ biv = (void *)iv;
memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
for (i = 0; i < AES_BLOCK_32; i++) {
+ u32 xiv = iv32[i];
+
if (!cryp->caps->padding_wa)
- *iv32 = cpu_to_be32(*iv32);
- stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
+ xiv = be32_to_cpu(biv[i]);
+ stm32_cryp_write(cryp, CRYP_DIN, xiv);
}
}
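The stm32-cryp hunks are endianness-annotation fixes: buffers that already hold big-endian data are now typed __be32 and converted with be32_to_cpu() before hitting the registers, instead of misapplying cpu_to_be32() to a u32. On a little-endian CPU both directions are the same byte swap, so behavior is unchanged; the point is that sparse can now check the conversions. The convention in miniature:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline u32 example_load_be32(const __be32 *p)
{
	return be32_to_cpu(*p);	/* big-endian storage -> native u32 */
}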
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 03c5e6683805..e3e25278a970 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -748,7 +749,7 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
static void stm32_hash_copy_hash(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- u32 *hash = (u32 *)rctx->digest;
+ __be32 *hash = (void *)rctx->digest;
unsigned int i, hashsize;
switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
@@ -769,7 +770,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
}
for (i = 0; i < hashsize / sizeof(u32); i++)
- hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+ hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
HASH_HREG(i)));
}
@@ -1463,14 +1464,9 @@ static int stm32_hash_probe(struct platform_device *pdev)
}
hdev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(hdev->clk)) {
- if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
- dev_err(dev, "failed to get clock for hash (%lu)\n",
- PTR_ERR(hdev->clk));
- }
-
- return PTR_ERR(hdev->clk);
- }
+ if (IS_ERR(hdev->clk))
+ return dev_err_probe(dev, PTR_ERR(hdev->clk),
+ "failed to get clock for hash\n");
ret = clk_prepare_enable(hdev->clk);
if (ret) {
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 7c547352a862..66773892f665 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -806,10 +806,10 @@ static int talitos_register_rng(struct device *dev)
struct talitos_private *priv = dev_get_drvdata(dev);
int err;
- priv->rng.name = dev_driver_string(dev),
- priv->rng.init = talitos_rng_init,
- priv->rng.data_present = talitos_rng_data_present,
- priv->rng.data_read = talitos_rng_data_read,
+ priv->rng.name = dev_driver_string(dev);
+ priv->rng.init = talitos_rng_init;
+ priv->rng.data_present = talitos_rng_data_present;
+ priv->rng.data_read = talitos_rng_data_read;
priv->rng.priv = (unsigned long)dev;
err = hwrng_register(&priv->rng);
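The talitos fix replaces trailing commas with semicolons: as written, the four assignments formed a single comma expression. Left-to-right evaluation saved the original code, but only by accident. A tiny illustration:

static void example_comma_vs_semicolon(void)
{
	int a, b, c;

	a = 1, b = 2, c = 3;	/* one statement: comma expression */
	a = 1; b = 2; c = 3;	/* three statements: same effect here */
	(void)a; (void)b; (void)c;
}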
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 800dfc4d16c4..c3adeb2e5823 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -11,13 +11,15 @@
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
+#include <linux/kernel.h>
#include <linux/klist.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -27,7 +29,6 @@
#include <linux/platform_data/dma-ste-dma40.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -92,17 +93,6 @@ struct cryp_ctx {
static struct cryp_driver_data driver_data;
/**
- * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
- * @in: Data to convert.
- */
-static inline u32 uint8p_to_uint32_be(u8 *in)
-{
- u32 *data = (u32 *)in;
-
- return cpu_to_be32p(data);
-}
-
-/**
* swap_bits_in_byte - mirror the bits in a byte
* @b: the byte to be mirrored
*
@@ -284,6 +274,7 @@ static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
int i;
int status = 0;
int num_of_regs = ctx->blocksize / 8;
+ __be32 *civ = (__be32 *)ctx->iv;
u32 iv[AES_BLOCK_SIZE / 4];
dev_dbg(device_data->dev, "[%s]", __func__);
@@ -300,7 +291,7 @@ static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
}
for (i = 0; i < ctx->blocksize / 4; i++)
- iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+ iv[i] = be32_to_cpup(civ + i);
for (i = 0; i < num_of_regs; i++) {
status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
@@ -339,23 +330,24 @@ static int cfg_keys(struct cryp_ctx *ctx)
int i;
int num_of_regs = ctx->keylen / 8;
u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ __be32 *ckey = (__be32 *)ctx->key;
int cryp_error = 0;
dev_dbg(ctx->device->dev, "[%s]", __func__);
if (mode_is_aes(ctx->config.algomode)) {
- swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ swap_words_in_key_and_bits_in_byte((u8 *)ckey,
(u8 *)swapped_key,
ctx->keylen);
} else {
for (i = 0; i < ctx->keylen / 4; i++)
- swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ swapped_key[i] = be32_to_cpup(ckey + i);
}
for (i = 0; i < num_of_regs; i++) {
cryp_error = set_key(ctx->device,
- *(((u32 *)swapped_key)+i*2),
- *(((u32 *)swapped_key)+i*2+1),
+ swapped_key[i * 2],
+ swapped_key[i * 2 + 1],
(enum cryp_key_reg_index) i);
if (cryp_error != 0) {
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index a5ee8c2fb4e0..3d407eebb2ba 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -1071,27 +1072,32 @@ int hash_hw_update(struct ahash_request *req)
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_hash_walk walk;
- int msg_length = crypto_hash_walk_first(req, &walk);
-
- /* Empty message ("") is correct indata */
- if (msg_length == 0)
- return ret;
+ int msg_length;
index = req_ctx->state.index;
buffer = (u8 *)req_ctx->state.buffer;
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ msg_length = crypto_hash_walk_first(req, &walk);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0) {
+ ret = 0;
+ goto release_dev;
+ }
+
/* Check if ctx->state.length + msg_length
overflows */
if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
- return -EPERM;
+ ret = crypto_hash_walk_done(&walk, -EPERM);
+ goto release_dev;
}
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
/* Main loop */
while (0 != msg_length) {
data_buffer = walk.data;
@@ -1101,7 +1107,8 @@ int hash_hw_update(struct ahash_request *req)
if (ret) {
dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
__func__);
- goto out;
+ crypto_hash_walk_done(&walk, ret);
+ goto release_dev;
}
msg_length = crypto_hash_walk_done(&walk, 0);
@@ -1111,7 +1118,7 @@ int hash_hw_update(struct ahash_request *req)
dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
__func__, req_ctx->state.index, req_ctx->state.bit_index);
-out:
+release_dev:
release_hash_device(device_data);
return ret;
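The reordering fixes two leaks in ux500 hash_hw_update(): the device reference is acquired before the hash walk starts, so every exit path can release it through the single release_dev label, and error exits now call crypto_hash_walk_done() so the walk's mapped pages are not leaked. A hedged sketch of the resulting acquire/walk/release shape, with hypothetical stand-ins for the driver's helpers:

static int example_get_device(void) { return 0; }
static void example_put_device(void) { }
static int example_process(int len) { (void)len; return 0; }
static int example_walk_done(int err) { return err; }

static int example_hw_update(int msg_length)
{
	int ret = example_get_device();	/* acquire before walking */

	if (ret)
		return ret;

	while (msg_length) {
		ret = example_process(msg_length);
		if (ret) {
			example_walk_done(ret);	/* unwind walk on error */
			goto release_dev;
		}
		msg_length = example_walk_done(0); /* advance the walk */
	}
	ret = 0;
release_dev:
	example_put_device();	/* single release point */
	return ret;
}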
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index fb294174e408..b894e3a8be4f 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -5,7 +5,6 @@ config CRYPTO_DEV_VIRTIO
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
- default m
help
This driver provides support for virtio crypto device. If you
choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 27079354dbe9..bf1f421e05f2 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -10,6 +10,7 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3b6c06f07326..567428e10b7b 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -35,6 +35,7 @@ config DEV_DAX_PMEM
config DEV_DAX_HMEM
tristate "HMEM DAX: direct access to 'specific purpose' memory"
depends on EFI_SOFT_RESERVE
+ select NUMA_KEEP_MEMINFO if (NUMA && X86)
default DEV_DAX
help
EFI 2.8 platforms, and others, may advertise 'specific purpose'
@@ -48,6 +49,11 @@ config DEV_DAX_HMEM
Say M if unsure.
+config DEV_DAX_HMEM_DEVICES
+ depends on NUMA_KEEP_MEMINFO # for phys_to_target_node()
+ depends on DEV_DAX_HMEM && DAX=y
+ def_bool y
+
config DEV_DAX_KMEM
tristate "KMEM DAX: volatile-use of persistent memory"
default DEV_DAX
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 80065b38b3c4..9d4ba672d305 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,11 +2,10 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
-obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
-dax_hmem-y := hmem.o
obj-y += pmem/
+obj-y += hmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index df238c8b6ef2..27513d311242 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
+#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"
@@ -130,10 +131,63 @@ ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv);
+static bool is_static(struct dax_region *dax_region)
+{
+ return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
+}
+
+static u64 dev_dax_size(struct dev_dax *dev_dax)
+{
+ u64 size = 0;
+ int i;
+
+ device_lock_assert(&dev_dax->dev);
+
+ for (i = 0; i < dev_dax->nr_range; i++)
+ size += range_len(&dev_dax->ranges[i].range);
+
+ return size;
+}
+
+static int dax_bus_probe(struct device *dev)
+{
+ struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ int rc;
+
+ if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
+ return -ENXIO;
+
+ rc = dax_drv->probe(dev_dax);
+
+ if (rc || is_static(dax_region))
+ return rc;
+
+ /*
+ * Track new seed creation only after successful probe of the
+ * previous seed.
+ */
+ if (dax_region->seed == dev)
+ dax_region->seed = NULL;
+
+ return 0;
+}
+
+static int dax_bus_remove(struct device *dev)
+{
+ struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return dax_drv->remove(dev_dax);
+}
+
static struct bus_type dax_bus_type = {
.name = "dax",
.uevent = dax_bus_uevent,
.match = dax_bus_match,
+ .probe = dax_bus_probe,
+ .remove = dax_bus_remove,
.drv_groups = dax_drv_groups,
};
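Moving probe/remove onto the bus_type funnels every dax driver through one spot where zero-sized or id-invalidated devices are rejected and where dynamic-region seed accounting happens, instead of each driver open-coding it. A sketch of how a driver plugs in, assuming the dev_dax-typed ops that dax_bus_probe()/dax_bus_remove() above imply (illustrative only; the real struct carries additional members):

static int example_dax_probe(struct dev_dax *dev_dax)
{
	/* driver-specific setup; common checks already done by the bus */
	return 0;
}

static int example_dax_remove(struct dev_dax *dev_dax)
{
	/* driver-specific teardown */
	return 0;
}

static struct dax_device_driver example_dax_driver = {
	.probe  = example_dax_probe,
	.remove = example_dax_remove,
};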
@@ -176,18 +230,269 @@ static ssize_t region_size_show(struct device *dev,
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
region_size_show, NULL);
-static ssize_t align_show(struct device *dev,
+static ssize_t region_align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dax_region->align);
}
-static DEVICE_ATTR_RO(align);
+static struct device_attribute dev_attr_region_align =
+ __ATTR(align, 0400, region_align_show, NULL);
+
+#define for_each_dax_region_resource(dax_region, res) \
+ for (res = (dax_region)->res.child; res; res = res->sibling)
+
+static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
+{
+ resource_size_t size = resource_size(&dax_region->res);
+ struct resource *res;
+
+ device_lock_assert(dax_region->dev);
+
+ for_each_dax_region_resource(dax_region, res)
+ size -= resource_size(res);
+ return size;
+}
+
+static ssize_t available_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+ unsigned long long size;
+
+ device_lock(dev);
+ size = dax_region_avail_size(dax_region);
+ device_unlock(dev);
+
+ return sprintf(buf, "%llu\n", size);
+}
+static DEVICE_ATTR_RO(available_size);
+
+static ssize_t seed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+ struct device *seed;
+ ssize_t rc;
+
+ if (is_static(dax_region))
+ return -EINVAL;
+
+ device_lock(dev);
+ seed = dax_region->seed;
+ rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(seed);
+
+static ssize_t create_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+ struct device *youngest;
+ ssize_t rc;
+
+ if (is_static(dax_region))
+ return -EINVAL;
+
+ device_lock(dev);
+ youngest = dax_region->youngest;
+ rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
+ device_unlock(dev);
+
+ return rc;
+}
+
+static ssize_t create_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+ unsigned long long avail;
+ ssize_t rc;
+ int val;
+
+ if (is_static(dax_region))
+ return -EINVAL;
+
+ rc = kstrtoint(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (val != 1)
+ return -EINVAL;
+
+ device_lock(dev);
+ avail = dax_region_avail_size(dax_region);
+ if (avail == 0)
+ rc = -ENOSPC;
+ else {
+ struct dev_dax_data data = {
+ .dax_region = dax_region,
+ .size = 0,
+ .id = -1,
+ };
+ struct dev_dax *dev_dax = devm_create_dev_dax(&data);
+
+ if (IS_ERR(dev_dax))
+ rc = PTR_ERR(dev_dax);
+ else {
+ /*
+ * In support of crafting multiple new devices
+ * simultaneously multiple seeds can be created,
+ * but only the first one that has not been
+ * successfully bound is tracked as the region
+ * seed.
+ */
+ if (!dax_region->seed)
+ dax_region->seed = &dev_dax->dev;
+ dax_region->youngest = &dev_dax->dev;
+ rc = len;
+ }
+ }
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(create);
+
+void kill_dev_dax(struct dev_dax *dev_dax)
+{
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+ struct inode *inode = dax_inode(dax_dev);
+
+ kill_dax(dax_dev);
+ unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+}
+EXPORT_SYMBOL_GPL(kill_dev_dax);
+
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ int i;
+
+ device_lock_assert(dax_region->dev);
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range *range = &dev_dax->ranges[i].range;
+
+ __release_region(&dax_region->res, range->start,
+ range_len(range));
+ }
+ dev_dax->nr_range = 0;
+}
+
+static void unregister_dev_dax(void *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ kill_dev_dax(dev_dax);
+ free_dev_dax_ranges(dev_dax);
+ device_del(dev);
+ put_device(dev);
+}
+
+/* a return value >= 0 indicates this invocation invalidated the id */
+static int __free_dev_dax_id(struct dev_dax *dev_dax)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct device *dev = &dev_dax->dev;
+ int rc = dev_dax->id;
+
+ device_lock_assert(dev);
+
+ if (is_static(dax_region) || dev_dax->id < 0)
+ return -1;
+ ida_free(&dax_region->ida, dev_dax->id);
+ dev_dax->id = -1;
+ return rc;
+}
+
+static int free_dev_dax_id(struct dev_dax *dev_dax)
+{
+ struct device *dev = &dev_dax->dev;
+ int rc;
+
+ device_lock(dev);
+ rc = __free_dev_dax_id(dev_dax);
+ device_unlock(dev);
+ return rc;
+}
+
+static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+ struct dev_dax *dev_dax;
+ struct device *victim;
+ bool do_del = false;
+ int rc;
+
+ if (is_static(dax_region))
+ return -EINVAL;
+
+ victim = device_find_child_by_name(dax_region->dev, buf);
+ if (!victim)
+ return -ENXIO;
+
+ device_lock(dev);
+ device_lock(victim);
+ dev_dax = to_dev_dax(victim);
+ if (victim->driver || dev_dax_size(dev_dax))
+ rc = -EBUSY;
+ else {
+ /*
+ * Invalidate the device so it does not become active
+ * again, but always preserve device-id-0 so that
+ * /sys/bus/dax/ is guaranteed to be populated while any
+ * dax_region is registered.
+ */
+ if (dev_dax->id > 0) {
+ do_del = __free_dev_dax_id(dev_dax) >= 0;
+ rc = len;
+ if (dax_region->seed == victim)
+ dax_region->seed = NULL;
+ if (dax_region->youngest == victim)
+ dax_region->youngest = NULL;
+ } else
+ rc = -EBUSY;
+ }
+ device_unlock(victim);
+
+ /* won the race to invalidate the device, clean it up */
+ if (do_del)
+ devm_release_action(dev, unregister_dev_dax, victim);
+ device_unlock(dev);
+ put_device(victim);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(delete);
+
+static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ if (is_static(dax_region))
+ if (a == &dev_attr_available_size.attr
+ || a == &dev_attr_create.attr
+ || a == &dev_attr_seed.attr
+ || a == &dev_attr_delete.attr)
+ return 0;
+ return a->mode;
+}
static struct attribute *dax_region_attributes[] = {
+ &dev_attr_available_size.attr,
&dev_attr_region_size.attr,
- &dev_attr_align.attr,
+ &dev_attr_region_align.attr,
+ &dev_attr_create.attr,
+ &dev_attr_seed.attr,
+ &dev_attr_delete.attr,
&dev_attr_id.attr,
NULL,
};
@@ -195,6 +500,7 @@ static struct attribute *dax_region_attributes[] = {
static const struct attribute_group dax_region_attribute_group = {
.name = "dax_region",
.attrs = dax_region_attributes,
+ .is_visible = dax_region_visible,
};
static const struct attribute_group *dax_region_attribute_groups[] = {
@@ -226,8 +532,8 @@ static void dax_region_unregister(void *region)
}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long pfn_flags)
+ struct range *range, int target_node, unsigned int align,
+ unsigned long flags)
{
struct dax_region *dax_region;
@@ -241,8 +547,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
return NULL;
}
- if (!IS_ALIGNED(res->start, align)
- || !IS_ALIGNED(resource_size(res), align))
+ if (!IS_ALIGNED(range->start, align)
+ || !IS_ALIGNED(range_len(range), align))
return NULL;
dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
@@ -250,13 +556,18 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
return NULL;
dev_set_drvdata(parent, dax_region);
- memcpy(&dax_region->res, res, sizeof(*res));
- dax_region->pfn_flags = pfn_flags;
kref_init(&dax_region->kref);
dax_region->id = region_id;
dax_region->align = align;
dax_region->dev = parent;
dax_region->target_node = target_node;
+ ida_init(&dax_region->ida);
+ dax_region->res = (struct resource) {
+ .start = range->start,
+ .end = range->end,
+ .flags = IORESOURCE_MEM | flags,
+ };
+
if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
kfree(dax_region);
return NULL;
@@ -269,45 +580,631 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
+static void dax_mapping_release(struct device *dev)
+{
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+
+ ida_free(&dev_dax->ida, mapping->id);
+ kfree(mapping);
+}
+
+static void unregister_dax_mapping(void *data)
+{
+ struct device *dev = data;
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ device_lock_assert(dax_region->dev);
+
+ dev_dax->ranges[mapping->range_id].mapping = NULL;
+ mapping->range_id = -1;
+
+ device_del(dev);
+ put_device(dev);
+}
+
+static struct dev_dax_range *get_dax_range(struct device *dev)
+{
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ device_lock(dax_region->dev);
+ if (mapping->range_id < 0) {
+ device_unlock(dax_region->dev);
+ return NULL;
+ }
+
+ return &dev_dax->ranges[mapping->range_id];
+}
+
+static void put_dax_range(struct dev_dax_range *dax_range)
+{
+ struct dax_mapping *mapping = dax_range->mapping;
+ struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ device_unlock(dax_region->dev);
+}
+
+static ssize_t start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#llx\n", dax_range->range.start);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(start, 0400, start_show, NULL);
+
+static ssize_t end_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#llx\n", dax_range->range.end);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(end, 0400, end_show, NULL);
+
+static ssize_t pgoff_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);
+
+static struct attribute *dax_mapping_attributes[] = {
+ &dev_attr_start.attr,
+ &dev_attr_end.attr,
+ &dev_attr_page_offset.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_mapping_attribute_group = {
+ .attrs = dax_mapping_attributes,
+};
+
+static const struct attribute_group *dax_mapping_attribute_groups[] = {
+ &dax_mapping_attribute_group,
+ NULL,
+};
+
+static struct device_type dax_mapping_type = {
+ .release = dax_mapping_release,
+ .groups = dax_mapping_attribute_groups,
+};
+
+static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct dax_mapping *mapping;
+ struct device *dev;
+ int rc;
+
+ device_lock_assert(dax_region->dev);
+
+ if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
+ "region disabled\n"))
+ return -ENXIO;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+ mapping->range_id = range_id;
+ mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
+ if (mapping->id < 0) {
+ kfree(mapping);
+ return -ENOMEM;
+ }
+ dev_dax->ranges[range_id].mapping = mapping;
+ dev = &mapping->dev;
+ device_initialize(dev);
+ dev->parent = &dev_dax->dev;
+ dev->type = &dax_mapping_type;
+ dev_set_name(dev, "mapping%d", mapping->id);
+ rc = device_add(dev);
+ if (rc) {
+ put_device(dev);
+ return rc;
+ }
+
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
+ dev);
+ if (rc)
+ return rc;
+ return 0;
+}
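The net effect is one child device per allocated range; the resulting sysfs layout, with illustrative paths assuming an instance dax0.0 that has a single mapping:

	/sys/bus/dax/devices/dax0.0/mapping0/start
	/sys/bus/dax/devices/dax0.0/mapping0/end
	/sys/bus/dax/devices/dax0.0/mapping0/page_offset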
+
+static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
+ resource_size_t size)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct resource *res = &dax_region->res;
+ struct device *dev = &dev_dax->dev;
+ struct dev_dax_range *ranges;
+ unsigned long pgoff = 0;
+ struct resource *alloc;
+ int i, rc;
+
+ device_lock_assert(dax_region->dev);
+
+ /* handle the seed alloc special case */
+ if (!size) {
+ if (dev_WARN_ONCE(dev, dev_dax->nr_range,
+ "0-size allocation must be first\n"))
+ return -EBUSY;
+ /* nr_range == 0 is elsewhere special cased as 0-size device */
+ return 0;
+ }
+
+ ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
+ * (dev_dax->nr_range + 1), GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ alloc = __request_region(res, start, size, dev_name(dev), 0);
+ if (!alloc) {
+ /*
+ * If this was an empty set of ranges nothing else
+ * will release @ranges, so do it now.
+ */
+ if (!dev_dax->nr_range) {
+ kfree(ranges);
+ ranges = NULL;
+ }
+ dev_dax->ranges = ranges;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < dev_dax->nr_range; i++)
+ pgoff += PHYS_PFN(range_len(&ranges[i].range));
+ dev_dax->ranges = ranges;
+ ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
+ .pgoff = pgoff,
+ .range = {
+ .start = alloc->start,
+ .end = alloc->end,
+ },
+ };
+
+ dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
+ &alloc->start, &alloc->end);
+ /*
+ * A dev_dax instance must be registered before mapping device
+ * children can be added. Defer to devm_create_dev_dax() to add
+ * the initial mapping device.
+ */
+ if (!device_is_registered(&dev_dax->dev))
+ return 0;
+
+ rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
+ if (rc) {
+ dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
+ &alloc->start, &alloc->end);
+ dev_dax->nr_range--;
+ __release_region(res, alloc->start, resource_size(alloc));
+ return rc;
+ }
+
+ return 0;
+}
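The pgoff accumulation above is what keeps a multi-range instance contiguous in file-offset space; a worked example with hypothetical addresses:

	/*
	 * after two 128M allocations that land discontiguously:
	 *   ranges[0]: phys 0x100000000-0x107ffffff, pgoff 0x0
	 *   ranges[1]: phys 0x140000000-0x147ffffff, pgoff 0x8000
	 * pgoff 0x8000 (byte offset 128M into the device) therefore maps
	 * to 0x140000000; the physical gap is invisible to userspace.
	 */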
+
+static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
+{
+ int last_range = dev_dax->nr_range - 1;
+ struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
+ struct dax_region *dax_region = dev_dax->region;
+ bool is_shrink = resource_size(res) > size;
+ struct range *range = &dax_range->range;
+ struct device *dev = &dev_dax->dev;
+ int rc;
+
+ device_lock_assert(dax_region->dev);
+
+ if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
+ return -EINVAL;
+
+ rc = adjust_resource(res, range->start, size);
+ if (rc)
+ return rc;
+
+ *range = (struct range) {
+ .start = range->start,
+ .end = range->start + size - 1,
+ };
+
+ dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
+ last_range, (unsigned long long) range->start,
+ (unsigned long long) range->end);
+
+ return 0;
+}
+
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- unsigned long long size = resource_size(&dev_dax->region->res);
+ unsigned long long size;
+
+ device_lock(dev);
+ size = dev_dax_size(dev_dax);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", size);
}
-static DEVICE_ATTR_RO(size);
-static int dev_dax_target_node(struct dev_dax *dev_dax)
+static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
+{
+ /*
+ * The minimum mapping granularity for a device instance is a
+ * single subsection, unless the arch says otherwise.
+ */
+ return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
+}
+
+static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
+ resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
struct dax_region *dax_region = dev_dax->region;
+ struct device *dev = &dev_dax->dev;
+ int i;
+
+ for (i = dev_dax->nr_range - 1; i >= 0; i--) {
+ struct range *range = &dev_dax->ranges[i].range;
+ struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
+ struct resource *adjust = NULL, *res;
+ resource_size_t shrink;
+
+ shrink = min_t(u64, to_shrink, range_len(range));
+ if (shrink >= range_len(range)) {
+ devm_release_action(dax_region->dev,
+ unregister_dax_mapping, &mapping->dev);
+ __release_region(&dax_region->res, range->start,
+ range_len(range));
+ dev_dax->nr_range--;
+ dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
+ (unsigned long long) range->start,
+ (unsigned long long) range->end);
+ to_shrink -= shrink;
+ if (!to_shrink)
+ break;
+ continue;
+ }
+
+ for_each_dax_region_resource(dax_region, res)
+ if (strcmp(res->name, dev_name(dev)) == 0
+ && res->start == range->start) {
+ adjust = res;
+ break;
+ }
+
+ if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
+ "failed to find matching resource\n"))
+ return -ENXIO;
+ return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
+ - shrink);
+ }
+ return 0;
+}
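Shrink walks the ranges array from the tail; the semantics, sketched for a hypothetical three-range device:

	/*
	 * ranges [A][B][C], shrink request spanning all of C plus part of B:
	 *   C: unregister_dax_mapping() + __release_region(), nr_range--
	 *   B: trimmed in place via adjust_dev_dax_range()
	 *   A: untouched
	 * a partial trim is only ever attempted on the last remaining range.
	 */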
- return dax_region->target_node;
+/*
+ * Only allow adjustments that preserve the relative pgoff of existing
+ * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
+ */
+static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
+{
+ struct dev_dax_range *last;
+ int i;
+
+ if (dev_dax->nr_range == 0)
+ return false;
+ if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
+ return false;
+ last = &dev_dax->ranges[dev_dax->nr_range - 1];
+ if (last->range.start != res->start || last->range.end != res->end)
+ return false;
+ for (i = 0; i < dev_dax->nr_range - 1; i++) {
+ struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+
+ if (dax_range->pgoff > last->pgoff)
+ return false;
+ }
+
+ return true;
}
-static ssize_t target_node_show(struct device *dev,
+static ssize_t dev_dax_resize(struct dax_region *dax_region,
+ struct dev_dax *dev_dax, resource_size_t size)
+{
+ resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
+ resource_size_t dev_size = dev_dax_size(dev_dax);
+ struct resource *region_res = &dax_region->res;
+ struct device *dev = &dev_dax->dev;
+ struct resource *res, *first;
+ resource_size_t alloc = 0;
+ int rc;
+
+ if (dev->driver)
+ return -EBUSY;
+ if (size == dev_size)
+ return 0;
+ if (size > dev_size && size - dev_size > avail)
+ return -ENOSPC;
+ if (size < dev_size)
+ return dev_dax_shrink(dev_dax, size);
+
+ to_alloc = size - dev_size;
+ if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
+ "resize of %pa misaligned\n", &to_alloc))
+ return -ENXIO;
+
+ /*
+ * Expand the device into the unused portion of the region. This
+ * may involve adjusting the end of an existing resource, or
+ * allocating a new resource.
+ */
+retry:
+ first = region_res->child;
+ if (!first)
+ return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
+
+ rc = -ENOSPC;
+ for (res = first; res; res = res->sibling) {
+ struct resource *next = res->sibling;
+
+ /* space at the beginning of the region */
+ if (res == first && res->start > dax_region->res.start) {
+ alloc = min(res->start - dax_region->res.start, to_alloc);
+ rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
+ break;
+ }
+
+ alloc = 0;
+ /* space between allocations */
+ if (next && next->start > res->end + 1)
+ alloc = min(next->start - (res->end + 1), to_alloc);
+
+ /* space at the end of the region */
+ if (!alloc && !next && res->end < region_res->end)
+ alloc = min(region_res->end - res->end, to_alloc);
+
+ if (!alloc)
+ continue;
+
+ if (adjust_ok(dev_dax, res)) {
+ rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
+ break;
+ }
+ rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
+ break;
+ }
+ if (rc)
+ return rc;
+ to_alloc -= alloc;
+ if (to_alloc)
+ goto retry;
+ return 0;
+}
+
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ ssize_t rc;
+ unsigned long long val;
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+
+ rc = kstrtoull(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (!alloc_is_aligned(dev_dax, val)) {
+ dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
+ return -EINVAL;
+ }
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+ device_lock(dev);
+ rc = dev_dax_resize(dax_region, dev_dax, val);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(size);
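A hedged userspace sketch of driving the new size attribute; the device path is illustrative, the value must satisfy alloc_is_aligned(), and the instance must be unbound from its driver:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* illustrative path; name depends on region/instance ids */
		int fd = open("/sys/bus/dax/devices/dax0.1/size", O_WRONLY);

		if (fd < 0)
			return 1;
		/* request 256M */
		dprintf(fd, "%llu\n", 256ULL << 20);
		close(fd);
		return 0;
	}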
+
+static ssize_t range_parse(const char *opt, size_t len, struct range *range)
+{
+ unsigned long long addr = 0;
+ char *start, *end, *str;
+ ssize_t rc = -EINVAL;
+
+ str = kstrdup(opt, GFP_KERNEL);
+ if (!str)
+ return rc;
+
+ end = str;
+ start = strsep(&end, "-");
+ if (!start || !end)
+ goto err;
+
+ rc = kstrtoull(start, 16, &addr);
+ if (rc)
+ goto err;
+ range->start = addr;
+
+ rc = kstrtoull(end, 16, &addr);
+ if (rc)
+ goto err;
+ range->end = addr;
+
+err:
+ kfree(str);
+ return rc;
+}
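range_parse() expects two hexadecimal addresses separated by '-', end inclusive; both forms below parse identically, since kstrtoull() with base 16 tolerates an optional 0x prefix:

	0x100000000-0x1ffffffff
	100000000-1ffffffff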
+
+static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ size_t to_alloc;
+ struct range r;
+ ssize_t rc;
+
+ rc = range_parse(buf, len, &r);
+ if (rc)
+ return rc;
+
+ rc = -ENXIO;
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return rc;
+ }
+ device_lock(dev);
+
+ to_alloc = range_len(&r);
+ if (alloc_is_aligned(dev_dax, to_alloc))
+ rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_WO(mapping);
+
+static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
+ return sprintf(buf, "%d\n", dev_dax->align);
}
-static DEVICE_ATTR_RO(target_node);
-static unsigned long long dev_dax_resource(struct dev_dax *dev_dax)
+static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
+ resource_size_t dev_size = dev_dax_size(dev_dax);
+ struct device *dev = &dev_dax->dev;
+ int i;
+
+ if (dev_size > 0 && !alloc_is_aligned(dev_dax, dev_size)) {
+ dev_dbg(dev, "%s: align %u invalid for size %pa\n",
+ __func__, dev_dax->align, &dev_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ size_t len = range_len(&dev_dax->ranges[i].range);
+
+ if (!alloc_is_aligned(dev_dax, len)) {
+ dev_dbg(dev, "%s: align %u invalid for range %d\n",
+ __func__, dev_dax->align, i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t align_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
+ unsigned long val, align_save;
+ ssize_t rc;
- return dax_region->res.start;
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return -ENXIO;
+
+ if (!dax_align_valid(val))
+ return -EINVAL;
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+
+ device_lock(dev);
+ if (dev->driver) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ align_save = dev_dax->align;
+ dev_dax->align = val;
+ rc = dev_dax_validate_align(dev_dax);
+ if (rc)
+ dev_dax->align = align_save;
+out_unlock:
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+ return rc == 0 ? len : rc;
}
+static DEVICE_ATTR_RW(align);
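Writes to 'align' are filtered by dax_align_valid(), added in dax-private.h below; the accepted values, using x86_64 sizes for illustration:

	4096        PAGE_SIZE, always valid
	2097152     PMD_SIZE,  valid when has_transparent_hugepage()
	1073741824  PUD_SIZE,  valid with CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD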
+
+static int dev_dax_target_node(struct dev_dax *dev_dax)
+{
+ struct dax_region *dax_region = dev_dax->region;
+
+ return dax_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
+}
+static DEVICE_ATTR_RO(target_node);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ unsigned long long start;
+
+ if (dev_dax->nr_range < 1)
+ start = dax_region->res.start;
+ else
+ start = dev_dax->ranges[0].range.start;
- return sprintf(buf, "%#llx\n", dev_dax_resource(dev_dax));
+ return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
@@ -333,18 +1230,26 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
+ if (a == &dev_attr_mapping.attr && is_static(dax_region))
+ return 0;
+ if ((a == &dev_attr_align.attr ||
+ a == &dev_attr_size.attr) && is_static(dax_region))
+ return 0444;
return a->mode;
}
static struct attribute *dev_dax_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_size.attr,
+ &dev_attr_mapping.attr,
&dev_attr_target_node.attr,
+ &dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
NULL,
@@ -360,24 +1265,17 @@ static const struct attribute_group *dax_attribute_groups[] = {
NULL,
};
-void kill_dev_dax(struct dev_dax *dev_dax)
-{
- struct dax_device *dax_dev = dev_dax->dax_dev;
- struct inode *inode = dax_inode(dax_dev);
-
- kill_dax(dax_dev);
- unmap_mapping_range(inode->i_mapping, 0, 0, 1);
-}
-EXPORT_SYMBOL_GPL(kill_dev_dax);
-
static void dev_dax_release(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
struct dax_device *dax_dev = dev_dax->dax_dev;
- dax_region_put(dax_region);
put_dax(dax_dev);
+ free_dev_dax_id(dev_dax);
+ dax_region_put(dax_region);
+ kfree(dev_dax->ranges);
+ kfree(dev_dax->pgmap);
kfree(dev_dax);
}
@@ -386,35 +1284,61 @@ static const struct device_type dev_dax_type = {
.groups = dax_attribute_groups,
};
-static void unregister_dev_dax(void *dev)
-{
- struct dev_dax *dev_dax = to_dev_dax(dev);
-
- dev_dbg(dev, "%s\n", __func__);
-
- kill_dev_dax(dev_dax);
- device_del(dev);
- put_device(dev);
-}
-
-struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
- struct dev_pagemap *pgmap, enum dev_dax_subsys subsys)
+struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
+ struct dax_region *dax_region = data->dax_region;
struct device *parent = dax_region->dev;
struct dax_device *dax_dev;
struct dev_dax *dev_dax;
struct inode *inode;
struct device *dev;
- int rc = -ENOMEM;
-
- if (id < 0)
- return ERR_PTR(-EINVAL);
+ int rc;
dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
if (!dev_dax)
return ERR_PTR(-ENOMEM);
- memcpy(&dev_dax->pgmap, pgmap, sizeof(*pgmap));
+ if (is_static(dax_region)) {
+ if (dev_WARN_ONCE(parent, data->id < 0,
+ "dynamic id specified to static region\n")) {
+ rc = -EINVAL;
+ goto err_id;
+ }
+
+ dev_dax->id = data->id;
+ } else {
+ if (dev_WARN_ONCE(parent, data->id >= 0,
+ "static id specified to dynamic region\n")) {
+ rc = -EINVAL;
+ goto err_id;
+ }
+
+ rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_id;
+ dev_dax->id = rc;
+ }
+
+ dev_dax->region = dax_region;
+ dev = &dev_dax->dev;
+ device_initialize(dev);
+ dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+
+ rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
+ if (rc)
+ goto err_range;
+
+ if (data->pgmap) {
+ dev_WARN_ONCE(parent, !is_static(dax_region),
+ "custom dev_pagemap requires a static dax_region\n");
+
+ dev_dax->pgmap = kmemdup(data->pgmap,
+ sizeof(struct dev_pagemap), GFP_KERNEL);
+ if (!dev_dax->pgmap) {
+ rc = -ENOMEM;
+ goto err_pgmap;
+ }
+ }
/*
* No 'host' or dax_operations since there is no access to this
@@ -423,30 +1347,26 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
- goto err;
+ goto err_alloc_dax;
}
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
- /* from here on we're committed to teardown via dax_dev_release() */
- dev = &dev_dax->dev;
- device_initialize(dev);
-
dev_dax->dax_dev = dax_dev;
- dev_dax->region = dax_region;
dev_dax->target_node = dax_region->target_node;
+ dev_dax->align = dax_region->align;
+ ida_init(&dev_dax->ida);
kref_get(&dax_region->kref);
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
- if (subsys == DEV_DAX_BUS)
+ if (data->subsys == DEV_DAX_BUS)
dev->bus = &dax_bus_type;
else
dev->class = dax_class;
dev->parent = parent;
dev->type = &dev_dax_type;
- dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = device_add(dev);
if (rc) {
@@ -459,14 +1379,27 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
if (rc)
return ERR_PTR(rc);
+ /* register mapping device for the initial allocation range */
+ if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
+ rc = devm_register_dax_mapping(dev_dax, 0);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
return dev_dax;
- err:
+err_alloc_dax:
+ kfree(dev_dax->pgmap);
+err_pgmap:
+ free_dev_dax_ranges(dev_dax);
+err_range:
+ free_dev_dax_id(dev_dax);
+err_id:
kfree(dev_dax);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(__devm_create_dev_dax);
+EXPORT_SYMBOL_GPL(devm_create_dev_dax);
static int match_always_count;
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 9e4eba67e8b9..72b92f95509f 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -3,29 +3,33 @@
#ifndef __DAX_BUS_H__
#define __DAX_BUS_H__
#include <linux/device.h>
+#include <linux/range.h>
struct dev_dax;
struct resource;
struct dax_device;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
+
+#define IORESOURCE_DAX_STATIC (1UL << 0)
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
- unsigned long long flags);
+ struct range *range, int target_node, unsigned int align,
+ unsigned long flags);
enum dev_dax_subsys {
- DEV_DAX_BUS,
+ DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */
DEV_DAX_CLASS,
};
-struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
- struct dev_pagemap *pgmap, enum dev_dax_subsys subsys);
+struct dev_dax_data {
+ struct dax_region *dax_region;
+ struct dev_pagemap *pgmap;
+ enum dev_dax_subsys subsys;
+ resource_size_t size;
+ int id;
+};
-static inline struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- int id, struct dev_pagemap *pgmap)
-{
- return __devm_create_dev_dax(dax_region, id, pgmap, DEV_DAX_BUS);
-}
+struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
/* to be deleted when DEV_DAX_CLASS is removed */
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys);
@@ -34,6 +38,8 @@ struct dax_device_driver {
struct device_driver drv;
struct list_head ids;
int match_always;
+ int (*probe)(struct dev_dax *dev);
+ int (*remove)(struct dev_dax *dev);
};
int __dax_driver_register(struct dax_device_driver *dax_drv,
@@ -44,7 +50,7 @@ void dax_driver_unregister(struct dax_device_driver *dax_drv);
void kill_dev_dax(struct dev_dax *dev_dax);
#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
-int dev_dax_probe(struct device *dev);
+int dev_dax_probe(struct dev_dax *dev_dax);
#endif
/*
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 16850d5388ab..1c974b7caae6 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
/* private routines between core files */
struct dax_device;
@@ -22,8 +23,10 @@ void dax_bus_exit(void);
* @kref: to pin while other agents have a need to do lookups
* @dev: parent device backing this region
* @align: allocation and mapping alignment for child dax devices
- * @res: physical address range of the region
- * @pfn_flags: identify whether the pfns are paged back or not
+ * @ida: instance id allocator
+ * @res: resource tree to track instance allocations
+ * @seed: allow userspace to find the first unbound seed device
+ * @youngest: allow userspace to find the most recently created device
*/
struct dax_region {
int id;
@@ -31,8 +34,16 @@ struct dax_region {
struct kref kref;
struct device *dev;
unsigned int align;
+ struct ida ida;
struct resource res;
- unsigned long long pfn_flags;
+ struct device *seed;
+ struct device *youngest;
+};
+
+struct dax_mapping {
+ struct device dev;
+ int range_id;
+ int id;
};
/**
@@ -41,22 +52,57 @@ struct dax_region {
* @region - parent region
* @dax_dev - core dax functionality
* @target_node: effective numa node if dev_dax memory range is onlined
+ * @id: ida allocated id
+ * @ida: mapping id allocator
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
- * @dax_mem_res: physical address range of hotadded DAX memory
- * @dax_mem_name: name for hotadded DAX memory via add_memory_driver_managed()
+ * @nr_range: size of @ranges
+ * @ranges: resource-span + pgoff tuples for the instance
*/
struct dev_dax {
struct dax_region *region;
struct dax_device *dax_dev;
+ unsigned int align;
int target_node;
+ int id;
+ struct ida ida;
struct device dev;
- struct dev_pagemap pgmap;
- struct resource *dax_kmem_res;
+ struct dev_pagemap *pgmap;
+ int nr_range;
+ struct dev_dax_range {
+ unsigned long pgoff;
+ struct range range;
+ struct dax_mapping *mapping;
+ } *ranges;
};
static inline struct dev_dax *to_dev_dax(struct device *dev)
{
return container_of(dev, struct dev_dax, dev);
}
+
+static inline struct dax_mapping *to_dax_mapping(struct device *dev)
+{
+ return container_of(dev, struct dax_mapping, dev);
+}
+
+phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, unsigned long size);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline bool dax_align_valid(unsigned long align)
+{
+ if (align == PUD_SIZE && IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ return true;
+ if (align == PMD_SIZE && has_transparent_hugepage())
+ return true;
+ if (align == PAGE_SIZE)
+ return true;
+ return false;
+}
+#else
+static inline bool dax_align_valid(unsigned long align)
+{
+ return align == PAGE_SIZE;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 1e89513f3c59..25e0b84a4296 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -17,7 +17,6 @@
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
const char *func)
{
- struct dax_region *dax_region = dev_dax->region;
struct device *dev = &dev_dax->dev;
unsigned long mask;
@@ -32,7 +31,7 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -EINVAL;
}
- mask = dax_region->align - 1;
+ mask = dev_dax->align - 1;
if (vma->vm_start & mask || vma->vm_end & mask) {
dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
@@ -41,14 +40,6 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -EINVAL;
}
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
- && (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info_ratelimited(dev,
- "%s: %s: fail, dax range requires MADV_DONTFORK\n",
- current->comm, func);
- return -EINVAL;
- }
-
if (!vma_is_dax(vma)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
@@ -63,15 +54,22 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
- struct resource *res = &dev_dax->region->res;
- phys_addr_t phys;
-
- phys = pgoff * PAGE_SIZE + res->start;
- if (phys >= res->start && phys <= res->end) {
- if (phys + size - 1 <= res->end)
+ int i;
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+ struct range *range = &dax_range->range;
+ unsigned long long pgoff_end;
+ phys_addr_t phys;
+
+ pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
+ if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
+ continue;
+ phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
+ if (phys + size - 1 <= range->end)
return phys;
+ break;
}
-
return -1;
}
@@ -79,21 +77,19 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf, pfn_t *pfn)
{
struct device *dev = &dev_dax->dev;
- struct dax_region *dax_region;
phys_addr_t phys;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
- dax_region = dev_dax->region;
- if (dax_region->align > PAGE_SIZE) {
+ if (dev_dax->align > PAGE_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
- dax_region->align, fault_size);
+ dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
- if (fault_size != dax_region->align)
+ if (fault_size != dev_dax->align)
return VM_FAULT_SIGBUS;
phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
@@ -102,7 +98,7 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
@@ -112,7 +108,6 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dev_dax->dev;
- struct dax_region *dax_region;
phys_addr_t phys;
pgoff_t pgoff;
unsigned int fault_size = PMD_SIZE;
@@ -120,22 +115,15 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
- dax_region = dev_dax->region;
- if (dax_region->align > PMD_SIZE) {
+ if (dev_dax->align > PMD_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
- dax_region->align, fault_size);
- return VM_FAULT_SIGBUS;
- }
-
- /* dax pmd mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
+ dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
- if (fault_size < dax_region->align)
+ if (fault_size < dev_dax->align)
return VM_FAULT_SIGBUS;
- else if (fault_size > dax_region->align)
+ else if (fault_size > dev_dax->align)
return VM_FAULT_FALLBACK;
/* if we are outside of the VMA */
@@ -150,7 +138,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
@@ -161,7 +149,6 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
{
unsigned long pud_addr = vmf->address & PUD_MASK;
struct device *dev = &dev_dax->dev;
- struct dax_region *dax_region;
phys_addr_t phys;
pgoff_t pgoff;
unsigned int fault_size = PUD_SIZE;
@@ -170,22 +157,15 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
if (check_vma(dev_dax, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
- dax_region = dev_dax->region;
- if (dax_region->align > PUD_SIZE) {
+ if (dev_dax->align > PUD_SIZE) {
dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
- dax_region->align, fault_size);
- return VM_FAULT_SIGBUS;
- }
-
- /* dax pud mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "region lacks devmap flags\n");
+ dev_dax->align, fault_size);
return VM_FAULT_SIGBUS;
}
- if (fault_size < dax_region->align)
+ if (fault_size < dev_dax->align)
return VM_FAULT_SIGBUS;
- else if (fault_size > dax_region->align)
+ else if (fault_size > dev_dax->align)
return VM_FAULT_FALLBACK;
/* if we are outside of the VMA */
@@ -200,7 +180,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+ *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
@@ -280,9 +260,8 @@ static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
struct file *filp = vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
- struct dax_region *dax_region = dev_dax->region;
- if (!IS_ALIGNED(addr, dax_region->align))
+ if (!IS_ALIGNED(addr, dev_dax->align))
return -EINVAL;
return 0;
}
@@ -291,9 +270,8 @@ static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
struct file *filp = vma->vm_file;
struct dev_dax *dev_dax = filp->private_data;
- struct dax_region *dax_region = dev_dax->region;
- return dax_region->align;
+ return dev_dax->align;
}
static const struct vm_operations_struct dax_vm_ops = {
@@ -332,13 +310,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
{
unsigned long off, off_end, off_align, len_align, addr_align, align;
struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
- struct dax_region *dax_region;
if (!dev_dax || addr)
goto out;
- dax_region = dev_dax->region;
- align = dax_region->align;
+ align = dev_dax->align;
off = pgoff << PAGE_SHIFT;
off_end = off + len;
off_align = round_up(off, align);
@@ -412,25 +388,45 @@ static void dev_dax_kill(void *dev_dax)
kill_dev_dax(dev_dax);
}
-int dev_dax_probe(struct device *dev)
+int dev_dax_probe(struct dev_dax *dev_dax)
{
- struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_device *dax_dev = dev_dax->dax_dev;
- struct resource *res = &dev_dax->region->res;
+ struct device *dev = &dev_dax->dev;
+ struct dev_pagemap *pgmap;
struct inode *inode;
struct cdev *cdev;
void *addr;
- int rc;
+ int rc, i;
- /* 1:1 map region resource range to device-dax instance range */
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev))) {
- dev_warn(dev, "could not reserve region %pR\n", res);
- return -EBUSY;
+ pgmap = dev_dax->pgmap;
+ if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
+ "static pgmap / multi-range device conflict\n"))
+ return -EINVAL;
+
+ if (!pgmap) {
+ pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
+ * (dev_dax->nr_range - 1), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+ pgmap->nr_range = dev_dax->nr_range;
+ }
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range *range = &dev_dax->ranges[i].range;
+
+ if (!devm_request_mem_region(dev, range->start,
+ range_len(range), dev_name(dev))) {
+ dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
+ i, range->start, range->end);
+ return -EBUSY;
+ }
+ /* don't update the range for static pgmap */
+ if (!dev_dax->pgmap)
+ pgmap->ranges[i] = *range;
}
- dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC;
- addr = devm_memremap_pages(dev, &dev_dax->pgmap);
+ pgmap->type = MEMORY_DEVICE_GENERIC;
+ addr = devm_memremap_pages(dev, pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -456,17 +452,15 @@ int dev_dax_probe(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
-static int dev_dax_remove(struct device *dev)
+static int dev_dax_remove(struct dev_dax *dev_dax)
{
/* all probe actions are unwound by devm */
return 0;
}
static struct dax_device_driver device_dax_driver = {
- .drv = {
- .probe = dev_dax_probe,
- .remove = dev_dax_remove,
- },
+ .probe = dev_dax_probe,
+ .remove = dev_dax_remove,
.match_always = 1,
};
diff --git a/drivers/dax/hmem/Makefile b/drivers/dax/hmem/Makefile
new file mode 100644
index 000000000000..57377b4c3d47
--- /dev/null
+++ b/drivers/dax/hmem/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
+obj-$(CONFIG_DEV_DAX_HMEM_DEVICES) += device_hmem.o
+
+device_hmem-y := device.o
+dax_hmem-y := hmem.o
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
new file mode 100644
index 000000000000..cb6401c9e9a4
--- /dev/null
+++ b/drivers/dax/hmem/device.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/platform_device.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/dax.h>
+#include <linux/mm.h>
+
+static bool nohmem;
+module_param_named(disable, nohmem, bool, 0444);
+
+void hmem_register_device(int target_nid, struct resource *r)
+{
+ /* define a clean / non-busy resource for the platform device */
+ struct resource res = {
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+ int rc, id;
+
+ if (nohmem)
+ return;
+
+ rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ pr_err("memregion allocation failure for %pr\n", &res);
+ return;
+ }
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ pr_err("hmem device allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ pdev->dev.numa_node = numa_map_to_online_node(target_nid);
+ info = (struct memregion_info) {
+ .target_node = target_nid,
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ pr_err("hmem memregion_info allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ rc = platform_device_add_resources(pdev, &res, 1);
+ if (rc < 0) {
+ pr_err("hmem resource allocation failure for %pr\n", &res);
+ goto out_resource;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "device add failed for %pr\n", &res);
+ goto out_resource;
+ }
+
+ return;
+
+out_resource:
+ put_device(&pdev->dev);
+out_pdev:
+ memregion_free(id);
+}
+
+static __init int hmem_register_one(struct resource *res, void *data)
+{
+ /*
+ * If the resource is not a top-level resource it was already
+ * assigned to a device by the HMAT parsing.
+ */
+ if (res->parent != &iomem_resource) {
+ pr_info("HMEM: skip %pr, already claimed\n", res);
+ return 0;
+ }
+
+ hmem_register_device(phys_to_target_node(res->start), res);
+
+ return 0;
+}
+
+static __init int hmem_init(void)
+{
+ walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
+ IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
+ return 0;
+}
+
+/*
+ * As this is a fallback for address ranges unclaimed by the ACPI HMAT
+ * parsing, it must be at an initcall level greater than hmat_init().
+ */
+late_initcall(hmem_init);
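For reference, the ordering this relies on (levels per include/linux/init.h):

	/*
	 *   6  device_initcall -> hmat_init() claims HMAT-described ranges
	 *   7  late_initcall   -> hmem_init() sweeps up what remains
	 */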
diff --git a/drivers/dax/hmem.c b/drivers/dax/hmem/hmem.c
index fe7214daf62e..1bf040dbc834 100644
--- a/drivers/dax/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -3,30 +3,39 @@
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
-#include "bus.h"
+#include "../bus.h"
+
+static bool region_idle;
+module_param_named(region_idle, region_idle, bool, 0644);
static int dax_hmem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dev_pagemap pgmap = { };
struct dax_region *dax_region;
struct memregion_info *mri;
+ struct dev_dax_data data;
struct dev_dax *dev_dax;
struct resource *res;
+ struct range range;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
mri = dev->platform_data;
- memcpy(&pgmap.res, res, sizeof(*res));
-
- dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
- PMD_SIZE, PFN_DEV|PFN_MAP);
+ range.start = res->start;
+ range.end = res->end;
+ dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
+ PMD_SIZE, 0);
if (!dax_region)
return -ENOMEM;
- dev_dax = devm_create_dev_dax(dax_region, 0, &pgmap);
+ data = (struct dev_dax_data) {
+ .dax_region = dax_region,
+ .id = -1,
+ .size = region_idle ? 0 : resource_size(res),
+ };
+ dev_dax = devm_create_dev_dax(&data);
if (IS_ERR(dev_dax))
return PTR_ERR(dev_dax);
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 275aa5f87399..b4368c5b6a0c 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -19,17 +19,34 @@ static const char *kmem_name;
/* Set if any memory will remain added when the driver will be unloaded. */
static bool any_hotremove_failed;
-int dev_dax_kmem_probe(struct device *dev)
+static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
{
- struct dev_dax *dev_dax = to_dev_dax(dev);
- struct resource *res = &dev_dax->region->res;
- resource_size_t kmem_start;
- resource_size_t kmem_size;
- resource_size_t kmem_end;
- struct resource *new_res;
- const char *new_res_name;
+ struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+ struct range *range = &dax_range->range;
+
+ /* memory-block align the hotplug range */
+ r->start = ALIGN(range->start, memory_block_size_bytes());
+ r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
+ if (r->start >= r->end) {
+ r->start = range->start;
+ r->end = range->end;
+ return -ENOSPC;
+ }
+ return 0;
+}
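A worked example of the block alignment, assuming memory_block_size_bytes() returns 128M (the addresses are hypothetical):

	/*
	 *   range:    0x13fe00000-0x18fdfffff
	 *   r->start: ALIGN(0x13fe00000, 128M)          = 0x140000000
	 *   r->end:   ALIGN_DOWN(0x18fe00000, 128M) - 1 = 0x187ffffff
	 * the unaligned head and tail are simply never hotplugged; if no
	 * block-aligned span remains, -ENOSPC is returned.
	 */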
+
+struct dax_kmem_data {
+ const char *res_name;
+ struct resource *res[];
+};
+
+static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
+{
+ struct device *dev = &dev_dax->dev;
+ struct dax_kmem_data *data;
+ int rc = -ENOMEM;
+ int i, mapped = 0;
int numa_node;
- int rc;
/*
* Ensure good NUMA information for the persistent memory.
@@ -39,68 +56,91 @@ int dev_dax_kmem_probe(struct device *dev)
*/
numa_node = dev_dax->target_node;
if (numa_node < 0) {
- dev_warn(dev, "rejecting DAX region %pR with invalid node: %d\n",
- res, numa_node);
+ dev_warn(dev, "rejecting DAX region with invalid node: %d\n",
+ numa_node);
return -EINVAL;
}
- /* Hotplug starting at the beginning of the next block: */
- kmem_start = ALIGN(res->start, memory_block_size_bytes());
-
- kmem_size = resource_size(res);
- /* Adjust the size down to compensate for moving up kmem_start: */
- kmem_size -= kmem_start - res->start;
- /* Align the size down to cover only complete blocks: */
- kmem_size &= ~(memory_block_size_bytes() - 1);
- kmem_end = kmem_start + kmem_size;
-
- new_res_name = kstrdup(dev_name(dev), GFP_KERNEL);
- if (!new_res_name)
+ data = kzalloc(sizeof(*data) + sizeof(struct resource *) * dev_dax->nr_range, GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- /* Region is permanently reserved if hotremove fails. */
- new_res = request_mem_region(kmem_start, kmem_size, new_res_name);
- if (!new_res) {
- dev_warn(dev, "could not reserve region [%pa-%pa]\n",
- &kmem_start, &kmem_end);
- kfree(new_res_name);
- return -EBUSY;
+ data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!data->res_name)
+ goto err_res_name;
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct resource *res;
+ struct range range;
+
+ rc = dax_kmem_range(dev_dax, i, &range);
+ if (rc) {
+ dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
+ i, range.start, range.end);
+ continue;
+ }
+
+ /* Region is permanently reserved if hotremove fails. */
+ res = request_mem_region(range.start, range_len(&range), data->res_name);
+ if (!res) {
+ dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
+ i, range.start, range.end);
+ /*
+ * Once some memory has been onlined we can't
+ * assume that it can be un-onlined safely.
+ */
+ if (mapped)
+ continue;
+ rc = -EBUSY;
+ goto err_request_mem;
+ }
+ data->res[i] = res;
+
+ /*
+ * Set flags appropriate for System RAM. Leave ..._BUSY clear
+ * so that add_memory() can add a child resource. Do not
+ * inherit flags from the parent since it may set new flags
+ * unknown to us that will break add_memory() below.
+ */
+ res->flags = IORESOURCE_SYSTEM_RAM;
+
+ /*
+ * Ensure that future kexec'd kernels will not treat
+ * this as RAM automatically.
+ */
+ rc = add_memory_driver_managed(numa_node, range.start,
+ range_len(&range), kmem_name, MHP_NONE);
+
+ if (rc) {
+ dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
+ i, range.start, range.end);
+ release_resource(res);
+ kfree(res);
+ data->res[i] = NULL;
+ if (mapped)
+ continue;
+ goto err_request_mem;
+ }
+ mapped++;
}
- /*
- * Set flags appropriate for System RAM. Leave ..._BUSY clear
- * so that add_memory() can add a child resource. Do not
- * inherit flags from the parent since it may set new flags
- * unknown to us that will break add_memory() below.
- */
- new_res->flags = IORESOURCE_SYSTEM_RAM;
-
- /*
- * Ensure that future kexec'd kernels will not treat this as RAM
- * automatically.
- */
- rc = add_memory_driver_managed(numa_node, new_res->start,
- resource_size(new_res), kmem_name);
- if (rc) {
- release_resource(new_res);
- kfree(new_res);
- kfree(new_res_name);
- return rc;
- }
- dev_dax->dax_kmem_res = new_res;
+ dev_set_drvdata(dev, data);
return 0;
+
+err_request_mem:
+ kfree(data->res_name);
+err_res_name:
+ kfree(data);
+ return rc;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static int dev_dax_kmem_remove(struct device *dev)
+static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
- struct dev_dax *dev_dax = to_dev_dax(dev);
- struct resource *res = dev_dax->dax_kmem_res;
- resource_size_t kmem_start = res->start;
- resource_size_t kmem_size = resource_size(res);
- const char *res_name = res->name;
- int rc;
+ int i, success = 0;
+ struct device *dev = &dev_dax->dev;
+ struct dax_kmem_data *data = dev_get_drvdata(dev);
/*
* We have one shot for removing memory, if some memory blocks were not
@@ -108,25 +148,39 @@ static int dev_dax_kmem_remove(struct device *dev)
* there is no way to hotremove this memory until reboot because device
* unbind will succeed even if we return failure.
*/
- rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
- if (rc) {
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range range;
+ int rc;
+
+ rc = dax_kmem_range(dev_dax, i, &range);
+ if (rc)
+ continue;
+
+ rc = remove_memory(dev_dax->target_node, range.start,
+ range_len(&range));
+ if (rc == 0) {
+ release_resource(data->res[i]);
+ kfree(data->res[i]);
+ data->res[i] = NULL;
+ success++;
+ continue;
+ }
any_hotremove_failed = true;
dev_err(dev,
- "DAX region %pR cannot be hotremoved until the next reboot\n",
- res);
- return rc;
+ "mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
+ i, range.start, range.end);
}
- /* Release and free dax resources */
- release_resource(res);
- kfree(res);
- kfree(res_name);
- dev_dax->dax_kmem_res = NULL;
+ if (success >= dev_dax->nr_range) {
+ kfree(data->res_name);
+ kfree(data);
+ dev_set_drvdata(dev, NULL);
+ }
return 0;
}
#else
-static int dev_dax_kmem_remove(struct device *dev)
+static int dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
/*
* Without hotremove purposely leak the request_mem_region() for the
@@ -141,10 +195,8 @@ static int dev_dax_kmem_remove(struct device *dev)
#endif /* CONFIG_MEMORY_HOTREMOVE */
static struct dax_device_driver device_dax_kmem_driver = {
- .drv = {
- .probe = dev_dax_kmem_probe,
- .remove = dev_dax_kmem_remove,
- },
+ .probe = dev_dax_kmem_probe,
+ .remove = dev_dax_kmem_remove,
};
static int __init dax_kmem_init(void)
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
index d7b15e6f30c5..863c114fd88c 100644
--- a/drivers/dax/pmem/compat.c
+++ b/drivers/dax/pmem/compat.c
@@ -22,7 +22,7 @@ static int dax_pmem_compat_probe(struct device *dev)
return -ENOMEM;
device_lock(&dev_dax->dev);
- rc = dev_dax_probe(&dev_dax->dev);
+ rc = dev_dax_probe(dev_dax);
device_unlock(&dev_dax->dev);
devres_close_group(&dev_dax->dev, dev_dax);
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 2bedf8414fff..62b26bfceab1 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -9,11 +9,12 @@
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
{
- struct resource res;
+ struct range range;
int rc, id, region_id;
resource_size_t offset;
struct nd_pfn_sb *pfn_sb;
struct dev_dax *dev_dax;
+ struct dev_dax_data data;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
struct dev_pagemap pgmap = { };
@@ -49,16 +50,23 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
if (rc != 2)
return ERR_PTR(-EINVAL);
- /* adjust the dax_region resource to the start of data */
- memcpy(&res, &pgmap.res, sizeof(res));
- res.start += offset;
- dax_region = alloc_dax_region(dev, region_id, &res,
+ /* adjust the dax_region range to the start of data */
+ range = pgmap.range;
+ range.start += offset;
+ dax_region = alloc_dax_region(dev, region_id, &range,
nd_region->target_node, le32_to_cpu(pfn_sb->align),
- PFN_DEV|PFN_MAP);
+ IORESOURCE_DAX_STATIC);
if (!dax_region)
return ERR_PTR(-ENOMEM);
- dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys);
+ data = (struct dev_dax_data) {
+ .dax_region = dax_region,
+ .id = id,
+ .pgmap = &pgmap,
+ .subsys = subsys,
+ .size = range_len(&range),
+ };
+ dev_dax = devm_create_dev_dax(&data);
/* child dev_dax instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index e84070b55463..edc279be3e59 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -46,7 +46,8 @@ EXPORT_SYMBOL_GPL(dax_read_unlock);
int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
pgoff_t *pgoff)
{
- phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
+ sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
+ phys_addr_t phys_off = (start_sect + sector) * 512;
if (pgoff)
*pgoff = PHYS_PFN(phys_off);
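Worked numbers for the offset math, with an illustrative partition start:

	/* partition at sector 2048, request for sector 8 within it:
	 *   phys_off = (2048 + 8) * 512 = 1052672
	 *   *pgoff   = PHYS_PFN(1052672) = 257   (4K pages)
	 */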
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 56efbeb7851e..6765c03334bc 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -213,20 +213,21 @@ EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
* devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
* devicetree.
* @dev : the pointer to the given device
+ * @phandle_name: name of property holding a phandle value
* @index : the index into list of devfreq-event device
*
* Note that this function returns a pointer to the devfreq-event device.
*/
struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
- int index)
+ const char *phandle_name, int index)
{
struct device_node *node;
struct devfreq_event_dev *edev;
- if (!dev->of_node)
+ if (!dev->of_node || !phandle_name)
return ERR_PTR(-EINVAL);
- node = of_parse_phandle(dev->of_node, "devfreq-events", index);
+ node = of_parse_phandle(dev->of_node, phandle_name, index);
if (!node)
return ERR_PTR(-ENODEV);
@@ -258,19 +259,20 @@ EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
/**
* devfreq_event_get_edev_count() - Get the count of devfreq-event dev
* @dev : the pointer to the given device
+ * @phandle_name: name of property holding a phandle value
*
* Note that this function returns the count of devfreq-event devices.
*/
-int devfreq_event_get_edev_count(struct device *dev)
+int devfreq_event_get_edev_count(struct device *dev, const char *phandle_name)
{
int count;
- if (!dev->of_node) {
+ if (!dev->of_node || !phandle_name) {
dev_err(dev, "device does not have a device node entry\n");
return -EINVAL;
}
- count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
+ count = of_property_count_elems_of_size(dev->of_node, phandle_name,
sizeof(u32));
if (count < 0) {
dev_err(dev,
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 071b59fe84d2..861c100f9fac 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -984,47 +984,74 @@ EXPORT_SYMBOL(devm_devfreq_add_device);
#ifdef CONFIG_OF
/*
+ * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
+ * @node - pointer to device_node
+ *
+ * return the instance of devfreq device
+ */
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+ struct devfreq *devfreq;
+
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ if (devfreq->dev.parent
+ && devfreq->dev.parent->of_node == node) {
+ mutex_unlock(&devfreq_list_lock);
+ return devfreq;
+ }
+ }
+ mutex_unlock(&devfreq_list_lock);
+
+ return ERR_PTR(-ENODEV);
+}
+
+/*
* devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
* @dev - instance to the given device
+ * @phandle_name - name of property holding a phandle value
* @index - index into list of devfreq
*
* return the instance of devfreq device
*/
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index)
{
struct device_node *node;
struct devfreq *devfreq;
- if (!dev)
+ if (!dev || !phandle_name)
return ERR_PTR(-EINVAL);
if (!dev->of_node)
return ERR_PTR(-EINVAL);
- node = of_parse_phandle(dev->of_node, "devfreq", index);
+ node = of_parse_phandle(dev->of_node, phandle_name, index);
if (!node)
return ERR_PTR(-ENODEV);
- mutex_lock(&devfreq_list_lock);
- list_for_each_entry(devfreq, &devfreq_list, node) {
- if (devfreq->dev.parent
- && devfreq->dev.parent->of_node == node) {
- mutex_unlock(&devfreq_list_lock);
- of_node_put(node);
- return devfreq;
- }
- }
- mutex_unlock(&devfreq_list_lock);
+ devfreq = devfreq_get_devfreq_by_node(node);
of_node_put(node);
- return ERR_PTR(-EPROBE_DEFER);
+ return devfreq;
}
+
#else
-struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
+ const char *phandle_name, int index)
{
return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
/**
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 8fa8eb541373..1e684a448c9e 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -193,7 +193,7 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
* Get the devfreq-event devices to get the current utilization of
* buses. This raw data will be used in devfreq ondemand governor.
*/
- count = devfreq_event_get_edev_count(dev);
+ count = devfreq_event_get_edev_count(dev, "devfreq-events");
if (count < 0) {
dev_err(dev, "failed to get the count of devfreq-event dev\n");
ret = count;
@@ -209,7 +209,8 @@ static int exynos_bus_parent_parse_of(struct device_node *np,
}
for (i = 0; i < count; i++) {
- bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+ bus->edev[i] = devfreq_event_get_edev_by_phandle(dev,
+ "devfreq-events", i);
if (IS_ERR(bus->edev[i])) {
ret = -EPROBE_DEFER;
goto err_regulator;
@@ -360,7 +361,7 @@ static int exynos_bus_profile_init_passive(struct exynos_bus *bus,
profile->exit = exynos_bus_passive_exit;
/* Get the instance of parent devfreq device */
- parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, "devfreq", 0);
if (IS_ERR(parent_devfreq))
return -EPROBE_DEFER;
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 027769e39f9b..2e912166a993 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -341,7 +341,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
return PTR_ERR(data->dmc_clk);
}
- data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
+ data->edev = devfreq_event_get_edev_by_phandle(dev, "devfreq-events", 0);
if (IS_ERR(data->edev))
return -EPROBE_DEFER;
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index dedd39de7367..f5e74c2ede85 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -822,8 +822,6 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- reset_control_assert(tegra->reset);
-
err = clk_prepare_enable(tegra->clock);
if (err) {
dev_err(&pdev->dev,
@@ -831,7 +829,11 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
- reset_control_deassert(tegra->reset);
+ err = reset_control_reset(tegra->reset);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to reset hardware: %d\n", err);
+ goto disable_clk;
+ }
rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
if (rate < 0) {
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 43624b4ee13d..7475e09b0680 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -283,6 +283,7 @@ EXPORT_SYMBOL(dma_fence_begin_signalling);
/**
* dma_fence_end_signalling - end a critical DMA fence signalling section
+ * @cookie: opaque cookie from dma_fence_begin_signalling()
*
* Closes a critical section annotation opened by dma_fence_begin_signalling().
*/
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 434a3314fb0e..1c8f2581cb09 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -98,12 +98,14 @@ static int __init dma_resv_lockdep(void)
struct mm_struct *mm = mm_alloc();
struct ww_acquire_ctx ctx;
struct dma_resv obj;
+ struct address_space mapping;
int ret;
if (!mm)
return -ENOMEM;
dma_resv_init(&obj);
+ address_space_init_once(&mapping);
mmap_read_lock(mm);
ww_acquire_init(&ctx, &reservation_ww_class);
@@ -111,6 +113,9 @@ static int __init dma_resv_lockdep(void)
if (ret == -EDEADLK)
dma_resv_lock_slow(&obj, &ctx);
fs_reclaim_acquire(GFP_KERNEL);
+ /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+ i_mmap_lock_write(&mapping);
+ i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
__dma_fence_might_wait();
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 626cf7fd033a..e55384dc115b 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -10,7 +10,7 @@
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/highmem.h>
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
index 9f964ca3f59c..d0696cf937af 100644
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heaps_attachment *a = attachment->priv;
- struct sg_table *table;
-
- table = &a->table;
+ struct sg_table *table = &a->table;
+ int ret;
- if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
- direction))
- table = ERR_PTR(-ENOMEM);
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ table = ERR_PTR(ret);
return table;
}
@@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
- dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index acb26c627d27..db732f71e59a 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
GFP_KERNEL);
if (ret < 0)
goto err;
- if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) {
- ret = -EINVAL;
+ ret = dma_map_sgtable(dev, sg, direction, 0);
+ if (ret < 0)
goto err;
- }
return sg;
err:
@@ -78,7 +77,7 @@ err:
static void put_sg_table(struct device *dev, struct sg_table *sg,
enum dma_data_direction direction)
{
- dma_unmap_sg(dev, sg->sgl, sg->nents, direction);
+ dma_unmap_sgtable(dev, sg, direction, 0);
sg_free_table(sg);
kfree(sg);
}
@@ -308,6 +307,9 @@ static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
static const struct file_operations udmabuf_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = udmabuf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = udmabuf_ioctl,
+#endif
};
static struct miscdevice udmabuf_misc = {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 518a1437862a..90284ffda58a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -318,24 +318,6 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
-config INTEL_MIC_X100_DMA
- tristate "Intel MIC X100 DMA Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS
- select DMA_ENGINE
- help
- This enables DMA support for the Intel Many Integrated Core
- (MIC) family of PCIe form factor coprocessor X100 devices that
- run a 64 bit Linux OS. This driver will be used by both MIC
- host and card drivers.
-
- If you are building host kernel with a MIC device or a card
- kernel for a MIC device, then say M (recommended) or Y, else
- say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
config K3_DMA
tristate "Hisilicon K3 DMA support"
depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e60f81331d4c..948a8da05f8b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 321ac3a7aa41..9a841ce5f0c5 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -678,11 +678,11 @@ static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
/**
* msgdma_tasklet - Schedule completion tasklet
- * @data: Pointer to the Altera sSGDMA channel structure
+ * @t: Pointer to the tasklet in the Altera mSGDMA device structure
*/
-static void msgdma_tasklet(unsigned long data)
+static void msgdma_tasklet(struct tasklet_struct *t)
{
- struct msgdma_device *mdev = (struct msgdma_device *)data;
+ struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
@@ -830,7 +830,7 @@ static int msgdma_probe(struct platform_device *pdev)
if (ret)
return ret;
- tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
+ tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);
dma_cookie_init(&mdev->dmachan);
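
/*
 * The tasklet conversion repeated throughout the drivers below, shown
 * once as a minimal sketch with illustrative names: tasklet_setup()
 * replaces tasklet_init(), and the callback recovers its container
 * from the tasklet_struct with from_tasklet(), a type-safe
 * container_of() wrapper, instead of casting an unsigned long.
 */
struct example_dev {
	struct tasklet_struct tasklet;
	/* ... driver state ... */
};

static void example_tasklet(struct tasklet_struct *t)
{
	struct example_dev *ed = from_tasklet(ed, t, tasklet);

	/* ... process completions for ed ... */
}

static void example_init(struct example_dev *ed)
{
	tasklet_setup(&ed->tasklet, example_tasklet);
}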
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a2cf25c6e3b3..7eaee5b705b1 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -598,9 +598,9 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
/*-- IRQ & Tasklet ---------------------------------------------------*/
-static void atc_tasklet(unsigned long data)
+static void atc_tasklet(struct tasklet_struct *t)
{
- struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+ struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
return atc_handle_error(atchan);
@@ -1892,8 +1892,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&atchan->queue);
INIT_LIST_HEAD(&atchan->free_list);
- tasklet_init(&atchan->tasklet, atc_tasklet,
- (unsigned long)atchan);
+ tasklet_setup(&atchan->tasklet, atc_tasklet);
atc_enable_chan_irq(atdma, i);
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index fd92f048c491..3b53115db268 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1613,9 +1613,9 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
/* Then continue with usual descriptor management */
}
-static void at_xdmac_tasklet(unsigned long data)
+static void at_xdmac_tasklet(struct tasklet_struct *t)
{
- struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
+ struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
struct at_xdmac_desc *desc;
u32 error_mask;
@@ -2063,8 +2063,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
spin_lock_init(&atchan->lock);
INIT_LIST_HEAD(&atchan->xfers_list);
INIT_LIST_HEAD(&atchan->free_descs_list);
- tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
- (unsigned long)atchan);
+ tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
/* Clear pending interrupts. */
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 4768ef26013b..630dfbb01a40 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -41,14 +41,12 @@
* struct bcm2835_dmadev - BCM2835 DMA controller
* @ddev: DMA device
* @base: base address of register map
- * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
* @zero_page: bus address of zero page (to detect transactions copying from
* zero page and avoid accessing memory if so)
*/
struct bcm2835_dmadev {
struct dma_device ddev;
void __iomem *base;
- struct device_dma_parameters dma_parms;
dma_addr_t zero_page;
};
@@ -902,7 +900,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
if (!od)
return -ENOMEM;
- pdev->dev.dma_parms = &od->dma_parms;
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 1092d4ce723e..95b9b2f5358e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1868,9 +1868,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
* This tasklet is called from the interrupt handler to
* handle each descriptor (DMA job) that is sent to a channel.
*/
-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
{
- struct coh901318_chan *cohc = (struct coh901318_chan *) data;
+ struct coh901318_chan *cohc = from_tasklet(cohc, t, tasklet);
struct coh901318_desc *cohd_fin;
unsigned long flags;
struct dmaengine_desc_callback cb;
@@ -2615,8 +2615,7 @@ static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
INIT_LIST_HEAD(&cohc->active);
INIT_LIST_HEAD(&cohc->queue);
- tasklet_init(&cohc->tasklet, dma_tasklet,
- (unsigned long) cohc);
+ tasklet_setup(&cohc->tasklet, dma_tasklet);
list_add_tail(&cohc->chan.device_node,
&dma->channels);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index f1d149e32839..5161b73c30c4 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -6,6 +6,7 @@
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -45,6 +46,16 @@
* there is no address that can or needs to be configured for the device side.
*/
+#define AXI_DMAC_REG_INTERFACE_DESC 0x10
+#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
+#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
+#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
+#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
+#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
+#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
+#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
+#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
+
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88
@@ -134,8 +145,6 @@ struct axi_dmac {
struct dma_device dma_dev;
struct axi_dmac_chan chan;
-
- struct device_dma_parameters dma_parms;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
@@ -717,6 +726,20 @@ static const struct regmap_config axi_dmac_regmap_config = {
.writeable_reg = axi_dmac_regmap_rdwr,
};
+static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
+{
+ chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+ if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_MEM_TO_MEM;
+ else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_MEM_TO_DEV;
+ else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+ chan->direction = DMA_DEV_TO_MEM;
+ else
+ chan->direction = DMA_DEV_TO_DEV;
+}
+
/*
* The configuration stored in the devicetree matches the configuration
* parameters of the peripheral instance and allows the driver to know which
@@ -760,26 +783,81 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
return ret;
chan->dest_width = val / 8;
- chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
+ axi_dmac_adjust_chan_params(chan);
- if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
- chan->direction = DMA_MEM_TO_MEM;
- else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
- chan->direction = DMA_MEM_TO_DEV;
- else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
- chan->direction = DMA_DEV_TO_MEM;
- else
- chan->direction = DMA_DEV_TO_DEV;
+ return 0;
+}
+
+static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
+{
+ struct device_node *of_channels, *of_chan;
+ int ret;
+
+ of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
+ if (of_channels == NULL)
+ return -ENODEV;
+
+ for_each_child_of_node(of_channels, of_chan) {
+ ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+ if (ret) {
+ of_node_put(of_chan);
+ of_node_put(of_channels);
+ return -EINVAL;
+ }
+ }
+ of_node_put(of_channels);
return 0;
}
-static int axi_dmac_detect_caps(struct axi_dmac *dmac)
+static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
struct axi_dmac_chan *chan = &dmac->chan;
- unsigned int version;
+ unsigned int val, desc;
- version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
+ desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
+ if (desc == 0) {
+ dev_err(dev, "DMA interface register reads zero\n");
+ return -EFAULT;
+ }
+
+ val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
+ if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+ dev_err(dev, "Invalid source bus type read: %d\n", val);
+ return -EINVAL;
+ }
+ chan->src_type = val;
+
+ val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
+ if (val > AXI_DMAC_BUS_TYPE_FIFO) {
+ dev_err(dev, "Invalid destination bus type read: %d\n", val);
+ return -EINVAL;
+ }
+ chan->dest_type = val;
+
+ val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
+ if (val == 0) {
+ dev_err(dev, "Source bus width is zero\n");
+ return -EINVAL;
+ }
+ /* widths are stored in log2 */
+ chan->src_width = 1 << val;
+
+ val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
+ if (val == 0) {
+ dev_err(dev, "Destination bus width is zero\n");
+ return -EINVAL;
+ }
+ chan->dest_width = 1 << val;
+
+ axi_dmac_adjust_chan_params(chan);
+
+ return 0;
+}
+
+static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
+{
+ struct axi_dmac_chan *chan = &dmac->chan;
axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
@@ -826,11 +904,11 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac)
static int axi_dmac_probe(struct platform_device *pdev)
{
- struct device_node *of_channels, *of_chan;
struct dma_device *dma_dev;
struct axi_dmac *dmac;
struct resource *res;
struct regmap *regmap;
+ unsigned int version;
int ret;
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
@@ -852,23 +930,22 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->clk))
return PTR_ERR(dmac->clk);
- INIT_LIST_HEAD(&dmac->chan.active_descs);
+ ret = clk_prepare_enable(dmac->clk);
+ if (ret < 0)
+ return ret;
- of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
- if (of_channels == NULL)
- return -ENODEV;
+ version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
- for_each_child_of_node(of_channels, of_chan) {
- ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
- if (ret) {
- of_node_put(of_chan);
- of_node_put(of_channels);
- return -EINVAL;
- }
- }
- of_node_put(of_channels);
+ if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
+ ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
+ else
+ ret = axi_dmac_parse_dt(&pdev->dev, dmac);
+
+ if (ret < 0)
+ goto err_clk_disable;
+
+ INIT_LIST_HEAD(&dmac->chan.active_descs);
- pdev->dev.dma_parms = &dmac->dma_parms;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
dma_dev = &dmac->dma_dev;
@@ -894,11 +971,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dmac->chan.vchan.desc_free = axi_dmac_desc_free;
vchan_init(&dmac->chan.vchan, dma_dev);
- ret = clk_prepare_enable(dmac->clk);
- if (ret < 0)
- return ret;
-
- ret = axi_dmac_detect_caps(dmac);
+ ret = axi_dmac_detect_caps(dmac, version);
if (ret)
goto err_clk_disable;
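
/*
 * A sketch of the interface-descriptor decode added above, using the
 * driver's own accessors on a hypothetical register value: bus widths
 * are stored as log2 of the width in bytes, so a raw field of 3
 * decodes to 1 << 3 = 8 bytes (a 64-bit bus).
 */
static void example_decode(struct axi_dmac *dmac)
{
	u32 desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	u32 src_width = 1 << AXI_DMAC_DMA_SRC_WIDTH_GET(desc);	/* bytes */
	u32 dst_type = AXI_DMAC_DMA_DST_TYPE_GET(desc);

	dev_dbg(dmac->dma_dev.dev, "src %u bytes, dst type %u\n",
		src_width, dst_type);
}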
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 8beed91428bd..a608efaa435f 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
unsigned long residue = 0;
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
- return status;
-
- spin_lock_irqsave(&jzchan->vchan.lock, flags);
+ goto out_unlock_irqrestore;
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
@@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
status = DMA_ERROR;
+out_unlock_irqrestore:
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
return status;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a53e71d2bbd4..7974fa0400d8 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -847,8 +847,10 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
}
mutex_unlock(&dma_list_mutex);
- if (IS_ERR_OR_NULL(chan))
- return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(chan))
+ return chan;
+ if (!chan)
+ return ERR_PTR(-EPROBE_DEFER);
found:
#ifdef CONFIG_DEBUG_FS
@@ -872,24 +874,6 @@ found:
EXPORT_SYMBOL_GPL(dma_request_chan);
/**
- * dma_request_slave_channel - try to allocate an exclusive slave channel
- * @dev: pointer to client device structure
- * @name: slave channel name
- *
- * Returns pointer to appropriate DMA channel on success or NULL.
- */
-struct dma_chan *dma_request_slave_channel(struct device *dev,
- const char *name)
-{
- struct dma_chan *ch = dma_request_chan(dev, name);
- if (IS_ERR(ch))
- return NULL;
-
- return ch;
-}
-EXPORT_SYMBOL_GPL(dma_request_slave_channel);
-
-/**
* dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
* @mask: capabilities that the channel must satisfy
*
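
/*
 * A minimal migration sketch for former dma_request_slave_channel()
 * callers ("rx" is an illustrative channel name): dma_request_chan()
 * returns an ERR_PTR instead of hiding failures as NULL, so deferred
 * probing can propagate.
 */
static int example_request(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */
	*out = chan;
	return 0;
}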
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a819611b8892..a3a172173e34 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -7,6 +7,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/err.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -454,8 +455,13 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
static void result(const char *err, unsigned int n, unsigned int src_off,
unsigned int dst_off, unsigned int len, unsigned long data)
{
- pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
- current->comm, n, err, src_off, dst_off, len, data);
+ if (IS_ERR_VALUE(data)) {
+ pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n",
+ current->comm, n, err, src_off, dst_off, len, data);
+ } else {
+ pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
+ current->comm, n, err, src_off, dst_off, len, data);
+ }
}
static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
@@ -1052,13 +1058,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
static bool filter(struct dma_chan *chan, void *param)
{
- struct dmatest_params *params = param;
-
- if (!dmatest_match_channel(params, chan) ||
- !dmatest_match_device(params, chan->device))
- return false;
- else
- return true;
+ return dmatest_match_channel(param, chan) && dmatest_match_device(param, chan->device);
}
static void request_channels(struct dmatest_info *info,
@@ -1249,15 +1249,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
add_threaded_test(info);
/* Check if channel was added successfully */
- dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
-
- if (dtc->chan) {
+ if (!list_empty(&info->channels)) {
/*
* if the new channel was not successfully added, revert the
* "test_channel" string to the name of the last successfully
* added channel, except when the user issues an empty string
* to the channel parameter.
*/
+ dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
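
/*
 * Why result() above branches on IS_ERR_VALUE(): the same unsigned
 * long carries either a byte count or a negated errno, and errors
 * should print signed. A minimal sketch:
 */
static void example_report(unsigned long data)
{
	if (IS_ERR_VALUE(data))
		pr_info("failed: %ld\n", (long)data);	/* e.g. -22 */
	else
		pr_info("ok: %lu bytes\n", data);
}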
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 42739508c0d8..6f62711a4c94 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -293,7 +293,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
if (!regs)
return;
- base_dir = debugfs_create_dir(dw->name, 0);
+ base_dir = debugfs_create_dir(dw->name, NULL);
if (!base_dir)
return;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h
index cd6476884507..dfd70e223c2f 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h
@@ -40,7 +40,7 @@ struct dw_edma_v0_ch {
struct dw_edma_v0_ch_regs wr; /* 0x200 */
u32 padding_1[55]; /* [0x224..0x2fc] */
struct dw_edma_v0_ch_regs rd; /* 0x300 */
- u32 padding_2[55]; /* [0x224..0x2fc] */
+ u32 padding_2[55]; /* [0x324..0x3fc] */
};
struct dw_edma_v0_unroll {
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 4700f2e87a62..7ab83fe601ed 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -463,9 +463,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc, true);
}
-static void dw_dma_tasklet(unsigned long data)
+static void dw_dma_tasklet(struct tasklet_struct *t)
{
- struct dw_dma *dw = (struct dw_dma *)data;
+ struct dw_dma *dw = from_tasklet(dw, t, tasklet);
struct dw_dma_chan *dwc;
u32 status_xfer;
u32 status_err;
@@ -723,7 +723,7 @@ slave_sg_fromdev_fill_desc:
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
lli_write(desc, ctlhi, ctlhi);
- mem_width = __ffs(data_width | mem | dlen);
+ mem_width = __ffs(data_width | mem);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
if (dws->dma_dev != chan->device->dev)
return false;
+ /* permit channels in accordance with the channels mask */
+ if (dws->channels && !(dws->channels & dwc->mask))
+ return false;
+
/* We have to copy data since dws can be temporary storage */
memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
@@ -1138,7 +1142,7 @@ int do_dma_probe(struct dw_dma_chip *chip)
goto err_pdata;
}
- tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+ tasklet_setup(&dw->tasklet, dw_dma_tasklet);
err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
dw->name, dw);
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
index 7a085b3c1854..a4862263ff14 100644
--- a/drivers/dma/dw/dw.c
+++ b/drivers/dma/dw/dw.c
@@ -14,7 +14,7 @@
static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
bool hs_polarity = dwc->dws.hs_polarity;
@@ -67,9 +67,8 @@ static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- bool is_slave = is_slave_direction(dwc->direction);
- u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16;
- u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16;
+ u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
+ u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
u8 p_master = dwc->dws.p_master;
u8 m_master = dwc->dws.m_master;
u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index f00657308811..3ce44de25d33 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -73,9 +73,8 @@ static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- bool is_slave = is_slave_direction(dwc->direction);
- u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8;
- u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8;
+ u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
+ u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
diff --git a/drivers/dma/dw/of.c b/drivers/dma/dw/of.c
index 1474b3817ef4..c1cf7675b9d1 100644
--- a/drivers/dma/dw/of.c
+++ b/drivers/dma/dw/of.c
@@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
};
dma_cap_mask_t cap;
- if (dma_spec->args_count != 3)
+ if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
return NULL;
slave.src_id = dma_spec->args[0];
slave.dst_id = dma_spec->args[0];
slave.m_master = dma_spec->args[1];
slave.p_master = dma_spec->args[2];
+ if (dma_spec->args_count >= 4)
+ slave.channels = dma_spec->args[3];
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.m_master >= dw->pdata->nr_masters ||
- slave.p_master >= dw->pdata->nr_masters))
+ slave.p_master >= dw->pdata->nr_masters ||
+ slave.channels >= BIT(dw->pdata->nr_channels)))
return NULL;
dma_cap_zero(cap);
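
/*
 * A sketch of the new optional fourth xlate cell: a bitmask of
 * permitted hardware channels, enforced by dw_dma_filter() above. A
 * hypothetical mask of 0x30 would permit only channels 4 and 5; a
 * zero mask keeps the old "any channel" behaviour.
 */
static bool example_channel_permitted(u32 channels, u32 chan_mask)
{
	return !channels || (channels & chan_mask);
}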
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 87a246012629..01027779beb8 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -745,9 +745,9 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
-static void ep93xx_dma_tasklet(unsigned long data)
+static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{
- struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+ struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
struct ep93xx_dma_desc *desc, *d;
struct dmaengine_desc_callback cb;
LIST_HEAD(list);
@@ -1353,8 +1353,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&edmac->active);
INIT_LIST_HEAD(&edmac->queue);
INIT_LIST_HEAD(&edmac->free_list);
- tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
- (unsigned long)edmac);
+ tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
list_add_tail(&edmac->chan.device_node,
&dma_dev->channels);
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 493dc6c59d1d..fdf3500d96a9 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -154,17 +154,15 @@ static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
fsl_re_issue_pending(&re_chan->chan);
}
-static void fsl_re_dequeue(unsigned long data)
+static void fsl_re_dequeue(struct tasklet_struct *t)
{
- struct fsl_re_chan *re_chan;
+ struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
struct fsl_re_desc *desc, *_desc;
struct fsl_re_hw_desc *hwdesc;
unsigned long flags;
unsigned int count, oub_count;
int found;
- re_chan = dev_get_drvdata((struct device *)data);
-
fsl_re_cleanup_descs(re_chan);
spin_lock_irqsave(&re_chan->desc_lock, flags);
@@ -671,7 +669,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev,
snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
chandev = &chan_ofdev->dev;
- tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev);
+ tasklet_setup(&chan->irqtask, fsl_re_dequeue);
ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
if (ret) {
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e342cf52d296..0feb323bae1e 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -976,9 +976,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
{
- struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);
chan_dbg(chan, "tasklet entry\n");
@@ -1151,7 +1151,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
}
fdev->chan[chan->id] = chan;
- tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ tasklet_setup(&chan->tasklet, dma_do_tasklet);
snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
/* Initialize the channel */
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index b75d699160bf..200b9109cacf 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -368,6 +368,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
__func__, cmd_code, operand);
+ idxd->cmd_status = 0;
__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
idxd->cmd_done = &done;
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
@@ -379,8 +380,11 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
spin_unlock_irqrestore(&idxd->dev_lock, flags);
wait_for_completion(&done);
spin_lock_irqsave(&idxd->dev_lock, flags);
- if (status)
+ if (status) {
*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ idxd->cmd_status = *status & GENMASK(7, 0);
+ }
+
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
@@ -555,8 +559,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg.priority = wq->priority;
/* bytes 12-15 */
- wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
- wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+ wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
+ wq->wqcfg.max_batch_shift = ilog2(wq->max_batch_size);
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
for (i = 0; i < 8; i++) {
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index e62b4799d189..c64df197e724 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -114,6 +114,8 @@ struct idxd_wq {
struct sbitmap_queue sbq;
struct dma_chan dma_chan;
char name[WQ_NAME_SIZE + 1];
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
};
struct idxd_engine {
@@ -154,6 +156,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
+ u8 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index c7c61974f20f..11e5ce168177 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -176,6 +176,8 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->idxd = idxd;
mutex_init(&wq->wq_lock);
wq->idxd_cdev.minor = -1;
+ wq->max_xfer_bytes = idxd->max_xfer_bytes;
+ wq->max_batch_size = idxd->max_batch_size;
}
for (i = 0; i < idxd->max_engines; i++) {
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 1e9e6991f543..17a65a13fb64 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -64,6 +64,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
bool err = false;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
@@ -121,7 +122,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (!err)
goto out;
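
/*
 * A sketch of the read-then-ack ordering the hunk above moves to
 * (base is illustrative): acknowledging the cause bits before handling
 * them means an event that arrives mid-handler re-raises the interrupt
 * rather than being cleared by a late ack.
 */
static u32 example_ack(void __iomem *base)
{
	u32 cause = ioread32(base + IDXD_INTCAUSE_OFFSET);

	iowrite32(cause, base + IDXD_INTCAUSE_OFFSET);	/* ack what we saw */
	return cause;	/* caller then handles each bit set in cause */
}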
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index dcba60953217..07a5db06a29a 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1064,6 +1064,89 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
static struct device_attribute dev_attr_wq_cdev_minor =
__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+static int __get_sysfs_u64(const char *buf, u64 *val)
+{
+ int rc;
+
+ rc = kstrtou64(buf, 0, val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (*val == 0)
+ return -EINVAL;
+
+ *val = roundup_pow_of_two(*val);
+ return 0;
+}
+
+static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+}
+
+static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ u64 xfer_size;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ rc = __get_sysfs_u64(buf, &xfer_size);
+ if (rc < 0)
+ return rc;
+
+ if (xfer_size > idxd->max_xfer_bytes)
+ return -EINVAL;
+
+ wq->max_xfer_bytes = xfer_size;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_max_transfer_size =
+ __ATTR(max_transfer_size, 0644,
+ wq_max_transfer_size_show, wq_max_transfer_size_store);
+
+static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->max_batch_size);
+}
+
+static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ u64 batch_size;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ rc = __get_sysfs_u64(buf, &batch_size);
+ if (rc < 0)
+ return rc;
+
+ if (batch_size > idxd->max_batch_size)
+ return -EINVAL;
+
+ wq->max_batch_size = (u32)batch_size;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_max_batch_size =
+ __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1074,6 +1157,8 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_type.attr,
&dev_attr_wq_name.attr,
&dev_attr_wq_cdev_minor.attr,
+ &dev_attr_wq_max_transfer_size.attr,
+ &dev_attr_wq_max_batch_size.attr,
NULL,
};
@@ -1317,6 +1402,15 @@ static ssize_t cdev_major_show(struct device *dev,
}
static DEVICE_ATTR_RO(cdev_major);
+static ssize_t cmd_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#x\n", idxd->cmd_status);
+}
+static DEVICE_ATTR_RO(cmd_status);
+
static struct attribute *idxd_device_attributes[] = {
&dev_attr_version.attr,
&dev_attr_max_groups.attr,
@@ -1335,6 +1429,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_max_tokens.attr,
&dev_attr_token_limit.attr,
&dev_attr_cdev_major.attr,
+ &dev_attr_cmd_status.attr,
NULL,
};
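
/*
 * Behaviour of the __get_sysfs_u64() helper added above, sketched for
 * a hypothetical write: a zero value is rejected, anything else is
 * rounded up to the next power of two, so writing "1000" to
 * max_batch_size yields 1024.
 */
static int example_parse(const char *buf, u64 *val)
{
	if (kstrtou64(buf, 0, val) < 0 || *val == 0)
		return -EINVAL;
	*val = roundup_pow_of_two(*val);	/* "1000" -> 1024 */
	return 0;
}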
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 88717506c1f6..670db04b0757 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -173,7 +173,6 @@ enum imx_dma_type {
struct imxdma_engine {
struct device *dev;
- struct device_dma_parameters dma_parms;
struct dma_device dma_device;
void __iomem *base;
struct clk *dma_ahb;
@@ -613,9 +612,9 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
return 0;
}
-static void imxdma_tasklet(unsigned long data)
+static void imxdma_tasklet(struct tasklet_struct *t)
{
- struct imxdma_channel *imxdmac = (void *)data;
+ struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc, *next_desc;
unsigned long flags;
@@ -1169,8 +1168,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&imxdmac->ld_free);
INIT_LIST_HEAD(&imxdmac->ld_active);
- tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
- (unsigned long)imxdmac);
+ tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
imxdmac->chan.device = &imxdma->dma_device;
dma_cookie_init(&imxdmac->chan);
imxdmac->channel = i;
@@ -1196,7 +1194,6 @@ static int __init imxdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxdma);
imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
- imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
ret = dma_async_device_register(&imxdma->dma_device);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4f8d8f5e1132..16b908c77db3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -426,7 +426,6 @@ struct sdma_driver_data {
struct sdma_engine {
struct device *dev;
- struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
@@ -2118,7 +2117,6 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
- sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
sdma->dma_device.copy_align = 2;
dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a814b200299b..37ff4ec7db76 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -26,11 +26,11 @@
#include "../dmaengine.h"
-int completion_timeout = 200;
+static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
"set ioat completion timeout [msec] (default 200 [msec])");
-int idle_timeout = 2000;
+static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
"set ioat idel timeout [msec] (default 2000 [msec])");
@@ -165,7 +165,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan)
tasklet_kill(&ioat_chan->cleanup_task);
/* final cleanup now that everything is quiesced and can't re-arm */
- ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
+ ioat_cleanup_event(&ioat_chan->cleanup_task);
}
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
@@ -389,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
struct ioat_descs *descs = &ioat_chan->descs[i];
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
- SZ_2M, &descs->hw, flags);
+ IOAT_CHUNK_SIZE, &descs->hw, flags);
if (!descs->virt) {
int idx;
@@ -690,9 +690,9 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
-void ioat_cleanup_event(unsigned long data)
+void ioat_cleanup_event(struct tasklet_struct *t)
{
- struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+ struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
ioat_cleanup(ioat_chan);
if (!test_bit(IOAT_RUN, &ioat_chan->state))
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index f7f31fdf14cf..140cfe3782fb 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -393,7 +393,7 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
-void ioat_cleanup_event(unsigned long data);
+void ioat_cleanup_event(struct tasklet_struct *t);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 8a53f5c96b16..191b59279007 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -767,8 +767,6 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx)
{
struct dma_device *dma = &ioat_dma->dma_dev;
- struct dma_chan *c = &ioat_chan->dma_chan;
- unsigned long data = (unsigned long) c;
ioat_chan->ioat_dma = ioat_dma;
ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
@@ -778,7 +776,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
ioat_dma->idx[idx] = ioat_chan;
timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
- tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+ tasklet_setup(&ioat_chan->cleanup_task, ioat_cleanup_event);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 3350bffb2e93..310b899d581f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -238,9 +238,10 @@ iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
-static void iop_adma_tasklet(unsigned long data)
+static void iop_adma_tasklet(struct tasklet_struct *t)
{
- struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
+ struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t,
+ irq_tasklet);
/* lockdep will flag dependency submissions as potentially
* recursive locking, this is not the case as a dependency
@@ -416,6 +417,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
char *hw_desc;
+ dma_addr_t dma_desc;
int idx;
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *slot = NULL;
@@ -444,9 +446,8 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
INIT_LIST_HEAD(&slot->tx_list);
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
- hw_desc = (char *) iop_chan->device->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
+ dma_desc = iop_chan->device->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE;
slot->idx = idx;
spin_lock_bh(&iop_chan->lock);
@@ -1296,9 +1297,8 @@ static int iop_adma_probe(struct platform_device *pdev)
goto err_free_adev;
}
- dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
- __func__, adev->dma_desc_pool_virt,
- (void *) adev->dma_desc_pool);
+ dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n",
+ __func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool);
adev->id = plat_data->hw_id;
@@ -1351,8 +1351,7 @@ static int iop_adma_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_free_iop_chan;
}
- tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
- iop_chan);
+ tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet);
/* clear errors before enabling interrupts */
iop_adma_device_clear_err_status(iop_chan);
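
/*
 * A minimal sketch of the dma_addr_t handling adopted above (pool and
 * idx are illustrative): arithmetic stays in dma_addr_t, and printing
 * uses the %pad specifier, which takes the address of the value, since
 * dma_addr_t may be wider than a CPU pointer.
 */
static void example_log_slot(struct device *dev, dma_addr_t pool, int idx)
{
	dma_addr_t phys = pool + idx * IOP_ADMA_SLOT_SIZE;

	dev_dbg(dev, "slot %d at %pad\n", idx, &phys);
}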
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0457b1f26540..38036db284cb 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1299,9 +1299,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ipu_gc_tasklet(unsigned long arg)
+static void ipu_gc_tasklet(struct tasklet_struct *t)
{
- struct ipu *ipu = (struct ipu *)arg;
+ struct ipu *ipu = from_tasklet(ipu, t, tasklet);
int i;
for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1740,7 +1740,7 @@ static int __init ipu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_idmac_init;
- tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
+ tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
ipu_data.dev = &pdev->dev;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index c5c1aa0dcaed..f609a84c493c 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -297,9 +297,9 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
return -EAGAIN;
}
-static void k3_dma_tasklet(unsigned long arg)
+static void k3_dma_tasklet(struct tasklet_struct *t)
{
- struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
+ struct k3_dma_dev *d = from_tasklet(d, t, task);
struct k3_dma_phy *p;
struct k3_dma_chan *c, *cn;
unsigned pch, pch_alloc = 0;
@@ -962,7 +962,7 @@ static int k3_dma_probe(struct platform_device *op)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
- tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
+ tasklet_setup(&d->task, k3_dma_tasklet);
platform_set_drvdata(op, d);
dev_info(&op->dev, "initialized\n");
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 6bf838e63be1..41ef9f15d3d5 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -356,9 +356,9 @@ static struct mtk_cqdma_vdesc
return ret;
}
-static void mtk_cqdma_tasklet_cb(unsigned long data)
+static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
{
- struct mtk_cqdma_pchan *pc = (struct mtk_cqdma_pchan *)data;
+ struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
struct mtk_cqdma_vdesc *cvd = NULL;
unsigned long flags;
@@ -878,8 +878,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
/* initialize tasklet for each PC */
for (i = 0; i < cqdma->dma_channels; ++i)
- tasklet_init(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb,
- (unsigned long)cqdma->pc[i]);
+ tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n");
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 29f1223b285a..27c07350971d 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -624,14 +624,9 @@ static int mtk_uart_apdma_runtime_suspend(struct device *dev)
static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
- int ret;
struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);
- ret = clk_prepare_enable(mtkd->clk);
- if (ret)
- return ret;
-
- return 0;
+ return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
deleted file mode 100644
index fea8608a7810..000000000000
--- a/drivers/dma/mic_x100_dma.c
+++ /dev/null
@@ -1,770 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC X100 DMA Driver.
- *
- * Adapted from IOAT dma driver.
- */
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-
-#include "mic_x100_dma.h"
-
-#define MIC_DMA_MAX_XFER_SIZE_CARD (1 * 1024 * 1024 -\
- MIC_DMA_ALIGN_BYTES)
-#define MIC_DMA_MAX_XFER_SIZE_HOST (1 * 1024 * 1024 >> 1)
-#define MIC_DMA_DESC_TYPE_SHIFT 60
-#define MIC_DMA_MEMCPY_LEN_SHIFT 46
-#define MIC_DMA_STAT_INTR_SHIFT 59
-
-/* high-water mark for pushing dma descriptors */
-static int mic_dma_pending_level = 4;
-
-/* Status descriptor is used to write a 64 bit value to a memory location */
-enum mic_dma_desc_format_type {
- MIC_DMA_MEMCPY = 1,
- MIC_DMA_STATUS,
-};
-
-static inline u32 mic_dma_hw_ring_inc(u32 val)
-{
- return (val + 1) % MIC_DMA_DESC_RX_SIZE;
-}
-
-static inline u32 mic_dma_hw_ring_dec(u32 val)
-{
- return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
-}
-
-static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
-{
- ch->head = mic_dma_hw_ring_inc(ch->head);
-}
-
-/* Prepare a memcpy desc */
-static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
- dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
-{
- u64 qw0, qw1;
-
- qw0 = src_phys;
- qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
- qw1 = MIC_DMA_MEMCPY;
- qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
- qw1 |= dst_phys;
- desc->qw0 = qw0;
- desc->qw1 = qw1;
-}
-
-/* Prepare a status desc. with @data to be written at @dst_phys */
-static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
- dma_addr_t dst_phys, bool generate_intr)
-{
- u64 qw0, qw1;
-
- qw0 = data;
- qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
- if (generate_intr)
- qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
- desc->qw0 = qw0;
- desc->qw1 = qw1;
-}
-
-static void mic_dma_cleanup(struct mic_dma_chan *ch)
-{
- struct dma_async_tx_descriptor *tx;
- u32 tail;
- u32 last_tail;
-
- spin_lock(&ch->cleanup_lock);
- tail = mic_dma_read_cmp_cnt(ch);
- /*
- * This is the barrier pair for smp_wmb() in fn.
- * mic_dma_tx_submit_unlock. It's required so that we read the
- * updated cookie value from tx->cookie.
- */
- smp_rmb();
- for (last_tail = ch->last_tail; tail != last_tail;) {
- tx = &ch->tx_array[last_tail];
- if (tx->cookie) {
- dma_cookie_complete(tx);
- dmaengine_desc_get_callback_invoke(tx, NULL);
- tx->callback = NULL;
- }
- last_tail = mic_dma_hw_ring_inc(last_tail);
- }
- /* finish all completion callbacks before incrementing tail */
- smp_mb();
- ch->last_tail = last_tail;
- spin_unlock(&ch->cleanup_lock);
-}
-
-static u32 mic_dma_ring_count(u32 head, u32 tail)
-{
- u32 count;
-
- if (head >= tail)
- count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
- else
- count = tail - head;
- return count - 1;
-}
-
-/* Returns the num. of free descriptors on success, -ENOMEM on failure */
-static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
-{
- struct device *dev = mic_dma_ch_to_device(ch);
- u32 count;
-
- count = mic_dma_ring_count(ch->head, ch->last_tail);
- if (count < required) {
- mic_dma_cleanup(ch);
- count = mic_dma_ring_count(ch->head, ch->last_tail);
- }
-
- if (count < required) {
- dev_dbg(dev, "Not enough desc space");
- dev_dbg(dev, "%s %d required=%u, avail=%u\n",
- __func__, __LINE__, required, count);
- return -ENOMEM;
- } else {
- return count;
- }
-}
-
-/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
-static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
- dma_addr_t dst, size_t len)
-{
- size_t current_transfer_len;
- size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
- /* 3 is added to make sure we have enough space for status desc */
- int num_desc = len / max_xfer_size + 3;
- int ret;
-
- if (len % max_xfer_size)
- num_desc++;
-
- ret = mic_dma_avail_desc_ring_space(ch, num_desc);
- if (ret < 0)
- return ret;
- do {
- current_transfer_len = min(len, max_xfer_size);
- mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
- src, dst, current_transfer_len);
- mic_dma_hw_ring_inc_head(ch);
- len -= current_transfer_len;
- dst = dst + current_transfer_len;
- src = src + current_transfer_len;
- } while (len > 0);
- return 0;
-}
-
-/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
-static void mic_dma_prog_intr(struct mic_dma_chan *ch)
-{
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, false);
- mic_dma_hw_ring_inc_head(ch);
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, true);
- mic_dma_hw_ring_inc_head(ch);
-}
-
-/* Wrapper function to program memcpy descriptors/status descriptors */
-static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
- dma_addr_t dst, size_t len)
-{
- if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) {
- return -ENOMEM;
- } else {
- /* 3 is the maximum number of status descriptors */
- int ret = mic_dma_avail_desc_ring_space(ch, 3);
-
- if (ret < 0)
- return ret;
- }
-
- /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
- if (flags & DMA_PREP_FENCE) {
- mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
- ch->status_dest_micpa, false);
- mic_dma_hw_ring_inc_head(ch);
- }
-
- if (flags & DMA_PREP_INTERRUPT)
- mic_dma_prog_intr(ch);
-
- return 0;
-}
-
-static inline void mic_dma_issue_pending(struct dma_chan *ch)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
-
- spin_lock(&mic_ch->issue_lock);
- /*
- * Write to head triggers h/w to act on the descriptors.
- * On MIC, writing the same head value twice causes
- * a h/w error. On second write, h/w assumes we filled
- * the entire ring & overwrote some of the descriptors.
- */
- if (mic_ch->issued == mic_ch->submitted)
- goto out;
- mic_ch->issued = mic_ch->submitted;
- /*
- * make descriptor updates visible before advancing head,
- * this is purposefully not smp_wmb() since we are also
- * publishing the descriptor updates to a dma device
- */
- wmb();
- mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
-out:
- spin_unlock(&mic_ch->issue_lock);
-}
-
-static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
-{
- if (mic_dma_ring_count(ch->issued, ch->submitted)
- > mic_dma_pending_level)
- mic_dma_issue_pending(&ch->api_ch);
-}
-
-static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
- dma_cookie_t cookie;
-
- dma_cookie_assign(tx);
- cookie = tx->cookie;
- /*
- * We need an smp write barrier here because another CPU might see
- * an update to submitted and update h/w head even before we
- * assigned a cookie to this tx.
- */
- smp_wmb();
- mic_ch->submitted = mic_ch->head;
- spin_unlock(&mic_ch->prep_lock);
- mic_dma_update_pending(mic_ch);
- return cookie;
-}
-
-static inline struct dma_async_tx_descriptor *
-allocate_tx(struct mic_dma_chan *ch)
-{
- u32 idx = mic_dma_hw_ring_dec(ch->head);
- struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];
-
- dma_async_tx_descriptor_init(tx, &ch->api_ch);
- tx->tx_submit = mic_dma_tx_submit_unlock;
- return tx;
-}
-
-/* Program a status descriptor with dst as address and value to be written */
-static struct dma_async_tx_descriptor *
-mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
- unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- int result;
-
- spin_lock(&mic_ch->prep_lock);
- result = mic_dma_avail_desc_ring_space(mic_ch, 4);
- if (result < 0)
- goto error;
- mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
- false);
- mic_dma_hw_ring_inc_head(mic_ch);
- result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
- if (result < 0)
- goto error;
-
- return allocate_tx(mic_ch);
-error:
- dev_err(mic_dma_ch_to_device(mic_ch),
- "Error enqueueing dma status descriptor, error=%d\n", result);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-/*
- * Prepare a memcpy descriptor to be added to the ring.
- * Note that the temporary descriptor adds an extra overhead of copying the
- * descriptor to ring. So, we copy directly to the descriptor ring
- */
-static struct dma_async_tx_descriptor *
-mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
- dma_addr_t dma_src, size_t len, unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- struct device *dev = mic_dma_ch_to_device(mic_ch);
- int result;
-
- if (!len && !flags)
- return NULL;
-
- spin_lock(&mic_ch->prep_lock);
- result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
- if (result >= 0)
- return allocate_tx(mic_ch);
- dev_err(dev, "Error enqueueing dma, error=%d\n", result);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- int ret;
-
- spin_lock(&mic_ch->prep_lock);
- ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
- if (!ret)
- return allocate_tx(mic_ch);
- spin_unlock(&mic_ch->prep_lock);
- return NULL;
-}
-
-/* Return the status of the transaction */
-static enum dma_status
-mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
-
- if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
- mic_dma_cleanup(mic_ch);
-
- return dma_cookie_status(ch, cookie, txstate);
-}
-
-static irqreturn_t mic_dma_thread_fn(int irq, void *data)
-{
- mic_dma_cleanup((struct mic_dma_chan *)data);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mic_dma_intr_handler(int irq, void *data)
-{
- struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);
-
- mic_dma_ack_interrupt(ch);
- return IRQ_WAKE_THREAD;
-}
-
-static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
-{
- u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
- struct device *dev = &to_mbus_device(ch)->dev;
-
- desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
- ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);
-
- if (!ch->desc_ring)
- return -ENOMEM;
-
- ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
- desc_ring_size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, ch->desc_ring_micpa))
- goto map_error;
-
- ch->tx_array = vzalloc(array_size(MIC_DMA_DESC_RX_SIZE,
- sizeof(*ch->tx_array)));
- if (!ch->tx_array)
- goto tx_error;
- return 0;
-tx_error:
- dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
- DMA_BIDIRECTIONAL);
-map_error:
- kfree(ch->desc_ring);
- return -ENOMEM;
-}
-
-static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
-{
- u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
-
- vfree(ch->tx_array);
- desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
- dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
- desc_ring_size, DMA_BIDIRECTIONAL);
- kfree(ch->desc_ring);
- ch->desc_ring = NULL;
-}
-
-static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
-{
- dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
- L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
- kfree(ch->status_dest);
-}
-
-static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
-{
- struct device *dev = &to_mbus_device(ch)->dev;
-
- ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
- if (!ch->status_dest)
- return -ENOMEM;
- ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
- L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, ch->status_dest_micpa)) {
- kfree(ch->status_dest);
- ch->status_dest = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static int mic_dma_check_chan(struct mic_dma_chan *ch)
-{
- if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
- mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
- dev_err(mic_dma_ch_to_device(ch),
- "%s %d error setting up mic dma chan %d\n",
- __func__, __LINE__, ch->ch_num);
- return -EBUSY;
- }
- return 0;
-}
-
-static int mic_dma_chan_setup(struct mic_dma_chan *ch)
-{
- if (MIC_DMA_CHAN_MIC == ch->owner)
- mic_dma_chan_set_owner(ch);
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
- mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
- mic_dma_chan_set_desc_ring(ch);
- ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
- ch->head = ch->last_tail;
- ch->issued = 0;
- mic_dma_chan_unmask_intr(ch);
- mic_dma_enable_chan(ch);
- return mic_dma_check_chan(ch);
-}
-
-static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
-{
- mic_dma_disable_chan(ch);
- mic_dma_chan_mask_intr(ch);
-}
-
-static int mic_dma_setup_irq(struct mic_dma_chan *ch)
-{
- ch->cookie =
- to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
- mic_dma_intr_handler, mic_dma_thread_fn,
- "mic dma_channel", ch, ch->ch_num);
- return PTR_ERR_OR_ZERO(ch->cookie);
-}
-
-static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
-{
- to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
-}
-
-static int mic_dma_chan_init(struct mic_dma_chan *ch)
-{
- int ret = mic_dma_alloc_desc_ring(ch);
-
- if (ret)
- goto ring_error;
- ret = mic_dma_alloc_status_dest(ch);
- if (ret)
- goto status_error;
- ret = mic_dma_chan_setup(ch);
- if (ret)
- goto chan_error;
- return ret;
-chan_error:
- mic_dma_free_status_dest(ch);
-status_error:
- mic_dma_free_desc_ring(ch);
-ring_error:
- return ret;
-}
-
-static int mic_dma_drain_chan(struct mic_dma_chan *ch)
-{
- struct dma_async_tx_descriptor *tx;
- int err = 0;
- dma_cookie_t cookie;
-
- tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- }
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie))
- err = -ENOMEM;
- else
- err = dma_sync_wait(&ch->api_ch, cookie);
- if (err) {
- dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
- __func__, __LINE__, ch->ch_num);
- err = -EIO;
- }
-error:
- mic_dma_cleanup(ch);
- return err;
-}
-
-static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
-{
- mic_dma_chan_destroy(ch);
- mic_dma_cleanup(ch);
- mic_dma_free_status_dest(ch);
- mic_dma_free_desc_ring(ch);
-}
-
-static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
- enum mic_dma_chan_owner owner)
-{
- int i, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
- int ret;
-
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- ch->ch_num = i;
- ch->owner = owner;
- spin_lock_init(&ch->cleanup_lock);
- spin_lock_init(&ch->prep_lock);
- spin_lock_init(&ch->issue_lock);
- ret = mic_dma_setup_irq(ch);
- if (ret)
- goto error;
- }
- return 0;
-error:
- for (i = i - 1; i >= first_chan; i--)
- mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
- return ret;
-}
-
-static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
-{
- int i, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
-
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- mic_dma_free_irq(ch);
- }
-}
-
-static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
-{
- int ret = mic_dma_chan_init(to_mic_dma_chan(ch));
- if (ret)
- return ret;
- return MIC_DMA_DESC_RX_SIZE;
-}
-
-static void mic_dma_free_chan_resources(struct dma_chan *ch)
-{
- struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
- mic_dma_drain_chan(mic_ch);
- mic_dma_chan_uninit(mic_ch);
-}
-
-/* Set the fn. handlers and register the dma device with dma api */
-static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
- enum mic_dma_chan_owner owner)
-{
- int i, first_chan = mic_dma_dev->start_ch;
-
- dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
- /*
- * This dma engine is not capable of host memory to host memory
- * transfers
- */
- dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);
-
- if (MIC_DMA_CHAN_HOST == owner)
- dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
- mic_dma_dev->dma_dev.device_alloc_chan_resources =
- mic_dma_alloc_chan_resources;
- mic_dma_dev->dma_dev.device_free_chan_resources =
- mic_dma_free_chan_resources;
- mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
- mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
- mic_dma_dev->dma_dev.device_prep_dma_imm_data =
- mic_dma_prep_status_lock;
- mic_dma_dev->dma_dev.device_prep_dma_interrupt =
- mic_dma_prep_interrupt_lock;
- mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
- mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
- INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
- dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
- list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
- &mic_dma_dev->dma_dev.channels);
- }
- return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
-}
-
-/*
- * Initializes dma channels and registers the dma device with the
- * dma engine api.
- */
-static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
- enum mic_dma_chan_owner owner)
-{
- struct mic_dma_device *mic_dma_dev;
- int ret;
- struct device *dev = &mbdev->dev;
-
- mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
- if (!mic_dma_dev) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- mic_dma_dev->mbdev = mbdev;
- mic_dma_dev->dma_dev.dev = dev;
- mic_dma_dev->mmio = mbdev->mmio_va;
- if (MIC_DMA_CHAN_HOST == owner) {
- mic_dma_dev->start_ch = 0;
- mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
- } else {
- mic_dma_dev->start_ch = 4;
- mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
- }
- ret = mic_dma_init(mic_dma_dev, owner);
- if (ret)
- goto init_error;
- ret = mic_dma_register_dma_device(mic_dma_dev, owner);
- if (ret)
- goto reg_error;
- return mic_dma_dev;
-reg_error:
- mic_dma_uninit(mic_dma_dev);
-init_error:
- mic_dma_dev = NULL;
-alloc_error:
- dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
- return mic_dma_dev;
-}
-
-static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
-{
- mic_dma_uninit(mic_dma_dev);
-}
-
-/* DEBUGFS CODE */
-static int mic_dma_reg_show(struct seq_file *s, void *pos)
-{
- struct mic_dma_device *mic_dma_dev = s->private;
- int i, chan_num, first_chan = mic_dma_dev->start_ch;
- struct mic_dma_chan *ch;
-
- seq_printf(s, "SBOX_DCR: %#x\n",
- mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
- MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
- seq_puts(s, "DMA Channel Registers\n");
- seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
- "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
- seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
- for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
- ch = &mic_dma_dev->mic_ch[i];
- chan_num = ch->ch_num;
- seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
- chan_num,
- mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
- seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
- mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
- mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
- mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
- mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
- }
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_dma_reg);
-
-/* Debugfs parent dir */
-static struct dentry *mic_dma_dbg;
-
-static int mic_dma_driver_probe(struct mbus_device *mbdev)
-{
- struct mic_dma_device *mic_dma_dev;
- enum mic_dma_chan_owner owner;
-
- if (MBUS_DEV_DMA_MIC == mbdev->id.device)
- owner = MIC_DMA_CHAN_MIC;
- else
- owner = MIC_DMA_CHAN_HOST;
-
- mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
- dev_set_drvdata(&mbdev->dev, mic_dma_dev);
-
- if (mic_dma_dbg) {
- mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
- mic_dma_dbg);
- debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir,
- mic_dma_dev, &mic_dma_reg_fops);
- }
- return 0;
-}
-
-static void mic_dma_driver_remove(struct mbus_device *mbdev)
-{
- struct mic_dma_device *mic_dma_dev;
-
- mic_dma_dev = dev_get_drvdata(&mbdev->dev);
- debugfs_remove_recursive(mic_dma_dev->dbg_dir);
- mic_dma_dev_unreg(mic_dma_dev);
-}
-
-static struct mbus_device_id id_table[] = {
- {MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
- {MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
- {0},
-};
-
-static struct mbus_driver mic_dma_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = mic_dma_driver_probe,
- .remove = mic_dma_driver_remove,
-};
-
-static int __init mic_x100_dma_init(void)
-{
- int rc = mbus_register_driver(&mic_dma_driver);
- if (rc)
- return rc;
- mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
- return 0;
-}
-
-static void __exit mic_x100_dma_exit(void)
-{
- debugfs_remove_recursive(mic_dma_dbg);
- mbus_unregister_driver(&mic_dma_driver);
-}
-
-module_init(mic_x100_dma_init);
-module_exit(mic_x100_dma_exit);
-
-MODULE_DEVICE_TABLE(mbus, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
-MODULE_LICENSE("GPL v2");
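The removed mic_dma_chan_init() above is a textbook instance of the kernel's goto-unwind error handling, where each label releases exactly the resources acquired before the failing step, in reverse order. A minimal standalone sketch of the idiom (all names hypothetical, not MIC-specific):

struct chan;
int alloc_ring(struct chan *c);
void free_ring(struct chan *c);
int alloc_status(struct chan *c);
void free_status(struct chan *c);
int setup_hw(struct chan *c);

static int chan_init(struct chan *c)
{
	int ret;

	ret = alloc_ring(c);		/* stage 1 */
	if (ret)
		goto ring_error;
	ret = alloc_status(c);		/* stage 2 */
	if (ret)
		goto status_error;
	ret = setup_hw(c);		/* stage 3 */
	if (ret)
		goto hw_error;
	return 0;
hw_error:
	free_status(c);			/* undo stage 2 */
status_error:
	free_ring(c);			/* undo stage 1 */
ring_error:
	return ret;
}

The success path stays free of cleanup calls, and every failure path unwinds only what actually completed.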
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
deleted file mode 100644
index 68ef43a91714..000000000000
--- a/drivers/dma/mic_x100_dma.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC X100 DMA Driver.
- *
- * Adapted from IOAT dma driver.
- */
-#ifndef _MIC_X100_DMA_H_
-#define _MIC_X100_DMA_H_
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/mic_bus.h>
-
-#include "dmaengine.h"
-
-/*
- * MIC has a total of 8 dma channels.
- * Four channels are assigned for host SW use & the remaining for MIC SW.
- * MIC DMA transfer size & addresses need to be 64 byte aligned.
- */
-#define MIC_DMA_MAX_NUM_CHAN 8
-#define MIC_DMA_NUM_CHAN 4
-#define MIC_DMA_ALIGN_SHIFT DMAENGINE_ALIGN_64_BYTES
-#define MIC_DMA_ALIGN_BYTES (1 << MIC_DMA_ALIGN_SHIFT)
-#define MIC_DMA_DESC_RX_SIZE (128 * 1024 - 4)
-
-/*
- * Register descriptions
- * All the registers are 32 bit registers.
- * DCR is a global register and all others are per-channel.
- * DCR - bits 0, 2, 4, 6, 8, 10, 12, 14 - enable bits for channels 0 to 7
- * bits 1, 3, 5, 7, 9, 11, 13, 15 - owner bits for channels 0 to 7
- * DCAR - bit 24 & 25 interrupt masks for mic owned & host owned channels
- * DHPR - head of the descriptor ring updated by s/w
- * DTPR - tail of the descriptor ring updated by h/w
- * DRAR_LO - lower 32 bits of descriptor ring's mic address
- * DRAR_HI - 3:0 - remaining 4 bits of descriptor ring's mic address
- * 20:4 descriptor ring size
- * 25:21 mic smpt entry number
- * DSTAT - 16:0 h/w completion count; 31:28 dma engine status
- * DCHERR - this register is non-zero on error
- * DCHERRMSK - interrupt mask register
- */
-#define MIC_DMA_HW_CMP_CNT_MASK 0x1ffff
-#define MIC_DMA_CHAN_QUIESCE 0x20000000
-#define MIC_DMA_SBOX_BASE 0x00010000
-#define MIC_DMA_SBOX_DCR 0x0000A280
-#define MIC_DMA_SBOX_CH_BASE 0x0001A000
-#define MIC_DMA_SBOX_CHAN_OFF 0x40
-#define MIC_DMA_SBOX_DCAR_IM0 (0x1 << 24)
-#define MIC_DMA_SBOX_DCAR_IM1 (0x1 << 25)
-#define MIC_DMA_SBOX_DRARHI_SYS_MASK (0x1 << 26)
-#define MIC_DMA_REG_DCAR 0
-#define MIC_DMA_REG_DHPR 4
-#define MIC_DMA_REG_DTPR 8
-#define MIC_DMA_REG_DRAR_LO 20
-#define MIC_DMA_REG_DRAR_HI 24
-#define MIC_DMA_REG_DSTAT 32
-#define MIC_DMA_REG_DCHERR 44
-#define MIC_DMA_REG_DCHERRMSK 48
-
-/* HW dma desc */
-struct mic_dma_desc {
- u64 qw0;
- u64 qw1;
-};
-
-enum mic_dma_chan_owner {
- MIC_DMA_CHAN_MIC = 0,
- MIC_DMA_CHAN_HOST
-};
-
-/*
- * mic_dma_chan - channel specific information
- * @ch_num: channel number
- * @owner: owner of this channel
- * @last_tail: cached value of descriptor ring tail
- * @head: index of next descriptor in desc_ring
- * @issued: hardware notification point
- * @submitted: index that will be used to submit descriptors to h/w
- * @api_ch: dma engine api channel
- * @desc_ring: dma descriptor ring
- * @desc_ring_micpa: mic physical address of desc_ring
- * @status_dest: destination for status (fence) descriptor
- * @status_dest_micpa: mic address for status_dest,
- * DMA controller uses this address
- * @tx_array: array of async_tx
- * @cleanup_lock: lock held when processing completed tx
- * @prep_lock: lock held in prep_memcpy & released in tx_submit
- * @issue_lock: lock used to synchronize writes to head
- * @cookie: mic_irq cookie used with mic irq request
- */
-struct mic_dma_chan {
- int ch_num;
- enum mic_dma_chan_owner owner;
- u32 last_tail;
- u32 head;
- u32 issued;
- u32 submitted;
- struct dma_chan api_ch;
- struct mic_dma_desc *desc_ring;
- dma_addr_t desc_ring_micpa;
- u64 *status_dest;
- dma_addr_t status_dest_micpa;
- struct dma_async_tx_descriptor *tx_array;
- spinlock_t cleanup_lock;
- spinlock_t prep_lock;
- spinlock_t issue_lock;
- struct mic_irq *cookie;
-};
-
-/*
- * struct mic_dma_device - per mic device
- * @mic_ch: dma channels
- * @dma_dev: underlying dma device
- * @mbdev: mic bus dma device
- * @mmio: virtual address of the mmio space
- * @dbg_dir: debugfs directory
- * @start_ch: first channel number that can be used
- * @max_xfer_size: maximum transfer size per dma descriptor
- */
-struct mic_dma_device {
- struct mic_dma_chan mic_ch[MIC_DMA_MAX_NUM_CHAN];
- struct dma_device dma_dev;
- struct mbus_device *mbdev;
- void __iomem *mmio;
- struct dentry *dbg_dir;
- int start_ch;
- size_t max_xfer_size;
-};
-
-static inline struct mic_dma_chan *to_mic_dma_chan(struct dma_chan *ch)
-{
- return container_of(ch, struct mic_dma_chan, api_ch);
-}
-
-static inline struct mic_dma_device *to_mic_dma_dev(struct mic_dma_chan *ch)
-{
- return
- container_of((const typeof(((struct mic_dma_device *)0)->mic_ch)*)
- (ch - ch->ch_num), struct mic_dma_device, mic_ch);
-}
-
-static inline struct mbus_device *to_mbus_device(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->mbdev;
-}
-
-static inline struct mbus_hw_ops *to_mbus_hw_ops(struct mic_dma_chan *ch)
-{
- return to_mbus_device(ch)->hw_ops;
-}
-
-static inline struct device *mic_dma_ch_to_device(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->dma_dev.dev;
-}
-
-static inline void __iomem *mic_dma_chan_to_mmio(struct mic_dma_chan *ch)
-{
- return to_mic_dma_dev(ch)->mmio;
-}
-
-static inline u32 mic_dma_read_reg(struct mic_dma_chan *ch, u32 reg)
-{
- return ioread32(mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
- ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
-}
-
-static inline void mic_dma_write_reg(struct mic_dma_chan *ch, u32 reg, u32 val)
-{
- iowrite32(val, mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE +
- ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg);
-}
-
-static inline u32 mic_dma_mmio_read(struct mic_dma_chan *ch, u32 offset)
-{
- return ioread32(mic_dma_chan_to_mmio(ch) + offset);
-}
-
-static inline void mic_dma_mmio_write(struct mic_dma_chan *ch, u32 val,
- u32 offset)
-{
- iowrite32(val, mic_dma_chan_to_mmio(ch) + offset);
-}
-
-static inline u32 mic_dma_read_cmp_cnt(struct mic_dma_chan *ch)
-{
- return mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) &
- MIC_DMA_HW_CMP_CNT_MASK;
-}
-
-static inline void mic_dma_chan_set_owner(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
- u32 chan_num = ch->ch_num;
-
- dcr = (dcr & ~(0x1 << (chan_num * 2))) | (ch->owner << (chan_num * 2));
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static inline void mic_dma_enable_chan(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-
- dcr |= 2 << (ch->ch_num << 1);
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static inline void mic_dma_disable_chan(struct mic_dma_chan *ch)
-{
- u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-
- dcr &= ~(2 << (ch->ch_num << 1));
- mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR);
-}
-
-static void mic_dma_chan_set_desc_ring(struct mic_dma_chan *ch)
-{
- u32 drar_hi;
- dma_addr_t desc_ring_micpa = ch->desc_ring_micpa;
-
- drar_hi = (MIC_DMA_DESC_RX_SIZE & 0x1ffff) << 4;
- if (MIC_DMA_CHAN_MIC == ch->owner) {
- drar_hi |= (desc_ring_micpa >> 32) & 0xf;
- } else {
- drar_hi |= MIC_DMA_SBOX_DRARHI_SYS_MASK;
- drar_hi |= ((desc_ring_micpa >> 34)
- & 0x1f) << 21;
- drar_hi |= (desc_ring_micpa >> 32) & 0x3;
- }
- mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_LO, (u32) desc_ring_micpa);
- mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_HI, drar_hi);
-}
-
-static inline void mic_dma_chan_mask_intr(struct mic_dma_chan *ch)
-{
- u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
-
- if (MIC_DMA_CHAN_MIC == ch->owner)
- dcar |= MIC_DMA_SBOX_DCAR_IM0;
- else
- dcar |= MIC_DMA_SBOX_DCAR_IM1;
- mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
-}
-
-static inline void mic_dma_chan_unmask_intr(struct mic_dma_chan *ch)
-{
- u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR);
-
- if (MIC_DMA_CHAN_MIC == ch->owner)
- dcar &= ~MIC_DMA_SBOX_DCAR_IM0;
- else
- dcar &= ~MIC_DMA_SBOX_DCAR_IM1;
- mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar);
-}
-
-static void mic_dma_ack_interrupt(struct mic_dma_chan *ch)
-{
- if (MIC_DMA_CHAN_MIC == ch->owner) {
- /* HW errata */
- mic_dma_chan_mask_intr(ch);
- mic_dma_chan_unmask_intr(ch);
- }
- to_mbus_hw_ops(ch)->ack_interrupt(to_mbus_device(ch), ch->ch_num);
-}
-#endif
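The removed header's to_mic_dma_dev() is worth a note: because each channel records its own index (ch_num), subtracting it from the channel pointer recovers the base of the mic_ch[] array, and container_of() then recovers the enclosing device. The same trick reduced to a self-contained sketch (struct names illustrative):

#include <linux/kernel.h>	/* container_of() */

struct elem {
	int idx;		/* element's own index within arr[] */
};

struct parent {
	struct elem arr[8];
};

static inline struct parent *elem_to_parent(struct elem *e)
{
	/* Step back idx elements to the array base, then to the parent. */
	return container_of(e - e->idx, struct parent, arr[0]);
}

This avoids a back-pointer in every channel, at the cost of trusting idx to be correct.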
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index f42f792db277..b84303be8edf 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -873,9 +873,9 @@ static void mmp_pdma_issue_pending(struct dma_chan *dchan)
* Do call back
* Start pending list
*/
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
{
- struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
+ struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
struct mmp_pdma_desc_sw *desc, *_desc;
LIST_HEAD(chain_cleanup);
unsigned long flags;
@@ -993,7 +993,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
spin_lock_init(&chan->desc_lock);
chan->dev = pdev->dev;
chan->chan.device = &pdev->device;
- tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+ tasklet_setup(&chan->tasklet, dma_do_tasklet);
INIT_LIST_HEAD(&chan->chain_pending);
INIT_LIST_HEAD(&chan->chain_running);
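The mmp_pdma hunk above is the first of many identical conversions in this series: tasklet_init() passed the channel as an opaque unsigned long, while tasklet_setup() registers a callback that receives the tasklet itself, and from_tasklet() (a typed container_of() wrapper) recovers the enclosing structure. A minimal sketch of the shape every one of these conversions follows (struct and function names are hypothetical):

#include <linux/interrupt.h>

struct my_chan {
	struct tasklet_struct tasklet;
	/* ... driver state ... */
};

/* New-style callback: takes the tasklet pointer, no integer cast. */
static void my_chan_tasklet(struct tasklet_struct *t)
{
	struct my_chan *chan = from_tasklet(chan, t, tasklet);

	/* ... process completions for chan ... */
}

static void my_chan_init(struct my_chan *chan)
{
	/* Replaces tasklet_init(&chan->tasklet, fn, (unsigned long)chan). */
	tasklet_setup(&chan->tasklet, my_chan_tasklet);
}

Because from_tasklet() expands to container_of(), the cast through unsigned long disappears and the callback signature is type-checked by the compiler.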
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 960c7c40aef7..a262e0eb4cc9 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -346,9 +346,9 @@ static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static void dma_do_tasklet(unsigned long data)
+static void dma_do_tasklet(struct tasklet_struct *t)
{
- struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
+ struct mmp_tdma_chan *tdmac = from_tasklet(tdmac, t, tasklet);
dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
}
@@ -586,7 +586,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->pool = pool;
tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
- tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
+ tasklet_setup(&tdmac->tasklet, dma_do_tasklet);
/* add the channel to tdma_chan list */
list_add_tail(&tdmac->chan.device_node,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index dc2cae7bcf69..c1a69149c8bf 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -414,9 +414,9 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
}
/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+static void mpc_dma_tasklet(struct tasklet_struct *t)
{
- struct mpc_dma *mdma = (void *)data;
+ struct mpc_dma *mdma = from_tasklet(mdma, t, tasklet);
unsigned long flags;
uint es;
@@ -1009,7 +1009,7 @@ static int mpc_dma_probe(struct platform_device *op)
list_add_tail(&mchan->chan.device_node, &dma->channels);
}
- tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
+ tasklet_setup(&mdma->tasklet, mpc_dma_tasklet);
/*
* Configure DMA Engine:
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0ac8e7b34e12..00cd1335eeba 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -336,9 +336,9 @@ static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
mv_chan->dmachan.completed_cookie = cookie;
}
-static void mv_xor_tasklet(unsigned long data)
+static void mv_xor_tasklet(struct tasklet_struct *t)
{
- struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+ struct mv_xor_chan *chan = from_tasklet(chan, t, irq_tasklet);
spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
@@ -1097,8 +1097,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->mmr_base = xordev->xor_base;
mv_chan->mmr_high_base = xordev->xor_high_base;
- tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
- mv_chan);
+ tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet);
/* clear errors before enabling interrupts */
mv_chan_clear_err_status(mv_chan);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9225f08dfee9..2753a6b916f6 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -553,9 +553,10 @@ int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
/*
* handle the descriptors after HW process
*/
-static void mv_xor_v2_tasklet(unsigned long data)
+static void mv_xor_v2_tasklet(struct tasklet_struct *t)
{
- struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+ struct mv_xor_v2_device *xor_dev = from_tasklet(xor_dev, t,
+ irq_tasklet);
int pending_ptr, num_of_pending, i;
struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
@@ -780,8 +781,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (ret)
goto free_msi_irqs;
- tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
- (unsigned long) xor_dev);
+ tasklet_setup(&xor_dev->irq_tasklet, mv_xor_v2_tasklet);
xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 3039bba0e4d5..65f816b40c32 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -141,7 +141,6 @@ struct mxs_dma_engine {
void __iomem *base;
struct clk *clk;
struct dma_device dma_device;
- struct device_dma_parameters dma_parms;
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
struct platform_device *pdev;
unsigned int nr_channels;
@@ -320,9 +319,9 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
return dma_cookie_assign(tx);
}
-static void mxs_dma_tasklet(unsigned long data)
+static void mxs_dma_tasklet(struct tasklet_struct *t)
{
- struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
+ struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);
dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}
@@ -812,8 +811,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_chan->chan.device = &mxs_dma->dma_device;
dma_cookie_init(&mxs_chan->chan);
- tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
- (unsigned long) mxs_chan);
+ tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet);
/* Add the channel to mxs_chan list */
@@ -829,7 +827,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.dev = &pdev->dev;
/* mxs_dma gets 65535 bytes maximum sg size */
- mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index ca4e0930207a..9c52c57919c6 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1113,9 +1113,9 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
return dchan;
}
-static void nbpf_chan_tasklet(unsigned long data)
+static void nbpf_chan_tasklet(struct tasklet_struct *t)
{
- struct nbpf_channel *chan = (struct nbpf_channel *)data;
+ struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
struct nbpf_desc *desc, *tmp;
struct dmaengine_desc_callback cb;
@@ -1260,7 +1260,7 @@ static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
- tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
+ tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
ret = devm_request_irq(dma_dev->dev, chan->irq,
nbpf_chan_irq, IRQF_SHARED,
chan->name, chan);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 331c8d8b10a3..9fede32641e9 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -124,7 +124,7 @@
#define FCNT_VAL 0x1
/**
- * owl_dmadesc_offsets - Describe DMA descriptor, hardware link
+ * enum owl_dmadesc_offsets - Describe DMA descriptor, hardware link
* list for dma transfer
* @OWL_DMADESC_NEXT_LLI: physical address of the next link list
* @OWL_DMADESC_SADDR: source physical address
@@ -135,6 +135,7 @@
* @OWL_DMADESC_CTRLA: dma_mode and linklist ctrl config
* @OWL_DMADESC_CTRLB: interrupt config
* @OWL_DMADESC_CONST_NUM: data for constant fill
+ * @OWL_DMADESC_SIZE: max size of this enum
*/
enum owl_dmadesc_offsets {
OWL_DMADESC_NEXT_LLI = 0,
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a3b0b4c56a19..1da04112fcdb 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -670,9 +670,9 @@ static int pd_device_terminate_all(struct dma_chan *chan)
return 0;
}
-static void pdc_tasklet(unsigned long data)
+static void pdc_tasklet(struct tasklet_struct *t)
{
- struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+ struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
unsigned long flags;
if (!pdc_is_idle(pd_chan)) {
@@ -735,8 +735,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
return ret0 | ret2;
}
-#ifdef CONFIG_PM
-static void pch_dma_save_regs(struct pch_dma *pd)
+static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
struct pch_dma_chan *pd_chan;
struct dma_chan *chan, *_c;
@@ -759,7 +758,7 @@ static void pch_dma_save_regs(struct pch_dma *pd)
}
}
-static void pch_dma_restore_regs(struct pch_dma *pd)
+static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
struct pch_dma_chan *pd_chan;
struct dma_chan *chan, *_c;
@@ -782,40 +781,25 @@ static void pch_dma_restore_regs(struct pch_dma *pd)
}
}
-static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_dma_suspend(struct device *dev)
{
- struct pch_dma *pd = pci_get_drvdata(pdev);
+ struct pch_dma *pd = dev_get_drvdata(dev);
if (pd)
pch_dma_save_regs(pd);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int pch_dma_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_dma_resume(struct device *dev)
{
- struct pch_dma *pd = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_dbg(&pdev->dev, "failed to enable device\n");
- return err;
- }
+ struct pch_dma *pd = dev_get_drvdata(dev);
if (pd)
pch_dma_restore_regs(pd);
return 0;
}
-#endif
static int pch_dma_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -898,8 +882,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
INIT_LIST_HEAD(&pd_chan->queue);
INIT_LIST_HEAD(&pd_chan->free_list);
- tasklet_init(&pd_chan->tasklet, pdc_tasklet,
- (unsigned long)pd_chan);
+ tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
}
@@ -993,15 +976,14 @@ static const struct pci_device_id pch_dma_id_table[] = {
{ 0, },
};
+static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);
+
static struct pci_driver pch_dma_driver = {
.name = DRV_NAME,
.id_table = pch_dma_id_table,
.probe = pch_dma_probe,
.remove = pch_dma_remove,
-#ifdef CONFIG_PM
- .suspend = pch_dma_suspend,
- .resume = pch_dma_resume,
-#endif
+ .driver.pm = &pch_dma_pm_ops,
};
module_pci_driver(pch_dma_driver);
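Beyond the tasklet conversion, the pch_dma hunks retire the legacy PCI bus .suspend/.resume callbacks in favor of dev_pm_ops. With driver-model PM the PCI core performs pci_save_state()/pci_set_power_state() itself, which is why those calls are simply deleted, and __maybe_unused replaces the old #ifdef CONFIG_PM guards. The skeleton of the pattern (driver names hypothetical):

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused my_suspend(struct device *dev)
{
	/* Device-specific register save only; no PCI power plumbing. */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name = "my_pci_drv",
	.driver.pm = &my_pm_ops,
	/* .id_table, .probe and .remove as before */
};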
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 5274a0704d96..e9f0101d92fa 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -255,7 +255,7 @@ enum pl330_byteswap {
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...) do { \
printk("%x:", cmd_line); \
- printk(x); \
+ printk(KERN_CONT x); \
cmd_line += off; \
} while (0)
#define PL330_DBGMC_START(addr) (cmd_line = addr)
@@ -460,9 +460,6 @@ struct pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
- /* Holds info about sg limitations */
- struct device_dma_parameters dma_parms;
-
/* Pool of descriptors available for the DMAC's channels */
struct list_head desc_pool;
/* To protect desc_pool manipulation */
@@ -1576,9 +1573,9 @@ static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
-static void pl330_dotask(unsigned long data)
+static void pl330_dotask(struct tasklet_struct *t)
{
- struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
+ struct pl330_dmac *pl330 = from_tasklet(pl330, t, tasks);
unsigned long flags;
int i;
@@ -1982,7 +1979,7 @@ static int pl330_add(struct pl330_dmac *pl330)
return ret;
}
- tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
+ tasklet_setup(&pl330->tasks, pl330_dotask);
pl330->state = INIT;
@@ -2065,9 +2062,9 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
}
}
-static void pl330_tasklet(unsigned long data)
+static void pl330_tasklet(struct tasklet_struct *t)
{
- struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
+ struct dma_pl330_chan *pch = from_tasklet(pch, t, task);
struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
bool power_down = false;
@@ -2175,7 +2172,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
return -ENOMEM;
}
- tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+ tasklet_setup(&pch->task, pl330_tasklet);
spin_unlock_irqrestore(&pl330->lock, flags);
@@ -2487,7 +2484,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
- pl330_tasklet((unsigned long)pch);
+ pl330_tasklet(&pch->task);
}
/*
@@ -3034,9 +3031,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
if (IS_ERR(pl330->rstc)) {
- if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
- dev_err(&adev->dev, "Failed to get reset!\n");
- return PTR_ERR(pl330->rstc);
+ return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc), "Failed to get reset!\n");
} else {
ret = reset_control_deassert(pl330->rstc);
if (ret) {
@@ -3047,9 +3042,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
if (IS_ERR(pl330->rstc_ocp)) {
- if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
- dev_err(&adev->dev, "Failed to get OCP reset!\n");
- return PTR_ERR(pl330->rstc_ocp);
+ return dev_err_probe(&adev->dev, PTR_ERR(pl330->rstc_ocp),
+ "Failed to get OCP reset!\n");
} else {
ret = reset_control_deassert(pl330->rstc_ocp);
if (ret) {
@@ -3154,8 +3148,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- adev->dev.dma_parms = &pl330->dma_parms;
-
/*
* This is the limit for transfers with a buswidth of 1, larger
* buswidths will have larger limits.
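The two pl330 reset hunks above, and the stm32-dma/stm32-dmamux/stm32-mdma clock hunks later in this patch, all collapse the same four-line "log unless probe deferral" dance into dev_err_probe(). The helper logs at error level for real failures, stays quiet for -EPROBE_DEFER (recording the deferral reason instead), and returns the error code, so it composes directly with return. The before/after shape, lifted from these hunks:

/* Before: open-coded deferral check. */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
	ret = PTR_ERR(clk);
	if (ret != -EPROBE_DEFER)
		dev_err(dev, "Can't get clock\n");
	return ret;
}

/* After: one call implements the logging policy and returns the error. */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk), "Can't get clock\n");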
diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
index db4c5fd453a9..f387c5bbc170 100644
--- a/drivers/dma/plx_dma.c
+++ b/drivers/dma/plx_dma.c
@@ -241,9 +241,9 @@ static void plx_dma_stop(struct plx_dma_dev *plxdev)
rcu_read_unlock();
}
-static void plx_dma_desc_task(unsigned long data)
+static void plx_dma_desc_task(struct tasklet_struct *t)
{
- struct plx_dma_dev *plxdev = (void *)data;
+ struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);
plx_dma_process_desc(plxdev);
}
@@ -513,8 +513,7 @@ static int plx_dma_create(struct pci_dev *pdev)
}
spin_lock_init(&plxdev->ring_lock);
- tasklet_init(&plxdev->desc_task, plx_dma_desc_task,
- (unsigned long)plxdev);
+ tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
RCU_INIT_POINTER(plxdev->pdev, pdev);
plxdev->bar = pcim_iomap_table(pdev)[0];
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 4db000d5f01c..71cdaaa8134c 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -1660,9 +1660,9 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
/**
* ppc440spe_adma_tasklet - clean up watch-dog initiator
*/
-static void ppc440spe_adma_tasklet(unsigned long data)
+static void ppc440spe_adma_tasklet(struct tasklet_struct *t)
{
- struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
+ struct ppc440spe_adma_chan *chan = from_tasklet(chan, t, irq_tasklet);
spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
__ppc440spe_adma_slot_cleanup(chan);
@@ -4141,8 +4141,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
chan->common.device = &adev->common;
dma_cookie_init(&chan->common);
list_add_tail(&chan->common.device_node, &adev->common.channels);
- tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
- (unsigned long)chan);
+ tasklet_setup(&chan->irq_tasklet, ppc440spe_adma_tasklet);
/* allocate and map helper pages for async validation or
* async_mult/async_sum_product operations on DMA0/1.
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a08dd0d3388..4eeb8bb27279 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -381,7 +381,6 @@ struct bam_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
- struct device_dma_parameters dma_parms;
struct bam_chan *channels;
u32 num_channels;
u32 num_ees;
@@ -1071,13 +1070,13 @@ static void bam_start_dma(struct bam_chan *bchan)
/**
* dma_tasklet - DMA IRQ tasklet
- * @data: tasklet argument (bam controller structure)
+ * @t: tasklet argument (bam controller structure)
*
* Sets up next DMA operation and then processes all completed transactions
*/
-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
{
- struct bam_device *bdev = (struct bam_device *)data;
+ struct bam_device *bdev = from_tasklet(bdev, t, task);
struct bam_chan *bchan;
unsigned long flags;
unsigned int i;
@@ -1293,7 +1292,7 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_disable_clk;
- tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+ tasklet_setup(&bdev->task, dma_tasklet);
bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
sizeof(*bdev->channels), GFP_KERNEL);
@@ -1316,7 +1315,6 @@ static int bam_dma_probe(struct platform_device *pdev)
/* set max dma segment size */
bdev->common.dev = bdev->dev;
- bdev->common.dev->dma_parms = &bdev->dma_parms;
ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
if (ret) {
dev_err(bdev->dev, "cannot set maximum segment size\n");
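A second recurring deletion in this series (mxs-dma, pl330, bam_dma, rcar-dmac, ste_dma40) is the per-driver struct device_dma_parameters member and its dev->dma_parms hookup. These can go because, as of this kernel generation, the driver core allocates dma_parms for platform, AMBA and PCI devices itself, so a driver that only needs a segment-size cap calls the helper directly. A minimal sketch, assuming core-provided dma_parms:

/* No more "dev->dma_parms = &mydev->dma_parms;" boilerplate. */
ret = dma_set_max_seg_size(dev, SZ_64K);	/* limit is illustrative */
if (ret)
	dev_err(dev, "cannot set maximum segment size\n");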
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 0a6d3ea08c78..6c0f9eb8ecc6 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -224,9 +224,9 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
return 0;
}
-static void hidma_issue_task(unsigned long arg)
+static void hidma_issue_task(struct tasklet_struct *t)
{
- struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+ struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);
pm_runtime_get_sync(dmadev->ddev.dev);
hidma_ll_start(dmadev->lldev);
@@ -885,7 +885,7 @@ static int hidma_probe(struct platform_device *pdev)
goto uninit;
dmadev->irq = chirq;
- tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ tasklet_setup(&dmadev->task, hidma_issue_task);
hidma_debug_init(dmadev);
hidma_sysfs_init(dmadev);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index bb4471e84e48..53244e0e34a3 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -173,9 +173,9 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
/*
* Multiple TREs may be queued and waiting in the pending queue.
*/
-static void hidma_ll_tre_complete(unsigned long arg)
+static void hidma_ll_tre_complete(struct tasklet_struct *t)
{
- struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+ struct hidma_lldev *lldev = from_tasklet(lldev, t, task);
struct hidma_tre *tre;
while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
@@ -792,7 +792,7 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
return NULL;
spin_lock_init(&lldev->lock);
- tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+ tasklet_setup(&lldev->task, hidma_ll_tre_complete);
lldev->initialized = 1;
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
return lldev;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 0fa7f14a65a1..1e918e284fc0 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -323,9 +323,9 @@ static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
}
}
-static void sa11x0_dma_tasklet(unsigned long arg)
+static void sa11x0_dma_tasklet(struct tasklet_struct *t)
{
- struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
+ struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
struct sa11x0_dma_phy *p;
struct sa11x0_dma_chan *c;
unsigned pch, pch_alloc = 0;
@@ -928,7 +928,7 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
goto err_ioremap;
}
- tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
+ tasklet_setup(&d->task, sa11x0_dma_tasklet);
for (i = 0; i < NR_PHY_CHAN; i++) {
struct sa11x0_dma_phy *p = &d->phy[i];
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6e530dca6d9e..528deb5d9f31 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -281,10 +281,9 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
desc->in_use = false;
}
-static void sf_pdma_donebh_tasklet(unsigned long arg)
+static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
- struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
- struct sf_pdma_desc *desc = chan->desc;
+ struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
@@ -295,12 +294,15 @@ static void sf_pdma_donebh_tasklet(unsigned long arg)
}
spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
-static void sf_pdma_errbh_tasklet(unsigned long arg)
+static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
- struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
struct sf_pdma_desc *desc = chan->desc;
unsigned long flags;
@@ -332,8 +334,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
residue = readq(regs->residue);
if (!residue) {
- list_del(&chan->desc->vdesc.node);
- vchan_cookie_complete(&chan->desc->vdesc);
+ tasklet_hi_schedule(&chan->done_tasklet);
} else {
/* submit next transaction if possible */
struct sf_pdma_desc *desc = chan->desc;
@@ -347,8 +348,6 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
spin_unlock_irqrestore(&chan->vchan.lock, flags);
- tasklet_hi_schedule(&chan->done_tasklet);
-
return IRQ_HANDLED;
}
@@ -476,10 +475,8 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
- tasklet_init(&chan->done_tasklet,
- sf_pdma_donebh_tasklet, (unsigned long)chan);
- tasklet_init(&chan->err_tasklet,
- sf_pdma_errbh_tasklet, (unsigned long)chan);
+ tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
+ tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
}
}
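The sf-pdma hunks are more than a mechanical conversion: vchan_cookie_complete() moves out of the hard-IRQ handler into the done tasklet, and the descriptor is unlinked and completed there with vchan.lock held. The resulting division of labor, sketched with the virt-dma helpers the driver already uses (struct names hypothetical):

#include <linux/interrupt.h>
#include "../virt-dma.h"

struct my_desc {
	struct virt_dma_desc vdesc;
};

struct my_chan {
	struct virt_dma_chan vchan;
	struct my_desc *desc;
	struct tasklet_struct done_tasklet;
};

/* Hard IRQ: acknowledge the hardware, defer the bookkeeping. */
static irqreturn_t my_done_isr(int irq, void *dev_id)
{
	struct my_chan *chan = dev_id;

	tasklet_hi_schedule(&chan->done_tasklet);
	return IRQ_HANDLED;
}

/* Tasklet: complete the descriptor under the vchan lock. */
static void my_donebh_tasklet(struct tasklet_struct *t)
{
	struct my_chan *chan = from_tasklet(chan, t, done_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}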
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 54d5d0369d3c..13437323a85b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -32,12 +32,12 @@ config SH_DMAE
Enable support for the Renesas SuperH DMA controllers.
config RCAR_DMAC
- tristate "Renesas R-Car Gen2 DMA Controller"
+ tristate "Renesas R-Car Gen{2,3} and RZ/G{1,2} DMA Controller"
depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
help
This driver supports the general purpose DMA controller found in the
- Renesas R-Car second generation SoCs.
+ Renesas R-Car Gen{2,3} and RZ/G{1,2} SoCs.
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 59b36ab5d684..a57705356e8b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -199,7 +199,6 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
- struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -1228,7 +1227,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
* Allocate the sg list dynamically as it would consume too much stack
* space.
*/
- sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
if (!sgl)
return NULL;
@@ -1845,7 +1844,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- dmac->dev->dma_parms = &dmac->parms;
dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 788d696323bb..7f72b3f4cd1a 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -728,7 +728,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
* Allocate the sg list dynamically as it would consume too much stack
* space.
*/
- sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
+ sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
if (!sgl)
return NULL;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 30064689d67f..a5c2843384fd 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -393,9 +393,9 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
}
/* DMA Tasklet */
-static void sirfsoc_dma_tasklet(unsigned long data)
+static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
{
- struct sirfsoc_dma *sdma = (void *)data;
+ struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);
sirfsoc_dma_process_completed(sdma);
}
@@ -938,7 +938,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
list_add_tail(&schan->chan.device_node, &dma->channels);
}
- tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+ tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);
/* Register DMA engine */
dev_set_drvdata(dev, sdma);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 21e2f1d0c210..77ab1f4730be 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -535,7 +535,6 @@ struct d40_gen_dmac {
* mode" allocated physical channels.
* @num_log_chans: The number of logical channels. Calculated from
* num_phy_chans.
- * @dma_parms: DMA parameters for the channel
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
@@ -577,7 +576,6 @@ struct d40_base {
int num_memcpy_chans;
int num_phy_chans;
int num_log_chans;
- struct device_dma_parameters dma_parms;
struct dma_device dma_both;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
@@ -1573,9 +1571,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
}
-static void dma_tasklet(unsigned long data)
+static void dma_tasklet(struct tasklet_struct *t)
{
- struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_chan *d40c = from_tasklet(d40c, t, tasklet);
struct d40_desc *d40d;
unsigned long flags;
bool callback_active;
@@ -2806,8 +2804,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->client);
INIT_LIST_HEAD(&d40c->prepare_queue);
- tasklet_init(&d40c->tasklet, dma_tasklet,
- (unsigned long) d40c);
+ tasklet_setup(&d40c->tasklet, dma_tasklet);
list_add_tail(&d40c->chan.device_node,
&dma->channels);
@@ -3641,7 +3638,6 @@ static int __init d40_probe(struct platform_device *pdev)
if (ret)
goto destroy_cache;
- base->dev->dma_parms = &base->dma_parms;
ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
if (ret) {
d40_err(&pdev->dev, "Failed to set dma max seg size\n");
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 96ad1b3d24c6..d0055d2f0b9a 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1311,12 +1311,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
return PTR_ERR(dmadev->base);
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dmadev->clk)) {
- ret = PTR_ERR(dmadev->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get clock\n");
- return ret;
- }
+ if (IS_ERR(dmadev->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");
ret = clk_prepare_enable(dmadev->clk);
if (ret < 0) {
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 12f7637e13a1..a10ccd964376 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -252,12 +252,9 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
spin_lock_init(&stm32_dmamux->lock);
stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(stm32_dmamux->clk)) {
- ret = PTR_ERR(stm32_dmamux->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Missing clock controller\n");
- return ret;
- }
+ if (IS_ERR(stm32_dmamux->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
+ "Missing clock controller\n");
ret = clk_prepare_enable(stm32_dmamux->clk);
if (ret < 0) {
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 5469563703d1..08cfbfab837b 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1580,12 +1580,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return PTR_ERR(dmadev->base);
dmadev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dmadev->clk)) {
- ret = PTR_ERR(dmadev->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Missing clock controller\n");
- return ret;
- }
+ if (IS_ERR(dmadev->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
+ "Missing clock controller\n");
ret = clk_prepare_enable(dmadev->clk);
if (ret < 0) {
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 06cd7f867f7c..f5f9c86c50bc 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -467,9 +467,9 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
return 0;
}
-static void sun6i_dma_tasklet(unsigned long data)
+static void sun6i_dma_tasklet(struct tasklet_struct *t)
{
- struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+ struct sun6i_dma_dev *sdev = from_tasklet(sdev, t, task);
struct sun6i_vchan *vchan;
struct sun6i_pchan *pchan;
unsigned int pchan_alloc = 0;
@@ -1343,7 +1343,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc->vchans)
return -ENOMEM;
- tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
+ tasklet_setup(&sdc->task, sun6i_dma_tasklet);
for (i = 0; i < sdc->num_pchans; i++) {
struct sun6i_pchan *pchan = &sdc->pchans[i];
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 55fc7400f717..71827d9b0aa1 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -644,9 +644,9 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
}
}
-static void tegra_dma_tasklet(unsigned long data)
+static void tegra_dma_tasklet(struct tasklet_struct *t)
{
- struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
+ struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
struct dmaengine_desc_callback cb;
struct tegra_dma_desc *dma_desc;
unsigned int cb_count;
@@ -1523,8 +1523,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdc->id = i;
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
- tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
- (unsigned long)tdc);
+ tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
spin_lock_init(&tdc->lock);
init_waitqueue_head(&tdc->wq);
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 9a29a107e374..0c67254caee6 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -4,5 +4,8 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o
obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
-obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o
+obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
+ k3-psil-am654.o \
+ k3-psil-j721e.o \
+ k3-psil-j7200.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/k3-psil-j7200.c b/drivers/dma/ti/k3-psil-j7200.c
new file mode 100644
index 000000000000..5ea63ea74822
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j7200.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j7200_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-2 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ PSIL_PDMA_XY_PKT(0x4614),
+ PSIL_PDMA_XY_PKT(0x4615),
+ PSIL_PDMA_XY_PKT(0x4616),
+ PSIL_PDMA_XY_PKT(0x4617),
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW5 */
+ PSIL_ETHERNET(0x4a00),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA_MISC_G2 - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j7200_dst_ep_map[] = {
+ /* CPSW5 */
+ PSIL_ETHERNET(0xca00),
+ PSIL_ETHERNET(0xca01),
+ PSIL_ETHERNET(0xca02),
+ PSIL_ETHERNET(0xca03),
+ PSIL_ETHERNET(0xca04),
+ PSIL_ETHERNET(0xca05),
+ PSIL_ETHERNET(0xca06),
+ PSIL_ETHERNET(0xca07),
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j7200_ep_map = {
+ .name = "j7200",
+ .src = j7200_src_ep_map,
+ .src_count = ARRAY_SIZE(j7200_src_ep_map),
+ .dst = j7200_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j7200_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index e3cfd5f66842..7580870ed746 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -166,6 +166,8 @@ static struct psil_ep j721e_src_ep_map[] = {
/* SA2UL */
PSIL_SA2UL(0x7500, 0),
PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
};
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
@@ -211,6 +213,7 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xf007),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
};
struct psil_ep_map j721e_ep_map = {
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index a1f389ca371e..b4b0fb359eff 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -39,5 +39,6 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
/* SoC PSI-L endpoint maps */
extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
+extern struct psil_ep_map j7200_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index fb7c8150b0d1..837853aab95a 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -9,11 +9,19 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/sys_soc.h>
#include "k3-psil-priv.h"
static DEFINE_MUTEX(ep_map_mutex);
-static struct psil_ep_map *soc_ep_map;
+static const struct psil_ep_map *soc_ep_map;
+
+static const struct soc_device_attribute k3_soc_devices[] = {
+ { .family = "AM65X", .data = &am654_ep_map },
+ { .family = "J721E", .data = &j721e_ep_map },
+ { .family = "J7200", .data = &j7200_ep_map },
+ { /* sentinel */ }
+};
struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
{
@@ -21,10 +29,11 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
mutex_lock(&ep_map_mutex);
if (!soc_ep_map) {
- if (of_machine_is_compatible("ti,am654")) {
- soc_ep_map = &am654_ep_map;
- } else if (of_machine_is_compatible("ti,j721e")) {
- soc_ep_map = &j721e_ep_map;
+ const struct soc_device_attribute *soc;
+
+ soc = soc_device_match(k3_soc_devices);
+ if (soc) {
+ soc_ep_map = soc->data;
} else {
pr_err("PSIL: No compatible machine found for map\n");
mutex_unlock(&ep_map_mutex);
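The k3-psil change replaces a growing if/else chain of of_machine_is_compatible() tests with a data-driven soc_device_match() table, so supporting J7200 (and future SoCs) becomes a one-line table entry. The lookup pattern in isolation, with hypothetical per-SoC data:

#include <linux/sys_soc.h>

struct my_soc_data {
	int quirk;			/* illustrative per-SoC field */
};

static const struct my_soc_data am654_data = { .quirk = 1 };

static const struct soc_device_attribute my_soc_devices[] = {
	{ .family = "AM65X", .data = &am654_data },
	{ /* sentinel */ }
};

static const struct my_soc_data *my_get_soc_data(void)
{
	const struct soc_device_attribute *soc;

	soc = soc_device_match(my_soc_devices);
	return soc ? soc->data : NULL;	/* NULL: unknown SoC */
}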
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 3a5d33ea5ebe..a367584f0d7b 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -378,17 +378,11 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
- u32 txrt_ctl;
-
- txrt_ctl = UDMA_PEER_RT_EN_ENABLE;
xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
- txrt_ctl);
+ UDMA_PEER_RT_EN_ENABLE);
- txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx,
- UDMA_CHAN_RT_CTL_REG);
- txrt_ctl |= UDMA_CHAN_RT_CTL_EN;
xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
- txrt_ctl);
+ UDMA_CHAN_RT_CTL_EN);
k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
return 0;
@@ -579,8 +573,8 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
- flow_cfg->ring_rxq_id,
flow_cfg->ring_rxfdq0_id,
+ flow_cfg->ring_rxq_id,
&flow->ringrxfdq,
&flow->ringrx);
if (ret) {
@@ -1058,19 +1052,14 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
- u32 rxrt_ctl;
-
if (rx_chn->remote)
return -EINVAL;
if (rx_chn->flows_ready < rx_chn->flow_num)
return -EINVAL;
- rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx,
- UDMA_CHAN_RT_CTL_REG);
- rxrt_ctl |= UDMA_CHAN_RT_CTL_EN;
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
- rxrt_ctl);
+ UDMA_CHAN_RT_CTL_EN);
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index d86dba0fd8e6..82cf6c77f5c9 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
@@ -91,6 +92,9 @@ struct udma_match_data {
bool enable_memcpy_support;
u32 flags;
u32 statictr_z_mask;
+};
+
+struct udma_soc_data {
u32 rchan_oes_offset;
};
@@ -117,6 +121,7 @@ struct udma_dev {
struct device *dev;
void __iomem *mmrs[MMR_LAST];
const struct udma_match_data *match_data;
+ const struct udma_soc_data *soc_data;
u8 tpl_levels;
u32 tpl_start_idx[3];
@@ -1679,7 +1684,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
struct udma_dev *ud = to_udma_dev(chan->device);
- const struct udma_match_data *match_data = ud->match_data;
+ const struct udma_soc_data *soc_data = ud->soc_data;
struct k3_ring *irq_ring;
u32 irq_udma_idx;
int ret;
@@ -1779,7 +1784,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -2024,11 +2029,6 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
int num_tr = 0;
int tr_idx = 0;
- if (!is_slave_direction(dir)) {
- dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
- return NULL;
- }
-
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
if (sg_dma_len(sgent) < SZ_64K)
@@ -2400,11 +2400,6 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
unsigned int i;
int num_tr;
- if (!is_slave_direction(dir)) {
- dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
- return NULL;
- }
-
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
if (num_tr < 0) {
@@ -2914,9 +2909,9 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
* This tasklet handles the completion of a DMA descriptor by
* calling its callback and freeing it.
*/
-static void udma_vchan_complete(unsigned long arg)
+static void udma_vchan_complete(struct tasklet_struct *t)
{
- struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_chan *vc = from_tasklet(vc, t, task);
struct virt_dma_desc *vd, *_vd;
struct dmaengine_desc_callback cb;
LIST_HEAD(head);
@@ -3101,14 +3096,12 @@ static struct udma_match_data am654_main_data = {
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x200,
};
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x200,
};
static struct udma_match_data j721e_main_data = {
@@ -3116,7 +3109,6 @@ static struct udma_match_data j721e_main_data = {
.enable_memcpy_support = true,
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
.statictr_z_mask = GENMASK(23, 0),
- .rchan_oes_offset = 0x400,
};
static struct udma_match_data j721e_mcu_data = {
@@ -3124,7 +3116,6 @@ static struct udma_match_data j721e_mcu_data = {
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
.statictr_z_mask = GENMASK(23, 0),
- .rchan_oes_offset = 0x400,
};
static const struct of_device_id udma_of_match[] = {
@@ -3145,15 +3136,31 @@ static const struct of_device_id udma_of_match[] = {
{ /* Sentinel */ },
};
+static struct udma_soc_data am654_soc_data = {
+ .rchan_oes_offset = 0x200,
+};
+
+static struct udma_soc_data j721e_soc_data = {
+ .rchan_oes_offset = 0x400,
+};
+
+static struct udma_soc_data j7200_soc_data = {
+ .rchan_oes_offset = 0x80,
+};
+
+static const struct soc_device_attribute k3_soc_devices[] = {
+ { .family = "AM65X", .data = &am654_soc_data },
+ { .family = "J721E", .data = &j721e_soc_data },
+ { .family = "J7200", .data = &j7200_soc_data },
+ { /* sentinel */ }
+};
+
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
- struct resource *res;
int i;
for (i = 0; i < MMR_LAST; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mmr_names[i]);
- ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
if (IS_ERR(ud->mmrs[i]))
return PTR_ERR(ud->mmrs[i]);
}
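
[Editor's note] The udma_get_mmrs() hunk above uses devm_platform_ioremap_resource_byname(), a helper that folds the platform_get_resource_byname() + devm_ioremap_resource() pair into one call. A sketch of the equivalence, with an illustrative resource name:

	/* equivalent two-step form */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gcfg");
	base = devm_ioremap_resource(&pdev->dev, res);

	/* one-step helper */
	base = devm_platform_ioremap_resource_byname(pdev, "gcfg");
	if (IS_ERR(base))
		return PTR_ERR(base);
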
@@ -3287,7 +3294,7 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
irq_res.desc[i].start = rm_res->desc[j].start +
- ud->match_data->rchan_oes_offset;
+ ud->soc_data->rchan_oes_offset;
irq_res.desc[i].num = rm_res->desc[j].num;
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -3497,6 +3504,7 @@ static void udma_dbg_summary_show(struct seq_file *s,
static int udma_probe(struct platform_device *pdev)
{
struct device_node *navss_node = pdev->dev.parent->of_node;
+ const struct soc_device_attribute *soc;
struct device *dev = &pdev->dev;
struct udma_dev *ud;
const struct of_device_id *match;
@@ -3561,6 +3569,13 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
+
dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
@@ -3649,8 +3664,7 @@ static int udma_probe(struct platform_device *pdev)
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
- tasklet_init(&uc->vc.task, udma_vchan_complete,
- (unsigned long)&uc->vc);
+ tasklet_setup(&uc->vc.task, udma_vchan_complete);
init_completion(&uc->teardown_completed);
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
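
[Editor's note] Moving rchan_oes_offset out of udma_match_data (keyed on the DT compatible string) into the new udma_soc_data (keyed on the SoC family via soc_device_match()) lets SoCs that share a compatible still get distinct offsets - plausibly the motivation for the new J7200 entry, which has its own 0x80 offset. The lookup pattern in compact form, as used in probe above:

	soc = soc_device_match(k3_soc_devices);	/* matches on SoC family string */
	if (!soc)
		return -ENODEV;			/* unrecognized SoC family */
	ud->soc_data = soc->data;		/* per-family data, e.g. rchan_oes_offset */
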
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 918301e17552..c9fe5e3a6b55 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1904,7 +1904,7 @@ static struct platform_driver omap_dma_driver = {
.remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
- .of_match_table = of_match_ptr(omap_dma_match),
+ .of_match_table = omap_dma_match,
},
};
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 68e48bf54d78..3f524be69efb 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -563,9 +563,9 @@ static int td_terminate_all(struct dma_chan *chan)
return 0;
}
-static void td_tasklet(unsigned long data)
+static void td_tasklet(struct tasklet_struct *t)
{
- struct timb_dma *td = (struct timb_dma *)data;
+ struct timb_dma *td = from_tasklet(td, t, tasklet);
u32 isr;
u32 ipr;
u32 ier;
@@ -658,7 +658,7 @@ static int td_probe(struct platform_device *pdev)
iowrite32(0x0, td->membase + TIMBDMA_IER);
iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
- tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
+ tasklet_setup(&td->tasklet, td_tasklet);
err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
if (err) {
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 628bdf4430c7..5b6b375a257e 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -601,13 +601,13 @@ scan_done:
}
}
-static void txx9dmac_chan_tasklet(unsigned long data)
+static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
{
int irq;
u32 csr;
struct txx9dmac_chan *dc;
- dc = (struct txx9dmac_chan *)data;
+ dc = from_tasklet(dc, t, tasklet);
csr = channel_readl(dc, CSR);
dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
@@ -638,13 +638,13 @@ static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void txx9dmac_tasklet(unsigned long data)
+static void txx9dmac_tasklet(struct tasklet_struct *t)
{
int irq;
u32 csr;
struct txx9dmac_chan *dc;
- struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
+ struct txx9dmac_dev *ddev = from_tasklet(ddev, t, tasklet);
u32 mcr;
int i;
@@ -1113,8 +1113,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
- (unsigned long)dc);
+ tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
dc->irq = irq;
err = devm_request_irq(&pdev->dev, dc->irq,
txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
@@ -1200,8 +1199,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev)
ddev->irq = platform_get_irq(pdev, 0);
if (ddev->irq >= 0) {
- tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
- (unsigned long)ddev);
+ tasklet_setup(&ddev->tasklet, txx9dmac_tasklet);
err = devm_request_irq(&pdev->dev, ddev->irq,
txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
if (err)
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 23e33a85f033..a6f4265be0c9 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -80,9 +80,9 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
* This tasklet handles the completion of a DMA descriptor by
* calling its callback and freeing it.
*/
-static void vchan_complete(unsigned long arg)
+static void vchan_complete(struct tasklet_struct *t)
{
- struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_chan *vc = from_tasklet(vc, t, task);
struct virt_dma_desc *vd, *_vd;
struct dmaengine_desc_callback cb;
LIST_HEAD(head);
@@ -131,7 +131,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
INIT_LIST_HEAD(&vc->desc_completed);
INIT_LIST_HEAD(&vc->desc_terminated);
- tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
+ tasklet_setup(&vc->task, vchan_complete);
vc->chan.device = dmadev;
list_add_tail(&vc->chan.device_node, &dmadev->channels);
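
[Editor's note] The tasklet conversions running through this series (k3-udma, timb_dma, txx9dmac, virt-dma here, and the drivers below) all apply the same mechanical change to the tasklet_setup()/from_tasklet() API, which replaces the unsigned long cast with a type-safe container_of(). A generic sketch; the struct and field names are illustrative:

	/* old: context smuggled through an unsigned long */
	static void foo_tasklet(unsigned long data)
	{
		struct foo_chan *c = (struct foo_chan *)data;
		foo_complete(c);
	}
	/* tasklet_init(&c->task, foo_tasklet, (unsigned long)c); */

	/* new: the callback receives the tasklet itself and recovers the
	 * containing structure with from_tasklet() */
	static void foo_tasklet(struct tasklet_struct *t)
	{
		struct foo_chan *c = from_tasklet(c, t, task);
		foo_complete(c);
	}
	/* tasklet_setup(&c->task, foo_tasklet); */
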
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 4f733d37a22e..3589b4ef50b8 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -975,9 +975,9 @@ static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
return dma_cookie_status(dchan, cookie, txstate);
}
-static void xgene_dma_tasklet_cb(unsigned long data)
+static void xgene_dma_tasklet_cb(struct tasklet_struct *t)
{
- struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
+ struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet);
/* Run all cleanup for descriptors which have been completed */
xgene_dma_cleanup_descriptors(chan);
@@ -1539,8 +1539,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
INIT_LIST_HEAD(&chan->ld_completed);
- tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
- (unsigned long)chan);
+ tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb);
chan->pending = 0;
chan->desc_pool = NULL;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5429497d3560..ecff35402860 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1044,11 +1044,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/**
* xilinx_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx DMA channel structure
+ * @t: Pointer to the tasklet embedded in the Xilinx DMA channel structure
*/
-static void xilinx_dma_do_tasklet(unsigned long data)
+static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
{
- struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
+ struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
xilinx_dma_chan_desc_cleanup(chan);
}
@@ -2536,13 +2536,8 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*tmp_clk = NULL;
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk)) {
- err = PTR_ERR(*axi_clk);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
- err);
- return err;
- }
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
if (IS_ERR(*tx_clk))
@@ -2603,22 +2598,12 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*tmp2_clk = NULL;
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk)) {
- err = PTR_ERR(*axi_clk);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
- err);
- return err;
- }
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
- if (IS_ERR(*dev_clk)) {
- err = PTR_ERR(*dev_clk);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
- err);
- return err;
- }
+ if (IS_ERR(*dev_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
err = clk_prepare_enable(*axi_clk);
if (err) {
@@ -2647,13 +2632,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
int err;
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
- if (IS_ERR(*axi_clk)) {
- err = PTR_ERR(*axi_clk);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
- err);
- return err;
- }
+ if (IS_ERR(*axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
if (IS_ERR(*tx_clk))
@@ -2866,8 +2846,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Initialize the tasklet */
- tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
- (unsigned long)chan);
+ tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
/*
* Initialize the DMA channel and add it to the DMA engine channels
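
[Editor's note] The clock hunks in xilinx_dma.c above collapse the open-coded -EPROBE_DEFER special case into dev_err_probe(), which logs at error level for real failures, stays quiet for probe deferral (recording the reason in debugfs devices_deferred instead), and returns the error code so it composes with return. The pattern, with an illustrative clock name:

	clk = devm_clk_get(&pdev->dev, "foo_aclk");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get foo_aclk\n");
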
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index b37197c772aa..55df63dead8d 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
@@ -267,6 +268,210 @@ struct xilinx_dpdma_device {
};
/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_irq_done_count;
+ unsigned int chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+ const char *name;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read)(char *buf);
+ int (*write)(char *args);
+};
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+ if (chan->id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_irq_done_count++;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
+{
+ size_t out_str_len;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(buf, out_str_len, "%d",
+ dpdma_debugfs.xilinx_dpdma_irq_done_count);
+
+ return 0;
+}
+
+static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
+{
+ char *arg;
+ int ret;
+ u32 id;
+
+ arg = strsep(&args, " ");
+ if (!arg || strncasecmp(arg, "start", 5))
+ return -EINVAL;
+
+ arg = strsep(&args, " ");
+ if (!arg)
+ return -EINVAL;
+
+ ret = kstrtou32(arg, 0, &id);
+ if (ret < 0)
+ return ret;
+
+ if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
+ return -EINVAL;
+
+ dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+ dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
+ dpdma_debugfs.chan_id = id;
+
+ return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {
+ .name = "DESCRIPTOR_DONE_INTR",
+ .tc = DPDMA_TC_INTR_DONE,
+ .read = xilinx_dpdma_debugfs_desc_done_irq_read,
+ .write = xilinx_dpdma_debugfs_desc_done_irq_write,
+ },
+};
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ enum xilinx_dpdma_testcases testcase;
+ char *kern_buff;
+ int ret = 0;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+ return -ENOMEM;
+ }
+
+ testcase = READ_ONCE(dpdma_debugfs.testcase);
+ if (testcase != DPDMA_TC_NONE) {
+ ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
+ if (ret < 0)
+ goto done;
+ } else {
+ strlcpy(kern_buff, "No testcase executed",
+ XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
+ }
+
+ size = min(size, strlen(kern_buff));
+ if (copy_to_user(buf, kern_buff, size))
+ ret = -EFAULT;
+
+done:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *testcase;
+ unsigned int i;
+ int ret;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ /* Only a single test instance is supported at a time. */
+ if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0)
+ goto done;
+
+ /* Read the testcase name from a user request. */
+ testcase = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+ if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = dpdma_debugfs_reqs[i].write(kern_buff);
+ if (ret < 0)
+ goto done;
+
+ ret = size;
+
+done:
+ kfree(kern_buff_start);
+ return ret;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
+{
+ struct dentry *dent;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
+ NULL, &fops_xilinx_dpdma_dbgfs);
+ if (IS_ERR(dent))
+ dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
+}
+
+#else
+static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
+{
+}
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/* -----------------------------------------------------------------------------
* I/O Accessors
*/
@@ -842,6 +1047,8 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_debugfs_desc_done_irq(chan);
+
if (active)
vchan_cyclic_callback(&active->vdesc);
else
@@ -1251,15 +1458,15 @@ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
/**
* xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
- * @data: tasklet data to be casted to DPDMA channel structure
+ * @t: pointer to the tasklet associated with this handler
*
* Per channel error handling tasklet. This function waits for the outstanding
* transaction to complete and triggers error handling. After error handling,
* re-enable channel error interrupts, and restart the channel if needed.
*/
-static void xilinx_dpdma_chan_err_task(unsigned long data)
+static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
- struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+ struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
struct xilinx_dpdma_device *xdev = chan->xdev;
unsigned long flags;
@@ -1348,8 +1555,7 @@ static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
spin_lock_init(&chan->lock);
init_waitqueue_head(&chan->wait_to_stop);
- tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
- (unsigned long)chan);
+ tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
vchan_init(&chan->vchan, &xdev->common);
@@ -1477,6 +1683,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
xilinx_dpdma_enable_irq(xdev);
+ xilinx_dpdma_debugfs_init(xdev);
+
dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
return 0;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index ff253696d183..d8419565b92c 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -742,11 +742,11 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
/**
* zynqmp_dma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the ZynqMP DMA channel structure
+ * @t: Pointer to the tasklet embedded in the ZynqMP DMA channel structure
*/
-static void zynqmp_dma_do_tasklet(unsigned long data)
+static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
{
- struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
u32 count;
unsigned long irqflags;
@@ -908,7 +908,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
zdev->chan = chan;
- tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+ tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->active_list);
INIT_LIST_HEAD(&chan->pending_list);
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
index 5fe2e8b9a7b8..b057582b2fac 100644
--- a/drivers/dma/zx_dma.c
+++ b/drivers/dma/zx_dma.c
@@ -285,9 +285,7 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
p = &d->phy[i];
c = p->vchan;
if (c) {
- unsigned long flags;
-
- spin_lock_irqsave(&c->vc.lock, flags);
+ spin_lock(&c->vc.lock);
if (c->cyclic) {
vchan_cyclic_callback(&p->ds_run->vd);
} else {
@@ -295,7 +293,7 @@ static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
p->ds_done = p->ds_run;
task = 1;
}
- spin_unlock_irqrestore(&c->vc.lock, flags);
+ spin_unlock(&c->vc.lock);
irq_chan |= BIT(i);
}
}
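
[Editor's note] Dropping irqsave/irqrestore is safe in zx_dma_int_handler() because it is a primary (hardirq) handler, where local interrupts are already disabled; saving flags only to restore the identical state is pure overhead. Acquirers of the same lock in process context still need the irqsave variant. The shape of the pattern, with illustrative names:

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_chan *c = dev_id;

		/* hardirq context: interrupts already off, plain lock suffices */
		spin_lock(&c->lock);
		foo_handle_completion(c);
		spin_unlock(&c->lock);

		return IRQ_HANDLED;
	}
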
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 7b6ec3014ba2..7a47680d6f07 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -100,6 +100,13 @@ config EDAC_AMD64_ERROR_INJECTION
In addition, there are two control files, inject_read and inject_write,
which trigger the DRAM ECC Read and Write respectively.
+config EDAC_AL_MC
+ tristate "Amazon's Annapurna Lab Memory Controller"
+ depends on (ARCH_ALPINE || COMPILE_TEST)
+ help
+ Support for error detection and correction for Amazon's Annapurna
+ Labs Alpine chips, which provide 1-bit error correction and 2-bit error detection.
+
config EDAC_AMD76X
tristate "AMD 76x (760, 762, 768)"
depends on PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 269e15118cea..3a849168780d 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_EDAC_GHES) += ghes_edac.o
edac_mce_amd-y := mce_amd.o
obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
+obj-$(CONFIG_EDAC_AL_MC) += al_mc_edac.o
obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
diff --git a/drivers/edac/al_mc_edac.c b/drivers/edac/al_mc_edac.c
new file mode 100644
index 000000000000..7d4f396c27b5
--- /dev/null
+++ b/drivers/edac/al_mc_edac.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/edac.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include "edac_module.h"
+
+/* Registers Offset */
+#define AL_MC_ECC_CFG 0x70
+#define AL_MC_ECC_CLEAR 0x7c
+#define AL_MC_ECC_ERR_COUNT 0x80
+#define AL_MC_ECC_CE_ADDR0 0x84
+#define AL_MC_ECC_CE_ADDR1 0x88
+#define AL_MC_ECC_UE_ADDR0 0xa4
+#define AL_MC_ECC_UE_ADDR1 0xa8
+#define AL_MC_ECC_CE_SYND0 0x8c
+#define AL_MC_ECC_CE_SYND1 0x90
+#define AL_MC_ECC_CE_SYND2 0x94
+#define AL_MC_ECC_UE_SYND0 0xac
+#define AL_MC_ECC_UE_SYND1 0xb0
+#define AL_MC_ECC_UE_SYND2 0xb4
+
+/* Registers Fields */
+#define AL_MC_ECC_CFG_SCRUB_DISABLED BIT(4)
+
+#define AL_MC_ECC_CLEAR_UE_COUNT BIT(3)
+#define AL_MC_ECC_CLEAR_CE_COUNT BIT(2)
+#define AL_MC_ECC_CLEAR_UE_ERR BIT(1)
+#define AL_MC_ECC_CLEAR_CE_ERR BIT(0)
+
+#define AL_MC_ECC_ERR_COUNT_UE GENMASK(31, 16)
+#define AL_MC_ECC_ERR_COUNT_CE GENMASK(15, 0)
+
+#define AL_MC_ECC_CE_ADDR0_RANK GENMASK(25, 24)
+#define AL_MC_ECC_CE_ADDR0_ROW GENMASK(17, 0)
+
+#define AL_MC_ECC_CE_ADDR1_BG GENMASK(25, 24)
+#define AL_MC_ECC_CE_ADDR1_BANK GENMASK(18, 16)
+#define AL_MC_ECC_CE_ADDR1_COLUMN GENMASK(11, 0)
+
+#define AL_MC_ECC_UE_ADDR0_RANK GENMASK(25, 24)
+#define AL_MC_ECC_UE_ADDR0_ROW GENMASK(17, 0)
+
+#define AL_MC_ECC_UE_ADDR1_BG GENMASK(25, 24)
+#define AL_MC_ECC_UE_ADDR1_BANK GENMASK(18, 16)
+#define AL_MC_ECC_UE_ADDR1_COLUMN GENMASK(11, 0)
+
+#define DRV_NAME "al_mc_edac"
+#define AL_MC_EDAC_MSG_MAX 256
+
+struct al_mc_edac {
+ void __iomem *mmio_base;
+ spinlock_t lock;
+ int irq_ce;
+ int irq_ue;
+};
+
+static void prepare_msg(char *message, size_t buffer_size,
+ enum hw_event_mc_err_type type,
+ u8 rank, u32 row, u8 bg, u8 bank, u16 column,
+ u32 syn0, u32 syn1, u32 syn2)
+{
+ snprintf(message, buffer_size,
+ "%s rank=0x%x row=0x%x bg=0x%x bank=0x%x col=0x%x syn0: 0x%x syn1: 0x%x syn2: 0x%x",
+ type == HW_EVENT_ERR_UNCORRECTED ? "UE" : "CE",
+ rank, row, bg, bank, column, syn0, syn1, syn2);
+}
+
+static int handle_ce(struct mem_ctl_info *mci)
+{
+ u32 eccerrcnt, ecccaddr0, ecccaddr1, ecccsyn0, ecccsyn1, ecccsyn2, row;
+ struct al_mc_edac *al_mc = mci->pvt_info;
+ char msg[AL_MC_EDAC_MSG_MAX];
+ u16 ce_count, column;
+ unsigned long flags;
+ u8 rank, bg, bank;
+
+ eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
+ ce_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_CE, eccerrcnt);
+ if (!ce_count)
+ return 0;
+
+ ecccaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR0);
+ ecccaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR1);
+ ecccsyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND0);
+ ecccsyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND1);
+ ecccsyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND2);
+
+ writel_relaxed(AL_MC_ECC_CLEAR_CE_COUNT | AL_MC_ECC_CLEAR_CE_ERR,
+ al_mc->mmio_base + AL_MC_ECC_CLEAR);
+
+ dev_dbg(mci->pdev, "eccuaddr0=0x%08x eccuaddr1=0x%08x\n",
+ ecccaddr0, ecccaddr1);
+
+ rank = FIELD_GET(AL_MC_ECC_CE_ADDR0_RANK, ecccaddr0);
+ row = FIELD_GET(AL_MC_ECC_CE_ADDR0_ROW, ecccaddr0);
+
+ bg = FIELD_GET(AL_MC_ECC_CE_ADDR1_BG, ecccaddr1);
+ bank = FIELD_GET(AL_MC_ECC_CE_ADDR1_BANK, ecccaddr1);
+ column = FIELD_GET(AL_MC_ECC_CE_ADDR1_COLUMN, ecccaddr1);
+
+ prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_CORRECTED,
+ rank, row, bg, bank, column,
+ ecccsyn0, ecccsyn1, ecccsyn2);
+
+ spin_lock_irqsave(&al_mc->lock, flags);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ ce_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
+ spin_unlock_irqrestore(&al_mc->lock, flags);
+
+ return ce_count;
+}
+
+static int handle_ue(struct mem_ctl_info *mci)
+{
+ u32 eccerrcnt, eccuaddr0, eccuaddr1, eccusyn0, eccusyn1, eccusyn2, row;
+ struct al_mc_edac *al_mc = mci->pvt_info;
+ char msg[AL_MC_EDAC_MSG_MAX];
+ u16 ue_count, column;
+ unsigned long flags;
+ u8 rank, bg, bank;
+
+ eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
+ ue_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_UE, eccerrcnt);
+ if (!ue_count)
+ return 0;
+
+ eccuaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR0);
+ eccuaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR1);
+ eccusyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND0);
+ eccusyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND1);
+ eccusyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND2);
+
+ writel_relaxed(AL_MC_ECC_CLEAR_UE_COUNT | AL_MC_ECC_CLEAR_UE_ERR,
+ al_mc->mmio_base + AL_MC_ECC_CLEAR);
+
+ dev_dbg(mci->pdev, "eccuaddr0=0x%08x eccuaddr1=0x%08x\n",
+ eccuaddr0, eccuaddr1);
+
+ rank = FIELD_GET(AL_MC_ECC_UE_ADDR0_RANK, eccuaddr0);
+ row = FIELD_GET(AL_MC_ECC_UE_ADDR0_ROW, eccuaddr0);
+
+ bg = FIELD_GET(AL_MC_ECC_UE_ADDR1_BG, eccuaddr1);
+ bank = FIELD_GET(AL_MC_ECC_UE_ADDR1_BANK, eccuaddr1);
+ column = FIELD_GET(AL_MC_ECC_UE_ADDR1_COLUMN, eccuaddr1);
+
+ prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_UNCORRECTED,
+ rank, row, bg, bank, column,
+ eccusyn0, eccusyn1, eccusyn2);
+
+ spin_lock_irqsave(&al_mc->lock, flags);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ ue_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
+ spin_unlock_irqrestore(&al_mc->lock, flags);
+
+ return ue_count;
+}
+
+static void al_mc_edac_check(struct mem_ctl_info *mci)
+{
+ struct al_mc_edac *al_mc = mci->pvt_info;
+
+ if (al_mc->irq_ue <= 0)
+ handle_ue(mci);
+
+ if (al_mc->irq_ce <= 0)
+ handle_ce(mci);
+}
+
+static irqreturn_t al_mc_edac_irq_handler_ue(int irq, void *info)
+{
+ struct platform_device *pdev = info;
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ if (handle_ue(mci))
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static irqreturn_t al_mc_edac_irq_handler_ce(int irq, void *info)
+{
+ struct platform_device *pdev = info;
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ if (handle_ce(mci))
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
+static enum scrub_type get_scrub_mode(void __iomem *mmio_base)
+{
+ u32 ecccfg0;
+
+ ecccfg0 = readl(mmio_base + AL_MC_ECC_CFG);
+
+ if (FIELD_GET(AL_MC_ECC_CFG_SCRUB_DISABLED, ecccfg0))
+ return SCRUB_NONE;
+ else
+ return SCRUB_HW_SRC;
+}
+
+static void devm_al_mc_edac_free(void *data)
+{
+ edac_mc_free(data);
+}
+
+static void devm_al_mc_edac_del(void *data)
+{
+ edac_mc_del_mc(data);
+}
+
+static int al_mc_edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[1];
+ struct mem_ctl_info *mci;
+ struct al_mc_edac *al_mc;
+ void __iomem *mmio_base;
+ struct dimm_info *dimm;
+ int ret;
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmio_base)) {
+ dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
+ PTR_ERR(mmio_base));
+ return PTR_ERR(mmio_base);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct al_mc_edac));
+ if (!mci)
+ return -ENOMEM;
+
+ ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
+ if (ret) {
+ edac_mc_free(mci);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, mci);
+ al_mc = mci->pvt_info;
+
+ al_mc->mmio_base = mmio_base;
+
+ al_mc->irq_ue = of_irq_get_byname(pdev->dev.of_node, "ue");
+ if (al_mc->irq_ue <= 0)
+ dev_dbg(&pdev->dev,
+ "no IRQ defined for UE - falling back to polling\n");
+
+ al_mc->irq_ce = of_irq_get_byname(pdev->dev.of_node, "ce");
+ if (al_mc->irq_ce <= 0)
+ dev_dbg(&pdev->dev,
+ "no IRQ defined for CE - falling back to polling\n");
+
+ /*
+ * If both interrupts (ue/ce) are found, use interrupt mode.
+ * If neither interrupt is found, use polling mode.
+ * If only one interrupt is found, use interrupt mode for it but
+ * keep polling enabled for the other.
+ */
+ if (al_mc->irq_ue <= 0 || al_mc->irq_ce <= 0) {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ mci->edac_check = al_mc_edac_check;
+ } else {
+ edac_op_state = EDAC_OPSTATE_INT;
+ }
+
+ spin_lock_init(&al_mc->lock);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = DRV_NAME;
+ mci->ctl_name = "al_mc";
+ mci->pdev = &pdev->dev;
+ mci->scrub_mode = get_scrub_mode(mmio_base);
+
+ dimm = *mci->dimms;
+ dimm->grain = 1;
+
+ ret = edac_mc_add_mc(mci);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "fail to add memory controller device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
+ if (ret) {
+ edac_mc_del_mc(&pdev->dev);
+ return ret;
+ }
+
+ if (al_mc->irq_ue > 0) {
+ ret = devm_request_irq(&pdev->dev,
+ al_mc->irq_ue,
+ al_mc_edac_irq_handler_ue,
+ IRQF_SHARED,
+ pdev->name,
+ pdev);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to request UE IRQ %d (%d)\n",
+ al_mc->irq_ue, ret);
+ return ret;
+ }
+ }
+
+ if (al_mc->irq_ce > 0) {
+ ret = devm_request_irq(&pdev->dev,
+ al_mc->irq_ce,
+ al_mc_edac_irq_handler_ce,
+ IRQF_SHARED,
+ pdev->name,
+ pdev);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to request CE IRQ %d (%d)\n",
+ al_mc->irq_ce, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id al_mc_edac_of_match[] = {
+ { .compatible = "amazon,al-mc-edac", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, al_mc_edac_of_match);
+
+static struct platform_driver al_mc_edac_driver = {
+ .probe = al_mc_edac_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = al_mc_edac_of_match,
+ },
+};
+
+module_platform_driver(al_mc_edac_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Talel Shenhar");
+MODULE_DESCRIPTION("Amazon's Annapurna Lab's Memory Controller EDAC Driver");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index fcc08bbf6945..1362274d840b 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3385,6 +3385,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x19:
+ if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
+ fam_type = &family_types[F17_M70H_CPUS];
+ pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ fam_type->ctl_name = "F19h_M20h";
+ break;
+ }
fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
family_types[F19_CPUS].ctl_name = "F19h";
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index b194658b8b5c..fde809efc520 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
/* register interrupt handler */
irq = platform_get_irq(pdev, 0);
dev_dbg(&pdev->dev, "got irq %d\n", irq);
- if (!irq)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
DRV_NAME, ctx);
@@ -388,23 +388,7 @@ static struct platform_driver aspeed_driver = {
.probe = aspeed_probe,
.remove = aspeed_remove
};
-
-
-static int __init aspeed_init(void)
-{
- return platform_driver_register(&aspeed_driver);
-}
-
-
-static void __exit aspeed_exit(void)
-{
- platform_driver_unregister(&aspeed_driver);
-}
-
-
-module_init(aspeed_init);
-module_exit(aspeed_exit);
-
+module_platform_driver(aspeed_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Schaeckeler <sschaeck@cisco.com>");
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index de732dc2ef33..313d08018166 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -7,7 +7,7 @@
* Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
*
* Datasheets:
- * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ * https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
* ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4e6aca595133..2f9f1e74bb35 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -474,8 +474,12 @@ static ssize_t dimmdev_location_show(struct device *dev,
struct device_attribute *mattr, char *data)
{
struct dimm_info *dimm = to_dimm(dev);
+ ssize_t count;
- return edac_dimm_info_location(dimm, data, PAGE_SIZE);
+ count = edac_dimm_info_location(dimm, data, PAGE_SIZE);
+ count += scnprintf(data + count, PAGE_SIZE - count, "\n");
+
+ return count;
}
static ssize_t dimmdev_label_show(struct device *dev,
@@ -813,15 +817,23 @@ static ssize_t mci_max_location_show(struct device *dev,
char *data)
{
struct mem_ctl_info *mci = to_mci(dev);
- int i;
+ int len = PAGE_SIZE;
char *p = data;
+ int i, n;
for (i = 0; i < mci->n_layers; i++) {
- p += sprintf(p, "%s %d ",
- edac_layer_name[mci->layers[i].type],
- mci->layers[i].size - 1);
+ n = scnprintf(p, len, "%s %d ",
+ edac_layer_name[mci->layers[i].type],
+ mci->layers[i].size - 1);
+ len -= n;
+ if (len <= 0)
+ goto out;
+
+ p += n;
}
+ p += scnprintf(p, len, "\n");
+out:
return p - data;
}
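
[Editor's note] The sysfs hunks above switch from sprintf() to scnprintf(), which takes the remaining buffer size and returns the number of characters actually written (never more than size - 1), so the running length can be tracked without overrunning the PAGE_SIZE sysfs buffer. A condensed sketch of the same loop shape:

	ssize_t foo_show(char *buf)
	{
		int len = PAGE_SIZE, n, i;
		char *p = buf;

		for (i = 0; i < 4; i++) {
			n = scnprintf(p, len, "item%d ", i);
			len -= n;
			if (len <= 0)
				goto out;
			p += n;
		}
		p += scnprintf(p, len, "\n");
	out:
		return p - buf;
	}
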
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 94d1e3165052..a918ca93e4f7 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2013 by Mauro Carvalho Chehab
*
- * Red Hat Inc. http://www.redhat.com
+ * Red Hat Inc. https://www.redhat.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -372,8 +372,18 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "rank:%d ", mem_err->rank);
if (mem_err->validation_bits & CPER_MEM_VALID_BANK)
p += sprintf(p, "bank:%d ", mem_err->bank);
- if (mem_err->validation_bits & CPER_MEM_VALID_ROW)
- p += sprintf(p, "row:%d ", mem_err->row);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BANK_GROUP)
+ p += sprintf(p, "bank_group:%d ",
+ mem_err->bank >> CPER_MEM_BANK_GROUP_SHIFT);
+ if (mem_err->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
+ p += sprintf(p, "bank_address:%d ",
+ mem_err->bank & CPER_MEM_BANK_ADDRESS_MASK);
+ if (mem_err->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
+ u32 row = mem_err->row;
+
+ row |= cper_get_mem_extension(mem_err->validation_bits, mem_err->extended);
+ p += sprintf(p, "row:%d ", row);
+ }
if (mem_err->validation_bits & CPER_MEM_VALID_COLUMN)
p += sprintf(p, "col:%d ", mem_err->column);
if (mem_err->validation_bits & CPER_MEM_VALID_BIT_POSITION)
@@ -395,6 +405,9 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
strcpy(e->label, dimm->label);
}
}
+ if (mem_err->validation_bits & CPER_MEM_VALID_CHIP_ID)
+ p += sprintf(p, "chipID: %d ",
+ mem_err->extended >> CPER_MEM_CHIP_ID_SHIFT);
if (p > e->location)
*(p - 1) = '\0';
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 191aa7c19ded..324a46b8479b 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1061,16 +1061,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
PCI_DEVICE_ID_INTEL_5100_19, 0);
if (!einj) {
ret = -ENODEV;
- goto bail_einj;
+ goto bail_mc_free;
}
rc = pci_enable_device(einj);
if (rc < 0) {
ret = rc;
- goto bail_disable_einj;
+ goto bail_einj;
}
-
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
@@ -1136,14 +1135,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
bail_scrub:
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
- edac_mc_free(mci);
-
-bail_disable_einj:
pci_disable_device(einj);
bail_einj:
pci_dev_put(einj);
+bail_mc_free:
+ edac_mc_free(mci);
+
bail_disable_ch1:
pci_disable_device(ch1mm);
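
[Editor's note] The i5100 label reshuffle above restores strict LIFO unwinding: pci_dev_put(einj) must still run when pci_enable_device(einj) fails, and edac_mc_free(mci) must be the final unwind step because mci was acquired first. The general shape of the goto-unwind idiom, with illustrative names:

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		ret = -ENODEV;
		goto release_a;
	}

	ret = enable_b(b);
	if (ret)
		goto release_b;

	return 0;

release_b:
	release_b(b);
release_a:
	release_a(a);
	return ret;
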
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f131c05ade9f..92d63eb533ae 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -8,7 +8,7 @@
* Ben Woodard <woodard@redhat.com>
* Mauro Carvalho Chehab
*
- * Red Hat Inc. http://www.redhat.com
+ * Red Hat Inc. https://www.redhat.com
*
* Forked and adapted from the i5000_edac driver which was
* written by Douglas Thompson Linux Networx <norsk5@xmission.com>
@@ -1460,7 +1460,7 @@ module_exit(i5400_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Woodard <woodard@redhat.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I5400 memory controllers - "
I5400_REVISION);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 2e9bbe56cde9..4f28b8c8d378 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -5,7 +5,7 @@
* Copyright (c) 2010 by:
* Mauro Carvalho Chehab
*
- * Red Hat Inc. http://www.redhat.com
+ * Red Hat Inc. https://www.redhat.com
*
* Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
* http://www.intel.com/Assets/PDF/datasheet/318082.pdf
@@ -1206,7 +1206,7 @@ module_exit(i7300_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
I7300_REVISION);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 2acd9f9284a2..23d25724bae4 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -9,7 +9,7 @@
* Copyright (c) 2009-2010 by:
* Mauro Carvalho Chehab
*
- * Red Hat Inc. http://www.redhat.com
+ * Red Hat Inc. https://www.redhat.com
*
* Forked and adapted from the i5400_edac driver
*
@@ -2391,7 +2391,7 @@ module_exit(i7core_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
I7CORE_REVISION);
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index ebe50996cc42..c47963240b65 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -9,7 +9,7 @@
* Since the DRAM controller is on the cpu chip, we can use its PCI device
* id to identify these processors.
*
- * PCI DRAM controller device ids (Taken from The PCI ID Repository - http://pci-ids.ucw.cz/)
+ * PCI DRAM controller device ids (Taken from The PCI ID Repository - https://pci-ids.ucw.cz/)
*
* 0108: Xeon E3-1200 Processor Family DRAM Controller
* 010c: Xeon E3-1200/2nd Generation Core Processor Family DRAM Controller
@@ -23,9 +23,9 @@
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
- * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
- * http://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
+ * https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
* https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html
*
* According to the above datasheet (p.16):
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 325aedf46ff2..7f28edb070bd 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -210,6 +210,11 @@ static const char * const smca_if_mce_desc[] = {
"L2 BTB Multi-Match Error",
"L2 Cache Response Poison Error",
"System Read Data Error",
+ "Hardware Assertion Error",
+ "L1-TLB Multi-Hit",
+ "L2-TLB Multi-Hit",
+ "BSR Parity Error",
+ "CT MCE",
};
static const char * const smca_l2_mce_desc[] = {
@@ -228,7 +233,8 @@ static const char * const smca_de_mce_desc[] = {
"Fetch address FIFO parity error",
"Patch RAM data parity error",
"Patch RAM sequencer parity error",
- "Micro-op buffer parity error"
+ "Micro-op buffer parity error",
+ "Hardware Assertion MCA Error",
};
static const char * const smca_ex_mce_desc[] = {
@@ -244,6 +250,8 @@ static const char * const smca_ex_mce_desc[] = {
"Scheduling queue parity error",
"Branch buffer queue parity error",
"Hardware Assertion error",
+ "Spec Map parity error",
+ "Retire Map parity error",
};
static const char * const smca_fp_mce_desc[] = {
@@ -360,6 +368,7 @@ static const char * const smca_smu2_mce_desc[] = {
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"System Hub Read Buffer ECC or parity error",
+ "PHY RAM ECC error",
};
static const char * const smca_mp5_mce_desc[] = {
@@ -990,10 +999,8 @@ static void decode_smca_error(struct mce *m)
pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec);
/* Only print the decode of valid error codes */
- if (xec < smca_mce_descs[bank_type].num_descs &&
- (hwid->xec_bitmap & BIT_ULL(xec))) {
+ if (xec < smca_mce_descs[bank_type].num_descs)
pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
- }
if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
decode_dram_ecc(cpu_to_node(m->extcpu), m);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index c5ab634cb6a4..93daa4297f2e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -939,12 +939,9 @@ static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
static enum dev_type __ibridge_get_width(u32 mtr)
{
- enum dev_type type;
+ enum dev_type type = DEV_UNKNOWN;
switch (mtr) {
- case 3:
- type = DEV_UNKNOWN;
- break;
case 2:
type = DEV_X16;
break;
@@ -3552,6 +3549,6 @@ MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
-MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
SBRIDGE_REVISION);
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 4af9744cc6d0..0eb5eb97fd74 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -454,7 +454,7 @@ DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);
-struct debugfs_entry *lmc_dfs_ents[] = {
+static struct debugfs_entry *lmc_dfs_ents[] = {
&debugfs_mask0,
&debugfs_mask2,
&debugfs_parity_test,
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 8be3e89a510e..e7eae20f83d1 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*
* Texas Instruments DDR3 ECC error correction and detection driver
*
@@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
/* add EMIF ECC error handler */
error_irq = platform_get_irq(pdev, 0);
- if (!error_irq) {
+ if (error_irq < 0) {
+ ret = error_irq;
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"EMIF irq number not defined.\n");
goto err;
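
[Editor's note] Both IRQ-check fixes in this series (here and in aspeed_edac above) correct the same mistake: platform_get_irq() returns a negative errno on failure and never 0, so testing "!irq" misses every real error. The correct pattern:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and friends */
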
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 525345367260..fdb31954cf2b 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -491,18 +491,7 @@ static struct platform_driver axp288_extcon_driver = {
.pm = &axp288_extcon_pm_ops,
},
};
-
-static int __init axp288_extcon_init(void)
-{
- return platform_driver_register(&axp288_extcon_driver);
-}
-module_init(axp288_extcon_init);
-
-static void __exit axp288_extcon_exit(void)
-{
- platform_driver_unregister(&axp288_extcon_driver);
-}
-module_exit(axp288_extcon_exit);
+module_platform_driver(axp288_extcon_driver);
MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index cc47d626095c..ace523924e58 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -713,7 +713,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
max14577_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- return -ENOMEM;
+ return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 32fc5a66ffa9..4a410fd2ea9a 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1157,7 +1157,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
max77693_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- return -ENOMEM;
+ return PTR_ERR(info->edev);
}
ret = devm_extcon_dev_register(&pdev->dev, info->edev);
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index e6b50ca83008..8e6e97ec65a8 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -845,7 +845,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
max77843_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "Failed to allocate memory for extcon\n");
- ret = -ENODEV;
+ ret = PTR_ERR(info->edev);
goto err_muic_irq;
}
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 172e116ac1ce..337b0eea4e62 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -674,7 +674,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
info->edev = devm_extcon_dev_allocate(&pdev->dev, max8997_extcon_cable);
if (IS_ERR(info->edev)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(info->edev);
goto err_irq;
}
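
[Editor's note] The four extcon fixes above (max14577, max77693, max77843, max8997) share one point: devm_extcon_dev_allocate() returns an ERR_PTR whose code may be something other than the hard-coded -ENOMEM or -ENODEV, so propagating PTR_ERR() preserves the real cause. Sketch:

	info->edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
	if (IS_ERR(info->edev)) {
		dev_err(&pdev->dev, "failed to allocate extcon device\n");
		return PTR_ERR(info->edev);	/* not a hard-coded errno */
	}
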
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index cea58d0cb457..a2852bcc5f0d 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -2,7 +2,7 @@
/*
* Palmas USB transceiver driver
*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Kishon Vijay Abraham I <kishon@ti.com>
* Based on twl6030_usb.c
@@ -205,21 +205,15 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
GPIOD_IN);
- if (PTR_ERR(palmas_usb->id_gpiod) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(palmas_usb->id_gpiod)) {
- dev_err(&pdev->dev, "failed to get id gpio\n");
- return PTR_ERR(palmas_usb->id_gpiod);
- }
+ if (IS_ERR(palmas_usb->id_gpiod))
+ return dev_err_probe(&pdev->dev, PTR_ERR(palmas_usb->id_gpiod),
+ "failed to get id gpio\n");
palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
GPIOD_IN);
- if (PTR_ERR(palmas_usb->vbus_gpiod) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(palmas_usb->vbus_gpiod)) {
- dev_err(&pdev->dev, "failed to get vbus gpio\n");
- return PTR_ERR(palmas_usb->vbus_gpiod);
- }
+ if (IS_ERR(palmas_usb->vbus_gpiod))
+ return dev_err_probe(&pdev->dev, PTR_ERR(palmas_usb->vbus_gpiod),
+ "failed to get id gpio\n");
if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
palmas_usb->enable_id_detection = false;
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index d1c997599390..5b9a3cf8df26 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -5,7 +5,9 @@
// Based on extcon-sm5502.c driver
// Copyright (c) 2018-2019 by Vijai Kumar K
// Author: Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>
+// Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
+#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -17,46 +19,28 @@
#include <linux/gpio/consumer.h>
/* PTN5150 registers */
-enum ptn5150_reg {
- PTN5150_REG_DEVICE_ID = 0x01,
- PTN5150_REG_CONTROL,
- PTN5150_REG_INT_STATUS,
- PTN5150_REG_CC_STATUS,
- PTN5150_REG_CON_DET = 0x09,
- PTN5150_REG_VCONN_STATUS,
- PTN5150_REG_RESET,
- PTN5150_REG_INT_MASK = 0x18,
- PTN5150_REG_INT_REG_STATUS,
- PTN5150_REG_END,
-};
+#define PTN5150_REG_DEVICE_ID 0x01
+#define PTN5150_REG_CONTROL 0x02
+#define PTN5150_REG_INT_STATUS 0x03
+#define PTN5150_REG_CC_STATUS 0x04
+#define PTN5150_REG_CON_DET 0x09
+#define PTN5150_REG_VCONN_STATUS 0x0a
+#define PTN5150_REG_RESET 0x0b
+#define PTN5150_REG_INT_MASK 0x18
+#define PTN5150_REG_INT_REG_STATUS 0x19
+#define PTN5150_REG_END PTN5150_REG_INT_REG_STATUS
#define PTN5150_DFP_ATTACHED 0x1
#define PTN5150_UFP_ATTACHED 0x2
/* Define PTN5150 MASK/SHIFT constant */
-#define PTN5150_REG_DEVICE_ID_VENDOR_SHIFT 0
-#define PTN5150_REG_DEVICE_ID_VENDOR_MASK \
- (0x3 << PTN5150_REG_DEVICE_ID_VENDOR_SHIFT)
-
-#define PTN5150_REG_DEVICE_ID_VERSION_SHIFT 3
-#define PTN5150_REG_DEVICE_ID_VERSION_MASK \
- (0x1f << PTN5150_REG_DEVICE_ID_VERSION_SHIFT)
-
-#define PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT 2
-#define PTN5150_REG_CC_PORT_ATTACHMENT_MASK \
- (0x7 << PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT)
+#define PTN5150_REG_DEVICE_ID_VERSION GENMASK(7, 3)
+#define PTN5150_REG_DEVICE_ID_VENDOR GENMASK(2, 0)
-#define PTN5150_REG_CC_VBUS_DETECTION_SHIFT 7
-#define PTN5150_REG_CC_VBUS_DETECTION_MASK \
- (0x1 << PTN5150_REG_CC_VBUS_DETECTION_SHIFT)
-
-#define PTN5150_REG_INT_CABLE_ATTACH_SHIFT 0
-#define PTN5150_REG_INT_CABLE_ATTACH_MASK \
- (0x1 << PTN5150_REG_INT_CABLE_ATTACH_SHIFT)
-
-#define PTN5150_REG_INT_CABLE_DETACH_SHIFT 1
-#define PTN5150_REG_INT_CABLE_DETACH_MASK \
- (0x1 << PTN5150_REG_CC_CABLE_DETACH_SHIFT)
+#define PTN5150_REG_CC_PORT_ATTACHMENT GENMASK(4, 2)
+#define PTN5150_REG_CC_VBUS_DETECTION BIT(7)
+#define PTN5150_REG_INT_CABLE_ATTACH_MASK BIT(0)
+#define PTN5150_REG_INT_CABLE_DETACH_MASK BIT(1)
struct ptn5150_info {
struct device *dev;
@@ -83,12 +67,45 @@ static const struct regmap_config ptn5150_regmap_config = {
.max_register = PTN5150_REG_END,
};
+static void ptn5150_check_state(struct ptn5150_info *info)
+{
+ unsigned int port_status, reg_data, vbus;
+ int ret;
+
+ ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
+ if (ret) {
+ dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
+ return;
+ }
+
+ port_status = FIELD_GET(PTN5150_REG_CC_PORT_ATTACHMENT, reg_data);
+
+ switch (port_status) {
+ case PTN5150_DFP_ATTACHED:
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
+ gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+ extcon_set_state_sync(info->edev, EXTCON_USB, true);
+ break;
+ case PTN5150_UFP_ATTACHED:
+ extcon_set_state_sync(info->edev, EXTCON_USB, false);
+ vbus = FIELD_GET(PTN5150_REG_CC_VBUS_DETECTION, reg_data);
+ if (vbus)
+ gpiod_set_value_cansleep(info->vbus_gpiod, 0);
+ else
+ gpiod_set_value_cansleep(info->vbus_gpiod, 1);
+
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+ break;
+ default:
+ break;
+ }
+}
+
static void ptn5150_irq_work(struct work_struct *work)
{
struct ptn5150_info *info = container_of(work,
struct ptn5150_info, irq_work);
int ret = 0;
- unsigned int reg_data;
unsigned int int_status;
if (!info->edev)
@@ -96,13 +113,6 @@ static void ptn5150_irq_work(struct work_struct *work)
mutex_lock(&info->mutex);
- ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, &reg_data);
- if (ret) {
- dev_err(info->dev, "failed to read CC STATUS %d\n", ret);
- mutex_unlock(&info->mutex);
- return;
- }
-
/* Clear interrupt. Read would clear the register */
ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status);
if (ret) {
@@ -116,47 +126,13 @@ static void ptn5150_irq_work(struct work_struct *work)
cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK;
if (cable_attach) {
- unsigned int port_status;
- unsigned int vbus;
-
- port_status = ((reg_data &
- PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >>
- PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT);
-
- switch (port_status) {
- case PTN5150_DFP_ATTACHED:
- extcon_set_state_sync(info->edev,
- EXTCON_USB_HOST, false);
- gpiod_set_value(info->vbus_gpiod, 0);
- extcon_set_state_sync(info->edev, EXTCON_USB,
- true);
- break;
- case PTN5150_UFP_ATTACHED:
- extcon_set_state_sync(info->edev, EXTCON_USB,
- false);
- vbus = ((reg_data &
- PTN5150_REG_CC_VBUS_DETECTION_MASK) >>
- PTN5150_REG_CC_VBUS_DETECTION_SHIFT);
- if (vbus)
- gpiod_set_value(info->vbus_gpiod, 0);
- else
- gpiod_set_value(info->vbus_gpiod, 1);
-
- extcon_set_state_sync(info->edev,
- EXTCON_USB_HOST, true);
- break;
- default:
- dev_err(info->dev,
- "Unknown Port status : %x\n",
- port_status);
- break;
- }
+ ptn5150_check_state(info);
} else {
extcon_set_state_sync(info->edev,
EXTCON_USB_HOST, false);
extcon_set_state_sync(info->edev,
EXTCON_USB, false);
- gpiod_set_value(info->vbus_gpiod, 0);
+ gpiod_set_value_cansleep(info->vbus_gpiod, 0);
}
}
@@ -194,13 +170,10 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return -EINVAL;
}
- vendor_id = ((reg_data & PTN5150_REG_DEVICE_ID_VENDOR_MASK) >>
- PTN5150_REG_DEVICE_ID_VENDOR_SHIFT);
- version_id = ((reg_data & PTN5150_REG_DEVICE_ID_VERSION_MASK) >>
- PTN5150_REG_DEVICE_ID_VERSION_SHIFT);
-
- dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
- version_id, vendor_id);
+ vendor_id = FIELD_GET(PTN5150_REG_DEVICE_ID_VENDOR, reg_data);
+ version_id = FIELD_GET(PTN5150_REG_DEVICE_ID_VERSION, reg_data);
+ dev_dbg(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n",
+ version_id, vendor_id);
/* Clear any existing interrupts */
ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &reg_data);
@@ -221,8 +194,7 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
return 0;
}
-static int ptn5150_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ptn5150_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct device_node *np = i2c->dev.of_node;
@@ -239,20 +211,15 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c,
info->dev = &i2c->dev;
info->i2c = i2c;
- info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
- if (IS_ERR(info->int_gpiod)) {
- dev_err(dev, "failed to get INT GPIO\n");
- return PTR_ERR(info->int_gpiod);
- }
- info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_IN);
+ info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_OUT_LOW);
if (IS_ERR(info->vbus_gpiod)) {
- dev_err(dev, "failed to get VBUS GPIO\n");
- return PTR_ERR(info->vbus_gpiod);
- }
- ret = gpiod_direction_output(info->vbus_gpiod, 0);
- if (ret) {
- dev_err(dev, "failed to set VBUS GPIO direction\n");
- return -EINVAL;
+ ret = PTR_ERR(info->vbus_gpiod);
+ if (ret == -ENOENT) {
+ dev_info(dev, "No VBUS GPIO, ignoring VBUS control\n");
+ info->vbus_gpiod = NULL;
+ } else {
+ return dev_err_probe(dev, ret, "failed to get VBUS GPIO\n");
+ }
}
mutex_init(&info->mutex);
@@ -261,28 +228,34 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c,
info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config);
if (IS_ERR(info->regmap)) {
- ret = PTR_ERR(info->regmap);
- dev_err(info->dev, "failed to allocate register map: %d\n",
- ret);
- return ret;
+ return dev_err_probe(info->dev, PTR_ERR(info->regmap),
+ "failed to allocate register map\n");
}
- if (info->int_gpiod) {
+ if (i2c->irq > 0) {
+ info->irq = i2c->irq;
+ } else {
+ info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN);
+ if (IS_ERR(info->int_gpiod)) {
+ return dev_err_probe(dev, PTR_ERR(info->int_gpiod),
+ "failed to get INT GPIO\n");
+ }
+
info->irq = gpiod_to_irq(info->int_gpiod);
if (info->irq < 0) {
dev_err(dev, "failed to get INTB IRQ\n");
return info->irq;
}
+ }
- ret = devm_request_threaded_irq(dev, info->irq, NULL,
- ptn5150_irq_handler,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- i2c->name, info);
- if (ret < 0) {
- dev_err(dev, "failed to request handler for INTB IRQ\n");
- return ret;
- }
+ ret = devm_request_threaded_irq(dev, info->irq, NULL,
+ ptn5150_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ i2c->name, info);
+ if (ret < 0) {
+ dev_err(dev, "failed to request handler for INTB IRQ\n");
+ return ret;
}
/* Allocate extcon device */
@@ -299,11 +272,26 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+
/* Initialize PTN5150 device and print vendor id and version id */
ret = ptn5150_init_dev_type(info);
if (ret)
return -EINVAL;
+ /*
+ * Update the current extcon state in case a connection (for example
+ * an OTG cable) was already present before the probe.
+ */
+ mutex_lock(&info->mutex);
+ ptn5150_check_state(info);
+ mutex_unlock(&info->mutex);
+
return 0;
}
@@ -324,16 +312,12 @@ static struct i2c_driver ptn5150_i2c_driver = {
.name = "ptn5150",
.of_match_table = ptn5150_dt_match,
},
- .probe = ptn5150_i2c_probe,
+ .probe_new = ptn5150_i2c_probe,
.id_table = ptn5150_i2c_id,
};
-
-static int __init ptn5150_i2c_init(void)
-{
- return i2c_add_driver(&ptn5150_i2c_driver);
-}
-subsys_initcall(ptn5150_i2c_init);
+module_i2c_driver(ptn5150_i2c_driver);
MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver");
MODULE_AUTHOR("Vijai Kumar K <vijaikumar.kanagarajan@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_LICENSE("GPL v2");
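
A note on the optional VBUS handling above: the probe open-codes the -ENOENT check so it can log that VBUS control is being skipped. When no such message is needed, the usual idiom is devm_gpiod_get_optional(), which returns NULL rather than ERR_PTR(-ENOENT) for an absent line, and a NULL descriptor then makes gpiod_set_value_cansleep() a no-op. A minimal sketch, assuming the same "vbus" consumer name (example_get_vbus is a hypothetical helper, not part of the patch):

	#include <linux/gpio/consumer.h>

	static int example_get_vbus(struct device *dev, struct gpio_desc **out)
	{
		struct gpio_desc *vbus;

		/* Returns NULL (not an error) when no "vbus" line is described. */
		vbus = devm_gpiod_get_optional(dev, "vbus", GPIOD_OUT_LOW);
		if (IS_ERR(vbus))
			return dev_err_probe(dev, PTR_ERR(vbus),
					     "failed to get VBUS GPIO\n");

		*out = vbus;	/* gpiod_set_value_cansleep(NULL, ...) is a no-op */
		return 0;
	}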
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 98b5afa5b615..f06be6d4e2a9 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -2,7 +2,7 @@
/**
* drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
* Author: Roger Quadros <rogerq@ti.com>
*/
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 020cb15a4d8f..9811c40956e5 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -674,17 +674,16 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
static void ar_context_release(struct ar_context *ctx)
{
+ struct device *dev = ctx->ohci->card.device;
unsigned int i;
vunmap(ctx->buffer);
- for (i = 0; i < AR_BUFFERS; i++)
- if (ctx->pages[i]) {
- dma_unmap_page(ctx->ohci->card.device,
- ar_buffer_bus(ctx, i),
- PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(ctx->pages[i]);
- }
+ for (i = 0; i < AR_BUFFERS; i++) {
+ if (ctx->pages[i])
+ dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
+ ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
+ }
}
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
@@ -970,6 +969,7 @@ error:
static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
unsigned int descriptors_offset, u32 regs)
{
+ struct device *dev = ohci->card.device;
unsigned int i;
dma_addr_t dma_addr;
struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
@@ -980,17 +980,13 @@ static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
for (i = 0; i < AR_BUFFERS; i++) {
- ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+ ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
if (!ctx->pages[i])
goto out_of_memory;
- dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
- 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(ohci->card.device, dma_addr)) {
- __free_page(ctx->pages[i]);
- ctx->pages[i] = NULL;
- goto out_of_memory;
- }
set_page_private(ctx->pages[i], dma_addr);
+ dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
}
for (i = 0; i < AR_BUFFERS; i++)
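
The firewire conversion above swaps alloc_page() + dma_map_page() for the non-coherent DMA page allocator, which also removes the dma_mapping_error() unwind: dma_alloc_pages() either returns a page with a valid DMA address or NULL. A minimal sketch of the pattern with the same DMA_FROM_DEVICE direction (example_* names are illustrative, not part of the patch):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	struct example_buf {
		struct page *page;
		dma_addr_t bus;
	};

	static int example_alloc(struct device *dev, struct example_buf *b)
	{
		/* Either returns a page with a valid DMA address or NULL:
		 * no separate dma_mapping_error() unwind path is needed. */
		b->page = dma_alloc_pages(dev, PAGE_SIZE, &b->bus,
					  DMA_FROM_DEVICE, GFP_KERNEL);
		if (!b->page)
			return -ENOMEM;

		/* Hand the buffer to the device before starting DMA. */
		dma_sync_single_for_device(dev, b->bus, PAGE_SIZE,
					   DMA_FROM_DEVICE);
		return 0;
	}

	static void example_free(struct device *dev, struct example_buf *b)
	{
		dma_free_pages(dev, PAGE_SIZE, b->page, b->bus,
			       DMA_FROM_DEVICE);
	}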
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index fbd785dd0513..3315e3c21586 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -7,7 +7,7 @@
menu "Firmware Drivers"
config ARM_SCMI_PROTOCOL
- bool "ARM System Control and Management Interface (SCMI) Message Protocol"
+ tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
depends on MAILBOX
help
@@ -178,16 +178,15 @@ config ISCSI_IBFT
Otherwise, say N.
config RASPBERRYPI_FIRMWARE
- bool "Raspberry Pi Firmware Driver"
+ tristate "Raspberry Pi Firmware Driver"
depends on BCM2835_MBOX
- default USB_PCI
help
This option enables support for communicating with the firmware on the
Raspberry Pi.
config FW_CFG_SYSFS
tristate "QEMU fw_cfg device support in sysfs"
- depends on SYSFS && (ARM || ARM64 || PPC_PMAC || SPARC || X86)
+ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
depends on HAS_IOPORT_MAP
default n
help
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 99510be9f5ed..5e013b6a3692 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
-obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
+obj-y += arm_scmi/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6f9cbc4aef22..bc0d54f8e861 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o notify.o
scmi-transport-y = shmem.o
scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
-scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
+scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o
+scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
+ $(scmi-transport-y)
+obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index 9853bd3c4d45..017e5d8bd869 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -197,6 +197,8 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
protocols_imp[tot_num_ret + loop] = *(list + loop);
tot_num_ret += loop_num_ret;
+
+ scmi_reset_rx_to_maxsz(handle, t);
} while (loop_num_ret);
scmi_xfer_put(handle, t);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index db55c43a2cbd..1377ec76a45d 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -230,7 +230,7 @@ static void scmi_devices_unregister(void)
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
-static int __init scmi_bus_init(void)
+int __init scmi_bus_init(void)
{
int retval;
@@ -240,12 +240,10 @@ static int __init scmi_bus_init(void)
return retval;
}
-subsys_initcall(scmi_bus_init);
-static void __exit scmi_bus_exit(void)
+void __exit scmi_bus_exit(void)
{
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
-module_exit(scmi_bus_exit);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 75e39882746e..4645677d86f1 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -192,6 +192,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
}
tot_rate_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -318,7 +320,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
return clk;
}
-static struct scmi_clk_ops clk_ops = {
+static const struct scmi_clk_ops clk_ops = {
.count_get = scmi_clock_count_get,
.info_get = scmi_clock_info_get,
.rate_get = scmi_clock_rate_get,
@@ -364,9 +366,4 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
return 0;
}
-static int __init scmi_clock_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
- &scmi_clock_protocol_init);
-}
-subsys_initcall(scmi_clock_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c113e578cc6c..65063fa948d4 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -147,6 +147,8 @@ int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p);
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer);
int scmi_handle_put(const struct scmi_handle *handle);
struct scmi_handle *scmi_handle_get(struct device *dev);
void scmi_set_handle(struct scmi_device *scmi_dev);
@@ -156,6 +158,30 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
int scmi_base_protocol_init(struct scmi_handle *h);
+int __init scmi_bus_init(void);
+void __exit scmi_bus_exit(void);
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
+ int __init scmi_##func##_register(void); \
+ void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(id, name) \
+int __init scmi_##name##_register(void) \
+{ \
+ return scmi_protocol_register((id), &scmi_##name##_protocol_init); \
+} \
+\
+void __exit scmi_##name##_unregister(void) \
+{ \
+ scmi_protocol_unregister((id)); \
+}
+
/* SCMI Transport */
/**
* struct scmi_chan_info - Structure representing a SCMI channel information
@@ -210,7 +236,7 @@ struct scmi_transport_ops {
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct scmi_desc {
- struct scmi_transport_ops *ops;
+ const struct scmi_transport_ops *ops;
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
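
For reference, the pair generated by DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock) expands to:

	int __init scmi_clock_register(void)
	{
		return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
					      &scmi_clock_protocol_init);
	}

	void __exit scmi_clock_unregister(void)
	{
		scmi_protocol_unregister(SCMI_PROTOCOL_CLOCK);
	}

which is exactly what each protocol file previously open-coded as a subsys_initcall, plus the unregister half needed for modular builds.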
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 03ec74242c14..3dfd8b6a0ebf 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -402,6 +402,14 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
return ret;
}
+void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_info *info = handle_to_scmi_info(handle);
+
+ xfer->rx.len = info->desc->max_msg_size;
+}
+
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
/**
@@ -730,6 +738,7 @@ struct scmi_prot_devnames {
static struct scmi_prot_devnames devnames[] = {
{ SCMI_PROTOCOL_POWER, { "genpd" },},
+ { SCMI_PROTOCOL_SYSTEM, { "syspower" },},
{ SCMI_PROTOCOL_PERF, { "cpufreq" },},
{ SCMI_PROTOCOL_CLOCK, { "clocks" },},
{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
@@ -928,7 +937,35 @@ static struct platform_driver scmi_driver = {
.remove = scmi_remove,
};
-module_platform_driver(scmi_driver);
+static int __init scmi_driver_init(void)
+{
+ scmi_bus_init();
+
+ scmi_clock_register();
+ scmi_perf_register();
+ scmi_power_register();
+ scmi_reset_register();
+ scmi_sensors_register();
+ scmi_system_register();
+
+ return platform_driver_register(&scmi_driver);
+}
+subsys_initcall(scmi_driver_init);
+
+static void __exit scmi_driver_exit(void)
+{
+ scmi_bus_exit();
+
+ scmi_clock_unregister();
+ scmi_perf_unregister();
+ scmi_power_unregister();
+ scmi_reset_unregister();
+ scmi_sensors_unregister();
+ scmi_system_unregister();
+
+ platform_driver_unregister(&scmi_driver);
+}
+module_exit(scmi_driver_exit);
MODULE_ALIAS("platform: arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
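
The new scmi_reset_rx_to_maxsz() helper exists because multi-part commands reuse one xfer across iterations, and the transport trims rx.len to the size of each reply; without restoring it, every later chunk would be truncated to the first reply's length. A hedged sketch of the call pattern (example_fetch_all and the reply layout are illustrative, not a real protocol):

	static int example_fetch_all(const struct scmi_handle *handle,
				     struct scmi_xfer *t)
	{
		u32 num_returned, num_remaining;
		int ret;

		do {
			ret = scmi_do_xfer(handle, t);
			if (ret)
				return ret;

			/* Hypothetical reply header: returned/remaining counts. */
			num_returned = le32_to_cpu(((__le32 *)t->rx.buf)[0]);
			num_remaining = le32_to_cpu(((__le32 *)t->rx.buf)[1]);

			/* ... consume this chunk of t->rx.buf ... */

			/* scmi_do_xfer() shrank t->rx.len to the reply size;
			 * restore the full buffer before requesting more. */
			scmi_reset_rx_to_maxsz(handle, t);
		} while (num_returned && num_remaining);

		return 0;
	}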
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 6998dc86b5ce..4626404be541 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -110,7 +110,7 @@ static int mailbox_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_mailbox *smbox = cinfo->transport_info;
- if (!IS_ERR(smbox->chan)) {
+ if (smbox && !IS_ERR(smbox->chan)) {
mbox_free_channel(smbox->chan);
cinfo->transport_info = NULL;
smbox->chan = NULL;
@@ -181,7 +181,7 @@ mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
return shmem_poll_done(smbox->shmem, xfer);
}
-static struct scmi_transport_ops scmi_mailbox_ops = {
+static const struct scmi_transport_ops scmi_mailbox_ops = {
.chan_available = mailbox_chan_available,
.chan_setup = mailbox_chan_setup,
.chan_free = mailbox_chan_free,
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 4731daaacd19..ce336899d636 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1403,15 +1403,21 @@ static void scmi_protocols_late_init(struct work_struct *work)
"finalized PENDING handler - key:%X\n",
hndl->key);
ret = scmi_event_handler_enable_events(hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging INVALID handler - key:%X\n",
+ hndl->key);
+ scmi_put_active_handler(ni, hndl);
+ }
} else {
ret = scmi_valid_pending_handler(ni, hndl);
- }
- if (ret) {
- dev_dbg(ni->handle->dev,
- "purging PENDING handler - key:%X\n",
- hndl->key);
- /* this hndl can be only a pending one */
- scmi_put_handler_unlocked(ni, hndl);
+ if (ret) {
+ dev_dbg(ni->handle->dev,
+ "purging PENDING handler - key:%X\n",
+ hndl->key);
+ /* this hndl can be only a pending one */
+ scmi_put_handler_unlocked(ni, hndl);
+ }
}
}
mutex_unlock(&ni->pending_mtx);
@@ -1421,7 +1427,7 @@ static void scmi_protocols_late_init(struct work_struct *work)
* notify_ops are attached to the handle so that can be accessed
* directly from an scmi_driver to register its own notifiers.
*/
-static struct scmi_notify_ops notify_ops = {
+static const struct scmi_notify_ops notify_ops = {
.register_event_notifier = scmi_register_notifier,
.unregister_event_notifier = scmi_unregister_notifier,
};
@@ -1468,7 +1474,7 @@ int scmi_notification_init(struct scmi_handle *handle)
ni->gid = gid;
ni->handle = handle;
- ni->notify_wq = alloc_workqueue("scmi_notify",
+ ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 3e1e87012c95..82fb3babff72 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -304,6 +304,8 @@ scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
}
tot_opp_cnt += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -748,7 +750,7 @@ static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
return dom->fc_info && dom->fc_info->level_set_addr;
}
-static struct scmi_perf_ops perf_ops = {
+static const struct scmi_perf_ops perf_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
@@ -890,9 +892,4 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
return 0;
}
-static int __init scmi_perf_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_PERF,
- &scmi_perf_protocol_init);
-}
-subsys_initcall(scmi_perf_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 46f213644c49..1f37258e9bee 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -184,7 +184,7 @@ static char *scmi_power_name_get(const struct scmi_handle *handle, u32 domain)
return dom->name;
}
-static struct scmi_power_ops power_ops = {
+static const struct scmi_power_ops power_ops = {
.num_domains_get = scmi_power_num_domains_get,
.name_get = scmi_power_name_get,
.state_set = scmi_power_state_set,
@@ -301,9 +301,4 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
return 0;
}
-static int __init scmi_power_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_POWER,
- &scmi_power_protocol_init);
-}
-subsys_initcall(scmi_power_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_POWER, power)
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 3691bafca057..a981a22cfe89 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -36,9 +36,7 @@ struct scmi_msg_reset_domain_reset {
#define EXPLICIT_RESET_ASSERT BIT(1)
#define ASYNCHRONOUS_RESET BIT(2)
__le32 reset_state;
-#define ARCH_RESET_TYPE BIT(31)
-#define COLD_RESET_STATE BIT(0)
-#define ARCH_COLD_RESET (ARCH_RESET_TYPE | COLD_RESET_STATE)
+#define ARCH_COLD_RESET 0
};
struct scmi_msg_reset_notify {
@@ -194,7 +192,7 @@ scmi_reset_domain_deassert(const struct scmi_handle *handle, u32 domain)
return scmi_domain_reset(handle, domain, 0, ARCH_COLD_RESET);
}
-static struct scmi_reset_ops reset_ops = {
+static const struct scmi_reset_ops reset_ops = {
.num_domains_get = scmi_reset_num_domains_get,
.name_get = scmi_reset_name_get,
.latency_get = scmi_reset_latency_get,
@@ -313,9 +311,4 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
return 0;
}
-static int __init scmi_reset_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_RESET,
- &scmi_reset_protocol_init);
-}
-subsys_initcall(scmi_reset_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_RESET, reset)
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 1af0ad362e82..b4232d611033 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -166,6 +166,8 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
}
desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
@@ -275,7 +277,7 @@ static int scmi_sensor_count_get(const struct scmi_handle *handle)
return si->num_sensors;
}
-static struct scmi_sensor_ops sensor_ops = {
+static const struct scmi_sensor_ops sensor_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
@@ -365,9 +367,4 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
return 0;
}
-static int __init scmi_sensors_init(void)
-{
- return scmi_protocol_register(SCMI_PROTOCOL_SENSOR,
- &scmi_sensors_protocol_init);
-}
-subsys_initcall(scmi_sensors_init);
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SENSOR, sensors)
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index a1537d123e38..82a82a5dc86a 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -137,7 +137,7 @@ smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
return shmem_poll_done(scmi_info->shmem, xfer);
}
-static struct scmi_transport_ops scmi_smc_ops = {
+static const struct scmi_transport_ops scmi_smc_ops = {
.chan_available = smc_chan_available,
.chan_setup = smc_chan_setup,
.chan_free = smc_chan_free,
@@ -149,6 +149,6 @@ static struct scmi_transport_ops scmi_smc_ops = {
const struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
- .max_msg = 1,
+ .max_msg = 20,
.max_msg_size = 128,
};
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
new file mode 100644
index 000000000000..283e12d5f24b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/system.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) System Power Protocol
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
+
+#include <linux/scmi_protocol.h>
+
+#include "common.h"
+#include "notify.h"
+
+#define SCMI_SYSTEM_NUM_SOURCES 1
+
+enum scmi_system_protocol_cmd {
+ SYSTEM_POWER_STATE_NOTIFY = 0x5,
+};
+
+struct scmi_system_power_state_notify {
+ __le32 notify_enable;
+};
+
+struct scmi_system_power_state_notifier_payld {
+ __le32 agent_id;
+ __le32 flags;
+ __le32 system_state;
+};
+
+struct scmi_system_info {
+ u32 version;
+};
+
+static int scmi_system_request_notify(const struct scmi_handle *handle,
+ bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_system_power_state_notify *notify;
+
+ ret = scmi_xfer_get_init(handle, SYSTEM_POWER_STATE_NOTIFY,
+ SCMI_PROTOCOL_SYSTEM, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = scmi_do_xfer(handle, t);
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_system_set_notify_enabled(const struct scmi_handle *handle,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret;
+
+ ret = scmi_system_request_notify(handle, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
+
+ return ret;
+}
+
+static void *scmi_system_fill_custom_report(const struct scmi_handle *handle,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_system_power_state_notifier_payld *p = payld;
+ struct scmi_system_power_state_notifier_report *r = report;
+
+ if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
+ sizeof(*p) != payld_sz)
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->flags = le32_to_cpu(p->flags);
+ r->system_state = le32_to_cpu(p->system_state);
+ *src_id = 0;
+
+ return r;
+}
+
+static const struct scmi_event system_events[] = {
+ {
+ .id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
+ .max_payld_sz =
+ sizeof(struct scmi_system_power_state_notifier_payld),
+ .max_report_sz =
+ sizeof(struct scmi_system_power_state_notifier_report),
+ },
+};
+
+static const struct scmi_event_ops system_event_ops = {
+ .set_notify_enabled = scmi_system_set_notify_enabled,
+ .fill_custom_report = scmi_system_fill_custom_report,
+};
+
+static int scmi_system_protocol_init(struct scmi_handle *handle)
+{
+ u32 version;
+ struct scmi_system_info *pinfo;
+
+ scmi_version_get(handle, SCMI_PROTOCOL_SYSTEM, &version);
+
+ dev_dbg(handle->dev, "System Power Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ scmi_register_protocol_events(handle,
+ SCMI_PROTOCOL_SYSTEM, SCMI_PROTO_QUEUE_SZ,
+ &system_event_ops,
+ system_events,
+ ARRAY_SIZE(system_events),
+ SCMI_SYSTEM_NUM_SOURCES);
+
+ pinfo->version = version;
+ handle->system_priv = pinfo;
+
+ return 0;
+}
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_SYSTEM, system)
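
With the system protocol in place, an scmi_driver can subscribe to the power-state event through the notify_ops on its handle. A hedged sketch, assuming the notifier signature from this series (example_* names are illustrative; a NULL src_id subscribes to all sources):

	#include <linux/notifier.h>
	#include <linux/scmi_protocol.h>

	static int example_sys_notify(struct notifier_block *nb,
				      unsigned long event, void *data)
	{
		struct scmi_system_power_state_notifier_report *r = data;

		pr_info("agent %u requested system state %u\n",
			r->agent_id, r->system_state);
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_sys_notify,
	};

	/* from an scmi_driver probe, with a valid handle: */
	static int example_subscribe(const struct scmi_handle *handle)
	{
		return handle->notify_ops->register_event_notifier(handle,
				SCMI_PROTOCOL_SYSTEM,
				SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
				NULL, &example_nb);
	}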
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index b4b9ce97f415..840754dcc6ca 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -78,11 +78,26 @@ struct sdei_crosscall_args {
int first_error;
};
-#define CROSSCALL_INIT(arg, event) (arg.event = event, \
- arg.first_error = 0, \
- atomic_set(&arg.errors, 0))
+#define CROSSCALL_INIT(arg, event) \
+ do { \
+ arg.event = event; \
+ arg.first_error = 0; \
+ atomic_set(&arg.errors, 0); \
+ } while (0)
+
+static inline int sdei_do_local_call(smp_call_func_t fn,
+ struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ fn(&arg);
-static inline int sdei_do_cross_call(void *fn, struct sdei_event * event)
+ return arg.first_error;
+}
+
+static inline int sdei_do_cross_call(smp_call_func_t fn,
+ struct sdei_event *event)
{
struct sdei_crosscall_args arg;
@@ -114,26 +129,7 @@ static int sdei_to_linux_errno(unsigned long sdei_err)
return -ENOMEM;
}
- /* Not an error value ... */
- return sdei_err;
-}
-
-/*
- * If x0 is any of these values, then the call failed, use sdei_to_linux_errno()
- * to translate.
- */
-static int sdei_is_err(struct arm_smccc_res *res)
-{
- switch (res->a0) {
- case SDEI_NOT_SUPPORTED:
- case SDEI_INVALID_PARAMETERS:
- case SDEI_DENIED:
- case SDEI_PENDING:
- case SDEI_OUT_OF_RESOURCE:
- return true;
- }
-
- return false;
+ return 0;
}
static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
@@ -141,14 +137,13 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
unsigned long arg3, unsigned long arg4,
u64 *result)
{
- int err = 0;
+ int err;
struct arm_smccc_res res;
if (sdei_firmware_call) {
sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
&res);
- if (sdei_is_err(&res))
- err = sdei_to_linux_errno(res.a0);
+ err = sdei_to_linux_errno(res.a0);
} else {
/*
* !sdei_firmware_call means we failed to probe or called
@@ -210,36 +205,34 @@ static struct sdei_event *sdei_event_create(u32 event_num,
lockdep_assert_held(&sdei_events_lock);
event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return ERR_PTR(-ENOMEM);
+ if (!event) {
+ err = -ENOMEM;
+ goto fail;
+ }
INIT_LIST_HEAD(&event->list);
event->event_num = event_num;
err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
&result);
- if (err) {
- kfree(event);
- return ERR_PTR(err);
- }
+ if (err)
+ goto fail;
event->priority = result;
err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
&result);
- if (err) {
- kfree(event);
- return ERR_PTR(err);
- }
+ if (err)
+ goto fail;
event->type = result;
if (event->type == SDEI_EVENT_TYPE_SHARED) {
reg = kzalloc(sizeof(*reg), GFP_KERNEL);
if (!reg) {
- kfree(event);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto fail;
}
- reg->event_num = event_num;
+ reg->event_num = event->event_num;
reg->priority = event->priority;
reg->callback = cb;
@@ -251,8 +244,8 @@ static struct sdei_event *sdei_event_create(u32 event_num,
regs = alloc_percpu(struct sdei_registered_event);
if (!regs) {
- kfree(event);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto fail;
}
for_each_possible_cpu(cpu) {
@@ -272,6 +265,10 @@ static struct sdei_event *sdei_event_create(u32 event_num,
spin_unlock(&sdei_list_lock);
return event;
+
+fail:
+ kfree(event);
+ return ERR_PTR(err);
}
static void sdei_event_destroy_llocked(struct sdei_event *event)
@@ -490,16 +487,6 @@ static void _local_event_unregister(void *data)
sdei_cross_call_return(arg, err);
}
-static int _sdei_event_unregister(struct sdei_event *event)
-{
- lockdep_assert_held(&sdei_events_lock);
-
- if (event->type == SDEI_EVENT_TYPE_SHARED)
- return sdei_api_event_unregister(event->event_num);
-
- return sdei_do_cross_call(_local_event_unregister, event);
-}
-
int sdei_event_unregister(u32 event_num)
{
int err;
@@ -509,24 +496,27 @@ int sdei_event_unregister(u32 event_num)
mutex_lock(&sdei_events_lock);
event = sdei_event_find(event_num);
- do {
- if (!event) {
- pr_warn("Event %u not registered\n", event_num);
- err = -ENOENT;
- break;
- }
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ goto unlock;
+ }
- spin_lock(&sdei_list_lock);
- event->reregister = false;
- event->reenable = false;
- spin_unlock(&sdei_list_lock);
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
- err = _sdei_event_unregister(event);
- if (err)
- break;
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_unregister(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_unregister, event);
- sdei_event_destroy(event);
- } while (0);
+ if (err)
+ goto unlock;
+
+ sdei_event_destroy(event);
+unlock:
mutex_unlock(&sdei_events_lock);
return err;
@@ -547,7 +537,7 @@ static int sdei_unregister_shared(void)
if (event->type != SDEI_EVENT_TYPE_SHARED)
continue;
- err = _sdei_event_unregister(event);
+ err = sdei_api_event_unregister(event->event_num);
if (err)
break;
}
@@ -581,25 +571,6 @@ static void _local_event_register(void *data)
sdei_cross_call_return(arg, err);
}
-static int _sdei_event_register(struct sdei_event *event)
-{
- int err;
-
- lockdep_assert_held(&sdei_events_lock);
-
- if (event->type == SDEI_EVENT_TYPE_SHARED)
- return sdei_api_event_register(event->event_num,
- sdei_entry_point,
- event->registered,
- SDEI_EVENT_REGISTER_RM_ANY, 0);
-
- err = sdei_do_cross_call(_local_event_register, event);
- if (err)
- sdei_do_cross_call(_local_event_unregister, event);
-
- return err;
-}
-
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
int err;
@@ -608,63 +579,44 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
WARN_ON(in_nmi());
mutex_lock(&sdei_events_lock);
- do {
- if (sdei_event_find(event_num)) {
- pr_warn("Event %u already registered\n", event_num);
- err = -EBUSY;
- break;
- }
-
- event = sdei_event_create(event_num, cb, arg);
- if (IS_ERR(event)) {
- err = PTR_ERR(event);
- pr_warn("Failed to create event %u: %d\n", event_num,
- err);
- break;
- }
-
- cpus_read_lock();
- err = _sdei_event_register(event);
- if (err) {
- sdei_event_destroy(event);
- pr_warn("Failed to register event %u: %d\n", event_num,
- err);
- } else {
- spin_lock(&sdei_list_lock);
- event->reregister = true;
- spin_unlock(&sdei_list_lock);
- }
- cpus_read_unlock();
- } while (0);
- mutex_unlock(&sdei_events_lock);
-
- return err;
-}
-
-static int sdei_reregister_event_llocked(struct sdei_event *event)
-{
- int err;
-
- lockdep_assert_held(&sdei_events_lock);
- lockdep_assert_held(&sdei_list_lock);
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ goto unlock;
+ }
- err = _sdei_event_register(event);
- if (err) {
- pr_err("Failed to re-register event %u\n", event->event_num);
- sdei_event_destroy_llocked(event);
- return err;
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num, err);
+ goto unlock;
}
- if (event->reenable) {
- if (event->type == SDEI_EVENT_TYPE_SHARED)
- err = sdei_api_event_enable(event->event_num);
- else
- err = sdei_do_cross_call(_local_event_enable, event);
+ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ } else {
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err)
+ sdei_do_cross_call(_local_event_unregister, event);
}
- if (err)
- pr_err("Failed to re-enable event %u\n", event->event_num);
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num, err);
+ goto cpu_unlock;
+ }
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+cpu_unlock:
+ cpus_read_unlock();
+unlock:
+ mutex_unlock(&sdei_events_lock);
return err;
}
@@ -680,9 +632,24 @@ static int sdei_reregister_shared(void)
continue;
if (event->reregister) {
- err = sdei_reregister_event_llocked(event);
- if (err)
+ err = sdei_api_event_register(event->event_num,
+ sdei_entry_point, event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+ if (err) {
+ pr_err("Failed to re-register event %u\n",
+ event->event_num);
+ sdei_event_destroy_llocked(event);
break;
+ }
+ }
+
+ if (event->reenable) {
+ err = sdei_api_event_enable(event->event_num);
+ if (err) {
+ pr_err("Failed to re-enable event %u\n",
+ event->event_num);
+ break;
+ }
}
}
spin_unlock(&sdei_list_lock);
@@ -694,7 +661,7 @@ static int sdei_reregister_shared(void)
static int sdei_cpuhp_down(unsigned int cpu)
{
struct sdei_event *event;
- struct sdei_crosscall_args arg;
+ int err;
/* un-register private events */
spin_lock(&sdei_list_lock);
@@ -702,12 +669,11 @@ static int sdei_cpuhp_down(unsigned int cpu)
if (event->type == SDEI_EVENT_TYPE_SHARED)
continue;
- CROSSCALL_INIT(arg, event);
- /* call the cross-call function locally... */
- _local_event_unregister(&arg);
- if (arg.first_error)
+ err = sdei_do_local_call(_local_event_unregister, event);
+ if (err) {
pr_err("Failed to unregister event %u: %d\n",
- event->event_num, arg.first_error);
+ event->event_num, err);
+ }
}
spin_unlock(&sdei_list_lock);
@@ -717,7 +683,7 @@ static int sdei_cpuhp_down(unsigned int cpu)
static int sdei_cpuhp_up(unsigned int cpu)
{
struct sdei_event *event;
- struct sdei_crosscall_args arg;
+ int err;
/* re-register/enable private events */
spin_lock(&sdei_list_lock);
@@ -726,20 +692,19 @@ static int sdei_cpuhp_up(unsigned int cpu)
continue;
if (event->reregister) {
- CROSSCALL_INIT(arg, event);
- /* call the cross-call function locally... */
- _local_event_register(&arg);
- if (arg.first_error)
+ err = sdei_do_local_call(_local_event_register, event);
+ if (err) {
pr_err("Failed to re-register event %u: %d\n",
- event->event_num, arg.first_error);
+ event->event_num, err);
+ }
}
if (event->reenable) {
- CROSSCALL_INIT(arg, event);
- _local_event_enable(&arg);
- if (arg.first_error)
+ err = sdei_do_local_call(_local_event_enable, event);
+ if (err) {
pr_err("Failed to re-enable event %u: %d\n",
- event->event_num, arg.first_error);
+ event->event_num, err);
+ }
}
}
spin_unlock(&sdei_list_lock);
@@ -976,7 +941,7 @@ static int sdei_get_conduit(struct platform_device *pdev)
}
pr_warn("invalid \"method\" property: %s\n", method);
- } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+ } else if (!acpi_disabled) {
if (acpi_psci_use_hvc()) {
sdei_firmware_call = &sdei_smccc_hvc;
return SMCCC_CONDUIT_HVC;
@@ -1000,8 +965,6 @@ static int sdei_probe(struct platform_device *pdev)
return 0;
err = sdei_api_get_version(&ver);
- if (err == -EOPNOTSUPP)
- pr_err("advertised but not implemented in platform firmware\n");
if (err) {
pr_err("Failed to get SDEI version: %d\n", err);
sdei_mark_interface_broken();
@@ -1099,16 +1062,20 @@ static bool __init sdei_present_acpi(void)
static int __init sdei_init(void)
{
- int ret = platform_driver_register(&sdei_driver);
-
- if (!ret && sdei_present_acpi()) {
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple(sdei_driver.driver.name,
- 0, NULL, 0);
- if (IS_ERR(pdev))
- pr_info("Failed to register ACPI:SDEI platform device %ld\n",
- PTR_ERR(pdev));
+ struct platform_device *pdev;
+ int ret;
+
+ ret = platform_driver_register(&sdei_driver);
+ if (ret || !sdei_present_acpi())
+ return ret;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name,
+ 0, NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ platform_driver_unregister(&sdei_driver);
+ pr_info("Failed to register ACPI:SDEI platform device %d\n",
+ ret);
}
return ret;
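
On the CROSSCALL_INIT rewrite above: the old comma-expression form already acted as a single statement, but do { } while (0) is the kernel convention for multi-statement macros; it admits real statements in the body and stays safe in unbraced conditionals. A generic illustration (all names are made up):

	struct example_state { int a, b, c; };

	#define EXAMPLE_INIT(x)			\
		do {				\
			(x)->a = 0;		\
			(x)->b = 0;		\
			(x)->c = 0;		\
		} while (0)

	void example(struct example_state *s, int cond)
	{
		if (cond)
			EXAMPLE_INIT(s);	/* expands to one statement */
		else
			s->a = -1;		/* no dangling-else surprise */
	}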
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 4787f86c8ac1..14fbcd11657c 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -27,6 +27,7 @@
*/
#include <linux/bcm47xx_nvram.h>
+#include <linux/bcm47xx_sprom.h>
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 5066d1f1d687..d51ca0428bb8 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -21,7 +21,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
/*
* DMI stands for "Desktop Management Interface". It is part
* of and an antecedent to, SMBIOS, which stands for System
- * Management BIOS. See further: http://www.dmtf.org/standards
+ * Management BIOS. See further: https://www.dmtf.org/standards
*/
static const char dmi_empty_string[] = "";
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3939699e62fe..36ec1f718893 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -4,20 +4,15 @@ menu "EFI (Extensible Firmware Interface) Support"
config EFI_VARS
tristate "EFI Variable Support via sysfs"
- depends on EFI
+ depends on EFI && (X86 || IA64)
default n
help
If you say Y here, you are able to get EFI (Extensible Firmware
Interface) variable information via sysfs. You may read,
write, create, and destroy EFI variables through this interface.
-
- Note that using this driver in concert with efibootmgr requires
- at least test release version 0.5.0-test3 or later, which is
- available from:
- <http://linux.dell.com/efibootmgr/testing/efibootmgr-0.5.0-test3.tar.gz>
-
- Subsequent efibootmgr releases may be found at:
- <http://github.com/vathpela/efibootmgr>
+ Note that this driver is only retained for compatibility with
+ legacy users: new users should use the efivarfs filesystem
+ instead.
config EFI_ESRT
bool
@@ -26,7 +21,7 @@ config EFI_ESRT
config EFI_VARS_PSTORE
tristate "Register efivars backend for pstore"
- depends on EFI_VARS && PSTORE
+ depends on PSTORE
default y
help
Say Y here to enable use efivars as a backend to pstore. This
@@ -111,7 +106,7 @@ config EFI_GENERIC_STUB
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
- depends on EFI_GENERIC_STUB
+ depends on EFI_GENERIC_STUB && !RISCV
default y
help
Select this config option to add support for the dtb= command
@@ -128,6 +123,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
bool "Enable the command line initrd loader" if !X86
depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
default y
+ depends on !RISCV
help
Select this config option to add support for the initrd= command
line parameter, allowing an initrd that resides on the same volume
@@ -137,7 +133,6 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
- depends on EFI_VARS
default n
help
This module installs a reboot hook, such that if reboot() is
@@ -281,7 +276,7 @@ config EFI_EARLYCON
config EFI_CUSTOM_SSDT_OVERLAYS
bool "Load custom ACPI SSDT overlay from an EFI variable"
- depends on EFI_VARS && ACPI
+ depends on EFI && ACPI
default ACPI_TABLE_UPGRADE
help
Allow loading of an ACPI SSDT overlay from an EFI variable specified
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 7a216984552b..d6ca2da19339 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -28,13 +28,16 @@ obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
+obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
fake_map-y += fake_mem.o
fake_map-$(CONFIG_X86) += x86_fake_mem.o
-arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
+arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
+riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o
+obj-$(CONFIG_RISCV) += $(riscv-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index f564e15fbc7e..e15d484b6a5a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -232,10 +232,20 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
n += scnprintf(msg + n, len - n, "rank: %d ", mem->rank);
if (mem->validation_bits & CPER_MEM_VALID_BANK)
n += scnprintf(msg + n, len - n, "bank: %d ", mem->bank);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_GROUP)
+ n += scnprintf(msg + n, len - n, "bank_group: %d ",
+ mem->bank >> CPER_MEM_BANK_GROUP_SHIFT);
+ if (mem->validation_bits & CPER_MEM_VALID_BANK_ADDRESS)
+ n += scnprintf(msg + n, len - n, "bank_address: %d ",
+ mem->bank & CPER_MEM_BANK_ADDRESS_MASK);
if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
n += scnprintf(msg + n, len - n, "device: %d ", mem->device);
- if (mem->validation_bits & CPER_MEM_VALID_ROW)
- n += scnprintf(msg + n, len - n, "row: %d ", mem->row);
+ if (mem->validation_bits & (CPER_MEM_VALID_ROW | CPER_MEM_VALID_ROW_EXT)) {
+ u32 row = mem->row;
+
+ row |= cper_get_mem_extension(mem->validation_bits, mem->extended);
+ n += scnprintf(msg + n, len - n, "row: %d ", row);
+ }
if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
n += scnprintf(msg + n, len - n, "column: %d ", mem->column);
if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
@@ -250,6 +260,9 @@ static int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg)
if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
scnprintf(msg + n, len - n, "target_id: 0x%016llx ",
mem->target_id);
+ if (mem->validation_bits & CPER_MEM_VALID_CHIP_ID)
+ scnprintf(msg + n, len - n, "chip_id: %d ",
+ mem->extended >> CPER_MEM_CHIP_ID_SHIFT);
msg[n] = '\0';
return n;
@@ -292,6 +305,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
cmem->requestor_id = mem->requestor_id;
cmem->responder_id = mem->responder_id;
cmem->target_id = mem->target_id;
+ cmem->extended = mem->extended;
cmem->rank = mem->rank;
cmem->mem_array_handle = mem->mem_array_handle;
cmem->mem_dev_handle = mem->mem_dev_handle;
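
The bank_group/bank_address split above follows the UEFI CPER encoding in which the 16-bit bank field carries the DDR bank group in its upper bits and the bank address in its low byte; the shift and mask come from the CPER_MEM_* macros referenced by the patch (the values below are assumed, not quoted from the header). A tiny hosted-C check of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define CPER_MEM_BANK_GROUP_SHIFT	8	/* assumed */
	#define CPER_MEM_BANK_ADDRESS_MASK	0xff	/* assumed */

	int main(void)
	{
		uint16_t bank = 0x0304;	/* example encoding */

		printf("bank_group: %d bank_address: %d\n",
		       bank >> CPER_MEM_BANK_GROUP_SHIFT,
		       bank & CPER_MEM_BANK_ADDRESS_MASK);
		return 0;	/* prints "bank_group: 3 bank_address: 4" */
	}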
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/efi-init.c
index 71c445d20258..f55a92ff12c0 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -236,6 +236,7 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
+ efi_mokvar_table_init();
memblock_reserve(data.phys_map & PAGE_MASK,
PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index feb7fe6f2da7..0ef086e43090 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -8,6 +8,8 @@
#define DUMP_NAME_LEN 66
+#define EFIVARS_DATA_SIZE_MAX 1024
+
static bool efivars_pstore_disable =
IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
@@ -18,6 +20,9 @@ module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
+static LIST_HEAD(efi_pstore_list);
+static DECLARE_WORK(efivar_work, NULL);
+
static int efi_pstore_open(struct pstore_info *psi)
{
psi->data = NULL;
@@ -126,7 +131,7 @@ static inline int __efi_pstore_scan_sysfs_exit(struct efivar_entry *entry,
if (entry->deleting) {
list_del(&entry->list);
efivar_entry_iter_end();
- efivar_unregister(entry);
+ kfree(entry);
if (efivar_entry_iter_begin())
return -EINTR;
} else if (turn_off_scanning)
@@ -169,7 +174,7 @@ static int efi_pstore_sysfs_entry_iter(struct pstore_record *record)
{
struct efivar_entry **pos = (struct efivar_entry **)&record->psi->data;
struct efivar_entry *entry, *n;
- struct list_head *head = &efivar_sysfs_list;
+ struct list_head *head = &efi_pstore_list;
int size = 0;
int ret;
@@ -263,8 +268,9 @@ static int efi_pstore_write(struct pstore_record *record)
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
preemptible(), record->size, record->psi->buf);
- if (record->reason == KMSG_DUMP_OOPS)
- efivar_run_worker();
+ if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE))
+ if (!schedule_work(&efivar_work))
+ module_put(THIS_MODULE);
return ret;
};
@@ -314,12 +320,12 @@ static int efi_pstore_erase_name(const char *name)
if (efivar_entry_iter_begin())
return -EINTR;
- found = __efivar_entry_iter(efi_pstore_erase_func, &efivar_sysfs_list,
+ found = __efivar_entry_iter(efi_pstore_erase_func, &efi_pstore_list,
efi_name, &entry);
efivar_entry_iter_end();
if (found && !entry->scanning)
- efivar_unregister(entry);
+ kfree(entry);
return found ? 0 : -ENOENT;
}
@@ -354,14 +360,77 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
+static int efi_pstore_callback(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ entry->var.VendorGuid = vendor;
+
+ ret = efivar_entry_add(entry, &efi_pstore_list);
+ if (ret)
+ kfree(entry);
+
+ return ret;
+}
+
+static int efi_pstore_update_entry(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry = data;
+
+ if (efivar_entry_find(name, vendor, &efi_pstore_list, false))
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+
+ return 1;
+}
+
+static void efi_pstore_update_entries(struct work_struct *work)
+{
+ struct efivar_entry *entry;
+ int err;
+
+	/* Add new entries to the pstore list */
+ while (1) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ err = efivar_init(efi_pstore_update_entry, entry,
+ false, &efi_pstore_list);
+ if (!err)
+ break;
+
+ efivar_entry_add(entry, &efi_pstore_list);
+ }
+
+ kfree(entry);
+ module_put(THIS_MODULE);
+}
+
static __init int efivars_pstore_init(void)
{
+ int ret;
+
if (!efivars_kobject() || !efivar_supports_writes())
return 0;
if (efivars_pstore_disable)
return 0;
+ ret = efivar_init(efi_pstore_callback, NULL, true, &efi_pstore_list);
+ if (ret)
+ return ret;
+
efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
if (!efi_pstore_info.buf)
return -ENOMEM;
@@ -374,6 +443,8 @@ static __init int efivars_pstore_init(void)
efi_pstore_info.bufsize = 0;
}
+ INIT_WORK(&efivar_work, efi_pstore_update_entries);
+
return 0;
}
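
The pstore write path above pins the module before scheduling the deferred scan, so the worker's code cannot be unloaded while the work is queued or running: the reference is dropped immediately if the work was already pending, or by the worker itself when it finishes. A condensed sketch of that pattern (example_* names are illustrative):

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static void example_worker(struct work_struct *work)
	{
		/* ... deferred processing ... */
		module_put(THIS_MODULE);	/* release the ref taken at schedule time */
	}
	static DECLARE_WORK(example_work, example_worker);

	static void example_kick(void)
	{
		/* Pin the module so the worker cannot outlive its code. */
		if (try_module_get(THIS_MODULE))
			if (!schedule_work(&example_work))
				module_put(THIS_MODULE);	/* was already queued */
	}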
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3aa07c3b5136..5e5480a0a32d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -43,6 +43,9 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ .mokvar_table = EFI_INVALID_TABLE_ADDR,
+#endif
};
EXPORT_SYMBOL(efi);
@@ -519,6 +522,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
+#endif
{},
};
@@ -714,7 +720,7 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
vendor);
}
-static __initdata char memory_type_name[][20] = {
+static __initdata char memory_type_name[][13] = {
"Reserved",
"Loader Code",
"Loader Data",
@@ -722,14 +728,14 @@ static __initdata char memory_type_name[][20] = {
"Boot Data",
"Runtime Code",
"Runtime Data",
- "Conventional Memory",
- "Unusable Memory",
- "ACPI Reclaim Memory",
- "ACPI Memory NVS",
- "Memory Mapped I/O",
- "MMIO Port Space",
+ "Conventional",
+ "Unusable",
+ "ACPI Reclaim",
+ "ACPI Mem NVS",
+ "MMIO",
+ "MMIO Port",
"PAL Code",
- "Persistent Memory",
+ "Persistent",
};
char * __init efi_md_typeattr_format(char *buf, size_t size,
@@ -756,26 +762,27 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
- EFI_MEMORY_NV | EFI_MEMORY_SP |
+ EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
- attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
- attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
- attr & EFI_MEMORY_SP ? "SP" : "",
- attr & EFI_MEMORY_NV ? "NV" : "",
- attr & EFI_MEMORY_XP ? "XP" : "",
- attr & EFI_MEMORY_RP ? "RP" : "",
- attr & EFI_MEMORY_WP ? "WP" : "",
- attr & EFI_MEMORY_RO ? "RO" : "",
- attr & EFI_MEMORY_UCE ? "UCE" : "",
- attr & EFI_MEMORY_WB ? "WB" : "",
- attr & EFI_MEMORY_WT ? "WT" : "",
- attr & EFI_MEMORY_WC ? "WC" : "",
- attr & EFI_MEMORY_UC ? "UC" : "");
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
+ attr & EFI_MEMORY_NV ? "NV" : "",
+ attr & EFI_MEMORY_XP ? "XP" : "",
+ attr & EFI_MEMORY_RP ? "RP" : "",
+ attr & EFI_MEMORY_WP ? "WP" : "",
+ attr & EFI_MEMORY_RO ? "RO" : "",
+ attr & EFI_MEMORY_UCE ? "UCE" : "",
+ attr & EFI_MEMORY_WB ? "WB" : "",
+ attr & EFI_MEMORY_WT ? "WT" : "",
+ attr & EFI_MEMORY_WC ? "WC" : "",
+ attr & EFI_MEMORY_UC ? "UC" : "");
return buf;
}
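
The memory_type_name width shrinks from 20 to 13 because the longest remaining string, "Conventional", is 12 characters plus the terminating NUL. A build-time assertion could pin that down (hedged sketch, not in the patch):

	#include <assert.h>

	/* 12 characters + NUL == 13, the new column width. */
	static_assert(sizeof("Conventional") == 13,
		      "memory_type_name entries must fit in 13 bytes");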
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index dcea137142b3..e6b16b3a17a8 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -22,10 +22,8 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@Dell.com>");
MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);
-MODULE_ALIAS("platform:efivars");
-LIST_HEAD(efivar_sysfs_list);
-EXPORT_SYMBOL_GPL(efivar_sysfs_list);
+static LIST_HEAD(efivar_sysfs_list);
static struct kset *efivars_kset;
@@ -591,42 +589,6 @@ out_free:
return error;
}
-static int efivar_update_sysfs_entry(efi_char16_t *name, efi_guid_t vendor,
- unsigned long name_size, void *data)
-{
- struct efivar_entry *entry = data;
-
- if (efivar_entry_find(name, vendor, &efivar_sysfs_list, false))
- return 0;
-
- memcpy(entry->var.VariableName, name, name_size);
- memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
-
- return 1;
-}
-
-static void efivar_update_sysfs_entries(struct work_struct *work)
-{
- struct efivar_entry *entry;
- int err;
-
- /* Add new sysfs entries */
- while (1) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
-
- err = efivar_init(efivar_update_sysfs_entry, entry,
- false, &efivar_sysfs_list);
- if (!err)
- break;
-
- efivar_create_sysfs_entry(entry);
- }
-
- kfree(entry);
-}
-
static int efivars_sysfs_callback(efi_char16_t *name, efi_guid_t vendor,
unsigned long name_size, void *data)
{
@@ -675,7 +637,7 @@ static void efivars_sysfs_exit(void)
kset_unregister(efivars_kset);
}
-int efivars_sysfs_init(void)
+static int efivars_sysfs_init(void)
{
struct kobject *parent_kobj = efivars_kobject();
int error = 0;
@@ -701,11 +663,8 @@ int efivars_sysfs_init(void)
return error;
}
- INIT_WORK(&efivar_work, efivar_update_sysfs_entries);
-
return 0;
}
-EXPORT_SYMBOL_GPL(efivars_sysfs_init);
module_init(efivars_sysfs_init);
module_exit(efivars_sysfs_exit);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 296b18fbd7a2..8a94388e38b3 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -18,15 +18,18 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
- -fpie $(DISABLE_STACKLEAK_PLUGIN)
+ -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+ $(call cc-option,-mbranch-protection=none)
cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
+cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpic
cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
- -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \
+ -include $(srctree)/include/linux/hidden.h \
-D__NO_FORTIFY \
-ffreestanding \
-fno-stack-protector \
@@ -63,8 +66,14 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
lib-$(CONFIG_X86) += x86-stub.o
+lib-$(CONFIG_RISCV) += riscv-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
-CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+# Even when -mbranch-protection=none is set, Clang will generate a
+# .note.gnu.property for code-less object files (like lib/ctype.c),
+# so work around this by explicitly removing the unwanted section.
+# https://bugs.llvm.org/show_bug.cgi?id=46480
+STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
#
# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
@@ -106,6 +115,13 @@ STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
+# For RISC-V, we don't need anything special beyond what arm64 does. Keep
+# all the symbols in the .init section and make sure that no absolute
+# symbol references remain.
+STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,stubcopy)
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index d08e5d55838c..4b5b2403b3a0 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -113,162 +113,58 @@ void free_screen_info(struct screen_info *si)
efi_bs_call(free_pool, si);
}
-static efi_status_t reserve_kernel_base(unsigned long dram_base,
- unsigned long *reserve_addr,
- unsigned long *reserve_size)
-{
- efi_physical_addr_t alloc_addr;
- efi_memory_desc_t *memory_map;
- unsigned long nr_pages, map_size, desc_size, buff_size;
- efi_status_t status;
- unsigned long l;
-
- struct efi_boot_memmap map = {
- .map = &memory_map,
- .map_size = &map_size,
- .desc_size = &desc_size,
- .desc_ver = NULL,
- .key_ptr = NULL,
- .buff_size = &buff_size,
- };
-
- /*
- * Reserve memory for the uncompressed kernel image. This is
- * all that prevents any future allocations from conflicting
- * with the kernel. Since we can't tell from the compressed
- * image how much DRAM the kernel actually uses (due to BSS
- * size uncertainty) we allocate the maximum possible size.
- * Do this very early, as prints can cause memory allocations
- * that may conflict with this.
- */
- alloc_addr = dram_base + MAX_UNCOMP_KERNEL_SIZE;
- nr_pages = MAX_UNCOMP_KERNEL_SIZE / EFI_PAGE_SIZE;
- status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
- EFI_BOOT_SERVICES_DATA, nr_pages, &alloc_addr);
- if (status == EFI_SUCCESS) {
- if (alloc_addr == dram_base) {
- *reserve_addr = alloc_addr;
- *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
- return EFI_SUCCESS;
- }
- /*
- * If we end up here, the allocation succeeded but starts below
- * dram_base. This can only occur if the real base of DRAM is
- * not a multiple of 128 MB, in which case dram_base will have
- * been rounded up. Since this implies that a part of the region
- * was already occupied, we need to fall through to the code
- * below to ensure that the existing allocations don't conflict.
- * For this reason, we use EFI_BOOT_SERVICES_DATA above and not
- * EFI_LOADER_DATA, which we wouldn't able to distinguish from
- * allocations that we want to disallow.
- */
- }
-
- /*
- * If the allocation above failed, we may still be able to proceed:
- * if the only allocations in the region are of types that will be
- * released to the OS after ExitBootServices(), the decompressor can
- * safely overwrite them.
- */
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS) {
- efi_err("reserve_kernel_base(): Unable to retrieve memory map.\n");
- return status;
- }
-
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *desc;
- u64 start, end;
-
- desc = (void *)memory_map + l;
- start = desc->phys_addr;
- end = start + desc->num_pages * EFI_PAGE_SIZE;
-
- /* Skip if entry does not intersect with region */
- if (start >= dram_base + MAX_UNCOMP_KERNEL_SIZE ||
- end <= dram_base)
- continue;
-
- switch (desc->type) {
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- /* Ignore types that are released to the OS anyway */
- continue;
-
- case EFI_CONVENTIONAL_MEMORY:
- /* Skip soft reserved conventional memory */
- if (efi_soft_reserve_enabled() &&
- (desc->attribute & EFI_MEMORY_SP))
- continue;
-
- /*
- * Reserve the intersection between this entry and the
- * region.
- */
- start = max(start, (u64)dram_base);
- end = min(end, (u64)dram_base + MAX_UNCOMP_KERNEL_SIZE);
-
- status = efi_bs_call(allocate_pages,
- EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA,
- (end - start) / EFI_PAGE_SIZE,
- &start);
- if (status != EFI_SUCCESS) {
- efi_err("reserve_kernel_base(): alloc failed.\n");
- goto out;
- }
- break;
-
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- /*
- * These regions may be released and reallocated for
- * another purpose (including EFI_RUNTIME_SERVICE_DATA)
- * at any time during the execution of the OS loader,
- * so we cannot consider them as safe.
- */
- default:
- /*
- * Treat any other allocation in the region as unsafe */
- status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
- }
-
- status = EFI_SUCCESS;
-out:
- efi_bs_call(free_pool, memory_map);
- return status;
-}
-
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- unsigned long dram_base,
efi_loaded_image_t *image)
{
- unsigned long kernel_base;
+ const int slack = TEXT_OFFSET - 5 * PAGE_SIZE;
+ int alloc_size = MAX_UNCOMP_KERNEL_SIZE + EFI_PHYS_ALIGN;
+ unsigned long alloc_base, kernel_base;
efi_status_t status;
- /* use a 16 MiB aligned base for the decompressed kernel */
- kernel_base = round_up(dram_base, SZ_16M) + TEXT_OFFSET;
-
/*
- * Note that some platforms (notably, the Raspberry Pi 2) put
- * spin-tables and other pieces of firmware at the base of RAM,
- * abusing the fact that the window of TEXT_OFFSET bytes at the
- * base of the kernel image is only partially used at the moment.
- * (Up to 5 pages are used for the swapper page tables)
+ * Allocate space for the decompressed kernel as low as possible.
+ * The region should be 16 MiB aligned, but the first 'slack' bytes
+ * are not used by Linux, so we allow those to be occupied by the
+ * firmware.
*/
- status = reserve_kernel_base(kernel_base - 5 * PAGE_SIZE, reserve_addr,
- reserve_size);
+ status = efi_low_alloc_above(alloc_size, EFI_PAGE_SIZE, &alloc_base, 0x0);
if (status != EFI_SUCCESS) {
efi_err("Unable to allocate memory for uncompressed kernel.\n");
return status;
}
- *image_addr = kernel_base;
+ if ((alloc_base % EFI_PHYS_ALIGN) > slack) {
+ /*
+ * More than 'slack' bytes are already occupied at the base of
+ * the allocation, so we need to advance to the next 16 MiB block.
+ */
+ kernel_base = round_up(alloc_base, EFI_PHYS_ALIGN);
+ efi_info("Free memory starts at 0x%lx, setting kernel_base to 0x%lx\n",
+ alloc_base, kernel_base);
+ } else {
+ kernel_base = round_down(alloc_base, EFI_PHYS_ALIGN);
+ }
+
+ *reserve_addr = kernel_base + slack;
+ *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
+
+ /* now free the parts that we will not use */
+ if (*reserve_addr > alloc_base) {
+ efi_bs_call(free_pages, alloc_base,
+ (*reserve_addr - alloc_base) / EFI_PAGE_SIZE);
+ alloc_size -= *reserve_addr - alloc_base;
+ }
+ efi_bs_call(free_pages, *reserve_addr + MAX_UNCOMP_KERNEL_SIZE,
+ (alloc_size - MAX_UNCOMP_KERNEL_SIZE) / EFI_PAGE_SIZE);
+
+ *image_addr = kernel_base + TEXT_OFFSET;
*image_size = 0;
+
+ efi_debug("image addr == 0x%lx, reserve_addr == 0x%lx\n",
+ *image_addr, *reserve_addr);
+
return EFI_SUCCESS;
}
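
For reference, a standalone sketch of the base-selection arithmetic above, using illustrative constants (TEXT_OFFSET = 0x8000, 4 KiB pages, EFI_PHYS_ALIGN = 16 MiB); round_up_to()/round_down_to() stand in for the kernel's round_up()/round_down() helpers, and the values are assumptions for the example, not taken from this patch:

#include <stdio.h>

#define PAGE_SIZE      0x1000UL		/* 4 KiB */
#define TEXT_OFFSET    0x8000UL		/* illustrative arm32 value */
#define EFI_PHYS_ALIGN (16UL << 20)	/* 16 MiB */

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned long round_down_to(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}

/*
 * Mirror of the decision above: if more than 'slack' bytes at the
 * base of the allocation are already occupied, advance to the next
 * 16 MiB boundary; otherwise round down and let the firmware keep
 * the unused head of the window.
 */
static unsigned long pick_kernel_base(unsigned long alloc_base)
{
	unsigned long slack = TEXT_OFFSET - 5 * PAGE_SIZE;	/* 0x3000 */

	if ((alloc_base % EFI_PHYS_ALIGN) > slack)
		return round_up_to(alloc_base, EFI_PHYS_ALIGN);
	return round_down_to(alloc_base, EFI_PHYS_ALIGN);
}

int main(void)
{
	printf("0x2000 -> 0x%lx\n", pick_kernel_base(0x2000));	/* 0x0, within slack */
	printf("0x4000 -> 0x%lx\n", pick_kernel_base(0x4000));	/* 0x1000000, next block */
	return 0;
}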
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index e5bfac79e5ac..22ece1ad68a8 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -50,7 +50,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- unsigned long dram_base,
efi_loaded_image_t *image)
{
efi_status_t status;
@@ -62,10 +61,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
- efi_info("EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ efi_info("EFI_RNG_PROTOCOL unavailable, KASLR will be disabled\n");
+ efi_nokaslr = true;
} else if (status != EFI_SUCCESS) {
- efi_err("efi_get_random_bytes() failed\n");
- return status;
+ efi_err("efi_get_random_bytes() failed (0x%lx), KASLR will be disabled\n",
+ status);
+ efi_nokaslr = true;
}
} else {
efi_info("KASLR disabled on kernel command line\n");
@@ -77,7 +78,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
- *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align();
+ *reserve_size = kernel_memsize;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
@@ -91,7 +92,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
if (status != EFI_SUCCESS) {
- if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) {
+ if (IS_ALIGNED((u64)_text, min_kimg_align())) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the alignment is suitable.
@@ -111,7 +112,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
}
- *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align();
+ *image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
return EFI_SUCCESS;
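
A minimal sketch of the in-place-execution test after this change; IS_ALIGNED matches the kernel macro, while min_kimg_align() is stubbed to 2 MiB purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* illustrative stand-in for the kernel's min_kimg_align() */
static uint64_t min_kimg_align(void)
{
	return 2ULL << 20;	/* 2 MiB */
}

/*
 * With the TEXT_OFFSET fudge removed, the stub can execute the image
 * wherever the UEFI PE/COFF loader placed it, provided the load
 * address itself meets the minimum kernel alignment.
 */
static bool can_execute_in_place(uint64_t text)
{
	return IS_ALIGNED(text, min_kimg_align());
}

int main(void)
{
	printf("%d\n", can_execute_in_place(0x40200000ULL));	/* 1: aligned */
	printf("%d\n", can_execute_in_place(0x40210000ULL));	/* 0: must relocate */
	return 0;
}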
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f735db55adc0..aa8da0a49829 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -239,6 +239,102 @@ efi_status_t efi_parse_options(char const *cmdline)
}
/*
+ * The EFI_LOAD_OPTION descriptor has the following layout:
+ * u32 Attributes;
+ * u16 FilePathListLength;
+ * u16 Description[];
+ * efi_device_path_protocol_t FilePathList[];
+ * u8 OptionalData[];
+ *
+ * This function validates and unpacks the variable-size data fields.
+ */
+static
+bool efi_load_option_unpack(efi_load_option_unpacked_t *dest,
+ const efi_load_option_t *src, size_t size)
+{
+ const void *pos;
+ u16 c;
+ efi_device_path_protocol_t header;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+
+ if (size < offsetof(efi_load_option_t, variable_data))
+ return false;
+ pos = src->variable_data;
+ size -= offsetof(efi_load_option_t, variable_data);
+
+ if ((src->attributes & ~EFI_LOAD_OPTION_MASK) != 0)
+ return false;
+
+ /* Scan description. */
+ description = pos;
+ do {
+ if (size < sizeof(c))
+ return false;
+ c = *(const u16 *)pos;
+ pos += sizeof(c);
+ size -= sizeof(c);
+ } while (c != L'\0');
+
+ /* Scan file_path_list. */
+ file_path_list = pos;
+ do {
+ if (size < sizeof(header))
+ return false;
+ header = *(const efi_device_path_protocol_t *)pos;
+ if (header.length < sizeof(header))
+ return false;
+ if (size < header.length)
+ return false;
+ pos += header.length;
+ size -= header.length;
+ } while ((header.type != EFI_DEV_END_PATH && header.type != EFI_DEV_END_PATH2) ||
+ (header.sub_type != EFI_DEV_END_ENTIRE));
+ if (pos != (const void *)file_path_list + src->file_path_list_length)
+ return false;
+
+ dest->attributes = src->attributes;
+ dest->file_path_list_length = src->file_path_list_length;
+ dest->description = description;
+ dest->file_path_list = file_path_list;
+ dest->optional_data_size = size;
+ dest->optional_data = size ? pos : NULL;
+
+ return true;
+}
+
+/*
+ * At least some versions of Dell firmware pass the entire contents of the
+ * Boot#### variable, i.e. the EFI_LOAD_OPTION descriptor, rather than just the
+ * OptionalData field.
+ *
+ * Detect this case and extract OptionalData.
+ */
+void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size)
+{
+ const efi_load_option_t *load_option = *load_options;
+ efi_load_option_unpacked_t load_option_unpacked;
+
+ if (!IS_ENABLED(CONFIG_X86))
+ return;
+ if (!load_option)
+ return;
+ if (*load_options_size < sizeof(*load_option))
+ return;
+ if ((load_option->attributes & ~EFI_LOAD_OPTION_BOOT_MASK) != 0)
+ return;
+
+ if (!efi_load_option_unpack(&load_option_unpacked, load_option, *load_options_size))
+ return;
+
+ efi_warn_once(FW_BUG "LoadOptions is an EFI_LOAD_OPTION descriptor\n");
+ efi_warn_once(FW_BUG "Using OptionalData as a workaround\n");
+
+ *load_options = load_option_unpacked.optional_data;
+ *load_options_size = load_option_unpacked.optional_data_size;
+}
+
+/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
@@ -247,12 +343,15 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
{
const u16 *s2;
unsigned long cmdline_addr = 0;
- int options_chars = efi_table_attr(image, load_options_size) / 2;
+ int options_chars = efi_table_attr(image, load_options_size);
const u16 *options = efi_table_attr(image, load_options);
int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
bool in_quote = false;
efi_status_t status;
+ efi_apply_loadoptions_quirk((const void **)&options, &options_chars);
+ options_chars /= sizeof(*options);
+
if (options) {
s2 = options;
while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
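
To make the EFI_LOAD_OPTION layout concrete, a host-side sketch that walks a hand-built blob; it follows the same description/file-path/optional-data order as efi_load_option_unpack() but omits its bounds checking, and the struct and end-node constants are simplified stand-ins for the EFI definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-ins for the EFI device-path end node */
#define DEV_END_PATH	0x7f
#define DEV_END_ENTIRE	0xff

struct dp_header {		/* efi_device_path_protocol_t header */
	uint8_t  type;
	uint8_t  sub_type;
	uint16_t length;
} __attribute__((packed));

/*
 * Walk the layout: u32 attributes, u16 file_path_list_length, a
 * NUL-terminated UTF-16 description, device-path nodes up to and
 * including the end node, then whatever remains is OptionalData.
 */
static void unpack(const uint8_t *buf, size_t size)
{
	const uint8_t *pos = buf + 6;	/* skip the fixed header */

	size -= 6;
	for (;;) {			/* skip the description */
		uint16_t c;

		memcpy(&c, pos, 2);
		pos += 2;
		size -= 2;
		if (c == 0)
			break;
	}
	for (;;) {			/* skip the file path list */
		struct dp_header h;

		memcpy(&h, pos, sizeof(h));
		pos += h.length;
		size -= h.length;
		if (h.type == DEV_END_PATH && h.sub_type == DEV_END_ENTIRE)
			break;
	}
	printf("OptionalData: %zu byte(s): \"%.*s\"\n", size, (int)size, pos);
}

int main(void)
{
	static const uint8_t blob[] = {
		1, 0, 0, 0,				/* attributes = ACTIVE */
		4, 0,					/* file_path_list_length */
		'B', 0, 0, 0,				/* description = L"B" */
		DEV_END_PATH, DEV_END_ENTIRE, 4, 0,	/* end-of-path node */
		'o', 'k',				/* OptionalData */
	};

	unpack(blob, sizeof(blob));	/* prints: OptionalData: 2 byte(s): "ok" */
	return 0;
}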
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index a5a405d8ab44..914a343c7785 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -17,7 +17,10 @@
/*
* This is the base address at which to start allocating virtual memory ranges
- * for UEFI Runtime Services. This is in the low TTBR0 range so that we can use
+ * for UEFI Runtime Services.
+ *
+ * For ARM/ARM64:
+ * This is in the low TTBR0 range so that we can use
* any allocation we choose, and eliminate the risk of a conflict after kexec.
* The value chosen is the largest non-zero power of 2 suitable for this purpose
* both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
@@ -25,6 +28,12 @@
* Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
* map everything below 1 GB. (512 MB is a reasonable upper bound for the
* entire footprint of the UEFI runtime services memory regions)
+ *
+ * For RISC-V:
+ * There is no specific reason why this address (512 MB) can't be used as the
+ * EFI runtime virtual address on RISC-V, and it allows EFI runtime services
+ * to be used on both RV32 and RV64. Keep the same runtime virtual address
+ * for RISC-V as well to minimize code churn.
*/
#define EFI_RT_VIRTUAL_BASE SZ_512M
#define EFI_RT_VIRTUAL_SIZE SZ_512M
@@ -87,40 +96,6 @@ static void install_memreserve_table(void)
efi_err("Failed to install memreserve config table!\n");
}
-static unsigned long get_dram_base(void)
-{
- efi_status_t status;
- unsigned long map_size, buff_size;
- unsigned long membase = EFI_ERROR;
- struct efi_memory_map map;
- efi_memory_desc_t *md;
- struct efi_boot_memmap boot_map;
-
- boot_map.map = (efi_memory_desc_t **)&map.map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &map.desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
- if (status != EFI_SUCCESS)
- return membase;
-
- map.map_end = map.map + map_size;
-
- for_each_efi_memory_desc_in_map(&map, md) {
- if (md->attribute & EFI_MEMORY_WB) {
- if (membase > md->phys_addr)
- membase = md->phys_addr;
- }
- }
-
- efi_bs_call(free_pool, map.map);
-
- return membase;
-}
-
/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same
@@ -134,7 +109,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_status_t status;
unsigned long image_addr;
unsigned long image_size = 0;
- unsigned long dram_base;
/* addr/point and size pairs for memory management*/
unsigned long initrd_addr = 0;
unsigned long initrd_size = 0;
@@ -174,13 +148,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
goto fail;
}
- dram_base = get_dram_base();
- if (dram_base == EFI_ERROR) {
- efi_err("Failed to find DRAM base\n");
- status = EFI_LOAD_ERROR;
- goto fail;
- }
-
/*
* Get the command line from EFI, using the LOADED_IMAGE
* protocol. We are going to copy the command line into the
@@ -218,7 +185,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
status = handle_kernel_image(&image_addr, &image_size,
&reserve_addr,
&reserve_size,
- dram_base, image);
+ image);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
goto fail_free_screeninfo;
@@ -262,7 +229,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_info("Generating empty DTB\n");
if (!efi_noinitrd) {
- max_addr = efi_get_max_initrd_addr(dram_base, image_addr);
+ max_addr = efi_get_max_initrd_addr(image_addr);
status = efi_load_initrd(image, &initrd_addr, &initrd_size,
ULONG_MAX, max_addr);
if (status != EFI_SUCCESS)
@@ -306,7 +273,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
install_memreserve_table();
status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
- efi_get_max_fdt_addr(dram_base),
+ efi_get_max_fdt_addr(image_addr),
initrd_addr, initrd_size,
cmdline_ptr, fdt_addr, fdt_size);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 85050f5a1b28..2d7abcd99de9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -10,9 +10,6 @@
#include <linux/types.h>
#include <asm/efi.h>
-/* error code which can't be mistaken for valid address */
-#define EFI_ERROR (~0UL)
-
/*
* __init annotations should not be used in the EFI stub, since the code is
* either included in the decompressor (x86, ARM) where they have no effect,
@@ -55,11 +52,34 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_info(fmt, ...) \
efi_printk(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn(fmt, ...) \
+ efi_printk(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
#define efi_err(fmt, ...) \
efi_printk(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
#define efi_debug(fmt, ...) \
efi_printk(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+#define efi_printk_once(fmt, ...) \
+({ \
+ static bool __print_once; \
+ bool __ret_print_once = !__print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ efi_printk(fmt, ##__VA_ARGS__); \
+ } \
+ __ret_print_once; \
+})
+
+#define efi_info_once(fmt, ...) \
+ efi_printk_once(KERN_INFO fmt, ##__VA_ARGS__)
+#define efi_warn_once(fmt, ...) \
+ efi_printk_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+#define efi_err_once(fmt, ...) \
+ efi_printk_once(KERN_ERR "ERROR: " fmt, ##__VA_ARGS__)
+#define efi_debug_once(fmt, ...) \
+ efi_printk_once(KERN_DEBUG "DEBUG: " fmt, ##__VA_ARGS__)
+
/* Helper macros for the usual case of using simple C variables: */
#ifndef fdt_setprop_inplace_var
#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
@@ -688,6 +708,35 @@ union efi_load_file_protocol {
} mixed_mode;
};
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ u8 variable_data[];
+ // efi_char16_t description[];
+ // efi_device_path_protocol_t file_path_list[];
+ // u8 optional_data[];
+} __packed efi_load_option_t;
+
+#define EFI_LOAD_OPTION_ACTIVE 0x0001U
+#define EFI_LOAD_OPTION_FORCE_RECONNECT 0x0002U
+#define EFI_LOAD_OPTION_HIDDEN 0x0008U
+#define EFI_LOAD_OPTION_CATEGORY 0x1f00U
+#define EFI_LOAD_OPTION_CATEGORY_BOOT 0x0000U
+#define EFI_LOAD_OPTION_CATEGORY_APP 0x0100U
+
+#define EFI_LOAD_OPTION_BOOT_MASK \
+ (EFI_LOAD_OPTION_ACTIVE|EFI_LOAD_OPTION_HIDDEN|EFI_LOAD_OPTION_CATEGORY)
+#define EFI_LOAD_OPTION_MASK (EFI_LOAD_OPTION_FORCE_RECONNECT|EFI_LOAD_OPTION_BOOT_MASK)
+
+typedef struct {
+ u32 attributes;
+ u16 file_path_list_length;
+ const efi_char16_t *description;
+ const efi_device_path_protocol_t *file_path_list;
+ size_t optional_data_size;
+ const void *optional_data;
+} efi_load_option_unpacked_t;
+
void efi_pci_disable_bridge_busmaster(void);
typedef efi_status_t (*efi_exit_boot_map_processing)(
@@ -730,6 +779,8 @@ __printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size);
+
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
@@ -740,6 +791,9 @@ efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
unsigned long max, unsigned long align);
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min);
+
efi_status_t efi_relocate_kernel(unsigned long *image_addr,
unsigned long image_size,
unsigned long alloc_size,
@@ -786,7 +840,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
unsigned long *reserve_size,
- unsigned long dram_base,
efi_loaded_image_t *image);
asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
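
The _once variants rely on a function-local static guard inside a GNU statement expression, one guard per macro expansion; a freestanding sketch of the same pattern, with printf() standing in for efi_printk():

#include <stdbool.h>
#include <stdio.h>

/*
 * Same shape as efi_printk_once(): print on the first call only and
 * report whether this invocation was the one that printed. Each
 * macro expansion gets its own static guard.
 */
#define printk_once(fmt, ...)				\
({							\
	static bool __print_once;			\
	bool __ret_print_once = !__print_once;		\
							\
	if (!__print_once) {				\
		__print_once = true;			\
		printf(fmt, ##__VA_ARGS__);		\
	}						\
	__ret_print_once;				\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (printk_once("quirk detected (pass %d)\n", i))
			printf("first occurrence\n");
	return 0;	/* only pass 0 prints */
}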
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 11ecf3c4640e..368cd60000ee 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -136,7 +136,7 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
if (status)
goto fdt_set_fail;
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
efi_status_t efi_status;
efi_status = efi_get_random_bytes(sizeof(fdt_val64),
@@ -145,8 +145,6 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
if (status)
goto fdt_set_fail;
- } else if (efi_status != EFI_NOT_FOUND) {
- return efi_status;
}
}
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index 630caa6b1f4c..4e81c6077188 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -136,7 +136,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_size)
{
const efi_char16_t *cmdline = image->load_options;
- int cmdline_len = image->load_options_size / 2;
+ int cmdline_len = image->load_options_size;
unsigned long efi_chunk_size = ULONG_MAX;
efi_file_protocol_t *volume = NULL;
efi_file_protocol_t *file;
@@ -148,6 +148,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
if (!load_addr || !load_size)
return EFI_INVALID_PARAMETER;
+ efi_apply_loadoptions_quirk((const void **)&cmdline, &cmdline_len);
+ cmdline_len /= sizeof(*cmdline);
+
if (IS_ENABLED(CONFIG_X86) && !efi_nochunk)
efi_chunk_size = EFI_READ_CHUNK_SIZE;
diff --git a/drivers/firmware/efi/libstub/hidden.h b/drivers/firmware/efi/libstub/hidden.h
deleted file mode 100644
index 3493b041f419..000000000000
--- a/drivers/firmware/efi/libstub/hidden.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * To prevent the compiler from emitting GOT-indirected (and thus absolute)
- * references to any global symbols, override their visibility as 'hidden'
- */
-#pragma GCC visibility push(hidden)
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index 9b1aaf8b123f..8ee9eb2b9039 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -20,8 +20,8 @@
*
* Return: status code
*/
-static efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min)
+efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long min)
{
unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
new file mode 100644
index 000000000000..380e4e251399
--- /dev/null
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+
+#include <asm/efi.h>
+#include <asm/sections.h>
+
+#include "efistub.h"
+
+/*
+ * RISC-V requires the kernel image to be placed at a 2 MB aligned base for
+ * 64-bit and at a 4 MB aligned base for 32-bit.
+ */
+#ifdef CONFIG_64BIT
+#define MIN_KIMG_ALIGN SZ_2M
+#else
+#define MIN_KIMG_ALIGN SZ_4M
+#endif
+
+typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
+
+static u32 hartid;
+
+static u32 get_boot_hartid_from_fdt(void)
+{
+ const void *fdt;
+ int chosen_node, len;
+ const fdt32_t *prop;
+
+ fdt = get_efi_config_table(DEVICE_TREE_GUID);
+ if (!fdt)
+ return U32_MAX;
+
+ chosen_node = fdt_path_offset(fdt, "/chosen");
+ if (chosen_node < 0)
+ return U32_MAX;
+
+ prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
+ if (!prop || len != sizeof(u32))
+ return U32_MAX;
+
+ return fdt32_to_cpu(*prop);
+}
+
+efi_status_t check_platform_features(void)
+{
+ hartid = get_boot_hartid_from_fdt();
+ if (hartid == U32_MAX) {
+ efi_err("/chosen/boot-hartid missing or invalid!\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint, unsigned long fdt,
+ unsigned long fdt_size)
+{
+ unsigned long stext_offset = _start_kernel - _start;
+ unsigned long kernel_entry = entrypoint + stext_offset;
+ jump_kernel_func jump_kernel = (jump_kernel_func)kernel_entry;
+
+ /*
+ * Jump to the real kernel here with the following constraints:
+ * 1. The MMU should be disabled.
+ * 2. a0 should contain the hartid.
+ * 3. a1 should contain the DT address.
+ */
+ csr_write(CSR_SATP, 0);
+ jump_kernel(hartid, fdt);
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image)
+{
+ unsigned long kernel_size = 0;
+ unsigned long preferred_addr;
+ efi_status_t status;
+
+ kernel_size = _edata - _start;
+ *image_addr = (unsigned long)_start;
+ *image_size = kernel_size + (_end - _edata);
+
+ /*
+ * The RISC-V kernel maps the PAGE_OFFSET virtual address to the physical
+ * address it was booted at, so the kernel should boot from as low an
+ * address as possible to avoid wasting memory. The base of DRAM is
+ * typically occupied by the firmware, so the preferred boot address is
+ * the next aligned address. If the preferred address is not available,
+ * efi_relocate_kernel() falls back to efi_low_alloc_above() to allocate
+ * the lowest possible memory region that satisfies the address and size
+ * alignment constraints.
+ */
+ preferred_addr = MIN_KIMG_ALIGN;
+ status = efi_relocate_kernel(image_addr, kernel_size, *image_size,
+ preferred_addr, MIN_KIMG_ALIGN, 0x0);
+
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to relocate kernel\n");
+ *image_size = 0;
+ }
+ return status;
+}
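
A toy model of the placement policy described in handle_kernel_image() above; it is not efi_relocate_kernel() itself, the free-region list is hard-coded for illustration, and regions are assumed sorted by address:

#include <stdio.h>

#define MIN_KIMG_ALIGN	(2UL << 20)	/* 2 MiB on 64-bit RISC-V */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct region {
	unsigned long start, size;
};

/*
 * Toy placement policy: try the preferred (lowest aligned) address
 * first, then fall back to the lowest free region whose aligned base
 * still fits the image.
 */
static unsigned long place_kernel(const struct region *free_mem, int n,
				  unsigned long preferred, unsigned long size)
{
	for (int i = 0; i < n; i++) {
		unsigned long base = free_mem[i].start;
		unsigned long end = base + free_mem[i].size;
		unsigned long cand;

		cand = (preferred >= base && preferred + size <= end)
			? preferred : round_up(base, MIN_KIMG_ALIGN);
		if (cand + size <= end)
			return cand;
	}
	return 0;	/* no fit */
}

int main(void)
{
	/* the firmware occupies the first 2 MiB of DRAM in this example */
	struct region free_mem[] = { { 0x80200000UL, 62UL << 20 } };
	unsigned long addr = place_kernel(free_mem, 1,
					  MIN_KIMG_ALIGN, 16UL << 20);

	printf("kernel placed at 0x%lx\n", addr);	/* 0x80200000 */
	return 0;
}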
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
index 1ac2f8764715..5d13e43869ee 100644
--- a/drivers/firmware/efi/libstub/string.c
+++ b/drivers/firmware/efi/libstub/string.c
@@ -7,6 +7,7 @@
*/
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/firmware/efi/libstub/vsprintf.c b/drivers/firmware/efi/libstub/vsprintf.c
index e65ef49a54cd..1088e288c04d 100644
--- a/drivers/firmware/efi/libstub/vsprintf.c
+++ b/drivers/firmware/efi/libstub/vsprintf.c
@@ -135,7 +135,7 @@ char *number(char *end, unsigned long long num, int base, char locase)
break;
default:
unreachable();
- };
+ }
return end;
}
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
new file mode 100644
index 000000000000..d8bc01340686
--- /dev/null
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mokvar-table.c
+ *
+ * Copyright (c) 2020 Red Hat
+ * Author: Lenny Szubowicz <lszubowi@redhat.com>
+ *
+ * This module contains the kernel support for the Linux EFI Machine
+ * Owner Key (MOK) variable configuration table, which is identified by
+ * the LINUX_EFI_MOK_VARIABLE_TABLE_GUID.
+ *
+ * This EFI configuration table provides a more robust alternative to
+ * EFI volatile variables by which an EFI boot loader can pass the
+ * contents of the Machine Owner Key (MOK) certificate stores to the
+ * kernel during boot. If both the EFI MOK config table and corresponding
+ * EFI MOK variables are present, the table should be considered as
+ * more authoritative.
+ *
+ * This module includes code that validates and maps the EFI MOK table,
+ * if its presence was detected very early in boot.
+ *
+ * Kernel interface routines are provided to walk through all the
+ * entries in the MOK config table or to search for a specific named
+ * entry.
+ *
+ * The contents of the individual named MOK config table entries are
+ * made available to user space via read-only sysfs binary files under:
+ *
+ * /sys/firmware/efi/mok-variables/
+ *
+ */
+#define pr_fmt(fmt) "mokvar: " fmt
+
+#include <linux/capability.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table is a packed
+ * sequence of struct efi_mokvar_table_entry, one for each named
+ * MOK variable. The sequence is terminated by an entry with a
+ * completely NULL name and 0 data size.
+ *
+ * efi_mokvar_table_size is set to the computed size of the
+ * MOK config table by efi_mokvar_table_init(). This will be
+ * non-zero if and only if the table is present and has been
+ * validated by efi_mokvar_table_init().
+ */
+static size_t efi_mokvar_table_size;
+
+/*
+ * efi_mokvar_table_va is the kernel virtual address at which the
+ * EFI MOK config table has been mapped by efi_mokvar_sysfs_init().
+ */
+static struct efi_mokvar_table_entry *efi_mokvar_table_va;
+
+/*
+ * Each /sys/firmware/efi/mok-variables/ sysfs file is represented by
+ * an instance of struct efi_mokvar_sysfs_attr on efi_mokvar_sysfs_list.
+ * bin_attr.private points to the associated EFI MOK config table entry.
+ *
+ * This list is created during boot and then remains unchanged.
+ * So no synchronization is currently required to walk the list.
+ */
+struct efi_mokvar_sysfs_attr {
+ struct bin_attribute bin_attr;
+ struct list_head node;
+};
+
+static LIST_HEAD(efi_mokvar_sysfs_list);
+static struct kobject *mokvar_kobj;
+
+/*
+ * efi_mokvar_table_init() - Early boot validation of EFI MOK config table
+ *
+ * If present, validate and compute the size of the EFI MOK variable
+ * configuration table. This table may be provided by an EFI boot loader
+ * as an alternative to ordinary EFI variables, due to platform-dependent
+ * limitations. The memory occupied by this table is marked as reserved.
+ *
+ * This routine must be called before efi_free_boot_services() in order
+ * to guarantee that it can mark the table as reserved.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ */
+void __init efi_mokvar_table_init(void)
+{
+ efi_memory_desc_t md;
+ void *va = NULL;
+ unsigned long cur_offset = 0;
+ unsigned long offset_limit;
+ unsigned long map_size = 0;
+ unsigned long map_size_needed = 0;
+ unsigned long size;
+ struct efi_mokvar_table_entry *mokvar_entry;
+ int err;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ if (efi.mokvar_table == EFI_INVALID_TABLE_ADDR)
+ return;
+ /*
+ * The EFI MOK config table must fit within a single EFI memory
+ * descriptor range.
+ */
+ err = efi_mem_desc_lookup(efi.mokvar_table, &md);
+ if (err) {
+ pr_warn("EFI MOKvar config table is not within the EFI memory map\n");
+ return;
+ }
+
+ offset_limit = efi_mem_desc_end(&md) - efi.mokvar_table;
+
+ /*
+ * Validate the MOK config table. Since there is no table header
+ * from which we could get the total size of the MOK config table,
+ * we compute the total size as we validate each variably sized
+ * entry, remapping as necessary.
+ */
+ err = -EINVAL;
+ while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
+ mokvar_entry = va + cur_offset;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
+ if (map_size_needed > map_size) {
+ if (va)
+ early_memunmap(va, map_size);
+ /*
+ * Map a little more than the fixed size entry
+ * header, anticipating some data. It's safe to
+ * do so as long as we stay within current memory
+ * descriptor.
+ */
+ map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
+ offset_limit);
+ va = early_memremap(efi.mokvar_table, map_size);
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
+ efi.mokvar_table, map_size);
+ return;
+ }
+ mokvar_entry = va + cur_offset;
+ }
+
+ /* Check for last sentinel entry */
+ if (mokvar_entry->name[0] == '\0') {
+ if (mokvar_entry->data_size != 0)
+ break;
+ err = 0;
+ break;
+ }
+
+ /* Sanity check that the name is null terminated */
+ size = strnlen(mokvar_entry->name,
+ sizeof(mokvar_entry->name));
+ if (size >= sizeof(mokvar_entry->name))
+ break;
+
+ /* Advance to the next entry */
+ cur_offset = map_size_needed + mokvar_entry->data_size;
+ }
+
+ if (va)
+ early_memunmap(va, map_size);
+ if (err) {
+ pr_err("EFI MOKvar config table is not valid\n");
+ return;
+ }
+ efi_mem_reserve(efi.mokvar_table, map_size_needed);
+ efi_mokvar_table_size = map_size_needed;
+}
+
+/*
+ * efi_mokvar_entry_next() - Get next entry in the EFI MOK config table
+ *
+ * mokvar_entry: Pointer to current EFI MOK config table entry
+ * or null. Null indicates get first entry.
+ * Passed by reference. This is updated to the
+ * same value as the return value.
+ *
+ * Returns: Pointer to next EFI MOK config table entry
+ * or null, if there are no more entries.
+ * Same value is returned in the mokvar_entry
+ * parameter.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped, with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_next(
+ struct efi_mokvar_table_entry **mokvar_entry)
+{
+ struct efi_mokvar_table_entry *mokvar_cur;
+ struct efi_mokvar_table_entry *mokvar_next;
+ size_t size_cur;
+
+ mokvar_cur = *mokvar_entry;
+ *mokvar_entry = NULL;
+
+ if (efi_mokvar_table_va == NULL)
+ return NULL;
+
+ if (mokvar_cur == NULL) {
+ mokvar_next = efi_mokvar_table_va;
+ } else {
+ if (mokvar_cur->name[0] == '\0')
+ return NULL;
+ size_cur = sizeof(*mokvar_cur) + mokvar_cur->data_size;
+ mokvar_next = (void *)mokvar_cur + size_cur;
+ }
+
+ if (mokvar_next->name[0] == '\0')
+ return NULL;
+
+ *mokvar_entry = mokvar_next;
+ return mokvar_next;
+}
+
+/*
+ * efi_mokvar_entry_find() - Find EFI MOK config entry by name
+ *
+ * name: Name of the entry to look for.
+ *
+ * Returns: Pointer to EFI MOK config table entry if found;
+ * null otherwise.
+ *
+ * This routine depends on the EFI MOK config table being entirely
+ * mapped, with its starting virtual address in efi_mokvar_table_va.
+ */
+struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ if (!strncmp(name, mokvar_entry->name,
+ sizeof(mokvar_entry->name)))
+ return mokvar_entry;
+ }
+ return NULL;
+}
+
+/*
+ * efi_mokvar_sysfs_read() - sysfs binary file read routine
+ *
+ * Returns: Count of bytes read.
+ *
+ * Copy EFI MOK config table entry data for this mokvar sysfs binary file
+ * to the supplied buffer, starting at the specified offset into mokvar table
+ * entry data, for the specified count bytes. The copy is limited by the
+ * amount of data in this mokvar config table entry.
+ */
+static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+
+ if (off >= mokvar_entry->data_size)
+ return 0;
+ if (count > mokvar_entry->data_size - off)
+ count = mokvar_entry->data_size - off;
+
+ memcpy(buf, mokvar_entry->data + off, count);
+ return count;
+}
+
+/*
+ * efi_mokvar_sysfs_init() - Map EFI MOK config table and create sysfs
+ *
+ * Map the EFI MOK variable config table for run-time use by the kernel
+ * and create the sysfs entries in /sys/firmware/efi/mok-variables/
+ *
+ * This routine just returns if a valid EFI MOK variable config table
+ * was not found earlier during boot.
+ *
+ * This routine must be called during a "middle" initcall phase, i.e.
+ * after efi_mokvar_table_init() but before UEFI certs are loaded
+ * during late init.
+ *
+ * Implicit inputs:
+ * efi.mokvar_table: Physical address of EFI MOK variable config table
+ * or special value that indicates no such table.
+ *
+ * efi_mokvar_table_size: Computed size of EFI MOK variable config table.
+ * The table is considered present and valid if this
+ * is non-zero.
+ *
+ * Implicit outputs:
+ * efi_mokvar_table_va: Start virtual address of the EFI MOK config table.
+ */
+static int __init efi_mokvar_sysfs_init(void)
+{
+ void *config_va;
+ struct efi_mokvar_table_entry *mokvar_entry = NULL;
+ struct efi_mokvar_sysfs_attr *mokvar_sysfs = NULL;
+ int err = 0;
+
+ if (efi_mokvar_table_size == 0)
+ return -ENOENT;
+
+ config_va = memremap(efi.mokvar_table, efi_mokvar_table_size,
+ MEMREMAP_WB);
+ if (!config_va) {
+ pr_err("Failed to map EFI MOKvar config table\n");
+ return -ENOMEM;
+ }
+ efi_mokvar_table_va = config_va;
+
+ mokvar_kobj = kobject_create_and_add("mok-variables", efi_kobj);
+ if (!mokvar_kobj) {
+ pr_err("Failed to create EFI mok-variables sysfs entry\n");
+ return -ENOMEM;
+ }
+
+ while (efi_mokvar_entry_next(&mokvar_entry)) {
+ mokvar_sysfs = kzalloc(sizeof(*mokvar_sysfs), GFP_KERNEL);
+ if (!mokvar_sysfs) {
+ err = -ENOMEM;
+ break;
+ }
+
+ sysfs_bin_attr_init(&mokvar_sysfs->bin_attr);
+ mokvar_sysfs->bin_attr.private = mokvar_entry;
+ mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
+ mokvar_sysfs->bin_attr.attr.mode = 0400;
+ mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
+ mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
+
+ err = sysfs_create_bin_file(mokvar_kobj,
+ &mokvar_sysfs->bin_attr);
+ if (err)
+ break;
+
+ list_add_tail(&mokvar_sysfs->node, &efi_mokvar_sysfs_list);
+ }
+
+ if (err) {
+ pr_err("Failed to create some EFI mok-variables sysfs entries\n");
+ kfree(mokvar_sysfs);
+ }
+ return err;
+}
+device_initcall(efi_mokvar_sysfs_init);
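
The table walk above is plain pointer arithmetic over a packed entry sequence terminated by an empty name; a host-side sketch with a simplified entry layout (the real struct efi_mokvar_table_entry is declared in linux/efi.h with a larger name field, so the shapes here are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct efi_mokvar_table_entry */
struct mokvar_entry {
	char     name[16];	/* the kernel uses a larger fixed-size name */
	uint64_t data_size;
	uint8_t  data[];
} __attribute__((packed));

/* walk the packed sequence; an empty name terminates the table */
static void walk(const void *table)
{
	const struct mokvar_entry *e = table;

	while (e->name[0] != '\0') {
		printf("%s: %llu byte(s)\n", e->name,
		       (unsigned long long)e->data_size);
		e = (const struct mokvar_entry *)((const uint8_t *)e +
						  sizeof(*e) + e->data_size);
	}
}

int main(void)
{
	uint8_t buf[128] = { 0 };
	struct mokvar_entry *e = (struct mokvar_entry *)buf;

	strcpy(e->name, "MokListRT");
	e->data_size = 4;
	memcpy(e->data, "\xde\xad\xbe\xef", 4);
	/* the next, all-zero entry acts as the sentinel */

	walk(buf);	/* prints: MokListRT: 4 byte(s) */
	return 0;
}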
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
new file mode 100644
index 000000000000..d28e715d2bcc
--- /dev/null
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ * Adapted from drivers/firmware/efi/arm-runtime.c
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (ret) {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init riscv_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(riscv_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ switch_mm(current->active_mm, &efi_mm, NULL);
+}
+
+void efi_virtmap_unload(void)
+{
+ switch_mm(&efi_mm, current->active_mm, NULL);
+ preempt_enable();
+}
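
efi_virtmap_load() and efi_virtmap_unload() are meant to bracket each runtime-services call so that it executes under efi_mm; a freestanding sketch of that caller pattern, with printf() stubs standing in for the real mm switch and firmware call:

#include <stdio.h>

/* host-side stand-ins for the two hooks above */
static void efi_virtmap_load_stub(void)
{
	printf("switch_mm(active_mm -> efi_mm), preemption disabled\n");
}

static void efi_virtmap_unload_stub(void)
{
	printf("switch_mm(efi_mm -> active_mm), preemption enabled\n");
}

static int firmware_get_time_stub(void)
{
	printf("runtime service executes under the EFI page tables\n");
	return 0;
}

/* the bracketing pattern the generic runtime wrappers follow */
int main(void)
{
	int status;

	efi_virtmap_load_stub();
	status = firmware_get_time_stub();
	efi_virtmap_unload_stub();
	return status;
}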
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 973eef234b36..41c1d00bf933 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -32,10 +32,6 @@ static struct efivars *__efivars;
*/
static DEFINE_SEMAPHORE(efivars_lock);
-static bool efivar_wq_enabled = true;
-DECLARE_WORK(efivar_work, NULL);
-EXPORT_SYMBOL_GPL(efivar_work);
-
static bool
validate_device_path(efi_char16_t *var_name, int match, u8 *buffer,
unsigned long len)
@@ -391,13 +387,6 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
size_t i, len8 = len16 / sizeof(efi_char16_t);
char *str8;
- /*
- * Disable the workqueue since the algorithm it uses for
- * detecting new variables won't work with this buggy
- * implementation of GetNextVariableName().
- */
- efivar_wq_enabled = false;
-
str8 = kzalloc(len8, GFP_KERNEL);
if (!str8)
return;
@@ -414,7 +403,6 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* efivar_init - build the initial list of EFI variables
* @func: callback function to invoke for every variable
* @data: function-specific data to pass to @func
- * @atomic: do we need to execute the @func-loop atomically?
* @duplicates: error if we encounter duplicates on @head?
* @head: initialised head of variable list
*
@@ -1158,16 +1146,6 @@ struct kobject *efivars_kobject(void)
EXPORT_SYMBOL_GPL(efivars_kobject);
/**
- * efivar_run_worker - schedule the efivar worker thread
- */
-void efivar_run_worker(void)
-{
- if (efivar_wq_enabled)
- schedule_work(&efivar_work);
-}
-EXPORT_SYMBOL_GPL(efivar_run_worker);
-
-/**
* efivars_register - register an efivars
* @efivars: efivars to register
* @ops: efivars operations
diff --git a/drivers/firmware/efi/x86_fake_mem.c b/drivers/firmware/efi/x86_fake_mem.c
index e5d6d5a1b240..0bafcc1bb0f6 100644
--- a/drivers/firmware/efi/x86_fake_mem.c
+++ b/drivers/firmware/efi/x86_fake_mem.c
@@ -38,7 +38,7 @@ void __init efi_fake_memmap_early(void)
m_start = mem->range.start;
m_end = mem->range.end;
for_each_efi_memory_desc(md) {
- u64 start, end;
+ u64 start, end, size;
if (md->type != EFI_CONVENTIONAL_MEMORY)
continue;
@@ -58,11 +58,17 @@ void __init efi_fake_memmap_early(void)
*/
start = max(start, m_start);
end = min(end, m_end);
+ size = end - start + 1;
if (end <= start)
continue;
- e820__range_update(start, end - start + 1, E820_TYPE_RAM,
- E820_TYPE_SOFT_RESERVED);
+
+ /*
+ * Ensure each efi_fake_mem instance results in
+ * a unique e820 resource
+ */
+ e820__range_remove(start, size, E820_TYPE_RAM, 1);
+ e820__range_add(start, size, E820_TYPE_SOFT_RESERVED);
e820__update_table(e820_table);
}
}
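
The clamping arithmetic in this hunk is easy to check by hand; a standalone sketch with sample ranges, using inclusive end addresses as the e820 calls do:

#include <stdio.h>

/*
 * Clamp an EFI descriptor [start, end] to the fake-mem range
 * [m_start, m_end]; all bounds are inclusive, as in the hunk above.
 */
static void clamp(unsigned long long start, unsigned long long end,
		  unsigned long long m_start, unsigned long long m_end)
{
	unsigned long long size;

	start = start > m_start ? start : m_start;	/* max() */
	end = end < m_end ? end : m_end;		/* min() */
	size = end - start + 1;

	if (end <= start) {
		printf("no overlap\n");
		return;
	}
	printf("reserve [0x%llx, 0x%llx], size 0x%llx\n", start, end, size);
}

int main(void)
{
	/* a 1 MiB descriptor whose upper half falls in the fake-mem range */
	clamp(0x100000, 0x1fffff, 0x180000, 0x2fffff);
	/* prints: reserve [0x180000, 0x1fffff], size 0x80000 */
	return 0;
}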
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index a3a6ca659ffa..97968aece54f 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -15,7 +15,7 @@ config GOOGLE_SMI
help
Say Y here if you want to enable SMI callbacks for Google
platforms. This provides an interface for writing to and
- clearing the event log. If EFI_VARS is also enabled this
+ clearing the event log. If CONFIG_EFI is also enabled this
driver provides an interface for reading and writing NVRAM
variables.
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 5b2011ebbe26..7d9367b22010 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -302,7 +302,7 @@ static int gsmi_exec(u8 func, u8 sub)
return rc;
}
-#ifdef CONFIG_EFI_VARS
+#ifdef CONFIG_EFI
static struct efivars efivars;
@@ -483,7 +483,7 @@ static const struct efivar_operations efivar_ops = {
.get_next_variable = gsmi_get_next_variable,
};
-#endif /* CONFIG_EFI_VARS */
+#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -1007,7 +1007,7 @@ static __init int gsmi_init(void)
goto out_remove_bin_file;
}
-#ifdef CONFIG_EFI_VARS
+#ifdef CONFIG_EFI
ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
@@ -1047,7 +1047,7 @@ static void __exit gsmi_exit(void)
unregister_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_unregister(&panic_notifier_list,
&gsmi_panic_notifier);
-#ifdef CONFIG_EFI_VARS
+#ifdef CONFIG_EFI
efivars_unregister(&efivars);
#endif
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index af3d6d9ead28..946eea292b52 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -46,6 +46,7 @@
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/sci.h>
+#include <linux/firmware/imx/svc/rm.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -256,6 +257,9 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
struct imx_sc_pm_domain *sc_pd;
int ret;
+ if (!imx_sc_rm_is_resource_owned(pm_ipc_handle, pd_ranges->rsrc + idx))
+ return NULL;
+
sc_pd = devm_kzalloc(dev, sizeof(*sc_pd), GFP_KERNEL);
if (!sc_pd)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 92013ecc2d9e..00af99b6f97c 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -151,12 +151,15 @@ static u32 psci_get_version(void)
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
-int psci_set_osi_mode(void)
+int psci_set_osi_mode(bool enable)
{
+ unsigned long suspend_mode;
int err;
- err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
- PSCI_1_0_SUSPEND_MODE_OSI, 0, 0);
+ suspend_mode = enable ? PSCI_1_0_SUSPEND_MODE_OSI :
+ PSCI_1_0_SUSPEND_MODE_PC;
+
+ err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
return psci_to_linux_errno(err);
}
@@ -546,8 +549,7 @@ static int __init psci_1_0_init(struct device_node *np)
pr_info("OSI mode supported.\n");
/* Default to PC mode. */
- invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
- PSCI_1_0_SUSPEND_MODE_PC, 0, 0);
+ psci_set_osi_mode(false);
}
return 0;
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e8bbf2d38ae7..7be48c1bec96 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -756,6 +756,30 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start,
+ u32 cp_nonpixel_size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_VIDEO_VAR,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ .args[0] = cp_start,
+ .args[1] = cp_size,
+ .args[2] = cp_nonpixel_start,
+ .args[3] = cp_nonpixel_size,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ struct qcom_scm_res res;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
+
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
size_t mem_sz, phys_addr_t src, size_t src_sz,
phys_addr_t dest, size_t dest_sz)
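
A hedged sketch of how a caller might use the new export; the region layout and sizes are made up for illustration, and the stub below merely stands in for the real SCM call so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
#define SZ_32M	(32U << 20)
#define SZ_64M	(64U << 20)

/* stub standing in for the real SCM call, to keep the sketch self-contained */
static int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
					  u32 cp_nonpixel_start,
					  u32 cp_nonpixel_size)
{
	printf("protect pixel [0x%x +0x%x], non-pixel [0x%x +0x%x]\n",
	       cp_start, cp_size, cp_nonpixel_start, cp_nonpixel_size);
	return 0;
}

/* carve a protected video region into pixel and non-pixel sub-ranges */
int main(void)
{
	u32 cp_start = 0;
	u32 cp_size = SZ_64M;

	return qcom_scm_mem_protect_video_var(cp_start, cp_size,
					      cp_start + cp_size, SZ_32M);
}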
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 38ea614d29fe..95cd1ac30ab0 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -97,6 +97,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
+#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
#define QCOM_SCM_SVC_OCMEM 0x0f
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 6945c3c96637..0078260fbabe 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -215,6 +215,9 @@ static void fw_cfg_io_cleanup(void)
# define FW_CFG_CTRL_OFF 0x08
# define FW_CFG_DATA_OFF 0x00
# define FW_CFG_DMA_OFF 0x10
+# elif defined(CONFIG_PARISC) /* parisc */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x04
# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
# define FW_CFG_CTRL_OFF 0x00
# define FW_CFG_DATA_OFF 0x02
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 8f2fb4c562da..2371d08bdd17 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -12,8 +12,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
@@ -21,8 +19,6 @@
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
-#define VL805_PCI_CONFIG_VERSION_OFFSET 0x50
-
static struct platform_device *rpi_hwmon;
static struct platform_device *rpi_clk;
@@ -301,63 +297,6 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
-/*
- * The Raspberry Pi 4 gets its USB functionality from VL805, a PCIe chip that
- * implements xHCI. After a PCI reset, VL805's firmware may either be loaded
- * directly from an EEPROM or, if not present, by the SoC's co-processor,
- * VideoCore. RPi4's VideoCore OS contains both the non public firmware load
- * logic and the VL805 firmware blob. This function triggers the aforementioned
- * process.
- */
-int rpi_firmware_init_vl805(struct pci_dev *pdev)
-{
- struct device_node *fw_np;
- struct rpi_firmware *fw;
- u32 dev_addr, version;
- int ret;
-
- fw_np = of_find_compatible_node(NULL, NULL,
- "raspberrypi,bcm2835-firmware");
- if (!fw_np)
- return 0;
-
- fw = rpi_firmware_get(fw_np);
- of_node_put(fw_np);
- if (!fw)
- return -ENODEV;
-
- /*
- * Make sure we don't trigger a firmware load unnecessarily.
- *
- * If something went wrong with PCI, this whole exercise would be
- * futile as VideoCore expects from us a configured PCI bus. Just take
- * the faulty version (likely ~0) and let xHCI's registration fail
- * further down the line.
- */
- pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, &version);
- if (version)
- goto exit;
-
- dev_addr = pdev->bus->number << 20 | PCI_SLOT(pdev->devfn) << 15 |
- PCI_FUNC(pdev->devfn) << 12;
-
- ret = rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
- &dev_addr, sizeof(dev_addr));
- if (ret)
- return ret;
-
- /* Wait for vl805 to startup */
- usleep_range(200, 1000);
-
- pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET,
- &version);
-exit:
- pci_info(pdev, "VL805 firmware version %08x\n", version);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rpi_firmware_init_vl805);
-
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index 4e80921ee212..00c88b809c0c 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -24,8 +24,10 @@ enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
return smccc_conduit;
}
+EXPORT_SYMBOL_GPL(arm_smccc_1_1_get_conduit);
u32 arm_smccc_get_version(void)
{
return smccc_version;
}
+EXPORT_SYMBOL_GPL(arm_smccc_get_version);
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 4d93d8925e14..0742a90cb844 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -856,7 +856,8 @@ static const struct tegra_bpmp_soc tegra210_soc = {
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 722af9ee53d6..896f53ec7857 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1106,7 +1106,8 @@ static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id)
{
- return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
MSG_CLOCK_SW_STATE_UNREQ);
}
@@ -1125,7 +1126,8 @@ static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id)
{
- return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
MSG_CLOCK_SW_STATE_AUTO);
}
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 8d1ff2454e2e..efb8a66efc68 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -147,6 +147,9 @@ static int zynqmp_pm_feature(u32 api_id)
return 0;
/* Return value if feature is already checked */
+	if (api_id >= ARRAY_SIZE(zynqmp_pm_features))
+ return PM_FEATURE_INVALID;
+
if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
return zynqmp_pm_features[api_id];
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 6ce1ed222ea4..531266287eee 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -148,7 +148,7 @@ struct fme_perf_priv {
struct device *dev;
void __iomem *ioaddr;
struct pmu pmu;
- u64 id;
+ u16 id;
u32 fab_users;
u32 fab_port_id;
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index e220bec2927d..a2203d03c9e2 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -31,12 +31,12 @@ struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
-static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
+static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
{
- if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
+ if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
return NULL;
- return pcim_iomap_table(pcidev)[bar];
+ return pcim_iomap_table(pcidev)[0];
}
static int cci_pci_alloc_irq(struct pci_dev *pcidev)
@@ -156,8 +156,8 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
goto irq_free_exit;
}
- /* start to find Device Feature List from Bar 0 */
- base = cci_pci_ioremap_bar(pcidev, 0);
+ /* start to find Device Feature List in Bar 0 */
+ base = cci_pci_ioremap_bar0(pcidev);
if (!base) {
ret = -ENOMEM;
goto irq_free_exit;
@@ -172,7 +172,7 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
start = pci_resource_start(pcidev, 0);
len = pci_resource_len(pcidev, 0);
- dfl_fpga_enum_info_add_dfl(info, start, len, base);
+ dfl_fpga_enum_info_add_dfl(info, start, len);
/*
* find more Device Feature Lists (e.g. Ports) per information
@@ -196,26 +196,24 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
*/
bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
- base = cci_pci_ioremap_bar(pcidev, bar);
- if (!base)
- continue;
-
start = pci_resource_start(pcidev, bar) + offset;
len = pci_resource_len(pcidev, bar) - offset;
- dfl_fpga_enum_info_add_dfl(info, start, len,
- base + offset);
+ dfl_fpga_enum_info_add_dfl(info, start, len);
}
} else if (dfl_feature_is_port(base)) {
start = pci_resource_start(pcidev, 0);
len = pci_resource_len(pcidev, 0);
- dfl_fpga_enum_info_add_dfl(info, start, len, base);
+ dfl_fpga_enum_info_add_dfl(info, start, len);
} else {
ret = -ENODEV;
goto irq_free_exit;
}
+ /* release I/O mappings for next step enumeration */
+ pcim_iounmap_regions(pcidev, BIT(0));
+
/* start enumeration with prepared enumeration information */
cdev = dfl_fpga_feature_devs_enumerate(info);
if (IS_ERR(cdev)) {
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 649958a36e62..b450870b75ed 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -30,12 +30,6 @@ static DEFINE_MUTEX(dfl_id_mutex);
* index to dfl_chardevs table. If no chardev support just set devt_type
* as one invalid index (DFL_FPGA_DEVT_MAX).
*/
-enum dfl_id_type {
- FME_ID, /* fme id allocation and mapping */
- PORT_ID, /* port id allocation and mapping */
- DFL_ID_MAX,
-};
-
enum dfl_fpga_devt_type {
DFL_FPGA_DEVT_FME,
DFL_FPGA_DEVT_PORT,
@@ -58,7 +52,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
*/
struct dfl_dev_info {
const char *name;
- u32 dfh_id;
+ u16 dfh_id;
struct idr id;
enum dfl_fpga_devt_type devt_type;
};
@@ -134,7 +128,7 @@ static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
return DFL_ID_MAX;
}
-static enum dfl_id_type dfh_id_to_type(u32 id)
+static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -250,6 +244,249 @@ int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
+static DEFINE_IDA(dfl_device_ida);
+
+static const struct dfl_device_id *
+dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
+{
+ if (id->type == ddev->type && id->feature_id == ddev->feature_id)
+ return id;
+
+ return NULL;
+}
+
+static int dfl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+ struct dfl_driver *ddrv = to_dfl_drv(drv);
+ const struct dfl_device_id *id_entry;
+
+ id_entry = ddrv->id_table;
+ if (id_entry) {
+ while (id_entry->feature_id) {
+ if (dfl_match_one_device(id_entry, ddev)) {
+ ddev->id_entry = id_entry;
+ return 1;
+ }
+ id_entry++;
+ }
+ }
+
+ return 0;
+}
+
+static int dfl_bus_probe(struct device *dev)
+{
+ struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return ddrv->probe(ddev);
+}
+
+static int dfl_bus_remove(struct device *dev)
+{
+ struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ if (ddrv->remove)
+ ddrv->remove(ddev);
+
+ return 0;
+}
+
+static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ /* The type has 4 valid bits and feature_id has 12 valid bits */
+ return add_uevent_var(env, "MODALIAS=dfl:t%01Xf%03X",
+ ddev->type, ddev->feature_id);
+}
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return sprintf(buf, "0x%x\n", ddev->type);
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t
+feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ return sprintf(buf, "0x%x\n", ddev->feature_id);
+}
+static DEVICE_ATTR_RO(feature_id);
+
+static struct attribute *dfl_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_feature_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dfl_dev);
+
+static struct bus_type dfl_bus_type = {
+ .name = "dfl",
+ .match = dfl_bus_match,
+ .probe = dfl_bus_probe,
+ .remove = dfl_bus_remove,
+ .uevent = dfl_bus_uevent,
+ .dev_groups = dfl_dev_groups,
+};
+
+static void release_dfl_dev(struct device *dev)
+{
+ struct dfl_device *ddev = to_dfl_dev(dev);
+
+ if (ddev->mmio_res.parent)
+ release_resource(&ddev->mmio_res);
+
+ ida_simple_remove(&dfl_device_ida, ddev->id);
+ kfree(ddev->irqs);
+ kfree(ddev);
+}
+
+static struct dfl_device *
+dfl_dev_add(struct dfl_feature_platform_data *pdata,
+ struct dfl_feature *feature)
+{
+ struct platform_device *pdev = pdata->dev;
+ struct resource *parent_res;
+ struct dfl_device *ddev;
+ int id, i, ret;
+
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return ERR_PTR(-ENOMEM);
+
+ id = ida_simple_get(&dfl_device_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(&pdev->dev, "unable to get id\n");
+ kfree(ddev);
+ return ERR_PTR(id);
+ }
+
+ /* after device_initialize(), resources are freed via put_device() */
+ device_initialize(&ddev->dev);
+ ddev->dev.parent = &pdev->dev;
+ ddev->dev.bus = &dfl_bus_type;
+ ddev->dev.release = release_dfl_dev;
+ ddev->id = id;
+ ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
+ if (ret)
+ goto put_dev;
+
+ ddev->type = feature_dev_id_type(pdev);
+ ddev->feature_id = feature->id;
+ ddev->cdev = pdata->dfl_cdev;
+
+ /* add mmio resource */
+ parent_res = &pdev->resource[feature->resource_index];
+ ddev->mmio_res.flags = IORESOURCE_MEM;
+ ddev->mmio_res.start = parent_res->start;
+ ddev->mmio_res.end = parent_res->end;
+ ddev->mmio_res.name = dev_name(&ddev->dev);
+ ret = insert_resource(parent_res, &ddev->mmio_res);
+ if (ret) {
+ dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
+ dev_name(&ddev->dev), &ddev->mmio_res);
+ goto put_dev;
+ }
+
+ /* then add irq resource */
+ if (feature->nr_irqs) {
+ ddev->irqs = kcalloc(feature->nr_irqs,
+ sizeof(*ddev->irqs), GFP_KERNEL);
+ if (!ddev->irqs) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
+ for (i = 0; i < feature->nr_irqs; i++)
+ ddev->irqs[i] = feature->irq_ctx[i].irq;
+
+ ddev->num_irqs = feature->nr_irqs;
+ }
+
+ ret = device_add(&ddev->dev);
+ if (ret)
+ goto put_dev;
+
+ dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
+ return ddev;
+
+put_dev:
+ /* calls release_dfl_dev() which does the cleanup */
+ put_device(&ddev->dev);
+ return ERR_PTR(ret);
+}
+
+static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_feature *feature;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (feature->ddev) {
+ device_unregister(&feature->ddev->dev);
+ feature->ddev = NULL;
+ }
+ }
+}
+
+static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+{
+ struct dfl_feature *feature;
+ struct dfl_device *ddev;
+ int ret;
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
+ if (feature->ioaddr)
+ continue;
+
+ if (feature->ddev) {
+ ret = -EEXIST;
+ goto err;
+ }
+
+ ddev = dfl_dev_add(pdata, feature);
+ if (IS_ERR(ddev)) {
+ ret = PTR_ERR(ddev);
+ goto err;
+ }
+
+ feature->ddev = ddev;
+ }
+
+ return 0;
+
+err:
+ dfl_devs_remove(pdata);
+ return ret;
+}
+
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
+{
+ if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
+ return -EINVAL;
+
+ dfl_drv->drv.owner = owner;
+ dfl_drv->drv.bus = &dfl_bus_type;
+
+ return driver_register(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(__dfl_driver_register);
+
+void dfl_driver_unregister(struct dfl_driver *dfl_drv)
+{
+ driver_unregister(&dfl_drv->drv);
+}
+EXPORT_SYMBOL(dfl_driver_unregister);
+
+#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)
+
/**
* dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
* @pdev: feature device.
@@ -259,12 +496,15 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_devs_remove(pdata);
+
+ dfl_fpga_dev_for_each_feature(pdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
feature->ops = NULL;
}
+ }
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
@@ -273,8 +513,22 @@ static int dfl_feature_instance_init(struct platform_device *pdev,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
+ void __iomem *base;
int ret = 0;
+ if (!is_header_feature(feature)) {
+ base = devm_platform_ioremap_resource(pdev,
+ feature->resource_index);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev,
+ "ioremap failed for feature 0x%x!\n",
+ feature->id);
+ return PTR_ERR(base);
+ }
+
+ feature->ioaddr = base;
+ }
+
if (drv->ops->init) {
ret = drv->ops->init(pdev, feature);
if (ret)
@@ -331,6 +585,10 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
+ ret = dfl_devs_add(pdata);
+ if (ret)
+ goto exit;
+
return 0;
exit:
dfl_fpga_dev_feature_uinit(pdev);
@@ -427,7 +685,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
* @feature_dev: current feature device.
- * @ioaddr: header register region address of feature device in enumeration.
+ * @ioaddr: header register region address of current FIU in enumeration.
+ * @start: register resource start of current FIU.
+ * @len: max register resource length of current FIU.
* @sub_features: a sub features linked list for feature device in enumeration.
* @feature_num: number of sub features for feature device in enumeration.
*/
@@ -439,6 +699,8 @@ struct build_feature_devs_info {
struct platform_device *feature_dev;
void __iomem *ioaddr;
+ resource_size_t start;
+ resource_size_t len;
struct list_head sub_features;
int feature_num;
};
@@ -454,7 +716,7 @@ struct build_feature_devs_info {
* @nr_irqs: number of irqs of this sub feature.
*/
struct dfl_feature_info {
- u64 fid;
+ u16 fid;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
@@ -484,10 +746,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
struct dfl_feature_platform_data *pdata;
struct dfl_feature_info *finfo, *p;
enum dfl_id_type type;
- int ret, index = 0;
-
- if (!fdev)
- return 0;
+ int ret, index = 0, res_idx = 0;
type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
@@ -530,16 +789,32 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index];
+ struct dfl_feature *feature = &pdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
feature->dev = fdev;
feature->id = finfo->fid;
- feature->resource_index = index;
- feature->ioaddr = finfo->ioaddr;
- fdev->resource[index++] = finfo->mmio_res;
+
+ /*
+ * The FIU header feature provides fundamental functions (SR-IOV
+ * setup, port enable/disable) needed by the DFL bus device and the
+ * other sub features, so its mmio resource is mapped by the DFL bus
+ * device itself and must not be assigned to the feature devices
+ * (dfl-fme/afu) again.
+ */
+ if (is_header_feature(feature)) {
+ feature->resource_index = -1;
+ feature->ioaddr =
+ devm_ioremap_resource(binfo->dev,
+ &finfo->mmio_res);
+ if (IS_ERR(feature->ioaddr))
+ return PTR_ERR(feature->ioaddr);
+ } else {
+ feature->resource_index = res_idx;
+ fdev->resource[res_idx++] = finfo->mmio_res;
+ }
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
@@ -582,19 +857,13 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type, void __iomem *ioaddr)
+ enum dfl_id_type type)
{
struct platform_device *fdev;
- int ret;
if (type >= DFL_ID_MAX)
return -EINVAL;
- /* we will create a new device, commit current device first */
- ret = build_info_commit_dev(binfo);
- if (ret)
- return ret;
-
/*
* we use -ENODEV as the initialization indicator, which shows
* whether the id needs to be reclaimed
@@ -605,7 +874,7 @@ build_info_create_dev(struct build_feature_devs_info *binfo,
binfo->feature_dev = fdev;
binfo->feature_num = 0;
- binfo->ioaddr = ioaddr;
+
INIT_LIST_HEAD(&binfo->sub_features);
fdev->id = dfl_id_alloc(type, &fdev->dev);
@@ -649,7 +918,7 @@ static inline u32 feature_size(void __iomem *start)
return ofst ? ofst : 4096;
}
-static u64 feature_id(void __iomem *start)
+static u16 feature_id(void __iomem *start)
{
u64 v = readq(start + DFH);
u16 id = FIELD_GET(DFH_ID, v);
@@ -667,7 +936,7 @@ static u64 feature_id(void __iomem *start)
}
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
- resource_size_t ofst, u64 fid,
+ resource_size_t ofst, u16 fid,
unsigned int *irq_base, unsigned int *nr_irqs)
{
void __iomem *base = binfo->ioaddr + ofst;
@@ -713,12 +982,12 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
return 0;
}
- dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n",
+ dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
fid, ibase, inr);
if (ibase + inr > binfo->nr_irqs) {
dev_err(binfo->dev,
- "Invalid interrupt number in feature 0x%llx\n", fid);
+ "Invalid interrupt number in feature 0x%x\n", fid);
return -EINVAL;
}
@@ -726,7 +995,7 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
virq = binfo->irq_table[ibase + i];
if (virq < 0 || virq > NR_IRQS) {
dev_err(binfo->dev,
- "Invalid irq table entry for feature 0x%llx\n",
+ "Invalid irq table entry for feature 0x%x\n",
fid);
return -EINVAL;
}
@@ -747,18 +1016,17 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
*/
static int
create_feature_instance(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
- resource_size_t size, u64 fid)
+ resource_size_t ofst, resource_size_t size, u16 fid)
{
unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
int ret;
/* read feature size and id if inputs are invalid */
- size = size ? size : feature_size(dfl->ioaddr + ofst);
- fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
+ size = size ? size : feature_size(binfo->ioaddr + ofst);
+ fid = fid ? fid : feature_id(binfo->ioaddr + ofst);
- if (dfl->len - ofst < size)
+ if (binfo->len - ofst < size)
return -EINVAL;
ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
@@ -770,12 +1038,11 @@ create_feature_instance(struct build_feature_devs_info *binfo,
return -ENOMEM;
finfo->fid = fid;
- finfo->mmio_res.start = dfl->start + ofst;
+ finfo->mmio_res.start = binfo->start + ofst;
finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
finfo->mmio_res.flags = IORESOURCE_MEM;
finfo->irq_base = irq_base;
finfo->nr_irqs = nr_irqs;
- finfo->ioaddr = dfl->ioaddr + ofst;
list_add_tail(&finfo->node, &binfo->sub_features);
binfo->feature_num++;
@@ -784,7 +1051,6 @@ create_feature_instance(struct build_feature_devs_info *binfo,
}
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl,
resource_size_t ofst)
{
u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
@@ -792,21 +1058,22 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
WARN_ON(!size);
- return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
+ return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
+#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+
static int parse_feature_afu(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl,
resource_size_t ofst)
{
- if (!binfo->feature_dev) {
+ if (!is_feature_dev_detected(binfo)) {
dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
return -EINVAL;
}
switch (feature_dev_id_type(binfo->feature_dev)) {
case PORT_ID:
- return parse_feature_port_afu(binfo, dfl, ofst);
+ return parse_feature_port_afu(binfo, ofst);
default:
dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
binfo->feature_dev->name);
@@ -815,35 +1082,79 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return 0;
}
+static int build_info_prepare(struct build_feature_devs_info *binfo,
+ resource_size_t start, resource_size_t len)
+{
+ struct device *dev = binfo->dev;
+ void __iomem *ioaddr;
+
+ if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
+ dev_err(dev, "request region fail, start:%pa, len:%pa\n",
+ &start, &len);
+ return -EBUSY;
+ }
+
+ ioaddr = devm_ioremap(dev, start, len);
+ if (!ioaddr) {
+ dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
+ &start, &len);
+ return -ENOMEM;
+ }
+
+ binfo->start = start;
+ binfo->len = len;
+ binfo->ioaddr = ioaddr;
+
+ return 0;
+}
+
+static void build_info_complete(struct build_feature_devs_info *binfo)
+{
+ devm_iounmap(binfo->dev, binfo->ioaddr);
+ devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
+}
+
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl,
resource_size_t ofst)
{
- u32 id, offset;
- u64 v;
int ret = 0;
+ u32 offset;
+ u16 id;
+ u64 v;
+
+ if (is_feature_dev_detected(binfo)) {
+ build_info_complete(binfo);
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
- v = readq(dfl->ioaddr + ofst + DFH);
+ ret = build_info_prepare(binfo, binfo->start + ofst,
+ binfo->len - ofst);
+ if (ret)
+ return ret;
+ }
+
+ v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
/* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id),
- dfl->ioaddr + ofst);
+ ret = build_info_create_dev(binfo, dfh_id_to_type(id));
if (ret)
return ret;
- ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
+ ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
return ret;
/*
* find and parse FIU's child AFU via its NEXT_AFU register.
* please note that only Port has valid NEXT_AFU pointer per spec.
*/
- v = readq(dfl->ioaddr + ofst + NEXT_AFU);
+ v = readq(binfo->ioaddr + NEXT_AFU);
offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
if (offset)
- return parse_feature_afu(binfo, dfl, ofst + offset);
+ return parse_feature_afu(binfo, offset);
dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
@@ -851,41 +1162,39 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
}
static int parse_feature_private(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl,
resource_size_t ofst)
{
- if (!binfo->feature_dev) {
- dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
- (unsigned long long)feature_id(dfl->ioaddr + ofst));
+ if (!is_feature_dev_detected(binfo)) {
+ dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
+ feature_id(binfo->ioaddr + ofst));
return -EINVAL;
}
- return create_feature_instance(binfo, dfl, ofst, 0, 0);
+ return create_feature_instance(binfo, ofst, 0, 0);
}
/**
* parse_feature - parse a feature on given device feature list
*
* @binfo: build feature devices information.
- * @dfl: device feature list to parse
- * @ofst: offset to feature header on this device feature list
+ * @ofst: offset to current FIU header
*/
static int parse_feature(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
+ resource_size_t ofst)
{
u64 v;
u32 type;
- v = readq(dfl->ioaddr + ofst + DFH);
+ v = readq(binfo->ioaddr + ofst + DFH);
type = FIELD_GET(DFH_TYPE, v);
switch (type) {
case DFH_TYPE_AFU:
- return parse_feature_afu(binfo, dfl, ofst);
+ return parse_feature_afu(binfo, ofst);
case DFH_TYPE_PRIVATE:
- return parse_feature_private(binfo, dfl, ofst);
+ return parse_feature_private(binfo, ofst);
case DFH_TYPE_FIU:
- return parse_feature_fiu(binfo, dfl, ofst);
+ return parse_feature_fiu(binfo, ofst);
default:
dev_info(binfo->dev,
"Feature Type %x is not supported.\n", type);
@@ -895,14 +1204,17 @@ static int parse_feature(struct build_feature_devs_info *binfo,
}
static int parse_feature_list(struct build_feature_devs_info *binfo,
- struct dfl_fpga_enum_dfl *dfl)
+ resource_size_t start, resource_size_t len)
{
- void __iomem *start = dfl->ioaddr;
- void __iomem *end = dfl->ioaddr + dfl->len;
+ resource_size_t end = start + len;
int ret = 0;
u32 ofst = 0;
u64 v;
+ ret = build_info_prepare(binfo, start, len);
+ if (ret)
+ return ret;
+
/* walk through the device feature list via DFH's next DFH pointer. */
for (; start < end; start += ofst) {
if (end - start < DFH_SIZE) {
@@ -910,11 +1222,11 @@ static int parse_feature_list(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
+ ret = parse_feature(binfo, start - binfo->start);
if (ret)
return ret;
- v = readq(start + DFH);
+ v = readq(binfo->ioaddr + start - binfo->start + DFH);
ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
/* stop parsing if EOL(End of List) is set or offset is 0 */
@@ -923,7 +1235,12 @@ static int parse_feature_list(struct build_feature_devs_info *binfo,
}
/* commit current feature device when reach the end of list */
- return build_info_commit_dev(binfo);
+ build_info_complete(binfo);
+
+ if (is_feature_dev_detected(binfo))
+ ret = build_info_commit_dev(binfo);
+
+ return ret;
}
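Each step of the walk above decodes one Device Feature Header to find the next one; a sketch of the advance logic, where DFH_EOL is an assumed name for the End-of-List bit (only the next-offset field appears in this patch):

    /* sketch: return the offset of the next DFH, or 0 at end of list */
    static resource_size_t dfh_next(void __iomem *ioaddr, resource_size_t pos)
    {
            u64 v = readq(ioaddr + pos + DFH);
            u32 next = FIELD_GET(DFH_NEXT_HDR_OFST, v);

            /* DFH_EOL: assumed flag name for the End-of-List bit */
            if (!next || FIELD_GET(DFH_EOL, v))
                    return 0;
            return pos + next;
    }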
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
@@ -976,7 +1293,6 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
* @info: ptr to dfl_fpga_enum_info
* @start: mmio resource address of the device feature list.
* @len: mmio resource length of the device feature list.
- * @ioaddr: mapped mmio resource address of the device feature list.
*
* One FPGA device may have one or more Device Feature Lists (DFLs), use this
* function to add information of each DFL to common data structure for next
@@ -985,8 +1301,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
* Return: 0 on success, negative error code otherwise.
*/
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
- resource_size_t start, resource_size_t len,
- void __iomem *ioaddr)
+ resource_size_t start, resource_size_t len)
{
struct dfl_fpga_enum_dfl *dfl;
@@ -996,7 +1311,6 @@ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
dfl->start = start;
dfl->len = len;
- dfl->ioaddr = ioaddr;
list_add_tail(&dfl->node, &info->dfls);
@@ -1119,7 +1433,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
* Lists.
*/
list_for_each_entry(dfl, &info->dfls, node) {
- ret = parse_feature_list(binfo, dfl);
+ ret = parse_feature_list(binfo, dfl->start, dfl->len);
if (ret) {
remove_feature_devs(cdev);
build_info_free(binfo);
@@ -1212,11 +1526,17 @@ static int __init dfl_fpga_init(void)
{
int ret;
+ ret = bus_register(&dfl_bus_type);
+ if (ret)
+ return ret;
+
dfl_ids_init();
ret = dfl_chardev_init();
- if (ret)
+ if (ret) {
dfl_ids_destroy();
+ bus_unregister(&dfl_bus_type);
+ }
return ret;
}
@@ -1424,7 +1744,7 @@ static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
return 0;
feature->irq_ctx[idx].name =
- kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx,
+ kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
dev_name(&pdev->dev), feature->id);
if (!feature->irq_ctx[idx].name)
return -ENOMEM;
@@ -1554,6 +1874,7 @@ static void __exit dfl_fpga_exit(void)
{
dfl_chardev_uinit();
dfl_ids_destroy();
+ bus_unregister(&dfl_bus_type);
}
module_init(dfl_fpga_init);
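For reference, the uevent format registered above expands as expected from its field widths: a Port FIU (type PORT_ID == 1) carrying an illustrative feature_id of 0x10 produces

    MODALIAS=dfl:t1f010

which udev/modprobe can match against module aliases of the same "dfl:tXfYYY" shape.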
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index a32dfba2a88b..5dc758f655b7 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -197,7 +197,7 @@ int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
* @id: unique dfl private feature id.
*/
struct dfl_feature_id {
- u64 id;
+ u16 id;
};
/**
@@ -236,16 +236,18 @@ struct dfl_feature_irq_ctx {
* @irq_ctx: interrupt context list.
* @nr_irqs: number of interrupt contexts.
* @ops: ops of this sub feature.
+ * @ddev: ptr to the dfl device of this sub feature.
* @priv: priv data of this feature.
*/
struct dfl_feature {
struct platform_device *dev;
- u64 id;
+ u16 id;
int resource_index;
void __iomem *ioaddr;
struct dfl_feature_irq_ctx *irq_ctx;
unsigned int nr_irqs;
const struct dfl_feature_ops *ops;
+ struct dfl_device *ddev;
void *priv;
};
@@ -365,7 +367,7 @@ struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
(feature) < (pdata)->features + (pdata)->num; (feature)++)
static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
+struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
{
struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
@@ -378,7 +380,7 @@ struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
}
static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
+void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
{
struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
@@ -389,7 +391,7 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
return NULL;
}
-static inline bool is_dfl_feature_present(struct device *dev, u64 id)
+static inline bool is_dfl_feature_present(struct device *dev, u16 id)
{
return !!dfl_get_feature_ioaddr_by_id(dev, id);
}
@@ -441,22 +443,17 @@ struct dfl_fpga_enum_info {
*
* @start: base address of this device feature list.
* @len: size of this device feature list.
- * @ioaddr: mapped base address of this device feature list.
* @node: node in list of device feature lists.
*/
struct dfl_fpga_enum_dfl {
resource_size_t start;
resource_size_t len;
-
- void __iomem *ioaddr;
-
struct list_head node;
};
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
- resource_size_t start, resource_size_t len,
- void __iomem *ioaddr);
+ resource_size_t start, resource_size_t len);
int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
unsigned int nr_irqs, int *irq_table);
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
@@ -519,4 +516,88 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg);
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID,
+ PORT_ID,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: the 4-bit DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: the 12-bit feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+ u8 type;
+ u16 feature_id;
+ unsigned long driver_data;
+};
+
+/**
+ * struct dfl_device - represent a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: the 12-bit feature identifier local to its DFL FIU type.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u8 type;
+ u16 feature_id;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+};
+
+/**
+ * struct dfl_driver - represent a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * The table must be terminated by an empty { } member.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
+
#endif /* __FPGA_DFL_H */
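Putting the new bus interfaces together, a minimal client driver might look like the sketch below; the driver name and feature ID 0x10 are invented for illustration, and the usual Kconfig/Makefile wiring is omitted:

    #include <linux/module.h>
    #include "dfl.h"	/* private header, so in-tree drivers only */

    static const struct dfl_device_id demo_ids[] = {
    	{ .type = PORT_ID, .feature_id = 0x10 },
    	{ }	/* the match loop stops at feature_id == 0 */
    };

    static int demo_probe(struct dfl_device *ddev)
    {
    	dev_info(&ddev->dev, "bound, type %u feature 0x%x\n",
    		 ddev->type, ddev->feature_id);
    	return 0;
    }

    static struct dfl_driver demo_driver = {
    	.drv		= { .name = "dfl-demo" },
    	.id_table	= demo_ids,
    	.probe		= demo_probe,	/* mandatory, checked at register time */
    };
    module_dfl_driver(demo_driver);

    MODULE_LICENSE("GPL");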
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index bde5a9d460c5..c3134b89c3fe 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * FPGA Region - Device Tree support for FPGA programming under Linux
+ * FPGA Region - Support for FPGA programming under Linux
*
* Copyright (C) 2013-2016 Altera Corporation
* Copyright (C) 2017 Intel Corporation
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 44b7c569d4dc..657a70c5fc99 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -196,17 +196,13 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
if (ret < 0)
goto init_done;
- ret = wait_for_completion_interruptible_timeout(
+ ret = wait_for_completion_timeout(
&priv->status_return_completion, S10_RECONFIG_TIMEOUT);
if (!ret) {
dev_err(dev, "timeout waiting for RECONFIG_REQUEST\n");
ret = -ETIMEDOUT;
goto init_done;
}
- if (ret < 0) {
- dev_err(dev, "error (%d) waiting for RECONFIG_REQUEST\n", ret);
- goto init_done;
- }
ret = 0;
if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
@@ -318,7 +314,7 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
*/
wait_status = 1; /* not timed out */
if (!priv->status)
- wait_status = wait_for_completion_interruptible_timeout(
+ wait_status = wait_for_completion_timeout(
&priv->status_return_completion,
S10_BUFFER_TIMEOUT);
@@ -340,13 +336,6 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
ret = -ETIMEDOUT;
break;
}
- if (wait_status < 0) {
- ret = wait_status;
- dev_err(dev,
- "error (%d) waiting for svc layer buffers\n",
- ret);
- break;
- }
}
if (!s10_free_buffers(mgr))
@@ -372,7 +361,7 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
if (ret < 0)
break;
- ret = wait_for_completion_interruptible_timeout(
+ ret = wait_for_completion_timeout(
&priv->status_return_completion, timeout);
if (!ret) {
dev_err(dev,
@@ -380,12 +369,6 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
ret = -ETIMEDOUT;
break;
}
- if (ret < 0) {
- dev_err(dev,
- "error (%d) waiting for RECONFIG_COMPLETED\n",
- ret);
- break;
- }
/* Not error or timeout, so ret is # of jiffies until timeout */
timeout = ret;
ret = 0;
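The deleted `ret < 0` branches were dead code once the interruptible variant is gone: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and the number of remaining jiffies otherwise, never an error. The resulting pattern, with done and timeout standing in for the driver's completion and deadline, is simply:

    unsigned long left = wait_for_completion_timeout(&done, timeout);

    if (!left)
    	return -ETIMEDOUT;	/* the only failure mode */
    /* left is the jiffies remaining; reusable as the next timeout */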
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 2967aa2a74e2..824abbbd631e 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -27,11 +27,22 @@ struct xilinx_spi_conf {
struct gpio_desc *done;
};
-static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+static int get_done_gpio(struct fpga_manager *mgr)
{
struct xilinx_spi_conf *conf = mgr->priv;
+ int ret;
+
+ ret = gpiod_get_value(conf->done);
+
+ if (ret < 0)
+ dev_err(&mgr->dev, "Error reading DONE (%d)\n", ret);
+
+ return ret;
+}
- if (!gpiod_get_value(conf->done))
+static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+{
+ if (!get_done_gpio(mgr))
return FPGA_MGR_STATE_RESET;
return FPGA_MGR_STATE_UNKNOWN;
@@ -57,11 +68,21 @@ static int wait_for_init_b(struct fpga_manager *mgr, int value,
if (conf->init_b) {
while (time_before(jiffies, timeout)) {
- /* dump_state(conf, "wait for init_d .."); */
- if (gpiod_get_value(conf->init_b) == value)
+ int ret = gpiod_get_value(conf->init_b);
+
+ if (ret == value)
return 0;
+
+ if (ret < 0) {
+ dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
usleep_range(100, 400);
}
+
+ dev_err(&mgr->dev, "Timeout waiting for INIT_B to %s\n",
+ value ? "assert" : "deassert");
return -ETIMEDOUT;
}
@@ -78,7 +99,7 @@ static int xilinx_spi_write_init(struct fpga_manager *mgr,
int err;
if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
- dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ dev_err(&mgr->dev, "Partial reconfiguration not supported\n");
return -EINVAL;
}
@@ -86,7 +107,6 @@ static int xilinx_spi_write_init(struct fpga_manager *mgr,
err = wait_for_init_b(mgr, 1, 1); /* min is 500 ns */
if (err) {
- dev_err(&mgr->dev, "INIT_B pin did not go low\n");
gpiod_set_value(conf->prog_b, 0);
return err;
}
@@ -94,12 +114,10 @@ static int xilinx_spi_write_init(struct fpga_manager *mgr,
gpiod_set_value(conf->prog_b, 0);
err = wait_for_init_b(mgr, 0, 0);
- if (err) {
- dev_err(&mgr->dev, "INIT_B pin did not go high\n");
+ if (err)
return err;
- }
- if (gpiod_get_value(conf->done)) {
+ if (get_done_gpio(mgr)) {
dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
return -EIO;
}
@@ -152,25 +170,46 @@ static int xilinx_spi_write_complete(struct fpga_manager *mgr,
struct fpga_image_info *info)
{
struct xilinx_spi_conf *conf = mgr->priv;
- unsigned long timeout;
+ unsigned long timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
+ bool expired = false;
+ int done;
int ret;
- if (gpiod_get_value(conf->done))
- return xilinx_spi_apply_cclk_cycles(conf);
+ /*
+ * This loop is carefully written such that if the driver is
+ * scheduled out for more than 'timeout', we still check for DONE
+ * before giving up and we apply 8 extra CCLK cycles in all cases.
+ */
+ while (!expired) {
+ expired = time_after(jiffies, timeout);
- timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
-
- while (time_before(jiffies, timeout)) {
+ done = get_done_gpio(mgr);
+ if (done < 0)
+ return done;
ret = xilinx_spi_apply_cclk_cycles(conf);
if (ret)
return ret;
- if (gpiod_get_value(conf->done))
- return xilinx_spi_apply_cclk_cycles(conf);
+ if (done)
+ return 0;
+ }
+
+ if (conf->init_b) {
+ ret = gpiod_get_value(conf->init_b);
+
+ if (ret < 0) {
+ dev_err(&mgr->dev, "Error reading INIT_B (%d)\n", ret);
+ return ret;
+ }
+
+ dev_err(&mgr->dev,
+ ret ? "CRC error or invalid device\n"
+ : "Missing sync word or incomplete bitstream\n");
+ } else {
+ dev_err(&mgr->dev, "Timeout after config data transfer\n");
}
- dev_err(&mgr->dev, "Timeout after config data transfer.\n");
return -ETIMEDOUT;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 8244da8a7241..4e60e84cd17a 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -50,6 +50,7 @@ static const int engine_page_size = 0x400;
#define FSI_SMODE 0x0 /* R/W: Mode register */
#define FSI_SISC 0x8 /* R/W: Interrupt condition */
#define FSI_SSTAT 0x14 /* R : Slave status */
+#define FSI_SLBUS 0x30 /* W : LBUS Ownership */
#define FSI_LLMODE 0x100 /* R/W: Link layer mode register */
/*
@@ -67,6 +68,11 @@ static const int engine_page_size = 0x400;
#define FSI_SMODE_LBCRR_MASK 0xf /* Clk ratio mask */
/*
+ * SLBUS fields
+ */
+#define FSI_SLBUS_FORCE 0x80000000 /* Force LBUS ownership */
+
+/*
* LLMODE fields
*/
#define FSI_LLMODE_ASYNC 0x1
@@ -981,7 +987,7 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
- __be32 data, llmode;
+ __be32 data, llmode, slbus;
int rc;
/* Currently, we only support single slaves on a link, and use the
@@ -1052,6 +1058,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
}
+ slbus = cpu_to_be32(FSI_SLBUS_FORCE);
+ rc = fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SLBUS,
+ &slbus, sizeof(slbus));
+ if (rc)
+ dev_warn(&master->dev,
+ "can't set slbus on slave:%02x:%02x %d\n", link, id,
+ rc);
+
rc = fsi_slave_set_smode(slave);
if (rc) {
dev_warn(&master->dev,
@@ -1154,10 +1168,18 @@ static int fsi_master_write(struct fsi_master *master, int link,
return rc;
}
+static int fsi_master_link_disable(struct fsi_master *master, int link)
+{
+ if (master->link_enable)
+ return master->link_enable(master, link, false);
+
+ return 0;
+}
+
static int fsi_master_link_enable(struct fsi_master *master, int link)
{
if (master->link_enable)
- return master->link_enable(master, link);
+ return master->link_enable(master, link, true);
return 0;
}
@@ -1192,12 +1214,15 @@ static int fsi_master_scan(struct fsi_master *master)
}
rc = fsi_master_break(master, link);
if (rc) {
+ fsi_master_link_disable(master, link);
dev_dbg(&master->dev,
"break to link %d failed: %d\n", link, rc);
continue;
}
- fsi_slave_init(master, link, 0);
+ rc = fsi_slave_init(master, link, 0);
+ if (rc)
+ fsi_master_link_disable(master, link);
}
return 0;
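The scan path above relies on a link_enable callback that, as changed in the masters below, takes an explicit enable flag so a single callback covers both directions. A master implementation then reduces to a sketch like the following, where foo_set_enable/foo_clear_enable are hypothetical helpers:

    static int foo_link_enable(struct fsi_master *master, int link, bool enable)
    {
    	/* hypothetical helpers writing the set-/clear-enable registers */
    	return enable ? foo_set_enable(master, link)
    		      : foo_clear_enable(master, link);
    }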
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index f49742b310c2..c006ec008a1a 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
+#include <linux/gpio/consumer.h>
#include "fsi-master.h"
@@ -21,6 +22,7 @@ struct fsi_master_aspeed {
struct device *dev;
void __iomem *base;
struct clk *clk;
+ struct gpio_desc *cfam_reset_gpio;
};
#define to_fsi_master_aspeed(m) \
@@ -82,7 +84,12 @@ static const u32 fsi_base = 0xa0000000;
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
-#define DEFAULT_DIVISOR 14
+/* Run the bus at maximum speed by default */
+#define FSI_DIVISOR_DEFAULT 1
+#define FSI_DIVISOR_CABLED 2
+static u16 aspeed_fsi_divisor = FSI_DIVISOR_DEFAULT;
+module_param_named(bus_div, aspeed_fsi_divisor, ushort, 0);
+
#define OPB_POLL_TIMEOUT 10000
static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
@@ -241,9 +248,10 @@ static int aspeed_master_read(struct fsi_master *master, int link,
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
- if (id != 0)
+ if (id > 0x3)
return -EINVAL;
+ addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
switch (size) {
@@ -273,9 +281,10 @@ static int aspeed_master_write(struct fsi_master *master, int link,
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
- if (id != 0)
+ if (id > 0x3)
return -EINVAL;
+ addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
switch (size) {
@@ -299,32 +308,28 @@ static int aspeed_master_write(struct fsi_master *master, int link,
return 0;
}
-static int aspeed_master_link_enable(struct fsi_master *master, int link)
+static int aspeed_master_link_enable(struct fsi_master *master, int link,
+ bool enable)
{
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int idx, bit, ret;
- __be32 reg, result;
+ __be32 reg;
idx = link / 32;
bit = link % 32;
reg = cpu_to_be32(0x80000000 >> bit);
+ if (!enable)
+ return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx),
+ reg);
+
ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
if (ret)
return ret;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
- ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
- if (ret)
- return ret;
-
- if (result != reg) {
- dev_err(aspeed->dev, "%s failed: %08x\n", __func__, result);
- return -EIO;
- }
-
return 0;
}
@@ -386,9 +391,11 @@ static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
- | fsi_mmode_crs0(DEFAULT_DIVISOR)
- | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | fsi_mmode_crs0(aspeed_fsi_divisor)
+ | fsi_mmode_crs1(aspeed_fsi_divisor)
| FSI_MMODE_P8_TO_LSB);
+ dev_info(aspeed->dev, "mmode set to %08x (divisor %d)\n",
+ be32_to_cpu(reg), aspeed_fsi_divisor);
opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
reg = cpu_to_be32(0xffff0000);
@@ -419,6 +426,90 @@ static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
return 0;
}
+static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
+
+ gpiod_set_value(aspeed->cfam_reset_gpio, 1);
+ usleep_range(900, 1000);
+ gpiod_set_value(aspeed->cfam_reset_gpio, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR(cfam_reset, 0200, NULL, cfam_reset_store);
+
+static int setup_cfam_reset(struct fsi_master_aspeed *aspeed)
+{
+ struct device *dev = aspeed->dev;
+ struct gpio_desc *gpio;
+ int rc;
+
+ gpio = devm_gpiod_get_optional(dev, "cfam-reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+ if (!gpio)
+ return 0;
+
+ aspeed->cfam_reset_gpio = gpio;
+
+ rc = device_create_file(dev, &dev_attr_cfam_reset);
+ if (rc) {
+ devm_gpiod_put(dev, gpio);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int tacoma_cabled_fsi_fixup(struct device *dev)
+{
+ struct gpio_desc *routing_gpio, *mux_gpio;
+ int gpio;
+
+ /*
+ * The routing GPIO is a jumper indicating we should mux for the
+ * externally connected FSI cable.
+ */
+ routing_gpio = devm_gpiod_get_optional(dev, "fsi-routing",
+ GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(routing_gpio))
+ return PTR_ERR(routing_gpio);
+ if (!routing_gpio)
+ return 0;
+
+ mux_gpio = devm_gpiod_get_optional(dev, "fsi-mux", GPIOD_ASIS);
+ if (IS_ERR(mux_gpio))
+ return PTR_ERR(mux_gpio);
+ if (!mux_gpio)
+ return 0;
+
+ gpio = gpiod_get_value(routing_gpio);
+ if (gpio < 0)
+ return gpio;
+
+ /* If the routing GPIO is high we should set the mux to low. */
+ if (gpio) {
+ /*
+ * Cable signal integrity means we should run the bus
+ * slightly slower. Do not override if a kernel param
+ * has already overridden.
+ */
+ if (aspeed_fsi_divisor == FSI_DIVISOR_DEFAULT)
+ aspeed_fsi_divisor = FSI_DIVISOR_CABLED;
+
+ gpiod_direction_output(mux_gpio, 0);
+ dev_info(dev, "FSI configured for external cable\n");
+ } else {
+ gpiod_direction_output(mux_gpio, 1);
+ }
+
+ devm_gpiod_put(dev, routing_gpio);
+
+ return 0;
+}
+
static int fsi_master_aspeed_probe(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed;
@@ -426,6 +517,12 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
int rc, links, reg;
__be32 raw;
+ rc = tacoma_cabled_fsi_fixup(&pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Tacoma FSI cable fixup failed\n");
+ return rc;
+ }
+
aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
if (!aspeed)
return -ENOMEM;
@@ -448,6 +545,11 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev)
return rc;
}
+ rc = setup_cfam_reset(aspeed);
+ if (rc)
+ dev_err(&pdev->dev, "CFAM reset GPIO setup failed\n");
+
writel(0x1, aspeed->base + OPB_CLK_SYNC);
writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
aspeed->base + OPB_IRQ_MASK);
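The divisor is now tunable at load time through the bus_div parameter introduced above; for example (module name assumed to follow the source file):

    modprobe fsi-master-aspeed bus_div=2

which matches the value the Tacoma cable fixup would pick on its own.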
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 04d10ea8d343..57a779a89b07 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -838,7 +838,7 @@ static int load_copro_firmware(struct fsi_master_acf *master)
rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
if (rc) {
dev_err(
- master->dev, "Error %d to load firwmare '%s' !\n",
+ master->dev, "Error %d to load firmware '%s' !\n",
rc, FW_FILE_NAME);
return rc;
}
@@ -1039,7 +1039,8 @@ static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
gpiod_direction_input(master->gpio_data);
}
-static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
+static int fsi_master_acf_link_enable(struct fsi_master *_master, int link,
+ bool enable)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
int rc = -EBUSY;
@@ -1049,7 +1050,7 @@ static int fsi_master_acf_link_enable(struct fsi_master *_master, int link)
mutex_lock(&master->lock);
if (!master->external_mode) {
- gpiod_set_value(master->gpio_enable, 1);
+ gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->lock);
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 4dcce17f243f..aa97c4a250cb 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -678,7 +678,8 @@ static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
gpiod_direction_input(master->gpio_data);
}
-static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
+static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link,
+ bool enable)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
int rc = -EBUSY;
@@ -688,7 +689,7 @@ static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link)
mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
- gpiod_set_value(master->gpio_enable, 1);
+ gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->cmd_lock);
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index def35cf92571..01f0a796111e 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -77,7 +77,8 @@ static int hub_master_break(struct fsi_master *master, int link)
return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
}
-static int hub_master_link_enable(struct fsi_master *master, int link)
+static int hub_master_link_enable(struct fsi_master *master, int link,
+ bool enable)
{
struct fsi_master_hub *hub = to_fsi_master_hub(master);
int idx, bit;
@@ -89,13 +90,17 @@ static int hub_master_link_enable(struct fsi_master *master, int link)
reg = cpu_to_be32(0x80000000 >> bit);
+ if (!enable)
+ return fsi_device_write(hub->upstream, FSI_MCENP0 + (4 * idx),
+ &reg, 4);
+
rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), &reg, 4);
+ if (rc)
+ return rc;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
- fsi_device_read(hub->upstream, FSI_MENP0 + (4 * idx), &reg, 4);
-
- return rc;
+ return 0;
}
static void hub_master_release(struct device *dev)
@@ -271,7 +276,7 @@ static int hub_master_remove(struct device *dev)
return 0;
}
-static struct fsi_device_id hub_master_ids[] = {
+static const struct fsi_device_id hub_master_ids[] = {
{
.engine_type = FSI_ENGID_HUB_MASTER,
.version = FSI_VERSION_ANY,
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index 6e8d4d4d5149..cd6bee5e12a7 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -130,7 +130,8 @@ struct fsi_master {
uint32_t addr, const void *val, size_t size);
int (*term)(struct fsi_master *, int link, uint8_t id);
int (*send_break)(struct fsi_master *, int link);
- int (*link_enable)(struct fsi_master *, int link);
+ int (*link_enable)(struct fsi_master *, int link,
+ bool enable);
int (*link_config)(struct fsi_master *, int link,
u8 t_send_delay, u8 t_echo_delay);
};
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index 7da9c81759ac..9eeb856c8905 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -555,7 +555,7 @@ static int occ_probe(struct platform_device *pdev)
hwmon_dev_info.id = occ->idx;
hwmon_dev = platform_device_register_full(&hwmon_dev_info);
- if (!hwmon_dev)
+ if (IS_ERR(hwmon_dev))
dev_warn(dev, "failed to create hwmon device\n");
return 0;
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index f54df9ebc8b3..bfd5e5da8020 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -1028,7 +1028,7 @@ static int sbefifo_remove(struct device *dev)
return 0;
}
-static struct fsi_device_id sbefifo_ids[] = {
+static const struct fsi_device_id sbefifo_ids[] = {
{
.engine_type = FSI_ENGID_SBE,
.version = FSI_VERSION_ANY,
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 004dc03ccf09..b45bfab7b7f5 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -627,7 +627,7 @@ static int scom_remove(struct device *dev)
return 0;
}
-static struct fsi_device_id scom_ids[] = {
+static const struct fsi_device_id scom_ids[] = {
{
.engine_type = FSI_ENGID_SCOM,
.version = FSI_VERSION_ANY,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8030fd91a3cc..5d4de5cd6759 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -66,8 +66,33 @@ config GPIO_SYSFS
This ABI is deprecated. If you want to use GPIO from userspace,
use the character device /dev/gpiochipN with the appropriate
- ioctl() operations instead. The character device is always
- available.
+ ioctl() operations instead.
+
+config GPIO_CDEV
+ bool
+ prompt "Character device (/dev/gpiochipN) support" if EXPERT
+ default y
+ help
+ Say Y here to add the character device /dev/gpiochipN interface
+ for GPIOs. The character device allows userspace to control GPIOs
+ using ioctl() operations.
+
+ Only say N if you are sure that the GPIO character device is not
+ required.
+
+ If unsure, say Y.
+
+config GPIO_CDEV_V1
+ bool "Support GPIO ABI Version 1"
+ default y
+ depends on GPIO_CDEV
+ help
+ Say Y here to support version 1 of the GPIO CDEV ABI.
+
+ This ABI version is deprecated.
+ Please use the latest ABI for new developments.
+
+ If unsure, say Y.
config GPIO_GENERIC
depends on HAS_IOMEM # Only for IOMEM drivers
@@ -202,7 +227,7 @@ config GPIO_DAVINCI
config GPIO_DWAPB
tristate "Synopsys DesignWare APB GPIO driver"
select GPIO_GENERIC
- select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
help
Say Y or M here to build support for the Synopsys DesignWare APB
GPIO block.
@@ -397,7 +422,7 @@ config GPIO_MVEBU
select REGMAP_MMIO
config GPIO_MXC
- def_bool y
+ tristate "i.MX GPIO support"
depends on ARCH_MXC || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
@@ -1223,6 +1248,18 @@ config GPIO_RC5T583
This driver provides the support for driving/reading the gpio pins
of RC5T583 device through standard gpio library.
+config GPIO_SL28CPLD
+ tristate "Kontron sl28cpld GPIO support"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ select GPIO_REGMAP
+ select GPIOLIB_IRQCHIP
+ select REGMAP_IRQ
+ help
+ This enables support for the GPIOs found on the Kontron sl28 CPLD.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-sl28cpld.
+
config GPIO_STMPE
bool "STMPE GPIOs"
depends on MFD_STMPE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4f9abff4f2dc..09dada80ac34 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -6,9 +6,8 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
-obj-$(CONFIG_GPIOLIB) += gpiolib-devprop.o
-obj-$(CONFIG_GPIOLIB) += gpiolib-cdev.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
@@ -132,6 +131,7 @@ obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
+obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_SPEAR_SPICS) += gpio-spear-spics.o
obj-$(CONFIG_GPIO_SPRD) += gpio-sprd.o
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 424a3d25350b..dfd8a4876a27 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -333,20 +333,14 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
return gpiod_get_value(fwd->descs[offset]);
}
-static int gpio_fwd_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
unsigned long *bits)
{
- struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- unsigned long *values, flags = 0;
struct gpio_desc **descs;
+ unsigned long *values;
unsigned int i, j = 0;
int error;
- if (chip->can_sleep)
- mutex_lock(&fwd->mlock);
- else
- spin_lock_irqsave(&fwd->slock, flags);
-
/* Both values bitmap and desc pointers are stored in tmp[] */
values = &fwd->tmp[0];
descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
@@ -356,16 +350,32 @@ static int gpio_fwd_get_multiple(struct gpio_chip *chip, unsigned long *mask,
descs[j++] = fwd->descs[i];
error = gpiod_get_array_value(j, descs, NULL, values);
- if (!error) {
- j = 0;
- for_each_set_bit(i, mask, fwd->chip.ngpio)
- __assign_bit(i, bits, test_bit(j++, values));
- }
+ if (error)
+ return error;
- if (chip->can_sleep)
+ j = 0;
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ __assign_bit(i, bits, test_bit(j++, values));
+
+ return 0;
+}
+
+static int gpio_fwd_get_multiple_locked(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long flags;
+ int error;
+
+ if (chip->can_sleep) {
+ mutex_lock(&fwd->mlock);
+ error = gpio_fwd_get_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
- else
+ } else {
+ spin_lock_irqsave(&fwd->slock, flags);
+ error = gpio_fwd_get_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
+ }
return error;
}
@@ -377,19 +387,13 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
gpiod_set_value(fwd->descs[offset], value);
}
-static void gpio_fwd_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
unsigned long *bits)
{
- struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- unsigned long *values, flags = 0;
struct gpio_desc **descs;
+ unsigned long *values;
unsigned int i, j = 0;
- if (chip->can_sleep)
- mutex_lock(&fwd->mlock);
- else
- spin_lock_irqsave(&fwd->slock, flags);
-
/* Both values bitmap and desc pointers are stored in tmp[] */
values = &fwd->tmp[0];
descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
@@ -400,11 +404,23 @@ static void gpio_fwd_set_multiple(struct gpio_chip *chip, unsigned long *mask,
}
gpiod_set_array_value(j, descs, NULL, values);
+}
- if (chip->can_sleep)
+static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long flags;
+
+ if (chip->can_sleep) {
+ mutex_lock(&fwd->mlock);
+ gpio_fwd_set_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
- else
+ } else {
+ spin_lock_irqsave(&fwd->slock, flags);
+ gpio_fwd_set_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
+ }
}
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -470,9 +486,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
- chip->get_multiple = gpio_fwd_get_multiple;
+ chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
- chip->set_multiple = gpio_fwd_set_multiple;
+ chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->base = -1;
chip->ngpio = ngpios;
fwd->descs = descs;
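The refactoring above follows the common split of a raw worker from a locking wrapper: the mutex-versus-spinlock choice driven by chip->can_sleep now appears exactly once per operation, and the lock-free worker stays available to future callers that already hold the lock. Schematically, with op standing for either worker:

    if (chip->can_sleep) {		/* sleeping controllers use the mutex */
    	mutex_lock(&fwd->mlock);
    	error = op(fwd, mask, bits);
    	mutex_unlock(&fwd->mlock);
    } else {				/* atomic controllers use the spinlock */
    	spin_lock_irqsave(&fwd->slock, flags);
    	error = op(fwd, mask, bits);
    	spin_unlock_irqrestore(&fwd->slock, flags);
    }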
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index e44d5de2a120..b966f5e28ebf 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1114,6 +1114,7 @@ static const struct aspeed_gpio_config ast2500_config =
static const struct aspeed_bank_props ast2600_bank_props[] = {
/* input output */
+ {4, 0xffffffff, 0x00ffffff}, /* Q/R/S/T */
{5, 0xffffffff, 0xffffff00}, /* U/V/W/X */
{6, 0x0000ffff, 0x0000ffff}, /* Y/Z */
{ },
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index cf3687a7925f..1e6b427f2c4a 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -590,10 +590,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
dev_err(dev, "Couldn't determine # GPIO banks\n");
return -ENOENT;
} else if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Couldn't determine GPIO banks: (%pe)\n",
- ERR_PTR(ret));
- return ret;
+ return dev_err_probe(dev, ret, "Couldn't determine GPIO banks\n");
}
kona_gpio->num_bank = ret;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 085b874db2a9..6f2138503726 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -237,12 +237,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
for (i = 0; i < nirq; i++) {
chips->irqs[i] = platform_get_irq(pdev, i);
- if (chips->irqs[i] < 0) {
- if (chips->irqs[i] != -EPROBE_DEFER)
- dev_info(dev, "IRQ not populated, err = %d\n",
- chips->irqs[i]);
- return chips->irqs[i];
- }
+ if (chips->irqs[i] < 0)
+ return dev_err_probe(dev, chips->irqs[i], "IRQ not populated\n");
}
chips->chip.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 1d8d55bd63aa..2a9046c0fb16 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -83,22 +82,29 @@ struct dwapb_context {
};
#endif
+struct dwapb_gpio_port_irqchip {
+ struct irq_chip irqchip;
+ unsigned int nr_irqs;
+ unsigned int irq[DWAPB_MAX_GPIOS];
+};
+
struct dwapb_gpio_port {
struct gpio_chip gc;
- bool is_registered;
+ struct dwapb_gpio_port_irqchip *pirq;
struct dwapb_gpio *gpio;
#ifdef CONFIG_PM_SLEEP
struct dwapb_context *ctx;
#endif
unsigned int idx;
};
+#define to_dwapb_gpio(_gc) \
+ (container_of(_gc, struct dwapb_gpio_port, gc)->gpio)
struct dwapb_gpio {
struct device *dev;
void __iomem *regs;
struct dwapb_gpio_port *ports;
unsigned int nr_ports;
- struct irq_domain *domain;
unsigned int flags;
struct reset_control *rst;
struct clk_bulk_data clks[DWAPB_NR_CLOCKS];
@@ -147,14 +153,6 @@ static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
}
-static int dwapb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
-{
- struct dwapb_gpio_port *port = gpiochip_get_data(gc);
- struct dwapb_gpio *gpio = port->gpio;
-
- return irq_find_mapping(gpio->domain, offset);
-}
-
static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs)
{
struct dwapb_gpio_port *port;
@@ -162,7 +160,7 @@ static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsig
for (i = 0; i < gpio->nr_ports; i++) {
port = &gpio->ports[i];
- if (port->idx == offs / 32)
+ if (port->idx == offs / DWAPB_MAX_GPIOS)
return port;
}
@@ -182,7 +180,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
pol = dwapb_read(gpio, GPIO_INT_POLARITY);
/* Just read the current value right out of the data register */
- val = gc->get(gc, offs % 32);
+ val = gc->get(gc, offs % DWAPB_MAX_GPIOS);
if (val)
pol &= ~BIT(offs);
else
@@ -193,12 +191,13 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
+ struct gpio_chip *gc = &gpio->ports[0].gc;
unsigned long irq_status;
irq_hw_number_t hwirq;
irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
- for_each_set_bit(hwirq, &irq_status, 32) {
- int gpio_irq = irq_find_mapping(gpio->domain, hwirq);
+ for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) {
+ int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq);
u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
@@ -220,11 +219,53 @@ static void dwapb_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
+{
+ return IRQ_RETVAL(dwapb_do_irq(dev_id));
+}
+
+static void dwapb_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
+ u32 val = BIT(irqd_to_hwirq(d));
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ dwapb_write(gpio, GPIO_PORTA_EOI, val);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void dwapb_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(irqd_to_hwirq(d));
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static void dwapb_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(irqd_to_hwirq(d));
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
static void dwapb_irq_enable(struct irq_data *d)
{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
unsigned long flags;
u32 val;
@@ -237,9 +278,8 @@ static void dwapb_irq_enable(struct irq_data *d)
static void dwapb_irq_disable(struct irq_data *d)
{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
unsigned long flags;
u32 val;
@@ -252,9 +292,8 @@ static void dwapb_irq_disable(struct irq_data *d)
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t bit = irqd_to_hwirq(d);
unsigned long level, polarity, flags;
@@ -288,7 +327,10 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
break;
}
- irq_setup_alt_chip(d, type);
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
if (type != IRQ_TYPE_EDGE_BOTH)
@@ -301,8 +343,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
#ifdef CONFIG_PM_SLEEP
static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
struct dwapb_context *ctx = gpio->ports[0].ctx;
irq_hw_number_t bit = irqd_to_hwirq(d);
@@ -349,84 +391,67 @@ static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
return dwapb_gpio_set_debounce(gc, offset, debounce);
}
-static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
+static int dwapb_convert_irqs(struct dwapb_gpio_port_irqchip *pirq,
+ struct dwapb_port_property *pp)
{
- return IRQ_RETVAL(dwapb_do_irq(dev_id));
+ int i;
+
+ /* Group all available IRQs into an array of parent IRQs. */
+ for (i = 0; i < pp->ngpio; ++i) {
+ if (!pp->irq[i])
+ continue;
+
+ pirq->irq[pirq->nr_irqs++] = pp->irq[i];
+ }
+
+ return pirq->nr_irqs ? 0 : -ENOENT;
}
static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_gpio_port *port,
struct dwapb_port_property *pp)
{
+ struct dwapb_gpio_port_irqchip *pirq;
struct gpio_chip *gc = &port->gc;
- struct fwnode_handle *fwnode = pp->fwnode;
- struct irq_chip_generic *irq_gc = NULL;
- unsigned int ngpio = gc->ngpio;
- struct irq_chip_type *ct;
- irq_hw_number_t hwirq;
- int err, i;
-
- if (memchr_inv(pp->irq, 0, sizeof(pp->irq)) == NULL) {
- dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx);
- return;
- }
+ struct gpio_irq_chip *girq;
+ int err;
- gpio->domain = irq_domain_create_linear(fwnode, ngpio,
- &irq_generic_chip_ops, gpio);
- if (!gpio->domain)
+ pirq = devm_kzalloc(gpio->dev, sizeof(*pirq), GFP_KERNEL);
+ if (!pirq)
return;
- err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
- DWAPB_DRIVER_NAME, handle_bad_irq,
- IRQ_NOREQUEST, 0,
- IRQ_GC_INIT_NESTED_LOCK);
- if (err) {
- dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n");
- irq_domain_remove(gpio->domain);
- gpio->domain = NULL;
- return;
- }
-
- irq_gc = irq_get_domain_generic_chip(gpio->domain, 0);
- if (!irq_gc) {
- irq_domain_remove(gpio->domain);
- gpio->domain = NULL;
- return;
+ if (dwapb_convert_irqs(pirq, pp)) {
+ dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx);
+ goto err_kfree_pirq;
}
- irq_gc->reg_base = gpio->regs;
- irq_gc->private = gpio;
-
- for (i = 0; i < 2; i++) {
- ct = &irq_gc->chip_types[i];
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = dwapb_irq_set_type;
- ct->chip.irq_enable = dwapb_irq_enable;
- ct->chip.irq_disable = dwapb_irq_disable;
+ girq = &gc->irq;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+
+ port->pirq = pirq;
+ pirq->irqchip.name = DWAPB_DRIVER_NAME;
+ pirq->irqchip.irq_ack = dwapb_irq_ack;
+ pirq->irqchip.irq_mask = dwapb_irq_mask;
+ pirq->irqchip.irq_unmask = dwapb_irq_unmask;
+ pirq->irqchip.irq_set_type = dwapb_irq_set_type;
+ pirq->irqchip.irq_enable = dwapb_irq_enable;
+ pirq->irqchip.irq_disable = dwapb_irq_disable;
#ifdef CONFIG_PM_SLEEP
- ct->chip.irq_set_wake = dwapb_irq_set_wake;
+ pirq->irqchip.irq_set_wake = dwapb_irq_set_wake;
#endif
- ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI);
- ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK);
- ct->type = IRQ_TYPE_LEVEL_MASK;
- }
-
- irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
- irq_gc->chip_types[0].handler = handle_level_irq;
- irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
- irq_gc->chip_types[1].handler = handle_edge_irq;
if (!pp->irq_shared) {
- int i;
-
- for (i = 0; i < pp->ngpio; i++) {
- if (pp->irq[i])
- irq_set_chained_handler_and_data(pp->irq[i],
- dwapb_irq_handler, gpio);
- }
+ girq->num_parents = pirq->nr_irqs;
+ girq->parents = pirq->irq;
+ girq->parent_handler_data = gpio;
+ girq->parent_handler = dwapb_irq_handler;
} else {
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+
/*
* Request a shared IRQ since MFD devices can share the
* same IRQ pin
@@ -436,33 +461,16 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
if (err) {
dev_err(gpio->dev, "error requesting IRQ\n");
- irq_domain_remove(gpio->domain);
- gpio->domain = NULL;
- return;
+ goto err_kfree_pirq;
}
}
- for (hwirq = 0; hwirq < ngpio; hwirq++)
- irq_create_mapping(gpio->domain, hwirq);
+ girq->chip = &pirq->irqchip;
- port->gc.to_irq = dwapb_gpio_to_irq;
-}
-
-static void dwapb_irq_teardown(struct dwapb_gpio *gpio)
-{
- struct dwapb_gpio_port *port = &gpio->ports[0];
- struct gpio_chip *gc = &port->gc;
- unsigned int ngpio = gc->ngpio;
- irq_hw_number_t hwirq;
+ return;
- if (!gpio->domain)
- return;
-
- for (hwirq = 0; hwirq < ngpio; hwirq++)
- irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq));
-
- irq_domain_remove(gpio->domain);
- gpio->domain = NULL;
+err_kfree_pirq:
+ devm_kfree(gpio->dev, pirq);
}
static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
@@ -510,36 +518,16 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
- err = gpiochip_add_data(&port->gc, port);
+ err = devm_gpiochip_add_data(gpio->dev, &port->gc, port);
if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
return err;
}
- /* Add GPIO-signaled ACPI event support */
- acpi_gpiochip_request_interrupts(&port->gc);
-
- port->is_registered = true;
-
return 0;
}
-static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
-{
- unsigned int m;
-
- for (m = 0; m < gpio->nr_ports; ++m) {
- struct dwapb_gpio_port *port = &gpio->ports[m];
-
- if (!port->is_registered)
- continue;
-
- acpi_gpiochip_free_interrupts(&port->gc);
- gpiochip_remove(&port->gc);
- }
-}
-
static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
struct dwapb_port_property *pp)
{
@@ -594,11 +582,12 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) {
+ if (fwnode_property_read_u32(fwnode, "ngpios", &pp->ngpio) &&
+ fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) {
dev_info(dev,
"failed to get number of gpios for port%d\n",
i);
- pp->ngpio = 32;
+ pp->ngpio = DWAPB_MAX_GPIOS;
}
pp->irq_shared = false;
@@ -615,6 +604,62 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
return pdata;
}
+static void dwapb_assert_reset(void *data)
+{
+ struct dwapb_gpio *gpio = data;
+
+ reset_control_assert(gpio->rst);
+}
+
+static int dwapb_get_reset(struct dwapb_gpio *gpio)
+{
+ int err;
+
+ gpio->rst = devm_reset_control_get_optional_shared(gpio->dev, NULL);
+ if (IS_ERR(gpio->rst)) {
+ dev_err(gpio->dev, "Cannot get reset descriptor\n");
+ return PTR_ERR(gpio->rst);
+ }
+
+ err = reset_control_deassert(gpio->rst);
+ if (err) {
+ dev_err(gpio->dev, "Cannot deassert reset lane\n");
+ return err;
+ }
+
+ return devm_add_action_or_reset(gpio->dev, dwapb_assert_reset, gpio);
+}
+
+static void dwapb_disable_clks(void *data)
+{
+ struct dwapb_gpio *gpio = data;
+
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
+}
+
+static int dwapb_get_clks(struct dwapb_gpio *gpio)
+{
+ int err;
+
+ /* Optional bus and debounce clocks */
+ gpio->clks[0].id = "bus";
+ gpio->clks[1].id = "db";
+ err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
+ gpio->clks);
+ if (err) {
+ dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
+ return err;
+ }
+
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(gpio->dev, "Cannot enable APB/Debounce clocks\n");
+ return err;
+ }
+
+ return devm_add_action_or_reset(gpio->dev, dwapb_disable_clks, gpio);
+}
+
static const struct of_device_id dwapb_of_match[] = {
{ .compatible = "snps,dw-apb-gpio", .data = (void *)0},
{ .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2},
@@ -654,11 +699,9 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
gpio->dev = &pdev->dev;
gpio->nr_ports = pdata->nports;
- gpio->rst = devm_reset_control_get_optional_shared(dev, NULL);
- if (IS_ERR(gpio->rst))
- return PTR_ERR(gpio->rst);
-
- reset_control_deassert(gpio->rst);
+ err = dwapb_get_reset(gpio);
+ if (err)
+ return err;
gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports,
sizeof(*gpio->ports), GFP_KERNEL);
@@ -669,49 +712,17 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
- /* Optional bus and debounce clocks */
- gpio->clks[0].id = "bus";
- gpio->clks[1].id = "db";
- err = devm_clk_bulk_get_optional(&pdev->dev, DWAPB_NR_CLOCKS,
- gpio->clks);
- if (err) {
- dev_err(&pdev->dev, "Cannot get APB/Debounce clocks\n");
+ err = dwapb_get_clks(gpio);
+ if (err)
return err;
- }
-
- err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable APB/Debounce clocks\n");
- return err;
- }
gpio->flags = (uintptr_t)device_get_match_data(dev);
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
if (err)
- goto out_unregister;
+ return err;
}
- platform_set_drvdata(pdev, gpio);
-
- return 0;
-
-out_unregister:
- dwapb_gpio_unregister(gpio);
- dwapb_irq_teardown(gpio);
- clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
-
- return err;
-}
-
-static int dwapb_gpio_remove(struct platform_device *pdev)
-{
- struct dwapb_gpio *gpio = platform_get_drvdata(pdev);
-
- dwapb_gpio_unregister(gpio);
- dwapb_irq_teardown(gpio);
- reset_control_assert(gpio->rst);
- clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -815,7 +826,6 @@ static struct platform_driver dwapb_gpio_driver = {
.acpi_match_table = dwapb_acpi_match,
},
.probe = dwapb_gpio_probe,
- .remove = dwapb_gpio_remove,
};
module_platform_driver(dwapb_gpio_driver);
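
For reference, the gpio_irq_chip registration pattern this rework adopts looks roughly as follows. This is a minimal sketch only; the example_* callbacks are hypothetical stand-ins for the driver's own handlers:

    static struct irq_chip example_irqchip = {
        .name = "example",
        .irq_ack = example_irq_ack,
        .irq_mask = example_irq_mask,
        .irq_unmask = example_irq_unmask,
        .irq_set_type = example_irq_set_type,
    };

    static int example_add_gpiochip(struct device *dev, struct gpio_chip *gc,
                                    unsigned int parent_irq)
    {
        struct gpio_irq_chip *girq = &gc->irq;

        girq->chip = &example_irqchip;
        girq->handler = handle_bad_irq;      /* real handler set in set_type */
        girq->default_type = IRQ_TYPE_NONE;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
        if (!girq->parents)
            return -ENOMEM;
        girq->parents[0] = parent_irq;
        girq->parent_handler = example_chained_handler;
        girq->parent_handler_data = gc;

        /* gpiolib now creates the irq_domain and mappings internally */
        return devm_gpiochip_add_data(dev, gc, NULL);
    }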
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 1652897fdf90..67ed4f238d43 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -7,10 +7,10 @@
* Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/debugfs.h>
-#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irq_sim.h>
@@ -19,21 +19,19 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include "gpiolib.h"
-#define GPIO_MOCKUP_NAME "gpio-mockup"
#define GPIO_MOCKUP_MAX_GC 10
/*
* We're storing two values per chip: the GPIO base and the number
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
-/* Maximum of three properties + the sentinel. */
-#define GPIO_MOCKUP_MAX_PROP 4
-
-#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
+/* Maximum of four properties + the sentinel. */
+#define GPIO_MOCKUP_MAX_PROP 5
/*
* struct gpio_pin_status - structure describing a GPIO status
@@ -375,31 +373,6 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_debugfs_ops);
}
-
- return;
-}
-
-static int gpio_mockup_name_lines(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- char **names;
- int i;
-
- names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
- if (!names)
- return -ENOMEM;
-
- for (i = 0; i < gc->ngpio; i++) {
- names[i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d", gc->label, i);
- if (!names[i])
- return -ENOMEM;
- }
-
- gc->names = (const char *const *)names;
-
- return 0;
}
static void gpio_mockup_dispose_mappings(void *data)
@@ -434,21 +407,14 @@ static int gpio_mockup_probe(struct platform_device *pdev)
if (rv)
return rv;
- rv = device_property_read_string(dev, "chip-name", &name);
+ rv = device_property_read_string(dev, "chip-label", &name);
if (rv)
- name = NULL;
+ name = dev_name(dev);
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- if (!name) {
- name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%c", pdev->name, pdev->id + 'A');
- if (!name)
- return -ENOMEM;
- }
-
mutex_init(&chip->lock);
gc = &chip->gc;
@@ -476,12 +442,6 @@ static int gpio_mockup_probe(struct platform_device *pdev)
for (i = 0; i < gc->ngpio; i++)
chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
- if (device_property_read_bool(dev, "named-gpio-lines")) {
- rv = gpio_mockup_name_lines(dev, chip);
- if (rv)
- return rv;
- }
-
chip->irq_sim_domain = devm_irq_domain_create_sim(dev, NULL,
gc->ngpio);
if (IS_ERR(chip->irq_sim_domain))
@@ -502,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
static struct platform_driver gpio_mockup_driver = {
.driver = {
- .name = GPIO_MOCKUP_NAME,
+ .name = "gpio-mockup",
},
.probe = gpio_mockup_probe,
};
@@ -522,14 +482,80 @@ static void gpio_mockup_unregister_pdevs(void)
}
}
-static int __init gpio_mockup_init(void)
+static __init char **gpio_mockup_make_line_names(const char *label,
+ unsigned int num_lines)
+{
+ unsigned int i;
+ char **names;
+
+ names = kcalloc(num_lines + 1, sizeof(char *), GFP_KERNEL);
+ if (!names)
+ return NULL;
+
+ for (i = 0; i < num_lines; i++) {
+ names[i] = kasprintf(GFP_KERNEL, "%s-%u", label, i);
+ if (!names[i]) {
+ kfree_strarray(names, i);
+ return NULL;
+ }
+ }
+
+ return names;
+}
+
+static int __init gpio_mockup_register_chip(int idx)
{
struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
- int i, prop, num_chips, err = 0, base;
struct platform_device_info pdevinfo;
struct platform_device *pdev;
+ char **line_names = NULL;
+ char chip_label[32];
+ int prop = 0, base;
u16 ngpio;
+ memset(properties, 0, sizeof(properties));
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+
+ snprintf(chip_label, sizeof(chip_label), "gpio-mockup-%c", idx + 'A');
+ properties[prop++] = PROPERTY_ENTRY_STRING("chip-label", chip_label);
+
+ base = gpio_mockup_range_base(idx);
+ if (base >= 0)
+ properties[prop++] = PROPERTY_ENTRY_U32("gpio-base", base);
+
+ ngpio = base < 0 ? gpio_mockup_range_ngpio(idx)
+ : gpio_mockup_range_ngpio(idx) - base;
+ properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio);
+
+ if (gpio_mockup_named_lines) {
+ line_names = gpio_mockup_make_line_names(chip_label, ngpio);
+ if (!line_names)
+ return -ENOMEM;
+
+ properties[prop++] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
+ "gpio-line-names", line_names, ngpio);
+ }
+
+ pdevinfo.name = "gpio-mockup";
+ pdevinfo.id = idx;
+ pdevinfo.properties = properties;
+
+ pdev = platform_device_register_full(&pdevinfo);
+ kfree_strarray(line_names, ngpio);
+ if (IS_ERR(pdev)) {
+ pr_err("error registering device");
+ return PTR_ERR(pdev);
+ }
+
+ gpio_mockup_pdevs[idx] = pdev;
+
+ return 0;
+}
+
+static int __init gpio_mockup_init(void)
+{
+ int i, num_chips, err;
+
if ((gpio_mockup_num_ranges < 2) ||
(gpio_mockup_num_ranges % 2) ||
(gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
@@ -551,43 +577,19 @@ static int __init gpio_mockup_init(void)
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
- gpio_mockup_err("error registering platform driver\n");
+ pr_err("error registering platform driver\n");
debugfs_remove_recursive(gpio_mockup_dbg_dir);
return err;
}
for (i = 0; i < num_chips; i++) {
- memset(properties, 0, sizeof(properties));
- memset(&pdevinfo, 0, sizeof(pdevinfo));
- prop = 0;
-
- base = gpio_mockup_range_base(i);
- if (base >= 0)
- properties[prop++] = PROPERTY_ENTRY_U32("gpio-base",
- base);
-
- ngpio = base < 0 ? gpio_mockup_range_ngpio(i)
- : gpio_mockup_range_ngpio(i) - base;
- properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio);
-
- if (gpio_mockup_named_lines)
- properties[prop++] = PROPERTY_ENTRY_BOOL(
- "named-gpio-lines");
-
- pdevinfo.name = GPIO_MOCKUP_NAME;
- pdevinfo.id = i;
- pdevinfo.properties = properties;
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- gpio_mockup_err("error registering device");
+ err = gpio_mockup_register_chip(i);
+ if (err) {
platform_driver_unregister(&gpio_mockup_driver);
gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
- return PTR_ERR(pdev);
+ return err;
}
-
- gpio_mockup_pdevs[i] = pdev;
}
return 0;
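
The per-chip registration above relies on software-node properties being duplicated by the platform core, which is why stack-allocated entries and labels are safe. A stand-alone sketch of the same pattern; register_example_chip() and its property names are illustrative only:

    static int register_example_chip(int idx)
    {
        struct property_entry props[3] = {}; /* 2 entries + zeroed sentinel */
        struct platform_device_info info = {};
        struct platform_device *pdev;
        char label[32];

        snprintf(label, sizeof(label), "example-%c", idx + 'A');
        props[0] = PROPERTY_ENTRY_STRING("chip-label", label);
        props[1] = PROPERTY_ENTRY_U16("nr-gpios", 16);

        info.name = "gpio-mockup";
        info.id = idx;
        info.properties = props; /* deep-copied by the core */

        pdev = platform_device_register_full(&info);
        return PTR_ERR_OR_ZERO(pdev);
    }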
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 1e866524a4bd..6dfca83bcd90 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -47,27 +47,6 @@ struct mpc8xxx_gpio_chip {
unsigned int irqn;
};
-/* The GPIO Input Buffer Enable register(GPIO_IBE) is used to
- * control the input enable of each individual GPIO port.
- * When an individual GPIO port’s direction is set to
- * input (GPIO_GPDIR[DRn=0]), the associated input enable must be
- * set (GPIOxGPIE[IEn]=1) to propagate the port value to the GPIO
- * Data Register.
- */
-static int ls1028a_gpio_dir_in_init(struct gpio_chip *gc)
-{
- unsigned long flags;
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
-
- spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
-
- spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
- return 0;
-}
-
/*
* This hardware has a big endian bit assignment such that GPIO line 0 is
* connected to bit 31, line 1 to bit 30 ... line 31 to bit 0.
@@ -283,7 +262,6 @@ static const struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
};
struct mpc8xxx_gpio_devtype {
- int (*gpio_dir_in_init)(struct gpio_chip *chip);
int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
int (*gpio_get)(struct gpio_chip *, unsigned int);
int (*irq_set_type)(struct irq_data *, unsigned int);
@@ -294,11 +272,6 @@ static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
.irq_set_type = mpc512x_irq_set_type,
};
-static const struct mpc8xxx_gpio_devtype ls1028a_gpio_devtype = {
- .gpio_dir_in_init = ls1028a_gpio_dir_in_init,
- .irq_set_type = mpc8xxx_irq_set_type,
-};
-
static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
.gpio_dir_out = mpc5125_gpio_dir_out,
.irq_set_type = mpc512x_irq_set_type,
@@ -319,8 +292,8 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
{ .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
{ .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
{ .compatible = "fsl,pq3-gpio", },
- { .compatible = "fsl,ls1028a-gpio", .data = &ls1028a_gpio_devtype, },
- { .compatible = "fsl,ls1088a-gpio", .data = &ls1028a_gpio_devtype, },
+ { .compatible = "fsl,ls1028a-gpio", },
+ { .compatible = "fsl,ls1088a-gpio", },
{ .compatible = "fsl,qoriq-gpio", },
{}
};
@@ -389,7 +362,16 @@ static int mpc8xxx_probe(struct platform_device *pdev)
gc->to_irq = mpc8xxx_gpio_to_irq;
- if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
+ /*
+ * The GPIO Input Buffer Enable register (GPIO_IBE) is used to control
+ * the input enable of each individual GPIO port. When an individual
+ * GPIO port’s direction is set to input (GPIO_GPDIR[DRn=0]), the
+ * associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate
+ * the port value to the GPIO Data Register.
+ */
+ if (of_device_is_compatible(np, "fsl,qoriq-gpio") ||
+ of_device_is_compatible(np, "fsl,ls1028a-gpio") ||
+ of_device_is_compatible(np, "fsl,ls1088a-gpio"))
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
ret = gpiochip_add_data(gc, mpc8xxx_gc);
@@ -411,9 +393,6 @@ static int mpc8xxx_probe(struct platform_device *pdev)
/* ack and mask all irqs */
gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
- /* enable input buffer */
- if (devtype->gpio_dir_in_init)
- devtype->gpio_dir_in_init(gc);
ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
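
The GPIO_IBE write moved from the removed ls1028a_gpio_dir_in_init() helper into probe, so the one-off input-buffer enable reduces to a single sketch-level line (gc, mpc8xxx_gc and GPIO_IBE as in the driver above):

    /* At probe time nothing else touches the registers yet, so the
     * bgpio_lock taken by the old helper is no longer needed. */
    gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff); /* all 32 ports */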
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 64278a4756f0..643f4c557ac2 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
@@ -158,6 +159,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mxc_gpio_dt_ids);
/*
* MX2 has one interrupt *for all* gpio ports. The list is used
@@ -604,3 +606,7 @@ static int __init gpio_mxc_init(void)
return platform_driver_register(&mxc_gpio_driver);
}
subsys_initcall(gpio_mxc_init);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("i.MX GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 0ea640fb636c..f7ceb2b11afc 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1114,13 +1114,23 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
void __iomem *base = bank->base;
- u32 nowake;
+ u32 mask, nowake;
bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
+ /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
+ mask &= ~bank->context.risingdetect;
+ bank->saved_datain |= mask;
+
+ /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
+ mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
+ mask &= ~bank->context.fallingdetect;
+ bank->saved_datain &= ~mask;
+
if (!may_lose_context)
goto update_gpio_context_count;
@@ -1394,10 +1404,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (bank->irq <= 0) {
if (!bank->irq)
bank->irq = -ENXIO;
- if (bank->irq != -EPROBE_DEFER)
- dev_err(dev,
- "can't get irq resource ret=%d\n", bank->irq);
- return bank->irq;
+ return dev_err_probe(dev, bank->irq, "can't get irq resource\n");
}
bank->chip.parent = dev;
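
The saved_datain adjustment above is easiest to follow with a worked example (values are illustrative, and this reading is inferred from the masks in the hunk):

    /*
     * One non-wakeup GPIO at bit 5, configured for EDGE_FALLING only:
     *
     *   mask = enabled_non_wakeup_gpios & fallingdetect   -> BIT(5)
     *   mask &= ~risingdetect                             -> BIT(5)
     *   saved_datain |= mask                              -> bit 5 forced high
     *
     * If the line is low across idle, the resume path compares against
     * the forced "high" snapshot and reports a falling edge, so the
     * event is not lost. A line set for EDGE_BOTH appears in both detect
     * masks and is filtered out by the ~risingdetect / ~fallingdetect
     * terms, leaving its snapshot untouched.
     */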
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index c2d6121c48c9..825b362eb4b7 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -90,6 +90,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pcal6416", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal6524", 24 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9535", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
+ { "pcal9554b", 8 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9555a", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "max7310", 8 | PCA953X_TYPE, },
@@ -1018,12 +1019,9 @@ static int pca953x_probe(struct i2c_client *client,
chip->client = client;
reg = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER)
- dev_err(&client->dev, "reg get err: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(reg))
+ return dev_err_probe(&client->dev, PTR_ERR(reg), "reg get err\n");
+
ret = regulator_enable(reg);
if (ret) {
dev_err(&client->dev, "reg en err: %d\n", ret);
@@ -1255,6 +1253,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9535", .data = OF_953X(16, PCA_LATCH_INT), },
+ { .compatible = "nxp,pcal9554b", .data = OF_953X( 8, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "maxim,max7310", .data = OF_953X( 8, 0), },
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index a68941d19ac6..2a07fd96707e 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -28,6 +28,47 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+/*
+ * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
+ *
+ * Bit: Description
+ * 0: Enable Interrupt Sources (Bit 0)
+ * 1: Enable Interrupt Sources (Bit 1)
+ * 2: Generate Internal PCI Bus Internal SERR# Interrupt
+ * 3: Mailbox Interrupt Enable
+ * 4: Power Management Interrupt Enable
+ * 5: Power Management Interrupt
+ * 6: Slave Read Local Data Parity Check Error Enable
+ * 7: Slave Read Local Data Parity Check Error Status
+ * 8: Internal PCI Wire Interrupt Enable
+ * 9: PCI Express Doorbell Interrupt Enable
+ * 10: PCI Abort Interrupt Enable
+ * 11: Local Interrupt Input Enable
+ * 12: Retry Abort Enable
+ * 13: PCI Express Doorbell Interrupt Active
+ * 14: PCI Abort Interrupt Active
+ * 15: Local Interrupt Input Active
+ * 16: Local Interrupt Output Enable
+ * 17: Local Doorbell Interrupt Enable
+ * 18: DMA Channel 0 Interrupt Enable
+ * 19: DMA Channel 1 Interrupt Enable
+ * 20: Local Doorbell Interrupt Active
+ * 21: DMA Channel 0 Interrupt Active
+ * 22: DMA Channel 1 Interrupt Active
+ * 23: Built-In Self-Test (BIST) Interrupt Active
+ * 24: Direct Master was the Bus Master during a Master or Target Abort
+ * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort
+ * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort
+ * 27: Target Abort after internal 256 consecutive Master Retries
+ * 28: PCI Bus wrote data to LCS_MBOX0
+ * 29: PCI Bus wrote data to LCS_MBOX1
+ * 30: PCI Bus wrote data to LCS_MBOX2
+ * 31: PCI Bus wrote data to LCS_MBOX3
+ */
+#define PLX_PEX8311_PCI_LCS_INTCSR 0x68
+#define INTCSR_INTERNAL_PCI_WIRE BIT(8)
+#define INTCSR_LOCAL_INPUT BIT(11)
+
/**
* struct idio_24_gpio_reg - GPIO device registers structure
* @out0_7: Read: FET Outputs 0-7
@@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
struct idio_24_gpio {
struct gpio_chip chip;
raw_spinlock_t lock;
+ __u8 __iomem *plx;
struct idio_24_gpio_reg __iomem *reg;
unsigned long irq_mask;
};
@@ -334,13 +376,13 @@ static void idio_24_irq_mask(struct irq_data *data)
unsigned long flags;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
unsigned char new_irq_mask;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- idio24gpio->irq_mask &= BIT(bit_offset);
- new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ idio24gpio->irq_mask &= ~BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
if (!new_irq_mask) {
cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
@@ -363,12 +405,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
unsigned long flags;
unsigned char prev_irq_mask;
const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
- const unsigned long bank_offset = bit_offset/8 * 8;
+ const unsigned long bank_offset = bit_offset / 8;
unsigned char cos_enable_state;
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
- prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
idio24gpio->irq_mask |= BIT(bit_offset);
if (!prev_irq_mask) {
@@ -455,6 +497,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *const dev = &pdev->dev;
struct idio_24_gpio *idio24gpio;
int err;
+ const size_t pci_plx_bar_index = 1;
const size_t pci_bar_index = 2;
const char *const name = pci_name(pdev);
struct gpio_irq_chip *girq;
@@ -469,12 +512,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
if (err) {
dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
return err;
}
+ idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
idio24gpio->chip.label = name;
@@ -504,6 +548,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Software board reset */
iowrite8(0, &idio24gpio->reg->soft_reset);
+ /*
+ * Enable the PLX PEX8311 internal PCI wire interrupt and the local
+ * interrupt input
+ */
+ iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
+ idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
if (err) {
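
Two distinct fixes are folded into the mask handling above; a worked example of the arithmetic:

    /*
     * For hwirq 37: bit_offset = 37 - 24 = 13, bank_offset = 13 / 8 = 1,
     * and the per-bank byte is irq_mask >> (bank_offset * 8) - the same
     * shift the old "bit_offset/8 * 8" produced, just written legibly.
     * The functional fix is in idio_24_irq_mask(): the old
     *
     *     irq_mask &= BIT(bit_offset);
     *
     * cleared every bit *except* the one being masked; the new
     * "&= ~BIT(bit_offset)" clears only the intended bit.
     */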
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 6698feabaced..8e04054cf07e 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -148,12 +148,9 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return -ENOMEM;
gpio->load_gpio = devm_gpiod_get_optional(dev, "load", GPIOD_OUT_LOW);
- if (IS_ERR(gpio->load_gpio)) {
- ret = PTR_ERR(gpio->load_gpio);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Unable to allocate load GPIO\n");
- return ret;
- }
+ if (IS_ERR(gpio->load_gpio))
+ return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
+ "Unable to allocate load GPIO\n");
mutex_init(&gpio->lock);
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index c54dd08f2cbf..d5eb9ca11901 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -183,7 +183,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
ngpio = of_irq_count(node);
- if (ngpio >= SIFIVE_GPIO_MAX) {
+ if (ngpio > SIFIVE_GPIO_MAX) {
dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
SIFIVE_GPIO_MAX);
return -ENXIO;
diff --git a/drivers/gpio/gpio-sl28cpld.c b/drivers/gpio/gpio-sl28cpld.c
new file mode 100644
index 000000000000..889b8f5622c2
--- /dev/null
+++ b/drivers/gpio/gpio-sl28cpld.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld GPIO driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* GPIO flavor */
+#define GPIO_REG_DIR 0x00
+#define GPIO_REG_OUT 0x01
+#define GPIO_REG_IN 0x02
+#define GPIO_REG_IE 0x03
+#define GPIO_REG_IP 0x04
+
+/* input-only flavor */
+#define GPI_REG_IN 0x00
+
+/* output-only flavor */
+#define GPO_REG_OUT 0x00
+
+enum sl28cpld_gpio_type {
+ SL28CPLD_GPIO = 1,
+ SL28CPLD_GPI,
+ SL28CPLD_GPO,
+};
+
+static const struct regmap_irq sl28cpld_gpio_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 8),
+ REGMAP_IRQ_REG_LINE(1, 8),
+ REGMAP_IRQ_REG_LINE(2, 8),
+ REGMAP_IRQ_REG_LINE(3, 8),
+ REGMAP_IRQ_REG_LINE(4, 8),
+ REGMAP_IRQ_REG_LINE(5, 8),
+ REGMAP_IRQ_REG_LINE(6, 8),
+ REGMAP_IRQ_REG_LINE(7, 8),
+};
+
+static int sl28cpld_gpio_irq_init(struct platform_device *pdev,
+ unsigned int base,
+ struct gpio_regmap_config *config)
+{
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip *irq_chip;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!device_property_read_bool(dev, "interrupt-controller"))
+ return 0;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
+ if (!irq_chip)
+ return -ENOMEM;
+
+ irq_chip->name = "sl28cpld-gpio-irq";
+ irq_chip->irqs = sl28cpld_gpio_irqs;
+ irq_chip->num_irqs = ARRAY_SIZE(sl28cpld_gpio_irqs);
+ irq_chip->num_regs = 1;
+ irq_chip->status_base = base + GPIO_REG_IP;
+ irq_chip->mask_base = base + GPIO_REG_IE;
+ irq_chip->mask_invert = true;
+ irq_chip->ack_base = base + GPIO_REG_IP;
+
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ config->regmap, irq,
+ IRQF_SHARED | IRQF_ONESHOT,
+ 0, irq_chip, &irq_data);
+ if (ret)
+ return ret;
+
+ config->irq_domain = regmap_irq_get_domain(irq_data);
+
+ return 0;
+}
+
+static int sl28cpld_gpio_probe(struct platform_device *pdev)
+{
+ struct gpio_regmap_config config = {0};
+ enum sl28cpld_gpio_type type;
+ struct regmap *regmap;
+ u32 base;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ type = (uintptr_t)device_get_match_data(&pdev->dev);
+ if (!type)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return -EINVAL;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ config.regmap = regmap;
+ config.parent = &pdev->dev;
+ config.ngpio = 8;
+
+ switch (type) {
+ case SL28CPLD_GPIO:
+ config.reg_dat_base = base + GPIO_REG_IN;
+ config.reg_set_base = base + GPIO_REG_OUT;
+ /* reg_dir_out_base might be zero */
+ config.reg_dir_out_base = GPIO_REGMAP_ADDR(base + GPIO_REG_DIR);
+
+ /* This type supports interrupts */
+ ret = sl28cpld_gpio_irq_init(pdev, base, &config);
+ if (ret)
+ return ret;
+ break;
+ case SL28CPLD_GPO:
+ config.reg_set_base = base + GPO_REG_OUT;
+ break;
+ case SL28CPLD_GPI:
+ config.reg_dat_base = base + GPI_REG_IN;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown type %d\n", type);
+ return -ENODEV;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
+}
+
+static const struct of_device_id sl28cpld_gpio_of_match[] = {
+ { .compatible = "kontron,sl28cpld-gpio", .data = (void *)SL28CPLD_GPIO },
+ { .compatible = "kontron,sl28cpld-gpi", .data = (void *)SL28CPLD_GPI },
+ { .compatible = "kontron,sl28cpld-gpo", .data = (void *)SL28CPLD_GPO },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_gpio_of_match);
+
+static struct platform_driver sl28cpld_gpio_driver = {
+ .probe = sl28cpld_gpio_probe,
+ .driver = {
+ .name = "sl28cpld-gpio",
+ .of_match_table = sl28cpld_gpio_of_match,
+ },
+};
+module_platform_driver(sl28cpld_gpio_driver);
+
+MODULE_DESCRIPTION("sl28cpld GPIO Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
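
One subtlety in the probe above, hinted at by the "reg_dir_out_base might be zero" comment: gpio-regmap treats a register address of zero as "not configured", so only the direction register needs the wrapper:

    /* GPIO_REG_DIR is 0x00, so for a block at offset 0 the sum
     * "base + GPIO_REG_DIR" would itself be 0 and be mistaken for
     * "no direction register"; GPIO_REGMAP_ADDR() marks it as valid. */
    config.reg_dir_out_base = GPIO_REGMAP_ADDR(base + GPIO_REG_DIR);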
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 9e23a5ae8108..0ce1543426a4 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -41,7 +41,10 @@
#define XWAY_STP_4HZ BIT(23)
#define XWAY_STP_8HZ BIT(24)
#define XWAY_STP_10HZ (BIT(24) | BIT(23))
-#define XWAY_STP_SPEED_MASK (0xf << 23)
+#define XWAY_STP_SPEED_MASK (BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27))
+
+#define XWAY_STP_FPIS_VALUE BIT(21)
+#define XWAY_STP_FPIS_MASK (BIT(20) | BIT(21))
/* clock source for automatic update */
#define XWAY_STP_UPD_FPI BIT(31)
@@ -54,7 +57,9 @@
/* 2 groups of 3 bits can be driven by the phys */
#define XWAY_STP_PHY_MASK 0x7
#define XWAY_STP_PHY1_SHIFT 27
-#define XWAY_STP_PHY2_SHIFT 15
+#define XWAY_STP_PHY2_SHIFT 3
+#define XWAY_STP_PHY3_SHIFT 6
+#define XWAY_STP_PHY4_SHIFT 15
/* STP has 3 groups of 8 bits */
#define XWAY_STP_GROUP0 BIT(0)
@@ -80,6 +85,8 @@ struct xway_stp {
u8 dsl; /* the 2 LSBs can be driven by the dsl core */
u8 phy1; /* 3 bits can be driven by phy1 */
u8 phy2; /* 3 bits can be driven by phy2 */
+ u8 phy3; /* 3 bits can be driven by phy3 */
+ u8 phy4; /* 3 bits can be driven by phy4 */
u8 reserved; /* mask out the hw driven bits in gpio_request */
};
@@ -114,7 +121,8 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
else
chip->shadow &= ~BIT(gpio);
xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
- xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+ if (!chip->reserved)
+ xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
}
/**
@@ -188,16 +196,37 @@ static void xway_stp_hw_init(struct xway_stp *chip)
chip->phy2 << XWAY_STP_PHY2_SHIFT,
XWAY_STP_CON1);
+ if (of_machine_is_compatible("lantiq,grx390")
+ || of_machine_is_compatible("lantiq,ar10")) {
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY3_SHIFT,
+ chip->phy3 << XWAY_STP_PHY3_SHIFT,
+ XWAY_STP_CON1);
+ }
+
+ if (of_machine_is_compatible("lantiq,grx390")) {
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY4_SHIFT,
+ chip->phy4 << XWAY_STP_PHY4_SHIFT,
+ XWAY_STP_CON1);
+ }
+
/* mask out the hw driven bits in gpio_request */
- chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+ chip->reserved = (chip->phy4 << 11) | (chip->phy3 << 8) | (chip->phy2 << 5)
+ | (chip->phy1 << 2) | chip->dsl;
/*
* if we have pins that are driven by hw, we need to tell the stp what
* clock to use as a timer.
*/
- if (chip->reserved)
+ if (chip->reserved) {
xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+ xway_stp_w32_mask(chip->virt, XWAY_STP_SPEED_MASK,
+ XWAY_STP_10HZ, XWAY_STP_CON1);
+ xway_stp_w32_mask(chip->virt, XWAY_STP_FPIS_MASK,
+ XWAY_STP_FPIS_VALUE, XWAY_STP_CON1);
+ }
}
static int xway_stp_probe(struct platform_device *pdev)
@@ -242,13 +271,26 @@ static int xway_stp_probe(struct platform_device *pdev)
/* find out which gpios are controlled by the phys */
if (of_machine_is_compatible("lantiq,ar9") ||
of_machine_is_compatible("lantiq,gr9") ||
- of_machine_is_compatible("lantiq,vr9")) {
+ of_machine_is_compatible("lantiq,vr9") ||
+ of_machine_is_compatible("lantiq,ar10") ||
+ of_machine_is_compatible("lantiq,grx390")) {
if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy1", &phy))
chip->phy1 = phy & XWAY_STP_PHY_MASK;
if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy2", &phy))
chip->phy2 = phy & XWAY_STP_PHY_MASK;
}
+ if (of_machine_is_compatible("lantiq,ar10") ||
+ of_machine_is_compatible("lantiq,grx390")) {
+ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy3", &phy))
+ chip->phy3 = phy & XWAY_STP_PHY_MASK;
+ }
+
+ if (of_machine_is_compatible("lantiq,grx390")) {
+ if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy4", &phy))
+ chip->phy4 = phy & XWAY_STP_PHY_MASK;
+ }
+
/* check which edge trigger we should use, default to a falling edge */
if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
chip->edge = XWAY_STP_FALLING;
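
A worked example of the widened reserved mask computed in xway_stp_hw_init():

    /*
     * With phy1 = phy2 = 0x7 and dsl = 0x3 (all hardware-driven bits in
     * use, e.g. on a vr9):
     *
     *   reserved = (0x7 << 5) | (0x7 << 2) | 0x3 = 0xff
     *
     * so the low eight STP bits are rejected in gpio_request. On ar10,
     * phy3 adds bits 8-10 (0x7 << 8), and on grx390, phy4 adds bits
     * 11-13 (0x7 << 11) on top of that.
     */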
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index ea3f68a28fea..55b8dbd13d11 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -19,9 +19,9 @@
* These registers are modified under the irq bus lock and cached to avoid
* unnecessary writes in bus_sync_unlock.
*/
-enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
+enum { REG_IBE, REG_IEV, REG_IS, REG_IE, REG_DIRECT };
-#define CACHE_NR_REGS 4
+#define CACHE_NR_REGS 5
#define CACHE_NR_BANKS 3
struct tc3589x_gpio {
@@ -200,6 +200,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IEV] = TC3589x_GPIOIEV0,
[REG_IS] = TC3589x_GPIOIS0,
[REG_IE] = TC3589x_GPIOIE0,
+ [REG_DIRECT] = TC3589x_DIRECT0,
};
int i, j;
@@ -228,6 +229,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
int mask = BIT(offset % 8);
tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
+ tc3589x_gpio->regs[REG_DIRECT][regoffset] |= mask;
}
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
@@ -239,6 +241,7 @@ static void tc3589x_gpio_irq_unmask(struct irq_data *d)
int mask = BIT(offset % 8);
tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
+ tc3589x_gpio->regs[REG_DIRECT][regoffset] &= ~mask;
}
static struct irq_chip tc3589x_gpio_irq_chip = {
@@ -334,6 +337,17 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ /*
+ * For the tc35894 we must disable the Direct KBD interrupts,
+ * otherwise IRQST always reads 0x20 (IRQN held low) and the
+ * interrupt status can never be cleared.
+ * TODO: more testing is needed on the other tc3589x chips.
+ */
+ ret = tc3589x_reg_write(tc3589x, TC3589x_DKBDMSK,
+ TC3589x_DKBDMSK_ELINT | TC3589x_DKBDMSK_EINT);
+ if (ret < 0)
+ return ret;
+
ret = devm_request_threaded_irq(&pdev->dev,
irq, NULL, tc3589x_gpio_irq,
IRQF_ONESHOT, "tc3589x-gpio",
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 178e9128ded0..9500074b1f1b 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -430,7 +430,18 @@ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
else
irq_set_handler_locked(data, handle_edge_irq);
- return irq_chip_set_type_parent(data, type);
+ if (data->parent_data)
+ return irq_chip_set_type_parent(data, type);
+
+ return 0;
+}
+
+static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ if (data->parent_data)
+ return irq_chip_set_wake_parent(data, on);
+
+ return 0;
}
static void tegra186_gpio_irq(struct irq_desc *desc)
@@ -678,7 +689,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_mask = tegra186_irq_mask;
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
- gpio->intc.irq_set_wake = irq_chip_set_wake_parent;
+ gpio->intc.irq_set_wake = tegra186_irq_set_wake;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
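
The new wrappers matter because not every Tegra186 bank has a parent (wake) IRQ, while the generic parent helpers dereference data->parent_data unconditionally. Abbreviated for illustration, irq_chip_set_type_parent() is roughly:

    int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
    {
        data = data->parent_data;           /* NULL for parentless banks */
        if (data->chip->irq_set_type)       /* would oops on NULL */
            return data->chip->irq_set_type(data, type);
        return -ENOSYS;
    }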
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 53d1387592fd..0b5a17ab996f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -929,11 +929,9 @@ static int zynq_gpio_probe(struct platform_device *pdev)
/* Retrieve GPIO clock */
gpio->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gpio->clk)) {
- if (PTR_ERR(gpio->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "input clock not found.\n");
- return PTR_ERR(gpio->clk);
- }
+ if (IS_ERR(gpio->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpio->clk), "input clock not found.\n");
+
ret = clk_prepare_enable(gpio->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable clock.\n");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 54ca3c18b291..834a12f3219e 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1221,9 +1221,6 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
return;
}
- if (!chip->names)
- devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
-
acpi_gpiochip_request_regions(acpi_gpio);
acpi_gpiochip_scan_gpios(acpi_gpio);
acpi_walk_dep_device_list(handle);
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index fed5a3b2172f..e9faeaf65d14 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/anon_inodes.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
+#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
@@ -14,16 +17,37 @@
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
#include "gpiolib-cdev.h"
+/*
+ * Array sizes must ensure 64-bit alignment and not create holes in the
+ * struct packing.
+ */
+static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
+static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));
+
+/*
+ * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
+ */
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
+static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
+
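
The alignment rules exist because a __u64 member is 8-byte aligned on 64-bit ABIs but only 4-byte aligned on i386, so a carelessly laid-out struct would differ between a 32-bit process and a 64-bit kernel (illustration only, not part of the uAPI):

    struct bad_example {
        __u32 a;
        __u64 b;    /* offset 8 on x86-64, offset 4 on i386 */
    };              /* sizeof: 16 vs 12 - ioctl() layouts diverge */

Keeping every struct a multiple of 8 bytes with no implicit holes avoids any compat_ioctl() translation.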
/* Character device interface to GPIO.
*
* The GPIO character device, /dev/gpiochipN, provides userspace an
@@ -34,6 +58,7 @@
* GPIO line handle management
*/
+#ifdef CONFIG_GPIO_CDEV_V1
/**
* struct linehandle_state - contains the state of a userspace handle
* @gdev: the GPIO device the handle pertains to
@@ -159,7 +184,8 @@ static long linehandle_set_config(struct linehandle_state *lh,
}
blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_CONFIG, desc);
+ GPIO_V2_LINE_CHANGED_CONFIG,
+ desc);
}
return 0;
}
@@ -281,11 +307,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lh->gdev = gdev;
get_device(&gdev->dev);
- /* Make sure this is terminated */
- handlereq.consumer_label[sizeof(handlereq.consumer_label)-1] = '\0';
- if (strlen(handlereq.consumer_label)) {
- lh->label = kstrdup(handlereq.consumer_label,
- GFP_KERNEL);
+ if (handlereq.consumer_label[0] != '\0') {
+ /* label is only initialized if consumer_label is set */
+ lh->label = kstrndup(handlereq.consumer_label,
+ sizeof(handlereq.consumer_label) - 1,
+ GFP_KERNEL);
if (!lh->label) {
ret = -ENOMEM;
goto out_free_lh;
@@ -331,7 +357,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
}
blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
+ GPIO_V2_LINE_CHANGED_REQUESTED, desc);
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
@@ -376,6 +402,1036 @@ out_free_lh:
linehandle_free(lh);
return ret;
}
+#endif /* CONFIG_GPIO_CDEV_V1 */
+
+/**
+ * struct line - contains the state of a requested line
+ * @desc: the GPIO descriptor for this line.
+ * @req: the corresponding line request
+ * @irq: the interrupt triggered in response to events on this GPIO
+ * @eflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
+ * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
+ * @timestamp_ns: cache for the timestamp storing it between hardirq and
+ * IRQ thread, used to bring the timestamp close to the actual event
+ * @req_seqno: the seqno for the current edge event in the sequence of
+ * events for the corresponding line request. This is drawn from the @req.
+ * @line_seqno: the seqno for the current edge event in the sequence of
+ * events for this line.
+ * @work: the worker that implements software debouncing
+ * @sw_debounced: flag indicating if the software debouncer is active
+ * @level: the current debounced physical level of the line
+ */
+struct line {
+ struct gpio_desc *desc;
+ /*
+ * -- edge detector specific fields --
+ */
+ struct linereq *req;
+ unsigned int irq;
+ u64 eflags;
+ /*
+ * timestamp_ns and req_seqno are accessed only by
+ * edge_irq_handler() and edge_irq_thread(), which are themselves
+ * mutually exclusive, so no additional protection is necessary.
+ */
+ u64 timestamp_ns;
+ u32 req_seqno;
+ /*
+ * line_seqno is accessed by either edge_irq_thread() or
+ * debounce_work_func(), which are themselves mutually exclusive,
+ * so no additional protection is necessary.
+ */
+ u32 line_seqno;
+ /*
+ * -- debouncer specific fields --
+ */
+ struct delayed_work work;
+ /*
+ * sw_debounce is accessed by linereq_set_config(), which is the
+ * only setter, and linereq_get_values(), which can live with a
+ * slightly stale value.
+ */
+ unsigned int sw_debounced;
+ /*
+ * level is accessed by debounce_work_func(), which is the only
+ * setter, and linereq_get_values() which can live with a slightly
+ * stale value.
+ */
+ unsigned int level;
+};
+
+/**
+ * struct linereq - contains the state of a userspace line request
+ * @gdev: the GPIO device the line request pertains to
+ * @label: consumer label used to tag GPIO descriptors
+ * @num_lines: the number of lines in the lines array
+ * @wait: wait queue that handles blocking reads of events
+ * @event_buffer_size: the number of elements allocated in @events
+ * @events: KFIFO for the GPIO events
+ * @seqno: the sequence number for edge events generated on all lines in
+ * this line request. Note that this is not used when @num_lines is 1, as
+ * the line_seqno is then the same and is cheaper to calculate.
+ * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
+ * of configuration, particularly multi-step accesses to desc flags.
+ * @lines: the lines held by this line request, with @num_lines elements.
+ */
+struct linereq {
+ struct gpio_device *gdev;
+ const char *label;
+ u32 num_lines;
+ wait_queue_head_t wait;
+ u32 event_buffer_size;
+ DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
+ atomic_t seqno;
+ struct mutex config_mutex;
+ struct line lines[];
+};
+
+#define GPIO_V2_LINE_BIAS_FLAGS \
+ (GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
+ GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
+ GPIO_V2_LINE_FLAG_BIAS_DISABLED)
+
+#define GPIO_V2_LINE_DIRECTION_FLAGS \
+ (GPIO_V2_LINE_FLAG_INPUT | \
+ GPIO_V2_LINE_FLAG_OUTPUT)
+
+#define GPIO_V2_LINE_DRIVE_FLAGS \
+ (GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
+ GPIO_V2_LINE_FLAG_OPEN_SOURCE)
+
+#define GPIO_V2_LINE_EDGE_FLAGS \
+ (GPIO_V2_LINE_FLAG_EDGE_RISING | \
+ GPIO_V2_LINE_FLAG_EDGE_FALLING)
+
+#define GPIO_V2_LINE_VALID_FLAGS \
+ (GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
+ GPIO_V2_LINE_DIRECTION_FLAGS | \
+ GPIO_V2_LINE_DRIVE_FLAGS | \
+ GPIO_V2_LINE_EDGE_FLAGS | \
+ GPIO_V2_LINE_BIAS_FLAGS)
+
+static void linereq_put_event(struct linereq *lr,
+ struct gpio_v2_line_event *le)
+{
+ bool overflow = false;
+
+ spin_lock(&lr->wait.lock);
+ if (kfifo_is_full(&lr->events)) {
+ overflow = true;
+ kfifo_skip(&lr->events);
+ }
+ kfifo_in(&lr->events, le, 1);
+ spin_unlock(&lr->wait.lock);
+ if (!overflow)
+ wake_up_poll(&lr->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("event FIFO is full - event dropped\n");
+}
+
+static irqreturn_t edge_irq_thread(int irq, void *p)
+{
+ struct line *line = p;
+ struct linereq *lr = line->req;
+ struct gpio_v2_line_event le;
+
+ /* Do not leak kernel stack to userspace */
+ memset(&le, 0, sizeof(le));
+
+ if (line->timestamp_ns) {
+ le.timestamp_ns = line->timestamp_ns;
+ } else {
+ /*
+ * We may be running from a nested threaded interrupt in
+ * which case we didn't get the timestamp from
+ * edge_irq_handler().
+ */
+ le.timestamp_ns = ktime_get_ns();
+ if (lr->num_lines != 1)
+ line->req_seqno = atomic_inc_return(&lr->seqno);
+ }
+ line->timestamp_ns = 0;
+
+ if (line->eflags == (GPIO_V2_LINE_FLAG_EDGE_RISING |
+ GPIO_V2_LINE_FLAG_EDGE_FALLING)) {
+ int level = gpiod_get_value_cansleep(line->desc);
+
+ if (level)
+ /* Emit low-to-high event */
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ else
+ /* Emit high-to-low event */
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+ /* Emit low-to-high event */
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+ /* Emit high-to-low event */
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+ } else {
+ return IRQ_NONE;
+ }
+ line->line_seqno++;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
+ le.offset = gpio_chip_hwgpio(line->desc);
+
+ linereq_put_event(lr, &le);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t edge_irq_handler(int irq, void *p)
+{
+ struct line *line = p;
+ struct linereq *lr = line->req;
+
+ /*
+ * Just store the timestamp in hardirq context so we get it as
+ * close in time as possible to the actual event.
+ */
+ line->timestamp_ns = ktime_get_ns();
+
+ if (lr->num_lines != 1)
+ line->req_seqno = atomic_inc_return(&lr->seqno);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * returns the current debounced logical value.
+ */
+static bool debounced_value(struct line *line)
+{
+ bool value;
+
+ /*
+ * minor race - debouncer may be stopped here, so edge_detector_stop()
+ * must leave the value unchanged so the following will read the level
+ * from when the debouncer was last running.
+ */
+ value = READ_ONCE(line->level);
+
+ if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ value = !value;
+
+ return value;
+}
+
+static irqreturn_t debounce_irq_handler(int irq, void *p)
+{
+ struct line *line = p;
+
+ mod_delayed_work(system_wq, &line->work,
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
+
+ return IRQ_HANDLED;
+}
+
+static void debounce_work_func(struct work_struct *work)
+{
+ struct gpio_v2_line_event le;
+ struct line *line = container_of(work, struct line, work.work);
+ struct linereq *lr;
+ int level;
+
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ if (level < 0) {
+ pr_debug_ratelimited("debouncer failed to read line value\n");
+ return;
+ }
+
+ if (READ_ONCE(line->level) == level)
+ return;
+
+ WRITE_ONCE(line->level, level);
+
+ /* -- edge detection -- */
+ if (!line->eflags)
+ return;
+
+ /* switch from physical level to logical - if they differ */
+ if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ level = !level;
+
+ /* ignore edges that are not being monitored */
+ if (((line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
+ ((line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
+ return;
+
+ /* Do not leak kernel stack to userspace */
+ memset(&le, 0, sizeof(le));
+
+ lr = line->req;
+ le.timestamp_ns = ktime_get_ns();
+ le.offset = gpio_chip_hwgpio(line->desc);
+ line->line_seqno++;
+ le.line_seqno = line->line_seqno;
+ le.seqno = (lr->num_lines == 1) ?
+ le.line_seqno : atomic_inc_return(&lr->seqno);
+
+ if (level)
+ /* Emit low-to-high event */
+ le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+ else
+ /* Emit high-to-low event */
+ le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+
+ linereq_put_event(lr, &le);
+}
+
+static int debounce_setup(struct line *line,
+ unsigned int debounce_period_us)
+{
+ unsigned long irqflags;
+ int ret, level, irq;
+
+ /* try hardware */
+ ret = gpiod_set_debounce(line->desc, debounce_period_us);
+ if (!ret) {
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ return ret;
+ }
+ if (ret != -ENOTSUPP)
+ return ret;
+
+ if (debounce_period_us) {
+ /* setup software debounce */
+ level = gpiod_get_raw_value_cansleep(line->desc);
+ if (level < 0)
+ return level;
+
+ irq = gpiod_to_irq(line->desc);
+ if (irq < 0)
+ return -ENXIO;
+
+ WRITE_ONCE(line->level, level);
+ irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
+ ret = request_irq(irq, debounce_irq_handler, irqflags,
+ line->req->label, line);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(line->sw_debounced, 1);
+ line->irq = irq;
+ }
+ return 0;
+}
+
+static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
+ unsigned int line_idx)
+{
+ unsigned int i;
+ u64 mask = BIT_ULL(line_idx);
+
+ for (i = 0; i < lc->num_attrs; i++) {
+ if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
+ (lc->attrs[i].mask & mask))
+ return true;
+ }
+ return false;
+}
+
+static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
+ unsigned int line_idx)
+{
+ unsigned int i;
+ u64 mask = BIT_ULL(line_idx);
+
+ for (i = 0; i < lc->num_attrs; i++) {
+ if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
+ (lc->attrs[i].mask & mask))
+ return lc->attrs[i].attr.debounce_period_us;
+ }
+ return 0;
+}
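+
+/*
+ * Illustrative userspace sketch (an assumption for documentation, not part
+ * of this patch - config_set_debounce() is a hypothetical helper name):
+ * requesting a 5 ms debounce on line index 0 by encoding a debounce
+ * attribute in the line config, matching the lookup functions above.
+ *
+ *	#include <linux/gpio.h>
+ *	#include <string.h>
+ *
+ *	static void config_set_debounce(struct gpio_v2_line_config *lc)
+ *	{
+ *		memset(lc, 0, sizeof(*lc));
+ *		lc->flags = GPIO_V2_LINE_FLAG_INPUT;
+ *		lc->num_attrs = 1;
+ *		lc->attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ *		lc->attrs[0].attr.debounce_period_us = 5000;
+ *		lc->attrs[0].mask = 1;	// applies to line index 0 only
+ *	}
+ */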
+
+static void edge_detector_stop(struct line *line)
+{
+ if (line->irq) {
+ free_irq(line->irq, line);
+ line->irq = 0;
+ }
+
+ cancel_delayed_work_sync(&line->work);
+ WRITE_ONCE(line->sw_debounced, 0);
+ line->eflags = 0;
+ /* do not change line->level - see comment in debounced_value() */
+}
+
+static int edge_detector_setup(struct line *line,
+ struct gpio_v2_line_config *lc,
+ unsigned int line_idx,
+ u64 eflags)
+{
+ u32 debounce_period_us;
+ unsigned long irqflags = 0;
+ int irq, ret;
+
+ if (eflags && !kfifo_initialized(&line->req->events)) {
+ ret = kfifo_alloc(&line->req->events,
+ line->req->event_buffer_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+ line->eflags = eflags;
+ if (gpio_v2_line_config_debounced(lc, line_idx)) {
+ debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
+ ret = debounce_setup(line, debounce_period_us);
+ if (ret)
+ return ret;
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ }
+
+ /* edge detection is disabled, or the sw debouncer will provide it */
+ if (!eflags || READ_ONCE(line->sw_debounced))
+ return 0;
+
+ irq = gpiod_to_irq(line->desc);
+ if (irq < 0)
+ return -ENXIO;
+
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
+ /* Request a thread to read the events */
+ ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
+ irqflags, line->req->label, line);
+ if (ret)
+ return ret;
+
+ line->irq = irq;
+ return 0;
+}
+
+static int edge_detector_update(struct line *line,
+ struct gpio_v2_line_config *lc,
+ unsigned int line_idx,
+ u64 eflags, bool polarity_change)
+{
+ unsigned int debounce_period_us =
+ gpio_v2_line_config_debounce_period(lc, line_idx);
+
+ if ((line->eflags == eflags) && !polarity_change &&
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
+ return 0;
+
+ /* sw debounced and still will be... */
+ if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
+ line->eflags = eflags;
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
+ return 0;
+ }
+
+ /* edge detection is being reconfigured, or sw debounce is being disabled */
+ if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
+ (!debounce_period_us && READ_ONCE(line->sw_debounced)))
+ edge_detector_stop(line);
+
+ return edge_detector_setup(line, lc, line_idx, eflags);
+}
+
+static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
+ unsigned int line_idx)
+{
+ unsigned int i;
+ u64 mask = BIT_ULL(line_idx);
+
+ for (i = 0; i < lc->num_attrs; i++) {
+ if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
+ (lc->attrs[i].mask & mask))
+ return lc->attrs[i].attr.flags;
+ }
+ return lc->flags;
+}
+
+static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
+ unsigned int line_idx)
+{
+ unsigned int i;
+ u64 mask = BIT_ULL(line_idx);
+
+ for (i = 0; i < lc->num_attrs; i++) {
+ if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
+ (lc->attrs[i].mask & mask))
+ return !!(lc->attrs[i].attr.values & mask);
+ }
+ return 0;
+}
+
+static int gpio_v2_line_flags_validate(u64 flags)
+{
+ /* Return an error if an unknown flag is set */
+ if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
+ return -EINVAL;
+
+ /*
+ * Do not allow both INPUT and OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
+ (flags & GPIO_V2_LINE_FLAG_OUTPUT))
+ return -EINVAL;
+
+ /* Edge detection requires explicit input. */
+ if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
+ !(flags & GPIO_V2_LINE_FLAG_INPUT))
+ return -EINVAL;
+
+ /*
+ * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
+ * request. If the hardware actually supports enabling both at the
+ * same time the electrical result would be disastrous.
+ */
+ if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
+ (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
+ return -EINVAL;
+
+ /* Drive requires explicit output direction. */
+ if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
+ !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
+ return -EINVAL;
+
+ /* Bias requires explicit direction. */
+ if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
+ !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
+ return -EINVAL;
+
+ /* Only one bias flag can be set. */
+ if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
+ (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
+ GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
+ ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
+ (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
+ return -EINVAL;
+
+ return 0;
+}
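+
+/*
+ * For example (illustrative only, not part of this patch):
+ * GPIO_V2_LINE_FLAG_INPUT | GPIO_V2_LINE_FLAG_EDGE_RISING is accepted,
+ * while GPIO_V2_LINE_FLAG_OUTPUT | GPIO_V2_LINE_FLAG_EDGE_RISING and
+ * GPIO_V2_LINE_FLAG_BIAS_PULL_UP | GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN
+ * are both rejected with -EINVAL by the checks above.
+ */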
+
+static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
+ unsigned int num_lines)
+{
+ unsigned int i;
+ u64 flags;
+ int ret;
+
+ if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
+ return -EINVAL;
+
+ if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
+ return -EINVAL;
+
+ for (i = 0; i < num_lines; i++) {
+ flags = gpio_v2_line_config_flags(lc, i);
+ ret = gpio_v2_line_flags_validate(flags);
+ if (ret)
+ return ret;
+
+ /* debounce requires explicit input */
+ if (gpio_v2_line_config_debounced(lc, i) &&
+ !(flags & GPIO_V2_LINE_FLAG_INPUT))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
+ unsigned long *flagsp)
+{
+ assign_bit(FLAG_ACTIVE_LOW, flagsp,
+ flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
+
+ if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
+ set_bit(FLAG_IS_OUT, flagsp);
+ else if (flags & GPIO_V2_LINE_FLAG_INPUT)
+ clear_bit(FLAG_IS_OUT, flagsp);
+
+ assign_bit(FLAG_EDGE_RISING, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
+ assign_bit(FLAG_EDGE_FALLING, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
+
+ assign_bit(FLAG_OPEN_DRAIN, flagsp,
+ flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
+ assign_bit(FLAG_OPEN_SOURCE, flagsp,
+ flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
+
+ assign_bit(FLAG_PULL_UP, flagsp,
+ flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
+ assign_bit(FLAG_PULL_DOWN, flagsp,
+ flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
+ assign_bit(FLAG_BIAS_DISABLE, flagsp,
+ flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
+}
+
+static long linereq_get_values(struct linereq *lr, void __user *ip)
+{
+ struct gpio_v2_line_values lv;
+ DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
+ struct gpio_desc **descs;
+ unsigned int i, didx, num_get;
+ bool val;
+ int ret;
+
+ /* NOTE: It's ok to read values of output lines. */
+ if (copy_from_user(&lv, ip, sizeof(lv)))
+ return -EFAULT;
+
+ for (num_get = 0, i = 0; i < lr->num_lines; i++) {
+ if (lv.mask & BIT_ULL(i)) {
+ num_get++;
+ descs = &lr->lines[i].desc;
+ }
+ }
+
+ if (num_get == 0)
+ return -EINVAL;
+
+ if (num_get != 1) {
+ descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+ for (didx = 0, i = 0; i < lr->num_lines; i++) {
+ if (lv.mask & BIT_ULL(i)) {
+ descs[didx] = lr->lines[i].desc;
+ didx++;
+ }
+ }
+ }
+ ret = gpiod_get_array_value_complex(false, true, num_get,
+ descs, NULL, vals);
+
+ if (num_get != 1)
+ kfree(descs);
+ if (ret)
+ return ret;
+
+ lv.bits = 0;
+ for (didx = 0, i = 0; i < lr->num_lines; i++) {
+ if (lv.mask & BIT_ULL(i)) {
+ if (lr->lines[i].sw_debounced)
+ val = debounced_value(&lr->lines[i]);
+ else
+ val = test_bit(didx, vals);
+ if (val)
+ lv.bits |= BIT_ULL(i);
+ didx++;
+ }
+ }
+
+ if (copy_to_user(ip, &lv, sizeof(lv)))
+ return -EFAULT;
+
+ return 0;
+}
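+
+/*
+ * Illustrative userspace sketch (not part of this patch - read_line0() and
+ * req_fd are hypothetical names): reading the logical value of the first
+ * requested line, given a request fd returned by GPIO_V2_GET_LINE_IOCTL.
+ *
+ *	#include <linux/gpio.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	static int read_line0(int req_fd)
+ *	{
+ *		struct gpio_v2_line_values lv = { .mask = 1 };
+ *
+ *		if (ioctl(req_fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &lv) < 0)
+ *			return -1;
+ *		return !!(lv.bits & 1);
+ *	}
+ */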
+
+static long linereq_set_values_unlocked(struct linereq *lr,
+ struct gpio_v2_line_values *lv)
+{
+ DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
+ struct gpio_desc **descs;
+ unsigned int i, didx, num_set;
+ int ret;
+
+ bitmap_zero(vals, GPIO_V2_LINES_MAX);
+ for (num_set = 0, i = 0; i < lr->num_lines; i++) {
+ if (lv->mask & BIT_ULL(i)) {
+ if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
+ return -EPERM;
+ if (lv->bits & BIT_ULL(i))
+ __set_bit(num_set, vals);
+ num_set++;
+ descs = &lr->lines[i].desc;
+ }
+ }
+ if (num_set == 0)
+ return -EINVAL;
+
+ if (num_set != 1) {
+ /* build compacted desc array and values */
+ descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+ for (didx = 0, i = 0; i < lr->num_lines; i++) {
+ if (lv->mask & BIT_ULL(i)) {
+ descs[didx] = lr->lines[i].desc;
+ didx++;
+ }
+ }
+ }
+ ret = gpiod_set_array_value_complex(false, true, num_set,
+ descs, NULL, vals);
+
+ if (num_set != 1)
+ kfree(descs);
+ return ret;
+}
+
+static long linereq_set_values(struct linereq *lr, void __user *ip)
+{
+ struct gpio_v2_line_values lv;
+ int ret;
+
+ if (copy_from_user(&lv, ip, sizeof(lv)))
+ return -EFAULT;
+
+ mutex_lock(&lr->config_mutex);
+
+ ret = linereq_set_values_unlocked(lr, &lv);
+
+ mutex_unlock(&lr->config_mutex);
+
+ return ret;
+}
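+
+/*
+ * Illustrative userspace sketch (not part of this patch - req_fd is a
+ * hypothetical request fd): driving the first two requested lines in a
+ * single call.
+ *
+ *	struct gpio_v2_line_values lv = {
+ *		.mask = 0x3,	// select line indices 0 and 1
+ *		.bits = 0x1,	// line 0 high, line 1 low
+ *	};
+ *
+ *	ioctl(req_fd, GPIO_V2_LINE_SET_VALUES_IOCTL, &lv);
+ */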
+
+static long linereq_set_config_unlocked(struct linereq *lr,
+ struct gpio_v2_line_config *lc)
+{
+ struct gpio_desc *desc;
+ unsigned int i;
+ u64 flags;
+ bool polarity_change;
+ int ret;
+
+ for (i = 0; i < lr->num_lines; i++) {
+ desc = lr->lines[i].desc;
+ flags = gpio_v2_line_config_flags(lc, i);
+ polarity_change =
+ (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
+ ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));
+
+ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+ /*
+ * Lines have to be requested explicitly for input
+ * or output, else the line will be treated "as is".
+ */
+ if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ int val = gpio_v2_line_config_output_value(lc, i);
+
+ edge_detector_stop(&lr->lines[i]);
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ return ret;
+ } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ return ret;
+
+ ret = edge_detector_update(&lr->lines[i], lc, i,
+ flags & GPIO_V2_LINE_EDGE_FLAGS,
+ polarity_change);
+ if (ret)
+ return ret;
+ }
+
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIO_V2_LINE_CHANGED_CONFIG,
+ desc);
+ }
+ return 0;
+}
+
+static long linereq_set_config(struct linereq *lr, void __user *ip)
+{
+ struct gpio_v2_line_config lc;
+ int ret;
+
+ if (copy_from_user(&lc, ip, sizeof(lc)))
+ return -EFAULT;
+
+ ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
+ if (ret)
+ return ret;
+
+ mutex_lock(&lr->config_mutex);
+
+ ret = linereq_set_config_unlocked(lr, &lc);
+
+ mutex_unlock(&lr->config_mutex);
+
+ return ret;
+}
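+
+/*
+ * Illustrative userspace sketch (not part of this patch - req_fd is a
+ * hypothetical request fd): switching all requested lines to outputs
+ * driven low; unset output values default to 0, and the zero-initialized
+ * padding satisfies gpio_v2_line_config_validate().
+ *
+ *	struct gpio_v2_line_config lc = {
+ *		.flags = GPIO_V2_LINE_FLAG_OUTPUT,
+ *	};
+ *
+ *	ioctl(req_fd, GPIO_V2_LINE_SET_CONFIG_IOCTL, &lc);
+ */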
+
+static long linereq_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct linereq *lr = file->private_data;
+ void __user *ip = (void __user *)arg;
+
+ if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL)
+ return linereq_get_values(lr, ip);
+ else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL)
+ return linereq_set_values(lr, ip);
+ else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL)
+ return linereq_set_config(lr, ip);
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static __poll_t linereq_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct linereq *lr = file->private_data;
+ __poll_t events = 0;
+
+ poll_wait(file, &lr->wait, wait);
+
+ if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
+ &lr->wait.lock))
+ events = EPOLLIN | EPOLLRDNORM;
+
+ return events;
+}
+
+static ssize_t linereq_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct linereq *lr = file->private_data;
+ struct gpio_v2_line_event le;
+ ssize_t bytes_read = 0;
+ int ret;
+
+ if (count < sizeof(le))
+ return -EINVAL;
+
+ do {
+ spin_lock(&lr->wait.lock);
+ if (kfifo_is_empty(&lr->events)) {
+ if (bytes_read) {
+ spin_unlock(&lr->wait.lock);
+ return bytes_read;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ spin_unlock(&lr->wait.lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_locked(lr->wait,
+ !kfifo_is_empty(&lr->events));
+ if (ret) {
+ spin_unlock(&lr->wait.lock);
+ return ret;
+ }
+ }
+
+ ret = kfifo_out(&lr->events, &le, 1);
+ spin_unlock(&lr->wait.lock);
+ if (ret != 1) {
+ /*
+ * This should never happen - we were holding the
+ * lock from the moment we learned the fifo is no
+ * longer empty until now.
+ */
+ ret = -EIO;
+ break;
+ }
+
+ if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
+ return -EFAULT;
+ bytes_read += sizeof(le);
+ } while (count >= bytes_read + sizeof(le));
+
+ return bytes_read;
+}
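+
+/*
+ * Illustrative userspace sketch (not part of this patch - read_one_event()
+ * and req_fd are hypothetical names): blocking read of a single edge event
+ * from the request fd; reads shorter than one event fail with -EINVAL.
+ *
+ *	#include <linux/gpio.h>
+ *	#include <unistd.h>
+ *
+ *	static int read_one_event(int req_fd, struct gpio_v2_line_event *le)
+ *	{
+ *		ssize_t n = read(req_fd, le, sizeof(*le));
+ *
+ *		return (n == sizeof(*le)) ? 0 : -1;
+ *	}
+ */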
+
+static void linereq_free(struct linereq *lr)
+{
+ unsigned int i;
+
+ for (i = 0; i < lr->num_lines; i++) {
+ edge_detector_stop(&lr->lines[i]);
+ if (lr->lines[i].desc)
+ gpiod_free(lr->lines[i].desc);
+ }
+ kfifo_free(&lr->events);
+ kfree(lr->label);
+ put_device(&lr->gdev->dev);
+ kfree(lr);
+}
+
+static int linereq_release(struct inode *inode, struct file *file)
+{
+ struct linereq *lr = file->private_data;
+
+ linereq_free(lr);
+ return 0;
+}
+
+static const struct file_operations line_fileops = {
+ .release = linereq_release,
+ .read = linereq_read,
+ .poll = linereq_poll,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = linereq_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = linereq_ioctl_compat,
+#endif
+};
+
+static int linereq_create(struct gpio_device *gdev, void __user *ip)
+{
+ struct gpio_v2_line_request ulr;
+ struct gpio_v2_line_config *lc;
+ struct linereq *lr;
+ struct file *file;
+ u64 flags;
+ unsigned int i;
+ int fd, ret;
+
+ if (copy_from_user(&ulr, ip, sizeof(ulr)))
+ return -EFAULT;
+
+ if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
+ return -EINVAL;
+
+ if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
+ return -EINVAL;
+
+ lc = &ulr.config;
+ ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
+ if (ret)
+ return ret;
+
+ lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
+ if (!lr)
+ return -ENOMEM;
+
+ lr->gdev = gdev;
+ get_device(&gdev->dev);
+
+ for (i = 0; i < ulr.num_lines; i++) {
+ lr->lines[i].req = lr;
+ WRITE_ONCE(lr->lines[i].sw_debounced, 0);
+ INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
+ }
+
+ if (ulr.consumer[0] != '\0') {
+ /* label is only initialized if consumer is set */
+ lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
+ GFP_KERNEL);
+ if (!lr->label) {
+ ret = -ENOMEM;
+ goto out_free_linereq;
+ }
+ }
+
+ mutex_init(&lr->config_mutex);
+ init_waitqueue_head(&lr->wait);
+ lr->event_buffer_size = ulr.event_buffer_size;
+ if (lr->event_buffer_size == 0)
+ lr->event_buffer_size = ulr.num_lines * 16;
+ else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
+ lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
+
+ atomic_set(&lr->seqno, 0);
+ lr->num_lines = ulr.num_lines;
+
+ /* Request each GPIO */
+ for (i = 0; i < ulr.num_lines; i++) {
+ u32 offset = ulr.offsets[i];
+ struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
+
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
+ goto out_free_linereq;
+ }
+
+ ret = gpiod_request(desc, lr->label);
+ if (ret)
+ goto out_free_linereq;
+
+ lr->lines[i].desc = desc;
+ flags = gpio_v2_line_config_flags(lc, i);
+ gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
+
+ ret = gpiod_set_transitory(desc, false);
+ if (ret < 0)
+ goto out_free_linereq;
+
+ /*
+ * Lines have to be requested explicitly for input
+ * or output, else the line will be treated "as is".
+ */
+ if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
+ int val = gpio_v2_line_config_output_value(lc, i);
+
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ goto out_free_linereq;
+ } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ goto out_free_linereq;
+
+ ret = edge_detector_setup(&lr->lines[i], lc, i,
+ flags & GPIO_V2_LINE_EDGE_FLAGS);
+ if (ret)
+ goto out_free_linereq;
+ }
+
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIO_V2_LINE_CHANGED_REQUESTED, desc);
+
+ dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
+ offset);
+ }
+
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_free_linereq;
+ }
+
+ file = anon_inode_getfile("gpio-line", &line_fileops, lr,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
+ ulr.fd = fd;
+ if (copy_to_user(ip, &ulr, sizeof(ulr))) {
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
+ }
+
+ fd_install(fd, file);
+
+ dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
+ lr->num_lines);
+
+ return 0;
+
+out_put_unused_fd:
+ put_unused_fd(fd);
+out_free_linereq:
+ linereq_free(lr);
+ return ret;
+}
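+
+/*
+ * Illustrative userspace sketch (not part of this patch - request_line3()
+ * is a hypothetical name, and "/dev/gpiochip0" is assumed to be the chip of
+ * interest): requesting line 3 as an input with rising edge detection.
+ *
+ *	#include <fcntl.h>
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/gpio.h>
+ *
+ *	static int request_line3(void)
+ *	{
+ *		struct gpio_v2_line_request req;
+ *		int ret, chip_fd = open("/dev/gpiochip0", O_RDONLY);
+ *
+ *		if (chip_fd < 0)
+ *			return -1;
+ *		memset(&req, 0, sizeof(req));
+ *		req.num_lines = 1;
+ *		req.offsets[0] = 3;
+ *		req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
+ *				   GPIO_V2_LINE_FLAG_EDGE_RISING;
+ *		strcpy(req.consumer, "example");
+ *		ret = ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
+ *		close(chip_fd);
+ *		if (ret < 0)
+ *			return -1;
+ *		return req.fd;	// values/events are accessed via this fd
+ *	}
+ */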
+
+#ifdef CONFIG_GPIO_CDEV_V1
/*
* GPIO line event management
@@ -680,11 +1736,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
le->gdev = gdev;
get_device(&gdev->dev);
- /* Make sure this is terminated */
- eventreq.consumer_label[sizeof(eventreq.consumer_label)-1] = '\0';
- if (strlen(eventreq.consumer_label)) {
- le->label = kstrdup(eventreq.consumer_label,
- GFP_KERNEL);
+ if (eventreq.consumer_label[0] != '\0') {
+ /* label is only initialized if consumer_label is set */
+ le->label = kstrndup(eventreq.consumer_label,
+ sizeof(eventreq.consumer_label) - 1,
+ GFP_KERNEL);
if (!le->label) {
ret = -ENOMEM;
goto out_free_le;
@@ -704,7 +1760,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
goto out_free_le;
blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
+ GPIO_V2_LINE_CHANGED_REQUESTED, desc);
irq = gpiod_to_irq(desc);
if (irq <= 0) {
@@ -771,12 +1827,60 @@ out_free_le:
return ret;
}
+static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
+ struct gpioline_info *info_v1)
+{
+ u64 flagsv2 = info_v2->flags;
+
+ memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
+ memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
+ info_v1->line_offset = info_v2->offset;
+ info_v1->flags = 0;
+
+ if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
+ info_v1->flags |= GPIOLINE_FLAG_KERNEL;
+
+ if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
+ info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
+
+ if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
+ info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+
+ if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
+ info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
+ info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+
+ if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
+ info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+ if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
+ info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
+ info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+}
+
+static void gpio_v2_line_info_changed_to_v1(
+ struct gpio_v2_line_info_changed *lic_v2,
+ struct gpioline_info_changed *lic_v1)
+{
+ gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
+ lic_v1->timestamp = lic_v2->timestamp_ns;
+ lic_v1->event_type = lic_v2->event_type;
+}
+
+#endif /* CONFIG_GPIO_CDEV_V1 */
+
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
- struct gpioline_info *info)
+ struct gpio_v2_line_info *info)
{
struct gpio_chip *gc = desc->gdev->chip;
bool ok_for_pinctrl;
unsigned long flags;
+ u32 debounce_period_us;
+ unsigned int num_attrs = 0;
+
+ memset(info, 0, sizeof(*info));
+ info->offset = gpio_chip_hwgpio(desc);
/*
* This function takes a mutex so we must check this before taking
@@ -786,23 +1890,15 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* lock common to both frameworks?
*/
ok_for_pinctrl =
- pinctrl_gpio_can_use_line(gc->base + info->line_offset);
+ pinctrl_gpio_can_use_line(gc->base + info->offset);
spin_lock_irqsave(&gpio_lock, flags);
- if (desc->name) {
- strncpy(info->name, desc->name, sizeof(info->name));
- info->name[sizeof(info->name) - 1] = '\0';
- } else {
- info->name[0] = '\0';
- }
+ if (desc->name)
+ strscpy(info->name, desc->name, sizeof(info->name));
- if (desc->label) {
- strncpy(info->consumer, desc->label, sizeof(info->consumer));
- info->consumer[sizeof(info->consumer) - 1] = '\0';
- } else {
- info->consumer[0] = '\0';
- }
+ if (desc->label)
+ strscpy(info->consumer, desc->label, sizeof(info->consumer));
/*
* Userspace only need to know that the kernel is using this GPIO so
@@ -815,23 +1911,40 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
test_bit(FLAG_EXPORT, &desc->flags) ||
test_bit(FLAG_SYSFS, &desc->flags) ||
!ok_for_pinctrl)
- info->flags |= GPIOLINE_FLAG_KERNEL;
+ info->flags |= GPIO_V2_LINE_FLAG_USED;
+
if (test_bit(FLAG_IS_OUT, &desc->flags))
- info->flags |= GPIOLINE_FLAG_IS_OUT;
+ info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
+ else
+ info->flags |= GPIO_V2_LINE_FLAG_INPUT;
+
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- info->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
+
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- info->flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
- GPIOLINE_FLAG_IS_OUT);
+ info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- info->flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
- GPIOLINE_FLAG_IS_OUT);
+ info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
+
if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+ info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
if (test_bit(FLAG_PULL_UP, &desc->flags))
- info->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
+ info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
+
+ if (test_bit(FLAG_EDGE_RISING, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
+ if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
+
+ debounce_period_us = READ_ONCE(desc->debounce_period_us);
+ if (debounce_period_us) {
+ info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ info->attrs[num_attrs].debounce_period_us = debounce_period_us;
+ num_attrs++;
+ }
+ info->num_attrs = num_attrs;
spin_unlock_irqrestore(&gpio_lock, flags);
}
@@ -839,11 +1952,65 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
struct gpio_chardev_data {
struct gpio_device *gdev;
wait_queue_head_t wait;
- DECLARE_KFIFO(events, struct gpioline_info_changed, 32);
+ DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
struct notifier_block lineinfo_changed_nb;
unsigned long *watched_lines;
+#ifdef CONFIG_GPIO_CDEV_V1
+ atomic_t watch_abi_version;
+#endif
};
+#ifdef CONFIG_GPIO_CDEV_V1
+/*
+ * returns 0 if the versions match, else the previously selected ABI version
+ */
+static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
+ unsigned int version)
+{
+ int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
+
+ if (abiv == version)
+ return 0;
+
+ return abiv;
+}
+#endif
+
+static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
+ bool watch)
+{
+ struct gpio_desc *desc;
+ struct gpio_v2_line_info lineinfo;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
+ return -EINVAL;
+
+ desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (watch) {
+#ifdef CONFIG_GPIO_CDEV_V1
+ if (lineinfo_ensure_abi_version(cdev, 2))
+ return -EPERM;
+#endif
+ if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
+ return -EBUSY;
+ }
+ gpio_desc_to_lineinfo(desc, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+ if (watch)
+ clear_bit(lineinfo.offset, cdev->watched_lines);
+ return -EFAULT;
+ }
+
+ return 0;
+}
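+
+/*
+ * Illustrative userspace sketch (not part of this patch - chip_fd is a
+ * hypothetical gpiochip fd): watching line 3 for info changes with the v2
+ * watch ioctl; subsequent change events are then read() from chip_fd.
+ *
+ *	struct gpio_v2_line_info info = { .offset = 3 };
+ *
+ *	ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &info);
+ */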
+
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
@@ -853,7 +2020,6 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct gpio_device *gdev = cdev->gdev;
struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
- struct gpio_desc *desc;
__u32 offset;
/* We fail any subsequent ioctl():s when the chip is gone */
@@ -866,18 +2032,19 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
memset(&chipinfo, 0, sizeof(chipinfo));
- strncpy(chipinfo.name, dev_name(&gdev->dev),
+ strscpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
- chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
- strncpy(chipinfo.label, gdev->label,
+ strscpy(chipinfo.label, gdev->label,
sizeof(chipinfo.label));
- chipinfo.label[sizeof(chipinfo.label)-1] = '\0';
chipinfo.lines = gdev->ngpio;
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
+#ifdef CONFIG_GPIO_CDEV_V1
} else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+ struct gpio_desc *desc;
struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
@@ -887,7 +2054,8 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_ERR(desc))
return PTR_ERR(desc);
- gpio_desc_to_lineinfo(desc, &lineinfo);
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
@@ -897,7 +2065,9 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ struct gpio_desc *desc;
struct gpioline_info lineinfo;
+ struct gpio_v2_line_info lineinfo_v2;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
@@ -907,10 +2077,14 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (IS_ERR(desc))
return PTR_ERR(desc);
+ if (lineinfo_ensure_abi_version(cdev, 1))
+ return -EPERM;
+
if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
return -EBUSY;
- gpio_desc_to_lineinfo(desc, &lineinfo);
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
clear_bit(lineinfo.line_offset, cdev->watched_lines);
@@ -918,6 +2092,13 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
return 0;
+#endif /* CONFIG_GPIO_CDEV_V1 */
+ } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
+ cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
+ return lineinfo_get(cdev, ip,
+ cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL);
+ } else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
+ return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;
@@ -951,7 +2132,7 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
- struct gpioline_info_changed chg;
+ struct gpio_v2_line_info_changed chg;
struct gpio_desc *desc = data;
int ret;
@@ -959,9 +2140,8 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
return NOTIFY_DONE;
memset(&chg, 0, sizeof(chg));
- chg.info.line_offset = gpio_chip_hwgpio(desc);
chg.event_type = action;
- chg.timestamp = ktime_get_ns();
+ chg.timestamp_ns = ktime_get_ns();
gpio_desc_to_lineinfo(desc, &chg.info);
ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
@@ -992,12 +2172,16 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
size_t count, loff_t *off)
{
struct gpio_chardev_data *cdev = file->private_data;
- struct gpioline_info_changed event;
+ struct gpio_v2_line_info_changed event;
ssize_t bytes_read = 0;
int ret;
+ size_t event_size;
- if (count < sizeof(event))
+#ifndef CONFIG_GPIO_CDEV_V1
+ event_size = sizeof(struct gpio_v2_line_info_changed);
+ if (count < event_size)
return -EINVAL;
+#endif
do {
spin_lock(&cdev->wait.lock);
@@ -1019,7 +2203,17 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
return ret;
}
}
-
+#ifdef CONFIG_GPIO_CDEV_V1
+ /* must be after kfifo check so watch_abi_version is set */
+ if (atomic_read(&cdev->watch_abi_version) == 2)
+ event_size = sizeof(struct gpio_v2_line_info_changed);
+ else
+ event_size = sizeof(struct gpioline_info_changed);
+ if (count < event_size) {
+ spin_unlock(&cdev->wait.lock);
+ return -EINVAL;
+ }
+#endif
ret = kfifo_out(&cdev->events, &event, 1);
spin_unlock(&cdev->wait.lock);
if (ret != 1) {
@@ -1028,9 +2222,23 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
/* We should never get here. See lineevent_read(). */
}
- if (copy_to_user(buf + bytes_read, &event, sizeof(event)))
+#ifdef CONFIG_GPIO_CDEV_V1
+ if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
+ if (copy_to_user(buf + bytes_read, &event, event_size))
+ return -EFAULT;
+ } else {
+ struct gpioline_info_changed event_v1;
+
+ gpio_v2_line_info_changed_to_v1(&event, &event_v1);
+ if (copy_to_user(buf + bytes_read, &event_v1,
+ event_size))
+ return -EFAULT;
+ }
+#else
+ if (copy_to_user(buf + bytes_read, &event, event_size))
return -EFAULT;
- bytes_read += sizeof(event);
+#endif
+ bytes_read += event_size;
} while (count >= bytes_read + sizeof(event));
return bytes_read;
diff --git a/drivers/gpio/gpiolib-cdev.h b/drivers/gpio/gpiolib-cdev.h
index 973578e7ad10..b42644cbffb8 100644
--- a/drivers/gpio/gpiolib-cdev.h
+++ b/drivers/gpio/gpiolib-cdev.h
@@ -3,7 +3,9 @@
#ifndef GPIOLIB_CDEV_H
#define GPIOLIB_CDEV_H
-#include <linux/device.h>
+#include <linux/types.h>
+
+struct gpio_device;
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt);
void gpiolib_cdev_unregister(struct gpio_device *gdev);
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
deleted file mode 100644
index 26741032fa9e..000000000000
--- a/drivers/gpio/gpiolib-devprop.c
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Device property helpers for GPIO chips.
- *
- * Copyright (C) 2016, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#include <linux/property.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/driver.h>
-#include <linux/export.h>
-
-#include "gpiolib.h"
-
-/**
- * devprop_gpiochip_set_names - Set GPIO line names using device properties
- * @chip: GPIO chip whose lines should be named, if possible
- * @fwnode: Property Node containing the gpio-line-names property
- *
- * Looks for device property "gpio-line-names" and if it exists assigns
- * GPIO line names for the chip. The memory allocated for the assigned
- * names belong to the underlying firmware node and should not be released
- * by the caller.
- */
-void devprop_gpiochip_set_names(struct gpio_chip *chip,
- const struct fwnode_handle *fwnode)
-{
- struct gpio_device *gdev = chip->gpiodev;
- const char **names;
- int ret, i;
- int count;
-
- count = fwnode_property_read_string_array(fwnode, "gpio-line-names",
- NULL, 0);
- if (count < 0)
- return;
-
- if (count > gdev->ngpio) {
- dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d",
- count, gdev->ngpio);
- count = gdev->ngpio;
- }
-
- names = kcalloc(count, sizeof(*names), GFP_KERNEL);
- if (!names)
- return;
-
- ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
- names, count);
- if (ret < 0) {
- dev_warn(&gdev->dev, "failed to read GPIO line names\n");
- kfree(names);
- return;
- }
-
- for (i = 0; i < count; i++)
- gdev->descs[i].name = names[i];
-
- kfree(names);
-}
-EXPORT_SYMBOL_GPL(devprop_gpiochip_set_names);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index bd31dd3b6a75..2f895a2b8411 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -1026,11 +1026,6 @@ int of_gpiochip_add(struct gpio_chip *chip)
if (ret)
return ret;
- /* If the chip defines names itself, these take precedence */
- if (!chip->names)
- devprop_gpiochip_set_names(chip,
- of_fwnode_handle(chip->of_node));
-
of_node_get(chip->of_node);
ret = of_gpiochip_scan_gpios(chip);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 80137c1b3cdc..089ddcaa9bc6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -340,9 +340,6 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
struct gpio_device *gdev = gc->gpiodev;
int i;
- if (!gc->names)
- return 0;
-
/* First check all names if they are unique */
for (i = 0; i != gc->ngpio; ++i) {
struct gpio_desc *gpio;
@@ -361,6 +358,57 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
return 0;
}
+/*
+ * devprop_gpiochip_set_names - Set GPIO line names using device properties
+ * @chip: GPIO chip whose lines should be named, if possible
+ *
+ * Looks for the device property "gpio-line-names" and, if it exists,
+ * assigns GPIO line names for the chip. The memory allocated for the
+ * assigned names belongs to the underlying software node and should not
+ * be released by the caller.
+ */
+static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+{
+ struct gpio_device *gdev = chip->gpiodev;
+ struct device *dev = chip->parent;
+ const char **names;
+ int ret, i;
+ int count;
+
+ /* The GPIO chip may not have a parent device whose properties we can inspect. */
+ if (!dev)
+ return 0;
+
+ count = device_property_string_array_count(dev, "gpio-line-names");
+ if (count < 0)
+ return 0;
+
+ if (count > gdev->ngpio) {
+ dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d",
+ count, gdev->ngpio);
+ count = gdev->ngpio;
+ }
+
+ names = kcalloc(count, sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ ret = device_property_read_string_array(dev, "gpio-line-names",
+ names, count);
+ if (ret < 0) {
+ dev_warn(&gdev->dev, "failed to read GPIO line names\n");
+ kfree(names);
+ return ret;
+ }
+
+ for (i = 0; i < count; i++)
+ gdev->descs[i].name = names[i];
+
+ kfree(names);
+
+ return 0;
+}
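+
+/*
+ * Illustrative sketch (an assumption, not part of this patch): a parent
+ * device can name its chip's lines via a software node built from property
+ * entries, assuming the PROPERTY_ENTRY_STRING_ARRAY() helper from
+ * <linux/property.h>:
+ *
+ *	static const char * const names[] = { "reset", "wake", "led" };
+ *	static const struct property_entry props[] = {
+ *		PROPERTY_ENTRY_STRING_ARRAY("gpio-line-names", names),
+ *		{ }
+ *	};
+ *
+ * or via an equivalent firmware (DT/ACPI) property on that device.
+ */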
+
static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc)
{
unsigned long *p;
@@ -426,17 +474,29 @@ static void gpiodevice_release(struct device *dev)
struct gpio_device *gdev = dev_get_drvdata(dev);
list_del(&gdev->list);
- ida_simple_remove(&gpio_ida, gdev->id);
+ ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
kfree(gdev->descs);
kfree(gdev);
}
+#ifdef CONFIG_GPIO_CDEV
+#define gcdev_register(gdev, devt) gpiolib_cdev_register((gdev), (devt))
+#define gcdev_unregister(gdev) gpiolib_cdev_unregister((gdev))
+#else
+/*
+ * gpiolib_cdev_register() indirectly calls device_add(), which is still
+ * required even when cdev is not selected.
+ */
+#define gcdev_register(gdev, devt) device_add(&(gdev)->dev)
+#define gcdev_unregister(gdev) device_del(&(gdev)->dev)
+#endif
+
static int gpiochip_setup_dev(struct gpio_device *gdev)
{
int ret;
- ret = gpiolib_cdev_register(gdev, gpio_devt);
+ ret = gcdev_register(gdev, gpio_devt);
if (ret)
return ret;
@@ -452,7 +512,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
return 0;
err_remove_device:
- gpiolib_cdev_unregister(gdev);
+ gcdev_unregister(gdev);
return ret;
}
@@ -537,7 +597,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gc->of_node = gdev->dev.of_node;
#endif
- gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
+ gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
ret = gdev->id;
goto err_free_gdev;
@@ -621,7 +681,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- ret = gpiochip_set_desc_names(gc);
+ if (gc->names)
+ ret = gpiochip_set_desc_names(gc);
+ else
+ ret = devprop_gpiochip_set_names(gc);
if (ret)
goto err_remove_from_list;
@@ -705,7 +768,7 @@ err_free_label:
err_free_descs:
kfree(gdev->descs);
err_free_ida:
- ida_simple_remove(&gpio_ida, gdev->id);
+ ida_free(&gpio_ida, gdev->id);
err_free_gdev:
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
@@ -774,7 +837,7 @@ void gpiochip_remove(struct gpio_chip *gc)
* be removed, else it will be dangling until the last user is
* gone.
*/
- gpiolib_cdev_unregister(gdev);
+ gcdev_unregister(gdev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -2041,10 +2104,15 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
clear_bit(FLAG_PULL_UP, &desc->flags);
clear_bit(FLAG_PULL_DOWN, &desc->flags);
clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ clear_bit(FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(FLAG_EDGE_FALLING, &desc->flags);
clear_bit(FLAG_IS_HOGGED, &desc->flags);
#ifdef CONFIG_OF_DYNAMIC
desc->hog = NULL;
#endif
+#ifdef CONFIG_GPIO_CDEV
+ WRITE_ONCE(desc->debounce_period_us, 0);
+#endif
ret = true;
}
@@ -4402,31 +4470,18 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
return 0;
}
-static const struct seq_operations gpiolib_seq_ops = {
+static const struct seq_operations gpiolib_sops = {
.start = gpiolib_seq_start,
.next = gpiolib_seq_next,
.stop = gpiolib_seq_stop,
.show = gpiolib_seq_show,
};
-
-static int gpiolib_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &gpiolib_seq_ops);
-}
-
-static const struct file_operations gpiolib_operations = {
- .owner = THIS_MODULE,
- .open = gpiolib_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(gpiolib);
static int __init gpiolib_debugfs_init(void)
{
/* /sys/kernel/debug/gpio */
- debugfs_create_file("gpio", S_IFREG | S_IRUGO, NULL, NULL,
- &gpiolib_operations);
+ debugfs_create_file("gpio", 0444, NULL, NULL, &gpiolib_fops);
return 0;
}
subsys_initcall(gpiolib_debugfs_init);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 6709f79c02dd..b674b5bb980e 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -114,6 +114,8 @@ struct gpio_desc {
#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
/* Connection label */
const char *label;
@@ -122,6 +124,10 @@ struct gpio_desc {
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
+#ifdef CONFIG_GPIO_CDEV
+ /* debounce period in microseconds */
+ unsigned int debounce_period_us;
+#endif
};
int gpiod_request(struct gpio_desc *desc, const char *label);
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2f31579f91d4..81569009f884 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
-obj-$(CONFIG_DRM_IMX) += imx/
+obj-y += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 403ec3db29df..39976c7b100c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -30,7 +30,7 @@ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
- -I$(FULL_AMD_PATH)/powerplay/inc \
+ -I$(FULL_AMD_PATH)/pm/inc \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
@@ -47,7 +47,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
- amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
@@ -55,15 +55,15 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
- amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
-amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
uvd_v3_1.o
amdgpu-y += \
@@ -85,7 +85,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o umc_v6_0.o
+ umc_v6_1.o umc_v6_0.o umc_v8_7.o
# add IH block
amdgpu-y += \
@@ -105,10 +105,6 @@ amdgpu-y += \
psp_v11_0.o \
psp_v12_0.o
-# add SMC block
-amdgpu-y += \
- amdgpu_dpm.o
-
# add DCE block
amdgpu-y += \
dce_v10_0.o \
@@ -212,7 +208,7 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
-include $(FULL_AMD_PATH)/powerplay/Makefile
+include $(FULL_AMD_PATH)/pm/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 327a0daf4a1d..87f095dc385c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -49,6 +49,8 @@
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -102,6 +104,7 @@
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#define MAX_GPU_INSTANCE 16
@@ -178,6 +181,7 @@ extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
+extern int amdgpu_bad_page_threshold;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
@@ -187,9 +191,11 @@ extern int amdgpu_force_asic_type;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
+extern bool no_system_mem_limit;
#else
static const int sched_policy = KFD_SCHED_POLICY_HWS;
static const bool debug_evictions; /* = false */
+static const bool no_system_mem_limit;
#endif
extern int amdgpu_tmz;
@@ -201,6 +207,7 @@ extern int amdgpu_si_support;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
+extern int amdgpu_num_kcq;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -212,6 +219,8 @@ extern int amdgpu_cik_support;
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
+#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8 MB for vga emulator and 1 MB for FB */
+
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -245,6 +254,7 @@ struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;
+struct amdgpu_hive_info;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -611,6 +621,8 @@ struct amdgpu_asic_funcs {
uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
/* device supports BACO */
bool (*supports_baco)(struct amdgpu_device *adev);
+ /* pre asic_init quirks */
+ void (*pre_asic_init)(struct amdgpu_device *adev);
};
/*
@@ -648,16 +660,6 @@ struct amdgpu_atcs {
};
/*
- * Firmware VRAM reservation
- */
-struct amdgpu_fw_vram_usage {
- u64 start_offset;
- u64 size;
- struct amdgpu_bo *reserved_bo;
- void *va;
-};
-
-/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -725,13 +727,13 @@ struct amd_powerplay {
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_device {
struct device *dev;
- struct drm_device *ddev;
struct pci_dev *pdev;
+ struct drm_device ddev;
#ifdef CONFIG_DRM_AMD_ACP
struct amdgpu_acp acp;
#endif
-
+ struct amdgpu_hive_info *hive;
/* ASIC */
enum amd_asic_type asic_type;
uint32_t family;
@@ -765,7 +767,6 @@ struct amdgpu_device {
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stolen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -881,6 +882,9 @@ struct amdgpu_device {
/* mmhub */
struct amdgpu_mmhub mmhub;
+ /* gfxhub */
+ struct amdgpu_gfxhub gfxhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -917,11 +921,6 @@ struct amdgpu_device {
/* display related functionality */
struct amdgpu_display_manager dm;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
- struct amdgpu_bo *discovery_memory;
-
/* mes */
bool enable_mes;
struct amdgpu_mes mes;
@@ -946,8 +945,6 @@ struct amdgpu_device {
struct delayed_work delayed_init_work;
struct amdgpu_virt virt;
- /* firmware VRAM reservation */
- struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@@ -961,9 +958,9 @@ struct amdgpu_device {
bool in_suspend;
bool in_hibernate;
- bool in_gpu_reset;
+ atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
- struct mutex lock_reset;
+ struct rw_semaphore reset_sem;
struct amdgpu_doorbell_index doorbell_index;
struct mutex notifier_lock;
@@ -995,34 +992,60 @@ struct amdgpu_device {
atomic_t throttling_logging_enabled;
struct ratelimit_state throttling_logging_rs;
+ uint32_t ras_features;
+
+ bool in_pci_err_recovery;
+ struct pci_saved_state *pci_state;
};
+static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
+{
+ return container_of(ddev, struct amdgpu_device, ddev);
+}
+
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+ return &adev->ddev;
+}
+
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr);
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u32 reg_data);
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u64 reg_data);
+
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1033,8 +1056,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
*/
#define AMDGPU_REGS_NO_KIQ (1<<1)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
@@ -1042,9 +1065,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1090,7 +1113,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
WREG32_SMC(_Reg, tmp); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
@@ -1141,10 +1164,12 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
/* Common functions */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job* job);
@@ -1194,7 +1219,7 @@ static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
@@ -1258,6 +1283,15 @@ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
+void amdgpu_pci_resume(struct pci_dev *pdev);
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+
#include "amdgpu_object.h"
/* used by df_v3_6.c and amdgpu_pmu.c */
@@ -1278,4 +1312,8 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
return adev->gmc.tmz_enabled;
}
+static inline int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+ return atomic_read(&adev->in_gpu_reset);
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 12247a32f9ef..d3e51d361179 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -136,9 +136,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
}
return 0;
}
@@ -157,8 +155,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- if (adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
}
return 0;
}
@@ -529,9 +526,7 @@ static int acp_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 913c8f0513bd..165b02e267b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -463,11 +463,11 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
- pm_runtime_get_sync(adev->ddev->dev);
+ pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ drm_helper_hpd_irq_event(adev_to_drm(adev));
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
/* TODO: check other events */
@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
adev->atif = atif;
- if (atif->notifications.brightness_change) {
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if (atif->notifications.brightness_change) {
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
@@ -817,7 +817,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
struct drm_encoder *tmp;
/* Find the encoder controlling the brightness */
- list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
+ list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
head) {
struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1b865fed74ca..0544460653b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -36,6 +36,8 @@
*/
uint64_t amdgpu_amdkfd_total_mem_size;
+static bool kfd_initialized;
+
int amdgpu_amdkfd_init(void)
{
struct sysinfo si;
@@ -51,19 +53,26 @@ int amdgpu_amdkfd_init(void)
#else
ret = -ENOENT;
#endif
+ kfd_initialized = !ret;
return ret;
}
void amdgpu_amdkfd_fini(void)
{
- kgd2kfd_exit();
+ if (kfd_initialized) {
+ kgd2kfd_exit();
+ kfd_initialized = false;
+ }
}
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
bool vf = amdgpu_sriov_vf(adev);
+ if (!kfd_initialized)
+ return;
+
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
adev->pdev, adev->asic_type, vf);
@@ -119,7 +128,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GMC_HOLE_START),
- .drm_render_minor = adev->ddev->render->index,
+ .drm_render_minor = adev_to_drm(adev)->render->index,
.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
};
@@ -160,7 +169,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
}
}
@@ -479,11 +488,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
goto out_put;
obj = dma_buf->priv;
- if (obj->dev->driver != adev->ddev->driver)
+ if (obj->dev->driver != adev_to_drm(adev)->driver)
/* Can't handle buffers from different drivers */
goto out_put;
- adev = obj->dev->dev_private;
+ adev = drm_to_adev(obj->dev);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT)))
@@ -517,8 +526,9 @@ out_put:
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ return amdgpu_vram_mgr_usage(vram_man);
}
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
@@ -571,6 +581,13 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
return adev->rev_id;
}
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->gmc.noretry;
+}
+
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
@@ -612,6 +629,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
job->vmid = vmid;
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+
if (ret) {
DRM_ERROR("amdgpu: failed to schedule IB.\n");
goto err_ib_sched;
@@ -755,4 +773,8 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
+
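+/* No-op stub used when the KFD backend is not built in */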
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ffe149aafc39..ea391ca7f2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -181,6 +181,7 @@ uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
@@ -207,11 +208,11 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
})
/* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
- struct file *filp, unsigned int pasid,
+ struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
@@ -270,5 +271,6 @@ int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 35d4a5ab0228..1afa8f122e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -283,22 +283,6 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
- pr_err("trying to set page table base for wrong VMID %u\n",
- vmid);
- return;
- }
-
- mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-
- gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
-}
-
const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -317,7 +301,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base =
+ kgd_gfx_v9_set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index bf927f432506..4763bab7a4d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -32,7 +32,6 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
-#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -105,7 +104,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -542,7 +541,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
#if 0
@@ -753,7 +752,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
}
/* SDMA is on gfxhub as well for Navi1* series */
- gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
@@ -776,6 +775,4 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
- .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index cdea1338c8dc..50016bf9c427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -31,7 +31,6 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
-#include "gfxhub_v2_1.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -657,7 +656,7 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
struct amdgpu_device *adev = get_amdgpu_device(kgd);
/* SDMA is on gfxhub as well for Navi1* series */
- gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
#if 0
@@ -822,7 +821,6 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.address_watch_get_offset = address_watch_get_offset_v10_3,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
#if 0
.enable_debug_trap = enable_debug_trap_v10_3,
.disable_debug_trap = disable_debug_trap_v10_3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 744366c7ee85..b91d27e39bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -139,7 +139,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -423,7 +423,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index feab4cc6e836..5ce0ce704a21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -96,7 +96,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -419,7 +419,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1102de76d876..43b18863a8b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -36,9 +36,7 @@
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
-#include "mmhub_v1_0.h"
-#include "gfxhub_v1_0.h"
-
+#include "gfx_v9_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -110,7 +108,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
unlock_srbm(kgd);
}
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -552,7 +550,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(kgd, pipe_id, queue_id);
@@ -690,7 +688,7 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -701,9 +699,182 @@ static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
return;
}
- mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
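+/*
+ * Wave counts are read behind both the SRBM and GRBM index mutexes so the
+ * register bank selection cannot change while SPI_CSQ state is sampled.
+ */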
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->srbm_mutex);
+ mutex_lock(&adev->grbm_idx_mutex);
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_unlock(&adev->grbm_idx_mutex);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
+ * get_wave_count() - Read device registers to get number of waves in flight for
+ * a particular queue. The method also returns the VMID associated with the
+ * queue.
+ *
+ * @adev: Handle of device whose registers are to be read
+ * @queue_idx: Index of queue in the queue-map bit-field
+ * @wave_cnt: Output parameter updated with number of waves in flight
+ * @vmid: Output parameter updated with VMID of queue whose wave count
+ * is being collected
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+ int *wave_cnt, int *vmid)
+{
+ int pipe_idx;
+ int queue_slot;
+ unsigned int reg_val;
+
+ /*
+ * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
+ * parameters to read out waves in flight. Get VMID if there are
+ * non-zero waves in flight.
+ */
+ *vmid = 0xFF;
+ *wave_cnt = 0;
+ pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+ queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+ reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+ queue_slot);
+ *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+ if (*wave_cnt != 0)
+ *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+ CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+}
+
+/**
+ * kgd_gfx_v9_get_cu_occupancy() - Reads relevant registers associated with each
+ * shader engine and aggregates the number of waves that are in flight for the
+ * process whose pasid is provided as a parameter. The process could have ZERO
+ * or more queues running and submitting waves to compute units.
+ *
+ * @kgd: Handle of device from which to get number of waves in flight
+ * @pasid: Identifies the process for which this query call is invoked
+ * @pasid_wave_cnt: Output parameter updated with number of waves in flight
+ * that belong to the process with the given pasid
+ * @max_waves_per_cu: Output parameter updated with maximum number of waves
+ * possible per Compute Unit
+ *
+ * Note: It's possible that the device has too many queues (oversubscription),
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ * Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ * Time T2: pasid = getPasid(vmid); vmid is associated with Pasid P2
+ * In the sequence above, the wave count obtained at time T1 will be
+ * incorrectly dropped from or added to the total wave count.
+ *
+ * The registers that provide the waves in flight are:
+ *
+ * SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
+ * queue is slotted, OFF if there is no queue. A process could have ZERO or
+ * more queues slotted and submitting waves to be run on compute units. Even
+ * when a queue is slotted it is possible it has zero wavefronts in flight;
+ * this can happen when the queue is waiting on top-of-pipe events, e.g. a
+ * waitRegMem command.
+ *
+ * For each bit that is ON from above:
+ *
+ * Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ * number of waves that are in flight for the queue at specified index. The
+ * index ranges from 0 to 7.
+ *
+ * If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
+ * of the wave(s).
+ *
+ * Determine if the VMID from the above step maps to the pasid provided as a
+ * parameter. If it matches, aggregate the wave count. A VMID that does not
+ * match the pasid is a normal condition, i.e. a device is expected to support
+ * multiple queues from multiple processes.
+ *
+ * Reading the registers referenced above involves programming GRBM
+ * appropriately.
+ */
+static void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+ int *pasid_wave_cnt, int *max_waves_per_cu)
+{
+ int qidx;
+ int vmid;
+ int se_idx;
+ int sh_idx;
+ int se_cnt;
+ int sh_cnt;
+ int wave_cnt;
+ int queue_map;
+ int pasid_tmp;
+ int max_queue_cnt;
+ int vmid_wave_cnt = 0;
+ struct amdgpu_device *adev;
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
+
+ adev = get_amdgpu_device(kgd);
+ lock_spi_csq_mutexes(adev);
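+	/* Bind GRBM to compute micro-engine 1; pipe/queue are selected per read */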
+ soc15_grbm_select(adev, 1, 0, 0, 0);
+
+ /*
+ * Iterate through the shader engines and arrays of the device
+ * to get number of waves in flight
+ */
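+	/* Queues not reserved by amdgpu's own compute rings are owned by KFD */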
+ bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+ KGD_MAX_QUEUES);
+ max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ sh_cnt = adev->gfx.config.max_sh_per_se;
+ se_cnt = adev->gfx.config.max_shader_engines;
+ for (se_idx = 0; se_idx < se_cnt; se_idx++) {
+ for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+
+ gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+ queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
+ mmSPI_CSQ_WF_ACTIVE_STATUS));
+
+ /*
+			 * Assumption: the queue map encodes the following
+			 * schema: four pipes per micro-engine, with each pipe
+			 * mapping eight queues. This schema holds for GFX9
+			 * devices and must be verified for newer families.
+ */
+ for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+
+				/* Skip queues that are not associated with
+ * compute functions
+ */
+ if (!test_bit(qidx, cp_queue_bitmap))
+ continue;
+
+ if (!(queue_map & (1 << qidx)))
+ continue;
+
+ /* Get number of waves in flight and aggregate them */
+ get_wave_count(adev, qidx, &wave_cnt, &vmid);
+ if (wave_cnt != 0) {
+ pasid_tmp =
+ RREG32(SOC15_REG_OFFSET(OSSSYS, 0,
+ mmIH_VMID_0_LUT) + vmid);
+ if (pasid_tmp == pasid)
+ vmid_wave_cnt += wave_cnt;
+ }
+ }
+ }
+ }
+
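+	/* Restore broadcast SE/SH selection and the default GRBM bank */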
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ unlock_spi_csq_mutexes(adev);
- gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+ /* Update the output parameters and return */
+ *pasid_wave_cnt = vmid_wave_cnt;
+ *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+ adev->gfx.cu_info.max_waves_per_simd;
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
@@ -726,6 +897,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
- .get_hive_id = amdgpu_amdkfd_get_hive_id,
- .get_unique_id = amdgpu_amdkfd_get_unique_id,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index aedf67d57449..fc8934b86d93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -26,7 +26,7 @@ void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases);
-int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid);
int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -60,3 +60,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
+
+void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+ uint32_t vmid, uint64_t page_table_base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index a58af513c952..5da487b64a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -148,8 +148,12 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
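+	/* Hint at the no_system_mem_limit override before enforcing the caps */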
+ if (kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit)
+ pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+
if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit) ||
+ kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev->kfd.vram_used + vram_needed >
@@ -562,7 +566,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
mutex_lock(&process_info->lock);
- ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
+ ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
if (ret) {
pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
goto out;
@@ -992,7 +996,7 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
+int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef)
{
@@ -1028,7 +1032,7 @@ amdgpu_vm_init_fail:
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
- struct file *filp, unsigned int pasid,
+ struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef)
{
@@ -1668,7 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
return -EINVAL;
obj = dma_buf->priv;
- if (obj->dev->dev_private != adev)
+ if (drm_to_adev(obj->dev) != adev)
/* Can't handle buffers from other devices */
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 29f767e026e4..469352e2d6ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -148,7 +148,7 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
- adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -541,7 +541,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
}
}
- amdgpu_link_encoder_connector(adev->ddev);
+ amdgpu_link_encoder_connector(adev_to_drm(adev));
return true;
}
@@ -1786,9 +1786,9 @@ static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
- adev->fw_vram_usage.start_offset = (start_addr &
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->fw_vram_usage.size = size << 10;
+ adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
@@ -1882,7 +1882,7 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
WREG32(reg, val);
}
@@ -1898,7 +1898,7 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
uint32_t r;
r = RREG32(reg);
@@ -1916,7 +1916,7 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
*/
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
WREG32_IO(reg, val);
}
@@ -1932,7 +1932,7 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
*/
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
- struct amdgpu_device *adev = info->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
uint32_t r;
r = RREG32_IO(reg);
@@ -1944,7 +1944,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
@@ -1995,7 +1995,7 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
return -ENOMEM;
adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
+ atom_card_info->dev = adev_to_drm(adev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 1279053324f9..b4df6460e45a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -89,9 +89,9 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
- adev->fw_vram_usage.start_offset = (start_addr &
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
- adev->fw_vram_usage.size = size << 10;
+ adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
@@ -543,6 +543,7 @@ int amdgpu_mem_train_support(struct amdgpu_device *adev)
case HW_REV(11, 0, 0):
case HW_REV(11, 0, 5):
case HW_REV(11, 0, 7):
+ case HW_REV(11, 0, 11):
ret = 1;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..7abe9500c0c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -616,7 +616,7 @@ static bool amdgpu_atpx_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
@@ -626,7 +626,7 @@ static bool amdgpu_atpx_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index b1172d93c99c..6333cada1e09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -417,26 +417,40 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
- if (amdgpu_atrm_get_bios(adev))
+ if (amdgpu_atrm_get_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
goto success;
+ }
- if (amdgpu_acpi_vfct_bios(adev))
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
goto success;
+ }
- if (igp_read_bios_from_vram(adev))
+ if (igp_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
+ }
- if (amdgpu_read_bios(adev))
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
+ }
- if (amdgpu_read_bios_from_rom(adev))
+ if (amdgpu_read_bios_from_rom(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM\n");
goto success;
+ }
- if (amdgpu_read_disabled_bios(adev))
+ if (amdgpu_read_disabled_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n");
goto success;
+ }
- if (amdgpu_read_platform_bios(adev))
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
goto success;
+ }
DRM_ERROR("Unable to locate a BIOS ROM\n");
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 4053597b3af2..15c45b2a3983 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -265,7 +265,7 @@ error_free:
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a1aec205435d..65d1b23d7e74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -26,6 +26,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -41,7 +42,7 @@
void amdgpu_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* bail if the connector does not have hpd pin, e.g.,
@@ -279,7 +280,7 @@ amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
static void amdgpu_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->edid)
@@ -463,7 +464,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
@@ -834,7 +835,7 @@ static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* XXX check mode bandwidth */
@@ -941,7 +942,7 @@ static bool
amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status status;
@@ -972,7 +973,7 @@ static enum drm_connector_status
amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
const struct drm_encoder_helper_funcs *encoder_funcs;
int r;
@@ -1159,7 +1160,7 @@ static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* XXX check mode bandwidth */
@@ -1311,7 +1312,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if ((adev->clock.default_dispclk >= 53900) &&
amdgpu_connector_encoder_is_hbr2(connector)) {
@@ -1325,7 +1326,7 @@ static enum drm_connector_status
amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
@@ -1413,6 +1414,10 @@ out:
pm_runtime_put_autosuspend(connector->dev->dev);
}
+ drm_dp_set_subconnector_property(&amdgpu_connector->base,
+ ret,
+ amdgpu_dig_connector->dpcd,
+ amdgpu_dig_connector->downstream_ports);
return ret;
}
@@ -1521,7 +1526,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
@@ -1959,6 +1964,11 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_dp_subconnector_property(&amdgpu_connector->base);
+ }
+
return;
failed:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a512ccbc4dea..12598a4b5c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -299,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
{
s64 time_us, increment_us;
u64 free_vram, total_vram, used_vram;
-
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
* throttling.
*
@@ -316,7 +316,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
- used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ used_vram = amdgpu_vram_mgr_usage(vram_man);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -363,7 +363,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_vis_usage(vram_man);
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
@@ -1275,13 +1275,24 @@ error_unlock:
return r;
}
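+/* Emit one trace event per IB; skip the loop when the tracepoint is disabled */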
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+{
+ int i;
+
+ if (!trace_amdgpu_cs_enabled())
+ return;
+
+ for (i = 0; i < parser->job->num_ibs; i++)
+ trace_amdgpu_cs(parser, i);
+}
+
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
- int i, r;
+ int r;
if (amdgpu_ras_intr_triggered())
return -EHWPOISON;
@@ -1294,7 +1305,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser %d!\n", r);
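+		/* Ratelimited: userspace can hit this error path at will */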
+ if (printk_ratelimit())
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
@@ -1319,8 +1331,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
reserved_buffers = true;
- for (i = 0; i < parser.job->num_ibs; i++)
- trace_amdgpu_cs(&parser, i);
+ trace_amdgpu_cs_ibs(&parser);
r = amdgpu_cs_vm_handling(&parser);
if (r)
@@ -1421,7 +1432,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_fence_to_handle *info = data;
struct dma_fence *fence;
struct drm_syncobj *syncobj;
@@ -1597,7 +1608,7 @@ err_free_fence_array:
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8842c55d4490..c80d8339f58c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -46,7 +46,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum drm_sched_priority priority)
{
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
return -EINVAL;
/* NORMAL and below are accessible by everyone */
@@ -65,7 +65,7 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
switch (prio) {
- case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_HIGH:
case DRM_SCHED_PRIORITY_KERNEL:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
@@ -114,7 +114,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
- if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+ /* disable load balance if the hw engine retains context among dependent jobs */
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+ hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+ hw_ip == AMDGPU_HW_IP_UVD_ENC ||
+ hw_ip == AMDGPU_HW_IP_UVD) {
sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
@@ -385,16 +389,15 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- r = 0;
id = args->in.ctx_id;
- priority = amdgpu_to_sched_priority(args->in.priority);
+ r = amdgpu_to_sched_priority(args->in.priority, &priority);
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == DRM_SCHED_PRIORITY_INVALID)
+ if (r == -EINVAL)
priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 193ffdb957b6..2d125b8b15ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -34,6 +34,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
+#include "amdgpu_rap.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -68,8 +69,8 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
+ adev_to_drm(adev)->primary->debugfs_root,
+ adev_to_drm(adev)->primary);
#endif
return 0;
}
@@ -100,14 +101,18 @@ static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
file->private_data = adev;
- mutex_lock(&adev->lock_reset);
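+	/* Take reset_sem for read so a concurrent GPU reset cannot race the dump */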
+ ret = down_read_killable(&adev->reset_sem);
+ if (ret)
+ return ret;
+
if (adev->autodump.dumping.done) {
reinit_completion(&adev->autodump.dumping);
ret = 0;
} else {
ret = -EBUSY;
}
- mutex_unlock(&adev->lock_reset);
+
+ up_read(&adev->reset_sem);
return ret;
}
@@ -126,7 +131,7 @@ static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_
poll_wait(file, &adev->autodump.gpu_hang, poll_table);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return POLLIN | POLLRDNORM | POLLWRNORM;
return 0;
@@ -146,7 +151,7 @@ static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
init_waitqueue_head(&adev->autodump.gpu_hang);
debugfs_create_file("amdgpu_autodump", 0600,
- adev->ddev->primary->debugfs_root,
+ adev_to_drm(adev)->primary->debugfs_root,
adev, &autodump_debug_fops);
}
@@ -222,23 +227,23 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
*pos &= (1UL << 22) - 1;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
}
@@ -262,7 +267,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
}
if (r) {
result = r;
@@ -287,8 +292,8 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -335,15 +340,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -353,8 +358,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
value = RREG32_PCIE(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -365,8 +370,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -394,15 +399,15 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -411,8 +416,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -425,8 +430,8 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -454,15 +459,15 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -472,8 +477,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
value = RREG32_DIDT(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -484,8 +489,8 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -513,15 +518,15 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -530,8 +535,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -544,8 +549,8 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -573,15 +578,15 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -591,8 +596,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
value = RREG32_SMC(*pos);
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -603,8 +608,8 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -632,15 +637,15 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -649,8 +654,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -663,8 +668,8 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return result;
@@ -791,22 +796,22 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
amdgpu_virt_disable_access_debugfs(adev);
@@ -873,15 +878,15 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_virt_enable_access_debugfs(adev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -896,8 +901,8 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
amdgpu_virt_disable_access_debugfs(adev);
@@ -971,7 +976,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (!data)
return -ENOMEM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
goto err;
@@ -994,8 +999,8 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
uint32_t value;
@@ -1017,7 +1022,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return result;
err:
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
kfree(data);
return r;
}
@@ -1042,9 +1047,9 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1053,8 +1058,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = get_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1066,8 +1071,8 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return result;
}
@@ -1091,7 +1096,7 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0)
return r;
@@ -1100,15 +1105,15 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = amdgpu_get_gfx_off_status(adev, &value);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = put_user(value, (uint32_t *)buf);
if (r) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1118,8 +1123,8 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
size -= 4;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return result;
}
@@ -1211,7 +1216,7 @@ static const char *debugfs_regs_names[] = {
*/
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
unsigned int i;
@@ -1231,17 +1236,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0, i;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
	/* Avoid accidentally unparking the sched thread during GPU reset */
- mutex_lock(&adev->lock_reset);
+ r = down_read_killable(&adev->reset_sem);
+ if (r)
+ return r;
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -1268,7 +1275,7 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
- mutex_unlock(&adev->lock_reset);
+ up_read(&adev->reset_sem);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1280,7 +1287,7 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
seq_write(m, adev->bios, adev->bios_size);
return 0;
@@ -1290,12 +1297,12 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1311,12 +1318,12 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -1458,7 +1465,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
return -ENOMEM;
	/* Avoid accidentally unparking the sched thread during GPU reset */
- mutex_lock(&adev->lock_reset);
+ r = down_read_killable(&adev->reset_sem);
+ if (r)
+ goto pro_end;
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1499,13 +1508,14 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
- mutex_unlock(&adev->lock_reset);
+ up_read(&adev->reset_sem);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+pro_end:
kfree(fences);
- return 0;
+ return r;
}
static int amdgpu_debugfs_sclk_set(void *data, u64 val)
@@ -1517,9 +1527,9 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return -EINVAL;
- ret = pm_runtime_get_sync(adev->ddev->dev);
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
@@ -1532,8 +1542,8 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
return 0;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
return -EINVAL;
@@ -1553,7 +1563,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root, adev,
+ adev_to_drm(adev)->primary->debugfs_root, adev,
&fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
@@ -1562,7 +1572,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
adev->smu.debugfs_sclk =
debugfs_create_file("amdgpu_force_sclk", 0200,
- adev->ddev->primary->debugfs_root, adev,
+ adev_to_drm(adev)->primary->debugfs_root, adev,
&fops_sclk_set);
if (!(adev->smu.debugfs_sclk)) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
@@ -1623,6 +1633,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_autodump_init(adev);
+ amdgpu_rap_debugfs_init(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d0b8d0d341af..e3783f5a459d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -80,6 +80,7 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
@@ -130,7 +131,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
@@ -155,7 +156,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}
@@ -177,7 +178,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}
@@ -199,7 +200,7 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}
@@ -217,7 +218,7 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
*/
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (adev->flags & AMD_IS_PX)
return true;
@@ -234,14 +235,16 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
*/
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_asic_supports_baco(adev);
}
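The adev_to_drm()/drm_to_adev() conversions used throughout this patch rely on struct amdgpu_device now embedding its struct drm_device (note the later adev->ddev.pdev dereference in amdgpu_device_init). The helpers are presumably thin accessors along these lines (a sketch, not the verbatim amdgpu.h definitions):

	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return &adev->ddev;	/* drm_device embedded in amdgpu_device */
	}

	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return container_of(ddev, struct amdgpu_device, ddev);
	}

This is what removes the old dev->dev_private indirection seen in the hunks above and below.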
+/*
+ * VRAM access helper functions
+ */
+
/**
- * VRAM access helper functions.
- *
* amdgpu_device_vram_access - read/write a buffer in vram
*
* @adev: amdgpu_device pointer
@@ -301,10 +304,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
/*
- * MMIO register access helper functions.
+ * register access helper functions.
*/
/**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -312,25 +315,29 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*
* Returns the 32 bit value from the offset specified.
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags)
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_kiq_rreg(adev, reg);
-
- if ((reg * 4) < adev->rmmio_size)
- ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ if (adev->in_pci_err_recovery)
+ return 0;
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_sem)) {
+ ret = amdgpu_kiq_rreg(adev, reg);
+ up_read(&adev->reset_sem);
+ } else {
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ ret = adev->pcie_rreg(adev, reg * 4);
}
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+
return ret;
}
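Callers pick the KIQ or direct path through acc_flags; the convenience wrappers used elsewhere in the driver presumably look like this (a sketch assuming a local adev variable, as the amdgpu.h register macros conventionally do):

	#define RREG32(reg)		amdgpu_device_rreg(adev, (reg), 0)
	#define RREG32_NO_KIQ(reg)	amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
	#define WREG32(reg, v)		amdgpu_device_wreg(adev, (reg), (v), 0)
	#define WREG32_NO_KIQ(reg, v)	amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

The down_read_trylock() on reset_sem means a read issued while a reset holds the semaphore for writing falls back to plain MMIO instead of blocking on the KIQ.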
@@ -348,7 +355,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
*
* Returns the 8 bit value from the offset specified.
*/
-uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (offset < adev->rmmio_size)
return (readb(adev->rmmio + offset));
BUG();
@@ -369,31 +380,19 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset) {
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value) {
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
+{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (offset < adev->rmmio_size)
writeb(value, adev->rmmio + offset);
else
BUG();
}
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
-{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
- if ((reg * 4) < adev->rmmio_size)
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
-}
-
/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a memory mapped IO or indirect register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -402,13 +401,27 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
{
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
- return amdgpu_kiq_wreg(adev, reg, v);
+ if (adev->in_pci_err_recovery)
+ return;
+
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_sem)) {
+ amdgpu_kiq_wreg(adev, reg, v);
+ up_read(&adev->reset_sem);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ adev->pcie_wreg(adev, reg * 4, v);
+ }
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
/*
@@ -416,18 +429,20 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
*
* this function is invoked only the debugfs register access
* */
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v)
{
- if (amdgpu_sriov_fullaccess(adev) &&
- adev->gfx.rlc.funcs &&
- adev->gfx.rlc.funcs->is_rlcg_access_range) {
+ if (adev->in_pci_err_recovery)
+ return;
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
-
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
/**
@@ -440,6 +455,9 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
*/
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if ((reg * 4) < adev->rio_mem_size)
return ioread32(adev->rio_mem + (reg * 4));
else {
@@ -459,6 +477,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
*/
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
else {
@@ -478,6 +499,9 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (index < adev->doorbell.num_doorbells) {
return readl(adev->doorbell.ptr + index);
} else {
@@ -498,6 +522,9 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (index < adev->doorbell.num_doorbells) {
writel(v, adev->doorbell.ptr + index);
} else {
@@ -516,6 +543,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
+ if (adev->in_pci_err_recovery)
+ return 0;
+
if (index < adev->doorbell.num_doorbells) {
return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
} else {
@@ -536,6 +566,9 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
+ if (adev->in_pci_err_recovery)
+ return;
+
if (index < adev->doorbell.num_doorbells) {
atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
} else {
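Every accessor above now bails out early while adev->in_pci_err_recovery is set, so reads silently return 0 during recovery. A caller that must distinguish "register reads as zero" from "device quiesced" has to test the flag itself, e.g. (illustrative):

	if (adev->in_pci_err_recovery)
		return -ENODEV;	/* MMIO results are not trustworthy here */
	val = RREG32(reg);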
@@ -544,9 +577,138 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
}
/**
+ * amdgpu_device_indirect_rreg - read an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr)
+{
+ unsigned long flags;
+ u32 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr)
+{
+ unsigned long flags;
+ u64 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* read low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ /* read high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ r |= ((u64)readl(pcie_data_offset) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_wreg - write an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u32 reg_data)
+{
+ unsigned long flags;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel(reg_data, pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset
+ * @pcie_data: mmio register offset
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 pcie_index, u32 pcie_data,
+ u32 reg_addr, u64 reg_data)
+{
+ unsigned long flags;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* write low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+ readl(pcie_data_offset);
+ /* write high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data >> 32), pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
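These four helpers centralize the index/data pairing that each SoC file previously open-coded; SoC code is expected to route its pcie_rreg/pcie_wreg callbacks through them, presumably along these lines (a sketch based on the nbio callbacks; exact function names may differ per ASIC):

	static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
	{
		u32 address = adev->nbio.funcs->get_pcie_index_offset(adev);
		u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);

		return amdgpu_device_indirect_rreg(adev, address, data, reg);
	}

	static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
	{
		u32 address = adev->nbio.funcs->get_pcie_index_offset(adev);
		u32 data = adev->nbio.funcs->get_pcie_data_offset(adev);

		amdgpu_device_indirect_wreg(adev, address, data, reg, v);
	}

Note that the 64-bit variants hold pcie_idx_lock across both 32-bit halves, so the pair is observed atomically with respect to other indirect accesses.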
+
+/**
* amdgpu_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -563,7 +725,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -580,7 +742,7 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
/**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -597,7 +759,7 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg64 - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -614,7 +776,7 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint
/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
*
@@ -634,7 +796,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
/**
* amdgpu_block_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
* @v: value to write to the register
@@ -652,9 +814,23 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_asic_init - Wrapper for atom asic_init
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Does any asic specific work and then calls atom asic init.
+ */
+static int amdgpu_device_asic_init(struct amdgpu_device *adev)
+{
+ amdgpu_asic_pre_asic_init(adev);
+
+ return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+}
+
+/**
* amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
@@ -671,7 +847,7 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
/**
* amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
@@ -1197,6 +1373,15 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_gmc_tmz_set(adev);
+ if (amdgpu_num_kcq == -1) {
+ amdgpu_num_kcq = 8;
+ } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
+ amdgpu_num_kcq = 8;
+ dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
+ }
+
+ amdgpu_gmc_noretry_set(adev);
+
return 0;
}
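amdgpu_num_kcq arrives as a module parameter; its declaration in amdgpu_drv.c presumably follows the usual pattern (a sketch; the help text is illustrative):

	int amdgpu_num_kcq = -1;
	module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
	MODULE_PARM_DESC(num_kcq,
			 "number of kernel compute queues to set up (0-8, -1 = default of 8)");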
@@ -1209,7 +1394,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
* Callback for the switcheroo driver. Suspends or resumes the
* the asics before or after it is powered up using ACPI methods.
*/
-static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
@@ -1223,7 +1409,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
+ amdgpu_device_load_pci_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
DRM_WARN("pci_enable_device failed (%d)\n", r);
@@ -1236,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
- pci_save_state(dev->pdev);
+ amdgpu_device_cache_pci_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3cold);
@@ -1502,7 +1688,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
const char *pci_address_name = pci_name(ddev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
@@ -1561,7 +1747,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->discovery_bin) {
+ if (adev->mman.discovery_bin) {
amdgpu_discovery_get_gfx_info(adev);
/*
@@ -1620,7 +1806,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
case CHIP_NAVI10:
chip_name = "navi10";
@@ -1929,7 +2118,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].status.hw == true)
break;
- if (adev->in_gpu_reset || adev->in_suspend) {
+ if (amdgpu_in_reset(adev) || adev->in_suspend) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
DRM_ERROR("resume of IP block <%s> failed %d\n",
@@ -2049,13 +2238,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
* it should be called after amdgpu_device_ip_hw_init_phase2 since
* for some ASICs the RAS EEPROM code relies on SMU fully functioning
* for I2C communication, which is only true at this point.
- * recovery_init may fail, but it can free all resources allocated by
- * itself and its failure should not stop amdgpu init process.
+ *
+ * amdgpu_ras_recovery_init may fail, but the caller only cares about
+ * failures caused by a bad GPU state and stops the amdgpu init process
+ * accordingly. For other failures, it still releases all the resources
+ * and prints an error message, rather than returning a negative value
+ * to the upper level.
*
* Note: theoretically, this should be called before all vram allocations
* to protect retired page from abusing
*/
- amdgpu_ras_recovery_init(adev);
+ r = amdgpu_ras_recovery_init(adev);
+ if (r)
+ goto init_failed;
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
@@ -2100,7 +2295,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
AMDGPU_RESET_MAGIC_NUM))
return true;
- if (!adev->in_gpu_reset)
+ if (!amdgpu_in_reset(adev))
return false;
/*
@@ -2211,9 +2406,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
if (!(adev->flags & AMD_IS_APU) &&
- !gpu_ins->mgpu_fan_enabled &&
- adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+ !gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
break;
@@ -2568,17 +2761,16 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
- for (i = 0; i < adev->num_ip_blocks; i++)
- adev->ip_blocks[i].status.hw = false;
-
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
int j;
struct amdgpu_ip_block *block;
- for (j = 0; j < adev->num_ip_blocks; j++) {
- block = &adev->ip_blocks[j];
+ block = &adev->ip_blocks[i];
+ block->status.hw = false;
- if (block->version->type != ip_order[i] ||
+ for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
+
+ if (block->version->type != ip_order[j] ||
!block->status.valid)
continue;
@@ -2771,6 +2963,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
@@ -2819,13 +3017,13 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
/**
* amdgpu_device_has_dc_support - check if dc is supported
*
- * @adev: amdgpu_device_pointer
+ * @adev: amdgpu_device pointer
*
* Returns true for supported, false for not supported
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
return false;
return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -2836,7 +3034,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
container_of(__work, struct amdgpu_device, xgmi_reset_work);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
/* It's a bug to not have a hive within this function */
if (WARN_ON(!hive))
@@ -2851,13 +3049,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
task_barrier_enter(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
if (adev->asic_reset_res)
goto fail;
task_barrier_exit(&hive->tb);
- adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
if (adev->asic_reset_res)
goto fail;
@@ -2873,7 +3071,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
fail:
if (adev->asic_reset_res)
DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
- adev->asic_reset_res, adev->ddev->unique);
+ adev->asic_reset_res, adev_to_drm(adev)->unique);
+ amdgpu_put_xgmi_hive(hive);
}
static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
@@ -2952,12 +3151,11 @@ static const struct attribute *amdgpu_dev_attributes[] = {
NULL
};
+
/**
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @ddev: drm dev pointer
- * @pdev: pci dev pointer
* @flags: driver flags
*
* Initializes the driver info and hw (all asics).
@@ -2965,18 +3163,15 @@ static const struct attribute *amdgpu_dev_attributes[] = {
* Called at driver startup.
*/
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags)
{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct pci_dev *pdev = adev->pdev;
int r, i;
bool boco = false;
u32 max_MBps;
adev->shutdown = false;
- adev->dev = &pdev->dev;
- adev->ddev = ddev;
- adev->pdev = pdev;
adev->flags = flags;
if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
@@ -3032,7 +3227,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
- mutex_init(&adev->lock_reset);
+ atomic_set(&adev->in_gpu_reset, 0);
+ init_rwsem(&adev->reset_sem);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
@@ -3127,13 +3323,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return r;
+ goto failed_unmap;
}
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- return r;
+ goto failed_unmap;
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3174,6 +3370,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
+ pci_enable_pcie_error_reporting(adev->ddev.pdev);
+
/* Post card if necessary */
if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
@@ -3182,7 +3380,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
DRM_INFO("GPU posting now...\n");
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
goto failed;
@@ -3220,7 +3418,7 @@ fence_driver_init:
}
/* init the mode config */
- drm_mode_config_init(adev->ddev);
+ drm_mode_config_init(adev_to_drm(adev));
r = amdgpu_device_ip_init(adev);
if (r) {
@@ -3316,16 +3514,18 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
- if (r) {
+ if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
- return r;
- }
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
if (r)
dev_err(adev->dev, "amdgpu_pmu_init failed\n");
+ /* Keep the stored PCI config space at hand for restore after a sudden PCI error */
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(pdev);
+
return 0;
failed:
@@ -3333,6 +3533,10 @@ failed:
if (boco)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+failed_unmap:
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+
return r;
}
@@ -3346,31 +3550,33 @@ failed:
*/
void amdgpu_device_fini(struct amdgpu_device *adev)
{
- int r;
-
- DRM_INFO("amdgpu: finishing device.\n");
+ dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+ kfree(adev->pci_state);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
* */
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_request_full_gpu(adev, false);
+ amdgpu_virt_fini_data_exchange(adev);
+ }
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
- drm_helper_force_disable_all(adev->ddev);
+ drm_helper_force_disable_all(adev_to_drm(adev));
else
- drm_atomic_helper_shutdown(adev->ddev);
+ drm_atomic_helper_shutdown(adev_to_drm(adev));
}
amdgpu_fence_driver_fini(adev);
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_device_ip_fini(adev);
+ amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3388,7 +3594,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_boco(adev->ddev))
+ if (amdgpu_device_supports_boco(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
@@ -3404,7 +3610,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->discovery_bin)
+ if (adev->mman.discovery_bin)
amdgpu_discovery_fini(adev);
}
@@ -3430,11 +3636,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
struct drm_connector_list_iter iter;
int r;
- if (dev == NULL || dev->dev_private == NULL) {
- return -ENODEV;
- }
-
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -3522,7 +3724,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_crtc *crtc;
int r = 0;
@@ -3531,14 +3733,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
/* post card */
if (amdgpu_device_need_post(adev)) {
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ r = amdgpu_device_asic_init(adev);
if (r)
- DRM_ERROR("amdgpu asic init failed\n");
+ dev_err(adev->dev, "amdgpu asic init failed\n");
}
r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
+ dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
}
amdgpu_fence_driver_resume(adev);
@@ -3562,7 +3764,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
@@ -3652,7 +3854,7 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hang =
adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
+ dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
@@ -3713,7 +3915,7 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("Some block need full reset!\n");
+ dev_info(adev->dev, "Some block need full reset!\n");
return true;
}
}
@@ -3801,7 +4003,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
else
tmo = msecs_to_jiffies(100);
- DRM_INFO("recover vram bo from shadow start\n");
+ dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
@@ -3837,11 +4039,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dma_fence_put(fence);
if (r < 0 || tmo <= 0) {
- DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
+ dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
return -EIO;
}
- DRM_INFO("recover vram bo from shadow done\n");
+ dev_info(adev->dev, "recover vram bo from shadow done\n");
return 0;
}
@@ -3849,7 +4051,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
@@ -3876,7 +4078,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_virt_init_data_exchange(adev);
/* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+ amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
r = amdgpu_device_fw_loading(adev);
if (r)
@@ -3902,9 +4104,37 @@ error:
}
/**
+ * amdgpu_device_has_job_running - check if there is any job in mirror list
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * check if there is any job in mirror list
+ */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
+{
+ int i;
+ struct drm_sched_job *job;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ spin_lock(&ring->sched.job_list_lock);
+ job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
+ struct drm_sched_job, node);
+ spin_unlock(&ring->sched.job_list_lock);
+ if (job)
+ return true;
+ }
+ return false;
+}
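amdgpu_device_has_job_running() lets error-handling code poll for in-flight work instead of blindly tearing the rings down; a hedged usage sketch:

	/* bounded wait for in-flight jobs to drain before a forced reset */
	for (i = 0; i < 100 && amdgpu_device_has_job_running(adev); i++)
		msleep(10);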
+
+/**
* amdgpu_device_should_recover_gpu - check if we should try GPU recovery
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
*
* Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
* a hung GPU.
@@ -3912,7 +4142,7 @@ error:
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
if (!amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("Timeout, but no hardware hang detected.\n");
+ dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
return false;
}
@@ -3952,7 +4182,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return true;
disabled:
- DRM_INFO("GPU recovery disabled.\n");
+ dev_info(adev->dev, "GPU recovery disabled.\n");
return false;
}
@@ -3966,6 +4196,11 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
amdgpu_debugfs_wait_dump(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ }
+
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3991,7 +4226,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
need_full_reset = true;
}
}
@@ -4007,7 +4242,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
struct list_head *device_list_handle,
- bool *need_full_reset_arg)
+ bool *need_full_reset_arg,
+ bool skip_hw_reset)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset = *need_full_reset_arg, vram_lost = false;
@@ -4017,7 +4253,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
* ASIC reset has to be done on all HGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
*/
- if (need_full_reset) {
+ if (!skip_hw_reset && need_full_reset) {
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -4027,8 +4263,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
r = amdgpu_asic_reset(tmp_adev);
if (r) {
- DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
- r, tmp_adev->ddev->unique);
+ dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
break;
}
}
@@ -4060,8 +4296,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
if (need_full_reset) {
/* post card */
- if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
- DRM_WARN("asic atom init failed!");
+ if (amdgpu_device_asic_init(tmp_adev))
+ dev_warn(tmp_adev->dev, "asic atom init failed!");
if (!r) {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
@@ -4075,8 +4311,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
amdgpu_inc_vram_lost(tmp_adev);
}
- r = amdgpu_gtt_mgr_recover(
- &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
if (r)
goto out;
@@ -4103,8 +4338,23 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
amdgpu_fbdev_set_suspend(tmp_adev, 0);
- /* must succeed. */
- amdgpu_ras_resume(tmp_adev);
+ /*
+ * The GPU enters a bad state once the number of faulty pages
+ * recorded by ECC reaches the threshold, and RAS recovery is
+ * scheduled next. So check here whether the bad page threshold
+ * has indeed been exceeded, break recovery if so, and remind the
+ * user to retire this GPU or set a bigger bad_page_threshold
+ * value the next time the driver is probed.
+ if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
+ /* must succeed. */
+ amdgpu_ras_resume(tmp_adev);
+ } else {
+ r = -EINVAL;
+ goto out;
+ }
/* Update PSP FW topology after reset */
if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
@@ -4112,7 +4362,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
}
-
out:
if (!r) {
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
@@ -4137,16 +4386,19 @@ end:
return r;
}
-static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
{
- if (trylock) {
- if (!mutex_trylock(&adev->lock_reset))
- return false;
- } else
- mutex_lock(&adev->lock_reset);
+ if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
+ return false;
+
+ if (hive) {
+ down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
+ } else {
+ down_write(&adev->reset_sem);
+ }
atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = true;
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -4166,8 +4418,8 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_write(&adev->reset_sem);
}
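With in_gpu_reset converted to an atomic_t, the amdgpu_in_reset() test used in earlier hunks is presumably a plain atomic read (a sketch):

	static inline bool amdgpu_in_reset(struct amdgpu_device *adev)
	{
		return atomic_read(&adev->in_gpu_reset) != 0;
	}

The cmpxchg in amdgpu_device_lock_adev() guarantees a single reset owner, while reset_sem serializes the reset against readers such as the debugfs and KIQ paths above.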
static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
@@ -4231,7 +4483,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @job: which job trigger hang
*
* Attempt to reset the GPU if it has hung (all asics).
@@ -4251,7 +4503,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
- /**
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
@@ -4277,12 +4529,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
- hive = amdgpu_get_xgmi_hive(adev, true);
- if (hive && !mutex_trylock(&hive->reset_lock)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
- job ? job->base.id : -1, hive->hive_id);
- mutex_unlock(&hive->hive_lock);
- return 0;
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
+ job ? job->base.id : -1, hive->hive_id);
+ amdgpu_put_xgmi_hive(hive);
+ return 0;
+ }
+ mutex_lock(&hive->hive_lock);
}
/*
@@ -4304,11 +4559,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
+ dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
job ? job->base.id : -1);
- mutex_unlock(&hive->hive_lock);
- return 0;
+ r = 0;
+ goto skip_recovery;
}
/*
@@ -4376,12 +4631,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
r = amdgpu_device_pre_asic_reset(tmp_adev,
- NULL,
+ (tmp_adev == adev) ? job : NULL,
&need_full_reset);
/*TODO Should we stop ?*/
if (r) {
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, tmp_adev->ddev->unique);
+ dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev_to_drm(tmp_adev)->unique);
tmp_adev->asic_reset_res = r;
}
}
@@ -4393,7 +4648,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
if (r && r == -EAGAIN)
goto retry;
}
@@ -4417,7 +4672,7 @@ skip_hw_reset:
}
if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
- drm_helper_resume_force_mode(tmp_adev->ddev);
+ drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
tmp_adev->asic_reset_res = 0;
@@ -4441,9 +4696,11 @@ skip_sched_resume:
amdgpu_device_unlock_adev(tmp_adev);
}
+skip_recovery:
if (hive) {
- mutex_unlock(&hive->reset_lock);
+ atomic_set(&hive->in_reset, 0);
mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
}
if (r)
@@ -4589,10 +4846,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
int amdgpu_device_baco_enter(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!amdgpu_device_supports_baco(adev->ddev))
+ if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
if (ras && ras->supported)
@@ -4603,11 +4860,11 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
int amdgpu_device_baco_exit(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
- if (!amdgpu_device_supports_baco(adev->ddev))
+ if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
return -ENOTSUPP;
ret = amdgpu_dpm_baco_exit(adev);
@@ -4619,3 +4876,235 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
return 0;
}
+
+static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ cancel_delayed_work_sync(&ring->sched.work_tdr);
+ }
+}
+
+/**
+ * amdgpu_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
+ */
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ DRM_WARN("No support for XGMI hive yet...");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ /* Fatal error, prepare for slot reset */
+ case pci_channel_io_frozen:
+ /*
+ * Cancel and wait for all TDRs in progress if failing to
+ * set adev->in_gpu_reset in amdgpu_device_lock_adev
+ *
+ * Locking adev->reset_sem will prevent any external access
+ * to GPU during PCI error recovery
+ */
+ while (!amdgpu_device_lock_adev(adev, NULL))
+ amdgpu_cancel_all_tdr(adev);
+
+ /*
+ * Block any work scheduling as we do for regular GPU reset
+ * for the duration of the recovery
+ */
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ drm_sched_stop(&ring->sched, NULL);
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
+{
+
+ DRM_INFO("PCI error: mmio enabled callback!!\n");
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* This is called only if amdgpu_pci_error_detected returns
+ * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+ * works, no need to reset slot.
+ */
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r, i;
+ bool need_full_reset = true;
+ u32 memsize;
+ struct list_head device_list;
+
+ DRM_INFO("PCI error: slot reset callback!!\n");
+
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+
+ /* wait for asic to come out of reset */
+ msleep(500);
+
+ /* Restore PCI confspace */
+ amdgpu_device_load_pci_state(pdev);
+
+ /* confirm ASIC came out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ memsize = amdgpu_asic_get_config_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+ if (memsize == 0xffffffff) {
+ r = -ETIME;
+ goto out;
+ }
+
+ adev->in_pci_err_recovery = true;
+ r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ adev->in_pci_err_recovery = false;
+ if (r)
+ goto out;
+
+ r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+
+out:
+ if (!r) {
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(adev->pdev);
+
+ DRM_INFO("PCIe error recovery succeeded\n");
+ } else {
+ DRM_ERROR("PCIe error recovery failed, err:%d", r);
+ amdgpu_device_unlock_adev(adev);
+ }
+
+ return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * amdgpu_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's OK
+ * to resume normal operation. Restarts the schedulers that
+ * were halted in amdgpu_pci_error_detected.
+ */
+void amdgpu_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
+
+ DRM_INFO("PCI error: resume callback!!\n");
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ drm_sched_resubmit_jobs(&ring->sched);
+ drm_sched_start(&ring->sched, true);
+ }
+
+ amdgpu_device_unlock_adev(adev);
+}
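The four callbacks above are presumably wired into the driver's struct pci_driver through a pci_error_handlers table in amdgpu_drv.c, roughly:

	static const struct pci_error_handlers amdgpu_pci_err_handler = {
		.error_detected	= amdgpu_pci_error_detected,
		.mmio_enabled	= amdgpu_pci_mmio_enabled,
		.slot_reset	= amdgpu_pci_slot_reset,
		.resume		= amdgpu_pci_resume,
	};

	/* in the pci_driver definition: .err_handler = &amdgpu_pci_err_handler, */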
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ r = pci_save_state(pdev);
+ if (!r) {
+ kfree(adev->pci_state);
+
+ adev->pci_state = pci_store_saved_state(pdev);
+
+ if (!adev->pci_state) {
+ DRM_ERROR("Failed to store PCI saved state");
+ return false;
+ }
+ } else {
+ DRM_WARN("Failed to save PCI state, err:%d\n", r);
+ return false;
+ }
+
+ return true;
+}
+
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ if (!adev->pci_state)
+ return false;
+
+ r = pci_load_saved_state(pdev, adev->pci_state);
+
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ DRM_WARN("Failed to load PCI state, err:%d\n", r);
+ return false;
+ }
+
+ return true;
+}
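The cache/load pair replaces the direct pci_save_state()/pci_restore_state() calls seen earlier in this patch, so a known-good copy of config space survives a surprise link reset; typical pairing:

	/* after successful init, or before powering the device down: */
	amdgpu_device_cache_pci_state(adev->pdev);

	/* in the slot_reset callback, once the link is back up: */
	amdgpu_device_load_pci_state(adev->pdev);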
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 61a26c15c8dd..373cdebe0e2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -44,9 +44,9 @@ struct amdgpu_df_funcs {
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
- int is_enable);
+ int is_add);
int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
- int is_disable);
+ int is_remove);
void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
uint64_t *count);
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a50ff2306504..bfb95143ba5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->discovery_tmr_size, false);
+ adev->mman.discovery_tmr_size, false);
return 0;
}
@@ -168,18 +168,18 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
- if (!adev->discovery_bin)
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
+ r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@@ -202,7 +202,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@@ -220,9 +220,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
+ ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
- if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@@ -232,16 +232,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->discovery_bin);
- adev->discovery_bin = NULL;
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
- kfree(adev->discovery_bin);
- adev->discovery_bin = NULL;
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -265,8 +265,8 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
return r;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@@ -274,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -288,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery_bin + ip_offset);
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -337,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
- if (!adev->discovery_bin) {
+ if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery_bin + ip_offset);
+ ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@@ -377,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
- if (!adev->discovery_bin) {
+ if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery_bin;
- gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 44c1f6e00635..7cc7af2a6822 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -93,7 +93,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
- (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+ (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
@@ -152,7 +152,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
@@ -292,7 +292,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
pm_runtime_mark_last_busy(dev->dev);
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
take the current one */
if (active && !adev->have_disp_power_ref) {
@@ -619,51 +619,51 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
int sz;
adev->mode_info.coherent_mode_property =
- drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
if (!adev->mode_info.coherent_mode_property)
return -ENOMEM;
adev->mode_info.load_detect_property =
- drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
if (!adev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(adev->ddev);
+ drm_mode_create_scaling_mode_property(adev_to_drm(adev));
sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
adev->mode_info.underscan_property =
- drm_property_create_enum(adev->ddev, 0,
- "underscan",
- amdgpu_underscan_enum_list, sz);
+ drm_property_create_enum(adev_to_drm(adev), 0,
+ "underscan",
+ amdgpu_underscan_enum_list, sz);
adev->mode_info.underscan_hborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan hborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan hborder", 0, 128);
if (!adev->mode_info.underscan_hborder_property)
return -ENOMEM;
adev->mode_info.underscan_vborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan vborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan vborder", 0, 128);
if (!adev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(amdgpu_audio_enum_list);
adev->mode_info.audio_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"audio",
amdgpu_audio_enum_list, sz);
sz = ARRAY_SIZE(amdgpu_dither_enum_list);
adev->mode_info.dither_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"dither",
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
adev->mode_info.abm_level_property =
- drm_property_create_range(adev->ddev, 0,
- "abm level", 0, 4);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "abm level", 0, 4);
if (!adev->mode_info.abm_level_property)
return -ENOMEM;
}
@@ -813,7 +813,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 519ce4427fce..957934926b24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -35,6 +35,7 @@
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
+#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
@@ -302,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT:
- sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->tbo.ttm->pages,
bo->tbo.num_pages);
if (IS_ERR(sgt))
return sgt;
@@ -454,7 +456,7 @@ static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_resv *resv = dma_buf->resv;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
@@ -595,3 +597,36 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
obj->import_attach = attach;
return obj;
}
+
+/**
+ * amdgpu_dmabuf_is_xgmi_accessible - Check if xgmi available for P2P transfer
+ *
+ * @adev: amdgpu_device pointer of the importer
+ * @bo: amdgpu buffer object
+ *
+ * Returns:
+ * True if the dmabuf is accessible over xgmi, false otherwise.
+ */
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct drm_gem_object *obj = &bo->tbo.base;
+ struct drm_gem_object *gobj;
+
+ if (obj->import_attach) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* No XGMI with non-AMD GPUs */
+ return false;
+
+ gobj = dma_buf->priv;
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+
+ if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
+ return true;
+
+ return false;
+}
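A short usage sketch of the new helper (the caller name is hypothetical; only amdgpu_dmabuf_is_xgmi_accessible comes from this patch): importers can consult it to decide whether an imported BO may stay resident in VRAM for P2P access.

/* Illustrative caller, not part of the patch: VRAM is only directly
 * reachable when both devices share an XGMI hive; otherwise the BO has
 * to be migrated to GTT/system memory first. */
static bool example_can_peer_map_vram(struct amdgpu_device *importer,
				      struct amdgpu_bo *bo)
{
	return amdgpu_dmabuf_is_xgmi_accessible(importer, bo);
}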
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index ec447a7b6b28..2c5c84a06bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -29,6 +29,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 321032d3a51a..42d9748921f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -26,12 +26,12 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_managed.h>
#include "amdgpu_drv.h"
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
@@ -88,9 +88,10 @@
* - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
* - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
+ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -146,16 +147,18 @@ int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
-int amdgpu_noretry;
+int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = 0;
int amdgpu_reset_method = -1; /* auto */
+int amdgpu_num_kcq = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
+int amdgpu_bad_page_threshold = -1;
/**
* DOC: vramlimit (int)
@@ -393,12 +396,12 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
/**
- * DOC: ppfeaturemask (uint)
+ * DOC: ppfeaturemask (hexint)
* Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
* The default is the current set of stable power features.
*/
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default)");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
/**
* DOC: forcelongtraining (uint)
@@ -593,8 +596,13 @@ MODULE_PARM_DESC(mes,
"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);
+/**
+ * DOC: noretry (int)
+ * Disable retry faults in the GPU memory controller.
+ * (0 = retry enabled, 1 = retry disabled, -1 = auto (default))
+ */
MODULE_PARM_DESC(noretry,
- "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+ "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
/**
@@ -676,11 +684,14 @@ MODULE_PARM_DESC(debug_largebar,
* Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
* table to get information about AMD APUs. This option can serve as a workaround on
* systems with a broken CRAT table.
+ *
+ * Default is auto (decided from the asic type, iommu_v2 support, and the
+ * CRAT table itself, to determine whether to use CRAT)
*/
int ignore_crat;
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
- "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+ "Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
/**
* DOC: halt_if_hws_hang (int)
@@ -715,6 +726,15 @@ MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
+
+/**
+ * DOC: no_system_mem_limit (bool)
+ * Disable the system memory limit, to support memory shared between multiple processes.
+ */
+bool no_system_mem_limit;
+module_param(no_system_mem_limit, bool, 0644);
+MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+
#endif
/**
@@ -765,6 +785,19 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
+/**
+ * DOC: bad_page_threshold (int)
+ * The bad page threshold specifies how many faulty pages detected by
+ * RAS ECC may be retired before the GPU is considered to be in a bad
+ * state and is left for the user's further check.
+ */
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default), 0 = disable bad page retirement)");
+module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
+
+MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (reset to 8 if set greater than 8 or less than 0; only affects gfx8+)");
+module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1033,6 +1066,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
@@ -1065,7 +1099,7 @@ static struct drm_driver kms_driver;
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct drm_device *dev;
+ struct drm_device *ddev;
struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
@@ -1081,6 +1115,16 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping;
+ * SME, however, requires an indirect IOMMU mapping because the encryption
+ * bit is beyond the DMA mask of the chip.
+ */
+ if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ dev_info(&pdev->dev,
+ "SME is not compatible with RAVEN\n");
+ return -ENOTSUPP;
+ }
+
#ifdef CONFIG_DRM_AMDGPU_SI
if (!amdgpu_si_support) {
switch (flags & AMD_ASIC_MASK) {
@@ -1121,36 +1165,39 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- dev = drm_dev_alloc(&kms_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ adev->dev = &pdev->dev;
+ adev->pdev = pdev;
+ ddev = adev_to_drm(adev);
if (!supports_atomic)
- dev->driver_features &= ~DRIVER_ATOMIC;
+ ddev->driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
-
- dev->pdev = pdev;
+ return ret;
- pci_set_drvdata(pdev, dev);
+ ddev->pdev = pdev;
+ pci_set_drvdata(pdev, ddev);
- ret = amdgpu_driver_load_kms(dev, ent->driver_data);
+ ret = amdgpu_driver_load_kms(adev, ent->driver_data);
if (ret)
goto err_pci;
retry_init:
- ret = drm_dev_register(dev, ent->driver_data);
+ ret = drm_dev_register(ddev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
DRM_INFO("retry init %d\n", retry);
/* Don't request EX mode too frequently; doing so is treated as an attack */
msleep(5000);
goto retry_init;
- } else if (ret)
+ } else if (ret) {
goto err_pci;
+ }
- adev = dev->dev_private;
ret = amdgpu_debugfs_init(adev);
if (ret)
DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
@@ -1159,8 +1206,6 @@ retry_init:
err_pci:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(dev);
return ret;
}
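The probe rework above relies on devm_drm_dev_alloc() embedding the drm_device inside amdgpu_device (the macro's last argument, ddev, names the member), which is why the dev->dev_private lookups throughout this series turn into adev_to_drm()/drm_to_adev() conversions. A minimal sketch of how those helpers are presumably defined:

/* Hedged sketch: with the drm_device embedded at adev->ddev, both
 * conversions are constant-offset pointer math rather than a
 * dev_private indirection. */
static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}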
@@ -1177,14 +1222,13 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
- drm_dev_put(dev);
}
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (amdgpu_ras_intr_triggered())
return;
@@ -1217,7 +1261,7 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
adev->in_hibernate = true;
@@ -1253,7 +1297,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int ret, i;
if (!adev->runpm) {
@@ -1287,7 +1331,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (amdgpu_is_atpx_hybrid()) {
pci_ignore_hotplug(pdev);
} else {
- pci_save_state(pdev);
+ amdgpu_device_cache_pci_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
@@ -1304,7 +1348,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int ret;
if (!adev->runpm)
@@ -1320,7 +1364,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
} else {
pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ amdgpu_device_load_pci_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
@@ -1340,7 +1384,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
int ret = 1;
@@ -1499,6 +1543,13 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
+static struct pci_error_handlers amdgpu_pci_err_handler = {
+ .error_detected = amdgpu_pci_error_detected,
+ .mmio_enabled = amdgpu_pci_mmio_enabled,
+ .slot_reset = amdgpu_pci_slot_reset,
+ .resume = amdgpu_pci_resume,
+};
+
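For reference, the standard AER contract these callbacks implement: .error_detected tells the PCI core how the driver wants recovery to proceed. A minimal, illustrative handler (the real amdgpu handlers live elsewhere in this series and do considerably more bookkeeping):

/* Illustrative only, assuming the generic pci_ers_result_t contract. */
static pci_ers_result_t example_pci_error_detected(struct pci_dev *pdev,
						   pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;  /* transient, keep going */
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;   /* request a slot reset */
	case pci_channel_io_perm_failure:
	default:
		return PCI_ERS_RESULT_DISCONNECT;   /* device is lost */
	}
}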
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1506,10 +1557,9 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
.driver.pm = &amdgpu_pm_ops,
+ .err_handler = &amdgpu_pci_err_handler,
};
-
-
static int __init amdgpu_init(void)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 61fcf247a638..af4ef84e27a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -35,7 +35,7 @@
void
amdgpu_link_encoder_connector(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index db731f573f98..e2c2eb45a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -135,7 +135,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
- info = drm_get_format_info(adev->ddev, mode_cmd);
+ info = drm_get_format_info(adev_to_drm(adev), mode_cmd);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -231,7 +231,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
+ ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
@@ -254,7 +254,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
+ info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base;
info->apertures->ranges[0].size = adev->gmc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -270,7 +270,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
- vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
+ vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
return 0;
out:
@@ -318,7 +318,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
return 0;
/* don't init fbdev if there are no connectors */
- if (list_empty(&adev->ddev->mode_config.connector_list))
+ if (list_empty(&adev_to_drm(adev)->mode_config.connector_list))
return 0;
/* select 8 bpp console on low vram cards */
@@ -332,10 +332,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
rfbdev->adev = adev;
adev->mode_info.rfbdev = rfbdev;
- drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
- &amdgpu_fb_helper_funcs);
+ drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper,
+ &amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
+ ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
@@ -343,7 +343,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
- drm_helper_disable_unused_functions(adev->ddev);
+ drm_helper_disable_unused_functions(adev_to_drm(adev));
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
@@ -354,7 +354,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
if (!adev->mode_info.rfbdev)
return;
- amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
+ amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev);
kfree(adev->mode_info.rfbdev);
adev->mode_info.rfbdev = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 58d4c219178a..fe2d495d08ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -155,7 +155,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- pm_runtime_get_noresume(adev->ddev->dev);
+ pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
@@ -284,8 +284,8 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
return true;
@@ -700,7 +700,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -749,7 +749,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index e811fecc540f..8f4a8f8d8146 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -34,18 +34,31 @@
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
{
- /* TODO: Gaming SKUs don't have the FRU EEPROM.
- * Use this hack to address hangs on modprobe on gaming SKUs
- * until a proper solution can be implemented by only supporting
- * the explicit chip IDs for VG20 Server cards
- *
- * TODO: Add list of supported Arcturus DIDs once confirmed
+ /* Only server cards have the FRU EEPROM
+ * TODO: See if we can figure this out dynamically instead of
+ * having to parse VBIOS versions.
*/
- if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
- (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
- (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
- return true;
- return false;
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+ * we can use just the "DXXX" portion. If there were more models, we
+ * could convert the 3 characters to a hex integer and use a switch
+ * for ease/speed/readability. For now, 2 string comparisons are
+ * reasonable and not too expensive.
+ */
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ /* D161 and D163 are the VG20 server SKUs */
+ if (strnstr(atom_ctx->vbios_version, "D161",
+ sizeof(atom_ctx->vbios_version)) ||
+ strnstr(atom_ctx->vbios_version, "D163",
+ sizeof(atom_ctx->vbios_version)))
+ return true;
+ else
+ return false;
+ default:
+ return false;
+ }
}
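The comment above sketches an alternative for when more SKUs appear: fold the three "DXXX" digits into an integer and switch on it. A hedged sketch of that idea (the helper name and search window are illustrative; the driver keeps the two strnstr() comparisons):

/* Illustrative only: "###-D163YY-##" -> 0x163 for use in a switch. */
static unsigned int example_sku_key(const char *vbios_version, size_t len)
{
	char digits[4] = {0};
	const char *d = strnstr(vbios_version, "-D", len);

	if (!d || strlen(d) < 5)
		return 0;

	memcpy(digits, d + 2, 3);	/* keep only the three SKU digits */
	return simple_strtoul(digits, NULL, 16);
}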
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index f29a8611d69b..1308d976d60e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -26,4 +26,4 @@
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
-#endif // __AMDGPU_PRODINFO_H__
+#endif // __AMDGPU_FRU_EEPROM_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7f9e50247413..7e8265da9f25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -93,7 +93,7 @@ retry:
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
struct drm_file *file;
mutex_lock(&ddev->filelist_mutex);
@@ -217,7 +217,7 @@ out_unlock:
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
@@ -298,7 +298,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct ttm_operation_ctx ctx = { true, false };
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
@@ -332,7 +332,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
bo = gem_to_amdgpu_bo(gobj);
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
- r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+ r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
if (r)
goto release_object;
@@ -587,7 +587,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_va *args = data;
struct drm_gem_object *gobj;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
@@ -596,6 +596,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx ticket;
struct list_head list, duplicates;
uint64_t va_flags;
+ uint64_t vm_size;
int r = 0;
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
@@ -616,6 +617,15 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->va_address &= AMDGPU_GMC_HOLE_MASK;
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_SIZE;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(&dev->pdev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
+ return -EINVAL;
+ }
+
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
args->flags);
@@ -711,7 +721,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@@ -788,7 +798,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_gem_object *gobj;
uint32_t handle;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 78d37f92c7be..8c9bacfdbc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, mec;
+ int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-
- /* policy for amdgpu compute queue ownership */
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- queue = i % adev->gfx.mec.num_queue_per_pipe;
- pipe = (i / adev->gfx.mec.num_queue_per_pipe)
- % adev->gfx.mec.num_pipe_per_mec;
- mec = (i / adev->gfx.mec.num_queue_per_pipe)
- / adev->gfx.mec.num_pipe_per_mec;
-
- /* we've run out of HW */
- if (mec >= adev->gfx.mec.num_mec)
- break;
-
- if (multipipe_policy) {
- /* policy: amdgpu owns the first two queues of the first MEC */
- if (mec == 0 && queue < 2)
- set_bit(i, adev->gfx.mec.queue_bitmap);
- } else {
- /* policy: amdgpu owns all queues in the first pipe */
- if (mec == 0 && pipe == 0)
- set_bit(i, adev->gfx.mec.queue_bitmap);
+ int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe,
+ adev->gfx.num_compute_rings);
+
+ if (multipipe_policy) {
+ /* policy: make queues evenly cross all pipes on MEC1 only */
+ for (i = 0; i < max_queues_per_mec; i++) {
+ pipe = i % adev->gfx.mec.num_pipe_per_mec;
+ queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+ adev->gfx.mec.num_queue_per_pipe;
+
+ set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+ adev->gfx.mec.queue_bitmap);
}
+ } else {
+ /* policy: amdgpu owns all queues in the given pipe */
+ for (i = 0; i < max_queues_per_mec; ++i)
+ set_bit(i, adev->gfx.mec.queue_bitmap);
}
- /* update the number of active compute rings */
- adev->gfx.num_compute_rings =
- bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
- /* If you hit this case and edited the policy, you probably just
- * need to increase AMDGPU_MAX_COMPUTE_RINGS */
- if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}
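Under the new multipipe policy, the linear index fills pipes round-robin before moving to the next queue slot, so compute rings spread evenly across MEC1's pipes. A standalone sketch of the index-to-bit mapping used above (the hardware layout values are parameters here):

/* Illustrative mirror of the mapping above: with 4 pipes and 8 queues
 * per pipe, i = 0..3 land on queue 0 of pipes 0..3 (bits 0, 8, 16, 24),
 * then i = 4 wraps to pipe 0 queue 1 (bit 1), i = 5 to pipe 1 queue 1
 * (bit 9), and so on. */
static unsigned int example_multipipe_bit(unsigned int i,
					  unsigned int pipes_per_mec,
					  unsigned int queues_per_pipe)
{
	unsigned int pipe = i % pipes_per_mec;
	unsigned int queue = (i / pipes_per_mec) % queues_per_pipe;

	return pipe * queues_per_pipe + queue;
}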
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
@@ -571,8 +560,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
} else if (!enable && adev->gfx.gfx_off_state) {
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
adev->gfx.gfx_off_state = false;
+
+ if (adev->gfx.funcs->init_spm_golden) {
+ dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
+ amdgpu_gfx_init_spm_golden(adev);
+ }
+ }
}
mutex_unlock(&adev->gfx.gfx_off_mutex);
@@ -698,6 +693,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
+ if (adev->in_pci_err_recovery)
+ return 0;
+
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -724,7 +722,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read;
might_sleep();
@@ -748,7 +746,7 @@ failed_unlock:
failed_kiq_read:
if (reg_val_offs)
amdgpu_device_wb_free(adev, reg_val_offs);
- pr_err("failed to read reg:%x\n", reg);
+ dev_err(adev->dev, "failed to read reg:%x\n", reg);
return ~0;
}
@@ -762,6 +760,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
BUG_ON(!ring->funcs->emit_wreg);
+ if (adev->in_pci_err_recovery)
+ return;
+
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
@@ -782,7 +783,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_write;
might_sleep();
@@ -801,5 +802,5 @@ failed_undo:
amdgpu_ring_undo(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
- pr_err("failed to write reg:%x\n", reg);
+ dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 1e7a2b0997c5..258498cbf1eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -216,6 +216,8 @@ struct amdgpu_gfx_funcs {
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
void (*reset_ras_error_count) (struct amdgpu_device *adev);
+ void (*init_spm_golden)(struct amdgpu_device *adev);
+ void (*query_ras_error_status) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -324,6 +326,7 @@ struct amdgpu_gfx {
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
+#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644
index 000000000000..66ebc2e3b2ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GFXHUB_H__
+#define __AMDGPU_GFXHUB_H__
+
+struct amdgpu_gfxhub_funcs {
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ int (*gart_enable)(struct amdgpu_device *adev);
+
+ void (*gart_disable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
+ void (*init)(struct amdgpu_device *adev);
+ int (*get_xgmi_info)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_gfxhub {
+ const struct amdgpu_gfxhub_funcs *funcs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 34cbd6f6a56b..36604d751d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
@@ -411,3 +412,102 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break;
}
}
+
+/**
+ * amdgpu_gmc_noretry_set -- set per asic noretry defaults
+ * @adev: amdgpu_device pointer
+ *
+ * Set a per asic default for the no-retry parameter.
+ *
+ */
+void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* Raven currently has issues with noretry;
+ * regardless of what we decide for other
+ * asics, we should leave raven with
+ * noretry = 0 until we root cause the
+ * issues.
+ */
+ if (amdgpu_noretry == -1)
+ gmc->noretry = 0;
+ else
+ gmc->noretry = amdgpu_noretry;
+ break;
+ default:
+ /* default this to 0 for now, but we may want
+ * to change this in the future for certain
+ * GPUs as it can increase performance in
+ * certain cases.
+ */
+ if (amdgpu_noretry == -1)
+ gmc->noretry = 0;
+ else
+ gmc->noretry = amdgpu_noretry;
+ break;
+ }
+}
+
+void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable)
+{
+ struct amdgpu_vmhub *hub;
+ u32 tmp, reg, i;
+
+ hub = &adev->vmhub[hub_type];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + hub->ctx_distance * i;
+
+ tmp = RREG32(reg);
+ if (enable)
+ tmp |= hub->vm_cntx_cntl_vm_fault;
+ else
+ tmp &= ~hub->vm_cntx_cntl_vm_fault;
+
+ WREG32(reg, tmp);
+ }
+}
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+{
+ unsigned size;
+
+ /*
+ * TODO:
+ * Currently there is a bug where some memory client outside
+ * of the driver writes to the first 8M of VRAM on S3 resume;
+ * this overwrites the GART, which by default is placed in the
+ * first 8M, and causes VM_FAULTS once GTT is accessed.
+ * Keep the stolen memory reservation while this is not solved.
+ */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+ }
+
+ if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
+ size = 0;
+ else
+ size = amdgpu_gmc_get_vbios_fb_size(adev);
+
+ /* set to 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ size = 0;
+
+ if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
+ adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
+ } else {
+ adev->mman.stolen_vga_size = size;
+ adev->mman.stolen_extended_size = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index acdb61cfa24c..aa0c83776ce0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -74,6 +74,12 @@ struct amdgpu_gmc_fault {
/*
* VMHUB structures, functions & helpers
*/
+struct amdgpu_vmhub_funcs {
+ void (*print_l2_protection_fault_status)(struct amdgpu_device *adev,
+ uint32_t status);
+ uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type);
+};
+
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
@@ -92,6 +98,10 @@ struct amdgpu_vmhub {
uint32_t ctx_addr_distance; /* include LO32/HI32 */
uint32_t eng_distance;
uint32_t eng_addr_distance; /* include LO32/HI32 */
+
+ uint32_t vm_cntx_cntl_vm_fault;
+
+ const struct amdgpu_vmhub_funcs *vmhub_funcs;
};
/*
@@ -121,6 +131,8 @@ struct amdgpu_gmc_funcs {
void (*get_vm_pte)(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags);
+ /* get the amount of memory used by the vbios for pre-OS console */
+ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
struct amdgpu_xgmi {
@@ -203,7 +215,6 @@ struct amdgpu_gmc {
uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
- uint64_t stolen_size;
uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
@@ -228,6 +239,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
+ int noretry;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -239,6 +251,7 @@ struct amdgpu_gmc {
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -288,5 +301,12 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
+
+extern void
+amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable);
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 77fae40197ab..731f3aa2e6ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -24,11 +24,10 @@
#include "amdgpu.h"
-struct amdgpu_gtt_mgr {
- struct drm_mm mm;
- spinlock_t lock;
- atomic64_t available;
-};
+static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_gtt_mgr, manager);
+}
struct amdgpu_gtt_node {
struct drm_mm_node node;
@@ -47,10 +46,11 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
+ man->size * PAGE_SIZE);
}
/**
@@ -65,10 +65,11 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
+ amdgpu_gtt_mgr_usage(man));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -76,32 +77,32 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
amdgpu_mem_info_gtt_used_show, NULL);
+static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func;
/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of GTT
+ * @adev: amdgpu_device pointer
+ * @gtt_size: maximum size of GTT
*
* Allocate and initialize the GTT manager.
*/
-static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr;
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
uint64_t start, size;
int ret;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
+ man->use_tt = true;
+ man->func = &amdgpu_gtt_mgr_func;
+
+ ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- atomic64_set(&mgr->available, p_size);
- man->priv = mgr;
+ atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
if (ret) {
@@ -114,31 +115,40 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
return ret;
}
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
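Once registered through ttm_set_driver_manager(), the manager is looked up by placement type and converted back with the container_of helper introduced at the top of this file. A small sketch of the lookup path the sysfs hunks above already use:

/* Sketch only: retrieve the embedded GTT manager for this device. */
static struct amdgpu_gtt_mgr *example_lookup_gtt_mgr(struct amdgpu_device *adev)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

	return to_gtt_mgr(man);
}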
/**
* amdgpu_gtt_mgr_fini - free and destroy GTT manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the GTT manager. Bails out early if ranges are still
* allocated inside it.
*/
-static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
- return 0;
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL);
}
/**
@@ -148,7 +158,7 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
*
* Check if a mem object already has address space allocated.
*/
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
{
return mem->mm_node != NULL;
}
@@ -163,12 +173,12 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
*
* Dummy, allocate the node but no space for it yet.
*/
-static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node;
int r;
@@ -226,10 +236,10 @@ err_out:
*
* Free the allocated GTT again.
*/
-static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node = mem->mm_node;
if (node) {
@@ -249,17 +259,17 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
*
* Return how many bytes are used in the GTT domain
*/
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
s64 result = man->size - atomic64_read(&mgr->available);
return (result > 0 ? result : 0) * PAGE_SIZE;
}
-int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
int r = 0;
@@ -284,10 +294,10 @@ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
*
* Dump the table content using printk.
*/
-static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
+static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
@@ -298,10 +308,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
amdgpu_gtt_mgr_usage(man) >> 20);
}
-const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
- .init = amdgpu_gtt_mgr_init,
- .takedown = amdgpu_gtt_mgr_fini,
- .get_node = amdgpu_gtt_mgr_new,
- .put_node = amdgpu_gtt_mgr_del,
+static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
+ .alloc = amdgpu_gtt_mgr_new,
+ .free = amdgpu_gtt_mgr_del,
.debug = amdgpu_gtt_mgr_debug
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 70dbe343f51d..47cad23a6b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -40,7 +40,7 @@
static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -82,7 +82,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -101,7 +101,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
static int amdgpu_i2c_get_clock(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -116,7 +116,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv)
static int amdgpu_i2c_get_data(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -130,7 +130,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv)
static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -143,7 +143,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
static void amdgpu_i2c_set_data(void *i2c_priv, int data)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -253,7 +253,7 @@ void amdgpu_i2c_add(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index dcd492170598..2f53fa0ae9a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -445,7 +445,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
seq_printf(m, "--------------------- DELAYED --------------------- \n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 7521f4ab55de..6e9a9e5dbea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -43,7 +43,7 @@ static DEFINE_IDA(amdgpu_pasid_ida);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
struct dma_fence_cb cb;
- unsigned int pasid;
+ u32 pasid;
};
/**
@@ -79,7 +79,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
* amdgpu_pasid_free - Free a PASID
* @pasid: PASID to free
*/
-void amdgpu_pasid_free(unsigned int pasid)
+void amdgpu_pasid_free(u32 pasid)
{
trace_amdgpu_pasid_freed(pasid);
ida_simple_remove(&amdgpu_pasid_ida, pasid);
@@ -105,7 +105,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
* Free the pasid only after all the fences in resv are signaled.
*/
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
- unsigned int pasid)
+ u32 pasid)
{
struct dma_fence *fence, **fences;
struct amdgpu_pasid_cb *cb;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 8e58325bbca2..0c3b4fa1f936 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -71,9 +71,9 @@ struct amdgpu_vmid_mgr {
};
int amdgpu_pasid_alloc(unsigned int bits);
-void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free(u32 pasid);
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
- unsigned int pasid);
+ u32 pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0cc4c67f95f7..300ac73b4738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -85,7 +85,7 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
hotplug_work);
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
@@ -151,7 +151,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
irqreturn_t ret;
ret = amdgpu_ih_process(adev, &adev->irq.ih);
@@ -268,9 +268,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
- adev->ddev->vblank_disable_immediate = true;
+ adev_to_drm(adev)->vblank_disable_immediate = true;
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
@@ -284,14 +284,14 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.installed = true;
/* Use vector 0 for MSI-X */
- r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
+ r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
return r;
}
- adev->ddev->max_vblank_count = 0x00ffffff;
+ adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
DRM_DEBUG("amdgpu: irq initialized.\n");
return 0;
@@ -311,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
unsigned i, j;
if (adev->irq.installed) {
- drm_irq_uninstall(adev->ddev);
+ drm_irq_uninstall(adev_to_drm(adev));
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_free_irq_vectors(adev->pdev);
@@ -522,7 +522,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return -ENOENT;
if (type >= src->num_types)
@@ -552,7 +552,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return -ENOENT;
if (type >= src->num_types)
@@ -583,7 +583,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev_to_drm(adev)->irq_enabled)
return false;
if (type >= src->num_types)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 937029ad5271..dcfe8a3b03ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -251,7 +251,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
if (!rq)
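The loop bound change above assumes the scheduler priority enum now ends with an explicit COUNT entry that sizes sched->sched_rq[]. A rough sketch of such an enum (only MIN and COUNT appear in this diff; the middle names are illustrative):

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,	/* illustrative */
	DRM_SCHED_PRIORITY_HIGH,	/* illustrative */
	DRM_SCHED_PRIORITY_KERNEL,	/* illustrative */

	DRM_SCHED_PRIORITY_COUNT,	/* number of run queues, not a level */
	DRM_SCHED_PRIORITY_UNSET = -2	/* illustrative */
};

Iterating from COUNT - 1 down to MIN then walks every valid run queue without treating the array size itself as a priority level.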
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 414548064648..efda38349a03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -78,7 +78,7 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
*/
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (adev == NULL)
return;
@@ -86,7 +86,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
amdgpu_unregister_gpu_instance(adev);
if (adev->rmmio == NULL)
- goto done_free;
+ return;
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
@@ -94,12 +94,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
}
amdgpu_acpi_fini(adev);
-
amdgpu_device_fini(adev);
-
-done_free:
- kfree(adev);
- dev->dev_private = NULL;
}
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
@@ -130,22 +125,18 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
- * @dev: drm dev pointer
+ * @adev: pointer to struct amdgpu_device
* @flags: device flags
*
* This is the main load function for KMS (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
- struct amdgpu_device *adev;
+ struct drm_device *dev;
int r, acpi_status;
- adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
- if (adev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)adev;
+ dev = adev_to_drm(adev);
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
@@ -160,7 +151,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+ r = amdgpu_device_init(adev, flags);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
goto out;
@@ -186,7 +177,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
break;
case CHIP_VEGA10:
/* turn runpm on if noretry=0 */
- if (!amdgpu_noretry)
+ if (!adev->gmc.noretry)
adev->runpm = true;
break;
default:
@@ -291,14 +282,25 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_TA:
- if (query_fw->index > 1)
- return -EINVAL;
- if (query_fw->index == 0) {
+ switch (query_fw->index) {
+ case 0:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
- } else {
+ break;
+ case 1:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
+ break;
+ case 2:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+ break;
+ case 3:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_dtm_ucode_version;
+ break;
+ default:
+ return -EINVAL;
}
break;
case AMDGPU_INFO_FW_SDMA:
@@ -480,7 +482,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
*/
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -595,13 +597,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -624,7 +626,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min(adev->gmc.visible_vram_size -
atomic64_read(&adev->visible_pin_size),
vram_gtt.vram_size);
- vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
@@ -632,14 +634,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
-
+ struct ttm_resource_manager *vram_man =
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ struct ttm_resource_manager *gtt_man =
+ ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
- amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_usage(vram_man);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -649,16 +654,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
atomic64_read(&adev->visible_pin_size),
mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ amdgpu_vram_mgr_vis_usage(vram_man);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
- mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size = gtt_man->size;
mem.gtt.total_heap_size *= PAGE_SIZE;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
- amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ amdgpu_gtt_mgr_usage(gtt_man);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -742,6 +747,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+ if (amdgpu_is_tmz(adev))
+ dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -995,7 +1002,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
*/
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv;
int r, pasid;
@@ -1080,11 +1087,11 @@ pm_put:
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
- unsigned int pasid;
+ u32 pasid;
int handle;
if (!fpriv)
@@ -1145,7 +1152,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int vpos, hpos, stat;
u32 count;
@@ -1213,7 +1220,7 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
@@ -1230,7 +1237,7 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
@@ -1266,7 +1273,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
@@ -1389,13 +1396,31 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
query_fw.fw_type = AMDGPU_INFO_FW_TA;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 4; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
continue;
- seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
- i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+ switch (query_fw.index) {
+ case 0:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "RAS", fw_info.feature, fw_info.ver);
+ break;
+ case 1:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "XGMI", fw_info.feature, fw_info.ver);
+ break;
+ case 2:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "HDCP", fw_info.feature, fw_info.ver);
+ break;
+ case 3:
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ "DTM", fw_info.feature, fw_info.ver);
+ break;
+ default:
+ return -EINVAL;
+ }
}
/* SMC */
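The repeated adev->mman.bdev.man[...] replacements above rely on the ttm_manager_type() accessor; a minimal sketch of what it presumably does under the new TTM API, where drivers register resource managers dynamically:

static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
{
	/* look up the per-memory-type manager instead of indexing
	 * the old fixed man[] array */
	return bdev->man_drv[mem_type];
}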
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index e89fb35fec71..1ae9bdae7311 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -27,6 +27,20 @@ struct amdgpu_mmhub_funcs {
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ void (*init)(struct amdgpu_device *adev);
+ int (*gart_enable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev,
+ bool value);
+ void (*gart_disable)(struct amdgpu_device *adev);
+ int (*set_clockgating)(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+ void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ void (*update_power_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
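Not every IP version fills in every one of these new hooks, so callers are expected to NULL-check before dispatching. An illustrative, hypothetical call site:

/* hypothetical wrapper; real call sites live in the gmc/ras code */
static void mmhub_setup_vm(struct amdgpu_device *adev, uint32_t vmid,
			   uint64_t page_table_base)
{
	if (adev->mmhub.funcs && adev->mmhub.funcs->setup_vm_pt_regs)
		adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid,
						    page_table_base);
}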
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 37ba07e2feb5..a04decb934b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -46,6 +46,7 @@
#include <drm/drm_dp_mst_helper.h>
#include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"
struct amdgpu_bo;
struct amdgpu_device;
@@ -404,7 +405,8 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
- u32 last_flip_vblank;
+ /* parameters accessed from the DM IRQ handler */
+ struct dm_irq_params dm_irq_params;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
@@ -469,6 +471,7 @@ struct amdgpu_encoder {
struct amdgpu_connector_atom_dig {
/* displayport */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5ac7b5561475..ac043baac05d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ places[c].mem_type = TTM_PL_VRAM;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
@@ -152,7 +152,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_TT;
+ places[c].mem_type = TTM_PL_TT;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
@@ -164,7 +165,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED;
@@ -176,28 +178,32 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ places[c].mem_type = AMDGPU_PL_GDS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ places[c].mem_type = AMDGPU_PL_GWS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_OA) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ places[c].mem_type = AMDGPU_PL_OA;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
c++;
}
if (!c) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = TTM_PL_MASK_CACHING;
c++;
}
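With this rework the memory type moves out of the flags word into its own field. For illustration, a VRAM placement that previously packed TTM_PL_FLAG_VRAM into flags now reads:

struct ttm_place place = {
	.fpfn	  = 0,			/* no start-page restriction */
	.lpfn	  = 0,			/* no end-page restriction */
	.mem_type = TTM_PL_VRAM,	/* domain, formerly TTM_PL_FLAG_VRAM */
	.flags	  = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED, /* caching only */
};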
@@ -374,6 +380,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (r)
return r;
+ if ((*bo_ptr) == NULL)
+ return 0;
+
/*
* Remove the original mem node and create a new one at the request
* position.
@@ -381,7 +390,7 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
@@ -442,14 +451,14 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
unsigned long size, u32 domain)
{
- struct ttm_mem_type_manager *man = NULL;
+ struct ttm_resource_manager *man = NULL;
/*
* If GTT is part of requested domains the check must succeed to
* allow fall back to GTT
*/
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- man = &adev->mman.bdev.man[TTM_PL_TT];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
if (size < (man->size << PAGE_SHIFT))
return true;
@@ -458,7 +467,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
}
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
if (size < (man->size << PAGE_SHIFT))
return true;
@@ -552,7 +561,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
+ drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -591,7 +600,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ bo->tbo.mem.mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -1268,11 +1277,11 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
*/
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1299,7 +1308,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
*
* Wipes VRAM buffers whose contents should not be leaked before the
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index afa5189dba7d..5ddb6cf96030 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -160,7 +160,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(adev->dev, "%p reserve failed\n", bo);
@@ -283,7 +283,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem);
+ struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 1311d6aec5d4..69af462db34d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -226,7 +226,7 @@ static int init_pmu_by_type(struct amdgpu_device *adev,
pmu_entry->pmu.attr_groups = attr_groups;
pmu_entry->pmu_perf_type = pmu_perf_type;
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
- pmu_file_prefix, adev->ddev->primary->index);
+ pmu_file_prefix, adev_to_drm(adev)->primary->index);
ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 06757681b2ce..a6dbe4b83533 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -161,10 +161,12 @@ static int psp_sw_init(void *handle)
struct psp_context *psp = &adev->psp;
int ret;
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = psp_init_microcode(psp);
+ if (ret) {
+ DRM_ERROR("Failed to load psp firmware!\n");
+ return ret;
+ }
}
ret = psp_memory_training_init(psp);
@@ -206,7 +208,8 @@ static int psp_sw_fini(void *handle)
adev->psp.ta_fw = NULL;
}
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_SIENNA_CICHLID)
psp_sysfs_fini(adev);
return 0;
@@ -219,6 +222,9 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
int i;
struct amdgpu_device *adev = psp->adev;
+ if (psp->adev->in_pci_err_recovery)
+ return 0;
+
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(reg_index);
if (check_changed) {
@@ -245,6 +251,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
bool ras_intr = false;
bool skip_unsupport = false;
+ if (psp->adev->in_pci_err_recovery)
+ return 0;
+
mutex_lock(&psp->mutex);
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -929,6 +938,7 @@ static int psp_ras_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
+ struct ta_ras_shared_memory *ras_cmd;
/*
* TODO: bypass the loading in sriov for now
@@ -952,11 +962,20 @@ static int psp_ras_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
if (!ret) {
- psp->ras.ras_initialized = true;
psp->ras.session_id = cmd->resp.session_id;
+
+ if (!ras_cmd->ras_status)
+ psp->ras.ras_initialized = true;
+ else
+ dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
}
+ if (ret || ras_cmd->ras_status)
+ amdgpu_ras_fini(psp->adev);
+
kfree(cmd);
return ret;
@@ -1429,6 +1448,168 @@ static int psp_dtm_terminate(struct psp_context *psp)
}
// DTM end
+// RAP start
+static int psp_rap_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16 KB of memory, aligned to 4 KB, from the frame buffer
+ * (local physical memory) for the RAP TA <-> driver interface
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ return ret;
+}
+
+static int psp_rap_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+
+ psp_prep_ta_load_cmd_buf(cmd,
+ psp->fw_pri_mc_addr,
+ psp->ta_rap_ucode_size,
+ psp->rap_context.rap_shared_mc_addr,
+ PSP_RAP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->rap_context.rap_initialized = true;
+ psp->rap_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->rap_context.mutex);
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_rap_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_rap_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialization in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_rap_ucode_size ||
+ !psp->adev->psp.ta_rap_start_addr) {
+ dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->rap_context.rap_initialized) {
+ ret = psp_rap_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_rap_load(psp);
+ if (ret)
+ return ret;
+
+ ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
+ if (ret != TA_RAP_STATUS__SUCCESS) {
+ psp_rap_unload(psp);
+
+ amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ psp->rap_context.rap_initialized = false;
+
+ dev_warn(psp->adev->dev, "RAP TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int psp_rap_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->rap_context.rap_initialized)
+ return 0;
+
+ ret = psp_rap_unload(psp);
+
+ psp->rap_context.rap_initialized = false;
+
+ /* free rap shared memory */
+ amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
+ &psp->rap_context.rap_shared_mc_addr,
+ &psp->rap_context.rap_shared_buf);
+
+ return ret;
+}
+
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ struct ta_rap_shared_memory *rap_cmd;
+ int ret;
+
+ if (!psp->rap_context.rap_initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
+ ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
+ return -EINVAL;
+
+ mutex_lock(&psp->rap_context.mutex);
+
+ rap_cmd = (struct ta_rap_shared_memory *)
+ psp->rap_context.rap_shared_buf;
+ memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
+
+ rap_cmd->cmd_id = ta_cmd_id;
+ rap_cmd->validation_method_id = METHOD_A;
+
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
+ if (ret) {
+ mutex_unlock(&psp->rap_context.mutex);
+ return ret;
+ }
+
+ mutex_unlock(&psp->rap_context.mutex);
+
+ return rap_cmd->rap_status;
+}
+// RAP end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -1570,6 +1751,12 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ *type = GFX_FW_TYPE_RLC_IRAM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
+ break;
case AMDGPU_UCODE_ID_SMC:
*type = GFX_FW_TYPE_SMU;
break;
@@ -1706,7 +1893,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
return 0;
- if (adev->in_gpu_reset && ras && ras->supported) {
+ if (amdgpu_in_reset(adev) && ras && ras->supported) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -1821,7 +2008,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
goto skip_memalloc;
}
@@ -1891,6 +2078,11 @@ skip_memalloc:
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
+
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
}
return 0;
@@ -1941,6 +2133,7 @@ static int psp_hw_fini(void *handle)
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
}
@@ -1999,6 +2192,11 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate dtm ta\n");
return ret;
}
+ ret = psp_rap_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate rap ta\n");
+ return ret;
+ }
}
ret = psp_asd_unload(psp);
@@ -2077,6 +2275,11 @@ static int psp_resume(void *handle)
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
+
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
}
mutex_unlock(&adev->firmware.mutex);
@@ -2321,6 +2524,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
+ psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
@@ -2342,6 +2546,11 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
psp->ta_dtm_start_addr = ucode_start_addr;
break;
+ case TA_FW_TYPE_PSP_RAP:
+ psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
+ psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->ta_rap_start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
@@ -2420,7 +2629,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t fw_ver;
int ret;
@@ -2447,7 +2656,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
void *cpu_addr;
dma_addr_t dma_addr;
int ret;
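Taken together, the RAP additions follow the same init/load/invoke/terminate lifecycle as the other TAs. A condensed, hypothetical sketch of the flow (rap_validate_l0() is illustrative, not part of the patch):

static int rap_validate_l0(struct psp_context *psp)
{
	int status;

	/* loads the TA and sets up the 16 KB shared buffer;
	 * succeeds as a no-op if the optional RAP ucode is absent */
	if (psp_rap_initialize(psp))
		return -EINVAL;

	/* returns the TA status; TA_RAP_STATUS__SUCCESS on pass */
	status = psp_rap_invoke(psp, TA_CMD_RAP__VALIDATE_L0);

	return status == TA_RAP_STATUS__SUCCESS ? 0 : -EIO;
}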
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 623888bf30cb..919d2fb7427b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -29,6 +29,7 @@
#include "psp_gfx_if.h"
#include "ta_xgmi_if.h"
#include "ta_ras_if.h"
+#include "ta_rap_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
@@ -38,6 +39,7 @@
#define PSP_TMR_SIZE 0x400000
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_RAP_SHARED_MEM_SIZE 0x4000
#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
@@ -159,6 +161,15 @@ struct psp_dtm_context {
struct mutex mutex;
};
+struct psp_rap_context {
+ bool rap_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *rap_shared_bo;
+ uint64_t rap_shared_mc_addr;
+ void *rap_shared_buf;
+ struct mutex mutex;
+};
+
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
#define GDDR6_MEM_TRAINING_OFFSET 0x8000
@@ -277,11 +288,16 @@ struct psp_context
uint32_t ta_dtm_ucode_size;
uint8_t *ta_dtm_start_addr;
+ uint32_t ta_rap_ucode_version;
+ uint32_t ta_rap_ucode_size;
+ uint8_t *ta_rap_start_addr;
+
struct psp_asd_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
struct psp_hdcp_context hdcp_context;
struct psp_dtm_context dtm_context;
+ struct psp_rap_context rap_context;
struct mutex mutex;
struct psp_memory_training_context mem_train_ctx;
};
@@ -357,6 +373,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
new file mode 100644
index 000000000000..8da5356c36f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_rap.h"
+
+/**
+ * DOC: AMDGPU RAP debugfs test interface
+ *
+ * Usage:
+ * echo opcode > <debugfs_dir>/dri/xxx/rap_test
+ *
+ * opcode:
+ * Currently, only opcode 2 is supported by the Linux host driver.
+ * Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and triggers L0
+ * policy validation; refer to the header file ta_rap_if.h for
+ * more detail.
+ *
+ */
+static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ta_rap_shared_memory *rap_shared_mem;
+ struct ta_rap_cmd_output_data *rap_cmd_output;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t op;
+ int ret;
+
+ if (*pos || size != 2)
+ return -EINVAL;
+
+ ret = kstrtouint_from_user(buf, size, *pos, &op);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ /* make sure the gfx core is on; the RAP TA can't handle the
+ * GFX OFF case currently.
+ */
+ amdgpu_gfx_off_ctrl(adev, false);
+
+ switch (op) {
+ case 2:
+ ret = psp_rap_invoke(&adev->psp, op);
+
+ if (ret == TA_RAP_STATUS__SUCCESS) {
+ dev_info(adev->dev, "RAP L0 validate test success.\n");
+ } else {
+ rap_shared_mem = (struct ta_rap_shared_memory *)
+ adev->psp.rap_context.rap_shared_buf;
+ rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
+
+ dev_info(adev->dev, "RAP test failed, the output is:\n");
+ dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
+ rap_cmd_output->last_subsection);
+ dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
+ rap_cmd_output->num_total_validate);
+ dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
+ rap_cmd_output->num_valid);
+ dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
+ rap_cmd_output->last_validate_addr);
+ dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
+ rap_cmd_output->last_validate_val);
+ dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
+ rap_cmd_output->last_validate_val_exptd);
+ }
+ break;
+ default:
+ dev_info(adev->dev, "Unsupported op id: %d, ", op);
+ dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
+ }
+
+ amdgpu_gfx_off_ctrl(adev, true);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_rap_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_rap_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ if (!adev->psp.rap_context.rap_initialized)
+ return;
+
+ debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
+ adev, &amdgpu_rap_debugfs_ops);
+#endif
+}
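A hypothetical invocation of the new node, assuming debugfs is mounted at /sys/kernel/debug and the GPU is card 0:

	echo 2 > /sys/kernel/debug/dri/0/rap_test

On success the handler logs a single pass message; on failure it dumps the RAP output block (last_subsection, num_total_validate, num_valid, and the last validated address/value pair) to the kernel log.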
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
new file mode 100644
index 000000000000..ec6d7632d3a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAP_H
+#define _AMDGPU_RAP_H
+
+#include "amdgpu.h"
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1bedb416eebd..4e36551ab50b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -34,6 +34,8 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+static const char *RAS_FS_NAME = "ras";
+
const char *ras_error_string[] = {
"none",
"parity",
@@ -62,13 +64,14 @@ const char *ras_block_string[] = {
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])
-#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
-#define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+/* typical ECC bad page rate (1 bad page per 100MB VRAM) */
+#define RAS_BAD_PAGE_RATE (100 * 1024 * 1024ULL)
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -367,12 +370,19 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct amdgpu_device *adev =
+ (struct amdgpu_device *)file_inode(f)->i_private;
int ret;
- ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+ ret = amdgpu_ras_eeprom_reset_table(
+ &(amdgpu_ras_get_context(adev)->eeprom_control));
- return ret == 1 ? size : -EIO;
+ if (ret == 1) {
+ amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
+ return size;
+ } else {
+ return -EIO;
+ }
}
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
@@ -1017,45 +1027,13 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}
-static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
+static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct attribute *attrs[] = {
- &con->features_attr.attr,
- NULL
- };
- struct bin_attribute *bin_attrs[] = {
- &con->badpages_attr,
- NULL
- };
- struct attribute_group group = {
- .name = "ras",
- .attrs = attrs,
- .bin_attrs = bin_attrs,
- };
- con->features_attr = (struct device_attribute) {
- .attr = {
- .name = "features",
- .mode = S_IRUGO,
- },
- .show = amdgpu_ras_sysfs_features_read,
- };
-
- con->badpages_attr = (struct bin_attribute) {
- .attr = {
- .name = "gpu_vram_bad_pages",
- .mode = S_IRUGO,
- },
- .size = 0,
- .private = NULL,
- .read = amdgpu_ras_sysfs_badpages_read,
- };
-
- sysfs_attr_init(attrs[0]);
- sysfs_bin_attr_init(bin_attrs[0]);
-
- return sysfs_create_group(&adev->dev->kobj, &group);
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
}
static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
@@ -1065,14 +1043,9 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
&con->features_attr.attr,
NULL
};
- struct bin_attribute *bin_attrs[] = {
- &con->badpages_attr,
- NULL
- };
struct attribute_group group = {
- .name = "ras",
+ .name = RAS_FS_NAME,
.attrs = attrs,
- .bin_attrs = bin_attrs,
};
sysfs_remove_group(&adev->dev->kobj, &group);
@@ -1105,7 +1078,7 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (sysfs_add_file_to_group(&adev->dev->kobj,
&obj->sysfs_attr.attr,
- "ras")) {
+ RAS_FS_NAME)) {
put_obj(obj);
return -EINVAL;
}
@@ -1125,7 +1098,7 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
sysfs_remove_file_from_group(&adev->dev->kobj,
&obj->sysfs_attr.attr,
- "ras");
+ RAS_FS_NAME);
obj->attr_inuse = 0;
put_obj(obj);
@@ -1141,6 +1114,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_sysfs_remove(adev, &obj->head);
}
+ if (amdgpu_bad_page_threshold != 0)
+ amdgpu_ras_sysfs_remove_bad_page_node(adev);
+
amdgpu_ras_sysfs_remove_feature_node(adev);
return 0;
@@ -1169,9 +1145,9 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
- con->dir = debugfs_create_dir("ras", minor->debugfs_root);
+ con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
adev, &amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
@@ -1187,6 +1163,13 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
*/
debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
&con->reboot);
+
+ /*
+ * The user can set this so that the hardware error count registers
+ * of the RAS IPs are not cleaned up during ras recovery.
+ */
+ debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
+ con->dir, &con->disable_ras_err_cnt_harvest);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -1211,6 +1194,7 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1233,6 +1217,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create(adev, &fs_info);
}
}
+#endif
}
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
@@ -1249,6 +1234,7 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
@@ -1257,14 +1243,48 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
}
con->dir = NULL;
+#endif
}
/* debugfs end */
/* ras fs */
-
+static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static DEVICE_ATTR(features, S_IRUGO,
+ amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
- amdgpu_ras_sysfs_create_feature_node(adev);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute_group group = {
+ .name = RAS_FS_NAME,
+ };
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct bin_attribute *bin_attrs[] = {
+ NULL,
+ NULL,
+ };
+ int r;
+
+ /* add features entry */
+ con->features_attr = dev_attr_features;
+ group.attrs = attrs;
+ sysfs_attr_init(attrs[0]);
+
+ if (amdgpu_bad_page_threshold != 0) {
+ /* add bad_page_features entry */
+ bin_attr_gpu_vram_bad_pages.private = NULL;
+ con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ bin_attrs[0] = &con->badpages_attr;
+ group.bin_attrs = bin_attrs;
+ sysfs_bin_attr_init(bin_attrs[0]);
+ }
+
+ r = sysfs_create_group(&adev->dev->kobj, &group);
+ if (r)
+ dev_err(adev->dev, "Failed to create RAS sysfs group!");
return 0;
}
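The static BIN_ATTR()/DEVICE_ATTR() declarations above roughly expand to attribute structs; for the features node, approximately:

/* approximate expansion of
 * static DEVICE_ATTR(features, S_IRUGO,
 *		      amdgpu_ras_sysfs_features_read, NULL); */
static struct device_attribute dev_attr_features = {
	.attr	= { .name = "features", .mode = S_IRUGO },
	.show	= amdgpu_ras_sysfs_features_read,
	.store	= NULL,
};

Copying the expanded struct into con->features_attr and running sysfs_attr_init() on the copy keeps one attribute instance per device, which is presumably why the group is assembled at runtime instead of being fully static.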
@@ -1456,6 +1476,45 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
}
}
+/* Parse RdRspStatus and WrRspStatus */
+void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ /*
+ * Only two blocks need to query the read/write
+ * RspStatus in the current state
+ */
+ switch (info->head.block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.funcs->query_ras_error_status)
+ adev->gfx.funcs->query_ras_error_status(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ if (adev->mmhub.funcs->query_ras_error_status)
+ adev->mmhub.funcs->query_ras_error_status(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ amdgpu_ras_error_status_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1512,23 +1571,30 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *remote_adev = NULL;
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
-
- /* Build list of devices to query RAS related errors */
- if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
- device_list_handle = &hive->device_list;
- else {
- INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
- device_list_handle = &device_list;
- }
- list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_log_on_err_counter(remote_adev);
+ if (!ras->disable_ras_err_cnt_harvest) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ list_for_each_entry(remote_adev,
+ device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
+
+ amdgpu_put_xgmi_hive(hive);
}
if (amdgpu_device_should_recover_gpu(ras->adev))
- amdgpu_device_gpu_recover(ras->adev, 0);
+ amdgpu_device_gpu_recover(ras->adev, NULL);
atomic_set(&ras->in_recovery, 0);
}
@@ -1643,7 +1709,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
int ret = 0;
/* no bad page record, skip eeprom access */
- if (!control->num_recs)
+ if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
return ret;
bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
@@ -1697,6 +1763,47 @@ out:
return ret;
}
+static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
+ uint32_t max_length)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int tmp_threshold = amdgpu_bad_page_threshold;
+ u64 val;
+
+ /*
+ * Justification of the bad_page_cnt_threshold value in the ras structure
+ *
+ * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
+ * in eeprom, which introduces two scenarios:
+ *
+ * Bad page retirement enablement:
+ * - If amdgpu_bad_page_threshold = -1, bad_page_cnt_threshold is
+ * derived from the typical bad page rate formula below.
+ *
+ * - When the value from user is 0 < amdgpu_bad_page_threshold <
+ * max record length in eeprom, use it directly.
+ *
+ * Bad page retirement disablement:
+ * - If amdgpu_bad_page_threshold = 0, bad page retirement
+ * functionality is disabled, and bad_page_cnt_threshold will
+ * take no effect.
+ */
+
+ if (tmp_threshold < -1)
+ tmp_threshold = -1;
+ else if (tmp_threshold > max_length)
+ tmp_threshold = max_length;
+
+ if (tmp_threshold == -1) {
+ val = adev->gmc.mc_vram_size;
+ do_div(val, RAS_BAD_PAGE_RATE);
+ con->bad_page_cnt_threshold = min(lower_32_bits(val),
+ max_length);
+ } else {
+ con->bad_page_cnt_threshold = tmp_threshold;
+ }
+}
+
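As a worked example of the -1 (auto) path: on a board with 16 GB of VRAM, mc_vram_size / RAS_BAD_PAGE_RATE = 16384 MB / 100 MB, which do_div truncates to 163, so bad_page_cnt_threshold becomes min(163, max_length).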
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
@@ -1706,7 +1813,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
struct amdgpu_bo *bo = NULL;
int i, ret = 0;
- if (!con || !con->eh_data)
+ /* Do not reserve bad pages when amdgpu_bad_page_threshold == 0. */
+ if (!con || !con->eh_data || (amdgpu_bad_page_threshold == 0))
return 0;
mutex_lock(&con->recovery_lock);
@@ -1774,6 +1882,8 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
+ uint32_t max_eeprom_records_len = 0;
+ bool exc_err_limit = false;
int ret;
if (con)
@@ -1792,8 +1902,15 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
- if (ret)
+ max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
+ amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
+ /*
+ * This call fails when exc_err_limit is true or
+ * ret != 0.
+ */
+ if (exc_err_limit || ret)
goto free;
if (con->eeprom_control.num_recs) {
@@ -1817,6 +1934,15 @@ free:
out:
dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
+ /*
+ * Except for the error-threshold-exceeded case, other failures in
+ * this function do not fail amdgpu driver init.
+ */
+ if (!exc_err_limit)
+ ret = 0;
+ else
+ ret = -EINVAL;
+
return ret;
}
@@ -1856,6 +1982,17 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+{
+ if (adev->asic_type != CHIP_VEGA10 &&
+ adev->asic_type != CHIP_VEGA20 &&
+ adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_SIENNA_CICHLID)
+ return 1;
+ else
+ return 0;
+}
+
/*
* check hardware's ras ability which will be saved in hw_supported.
* if hardware does not support ras, we can skip some ras initialization and
@@ -1872,8 +2009,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
- (adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_ARCTURUS))
+ amdgpu_ras_check_asic_type(adev))
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
@@ -1895,6 +2031,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = amdgpu_ras_enable == 0 ?
0 : *hw_supported & amdgpu_ras_mask;
+ adev->ras_features = *supported;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1917,9 +2054,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
- if (!con->hw_supported) {
+ if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
r = 0;
- goto err_out;
+ goto release_con;
}
con->features = 0;
@@ -1930,25 +2067,25 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (adev->nbio.funcs->init_ras_controller_interrupt) {
r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
if (r)
- goto err_out;
+ goto release_con;
}
if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
if (r)
- goto err_out;
+ goto release_con;
}
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
- goto err_out;
+ goto release_con;
}
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
-err_out:
+release_con:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
@@ -1976,7 +2113,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
amdgpu_ras_request_reset_on_boot(adev,
ras_block->block);
return 0;
- } else if (adev->in_suspend || adev->in_gpu_reset) {
+ } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -1985,7 +2122,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
}
/* in resume phase, no need to create ras fs node */
- if (adev->in_suspend || adev->in_gpu_reset)
+ if (adev->in_suspend || amdgpu_in_reset(adev))
return 0;
if (ih_info->cb) {
@@ -2143,3 +2280,19 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
return false;
}
+
+bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool exc_err_limit = false;
+
+ if (con && (amdgpu_bad_page_threshold != 0))
+ amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
+ &exc_err_limit);
+
+ /*
+ * We are only interested in variable exc_err_limit,
+ * as it indicates whether the GPU is in a bad state.
+ */
+ return exc_err_limit;
+}
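This helper is presumably meant to gate early device bring-up so a GPU whose eeprom carries the BAD tag can be rejected before full init. An illustrative, hypothetical caller:

/* hypothetical early-init gate; the error code is illustrative */
if (amdgpu_ras_check_err_threshold(adev)) {
	dev_err(adev->dev,
		"RAS bad page threshold exceeded, rejecting device\n");
	return -EHWPOISON;
}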
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index b2667342cf67..6b8d7bb83bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -31,6 +31,10 @@
#include "ta_ras_if.h"
#include "amdgpu_ras_eeprom.h"
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
+#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1)
+#define AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV (0x1 << 2)
+
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
AMDGPU_RAS_BLOCK__SDMA,
@@ -336,6 +340,12 @@ struct amdgpu_ras {
struct amdgpu_ras_eeprom_control eeprom_control;
bool error_query_ready;
+
+ /* bad page count threshold */
+ uint32_t bad_page_cnt_threshold;
+
+ /* disable ras error count harvest in recovery */
+ bool disable_ras_err_cnt_harvest;
};
struct ras_fs_data {
@@ -490,6 +500,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
+bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev);
+
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
@@ -500,10 +512,14 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- /* save bad page to eeprom before gpu reset,
- * i2c may be unstable in gpu reset
+ /*
+ * Save bad page to eeprom before gpu reset, i2c may be unstable
+ * in gpu reset.
+ *
+ * Also, exclude the case when ras recovery issuer is
+ * eeprom page write itself.
*/
- if (in_task())
+ if (!(ras->flags & AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV) && in_task())
amdgpu_ras_reserve_bad_pages(adev);
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c0096097bbcf..0e64c39a2372 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -46,6 +46,9 @@
#define EEPROM_TABLE_HDR_VAL 0x414d4452
#define EEPROM_TABLE_VER 0x00010000
+/* Bad GPU tag 'BADG' */
+#define EEPROM_TABLE_HDR_BAD 0x42414447
+
/* Assume 2 Mbit size */
#define EEPROM_SIZE_BYTES 256000
#define EEPROM_PAGE__SIZE_BYTES 256
@@ -56,6 +59,15 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
+{
+ if ((adev->asic_type == CHIP_VEGA20) ||
+ (adev->asic_type == CHIP_ARCTURUS))
+ return true;
+
+ return false;
+}
+
static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
uint16_t *i2c_addr)
{
@@ -213,6 +225,24 @@ static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
return true;
}
+static int amdgpu_ras_eeprom_correct_header_tag(
+ struct amdgpu_ras_eeprom_control *control,
+ uint32_t header)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE];
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ memset(buff, 0, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE);
+
+ mutex_lock(&control->tbl_mutex);
+ hdr->header = header;
+ ret = __update_table_header(control, buff);
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+}
+
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
{
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
@@ -238,12 +268,14 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
}
-int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit)
{
int ret = 0;
struct amdgpu_device *adev = to_amdgpu_device(control);
unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
struct i2c_msg msg = {
.addr = 0,
.flags = I2C_M_RD,
@@ -251,6 +283,11 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
+ *exceed_err_limit = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
/* Verify i2c adapter is initialized */
if (!adev->pm.smu_i2c.algo)
return -ENOENT;
@@ -279,6 +316,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
+ } else if ((hdr->header == EEPROM_TABLE_HDR_BAD) &&
+ (amdgpu_bad_page_threshold != 0)) {
+ if (ras->bad_page_cnt_threshold > control->num_recs) {
+ dev_info(adev->dev, "Using one valid bigger bad page "
+ "threshold and correcting eeprom header tag.\n");
+ ret = amdgpu_ras_eeprom_correct_header_tag(control,
+ EEPROM_TABLE_HDR_VAL);
+ } else {
+ *exceed_err_limit = true;
+ dev_err(adev->dev, "Exceeding the bad_page_threshold parameter, "
+ "disabling the GPU.\n");
+ }
} else {
DRM_INFO("Creating new EEPROM table");
@@ -375,6 +424,49 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
+int amdgpu_ras_eeprom_check_err_threshold(
+ struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char buff[EEPROM_ADDRESS_SIZE +
+ EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct i2c_msg msg = {
+ .addr = control->i2c_address,
+ .flags = I2C_M_RD,
+ .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
+ .buf = buff,
+ };
+ int ret;
+
+ *exceed_err_limit = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* read EEPROM table header */
+ mutex_lock(&control->tbl_mutex);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+ if (ret < 1) {
+ dev_err(adev->dev, "Failed to read EEPROM table header.\n");
+ goto err;
+ }
+
+ __decode_table_header_from_buff(hdr, &buff[2]);
+
+ if (hdr->header == EEPROM_TABLE_HDR_BAD) {
+ dev_warn(adev->dev, "This GPU is in BAD status.");
+ dev_warn(adev->dev, "Please retire it or setting one bigger "
+ "threshold value when reloading driver.\n");
+ *exceed_err_limit = true;
+ }
+
+err:
+ mutex_unlock(&control->tbl_mutex);
+ return 0;
+}
+
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
@@ -383,10 +475,12 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
int i, ret = 0;
struct i2c_msg *msgs, *msg;
unsigned char *buffs, *buff;
+ bool sched_ras_recovery = false;
struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
+ if (!__is_ras_eeprom_supported(adev))
return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -402,11 +496,30 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
goto free_buff;
}
+ /*
+ * If the number of saved bad pages exceeds the bad page threshold
+ * for the whole VRAM, update the table header to mark the BAD GPU
+ * tag and schedule one ras recovery after the eeprom write is done,
+ * so that the latest records are not lost.
+ *
+ * The new header will be picked up and checked by ras recovery at
+ * the next bootup, which may interrupt the boot process to notify
+ * the user that this GPU is in a bad state and should be retired
+ * for further inspection.
+ */
+ if (write && (amdgpu_bad_page_threshold != 0) &&
+ ((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
+ dev_warn(adev->dev,
+ "Saved bad pages(%d) reaches threshold value(%d).\n",
+ control->num_recs + num, ras->bad_page_cnt_threshold);
+ control->tbl_hdr.header = EEPROM_TABLE_HDR_BAD;
+ sched_ras_recovery = true;
+ }
+
/* In case of overflow just start from the beginning to avoid losing the newest records */
if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
control->next_addr = EEPROM_RECORD_START;
-
/*
* TODO Currently makes EEPROM writes for each record, this creates
* internal fragmentation. Optimize the code to do full page write of
@@ -482,6 +595,20 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
__update_tbl_checksum(control, records, num, old_hdr_byte_sum);
__update_table_header(control, buffs);
+
+ if (sched_ras_recovery) {
+ /*
+ * Before scheduling ras recovery, set the related flag
+ * first, so that amdgpu_ras_reset_gpu bypasses the common
+ * bad page reservation path.
+ */
+ amdgpu_ras_get_context(adev)->flags |=
+ AMDGPU_RAS_FLAG_SKIP_BAD_PAGE_RESV;
+
+ dev_warn(adev->dev, "Conducting ras recovery because the "
+ "bad page threshold was reached.\n");
+ amdgpu_ras_reset_gpu(adev);
+ }
} else if (!__validate_tbl_checksum(control, records, num)) {
DRM_WARN("EEPROM Table checksum mismatch!");
/* TODO Uncomment when EEPROM read/write is reliable */
@@ -499,6 +626,11 @@ free_buff:
return ret == num ? 0 : -EIO;
}
+inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void)
+{
+ return EEPROM_MAX_RECORD_NUM;
+}
+
/* Used for testing if bugs are encountered */
#if 0
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)
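The write-path decision above reduces to a small predicate. A stand-alone sketch (constants copied from the defines above; the helper name and sample numbers are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_VAL 0x414d4452 /* "AMDR" */
#define HDR_BAD 0x42414447 /* "BADG" */

/* Flip the header tag when the records about to be written push the
 * total past the threshold; the caller then schedules ras recovery
 * after the eeprom write completes. A threshold of 0 disables the
 * feature, matching the amdgpu_bad_page_threshold == 0 check. */
static bool maybe_mark_bad(uint32_t *hdr, int num_recs, int num_new,
			   int threshold)
{
	if (threshold != 0 && num_recs + num_new >= threshold) {
		*hdr = HDR_BAD;
		return true;
	}
	return false;
}

int main(void)
{
	uint32_t hdr = HDR_VAL;

	if (maybe_mark_bad(&hdr, 98, 4, 100))
		printf("header 0x%x -> BADG, schedule recovery\n", hdr);
	return 0;
}
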
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 9e7d640920fb..c7a5e5c7c61e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -76,14 +76,21 @@ struct eeprom_table_record {
unsigned char mcumc_id;
}__attribute__((__packed__));
-int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check_err_threshold(
+ struct amdgpu_ras_eeprom_control *control,
+ bool *exceed_err_limit);
+
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num);
+inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void);
+
void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control);
#endif // _AMDGPU_RAS_EEPROM_H
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13ea8ebc421c..15ee13c3bd9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -267,7 +267,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
&ring->sched;
}
- for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
atomic_set(&ring->num_jobs[i], 0);
return 0;
@@ -420,7 +420,7 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
char name[32];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index da871d84b742..7112137689db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -243,7 +243,7 @@ struct amdgpu_ring {
bool has_compute_vm_bug;
bool no_scheduler;
- atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 60bb3e8b3118..aeaaae713c59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -168,12 +168,16 @@ struct amdgpu_rlc {
u32 save_restore_list_cntl_size_bytes;
u32 save_restore_list_gpm_size_bytes;
u32 save_restore_list_srm_size_bytes;
+ u32 rlc_iram_ucode_size_bytes;
+ u32 rlc_dram_ucode_size_bytes;
u32 *register_list_format;
u32 *register_restore;
u8 *save_restore_list_cntl;
u8 *save_restore_list_gpm;
u8 *save_restore_list_srm;
+ u8 *rlc_iram_ucode;
+ u8 *rlc_dram_ucode;
bool is_rlc_v2_1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index c799691dfa84..0da0a0d98672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -32,24 +32,32 @@
#include "amdgpu_vm.h"
-enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+int amdgpu_to_sched_priority(int amdgpu_priority,
+ enum drm_sched_priority *prio)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return DRM_SCHED_PRIORITY_HIGH_HW;
+ *prio = DRM_SCHED_PRIORITY_HIGH;
+ break;
case AMDGPU_CTX_PRIORITY_HIGH:
- return DRM_SCHED_PRIORITY_HIGH_SW;
+ *prio = DRM_SCHED_PRIORITY_HIGH;
+ break;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return DRM_SCHED_PRIORITY_NORMAL;
+ *prio = DRM_SCHED_PRIORITY_NORMAL;
+ break;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return DRM_SCHED_PRIORITY_LOW;
+ *prio = DRM_SCHED_PRIORITY_MIN;
+ break;
case AMDGPU_CTX_PRIORITY_UNSET:
- return DRM_SCHED_PRIORITY_UNSET;
+ *prio = DRM_SCHED_PRIORITY_UNSET;
+ break;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return DRM_SCHED_PRIORITY_INVALID;
+ return -EINVAL;
}
+
+ return 0;
}
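The reworked helper reports failure through its return value and the priority through an out-parameter, so an invalid context priority can no longer masquerade as a valid enum value. A stand-alone sketch of the same pattern (stub enum and priority values, not the kernel definitions):

#include <errno.h>
#include <stdio.h>

enum sched_priority { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH }; /* stub */

static int to_sched_priority(int ctx_prio, enum sched_priority *prio)
{
	switch (ctx_prio) {
	case 512:  *prio = PRIO_HIGH;   return 0; /* stub "high" value */
	case 0:    *prio = PRIO_NORMAL; return 0; /* stub "normal" value */
	default:   return -EINVAL;                /* reject unknown input */
	}
}

int main(void)
{
	enum sched_priority prio;

	if (to_sched_priority(9999, &prio))
		printf("rejected with -EINVAL\n");
	return 0;
}
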
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
@@ -115,13 +123,24 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_sched *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
enum drm_sched_priority priority;
int r;
- priority = amdgpu_to_sched_priority(args->in.priority);
- if (priority == DRM_SCHED_PRIORITY_INVALID)
+ /* First check the op, then the op's argument.
+ */
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
return -EINVAL;
+ }
+
+ r = amdgpu_to_sched_priority(args->in.priority, &priority);
+ if (r)
+ return r;
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
@@ -136,7 +155,8 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
priority);
break;
default:
- DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ /* Unreachable: the op was validated above. */
r = -EINVAL;
break;
}
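The reordered ioctl validates the op before translating its argument, which is what lets the later dispatch switch treat its default case as unreachable. A toy model of that validate-then-dispatch shape (op codes assumed):

#include <errno.h>
#include <stdio.h>

enum { OP_PROC_OVERRIDE, OP_CTX_OVERRIDE }; /* stand-in op codes */

static int sched_ioctl(int op, int priority)
{
	/* First check the op, then the op's argument. */
	switch (op) {
	case OP_PROC_OVERRIDE:
	case OP_CTX_OVERRIDE:
		break;
	default:
		return -EINVAL; /* unknown op, argument never examined */
	}

	/* ... translate priority, then dispatch on the validated op ... */
	printf("op %d prio %d\n", op, priority);
	return 0;
}

int main(void)
{
	return sched_ioctl(OP_PROC_OVERRIDE, 0);
}
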
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 12299fd95691..67e5b2472f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -30,7 +30,8 @@ enum drm_sched_priority;
struct drm_device;
struct drm_file;
-enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_to_sched_priority(int amdgpu_priority,
+ enum drm_sched_priority *prio);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 63e734a125fb..ee9480d14cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -35,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -321,6 +321,49 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TP_ARGS(mapping)
);
+TRACE_EVENT(amdgpu_vm_update_ptes,
+ TP_PROTO(struct amdgpu_vm_update_params *p,
+ uint64_t start, uint64_t end,
+ unsigned int nptes, uint64_t dst,
+ uint64_t incr, uint64_t flags,
+ pid_t pid, uint64_t vm_ctx),
+ TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
+ TP_STRUCT__entry(
+ __field(u64, start)
+ __field(u64, end)
+ __field(u64, flags)
+ __field(unsigned int, nptes)
+ __field(u64, incr)
+ __field(pid_t, pid)
+ __field(u64, vm_ctx)
+ __dynamic_array(u64, dst, nptes)
+ ),
+
+ TP_fast_assign(
+ unsigned int i;
+
+ __entry->start = start;
+ __entry->end = end;
+ __entry->flags = flags;
+ __entry->incr = incr;
+ __entry->nptes = nptes;
+ __entry->pid = pid;
+ __entry->vm_ctx = vm_ctx;
+ for (i = 0; i < nptes; ++i) {
+ u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
+ p->pages_addr, dst) : dst;
+
+ ((u64 *)__get_dynamic_array(dst))[i] = addr;
+ dst += incr;
+ }
+ ),
+ TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
+ " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+ __entry->vm_ctx, __entry->start, __entry->end,
+ __entry->flags, __entry->incr, __print_array(
+ __get_dynamic_array(dst), __entry->nptes, 8))
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags, bool direct),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 978bae731398..8039d2399584 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -63,61 +63,16 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem);
-/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
- * memory request.
- *
- * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
- * @type: The type of memory requested
- * @man: The memory type manager for each domain
- *
- * This is called by ttm_bo_init_mm() when a buffer object is being
- * initialized.
- */
-static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
+static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
+ unsigned int type,
+ uint64_t size)
{
- struct amdgpu_device *adev;
-
- adev = amdgpu_ttm_adev(bdev);
-
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- /* GTT memory */
- man->func = &amdgpu_gtt_mgr_func;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- break;
- case TTM_PL_VRAM:
- /* "On-card" video ram */
- man->func = &amdgpu_vram_mgr_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- case AMDGPU_PL_GDS:
- case AMDGPU_PL_GWS:
- case AMDGPU_PL_OA:
- /* On-chip GDS memory*/
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
+ return ttm_range_man_init(&adev->mman.bdev, type,
+ false, size >> PAGE_SHIFT);
}
/**
@@ -136,7 +91,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_MASK_CACHING
};
/* Don't handle scatter gather BOs */
@@ -223,24 +179,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
/**
- * amdgpu_move_null - Register memory for a buffer object
- *
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-/**
* amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
*
* @bo: The bo to assign the memory to.
@@ -250,7 +188,7 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
uint64_t addr = 0;
@@ -270,7 +208,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
* @offset: The offset that drm_mm_node is used for finding.
*
*/
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
uint64_t *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
@@ -298,7 +236,7 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
* the physical address for local memory.
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct drm_mm_node *mm_node,
unsigned num_pages, uint64_t offset,
unsigned window, struct amdgpu_ring *ring,
@@ -521,9 +459,9 @@ error:
* help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict,
+ struct ttm_resource *new_mem,
+ struct ttm_resource *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
@@ -562,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
/* Always block for VM page tables before committing the new location */
if (bo->type == ttm_bo_type_kernel)
- r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
else
- r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
dma_fence_put(fence);
return r;
@@ -582,10 +520,10 @@ error:
*/
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
@@ -599,7 +537,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
@@ -612,14 +551,18 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
/* Bind the memory to the GTT space */
- r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -627,7 +570,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
/* move BO (in tmp_mem) to new_mem */
r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -638,10 +581,10 @@ out_cleanup:
*/
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -655,7 +598,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -669,12 +613,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
@@ -684,7 +628,7 @@ out_cleanup:
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct drm_mm_node *nodes = mem->mm_node;
@@ -694,7 +638,7 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
if (mem->mem_type != TTM_PL_VRAM)
return false;
- /* ttm_mem_reg_ioremap only supports contiguous memory */
+ /* ttm_resource_ioremap only supports contiguous memory */
if (nodes->size != mem->num_pages)
return false;
@@ -709,11 +653,11 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
*/
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
int r;
/* Can't move a pinned BO */
@@ -724,7 +668,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
adev = amdgpu_ttm_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT &&
@@ -732,7 +676,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
/* bind is enough */
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -742,7 +686,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA) {
/* Nothing to save here */
- amdgpu_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
@@ -758,7 +702,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
new_mem->mem_type == TTM_PL_VRAM) {
r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+ r = amdgpu_move_blit(bo, evict,
new_mem, old_mem);
}
@@ -795,19 +739,12 @@ memcpy:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
@@ -817,18 +754,18 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
- if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
+ if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
/* Only physically contiguous buffers apply. In a contiguous
* buffer, size of the first mm_node would match the number of
- * pages in ttm_mem_reg.
+ * pages in ttm_resource.
*/
if (adev->mman.aper_base_kaddr &&
(mm_node->size == mem->num_pages))
mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
mem->bus.offset;
- mem->bus.base = adev->gmc.aper_base;
+ mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
break;
default:
@@ -840,12 +777,13 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
uint64_t offset = (page_offset << PAGE_SHIFT);
struct drm_mm_node *mm;
mm = amdgpu_find_mm_node(&bo->mem, &offset);
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
- (offset >> PAGE_SHIFT);
+ offset += adev->gmc.aper_base;
+ return mm->start + (offset >> PAGE_SHIFT);
}
/**
@@ -879,6 +817,7 @@ struct amdgpu_ttm_tt {
uint64_t userptr;
struct task_struct *usertask;
uint32_t userflags;
+ bool bound;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
struct hmm_range *range;
#endif
@@ -1046,9 +985,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
@@ -1083,9 +1023,10 @@ release_sg:
/**
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
@@ -1166,16 +1107,23 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void*)ttm;
uint64_t flags;
int r = 0;
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
- r = amdgpu_ttm_tt_pin_userptr(ttm);
+ r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
DRM_ERROR("failed to pin userptr\n");
return r;
@@ -1207,18 +1155,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
+ gtt->bound = true;
return r;
}
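With TTM's per-tt backend hooks moved into the driver, bind and unbind are made idempotent through the new bound flag. A minimal stand-alone model of that guard (the GART programming itself is elided):

#include <stdbool.h>

struct tt { bool bound; };

static int bind(struct tt *t)
{
	if (t->bound)
		return 0; /* already bound: nothing to do */
	/* ... program GART entries here ... */
	t->bound = true;
	return 0;
}

static void unbind(struct tt *t)
{
	if (!t->bound)
		return; /* never bound, or already unbound */
	/* ... clear GART entries here ... */
	t->bound = false;
}

int main(void)
{
	struct tt t = { .bound = false };

	bind(&t);
	bind(&t);   /* second call is a no-op */
	unbind(&t); /* also reached from destroy, hence the guard */
	unbind(&t);
	return 0;
}
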
/**
- * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
+ * through AGP or GART aperture.
+ *
+ * If the bo is accessible through the AGP aperture, use the AGP
+ * aperture to access it; otherwise allocate logical space in the
+ * GART aperture and map the bo into GART.
*/
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
- struct ttm_mem_reg tmp;
+ struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
uint64_t addr, flags;
@@ -1241,8 +1195,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
- TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->mem.placement;
r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
if (unlikely(r))
@@ -1255,11 +1209,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
- ttm_bo_mem_put(bo, &tmp);
+ ttm_resource_free(bo, &tmp);
return r;
}
- ttm_bo_mem_put(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->mem);
bo->mem = tmp;
}
@@ -1293,15 +1247,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (!gtt->bound)
+ return;
+
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
- amdgpu_ttm_tt_unpin_userptr(ttm);
+ amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return;
@@ -1311,12 +1269,16 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
+ gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ amdgpu_ttm_backend_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1324,12 +1286,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
kfree(gtt);
}
-static struct ttm_backend_func amdgpu_backend_func = {
- .bind = &amdgpu_ttm_backend_bind,
- .unbind = &amdgpu_ttm_backend_unbind,
- .destroy = &amdgpu_ttm_backend_destroy,
-};
-
/**
* amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
*
@@ -1346,7 +1302,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
if (gtt == NULL) {
return NULL;
}
- gtt->ttm.ttm.func = &amdgpu_backend_func;
gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
@@ -1363,10 +1318,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1376,7 +1332,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
@@ -1396,7 +1352,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
@@ -1417,7 +1373,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
@@ -1441,7 +1397,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
- adev = amdgpu_ttm_adev(ttm->bdev);
+ adev = amdgpu_ttm_adev(bdev);
#ifdef CONFIG_SWIOTLB
if (adev->need_swiotlb && swiotlb_nr_tbl()) {
@@ -1458,21 +1414,26 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
* amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
* task
*
- * @ttm: The ttm_tt object to bind this userptr object to
+ * @bo: The ttm_buffer_object to bind this userptr to
* @addr: The address in the current tasks VM space to use
* @flags: Requirements of userptr object.
*
* Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
* to current task
*/
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags)
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt;
- if (gtt == NULL)
- return -EINVAL;
+ if (!bo->ttm) {
+ /* TODO: We want a separate TTM object type for userptrs */
+ bo->ttm = amdgpu_ttm_tt_create(bo, 0);
+ if (bo->ttm == NULL)
+ return -ENOMEM;
+ }
+ gtt = (void*)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1558,7 +1519,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
*
* Figure out the flags to use for a VM PDE (Page Directory Entry).
*/
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
uint64_t flags = 0;
@@ -1584,7 +1545,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
* Figure out the flags to use for a VM PTE (Page Table Entry).
*/
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
@@ -1742,7 +1703,9 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .init_mem_type = &amdgpu_init_mem_type,
+ .ttm_tt_bind = &amdgpu_ttm_backend_bind,
+ .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
+ .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
@@ -1768,8 +1731,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
*/
static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
+ amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
+ NULL, &adev->mman.fw_vram_usage_va);
}
/**
@@ -1783,19 +1746,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
uint64_t vram_size = adev->gmc.visible_vram_size;
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
+ adev->mman.fw_vram_usage_va = NULL;
+ adev->mman.fw_vram_usage_reserved_bo = NULL;
- if (adev->fw_vram_usage.size == 0 ||
- adev->fw_vram_usage.size > vram_size)
+ if (adev->mman.fw_vram_usage_size == 0 ||
+ adev->mman.fw_vram_usage_size > vram_size)
return 0;
return amdgpu_bo_create_kernel_at(adev,
- adev->fw_vram_usage.start_offset,
- adev->fw_vram_usage.size,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM,
- &adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
+ &adev->mman.fw_vram_usage_reserved_bo,
+ &adev->mman.fw_vram_usage_va);
}
/*
@@ -1827,7 +1790,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset =
- ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M);
+ ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
@@ -1866,10 +1829,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
* Otherwise, fallback to legacy approach to check and reserve tmr block for ip
* discovery data and G6 memory training data respectively
*/
- adev->discovery_tmr_size =
+ adev->mman.discovery_tmr_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
- if (!adev->discovery_tmr_size)
- adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET;
+ if (!adev->mman.discovery_tmr_size)
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
if (mem_train_support) {
/* reserve vram for mem train according to TMR location */
@@ -1889,14 +1852,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->discovery_tmr_size,
- adev->discovery_tmr_size,
+ adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+ adev->mman.discovery_tmr_size,
AMDGPU_GEM_DOMAIN_VRAM,
- &adev->discovery_memory,
+ &adev->mman.discovery_memory,
NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
return ret;
}
@@ -1917,15 +1880,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
uint64_t gtt_size;
int r;
u64 vis_vram_limit;
- void *stolen_vga_buf;
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
- adev->ddev->anon_inode->i_mapping,
- adev->ddev->vma_offset_manager,
+ adev_to_drm(adev)->anon_inode->i_mapping,
+ adev_to_drm(adev)->vma_offset_manager,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1937,8 +1899,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
adev->mman.bdev.no_retry = true;
/* Initialize VRAM pool with all of VRAM divided into pages */
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
- adev->gmc.real_vram_size >> PAGE_SHIFT);
+ r = amdgpu_vram_mgr_init(adev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
@@ -1971,7 +1932,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* If IP discovery is enabled, a block of memory should be
* reserved for IP discovery.
*/
- if (adev->discovery_bin) {
+ if (adev->mman.discovery_bin) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -1981,10 +1942,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
* and driver. */
- r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->stolen_vga_memory,
- NULL, &stolen_vga_buf);
+ r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_vga_memory,
+ NULL);
+ if (r)
+ return r;
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+ adev->mman.stolen_extended_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_extended_memory,
+ NULL);
if (r)
return r;
@@ -2005,7 +1973,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
/* Initialize GTT memory pool */
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+ r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -2014,22 +1982,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
(unsigned)(gtt_size / (1024 * 1024)));
/* Initialize various on-chip memory pools */
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
- adev->gds.gds_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
DRM_ERROR("Failed initializing GDS heap.\n");
return r;
}
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
- adev->gds.gws_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
if (r) {
DRM_ERROR("Failed initializing gws heap.\n");
return r;
}
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
- adev->gds.oa_size);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
DRM_ERROR("Failed initializing oa heap.\n");
return r;
@@ -2043,9 +2008,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
- void *stolen_vga_buf;
/* return the VGA stolen memory (if any) back to VRAM */
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
}
/**
@@ -2057,19 +2023,22 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the stolen vga memory back to VRAM */
+ if (adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
- amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+ amdgpu_vram_mgr_fini(adev);
+ amdgpu_gtt_mgr_fini(adev);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
@@ -2086,11 +2055,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
*/
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
- struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
uint64_t size;
int r;
- if (!adev->mman.initialized || adev->in_gpu_reset ||
+ if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
adev->mman.buffer_funcs_enabled == enable)
return;
@@ -2101,7 +2070,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
r = drm_sched_entity_init(&adev->mman.entity,
- DRM_SCHED_PRIORITY_KERNEL, &sched,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@@ -2126,7 +2095,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
- struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
if (adev == NULL)
return -EINVAL;
@@ -2307,8 +2276,8 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = (uintptr_t)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
@@ -2598,7 +2567,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
unsigned count;
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *ent, *root = minor->debugfs_root;
for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 17c8d0d7bcc3..a87951b2f06d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -32,15 +32,26 @@
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
-#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
-
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
#define AMDGPU_POISON 0xd0bed0be
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+ atomic64_t usage;
+ atomic64_t vis_usage;
+};
+
+struct amdgpu_gtt_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+ atomic64_t available;
+};
+
struct amdgpu_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
@@ -59,24 +70,46 @@ struct amdgpu_mman {
struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
struct drm_sched_entity entity;
+
+ struct amdgpu_vram_mgr vram_mgr;
+ struct amdgpu_gtt_mgr gtt_mgr;
+
+ uint64_t stolen_vga_size;
+ struct amdgpu_bo *stolen_vga_memory;
+ uint64_t stolen_extended_size;
+ struct amdgpu_bo *stolen_extended_memory;
+ bool keep_stolen_vga_memory;
+
+ /* discovery */
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ struct amdgpu_bo *discovery_memory;
+
+ /* firmware VRAM reservation */
+ u64 fw_vram_usage_start_offset;
+ u64 fw_vram_usage_size;
+ struct amdgpu_bo *fw_vram_usage_reserved_bo;
+ void *fw_vram_usage_va;
};
struct amdgpu_copy_mem {
struct ttm_buffer_object *bo;
- struct ttm_mem_reg *mem;
+ struct ttm_resource *mem;
unsigned long offset;
};
-extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
-extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
@@ -84,8 +117,8 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
@@ -130,8 +163,8 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
#endif
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -140,9 +173,9 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 183743c5fb7b..b313ce4c3e97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -408,7 +408,7 @@ static ssize_t show_##name(struct device *dev, \
char *buf) \
{ \
struct drm_device *ddev = dev_get_drvdata(dev); \
- struct amdgpu_device *adev = ddev->dev_private; \
+ struct amdgpu_device *adev = drm_to_adev(ddev); \
\
return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \
} \
@@ -500,6 +500,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
@@ -556,6 +558,14 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
+ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
+ ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
+ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
+ ucode->ucode_size);
} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
@@ -628,7 +638,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
struct amdgpu_firmware_info *ucode = NULL;
/* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
- if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
return 0;
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 12a8bc8fca0b..0e43b46d3ab5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -131,6 +131,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_RAS,
TA_FW_TYPE_PSP_HDCP,
TA_FW_TYPE_PSP_DTM,
+ TA_FW_TYPE_PSP_RAP,
};
struct ta_fw_bin_desc {
@@ -221,6 +222,15 @@ struct rlc_firmware_header_v2_1 {
uint32_t save_restore_list_srm_offset_bytes;
};
+/* version_major=2, version_minor=2 */
+struct rlc_firmware_header_v2_2 {
+ struct rlc_firmware_header_v2_1 v2_1;
+ uint32_t rlc_iram_ucode_size_bytes;
+ uint32_t rlc_iram_ucode_offset_bytes;
+ uint32_t rlc_dram_ucode_size_bytes;
+ uint32_t rlc_dram_ucode_offset_bytes;
+};
+
/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -338,6 +348,8 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_IRAM,
+ AMDGPU_UCODE_ID_RLC_DRAM,
AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index af1b1ccf613c..262baf0f61ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -125,8 +125,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
"detected in UMC block\n",
err_data->ue_count);
- if (err_data->err_addr_cnt &&
- amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ if ((amdgpu_bad_page_threshold != 0) &&
+ err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt))
dev_warn(adev->dev, "Failed to add ras bad page!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index a615a1eb750b..183814493658 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -21,6 +21,20 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
+/*
+ * (addr / 256) * 8192: the higher 26 bits of ErrorAddr
+ * are the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
struct amdgpu_umc_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
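The block-address macros above are plain shift-and-mask arithmetic. A small demo (with a hypothetical ErrorAddr value) checks the equivalence stated in the comment:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_OF_8KB_BLOCK(addr)    (((addr) & ~0xffULL) << 5)
#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)

int main(void)
{
	uint64_t err_addr = 0x12345; /* hypothetical ErrorAddr */

	/* (addr & ~0xff) floors addr to a multiple of 256, i.e.
	 * (addr / 256) * 256; shifting left by 5 multiplies by 32,
	 * giving (addr / 256) * 8192 -- the 8KB block address. */
	uint64_t blk = ADDR_OF_8KB_BLOCK(err_addr);
	uint64_t off = OFFSET_IN_256B_BLOCK(err_addr);

	printf("8KB block 0x%" PRIx64 ", offset 0x%" PRIx64 "\n", blk, off);
	return blk == (err_addr / 256) * 8192 ? 0 : 1;
}
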
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 495c3d7bb2b2..a563328e3dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -39,6 +39,7 @@
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
@@ -50,6 +51,7 @@ MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -68,6 +70,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
mutex_init(&adev->vcn.vcn_pg_lock);
+ mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
atomic_set(&adev->vcn.total_submission_cnt, 0);
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -88,7 +91,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
- fw_name = FIRMWARE_RENOIR;
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ fw_name = FIRMWARE_RENOIR;
+ else
+ fw_name = FIRMWARE_GREEN_SARDINE;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
@@ -237,6 +244,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
+ mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 7a9b804bc988..17691158f783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -220,6 +220,7 @@ struct amdgpu_vcn {
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
+ struct mutex vcn1_jpeg1_workaround;
atomic_t total_submission_cnt;
unsigned harvest_config;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 1203c20491e6..d0aea5e39531 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -31,6 +31,12 @@
#include "soc15.h"
#include "nv.h"
+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+ do { \
+ vf2pf_info->ucode_info[ucode].id = ucode; \
+ vf2pf_info->ucode_info[ucode].version = ver; \
+ } while (0)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -45,7 +51,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
if (adev->mode_info.num_crtc == 0)
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
+ adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
@@ -93,7 +99,7 @@ failed_undo:
amdgpu_ring_undo(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
- pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+ dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}
/**
@@ -239,10 +245,10 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
}
-int amdgpu_virt_fw_reserve_get_checksum(void *obj,
- unsigned long obj_size,
- unsigned int key,
- unsigned int chksum)
+unsigned int amd_sriov_msg_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int checksum)
{
unsigned int ret = key;
unsigned long i = 0;
@@ -252,9 +258,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
/* calculate checksum */
for (i = 0; i < obj_size; ++i)
ret += *(pos + i);
- /* minus the chksum itself */
- pos = (char *)&chksum;
- for (i = 0; i < sizeof(chksum); ++i)
+ /* minus the checksum itself */
+ pos = (char *)&checksum;
+ for (i = 0; i < sizeof(checksum); ++i)
ret -= *(pos + i);
return ret;
}
@@ -401,7 +407,7 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
if (bp_block_size) {
bp_cnt = bp_block_size / sizeof(uint64_t);
for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
- retired_page = *(uint64_t *)(adev->fw_vram_usage.va +
+ retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
bp_block_offset + bp_idx * sizeof(uint64_t));
bp.retired_page = retired_page;
@@ -415,33 +421,188 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
}
}
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
- uint32_t pf2vf_size = 0;
- uint32_t checksum = 0;
+ struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
+ uint32_t checksum;
uint32_t checkval;
- char *str;
+
+ if (adev->virt.fw_reserve.p_pf2vf == NULL)
+ return -EINVAL;
+
+ if (pf2vf_info->size > 1024) {
+ DRM_ERROR("invalid pf2vf message size\n");
+ return -EINVAL;
+ }
+
+ switch (pf2vf_info->version) {
+ case 1:
+ checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checksum != checkval) {
+ DRM_ERROR("invalid pf2vf message\n");
+ return -EINVAL;
+ }
+
+ adev->virt.gim_feature =
+ ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+ break;
+ case 2:
+ /* TODO: missing key, need to add it later */
+ checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ 0, checksum);
+ if (checksum != checkval) {
+ DRM_ERROR("invalid pf2vf message\n");
+ return -EINVAL;
+ }
+
+ adev->virt.vf2pf_update_interval_ms =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+ adev->virt.gim_feature =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+
+ break;
+ default:
+ DRM_ERROR("invalid pf2vf version\n");
+ return -EINVAL;
+ }
+
+	/* clamp intervals that are too large or too small */
+ if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+ adev->virt.vf2pf_update_interval_ms = 2000;
+
+ return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return;
+
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
+}
+
+static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+ struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return -EINVAL;
+
+ memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
+
+ vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
+ vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
+
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
+ else
+#endif
+ strcpy(vf2pf_info->driver_version, "N/A");
+
+ vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
+ vf2pf_info->driver_cert = 0;
+ vf2pf_info->os_info.all = 0;
+
+ vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
+ vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+ vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
+ vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
+
+ amdgpu_virt_populate_vf2pf_ucode_info(adev);
+
+ /* TODO: read dynamic info */
+ vf2pf_info->gfx_usage = 0;
+ vf2pf_info->compute_usage = 0;
+ vf2pf_info->encode_usage = 0;
+ vf2pf_info->decode_usage = 0;
+
+ vf2pf_info->checksum =
+ amd_sriov_msg_checksum(
+ vf2pf_info, vf2pf_info->header.size, 0, 0);
+
+ return 0;
+}
+
+void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ amdgpu_virt_write_vf2pf_data(adev);
+
+	schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+}
+
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+{
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ DRM_INFO("clean up the vf2pf work item\n");
+ flush_delayed_work(&adev->virt.vf2pf_work);
+ cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ }
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
- struct amdgim_pf2vf_info_v2 *pf2vf_v2 = NULL;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
+ adev->virt.vf2pf_update_interval_ms = 0;
+
+ if (adev->mman.fw_vram_usage_va != NULL) {
+ adev->virt.vf2pf_update_interval_ms = 2000;
- if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)(
- adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
- AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
-
- /* pf2vf message must be in 4K */
- if (pf2vf_size > 0 && pf2vf_size < 4096) {
- if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
- pf2vf_v2 = (struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf;
- bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_L & 0xFFFFFFFF) |
- ((((uint64_t)pf2vf_v2->bp_block_offset_H) << 32) & 0xFFFFFFFF00000000);
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ amdgpu_virt_write_vf2pf_data(adev);
+
+ /* bad page handling for version 2 */
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
+ pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+
+ bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+ ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
bp_block_size = pf2vf_v2->bp_block_size;
if (bp_block_size && !adev->virt.ras_init_done)
@@ -450,37 +611,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
+ }
- checkval = amdgpu_virt_fw_reserve_get_checksum(
- adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
- adev->virt.fw_reserve.checksum_key, checksum);
- if (checkval == checksum) {
- adev->virt.fw_reserve.p_vf2pf =
- ((void *)adev->virt.fw_reserve.p_pf2vf +
- pf2vf_size);
- memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
- sizeof(amdgim_vf2pf_info));
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
- AMDGPU_FW_VRAM_VF2PF_VER);
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
- sizeof(amdgim_vf2pf_info));
- AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
- &str);
-#ifdef MODULE
- if (THIS_MODULE->version != NULL)
- strcpy(str, THIS_MODULE->version);
- else
-#endif
- strcpy(str, "N/A");
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
- 0);
- AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
- amdgpu_virt_fw_reserve_get_checksum(
- adev->virt.fw_reserve.p_vf2pf,
- pf2vf_size,
- adev->virt.fw_reserve.checksum_key, 0));
- }
- }
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}
}
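
The amd_sriov_msg_checksum() rename above keeps the original algorithm: a byte-wise sum of the whole message seeded with a shared key, minus the bytes of the checksum argument so the stored field cancels itself out. A userspace model of that round trip; the struct layout is illustrative, only the arithmetic mirrors the driver:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg {
	uint32_t size;
	uint32_t version;
	uint32_t checksum;
	uint8_t payload[20];
};

static unsigned int byte_sum_checksum(const void *obj, unsigned long obj_size,
				      unsigned int key, unsigned int checksum)
{
	const unsigned char *pos = obj;
	unsigned int ret = key;
	unsigned long i;

	/* sum every byte of the object, checksum field included */
	for (i = 0; i < obj_size; ++i)
		ret += pos[i];

	/* subtract the bytes of the stored checksum so it cancels out */
	pos = (const unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= pos[i];

	return ret;
}

int main(void)
{
	struct msg m = { .size = sizeof(m), .version = 2 };

	memcpy(m.payload, "hello sriov message", 20);
	/* writer: the field is still zero, so passing checksum=0 is exact */
	m.checksum = byte_sum_checksum(&m, m.size, 0, 0);
	/* reader: recompute over the same bytes and compare */
	printf("valid: %d\n",
	       byte_sum_checksum(&m, m.size, 0, m.checksum) == m.checksum);
	return 0;
}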
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f826945989c7..8dd624c20f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -24,6 +24,8 @@
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H
+#include "amdgv_sriovmsg.h"
+
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
@@ -79,7 +81,10 @@ struct amdgpu_virt_fw_reserve {
struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
+
/*
+ * Legacy GIM header
+ *
* Definitions shared between PF and VF
* Structures forcibly aligned to 4 to keep the same style as PF.
*/
@@ -101,15 +106,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
-struct amd_sriov_msg_pf2vf_info_header {
- /* the total structure size in byte. */
- uint32_t size;
- /* version of this structure, written by the GIM */
- uint32_t version;
- /* reserved */
- uint32_t reserved[2];
-} __aligned(4);
-struct amdgim_pf2vf_info_v1 {
+struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
@@ -128,54 +125,6 @@ struct amdgim_pf2vf_info_v1 {
unsigned int checksum;
} __aligned(4);
-struct amdgim_pf2vf_info_v2 {
- /* header contains size and version */
- struct amd_sriov_msg_pf2vf_info_header header;
- /* use private key from mailbox 2 to create chueksum */
- uint32_t checksum;
- /* The features flags of the GIM driver supports. */
- uint32_t feature_flags;
- /* max_width * max_height */
- uint32_t uvd_enc_max_pixels_count;
- /* 16x16 pixels/sec, codec independent */
- uint32_t uvd_enc_max_bandwidth;
- /* max_width * max_height */
- uint32_t vce_enc_max_pixels_count;
- /* 16x16 pixels/sec, codec independent */
- uint32_t vce_enc_max_bandwidth;
- /* Bad pages block position in BYTE */
- uint32_t bp_block_offset_L;
- uint32_t bp_block_offset_H;
- /* Bad pages block size in BYTE */
- uint32_t bp_block_size;
- /* MEC FW position in kb from the start of VF visible frame buffer */
- uint32_t mecfw_kboffset_L;
- uint32_t mecfw_kboffset_H;
- /* MEC FW size in KB */
- uint32_t mecfw_ksize;
- /* UVD FW position in kb from the start of VF visible frame buffer */
- uint32_t uvdfw_kboffset_L;
- uint32_t uvdfw_kboffset_H;
- /* UVD FW size in KB */
- uint32_t uvdfw_ksize;
- /* VCE FW position in kb from the start of VF visible frame buffer */
- uint32_t vcefw_kboffset_L;
- uint32_t vcefw_kboffset_H;
- /* VCE FW size in KB */
- uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (18 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 0)];
-} __aligned(4);
-
-
-struct amd_sriov_msg_vf2pf_info_header {
- /* the total structure size in byte. */
- uint32_t size;
- /*version of this structure, written by the guest */
- uint32_t version;
- /* reserved */
- uint32_t reserved[2];
-} __aligned(4);
-
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
struct amd_sriov_msg_vf2pf_info_header header;
@@ -237,31 +186,6 @@ struct amdgim_vf2pf_info_v2 {
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
-#define AMDGPU_FW_VRAM_VF2PF_VER 2
-typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
-
-#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
- do { \
- ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
- } while (0)
-
-#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
- do { \
- (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
- } while (0)
-
-#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
- do { \
- if (!adev->virt.fw_reserve.p_pf2vf) \
- *(val) = 0; \
- else { \
- if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
- *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
- if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
- *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
- } \
- } while (0)
-
struct amdgpu_virt_ras_err_handler_data {
/* point to bad page records array */
struct eeprom_table_record *bps;
@@ -285,7 +209,7 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
- struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
uint32_t reg_access_mode;
@@ -293,6 +217,10 @@ struct amdgpu_virt {
bool tdr_debug;
struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
bool ras_init_done;
+
+ /* vf2pf message */
+ struct delayed_work vf2pf_work;
+ uint32_t vf2pf_update_interval_ms;
};
#define amdgpu_sriov_enabled(adev) \
@@ -325,9 +253,9 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
#define amdgpu_sriov_is_debug(adev) \
- ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+ ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
#define amdgpu_sriov_is_normal(adev) \
- ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
+ ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -341,11 +269,9 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
-int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
- unsigned int key,
- unsigned int chksum);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
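
The new vf2pf_work plus vf2pf_update_interval_ms pair forms a self-rescheduling heartbeat: the handler re-arms itself, and teardown cancels it synchronously. A minimal module sketch of the same pattern, assuming nothing amdgpu-specific; since schedule_delayed_work() takes jiffies, the millisecond interval goes through msecs_to_jiffies():

#include <linux/module.h>
#include <linux/workqueue.h>

static unsigned int interval_ms = 2000;
static struct delayed_work heartbeat_work;

static void heartbeat_fn(struct work_struct *work)
{
	pr_info("vf2pf-style heartbeat\n");
	/* re-arm: the work item keeps itself alive until cancelled */
	schedule_delayed_work(&heartbeat_work, msecs_to_jiffies(interval_ms));
}

static int __init heartbeat_init(void)
{
	INIT_DELAYED_WORK(&heartbeat_work, heartbeat_fn);
	schedule_delayed_work(&heartbeat_work, msecs_to_jiffies(interval_ms));
	return 0;
}

static void __exit heartbeat_exit(void)
{
	/* waits for a running handler and prevents re-arming */
	cancel_delayed_work_sync(&heartbeat_work);
}

module_init(heartbeat_init);
module_exit(heartbeat_exit);
MODULE_LICENSE("GPL");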
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 71e005cf2952..df110afa97bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -28,6 +28,7 @@
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -35,6 +36,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: GPUVM
@@ -1500,6 +1502,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
pt = cursor.entry->base.bo;
shift = parent_shift;
+ frag_end = max(frag_end, ALIGN(frag_start + 1,
+ 1ULL << shift));
}
/* Looks good so far, calculate parameters for the update */
@@ -1511,19 +1515,26 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
entry_end = min(entry_end, end);
do {
+ struct amdgpu_vm *vm = params->vm;
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
/* This can happen when we set higher level PDs to
* silent to stop fault floods.
*/
nptes = max(nptes, 1u);
+
+ trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+ nptes, dst, incr, upd_flags,
+ vm->task_info.pid,
+ vm->immediate.fence_context);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
- flags | AMDGPU_PTE_FRAG(frag));
+ upd_flags);
pe_start += nptes * 8;
- dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += nptes * incr;
frag_start = upd_end;
if (frag_start >= frag_end) {
@@ -1691,13 +1702,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t max_entries;
uint64_t addr, last;
+ max_entries = mapping->last - start + 1;
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
- max_entries = (nodes->size - pfn) *
- AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = min((nodes->size - pfn) *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
} else {
addr = 0;
- max_entries = S64_MAX;
}
if (pages_addr) {
@@ -1727,7 +1738,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += pfn << PAGE_SHIFT;
}
- last = min((uint64_t)mapping->last, start + max_entries - 1);
+ last = start + max_entries - 1;
r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
start, last, flags, addr,
dma_addr, fence);
@@ -1765,7 +1776,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- struct ttm_mem_reg *mem;
+ struct ttm_resource *mem;
struct drm_mm_node *nodes;
struct dma_fence **last_update;
struct dma_resv *resv;
@@ -1778,15 +1789,24 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
nodes = NULL;
resv = vm->root.base.bo->tbo.base.resv;
} else {
+ struct drm_gem_object *obj = &bo->tbo.base;
struct ttm_dma_tt *ttm;
+ resv = bo->tbo.base.resv;
+ if (obj->import_attach && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo = gem_to_amdgpu_bo(gobj);
+ }
mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- resv = bo->tbo.base.resv;
}
if (bo) {
@@ -2132,8 +2152,10 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
- (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+ if (!bo)
+ return bo_va;
+
+ if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
bo_va->is_xgmi = true;
/* Power up XGMI if it can be potentially used */
amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
@@ -2785,7 +2807,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context, unsigned int pasid)
+ int vm_context, u32 pasid)
{
struct amdgpu_bo_param bp;
struct amdgpu_bo *root;
@@ -2956,7 +2978,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* 0 for success, -errno for errors.
*/
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned int pasid)
+ u32 pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -3209,7 +3231,7 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
long timeout = msecs_to_jiffies(2000);
int r;
@@ -3254,7 +3276,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* @pasid: PASID identifier for VM
* @task_info: task_info to fill.
*/
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
@@ -3298,7 +3320,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* Try to gracefully handle a VM fault. Return true if the fault was handled and
* shouldn't be reported any more.
*/
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
uint64_t addr)
{
struct amdgpu_bo *root;
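
On the amdgpu_vm_update_ptes() hunk above: at a given page-table level each entry covers incr bytes (the GPU page size shifted by the level), so after writing nptes entries the destination advances by nptes * incr. The replaced inline product computed the same value; a small arithmetic check under that assumption:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL	/* stand-in for AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
	unsigned int shift = 9;			/* 512 pages per entry at this level */
	uint64_t incr = GPU_PAGE_SIZE << shift;	/* bytes mapped per entry */
	unsigned int nptes = 4;

	/* old form: (uint64_t)nptes * GPU_PAGE_SIZE << shift
	 * new form: nptes * incr; both advance dst identically */
	printf("equal: %d\n",
	       ((uint64_t)nptes * GPU_PAGE_SIZE << shift) == nptes * incr);
	return 0;
}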
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 770025a5e500..58c83a7ad0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -98,7 +98,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL)
-/* How to programm VM fault handling */
+/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
@@ -112,8 +112,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB_0 1
#define AMDGPU_MMHUB_1 2
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
+/* Reserve 2MB at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_SIZE (2ULL << 20)
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
@@ -372,8 +372,8 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context, unsigned int pasid);
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
+ int vm_context, u32 pasid);
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
@@ -430,9 +430,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_task_info *task_info);
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 39c704a1fb0e..0786e7555554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -59,7 +59,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: kmap addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 189d46ea603b..db790574dc2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -155,7 +155,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
@@ -187,7 +187,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @bo: PD/PT to update
- * @pe: addr of the page entry
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 0739e259bf91..0c6b7c5ecfec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,12 +28,15 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
-struct amdgpu_vram_mgr {
- struct drm_mm mm;
- spinlock_t lock;
- atomic64_t usage;
- atomic64_t vis_usage;
-};
+static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_vram_mgr, manager);
+}
+
+static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
+{
+ return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+}
/**
* DOC: mem_info_vram_total
@@ -47,7 +50,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
}
@@ -64,7 +67,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
}
@@ -81,10 +84,11 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+ amdgpu_vram_mgr_usage(man));
}
/**
@@ -99,10 +103,11 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+ amdgpu_vram_mgr_vis_usage(man));
}
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -110,7 +115,7 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
switch (adev->gmc.vram_vendor) {
case SAMSUNG:
@@ -158,63 +163,72 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = {
NULL
};
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
+
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
- * @man: TTM memory type manager
- * @p_size: maximum size of VRAM
+ * @adev: amdgpu_device pointer
*
* Allocate and initialize the VRAM manager.
*/
-static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
int ret;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
+ ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
+
+ man->func = &amdgpu_vram_mgr_func;
- drm_mm_init(&mgr->mm, 0, p_size);
+ drm_mm_init(&mgr->mm, 0, man->size);
spin_lock_init(&mgr->lock);
- man->priv = mgr;
/* Add the two VRAM-related sysfs files */
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
if (ret)
DRM_ERROR("Failed to register sysfs\n");
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
/**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager
*
- * @man: TTM memory type manager
+ * @adev: amdgpu_device pointer
*
* Destroy and free the VRAM manager, returns -EBUSY if ranges are still
* allocated inside it.
*/
-static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ if (ret)
+ return;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
+
sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
- return 0;
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}
/**
* amdgpu_vram_mgr_vis_size - Calculate visible node size
*
- * @adev: amdgpu device structure
+ * @adev: amdgpu_device pointer
* @node: MM node structure
*
* Calculate how many bytes of the MM node are inside visible VRAM
@@ -243,7 +257,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct ttm_resource *mem = &bo->tbo.mem;
struct drm_mm_node *nodes = mem->mm_node;
unsigned pages = mem->num_pages;
u64 usage;
@@ -263,13 +277,13 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
/**
* amdgpu_vram_mgr_virt_start - update virtual start address
*
- * @mem: ttm_mem_reg to update
+ * @mem: ttm_resource to update
* @node: just allocated node
*
* Calculate a virtual BO start address to easily check if everything is CPU
* accessible.
*/
-static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
+static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
struct drm_mm_node *node)
{
unsigned long start;
@@ -292,13 +306,13 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_mem_reg *mem,
*
* Allocate VRAM for the given BO.
*/
-static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
@@ -410,11 +424,11 @@ error:
*
* Free the allocated VRAM again.
*/
-static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct drm_mm_node *nodes = mem->mm_node;
uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages;
@@ -451,7 +465,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
* Allocate and fill a sg table from a VRAM allocation.
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt)
@@ -544,9 +558,9 @@ void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
*
* Returns how many bytes are used in this domain.
*/
-uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
return atomic64_read(&mgr->usage);
}
@@ -558,9 +572,9 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
*
* Returns how many bytes are used in the visible part of VRAM
*/
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
return atomic64_read(&mgr->vis_usage);
}
@@ -573,10 +587,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
*
* Dump the table content using printk.
*/
-static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
+static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
spin_lock(&mgr->lock);
drm_mm_print(&mgr->mm, printer);
@@ -587,10 +601,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
amdgpu_vram_mgr_vis_usage(man) >> 20);
}
-const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
- .init = amdgpu_vram_mgr_init,
- .takedown = amdgpu_vram_mgr_fini,
- .get_node = amdgpu_vram_mgr_new,
- .put_node = amdgpu_vram_mgr_del,
- .debug = amdgpu_vram_mgr_debug
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
+ .alloc = amdgpu_vram_mgr_new,
+ .free = amdgpu_vram_mgr_del,
+ .debug = amdgpu_vram_mgr_debug
};
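
The VRAM manager conversion drops the kzalloc'd man->priv object: the manager is now embedded in amdgpu_device and recovered with container_of() in both directions, so init cannot fail on allocation and one level of pointer indirection disappears. A stand-alone illustration of that embedding trick; the struct names are placeholders:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct resource_manager { unsigned long size; };

struct vram_mgr {
	struct resource_manager manager;	/* embedded, not allocated */
	long long usage;
};

struct device {
	struct vram_mgr vram_mgr;
};

static struct vram_mgr *to_vram_mgr(struct resource_manager *man)
{
	return container_of(man, struct vram_mgr, manager);
}

static struct device *to_device(struct vram_mgr *mgr)
{
	return container_of(mgr, struct device, vram_mgr);
}

int main(void)
{
	struct device dev = { .vram_mgr = { .manager = { .size = 1024 } } };
	struct resource_manager *man = &dev.vram_mgr.manager;

	printf("same mgr: %d, same dev: %d\n",
	       to_vram_mgr(man) == &dev.vram_mgr,
	       to_device(to_vram_mgr(man)) == &dev);
	return 0;
}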
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index e3a3755cb999..1162913c8bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -35,11 +35,9 @@
static DEFINE_MUTEX(xgmi_mutex);
-#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
-static unsigned hive_count = 0;
+static LIST_HEAD(xgmi_hive_list);
static const int xgmi_pcs_err_status_reg_vg20[] = {
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
@@ -171,65 +169,53 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
*
*/
+static struct attribute amdgpu_xgmi_hive_id = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO
+};
-static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct amdgpu_hive_info *hive =
- container_of(attr, struct amdgpu_hive_info, dev_attr);
-
- return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
-}
+static struct attribute *amdgpu_xgmi_hive_attrs[] = {
+ &amdgpu_xgmi_hive_id,
+ NULL
+};
-static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive)
+static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
- int ret = 0;
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
- if (WARN_ON(hive->kobj))
- return -EINVAL;
-
- hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
- if (!hive->kobj) {
- dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
- return -EINVAL;
- }
-
- hive->dev_attr = (struct device_attribute) {
- .attr = {
- .name = "xgmi_hive_id",
- .mode = S_IRUGO,
-
- },
- .show = amdgpu_xgmi_show_hive_id,
- };
+ if (attr == &amdgpu_xgmi_hive_id)
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
- ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
- if (ret) {
- dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
- kobject_del(hive->kobj);
- kobject_put(hive->kobj);
- hive->kobj = NULL;
- }
-
- return ret;
+ return 0;
}
-static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
- struct amdgpu_hive_info *hive)
+static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
- sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
- kobject_del(hive->kobj);
- kobject_put(hive->kobj);
- hive->kobj = NULL;
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
+
+ mutex_destroy(&hive->hive_lock);
+ kfree(hive);
}
+static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
+ .show = amdgpu_xgmi_show_attrs,
+};
+
+struct kobj_type amdgpu_xgmi_hive_type = {
+ .release = amdgpu_xgmi_hive_release,
+ .sysfs_ops = &amdgpu_xgmi_hive_ops,
+ .default_attrs = amdgpu_xgmi_hive_attrs,
+};
+
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
@@ -241,7 +227,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
uint64_t fica_out;
unsigned int error_count = 0;
@@ -287,8 +273,8 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
/* Create sysfs link to hive info folder on the first device */
- if (adev != hive->adev) {
- ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+ if (hive->kobj.parent != (&adev->dev->kobj)) {
+ ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
"xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link to hive info");
@@ -296,9 +282,9 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
}
}
- sprintf(node, "node%d", hive->number_devices);
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
/* Create sysfs link from the hive folder to this device */
- ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+ ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link from hive info");
goto remove_link;
@@ -308,7 +294,7 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
remove_link:
- sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+ sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
@@ -326,78 +312,96 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
- if (adev != hive->adev)
+ if (hive->kobj.parent != (&adev->dev->kobj))
sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info");
- sprintf(node, "node%d", hive->number_devices);
- sysfs_remove_link(hive->kobj, node);
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
+ sysfs_remove_link(&hive->kobj, node);
}
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
- int i;
- struct amdgpu_hive_info *tmp;
+ struct amdgpu_hive_info *hive = NULL, *tmp = NULL;
+ int ret;
if (!adev->gmc.xgmi.hive_id)
return NULL;
+ if (adev->hive) {
+ kobject_get(&adev->hive->kobj);
+ return adev->hive;
+ }
+
mutex_lock(&xgmi_mutex);
- for (i = 0 ; i < hive_count; ++i) {
- tmp = &xgmi_hives[i];
- if (tmp->hive_id == adev->gmc.xgmi.hive_id) {
- if (lock)
- mutex_lock(&tmp->hive_lock);
- mutex_unlock(&xgmi_mutex);
- return tmp;
+ if (!list_empty(&xgmi_hive_list)) {
+ list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) {
+ if (hive->hive_id == adev->gmc.xgmi.hive_id)
+ goto pro_end;
}
}
- if (i >= AMDGPU_MAX_XGMI_HIVE) {
- mutex_unlock(&xgmi_mutex);
- return NULL;
+
+ hive = kzalloc(sizeof(*hive), GFP_KERNEL);
+ if (!hive) {
+ dev_err(adev->dev, "XGMI: allocation failed\n");
+ hive = NULL;
+ goto pro_end;
}
/* initialize new hive if not exist */
- tmp = &xgmi_hives[hive_count++];
-
- if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
- mutex_unlock(&xgmi_mutex);
- return NULL;
+ ret = kobject_init_and_add(&hive->kobj,
+ &amdgpu_xgmi_hive_type,
+ &adev->dev->kobj,
+ "%s", "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kfree(hive);
+ hive = NULL;
+ goto pro_end;
}
- tmp->adev = adev;
- tmp->hive_id = adev->gmc.xgmi.hive_id;
- INIT_LIST_HEAD(&tmp->device_list);
- mutex_init(&tmp->hive_lock);
- mutex_init(&tmp->reset_lock);
- task_barrier_init(&tmp->tb);
-
- if (lock)
- mutex_lock(&tmp->hive_lock);
- tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
- tmp->hi_req_gpu = NULL;
+ hive->hive_id = adev->gmc.xgmi.hive_id;
+ INIT_LIST_HEAD(&hive->device_list);
+ INIT_LIST_HEAD(&hive->node);
+ mutex_init(&hive->hive_lock);
+ atomic_set(&hive->in_reset, 0);
+ atomic_set(&hive->number_devices, 0);
+ task_barrier_init(&hive->tb);
+ hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ hive->hi_req_gpu = NULL;
/*
* hive pstate on boot is high in vega20 so we have to go to low
* pstate after boot.
*/
- tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+ hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+ list_add_tail(&hive->node, &xgmi_hive_list);
+
+pro_end:
+ if (hive)
+ kobject_get(&hive->kobj);
mutex_unlock(&xgmi_mutex);
+ return hive;
+}
- return tmp;
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
+{
+ if (hive)
+ kobject_put(&hive->kobj);
}
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
struct amdgpu_device *request_adev = hive->hi_req_gpu ?
hive->hi_req_gpu : adev;
bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
+ amdgpu_put_xgmi_hive(hive);
/* fw bug so temporarily disable pstate switching */
return 0;
@@ -449,7 +453,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
- hive->number_devices,
+ atomic_read(&hive->number_devices),
&adev->psp.xgmi_context.top_info);
if (ret)
dev_err(adev->dev,
@@ -511,7 +515,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
}
- hive = amdgpu_get_xgmi_hive(adev, 1);
+ hive = amdgpu_get_xgmi_hive(adev);
if (!hive) {
ret = -EINVAL;
dev_err(adev->dev,
@@ -519,6 +523,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
goto exit;
}
+ mutex_lock(&hive->hive_lock);
top_info = &adev->psp.xgmi_context.top_info;
@@ -526,7 +531,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
list_for_each_entry(entry, &hive->device_list, head)
top_info->nodes[count++].node_id = entry->node_id;
top_info->num_nodes = count;
- hive->number_devices = count;
+ atomic_set(&hive->number_devices, count);
task_barrier_add_task(&hive->tb);
@@ -541,7 +546,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
if (ret)
- goto exit;
+ goto exit_unlock;
}
/* get latest topology info for each device from psp */
@@ -554,7 +559,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
/* To do : continue with some node failed or disable the whole hive */
- goto exit;
+ goto exit_unlock;
}
}
}
@@ -562,39 +567,51 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!ret)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
-
+exit_unlock:
mutex_unlock(&hive->hive_lock);
exit:
- if (!ret)
+ if (!ret) {
+ adev->hive = hive;
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
- else
+ } else {
+ amdgpu_put_xgmi_hive(hive);
dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
ret);
+ }
return ret;
}
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
- struct amdgpu_hive_info *hive;
+ struct amdgpu_hive_info *hive = adev->hive;
if (!adev->gmc.xgmi.supported)
return -EINVAL;
- hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
return -EINVAL;
+ mutex_lock(&hive->hive_lock);
task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ if (hive->hi_req_gpu == adev)
+ hive->hi_req_gpu = NULL;
+ list_del(&adev->gmc.xgmi.head);
mutex_unlock(&hive->hive_lock);
- if(!(--hive->number_devices)){
- amdgpu_xgmi_sysfs_destroy(adev, hive);
- mutex_destroy(&hive->hive_lock);
- mutex_destroy(&hive->reset_lock);
+ amdgpu_put_xgmi_hive(hive);
+ adev->hive = NULL;
+
+ if (atomic_dec_return(&hive->number_devices) == 0) {
+ /* Remove the hive from global hive list */
+ mutex_lock(&xgmi_mutex);
+ list_del(&hive->node);
+ mutex_unlock(&xgmi_mutex);
+
+ amdgpu_put_xgmi_hive(hive);
}
return psp_xgmi_terminate(&adev->psp);
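
The hive rework ties the hive's lifetime to an embedded kobject: amdgpu_get_xgmi_hive() now returns a referenced hive, amdgpu_put_xgmi_hive() drops the reference, and the release callback frees everything after the last put. A hedged sketch of that ownership model; hive_create() and the mutex are simplified placeholders:

#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct hive {
	struct kobject kobj;	/* owns the hive's lifetime */
	struct mutex lock;
};

static void hive_release(struct kobject *kobj)
{
	struct hive *h = container_of(kobj, struct hive, kobj);

	/* runs only after the final kobject_put() */
	mutex_destroy(&h->lock);
	kfree(h);
}

static struct kobj_type hive_ktype = {
	.release = hive_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct hive *hive_create(struct kobject *parent)
{
	struct hive *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;
	mutex_init(&h->lock);
	/* kobject_init_and_add() leaves the refcount at one */
	if (kobject_init_and_add(&h->kobj, &hive_ktype, parent, "hive")) {
		kobject_put(&h->kobj);	/* release() does the cleanup */
		return NULL;
	}
	return h;	/* callers take extra refs with kobject_get() */
}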
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6999eab16a72..148560d63554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -27,13 +27,13 @@
struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
- int number_devices;
- struct mutex hive_lock, reset_lock;
- struct kobject *kobj;
- struct device_attribute dev_attr;
- struct amdgpu_device *adev;
+ struct kobject kobj;
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct list_head node;
+ atomic_t number_devices;
+ struct mutex hive_lock;
+ atomic_t in_reset;
int hi_req_count;
struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
@@ -50,7 +50,8 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
-struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
new file mode 100644
index 000000000000..5355827ed0ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018-2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGV_SRIOV_MSG__H_
+#define AMDGV_SRIOV_MSG__H_
+
+/* unit in kilobytes */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
+
+/*
+ * layout
+ * 0 64KB 65KB 66KB
+ * | VBIOS | PF2VF | VF2PF | Bad Page | ...
+ * | 64KB | 1KB | 1KB |
+ */
+#define AMD_SRIOV_MSG_SIZE_KB 1
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+
+/*
+ * PF2VF history log:
+ * v1 defined in amdgim
+ * v2 current
+ *
+ * VF2PF history log:
+ * v1 defined in amdgim
+ * v2 defined in amdgim
+ * v3 current
+ */
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3
+
+#define AMD_SRIOV_MSG_RESERVE_UCODE 24
+
+enum amd_sriov_ucode_engine_id {
+ AMD_SRIOV_UCODE_ID_VCE = 0,
+ AMD_SRIOV_UCODE_ID_UVD,
+ AMD_SRIOV_UCODE_ID_MC,
+ AMD_SRIOV_UCODE_ID_ME,
+ AMD_SRIOV_UCODE_ID_PFP,
+ AMD_SRIOV_UCODE_ID_CE,
+ AMD_SRIOV_UCODE_ID_RLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLG,
+ AMD_SRIOV_UCODE_ID_RLC_SRLS,
+ AMD_SRIOV_UCODE_ID_MEC,
+ AMD_SRIOV_UCODE_ID_MEC2,
+ AMD_SRIOV_UCODE_ID_SOS,
+ AMD_SRIOV_UCODE_ID_ASD,
+ AMD_SRIOV_UCODE_ID_TA_RAS,
+ AMD_SRIOV_UCODE_ID_TA_XGMI,
+ AMD_SRIOV_UCODE_ID_SMC,
+ AMD_SRIOV_UCODE_ID_SDMA,
+ AMD_SRIOV_UCODE_ID_SDMA2,
+ AMD_SRIOV_UCODE_ID_VCN,
+ AMD_SRIOV_UCODE_ID_DMCU,
+ AMD_SRIOV_UCODE_ID__MAX
+};
+
+#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed
+
+union amd_sriov_msg_feature_flags {
+ struct {
+ uint32_t error_log_collect : 1;
+ uint32_t host_load_ucodes : 1;
+ uint32_t host_flr_vramlost : 1;
+ uint32_t mm_bw_management : 1;
+ uint32_t pp_one_vf_mode : 1;
+ uint32_t reserved : 27;
+ } flags;
+ uint32_t all;
+};
+
+union amd_sriov_msg_os_info {
+ struct {
+ uint32_t windows : 1;
+ uint32_t reserved : 31;
+ } info;
+ uint32_t all;
+};
+
+struct amd_sriov_msg_pf2vf_info_header {
+	/* the total structure size in bytes */
+ uint32_t size;
+ /* version of this structure, written by the HOST */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_pf2vf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_pf2vf_info_header header;
+ /* use private key from mailbox 2 to create checksum */
+ uint32_t checksum;
+ /* The features flags of the HOST driver supports */
+ union amd_sriov_msg_feature_flags feature_flags;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_frame;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_frame;
+ /* MEC FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t mecfw_offset;
+ /* MEC FW size in BYTE */
+ uint32_t mecfw_size;
+ /* UVD FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t uvdfw_offset;
+ /* UVD FW size in BYTE */
+ uint32_t uvdfw_size;
+ /* VCE FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t vcefw_offset;
+ /* VCE FW size in BYTE */
+ uint32_t vcefw_size;
+ /* Bad pages block position in BYTE */
+ uint32_t bp_block_offset_low;
+ uint32_t bp_block_offset_high;
+ /* Bad pages block size in BYTE */
+ uint32_t bp_block_size;
+	/* interval, in msec, at which the VF updates the VF2PF area; 0 = manual */
+ uint32_t vf2pf_update_interval_ms;
+ /* identification in ROCm SMI */
+ uint64_t uuid;
+ uint32_t fcn_idx;
+ /* reserved */
+ uint32_t reserved[256-26];
+};
+
+struct amd_sriov_msg_vf2pf_info_header {
+	/* the total structure size in bytes */
+ uint32_t size;
+ /* version of this structure, written by the guest */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_vf2pf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version */
+ union amd_sriov_msg_os_info os_info;
+ /* guest fb information in the unit of MB */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest avc engine usage percentage. 0xffff means N/A */
+ uint32_t avc_enc_usage;
+ /* guest avc engine health percentage. 0xffff means N/A */
+ uint32_t avc_enc_health;
+ /* guest hevc engine usage percentage. 0xffff means N/A */
+ uint32_t hevc_enc_usage;
+	/* guest hevc engine health percentage. 0xffff means N/A */
+ uint32_t hevc_enc_health;
+ /* combined encode/decode usage */
+ uint32_t encode_usage;
+ uint32_t decode_usage;
+ /* Version of PF2VF that VF understands */
+ uint32_t pf2vf_version_required;
+ /* additional FB usage */
+ uint32_t fb_vis_usage;
+ uint32_t fb_vis_size;
+ uint32_t fb_size;
+ /* guest ucode data, each one is 1.25 Dword */
+ struct {
+ uint8_t id;
+ uint32_t version;
+ } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+
+ /* reserved */
+ uint32_t reserved[256-68];
+};
+
+/* mailbox message send from guest to host */
+enum amd_sriov_mailbox_request_message {
+ MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1,
+ MB_REQ_MSG_REL_GPU_INIT_ACCESS,
+ MB_REQ_MSG_REQ_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REL_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
+ MB_REQ_MSG_REQ_GPU_INIT_DATA,
+
+ MB_REQ_MSG_LOG_VF_ERROR = 200,
+};
+
+/* mailbox message send from host to guest */
+enum amd_sriov_mailbox_response_message {
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
+ MB_RES_MSG_SUCCESS,
+ MB_RES_MSG_FAIL,
+ MB_RES_MSG_QUERY_ALIVE,
+ MB_RES_MSG_GPU_INIT_DATA_READY,
+
+ MB_RES_MSG_TEXT_MESSAGE = 255
+};
+
+/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
+enum amd_sriov_gpu_init_data_version {
+ GPU_INIT_DATA_READY_V1 = 1,
+};
+
+#pragma pack(pop) // Restore previous packing option
+
+/* checksum function between host and guest */
+unsigned int amd_sriov_msg_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int checksum);
+
+/* assertion at compile time */
+#ifdef __linux__
+#define stringification(s) _stringification(s)
+#define _stringification(s) #s
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+ AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+
+_Static_assert(
+ AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+
+#undef _stringification
+#undef stringification
+#endif
+
+#endif /* AMDGV_SRIOV_MSG__H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index 847ca9b3ce4e..3ea557864320 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -73,6 +73,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
athub_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
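
The hunks in the following files mechanically replace open-coded dev->dev_private and adev->ddev accesses with the drm_to_adev()/adev_to_drm() helpers. As a sketch of what those helpers are assumed to look like in amdgpu.h at this point in the series (thin wrappers over the existing fields; later in the series they can become container_of() lookups once drm_device is embedded in amdgpu_device):

	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return ddev->dev_private;
	}

	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return adev->ddev;
	}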
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 213e62a28ba0..159a2a4385a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -41,7 +41,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
SET_CRTC_OVERSCAN_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
@@ -84,7 +84,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
@@ -114,7 +114,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index =
GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -131,7 +131,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -147,7 +147,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
@@ -163,7 +163,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
@@ -192,7 +192,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
u16 misc = 0;
@@ -307,7 +307,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder = amdgpu_crtc->encoder;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -588,7 +588,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
struct amdgpu_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u8 frev, crev;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
@@ -749,7 +749,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
@@ -818,7 +818,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
u32 pll_clock = mode->clock;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9b74cfdba7b8..a3ba9ca11e98 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -60,7 +60,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
u8 delay, u8 *ack)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
@@ -305,7 +305,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
amdgpu_connector->ddc_bus->rec.i2c_id, 0);
@@ -328,6 +328,22 @@ static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connect
buf[0], buf[1], buf[2]);
}
+static void amdgpu_atombios_dp_ds_ports(struct amdgpu_connector *amdgpu_connector)
+{
+ struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
+ int ret;
+
+ if (dig_connector->dpcd[DP_DPCD_REV] > 0x10) {
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux,
+ DP_DOWNSTREAM_PORT_0,
+ dig_connector->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS);
+ if (ret < 0)
+ memset(dig_connector->downstream_ports, 0,
+ DP_MAX_DOWNSTREAM_PORTS);
+ }
+}
+
int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
@@ -343,7 +359,7 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
dig_connector->dpcd);
amdgpu_atombios_dp_probe_oui(amdgpu_connector);
-
+ amdgpu_atombios_dp_ds_ports(amdgpu_connector);
return 0;
}
@@ -702,7 +718,7 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1e94a9b652f7..8339c8c3a328 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -70,7 +70,7 @@ u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return 0;
@@ -84,7 +84,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
{
struct drm_encoder *encoder = &amdgpu_encoder->base;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder_atom_dig *dig;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
@@ -152,7 +152,7 @@ amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
}
@@ -166,7 +166,7 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
struct drm_connector *drm_connector)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd;
struct backlight_properties props;
struct amdgpu_backlight_privdata *pdata;
@@ -229,7 +229,7 @@ void
amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd = NULL;
struct amdgpu_encoder_atom_dig *dig;
@@ -319,7 +319,7 @@ static void
amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
@@ -382,7 +382,7 @@ static void
amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
@@ -573,7 +573,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -762,7 +762,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1178,7 +1178,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
@@ -1225,7 +1225,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
union external_encoder_control args;
@@ -1466,7 +1466,7 @@ void
amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
union crtc_source_param args;
@@ -1673,7 +1673,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1701,7 +1701,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -1751,7 +1751,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
uint32_t bios_0_scratch;
@@ -1790,7 +1790,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
@@ -1848,7 +1848,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
bool connected)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector =
to_amdgpu_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -1999,7 +1999,7 @@ struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
uint16_t data_offset, misc;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index b4cc7c55fa16..09a538465ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index c2c67ab68a43..5442df094102 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1336,11 +1336,13 @@ cik_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- case CHIP_HAWAII:
/* disable baco reset until it works */
/* smu7_asic_get_baco_capability(adev, &baco_reset); */
baco_reset = false;
break;
+ case CHIP_HAWAII:
+ baco_reset = cik_asic_supports_baco(adev);
+ break;
default:
baco_reset = false;
break;
@@ -1366,8 +1368,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
+ dev_info(adev->dev, "PCI CONFIG reset\n");
r = cik_asic_pci_config_reset(adev);
}
@@ -1919,6 +1923,10 @@ static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
return (nak_r + nak_g);
}
+static void cik_pre_asic_init(struct amdgpu_device *adev)
+{
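+ /* CIK needs no pre-ASIC-init work; empty stub keeps the callback table complete */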
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1939,6 +1947,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.need_reset_on_init = &cik_need_reset_on_init,
.get_pcie_replay_count = &cik_get_pcie_replay_count,
.supports_baco = &cik_asic_supports_baco,
+ .pre_asic_init = &cik_pre_asic_init,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 401c99f0b2d0..db953e95f3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -316,14 +316,9 @@ static int cik_ih_sw_fini(void *handle)
static int cik_ih_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = cik_ih_irq_init(adev);
- if (r)
- return r;
-
- return 0;
+ return cik_ih_irq_init(adev);
}
static int cik_ih_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 20f108818b2b..a3c3fe96515f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 84b45a019a36..5963cbe0d455 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -328,7 +328,7 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -383,7 +383,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -504,7 +504,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev)
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1209,7 +1209,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad
static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1226,7 +1226,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1272,7 +1272,7 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1328,7 +1328,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1483,7 +1483,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1519,7 +1519,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint8_t *frame = buffer + 3;
@@ -1538,7 +1538,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1569,7 +1569,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1749,7 +1749,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1822,7 +1822,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1836,7 +1836,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1850,7 +1850,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2095,7 +2095,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
u32 tmp;
@@ -2111,7 +2111,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2250,7 +2250,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2285,7 +2285,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2300,7 +2300,7 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2311,7 +2311,7 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2329,7 +2329,7 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2503,7 +2503,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2557,7 +2557,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2701,7 +2701,7 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2709,8 +2709,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = 128;
amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
switch (amdgpu_crtc->crtc_id) {
case 0:
@@ -2792,24 +2792,24 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2819,7 +2819,7 @@ static int dce_v10_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2832,7 +2832,7 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2844,13 +2844,13 @@ static int dce_v10_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v10_0_audio_fini(adev);
dce_v10_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3157,14 +3157,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3176,7 +3176,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3245,7 +3245,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3345,7 +3345,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3385,7 +3385,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3485,7 +3485,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 01ce52266966..1954472c8e8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -346,7 +346,7 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -400,7 +400,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -530,7 +530,7 @@ void dce_v11_0_disable_dce(struct amdgpu_device *adev)
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1235,7 +1235,7 @@ static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *ad
static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1252,7 +1252,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1298,7 +1298,7 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1354,7 +1354,7 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1525,7 +1525,7 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1561,7 +1561,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint8_t *frame = buffer + 3;
@@ -1580,7 +1580,7 @@ static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1611,7 +1611,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1791,7 +1791,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1864,7 +1864,7 @@ static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1878,7 +1878,7 @@ static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1892,7 +1892,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2137,7 +2137,7 @@ static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
u32 tmp;
@@ -2153,7 +2153,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2283,7 +2283,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2364,7 +2364,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2379,7 +2379,7 @@ static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2390,7 +2390,7 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2408,7 +2408,7 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2582,7 +2582,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2636,7 +2636,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2706,7 +2706,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
@@ -2809,7 +2809,7 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2817,8 +2817,8 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = 128;
amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
switch (amdgpu_crtc->crtc_id) {
case 0:
@@ -2913,24 +2913,24 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
@@ -2941,7 +2941,7 @@ static int dce_v11_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2954,7 +2954,7 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2966,13 +2966,13 @@ static int dce_v11_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v11_0_audio_fini(adev);
dce_v11_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3283,14 +3283,14 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
if(amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3302,7 +3302,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
if(works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3372,7 +3372,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3471,7 +3471,7 @@ dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3511,7 +3511,7 @@ static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3611,7 +3611,7 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index cbddead3dafb..3a44753a80d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -279,7 +279,7 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -324,7 +324,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -401,7 +401,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1114,7 +1114,7 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1130,7 +1130,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1174,7 +1174,7 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1235,7 +1235,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1392,7 +1392,7 @@ static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1408,7 +1408,7 @@ static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
uint32_t clock, int bpc)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1446,7 +1446,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1488,7 +1488,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
u32 tmp;
@@ -1522,7 +1522,7 @@ static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1566,7 +1566,7 @@ static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1579,7 +1579,7 @@ static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1616,7 +1616,7 @@ static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1645,7 +1645,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1714,7 +1714,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1788,7 +1788,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1799,7 +1799,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
@@ -1810,7 +1810,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2033,7 +2033,7 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2048,7 +2048,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
@@ -2148,7 +2148,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2177,7 +2177,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2192,7 +2192,7 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2204,7 +2204,7 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2222,7 +2222,7 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
int w = amdgpu_crtc->cursor_width;
@@ -2397,7 +2397,7 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2447,7 +2447,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2591,7 +2591,7 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2599,8 +2599,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2669,20 +2669,20 @@ static int dce_v6_0_sw_init(void *handle)
adev->mode_info.mode_config_initialized = true;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2693,7 +2693,7 @@ static int dce_v6_0_sw_init(void *handle)
ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
if (ret)
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2706,7 +2706,7 @@ static int dce_v6_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
}
@@ -2717,12 +2717,12 @@ static int dce_v6_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v6_0_audio_fini(adev);
dce_v6_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -2967,7 +2967,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
@@ -3036,14 +3036,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3055,7 +3055,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3146,7 +3146,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3187,7 +3187,7 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3297,7 +3297,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
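
The pattern applied throughout dce_v6_0.c above swaps direct dev->dev_private and adev->ddev dereferences for the drm_to_adev()/adev_to_drm() helper pair. A minimal standalone sketch of how such a pair works once the drm_device is embedded in the driver device — the struct layouts below are illustrative stand-ins, not the real amdgpu.h definitions:

	#include <stddef.h>

	/* Illustrative stand-ins; the real definitions live in amdgpu.h. */
	struct drm_device { int dummy; };

	struct amdgpu_device {
		int asic_type;          /* ... driver state ... */
		struct drm_device ddev; /* drm_device embedded in the driver device */
	};

	/* drm_device -> amdgpu_device; the kernel spells this
	 * container_of(ddev, struct amdgpu_device, ddev) */
	static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
	{
		return (struct amdgpu_device *)((char *)ddev -
				offsetof(struct amdgpu_device, ddev));
	}

	/* amdgpu_device -> drm_device: just the embedded member's address */
	static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
	{
		return &adev->ddev;
	}

Either direction is a constant-offset computation, so the helpers cost nothing over the old pointer field while letting the legacy dev_private plumbing be retired.
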
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index fa0ad50b628c..3603e5f13077 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -273,7 +273,7 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -318,7 +318,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
@@ -444,7 +444,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev)
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1146,7 +1146,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade
static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
@@ -1164,7 +1164,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1225,7 +1225,7 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -1278,7 +1278,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
@@ -1446,7 +1446,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1469,7 +1469,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
@@ -1489,7 +1489,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1516,7 +1516,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1678,7 +1678,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1751,7 +1751,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1765,7 +1765,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1779,7 +1779,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2004,7 +2004,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2018,7 +2018,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
@@ -2140,7 +2140,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2188,7 +2188,7 @@ static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2203,7 +2203,7 @@ static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
@@ -2213,7 +2213,7 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
@@ -2230,7 +2230,7 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2404,7 +2404,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2458,7 +2458,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
@@ -2609,7 +2609,7 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2617,8 +2617,8 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2689,24 +2689,24 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2716,7 +2716,7 @@ static int dce_v8_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2729,7 +2729,7 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -2741,13 +2741,13 @@ static int dce_v8_0_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v8_0_audio_fini(adev);
dce_v8_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
@@ -3057,7 +3057,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
@@ -3126,14 +3126,14 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3145,7 +3145,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3233,7 +3233,7 @@ dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3273,7 +3273,7 @@ static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3373,7 +3373,7 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
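
dce_v8_0_hpd_init()/_fini() above walk connectors with the drm_connector_list_iter API. For reference, the canonical shape of that loop (real DRM helpers from <drm/drm_connector.h>; kernel context assumed, with dev being the drm_device in scope):

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* per-connector HPD programming goes here */
	}
	drm_connector_list_iter_end(&iter);
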
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index d5ff7b6331ff..b4d4b76538d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -47,6 +47,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
int index);
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+ unsigned crtc_id);
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
int crtc,
enum amdgpu_interrupt_state state);
@@ -132,7 +135,7 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -171,8 +174,10 @@ static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
- drm_crtc_vblank_off(crtc);
+ if (dev->num_crtcs)
+ drm_crtc_vblank_off(crtc);
amdgpu_crtc->enabled = false;
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -235,7 +240,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -247,6 +252,11 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
+ hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
+ amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
+ hrtimer_start(&amdgpu_crtc->vblank_timer,
+ DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
return 0;
}
@@ -374,24 +384,24 @@ static int dce_virtual_sw_init(void *handle)
if (r)
return r;
- adev->ddev->max_vblank_count = 0;
+ adev_to_drm(adev)->max_vblank_count = 0;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs, encoders, connectors */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -403,7 +413,7 @@ static int dce_virtual_sw_init(void *handle)
return r;
}
- drm_kms_helper_poll_init(adev->ddev);
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
@@ -415,9 +425,9 @@ static int dce_virtual_sw_fini(void *handle)
kfree(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
/* clear crtcs pointer to avoid dce irq finish routine access freed data */
memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
adev->mode_info.mode_config_initialized = false;
@@ -476,7 +486,7 @@ static int dce_virtual_hw_fini(void *handle)
for (i = 0; i<adev->mode_info.num_crtc; i++)
if (adev->mode_info.crtcs[i])
- dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+ hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
return 0;
}
@@ -602,7 +612,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
if (!encoder)
return -ENOMEM;
encoder->possible_crtcs = 1 << index;
- drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs,
+ drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
@@ -613,7 +623,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
}
/* add a new connector */
- drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs,
+ drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -663,14 +673,14 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -682,7 +692,7 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
amdgpu_bo_unref(&works->old_abo);
@@ -697,10 +707,16 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
struct amdgpu_crtc, vblank_timer);
struct drm_device *ddev = amdgpu_crtc->base.dev;
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
+ [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
- drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
- dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ if (amdgpu_irq_enabled(adev, source, irq_type)) {
+ drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+ dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+ }
hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
HRTIMER_MODE_REL);
@@ -716,21 +732,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
return;
}
- if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
- DRM_DEBUG("Enable software vsync timer\n");
- hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
- DCE_VIRTUAL_VBLANK_PERIOD);
- adev->mode_info.crtcs[crtc]->vblank_timer.function =
- dce_virtual_vblank_timer_handle;
- hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
- DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
- } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
- DRM_DEBUG("Disable software vsync timer\n");
- hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
- }
-
adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
}
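
Taken together, the dce_virtual.c hunks change the software-vblank timer from start/stop-on-demand to always-running: armed once per CRTC at init, gated inside the handler by amdgpu_irq_enabled(), and cancelled only at hw_fini. Condensed from the hunks above (kernel context; not a standalone build):

	/* dce_virtual_crtc_init(): arm once, forever */
	hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
	hrtimer_start(&amdgpu_crtc->vblank_timer,
		      DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);

	/* handler: tick unconditionally, but only raise vblank/pageflip
	 * work while the interrupt is logically enabled */
	if (amdgpu_irq_enabled(adev, source, irq_type)) {
		drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
		dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
	}

	/* dce_virtual_hw_fini(): the only place the timer is stopped */
	hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

The cost is a periodic no-op wakeup while vblanks are disabled; the gain is that vblank enable/disable becomes a pure flag flip with no timer lifetime to synchronize against interrupt-state changes.
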
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 1ab261836983..7b89fd2aa44a 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -251,7 +251,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
int i, count;
ddev = dev_get_drvdata(dev);
- adev = ddev->dev_private;
+ adev = drm_to_adev(ddev);
count = 0;
for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
@@ -455,7 +455,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
uint32_t *lo_base_addr,
uint32_t *hi_base_addr,
uint32_t *lo_val,
- uint32_t *hi_val)
+ uint32_t *hi_val,
+ bool is_enable)
{
uint32_t eventsel, instance, unitmask;
@@ -477,7 +478,8 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
instance_5432 = (instance >> 2) & 0xf;
instance_76 = (instance >> 6) & 0x3;
- *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
+ *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel;
+ *lo_val = is_enable ? *lo_val | (1 << 22) : *lo_val & ~(1 << 22);
*hi_val = (instance_76 << 29) | instance_5432;
DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
@@ -572,14 +574,14 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
}
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
- int is_enable)
+ int is_add)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int err = 0, ret = 0;
switch (adev->asic_type) {
case CHIP_VEGA20:
- if (is_enable)
+ if (is_add)
return df_v3_6_pmc_add_cntr(adev, config);
df_v3_6_reset_perfmon_cntr(adev, config);
@@ -589,7 +591,8 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
- &hi_val);
+ &hi_val,
+ true);
if (ret)
return ret;
@@ -612,7 +615,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
}
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
- int is_disable)
+ int is_remove)
{
uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
int ret = 0;
@@ -624,15 +627,17 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
&lo_base_addr,
&hi_base_addr,
&lo_val,
- &hi_val);
+ &hi_val,
+ false);
if (ret)
return ret;
- df_v3_6_reset_perfmon_cntr(adev, config);
- if (is_disable)
+ if (is_remove) {
+ df_v3_6_reset_perfmon_cntr(adev, config);
df_v3_6_pmc_release_cntr(adev, config);
+ }
break;
default:
@@ -646,7 +651,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
uint64_t config,
uint64_t *count)
{
- uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
+ uint32_t lo_base_addr = 0, hi_base_addr = 0, lo_val = 0, hi_val = 0;
*count = 0;
switch (adev->asic_type) {
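
df_v3_6_pmc_get_ctrl_settings() now takes an is_enable flag and sets or clears the counter-enable bit (bit 22) in lo_val, rather than always setting it. The bit manipulation in isolation, as a tiny compilable check — the bit position comes from the hunk above; the surrounding register layout is not reproduced:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define DF_PERFMON_CTRL_EN (1u << 22)  /* enable bit from the hunk */

	static uint32_t df_ctrl_lo_val(uint32_t base, bool is_enable)
	{
		return is_enable ? (base | DF_PERFMON_CTRL_EN)
				 : (base & ~DF_PERFMON_CTRL_EN);
	}

	int main(void)
	{
		assert(df_ctrl_lo_val(0, true) == DF_PERFMON_CTRL_EN);
		assert(df_ctrl_lo_val(~0u, false) == (~0u & ~DF_PERFMON_CTRL_EN));
		return 0;
	}
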
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f73ce9721233..3579565e0eab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -112,6 +112,25 @@
#define mmCP_HYP_ME_UCODE_DATA 0x5817
#define mmCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+//CC_GC_SA_UNIT_DISABLE
+#define mmCC_GC_SA_UNIT_DISABLE 0x0fe9
+#define mmCC_GC_SA_UNIT_DISABLE_BASE_IDX 0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L
+//GC_USER_SA_UNIT_DISABLE
+#define mmGC_USER_SA_UNIT_DISABLE 0x0fea
+#define mmGC_USER_SA_UNIT_DISABLE_BASE_IDX 0
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x0000FF00L
+//PA_SC_ENHANCE_3
+#define mmPA_SC_ENHANCE_3 0x1085
+#define mmPA_SC_ENHANCE_3_BASE_IDX 0
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+
+#define mmCGTT_SPI_CS_CLK_CTRL 0x507c
+#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX 1
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3078,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
static const struct soc15_reg_golden golden_settings_gc_10_3[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
@@ -3091,6 +3111,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
@@ -3188,6 +3209,8 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -3307,6 +3330,29 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}
+static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
+ break;
+ case CHIP_NAVI14:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
+ break;
+ case CHIP_NAVI12:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
+ break;
+ default:
+ break;
+ }
+}
+
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -3317,9 +3363,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_0_nv10,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
@@ -3328,9 +3371,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_nv14,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
@@ -3339,9 +3379,6 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_10_1_2_nv12,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
case CHIP_SIENNA_CICHLID:
soc15_program_register_sequence(adev,
@@ -3360,6 +3397,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+ gfx_v10_0_init_spm_golden_registers(adev);
}
static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
@@ -3545,7 +3583,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
break;
}
- if (adev->gfx.cp_fw_write_wait == false)
+ if (!adev->gfx.cp_fw_write_wait)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
@@ -3571,6 +3609,17 @@ static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}
+static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+}
+
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
@@ -3686,8 +3735,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -3729,8 +3776,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (version_major == 2) {
+ if (version_minor >= 1)
+ gfx_v10_0_init_rlc_ext_microcode(adev);
+ if (version_minor == 2)
+ gfx_v10_0_init_rlc_iram_dram_microcode(adev);
+ }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -3791,8 +3842,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
@@ -3812,6 +3862,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
@@ -4025,21 +4090,23 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v10_0_mec_fini(adev);
- return r;
- }
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v10_0_mec_fini(adev);
+ return r;
+ }
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
@@ -4150,6 +4217,7 @@ static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v10_0_select_me_pipe_q,
+ .init_spm_golden = &gfx_v10_0_init_spm_golden_registers,
};
static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
@@ -4518,12 +4586,17 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
int i, j;
u32 data;
u32 active_rbs = 0;
+ u32 bitmap;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+ continue;
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v10_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
@@ -6183,7 +6256,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6195,7 +6268,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) {
+ } else if (amdgpu_in_reset(adev)) {
/* reset mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6436,6 +6509,10 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int j;
+ /* inactivate the queue */
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
+
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
@@ -6544,7 +6621,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v10_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6580,7 +6657,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6590,7 +6667,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
@@ -6928,6 +7005,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ gfx_v10_3_program_pbb_mode(adev);
+
return r;
}
@@ -6961,15 +7041,19 @@ static int gfx_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ if (!adev->in_pci_err_recovery) {
#ifndef BRING_UP_DEBUG
- if (amdgpu_async_gfx_ring) {
- r = gfx_v10_0_kiq_disable_kgq(adev);
- if (r)
- DRM_ERROR("KGQ disable failed\n");
- }
+ if (amdgpu_async_gfx_ring) {
+ r = gfx_v10_0_kiq_disable_kgq(adev);
+ if (r)
+ DRM_ERROR("KGQ disable failed\n");
+ }
#endif
- if (amdgpu_gfx_disable_kcq(adev))
- DRM_ERROR("KCQ disable failed\n");
+ if (amdgpu_gfx_disable_kcq(adev))
+ DRM_ERROR("KCQ disable failed\n");
+ }
+
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
@@ -7036,8 +7120,7 @@ static int gfx_v10_0_soft_reset(void *handle)
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK |
GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK |
- GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK
- | GRBM_STATUS__BCI_BUSY_MASK)) {
+ GRBM_STATUS__SPI_BUSY_MASK | GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP,
1);
@@ -7162,7 +7245,7 @@ static int gfx_v10_0_early_init(void *handle)
break;
}
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v10_0_set_kiq_pm4_funcs(adev);
gfx_v10_0_set_ring_funcs(adev);
@@ -7430,7 +7513,6 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
(AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
- AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
@@ -8739,6 +8821,10 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ bitmap = i * adev->gfx.config.max_sh_per_se + j;
+ if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
+ continue;
mask = 1;
ao_bitmap = 0;
counter = 0;
@@ -8773,6 +8859,47 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
return 0;
}
+static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev)
+{
+ uint32_t efuse_setting, vbios_setting, disabled_sa, max_sa_mask;
+
+ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
+ efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+ efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+ vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
+ vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+ vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+ max_sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines);
+ disabled_sa = efuse_setting | vbios_setting;
+ disabled_sa &= max_sa_mask;
+
+ return disabled_sa;
+}
+
+static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
+{
+ uint32_t max_sa_per_se, max_sa_per_se_mask, max_shader_engines;
+ uint32_t disabled_sa_mask, se_index, disabled_sa_per_se;
+
+ disabled_sa_mask = gfx_v10_3_get_disabled_sa(adev);
+
+ max_sa_per_se = adev->gfx.config.max_sh_per_se;
+ max_sa_per_se_mask = (1 << max_sa_per_se) - 1;
+ max_shader_engines = adev->gfx.config.max_shader_engines;
+
+ for (se_index = 0; max_shader_engines > se_index; se_index++) {
+ disabled_sa_per_se = disabled_sa_mask >> (se_index * max_sa_per_se);
+ disabled_sa_per_se &= max_sa_per_se_mask;
+ if (disabled_sa_per_se == max_sa_per_se_mask) {
+ WREG32_FIELD15(GC, 0, PA_SC_ENHANCE_3, FORCE_PBB_WORKLOAD_MODE_TO_ZERO, 1);
+ break;
+ }
+ }
+}
+
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
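
gfx_v10_3_get_disabled_sa() above ORs the efuse and VBIOS disable fields and clips the result to the number of shader arrays the config can actually have; gfx_v10_3_program_pbb_mode() then inspects each SE's slice of that mask. The mask arithmetic on its own, as a compilable sketch with made-up field values (the shifts and masks mirror the register definitions added at the top of the file):

	#include <assert.h>
	#include <stdint.h>

	/* n low bits set, like amdgpu_gfx_create_bitmask() */
	static uint32_t create_bitmask(uint32_t n)
	{
		return (1u << n) - 1;
	}

	int main(void)
	{
		uint32_t max_sh_per_se = 2, max_shader_engines = 4;
		uint32_t efuse = 0x3, vbios = 0x10;  /* hypothetical settings */
		uint32_t max_sa_mask =
			create_bitmask(max_sh_per_se * max_shader_engines);
		uint32_t disabled_sa = (efuse | vbios) & max_sa_mask;
		uint32_t se, per_se_mask = create_bitmask(max_sh_per_se);

		assert(disabled_sa == 0x13);
		/* per-SE check from gfx_v10_3_program_pbb_mode(): an SE with
		 * all of its SA bits set forces PBB workload mode to zero */
		for (se = 0; se < max_shader_engines; se++) {
			uint32_t per_se =
				(disabled_sa >> (se * max_sh_per_se)) & per_se_mask;
			if (per_se == per_se_mask)
				break;  /* SE0 here: bits 0-1 both set */
		}
		assert(se == 0);
		return 0;
	}
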
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 88f63d7ea371..94b7e0531d09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1343,21 +1343,22 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ return r;
+ }
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
-
- memset(hpd, 0, mec_hpd_size);
+ memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
return 0;
}
@@ -4632,7 +4633,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4669,7 +4670,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4681,7 +4682,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -5294,7 +5295,7 @@ static int gfx_v8_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
@@ -5342,10 +5343,9 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if (((adev->asic_type == CHIP_POLARIS11) ||
+ if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) &&
- adev->powerplay.pp_funcs->set_powergating_by_smu)
+ (adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
@@ -5879,8 +5879,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5901,8 +5900,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -5931,8 +5929,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5951,8 +5948,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5973,8 +5969,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -5989,8 +5984,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6004,8 +5998,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b95f22262a90..0d8e203b10ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -49,6 +49,7 @@
#include "amdgpu_ras.h"
#include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -116,6 +117,13 @@ MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -788,7 +796,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
@@ -1630,7 +1637,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
@@ -1939,22 +1949,23 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.mec.hpd_eop_obj,
- &adev->gfx.mec.hpd_eop_gpu_addr,
- (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
-
- memset(hpd, 0, mec_hpd_size);
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v9_0_mec_fini(adev);
+ return r;
+ }
+ memset(hpd, 0, mec_hpd_size);
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
@@ -2074,6 +2085,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.ras_error_inject = &gfx_v9_4_ras_error_inject,
.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2195,7 +2207,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
- int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
unsigned int hw_prio;
@@ -2220,13 +2231,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
- if (r)
- return r;
-
-
- return 0;
+ return amdgpu_ring_init(adev, ring, 1024,
+ &adev->gfx.eop_irq, irq_type, hw_prio);
}
static int gfx_v9_0_sw_init(void *handle)
@@ -2401,7 +2407,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
/* TODO */
}
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+ u32 instance)
{
u32 data;
@@ -2559,14 +2566,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
- !!amdgpu_noretry);
+ !!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
@@ -2799,7 +2806,7 @@ static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
- if (enable == true) {
+ if (enable) {
/* enable GFXIP control over CGPG */
data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
if(default_data != data)
@@ -3685,7 +3692,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3723,7 +3730,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3735,7 +3742,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3929,7 +3936,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* Use deinitialize sequence from CAIL when unbinding device from driver,
* otherwise KIQ is hanging when binding back
*/
- if (!adev->in_gpu_reset && !adev->in_suspend) {
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
adev->gfx.kiq.ring.pipe,
@@ -4087,7 +4094,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read;
might_sleep();
@@ -4626,7 +4633,7 @@ static int gfx_v9_0_early_init(void *handle)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index fa5a3fbaf6ab..dfe8d4841f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -26,9 +26,7 @@
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
-uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+ u32 instance);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index 46351db36922..bc699d680ce8 100755..100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -57,10 +57,10 @@ static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
/* SPI */
{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
/* SQ */
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 },
- { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 },
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 },
/* SQC */
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
@@ -992,3 +992,32 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
return ret;
}
+
+static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+ j++) {
+ gfx_v9_4_select_se_sh(adev, i, 0, j);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_rdrsp_status_regs));
+ if (reg_value)
+ dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+ j, reg_value);
+ }
+ }
+
+ gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 1ffecc5c0f0a..875f18473a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -34,4 +34,6 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 529e46386a50..fad887a66886 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -245,7 +245,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -403,3 +403,13 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
+
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
+ .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v1_0_gart_enable,
+ .gart_disable = gfxhub_v1_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+ .init = gfxhub_v1_0_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 92d3a70cd9b1..0c46672bbf49 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -33,4 +33,5 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index c0ab71df0d90..1e24b6d51e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -21,6 +21,7 @@
*
*/
#include "amdgpu.h"
+#include "gfxhub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gc/gc_9_2_1_offset.h"
@@ -28,7 +29,7 @@
#include "soc15_common.h"
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
{
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
u32 max_region =
@@ -66,3 +67,13 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs = {
+ .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v1_0_gart_enable,
+ .gart_disable = gfxhub_v1_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+ .init = gfxhub_v1_0_init,
+ .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
index d753cf28a0a6..ae5759ffbee3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
@@ -24,6 +24,6 @@
#ifndef __GFXHUB_V1_1_H__
#define __GFXHUB_V1_1_H__
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 394e6f56948a..456360bf58fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -31,7 +31,78 @@
#include "soc15_common.h"
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid*/
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -41,12 +112,12 @@ u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -82,11 +153,6 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
/* Disable AGP. */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
@@ -247,7 +313,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -276,7 +342,7 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
}
}
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
@@ -292,7 +358,7 @@ int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
u32 tmp;
@@ -323,7 +389,7 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
@@ -360,7 +426,12 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void gfxhub_v2_0_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
+};
+
+static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -390,4 +461,24 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
mmGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
+ .get_fb_location = gfxhub_v2_0_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v2_0_gart_enable,
+ .gart_disable = gfxhub_v2_0_gart_disable,
+ .set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
+ .init = gfxhub_v2_0_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 392b8cd94fc0..9ddc35cd53d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -24,14 +24,6 @@
#ifndef __GFXHUB_V2_0_H__
#define __GFXHUB_V2_0_H__
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void gfxhub_v2_0_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 5d2505956f84..724bb29e9bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -31,7 +31,78 @@
#include "soc15_common.h"
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v2_1_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid*/
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
@@ -41,12 +112,12 @@ u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
return base;
}
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -248,7 +319,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -277,7 +348,7 @@ static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
}
}
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
/*
@@ -305,7 +376,7 @@ int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
u32 tmp;
@@ -334,7 +405,7 @@ void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
@@ -378,7 +449,12 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void gfxhub_v2_1_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v2_1_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
+};
+
+static void gfxhub_v2_1_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -408,9 +484,19 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
mmGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
}
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
{
u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
u32 max_region =
@@ -445,3 +531,14 @@ int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
return 0;
}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
+ .get_fb_location = gfxhub_v2_1_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v2_1_gart_enable,
+ .gart_disable = gfxhub_v2_1_gart_disable,
+ .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
+ .init = gfxhub_v2_1_init,
+ .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
index 3452a4e9a3da..f75c2eccfad9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
@@ -24,16 +24,6 @@
#ifndef __GFXHUB_V2_1_H__
#define __GFXHUB_V2_1_H__
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void gfxhub_v2_1_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ec90c62078d9..dbc8b76b9b78 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -25,11 +25,10 @@
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
+#include "umc_v8_7.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
@@ -57,68 +56,31 @@ static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
};
#endif
+static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type,
enum amdgpu_interrupt_state state)
{
- struct amdgpu_vmhub *hub;
- u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;
-
- bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
- bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_MMHUB_0];
- WREG32(reg, tmp);
- }
-
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp &= ~bits[AMDGPU_GFXHUB_0];
- WREG32(reg, tmp);
- }
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp |= bits[AMDGPU_MMHUB_0];
- WREG32(reg, tmp);
- }
-
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + hub->ctx_distance * i;
- tmp = RREG32(reg);
- tmp |= bits[AMDGPU_GFXHUB_0];
- WREG32(reg, tmp);
- }
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -166,29 +128,8 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
task_info.task_name, task_info.pid);
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev)) {
- dev_err(adev->dev,
- "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
- status);
- dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, CID));
- dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
- dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
- dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
- dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
- dev_err(adev->dev, "\t RW: 0x%lx\n",
- REG_GET_FIELD(status,
- GCVM_L2_PROTECTION_FAULT_STATUS, RW));
- }
+ if (!amdgpu_sriov_vf(adev))
+ hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
return 0;
@@ -199,30 +140,20 @@ static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
.process = gmc_v10_0_process_interrupt,
};
-static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
+static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
+ .set = gmc_v10_0_ecc_interrupt_state,
+ .process = amdgpu_umc_process_ecc_irq,
+};
+
+static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
-}
-static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
- uint32_t flush_type)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vmid*/
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
+ }
}
/**
@@ -265,7 +196,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
- u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 tmp;
/* Use register 17 for GART */
const unsigned eng = 17;
@@ -356,16 +287,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
*/
if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- !adev->in_gpu_reset) {
-
+ down_read_trylock(&adev->reset_sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
- u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
+ u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
+
+ up_read(&adev->reset_sem);
return;
}
@@ -381,7 +313,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
if (!adev->mman.buffer_funcs_enabled ||
!adev->ib_pool_ready ||
- adev->in_gpu_reset ||
+ amdgpu_in_reset(adev) ||
ring->sched.ready == false) {
gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -459,7 +391,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;
}
@@ -491,7 +423,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
{
bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
+ uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
/*
@@ -641,6 +573,28 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
}
}
+static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+ u32 pitch;
+
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+ 4);
+ }
+
+ return size;
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
@@ -648,7 +602,8 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
.map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
- .get_vm_pte = gmc_v10_0_get_vm_pte
+ .get_vm_pte = gmc_v10_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -657,12 +612,51 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}
+static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
+ adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
+ adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
+ adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
+ adev->umc.funcs = &umc_v8_7_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
+
+static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
+{
+ adev->mmhub.funcs = &mmhub_v2_0_funcs;
+}
+
+static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
+ break;
+ default:
+ adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
+ break;
+ }
+}
+
+
static int gmc_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v10_0_set_mmhub_funcs(adev);
+ gmc_v10_0_set_gfxhub_funcs(adev);
gmc_v10_0_set_gmc_funcs(adev);
gmc_v10_0_set_irq_funcs(adev);
+ gmc_v10_0_set_umc_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -685,6 +679,10 @@ static int gmc_v10_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_gmc_ras_late_init(adev);
+ if (r)
+ return r;
+
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -693,11 +691,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- base = gfxhub_v2_1_get_fb_location(adev);
- else
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = adev->gfxhub.funcs->get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -706,11 +700,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
/* base offset of vram pages */
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
- else
- adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
/* add the xgmi offset of the physical node */
adev->vm_manager.vram_base_offset +=
@@ -789,48 +779,14 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
-{
- u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- unsigned size;
-
- if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
- } else {
- u32 viewport;
- u32 pitch;
-
- viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
- pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
- size = (REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
- REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
- 4);
- }
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
- DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
- be aware of gart table overwrite\n");
- return 0;
- }
-
- return size;
-}
-
-
-
static int gmc_v10_0_sw_init(void *handle)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_init(adev);
- else
- gfxhub_v2_0_init(adev);
+ adev->gfxhub.funcs->init(adev);
- mmhub_v2_0_init(adev);
+ adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -878,6 +834,14 @@ static int gmc_v10_0_sw_init(void *handle)
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+ }
+
/*
* Set the internal MC address mask This is the max address of the GPU's
* internal address space.
@@ -891,7 +855,7 @@ static int gmc_v10_0_sw_init(void *handle)
}
if (adev->gmc.xgmi.supported) {
- r = gfxhub_v2_1_get_xgmi_info(adev);
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
}
@@ -900,7 +864,7 @@ static int gmc_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -983,15 +947,11 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- r = gfxhub_v2_1_gart_enable(adev);
- else
- r = gfxhub_v2_0_gart_enable(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
- r = mmhub_v2_0_gart_enable(adev);
+ r = adev->mmhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1008,12 +968,8 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_set_fault_enable_default(adev, value);
- else
- gfxhub_v2_0_set_fault_enable_default(adev, value);
- mmhub_v2_0_set_fault_enable_default(adev, value);
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
@@ -1038,6 +994,9 @@ static int gmc_v10_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
+
return 0;
}
@@ -1050,12 +1009,8 @@ static int gmc_v10_0_hw_init(void *handle)
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER)
- gfxhub_v2_1_gart_disable(adev);
- else
- gfxhub_v2_0_gart_disable(adev);
- mmhub_v2_0_gart_disable(adev);
+ adev->gfxhub.funcs->gart_disable(adev);
+ adev->mmhub.funcs->gart_disable(adev);
amdgpu_gart_table_vram_unpin(adev);
}
@@ -1069,6 +1024,7 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v10_0_gart_disable(adev);
@@ -1121,7 +1077,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mmhub_v2_0_set_clockgating(adev, state);
+ r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
@@ -1136,7 +1092,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mmhub_v2_0_get_clockgating(adev, flags);
+ adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->asic_type == CHIP_SIENNA_CICHLID ||
adev->asic_type == CHIP_NAVY_FLOUNDER)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 538e7ee35cdf..95a9117e9564 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -805,16 +805,13 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
return size;
}
@@ -862,7 +859,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
r = amdgpu_bo_init(adev);
if (r)
@@ -1136,6 +1133,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte = gmc_v6_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index e18296dc1386..80c146df338a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -434,7 +434,7 @@ static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid;
unsigned int tmp;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
for (vmid = 1; vmid < 16; vmid++) {
@@ -970,16 +970,14 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
+
return size;
}
@@ -1035,7 +1033,7 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1372,7 +1370,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pde = gmc_v7_0_get_vm_pde,
- .get_vm_pte = gmc_v7_0_get_vm_pte
+ .get_vm_pte = gmc_v7_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a9e722b8a458..9ab65ca7df77 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -635,7 +635,7 @@ static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
int vmid;
unsigned int tmp;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
for (vmid = 1; vmid < 16; vmid++) {
@@ -1087,16 +1087,14 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
unsigned size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
+
return size;
}
@@ -1160,7 +1158,7 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1739,7 +1737,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pde = gmc_v8_0_get_vm_pde,
- .get_vm_pte = gmc_v8_0_get_vm_pte
+ .get_vm_pte = gmc_v8_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index b67ba38a195f..3ebbddb63705 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -67,6 +67,221 @@
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+
+
+static const char *gfxhub_client_ids[] = {
+ "CB",
+ "DB",
+ "IA",
+ "WD",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "PA",
+};
+
+static const char *mmhub_client_ids_raven[][2] = {
+ [0][0] = "MP1",
+ [1][0] = "MP0",
+ [2][0] = "VCN",
+ [3][0] = "VCNU",
+ [4][0] = "HDP",
+ [5][0] = "DCE",
+ [13][0] = "UTCL2",
+ [19][0] = "TLS",
+ [26][0] = "OSS",
+ [27][0] = "SDMA0",
+ [0][1] = "MP1",
+ [1][1] = "MP0",
+ [2][1] = "VCN",
+ [3][1] = "VCNU",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [6][1] = "DBGU0",
+ [7][1] = "DCE",
+ [8][1] = "DCEDWB0",
+ [9][1] = "DCEDWB1",
+ [26][1] = "OSS",
+ [27][1] = "SDMA0",
+};
+
+static const char *mmhub_client_ids_renoir[][2] = {
+ [0][0] = "MP1",
+ [1][0] = "MP0",
+ [2][0] = "HDP",
+ [4][0] = "DCEDMC",
+ [5][0] = "DCEVGA",
+ [13][0] = "UTCL2",
+ [19][0] = "TLS",
+ [26][0] = "OSS",
+ [27][0] = "SDMA0",
+ [28][0] = "VCN",
+ [29][0] = "VCNU",
+ [30][0] = "JPEG",
+ [0][1] = "MP1",
+ [1][1] = "MP0",
+ [2][1] = "HDP",
+ [3][1] = "XDP",
+ [6][1] = "DBGU0",
+ [7][1] = "DCEDMC",
+ [8][1] = "DCEVGA",
+ [9][1] = "DCEDWB",
+ [26][1] = "OSS",
+ [27][1] = "SDMA0",
+ [28][1] = "VCN",
+ [29][1] = "VCNU",
+ [30][1] = "JPEG",
+};
+
+static const char *mmhub_client_ids_vega10[][2] = {
+ [0][0] = "MP0",
+ [1][0] = "UVD",
+ [2][0] = "UVDU",
+ [3][0] = "HDP",
+ [13][0] = "UTCL2",
+ [14][0] = "OSS",
+ [15][0] = "SDMA1",
+ [32+0][0] = "VCE0",
+ [32+1][0] = "VCE0U",
+ [32+2][0] = "XDMA",
+ [32+3][0] = "DCE",
+ [32+4][0] = "MP1",
+ [32+14][0] = "SDMA0",
+ [0][1] = "MP0",
+ [1][1] = "UVD",
+ [2][1] = "UVDU",
+ [3][1] = "DBGU0",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [14][1] = "OSS",
+ [15][1] = "SDMA0",
+ [32+0][1] = "VCE0",
+ [32+1][1] = "VCE0U",
+ [32+2][1] = "XDMA",
+ [32+3][1] = "DCE",
+ [32+4][1] = "DCEDWB",
+ [32+5][1] = "MP1",
+ [32+6][1] = "DBGU1",
+ [32+14][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega12[][2] = {
+ [0][0] = "MP0",
+ [1][0] = "VCE0",
+ [2][0] = "VCE0U",
+ [3][0] = "HDP",
+ [13][0] = "UTCL2",
+ [14][0] = "OSS",
+ [15][0] = "SDMA1",
+ [32+0][0] = "DCE",
+ [32+1][0] = "XDMA",
+ [32+2][0] = "UVD",
+ [32+3][0] = "UVDU",
+ [32+4][0] = "MP1",
+ [32+15][0] = "SDMA0",
+ [0][1] = "MP0",
+ [1][1] = "VCE0",
+ [2][1] = "VCE0U",
+ [3][1] = "DBGU0",
+ [4][1] = "HDP",
+ [5][1] = "XDP",
+ [14][1] = "OSS",
+ [15][1] = "SDMA0",
+ [32+0][1] = "DCE",
+ [32+1][1] = "DCEDWB",
+ [32+2][1] = "XDMA",
+ [32+3][1] = "UVD",
+ [32+4][1] = "UVDU",
+ [32+5][1] = "MP1",
+ [32+6][1] = "DBGU1",
+ [32+15][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_vega20[][2] = {
+ [0][0] = "XDMA",
+ [1][0] = "DCE",
+ [2][0] = "VCE0",
+ [3][0] = "VCE0U",
+ [4][0] = "UVD",
+ [5][0] = "UVD1U",
+ [13][0] = "OSS",
+ [14][0] = "HDP",
+ [15][0] = "SDMA0",
+ [32+0][0] = "UVD",
+ [32+1][0] = "UVDU",
+ [32+2][0] = "MP1",
+ [32+3][0] = "MP0",
+ [32+12][0] = "UTCL2",
+ [32+14][0] = "SDMA1",
+ [0][1] = "XDMA",
+ [1][1] = "DCE",
+ [2][1] = "DCEDWB",
+ [3][1] = "VCE0",
+ [4][1] = "VCE0U",
+ [5][1] = "UVD1",
+ [6][1] = "UVD1U",
+ [7][1] = "DBGU0",
+ [8][1] = "XDP",
+ [13][1] = "OSS",
+ [14][1] = "HDP",
+ [15][1] = "SDMA0",
+ [32+0][1] = "UVD",
+ [32+1][1] = "UVDU",
+ [32+2][1] = "DBGU1",
+ [32+3][1] = "MP1",
+ [32+4][1] = "MP0",
+ [32+14][1] = "SDMA1",
+};
+
+static const char *mmhub_client_ids_arcturus[][2] = {
+ [2][0] = "MP1",
+ [3][0] = "MP0",
+ [10][0] = "UTCL2",
+ [13][0] = "OSS",
+ [14][0] = "HDP",
+ [15][0] = "SDMA0",
+ [32+15][0] = "SDMA1",
+ [64+15][0] = "SDMA2",
+ [96+15][0] = "SDMA3",
+ [128+15][0] = "SDMA4",
+ [160+11][0] = "JPEG",
+ [160+12][0] = "VCN",
+ [160+13][0] = "VCNU",
+ [160+15][0] = "SDMA5",
+ [192+10][0] = "UTCL2",
+ [192+11][0] = "JPEG1",
+ [192+12][0] = "VCN1",
+ [192+13][0] = "VCN1U",
+ [192+15][0] = "SDMA6",
+ [224+15][0] = "SDMA7",
+ [0][1] = "DBGU1",
+ [1][1] = "XDP",
+ [2][1] = "MP1",
+ [3][1] = "MP0",
+ [13][1] = "OSS",
+ [14][1] = "HDP",
+ [15][1] = "SDMA0",
+ [32+15][1] = "SDMA1",
+ [64+15][1] = "SDMA2",
+ [96+15][1] = "SDMA3",
+ [128+15][1] = "SDMA4",
+ [160+11][1] = "JPEG",
+ [160+12][1] = "VCN",
+ [160+13][1] = "VCNU",
+ [160+15][1] = "SDMA5",
+ [192+11][1] = "JPEG1",
+ [192+12][1] = "VCN1",
+ [192+13][1] = "VCN1U",
+ [192+15][1] = "SDMA6",
+ [224+15][1] = "SDMA7",
+};
static const u32 golden_settings_vega10_hdp[] =
{
@@ -300,9 +515,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
{
struct amdgpu_vmhub *hub;
bool retry_fault = !!(entry->src_data[1] & 0x80);
- uint32_t status = 0;
+ uint32_t status = 0, cid = 0, rw = 0;
u64 addr;
char hub_name[10];
+ const char *mmhub_cid;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
@@ -337,6 +553,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
+ cid = REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, CID);
+ rw = REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, RW);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
@@ -359,9 +579,37 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
- REG_GET_FIELD(status,
- VM_L2_PROTECTION_FAULT_STATUS, CID));
+ if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ mmhub_cid = mmhub_client_ids_vega10[cid][rw];
+ break;
+ case CHIP_VEGA12:
+ mmhub_cid = mmhub_client_ids_vega12[cid][rw];
+ break;
+ case CHIP_VEGA20:
+ mmhub_cid = mmhub_client_ids_vega20[cid][rw];
+ break;
+ case CHIP_ARCTURUS:
+ mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
+ break;
+ case CHIP_RAVEN:
+ mmhub_cid = mmhub_client_ids_raven[cid][rw];
+ break;
+ case CHIP_RENOIR:
+ mmhub_cid = mmhub_client_ids_renoir[cid][rw];
+ break;
+ default:
+ mmhub_cid = NULL;
+ break;
+ }
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ mmhub_cid ? mmhub_cid : "unknown", cid);
+ }
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -374,10 +622,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
- dev_err(adev->dev, "\t RW: 0x%lx\n",
- REG_GET_FIELD(status,
- VM_L2_PROTECTION_FAULT_STATUS, RW));
-
+ dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}
}
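The hunk above reads VM_L2_PROTECTION_FAULT_STATUS once, caches the CID and RW fields, and resolves the client name through the per-ASIC [][2] tables indexed by client ID and read/write direction, falling back to "unknown" for IDs outside the table. A minimal user-space sketch of the same decode-and-lookup pattern; the field offsets and table contents below are made up for illustration, not the real register layout:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, for illustration only. */
#define FAULT_CID_SHIFT 0
#define FAULT_CID_MASK  0x1ffu  /* 9-bit client ID */
#define FAULT_RW_SHIFT  18
#define FAULT_RW_MASK   0x1u    /* 0 = read, 1 = write */

/* Sparse [client][rw] name table, like mmhub_client_ids_*. */
static const char *client_ids[][2] = {
	[0][0] = "MP1", [0][1] = "MP1",
	[1][0] = "MP0", [1][1] = "MP0",
	[4][0] = "HDP", [4][1] = "HDP",
};

static void decode_fault(uint32_t status)
{
	uint32_t cid = (status >> FAULT_CID_SHIFT) & FAULT_CID_MASK;
	uint32_t rw  = (status >> FAULT_RW_SHIFT) & FAULT_RW_MASK;
	const char *name = NULL;

	if (cid < sizeof(client_ids) / sizeof(client_ids[0]))
		name = client_ids[cid][rw];

	printf("Faulty client ID: %s (0x%x), RW: %u\n",
	       name ? name : "unknown", (unsigned)cid, (unsigned)rw);
}

int main(void)
{
	decode_fault(0x00000001);          /* MP0, read */
	decode_fault((1u << 18) | 0x4);    /* HDP, write */
	decode_fault(0x1ff);               /* outside table -> unknown */
	return 0;
}

Designated initializers leave unlisted entries NULL, which is exactly the property the driver's sparse tables rely on.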
@@ -500,13 +745,14 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* as GFXOFF under bare metal
*/
if (adev->gfx.kiq.ring.sched.ready &&
- (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
- !adev->in_gpu_reset) {
+ (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
+ down_read_trylock(&adev->reset_sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid);
+ up_read(&adev->reset_sem);
return;
}
@@ -596,10 +842,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EIO;
- if (ring->sched.ready) {
+ if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
@@ -626,6 +872,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (r) {
amdgpu_ring_undo(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
+ up_read(&adev->reset_sem);
return -ETIME;
}
@@ -633,10 +880,11 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
+ up_read(&adev->reset_sem);
return -ETIME;
}
-
+ up_read(&adev->reset_sem);
return 0;
}
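The flush paths above now take adev->reset_sem with down_read_trylock() and fall back instead of touching the KIQ while a reset holds the semaphore for write. A rough user-space analogy of that trylock-guarded pattern, using a pthread rwlock (the kernel rwsem semantics are assumed, not reproduced):

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the KIQ register-write path. */
static void kiq_flush(unsigned int vmid)
{
	printf("KIQ flush for vmid %u\n", vmid);
}

/* Stand-in for the direct-MMIO fallback path. */
static void mmio_flush(unsigned int vmid)
{
	printf("MMIO flush for vmid %u\n", vmid);
}

static void flush_gpu_tlb(unsigned int vmid)
{
	/*
	 * Readers (flush paths) may proceed concurrently; a writer
	 * (GPU reset) excludes them. If a reset holds the lock,
	 * skip the KIQ and fall back rather than blocking.
	 */
	if (pthread_rwlock_tryrdlock(&reset_sem) == 0) {
		kiq_flush(vmid);
		pthread_rwlock_unlock(&reset_sem);
		return;
	}
	mmio_flush(vmid);
}

int main(void)
{
	flush_gpu_tlb(1);
	return 0;
}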
@@ -826,6 +1074,41 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SNOOPED;
}
+static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ } else {
+ u32 viewport;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+ 4);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ default:
+ viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ break;
+ }
+ }
+
+ return size;
+}
+
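The new gmc_v9_0_get_vbios_fb_size() derives the pre-OS framebuffer reservation from the active viewport: height times width times 4 bytes per pixel for 32bpp scanout, or the fixed AMDGPU_VBIOS_VGA_ALLOCATION when VGA mode is enabled. The arithmetic, sketched standalone:

#include <stdio.h>

/* Pre-OS framebuffer size: one 32bpp scanout surface. */
static unsigned int vbios_fb_size(unsigned int height, unsigned int width)
{
	return height * width * 4; /* 4 bytes per pixel */
}

int main(void)
{
	/* e.g. a 1920x1080 console set up by the VBIOS/GOP */
	printf("%u bytes\n", vbios_fb_size(1080, 1920)); /* 8294400 */
	return 0;
}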
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -833,7 +1116,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
.map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
- .get_vm_pte = gmc_v9_0_get_vm_pte
+ .get_vm_pte = gmc_v9_0_get_vm_pte,
+ .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -871,13 +1155,24 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
- case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ break;
+ default:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
+ }
+}
+
+static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
case CHIP_ARCTURUS:
- adev->mmhub.funcs = &mmhub_v9_4_funcs;
+ case CHIP_VEGA20:
+ adev->gfxhub.funcs = &gfxhub_v1_1_funcs;
break;
default:
+ adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
break;
}
}
@@ -890,6 +1185,7 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_irq_funcs(adev);
gmc_v9_0_set_umc_funcs(adev);
gmc_v9_0_set_mmhub_funcs(adev);
+ gmc_v9_0_set_gfxhub_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -901,57 +1197,26 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
-{
-
- /*
- * TODO:
- * Currently there is a bug where some memory client outside
- * of the driver writes to first 8M of VRAM on S3 resume,
- * this overrides GART which by default gets placed in first 8M and
- * causes VM_FAULTS once GTT is accessed.
- * Keep the stolen memory reservation until the while this is not solved.
- * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
- */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- return true;
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- default:
- return false;
- }
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ amdgpu_bo_late_init(adev);
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
- /* Check if ecc is available */
+
+ /*
+ * Workaround for a performance drop issue: the VBIOS enables partial
+ * writes while disabling HBM ECC for vega10.
+ */
if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
+ if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else
- DRM_INFO("ECC is active.\n");
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r)
- DRM_INFO("SRAM ECC is not present.\n");
- else
- DRM_INFO("SRAM ECC is active.\n");
+ }
}
if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
@@ -969,10 +1234,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (adev->asic_type == CHIP_ARCTURUS)
- base = mmhub_v9_4_get_fb_location(adev);
- else if (!amdgpu_sriov_vf(adev))
- base = mmhub_v1_0_get_fb_location(adev);
+ if (!amdgpu_sriov_vf(adev))
+ base = adev->mmhub.funcs->get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -980,7 +1243,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
- adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
/* XXX: add the xgmi offset of the physical node? */
adev->vm_manager.vram_base_offset +=
@@ -1015,7 +1278,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
#ifdef CONFIG_X86_64
if (adev->flags & AMD_IS_APU) {
- adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+ adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
@@ -1066,50 +1329,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+/**
+ * gmc_v9_0_save_registers - save registers
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves register values that may need to be
+ * restored upon resume.
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
- u32 d1vga_control;
- unsigned size;
-
- /*
- * TODO Remove once GART corruption is resolved
- * Check related code in gmc_v9_0_sw_fini
- * */
- if (gmc_v9_0_keep_stolen_memory(adev))
- return 9 * 1024 * 1024;
-
- d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
- if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
- size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
- } else {
- u32 viewport;
-
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
- size = (REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
- REG_GET_FIELD(viewport,
- HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
- 4);
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- default:
- viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
- size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
- REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
- 4);
- break;
- }
- }
- /* return 0 if the pre-OS buffer uses up most of vram */
- if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
- return 0;
-
- return size;
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
static int gmc_v9_0_sw_init(void *handle)
@@ -1117,11 +1348,9 @@ static int gmc_v9_0_sw_init(void *handle)
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfxhub_v1_0_init(adev);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_init(adev);
- else
- mmhub_v1_0_init(adev);
+ adev->gfxhub.funcs->init(adev);
+
+ adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
@@ -1233,7 +1462,7 @@ static int gmc_v9_0_sw_init(void *handle)
adev->need_swiotlb = drm_need_swiotlb(44);
if (adev->gmc.xgmi.supported) {
- r = gfxhub_v1_1_get_xgmi_info(adev);
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
return r;
}
@@ -1242,7 +1471,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+ amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
@@ -1268,21 +1497,18 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ gmc_v9_0_save_registers(adev);
+
return 0;
}
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *stolen_vga_buf;
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
-
- if (gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
-
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
amdgpu_gart_fini(adev);
@@ -1326,10 +1552,13 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*
* This restores register values, saved at suspend.
*/
-static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN)
- WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+ if (adev->asic_type == CHIP_RAVEN) {
+ WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+ WARN_ON(adev->gmc.sdpif_register !=
+ RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
+ }
}
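The Raven restore path now writes sdpif_register back and WARNs if the read-back disagrees, catching a write that did not stick. The same write-then-verify idiom in miniature, with the MMIO register mocked as a variable:

#include <stdio.h>
#include <stdint.h>

static uint32_t mock_reg;	/* stands in for the MMIO register */

static void wreg(uint32_t v) { mock_reg = v; }
static uint32_t rreg(void)   { return mock_reg; }

static void restore_register(uint32_t saved)
{
	wreg(saved);
	/* Verify the write actually landed. */
	if (rreg() != saved)
		fprintf(stderr, "WARN: register restore mismatch\n");
}

int main(void)
{
	restore_register(0xdeadbeef);
	return 0;
}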
/**
@@ -1349,14 +1578,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- r = gfxhub_v1_0_gart_enable(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
if (r)
return r;
- if (adev->asic_type == CHIP_ARCTURUS)
- r = mmhub_v9_4_gart_enable(adev);
- else
- r = mmhub_v1_0_gart_enable(adev);
+ r = adev->mmhub.funcs->gart_enable(adev);
if (r)
return r;
@@ -1391,11 +1617,10 @@ static int gmc_v9_0_hw_init(void *handle)
golden_settings_vega10_hdp,
ARRAY_SIZE(golden_settings_vega10_hdp));
+ if (adev->mmhub.funcs->update_power_gating)
+ adev->mmhub.funcs->update_power_gating(adev, true);
+
switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
case CHIP_ARCTURUS:
WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
break;
@@ -1420,11 +1645,8 @@ static int gmc_v9_0_hw_init(void *handle)
value = true;
if (!amdgpu_sriov_vf(adev)) {
- gfxhub_v1_0_set_fault_enable_default(adev, value);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_fault_enable_default(adev, value);
- else
- mmhub_v1_0_set_fault_enable_default(adev, value);
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
@@ -1438,20 +1660,6 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
- * gmc_v9_0_save_registers - saves regs
- *
- * @adev: amdgpu_device pointer
- *
- * This saves potential register values that should be
- * restored upon resume
- */
-static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
-{
- if (adev->asic_type == CHIP_RAVEN)
- adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
-}
-
-/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1460,11 +1668,8 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
*/
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
- gfxhub_v1_0_gart_disable(adev);
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_gart_disable(adev);
- else
- mmhub_v1_0_gart_disable(adev);
+ adev->gfxhub.funcs->gart_disable(adev);
+ adev->mmhub.funcs->gart_disable(adev);
amdgpu_gart_table_vram_unpin(adev);
}
@@ -1487,16 +1692,9 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v9_0_hw_fini(adev);
- if (r)
- return r;
-
- gmc_v9_0_save_registers(adev);
-
- return 0;
+ return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)
@@ -1504,7 +1702,6 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
@@ -1537,10 +1734,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_set_clockgating(adev, state);
- else
- mmhub_v1_0_set_clockgating(adev, state);
+ adev->mmhub.funcs->set_clockgating(adev, state);
athub_v1_0_set_clockgating(adev, state);
@@ -1551,10 +1745,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
- mmhub_v9_4_get_clockgating(adev, flags);
- else
- mmhub_v1_0_get_clockgating(adev, flags);
+ adev->mmhub.funcs->get_clockgating(adev, flags);
athub_v1_0_get_clockgating(adev, flags);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index e0585e8c6c1b..c415c439f690 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -26,4 +26,6 @@
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
+
+void gmc_v9_0_restore_registers(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index bc300283b6ab..c600b61b5f45 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -33,6 +33,7 @@
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .begin_use = jpeg_v1_0_ring_begin_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
}
+
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ int cnt = 0;
+
+ mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+ if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
+ DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
+
+ for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+ if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
+ DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
+ }
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 7a51c615d22d..845306f63cdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -55,22 +55,18 @@ static int amdgpu_ih_clientid_jpeg[] = {
static int jpeg_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
- harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->jpeg.harvest_config |= 1 << i;
- }
+ u32 harvest;
+ int i;
- if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
- AMDGPU_JPEG_HARVEST_JPEG1))
- return -ENOENT;
- } else
- adev->jpeg.num_jpeg_inst = 1;
+ adev->jpeg.num_jpeg_inst = JPEG25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ harvest = RREG32_SOC15(JPEG, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->jpeg.harvest_config |= 1 << i;
+ }
+ if (adev->jpeg.harvest_config == (AMDGPU_JPEG_HARVEST_JPEG0 |
+ AMDGPU_JPEG_HARVEST_JPEG1))
+ return -ENOENT;
jpeg_v2_5_set_dec_ring_funcs(adev);
jpeg_v2_5_set_irq_funcs(adev);
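jpeg_v2_5_early_init() now always probes the Arcturus JPEG instances, records disabled ones as bits in harvest_config, and fails with -ENOENT only when every instance is fused off. The bitmask bookkeeping, sketched standalone with the harvest-fuse read mocked:

#include <stdio.h>
#include <errno.h>

#define MAX_INSTANCES 2
#define HARVEST_JPEG0 (1 << 0)
#define HARVEST_JPEG1 (1 << 1)

/* Mocked harvest-fuse read: instance 1 is disabled here. */
static int instance_disabled(int i)
{
	return i == 1;
}

int main(void)
{
	unsigned int harvest_config = 0;
	int i;

	for (i = 0; i < MAX_INSTANCES; i++)
		if (instance_disabled(i))
			harvest_config |= 1u << i;

	if (harvest_config == (HARVEST_JPEG0 | HARVEST_JPEG1)) {
		fprintf(stderr, "all JPEG instances harvested\n");
		return ENOENT;	/* kernel code would return -ENOENT */
	}
	printf("harvest_config = 0x%x\n", harvest_config);
	return 0;
}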
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c41e5590a701..3a0dff53654d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -460,15 +460,10 @@ static bool jpeg_v3_0_is_idle(void *handle)
static int jpeg_v3_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
- ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
+ return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
- if (ret)
- return ret;
-
- return ret;
}
static int jpeg_v3_0_set_clockgating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 4b746584a797..1c22d8393b21 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -832,7 +832,6 @@ static int mes_v10_1_queue_init(struct amdgpu_device *adev)
static int mes_v10_1_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int r;
ring = &adev->mes.ring;
@@ -849,11 +848,7 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
- if (r)
- return r;
-
- return 0;
+ return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
}
static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index dffcb93ecee5..f84701c562bf 100755..100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -34,7 +34,7 @@
#define mmDAGB0_CNTL_MISC2_RV 0x008f
#define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
@@ -51,7 +51,7 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -268,7 +268,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -297,20 +297,19 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
}
}
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable)
{
if (amdgpu_sriov_vf(adev))
return;
if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
}
}
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
/*
@@ -338,7 +337,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -373,7 +372,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
@@ -415,7 +414,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void mmhub_v1_0_init(struct amdgpu_device *adev)
+static void mmhub_v1_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -525,7 +524,7 @@ static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
}
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -549,7 +548,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -781,4 +780,13 @@ const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+ .get_fb_location = mmhub_v1_0_get_fb_location,
+ .init = mmhub_v1_0_init,
+ .gart_enable = mmhub_v1_0_gart_enable,
+ .set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
+ .gart_disable = mmhub_v1_0_gart_disable,
+ .set_clockgating = mmhub_v1_0_set_clockgating,
+ .get_clockgating = mmhub_v1_0_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
+ .update_power_gating = mmhub_v1_0_update_power_gating,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index c43319e8f945..d77f5b65a618 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -25,18 +25,4 @@
extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v1_0_init(struct amdgpu_device *adev);
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
- bool enable);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index c79fc54bc3c4..2063700f0bc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -36,7 +36,130 @@
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070
#define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static const char *mmhub_client_ids_navi1x[][2] = {
+ [3][0] = "DCEDMC",
+ [4][0] = "DCEVGA",
+ [5][0] = "MP0",
+ [6][0] = "MP1",
+ [13][0] = "VMC",
+ [14][0] = "HDP",
+ [15][0] = "OSS",
+ [16][0] = "VCNU",
+ [17][0] = "JPEG",
+ [18][0] = "VCN",
+ [3][1] = "DCEDMC",
+ [4][1] = "DCEXFC",
+ [5][1] = "DCEVGA",
+ [6][1] = "DCEDWB",
+ [7][1] = "MP0",
+ [8][1] = "MP1",
+ [9][1] = "DBGU1",
+ [10][1] = "DBGU0",
+ [11][1] = "XDP",
+ [14][1] = "HDP",
+ [15][1] = "OSS",
+ [16][1] = "VCNU",
+ [17][1] = "JPEG",
+ [18][1] = "VCN",
+};
+
+static const char *mmhub_client_ids_sienna_cichlid[][2] = {
+ [3][0] = "DCEDMC",
+ [4][0] = "DCEVGA",
+ [5][0] = "MP0",
+ [6][0] = "MP1",
+ [8][0] = "VMC",
+ [9][0] = "VCNU0",
+ [10][0] = "JPEG",
+ [12][0] = "VCNU1",
+ [13][0] = "VCN1",
+ [14][0] = "HDP",
+ [15][0] = "OSS",
+ [32+11][0] = "VCN0",
+ [0][1] = "DBGU0",
+ [1][1] = "DBGU1",
+ [2][1] = "DCEDWB",
+ [3][1] = "DCEDMC",
+ [4][1] = "DCEVGA",
+ [5][1] = "MP0",
+ [6][1] = "MP1",
+ [7][1] = "XDP",
+ [9][1] = "VCNU0",
+ [10][1] = "JPEG",
+ [11][1] = "VCN0",
+ [12][1] = "VCNU1",
+ [13][1] = "VCN1",
+ [14][1] = "HDP",
+ [15][1] = "OSS",
+};
+
+static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
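mmhub_v2_0_get_invalidate_req() assembles the ENG0_REQ word field by field with REG_SET_FIELD: a per-VMID one-hot bit plus flags for the flush type and the translation levels to invalidate. A sketch of the same set-field packing; the shifts and masks below are hypothetical, not the real MMVM register layout:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, for illustration only. */
#define PER_VMID_REQ_SHIFT   0	/* 16 one-hot VMID bits */
#define PER_VMID_REQ_MASK    0xffffu
#define FLUSH_TYPE_SHIFT     16	/* 3-bit flush type */
#define FLUSH_TYPE_MASK      0x7u
#define INV_L2_PTES_SHIFT    19
#define INV_L1_PTES_SHIFT    20

/* Minimal REG_SET_FIELD lookalike: clear the field, then set it. */
static uint32_t set_field(uint32_t reg, unsigned int shift,
			  uint32_t mask, uint32_t val)
{
	return (reg & ~(mask << shift)) | ((val & mask) << shift);
}

static uint32_t get_invalidate_req(unsigned int vmid, uint32_t flush_type)
{
	uint32_t req = 0;

	req = set_field(req, PER_VMID_REQ_SHIFT, PER_VMID_REQ_MASK,
			1u << vmid);
	req = set_field(req, FLUSH_TYPE_SHIFT, FLUSH_TYPE_MASK, flush_type);
	req = set_field(req, INV_L2_PTES_SHIFT, 0x1u, 1);
	req = set_field(req, INV_L1_PTES_SHIFT, 0x1u, 1);
	return req;
}

int main(void)
{
	printf("req = 0x%08x\n", get_invalidate_req(3, 0));
	return 0;
}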
+static void
+mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ uint32_t cid, rw;
+ const char *mmhub_cid = NULL;
+
+ cid = REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, CID);
+ rw = REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, RW);
+
+ dev_err(adev->dev,
+ "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
+ break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
+ break;
+ default:
+ mmhub_cid = NULL;
+ break;
+ }
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ mmhub_cid ? mmhub_cid : "unknown", cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ MMVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%x\n", rw);
+}
+
+static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -78,11 +201,6 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
@@ -251,7 +369,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -280,7 +398,7 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
}
}
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
@@ -296,7 +414,7 @@ int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -327,7 +445,7 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
@@ -370,7 +488,12 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
-void mmhub_v2_0_init(struct amdgpu_device *adev)
+static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = {
+ .print_l2_protection_fault_status = mmhub_v2_0_print_l2_protection_fault_status,
+ .get_invalidate_req = mmhub_v2_0_get_invalidate_req,
+};
+
+static void mmhub_v2_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
@@ -400,6 +523,16 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
mmMMVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &mmhub_v2_0_vmhub_funcs;
}
static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -490,7 +623,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
}
}
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -514,7 +647,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -547,3 +680,14 @@ void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
+
+const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .init = mmhub_v2_0_init,
+ .gart_enable = mmhub_v2_0_gart_enable,
+ .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
+ .gart_disable = mmhub_v2_0_gart_disable,
+ .set_clockgating = mmhub_v2_0_set_clockgating,
+ .get_clockgating = mmhub_v2_0_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v2_0_setup_vm_pt_regs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index 3ea4344f0315..f80f461d67da 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -23,15 +23,6 @@
#ifndef __MMHUB_V2_0_H__
#define __MMHUB_V2_0_H__
-int mmhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v2_0_init(struct amdgpu_device *adev);
-int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
+extern const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 9979f54fef57..66748bb01b52 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -36,7 +36,7 @@
#define MMHUB_NUM_INSTANCES 2
#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
/* The base should be the same between the two mmhubs on Arcturus. Read one here. */
u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
@@ -97,7 +97,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
(u32)(adev->gmc.gart_end >> 44));
}
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
int i;
@@ -330,7 +330,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_distance, tmp);
@@ -375,7 +375,7 @@ static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
}
}
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
int i;
@@ -397,7 +397,7 @@ int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
return 0;
}
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
u32 tmp;
@@ -442,7 +442,7 @@ void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
int i;
@@ -500,7 +500,7 @@ void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
}
}
-void mmhub_v9_4_init(struct amdgpu_device *adev)
+static void mmhub_v9_4_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] =
{&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]};
@@ -630,7 +630,7 @@ static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *ade
}
}
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
@@ -650,7 +650,7 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
@@ -1624,8 +1624,45 @@ static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
}
}
+static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
+ if (reg_value)
+ dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+ i, reg_value);
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+ .get_fb_location = mmhub_v9_4_get_fb_location,
+ .init = mmhub_v9_4_init,
+ .gart_enable = mmhub_v9_4_gart_enable,
+ .set_fault_enable_default = mmhub_v9_4_set_fault_enable_default,
+ .gart_disable = mmhub_v9_4_gart_disable,
+ .set_clockgating = mmhub_v9_4_set_clockgating,
+ .get_clockgating = mmhub_v9_4_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
+ .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 1b979773776c..92404a8f66f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -25,16 +25,4 @@
extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
-u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v9_4_gart_enable(struct amdgpu_device *adev);
-void mmhub_v9_4_gart_disable(struct amdgpu_device *adev);
-void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev,
- bool value);
-void mmhub_v9_4_init(struct amdgpu_device *adev);
-int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
- enum amd_clockgating_state state);
-void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
- uint64_t page_table_base);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 5fd67e1cc2a0..f5ce9a9f4cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -238,19 +238,15 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
- int locked;
/* block amdgpu_gpu_recover until msg FLR COMPLETE received,
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
- *
- * we can unlock the lock_reset to allow "amdgpu_job_timedout"
- * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
- * which means host side had finished this VF's FLR.
*/
- locked = mutex_trylock(&adev->lock_reset);
- if (locked)
- adev->in_gpu_reset = true;
+ if (!down_read_trylock(&adev->reset_sem))
+ return;
+
+ atomic_set(&adev->in_gpu_reset, 1);
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -261,14 +257,13 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- if (locked) {
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
- }
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_read(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
- && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
+ && (!amdgpu_device_has_job_running(adev) ||
+ adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT))
amdgpu_device_gpu_recover(adev, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index ce2bf1fb79ed..666ed99cc14b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -259,19 +259,15 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
- int locked;
/* block amdgpu_gpu_recover until msg FLR COMPLETE received,
* otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
- *
- * we can unlock the lock_reset to allow "amdgpu_job_timedout"
- * to run gpu_recover() after FLR_NOTIFICATION_CMPL received
- * which means host side had finished this VF's FLR.
*/
- locked = mutex_trylock(&adev->lock_reset);
- if (locked)
- adev->in_gpu_reset = true;
+ if (!down_read_trylock(&adev->reset_sem))
+ return;
+
+ atomic_set(&adev->in_gpu_reset, 1);
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
@@ -282,14 +278,13 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- if (locked) {
- adev->in_gpu_reset = false;
- mutex_unlock(&adev->lock_reset);
- }
+ atomic_set(&adev->in_gpu_reset, 0);
+ up_read(&adev->reset_sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
- && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
+ && (!amdgpu_device_has_job_running(adev) ||
+ adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
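Both FLR handlers now follow the same shape: try to take reset_sem for read, giving up if a reset is in flight, publish in_gpu_reset as an atomic flag, poll for the completion message with a timeout, then clear the flag and drop the semaphore. The control flow, sketched with C11 atomics and a pthread rwlock as stand-ins for the kernel primitives:

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t reset_sem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int in_gpu_reset;

/* Mocked mailbox peek: pretend completion arrives immediately. */
static int flr_completed(void)
{
	return 1;
}

static void flr_work(void)
{
	int timeout = 5000; /* ms */

	/* A concurrent reset holds the lock for write: just bail. */
	if (pthread_rwlock_tryrdlock(&reset_sem) != 0)
		return;

	atomic_store(&in_gpu_reset, 1);

	/* Poll for FLR completion, 10 ms at a time. */
	do {
		if (flr_completed())
			break;
		usleep(10 * 1000);
		timeout -= 10;
	} while (timeout > 1);

	atomic_store(&in_gpu_reset, 0);
	pthread_rwlock_unlock(&reset_sem);
}

int main(void)
{
	flr_work();
	puts("FLR work done");
	return 0;
}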
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 350f1bf063c6..74b1e7dc49a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -306,7 +306,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
} else {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
}
- navi10_ih_reroute_ih(adev);
+ if (adev->irq.ih1.ring_size)
+ navi10_ih_reroute_ih(adev);
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih->use_bus_addr) {
@@ -668,19 +669,26 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
+ adev->irq.ih1.ring_size = 0;
+ adev->irq.ih2.ring_size = 0;
- adev->irq.ih1.use_doorbell = true;
- adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+ if (adev->asic_type < CHIP_NAVI10) {
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index =
+ (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
- adev->irq.ih2.use_doorbell = true;
- adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index =
+ (adev->doorbell_index.ih + 2) << 1;
+ }
r = amdgpu_irq_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index e629156173d3..eadc9526d33f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -302,6 +302,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -312,28 +313,31 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
RAS_CNTLR_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
- /*
- * clear error status after ras_controller_intr according to
- * hw team and count ue number for query
- */
- nbio_v7_4_query_ras_error_count(adev, &err_data);
-
- /* logging on error counter and printing for awareness */
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
-
- if (err_data.ce_count)
- dev_info(adev->dev, "%ld correctable hardware "
- "errors detected in %s block, "
- "no user action is needed.\n",
- obj->err_data.ce_count,
- adev->nbio.ras_if->name);
-
- if (err_data.ue_count)
- dev_info(adev->dev, "%ld uncorrectable hardware "
- "errors detected in %s block\n",
- obj->err_data.ue_count,
- adev->nbio.ras_if->name);
+ if (!ras->disable_ras_err_cnt_harvest) {
+ /*
+ * Clear the error status after the ras_controller_intr,
+ * per the hw team, and accumulate the ue count
+ * for later query.
+ */
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+ /* log the error counts and print them for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ adev->nbio.ras_if->name);
+ }
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ca11253e787c..8eeba8096493 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -69,75 +69,40 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
*/
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u32 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, v);
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u64 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* read low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
-
- /* read high 32 bit*/
- WREG32(address, reg + 4);
- (void)RREG32(address);
- r |= ((u64)RREG32(data) << 32);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* write low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, (u32)(v & 0xffffffffULL));
- (void)RREG32(data);
-
- /* write high 32 bit */
- WREG32(address, reg + 4);
- (void)RREG32(address);
- WREG32(data, (u32)(v >> 32));
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
@@ -311,7 +276,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
- pci_save_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
dev_info(adev->dev, "GPU smu mode1 reset\n");
@@ -323,7 +288,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");
- pci_restore_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -380,7 +345,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- dev_info(adev->dev, "GPU BACO reset\n");
+ dev_info(adev->dev, "BACO reset\n");
ret = smu_baco_enter(smu);
if (ret)
@@ -388,8 +353,10 @@ static int nv_asic_reset(struct amdgpu_device *adev)
ret = smu_baco_exit(smu);
if (ret)
return ret;
- } else
+ } else {
+ dev_info(adev->dev, "MODE1 reset\n");
ret = nv_asic_mode1_reset(adev);
+ }
return ret;
}
@@ -488,6 +455,15 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+ if ((pdev->device == 0x731E &&
+ (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+ (pdev->device == 0x7340 && pdev->revision == 0xC9))
+ return true;
+ return false;
+}
+
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -524,7 +500,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!nv_is_headless_sku(adev->pdev))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -619,7 +596,7 @@ static void nv_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
@@ -690,6 +667,10 @@ static void nv_init_doorbell_index(struct amdgpu_device *adev)
adev->doorbell_index.sdma_doorbell_range = 20;
}
+static void nv_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
.read_disabled_bios = &nv_read_disabled_bios,
@@ -709,6 +690,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
.supports_baco = &nv_asic_supports_baco,
+ .pre_asic_init = &nv_pre_asic_init,
};
static int nv_common_early_init(void *handle)
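The nv.c register helpers above collapse into the shared amdgpu_device_indirect_* routines: write the target offset to an index register, then access the data register, with 64-bit values split into two 32-bit halves at reg and reg + 4. A mock of the index/data idiom with the register file simulated as an array; the real helpers also serialize on a spinlock, which this sketch omits:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[64];	/* mock register file */
static uint32_t index_reg;	/* latched index */

static void wreg(uint32_t addr, uint32_t v)
{
	if (addr == 0)		/* index register */
		index_reg = v;
	else			/* data register: access regs[index] */
		regs[index_reg] = v;
}

static uint32_t rreg(uint32_t addr)
{
	return addr == 0 ? index_reg : regs[index_reg];
}

/* 64-bit indirect read: two 32-bit halves at reg and reg + 4. */
static uint64_t indirect_rreg64(uint32_t reg)
{
	uint64_t r;

	wreg(0, reg);		/* select low dword */
	r = rreg(1);
	wreg(0, reg + 4);	/* select high dword */
	r |= (uint64_t)rreg(1) << 32;
	return r;
}

int main(void)
{
	wreg(0, 8);  wreg(1, 0x11223344);	/* low half */
	wreg(0, 12); wreg(1, 0x55667788);	/* high half */
	printf("0x%016llx\n",
	       (unsigned long long)indirect_rreg64(8));
	return 0;
}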
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index cbc04a5c0fe1..4137dc710aaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -83,19 +83,6 @@ struct psp_gfx_ctrl
*/
#define GFX_FLAG_RESPONSE 0x80000000
-/* Gbr IH registers ID */
-enum ih_reg_id {
- IH_RB = 0, // IH_RB_CNTL
- IH_RB_RNG1 = 1, // IH_RB_CNTL_RING1
- IH_RB_RNG2 = 2, // IH_RB_CNTL_RING2
-};
-
-/* Command to setup Gibraltar IH register */
-struct psp_gfx_cmd_gbr_ih_reg {
- uint32_t reg_value; /* Value to be set to the IH_RB_CNTL... register*/
- enum ih_reg_id reg_id; /* ID of the register */
-};
-
/* TEE Gfx Command IDs for the ring buffer interface. */
enum psp_gfx_cmd_id
{
@@ -214,7 +201,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_UVD1 = 23, /* UVD1 VG-20 */
GFX_FW_TYPE_TOC = 24, /* TOC NV-10 */
GFX_FW_TYPE_RLC_P = 25, /* RLC P NV */
- GFX_FW_TYPE_RLX6 = 26, /* RLX6 NV */
+ GFX_FW_TYPE_RLC_IRAM = 26, /* RLC_IRAM NV */
GFX_FW_TYPE_GLOBAL_TAP_DELAYS = 27, /* GLOBAL TAP DELAYS NV */
GFX_FW_TYPE_SE0_TAP_DELAYS = 28, /* SE0 TAP DELAYS NV */
GFX_FW_TYPE_SE1_TAP_DELAYS = 29, /* SE1 TAP DELAYS NV */
@@ -236,7 +223,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_ACCUM_CTRL_RAM = 45, /* ACCUM CTRL RAM NV */
GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */
GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */
- GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */
+ GFX_FW_TYPE_RLC_DRAM_BOOT = 48, /* RLC DRAM BOOT NV */
GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */
GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */
GFX_FW_TYPE_DMUB = 51, /* DMUB RN */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 6c9614f77d33..c4828bd3264b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -38,6 +38,10 @@
#include "oss/osssys_4_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
+MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
+
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -45,17 +49,72 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
+ char fw_name[30];
int err = 0;
+ const struct ta_firmware_header_v1_0 *ta_hdr;
+ DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
}
err = psp_init_asd_microcode(psp, chip_name);
+ if (err)
+ goto out;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v12.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
+ return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out:
+ if (err) {
+ /* fw_name may be unset when psp_init_asd_microcode() fails,
+ * so report the chip name instead */
+ dev_err(adev->dev,
+ "psp v12.0: Failed to load firmware for %s\n",
+ chip_name);
+ }
+
return err;
}
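The TA parsing above walks a versioned header at the front of the .bin: the HDCP blob starts at header.ucode_array_offset_bytes from the file start, and the DTM blob is located relative to the HDCP start via ta_dtm_offset_bytes. A reduced sketch of the header, keeping only the fields this function touches — the full struct lives in amdgpu_ucode.h and has more members:

/* reduced sketch of the TA header; only fields used above are shown */
struct ta_firmware_header_v1_0 {
	struct common_firmware_header header;	/* ucode_version, ucode_array_offset_bytes */
	uint32_t ta_hdcp_ucode_version;
	uint32_t ta_hdcp_size_bytes;
	uint32_t ta_dtm_ucode_version;
	uint32_t ta_dtm_offset_bytes;
	uint32_t ta_dtm_size_bytes;
};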
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 856c50386c86..e82f49f62f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -69,6 +69,7 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -592,6 +593,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -616,7 +620,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
- chip_name = "renoir";
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ chip_name = "renoir";
+ else
+ chip_name = "green_sardine";
break;
default:
BUG();
@@ -1000,7 +1007,7 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
sdma[i] = &adev->sdma.instance[i].page;
if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (unset == false)) {
+ (!unset)) {
amdgpu_ttm_set_buffer_funcs_status(adev, false);
unset = true;
}
@@ -1063,6 +1070,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
+
+ /*
+ * Enable SDMA utilization. It is only supported on
+ * Arcturus for the moment, and only with firmware version
+ * 14 and above.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ adev->sdma.instance[i].fw_version >= 14)
+ WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
}
}
@@ -1080,7 +1096,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v4_0_gfx_stop(adev);
sdma_v4_0_rlc_stop(adev);
if (adev->sdma.has_page_queue)
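Note that psp_v12_0.c above and sdma_v4_0.c here repeat the same apu_flags test to pick between the renoir and green_sardine firmware prefixes. If the pattern keeps spreading across IP blocks, a shared helper would be a natural cleanup — a hypothetical sketch, not part of this patch:

/* hypothetical cleanup: centralize the Renoir/Green Sardine fw prefix */
static const char *renoir_fw_prefix(struct amdgpu_device *adev)
{
	return (adev->apu_flags & AMD_APU_IS_RENOIR) ?
		"renoir" : "green_sardine";
}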
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index e2232dd12d8e..9c72b95b7463 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -203,6 +203,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -616,7 +619,7 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v5_0_gfx_stop(adev);
sdma_v5_0_rlc_stop(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 46a9617fee5f..9f3952723c63 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -148,6 +148,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -559,7 +562,7 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v5_2_gfx_stop(adev);
sdma_v5_2_rlc_stop(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 1b449291f068..e5e336fd9e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -52,6 +52,8 @@
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+#include "amdgpu_dm.h"
+
static const u32 tahiti_golden_registers[] =
{
mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
@@ -1215,10 +1217,100 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-//xxx: not implemented
+static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
+{
+ u32 tmp, i;
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_BYPASS_EN;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp |= SPLL_CTLREQ_CHG;
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+ break;
+ udelay(1);
+ }
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+ tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+ WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+ tmp = RREG32(MPLL_CNTL_MODE);
+ tmp &= ~MPLL_MCLK_SEL;
+ WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp |= SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_RESET;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(CG_SPLL_FUNC_CNTL);
+ tmp |= SPLL_SLEEP;
+ WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+ tmp = RREG32(SPLL_CNTL_MODE);
+ tmp &= ~SPLL_SW_DIR_CONTROL;
+ WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int r = -EINVAL;
+
+ dev_info(adev->dev, "GPU pci config reset\n");
+
+ /* set mclk/sclk to bypass */
+ si_set_clk_bypass_mode(adev);
+ /* powerdown spll */
+ si_spll_powerdown(adev);
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ /* reset */
+ amdgpu_device_pci_config_reset(adev);
+
+ udelay(100);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ adev->has_hw_reset = true;
+ r = 0;
+ break;
+ }
+ udelay(1);
+ }
+
+ return r;
+}
+
static int si_asic_reset(struct amdgpu_device *adev)
{
- return 0;
+ int r;
+
+ dev_info(adev->dev, "PCI CONFIG reset\n");
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ r = si_gpu_pci_config_reset(adev);
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return r;
}
static bool si_asic_supports_baco(struct amdgpu_device *adev)
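The wait loop in si_gpu_pci_config_reset() relies on a PCI idiom: a device still held in reset returns all-ones (0xffffffff) for reads, so mmCONFIG_MEMSIZE reading anything else means the ASIC is back. The same poll, factored as a generic helper for illustration only (the name and error code are hypothetical):

/* hypothetical helper: poll a register until it stops reading all-ones */
static int si_wait_for_asic_alive(struct amdgpu_device *adev, u32 reg)
{
	u32 i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(reg) != 0xffffffff)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}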
@@ -1247,7 +1339,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
uint32_t temp;
temp = RREG32(CONFIG_CNTL);
- if (state == false) {
+ if (!state) {
temp &= ~(1<<0);
temp |= (1<<1);
} else {
@@ -1779,6 +1871,10 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return 0;
}
+static void si_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
@@ -1800,6 +1896,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.need_reset_on_init = &si_need_reset_on_init,
.get_pcie_replay_count = &si_get_pcie_replay_count,
.supports_baco = &si_asic_supports_baco,
+ .pre_asic_init = &si_pre_asic_init,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -2546,6 +2643,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
@@ -2560,6 +2661,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index d55bf64770c4..7fb240c4990c 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -508,14 +508,9 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
struct amdgpu_device *adev = to_amdgpu_device(control);
/* Send PPSMC_MSG_RequestI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
- goto Fail;
-
-
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, true))
+ if (!amdgpu_dpm_smu_i2c_bus_access(adev, true))
return true;
-Fail:
return false;
}
@@ -523,16 +518,10 @@ static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- /* Send PPSMC_MSG_RequestI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access)
- goto Fail;
-
/* Send PPSMC_MSG_ReleaseI2CBus */
- if (!adev->powerplay.pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle,
- false))
+ if (!amdgpu_dpm_smu_i2c_bus_access(adev, false))
return true;
-Fail:
return false;
}
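Both I2C lock callbacks now go through amdgpu_dpm_smu_i2c_bus_access() instead of poking pp_funcs directly, which removes the duplicated NULL checks and the goto-based error paths. A minimal sketch of what the wrapper presumably does, assuming it lives in amdgpu_dpm.c (the error code is a guess):

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, bool acquire)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->smu_i2c_bus_access)
		return -EINVAL;

	return pp_funcs->smu_i2c_bus_access(adev->powerplay.pp_handle,
					    acquire);
}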
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c28ebf41530a..f57c5f57efa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -101,75 +101,40 @@
*/
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u32 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg(adev, address, data, reg);
}
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, v);
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}
static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
- unsigned long flags, address, data;
- u64 r;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* read low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- r = RREG32(data);
-
- /* read high 32 bit*/
- WREG32(address, reg + 4);
- (void)RREG32(address);
- r |= ((u64)RREG32(data) << 32);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
- return r;
+ return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}
static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
- unsigned long flags, address, data;
+ unsigned long address, data;
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- /* write low 32 bit */
- WREG32(address, reg);
- (void)RREG32(address);
- WREG32(data, (u32)(v & 0xffffffffULL));
- (void)RREG32(data);
-
- /* write high 32 bit */
- WREG32(address, reg + 4);
- (void)RREG32(address);
- WREG32(data, (u32)(v >> 32));
- (void)RREG32(data);
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
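The four PCIE accessors above collapse into the shared amdgpu_device_indirect_* helpers, which presumably centralize exactly the locked index/data sequence the removed code performed. A sketch of the 32-bit read, reconstructed from the deleted lines (the real helper lives in amdgpu_device.c):

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				unsigned long address, unsigned long data,
				u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);	/* select the register ... */
	(void)RREG32(address);	/* ... and post the write */
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}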
@@ -484,13 +449,13 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
- pci_save_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
ret = psp_gpu_reset(adev);
if (ret)
dev_err(adev->dev, "GPU mode1 reset failed\n");
- pci_restore_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -580,10 +545,13 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
+ dev_info(adev->dev, "BACO reset\n");
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
default:
+ dev_info(adev->dev, "MODE1 reset\n");
return soc15_asic_mode1_reset(adev);
}
}
@@ -1026,6 +994,11 @@ static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
return (nak_r + nak_g);
}
+static void soc15_pre_asic_init(struct amdgpu_device *adev)
+{
+ gmc_v9_0_restore_registers(adev);
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -1046,6 +1019,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
+ .pre_asic_init = &soc15_pre_asic_init,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1069,6 +1043,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
+ .pre_asic_init = &soc15_pre_asic_init,
};
static int soc15_common_early_init(void *handle)
@@ -1220,8 +1195,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
- AMD_PG_SUPPORT_VCN |
- AMD_PG_SUPPORT_VCN_DPG;
+ AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1268,7 +1242,15 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
- adev->apu_flags |= AMD_APU_IS_RENOIR;
+ if (adev->pdev->device == 0x1636)
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ adev->external_rev_id = adev->rev_id + 0x91;
+ else
+ adev->external_rev_id = adev->rev_id + 0xa1;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1293,7 +1275,6 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
- adev->external_rev_id = adev->rev_id + 0x91;
break;
default:
/* FIXME: not supported yet */
@@ -1449,7 +1430,8 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
uint32_t def, data;
if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS) {
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_RENOIR) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h
new file mode 100644
index 000000000000..f14833fae07c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_rap_if.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_RAP_IF_H
+#define _TA_RAP_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_rap_status {
+ TA_RAP_STATUS__SUCCESS = 1,
+ TA_RAP_STATUS__ERROR_GENERIC_FAILURE = 2,
+ TA_RAP_STATUS__ERROR_CMD_NOT_SUPPORTED = 3,
+ TA_RAP_STATUS__ERROR_INVALID_VALIDATION_METHOD = 4,
+ TA_RAP_STATUS__ERROR_NULL_POINTER = 5,
+ TA_RAP_STATUS__ERROR_NOT_INITIALIZED = 6,
+ TA_RAP_STATUS__ERROR_VALIDATION_FAILED = 7,
+ TA_RAP_STATUS__ERROR_ASIC_NOT_SUPPORTED = 8,
+ TA_RAP_STATUS__ERROR_OPERATION_NOT_PERMISSABLE = 9,
+ TA_RAP_STATUS__ERROR_ALREADY_INIT = 10,
+};
+
+enum ta_rap_cmd {
+ TA_CMD_RAP__INITIALIZE = 1,
+ TA_CMD_RAP__VALIDATE_L0 = 2,
+};
+
+enum ta_rap_validation_method {
+ METHOD_A = 1,
+};
+
+struct ta_rap_cmd_input_data {
+ uint8_t reserved[8];
+};
+
+struct ta_rap_cmd_output_data {
+ uint32_t last_subsection;
+ uint32_t num_total_validate;
+ uint32_t num_valid;
+ uint32_t last_validate_addr;
+ uint32_t last_validate_val;
+ uint32_t last_validate_val_exptd;
+};
+
+union ta_rap_cmd_input {
+ struct ta_rap_cmd_input_data input;
+};
+
+union ta_rap_cmd_output {
+ struct ta_rap_cmd_output_data output;
+};
+
+struct ta_rap_shared_memory {
+ uint32_t cmd_id;
+ uint32_t validation_method_id;
+ uint32_t resp_id;
+ enum ta_rap_status rap_status;
+ union ta_rap_cmd_input rap_in_message;
+ union ta_rap_cmd_output rap_out_message;
+ uint8_t reserved[64];
+};
+
+#endif // _TA_RAP_IF_H
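For context, a hypothetical host-side use of this interface: fill the shared buffer, issue TA_CMD_RAP__VALIDATE_L0, and match the response against RSP_ID() (responses echo the command id with bit 31 set). How the buffer actually reaches the RAP TA is PSP-ring plumbing and is not shown:

static enum ta_rap_status rap_validate_l0(struct ta_rap_shared_memory *buf)
{
	memset(buf, 0, sizeof(*buf));
	buf->cmd_id = TA_CMD_RAP__VALIDATE_L0;
	buf->validation_method_id = METHOD_A;

	/* ... submit buf to the RAP TA via the PSP ring (not shown) ... */

	if (buf->resp_id != RSP_ID(TA_CMD_RAP__VALIDATE_L0))
		return TA_RAP_STATUS__ERROR_GENERIC_FAILURE;

	return buf->rap_status;
}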
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 418cf097c918..5288617ca552 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -32,20 +32,6 @@
#define UMC_6_INST_DIST 0x40000
-/*
- * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
- * is the index of 8KB block
- */
-#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
-/* channel index is the index of 256B block */
-#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
-/* offset in 256B block */
-#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
-
-#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
-#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
-#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
-
const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = {
{2, 18, 11, 27}, {4, 20, 13, 29},
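The address macros and LOOP_UMC_* iterators deleted above are used unchanged by the new umc_v8_7.c below, so they have evidently been hoisted to a shared header (presumably amdgpu_umc.h). For reference, as removed:

#define ADDR_OF_8KB_BLOCK(addr)			(((addr) & ~0xffULL) << 5)
#define ADDR_OF_256B_BLOCK(channel_index)	((channel_index) << 8)
#define OFFSET_IN_256B_BLOCK(addr)		((addr) & 0xffULL)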
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
new file mode 100644
index 000000000000..5665c77a9d58
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+
+#include "rsmu/rsmu_0_0_2_offset.h"
+#include "rsmu/rsmu_0_0_2_sh_mask.h"
+#include "umc/umc_8_7_0_offset.h"
+#include "umc/umc_8_7_0_sh_mask.h"
+
+#define UMC_8_INST_DIST 0x40000
+
+const uint32_t
+ umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM] = {
+ {2, 11}, {4, 13},
+ {1, 8}, {7, 14},
+ {10, 3}, {12, 5},
+ {9, 0}, {15, 6}
+};
+
+static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst;
+}
+
+static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_clear_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_clear_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+}
+
+static void umc_v8_7_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ /* UMC 8_7_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_7_CE_CNT_INIT);
+
+ /* check for SRAM correctable error;
+ * MCUMC_STATUS is a 64-bit register */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v8_7_querry_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_7_querry_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ umc_v8_7_clear_error_count(adev);
+}
+
+static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint32_t lsb, mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ /* the lowest bits of the address, as counted by the LSB field, should be ignored */
+ lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB);
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ err_addr &= ~((0x1ULL << lsb) - 1);
+
+ /* translate the umc channel address to a soc physical address; three parts are combined */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
+static void umc_v8_7_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_GeccErrCnt);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 0);
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the err counter */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
+ GeccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_7_CE_CNT_INIT);
+}
+
+static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_8_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v8_7_err_cnt_init_per_channel(adev, umc_reg_offset);
+ }
+}
+
+const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+ .err_cnt_init = umc_v8_7_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
+ .query_ras_error_count = umc_v8_7_query_ras_error_count,
+ .query_ras_error_address = umc_v8_7_query_ras_error_address,
+};
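A worked example of the three-part address translation in umc_v8_7_query_error_address(), using the block macros noted earlier; the input values are illustrative only:

/* illustrative values: err_addr = 0x12345, channel_index = 11 */
uint64_t pa = ADDR_OF_8KB_BLOCK(0x12345ULL)	/* 0x12300 << 5   = 0x246000 */
	    | ADDR_OF_256B_BLOCK(11)		/* 11 << 8        = 0xb00    */
	    | OFFSET_IN_256B_BLOCK(0x12345ULL);	/* 0x12345 & 0xff = 0x45     */
/* pa == 0x246b45; retired_page is then pa >> AMDGPU_GPU_PAGE_SHIFT */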
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
new file mode 100644
index 000000000000..d4d0468e3df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_7_H__
+#define __UMC_V8_7_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* HBM Memory Channel Width */
+#define UMC_V8_7_HBM_MEMORY_CHANNEL_WIDTH 128
+/* number of umc channel instance with memory map register access */
+#define UMC_V8_7_CHANNEL_INSTANCE_NUM 2
+/* number of umc instance with memory map register access */
+#define UMC_V8_7_UMC_INSTANCE_NUM 8
+/* total channel instances in one umc block */
+#define UMC_V8_7_TOTAL_CHANNEL_NUM (UMC_V8_7_CHANNEL_INSTANCE_NUM * UMC_V8_7_UMC_INSTANCE_NUM)
+/* UMC register per-channel offset */
+#define UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA 0x400
+
+/* EccErrCnt max value */
+#define UMC_V8_7_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_7_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
+
+extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const uint32_t
+ umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
+
+#endif
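The CE counter seeding is worth spelling out: the hardware counter is written to UMC_V8_7_CE_CNT_INIT rather than zero, so it wraps — and, by inference, can raise the APIC interrupt configured in err_cnt_init — after UMC_V8_7_CE_INT_THRESHOLD further errors; the query path then subtracts the seed to recover the real count. With both constants at 0xffff the seed currently evaluates to 0:

/* seed = CE_CNT_MAX - CE_INT_THRESHOLD = 0xffff - 0xffff = 0
 * after N correctable errors the counter reads seed + N, so the
 * query path computes N = GeccErrCnt - UMC_V8_7_CE_CNT_INIT */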
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 3cafba726587..b0c0c438fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -348,7 +348,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
/* Set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* program the 4GB memory segment for rptr and ring buffer */
WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
@@ -541,7 +541,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a566ff926e90..6e57001f6d0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -253,7 +253,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
uint64_t offset;
uint32_t size;
- /* programm memory controller bits 0-27 */
+ /* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -404,7 +404,7 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0a880bc101b8..666bfa4a0b8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -583,7 +583,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
uint64_t offset;
uint32_t size;
- /* programm memory controller bits 0-27 */
+ /* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -825,7 +825,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1240,8 +1240,8 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
break;
}
- if (false == int_handled)
- DRM_ERROR("Unhandled interrupt: %d %d\n",
+ if (!int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index e07e3fae99b5..b44c8677ce8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1073,7 +1073,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 927c330fad21..86e1ef732ebe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
* vcn_v1_0_early_init - set function pointers
@@ -910,7 +911,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1068,7 +1069,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1804,11 +1805,24 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
}
}
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+ mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+ if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
+ DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+
+}
+
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
+{
+ struct amdgpu_device *adev = ring->adev;
+
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
}
}
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+ mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+}
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.insert_end = vcn_v1_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = vcn_v1_0_ring_begin_use,
- .end_use = amdgpu_vcn_ring_end_use,
+ .end_use = vcn_v1_0_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
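The net effect of the vcn_v1_0 changes: every decode/encode submission is now bracketed by the shared vcn1_jpeg1_workaround mutex, and VCN waits for the JPEG decode ring to drain before touching power gating. The JPEG1 side presumably takes the same mutex and waits on the VCN ring in its own begin_use, so the two blocks never reprogram power state concurrently. Schematically (not literal code; the JPEG side is inferred from the mutex name):

/* vcn begin_use: mutex_lock(vcn1_jpeg1_workaround);
 *                wait for jpeg_dec fences to drain;
 *                vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
 * vcn end_use:   schedule idle work;
 *                mutex_unlock(vcn1_jpeg1_workaround);
 */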
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
index f67d7391fc21..1f1cc7f0ece7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
@@ -24,7 +24,8 @@
#ifndef __VCN_V1_0_H__
#define __VCN_V1_0_H__
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 23a9eb5b2c8a..e5d29dee0c88 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -900,7 +900,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1060,7 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index e99bef6e2354..0f1d3ef8baa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -80,23 +80,18 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
+ u32 harvest;
+ int i;
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
adev->vcn.num_enc_rings = 2;
}
@@ -887,7 +882,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1067,7 +1062,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
- /* programm the RB_BASE for ring buffer */
+ /* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1108,7 +1103,7 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
{
uint32_t data = 0, loop = 0, size = 0;
uint64_t addr = table->gpu_addr;
- struct mmsch_v1_1_init_header *header = NULL;;
+ struct mmsch_v1_1_init_header *header = NULL;
header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
size = header->total_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3a805eaf6f11..e074f7ed388c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -198,7 +198,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
- if (i != 0)
+ if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
ring->no_scheduler = true;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
@@ -222,7 +222,7 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
- if (i != 1)
+ if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
ring->no_scheduler = true;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f6f2ed0830b1..9bcd0eebc6d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -752,8 +752,10 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
+ dev_info(adev->dev, "PCI CONFIG reset\n");
r = vi_asic_pci_config_reset(adev);
}
@@ -1066,6 +1068,10 @@ static bool vi_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static void vi_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
@@ -1086,6 +1092,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.need_reset_on_init = &vi_need_reset_on_init,
.get_pcie_replay_count = &vi_get_pcie_replay_count,
.supports_baco = &vi_asic_supports_baco,
+ .pre_asic_init = &vi_pre_asic_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1507,8 +1514,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_MC,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1526,8 +1532,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_SDMA,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
@@ -1545,8 +1550,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_HDP,
pp_support_state,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
@@ -1560,8 +1564,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
if (state == AMD_CG_STATE_UNGATE)
@@ -1573,8 +1576,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
@@ -1588,8 +1590,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
@@ -1603,8 +1604,7 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
- if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
- amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
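As with the I2C helper earlier, amdgpu_dpm_set_clockgating_by_smu() presumably now validates pp_funcs internally, letting each of these call sites drop its guard. A minimal sketch under that assumption (the error code is a guess):

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, u32 msg_id)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_clockgating_by_smu)
		return -EOPNOTSUPP;

	return pp_funcs->set_clockgating_by_smu(adev->powerplay.pp_handle,
						msg_id);
}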
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 24b471734117..dcb1d89d776e 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -91,7 +91,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
- unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
+ u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 577d901fdb63..affbca7c0050 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -911,7 +911,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0x705d0000, 0x807c817c,
0x8070ff70, 0x00000080,
0xbf0a7b7c, 0xbf85fff8,
- 0xbf82014f, 0xbef4037e,
+ 0xbf820151, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -1024,61 +1024,62 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbe883108, 0xbe8a310a,
0xbe8c310c, 0xbe8e310e,
0xbf06807c, 0xbf84fff0,
- 0xb9782a05, 0x80788178,
- 0xbf0d9972, 0xbf850002,
- 0x8f788978, 0xbf820001,
- 0x8f788a78, 0xb96e1e06,
- 0x8f6e8a6e, 0x80786e78,
- 0x8078ff78, 0x00000200,
- 0xbef603ff, 0x01000000,
- 0xf4211bfa, 0xf0000000,
- 0x80788478, 0xf4211b3a,
+ 0xba80f801, 0x00000000,
+ 0xbf8a0000, 0xb9782a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0xbef603ff,
+ 0x01000000, 0xf4211bfa,
0xf0000000, 0x80788478,
- 0xf4211b7a, 0xf0000000,
- 0x80788478, 0xf4211c3a,
+ 0xf4211b3a, 0xf0000000,
+ 0x80788478, 0xf4211b7a,
0xf0000000, 0x80788478,
- 0xf4211c7a, 0xf0000000,
- 0x80788478, 0xf4211eba,
+ 0xf4211c3a, 0xf0000000,
+ 0x80788478, 0xf4211c7a,
0xf0000000, 0x80788478,
- 0xf4211efa, 0xf0000000,
- 0x80788478, 0xf4211e7a,
+ 0xf4211eba, 0xf0000000,
+ 0x80788478, 0xf4211efa,
0xf0000000, 0x80788478,
- 0xf4211cfa, 0xf0000000,
- 0x80788478, 0xf4211bba,
+ 0xf4211e7a, 0xf0000000,
+ 0x80788478, 0xf4211cfa,
0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef815, 0xbefc036f,
- 0xbefe0370, 0xbeff0371,
- 0x876f7bff, 0x000003ff,
- 0xb9ef4803, 0xb9f9f816,
- 0x876f7bff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0xb96e2a05,
- 0x806e816e, 0xbf0d9972,
- 0xbf850002, 0x8f6e896e,
- 0xbf820001, 0x8f6e8a6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x876fff6f, 0x0000ffff,
- 0xf4091c37, 0xfa000050,
- 0xf4091d37, 0xfa000060,
- 0xf4011e77, 0xfa000074,
- 0xbf8cc07f, 0x876fff6d,
- 0xfc000000, 0x906f9a6f,
- 0x8f6f906f, 0xbeee0380,
+ 0xb9eef814, 0xf4211bba,
+ 0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef815,
+ 0xbefc036f, 0xbefe0370,
+ 0xbeff0371, 0x876f7bff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f7bff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0xb96e2a05, 0x806e816e,
+ 0xbf0d9972, 0xbf850002,
+ 0x8f6e896e, 0xbf820001,
+ 0x8f6e8a6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x876fff6f,
+ 0x0000ffff, 0xf4091c37,
+ 0xfa000050, 0xf4091d37,
+ 0xfa000060, 0xf4011e77,
+ 0xfa000074, 0xbf8cc07f,
+ 0x876fff6d, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbeee0380, 0x886e6f6e,
+ 0x876fff6d, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
0x886e6f6e, 0x876fff6d,
- 0x02000000, 0x906f996f,
- 0x8f6f8f6f, 0x886e6f6e,
- 0x876fff6d, 0x01000000,
- 0x906f986f, 0x8f6f996f,
- 0x886e6f6e, 0x876fff7a,
- 0x00800000, 0x906f976f,
- 0xb9eef807, 0x876dff6d,
- 0x0000ffff, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9faf802,
- 0xbf8a0000, 0xbe80226c,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x886e6f6e,
+ 0x876fff7a, 0x00800000,
+ 0x906f976f, 0xb9eef807,
+ 0x876dff6d, 0x0000ffff,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9faf802, 0xbe80226c,
0xbf810000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
@@ -1807,7 +1808,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x705d0000, 0x807c817c,
0x8070ff70, 0x00000080,
0xbf0a7b7c, 0xbf85fff8,
- 0xbf82013a, 0xbef4037e,
+ 0xbf82013c, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -1920,50 +1921,51 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbe883108, 0xbe8a310a,
0xbe8c310c, 0xbe8e310e,
0xbf06807c, 0xbf84fff0,
- 0xb9782a05, 0x80788178,
- 0xbf0d9972, 0xbf850002,
- 0x8f788978, 0xbf820001,
- 0x8f788a78, 0xb96e1e06,
- 0x8f6e8a6e, 0x80786e78,
- 0x8078ff78, 0x00000200,
- 0xbef603ff, 0x01000000,
- 0xf4211bfa, 0xf0000000,
- 0x80788478, 0xf4211b3a,
+ 0xba80f801, 0x00000000,
+ 0xbf8a0000, 0xb9782a05,
+ 0x80788178, 0xbf0d9972,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb96e1e06, 0x8f6e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0xbef603ff,
+ 0x01000000, 0xf4211bfa,
0xf0000000, 0x80788478,
- 0xf4211b7a, 0xf0000000,
- 0x80788478, 0xf4211c3a,
+ 0xf4211b3a, 0xf0000000,
+ 0x80788478, 0xf4211b7a,
0xf0000000, 0x80788478,
- 0xf4211c7a, 0xf0000000,
- 0x80788478, 0xf4211eba,
+ 0xf4211c3a, 0xf0000000,
+ 0x80788478, 0xf4211c7a,
0xf0000000, 0x80788478,
- 0xf4211efa, 0xf0000000,
- 0x80788478, 0xf4211e7a,
+ 0xf4211eba, 0xf0000000,
+ 0x80788478, 0xf4211efa,
0xf0000000, 0x80788478,
- 0xf4211cfa, 0xf0000000,
- 0x80788478, 0xf4211bba,
+ 0xf4211e7a, 0xf0000000,
+ 0x80788478, 0xf4211cfa,
0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef815, 0xbefc036f,
- 0xbefe0370, 0xbeff0371,
- 0x876f7bff, 0x000003ff,
- 0xb9ef4803, 0x876f7bff,
- 0xfffff800, 0x906f8b6f,
- 0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
- 0xbf0d9972, 0xbf850002,
- 0x8f6e896e, 0xbf820001,
- 0x8f6e8a6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x876fff6f,
- 0x0000ffff, 0xf4091c37,
- 0xfa000050, 0xf4091d37,
- 0xfa000060, 0xf4011e77,
- 0xfa000074, 0xbf8cc07f,
- 0x876dff6d, 0x0000ffff,
- 0x87fe7e7e, 0x87ea6a6a,
- 0xb9faf802, 0xbf8a0000,
+ 0xb9eef814, 0xf4211bba,
+ 0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef815,
+ 0xbefc036f, 0xbefe0370,
+ 0xbeff0371, 0x876f7bff,
+ 0x000003ff, 0xb9ef4803,
+ 0x876f7bff, 0xfffff800,
+ 0x906f8b6f, 0xb9efa2c3,
+ 0xb9f3f801, 0xb96e2a05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbf850002, 0x8f6e896e,
+ 0xbf820001, 0x8f6e8a6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x876fff6f, 0x0000ffff,
+ 0xf4091c37, 0xfa000050,
+ 0xf4091d37, 0xfa000060,
+ 0xf4011e77, 0xfa000074,
+ 0xbf8cc07f, 0x876dff6d,
+ 0x0000ffff, 0x87fe7e7e,
+ 0x87ea6a6a, 0xb9faf802,
0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 5b220f2a7501..5081f91190b8 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -894,6 +894,11 @@ L_RESTORE_SGPR:
s_cmp_eq_u32 m0, 0 //scc = (m0 < s_sgpr_save_num) ? 1 : 0
s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+ // s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts a debug exception.
+ // Clear DEBUG_EN before the barrier and restore MODE after it.
+ s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
+
/* restore HW registers */
L_RESTORE_HWREG:
// HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
@@ -976,8 +981,6 @@ L_RESTORE_HWREG:
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG
-
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index e9b96ad3d9a5..222f1df1a6b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -97,6 +97,7 @@ void kfd_chardev_exit(void)
device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
class_destroy(kfd_class);
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+ kfd_device = NULL;
}
struct device *kfd_chardev(void)
@@ -1254,7 +1255,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
return true;
}
- if (dev->device_info->needs_iommu_device)
+ if (dev->use_iommu_v2)
return false;
amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
@@ -1290,18 +1291,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return -EINVAL;
}
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
- if (args->size != kfd_doorbell_process_slice(dev))
- return -EINVAL;
- offset = kfd_get_process_doorbells(dev, p);
- } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
- if (args->size != PAGE_SIZE)
- return -EINVAL;
- offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
- if (!offset)
- return -ENOMEM;
- }
-
mutex_lock(&p->mutex);
pdd = kfd_bind_process_to_device(dev, p);
@@ -1310,6 +1299,24 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ offset = kfd_get_process_doorbells(pdd);
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+ if (args->size != PAGE_SIZE) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ if (!offset) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+ }
+
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
dev->kgd, args->va_addr, args->size,
pdd->vm, (struct kgd_mem **) &mem, &offset,
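The reordering in this hunk is not cosmetic: the doorbell and MMIO-remap size checks now run after mutex_lock(&p->mutex) and after kfd_bind_process_to_device(), so kfd_get_process_doorbells() can take the new pdd argument, and every failure funnels through the existing err_unlock label. A minimal sketch of the resulting pattern, with illustrative names rather than the exact KFD code:

#include <linux/mutex.h>

static int alloc_under_lock(struct mutex *lock, bool size_ok)
{
	int err = 0;

	mutex_lock(lock);
	if (!size_ok) {
		err = -EINVAL;
		goto err_unlock;	/* one unlock path for all failures */
	}
	/* ... allocation work performed while the lock is held ... */
err_unlock:
	mutex_unlock(lock);
	return err;
}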
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 6a250f8fcfb8..3de5e14c5ae3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -742,6 +742,22 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
return 0;
}
+static bool kfd_ignore_crat(void)
+{
+ bool ret;
+
+ if (ignore_crat)
+ return true;
+
+#ifndef KFD_SUPPORT_IOMMU_V2
+ ret = true;
+#else
+ ret = false;
+#endif
+
+ return ret;
+}
+
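kfd_ignore_crat() folds two reasons for skipping the ACPI CRAT into one predicate: the existing ignore_crat module parameter, and kernels built without KFD_SUPPORT_IOMMU_V2 (an internal #define, not a Kconfig symbol, so IS_ENABLED() is not applicable). A condensed sketch with the same compile-time behaviour:

static bool kfd_ignore_crat_sketch(void)
{
#ifdef KFD_SUPPORT_IOMMU_V2
	return ignore_crat;	/* only the module parameter decides */
#else
	return true;		/* no IOMMU v2: always ignore the ACPI CRAT */
#endif
}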
/*
* kfd_create_crat_image_acpi - Allocates memory for CRAT image and
* copies CRAT from ACPI (if available).
@@ -776,15 +792,16 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
return -EINVAL;
}
- if (ignore_crat) {
+ if (kfd_ignore_crat()) {
pr_info("CRAT table disabled by module option\n");
return -ENODATA;
}
- pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
+ pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
+ memcpy(pcrat_image, crat_table, crat_table->length);
*crat_image = pcrat_image;
*size = crat_table->length;
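The switch from kmemdup() to kvmalloc() plus memcpy() matters for large CRAT tables: kvmalloc() falls back to vmalloc when physically contiguous pages are scarce, and the matching kfd_destroy_crat_image() change below releases the buffer with kvfree(). The pair is equivalent to a helper along these lines (hypothetical name, a sketch only):

static void *kv_memdup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kvmalloc(len, gfp);	/* may fall back to vmalloc */

	if (p)
		memcpy(p, src, len);
	return p;			/* must be released with kvfree() */
}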
@@ -793,11 +810,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
/* Memory required to create Virtual CRAT.
* Since there is no easy way to predict the amount of memory required, the
- * following amount are allocated for CPU and GPU Virtual CRAT. This is
+ * following amount is allocated for GPU Virtual CRAT. This is
* expected to cover all known conditions. But to be safe additional check
* is put in the code to ensure we don't overwrite.
*/
-#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
@@ -948,7 +964,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
#endif
int ret = 0;
- if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+ if (!pcrat_image)
return -EINVAL;
/* Fill in CRAT Header.
@@ -1348,30 +1364,37 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
uint32_t proximity_domain)
{
void *pcrat_image = NULL;
- int ret = 0;
+ int ret = 0, num_nodes;
+ size_t dyn_size;
if (!crat_image)
return -EINVAL;
*crat_image = NULL;
- /* Allocate one VCRAT_SIZE_FOR_CPU for CPU virtual CRAT image and
- * VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image. This should cover
- * all the current conditions. A check is put not to overwrite beyond
- * allocated size
+ /* Allocate the CPU Virtual CRAT size based on the number of online
+ * nodes. Allocate VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image.
+ * This should cover all the current conditions. A check is put not
+ * to overwrite beyond allocated size for GPUs
*/
switch (flags) {
case COMPUTE_UNIT_CPU:
- pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
+ num_nodes = num_online_nodes();
+ dyn_size = sizeof(struct crat_header) +
+ num_nodes * (sizeof(struct crat_subtype_computeunit) +
+ sizeof(struct crat_subtype_memory) +
+ (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
+ pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
- *size = VCRAT_SIZE_FOR_CPU;
+ *size = dyn_size;
+ pr_debug("CRAT size is %ld", dyn_size);
ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
break;
case COMPUTE_UNIT_GPU:
if (!kdev)
return -EINVAL;
- pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
+ pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
*size = VCRAT_SIZE_FOR_GPU;
@@ -1390,7 +1413,7 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
if (!ret)
*crat_image = pcrat_image;
else
- kfree(pcrat_image);
+ kvfree(pcrat_image);
return ret;
}
@@ -1403,5 +1426,5 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
*/
void kfd_destroy_crat_image(void *crat_image)
{
- kfree(crat_image);
+ kvfree(crat_image);
}
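A quick check of the dyn_size formula introduced above, using two online NUMA nodes and illustrative struct sizes (not the real ones): each node contributes one compute-unit subtype and one memory subtype, plus one IO link to each of the other nodes.

/* num_nodes = 2:
 *   dyn_size = header + 2 * (computeunit + memory + (2 - 1) * iolink)
 * with hypothetical sizes header = 40, cu = 40, mem = 40, iolink = 32:
 *   dyn_size = 40 + 2 * (40 + 40 + 32) = 264 bytes
 */
size_t dyn_size = sizeof(struct crat_header) +
		  num_nodes * (sizeof(struct crat_subtype_computeunit) +
			       sizeof(struct crat_subtype_memory) +
			       (num_nodes - 1) * sizeof(struct crat_subtype_iolink));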
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 27bcc5b472f6..b258a3dae767 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -45,7 +45,7 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
}
static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
- unsigned int pasid, uint64_t vmid0_address,
+ u32 pasid, uint64_t vmid0_address,
uint32_t *packet_buff, size_t size_in_bytes)
{
struct pm4__release_mem *rm_packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
index a04a1fe1d0d9..f9c6df1fdc5c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -275,7 +275,7 @@ struct kfd_dbgdev {
};
struct kfd_dbgmgr {
- unsigned int pasid;
+ u32 pasid;
struct kfd_dev *dev;
struct kfd_dbgdev *dbgdev;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4bfedaab183f..903170e59342 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -29,6 +29,7 @@
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
+#include "kfd_smi_events.h"
#define MQD_SIZE_ALIGNED 768
@@ -115,6 +116,7 @@ static const struct kfd_device_info carrizo_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
+#endif
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
@@ -133,7 +135,6 @@ static const struct kfd_device_info raven_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
-#endif
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
@@ -502,8 +503,8 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
[CHIP_KAVERI] = {&kaveri_device_info, NULL},
[CHIP_CARRIZO] = {&carrizo_device_info, NULL},
- [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
[CHIP_HAWAII] = {&hawaii_device_info, NULL},
[CHIP_TONGA] = {&tonga_device_info, NULL},
[CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
@@ -582,6 +583,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
atomic_set(&kfd->sram_ecc_flag, 0);
+ ida_init(&kfd->doorbell_ida);
+
return kfd;
}
@@ -711,11 +714,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_doorbell_error;
}
- if (kfd->kfd2kgd->get_hive_id)
- kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
+ kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd);
+
+ kfd->unique_id = amdgpu_amdkfd_get_unique_id(kfd->kgd);
- if (kfd->kfd2kgd->get_unique_id)
- kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+ kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
@@ -737,6 +740,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto gws_error;
}
+ /* If CRAT is broken, do not enable IOMMU v2 support */
+ kfd_double_confirm_iommu_support(kfd);
+
if (kfd_iommu_device_init(kfd)) {
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
@@ -796,6 +802,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
+ ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
if (kfd->gws)
@@ -810,6 +817,8 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
if (!kfd->init_complete)
return 0;
+ kfd_smi_event_update_gpu_reset(kfd, false);
+
kfd->dqm->ops.pre_reset(kfd->dqm);
kgd2kfd_suspend(kfd, false);
@@ -838,6 +847,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
atomic_set(&kfd->sram_ecc_flag, 0);
+ kfd_smi_event_update_gpu_reset(kfd, true);
+
return 0;
}
@@ -1245,6 +1256,12 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+ if (kfd)
+ kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 0f4508b4903e..c0ae04a08625 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -40,7 +40,7 @@
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
- unsigned int pasid, unsigned int vmid);
+ u32 pasid, unsigned int vmid);
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
@@ -153,30 +153,6 @@ static void decrement_queue_count(struct device_queue_manager *dqm,
dqm->active_cp_queue_count--;
}
-int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val)
-{
- int ret;
- uint64_t tmp = 0;
-
- if (!val)
- return -EINVAL;
- /*
- * SDMA activity counter is stored at queue's RPTR + 0x8 location.
- */
- if (!access_ok((const void __user *)(q_rptr +
- sizeof(uint64_t)), sizeof(uint64_t))) {
- pr_err("Can't access sdma queue activity counter\n");
- return -EFAULT;
- }
-
- ret = get_user(tmp, (uint64_t *)(q_rptr + sizeof(uint64_t)));
- if (!ret) {
- *val = tmp;
- }
-
- return ret;
-}
-
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
@@ -215,9 +191,8 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
}
q->properties.doorbell_off =
- kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
+ kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
q->doorbell_id);
-
return 0;
}
@@ -552,7 +527,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
- retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
+ retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
pr_err("Failed to read SDMA queue counter for queue: %d\n",
@@ -674,9 +649,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+ pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
+ pdd->last_evict_timestamp = get_jiffies_64();
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
@@ -724,7 +700,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID 0x%x queues\n",
+ pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -738,6 +714,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
}
+ pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
@@ -756,6 +733,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint64_t pd_base;
+ uint64_t eviction_duration;
int retval, ret = 0;
pdd = qpd_to_pdd(qpd);
@@ -770,7 +748,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+ pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -823,6 +801,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
ret = retval;
}
qpd->evicted = 0;
+ eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+ atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
if (mm)
mmput(mm);
@@ -836,6 +816,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
struct queue *q;
struct kfd_process_device *pdd;
uint64_t pd_base;
+ uint64_t eviction_duration;
int retval = 0;
pdd = qpd_to_pdd(qpd);
@@ -850,7 +831,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID 0x%x queues\n",
+ pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -869,6 +850,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
qpd->evicted = 0;
+ eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
+ atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
dqm_unlock(dqm);
return retval;
@@ -948,7 +931,7 @@ out:
}
static int
-set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
+set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
@@ -1475,7 +1458,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
- retval = read_sdma_queue_counter((uint64_t)q->properties.read_ptr,
+ retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
pr_err("Failed to read SDMA queue counter for queue: %d\n",
@@ -1981,8 +1964,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
-int kfd_process_vm_fault(struct device_queue_manager *dqm,
- unsigned int pasid)
+int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid)
{
struct kfd_process_device *pdd;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -1990,6 +1972,7 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
if (!p)
return -EINVAL;
+ WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
pdd = kfd_get_process_device_data(dqm->dev, p);
if (pdd)
ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 49d8e324c636..16262e5d93f5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -251,5 +251,11 @@ static inline void dqm_unlock(struct device_queue_manager *dqm)
mutex_unlock(&dqm->lock_hidden);
}
-int read_sdma_queue_counter(uint64_t q_rptr, uint64_t *val);
+static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
+{
+ /*
+ * SDMA activity counter is stored at queue's RPTR + 0x8 location.
+ */
+ return get_user(*val, q_rptr + 1);
+}
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
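The q_rptr + 1 in the new inline is plain pointer arithmetic: q_rptr is a uint64_t __user pointer, so adding one advances by sizeof(uint64_t) = 8 bytes, exactly the "RPTR + 0x8" location the comment names. get_user() also performs the access_ok() validation that the removed open-coded helper did by hand, which is why the explicit check could be dropped. In sketch form:

uint64_t __user *q_rptr;	/* queue read pointer */
/* (char __user *)(q_rptr + 1) == (char __user *)q_rptr + 8 */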
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 72e4d61ac752..ad0593342333 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -58,8 +58,9 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+ (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
#if 0
/* TODO:
* This shouldn't be an issue with Navi10. Verify.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 95a82ac455f2..eca6331efa94 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -61,8 +61,8 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (amdgpu_noretry &&
- !dqm->dev->device_info->needs_iommu_device)
+ if (dqm->dev->noretry &&
+ !dqm->dev->use_iommu_v2)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 8e0c00b9555e..768d153acff4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -31,9 +31,6 @@
* kernel queues using the first doorbell page reserved for the kernel.
*/
-static DEFINE_IDA(doorbell_ida);
-static unsigned int max_doorbell_slices;
-
/*
* Each device exposes a doorbell aperture, a PCI MMIO aperture that
* receives 32-bit writes that are passed to queues as wptr values.
@@ -84,9 +81,9 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
else
return -ENOSPC;
- if (!max_doorbell_slices ||
- doorbell_process_limit < max_doorbell_slices)
- max_doorbell_slices = doorbell_process_limit;
+ if (!kfd->max_doorbell_slices ||
+ doorbell_process_limit < kfd->max_doorbell_slices)
+ kfd->max_doorbell_slices = doorbell_process_limit;
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
@@ -130,6 +127,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
+ struct kfd_process_device *pdd;
/*
* For simplicity we only allow mapping of the entire doorbell
@@ -138,9 +136,12 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
return -EINVAL;
- /* Calculate physical address of doorbell */
- address = kfd_get_process_doorbells(dev, process);
+ pdd = kfd_get_process_device_data(dev, process);
+ if (!pdd)
+ return -EINVAL;
+ /* Calculate physical address of doorbell */
+ address = kfd_get_process_doorbells(pdd);
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
@@ -226,7 +227,7 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
- struct kfd_process *process,
+ struct kfd_process_device *pdd,
unsigned int doorbell_id)
{
/*
@@ -236,7 +237,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
* units regardless of the ASIC-dependent doorbell size.
*/
return kfd->doorbell_base_dw_offset +
- process->doorbell_index
+ pdd->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
}
@@ -251,25 +252,24 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
}
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
- struct kfd_process *process)
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
- return dev->doorbell_base +
- process->doorbell_index * kfd_doorbell_process_slice(dev);
+ return pdd->dev->doorbell_base +
+ pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
}
-int kfd_alloc_process_doorbells(struct kfd_process *process)
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index)
{
- int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+ int r = ida_simple_get(&kfd->doorbell_ida, 1, kfd->max_doorbell_slices,
GFP_KERNEL);
if (r > 0)
- process->doorbell_index = r;
+ *doorbell_index = r;
return r;
}
-void kfd_free_process_doorbells(struct kfd_process *process)
+void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index)
{
- if (process->doorbell_index)
- ida_simple_remove(&doorbell_ida, process->doorbell_index);
+ if (doorbell_index)
+ ida_simple_remove(&kfd->doorbell_ida, doorbell_index);
}
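With the IDA moved from a file-scope global into struct kfd_dev (see the ida_init()/ida_destroy() calls added to kfd_device.c earlier in this patch), doorbell indices are allocated and recycled per device rather than globally. A minimal lifecycle sketch using the names from the hunks above:

static int doorbell_index_lifecycle(struct kfd_dev *kfd)
{
	unsigned int index;
	int r;

	r = ida_simple_get(&kfd->doorbell_ida, 1,
			   kfd->max_doorbell_slices, GFP_KERNEL);
	if (r < 0)
		return r;
	index = r;	/* stored in pdd->doorbell_index by the caller */

	/* ... index selects this process's doorbell page on this device ... */

	ida_simple_remove(&kfd->doorbell_ida, index);
	return 0;
}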
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index a9583b95fcc1..ba2c2ce0c55a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -460,7 +460,7 @@ static void set_event_from_interrupt(struct kfd_process *p,
}
}
-void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
+void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits)
{
struct kfd_event *ev = NULL;
@@ -872,7 +872,7 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
}
#ifdef KFD_SUPPORT_IOMMU_V2
-void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
+void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
unsigned long address, bool is_write_requested,
bool is_execute_requested)
{
@@ -950,7 +950,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
-void kfd_signal_hw_exception_event(unsigned int pasid)
+void kfd_signal_hw_exception_event(u32 pasid)
{
/*
* Because we are called from arbitrary context (workqueue) as opposed
@@ -971,7 +971,7 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
struct kfd_vm_fault_info *info)
{
struct kfd_event *ev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index c7ac6c73af86..c8fe5dbdad55 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -79,7 +79,7 @@ struct kfd_event {
#define KFD_EVENT_TYPE_DEBUG 5
#define KFD_EVENT_TYPE_MEMORY 8
-extern void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
- uint32_t valid_id_bits);
+extern void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
+ uint32_t valid_id_bits);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index c1166c40ac15..3c22909470f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -321,7 +321,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
- if (!pdd->dev->device_info->needs_iommu_device) {
+ if (!pdd->dev->use_iommu_v2) {
/* dGPUs: SVM aperture starting at 0
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
@@ -425,7 +425,7 @@ int kfd_init_apertures(struct kfd_process *process)
return -EINVAL;
}
- if (!dev->device_info->needs_iommu_device) {
+ if (!dev->use_iommu_v2) {
/* dGPUs: the reserved space for kernel
* before SVM
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7c8786b9eb0a..66bbca61e3ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -41,7 +41,7 @@ int kfd_iommu_check_device(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
int err;
- if (!kfd->device_info->needs_iommu_device)
+ if (!kfd->use_iommu_v2)
return -ENODEV;
iommu_info.flags = 0;
@@ -63,7 +63,7 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
unsigned int pasid_limit;
int err;
- if (!kfd->device_info->needs_iommu_device)
+ if (!kfd->use_iommu_v2)
return 0;
iommu_info.flags = 0;
@@ -109,7 +109,7 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
struct kfd_process *p = pdd->process;
int err;
- if (!dev->device_info->needs_iommu_device || pdd->bound == PDD_BOUND)
+ if (!dev->use_iommu_v2 || pdd->bound == PDD_BOUND)
return 0;
if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
@@ -139,7 +139,7 @@ void kfd_iommu_unbind_process(struct kfd_process *p)
}
/* Callback for process shutdown invoked by the IOMMU driver */
-static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
+static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
{
struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
struct kfd_process *p;
@@ -185,8 +185,8 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
}
/* This function called by IOMMU driver on PPR failure */
-static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
- unsigned long address, u16 flags)
+static int iommu_invalid_ppr_cb(struct pci_dev *pdev, u32 pasid,
+ unsigned long address, u16 flags)
{
struct kfd_dev *dev;
@@ -284,7 +284,7 @@ static void kfd_unbind_processes_from_device(struct kfd_dev *kfd)
*/
void kfd_iommu_suspend(struct kfd_dev *kfd)
{
- if (!kfd->device_info->needs_iommu_device)
+ if (!kfd->use_iommu_v2)
return;
kfd_unbind_processes_from_device(kfd);
@@ -304,7 +304,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
unsigned int pasid_limit;
int err;
- if (!kfd->device_info->needs_iommu_device)
+ if (!kfd->use_iommu_v2)
return 0;
pasid_limit = kfd_get_pasid_limit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index f4b7f7e6c40e..5e90fe642192 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -70,6 +70,7 @@ err_create_wq:
err_topology:
kfd_chardev_exit();
err_ioctl:
+ pr_err("KFD is disabled due to module initialization failure\n");
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 2a07c4f2cd0d..af5816f51e55 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -51,7 +51,7 @@ unsigned int kfd_get_pasid_limit(void)
return 1U << pasid_bits;
}
-unsigned int kfd_pasid_alloc(void)
+u32 kfd_pasid_alloc(void)
{
int r = amdgpu_pasid_alloc(pasid_bits);
@@ -63,7 +63,7 @@ unsigned int kfd_pasid_alloc(void)
return 0;
}
-void kfd_pasid_free(unsigned int pasid)
+void kfd_pasid_free(u32 pasid)
{
amdgpu_pasid_free(pasid);
}
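The unsigned int to u32 conversion running through this patch is about explicitness, not behaviour: PCIe PASIDs are at most 20 bits wide, so a fixed-width 32-bit type holds them on every architecture. A compile-time restatement of that bound (constant name is hypothetical):

#include <linux/build_bug.h>
#include <linux/types.h>

#define KFD_PASID_MAX_BITS	20	/* PCIe spec: PASID field is up to 20 bits */

static_assert(KFD_PASID_MAX_BITS <= 8 * sizeof(u32),
	      "a u32 always holds a PASID");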
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6727e9de5b8b..c77cf23032ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -297,6 +297,9 @@ struct kfd_dev {
bool pci_atomic_requested;
+ /* Use IOMMU v2 flag */
+ bool use_iommu_v2;
+
/* SRAM ECC flag */
atomic_t sram_ecc_flag;
@@ -309,6 +312,13 @@ struct kfd_dev {
/* Clients watching SMI events */
struct list_head smi_clients;
spinlock_t smi_lock;
+
+ uint32_t reset_seq_num;
+
+ struct ida doorbell_ida;
+ unsigned int max_doorbell_slices;
+
+ int noretry;
};
enum kfd_mempool {
@@ -626,7 +636,7 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
-#define MAX_SYSFS_FILENAME_LEN 11
+#define MAX_SYSFS_FILENAME_LEN 15
/*
* SDMA counter runs at 100MHz frequency.
@@ -687,6 +697,39 @@ struct kfd_process_device {
uint64_t sdma_past_activity_counter;
struct attribute attr_sdma;
char sdma_filename[MAX_SYSFS_FILENAME_LEN];
+
+ /* Eviction activity tracking */
+ uint64_t last_evict_timestamp;
+ atomic64_t evict_duration_counter;
+ struct attribute attr_evict;
+
+ struct kobject *kobj_stats;
+ unsigned int doorbell_index;
+
+ /*
+ * @cu_occupancy: Reports occupancy of Compute Units (CUs) by a process
+ * that is associated with the device encoded by "this" struct instance.
+ * The value reflects CU usage by all of the waves launched by this
+ * process on this device. An important property of the occupancy
+ * parameter is that its value is a snapshot of current use.
+ *
+ * The following should be noted about how this parameter is reported:
+ *
+ * The number of waves that a CU can launch is limited by a couple of
+ * parameters. These are encoded by the struct amdgpu_cu_info instance
+ * that is part of every device definition. For GFX9 devices this
+ * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
+ * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
+ * when they do use scratch memory. This could change for future
+ * devices and therefore this example should be considered as a guide.
+ *
+ * All CUs of a device are available to the process. This may not be true
+ * under certain conditions - e.g. CU masking.
+ *
+ * Finally, the number of CUs occupied by a process is affected by both the
+ * number of CUs the device has and the number of other competing processes.
+ */
+ struct attribute attr_cu_occupancy;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -723,8 +766,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- uint16_t pasid;
- unsigned int doorbell_index;
+ u32 pasid;
/*
* List of kfd_process_device structures,
@@ -800,7 +842,7 @@ int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
-struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
@@ -841,8 +883,8 @@ int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
-unsigned int kfd_pasid_alloc(void);
-void kfd_pasid_free(unsigned int pasid);
+u32 kfd_pasid_alloc(void);
+void kfd_pasid_free(u32 pasid);
/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
@@ -857,13 +899,13 @@ u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
- struct kfd_process *process,
+ struct kfd_process_device *pdd,
unsigned int doorbell_id);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
- struct kfd_process *process);
-int kfd_alloc_process_doorbells(struct kfd_process *process);
-void kfd_free_process_doorbells(struct kfd_process *process);
-
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
+ unsigned int *doorbell_index);
+void kfd_free_process_doorbells(struct kfd_dev *kfd,
+ unsigned int doorbell_index);
/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
@@ -892,6 +934,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
+void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
@@ -927,7 +970,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
-int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid);
/* Process Queue Manager */
struct process_queue_node {
@@ -1049,12 +1092,12 @@ int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t user_timeout_ms,
uint32_t *wait_result);
-void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
+void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
- unsigned int pasid, unsigned long address,
- bool is_write_requested, bool is_execute_requested);
-void kfd_signal_hw_exception_event(unsigned int pasid);
+ u32 pasid, unsigned long address,
+ bool is_write_requested, bool is_execute_requested);
+void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
@@ -1065,7 +1108,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
struct kfd_vm_fault_info *info);
void kfd_signal_reset_event(struct kfd_dev *dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 40695d52e9a8..65803e153a22 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -87,7 +87,7 @@ struct kfd_sdma_activity_handler_workarea {
};
struct temp_sdma_queue_list {
- uint64_t rptr;
+ uint64_t __user *rptr;
uint64_t sdma_val;
unsigned int queue_id;
struct list_head list;
@@ -159,7 +159,7 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
}
INIT_LIST_HEAD(&sdma_q->list);
- sdma_q->rptr = (uint64_t)q->properties.read_ptr;
+ sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
sdma_q->queue_id = q->properties.queue_id;
list_add_tail(&sdma_q->list, &sdma_q_list.list);
}
@@ -218,7 +218,7 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
continue;
list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
- if (((uint64_t)q->properties.read_ptr == sdma_q->rptr) &&
+ if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
(sdma_q->queue_id == q->properties.queue_id)) {
list_del(&sdma_q->list);
kfree(sdma_q);
@@ -249,6 +249,52 @@ cleanup:
}
}
+/**
+ * kfd_get_cu_occupancy() - Collect the number of waves in flight on this
+ * device by the current process and translate that wave count into the
+ * number of compute units that are occupied.
+ *
+ * @attr: Handle of the attribute that allows reporting of wave count. The
+ * attribute handle encapsulates the GPU device it is associated with,
+ * thereby allowing collection of waves in flight, etc.
+ *
+ * @buffer: Handle of the user-provided buffer updated with the wave count
+ *
+ * Return: Number of bytes written to user buffer or an error value
+ */
+static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+{
+ int cu_cnt;
+ int wave_cnt;
+ int max_waves_per_cu;
+ struct kfd_dev *dev = NULL;
+ struct kfd_process *proc = NULL;
+ struct kfd_process_device *pdd = NULL;
+
+ pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+ dev = pdd->dev;
+ if (dev->kfd2kgd->get_cu_occupancy == NULL)
+ return -EINVAL;
+
+ cu_cnt = 0;
+ proc = pdd->process;
+ if (pdd->qpd.queue_count == 0) {
+ pr_debug("Gpu-Id: %d has no active queues for process %d\n",
+ dev->id, proc->pasid);
+ return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+ }
+
+ /* Collect wave count from the device if it supports this query */
+ wave_cnt = 0;
+ max_waves_per_cu = 0;
+ dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
+ &max_waves_per_cu);
+
+ /* Translate wave count to number of compute units */
+ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
+ return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+}
+
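The final translation in kfd_get_cu_occupancy() is an integer ceiling divide. With illustrative numbers (not taken from the source, and assuming max_waves_per_cu > 0, as the kfd2kgd callback is expected to guarantee):

static int waves_to_cus(int wave_cnt, int max_waves_per_cu)
{
	/* e.g. wave_cnt = 85, max_waves_per_cu = 40:
	 *   (85 + 39) / 40 = 124 / 40 = 3 occupied CUs
	 */
	return (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
}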
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
@@ -270,6 +316,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
kfd_sdma_activity_worker);
sdma_activity_work_handler.pdd = pdd;
+ sdma_activity_work_handler.sdma_activity_counter = 0;
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
@@ -344,6 +391,32 @@ static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
return 0;
}
+static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ if (strcmp(attr->name, "evicted_ms") == 0) {
+ struct kfd_process_device *pdd = container_of(attr,
+ struct kfd_process_device,
+ attr_evict);
+ uint64_t evict_jiffies;
+
+ evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
+
+ return snprintf(buffer,
+ PAGE_SIZE,
+ "%llu\n",
+ jiffies64_to_msecs(evict_jiffies));
+
+ /* Sysfs handle that gets CU occupancy is per device */
+ } else if (strcmp(attr->name, "cu_occupancy") == 0) {
+ return kfd_get_cu_occupancy(attr, buffer);
+ } else {
+ pr_err("Invalid attribute");
+ }
+
+ return 0;
+}
+
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
@@ -375,6 +448,19 @@ static struct kobj_type procfs_queue_type = {
.default_attrs = procfs_queue_attrs,
};
+static const struct sysfs_ops procfs_stats_ops = {
+ .show = kfd_procfs_stats_show,
+};
+
+static struct attribute *procfs_stats_attrs[] = {
+ NULL
+};
+
+static struct kobj_type procfs_stats_type = {
+ .sysfs_ops = &procfs_stats_ops,
+ .default_attrs = procfs_stats_attrs,
+};
+
int kfd_procfs_add_queue(struct queue *q)
{
struct kfd_process *proc;
@@ -416,6 +502,72 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
return ret;
}
+static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+{
+ int ret = 0;
+ struct kfd_process_device *pdd;
+ char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->kobj)
+ return -EFAULT;
+
+ /*
+ * Create sysfs files for each GPU:
+ * - proc/<pid>/stats_<gpuid>/
+ * - proc/<pid>/stats_<gpuid>/evicted_ms
+ * - proc/<pid>/stats_<gpuid>/cu_occupancy
+ */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ struct kobject *kobj_stats;
+
+ snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
+ "stats_%u", pdd->dev->id);
+ kobj_stats = kfd_alloc_struct(kobj_stats);
+ if (!kobj_stats)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(kobj_stats,
+ &procfs_stats_type,
+ p->kobj,
+ stats_dir_filename);
+
+ if (ret) {
+ pr_warn("Creating KFD proc/stats_%s folder failed",
+ stats_dir_filename);
+ kobject_put(kobj_stats);
+ goto err;
+ }
+
+ pdd->kobj_stats = kobj_stats;
+ pdd->attr_evict.name = "evicted_ms";
+ pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_evict);
+ ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
+ if (ret)
+ pr_warn("Creating eviction stats for gpuid %d failed",
+ (int)pdd->dev->id);
+
+ /* Add sysfs file to report compute unit occupancy */
+ if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
+ pdd->attr_cu_occupancy.name = "cu_occupancy";
+ pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_cu_occupancy);
+ ret = sysfs_create_file(kobj_stats,
+ &pdd->attr_cu_occupancy);
+ if (ret)
+ pr_warn("Creating %s failed for gpuid: %d",
+ pdd->attr_cu_occupancy.name,
+ (int)pdd->dev->id);
+ }
+ }
+err:
+ return ret;
+}
+
+
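Every object created here has a mirror-image teardown in kfd_process_wq_release() later in this patch; note that the remove calls must target the kobject the files were created under (pdd->kobj_stats), not the parent p->kobj. A sketch of the pairing:

static void stats_teardown_sketch(struct kfd_process_device *pdd)
{
	sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
	if (pdd->dev->kfd2kgd->get_cu_occupancy)
		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_cu_occupancy);
	kobject_del(pdd->kobj_stats);	/* removes the stats_<gpuid> dir */
	kobject_put(pdd->kobj_stats);	/* drops the initial reference */
}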
static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
int ret = 0;
@@ -451,7 +603,6 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
return ret;
}
-
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@@ -659,6 +810,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
+ ret = kfd_procfs_add_sysfs_stats(process);
+ if (ret)
+ pr_warn("Creating sysfs stats dir for pid %d failed",
+ (int)process->lead_thread->pid);
+
ret = kfd_procfs_add_sysfs_files(process);
if (ret)
pr_warn("Creating sysfs usage file for pid %d failed",
@@ -780,6 +936,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
+ kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+
/*
* before destroying pdd, make sure to report availability
* for auto suspend
@@ -815,6 +973,12 @@ static void kfd_process_wq_release(struct work_struct *work)
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
sysfs_remove_file(p->kobj, &pdd->attr_vram);
sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+ sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
+ if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
+ sysfs_remove_file(pdd->kobj_stats, &pdd->attr_cu_occupancy);
+ kobject_del(pdd->kobj_stats);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
}
kobject_del(p->kobj);
@@ -832,8 +996,6 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
- kfd_free_process_doorbells(p);
-
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
@@ -1011,9 +1173,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (process->pasid == 0)
goto err_alloc_pasid;
- if (kfd_alloc_process_doorbells(process) < 0)
- goto err_alloc_doorbells;
-
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
@@ -1041,8 +1200,6 @@ err_register_notifier:
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- kfd_free_process_doorbells(process);
-err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
mutex_destroy(&process->mutex);
@@ -1105,10 +1262,14 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
+ if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+ pr_err("Failed to alloc doorbell for pdd\n");
+ goto err_free_pdd;
+ }
+
if (init_doorbell_bitmap(&pdd->qpd, dev)) {
pr_err("Failed to init doorbell for process\n");
- kfree(pdd);
- return NULL;
+ goto err_free_pdd;
}
pdd->dev = dev;
@@ -1124,12 +1285,17 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
pdd->sdma_past_activity_counter = 0;
+ atomic64_set(&pdd->evict_duration_counter, 0);
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
return pdd;
+
+err_free_pdd:
+ kfree(pdd);
+ return NULL;
}
/**
@@ -1306,7 +1472,7 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
}
/* This increments the process->ref counter. */
-struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
struct kfd_process *p, *ret_p = NULL;
unsigned int temp;
@@ -1487,6 +1653,7 @@ void kfd_suspend_all_processes(void)
unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
+ WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 7b348bf9df21..17d1736367ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/anon_inodes.h>
#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"
@@ -148,15 +149,94 @@ static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
return 0;
}
+static void add_event_to_kfifo(struct kfd_dev *dev, unsigned int smi_event,
+ char *event_msg, int len)
+{
+ struct kfd_smi_client *client;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(client, &dev->smi_clients, list) {
+ if (!(READ_ONCE(client->events) &
+ KFD_SMI_EVENT_MASK_FROM_INDEX(smi_event)))
+ continue;
+ spin_lock(&client->lock);
+ if (kfifo_avail(&client->fifo) >= len) {
+ kfifo_in(&client->fifo, event_msg, len);
+ wake_up_all(&client->wait_queue);
+ } else {
+ pr_debug("smi_event(EventID: %u): no space left\n",
+ smi_event);
+ }
+ spin_unlock(&client->lock);
+ }
+
+ rcu_read_unlock();
+}
+
+void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset)
+{
+ /*
+ * GpuReset msg = Reset seq number (incremented for
+ * every reset message sent before GPU reset).
+ * 1 byte event + 1 byte space + 8 bytes seq num +
+ * 1 byte \n + 1 byte \0 = 12
+ */
+ char fifo_in[12];
+ int len;
+ unsigned int event;
+
+ if (list_empty(&dev->smi_clients))
+ return;
+
+ memset(fifo_in, 0x0, sizeof(fifo_in));
+
+ if (post_reset) {
+ event = KFD_SMI_EVENT_GPU_POST_RESET;
+ } else {
+ event = KFD_SMI_EVENT_GPU_PRE_RESET;
+ ++(dev->reset_seq_num);
+ }
+
+ len = snprintf(fifo_in, sizeof(fifo_in), "%x %x\n", event,
+ dev->reset_seq_num);
+
+ add_event_to_kfifo(dev, event, fifo_in, len);
+}
+
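The 12-byte sizing in the comment above holds as long as the event ID prints as a single hex digit, which is true for the small KFD_SMI_EVENT_* values in use here; the u32 sequence number dominates at up to eight hex digits. Restated as arithmetic:

/* "%x %x\n" worst case:
 *   1 (event, one hex digit) + 1 (' ') + 8 (u32 seq in hex)
 *   + 1 ('\n') + 1 ('\0') = 12 bytes
 */
char fifo_in[1 + 1 + 8 + 1 + 1];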
+void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+ uint32_t throttle_bitmask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
+ /*
+ * ThermalThrottle msg = throttle_bitmask(8):
+ * thermal_interrupt_count(16):
+ * 1 byte event + 1 byte space + 8 bytes throttle_bitmask +
+ * 1 byte : + 16 bytes thermal_interrupt_counter + 1 byte \n +
+ * 1 byte \0 = 29
+ */
+ char fifo_in[29];
+ int len;
+
+ if (list_empty(&dev->smi_clients))
+ return;
+
+ len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%llx\n",
+ KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
+ atomic64_read(&adev->smu.throttle_int_counter));
+
+ add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
+}
+
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd;
struct amdgpu_task_info task_info;
/* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */
- /* 16 bytes event + 1 byte space + 25 bytes msg + 1 byte \n = 43
+ /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n +
+ * 1 byte \0 = 29
*/
- char fifo_in[43];
- struct kfd_smi_client *client;
+ char fifo_in[29];
int len;
if (list_empty(&dev->smi_clients))
@@ -168,25 +248,10 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid)
if (!task_info.pid)
return;
- len = snprintf(fifo_in, 43, "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT,
+ len = snprintf(fifo_in, sizeof(fifo_in), "%x %x:%s\n", KFD_SMI_EVENT_VMFAULT,
task_info.pid, task_info.task_name);
- rcu_read_lock();
-
- list_for_each_entry_rcu(client, &dev->smi_clients, list) {
- if (!(READ_ONCE(client->events) & KFD_SMI_EVENT_VMFAULT))
- continue;
- spin_lock(&client->lock);
- if (kfifo_avail(&client->fifo) >= len) {
- kfifo_in(&client->fifo, fifo_in, len);
- wake_up_all(&client->wait_queue);
- }
- else
- pr_debug("smi_event(vmfault): no space left\n");
- spin_unlock(&client->lock);
- }
-
- rcu_read_unlock();
+ add_event_to_kfifo(dev, KFD_SMI_EVENT_VMFAULT, fifo_in, len);
}
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index a9cb218fef96..b9b0438202e2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -25,5 +25,8 @@
int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd);
void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid);
+void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
+ uint32_t throttle_bitmask);
+void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index f185f6cbc05c..2b31c3066aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -446,7 +446,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
- dev->node_props.simd_count);
+ dev->gpu ? dev->node_props.simd_count : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -1139,7 +1139,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
/* Discrete GPUs need their own topology device list
* entries. Don't assign them to CPU/APU nodes.
*/
- if (!gpu->device_info->needs_iommu_device &&
+ if (!gpu->use_iommu_v2 &&
dev->node_props.cpu_cores_count)
continue;
@@ -1239,7 +1239,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
- struct amdgpu_ras *ctx;
+ struct amdgpu_device *adev;
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1388,7 +1388,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* Overwrite ATS capability according to needs_iommu_device to fix
* potential missing corresponding bit in CRAT of BIOS.
*/
- if (dev->gpu->device_info->needs_iommu_device)
+ if (dev->gpu->use_iommu_v2)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
else
dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
@@ -1404,19 +1404,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.max_waves_per_simd = 10;
}
- ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
- if (ctx) {
- /* kfd only concerns sram ecc on GFX/SDMA and HBM ecc on UMC */
- dev->node_props.capability |=
- (((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
- ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
- HSA_CAP_SRAM_EDCSUPPORTED : 0;
- dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
- HSA_CAP_MEM_EDCSUPPORTED : 0;
-
- dev->node_props.capability |= (ctx->features != 0) ?
+ adev = (struct amdgpu_device *)(dev->gpu->kgd);
+ /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
+ dev->node_props.capability |=
+ ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+ HSA_CAP_SRAM_EDCSUPPORTED : 0;
+ dev->node_props.capability |= ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ HSA_CAP_MEM_EDCSUPPORTED : 0;
+
+ if (adev->asic_type != CHIP_VEGA10)
+ dev->node_props.capability |= (adev->ras_features != 0) ?
HSA_CAP_RASEVENTNOTIFY : 0;
- }
kfd_debug_print_topology();
@@ -1515,6 +1513,29 @@ int kfd_numa_node_to_apic_id(int numa_node_id)
return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
}
+void kfd_double_confirm_iommu_support(struct kfd_dev *gpu)
+{
+ struct kfd_topology_device *dev;
+
+ gpu->use_iommu_v2 = false;
+
+ if (!gpu->device_info->needs_iommu_device)
+ return;
+
+ down_read(&topology_lock);
+
+ /* Only use IOMMUv2 if there is an APU topology node with no GPU
+ * assigned yet. This GPU will be assigned to it.
+ */
+ list_for_each_entry(dev, &topology_device_list, list)
+ if (dev->node_props.cpu_cores_count &&
+ dev->node_props.simd_count &&
+ !dev->gpu)
+ gpu->use_iommu_v2 = true;
+
+ up_read(&topology_lock);
+}
+
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 34ae4f3a32f4..60dfdd432aba 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
@@ -31,9 +31,18 @@ config DRM_AMD_DC_HDCP
help
Choose this option if you want to support HDCP authentication.
+config DRM_AMD_DC_SI
+ bool "AMD DC support for Southern Islands ASICs"
+ default n
+ help
+ Choose this option to enable new AMD DC support for SI ASICs.
+ This includes Tahiti, Pitcairn, Cape Verde and Oland. Hainan
+ is not supported by AMD DC as it has no physical DCE6.
+
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
+ depends on KGDB
help
Choose this option if you want to hit kgdb_break in assert.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a717a4904268..e93e18c06c0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -100,6 +100,8 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -127,6 +129,42 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+ switch (link->dpcd_caps.dongle_type) {
+ case DISPLAY_DONGLE_NONE:
+ return DRM_MODE_SUBCONNECTOR_Native;
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ struct drm_connector *connector = &aconnector->base;
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (aconnector->dc_sink)
+ subconnector = get_subconnector_type(link);
+
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+
/*
* initializes drm_device display related structures, based on the information
* provided by DAL. The drm structures are: drm_crtc, drm_connector,
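update_subconnector_property() above publishes the detected dongle type through the standard DRM "subconnector" connector property, so userspace can read it like any other property. A hedged userspace sketch using libdrm (the property name and its availability are assumptions, depending on the kernel exposing it for DP):

/* Userspace sketch using libdrm; the "subconnector" property name is
 * an assumption, not verified here. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void print_subconnector(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "subconnector"))
			printf("subconnector enum value: %" PRIu64 "\n",
			       props->prop_values[i]);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}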
@@ -171,7 +209,7 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
-
+static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
/*
* dm_vblank_get_counter
@@ -192,17 +230,14 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
return 0;
else {
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
- acrtc->base.state);
-
- if (acrtc_state->stream == NULL) {
+ if (acrtc->dm_irq_params.stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
- return dc_stream_get_vblank_counter(acrtc_state->stream);
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
}
@@ -215,10 +250,8 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return -EINVAL;
else {
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
- acrtc->base.state);
- if (acrtc_state->stream == NULL) {
+ if (acrtc->dm_irq_params.stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
@@ -228,7 +261,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
* TODO rework base driver to use values directly.
* for now parse it back into reg-format
*/
- dc_stream_get_scanoutpos(acrtc_state->stream,
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
&v_blank_start,
&v_blank_end,
&h_position,
@@ -268,7 +301,7 @@ static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
int otg_inst)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
@@ -287,6 +320,14 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
}
+static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
+{
+ return acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE ||
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_FIXED;
+}
+
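The new _irq variant exists because interrupt handlers can no longer trust acrtc->base.state: atomic state is swapped during commit, so a handler dereferencing it could observe state that has not been committed yet. Instead, the commit path snapshots what the handlers need into acrtc->dm_irq_params under the DRM event_lock, and the handlers read only that. A hedged, self-contained sketch of the pattern with simplified, hypothetical types:

/* Sketch only: simplified, hypothetical types; kernel context assumed. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct irq_snapshot {
	bool vrr_active;
	int active_planes;
};

static DEFINE_SPINLOCK(example_event_lock);
static struct irq_snapshot snap;

/* Commit path: publish the parameters the IRQ handler is allowed to see. */
static void publish_irq_params(bool vrr_active, int active_planes)
{
	unsigned long flags;

	spin_lock_irqsave(&example_event_lock, flags);
	snap.vrr_active = vrr_active;
	snap.active_planes = active_planes;
	spin_unlock_irqrestore(&example_event_lock, flags);
}

/* IRQ handler: read the snapshot, never the atomic CRTC state. */
static bool example_high_irq_vrr_active(void)
{
	unsigned long flags;
	bool vrr;

	spin_lock_irqsave(&example_event_lock, flags);
	vrr = snap.vrr_active;
	spin_unlock_irqrestore(&example_event_lock, flags);

	return vrr;
}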
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
@@ -307,7 +348,6 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
- struct dm_crtc_state *acrtc_state;
uint32_t vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
@@ -320,7 +360,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
@@ -328,7 +368,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@@ -339,12 +379,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
if (!e)
WARN_ON(1);
- acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
- vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
- !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
&v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) {
/* Update to correct count and vblank timestamp if racing with
@@ -380,7 +419,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e->pipe = amdgpu_crtc->crtc_id;
- list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
e = NULL;
}
@@ -389,11 +428,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
* of pageflip completion, so last_flip_vblank is the forbidden count
* for queueing new pageflips if vsync + VRR is enabled.
*/
- amdgpu_crtc->last_flip_vblank =
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
amdgpu_crtc->crtc_id, amdgpu_crtc,
@@ -405,17 +444,17 @@ static void dm_vupdate_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
- struct dm_crtc_state *acrtc_state;
unsigned long flags;
+ int vrr_active;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
- acrtc_state = to_dm_crtc_state(acrtc->base.state);
+ vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ vrr_active);
/* Core vblank handling is done here after end of front-porch in
* vrr mode, as vblank timestamping will give valid results
@@ -423,23 +462,23 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (amdgpu_dm_vrr_active(acrtc_state)) {
+ if (vrr_active) {
drm_crtc_handle_vblank(&acrtc->base);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc_state->stream &&
+ if (acrtc->dm_irq_params.stream &&
adev->family < AMDGPU_FAMILY_AI) {
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
dc_stream_adjust_vmin_vmax(
adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
}
@@ -457,18 +496,17 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
- struct dm_crtc_state *acrtc_state;
unsigned long flags;
+ int vrr_active;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (!acrtc)
return;
- acrtc_state = to_dm_crtc_state(acrtc->base.state);
+ vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state),
- acrtc_state->active_planes);
+ vrr_active, acrtc->dm_irq_params.active_planes);
/**
* Core vblank handling at start of front-porch is only possible
@@ -476,7 +514,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
* valid results while done in front-porch. Otherwise defer it
* to dm_vupdate_high_irq after end of front-porch.
*/
- if (!amdgpu_dm_vrr_active(acrtc_state))
+ if (!vrr_active)
drm_crtc_handle_vblank(&acrtc->base);
/**
@@ -489,16 +527,18 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (adev->family < AMDGPU_FAMILY_AI)
return;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
- acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ if (acrtc->dm_irq_params.stream &&
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
mod_freesync_handle_v_update(adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
}
/*
@@ -513,7 +553,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
*/
if (adev->family >= AMDGPU_FAMILY_RV &&
acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
- acrtc_state->active_planes == 0) {
+ acrtc->dm_irq_params.active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
acrtc->event = NULL;
@@ -522,7 +562,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
acrtc->pflip_status = AMDGPU_FLIP_NONE;
}
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static int dm_set_clockgating_state(void *handle,
@@ -544,8 +584,8 @@ static int dm_early_init(void* handle);
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct dm_comressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
unsigned long max_size = 0;
@@ -586,7 +626,7 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
unsigned char *buf, int max_bytes)
{
struct drm_device *dev = dev_get_drvdata(kdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct amdgpu_dm_connector *aconnector;
@@ -625,7 +665,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev,
struct device *hda_kdev, void *data)
{
struct drm_device *dev = dev_get_drvdata(kdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_audio_component *acomp = data;
acomp->ops = &amdgpu_dm_audio_component_ops;
@@ -639,7 +679,7 @@ static void amdgpu_dm_audio_component_unbind(struct device *kdev,
struct device *hda_kdev, void *data)
{
struct drm_device *dev = dev_get_drvdata(kdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_audio_component *acomp = data;
acomp->ops = NULL;
@@ -842,6 +882,45 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct amdgpu_dm_connector *amdgpu_dm_connector;
+ struct drm_connector_state *conn_state;
+ struct dm_crtc_state *acrtc_state;
+ struct drm_crtc_state *crtc_state;
+ struct dc_stream_state *stream;
+ struct drm_device *dev = adev_to_drm(adev);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ conn_state = connector->state;
+
+ if (!(conn_state && conn_state->crtc))
+ continue;
+
+ crtc = conn_state->crtc;
+ acrtc_state = to_dm_crtc_state(crtc->state);
+
+ if (!(acrtc_state && acrtc_state->stream))
+ continue;
+
+ stream = acrtc_state->stream;
+
+ if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
+ amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
+ amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
+ amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ /* Both helpers can fail and return ERR_PTR(); skip on error. */
+ if (IS_ERR(conn_state) || IS_ERR(crtc_state))
+ continue;
+ crtc_state->mode_changed = true;
+ }
+ }
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -850,7 +929,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#endif
int r;
- adev->dm.ddev = adev->ddev;
+ adev->dm.ddev = adev_to_drm(adev);
adev->dm.adev = adev;
/* Zero all the fields */
@@ -896,6 +975,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ init_data.flags.disable_dmcu = true;
break;
default:
break;
@@ -986,10 +1067,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* TODO: Add_display_info? */
/* TODO use dynamic cursor width */
- adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
- adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
- if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
goto error;
@@ -1066,6 +1147,12 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
const struct dmcu_firmware_header_v1_0 *hdr;
switch(adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
@@ -1184,6 +1271,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case CHIP_RENOIR:
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:
@@ -1383,9 +1472,6 @@ static int dm_late_init(void *handle)
struct dmcu *dmcu = NULL;
bool ret = true;
- if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
- return detect_mst_link_for_all_connectors(adev->ddev);
-
dmcu = adev->dm.dc->res_pool->dmcu;
for (i = 0; i < 16; i++)
@@ -1414,7 +1500,7 @@ static int dm_late_init(void *handle)
if (!ret)
return -EINVAL;
- return detect_mst_link_for_all_connectors(adev->ddev);
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
@@ -1652,7 +1738,7 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
- if (adev->in_gpu_reset) {
+ if (amdgpu_in_reset(adev)) {
mutex_lock(&dm->dc_lock);
dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
@@ -1666,9 +1752,9 @@ static int dm_suspend(void *handle)
}
WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
- s3_handle_mst(adev->ddev, true);
+ s3_handle_mst(adev_to_drm(adev), true);
amdgpu_dm_irq_suspend(adev);
@@ -1822,7 +1908,7 @@ cleanup:
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
@@ -1838,7 +1924,7 @@ static int dm_resume(void *handle)
struct dc_state *dc_state;
int i, r, j;
- if (adev->in_gpu_reset) {
+ if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
r = dm_dmub_hw_init(adev);
@@ -2044,7 +2130,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
return;
conn_base = &aconnector->base;
- adev = conn_base->dev->dev_private;
+ adev = drm_to_adev(conn_base->dev);
dm = &adev->dm;
caps = &dm->backlight_caps;
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -2095,7 +2181,6 @@ void amdgpu_dm_update_connector_after_detect(
if (aconnector->mst_mgr.mst_state == true)
return;
-
sink = aconnector->dc_link->local_sink;
if (sink)
dc_sink_retain(sink);
@@ -2222,6 +2307,8 @@ void amdgpu_dm_update_connector_after_detect(
mutex_unlock(&dev->mode_config.mutex);
+ update_subconnector_property(aconnector);
+
if (sink)
dc_sink_release(sink);
}
@@ -2233,7 +2320,7 @@ static void handle_hpd_irq(void *param)
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
#endif
/*
@@ -2366,7 +2453,7 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
union hpd_irq_data hpd_irq_data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif
@@ -2437,7 +2524,7 @@ static void handle_hpd_rx_irq(void *param)
static void register_hpd_handlers(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct amdgpu_dm_connector *aconnector;
const struct dc_link *dc_link;
@@ -2474,6 +2561,89 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling. */
+
+ /* Use VBLANK interrupt */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
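dce60_register_irq_handlers() repeats, per interrupt source, the three-step recipe spelled out in its block comment: add the source id with the base driver, translate it into a DC irq source, then attach the high-IRQ callback that the base driver will redirect into. Distilled into a standalone, stubbed sketch (hypothetical helpers, not the real amdgpu/DC API):

/* Sketch only: stubbed, hypothetical helpers; not the real amdgpu API. */
#include <stdio.h>

typedef void (*high_irq_fn)(void *ctx);

/* Step 1: tell the base driver the source exists (enable/disable hook). */
static int base_irq_add_id(int src_id) { (void)src_id; return 0; }

/* Step 2: translate a hardware source id into a DC irq source. */
static int to_dc_irq_source(int src_id) { return src_id + 100; }

/* Step 3: attach the handler the base driver will redirect into. */
static int attach_dc_handler(int dc_src, high_irq_fn fn, void *ctx)
{ (void)dc_src; (void)fn; (void)ctx; return 0; }

static void crtc_high_irq(void *ctx) { (void)ctx; }

static int register_one_source(int src_id, void *ctx)
{
	int r = base_irq_add_id(src_id);

	if (r)
		return r;
	return attach_dc_handler(to_dc_irq_source(src_id), crtc_high_irq, ctx);
}

int main(void)
{
	printf("registered ok: %d\n", register_one_source(1, NULL) == 0);
	return 0;
}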
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -2704,7 +2874,7 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state)
{
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
@@ -2724,7 +2894,7 @@ static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_obj *obj;
struct drm_private_state *new_obj_state;
@@ -2738,24 +2908,6 @@ dm_atomic_get_new_state(struct drm_atomic_state *state)
return NULL;
}
-static struct dm_atomic_state *
-dm_atomic_get_old_state(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_display_manager *dm = &adev->dm;
- struct drm_private_obj *obj;
- struct drm_private_state *old_obj_state;
- int i;
-
- for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
- if (obj->funcs == dm->atomic_obj.funcs)
- return to_dm_atomic_state(old_obj_state);
- }
-
- return NULL;
-}
-
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
@@ -2803,18 +2955,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->mode_info.mode_config_initialized = true;
- adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
- adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
@@ -2828,7 +2980,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
- drm_atomic_private_obj_init(adev->ddev,
+ drm_atomic_private_obj_init(adev_to_drm(adev),
&adev->dm.atomic_obj,
&state->base,
&dm_atomic_state_funcs);
@@ -3000,13 +3152,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
- dm->adev->ddev->primary->index);
+ adev_to_drm(dm->adev)->primary->index);
dm->backlight_dev = backlight_device_register(bl_name,
- dm->adev->ddev->dev,
- dm,
- &amdgpu_dm_backlight_ops,
- &props);
+ adev_to_drm(dm->adev)->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
if (IS_ERR(dm->backlight_dev))
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -3212,6 +3364,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ if (dce60_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#endif
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
@@ -3254,9 +3417,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- /* No userspace support. */
- dm->dc->debug.disable_tri_buf = true;
-
return 0;
fail:
kfree(aencoder);
@@ -3312,14 +3472,14 @@ static ssize_t s3_debug_store(struct device *device,
int ret;
int s3_state;
struct drm_device *drm_dev = dev_get_drvdata(device);
- struct amdgpu_device *adev = drm_dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
ret = kstrtoint(buf, 0, &s3_state);
if (ret == 0) {
if (s3_state) {
dm_resume(adev);
- drm_kms_helper_hotplug_event(adev->ddev);
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
} else
dm_suspend(adev);
}
@@ -3336,6 +3496,20 @@ static int dm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+#endif
case CHIP_BONAIRE:
case CHIP_HAWAII:
adev->mode_info.num_crtc = 6;
@@ -3432,7 +3606,7 @@ static int dm_early_init(void *handle)
*/
#if defined(CONFIG_DEBUG_KERNEL_DC)
device_create_file(
- adev->ddev->dev,
+ adev_to_drm(adev)->dev,
&dev_attr_s3_debug);
#endif
@@ -3443,21 +3617,12 @@ static bool modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
{
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return false;
-
- if (!crtc_state->enable)
- return false;
-
- return crtc_state->active;
+ return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
{
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return false;
-
- return !crtc_state->enable || !crtc_state->active;
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}
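Both rewritten predicates gate on drm_atomic_crtc_needs_modeset() and split on crtc_state->active; the separate crtc_state->enable test is dropped because the atomic core already rejects a CRTC that is active but not enabled. A standalone truth table confirming that the two are mutually exclusive and together cover exactly the modeset case (plain C, no driver types):

/* Sketch: the two predicates partition modeset-requiring commits. */
#include <stdbool.h>
#include <stdio.h>

static bool modeset_required(bool active, bool needs_modeset)
{
	return active && needs_modeset;
}

static bool modereset_required(bool active, bool needs_modeset)
{
	return !active && needs_modeset;
}

int main(void)
{
	for (int a = 0; a <= 1; a++)
		for (int m = 0; m <= 1; m++)
			printf("active=%d needs_modeset=%d -> modeset=%d modereset=%d\n",
			       a, m, modeset_required(a, m),
			       modereset_required(a, m));
	return 0;
}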
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -3530,8 +3695,17 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags, bool *tmz_surface)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
- int r = amdgpu_bo_reserve(rbo, false);
+ struct amdgpu_bo *rbo;
+ int r;
+
+ if (!amdgpu_fb) {
+ *tiling_flags = 0;
+ *tmz_surface = false;
+ return 0;
+ }
+
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
/* Don't show error message when returning -ERESTARTSYS */
@@ -3954,13 +4128,10 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct drm_crtc_state *crtc_state)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
- const struct amdgpu_framebuffer *amdgpu_fb =
- to_amdgpu_framebuffer(plane_state->fb);
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
- uint64_t tiling_flags;
int ret;
- bool tmz_surface = false;
bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
@@ -3972,15 +4143,12 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
- if (ret)
- return ret;
-
force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
- ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ dm_plane_state->tiling_flags,
&plane_info,
&dc_plane_state->address,
- tmz_surface,
+ dm_plane_state->tmz_surface,
force_disable_dcc);
if (ret)
return ret;
@@ -4562,7 +4730,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_link_get_link_cap(aconnector->dc_link));
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dsc_caps.is_dsc_supported)
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
@@ -4570,6 +4742,19 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
&stream->timing,
&stream->timing.dsc_cfg))
stream->timing.flags.DSC = 1;
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+ }
#endif
}
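The added DSC block reads as an override cascade: a debugfs force-enable knob can set the DSC flag even when dc_dsc_compute_config() left it clear, and any non-zero per-field knob then overwrites the computed value, with zero meaning "keep the computed value". The same pattern reduced to a standalone sketch (hypothetical types, not the DC structs):

/* Sketch only: hypothetical types standing in for the DC DSC config. */
#include <stdint.h>

struct dsc_cfg { uint16_t num_slices_h, num_slices_v, bits_per_pixel; };
struct dsc_overrides { uint16_t num_slices_h, num_slices_v, bits_per_pixel; };

/* Zero-valued knobs mean "keep the computed value". */
static void apply_dsc_overrides(struct dsc_cfg *cfg,
				const struct dsc_overrides *ovr)
{
	if (ovr->num_slices_h)
		cfg->num_slices_h = ovr->num_slices_h;
	if (ovr->num_slices_v)
		cfg->num_slices_v = ovr->num_slices_v;
	if (ovr->bits_per_pixel)
		cfg->bits_per_pixel = ovr->bits_per_pixel;
}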
@@ -4583,7 +4768,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
- mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
if (stream->link->psr_settings.psr_feature_enabled) {
//
// should decide stream support vsc sdp colorimetry capability
@@ -4663,7 +4849,6 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
}
state->active_planes = cur->active_planes;
- state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
@@ -4681,7 +4866,7 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int rc;
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
@@ -4697,7 +4882,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
int rc = 0;
@@ -4764,6 +4949,8 @@ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
else
connected = (aconnector->base.force == DRM_FORCE_ON);
+ update_subconnector_property(aconnector);
+
return (connected ? connector_status_connected :
connector_status_disconnected);
}
@@ -4774,7 +4961,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_connector_state *dm_old_state =
to_dm_connector_state(connector->state);
struct dm_connector_state *dm_new_state =
@@ -4829,7 +5016,7 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
uint64_t *val)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_connector_state *dm_state =
to_dm_connector_state(state);
int ret = -EINVAL;
@@ -4879,9 +5066,16 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
const struct dc_link *link = aconnector->dc_link;
- struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_display_manager *dm = &adev->dm;
+ /*
+ * Call only if mst_mgr was initialized before, since it's not done
+ * for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -5064,7 +5258,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dc_stream_state *old_stream)
{
struct drm_connector *connector = &aconnector->base;
- struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dc_stream_state *stream;
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
@@ -5328,7 +5522,7 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc *dc = adev->dm.dc;
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
int ret = -EINVAL;
@@ -5549,6 +5743,10 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
dc_plane_state_retain(dm_plane_state->dc_state);
}
+ /* Framebuffer hasn't been updated yet, so retain old flags. */
+ dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
+ dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
+
return &dm_plane_state->base;
}
@@ -5583,14 +5781,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct list_head list;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
- uint64_t tiling_flags;
uint32_t domain;
int r;
- bool tmz_surface = false;
- bool force_disable_dcc = false;
-
- dm_plane_state_old = to_dm_plane_state(plane->state);
- dm_plane_state_new = to_dm_plane_state(new_state);
if (!new_state->fb) {
DRM_DEBUG_DRIVER("No FB bound\n");
@@ -5634,27 +5826,35 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
return r;
}
- amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
-
- tmz_surface = amdgpu_bo_encrypted(rbo);
-
ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
amdgpu_bo_ref(rbo);
+ /**
+ * We don't do surface updates on planes that have been newly created,
+ * but we also don't have the afb->address during atomic check.
+ *
+ * Fill in buffer attributes depending on the address here, but only on
+ * newly created planes since they're not being used by DC yet and this
+ * won't modify global state.
+ */
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+
if (dm_plane_state_new->dc_state &&
- dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
- struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state =
+ dm_plane_state_new->dc_state;
+ bool force_disable_dcc = !plane_state->dcc.enable;
- force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
- tiling_flags, &plane_state->tiling_info,
- &plane_state->plane_size, &plane_state->dcc,
- &plane_state->address, tmz_surface,
- force_disable_dcc);
+ dm_plane_state_new->tiling_flags,
+ &plane_state->tiling_info, &plane_state->plane_size,
+ &plane_state->dcc, &plane_state->address,
+ dm_plane_state_new->tmz_surface, force_disable_dcc);
}
return 0;
@@ -5695,7 +5895,7 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state,
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct amdgpu_device *adev = plane->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
struct dc_scaling_info scaling_info;
@@ -5864,7 +6064,7 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
num_formats = get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
- res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+ res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
&dm_plane_funcs, formats, num_formats,
NULL, plane->type, NULL);
if (res)
@@ -5898,8 +6098,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
- supported_rotations);
+ if (dm->adev->asic_type >= CHIP_BONAIRE)
+ drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+ supported_rotations);
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
@@ -6169,7 +6370,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct dc_link *link,
int link_index)
{
- struct amdgpu_device *adev = dm->ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
/*
* Some of the properties below require access to state, like bpc.
@@ -6420,7 +6621,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
uint32_t link_index)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int res = drm_encoder_init(dev,
&aencoder->base,
@@ -6605,7 +6806,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
- struct amdgpu_device *adev = plane->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -6694,6 +6895,7 @@ static void update_freesync_state_on_stream(
struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket = {0};
struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
if (!new_stream)
@@ -6707,8 +6909,8 @@ static void update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- vrr_params = new_crtc_state->vrr_params;
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
if (surface) {
mod_freesync_handle_preflip(
@@ -6739,7 +6941,7 @@ static void update_freesync_state_on_stream(
&vrr_infopacket);
new_crtc_state->freesync_timing_changed |=
- (memcmp(&new_crtc_state->vrr_params.adjust,
+ (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
&vrr_params.adjust,
sizeof(vrr_params.adjust)) != 0);
@@ -6748,10 +6950,10 @@ static void update_freesync_state_on_stream(
&vrr_infopacket,
sizeof(vrr_infopacket)) != 0);
- new_crtc_state->vrr_params = vrr_params;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = new_crtc_state->vrr_params.adjust;
+ new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
@@ -6760,10 +6962,10 @@ static void update_freesync_state_on_stream(
(int)new_crtc_state->base.vrr_enabled,
(int)vrr_params.state);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
-static void pre_update_freesync_state_on_stream(
+static void update_stream_irq_parameters(
struct amdgpu_display_manager *dm,
struct dm_crtc_state *new_crtc_state)
{
@@ -6771,6 +6973,7 @@ static void pre_update_freesync_state_on_stream(
struct mod_vrr_params vrr_params;
struct mod_freesync_config config = new_crtc_state->freesync_config;
struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
if (!new_stream)
@@ -6783,8 +6986,8 @@ static void pre_update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- vrr_params = new_crtc_state->vrr_params;
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
if (new_crtc_state->vrr_supported &&
config.min_refresh_in_uhz &&
@@ -6801,12 +7004,15 @@ static void pre_update_freesync_state_on_stream(
&config, &vrr_params);
new_crtc_state->freesync_timing_changed |=
- (memcmp(&new_crtc_state->vrr_params.adjust,
- &vrr_params.adjust,
- sizeof(vrr_params.adjust)) != 0);
+ (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
+ &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
- new_crtc_state->vrr_params = vrr_params;
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ new_crtc_state->freesync_config = config;
+ /* Copy state for access from DM IRQ handler */
+ acrtc->dm_irq_params.freesync_config = config;
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
@@ -6876,8 +7082,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
long r;
unsigned long flags;
struct amdgpu_bo *abo;
- uint64_t tiling_flags;
- bool tmz_surface = false;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
@@ -6961,28 +7165,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
- /*
- * TODO This might fail and hence is better not used; wait
- * explicitly on fences instead. In general this should be called
- * for blocking commits, as per the framework helpers.
- */
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- DRM_ERROR("failed to reserve buffer before flip\n");
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
-
- tmz_surface = amdgpu_bo_encrypted(abo);
-
- amdgpu_bo_unreserve(abo);
-
fill_dc_plane_info_and_addr(
- dm->adev, new_plane_state, tiling_flags,
+ dm->adev, new_plane_state,
+ dm_new_plane_state->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- tmz_surface,
- false);
+ dm_new_plane_state->tmz_surface, false);
DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -7047,7 +7235,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* on late submission of flips.
*/
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
- last_flip_vblank = acrtc_attach->last_flip_vblank;
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
@@ -7131,7 +7319,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
dc_stream_adjust_vmin_vmax(
dm->dc, acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
mutex_lock(&dm->dc_lock);
@@ -7160,9 +7348,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* on some ASICs).
*/
if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
- dm_update_pflip_irq_state(
- (struct amdgpu_device *)dev->dev_private,
- acrtc_attach);
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
@@ -7192,7 +7379,7 @@ cleanup:
static void amdgpu_dm_commit_audio(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
@@ -7282,34 +7469,6 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct amdgpu_device *adev = dev->dev_private;
- int i;
-
- /*
- * We evade vblank and pflip interrupts on CRTCs that are undergoing
- * a modeset, being disabled, or have no active planes.
- *
- * It's done in atomic commit rather than commit tail for now since
- * some of these interrupt handlers access the current CRTC state and
- * potentially the stream pointer itself.
- *
- * Since the atomic state is swapped within atomic commit and not within
- * commit tail, this would lead to the new state (that hasn't been committed yet)
- * being accessed from within the handlers.
- *
- * TODO: Fix this so we can do this in commit tail and not have to block
- * in atomic check.
- */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-
- if (old_crtc_state->active &&
- (!new_crtc_state->active ||
- drm_atomic_crtc_needs_modeset(new_crtc_state)))
- manage_dm_interrupts(adev, acrtc, false);
- }
/*
* Add check here for SoC's that support hardware cursor plane, to
* unset legacy_cursor_update
@@ -7331,7 +7490,7 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
@@ -7344,8 +7503,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
int crtc_disable_count = 0;
+ bool mode_set_reset_required = false;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {
@@ -7358,6 +7519,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_resource_state_copy_construct_current(dm->dc, dc_state);
}
+ for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, false);
+ dc_stream_release(dm_old_crtc_state->stream);
+ }
+ }
+
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -7420,19 +7595,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->enabled = true;
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
} else if (modereset_required(new_crtc_state)) {
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
- if (dm_old_crtc_state->stream) {
- if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
-
+ if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
- }
+ mode_set_reset_required = true;
}
} /* for_each_crtc_in_state() */
if (dc_state) {
+ /* if there is a mode set or reset, disable eDP PSR */
+ if (mode_set_reset_required)
+ amdgpu_dm_psr_disable_all(dm);
+
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -7451,7 +7628,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!status)
status = dc_stream_get_status_from_state(dc_state,
dm_new_crtc_state->stream);
-
if (!status)
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
else
@@ -7577,8 +7753,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- /* Update freesync active state. */
- pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
/* Handle vrr on->off / off->on transitions */
amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
@@ -7594,10 +7770,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
if (new_crtc_state->active &&
(!old_crtc_state->active ||
drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ dc_stream_retain(dm_new_crtc_state->stream);
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+
#ifdef CONFIG_DEBUG_FS
/**
* Frontend may have changed so reapply the CRC capture
@@ -7634,7 +7815,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
*/
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->event)
@@ -7642,7 +7823,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_crtc_state->event = NULL;
}
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/* Signal HW programming completion */
drm_atomic_helper_commit_hw_done(state);
@@ -7841,8 +8022,6 @@ static void reset_freesync_config_for_crtc(
{
new_crtc_state->vrr_supported = false;
- memset(&new_crtc_state->vrr_params, 0,
- sizeof(new_crtc_state->vrr_params));
memset(&new_crtc_state->vrr_infopacket, 0,
sizeof(new_crtc_state->vrr_infopacket));
}
@@ -7914,6 +8093,13 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto fail;
}
+ /*
+ * TODO: Check VSDB bits to decide whether this should
+ * be enabled or not.
+ */
+ new_stream->triggered_crtc_reset.enabled =
+ dm->force_timing_sync;
+
dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
ret = fill_hdr_info_packet(drm_new_conn_state,
@@ -8033,8 +8219,7 @@ skip_modeset:
* We want to do dc stream updates that do not require a
* full modeset below.
*/
- if (!(enable && aconnector && new_crtc_state->enable &&
- new_crtc_state->active))
+ if (!(enable && aconnector && new_crtc_state->active))
return 0;
/*
* Given above conditions, the dc state cannot be NULL because:
@@ -8125,6 +8310,8 @@ static bool should_reset_plane(struct drm_atomic_state *state,
* TODO: Come up with a more elegant solution for this.
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
+
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -8135,9 +8322,45 @@ static bool should_reset_plane(struct drm_atomic_state *state,
if (old_other_state->crtc != new_other_state->crtc)
return true;
- /* TODO: Remove this once we can handle fast format changes. */
- if (old_other_state->fb && new_other_state->fb &&
- old_other_state->fb->format != new_other_state->fb->format)
+ /* Src/dst size and scaling updates. */
+ if (old_other_state->src_w != new_other_state->src_w ||
+ old_other_state->src_h != new_other_state->src_h ||
+ old_other_state->crtc_w != new_other_state->crtc_w ||
+ old_other_state->crtc_h != new_other_state->crtc_h)
+ return true;
+
+ /* Rotation / mirroring updates. */
+ if (old_other_state->rotation != new_other_state->rotation)
+ return true;
+
+ /* Blending updates. */
+ if (old_other_state->pixel_blend_mode !=
+ new_other_state->pixel_blend_mode)
+ return true;
+
+ /* Alpha updates. */
+ if (old_other_state->alpha != new_other_state->alpha)
+ return true;
+
+ /* Colorspace changes. */
+ if (old_other_state->color_range != new_other_state->color_range ||
+ old_other_state->color_encoding != new_other_state->color_encoding)
+ return true;
+
+ /* Framebuffer checks fall at the end. */
+ if (!old_other_state->fb || !new_other_state->fb)
+ continue;
+
+ /* Pixel format changes can require bandwidth updates. */
+ if (old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+
+ old_dm_plane_state = to_dm_plane_state(old_other_state);
+ new_dm_plane_state = to_dm_plane_state(new_other_state);
+
+ /* Tiling and DCC changes also require bandwidth updates. */
+ if (old_dm_plane_state->tiling_flags !=
+ new_dm_plane_state->tiling_flags)
return true;
}
@@ -8217,8 +8440,7 @@ static int dm_update_plane_state(struct dc *dc,
dm_old_plane_state->dc_state,
dm_state->context)) {
- ret = EINVAL;
- return ret;
+ return -EINVAL;
}
@@ -8259,7 +8481,7 @@ static int dm_update_plane_state(struct dc *dc,
plane->base.id, new_plane_crtc->base.id);
ret = fill_dc_plane_attributes(
- new_plane_crtc->dev->dev_private,
+ drm_to_adev(new_plane_crtc->dev),
dc_new_plane_state,
new_plane_state,
new_crtc_state);
@@ -8305,169 +8527,6 @@ static int dm_update_plane_state(struct dc *dc,
return ret;
}
-static int
-dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
- struct drm_atomic_state *state,
- enum surface_update_type *out_type)
-{
- struct dc *dc = dm->dc;
- struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
- int i, j, num_plane, ret = 0;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
- struct drm_crtc *new_plane_crtc;
- struct drm_plane *plane;
-
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state, *old_crtc_state;
- struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
- struct dc_stream_status *status = NULL;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
- struct surface_info_bundle {
- struct dc_surface_update surface_updates[MAX_SURFACES];
- struct dc_plane_info plane_infos[MAX_SURFACES];
- struct dc_scaling_info scaling_infos[MAX_SURFACES];
- struct dc_flip_addrs flip_addrs[MAX_SURFACES];
- struct dc_stream_update stream_update;
- } *bundle;
-
- bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
-
- if (!bundle) {
- DRM_ERROR("Failed to allocate update bundle\n");
- /* Set type to FULL to avoid crashing in DC*/
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-
- memset(bundle, 0, sizeof(struct surface_info_bundle));
-
- new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
- old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
- num_plane = 0;
-
- if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
-
- if (!new_dm_crtc_state->stream)
- continue;
-
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
- const struct amdgpu_framebuffer *amdgpu_fb =
- to_amdgpu_framebuffer(new_plane_state->fb);
- struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
- struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
- struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
- uint64_t tiling_flags;
- bool tmz_surface = false;
-
- new_plane_crtc = new_plane_state->crtc;
- new_dm_plane_state = to_dm_plane_state(new_plane_state);
- old_dm_plane_state = to_dm_plane_state(old_plane_state);
-
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- continue;
-
- if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
-
- if (crtc != new_plane_crtc)
- continue;
-
- bundle->surface_updates[num_plane].surface =
- new_dm_plane_state->dc_state;
-
- if (new_crtc_state->mode_changed) {
- bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
- bundle->stream_update.src = new_dm_crtc_state->stream->src;
- }
-
- if (new_crtc_state->color_mgmt_changed) {
- bundle->surface_updates[num_plane].gamma =
- new_dm_plane_state->dc_state->gamma_correction;
- bundle->surface_updates[num_plane].in_transfer_func =
- new_dm_plane_state->dc_state->in_transfer_func;
- bundle->surface_updates[num_plane].gamut_remap_matrix =
- &new_dm_plane_state->dc_state->gamut_remap_matrix;
- bundle->stream_update.gamut_remap =
- &new_dm_crtc_state->stream->gamut_remap_matrix;
- bundle->stream_update.output_csc_transform =
- &new_dm_crtc_state->stream->csc_color_matrix;
- bundle->stream_update.out_transfer_func =
- new_dm_crtc_state->stream->out_transfer_func;
- }
-
- ret = fill_dc_scaling_info(new_plane_state,
- scaling_info);
- if (ret)
- goto cleanup;
-
- bundle->surface_updates[num_plane].scaling_info = scaling_info;
-
- if (amdgpu_fb) {
- ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
- if (ret)
- goto cleanup;
-
- ret = fill_dc_plane_info_and_addr(
- dm->adev, new_plane_state, tiling_flags,
- plane_info,
- &flip_addr->address, tmz_surface,
- false);
- if (ret)
- goto cleanup;
-
- bundle->surface_updates[num_plane].plane_info = plane_info;
- bundle->surface_updates[num_plane].flip_addr = flip_addr;
- }
-
- num_plane++;
- }
-
- if (num_plane == 0)
- continue;
-
- ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
- goto cleanup;
-
- old_dm_state = dm_atomic_get_old_state(state);
- if (!old_dm_state) {
- ret = -EINVAL;
- goto cleanup;
- }
-
- status = dc_stream_get_status_from_state(old_dm_state->context,
- new_dm_crtc_state->stream);
- bundle->stream_update.stream = new_dm_crtc_state->stream;
- /*
- * TODO: DC modifies the surface during this call so we need
- * to lock here - find a way to do this without locking.
- */
- mutex_lock(&dm->dc_lock);
- update_type = dc_check_update_surfaces_for_stream(
- dc, bundle->surface_updates, num_plane,
- &bundle->stream_update, status);
- mutex_unlock(&dm->dc_lock);
-
- if (update_type > UPDATE_TYPE_MED) {
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
- }
-
-cleanup:
- kfree(bundle);
-
- *out_type = update_type;
- return ret;
-}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
@@ -8508,8 +8567,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
* acquired. For full updates case which removes/adds/updates streams on one
* CRTC while flipping on another CRTC, acquiring global lock will guarantee
* that any such full update commit will wait for completion of any outstanding
- * flip using DRMs synchronization events. See
- * dm_determine_update_type_for_commit()
+ * flip using DRM's synchronization events.
*
* Note that DM adds the affected connectors for all CRTCs in state, when that
* might not seem necessary. This is because DC stream creation requires the
@@ -8521,7 +8579,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_atomic_state *dm_state = NULL;
struct dc *dc = adev->dm.dc;
struct drm_connector *connector;
@@ -8530,17 +8588,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
- enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
enum dc_status status;
int ret, i;
-
- /*
- * This bool will be set for true for any modeset/reset
- * or plane update which implies non fast surface update.
- */
bool lock_and_validation_needed = false;
+ amdgpu_check_debugfs_connector_property_change(adev, state);
+
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
goto fail;
@@ -8633,6 +8686,17 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /* Prepass for updating tiling flags on new planes. */
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
+ struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
+
+ ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
+ &new_dm_plane_state->tmz_surface);
+ if (ret)
+ goto fail;
+ }
+
	/* Remove existing planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -8721,27 +8785,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
continue;
- overall_update_type = UPDATE_TYPE_FULL;
lock_and_validation_needed = true;
}
- ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
- if (ret)
- goto fail;
-
- if (overall_update_type < update_type)
- overall_update_type = update_type;
-
- /*
- * lock_and_validation_needed was an old way to determine if we need to set
- * the global lock. Leaving it in to check if we broke any corner cases
- * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
- * lock_and_validation_needed false = UPDATE_TYPE_FAST
+ /**
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+ * DC global validation to ensure that the configuration can be applied
+ * to hardware.
+ *
+ * We currently have to stall out here in atomic_check for outstanding
+ * commits to finish in this case because our IRQ handlers reference
+ * DRM state directly - we can end up disabling interrupts too early
+ * if we don't.
+ *
+ * TODO: Remove this stall and drop DM state private objects.
*/
- if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
- WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
-
- if (overall_update_type > UPDATE_TYPE_FAST) {
+ if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
if (ret)
goto fail;
@@ -8823,7 +8883,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
- dm_new_crtc_state->update_type = (int)overall_update_type;
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
+ UPDATE_TYPE_FULL :
+ UPDATE_TYPE_FAST;
}
/* Must be success */
@@ -8872,7 +8934,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct dm_connector_state *dm_con_state = NULL;
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
bool freesync_capable = false;
if (!connector->state) {
@@ -9071,3 +9133,34 @@ static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
return dc_link_set_psr_allow_active(stream->link, false, true);
}
+
+/*
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * if psr is enabled on any stream
+ *
+ * Return: true if success
+ */
+static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
+{
+ DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
+ return dc_set_psr_allow_active(dm->dc, false);
+}
+
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int i;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (dc->current_state) {
+ for (i = 0; i < dc->current_state->stream_count; ++i)
+ dc->current_state->streams[i]
+ ->triggered_crtc_reset.enabled =
+ adev->dm.force_timing_sync;
+
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
+ dc_trigger_sync(dc, dc->current_state);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index dd1559c743c2..a8a0e8cb1a11 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -86,7 +86,7 @@ struct irq_list_head {
* @bo_ptr: Pointer to the buffer object
* @gpu_addr: MMIO gpu addr
*/
-struct dm_comressor_info {
+struct dm_compressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
@@ -148,7 +148,9 @@ struct amdgpu_dm_backlight_caps {
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @cached_dc_state: Cached state of content streams
- * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
+ * @force_timing_sync: set via debugfs. When set, indicates that all connected
+ * displays will be forced to synchronize.
*/
struct amdgpu_display_manager {
@@ -322,7 +324,7 @@ struct amdgpu_display_manager {
struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state;
- struct dm_comressor_info compressor;
+ struct dm_compressor_info compressor;
const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version;
@@ -340,6 +342,20 @@ struct amdgpu_display_manager {
* fake encoders used for DP MST.
*/
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
+ bool force_timing_sync;
+};
+
+enum dsc_clock_force_state {
+ DSC_CLK_FORCE_DEFAULT = 0,
+ DSC_CLK_FORCE_ENABLE,
+ DSC_CLK_FORCE_DISABLE,
+};
+
+struct dsc_preferred_settings {
+ enum dsc_clock_force_state dsc_force_enable;
+ uint32_t dsc_num_slices_v;
+ uint32_t dsc_num_slices_h;
+ uint32_t dsc_bits_per_pixel;
};
struct amdgpu_dm_connector {
@@ -389,6 +405,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_size;
#endif
bool force_yuv420_output;
+ struct dsc_preferred_settings dsc_settings;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -403,6 +420,8 @@ struct dc_plane_state;
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
+ uint64_t tiling_flags;
+ bool tmz_surface;
};
struct dm_crtc_state {
@@ -423,7 +442,6 @@ struct dm_crtc_state {
bool vrr_supported;
struct mod_freesync_config freesync_config;
- struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket;
int abm_level;
@@ -485,6 +503,8 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct edid *edid);
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
+
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b321ff654df4..5df05f0d18bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -308,8 +308,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
{
struct dc_stream_state *stream = crtc->stream;
- struct amdgpu_device *adev =
- (struct amdgpu_device *)crtc->base.state->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
struct drm_color_ctm *ctm = NULL;
const struct drm_color_lut *degamma_lut, *regamma_lut;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index eaad9099bc0b..d0699e98db92 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -101,7 +101,7 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index e5a6d9115949..8cd646eef096 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -49,6 +49,10 @@ struct dmub_debugfs_trace_entry {
uint32_t param1;
};
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
/* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array
*
@@ -107,7 +111,6 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
if (*param_nums > max_param_num)
*param_nums = max_param_num;
-;
wr_buf_ptr = wr_buf; /* reset buf pointer */
wr_buf_count = 0; /* number of char already checked */
@@ -261,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ if (parse_write_buffer_into_params(wr_buf, size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -420,7 +423,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ if (parse_write_buffer_into_params(wr_buf, size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -572,7 +575,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ if (parse_write_buffer_into_params(wr_buf, size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -905,7 +908,7 @@ static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
if (size < sizeof(connector->debugfs_dpcd_address))
- return 0;
+ return -EINVAL;
r = copy_from_user(&connector->debugfs_dpcd_address,
buf, sizeof(connector->debugfs_dpcd_address));
@@ -920,7 +923,7 @@ static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
if (size < sizeof(connector->debugfs_dpcd_size))
- return 0;
+ return -EINVAL;
r = copy_from_user(&connector->debugfs_dpcd_size,
buf, sizeof(connector->debugfs_dpcd_size));
@@ -940,8 +943,8 @@ static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
uint32_t write_size = connector->debugfs_dpcd_size;
- if (size < write_size)
- return 0;
+ if (!write_size || size < write_size)
+ return -EINVAL;
data = kzalloc(write_size, GFP_KERNEL);
if (!data)
@@ -964,7 +967,7 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
struct dc_link *link = connector->dc_link;
uint32_t read_size = connector->debugfs_dpcd_size;
- if (size < read_size)
+ if (!read_size || size < read_size)
return 0;
data = kzalloc(read_size, GFP_KERNEL);
@@ -980,6 +983,190 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+/* function: Read link's DSC & FEC capabilities
+ *
+ * Access it with the following command (you need to specify
+ * connector like DP-1):
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dp_dsc_fec_support
+ *
+ */
+static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ int ret = 0;
+ bool try_again = false;
+ bool is_fec_supported = false;
+ bool is_dsc_supported = false;
+ struct dpcd_caps dpcd_caps;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret) {
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ }
+ break;
+ }
+ if (connector->status != connector_status_connected) {
+ ret = -ENODEV;
+ break;
+ }
+ dpcd_caps = aconnector->dc_link->dpcd_caps;
+ if (aconnector->port) {
+			/* aconnector sets dsc_aux during the get_modes call.
+			 * If an MST connector has it, DSC can be enabled
+			 * either on the sink device or on the MST branch
+			 * it is connected to.
+			 */
+ if (aconnector->dsc_aux) {
+ is_fec_supported = true;
+ is_dsc_supported = true;
+ }
+ } else {
+ is_fec_supported = dpcd_caps.fec_cap.raw & 0x1;
+ is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1;
+ }
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ seq_printf(m, "FEC_Sink_Support: %s\n", yesno(is_fec_supported));
+ seq_printf(m, "DSC_Sink_Support: %s\n", yesno(is_dsc_supported));
+
+ return ret;
+}
+
+/* function: Trigger virtual HPD redetection on connector
+ *
+ * This function will perform link rediscovery, link disable
+ * and enable, and dm connector state update.
+ *
+ * Retrigger HPD on an existing connector by echoing 1 into
+ * its respective "trigger_hotplug" debugfs entry:
+ *
+ * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
+ *
+ * This function can perform HPD unplug:
+ *
+ * echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug
+ *
+ */
+static ssize_t dp_trigger_hotplug(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_connector *connector = &aconnector->base;
+ struct dc_link *link = NULL;
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ long param[1] = {0};
+ uint8_t param_nums = 0;
+
+ if (!aconnector || !aconnector->dc_link)
+ return -EINVAL;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+		DRM_DEBUG_DRIVER("user data could not be read\n");
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+	mutex_lock(&aconnector->hpd_lock);
+
+	if (param[0] == 1) {
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
+ new_connection_type != dc_connection_none)
+ goto unlock;
+
+ if (!dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD))
+ goto unlock;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ } else if (param[0] == 0) {
+ if (!aconnector->dc_link)
+ goto unlock;
+
+ link = aconnector->dc_link;
+
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ link->dongle_max_pix_clk = 0;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+unlock:
+ mutex_unlock(&aconnector->hpd_lock);
+
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: read DSC status on the connector
+ *
+ * The read function: dp_dsc_clock_en_read
+ * returns current status of DSC clock on the connector.
+ * The return is a boolean flag: 1 or 0.
+ *
+ * Access it with the following command (you need to specify
+ * connector like DP-1):
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_clock_en
+ *
+ * Expected output:
+ * 1 - means that DSC is currently enabled
+ * 0 - means that DSC is disabled
+ */
static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1037,6 +1224,105 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
return result;
}
+/* function: write force DSC on the connector
+ *
+ * The write function: dp_dsc_clock_en_write
+ * allows forcing DSC on the connector.
+ * The user can either force enable or force disable DSC
+ * on the next modeset, or set it back to the driver default.
+ *
+ * Accepted inputs:
+ * 0 - default DSC enablement policy
+ * 1 - force enable DSC on the connector
+ * 2 - force disable DSC on the connector (might cause atomic_check to fail)
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force enable DSC (you need to specify
+ * connector like DP-1):
+ *
+ * echo 0x1 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en
+ *
+ * - To return to default state set the flag to zero and
+ * let driver deal with DSC automatically
+ * (you need to specify connector like DP-1):
+ *
+ * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en
+ *
+ */
+static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ long param[1] = {0};
+ uint8_t param_nums = 0;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+		DRM_DEBUG_DRIVER("user data could not be read\n");
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ goto done;
+
+ if (param[0] == 1)
+ aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE;
+ else if (param[0] == 2)
+ aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE;
+ else
+ aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT;
+
+done:
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: read DSC slice width parameter on the connector
+ *
+ * The read function: dp_dsc_slice_width_read
+ * returns dsc slice width used in the current configuration
+ * The return is an integer: 0 or other positive number
+ *
+ * Access the status with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_width
+ *
+ * 0 - means that DSC is disabled
+ *
+ * Any number greater than zero represents the
+ * slice width currently used by DSC, in pixels
+ *
+ */
static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1094,6 +1380,103 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
return result;
}
+/* function: write DSC slice width parameter
+ *
+ * The write function: dp_dsc_slice_width_write
+ * overwrites automatically generated DSC configuration
+ * of slice width.
+ *
+ * The user has to write a slice width that evenly divides
+ * the picture width.
+ *
+ * Also the user has to write the width in hexadecimal
+ * rather than in decimal.
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force overwrite the slice width (example sets it to 1920 pixels):
+ *
+ * echo 0x780 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width
+ *
+ * - To stop overwriting and let driver find the optimal size,
+ * set the width to zero:
+ *
+ * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width
+ *
+ */
+static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ long param[1] = {0};
+ uint8_t param_nums = 0;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+		DRM_DEBUG_DRIVER("user data could not be read\n");
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ goto done;
+
+ if (param[0] > 0)
+ aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP(
+ pipe_ctx->stream->timing.h_addressable,
+ param[0]);
+ else
+ aconnector->dsc_settings.dsc_num_slices_h = 0;
+
+done:
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: read DSC slice height parameter on the connector
+ *
+ * The read function: dp_dsc_slice_height_read
+ * returns the dsc slice height used in the current configuration.
+ * The return is an integer: zero or a positive number.
+ *
+ * Access the status with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_height
+ *
+ * 0 - means that DSC is disabled
+ *
+ * Any number greater than zero represents the
+ * slice height currently used by DSC, in pixels
+ *
+ */
static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1151,6 +1534,99 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
return result;
}
+/* function: write DSC slice height parameter
+ *
+ * The write function: dp_dsc_slice_height_write
+ * overwrites automatically generated DSC configuration
+ * of slice height.
+ *
+ * The user has to write a slice height that evenly divides
+ * the picture height.
+ *
+ * Also the user has to write the height in hexadecimal
+ * rather than in decimal.
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force overwrite the slice height (example sets it to 128 pixels):
+ *
+ * echo 0x80 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height
+ *
+ * - To stop overwriting and let driver find the optimal size,
+ * set the height to zero:
+ *
+ * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height
+ *
+ */
+static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ uint8_t param_nums = 0;
+ long param[1] = {0};
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+		DRM_DEBUG_DRIVER("user data could not be read\n");
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ goto done;
+
+ if (param[0] > 0)
+ aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
+ pipe_ctx->stream->timing.v_addressable,
+ param[0]);
+ else
+ aconnector->dsc_settings.dsc_num_slices_v = 0;
+
+done:
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: read DSC target rate on the connector in bits per pixel
+ *
+ * The read function: dp_dsc_bits_per_pixel_read
+ * returns the target compression rate in bits per pixel.
+ * The return is an integer: zero or a positive integer.
+ *
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ * 0 - means that DSC is disabled
+ */
static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1208,6 +1684,94 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
return result;
}
+/* function: write DSC target rate in bits per pixel
+ *
+ * The write function: dp_dsc_bits_per_pixel_write
+ * overwrites automatically generated DSC configuration
+ * of DSC target bit rate.
+ *
+ * Also the user has to write the bpp in hexadecimal
+ * rather than in decimal.
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force overwrite the rate, in units of 1/16 bpp (example sets 0x100 = 256, i.e. 16 bpp):
+ *
+ * echo 0x100 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ * - To stop overwriting and let driver find the optimal rate,
+ * set the rate to zero:
+ *
+ * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ */
+static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct pipe_ctx *pipe_ctx;
+ int i;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ uint8_t param_nums = 0;
+ long param[1] = {0};
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+		DRM_DEBUG_DRIVER("user data could not be read\n");
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ goto done;
+
+ aconnector->dsc_settings.dsc_bits_per_pixel = param[0];
+
+done:
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: read DSC picture width parameter on the connector
+ *
+ * The read function: dp_dsc_pic_width_read
+ * returns the dsc picture width used in the current configuration.
+ * It is the same as h_addressable of the current
+ * display's timing.
+ * The return is an integer: zero or a positive integer.
+ * If 0 then DSC is disabled.
+ *
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_pic_width
+ *
+ * 0 - means that DSC is disabled
+ */
static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1322,6 +1886,21 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
return result;
}
+/* function: read DSC chunk size parameter on the connector
+ *
+ * The read function: dp_dsc_chunk_size_read
+ * returns the dsc chunk size set in the current configuration.
+ * The value is calculated automatically by DSC code
+ * and depends on slice parameters and bpp target rate.
+ * The return is an integer: zero or a positive integer.
+ * If 0 then DSC is disabled.
+ *
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_chunk_size
+ *
+ * 0 - means that DSC is disabled
+ */
static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1379,6 +1958,21 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
return result;
}
+/* function: read DSC slice bpg offset on the connector
+ *
+ * The read function: dp_dsc_slice_bpg_offset_read
+ * returns the dsc slice bpg offset set in the current configuration.
+ * The value is calculated automatically by DSC code
+ * and depends on slice parameters and bpp target rate.
+ * The return is an integer: zero or a positive integer.
+ * If 0 then DSC is disabled.
+ *
+ * Access it with the following command:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_bpg_offset
+ *
+ * 0 - means that DSC is disabled
+ */
static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1436,6 +2030,7 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
return result;
}
+DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
@@ -1446,24 +2041,28 @@ DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dsc_clock_en_read,
+ .write = dp_dsc_clock_en_write,
.llseek = default_llseek
};
static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dsc_slice_width_read,
+ .write = dp_dsc_slice_width_write,
.llseek = default_llseek
};
static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dsc_slice_height_read,
+ .write = dp_dsc_slice_height_write,
.llseek = default_llseek
};
static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_dsc_bits_per_pixel_read,
+ .write = dp_dsc_bits_per_pixel_write,
.llseek = default_llseek
};
@@ -1491,6 +2090,12 @@ static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
.llseek = default_llseek
};
+static const struct file_operations dp_trigger_hotplug_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_trigger_hotplug,
+ .llseek = default_llseek
+};
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -1541,6 +2146,7 @@ static const struct {
const struct file_operations *fops;
} dp_debugfs_entries[] = {
{"link_settings", &dp_link_settings_debugfs_fops},
+ {"trigger_hotplug", &dp_trigger_hotplug_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -1557,7 +2163,8 @@ static const struct {
{"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
{"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
- {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}
+ {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops},
+ {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}
};
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -1721,7 +2328,7 @@ static int current_backlight_read(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
@@ -1739,7 +2346,7 @@ static int target_backlight_read(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
@@ -1778,6 +2385,38 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
};
/*
+ * Sets the force_timing_sync debug option from the given string.
+ * All connected displays will be forcibly synchronized immediately.
+ * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
+ */
+static int force_timing_sync_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.force_timing_sync = (bool)val;
+
+ amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
+
+ return 0;
+}
+
+/*
+ * Gets the current force_timing_sync debug option value.
+ * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
+ */
+static int force_timing_sync_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.force_timing_sync;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
+ force_timing_sync_set, "%llu\n");
+
+/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
*/
@@ -1815,7 +2454,7 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
.llseek = default_llseek
};
- struct drm_minor *minor = adev->ddev->primary;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
int ret;
@@ -1836,5 +2475,8 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
adev, &dmub_fw_state_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
+ adev, &force_timing_sync_ops);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index cbcf504f73a5..357778556b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -719,7 +719,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
*/
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
@@ -755,7 +755,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
*/
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
new file mode 100644
index 000000000000..45825a34f8eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_PARAMS_H__
+#define __AMDGPU_DM_IRQ_PARAMS_H__
+
+struct dm_irq_params {
+ u32 last_flip_vblank;
+ struct mod_vrr_params vrr_params;
+ struct dc_stream_state *stream;
+ int active_planes;
+ struct mod_freesync_config freesync_config;
+};
+
+#endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 336aaa09be46..eee19edeeee5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -26,6 +26,7 @@
#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
@@ -158,7 +159,20 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
u8 dsc_caps[16] = { 0 };
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+#if defined(CONFIG_HP_HOOK_WORKAROUND)
+ /*
+ * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
+	 * because it only checks the dsc/fec caps of the "port" variable and not the dock
+	 *
+	 * This case will return NULL: a DSC capable MST dock connected to a non fec/dsc capable display
+ *
+ * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
+ *
+ */
+ if (!aconnector->dsc_aux && !port->parent->port_parent)
+ aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+#endif
if (!aconnector->dsc_aux)
return false;
@@ -241,7 +255,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
return &adev->dm.mst_encoders[acrtc->crtc_id].base;
@@ -310,7 +324,7 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
int i;
for (i = 0; i < adev->dm.display_indexes_num; i++) {
@@ -337,7 +351,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
{
struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
int i;
@@ -426,11 +440,13 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
- dm->adev->ddev,
+ adev_to_drm(dm->adev),
&aconnector->dm_dp_aux.aux,
16,
4,
aconnector->connector_id);
+
+ drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
int dm_mst_get_pbn_divider(struct dc_link *link)
@@ -450,6 +466,10 @@ struct dsc_mst_fairness_params {
struct dc_dsc_bw_range bw_range;
bool compression_possible;
struct drm_dp_mst_port *port;
+ enum dsc_clock_force_state clock_force_enable;
+ uint32_t num_slices_h;
+ uint32_t num_slices_v;
+ uint32_t bpp_overwrite;
};
struct dsc_mst_fairness_vars {
@@ -483,7 +503,17 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
params[i].timing,
&params[i].timing->dsc_cfg)) {
params[i].timing->flags.DSC = 1;
- params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+
+ if (params[i].bpp_overwrite)
+ params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
+ else
+ params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
+
+ if (params[i].num_slices_h)
+ params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;
+
+ if (params[i].num_slices_v)
+ params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
} else {
params[i].timing->flags.DSC = 0;
}
@@ -615,7 +645,9 @@ static void try_disable_dsc(struct drm_atomic_state *state,
int remaining_to_try = 0;
for (i = 0; i < count; i++) {
- if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
+ if (vars[i].dsc_enabled
+ && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
+ && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
tried[i] = false;
remaining_to_try += 1;
@@ -676,6 +708,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
int count = 0;
+ bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
@@ -694,6 +727,12 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].port = aconnector->port;
+ params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
+ if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
+ debugfs_overwrite = true;
+ params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+ params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+ params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
if (!dc_dsc_compute_bandwidth_range(
@@ -719,14 +758,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
- if (!drm_dp_mst_atomic_check(state)) {
+ if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
set_dsc_configs_from_fairness_vars(params, vars, count);
return true;
}
/* Try max compression */
for (i = 0; i < count; i++) {
- if (params[i].compression_possible) {
+ if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i].dsc_enabled = true;
vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index c5f2216e59c4..6e575ffe34d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -592,9 +592,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.ppt_funcs)
- smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -667,49 +664,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
- struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
- wm_with_clock_ranges.wm_dmif_clocks_ranges;
- struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
- wm_with_clock_ranges.wm_mcif_clocks_ranges;
- int32_t i;
- wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
- wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
- for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
- if (ranges->reader_wm_sets[i].wm_inst > 3)
- wm_dce_clocks[i].wm_set_id = WM_SET_A;
- else
- wm_dce_clocks[i].wm_set_id =
- ranges->reader_wm_sets[i].wm_inst;
- wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
- wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
- wm_dce_clocks[i].wm_max_mem_clk_in_khz =
- ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
- wm_dce_clocks[i].wm_min_mem_clk_in_khz =
- ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
- }
-
- for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
- if (ranges->writer_wm_sets[i].wm_inst > 3)
- wm_soc_clocks[i].wm_set_id = WM_SET_A;
- else
- wm_soc_clocks[i].wm_set_id =
- ranges->writer_wm_sets[i].wm_inst;
- wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
- wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
- wm_soc_clocks[i].wm_max_mem_clk_in_khz =
- ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
- wm_soc_clocks[i].wm_min_mem_clk_in_khz =
- ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
- }
-
- smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+ smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
return PP_SMU_RESULT_OK;
}
@@ -810,7 +766,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
}
static enum pp_smu_status pp_nv_set_pstate_handshake_support(
- struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+ struct pp_smu *pp, bool pstate_handshake_supported)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -920,60 +876,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
- struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
- struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
- wm_with_clock_ranges.wm_dmif_clocks_ranges;
- struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
- wm_with_clock_ranges.wm_mcif_clocks_ranges;
- int32_t i;
-
- if (!smu->ppt_funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
- wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
-
- for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
- if (ranges->reader_wm_sets[i].wm_inst > 3)
- wm_dce_clocks[i].wm_set_id = WM_SET_A;
- else
- wm_dce_clocks[i].wm_set_id =
- ranges->reader_wm_sets[i].wm_inst;
-
- wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].min_drain_clk_mhz;
-
- wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].max_drain_clk_mhz;
-
- wm_dce_clocks[i].wm_min_mem_clk_in_khz =
- ranges->reader_wm_sets[i].min_fill_clk_mhz;
-
- wm_dce_clocks[i].wm_max_mem_clk_in_khz =
- ranges->reader_wm_sets[i].max_fill_clk_mhz;
- }
-
- for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
- if (ranges->writer_wm_sets[i].wm_inst > 3)
- wm_soc_clocks[i].wm_set_id = WM_SET_A;
- else
- wm_soc_clocks[i].wm_set_id =
- ranges->writer_wm_sets[i].wm_inst;
- wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].min_fill_clk_mhz;
-
- wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].max_fill_clk_mhz;
-
- wm_soc_clocks[i].wm_min_mem_clk_in_khz =
- ranges->writer_wm_sets[i].min_drain_clk_mhz;
-
- wm_soc_clocks[i].wm_max_mem_clk_in_khz =
- ranges->writer_wm_sets[i].max_drain_clk_mhz;
- }
- smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+ smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
return PP_SMU_RESULT_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index e0f4f1be1618..047b1e2dd8f1 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -43,6 +43,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_SI
+DC_LIBS += dce60
+endif
+
ifdef CONFIG_DRM_AMD_DC_HDCP
DC_LIBS += hdcp
endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
index 239e86bbec5a..ed6b5e9763f6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -32,6 +32,15 @@ AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
###############################################################################
+# DCE 6x
+###############################################################################
+# All DCE6.x are derived from DCE6.0, so 6.0 MUST be defined if ANY of
+# DCE6.x is compiled.
+ifdef CONFIG_DRM_AMD_DC_SI
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce60/command_table_helper_dce60.o
+endif
+
+###############################################################################
# DCE 8x
###############################################################################
# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2d5c7daaee23..29d64e7e304f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -847,6 +847,73 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
return result;
}
+static enum bp_result get_soc_bb_info_v4_4(
+ struct bios_parser *bp,
+ struct bp_soc_bb_info *soc_bb_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+ if (!soc_bb_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!DATA_TABLES(smu_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ soc_bb_info->dram_clock_change_latency_100ns = disp_cntl_tbl->max_mclk_chg_lat;
+ soc_bb_info->dram_sr_enter_exit_latency_100ns = disp_cntl_tbl->max_sr_enter_exit_lat;
+ soc_bb_info->dram_sr_exit_latency_100ns = disp_cntl_tbl->max_sr_exit_lat;
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_soc_bb_info(
+ struct dc_bios *dcb,
+ struct bp_soc_bb_info *soc_bb_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!soc_bb_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ case 2:
+ case 3:
+ break;
+ case 4:
+			result = get_soc_bb_info_v4_4(bp, soc_bb_info);
+			break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
static enum bp_result get_embedded_panel_info_v2_1(
struct bios_parser *bp,
struct embedded_panel_info *info)
@@ -2222,7 +2289,9 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
- .enable_lvtma_control = bios_parser_enable_lvtma_control
+ .enable_lvtma_control = bios_parser_enable_lvtma_control,
+
+ .get_soc_bb_info = bios_parser_get_soc_bb_info,
};
static bool bios_parser2_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 5815983caaf8..070459e3e407 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -1877,9 +1877,7 @@ static enum bp_result set_crtc_using_dtd_timing_v3(
* but it is 4 either from Edid data (spec CEA 861)
* or CEA timing table.
*/
- params.usV_SyncOffset =
- cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
-
+ le16_add_cpu(&params.usV_SyncOffset, 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index eb3ae5c3677c..25bdf1c38e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -569,10 +569,7 @@ static enum bp_result set_crtc_using_dtd_timing_v3(
* but it is 4 either from Edid data (spec CEA 861)
* or CEA timing table.
*/
- params.v_syncoffset =
- cpu_to_le16(le16_to_cpu(params.v_syncoffset) +
- 1);
-
+ le16_add_cpu(&params.v_syncoffset, 1);
}
}
@@ -923,11 +920,39 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
}
+static void enable_lvtma_control_dmcub(
+ struct dc_dmub_srv *dmcub,
+ uint8_t uc_pwr_on)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__VBIOS;
+ cmd.cmd_common.header.sub_type =
+ DMUB_CMD__VBIOS_LVTMA_CONTROL;
+ cmd.cmd_common.cmd_buffer[0] =
+ uc_pwr_on;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
+ dc_dmub_srv_cmd_execute(dmcub);
+ dc_dmub_srv_wait_idle(dmcub);
+}
+
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on)
{
enum bp_result result = BP_RESULT_FAILURE;
+
+ if (bp->base.ctx->dc->ctx->dmub_srv &&
+ bp->base.ctx->dc->debug.dmub_command_table) {
+ enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
+ uc_pwr_on);
+ return BP_RESULT_OK;
+ }
return result;
}
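With this change enable_lvtma_control() prefers the DMUB firmware path whenever a DMUB service exists and the dmub_command_table debug option is set; otherwise it still reports BP_RESULT_FAILURE. The queue/execute/wait triplet is the usual DMUB submission sequence. A toy model of that flow (the ring, types, and constants below are illustrative only; the real union dmub_rb_cmd and dc_dmub_srv_* entry points live in the DMUB service headers):

#include <stdio.h>
#include <string.h>

/* toy command and ring buffer standing in for the DMUB service */
struct toy_cmd { unsigned char type, sub_type, payload; };
struct toy_ring { struct toy_cmd slots[8]; int wptr; };

static void toy_cmd_queue(struct toy_ring *rb, const struct toy_cmd *cmd)
{
        rb->slots[rb->wptr++ % 8] = *cmd;       /* copy into the ring */
}

static void toy_cmd_execute(struct toy_ring *rb) { (void)rb; /* ring doorbell */ }
static void toy_wait_idle(struct toy_ring *rb)   { (void)rb; /* poll fw status */ }

int main(void)
{
        struct toy_ring rb = { 0 };
        struct toy_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));   /* mirrors the memset() above */
        cmd.type = 1;                   /* e.g. DMUB_CMD__VBIOS */
        cmd.sub_type = 2;               /* e.g. ..._LVTMA_CONTROL */
        cmd.payload = 1;                /* uc_pwr_on */

        toy_cmd_queue(&rb, &cmd);
        toy_cmd_execute(&rb);
        toy_wait_idle(&rb);
        printf("queued %d command(s)\n", rb.wptr);
        return 0;
}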
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 253bbb1eea60..48b4ef03fc8f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -36,6 +36,14 @@ bool dal_bios_parser_init_cmd_tbl_helper(
enum dce_version dce)
{
switch (dce) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case DCE_VERSION_6_0:
+ case DCE_VERSION_6_1:
+ case DCE_VERSION_6_4:
+ *h = dal_cmd_tbl_helper_dce60_get_table();
+ return true;
+#endif
+
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
index 4c3789df253d..dfd30aaf4032 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -26,6 +26,9 @@
#ifndef __DAL_COMMAND_TABLE_HELPER_H__
#define __DAL_COMMAND_TABLE_HELPER_H__
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/command_table_helper_dce60.h"
+#endif
#include "dce80/command_table_helper_dce80.h"
#include "dce110/command_table_helper_dce110.h"
#include "dce112/command_table_helper_dce112.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 21ff6b686f5f..74c498b6774d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -37,6 +37,14 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
enum dce_version dce)
{
switch (dce) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case DCE_VERSION_6_0:
+ case DCE_VERSION_6_1:
+ case DCE_VERSION_6_4:
+ *h = dal_cmd_tbl_helper_dce60_get_table();
+ return true;
+#endif
+
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
index 785fcb20a1b9..66e0a3e73768 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -26,6 +26,9 @@
#ifndef __DAL_COMMAND_TABLE_HELPER2_H__
#define __DAL_COMMAND_TABLE_HELPER2_H__
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/command_table_helper_dce60.h"
+#endif
#include "dce80/command_table_helper_dce80.h"
#include "dce110/command_table_helper_dce110.h"
#include "dce112/command_table_helper2_dce112.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
new file mode 100644
index 000000000000..710221b4f5c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ BREAK_TO_DEBUGGER(); /* check when this will happen! */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter =
+ dal_cmd_table_helper_assign_control_parameter,
+ .clock_source_id_to_ref_clk_src =
+ dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
+ .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce60_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h
new file mode 100644
index 000000000000..f733be553d5a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE60_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE60_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce60_get_table(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 4674aca8f206..64f515d74410 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,6 +33,10 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
+ifdef CONFIG_ARM64
+calcs_rcflags := -mgeneral-regs-only
+endif
+
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
@@ -53,6 +57,9 @@ endif
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_rcflags)
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 6874276bb2a1..1a495759a034 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -30,6 +30,17 @@ AMD_DAL_CLK_MGR = $(addprefix $(AMDDALPATH)/dc/clk_mgr/,$(CLK_MGR))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR)
+ifdef CONFIG_DRM_AMD_DC_SI
+###############################################################################
+# DCE 60
+###############################################################################
+CLK_MGR_DCE60 = dce60_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCE60 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dce60/,$(CLK_MGR_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCE60)
+endif
+
###############################################################################
# DCE 100 and DCE8x
###############################################################################
@@ -93,6 +104,13 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
+# prevent build errors:
+# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
+# this file is unused on arm64, just like on ppc64
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
+endif
+
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 6a345d43028c..857f156e4985 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -34,6 +34,7 @@
#include "dce110/dce110_clk_mgr.h"
#include "dce112/dce112_clk_mgr.h"
#include "dce120/dce120_clk_mgr.h"
+#include "dce60/dce60_clk_mgr.h"
#include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
@@ -123,6 +124,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
switch (asic_id.chip_family) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case FAMILY_SI:
+ dce60_clk_mgr_construct(ctx, clk_mgr);
+ break;
+#endif
case FAMILY_CI:
case FAMILY_KV:
dce_clk_mgr_construct(ctx, clk_mgr);
@@ -160,6 +166,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
}
+
+ if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
+ rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ break;
+ }
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index d031bd3d3072..807dca8f7d7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -79,8 +79,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
/* Make sure requested clock isn't lower than minimum threshold*/
- if (requested_clk_khz > 0)
- requested_clk_khz = max(requested_clk_khz,
+ requested_clk_khz = max(requested_clk_khz,
clk_mgr_dce->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
new file mode 100644
index 000000000000..0267644717b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dce110/dce110_clk_mgr.h"
+#include "dce60_clk_mgr.h"
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*
+ * Currently the register shifts and masks in this file are used for dce60
+ * which has no DPREFCLK_CNTL register
+ * TODO: remove this when DENTIST_DISPCLK_CNTL
+ * is moved to dccg, where it belongs
+ */
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE60_BASE()
+};
+
+static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(_MASK)
+};
+
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 6.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ int dprefclk_wdivider;
+ int dp_ref_clk_khz;
+ int target_div;
+
+ /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
+
+ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER */
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+ /* Calculate the current DFS clock, in kHz. */
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+
+ return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
+}
+
+static void dce60_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+
+ /* TODO: W/A for DAL3 Linux; investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr_base->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr_base->clks.dispclk_khz)) {
+ patched_disp_clk = dce_set_clock(clk_mgr_base, patched_disp_clk);
+ clk_mgr_base->clks.dispclk_khz = patched_disp_clk;
+ }
+ dce60_pplib_apply_display_requirements(clk_mgr_base->ctx->dc, context);
+}
+
+
+static struct clk_mgr_funcs dce60_funcs = {
+ .get_dp_ref_clk_frequency = dce60_get_dp_ref_freq_khz,
+ .update_clocks = dce60_update_clocks
+};
+
+void dce60_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr)
+{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
+ memcpy(clk_mgr->max_clks_by_state,
+ dce60_max_clks_by_state,
+ sizeof(dce60_max_clks_by_state));
+
+ clk_mgr->regs = &disp_clk_regs;
+ clk_mgr->clk_mgr_shift = &disp_clk_shift;
+ clk_mgr->clk_mgr_mask = &disp_clk_mask;
+ clk_mgr->base.funcs = &dce60_funcs;
+}
+
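A note on the arithmetic in dce60_get_dp_ref_freq_khz() above: the DP reference clock is derived from the DENTIST VCO frequency and the divider decoded from the DID read out of DENTIST_DISPCLK_CNTL. With the scale factor of 4 used by the DCE clk_mgr code, the computation is easy to check in isolation (the VCO and divider values below are illustrative):

#include <stdio.h>

#define DENTIST_DIVIDER_RANGE_SCALE_FACTOR 4    /* matches the clk_mgr code */

int main(void)
{
        /* illustrative values: a 3.6 GHz DENTIST VCO and a DID that
         * decodes to divider 24 (6.0x, since dividers are scaled by 4) */
        int dentist_vco_freq_khz = 3600000;
        int target_div = 24;

        int dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
                        * dentist_vco_freq_khz) / target_div;

        printf("dp_ref_clk = %d kHz\n", dp_ref_clk_khz);        /* 600000 */
        return 0;
}

The spread-spectrum correction applied afterwards by dce_adjust_dp_ref_freq_for_ss() is omitted here.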
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h
new file mode 100644
index 000000000000..eca3e5168089
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef DAL_DC_DCE_DCE60_CLK_MGR_H_
+#define DAL_DC_DCE_DCE60_CLK_MGR_H_
+
+#include "dc.h"
+
+void dce60_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr_dce);
+
+#endif /* DAL_DC_DCE_DCE60_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 21a3073c8929..2f8fee05547a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -761,6 +761,7 @@ void rn_clk_mgr_construct(
{
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
+ enum pp_smu_status status = 0;
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -817,8 +818,10 @@ void rn_clk_mgr_construct(
clk_mgr->base.bw_params = &rn_bw_params;
if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
- pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+ status = pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
+
+ if (status == PP_SMU_RESULT_OK &&
+ ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 9133646f6d5f..b0e9b0509568 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -554,8 +554,7 @@ void dcn3_clk_mgr_construct(
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
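The dcn30 destroy hunk leans on the fact that kfree(NULL) is defined to be a no-op, which makes the removed NULL check redundant; userspace free() gives the same guarantee (C99 7.20.3.2), as the trivial example shows:

#include <stdlib.h>

int main(void)
{
        int *bw_params = NULL;

        /* before: if (bw_params) free(bw_params);  after: */
        free(bw_params);        /* free(NULL) is a guaranteed no-op, like kfree(NULL) */
        return 0;
}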
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 92eb1ca1634f..45ad05f6e03b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -735,6 +735,8 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif
+ dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
+
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -842,6 +844,60 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc_release_state(current_ctx);
}
+static void disable_vbios_mode_if_required(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ unsigned int i, j;
+
+ /* if the timing changed, disable the stream */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = NULL;
+ struct dc_link *link = NULL;
+ struct pipe_ctx *pipe = NULL;
+
+ pipe = &context->res_ctx.pipe_ctx[i];
+ stream = pipe->stream;
+ if (stream == NULL)
+ continue;
+
+ if (stream->link->local_sink &&
+ stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link = stream->link;
+ }
+
+ if (link != NULL) {
+ unsigned int enc_inst, tg_inst = 0;
+ unsigned int pix_clk_100hz;
+
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ if (enc_inst != ENGINE_ID_UNKNOWN) {
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (dc->res_pool->stream_enc[j]->id == enc_inst) {
+ tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+
+ dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+ dc->res_pool->dp_clock_source,
+ tg_inst, &pix_clk_100hz);
+
+ if (link->link_status.link_active) {
+ uint32_t requested_pix_clk_100hz =
+ pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
+
+ if (pix_clk_100hz != requested_pix_clk_100hz) {
+ core_link_disable_stream(pipe);
+ pipe->stream->dpms_off = false;
+ }
+ }
+ }
+ }
+ }
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1238,6 +1294,27 @@ bool dc_enable_stereo(
return ret;
}
+void dc_trigger_sync(struct dc *dc, struct dc_state *context)
+{
+ if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ enable_timing_multisync(dc, context);
+ program_timing_sync(dc, context);
+ }
+}
+
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ unsigned int stream_mask = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ stream_mask |= 1 << i;
+ }
+
+ return stream_mask;
+}
+
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
@@ -1257,15 +1334,17 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
- if (!dcb->funcs->is_accelerated_mode(dcb))
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
+ }
- for (i = 0; i < context->stream_count; i++) {
+ for (i = 0; i < context->stream_count; i++)
if (context->streams[i]->apply_seamless_boot_optimization)
dc->optimize_seamless_boot_streams++;
- }
- if (dc->optimize_seamless_boot_streams == 0)
+ if (context->stream_count > dc->optimize_seamless_boot_streams ||
+ context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
disable_dangling_plane(dc, context);
@@ -1297,10 +1376,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (result != DC_OK)
return result;
- if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
- enable_timing_multisync(dc, context);
- program_timing_sync(dc, context);
- }
+ dc_trigger_sync(dc, context);
/* Program all planes within new context*/
if (dc->hwss.program_front_end_for_ctx) {
@@ -1350,13 +1426,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (dc->optimize_seamless_boot_streams == 0) {
+ if (context->stream_count > dc->optimize_seamless_boot_streams ||
+ context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
}
+ context->stream_mask = get_stream_mask(dc, context);
+
+ if (context->stream_mask != dc->current_state->stream_mask)
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
@@ -1476,13 +1558,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(struct dc *dc)
+static void init_state(struct dc *dc, struct dc_state *context)
{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
/* Each context must have their own instance of VBA and in order to
* initialize and obtain IP and SOC the base DML instance from DC is
* initially copied into every context
@@ -1490,6 +1567,17 @@ struct dc_state *dc_create_state(struct dc *dc)
#ifdef CONFIG_DRM_AMD_DC_DCN
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
+}
+
+struct dc_state *dc_create_state(struct dc *dc)
+{
+ struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+
+ init_state(dc, context);
kref_init(&context->refcount);
@@ -2295,6 +2383,7 @@ static void commit_planes_for_stream(struct dc *dc,
enum surface_update_type update_type,
struct dc_state *context)
{
+ bool mpcc_disconnected = false;
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
@@ -2325,6 +2414,15 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock &&
+ dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ if (mpcc_disconnected)
+ dc->hwss.wait_for_pending_cleared(dc, context);
+ }
+
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -2400,8 +2498,7 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
dc->hwss.program_triplebuffer != NULL &&
- !plane_state->flip_immediate &&
- !dc->debug.disable_tri_buf) {
+ !plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
plane_state->triplebuffer_flips = true;
}
@@ -2428,8 +2525,7 @@ static void commit_planes_for_stream(struct dc *dc,
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL &&
- !dc->debug.disable_tri_buf) {
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
@@ -2494,8 +2590,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->plane_state != plane_state)
continue;
/*program triple buffer after lock based on flip type*/
- if (dc->hwss.program_triplebuffer != NULL &&
- !dc->debug.disable_tri_buf) {
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, plane_state->triplebuffer_flips);
@@ -2621,7 +2716,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
- if (update_type > UPDATE_TYPE_FAST) {
+ if (update_type >= UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
dc_release_state(context);
@@ -2933,6 +3028,30 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
+/* enable/disable eDP PSR without specifying a stream for eDP */
+bool dc_set_psr_allow_active(struct dc *dc, bool enable)
+{
+ int i;
+
+ for (i = 0; i < dc->current_state->stream_count ; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->psr_settings.psr_feature_enabled) {
+ if (enable && !link->psr_settings.psr_allow_active)
+ return dc_link_set_psr_allow_active(link, true, false);
+ else if (!enable && link->psr_settings.psr_allow_active)
+ return dc_link_set_psr_allow_active(link, false, true);
+ }
+ }
+
+ return true;
+}
+
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
@@ -2979,4 +3098,10 @@ void dc_lock_memory_clock_frequency(struct dc *dc)
if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
+
+bool dc_is_plane_eligible_for_idle_optimizaitons(struct dc *dc,
+ struct dc_plane_state *plane)
+{
+ return false;
+}
#endif
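Among the dc.c changes above, get_stream_mask() builds a one-bit-per-pipe mask of active streams, and dc_commit_state_no_check() notifies DMUB only when that mask actually changes. A minimal standalone version of the same bookkeeping (pipe count and stream representation simplified):

#include <stdint.h>
#include <stdio.h>

#define PIPE_COUNT 6

/* toy get_stream_mask(): one bit per pipe that has a stream attached */
static uint8_t get_stream_mask(const void *streams[PIPE_COUNT])
{
        uint8_t mask = 0;
        int i;

        for (i = 0; i < PIPE_COUNT; i++)
                if (streams[i])
                        mask |= 1u << i;
        return mask;
}

int main(void)
{
        int s0, s2;
        const void *streams[PIPE_COUNT] = { &s0, NULL, &s2, NULL, NULL, NULL };
        uint8_t old_mask = 0x1, new_mask = get_stream_mask(streams);

        if (new_mask != old_mask)       /* notify firmware only on change */
                printf("notify DMUB: mask=0x%x\n", new_mask);
        return 0;
}

Notifying only on change keeps redundant firmware traffic out of the commit path.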
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index c026b393f3c5..2a9080400bdd 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -177,7 +177,7 @@ static bool is_ycbcr709_limited_type(
ret = true;
return ret;
}
-enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
{
enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 437d1a7a16fe..fec87a2e210c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2441,7 +2441,7 @@ enum dc_status dc_link_validate_mode_timing(
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
*/
- if (link->remote_sinks[0])
+ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
return DC_OK;
/* Passive Dongle */
@@ -2566,7 +2566,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
link->psr_settings.psr_allow_active = allow_active;
if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_enable(psr, allow_active);
+ psr->funcs->psr_enable(psr, allow_active, wait);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
else
@@ -2946,7 +2946,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
- stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
@@ -2974,7 +2974,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
*/
/* slot X.Y */
- stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
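For the MST payload hunks just above: the value handed to the renamed set_throttled_vcp_size() hook is the average number of time slots per MTP, i.e. the stream's payload bandwidth (PBN) divided by the PBN available per time slot. The kernel computes this in fixed point via dc_fixpt_div(); plain doubles are used below for readability, and both numbers are illustrative:

#include <stdio.h>

int main(void)
{
        double pbn = 540.0;             /* illustrative stream payload, PBN */
        double pbn_per_slot = 32.0;     /* illustrative, link-rate dependent */

        double avg_time_slots_per_mtp = pbn / pbn_per_slot;

        printf("avg time slots per MTP = %.3f\n", avg_time_slots_per_mtp);
        return 0;
}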
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b984eecca58b..dec12de37642 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -148,14 +148,6 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
-{
- if (!p)
- return;
-
- dal_vector_destruct(&p->payloads);
-}
-
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
void dal_ddc_i2c_payloads_add(
@@ -582,7 +574,7 @@ bool dal_ddc_service_query_ddc_data(
ddc->link,
&command);
- dal_ddc_i2c_payloads_destroy(&payloads);
+ dal_vector_destruct(&payloads.payloads);
}
return success;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index b2be6ad5101d..ff1e9963ec7a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -49,14 +49,31 @@ static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
-static uint32_t get_training_aux_rd_interval(
- struct dc_link *link,
- uint32_t default_wait_in_micro_secs)
+static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
{
union training_aux_rd_interval training_rd_interval;
+ uint32_t wait_in_micro_secs = 100;
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ return wait_in_micro_secs;
+}
+
+static uint32_t get_eq_training_aux_rd_interval(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ union training_aux_rd_interval training_rd_interval;
+ uint32_t wait_in_micro_secs = 400;
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
/* overwrite the delay if rev > 1.1*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
/* DP 1.2 or later - retrieve delay through
@@ -68,10 +85,10 @@ static uint32_t get_training_aux_rd_interval(
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
}
- return default_wait_in_micro_secs;
+ return wait_in_micro_secs;
}
static void wait_for_training_aux_rd_interval(
@@ -101,7 +118,16 @@ static void dpcd_set_training_pattern(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
-static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link)
+static enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings)
+{
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
+
+ return pattern;
+}
+
+static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
{
enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features = &link->link_enc->features;
@@ -132,7 +158,6 @@ static void dpcd_set_link_settings(
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- enum dc_dp_training_pattern dp_tr_pattern;
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -143,9 +168,8 @@ static void dpcd_set_link_settings(
lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
- dp_tr_pattern = get_supported_tp(link);
- if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) {
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
}
@@ -373,34 +397,30 @@ static void dpcd_set_lt_pattern_and_lane_settings(
static bool is_cr_done(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status)
{
- bool done = true;
uint32_t lane;
/*LANEx_CR_DONE bits All 1's?*/
for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
if (!dpcd_lane_status[lane].bits.CR_DONE_0)
- done = false;
+ return false;
}
- return done;
-
+ return true;
}
static bool is_ch_eq_done(enum dc_lane_count ln_count,
union lane_status *dpcd_lane_status,
union lane_align_status_updated *lane_status_updated)
{
- bool done = true;
uint32_t lane;
if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
- done = false;
+ return false;
else {
for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
- done = false;
+ return false;
}
}
- return done;
-
+ return true;
}
static void update_drive_settings(
@@ -979,7 +999,7 @@ static void start_clock_recovery_pattern_early(struct dc_link *link,
{
DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
__func__);
- dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
dp_set_hw_lane_settings(link, lt_settings, offset);
udelay(400);
}
@@ -994,7 +1014,6 @@ static enum link_training_result perform_clock_recovery_sequence(
uint32_t wait_time_microsec;
struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
@@ -1002,7 +1021,7 @@ static enum link_training_result perform_clock_recovery_sequence(
retry_count = 0;
if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1029,7 +1048,7 @@ static enum link_training_result perform_clock_recovery_sequence(
dpcd_set_lt_pattern_and_lane_settings(
link,
lt_settings,
- tr_pattern,
+ lt_settings->pattern_for_cr,
offset);
else
dpcd_set_lane_settings(
@@ -1113,7 +1132,7 @@ static inline enum link_training_result perform_link_training_int(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4)
+ lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
return status;
if (status == LINK_TRAINING_SUCCESS &&
@@ -1245,17 +1264,21 @@ static void initialize_training_settings(
if (overrides->cr_pattern_time != NULL)
lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
else
- lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100);
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
if (overrides->eq_pattern_time != NULL)
lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
else
- lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400);
+ lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ if (overrides->pattern_for_cr != NULL)
+ lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+ else
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
if (overrides->pattern_for_eq != NULL)
lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
else
- lt_settings->pattern_for_eq = get_supported_tp(link);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
if (overrides->enhanced_framing != NULL)
lt_settings->enhanced_framing = *overrides->enhanced_framing;
@@ -1457,7 +1480,6 @@ bool dc_link_dp_perform_link_training_skip_aux(
const struct dc_link_settings *link_setting)
{
struct link_training_settings lt_settings;
- enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1;
initialize_training_settings(
link,
@@ -1468,7 +1490,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
/* 1. Perform_clock_recovery_sequence. */
/* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, pattern_for_cr, DPRX);
+ dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX);
/* call HWSS to set lane settings*/
dp_set_hw_lane_settings(link, &lt_settings, DPRX);
@@ -1610,6 +1632,9 @@ bool perform_link_training_with_retries(
for (j = 0; j < attempts; ++j) {
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d\n",
+ __func__, (unsigned int)j + 1, attempts);
+
dp_enable_link_phy(
link,
signal,
@@ -1638,6 +1663,9 @@ bool perform_link_training_with_retries(
if (j == (attempts - 1))
break;
+ DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",
+ __func__, (unsigned int)j + 1, attempts);
+
dp_disable_link_phy(link, signal);
msleep(delay_between_attempts);
@@ -2431,6 +2459,12 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin
return false;
}
+static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ *link_setting = link->verified_link_cap;
+ return true;
+}
+
void decide_link_settings(struct dc_stream_state *stream,
struct dc_link_settings *link_setting)
{
@@ -2456,11 +2490,9 @@ void decide_link_settings(struct dc_stream_state *stream,
* TODO: add MST specific link training routine
*/
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- *link_setting = link->verified_link_cap;
- return;
- }
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (decide_mst_link_settings(link, link_setting))
+ return;
+ } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
if (decide_edp_link_settings(link, link_setting, req_bw))
return;
} else if (decide_dp_link_settings(link, link_setting, req_bw))
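On the training-interval split above: both helpers keep the DP-spec defaults, 100 us for clock recovery and 400 us for channel equalization, and only override them when DPCD register 0x000E (TRAINING_AUX_RD_INTERVAL) reports a nonzero value, which is expressed in units of 4 ms. The conversion, checked standalone (field masking simplified to bits 6:0):

#include <stdint.h>
#include <stdio.h>

/* DPCD 0x000E expresses the EQ wait in 4 ms units; 0 = use spec default */
static uint32_t eq_wait_us(uint8_t dpcd_interval)
{
        uint32_t wait_in_micro_secs = 400;      /* spec default for EQ */

        if (dpcd_interval & 0x7f)
                wait_in_micro_secs = (dpcd_interval & 0x7f) * 4000;
        return wait_in_micro_secs;
}

int main(void)
{
        printf("%u us\n", eq_wait_us(0));       /* 400: default */
        printf("%u us\n", eq_wait_us(1));       /* 4000: 4 ms */
        printf("%u us\n", eq_wait_us(4));       /* 16000: 16 ms */
        return 0;
}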
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index dd88eb348dfa..11a619befb42 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -104,6 +104,12 @@ void dp_enable_link_phy(
struct clock_source *dp_cs =
link->dc->res_pool->dp_clock_source;
unsigned int i;
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
/* If the current pixel clock source is not DTO(happens after
* switching from HDMI passive dongle to DP on the same connector),
* switch the pixel clock source to DTO.
@@ -223,6 +229,8 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
+ if (link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
} else {
@@ -485,13 +493,15 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
OPTC_DSC_DISABLED, 0, 0);
/* disable DSC in stream encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
-
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
+ if (dc_is_dp_signal(stream->signal)) {
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL);
+ }
}
/* disable DSC block */
@@ -528,7 +538,6 @@ out:
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
@@ -551,7 +560,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc,
@@ -560,7 +569,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
}
} else {
/* disable DSC PPS in stream encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (dc_is_dp_signal(stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
pipe_ctx->stream_res.stream_enc, false, NULL);
}
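The dc_link_hwss.c hunks tighten eDP power sequencing: panel VDD is raised and HPD confirmed before the link PHY is enabled, and the backlight is forced off before the encoder output and panel power go down. A sketch of that ordering with a toy hwss vtable (hooks modeled as plain function pointers; edp_backlight_control is optional, hence the NULL check mirrored from the diff):

#include <stdio.h>

struct hwss {
        void (*edp_power_control)(int on);
        void (*edp_wait_for_hpd_ready)(int on);
        void (*edp_backlight_control)(int on);  /* may be NULL */
};

static void power(int on) { printf("panel power %d\n", on); }
static void hpd(int on)   { printf("wait HPD ready %d\n", on); }
static void bl(int on)    { printf("backlight %d\n", on); }

static void enable_phy(struct hwss *h)
{
        h->edp_power_control(1);        /* VDD on first ... */
        h->edp_wait_for_hpd_ready(1);   /* ... then wait for the panel */
        printf("enable link PHY\n");
}

static void disable_phy(struct hwss *h)
{
        if (h->edp_backlight_control)   /* hook is optional */
                h->edp_backlight_control(0);
        printf("disable encoder output\n");
        h->edp_power_control(0);
}

int main(void)
{
        struct hwss h = { power, hpd, bl };

        enable_phy(&h);
        disable_phy(&h);
        return 0;
}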
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7b5f90ebb133..59d48cf819ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -42,6 +42,9 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/dce60_resource.h"
+#endif
#include "dce80/dce80_resource.h"
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
@@ -63,6 +66,18 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
switch (asic_id.chip_family) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case FAMILY_SI:
+ if (ASIC_REV_IS_TAHITI_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_PITCAIRN_PM(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_CAPEVERDE_M(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_6_0;
+ else if (ASIC_REV_IS_OLAND_M(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_6_4;
+ else
+ dc_version = DCE_VERSION_6_1;
+ break;
+#endif
case FAMILY_CI:
dc_version = DCE_VERSION_8_0;
break;
@@ -105,6 +120,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_1_01;
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1;
+ if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_2_1;
break;
#endif
@@ -129,6 +146,20 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
struct resource_pool *res_pool = NULL;
switch (dc_version) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case DCE_VERSION_6_0:
+ res_pool = dce60_create_resource_pool(
+ init_data->num_virtual_links, dc);
+ break;
+ case DCE_VERSION_6_1:
+ res_pool = dce61_create_resource_pool(
+ init_data->num_virtual_links, dc);
+ break;
+ case DCE_VERSION_6_4:
+ res_pool = dce64_create_resource_pool(
+ init_data->num_virtual_links, dc);
+ break;
+#endif
case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
init_data->num_virtual_links, dc);
@@ -753,11 +784,18 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
- data->recout.x = stream->dst.x;
- if (stream->src.x < surf_clip.x)
- data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
+ /*
+ * Only the leftmost ODM pipe should be offset by a nonzero distance
+ */
+ if (!pipe_ctx->prev_odm_pipe) {
+ data->recout.x = stream->dst.x;
+ if (stream->src.x < surf_clip.x)
+ data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
+ } else
+ data->recout.x = 0;
+
data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
@@ -928,7 +966,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
+ struct pipe_ctx *odm_pipe = pipe_ctx;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
@@ -959,21 +997,24 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
swap(src.width, src.height);
}
+ /* Modified recout_skip_h calculation: ODM pipes other than the leftmost carry no recout offset */
+ while (odm_pipe->prev_odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ /* odm_pipe now points at the leftmost pipe in the ODM group */
+ recout_skip_h = odm_idx * data->recout.width;
+
/* Recout matching initial vp offset = recout_offset - (stream dst offset +
* ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
* - (surf surf_src offset * 1/ full scl ratio))
*/
- recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
+ - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
- /*modified recout_skip_h calculation due to odm having no recout offset*/
- while (odm_pipe) {
- odm_idx++;
- odm_pipe = odm_pipe->prev_odm_pipe;
- }
- if (odm_idx)
- recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
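The two hunks above move the ODM horizontal offset out of per-pipe recout.x: only the leftmost pipe keeps a nonzero recout.x, and each pipe to its right skips one recout width per hop. A minimal sketch of the ODM part of that skip calculation (simplified stand-in structs, not the driver's; the stream/plane scaling terms are omitted):

	/* Illustration only: simplified stand-ins for struct pipe_ctx/rect. */
	struct sketch_pipe {
		struct sketch_pipe *prev_odm_pipe; /* NULL for the leftmost pipe */
		int recout_x;                      /* recout.x; 0 unless leftmost */
		int recout_width;                  /* recout.width */
	};

	static int sketch_recout_skip_h(const struct sketch_pipe *pipe)
	{
		const struct sketch_pipe *left = pipe;
		int odm_idx = 0;

		/* Walk left to the first pipe in the ODM group, counting hops. */
		while (left->prev_odm_pipe) {
			odm_idx++;
			left = left->prev_odm_pipe;
		}

		/* One recout width skipped per hop, plus the leftmost offset. */
		return odm_idx * pipe->recout_width + left->recout_x;
	}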
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 0257a900fe2b..d48fd87d3b95 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -123,7 +123,6 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
return false;
}
stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->out_transfer_func->ctx = stream->ctx;
stream->stream_id = stream->ctx->dc_stream_id_count;
stream->ctx->dc_stream_id_count++;
@@ -298,7 +297,7 @@ bool dc_stream_set_cursor_attributes(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* disable idle optimizations while updating cursor */
if (dc->idle_optimizations_allowed) {
- dc->hwss.apply_idle_power_optimizations(dc, false);
+ dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}
@@ -326,7 +325,7 @@ bool dc_stream_set_cursor_attributes(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
- dc->hwss.apply_idle_power_optimizations(dc, true);
+ dc_allow_idle_optimizations(dc, true);
#endif
return true;
@@ -359,9 +358,8 @@ bool dc_stream_set_cursor_position(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* disable idle optimizations if enabling cursor */
- if (dc->idle_optimizations_allowed &&
- !stream->cursor_position.enable && position->enable) {
- dc->hwss.apply_idle_power_optimizations(dc, false);
+ if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {
+ dc_allow_idle_optimizations(dc, false);
reset_idle_optimizations = true;
}
@@ -392,7 +390,7 @@ bool dc_stream_set_cursor_position(
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* re-enable idle optimizations if necessary */
if (reset_idle_optimizations)
- dc->hwss.apply_idle_power_optimizations(dc, true);
+ dc_allow_idle_optimizations(dc, true);
#endif
return true;
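The cursor hunks above swap direct dc->hwss.apply_idle_power_optimizations() calls for the dc_allow_idle_optimizations() wrapper. A hedged sketch of the wrapper pattern (field names are simplified stand-ins; the real function also consults debug options):

	/* Sketch of the gating wrapper so callers stop poking the
	 * hardware-sequencer hook directly. Not the driver code. */
	struct sketch_dc {
		int idle_optimizations_allowed;
		void (*apply_idle_power_optimizations)(struct sketch_dc *dc, int allow);
	};

	static void sketch_allow_idle_optimizations(struct sketch_dc *dc, int allow)
	{
		if (dc->idle_optimizations_allowed == allow)
			return; /* already in the requested state */

		dc->apply_idle_power_optimizations(dc, allow);
		dc->idle_optimizations_allowed = allow;
	}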
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ea1229a3e2b2..3d7d27435f15 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -48,22 +48,17 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
plane_state->in_transfer_func = dc_create_transfer_func();
if (plane_state->in_transfer_func != NULL) {
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
- plane_state->in_transfer_func->ctx = ctx;
}
plane_state->in_shaper_func = dc_create_transfer_func();
if (plane_state->in_shaper_func != NULL) {
plane_state->in_shaper_func->type = TF_TYPE_BYPASS;
- plane_state->in_shaper_func->ctx = ctx;
}
plane_state->lut3d_func = dc_create_3dlut_func();
- if (plane_state->lut3d_func != NULL) {
- plane_state->lut3d_func->ctx = ctx;
- }
+
plane_state->blend_tf = dc_create_transfer_func();
if (plane_state->blend_tf != NULL) {
plane_state->blend_tf->type = TF_TYPE_BYPASS;
- plane_state->blend_tf->ctx = ctx;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f50ef4255020..82fe0ab56e3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.95"
+#define DC_VER "3.2.104"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -476,7 +476,7 @@ struct dc_debug_options {
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
#endif
unsigned int force_fclk_khz;
- bool disable_tri_buf;
+ bool enable_tri_buf;
bool dmub_offload_enabled;
bool dmcub_emulation;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
@@ -503,6 +503,7 @@ struct dc_debug_options {
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
bool enable_dram_clock_change_one_display_vactive;
+ bool force_ignore_link_settings;
};
struct dc_debug_data {
@@ -660,6 +661,7 @@ struct dc_init_data {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
bool force_smu_not_present;
#endif
+ bool force_ignore_link_settings;
};
struct dc_callback_init {
@@ -745,7 +747,6 @@ struct dc_transfer_func {
enum dc_transfer_func_predefined tf;
/* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
uint32_t sdr_ref_white_level;
- struct dc_context *ctx;
union {
struct pwl_params pwl;
struct dc_transfer_func_distributed_points tf_pts;
@@ -772,7 +773,6 @@ struct dc_3dlut {
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
union dc_3dlut_state state;
- struct dc_context *ctx;
};
/*
* This structure is filled in by dc_surface_get_status and contains
@@ -1250,6 +1250,9 @@ enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ struct dc_plane_state *plane);
+
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
/*
@@ -1265,6 +1268,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc);
void dc_lock_memory_clock_frequency(struct dc *dc);
#endif
+
+bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 0811f941f430..e146e3cba8eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -140,6 +140,10 @@ struct dc_vbios_funcs {
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on);
+
+ enum bp_result (*get_soc_bb_info)(
+ struct dc_bios *dcb,
+ struct bp_soc_bb_info *soc_bb_info);
};
struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index eea2429ac67d..b98754811977 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -132,3 +132,19 @@ void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
/* Continue spinning so we don't hang the ASIC. */
}
}
+
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int stream_mask)
+{
+ struct dmub_srv *dmub;
+ const uint32_t timeout = 30;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ dmub = dc_dmub_srv->dmub;
+
+ return dmub_srv_send_gpint_command(
+ dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+ stream_mask, timeout) == DMUB_STATUS_OK;
+}
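A short usage sketch for the new helper; the caller and the mask construction are hypothetical, only dc_dmub_srv_notify_stream_mask() is from this patch:

	#include "dc_dmub_srv.h" /* for dc_dmub_srv_notify_stream_mask() */

	/* Hypothetical caller: build a bitmask of active OTG instances and
	 * forward it to DMCUB via the GPINT command added above. */
	static bool sketch_notify_active_streams(struct dc_dmub_srv *srv,
			const unsigned int *otg_insts, int count)
	{
		unsigned int mask = 0;
		int i;

		for (i = 0; i < count; i++)
			mask |= 1u << otg_insts[i];

		/* false: no DMCUB firmware present, or the GPINT timed out */
		return dc_dmub_srv_notify_stream_mask(srv, mask);
	}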
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a3a09ccb6d26..bb4ab61887e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
+ unsigned int stream_mask);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index a8a3b0643505..80a2191a3115 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -123,6 +123,7 @@ struct dc_link_training_overrides {
uint16_t *cr_pattern_time;
uint16_t *eq_pattern_time;
+ enum dc_dp_training_pattern *pattern_for_cr;
enum dc_dp_training_pattern *pattern_for_eq;
enum dc_link_spread *downspread;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 3800340a5b4f..768ab38d41cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -51,6 +51,7 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
+ bool enable_dsc_when_not_needed;
};
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
@@ -80,4 +81,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit);
+void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index e002ef706e1d..266b93a705d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -237,6 +237,8 @@ enum dc_detect_reason {
DETECT_REASON_BOOT,
DETECT_REASON_HPD,
DETECT_REASON_HPDRX,
+ DETECT_REASON_FALLBACK,
+ DETECT_REASON_RETRAIN
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d9888f316da6..c246af7c584b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -397,6 +397,8 @@ bool dc_enable_stereo(
struct dc_stream_state *streams[],
uint8_t stream_count);
+/* Triggers multi-stream synchronization. */
+void dc_trigger_sync(struct dc *dc, struct dc_state *context);
enum surface_update_type dc_check_update_surfaces_for_stream(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 946ba929c6f6..c47a19719de2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -122,7 +122,7 @@ struct dc_context {
};
-#define DC_MAX_EDID_BUFFER_SIZE 1024
+#define DC_MAX_EDID_BUFFER_SIZE 1280
#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -233,6 +233,7 @@ struct dc_panel_patch {
unsigned int skip_scdc_overwrite;
unsigned int delay_ignore_msa;
unsigned int disable_fec;
+ unsigned int extra_t3_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index a44effcda49f..e84d21605854 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -46,6 +46,8 @@
SR(BL1_PWM_USER_LEVEL), \
SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+ SR(DC_ABM1_ACE_OFFSET_SLOPE_0), \
+ SR(DC_ABM1_ACE_THRES_12), \
SR(BIOS_SCRATCH_2)
#define ABM_DCN10_REG_LIST(id)\
@@ -60,6 +62,8 @@
SRI(BL1_PWM_USER_LEVEL, ABM, id), \
SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
#define ABM_DCN20_REG_LIST() \
@@ -74,10 +78,12 @@
SR(BL1_PWM_USER_LEVEL), \
SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+ SR(DC_ABM1_ACE_OFFSET_SLOPE_0), \
+ SR(DC_ABM1_ACE_THRES_12), \
NBIO_SR(BIOS_SCRATCH_2)
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-#define ABM_DCN301_REG_LIST(id)\
+#define ABM_DCN30_REG_LIST(id)\
ABM_COMMON_REG_LIST_DCE_BASE(), \
SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
@@ -89,6 +95,8 @@
SRI(BL1_PWM_USER_LEVEL, ABM, id), \
SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
#endif
@@ -208,6 +216,8 @@ struct dce_abm_registers {
uint32_t BL1_PWM_USER_LEVEL;
uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
+ uint32_t DC_ABM1_ACE_OFFSET_SLOPE_0;
+ uint32_t DC_ABM1_ACE_THRES_12;
uint32_t MASTER_COMM_CNTL_REG;
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 408046579712..2a2a0fdb9253 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -867,6 +867,98 @@ void dce_aud_wall_dto_setup(
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_aud_wall_dto_setup(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ struct azalia_clock_info clock_info = { 0 };
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t src_sel;
+
+ /* DTO0 programming goal:
+ * - generate 24 MHz, 128*Fs from 24 MHz
+ * - use DTO0 when an active HDMI port is connected
+ *   (optionally a DP is connected)
+ */
+
+ /* calculate DTO settings */
+ get_azalia_clock_info_hdmi(
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
+ &clock_info);
+
+ DC_LOG_HW_AUDIO("\n%s: Input::requested_pixel_clock_100Hz = %d "
+ "calculated_pixel_clock_100Hz = %d\n"
+ "audio_dto_module = %d audio_dto_phase = %d\n\n", __func__,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
+ clock_info.audio_dto_module,
+ clock_info.audio_dto_phase);
+
+ /* On TN/SI, program DTO source select and DTO select before
+ * programming DTO modulo and DTO phase. These bits must be
+ * programmed first, otherwise there will be no HDMI audio at boot.
+ * This is a HW sequence change (different from older ASICs); use
+ * caution when changing this programming sequence.
+ *
+ * HDMI enabled, using DTO0:
+ * program the master CRTC for DTO0. */
+ src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+ REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+ DCCG_AUDIO_DTO_SEL, 0);
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+ DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+ DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+ } else {
+ /* DTO1 programming goal:
+ * - generate 24 MHz, 128*Fs from 24 MHz (DCE6 does not support 512*Fs)
+ * - default is to use DTO1, and switch to DTO0 when an audio
+ *   master HDMI port is connected
+ * - use as the default for DP
+ *
+ * calculate DTO settings */
+ get_azalia_clock_info_dp(
+ crtc_info->requested_pixel_clock_100Hz,
+ pll_info,
+ &clock_info);
+
+ /* Program DTO select before programming DTO modulo and DTO
+ * phase; default to DTO1. */
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+
+ /* 512*Fs cannot be selected for DP:
+ * DCE6 has no DCCG_AUDIO_DTO2_USE_512FBR_DTO mask.
+ */
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+ DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+ DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+ }
+}
+#endif
+
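Both DTO paths above program a phase/module pair. Assuming the usual DTO relationship out = ref * phase / module (an assumption; the actual values come from the get_azalia_clock_info_{hdmi,dp}() helpers), a toy model is:

	/* Toy model only, under the assumed phase/module relationship;
	 * not the azalia helper itself. */
	static unsigned long long sketch_dto_out_hz(unsigned long long ref_hz,
			unsigned int phase, unsigned int module)
	{
		return module ? ref_hz * phase / module : 0;
	}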
static bool dce_aud_endpoint_valid(struct audio *audio)
{
uint32_t value;
@@ -926,6 +1018,19 @@ static const struct audio_funcs funcs = {
.az_configure = dce_aud_az_configure,
.destroy = dce_aud_destroy,
};
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct audio_funcs dce60_funcs = {
+ .endpoint_valid = dce_aud_endpoint_valid,
+ .hw_init = dce_aud_hw_init,
+ .wall_dto_setup = dce60_aud_wall_dto_setup,
+ .az_enable = dce_aud_az_enable,
+ .az_disable = dce_aud_az_disable,
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+};
+#endif
+
void dce_aud_destroy(struct audio **audio)
{
struct dce_audio *aud = DCE_AUD(*audio);
@@ -959,3 +1064,29 @@ struct audio *dce_audio_create(
return &audio->base;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+struct audio *dce60_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_audio_mask *masks
+ )
+{
+ struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+
+ if (audio == NULL) {
+ ASSERT_CRITICAL(audio);
+ return NULL;
+ }
+
+ audio->base.ctx = ctx;
+ audio->base.inst = inst;
+ audio->base.funcs = &dce60_funcs;
+
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+ return &audio->base;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 1392fab0860b..5622d5e32d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -64,6 +64,20 @@
SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define AUD_DCE60_MASK_SH_LIST(mask_sh)\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh), \
+ SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+#endif
struct dce_audio_registers {
uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
@@ -135,6 +149,15 @@ struct audio *dce_audio_create(
const struct dce_audio_shift *shifts,
const struct dce_audio_mask *masks);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+struct audio *dce60_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_audio_mask *masks);
+#endif
+
void dce_aud_destroy(struct audio **audio);
void dce_aud_hw_init(struct audio *audio);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 9cc65dc1970f..49ae5ff12da6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1149,7 +1149,8 @@ static uint32_t dcn3_get_pix_clk_dividers(
static const struct clock_source_funcs dcn3_clk_src_funcs = {
.cs_power_down = dce110_clock_source_power_down,
.program_pix_clk = dcn3_program_pix_clk,
- .get_pix_clk_dividers = dcn3_get_pix_clk_dividers
+ .get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
+ .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
#endif
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 5e044c2d3d6d..93e7f34d4775 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -46,6 +46,24 @@
SR(SMU_INTERRUPT_CONTROL), \
SR(DC_DMCU_SCRATCH)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define DMCU_DCE60_REG_LIST() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_STATUS), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(DC_DMCU_SCRATCH)
+#endif
+
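The DMCU_DCE60_REG_LIST() above follows DC's register-list idiom; an assumed illustration of how such SR() entries expand into designated initializers (the real macros live in the dce headers and also add per-block base offsets):

	/* Assumed illustration; names and the offset value are hypothetical. */
	#define SKETCH_SR(reg_name) .reg_name = mm ## reg_name

	enum { mmDMCU_CTRL = 0x1600 }; /* hypothetical register offset */

	struct sketch_dmcu_regs { unsigned int DMCU_CTRL; };

	static const struct sketch_dmcu_regs sketch_regs = {
		SKETCH_SR(DMCU_CTRL), /* -> .DMCU_CTRL = mmDMCU_CTRL */
	};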
#define DMCU_DCE80_REG_LIST() \
SR(DMCU_CTRL), \
SR(DMCU_STATUS), \
@@ -104,6 +122,25 @@
STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define DMCU_MASK_SH_LIST_DCE60(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_RD_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh)
+#endif
+
#define DMCU_MASK_SH_LIST_DCE80(mask_sh) \
DMCU_SF(DMCU_CTRL, \
DMCU_ENABLE, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index e1c5839a80dc..4202fadb2c0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -85,6 +85,15 @@ void dce_pipe_control_lock(struct dc *dc,
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ /* DCE6 has no BLND_V_UPDATE_LOCK register */
+}
+#endif
+
void dce_set_blender_mode(struct dce_hwseq *hws,
unsigned int blnd_inst,
enum blnd_mode mode)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 66b88d6ba398..70bbc1311327 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -110,6 +110,12 @@
SR(BLNDV_CONTROL),\
HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define HWSEQ_DCE6_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST_DCE8(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
+#endif
+
#define HWSEQ_DCE8_REG_LIST() \
HWSEQ_DCEF_REG_LIST_DCE8(), \
HWSEQ_BLND_REG_LIST(), \
@@ -488,6 +494,12 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define HWSEQ_DCE6_MASK_SH_LIST(mask_sh)\
+ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+#endif
+
#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
@@ -836,6 +848,12 @@ void dce_pipe_control_lock(struct dc *dc,
void dce_set_blender_mode(struct dce_hwseq *hws,
unsigned int blnd_inst, enum blnd_mode mode);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+#endif
+
void dce_clock_gating_power_up(struct dce_hwseq *hws,
bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
index ce30dbf579d4..80569a2734eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -231,6 +231,22 @@ static void dce_ipp_set_degamma(
CURSOR2_DEGAMMA_MODE, degamma_type);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_ipp_set_degamma(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
+
+ ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS || mode == IPP_DEGAMMA_MODE_HW_sRGB);
+ /* DCE6 does not have CURSOR2_DEGAMMA_MODE bit in DEGAMMA_CONTROL reg */
+ REG_SET_2(DEGAMMA_CONTROL, 0,
+ GRPH_DEGAMMA_MODE, degamma_type,
+ CURSOR_DEGAMMA_MODE, degamma_type);
+}
+#endif
+
static const struct ipp_funcs dce_ipp_funcs = {
.ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes,
.ipp_cursor_set_position = dce_ipp_cursor_set_position,
@@ -239,6 +255,17 @@ static const struct ipp_funcs dce_ipp_funcs = {
.ipp_set_degamma = dce_ipp_set_degamma
};
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct ipp_funcs dce60_ipp_funcs = {
+ .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes,
+ .ipp_cursor_set_position = dce_ipp_cursor_set_position,
+ .ipp_program_prescale = dce_ipp_program_prescale,
+ .ipp_program_input_lut = dce_ipp_program_input_lut,
+ .ipp_set_degamma = dce60_ipp_set_degamma
+};
+#endif
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -260,6 +287,25 @@ void dce_ipp_construct(
ipp_dce->ipp_mask = ipp_mask;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_ipp_construct(
+ struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask)
+{
+ ipp_dce->base.ctx = ctx;
+ ipp_dce->base.inst = inst;
+ ipp_dce->base.funcs = &dce60_ipp_funcs;
+
+ ipp_dce->regs = regs;
+ ipp_dce->ipp_shift = ipp_shift;
+ ipp_dce->ipp_mask = ipp_mask;
+}
+#endif
+
void dce_ipp_destroy(struct input_pixel_processor **ipp)
{
kfree(TO_DCE_IPP(*ipp));
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
index ca04e97d44c3..0028d4bdd81b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
@@ -147,6 +147,46 @@
IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_SF(CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh)
+#endif
+
#define IPP_REG_FIELD_LIST(type) \
type CURSOR_UPDATE_LOCK; \
type CURSOR_EN; \
@@ -233,6 +273,15 @@ void dce_ipp_construct(struct dce_ipp *ipp_dce,
const struct dce_ipp_shift *ipp_shift,
const struct dce_ipp_mask *ipp_mask);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_ipp_construct(struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask);
+#endif
+
void dce_ipp_destroy(struct input_pixel_processor **ipp);
#endif /* _DCE_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8d8c84c81b34..b409f6b2bfd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -425,6 +425,59 @@ static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
enable_phy_bypass_mode(enc110, false);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(
+ struct dce110_link_encoder *enc110,
+ unsigned int cp2520_pattern)
+{
+ /* Previously there was a register, DP_HBR2_EYE_PATTERN, that was
+ * enabled to generate the pattern. It does not work with the latest
+ * spec change, so we program the following registers manually.
+ *
+ * The following settings have been confirmed
+ * by Nick Chorney and Sandra Liu */
+
+ /* Disable PHY Bypass mode to set up the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Setup DIG encoder in DP SST mode */
+ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+ /* ensure normal panel mode. */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* No VBID after BS (SR).
+ * DP_LINK_FRAMING_CNTL change history (Sandra Liu):
+ * 11000260 / 11000104 / 110000FC */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0xFC,
+ DP_VBID_DISABLE, 1,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip swap BS with SR */
+
+ /* select cp2520 patterns */
+ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
+ REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
+ DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
+ else
+ /* pre-DCE11 can only generate CP2520 pattern 2 */
+ ASSERT(cp2520_pattern == 2);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* disable video stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+}
+#endif
+
static void set_dp_phy_pattern_passthrough_mode(
struct dce110_link_encoder *enc110,
enum dp_panel_mode panel_mode)
@@ -452,6 +505,35 @@ static void set_dp_phy_pattern_passthrough_mode(
disable_prbs_mode(enc110);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_set_dp_phy_pattern_passthrough_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ /* program correct panel mode */
+ setup_panel_mode(enc110, panel_mode);
+
+ /* restore LINK_FRAMING_CNTL
+ * in case we were doing HBR2 compliance pattern before
+ */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0x2000,
+ DP_VBID_DISABLE, 0,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip DPHY_SCRAMBLER_BS_COUNT restore */
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+#endif
+
/* return value is bit-vector */
static uint8_t get_frontend_source(
enum engine_id engine)
@@ -490,6 +572,20 @@ static void configure_encoder(
REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_configure_encoder(
+ struct dce110_link_encoder *enc110,
+ const struct dc_link_settings *link_settings)
+{
+ /* set number of lanes */
+
+ REG_SET(DP_CONFIG, 0,
+ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+ /* DCE6 has no DP_DPHY_SCRAM_CNTL register, skip setup scrambler */
+}
+#endif
+
static void aux_initialize(
struct dce110_link_encoder *enc110)
{
@@ -1059,6 +1155,87 @@ void dce110_link_encoder_enable_dp_mst_output(
BREAK_TO_DEBUGGER();
}
}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* enables DP PHY output */
+void dce60_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ dce60_configure_encoder(enc110, link_settings);
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output in MST mode */
+void dce60_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ dce60_configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+#endif
+
/*
* @brief
* Disable transmitter and its encoder
@@ -1208,6 +1385,63 @@ void dce110_link_encoder_dp_set_phy_pattern(
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* set DP PHY test and training patterns */
+void dce60_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (param->dp_phy_pattern) {
+ case DP_TEST_PATTERN_TRAINING_PATTERN1:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN2:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN3:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
+ break;
+ case DP_TEST_PATTERN_D102:
+ set_dp_phy_pattern_d102(enc110);
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ set_dp_phy_pattern_symbol_error(enc110);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ set_dp_phy_pattern_prbs7(enc110);
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ set_dp_phy_pattern_80bit_custom(
+ enc110, param->custom_pattern);
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1);
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2);
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ dce60_set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3);
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE: {
+ dce60_set_dp_phy_pattern_passthrough_mode(
+ enc110, param->dp_panel_mode);
+ break;
+ }
+
+ default:
+ /* invalid phy pattern */
+ ASSERT_CRITICAL(false);
+ break;
+ }
+}
+#endif
+
static void fill_stream_allocation_row_info(
const struct link_mst_stream_allocation *stream_allocation,
uint32_t *src,
@@ -1407,3 +1641,138 @@ void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
*link_settings = max_link_cap;
}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce60_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce60_link_encoder_enable_dp_mst_output,
+ .enable_lvds_output = dce110_link_encoder_enable_lvds_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce60_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dce110_link_encoder_enable_hpd,
+ .disable_hpd = dce110_link_encoder_disable_hpd,
+ .is_dig_enabled = dce110_is_dig_enabled,
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+};
+
+void dce60_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+
+ enc110->base.funcs = &dce60_lnk_enc_funcs;
+ enc110->base.ctx = init_data->ctx;
+ enc110->base.id = init_data->encoder;
+
+ enc110->base.hpd_source = init_data->hpd_source;
+ enc110->base.connector = init_data->connector;
+
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc110->base.features = *enc_features;
+
+ enc110->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc110->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc110->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+ * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+ * preferred DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * The preferred DIG assignment is decided by board design.
+ * DCE 8.0 has at most 6 UNIPHYs; we assume board design and VBIOS
+ * will filter out the 7th UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This lets DCE 8.1 share DCE 8.0 code as much as possible.
+ */
+
+ enc110->link_regs = link_regs;
+ enc110->aux_regs = aux_regs;
+ enc110->hpd_regs = hpd_regs;
+
+ switch (enc110->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc110->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc110->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc110->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc110->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc110->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc110->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc110->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+ enc110->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (BP_RESULT_OK == result) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+ enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
+#endif
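The UNIPHY-to-DIG switch in the constructor above is a 1:1 A..G mapping. A compact arithmetic form is possible only if the enum values are consecutive, which the driver's switch deliberately does not assume (the real transmitter enum interleaves other encoders); a self-contained sketch with local stand-in enums:

	/* Local stand-in enums; not the driver's transmitter/engine enums. */
	enum sketch_transmitter { SK_UNIPHY_A, SK_UNIPHY_B, SK_UNIPHY_C,
		SK_UNIPHY_D, SK_UNIPHY_E, SK_UNIPHY_F, SK_UNIPHY_G, SK_TX_OTHER };
	enum sketch_engine { SK_DIGA, SK_DIGB, SK_DIGC, SK_DIGD, SK_DIGE,
		SK_DIGF, SK_DIGG, SK_ENGINE_UNKNOWN };

	static enum sketch_engine sketch_preferred_engine(enum sketch_transmitter t)
	{
		/* Valid only because these local enums are consecutive. */
		if (t <= SK_UNIPHY_G)
			return (enum sketch_engine)(SK_DIGA + (int)t);
		return SK_ENGINE_UNKNOWN;
	}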
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 66027d496778..cb714a48b171 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -76,6 +76,34 @@
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SR(DCI_MEM_PWR_STATUS)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define LE_DCE60_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id)
+#endif
+
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_COMMON_REG_LIST_BASE(id)
@@ -171,6 +199,16 @@ void dce110_link_encoder_construct(
const struct dce110_link_enc_aux_registers *aux_regs,
const struct dce110_link_enc_hpd_registers *hpd_regs);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs);
+#endif
+
bool dce110_link_encoder_validate_dvi_output(
const struct dce110_link_encoder *enc110,
enum signal_type connector_signal,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 51481e922eb9..79a6f261a0da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -174,6 +174,22 @@ static void program_urgency_watermark(
URGENCY_HIGH_WATERMARK, urgency_high_wm);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_program_urgency_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t urgency_low_wm,
+ uint32_t urgency_high_wm)
+{
+ REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL3,
+ URGENCY_WATERMARK_MASK, wm_select);
+
+ REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+ URGENCY_LOW_WATERMARK, urgency_low_wm,
+ URGENCY_HIGH_WATERMARK, urgency_high_wm);
+}
+#endif
+
static void dce120_program_urgency_watermark(
struct dce_mem_input *dce_mi,
uint32_t wm_select,
@@ -193,6 +209,25 @@ static void dce120_program_urgency_watermark(
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_program_nbp_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t nbp_wm)
+{
+ REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE, 1,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
+}
+#endif
+
static void program_nbp_watermark(
struct dce_mem_input *dce_mi,
uint32_t wm_select,
@@ -225,6 +260,20 @@ static void program_nbp_watermark(
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_program_stutter_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t stutter_mark)
+{
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+#endif
+
static void dce120_program_stutter_watermark(
struct dce_mem_input *dce_mi,
uint32_t wm_select,
@@ -286,6 +335,34 @@ static void dce_mi_program_display_marks(
program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_mi_program_display_marks(
+ struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter_exit,
+ struct dce_watermarks stutter_enter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ dce60_program_urgency_watermark(dce_mi, 2, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ dce60_program_urgency_watermark(dce_mi, 1, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ dce60_program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
+ dce60_program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
+
+ dce60_program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */
+ dce60_program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */
+}
+#endif
+
static void dce112_mi_program_display_marks(struct mem_input *mi,
struct dce_watermarks nbp,
struct dce_watermarks stutter_exit,
@@ -369,7 +446,7 @@ static void program_tiling(
*/
}
- if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
+ if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */
REG_UPDATE_9(GRPH_CONTROL,
GRPH_NUM_BANKS, info->gfx8.num_banks,
GRPH_BANK_WIDTH, info->gfx8.bank_width,
@@ -385,6 +462,23 @@ static void program_tiling(
GRPH_Z, 0);
*/
}
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but reuses gfx8 struct */
+ REG_UPDATE_8(GRPH_CONTROL,
+ GRPH_NUM_BANKS, info->gfx8.num_banks,
+ GRPH_BANK_WIDTH, info->gfx8.bank_width,
+ GRPH_BANK_HEIGHT, info->gfx8.bank_height,
+ GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
+ GRPH_TILE_SPLIT, info->gfx8.tile_split,
+ /* DCE6 has no GRPH_MICRO_TILE_MODE mask */
+ GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
+ GRPH_ARRAY_MODE, info->gfx8.array_mode,
+ GRPH_COLOR_EXPANSION_MODE, 1);
+ /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
+ /*
+ GRPH_Z, 0);
+ */
+ }
}
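The tiling path above dispatches on which mask fields are defined for the ASIC generation: GRPH_MICRO_TILE_MODE exists only in the GFX8 mask list, while the DCE6 list added by this patch defines GRPH_ARRAY_MODE without it. A hedged sketch of that presence check (simplified stand-ins):

	/* A zero mask field means the bit does not exist on this ASIC,
	 * so its write path is skipped. Struct and fields are stand-ins. */
	struct sketch_tile_masks {
		unsigned int GRPH_MICRO_TILE_MODE; /* set only in the GFX8 mask list */
		unsigned int GRPH_ARRAY_MODE;      /* set in the GFX6 mask list too */
	};

	static void sketch_program_tiling(const struct sketch_tile_masks *m)
	{
		if (m->GRPH_MICRO_TILE_MODE) {
			/* GFX8: program the full set, including micro-tile mode */
		}
		if (m->GRPH_ARRAY_MODE) {
			/* GFX6 (reusing the gfx8 info struct): no micro-tile field */
		}
	}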
@@ -429,6 +523,36 @@ static void program_size_and_rotation(
GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_program_size(
+ struct dce_mem_input *dce_mi,
+ enum dc_rotation_angle rotation, /* not used in DCE6 */
+ const struct plane_size *plane_size)
+{
+ struct rect hw_rect = plane_size->surface_size;
+ /* DCE6 has no HW rotation: both the rotation_angles table and the
+ * ROTATION_ANGLE_* processing are skipped.
+ */
+
+ REG_SET(GRPH_X_START, 0,
+ GRPH_X_START, hw_rect.x);
+
+ REG_SET(GRPH_Y_START, 0,
+ GRPH_Y_START, hw_rect.y);
+
+ REG_SET(GRPH_X_END, 0,
+ GRPH_X_END, hw_rect.width);
+
+ REG_SET(GRPH_Y_END, 0,
+ GRPH_Y_END, hw_rect.height);
+
+ REG_SET(GRPH_PITCH, 0,
+ GRPH_PITCH, plane_size->surface_pitch);
+}
+#endif
+
static void program_grph_pixel_format(
struct dce_mem_input *dce_mi,
enum surface_pixel_format format)
@@ -521,6 +645,28 @@ static void dce_mi_program_surface_config(
program_grph_pixel_format(dce_mi, format);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_mi_program_surface_config(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation, /* not used in DCE6 */
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
+
+ program_tiling(dce_mi, tiling_info);
+ dce60_program_size(dce_mi, rotation, plane_size);
+
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
+ format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ program_grph_pixel_format(dce_mi, format);
+}
+#endif
+
static uint32_t get_dmif_switch_time_us(
uint32_t h_total,
uint32_t v_total,
@@ -741,6 +887,20 @@ static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct mem_input_funcs dce60_mi_funcs = {
+ .mem_input_program_display_marks = dce60_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce60_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+#endif
+
static const struct mem_input_funcs dce112_mi_funcs = {
.mem_input_program_display_marks = dce112_mi_program_display_marks,
.allocate_mem_input = dce_mi_allocate_dmif,
@@ -783,6 +943,20 @@ void dce_mem_input_construct(
dce_mi->masks = mi_mask;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+ dce_mi->base.funcs = &dce60_mi_funcs;
+}
+#endif
+
void dce112_mem_input_construct(
struct dce_mem_input *dce_mi,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index d15b0d7f47fc..23db5c72f07e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -58,6 +58,31 @@
SRI(DVMM_PTE_CONTROL, DCP, id),\
SRI(DVMM_PTE_ARB_CONTROL, DCP, id)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define MI_DCE6_REG_LIST(id)\
+ SRI(GRPH_ENABLE, DCP, id),\
+ SRI(GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_X_START, DCP, id),\
+ SRI(GRPH_Y_START, DCP, id),\
+ SRI(GRPH_X_END, DCP, id),\
+ SRI(GRPH_Y_END, DCP, id),\
+ SRI(GRPH_PITCH, DCP, id),\
+ SRI(GRPH_SWAP_CNTL, DCP, id),\
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_UPDATE, DCP, id),\
+ SRI(GRPH_FLIP_CONTROL, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
+ SRI(DPG_PIPE_ARBITRATION_CONTROL3, DMIF_PG, id),\
+ SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
+ SRI(DMIF_BUFFER_CONTROL, PIPE, id)
+#endif
+
#define MI_DCE8_REG_LIST(id)\
MI_DCE_BASE_REG_LIST(id),\
SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
@@ -104,6 +129,9 @@ struct dce_mem_input_registers {
uint32_t GRPH_SECONDARY_SURFACE_ADDRESS_HIGH;
/* DMIF_PG */
uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t DPG_PIPE_ARBITRATION_CONTROL3;
+#endif
uint32_t DPG_WATERMARK_MASK_CONTROL;
uint32_t DPG_PIPE_URGENCY_CONTROL;
uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL;
@@ -126,6 +154,18 @@ struct dce_mem_input_registers {
#define SFB(blk_name, reg_name, field_name, post_fix)\
.field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define MI_GFX6_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+#endif
+
#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\
@@ -162,6 +202,32 @@ struct dce_mem_input_registers {
SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\
SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define MI_DCP_MASK_SH_LIST_DCE6(mask_sh, blk)\
+ SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
+ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
+ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
+ SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
+ SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS, GRPH_PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\
+ SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh)
+#endif
+
#define MI_DCP_DCE11_MASK_SH_LIST(mask_sh, blk)\
SFB(blk, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, mask_sh)
@@ -172,6 +238,33 @@ struct dce_mem_input_registers {
SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, mask_sh),\
SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define MI_DMIF_PG_MASK_SH_LIST_DCE6(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_DCE6(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL3, URGENCY_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_DCE6_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST_DCE6(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_LIST_DCE6(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_DCE6(mask_sh, ),\
+ MI_GFX6_TILE_MASK_SH_LIST(mask_sh, )
+#endif
+
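For readers new to the DC register tables: SFB()/SF() build the register field name by token pasting and assign it to a designated initializer, so the same list instantiated once with _MASK and once with __SHIFT suffixes fills the matching mask and shift structs. A minimal sketch of the mechanism (all names below are illustrative, not the real DCE6 definitions):

	/* Simplified model of the SFB() expansion used above. */
	#define DEMO_SFB(blk, reg, field, post) \
		.field = blk ## reg ## __ ## field ## post

	#define GRPH_ENABLE__GRPH_ENABLE_MASK	0x00000001
	#define GRPH_ENABLE__GRPH_ENABLE__SHIFT	0x00000000

	struct demo_fields { unsigned int GRPH_ENABLE; };

	static const struct demo_fields demo_mask = {
		DEMO_SFB(, GRPH_ENABLE, GRPH_ENABLE, _MASK),	/* .GRPH_ENABLE = 0x1 */
	};
	static const struct demo_fields demo_shift = {
		DEMO_SFB(, GRPH_ENABLE, GRPH_ENABLE, __SHIFT),	/* .GRPH_ENABLE = 0 */
	};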
#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
@@ -345,6 +438,16 @@ void dce_mem_input_construct(
const struct dce_mem_input_shift *mi_shift,
const struct dce_mem_input_mask *mi_mask);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+#endif
+
void dce112_mem_input_construct(
struct dce_mem_input *dce_mi,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 51081d9ae3fb..e459ae65aaf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -141,6 +141,47 @@ static void set_truncation(
params->flags.TRUNCATE_MODE);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/**
+ * dce60_set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) the HW removed 12-bit FMT support for DCE11 for power-saving reasons.
+ */
+static void dce60_set_truncation(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /* DCE6 has no FMT_TRUNCATE_MODE bit in FMT_BIT_DEPTH_CONTROL reg */
+
+ /* Disable truncation */
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 0,
+ FMT_TRUNCATE_DEPTH, 0);
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ /* 8bpc trunc on YCbCr422 */
+ if (params->flags.TRUNCATE_DEPTH == 1)
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 1);
+ else if (params->flags.TRUNCATE_DEPTH == 2)
+ /* 10bpc trunc on YCbCr422 */
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 2);
+ return;
+ }
+ /* TODO: handle other formats */
+ if (params->flags.TRUNCATE_ENABLED == 0)
+ return;
+ /* Set truncation depth and enable truncation */
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH,
+ params->flags.TRUNCATE_DEPTH);
+}
+#endif
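
The depth encoding dce60_set_truncation() programs can be restated as a small helper (an illustrative sketch, not part of DC):

	/* Illustrative mapping of FMT_TRUNCATE_DEPTH to output bpc. */
	static inline int demo_dce6_truncate_bpc(unsigned int depth, bool ycbcr422)
	{
		if (ycbcr422)			/* YCbCr 4:2:2 path above */
			return depth == 1 ? 8 : depth == 2 ? 10 : -1;
		return depth == 0 ? 6 : 8;	/* RGB: 18 bpp (6 bpc) or 24 bpp (8 bpc) */
	}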
/**
* set_spatial_dither
@@ -373,6 +414,57 @@ void dce110_opp_set_clamping(
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/**
+ * Set Clamping for DCE6 parts
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ * 7 for programmable
+ * 2) Enable clamp if Limited range requested
+ */
+void dce60_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /*Set clamp control*/
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 7);
+
+ /* DCE6 does have FMT_CLAMP_COMPONENT_{R,G,B} registers, but this path leaves them unprogrammed */
+
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
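The switch above amounts to a fixed mapping from clamping level to FMT_CLAMP_COLOR_FORMAT; restated as a sketch (the driver programs the fields directly instead):

	/* Illustrative clamping_level -> FMT_CLAMP_COLOR_FORMAT mapping. */
	static inline uint32_t demo_dce6_clamp_format(int clamping_level)
	{
		switch (clamping_level) {
		case CLAMPING_LIMITED_RANGE_8BPC:		return 1;
		case CLAMPING_LIMITED_RANGE_10BPC:		return 2;
		case CLAMPING_LIMITED_RANGE_12BPC:		return 3;
		case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:	return 7;
		default:					return 0; /* full range: clamp off */
		}
	}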
/**
* set_pixel_encoding
*
@@ -408,6 +500,39 @@ static void set_pixel_encoding(
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/**
+ * dce60_set_pixel_encoding
+ * DCE6 has no FMT_SUBSAMPLING_{MODE,ORDER} bits in FMT_CONTROL reg
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ */
+static void dce60_set_pixel_encoding(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS)
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
+ else
+ REG_UPDATE(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0);
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1);
+ }
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
+ }
+}
+#endif
+
void dce110_opp_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params)
@@ -419,6 +544,19 @@ void dce110_opp_program_bit_depth_reduction(
set_temporal_dither(opp110, params);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ dce60_set_truncation(opp110, params);
+ set_spatial_dither(opp110, params);
+ set_temporal_dither(opp110, params);
+}
+#endif
+
void dce110_opp_program_clamping_and_pixel_encoding(
struct output_pixel_processor *opp,
const struct clamping_and_pixel_encoding_params *params)
@@ -429,6 +567,19 @@ void dce110_opp_program_clamping_and_pixel_encoding(
set_pixel_encoding(opp110, params);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ dce60_opp_set_clamping(opp110, params);
+ dce60_set_pixel_encoding(opp110, params);
+}
+#endif
+
static void program_formatter_420_memory(struct output_pixel_processor *opp)
{
struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
@@ -526,7 +677,32 @@ void dce110_opp_program_fmt(
return;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_420_memory(opp);
+
+ dce60_opp_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ dce60_opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_reset_dig_resync_fifo(opp);
+ return;
+}
+#endif
@@ -541,6 +717,15 @@ static const struct opp_funcs funcs = {
.opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction
};
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct opp_funcs dce60_opp_funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce60_opp_program_fmt,
+ .opp_program_bit_depth_reduction = dce60_opp_program_bit_depth_reduction
+};
+#endif
+
void dce110_opp_construct(struct dce110_opp *opp110,
struct dc_context *ctx,
uint32_t inst,
@@ -559,6 +744,26 @@ void dce110_opp_construct(struct dce110_opp *opp110,
opp110->opp_mask = opp_mask;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask)
+{
+ opp110->base.funcs = &dce60_opp_funcs;
+
+ opp110->base.ctx = ctx;
+
+ opp110->base.inst = inst;
+
+ opp110->regs = regs;
+ opp110->opp_shift = opp_shift;
+ opp110->opp_mask = opp_mask;
+}
+#endif
+
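A DCE6 resource file would instantiate the OPP roughly as below (a sketch: demo_opp_regs/demo_opp_shift/demo_opp_mask are hypothetical tables that would be built with OPP_DCE_60_REG_LIST() and OPP_COMMON_MASK_SH_LIST_DCE_60()):

	static struct output_pixel_processor *demo_dce60_opp_create(
		struct dc_context *ctx, uint32_t inst)
	{
		struct dce110_opp *opp110 = kzalloc(sizeof(*opp110), GFP_KERNEL);

		if (!opp110)
			return NULL;

		dce60_opp_construct(opp110, ctx, inst,
				    &demo_opp_regs[inst], &demo_opp_shift,
				    &demo_opp_mask);
		return &opp110->base;
	}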
void dce110_opp_destroy(struct output_pixel_processor **opp)
{
if (*opp)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
index 2ab0147cbd9d..4d484ef60f35 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -81,6 +81,17 @@ enum dce110_opp_reg_type {
OPP_COMMON_REG_LIST_BASE(id), \
SRI(CONTROL, FMT_MEMORY, id)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define OPP_DCE_60_REG_LIST(id) \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id)
+#endif
+
#define OPP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -192,6 +203,35 @@ enum dce110_opp_reg_type {
OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh),\
OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define OPP_COMMON_MASK_SH_LIST_DCE_60(mask_sh)\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh)
+#endif
+
#define OPP_REG_FIELD_LIST(type) \
type FMT_DYNAMIC_EXP_EN; \
type FMT_DYNAMIC_EXP_MODE; \
@@ -279,6 +319,15 @@ void dce110_opp_construct(struct dce110_opp *opp110,
const struct dce_opp_shift *opp_shift,
const struct dce_opp_mask *opp_mask);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask);
+#endif
+
void dce110_opp_destroy(struct output_pixel_processor **opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index 43781e77be43..74f7619d4154 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -46,13 +46,14 @@
#define FN(reg_name, field_name) \
dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
{
uint64_t current_backlight;
uint32_t round_result;
uint32_t pwm_period_cntl, bl_period, bl_int_count;
uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
uint32_t bl_period_mask, bl_pwm_mask;
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
@@ -75,7 +76,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
else
bl_pwm &= 0xFFFF;
- current_backlight = bl_pwm << (1 + bl_int_count);
+ current_backlight = (uint64_t)bl_pwm << (1 + bl_int_count);
if (bl_period == 0)
bl_period = 0xFFFF;
@@ -150,7 +151,7 @@ static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
- current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+ current_backlight = dce_get_16_bit_backlight_from_pwm(panel_cntl);
return current_backlight;
}
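The (uint64_t) cast added above is the functional fix in this hunk: the shift would otherwise be evaluated in 32-bit arithmetic and truncated before being widened into current_backlight. A self-contained demonstration:

	#include <stdint.h>

	static void demo_shift_widening(void)
	{
		uint32_t bl_pwm = 0xFFFF;
		unsigned int bl_int_count = 16;

		uint64_t lost = bl_pwm << (1 + bl_int_count);            /* 0xFFFE0000: bit 32 dropped */
		uint64_t kept = (uint64_t)bl_pwm << (1 + bl_int_count);  /* 0x1FFFE0000 */

		(void)lost;
		(void)kept;
	}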
@@ -158,11 +159,15 @@ static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
- uint32_t value;
+ uint32_t blon, blon_ovrd, pwrseq_target_state;
- REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+ REG_GET_2(PWRSEQ_CNTL, LVTMA_BLON, &blon, LVTMA_BLON_OVRD, &blon_ovrd);
+ REG_GET(PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, &pwrseq_target_state);
- return value;
+ if (blon_ovrd)
+ return blon;
+ else
+ return pwrseq_target_state;
}
static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
@@ -273,6 +278,7 @@ static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
.is_panel_powered_on = dce_is_panel_powered_on,
.store_backlight_level = dce_store_backlight_level,
.driver_set_backlight = dce_driver_set_backlight,
+ .get_current_backlight = dce_get_16_bit_backlight_from_pwm,
};
void dce_panel_cntl_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
index 99c68ca9c7e0..6bd1196083a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -54,15 +54,17 @@
SR(BL_PWM_CNTL2), \
SR(BL_PWM_PERIOD_CNTL), \
SR(BL_PWM_GRP1_REG_LOCK), \
- SR(BIOS_SCRATCH_2)
+ NBIO_SR(BIOS_SCRATCH_2)
#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON_OVRD, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, mask_sh), \
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
@@ -76,8 +78,10 @@
#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
type LVTMA_BLON;\
+ type LVTMA_BLON_OVRD;\
type LVTMA_DIGON;\
type LVTMA_DIGON_OVRD;\
+ type LVTMA_PWRSEQ_TARGET_STATE; \
type LVTMA_PWRSEQ_TARGET_STATE_R; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 4cdaaf4d881c..5054bb567b74 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -710,7 +710,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute(
ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
}
-static void dce110_stream_encoder_set_mst_bandwidth(
+static void dce110_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
{
@@ -1621,8 +1621,8 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = {
dce110_stream_encoder_dvi_set_stream_attribute,
.lvds_set_stream_attribute =
dce110_stream_encoder_lvds_set_stream_attribute,
- .set_mst_bandwidth =
- dce110_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ dce110_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
dce110_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index ab63d0d0304c..2a32b66959ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -146,6 +146,33 @@ static bool setup_scaling_configuration(
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static bool dce60_setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+
+ if (data->taps.h_taps + data->taps.v_taps <= 2) {
+ /* Set bypass */
+
+ /* DCE6 has no SCL_MODE register, skip scale mode programming */
+
+ return false;
+ }
+
+ REG_SET_2(SCL_TAP_CONTROL, 0,
+ SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+
+ /* DCE6 has no SCL_MODE register, skip scale mode programming */
+
+ /* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
+
+ return true;
+}
+#endif
+
static void program_overscan(
struct dce_transform *xfm_dce,
const struct scaler_data *data)
@@ -279,6 +306,36 @@ static void calculate_inits(
inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5;
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct sclh_ratios_inits *inits)
+{
+ struct fixed31_32 v_init;
+
+ inits->h_int_scale_ratio =
+ dc_fixpt_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio =
+ dc_fixpt_u2d19(data->ratios.vert) << 5;
+
+ /* DCE6 h_init_luma setting inspired by DCE110 */
+ inits->h_init_luma.integer = 1;
+
+ /* DCE6 h_init_chroma setting inspired by DCE110 */
+ inits->h_init_chroma.integer = 1;
+
+ v_init =
+ dc_fixpt_div_int(
+ dc_fixpt_add(
+ data->ratios.vert,
+ dc_fixpt_from_int(data->taps.v_taps + 1)),
+ 2);
+ inits->v_init.integer = dc_fixpt_floor(v_init);
+ inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5;
+}
+#endif
+
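A worked example of the math above (assuming dc_fixpt_u2d19() returns the ratio as an unsigned 2.19 fixed-point integer):

	/* ratios.vert = 1.0, taps.v_taps = 4 (illustrative numbers):
	 *   v_int_scale_ratio = u2d19(1.0) << 5 = 0x80000 << 5 = 0x1000000
	 *   v_init            = (1.0 + (4 + 1)) / 2 = 3.0
	 *                       -> v_init.integer = 3, v_init.fraction = 0
	 */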
static void program_scl_ratios_inits(
struct dce_transform *xfm_dce,
struct scl_ratios_inits *inits)
@@ -301,6 +358,36 @@ static void program_scl_ratios_inits(
REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct sclh_ratios_inits *inits)
+{
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
+
+ /* DCE6 has SCL_HORZ_FILTER_INIT_RGB_LUMA register */
+ REG_SET_2(SCL_HORZ_FILTER_INIT_RGB_LUMA, 0,
+ SCL_H_INIT_INT_RGB_Y, inits->h_init_luma.integer,
+ SCL_H_INIT_FRAC_RGB_Y, inits->h_init_luma.fraction);
+
+ /* DCE6 has SCL_HORZ_FILTER_INIT_CHROMA register */
+ REG_SET_2(SCL_HORZ_FILTER_INIT_CHROMA, 0,
+ SCL_H_INIT_INT_CBCR, inits->h_init_chroma.integer,
+ SCL_H_INIT_FRAC_CBCR, inits->h_init_chroma.fraction);
+
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_INT, inits->v_init.integer,
+ SCL_V_INIT_FRAC, inits->v_init.fraction);
+
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+}
+#endif
+
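Unlike the REG_UPDATE_*() helpers, the REG_SET_*() calls used here start from an explicit initial register value (0 above) instead of reading the register back. A simplified model of the per-field placement they perform:

	/* Sketch: place field_value at its shift under its mask. */
	static uint32_t demo_set_field(uint32_t reg_value, uint32_t mask,
				       uint32_t shift, uint32_t field_value)
	{
		return (reg_value & ~mask) | ((field_value << shift) & mask);
	}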
static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
{
if (taps == 4)
@@ -399,6 +486,91 @@ static void dce_transform_set_scaler(
REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_transform_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h;
+
+ /*Use whole line buffer memory always*/
+ REG_SET(DC_LB_MEMORY_SPLIT, 0,
+ DC_LB_MEMORY_CONFIG, 0);
+
+ REG_SET(DC_LB_MEM_SIZE, 0,
+ DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+
+ /* Clear SCL_F_SHARP_CONTROL value to 0 */
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+
+ /* 1. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 2. Program taps and configuration */
+ is_scaling_required = dce60_setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 3. Calculate and program ratio, DCE6 filter initialization */
+ struct sclh_ratios_inits inits = { 0 };
+
+ /* DCE6 has specific calculate_inits() function */
+ dce60_calculate_inits(xfm_dce, data, &inits);
+
+ /* DCE6 has specific program_scl_ratios_inits() function */
+ dce60_program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
+
+ if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ /* 4. Program vertical filters */
+ if (xfm_dce->filter_v == NULL)
+ REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+ SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_ALPHA_VERTICAL);
+
+ /* 5. Program horizontal filters */
+ if (xfm_dce->filter_h == NULL)
+ REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+ SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_ALPHA_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_h = coeffs_h;
+ filter_updated = true;
+ }
+ }
+
+ /* 6. Program the viewport */
+ program_viewport(xfm_dce, &data->viewport);
+
+ /* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
+
+ /* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+}
+#endif
+
/*****************************************************************************
* set_clamp
*
@@ -664,6 +836,67 @@ static void program_bit_depth_reduction(
bit_depth_params->flags.HIGHPASS_RANDOM);
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/*****************************************************************************
+ * dce60_program_bit_depth_reduction
+ *
+ * @brief
+ * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
+ * Dither) for dce
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ ******************************************************************************/
+static void dce60_program_bit_depth_reduction(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ enum dcp_out_trunc_round_depth trunc_round_depth;
+ enum dcp_out_trunc_round_mode trunc_mode;
+ bool spatial_dither_enable;
+
+ ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+
+ spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
+ /* Default to 12 bit truncation without rounding */
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
+ trunc_mode = DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
+
+ if (bit_depth_params->flags.TRUNCATE_ENABLED) {
+ /* Don't enable dithering if truncation is enabled */
+ spatial_dither_enable = false;
+ trunc_mode = bit_depth_params->flags.TRUNCATE_MODE ?
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND :
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE;
+
+ if (bit_depth_params->flags.TRUNCATE_DEPTH == 0 ||
+ bit_depth_params->flags.TRUNCATE_DEPTH == 1)
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_8BIT;
+ else if (bit_depth_params->flags.TRUNCATE_DEPTH == 2)
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_10BIT;
+ else {
+ /*
+ * Invalid truncate/round depth. Setting here to 12bit
+ * to prevent use-before-initialize errors.
+ */
+ trunc_round_depth = DCP_OUT_TRUNC_ROUND_DEPTH_12BIT;
+ BREAK_TO_DEBUGGER();
+ }
+ }
+
+ /* DCE6 has no OUT_CLAMP_CONTROL_* registers - set_clamp() is skipped */
+ set_round(xfm_dce, trunc_mode, trunc_round_depth);
+ set_dither(xfm_dce,
+ spatial_dither_enable,
+ DCP_SPATIAL_DITHER_MODE_A_AA_A,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP,
+ bit_depth_params->flags.FRAME_RANDOM,
+ bit_depth_params->flags.RGB_RANDOM,
+ bit_depth_params->flags.HIGHPASS_RANDOM);
+}
+#endif
+
static int dce_transform_get_max_num_of_supported_lines(
struct dce_transform *xfm_dce,
enum lb_pixel_depth depth,
@@ -797,6 +1030,59 @@ static void dce_transform_set_pixel_storage_depth(
}
}
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static void dce60_transform_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth, expan_mode;
+ enum dc_color_depth color_depth;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ color_depth = COLOR_DEPTH_666;
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ color_depth = COLOR_DEPTH_888;
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ color_depth = COLOR_DEPTH_121212;
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_denormalization(xfm_dce, color_depth);
+ dce60_program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
+
+ /* DATA_FORMAT in DCE6 does not have PIXEL_DEPTH and PIXEL_EXPAN_MODE masks */
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /* we should not use unsupported capabilities
+ * unless required by a workaround */
+ DC_LOG_WARNING("%s: Capability not supported",
+ __func__);
+ }
+}
+#endif
+
static void program_gamut_remap(
struct dce_transform *xfm_dce,
const uint16_t *reg_val)
@@ -1335,6 +1621,21 @@ static const struct transform_funcs dce_transform_funcs = {
.transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps
};
+#if defined(CONFIG_DRM_AMD_DC_SI)
+static const struct transform_funcs dce60_transform_funcs = {
+ .transform_reset = dce_transform_reset,
+ .transform_set_scaler = dce60_transform_set_scaler,
+ .transform_set_gamut_remap = dce_transform_set_gamut_remap,
+ .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
+ .opp_set_csc_default = dce110_opp_set_csc_default,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
+ .transform_set_pixel_storage_depth = dce60_transform_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps = dce_transform_get_optimal_number_of_taps
+};
+#endif
+
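Only the scaler and pixel-storage-depth hooks differ from the generic table; the CSC and regamma entry points are reused from DCE11, and callers always dispatch through the vtable. A minimal illustration:

	/* Callers never name the DCE6 functions directly. */
	static void demo_set_scaler(struct transform *xfm,
				    const struct scaler_data *scl_data)
	{
		/* resolves to dce60_transform_set_scaler() on DCE6 hardware */
		xfm->funcs->transform_set_scaler(xfm, scl_data);
	}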
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -1365,3 +1666,32 @@ void dce_transform_construct(
xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_transform_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.inst = inst;
+ xfm_dce->base.funcs = &dce60_transform_funcs;
+
+ xfm_dce->regs = regs;
+ xfm_dce->xfm_shift = xfm_shift;
+ xfm_dce->xfm_mask = xfm_mask;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index 948281d8b6af..cbce194ec7b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -108,6 +108,68 @@
SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define XFM_COMMON_REG_LIST_DCE60_BASE(id) \
+ SRI(DATA_FORMAT, LB, id), \
+ SRI(GAMUT_REMAP_CONTROL, DCP, id), \
+ SRI(GAMUT_REMAP_C11_C12, DCP, id), \
+ SRI(GAMUT_REMAP_C13_C14, DCP, id), \
+ SRI(GAMUT_REMAP_C21_C22, DCP, id), \
+ SRI(GAMUT_REMAP_C23_C24, DCP, id), \
+ SRI(GAMUT_REMAP_C31_C32, DCP, id), \
+ SRI(GAMUT_REMAP_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_C11_C12, DCP, id), \
+ SRI(OUTPUT_CSC_C13_C14, DCP, id), \
+ SRI(OUTPUT_CSC_C21_C22, DCP, id), \
+ SRI(OUTPUT_CSC_C23_C24, DCP, id), \
+ SRI(OUTPUT_CSC_C31_C32, DCP, id), \
+ SRI(OUTPUT_CSC_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_CONTROL, DCP, id), \
+ SRI(REGAMMA_CNTLA_START_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_SLOPE_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL1, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL2, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_0_1, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_2_3, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_4_5, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_6_7, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_8_9, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_10_11, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_12_13, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_14_15, DCP, id), \
+ SRI(REGAMMA_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(REGAMMA_LUT_INDEX, DCP, id), \
+ SRI(REGAMMA_LUT_DATA, DCP, id), \
+ SRI(REGAMMA_CONTROL, DCP, id), \
+ SRI(DENORM_CONTROL, DCP, id), \
+ SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
+ SRI(OUT_ROUND_CONTROL, DCP, id), \
+ SRI(SCL_TAP_CONTROL, SCL, id), \
+ SRI(SCL_CONTROL, SCL, id), \
+ SRI(SCL_BYPASS_CONTROL, SCL, id), \
+ SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
+ SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
+ SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_COEF_RAM_SELECT, SCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ SRI(VIEWPORT_START, SCL, id), \
+ SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+ SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
+ SRI(DC_LB_MEMORY_SPLIT, LB, id), \
+ SRI(DC_LB_MEM_SIZE, LB, id), \
+ SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id), \
+ SRI(SCL_UPDATE, SCL, id), \
+ SRI(SCL_F_SHARP_CONTROL, SCL, id)
+
+#define XFM_COMMON_REG_LIST_DCE60(id) \
+ XFM_COMMON_REG_LIST_DCE60_BASE(id), \
+ SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id)
+#endif
+
#define XFM_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -204,6 +266,83 @@
XFM_SF(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, mask_sh),\
XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define XFM_COMMON_MASK_SH_LIST_DCE60(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh), \
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, DCP_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh) \
+ XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(DATA_FORMAT, INTERLEAVE_EN, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
+ XFM_SF(REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL_H_INIT_INT_RGB_Y, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL_H_INIT_FRAC_RGB_Y, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT_CHROMA, SCL_H_INIT_INT_CBCR, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT_CHROMA, SCL_H_INIT_FRAC_CBCR, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_FILTER_PICK_NEAREST, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_FILTER_PICK_NEAREST, mask_sh), \
+ XFM_SF(DC_LB_MEMORY_SPLIT, DC_LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(DC_LB_MEM_SIZE, DC_LB_MEM_SIZE, mask_sh)
+#endif
+
#define XFM_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) \
XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
@@ -302,6 +441,7 @@
type DCP_RGB_RANDOM_ENABLE; \
type DCP_HIGHPASS_RANDOM_ENABLE; \
type DENORM_MODE; \
+ type INTERLEAVE_EN; \
type PIXEL_DEPTH; \
type PIXEL_EXPAN_MODE; \
type GAMUT_REMAP_C11; \
@@ -365,12 +505,20 @@
type SCL_V_SCALE_RATIO; \
type SCL_H_INIT_INT; \
type SCL_H_INIT_FRAC; \
+ type SCL_H_INIT_INT_RGB_Y; \
+ type SCL_H_INIT_FRAC_RGB_Y; \
+ type SCL_H_INIT_INT_CBCR; \
+ type SCL_H_INIT_FRAC_CBCR; \
type SCL_V_INIT_INT; \
type SCL_V_INIT_FRAC; \
+ type DC_LB_MEMORY_CONFIG; \
+ type DC_LB_MEM_SIZE; \
type LB_MEMORY_CONFIG; \
type LB_MEMORY_SIZE; \
type SCL_V_2TAP_HARDCODE_COEF_EN; \
type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_V_FILTER_PICK_NEAREST; \
+ type SCL_H_FILTER_PICK_NEAREST; \
type SCL_COEF_UPDATE_COMPLETE; \
type ALPHA_EN
@@ -383,6 +531,9 @@ struct dce_transform_mask {
};
struct dce_transform_registers {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t DATA_FORMAT;
+#endif
uint32_t LB_DATA_FORMAT;
uint32_t GAMUT_REMAP_CONTROL;
uint32_t GAMUT_REMAP_C11_C12;
@@ -438,8 +589,16 @@ struct dce_transform_registers {
uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
+ uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
+#endif
uint32_t SCL_VERT_FILTER_INIT;
uint32_t SCL_AUTOMATIC_MODE_CONTROL;
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t DC_LB_MEMORY_SPLIT;
+ uint32_t DC_LB_MEM_SIZE;
+#endif
uint32_t LB_MEMORY_CTRL;
uint32_t SCL_UPDATE;
uint32_t SCL_F_SHARP_CONTROL;
@@ -457,6 +616,16 @@ struct scl_ratios_inits {
struct init_int_and_frac v_init;
};
+#if defined(CONFIG_DRM_AMD_DC_SI)
+struct sclh_ratios_inits {
+ uint32_t h_int_scale_ratio;
+ uint32_t v_int_scale_ratio;
+ struct init_int_and_frac h_init_luma;
+ struct init_int_and_frac h_init_chroma;
+ struct init_int_and_frac v_init;
+};
+#endif
+
enum ram_filter_type {
FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */
FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */
@@ -489,6 +658,15 @@ void dce_transform_construct(struct dce_transform *xfm_dce,
const struct dce_transform_shift *xfm_shift,
const struct dce_transform_mask *xfm_mask);
+#if defined(CONFIG_DRM_AMD_DC_SI)
+void dce60_transform_construct(struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask);
+#endif
+
bool dce_transform_get_optimal_number_of_taps(
struct transform *xfm,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 5167d6b8a48d..67af67ef2865 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -119,10 +119,11 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
/**
* Enable/Disable PSR.
*/
-static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ uint32_t retry_count, psr_state = 0;
cmd.psr_enable.header.type = DMUB_CMD__PSR;
@@ -136,6 +137,30 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit PSR may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait) {
+ for (retry_count = 0; retry_count <= 1000; retry_count++) {
+ dmub_psr_get_state(dmub, &psr_state);
+
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+
+ udelay(500);
+ }
+
+ /* assert if max retry hit */
+ if (retry_count >= 1000)
+ ASSERT(0);
+ }
}
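The wait added above is a bounded poll; extracted as a sketch (not an existing DC helper) it reads:

	/* Poll up to ~500 ms (1000 * 500 us) for the firmware-reported
	 * PSR state to match the requested enable/disable.
	 */
	static bool demo_wait_for_psr_state(struct dmub_psr *dmub, bool enable)
	{
		uint32_t psr_state = 0;
		int retry;

		for (retry = 0; retry <= 1000; retry++) {
			dmub_psr_get_state(dmub, &psr_state);
			if (enable ? psr_state != 0 : psr_state == 0)
				return true;
			udelay(500);
		}
		return false;	/* caller would ASSERT on timeout */
	}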
/**
@@ -231,10 +256,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
+ copy_settings_data->debug.u32All = 0;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
true : false;
- copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
- copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
+ copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index f404fecd6410..dc121ed92d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -36,7 +36,7 @@ struct dmub_psr {
struct dmub_psr_funcs {
bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
- void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+ void (*psr_enable)(struct dmub_psr *dmub, bool enable, bool wait);
void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
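Call sites now choose whether to block on the transition; an illustrative caller:

	/* wait = true: block until the firmware confirms the state change;
	 * pass false for fire-and-forget.
	 */
	static void demo_psr_set(struct dmub_psr *psr, bool enable)
	{
		psr->funcs->psr_enable(psr, enable, true);
	}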
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 45c9e9027886..3ac6c7b65a45 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -720,6 +720,7 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
+ struct dc_sink *sink = link->local_sink;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
@@ -752,6 +753,14 @@ void dce110_edp_wait_for_hpd_ready(
return;
}
+ if (sink != NULL) {
+ if (sink->edid_caps.panel_patch.extra_t3_ms > 0) {
+ int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+
+ msleep(extra_t3_in_ms);
+ }
+ }
+
dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
/* wait until timeout or panel detected */
@@ -801,37 +810,66 @@ void dce110_edp_power_control(
if (power_up !=
link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
+
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long time_since_edp_poweroff_ms =
+ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweroff), 1000000);
+ unsigned long long time_since_edp_poweron_ms =
+ div64_u64(dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweron), 1000000);
+ DC_LOG_HW_RESUME_S3(
+ "%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
+ __func__,
+ power_up,
+ current_ts,
+ link->link_trace.time_stamp.edp_poweroff,
+ link->link_trace.time_stamp.edp_poweron,
+ time_since_edp_poweroff_ms,
+ time_since_edp_poweron_ms);
+
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
- unsigned long long current_ts = dm_get_timestamp(ctx);
- unsigned long long duration_in_ms =
- div64_u64(dm_get_elapse_time_in_ns(
- ctx,
- current_ts,
- link->link_trace.time_stamp.edp_poweroff), 1000000);
- unsigned long long wait_time_ms = 0;
-
- /* max 500ms from LCDVDD off to on */
- unsigned long long edp_poweroff_time_ms = 500;
+ /* eDP requires a minimum of 500 ms from LCDVDD off to on */
+ unsigned long long remaining_min_edp_poweroff_time_ms = 500;
+ /* add time defined by a panel patch, if any (extra_t12_ms is usually 0) */
if (link->local_sink != NULL)
- edp_poweroff_time_ms =
- 500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
- if (link->link_trace.time_stamp.edp_poweroff == 0)
- wait_time_ms = edp_poweroff_time_ms;
- else if (duration_in_ms < edp_poweroff_time_ms)
- wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
-
- if (wait_time_ms) {
- msleep(wait_time_ms);
- dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
- __func__, wait_time_ms);
+ remaining_min_edp_poweroff_time_ms +=
+ link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+
+ /* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
+ if (link->link_trace.time_stamp.edp_poweroff != 0) {
+ if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
+ remaining_min_edp_poweroff_time_ms =
+ remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
+ else
+ remaining_min_edp_poweroff_time_ms = 0;
}
+ if (remaining_min_edp_poweroff_time_ms) {
+ DC_LOG_HW_RESUME_S3(
+ "%s: remaining_min_edp_poweroff_time_ms=%llu: begin wait.\n",
+ __func__, remaining_min_edp_poweroff_time_ms);
+ msleep(remaining_min_edp_poweroff_time_ms);
+ DC_LOG_HW_RESUME_S3(
+ "%s: remaining_min_edp_poweroff_time_ms=%llu: end wait.\n",
+ __func__, remaining_min_edp_poweroff_time_ms);
+ dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
+ __func__, remaining_min_edp_poweroff_time_ms);
+ } else {
+ DC_LOG_HW_RESUME_S3(
+ "%s: remaining_min_edp_poweroff_time_ms=%llu: no wait required.\n",
+ __func__, remaining_min_edp_poweroff_time_ms);
+ }
}
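For concreteness, the T12 bookkeeping above plays out as follows (illustrative numbers):

	/* With panel_patch.extra_t12_ms = 50 the minimum off time is
	 * 500 + 50 = 550 ms.  Powered off 200 ms ago -> sleep the remaining
	 * 550 - 200 = 350 ms; powered off 600 ms ago -> remaining time is 0
	 * and no wait is taken.
	 */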
DC_LOG_HW_RESUME_S3(
- "%s: Panel Power action: %s\n",
+ "%s: BEGIN: Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
cntl.action = power_up ?
@@ -855,12 +893,23 @@ void dce110_edp_power_control(
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
+ DC_LOG_HW_RESUME_S3(
+ "%s: END: Panel Power action: %s bp_result=%u\n",
+ __func__, (power_up ? "On":"Off"),
+ bp_result);
+
if (!power_up)
/*save driver power off time stamp*/
link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
else
link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
+ DC_LOG_HW_RESUME_S3(
+ "%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
+ __func__,
+ link->link_trace.time_stamp.edp_poweroff,
+ link->link_trace.time_stamp.edp_poweron);
+
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
"%s: Panel Power bp_result: %d\n",
@@ -1605,7 +1654,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
// enable fastboot if backend is enabled on eDP
if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
/* Set optimization flag on eDP stream*/
- if (edp_stream) {
+ if (edp_stream && edp_link->link_status.link_active) {
edp_stream->apply_edp_fast_boot_optimization = true;
can_apply_edp_fast_boot = true;
}
@@ -2688,7 +2737,7 @@ static void program_output_csc(struct dc *dc,
}
}
-void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -2733,7 +2782,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
}
-void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
@@ -2841,6 +2890,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 5d83e8174005..0853bc9917c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1017,7 +1017,7 @@ enum dc_status dce112_add_stream_to_ctx(
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream)
{
- enum dc_status result = DC_ERROR_UNEXPECTED;
+ enum dc_status result;
result = resource_map_pool_resources(dc, new_ctx, dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
new file mode 100644
index 000000000000..7036c3bd0f87
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
+ dce60_resource.o
+
+AMD_DAL_DCE60 = $(addprefix $(AMDDALPATH)/dc/dce60/,$(DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE60)
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
new file mode 100644
index 000000000000..920c7ae29d53
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce60_hw_sequencer.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+/* include DCE6 register header files */
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#define DC_LOGGER_INIT()
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+/***************************PIPE_CONTROL***********************************/
+
+/*
+ * Check if FBC can be enabled
+ */
+static bool dce60_should_enable_fbc(struct dc *dc,
+ struct dc_state *context,
+ uint32_t *pipe_idx)
+{
+ uint32_t i;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ ASSERT(dc->fbc_compressor);
+
+ /* FBC memory should be allocated */
+ if (!dc->ctx->fbc_gpu_addr)
+ return false;
+
+ /* Only supports single display */
+ if (context->stream_count != 1)
+ return false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream) {
+
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx)
+ continue;
+
+ /* fbc not applicable on underlay pipe */
+ if (pipe_ctx->pipe_idx != underlay_idx) {
+ *pipe_idx = i;
+ break;
+ }
+ }
+ }
+
+ if (i == dc->res_pool->pipe_count)
+ return false;
+
+ if (!pipe_ctx->stream->link)
+ return false;
+
+ /* Only supports eDP */
+ if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP)
+ return false;
+
+ /* PSR should not be enabled */
+ if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
+ return false;
+
+ /* Nothing to compress */
+ if (!pipe_ctx->plane_state)
+ return false;
+
+ /* Only for non-linear tiling */
+ if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable FBC
+ */
+static void dce60_enable_fbc(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint32_t pipe_idx = 0;
+
+ if (dce60_should_enable_fbc(dc, context, &pipe_idx)) {
+ /* Program GRPH COMPRESSED ADDRESS and PITCH */
+ struct compr_addr_and_pitch_params params = {0, 0, 0};
+ struct compressor *compr = dc->fbc_compressor;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ params.source_view_width = pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height = pipe_ctx->stream->timing.v_addressable;
+ params.inst = pipe_ctx->stream_res.tg->inst;
+ compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
+
+ compr->funcs->surface_address_and_pitch(compr, &params);
+ compr->funcs->set_fbc_invalidation_triggers(compr, 1);
+
+ compr->funcs->enable_fbc(compr, &params);
+ }
+}
+
+/*******************************************************************************
+ * Front End programming
+ ******************************************************************************/
+
+static void dce60_set_default_colors(struct pipe_ctx *pipe_ctx)
+{
+ struct default_adjustment default_adjust = { 0 };
+
+ default_adjust.force_hw_default = false;
+ default_adjust.in_color_space = pipe_ctx->plane_state->color_space;
+ default_adjust.out_color_space = pipe_ctx->stream->output_color_space;
+ default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
+
+ /* display color depth */
+ default_adjust.color_depth =
+ pipe_ctx->stream->timing.display_color_depth;
+
+ /* Lb color depth */
+ default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
+ pipe_ctx->plane_res.xfm, &default_adjust);
+}
+
+/*******************************************************************************
+ * In order to turn the surface on, we program the
+ * CRTC
+ *
+ * DCE6 has no bottom_pipe and no Blender HW
+ * We need to set 'blank_target' to false in order to turn on the display
+ *
+ * |-----------|------------|---------|
+ * |curr pipe | set_blank | |
+ * |Surface |blank_target| CRCT |
+ * |visibility | argument | |
+ * |-----------|------------|---------|
+ * | off | true | blank |
+ * | on | false | unblank |
+ * |-----------|------------|---------|
+ *
+ ******************************************************************************/
+static void dce60_program_surface_visibility(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ bool blank_target = false;
+
+ /* DCE6 has no bottom_pipe and no Blender HW */
+
+ if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ /* DCE6 skips dce_set_blender_mode() and proceeds directly to blank/unblank the CRTC */
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
+
+}
+
+
+static void dce60_get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set border color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set border color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set border color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set border color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set border color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
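+
+/*
+ * Note: color_value scales the confirm color down with the timing generator
+ * instance, e.g. with MAX_TG_COLOR_VALUE = M, tg inst 0 gets M * 4 / 4 = M
+ * (full intensity) while tg inst 2 gets M * 2 / 4 = M / 2, so each pipe's
+ * border color is visually distinguishable.
+ */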
+
+static void dce60_program_scaler(const struct dc *dc,
+ const struct pipe_ctx *pipe_ctx)
+{
+ struct tg_color color = {0};
+
+ /* DCE6 skips DCN TOFPGA check for transform_set_pixel_storage_depth == NULL */
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ dce60_get_surface_visual_confirm_color(pipe_ctx, &color);
+ else
+ color_space_to_black_color(dc,
+ pipe_ctx->stream->output_color_space,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipe_ctx->plane_res.xfm,
+ pipe_ctx->plane_res.scl_data.lb_params.depth,
+ &pipe_ctx->stream->bit_depth_params);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+ /*
+ * The way 420 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ color.color_r_cr = color.color_g_y;
+
+ pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+ pipe_ctx->stream_res.tg,
+ &color);
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data);
+}
+
+static void
+dce60_program_front_end_for_pipe(
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ unsigned int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ DC_LOGGER_INIT();
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, mi->inst, true);
+
+ dce60_set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != NULL;
+
+ dce60_program_scaler(dc, pipe_ctx);
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+
+ /* Moved programming gamma from dc to hwss */
+ if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+ if (pipe_ctx->plane_state->update_flags.bits.full_update)
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ DC_LOG_SURFACE(
+ "Pipe:%d %p: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;"
+ "clip: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ (void *) pipe_ctx->plane_state,
+ pipe_ctx->plane_state->address.grph.addr.high_part,
+ pipe_ctx->plane_state->address.grph.addr.low_part,
+ pipe_ctx->plane_state->src_rect.x,
+ pipe_ctx->plane_state->src_rect.y,
+ pipe_ctx->plane_state->src_rect.width,
+ pipe_ctx->plane_state->src_rect.height,
+ pipe_ctx->plane_state->dst_rect.x,
+ pipe_ctx->plane_state->dst_rect.y,
+ pipe_ctx->plane_state->dst_rect.width,
+ pipe_ctx->plane_state->dst_rect.height,
+ pipe_ctx->plane_state->clip_rect.x,
+ pipe_ctx->plane_state->clip_rect.y,
+ pipe_ctx->plane_state->clip_rect.width,
+ pipe_ctx->plane_state->clip_rect.height);
+
+ DC_LOG_SURFACE(
+ "Pipe %d: width, height, x, y\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+}
+
+static void dce60_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i;
+
+ if (num_planes == 0)
+ return;
+
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Need to allocate mem before programming the front end (as on Fiji) */
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_100hz / 10,
+ context->stream_count);
+
+ dce60_program_front_end_for_pipe(dc, pipe_ctx);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ dce60_program_surface_visibility(dc, pipe_ctx);
+
+ }
+
+ if (dc->fbc_compressor)
+ dce60_enable_fbc(dc, context);
+}
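+
+/*
+ * Note on the sequencing above: FBC is unconditionally disabled before any
+ * pipe belonging to the stream is reprogrammed, and dce60_enable_fbc() then
+ * re-checks eligibility (eDP only, PSR off, non-linear tiling) and re-arms
+ * compression only after every matching pipe has been programmed.
+ */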
+
+void dce60_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwseq->funcs.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.apply_ctx_for_surface = dce60_apply_ctx_for_surface;
+ dc->hwss.cursor_lock = dce60_pipe_control_lock;
+ dc->hwss.pipe_control_lock = dce60_pipe_control_lock;
+ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
new file mode 100644
index 000000000000..f3b2d8b60d5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_hw_sequencer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE60_H__
+#define __DC_HWSS_DCE60_H__
+
+#include "core_types.h"
+#include "hw_sequencer_private.h"
+
+struct dc;
+
+void dce60_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE60_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
new file mode 100644
index 000000000000..5a5a9cb77acb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -0,0 +1,1527 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "irq/dce60/irq_service_dce60.h"
+#include "dce110/dce110_timing_generator.h"
+#include "dce110/dce110_resource.h"
+#include "dce60/dce60_timing_generator.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce60/dce60_hw_sequencer.h"
+#include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_i2c.h"
+/* TODO remove this include */
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE
+#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE
+#endif
+
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_3 0x05CC
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+#endif
+
+
+#ifndef mmHPD_DC_HPD_CONTROL
+ #define mmHPD_DC_HPD_CONTROL 0x189A
+ #define mmHPD0_DC_HPD_CONTROL 0x189A
+ #define mmHPD1_DC_HPD_CONTROL 0x18A2
+ #define mmHPD2_DC_HPD_CONTROL 0x18AA
+ #define mmHPD3_DC_HPD_CONTROL 0x18B2
+ #define mmHPD4_DC_HPD_CONTROL 0x18BA
+ #define mmHPD5_DC_HPD_CONTROL 0x18C2
+#endif
+
+#define DCE11_DIG_FE_CNTL 0x4a00
+#define DCE11_DIG_BE_CNTL 0x4a47
+#define DCE11_DP_SEC 0x4ac3
+
+static const struct dce110_timing_generator_offsets dce60_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL3
+ - mmDPG_PIPE_ARBITRATION_CONTROL3),
+ }
+};
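+
+/*
+ * Note: each entry above holds per-instance register deltas relative to the
+ * instance-0 registers, e.g. .crtc for CRTC1 is
+ * (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL); the timing generator's
+ * CRTC_REG()/DCP_REG()/DMIF_REG() helpers add these deltas back onto the
+ * shared instance-0 addresses.
+ */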
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
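+
+/*
+ * Illustrative expansion (assuming the usual DCE register naming):
+ * SRI(GRPH_CONTROL, DCP, 1) expands to .GRPH_CONTROL = mmDCP1_GRPH_CONTROL.
+ */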
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_COMMON_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE60_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE60(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE60(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE60(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE60_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_60_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_60(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_60(_MASK)
+};
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_DCE60_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ AUD_DCE60_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_80(id),\
+}
+
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 2,
+ .num_ddc = 6,
+};
+
+static const struct resource_caps res_cap_61 = {
+ .num_timing_generator = 4,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3,
+ .num_ddc = 6,
+};
+
+static const struct resource_caps res_cap_64 = {
+ .num_timing_generator = 2,
+ .num_audio = 2,
+ .num_stream_encoder = 2,
+ .num_pll = 2,
+ .num_ddc = 2,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
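+
+/*
+ * Note: the scale factors above appear to be in units of 1/1000, i.e.
+ * max_upscale_factor.argb8888 = 16000 is a 16x upscale and
+ * max_downscale_factor.argb8888 = 250 allows downscaling to 1/4 size;
+ * nv12/fp16 are pinned to 1 since those formats are not supported here.
+ */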
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE60_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE60(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE60(_MASK)
+};
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
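+
+/*
+ * Illustrative usage sketch (not a new helper): the returned PHY instance
+ * indexes the per-PHY register lists, as done in dce60_link_encoder_create()
+ * below, e.g.:
+ *
+ *   int id = map_transmitter_id_to_phy_instance(TRANSMITTER_UNIPHY_B);
+ *   const struct dce110_link_enc_registers *regs = &link_enc_regs[id];
+ *
+ * TRANSMITTER_UNIPHY_B maps to 1 and thus selects link_regs(1).
+ */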
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce60_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce60_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce60_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct output_pixel_processor *dce60_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce60_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct dce_aux *dce60_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+ i2c_inst_regs(3),
+ i2c_inst_regs(4),
+ i2c_inst_regs(5),
+ i2c_inst_regs(6),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+struct dce_i2c_hw *dce60_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dce_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+
+struct dce_i2c_sw *dce60_i2c_sw_create(
+ struct dc_context *ctx)
+{
+ struct dce_i2c_sw *dce_i2c_sw =
+ kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL);
+
+ if (!dce_i2c_sw)
+ return NULL;
+
+ dce_i2c_sw_construct(dce_i2c_sw, ctx);
+
+ return dce_i2c_sw;
+}
+static struct stream_encoder *dce60_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE6_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE6_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE6_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce60_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce60_stream_encoder_create,
+ .create_hwseq = dce60_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE6_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE6_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE6_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce60_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce60_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce60_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce60_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce60_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->prescaler_on = false;
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 297000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true
+};
+
+struct link_encoder *dce60_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc110)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dce60_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct panel_cntl *dce60_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
+struct clock_source *dce60_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ kfree(clk_src);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce60_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct input_pixel_processor *dce60_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce60_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static void dce60_resource_destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce60_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce60_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.dp_clock_source != NULL)
+ dce60_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+bool dce60_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO: implement when needed; for now hardcode the max values */
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
+ }
+
+ return true;
+}
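+
+/*
+ * Note (assumption): with the MEMORY_TYPE_MULTIPLIER_CZ factor used by the
+ * other DCE resource files (4), the hardcoded fallback above evaluates to
+ * yclk = 250000 * 4 = 1,000,000 kHz, i.e. a 1 GHz effective memory clock
+ * alongside the 681 MHz display clock cap.
+ */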
+
+static bool dce60_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce60_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce60_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static void dce60_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ dce60_resource_destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce60_res_pool_funcs = {
+ .destroy = dce60_destroy_resource_pool,
+ .link_enc_create = dce60_link_encoder_create,
+ .panel_cntl_create = dce60_panel_cntl_create,
+ .validate_bandwidth = dce60_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce60_validate_global,
+ .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
+};
+
+static bool dce60_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_bios *bp;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce60_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.timing_generator_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 64;
+ dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 2;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 1;
+ }
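+
+ /*
+ * When the BIOS advertises an external DP reference clock, DP gets a
+ * dedicated external clock source and both PLLs remain available for
+ * other signal types; otherwise PLL0 is consumed as the DP clock source
+ * and only one general-purpose PLL is left.
+ */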
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce60_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce60_timing_generator_create(
+ ctx, i, &dce60_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce60_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce60_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce60_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce60_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce60_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create i2c engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx);
+ if (pool->base.sw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create sw i2c!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce60_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ dce60_resource_destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce60_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce60_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce61_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_bios *bp;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_61;
+ pool->base.funcs = &dce60_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_61.num_timing_generator;
+ pool->base.timing_generator_count = res_cap_61.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 64;
+ dc->caps.is_apu = true;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce60_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce60_timing_generator_create(
+ ctx, i, &dce60_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce60_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce60_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce60_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce60_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce60_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create i2c engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx);
+ if (pool->base.sw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create sw i2c!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce60_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ dce60_resource_destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce61_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce61_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce64_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_bios *bp;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_64;
+ pool->base.funcs = &dce60_res_pool_funcs;
+
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_64.num_timing_generator;
+ pool->base.timing_generator_count = res_cap_64.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 64;
+ dc->caps.is_apu = true;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 2;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 1;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce60_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce60_timing_generator_create(
+ ctx, i, &dce60_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce60_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce60_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce60_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce60_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dce60_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create aux engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.hw_i2cs[i] = dce60_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create i2c engine!\n");
+ goto res_create_fail;
+ }
+ pool->base.sw_i2cs[i] = dce60_i2c_sw_create(ctx);
+ if (pool->base.sw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create sw i2c!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.disable_dp_clk_share = true;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce60_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ dce60_resource_destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce64_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce64_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
new file mode 100644
index 000000000000..5d653a76b0b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE60_H__
+#define __DC_RESOURCE_DCE60_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce60_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce61_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce64_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE60_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
new file mode 100644
index 000000000000..fc1af0ff0ca4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE6 register header files */
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "dc_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "../dce110/dce110_timing_generator.h"
+#include "dce60_timing_generator.h"
+
+#include "timing_generator.h"
+
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+
+ BLACK_COLOR_FORMAT_COUNT
+};
+
+static const struct dce110_timing_generator_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+}
+};
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
+
+static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz)
+{
+ uint64_t pix_dur;
+ uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
+ + DCE110TG_FROM_TG(tg)->offsets.dmif;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (pix_clk_100hz == 0)
+ return;
+
+ pix_dur = div_u64(10000000000ull, pix_clk_100hz);
+
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
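+
+/*
+ * Note: pix_dur works out to the pixel period in picoseconds:
+ * period = 10^12 ps / (pix_clk_100hz * 100 Hz) = 10^10 / pix_clk_100hz.
+ * E.g. a 100 MHz pixel clock (pix_clk_100hz = 1,000,000) yields
+ * pix_dur = 10,000 ps = 10 ns, programmed into
+ * DPG_PIPE_ARBITRATION_CONTROL1.PIXEL_DURATION.
+ */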
+
+static void program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width,
+ const enum signal_type signal,
+ bool use_vbios)
+{
+ if (!use_vbios)
+ program_pix_dur(tg, timing->pix_clk_100hz);
+
+ dce110_tg_program_timing(tg, timing, 0, 0, 0, 0, 0, use_vbios);
+}
+
+static void dce60_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ /* DCE6 has CRTC_PREFETCH_EN bit in CRTC_CONTROL register */
+ uint32_t addr2 = CRTC_REG(mmCRTC_CONTROL);
+ uint32_t value2 = dm_read_reg(tg->ctx, addr2);
+
+ /* DCE6 does not support the CRTC_LEGACY_REQUESTOR_EN bit,
+ so it is not possible to set the bit based on the 'enable' argument */
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value2,
+ 0,
+ CRTC_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value2,
+ 1,
+ CRTC_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+ dm_write_reg(tg->ctx, addr2, value2);
+}
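+
+/*
+ * Worked example for the branch above: a timing with v_sync_width = 4 and
+ * v_front_porch = 3 sums to 7 (> 3), so the advanced start line position is
+ * set to 4 and CRTC prefetch is enabled; with a very short vertical blank
+ * (sum <= 3) prefetch is disabled and the start line moves to 3 instead.
+ */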
+
+static bool dce60_is_tg_enabled(struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_CONTROL);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_CONTROL,
+ CRTC_CURRENT_MASTER_EN_STATE);
+ return field == 1;
+}
+
+bool dce60_configure_crc(struct timing_generator *tg,
+ const struct crc_params *params)
+{
+ /* Cannot configure crc on a CRTC that is disabled */
+ if (!dce60_is_tg_enabled(tg))
+ return false;
+
+ /* DCE6 has no CRTC_CRC_CNTL register, nothing to do */
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce60_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .set_drr = dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+
+ /* DCE6.0 overrides */
+ .enable_advanced_request =
+ dce60_timing_generator_enable_advanced_request,
+ .configure_crc = dce60_configure_crc,
+ .get_crc = dce110_get_crc,
+};
+
+void dce60_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+ tg110->offsets = *offsets;
+ tg110->derived_offsets = reg_offsets[instance];
+
+ tg110->base.funcs = &dce60_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
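+/* Sketch of the mask-to-limit arithmetic above (the field width is an
+ * assumption for illustration): if CRTC_H_TOTAL were a 14-bit field,
+ * its mask would be 0x3FFF and
+ *	max_h_total = 0x3FFF + 1 = 16384,
+ * i.e. the largest programmable value plus one, since the register
+ * holds (h_total - 1).
+ */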
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h
new file mode 100644
index 000000000000..81d831233cc5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE60_H__
+#define __DC_TIMING_GENERATOR_DCE60_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* DCE6.0 implementation inherits from DCE11.0 */
+void dce60_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+#endif /* __DC_TIMING_GENERATOR_DCE60_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 62ad1a11bff9..733e6e6e43bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,4 +31,11 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
+# fix:
+# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
+# aarch64 does not support soft-float, so use hard-float and handle this in code
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
+endif
+
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 47a39eb9400b..7a00fe525dfb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -325,8 +325,6 @@ bool cm_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE_CTX(output_tf->ctx);
-
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
@@ -524,8 +522,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE_CTX(output_tf->ctx);
-
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index cedf359a00f5..db5615a51fea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -734,6 +734,9 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct dc_plane_address earliest_inuse_address;
+ if (hubp && hubp->power_gated)
+ return false;
+
REG_GET(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_PENDING, &flip_pending);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fa643ec5a876..d0f3bf953d02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2377,14 +2377,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
- /*
- * The way 420 is packed, 2 channels carry Y component, 1 channel
- * alternate between Cb and Cr, so both channels need the pixel
- * value for Y
- */
- if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
- blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
-
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2769,6 +2761,154 @@ static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
return NULL;
}
+bool dcn10_disconnect_pipes(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool found_pipe = false;
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_state *old_ctx = dc->current_state;
+ bool mpcc_disconnected = false;
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ new_pipe = &context->res_ctx.pipe_ctx[i];
+ new_pipe->update_flags.raw = 0;
+
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ continue;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state)
+ new_pipe->update_flags.bits.disable = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ }
+
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+		/* Disconnect mpcc here only if losing pipe split */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable &&
+ old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+
+ /* Find the top pipe in the new ctx for the bottom pipe that we
+ * want to remove by comparing the streams and planes. If both
+ * pipes are being disabled then do it in the regular pipe
+ * programming sequence
+ */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
+ old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state &&
+ !context->res_ctx.pipe_ctx[j].top_pipe &&
+ !context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
+ found_pipe = true;
+ break;
+ }
+ }
+
+				// Disconnect if the top pipe lost its pipe split
+ if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
+ hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ mpcc_disconnected = true;
+ }
+ }
+ found_pipe = false;
+ }
+ }
+
+ if (mpcc_disconnected) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
+ continue;
+
+ // Only update scaler and viewport here if we lose a pipe split.
+ // This is to prevent half the screen from being black when we
+ // unlock after disconnecting MPCC.
+ if (!(old_pipe && !pipe_ctx->top_pipe &&
+ !pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
+ continue;
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+ }
+ }
+ }
+ }
+ return mpcc_disconnected;
+}
+
+void dcn10_wait_for_pending_cleared(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+
+ /*
+		 * Only wait for the top pipe's TG pending bit.
+		 * Also skip if the pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ /*
+ * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
+		 * For some reason, waiting for OTG_UPDATE_PENDING to clear
+		 * does not seem to trigger the update right away, and if we
+		 * lock again before VUPDATE then we don't get a separate
+		 * operation.
+ */
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+}
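+/* Hypothetical caller sketch (an assumption, not code from this patch):
+ * the two hooks above are presumably driven from the context-commit
+ * path, roughly as
+ *
+ *	if (dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared &&
+ *	    dc->hwss.disconnect_pipes(dc, context))
+ *		dc->hwss.wait_for_pending_cleared(dc, context);
+ *
+ * so that an MPCC disconnect is always followed by a wait for VBLANK and
+ * VACTIVE before front-end programming continues.
+ */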
+
void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d891166da8a..e5691e499023 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -194,6 +194,12 @@ void dcn10_get_surface_visual_confirm_color(
void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+bool dcn10_disconnect_pipes(
+ struct dc *dc,
+ struct dc_state *context);
+
+void dcn10_wait_for_pending_cleared(struct dc *dc,
+ struct dc_state *context);
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
void dcn10_verify_allow_pstate_change_high(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 5c98b71c1d47..b24c8ae8b1ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
+ .disconnect_pipes = dcn10_disconnect_pipes,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -64,6 +66,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 2972392f9788..800be2693fac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -288,6 +288,17 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
+ if (REG(OPTC_DATA_FORMAT_CONTROL)) {
+ uint32_t data_fmt = 0;
+
+ if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ data_fmt = 1;
+ else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ data_fmt = 2;
+
+ REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+ }
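+	/* OPTC_DATA_FORMAT encoding used above: 0 = 4:4:4 (default),
+	 * 1 = YCbCr 4:2:2, 2 = YCbCr 4:2:0. A minimal sketch of the same
+	 * mapping as a helper (hypothetical, for illustration only):
+	 *
+	 *	static uint32_t optc_data_fmt(enum dc_pixel_encoding enc)
+	 *	{
+	 *		if (enc == PIXEL_ENCODING_YCBCR422)
+	 *			return 1;
+	 *		if (enc == PIXEL_ENCODING_YCBCR420)
+	 *			return 2;
+	 *		return 0;
+	 *	}
+	 */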
+
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
if (optc1->opp_count == 4)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 8939541ad7af..a78712caf124 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -798,7 +798,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
- .dp_ycbcr420_supported = false,
+ .dp_ycbcr420_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1339,6 +1339,47 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
return value;
}
+/*
+ * Some architectures (e.g. aarch64) don't support soft-float, so on
+ * those this function has to be called with hard-float enabled. Keep
+ * it noinline so that whatever FP work is done stays inside it.
+ */
+static noinline void dcn10_resource_construct_fp(
+ struct dc *dc)
+{
+ if (dc->ctx->dce_version == DCN_VERSION_1_01) {
+ struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
+ struct dcn_ip_params *dcn_ip = dc->dcn_ip;
+ struct display_mode_lib *dml = &dc->dml;
+
+ dml->ip.max_num_dpp = 3;
+ /* TODO how to handle 23.84? */
+ dcn_soc->dram_clock_change_latency = 23;
+ dcn_ip->max_num_dpp = 3;
+ }
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->urgent_latency = 3;
+ dc->debug.disable_dmcu = true;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
+ }
+
+
+ dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
+ ASSERT(dc->dcn_soc->number_of_channels < 3);
+	if (dc->dcn_soc->number_of_channels == 0) /* old sbios bug */
+ dc->dcn_soc->number_of_channels = 2;
+
+ if (dc->dcn_soc->number_of_channels == 1) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
+ }
+ }
+}
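+/* Worked example for the channel math above (ddr4_dram_width is assumed
+ * to be 64): a board reporting vram_width = 128 yields
+ *	number_of_channels = 128 / 64 = 2,
+ * while vram_width = 64 gives a single channel and selects the reduced
+ * fabric/DRAM bandwidth values.
+ */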
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1490,37 +1531,15 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
- if (dc->ctx->dce_version == DCN_VERSION_1_01) {
- struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
- struct dcn_ip_params *dcn_ip = dc->dcn_ip;
- struct display_mode_lib *dml = &dc->dml;
-
- dml->ip.max_num_dpp = 3;
- /* TODO how to handle 23.84? */
- dcn_soc->dram_clock_change_latency = 23;
- dcn_ip->max_num_dpp = 3;
- }
- if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
- dc->dcn_soc->urgent_latency = 3;
- dc->debug.disable_dmcu = true;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
- }
-
-
- dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
- ASSERT(dc->dcn_soc->number_of_channels < 3);
- if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
- dc->dcn_soc->number_of_channels = 2;
-
- if (dc->dcn_soc->number_of_channels == 1) {
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
- if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
- }
- }
+#if defined(CONFIG_ARM64)
+ /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
+ DC_FP_START();
+ dcn10_resource_construct_fp(dc);
+ DC_FP_END();
+#else
+	/* The other architectures we build for compile this file with soft-float */
+ dcn10_resource_construct_fp(dc);
+#endif
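+	/* Context note (assumption): on arm64, DC_FP_START()/DC_FP_END()
+	 * are expected to map to kernel_neon_begin()/kernel_neon_end(),
+	 * which is what makes calling the hard-float helper above safe in
+	 * kernel context.
+	 */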
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 842abb4c475b..f70fcadf1ee5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -619,7 +619,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
}
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
{
@@ -896,10 +896,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+ * 10us*10200=102ms. This covers 100.0ms of minimum 10 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 250;
+ max_retries = DP_BLANK_MAX_RETRY * 501;
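+	/* Retry arithmetic, assuming DP_BLANK_MAX_RETRY is 20:
+	 * 20 * 501 = 10020 polls of 10us each, i.e. ~100.2ms, enough to
+	 * cover one full frame of a 10 Hz mode (100ms) with some margin.
+	 */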
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
@@ -1616,8 +1616,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc1_stream_encoder_dvi_set_stream_attribute,
- .set_mst_bandwidth =
- enc1_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc1_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 30eae7459d50..b99d2527cf03 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -588,7 +588,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link);
-void enc1_stream_encoder_set_mst_bandwidth(
+void enc1_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 5fcaf78334ff..624cb1341ef1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,6 +17,10 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
+endif
+
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 667640c4b288..1118e33aaa2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -94,6 +94,7 @@
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
+ DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index bb920d0e0b89..368818d2dfc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -908,6 +908,9 @@ bool hubp2_is_flip_pending(struct hubp *hubp)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
struct dc_plane_address earliest_inuse_address;
+ if (hubp && hubp->power_gated)
+ return false;
+
REG_GET(DCSURF_FLIP_CONTROL,
SURFACE_FLIP_PENDING, &flip_pending);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index c8cfd3ba1c15..01530e686f43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1251,6 +1251,11 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
return;
}
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state) {
+ new_pipe->update_flags.bits.plane_changed = true;
+ }
+
/* Detect top pipe only changes */
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
/* Detect odm changes */
@@ -1392,6 +1397,7 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->ttu_regs);
if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.bpp_change ||
plane_state->update_flags.bits.input_csc_change ||
plane_state->update_flags.bits.color_space_change ||
@@ -1414,6 +1420,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.mpcc
+ || pipe_ctx->update_flags.bits.plane_changed
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
@@ -1515,6 +1522,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
pipe_ctx->update_flags.bits.opp_changed ||
plane_state->update_flags.bits.pixel_format_change ||
plane_state->update_flags.bits.horizontal_mirror_change ||
@@ -1539,7 +1547,9 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
- if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update)
hws->funcs.update_plane_addr(dc, pipe_ctx);
@@ -1632,16 +1642,26 @@ void dcn20_program_front_end_for_ctx(
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == old_pipe_ctx->stream)
+ pipe_ctx->stream_res.gsl_group =
+ old_pipe_ctx->stream_res.gsl_group;
+ }
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
- if (dc->hwss.program_triplebuffer != NULL &&
- !dc->debug.disable_tri_buf) {
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
}
}
@@ -1909,9 +1929,9 @@ void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.dsc) {
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
- dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
while (odm_pipe) {
- dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
odm_pipe = odm_pipe->next_odm_pipe;
}
}
@@ -1924,9 +1944,9 @@ void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.dsc) {
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
- dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
+ hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
while (odm_pipe) {
- dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
+ hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
odm_pipe = odm_pipe->next_odm_pipe;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 3dde6f26de47..072193c5ffe6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -34,6 +34,8 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .disconnect_pipes = dcn10_disconnect_pipes,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
@@ -66,6 +68,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index dcbf28dd72d4..864acd695cbb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -231,8 +231,6 @@
SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
SRI(DPCSTX_TX_CLOCK_CNTL, DPCSTX, id), \
SRI(DPCSTX_TX_CNTL, DPCSTX, id), \
- SRI(DPCSTX_DEBUG_CONFIG, DPCSTX, id), \
- SRI(RDPCSTX_DEBUG_CONFIG, RDPCSTX, id), \
SR(RDPCSTX0_RDPCSTX_SCRATCH)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 8c16967fe018..d8b18c515d06 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -239,7 +239,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
uint32_t memory_mask;
- uint32_t data_fmt = 0;
ASSERT(opp_cnt == 2);
@@ -262,13 +261,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, memory_mask);
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- data_fmt = 1;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- data_fmt = 2;
-
- REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
OPTC_SEG0_SRC_SEL, opp_id[0],
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index f31f48dd0da2..d50a9c370637 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -150,7 +150,6 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.dispclk_delay_subtotal = 87, //
.dcfclk_cstate_latency = 10, // SRExitTime
.max_inter_dcn_tile_repeaters = 8,
-
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
@@ -298,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 8.6,
- .sr_enter_plus_exit_time_us = 10.9,
+ .sr_exit_time_us = 11.6,
+ .sr_enter_plus_exit_time_us = 13.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -1075,7 +1074,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
- .disable_tri_buf = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
};
@@ -1092,6 +1090,7 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_stutter = true,
.scl_reset_length10 = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_tri_buf = true,
};
void dcn20_dpp_destroy(struct dpp **dpp)
@@ -2203,9 +2202,9 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
- /*fill up the audio sample rate*/
+	/* fill in the max audio sample rate (in kHz) */
get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
- pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate;
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
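+	/* e.g. a 48,000 Hz limit reported in audio_info becomes 48 in the
+	 * kHz units DML expects after the division above */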
#endif
/*
* For graphic plane, cursor number is 1, nv12 is 0
@@ -3209,6 +3208,9 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
dc->debug.enable_dram_clock_change_one_display_vactive;
+	/* Unsafe due to current pipe merge and split logic */
+ ASSERT(context != dc->current_state);
+
if (fast_validate) {
return dcn20_validate_bandwidth_internal(dc, context, true);
}
@@ -3320,7 +3322,7 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
return DC_OK;
}
-static struct resource_funcs dcn20_res_pool_funcs = {
+static const struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
.panel_cntl_create = dcn20_panel_cntl_create,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 2c1959845c29..cdd39ee9761d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -95,7 +95,6 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb);
void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index e3984f02b7b3..4075ae111530 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -561,8 +561,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc1_stream_encoder_dvi_set_stream_attribute,
- .set_mst_bandwidth =
- enc1_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc2_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 07684d3e375a..51a2f3d4c194 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,6 +13,10 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
+endif
+
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index b187f71afa65..2b7396c9fcb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .disconnect_pipes = dcn10_disconnect_pipes,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
@@ -67,6 +69,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 88d41a385add..e73785e74cba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -894,6 +894,8 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_wm_range = true,
.disable_stutter = true,
.disable_48mhz_pwrdwn = true,
+ .disable_psr = true,
+ .enable_tri_buf = true
};
enum dcn20_clk_src_array_id {
@@ -1184,6 +1186,9 @@ bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
BW_VAL_TRACE_COUNT();
+	/* Unsafe due to current pipe merge and split logic */
+ ASSERT(context != dc->current_state);
+
out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
if (pipe_cnt == 0)
@@ -1754,7 +1759,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
return result;
}
-static struct resource_funcs dcn21_res_pool_funcs = {
+static const struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
.panel_cntl_create = dcn21_panel_cntl_create,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index a139a87a1a81..41a1d0e9b7e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -122,8 +122,6 @@ bool cm3_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE_CTX(output_tf->ctx);
-
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
@@ -314,8 +312,6 @@ bool cm3_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE_CTX(output_tf->ctx);
-
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index f5e80a0db72b..6c0f7ef0a3df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -790,8 +790,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
enc3_stream_encoder_dvi_set_stream_attribute,
- .set_mst_bandwidth =
- enc1_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ enc1_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index a5d750ed569e..204773ffc376 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -35,7 +35,6 @@
#include "dcn30_dpp.h"
#include "dcn10/dcn10_cm_common.h"
#include "dcn30_cm_common.h"
-#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "clk_mgr.h"
@@ -220,15 +219,13 @@ static void dcn30_set_writeback(
struct dc_writeback_info *wb_info,
struct dc_state *context)
{
- struct dwbc *dwb;
struct mcif_wb *mcif_wb;
struct mcif_buf_params *mcif_buf_params;
ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
ASSERT(wb_info->wb_enabled);
ASSERT(wb_info->mpcc_inst >= 0);
- ASSERT(wb_info->mpcc_inst < 4);
- dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ ASSERT(wb_info->mpcc_inst < dc->res_pool->mpcc_count);
mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
mcif_buf_params = &wb_info->mcif_buf_params;
@@ -692,26 +689,23 @@ void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
- unsigned int surface_size;
-
if (!dc->ctx->dmub_srv)
return false;
if (enable) {
- if (dc->current_state
- && dc->current_state->stream_count == 1 // single display only
- && dc->current_state->stream_status[0].plane_count == 1 // single surface only
- && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
- // Only 8 and 16 bit formats
- && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
- && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
-
- surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
- dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
- (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
-
+ if (dc->current_state) {
+ int i;
+
+ /* First, check no-memory-requests case */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->stream_status[i]
+ .plane_count)
+ /* Fail eligibility on a visible stream */
+ break;
+ }
}
+ /* No applicable optimizations */
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 9afee7160490..7c90c2222506 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -35,6 +35,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .disconnect_pipes = dcn10_disconnect_pipes,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
@@ -67,6 +69,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 224c8d145eba..b1f228fc119a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -179,8 +179,7 @@ void optc3_set_dsc_config(struct timing_generator *optc,
}
-
-static void optc3_set_odm_bypass(struct timing_generator *optc,
+void optc3_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -210,7 +209,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
/ opp_cnt;
uint32_t memory_mask = 0;
- uint32_t data_fmt = 0;
/* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic
* REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
@@ -241,13 +239,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
REG_SET(OPTC_MEMORY_CONFIG, 0,
OPTC_MEM_SEL, memory_mask);
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- data_fmt = 1;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- data_fmt = 2;
-
- REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
if (opp_cnt == 2) {
REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
OPTC_NUM_OF_INPUT_SEGMENT, 1,
@@ -277,7 +268,7 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
*
* Options: any time, start of frame, dp start of frame (range timing)
*/
-void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t mode = enable ? 2 : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index 33f13c1e7520..379616831636 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -339,4 +339,8 @@ void optc3_set_dsc_config(struct timing_generator *optc,
void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable);
+void optc3_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing);
+void optc3_tg_init(struct timing_generator *optc);
+
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index ebe0cc5b833b..2455d210ccf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -79,6 +79,7 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
@@ -340,7 +341,7 @@ static const struct dce110_clk_src_mask cs_mask = {
#define abm_regs(id)\
[id] = {\
- ABM_DCN301_REG_LIST(id)\
+ ABM_DCN30_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
@@ -491,6 +492,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
[id] = {\
LE_DCN3_REG_LIST(id), \
UNIPHY_DCN2_REG_LIST(phyid), \
+ DPCS_DCN2_REG_LIST(id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}
@@ -831,7 +833,7 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = true, //No DMCU on DCN30
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -848,10 +850,11 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
+ .disable_psr = false,
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = true, //No dmcu on DCN30
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -864,6 +867,8 @@ static const struct dc_debug_options debug_defaults_diags = {
.scl_reset_length10 = true,
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
+ .disable_psr = true,
+ .enable_tri_buf = true,
};
void dcn30_dpp_destroy(struct dpp **dpp)
@@ -872,7 +877,7 @@ void dcn30_dpp_destroy(struct dpp **dpp)
*dpp = NULL;
}
-struct dpp *dcn30_dpp_create(
+static struct dpp *dcn30_dpp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -890,7 +895,8 @@ struct dpp *dcn30_dpp_create(
kfree(dpp);
return NULL;
}
-struct output_pixel_processor *dcn30_opp_create(
+
+static struct output_pixel_processor *dcn30_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
@@ -906,7 +912,7 @@ struct output_pixel_processor *dcn30_opp_create(
return &opp->base;
}
-struct dce_aux *dcn30_aux_engine_create(
+static struct dce_aux *dcn30_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -925,6 +931,7 @@ struct dce_aux *dcn30_aux_engine_create(
return &aux_engine->base;
}
+
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
static const struct dce_i2c_registers i2c_hw_regs[] = {
@@ -944,7 +951,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
-struct dce_i2c_hw *dcn30_i2c_hw_create(
+static struct dce_i2c_hw *dcn30_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -959,6 +966,7 @@ struct dce_i2c_hw *dcn30_i2c_hw_create(
return dce_i2c_hw;
}
+
static struct mpc *dcn30_mpc_create(
struct dc_context *ctx,
int num_mpcc,
@@ -1009,7 +1017,7 @@ struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
return &hubbub3->base;
}
-struct timing_generator *dcn30_timing_generator_create(
+static struct timing_generator *dcn30_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
@@ -1043,7 +1051,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS4_CAPABLE = true
};
-struct link_encoder *dcn30_link_encoder_create(
+static struct link_encoder *dcn30_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
@@ -1064,7 +1072,7 @@ struct link_encoder *dcn30_link_encoder_create(
return &enc20->enc10.base;
}
-struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+static struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
struct dce_panel_cntl *panel_cntl =
kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
@@ -1308,11 +1316,14 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
dce_abm_destroy(&pool->base.multiple_abms[i]);
}
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
}
-struct hubp *dcn30_hubp_create(
+static struct hubp *dcn30_hubp_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -1331,7 +1342,7 @@ struct hubp *dcn30_hubp_create(
return NULL;
}
-bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1356,7 +1367,7 @@ bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+static bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
@@ -1817,6 +1828,22 @@ static bool init_soc_bounding_box(struct dc *dc,
loaded_ip->max_num_dpp = pool->base.pipe_count;
loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
dcn20_patch_bounding_box(dc, loaded_bb);
+
+ if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+ struct bp_soc_bb_info bb_info = {0};
+
+ if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ }
+ }
+
return true;
}
@@ -1872,6 +1899,48 @@ static bool dcn30_split_stream_for_mpc_or_odm(
return true;
}
+static struct pipe_ctx *dcn30_find_split_pipe(
+ struct dc *dc,
+ struct dc_state *context,
+ int old_index)
+{
+ struct pipe_ctx *pipe = NULL;
+ int i;
+
+ if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[old_index];
+ pipe->pipe_idx = old_index;
+ }
+
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+ }
+
+ /*
+	 * May need to fix pipes getting tossed from one OPP to another on flip.
+	 * For debugging transient underflow during topology updates, add:
+ * ASSERT(pipe);
+ */
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return pipe;
+}
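+/* Search order above, summarized (no new behavior implied): first try
+ * the caller's old_index if that pipe is free in the new context; then
+ * scan from the highest index for a pipe that is neither a bottom pipe
+ * nor a secondary ODM pipe in the current state and is free in the new
+ * context; finally fall back to any free pipe. Preferring the old index
+ * keeps a split pipe on the same OPP across validations, which helps
+ * avoid the transient underflow mentioned above.
+ */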
+
static bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
@@ -1997,6 +2066,7 @@ static bool dcn30_internal_validate_bw(
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2011,6 +2081,7 @@ static bool dcn30_internal_validate_bw(
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -2018,8 +2089,10 @@ static bool dcn30_internal_validate_bw(
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = NULL;
bool odm;
+ int old_index = -1;
if (!pipe->stream || newly_split[i])
continue;
@@ -2031,7 +2104,20 @@ static bool dcn30_internal_validate_bw(
continue;
if (split[i]) {
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ if (odm) {
+ if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else if (old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ }
+ hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(hsplit_pipe);
if (!hsplit_pipe)
goto validate_fail;
@@ -2045,8 +2131,16 @@ static bool dcn30_internal_validate_bw(
repopulate_pipes = true;
}
if (split[i] == 4) {
- struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ struct pipe_ctx *pipe_4to1;
+ if (odm && old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
@@ -2056,7 +2150,16 @@ static bool dcn30_internal_validate_bw(
goto validate_fail;
newly_split[pipe_4to1->pipe_idx] = true;
- pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
ASSERT(pipe_4to1);
if (!pipe_4to1)
goto validate_fail;
@@ -2100,7 +2203,7 @@ validate_out:
return out;
}
-static void dcn30_calculate_wm(
+void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -2108,6 +2211,8 @@ static void dcn30_calculate_wm(
{
int i, pipe_idx;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2141,30 +2246,12 @@ static void dcn30_calculate_wm(
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- /* Set C:
- * DCFCLK: Min Required
- * FCLK(proportional to UCLK): 1GHz or Max
- * pstate latency overriden to 5us
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
/* Set D:
* DCFCLK: Min Required
* FCLK(proportional to UCLK): 1GHz or Max
* sr_enter_exit = 4, sr_exit = 2us
*/
+ /*
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
@@ -2178,26 +2265,72 @@ static void dcn30_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
- /* Set A:
+ /* Set C:
* DCFCLK: Min Required
* FCLK(proportional to UCLK): 1GHz or Max
- *
- * Set A calculated last so that following calculations are based on Set A
+ * pstate latency overridden to 5us
*/
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
+
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+ for (i = 3; i > 0; i--) {
+ if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
+ (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ }
+
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ if (!pstate_en) {
+ /* The only difference between sets A and C is p-state latency; if p-state
+ * change is not supported, calculate DLG based on the dummy p-state latency
+ * and max out the set A p-state watermark.
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0x13FFFF;
+ } else {
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ /* Make set D = set A until set D is enabled */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
@@ -2217,6 +2350,13 @@ static void dcn30_calculate_wm(
pipe_idx++;
}
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
}
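Editor's note: the reworked Set C above no longer takes the WM_C table latency directly. It starts from dummy_pstate_table[0], and when the current minimum DRAM speed lands within a ±160 MT/s margin of a table entry it adopts that entry's dummy p-state latency instead; when p-state switching is unsupported, Set A is copied from Set C with the p-state watermark maxed out (0x13FFFF). A self-contained sketch of the table lookup, with the table layout and names simplified from the hunk:

#include <stdio.h>

struct dummy_pstate_entry {
	unsigned int dram_speed_mts;
	double dummy_pstate_latency_us;
};

/* Pick the dummy latency whose DRAM speed is within +/- margin of target. */
static double pick_dummy_pstate_latency(const struct dummy_pstate_entry *tbl,
					unsigned int min_dram_speed_mts)
{
	const unsigned int margin = 160;	/* MT/s, from the hunk above */
	double latency_us = tbl[0].dummy_pstate_latency_us;	/* default */
	int i;

	for (i = 3; i > 0; i--)
		if (min_dram_speed_mts + margin > tbl[i].dram_speed_mts &&
		    min_dram_speed_mts - margin < tbl[i].dram_speed_mts)
			latency_us = tbl[i].dummy_pstate_latency_us;

	return latency_us;
}

int main(void)
{
	struct dummy_pstate_entry tbl[4] = {
		{ 800, 38.0 }, { 1600, 9.0 }, { 8533, 8.0 }, { 16000, 5.0 }
	};

	printf("%.1f us\n", pick_dummy_pstate_latency(tbl, 16000));
	return 0;
}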
bool dcn30_validate_bandwidth(struct dc *dc,
@@ -2249,8 +2389,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_out;
}
- dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
BW_VAL_TRACE_END_WATERMARKS();
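Editor's note: the validate path above now dispatches through the resource-funcs table instead of calling the formerly static dcn30_calculate_wm() directly, which is why the function is renamed, exported, and installed as .calculate_wm_and_dlg; derived ASICs can then substitute their own watermark/DLG routine. A reduced sketch of that indirection (names here are illustrative):

#include <stdio.h>

struct res_funcs {
	/* per-ASIC hook; dcn30 installs dcn30_calculate_wm_and_dlg() */
	void (*calculate_wm_and_dlg)(int vlevel);
};

static void dcn30_wm_and_dlg(int vlevel)
{
	printf("wm+dlg at vlevel %d\n", vlevel);
}

static void validate(const struct res_funcs *funcs, int vlevel)
{
	funcs->calculate_wm_and_dlg(vlevel);	/* indirect dispatch */
}

int main(void)
{
	struct res_funcs funcs = { .calculate_wm_and_dlg = dcn30_wm_and_dlg };

	validate(&funcs, 2);
	return 0;
}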
@@ -2293,7 +2432,7 @@ static void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
(dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
}
-static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
unsigned int i, j;
unsigned int num_states = 0;
@@ -2413,14 +2552,16 @@ static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}
-static struct resource_funcs dcn30_res_pool_funcs = {
+static const struct resource_funcs dcn30_res_pool_funcs = {
.destroy = dcn30_destroy_resource_pool,
.link_enc_create = dcn30_link_encoder_create,
.panel_cntl_create = dcn30_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
+ .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
@@ -2618,6 +2759,14 @@ static bool dcn30_resource_construct(
}
}
pool->base.timing_generator_count = i;
+ /* PSR */
+ pool->base.psr = dmub_psr_create(ctx);
+
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create PSR obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
@@ -2684,7 +2833,7 @@ static bool dcn30_resource_construct(
if (!resource_construct(num_virtual_links, dc, &pool->base,
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
&res_create_funcs : &res_create_maximus_funcs)))
- goto create_fail;
+ goto create_fail;
/* HW Sequencer and Plane caps */
dcn30_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 4b4a4d81c1e3..d163812af858 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -55,6 +55,11 @@ unsigned int dcn30_calc_max_scaled_time(
bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
+void dcn30_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
void dcn30_populate_dml_writeback_from_context(
struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
@@ -79,4 +84,7 @@ enum dc_status dcn30_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *dc_stream);
+
+void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index ae608c329366..3586934df25f 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -30,8 +30,6 @@
* interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
*/
-typedef bool BOOLEAN;
-
enum pp_smu_ver {
/*
* PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -240,7 +238,7 @@ struct pp_smu_funcs_nv {
* DC hardware
*/
enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp,
- BOOLEAN pstate_handshake_supported);
+ bool pstate_handshake_supported);
};
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 417331438c30..dbc7e2abe379 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,6 +33,10 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
+ifdef CONFIG_ARM64
+dml_rcflags := -mgeneral-regs-only
+endif
+
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
@@ -60,6 +64,13 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_rcflags)
endif
ifdef CONFIG_DRM_AMD_DC_DCN3_0
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) -Wframe-larger-than=2048
@@ -67,6 +78,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 80170f9721ce..860e72a51534 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2635,15 +2635,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
- mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
+ mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
- if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
- if (mode_lib->vba.DRAMClockChangeWatermark >
- dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
- mode_lib->vba.MinTTUVBlank[k] += 25;
- }
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index 1e557ddcb638..d0b90947f540 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -33,7 +33,7 @@ struct display_mode_lib;
// Function: dml_rq_dlg_get_rq_reg
// Main entry point for test to get the register values out of this DML class.
-// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
// and then populate the rq_regs struct
// Input:
// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 0d53e871a9d1..27cf8bed9376 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -33,7 +33,7 @@ struct display_mode_lib;
// Function: dml_rq_dlg_get_rq_reg
// Main entry point for test to get the register values out of this DML class.
-// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
// and then populate the rq_regs struct
// Input:
// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index a576eed94d9b..367c82b5ab4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -1294,7 +1294,7 @@ static unsigned int CalculateVMAndRowBytes(
unsigned int MacroTileHeight;
unsigned int ExtraDPDEBytesFrame;
unsigned int PDEAndMetaPTEBytesFrame;
- unsigned int PixelPTEReqHeightPTEs;
+ unsigned int PixelPTEReqHeightPTEs = 0;
if (DCCEnable == true) {
*MetaRequestHeight = 8 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 2beb284f89b0..9e0ae18e71fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -597,7 +597,8 @@ static void CalculateStutterEfficiency(
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
- double *StutterEfficiency);
+ double *StutterEfficiency,
+ double *StutterPeriodOut);
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
@@ -3134,7 +3135,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->meta_row_bw,
v->dpte_row_bw,
&v->StutterEfficiencyNotIncludingVBlank,
- &v->StutterEfficiency);
+ &v->StutterEfficiency,
+ &v->StutterPeriod);
}
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
@@ -3235,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
@@ -5305,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
- && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+ && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
@@ -5515,7 +5517,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
if (WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
- if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave || mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
@@ -5556,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
@@ -6151,7 +6153,8 @@ static void CalculateStutterEfficiency(
double meta_row_bw[],
double dpte_row_bw[],
double *StutterEfficiencyNotIncludingVBlank,
- double *StutterEfficiency)
+ double *StutterEfficiency,
+ double *StutterPeriodOut)
{
double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
double FrameTimeForMinFullDETBufferingTime = 0;
@@ -6262,6 +6265,9 @@ static void CalculateStutterEfficiency(
}
*StutterEfficiency = (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
+
+ if (StutterPeriodOut)
+ *StutterPeriodOut = StutterPeriod;
}
static void CalculateSwathAndDETConfiguration(
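Editor's note: StutterPeriodOut is added as an optional output of CalculateStutterEfficiency(); only the NULL check at the end touches it, so callers that do not need the stutter period can pass NULL, while dcn30 plumbs it into context->perf_params.stutter_period_us. The NULL-guarded optional out-parameter pattern in isolation:

#include <stdio.h>

/* Optional out-parameter: callers that don't need the value pass NULL. */
static void calc_stutter(double *efficiency, double *period_out)
{
	double period = 42.0;	/* stand-in for the real computation */

	*efficiency = 95.0;	/* required output */
	if (period_out)		/* optional output, guarded */
		*period_out = period;
}

int main(void)
{
	double eff, period;

	calc_stutter(&eff, NULL);	/* caller that ignores the period */
	calc_stutter(&eff, &period);	/* caller that wants it */
	printf("%.1f %.1f\n", eff, period);
	return 0;
}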
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5bb10f6e300d..416bf6fb67bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
index e5b17e1104c6..c04965cceff3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
@@ -32,7 +32,7 @@ struct display_mode_lib;
// Function: dml_rq_dlg_get_rq_reg
// Main entry point for test to get the register values out of this DML class.
-// This function calls <get_rq_param> and <extract_rq_regs> fucntions to calculate
+// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
// and then populate the rq_regs struct
// Input:
// pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index afdd4f0d9d71..b32093136089 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -467,7 +467,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] =
1;
mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
- mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;;
+ mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_slices;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index ea29cf95d470..f2624a1156e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,6 +10,10 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
+ifdef CONFIG_ARM64
+dsc_rcflags := -mgeneral-regs-only
+endif
+
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
@@ -28,6 +32,7 @@ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags)
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 8cdaa6eef5d3..4c844cfaa956 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -34,6 +34,9 @@
/* default DSC policy target bitrate limit is 16bpp */
static uint32_t dsc_policy_max_target_bpp_limit = 16;
+/* default DSC policy enables DSC only when needed */
+static bool dsc_policy_enable_dsc_when_not_needed;
+
static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
@@ -189,8 +192,10 @@ static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
}
-static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bpp_increment_div)
+static bool dsc_bpp_increment_div_from_dpcd(uint8_t bpp_increment_dpcd, uint32_t *bpp_increment_div)
{
+ // Mask bpp increment dpcd field to avoid reading other fields
+ bpp_increment_dpcd &= 0x7;
switch (bpp_increment_dpcd) {
case 0:
@@ -360,7 +365,7 @@ static bool decide_dsc_target_bpp_x16(
get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp,
dsc_common_caps, timing, &range);
- if (target_bandwidth_kbps >= range.stream_kbps) {
+ if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) {
/* enough bandwidth without dsc */
*target_bpp_x16 = 0;
should_use_dsc = false;
@@ -961,9 +966,20 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, struct dc
/* internal upper limit, default 16 bpp */
if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
+
+ /* enable DSC when not needed, default false */
+ if (dsc_policy_enable_dsc_when_not_needed)
+ policy->enable_dsc_when_not_needed = dsc_policy_enable_dsc_when_not_needed;
+ else
+ policy->enable_dsc_when_not_needed = false;
}
void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit)
{
dsc_policy_max_target_bpp_limit = limit;
}
+
+void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable)
+{
+ dsc_policy_enable_dsc_when_not_needed = enable;
+}
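Editor's note: the new module-level flag lets callers force DSC on even when the stream fits the link uncompressed; decide_dsc_target_bpp_x16() now skips its "enough bandwidth without dsc" early-out when the policy is set. A reduced sketch of the decision (function and parameter names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether to compress: skip the bandwidth early-out when forced. */
static bool should_use_dsc(bool enable_when_not_needed,
			   unsigned int link_kbps, unsigned int stream_kbps)
{
	if (!enable_when_not_needed && link_kbps >= stream_kbps)
		return false;	/* enough bandwidth without DSC */
	return true;
}

int main(void)
{
	printf("%d\n", should_use_dsc(false, 8100000, 6000000));	/* 0 */
	printf("%d\n", should_use_dsc(true, 8100000, 6000000));	/* 1 */
	return 0;
}

The if/else in dc_dsc_get_policy_for_timing() is behaviorally a direct assignment of the module flag; the hunk keeps the expanded form as upstream wrote it.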
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 0f2f4508e564..74c0943ed644 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -31,6 +31,18 @@ AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO)
###############################################################################
+# DCE 6x
+###############################################################################
+# all DCE6.x are derived from DCE6.0
+ifdef CONFIG_DRM_AMD_DC_SI
+GPIO_DCE60 = hw_translate_dce60.o hw_factory_dce60.o
+
+AMD_DAL_GPIO_DCE60 = $(addprefix $(AMDDALPATH)/dc/gpio/dce60/,$(GPIO_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE60)
+endif
+
+###############################################################################
# DCE 8x
###############################################################################
# all DCE8.x are derived from DCE8.0
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
index cf98aa827a9a..e883864cff3c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -162,7 +162,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
-/* fucntion table */
+/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c
new file mode 100644
index 000000000000..cc69acd8ada7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce60.h"
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+#include "../hw_generic.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define HPD_REG_LIST_DCE6(id) \
+ HPD_GPIO_REG_LIST(id), \
+ .int_status = mmDC_HPD ## id ## _INT_STATUS,\
+ .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
+
+#define HPD_MASK_SH_LIST_DCE6(mask_sh) \
+ .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
+ .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
+ .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
+ .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST_DCE6(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5),
+ hpd_regs(6)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST_DCE6(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST_DCE6(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = NULL,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+void dal_hw_factory_dce60_init(
+ struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h
new file mode 100644
index 000000000000..1fd54ff8979c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE60_H__
+#define __DAL_HW_FACTORY_DCE60_H__
+
+void dal_hw_factory_dce60_init(
+ struct hw_factory *factory);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c
new file mode 100644
index 000000000000..255df31ec577
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce60.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "smu/smu_6_0_d.h"
+
+/*
+ * @brief
+ * Returns index of first bit (starting with LSB) which is set
+ */
+static uint32_t index_from_vector(
+ uint32_t vector)
+{
+ uint32_t result = 0;
+ uint32_t mask = 1;
+
+ do {
+ if (vector == mask)
+ return result;
+
+ ++result;
+ mask <<= 1;
+ } while (mask);
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_ENUM_UNKNOWN;
+}
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* GPIOPAD */
+ case mmGPIOPAD_A:
+ *id = GPIO_ID_GPIO_PAD;
+ *en = index_from_vector(mask);
+ return (*en <= GPIO_GPIO_PAD_MAX);
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC;
+ * DdcHandle will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GPIO_PAD:
+ info->offset = mmGPIOPAD_A;
+ info->mask = (1 << en);
+ result = (info->mask <= GPIO_GPIO_PAD_MAX);
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+void dal_hw_translate_dce60_init(
+ struct hw_translate *translate)
+{
+ translate->funcs = &funcs;
+}
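Editor's note: the new DCE6 translate table is a bidirectional map. offset_to_id() decodes a (register offset, pin mask) pair into a (gpio_id, enable) index, id_to_offset() reverses it and also derives the Y/EN/MASK register offsets from the base A register, and index_from_vector() is a linear first-set-bit scan. A compact equivalent of that scan:

#include <stdio.h>

/* Index of the least significant set bit; -1 if none is set. Matches
 * index_from_vector() above when exactly one bit is set (ffs(v) - 1). */
static int first_set_bit(unsigned int vector)
{
	int idx;

	for (idx = 0; vector; idx++, vector >>= 1)
		if (vector & 1)
			return idx;
	return -1;
}

int main(void)
{
	printf("%d\n", first_set_bit(1u << 5));	/* prints 5 */
	return 0;
}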
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h
new file mode 100644
index 000000000000..1e811f35cec7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE60_H__
+#define __DAL_HW_TRANSLATE_DCE60_H__
+
+void dal_hw_translate_dce60_init(
+ struct hw_translate *tr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
index b38c96c9fed3..7d36b56346a6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -194,7 +194,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
-/* fucntion table */
+/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 83f798cb8b21..9b63c6c0cc84 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -221,7 +221,7 @@ static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
generic->base.regs = &generic_regs[en].gpio;
}
-/* fucntion table */
+/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
index 907c5911eb9e..2f57ee6deabc 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
@@ -202,7 +202,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
-/* fucntion table */
+/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 7e7fb6572107..21583699f992 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -117,6 +117,12 @@ static const struct ddc_registers ddc_data_regs_dcn[] = {
ddc_data_regs_dcn2(4),
ddc_data_regs_dcn2(5),
ddc_data_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(DATA),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_registers ddc_clk_regs_dcn[] = {
@@ -126,6 +132,12 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = {
ddc_clk_regs_dcn2(4),
ddc_clk_regs_dcn2(5),
ddc_clk_regs_dcn2(6),
+ {
+ DDC_GPIO_VGA_REG_LIST(CLK),
+ .ddc_setup = 0,
+ .phy_aux_cntl = 0,
+ .dc_gpio_aux_ctrl_5 = 0
+ }
};
static const struct ddc_sh_mask ddc_shift[] = {
@@ -218,7 +230,7 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
}
-/* fucntion table */
+/* function table */
static const struct hw_factory_funcs funcs = {
.init_ddc_data = dal_hw_ddc_init,
.init_generic = dal_hw_generic_init,
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f67c18375bfd..dac427b68fd7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -63,13 +63,13 @@ enum gpio_result dal_gpio_open_ex(
enum gpio_mode mode)
{
if (gpio->pin) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_ALREADY_OPENED;
}
// No action if allocation failed during gpio construct
if (!gpio->hw_container.ddc) {
- ASSERT_CRITICAL(false);
+ BREAK_TO_DEBUGGER();
return GPIO_RESULT_NON_SPECIFIC_ERROR;
}
gpio->mode = mode;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index e5cfe28bc7bf..6fc8a6e9dc15 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -42,6 +42,9 @@
* Post-requisites: headers required by this unit
*/
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/hw_factory_dce60.h"
+#endif
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
@@ -71,6 +74,13 @@ bool dal_hw_factory_init(
}
switch (dce_version) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case DCE_VERSION_6_0:
+ case DCE_VERSION_6_1:
+ case DCE_VERSION_6_4:
+ dal_hw_factory_dce60_init(factory);
+ return true;
+#endif
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index efea7cb0f17c..3a93c945e57d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -40,6 +40,9 @@
* Post-requisites: headers required by this unit
*/
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/hw_translate_dce60.h"
+#endif
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
@@ -69,6 +72,13 @@ bool dal_hw_translate_init(
}
switch (dce_version) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case DCE_VERSION_6_0:
+ case DCE_VERSION_6_1:
+ case DCE_VERSION_6_4:
+ dal_hw_translate_dce60_init(translate);
+ return true;
+#endif
case DCE_VERSION_8_0:
case DCE_VERSION_8_1:
case DCE_VERSION_8_3:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 329395ee7461..6e6bc66e49f0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -101,7 +101,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
bool fast_validate);
- void (*calculate_wm)(
+ void (*calculate_wm_and_dlg)(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -300,6 +300,7 @@ union pipe_update_flags {
uint32_t gamut_remap : 1;
uint32_t scaler : 1;
uint32_t viewport : 1;
+ uint32_t plane_changed : 1;
} bits;
uint32_t raw;
};
@@ -396,6 +397,7 @@ struct dc_state {
struct dc_stream_state *streams[MAX_PIPES];
struct dc_stream_status stream_status[MAX_PIPES];
uint8_t stream_count;
+ uint8_t stream_mask;
struct resource_context res_ctx;
@@ -410,6 +412,10 @@ struct dc_state {
struct clk_mgr *clk_mgr;
struct kref refcount;
+
+ struct {
+ unsigned int stutter_period_us;
+ } perf_params;
};
#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 72743058836d..949b61351ede 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -89,6 +89,11 @@ enum dentist_divider_range {
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
.DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define CLK_COMMON_REG_LIST_DCE60_BASE() \
+ SR(DENTIST_DISPCLK_CNTL)
+#endif
+
#define CLK_COMMON_REG_LIST_DCN_BASE() \
SR(DENTIST_DISPCLK_CNTL)
@@ -115,6 +120,12 @@ enum dentist_divider_range {
CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#define CLK_COMMON_MASK_SH_LIST_DCE60_COMMON_BASE(mask_sh) \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+#endif
+
#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index f9ab5abb6462..48eac622c6a0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -49,6 +49,7 @@ struct panel_cntl_funcs {
void (*store_backlight_level)(struct panel_cntl *panel_cntl);
void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
uint32_t backlight_pwm_u16_16);
+ uint32_t (*get_current_backlight)(struct panel_cntl *panel_cntl);
};
struct panel_cntl_init_data {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 11ce06e69d3f..0184cefb083b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -143,7 +143,7 @@ struct stream_encoder_funcs {
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing);
- void (*set_mst_bandwidth)(
+ void (*set_throttled_vcp_size)(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 3c986717dcd5..64c1be818b0e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -67,6 +67,10 @@ struct hw_sequencer_funcs {
int num_planes, struct dc_state *context);
void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
+ bool (*disconnect_pipes)(struct dc *dc,
+ struct dc_state *context);
+ void (*wait_for_pending_cleared)(struct dc *dc,
+ struct dc_state *context);
void (*post_unlock_program_front_end)(struct dc *dc,
struct dc_state *context);
void (*update_plane_addr)(const struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 3352b79fb1cb..405c25322607 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -31,6 +31,17 @@ AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ)
###############################################################################
+# DCE 6x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_SI
+IRQ_DCE60 = irq_service_dce60.o
+
+AMD_DAL_IRQ_DCE60 = $(addprefix $(AMDDALPATH)/dc/irq/dce60/,$(IRQ_DCE60))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE60)
+endif
+
+###############################################################################
# DCE 8x
###############################################################################
IRQ_DCE80 = irq_service_dce80.o
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
new file mode 100644
index 000000000000..524481885fd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce60.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#define VISLANDS30_IV_SRCID_D1_VBLANK 1
+#define VISLANDS30_IV_SRCID_D2_VBLANK 2
+#define VISLANDS30_IV_SRCID_D3_VBLANK 3
+#define VISLANDS30_IV_SRCID_D4_VBLANK 4
+#define VISLANDS30_IV_SRCID_D5_VBLANK 5
+#define VISLANDS30_IV_SRCID_D6_VBLANK 6
+
+#include "dc_types.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs_dce60 = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_INVALID + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
+ },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD6 + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmLB ## reg_num ## _INT_MASK,\
+ .enable_mask =\
+ INT_MASK__VBLANK_INT_MASK,\
+ .enable_value = {\
+ INT_MASK__VBLANK_INT_MASK,\
+ ~INT_MASK__VBLANK_INT_MASK},\
+ .ack_reg = mmLB ## reg_num ## _VBLANK_STATUS,\
+ .ack_mask =\
+ VBLANK_STATUS__VBLANK_ACK_MASK,\
+ .ack_value =\
+ VBLANK_STATUS__VBLANK_ACK_MASK,\
+ .funcs = &vblank_irq_info_funcs_dce60\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce60[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_int_entry(6),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ hpd_rx_int_entry(6),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+enum dc_irq_source to_dal_irq_source_dce60(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case VISLANDS30_IV_SRCID_D1_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case VISLANDS30_IV_SRCID_D2_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case VISLANDS30_IV_SRCID_D3_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case VISLANDS30_IV_SRCID_D4_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case VISLANDS30_IV_SRCID_D5_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case VISLANDS30_IV_SRCID_D6_VBLANK:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
+ return DC_IRQ_SOURCE_HPD1;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
+ return DC_IRQ_SOURCE_HPD2;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
+ return DC_IRQ_SOURCE_HPD3;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
+ return DC_IRQ_SOURCE_HPD4;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
+ return DC_IRQ_SOURCE_HPD5;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
+ return DC_IRQ_SOURCE_HPD6;
+ case VISLANDS30_IV_EXTID_HPD_RX_A:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_B:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_C:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_D:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_E:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_F:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static const struct irq_service_funcs irq_service_funcs_dce60 = {
+ .to_dal_irq_source = to_dal_irq_source_dce60
+};
+
+static void dce60_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce60;
+ irq_service->funcs = &irq_service_funcs_dce60;
+}
+
+struct irq_service *dal_irq_service_dce60_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dce60_irq_construct(irq_service, init_data);
+ return irq_service;
+}
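[Editor's note] The new file follows DAL's usual irq-service pattern: a per-ASIC table of irq_source_info entries plus a construct/create pair, with hpd_ack() setting DC_HPD1_INT_POLARITY to the opposite of the sensed level so the next hot-plug transition re-arms the interrupt. A minimal user-space sketch of the create/construct split, with stand-in types and calloc in place of kzalloc:

#include <stdlib.h>

/* Opaque stand-ins; real definitions are in irq_service.h. */
struct irq_source_info;
struct irq_service_funcs;

struct irq_service_sketch {
	const struct irq_source_info *info;
	const struct irq_service_funcs *funcs;
};

/* construct() fills in the per-ASIC tables; create() owns allocation
 * and failure handling, mirroring the split in the file above. */
static struct irq_service_sketch *
irq_service_create_sketch(const struct irq_source_info *info,
			  const struct irq_service_funcs *funcs)
{
	struct irq_service_sketch *svc = calloc(1, sizeof(*svc));

	if (!svc)
		return NULL;

	svc->info = info;   /* irq_source_info_dce60 in the real driver */
	svc->funcs = funcs; /* irq_service_funcs_dce60 */
	return svc;
}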
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h
new file mode 100644
index 000000000000..294db29e8115
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Mauro Rossi <issor.oruam@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE60_H__
+#define __DAL_IRQ_SERVICE_DCE60_H__
+
+#include "../irq_service.h"
+
+enum dc_irq_source to_dal_irq_source_dce60(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+struct irq_service *dal_irq_service_dce60_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index 49689f71f4f1..0effbb2bd74a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -306,8 +306,8 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
pflip_int_entry(1),
pflip_int_entry(2),
pflip_int_entry(3),
- [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
- [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
[DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
gpio_pad_int_entry(0),
gpio_pad_int_entry(1),
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 33053b9fe6bd..6bf27bde8724 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -32,6 +32,9 @@
#include "dce110/irq_service_dce110.h"
+#if defined(CONFIG_DRM_AMD_DC_SI)
+#include "dce60/irq_service_dce60.h"
+#endif
#include "dce80/irq_service_dce80.h"
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c3bbfe397e8d..95cb56929e79 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,6 +55,10 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
+#elif defined(CONFIG_ARM64)
+#include <asm/neon.h>
+#define DC_FP_START() kernel_neon_begin()
+#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
@@ -90,36 +94,27 @@
* general debug capabilities
*
*/
-#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
-#define ASSERT_CRITICAL(expr) do { \
- if (WARN_ON(!(expr))) { \
- kgdb_breakpoint(); \
- } \
-} while (0)
+#ifdef CONFIG_DEBUG_KERNEL_DC
+#define dc_breakpoint() kgdb_breakpoint()
#else
-#define ASSERT_CRITICAL(expr) do { \
- if (WARN_ON(!(expr))) { \
- ; \
- } \
-} while (0)
+#define dc_breakpoint() do {} while (0)
#endif
-#if defined(CONFIG_DEBUG_KERNEL_DC)
-#define ASSERT(expr) ASSERT_CRITICAL(expr)
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) \
+ dc_breakpoint(); \
+ } while (0)
-#else
-#define ASSERT(expr) WARN_ON_ONCE(!(expr))
-#endif
+#define ASSERT(expr) do { \
+ if (WARN_ON_ONCE(!(expr))) \
+ dc_breakpoint(); \
+ } while (0)
-#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
#define BREAK_TO_DEBUGGER() \
do { \
DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__); \
- kgdb_breakpoint(); \
+ dc_breakpoint(); \
} while (0)
-#else
-#define BREAK_TO_DEBUGGER() DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__)
-#endif
#define DC_ERR(...) do { \
dm_error(__VA_ARGS__); \
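[Editor's note] This hunk centralizes the kgdb hook behind dc_breakpoint(), so ASSERT(), ASSERT_CRITICAL() and BREAK_TO_DEBUGGER() share one CONFIG_DEBUG_KERNEL_DC-gated trap instead of three differently gated variants; note the semantic tightening for ASSERT_CRITICAL, which previously trapped whenever KGDB was available. A compilable user-space sketch of the resulting expansion, with WARN_ON and kgdb_breakpoint replaced by stand-ins:

#include <stdio.h>

/* Stand-in: prints and returns the condition, like WARN_ON. */
#define WARN_ON_SKETCH(cond) \
	((cond) ? (fprintf(stderr, "WARN at line %d\n", __LINE__), 1) : 0)

#ifdef DEBUG_KERNEL_DC_SKETCH
#define dc_breakpoint_sketch() __builtin_trap()
#else
#define dc_breakpoint_sketch() do {} while (0)
#endif

#define ASSERT_SKETCH(expr) do { \
	if (WARN_ON_SKETCH(!(expr))) \
		dc_breakpoint_sketch(); \
} while (0)

int main(void)
{
	int active_streams = 0;

	ASSERT_SKETCH(active_streams >= 0); /* passes silently */
	ASSERT_SKETCH(active_streams == 1); /* warns; traps only in the debug build */
	return 0;
}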
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index b8040da94b9d..1053b165c139 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -46,9 +46,10 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link) {}
-static void virtual_stream_encoder_set_mst_bandwidth(
+static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
- struct fixed31_32 avg_time_slots_per_mtp) {}
+ struct fixed31_32 avg_time_slots_per_mtp)
+{}
static void virtual_stream_encoder_update_hdmi_info_packets(
struct stream_encoder *enc,
@@ -87,6 +88,23 @@ static void virtual_enc_dp_set_odm_combine(
bool odm_combine)
{}
+static void virtual_dig_connect_to_otg(
+ struct stream_encoder *enc,
+ int tg_inst)
+{}
+
+static void virtual_setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst,
+ bool enable)
+{}
+
+static void virtual_stream_encoder_set_dsc_pps_info_packet(
+ struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps)
+{}
+
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
.dp_set_odm_combine =
virtual_enc_dp_set_odm_combine,
@@ -96,8 +114,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
virtual_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
virtual_stream_encoder_dvi_set_stream_attribute,
- .set_mst_bandwidth =
- virtual_stream_encoder_set_mst_bandwidth,
+ .set_throttled_vcp_size =
+ virtual_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
virtual_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
@@ -114,6 +132,9 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
.audio_mute_control = virtual_audio_mute_control,
.set_avmute = virtual_stream_encoder_set_avmute,
.hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute,
+ .dig_connect_to_otg = virtual_dig_connect_to_otg,
+ .setup_stereo_sync = virtual_setup_stereo_sync,
+ .dp_set_dsc_pps_info_packet = virtual_stream_encoder_set_dsc_pps_info_packet,
};
bool virtual_stream_encoder_construct(
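[Editor's note] The virtual encoder gains explicit no-op implementations for the three new hooks, keeping the vtable total so call sites never need NULL checks. A sketch of the pattern with stand-in types:

#include <stdbool.h>

struct stream_encoder; /* opaque stand-in */

struct stream_encoder_funcs_sketch {
	void (*setup_stereo_sync)(struct stream_encoder *enc,
				  int tg_inst, bool enable);
};

/* Deliberately empty: virtual links have no hardware to program. */
static void noop_setup_stereo_sync(struct stream_encoder *enc,
				   int tg_inst, bool enable) {}

static const struct stream_encoder_funcs_sketch virtual_funcs_sketch = {
	.setup_stereo_sync = noop_setup_stereo_sync,
};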
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index e013875b89ed..d103ec1eaa73 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -36,11 +36,20 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xe6d590b09
+#define DMUB_FW_VERSION_GIT_HASH 0x9cf8f05fe
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 25
-#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION)
+#define DMUB_FW_VERSION_REVISION 35
+#define DMUB_FW_VERSION_TEST 0
+#define DMUB_FW_VERSION_VBIOS 0
+#define DMUB_FW_VERSION_HOTFIX 0
+#define DMUB_FW_VERSION_UCODE (((DMUB_FW_VERSION_MAJOR & 0xFF) << 24) | \
+ ((DMUB_FW_VERSION_MINOR & 0xFF) << 16) | \
+ ((DMUB_FW_VERSION_REVISION & 0xFF) << 8) | \
+ ((DMUB_FW_VERSION_TEST & 0x1) << 7) | \
+ ((DMUB_FW_VERSION_VBIOS & 0x1) << 6) | \
+ (DMUB_FW_VERSION_HOTFIX & 0x3F))
+
#endif
//<DMUB_TYPES>==================================================================
@@ -48,6 +57,7 @@
#define SET_ABM_PIPE_GRADUALLY_DISABLE 0
#define SET_ABM_PIPE_IMMEDIATELY_DISABLE 255
+#define SET_ABM_PIPE_IMMEDIATE_KEEP_GAIN_DISABLE 254
#define SET_ABM_PIPE_NORMAL 1
/* Maximum number of streams on any ASIC. */
@@ -60,10 +70,6 @@
#define PHYSICAL_ADDRESS_LOC union large_integer
#endif
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
#ifndef dmub_memcpy
#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
#endif
@@ -72,6 +78,10 @@ extern "C" {
#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
#endif
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
#ifndef dmub_udelay
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
@@ -88,6 +98,7 @@ union dmub_psr_debug_flags {
struct {
uint32_t visual_confirm : 1;
uint32_t use_hw_lock_mgr : 1;
+ uint32_t log_line_nums : 1;
} bitfields;
uint32_t u32All;
@@ -160,7 +171,7 @@ union dmub_fw_boot_status {
uint32_t dal_fw : 1;
uint32_t mailbox_rdy : 1;
uint32_t optimized_init_done : 1;
- uint32_t reserved : 29;
+ uint32_t restore_required : 1;
} bits;
uint32_t all;
};
@@ -169,6 +180,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0),
DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1),
DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2),
+ DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3),
};
/* Register bit definition for SCRATCH15 */
@@ -204,6 +216,7 @@ enum dmub_cmd_vbios_type {
DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+ DMUB_CMD__VBIOS_LVTMA_CONTROL = 15,
};
//==============================================================================
@@ -287,9 +300,17 @@ enum dmub_cmd_type {
DMUB_CMD__PSR = 64,
DMUB_CMD__ABM = 66,
DMUB_CMD__HW_LOCK = 69,
+ DMUB_CMD__DP_AUX_ACCESS = 70,
+ DMUB_CMD__OUTBOX1_ENABLE = 71,
DMUB_CMD__VBIOS = 128,
};
+enum dmub_out_cmd_type {
+ DMUB_OUT_CMD__NULL = 0,
+ DMUB_OUT_CMD__DP_AUX_REPLY = 1,
+ DMUB_OUT_CMD__DP_HPD_NOTIFY = 2,
+};
+
#pragma pack(push, 1)
struct dmub_cmd_header {
@@ -445,6 +466,78 @@ struct dmub_rb_cmd_dpphy_init {
uint8_t reserved[60];
};
+enum dp_aux_request_action {
+ DP_AUX_REQ_ACTION_I2C_WRITE = 0x00,
+ DP_AUX_REQ_ACTION_I2C_READ = 0x10,
+ DP_AUX_REQ_ACTION_I2C_STATUS_REQ = 0x20,
+ DP_AUX_REQ_ACTION_I2C_WRITE_MOT = 0x40,
+ DP_AUX_REQ_ACTION_I2C_READ_MOT = 0x50,
+ DP_AUX_REQ_ACTION_I2C_STATUS_REQ_MOT = 0x60,
+ DP_AUX_REQ_ACTION_DPCD_WRITE = 0x80,
+ DP_AUX_REQ_ACTION_DPCD_READ = 0x90
+};
+
+/* DP AUX command */
+struct aux_transaction_parameters {
+ uint8_t is_i2c_over_aux;
+ uint8_t action;
+ uint8_t length;
+ uint8_t pad;
+ uint32_t address;
+ uint8_t data[16];
+};
+
+struct dmub_cmd_dp_aux_control_data {
+ uint32_t handle;
+ uint8_t port_index;
+ uint8_t sw_crc_enabled;
+ uint16_t timeout;
+ struct aux_transaction_parameters dpaux;
+};
+
+struct dmub_rb_cmd_dp_aux_access {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_dp_aux_control_data aux_control;
+};
+
+struct dmub_rb_cmd_outbox1_enable {
+ struct dmub_cmd_header header;
+ uint32_t enable;
+};
+
+/* DP AUX Reply command - OutBox Cmd */
+struct aux_reply_data {
+ uint8_t command;
+ uint8_t length;
+ uint8_t pad[2];
+ uint8_t data[16];
+};
+
+struct aux_reply_control_data {
+ uint32_t handle;
+ uint8_t phy_port_index;
+ uint8_t result;
+ uint16_t pad;
+};
+
+struct dmub_rb_cmd_dp_aux_reply {
+ struct dmub_cmd_header header;
+ struct aux_reply_control_data control;
+ struct aux_reply_data reply_data;
+};
+
+struct dp_hpd_data {
+ uint8_t phy_port_index;
+ uint8_t hpd_type;
+ uint8_t hpd_status;
+ uint8_t pad;
+};
+
+struct dmub_rb_cmd_dp_hpd_notify {
+ struct dmub_cmd_header header;
+ struct dp_hpd_data hpd_data;
+};
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -674,8 +767,15 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
struct dmub_rb_cmd_abm_init_config abm_init_config;
+ struct dmub_rb_cmd_dp_aux_access dp_aux_access;
+ struct dmub_rb_cmd_outbox1_enable outbox1_enable;
};
+union dmub_rb_out_cmd {
+ struct dmub_rb_cmd_common cmd_common;
+ struct dmub_rb_cmd_dp_aux_reply dp_aux_reply;
+ struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify;
+};
#pragma pack(pop)
@@ -748,6 +848,25 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
return true;
}
+static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
+ const union dmub_rb_out_cmd *cmd)
+{
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (uint8_t *)cmd;
+
+ if (dmub_rb_full(rb))
+ return false;
+
+ dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
+
+ rb->wrpt += DMUB_RB_CMD_SIZE;
+
+ if (rb->wrpt >= rb->capacity)
+ rb->wrpt %= rb->capacity;
+
+ return true;
+}
+
static inline bool dmub_rb_front(struct dmub_rb *rb,
union dmub_rb_cmd *cmd)
{
@@ -761,6 +880,23 @@ static inline bool dmub_rb_front(struct dmub_rb *rb,
return true;
}
+static inline bool dmub_rb_out_front(struct dmub_rb *rb,
+ union dmub_rb_out_cmd *cmd)
+{
+ const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ uint64_t *dst = (uint64_t *)cmd;
+ int i;
+
+ if (dmub_rb_empty(rb))
+ return false;
+
+ /* copy the command out of the ring one 64-bit word at a time */
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
+ return true;
+}
+
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
if (dmub_rb_empty(rb))
@@ -781,12 +917,10 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
while (rptr != wptr) {
uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
- //uint64_t volatile *p = (uint64_t volatile *)data;
- uint64_t temp;
int i;
for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
- temp = *data++;
+ *data++;
rptr += DMUB_RB_CMD_SIZE;
if (rptr >= rb->capacity)
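[Editor's note] Besides the new AUX/HPD outbox commands, DMUB_FW_VERSION_UCODE now packs major/minor/revision into one byte each with test/vbios/hotfix flags in the low byte. The outbox push mirrors the inbox one; below is a compilable sketch of the ring-buffer write. DMUB_RB_CMD_SIZE is 64 bytes at the time of this change and the fullness test is one common formulation, both assumptions of the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define RB_CMD_SIZE 64 /* assumed DMUB_RB_CMD_SIZE */

struct dmub_rb_sketch {
	uint8_t *base_address;
	uint32_t rptr, wrpt, capacity; /* capacity is a multiple of RB_CMD_SIZE */
};

/* Full when advancing the write pointer would collide with the reader. */
static bool rb_full_sketch(const struct dmub_rb_sketch *rb)
{
	return (rb->wrpt + RB_CMD_SIZE) % rb->capacity == rb->rptr;
}

static bool rb_out_push_front_sketch(struct dmub_rb_sketch *rb, const void *cmd)
{
	if (rb_full_sketch(rb))
		return false;

	memcpy(rb->base_address + rb->wrpt, cmd, RB_CMD_SIZE);
	rb->wrpt = (rb->wrpt + RB_CMD_SIZE) % rb->capacity;
	return true;
}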
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 21011edea337..7c782924c941 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -318,4 +318,10 @@ struct bp_encoder_cap_info {
uint32_t RESERVED:27;
};
+struct bp_soc_bb_info {
+ uint32_t dram_clock_change_latency_100ns;
+ uint32_t dram_sr_exit_latency_100ns;
+ uint32_t dram_sr_enter_exit_latency_100ns;
+};
+
#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
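[Editor's note] The new bp_soc_bb_info fields carry VBIOS-reported latencies in 100 ns units. A consumer working in microseconds would scale by 10; a minimal sketch under that assumed usage:

#include <stdint.h>

struct bp_soc_bb_info_sketch {
	uint32_t dram_clock_change_latency_100ns;
};

/* 100 ns units -> microseconds */
static inline uint32_t bb_dram_latency_us(const struct bp_soc_bb_info_sketch *bb)
{
	return bb->dram_clock_change_latency_100ns / 10;
}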
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index abeb58d544b1..ffcb059297d3 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -30,6 +30,34 @@
* ASIC internal revision ID
*/
+/* DCE60 (based on si_id.h in GPUOpen-Tools CodeXL) */
+#define SI_TAHITI_P_A0 0x01
+#define SI_TAHITI_P_B0 0x05
+#define SI_TAHITI_P_B1 0x06
+#define SI_PITCAIRN_PM_A0 0x14
+#define SI_PITCAIRN_PM_A1 0x15
+#define SI_CAPEVERDE_M_A0 0x28
+#define SI_CAPEVERDE_M_A1 0x29
+#define SI_OLAND_M_A0 0x3C
+#define SI_HAINAN_V_A0 0x46
+
+#define SI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_TAHITI_P(rev) \
+ ((rev >= SI_TAHITI_P_A0) && (rev < SI_PITCAIRN_PM_A0))
+
+#define ASIC_REV_IS_PITCAIRN_PM(rev) \
+ ((rev >= SI_PITCAIRN_PM_A0) && (rev < SI_CAPEVERDE_M_A0))
+
+#define ASIC_REV_IS_CAPEVERDE_M(rev) \
+ ((rev >= SI_CAPEVERDE_M_A0) && (rev < SI_OLAND_M_A0))
+
+#define ASIC_REV_IS_OLAND_M(rev) \
+ ((rev >= SI_OLAND_M_A0) && (rev < SI_HAINAN_V_A0))
+
+#define ASIC_REV_IS_HAINAN_V(rev) \
+ ((rev >= SI_HAINAN_V_A0) && (rev < SI_UNKNOWN))
+
/* DCE80 (based on ci_id.h in Perforce) */
#define CI_BONAIRE_M_A0 0x14
#define CI_BONAIRE_M_A1 0x15
@@ -177,10 +205,25 @@ enum {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
#endif
+#define GREEN_SARDINE_A0 0xA1
+#ifndef ASICREV_IS_GREEN_SARDINE
+#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
+#endif
/*
* ASIC chip ID
*/
+
+/* DCE60 */
+#define DEVICE_ID_SI_TAHITI_P_6780 0x6780
+#define DEVICE_ID_SI_PITCAIRN_PM_6800 0x6800
+#define DEVICE_ID_SI_PITCAIRN_PM_6808 0x6808
+#define DEVICE_ID_SI_CAPEVERDE_M_6820 0x6820
+#define DEVICE_ID_SI_CAPEVERDE_M_6828 0x6828
+#define DEVICE_ID_SI_OLAND_M_6600 0x6600
+#define DEVICE_ID_SI_OLAND_M_6608 0x6608
+#define DEVICE_ID_SI_HAINAN_V_6660 0x6660
+
/* DCE80 */
#define DEVICE_ID_KALINDI_9834 0x9834
#define DEVICE_ID_TEMASH_9839 0x9839
@@ -190,6 +233,7 @@ enum {
#define DEVICE_ID_RENOIR_1636 0x1636
/* Asic Family IDs for different asic family. */
+#define FAMILY_SI 110 /* Southern Islands: Tahiti (P), Pitcairn (PM), Cape Verde (M), Oland (M), Hainan (V) */
#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
#define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */
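[Editor's note] The SI revision macros partition hw_internal_rev into half-open ranges so family dispatch stays a pair of comparisons. A hedged sketch of what they enable; which SI parts map to which DCE_VERSION_6_x is not shown in this hunk, so the Oland-to-6.4 mapping and the display-less Hainan below are assumptions:

#include <stdint.h>

#define SI_OLAND_M_A0_SKETCH  0x3C
#define SI_HAINAN_V_A0_SKETCH 0x46

enum dce_version_sketch { DCE_6_0_SKETCH, DCE_6_4_SKETCH, DCE_NONE_SKETCH };

static enum dce_version_sketch si_dce_version_sketch(uint32_t rev)
{
	if (rev >= SI_HAINAN_V_A0_SKETCH)
		return DCE_NONE_SKETCH; /* assumed: Hainan has no display engine */
	if (rev >= SI_OLAND_M_A0_SKETCH)
		return DCE_6_4_SKETCH;  /* assumed: Oland */
	return DCE_6_0_SKETCH;          /* Tahiti / Pitcairn / Cape Verde */
}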
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index b67c9fa6b9cd..8aaa3af69202 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -34,6 +34,9 @@ struct dc_bios;
enum dce_version {
DCE_VERSION_UNKNOWN = (-1),
+ DCE_VERSION_6_0,
+ DCE_VERSION_6_1,
+ DCE_VERSION_6_4,
DCE_VERSION_8_0,
DCE_VERSION_8_1,
DCE_VERSION_8_3,
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 550f46e9b95f..7392a89e771f 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -80,6 +80,7 @@ struct link_training_settings {
uint16_t cr_pattern_time;
uint16_t eq_pattern_time;
+ enum dc_dp_training_pattern pattern_for_cr;
enum dc_dp_training_pattern pattern_for_eq;
bool enhanced_framing;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index e9fbd94f8635..20e554e771d1 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -470,6 +470,14 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
if (reset_status != MOD_HDCP_STATUS_SUCCESS)
push_error_status(hdcp, reset_status);
}
+
+ /* Clear CP_IRQ status if needed */
+ if (event_ctx.event == MOD_HDCP_EVENT_CPIRQ) {
+ status = mod_hdcp_clear_cp_irq_status(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ }
+
return status;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index b0cefed2eb02..6c678cfb82e3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -386,6 +386,7 @@ enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index bb5130f4228d..f7b5583ee609 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -30,6 +30,8 @@
#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+#define DP_CP_IRQ (1 << 2)
+
enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_INVALID = -1,
@@ -645,3 +647,18 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp)
status = MOD_HDCP_STATUS_INVALID_OPERATION;
return status;
}
+
+enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp)
+{
+ uint8_t clear_cp_irq_bit = DP_CP_IRQ;
+ uint32_t size = 1;
+
+ if (is_dp_hdcp(hdcp)) {
+ uint32_t cp_irq_addrs = (hdcp->connection.link.dp.rev >= 0x14) ?
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 : DP_DEVICE_SERVICE_IRQ_VECTOR;
+ return hdcp->config.ddc.funcs.write_dpcd(hdcp->config.ddc.handle, cp_irq_addrs,
+ &clear_cp_irq_bit, size) ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+ }
+
+ return MOD_HDCP_STATUS_INVALID_OPERATION;
+}
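[Editor's note] CP_IRQ is acknowledged by writing the bit back to the device-service IRQ vector, whose DPCD address moved for DP 1.4+ sinks. A standalone sketch of the address selection; the DPCD values are reproduced from drm_dp_helper.h and should be treated as assumptions of the sketch:

#include <stdint.h>

#define DP_DEVICE_SERVICE_IRQ_VECTOR_SKETCH      0x201
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SKETCH 0x2003
#define DP_CP_IRQ_SKETCH                         (1 << 2) /* write-1-to-clear */

static uint32_t cp_irq_ack_address_sketch(uint8_t dpcd_rev)
{
	/* DPCD rev is BCD-like: 0x14 == DP 1.4 */
	return dpcd_rev >= 0x14 ? DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SKETCH
				: DP_DEVICE_SERVICE_IRQ_VECTOR_SKETCH;
}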
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 13c57ff2abdc..1ab813b4fd14 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -37,6 +37,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+ struct dc_info_packet *info_packet);
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 7cd8a43d1889..0fdf7a3e96de 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -421,15 +421,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
*****************************************************************************
*/
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+ struct dc_info_packet *info_packet)
{
unsigned int length = 5;
bool hdmi_vic_mode = false;
uint8_t checksum = 0;
uint32_t i = 0;
enum dc_timing_3d_format format;
- bool bALLM = (bool)ALLMEnabled;
- bool bALLMVal = (bool)ALLMValue;
info_packet->valid = false;
format = stream->timing.timing_3d_format;
@@ -442,20 +440,13 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
&& format == TIMING_3D_FORMAT_NONE)
hdmi_vic_mode = true;
- if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode)
return;
info_packet->sb[1] = 0x03;
info_packet->sb[2] = 0x0C;
info_packet->sb[3] = 0x00;
- if (bALLM) {
- info_packet->sb[1] = 0xD8;
- info_packet->sb[2] = 0x5D;
- info_packet->sb[3] = 0xC4;
- info_packet->sb[4] = HF_VSIF_VERSION;
- }
-
if (format != TIMING_3D_FORMAT_NONE)
info_packet->sb[4] = (2 << 5);
@@ -490,9 +481,6 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
info_packet->hb1 = 0x01;
info_packet->hb2 = (uint8_t) (length);
- if (bALLM)
- info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
-
checksum += info_packet->hb0;
checksum += info_packet->hb1;
checksum += info_packet->hb2;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index e98c84ef206f..06c1aabf10ce 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -45,8 +45,43 @@ enum amd_apu_flags {
AMD_APU_IS_RAVEN2 = 0x00000002UL,
AMD_APU_IS_PICASSO = 0x00000004UL,
AMD_APU_IS_RENOIR = 0x00000008UL,
+ AMD_APU_IS_GREEN_SARDINE = 0x00000010UL,
};
+/**
+ * DOC: IP Blocks
+ *
+ * GPUs are composed of IP (intellectual property) blocks. These
+ * IP blocks provide various functionalities: display, graphics,
+ * video decode, etc. The IP blocks that comprise a particular GPU
+ * are listed in the GPU's respective SoC file. amdgpu_device.c
+ * acquires the list of IP blocks for the GPU in use on initialization.
+ * It can then operate on this list to perform standard driver operations
+ * such as: init, fini, suspend, resume, etc.
+ *
+ * IP block implementations are named using the following convention:
+ * <functionality>_v<version> (e.g. gfx_v6_0).
+ */
+
+/**
+ * enum amd_ip_block_type - Used to classify IP blocks by functionality.
+ *
+ * @AMD_IP_BLOCK_TYPE_COMMON: GPU Family
+ * @AMD_IP_BLOCK_TYPE_GMC: Graphics Memory Controller
+ * @AMD_IP_BLOCK_TYPE_IH: Interrupt Handler
+ * @AMD_IP_BLOCK_TYPE_SMC: System Management Controller
+ * @AMD_IP_BLOCK_TYPE_PSP: Platform Security Processor
+ * @AMD_IP_BLOCK_TYPE_DCE: Display and Compositing Engine
+ * @AMD_IP_BLOCK_TYPE_GFX: Graphics and Compute Engine
+ * @AMD_IP_BLOCK_TYPE_SDMA: System DMA Engine
+ * @AMD_IP_BLOCK_TYPE_UVD: Unified Video Decoder
+ * @AMD_IP_BLOCK_TYPE_VCE: Video Compression Engine
+ * @AMD_IP_BLOCK_TYPE_ACP: Audio Co-Processor
+ * @AMD_IP_BLOCK_TYPE_VCN: Video Core/Codec Next
+ * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
+ * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
+ */
enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
@@ -128,6 +163,34 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_ATHUB (1 << 16)
#define AMD_PG_SUPPORT_JPEG (1 << 17)
+/**
+ * enum PP_FEATURE_MASK - Used to mask power play features.
+ *
+ * @PP_SCLK_DPM_MASK: Dynamic adjustment of the system (graphics) clock.
+ * @PP_MCLK_DPM_MASK: Dynamic adjustment of the memory clock.
+ * @PP_PCIE_DPM_MASK: Dynamic adjustment of PCIE clocks and lanes.
+ * @PP_SCLK_DEEP_SLEEP_MASK: System (graphics) clock deep sleep.
+ * @PP_POWER_CONTAINMENT_MASK: Power containment.
+ * @PP_UVD_HANDSHAKE_MASK: Unified video decoder handshake.
+ * @PP_SMC_VOLTAGE_CONTROL_MASK: Dynamic voltage control.
+ * @PP_VBI_TIME_SUPPORT_MASK: Vertical blank interval support.
+ * @PP_ULV_MASK: Ultra low voltage.
+ * @PP_ENABLE_GFX_CG_THRU_SMU: SMU control of GFX engine clockgating.
+ * @PP_CLOCK_STRETCH_MASK: Clock stretching.
+ * @PP_OD_FUZZY_FAN_CONTROL_MASK: Overdrive fuzzy fan control.
+ * @PP_SOCCLK_DPM_MASK: Dynamic adjustment of the SoC clock.
+ * @PP_DCEFCLK_DPM_MASK: Dynamic adjustment of the Display Controller Engine Fabric clock.
+ * @PP_OVERDRIVE_MASK: Over- and under-clocking support.
+ * @PP_GFXOFF_MASK: Dynamic graphics engine power control.
+ * @PP_ACG_MASK: Adaptive clock generator.
+ * @PP_STUTTER_MODE: Stutter mode.
+ * @PP_AVFS_MASK: Adaptive voltage and frequency scaling.
+ *
+ * To override these settings on boot, append amdgpu.ppfeaturemask=<mask> to
+ * the kernel's command line parameters. This is usually done through a system's
+ * boot loader (e.g., GRUB). If manually loading the driver, pass
+ * ppfeaturemask=<mask> as a modprobe parameter.
+ */
enum PP_FEATURE_MASK {
PP_SCLK_DPM_MASK = 0x1,
PP_MCLK_DPM_MASK = 0x2,
@@ -165,56 +228,59 @@ enum DC_DEBUG_MASK {
};
enum amd_dpm_forced_level;
+
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ * @name: Name of IP block
+ * @early_init: sets up early driver state (pre sw_init),
+ * does not configure hw - Optional
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional
+ * @sw_init: sets up driver state, does not configure hw
+ * @sw_fini: tears down driver state, does not configure hw
+ * @hw_init: sets up the hw state
+ * @hw_fini: tears down the hw state
+ * @late_fini: final cleanup
+ * @suspend: handles IP specific hw/sw changes for suspend
+ * @resume: handles IP specific hw/sw changes for resume
+ * @is_idle: returns current IP block idle status
+ * @wait_for_idle: poll for idle
+ * @check_soft_reset: determine whether the IP block needs a soft reset
+ * @pre_soft_reset: prepare the IP block for a soft reset
+ * @soft_reset: soft reset the IP block
+ * @post_soft_reset: clean up the IP block after a soft reset
+ * @set_clockgating_state: enable/disable cg for the IP block
+ * @set_powergating_state: enable/disable pg for the IP block
+ * @get_clockgating_state: get current clockgating status
+ * @enable_umd_pstate: enable UMD powerstate
+ *
+ * These hooks provide an interface for controlling the operational state
+ * of IP blocks. After acquiring a list of IP blocks for the GPU in use,
+ * the driver can make chip-wide state changes by walking this list and
+ * making calls to hooks from each IP block. This list is ordered to ensure
+ * that the driver initializes the IP blocks in a safe sequence.
*/
struct amd_ip_funcs {
- /** @name: Name of IP block */
char *name;
- /**
- * @early_init:
- *
- * sets up early driver state (pre sw_init),
- * does not configure hw - Optional
- */
int (*early_init)(void *handle);
- /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
int (*late_init)(void *handle);
- /** @sw_init: sets up driver state, does not configure hw */
int (*sw_init)(void *handle);
- /** @sw_fini: tears down driver state, does not configure hw */
int (*sw_fini)(void *handle);
- /** @hw_init: sets up the hw state */
int (*hw_init)(void *handle);
- /** @hw_fini: tears down the hw state */
int (*hw_fini)(void *handle);
- /** @late_fini: final cleanup */
void (*late_fini)(void *handle);
- /** @suspend: handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
- /** @resume: handles IP specific hw/sw changes for resume */
int (*resume)(void *handle);
- /** @is_idle: returns current IP block idle status */
bool (*is_idle)(void *handle);
- /** @wait_for_idle: poll for idle */
int (*wait_for_idle)(void *handle);
- /** @check_soft_reset: check soft reset the IP block */
bool (*check_soft_reset)(void *handle);
- /** @pre_soft_reset: pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
- /** @soft_reset: soft reset the IP block */
int (*soft_reset)(void *handle);
- /** @post_soft_reset: post soft reset the IP block */
int (*post_soft_reset)(void *handle);
- /** @set_clockgating_state: enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
- /** @set_powergating_state: enable/disable pg for the IP block */
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
- /** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
- /** @enable_umd_pstate: enable UMD powerstate */
int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
};
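[Editor's note] The PP_FEATURE_MASK doc describes an override mask passed via amdgpu.ppfeaturemask. A small sketch of building one; only the first enum values are visible in this hunk, so the PP_GFXOFF_MASK value below (0x8000) is an assumption:

#include <stdint.h>
#include <stdio.h>

#define PP_GFXOFF_MASK_SKETCH 0x8000u /* assumed bit position */

int main(void)
{
	/* Start from "everything enabled" and clear the GFXOFF bit. */
	uint32_t mask = 0xffffffffu & ~PP_GFXOFF_MASK_SKETCH;

	/* -> amdgpu.ppfeaturemask=0xffff7fff disables GFXOFF only */
	printf("amdgpu.ppfeaturemask=0x%08x\n", (unsigned int)mask);
	return 0;
}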
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index 27bb8c1ab858..b6f74bf4af02 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -7376,8 +7376,6 @@
#define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
#define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
-#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
-#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
// addressBlock: dce_dc_fmt4_dispdec
// base address: 0x2000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index ae798f768853..9de01ae574c0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4444,14 +4444,90 @@
/* Registers that spilled out of sid.h */
#define mmDATA_FORMAT 0x1AC0
+#define mmLB0_DATA_FORMAT 0x1AC0
+#define mmLB1_DATA_FORMAT 0x1DC0
+#define mmLB2_DATA_FORMAT 0x40C0
+#define mmLB3_DATA_FORMAT 0x43C0
+#define mmLB4_DATA_FORMAT 0x46C0
+#define mmLB5_DATA_FORMAT 0x49C0
#define mmDESKTOP_HEIGHT 0x1AC1
+#define mmLB0_DESKTOP_HEIGHT 0x1AC1
+#define mmLB1_DESKTOP_HEIGHT 0x1DC1
+#define mmLB2_DESKTOP_HEIGHT 0x40C1
+#define mmLB3_DESKTOP_HEIGHT 0x43C1
+#define mmLB4_DESKTOP_HEIGHT 0x46C1
+#define mmLB5_DESKTOP_HEIGHT 0x49C1
#define mmDC_LB_MEMORY_SPLIT 0x1AC3
+#define mmLB0_DC_LB_MEMORY_SPLIT 0x1AC3
+#define mmLB1_DC_LB_MEMORY_SPLIT 0x1DC3
+#define mmLB2_DC_LB_MEMORY_SPLIT 0x40C3
+#define mmLB3_DC_LB_MEMORY_SPLIT 0x43C3
+#define mmLB4_DC_LB_MEMORY_SPLIT 0x46C3
+#define mmLB5_DC_LB_MEMORY_SPLIT 0x49C3
+#define mmDC_LB_MEM_SIZE 0x1AC4
+#define mmLB0_DC_LB_MEM_SIZE 0x1AC4
+#define mmLB1_DC_LB_MEM_SIZE 0x1DC4
+#define mmLB2_DC_LB_MEM_SIZE 0x40C4
+#define mmLB3_DC_LB_MEM_SIZE 0x43C4
+#define mmLB4_DC_LB_MEM_SIZE 0x46C4
+#define mmLB5_DC_LB_MEM_SIZE 0x49C4
#define mmPRIORITY_A_CNT 0x1AC6
+#define mmLB0_PRIORITY_A_CNT 0x1AC6
+#define mmLB1_PRIORITY_A_CNT 0x1DC6
+#define mmLB2_PRIORITY_A_CNT 0x40C6
+#define mmLB3_PRIORITY_A_CNT 0x43C6
+#define mmLB4_PRIORITY_A_CNT 0x46C6
+#define mmLB5_PRIORITY_A_CNT 0x49C6
#define mmPRIORITY_B_CNT 0x1AC7
+#define mmLB0_PRIORITY_B_CNT 0x1AC7
+#define mmLB1_PRIORITY_B_CNT 0x1DC7
+#define mmLB2_PRIORITY_B_CNT 0x40C7
+#define mmLB3_PRIORITY_B_CNT 0x43C7
+#define mmLB4_PRIORITY_B_CNT 0x46C7
+#define mmLB5_PRIORITY_B_CNT 0x49C7
#define mmDPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL3 0x1B32
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL3 0x1E32
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL3 0x4132
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL3 0x4432
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL3 0x4732
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL3 0x4A32
#define mmINT_MASK 0x1AD0
+#define mmLB0_INT_MASK 0x1AD0
+#define mmLB1_INT_MASK 0x1DD0
+#define mmLB2_INT_MASK 0x40D0
+#define mmLB3_INT_MASK 0x43D0
+#define mmLB4_INT_MASK 0x46D0
+#define mmLB5_INT_MASK 0x49D0
#define mmVLINE_STATUS 0x1AEE
+#define mmLB0_VLINE_STATUS 0x1AEE
+#define mmLB1_VLINE_STATUS 0x1DEE
+#define mmLB2_VLINE_STATUS 0x40EE
+#define mmLB3_VLINE_STATUS 0x43EE
+#define mmLB4_VLINE_STATUS 0x46EE
+#define mmLB5_VLINE_STATUS 0x49EE
#define mmVBLANK_STATUS 0x1AEF
+#define mmLB0_VBLANK_STATUS 0x1AEF
+#define mmLB1_VBLANK_STATUS 0x1DEF
+#define mmLB2_VBLANK_STATUS 0x40EF
+#define mmLB3_VBLANK_STATUS 0x43EF
+#define mmLB4_VBLANK_STATUS 0x46EF
+#define mmLB5_VBLANK_STATUS 0x49EF
+#define mmSCL_HORZ_FILTER_INIT_RGB_LUMA 0x1B4C
+#define mmSCL0_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x1B4C
+#define mmSCL1_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x1E4C
+#define mmSCL2_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x414C
+#define mmSCL3_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x444C
+#define mmSCL4_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x474C
+#define mmSCL5_SCL_HORZ_FILTER_INIT_RGB_LUMA 0x4A4C
+
+#define mmSCL_HORZ_FILTER_INIT_CHROMA 0x1B4D
+#define mmSCL0_SCL_HORZ_FILTER_INIT_CHROMA 0x1B4D
+#define mmSCL1_SCL_HORZ_FILTER_INIT_CHROMA 0x1E4D
+#define mmSCL2_SCL_HORZ_FILTER_INIT_CHROMA 0x414D
+#define mmSCL3_SCL_HORZ_FILTER_INIT_CHROMA 0x444D
+#define mmSCL4_SCL_HORZ_FILTER_INIT_CHROMA 0x474D
+#define mmSCL5_SCL_HORZ_FILTER_INIT_CHROMA 0x4A4D
#endif
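[Editor's note] The per-instance LB offsets above are not a uniform stride apart (LB2 onward sit in a second aperture: 0x1AC0, 0x1DC0, then 0x40C0), which is why instance-indexed tables rather than base+stride arithmetic suit these registers. A sketch using the deltas implied by the defines:

#include <stdint.h>

/* Dword offsets of each LB instance relative to LB0, from the
 * mmLBn_DATA_FORMAT defines above. */
static const uint32_t lb_inst_offset_sketch[] = {
	0x0000, 0x0300, 0x2600, 0x2900, 0x2C00, 0x2F00,
};

#define mmLB0_DATA_FORMAT_SKETCH 0x1AC0

/* Caller guarantees inst < 6. */
static uint32_t lb_data_format_reg_sketch(unsigned int inst)
{
	return mmLB0_DATA_FORMAT_SKETCH + lb_inst_offset_sketch[inst];
}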
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index abe05bc80752..41c4a46ce357 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -2076,6 +2076,8 @@
#define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0x0000000c
#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x00000010L
#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x00000004
+#define CRTC_CONTROL__CRTC_PREFETCH_EN_MASK 0x10000000L
+#define CRTC_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x0000001c
#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x00000001L
#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x00000000
#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x0000001eL
@@ -6364,6 +6366,8 @@
#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x00000000
#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000L
#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x00000010
+#define DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK_MASK 0x00030000L
+#define DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT 0x00000010
#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x00000001L
#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x00000000
#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x00000010L
@@ -6384,6 +6388,8 @@
#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x00000008
#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x00000010L
#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x00000004
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x00003000L
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x0000000c
#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000L
#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x00000010
#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x00000001L
@@ -6406,6 +6412,8 @@
#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x00000008
#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x00000001L
#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x00000000
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x00003000L
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0000000c
#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000L
#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x00000010
#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x00000010L
@@ -7256,6 +7264,8 @@
#define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x00000008
#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0x000c0000L
#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x00000012
+#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0x00f00000L
+#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x00000014
#define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0x0000000cL
#define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x00000002
#define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000L
@@ -9835,4 +9845,98 @@
#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x00000008
+// DATA_FORMAT
+#define DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x00000000
+#define DATA_FORMAT__RESET_REQ_AT_EOL_MASK 0x00000010L
+#define DATA_FORMAT__RESET_REQ_AT_EOL__SHIFT 0x00000004
+#define DATA_FORMAT__PREFETCH_MASK 0x00001000L
+#define DATA_FORMAT__PREFETCH__SHIFT 0x0000000c
+#define DATA_FORMAT__SOF_READ_PT_MASK 0x001f0000L
+#define DATA_FORMAT__SOF_READ_PT__SHIFT 0x00000010
+#define DATA_FORMAT__REQUEST_MODE_MASK 0x03000000L
+#define DATA_FORMAT__REQUEST_MODE__SHIFT 0x00000018
+#define DATA_FORMAT__ALLOW_REQ_MODE_1_2_MASK 0x10000000L
+#define DATA_FORMAT__ALLOW_REQ_MODE_1_2__SHIFT 0x0000001c
+
+// DC_LB_MEMORY_SPLIT
+#define DC_LB_MEMORY_SPLIT__LB_NUM_PARTITIONS_MASK 0x000f0000L
+#define DC_LB_MEMORY_SPLIT__LB_NUM_PARTITIONS__SHIFT 0x00000010
+#define DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG_MASK 0x00300000L
+#define DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT 0x00000014
+
+// DC_LB_MEM_SIZE
+#define DC_LB_MEM_SIZE__DC_LB_MEM_SIZE_MASK 0x000007ffL
+#define DC_LB_MEM_SIZE__DC_LB_MEM_SIZE__SHIFT 0x00000000
+
+// SCL_TAP_CONTROL
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x00000007L
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x00000000
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0x00000f00L
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x00000008
+
+// INT_MASK
+#define INT_MASK__VBLANK_INT_MASK 0x00000001L
+#define INT_MASK__VBLANK_INT__SHIFT 0x00000000
+#define INT_MASK__VLINE_INT_MASK 0x00000010L
+#define INT_MASK__VLINE_INT__SHIFT 0x00000004
+
+// PRIORITY_A_CNT
+#define PRIORITY_A_CNT__PRIORITY_MARK_A_MASK 0x00007fffL
+#define PRIORITY_A_CNT__PRIORITY_MARK_A__SHIFT 0x00000000
+#define PRIORITY_A_CNT__PRIORITY_A_OFF_MASK 0x00010000L
+#define PRIORITY_A_CNT__PRIORITY_A_OFF__SHIFT 0x00000010
+#define PRIORITY_A_CNT__PRIORITY_A_ALWAYS_ON_MASK 0x00100000L
+#define PRIORITY_A_CNT__PRIORITY_A_ALWAYS_ON__SHIFT 0x00000014
+#define PRIORITY_A_CNT__PRIORITY_A_FORCE_MASK_MASK 0x01000000L
+#define PRIORITY_A_CNT__PRIORITY_A_FORCE_MASK__SHIFT 0x00000018
+
+// PRIORITY_B_CNT
+#define PRIORITY_B_CNT__PRIORITY_MARK_B_MASK 0x00007fffL
+#define PRIORITY_B_CNT__PRIORITY_MARK_B__SHIFT 0x00000000
+#define PRIORITY_B_CNT__PRIORITY_B_OFF_MASK 0x00010000L
+#define PRIORITY_B_CNT__PRIORITY_B_OFF__SHIFT 0x00000010
+#define PRIORITY_B_CNT__PRIORITY_B_ALWAYS_ON_MASK 0x00100000L
+#define PRIORITY_B_CNT__PRIORITY_B_ALWAYS_ON__SHIFT 0x00000014
+#define PRIORITY_B_CNT__PRIORITY_B_FORCE_MASK_MASK 0x01000000L
+#define PRIORITY_B_CNT__PRIORITY_B_FORCE_MASK__SHIFT 0x00000018
+
+// VLINE_STATUS
+#define VLINE_STATUS__VLINE_OCCURRED_MASK 0x00000001L
+#define VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x00000000
+#define VLINE_STATUS__VLINE_ACK_MASK 0x00000010L
+#define VLINE_STATUS__VLINE_ACK__SHIFT 0x00000004
+#define VLINE_STATUS__VLINE_STAT_MASK 0x00001000L
+#define VLINE_STATUS__VLINE_STAT__SHIFT 0x0000000c
+#define VLINE_STATUS__VLINE_INTERRUPT_MASK 0x00010000L
+#define VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x00000010
+#define VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x00020000L
+#define VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x00000011
+
+// VBLANK_STATUS
+#define VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x00000001L
+#define VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x00000000
+#define VBLANK_STATUS__VBLANK_ACK_MASK 0x00000010L
+#define VBLANK_STATUS__VBLANK_ACK__SHIFT 0x00000004
+#define VBLANK_STATUS__VBLANK_STAT_MASK 0x00001000L
+#define VBLANK_STATUS__VBLANK_STAT__SHIFT 0x0000000c
+#define VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x00010000L
+#define VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x00000010
+#define VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x00020000L
+#define VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x00000011
+
+// SCL_HORZ_FILTER_INIT_RGB_LUMA
+#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_FRAC_RGB_Y_MASK 0x0000ffffL
+#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_FRAC_RGB_Y__SHIFT 0x00000000
+#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_INT_RGB_Y_MASK 0x000f0000L
+#define SCL_HORZ_FILTER_INIT_RGB_LUMA__SCL_H_INIT_INT_RGB_Y__SHIFT 0x00000010
+
+// SCL_HORZ_FILTER_INIT_CHROMA
+#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_FRAC_CBCR_MASK 0x0000ffffL
+#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_FRAC_CBCR__SHIFT 0x00000000
+#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_INT_CBCR_MASK 0x00070000L
+#define SCL_HORZ_FILTER_INIT_CHROMA__SCL_H_INIT_INT_CBCR__SHIFT 0x00000010
+
#endif
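[Editor's note] These generated MASK/__SHIFT pairs are consumed by DC's get/set_reg_field_value-style helpers, as in hpd_ack() earlier in this series. A compilable sketch of the accessor pattern, exercised with the new VBLANK_STATUS ack field (the real helpers take macro name fragments rather than raw values):

#include <stdint.h>

#define VBLANK_STATUS__VBLANK_ACK_MASK_SKETCH  0x00000010u
#define VBLANK_STATUS__VBLANK_ACK_SHIFT_SKETCH 0x00000004u

static inline uint32_t field_get_sketch(uint32_t reg, uint32_t mask,
					uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set_sketch(uint32_t reg, uint32_t val,
					uint32_t mask, uint32_t shift)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Acknowledge a pending vblank by writing 1 to the ACK field. */
static inline uint32_t vblank_ack_sketch(uint32_t status)
{
	return field_set_sketch(status, 1,
				VBLANK_STATUS__VBLANK_ACK_MASK_SKETCH,
				VBLANK_STATUS__VBLANK_ACK_SHIFT_SKETCH);
}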
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
index cf166b591bc5..cf166b591bc5 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
index 0e0319e98c07..ea683f452bb3 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h
@@ -50271,6 +50271,10 @@
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP0_DSC_DEBUG_CONTROL
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+
// addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec
//DSCCIF0_DSCCIF_CONFIG0
@@ -50789,6 +50793,9 @@
#define DSC_TOP1_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP1_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP1_DSC_DEBUG_CONTROL
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP1_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
// addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec
@@ -51308,6 +51315,10 @@
#define DSC_TOP2_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP2_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP2_DSC_DEBUG_CONTROL
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP2_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+
// addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec
//DSCCIF2_DSCCIF_CONFIG0
@@ -51826,6 +51837,9 @@
#define DSC_TOP3_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP3_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP3_DSC_DEBUG_CONTROL
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP3_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
// addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec
@@ -52346,6 +52360,10 @@
#define DSC_TOP4_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP4_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP4_DSC_DEBUG_CONTROL
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP4_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+
// addressBlock: dce_dc_dsc4_dispdec_dsccif_dispdec
//DSCCIF4_DSCCIF_CONFIG0
@@ -52864,6 +52882,10 @@
#define DSC_TOP5_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L
#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L
#define DSC_TOP5_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L
+//DSC_TOP5_DSC_DEBUG_CONTROL
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0
+#define DSC_TOP5_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L
+
// addressBlock: dce_dc_dsc5_dispdec_dsccif_dispdec
//DSCCIF5_DSCCIF_CONFIG0
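The hunks above add the same DSC_DBG_EN bit for DSC instances 0 through 5. A hedged sketch of the corresponding read-modify-write, using instance 0 (reading the register back and writing it out are driver plumbing and not shown):

/* Illustrative read-modify-write that sets the new DSC_DBG_EN bit. */
static uint32_t dsc_top0_set_debug_enable(uint32_t reg_val, bool enable)
{
	reg_val &= ~DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK;
	if (enable)
		reg_val |= 1 << DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT;
	return reg_val;
}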
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h
index 67faaf68e9d7..67faaf68e9d7 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h
index b4ef50a72868..b4ef50a72868 100755..100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 644a9fa71bb2..66a4151fa676 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -9184,6 +9184,8 @@
#define mmRLC_GPM_THREAD_ENABLE_BASE_IDX 1
#define mmRLC_RLCG_DOORBELL_RANGE 0x4c47
#define mmRLC_RLCG_DOORBELL_RANGE_BASE_IDX 1
+#define mmRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define mmRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
#define mmRLC_CGCG_CGLS_CTRL 0x4c49
#define mmRLC_CGCG_CGLS_CTRL_BASE_IDX 1
#define mmRLC_CGCG_RAMP_CTRL 0x4c4a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index 2e449fcff893..aed799d9a0e8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -32365,6 +32365,31 @@
#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY__SHIFT 0x10
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_0_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_15_9_MASK 0x0000FE00L
+#define RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK 0x00010000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_17_MASK 0xFFFE0000L
//RLC_RLCG_DOORBELL_STAT
#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
index f41556abfbbc..629a8a3b55e9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
@@ -205,6 +205,8 @@
#define mmGCEA_EDC_CNT2_BASE_IDX 0
#define mmGCEA_EDC_CNT3 0x071b
#define mmGCEA_EDC_CNT3_BASE_IDX 0
+#define mmGCEA_ERR_STATUS 0x0712
+#define mmGCEA_ERR_STATUS_BASE_IDX 0
// addressBlock: gc_gfxudec
// base address: 0x30000
@@ -261,4 +263,4 @@
#define mmRLC_EDC_CNT2 0x4d41
#define mmRLC_EDC_CNT2_BASE_IDX 1
-#endif \ No newline at end of file
+#endif
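The new mmGCEA_ERR_STATUS pair follows the SOC15 addressing scheme used throughout these headers: the absolute register offset is the IP block's base address, selected via the _BASE_IDX constant, plus the relative offset. A sketch of the lookup, assuming the reg_offset table layout and GC_HWIP index that amdgpu uses elsewhere (this mirrors the SOC15_REG_OFFSET() pattern rather than defining a new interface):

/* Illustrative SOC15-style offset computation for the new register. */
static uint32_t gcea_err_status_offset(struct amdgpu_device *adev)
{
	return adev->reg_offset[GC_HWIP][0][mmGCEA_ERR_STATUS_BASE_IDX] +
	       mmGCEA_ERR_STATUS;
}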
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h
new file mode 100644
index 000000000000..3685766c4d56
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_7_0_OFFSET_HEADER
+#define _umc_8_7_0_OFFSET_HEADER
+
+#define mmUMCCH0_0_GeccErrCntSel 0x0328
+#define mmUMCCH0_0_GeccErrCntSel_BASE_IDX 0
+#define mmUMCCH0_0_GeccErrCnt 0x0329
+#define mmUMCCH0_0_GeccErrCnt_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2
+#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
+#define mmMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h
new file mode 100644
index 000000000000..4c5097fa0c09
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h
@@ -0,0 +1,79 @@
+#ifndef _umc_8_7_0_SH_MASK_HEADER
+#define _umc_8_7_0_SH_MASK_HEADER
+
+//UMCCH0_0_GeccErrCntSel
+#define UMCCH0_0_GeccErrCntSel__GeccErrCntCsSel__SHIFT 0x0
+#define UMCCH0_0_GeccErrCntSel__GeccErrInt__SHIFT 0xc
+#define UMCCH0_0_GeccErrCntSel__GeccErrCntEn__SHIFT 0xf
+#define UMCCH0_0_GeccErrCntSel__PoisonCntEn__SHIFT 0x10
+#define UMCCH0_0_GeccErrCntSel__GeccErrCntCsSel_MASK 0x0000000FL
+#define UMCCH0_0_GeccErrCntSel__GeccErrInt_MASK 0x00003000L
+#define UMCCH0_0_GeccErrCntSel__GeccErrCntEn_MASK 0x00008000L
+#define UMCCH0_0_GeccErrCntSel__PoisonCntEn_MASK 0x00030000L
+//UMCCH0_0_GeccErrCnt
+#define UMCCH0_0_GeccErrCnt__GeccErrCnt__SHIFT 0x0
+#define UMCCH0_0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
+#define UMCCH0_0_GeccErrCnt__GeccErrCnt_MASK 0x0000FFFFL
+#define UMCCH0_0_GeccErrCnt__GeccUnCorrErrCnt_MASK 0xFFFF0000L
+//MCA_UMC_UMC0_MCUMC_STATUST0
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV22__SHIFT 0x16
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrLsb__SHIFT 0x18
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV30__SHIFT 0x1e
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV38__SHIFT 0x26
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV41__SHIFT 0x29
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred__SHIFT 0x2c
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC__SHIFT 0x2d
+#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC__SHIFT 0x2e
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV47__SHIFT 0x2f
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent__SHIFT 0x34
+#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV__SHIFT 0x35
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV54__SHIFT 0x36
+#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC__SHIFT 0x37
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC__SHIFT 0x39
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV__SHIFT 0x3a
+#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV__SHIFT 0x3b
+#define MCA_UMC_UMC0_MCUMC_STATUST0__En__SHIFT 0x3c
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UC__SHIFT 0x3d
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Val__SHIFT 0x3f
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode_MASK 0x000000000000FFFFL
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt_MASK 0x00000000003F0000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV22_MASK 0x0000000000C00000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrLsb_MASK 0x000000003F000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV30_MASK 0x00000000C0000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId_MASK 0x0000003F00000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV38_MASK 0x000000C000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub_MASK 0x0000010000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV41_MASK 0x0000060000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison_MASK 0x0000080000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred_MASK 0x0000100000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC_MASK 0x0000200000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC_MASK 0x0000400000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV47_MASK 0x000F800000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent_MASK 0x0010000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV_MASK 0x0020000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV54_MASK 0x0040000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC_MASK 0x0080000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal_MASK 0x0100000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC_MASK 0x0200000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV_MASK 0x0400000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV_MASK 0x0800000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__En_MASK 0x1000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__UC_MASK 0x2000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow_MASK 0x4000000000000000L
+#define MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK 0x8000000000000000L
+//MCA_UMC_UMC0_MCUMC_ADDRT0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x3e
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB_MASK 0x3F00000000000000L
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xC000000000000000L
+
+#endif
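Unlike the 32-bit masks earlier in this patch, the MCUMC_STATUST0 fields span a 64-bit register. A hedged sketch of how a RAS handler might classify a raw status read with these masks (the helpers are illustrative, not part of the patch):

/* Illustrative 64-bit MCA status decode using the masks defined above. */
static bool umc_status_correctable(uint64_t mc_umc_status)
{
	/* Only trust entries the hardware marked valid. */
	if (!(mc_umc_status & MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK))
		return false;

	return !!(mc_umc_status & MCA_UMC_UMC0_MCUMC_STATUST0__CECC_MASK);
}

static bool umc_status_uncorrectable(uint64_t mc_umc_status)
{
	if (!(mc_umc_status & MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK))
		return false;

	return !!(mc_umc_status & MCA_UMC_UMC0_MCUMC_STATUST0__UECC_MASK);
}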
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
index 07aceffb108a..524ba4421c17 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h
@@ -151,6 +151,8 @@
#define mmUVD_LMI_CTRL2_BASE_IDX 1
#define mmUVD_MASTINT_EN 0x0540
#define mmUVD_MASTINT_EN_BASE_IDX 1
+#define mmUVD_FW_STATUS 0x0557
+#define mmUVD_FW_STATUS_BASE_IDX 1
#define mmJPEG_CGC_CTRL 0x0565
#define mmJPEG_CGC_CTRL_BASE_IDX 1
#define mmUVD_LMI_CTRL 0x0566
@@ -219,4 +221,5 @@
#define mmUVD_CONTEXT_ID2_BASE_IDX 1
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
index b427f73bd536..919be1842bd5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h
@@ -807,5 +807,25 @@
#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0
#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL
+//UVD_FW_STATUS
+#define UVD_FW_STATUS__BUSY__SHIFT 0x0
+#define UVD_FW_STATUS__ACTIVE__SHIFT 0x1
+#define UVD_FW_STATUS__SEND_EFUSE_REQ__SHIFT 0x2
+#define UVD_FW_STATUS__DONE__SHIFT 0x8
+#define UVD_FW_STATUS__PASS__SHIFT 0x10
+#define UVD_FW_STATUS__FAIL__SHIFT 0x11
+#define UVD_FW_STATUS__INVALID_LEN__SHIFT 0x12
+#define UVD_FW_STATUS__INVALID_0_PADDING__SHIFT 0x13
+#define UVD_FW_STATUS__INVALID_NONCE__SHIFT 0x14
+#define UVD_FW_STATUS__BUSY_MASK 0x00000001L
+#define UVD_FW_STATUS__ACTIVE_MASK 0x00000002L
+#define UVD_FW_STATUS__SEND_EFUSE_REQ_MASK 0x00000004L
+#define UVD_FW_STATUS__DONE_MASK 0x00000100L
+#define UVD_FW_STATUS__PASS_MASK 0x00010000L
+#define UVD_FW_STATUS__FAIL_MASK 0x00020000L
+#define UVD_FW_STATUS__INVALID_LEN_MASK 0x00040000L
+#define UVD_FW_STATUS__INVALID_0_PADDING_MASK 0x00080000L
+#define UVD_FW_STATUS__INVALID_NONCE_MASK 0x00100000L
+
#endif
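The new UVD_FW_STATUS bits split naturally into a completion flag (DONE) and result flags (PASS, FAIL and the INVALID_* errors). A hedged sketch of interpreting a raw read (the error codes are chosen for illustration only):

/* Illustrative interpretation of a raw UVD_FW_STATUS read. */
static int uvd_fw_status_result(uint32_t fw_status)
{
	if (!(fw_status & UVD_FW_STATUS__DONE_MASK))
		return -EBUSY;	/* firmware handshake still in progress */

	if (fw_status & UVD_FW_STATUS__PASS_MASK)
		return 0;

	return -EINVAL;		/* FAIL or one of the INVALID_* bits */
}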
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a3c238c39ef5..95c656d205ed 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -212,10 +212,15 @@ struct tile_config {
* IH ring entry. This function allows the KFD ISR to get the VMID
* from the fault status register as early as possible.
*
- * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
+ * @get_cu_occupancy: Function pointer that returns to the caller the number
+ * of wave fronts that are in flight for all of the queues of a process
+ * as identified by its pasid. Note that the returned value is only a
+ * snapshot of the current moment and carries no guarantee of any minimum
+ * for the number of waves in flight. This function is defined only for
+ * devices of the GFX9 and later GFX families; care must be taken not to
+ * call it on GFX8 and earlier devices, where it is not defined.
*
- * @get_unique_id: Returns uuid id of current device
- *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -226,7 +231,7 @@ struct kfd2kgd_calls {
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
- int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
+ int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid,
unsigned int vmid);
int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
@@ -290,9 +295,9 @@ struct kfd2kgd_calls {
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
- uint64_t (*get_hive_id)(struct kgd_dev *kgd);
- uint64_t (*get_unique_id)(struct kgd_dev *kgd);
+ void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
+ int *max_waves_per_cu);
};
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
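Given the prototype above, a consumer on the amdkfd side could derive a utilization percentage from the two out-parameters. A minimal sketch, assuming the caller already knows the device's CU count (the helper and its arguments are illustrative; only the get_cu_occupancy prototype comes from this patch):

/* Illustrative occupancy: in-flight waves vs. the theoretical maximum. */
static int kfd_cu_occupancy_percent(const struct kfd2kgd_calls *kfd2kgd,
				    struct kgd_dev *kgd, int pasid,
				    int cu_count)
{
	int wave_cnt = 0, max_waves_per_cu = 0;

	if (!kfd2kgd->get_cu_occupancy)
		return -1;	/* not defined on GFX8 and earlier */

	kfd2kgd->get_cu_occupancy(kgd, pasid, &wave_cnt, &max_waves_per_cu);
	if (!max_waves_per_cu || !cu_count)
		return 0;

	return wave_cnt * 100 / (max_waves_per_cu * cu_count);
}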
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index a7f92d0b3a90..94132c70d7af 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -281,6 +281,7 @@ struct amd_pm_funcs {
int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
int (*get_power_profile_mode)(void *handle, char *buf);
int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+ int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size);
int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
int (*smu_i2c_bus_access)(void *handle, bool acquire);
@@ -322,6 +323,115 @@ struct amd_pm_funcs {
int (*asic_reset_mode_2)(void *handle);
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
+ ssize_t (*get_gpu_metrics)(void *handle, void **table);
+};
+
+struct metrics_table_header {
+ uint16_t structure_size;
+ uint8_t format_revision;
+ uint8_t content_revision;
+};
+
+struct gpu_metrics_v1_0 {
+ struct metrics_table_header common_header;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Temperature */
+ uint16_t temperature_edge;
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrgfx;
+ uint16_t temperature_vrsoc;
+ uint16_t temperature_vrmem;
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Power/Energy */
+ uint16_t average_socket_power;
+ uint32_t energy_accumulator;
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_vclk0_frequency;
+ uint16_t average_dclk0_frequency;
+ uint16_t average_vclk1_frequency;
+ uint16_t average_dclk1_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_vclk0;
+ uint16_t current_dclk0;
+ uint16_t current_vclk1;
+ uint16_t current_dclk1;
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t current_fan_speed;
+
+ /* Link width/speed */
+ uint8_t pcie_link_width;
+ uint8_t pcie_link_speed; // in 0.1 GT/s
+};
+
+struct gpu_metrics_v2_0 {
+ struct metrics_table_header common_header;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Temperature */
+ uint16_t temperature_gfx; // gfx temperature on APUs
+ uint16_t temperature_soc; // soc temperature on APUs
+ uint16_t temperature_core[8]; // CPU core temperature on APUs
+ uint16_t temperature_l3[2];
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Power/Energy */
+	uint16_t average_socket_power; // dGPU + APU power on A+A (AMD CPU + AMD dGPU) platforms
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8]; // CPU core power on APUs
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8]; // CPU core clocks
+ uint16_t current_l3clk[2];
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding;
};
#endif
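Since gpu_metrics_v1_0 (dGPU) and gpu_metrics_v2_0 (APU) both begin with metrics_table_header, a consumer of get_gpu_metrics (or of the gpu_metrics sysfs file added later in this patch) can dispatch on the revision fields. A hedged sketch, assuming format_revision 1 and 2 map to v1_0 and v2_0 as the struct names suggest:

/* Illustrative dispatch on the shared metrics header. */
static void dump_gpu_metrics(const void *table)
{
	const struct metrics_table_header *hdr = table;

	if (hdr->format_revision == 1 && hdr->content_revision == 0) {
		const struct gpu_metrics_v1_0 *m = table;

		pr_info("edge temp %u, gfx activity %u\n",
			m->temperature_edge, m->average_gfx_activity);
	} else if (hdr->format_revision == 2 && hdr->content_revision == 0) {
		const struct gpu_metrics_v2_0 *m = table;

		pr_info("gfx temp %u, socket power %u\n",
			m->temperature_gfx, m->average_socket_power);
	}
}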
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
new file mode 100644
index 000000000000..f01e86030cd1
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -0,0 +1,46 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+subdir-ccflags-y += \
+ -I$(FULL_AMD_PATH)/pm/inc/ \
+ -I$(FULL_AMD_PATH)/include/asic_reg \
+ -I$(FULL_AMD_PATH)/include \
+ -I$(FULL_AMD_PATH)/pm/swsmu \
+ -I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
+ -I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
+ -I$(FULL_AMD_PATH)/pm/powerplay \
+ -I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
+ -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
+
+AMD_PM_PATH = ../pm
+
+PM_LIBS = swsmu powerplay
+
+AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS)))
+
+include $(AMD_PM)
+
+PM_MGR = amdgpu_dpm.o amdgpu_pm.o
+
+AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_PM_POWER)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 2082c0acd216..17a45baff638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -28,6 +28,11 @@
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include <linux/power_supply.h>
+
+#define WIDTH_4K 3840
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
@@ -117,7 +122,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
@@ -138,7 +143,7 @@ void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vblank_in_pixels;
@@ -165,7 +170,7 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
u32 vrefresh = 0;
@@ -1110,8 +1115,6 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
int ret = 0;
- dev_info(adev->dev, "GPU BACO reset\n");
-
if (is_support_sw_smu(adev)) {
ret = smu_baco_enter(smu);
if (ret)
@@ -1216,3 +1219,469 @@ int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
return 0;
}
+
+int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs =
+ adev->powerplay.pp_funcs;
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_enable_mgpu_fan_boost(smu);
+ else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+ ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
+ uint32_t msg_id)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs =
+ adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (pp_funcs && pp_funcs->set_clockgating_by_smu)
+ ret = pp_funcs->set_clockgating_by_smu(pp_handle,
+ msg_id);
+
+ return ret;
+}
+
+int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
+ bool acquire)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs =
+ adev->powerplay.pp_funcs;
+ int ret = -EOPNOTSUPP;
+
+ if (pp_funcs && pp_funcs->smu_i2c_bus_access)
+ ret = pp_funcs->smu_i2c_bus_access(pp_handle,
+ acquire);
+
+ return ret;
+}
+
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
+{
+ if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ if (power_supply_is_system_supplied() > 0)
+ adev->pm.ac_power = true;
+ else
+ adev->pm.ac_power = false;
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
+ amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (is_support_sw_smu(adev))
+ smu_set_ac_dc(&adev->smu);
+ }
+}
+
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_read_sensor(&adev->smu, sensor, data, size);
+ else {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+ sensor, data, size);
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device,
+ pm.dpm.thermal.work);
+ /* switch to the thermal state */
+ enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ int temp, size = sizeof(temp);
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp, &size)) {
+ if (temp < adev->pm.dpm.thermal.min_temp)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ } else {
+ if (adev->pm.dpm.thermal.high_to_low)
+ /* switch back the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ }
+ mutex_lock(&adev->pm.mutex);
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ adev->pm.dpm.thermal_active = true;
+ else
+ adev->pm.dpm.thermal_active = false;
+ adev->pm.dpm.state = dpm_state;
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+}
+
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type dpm_state)
+{
+ int i;
+ struct amdgpu_ps *ps;
+ u32 ui_class;
+ bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
+ true : false;
+
+ /* check if the vblank period is too short to adjust the mclk */
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+ if (amdgpu_dpm_vblank_too_short(adev))
+ single_display = false;
+ }
+
+	/* certain older ASICs have a separate 3D performance state,
+ * so try that first if the user selected performance
+ */
+ if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+ /* balanced states don't exist at the moment */
+ if (dpm_state == POWER_STATE_TYPE_BALANCED)
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+ /* Pick the best power state based on current conditions */
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ ps = &adev->pm.dpm.ps[i];
+ ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+ switch (dpm_state) {
+ /* user states */
+ case POWER_STATE_TYPE_BATTERY:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ /* internal states */
+ case POWER_STATE_TYPE_INTERNAL_UVD:
+ if (adev->pm.dpm.uvd_ps)
+ return adev->pm.dpm.uvd_ps;
+ else
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_BOOT:
+ return adev->pm.dpm.boot_ps;
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ULV:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ return ps;
+ break;
+ default:
+ break;
+ }
+ }
+ /* use a fallback state if we didn't match */
+ switch (dpm_state) {
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (adev->pm.dpm.uvd_ps) {
+ return adev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ dpm_state = POWER_STATE_TYPE_BATTERY;
+ goto restart_search;
+ case POWER_STATE_TYPE_BATTERY:
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+ struct amdgpu_ps *ps;
+ enum amd_pm_state_type dpm_state;
+ int ret;
+ bool equal = false;
+
+ /* if dpm init failed */
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+ /* add other state override checks here */
+ if ((!adev->pm.dpm.thermal_active) &&
+ (!adev->pm.dpm.uvd_active))
+ adev->pm.dpm.state = adev->pm.dpm.user_state;
+ }
+ dpm_state = adev->pm.dpm.state;
+
+ ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+ if (ps)
+ adev->pm.dpm.requested_ps = ps;
+ else
+ return;
+
+ if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
+ printk("switching from power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+ }
+
+ /* update whether vce is active */
+ ps->vce_active = adev->pm.dpm.vce_active;
+ if (adev->powerplay.pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ ret = amdgpu_dpm_pre_set_power_state(adev);
+ if (ret)
+ return;
+
+ if (adev->powerplay.pp_funcs->check_state_equal) {
+ if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
+
+ if (equal)
+ return;
+
+ amdgpu_dpm_set_power_state(adev);
+ amdgpu_dpm_post_set_power_state(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+ if (adev->powerplay.pp_funcs->force_performance_level) {
+ if (adev->pm.dpm.thermal_active) {
+ enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+ /* force low perf level for thermal */
+ amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+ /* save the user's level */
+ adev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
+ }
+ }
+}
+
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+{
+ int i = 0;
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+ if (is_support_sw_smu(adev)) {
+ struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ smu_handle_task(&adev->smu,
+ smu_dpm->dpm_level,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
+ } else {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+				/* we have issues with mclk switching with refresh rates over 120 Hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+ } else {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ amdgpu_dpm_change_power_state_locked(adev);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ }
+}
+
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+			DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
+ }
+}
+
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+			DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+ }
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
+ return;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+
+}
+
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
+ enable ? "enable" : "disable", ret);
+}
+
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+ int r;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ return r;
+ }
+ *smu_version = adev->pm.fw_version;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e4dbf14320b6..529816637c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -29,17 +29,14 @@
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
-#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
-#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"
-#define WIDTH_4K 3840
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
@@ -81,45 +78,6 @@ static const struct hwmon_temp_label {
{PP_TEMP_MEM, "mem"},
};
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
- mutex_unlock(&adev->pm.mutex);
-
- if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
- }
-}
-
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size)
-{
- int ret = 0;
-
- if (!data || !size)
- return -EINVAL;
-
- if (is_support_sw_smu(adev))
- ret = smu_read_sensor(&adev->smu, sensor, data, size);
- else {
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
- sensor, data, size);
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
-
/**
* DOC: power_dpm_state
*
@@ -159,11 +117,11 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type pm;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -197,11 +155,11 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (strncmp("battery", buf, strlen("battery")) == 0)
@@ -303,11 +261,11 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -344,12 +302,12 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (strncmp("low", buf, strlen("low")) == 0) {
@@ -449,11 +407,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
struct pp_states_info data;
int i, buf_len, ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -491,13 +449,13 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
struct pp_states_info data;
struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -536,9 +494,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->pp_force_state_enabled)
@@ -553,12 +511,12 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state = 0;
unsigned long idx;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (strlen(buf) == 1)
@@ -614,11 +572,11 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
char *table = NULL;
int size, ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -659,10 +617,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -694,6 +652,52 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* in each power level within a power state. The pp_od_clk_voltage is used for
* this.
*
+ * Note that the actual memory controller clock rate is exposed, not
+ * the effective memory clock of the DRAMs. To translate it, use the
+ * following formula:
+ *
+ * Clock conversion (MHz):
+ *
+ * HBM: effective_memory_clock = memory_controller_clock * 1
+ *
+ * G5: effective_memory_clock = memory_controller_clock * 1
+ *
+ * G6: effective_memory_clock = memory_controller_clock * 2
+ *
+ * DRAM data rate (MT/s):
+ *
+ * HBM: effective_memory_clock * 2 = data_rate
+ *
+ * G5: effective_memory_clock * 4 = data_rate
+ *
+ * G6: effective_memory_clock * 8 = data_rate
+ *
+ * Bandwidth (MB/s):
+ *
+ * data_rate * vram_bit_width / 8 = memory_bandwidth
+ *
+ * Some examples:
+ *
+ * G5 on RX460:
+ *
+ * memory_controller_clock = 1750 MHz
+ *
+ * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
+ *
+ * data_rate = 1750 * 4 = 7000 MT/s
+ *
+ * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
+ *
+ * G6 on RX5700:
+ *
+ * memory_controller_clock = 875 MHz
+ *
+ * effective_memory_clock = 875 MHz * 2 = 1750 MHz
+ *
+ * data_rate = 1750 * 8 = 14000 MT/s
+ *
+ * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
+ *
* < For Vega10 and previous ASICs >
*
* Reading the file will display:
@@ -759,7 +763,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t parameter_size = 0;
long parameter[64];
@@ -769,7 +773,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (count > 127)
@@ -796,7 +800,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -822,6 +827,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return -EINVAL;
}
} else {
+
+ if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+ ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+ parameter,
+ parameter_size);
+ if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
+ }
+ }
+
if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size);
@@ -858,11 +875,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -912,11 +929,11 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t featuremask;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = kstrtou64(buf, 0, &featuremask);
@@ -957,11 +974,11 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1018,11 +1035,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1066,7 +1083,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1085,11 +1103,11 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1121,11 +1139,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1153,11 +1171,11 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t mask = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1189,11 +1207,11 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1221,11 +1239,11 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1259,11 +1277,11 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1291,11 +1309,11 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1329,11 +1347,11 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1361,11 +1379,11 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1399,11 +1417,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1431,11 +1449,11 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
uint32_t mask = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
@@ -1469,11 +1487,11 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t value = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1499,11 +1517,11 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
long int value;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1542,11 +1560,11 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t value = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1572,11 +1590,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
long int value;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1635,11 +1653,11 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
ssize_t size;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
@@ -1669,7 +1687,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
{
int ret;
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t parameter_size = 0;
long parameter[64];
char *sub_str, buf_cpy[128];
@@ -1679,7 +1697,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
tmp[0] = *(buf);
@@ -1695,7 +1713,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+ while (tmp_str[0]) {
+ sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1739,10 +1758,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int r, value, size = sizeof(value);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
@@ -1777,10 +1796,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
int r, value, size = sizeof(value);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
@@ -1819,11 +1838,11 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t count0 = 0, count1 = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->flags & AMD_IS_APU)
@@ -1862,9 +1881,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->unique_id)
@@ -1893,10 +1912,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
- adev->ddev->unique,
+ adev_to_drm(adev)->unique,
atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
adev->throttling_logging_rs.interval / HZ + 1);
}
@@ -1907,7 +1926,7 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(ddev);
long throttling_logging_interval;
unsigned long flags;
int ret = 0;
@@ -1940,6 +1959,57 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
return count;
}
+/**
+ * DOC: gpu_metrics
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current GPU
+ * metrics data. The file gpu_metrics is used for this. Reading the
+ * file will dump all the current GPU metrics data.
+ *
+ * The data include temperature, frequency, engine utilization, power
+ * consumption, throttler status, fan speed and CPU core statistics
+ * (available on APUs only). In other words, reading the file gives a
+ * snapshot of all sensors at the same time.
+ */
+static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ void *gpu_metrics;
+ ssize_t size = 0;
+ int ret;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ if (is_support_sw_smu(adev))
+ size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
+ else if (adev->powerplay.pp_funcs->get_gpu_metrics)
+ size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
+
+ if (size <= 0)
+ goto out;
+
+ if (size >= PAGE_SIZE)
+ size = PAGE_SIZE - 1;
+
+ memcpy(buf, gpu_metrics, size);
+
+out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return size;
+}
+
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
@@ -1963,6 +2033,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
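With gpu_metrics registered in the attribute list above, userspace reads the binary table straight from sysfs. A minimal reader sketch (the card0 path and buffer size are assumptions; the leading fields follow the metrics_table_header layout from kgd_pp_interface.h):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[4096];	/* assumed large enough; the driver caps reads at PAGE_SIZE */
	size_t n;
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	if (n < 4)
		return 1;
	/* little-endian u16 structure_size, then format/content revisions */
	printf("structure_size=%u format=v%u.%u (%zu bytes read)\n",
	       (unsigned)(buf[0] | (buf[1] << 8)), buf[2], buf[3], n);
	return 0;
}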
@@ -2012,6 +2083,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(gpu_metrics)) {
+ if (asic_type < CHIP_VEGA12)
+ *states = ATTR_STATE_UNSUPPORTED;
}
if (asic_type == CHIP_ARCTURUS) {
@@ -2131,15 +2205,15 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2164,8 +2238,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
break;
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2267,12 +2341,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- ret = pm_runtime_get_sync(adev->ddev->dev);
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
@@ -2280,16 +2354,16 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -2303,16 +2377,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err, ret;
int value;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
err = kstrtoint(buf, 10, &value);
if (err)
return err;
- ret = pm_runtime_get_sync(adev->ddev->dev);
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
@@ -2320,16 +2394,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
smu_set_fan_control_mode(&adev->smu, value);
} else {
if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
amdgpu_dpm_set_fan_control_mode(adev, value);
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return count;
}
@@ -2357,12 +2431,12 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2373,15 +2447,15 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
err = kstrtou32(buf, 10, &value);
if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2394,8 +2468,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2411,12 +2485,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2427,8 +2501,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2446,12 +2520,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2462,8 +2536,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2480,20 +2554,20 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2510,20 +2584,20 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2539,12 +2613,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2555,8 +2629,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2573,12 +2647,12 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2588,15 +2662,15 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -ENODATA;
}
err = kstrtou32(buf, 10, &value);
if (err) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2607,8 +2681,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2624,12 +2698,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- ret = pm_runtime_get_sync(adev->ddev->dev);
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
@@ -2637,16 +2711,16 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
pwm_mode = smu_get_fan_control_mode(&adev->smu);
} else {
if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -2661,7 +2735,7 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
err = kstrtoint(buf, 10, &value);
@@ -2675,9 +2749,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2685,15 +2759,15 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
smu_set_fan_control_mode(&adev->smu, pwm_mode);
} else {
if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return -EINVAL;
}
amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return count;
}
@@ -2706,12 +2780,12 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
u32 vddgfx;
int r, size = sizeof(vddgfx);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2719,8 +2793,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2743,16 +2817,16 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
u32 vddnb;
int r, size = sizeof(vddnb);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2760,8 +2834,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2785,12 +2859,12 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
int r, size = sizeof(u32);
unsigned uw;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2798,8 +2872,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2826,12 +2900,12 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
ssize_t size;
int r;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2845,8 +2919,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
size = snprintf(buf, PAGE_SIZE, "\n");
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return size;
}
@@ -2860,12 +2934,12 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
ssize_t size;
int r;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2879,8 +2953,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
size = snprintf(buf, PAGE_SIZE, "\n");
}
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return size;
}
@@ -2895,7 +2969,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
if (amdgpu_sriov_vf(adev))
@@ -2908,9 +2982,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
- err = pm_runtime_get_sync(adev->ddev->dev);
+ err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
@@ -2921,8 +2995,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
else
err = -EINVAL;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
return err;
@@ -2938,12 +3012,12 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
uint32_t sclk;
int r, size = sizeof(sclk);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2951,8 +3025,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&sclk, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -2975,12 +3049,12 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
uint32_t mclk;
int r, size = sizeof(mclk);
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
- r = pm_runtime_get_sync(adev->ddev->dev);
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
@@ -2988,8 +3062,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&mclk, &size);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r)
return r;
@@ -3249,14 +3323,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
}
if (((adev->flags & AMD_IS_APU) ||
- adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
- adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
- (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ adev->family == AMDGPU_FAMILY_SI) && /* not implemented yet */
+ (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
+ if (((adev->family == AMDGPU_FAMILY_SI) ||
+ ((adev->flags & AMD_IS_APU) &&
+ (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
+ (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
+ return 0;
+
if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
@@ -3321,338 +3399,6 @@ static const struct attribute_group *hwmon_groups[] = {
NULL
};
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
-{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
- int temp, size = sizeof(temp);
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size)) {
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
- mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
-}
-
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
-{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
-
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
-
- /* certain older asics have a separate 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
-
- return NULL;
-}
-
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal = false;
-
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
- }
- dpm_state = adev->pm.dpm.state;
-
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
-
- if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
- }
-
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
- if (adev->powerplay.pp_funcs->display_configuration_changed)
- amdgpu_dpm_display_configuration_changed(adev);
-
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
-
- if (adev->powerplay.pp_funcs->check_state_equal) {
- if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
- equal = false;
- }
-
- if (equal)
- return;
-
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->powerplay.pp_funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
- }
-}
-
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
- }
- }
-}
-
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
- }
-}
-
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->powerplay.pp_funcs->print_power_state == NULL)
- return;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
-
-}
-
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
-{
- int ret = 0;
-
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
- if (ret)
- DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-}
-
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
-{
- int r;
-
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("smu firmware loading failed\n");
- return r;
- }
- *smu_version = adev->pm.fw_version;
- }
- return 0;
-}
-
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
int ret;
@@ -3713,55 +3459,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
-{
- int i = 0;
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
-
- if (is_support_sw_smu(adev)) {
- struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
- smu_handle_task(&adev->smu,
- smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
- true);
- } else {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
- }
- }
-}
-
/*
* Debugfs info
*/
@@ -3869,11 +3566,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 flags = 0;
int r;
- if (adev->in_gpu_reset)
+ if (amdgpu_in_reset(adev))
return -EPERM;
r = pm_runtime_get_sync(dev->dev);
@@ -3882,11 +3579,6 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return r;
}
- amdgpu_device_ip_get_clockgating_state(adev, &flags);
- seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
- amdgpu_parse_cg_state(m, flags);
- seq_printf(m, "\n");
-
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
pm_runtime_mark_last_busy(dev->dev);
@@ -3906,7 +3598,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
} else {
r = amdgpu_debugfs_pm_info_pp(m, adev);
}
+ if (r)
+ goto out;
+
+ amdgpu_device_ip_get_clockgating_state(adev, &flags);
+
+ seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
+ amdgpu_parse_cg_state(m, flags);
+ seq_printf(m, "\n");
+out:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h
index fe3665965416..fe3665965416 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index aa27fe65cdfa..f6e0e7d8a007 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -341,10 +341,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
-#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
- ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
- (adev)->powerplay.pp_handle, msg_id))
-
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
@@ -353,14 +349,14 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_power_profile_mode(\
(adev)->powerplay.pp_handle, parameter, size))
+#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
+ ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
+ (adev)->powerplay.pp_handle, type, parameter, size))
+
#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
-#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
- ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
- (adev)->powerplay.pp_handle))
-
#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
((adev)->powerplay.pp_funcs->get_ppfeature_status(\
(adev)->powerplay.pp_handle, (buf)))
@@ -369,6 +365,9 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_ppfeature_status(\
(adev)->powerplay.pp_handle, (ppfeatures)))
+#define amdgpu_dpm_get_gpu_metrics(adev, table) \
+ ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
+
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -545,4 +544,26 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev);
+
+int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
+ uint32_t msg_id);
+
+int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
+ bool acquire);
+
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
+
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
+
+void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
index d9ae2b49a402..45a22e101d15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
@@ -79,18 +79,10 @@ struct amdgpu_device_attr_entry {
amdgpu_get_##_name, NULL, \
_flags, ##__VA_ARGS__)
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 074458eb5407..44fd0cd069de 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -270,10 +270,14 @@ struct smu_table_context
*/
struct smu_table driver_table;
struct smu_table memory_pool;
+ struct smu_table dummy_read_1_table;
uint8_t thermal_controller_type;
void *overdrive_table;
void *boot_overdrive_table;
+
+ uint32_t gpu_metrics_table_size;
+ void *gpu_metrics_table;
};
struct smu_dpm_context {
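The new gpu_metrics_table/gpu_metrics_table_size pair caches the exported table per SMU context. A sketch of the allocate-on-first-use idiom an ASIC backend might follow (the helper name is hypothetical, not the driver's API):

/* Hypothetical helper: allocate the cached metrics table once,
 * then hand back the same buffer on every subsequent read.
 */
static void *smu_get_metrics_cache(struct smu_table_context *tables,
				   uint32_t size)
{
	if (!tables->gpu_metrics_table) {
		tables->gpu_metrics_table = kzalloc(size, GFP_KERNEL);
		if (!tables->gpu_metrics_table)
			return NULL;
		tables->gpu_metrics_table_size = size;
	}
	return tables->gpu_metrics_table;
}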
@@ -448,6 +452,11 @@ struct smu_context
bool dc_controlled_by_gpio;
struct work_struct throttling_logging_work;
+ atomic64_t throttle_int_counter;
+ struct work_struct interrupt_work;
+
+ unsigned fan_max_rpm;
+ unsigned manual_fan_speed_rpm;
};
struct i2c_adapter;
@@ -491,10 +500,9 @@ struct pptable_funcs {
int (*notify_smc_display_config)(struct smu_context *smu);
int (*set_cpu_power_state)(struct smu_context *smu);
bool (*is_dpm_running)(struct smu_context *smu);
- int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
int (*set_watermarks_table)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+ struct pp_smu_wm_range_sets *clock_ranges);
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
int (*set_default_od_settings)(struct smu_context *smu);
@@ -567,7 +575,6 @@ struct pptable_funcs {
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
- int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
int (*gfx_off_control)(struct smu_context *smu, bool enable);
@@ -585,11 +592,17 @@ struct pptable_funcs {
int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
- int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
void (*log_thermal_throttling_event)(struct smu_context *smu);
size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf);
int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask);
+ ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table);
+ int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+ int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+ int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+ int (*get_fan_parameters)(struct smu_context *smu);
+ int (*post_init)(struct smu_context *smu);
+ void (*interrupt_work)(struct smu_context *smu);
};
typedef enum {
@@ -693,7 +706,6 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
@@ -745,7 +757,7 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
int smu_write_watermarks_table(struct smu_context *smu);
int smu_set_watermarks_for_clock_ranges(
struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+ struct pp_smu_wm_range_sets *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -792,5 +804,9 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu, void **table);
+
+int smu_enable_mgpu_fan_boost(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h
index 79afb132164e..79afb132164e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h
index 9b698780aed8..9b698780aed8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h
index 7ae494569a60..7ae494569a60 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h
index 6e0be6027705..6e0be6027705 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
index 15ed6cbdf366..518796a26eda 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/inc/hwmgr.h
@@ -229,6 +229,7 @@ struct pp_smumgr_func {
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
+ int (*stop_smc)(struct pp_hwmgr *hwmgr);
};
struct pp_hwmgr_func {
@@ -340,6 +341,9 @@ struct pp_hwmgr_func {
int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
+ int (*set_fine_grain_clk_vol)(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
@@ -347,6 +351,8 @@ struct pp_hwmgr_func {
int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
@@ -359,6 +365,7 @@ struct pp_hwmgr_func {
int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
bool disable);
+ ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
index 6a53b7e74ccd..6a53b7e74ccd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/inc/power_state.h
index a5f2227a3971..a5f2227a3971 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h
+++ b/drivers/gpu/drm/amd/pm/inc/power_state.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/inc/pp_debug.h
index cea65093b6ad..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/pm/inc/pp_debug.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/inc/pp_endian.h
index f49d1963fe85..f49d1963fe85 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
+++ b/drivers/gpu/drm/amd/pm/inc/pp_endian.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/inc/pp_thermal.h
index 3e30768f9e1c..3e30768f9e1c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/pm/inc/pp_thermal.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h
index c067e0925b6b..c067e0925b6b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h
+++ b/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h
index df4677da736c..df4677da736c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h
index b96520528240..b96520528240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu10.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h
index dea8fe93da63..c498158771cc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu10_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h
@@ -54,7 +54,8 @@ typedef struct {
uint16_t MaxMclk;
uint8_t WmSetting;
- uint8_t Padding[3];
+ uint8_t WmType;
+ uint8_t Padding[2];
} WatermarkRowGeneric_t;
#define NUM_WM_RANGES 4
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h
index fdc6b7a57bc9..fdc6b7a57bc9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h
index 43d43d6addc0..43d43d6addc0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h
index 4b2da98afcd2..246d3951a78a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h
@@ -885,6 +885,45 @@ typedef struct {
} SmuMetrics_t;
typedef struct {
+ uint16_t CurrClock[PPCLK_COUNT];
+ uint16_t AverageGfxclkFrequency;
+ uint16_t AverageSocclkFrequency;
+ uint16_t AverageUclkFrequency;
+ uint16_t AverageGfxActivity;
+ uint16_t AverageUclkActivity;
+ uint8_t CurrSocVoltageOffset;
+ uint8_t CurrGfxVoltageOffset;
+ uint8_t CurrMemVidOffset;
+ uint8_t Padding8;
+ uint16_t AverageSocketPower;
+ uint16_t TemperatureEdge;
+ uint16_t TemperatureHotspot;
+ uint16_t TemperatureMem;
+ uint16_t TemperatureVrGfx;
+ uint16_t TemperatureVrMem0;
+ uint16_t TemperatureVrMem1;
+ uint16_t TemperatureVrSoc;
+ uint16_t TemperatureLiquid0;
+ uint16_t TemperatureLiquid1;
+ uint16_t TemperaturePlx;
+ uint16_t Padding16;
+ uint32_t ThrottlerStatus;
+
+ uint8_t LinkDpmLevel;
+ uint8_t Padding8_2;
+ uint16_t CurrFanSpeed;
+
+ uint32_t EnergyAccumulator;
+ uint16_t AverageVclkFrequency;
+ uint16_t AverageDclkFrequency;
+ uint16_t VcnActivityPercentage;
+ uint16_t padding16_2;
+
+ // Padding - ignore
+ uint32_t MmHubPadding[8]; // SMU internal use
+} SmuMetrics_NV12_t;
+
+typedef struct {
uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
uint16_t MinUclk;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
index aa2708fccb6d..1275246769d9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
@@ -27,9 +27,9 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x34
+#define SMU11_DRIVER_IF_VERSION 0x39
-#define PPTABLE_Sienna_Cichlid_SMU_VERSION 5
+#define PPTABLE_Sienna_Cichlid_SMU_VERSION 6
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SMNCLK_DPM_LEVELS 2
@@ -127,7 +127,7 @@
#define FEATURE_DF_CSTATE_BIT 45
#define FEATURE_2_STEP_PSTATE_BIT 46
#define FEATURE_SMNCLK_DPM_BIT 47
-#define FEATURE_SPARE_48_BIT 48
+#define FEATURE_PERLINK_GMIDOWN_BIT 48
#define FEATURE_GFX_EDC_BIT 49
#define FEATURE_SPARE_50_BIT 50
#define FEATURE_SPARE_51_BIT 51
@@ -793,8 +793,18 @@ typedef struct {
// SECTION: Sku Reserved
uint8_t CustomerVariant;
- uint8_t Spare[3];
- uint32_t SkuReserved[14];
+
+ //VC BTC parameters are only applicable to VDD_GFX domain
+ uint8_t VcBtcEnabled;
+ uint16_t VcBtcVminT0; // T0_VMIN
+ uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET
+ uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB
+ uint32_t VcBtcPsmA; // A_PSM
+ uint32_t VcBtcPsmB; // B_PSM
+ uint32_t VcBtcVminA; // A_VMIN
+ uint32_t VcBtcVminB; // B_VMIN
+
+ uint32_t SkuReserved[9];
// MAJOR SECTION: BOARD PARAMETERS
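A quick size check on the hunk above (these SMU tables are laid out byte-for-byte, so replacements must balance exactly): the removed Spare[3] plus SkuReserved[14] covered 3 + 14*4 = 59 bytes; the new VC BTC block takes 1 (VcBtcEnabled) + 3*2 (the uint16_t Vmin parameters) + 4*4 (the four uint32_t coefficients) = 23 bytes, and SkuReserved[9] keeps the remaining 9*4 = 36, so 23 + 36 = 59 and everything after it stays put.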
@@ -952,7 +962,7 @@ typedef struct {
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
uint16_t MaxOpTemp; // degrees Celsius
- uint16_t Padding_16[1];
+ int16_t VddGfxOffset; // in mV
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h
index e9315eb5b48e..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/pm/inc/smu7.h
index e14072d45918..e14072d45918 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu7.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/pm/inc/smu71.h
index 71c9b2d28640..71c9b2d28640 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu71.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu71.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h
index c0e3936d5c2e..c0e3936d5c2e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/pm/inc/smu72.h
index 9ad1cefff79f..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu72.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h
index 2aefbb85f620..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73.h b/drivers/gpu/drm/amd/pm/inc/smu73.h
index c6b12a4c00db..c6b12a4c00db 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu73.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu73.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h
index 5916be08a7fe..5916be08a7fe 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/pm/inc/smu74.h
index fd10a9fa843d..fd10a9fa843d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu74.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h
index 899d6d8108c2..899d6d8108c2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/pm/inc/smu75.h
index 771523001533..771523001533 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu75.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu75.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h
index b64e58a22ddf..b64e58a22ddf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/pm/inc/smu7_common.h
index 94bf7b649c20..94bf7b649c20 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu7_common.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h b/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h
index ee876745dd12..ee876745dd12 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h
index 78ada9ffd508..78ada9ffd508 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h
index 6e19f4c7cf8f..6e19f4c7cf8f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu8.h b/drivers/gpu/drm/amd/pm/inc/smu8.h
index d758d07b6a31..d758d07b6a31 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu8.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu8.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h b/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h
index 0c37c94e9414..0c37c94e9414 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/pm/inc/smu9.h
index 70ac4d477be2..70ac4d477be2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu9.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h
index 2818c98ff5ca..2818c98ff5ca 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h
new file mode 100644
index 000000000000..beab6d7b28b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef SMU_11_0_CDR_TABLE
+#define SMU_11_0_CDR_TABLE
+
+
+#pragma pack(push, 1)
+
+/// CDR table : PRBS sequence for DQ toggles
+
+/*static unsigned int NoDbiPrbs7[] =
+{
+//256 bytes, 256 byte aligned
+0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+};
+
+
+static unsigned int DbiPrbs7[] =
+{
+// 256 bytes, 256 byte aligned
+0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+};
+*/
+
+
+//4096 bytes, 256 byte aligned
+static unsigned int NoDbiPrbs7[] =
+{
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f,
+ 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f,
+ 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff,
+};
+
+// 4096 bytes, 256 byte aligned
+static unsigned int DbiPrbs7[] =
+{
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff,
+ 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff,
+ 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff,
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 7b585e205a5a..cbf4a58b77d9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -173,6 +173,9 @@
__SMU_DUMMY_MAP(GmiPwrDnControl), \
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
+ __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH), \
+ __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \
+ __SMU_DUMMY_MAP(GET_UMC_FW_WA), \
__SMU_DUMMY_MAP(Mode1Reset), \
#undef __SMU_DUMMY_MAP
@@ -217,6 +220,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(DPM_MP0CLK), \
__SMU_DUMMY_MAP(DPM_LINK), \
__SMU_DUMMY_MAP(DPM_DCEFCLK), \
+ __SMU_DUMMY_MAP(DPM_XGMI), \
__SMU_DUMMY_MAP(DS_GFXCLK), \
__SMU_DUMMY_MAP(DS_SOCCLK), \
__SMU_DUMMY_MAP(DS_LCLK), \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h
index eb0f79f9c876..eb0f79f9c876 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h
index 880152c0f775..880152c0f775 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index 6a42331aba8a..2d1c3babaa3a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -28,10 +28,10 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_ARCT 0x17
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
-#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34
-#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x39
+#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x5
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -200,12 +200,12 @@ int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode);
-int
-smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
-
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed);
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed);
+
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate);
@@ -264,5 +264,23 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
uint32_t *min_value,
uint32_t *max_value);
+int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
+
+int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
+
+void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics);
+
+int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement);
+
+int smu_v11_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement);
+
+void smu_v11_0_interrupt_work(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
index 35dd6072cc45..35dd6072cc45 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h
index 247c6e9632ba..247c6e9632ba 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h
index 406bfd187ce8..26181b679098 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h
@@ -123,7 +123,14 @@
#define PPSMC_MSG_DALDisableDummyPstateChange 0x49
#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A
-#define PPSMC_Message_Count 0x4B
+#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x4C
+
+#define PPSMC_MSG_SetDriverDummyTableDramAddrHigh 0x4E
+#define PPSMC_MSG_SetDriverDummyTableDramAddrLow 0x4F
+
+#define PPSMC_MSG_GetUMCFWWA 0x50
+
+#define PPSMC_Message_Count 0x51
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h
index 7a63cf8e85ed..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index 02de3b6199e5..fa2e8cb07967 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h
index 9ac9f3bd3664..9ac9f3bd3664 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/pm/inc/smumgr.h
index ad100b533d04..5f46f1a4f38e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/pm/inc/smumgr.h
@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h
index 63631296d751..63631296d751 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h
index 715b5a168831..715b5a168831 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h
index b6ffd08784e7..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h
index f985c78d746a..f985c78d746a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h
index 0c66f0fe1aaf..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile
index e9c48f99f71b..0fb114adc79f 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2020 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -20,25 +20,20 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
-subdir-ccflags-y += \
- -I$(FULL_AMD_PATH)/powerplay/inc/ \
- -I$(FULL_AMD_PATH)/include/asic_reg \
- -I$(FULL_AMD_PATH)/include \
- -I$(FULL_AMD_PATH)/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/powerplay/hwmgr
-
-AMD_PP_PATH = ../powerplay
+AMD_PP_PATH = ../pm/powerplay
PP_LIBS = smumgr hwmgr
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/powerplay/,$(PP_LIBS)))
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o \
- smu_v12_0.o arcturus_ppt.o navi10_ppt.o \
- renoir_ppt.o sienna_cichlid_ppt.o smu_cmn.o
+POWER_MGR-y = amd_powerplay.o
+
+POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
+
+POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
-AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
+AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y))
AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 7e6dcdf7df73..eab9768029c1 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -911,6 +911,19 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
return ret;
}
+static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
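+ /* an ASIC without this callback silently ignores the request */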
+ if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
+ return 0;
+
+ return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
+}
+
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
@@ -920,7 +933,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
- return -EINVAL;
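+ /* a missing callback is now a no-op rather than an error */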
+ return 0;
}
return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
@@ -1598,6 +1611,24 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
return 0;
}
+static ssize_t pp_get_gpu_metrics(void *handle, void **table)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ ssize_t size;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
+ return -EOPNOTSUPP;
+
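+ /* hold smu_lock so the metrics fetch cannot race other SMU requests */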
+ mutex_lock(&hwmgr->smu_lock);
+ size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return size;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1627,6 +1658,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
+ .set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
.set_mp1_state = pp_dpm_set_mp1_state,
.set_power_limit = pp_set_power_limit,
@@ -1658,4 +1690,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
.set_df_cstate = pp_set_df_cstate,
.set_xgmi_pstate = pp_set_xgmi_pstate,
+ .get_gpu_metrics = pp_get_gpu_metrics,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h
index 2fcc4b60153c..2fcc4b60153c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile
index 2773966ae434..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
index 3be40114e63d..45f608838f6e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c
@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
- { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
static const struct baco_cmd_entry clean_baco_tbl[] =
{
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h
index 17041f187020..17041f187020 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c
index 1c73776bd606..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h
index 8393eb62706d..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c
index c0368f2dfb21..c0368f2dfb21 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h
index 47f402900bdb..47f402900bdb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
index 9454ab50f9a1..1f9b9facdf1f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
@@ -271,7 +271,10 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
- PHM_FUNC_CHECK(hwmgr);
+ if (hwmgr == NULL ||
+ hwmgr->hwmgr_func == NULL)
+ return false;
+
if (hwmgr->pp_one_vf)
return false;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
index f48fdc7f0382..f48fdc7f0382 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h
index c0193e09d58a..c0193e09d58a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c
index 8f8e296f2fe9..8f8e296f2fe9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h
index 87a5fa0a157a..87a5fa0a157a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c
index 8de384bf9a8f..8de384bf9a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h
index 4112a9398163..4112a9398163 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
index 31a32a79cfc2..31a32a79cfc2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h
index b62d55f1f289..b62d55f1f289 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 01dc46dc9c8a..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
index 3ee54f182943..76ed2e413594 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
@@ -26,15 +26,6 @@
#include "hwmgr.h"
-#define MEM_TYPE_GDDR5 0x50
-#define MEM_TYPE_GDDR4 0x40
-#define MEM_TYPE_GDDR3 0x30
-#define MEM_TYPE_DDR2 0x20
-#define MEM_TYPE_GDDR1 0x10
-#define MEM_TYPE_DDR3 0xb0
-#define MEM_TYPE_MASK 0xF0
-
-
/* As returned from PowerConnectorDetectionTable. */
#define PP_ATOM_POWER_BUDGET_DISABLE_OVERDRIVE 0x80
#define PP_ATOM_POWER_BUDGET_SHOW_WARNING 0x40
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
index 615cf2c09e54..615cf2c09e54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index b7e2651b570b..b7e2651b570b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
index 8f50a038396c..8f50a038396c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c
index 186496a34cbe..186496a34cbe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h
index 70b163b35570..70b163b35570 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
index 1e870f58dd12..1e870f58dd12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index b760f95e7fa7..b760f95e7fa7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h
index b9710abdff01..b9710abdff01 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
index 719597c5d27d..719597c5d27d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h
index baddaa75693b..baddaa75693b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 43f7adff6cb7..cf60f3992303 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -242,6 +242,34 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
return 0;
}
+static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
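+ /* message the SMC only when the requested hard minimum actually changes */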
+ if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
+ smu10_data->gfx_actual_soft_min_freq = clock;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ }
+ return 0;
+}
+
+static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
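+ /* gfx_max_freq_limit is cached in 10KHz units while the message takes the raw clock */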
+ if (clock && smu10_data->gfx_max_freq_limit != (clock * 100)) {
+ smu10_data->gfx_max_freq_limit = clock * 100;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ clock,
+ NULL);
+ }
+ return 0;
+}
+
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -527,6 +555,9 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
+ /* enable the pp_od_clk_voltage sysfs file */
+ hwmgr->od_enabled = 1;
+
return result;
}
@@ -650,7 +681,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE,
+ SMU10_UMD_PSTATE_PROFILE_VCE,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -667,7 +698,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE,
+ SMU10_UMD_PSTATE_PROFILE_VCE,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
@@ -949,6 +980,26 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
((mclk_table->entries[i].clk / 100)
== now) ? "*" : "");
break;
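+ /* OD_SCLK/OD_RANGE back the newly enabled pp_od_clk_voltage sysfs interface */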
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+
+ size += sprintf(buf + size, "0: %10uMHz\n",
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
+ size += sprintf(buf + size, "1: %10uMHz\n", data->gfx_max_freq_limit/100);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+ uint32_t min_freq, max_freq = 0;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ min_freq, max_freq);
+ }
+ break;
default:
break;
}
@@ -1183,8 +1234,19 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct smu10_hwmgr *data = hwmgr->backend;
struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
+ struct amdgpu_device *adev = hwmgr->adev;
+ int i;
smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges);
+
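+ /* Raven2: clear the DCFCLK and SOCCLK watermark type fields before the table upload */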
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ for (i = 0; i < NUM_WM_RANGES; i++)
+ table->WatermarkRow[WM_DCFCLK][i].WmType = (uint8_t)0;
+
+ for (i = 0; i < NUM_WM_RANGES; i++)
+ table->WatermarkRow[WM_SOCCLK][i].WmType = (uint8_t)0;
+ }
+
smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
data->water_marks_exist = true;
return 0;
@@ -1350,6 +1412,32 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
NULL);
}
+static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ if (!hwmgr->od_enabled) {
+ pr_err("Fine grain not supported\n");
+ return -EINVAL;
+ }
+
+ if (size != 2) {
+ pr_err("Incorrect number of input parameters\n");
+ return -EINVAL;
+ }
+
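+ /* input[0] selects the level (0 = hard min, 1 = soft max), input[1] is the clock */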
+ if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+ if (input[0] == 0)
+ smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
+ else if (input[0] == 1)
+ smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
@@ -1390,9 +1478,12 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.powergate_sdma = smu10_powergate_sdma,
.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+ .set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
+ .set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
.get_power_profile_mode = smu10_get_power_profile_mode,
.set_power_profile_mode = smu10_set_power_profile_mode,
.asic_reset = smu10_asic_reset,
+ .set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 0f969de10fab..6c9b5f060902 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -284,7 +284,7 @@ struct smu10_hwmgr {
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
uint32_t gfx_min_freq_limit;
- uint32_t gfx_max_freq_limit;
+ uint32_t gfx_max_freq_limit; /* in 10KHz */
bool vcn_power_gated;
bool vcn_dpg_mode;
@@ -310,6 +310,7 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
#define SMU10_UMD_PSTATE_SOCCLK 626
#define SMU10_UMD_PSTATE_FCLK 933
#define SMU10_UMD_PSTATE_VCE 0x03C00320
+#define SMU10_UMD_PSTATE_PROFILE_VCE 0x02AD0229
#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h
index edb68e302f6f..edb68e302f6f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_inc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
index 044cda005aed..044cda005aed 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
index be0d98abb536..be0d98abb536 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
index f2bda3bcbbde..f2bda3bcbbde 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
index fc8f8a6acc72..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h
index 3477d4dfff70..3477d4dfff70 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 4a3b64aa21ce..35629140fc7a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to reset to default!", result = tmp_result);
+ tmp_result = smum_stop_smc(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop smc!", result = tmp_result);
+
tmp_result = smu7_force_switch_to_arbf0(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to force to switch arbf0!", result = tmp_result);
@@ -1585,9 +1589,25 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
data->current_profile_setting.bupdate_mclk = 1;
- data->current_profile_setting.mclk_up_hyst = 0;
- data->current_profile_setting.mclk_down_hyst = 100;
- data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+ if (hwmgr->chip_id >= CHIP_POLARIS10) {
+ if (adev->gmc.vram_width == 256) {
+ data->current_profile_setting.mclk_up_hyst = 10;
+ data->current_profile_setting.mclk_down_hyst = 60;
+ data->current_profile_setting.mclk_activity = 25;
+ } else if (adev->gmc.vram_width == 128) {
+ data->current_profile_setting.mclk_up_hyst = 5;
+ data->current_profile_setting.mclk_down_hyst = 16;
+ data->current_profile_setting.mclk_activity = 20;
+ } else if (adev->gmc.vram_width == 64) {
+ data->current_profile_setting.mclk_up_hyst = 3;
+ data->current_profile_setting.mclk_down_hyst = 16;
+ data->current_profile_setting.mclk_activity = 20;
+ }
+ } else {
+ data->current_profile_setting.mclk_up_hyst = 0;
+ data->current_profile_setting.mclk_down_hyst = 100;
+ data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+ }
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2873,7 +2893,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
if (hwmgr->is_kicker)
switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
else
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
index 69d361f8dfca..69d361f8dfca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
index 5d4971576111..5d4971576111 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h
index 22f86b6bf1be..22f86b6bf1be 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index 0b30f73649a8..0b30f73649a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
index 42c1ba0fad78..42c1ba0fad78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 35ed47ebaf09..35ed47ebaf09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h
index 05a06083e1b8..05a06083e1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
index de0a37f7c632..de0a37f7c632 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
index 84e90f801ac3..84e90f801ac3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
index 60b5ca974356..60b5ca974356 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
index ad33983a8064..ad33983a8064 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
index ea743bea8e29..ea743bea8e29 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h
index 5dc16cc8a295..5dc16cc8a295 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
index 46bb16c29cf6..46bb16c29cf6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h
index 96d793f026a5..96d793f026a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c378a000c934..7eada3098ffc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4659,7 +4659,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
- PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
+ PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
index f752b4ad0c8a..f752b4ad0c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h
index faf7ac044348..faf7ac044348 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 9757d47dd6b8..9757d47dd6b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h
index b95771ab89cd..b95771ab89cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
index c934e9612c1b..c934e9612c1b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
index f29af5ca0aa0..f29af5ca0aa0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h
index da5fbec9b0cd..da5fbec9b0cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index 952cd3d7240e..952cd3d7240e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
index 4a0ede7c1f07..4a0ede7c1f07 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c
index bc53cce4f32d..bc53cce4f32d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h
index 57b72e5a95ae..57b72e5a95ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index a678a67f1c0d..dc206fa88c5e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -47,6 +47,13 @@
#include "pp_thermal.h"
#include "vega12_baco.h"
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+#define LINK_WIDTH_MAX 6
+#define LINK_SPEED_MAX 3
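+/* Decode tables indexed by the level fields read back from the LC
+ * registers above; link speeds are in 0.1 GT/s units (2.5, 5.0,
+ * 8.0 and 16.0 GT/s).
+ */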
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
@@ -1255,22 +1262,29 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
return (mem_clk * 100);
}
-static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+static int vega12_get_metrics_table(struct pp_hwmgr *hwmgr,
+ SmuMetrics_t *metrics_table,
+ bool bypass_cache)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
int ret = 0;
- if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
- ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
- TABLE_SMU_METRICS, true);
+ if (bypass_cache ||
+ !data->metrics_time ||
+ time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
+ ret = smum_smc_table_manager(hwmgr,
+ (uint8_t *)(&data->metrics_table),
+ TABLE_SMU_METRICS,
+ true);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
return ret;
}
- memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
data->metrics_time = jiffies;
- } else
+ }
+
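+ /* Serve the caller from the cached copy; a NULL metrics_table only refreshes the cache */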
+ if (metrics_table)
memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
return ret;
@@ -1281,7 +1295,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
SmuMetrics_t metrics_table;
int ret = 0;
- ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -1332,7 +1346,7 @@ static int vega12_get_current_activity_percent(
SmuMetrics_t metrics_table;
int ret = 0;
- ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -1380,7 +1394,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -1389,7 +1403,7 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
- ret = vega12_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega12_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -2095,6 +2109,46 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return 0;
}
+static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+{
+ uint32_t width_level;
+
+ width_level = vega12_get_current_pcie_link_width_level(hwmgr);
+ if (width_level > LINK_WIDTH_MAX)
+ width_level = 0;
+
+ return link_width[width_level];
+}
+
+static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speed_level;
+
+ speed_level = vega12_get_current_pcie_link_speed_level(hwmgr);
+ if (speed_level > LINK_SPEED_MAX)
+ speed_level = 0;
+
+ return link_speed[speed_level];
+}
+
static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -2390,7 +2444,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr,
(uint8_t *)wm_table, TABLE_WATERMARKS, false);
- PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
+ PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
@@ -2682,6 +2736,69 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
+static void vega12_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
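+ /* 0xFF-fill first so any field not explicitly set below reads back as "unavailable" */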
+ memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+ gpu_metrics->common_header.structure_size =
+ sizeof(struct gpu_metrics_v1_0);
+ gpu_metrics->common_header.format_revision = 1;
+ gpu_metrics->common_header.content_revision = 0;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+static ssize_t vega12_get_gpu_metrics(struct pp_hwmgr *hwmgr,
+ void **table)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ struct gpu_metrics_v1_0 *gpu_metrics =
+ &data->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ uint32_t fan_speed_rpm;
+ int ret;
+
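+ /* Bypass the metrics cache so the exported table reflects the current hardware state */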
+ ret = vega12_get_metrics_table(hwmgr, &metrics, true);
+ if (ret)
+ return ret;
+
+ vega12_init_gpu_metrics_v1_0(gpu_metrics);
+
+ gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+ gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+ gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+ gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ vega12_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
+ gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
+
+ gpu_metrics->pcie_link_width =
+ vega12_get_current_pcie_link_width(hwmgr);
+ gpu_metrics->pcie_link_speed =
+ vega12_get_current_pcie_link_speed(hwmgr);
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_0);
+}
+
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.backend_init = vega12_hwmgr_backend_init,
.backend_fini = vega12_hwmgr_backend_fini,
@@ -2739,6 +2856,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
.set_mp1_state = vega12_set_mp1_state,
+ .get_gpu_metrics = vega12_get_gpu_metrics,
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h
index 73875399666a..aa63ae41942d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h
@@ -399,6 +399,7 @@ struct vega12_hwmgr {
unsigned long metrics_time;
SmuMetrics_t metrics_table;
+ struct gpu_metrics_v1_0 gpu_metrics_table;
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h
index e6d9e84059e1..0d08c57d3bca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h
@@ -35,7 +35,6 @@
#include "asic_reg/gc/gc_9_2_1_sh_mask.h"
#include "asic_reg/nbio/nbio_6_1_offset.h"
-#include "asic_reg/nbio/nbio_6_1_offset.h"
#include "asic_reg/nbio/nbio_6_1_sh_mask.h"
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h
index bf4f5095b80d..bf4f5095b80d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index 195d8539fbb4..740e2fc7a034 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -252,7 +252,7 @@ static int init_powerplay_table_information(
phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT);
phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT);
- pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
+ pptable_information->smc_pptable = kmalloc(sizeof(PPTable_t), GFP_KERNEL);
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h
index 65652ae65929..65652ae65929 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
index 7ace439dcde7..7ace439dcde7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h
index 0d8ed039ab12..0d8ed039ab12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
index 2a28c9df15a0..2a28c9df15a0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
index f06471e712dc..f06471e712dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index ea70d736f6a8..da84012b7fd5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -55,6 +55,11 @@
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+#define LINK_WIDTH_MAX 6
+#define LINK_SPEED_MAX 3
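+/* Decode tables indexed by the level fields read back from the LC
+ * registers above; link speeds are in 0.1 GT/s units (2.5, 5.0,
+ * 8.0 and 16.0 GT/s).
+ */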
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
+
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
@@ -484,7 +489,7 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
- bool use_baco = (adev->in_gpu_reset &&
+ bool use_baco = (amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
(adev->in_runpm && amdgpu_asic_supports_baco(adev));
@@ -2067,22 +2072,29 @@ static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
return (mem_clk * 100);
}
-static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
+static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
+ SmuMetrics_t *metrics_table,
+ bool bypass_cache)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
int ret = 0;
- if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
- ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
- TABLE_SMU_METRICS, true);
+ if (bypass_cache ||
+ !data->metrics_time ||
+ time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
+ ret = smum_smc_table_manager(hwmgr,
+ (uint8_t *)(&data->metrics_table),
+ TABLE_SMU_METRICS,
+ true);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
return ret;
}
- memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
data->metrics_time = jiffies;
- } else
+ }
+
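+ /* Serve the caller from the cached copy; a NULL metrics_table only refreshes the cache */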
+ if (metrics_table)
memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
return ret;
@@ -2094,7 +2106,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
int ret = 0;
SmuMetrics_t metrics_table;
- ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -2132,7 +2144,7 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
int ret = 0;
SmuMetrics_t metrics_table;
- ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -2162,7 +2174,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -2187,7 +2199,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -2196,7 +2208,7 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
- ret = vega20_get_metrics_table(hwmgr, &metrics_table);
+ ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
if (ret)
return ret;
@@ -3259,6 +3271,46 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return 0;
}
+static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
+{
+ uint32_t width_level;
+
+ width_level = vega20_get_current_pcie_link_width_level(hwmgr);
+ if (width_level > LINK_WIDTH_MAX)
+ width_level = 0;
+
+ return link_width[width_level];
+}
+
+static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speed_level;
+
+ speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
+ if (speed_level > LINK_SPEED_MAX)
+ speed_level = 0;
+
+ return link_speed[speed_level];
+}
+
static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -3271,7 +3323,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
- struct amdgpu_device *adev = hwmgr->adev;
struct pp_clock_levels_with_latency clocks;
struct vega20_single_dpm_table *fclk_dpm_table =
&(data->dpm_table.fclk_table);
@@ -3365,12 +3416,10 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
- current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
- PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
- >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
- current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
- PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
- >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ current_gen_speed =
+ vega20_get_current_pcie_link_speed_level(hwmgr);
+ current_lane_width =
+ vega20_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (i == 1 && data->pcie_parameters_override) {
gen_speed = data->pcie_gen_level1;
@@ -4212,6 +4261,72 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
return ret;
}
+static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
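+ /* 0xFF-fill first so any field not explicitly set below reads back as "unavailable" */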
+ memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+ gpu_metrics->common_header.structure_size =
+ sizeof(struct gpu_metrics_v1_0);
+ gpu_metrics->common_header.format_revision = 1;
+ gpu_metrics->common_header.content_revision = 0;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
+ void **table)
+{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ struct gpu_metrics_v1_0 *gpu_metrics =
+ &data->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ uint32_t fan_speed_rpm;
+ int ret;
+
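+ /* Bypass the metrics cache so the exported table reflects the current hardware state */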
+ ret = vega20_get_metrics_table(hwmgr, &metrics, true);
+ if (ret)
+ return ret;
+
+ vega20_init_gpu_metrics_v1_0(gpu_metrics);
+
+ gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+ gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+ gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
+ gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
+
+ gpu_metrics->pcie_link_width =
+ vega20_get_current_pcie_link_width(hwmgr);
+ gpu_metrics->pcie_link_speed =
+ vega20_get_current_pcie_link_speed(hwmgr);
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_0);
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4282,6 +4397,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
.set_df_cstate = vega20_set_df_cstate,
.set_xgmi_pstate = vega20_set_xgmi_pstate,
+ .get_gpu_metrics = vega20_get_gpu_metrics,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h
index 2c3125f82b24..075c0094da9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h
@@ -527,6 +527,7 @@ struct vega20_hwmgr {
unsigned long metrics_time;
SmuMetrics_t metrics_table;
+ struct gpu_metrics_v1_0 gpu_metrics_table;
bool pcie_parameters_override;
uint32_t pcie_gen_level1;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h
index 613cb1989b3d..613cb1989b3d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c
index d7cc3d2d9e17..d7cc3d2d9e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h
index d68c734c0f4e..d68c734c0f4e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h
index 2222e29405c6..2222e29405c6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_pptable.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
index 7a7f15d0c53a..1f9082539457 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
@@ -890,14 +890,12 @@ static int init_powerplay_table_information(
power_saving_clock_count);
}
- pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL);
+ pptable_information->smc_pptable = kmemdup(&(powerplay_table->smcPPTable),
+ sizeof(PPTable_t),
+ GFP_KERNEL);
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- memcpy(pptable_information->smc_pptable,
- &(powerplay_table->smcPPTable),
- sizeof(PPTable_t));
-
result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
if (result)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h
index 846c2cb40b35..846c2cb40b35 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index 364162ddaa9c..364162ddaa9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
index 2d1769bbd24e..2d1769bbd24e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
index 4b3faaccecb9..4b3faaccecb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h
index 6df0ed41317c..6df0ed41317c 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c
index 2d9ab6b8be66..2d9ab6b8be66 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h
index 8463245f424f..8463245f424f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h
diff --git a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h
index 055321f61ca7..055321f61ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/r600_dpm.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index b5986d19dc08..b5986d19dc08 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h
index bc0be6818e21..bc0be6818e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/pm/powerplay/si_smc.c
index 8f994ffa9cd1..8f994ffa9cd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_smc.c
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
index d2930eceaf3c..d2930eceaf3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile
index 6c59c61a0d81..6c59c61a0d81 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index e4d1f3d66ef4..329bf4d44bbc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS,
- VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
return 0;
}
+static void ci_reset_smc(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+}
+
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0,
+ ck_disable, 1);
+}
+
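+/* Halt the SMC: hold it in reset, then gate its clock */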
+static int ci_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ ci_reset_smc(hwmgr);
+ ci_stop_smc_clock(hwmgr);
+
+ return 0;
+}
+
const struct pp_smumgr_func ci_smu_funcs = {
.name = "ci_smu",
.smu_init = ci_smu_init,
@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
.update_smc_table = ci_update_smc_table,
+ .stop_smc = ci_stop_smc,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h
index a8282705c569..a8282705c569 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index ecb9ee46d6b3..ecb9ee46d6b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h
index 6d3746268ccf..6d3746268ccf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 431ad2fd38df..431ad2fd38df 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h
index f32c506779c9..f32c506779c9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index c3d2e6dcf62a..c3d2e6dcf62a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h
index 1ec425df9eda..1ec425df9eda 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index ea2279bb8cbf..ea2279bb8cbf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h
index 9c2be74a2b2f..9c2be74a2b2f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index aae25243eb10..aae25243eb10 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
index e7303dc8c260..e7303dc8c260 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
index 76d4f12ceedf..76d4f12ceedf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h
index c7b61222d258..c7b61222d258 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
index adfbcbe5d113..8a9aee85043e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
@@ -61,9 +61,6 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
uint32_t reg;
uint32_t ret;
- /* Due to the L1 policy problem under SRIOV, we have to use
- * mmMP1_SMN_C2PMSG_103 as the driver response register
- */
if (hwmgr->pp_one_vf) {
reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
@@ -148,10 +145,6 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
smu9_wait_for_response(hwmgr);
- /* Due to the L1 policy problem under SRIOV, we have to use
- * mmMP1_SMN_C2PMSG_101 as the driver message register and
- * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
- */
if (hwmgr->pp_one_vf) {
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h
index 1462279ca128..1462279ca128 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
index b6fb48066841..b6921db3c130 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c
@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
return -EINVAL;
}
+
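+/* Ask the backend to stop the SMC; backends without a stop_smc hook treat this as a no-op */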
+int smum_stop_smc(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->smumgr_funcs->stop_smc)
+ return hwmgr->smumgr_funcs->stop_smc(hwmgr);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 4bfadb49521b..4bfadb49521b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h
index d664fedd3d85..d664fedd3d85 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
index 1e222c5d91a4..daf122f24f23 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
@@ -209,11 +209,13 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
int ret;
struct cgs_firmware_info info = {0};
- ret = cgs_get_firmware_info(hwmgr->device,
- CGS_UCODE_ID_SMU,
- &info);
- if (ret || !info.kptr)
- return -EINVAL;
+ if (!amdgpu_sriov_vf((struct amdgpu_device *)hwmgr->adev)) {
+ ret = cgs_get_firmware_info(hwmgr->device,
+ CGS_UCODE_ID_SMU,
+ &info);
+ if (ret || !info.kptr)
+ return -EINVAL;
+ }
priv = kzalloc(sizeof(struct vega10_smumgr), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h
index bad760f22624..bad760f22624 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
index f54df76537e4..f54df76537e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h
index aeec965ce81f..aeec965ce81f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index cf43629d29d2..cf43629d29d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h
index 62ebbfd6068f..62ebbfd6068f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
index 0ecc18b55ffb..0ecc18b55ffb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h
index 2b6558238500..2b6558238500 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/Makefile b/drivers/gpu/drm/amd/pm/swsmu/Makefile
new file mode 100644
index 000000000000..6f281990b7b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/Makefile
@@ -0,0 +1,36 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+AMD_SWSMU_PATH = ../pm/swsmu
+
+SWSMU_LIBS = smu11 smu12
+
+AMD_SWSMU = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/swsmu/,$(SWSMU_LIBS)))
+
+include $(AMD_SWSMU)
+
+SWSMU_MGR = amdgpu_smu.o \
+ smu_cmn.o
+
+AMD_SWSMU_POWER = $(addprefix $(AMD_SWSMU_PATH)/,$(SWSMU_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_SWSMU_POWER)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8dc5abb6931e..b1e5ec01527b 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -361,20 +361,16 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
int ret = 0;
uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
- mutex_lock(&feature->mutex);
bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
SMU_FEATURE_MAX/32);
if (ret)
return ret;
- mutex_lock(&feature->mutex);
bitmap_or(feature->allowed, feature->allowed,
(unsigned long *)allowed_feature_mask,
feature->feature_num);
- mutex_unlock(&feature->mutex);
return ret;
}
@@ -421,6 +417,9 @@ static int smu_early_init(void *handle)
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
mutex_init(&smu->mutex);
+ mutex_init(&smu->smu_baco.mutex);
+ smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+ smu->smu_baco.platform_support = false;
return smu_set_funcs(adev);
}
@@ -473,6 +472,12 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
+ ret = smu_post_init(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to post smu init!\n");
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu);
if (ret) {
dev_err(adev->dev, "Failed to setup default OD settings!\n");
@@ -493,6 +498,8 @@ static int smu_late_init(void *handle)
smu_get_unique_id(smu);
+ smu_get_fan_parameters(smu);
+
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT,
@@ -565,9 +572,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
struct smu_table *tables = smu_table->tables;
struct smu_table *driver_table = &(smu_table->driver_table);
- if (!tables)
- return 0;
-
if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
&tables[SMU_TABLE_PMSTATUSLOG].mc_address,
@@ -644,6 +648,45 @@ static int smu_free_memory_pool(struct smu_context *smu)
return 0;
}
+static int smu_alloc_dummy_read_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *dummy_read_1_table =
+ &smu_table->dummy_read_1_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
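+ /* 0x40000 bytes (256KB) of page-aligned VRAM reserved for dummy reads */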
+ dummy_read_1_table->size = 0x40000;
+ dummy_read_1_table->align = PAGE_SIZE;
+ dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ dummy_read_1_table->size,
+ dummy_read_1_table->align,
+ dummy_read_1_table->domain,
+ &dummy_read_1_table->bo,
+ &dummy_read_1_table->mc_address,
+ &dummy_read_1_table->cpu_addr);
+ if (ret)
+ dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
+
+ return ret;
+}
+
+static void smu_free_dummy_read_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *dummy_read_1_table =
+ &smu_table->dummy_read_1_table;
+
+ amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
+ &dummy_read_1_table->mc_address,
+ &dummy_read_1_table->cpu_addr);
+
+ memset(dummy_read_1_table, 0, sizeof(struct smu_table));
+}
+
static int smu_smc_table_sw_init(struct smu_context *smu)
{
int ret;
@@ -679,6 +722,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
if (ret)
return ret;
+ ret = smu_alloc_dummy_read_table(smu);
+ if (ret)
+ return ret;
+
ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
if (ret)
return ret;
@@ -692,6 +739,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+ smu_free_dummy_read_table(smu);
+
ret = smu_free_memory_pool(smu);
if (ret)
return ret;
@@ -723,6 +772,19 @@ static void smu_throttling_logging_work_fn(struct work_struct *work)
smu_log_thermal_throttling(smu);
}
+static void smu_interrupt_work_fn(struct work_struct *work)
+{
+ struct smu_context *smu = container_of(work, struct smu_context,
+ interrupt_work);
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
+ smu->ppt_funcs->interrupt_work(smu);
+
+ mutex_unlock(&smu->mutex);
+}
+
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -736,15 +798,13 @@ static int smu_sw_init(void *handle)
bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
- mutex_init(&smu->smu_baco.mutex);
- smu->smu_baco.state = SMU_BACO_STATE_EXIT;
- smu->smu_baco.platform_support = false;
-
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
mutex_init(&smu->message_lock);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
+ INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
+ atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -774,10 +834,13 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to load smu firmware!\n");
+ return ret;
+ }
}
ret = smu_smc_table_sw_init(smu);
@@ -955,24 +1018,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- ret = smu_disable_umc_cdr_12gbps_workaround(smu);
- if (ret) {
- dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
- return ret;
- }
-
- /*
- * For Navi1X, manually switch it to AC mode as PMFW
- * may boot it with DC mode.
- */
- ret = smu_set_power_source(smu,
- adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
- SMU_POWER_SOURCE_DC);
- if (ret) {
- dev_err(adev->dev, "Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
- return ret;
- }
-
/*
* Set initialized values (get from vbios) to dpm tables context such as
* gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
@@ -1109,7 +1154,7 @@ static int smu_disable_dpms(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
bool use_baco = !smu->is_apu &&
- ((adev->in_gpu_reset &&
+ ((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
@@ -1165,6 +1210,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
int ret = 0;
cancel_work_sync(&smu->throttling_logging_work);
+ cancel_work_sync(&smu->interrupt_work);
ret = smu_disable_thermal_alert(smu);
if (ret) {
@@ -1185,7 +1231,6 @@ static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- int ret = 0;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1201,11 +1246,7 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- ret = smu_smc_hw_cleanup(smu);
- if (ret)
- return ret;
-
- return 0;
+ return smu_smc_hw_cleanup(smu);
}
int smu_reset(struct smu_context *smu)
@@ -1445,6 +1486,8 @@ static int smu_enable_umd_pstate(void *handle,
amdgpu_device_ip_set_clockgating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
+ smu_gfx_ulv_control(smu, false);
+ smu_deep_sleep_control(smu, false);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1452,6 +1495,8 @@ static int smu_enable_umd_pstate(void *handle,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
smu_dpm_ctx->enable_umd_pstate = false;
+ smu_deep_sleep_control(smu, true);
+ smu_gfx_ulv_control(smu, true);
amdgpu_device_ip_set_clockgating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_GATE);
@@ -1783,25 +1828,19 @@ int smu_write_watermarks_table(struct smu_context *smu)
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+ struct pp_smu_wm_range_sets *clock_ranges)
{
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (smu->disable_watermark)
+ return 0;
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- ret = smu_set_watermarks_table(smu, clock_ranges);
+ mutex_lock(&smu->mutex);
- if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
- }
+ ret = smu_set_watermarks_table(smu, clock_ranges);
mutex_unlock(&smu->mutex);
@@ -2191,31 +2230,44 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
int ret = 0;
+ uint32_t percent;
+ uint32_t current_rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_fan_speed_percent)
- ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+ if (smu->ppt_funcs->get_fan_speed_rpm) {
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, &current_rpm);
+ if (!ret) {
+ percent = current_rpm * 100 / smu->fan_max_rpm;
+ *speed = percent > 100 ? 100 : percent;
+ }
+ }
mutex_unlock(&smu->mutex);
+
return ret;
}
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
int ret = 0;
+ uint32_t rpm;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->set_fan_speed_percent)
- ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+ if (smu->ppt_funcs->set_fan_speed_rpm) {
+ if (speed > 100)
+ speed = 100;
+ rpm = speed * smu->fan_max_rpm / 100;
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
+ }
mutex_unlock(&smu->mutex);
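Both wrappers above now derive percent from RPM through smu->fan_max_rpm (populated by the new get_fan_parameters hooks further down in this series). A minimal standalone sketch of that mapping, with hypothetical helper names and assuming a non-zero max RPM:

	static inline uint32_t fan_rpm_to_percent(uint32_t rpm, uint32_t max_rpm)
	{
		uint32_t percent = rpm * 100 / max_rpm;	/* caller ensures max_rpm != 0 */

		return percent > 100 ? 100 : percent;
	}

	static inline uint32_t fan_percent_to_rpm(uint32_t percent, uint32_t max_rpm)
	{
		if (percent > 100)
			percent = 100;	/* clamp, mirroring smu_set_fan_speed_percent() */

		return percent * max_rpm / 100;
	}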
@@ -2255,19 +2307,6 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
return ret;
}
-int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- if (smu->ppt_funcs->set_active_display_count)
- ret = smu->ppt_funcs->set_active_display_count(smu, count);
-
- return ret;
-}
-
int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
@@ -2637,3 +2676,40 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
return ret;
}
+
+ssize_t smu_sys_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ ssize_t size;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->ppt_funcs->get_gpu_metrics)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&smu->mutex);
+
+ size = smu->ppt_funcs->get_gpu_metrics(smu, table);
+
+ mutex_unlock(&smu->mutex);
+
+ return size;
+}
+
+int smu_enable_mgpu_fan_boost(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->enable_mgpu_fan_boost)
+ ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
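A hypothetical caller sketch for smu_sys_get_gpu_metrics() above (not part of this patch): on success the returned pointer references SMU-owned storage, so a caller copies size bytes out rather than taking ownership.

	void *table;
	ssize_t size;

	size = smu_sys_get_gpu_metrics(smu, &table);
	if (size < 0)
		return size;	/* e.g. -EOPNOTSUPP when unsupported or pm disabled */

	memcpy(out_buf, table, size);	/* out_buf: hypothetical destination buffer */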
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile
new file mode 100644
index 000000000000..f98d97192635
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'smu manager' sub-component of powerplay.
+# It provides the smu management services for the driver.
+
+SMU11_MGR = arcturus_ppt.o \
+ navi10_ppt.o \
+ sienna_cichlid_ppt.o \
+ smu_v11_0.o
+
+AMD_SWSMU_SMU11MGR = $(addprefix $(AMD_SWSMU_PATH)/smu11/,$(SMU11_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU11MGR)
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9582b38162f0..fc376281e629 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -79,6 +79,8 @@
/* possible frequency drift (1MHz) */
#define EPSILON 1
+#define smnPCIE_ESM_CTRL 0x111003D0
+
static const struct cmn2asic_msg_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -234,6 +236,13 @@ static int arcturus_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table) {
+ kfree(smu_table->metrics_table);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -377,11 +386,9 @@ static int arcturus_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
- mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
- mutex_unlock(&smu_baco->mutex);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -542,19 +549,12 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
mutex_lock(&smu->metrics_lock);
- if (!smu_table->metrics_time ||
- time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_SMU_METRICS,
- 0,
- smu_table->metrics_table,
- false);
- if (ret) {
- dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
- mutex_unlock(&smu->metrics_lock);
- return ret;
- }
- smu_table->metrics_time = jiffies;
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
}
switch (member) {
@@ -897,9 +897,10 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
return ret;
}
- if (smu_version >= 0x361200) {
+ if ((smu_version >= 0x361200) &&
+ (smu_version <= 0x361a00)) {
dev_err(smu->adev->dev, "Forcing clock level is not supported with "
- "54.18 and onwards SMU firmwares\n");
+ "54.18 - 54.26(included) SMU firmwares\n");
return -EOPNOTSUPP;
}
@@ -1120,29 +1121,23 @@ static int arcturus_get_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- return arcturus_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ switch (smu_v11_0_get_fan_control_mode(smu)) {
+ case AMD_FAN_CTRL_AUTO:
+ return arcturus_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+ default:
+ return smu_v11_0_get_fan_speed_rpm(smu, speed);
+ }
}
-static int arcturus_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int arcturus_get_fan_parameters(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t percent, current_rpm;
- int ret = 0;
-
- if (!speed)
- return -EINVAL;
-
- ret = arcturus_get_fan_speed_rpm(smu, &current_rpm);
- if (ret)
- return ret;
- percent = current_rpm * 100 / pptable->FanMaximumRpm;
- *speed = percent > 100 ? 100 : percent;
+ smu->fan_max_rpm = pptable->FanMaximumRpm;
- return ret;
+ return 0;
}
static int arcturus_get_power_limit(struct smu_context *smu)
@@ -1392,9 +1387,10 @@ static int arcturus_set_performance_level(struct smu_context *smu,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- if (smu_version >= 0x361200) {
+ if ((smu_version >= 0x361200) &&
+ (smu_version <= 0x361a00)) {
dev_err(smu->adev->dev, "Forcing clock level is not supported with "
- "54.18 and onwards SMU firmwares\n");
+ "54.18 - 54.26(included) SMU firmwares\n");
return -EOPNOTSUPP;
}
break;
@@ -2240,6 +2236,77 @@ static void arcturus_log_thermal_throttling_event(struct smu_context *smu)
dev_warn(adev->dev, "WARN: GPU thermal throttling temperature reached, expect performance decrease. %s.\n",
log_buf);
+ kgd2kfd_smi_event_throttle(smu->adev->kfd.dev, throttler_status);
+}
+
+static int arcturus_get_current_pcie_link_speed(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t esm_ctrl;
+
+ /* TODO: confirm this on real target */
+ esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
+ if ((esm_ctrl >> 15) & 0x1FFFF)
+ return (((esm_ctrl >> 8) & 0x3F) + 128);
+
+ return smu_v11_0_get_current_pcie_link_speed(smu);
+}
+
+static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_0 *gpu_metrics =
+ (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics,
+ true);
+ if (ret)
+ return ret;
+
+ smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+ gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics.TemperatureHBM;
+ gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem;
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+ gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+ gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+ gpu_metrics->average_vclk0_frequency = metrics.AverageVclkFrequency;
+ gpu_metrics->average_dclk0_frequency = metrics.AverageDclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+ gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+ gpu_metrics->pcie_link_width =
+ smu_v11_0_get_current_pcie_link_width(smu);
+ gpu_metrics->pcie_link_speed =
+ arcturus_get_current_pcie_link_speed(smu);
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_0);
}
static const struct pptable_funcs arcturus_ppt_funcs = {
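Decode sketch for arcturus_get_current_pcie_link_speed() above (an assumption inferred from the masks, in line with the in-code TODO): a non-zero value in bits 15 and up of smnPCIE_ESM_CTRL means Extended Speed Mode is engaged, in which case the current rate is taken from bits 8..13 offset by 128; otherwise the generic smu_v11_0 readout applies.

	/* Hypothetical helper mirroring the decode above. */
	static inline int esm_ctrl_to_link_speed(uint32_t esm_ctrl)
	{
		if ((esm_ctrl >> 15) & 0x1FFFF)			/* ESM engaged? */
			return ((esm_ctrl >> 8) & 0x3F) + 128;	/* ESM rate + 128 */

		return -1;	/* not in ESM: fall back to the generic path */
	}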
@@ -2254,7 +2321,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.print_clk_levels = arcturus_print_clk_levels,
.force_clk_levels = arcturus_force_clk_levels,
.read_sensor = arcturus_read_sensor,
- .get_fan_speed_percent = arcturus_get_fan_speed_percent,
.get_fan_speed_rpm = arcturus_get_fan_speed_rpm,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
@@ -2300,7 +2366,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
@@ -2319,6 +2384,11 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = arcturus_get_gpu_metrics,
+ .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+ .deep_sleep_control = smu_v11_0_deep_sleep_control,
+ .get_fan_parameters = arcturus_get_fan_parameters,
+ .interrupt_work = smu_v11_0_interrupt_work,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h
index d756b16924b8..d756b16924b8 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index b1547a83e721..ef1a62e86a0e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -45,6 +45,7 @@
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "smu_cmn.h"
+#include "smu_11_0_cdr_table.h"
/*
* DO NOT use these for err/warn/info/debug messages.
@@ -138,6 +139,10 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0),
MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0),
MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
+ MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
+ MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
+ MSG_MAP(GET_UMC_FW_WA, PPSMC_MSG_GetUMCFWWA, 0),
};
static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
@@ -278,9 +283,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_FW_CTF_BIT)
| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
@@ -290,11 +292,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
- | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
- | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
-
if (adev->pm.pp_feature & PP_ULV_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
@@ -319,19 +316,24 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
if (smu->dc_controlled_by_gpio)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
- /* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
- if (is_asic_secure(smu)) {
- /* only for navi10 A0 */
- if ((adev->asic_type == CHIP_NAVI10) &&
- (adev->rev_id == 0)) {
- *(uint64_t *)feature_mask &=
- ~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
- | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
- | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
- *(uint64_t *)feature_mask &=
- ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- }
- }
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ /* DPM UCLK enablement should be skipped for navi10 A0 secure board */
+ if (!(is_asic_secure(smu) &&
+ (adev->asic_type == CHIP_NAVI10) &&
+ (adev->rev_id == 0)) &&
+ (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
+ | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
+ | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
+
+ /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
+ if (is_asic_secure(smu) &&
+ (adev->asic_type == CHIP_NAVI10) &&
+ (adev->rev_id == 0))
+ *(uint64_t *)feature_mask &=
+ ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
return 0;
}
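The bit manipulation above relies on FEATURE_MASK(); a sketch of the assumed definition (hypothetical here, but consistent with the 64-bit casts):

	#define FEATURE_MASK(feature)	(1ULL << (feature))

so, for example, enabling UCLK DPM ORs three related bits (DPM_UCLK, MEM_VDDCI_SCALING, MEM_MVDD_SCALING) into the 64-bit allowed-feature mask that is later handed to the PMFW.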
@@ -346,11 +348,9 @@ static int navi10_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
- mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
- mutex_unlock(&smu_baco->mutex);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -456,13 +456,18 @@ static int navi10_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ if (adev->asic_type == CHIP_NAVI12)
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ else
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
@@ -473,16 +478,30 @@ static int navi10_tables_init(struct smu_context *smu)
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ?
+ sizeof(SmuMetrics_NV12_t) :
+ sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
- return -ENOMEM;
+ goto err0_out;
smu_table->metrics_time = 0;
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
if (!smu_table->watermarks_table)
- return -ENOMEM;
+ goto err2_out;
return 0;
+
+err2_out:
+ kfree(smu_table->gpu_metrics_table);
+err1_out:
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
}
static int navi10_get_smu_metrics_data(struct smu_context *smu,
@@ -490,23 +509,22 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ /*
+ * This works for NV12 as well: although NV12 uses a different
+ * SmuMetrics structure from the other NV1X ASICs, they share the
+ * same offsets for the leading members (those used here).
+ */
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
- if (!smu_table->metrics_time ||
- time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_SMU_METRICS,
- 0,
- smu_table->metrics_table,
- false);
- if (ret) {
- dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
- mutex_unlock(&smu->metrics_lock);
- return ret;
- }
- smu_table->metrics_time = jiffies;
+
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
}
switch (member) {
@@ -909,7 +927,6 @@ static int navi10_print_clk_levels(struct smu_context *smu,
uint32_t gen_speed, lane_width;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct amdgpu_device *adev = smu->adev;
PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
@@ -963,12 +980,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
case SMU_PCIE:
- gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
- PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
- >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
- lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
- PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
- >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+ lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
for (i = 0; i < NUM_LINK_LEVELS; i++)
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
@@ -1353,22 +1366,13 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu,
speed);
}
-static int navi10_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int navi10_get_fan_parameters(struct smu_context *smu)
{
- int ret = 0;
- uint32_t percent = 0;
- uint32_t current_rpm;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
- if (ret)
- return ret;
+ smu->fan_max_rpm = pptable->FanMaximumRpm;
- percent = current_rpm * 100 / pptable->FanMaximumRpm;
- *speed = percent > 100 ? 100 : percent;
-
- return ret;
+ return 0;
}
static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
@@ -1592,57 +1596,43 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
}
static int navi10_set_watermarks_table(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+ struct pp_smu_wm_range_sets *clock_ranges)
{
Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
int i;
if (clock_ranges) {
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
- table->WatermarkRow[1][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
}
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
- table->WatermarkRow[0][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
}
smu->watermarks_bitmap |= WATERMARKS_EXIST;
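Unit and layout note for the rewritten loops above (sketch): the old dm_pp interface carried kHz values that each needed a divide-by-1000 plus a cpu_to_le16() truncation, e.g.

	table->WatermarkRow[WM_DCEFCLK][i].MinClock =
		cpu_to_le16((uint16_t)(khz_value / 1000));	/* old style */

whereas pp_smu_wm_range_sets already carries MHz, so the values are stored directly; reader watermark sets fill the WM_DCEFCLK row and writer sets the WM_SOCCLK row, replacing the bare [1]/[0] row indices.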
@@ -2186,59 +2176,46 @@ static int navi10_run_btc(struct smu_context *smu)
return ret;
}
-static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
+static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
{
- int result = 0;
-
- if (!enable)
- result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
- else
- result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
-
- return result;
-}
+ struct amdgpu_device *adev = smu->adev;
-static inline bool navi10_need_umc_cdr_12gbps_workaround(struct amdgpu_device *adev)
-{
- if (adev->asic_type != CHIP_NAVI10)
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
return false;
- if (adev->pdev->device == 0x731f &&
- (adev->pdev->revision == 0xc2 ||
- adev->pdev->revision == 0xc3 ||
- adev->pdev->revision == 0xca ||
- adev->pdev->revision == 0xcb))
+ if (adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14)
return true;
- else
- return false;
+
+ return false;
}
-static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
+static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
{
uint32_t uclk_count, uclk_min, uclk_max;
- uint32_t smu_version;
int ret = 0;
- if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
- return 0;
-
- ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (ret)
- return ret;
-
- /* This workaround is available only for 42.50 or later SMC firmwares */
- if (smu_version < 0x2A3200)
+ /* This workaround can be applied only with uclk dpm enabled */
+ if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
return 0;
ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
if (ret)
return ret;
- ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
+ ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
if (ret)
return ret;
- ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
+ /*
+ * The NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750MHz (0x2EE).
+ * This workaround is needed only when the max uclk frequency is
+ * not greater than that.
+ */
+ if (uclk_max > 0x2EE)
+ return 0;
+
+ ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
if (ret)
return ret;
@@ -2255,8 +2232,97 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
/*
* In this case, SMU already disabled dummy pstate during enablement
* of UCLK DPM, so we have to re-enable it.
- * */
- return navi10_dummy_pstate_control(smu, true);
+ */
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+}
+
+static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *dummy_read_table =
+ &smu_table->dummy_read_1_table;
+ char *dummy_table = dummy_read_table->cpu_addr;
+ int ret = 0;
+ uint32_t i;
+
+ for (i = 0; i < 0x40000; i += 0x1000 * 2) {
+ memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
+ dummy_table += 0x1000;
+ memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
+ dummy_table += 0x1000;
+ }
+
+ amdgpu_asic_flush_hdp(smu->adev, NULL);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
+ upper_32_bits(dummy_read_table->mc_address),
+ NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
+ lower_32_bits(dummy_read_table->mc_address),
+ NULL);
+}
+
+static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint8_t umc_fw_greater_than_v136 = false;
+ uint8_t umc_fw_disable_cdr = false;
+ uint32_t pmfw_version;
+ uint32_t param;
+ int ret = 0;
+
+ if (!navi10_need_umc_cdr_workaround(smu))
+ return 0;
+
+ ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu version!\n");
+ return ret;
+ }
+
+ /*
+ * The messages below are only supported by Navi10 42.53.0 and later
+ * PMFWs and Navi14 53.29.0 and later PMFWs.
+ * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
+ * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
+ * - PPSMC_MSG_GetUMCFWWA
+ */
+ if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
+ ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GET_UMC_FW_WA,
+ 0,
+ &param);
+ if (ret)
+ return ret;
+
+ /* First bit indicates if the UMC f/w is newer than v136 */
+ umc_fw_greater_than_v136 = param & 0x1;
+
+ /* Second bit indicates if hybrid-cdr is disabled */
+ umc_fw_disable_cdr = param & 0x2;
+
+ /* w/a only allowed if UMC f/w is <= 136 */
+ if (umc_fw_greater_than_v136)
+ return 0;
+
+ if (umc_fw_disable_cdr) {
+ if (adev->asic_type == CHIP_NAVI10)
+ return navi10_umc_hybrid_cdr_workaround(smu);
+ } else {
+ return navi10_set_dummy_pstates_table_location(smu);
+ }
+ } else {
+ if (adev->asic_type == CHIP_NAVI10)
+ return navi10_umc_hybrid_cdr_workaround(smu);
+ }
+
+ return 0;
}
static void navi10_fill_i2c_req(SwI2cRequest_t *req, bool write,
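Two sketches for the new workaround path above. First, the fill loop in navi10_set_dummy_pstates_table_location() tiles the 0x40000-byte dummy read buffer with alternating 4 KiB PRBS7 patterns, i.e. 0x40000 / (2 * 0x1000) = 32 non-DBI/DBI pairs. Second, the GET_UMC_FW_WA response decode used in navi10_run_umc_cdr_workaround(), factored out with hypothetical helper names:

	/* bit 0: UMC f/w is newer than v136 (workaround no longer needed) */
	static inline bool umc_fw_above_v136(uint32_t param)
	{
		return param & 0x1;
	}

	/* bit 1: hybrid CDR already disabled by the UMC f/w */
	static inline bool umc_hybrid_cdr_disabled(uint32_t param)
	{
		return param & 0x2;
	}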
@@ -2463,37 +2529,136 @@ static const struct i2c_algorithm navi10_i2c_algo = {
.functionality = navi10_i2c_func,
};
-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
+ void **table)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_0 *gpu_metrics =
+ (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+ struct amdgpu_device *adev = smu->adev;
+ SmuMetrics_NV12_t nv12_metrics = { 0 };
+ SmuMetrics_t metrics;
+ int ret = 0;
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_SPD;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &navi10_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ mutex_lock(&smu->metrics_lock);
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ true);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
- return res;
+ memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
+ if (adev->asic_type == CHIP_NAVI12)
+ memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
+
+ mutex_unlock(&smu->metrics_lock);
+
+ smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+ gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics.TemperatureMem;
+ gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+ if (adev->asic_type == CHIP_NAVI12) {
+ gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator;
+ gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency;
+ gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency;
+ gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage;
+ }
+
+ gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+ gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+ gpu_metrics->pcie_link_width =
+ smu_v11_0_get_current_pcie_link_width(smu);
+ gpu_metrics->pcie_link_speed =
+ smu_v11_0_get_current_pcie_link_speed(smu);
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_0);
}
-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t param = 0;
+
+ /* Navi12 does not support this */
+ if (adev->asic_type == CHIP_NAVI12)
+ return 0;
+
+ /* Workaround for WS SKU */
+ if (adev->pdev->device == 0x7312 &&
+ adev->pdev->revision == 0)
+ param = 0xD188;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMGpuFanBoostLimitRpm,
+ param,
+ NULL);
}
+static int navi10_post_smu_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = navi10_run_umc_cdr_workaround(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
+ return ret;
+ }
+
+ if (!smu->dc_controlled_by_gpio) {
+ /*
+ * For Navi1X, manually switch it to AC mode as PMFW
+ * may boot it with DC mode.
+ */
+ ret = smu_v11_0_set_power_source(smu,
+ adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (ret) {
+ dev_err(adev->dev, "Failed to switch to %s mode!\n",
+ adev->pm.ac_power ? "AC" : "DC");
+ return ret;
+ }
+ }
+
+ return ret;
+}
static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
- .i2c_init = navi10_i2c_control_init,
- .i2c_fini = navi10_i2c_control_fini,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
.populate_umd_state_clk = navi10_populate_umd_state_clk,
@@ -2502,7 +2667,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_config_changed = navi10_display_config_changed,
.notify_smc_display_config = navi10_notify_smc_display_config,
.is_dpm_running = navi10_is_dpm_running,
- .get_fan_speed_percent = navi10_get_fan_speed_percent,
.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
@@ -2546,7 +2710,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
@@ -2563,10 +2726,16 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_default_od_settings = navi10_set_default_od_settings,
.od_edit_dpm_table = navi10_od_edit_dpm_table,
.run_btc = navi10_run_btc,
- .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
.set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = navi10_get_gpu_metrics,
+ .enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
+ .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+ .deep_sleep_control = smu_v11_0_deep_sleep_control,
+ .get_fan_parameters = navi10_get_fan_parameters,
+ .post_init = navi10_post_smu_init,
+ .interrupt_work = smu_v11_0_interrupt_work,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h
index 2abb4ba01db1..84dc5a1b6830 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h
@@ -49,9 +49,6 @@
#define NAVI10_VOLTAGE_SCALE (4)
-#define smnPCIE_LC_SPEED_CNTL 0x11140290
-#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
-
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index ace682fde22f..895d89bea7fa 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -126,6 +126,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -150,14 +151,17 @@ static struct cmn2asic_mapping sienna_cichlid_feature_mask_map[SMU_FEATURE_COUNT
FEA_MAP(DPM_GFXCLK),
FEA_MAP(DPM_GFX_GPO),
FEA_MAP(DPM_UCLK),
+ FEA_MAP(DPM_FCLK),
FEA_MAP(DPM_SOCCLK),
FEA_MAP(DPM_MP0CLK),
FEA_MAP(DPM_LINK),
FEA_MAP(DPM_DCEFCLK),
+ FEA_MAP(DPM_XGMI),
FEA_MAP(MEM_VDDCI_SCALING),
FEA_MAP(MEM_MVDD_SCALING),
FEA_MAP(DS_GFXCLK),
FEA_MAP(DS_SOCCLK),
+ FEA_MAP(DS_FCLK),
FEA_MAP(DS_LCLK),
FEA_MAP(DS_DCEFCLK),
FEA_MAP(DS_UCLK),
@@ -297,11 +301,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
- mutex_lock(&smu_baco->mutex);
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
- mutex_unlock(&smu_baco->mutex);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -388,14 +390,26 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
- return -ENOMEM;
+ goto err0_out;
smu_table->metrics_time = 0;
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err1_out;
+
smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
if (!smu_table->watermarks_table)
- return -ENOMEM;
+ goto err2_out;
return 0;
+
+err2_out:
+ kfree(smu_table->gpu_metrics_table);
+err1_out:
+ kfree(smu_table->metrics_table);
+err0_out:
+ return -ENOMEM;
}
static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
@@ -407,19 +421,13 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
int ret = 0;
mutex_lock(&smu->metrics_lock);
- if (!smu_table->metrics_time ||
- time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_SMU_METRICS,
- 0,
- smu_table->metrics_table,
- false);
- if (ret) {
- dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
- mutex_unlock(&smu->metrics_lock);
- return ret;
- }
- smu_table->metrics_time = jiffies;
+
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
}
switch (member) {
@@ -447,6 +455,9 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
case METRICS_CURR_DCEFCLK:
*value = metrics->CurrClock[PPCLK_DCEFCLK];
break;
+ case METRICS_CURR_FCLK:
+ *value = metrics->CurrClock[PPCLK_FCLK];
+ break;
case METRICS_AVERAGE_GFXCLK:
if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
*value = metrics->AverageGfxclkFrequencyPostDs;
@@ -943,23 +954,23 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
cur_value == freq_values[2] ? 2 : 1;
- if (mark_index != 1)
- freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
- for (i = 0; i < 3; i++) {
+ count = 3;
+ if (mark_index != 1) {
+ count = 2;
+ freq_values[1] = freq_values[2];
+ }
+
+ for (i = 0; i < count; i++) {
size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
- i == mark_index ? "*" : "");
+ cur_value == freq_values[i] ? "*" : "");
}
}
break;
case SMU_PCIE:
- gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
- PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
- >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
- lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
- PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
- >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+ lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
for (i = 0; i < NUM_LINK_LEVELS; i++)
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
@@ -1167,26 +1178,17 @@ static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
return -EINVAL;
return sienna_cichlid_get_smu_metrics_data(smu,
- METRICS_CURR_FANSPEED,
- speed);
+ METRICS_CURR_FANSPEED,
+ speed);
}
-static int sienna_cichlid_get_fan_speed_percent(struct smu_context *smu,
- uint32_t *speed)
+static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
{
- int ret = 0;
- uint32_t percent = 0;
- uint32_t current_rpm;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = sienna_cichlid_get_fan_speed_rpm(smu, &current_rpm);
- if (ret)
- return ret;
+ smu->fan_max_rpm = pptable->FanMaximumRpm;
- percent = current_rpm * 100 / pptable->FanMaximumRpm;
- *speed = percent > 100 ? 100 : percent;
-
- return ret;
+ return 0;
}
static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf)
@@ -1410,58 +1412,43 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
}
static int sienna_cichlid_set_watermarks_table(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
+ struct pp_smu_wm_range_sets *clock_ranges)
{
Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
int i;
if (clock_ranges) {
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
- table->WatermarkRow[1][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[1][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCEFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
}
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
- table->WatermarkRow[0][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MinUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
- 1000));
- table->WatermarkRow[0][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinUclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
}
smu->watermarks_bitmap |= WATERMARKS_EXIST;
@@ -2292,11 +2279,6 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
dev_info(smu->adev->dev, "SkuReserved[8] = 0x%x\n", pptable->SkuReserved[8]);
- dev_info(smu->adev->dev, "SkuReserved[9] = 0x%x\n", pptable->SkuReserved[9]);
- dev_info(smu->adev->dev, "SkuReserved[10] = 0x%x\n", pptable->SkuReserved[10]);
- dev_info(smu->adev->dev, "SkuReserved[11] = 0x%x\n", pptable->SkuReserved[11]);
- dev_info(smu->adev->dev, "SkuReserved[12] = 0x%x\n", pptable->SkuReserved[12]);
- dev_info(smu->adev->dev, "SkuReserved[13] = 0x%x\n", pptable->SkuReserved[13]);
dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
@@ -2666,6 +2648,76 @@ static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_
i2c_del_adapter(control);
}
+static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v1_0 *gpu_metrics =
+ (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics,
+ true);
+ if (ret)
+ return ret;
+
+ smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+ gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics.TemperatureMem;
+ gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+ gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+
+ gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+ gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+
+ if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
+ else
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
+ gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
+ gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency;
+ gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency;
+ gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency;
+ gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency;
+
+ gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0];
+ gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0];
+ gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1];
+ gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+ gpu_metrics->pcie_link_width =
+ smu_v11_0_get_current_pcie_link_width(smu);
+ gpu_metrics->pcie_link_speed =
+ smu_v11_0_get_current_pcie_link_speed(smu);
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v1_0);
+}
+
+static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMGpuFanBoostLimitRpm,
+ 0,
+ NULL);
+}
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
@@ -2681,7 +2733,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_config_changed = sienna_cichlid_display_config_changed,
.notify_smc_display_config = sienna_cichlid_notify_smc_display_config,
.is_dpm_running = sienna_cichlid_is_dpm_running,
- .get_fan_speed_percent = sienna_cichlid_get_fan_speed_percent,
.get_fan_speed_rpm = sienna_cichlid_get_fan_speed_rpm,
.get_power_profile_mode = sienna_cichlid_get_power_profile_mode,
.set_power_profile_mode = sienna_cichlid_set_power_profile_mode,
@@ -2725,7 +2776,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
.gfx_off_control = smu_v11_0_gfx_off_control,
@@ -2744,6 +2794,12 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.run_btc = sienna_cichlid_run_btc,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
+ .enable_mgpu_fan_boost = sienna_cichlid_enable_mgpu_fan_boost,
+ .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
+ .deep_sleep_control = smu_v11_0_deep_sleep_control,
+ .get_fan_parameters = sienna_cichlid_get_fan_parameters,
+ .interrupt_work = smu_v11_0_interrupt_work,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 8078886e4cbc..57e120c440ea 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -31,7 +31,4 @@ typedef enum {
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
-#define smnPCIE_LC_SPEED_CNTL 0x11140290
-#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
-
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 7b950a582a28..2380759ddf48 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -67,6 +67,19 @@ MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
+#define LINK_WIDTH_MAX 6
+#define LINK_SPEED_MAX 3
+
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
+#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
+#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
+
+static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static int link_speed[] = {25, 50, 80, 160};
+
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -309,39 +322,42 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
void *table;
uint16_t version_major, version_minor;
- hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
- version_major = le16_to_cpu(hdr->header.header_version_major);
- version_minor = le16_to_cpu(hdr->header.header_version_minor);
- if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
- adev->asic_type == CHIP_NAVY_FLOUNDER) {
- dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
- switch (version_minor) {
- case 0:
- ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
- break;
- case 1:
- ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
- smu->smu_table.boot_values.pp_table_id);
- break;
- default:
- ret = -EINVAL;
- break;
+ if (!amdgpu_sriov_vf(adev)) {
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ version_major = le16_to_cpu(hdr->header.header_version_major);
+ version_minor = le16_to_cpu(hdr->header.header_version_minor);
+ if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER) {
+ dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
+ switch (version_minor) {
+ case 0:
+ ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
+ break;
+ case 1:
+ ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
+ smu->smu_table.boot_values.pp_table_id);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ return ret;
+ goto out;
}
- if (ret)
- return ret;
+ }
- } else {
- dev_info(adev->dev, "use vbios provided pptable\n");
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- powerplayinfo);
+ dev_info(adev->dev, "use vbios provided pptable\n");
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
- ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
- (uint8_t **)&table);
- if (ret)
- return ret;
- size = atom_table_size;
- }
+ ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+ (uint8_t **)&table);
+ if (ret)
+ return ret;
+ size = atom_table_size;
+out:
if (!smu->smu_table.power_play_table)
smu->smu_table.power_play_table = table;
if (!smu->smu_table.power_play_table_size)
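Control-flow sketch of the restructured pptable setup above (illustrative pseudocode):

	if (!amdgpu_sriov_vf(adev) && <driver pptable applicable>) {
		<load driver-provided pptable>;
		goto out;			/* skip the vbios path */
	}
	<load vbios-provided pptable>;		/* SR-IOV VFs always land here */
	out:
		...

so SR-IOV virtual functions no longer touch the SMC firmware header and always consume the vbios-provided table.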
@@ -404,10 +420,12 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ kfree(smu_table->gpu_metrics_table);
kfree(smu_table->boot_overdrive_table);
kfree(smu_table->overdrive_table);
kfree(smu_table->max_sustainable_clocks);
kfree(smu_table->driver_pptable);
+ smu_table->gpu_metrics_table = NULL;
smu_table->boot_overdrive_table = NULL;
smu_table->overdrive_table = NULL;
smu_table->max_sustainable_clocks = NULL;
@@ -438,9 +456,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (smu_power->power_context || smu_power->power_context_size != 0)
- return -EINVAL;
-
smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
GFP_KERNEL);
if (!smu_power->power_context)
@@ -454,9 +469,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu_power->power_context || smu_power->power_context_size == 0)
- return -EINVAL;
-
kfree(smu_power->power_context);
smu_power->power_context = NULL;
smu_power->power_context_size = 0;
@@ -685,18 +697,16 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
- int ret = 0;
struct amdgpu_device *adev = smu->adev;
/* Navy_Flounder does not support changing the display count currently */
if (adev->asic_type == CHIP_NAVY_FLOUNDER)
return 0;
- if (!smu->pm_enabled)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
- return ret;
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_NumOfDisplays,
+ count,
+ NULL);
}
@@ -706,7 +716,6 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
- mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
@@ -723,7 +732,6 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
goto failed;
failed:
- mutex_unlock(&feature->mutex);
return ret;
}
@@ -760,9 +768,6 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -932,12 +937,45 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
+}
+
+static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu->dc_controlled_by_gpio &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
+ ret = smu_v11_0_ack_ac_dc_interrupt(smu);
+
+ return ret;
+}
+
+void smu_v11_0_interrupt_work(struct smu_context *smu)
+{
+ if (smu_v11_0_ack_ac_dc_interrupt(smu))
+		dev_err(smu->adev->dev, "Failed to ack the AC/DC interrupt!\n");
+}
+
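The AC/DC acknowledgment now runs from process context: the hard-IRQ handler only schedules smu->interrupt_work, and smu_v11_0_interrupt_work() sends the blocking SMU message later. A minimal sketch of how the work item is presumably wired up during SMU software init -- the INIT_WORK call itself is outside this hunk, and the wrapper name below is illustrative, not from this patch:

#include <linux/workqueue.h>

/* Hedged sketch: assumes smu->interrupt_work is initialized during SMU
 * software init; the wrapper name is illustrative, not the patch's. */
static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	smu_v11_0_interrupt_work(smu);	/* re-enables the AC/DC interrupt */
}

/* During init, before the IRQ source is enabled: */
/*	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); */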
int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
- if (smu->smu_table.thermal_controller_type)
- return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+ int ret = 0;
- return 0;
+ if (smu->smu_table.thermal_controller_type) {
+ ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * After init, some interrupts (e.g. AC/DC) may have fired before
+	 * the driver registered its handler; process any pending ones now.
+ */
+ return smu_v11_0_process_pending_interrupt(smu);
}
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
@@ -1085,35 +1123,6 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
}
int
-smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t duty100, duty;
- uint64_t tmp64;
-
- if (speed > 100)
- speed = 100;
-
- if (smu_v11_0_auto_fan_control(smu, 0))
- return -EINVAL;
-
- duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
- CG_FDO_CTRL1, FMAX_DUTY100);
- if (!duty100)
- return -EINVAL;
-
- tmp64 = (uint64_t)speed * duty100;
- do_div(tmp64, 100);
- duty = (uint32_t)tmp64;
-
- WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
- REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
- CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
-
- return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
-}
-
-int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1121,7 +1130,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
switch (mode) {
case AMD_FAN_CTRL_NONE:
- ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+ ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
break;
case AMD_FAN_CTRL_MANUAL:
ret = smu_v11_0_auto_fan_control(smu, 0);
@@ -1167,15 +1176,34 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
return ret;
}
+int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t tach_period, crystal_clock_freq;
+ uint64_t tmp64;
+
+ tach_period = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD);
+ if (!tach_period)
+ return -EINVAL;
+
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+
+ tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
+ do_div(tmp64, (tach_period * 8));
+ *speed = (uint32_t)tmp64;
+
+ return 0;
+}
+
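The RPM math above inverts the tachometer period: with the reference clock in 10 kHz units (an assumption inferred from the * 10000 factor) and 8 tach counts per revolution, speed = xclk * 60 * 10000 / (tach_period * 8). A self-contained, illustrative calculation with invented register values:

/* Illustrative only: values are made up, not read from hardware. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t crystal_clock_freq = 2700;	/* e.g. 27 MHz in 10 kHz units */
	uint32_t tach_period = 50000;		/* CG_TACH_CTRL.TARGET_PERIOD */
	uint64_t tmp64;

	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;
	tmp64 /= (uint64_t)tach_period * 8;	/* kernel code uses do_div() */

	printf("fan speed: %llu rpm\n", (unsigned long long)tmp64); /* 4050 */
	return 0;
}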
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
- int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
NULL);
- return ret;
}
static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
@@ -1243,13 +1271,6 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
-{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
-}
-
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
@@ -1305,13 +1326,18 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ schedule_work(&smu->interrupt_work);
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ schedule_work(&smu->interrupt_work);
break;
case 0x7:
+		/* Increment the throttle interrupt counter */
+ atomic64_inc(&smu->throttle_int_counter);
+
if (!atomic_read(&adev->throttling_logging_enabled))
return 0;
@@ -1401,11 +1427,7 @@ int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
- int ret = 0;
-
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
-
- return ret;
+ return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
@@ -1416,13 +1438,8 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- bool baco_support;
-
- mutex_lock(&smu_baco->mutex);
- baco_support = smu_baco->platform_support;
- mutex_unlock(&smu_baco->mutex);
- if (!baco_support)
+ if (!smu_baco->platform_support)
return false;
/* Arcturus does not support this bit mask */
@@ -1509,13 +1526,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu)
int smu_v11_0_baco_exit(struct smu_context *smu)
{
- int ret = 0;
-
- ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
- if (ret)
- return ret;
-
- return ret;
+ return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
}
int smu_v11_0_mode1_reset(struct smu_context *smu)
@@ -1913,3 +1924,99 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
return ret;
}
+
+int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
+{
+ uint32_t width_level;
+
+ width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
+ if (width_level > LINK_WIDTH_MAX)
+ width_level = 0;
+
+ return link_width[width_level];
+}
+
+int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
+int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
+{
+ uint32_t speed_level;
+
+ speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
+ if (speed_level > LINK_SPEED_MAX)
+ speed_level = 0;
+
+ return link_speed[speed_level];
+}
+
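Both helpers above index a raw register level into link_width[] / link_speed[] arrays defined elsewhere in the driver, clamping out-of-range levels to 0 first. A hedged sketch of what such tables plausibly contain (PCIe lane counts, and data rates in 0.1 GT/s units); the driver's actual arrays and LINK_WIDTH_MAX/LINK_SPEED_MAX bounds are not part of this hunk:

/* Hedged sketch -- plausible contents, not the driver's definitions. */
static const int link_width[] = { 0, 1, 2, 4, 8, 12, 16 };	/* lanes */
#define LINK_WIDTH_MAX	(ARRAY_SIZE(link_width) - 1)

static const int link_speed[] = { 25, 50, 80, 160 };	/* 2.5/5/8/16 GT/s, x10 */
#define LINK_SPEED_MAX	(ARRAY_SIZE(link_speed) - 1)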
+void smu_v11_0_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
+{
+ memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
+
+ gpu_metrics->common_header.structure_size =
+ sizeof(struct gpu_metrics_v1_0);
+ gpu_metrics->common_header.format_revision = 1;
+ gpu_metrics->common_header.content_revision = 0;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
+
+int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
+ bool enablement)
+{
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
+
+ return ret;
+}
+
+int smu_v11_0_deep_sleep_control(struct smu_context *smu,
+ bool enablement)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ return ret;
+}
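smu_v11_0_deep_sleep_control() repeats the same support-check/toggle/log sequence for three clock domains. A hedged sketch of a helper that would factor out that repetition -- the name and the refactoring are illustrative, not part of this patch:

/* Hedged sketch: factors the repeated pattern above; not in the patch. */
static int smu_feature_toggle_if_supported(struct smu_context *smu,
					   enum smu_feature_mask bit,
					   bool enablement,
					   const char *name)
{
	int ret;

	if (!smu_cmn_feature_is_supported(smu, bit))
		return 0;	/* silently skip unsupported domains */

	ret = smu_cmn_feature_set_enabled(smu, bit, enablement);
	if (ret)
		dev_err(smu->adev->dev, "Failed to %s %s!\n",
			enablement ? "enable" : "disable", name);

	return ret;
}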
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile
new file mode 100644
index 000000000000..67e53f7da3ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'smu manager' sub-component of powerplay.
+# It provides the smu management services for the driver.
+
+SMU12_MGR = renoir_ppt.o \
+ smu_v12_0.o
+
+AMD_SWSMU_SMU12MGR = $(addprefix $(AMD_SWSMU_PATH)/smu12/,$(SMU12_MGR))
+
+AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU12MGR)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 15263cf210d5..66c1026489be 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -128,30 +128,6 @@ static struct cmn2asic_mapping renoir_workload_map[PP_SMC_POWER_PROFILE_COUNT] =
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};
-static int renoir_get_metrics_table(struct smu_context *smu,
- SmuMetrics_t *metrics_table)
-{
- struct smu_table_context *smu_table= &smu->smu_table;
- int ret = 0;
-
- mutex_lock(&smu->metrics_lock);
- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
- ret = smu_cmn_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
- (void *)smu_table->metrics_table, false);
- if (ret) {
- dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
- mutex_unlock(&smu->metrics_lock);
- return ret;
- }
- smu_table->metrics_time = jiffies;
- }
-
- memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
- mutex_unlock(&smu->metrics_lock);
-
- return ret;
-}
-
static int renoir_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -166,18 +142,32 @@ static int renoir_init_smc_tables(struct smu_context *smu)
smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
if (!smu_table->clocks_table)
- return -ENOMEM;
+ goto err0_out;
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
- return -ENOMEM;
+ goto err1_out;
smu_table->metrics_time = 0;
smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
if (!smu_table->watermarks_table)
- return -ENOMEM;
+ goto err2_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_0);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err3_out;
return 0;
+
+err3_out:
+ kfree(smu_table->watermarks_table);
+err2_out:
+ kfree(smu_table->metrics_table);
+err1_out:
+ kfree(smu_table->clocks_table);
+err0_out:
+ return -ENOMEM;
}
/**
@@ -363,7 +353,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
memset(&metrics, 0, sizeof(metrics));
- ret = renoir_get_metrics_table(smu, &metrics);
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
if (ret)
return ret;
@@ -509,7 +499,7 @@ static int renoir_get_current_clk_freq_by_table(struct smu_context *smu,
int ret = 0, clk_id = 0;
SmuMetrics_t metrics;
- ret = renoir_get_metrics_table(smu, &metrics);
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
if (ret)
return ret;
@@ -592,7 +582,7 @@ static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value)
if (!value)
return -EINVAL;
- ret = renoir_get_metrics_table(smu, &metrics);
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
if (ret)
return ret;
@@ -612,7 +602,7 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
if (!value)
return -EINVAL;
- ret = renoir_get_metrics_table(smu, &metrics);
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
if (ret)
return ret;
@@ -628,6 +618,44 @@ static int renoir_get_current_activity_percent(struct smu_context *smu,
return 0;
}
+static int renoir_get_vddc(struct smu_context *smu, uint32_t *value,
+ unsigned int index)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (index >= 2)
+ return -EINVAL;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.Voltage[index];
+
+ return 0;
+}
+
+static int renoir_get_power(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.CurrentSocketPower << 8;
+
+ return 0;
+}
+
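The << 8 in renoir_get_power() suggests the sensor path consumes socket power as a 24.8 fixed-point value -- an inference from the shift, since the SmuMetrics_t layout is not shown in this hunk. A consumer would convert back like this:

#include <stdint.h>

/* Hedged: treats the raw sensor value as 24.8 fixed point (inferred
 * from the << 8 above; the metrics struct layout is not shown here). */
static uint32_t socket_power_watts(uint32_t raw)
{
	return raw >> 8;	/* drop the fractional 8 bits */
}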
/**
 * This interface gets the DPM clock table for DC.
*/
@@ -806,9 +834,59 @@ static int renoir_set_performance_level(struct smu_context *smu,
ret = renoir_force_dpm_limit_value(smu, false);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = renoir_unforce_dpm_levels(smu);
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinGfxClk,
+ RENOIR_UMD_PSTATE_GFXCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinFclkByFreq,
+ RENOIR_UMD_PSTATE_FCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinSocclkByFreq,
+ RENOIR_UMD_PSTATE_SOCCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinVcn,
+ RENOIR_UMD_PSTATE_VCNCLK,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxGfxClk,
+ RENOIR_UMD_PSTATE_GFXCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxFclkByFreq,
+ RENOIR_UMD_PSTATE_FCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxSocclkByFreq,
+ RENOIR_UMD_PSTATE_SOCCLK,
+ NULL);
+ if (ret)
+ return ret;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSoftMaxVcn,
+ RENOIR_UMD_PSTATE_VCNCLK,
+ NULL);
+ if (ret)
+ return ret;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
ret = renoir_get_profiling_clk_mask(smu, level,
@@ -837,50 +915,48 @@ static int renoir_set_performance_level(struct smu_context *smu,
*/
static int renoir_set_watermarks_table(
struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+ struct pp_smu_wm_range_sets *clock_ranges)
{
Watermarks_t *table = smu->smu_table.watermarks_table;
int ret = 0;
int i;
if (clock_ranges) {
- if (clock_ranges->num_wm_dmif_sets > 4 ||
- clock_ranges->num_wm_mcif_sets > 4)
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
return -EINVAL;
	/* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr */
- for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
table->WatermarkRow[WM_DCFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MinMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
- table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
- clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
+ table->WatermarkRow[WM_DCFCLK][i].WmType =
+ clock_ranges->reader_wm_sets[i].wm_type;
}
- for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
table->WatermarkRow[WM_SOCCLK][i].MinMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
- cpu_to_le16((uint16_t)
- (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
+ table->WatermarkRow[WM_SOCCLK][i].WmType =
+ clock_ranges->writer_wm_sets[i].wm_type;
}
smu->watermarks_bitmap |= WATERMARKS_EXIST;
@@ -964,6 +1040,18 @@ static int renoir_read_sensor(struct smu_context *smu,
*(uint32_t *)data *= 100;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = renoir_get_vddc(smu, (uint32_t *)data, 0);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = renoir_get_vddc(smu, (uint32_t *)data, 1);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = renoir_get_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -989,6 +1077,65 @@ static bool renoir_is_dpm_running(struct smu_context *smu)
}
+static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_0 *gpu_metrics =
+ (struct gpu_metrics_v2_0 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_v12_0_init_gpu_metrics_v2_0(gpu_metrics);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.CoreTemperature[0],
+ sizeof(uint16_t) * 8);
+ gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
+ gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
+
+ gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+ gpu_metrics->average_mm_activity = metrics.AverageUvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Power[0];
+ gpu_metrics->average_soc_power = metrics.Power[1];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.CorePower[0],
+ sizeof(uint16_t) * 8);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.ClockFrequency[CLOCK_GFXCLK];
+ gpu_metrics->current_socclk = metrics.ClockFrequency[CLOCK_SOCCLK];
+ gpu_metrics->current_uclk = metrics.ClockFrequency[CLOCK_UMCCLK];
+ gpu_metrics->current_fclk = metrics.ClockFrequency[CLOCK_FCLK];
+ gpu_metrics->current_vclk = metrics.ClockFrequency[CLOCK_VCLK];
+ gpu_metrics->current_dclk = metrics.ClockFrequency[CLOCK_DCLK];
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.CoreFrequency[0],
+ sizeof(uint16_t) * 8);
+ gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
+ gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+ gpu_metrics->fan_pwm = metrics.FanPwm;
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_0);
+}
+
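renoir_get_gpu_metrics() forces a metrics refresh (bypass_cache = true), fills the preallocated gpu_metrics_table, points *table at it, and returns the payload size or a negative errno. A hedged caller sketch -- the function and buffer here are illustrative, not the patch's sysfs plumbing:

/* Hedged caller sketch; buf is assumed large enough for the table. */
static ssize_t gpu_metrics_snapshot(struct smu_context *smu, void *buf)
{
	void *table;
	ssize_t size;

	size = renoir_get_gpu_metrics(smu, &table);
	if (size < 0)
		return size;	/* negative errno from the metrics fetch */

	memcpy(buf, table, size);
	return size;
}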
static const struct pptable_funcs renoir_ppt_funcs = {
.set_power_state = NULL,
.print_clk_levels = renoir_print_clk_levels,
@@ -1023,6 +1170,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.is_dpm_running = renoir_is_dpm_running,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .get_gpu_metrics = renoir_get_gpu_metrics,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h
index 8c3f004cdf8d..11c3c22fecbe 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h
@@ -29,5 +29,6 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
#define RENOIR_UMD_PSTATE_GFXCLK 700
#define RENOIR_UMD_PSTATE_SOCCLK 678
#define RENOIR_UMD_PSTATE_FCLK 800
+#define RENOIR_UMD_PSTATE_VCNCLK 0x022D01D8
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 31456437bb18..660f403d5770 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -274,3 +274,15 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
+
+void smu_v12_0_init_gpu_metrics_v2_0(struct gpu_metrics_v2_0 *gpu_metrics)
+{
+ memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v2_0));
+
+ gpu_metrics->common_header.structure_size =
+ sizeof(struct gpu_metrics_v2_0);
+ gpu_metrics->common_header.format_revision = 2;
+ gpu_metrics->common_header.content_revision = 0;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 5c23c44c33bd..92b2ea4c197b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -112,6 +112,9 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
+ if (smu->adev->in_pci_err_recovery)
+ return 0;
+
index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_MSG,
msg);
@@ -343,9 +346,9 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
return ret;
}
-static int smu_cmn_feature_update_enable_state(struct smu_context *smu,
- uint64_t feature_mask,
- bool enabled)
+int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -428,10 +431,9 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
uint32_t feature_mask[2] = { 0 };
- int32_t feature_index = 0;
+ int feature_index = 0;
uint32_t count = 0;
- uint32_t sort_feature[SMU_FEATURE_COUNT];
- uint64_t hw_feature_count = 0;
+ int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
int ret = 0, i;
@@ -444,23 +446,31 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
feature_mask[1], feature_mask[0]);
+ memset(sort_feature, -1, sizeof(sort_feature));
+
for (i = 0; i < SMU_FEATURE_COUNT; i++) {
feature_index = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
i);
if (feature_index < 0)
continue;
+
sort_feature[feature_index] = i;
- hw_feature_count++;
}
- for (i = 0; i < hw_feature_count; i++) {
+ size += sprintf(buf + size, "%-2s. %-20s %-3s : %-s\n",
+ "No", "Feature", "Bit", "State");
+
+ for (i = 0; i < SMU_FEATURE_COUNT; i++) {
+ if (sort_feature[i] < 0)
+ continue;
+
size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
- count++,
- smu_get_feature_name(smu, sort_feature[i]),
- i,
- !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
- "enabled" : "disabled");
+ count++,
+ smu_get_feature_name(smu, sort_feature[i]),
+ i,
+ !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+ "enabled" : "disabled");
}
return size;
@@ -604,7 +614,7 @@ int smu_cmn_update_table(struct smu_context *smu,
memcpy(table_data, table->cpu_addr, table_size);
}
- return ret;
+ return 0;
}
int smu_cmn_write_watermarks_table(struct smu_context *smu)
@@ -631,3 +641,48 @@ int smu_cmn_write_pptable(struct smu_context *smu)
pptable,
true);
}
+
+int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
+{
+	struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t table_size =
+ smu_table->tables[SMU_TABLE_SMU_METRICS].size;
+ int ret = 0;
+
+ if (bypass_cache ||
+ !smu_table->metrics_time ||
+ time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_SMU_METRICS,
+ 0,
+ smu_table->metrics_table,
+ false);
+ if (ret) {
+ dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
+ return ret;
+ }
+ smu_table->metrics_time = jiffies;
+ }
+
+ if (metrics_table)
+ memcpy(metrics_table, smu_table->metrics_table, table_size);
+
+ return 0;
+}
+
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ metrics_table,
+ bypass_cache);
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
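smu_cmn_get_metrics_table() centralizes the caching that renoir_get_metrics_table() used to do per-ASIC: the firmware table is refetched only when the cached copy is older than msecs_to_jiffies(1) or when bypass_cache is set, and the _locked variant serves callers that already hold metrics_lock. A hedged usage sketch modeled on the renoir getters converted above:

/* Hedged sketch; mirrors the renoir_get_* callers converted above. */
static int example_get_soc_temp(struct smu_context *smu, uint32_t *out)
{
	SmuMetrics_t metrics;
	int ret;

	/* Cached read: refetches only if the copy is older than ~1 ms. */
	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	*out = metrics.SocTemperature;
	return 0;
}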
diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 98face8c5fd6..ab577be23c15 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -52,6 +52,10 @@ int smu_cmn_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num);
+int smu_cmn_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled);
+
int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable);
@@ -79,5 +83,13 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu);
int smu_cmn_write_pptable(struct smu_context *smu);
+int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache);
+
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 264073d4e263..c5adbe46ba0d 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -42,6 +42,7 @@
#define smu_check_fw_version(smu) smu_ppt_funcs(check_fw_version, 0, smu)
#define smu_write_pptable(smu) smu_ppt_funcs(write_pptable, 0, smu)
#define smu_set_min_dcef_deep_sleep(smu, clk) smu_ppt_funcs(set_min_dcef_deep_sleep, 0, smu, clk)
+#define smu_set_active_display_count(smu, count) smu_ppt_funcs(set_active_display_count, 0, smu, count)
#define smu_set_driver_table_location(smu) smu_ppt_funcs(set_driver_table_location, 0, smu)
#define smu_set_tool_table_location(smu) smu_ppt_funcs(set_tool_table_location, 0, smu)
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
@@ -83,7 +84,6 @@
#define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level)
#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu)
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
-#define smu_disable_umc_cdr_12gbps_workaround(smu) smu_ppt_funcs(disable_umc_cdr_12gbps_workaround, 0, smu)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control)
#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
@@ -92,6 +92,10 @@
#define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu)
#define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf)
#define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask)
+#define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
+#define smu_deep_sleep_control(smu, enablement) smu_ppt_funcs(deep_sleep_control, 0, smu, enablement)
+#define smu_get_fan_parameters(smu) smu_ppt_funcs(get_fan_parameters, 0, smu)
+#define smu_post_init(smu) smu_ppt_funcs(post_init, 0, smu)
#endif
#endif
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index ab45ac445045..351a85088d0e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -346,7 +346,7 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
if (cma_obj->sgt)
sgt = cma_obj->sgt;
else
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ sgt = obj->funcs->get_sg_table(obj);
if (!sgt)
return false;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 38dfaa46d306..a887b6a5f8bd 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -757,7 +757,7 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(crtc->dev);
if (dcrtc->cursor_obj)
drm_gem_object_put(&dcrtc->cursor_obj->obj);
@@ -901,7 +901,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
- struct armada_private *priv = drm->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(drm);
struct armada_crtc *dcrtc;
struct drm_plane *primary;
void __iomem *base;
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index c6fc2f1d58e9..29f4b52e3c8d 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -19,7 +19,7 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
mutex_lock(&priv->linear_lock);
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a11bdaccbb33..6a5a87932576 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -73,6 +73,8 @@ struct armada_private {
#endif
};
+#define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm)
+
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
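drm_to_armada_dev() replaces the dev_private pointer with a container_of() upcast: since struct drm_device is now embedded in struct armada_private, the private structure is recovered by pointer arithmetic alone, with no back-pointer to initialize or keep in sync. A minimal self-contained illustration of the idiom:

/* Minimal, self-contained illustration; not the DRM types themselves. */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { int tag; struct inner member; };

static inline struct outer *to_outer(struct inner *p)
{
	return container_of(p, struct outer, member);
}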
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 5fc25c3f445c..980d3f1f8f16 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -87,24 +87,13 @@ static int armada_drm_bind(struct device *dev)
"armada-drm"))
return -EBUSY;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- /*
- * The drm_device structure must be at the start of
- * armada_private for drm_dev_put() to work correctly.
- */
- BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
-
- ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
- if (ret) {
- dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
- __func__, ret);
- kfree(priv);
- return ret;
+ priv = devm_drm_dev_alloc(dev, &armada_drm_driver,
+ struct armada_private, drm);
+ if (IS_ERR(priv)) {
+ dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n",
+ __func__, PTR_ERR(priv));
+ return PTR_ERR(priv);
}
- drmm_add_final_kfree(&priv->drm, priv);
/* Remove early framebuffers */
ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
@@ -117,8 +106,6 @@ static int armada_drm_bind(struct device *dev)
return ret;
}
- priv->drm.dev_private = priv;
-
dev_set_drvdata(dev, &priv->drm);
/* Mode setting support */
@@ -174,14 +161,13 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- drm_dev_put(&priv->drm);
return ret;
}
static void armada_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct armada_private *priv = drm->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(drm);
drm_kms_helper_poll_fini(&priv->drm);
armada_fbdev_fini(&priv->drm);
@@ -194,8 +180,6 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
-
- drm_dev_put(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 0c4601275507..38f5170c0fea 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -117,7 +117,7 @@ static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
int armada_fbdev_init(struct drm_device *dev)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_fb_helper *fbh;
int ret;
@@ -151,7 +151,7 @@ int armada_fbdev_init(struct drm_device *dev)
void armada_fbdev_fini(struct drm_device *dev)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_fb_helper *fbh = priv->fbdev;
if (fbh) {
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 8005614d2e6b..6654bccd9466 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -39,7 +39,7 @@ static size_t roundup_gem_size(size_t size)
void armada_gem_free_object(struct drm_gem_object *obj)
{
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
- struct armada_private *priv = obj->dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(obj->dev);
DRM_DEBUG_DRIVER("release obj %p\n", dobj);
@@ -77,7 +77,7 @@ void armada_gem_free_object(struct drm_gem_object *obj)
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
size_t size = obj->obj.size;
if (obj->page || obj->linear)
@@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
struct armada_gem_object *dobj = drm_to_armada_gem(obj);
struct scatterlist *sg;
struct sg_table *sgt;
- int i, num;
+ int i;
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
@@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
mapping = dobj->obj.filp->f_mapping;
- for_each_sg(sgt->sgl, sg, count, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
struct page *page;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
- num = i;
+ if (IS_ERR(page))
goto release;
- }
sg_set_page(sg, page, PAGE_SIZE, 0);
}
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
- num = sgt->nents;
+ if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto release;
- }
} else if (dobj->page) {
/* Single contiguous page */
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
@@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto free_table;
} else if (dobj->linear) {
/* Single contiguous physical region - no struct page */
@@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return sgt;
release:
- for_each_sg(sgt->sgl, sg, num, i)
- put_page(sg_page(sg));
+ for_each_sgtable_sg(sgt, sg, i)
+ if (sg_page(sg))
+ put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -449,11 +446,12 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
int i;
if (!dobj->linear)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ dma_unmap_sgtable(attach->dev, sgt, dir, 0);
if (dobj->obj.filp) {
struct scatterlist *sg;
- for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
+ for_each_sgtable_sg(sgt, sg, i)
put_page(sg_page(sg));
}
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 07f0da4d9ba1..30e01101f59e 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -344,7 +344,7 @@ static int armada_overlay_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
- struct armada_private *priv = plane->dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(plane->dev);
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
@@ -412,7 +412,7 @@ static int armada_overlay_get_property(struct drm_plane *plane,
const struct drm_plane_state *state, struct drm_property *property,
uint64_t *val)
{
- struct armada_private *priv = plane->dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(plane->dev);
#define C2K(c,s) (((c) >> (s)) & 0xff)
#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
@@ -505,7 +505,7 @@ static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
static int armada_overlay_create_properties(struct drm_device *dev)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
if (priv->colorkey_prop)
return 0;
@@ -539,7 +539,7 @@ static int armada_overlay_create_properties(struct drm_device *dev)
int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm_to_armada_dev(dev);
struct drm_mode_object *mobj;
struct drm_plane *overlay;
int ret;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 903f4f304647..2b424b2b85cc 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -63,15 +63,21 @@ static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
+static int aspeed_gfx_setup_mode_config(struct drm_device *drm)
{
- drm_mode_config_init(drm);
+ int ret;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 800;
drm->mode_config.max_height = 600;
drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
+
+ return ret;
}
static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
@@ -144,7 +150,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
writel(0, priv->base + CRT_CTRL1);
writel(0, priv->base + CRT_CTRL2);
- aspeed_gfx_setup_mode_config(drm);
+ ret = aspeed_gfx_setup_mode_config(drm);
+ if (ret < 0)
+ return ret;
ret = drm_vblank_init(drm, 1);
if (ret < 0) {
@@ -179,7 +187,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
static void aspeed_gfx_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
}
DEFINE_DRM_GEM_CMA_FOPS(fops);
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
index acf0d23514e8..e0f4613918ad 100644
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ b/drivers/gpu/drm/ast/ast_cursor.c
@@ -47,7 +47,7 @@ static void ast_cursor_fini(struct ast_private *ast)
static void ast_cursor_release(struct drm_device *dev, void *ptr)
{
- struct ast_private *ast = dev->dev_private;
+ struct ast_private *ast = to_ast_private(dev);
ast_cursor_fini(ast);
}
@@ -57,7 +57,7 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr)
*/
int ast_cursor_init(struct ast_private *ast)
{
- struct drm_device *dev = ast->dev;
+ struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
void __iomem *vaddr;
@@ -168,7 +168,7 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
- struct drm_device *dev = ast->dev;
+ struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo;
int ret;
void *src;
@@ -217,7 +217,7 @@ static void ast_cursor_set_base(struct ast_private *ast, u64 address)
void ast_cursor_page_flip(struct ast_private *ast)
{
- struct drm_device *dev = ast->dev;
+ struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo;
s64 off;
@@ -253,7 +253,8 @@ void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y)
{
u8 x_offset, y_offset;
- u8 __iomem *dst, __iomem *sig;
+ u8 __iomem *dst;
+ u8 __iomem *sig;
u8 jreg;
dst = ast->cursor.vaddr[ast->cursor.next_index];
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4b85a504825a..88121c0e0d05 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -8,11 +8,24 @@
MODULE_FIRMWARE("ast_dp501_fw.bin");
+static void ast_release_firmware(void *data)
+{
+ struct ast_private *ast = data;
+
+ release_firmware(ast->dp501_fw);
+ ast->dp501_fw = NULL;
+}
+
static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
+ int ret;
+
+ ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
+ if (ret)
+ return ret;
- return request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
+ return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
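Pairing request_firmware() with devm_add_action_or_reset() ties the firmware's lifetime to the device: ast_release_firmware() now runs automatically at device teardown, and if registration fails it is invoked immediately -- which is what lets the explicit ast_release_firmware() export be deleted below. A hedged sketch of the general pattern:

#include <linux/device.h>

/* Hedged sketch of the devm-action pattern; the acquire step is stubbed. */
static void my_release(void *data)
{
	/* undo whatever the acquire step did */
}

static int my_acquire(struct device *dev)
{
	/* ...acquire a resource here (stubbed out for the sketch)... */

	/* On registration failure this calls my_release() itself. */
	return devm_add_action_or_reset(dev, my_release, NULL);
}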
static void send_ack(struct ast_private *ast)
@@ -435,11 +448,3 @@ void ast_init_3rdtx(struct drm_device *dev)
}
}
}
-
-void ast_release_firmware(struct drm_device *dev)
-{
- struct ast_private *ast = to_ast_private(dev);
-
- release_firmware(ast->dp501_fw);
- ast->dp501_fw = NULL;
-}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 0b58f7aee6b0..f0b4af1c390a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -43,9 +43,33 @@ int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
-#define PCI_VENDOR_ASPEED 0x1a03
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(ast_fops);
+
+static struct drm_driver ast_driver = {
+ .driver_features = DRIVER_ATOMIC |
+ DRIVER_GEM |
+ DRIVER_MODESET,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ DRM_GEM_VRAM_DRIVER
+};
+
+/*
+ * PCI driver
+ */
-static struct drm_driver driver;
+#define PCI_VENDOR_ASPEED 0x1a03
#define AST_VGA_DEVICE(id, info) { \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
@@ -56,13 +80,13 @@ static struct drm_driver driver;
.subdevice = PCI_ANY_ID, \
.driver_data = (unsigned long) info }
-static const struct pci_device_id pciidlist[] = {
+static const struct pci_device_id ast_pciidlist[] = {
AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
{0, 0, 0},
};
-MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_DEVICE_TABLE(pci, ast_pciidlist);
static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
{
@@ -85,6 +109,7 @@ static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct ast_private *ast;
struct drm_device *dev;
int ret;
@@ -94,41 +119,25 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
-
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
-
- ret = ast_driver_load(dev, ent->driver_data);
- if (ret)
- goto err_drm_dev_put;
+ ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
+ if (IS_ERR(ast))
+ return PTR_ERR(ast);
+ dev = &ast->base;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_ast_driver_unload;
+ return ret;
drm_fbdev_generic_setup(dev, 32);
return 0;
-
-err_ast_driver_unload:
- ast_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
- return ret;
-
}
-static void
-ast_pci_remove(struct pci_dev *pdev)
+static void ast_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- ast_driver_unload(dev);
- drm_dev_put(dev);
}
static int ast_drm_freeze(struct drm_device *dev)
@@ -217,30 +226,12 @@ static const struct dev_pm_ops ast_pm_ops = {
static struct pci_driver ast_pci_driver = {
.name = DRIVER_NAME,
- .id_table = pciidlist,
+ .id_table = ast_pciidlist,
.probe = ast_pci_probe,
.remove = ast_pci_remove,
.driver.pm = &ast_pm_ops,
};
-DEFINE_DRM_GEM_FOPS(ast_fops);
-
-static struct drm_driver driver = {
- .driver_features = DRIVER_ATOMIC |
- DRIVER_GEM |
- DRIVER_MODESET,
-
- .fops = &ast_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-
- DRM_GEM_VRAM_DRIVER
-};
-
static int __init ast_init(void)
{
if (vgacon_text_force() && ast_modeset == -1)
@@ -261,4 +252,3 @@ module_exit(ast_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e3a264ac7ee2..467049ca8430 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,9 +98,25 @@ enum ast_tx_chip {
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+static inline struct ast_connector *
+to_ast_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct ast_connector, base);
+}
struct ast_private {
- struct drm_device *dev;
+ struct drm_device base;
void __iomem *regs;
void __iomem *ioregs;
@@ -119,9 +135,11 @@ struct ast_private {
unsigned int next_index;
} cursor;
- struct drm_encoder encoder;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct ast_connector connector;
bool support_wide_screen;
enum {
@@ -138,11 +156,12 @@ struct ast_private {
static inline struct ast_private *to_ast_private(struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct ast_private, base);
}
-int ast_driver_load(struct drm_device *dev, unsigned long flags);
-void ast_driver_unload(struct drm_device *dev);
+struct ast_private *ast_device_create(struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
@@ -158,6 +177,8 @@ void ast_driver_unload(struct drm_device *dev);
#define AST_IO_MM_OFFSET (0x380)
+#define AST_IO_VGAIR1_VREFRESH BIT(3)
+
#define __ast_read(x) \
static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
@@ -226,19 +247,6 @@ static inline void ast_open_key(struct ast_private *ast)
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-struct ast_i2c_chan {
- struct i2c_adapter adapter;
- struct drm_device *dev;
- struct i2c_algo_bit_data bit;
-};
-
-struct ast_connector {
- struct drm_connector base;
- struct ast_i2c_chan *i2c;
-};
-
-#define to_ast_connector(x) container_of(x, struct ast_connector, base)
-
struct ast_vbios_stdtable {
u8 misc;
u8 seq[4];
@@ -305,7 +313,6 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
-void ast_release_firmware(struct drm_device *dev);
/* ast_cursor.c */
int ast_cursor_init(struct ast_private *ast);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 6a9fba051d13..77066bca8793 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -30,8 +30,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_managed.h>
#include "ast_drv.h"
@@ -230,11 +232,11 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->tx_chip_type = AST_TX_SIL164;
break;
case 0x08:
- ast->dp501_fw_addr = kzalloc(32*1024, GFP_KERNEL);
+ ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
if (ast->dp501_fw_addr) {
/* backup firmware */
if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
- kfree(ast->dp501_fw_addr);
+ drmm_kfree(dev, ast->dp501_fw_addr);
ast->dp501_fw_addr = NULL;
}
}
@@ -378,24 +380,38 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-int ast_driver_load(struct drm_device *dev, unsigned long flags)
+/*
+ * Run this function as part of the HW device cleanup, not when
+ * the DRM device gets released.
+ */
+static void ast_device_release(void *data)
{
+ struct ast_private *ast = data;
+
+ /* enable standard VGA decode */
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+}
+
+struct ast_private *ast_device_create(struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags)
+{
+ struct drm_device *dev;
struct ast_private *ast;
bool need_post;
int ret = 0;
- ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
- if (!ast)
- return -ENOMEM;
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
+ if (IS_ERR(ast))
+ return ast;
+ dev = &ast->base;
- dev->dev_private = ast;
- ast->dev = dev;
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
ast->regs = pci_iomap(dev->pdev, 1, 0);
- if (!ast->regs) {
- ret = -EIO;
- goto out_free;
- }
+ if (!ast->regs)
+ return ERR_PTR(-EIO);
/*
* If we don't have IO space at all, use MMIO now and
@@ -410,17 +426,16 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
ast->ioregs = pci_iomap(dev->pdev, 2, 0);
- if (!ast->ioregs) {
- ret = -EIO;
- goto out_free;
- }
+ if (!ast->ioregs)
+ return ERR_PTR(-EIO);
}
ast_detect_chip(dev, &need_post);
ret = ast_get_dram_info(dev);
if (ret)
- goto out_free;
+ return ERR_PTR(ret);
+
drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
ast->mclk, ast->dram_type, ast->dram_bus_width);
@@ -429,28 +444,15 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
ret = ast_mm_init(ast);
if (ret)
- goto out_free;
+ return ERR_PTR(ret);
ret = ast_mode_config_init(ast);
if (ret)
- goto out_free;
+ return ERR_PTR(ret);
- return 0;
-out_free:
- kfree(ast);
- dev->dev_private = NULL;
- return ret;
-}
-
-void ast_driver_unload(struct drm_device *dev)
-{
- struct ast_private *ast = to_ast_private(dev);
-
- /* enable standard VGA decode */
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
-
- ast_release_firmware(dev);
- kfree(ast->dp501_fw_addr);
+ ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast);
+ if (ret)
+ return ERR_PTR(ret);
- kfree(ast);
+ return ast;
}
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 9186ec3ebbe0..8392ebde504b 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -85,9 +85,9 @@ static void ast_mm_release(struct drm_device *dev, void *ptr)
int ast_mm_init(struct ast_private *ast)
{
+ struct drm_device *dev = &ast->base;
u32 vram_size;
int ret;
- struct drm_device *dev = ast->dev;
vram_size = ast_get_vram_size(ast);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 154cd877d9d1..834a156e3a75 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -514,6 +514,16 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
}
+static void ast_wait_for_vretrace(struct ast_private *ast)
+{
+ unsigned long timeout = jiffies + HZ;
+ u8 vgair1;
+
+ do {
+ vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
+}
+
/*
* Primary plane
*/
@@ -562,13 +572,24 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
struct drm_gem_vram_object *gbo;
s64 gpu_addr;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+
+ if (!old_fb || (fb->format != old_fb->format)) {
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+ struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
- gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+ ast_set_color_reg(ast, fb->format);
+ ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
+ }
+
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (drm_WARN_ON_ONCE(dev, gpu_addr < 0))
return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
- ast_set_offset_reg(ast, state->fb);
+ ast_set_offset_reg(ast, fb);
ast_set_start_address_crt1(ast, (u32)gpu_addr);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
@@ -663,7 +684,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct ast_private *ast = plane->dev->dev_private;
+ struct ast_private *ast = to_ast_private(plane->dev);
unsigned int offset_x, offset_y;
offset_x = AST_MAX_HWC_WIDTH - fb->width;
@@ -733,6 +754,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
@@ -743,8 +765,8 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
ast_state = to_ast_crtc_state(state);
format = ast_state->format;
- if (!format)
- return 0;
+ if (drm_WARN_ON_ONCE(dev, !format))
+ return -EINVAL; /* BUG: We didn't set format in primary check(). */
succ = ast_get_vbios_mode_info(format, &state->mode,
&state->adjusted_mode,
@@ -755,39 +777,17 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
- struct ast_private *ast = to_ast_private(crtc->dev);
-
- ast_open_key(ast);
-}
-
-static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void
+ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
- struct ast_crtc_state *ast_state;
- const struct drm_format_info *format;
- struct ast_vbios_mode_info *vbios_mode_info;
- struct drm_display_mode *adjusted_mode;
-
- ast_state = to_ast_crtc_state(crtc->state);
-
- format = ast_state->format;
- if (!format)
- return;
-
- vbios_mode_info = &ast_state->vbios_mode_info;
-
- ast_set_color_reg(ast, format);
- ast_set_vbios_color_reg(ast, format, vbios_mode_info);
-
- if (!crtc->state->mode_changed)
- return;
-
- adjusted_mode = &crtc->state->adjusted_mode;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+ struct ast_vbios_mode_info *vbios_mode_info =
+ &ast_crtc_state->vbios_mode_info;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
@@ -796,12 +796,7 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
ast_set_crtthd_reg(ast);
ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
-}
-static void
-ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
@@ -809,13 +804,32 @@ static void
ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = to_ast_private(dev);
+
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ /*
+ * HW cursors require the underlying primary plane and CRTC to
+ * display a valid mode and image. This is not the case during
+ * full modeset operations. So we temporarily disable any active
+ * plane, including the HW cursor. Each plane's atomic_update()
+ * helper will re-enable it if necessary.
+ *
+ * We only do this during *full* modesets. It does not affect
+ * simple pageflips on the planes.
+ */
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
+
+ /*
+ * Ensure that no scanout takes place before reprogramming mode
+ * and format registers.
+ */
+ ast_wait_for_vretrace(ast);
}
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
- .atomic_begin = ast_crtc_helper_atomic_begin,
- .atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
@@ -831,12 +845,6 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
}
-static void ast_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
- kfree(crtc);
-}
-
static struct drm_crtc_state *
ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
@@ -872,7 +880,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
static const struct drm_crtc_funcs ast_crtc_funcs = {
.reset = ast_crtc_reset,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
- .destroy = ast_crtc_destroy,
+ .destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = ast_crtc_atomic_duplicate_state,
@@ -882,27 +890,19 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
struct ast_private *ast = to_ast_private(dev);
- struct drm_crtc *crtc;
+ struct drm_crtc *crtc = &ast->crtc;
int ret;
- crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
- if (!crtc)
- return -ENOMEM;
-
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane,
&ast->cursor_plane, &ast_crtc_funcs,
NULL);
if (ret)
- goto err_kfree;
+ return ret;
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs);
return 0;
-
-err_kfree:
- kfree(crtc);
- return ret;
}
/*
@@ -1021,7 +1021,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
drm_connector_cleanup(connector);
- kfree(connector);
}
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
@@ -1039,15 +1038,11 @@ static const struct drm_connector_funcs ast_connector_funcs = {
static int ast_connector_init(struct drm_device *dev)
{
- struct ast_connector *ast_connector;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
-
- ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
- if (!ast_connector)
- return -ENOMEM;
+ struct ast_private *ast = to_ast_private(dev);
+ struct ast_connector *ast_connector = &ast->connector;
+ struct drm_connector *connector = &ast_connector->base;
+ struct drm_encoder *encoder = &ast->encoder;
- connector = &ast_connector->base;
ast_connector->i2c = ast_i2c_create(dev);
if (!ast_connector->i2c)
drm_err(dev, "failed to add ddc bus for connector\n");
@@ -1064,7 +1059,6 @@ static int ast_connector_init(struct drm_device *dev)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
drm_connector_attach_encoder(connector, encoder);
return 0;
@@ -1074,6 +1068,11 @@ static int ast_connector_init(struct drm_device *dev)
* Mode config
*/
+static const struct drm_mode_config_helper_funcs
+ast_mode_config_helper_funcs = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.mode_valid = drm_vram_helper_mode_valid,
@@ -1083,7 +1082,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
- struct drm_device *dev = ast->dev;
+ struct drm_device *dev = &ast->base;
int ret;
ret = ast_cursor_init(ast);
@@ -1099,7 +1098,7 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@@ -1113,6 +1112,8 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.max_height = 1200;
}
+ dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
+
memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
&ast_primary_plane_funcs,
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index c043fe717553..8902c2f84bf9 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -365,12 +365,12 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
- u32 reg;
struct ast_private *ast = to_ast_private(dev);
+ u32 reg;
- pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ pci_read_config_dword(dev->pdev, 0x04, &reg);
reg |= 0x3;
- pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+ pci_write_config_dword(dev->pdev, 0x04, reg);
ast_enable_vga(dev);
ast_open_key(ast);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 43271c21d3fc..ef91646441b1 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,19 @@ config DRM_DISPLAY_CONNECTOR
on ARM-based platforms. Saying Y here when this driver is not needed
will not cause any issue.
+config DRM_LONTIUM_LT9611
+ tristate "Lontium LT9611 DSI/HDMI bridge"
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on OF
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ Driver for the Lontium LT9611 DSI to HDMI bridge
+ chip, which converts dual DSI and I2S to HDMI
+ signals. Please say Y if you have such hardware.
+
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
depends on OF
@@ -153,6 +166,14 @@ config DRM_THINE_THC63LVD1024
help
Thine THC63LVD1024 LVDS/parallel converter driver.
+config DRM_TOSHIBA_TC358762
+ tristate "TC358762 DSI/DPI bridge"
+ depends on OF
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ help
+ Toshiba TC358762 DSI/DPI bridge driver.
+
config DRM_TOSHIBA_TC358764
tristate "TC358764 DSI/LVDS bridge"
depends on OF
@@ -181,6 +202,16 @@ config DRM_TOSHIBA_TC358768
help
Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+config DRM_TOSHIBA_TC358775
+ tristate "Toshiba TC358775 DSI/LVDS bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358775 DSI/LVDS bridge chip driver.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -210,6 +241,8 @@ source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
+source "drivers/gpu/drm/bridge/cadence/Kconfig"
+
source "drivers/gpu/drm/bridge/synopsys/Kconfig"
endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index d63d4b7e4347..2b3aff104e46 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
+obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
@@ -12,9 +13,11 @@ obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358775) += tc358775.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
@@ -22,4 +25,5 @@ obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-y += analogix/
+obj-y += cadence/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index f082b4ed4878..d9164fab044d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -507,10 +507,6 @@ static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs =
static void
anx6345_connector_destroy(struct drm_connector *connector)
{
- struct anx6345 *anx6345 = connector_to_anx6345(connector);
-
- if (anx6345->panel)
- drm_panel_detach(anx6345->panel);
drm_connector_cleanup(connector);
}
@@ -575,14 +571,6 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
return err;
}
- if (anx6345->panel) {
- err = drm_panel_attach(anx6345->panel, &anx6345->connector);
- if (err) {
- DRM_ERROR("Failed to attach panel: %d\n", err);
- return err;
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 76736fb8ed94..aa1bb86293fd 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1265,14 +1265,6 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
}
}
- if (dp->plat_data->panel) {
- ret = drm_panel_attach(dp->plat_data->panel, &dp->connector);
- if (ret) {
- DRM_ERROR("Failed to attach panel\n");
- return ret;
- }
- }
-
return 0;
}
@@ -1803,7 +1795,6 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
if (dp->plat_data->panel) {
if (drm_panel_unprepare(dp->plat_data->panel))
DRM_ERROR("failed to turnoff the panel\n");
- drm_panel_detach(dp->plat_data->panel);
}
drm_dp_aux_unregister(&dp->aux);
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
new file mode 100644
index 000000000000..ef8c230e0f62
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CDNS_MHDP8546
+ tristate "Cadence DPI/DP bridge"
+ select DRM_KMS_HELPER
+ select DRM_PANEL_BRIDGE
+ depends on OF
+ help
+ Support for the Cadence DPI to DP bridge. This is an
+ internal bridge meant to be embedded directly in a SoC.
+ It takes a DPI stream as input and outputs it encoded
+ in DP format.
+
+if DRM_CDNS_MHDP8546
+
+config DRM_CDNS_MHDP8546_J721E
+ depends on ARCH_K3 || COMPILE_TEST
+ bool "J721E Cadence DPI/DP wrapper support"
+ default y
+ help
+ Support for the J721E Cadence DPI/DP wrapper. This
+ wrapper adds the J721E-related platform ops: it
+ initializes the J721E DisplayPort and sets up the
+ clock and data muxes.
+endif
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
new file mode 100644
index 000000000000..8f647991b374
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
+cdns-mhdp8546-y := cdns-mhdp8546-core.o
+cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
new file mode 100644
index 000000000000..d0c65610ebb5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -0,0 +1,2532 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
+ * Swapnil Jakhade <sjakhade@cadence.com>
+ * Yuti Amonkar <yamonkar@cadence.com>
+ * Tomi Valkeinen <tomi.valkeinen@ti.com>
+ * Jyri Sarha <jsarha@ti.com>
+ *
+ * TODO:
+ * - Implement optimized mailbox communication using mailbox interrupts
+ * - Add support for power management
+ * - Add support for features like audio, MST and fast link training
+ * - Implement request_fw_cancel to handle HW_STATE
+ * - Fix the asynchronous firmware loading implementation
+ * - Add DRM helper function for cdns_mhdp_lower_link_rate
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <asm/unaligned.h>
+
+#include "cdns-mhdp8546-core.h"
+
+#include "cdns-mhdp8546-j721e.h"
+
+static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
+{
+ int ret, empty;
+
+ WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+ ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
+ empty, !empty, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
+}
+
+static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
+{
+ int ret, full;
+
+ WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+ ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
+ full, !full, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
+
+ return 0;
+}
+
+static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
+ u8 module_id, u8 opcode,
+ u16 req_size)
+{
+ u32 mbox_size, i;
+ u8 header[4];
+ int ret;
+
+ /* read the header of the message */
+ for (i = 0; i < sizeof(header); i++) {
+ ret = cdns_mhdp_mailbox_read(mhdp);
+ if (ret < 0)
+ return ret;
+
+ header[i] = ret;
+ }
+
+ mbox_size = get_unaligned_be16(header + 2);
+
+ if (opcode != header[0] || module_id != header[1] ||
+ req_size != mbox_size) {
+ /*
+ * If the message in the mailbox is not the one we expect,
+ * drain the mailbox by reading out its contents.
+ */
+ for (i = 0; i < mbox_size; i++)
+ if (cdns_mhdp_mailbox_read(mhdp) < 0)
+ break;
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
+ u8 *buff, u16 buff_size)
+{
+ u32 i;
+ int ret;
+
+ for (i = 0; i < buff_size; i++) {
+ ret = cdns_mhdp_mailbox_read(mhdp);
+ if (ret < 0)
+ return ret;
+
+ buff[i] = ret;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
+ u8 opcode, u16 size, u8 *message)
+{
+ u8 header[4];
+ int ret, i;
+
+ header[0] = opcode;
+ header[1] = module_id;
+ put_unaligned_be16(size, header + 2);
+
+ for (i = 0; i < sizeof(header); i++) {
+ ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < size; i++) {
+ ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
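The mailbox helpers above frame every message with a 4-byte header: opcode, module ID, and a big-endian 16-bit payload size, followed by the payload bytes. A minimal userspace sketch of that framing; the module ID, opcode, and payload values are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Frame a message the way cdns_mhdp_mailbox_send() does: a 4-byte
 * header (opcode, module ID, big-endian 16-bit size) plus payload. */
static size_t mbox_frame(uint8_t *buf, uint8_t module_id, uint8_t opcode,
			 const uint8_t *msg, uint16_t size)
{
	buf[0] = opcode;
	buf[1] = module_id;
	buf[2] = size >> 8;	/* size, high byte */
	buf[3] = size & 0xff;	/* size, low byte */
	memcpy(buf + 4, msg, size);
	return 4 + size;
}

int main(void)
{
	uint8_t payload[4] = { 0x00, 0x00, 0x12, 0x34 };
	uint8_t frame[4 + sizeof(payload)];
	size_t i, len = mbox_frame(frame, 0x01, 0x07, payload, sizeof(payload));

	for (i = 0; i < len; i++)
		printf("%02x ", frame[i]);
	printf("\n");	/* prints: 07 01 00 04 00 00 12 34 */
	return 0;
}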
+static
+int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
+{
+ u8 msg[4], resp[8];
+ int ret;
+
+ put_unaligned_be32(addr, msg);
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
+ GENERAL_REGISTER_READ,
+ sizeof(msg), msg);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
+ GENERAL_REGISTER_READ,
+ sizeof(resp));
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
+ if (ret)
+ goto out;
+
+ /* Returned address value should be the same as requested */
+ if (memcmp(msg, resp, sizeof(msg))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *value = get_unaligned_be32(resp + 4);
+
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+ if (ret) {
+ dev_err(mhdp->dev, "Failed to read register\n");
+ *value = 0;
+ }
+
+ return ret;
+}
+
+static
+int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
+{
+ u8 msg[6];
+ int ret;
+
+ put_unaligned_be16(addr, msg);
+ put_unaligned_be32(val, msg + 2);
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_WRITE_REGISTER, sizeof(msg), msg);
+
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static
+int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
+ u8 start_bit, u8 bits_no, u32 val)
+{
+ u8 field[8];
+ int ret;
+
+ put_unaligned_be16(addr, field);
+ field[2] = start_bit;
+ field[3] = bits_no;
+ put_unaligned_be32(val, field + 4);
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_WRITE_FIELD, sizeof(field), field);
+
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static
+int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
+ u32 addr, u8 *data, u16 len)
+{
+ u8 msg[5], reg[5];
+ int ret;
+
+ put_unaligned_be16(len, msg);
+ put_unaligned_be24(addr, msg + 2);
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_DPCD, sizeof(msg), msg);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_DPCD,
+ sizeof(reg) + len);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
+
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static
+int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
+{
+ u8 msg[6], reg[5];
+ int ret;
+
+ put_unaligned_be16(1, msg);
+ put_unaligned_be24(addr, msg + 2);
+ msg[5] = value;
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_WRITE_DPCD, sizeof(msg), msg);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_WRITE_DPCD, sizeof(reg));
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+ if (ret)
+ goto out;
+
+ if (addr != get_unaligned_be24(reg + 2))
+ ret = -EINVAL;
+
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ if (ret)
+ dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
+ return ret;
+}
+
+static
+int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
+{
+ u8 msg[5];
+ int ret, i;
+
+ msg[0] = GENERAL_MAIN_CONTROL;
+ msg[1] = MB_MODULE_ID_GENERAL;
+ msg[2] = 0;
+ msg[3] = 1;
+ msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ for (i = 0; i < sizeof(msg); i++) {
+ ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* read the firmware state */
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ if (ret < 0)
+ dev_err(mhdp->dev, "set firmware active failed\n");
+ return ret;
+}
+
+static
+int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
+{
+ u8 status;
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_HPD_STATE, 0, NULL);
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_HPD_STATE,
+ sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
+ status ? "" : "un");
+
+ return status;
+
+err_get_hpd:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static
+int cdns_mhdp_get_edid_block(void *data, u8 *edid,
+ unsigned int block, size_t length)
+{
+ struct cdns_mhdp_device *mhdp = data;
+ u8 msg[2], reg[2], i;
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ for (i = 0; i < 4; i++) {
+ msg[0] = block / 2;
+ msg[1] = block % 2;
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_GET_EDID, sizeof(msg), msg);
+ if (ret)
+ continue;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_GET_EDID,
+ sizeof(reg) + length);
+ if (ret)
+ continue;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
+ if (ret)
+ continue;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
+ if (ret)
+ continue;
+
+ if (reg[0] == length && reg[1] == block / 2)
+ break;
+ }
+
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ if (ret)
+ dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
+ block, ret);
+
+ return ret;
+}
+
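The block / 2 and block % 2 pair above selects the 256-byte EDID segment and the 128-byte block within it. A tiny standalone sketch of that mapping:

#include <stdio.h>

int main(void)
{
	unsigned int block;

	/* Two 128-byte EDID blocks per 256-byte segment, as selected by
	 * msg[0] = block / 2 and msg[1] = block % 2 above. */
	for (block = 0; block < 4; block++)
		printf("block %u -> segment %u, block-in-segment %u\n",
		       block, block / 2, block % 2);
	return 0;
}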
+static
+int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
+{
+ u8 event = 0;
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_EVENT, 0, NULL);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_EVENT, sizeof(event));
+ if (ret < 0)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
+ (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
+ (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
+ (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
+ (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
+
+ return event;
+}
+
+static
+int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
+ unsigned int udelay, const u8 *lanes_data,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 payload[7];
+ u8 hdr[5]; /* For DPCD read response header */
+ u32 addr;
+ int ret;
+
+ if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
+ dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ payload[0] = nlanes;
+ put_unaligned_be16(udelay, payload + 1);
+ memcpy(payload + 3, lanes_data, nlanes);
+
+ mutex_lock(&mhdp->mbox_mutex);
+
+ ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_ADJUST_LT,
+ sizeof(payload), payload);
+ if (ret)
+ goto out;
+
+ /* The firmware replies to DPTX_ADJUST_LT with a DPCD read response */
+ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
+ DPTX_READ_DPCD,
+ sizeof(hdr) + DP_LINK_STATUS_SIZE);
+ if (ret)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
+ if (ret)
+ goto out;
+
+ addr = get_unaligned_be24(hdr + 2);
+ if (addr != DP_LANE0_1_STATUS)
+ goto out;
+
+ ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
+ DP_LINK_STATUS_SIZE);
+
+out:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ if (ret)
+ dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
+
+ return ret;
+}
+
+/**
+ * cdns_mhdp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52,
+ * "Sink Control Field", register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * cdns_mhdp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
+ struct cdns_mhdp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * cdns_mhdp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static
+int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
+ struct cdns_mhdp_link *link)
+{
+ u8 values[2];
+ int err;
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->num_lanes;
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
+{
+ return min(mhdp->host.link_rate, mhdp->sink.link_rate);
+}
+
+static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
+{
+ return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
+}
+
+static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
+{
+ return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
+}
+
+static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
+{
+ /* Check if SSC is supported by both sides */
+ return mhdp->host.ssc && mhdp->sink.ssc;
+}
+
+static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
+{
+ dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
+
+ if (mhdp->plugged)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
+{
+ u32 major_num, minor_num, revision;
+ u32 fw_ver, lib_ver;
+
+ fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
+ | readl(mhdp->regs + CDNS_VER_L);
+
+ lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
+ | readl(mhdp->regs + CDNS_LIB_L_ADDR);
+
+ if (lib_ver < 33984) {
+ /*
+ * Older FW versions with major number 1 stored the FW version
+ * as a repository revision number in these registers. Identify
+ * those versions here.
+ */
+ major_num = 1;
+ minor_num = 2;
+ if (fw_ver == 26098) {
+ revision = 15;
+ } else if (lib_ver == 0 && fw_ver == 0) {
+ revision = 17;
+ } else {
+ dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
+ fw_ver, lib_ver);
+ return -ENODEV;
+ }
+ } else {
+ /* To identify newer FW versions with major number 2 onwards. */
+ major_num = fw_ver / 10000;
+ minor_num = (fw_ver / 100) % 100;
+ revision = (fw_ver % 10000) % 100;
+ }
+
+ dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
+ revision);
+ return 0;
+}
+
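To make the decimal decoding for major number 2 onwards concrete, here is a small sketch; the register value 34567 is invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int fw_ver = 34567;	/* hypothetical register value */
	unsigned int major = fw_ver / 10000;
	unsigned int minor = (fw_ver / 100) % 100;
	unsigned int revision = (fw_ver % 10000) % 100;

	printf("FW version: v%u.%u.%u\n", major, minor, revision);
	/* prints: FW version: v3.45.67 */
	return 0;
}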
+static int cdns_mhdp_fw_activate(const struct firmware *fw,
+ struct cdns_mhdp_device *mhdp)
+{
+ unsigned int reg;
+ int ret;
+
+ /* Release uCPU reset and stall it. */
+ writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
+
+ memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
+
+ /* Leave debug mode, release stall */
+ writel(0, mhdp->regs + CDNS_APB_CTRL);
+
+ /*
+ * Wait for the KEEP_ALIVE "message" on the first 8 bits.
+ * Updated on each scheduler "tick" (~2 ms).
+ */
+ ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
+ reg & CDNS_KEEP_ALIVE_MASK, 500,
+ CDNS_KEEP_ALIVE_TIMEOUT);
+ if (ret) {
+ dev_err(mhdp->dev,
+ "device didn't give any life sign: reg %d\n", reg);
+ return ret;
+ }
+
+ ret = cdns_mhdp_check_fw_version(mhdp);
+ if (ret)
+ return ret;
+
+ /* Init events to 0 as it's not cleared by FW at boot but on read */
+ readl(mhdp->regs + CDNS_SW_EVENT0);
+ readl(mhdp->regs + CDNS_SW_EVENT1);
+ readl(mhdp->regs + CDNS_SW_EVENT2);
+ readl(mhdp->regs + CDNS_SW_EVENT3);
+
+ /* Activate uCPU */
+ ret = cdns_mhdp_set_firmware_active(mhdp, true);
+ if (ret)
+ return ret;
+
+ spin_lock(&mhdp->start_lock);
+
+ mhdp->hw_state = MHDP_HW_READY;
+
+ /*
+ * Here we must keep the lock while enabling the interrupts
+ * since it would otherwise be possible that interrupt enable
+ * code is executed after the bridge is detached. The similar
+ * situation is not possible in attach()/detach() callbacks
+ * since the hw_state changes from MHDP_HW_READY to
+ * MHDP_HW_STOPPED happens only due to driver removal when
+ * bridge should already be detached.
+ */
+ if (mhdp->bridge_attached)
+ writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+ mhdp->regs + CDNS_APB_INT_MASK);
+
+ spin_unlock(&mhdp->start_lock);
+
+ wake_up(&mhdp->fw_load_wq);
+ dev_dbg(mhdp->dev, "DP FW activated\n");
+
+ return 0;
+}
+
+static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
+{
+ struct cdns_mhdp_device *mhdp = context;
+ bool bridge_attached;
+ int ret;
+
+ dev_dbg(mhdp->dev, "firmware callback\n");
+
+ if (!fw || !fw->data) {
+ dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
+ return;
+ }
+
+ ret = cdns_mhdp_fw_activate(fw, mhdp);
+
+ release_firmware(fw);
+
+ if (ret)
+ return;
+
+ /*
+ * XXX how to make sure the bridge is still attached when
+ * calling drm_kms_helper_hotplug_event() after releasing
+ * the lock? We should not hold the spin lock when
+ * calling drm_kms_helper_hotplug_event() since it may
+ * cause a deadlock. The fbdev console calls detect from the
+ * same thread just down the call stack started here.
+ */
+ spin_lock(&mhdp->start_lock);
+ bridge_attached = mhdp->bridge_attached;
+ spin_unlock(&mhdp->start_lock);
+ if (bridge_attached) {
+ if (mhdp->connector.dev)
+ drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+ else
+ drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
+ }
+}
+
+static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
+{
+ int ret;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
+ GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
+ if (ret) {
+ dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
+ FW_NAME, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
+ int ret;
+
+ if (msg->request != DP_AUX_NATIVE_WRITE &&
+ msg->request != DP_AUX_NATIVE_READ)
+ return -EOPNOTSUPP;
+
+ if (msg->request == DP_AUX_NATIVE_WRITE) {
+ const u8 *buf = msg->buffer;
+ unsigned int i;
+
+ for (i = 0; i < msg->size; ++i) {
+ ret = cdns_mhdp_dpcd_write(mhdp,
+ msg->address + i, buf[i]);
+ if (!ret)
+ continue;
+
+ dev_err(mhdp->dev,
+ "Failed to write DPCD addr %u\n",
+ msg->address + i);
+
+ return ret;
+ }
+ } else {
+ ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
+ msg->buffer, msg->size);
+ if (ret) {
+ dev_err(mhdp->dev,
+ "Failed to read DPCD addr %u\n",
+ msg->address);
+
+ return ret;
+ }
+ }
+
+ return msg->size;
+}
+
+static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
+{
+ union phy_configure_opts phy_cfg;
+ u32 reg32;
+ int ret;
+
+ drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ /* Reset PHY configuration */
+ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+ if (!mhdp->host.scrambler)
+ reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
+ mhdp->sink.enhanced & mhdp->host.enhanced);
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
+ CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
+
+ cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
+ phy_cfg.dp.link_rate = mhdp->link.rate / 100;
+ phy_cfg.dp.lanes = mhdp->link.num_lanes;
+
+ memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
+ memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
+
+ phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+ phy_cfg.dp.set_lanes = true;
+ phy_cfg.dp.set_rate = true;
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(mhdp->phy, &phy_cfg);
+ if (ret) {
+ dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
+ CDNS_PHY_COMMON_CONFIG |
+ CDNS_PHY_TRAINING_EN |
+ CDNS_PHY_TRAINING_TYPE(1) |
+ CDNS_PHY_SCRAMBLER_BYPASS);
+
+ drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
+
+ return 0;
+}
+
+static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
+ u8 link_status[DP_LINK_STATUS_SIZE],
+ u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
+ union phy_configure_opts *phy_cfg)
+{
+ u8 adjust, max_pre_emph, max_volt_swing;
+ u8 set_volt, set_pre;
+ unsigned int i;
+
+ max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
+ << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+
+ for (i = 0; i < mhdp->link.num_lanes; i++) {
+ /* Check if Voltage swing and pre-emphasis are within limits */
+ adjust = drm_dp_get_adjust_request_voltage(link_status, i);
+ set_volt = min(adjust, max_volt_swing);
+
+ adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ set_pre = min(adjust, max_pre_emph)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ /*
+ * If the requested voltage swing and pre-emphasis level
+ * combination is not allowed, leave pre-emphasis as-is and
+ * adjust the voltage swing instead.
+ */
+ if (set_volt + set_pre > 3)
+ set_volt = 3 - set_pre;
+
+ phy_cfg->dp.voltage[i] = set_volt;
+ lanes_data[i] = set_volt;
+
+ if (set_volt == max_volt_swing)
+ lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
+
+ phy_cfg->dp.pre[i] = set_pre;
+ lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+ if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
+ lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+}
+
+static
+void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane, u8 volt)
+{
+ unsigned int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
+
+ link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
+ link_status[idx] |= volt << s;
+}
+
+static
+void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane, u8 pre_emphasis)
+{
+ unsigned int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
+
+ link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
+ link_status[idx] |= pre_emphasis << s;
+}
+
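Each ADJUST_REQUEST byte packs two lanes, with odd lanes shifted into the high nibble, which is what the idx and shift computations above express. A self-contained sketch of the voltage-swing variant, assuming the standard DPCD addresses from drm_dp_helper.h and arbitrary lane/swing values:

#include <stdint.h>
#include <stdio.h>

#define DP_LANE0_1_STATUS		0x202	/* from the DPCD address map */
#define DP_ADJUST_REQUEST_LANE0_1	0x206
#define VOLT_MASK			0x3
#define VOLT_SHIFT(lane)		(((lane) & 1) ? 4 : 0)

int main(void)
{
	uint8_t link_status[6] = { 0 };	/* DP_LINK_STATUS_SIZE */
	unsigned int lane = 3, volt = 2;
	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS +
			   (lane >> 1);

	/* Odd lanes land in the high nibble of the shared byte. */
	link_status[idx] &= ~(VOLT_MASK << VOLT_SHIFT(lane));
	link_status[idx] |= volt << VOLT_SHIFT(lane);

	printf("link_status[%u] = 0x%02x\n", idx, link_status[idx]);
	/* prints: link_status[5] = 0x20 */
	return 0;
}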
+static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+ u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+ unsigned int i;
+ u8 volt, pre;
+
+ for (i = 0; i < mhdp->link.num_lanes; i++) {
+ volt = drm_dp_get_adjust_request_voltage(link_status, i);
+ pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+ if (volt + pre > 3)
+ cdns_mhdp_set_adjust_request_voltage(link_status, i,
+ 3 - pre);
+ if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
+ cdns_mhdp_set_adjust_request_voltage(link_status, i,
+ max_volt);
+ if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
+ cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
+ i, max_pre);
+ }
+}
+
+static void cdns_mhdp_print_lt_status(const char *prefix,
+ struct cdns_mhdp_device *mhdp,
+ union phy_configure_opts *phy_cfg)
+{
+ char vs[8] = "0/0/0/0";
+ char pe[8] = "0/0/0/0";
+ unsigned int i;
+
+ for (i = 0; i < mhdp->link.num_lanes; i++) {
+ vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
+ pe[i * 2] = '0' + phy_cfg->dp.pre[i];
+ }
+
+ vs[i * 2 - 1] = '\0';
+ pe[i * 2 - 1] = '\0';
+
+ dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
+ prefix,
+ mhdp->link.num_lanes, mhdp->link.rate / 100,
+ vs, pe);
+}
+
+static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
+ u8 eq_tps,
+ unsigned int training_interval)
+{
+ u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ union phy_configure_opts phy_cfg;
+ u32 reg32;
+ int ret;
+ bool r;
+
+ dev_dbg(mhdp->dev, "Starting EQ phase\n");
+
+ /* Enable link training TPS[eq_tps] in PHY */
+ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
+ CDNS_PHY_TRAINING_TYPE(eq_tps);
+ if (eq_tps != 4)
+ reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+ drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+ (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
+ CDNS_DP_TRAINING_PATTERN_4);
+
+ drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
+
+ do {
+ cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
+ &phy_cfg);
+ phy_cfg.dp.lanes = mhdp->link.num_lanes;
+ phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(mhdp->phy, &phy_cfg);
+ if (ret) {
+ dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
+ training_interval, lanes_data, link_status);
+
+ r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
+ if (!r)
+ goto err;
+
+ if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
+ cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
+ &phy_cfg);
+ return true;
+ }
+
+ fail_counter_short++;
+
+ cdns_mhdp_adjust_requested_eq(mhdp, link_status);
+ } while (fail_counter_short < 5);
+
+err:
+ cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
+
+ return false;
+}
+
+static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
+ u8 link_status[DP_LINK_STATUS_SIZE],
+ u8 *req_volt, u8 *req_pre)
+{
+ const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+ const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+ unsigned int i;
+
+ for (i = 0; i < mhdp->link.num_lanes; i++) {
+ u8 val;
+
+ val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
+ max_volt : req_volt[i];
+ cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
+
+ val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
+ max_pre : req_pre[i];
+ cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
+ }
+}
+
+static
+void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
+ bool *same_before_adjust, bool *max_swing_reached,
+ u8 before_cr[CDNS_DP_MAX_NUM_LANES],
+ u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
+ u8 *req_pre)
+{
+ const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
+ const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
+ bool same_pre, same_volt;
+ unsigned int i;
+ u8 adjust;
+
+ *same_before_adjust = false;
+ *max_swing_reached = false;
+ *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
+
+ for (i = 0; i < mhdp->link.num_lanes; i++) {
+ adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
+ req_volt[i] = min(adjust, max_volt);
+
+ adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ req_pre[i] = min(adjust, max_pre);
+
+ same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
+ req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+ req_volt[i];
+ if (same_pre && same_volt)
+ *same_before_adjust = true;
+
+ /* DP Standard v1.4, Section 3.1.5.2, Table 3-1 */
+ if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
+ *max_swing_reached = true;
+ return;
+ }
+ }
+}
+
+static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
+{
+ u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
+ fail_counter_short = 0, fail_counter_cr_long = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ bool cr_done;
+ union phy_configure_opts phy_cfg;
+ int ret;
+
+ dev_dbg(mhdp->dev, "Starting CR phase\n");
+
+ ret = cdns_mhdp_link_training_init(mhdp);
+ if (ret)
+ goto err;
+
+ drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
+
+ do {
+ u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
+ u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
+ bool same_before_adjust, max_swing_reached;
+
+ cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
+ &phy_cfg);
+ phy_cfg.dp.lanes = mhdp->link.num_lanes;
+ phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+ ret = phy_configure(mhdp->phy, &phy_cfg);
+ if (ret) {
+ dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
+ lanes_data, link_status);
+
+ cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
+ &max_swing_reached, lanes_data,
+ link_status,
+ requested_adjust_volt_swing,
+ requested_adjust_pre_emphasis);
+
+ if (max_swing_reached) {
+ dev_err(mhdp->dev, "CR: max swing reached\n");
+ goto err;
+ }
+
+ if (cr_done) {
+ cdns_mhdp_print_lt_status("CR phase ok", mhdp,
+ &phy_cfg);
+ return true;
+ }
+
+ /* Not all CR_DONE bits set */
+ fail_counter_cr_long++;
+
+ if (same_before_adjust) {
+ fail_counter_short++;
+ continue;
+ }
+
+ fail_counter_short = 0;
+ /*
+ * Voltage swing/pre-emphasis adjust requested
+ * during CR phase
+ */
+ cdns_mhdp_adjust_requested_cr(mhdp, link_status,
+ requested_adjust_volt_swing,
+ requested_adjust_pre_emphasis);
+ } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
+
+err:
+ cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
+
+ return false;
+}
+
+static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
+{
+ switch (drm_dp_link_rate_to_bw_code(link->rate)) {
+ case DP_LINK_BW_2_7:
+ link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
+ break;
+ case DP_LINK_BW_5_4:
+ link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
+ break;
+ case DP_LINK_BW_8_1:
+ link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+ break;
+ }
+}
+
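cdns_mhdp_lower_link_rate() only ever steps down one notch per call. A standalone sketch of the same ladder, assuming the standard conversion of DP bandwidth code times 27000 to the rate units used here:

#include <stdio.h>

/* DP bandwidth codes; drm_dp_bw_code_to_link_rate() is code * 27000. */
enum { BW_1_62 = 0x06, BW_2_7 = 0x0a, BW_5_4 = 0x14, BW_8_1 = 0x1e };
#define CODE_TO_RATE(c)	((c) * 27000)

static unsigned int lower_rate(unsigned int rate)
{
	switch (rate) {
	case CODE_TO_RATE(BW_8_1): return CODE_TO_RATE(BW_5_4);
	case CODE_TO_RATE(BW_5_4): return CODE_TO_RATE(BW_2_7);
	case CODE_TO_RATE(BW_2_7): return CODE_TO_RATE(BW_1_62);
	default: return rate;	/* already at the bottom */
	}
}

int main(void)
{
	unsigned int rate = CODE_TO_RATE(BW_8_1);

	while (rate != CODE_TO_RATE(BW_1_62)) {
		printf("%u -> ", rate);
		rate = lower_rate(rate);
	}
	printf("%u\n", rate);	/* 810000 -> 540000 -> 270000 -> 162000 */
	return 0;
}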
+static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
+ unsigned int training_interval)
+{
+ u32 reg32;
+ const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
+ int ret;
+
+ while (1) {
+ if (!cdns_mhdp_link_training_cr(mhdp)) {
+ if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
+ DP_LINK_BW_1_62) {
+ dev_dbg(mhdp->dev,
+ "Reducing link rate during CR phase\n");
+ cdns_mhdp_lower_link_rate(&mhdp->link);
+
+ continue;
+ } else if (mhdp->link.num_lanes > 1) {
+ dev_dbg(mhdp->dev,
+ "Reducing lanes number during CR phase\n");
+ mhdp->link.num_lanes >>= 1;
+ mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
+
+ continue;
+ }
+
+ dev_err(mhdp->dev,
+ "Link training failed during CR phase\n");
+ goto err;
+ }
+
+ if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
+ training_interval))
+ break;
+
+ if (mhdp->link.num_lanes > 1) {
+ dev_dbg(mhdp->dev,
+ "Reducing lanes number during EQ phase\n");
+ mhdp->link.num_lanes >>= 1;
+
+ continue;
+ } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
+ DP_LINK_BW_1_62) {
+ dev_dbg(mhdp->dev,
+ "Reducing link rate during EQ phase\n");
+ cdns_mhdp_lower_link_rate(&mhdp->link);
+ mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
+
+ continue;
+ }
+
+ dev_err(mhdp->dev, "Link training failed during EQ phase\n");
+ goto err;
+ }
+
+ dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
+ mhdp->link.num_lanes, mhdp->link.rate / 100);
+
+ drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+ mhdp->host.scrambler ? 0 :
+ DP_LINK_SCRAMBLING_DISABLE);
+
+ ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
+ if (ret < 0) {
+ dev_err(mhdp->dev,
+ "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+ ret);
+ return ret;
+ }
+ reg32 &= ~GENMASK(1, 0);
+ reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
+ reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
+ reg32 |= CDNS_DP_FRAMER_EN;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
+
+ /* Reset PHY config */
+ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+ if (!mhdp->host.scrambler)
+ reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+ return 0;
+err:
+ /* Reset PHY config */
+ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
+ if (!mhdp->host.scrambler)
+ reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
+
+ drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ return -EIO;
+}
+
+static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
+ u32 interval)
+{
+ if (interval == 0)
+ return 400;
+ if (interval < 5)
+ return 4000 << (interval - 1);
+ dev_err(mhdp->dev,
+ "wrong training interval returned by DPCD: %d\n", interval);
+ return 0;
+}
+
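For reference, the decoding above maps DPCD code 0 to 400 us and codes 1 through 4 to 4 ms doubling up to 32 ms; a quick standalone enumeration:

#include <stdio.h>

static unsigned int interval_us(unsigned int code)
{
	if (code == 0)
		return 400;
	if (code < 5)
		return 4000 << (code - 1);
	return 0;	/* invalid per the DPCD */
}

int main(void)
{
	unsigned int code;

	for (code = 0; code < 5; code++)
		printf("code %u -> %u us\n", code, interval_us(code));
	/* 400, 4000, 8000, 16000, 32000 us */
	return 0;
}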
+static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
+{
+ unsigned int link_rate;
+
+ /* Get source capabilities based on PHY attributes */
+
+ mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
+ if (!mhdp->host.lanes_cnt)
+ mhdp->host.lanes_cnt = 4;
+
+ link_rate = mhdp->phy->attrs.max_link_rate;
+ if (!link_rate)
+ link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
+ else
+ /* PHY uses Mb/s, DRM uses tens of kb/s. */
+ link_rate *= 100;
+
+ mhdp->host.link_rate = link_rate;
+ mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
+ mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
+ mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
+ CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
+ CDNS_SUPPORT_TPS(4);
+ mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
+ mhdp->host.fast_link = false;
+ mhdp->host.enhanced = true;
+ mhdp->host.scrambler = true;
+ mhdp->host.ssc = false;
+}
+
+static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ mhdp->sink.link_rate = mhdp->link.rate;
+ mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
+ mhdp->sink.enhanced = !!(mhdp->link.capabilities &
+ DP_LINK_CAP_ENHANCED_FRAMING);
+
+ /* Set SSC support */
+ mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
+ DP_MAX_DOWNSPREAD_0_5);
+
+ /* Set TPS support */
+ mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
+ if (drm_dp_tps3_supported(dpcd))
+ mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
+ if (drm_dp_tps4_supported(dpcd))
+ mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
+
+ /* Set fast link support */
+ mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
+ DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
+ u32 resp, interval, interval_us;
+ u8 ext_cap_chk = 0;
+ unsigned int addr;
+ int err;
+
+ WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
+
+ drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
+ &ext_cap_chk);
+
+ if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
+ addr = DP_DP13_DPCD_REV;
+ else
+ addr = DP_DPCD_REV;
+
+ err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
+ if (err < 0) {
+ dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
+ return err;
+ }
+
+ mhdp->link.revision = dpcd[0];
+ mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
+ mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+
+ if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+ dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
+ cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
+
+ cdns_mhdp_fill_sink_caps(mhdp, dpcd);
+
+ mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
+ mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
+
+ /* Disable framer for link training */
+ err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
+ if (err < 0) {
+ dev_err(mhdp->dev,
+ "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+ err);
+ return err;
+ }
+
+ resp &= ~CDNS_DP_FRAMER_EN;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
+
+ /* Spread AMP if required, enable 8b/10b coding */
+ amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
+ amp[1] = DP_SET_ANSI_8B10B;
+ drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
+
+ if (mhdp->host.fast_link && mhdp->sink.fast_link) {
+ dev_err(mhdp->dev, "fastlink not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
+ interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
+ if (!interval_us ||
+ cdns_mhdp_link_training(mhdp, interval_us)) {
+ dev_err(mhdp->dev, "Link training failed. Exiting.\n");
+ return -EIO;
+ }
+
+ mhdp->link_up = true;
+
+ return 0;
+}
+
+static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
+{
+ WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
+
+ if (mhdp->plugged)
+ cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
+
+ mhdp->link_up = false;
+}
+
+static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
+ struct drm_connector *connector)
+{
+ if (!mhdp->plugged)
+ return NULL;
+
+ return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
+}
+
+static int cdns_mhdp_get_modes(struct drm_connector *connector)
+{
+ struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
+ struct edid *edid;
+ int num_modes;
+
+ if (!mhdp->plugged)
+ return 0;
+
+ edid = cdns_mhdp_get_edid(mhdp, connector);
+ if (!edid) {
+ dev_err(mhdp->dev, "Failed to read EDID\n");
+ return 0;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ num_modes = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ /*
+ * HACK: Warn about unsupported display formats until we deal
+ * with them correctly.
+ */
+ if (connector->display_info.color_formats &&
+ !(connector->display_info.color_formats &
+ mhdp->display_fmt.color_format))
+ dev_warn(mhdp->dev,
+ "%s: No supported color_format found (0x%08x)\n",
+ __func__, connector->display_info.color_formats);
+
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < mhdp->display_fmt.bpc)
+ dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
+ __func__, connector->display_info.bpc,
+ mhdp->display_fmt.bpc);
+
+ return num_modes;
+}
+
+static int cdns_mhdp_connector_detect(struct drm_connector *conn,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+ return cdns_mhdp_detect(mhdp);
+}
+
+static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
+{
+ u32 bpp;
+
+ if (fmt->y_only)
+ return fmt->bpc;
+
+ switch (fmt->color_format) {
+ case DRM_COLOR_FORMAT_RGB444:
+ case DRM_COLOR_FORMAT_YCRCB444:
+ bpp = fmt->bpc * 3;
+ break;
+ case DRM_COLOR_FORMAT_YCRCB422:
+ bpp = fmt->bpc * 2;
+ break;
+ case DRM_COLOR_FORMAT_YCRCB420:
+ bpp = fmt->bpc * 3 / 2;
+ break;
+ default:
+ bpp = fmt->bpc * 3;
+ WARN_ON(1);
+ }
+ return bpp;
+}
+
+static
+bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
+ const struct drm_display_mode *mode,
+ unsigned int lanes, unsigned int rate)
+{
+ u32 max_bw, req_bw, bpp;
+
+ /*
+ * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
+ * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
+ * value thus equals the bandwidth in 10kb/s units, which matches the
+ * units of the rate parameter.
+ */
+
+ bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+ req_bw = mode->clock * bpp / 8;
+ max_bw = lanes * rate;
+ if (req_bw > max_bw) {
+ dev_dbg(mhdp->dev,
+ "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
+ mode->name, req_bw, max_bw);
+
+ return false;
+ }
+
+ return true;
+}
+
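Plugging numbers into the unit analysis above: a 1080p60 mode (148500 kHz pixel clock) at 24 bpp requests 445500 kB/s, while four HBR lanes supply 1080000 in the same 10 kb/s units. A minimal sketch of the comparison:

#include <stdbool.h>
#include <stdio.h>

/* Same arithmetic as cdns_mhdp_bandwidth_ok(): kB/s on one side and
 * 10 kb/s units on the other line up thanks to 8b/10b coding. */
static bool bandwidth_ok(unsigned int clock_khz, unsigned int bpp,
			 unsigned int lanes, unsigned int rate)
{
	unsigned int req_bw = clock_khz * bpp / 8;
	unsigned int max_bw = lanes * rate;

	return req_bw <= max_bw;
}

int main(void)
{
	/* 1080p60: 148500 kHz pixel clock, 24 bpp, 4 lanes at HBR. */
	printf("req=%u max=%u -> %s\n", 148500 * 24 / 8, 4 * 270000,
	       bandwidth_ok(148500, 24, 4, 270000) ? "ok" : "too slow");
	/* prints: req=445500 max=1080000 -> ok */
	return 0;
}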
+static
+enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+
+ mutex_lock(&mhdp->link_mutex);
+
+ if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+ mhdp->link.rate)) {
+ mutex_unlock(&mhdp->link_mutex);
+ return MODE_CLOCK_HIGH;
+ }
+
+ mutex_unlock(&mhdp->link_mutex);
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
+ .detect_ctx = cdns_mhdp_connector_detect,
+ .get_modes = cdns_mhdp_get_modes,
+ .mode_valid = cdns_mhdp_mode_valid,
+};
+
+static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+};
+
+static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
+{
+ u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
+ struct drm_connector *conn = &mhdp->connector;
+ struct drm_bridge *bridge = &mhdp->bridge;
+ int ret;
+
+ if (!bridge->encoder) {
+ dev_err(mhdp->dev, "Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ conn->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
+
+ ret = drm_display_info_set_bus_formats(&conn->display_info,
+ &bus_format, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_connector_attach_encoder(conn, bridge->encoder);
+ if (ret) {
+ dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+ bool hw_ready;
+ int ret;
+
+ dev_dbg(mhdp->dev, "%s\n", __func__);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = cdns_mhdp_connector_init(mhdp);
+ if (ret)
+ return ret;
+ }
+
+ spin_lock(&mhdp->start_lock);
+
+ mhdp->bridge_attached = true;
+ hw_ready = mhdp->hw_state == MHDP_HW_READY;
+
+ spin_unlock(&mhdp->start_lock);
+
+ /* Enable SW event interrupts */
+ if (hw_ready)
+ writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+ mhdp->regs + CDNS_APB_INT_MASK);
+
+ return 0;
+}
+
+static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
+ const struct drm_display_mode *mode)
+{
+ unsigned int dp_framer_sp = 0, msa_horizontal_1,
+ msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
+ misc0 = 0, misc1 = 0, pxl_repr,
+ front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
+ dp_vertical_1;
+ u8 stream_id = mhdp->stream_id;
+ u32 bpp, bpc, pxlfmt, framer;
+ int ret;
+
+ pxlfmt = mhdp->display_fmt.color_format;
+ bpc = mhdp->display_fmt.bpc;
+
+ /*
+ * If YCbCr is supported and the stream is not SD, use the ITU-R
+ * BT.709 coefficients. The ITU variant for YCbCr 4:2:0 still needs
+ * to be handled once supported.
+ */
+ if ((pxlfmt == DRM_COLOR_FORMAT_YCRCB444 ||
+ pxlfmt == DRM_COLOR_FORMAT_YCRCB422) && mode->crtc_vdisplay >= 720)
+ misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
+
+ bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+
+ switch (pxlfmt) {
+ case DRM_COLOR_FORMAT_RGB444:
+ pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
+ misc0 |= DP_COLOR_FORMAT_RGB;
+ break;
+ case DRM_COLOR_FORMAT_YCRCB444:
+ pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
+ misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
+ break;
+ case DRM_COLOR_FORMAT_YCRCB422:
+ pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
+ misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
+ break;
+ case DRM_COLOR_FORMAT_YCRCB420:
+ pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
+ break;
+ default:
+ pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
+ }
+
+ switch (bpc) {
+ case 6:
+ misc0 |= DP_TEST_BIT_DEPTH_6;
+ pxl_repr |= CDNS_DP_FRAMER_6_BPC;
+ break;
+ case 8:
+ misc0 |= DP_TEST_BIT_DEPTH_8;
+ pxl_repr |= CDNS_DP_FRAMER_8_BPC;
+ break;
+ case 10:
+ misc0 |= DP_TEST_BIT_DEPTH_10;
+ pxl_repr |= CDNS_DP_FRAMER_10_BPC;
+ break;
+ case 12:
+ misc0 |= DP_TEST_BIT_DEPTH_12;
+ pxl_repr |= CDNS_DP_FRAMER_12_BPC;
+ break;
+ case 16:
+ misc0 |= DP_TEST_BIT_DEPTH_16;
+ pxl_repr |= CDNS_DP_FRAMER_16_BPC;
+ break;
+ }
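+
+ /*
+ * For instance, with RGB at 8 bpc the DP spec's MISC0 encoding
+ * (DP_COLOR_FORMAT_RGB = 0, DP_TEST_BIT_DEPTH_8 = 1 << 5 in
+ * drm_dp_helper.h) leaves misc0 at 0x20 here.
+ */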
+
+ bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
+
+ cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
+ bnd_hsync2vsync);
+
+ hsync2vsync_pol_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
+ cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
+ hsync2vsync_pol_ctrl);
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
+
+ front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
+ CDNS_DP_FRONT_PORCH(front_porch) |
+ CDNS_DP_BACK_PORCH(back_porch));
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
+ mode->crtc_hdisplay * bpp / 8);
+
+ msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
+ CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
+ CDNS_DP_MSAH0_HSYNC_START(msa_h0));
+
+ hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
+ CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
+ msa_horizontal_1);
+
+ msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
+ CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
+ CDNS_DP_MSAV0_VSYNC_START(msa_v0));
+
+ vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
+ CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
+ msa_vertical_1);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ mode->crtc_vtotal % 2 == 0)
+ misc1 = DP_TEST_INTERLACED;
+ if (mhdp->display_fmt.y_only)
+ misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
+ /* Use VSC SDP for Y420 */
+ if (pxlfmt == DRM_COLOR_FORMAT_YCRCB420)
+ misc1 = CDNS_DP_TEST_VSC_SDP;
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
+ misc0 | (misc1 << 8));
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
+ CDNS_DP_H_HSYNC_WIDTH(hsync) |
+ CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
+ CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
+ CDNS_DP_V0_VSTART(msa_v0));
+
+ dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ mode->crtc_vtotal % 2 == 0)
+ dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
+
+ cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+ CDNS_DP_VB_ID_INTERLACED : 0);
+
+ ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
+ if (ret < 0) {
+ dev_err(mhdp->dev,
+ "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
+ ret);
+ return;
+ }
+ framer |= CDNS_DP_FRAMER_EN;
+ framer &= ~CDNS_DP_NO_VIDEO_MODE;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
+}
+
+static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
+ const struct drm_display_mode *mode)
+{
+ u32 rate, vs, required_bandwidth, available_bandwidth;
+ s32 line_thresh1, line_thresh2, line_thresh = 0;
+ int pxlclock = mode->crtc_clock;
+ u32 tu_size = 64;
+ u32 bpp;
+
+ /* Get rate in MSymbols per second per lane */
+ rate = mhdp->link.rate / 1000;
+
+ bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
+
+ required_bandwidth = pxlclock * bpp / 8;
+ available_bandwidth = mhdp->link.num_lanes * rate;
+
+ vs = tu_size * required_bandwidth / available_bandwidth;
+ vs /= 1000;
+
+ if (vs == tu_size)
+ vs = tu_size - 1;
+
+ line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
+ line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
+ line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
+ line_thresh = (line_thresh >> 5) + 2;
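+
+ /*
+ * Illustrative numbers (not from the patch): 1080p60 at 24 bpp over
+ * four HBR lanes gives required_bandwidth = 445500 and
+ * available_bandwidth = 4 * 270 = 1080, hence vs = 26: roughly 26 of
+ * the 64 symbols in each transfer unit carry video payload.
+ */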
+
+ mhdp->stream_id = 0;
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
+ CDNS_DP_FRAMER_TU_VS(vs) |
+ CDNS_DP_FRAMER_TU_SIZE(tu_size) |
+ CDNS_DP_FRAMER_TU_CNT_RST_EN);
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
+ line_thresh & GENMASK(5, 0));
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
+ CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
+ 0 : tu_size - vs));
+
+ cdns_mhdp_configure_video(mhdp, mode);
+}
+
+static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct cdns_mhdp_bridge_state *mhdp_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_bridge_state *new_state;
+ const struct drm_display_mode *mode;
+ u32 resp;
+ int ret = 0;
+
+ dev_dbg(mhdp->dev, "bridge enable\n");
+
+ mutex_lock(&mhdp->link_mutex);
+
+ if (mhdp->plugged && !mhdp->link_up) {
+ ret = cdns_mhdp_link_up(mhdp);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
+ mhdp->info->ops->enable(mhdp);
+
+ /* Enable VIF clock for stream 0 */
+ ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
+ if (ret < 0) {
+ dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
+ goto out;
+ }
+
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
+ resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ goto out;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ goto out;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ goto out;
+
+ mode = &crtc_state->adjusted_mode;
+
+ new_state = drm_atomic_get_new_bridge_state(state, bridge);
+ if (WARN_ON(!new_state))
+ goto out;
+
+ if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+ mhdp->link.rate)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cdns_mhdp_sst_enable(mhdp, mode);
+
+ mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+
+ mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+ drm_mode_set_name(mhdp_state->current_mode);
+
+ dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
+
+ mhdp->bridge_enabled = true;
+
+out:
+ mutex_unlock(&mhdp->link_mutex);
+ if (ret < 0)
+ schedule_work(&mhdp->modeset_retry_work);
+}
+
+static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+ u32 resp;
+
+ dev_dbg(mhdp->dev, "%s\n", __func__);
+
+ mutex_lock(&mhdp->link_mutex);
+
+ mhdp->bridge_enabled = false;
+ cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
+ resp &= ~CDNS_DP_FRAMER_EN;
+ resp |= CDNS_DP_NO_VIDEO_MODE;
+ cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
+
+ cdns_mhdp_link_down(mhdp);
+
+ /* Disable VIF clock for stream 0 */
+ cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
+ cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
+ resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
+
+ if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
+ mhdp->info->ops->disable(mhdp);
+
+ mutex_unlock(&mhdp->link_mutex);
+}
+
+static void cdns_mhdp_detach(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ dev_dbg(mhdp->dev, "%s\n", __func__);
+
+ spin_lock(&mhdp->start_lock);
+
+ mhdp->bridge_attached = false;
+
+ spin_unlock(&mhdp->start_lock);
+
+ writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static struct drm_bridge_state *
+cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_bridge_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+ return &state->base;
+}
+
+static void
+cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct cdns_mhdp_bridge_state *cdns_mhdp_state;
+
+ cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
+
+ if (cdns_mhdp_state->current_mode) {
+ drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
+ cdns_mhdp_state->current_mode = NULL;
+ }
+
+ kfree(cdns_mhdp_state);
+}
+
+static struct drm_bridge_state *
+cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_bridge_state *cdns_mhdp_state;
+
+ cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
+ if (!cdns_mhdp_state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
+
+ return &cdns_mhdp_state->base;
+}
+
+static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+
+ mutex_lock(&mhdp->link_mutex);
+
+ if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+ mhdp->link.rate)) {
+ dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
+ __func__, mode->name, mhdp->link.num_lanes,
+ mhdp->link.rate / 100);
+ mutex_unlock(&mhdp->link_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&mhdp->link_mutex);
+ return 0;
+}
+
+static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ return cdns_mhdp_detect(mhdp);
+}
+
+static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ return cdns_mhdp_get_edid(mhdp, connector);
+}
+
+static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ /* Enable SW event interrupts */
+ if (mhdp->bridge_attached)
+ writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
+ mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
+}
+
+static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
+ .atomic_enable = cdns_mhdp_atomic_enable,
+ .atomic_disable = cdns_mhdp_atomic_disable,
+ .atomic_check = cdns_mhdp_atomic_check,
+ .attach = cdns_mhdp_attach,
+ .detach = cdns_mhdp_detach,
+ .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
+ .atomic_reset = cdns_mhdp_bridge_atomic_reset,
+ .detect = cdns_mhdp_bridge_detect,
+ .get_edid = cdns_mhdp_bridge_get_edid,
+ .hpd_enable = cdns_mhdp_bridge_hpd_enable,
+ .hpd_disable = cdns_mhdp_bridge_hpd_disable,
+};
+
+static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
+{
+ int hpd_event, hpd_status;
+
+ *hpd_pulse = false;
+
+ hpd_event = cdns_mhdp_read_hpd_event(mhdp);
+
+ /* Getting event bits failed, bail out */
+ if (hpd_event < 0) {
+ dev_warn(mhdp->dev, "%s: read event failed: %d\n",
+ __func__, hpd_event);
+ return false;
+ }
+
+ hpd_status = cdns_mhdp_get_hpd_status(mhdp);
+ if (hpd_status < 0) {
+ dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
+ __func__, hpd_status);
+ return false;
+ }
+
+ if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
+ *hpd_pulse = true;
+
+ return !!hpd_status;
+}
+
+static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
+{
+ struct cdns_mhdp_bridge_state *cdns_bridge_state;
+ struct drm_display_mode *current_mode;
+ bool old_plugged = mhdp->plugged;
+ struct drm_bridge_state *state;
+ u8 status[DP_LINK_STATUS_SIZE];
+ bool hpd_pulse;
+ int ret = 0;
+
+ mutex_lock(&mhdp->link_mutex);
+
+ mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
+
+ if (!mhdp->plugged) {
+ cdns_mhdp_link_down(mhdp);
+ mhdp->link.rate = mhdp->host.link_rate;
+ mhdp->link.num_lanes = mhdp->host.lanes_cnt;
+ goto out;
+ }
+
+ /*
+ * If we get a HPD pulse event and we were and still are connected,
+ * check the link status. If link status is ok, there's nothing to do
+ * as we don't handle DP interrupts. If link status is bad, continue
+ * with full link setup.
+ */
+ if (hpd_pulse && old_plugged == mhdp->plugged) {
+ ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
+
+ /*
+ * If everything looks fine, just return, as we don't handle
+ * DP IRQs.
+ */
+ if (ret > 0 &&
+ drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
+ drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
+ goto out;
+
+ /* If link is bad, mark link as down so that we do a new LT */
+ mhdp->link_up = false;
+ }
+
+ if (!mhdp->link_up) {
+ ret = cdns_mhdp_link_up(mhdp);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (mhdp->bridge_enabled) {
+ state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
+ if (!state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
+ if (!cdns_bridge_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ current_mode = cdns_bridge_state->current_mode;
+ if (!current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
+ mhdp->link.rate)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
+ current_mode->name);
+
+ cdns_mhdp_sst_enable(mhdp, current_mode);
+ }
+out:
+ mutex_unlock(&mhdp->link_mutex);
+ return ret;
+}
+
+static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
+{
+ struct cdns_mhdp_device *mhdp;
+ struct drm_connector *conn;
+
+ mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
+
+ conn = &mhdp->connector;
+
+ /* Grab the locks before changing connector property */
+ mutex_lock(&conn->dev->mode_config.mutex);
+
+ /*
+ * Set connector link status to BAD and send a Uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&conn->dev->mode_config.mutex);
+
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+}
+
+static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
+{
+ struct cdns_mhdp_device *mhdp = data;
+ u32 apb_stat, sw_ev0;
+ bool bridge_attached;
+ int ret;
+
+ apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
+ if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
+ return IRQ_NONE;
+
+ sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
+
+ /*
+ * Calling drm_kms_helper_hotplug_event() when not attached
+ * to drm device causes an oops because the drm_bridge->dev
+ * is NULL. See cdns_mhdp_fw_cb() comments for details about the
+ * problems related to the drm_kms_helper_hotplug_event() call.
+ */
+ spin_lock(&mhdp->start_lock);
+ bridge_attached = mhdp->bridge_attached;
+ spin_unlock(&mhdp->start_lock);
+
+ if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
+ ret = cdns_mhdp_update_link_status(mhdp);
+ if (mhdp->connector.dev) {
+ if (ret < 0)
+ schedule_work(&mhdp->modeset_retry_work);
+ else
+ drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+ } else {
+ drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cdns_mhdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_mhdp_device *mhdp;
+ unsigned long rate;
+ struct clk *clk;
+ int ret;
+ int irq;
+
+ mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
+ if (!mhdp)
+ return -ENOMEM;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ mhdp->clk = clk;
+ mhdp->dev = dev;
+ mutex_init(&mhdp->mbox_mutex);
+ mutex_init(&mhdp->link_mutex);
+ spin_lock_init(&mhdp->start_lock);
+
+ drm_dp_aux_init(&mhdp->aux);
+ mhdp->aux.dev = dev;
+ mhdp->aux.transfer = cdns_mhdp_transfer;
+
+ mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mhdp->regs)) {
+ dev_err(dev, "Failed to get memory resource\n");
+ return PTR_ERR(mhdp->regs);
+ }
+
+ mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
+ if (IS_ERR(mhdp->phy)) {
+ dev_err(dev, "no PHY configured\n");
+ return PTR_ERR(mhdp->phy);
+ }
+
+ platform_set_drvdata(pdev, mhdp);
+
+ mhdp->info = of_device_get_match_data(dev);
+
+ clk_prepare_enable(clk);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_disable(dev);
+ goto clk_disable;
+ }
+
+ if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
+ ret = mhdp->info->ops->init(mhdp);
+ if (ret != 0) {
+ dev_err(dev, "MHDP platform initialization failed: %d\n",
+ ret);
+ goto runtime_put;
+ }
+ }
+
+ rate = clk_get_rate(clk);
+ writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
+ writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
+
+ dev_dbg(dev, "func clk rate %lu Hz\n", rate);
+
+ writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
+ cdns_mhdp_irq_handler, IRQF_ONESHOT,
+ "mhdp8546", mhdp);
+ if (ret) {
+ dev_err(dev, "cannot install IRQ %d\n", irq);
+ ret = -EIO;
+ goto plat_fini;
+ }
+
+ cdns_mhdp_fill_host_caps(mhdp);
+
+ /* Initialize link rate and num of lanes to host values */
+ mhdp->link.rate = mhdp->host.link_rate;
+ mhdp->link.num_lanes = mhdp->host.lanes_cnt;
+
+ /* The only currently supported format */
+ mhdp->display_fmt.y_only = false;
+ mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
+ mhdp->display_fmt.bpc = 8;
+
+ mhdp->bridge.of_node = pdev->dev.of_node;
+ mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
+ mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD;
+ mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ if (mhdp->info)
+ mhdp->bridge.timings = mhdp->info->timings;
+
+ ret = phy_init(mhdp->phy);
+ if (ret) {
+ dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
+ goto plat_fini;
+ }
+
+ /* Initialize the work for modeset in case of link train failure */
+ INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
+
+ init_waitqueue_head(&mhdp->fw_load_wq);
+
+ ret = cdns_mhdp_load_firmware(mhdp);
+ if (ret)
+ goto phy_exit;
+
+ drm_bridge_add(&mhdp->bridge);
+
+ return 0;
+
+phy_exit:
+ phy_exit(mhdp->phy);
+plat_fini:
+ if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
+ mhdp->info->ops->exit(mhdp);
+runtime_put:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+clk_disable:
+ clk_disable_unprepare(mhdp->clk);
+
+ return ret;
+}
+
+static int cdns_mhdp_remove(struct platform_device *pdev)
+{
+ struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
+ unsigned long timeout = msecs_to_jiffies(100);
+ bool stop_fw = false;
+ int ret;
+
+ drm_bridge_remove(&mhdp->bridge);
+
+ ret = wait_event_timeout(mhdp->fw_load_wq,
+ mhdp->hw_state == MHDP_HW_READY,
+ timeout);
+ if (ret == 0)
+ dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
+ __func__);
+ else
+ stop_fw = true;
+
+ spin_lock(&mhdp->start_lock);
+ mhdp->hw_state = MHDP_HW_STOPPED;
+ spin_unlock(&mhdp->start_lock);
+
+ if (stop_fw)
+ ret = cdns_mhdp_set_firmware_active(mhdp, false);
+
+ phy_exit(mhdp->phy);
+
+ if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
+ mhdp->info->ops->exit(mhdp);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ cancel_work_sync(&mhdp->modeset_retry_work);
+ flush_scheduled_work();
+
+ clk_disable_unprepare(mhdp->clk);
+
+ return ret;
+}
+
+static const struct of_device_id mhdp_ids[] = {
+ { .compatible = "cdns,mhdp8546", },
+#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
+ { .compatible = "ti,j721e-mhdp8546",
+ .data = &(const struct cdns_mhdp_platform_info) {
+ .timings = &mhdp_ti_j721e_bridge_timings,
+ .ops = &mhdp_ti_j721e_ops,
+ },
+ },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mhdp_ids);
+
+static struct platform_driver mhdp_driver = {
+ .driver = {
+ .name = "cdns-mhdp8546",
+ .of_match_table = of_match_ptr(mhdp_ids),
+ },
+ .probe = cdns_mhdp_probe,
+ .remove = cdns_mhdp_remove,
+};
+module_platform_driver(mhdp_driver);
+
+MODULE_FIRMWARE(FW_NAME);
+
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
+MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cdns-mhdp8546");
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
new file mode 100644
index 000000000000..5897a85e3159
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ * Author: Quentin Schulz <quentin.schulz@free-electrons.com>
+ * Swapnil Jakhade <sjakhade@cadence.com>
+ */
+
+#ifndef CDNS_MHDP8546_CORE_H
+#define CDNS_MHDP8546_CORE_H
+
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_dp_helper.h>
+
+struct clk;
+struct device;
+struct phy;
+
+/* Register offsets */
+#define CDNS_APB_CTRL 0x00000
+#define CDNS_CPU_STALL BIT(3)
+
+#define CDNS_MAILBOX_FULL 0x00008
+#define CDNS_MAILBOX_EMPTY 0x0000c
+#define CDNS_MAILBOX_TX_DATA 0x00010
+#define CDNS_MAILBOX_RX_DATA 0x00014
+#define CDNS_KEEP_ALIVE 0x00018
+#define CDNS_KEEP_ALIVE_MASK GENMASK(7, 0)
+
+#define CDNS_VER_L 0x0001C
+#define CDNS_VER_H 0x00020
+#define CDNS_LIB_L_ADDR 0x00024
+#define CDNS_LIB_H_ADDR 0x00028
+
+#define CDNS_MB_INT_MASK 0x00034
+#define CDNS_MB_INT_STATUS 0x00038
+
+#define CDNS_SW_CLK_L 0x0003c
+#define CDNS_SW_CLK_H 0x00040
+
+#define CDNS_SW_EVENT0 0x00044
+#define CDNS_DPTX_HPD BIT(0)
+
+#define CDNS_SW_EVENT1 0x00048
+#define CDNS_SW_EVENT2 0x0004c
+#define CDNS_SW_EVENT3 0x00050
+
+#define CDNS_APB_INT_MASK 0x0006C
+#define CDNS_APB_INT_MASK_MAILBOX_INT BIT(0)
+#define CDNS_APB_INT_MASK_SW_EVENT_INT BIT(1)
+
+#define CDNS_APB_INT_STATUS 0x00070
+
+#define CDNS_DPTX_CAR 0x00904
+#define CDNS_VIF_CLK_EN BIT(0)
+#define CDNS_VIF_CLK_RSTN BIT(1)
+
+#define CDNS_SOURCE_VIDEO_IF(s) (0x00b00 + ((s) * 0x20))
+#define CDNS_BND_HSYNC2VSYNC(s) (CDNS_SOURCE_VIDEO_IF(s) + \
+ 0x00)
+#define CDNS_IP_DTCT_WIN GENMASK(11, 0)
+#define CDNS_IP_DET_INTERLACE_FORMAT BIT(12)
+#define CDNS_IP_BYPASS_V_INTERFACE BIT(13)
+
+#define CDNS_HSYNC2VSYNC_POL_CTRL(s) (CDNS_SOURCE_VIDEO_IF(s) + \
+ 0x10)
+#define CDNS_H2V_HSYNC_POL_ACTIVE_LOW BIT(1)
+#define CDNS_H2V_VSYNC_POL_ACTIVE_LOW BIT(2)
+
+#define CDNS_DPTX_PHY_CONFIG 0x02000
+#define CDNS_PHY_TRAINING_EN BIT(0)
+#define CDNS_PHY_TRAINING_TYPE(x) (((x) & GENMASK(3, 0)) << 1)
+#define CDNS_PHY_SCRAMBLER_BYPASS BIT(5)
+#define CDNS_PHY_ENCODER_BYPASS BIT(6)
+#define CDNS_PHY_SKEW_BYPASS BIT(7)
+#define CDNS_PHY_TRAINING_AUTO BIT(8)
+#define CDNS_PHY_LANE0_SKEW(x) (((x) & GENMASK(2, 0)) << 9)
+#define CDNS_PHY_LANE1_SKEW(x) (((x) & GENMASK(2, 0)) << 12)
+#define CDNS_PHY_LANE2_SKEW(x) (((x) & GENMASK(2, 0)) << 15)
+#define CDNS_PHY_LANE3_SKEW(x) (((x) & GENMASK(2, 0)) << 18)
+#define CDNS_PHY_COMMON_CONFIG (CDNS_PHY_LANE1_SKEW(1) | \
+ CDNS_PHY_LANE2_SKEW(2) | \
+ CDNS_PHY_LANE3_SKEW(3))
+#define CDNS_PHY_10BIT_EN BIT(21)
+
+#define CDNS_DP_FRAMER_GLOBAL_CONFIG 0x02200
+#define CDNS_DP_NUM_LANES(x) ((x) - 1)
+#define CDNS_DP_MST_EN BIT(2)
+#define CDNS_DP_FRAMER_EN BIT(3)
+#define CDNS_DP_RATE_GOVERNOR_EN BIT(4)
+#define CDNS_DP_NO_VIDEO_MODE BIT(5)
+#define CDNS_DP_DISABLE_PHY_RST BIT(6)
+#define CDNS_DP_WR_FAILING_EDGE_VSYNC BIT(7)
+
+#define CDNS_DP_FRAMER_TU 0x02208
+#define CDNS_DP_FRAMER_TU_SIZE(x) (((x) & GENMASK(6, 0)) << 8)
+#define CDNS_DP_FRAMER_TU_VS(x) ((x) & GENMASK(5, 0))
+#define CDNS_DP_FRAMER_TU_CNT_RST_EN BIT(15)
+
+#define CDNS_DP_MTPH_CONTROL 0x02264
+#define CDNS_DP_MTPH_ECF_EN BIT(0)
+#define CDNS_DP_MTPH_ACT_EN BIT(1)
+#define CDNS_DP_MTPH_LVP_EN BIT(2)
+
+#define CDNS_DP_MTPH_STATUS 0x0226C
+#define CDNS_DP_MTPH_ACT_STATUS BIT(0)
+
+#define CDNS_DP_LANE_EN 0x02300
+#define CDNS_DP_LANE_EN_LANES(x) GENMASK((x) - 1, 0)
+
+#define CDNS_DP_ENHNCD 0x02304
+
+#define CDNS_DPTX_STREAM(s) (0x03000 + (s) * 0x80)
+#define CDNS_DP_MSA_HORIZONTAL_0(s) (CDNS_DPTX_STREAM(s) + 0x00)
+#define CDNS_DP_MSAH0_H_TOTAL(x) (x)
+#define CDNS_DP_MSAH0_HSYNC_START(x) ((x) << 16)
+
+#define CDNS_DP_MSA_HORIZONTAL_1(s) (CDNS_DPTX_STREAM(s) + 0x04)
+#define CDNS_DP_MSAH1_HSYNC_WIDTH(x) (x)
+#define CDNS_DP_MSAH1_HSYNC_POL_LOW BIT(15)
+#define CDNS_DP_MSAH1_HDISP_WIDTH(x) ((x) << 16)
+
+#define CDNS_DP_MSA_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x08)
+#define CDNS_DP_MSAV0_V_TOTAL(x) (x)
+#define CDNS_DP_MSAV0_VSYNC_START(x) ((x) << 16)
+
+#define CDNS_DP_MSA_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x0c)
+#define CDNS_DP_MSAV1_VSYNC_WIDTH(x) (x)
+#define CDNS_DP_MSAV1_VSYNC_POL_LOW BIT(15)
+#define CDNS_DP_MSAV1_VDISP_WIDTH(x) ((x) << 16)
+
+#define CDNS_DP_MSA_MISC(s) (CDNS_DPTX_STREAM(s) + 0x10)
+#define CDNS_DP_STREAM_CONFIG(s) (CDNS_DPTX_STREAM(s) + 0x14)
+#define CDNS_DP_STREAM_CONFIG_2(s) (CDNS_DPTX_STREAM(s) + 0x2c)
+#define CDNS_DP_SC2_TU_VS_DIFF(x) ((x) << 8)
+
+#define CDNS_DP_HORIZONTAL(s) (CDNS_DPTX_STREAM(s) + 0x30)
+#define CDNS_DP_H_HSYNC_WIDTH(x) (x)
+#define CDNS_DP_H_H_TOTAL(x) ((x) << 16)
+
+#define CDNS_DP_VERTICAL_0(s) (CDNS_DPTX_STREAM(s) + 0x34)
+#define CDNS_DP_V0_VHEIGHT(x) (x)
+#define CDNS_DP_V0_VSTART(x) ((x) << 16)
+
+#define CDNS_DP_VERTICAL_1(s) (CDNS_DPTX_STREAM(s) + 0x38)
+#define CDNS_DP_V1_VTOTAL(x) (x)
+#define CDNS_DP_V1_VTOTAL_EVEN BIT(16)
+
+#define CDNS_DP_MST_SLOT_ALLOCATE(s) (CDNS_DPTX_STREAM(s) + 0x44)
+#define CDNS_DP_S_ALLOC_START_SLOT(x) (x)
+#define CDNS_DP_S_ALLOC_END_SLOT(x) ((x) << 8)
+
+#define CDNS_DP_RATE_GOVERNING(s) (CDNS_DPTX_STREAM(s) + 0x48)
+#define CDNS_DP_RG_TARG_AV_SLOTS_Y(x) (x)
+#define CDNS_DP_RG_TARG_AV_SLOTS_X(x) ((x) << 4)
+#define CDNS_DP_RG_ENABLE BIT(10)
+
+#define CDNS_DP_FRAMER_PXL_REPR(s) (CDNS_DPTX_STREAM(s) + 0x4c)
+#define CDNS_DP_FRAMER_6_BPC BIT(0)
+#define CDNS_DP_FRAMER_8_BPC BIT(1)
+#define CDNS_DP_FRAMER_10_BPC BIT(2)
+#define CDNS_DP_FRAMER_12_BPC BIT(3)
+#define CDNS_DP_FRAMER_16_BPC BIT(4)
+#define CDNS_DP_FRAMER_PXL_FORMAT 0x8
+#define CDNS_DP_FRAMER_RGB BIT(0)
+#define CDNS_DP_FRAMER_YCBCR444 BIT(1)
+#define CDNS_DP_FRAMER_YCBCR422 BIT(2)
+#define CDNS_DP_FRAMER_YCBCR420 BIT(3)
+#define CDNS_DP_FRAMER_Y_ONLY BIT(4)
+
+#define CDNS_DP_FRAMER_SP(s) (CDNS_DPTX_STREAM(s) + 0x50)
+#define CDNS_DP_FRAMER_VSYNC_POL_LOW BIT(0)
+#define CDNS_DP_FRAMER_HSYNC_POL_LOW BIT(1)
+#define CDNS_DP_FRAMER_INTERLACE BIT(2)
+
+#define CDNS_DP_LINE_THRESH(s) (CDNS_DPTX_STREAM(s) + 0x64)
+#define CDNS_DP_ACTIVE_LINE_THRESH(x) (x)
+
+#define CDNS_DP_VB_ID(s) (CDNS_DPTX_STREAM(s) + 0x68)
+#define CDNS_DP_VB_ID_INTERLACED BIT(2)
+#define CDNS_DP_VB_ID_COMPRESSED BIT(6)
+
+#define CDNS_DP_FRONT_BACK_PORCH(s) (CDNS_DPTX_STREAM(s) + 0x78)
+#define CDNS_DP_BACK_PORCH(x) (x)
+#define CDNS_DP_FRONT_PORCH(x) ((x) << 16)
+
+#define CDNS_DP_BYTE_COUNT(s) (CDNS_DPTX_STREAM(s) + 0x7c)
+#define CDNS_DP_BYTE_COUNT_BYTES_IN_CHUNK_SHIFT 16
+
+/* mailbox */
+#define MAILBOX_RETRY_US 1000
+#define MAILBOX_TIMEOUT_US 2000000
+
+#define MB_OPCODE_ID 0
+#define MB_MODULE_ID 1
+#define MB_SIZE_MSB_ID 2
+#define MB_SIZE_LSB_ID 3
+#define MB_DATA_ID 4
+
+#define MB_MODULE_ID_DP_TX 0x01
+#define MB_MODULE_ID_HDCP_TX 0x07
+#define MB_MODULE_ID_HDCP_RX 0x08
+#define MB_MODULE_ID_HDCP_GENERAL 0x09
+#define MB_MODULE_ID_GENERAL 0x0a
+
+/* firmware and opcodes */
+#define FW_NAME "cadence/mhdp8546.bin"
+#define CDNS_MHDP_IMEM 0x10000
+
+#define GENERAL_MAIN_CONTROL 0x01
+#define GENERAL_TEST_ECHO 0x02
+#define GENERAL_BUS_SETTINGS 0x03
+#define GENERAL_TEST_ACCESS 0x04
+#define GENERAL_REGISTER_READ 0x07
+
+#define DPTX_SET_POWER_MNG 0x00
+#define DPTX_GET_EDID 0x02
+#define DPTX_READ_DPCD 0x03
+#define DPTX_WRITE_DPCD 0x04
+#define DPTX_ENABLE_EVENT 0x05
+#define DPTX_WRITE_REGISTER 0x06
+#define DPTX_READ_REGISTER 0x07
+#define DPTX_WRITE_FIELD 0x08
+#define DPTX_READ_EVENT 0x0a
+#define DPTX_GET_LAST_AUX_STAUS 0x0e
+#define DPTX_HPD_STATE 0x11
+#define DPTX_ADJUST_LT 0x12
+
+#define FW_STANDBY 0
+#define FW_ACTIVE 1
+
+/* HPD */
+#define DPTX_READ_EVENT_HPD_TO_HIGH BIT(0)
+#define DPTX_READ_EVENT_HPD_TO_LOW BIT(1)
+#define DPTX_READ_EVENT_HPD_PULSE BIT(2)
+#define DPTX_READ_EVENT_HPD_STATE BIT(3)
+
+/* general */
+#define CDNS_DP_TRAINING_PATTERN_4 0x7
+
+#define CDNS_KEEP_ALIVE_TIMEOUT 2000
+
+#define CDNS_VOLT_SWING(x) ((x) & GENMASK(1, 0))
+#define CDNS_FORCE_VOLT_SWING BIT(2)
+
+#define CDNS_PRE_EMPHASIS(x) ((x) & GENMASK(1, 0))
+#define CDNS_FORCE_PRE_EMPHASIS BIT(2)
+
+#define CDNS_SUPPORT_TPS(x) BIT((x) - 1)
+
+#define CDNS_FAST_LINK_TRAINING BIT(0)
+
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_0(x) ((x) & GENMASK(1, 0))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_1(x) ((x) & GENMASK(3, 2))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_2(x) ((x) & GENMASK(5, 4))
+#define CDNS_LANE_MAPPING_TYPE_C_LANE_3(x) ((x) & GENMASK(7, 6))
+#define CDNS_LANE_MAPPING_NORMAL 0xe4
+#define CDNS_LANE_MAPPING_FLIPPED 0x1b
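+/*
+ * Each two-bit field above picks the physical lane for one logical lane:
+ * 0xe4 is 0b11100100, the identity mapping of lanes 0..3, while 0x1b is
+ * 0b00011011, the fully reversed mapping.
+ */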
+
+#define CDNS_DP_MAX_NUM_LANES 4
+#define CDNS_DP_TEST_VSC_SDP BIT(6) /* 1.3+ */
+#define CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY BIT(7)
+
+#define CDNS_MHDP_MAX_STREAMS 4
+
+#define DP_LINK_CAP_ENHANCED_FRAMING BIT(0)
+
+struct cdns_mhdp_link {
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int num_lanes;
+ unsigned long capabilities;
+};
+
+struct cdns_mhdp_host {
+ unsigned int link_rate;
+ u8 lanes_cnt;
+ u8 volt_swing;
+ u8 pre_emphasis;
+ u8 pattern_supp;
+ u8 lane_mapping;
+ bool fast_link;
+ bool enhanced;
+ bool scrambler;
+ bool ssc;
+};
+
+struct cdns_mhdp_sink {
+ unsigned int link_rate;
+ u8 lanes_cnt;
+ u8 pattern_supp;
+ bool fast_link;
+ bool enhanced;
+ bool ssc;
+};
+
+struct cdns_mhdp_display_fmt {
+ u32 color_format;
+ u32 bpc;
+ bool y_only;
+};
+
+/*
+ * These enums represent the MHDP hw initialization state.
+ * Legal state transitions are:
+ * MHDP_HW_READY <-> MHDP_HW_STOPPED
+ */
+enum mhdp_hw_state {
+ MHDP_HW_READY = 1, /* HW ready, FW active */
+ MHDP_HW_STOPPED /* Driver removal, FW to be stopped */
+};
+
+struct cdns_mhdp_device;
+
+struct mhdp_platform_ops {
+ int (*init)(struct cdns_mhdp_device *mhdp);
+ void (*exit)(struct cdns_mhdp_device *mhdp);
+ void (*enable)(struct cdns_mhdp_device *mhdp);
+ void (*disable)(struct cdns_mhdp_device *mhdp);
+};
+
+struct cdns_mhdp_bridge_state {
+ struct drm_bridge_state base;
+ struct drm_display_mode *current_mode;
+};
+
+struct cdns_mhdp_platform_info {
+ const struct drm_bridge_timings *timings;
+ const struct mhdp_platform_ops *ops;
+};
+
+#define to_cdns_mhdp_bridge_state(s) \
+ container_of(s, struct cdns_mhdp_bridge_state, base)
+
+struct cdns_mhdp_device {
+ void __iomem *regs;
+ void __iomem *j721e_regs;
+
+ struct device *dev;
+ struct clk *clk;
+ struct phy *phy;
+
+ const struct cdns_mhdp_platform_info *info;
+
+ /* This is to protect mailbox communications with the firmware */
+ struct mutex mbox_mutex;
+
+ /*
+ * "link_mutex" protects the access to all the link parameters
+ * including the link training process. Link training will be
+ * invoked both from threaded interrupt handler and from atomic
+ * callbacks when link_up is not set. So this mutex protects
+ * flags such as link_up, bridge_enabled, link.num_lanes,
+ * link.rate etc.
+ */
+ struct mutex link_mutex;
+
+ struct drm_connector connector;
+ struct drm_bridge bridge;
+
+ struct cdns_mhdp_link link;
+ struct drm_dp_aux aux;
+
+ struct cdns_mhdp_host host;
+ struct cdns_mhdp_sink sink;
+ struct cdns_mhdp_display_fmt display_fmt;
+ u8 stream_id;
+
+ bool link_up;
+ bool plugged;
+
+ /*
+ * "start_lock" protects the access to bridge_attached and
+ * hw_state data members that control the delayed firmware
+ * loading and attaching the bridge. They are accessed from
+ * both the DRM core and cdns_mhdp_fw_cb(). In most cases just
+ * protecting the data members is enough, but the irq mask
+ * setting needs to be protected when enabling the FW.
+ */
+ spinlock_t start_lock;
+ bool bridge_attached;
+ bool bridge_enabled;
+ enum mhdp_hw_state hw_state;
+ wait_queue_head_t fw_load_wq;
+
+ /* Work struct to schedule a uevent on link train failure */
+ struct work_struct modeset_retry_work;
+};
+
+#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector)
+#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge)
+
+#endif
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
new file mode 100644
index 000000000000..dfe1b59514f7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence MHDP8546 DP wrapper
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "cdns-mhdp8546-j721e.h"
+
+#define REVISION 0x00
+#define DPTX_IPCFG 0x04
+#define ECC_MEM_CFG 0x08
+#define DPTX_DSC_CFG 0x0c
+#define DPTX_SRC_CFG 0x10
+#define DPTX_VIF_SECURE_MODE_CFG 0x14
+#define DPTX_VIF_CONN_STATUS 0x18
+#define PHY_CLK_STATUS 0x1c
+
+#define DPTX_SRC_AIF_EN BIT(16)
+#define DPTX_SRC_VIF_3_IN30B BIT(11)
+#define DPTX_SRC_VIF_2_IN30B BIT(10)
+#define DPTX_SRC_VIF_1_IN30B BIT(9)
+#define DPTX_SRC_VIF_0_IN30B BIT(8)
+#define DPTX_SRC_VIF_3_SEL_DPI5 BIT(7)
+#define DPTX_SRC_VIF_3_SEL_DPI3 0
+#define DPTX_SRC_VIF_2_SEL_DPI4 BIT(6)
+#define DPTX_SRC_VIF_2_SEL_DPI2 0
+#define DPTX_SRC_VIF_1_SEL_DPI3 BIT(5)
+#define DPTX_SRC_VIF_1_SEL_DPI1 0
+#define DPTX_SRC_VIF_0_SEL_DPI2 BIT(4)
+#define DPTX_SRC_VIF_0_SEL_DPI0 0
+#define DPTX_SRC_VIF_3_EN BIT(3)
+#define DPTX_SRC_VIF_2_EN BIT(2)
+#define DPTX_SRC_VIF_1_EN BIT(1)
+#define DPTX_SRC_VIF_0_EN BIT(0)
+
+/* TODO: turn off DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */
+
+static int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp)
+{
+ struct platform_device *pdev = to_platform_device(mhdp->dev);
+
+ mhdp->j721e_regs = devm_platform_ioremap_resource(pdev, 1);
+ return PTR_ERR_OR_ZERO(mhdp->j721e_regs);
+}
+
+static void cdns_mhdp_j721e_enable(struct cdns_mhdp_device *mhdp)
+{
+ /*
+ * Enable VIF_0 and select DPI2 as its input. DSS0 DPI0 is connected
+ * to eDP DPI2. This is the only supported SST configuration on
+ * J721E.
+ */
+ writel(DPTX_SRC_VIF_0_EN | DPTX_SRC_VIF_0_SEL_DPI2,
+ mhdp->j721e_regs + DPTX_SRC_CFG);
+}
+
+static void cdns_mhdp_j721e_disable(struct cdns_mhdp_device *mhdp)
+{
+ /* Put everything to defaults */
+ writel(0, mhdp->j721e_regs + DPTX_DSC_CFG);
+}
+
+const struct mhdp_platform_ops mhdp_ti_j721e_ops = {
+ .init = cdns_mhdp_j721e_init,
+ .enable = cdns_mhdp_j721e_enable,
+ .disable = cdns_mhdp_j721e_disable,
+};
+
+const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_DE_HIGH,
+};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
new file mode 100644
index 000000000000..97d20d115a24
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI j721e Cadence MHDP8546 DP wrapper
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef CDNS_MHDP8546_J721E_H
+#define CDNS_MHDP8546_J721E_H
+
+#include "cdns-mhdp8546-core.h"
+
+struct mhdp_platform_ops;
+
+extern const struct mhdp_platform_ops mhdp_ti_j721e_ops;
+extern const struct drm_bridge_timings mhdp_ti_j721e_bridge_timings;
+
+#endif /* !CDNS_MHDP8546_J721E_H */
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
new file mode 100644
index 000000000000..d734d9402c35
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020. Linaro Limited.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/hdmi-codec.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define EDID_SEG_SIZE 256
+#define EDID_LEN 32
+#define EDID_LOOP 8
+#define KEY_DDC_ACCS_DONE 0x02
+#define DDC_NO_ACK 0x50
+
+#define LT9611_4LANES 0
+
+struct lt9611 {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct regmap *regmap;
+
+ struct device_node *dsi0_node;
+ struct device_node *dsi1_node;
+ struct mipi_dsi_device *dsi0;
+ struct mipi_dsi_device *dsi1;
+ struct platform_device *audio_pdev;
+
+ bool ac_mode;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+
+ bool power_on;
+ bool sleep;
+
+ struct regulator_bulk_data supplies[2];
+
+ struct i2c_client *client;
+
+ enum drm_connector_status status;
+
+ u8 edid_buf[EDID_SEG_SIZE];
+ u32 vic;
+};
+
+#define LT9611_PAGE_CONTROL 0xff
+
+static const struct regmap_range_cfg lt9611_ranges[] = {
+ {
+ .name = "register_range",
+ .range_min = 0,
+ .range_max = 0x85ff,
+ .selector_reg = LT9611_PAGE_CONTROL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ },
+};
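+
+/*
+ * A note on addressing: with the range config above, the 16-bit register
+ * addresses used throughout this driver (e.g. 0x8123) are virtual. regmap
+ * writes the high byte (0x81) to the LT9611_PAGE_CONTROL selector and then
+ * accesses the low byte (0x23) within the 0x100-byte window.
+ */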
+
+static const struct regmap_config lt9611_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .ranges = lt9611_ranges,
+ .num_ranges = ARRAY_SIZE(lt9611_ranges),
+};
+
+struct lt9611_mode {
+ u16 hdisplay;
+ u16 vdisplay;
+ u8 vrefresh;
+ u8 lanes;
+ u8 intfs;
+};
+
+static struct lt9611_mode lt9611_modes[] = {
+ { 3840, 2160, 30, 4, 2 }, /* 3840x2160 24bit 30Hz 4Lane 2ports */
+ { 1920, 1080, 60, 4, 1 }, /* 1080P 24bit 60Hz 4lane 1port */
+ { 1920, 1080, 30, 3, 1 }, /* 1080P 24bit 30Hz 3lane 1port */
+ { 1920, 1080, 24, 3, 1 },
+ { 720, 480, 60, 4, 1 },
+ { 720, 576, 50, 2, 1 },
+ { 640, 480, 60, 2, 1 },
+};
+
+static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct lt9611, bridge);
+}
+
+static struct lt9611 *connector_to_lt9611(struct drm_connector *connector)
+{
+ return container_of(connector, struct lt9611, connector);
+}
+
+static int lt9611_mipi_input_analog(struct lt9611 *lt9611)
+{
+ const struct reg_sequence reg_cfg[] = {
+ { 0x8106, 0x40 }, /* port A rx current */
+ { 0x810a, 0xfe }, /* port A ldo voltage set */
+ { 0x810b, 0xbf }, /* enable port A lprx */
+ { 0x8111, 0x40 }, /* port B rx current */
+ { 0x8115, 0xfe }, /* port B ldo voltage set */
+ { 0x8116, 0xbf }, /* enable port B lprx */
+
+ { 0x811c, 0x03 }, /* PortA clk lane no-LP mode */
+ { 0x8120, 0x03 }, /* PortB clk lane with-LP mode */
+ };
+
+ return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
+static int lt9611_mipi_input_digital(struct lt9611 *lt9611,
+ const struct drm_display_mode *mode)
+{
+ struct reg_sequence reg_cfg[] = {
+ { 0x8300, LT9611_4LANES },
+ { 0x830a, 0x00 },
+ { 0x824f, 0x80 },
+ { 0x8250, 0x10 },
+ { 0x8302, 0x0a },
+ { 0x8306, 0x0a },
+ };
+
+ if (mode->hdisplay == 3840)
+ reg_cfg[1].def = 0x03;
+
+ return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
+static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
+ const struct drm_display_mode *mode)
+{
+ u32 h_total, hactive, hsync_len, hfront_porch, hsync_porch;
+ u32 v_total, vactive, vsync_len, vfront_porch, vsync_porch;
+
+ h_total = mode->htotal;
+ v_total = mode->vtotal;
+
+ hactive = mode->hdisplay;
+ hsync_len = mode->hsync_end - mode->hsync_start;
+ hfront_porch = mode->hsync_start - mode->hdisplay;
+ hsync_porch = hsync_len + mode->htotal - mode->hsync_end;
+
+ vactive = mode->vdisplay;
+ vsync_len = mode->vsync_end - mode->vsync_start;
+ vfront_porch = mode->vsync_start - mode->vdisplay;
+ vsync_porch = vsync_len + mode->vtotal - mode->vsync_end;
+
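+ /*
+ * Example values (CEA 1080p60, purely illustrative): h_total 2200,
+ * hsync_len 44, hfront_porch 88, hsync_porch 192; v_total 1125,
+ * vsync_len 5, vfront_porch 4, vsync_porch 41. Totals and active
+ * widths are split into high/low bytes below; the other values fit
+ * in a single byte each, except hsync_porch.
+ */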
+ regmap_write(lt9611->regmap, 0x830d, (u8)(v_total / 256));
+ regmap_write(lt9611->regmap, 0x830e, (u8)(v_total % 256));
+
+ regmap_write(lt9611->regmap, 0x830f, (u8)(vactive / 256));
+ regmap_write(lt9611->regmap, 0x8310, (u8)(vactive % 256));
+
+ regmap_write(lt9611->regmap, 0x8311, (u8)(h_total / 256));
+ regmap_write(lt9611->regmap, 0x8312, (u8)(h_total % 256));
+
+ regmap_write(lt9611->regmap, 0x8313, (u8)(hactive / 256));
+ regmap_write(lt9611->regmap, 0x8314, (u8)(hactive % 256));
+
+ regmap_write(lt9611->regmap, 0x8315, (u8)(vsync_len % 256));
+ regmap_write(lt9611->regmap, 0x8316, (u8)(hsync_len % 256));
+
+ regmap_write(lt9611->regmap, 0x8317, (u8)(vfront_porch % 256));
+
+ regmap_write(lt9611->regmap, 0x8318, (u8)(vsync_porch % 256));
+
+ regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256));
+
+ regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256));
+ regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256));
+}
+
+static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+{
+ const struct reg_sequence reg_cfg[] = {
+ { 0x830b, 0x01 },
+ { 0x830c, 0x10 },
+ { 0x8348, 0x00 },
+ { 0x8349, 0x81 },
+
+ /* stage 1 */
+ { 0x8321, 0x4a },
+ { 0x8324, 0x71 },
+ { 0x8325, 0x30 },
+ { 0x832a, 0x01 },
+
+ /* stage 2 */
+ { 0x834a, 0x40 },
+ { 0x831d, 0x10 },
+
+ /* MK limit */
+ { 0x832d, 0x38 },
+ { 0x8331, 0x08 },
+ };
+ const struct reg_sequence reg_cfg2[] = {
+ { 0x830b, 0x03 },
+ { 0x830c, 0xd0 },
+ { 0x8348, 0x03 },
+ { 0x8349, 0xe0 },
+ { 0x8324, 0x72 },
+ { 0x8325, 0x00 },
+ { 0x832a, 0x01 },
+ { 0x834a, 0x10 },
+ { 0x831d, 0x10 },
+ { 0x8326, 0x37 },
+ };
+
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+
+ switch (mode->hdisplay) {
+ case 640:
+ regmap_write(lt9611->regmap, 0x8326, 0x14);
+ break;
+ case 1920:
+ regmap_write(lt9611->regmap, 0x8326, 0x37);
+ break;
+ case 3840:
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2));
+ break;
+ }
+
+ /* pcr rst */
+ regmap_write(lt9611->regmap, 0x8011, 0x5a);
+ regmap_write(lt9611->regmap, 0x8011, 0xfa);
+}
+
+static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+{
+ unsigned int pclk = mode->clock;
+ const struct reg_sequence reg_cfg[] = {
+ /* txpll init */
+ { 0x8123, 0x40 },
+ { 0x8124, 0x64 },
+ { 0x8125, 0x80 },
+ { 0x8126, 0x55 },
+ { 0x812c, 0x37 },
+ { 0x812f, 0x01 },
+ { 0x8126, 0x55 },
+ { 0x8127, 0x66 },
+ { 0x8128, 0x88 },
+ };
+
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+
+ if (pclk > 150000)
+ regmap_write(lt9611->regmap, 0x812d, 0x88);
+ else if (pclk > 70000)
+ regmap_write(lt9611->regmap, 0x812d, 0x99);
+ else
+ regmap_write(lt9611->regmap, 0x812d, 0xaa);
+
+ /*
+ * pclk is first divided by 2, then split across three byte registers:
+ * - bits 19:16 of the result go out as pclk >> 17
+ * - bits 15:8 of the result go out as pclk >> 9
+ * - bits 7:0 of the result go out as pclk >> 1
+ */
+ regmap_write(lt9611->regmap, 0x82e3, pclk >> 17); /* pclk[19:16] */
+ regmap_write(lt9611->regmap, 0x82e4, pclk >> 9); /* pclk[15:8] */
+ regmap_write(lt9611->regmap, 0x82e5, pclk >> 1); /* pclk[7:0] */
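+
+ /*
+ * Example (illustrative): pclk = 148500 is 0x24414, so pclk / 2 is
+ * 0x1220a and the three writes above land 0x01, 0x22 and 0x0a in the
+ * [19:16], [15:8] and [7:0] byte registers respectively.
+ */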
+
+ regmap_write(lt9611->regmap, 0x82de, 0x20);
+ regmap_write(lt9611->regmap, 0x82de, 0xe0);
+
+ regmap_write(lt9611->regmap, 0x8016, 0xf1);
+ regmap_write(lt9611->regmap, 0x8016, 0xf3);
+
+ return 0;
+}
+
+static int lt9611_read_video_check(struct lt9611 *lt9611, unsigned int reg)
+{
+ unsigned int temp, temp2;
+ int ret;
+
+ ret = regmap_read(lt9611->regmap, reg, &temp);
+ if (ret)
+ return ret;
+ temp <<= 8;
+ ret = regmap_read(lt9611->regmap, reg + 1, &temp2);
+ if (ret)
+ return ret;
+
+ return (temp + temp2);
+}
+
+static int lt9611_video_check(struct lt9611 *lt9611)
+{
+ u32 v_total, vactive, hactive_a, hactive_b, h_total_sysclk;
+ int temp;
+
+ /* top module video check */
+
+ /* vactive */
+ temp = lt9611_read_video_check(lt9611, 0x8282);
+ if (temp < 0)
+ goto end;
+ vactive = temp;
+
+ /* v_total */
+ temp = lt9611_read_video_check(lt9611, 0x826c);
+ if (temp < 0)
+ goto end;
+ v_total = temp;
+
+ /* h_total_sysclk */
+ temp = lt9611_read_video_check(lt9611, 0x8286);
+ if (temp < 0)
+ goto end;
+ h_total_sysclk = temp;
+
+ /* hactive_a */
+ temp = lt9611_read_video_check(lt9611, 0x8382);
+ if (temp < 0)
+ goto end;
+ hactive_a = temp / 3;
+
+ /* hactive_b */
+ temp = lt9611_read_video_check(lt9611, 0x8386);
+ if (temp < 0)
+ goto end;
+ hactive_b = temp / 3;
+
+ dev_info(lt9611->dev,
+ "video check: hactive_a=%d, hactive_b=%d, vactive=%d, v_total=%d, h_total_sysclk=%d\n",
+ hactive_a, hactive_b, vactive, v_total, h_total_sysclk);
+
+ return 0;
+
+end:
+ dev_err(lt9611->dev, "read video check error\n");
+ return temp;
+}
+
+static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611)
+{
+ regmap_write(lt9611->regmap, 0x8443, 0x46 - lt9611->vic);
+ regmap_write(lt9611->regmap, 0x8447, lt9611->vic);
+ regmap_write(lt9611->regmap, 0x843d, 0x0a); /* UD1 infoframe */
+
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ regmap_write(lt9611->regmap, 0x82d7, 0x04);
+}
+
+static void lt9611_hdmi_tx_phy(struct lt9611 *lt9611)
+{
+ struct reg_sequence reg_cfg[] = {
+ { 0x8130, 0x6a },
+ { 0x8131, 0x44 }, /* HDMI DC mode */
+ { 0x8132, 0x4a },
+ { 0x8133, 0x0b },
+ { 0x8134, 0x00 },
+ { 0x8135, 0x00 },
+ { 0x8136, 0x00 },
+ { 0x8137, 0x44 },
+ { 0x813f, 0x0f },
+ { 0x8140, 0xa0 },
+ { 0x8141, 0xa0 },
+ { 0x8142, 0xa0 },
+ { 0x8143, 0xa0 },
+ { 0x8144, 0x0a },
+ };
+
+ /* HDMI AC mode */
+ if (lt9611->ac_mode)
+ reg_cfg[2].def = 0x73;
+
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+}
+
+static irqreturn_t lt9611_irq_thread_handler(int irq, void *dev_id)
+{
+ struct lt9611 *lt9611 = dev_id;
+ unsigned int irq_flag0 = 0;
+ unsigned int irq_flag3 = 0;
+
+ regmap_read(lt9611->regmap, 0x820f, &irq_flag3);
+ regmap_read(lt9611->regmap, 0x820c, &irq_flag0);
+
+ /* hpd changed low */
+ if (irq_flag3 & 0x80) {
+ dev_info(lt9611->dev, "hdmi cable disconnected\n");
+
+ regmap_write(lt9611->regmap, 0x8207, 0xbf);
+ regmap_write(lt9611->regmap, 0x8207, 0x3f);
+ }
+
+ /* hpd changed high */
+ if (irq_flag3 & 0x40) {
+ dev_info(lt9611->dev, "hdmi cable connected\n");
+
+ regmap_write(lt9611->regmap, 0x8207, 0x7f);
+ regmap_write(lt9611->regmap, 0x8207, 0x3f);
+ }
+
+ if (irq_flag3 & 0xc0 && lt9611->bridge.dev)
+ drm_kms_helper_hotplug_event(lt9611->bridge.dev);
+
+ /* video input changed */
+ if (irq_flag0 & 0x01) {
+ dev_info(lt9611->dev, "video input changed\n");
+ regmap_write(lt9611->regmap, 0x829e, 0xff);
+ regmap_write(lt9611->regmap, 0x829e, 0xf7);
+ regmap_write(lt9611->regmap, 0x8204, 0xff);
+ regmap_write(lt9611->regmap, 0x8204, 0xfe);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void lt9611_enable_hpd_interrupts(struct lt9611 *lt9611)
+{
+ unsigned int val;
+
+ regmap_read(lt9611->regmap, 0x8203, &val);
+
+ val &= ~0xc0;
+ regmap_write(lt9611->regmap, 0x8203, val);
+ regmap_write(lt9611->regmap, 0x8207, 0xff); /* clear */
+ regmap_write(lt9611->regmap, 0x8207, 0x3f);
+}
+
+static void lt9611_sleep_setup(struct lt9611 *lt9611)
+{
+ const struct reg_sequence sleep_setup[] = {
+ { 0x8024, 0x76 },
+ { 0x8023, 0x01 },
+ { 0x8157, 0x03 }, /* set addr pin as output */
+ { 0x8149, 0x0b },
+ { 0x8151, 0x30 }, /* disable IRQ */
+ { 0x8102, 0x48 }, /* MIPI Rx power down */
+ { 0x8123, 0x80 },
+ { 0x8130, 0x00 },
+ { 0x8100, 0x01 }, /* bandgap power down */
+ { 0x8101, 0x00 }, /* system clk power down */
+ };
+
+ regmap_multi_reg_write(lt9611->regmap,
+ sleep_setup, ARRAY_SIZE(sleep_setup));
+ lt9611->sleep = true;
+}
+
+static int lt9611_power_on(struct lt9611 *lt9611)
+{
+ int ret;
+ const struct reg_sequence seq[] = {
+ /* LT9611_System_Init */
+ { 0x8101, 0x18 }, /* sel xtal clock */
+
+ /* timer for frequency meter */
+ { 0x821b, 0x69 }, /* timer 2 */
+ { 0x821c, 0x78 },
+ { 0x82cb, 0x69 }, /* timer 1 */
+ { 0x82cc, 0x78 },
+
+ /* irq init */
+ { 0x8251, 0x01 },
+ { 0x8258, 0x0a }, /* hpd irq */
+ { 0x8259, 0x80 }, /* hpd debounce width */
+ { 0x829e, 0xf7 }, /* video check irq */
+
+ /* power consumption for work */
+ { 0x8004, 0xf0 },
+ { 0x8006, 0xf0 },
+ { 0x800a, 0x80 },
+ { 0x800b, 0x40 },
+ { 0x800d, 0xef },
+ { 0x8011, 0xfa },
+ };
+
+ if (lt9611->power_on)
+ return 0;
+
+ ret = regmap_multi_reg_write(lt9611->regmap, seq, ARRAY_SIZE(seq));
+ if (!ret)
+ lt9611->power_on = true;
+
+ return ret;
+}
+
+static int lt9611_power_off(struct lt9611 *lt9611)
+{
+ int ret;
+
+ ret = regmap_write(lt9611->regmap, 0x8130, 0x6a);
+ if (!ret)
+ lt9611->power_on = false;
+
+ return ret;
+}
+
+static void lt9611_reset(struct lt9611 *lt9611)
+{
+ gpiod_set_value_cansleep(lt9611->reset_gpio, 1);
+ msleep(20);
+
+ gpiod_set_value_cansleep(lt9611->reset_gpio, 0);
+ msleep(20);
+
+ gpiod_set_value_cansleep(lt9611->reset_gpio, 1);
+ msleep(100);
+}
+
+static void lt9611_assert_5v(struct lt9611 *lt9611)
+{
+ if (!lt9611->enable_gpio)
+ return;
+
+ gpiod_set_value_cansleep(lt9611->enable_gpio, 1);
+ msleep(20);
+}
+
+static int lt9611_regulator_init(struct lt9611 *lt9611)
+{
+ int ret;
+
+ lt9611->supplies[0].supply = "vdd";
+ lt9611->supplies[1].supply = "vcc";
+
+ ret = devm_regulator_bulk_get(lt9611->dev, 2, lt9611->supplies);
+ if (ret < 0)
+ return ret;
+
+ return regulator_set_load(lt9611->supplies[0].consumer, 300000);
+}
+
+static int lt9611_regulator_enable(struct lt9611 *lt9611)
+{
+ int ret;
+
+ ret = regulator_enable(lt9611->supplies[0].consumer);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 10000);
+
+ ret = regulator_enable(lt9611->supplies[1].consumer);
+ if (ret < 0) {
+ regulator_disable(lt9611->supplies[0].consumer);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct lt9611_mode *lt9611_find_mode(const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lt9611_modes); i++) {
+ if (lt9611_modes[i].hdisplay == mode->hdisplay &&
+ lt9611_modes[i].vdisplay == mode->vdisplay &&
+ lt9611_modes[i].vrefresh == drm_mode_vrefresh(mode)) {
+ return &lt9611_modes[i];
+ }
+ }
+
+ return NULL;
+}
+
+/* connector funcs */
+static enum drm_connector_status
+lt9611_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct lt9611 *lt9611 = connector_to_lt9611(connector);
+ unsigned int reg_val = 0;
+ int connected = 0;
+
+ regmap_read(lt9611->regmap, 0x825e, &reg_val);
+ connected = (reg_val & BIT(2));
+
+ lt9611->status = connected ? connector_status_connected :
+ connector_status_disconnected;
+
+ return lt9611->status;
+}
+
+static int lt9611_read_edid(struct lt9611 *lt9611)
+{
+ unsigned int temp;
+ int ret = 0;
+ int i, j;
+
+ /* memset to clear old buffer, if any */
+ memset(lt9611->edid_buf, 0, sizeof(lt9611->edid_buf));
+
+ regmap_write(lt9611->regmap, 0x8503, 0xc9);
+
+ /* 0xA0 is EDID device address */
+ regmap_write(lt9611->regmap, 0x8504, 0xa0);
+ /* 0x00 is EDID offset address */
+ regmap_write(lt9611->regmap, 0x8505, 0x00);
+
+ /* length for read */
+ regmap_write(lt9611->regmap, 0x8506, EDID_LEN);
+ regmap_write(lt9611->regmap, 0x8514, 0x7f);
+
+ for (i = 0; i < EDID_LOOP; i++) {
+ /* offset address */
+ regmap_write(lt9611->regmap, 0x8505, i * EDID_LEN);
+ regmap_write(lt9611->regmap, 0x8507, 0x36);
+ regmap_write(lt9611->regmap, 0x8507, 0x31);
+ regmap_write(lt9611->regmap, 0x8507, 0x37);
+ usleep_range(5000, 10000);
+
+ regmap_read(lt9611->regmap, 0x8540, &temp);
+
+ if (temp & KEY_DDC_ACCS_DONE) {
+ for (j = 0; j < EDID_LEN; j++) {
+ regmap_read(lt9611->regmap, 0x8583, &temp);
+ lt9611->edid_buf[i * EDID_LEN + j] = temp;
+ }
+
+ } else if (temp & DDC_NO_ACK) { /* DDC No Ack or Abitration lost */
+ dev_err(lt9611->dev, "read edid failed: no ack\n");
+ ret = -EIO;
+ goto end;
+
+ } else {
+ dev_err(lt9611->dev, "read edid failed: access not done\n");
+ ret = -EIO;
+ goto end;
+ }
+ }
+
+end:
+ regmap_write(lt9611->regmap, 0x8507, 0x1f);
+ return ret;
+}
+
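+/*
+ * EDID fetch callback for drm_do_get_edid(): block 0 triggers a full
+ * DDC read of the base block plus one extension into edid_buf via
+ * lt9611_read_edid(); block 1 is then served from that cached buffer.
+ */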
+static int
+lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct lt9611 *lt9611 = data;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
+ /* supports up to 1 extension block */
+ /* TODO: add support for more extension blocks */
+ if (block > 1)
+ return -EINVAL;
+
+ if (block == 0) {
+ ret = lt9611_read_edid(lt9611);
+ if (ret) {
+ dev_err(lt9611->dev, "edid read failed\n");
+ return ret;
+ }
+ }
+
+ block %= 2;
+ memcpy(buf, lt9611->edid_buf + (block * 128), len);
+
+ return 0;
+}
+
+static int lt9611_connector_get_modes(struct drm_connector *connector)
+{
+ struct lt9611 *lt9611 = connector_to_lt9611(connector);
+ unsigned int count;
+ struct edid *edid;
+
+ lt9611_power_on(lt9611);
+ edid = drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return count;
+}
+
+static enum drm_mode_status
+lt9611_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
+
+ return lt9611_mode ? MODE_OK : MODE_BAD;
+}
+
+/* bridge funcs */
+static void lt9611_bridge_enable(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (lt9611_power_on(lt9611)) {
+ dev_err(lt9611->dev, "power on failed\n");
+ return;
+ }
+
+ lt9611_mipi_input_analog(lt9611);
+ lt9611_hdmi_tx_digital(lt9611);
+ lt9611_hdmi_tx_phy(lt9611);
+
+ msleep(500);
+
+ lt9611_video_check(lt9611);
+
+ /* Enable HDMI output */
+ regmap_write(lt9611->regmap, 0x8130, 0xea);
+}
+
+static void lt9611_bridge_disable(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ int ret;
+
+ /* Disable HDMI output */
+ ret = regmap_write(lt9611->regmap, 0x8130, 0x6a);
+ if (ret) {
+ dev_err(lt9611->dev, "video on failed\n");
+ return;
+ }
+
+ if (lt9611_power_off(lt9611)) {
+ dev_err(lt9611->dev, "power on failed\n");
+ return;
+ }
+}
+
+static const struct
+drm_connector_helper_funcs lt9611_bridge_connector_helper_funcs = {
+ .get_modes = lt9611_connector_get_modes,
+ .mode_valid = lt9611_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs lt9611_bridge_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = lt9611_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ struct device_node *dsi_node)
+{
+ const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ if (!host) {
+ dev_err(lt9611->dev, "failed to find dsi host\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(lt9611->dev, "failed to create dsi device\n");
+ return dsi;
+ }
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(lt9611->dev, "failed to attach dsi to host\n");
+ mipi_dsi_device_unregister(dsi);
+ return ERR_PTR(ret);
+ }
+
+ return dsi;
+}
+
+static void lt9611_bridge_detach(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (lt9611->dsi1) {
+ mipi_dsi_detach(lt9611->dsi1);
+ mipi_dsi_device_unregister(lt9611->dsi1);
+ }
+
+ mipi_dsi_detach(lt9611->dsi0);
+ mipi_dsi_device_unregister(lt9611->dsi0);
+}
+
+static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611)
+{
+ int ret;
+
+ ret = drm_connector_init(bridge->dev, &lt9611->connector,
+ &lt9611_bridge_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	drm_connector_helper_add(&lt9611->connector,
+				 &lt9611_bridge_connector_helper_funcs);
+	drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
+
+ return 0;
+}
+
+static int lt9611_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = lt9611_connector_init(bridge, lt9611);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Attach primary DSI */
+ lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
+ if (IS_ERR(lt9611->dsi0))
+ return PTR_ERR(lt9611->dsi0);
+
+ /* Attach secondary DSI, if specified */
+ if (lt9611->dsi1_node) {
+ lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node);
+ if (IS_ERR(lt9611->dsi1)) {
+ ret = PTR_ERR(lt9611->dsi1);
+ goto err_unregister_dsi0;
+ }
+ }
+
+ return 0;
+
+err_unregister_dsi0:
+	drm_connector_cleanup(&lt9611->connector);
+	mipi_dsi_detach(lt9611->dsi0);
+	mipi_dsi_device_unregister(lt9611->dsi0);
+
+ return ret;
+}
+
+static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
+
+ return lt9611_mode ? MODE_OK : MODE_BAD;
+}
+
+static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (!lt9611->sleep)
+ return;
+
+ lt9611_reset(lt9611);
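+	/* 0x80ee appears to re-enable register access after reset */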
+ regmap_write(lt9611->regmap, 0x80ee, 0x01);
+
+ lt9611->sleep = false;
+}
+
+static void lt9611_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ lt9611_sleep_setup(lt9611);
+}
+
+static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj_mode)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ struct hdmi_avi_infoframe avi_frame;
+ int ret;
+
+ lt9611_bridge_pre_enable(bridge);
+
+ lt9611_mipi_input_digital(lt9611, mode);
+ lt9611_pll_setup(lt9611, mode);
+ lt9611_mipi_video_setup(lt9611, mode);
+ lt9611_pcr_setup(lt9611, mode);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ &lt9611->connector,
+ mode);
+ if (!ret)
+ lt9611->vic = avi_frame.video_code;
+}
+
+static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ unsigned int reg_val = 0;
+ int connected;
+
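+	/* 0x825e bit 2 reflects the HPD level, per the vendor reference */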
+ regmap_read(lt9611->regmap, 0x825e, &reg_val);
+ connected = reg_val & BIT(2);
+
+ lt9611->status = connected ? connector_status_connected :
+ connector_status_disconnected;
+
+ return lt9611->status;
+}
+
+static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ lt9611_power_on(lt9611);
+ return drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
+}
+
+static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ lt9611_enable_hpd_interrupts(lt9611);
+}
+
+static const struct drm_bridge_funcs lt9611_bridge_funcs = {
+ .attach = lt9611_bridge_attach,
+ .detach = lt9611_bridge_detach,
+ .mode_valid = lt9611_bridge_mode_valid,
+ .enable = lt9611_bridge_enable,
+ .disable = lt9611_bridge_disable,
+ .post_disable = lt9611_bridge_post_disable,
+ .mode_set = lt9611_bridge_mode_set,
+ .detect = lt9611_bridge_detect,
+ .get_edid = lt9611_bridge_get_edid,
+ .hpd_enable = lt9611_bridge_hpd_enable,
+};
+
+static int lt9611_parse_dt(struct device *dev,
+ struct lt9611 *lt9611)
+{
+ lt9611->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1);
+ if (!lt9611->dsi0_node) {
+ dev_err(lt9611->dev, "failed to get remote node for primary dsi\n");
+ return -ENODEV;
+ }
+
+ lt9611->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
+
+ lt9611->ac_mode = of_property_read_bool(dev->of_node, "lt,ac-mode");
+
+ return 0;
+}
+
+static int lt9611_gpio_init(struct lt9611 *lt9611)
+{
+ struct device *dev = lt9611->dev;
+
+ lt9611->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(lt9611->reset_gpio)) {
+ dev_err(dev, "failed to acquire reset gpio\n");
+ return PTR_ERR(lt9611->reset_gpio);
+ }
+
+ lt9611->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lt9611->enable_gpio)) {
+ dev_err(dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(lt9611->enable_gpio);
+ }
+
+ return 0;
+}
+
+static int lt9611_read_device_rev(struct lt9611 *lt9611)
+{
+ unsigned int rev;
+ int ret;
+
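+	/* wake up the register interface before reading the chip ID */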
+ regmap_write(lt9611->regmap, 0x80ee, 0x01);
+ ret = regmap_read(lt9611->regmap, 0x8002, &rev);
+ if (ret)
+ dev_err(lt9611->dev, "failed to read revision: %d\n", ret);
+ else
+ dev_info(lt9611->dev, "LT9611 revision: 0x%x\n", rev);
+
+ return ret;
+}
+
+static int lt9611_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct lt9611 *lt9611 = data;
+
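+	/*
+	 * 0x840f selects the audio sample rate; 0x2b and 0xab are the
+	 * vendor reference values for 48 kHz and 96 kHz respectively.
+	 */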
+ if (hparms->sample_rate == 48000)
+ regmap_write(lt9611->regmap, 0x840f, 0x2b);
+ else if (hparms->sample_rate == 96000)
+ regmap_write(lt9611->regmap, 0x840f, 0xab);
+ else
+ return -EINVAL;
+
+ regmap_write(lt9611->regmap, 0x8435, 0x00);
+ regmap_write(lt9611->regmap, 0x8436, 0x18);
+ regmap_write(lt9611->regmap, 0x8437, 0x00);
+
+ return 0;
+}
+
+static int lt9611_audio_startup(struct device *dev, void *data)
+{
+ struct lt9611 *lt9611 = data;
+
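+	/*
+	 * Audio bring-up sequence; the register values are taken as-is
+	 * from the vendor reference code.
+	 */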
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ regmap_write(lt9611->regmap, 0x82d7, 0x04);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x08);
+ regmap_write(lt9611->regmap, 0x8407, 0x10);
+
+ regmap_write(lt9611->regmap, 0x8434, 0xd5);
+
+ return 0;
+}
+
+static void lt9611_audio_shutdown(struct device *dev, void *data)
+{
+ struct lt9611 *lt9611 = data;
+
+ regmap_write(lt9611->regmap, 0x8406, 0x00);
+ regmap_write(lt9611->regmap, 0x8407, 0x00);
+}
+
+static int lt9611_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * The HDMI audio endpoint is expected at reg = <2>; map it to
+	 * sound DAI 0.
+	 */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct hdmi_codec_ops lt9611_codec_ops = {
+ .hw_params = lt9611_hdmi_hw_params,
+ .audio_shutdown = lt9611_audio_shutdown,
+ .audio_startup = lt9611_audio_startup,
+ .get_dai_id = lt9611_hdmi_i2s_get_dai_id,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &lt9611_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
+static int lt9611_audio_init(struct device *dev, struct lt9611 *lt9611)
+{
+ codec_data.data = lt9611;
+ lt9611->audio_pdev =
+ platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(lt9611->audio_pdev);
+}
+
+static void lt9611_audio_exit(struct lt9611 *lt9611)
+{
+ if (lt9611->audio_pdev) {
+ platform_device_unregister(lt9611->audio_pdev);
+ lt9611->audio_pdev = NULL;
+ }
+}
+
+static int lt9611_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lt9611 *lt9611;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "device doesn't support I2C\n");
+ return -ENODEV;
+ }
+
+ lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
+ if (!lt9611)
+ return -ENOMEM;
+
+ lt9611->dev = &client->dev;
+ lt9611->client = client;
+ lt9611->sleep = false;
+
+ lt9611->regmap = devm_regmap_init_i2c(client, &lt9611_regmap_config);
+ if (IS_ERR(lt9611->regmap)) {
+ dev_err(lt9611->dev, "regmap i2c init failed\n");
+ return PTR_ERR(lt9611->regmap);
+ }
+
+ ret = lt9611_parse_dt(&client->dev, lt9611);
+ if (ret) {
+ dev_err(dev, "failed to parse device tree\n");
+ return ret;
+ }
+
+ ret = lt9611_gpio_init(lt9611);
+ if (ret < 0)
+ goto err_of_put;
+
+ ret = lt9611_regulator_init(lt9611);
+ if (ret < 0)
+ goto err_of_put;
+
+ lt9611_assert_5v(lt9611);
+
+ ret = lt9611_regulator_enable(lt9611);
+ if (ret)
+ goto err_of_put;
+
+ lt9611_reset(lt9611);
+
+ ret = lt9611_read_device_rev(lt9611);
+ if (ret) {
+ dev_err(dev, "failed to read chip rev\n");
+ goto err_disable_regulators;
+ }
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ lt9611_irq_thread_handler,
+ IRQF_ONESHOT, "lt9611", lt9611);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_disable_regulators;
+ }
+
+ i2c_set_clientdata(client, lt9611);
+
+ lt9611->bridge.funcs = &lt9611_bridge_funcs;
+ lt9611->bridge.of_node = client->dev.of_node;
+ lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES;
+ lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+
+ drm_bridge_add(&lt9611->bridge);
+
+ lt9611_enable_hpd_interrupts(lt9611);
+
+	ret = lt9611_audio_init(dev, lt9611);
+	if (ret)
+		goto err_remove_bridge;
+
+	return 0;
+
+err_remove_bridge:
+	drm_bridge_remove(&lt9611->bridge);
+
+err_disable_regulators:
+	regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
+
+err_of_put:
+ of_node_put(lt9611->dsi1_node);
+ of_node_put(lt9611->dsi0_node);
+
+ return ret;
+}
+
+static int lt9611_remove(struct i2c_client *client)
+{
+ struct lt9611 *lt9611 = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+ lt9611_audio_exit(lt9611);
+ drm_bridge_remove(&lt9611->bridge);
+
+ regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
+
+ of_node_put(lt9611->dsi1_node);
+ of_node_put(lt9611->dsi0_node);
+
+ return 0;
+}
+
+static const struct i2c_device_id lt9611_id[] = {
+ { "lontium,lt9611", 0 },
+ {}
+};
+
+static const struct of_device_id lt9611_match_table[] = {
+ { .compatible = "lontium,lt9611" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lt9611_match_table);
+
+static struct i2c_driver lt9611_driver = {
+ .driver = {
+ .name = "lt9611",
+ .of_match_table = lt9611_match_table,
+ },
+ .probe = lt9611_probe,
+ .remove = lt9611_remove,
+ .id_table = lt9611_id,
+};
+module_i2c_driver(lt9611_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index f19d9f7a5db2..f52ccffc1bd1 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -10,13 +10,16 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
struct lvds_codec {
+ struct device *dev;
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct regulator *vcc;
struct gpio_desc *powerdown_gpio;
u32 connector_type;
};
@@ -38,6 +41,14 @@ static int lvds_codec_attach(struct drm_bridge *bridge,
static void lvds_codec_enable(struct drm_bridge *bridge)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+ int ret;
+
+ ret = regulator_enable(lvds_codec->vcc);
+ if (ret) {
+ dev_err(lvds_codec->dev,
+			"Failed to enable \"power\" supply: %d\n", ret);
+ return;
+ }
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -46,9 +57,15 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
static void lvds_codec_disable(struct drm_bridge *bridge)
{
struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
+ int ret;
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
+
+ ret = regulator_disable(lvds_codec->vcc);
+ if (ret)
+ dev_err(lvds_codec->dev,
+			"Failed to disable \"power\" supply: %d\n", ret);
}
static const struct drm_bridge_funcs funcs = {
@@ -63,12 +80,24 @@ static int lvds_codec_probe(struct platform_device *pdev)
struct device_node *panel_node;
struct drm_panel *panel;
struct lvds_codec *lvds_codec;
+ int ret;
lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
if (!lvds_codec)
return -ENOMEM;
+ lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
+
+ lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power");
+ if (IS_ERR(lvds_codec->vcc)) {
+ ret = PTR_ERR(lvds_codec->vcc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(lvds_codec->dev,
+				"Unable to get \"power\" supply: %d\n", ret);
+ return ret;
+ }
+
lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(lvds_codec->powerdown_gpio))
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 6200f12a37e6..d2808c4a6fb1 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -61,7 +61,6 @@ struct ge_b850v3_lvds {
struct drm_bridge bridge;
struct i2c_client *stdp4028_i2c;
struct i2c_client *stdp2690_i2c;
- struct edid *edid;
};
static struct ge_b850v3_lvds *ge_b850v3_lvds_ptr;
@@ -131,22 +130,26 @@ err:
return NULL;
}
-static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
+static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
struct i2c_client *client;
- int num_modes = 0;
client = ge_b850v3_lvds_ptr->stdp2690_i2c;
- kfree(ge_b850v3_lvds_ptr->edid);
- ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client);
+ return (struct edid *)stdp2690_get_edid(client);
+}
- if (ge_b850v3_lvds_ptr->edid) {
- drm_connector_update_edid_property(connector,
- ge_b850v3_lvds_ptr->edid);
- num_modes = drm_add_edid_modes(connector,
- ge_b850v3_lvds_ptr->edid);
- }
+static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+ int num_modes;
+
+ edid = ge_b850v3_lvds_get_edid(&ge_b850v3_lvds_ptr->bridge, connector);
+
+ drm_connector_update_edid_property(connector, edid);
+ num_modes = drm_add_edid_modes(connector, edid);
+ kfree(edid);
return num_modes;
}
@@ -163,8 +166,7 @@ drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
.mode_valid = ge_b850v3_lvds_mode_valid,
};
-static enum drm_connector_status ge_b850v3_lvds_detect(
- struct drm_connector *connector, bool force)
+static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
{
struct i2c_client *stdp4028_i2c =
ge_b850v3_lvds_ptr->stdp4028_i2c;
@@ -182,6 +184,12 @@ static enum drm_connector_status ge_b850v3_lvds_detect(
return connector_status_unknown;
}
+static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
+ bool force)
+{
+ return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
+}
+
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ge_b850v3_lvds_detect,
@@ -191,34 +199,11 @@ static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
-{
- struct i2c_client *stdp4028_i2c
- = ge_b850v3_lvds_ptr->stdp4028_i2c;
-
- i2c_smbus_write_word_data(stdp4028_i2c,
- STDP4028_DPTX_IRQ_STS_REG,
- STDP4028_DPTX_IRQ_CLEAR);
-
- if (ge_b850v3_lvds_ptr->connector.dev)
- drm_kms_helper_hotplug_event(ge_b850v3_lvds_ptr->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
- enum drm_bridge_attach_flags flags)
+static int ge_b850v3_lvds_create_connector(struct drm_bridge *bridge)
{
struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
- struct i2c_client *stdp4028_i2c
- = ge_b850v3_lvds_ptr->stdp4028_i2c;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -237,9 +222,29 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
return ret;
}
- ret = drm_connector_attach_encoder(connector, bridge->encoder);
- if (ret)
- return ret;
+ return drm_connector_attach_encoder(connector, bridge->encoder);
+}
+
+static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
+{
+ struct i2c_client *stdp4028_i2c
+ = ge_b850v3_lvds_ptr->stdp4028_i2c;
+
+ i2c_smbus_write_word_data(stdp4028_i2c,
+ STDP4028_DPTX_IRQ_STS_REG,
+ STDP4028_DPTX_IRQ_CLEAR);
+
+ if (ge_b850v3_lvds_ptr->bridge.dev)
+ drm_kms_helper_hotplug_event(ge_b850v3_lvds_ptr->bridge.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct i2c_client *stdp4028_i2c
+ = ge_b850v3_lvds_ptr->stdp4028_i2c;
/* Configures the bridge to re-enable interrupts after each ack. */
i2c_smbus_write_word_data(stdp4028_i2c,
@@ -251,11 +256,16 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
STDP4028_DPTX_IRQ_EN_REG,
STDP4028_DPTX_IRQ_CONFIG);
- return 0;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ return ge_b850v3_lvds_create_connector(bridge);
}
static const struct drm_bridge_funcs ge_b850v3_lvds_funcs = {
.attach = ge_b850v3_lvds_attach,
+ .detect = ge_b850v3_lvds_bridge_detect,
+ .get_edid = ge_b850v3_lvds_get_edid,
};
static int ge_b850v3_lvds_init(struct device *dev)
@@ -291,8 +301,6 @@ static void ge_b850v3_lvds_remove(void)
drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);
- kfree(ge_b850v3_lvds_ptr->edid);
-
ge_b850v3_lvds_ptr = NULL;
out:
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
@@ -302,14 +310,21 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp4028_i2c->dev;
+ int ret;
+
-	ge_b850v3_lvds_init(dev);
+	ret = ge_b850v3_lvds_init(dev);
+ if (ret)
+ return ret;
ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);
/* drm bridge initialization */
ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
+ ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID;
+ ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);
@@ -361,8 +376,12 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
const struct i2c_device_id *id)
{
struct device *dev = &stdp2690_i2c->dev;
+ int ret;
+
-	ge_b850v3_lvds_init(dev);
+	ret = ge_b850v3_lvds_init(dev);
+ if (ret)
+ return ret;
ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 438e566ce0a4..e941c1132598 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -29,8 +29,7 @@ struct ptn3460_bridge {
struct drm_connector connector;
struct i2c_client *client;
struct drm_bridge bridge;
- struct edid *edid;
- struct drm_panel *panel;
+ struct drm_bridge *panel_bridge;
struct gpio_desc *gpio_pd_n;
struct gpio_desc *gpio_rst_n;
u32 edid_emulation;
@@ -127,11 +126,6 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge)
usleep_range(10, 20);
gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
- if (drm_panel_prepare(ptn_bridge->panel)) {
- DRM_ERROR("failed to prepare panel\n");
- return;
- }
-
/*
* There's a bug in the PTN chip where it falsely asserts hotplug before
* it is fully functional. We're forced to wait for the maximum start up
@@ -146,16 +140,6 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge)
ptn_bridge->enabled = true;
}
-static void ptn3460_enable(struct drm_bridge *bridge)
-{
- struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
-
- if (drm_panel_enable(ptn_bridge->panel)) {
- DRM_ERROR("failed to enable panel\n");
- return;
- }
-}
-
static void ptn3460_disable(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
@@ -165,36 +149,18 @@ static void ptn3460_disable(struct drm_bridge *bridge)
ptn_bridge->enabled = false;
- if (drm_panel_disable(ptn_bridge->panel)) {
- DRM_ERROR("failed to disable panel\n");
- return;
- }
-
gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
gpiod_set_value(ptn_bridge->gpio_pd_n, 0);
}
-static void ptn3460_post_disable(struct drm_bridge *bridge)
-{
- struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
- if (drm_panel_unprepare(ptn_bridge->panel)) {
- DRM_ERROR("failed to unprepare panel\n");
- return;
- }
-}
-
-static int ptn3460_get_modes(struct drm_connector *connector)
+static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct ptn3460_bridge *ptn_bridge;
- u8 *edid;
- int ret, num_modes = 0;
+ struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
bool power_off;
-
- ptn_bridge = connector_to_ptn3460(connector);
-
- if (ptn_bridge->edid)
- return drm_add_edid_modes(connector, ptn_bridge->edid);
+ u8 *edid;
+ int ret;
power_off = !ptn_bridge->enabled;
ptn3460_pre_enable(&ptn_bridge->bridge);
@@ -202,30 +168,40 @@ static int ptn3460_get_modes(struct drm_connector *connector)
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid) {
DRM_ERROR("Failed to allocate EDID\n");
- return 0;
+ goto out;
}
ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid,
- EDID_LENGTH);
+ EDID_LENGTH);
if (ret) {
kfree(edid);
+ edid = NULL;
goto out;
}
- ptn_bridge->edid = (struct edid *)edid;
- drm_connector_update_edid_property(connector, ptn_bridge->edid);
-
- num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
-
out:
if (power_off)
ptn3460_disable(&ptn_bridge->bridge);
+ return (struct edid *)edid;
+}
+
+static int ptn3460_connector_get_modes(struct drm_connector *connector)
+{
+ struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
+ struct edid *edid;
+ int num_modes;
+
+ edid = ptn3460_get_edid(&ptn_bridge->bridge, connector);
+ drm_connector_update_edid_property(connector, edid);
+ num_modes = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
return num_modes;
}
static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
- .get_modes = ptn3460_get_modes,
+ .get_modes = ptn3460_connector_get_modes,
};
static const struct drm_connector_funcs ptn3460_connector_funcs = {
@@ -242,10 +218,14 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
+	/*
+	 * Always attach the panel bridge with NO_CONNECTOR set so that this
+	 * driver keeps creating its own connector when one is requested.
+	 */
+ ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
+ bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
@@ -265,9 +245,6 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
drm_connector_attach_encoder(&ptn_bridge->connector,
bridge->encoder);
- if (ptn_bridge->panel)
- drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector);
-
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
return ret;
@@ -275,10 +252,9 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
- .enable = ptn3460_enable,
.disable = ptn3460_disable,
- .post_disable = ptn3460_post_disable,
.attach = ptn3460_bridge_attach,
+ .get_edid = ptn3460_get_edid,
};
static int ptn3460_probe(struct i2c_client *client,
@@ -286,6 +262,8 @@ static int ptn3460_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
+ struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
int ret;
ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
@@ -293,10 +271,15 @@ static int ptn3460_probe(struct i2c_client *client,
return -ENOMEM;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ptn_bridge->panel, NULL);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, NULL);
if (ret)
return ret;
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+
+ ptn_bridge->panel_bridge = panel_bridge;
ptn_bridge->client = client;
ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown",
@@ -327,6 +310,8 @@ static int ptn3460_probe(struct i2c_client *client,
}
ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
+ ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
+ ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
drm_bridge_add(&ptn_bridge->bridge);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 1e63ed6b18aa..0ddc37551194 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -82,18 +82,11 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
- ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
- if (ret < 0)
- return ret;
-
return 0;
}
static void panel_bridge_detach(struct drm_bridge *bridge)
{
- struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
-
- drm_panel_detach(panel_bridge->panel);
}
static void panel_bridge_pre_enable(struct drm_bridge *bridge)
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index d789ea2a7fb9..614b19f0f1b7 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -42,10 +42,9 @@
#endif
struct ps8622_bridge {
- struct drm_connector connector;
struct i2c_client *client;
struct drm_bridge bridge;
- struct drm_panel *panel;
+ struct drm_bridge *panel_bridge;
struct regulator *v12;
struct backlight_device *bl;
@@ -64,12 +63,6 @@ static inline struct ps8622_bridge *
return container_of(bridge, struct ps8622_bridge, bridge);
}
-static inline struct ps8622_bridge *
- connector_to_ps8622(struct drm_connector *connector)
-{
- return container_of(connector, struct ps8622_bridge, connector);
-}
-
static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val)
{
int ret;
@@ -365,11 +358,6 @@ static void ps8622_pre_enable(struct drm_bridge *bridge)
DRM_ERROR("fails to enable ps8622->v12");
}
- if (drm_panel_prepare(ps8622->panel)) {
- DRM_ERROR("failed to prepare panel\n");
- return;
- }
-
gpiod_set_value(ps8622->gpio_slp, 1);
/*
@@ -399,24 +387,9 @@ static void ps8622_pre_enable(struct drm_bridge *bridge)
ps8622->enabled = true;
}
-static void ps8622_enable(struct drm_bridge *bridge)
-{
- struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
-
- if (drm_panel_enable(ps8622->panel)) {
- DRM_ERROR("failed to enable panel\n");
- return;
- }
-}
-
static void ps8622_disable(struct drm_bridge *bridge)
{
- struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
-
- if (drm_panel_disable(ps8622->panel)) {
- DRM_ERROR("failed to disable panel\n");
- return;
- }
+ /* Delay after panel is disabled */
msleep(PS8622_PWMO_END_T12_MS);
}
@@ -436,11 +409,6 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
*/
gpiod_set_value(ps8622->gpio_slp, 0);
- if (drm_panel_unprepare(ps8622->panel)) {
- DRM_ERROR("failed to unprepare panel\n");
- return;
- }
-
if (ps8622->v12)
regulator_disable(ps8622->v12);
@@ -455,67 +423,17 @@ static void ps8622_post_disable(struct drm_bridge *bridge)
msleep(PS8622_POWER_OFF_T17_MS);
}
-static int ps8622_get_modes(struct drm_connector *connector)
-{
- struct ps8622_bridge *ps8622;
-
- ps8622 = connector_to_ps8622(connector);
-
- return drm_panel_get_modes(ps8622->panel, connector);
-}
-
-static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
- .get_modes = ps8622_get_modes,
-};
-
-static const struct drm_connector_funcs ps8622_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int ps8622_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
- int ret;
-
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
- if (!bridge->encoder) {
- DRM_ERROR("Parent encoder object not found");
- return -ENODEV;
- }
-
- ps8622->connector.polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(bridge->dev, &ps8622->connector,
- &ps8622_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
- drm_connector_helper_add(&ps8622->connector,
- &ps8622_connector_helper_funcs);
- drm_connector_register(&ps8622->connector);
- drm_connector_attach_encoder(&ps8622->connector,
- bridge->encoder);
-
- if (ps8622->panel)
- drm_panel_attach(ps8622->panel, &ps8622->connector);
-
- drm_helper_hpd_irq_event(ps8622->connector.dev);
-
- return ret;
+ return drm_bridge_attach(ps8622->bridge.encoder, ps8622->panel_bridge,
+ &ps8622->bridge, flags);
}
static const struct drm_bridge_funcs ps8622_bridge_funcs = {
.pre_enable = ps8622_pre_enable,
- .enable = ps8622_enable,
.disable = ps8622_disable,
.post_disable = ps8622_post_disable,
.attach = ps8622_attach,
@@ -533,16 +451,23 @@ static int ps8622_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct ps8622_bridge *ps8622;
+ struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
int ret;
ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
if (!ps8622)
return -ENOMEM;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &ps8622->panel, NULL);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, NULL);
if (ret)
return ret;
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+
+ ps8622->panel_bridge = panel_bridge;
ps8622->client = client;
ps8622->v12 = devm_regulator_get(dev, "vdd12");
@@ -595,6 +520,7 @@ static int ps8622_probe(struct i2c_client *client,
}
ps8622->bridge.funcs = &ps8622_bridge_funcs;
+ ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 4b099196afeb..7bd0affa057a 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -65,6 +65,7 @@ struct ps8640 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
+ bool powered;
};
static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
@@ -82,19 +83,24 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
sizeof(vdo_ctrl_buf),
vdo_ctrl_buf);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_ERROR("failed to %sable VDO: %d\n",
+ ctrl == ENABLE ? "en" : "dis", ret);
return ret;
+ }
return 0;
}
-static void ps8640_pre_enable(struct drm_bridge *bridge)
+static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
{
- struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
unsigned long timeout;
int ret, status;
+ if (ps_bridge->powered)
+ return;
+
ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret < 0) {
@@ -149,12 +155,6 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
goto err_regulators_disable;
}
- ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
- if (ret) {
- DRM_ERROR("failed to enable VDO: %d\n", ret);
- goto err_regulators_disable;
- }
-
/* Switch access edp panel's edid through i2c */
ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
I2C_BYPASS_EN);
@@ -163,6 +163,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
goto err_regulators_disable;
}
+ ps_bridge->powered = true;
+
return;
err_regulators_disable:
@@ -170,14 +172,12 @@ err_regulators_disable:
ps_bridge->supplies);
}
-static void ps8640_post_disable(struct drm_bridge *bridge)
+static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge)
{
- struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
int ret;
- ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
- if (ret < 0)
- DRM_ERROR("failed to disable VDO: %d\n", ret);
+ if (!ps_bridge->powered)
+ return;
gpiod_set_value(ps_bridge->gpio_reset, 1);
gpiod_set_value(ps_bridge->gpio_powerdown, 1);
@@ -185,6 +185,28 @@ static void ps8640_post_disable(struct drm_bridge *bridge)
ps_bridge->supplies);
if (ret < 0)
DRM_ERROR("cannot disable regulators %d\n", ret);
+
+ ps_bridge->powered = false;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int ret;
+
+ ps8640_bridge_poweron(ps_bridge);
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ if (ret < 0)
+ ps8640_bridge_poweroff(ps_bridge);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+
+ ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+ ps8640_bridge_poweroff(ps_bridge);
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -200,6 +222,10 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
.channel = 0,
.node = NULL,
};
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
/* port@0 is ps8640 dsi input port */
in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!in_ep)
@@ -242,8 +268,43 @@ err_dsi_attach:
return ret;
}
+static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ bool poweroff = !ps_bridge->powered;
+ struct edid *edid;
+
+	/*
+	 * When get_edid() is called via an ioctl, i.e.
+	 *
+	 *   drm_mode_getconnector (ioctl)
+	 *     -> drm_helper_probe_single_connector_modes
+	 *        -> drm_bridge_connector_get_modes
+	 *           -> ps8640_bridge_get_edid
+	 *
+	 * the chip is not necessarily powered yet. Reading the EDID only
+	 * works after a full poweron of this chip, so run the pre_enable
+	 * chain first.
+	 */
+ drm_bridge_chain_pre_enable(bridge);
+
+ edid = drm_get_edid(connector,
+ ps_bridge->page[PAGE0_DP_CNTL]->adapter);
+
+	/*
+	 * If the chip was powered off before this call, return it to its
+	 * original power state.
+	 */
+ if (poweroff)
+ drm_bridge_chain_post_disable(bridge);
+
+ return edid;
+}
+
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
+ .get_edid = ps8640_bridge_get_edid,
.post_disable = ps8640_post_disable,
.pre_enable = ps8640_pre_enable,
};
@@ -294,6 +355,8 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
+ ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
+ ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
ps_bridge->page[PAGE0_DP_CNTL] = client;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index d580b2aa4ce9..6b268f9445b3 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -89,7 +89,9 @@
#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
#define VID_MODE_TYPE_BURST 0x2
#define VID_MODE_TYPE_MASK 0x3
+#define ENABLE_LOW_POWER_CMD BIT(15)
#define VID_MODE_VPG_ENABLE BIT(16)
+#define VID_MODE_VPG_MODE BIT(20)
#define VID_MODE_VPG_HORIZONTAL BIT(24)
#define DSI_VID_PKT_SIZE 0x3c
@@ -220,6 +222,21 @@
#define PHY_STATUS_TIMEOUT_US 10000
#define CMD_PKT_STATUS_TIMEOUT_US 20000
+#ifdef CONFIG_DEBUG_FS
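+/*
+ * Each video-pattern-generator flag is exposed as a debugfs file; VPG_DEFS
+ * maps a member of dsi->vpg_defs to the register mask it controls.
+ */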
+#define VPG_DEFS(name, dsi) \
+ ((void __force *)&((*dsi).vpg_defs.name))
+
+#define REGISTER(name, mask, dsi) \
+ { #name, VPG_DEFS(name, dsi), mask, dsi }
+
+struct debugfs_entries {
+ const char *name;
+ bool *reg;
+ u32 mask;
+ struct dw_mipi_dsi *dsi;
+};
+#endif /* CONFIG_DEBUG_FS */
+
struct dw_mipi_dsi {
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
@@ -237,9 +254,12 @@ struct dw_mipi_dsi {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
-
- bool vpg;
- bool vpg_horizontal;
+ struct debugfs_entries *debugfs_vpg;
+ struct {
+ bool vpg;
+ bool vpg_horizontal;
+ bool vpg_ber_pattern;
+ } vpg_defs;
#endif /* CONFIG_DEBUG_FS */
struct dw_mipi_dsi *master; /* dual-dsi master ptr */
@@ -360,13 +380,28 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
u32 val = 0;
+ /*
+ * TODO dw drv improvements
+ * largest packet sizes during hfp or during vsa/vpb/vfp
+ * should be computed according to byte lane, lane number and only
+ * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS)
+ */
+ dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(16)
+ | INVACT_LPCMD_TIME(4));
+
if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
val |= ACK_RQST_EN;
if (lpm)
val |= CMD_MODE_ALL_LP;
- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+
+ val = dsi_read(dsi, DSI_VID_MODE_CFG);
+ if (lpm)
+ val |= ENABLE_LOW_POWER_CMD;
+ else
+ val &= ~ENABLE_LOW_POWER_CMD;
+ dsi_write(dsi, DSI_VID_MODE_CFG, val);
}
static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
@@ -529,9 +564,11 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
#ifdef CONFIG_DEBUG_FS
- if (dsi->vpg) {
+ if (dsi->vpg_defs.vpg) {
val |= VID_MODE_VPG_ENABLE;
- val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0;
+ val |= dsi->vpg_defs.vpg_horizontal ?
+ VID_MODE_VPG_HORIZONTAL : 0;
+ val |= dsi->vpg_defs.vpg_ber_pattern ? VID_MODE_VPG_MODE : 0;
}
#endif /* CONFIG_DEBUG_FS */
@@ -541,16 +578,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
unsigned long mode_flags)
{
+ u32 val;
+
dsi_write(dsi, DSI_PWR_UP, RESET);
if (mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
dw_mipi_dsi_video_mode_config(dsi);
- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
} else {
dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
}
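+	/*
+	 * Request HS clocking on the clock lane; with a non-continuous
+	 * clock the controller may gate the lane between transfers
+	 * (AUTO_CLKLANE_CTRL).
+	 */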
+ val = PHY_TXREQUESTCLKHS;
+ if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ val |= AUTO_CLKLANE_CTRL;
+ dsi_write(dsi, DSI_LPCLK_CTRL, val);
+
dsi_write(dsi, DSI_PWR_UP, POWERUP);
}
@@ -562,15 +605,30 @@ static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
{
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+ unsigned int esc_rate; /* in MHz */
+ u32 esc_clk_division;
+ int ret;
+
/*
* The maximum permitted escape clock is 20MHz and it is derived from
- * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
- *
- * (lane_mbps >> 3) / esc_clk_division < 20
+ * lanebyteclk, which is running at "lane_mbps / 8".
+ */
+ if (phy_ops->get_esc_clk_rate) {
+ ret = phy_ops->get_esc_clk_rate(dsi->plat_data->priv_data,
+ &esc_rate);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_esc_clk_rate() failed\n");
+	} else {
+		esc_rate = 20; /* Default to 20 MHz */
+	}
+
+	/*
+	 * We want:
+	 *	(lane_mbps >> 3) / esc_clk_division < esc_rate
+	 * which is:
-	 *	(lane_mbps >> 3) / 20 > esc_clk_division
+	 *	(lane_mbps >> 3) / esc_rate > esc_clk_division
+	 */
- u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
+ esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1;
dsi_write(dsi, DSI_PWR_UP, RESET);
@@ -611,14 +669,6 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel));
dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
dsi_write(dsi, DSI_DPI_CFG_POL, val);
- /*
- * TODO dw drv improvements
- * largest packet sizes during hfp or during vsa/vpb/vfp
- * should be computed according to byte lane, lane number and only
- * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS)
- */
- dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
- | INVACT_LPCMD_TIME(4));
}
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
@@ -964,6 +1014,66 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
#ifdef CONFIG_DEBUG_FS
+static int dw_mipi_dsi_debugfs_write(void *data, u64 val)
+{
+ struct debugfs_entries *vpg = data;
+ struct dw_mipi_dsi *dsi;
+ u32 mode_cfg;
+
+ if (!vpg)
+ return -ENODEV;
+
+ dsi = vpg->dsi;
+
+ *vpg->reg = (bool)val;
+
+ mode_cfg = dsi_read(dsi, DSI_VID_MODE_CFG);
+
+ if (*vpg->reg)
+ mode_cfg |= vpg->mask;
+ else
+ mode_cfg &= ~vpg->mask;
+
+ dsi_write(dsi, DSI_VID_MODE_CFG, mode_cfg);
+
+ return 0;
+}
+
+static int dw_mipi_dsi_debugfs_show(void *data, u64 *val)
+{
+ struct debugfs_entries *vpg = data;
+
+ if (!vpg)
+ return -ENODEV;
+
+ *val = *vpg->reg;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_mipi_dsi_debugfs_show,
+ dw_mipi_dsi_debugfs_write, "%llu\n");
+
+static void debugfs_create_files(void *data)
+{
+ struct dw_mipi_dsi *dsi = data;
+ struct debugfs_entries debugfs[] = {
+ REGISTER(vpg, VID_MODE_VPG_ENABLE, dsi),
+ REGISTER(vpg_horizontal, VID_MODE_VPG_HORIZONTAL, dsi),
+ REGISTER(vpg_ber_pattern, VID_MODE_VPG_MODE, dsi),
+ };
+ int i;
+
+ dsi->debugfs_vpg = kmemdup(debugfs, sizeof(debugfs), GFP_KERNEL);
+ if (!dsi->debugfs_vpg)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs); i++)
+ debugfs_create_file(dsi->debugfs_vpg[i].name, 0644,
+ dsi->debugfs, &dsi->debugfs_vpg[i],
+ &fops_x32);
+}
+
static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
{
dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL);
@@ -972,14 +1082,13 @@ static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
return;
}
- debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg);
- debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs,
- &dsi->vpg_horizontal);
+ debugfs_create_files(dsi);
}
static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi)
{
debugfs_remove_recursive(dsi->debugfs);
+ kfree(dsi->debugfs_vpg);
}
#else
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
new file mode 100644
index 000000000000..1bfdfc6affaf
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ *
+ * Based on tc358764.c by
+ * Andrzej Hajda <a.hajda@samsung.com>
+ * Maciej Purski <m.purski@samsung.com>
+ *
+ * Based on rpi_touchscreen.c by
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* PPI layer registers */
+#define PPI_STARTPPI 0x0104 /* START control bit */
+#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
+#define PPI_D0S_ATMR 0x0144
+#define PPI_D1S_ATMR 0x0148
+#define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */
+#define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */
+#define PPI_START_FUNCTION 1
+
+/* DSI layer registers */
+#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
+#define DSI_LANEENABLE 0x0210 /* Enables each lane */
+#define DSI_RX_START 1
+
+/* LCDC/DPI Host Registers */
+#define LCDCTRL 0x0420
+
+/* SPI Master Registers */
+#define SPICMR 0x0450
+#define SPITCR 0x0454
+
+/* System Controller Registers */
+#define SYSCTRL 0x0464
+
+/* System registers */
+#define LPX_PERIOD 3
+
+/* Lane enable PPI and DSI register bits */
+#define LANEENABLE_CLEN BIT(0)
+#define LANEENABLE_L0EN BIT(1)
+#define LANEENABLE_L1EN BIT(2)
+
+struct tc358762 {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct regulator *regulator;
+ struct drm_bridge *panel_bridge;
+ bool pre_enabled;
+ int error;
+};
+
+static int tc358762_clear_error(struct tc358762 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void tc358762_write(struct tc358762 *ctx, u16 addr, u32 val)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ ssize_t ret;
+ u8 data[6];
+
+ if (ctx->error)
+ return;
+
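+	/* 16-bit register address followed by 32-bit value, little-endian */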
+ data[0] = addr;
+ data[1] = addr >> 8;
+ data[2] = val;
+ data[3] = val >> 8;
+ data[4] = val >> 16;
+ data[5] = val >> 24;
+
+ ret = mipi_dsi_generic_write(dsi, data, sizeof(data));
+ if (ret < 0)
+ ctx->error = ret;
+}
+
+static inline struct tc358762 *bridge_to_tc358762(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tc358762, bridge);
+}
+
+static int tc358762_init(struct tc358762 *ctx)
+{
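+	/*
+	 * Bring-up sequence from the vendor reference: enable the clock
+	 * lane and data lane 0, program the lane timers and LPTX timing,
+	 * set up the LCDC/SPI/system control registers, then start the
+	 * PPI and DSI function blocks.
+	 */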
+ tc358762_write(ctx, DSI_LANEENABLE,
+ LANEENABLE_L0EN | LANEENABLE_CLEN);
+ tc358762_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
+ tc358762_write(ctx, PPI_D1S_CLRSIPOCOUNT, 5);
+ tc358762_write(ctx, PPI_D0S_ATMR, 0);
+ tc358762_write(ctx, PPI_D1S_ATMR, 0);
+ tc358762_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
+
+ tc358762_write(ctx, SPICMR, 0x00);
+ tc358762_write(ctx, LCDCTRL, 0x00100150);
+ tc358762_write(ctx, SYSCTRL, 0x040f);
+ msleep(100);
+
+ tc358762_write(ctx, PPI_STARTPPI, PPI_START_FUNCTION);
+ tc358762_write(ctx, DSI_STARTDSI, DSI_RX_START);
+
+ msleep(100);
+
+ return tc358762_clear_error(ctx);
+}
+
+static void tc358762_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358762 *ctx = bridge_to_tc358762(bridge);
+ int ret;
+
+ /*
+ * The post_disable hook might be called multiple times.
+ * We want to avoid regulator imbalance below.
+ */
+ if (!ctx->pre_enabled)
+ return;
+
+ ctx->pre_enabled = false;
+
+ ret = regulator_disable(ctx->regulator);
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
+}
+
+static void tc358762_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358762 *ctx = bridge_to_tc358762(bridge);
+ int ret;
+
+ ret = regulator_enable(ctx->regulator);
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling regulators (%d)\n", ret);
+
+ ret = tc358762_init(ctx);
+ if (ret < 0)
+ dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+
+ ctx->pre_enabled = true;
+}
+
+static int tc358762_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc358762 *ctx = bridge_to_tc358762(bridge);
+
+ return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ bridge, flags);
+}
+
+static const struct drm_bridge_funcs tc358762_bridge_funcs = {
+ .post_disable = tc358762_post_disable,
+ .pre_enable = tc358762_pre_enable,
+ .attach = tc358762_attach,
+};
+
+static int tc358762_parse_dt(struct tc358762 *ctx)
+{
+ struct drm_bridge *panel_bridge;
+ struct device *dev = ctx->dev;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ if (ret)
+ return ret;
+
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+
+ ctx->panel_bridge = panel_bridge;
+
+ return 0;
+}
+
+static int tc358762_configure_regulators(struct tc358762 *ctx)
+{
+ ctx->regulator = devm_regulator_get(ctx->dev, "vddc");
+ if (IS_ERR(ctx->regulator))
+ return PTR_ERR(ctx->regulator);
+
+ return 0;
+}
+
+static int tc358762_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tc358762 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+ ctx->pre_enabled = false;
+
+ /* TODO: Find out how to get dual-lane mode working */
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM;
+
+ ret = tc358762_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = tc358762_configure_regulators(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->bridge.funcs = &tc358762_bridge_funcs;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
+ ctx->bridge.of_node = dev->of_node;
+
+ drm_bridge_add(&ctx->bridge);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_bridge_remove(&ctx->bridge);
+ dev_err(dev, "failed to attach dsi\n");
+ }
+
+ return ret;
+}
+
+static int tc358762_remove(struct mipi_dsi_device *dsi)
+{
+ struct tc358762 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_bridge_remove(&ctx->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tc358762_of_match[] = {
+ { .compatible = "toshiba,tc358762" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358762_of_match);
+
+static struct mipi_dsi_driver tc358762_driver = {
+ .probe = tc358762_probe,
+ .remove = tc358762_remove,
+ .driver = {
+ .name = "tc358762",
+ .of_match_table = tc358762_of_match,
+ },
+};
+module_mipi_dsi_driver(tc358762_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("MIPI-DSI based Driver for TC358762 DSI/DPI Bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 5ac1430fab04..d89394bc5aa4 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -153,10 +153,9 @@ static const char * const tc358764_supplies[] = {
struct tc358764 {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
struct gpio_desc *gpio_reset;
- struct drm_panel *panel;
+ struct drm_bridge *panel_bridge;
int error;
};
@@ -210,12 +209,6 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
return container_of(bridge, struct tc358764, bridge);
}
-static inline
-struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
-{
- return container_of(connector, struct tc358764, connector);
-}
-
static int tc358764_init(struct tc358764 *ctx)
{
u32 v = 0;
@@ -278,43 +271,11 @@ static void tc358764_reset(struct tc358764 *ctx)
usleep_range(1000, 2000);
}
-static int tc358764_get_modes(struct drm_connector *connector)
-{
- struct tc358764 *ctx = connector_to_tc358764(connector);
-
- return drm_panel_get_modes(ctx->panel, connector);
-}
-
-static const
-struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
- .get_modes = tc358764_get_modes,
-};
-
-static const struct drm_connector_funcs tc358764_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static void tc358764_disable(struct drm_bridge *bridge)
-{
- struct tc358764 *ctx = bridge_to_tc358764(bridge);
- int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
-
- if (ret < 0)
- dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
-}
-
static void tc358764_post_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret;
- ret = drm_panel_unprepare(ctx->panel);
- if (ret < 0)
- dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
tc358764_reset(ctx);
usleep_range(10000, 15000);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -335,73 +296,28 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
ret = tc358764_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
- ret = drm_panel_prepare(ctx->panel);
- if (ret < 0)
- dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
-}
-
-static void tc358764_enable(struct drm_bridge *bridge)
-{
- struct tc358764 *ctx = bridge_to_tc358764(bridge);
- int ret = drm_panel_enable(ctx->panel);
-
- if (ret < 0)
- dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
static int tc358764_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- struct drm_device *drm = bridge->dev;
- int ret;
-
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
- ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(drm, &ctx->connector,
- &tc358764_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_connector_helper_add(&ctx->connector,
- &tc358764_connector_helper_funcs);
- drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
- drm_panel_attach(ctx->panel, &ctx->connector);
- ctx->connector.funcs->reset(&ctx->connector);
- drm_connector_register(&ctx->connector);
-
- return 0;
-}
-
-static void tc358764_detach(struct drm_bridge *bridge)
-{
- struct tc358764 *ctx = bridge_to_tc358764(bridge);
- drm_connector_unregister(&ctx->connector);
- drm_panel_detach(ctx->panel);
- ctx->panel = NULL;
- drm_connector_put(&ctx->connector);
+ return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
- .disable = tc358764_disable,
.post_disable = tc358764_post_disable,
- .enable = tc358764_enable,
.pre_enable = tc358764_pre_enable,
.attach = tc358764_attach,
- .detach = tc358764_detach,
};
static int tc358764_parse_dt(struct tc358764 *ctx)
{
+ struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
+ struct drm_panel *panel;
int ret;
ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -410,12 +326,16 @@ static int tc358764_parse_dt(struct tc358764 *ctx)
return PTR_ERR(ctx->gpio_reset);
}
- ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
- NULL);
- if (ret && ret != -EPROBE_DEFER)
- dev_err(dev, "cannot find panel (%d)\n", ret);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ if (ret)
+ return ret;
- return ret;
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+
+ ctx->panel_bridge = panel_bridge;
+ return 0;
}
static int tc358764_configure_regulators(struct tc358764 *ctx)
@@ -461,6 +381,7 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
return ret;
ctx->bridge.funcs = &tc358764_bridge_funcs;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index c2777b226c75..34a3e4e9f717 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -244,14 +244,12 @@ struct tc_data {
struct drm_dp_aux aux;
struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
struct drm_connector connector;
- struct drm_panel *panel;
/* link settings */
struct tc_edp_link link;
- /* display edid */
- struct edid *edid;
/* current mode */
struct drm_display_mode mode;
@@ -1236,13 +1234,6 @@ static int tc_stream_disable(struct tc_data *tc)
return 0;
}
-static void tc_bridge_pre_enable(struct drm_bridge *bridge)
-{
- struct tc_data *tc = bridge_to_tc(bridge);
-
- drm_panel_prepare(tc->panel);
-}
-
static void tc_bridge_enable(struct drm_bridge *bridge)
{
struct tc_data *tc = bridge_to_tc(bridge);
@@ -1266,8 +1257,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
tc_main_link_disable(tc);
return;
}
-
- drm_panel_enable(tc->panel);
}
static void tc_bridge_disable(struct drm_bridge *bridge)
@@ -1275,8 +1264,6 @@ static void tc_bridge_disable(struct drm_bridge *bridge)
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
- drm_panel_disable(tc->panel);
-
ret = tc_stream_disable(tc);
if (ret < 0)
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
@@ -1286,13 +1273,6 @@ static void tc_bridge_disable(struct drm_bridge *bridge)
dev_err(tc->dev, "main link disable error: %d\n", ret);
}
-static void tc_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct tc_data *tc = bridge_to_tc(bridge);
-
- drm_panel_unprepare(tc->panel);
-}
-
static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adj)
@@ -1335,11 +1315,19 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
tc->mode = *mode;
}
+static struct edid *tc_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ return drm_get_edid(connector, &tc->aux.ddc);
+}
+
static int tc_connector_get_modes(struct drm_connector *connector)
{
struct tc_data *tc = connector_to_tc(connector);
+ int num_modes;
struct edid *edid;
- int count;
int ret;
ret = tc_get_display_props(tc);
@@ -1348,42 +1336,30 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return 0;
}
- count = drm_panel_get_modes(tc->panel, connector);
- if (count > 0)
- return count;
-
- edid = drm_get_edid(connector, &tc->aux.ddc);
-
- kfree(tc->edid);
- tc->edid = edid;
- if (!edid)
- return 0;
+ if (tc->panel_bridge) {
+ num_modes = drm_bridge_get_modes(tc->panel_bridge, connector);
+ if (num_modes > 0)
+ return num_modes;
+ }
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
+ edid = tc_get_edid(&tc->bridge, connector);
+ num_modes = drm_add_edid_modes(connector, edid);
+ kfree(edid);
- return count;
+ return num_modes;
}
static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
.get_modes = tc_connector_get_modes,
};
-static enum drm_connector_status tc_connector_detect(struct drm_connector *connector,
- bool force)
+static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge)
{
- struct tc_data *tc = connector_to_tc(connector);
+ struct tc_data *tc = bridge_to_tc(bridge);
bool conn;
u32 val;
int ret;
- if (tc->hpd_pin < 0) {
- if (tc->panel)
- return connector_status_connected;
- else
- return connector_status_unknown;
- }
-
ret = regmap_read(tc->regmap, GPIOI, &val);
if (ret)
return connector_status_unknown;
@@ -1396,6 +1372,20 @@ static enum drm_connector_status tc_connector_detect(struct drm_connector *conne
return connector_status_disconnected;
}
+static enum drm_connector_status
+tc_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+
+ if (tc->hpd_pin >= 0)
+ return tc_bridge_detect(&tc->bridge);
+
+ if (tc->panel_bridge)
+ return connector_status_connected;
+ else
+ return connector_status_unknown;
+}
+
static const struct drm_connector_funcs tc_connector_funcs = {
.detect = tc_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1413,16 +1403,20 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
struct drm_device *drm = bridge->dev;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
+ if (tc->panel_bridge) {
+ /* If a connector is required then this driver shall create it */
+ ret = drm_bridge_attach(tc->bridge.encoder, tc->panel_bridge,
+ &tc->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
}
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
- ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
- tc->panel ? DRM_MODE_CONNECTOR_eDP :
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, tc->bridge.type);
if (ret)
return ret;
@@ -1435,9 +1429,6 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
DRM_CONNECTOR_POLL_DISCONNECT;
}
- if (tc->panel)
- drm_panel_attach(tc->panel, &tc->connector);
-
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
tc->connector.display_info.bus_flags =
@@ -1453,11 +1444,11 @@ static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
.mode_valid = tc_mode_valid,
.mode_set = tc_bridge_mode_set,
- .pre_enable = tc_bridge_pre_enable,
.enable = tc_bridge_enable,
.disable = tc_bridge_disable,
- .post_disable = tc_bridge_post_disable,
.mode_fixup = tc_bridge_mode_fixup,
+ .detect = tc_bridge_detect,
+ .get_edid = tc_get_edid,
};
static bool tc_readable_reg(struct device *dev, unsigned int reg)
@@ -1547,6 +1538,7 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct drm_panel *panel;
struct tc_data *tc;
int ret;
@@ -1557,10 +1549,23 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->dev = dev;
/* port@2 is the output port */
- ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL);
if (ret && ret != -ENODEV)
return ret;
+ if (panel) {
+ struct drm_bridge *panel_bridge;
+
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+
+ tc->panel_bridge = panel_bridge;
+ tc->bridge.type = DRM_MODE_CONNECTOR_eDP;
+ } else {
+ tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+ }
+
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
if (IS_ERR(tc->sd_gpio))
@@ -1680,6 +1685,10 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
tc->bridge.funcs = &tc_bridge_funcs;
+ if (tc->hpd_pin >= 0)
+ tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+ tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
+
tc->bridge.of_node = dev->of_node;
drm_bridge_add(&tc->bridge);
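
[Editor's note] With detect and EDID retrieval now exposed as bridge operations and connector creation made optional, a display driver can delegate connector handling to the DRM core. A minimal sketch of the KMS-driver side (the encoder setup around it is hypothetical; the helpers named are the ones this series builds on):

    struct drm_connector *connector;
    int ret;

    /* Ask the whole bridge chain not to create connectors itself. */
    ret = drm_bridge_attach(encoder, bridge, NULL,
                            DRM_BRIDGE_ATTACH_NO_CONNECTOR);
    if (ret)
        return ret;

    /* One connector that proxies detect/EDID to the bridge ops. */
    connector = drm_bridge_connector_init(drm, encoder);
    if (IS_ERR(connector))
        return PTR_ERR(connector);

    drm_connector_attach_encoder(connector, encoder);
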
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
new file mode 100644
index 000000000000..2272adcc5b4a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TC358775 DSI to LVDS bridge driver
+ *
+ * Copyright (C) 2020 SMART Wireless Computing
+ * Author: Vinay Simha BN <simhavcs@gmail.com>
+ *
+ */
+/* #define DEBUG */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <asm/unaligned.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#define FLD_VAL(val, start, end) FIELD_PREP(GENMASK(start, end), val)
+
+/* Registers */
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX 0x0004 /* Data Lane 0 DPHY Tx Control */
+#define CLW_DPHYCONTRX 0x0020 /* Clock Lane DPHY Rx Control */
+#define D0W_DPHYCONTRX 0x0024 /* Data Lane 0 DPHY Rx Control */
+#define D1W_DPHYCONTRX 0x0028 /* Data Lane 1 DPHY Rx Control */
+#define D2W_DPHYCONTRX 0x002C /* Data Lane 2 DPHY Rx Control */
+#define D3W_DPHYCONTRX 0x0030 /* Data Lane 3 DPHY Rx Control */
+#define COM_DPHYCONTRX 0x0038 /* DPHY Rx Common Control */
+#define CLW_CNTRL 0x0040 /* Clock Lane Control */
+#define D0W_CNTRL 0x0044 /* Data Lane 0 Control */
+#define D1W_CNTRL 0x0048 /* Data Lane 1 Control */
+#define D2W_CNTRL 0x004C /* Data Lane 2 Control */
+#define D3W_CNTRL 0x0050 /* Data Lane 3 Control */
+#define DFTMODE_CNTRL 0x0054 /* DFT Mode Control */
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI 0x0104 /* START control bit of PPI-TX function. */
+#define PPI_START_FUNCTION 1
+
+#define PPI_BUSYPPI 0x0108
+#define PPI_LINEINITCNT 0x0110 /* Line Initialization Wait Counter */
+#define PPI_LPTXTIMECNT 0x0114
+#define PPI_LANEENABLE 0x0134 /* Enables each lane at the PPI layer. */
+#define PPI_TX_RX_TA 0x013C /* DSI Bus Turn Around timing parameters */
+
+/* Analog timer function enable */
+#define PPI_CLS_ATMR 0x0140 /* Delay for Clock Lane in LPRX */
+#define PPI_D0S_ATMR 0x0144 /* Delay for Data Lane 0 in LPRX */
+#define PPI_D1S_ATMR 0x0148 /* Delay for Data Lane 1 in LPRX */
+#define PPI_D2S_ATMR 0x014C /* Delay for Data Lane 2 in LPRX */
+#define PPI_D3S_ATMR 0x0150 /* Delay for Data Lane 3 in LPRX */
+
+#define PPI_D0S_CLRSIPOCOUNT 0x0164 /* For lane 0 */
+#define PPI_D1S_CLRSIPOCOUNT 0x0168 /* For lane 1 */
+#define PPI_D2S_CLRSIPOCOUNT 0x016C /* For lane 2 */
+#define PPI_D3S_CLRSIPOCOUNT 0x0170 /* For lane 3 */
+
+#define CLS_PRE 0x0180 /* Digital Counter inside of PHY IO */
+#define D0S_PRE 0x0184 /* Digital Counter inside of PHY IO */
+#define D1S_PRE 0x0188 /* Digital Counter inside of PHY IO */
+#define D2S_PRE 0x018C /* Digital Counter inside of PHY IO */
+#define D3S_PRE 0x0190 /* Digital Counter inside of PHY IO */
+#define CLS_PREP 0x01A0 /* Digital Counter inside of PHY IO */
+#define D0S_PREP 0x01A4 /* Digital Counter inside of PHY IO */
+#define D1S_PREP 0x01A8 /* Digital Counter inside of PHY IO */
+#define D2S_PREP 0x01AC /* Digital Counter inside of PHY IO */
+#define D3S_PREP 0x01B0 /* Digital Counter inside of PHY IO */
+#define CLS_ZERO 0x01C0 /* Digital Counter inside of PHY IO */
+#define D0S_ZERO 0x01C4 /* Digital Counter inside of PHY IO */
+#define D1S_ZERO 0x01C8 /* Digital Counter inside of PHY IO */
+#define D2S_ZERO 0x01CC /* Digital Counter inside of PHY IO */
+#define D3S_ZERO 0x01D0 /* Digital Counter inside of PHY IO */
+
+#define PPI_CLRFLG 0x01E0 /* PRE Counters has reached set values */
+#define PPI_CLRSIPO 0x01E4 /* Clear SIPO values, Slave mode use only. */
+#define HSTIMEOUT 0x01F0 /* HS Rx Time Out Counter */
+#define HSTIMEOUTENABLE 0x01F4 /* Enable HS Rx Time Out Counter */
+#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX function */
+#define DSI_RX_START 1
+
+#define DSI_BUSYDSI 0x0208
+#define DSI_LANEENABLE 0x0210 /* Enables each lane at the Protocol layer. */
+#define DSI_LANESTATUS0 0x0214 /* Shows whether each lane is in HS RX mode. */
+#define DSI_LANESTATUS1 0x0218 /* Shows whether each lane is in ULPS or STOP state. */
+
+#define DSI_INTSTATUS 0x0220 /* Interrupt Status */
+#define DSI_INTMASK 0x0224 /* Interrupt Mask */
+#define DSI_INTCLR 0x0228 /* Interrupt Clear */
+#define DSI_LPTXTO 0x0230 /* Low Power Tx Time Out Counter */
+
+#define DSIERRCNT 0x0300 /* DSI Error Count */
+#define APLCTRL 0x0400 /* Application Layer Control */
+#define RDPKTLN 0x0404 /* Command Read Packet Length */
+
+#define VPCTRL 0x0450 /* Video Path Control */
+#define HTIM1 0x0454 /* Horizontal Timing Control 1 */
+#define HTIM2 0x0458 /* Horizontal Timing Control 2 */
+#define VTIM1 0x045C /* Vertical Timing Control 1 */
+#define VTIM2 0x0460 /* Vertical Timing Control 2 */
+#define VFUEN 0x0464 /* Video Frame Timing Update Enable */
+#define VFUEN_EN BIT(0) /* Upload Enable */
+
+/* Mux Input Select for LVDS LINK Input */
+#define LV_MX0003 0x0480 /* Bit 0 to 3 */
+#define LV_MX0407 0x0484 /* Bit 4 to 7 */
+#define LV_MX0811 0x0488 /* Bit 8 to 11 */
+#define LV_MX1215 0x048C /* Bit 12 to 15 */
+#define LV_MX1619 0x0490 /* Bit 16 to 19 */
+#define LV_MX2023 0x0494 /* Bit 20 to 23 */
+#define LV_MX2427 0x0498 /* Bit 24 to 27 */
+#define LV_MX(b0, b1, b2, b3) (FLD_VAL(b0, 4, 0) | FLD_VAL(b1, 12, 8) | \
+ FLD_VAL(b2, 20, 16) | FLD_VAL(b3, 28, 24))
+
+/* Input bit numbers used in mux registers */
+enum {
+ LVI_R0,
+ LVI_R1,
+ LVI_R2,
+ LVI_R3,
+ LVI_R4,
+ LVI_R5,
+ LVI_R6,
+ LVI_R7,
+ LVI_G0,
+ LVI_G1,
+ LVI_G2,
+ LVI_G3,
+ LVI_G4,
+ LVI_G5,
+ LVI_G6,
+ LVI_G7,
+ LVI_B0,
+ LVI_B1,
+ LVI_B2,
+ LVI_B3,
+ LVI_B4,
+ LVI_B5,
+ LVI_B6,
+ LVI_B7,
+ LVI_HS,
+ LVI_VS,
+ LVI_DE,
+ LVI_L0
+};
+
+#define LVCFG 0x049C /* LVDS Configuration */
+#define LVPHY0 0x04A0 /* LVDS PHY 0 */
+#define LV_PHY0_RST(v) FLD_VAL(v, 22, 22) /* PHY reset */
+#define LV_PHY0_IS(v) FLD_VAL(v, 15, 14)
+#define LV_PHY0_ND(v) FLD_VAL(v, 4, 0) /* Frequency range select */
+#define LV_PHY0_PRBS_ON(v) FLD_VAL(v, 20, 16) /* Clock/Data Flag pins */
+
+#define LVPHY1 0x04A4 /* LVDS PHY 1 */
+#define SYSSTAT 0x0500 /* System Status */
+#define SYSRST 0x0504 /* System Reset */
+
+#define SYS_RST_I2CS BIT(0) /* Reset I2C-Slave controller */
+#define SYS_RST_I2CM BIT(1) /* Reset I2C-Master controller */
+#define SYS_RST_LCD BIT(2) /* Reset LCD controller */
+#define SYS_RST_BM BIT(3) /* Reset Bus Management controller */
+#define SYS_RST_DSIRX BIT(4) /* Reset DSI-RX and App controller */
+#define SYS_RST_REG BIT(5) /* Reset Register module */
+
+/* GPIO Registers */
+#define GPIOC 0x0520 /* GPIO Control */
+#define GPIOO 0x0524 /* GPIO Output */
+#define GPIOI 0x0528 /* GPIO Input */
+
+/* I2C Registers */
+#define I2CTIMCTRL 0x0540 /* I2C IF Timing and Enable Control */
+#define I2CMADDR 0x0544 /* I2C Master Addressing */
+#define WDATAQ 0x0548 /* Write Data Queue */
+#define RDATAQ 0x054C /* Read Data Queue */
+
+/* Chip ID and Revision ID Register */
+#define IDREG 0x0580
+
+#define LPX_PERIOD 4
+#define TTA_GET 0x40000
+#define TTA_SURE 6
+#define SINGLE_LINK 1
+#define DUAL_LINK 2
+
+#define TC358775XBG_ID 0x00007500
+
+/* Debug Registers */
+#define DEBUG00 0x05A0 /* Debug */
+#define DEBUG01 0x05A4 /* LVDS Data */
+
+#define DSI_CLEN_BIT BIT(0)
+#define DIVIDE_BY_3 3 /* PCLK=DCLK/3 */
+#define DIVIDE_BY_6 6 /* PCLK=DCLK/6 */
+#define LVCFG_LVEN_BIT BIT(0)
+
+#define L0EN BIT(1)
+
+#define TC358775_VPCTRL_VSDELAY__MASK 0x3FF00000
+#define TC358775_VPCTRL_VSDELAY__SHIFT 20
+static inline u32 TC358775_VPCTRL_VSDELAY(uint32_t val)
+{
+ return ((val) << TC358775_VPCTRL_VSDELAY__SHIFT) &
+ TC358775_VPCTRL_VSDELAY__MASK;
+}
+
+#define TC358775_VPCTRL_OPXLFMT__MASK 0x00000100
+#define TC358775_VPCTRL_OPXLFMT__SHIFT 8
+static inline u32 TC358775_VPCTRL_OPXLFMT(uint32_t val)
+{
+ return ((val) << TC358775_VPCTRL_OPXLFMT__SHIFT) &
+ TC358775_VPCTRL_OPXLFMT__MASK;
+}
+
+#define TC358775_VPCTRL_MSF__MASK 0x00000001
+#define TC358775_VPCTRL_MSF__SHIFT 0
+static inline u32 TC358775_VPCTRL_MSF(uint32_t val)
+{
+ return ((val) << TC358775_VPCTRL_MSF__SHIFT) &
+ TC358775_VPCTRL_MSF__MASK;
+}
+
+#define TC358775_LVCFG_PCLKDIV__MASK 0x000000f0
+#define TC358775_LVCFG_PCLKDIV__SHIFT 4
+static inline u32 TC358775_LVCFG_PCLKDIV(uint32_t val)
+{
+ return ((val) << TC358775_LVCFG_PCLKDIV__SHIFT) &
+ TC358775_LVCFG_PCLKDIV__MASK;
+}
+
+#define TC358775_LVCFG_LVDLINK__MASK 0x00000002
+#define TC358775_LVCFG_LVDLINK__SHIFT 0
+static inline u32 TC358775_LVCFG_LVDLINK(uint32_t val)
+{
+ return ((val) << TC358775_LVCFG_LVDLINK__SHIFT) &
+ TC358775_LVCFG_LVDLINK__MASK;
+}
+
+enum tc358775_ports {
+ TC358775_DSI_IN,
+ TC358775_LVDS_OUT0,
+ TC358775_LVDS_OUT1,
+};
+
+struct tc_data {
+ struct i2c_client *i2c;
+ struct device *dev;
+
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+
+ struct regulator *vdd;
+ struct regulator *vddio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *stby_gpio;
+ u8 lvds_link; /* single-link or dual-link */
+ u8 bpc;
+};
+
+static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
+{
+ return container_of(b, struct tc_data, bridge);
+}
+
+static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct device *dev = &tc->dsi->dev;
+ int ret;
+
+ ret = regulator_enable(tc->vddio);
+ if (ret < 0)
+ dev_err(dev, "regulator vddio enable failed, %d\n", ret);
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(tc->vdd);
+ if (ret < 0)
+ dev_err(dev, "regulator vdd enable failed, %d\n", ret);
+ usleep_range(10000, 11000);
+
+ gpiod_set_value(tc->stby_gpio, 0);
+ usleep_range(10000, 11000);
+
+ gpiod_set_value(tc->reset_gpio, 0);
+ usleep_range(10, 20);
+}
+
+static void tc_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct device *dev = &tc->dsi->dev;
+ int ret;
+
+ gpiod_set_value(tc->reset_gpio, 1);
+ usleep_range(10, 20);
+
+ gpiod_set_value(tc->stby_gpio, 1);
+ usleep_range(10000, 11000);
+
+ ret = regulator_disable(tc->vdd);
+ if (ret < 0)
+ dev_err(dev, "regulator vdd disable failed, %d\n", ret);
+ usleep_range(10000, 11000);
+
+ ret = regulator_disable(tc->vddio);
+ if (ret < 0)
+ dev_err(dev, "regulator vddio disable failed, %d\n", ret);
+ usleep_range(10000, 11000);
+}
+
+static void d2l_read(struct i2c_client *i2c, u16 addr, u32 *val)
+{
+ int ret;
+ u8 buf_addr[2];
+
+ put_unaligned_be16(addr, buf_addr);
+ ret = i2c_master_send(i2c, buf_addr, sizeof(buf_addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(i2c, (u8 *)val, sizeof(*val));
+ if (ret < 0)
+ goto fail;
+
+ pr_debug("d2l: I2C : addr:%04x value:%08x\n", addr, *val);
+
+fail:
+ dev_err(&i2c->dev, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+}
+
+static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val)
+{
+ u8 data[6];
+ int ret;
+
+ put_unaligned_be16(addr, data);
+ put_unaligned_le32(val, data + 2);
+
+ ret = i2c_master_send(i2c, data, ARRAY_SIZE(data));
+ if (ret < 0)
+ dev_err(&i2c->dev, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+/* helper function to access bus_formats */
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return connector;
+
+ return NULL;
+}
+
+static void tc_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2;
+ u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2;
+ u32 val = 0;
+ u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay;
+ struct drm_display_mode *mode;
+ struct drm_connector *connector = get_connector(bridge->encoder);
+
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+
+ hback_porch = mode->htotal - mode->hsync_end;
+ hsync_len = mode->hsync_end - mode->hsync_start;
+ vback_porch = mode->vtotal - mode->vsync_end;
+ vsync_len = mode->vsync_end - mode->vsync_start;
+
+ htime1 = (hback_porch << 16) + hsync_len;
+ vtime1 = (vback_porch << 16) + vsync_len;
+
+ hfront_porch = mode->hsync_start - mode->hdisplay;
+ hactive = mode->hdisplay;
+ vfront_porch = mode->vsync_start - mode->vdisplay;
+ vactive = mode->vdisplay;
+
+ htime2 = (hfront_porch << 16) + hactive;
+ vtime2 = (vfront_porch << 16) + vactive;
+
+ d2l_read(tc->i2c, IDREG, &val);
+
+ dev_info(tc->dev, "DSI2LVDS Chip ID.%02x Revision ID. %02x **\n",
+ (val >> 8) & 0xFF, val & 0xFF);
+
+ d2l_write(tc->i2c, SYSRST, SYS_RST_REG | SYS_RST_DSIRX | SYS_RST_BM |
+ SYS_RST_LCD | SYS_RST_I2CM | SYS_RST_I2CS);
+ usleep_range(30000, 40000);
+
+ d2l_write(tc->i2c, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+ d2l_write(tc->i2c, PPI_LPTXTIMECNT, LPX_PERIOD);
+ d2l_write(tc->i2c, PPI_D0S_CLRSIPOCOUNT, 3);
+ d2l_write(tc->i2c, PPI_D1S_CLRSIPOCOUNT, 3);
+ d2l_write(tc->i2c, PPI_D2S_CLRSIPOCOUNT, 3);
+ d2l_write(tc->i2c, PPI_D3S_CLRSIPOCOUNT, 3);
+
+ val = ((L0EN << tc->num_dsi_lanes) - L0EN) | DSI_CLEN_BIT;
+ d2l_write(tc->i2c, PPI_LANEENABLE, val);
+ d2l_write(tc->i2c, DSI_LANEENABLE, val);
+
+ d2l_write(tc->i2c, PPI_STARTPPI, PPI_START_FUNCTION);
+ d2l_write(tc->i2c, DSI_STARTDSI, DSI_RX_START);
+
+ if (tc->bpc == 8)
+ val = TC358775_VPCTRL_OPXLFMT(1);
+ else /* bpc = 6; */
+ val = TC358775_VPCTRL_MSF(1);
+
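+ /*
+ * VSDELAY derivation, presumably following the vendor's formula;
+ * the variable roles below are inferred rather than documented:
+ * dsiclk is the per-lane DSI bit clock, byteclk the DSI byte
+ * clock, t1 the DSI transfer length of one active line in byte
+ * clocks, and t2/t3 rescale the full LVDS line time into byte
+ * clocks. Their difference yields the VS assertion delay.
+ */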
+ dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000;
+ clkdiv = dsiclk / DIVIDE_BY_3 * tc->lvds_link;
+ byteclk = dsiclk / 4;
+ t1 = hactive * (tc->bpc * 3 / 8) / tc->num_dsi_lanes;
+ t2 = ((100000 / clkdiv)) * (hactive + hback_porch + hsync_len + hfront_porch) / 1000;
+ t3 = ((t2 * byteclk) / 100) - (hactive * (tc->bpc * 3 / 8) /
+ tc->num_dsi_lanes);
+
+ vsdelay = (clkdiv * (t1 + t3) / byteclk) - hback_porch - hsync_len - hactive;
+
+ val |= TC358775_VPCTRL_VSDELAY(vsdelay);
+ d2l_write(tc->i2c, VPCTRL, val);
+
+ d2l_write(tc->i2c, HTIM1, htime1);
+ d2l_write(tc->i2c, VTIM1, vtime1);
+ d2l_write(tc->i2c, HTIM2, htime2);
+ d2l_write(tc->i2c, VTIM2, vtime2);
+
+ d2l_write(tc->i2c, VFUEN, VFUEN_EN);
+ d2l_write(tc->i2c, SYSRST, SYS_RST_LCD);
+ d2l_write(tc->i2c, LVPHY0, LV_PHY0_PRBS_ON(4) | LV_PHY0_ND(6));
+
+ dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
+ connector->display_info.bus_formats[0],
+ tc->bpc);
+ /*
+ * The default tc358775 register settings correspond to the
+ * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA (jeida-24) format.
+ */
+ if (connector->display_info.bus_formats[0] ==
+ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
+ /* VESA-24 */
+ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_R7, LVI_R5, LVI_G0));
+ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_G6, LVI_G7));
+ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+ } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
+ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
+ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
+ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
+ }
+
+ d2l_write(tc->i2c, VFUEN, VFUEN_EN);
+
+ val = LVCFG_LVEN_BIT;
+ if (tc->lvds_link == DUAL_LINK) {
+ val |= TC358775_LVCFG_LVDLINK(1);
+ val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_6);
+ } else {
+ val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_3);
+ }
+ d2l_write(tc->i2c, LVCFG, val);
+}
+
+static enum drm_mode_status
+tc_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ /*
+ * Maximum pixel clock: 135 MHz for single-link,
+ * 270 MHz for dual-link.
+ */
+ if ((mode->clock > 135000 && tc->lvds_link == SINGLE_LINK) ||
+ (mode->clock > 270000 && tc->lvds_link == DUAL_LINK))
+ return MODE_CLOCK_HIGH;
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ /* RGB888 */
+ tc->bpc = 8;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ /* RGB666 */
+ tc->bpc = 6;
+ break;
+ default:
+ dev_warn(tc->dev,
+ "unsupported LVDS bus format 0x%04x\n",
+ info->bus_formats[0]);
+ return MODE_NOMODE;
+ }
+
+ return MODE_OK;
+}
+
+static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc)
+{
+ struct device_node *endpoint;
+ struct device_node *parent;
+ struct device_node *remote;
+ struct property *prop;
+ int len = 0;
+
+ /*
+ * To get the data-lanes of dsi, we need to access the dsi0_out of port1
+ * of dsi0 endpoint from bridge port0 of d2l_in
+ */
+ endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node,
+ TC358775_DSI_IN, -1);
+ if (endpoint) {
+ /* dsi0_out node */
+ parent = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (parent) {
+ /* dsi0 port 1 */
+ endpoint = of_graph_get_endpoint_by_regs(parent, 1, -1);
+ of_node_put(parent);
+ if (endpoint) {
+ prop = of_find_property(endpoint, "data-lanes",
+ &len);
+ of_node_put(endpoint);
+ if (!prop) {
+ dev_err(tc->dev,
+ "failed to find data lane\n");
+ return -EPROBE_DEFER;
+ }
+ }
+ }
+ }
+
+ tc->num_dsi_lanes = len / sizeof(u32);
+
+ if (tc->num_dsi_lanes < 1 || tc->num_dsi_lanes > 4)
+ return -EINVAL;
+
+ tc->host_node = of_graph_get_remote_node(np, 0, 0);
+ if (!tc->host_node)
+ return -ENODEV;
+
+ of_node_put(tc->host_node);
+
+ tc->lvds_link = SINGLE_LINK;
+ endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node,
+ TC358775_LVDS_OUT1, -1);
+ if (endpoint) {
+ remote = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+
+ if (remote) {
+ if (of_device_is_available(remote))
+ tc->lvds_link = DUAL_LINK;
+ of_node_put(remote);
+ }
+ }
+
+ dev_dbg(tc->dev, "no.of dsi lanes: %d\n", tc->num_dsi_lanes);
+ dev_dbg(tc->dev, "operating in %d-link mode\n", tc->lvds_link);
+
+ return 0;
+}
+
+static int tc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct device *dev = &tc->i2c->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ int ret;
+
+ const struct mipi_dsi_device_info info = { .type = "tc358775",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(tc->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ tc->dsi = dsi;
+
+ dsi->lanes = tc->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, tc->panel_bridge,
+ &tc->bridge, flags);
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+ return ret;
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+ .attach = tc_bridge_attach,
+ .pre_enable = tc_bridge_pre_enable,
+ .enable = tc_bridge_enable,
+ .mode_valid = tc_mode_valid,
+ .post_disable = tc_bridge_post_disable,
+};
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct drm_panel *panel;
+ struct tc_data *tc;
+ int ret;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->dev = dev;
+ tc->i2c = client;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, TC358775_LVDS_OUT0,
+ 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ tc->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(tc->panel_bridge))
+ return PTR_ERR(tc->panel_bridge);
+
+ ret = tc358775_parse_dt(dev->of_node, tc);
+ if (ret)
+ return ret;
+
+ tc->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(tc->vddio)) {
+ ret = PTR_ERR(tc->vddio);
+ dev_err(dev, "vddio-supply not found\n");
+ return ret;
+ }
+
+ tc->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(tc->vdd)) {
+ ret = PTR_ERR(tc->vdd);
+ dev_err(dev, "vdd-supply not found\n");
+ return ret;
+ }
+
+ tc->stby_gpio = devm_gpiod_get(dev, "stby", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->stby_gpio)) {
+ ret = PTR_ERR(tc->stby_gpio);
+ dev_err(dev, "cannot get stby-gpio %d\n", ret);
+ return ret;
+ }
+
+ tc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->reset_gpio)) {
+ ret = PTR_ERR(tc->reset_gpio);
+ dev_err(dev, "cannot get reset-gpios %d\n", ret);
+ return ret;
+ }
+
+ tc->bridge.funcs = &tc_bridge_funcs;
+ tc->bridge.of_node = dev->of_node;
+ drm_bridge_add(&tc->bridge);
+
+ i2c_set_clientdata(client, tc);
+
+ return 0;
+}
+
+static int tc_remove(struct i2c_client *client)
+{
+ struct tc_data *tc = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&tc->bridge);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc358775_i2c_ids[] = {
+ { "tc358775", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358775_i2c_ids);
+
+static const struct of_device_id tc358775_of_ids[] = {
+ { .compatible = "toshiba,tc358775", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358775_of_ids);
+
+static struct i2c_driver tc358775_driver = {
+ .driver = {
+ .name = "tc358775",
+ .of_match_table = tc358775_of_ids,
+ },
+ .id_table = tc358775_i2c_ids,
+ .probe = tc_probe,
+ .remove = tc_remove,
+};
+module_i2c_driver(tc358775_driver);
+
+MODULE_AUTHOR("Vinay Simha BN <simhavcs@gmail.com>");
+MODULE_DESCRIPTION("TC358775 DSI/LVDS bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 5b6e19ecbc84..ecdf9b01340f 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -394,9 +394,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
}
pdata->dsi = dsi;
- /* attach panel to bridge */
- drm_panel_attach(pdata->panel, &pdata->connector);
-
return 0;
err_dsi_attach:
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 9e1ad493e689..f9170b4b22e7 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1115,9 +1115,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
* @old_state: atomic state object with old state structures
*
* This function updates all the various legacy modeset state pointers in
- * connectors, encoders and CRTCs. It also updates the timestamping constants
- * used for precise vblank timestamps by calling
- * drm_calc_timestamping_constants().
+ * connectors, encoders and CRTCs.
*
* Drivers can use this for building their own atomic commit if they don't have
* a pure helper-based modeset implementation.
@@ -1186,13 +1184,30 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc->x = new_plane_state->src_x >> 16;
crtc->y = new_plane_state->src_y >> 16;
}
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
+/**
+ * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
+ * @state: atomic state object
+ *
+ * Updates the timestamping constants used for precise vblank timestamps
+ * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
+ */
+void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->enable)
drm_calc_timestamping_constants(crtc,
&new_crtc_state->adjusted_mode);
}
}
-EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
+EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
@@ -1276,6 +1291,7 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
disable_outputs(dev, old_state);
drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+ drm_atomic_helper_calc_timestamping_constants(old_state);
crtc_set_mode(dev, old_state);
}
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index c6994fe673f3..a58cbde59c34 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -187,6 +187,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
case DRM_MODE_CONNECTOR_DPI:
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_DSI:
+ case DRM_MODE_CONNECTOR_eDP:
status = connector_status_connected;
break;
default:
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 03e01b000f7a..0fe3c496002a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st)
struct sg_page_iter sg_iter;
mb(); /*CLFLUSH is ordered only by using memory barriers*/
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+ for_each_sgtable_page(st, &sg_iter, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
mb(); /*Make sure that all cache line entry is flushed*/
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 00e40a26a800..717c4e7271b0 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -850,7 +850,7 @@ static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
{ DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */
{ DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */
};
@@ -867,7 +867,7 @@ static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
- { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
@@ -876,6 +876,19 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static const struct drm_prop_enum_list drm_dp_subconnector_enum_list[] = {
+ { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I, TV-out and DP */
+ { DRM_MODE_SUBCONNECTOR_VGA, "VGA" }, /* DP */
+ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DP */
+ { DRM_MODE_SUBCONNECTOR_HDMIA, "HDMI" }, /* DP */
+ { DRM_MODE_SUBCONNECTOR_DisplayPort, "DP" }, /* DP */
+ { DRM_MODE_SUBCONNECTOR_Wireless, "Wireless" }, /* DP */
+ { DRM_MODE_SUBCONNECTOR_Native, "Native" }, /* DP */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dp_subconnector_name,
+ drm_dp_subconnector_enum_list)
+
static const struct drm_prop_enum_list hdmi_colorspaces[] = {
/* For Default case, driver will set the colorspace */
{ DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
@@ -1217,6 +1230,14 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* can also expose this property to external outputs, in which case they
* must support "None", which should be the default (since external screens
* have a built-in scaler).
+ *
+ * subconnector:
+ * This property is used by DVI-I, TVout and DisplayPort to indicate different
+ * connector subtypes. Enum values more or less match with those from main
+ * connector types.
+ * For DVI-I and TVout there is also a matching property "select subconnector"
+ * allowing to switch between signal types.
+ * DP subconnector corresponds to a downstream port.
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1306,6 +1327,30 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
/**
+ * drm_connector_attach_dp_subconnector_property - create subconnector property for DP
+ * @connector: drm_connector to attach property
+ *
+ * Called by a driver when DP connector is created.
+ */
+void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector)
+{
+ struct drm_mode_config *mode_config = &connector->dev->mode_config;
+
+ if (!mode_config->dp_subconnector_property)
+ mode_config->dp_subconnector_property =
+ drm_property_create_enum(connector->dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "subconnector",
+ drm_dp_subconnector_enum_list,
+ ARRAY_SIZE(drm_dp_subconnector_enum_list));
+
+ drm_object_attach_property(&connector->base,
+ mode_config->dp_subconnector_property,
+ DRM_MODE_SUBCONNECTOR_Unknown);
+}
+EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
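+/*
+ * Example (hedged sketch; the connector pointer and funcs belong to a
+ * hypothetical driver): attach the property once, right after the DP
+ * connector is created:
+ *
+ *	drm_connector_init(dev, connector, &my_dp_connector_funcs,
+ *			   DRM_MODE_CONNECTOR_DisplayPort);
+ *	drm_connector_attach_dp_subconnector_property(connector);
+ */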
+
+/**
* DOC: HDMI connector properties
*
* content type (HDMI specific):
@@ -2244,7 +2289,7 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *conne
static bool
drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
- const struct list_head *export_list,
+ const struct list_head *modes,
const struct drm_file *file_priv)
{
/*
@@ -2260,15 +2305,17 @@ drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
* while preparing the list of user-modes.
*/
if (!file_priv->aspect_ratio_allowed) {
- struct drm_display_mode *mode_itr;
+ const struct drm_display_mode *mode_itr;
- list_for_each_entry(mode_itr, export_list, export_head)
- if (drm_mode_match(mode_itr, mode,
+ list_for_each_entry(mode_itr, modes, head) {
+ if (mode_itr->expose_to_userspace &&
+ drm_mode_match(mode_itr, mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
return false;
+ }
}
return true;
@@ -2288,7 +2335,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
- LIST_HEAD(export_list);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
@@ -2332,25 +2378,30 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->connection = connector->status;
/* delayed so we get modes regardless of pre-fill_modes state */
- list_for_each_entry(mode, &connector->modes, head)
- if (drm_mode_expose_to_userspace(mode, &export_list,
+ list_for_each_entry(mode, &connector->modes, head) {
+ WARN_ON(mode->expose_to_userspace);
+
+ if (drm_mode_expose_to_userspace(mode, &connector->modes,
file_priv)) {
- list_add_tail(&mode->export_head, &export_list);
+ mode->expose_to_userspace = true;
mode_count++;
}
+ }
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
- * The modes that need to be exposed to the user are maintained in the
- * 'export_list'. When the ioctl is called first time to determine the,
- * space, the export_list gets filled, to find the no.of modes. In the
- * 2nd time, the user modes are filled, one by one from the export_list.
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
- list_for_each_entry(mode, &export_list, export_head) {
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (!mode->expose_to_userspace)
+ continue;
+
+ /* Clear the tag for the next time around */
+ mode->expose_to_userspace = false;
+
drm_mode_convert_to_umode(&u_mode, mode);
/*
* Reset aspect ratio flags of user-mode, if modes with
@@ -2361,13 +2412,26 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
+
+ /*
+ * Clear the tag for the rest of
+ * the modes for the next time around.
+ */
+ list_for_each_entry_continue(mode, &connector->modes, head)
+ mode->expose_to_userspace = false;
+
mutex_unlock(&dev->mode_config.mutex);
goto out;
}
copied++;
}
+ } else {
+ /* Clear the tag for the next time around */
+ list_for_each_entry(mode, &connector->modes, head)
+ mode->expose_to_userspace = false;
}
+
out_resp->count_modes = mode_count;
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 5d67a41f7c3a..3dd70d813f69 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
- if (ret)
+ if (ret) {
+ kfree(source);
return ret;
+ }
spin_lock_irq(&crc->lock);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 092c8c985911..deeed73f4ed6 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -363,6 +363,70 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
+static bool is_edid_digital_input_dp(const struct edid *edid)
+{
+ return edid && edid->revision >= 4 &&
+ edid->input & DRM_EDID_INPUT_DIGITAL &&
+ (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_DP;
+}
+
+/**
+ * drm_dp_downstream_is_type() - is the downstream facing port of certain type?
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @type: port type to be checked. Can be:
+ * %DP_DS_PORT_TYPE_DP, %DP_DS_PORT_TYPE_VGA, %DP_DS_PORT_TYPE_DVI,
+ * %DP_DS_PORT_TYPE_HDMI, %DP_DS_PORT_TYPE_NON_EDID,
+ * %DP_DS_PORT_TYPE_DP_DUALMODE or %DP_DS_PORT_TYPE_WIRELESS.
+ *
+ * Caveat: Only works with DPCD 1.1+ port caps.
+ *
+ * Returns: whether the downstream facing port matches the type.
+ */
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 type)
+{
+ return drm_dp_is_branch(dpcd) &&
+ dpcd[DP_DPCD_REV] >= 0x11 &&
+ (port_cap[0] & DP_DS_PORT_TYPE_MASK) == type;
+}
+EXPORT_SYMBOL(drm_dp_downstream_is_type);
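+/*
+ * Example (hedged fragment; dpcd[] and port_cap[] are assumed to be
+ * cached from earlier DPCD reads): branch on the DFP type, e.g. to
+ * apply the VGA dotclock limit only where it is meaningful:
+ *
+ *	if (drm_dp_downstream_is_type(dpcd, port_cap, DP_DS_PORT_TYPE_VGA))
+ *		max_dotclock = drm_dp_downstream_max_dotclock(dpcd, port_cap);
+ */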
+
+/**
+ * drm_dp_downstream_is_tmds() - is the downstream facing port TMDS?
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @edid: EDID
+ *
+ * Returns: whether the downstream facing port is TMDS (HDMI/DVI).
+ */
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid)
+{
+ if (dpcd[DP_DPCD_REV] < 0x11) {
+ switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+ case DP_DWN_STRM_PORT_TYPE_TMDS:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ if (is_edid_digital_input_dp(edid))
+ return false;
+ fallthrough;
+ case DP_DS_PORT_TYPE_DVI:
+ case DP_DS_PORT_TYPE_HDMI:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_is_tmds);
+
/**
* drm_dp_send_real_edid_checksum() - send back real edid checksum value
* @aux: DisplayPort AUX channel
@@ -423,66 +487,313 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
+static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 port_count = dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_PORT_COUNT_MASK;
+
+ if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE && port_count > 4)
+ port_count = 4;
+
+ return port_count;
+}
+
+static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 dpcd_ext[6];
+ int ret;
+
+ /*
+ * Prior to DP1.3 the bit represented by
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+ * If it is set DP_DPCD_REV at 0000h could be at a value less than
+ * the true capability of the panel. The only way to check is to
+ * then compare 0000h and 2200h.
+ */
+ if (!(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+ return 0;
+
+ ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext,
+ sizeof(dpcd_ext));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(dpcd_ext))
+ return -EIO;
+
+ if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+ DRM_DEBUG_KMS("%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+ aux->name, dpcd[DP_DPCD_REV],
+ dpcd_ext[DP_DPCD_REV]);
+ return 0;
+ }
+
+ if (!memcmp(dpcd, dpcd_ext, sizeof(dpcd_ext)))
+ return 0;
+
+ DRM_DEBUG_KMS("%s: Base DPCD: %*ph\n",
+ aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
+
+ memcpy(dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+ return 0;
+}
+
+/**
+ * drm_dp_read_dpcd_caps() - read DPCD caps and extended DPCD caps if
+ * available
+ * @aux: DisplayPort AUX channel
+ * @dpcd: Buffer to store the resulting DPCD in
+ *
+ * Attempts to read the base DPCD caps for @aux. Additionally, this function
+ * checks for and reads the extended DPRX caps (%DP_DP13_DPCD_REV) if
+ * present.
+ *
+ * Returns: %0 if the DPCD was read successfully, negative error code
+ * otherwise.
+ */
+int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret < 0)
+ return ret;
+ if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0)
+ return -EIO;
+
+ ret = drm_dp_read_extended_dpcd_caps(aux, dpcd);
+ if (ret < 0)
+ return ret;
+
+ DRM_DEBUG_KMS("%s: DPCD: %*ph\n",
+ aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_read_dpcd_caps);
+
+/**
+ * drm_dp_read_downstream_info() - read DPCD downstream port info if available
+ * @aux: DisplayPort AUX channel
+ * @dpcd: A cached copy of the port's DPCD
+ * @downstream_ports: buffer to store the downstream port info in
+ *
+ * See also:
+ * drm_dp_downstream_max_dotclock()
+ * drm_dp_downstream_max_bpc()
+ *
+ * Returns: 0 if either the downstream port info was read successfully or
+ * there was no downstream info to read, or a negative error code otherwise.
+ */
+int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS])
+{
+ int ret;
+ u8 len;
+
+ memset(downstream_ports, 0, DP_MAX_DOWNSTREAM_PORTS);
+
+ /* No downstream info to read */
+ if (!drm_dp_is_branch(dpcd) ||
+ dpcd[DP_DPCD_REV] < DP_DPCD_REV_10 ||
+ !(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ return 0;
+
+ len = drm_dp_downstream_port_count(dpcd);
+ if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
+ len *= 4;
+
+ ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len);
+ if (ret < 0)
+ return ret;
+ if (ret != len)
+ return -EIO;
+
+ DRM_DEBUG_KMS("%s: DPCD DFP: %*ph\n",
+ aux->name, len, downstream_ports);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_downstream_info);
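+/*
+ * Example (hedged sketch of a source driver's probe path; the aux
+ * channel and surrounding error handling are hypothetical). Note that
+ * drm_dp_read_downstream_info() is a no-op for non-branch devices and
+ * simply zeroes the buffer:
+ *
+ *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ *	u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ *
+ *	ret = drm_dp_read_dpcd_caps(&aux, dpcd);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	ret = drm_dp_read_downstream_info(&aux, dpcd, downstream_ports);
+ *	if (ret < 0)
+ *		return ret;
+ */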
+
/**
- * drm_dp_downstream_max_clock() - extract branch device max
- * pixel rate for legacy VGA
- * converter or max TMDS clock
- * rate for others
+ * drm_dp_downstream_max_dotclock() - extract downstream facing port max dot clock
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
*
- * Returns max clock in kHz on success or 0 if max clock not defined
+ * Returns: Downstream facing port max dot clock in kHz on success,
+ * or 0 if max clock not defined
*/
-int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4])
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
{
- int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DETAILED_CAP_INFO_AVAILABLE;
+ if (!drm_dp_is_branch(dpcd))
+ return 0;
- if (!detailed_cap_info)
+ if (dpcd[DP_DPCD_REV] < 0x11)
return 0;
- switch (type) {
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_VGA:
- return port_cap[1] * 8 * 1000;
- case DP_DS_PORT_TYPE_DVI:
- case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return 0;
+ return port_cap[1] * 8000;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_max_dotclock);
+
+/**
+ * drm_dp_downstream_max_tmds_clock() - extract downstream facing port max TMDS clock
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ * @edid: EDID
+ *
+ * Returns: HDMI/DVI downstream facing port max TMDS clock in kHz on success,
+ * or 0 if max TMDS clock not defined
+ */
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid)
+{
+ if (!drm_dp_is_branch(dpcd))
+ return 0;
+
+ if (dpcd[DP_DPCD_REV] < 0x11) {
+ switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+ case DP_DWN_STRM_PORT_TYPE_TMDS:
+ return 165000;
+ default:
+ return 0;
+ }
+ }
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
case DP_DS_PORT_TYPE_DP_DUALMODE:
+ if (is_edid_digital_input_dp(edid))
+ return 0;
+ /*
+ * It's left up to the driver to check the
+ * DP dual mode adapter's max TMDS clock.
+ *
+ * Unfortunately it looks like branch devices
+ * may not forward the DP dual mode i2c
+ * access, so we usually just get an i2c nak :(
+ */
+ fallthrough;
+ case DP_DS_PORT_TYPE_HDMI:
+ /*
+ * We should perhaps assume 165 MHz when detailed cap
+ * info is not available. But looks like many typical
+ * branch devices fall into that category and so we'd
+ * probably end up with users complaining that they can't
+ * get high resolution modes with their favorite dongle.
+ *
+ * So let's limit to 300 MHz instead, since DPCD 1.4
+ * HDMI 2.0 DFPs are required to have the detailed cap
+ * info, making it more likely we're dealing with an
+ * HDMI 1.4 compatible device here.
+ */
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return 300000;
+ return port_cap[1] * 2500;
+ case DP_DS_PORT_TYPE_DVI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return 165000;
+ /* FIXME what to do about DVI dual link? */
return port_cap[1] * 2500;
default:
return 0;
}
}
-EXPORT_SYMBOL(drm_dp_downstream_max_clock);
+EXPORT_SYMBOL(drm_dp_downstream_max_tmds_clock);
/**
- * drm_dp_downstream_max_bpc() - extract branch device max
- * bits per component
+ * drm_dp_downstream_min_tmds_clock() - extract downstream facing port min TMDS clock
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
+ * @edid: EDID
*
- * Returns max bpc on success or 0 if max bpc not defined
+ * Returns: HDMI/DVI downstream facing port min TMDS clock in kHz on success,
+ * or 0 if min TMDS clock not defined
*/
-int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4])
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid)
{
- int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DETAILED_CAP_INFO_AVAILABLE;
- int bpc;
-
- if (!detailed_cap_info)
+ if (!drm_dp_is_branch(dpcd))
return 0;
- switch (type) {
- case DP_DS_PORT_TYPE_VGA:
+ if (dpcd[DP_DPCD_REV] < 0x11) {
+ switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+ case DP_DWN_STRM_PORT_TYPE_TMDS:
+ return 25000;
+ default:
+ return 0;
+ }
+ }
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ if (is_edid_digital_input_dp(edid))
+ return 0;
+ fallthrough;
case DP_DS_PORT_TYPE_DVI:
case DP_DS_PORT_TYPE_HDMI:
+ /*
+ * Unclear whether the protocol converter could
+ * utilize pixel replication. Assume it won't.
+ */
+ return 25000;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_min_tmds_clock);
+
+/**
+ * drm_dp_downstream_max_bpc() - extract downstream facing port max
+ * bits per component
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @edid: EDID
+ *
+ * Returns: Max bpc on success or 0 if max bpc not defined
+ */
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct edid *edid)
+{
+ if (!drm_dp_is_branch(dpcd))
+ return 0;
+
+ if (dpcd[DP_DPCD_REV] < 0x11) {
+ switch (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) {
+ case DP_DWN_STRM_PORT_TYPE_DP:
+ return 0;
+ default:
+ return 8;
+ }
+ }
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_DP:
+ return 0;
case DP_DS_PORT_TYPE_DP_DUALMODE:
- bpc = port_cap[2] & DP_DS_MAX_BPC_MASK;
+ if (is_edid_digital_input_dp(edid))
+ return 0;
+ fallthrough;
+ case DP_DS_PORT_TYPE_HDMI:
+ case DP_DS_PORT_TYPE_DVI:
+ case DP_DS_PORT_TYPE_VGA:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return 8;
- switch (bpc) {
+ switch (port_cap[2] & DP_DS_MAX_BPC_MASK) {
case DP_DS_8BPC:
return 8;
case DP_DS_10BPC:
@@ -491,15 +802,132 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
return 12;
case DP_DS_16BPC:
return 16;
+ default:
+ return 8;
}
- fallthrough;
+ break;
default:
- return 0;
+ return 8;
}
}
EXPORT_SYMBOL(drm_dp_downstream_max_bpc);
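+/*
+ * Example (hedged fragment; pipe_bpc comes from a hypothetical driver
+ * policy): clamp the pipe bpc to the DFP limit, treating 0 as "no
+ * limit known":
+ *
+ *	int dfp_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);
+ *
+ *	if (dfp_bpc)
+ *		pipe_bpc = min(pipe_bpc, dfp_bpc);
+ */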
/**
+ * drm_dp_downstream_420_passthrough() - determine downstream facing port
+ * YCbCr 4:2:0 pass-through capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ *
+ * Returns: whether the downstream facing port can pass through YCbCr 4:2:0
+ */
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ if (!drm_dp_is_branch(dpcd))
+ return false;
+
+ if (dpcd[DP_DPCD_REV] < 0x13)
+ return false;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_DP:
+ return true;
+ case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return false;
+
+ return port_cap[3] & DP_DS_HDMI_YCBCR420_PASS_THROUGH;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_420_passthrough);
+
+/**
+ * drm_dp_downstream_444_to_420_conversion() - determine downstream facing port
+ * YCbCr 4:4:4->4:2:0 conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ *
+ * Returns: whether the downstream facing port can convert YCbCr 4:4:4 to 4:2:0
+ */
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ if (!drm_dp_is_branch(dpcd))
+ return false;
+
+ if (dpcd[DP_DPCD_REV] < 0x13)
+ return false;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_HDMI:
+ if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+ return false;
+
+ return port_cap[3] & DP_DS_HDMI_YCBCR444_TO_420_CONV;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
+
+/**
+ * drm_dp_downstream_mode() - return a mode for downstream facing port
+ * @dev: DRM device
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Provides a suitable mode for downstream facing ports without EDID.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_dp_downstream_mode(struct drm_device *dev,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ u8 vic;
+
+ if (!drm_dp_is_branch(dpcd))
+ return NULL;
+
+ if (dpcd[DP_DPCD_REV] < 0x11)
+ return NULL;
+
+ switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+ case DP_DS_PORT_TYPE_NON_EDID:
+ switch (port_cap[0] & DP_DS_NON_EDID_MASK) {
+ case DP_DS_NON_EDID_720x480i_60:
+ vic = 6;
+ break;
+ case DP_DS_NON_EDID_720x480i_50:
+ vic = 21;
+ break;
+ case DP_DS_NON_EDID_1920x1080i_60:
+ vic = 5;
+ break;
+ case DP_DS_NON_EDID_1920x1080i_50:
+ vic = 20;
+ break;
+ case DP_DS_NON_EDID_1280x720_60:
+ vic = 4;
+ break;
+ case DP_DS_NON_EDID_1280x720_50:
+ vic = 19;
+ break;
+ default:
+ return NULL;
+ }
+ return drm_display_mode_from_cea_vic(dev, vic);
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(drm_dp_downstream_mode);
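+/*
+ * Example (hedged fragment of a connector's .get_modes(); dpcd[] and
+ * port_cap[] are assumed cached): fall back to the fixed mode when the
+ * DFP provides no EDID:
+ *
+ *	if (!edid) {
+ *		struct drm_display_mode *mode;
+ *
+ *		mode = drm_dp_downstream_mode(connector->dev, dpcd, port_cap);
+ *		if (!mode)
+ *			return 0;
+ *		drm_mode_probed_add(connector, mode);
+ *		return 1;
+ *	}
+ */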
+
+/**
* drm_dp_downstream_id() - identify branch device
* @aux: DisplayPort AUX channel
* @id: DisplayPort branch device id
@@ -517,12 +945,15 @@ EXPORT_SYMBOL(drm_dp_downstream_id);
* @m: pointer for debugfs file
* @dpcd: DisplayPort configuration data
* @port_cap: port capabilities
+ * @edid: EDID
* @aux: DisplayPort AUX channel
*
*/
void drm_dp_downstream_debug(struct seq_file *m,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4], struct drm_dp_aux *aux)
+ const u8 port_cap[4],
+ const struct edid *edid,
+ struct drm_dp_aux *aux)
{
bool detailed_cap_info = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DETAILED_CAP_INFO_AVAILABLE;
@@ -580,16 +1011,19 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
if (detailed_cap_info) {
- clk = drm_dp_downstream_max_clock(dpcd, port_cap);
+ clk = drm_dp_downstream_max_dotclock(dpcd, port_cap);
+ if (clk > 0)
+ seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
- if (clk > 0) {
- if (type == DP_DS_PORT_TYPE_VGA)
- seq_printf(m, "\t\tMax dot clock: %d kHz\n", clk);
- else
- seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
- }
+ clk = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
+ if (clk > 0)
+ seq_printf(m, "\t\tMax TMDS clock: %d kHz\n", clk);
+
+ clk = drm_dp_downstream_min_tmds_clock(dpcd, port_cap, edid);
+ if (clk > 0)
+ seq_printf(m, "\t\tMin TMDS clock: %d kHz\n", clk);
- bpc = drm_dp_downstream_max_bpc(dpcd, port_cap);
+ bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);
if (bpc > 0)
seq_printf(m, "\t\tMax bpc: %d\n", bpc);
@@ -597,6 +1031,130 @@ void drm_dp_downstream_debug(struct seq_file *m,
}
EXPORT_SYMBOL(drm_dp_downstream_debug);
+/**
+ * drm_dp_subconnector_type() - get DP branch device type
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns: the type of the downstream facing port, as a &enum
+ * drm_mode_subconnector value.
+ */
+enum drm_mode_subconnector
+drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4])
+{
+ int type;
+
+ if (!drm_dp_is_branch(dpcd))
+ return DRM_MODE_SUBCONNECTOR_Native;
+
+ /* DP 1.0 approach */
+ if (dpcd[DP_DPCD_REV] == DP_DPCD_REV_10) {
+ type = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_TYPE_MASK;
+
+ switch (type) {
+ case DP_DWN_STRM_PORT_TYPE_TMDS:
+ /* Can be HDMI or DVI-D, DVI-D is a safer option */
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DP_DWN_STRM_PORT_TYPE_ANALOG:
+ /* Can be VGA or DVI-A, VGA is more popular */
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DP_DWN_STRM_PORT_TYPE_DP:
+ return DRM_MODE_SUBCONNECTOR_DisplayPort;
+ case DP_DWN_STRM_PORT_TYPE_OTHER:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+ }
+ type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
+
+ switch (type) {
+ case DP_DS_PORT_TYPE_DP:
+ case DP_DS_PORT_TYPE_DP_DUALMODE:
+ return DRM_MODE_SUBCONNECTOR_DisplayPort;
+ case DP_DS_PORT_TYPE_VGA:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DP_DS_PORT_TYPE_DVI:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DP_DS_PORT_TYPE_HDMI:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DP_DS_PORT_TYPE_WIRELESS:
+ return DRM_MODE_SUBCONNECTOR_Wireless;
+ case DP_DS_PORT_TYPE_NON_EDID:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+EXPORT_SYMBOL(drm_dp_subconnector_type);
+
+/**
+ * drm_dp_set_subconnector_property - set subconnector for DP connector
+ * @connector: connector to set property on
+ * @status: connector status
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Called by a driver on every detect event.
+ */
+void drm_dp_set_subconnector_property(struct drm_connector *connector,
+ enum drm_connector_status status,
+ const u8 *dpcd,
+ const u8 port_cap[4])
+{
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (status == connector_status_connected)
+ subconnector = drm_dp_subconnector_type(dpcd, port_cap);
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+EXPORT_SYMBOL(drm_dp_set_subconnector_property);
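Since this is meant to run on every detect event, the natural place for it is the driver's .detect() hook. A hedged sketch; the embedding struct and its field names are assumptions, not part of this patch:

	/* Hypothetical detect() path updating the subconnector property. */
	struct my_dp {
		struct drm_connector connector;
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		u8 downstream_ports[4];
	};

	static enum drm_connector_status
	my_dp_detect(struct drm_connector *connector, bool force)
	{
		struct my_dp *dp = container_of(connector, struct my_dp,
						connector);
		enum drm_connector_status status = connector_status_connected;

		/* ...probe the hardware, refresh dp->dpcd and
		 * dp->downstream_ports here...
		 */

		drm_dp_set_subconnector_property(connector, status,
						 dp->dpcd,
						 dp->downstream_ports);
		return status;
	}
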
+
+/**
+ * drm_dp_read_sink_count_cap() - Check whether a given connector has a valid sink
+ * count
+ * @connector: The DRM connector to check
+ * @dpcd: A cached copy of the connector's DPCD RX capabilities
+ * @desc: A cached copy of the connector's DP descriptor
+ *
+ * See also: drm_dp_read_sink_count()
+ *
+ * Returns: %true if the (e)DP connector has a valid sink count that should
+ * be probed, %false otherwise.
+ */
+bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const struct drm_dp_desc *desc)
+{
+ /* Some eDP panels don't set a valid value for the sink count */
+ return connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ dpcd[DP_DPCD_REV] >= DP_DPCD_REV_11 &&
+ dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ !drm_dp_has_quirk(desc, 0, DP_DPCD_QUIRK_NO_SINK_COUNT);
+}
+EXPORT_SYMBOL(drm_dp_read_sink_count_cap);
+
+/**
+ * drm_dp_read_sink_count() - Retrieve the sink count for a given sink
+ * @aux: The DP AUX channel to use
+ *
+ * See also: drm_dp_read_sink_count_cap()
+ *
+ * Returns: The current sink count reported by @aux, or a negative error code
+ * otherwise.
+ */
+int drm_dp_read_sink_count(struct drm_dp_aux *aux)
+{
+ u8 count;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ return DP_GET_SINK_COUNT(count);
+}
+EXPORT_SYMBOL(drm_dp_read_sink_count);
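The two sink-count helpers are designed to be used as a pair: only trust SINK_COUNT when the capability check says it is valid. A minimal sketch, with a hypothetical wrapper name:

	/* Hypothetical presence check pairing the two new helpers. */
	static bool sink_present(struct drm_connector *connector,
				 struct drm_dp_aux *aux,
				 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				 const struct drm_dp_desc *desc)
	{
		if (!drm_dp_read_sink_count_cap(connector, dpcd, desc))
			return true; /* no usable count, assume a sink */

		return drm_dp_read_sink_count(aux) > 0;
	}
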
+
/*
* I2C-over-AUX implementation
*/
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 67dd72ea200e..e87542533640 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -20,11 +20,13 @@
* OF THIS SOFTWARE.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>
@@ -423,6 +425,22 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+ case DP_QUERY_STREAM_ENC_STATUS: {
+ const struct drm_dp_query_stream_enc_status *msg;
+
+ msg = &req->u.enc_status;
+ buf[idx] = msg->stream_id;
+ idx++;
+ memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
+ idx += sizeof(msg->client_id);
+ buf[idx] = 0;
+ buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
+ buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
+ buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
+ buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
+ idx++;
+ }
+ break;
}
raw->cur_len = idx;
}
@@ -551,6 +569,20 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
return -ENOMEM;
}
break;
+ case DP_QUERY_STREAM_ENC_STATUS:
+ req->u.enc_status.stream_id = buf[idx++];
+ for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
+ req->u.enc_status.client_id[i] = buf[idx++];
+
+ req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
+ buf[idx]);
+ req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
+ buf[idx]);
+ req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
+ buf[idx]);
+ req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
+ buf[idx]);
+ break;
}
return 0;
@@ -629,6 +661,16 @@ drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req
req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
req->u.i2c_write.bytes);
break;
+ case DP_QUERY_STREAM_ENC_STATUS:
+ P("stream_id=%u client_id=%*ph stream_event=%x "
+ "valid_event=%d stream_behavior=%x valid_behavior=%d",
+ req->u.enc_status.stream_id,
+ (int)ARRAY_SIZE(req->u.enc_status.client_id),
+ req->u.enc_status.client_id, req->u.enc_status.stream_event,
+ req->u.enc_status.valid_stream_event,
+ req->u.enc_status.stream_behavior,
+ req->u.enc_status.valid_stream_behavior);
+ break;
default:
P("???\n");
break;
@@ -936,6 +978,42 @@ static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_ms
return true;
}
+static bool
+drm_dp_sideband_parse_query_stream_enc_status(
+ struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ struct drm_dp_query_stream_enc_status_ack_reply *reply;
+
+ reply = &repmsg->u.enc_status;
+
+ reply->stream_id = raw->msg[3];
+
+ reply->reply_signed = raw->msg[2] & BIT(0);
+
+ /*
+ * NOTE: It's my impression from reading the spec that the below parsing
+ * is correct. However I noticed while testing with an HDCP 1.4 display
+ * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
+ * would expect both bits to be set. So keep the parsing following the
+ * spec, but beware reality might not match the spec (at least for some
+ * configurations).
+ */
+ reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
+ reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
+
+ reply->query_capable_device_present = raw->msg[2] & BIT(5);
+ reply->legacy_device_present = raw->msg[2] & BIT(6);
+ reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
+
+ reply->auth_completed = !!(raw->msg[1] & BIT(3));
+ reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
+ reply->repeater_present = !!(raw->msg[1] & BIT(5));
+ reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
+
+ return true;
+}
+
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
@@ -961,6 +1039,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
case DP_REMOTE_I2C_READ:
return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+ case DP_REMOTE_I2C_WRITE:
+ return true; /* since there's nothing to parse */
case DP_ENUM_PATH_RESOURCES:
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
@@ -970,6 +1050,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
case DP_CLEAR_PAYLOAD_ID_TABLE:
return true; /* since there's nothing to parse */
+ case DP_QUERY_STREAM_ENC_STATUS:
+ return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
default:
DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
drm_dp_mst_req_type_str(msg->req_type));
@@ -1121,6 +1203,25 @@ static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
msg->path_msg = true;
}
+static int
+build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
+ u8 *q_id)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_QUERY_STREAM_ENC_STATUS;
+ req.u.enc_status.stream_id = stream_id;
+ memcpy(req.u.enc_status.client_id, q_id,
+ sizeof(req.u.enc_status.client_id));
+ req.u.enc_status.stream_event = 0;
+ req.u.enc_status.valid_stream_event = false;
+ req.u.enc_status.stream_behavior = 0;
+ req.u.enc_status.valid_stream_behavior = false;
+
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
@@ -3153,6 +3254,57 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_query_stream_enc_status_ack_reply *status)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ u8 nonce[7];
+ int len, ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
+ if (!port) {
+ ret = -EINVAL;
+ goto out_get_port;
+ }
+
+ get_random_bytes(nonce, sizeof(nonce));
+
+ /*
+ * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
+ * transaction at the MST Branch device directly connected to the
+ * Source"
+ */
+ txmsg->dst = mgr->mst_primary;
+
+ len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
+ if (ret < 0) {
+ goto out;
+ } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = 0;
+ memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
+
+out:
+ drm_dp_mst_topology_put_port(port);
+out_get_port:
+ kfree(txmsg);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
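An HDCP implementation would consume this by sending the query and inspecting the decoded reply. A hedged sketch; the pass/fail policy shown here is an assumption, not something this patch defines:

	/* Hypothetical HDCP-side consumer of the new sideband message. */
	static int check_stream_encryption(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_port *port)
	{
		struct drm_dp_query_stream_enc_status_ack_reply reply;
		int ret;

		ret = drm_dp_send_query_stream_enc_status(mgr, port, &reply);
		if (ret)
			return ret;

		/* Illustrative policy: require completed auth + encryption. */
		return (reply.auth_completed && reply.encryption_enabled) ?
			0 : -EINVAL;
	}
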
+
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
@@ -3487,6 +3639,28 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
}
/**
+ * drm_dp_read_mst_cap() - check whether a sink supports MST
+ * @aux: The DP AUX channel to use
+ * @dpcd: A cached copy of the DPCD capabilities for this sink
+ *
+ * Returns: %true if the sink supports MST, %false otherwise
+ */
+bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ u8 mstm_cap;
+
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
+ return false;
+
+ if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+ return false;
+
+ return mstm_cap & DP_MST_CAP;
+}
+EXPORT_SYMBOL(drm_dp_read_mst_cap);
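A typical caller reads the capability once per detect cycle and flips the topology manager to match. A hedged sketch (wrapper name hypothetical, error handling trimmed):

	/* Hypothetical sketch: keep the MST state in sync with the sink. */
	static int update_mst_state(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_aux *aux,
				    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		bool mst = drm_dp_read_mst_cap(aux, dpcd);

		return drm_dp_mst_topology_mgr_set_mst(mgr, mst);
	}
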
+
+/**
* drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
* @mgr: manager to set state for
* @mst_state: true to enable MST on this connector - false to disable.
@@ -5327,29 +5501,29 @@ static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
msgs[num - 1].len <= 0xff;
}
-/* I2C device */
-static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
- int num)
+static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
+{
+ int i;
+
+ for (i = 0; i < num - 1; i++) {
+ if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
+ msgs[i].len > 0xff)
+ return false;
+ }
+
+ return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
+}
+
+static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port,
+ struct i2c_msg *msgs, int num)
{
- struct drm_dp_aux *aux = adapter->algo_data;
- struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
- struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
unsigned int i;
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
- mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
- if (!mstb)
- return -EREMOTEIO;
-
- if (!remote_i2c_read_ok(msgs, num)) {
- DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
- ret = -EIO;
- goto out;
- }
-
memset(&msg, 0, sizeof(msg));
msg.req_type = DP_REMOTE_I2C_READ;
msg.u.i2c_read.num_transactions = num - 1;
@@ -5390,6 +5564,78 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
}
out:
kfree(txmsg);
+ return ret;
+}
+
+static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port,
+ struct i2c_msg *msgs, int num)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ unsigned int i;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < num; i++) {
+ memset(&msg, 0, sizeof(msg));
+ msg.req_type = DP_REMOTE_I2C_WRITE;
+ msg.u.i2c_write.port_number = port->port_num;
+ msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
+ msg.u.i2c_write.num_bytes = msgs[i].len;
+ msg.u.i2c_write.bytes = msgs[i].buf;
+
+ memset(txmsg, 0, sizeof(*txmsg));
+ txmsg->dst = mstb;
+
+ drm_dp_encode_sideband_req(&msg, txmsg);
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+ } else {
+ goto out;
+ }
+ }
+ ret = num;
+out:
+ kfree(txmsg);
+ return ret;
+}
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ struct drm_dp_mst_port *port =
+ container_of(aux, struct drm_dp_mst_port, aux);
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ int ret;
+
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+ if (!mstb)
+ return -EREMOTEIO;
+
+ if (remote_i2c_read_ok(msgs, num)) {
+ ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
+ } else if (remote_i2c_write_ok(msgs, num)) {
+ ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
+ } else {
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ ret = -EIO;
+ }
+
drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 13068fdf4331..cd162d406078 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -240,13 +240,13 @@ void drm_minor_release(struct drm_minor *minor)
* DOC: driver instance overview
*
* A device instance for a drm driver is represented by &struct drm_device. This
- * is initialized with drm_dev_init(), usually from bus-specific ->probe()
- * callbacks implemented by the driver. The driver then needs to initialize all
- * the various subsystems for the drm device like memory management, vblank
- * handling, modesetting support and intial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * is allocated and initialized with devm_drm_dev_alloc(), usually from
+ * bus-specific ->probe() callbacks implemented by the driver. The driver then
+ * needs to initialize all the various subsystems for the drm device like memory
+ * management, vblank handling, modesetting support and initial output
+ * configuration plus obviously initialize all the corresponding hardware bits.
+ * Finally when everything is up and running and ready for userspace the device
+ * instance can be published using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the &drm_driver.load callback. But due to
@@ -274,7 +274,7 @@ void drm_minor_release(struct drm_minor *minor)
*
* The following example shows a typical structure of a DRM display driver.
* The example focuses on the probe() function and the other functions that are
- * almost always present and serves as a demonstration of devm_drm_dev_init().
+ * almost always present, and serves as a demonstration of devm_drm_dev_alloc().
*
* .. code-block:: c
*
@@ -294,22 +294,12 @@ void drm_minor_release(struct drm_minor *minor)
* struct drm_device *drm;
* int ret;
*
- * // devm_kzalloc() can't be used here because the drm_device '
- * // lifetime can exceed the device lifetime if driver unbind
- * // happens when userspace still has open file descriptors.
- * priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- * if (!priv)
- * return -ENOMEM;
- *
+ * priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
+ * struct driver_device, drm);
+ * if (IS_ERR(priv))
+ * return PTR_ERR(priv);
* drm = &priv->drm;
*
- * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
- * if (ret) {
- * kfree(priv);
- * return ret;
- * }
- * drmm_add_final_kfree(drm, priv);
- *
* ret = drmm_mode_config_init(drm);
* if (ret)
* return ret;
@@ -550,9 +540,9 @@ static void drm_fs_inode_free(struct inode *inode)
* following guidelines apply:
*
* - The entire device initialization procedure should be run from the
- * &component_master_ops.master_bind callback, starting with drm_dev_init(),
- * then binding all components with component_bind_all() and finishing with
- * drm_dev_register().
+ * &component_master_ops.master_bind callback, starting with
+ * devm_drm_dev_alloc(), then binding all components with
+ * component_bind_all() and finishing with drm_dev_register().
*
* - The opaque pointer passed to all components through component_bind_all()
* should point at &struct drm_device of the device instance, not some driver
@@ -583,43 +573,9 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
drm_legacy_destroy_members(dev);
}
-/**
- * drm_dev_init - Initialise new DRM device
- * @dev: DRM device
- * @driver: DRM driver
- * @parent: Parent device object
- *
- * Initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems. This should be done last in the device
- * initialization sequence to make sure userspace can't access an inconsistent
- * state.
- *
- * The initial ref-count of the object is 1. Use drm_dev_get() and
- * drm_dev_put() to take and drop further ref-counts.
- *
- * It is recommended that drivers embed &struct drm_device into their own device
- * structure.
- *
- * Drivers that do not want to allocate their own device struct
- * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
- * that do embed &struct drm_device it must be placed first in the overall
- * structure, and the overall structure must be allocated using kmalloc(): The
- * drm core's release function unconditionally calls kfree() on the @dev pointer
- * when the final reference is released. To override this behaviour, and so
- * allow embedding of the drm_device inside the driver's device struct at an
- * arbitrary offset, you must supply a &drm_driver.release callback and control
- * the finalization explicitly.
- *
- * Note that drivers must call drmm_add_final_kfree() after this function has
- * completed successfully.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
- */
-int drm_dev_init(struct drm_device *dev,
- struct drm_driver *driver,
- struct device *parent)
+static int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent)
{
int ret;
@@ -699,31 +655,15 @@ err:
return ret;
}
-EXPORT_SYMBOL(drm_dev_init);
static void devm_drm_dev_init_release(void *data)
{
drm_dev_put(data);
}
-/**
- * devm_drm_dev_init - Resource managed drm_dev_init()
- * @parent: Parent device object
- * @dev: DRM device
- * @driver: DRM driver
- *
- * Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put().
- *
- * Note that drivers must call drmm_add_final_kfree() after this function has
- * completed successfully.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
- */
-int devm_drm_dev_init(struct device *parent,
- struct drm_device *dev,
- struct drm_driver *driver)
+static int devm_drm_dev_init(struct device *parent,
+ struct drm_device *dev,
+ struct drm_driver *driver)
{
int ret;
@@ -737,7 +677,6 @@ int devm_drm_dev_init(struct device *parent,
return ret;
}
-EXPORT_SYMBOL(devm_drm_dev_init);
void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
size_t size, size_t offset)
@@ -767,19 +706,9 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc);
* @driver: DRM driver to allocate device for
* @parent: Parent device object
*
- * Allocate and initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems. This should be done last in the device
- * initialization sequence to make sure userspace can't access an inconsistent
- * state.
- *
- * The initial ref-count of the object is 1. Use drm_dev_get() and
- * drm_dev_put() to take and drop further ref-counts.
- *
- * Note that for purely virtual devices @parent can be NULL.
- *
- * Drivers that wish to subclass or embed &struct drm_device into their
- * own struct should look at using drm_dev_init() instead.
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding &struct drm_device in a driver-private
+ * structure, and which does not support automatic cleanup through devres.
*
* RETURNS:
* Pointer to new DRM device, or ERR_PTR on failure.
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6840f0530a38..631125b46e04 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3738,6 +3738,34 @@ drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
}
+/**
+ * drm_display_mode_from_cea_vic() - return a mode for CEA VIC
+ * @dev: DRM device
+ * @video_code: CEA VIC of the mode
+ *
+ * Creates a new mode matching the specified CEA VIC.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_display_mode_from_cea_vic(struct drm_device *dev,
+ u8 video_code)
+{
+ const struct drm_display_mode *cea_mode;
+ struct drm_display_mode *newmode;
+
+ cea_mode = cea_mode_for_vic(video_code);
+ if (!cea_mode)
+ return NULL;
+
+ newmode = drm_mode_duplicate(dev, cea_mode);
+ if (!newmode)
+ return NULL;
+
+ return newmode;
+}
+EXPORT_SYMBOL(drm_display_mode_from_cea_vic);
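Any caller that knows a CEA VIC can now materialize a probed mode from it. A short hedged example (the wrapper name is hypothetical; VIC 16 is 1920x1080@60 per CEA-861):

	/* Hypothetical use: offer 1920x1080@60 (CEA VIC 16) as a canned mode. */
	static int add_1080p_mode(struct drm_connector *connector)
	{
		struct drm_display_mode *mode;

		mode = drm_display_mode_from_cea_vic(connector->dev, 16);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
		return 1;
	}
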
+
static int
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8697554ccd41..1543d9d10970 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -325,7 +325,7 @@ static void drm_fb_helper_sysrq(int dummy1)
static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
- .help_msg = "force-fb(V)",
+ .help_msg = "force-fb(v)",
.action_msg = "Restore framebuffer console",
};
#else
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index df656366a530..2f5b0c2bb0fe 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -176,8 +176,7 @@ static int framebuffer_check(struct drm_device *dev,
int i;
/* check if the format is supported at all */
- info = __drm_format_info(r->pixel_format);
- if (!info) {
+ if (!__drm_format_info(r->pixel_format)) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("bad framebuffer format %s\n",
@@ -186,9 +185,6 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
- /* now let the driver pick its own format info */
- info = drm_get_format_info(dev, r);
-
if (r->width == 0) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
@@ -199,6 +195,9 @@ static int framebuffer_check(struct drm_device *dev,
return -EINVAL;
}
+ /* now let the driver pick its own format info */
+ info = drm_get_format_info(dev, r);
+
for (i = 0; i < info->num_planes; i++) {
unsigned int width = fb_plane_width(r->width, info, i);
unsigned int height = fb_plane_height(r->height, info, i);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 19d73868490e..69c2c079d803 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1085,6 +1085,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ vma->vm_private_data = obj;
+
if (obj->funcs && obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
@@ -1107,8 +1109,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
- vma->vm_private_data = obj;
-
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 822edeadbab3..59b9ca207b42 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;
- if (sgt->nents != 1) {
- /* check if the entries in the sg_table are contiguous */
- dma_addr_t next_addr = sg_dma_address(sgt->sgl);
- struct scatterlist *s;
- unsigned int i;
-
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- /*
- * sg_dma_address(s) is only valid for entries
- * that have sg_dma_len(s) != 0
- */
- if (!sg_dma_len(s))
- continue;
-
- if (sg_dma_address(s) != next_addr)
- return ERR_PTR(-EINVAL);
-
- next_addr = sg_dma_address(s) + sg_dma_len(s);
- }
- }
+ /* check if the entries in the sg_table are contiguous */
+ if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
+ return ERR_PTR(-EINVAL);
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4b7cfbac4daa..e00616d94f26 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, shmem->sgt);
} else {
if (shmem->sgt) {
- dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
- shmem->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
+ DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
@@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
- dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
- shmem->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
shmem->sgt = NULL;
@@ -594,8 +593,13 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- if (obj->import_attach)
+ if (obj->import_attach) {
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(obj);
+ vma->vm_private_data = NULL;
+
return dma_buf_mmap(obj->dma_buf, vma, 0);
+ }
shmem = to_drm_gem_shmem_obj(obj);
@@ -656,7 +660,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
WARN_ON(shmem->base.import_attach);
- return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+ return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
@@ -697,12 +701,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
goto err_put_pages;
}
/* Map the pages for use by the h/w. */
- dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
+ goto err_free_sgt;
shmem->sgt = sgt;
return sgt;
+err_free_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
err_put_pages:
drm_gem_shmem_put_pages(shmem);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 892b2288a104..0e4fb9ba43ad 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -43,12 +43,9 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
drm_printf(p, "\n");
- if (bo->mem.bus.is_iomem) {
- drm_printf_indent(p, indent, "bus.base=%lx\n",
- (unsigned long)bo->mem.bus.base);
+ if (bo->mem.bus.is_iomem)
drm_printf_indent(p, indent, "bus.offset=%lx\n",
(unsigned long)bo->mem.bus.offset);
- }
}
EXPORT_SYMBOL(drm_gem_ttm_print_info);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 3296ed3df358..50cad0e4a92e 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -97,8 +97,8 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* hardware's draing engine.
*
* To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * drm_gem_vram_vmap(). It maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_vunmap() to
* release the mapping.
*/
@@ -135,28 +135,28 @@ static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
+ u32 invariant_flags = 0;
unsigned int i;
unsigned int c = 0;
- u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
+
+ if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
+ pl_flag = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
- if (pl_flag & TTM_PL_FLAG_VRAM)
+ if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
+ gbo->placements[c].mem_type = TTM_PL_VRAM;
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM |
- invariant_flags;
-
- if (pl_flag & TTM_PL_FLAG_SYSTEM)
- gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM |
invariant_flags;
+ }
- if (!c)
+ if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
+ gbo->placements[c].mem_type = TTM_PL_SYSTEM;
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM |
invariant_flags;
+ }
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
@@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
}
+/*
+ * Note that on error, drm_gem_vram_init will free the buffer object.
+ */
+
static int drm_gem_vram_init(struct drm_device *dev,
struct drm_gem_vram_object *gbo,
size_t size, unsigned long pg_align)
@@ -176,32 +180,37 @@ static int drm_gem_vram_init(struct drm_device *dev,
int ret;
size_t acc_size;
- if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
+ kfree(gbo);
return -EINVAL;
+ }
bdev = &vmm->bdev;
gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
- if (ret)
+ if (ret) {
+ kfree(gbo);
return ret;
+ }
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
gbo->bo.bdev = bdev;
- drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+ drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_SYSTEM);
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
&gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
if (ret)
- goto err_drm_gem_object_release;
+ /*
+ * A failing ttm_bo_init will call ttm_buffer_object_destroy
+ * to release gbo->bo.base and kfree gbo.
+ */
+ return ret;
return 0;
-
-err_drm_gem_object_release:
- drm_gem_object_release(&gbo->bo.base);
- return ret;
}
/**
@@ -235,13 +244,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
ret = drm_gem_vram_init(dev, gbo, size, pg_align);
if (ret < 0)
- goto err_kfree;
+ return ERR_PTR(ret);
return gbo;
-
-err_kfree:
- kfree(gbo);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);
@@ -436,39 +441,6 @@ out:
return kmap->virtual;
}
-/**
- * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
- * @gbo: the GEM VRAM object
- * @map: establish a mapping if necessary
- * @is_iomem: returns true if the mapped memory is I/O memory, or false \
- otherwise; can be NULL
- *
- * This function maps the buffer object into the kernel's address space
- * or returns the current mapping. If the parameter map is false, the
- * function only queries the current mapping, but does not establish a
- * new one.
- *
- * Returns:
- * The buffers virtual address if mapped, or
- * NULL if not mapped, or
- * an ERR_PTR()-encoded error code otherwise.
- */
-void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
- bool *is_iomem)
-{
- int ret;
- void *virtual;
-
- ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret)
- return ERR_PTR(ret);
- virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
- ttm_bo_unreserve(&gbo->bo);
-
- return virtual;
-}
-EXPORT_SYMBOL(drm_gem_vram_kmap);
-
static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
{
if (WARN_ON_ONCE(!gbo->kmap_use_count))
@@ -485,22 +457,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
}
/**
- * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
- * @gbo: the GEM VRAM object
- */
-void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
-{
- int ret;
-
- ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
- if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
- return;
- drm_gem_vram_kunmap_locked(gbo);
- ttm_bo_unreserve(&gbo->bo);
-}
-EXPORT_SYMBOL(drm_gem_vram_kunmap);
-
-/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
* space
* @gbo: The GEM VRAM object to map
@@ -511,9 +467,6 @@ EXPORT_SYMBOL(drm_gem_vram_kunmap);
* permanently. Call drm_gem_vram_vunmap() with the returned address to
* unmap and unpin the GEM VRAM object.
*
- * If you have special requirements for the pinning or mapping operations,
- * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
- *
* Returns:
* The buffer's virtual address on success, or
* an ERR_PTR()-encoded error code otherwise.
@@ -647,13 +600,13 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
struct ttm_placement *pl)
{
- drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
+ drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
@@ -967,16 +920,13 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
* TTM TT
*/
-static void backend_func_destroy(struct ttm_tt *tt)
+static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
{
+ ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}
-static struct ttm_backend_func backend_func = {
- .destroy = backend_func_destroy
-};
-
/*
* TTM BO device
*/
@@ -991,8 +941,6 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
if (!tt)
return NULL;
- tt->func = &backend_func;
-
ret = ttm_tt_init(tt, bo, page_flags);
if (ret < 0)
goto err_ttm_tt_init;
@@ -1004,28 +952,6 @@ err_ttm_tt_init:
return NULL;
}
-static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = 0;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@@ -1042,7 +968,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
static void bo_driver_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct drm_gem_vram_object *gbo;
@@ -1056,22 +982,15 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
- mem->bus.addr = NULL;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
-
switch (mem->mem_type) {
case TTM_PL_SYSTEM: /* nothing to do */
- mem->bus.offset = 0;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = vmm->vram_base;
+ mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
mem->bus.is_iomem = true;
break;
default:
@@ -1083,9 +1002,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
static struct ttm_bo_driver bo_driver = {
.ttm_tt_create = bo_driver_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
- .init_mem_type = bo_driver_init_mem_type,
+ .ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
.move_notify = bo_driver_move_notify,
@@ -1100,12 +1017,10 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
- struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
+ struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&ttm_bo_glob.lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
@@ -1142,7 +1057,8 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
if (ret)
return ret;
- ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
+ false, vram_size >> PAGE_SHIFT);
if (ret)
return ret;
@@ -1151,6 +1067,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
+ ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
ttm_bo_device_release(&vmm->bdev);
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8e01caaf95cc..b65865c630b0 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -95,6 +95,7 @@ void drm_minor_release(struct drm_minor *minor);
/* drm_managed.c */
void drm_managed_release(struct drm_device *dev);
+void drmm_add_final_kfree(struct drm_device *dev, void *container);
/* drm_vblank.c */
static inline bool drm_vblank_passed(u64 seq, u64 ref)
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 1e1356560c2e..37d7db6223be 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -27,7 +27,7 @@
* be done directly with drmm_kmalloc() and the related functions. Everything
* will be released on the final drm_dev_put() in reverse order of how the
* release actions have been added and memory has been allocated since driver
- * loading started with drm_dev_init().
+ * loading started with devm_drm_dev_alloc().
*
* Note that release actions and managed memory can also be added and removed
* during the lifetime of the driver; all the functions are fully concurrent
@@ -125,18 +125,6 @@ static void add_dr(struct drm_device *dev, struct drmres *dr)
dr, dr->node.name, (unsigned long) dr->node.size);
}
-/**
- * drmm_add_final_kfree - add release action for the final kfree()
- * @dev: DRM device
- * @container: pointer to the kmalloc allocation containing @dev
- *
- * Since the allocation containing the struct &drm_device must be allocated
- * before it can be initialized with drm_dev_init() there's no way to allocate
- * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
- * pointer for this final kfree() must be specified by calling this function. It
- * will be released in the final drm_dev_put() for @dev, after all other release
- * actions installed through drmm_add_action() have been processed.
- */
void drmm_add_final_kfree(struct drm_device *dev, void *container)
{
WARN_ON(dev->managed.final_kfree);
@@ -144,7 +132,6 @@ void drmm_add_final_kfree(struct drm_device *dev, void *container)
WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
dev->managed.final_kfree = container;
}
-EXPORT_SYMBOL(drmm_add_final_kfree);
int __drmm_add_action(struct drm_device *dev,
drmres_release_t action,
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 8c7bac85a793..f634371c717a 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -70,16 +70,12 @@ EXPORT_SYMBOL(drm_panel_init);
*
* Add a panel to the global registry so that it can be looked up by display
* drivers.
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_add(struct drm_panel *panel)
+void drm_panel_add(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
list_add_tail(&panel->list, &panel_list);
mutex_unlock(&panel_lock);
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_add);
@@ -98,42 +94,6 @@ void drm_panel_remove(struct drm_panel *panel)
EXPORT_SYMBOL(drm_panel_remove);
/**
- * drm_panel_attach - attach a panel to a connector
- * @panel: DRM panel
- * @connector: DRM connector
- *
- * After obtaining a pointer to a DRM panel a display driver calls this
- * function to attach a panel to a connector.
- *
- * An error is returned if the panel is already attached to another connector.
- *
- * When unloading, the driver should detach from the panel by calling
- * drm_panel_detach().
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
-{
- return 0;
-}
-EXPORT_SYMBOL(drm_panel_attach);
-
-/**
- * drm_panel_detach - detach a panel from a connector
- * @panel: DRM panel
- *
- * Detaches a panel from the connector it is attached to. If a panel is not
- * attached to any connector this is effectively a no-op.
- *
- * This function should not be called by the panel device itself. It
- * is only for the drm device that called drm_panel_attach().
- */
-void drm_panel_detach(struct drm_panel *panel)
-{
-}
-EXPORT_SYMBOL(drm_panel_detach);
-
-/**
* drm_panel_prepare - power on a panel
* @panel: DRM panel
*
@@ -300,6 +260,49 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(of_drm_find_panel);
+
+/**
+ * of_drm_get_panel_orientation - look up the orientation of the panel through
+ * the "rotation" binding from a device tree node
+ * @np: device tree node of the panel
+ * @orientation: orientation enum to be filled in
+ *
+ * Looks up the rotation of a panel in the device tree. The orientation of the
+ * panel is expressed as a property named "rotation" in the device tree. The
+ * rotation in the device tree is counter-clockwise.
+ *
+ * Return: 0 when a valid rotation value (0, 90, 180, or 270) is read or the
+ * rotation property doesn't exist. Return a negative error code on failure.
+ */
+int of_drm_get_panel_orientation(const struct device_node *np,
+ enum drm_panel_orientation *orientation)
+{
+ int rotation, ret;
+
+ ret = of_property_read_u32(np, "rotation", &rotation);
+ if (ret == -EINVAL) {
+ /* Don't return an error if there's no rotation property. */
+ *orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ return 0;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ if (rotation == 0)
+ *orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ else if (rotation == 90)
+ *orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ else if (rotation == 180)
+ *orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else if (rotation == 270)
+ *orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(of_drm_get_panel_orientation);
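A panel driver would typically call this once at probe time and cache the result. A hedged sketch of such a caller; only the helper itself comes from this patch:

	/* Hypothetical probe-time caller in a panel driver. */
	static int read_panel_orientation(struct device *dev,
					  enum drm_panel_orientation *orientation)
	{
		int ret;

		ret = of_drm_get_panel_orientation(dev->of_node, orientation);
		if (ret < 0)
			dev_err(dev, "invalid \"rotation\" property: %d\n", ret);

		return ret;
	}
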
#endif
#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1693aa7c14b5..9f955f2010c2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct sg_table *sgt;
+ int ret;
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
@@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+ ret = dma_map_sgtable(attach->dev, sgt, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret) {
sg_free_table(sgt);
kfree(sgt);
- sgt = ERR_PTR(-ENOMEM);
+ sgt = ERR_PTR(ret);
}
return sgt;
@@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return;
- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(sgt);
}
@@ -793,6 +794,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
/**
* drm_prime_pages_to_sg - converts a page array into an sg list
+ * @dev: DRM device
* @pages: pointer to the array of page pointers to convert
* @nr_pages: length of the page vector
*
@@ -802,30 +804,65 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
*
* This is useful for implementing &drm_gem_object_funcs.get_sg_table.
*/
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ struct page **pages, unsigned int nr_pages)
{
- struct sg_table *sg = NULL;
- int ret;
+ struct sg_table *sg;
+ struct scatterlist *sge;
+ size_t max_segment = 0;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sg) {
- ret = -ENOMEM;
- goto out;
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ if (dev)
+ max_segment = dma_max_mapping_size(dev->dev);
+ if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+ max_segment = SCATTERLIST_MAX_SEGMENT;
+ sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment,
+ NULL, 0, GFP_KERNEL);
+ if (IS_ERR(sge)) {
+ kfree(sg);
+ sg = ERR_CAST(sge);
}
-
- ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
- if (ret)
- goto out;
-
return sg;
-out:
- kfree(sg);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
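Existing .get_sg_table() implementations only need to pass their drm_device through, which lets the helper clamp segment sizes to dma_max_mapping_size(), as the etnaviv conversion below shows. A minimal hedged sketch (object layout assumed):

	/* Hypothetical .get_sg_table() adapted to the new signature. */
	static struct sg_table *my_get_sg_table(struct drm_gem_object *obj,
						struct page **pages)
	{
		return drm_prime_pages_to_sg(obj->dev, pages,
					     obj->size >> PAGE_SHIFT);
	}
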
/**
+ * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
+ * @sgt: sg_table describing the buffer to check
+ *
+ * This helper calculates the contiguous size in the DMA address space
+ * of the buffer described by the provided sg_table.
+ *
+ * This is useful for implementing
+ * &drm_gem_object_funcs.gem_prime_import_sg_table.
+ */
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
+{
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ struct scatterlist *sg;
+ unsigned long size = 0;
+ int i;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ unsigned int len = sg_dma_len(sg);
+
+ if (!len)
+ break;
+ if (sg_dma_address(sg) != expected)
+ break;
+ expected += len;
+ size += len;
+ }
+ return size;
+}
+EXPORT_SYMBOL(drm_prime_get_contiguous_size);
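The intended use is an import-time sanity check, mirroring the CMA helper change earlier in this patch. A short hedged example (wrapper name hypothetical):

	/* Hypothetical import-time check: reject buffers that are not
	 * contiguous in the IOVA space.
	 */
	static int check_contiguous(struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
	{
		if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
			return -EINVAL;

		return 0;
	}
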
+
+/**
* drm_gem_prime_export - helper library implementation of the export callback
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
@@ -959,45 +996,26 @@ EXPORT_SYMBOL(drm_gem_prime_import);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_entries)
{
- unsigned count;
- struct scatterlist *sg;
- struct page *page;
- u32 page_len, page_index;
- dma_addr_t addr;
- u32 dma_len, dma_index;
-
- /*
- * Scatterlist elements contains both pages and DMA addresses, but
- * one shoud not assume 1:1 relation between them. The sg->length is
- * the size of the physical memory chunk described by the sg->page,
- * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
- * described by the sg_dma_address(sg).
- */
- page_index = 0;
- dma_index = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
- page_len = sg->length;
- page = sg_page(sg);
- dma_len = sg_dma_len(sg);
- addr = sg_dma_address(sg);
-
- while (pages && page_len > 0) {
- if (WARN_ON(page_index >= max_entries))
+ struct sg_dma_page_iter dma_iter;
+ struct sg_page_iter page_iter;
+ struct page **p = pages;
+ dma_addr_t *a = addrs;
+
+ if (pages) {
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ if (WARN_ON(p - pages >= max_entries))
return -1;
- pages[page_index] = page;
- page++;
- page_len -= PAGE_SIZE;
- page_index++;
+ *p++ = sg_page_iter_page(&page_iter);
}
- while (addrs && dma_len > 0) {
- if (WARN_ON(dma_index >= max_entries))
+ }
+ if (addrs) {
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (WARN_ON(a - addrs >= max_entries))
return -1;
- addrs[dma_index] = addr;
- addr += PAGE_SIZE;
- dma_len -= PAGE_SIZE;
- dma_index++;
+ *a++ = sg_page_iter_dma_address(&dma_iter);
}
}
+
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 3bf73971daf3..6e74e6745eca 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -297,7 +297,7 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
prev = drm_syncobj_fence_get(syncobj);
/* You are adding an unordered point to the timeline, which could cause the payload returned from query_ioctl to be 0! */
if (prev && prev->seqno >= point)
- DRM_ERROR("You are adding an unorder point to timeline!\n");
+ DRM_DEBUG("You are adding an unordered point to the timeline!\n");
dma_fence_chain_init(chain, prev, fence, point);
rcu_assign_pointer(syncobj->fence, &chain->base);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index b18e1efbbae1..f135b79593dd 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
- * drm_atomic_helper_update_legacy_modeset_state().
+ * drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
*
@@ -819,7 +819,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
- * drm_atomic_helper_update_legacy_modeset_state().
+ * drm_atomic_helper_calc_timestamping_constants().
*
* Returns:
*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index f06e19e7be04..d1533bdc1335 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
* because display controller, GPU, etc. are not coherent.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
@@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj
* discard those writes.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
/* called with etnaviv_obj->lock held */
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt;
- sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+ etnaviv_obj->pages, npages);
if (IS_ERR(sgt)) {
dev_err(dev->dev, "failed to allocate sgt: %ld\n",
PTR_ERR(sgt));
@@ -404,9 +405,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
- etnaviv_op_to_dma_dir(op));
+ dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+ etnaviv_op_to_dma_dir(op));
etnaviv_obj->last_cpu_prep_op = op;
}
@@ -421,8 +421,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
- dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
+ dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
etnaviv_obj->last_cpu_prep_op = 0;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 6d9e5c3c4dd5..4aa3426a9ba4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
return ERR_PTR(-EINVAL);
- return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
}
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 3607d348c298..15d9fa3879e5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct scatterlist *sg;
unsigned int da = iova;
- unsigned int i, j;
+ unsigned int i;
int ret;
if (!context || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
u32 pa = sg_dma_address(sg) - sg->offset;
size_t bytes = sg_dma_len(sg) + sg->offset;
@@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
return 0;
fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
-
- etnaviv_context_unmap(context, da, bytes);
- da += bytes;
- }
+ etnaviv_context_unmap(context, iova, da - iova);
return ret;
}
@@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
unsigned int da = iova;
int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
etnaviv_context_unmap(context, da, bytes);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 58b89ec11b0e..0644936afee2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -5,7 +5,7 @@
// Author: Andrzej Hajda <a.hajda@samsung.com>
#include <linux/dma-iommu.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
@@ -31,23 +31,6 @@
#define EXYNOS_DEV_ADDR_START 0x20000000
#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
- if (!dev->dma_parms)
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-
/*
* drm_iommu_attach_device- attach device to iommu mapping
*
@@ -69,10 +52,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
return -EINVAL;
}
- ret = configure_dma_max_seg_size(subdrv_dev);
- if (ret)
- return ret;
-
+ dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
/*
* Keep the original DMA mapping of the sub-device and
@@ -89,9 +69,6 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
ret = iommu_attach_device(priv->mapping, subdrv_dev);
}
- if (ret)
- clear_dma_max_seg_size(subdrv_dev);
-
return ret;
}
@@ -114,8 +91,6 @@ static void drm_iommu_detach_device(struct drm_device *drm_dev,
arm_iommu_attach_device(subdrv_dev, *dma_priv);
} else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
-
- clear_dma_max_seg_size(subdrv_dev);
}
int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
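/*
 * Editorial note (an assumption, not stated in the patch): the helper
 * pair removed above existed only to allocate dev->dma_parms before
 * calling dma_set_max_seg_size(). The Exynos sub-devices are platform
 * devices, and the platform core now pre-allocates dma_parms for them,
 * so the plain call is safe. For reference, the setter of this era is
 * just:
 *
 *	static inline int dma_set_max_seg_size(struct device *dev,
 *					       unsigned int size)
 *	{
 *		if (dev->dma_parms) {
 *			dev->dma_parms->max_segment_size = size;
 *			return 0;
 *		}
 *		return -EIO;
 *	}
 */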
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7ba5354e7d94..741323a2e6c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -42,11 +42,6 @@ static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
static enum drm_connector_status
exynos_dpi_detect(struct drm_connector *connector, bool force)
{
- struct exynos_dpi *ctx = connector_to_dpi(connector);
-
- if (ctx->panel)
- drm_panel_attach(ctx->panel, &ctx->connector);
-
return connector_status_connected;
}
@@ -249,8 +244,5 @@ int exynos_dpi_remove(struct drm_encoder *encoder)
exynos_dpi_disable(&ctx->encoder);
- if (ctx->panel)
- drm_panel_detach(ctx->panel);
-
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index b38e9b592b8a..5b9666fc7af1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1551,12 +1551,10 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel)) {
+ if (IS_ERR(dsi->panel))
dsi->panel = NULL;
- } else {
- drm_panel_attach(dsi->panel, &dsi->connector);
+ else
dsi->connector.status = connector_status_connected;
- }
}
/*
@@ -1596,7 +1594,6 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
if (dsi->panel) {
mutex_lock(&drm->mode_config.mutex);
exynos_dsi_disable(&dsi->encoder);
- drm_panel_detach(dsi->panel);
dsi->panel = NULL;
dsi->connector.status = connector_status_disconnected;
mutex_unlock(&drm->mode_config.mutex);
@@ -1763,11 +1760,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->supplies[1].supply = "vddio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_info(dev, "failed to get regulators: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
dsi->clks = devm_kcalloc(dev,
dsi->driver_data->num_clks, sizeof(*dsi->clks),
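/*
 * Editorial sketch of dev_err_probe(), which the conversion above
 * relies on (simplified; see drivers/base/core.c for the real thing):
 *
 *	int dev_err_probe(const struct device *dev, int err,
 *			  const char *fmt, ...)
 *	{
 *		if (err != -EPROBE_DEFER)
 *			dev_err(dev, "error %d: <fmt>\n", err);
 *		return err;
 *	}
 *
 * It logs only when the error is not -EPROBE_DEFER (deferral is
 * expected and noisy to report) and returns err, so probe functions can
 * collapse the old if/log/return dance into a single
 * "return dev_err_probe(...);" statement.
 */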
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 03be31427181..967a5cdc120e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
return;
out:
- dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
- g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
+ DMA_BIDIRECTIONAL, 0);
pages = frame_vector_pages(g2d_userptr->vec);
if (!IS_ERR(pages)) {
@@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
g2d_userptr->sgt = sgt;
- if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
- DMA_BIDIRECTIONAL)) {
+ ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
+ DMA_BIDIRECTIONAL, 0);
+ if (ret) {
DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
- ret = -ENOMEM;
goto err_sg_free_table;
}
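/*
 * Editorial note on the error handling above: dma_map_sg() signals
 * failure by returning 0, forcing callers to invent an errno, whereas
 * dma_map_sgtable() returns 0 or a negative errno directly and records
 * the mapped entry count in the table. Hence the pattern:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)	// already -ENOMEM/-EIO etc., no translation needed
 *		goto err_sg_free_table;
 *	// on success, sgt->nents holds the number of DMA segments
 */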
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index efa476858db5..7777f19c9d38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -42,8 +42,6 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
if (exynos_gem->flags & EXYNOS_BO_WC ||
!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
attr |= DMA_ATTR_WRITE_COMBINE;
- else
- attr |= DMA_ATTR_NON_CONSISTENT;
/* FBDev emulation requires kernel mapping */
if (!kvmap)
@@ -431,27 +429,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
{
struct exynos_drm_gem *exynos_gem;
- if (sgt->nents < 1)
+ /* check if the entries in the sg_table are contiguous */
+ if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
+ DRM_ERROR("buffer chunks must be mapped contiguously");
return ERR_PTR(-EINVAL);
-
- /*
- * Check if the provided buffer has been mapped as contiguous
- * into DMA address space.
- */
- if (sgt->nents > 1) {
- dma_addr_t next_addr = sg_dma_address(sgt->sgl);
- struct scatterlist *s;
- unsigned int i;
-
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- if (!sg_dma_len(s))
- break;
- if (sg_dma_address(s) != next_addr) {
- DRM_ERROR("buffer chunks must be mapped contiguously");
- return ERR_PTR(-EINVAL);
- }
- next_addr = sg_dma_address(s) + sg_dma_len(s);
- }
}
exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
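/*
 * Editorial sketch of the helper the check above uses (assumed shape,
 * matching the open-coded loop it replaces): it measures how many bytes
 * from the start of the table form one contiguous DMA range.
 *
 *	unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
 *	{
 *		dma_addr_t expected = sg_dma_address(sgt->sgl);
 *		unsigned long size = 0;
 *		struct scatterlist *sg;
 *		int i;
 *
 *		for_each_sgtable_dma_sg(sgt, sg, i) {
 *			unsigned int len = sg_dma_len(sg);
 *
 *			if (!len || sg_dma_address(sg) != expected)
 *				break;
 *			expected += len;
 *			size += len;
 *		}
 *		return size;
 *	}
 *
 * A buffer is acceptably contiguous when the result covers the whole
 * attach->dmabuf->size.
 */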
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c5ba32fca5f3..dc01c188c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1797,11 +1797,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->regul_bulk[i].supply = supply[i];
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to get regulators\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9b0c4736c21a..4d4a715b429d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -40,10 +40,7 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
{
- struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
-
drm_connector_unregister(connector);
- drm_panel_detach(fsl_con->panel);
drm_connector_cleanup(connector);
}
@@ -101,12 +98,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_sysfs;
- ret = drm_panel_attach(panel, connector);
- if (ret) {
- dev_err(fsl_dev->dev, "failed to attach panel\n");
- goto err_sysfs;
- }
-
return 0;
err_sysfs:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f41cbb753bb4..720a767118c9 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -2078,7 +2078,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
cdv_intel_edp_panel_vdd_off(gma_encoder);
- if (ret == 0) {
+ if (ret <= 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
drm_encoder_cleanup(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index da02d7e8a8f5..54d9876b5305 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -164,7 +164,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return 0;
}
-static struct fb_ops psbfb_ops = {
+static const struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
@@ -175,7 +175,7 @@ static struct fb_ops psbfb_ops = {
.fb_sync = psbfb_sync,
};
-static struct fb_ops psbfb_roll_ops = {
+static const struct fb_ops psbfb_roll_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
@@ -186,7 +186,7 @@ static struct fb_ops psbfb_roll_ops = {
.fb_mmap = psbfb_mmap,
};
-static struct fb_ops psbfb_unaccel_ops = {
+static const struct fb_ops psbfb_unaccel_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_setcolreg = psbfb_setcolreg,
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index b718efccdcf2..be9cf6b1e3b3 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -6,6 +6,7 @@
**************************************************************************/
#include <linux/delay.h>
+#include <linux/gpio/machine.h>
#include <asm/intel_scu_ipc.h>
@@ -505,12 +506,31 @@ static const struct psb_offset mdfld_regmap[3] = {
},
};
+/*
+ * The GPIO lines for resetting DSI pipes 0 and 2 are available in the
+ * PCI device 0000:00:0c.0 on the Medfield.
+ */
+static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = {
+ .table = {
+ GPIO_LOOKUP("0000:00:0c.0", 128, "dsi-pipe0-reset",
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("0000:00:0c.0", 34, "dsi-pipe2-reset",
+ GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static int mdfld_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (pci_enable_msi(dev->pdev))
dev_warn(dev->dev, "Enabling MSI failed!\n");
dev_priv->regmap = mdfld_regmap;
+
+ /* Associate the GPIO lines with the DRM device */
+ mdfld_dsi_pipe_gpio_table.dev_id = dev_name(dev->dev);
+ gpiod_add_lookup_table(&mdfld_dsi_pipe_gpio_table);
+
return mid_chip_setup(dev);
}
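/*
 * Editorial note: the lookup table registered above is what lets the
 * consumer side ask for GPIOs by function name instead of by number. A
 * minimal consumer sketch (names follow the table; the real code is
 * mdfld_dsi_panel_reset() in a later hunk):
 *
 *	struct gpio_desc *gpiod;
 *
 *	gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH);
 *	if (IS_ERR(gpiod))
 *		return PTR_ERR(gpiod);
 *	...
 *	gpiod_put(gpiod);
 *
 * gpiod_get() matches the table on dev_id (set to the DRM device name
 * in mdfld_chip_setup()) plus the "dsi-pipe0-reset" connection ID,
 * replacing the magic GPIO numbers 128 and 34.
 */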
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index c976a9dd9240..ae1223f631a7 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -955,7 +955,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/* panel hard-reset */
if (p_funcs->reset) {
- ret = p_funcs->reset(pipe);
+ ret = p_funcs->reset(dev, pipe);
if (ret) {
DRM_ERROR("Panel %d hard-reset failed\n", pipe);
return NULL;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index f350ac1ead18..4aab76613bd9 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
#include <asm/intel_scu_ipc.h>
@@ -366,7 +367,7 @@ static enum drm_mode_status mdfld_dsi_connector_mode_valid(struct drm_connector
/**
* FIXME: current DC has no fitting unit, reject any mode setting
* request
- * Will figure out a way to do up-scaling(pannel fitting) later.
+ * Will figure out a way to do up-scaling (panel fitting) later.
**/
if (fixed_mode) {
if (mode->hdisplay != fixed_mode->hdisplay)
@@ -432,42 +433,42 @@ static int mdfld_dsi_get_default_config(struct drm_device *dev,
return 0;
}
-int mdfld_dsi_panel_reset(int pipe)
+int mdfld_dsi_panel_reset(struct drm_device *ddev, int pipe)
{
- unsigned gpio;
- int ret = 0;
-
+ struct device *dev = ddev->dev;
+ struct gpio_desc *gpiod;
+
+ /*
+ * Drive the GPIO reset line for the corresponding pipe HIGH;
+ * the line is presumably active low, so this takes the
+ * respective pipe out of reset. (We have no code to put it back
+ * into reset in this driver.)
+ */
switch (pipe) {
case 0:
- gpio = 128;
+ gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
break;
case 2:
- gpio = 34;
+ gpiod = gpiod_get(dev, "dsi-pipe2-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
break;
default:
- DRM_ERROR("Invalid output\n");
+ DRM_DEV_ERROR(dev, "Invalid output pipe\n");
return -EINVAL;
}
+ gpiod_put(gpiod);
- ret = gpio_request(gpio, "gfx");
- if (ret) {
- DRM_ERROR("gpio_rqueset failed\n");
- return ret;
- }
-
- ret = gpio_direction_output(gpio, 1);
- if (ret) {
- DRM_ERROR("gpio_direction_output failed\n");
- goto gpio_error;
- }
+ /* Flush posted writes on the device */
+ gpiod = gpiod_get(dev, "dsi-pipe0-reset", GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_get_value(gpiod);
+ gpiod_put(gpiod);
- gpio_get_value(128);
-
-gpio_error:
- if (gpio_is_valid(gpio))
- gpio_free(gpio);
-
- return ret;
+ return 0;
}
/*
@@ -531,7 +532,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dsi_config->connector = dsi_connector;
if (!dsi_config->fixed_mode) {
- DRM_ERROR("No pannel fixed mode was found\n");
+ DRM_ERROR("No panel fixed mode was found\n");
goto dsi_init_err0;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 0cccfe400a98..5c0db3c2903f 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -372,6 +372,6 @@ extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
u32 *mode, bool hs);
-extern int mdfld_dsi_panel_reset(int pipe);
+extern int mdfld_dsi_panel_reset(struct drm_device *dev, int pipe);
#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
index 17a944d70add..37a516cc56be 100644
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -54,7 +54,7 @@ struct panel_funcs {
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
struct drm_display_mode * (*get_config_mode)(struct drm_device *);
int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
- int (*reset)(int pipe);
+ int (*reset)(struct drm_device *, int);
void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
};
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 3dd5718c3e31..5340225d6997 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -13,7 +13,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include <linux/gpio.h>
#include "gma_display.h"
/*
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 06e44f47e73e..907f966d6f22 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -125,7 +125,7 @@ struct psb_intel_sdvo {
bool is_lvds;
/**
- * This is sdvo fixed pannel mode pointer
+ * This is sdvo fixed panel mode pointer
*/
struct drm_display_mode *sdvo_lvds_fixed_mode;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 15eb3770d817..361e3a0c5ab6 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -347,6 +347,7 @@ int psb_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -359,20 +360,12 @@ int psb_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
- else
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ else
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
if (dev_priv->ops->hotplug_enable)
dev_priv->ops->hotplug_enable(dev, true);
@@ -385,6 +378,7 @@ void psb_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
+ unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -393,14 +387,10 @@ void psb_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- if (dev->vblank[0].enabled)
- psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[1].enabled)
- psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
- if (dev->vblank[2].enabled)
- psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ for (i = 0; i < dev->num_crtcs; ++i) {
+ if (dev->vblank[i].enabled)
+ psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ }
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
_PSB_IRQ_MSVDX_FLAG |
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index dfc5aef62f7b..43943e980203 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && MMU && ARM64
+ depends on DRM && PCI && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index cc70e836522f..4d57ec688f82 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -17,9 +17,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
@@ -74,12 +71,12 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
if (src_w != state->crtc_w || src_h != state->crtc_h) {
- DRM_DEBUG_ATOMIC("scale not support\n");
+ drm_dbg_atomic(plane->dev, "scale not support\n");
return -EINVAL;
}
if (state->crtc_x < 0 || state->crtc_y < 0) {
- DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n");
+ drm_dbg_atomic(plane->dev, "crtc_x/y of drm_plane state is invalid\n");
return -EINVAL;
}
@@ -90,12 +87,12 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
crtc_state->adjusted_mode.vdisplay) {
- DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n");
+ drm_dbg_atomic(plane->dev, "visible portion of plane is invalid\n");
return -EINVAL;
}
if (state->fb->pitches[0] % 128 != 0) {
- DRM_DEBUG_ATOMIC("wrong stride with 128-byte aligned\n");
+ drm_dbg_atomic(plane->dev, "wrong stride with 128-byte aligned\n");
return -EINVAL;
}
return 0;
@@ -160,37 +157,6 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
.atomic_update = hibmc_plane_atomic_update,
};
-static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
-{
- struct drm_device *dev = priv->dev;
- struct drm_plane *plane;
- int ret = 0;
-
- plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
- if (!plane) {
- DRM_ERROR("failed to alloc memory when init plane\n");
- return ERR_PTR(-ENOMEM);
- }
- /*
- * plane init
- * TODO: Now only support primary plane, overlay planes
- * need to do.
- */
- ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
- channel_formats1,
- ARRAY_SIZE(channel_formats1),
- NULL,
- DRM_PLANE_TYPE_PRIMARY,
- NULL);
- if (ret) {
- DRM_ERROR("failed to init plane: %d\n", ret);
- return ERR_PTR(ret);
- }
-
- drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
- return plane;
-}
-
static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct hibmc_drm_private *priv = crtc->dev->dev_private;
@@ -537,32 +503,34 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
int hibmc_de_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = priv->dev;
- struct drm_crtc *crtc;
- struct drm_plane *plane;
+ struct drm_crtc *crtc = &priv->crtc;
+ struct drm_plane *plane = &priv->primary_plane;
int ret;
- plane = hibmc_plane_init(priv);
- if (IS_ERR(plane)) {
- DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane));
- return PTR_ERR(plane);
- }
+ ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
+ channel_formats1,
+ ARRAY_SIZE(channel_formats1),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
- crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL);
- if (!crtc) {
- DRM_ERROR("failed to alloc memory when init crtc\n");
- return -ENOMEM;
+ if (ret) {
+ drm_err(dev, "failed to init plane: %d\n", ret);
+ return ret;
}
+ drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
+
ret = drm_crtc_init_with_planes(dev, crtc, plane,
NULL, &hibmc_crtc_funcs, NULL);
if (ret) {
- DRM_ERROR("failed to init crtc: %d\n", ret);
+ drm_err(dev, "failed to init crtc: %d\n", ret);
return ret;
}
ret = drm_mode_crtc_set_gamma_size(crtc, 256);
if (ret) {
- DRM_ERROR("failed to set gamma size: %d\n", ret);
+ drm_err(dev, "failed to set gamma size: %d\n", ret);
return ret;
}
drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index eea13e60187b..085d1b2fa8c0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -11,18 +11,14 @@
* Jianhua Li <lijianhua@huawei.com>
*/
-#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
@@ -102,13 +98,13 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
ret = hibmc_de_init(priv);
if (ret) {
- DRM_ERROR("failed to init de: %d\n", ret);
+ drm_err(priv->dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
- DRM_ERROR("failed to init vdac: %d\n", ret);
+ drm_err(priv->dev, "failed to init vdac: %d\n", ret);
return ret;
}
@@ -216,7 +212,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
iosize = pci_resource_len(pdev, 1);
priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
- DRM_ERROR("Cannot map mmio region\n");
+ drm_err(dev, "Cannot map mmio region\n");
return -ENOMEM;
}
@@ -224,7 +220,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
size = pci_resource_len(pdev, 0);
priv->fb_map = devm_ioremap(dev->dev, addr, size);
if (!priv->fb_map) {
- DRM_ERROR("Cannot map framebuffer\n");
+ drm_err(dev, "Cannot map framebuffer\n");
return -ENOMEM;
}
priv->fb_base = addr;
@@ -254,9 +250,8 @@ static int hibmc_unload(struct drm_device *dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- if (priv->msi_enabled)
- pci_disable_msi(dev->pdev);
+ pci_disable_msi(dev->pdev);
hibmc_kms_fini(priv);
hibmc_mm_fini(priv);
dev->dev_private = NULL;
@@ -270,7 +265,7 @@ static int hibmc_load(struct drm_device *dev)
priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
- DRM_ERROR("no memory to allocate for hibmc_drm_private\n");
+ drm_err(dev, "no memory to allocate for hibmc_drm_private\n");
return -ENOMEM;
}
dev->dev_private = priv;
@@ -290,19 +285,17 @@ static int hibmc_load(struct drm_device *dev)
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret) {
- DRM_ERROR("failed to initialize vblank: %d\n", ret);
+ drm_err(dev, "failed to initialize vblank: %d\n", ret);
goto err;
}
- priv->msi_enabled = 0;
ret = pci_enable_msi(dev->pdev);
if (ret) {
- DRM_WARN("enabling MSI failed: %d\n", ret);
+ drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
- priv->msi_enabled = 1;
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
- DRM_WARN("install irq failed: %d\n", ret);
+ drm_warn(dev, "install irq failed: %d\n", ret);
}
/* reset all the states of crtc/plane/encoder/connector */
@@ -312,7 +305,7 @@ static int hibmc_load(struct drm_device *dev)
err:
hibmc_unload(dev);
- DRM_ERROR("failed to initialize drm driver: %d\n", ret);
+ drm_err(dev, "failed to initialize drm driver: %d\n", ret);
return ret;
}
@@ -338,19 +331,19 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret) {
- DRM_ERROR("failed to enable pci device: %d\n", ret);
+ drm_err(dev, "failed to enable pci device: %d\n", ret);
goto err_free;
}
ret = hibmc_load(dev);
if (ret) {
- DRM_ERROR("failed to load hibmc: %d\n", ret);
+ drm_err(dev, "failed to load hibmc: %d\n", ret);
goto err_disable;
}
ret = drm_dev_register(dev, 0);
if (ret) {
- DRM_ERROR("failed to register drv for userspace access: %d\n",
+ drm_err(dev, "failed to register drv for userspace access: %d\n",
ret);
goto err_unload;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 609768748de6..197485e2fe0b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -25,10 +25,11 @@ struct hibmc_drm_private {
void __iomem *fb_map;
unsigned long fb_base;
unsigned long fb_size;
- bool msi_enabled;
/* drm */
struct drm_device *dev;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
bool mode_config_initialized;
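/*
 * Editorial note: moving the plane and CRTC into hibmc_drm_private
 * (itself allocated with drmm_kzalloc() in hibmc_load()) ties their
 * lifetime to the drm_device and removes two devm allocations plus
 * their error paths. The init site then works on embedded members:
 *
 *	struct drm_plane *plane = &priv->primary_plane;
 *	struct drm_crtc *crtc = &priv->crtc;
 *
 *	ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
 *					&hibmc_crtc_funcs, NULL);
 */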
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 2ca69c38491a..376a05ddbc2f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,10 +11,8 @@
* Jianhua Li <lijianhua@huawei.com>
*/
-#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -87,7 +85,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
- DRM_ERROR("failed to init encoder: %d\n", ret);
+ drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
}
@@ -96,7 +94,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drm_connector_init(dev, connector, &hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret) {
- DRM_ERROR("failed to init connector: %d\n", ret);
+ drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 322bd542e89d..602ece11bb4a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -32,7 +32,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
hibmc->fb_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
- DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 303c2d483c6e..88250860f8e4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -853,11 +853,11 @@ static void i810_dma_quiescent(struct drm_device *dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(struct drm_device *dev)
+static void i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
+ int i;
RING_LOCALS;
i810_kernel_lost_context(dev);
@@ -882,7 +882,7 @@ static int i810_flush_queue(struct drm_device *dev)
DRM_DEBUG("still on client\n");
}
- return ret;
+ return;
}
/* Must be called with the lock held */
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 9afa5c4a6bf0..1e1cb245fca7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -25,6 +25,7 @@ config DRM_I915
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
+ select VMAP_PFN
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1cb28c20807c..25cd9788a4d5 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -153,6 +153,7 @@ config DRM_I915_SELFTEST
select DRM_EXPORT_FOR_TESTS if m
select FAULT_INJECTION
select PRIME_NUMBERS
+ select CRC32
help
Choose this option to allow the driver to perform selftests upon
loading; also requires the i915.selftest=1 module parameter. To
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index bda4c0e408f8..e5574e506a5c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -234,6 +234,7 @@ i915-y += \
display/intel_ddi.o \
display/intel_dp.o \
display/intel_dp_aux_backlight.o \
+ display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
display/intel_dp_mst.o \
display/intel_dsi.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f4053dd6bde9..520715b7d5b5 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1646,6 +1646,7 @@ static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
};
static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .detect = intel_panel_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 630f49b7aa01..86be032bcf96 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -527,8 +527,6 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
intel_atomic_clear_global_state(state);
state->dpll_set = state->modeset = false;
- state->global_state_changed = false;
- state->active_pipes = 0;
}
struct intel_crtc_state *
@@ -542,40 +540,3 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
-
-int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- state->global_state_changed = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- int ret;
-
- ret = drm_modeset_lock(&crtc->base.mutex,
- state->base.acquire_ctx);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- state->global_state_changed = true;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 11146292b06f..285de07011dc 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -56,8 +56,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
-
-int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
-
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ad4aa66fd676..f7de55707746 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -958,13 +958,8 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
if (IS_ERR(cdclk_state))
return PTR_ERR(cdclk_state);
- cdclk_state->force_min_cdclk_changed = true;
cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
-
return drm_atomic_commit(&state->base);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a0a41ec5c341..4716484af62d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1656,6 +1656,8 @@ static enum port dvo_port_to_port(struct drm_i915_private *dev_priv,
[PORT_E] = { DVO_PORT_HDMIE, DVO_PORT_DPE, DVO_PORT_CRT },
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1 },
[PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1 },
+ [PORT_H] = { DVO_PORT_HDMIH, DVO_PORT_DPH, -1 },
+ [PORT_I] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
/*
* Bspec lists the ports as A, B, C, D - however internally in our
@@ -2133,7 +2135,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
- if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
+ if (!HAS_DISPLAY(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"Skipping VBT init due to disabled display.\n");
return;
@@ -2650,6 +2652,12 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
case DP_AUX_G:
aux_ch = AUX_CH_G;
break;
+ case DP_AUX_H:
+ aux_ch = AUX_CH_H;
+ break;
+ case DP_AUX_I:
+ aux_ch = AUX_CH_I;
+ break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 91a8161e7c05..cb93f6cf6d37 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2426,7 +2426,6 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
if (!cdclk_state)
return NULL;
- cdclk_state->force_min_cdclk_changed = false;
cdclk_state->pipe = INVALID_PIPE;
return &cdclk_state->base;
@@ -2501,6 +2500,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
} else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
intel_cdclk_changed(&old_cdclk_state->logical,
&new_cdclk_state->logical)) {
ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
@@ -2677,7 +2677,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_de_write(dev_priv, GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}
static int cnp_rawclk(struct drm_i915_private *dev_priv)
@@ -2903,9 +2903,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = i85x_get_cdclk;
else if (IS_I845G(dev_priv))
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
- else { /* 830 */
- drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
- "Unknown platform. Assuming 133 MHz CDCLK\n");
+ else if (IS_I830(dev_priv))
+ dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+
+ if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk,
+ "Unknown platform. Assuming 133 MHz CDCLK\n"))
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
- }
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 5731806e4cee..b34eb00fb327 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -17,8 +17,8 @@ struct intel_atomic_state;
struct intel_crtc_state;
struct intel_cdclk_vals {
- u16 refclk;
u32 cdclk;
+ u16 refclk;
u8 divider; /* CD2X divider * 2 */
u8 ratio;
};
@@ -49,7 +49,6 @@ struct intel_cdclk_state {
/* forced minimum cdclk for glk+ audio w/a */
int force_min_cdclk;
- bool force_min_cdclk_changed;
/* bitmask of active pipes */
u8 active_pipes;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 5b4510ce5693..4934edd51cb0 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -833,6 +833,9 @@ intel_crt_detect(struct drm_connector *connector,
connector->base.id, connector->name,
force);
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
if (dev_priv->params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index f22a7645c249..d5db16764619 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,12 +40,12 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define RKL_CSR_PATH "i915/rkl_dmc_ver2_01.bin"
-#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 1)
+#define RKL_CSR_PATH "i915/rkl_dmc_ver2_02.bin"
+#define RKL_CSR_VERSION_REQUIRED CSR_VERSION(2, 2)
MODULE_FIRMWARE(RKL_CSR_PATH);
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_08.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 8)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index a49ff3a1a63c..cdcb7b1034ae 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -572,13 +572,13 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
/* NT mV Trans mV db */
{ 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
- { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */
- { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xC, 0x64, 0x34, 0x00, 0x0B }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 350 900 8.2 */
{ 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
- { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
- { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x64, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
{ 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
- { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x38, 0x00, 0x07 }, /* 600 900 3.5 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
@@ -706,6 +706,42 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] =
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
+static const struct cnl_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x60, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */
+ { 0xC, 0x7F, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */
+ { 0xC, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x6F, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 500 900 5.1 */
+ { 0x6, 0x60, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+/*
+ * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
+ * that the DisplayPort specification requires
+ */
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+ /* VS pre-emp */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 0 3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 1 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1 2 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 0 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
+};
+
+static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
+{
+ return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+}
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
{
@@ -1038,27 +1074,74 @@ static const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
- if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ switch (type) {
+ case INTEL_OUTPUT_HDMI:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+ case INTEL_OUTPUT_EDP:
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (rate > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+ }
+ /* fall through */
+ default:
+ /* All combo DP and eDP ports that do not support low_vswing */
*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
return ehl_combo_phy_ddi_translations_dp;
}
-
- return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
}
static const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate,
int *n_entries)
{
- if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
- return icl_get_combo_buf_trans(encoder, type, rate, n_entries);
- } else if (rate > 270000) {
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
- return tgl_combo_phy_ddi_translations_dp_hbr2;
- }
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
- return tgl_combo_phy_ddi_translations_dp_hbr;
+ switch (type) {
+ case INTEL_OUTPUT_HDMI:
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+ case INTEL_OUTPUT_EDP:
+ if (dev_priv->vbt.edp.hobl) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp->hobl_failed && rate <= 540000) {
+ /* Same table applies to TGL, RKL and DG1 */
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_edp_hbr2_hobl);
+ return tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ }
+ }
+
+ if (rate > 540000) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
+ }
+ /* fall through */
+ default:
+ /* All combo DP and eDP ports that do not support low_vswing */
+ if (rate > 270000) {
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
+ *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+ }
}
static const struct tgl_dkl_phy_ddi_buf_trans *
@@ -1738,6 +1821,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ drm_WARN_ON(crtc->base.dev, ctl & TRANS_DDI_HDCP_SIGNALLING);
+
ctl &= ~TRANS_DDI_FUNC_ENABLE;
if (IS_GEN_RANGE(dev_priv, 8, 10))
@@ -1765,12 +1850,12 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
}
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
bool enable)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
intel_wakeref_t wakeref;
- enum pipe pipe = 0;
int ret = 0;
u32 tmp;
@@ -1779,19 +1864,12 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- if (drm_WARN_ON(dev,
- !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
- ret = -EIO;
- goto out;
- }
-
- tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (enable)
tmp |= TRANS_DDI_HDCP_SIGNALLING;
else
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
- intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
-out:
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp);
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -2392,6 +2470,15 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
level = n_entries - 1;
}
+ if (type == INTEL_OUTPUT_EDP) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
+ intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations);
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
+ intel_dp->hobl_active ? val : 0);
+ }
+
/* Set PORT_TX_DW5 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
@@ -2655,7 +2742,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
int rate = 0;
- if (type == INTEL_OUTPUT_HDMI) {
+ if (type != INTEL_OUTPUT_HDMI) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
rate = intel_dp->link_rate;
@@ -2802,7 +2889,9 @@ hsw_set_signal_levels(struct intel_dp *intel_dp)
static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
enum phy phy)
{
- if (intel_phy_is_combo(dev_priv, phy)) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ return RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ } else if (intel_phy_is_combo(dev_priv, phy)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
} else if (intel_phy_is_tc(dev_priv, phy)) {
enum tc_port tc_port = intel_port_to_tc(dev_priv,
@@ -2829,6 +2918,16 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
(val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_phy_is_combo(dev_priv, phy)) {
+ u32 mask, sel;
+
+ if (IS_ROCKETLAKE(dev_priv)) {
+ mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ sel = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ } else {
+ mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ sel = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ }
+
/*
* Even though this register references DDIs, note that we
* want to pass the PHY rather than the port (DDI). For
@@ -2839,8 +2938,8 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
* Clock Select chooses the PLL for both DDIA and DDID and
* drives port A in all cases."
*/
- val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ val &= ~mask;
+ val |= sel;
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
}
@@ -3371,6 +3470,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_configure_protocol_converter(intel_dp);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -3482,19 +3582,17 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
conn_state);
} else {
- struct intel_lspcon *lspcon =
- enc_to_intel_lspcon(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_ddi_pre_enable_dp(state, encoder, crtc_state,
conn_state);
- if (lspcon->active) {
- struct intel_digital_port *dig_port =
- enc_to_dig_port(encoder);
+ /* FIXME precompute everything properly */
+ /* FIXME how do we turn infoframes off again? */
+ if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
- }
}
}
@@ -3938,18 +4036,19 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
intel_psr_update(intel_dp, crtc_state, conn_state);
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_edp_drrs_enable(intel_dp, crtc_state);
+ intel_edp_drrs_update(intel_dp, crtc_state);
intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
}
-static void intel_ddi_update_pipe(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ !intel_encoder_is_mst(encoder))
intel_ddi_update_pipe_dp(state, encoder, crtc_state,
conn_state);
@@ -4037,8 +4136,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- dp_tp_ctl = DP_TP_CTL_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
if (intel_dp->link_mst)
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
else {
@@ -4061,16 +4159,10 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
- enum port port = dp_to_dig_port(intel_dp)->base.port;
u32 temp;
temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
- if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
- temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
- else
- temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
-
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & train_pat_mask) {
case DP_TRAINING_PATTERN_DISABLE:
@@ -4091,9 +4183,6 @@ static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
}
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
-
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
@@ -4878,6 +4967,64 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
return max_lanes;
}
+static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
+{
+ return i915->hti_state & HDPORT_ENABLED &&
+ (i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
+ i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
+}
+
+static enum hpd_pin tgl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_D)
+ return HPD_PORT_TC1 + port - PORT_D;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin rkl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (HAS_PCH_TGP(dev_priv))
+ return tgl_hpd_pin(dev_priv, port);
+
+ if (port >= PORT_D)
+ return HPD_PORT_C + port - PORT_D;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin icl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port >= PORT_C)
+ return HPD_PORT_TC1 + port - PORT_C;
+ else
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port == PORT_D)
+ return HPD_PORT_A;
+
+ if (HAS_PCH_MCC(dev_priv))
+ return icl_hpd_pin(dev_priv, port);
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
+static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (port == PORT_F)
+ return HPD_PORT_E;
+
+ return HPD_PORT_A + port - PORT_A;
+}
+
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *dig_port;
@@ -4885,6 +5032,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
+ /*
+ * On platforms with HTI (aka HDPORT), if it's enabled at boot it may
+ * have taken over some of the PHYs and made them unavailable to the
+ * driver. In that case we should skip initializing the corresponding
+ * outputs.
+ */
+ if (hti_uses_phy(dev_priv, phy)) {
+ drm_dbg_kms(&dev_priv->drm, "PORT %c / PHY %c reserved by HTI\n",
+ port_name(port), phy_name(phy));
+ return;
+ }
+
init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
intel_bios_port_supports_hdmi(dev_priv, port);
init_dp = intel_bios_port_supports_dp(dev_priv, port);
@@ -4918,6 +5077,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
+ mutex_init(&dig_port->hdcp_mutex);
+ dig_port->num_hdcp_streams = 0;
+
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
@@ -4939,6 +5101,19 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
+ if (IS_ROCKETLAKE(dev_priv))
+ encoder->hpd_pin = rkl_hpd_pin(dev_priv, port);
+ else if (INTEL_GEN(dev_priv) >= 12)
+ encoder->hpd_pin = tgl_hpd_pin(dev_priv, port);
+ else if (IS_ELKHARTLAKE(dev_priv))
+ encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
+ else if (IS_GEN(dev_priv, 11))
+ encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
+ else if (IS_GEN(dev_priv, 10))
+ encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
+ else
+ encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+
if (INTEL_GEN(dev_priv) >= 11)
dig_port->saved_port_bits =
intel_de_read(dev_priv, DDI_BUF_CTL(port))
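/*
 * Editorial summary of the hpd_pin helpers added above (read off their
 * arithmetic, not stated in the patch). For Tiger Lake:
 *
 *	PORT_A..PORT_C -> HPD_PORT_A..HPD_PORT_C	(combo PHYs)
 *	PORT_D..       -> HPD_PORT_TC1, HPD_PORT_TC2, ...	(Type-C PHYs)
 *
 * Rocket Lake uses the TGL mapping when paired with a TGP PCH and
 * otherwise maps PORT_D onward to HPD_PORT_C onward; Elkhart Lake
 * special-cases PORT_D as HPD_PORT_A; Cannon Lake maps PORT_F to
 * HPD_PORT_E; older platforms fall back to intel_hpd_pin_default().
 */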
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 077e9dbbe367..f5fb62fc9400 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -16,6 +16,7 @@ struct intel_crtc_state;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;
+enum transcoder;
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
@@ -43,6 +44,7 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ enum transcoder cpu_transcoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b18c5ac2934d..31337d2a2cde 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -47,6 +47,7 @@
#include "display/intel_ddi.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
+#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_gmbus.h"
@@ -66,6 +67,7 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
@@ -2310,7 +2312,7 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- i915_gem_object_lock(vma->obj);
+ i915_gem_object_lock(vma->obj, NULL);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
@@ -3432,6 +3434,14 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ERR(obj))
return NULL;
+ /*
+ * Mark it WT ahead of time to avoid changing the
+ * cache_level during fbdev initialization. The
+ * unbind there would get stuck waiting for rcu.
+ */
+ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+
switch (plane_config->tiling) {
case I915_TILING_NONE:
break;
@@ -3450,7 +3460,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ERR(vma))
goto err_obj;
- if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
goto err_obj;
if (i915_gem_object_is_tiled(obj) &&
@@ -3761,6 +3771,44 @@ static int glk_max_plane_width(const struct drm_framebuffer *fb,
}
}
+static int icl_min_plane_width(const struct drm_framebuffer *fb)
+{
+ /* Wa_14011264657, Wa_14011050563: gen11+ */
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return 18;
+ case DRM_FORMAT_RGB565:
+ return 10;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ return 6;
+ case DRM_FORMAT_NV12:
+ return 20;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return 12;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ return 4;
+ default:
+ return 1;
+ }
+}
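The new minimum is enforced in skl_check_main_surface() below. As an editor's sketch (illustrative values, not part of the patch), an NV12 plane narrower than 20 pixels is now rejected on gen11+:

	/* sketch: gen11+ source-width validation for an NV12 fb */
	int min_width = icl_min_plane_width(fb);	/* NV12 -> 20 */

	if (w < min_width)	/* e.g. w == 16 now fails */
		return -EINVAL;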
+
static int icl_max_plane_width(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
@@ -3843,29 +3891,31 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int y = plane_state->uapi.src.y1 >> 16;
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
- int max_width;
- int max_height;
- u32 alignment;
- u32 offset;
+ int max_width, min_width, max_height;
+ u32 alignment, offset;
int aux_plane = intel_main_to_aux_plane(fb, 0);
u32 aux_offset = plane_state->color_plane[aux_plane].offset;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
max_width = icl_max_plane_width(fb, 0, rotation);
- else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ min_width = icl_min_plane_width(fb);
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
max_width = glk_max_plane_width(fb, 0, rotation);
- else
+ min_width = 1;
+ } else {
max_width = skl_max_plane_width(fb, 0, rotation);
+ min_width = 1;
+ }
if (INTEL_GEN(dev_priv) >= 11)
max_height = icl_max_plane_height();
else
max_height = skl_max_plane_height();
- if (w > max_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height) {
drm_dbg_kms(&dev_priv->drm,
- "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ w, h, min_width, max_width, max_height);
return -EINVAL;
}
@@ -4051,8 +4101,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
- bool needs_aux = false;
+ int ret, i;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
@@ -4066,7 +4115,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* it.
*/
if (is_ccs_modifier(fb->modifier)) {
- needs_aux = true;
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
@@ -4074,20 +4122,15 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
if (intel_format_info_is_yuv_semiplanar(fb->format,
fb->modifier)) {
- needs_aux = true;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
}
- if (!needs_aux) {
- int i;
-
- for (i = 1; i < fb->format->num_planes; i++) {
- plane_state->color_plane[i].offset = ~0xfff;
- plane_state->color_plane[i].x = 0;
- plane_state->color_plane[i].y = 0;
- }
+ for (i = fb->format->num_planes; i < ARRAY_SIZE(plane_state->color_plane); i++) {
+ plane_state->color_plane[i].offset = ~0xfff;
+ plane_state->color_plane[i].x = 0;
+ plane_state->color_plane[i].y = 0;
}
ret = skl_check_main_surface(plane_state);
@@ -7290,6 +7333,10 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_F_LANES;
case PORT_G:
return POWER_DOMAIN_PORT_DDI_G_LANES;
+ case PORT_H:
+ return POWER_DOMAIN_PORT_DDI_H_LANES;
+ case PORT_I:
+ return POWER_DOMAIN_PORT_DDI_I_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -7315,6 +7362,10 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
return POWER_DOMAIN_AUX_F_TBT;
case AUX_CH_G:
return POWER_DOMAIN_AUX_G_TBT;
+ case AUX_CH_H:
+ return POWER_DOMAIN_AUX_H_TBT;
+ case AUX_CH_I:
+ return POWER_DOMAIN_AUX_I_TBT;
default:
MISSING_CASE(dig_port->aux_ch);
return POWER_DOMAIN_AUX_C_TBT;
@@ -7346,6 +7397,10 @@ intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
return POWER_DOMAIN_AUX_F;
case AUX_CH_G:
return POWER_DOMAIN_AUX_G;
+ case AUX_CH_H:
+ return POWER_DOMAIN_AUX_H;
+ case AUX_CH_I:
+ return POWER_DOMAIN_AUX_I;
default:
MISSING_CASE(aux_ch);
return POWER_DOMAIN_AUX_A;
@@ -8114,7 +8169,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
* which the devices expect also in synchronous clock mode.
*/
if (constant_n)
- *ret_n = 0x8000;
+ *ret_n = DP_LINK_CONSTANT_N_VALUE;
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
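For context, a minimal sketch (editor's illustration, not part of the patch) of what pinning N to DP_LINK_CONSTANT_N_VALUE (0x8000) implies: M is rescaled so the data-rate/link-rate ratio m/n is preserved under the fixed N.

	/* sketch only: derive M for a constant N of 0x8000 */
	static void sketch_constant_n(unsigned int m, unsigned int n,
				      u32 *ret_m, u32 *ret_n)
	{
		*ret_n = 0x8000;	/* DP_LINK_CONSTANT_N_VALUE */
		*ret_m = div_u64((u64)m * *ret_n, n);	/* keep m/n */
	}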
@@ -10581,6 +10636,10 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
+ /* 90/270 degree rotation would require extra work */
+ if (drm_rotation_90_or_270(plane_config->rotation))
+ goto error;
+
base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
@@ -10802,9 +10861,18 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
u32 temp;
if (intel_phy_is_combo(dev_priv, phy)) {
- temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
- ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
- id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ u32 mask, shift;
+
+ if (IS_ROCKETLAKE(dev_priv)) {
+ mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ } else {
+ mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ }
+
+ temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
+ id = temp >> shift;
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
} else if (intel_phy_is_tc(dev_priv, phy)) {
u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@ -12760,6 +12828,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
+ if (!mode_changed)
+ intel_psr2_sel_fetch_update(state, crtc);
+
return 0;
}
@@ -13418,12 +13489,6 @@ encoder_retry:
"hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
- /*
- * Make drm_calc_timestamping_constants in
- * drm_atomic_helper_update_legacy_modeset_state() happy
- */
- pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
-
return 0;
}
@@ -14244,7 +14309,6 @@ verify_crtc_state(struct intel_crtc *crtc,
struct intel_encoder *encoder;
struct intel_crtc_state *pipe_config = old_crtc_state;
struct drm_atomic_state *state = old_crtc_state->uapi.state;
- bool active;
__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
intel_crtc_free_hw_state(old_crtc_state);
@@ -14254,16 +14318,19 @@ verify_crtc_state(struct intel_crtc *crtc,
drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
crtc->base.name);
- active = dev_priv->display.get_pipe_config(crtc, pipe_config);
+ pipe_config->hw.enable = new_crtc_state->hw.enable;
+
+ pipe_config->hw.active =
+ dev_priv->display.get_pipe_config(crtc, pipe_config);
/* we keep both pipes enabled on 830 */
- if (IS_I830(dev_priv))
- active = new_crtc_state->hw.active;
+ if (IS_I830(dev_priv) && pipe_config->hw.active)
+ pipe_config->hw.active = new_crtc_state->hw.active;
- I915_STATE_WARN(new_crtc_state->hw.active != active,
+ I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n",
- new_crtc_state->hw.active, active);
+ new_crtc_state->hw.active, pipe_config->hw.active);
I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
"transitional active state does not match atomic hw state "
@@ -14272,6 +14339,7 @@ verify_crtc_state(struct intel_crtc *crtc,
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
+ bool active;
active = encoder->get_hw_state(encoder, &pipe);
I915_STATE_WARN(active != new_crtc_state->hw.active,
@@ -14583,16 +14651,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
static int intel_modeset_checks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
state->modeset = true;
- state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
-
- if (state->active_pipes != dev_priv->active_pipes) {
- ret = _intel_atomic_lock_global_state(state);
- if (ret)
- return ret;
- }
if (IS_HASWELL(dev_priv))
return hsw_mode_set_planes_workaround(state);
@@ -14736,7 +14796,8 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_cdclk_state *new_cdclk_state;
+ const struct intel_cdclk_state *old_cdclk_state;
+ const struct intel_cdclk_state *new_cdclk_state;
struct intel_plane_state *plane_state;
struct intel_bw_state *new_bw_state;
struct intel_plane *plane;
@@ -14755,9 +14816,11 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
return ret;
}
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
- if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
*need_cdclk_calc = true;
ret = dev_priv->display.bw_calc_min_cdclk(state);
@@ -15134,6 +15197,8 @@ static void commit_pipe_config(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_pipe_fastset(old_crtc_state, new_crtc_state);
+
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
}
if (dev_priv->display.atomic_update_watermarks)
@@ -15702,14 +15767,6 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
-static void assert_global_state_locked(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- drm_modeset_lock_assert_held(&crtc->base.mutex);
-}
-
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *_state,
bool nonblock)
@@ -15785,12 +15842,6 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (state->global_state_changed) {
- assert_global_state_locked(dev_priv);
-
- dev_priv->active_pipes = state->active_pipes;
- }
-
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
@@ -16837,7 +16888,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+ if (!HAS_DISPLAY(dev_priv))
return;
if (IS_ROCKETLAKE(dev_priv)) {
@@ -17137,7 +17188,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (!intel_fb->frontbuffer)
return -ENOMEM;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
tiling = i915_gem_object_get_tiling(obj);
stride = i915_gem_object_get_stride(obj);
i915_gem_object_unlock(obj);
@@ -17823,6 +17874,27 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
int ret;
+ if (i915_inject_probe_failure(i915))
+ return -ENODEV;
+
+ if (HAS_DISPLAY(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
+ if (ret)
+ return ret;
+ }
+
+ intel_bios_init(i915);
+
+ ret = intel_vga_register(i915);
+ if (ret)
+ goto cleanup_bios;
+
+ /* FIXME: completely on the wrong abstraction layer */
+ intel_power_domains_init_hw(i915, false);
+
+ intel_csr_ucode_init(i915);
+
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
@@ -17831,15 +17903,15 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
ret = intel_cdclk_init(i915);
if (ret)
- return ret;
+ goto cleanup_vga_client_pw_domain_csr;
ret = intel_dbuf_init(i915);
if (ret)
- return ret;
+ goto cleanup_vga_client_pw_domain_csr;
ret = intel_bw_init(i915);
if (ret)
- return ret;
+ goto cleanup_vga_client_pw_domain_csr;
init_llist_head(&i915->atomic_helper.free_list);
INIT_WORK(&i915->atomic_helper.free_work,
@@ -17850,10 +17922,19 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
intel_fbc_init(i915);
return 0;
+
+cleanup_vga_client_pw_domain_csr:
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ intel_vga_unregister(i915);
+cleanup_bios:
+ intel_bios_driver_remove(i915);
+
+ return ret;
}
-/* part #2: call after irq install */
-int intel_modeset_init(struct drm_i915_private *i915)
+/* part #2: call after irq install, but before gem init */
+int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
struct drm_device *dev = &i915->drm;
enum pipe pipe;
@@ -17870,7 +17951,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
INTEL_NUM_PIPES(i915),
INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
- if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ if (HAS_DISPLAY(i915)) {
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret) {
@@ -17892,6 +17973,13 @@ int intel_modeset_init(struct drm_i915_private *i915)
if (i915->max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
+ /*
+ * If the platform has HTI, we need to find out whether it has reserved
+ * any display resources before we create our display outputs.
+ */
+ if (INTEL_INFO(i915)->display.has_hti)
+ i915->hti_state = intel_de_read(i915, HDPORT_STATE);
+
/* Just disable it once at startup */
intel_vga_disable(i915);
intel_setup_outputs(i915);
@@ -17945,6 +18033,30 @@ int intel_modeset_init(struct drm_i915_private *i915)
return 0;
}
+/* part #3: call after gem init */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ int ret;
+
+ intel_overlay_setup(i915);
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ ret = intel_fbdev_init(&i915->drm);
+ if (ret)
+ return ret;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(i915);
+
+ intel_init_ipc(i915);
+
+ intel_psr_set_force_mode_changed(i915->psr.dp);
+
+ return 0;
+}
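Taken together with part #1 and part #2 above, the split implies the following probe-time ordering; a sketch with the surrounding driver code elided (error handling omitted):

	intel_modeset_init_noirq(i915);		/* part #1: before irq install */
	/* ... irq install ... */
	intel_modeset_init_nogem(i915);		/* part #2: after irqs, before gem */
	/* ... gem init ... */
	intel_modeset_init(i915);		/* part #3: after gem */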
+
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -18829,6 +18941,18 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
intel_fbc_cleanup_cfb(i915);
}
+/* part #3: call after gem init */
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
+{
+ intel_csr_ucode_fini(i915);
+
+ intel_power_domains_driver_remove(i915);
+
+ intel_vga_unregister(i915);
+
+ intel_bios_driver_remove(i915);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_display_error_state {
@@ -18889,7 +19013,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
- if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index e890c8fb779b..d10b7c8cde3f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -272,8 +272,6 @@ enum dpio_phy {
DPIO_PHY2,
};
-#define I915_NUM_PHYS_VLV 2
-
enum aux_ch {
AUX_CH_A,
AUX_CH_B,
@@ -282,6 +280,8 @@ enum aux_ch {
AUX_CH_E, /* ICL+ */
AUX_CH_F,
AUX_CH_G,
+ AUX_CH_H,
+ AUX_CH_I,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -629,9 +629,11 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
int intel_modeset_init_noirq(struct drm_i915_private *i915);
+int intel_modeset_init_nogem(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 5a5cfe25085b..0bf31f9a8af5 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -417,6 +417,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
seq_printf(m, "%d\t%d\n", frame, su_blocks);
}
+
+ seq_printf(m, "PSR2 selective fetch: %s\n",
+ enableddisabled(psr->psr2_sel_fetch_enabled));
}
unlock:
@@ -598,6 +601,11 @@ static void intel_hdcp_info(struct seq_file *m,
{
bool hdcp_cap, hdcp2_cap;
+ if (!intel_connector->hdcp.shim) {
+ seq_puts(m, "No Connector Support");
+ goto out;
+ }
+
hdcp_cap = intel_hdcp_capable(intel_connector);
hdcp2_cap = intel_hdcp2_capable(intel_connector);
@@ -609,6 +617,7 @@ static void intel_hdcp_info(struct seq_file *m,
if (!hdcp_cap && !hdcp2_cap)
seq_puts(m, "None");
+out:
seq_puts(m, "\n");
}
@@ -617,6 +626,7 @@ static void intel_dp_info(struct seq_file *m,
{
struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
@@ -624,11 +634,7 @@ static void intel_dp_info(struct seq_file *m,
intel_panel_info(m, &intel_connector->panel);
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
- &intel_dp->aux);
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
+ edid ? edid->data : NULL, &intel_dp->aux);
}
static void intel_dp_mst_info(struct seq_file *m,
@@ -646,10 +652,6 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
}
static void intel_lvds_info(struct seq_file *m,
@@ -705,6 +707,9 @@ static void intel_connector_info(struct seq_file *m,
break;
}
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
intel_seq_print_mode(m, 2, mode);
@@ -1066,10 +1071,18 @@ static void drrs_status_per_crtc(struct seq_file *m,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ bool supported = false;
+
if (connector->state->crtc != &intel_crtc->base)
continue;
seq_printf(m, "%s:\n", connector->name);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ drrs->type == SEAMLESS_DRRS_SUPPORT)
+ supported = true;
+
+ seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
}
drm_connector_list_iter_end(&conn_iter);
@@ -1080,7 +1093,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
mutex_lock(&drrs->mutex);
/* DRRS Supported */
- seq_puts(m, "\tDRRS Supported: Yes\n");
+ seq_puts(m, "\tDRRS Enabled: Yes\n");
/* disable_drrs() will make drrs->dp NULL */
if (!drrs->dp) {
@@ -1115,7 +1128,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
mutex_unlock(&drrs->mutex);
} else {
/* DRRS not supported. Print the VBT parameter*/
- seq_puts(m, "\tDRRS Supported : No");
+ seq_puts(m, "\tDRRS Enabled : No");
}
seq_puts(m, "\n");
}
@@ -2026,10 +2039,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- /* HDCP is supported by connector */
- if (!intel_connector->hdcp.shim)
- return -EINVAL;
-
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
intel_hdcp_info(m, intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e0fcb89c736b..7277e58b01f1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3927,12 +3927,13 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
int ret;
while (1) {
- u32 low_val = 0, high_val;
+ u32 low_val;
+ u32 high_val = 0;
if (block)
- high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
else
- high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
/*
* Spec states that we should timeout the request after 200us
@@ -3951,8 +3952,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
if (++tries == 3)
break;
- if (ret == -EAGAIN)
- msleep(1);
+ msleep(1);
}
if (ret)
@@ -5263,7 +5263,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
int config, i;
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
+ if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
/* Wa_1409767108: tgl */
table = wa_1409767108_buddy_page_masks;
else
@@ -5302,6 +5302,12 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /* Wa_14011294188:ehl,jsl,tgl,rkl */
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
+ INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
+ intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
+ PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
+
/* 1. Enable PCH reset handshake. */
intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e8f809161c75..3d4bf9b6a0a2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -28,6 +28,7 @@
#include <linux/async.h>
#include <linux/i2c.h>
+#include <linux/pwm.h>
#include <linux/sched/clock.h>
#include <drm/drm_atomic.h>
@@ -223,6 +224,7 @@ struct intel_panel {
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
+ struct pwm_state pwm_state;
/* DPCD backlight */
u8 pwmgen_bit_count;
@@ -314,10 +316,12 @@ struct intel_hdcp_shim {
/* Enables HDCP signalling on the port */
int (*toggle_signalling)(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
bool enable);
/* Ensures the link is still protected */
- bool (*check_link)(struct intel_digital_port *dig_port);
+ bool (*check_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *dig_port,
@@ -479,8 +483,6 @@ struct intel_atomic_state {
bool dpll_set, modeset;
- u8 active_pipes;
-
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/*
@@ -491,11 +493,6 @@ struct intel_atomic_state {
bool rps_interactive;
- /*
- * active_pipes
- */
- bool global_state_changed;
-
struct i915_sw_fence commit_ready;
struct llist_node freed;
@@ -931,6 +928,7 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
+ bool enable_psr2_sel_fetch;
u32 dc3co_exitline;
/*
@@ -1073,6 +1071,8 @@ struct intel_crtc_state {
/* For DSB related info */
struct intel_dsb *dsb;
+
+ u32 psr2_man_track_ctl;
};
enum intel_pipe_crc_source {
@@ -1272,6 +1272,7 @@ struct intel_dp {
u8 sink_count;
bool link_mst;
bool link_trained;
+ bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1373,8 +1374,19 @@ struct intel_dp {
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
+ /* Downstream facing port caps */
+ struct {
+ int min_tmds_clock, max_tmds_clock;
+ int max_dotclock;
+ u8 max_bpc;
+ bool ycbcr_444_to_420;
+ } dfp;
+
/* Display stream compression testing */
bool force_dsc_en;
+
+ bool hobl_failed;
+ bool hobl_active;
};
enum lspcon_vendor {
@@ -1409,6 +1421,11 @@ struct intel_digital_port {
enum phy_fia tc_phy_fia;
u8 tc_phy_fia_idx;
+ /* protects num_hdcp_streams reference count */
+ struct mutex hdcp_mutex;
+ /* the number of pipes using HDCP signalling out of this port */
+ unsigned int num_hdcp_streams;
+
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
@@ -1519,6 +1536,18 @@ static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
}
}
+static inline bool intel_encoder_is_mst(struct intel_encoder *encoder)
+{
+ return encoder->type == INTEL_OUTPUT_DP_MST;
+}
+
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct intel_encoder *encoder)
+{
+ return container_of(&encoder->base, struct intel_dp_mst_encoder,
+ base.base);
+}
+
static inline struct intel_digital_port *
enc_to_dig_port(struct intel_encoder *encoder)
{
@@ -1527,6 +1556,8 @@ enc_to_dig_port(struct intel_encoder *encoder)
if (intel_encoder_is_dig_port(intel_encoder))
return container_of(&encoder->base, struct intel_digital_port,
base.base);
+ else if (intel_encoder_is_mst(intel_encoder))
+ return enc_to_mst(encoder)->primary;
else
return NULL;
}
@@ -1537,13 +1568,6 @@ intel_attached_dig_port(struct intel_connector *connector)
return enc_to_dig_port(intel_attached_encoder(connector));
}
-static inline struct intel_dp_mst_encoder *
-enc_to_mst(struct intel_encoder *encoder)
-{
- return container_of(&encoder->base, struct intel_dp_mst_encoder,
- base.base);
-}
-
static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d6295eb20b63..bf1e9cf1c0f3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -38,7 +38,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include "i915_debugfs.h"
@@ -248,29 +247,6 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return max_link_clock * max_lanes;
}
-static int
-intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int max_dotclk = dev_priv->max_dotclk_freq;
- int ds_max_dotclk;
-
- int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-
- if (type != DP_DS_PORT_TYPE_VGA)
- return max_dotclk;
-
- ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
- intel_dp->downstream_ports);
-
- if (ds_max_dotclk != 0)
- max_dotclk = min(max_dotclk, ds_max_dotclk);
-
- return max_dotclk;
-}
-
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -636,6 +612,34 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
}
static enum drm_mode_status
+intel_dp_mode_valid_downstream(struct intel_connector *connector,
+ const struct drm_display_mode *mode,
+ int target_clock)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ const struct drm_display_info *info = &connector->base.display_info;
+ int tmds_clock;
+
+ if (intel_dp->dfp.max_dotclock &&
+ target_clock > intel_dp->dfp.max_dotclock)
+ return MODE_CLOCK_HIGH;
+
+ /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
+ tmds_clock = target_clock;
+ if (drm_mode_is_420_only(info, mode))
+ tmds_clock /= 2;
+
+ if (intel_dp->dfp.min_tmds_clock &&
+ tmds_clock < intel_dp->dfp.min_tmds_clock)
+ return MODE_CLOCK_LOW;
+ if (intel_dp->dfp.max_tmds_clock &&
+ tmds_clock > intel_dp->dfp.max_tmds_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
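Worked example with assumed limits (editor's illustration): a 3840x2160@60 mode has a ~594000 kHz dotclock; if the sink only accepts it as YCbCr 4:2:0, the TMDS clock checked above is halved to 297000 kHz, so a DFP with a 340000 kHz max_tmds_clock passes even though the raw dotclock exceeds it.

	/* sketch of the check above, assumed numbers */
	int target_clock = 594000;		/* kHz, 3840x2160@60 */
	int tmds_clock = target_clock / 2;	/* 4:2:0-only mode */
	/* 297000 <= 340000 -> MODE_OK on an HDMI 1.4-class DFP */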
+
+static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -645,15 +649,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->dev);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk;
+ int max_dotclk = dev_priv->max_dotclk_freq;
u16 dsc_max_output_bpp = 0;
u8 dsc_slice_count = 0;
+ enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
-
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -709,6 +712,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
+ status = intel_dp_mode_valid_downstream(intel_connector,
+ mode, target_clock);
+ if (status != MODE_OK)
+ return status;
+
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
@@ -1563,6 +1571,20 @@ intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
txbuf[3] = msg->size - 1;
}
+static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
+{
+ /*
+ * If we're trying to send the HDCP Aksv, we need to set a the Aksv
+ * select bit to inform the hardware to send the Aksv after our header
+ * since we can't access that data from software.
+ */
+ if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
+ msg->address == DP_AUX_HDCP_AKSV)
+ return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
+
+ return 0;
+}
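The helper's result feeds straight into the hardware transfer, as the hunks below show; schematically:

	/*
	 * For a native write to DP_AUX_HDCP_AKSV, flags carries
	 * DP_AUX_CH_CTL_AUX_AKSV_SELECT, so the hardware appends the
	 * software-inaccessible Aksv after the header.
	 */
	u32 flags = intel_dp_aux_xfer_flags(msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
				rxbuf, rxsize, flags);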
+
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
@@ -1570,6 +1592,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
+ u32 flags = intel_dp_aux_xfer_flags(msg);
int ret;
intel_dp_aux_header(txbuf, msg);
@@ -1590,7 +1613,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, 0);
+ rxbuf, rxsize, flags);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
@@ -1613,7 +1636,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
- rxbuf, rxsize, 0);
+ rxbuf, rxsize, flags);
if (ret > 0) {
msg->reply = rxbuf[0] >> 4;
/*
@@ -1954,19 +1977,72 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
-static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+ intel_dp->dfp.ycbcr_444_to_420);
+}
+
+static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state, int bpc)
+{
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
+
+ if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
+ clock /= 2;
+
+ return clock;
+}
+
+static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state, int bpc)
+{
+ int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
+
+ if (intel_dp->dfp.min_tmds_clock &&
+ tmds_clock < intel_dp->dfp.min_tmds_clock)
+ return false;
+
+ if (intel_dp->dfp.max_tmds_clock &&
+ tmds_clock > intel_dp->dfp.max_tmds_clock)
+ return false;
+
+ return true;
+}
+
+static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int bpc)
+{
+ return intel_hdmi_deep_color_possible(crtc_state, bpc,
+ intel_dp->has_hdmi_sink,
+ intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
+ intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
+}
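Worked example with an assumed DFP limit (editor's illustration) of the bpc step-down this enables in intel_dp_max_bpp() below: for a 594000 kHz 4:4:4 mode against a 600000 kHz max TMDS clock, 12 bpc needs 891000 kHz and 10 bpc needs 742500 kHz, so both are rejected and the loop settles on 8 bpc.

	/* sketch only: the TMDS feasibility test behind the loop below */
	static bool sketch_tmds_ok(int crtc_clock_khz, int bpc,
				   int max_tmds_khz)
	{
		return crtc_clock_khz * bpc / 8 <= max_tmds_khz;
	}
	/*
	 * sketch_tmds_ok(594000, 12, 600000) == false (891000 kHz)
	 * sketch_tmds_ok(594000, 10, 600000) == false (742500 kHz)
	 * sketch_tmds_ok(594000,  8, 600000) == true  (594000 kHz)
	 */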
+
+static int intel_dp_max_bpp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int bpp, bpc;
- bpp = pipe_config->pipe_bpp;
- bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
+ bpc = crtc_state->pipe_bpp / 3;
+
+ if (intel_dp->dfp.max_bpc)
+ bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
- if (bpc > 0)
- bpp = min(bpp, 3*bpc);
+ if (intel_dp->dfp.min_tmds_clock) {
+ for (; bpc >= 10; bpc -= 2) {
+ if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
+ break;
+ }
+ }
+ bpp = bpc * 3;
if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that dont have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
@@ -2288,7 +2364,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
limits.min_bpp = intel_dp_min_bpp(pipe_config);
- limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
+ limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
if (intel_dp_is_edp(intel_dp)) {
/*
@@ -2363,10 +2439,16 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!drm_mode_is_420_only(info, adjusted_mode) ||
- !intel_dp_get_colorimetry_status(intel_dp) ||
- !connector->ycbcr_420_allowed)
+ if (!connector->ycbcr_420_allowed)
+ return 0;
+
+ if (!drm_mode_is_420_only(info, adjusted_mode))
+ return 0;
+
+ if (intel_dp->dfp.ycbcr_444_to_420) {
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
return 0;
+ }
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
@@ -2575,6 +2657,34 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
+static void
+intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * DRRS and PSR can't be enabled together, so preference is given to PSR,
+ * as it allows more power savings by completely shutting down the display.
+ * To guarantee this, intel_dp_drrs_compute_config() must be called
+ * after intel_psr_compute_config().
+ */
+ if (pipe_config->has_psr)
+ return;
+
+ if (!intel_connector->panel.downclock_mode ||
+ dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ pipe_config->has_drrs = true;
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
+ intel_connector->panel.downclock_mode->clock,
+ pipe_config->port_clock, &pipe_config->dp_m2_n2,
+ constant_n, pipe_config->fec_enable);
+}
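The ordering requirement stated in the comment is satisfied at the call site later in this patch; schematically:

	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);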
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2605,7 +2715,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- pipe_config->has_drrs = false;
if (!intel_dp_port_has_audio(dev_priv, port))
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
@@ -2657,21 +2766,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
&pipe_config->dp_m_n,
constant_n, pipe_config->fec_enable);
- if (intel_connector->panel.downclock_mode != NULL &&
- dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
- pipe_config->has_drrs = true;
- intel_link_compute_m_n(output_bpp,
- pipe_config->lane_count,
- intel_connector->panel.downclock_mode->clock,
- pipe_config->port_clock,
- &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
- }
-
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
+ constant_n);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -3752,6 +3852,43 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 tmp;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
+ return;
+
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return;
+
+ tmp = intel_dp->has_hdmi_sink ?
+ DP_HDMI_DVI_OUTPUT_CONFIG : 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
+ drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
+ enableddisabled(intel_dp->has_hdmi_sink));
+
+ tmp = intel_dp->dfp.ycbcr_444_to_420 ?
+ DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
+ enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
+
+ tmp = 0;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
+ enableddisabled(false));
+}
+
static void intel_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
@@ -3789,6 +3926,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_configure_protocol_converter(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
@@ -4449,62 +4587,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
}
}
-static void
-intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 dpcd_ext[6];
-
- /*
- * Prior to DP1.3 the bit represented by
- * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
- * if it is set DP_DPCD_REV at 0000h could be at a value less than
- * the true capability of the panel. The only way to check is to
- * then compare 0000h and 2200h.
- */
- if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
- return;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
- &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
- drm_err(&i915->drm,
- "DPCD failed read at extended capabilities\n");
- return;
- }
-
- if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- drm_dbg_kms(&i915->drm,
- "DPCD extended DPCD rev less than base DPCD rev\n");
- return;
- }
-
- if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
- return;
-
- drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
- (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
-
- memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
-}
-
-bool
-intel_dp_read_dpcd(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) < 0)
- return false; /* aux transfer failed */
-
- intel_dp_extended_receiver_capabilities(intel_dp);
-
- drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
- intel_dp->dpcd);
-
- return intel_dp->dpcd[DP_DPCD_REV] != 0;
-}
-
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
u8 dprx = 0;
@@ -4563,7 +4645,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
/* this function is meant to be called only once */
drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
- if (!intel_dp_read_dpcd(intel_dp))
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
return false;
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
@@ -4634,11 +4716,23 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
return true;
}
+static bool
+intel_dp_has_sink_count(struct intel_dp *intel_dp)
+{
+ if (!intel_dp->attached_connector)
+ return false;
+
+ return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
+ intel_dp->dpcd,
+ &intel_dp->desc);
+}
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- if (!intel_dp_read_dpcd(intel_dp))
+ int ret;
+
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
return false;
/*
@@ -4653,18 +4747,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- /*
- * Some eDP panels do not set a valid value for sink count, that is why
- * it don't care about read it here and in intel_edp_init_dpcd().
- */
- if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, 0,
- DP_DPCD_QUIRK_NO_SINK_COUNT)) {
- u8 count;
- ssize_t r;
-
- r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
- if (r < 1)
+ if (intel_dp_has_sink_count(intel_dp)) {
+ ret = drm_dp_read_sink_count(&intel_dp->aux);
+ if (ret < 0)
return false;
/*
@@ -4672,7 +4757,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* a member variable in intel_dp will track any changes
* between short pulse interrupts.
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+ intel_dp->sink_count = ret;
/*
* SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
@@ -4685,32 +4770,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
- if (!drm_dp_is_branch(intel_dp->dpcd))
- return true; /* native DP sink */
-
- if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
- return true; /* no per-port downstream info */
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
- intel_dp->downstream_ports,
- DP_MAX_DOWNSTREAM_PORTS) < 0)
- return false; /* downstream port status fetch failed */
-
- return true;
-}
-
-static bool
-intel_dp_sink_can_mst(struct intel_dp *intel_dp)
-{
- u8 mstm_cap;
-
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
- return false;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
- return false;
-
- return mstm_cap & DP_MST_CAP;
+ return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
+ intel_dp->downstream_ports) == 0;
}
static bool
@@ -4720,7 +4781,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
return i915->params.enable_dp_mst &&
intel_dp->can_mst &&
- intel_dp_sink_can_mst(intel_dp);
+ drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
static void
@@ -4729,7 +4790,7 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder =
&dp_to_dig_port(intel_dp)->base;
- bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+ bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
@@ -5963,9 +6024,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
return connector_status_connected;
/* If we're HPD-aware, SINK_COUNT changes dynamically */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ if (intel_dp_has_sink_count(intel_dp) &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
-
return intel_dp->sink_count ?
connector_status_connected : connector_status_disconnected;
}
@@ -6106,16 +6166,103 @@ intel_dp_get_edid(struct intel_dp *intel_dp)
}
static void
+intel_dp_update_dfp(struct intel_dp *intel_dp,
+ const struct edid *edid)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ intel_dp->dfp.max_bpc =
+ drm_dp_downstream_max_bpc(intel_dp->dpcd,
+ intel_dp->downstream_ports, edid);
+
+ intel_dp->dfp.max_dotclock =
+ drm_dp_downstream_max_dotclock(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
+ intel_dp->dfp.min_tmds_clock =
+ drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ edid);
+ intel_dp->dfp.max_tmds_clock =
+ drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ edid);
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
+ connector->base.base.id, connector->base.name,
+ intel_dp->dfp.max_bpc,
+ intel_dp->dfp.max_dotclock,
+ intel_dp->dfp.min_tmds_clock,
+ intel_dp->dfp.max_tmds_clock);
+}
+
+static void
+intel_dp_update_420(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;
+
+ /* No YCbCr output support on gmch platforms */
+ if (HAS_GMCH(i915))
+ return;
+
+ /*
+ * ILK doesn't seem capable of DP YCbCr output. The
+ * displayed image is severely corrupted. SNB+ is fine.
+ */
+ if (IS_GEN(i915, 5))
+ return;
+
+ is_branch = drm_dp_is_branch(intel_dp->dpcd);
+ ycbcr_420_passthrough =
+ drm_dp_downstream_420_passthrough(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ ycbcr_444_to_420 =
+ drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports);
+
+ if (INTEL_GEN(i915) >= 11) {
+ /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
+ intel_dp->dfp.ycbcr_444_to_420 =
+ ycbcr_444_to_420 && !ycbcr_420_passthrough;
+
+ connector->base.ycbcr_420_allowed =
+ !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
+ } else {
+ /* 4:4:4->4:2:0 conversion is the only way */
+ intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
+
+ connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
+ connector->base.base.id, connector->base.name,
+ yesno(connector->base.ycbcr_420_allowed),
+ yesno(intel_dp->dfp.ycbcr_444_to_420));
+}
+
+static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
struct edid *edid;
intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp);
- intel_connector->detect_edid = edid;
+ connector->detect_edid = edid;
+
+ intel_dp_update_dfp(intel_dp, edid);
+ intel_dp_update_420(intel_dp);
+
+ if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
+ intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ intel_dp->has_audio = drm_detect_monitor_audio(edid);
+ }
- intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
@@ -6123,14 +6270,23 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
drm_dp_cec_unset_edid(&intel_dp->aux);
- kfree(intel_connector->detect_edid);
- intel_connector->detect_edid = NULL;
+ kfree(connector->detect_edid);
+ connector->detect_edid = NULL;
+ intel_dp->has_hdmi_sink = false;
intel_dp->has_audio = false;
intel_dp->edid_quirks = 0;
+
+ intel_dp->dfp.max_bpc = 0;
+ intel_dp->dfp.max_dotclock = 0;
+ intel_dp->dfp.min_tmds_clock = 0;
+ intel_dp->dfp.max_tmds_clock = 0;
+
+ intel_dp->dfp.ycbcr_444_to_420 = false;
+ connector->base.ycbcr_420_allowed = false;
}
static int
@@ -6149,6 +6305,9 @@ intel_dp_detect(struct drm_connector *connector,
drm_WARN_ON(&dev_priv->drm,
!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
@@ -6243,6 +6402,11 @@ out:
*/
intel_display_power_flush_work(dev_priv);
+ if (!intel_dp_is_edp(intel_dp))
+ drm_dp_set_subconnector_property(connector,
+ status,
+ intel_dp->dpcd,
+ intel_dp->downstream_ports);
return status;
}
@@ -6284,7 +6448,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
+ if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -6296,6 +6460,19 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
}
+ if (!edid) {
+ struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ struct drm_display_mode *mode;
+
+ mode = drm_dp_downstream_mode(connector->dev,
+ intel_dp->dpcd,
+ intel_dp->downstream_ports);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -6381,628 +6558,6 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
edp_panel_vdd_off_sync(intel_dp);
}
-static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
-{
- long ret;
-
-#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
- ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
- msecs_to_jiffies(timeout));
-
- if (!ret)
- DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
-}
-
-static
-int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
- u8 *an)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
- static const struct drm_dp_aux_msg msg = {
- .request = DP_AUX_NATIVE_WRITE,
- .address = DP_AUX_HDCP_AKSV,
- .size = DRM_HDCP_KSV_LEN,
- };
- u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
- ssize_t dpcd_ret;
- int ret;
-
- /* Output An first, that's easy */
- dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
- an, DRM_HDCP_AN_LEN);
- if (dpcd_ret != DRM_HDCP_AN_LEN) {
- drm_dbg_kms(&i915->drm,
- "Failed to write An over DP/AUX (%zd)\n",
- dpcd_ret);
- return dpcd_ret >= 0 ? -EIO : dpcd_ret;
- }
-
- /*
- * Since Aksv is Oh-So-Secret, we can't access it in software. So in
- * order to get it on the wire, we need to create the AUX header as if
- * we were writing the data, and then tickle the hardware to output the
- * data once the header is sent out.
- */
- intel_dp_aux_header(txbuf, &msg);
-
- ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
- rxbuf, sizeof(rxbuf),
- DP_AUX_CH_CTL_AUX_AKSV_SELECT);
- if (ret < 0) {
- drm_dbg_kms(&i915->drm,
- "Write Aksv over DP/AUX failed (%d)\n", ret);
- return ret;
- } else if (ret == 0) {
- drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
- return -EIO;
- }
-
- reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
- if (reply != DP_AUX_NATIVE_REPLY_ACK) {
- drm_dbg_kms(&i915->drm,
- "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
- reply);
- return -EIO;
- }
- return 0;
-}
-
-static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
- u8 *bksv)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
- DRM_HDCP_KSV_LEN);
- if (ret != DRM_HDCP_KSV_LEN) {
- drm_dbg_kms(&i915->drm,
- "Read Bksv from DP/AUX failed (%zd)\n", ret);
- return ret >= 0 ? -EIO : ret;
- }
- return 0;
-}
-
-static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
- u8 *bstatus)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- /*
- * For some reason the HDMI and DP HDCP specs call this register
- * definition by different names. In the HDMI spec, it's called BSTATUS,
- * but in DP it's called BINFO.
- */
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
- bstatus, DRM_HDCP_BSTATUS_LEN);
- if (ret != DRM_HDCP_BSTATUS_LEN) {
- drm_dbg_kms(&i915->drm,
- "Read bstatus from DP/AUX failed (%zd)\n", ret);
- return ret >= 0 ? -EIO : ret;
- }
- return 0;
-}
-
-static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
- u8 *bcaps)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
- bcaps, 1);
- if (ret != 1) {
- drm_dbg_kms(&i915->drm,
- "Read bcaps from DP/AUX failed (%zd)\n", ret);
- return ret >= 0 ? -EIO : ret;
- }
-
- return 0;
-}
-
-static
-int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
- bool *repeater_present)
-{
- ssize_t ret;
- u8 bcaps;
-
- ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
- if (ret)
- return ret;
-
- *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
- return 0;
-}
-
-static
-int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
- u8 *ri_prime)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
- ri_prime, DRM_HDCP_RI_LEN);
- if (ret != DRM_HDCP_RI_LEN) {
- drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
- ret);
- return ret >= 0 ? -EIO : ret;
- }
- return 0;
-}
-
-static
-int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
- bool *ksv_ready)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
- u8 bstatus;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
- &bstatus, 1);
- if (ret != 1) {
- drm_dbg_kms(&i915->drm,
- "Read bstatus from DP/AUX failed (%zd)\n", ret);
- return ret >= 0 ? -EIO : ret;
- }
- *ksv_ready = bstatus & DP_BSTATUS_READY;
- return 0;
-}
-
-static
-int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
- int num_downstream, u8 *ksv_fifo)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
- int i;
-
- /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
- for (i = 0; i < num_downstream; i += 3) {
- size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_AUX_HDCP_KSV_FIFO,
- ksv_fifo + i * DRM_HDCP_KSV_LEN,
- len);
- if (ret != len) {
- drm_dbg_kms(&i915->drm,
- "Read ksv[%d] from DP/AUX failed (%zd)\n",
- i, ret);
- return ret >= 0 ? -EIO : ret;
- }
- }
- return 0;
-}
-
-static
-int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
- int i, u32 *part)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
- return -EINVAL;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_AUX_HDCP_V_PRIME(i), part,
- DRM_HDCP_V_PRIME_PART_LEN);
- if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- drm_dbg_kms(&i915->drm,
- "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
- return ret >= 0 ? -EIO : ret;
- }
- return 0;
-}
-
-static
-int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
- bool enable)
-{
- /* Not used for single stream DisplayPort setups */
- return 0;
-}
-
-static
-bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
- u8 bstatus;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
- &bstatus, 1);
- if (ret != 1) {
- drm_dbg_kms(&i915->drm,
- "Read bstatus from DP/AUX failed (%zd)\n", ret);
- return false;
- }
-
- return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
-}
-
-static
-int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
- bool *hdcp_capable)
-{
- ssize_t ret;
- u8 bcaps;
-
- ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
- if (ret)
- return ret;
-
- *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
- return 0;
-}
-
-struct hdcp2_dp_errata_stream_type {
- u8 msg_id;
- u8 stream_type;
-} __packed;
-
-struct hdcp2_dp_msg_data {
- u8 msg_id;
- u32 offset;
- bool msg_detectable;
- u32 timeout;
- u32 timeout2; /* Added for non_paired situation */
-};
-
-static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
- { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
- { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
- false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
- { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
- false, 0, 0 },
- { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
- false, 0, 0 },
- { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
- true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
- { HDCP_2_2_AKE_SEND_PAIRING_INFO,
- DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
- HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
- { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
- { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
- false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
- { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
- 0, 0 },
- { HDCP_2_2_REP_SEND_RECVID_LIST,
- DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
- { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
- 0, 0 },
- { HDCP_2_2_REP_STREAM_MANAGE,
- DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
- 0, 0 },
- { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
- false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
-/* local define to shovel this through the write_2_2 interface */
-#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
- { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
- DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
- 0, 0 },
-};
-
-static int
-intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
- u8 *rx_status)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- ssize_t ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
- HDCP_2_2_DP_RXSTATUS_LEN);
- if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
- drm_dbg_kms(&i915->drm,
- "Read bstatus from DP/AUX failed (%zd)\n", ret);
- return ret >= 0 ? -EIO : ret;
- }
-
- return 0;
-}
-
-static
-int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
- u8 msg_id, bool *msg_ready)
-{
- u8 rx_status;
- int ret;
-
- *msg_ready = false;
- ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
- if (ret < 0)
- return ret;
-
- switch (msg_id) {
- case HDCP_2_2_AKE_SEND_HPRIME:
- if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
- *msg_ready = true;
- break;
- case HDCP_2_2_AKE_SEND_PAIRING_INFO:
- if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
- *msg_ready = true;
- break;
- case HDCP_2_2_REP_SEND_RECVID_LIST:
- if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
- *msg_ready = true;
- break;
- default:
- DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static ssize_t
-intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
- const struct hdcp2_dp_msg_data *hdcp2_msg_data)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_dp *dp = &dig_port->dp;
- struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
- u8 msg_id = hdcp2_msg_data->msg_id;
- int ret, timeout;
- bool msg_ready = false;
-
- if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
- timeout = hdcp2_msg_data->timeout2;
- else
- timeout = hdcp2_msg_data->timeout;
-
- /*
- * There is no way to detect the CERT, LPRIME and STREAM_READY
- * availability. So Wait for timeout and read the msg.
- */
- if (!hdcp2_msg_data->msg_detectable) {
- mdelay(timeout);
- ret = 0;
- } else {
- /*
- * As we want to check the msg availability at timeout, Ignoring
- * the timeout at wait for CP_IRQ.
- */
- intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
- ret = hdcp2_detect_msg_availability(dig_port,
- msg_id, &msg_ready);
- if (!msg_ready)
- ret = -ETIMEDOUT;
- }
-
- if (ret)
- drm_dbg_kms(&i915->drm,
- "msg_id %d, ret %d, timeout(mSec): %d\n",
- hdcp2_msg_data->msg_id, ret, timeout);
-
- return ret;
-}
-
-static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
- if (hdcp2_dp_msg_data[i].msg_id == msg_id)
- return &hdcp2_dp_msg_data[i];
-
- return NULL;
-}
-
-static
-int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
- void *buf, size_t size)
-{
- struct intel_dp *dp = &dig_port->dp;
- struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
- unsigned int offset;
- u8 *byte = buf;
- ssize_t ret, bytes_to_write, len;
- const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-
- hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
- if (!hdcp2_msg_data)
- return -EINVAL;
-
- offset = hdcp2_msg_data->offset;
-
- /* No msg_id in DP HDCP2.2 msgs */
- bytes_to_write = size - 1;
- byte++;
-
- hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
-
- while (bytes_to_write) {
- len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
- DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
-
- ret = drm_dp_dpcd_write(&dig_port->dp.aux,
- offset, (void *)byte, len);
- if (ret < 0)
- return ret;
-
- bytes_to_write -= ret;
- byte += ret;
- offset += ret;
- }
-
- return size;
-}
-
-static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
-{
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- u32 dev_cnt;
- ssize_t ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_HDCP_2_2_REG_RXINFO_OFFSET,
- (void *)rx_info, HDCP_2_2_RXINFO_LEN);
- if (ret != HDCP_2_2_RXINFO_LEN)
- return ret >= 0 ? -EIO : ret;
-
- dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
- HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
-
- if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
- dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
-
- ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
- HDCP_2_2_RECEIVER_IDS_MAX_LEN +
- (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
-
- return ret;
-}
-
-static
-int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
- u8 msg_id, void *buf, size_t size)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- unsigned int offset;
- u8 *byte = buf;
- ssize_t ret, bytes_to_recv, len;
- const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-
- hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
- if (!hdcp2_msg_data)
- return -EINVAL;
- offset = hdcp2_msg_data->offset;
-
- ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
- if (ret < 0)
- return ret;
-
- if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
- ret = get_receiver_id_list_size(dig_port);
- if (ret < 0)
- return ret;
-
- size = ret;
- }
- bytes_to_recv = size - 1;
-
- /* DP adaptation msgs has no msg_id */
- byte++;
-
- while (bytes_to_recv) {
- len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
- DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
- (void *)byte, len);
- if (ret < 0) {
- drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
- msg_id, ret);
- return ret;
- }
-
- bytes_to_recv -= ret;
- byte += ret;
- offset += ret;
- }
- byte = buf;
- *byte = msg_id;
-
- return size;
-}
-
-static
-int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
- bool is_repeater, u8 content_type)
-{
- int ret;
- struct hdcp2_dp_errata_stream_type stream_type_msg;
-
- if (is_repeater)
- return 0;
-
- /*
- * Errata for DP: As Stream type is used for encryption, Receiver
- * should be communicated with stream type for the decryption of the
- * content.
- * Repeater will be communicated with stream type as a part of it's
- * auth later in time.
- */
- stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
- stream_type_msg.stream_type = content_type;
-
- ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
- sizeof(stream_type_msg));
-
- return ret < 0 ? ret : 0;
-
-}
-
-static
-int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
-{
- u8 rx_status;
- int ret;
-
- ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
- if (ret)
- return ret;
-
- if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
- ret = HDCP_REAUTH_REQUEST;
- else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
- ret = HDCP_LINK_INTEGRITY_FAILURE;
- else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
- ret = HDCP_TOPOLOGY_CHANGE;
-
- return ret;
-}
-
-static
-int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
- bool *capable)
-{
- u8 rx_caps[3];
- int ret;
-
- *capable = false;
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
- rx_caps, HDCP_2_2_RXCAPS_LEN);
- if (ret != HDCP_2_2_RXCAPS_LEN)
- return ret >= 0 ? -EIO : ret;
-
- if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
- HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
- *capable = true;
-
- return 0;
-}
-
-static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
- .write_an_aksv = intel_dp_hdcp_write_an_aksv,
- .read_bksv = intel_dp_hdcp_read_bksv,
- .read_bstatus = intel_dp_hdcp_read_bstatus,
- .repeater_present = intel_dp_hdcp_repeater_present,
- .read_ri_prime = intel_dp_hdcp_read_ri_prime,
- .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
- .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
- .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
- .toggle_signalling = intel_dp_hdcp_toggle_signalling,
- .check_link = intel_dp_hdcp_check_link,
- .hdcp_capable = intel_dp_hdcp_capable,
- .write_2_2_msg = intel_dp_hdcp2_write_msg,
- .read_2_2_msg = intel_dp_hdcp2_read_msg,
- .config_stream_type = intel_dp_hdcp2_config_stream_type,
- .check_2_2_link = intel_dp_hdcp2_check_link,
- .hdcp_2_2_capable = intel_dp_hdcp2_capable,
- .protocol = HDCP_PROTOCOL_DP,
-};
-
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -7312,6 +6867,9 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
struct drm_i915_private *dev_priv = to_i915(connector->dev);
enum port port = dp_to_dig_port(intel_dp)->base.port;
+ if (!intel_dp_is_edp(intel_dp))
+ drm_connector_attach_dp_subconnector_property(connector);
+
if (!IS_G4X(dev_priv) && port != PORT_A)
intel_attach_force_audio_property(connector);
@@ -7710,6 +7268,15 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
refresh_rate);
}
+static void
+intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+ dev_priv->drrs.dp = intel_dp;
+}
+
/**
* intel_edp_drrs_enable - init drrs struct if supported
* @intel_dp: DP struct
@@ -7722,31 +7289,40 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!crtc_state->has_drrs) {
- drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
+ if (!crtc_state->has_drrs)
return;
- }
- if (dev_priv->psr.enabled) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR enabled. Not enabling DRRS.\n");
- return;
- }
+ drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
mutex_lock(&dev_priv->drrs.mutex);
+
if (dev_priv->drrs.dp) {
- drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
+ drm_warn(&dev_priv->drm, "DRRS already enabled\n");
goto unlock;
}
- dev_priv->drrs.busy_frontbuffer_bits = 0;
-
- dev_priv->drrs.dp = intel_dp;
+ intel_edp_drrs_enable_locked(intel_dp);
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
}
+static void
+intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+ int refresh;
+
+ refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
+ intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
+ }
+
+ dev_priv->drrs.dp = NULL;
+}
+
/**
* intel_edp_drrs_disable - Disable DRRS
* @intel_dp: DP struct
@@ -7767,16 +7343,45 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp,
return;
}
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv, old_crtc_state,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
- dev_priv->drrs.dp = NULL;
+ intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
mutex_unlock(&dev_priv->drrs.mutex);
cancel_delayed_work_sync(&dev_priv->drrs.work);
}
+/**
+ * intel_edp_drrs_update - Update DRRS state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function will update the DRRS state, disabling or enabling DRRS
+ * during fastsets. For full modesets, intel_edp_drrs_disable() and
+ * intel_edp_drrs_enable() should be called instead.
+ */
+void
+intel_edp_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ /* New state matches current one? */
+ if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
+ goto unlock;
+
+ if (crtc_state->has_drrs)
+ intel_edp_drrs_enable_locked(intel_dp);
+ else
+ intel_edp_drrs_disable_locked(intel_dp, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
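intel_edp_drrs_update() only acts when the requested state differs from the current one, keyed off whether drrs.dp is set. A minimal standalone model of that idempotence check, with the types invented for the sketch:

#include <stdio.h>

struct drrs { void *dp; }; /* non-NULL means DRRS is currently enabled */

static void drrs_update(struct drrs *d, int want_drrs, void *dp)
{
        /* new state already matches the current one: nothing to do */
        if (want_drrs == (d->dp != NULL))
                return;

        if (want_drrs)
                d->dp = dp;   /* enable path */
        else
                d->dp = NULL; /* disable path */
}

int main(void)
{
        struct drrs d = { 0 };
        int dummy;

        drrs_update(&d, 1, &dummy);
        drrs_update(&d, 1, &dummy); /* no-op: already enabled */
        printf("enabled=%d\n", d.dp != NULL);
        return 0;
}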
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -8208,10 +7813,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- if (INTEL_GEN(dev_priv) >= 11)
- connector->ycbcr_420_allowed = true;
-
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_dp_aux_init(intel_dp);
@@ -8236,7 +7837,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_add_properties(intel_dp, connector);
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
- int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
+ int ret = intel_dp_init_hdcp(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm,
"HDCP init failed, skipping.\n");
@@ -8280,6 +7881,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
encoder = &intel_encoder->base;
+ mutex_init(&dig_port->hdcp_mutex);
+
if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
@@ -8354,6 +7957,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
dig_port->hpd_pulse = intel_dp_hpd_pulse;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b901ab850cbd..08a1c0aa8b94 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -17,6 +17,7 @@ struct drm_encoder;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
struct drm_dp_vsc_sdp;
+struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
@@ -50,6 +51,7 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
@@ -81,6 +83,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
@@ -99,7 +103,6 @@ bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
@@ -128,4 +131,12 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+void intel_ddi_update_pipe(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+
+int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector);
+
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index acbd7eb66cbe..036f504ac7db 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -52,17 +52,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
}
}
-/*
- * Read the current backlight value from DPCD register(s) based
- * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
- */
-static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 read_val[2] = { 0x0 };
u8 mode_reg;
- u16 level = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
@@ -70,15 +64,29 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
drm_dbg_kms(&i915->drm,
"Failed to read the DPCD register 0x%x\n",
DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
- return 0;
+ return false;
}
+ return (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
+ DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+}
+
+/*
+ * Read the current backlight value from DPCD register(s) based
+ * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
+ */
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 read_val[2] = { 0x0 };
+ u16 level = 0;
+
/*
* If we're not in DPCD control mode yet, the programmed brightness
* value is meaningless and we should assume max brightness
*/
- if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) !=
- DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
+ if (!intel_dp_aux_backlight_dpcd_mode(connector))
return connector->panel.backlight.max;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
@@ -319,7 +327,8 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
- panel->backlight.enabled = panel->backlight.level != 0;
+ panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) &&
+ panel->backlight.level != 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
new file mode 100644
index 000000000000..03424d20e9f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2020 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_hdcp.h>
+#include <drm/drm_print.h>
+
+#include "intel_display_types.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_hdcp.h"
+
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+ DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+}
+
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
+ u8 *an)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u8 aksv[DRM_HDCP_KSV_LEN] = {};
+ ssize_t dpcd_ret;
+
+ /* Output An first, that's easy */
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
+ an, DRM_HDCP_AN_LEN);
+ if (dpcd_ret != DRM_HDCP_AN_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+
+ /*
+ * Since Aksv is Oh-So-Secret, we can't access it in software. So we
+ * send an empty buffer of the correct length through the DP helpers. On
+ * the other side, in the transfer hook, we'll generate a flag based on
+ * the destination address which will tickle the hardware to output the
+ * Aksv on our behalf after the header is sent.
+ */
+ dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV,
+ aksv, DRM_HDCP_KSV_LEN);
+ if (dpcd_ret != DRM_HDCP_KSV_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Failed to write Aksv over DP/AUX (%zd)\n",
+ dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+ return 0;
+}
+
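For reference, a user-space sketch of the two-step An/Aksv handoff above. dpcd_write() is a hypothetical stand-in for drm_dp_dpcd_write(), and the DPCD offsets are the HDCP 1.x ones as I recall them; the point is only the zeroed Aksv buffer:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define AN_LEN  8
#define KSV_LEN 5

/* hypothetical stand-in for drm_dp_dpcd_write() */
static ssize_t dpcd_write(unsigned int offset, const unsigned char *buf,
                          size_t len)
{
        (void)buf;
        printf("DPCD write @0x%05x, %zu bytes\n", offset, len);
        return (ssize_t)len;
}

int main(void)
{
        unsigned char an[AN_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char aksv[KSV_LEN];

        /* An is written verbatim */
        if (dpcd_write(0x6800c, an, AN_LEN) != AN_LEN)
                return 1;

        /* the Aksv buffer stays zeroed; on real hardware the AUX
         * transfer hook keys off this address and injects the key */
        memset(aksv, 0, sizeof(aksv));
        if (dpcd_write(0x68007, aksv, KSV_LEN) != KSV_LEN)
                return 1;

        return 0;
}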
+static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
+ u8 *bksv)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
+ DRM_HDCP_KSV_LEN);
+ if (ret != DRM_HDCP_KSV_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read Bksv from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
+ u8 *bstatus)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ /*
+ * For some reason the HDMI and DP HDCP specs call this register
+ * definition by different names. In the HDMI spec, it's called BSTATUS,
+ * but in DP it's called BINFO.
+ */
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
+ bstatus, DRM_HDCP_BSTATUS_LEN);
+ if (ret != DRM_HDCP_BSTATUS_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
+ u8 *bcaps)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+ bcaps, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bcaps from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
+ bool *repeater_present)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
+ u8 *ri_prime)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
+ ri_prime, DRM_HDCP_RI_LEN);
+ if (ret != DRM_HDCP_RI_LEN) {
+ drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
+ bool *ksv_ready)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ *ksv_ready = bstatus & DP_BSTATUS_READY;
+ return 0;
+}
+
+static
+int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
+ int num_downstream, u8 *ksv_fifo)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ int i;
+
+ /* KSV list is read via a 15-byte window (3 entries @ 5 bytes each) */
+ for (i = 0; i < num_downstream; i += 3) {
+ size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_AUX_HDCP_KSV_FIFO,
+ ksv_fifo + i * DRM_HDCP_KSV_LEN,
+ len);
+ if (ret != len) {
+ drm_dbg_kms(&i915->drm,
+ "Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ }
+ return 0;
+}
+
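The 15-byte window loop above is easiest to check with concrete numbers. A standalone sketch of just the chunking arithmetic, with a made-up device count:

#include <stdio.h>

#define KSV_LEN 5

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int num_downstream = 7; /* example device count */
        int i;

        /* the FIFO exposes at most 3 entries (15 bytes) per read, so a
         * 7-device list takes reads of 15, 15 and 5 bytes */
        for (i = 0; i < num_downstream; i += 3) {
                int len = min_int(num_downstream - i, 3) * KSV_LEN;

                printf("read ksv[%d..%d]: %d bytes\n",
                       i, i + len / KSV_LEN - 1, len);
        }
        return 0;
}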
+static
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
+ int i, u32 *part)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
+ return -EINVAL;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_AUX_HDCP_V_PRIME(i), part,
+ DRM_HDCP_V_PRIME_PART_LEN);
+ if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+ return 0;
+}
+
+static
+int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable)
+{
+ /* Not used for single stream DisplayPort setups */
+ return 0;
+}
+
+static
+bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+ u8 bstatus;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
+ &bstatus, 1);
+ if (ret != 1) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return false;
+ }
+
+ return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
+}
+
+static
+int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
+ bool *hdcp_capable)
+{
+ ssize_t ret;
+ u8 bcaps;
+
+ ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+ if (ret)
+ return ret;
+
+ *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
+ return 0;
+}
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+ u32 timeout2; /* Added for non_paired situation */
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+ { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0 },
+};
+
+static int
+intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
+ u8 *rx_status)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+ drm_dbg_kms(&i915->drm,
+ "Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ u8 msg_id = hdcp2_msg_data->msg_id;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+ * There is no way to detect the CERT, LPRIME and STREAM_READY
+ * availability, so wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+ * Since we want to check msg availability at the timeout,
+ * ignore the timeout while waiting for CP_IRQ.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(dig_port,
+ msg_id, &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ drm_dbg_kms(&i915->drm,
+ "msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
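A reduced model of the timeout selection in intel_dp_hdcp2_wait_for_msg(). The struct and the millisecond values here are illustrative, not the driver's types:

#include <stdio.h>

struct msg_policy {
        int detectable;  /* CP_IRQ + RxStatus can signal arrival */
        int timeout_ms;  /* normal (paired) timeout */
        int timeout2_ms; /* longer un-paired timeout, 0 if unused */
};

static int pick_timeout(const struct msg_policy *p, int is_paired)
{
        /* only AKE_Send_H_prime carries a second, un-paired timeout */
        if (!is_paired && p->timeout2_ms)
                return p->timeout2_ms;
        return p->timeout_ms;
}

int main(void)
{
        struct msg_policy hprime = { 1, 200, 1000 }; /* example values */

        printf("paired: %d ms, un-paired: %d ms\n",
               pick_timeout(&hprime, 1), pick_timeout(&hprime, 0));
        return 0;
}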
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+ if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+ return &hdcp2_dp_msg_data[i];
+
+ return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
+ void *buf, size_t size)
+{
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
+
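The write loop above splits a message into AUX-sized transactions after dropping the msg_id byte. A self-contained sketch of that arithmetic, where AUX_MAX mirrors DP_AUX_MAX_PAYLOAD_BYTES (16) and the offset is made up:

#include <stdio.h>
#include <stddef.h>

#define AUX_MAX 16 /* DP_AUX_MAX_PAYLOAD_BYTES */

int main(void)
{
        size_t size = 50;              /* whole message incl. msg_id */
        size_t to_write = size - 1;    /* msg_id byte is not sent on DP */
        unsigned int offset = 0x69000; /* illustrative DPCD offset */

        while (to_write) {
                size_t len = to_write > AUX_MAX ? AUX_MAX : to_write;

                printf("AUX write @0x%x, %zu bytes\n", offset, len);
                to_write -= len;
                offset += len;
        }
        return 0;
}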
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u32 dev_cnt;
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+ return ret;
+}
+
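get_receiver_id_list_size() reconstructs the length of the variable-sized RepeaterAuth_Send_ReceiverID_List. A sketch with the fixed header length written out; the field widths follow my reading of the HDCP 2.2 message layout, so treat them as assumptions:

#include <stdio.h>

#define RECEIVER_ID_LEN  5
#define MAX_DEVICE_COUNT 31
/* msg_id(1) + RxInfo(2) + seq_num_V(3) + V'(16) precede the ID list */
#define FIXED_LEN (1 + 2 + 3 + 16)

int main(void)
{
        unsigned char rx_info[2] = { 0x00, 0x70 }; /* example RxInfo */
        unsigned int dev_cnt;

        /* 5-bit count: bit 0 of byte 0 is the high bit, bits 7:4 of
         * byte 1 are the low nibble */
        dev_cnt = ((rx_info[0] & 0x1) << 4) | ((rx_info[1] >> 4) & 0xf);
        if (dev_cnt > MAX_DEVICE_COUNT)
                dev_cnt = MAX_DEVICE_COUNT;

        printf("ReceiverID_List size = %u bytes\n",
               FIXED_LEN + dev_cnt * RECEIVER_ID_LEN); /* 57 for 7 devs */
        return 0;
}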
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_size(dig_port);
+ if (ret < 0)
+ return ret;
+
+ size = ret;
+ }
+ bytes_to_recv = size - 1;
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
+ bool is_repeater, u8 content_type)
+{
+ int ret;
+ struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+ if (is_repeater)
+ return 0;
+
+ /*
+ * Errata for DP: since the stream type is used for encryption, the
+ * receiver must be told the stream type so it can decrypt the
+ * content. A repeater is told the stream type later, as part of its
+ * authentication.
+ */
+ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+ stream_type_msg.stream_type = content_type;
+
+ ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
+ sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
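The order of the RxStatus checks above is a priority: re-auth outranks link failure, which outranks READY (topology change). A toy decode, with the bit positions assumed for illustration:

#include <stdio.h>

#define RXSTATUS_READY       (1 << 0)
#define RXSTATUS_H_PRIME     (1 << 1)
#define RXSTATUS_PAIRING     (1 << 2)
#define RXSTATUS_REAUTH_REQ  (1 << 3)
#define RXSTATUS_LINK_FAILED (1 << 4)

enum { LINK_PROTECTED, REAUTH_REQUEST, LINK_INTEGRITY_FAILURE,
       TOPOLOGY_CHANGE };

static int check_link(unsigned char rx_status)
{
        /* re-auth outranks link failure, which outranks READY */
        if (rx_status & RXSTATUS_REAUTH_REQ)
                return REAUTH_REQUEST;
        if (rx_status & RXSTATUS_LINK_FAILED)
                return LINK_INTEGRITY_FAILURE;
        if (rx_status & RXSTATUS_READY)
                return TOPOLOGY_CHANGE;
        return LINK_PROTECTED;
}

int main(void)
{
        printf("%d\n", check_link(RXSTATUS_READY)); /* topology change */
        return 0;
}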
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
+ bool *capable)
+{
+ u8 rx_caps[3];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
+static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_hdcp_toggle_signalling,
+ .check_link = intel_dp_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
+};
+
+static int
+intel_dp_mst_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ int ret;
+
+ if (!enable)
+ usleep_range(6, 60); /* Bspec says >= 6us */
+
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base,
+ cpu_transcoder, enable);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "%s HDCP signalling failed (%d)\n",
+ enable ? "Enable" : "Disable", ret);
+ return ret;
+}
+
+static
+bool intel_dp_mst_hdcp_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_dp_query_stream_enc_status_ack_reply reply;
+ int ret;
+
+ if (!intel_dp_hdcp_check_link(dig_port, connector))
+ return false;
+
+ ret = drm_dp_send_query_stream_enc_status(&intel_dp->mst_mgr,
+ connector->port, &reply);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] failed QSES ret=%d\n",
+ connector->base.base.id, connector->base.name, ret);
+ return false;
+ }
+
+ return reply.auth_completed && reply.encryption_enabled;
+}
+
+static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
+ .write_an_aksv = intel_dp_hdcp_write_an_aksv,
+ .read_bksv = intel_dp_hdcp_read_bksv,
+ .read_bstatus = intel_dp_hdcp_read_bstatus,
+ .repeater_present = intel_dp_hdcp_repeater_present,
+ .read_ri_prime = intel_dp_hdcp_read_ri_prime,
+ .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
+ .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
+ .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
+ .toggle_signalling = intel_dp_mst_hdcp_toggle_signalling,
+ .check_link = intel_dp_mst_hdcp_check_link,
+ .hdcp_capable = intel_dp_hdcp_capable,
+
+ .protocol = HDCP_PROTOCOL_DP,
+};
+
+int intel_dp_init_hdcp(struct intel_digital_port *dig_port,
+ struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *intel_encoder = &dig_port->base;
+ enum port port = intel_encoder->port;
+ struct intel_dp *intel_dp = &dig_port->dp;
+
+ if (!is_hdcp_supported(dev_priv, port))
+ return 0;
+
+ if (intel_connector->mst_port)
+ return intel_hdcp_init(intel_connector, port,
+ &intel_dp_mst_hdcp_shim);
+ else if (!intel_dp_is_edp(intel_dp))
+ return intel_hdcp_init(intel_connector, port,
+ &intel_dp_hdcp_shim);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a23ed7290843..f2c8b56be9ea 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -410,10 +410,17 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_connector->base.base.id,
intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
- if (!intel_dp_get_link_train_fallback_values(intel_dp,
- intel_dp->link_rate,
- intel_dp->lane_count))
- /* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
- return;
+
+ if (intel_dp->hobl_active) {
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "Link Training failed with HOBL active, not enabling it from now on");
+ intel_dp->hobl_failed = true;
+ } else if (intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count)) {
+ return;
+ }
+
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
}
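The reworked retry logic reads more clearly flattened. A sketch of just the control flow, where fallback_exhausted() stands in for intel_dp_get_link_train_fallback_values() returning non-zero:

#include <stdio.h>

static int fallback_exhausted(void)
{
        return 1; /* pretend nothing lower is left to try */
}

int main(void)
{
        int hobl_active = 1, hobl_failed = 0;

        if (hobl_active) {
                /* first retry: same parameters, HOBL permanently off */
                hobl_failed = 1;
        } else if (fallback_exhausted()) {
                return 0; /* no retry possible, give up quietly */
        }

        /* hotplug uevent tells userspace to restart the modeset */
        printf("schedule retry work (hobl_failed=%d)\n", hobl_failed);
        return 0;
}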
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index a2d91a499700..64d885539e94 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -37,6 +37,7 @@
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_hdcp.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -352,6 +353,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
drm_dbg_kms(&i915->drm, "active links %d\n",
intel_dp->active_mst_links);
+ intel_hdcp_disable(intel_mst->connector);
+
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -556,6 +559,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
+
+ /* Enable hdcp if it's desired */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ pipe_config->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -709,9 +719,13 @@ static int
intel_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
@@ -799,6 +813,14 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
+
+ /* TODO: Figure out how to make HDCP work on GEN12+ */
+ if (INTEL_GEN(dev_priv) < 12) {
+ ret = intel_dp_init_hdcp(dig_port, intel_connector);
+ if (ret)
+ DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ }
+
/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
@@ -865,6 +887,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index afa7a378b31d..e08684e34078 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -147,6 +147,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
pll->info->name, onoff(state), onoff(cur_state));
}
+static i915_reg_t
+intel_combo_pll_enable_reg(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (IS_ELKHARTLAKE(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
+ return MG_PLL_ENABLE(0);
+
+ return CNL_DPLL_ENABLE(pll->info->id);
+}
+
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
* @crtc_state: CRTC, and its state, which has a shared dpll
@@ -3475,6 +3487,14 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
icl_set_active_port_dpll(crtc_state, port_dpll_id);
}
+static u32 intel_get_hti_plls(struct drm_i915_private *i915)
+{
+ if (!(i915->hti_state & HDPORT_ENABLED))
+ return 0;
+
+ return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
+}
+
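intel_get_hti_plls() is a guarded field extraction. A standalone model of the REG_FIELD_GET() step using a count-trailing-zeros shift; the hti_state bit layout here is assumed, and __builtin_ctz is a GCC/Clang builtin:

#include <stdio.h>

#define HDPORT_ENABLED        (1u << 0)    /* illustrative bit layout */
#define HDPORT_DPLL_USED_MASK (0xfu << 12)

/* minimal REG_FIELD_GET: shift the masked bits down to bit 0 */
static unsigned int field_get(unsigned int mask, unsigned int val)
{
        return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        unsigned int hti_state = HDPORT_ENABLED | (0x5u << 12);

        if (!(hti_state & HDPORT_ENABLED))
                return 0;

        /* DPLLs 0 and 2 are reserved by HTI in this example */
        printf("plls reserved: 0x%x\n",
               field_get(HDPORT_DPLL_USED_MASK, hti_state));
        return 0;
}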
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -3504,13 +3524,22 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
- if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
+ if (IS_ROCKETLAKE(dev_priv)) {
dpll_mask =
BIT(DPLL_ID_EHL_DPLL4) |
BIT(DPLL_ID_ICL_DPLL1) |
BIT(DPLL_ID_ICL_DPLL0);
- else
+ } else if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) {
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ } else {
dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
+ }
+
+ /* Eliminate DPLLs from consideration if reserved by HTI */
+ dpll_mask &= ~intel_get_hti_plls(dev_priv);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
@@ -3791,7 +3820,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ RKL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ RKL_DPLL_CFGCR1(id));
+ } else if (INTEL_GEN(dev_priv) >= 12) {
hw_state->cfgcr0 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
@@ -3820,12 +3854,7 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
-
- if (IS_ELKHARTLAKE(dev_priv) &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- enable_reg = MG_PLL_ENABLE(0);
- }
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
}
@@ -3844,7 +3873,10 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
i915_reg_t cfgcr0_reg, cfgcr1_reg;
- if (INTEL_GEN(dev_priv) >= 12) {
+ if (IS_ROCKETLAKE(dev_priv)) {
+ cfgcr0_reg = RKL_DPLL_CFGCR0(id);
+ cfgcr1_reg = RKL_DPLL_CFGCR1(id);
+ } else if (INTEL_GEN(dev_priv) >= 12) {
cfgcr0_reg = TGL_DPLL_CFGCR0(id);
cfgcr1_reg = TGL_DPLL_CFGCR1(id);
} else {
@@ -4020,11 +4052,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
static void combo_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
if (IS_ELKHARTLAKE(dev_priv) &&
pll->info->id == DPLL_ID_EHL_DPLL4) {
- enable_reg = MG_PLL_ENABLE(0);
/*
* We need to disable DC states when this DPLL is enabled.
@@ -4132,19 +4163,14 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
static void combo_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+ i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
- if (IS_ELKHARTLAKE(dev_priv) &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- enable_reg = MG_PLL_ENABLE(0);
- icl_pll_disable(dev_priv, pll, enable_reg);
+ icl_pll_disable(dev_priv, pll, enable_reg);
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4)
intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
pll->wakeref);
- return;
- }
-
- icl_pll_disable(dev_priv, pll, enable_reg);
}
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
@@ -4276,6 +4302,21 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info rkl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr rkl_pll_mgr = {
+ .dpll_info = rkl_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -4289,7 +4330,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (INTEL_GEN(dev_priv) >= 12)
+ if (IS_ROCKETLAKE(dev_priv))
+ dpll_mgr = &rkl_pll_mgr;
+ else if (INTEL_GEN(dev_priv) >= 12)
dpll_mgr = &tgl_pll_mgr;
else if (IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 307ed8ae9a19..237dbb1ba0ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -313,9 +313,15 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 24c3a0f212c6..135f5e8a4d70 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -424,6 +424,14 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
fbc->no_fbc_reason = reason;
}
+static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 5 || IS_G4X(i915))
+ return BIT_ULL(28);
+ else
+ return BIT_ULL(32);
+}
+
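The new helper caps the CFB search window at what the CFB base register can address. A sketch of the clamp with the generation check reduced to plain ints:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        int gen = 9, is_g4x = 0;
        uint64_t end = UINT64_MAX; /* no other limit in this example */
        uint64_t cfb_max;

        /* gen5+/g4x CFB base registers only hold a 28-bit address */
        cfb_max = (gen >= 5 || is_g4x) ? BIT_ULL(28) : BIT_ULL(32);
        end = min_u64(end, cfb_max);

        printf("CFB search window ends at 0x%llx\n",
               (unsigned long long)end);
        return 0;
}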
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
unsigned int size,
@@ -442,6 +450,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
else
end = U64_MAX;
+ end = min(end, intel_fbc_cfb_base_max(dev_priv));
+
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
@@ -1416,6 +1426,13 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
+ /*
+ * FBC is causing random underruns in CI execution on TGL platforms.
+ * Disable it while the problem is being debugged and analyzed.
+ */
+ if (IS_TIGERLAKE(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index bd39eb6a21b8..842c04e63214 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -451,8 +451,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
- !INTEL_DISPLAY_ENABLED(dev_priv)))
+ if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 2979ed2588eb..d898b370d7a4 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -232,6 +232,8 @@ static void frontbuffer_release(struct kref *ref)
RCU_INIT_POINTER(obj->frontbuffer, NULL);
spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+ i915_active_fini(&front->write);
+
i915_gem_object_put(obj);
kfree_rcu(front, rcu);
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a8d119b6b45c..e6b8d6dfb598 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -834,7 +834,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 1a0d49af2a08..5492076d1ae0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -148,9 +148,8 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
enum i915_power_well_id id;
+ intel_wakeref_t wakeref;
bool enabled = false;
/*
@@ -162,17 +161,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
else
id = SKL_DISP_PW_1;
- mutex_lock(&power_domains->lock);
-
/* PG1 (power well #1) needs to be enabled */
- for_each_power_well(dev_priv, power_well) {
- if (power_well->desc->id == id) {
- enabled = power_well->desc->ops->is_enabled(dev_priv,
- power_well);
- break;
- }
- }
- mutex_unlock(&power_domains->lock);
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ enabled = intel_display_power_well_is_enabled(dev_priv, id);
/*
* Another req for hdcp key loadability is enabled state of pll for
@@ -713,7 +704,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
intel_de_write(dev_priv, HDCP_REP_CTL,
intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
- ret = shim->toggle_signalling(dig_port, true);
+ ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
if (ret)
return ret;
@@ -801,6 +792,19 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
+ /*
+ * If there are other connectors on this port using HDCP, don't disable
+ * it. Instead, toggle the HDCP signalling off on that particular
+ * connector/pipe and exit.
+ */
+ if (dig_port->num_hdcp_streams > 0) {
+ ret = hdcp->shim->toggle_signalling(dig_port,
+ cpu_transcoder, false);
+ if (ret)
+ DRM_ERROR("Failed to disable HDCP signalling\n");
+ return ret;
+ }
+
hdcp->hdcp_encrypted = false;
intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
@@ -816,7 +820,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
intel_de_write(dev_priv, HDCP_REP_CTL,
intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
- ret = hdcp->shim->toggle_signalling(dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
@@ -876,6 +880,34 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
return container_of(hdcp, struct intel_connector, hdcp);
}
+static void intel_hdcp_update_value(struct intel_connector *connector,
+ u64 value, bool update_property)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
+
+ if (hdcp->value == value)
+ return;
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
+
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
+ dig_port->num_hdcp_streams--;
+ } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ dig_port->num_hdcp_streams++;
+ }
+
+ hdcp->value = value;
+ if (update_property) {
+ drm_connector_get(&connector->base);
+ schedule_work(&hdcp->prop_work);
+ }
+}
+
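intel_hdcp_update_value() keeps a per-port count of encrypted streams so a shared port is only torn down when the last stream drops. A reduced model, with enum values standing in for the DRM content-protection property values:

#include <stdio.h>

enum { UNDESIRED, DESIRED, ENABLED };

struct port { int num_hdcp_streams; };

static void update_value(struct port *p, int *cur, int value)
{
        if (*cur == value)
                return;

        /* count connectors on this port that are actively encrypted */
        if (*cur == ENABLED)
                p->num_hdcp_streams--;
        else if (value == ENABLED)
                p->num_hdcp_streams++;

        *cur = value;
}

int main(void)
{
        struct port p = { 0 };
        int a = DESIRED, b = DESIRED;

        update_value(&p, &a, ENABLED);
        update_value(&p, &b, ENABLED);
        update_value(&p, &a, DESIRED);
        printf("streams still enabled: %d\n", p.num_hdcp_streams);
        return 0;
}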
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
@@ -887,6 +919,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
int ret = 0;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
+
cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
@@ -903,15 +937,16 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
goto out;
}
- if (hdcp->shim->check_link(dig_port)) {
+ if (hdcp->shim->check_link(dig_port, connector)) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
}
goto out;
}
@@ -923,20 +958,23 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
goto out;
}
ret = _intel_hdcp_enable(connector);
if (ret) {
drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
goto out;
}
out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -962,6 +1000,8 @@ static void intel_hdcp_prop_work(struct work_struct *work)
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
+
+ drm_connector_put(&connector->base);
}
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
@@ -1600,7 +1640,8 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(dig_port, true);
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+ true);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to enable HDCP signalling. %d\n",
@@ -1650,7 +1691,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
- ret = hdcp->shim->toggle_signalling(dig_port, false);
+ ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
+ false);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to disable HDCP signalling. %d\n",
@@ -1766,16 +1808,18 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
"HDCP2.2 link stopped the encryption, %x\n",
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
goto out;
}
ret = hdcp->shim->check_2_2_link(dig_port);
if (ret == HDCP_LINK_PROTECTED) {
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
}
goto out;
}
@@ -1788,8 +1832,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
"HDCP2.2 Downstream topology change\n");
ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
goto out;
}
drm_dbg_kms(&dev_priv->drm,
@@ -1807,8 +1852,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_err(&dev_priv->drm,
"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
connector->base.name, connector->base.base.id, ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
goto out;
}
@@ -1818,8 +1863,9 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
"[%s:%d] Failed to enable hdcp2.2 (%d)\n",
connector->base.name, connector->base.base.id,
ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
goto out;
}
@@ -1835,6 +1881,9 @@ static void intel_hdcp_check_work(struct work_struct *work)
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ if (drm_connector_is_unregistered(&connector->base))
+ return;
+
if (!intel_hdcp2_check_link(connector))
schedule_delayed_work(&hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
@@ -1896,6 +1945,7 @@ static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
static int initialize_hdcp_port_data(struct intel_connector *connector,
+ enum port port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1903,8 +1953,7 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
struct hdcp_port_data *data = &hdcp->port_data;
if (INTEL_GEN(dev_priv) < 12)
- data->fw_ddi =
- intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
+ data->fw_ddi = intel_get_mei_fw_ddi_index(port);
else
/*
* As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1974,14 +2023,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector,
+static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector, shim);
+ ret = initialize_hdcp_port_data(connector, port, shim);
if (ret) {
drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
@@ -1991,6 +2040,7 @@ static void intel_hdcp2_init(struct intel_connector *connector,
}
int intel_hdcp_init(struct intel_connector *connector,
+ enum port port,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -2000,8 +2050,8 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(dev_priv))
- intel_hdcp2_init(connector, shim);
+ if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
+ intel_hdcp2_init(connector, port, shim);
ret =
drm_connector_attach_content_protection_property(&connector->base,
@@ -2025,6 +2075,7 @@ int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
int ret = -EINVAL;
@@ -2033,14 +2084,14 @@ int intel_hdcp_enable(struct intel_connector *connector,
return -ENOENT;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
drm_WARN_ON(&dev_priv->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
+ hdcp->cpu_transcoder = cpu_transcoder;
- if (INTEL_GEN(dev_priv) >= 12) {
- hdcp->cpu_transcoder = cpu_transcoder;
+ if (INTEL_GEN(dev_priv) >= 12)
hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
- }
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -2063,16 +2114,19 @@ int intel_hdcp_enable(struct intel_connector *connector,
if (!ret) {
schedule_delayed_work(&hdcp->check_work, check_link_interval);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
}
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
return ret;
}
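Note the lock ordering in intel_hdcp_check_link(), intel_hdcp_enable() and intel_hdcp_disable(): the per-connector hdcp->mutex is always taken before the shared dig_port->hdcp_mutex and released in reverse. A single global acquisition order is what keeps the nested pair deadlock-free; a small pthread sketch of the convention (names are illustrative):

#include <pthread.h>

static pthread_mutex_t connector_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* Always connector first, port second; unlock in reverse. Two threads
 * that both follow this rule can never deadlock on the pair. */
static void do_port_work(void (*work)(void *), void *arg)
{
	pthread_mutex_lock(&connector_lock);
	pthread_mutex_lock(&port_lock);
	work(arg);
	pthread_mutex_unlock(&port_lock);
	pthread_mutex_unlock(&connector_lock);
}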
int intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret = 0;
@@ -2080,15 +2134,20 @@ int intel_hdcp_disable(struct intel_connector *connector)
return -ENOENT;
mutex_lock(&hdcp->mutex);
+ mutex_lock(&dig_port->hdcp_mutex);
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- if (hdcp->hdcp2_encrypted)
- ret = _intel_hdcp2_disable(connector);
- else if (hdcp->hdcp_encrypted)
- ret = _intel_hdcp_disable(connector);
- }
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
+ if (hdcp->hdcp2_encrypted)
+ ret = _intel_hdcp2_disable(connector);
+ else if (hdcp->hdcp_encrypted)
+ ret = _intel_hdcp_disable(connector);
+out:
+ mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
cancel_delayed_work_sync(&hdcp->check_work);
return ret;
@@ -2102,11 +2161,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
- bool content_protection_type_changed =
+ bool content_protection_type_changed, desired_and_not_enabled = false;
+
+ if (!connector->hdcp.shim)
+ return;
+
+ content_protection_type_changed =
(conn_state->hdcp_content_type != hdcp->content_type &&
conn_state->content_protection !=
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
- bool desired_and_not_enabled = false;
/*
* During the HDCP encryption session if Type change is requested,
@@ -2159,12 +2222,39 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
void intel_hdcp_cleanup(struct intel_connector *connector)
{
- if (!connector->hdcp.shim)
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
return;
- mutex_lock(&connector->hdcp.mutex);
- kfree(connector->hdcp.port_data.streams);
- mutex_unlock(&connector->hdcp.mutex);
+ /*
+ * If the connector is registered, it's possible userspace could kick
+ * off another HDCP enable, which would re-spawn the workers.
+ */
+ drm_WARN_ON(connector->base.dev,
+ connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
+
+ /*
+ * Now that the connector is not registered, check_work won't be run,
+ * but cancel any outstanding instances of it
+ */
+ cancel_delayed_work_sync(&hdcp->check_work);
+
+ /*
+ * We don't cancel prop_work in the same way as check_work since it
+ * requires connection_mutex which could be held while calling this
+ * function. Instead, we rely on the connector references grabbed before
+ * scheduling prop_work to ensure the connector is alive when prop_work
+ * is run. So if we're in the destroy path (which is where this
+ * function should be called), we're "guaranteed" that prop_work is not
+ * active (tl;dr This Should Never Happen).
+ */
+ drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
+
+ mutex_lock(&hdcp->mutex);
+ kfree(hdcp->port_data.streams);
+ hdcp->shim = NULL;
+ mutex_unlock(&hdcp->mutex);
}
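The comments above encode two different worker-lifetime rules: check_work can be cancelled synchronously once the connector is unregistered, while prop_work is instead kept safe by the drm_connector_get() taken in intel_hdcp_update_value() before scheduling and the drm_connector_put() at the end of intel_hdcp_prop_work(). A simplified user-space model of that get-before-schedule rule (hypothetical types, not drm code):

#include <stdatomic.h>
#include <stdlib.h>

struct object { atomic_int refs; };

static void get(struct object *o) { atomic_fetch_add(&o->refs, 1); }

static void put(struct object *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);		/* last reference gone */
}

static void prop_worker(struct object *o)
{
	/* ... update the property ... */
	put(o);				/* pairs with get() below */
}

static void schedule_prop_work(struct object *o)
{
	get(o);				/* keep @o alive until the worker runs */
	prop_worker(o);			/* stands in for schedule_work() */
}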
void intel_hdcp_atomic_check(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 86bbaec120cc..1bbf5b67ed0a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -22,7 +22,7 @@ enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
+int intel_hdcp_init(struct intel_connector *connector, enum port port,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index de2ce5632b94..3f2008d845c2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1477,7 +1477,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
return ret;
}
-static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
+ enum transcoder cpu_transcoder)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -1494,13 +1495,15 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
usleep_range(25, 50);
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+ false);
if (ret) {
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+ true);
if (ret) {
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
@@ -1512,6 +1515,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
static
int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
bool enable)
{
struct intel_hdmi *hdmi = &dig_port->hdmi;
@@ -1522,7 +1526,8 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
if (!enable)
usleep_range(6, 60); /* Bspec says >= 6us */
- ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable);
+ ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, cpu_transcoder,
+ enable);
if (ret) {
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
@@ -1534,17 +1539,17 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
* opportunity and enc_en signalling in KABYLAKE.
*/
if (IS_KABYLAKE(dev_priv) && enable)
- return kbl_repositioning_enc_en_signal(connector);
+ return kbl_repositioning_enc_en_signal(connector,
+ cpu_transcoder);
return 0;
}
static
-bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
+bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_connector *connector =
- dig_port->hdmi.attached_connector;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
@@ -1572,13 +1577,14 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port)
}
static
-bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port)
+bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
+ struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
int retry;
for (retry = 0; retry < 3; retry++)
- if (intel_hdmi_hdcp_check_link_once(dig_port))
+ if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
drm_err(&i915->drm, "Link check failed\n");
@@ -2271,35 +2277,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode);
}
-static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
- int bpc)
+bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int i;
- if (HAS_GMCH(dev_priv))
- return false;
-
- if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
- return false;
-
if (crtc_state->pipe_bpp < bpc * 3)
return false;
- if (!crtc_state->has_hdmi_sink)
- return false;
-
- /*
- * HDMI deep color affects the clocks, so it's only possible
- * when not cloning with other encoder types.
- */
- if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+ if (!has_hdmi_sink)
return false;
for_each_new_connector_in_state(state, connector, connector_state, i) {
@@ -2308,7 +2297,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ if (ycbcr420_output) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -2327,6 +2316,30 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
+ return true;
+}
+
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+ int bpc)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (HAS_GMCH(dev_priv))
+ return false;
+
+ if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+ return false;
+
+ /*
+ * HDMI deep color affects the clocks, so it's only possible
+ * when not cloning with other encoder types.
+ */
+ if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI))
+ return false;
+
/* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
bpc == 10 && IS_GEN(dev_priv, 11) &&
@@ -2334,7 +2347,10 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
- return true;
+ return intel_hdmi_deep_color_possible(crtc_state, bpc,
+ crtc_state->has_hdmi_sink,
+ crtc_state->output_format ==
+ INTEL_OUTPUT_FORMAT_YCBCR420);
}
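In the extracted helper above, pipe_bpp counts bits per pixel summed over all three color channels, so a component depth of bpc requires pipe_bpp >= 3 * bpc: a 24 bpp pipe carries 8 bpc, while 10 and 12 bpc deep color need 30 and 36 bpp. A trivial worked example:

#include <stdbool.h>
#include <stdio.h>

static bool pipe_supports_bpc(int pipe_bpp, int bpc)
{
	return pipe_bpp >= bpc * 3;	/* three channels per pixel */
}

int main(void)
{
	printf("%d\n", pipe_supports_bpc(24, 8));	/* 1: 8 bpc fits   */
	printf("%d\n", pipe_supports_bpc(24, 10));	/* 0: needs 30 bpp */
	printf("%d\n", pipe_supports_bpc(36, 12));	/* 1: 12 bpc fits  */
	return 0;
}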
static int
@@ -2459,6 +2475,23 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
+static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+
+ if (!crtc_state->has_hdmi_sink)
+ return false;
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ return intel_hdmi->has_audio;
+ else
+ return intel_conn_state->force_audio == HDMI_AUDIO_ON;
+}
+
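intel_hdmi_has_audio() flattens the old nested branches into one rule: audio is only possible on an HDMI sink, AUTO defers to what the sink's EDID advertised, and ON/OFF override it. A compact sketch of the tri-state resolution (enum names are illustrative):

#include <stdbool.h>

enum force_audio { AUDIO_AUTO, AUDIO_OFF, AUDIO_ON };

static bool resolve_audio(bool has_hdmi_sink, bool sink_has_audio,
			  enum force_audio force)
{
	if (!has_hdmi_sink)		/* DVI sinks cannot carry audio */
		return false;
	if (force == AUDIO_AUTO)
		return sink_has_audio;	/* trust the EDID */
	return force == AUDIO_ON;
}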
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2468,8 +2501,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
- struct intel_digital_connector_state *intel_conn_state =
- to_intel_digital_connector_state(conn_state);
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -2495,13 +2526,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
- if (pipe_config->has_hdmi_sink) {
- if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio = intel_hdmi->has_audio;
- else
- pipe_config->has_audio =
- intel_conn_state->force_audio == HDMI_AUDIO_ON;
- }
+ pipe_config->has_audio =
+ intel_hdmi_has_audio(encoder, pipe_config, conn_state);
ret = intel_hdmi_compute_clock(encoder, pipe_config);
if (ret)
@@ -2667,6 +2693,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (!INTEL_DISPLAY_ENABLED(dev_priv))
+ return connector_status_disconnected;
+
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (INTEL_GEN(dev_priv) >= 11 &&
@@ -3250,7 +3279,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
- intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
if (HAS_DDI(dev_priv))
@@ -3264,7 +3292,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_hdmi->attached_connector = intel_connector;
if (is_hdcp_supported(dev_priv, port)) {
- int ret = intel_hdcp_init(intel_connector,
+ int ret = intel_hdcp_init(intel_connector, port,
&intel_hdmi_hdcp_shim);
if (ret)
drm_dbg_kms(&dev_priv->drm,
@@ -3335,6 +3363,8 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder = &dig_port->base;
+ mutex_init(&dig_port->hdcp_mutex);
+
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
@@ -3382,6 +3412,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
+ intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
/*
* BSpec is unclear about HDMI+HDMI cloning on g4x, but it seems
* to work on real hardware. And since g4x can send infoframes to
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 5b348dcab77a..15eb0ccde76e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -48,5 +48,7 @@ void intel_read_infoframe(struct intel_encoder *encoder,
union hdmi_infoframe *frame);
bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
+bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc,
+ bool has_hdmi_sink, bool ycbcr420_output);
#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 3f1d7b804a66..5c58c1ed6493 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -81,33 +81,12 @@
*
* It is only valid and used by digital port encoder.
*
- * Return pin that is associatade with @port and HDP_NONE if no pin is
- * hard associated with that @port.
+ * Return pin that is associated with @port.
*/
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- enum phy phy = intel_port_to_phy(dev_priv, port);
-
- /*
- * RKL + TGP PCH is a special case; we effectively choose the hpd_pin
- * based on the DDI rather than the PHY (i.e., the last two outputs
- * shold be HPD_PORT_{D,E} rather than {C,D}. Note that this differs
- * from the behavior of both TGL+TGP and RKL+CMP.
- */
- if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
- return HPD_PORT_A + port - PORT_A;
-
- switch (phy) {
- case PHY_F:
- return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
- case PHY_A ... PHY_E:
- case PHY_G ... PHY_I:
- return HPD_PORT_A + phy - PHY_A;
- default:
- MISSING_CASE(phy);
- return HPD_NONE;
- }
+ return HPD_PORT_A + port - PORT_A;
}
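With the PHY indirection and the RKL+TGP special case gone, the lookup above is plain enum arithmetic, valid only because the port and hpd_pin enumerators are declared in the same contiguous order. A self-contained sketch of the identity mapping (the enum values below are invented for illustration):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };
enum hpd_pin { HPD_PORT_A = 3, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D };

static enum hpd_pin pin_default(enum port port)
{
	return HPD_PORT_A + port - PORT_A;	/* relies on contiguity */
}

int main(void)
{
	printf("%d\n", pin_default(PORT_C));	/* prints 5, HPD_PORT_C */
	return 0;
}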
#define HPD_STORM_DETECT_PERIOD 1000
@@ -503,7 +482,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* only the one of them (DP) will have ->hpd_pulse().
*/
for_each_intel_encoder(&dev_priv->drm, encoder) {
- bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
enum port port = encoder->port;
bool long_hpd;
@@ -511,7 +489,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (!has_hpd_pulse)
+ if (!intel_encoder_has_hpd_pulse(encoder))
continue;
long_hpd = long_mask & BIT(pin);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index b781bf469644..dc1b35559afd 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -571,7 +571,7 @@ bool lspcon_init(struct intel_digital_port *dig_port)
return false;
}
- if (!intel_dp_read_dpcd(dp)) {
+ if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) {
DRM_ERROR("LSPCON DPCD read failed\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 1888611244db..e65c2de522c3 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -456,12 +456,6 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return 0;
}
-static enum drm_connector_status
-intel_lvds_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
@@ -490,7 +484,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .detect = intel_lvds_detect,
+ .detect = intel_panel_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 4072d7062efd..9f23bac0d792 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -40,8 +40,6 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
-#define CRC_PMIC_PWM_PERIOD_NS 21333
-
void
intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode)
@@ -594,10 +592,10 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
static u32 pwm_get_backlight(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
- int duty_ns;
+ struct pwm_state state;
- duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
- return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+ pwm_get_state(panel->backlight.pwm, &state);
+ return pwm_get_relative_duty_cycle(&state, 100);
}
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -671,9 +669,9 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
- int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
- pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
}
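The conversions above move the CRC PMIC backlight from the legacy pwm_config()/pwm_get_duty_cycle() calls to the atomic API, where a pwm_state carries period and duty_cycle in nanoseconds and the relative-duty helpers convert to and from a 0..100 scale. A minimal model of that conversion math (a sketch of the idea, not the kernel helpers):

#include <stdio.h>

struct pwm_state_model { unsigned long period; unsigned long duty_cycle; };

static unsigned long get_rel_duty(const struct pwm_state_model *s,
				  unsigned long scale)
{
	/* round to nearest, as the kernel helper does */
	return (s->duty_cycle * scale + s->period / 2) / s->period;
}

static void set_rel_duty(struct pwm_state_model *s, unsigned long duty,
			 unsigned long scale)
{
	s->duty_cycle = s->period * duty / scale;
}

int main(void)
{
	struct pwm_state_model s = { .period = 21333 };	/* old CRC PMIC period */

	set_rel_duty(&s, 50, 100);
	printf("%lu ns -> %lu%%\n", s.duty_cycle, get_rel_duty(&s, 100));
	return 0;
}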
static void
@@ -842,10 +840,8 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
struct intel_panel *panel = &connector->panel;
- /* Disable the backlight */
- intel_panel_actually_set_backlight(old_conn_state, 0);
- usleep_range(2000, 3000);
- pwm_disable(panel->backlight.pwm);
+ panel->backlight.pwm_state.enabled = false;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
}
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -1177,9 +1173,12 @@ static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_panel *panel = &connector->panel;
+ int level = panel->backlight.level;
- pwm_enable(panel->backlight.pwm);
- intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
+ level = intel_panel_compute_brightness(connector, level);
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ panel->backlight.pwm_state.enabled = true;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1543,18 +1542,9 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
-static u32 get_backlight_max_vbt(struct intel_connector *connector)
+static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
- u32 pwm;
-
- if (!panel->backlight.hz_to_pwm) {
- drm_dbg_kms(&dev_priv->drm,
- "backlight frequency conversion not supported\n");
- return 0;
- }
if (pwm_freq_hz) {
drm_dbg_kms(&dev_priv->drm,
@@ -1567,6 +1557,22 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
pwm_freq_hz);
}
+ return pwm_freq_hz;
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+ u32 pwm;
+
+ if (!panel->backlight.hz_to_pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
+ return 0;
+ }
+
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
drm_dbg_kms(&dev_priv->drm,
@@ -1891,8 +1897,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
- u32 level, ns;
- int retval;
+ u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
@@ -1910,30 +1915,28 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(panel->backlight.pwm);
-
- panel->backlight.min = 0; /* 0% */
panel->backlight.max = 100; /* 100% */
- level = intel_panel_compute_brightness(connector, 100);
- ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+ panel->backlight.min = get_backlight_min_vbt(connector);
- retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
- if (retval < 0) {
- drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
- pwm_put(panel->backlight.pwm);
- panel->backlight.pwm = NULL;
- return retval;
- }
+ if (pwm_is_enabled(panel->backlight.pwm)) {
+ /* PWM is already enabled, use existing settings */
+ pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+
+ level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
+ 100);
+ level = intel_panel_compute_brightness(connector, level);
+ panel->backlight.level = clamp(level, panel->backlight.min,
+ panel->backlight.max);
+ panel->backlight.enabled = true;
- level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
- panel->backlight.level =
- intel_panel_compute_brightness(connector, level);
- panel->backlight.enabled = panel->backlight.level != 0;
+ drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+ get_vbt_pwm_freq(dev_priv), level);
+ } else {
+ /* Set period from VBT frequency, leave other settings at 0. */
+ panel->backlight.pwm_state.period =
+ NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+ }
drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
desc);
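When the controller is not already running, the branch above seeds only pwm_state.period from the VBT frequency, i.e. period_ns = NSEC_PER_SEC / pwm_freq_hz. A one-line worked example:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

static unsigned long vbt_freq_to_period_ns(unsigned int pwm_freq_hz)
{
	return NSEC_PER_SEC / pwm_freq_hz;
}

int main(void)
{
	printf("%lu\n", vbt_freq_to_period_ns(200));	/* 200 Hz -> 5000000 ns */
	return 0;
}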
@@ -2092,6 +2095,17 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
}
}
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 968b95281cb4..5b813fe90557 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -23,6 +23,8 @@ int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index bf9e320c547d..40e9cb29233d 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -553,6 +553,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FAST_WAKE(7);
}
+ if (dev_priv->psr.psr2_sel_fetch_enabled) {
+ /* WA 1408330847 */
+ if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK);
+
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+ PSR2_MAN_TRK_CTL_ENABLE);
+ } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ intel_de_write(dev_priv,
+ PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+ }
+
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
@@ -663,6 +679,38 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}
+static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (!dev_priv->params.enable_psr2_sel_fetch) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, disabled by parameter\n");
+ return false;
+ }
+
+ if (crtc_state->uapi.async_flip) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, async flip enabled\n");
+ return false;
+ }
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 sel fetch not enabled, plane rotated\n");
+ return false;
+ }
+ }
+
+ return crtc_state->enable_psr2_sel_fetch = true;
+}
+
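The validator above short-circuits on each disqualifying condition (module parameter, async flip, any rotated plane) and, on success, records the verdict in the crtc state as it returns it; that is what the assignment-in-return on its last line does. A condensed sketch of the same shape (model types only):

#include <stdbool.h>

struct model_state { bool enable_sel_fetch; };

static bool sel_fetch_valid(struct model_state *s, bool param_enabled,
			    bool async_flip, bool any_plane_rotated)
{
	if (!param_enabled || async_flip || any_plane_rotated)
		return false;

	/* store the verdict and return it in one step */
	return s->enable_sel_fetch = true;
}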
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -732,22 +780,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- /*
- * Some platforms lack PSR2 HW tracking and instead require manual
- * tracking by software. In this case, the driver is required to track
- * the areas that need updates and program hardware to send selective
- * updates.
- *
- * So until the software tracking is implemented, PSR2 needs to be
- * disabled for platforms without PSR2 HW tracking.
- */
- if (!HAS_PSR_HW_TRACKING(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
- "No PSR2 HW tracking in the platform\n");
- return false;
+ if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ !HAS_PSR_HW_TRACKING(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
+ return false;
+ }
}
- if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
+ if (!crtc_state->enable_psr2_sel_fetch &&
+ (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
@@ -898,6 +941,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
val |= EXITLINE_ENABLE;
intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
}
+
+ if (HAS_PSR_HW_TRACKING(dev_priv))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
+ dev_priv->psr.psr2_sel_fetch_enabled ?
+ IGNORE_PSR2_HW_TRACKING : 0);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -919,6 +967,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
/* DC5/DC6 requires at least 6 idle frames */
val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
dev_priv->psr.dc3co_exit_delay = val;
+ dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -1058,6 +1107,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
psr_status_mask, 2000))
drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+ /* WA 1408330847 */
+ if (dev_priv->psr.psr2_sel_fetch_enabled &&
+ (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0) ||
+ IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
+ intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -1115,6 +1171,32 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
intel_psr_exit(dev_priv);
}
+void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct i915_psr *psr = &dev_priv->psr;
+
+ if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
+ !crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
+ crtc_state->psr2_man_track_ctl);
+}
+
+void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ crtc_state->psr2_man_track_ctl = PSR2_MAN_TRK_CTL_ENABLE |
+ PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+}
+
/**
* intel_psr_update - Update PSR state
* @intel_dp: Intel DP
@@ -1672,7 +1754,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
return;
intel_connector = to_intel_connector(connector);
- dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
+ dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
if (dev_priv->psr.dp != &dig_port->dp)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index b4515186d5f4..6a83c8e682e6 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -13,6 +13,8 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_dp;
+struct intel_crtc;
+struct intel_atomic_state;
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
@@ -43,5 +45,8 @@ void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
+void intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5e9fb349c829..4eaa4aa86ecd 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2084,14 +2084,18 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
- u16 response;
+ struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
+ u16 response;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index c89f5f7ccb06..63040cb0d4e1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -1626,8 +1626,7 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
&plane_state->uapi.dst,
0, INT_MAX);
- if (hscale < 0x10000)
- return pixel_rate;
+ hscale = max(hscale, 0x10000u);
/* Decimation steps at 2x,4x,8x,16x */
decimate = ilog2(hscale >> 16);
@@ -1640,8 +1639,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
limit -= decimate;
/* -10% for RGB */
- if (fb->format->cpp[0] >= 4)
- limit--; /* -10% for RGB */
+ if (!fb->format->is_yuv)
+ limit--;
/*
* We should also do -10% if sprite scaling is enabled
@@ -2843,8 +2842,9 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
- /* Wa_14010477008:tgl[a0..c0] */
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
+ /* Wa_14010477008:tgl[a0..c0],rkl[all] */
+ if (IS_ROCKETLAKE(dev_priv) ||
+ IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
return false;
return plane_id < PLANE_SPRITE4;
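In the g4x_sprite_min_cdclk() hunk earlier in this file, hscale is 16.16 fixed point (0x10000 is 1.0), so clamping to at least 1.0 and taking ilog2 of the integer part yields the 2x/4x/8x/16x decimation step count. A worked example:

#include <stdio.h>

static int ilog2_u(unsigned int v)
{
	int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int hscale = 5 << 16;		/* downscaling by 5x */

	if (hscale < 0x10000)
		hscale = 0x10000;		/* clamp to 1.0, as max() does */
	printf("decimate = %d\n", ilog2_u(hscale >> 16));	/* 2 -> 4x step */
	return 0;
}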
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 777032d9697b..7a7b99b015a5 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1706,6 +1706,9 @@ intel_tv_detect(struct drm_connector *connector,
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
+
if (force) {
struct intel_load_detect_pipe tmp;
int ret;
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 6faabd4f6d49..54bcc6a6947c 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -293,8 +293,12 @@ struct bdb_general_features {
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
-#define DVO_PORT_DPG 15
-#define DVO_PORT_HDMIG 16
+#define DVO_PORT_DPG 15 /* 217 */
+#define DVO_PORT_HDMIG 16 /* 217 */
+#define DVO_PORT_DPH 17 /* 217 */
+#define DVO_PORT_HDMIH 18 /* 217 */
+#define DVO_PORT_DPI 19 /* 217 */
+#define DVO_PORT_HDMII 20 /* 217 */
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -330,6 +334,8 @@ enum vbt_gmbus_ddi {
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DP_AUX_G 0x70
+#define DP_AUX_H 0x80
+#define DP_AUX_I 0x90
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 052e0b31a2da..5e5522923b1e 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1585,6 +1585,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+ .detect = intel_panel_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index d0a514301575..4070b00c3690 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -483,7 +483,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
drm_err(&dev_priv->drm,
- "Cant get a suitable ratio from DSI PLL ratios\n");
+ "Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 278664f831e7..272cf3ea68d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -32,12 +32,13 @@ static void vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int vma_bind(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void vma_bind(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- return vm->vma_ops.bind_vma(vm, vma, cache_level, flags);
+ vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
}
static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
@@ -157,6 +158,7 @@ static void clear_pages_worker(struct work_struct *work)
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
+ struct i915_gem_ww_ctx ww;
struct i915_request *rq;
struct i915_vma *batch;
int err = w->dma.error;
@@ -172,17 +174,20 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (unlikely(err))
+ i915_gem_ww_ctx_init(&ww, false);
+ intel_engine_pm_get(w->ce->engine);
+retry:
+ err = intel_context_pin_ww(w->ce, &ww);
+ if (err)
goto out_signal;
- batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
- goto out_unpin;
+ goto out_ctx;
}
- rq = intel_context_create_request(w->ce);
+ rq = i915_request_create(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -224,9 +229,19 @@ out_request:
i915_request_add(rq);
out_batch:
intel_emit_vma_release(w->ce, batch);
-out_unpin:
- i915_vma_unpin(vma);
+out_ctx:
+ intel_context_unpin(w->ce);
out_signal:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ i915_vma_unpin(w->sleeve->vma);
+ intel_engine_pm_put(w->ce->engine);
+
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
dma_fence_signal(&w->dma);
@@ -234,6 +249,44 @@ out_signal:
}
}
+static int pin_wait_clear_pages_work(struct clear_pages_work *w,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = w->sleeve->vma;
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err)
+ goto out;
+
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out;
+
+ err = i915_sw_fence_await_reservation(&w->wait,
+ vma->obj->base.resv, NULL,
+ true, 0, I915_FENCE_GFP);
+ if (err)
+ goto err_unpin_vma;
+
+ dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
+
+err_unpin_vma:
+ if (err)
+ i915_vma_unpin(vma);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ return err;
+}
+
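pin_wait_clear_pages_work() above, like every path converted in this patch, follows the standard ww-mutex dance: run the whole locking sequence, and if any acquisition returns -EDEADLK, back off (drop everything and wait for the contending holder), then retry from the top. The control-flow skeleton, with stubs standing in for the i915 calls:

#include <errno.h>

struct ww_model { int unused; };

/* Stubs for illustration; the real calls are i915_gem_ww_ctx_*(). */
static int lock_everything(struct ww_model *ww) { (void)ww; return 0; }
static int backoff(struct ww_model *ww) { (void)ww; return 0; }
static void fini(struct ww_model *ww) { (void)ww; }

static int do_locked_work(void)
{
	struct ww_model ww = {0};
	int err;

retry:
	err = lock_everything(&ww);
	if (err == -EDEADLK) {
		err = backoff(&ww);	/* release all, sleep on the blocker */
		if (!err)
			goto retry;	/* take everything again, in order */
	}
	fini(&ww);
	return err;
}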
static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
@@ -287,17 +340,9 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
- i915_gem_object_lock(obj);
- err = i915_sw_fence_await_reservation(&work->wait,
- obj->base.resv, NULL, true, 0,
- I915_FENCE_GFP);
- if (err < 0) {
+ err = pin_wait_clear_pages_work(work, ce);
+ if (err < 0)
dma_fence_set_error(&work->dma, err);
- } else {
- dma_resv_add_excl_fence(obj->base.resv, &work->dma);
- err = 0;
- }
- i915_gem_object_unlock(obj);
dma_fence_get(&work->dma);
i915_sw_fence_commit(&work->wait);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ef755dd5e68f..4fd38101bb56 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx)
return rcu_dereference_protected(ctx->engines, true);
}
-static bool __reset_engine(struct intel_engine_cs *engine)
-{
- struct intel_gt *gt = engine->gt;
- bool success = false;
-
- if (!intel_has_reset_engine(gt))
- return false;
-
- if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags)) {
- success = intel_engine_reset(engine, NULL) == 0;
- clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
- &gt->reset.flags);
- }
-
- return success;
-}
-
static void __reset_context(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
* kill the banned context, we fallback to doing a local reset
* instead.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
- !intel_engine_pulse(engine))
- return true;
-
- /* If we are unable to send a pulse, try resetting this engine. */
- return __reset_engine(engine);
+ return intel_engine_pulse(engine) == 0;
}
static bool
@@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
spin_lock(&locked->active.lock);
}
- if (!i915_request_completed(rq)) {
- if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+ if (i915_request_is_active(rq)) {
+ if (!i915_request_completed(rq))
*active = locked;
ret = true;
}
@@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (!ce->timeline)
return NULL;
+ /*
+ * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
+ * to the request to prevent it being transferred to a new timeline
+ * (and onto a new timeline->requests list).
+ */
rcu_read_lock();
- list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
- if (i915_request_is_active(rq) && i915_request_completed(rq))
- continue;
+ list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+ bool found;
+
+ /* timeline is already completed up to this point? */
+ if (!i915_request_get_rcu(rq))
+ break;
/* Check with the backend if the request is inflight */
- if (__active_engine(rq, &engine))
+ found = true;
+ if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
+ found = __active_engine(rq, &engine);
+
+ i915_request_put(rq);
+ if (found)
break;
}
rcu_read_unlock();
@@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
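The rewritten walk above follows the SLAB_TYPESAFE_BY_RCU rule from the new comment: an element seen under rcu_read_lock() may already be freed and recycled, so the walker must elevate to a real reference and then re-check the element's identity (here, that rq->timeline still matches) before trusting its state. A user-space model of the inc-not-zero step:

#include <stdatomic.h>
#include <stdbool.h>

struct node { atomic_int refs; void *owner; };

/* Succeeds only while the object is still alive (refs > 0). */
static bool get_unless_zero(struct node *n)
{
	int v = atomic_load(&n->refs);

	while (v > 0)
		if (atomic_compare_exchange_weak(&n->refs, &v, v + 1))
			return true;
	return false;
}

/* Caller must drop the reference when done with the node. */
static bool node_usable(struct node *n, void *expected_owner)
{
	if (!get_unless_zero(n))
		return false;		/* dying; may already be reused */
	/* only after holding a reference is the identity check stable */
	return n->owner == expected_owner;
}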
-static void kill_engines(struct i915_gem_engines *engines)
+static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines)
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
- if (intel_context_set_banned(ce))
+ if (ban && intel_context_set_banned(ce))
continue;
/*
@@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines)
engine = active_engine(ce);
/* First attempt to gracefully cancel the context */
- if (engine && !__cancel_engine(engine))
+ if (engine && !__cancel_engine(engine) && ban)
/*
* If we are unable to send a preemptive pulse to bump
* the context from the GPU, we have to resort to a full
@@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines)
}
}
-static void kill_stale_engines(struct i915_gem_context *ctx)
+static void kill_context(struct i915_gem_context *ctx)
{
+ bool ban = (!i915_gem_context_is_persistent(ctx) ||
+ !ctx->i915->params.enable_hangcheck);
struct i915_gem_engines *pos, *next;
spin_lock_irq(&ctx->stale.lock);
@@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
spin_unlock_irq(&ctx->stale.lock);
- kill_engines(pos);
+ kill_engines(pos, ban);
spin_lock_irq(&ctx->stale.lock);
GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
spin_unlock_irq(&ctx->stale.lock);
}
-static void kill_context(struct i915_gem_context *ctx)
-{
- kill_stale_engines(ctx);
-}
-
static void engines_idle_release(struct i915_gem_context *ctx,
struct i915_gem_engines *engines)
{
@@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,
kill:
if (list_empty(&engines->link)) /* raced, already closed */
- kill_engines(engines);
+ kill_engines(engines, true);
i915_sw_fence_commit(&engines->fence);
}
@@ -654,9 +641,7 @@ static void context_close(struct i915_gem_context *ctx)
* case we opt to forcibly kill off all remaining requests on
* context close.
*/
- if (!i915_gem_context_is_persistent(ctx) ||
- !ctx->i915->params.enable_hangcheck)
- kill_context(ctx);
+ kill_context(ctx);
i915_gem_context_put(ctx);
}
@@ -892,7 +877,7 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
- timeline = intel_timeline_create(&i915->gt, NULL);
+ timeline = intel_timeline_create(&i915->gt);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -1106,6 +1091,7 @@ I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
bool (*skip)(struct intel_context *ce, void *data),
+ int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
int (*emit)(struct i915_request *rq, void *data),
void (*task)(void *data),
void *data)
@@ -1113,6 +1099,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct i915_gem_engines *e;
+ struct i915_gem_ww_ctx ww;
struct intel_context *ce;
int err = 0;
@@ -1150,10 +1137,21 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (skip && skip(ce, data))
continue;
- rq = intel_context_create_request(ce);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err;
+
+ if (pin)
+ err = pin(ce, &ww, data);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- break;
+ goto err_unpin;
}
err = 0;
@@ -1163,6 +1161,16 @@ static int context_barrier_task(struct i915_gem_context *ctx,
err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
if (err)
break;
}
@@ -1218,6 +1226,17 @@ static void set_ppgtt_barrier(void *data)
i915_vm_close(old);
}
+static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
+{
+ struct i915_address_space *vm = ce->vm;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
+ /* ppGTT is not part of the legacy context image */
+ return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
+
+ return 0;
+}
+
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
struct i915_address_space *vm = rq->context->vm;
@@ -1274,20 +1293,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
- if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
- return true;
-
if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
- return false;
-
- if (!atomic_read(&ce->pin_count))
- return true;
-
- /* ppGTT is not part of the legacy context image */
- if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
- return true;
-
- return false;
+ return !ce->state;
+ else
+ return !atomic_read(&ce->pin_count);
}
static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1338,6 +1347,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
*/
err = context_barrier_task(ctx, ALL_ENGINES,
skip_ppgtt_update,
+ pin_ppgtt_update,
emit_ppgtt_update,
set_ppgtt_barrier,
old);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 2679380159fc..8dd295dbe241 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = sg_next(src);
}
- if (!dma_map_sg_attrs(attachment->dev,
- st->sgl, st->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
- ret = -ENOMEM;
+ ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
goto err_free_sg;
- }
return st;
@@ -73,9 +70,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- dma_unmap_sg_attrs(attachment->dev,
- sg->sgl, sg->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
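The switch from dma_map_sg_attrs() to dma_map_sgtable() above also changes the error contract: the old call returns the number of mapped entries, with 0 meaning failure and the caller inventing -ENOMEM, while the new one returns 0 or a negative errno and updates the sg_table's nents bookkeeping itself, which is why the error handling collapses to a single if (ret). A stub with the shape of the new contract (a model, not the DMA API):

#include <errno.h>

struct sg_table_model { unsigned int nents; };

/* 0 on success, negative errno on failure -- like dma_map_sgtable(). */
static int map_sgtable_model(struct sg_table_model *st)
{
	if (st->nents == 0)
		return -EINVAL;
	/* ... perform the mapping, update st->nents ... */
	return 0;
}

static int caller(struct sg_table_model *st)
{
	int ret = map_sgtable_model(st);

	if (ret)		/* errno propagates directly */
		return ret;
	return 0;
}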
@@ -128,7 +123,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
if (err)
return err;
- err = i915_gem_object_lock_interruptible(obj);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
@@ -149,7 +144,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct
if (err)
return err;
- err = i915_gem_object_lock_interruptible(obj);
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 7f76fc68f498..fcce6909f201 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -32,11 +32,17 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_framebuffer(obj))
return;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
__i915_gem_object_flush_for_display(obj);
i915_gem_object_unlock(obj);
}
+void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj)
+{
+ if (i915_gem_object_is_framebuffer(obj))
+ __i915_gem_object_flush_for_display(obj);
+}
+
/**
* Moves a single object to the WC read, and possibly write domain.
* @obj: object to act on
@@ -197,18 +203,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret)
- return ret;
-
/* Always invalidate stale cachelines */
if (obj->cache_level != cache_level) {
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true;
}
- i915_gem_object_unlock(obj);
-
/* The cache-level will be applied when each vma is rebound. */
return i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE |
@@ -293,7 +293,12 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
+ if (ret)
+ goto out;
+
ret = i915_gem_object_set_cache_level(obj, level);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_put(obj);
@@ -313,6 +318,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -320,6 +326,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
return ERR_PTR(-EINVAL);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ goto err;
/*
* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -334,7 +345,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
- return ERR_PTR(ret);
+ goto err;
/*
* As the user may map the buffer once pinned in the display plane
@@ -347,18 +358,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
(!view || view->type == I915_GGTT_VIEW_NORMAL))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- flags |
- PIN_MAPPABLE |
- PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- if (IS_ERR(vma))
- return vma;
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0, alignment,
+ flags | PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK))
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, view, 0,
+ alignment, flags);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- i915_gem_object_flush_if_display(obj);
+ i915_gem_object_flush_if_display_locked(obj);
+
+err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (ret)
+ return ERR_PTR(ret);
return vma;
}
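
For reference, the err/retry/backoff tail above is the ww-transaction idiom this series rolls out across the driver. A minimal sketch in isolation, using the same helpers as the patch (do_work_locked() is a hypothetical placeholder for the locked work):

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true: interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);	/* may return -EDEADLK */
	if (!err)
		err = do_work_locked(obj);	/* hypothetical payload */
	if (err == -EDEADLK) {
		/* Drop all locks held by the ctx, then sleep-wait on the
		 * contended object so the retry can make progress. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);		/* releases any held locks */
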
@@ -485,21 +509,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/*
- * Already in the desired write domain? Nothing for us to do!
- *
- * We apply a little bit of cunning here to catch a broader set of
- * no-ops. If obj->write_domain is set, we must be in the same
- * obj->read_domains, and only that domain. Therefore, if that
- * obj->write_domain matches the request read_domains, we are
- * already in the same read/write domain and can skip the operation,
- * without having to further check the requested write_domain.
- */
- if (READ_ONCE(obj->write_domain) == read_domains) {
- err = 0;
- goto out;
- }
-
- /*
* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
@@ -536,7 +545,20 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
- err = i915_gem_object_lock_interruptible(obj);
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains)
+ goto out_unpin;
+
+ err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_unpin;
@@ -576,19 +598,17 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret)
- return ret;
+ assert_object_held(obj);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret)
- goto err_unlock;
+ return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_unlock;
+ return ret;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -616,8 +636,6 @@ out:
err_unpin:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- i915_gem_object_unlock(obj);
return ret;
}
@@ -630,20 +648,18 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
if (!i915_gem_object_has_struct_page(obj))
return -ENODEV;
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret)
- return ret;
+ assert_object_held(obj);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (ret)
- goto err_unlock;
+ return ret;
ret = i915_gem_object_pin_pages(obj);
if (ret)
- goto err_unlock;
+ return ret;
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -680,7 +696,5 @@ out:
err_unpin:
i915_gem_object_unpin_pages(obj);
-err_unlock:
- i915_gem_object_unlock(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 446e76e95c38..1904e6e5ea64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -26,6 +26,7 @@
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
struct eb_vma {
struct i915_vma *vma;
@@ -40,11 +41,6 @@ struct eb_vma {
u32 handle;
};
-struct eb_vma_array {
- struct kref kref;
- struct eb_vma vma[];
-};
-
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -57,9 +53,11 @@ enum {
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
+#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_INTERNAL_FLAGS (~0u << 31)
+#define __EXEC_ENGINE_PINNED BIT(30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 30)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
@@ -229,6 +227,13 @@ enum {
* the batchbuffer in trusted mode, otherwise the ioctl is rejected.
*/
+struct eb_fence {
+ struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
+ struct dma_fence *dma_fence;
+ u64 value;
+ struct dma_fence_chain *chain_fence;
+};
+
struct i915_execbuffer {
struct drm_i915_private *i915; /** i915 backpointer */
struct drm_file *file; /** per-file lookup tables and limits */
@@ -253,6 +258,8 @@ struct i915_execbuffer {
/** list of vma that have execobj.relocation_count */
struct list_head relocs;
+ struct i915_gem_ww_ctx ww;
+
/**
* Track the most recently used object for relocations, as we
* frequently have to perform multiple relocations within the same
@@ -268,19 +275,22 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
- struct i915_vma *target;
struct i915_request *rq;
- struct i915_vma *rq_vma;
u32 *rq_cmd;
unsigned int rq_size;
+ struct intel_gt_buffer_pool_node *pool;
} reloc_cache;
+ struct intel_gt_buffer_pool_node *reloc_pool; /** relocation pool for -EDEADLK handling */
+ struct intel_context *reloc_context;
+
u64 invalid_flags; /** Set of execobj.flags that are invalid */
u32 context_flags; /** Set of execobj.flags to insert from the ctx */
+ u64 batch_len; /** Length of batch within object */
u32 batch_start_offset; /** Location within object of batch */
- u32 batch_len; /** Length of batch within object */
u32 batch_flags; /** Flags composed for emit_bb_start() */
+ struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
/**
* Indicate either the size of the hashtable used to resolve
@@ -289,9 +299,16 @@ struct i915_execbuffer {
*/
int lut_size;
struct hlist_head *buckets; /** ht for relocation handles */
- struct eb_vma_array *array;
+
+ struct eb_fence *fences;
+ unsigned long num_fences;
};
+static int eb_parse(struct i915_execbuffer *eb);
+static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
+ bool throttle);
+static void eb_unpin_engine(struct i915_execbuffer *eb);
+
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -299,62 +316,8 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
eb->args->batch_len);
}
-static struct eb_vma_array *eb_vma_array_create(unsigned int count)
-{
- struct eb_vma_array *arr;
-
- arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
- if (!arr)
- return NULL;
-
- kref_init(&arr->kref);
- arr->vma[0].vma = NULL;
-
- return arr;
-}
-
-static inline void eb_unreserve_vma(struct eb_vma *ev)
-{
- struct i915_vma *vma = ev->vma;
-
- if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
- __i915_vma_unpin_fence(vma);
-
- if (ev->flags & __EXEC_OBJECT_HAS_PIN)
- __i915_vma_unpin(vma);
-
- ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
- __EXEC_OBJECT_HAS_FENCE);
-}
-
-static void eb_vma_array_destroy(struct kref *kref)
-{
- struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
- struct eb_vma *ev = arr->vma;
-
- while (ev->vma) {
- eb_unreserve_vma(ev);
- i915_vma_put(ev->vma);
- ev++;
- }
-
- kvfree(arr);
-}
-
-static void eb_vma_array_put(struct eb_vma_array *arr)
-{
- kref_put(&arr->kref, eb_vma_array_destroy);
-}
-
static int eb_create(struct i915_execbuffer *eb)
{
- /* Allocate an extra slot for use by the command parser + sentinel */
- eb->array = eb_vma_array_create(eb->buffer_count + 2);
- if (!eb->array)
- return -ENOMEM;
-
- eb->vma = eb->array->vma;
-
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
unsigned int size = 1 + ilog2(eb->buffer_count);
@@ -388,10 +351,8 @@ static int eb_create(struct i915_execbuffer *eb)
break;
} while (--size);
- if (unlikely(!size)) {
- eb_vma_array_put(eb->array);
+ if (unlikely(!size))
return -ENOMEM;
- }
eb->lut_size = size;
} else {
@@ -475,16 +436,17 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags |= PIN_GLOBAL;
/* Attempt to reuse the current location if available */
- if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+ /* TODO: Add -EDEADLK handling here */
+ if (unlikely(i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags))) {
if (entry->flags & EXEC_OBJECT_PINNED)
return false;
/* Failing that pick any _free_ space if suitable */
- if (unlikely(i915_vma_pin(vma,
- entry->pad_to_size,
- entry->alignment,
- eb_pin_flags(entry, ev->flags) |
- PIN_USER | PIN_NOEVICT)))
+ if (unlikely(i915_vma_pin_ww(vma, &eb->ww,
+ entry->pad_to_size,
+ entry->alignment,
+ eb_pin_flags(entry, ev->flags) |
+ PIN_USER | PIN_NOEVICT)))
return false;
}
@@ -502,6 +464,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
return !eb_vma_misplaced(entry, vma, ev->flags);
}
+static inline void
+eb_unreserve_vma(struct eb_vma *ev)
+{
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
+ return;
+
+ if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+ __i915_vma_unpin_fence(ev->vma);
+
+ __i915_vma_unpin(ev->vma);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
+}
+
static int
eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
@@ -593,16 +568,6 @@ eb_add_vma(struct i915_execbuffer *eb,
eb->batch = ev;
}
-
- if (eb_pin_vma(eb, entry, ev)) {
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
- eb->args->flags |= __EXEC_HAS_RELOC;
- }
- } else {
- eb_unreserve_vma(ev);
- list_add_tail(&ev->bind_link, &eb->unbound);
- }
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -622,7 +587,7 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
obj->cache_level != I915_CACHE_NONE);
}
-static int eb_reserve_vma(const struct i915_execbuffer *eb,
+static int eb_reserve_vma(struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
{
@@ -637,7 +602,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
return err;
}
- err = i915_vma_pin(vma,
+ err = i915_vma_pin_ww(vma, &eb->ww,
entry->pad_to_size, entry->alignment,
eb_pin_flags(entry, ev->flags) | pin_flags);
if (err)
@@ -687,10 +652,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
* This avoids unnecessary unbinding of later objects in order to make
* room for the earlier objects *unless* we need to defragment.
*/
-
- if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
- return -EINTR;
-
pass = 0;
do {
list_for_each_entry(ev, &eb->unbound, bind_link) {
@@ -698,8 +659,8 @@ static int eb_reserve(struct i915_execbuffer *eb)
if (err)
break;
}
- if (!(err == -ENOSPC || err == -EAGAIN))
- break;
+ if (err != -ENOSPC)
+ return err;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
@@ -729,13 +690,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
}
list_splice_tail(&last, &eb->unbound);
- if (err == -EAGAIN) {
- mutex_unlock(&eb->i915->drm.struct_mutex);
- flush_workqueue(eb->i915->mm.userptr_wq);
- mutex_lock(&eb->i915->drm.struct_mutex);
- continue;
- }
-
switch (pass++) {
case 0:
break;
@@ -746,20 +700,15 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- goto unlock;
+ return err;
break;
default:
- err = -ENOSPC;
- goto unlock;
+ return -ENOSPC;
}
pin_flags = PIN_USER;
} while (1);
-
-unlock:
- mutex_unlock(&eb->i915->drm.struct_mutex);
- return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -882,12 +831,12 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
+ struct drm_i915_private *i915 = eb->i915;
unsigned int batch = eb_batch_index(eb);
unsigned int i;
int err = 0;
INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
for (i = 0; i < eb->buffer_count; i++) {
struct i915_vma *vma;
@@ -895,22 +844,87 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- break;
+ goto err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
- break;
+ goto err;
}
eb_add_vma(eb, i, batch, vma);
}
+ if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
+ return -EINVAL;
+ }
+
+ if (range_overflows_t(u64,
+ eb->batch_start_offset, eb->batch_len,
+ eb->batch->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
+ return -EINVAL;
+ }
+
+ if (eb->batch_len == 0)
+ eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
+ if (unlikely(eb->batch_len == 0)) { /* impossible! */
+ drm_dbg(&i915->drm, "Invalid batch length\n");
+ return -EINVAL;
+ }
+
+ return 0;
+
+err:
eb->vma[i].vma = NULL;
return err;
}
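
range_overflows_t() guards the offset+length arithmetic against wraparound before the values are trusted. Simplified, the check amounts to the following (a sketch of the i915_utils.h macro, which additionally type-checks its arguments):

	static inline bool range_overflows_u64(u64 start, u64 size, u64 max)
	{
		/* true if [start, start + size) does not fit inside [0, max) */
		return start >= max || size > max - start;
	}
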
+static int eb_validate_vmas(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+ int err;
+
+ INIT_LIST_HEAD(&eb->unbound);
+
+ for (i = 0; i < eb->buffer_count; i++) {
+ struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+
+ err = i915_gem_object_lock(vma->obj, &eb->ww);
+ if (err)
+ return err;
+
+ if (eb_pin_vma(eb, entry, ev)) {
+ if (entry->offset != vma->node.start) {
+ entry->offset = vma->node.start | UPDATE;
+ eb->args->flags |= __EXEC_HAS_RELOC;
+ }
+ } else {
+ eb_unreserve_vma(ev);
+
+ list_add_tail(&ev->bind_link, &eb->unbound);
+ if (drm_mm_node_allocated(&vma->node)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
+ }
+ }
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
+ }
+
+ if (!list_empty(&eb->unbound))
+ return eb_reserve(eb);
+
+ return 0;
+}
+
static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
@@ -931,13 +945,31 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
}
}
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+
+ if (!vma)
+ break;
+
+ eb_unreserve_vma(ev);
+
+ if (final)
+ i915_vma_put(vma);
+ }
+
+ eb_unpin_engine(eb);
+}
+
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
- if (eb->array)
- eb_vma_array_put(eb->array);
-
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -949,6 +981,14 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
return gen8_canonical_addr((int)reloc->delta + target->node.start);
}
+static void reloc_cache_clear(struct reloc_cache *cache)
+{
+ cache->rq = NULL;
+ cache->rq_cmd = NULL;
+ cache->pool = NULL;
+ cache->rq_size = 0;
+}
+
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
@@ -961,8 +1001,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
- cache->rq = NULL;
- cache->target = NULL;
+ reloc_cache_clear(cache);
}
static inline void *unmask_page(unsigned long p)
@@ -984,132 +1023,60 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
return &i915->ggtt;
}
-#define RELOC_TAIL 4
-
-static int reloc_gpu_chain(struct reloc_cache *cache)
+static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache *cache)
{
- struct intel_gt_buffer_pool_node *pool;
- struct i915_request *rq = cache->rq;
- struct i915_vma *batch;
- u32 *cmd;
- int err;
-
- pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
-
- batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_pool;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
- if (err)
- goto out_pool;
-
- GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE / sizeof(u32));
- cmd = cache->rq_cmd + cache->rq_size;
- *cmd++ = MI_ARB_CHECK;
- if (cache->gen >= 8)
- *cmd++ = MI_BATCH_BUFFER_START_GEN8;
- else if (cache->gen >= 6)
- *cmd++ = MI_BATCH_BUFFER_START;
- else
- *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(batch->node.start);
- *cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
- i915_gem_object_flush_map(cache->rq_vma->obj);
- i915_gem_object_unpin_map(cache->rq_vma->obj);
- cache->rq_vma = NULL;
-
- err = intel_gt_buffer_pool_mark_active(pool, rq);
- if (err == 0) {
- i915_vma_lock(batch);
- err = i915_request_await_object(rq, batch->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- }
- i915_vma_unpin(batch);
- if (err)
- goto out_pool;
-
- cmd = i915_gem_object_pin_map(batch->obj,
- cache->has_llc ?
- I915_MAP_FORCE_WB :
- I915_MAP_FORCE_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out_pool;
- }
-
- /* Return with batch mapping (cmd) still pinned */
- cache->rq_cmd = cmd;
- cache->rq_size = 0;
- cache->rq_vma = batch;
-
-out_pool:
- intel_gt_buffer_pool_put(pool);
- return err;
-}
+ if (!cache->pool)
+ return;
-static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
-{
- return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
+ /*
+ * This is a bit nasty: normally we keep objects locked until the end
+ * of execbuffer, but this one has already been submitted, so we have to
+ * unlock before dropping the reference. Fortunately we can only hold one
+ * pool node at a time, so this should be harmless.
+ */
+ i915_gem_ww_unlock_single(cache->pool->obj);
+ intel_gt_buffer_pool_put(cache->pool);
+ cache->pool = NULL;
}
-static int reloc_gpu_flush(struct reloc_cache *cache)
+static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
{
- struct i915_request *rq;
- int err;
+ struct drm_i915_gem_object *obj = cache->rq->batch->obj;
- rq = fetch_and_zero(&cache->rq);
- if (!rq)
- return 0;
-
- if (cache->rq_vma) {
- struct drm_i915_gem_object *obj = cache->rq_vma->obj;
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
+ cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
- cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_unpin_map(obj);
- __i915_gem_object_flush_map(obj,
- 0, sizeof(u32) * cache->rq_size);
- i915_gem_object_unpin_map(obj);
- }
+ intel_gt_chipset_flush(cache->rq->engine->gt);
- err = 0;
- if (rq->engine->emit_init_breadcrumb)
- err = rq->engine->emit_init_breadcrumb(rq);
- if (!err)
- err = rq->engine->emit_bb_start(rq,
- rq->batch->node.start,
- PAGE_SIZE,
- reloc_bb_flags(cache));
- if (err)
- i915_request_set_error_once(rq, err);
-
- intel_gt_chipset_flush(rq->engine->gt);
- i915_request_add(rq);
+ i915_request_add(cache->rq);
+ reloc_cache_put_pool(eb, cache);
+ reloc_cache_clear(cache);
- return err;
+ eb->reloc_pool = NULL;
}
-static void reloc_cache_reset(struct reloc_cache *cache)
+static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
void *vaddr;
+ if (cache->rq)
+ reloc_gpu_flush(eb, cache);
+
if (!cache->vaddr)
return;
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP) {
+ struct drm_i915_gem_object *obj =
+ (struct drm_i915_gem_object *)cache->node.mm;
if (cache->vaddr & CLFLUSH_AFTER)
mb();
kunmap_atomic(vaddr);
- i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+ i915_gem_object_finish_access(obj);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1134,9 +1101,10 @@ static void reloc_cache_reset(struct reloc_cache *cache)
static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct reloc_cache *cache,
- unsigned long page)
+ unsigned long pageno)
{
void *vaddr;
+ struct page *page;
if (cache->vaddr) {
kunmap_atomic(unmask_page(cache->vaddr));
@@ -1157,17 +1125,22 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
mb();
}
- vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+ page = i915_gem_object_get_page(obj, pageno);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ vaddr = kmap_atomic(page);
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
- cache->page = page;
+ cache->page = pageno;
return vaddr;
}
static void *reloc_iomap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
+ struct i915_execbuffer *eb,
unsigned long page)
{
+ struct reloc_cache *cache = &eb->reloc_cache;
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
unsigned long offset;
void *vaddr;
@@ -1185,16 +1158,17 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(cache, obj))
return NULL;
- i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
if (err)
return ERR_PTR(err);
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
+ vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (vma == ERR_PTR(-EDEADLK))
+ return vma;
+
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
mutex_lock(&ggtt->vm.mutex);
@@ -1230,9 +1204,10 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
+ struct i915_execbuffer *eb,
unsigned long page)
{
+ struct reloc_cache *cache = &eb->reloc_cache;
void *vaddr;
if (cache->page == page) {
@@ -1240,7 +1215,7 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
} else {
vaddr = NULL;
if ((cache->vaddr & KMAP) == 0)
- vaddr = reloc_iomap(obj, cache, page);
+ vaddr = reloc_iomap(obj, eb, page);
if (!vaddr)
vaddr = reloc_kmap(obj, cache, page);
}
@@ -1276,7 +1251,7 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
struct drm_i915_gem_object *obj = vma->obj;
int err;
- i915_vma_lock(vma);
+ assert_vma_held(vma);
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
@@ -1286,25 +1261,31 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
-
return err;
}
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
struct intel_engine_cs *engine,
+ struct i915_vma *vma,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct intel_gt_buffer_pool_node *pool;
+ struct intel_gt_buffer_pool_node *pool = eb->reloc_pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ if (!pool) {
+ pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ }
+ eb->reloc_pool = NULL;
+
+ err = i915_gem_object_lock(pool->obj, &eb->ww);
+ if (err)
+ goto err_pool;
cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
@@ -1312,35 +1293,42 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
I915_MAP_FORCE_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out_pool;
+ goto err_pool;
}
- batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
+ batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
}
- err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
+ err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
if (err)
goto err_unmap;
if (engine == eb->context->engine) {
rq = i915_request_create(eb->context);
} else {
- struct intel_context *ce;
+ struct intel_context *ce = eb->reloc_context;
- ce = intel_context_create(engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto err_unpin;
+ if (!ce) {
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err_unpin;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(eb->context->vm);
+ eb->reloc_context = ce;
}
- i915_vm_put(ce->vm);
- ce->vm = i915_vm_get(eb->context->vm);
+ err = intel_context_pin_ww(ce, &eb->ww);
+ if (err)
+ goto err_unpin;
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
}
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1351,11 +1339,20 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_request;
- i915_vma_lock(batch);
+ err = reloc_move_to_gpu(rq, vma);
+ if (err)
+ goto err_request;
+
+ err = eb->engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
+ if (err)
+ goto skip_request;
+
+ assert_vma_held(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1365,10 +1362,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq = rq;
cache->rq_cmd = cmd;
cache->rq_size = 0;
- cache->rq_vma = batch;
+ cache->pool = pool;
/* Return with batch mapping (cmd) still pinned */
- goto out_pool;
+ return 0;
skip_request:
i915_request_set_error_once(rq, err);
@@ -1378,8 +1375,8 @@ err_unpin:
i915_vma_unpin(batch);
err_unmap:
i915_gem_object_unpin_map(pool->obj);
-out_pool:
- intel_gt_buffer_pool_put(pool);
+err_pool:
+ eb->reloc_pool = pool;
return err;
}
@@ -1394,9 +1391,12 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
{
struct reloc_cache *cache = &eb->reloc_cache;
u32 *cmd;
- int err;
+
+ if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
+ reloc_gpu_flush(eb, cache);
if (unlikely(!cache->rq)) {
+ int err;
struct intel_engine_cs *engine = eb->engine;
if (!reloc_can_use_engine(engine)) {
@@ -1405,31 +1405,11 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return ERR_PTR(-ENODEV);
}
- err = __reloc_gpu_alloc(eb, engine, len);
+ err = __reloc_gpu_alloc(eb, engine, vma, len);
if (unlikely(err))
return ERR_PTR(err);
}
- if (vma != cache->target) {
- err = reloc_move_to_gpu(cache->rq, vma);
- if (unlikely(err)) {
- i915_request_set_error_once(cache->rq, err);
- return ERR_PTR(err);
- }
-
- cache->target = vma;
- }
-
- if (unlikely(cache->rq_size + len >
- PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
- err = reloc_gpu_chain(cache);
- if (unlikely(err)) {
- i915_request_set_error_once(cache->rq, err);
- return ERR_PTR(err);
- }
- }
-
- GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE / sizeof(u32));
cmd = cache->rq_cmd + cache->rq_size;
cache->rq_size += len;
@@ -1461,7 +1441,7 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+static int __reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
@@ -1479,7 +1459,9 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
len = 3;
batch = reloc_gpu(eb, vma, len);
- if (IS_ERR(batch))
+ if (batch == ERR_PTR(-EDEADLK))
+ return -EDEADLK;
+ else if (IS_ERR(batch))
return false;
addr = gen8_canonical_addr(vma->node.start + offset);
@@ -1532,7 +1514,7 @@ static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
return true;
}
-static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+static int reloc_entry_gpu(struct i915_execbuffer *eb,
struct i915_vma *vma,
u64 offset,
u64 target_addr)
@@ -1554,14 +1536,17 @@ relocate_entry(struct i915_vma *vma,
{
u64 target_addr = relocation_target(reloc, target);
u64 offset = reloc->offset;
+ int reloc_gpu = reloc_entry_gpu(eb, vma, offset, target_addr);
+
+ if (reloc_gpu < 0)
+ return reloc_gpu;
- if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ if (!reloc_gpu) {
bool wide = eb->reloc_cache.use_64bit_reloc;
void *vaddr;
repeat:
- vaddr = reloc_vaddr(vma->obj,
- &eb->reloc_cache,
+ vaddr = reloc_vaddr(vma->obj, eb,
offset >> PAGE_SHIFT);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -1712,7 +1697,9 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
+ pagefault_disable();
+ copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
+ pagefault_enable();
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1756,74 +1743,400 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
urelocs += ARRAY_SIZE(stack);
} while (remain);
out:
- reloc_cache_reset(&eb->reloc_cache);
+ reloc_cache_reset(&eb->reloc_cache, eb);
return remain;
}
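
The pagefault_disable()/__copy_from_user_inatomic() pair above is what makes this the atomic fast path: a fault returns a non-zero "bytes missed" count instead of sleeping. The same pattern in isolation (copy_user_atomic() is a hypothetical wrapper):

	static int copy_user_atomic(void *dst, const void __user *src,
				    unsigned long len)
	{
		unsigned long missed;

		pagefault_disable();	/* faults fail fast instead of sleeping */
		missed = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();

		/* Non-zero means the caller must take a sleeping slowpath
		 * (here, eb_relocate_parse_slow) to fault the pages in. */
		return missed ? -EFAULT : 0;
	}
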
-static int eb_relocate(struct i915_execbuffer *eb)
+static int
+eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
{
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ struct drm_i915_gem_relocation_entry *relocs =
+ u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
+ unsigned int i;
int err;
- err = eb_lookup_vmas(eb);
- if (err)
- return err;
+ for (i = 0; i < entry->relocation_count; i++) {
+ u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
+
+ if ((s64)offset < 0) {
+ err = (int)offset;
+ goto err;
+ }
+ }
+ err = 0;
+err:
+ reloc_cache_reset(&eb->reloc_cache, eb);
+ return err;
+}
- if (!list_empty(&eb->unbound)) {
- err = eb_reserve(eb);
+static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
+{
+ const char __user *addr, *end;
+ unsigned long size;
+ char __maybe_unused c;
+
+ size = entry->relocation_count;
+ if (size == 0)
+ return 0;
+
+ if (size > N_RELOC(ULONG_MAX))
+ return -EINVAL;
+
+ addr = u64_to_user_ptr(entry->relocs_ptr);
+ size *= sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(addr, size))
+ return -EFAULT;
+
+ end = addr + size;
+ for (; addr < end; addr += PAGE_SIZE) {
+ int err = __get_user(c, addr);
if (err)
return err;
}
+ return __get_user(c, end - 1);
+}
- /* The objects are in their final locations, apply the relocations. */
- if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct eb_vma *ev;
- int flush;
+static int eb_copy_relocations(const struct i915_execbuffer *eb)
+{
+ struct drm_i915_gem_relocation_entry *relocs;
+ const unsigned int count = eb->buffer_count;
+ unsigned int i;
+ int err;
- list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ for (i = 0; i < count; i++) {
+ const unsigned int nreloc = eb->exec[i].relocation_count;
+ struct drm_i915_gem_relocation_entry __user *urelocs;
+ unsigned long size;
+ unsigned long copied;
+
+ if (nreloc == 0)
+ continue;
+
+ err = check_relocations(&eb->exec[i]);
+ if (err)
+ goto err;
+
+ urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
+ size = nreloc * sizeof(*relocs);
+
+ relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ if (!relocs) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ /* copy_from_user is limited to < 4GiB */
+ copied = 0;
+ do {
+ unsigned int len =
+ min_t(u64, BIT_ULL(31), size - copied);
+
+ if (__copy_from_user((char *)relocs + copied,
+ (char __user *)urelocs + copied,
+ len))
+ goto end;
+
+ copied += len;
+ } while (copied < size);
+
+ /*
+ * As we do not update the known relocation offsets after
+ * relocating (due to the complexities in lock handling),
+ * we need to mark them as invalid now so that we force the
+ * relocation processing next time. Otherwise, if the target
+ * object were evicted and then rebound into its old
+ * presumed_offset before the next execbuffer, we would make
+ * the mistake of assuming that the relocations were still
+ * valid.
+ */
+ if (!user_access_begin(urelocs, size))
+ goto end;
+
+ for (copied = 0; copied < nreloc; copied++)
+ unsafe_put_user(-1,
+ &urelocs[copied].presumed_offset,
+ end_user);
+ user_access_end();
+
+ eb->exec[i].relocs_ptr = (uintptr_t)relocs;
+ }
+
+ return 0;
+
+end_user:
+ user_access_end();
+end:
+ kvfree(relocs);
+ err = -EFAULT;
+err:
+ while (i--) {
+ relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
+ if (eb->exec[i].relocation_count)
+ kvfree(relocs);
+ }
+ return err;
+}
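
The do/while above slices the copy because a single copy_from_user() call is limited to less than 4GiB; each iteration moves at most 2GiB. The pattern in isolation (copy_user_large() is a hypothetical helper):

	static int copy_user_large(void *dst, const void __user *src, u64 size)
	{
		u64 copied = 0;

		while (copied < size) {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied); /* 2 GiB */

			if (__copy_from_user((char *)dst + copied,
					     (const char __user *)src + copied,
					     len))
				return -EFAULT;

			copied += len;
		}

		return 0;
	}
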
+
+static int eb_prefault_relocations(const struct i915_execbuffer *eb)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ int err;
+
+ err = check_relocations(&eb->exec[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
+ struct i915_request *rq)
+{
+ bool have_copy = false;
+ struct eb_vma *ev;
+ int err = 0;
+
+repeat:
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* We may process another execbuffer during the unlock... */
+ eb_release_vmas(eb, false);
+ i915_gem_ww_ctx_fini(&eb->ww);
+
+ if (rq) {
+ /* nonblocking is always false */
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ i915_request_put(rq);
+ rq = NULL;
+
+ err = -EINTR;
+ goto err_relock;
+ }
+
+ i915_request_put(rq);
+ rq = NULL;
+ }
+
+ /*
+ * We take 3 passes through the slowpath.
+ *
+ * 1 - we try to just prefault all the user relocation entries and
+ * then attempt to reuse the atomic pagefault-disabled fast path again.
+ *
+ * 2 - we copy the user entries to a local buffer here outside of the
+ * lock and allow ourselves to wait upon any rendering before
+ * relocations
+ *
+ * 3 - we already have a local copy of the relocation entries, but
+ * were interrupted (EAGAIN) whilst waiting for the objects, try again.
+ */
+ if (!err) {
+ err = eb_prefault_relocations(eb);
+ } else if (!have_copy) {
+ err = eb_copy_relocations(eb);
+ have_copy = err == 0;
+ } else {
+ cond_resched();
+ err = 0;
+ }
+
+ if (!err)
+ flush_workqueue(eb->i915->mm.userptr_wq);
+
+err_relock:
+ i915_gem_ww_ctx_init(&eb->ww, true);
+ if (err)
+ goto out;
+
+ /* reacquire the objects */
+repeat_validate:
+ rq = eb_pin_engine(eb, false);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ rq = NULL;
+ goto err;
+ }
+
+ /* We didn't throttle, should be NULL */
+ GEM_WARN_ON(rq);
+
+ err = eb_validate_vmas(eb);
+ if (err)
+ goto err;
+
+ GEM_BUG_ON(!eb->batch);
+
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ if (!have_copy) {
+ pagefault_disable();
err = eb_relocate_vma(eb, ev);
+ pagefault_enable();
+ if (err)
+ break;
+ } else {
+ err = eb_relocate_vma_slow(eb, ev);
if (err)
break;
}
+ }
+
+ if (err == -EDEADLK)
+ goto err;
- flush = reloc_gpu_flush(&eb->reloc_cache);
+ if (err && !have_copy)
+ goto repeat;
+
+ if (err)
+ goto err;
+
+ /* as last step, parse the command buffer */
+ err = eb_parse(eb);
+ if (err)
+ goto err;
+
+ /*
+ * Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ if (err == -EDEADLK) {
+ eb_release_vmas(eb, false);
+ err = i915_gem_ww_ctx_backoff(&eb->ww);
if (!err)
- err = flush;
+ goto repeat_validate;
+ }
+
+ if (err == -EAGAIN)
+ goto repeat;
+
+out:
+ if (have_copy) {
+ const unsigned int count = eb->buffer_count;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct drm_i915_gem_exec_object2 *entry =
+ &eb->exec[i];
+ struct drm_i915_gem_relocation_entry *relocs;
+
+ if (!entry->relocation_count)
+ continue;
+
+ relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
+ kvfree(relocs);
+ }
}
+ if (rq)
+ i915_request_put(rq);
+
return err;
}
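
Condensed, the three passes the function above cycles through:

	/*
	 * pass 1: eb_prefault_relocations() - fault the user pages in, then
	 *         retry the pagefault-disabled fast path;
	 * pass 2: eb_copy_relocations()     - take a stable kernel copy so we
	 *         may sleep while waiting for rendering;
	 * pass 3: have_copy is already set  - we were interrupted (EAGAIN)
	 *         while waiting; cond_resched() and simply try again.
	 */
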
-static int eb_move_to_gpu(struct i915_execbuffer *eb)
+static int eb_relocate_parse(struct i915_execbuffer *eb)
{
- const unsigned int count = eb->buffer_count;
- struct ww_acquire_ctx acquire;
- unsigned int i;
- int err = 0;
+ int err;
+ struct i915_request *rq = NULL;
+ bool throttle = true;
- ww_acquire_init(&acquire, &reservation_ww_class);
+retry:
+ rq = eb_pin_engine(eb, throttle);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ rq = NULL;
+ if (err != -EDEADLK)
+ return err;
- for (i = 0; i < count; i++) {
- struct eb_vma *ev = &eb->vma[i];
- struct i915_vma *vma = ev->vma;
+ goto err;
+ }
- err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (err == -EDEADLK) {
- GEM_BUG_ON(i == 0);
- do {
- int j = i - 1;
+ if (rq) {
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
- ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
+ /* Need to drop all locks now for throttling, take slowpath */
+ err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
+ if (err == -ETIME) {
+ if (nonblock) {
+ err = -EWOULDBLOCK;
+ i915_request_put(rq);
+ goto err;
+ }
+ goto slow;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
- swap(eb->vma[i], eb->vma[j]);
- } while (--i);
+ /* only throttle once, even if we didn't need to throttle */
+ throttle = false;
- err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
- &acquire);
+ err = eb_validate_vmas(eb);
+ if (err == -EAGAIN)
+ goto slow;
+ else if (err)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ if (eb->args->flags & __EXEC_HAS_RELOC) {
+ struct eb_vma *ev;
+
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ break;
}
- if (err)
- break;
+
+ if (err == -EDEADLK)
+ goto err;
+ else if (err)
+ goto slow;
+ }
+
+ if (!err)
+ err = eb_parse(eb);
+
+err:
+ if (err == -EDEADLK) {
+ eb_release_vmas(eb, false);
+ err = i915_gem_ww_ctx_backoff(&eb->ww);
+ if (!err)
+ goto retry;
}
- ww_acquire_done(&acquire);
+
+ return err;
+
+slow:
+ err = eb_relocate_parse_slow(eb, rq);
+ if (err)
+ /*
+ * If the user expects the execobject.offset and
+ * reloc.presumed_offset to be an exact match,
+ * as when using NO_RELOC, then we cannot update
+ * the execobject.offset until we have completed
+ * relocation.
+ */
+ eb->args->flags &= ~__EXEC_HAS_RELOC;
+
+ return err;
+}
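
The zero timeout in the throttle wait above turns i915_request_wait() into a pure completion poll: it returns the remaining timeout on success and -ETIME while the request is still running. A sketch (request_is_busy() is a hypothetical helper):

	static bool request_is_busy(struct i915_request *rq)
	{
		/* timeout == 0: do not sleep, just report completion */
		return i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0) == -ETIME;
	}
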
+
+static int eb_move_to_gpu(struct i915_execbuffer *eb)
+{
+ const unsigned int count = eb->buffer_count;
+ unsigned int i = count;
+ int err = 0;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
@@ -1868,13 +2181,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (err == 0)
err = i915_vma_move_to_active(vma, eb->request, flags);
-
- i915_vma_unlock(vma);
- eb_unreserve_vma(ev);
}
- ww_acquire_fini(&acquire);
-
- eb_vma_array_put(fetch_and_zero(&eb->array));
if (unlikely(err))
goto err_skip;
@@ -1894,7 +2201,8 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return -EINVAL;
/* Kernel clipping was a DRI1 misfeature */
- if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
+ if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
+ I915_EXEC_USE_EXTENSIONS))) {
if (exec->num_cliprects || exec->cliprects_ptr)
return -EINVAL;
}
@@ -1938,7 +2246,8 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
}
static struct i915_vma *
-shadow_batch_pin(struct drm_i915_gem_object *obj,
+shadow_batch_pin(struct i915_execbuffer *eb,
+ struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned int flags)
{
@@ -1949,7 +2258,7 @@ shadow_batch_pin(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return vma;
- err = i915_vma_pin(vma, 0, 0, flags);
+ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
if (err)
return ERR_PTR(err);
@@ -1962,8 +2271,8 @@ struct eb_parse_work {
struct i915_vma *batch;
struct i915_vma *shadow;
struct i915_vma *trampoline;
- unsigned int batch_offset;
- unsigned int batch_length;
+ unsigned long batch_offset;
+ unsigned long batch_length;
};
static int __eb_parse(struct dma_fence_work *work)
@@ -2001,7 +2310,7 @@ __parser_mark_active(struct i915_vma *vma,
{
struct intel_gt_buffer_pool_node *node = vma->private;
- return i915_active_ref(&node->active, tl, fence);
+ return i915_active_ref(&node->active, tl->fence_context, fence);
}
static int
@@ -2033,6 +2342,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct eb_parse_work *pw;
int err;
+ GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+ GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
+
pw = kzalloc(sizeof(*pw), GFP_KERNEL);
if (!pw)
return -ENOMEM;
@@ -2065,36 +2377,26 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (err)
goto err_commit;
- err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
- if (err)
- goto err_commit;
-
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
- goto err_commit_unlock;
+ goto err_commit;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
- goto err_commit_unlock;
+ goto err_commit;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
- dma_resv_unlock(pw->batch->resv);
-
/* Force execution to wait for completion of the parser */
- dma_resv_lock(shadow->resv, NULL);
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
- dma_resv_unlock(shadow->resv);
dma_fence_work_commit_imm(&pw->base);
return 0;
-err_commit_unlock:
- dma_resv_unlock(pw->batch->resv);
err_commit:
i915_sw_fence_set_error_once(&pw->base.chain, err);
dma_fence_work_commit_imm(&pw->base);
@@ -2109,16 +2411,33 @@ err_free:
return err;
}
+static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
+{
+ /*
+ * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+ * batch" bit. Hence we need to pin secure batches into the global gtt.
+ * hsw should have this fixed, but bdw mucks it up again.
+ */
+ if (eb->batch_flags & I915_DISPATCH_SECURE)
+ return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);
+
+ return NULL;
+}
+
static int eb_parse(struct i915_execbuffer *eb)
{
struct drm_i915_private *i915 = eb->i915;
- struct intel_gt_buffer_pool_node *pool;
- struct i915_vma *shadow, *trampoline;
- unsigned int len;
+ struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
+ struct i915_vma *shadow, *trampoline, *batch;
+ unsigned long len;
int err;
- if (!eb_use_cmdparser(eb))
- return 0;
+ if (!eb_use_cmdparser(eb)) {
+ batch = eb_dispatch_secure(eb, eb->batch->vma);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ goto secure_batch;
+ }
len = eb->batch_len;
if (!CMDPARSER_USES_GGTT(eb->i915)) {
@@ -2134,12 +2453,21 @@ static int eb_parse(struct i915_execbuffer *eb)
} else {
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
+ if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */
+ return -EINVAL;
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ if (!pool) {
+ pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ eb->batch_pool = pool;
+ }
- shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+ err = i915_gem_object_lock(pool->obj, &eb->ww);
+ if (err)
+ goto err;
+
+ shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
if (IS_ERR(shadow)) {
err = PTR_ERR(shadow);
goto err;
@@ -2151,7 +2479,7 @@ static int eb_parse(struct i915_execbuffer *eb)
if (CMDPARSER_USES_GGTT(eb->i915)) {
trampoline = shadow;
- shadow = shadow_batch_pin(pool->obj,
+ shadow = shadow_batch_pin(eb, pool->obj,
&eb->engine->gt->ggtt->vm,
PIN_GLOBAL);
if (IS_ERR(shadow)) {
@@ -2164,42 +2492,43 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->batch_flags |= I915_DISPATCH_SECURE;
}
+ batch = eb_dispatch_secure(eb, shadow);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_trampoline;
+ }
+
err = eb_parse_pipeline(eb, shadow, trampoline);
if (err)
- goto err_trampoline;
+ goto err_unpin_batch;
- eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
- eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
eb->batch = &eb->vma[eb->buffer_count++];
- eb->vma[eb->buffer_count].vma = NULL;
+ eb->batch->vma = i915_vma_get(shadow);
+ eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
+secure_batch:
+ if (batch) {
+ eb->batch = &eb->vma[eb->buffer_count++];
+ eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch->vma = i915_vma_get(batch);
+ }
return 0;
+err_unpin_batch:
+ if (batch)
+ i915_vma_unpin(batch);
err_trampoline:
if (trampoline)
i915_vma_unpin(trampoline);
err_shadow:
i915_vma_unpin(shadow);
err:
- intel_gt_buffer_pool_put(pool);
return err;
}
-static void
-add_to_client(struct i915_request *rq, struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- rq->file_priv = file_priv;
-
- spin_lock(&file_priv->mm.lock);
- list_add_tail(&rq->client_link, &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-}
-
static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2281,7 +2610,7 @@ static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_VEBOX] = VECS0
};
-static struct i915_request *eb_throttle(struct intel_context *ce)
+static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_ring *ring = ce->ring;
struct intel_timeline *tl = ce->timeline;
@@ -2315,31 +2644,26 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
-static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
{
+ struct intel_context *ce = eb->context;
struct intel_timeline *tl;
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
int err;
- /*
- * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged.
- */
- err = intel_gt_terminally_wedged(ce->engine->gt);
- if (err)
- return err;
+ GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
if (unlikely(intel_context_is_banned(ce)))
- return -EIO;
+ return ERR_PTR(-EIO);
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = intel_context_pin(ce);
+ err = intel_context_pin_ww(ce, &eb->ww);
if (err)
- return err;
+ return ERR_PTR(err);
/*
* Take a local wakeref for preparing to dispatch the execbuf as
@@ -2351,45 +2675,17 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
*/
tl = intel_context_timeline_lock(ce);
if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto err_unpin;
+ intel_context_unpin(ce);
+ return ERR_CAST(tl);
}
intel_context_enter(ce);
- rq = eb_throttle(ce);
-
+ if (throttle)
+ rq = eb_throttle(eb, ce);
intel_context_timeline_unlock(tl);
- if (rq) {
- bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
- long timeout;
-
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (nonblock)
- timeout = 0;
-
- timeout = i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- timeout);
- i915_request_put(rq);
-
- if (timeout < 0) {
- err = nonblock ? -EWOULDBLOCK : timeout;
- goto err_exit;
- }
- }
-
- eb->engine = ce->engine;
- eb->context = ce;
- return 0;
-
-err_exit:
- mutex_lock(&tl->mutex);
- intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
-err_unpin:
- intel_context_unpin(ce);
- return err;
+ eb->args->flags |= __EXEC_ENGINE_PINNED;
+ return rq;
}
static void eb_unpin_engine(struct i915_execbuffer *eb)
@@ -2397,6 +2693,11 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
struct intel_context *ce = eb->context;
struct intel_timeline *tl = ce->timeline;
+ if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
+ return;
+
+ eb->args->flags &= ~__EXEC_ENGINE_PINNED;
+
mutex_lock(&tl->mutex);
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
@@ -2405,11 +2706,10 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
}
static unsigned int
-eb_select_legacy_ring(struct i915_execbuffer *eb,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+eb_select_legacy_ring(struct i915_execbuffer *eb)
{
struct drm_i915_private *i915 = eb->i915;
+ struct drm_i915_gem_execbuffer2 *args = eb->args;
unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
if (user_ring_id != I915_EXEC_BSD &&
@@ -2424,7 +2724,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
- bsd_idx = gen8_dispatch_bsd_engine(i915, file);
+ bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
bsd_idx <= I915_EXEC_BSD_RING2) {
bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -2449,131 +2749,297 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
static int
-eb_pin_engine(struct i915_execbuffer *eb,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+eb_select_engine(struct i915_execbuffer *eb)
{
struct intel_context *ce;
unsigned int idx;
int err;
if (i915_gem_context_user_engines(eb->gem_context))
- idx = args->flags & I915_EXEC_RING_MASK;
+ idx = eb->args->flags & I915_EXEC_RING_MASK;
else
- idx = eb_select_legacy_ring(eb, file, args);
+ idx = eb_select_legacy_ring(eb);
ce = i915_gem_context_get_engine(eb->gem_context, idx);
if (IS_ERR(ce))
return PTR_ERR(ce);
- err = __eb_pin_engine(eb, ce);
- intel_context_put(ce);
+ intel_gt_pm_get(ce->engine->gt);
+
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ err = intel_context_alloc_state(ce);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged.
+ */
+ err = intel_gt_terminally_wedged(ce->engine->gt);
+ if (err)
+ goto err;
+
+ eb->context = ce;
+ eb->engine = ce->engine;
+ /*
+ * Make sure the engine pool stays alive even if we call intel_context_put
+ * during ww handling. The pool is destroyed when the last pm reference
+ * is dropped, which breaks our -EDEADLK handling.
+ */
+ return err;
+
+err:
+ intel_gt_pm_put(ce->engine->gt);
+ intel_context_put(ce);
return err;
}
static void
-__free_fence_array(struct drm_syncobj **fences, unsigned int n)
+eb_put_engine(struct i915_execbuffer *eb)
{
- while (n--)
- drm_syncobj_put(ptr_mask_bits(fences[n], 2));
+ intel_gt_pm_put(eb->engine->gt);
+ intel_context_put(eb->context);
+}
+
+static void
+__free_fence_array(struct eb_fence *fences, unsigned int n)
+{
+ while (n--) {
+ drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
+ dma_fence_put(fences[n].dma_fence);
+ kfree(fences[n].chain_fence);
+ }
kvfree(fences);
}
-static struct drm_syncobj **
-get_fence_array(struct drm_i915_gem_execbuffer2 *args,
- struct drm_file *file)
+static int
+add_timeline_fence_array(struct i915_execbuffer *eb,
+ const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
{
- const unsigned long nfences = args->num_cliprects;
- struct drm_i915_gem_exec_fence __user *user;
- struct drm_syncobj **fences;
- unsigned long n;
- int err;
+ struct drm_i915_gem_exec_fence __user *user_fences;
+ u64 __user *user_values;
+ struct eb_fence *f;
+ u64 nfences;
+ int err = 0;
- if (!(args->flags & I915_EXEC_FENCE_ARRAY))
- return NULL;
+ nfences = timeline_fences->fence_count;
+ if (!nfences)
+ return 0;
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
if (nfences > min_t(unsigned long,
- ULONG_MAX / sizeof(*user),
- SIZE_MAX / sizeof(*fences)))
- return ERR_PTR(-EINVAL);
+ ULONG_MAX / sizeof(*user_fences),
+ SIZE_MAX / sizeof(*f)) - eb->num_fences)
+ return -EINVAL;
- user = u64_to_user_ptr(args->cliprects_ptr);
- if (!access_ok(user, nfences * sizeof(*user)))
- return ERR_PTR(-EFAULT);
+ user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
+ if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
+ return -EFAULT;
- fences = kvmalloc_array(nfences, sizeof(*fences),
- __GFP_NOWARN | GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
+ user_values = u64_to_user_ptr(timeline_fences->values_ptr);
+ if (!access_ok(user_values, nfences * sizeof(*user_values)))
+ return -EFAULT;
- for (n = 0; n < nfences; n++) {
- struct drm_i915_gem_exec_fence fence;
+ f = krealloc(eb->fences,
+ (eb->num_fences + nfences) * sizeof(*f),
+ __GFP_NOWARN | GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ eb->fences = f;
+ f += eb->num_fences;
+
+ BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+ ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+
+ while (nfences--) {
+ struct drm_i915_gem_exec_fence user_fence;
struct drm_syncobj *syncobj;
+ struct dma_fence *fence = NULL;
+ u64 point;
- if (__copy_from_user(&fence, user++, sizeof(fence))) {
- err = -EFAULT;
- goto err;
+ if (__copy_from_user(&user_fence,
+ user_fences++,
+ sizeof(user_fence)))
+ return -EFAULT;
+
+ if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
+ return -EINVAL;
+
+ if (__get_user(point, user_values++))
+ return -EFAULT;
+
+ syncobj = drm_syncobj_find(eb->file, user_fence.handle);
+ if (!syncobj) {
+ DRM_DEBUG("Invalid syncobj handle provided\n");
+ return -ENOENT;
}
- if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
- err = -EINVAL;
- goto err;
+ fence = drm_syncobj_fence_get(syncobj);
+
+ if (!fence && user_fence.flags &&
+ !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
+ DRM_DEBUG("Syncobj handle has no fence\n");
+ drm_syncobj_put(syncobj);
+ return -EINVAL;
}
- syncobj = drm_syncobj_find(file, fence.handle);
+ if (fence)
+ err = dma_fence_chain_find_seqno(&fence, point);
+
+ if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
+ DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
+ dma_fence_put(fence);
+ drm_syncobj_put(syncobj);
+ return err;
+ }
+
+ /*
+ * A point might have been signaled already and
+ * garbage collected from the timeline. In this case
+ * just ignore the point and carry on.
+ */
+ if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
+ drm_syncobj_put(syncobj);
+ continue;
+ }
+
+ /*
+ * For timeline syncobjs we need to preallocate chains for
+ * later signaling.
+ */
+ if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
+ /*
+ * Waiting and signaling the same point (when point !=
+ * 0) would break the timeline.
+ */
+ if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
+ DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
+ dma_fence_put(fence);
+ drm_syncobj_put(syncobj);
+ return -EINVAL;
+ }
+
+ f->chain_fence =
+ kmalloc(sizeof(*f->chain_fence),
+ GFP_KERNEL);
+ if (!f->chain_fence) {
+ drm_syncobj_put(syncobj);
+ dma_fence_put(fence);
+ return -ENOMEM;
+ }
+ } else {
+ f->chain_fence = NULL;
+ }
+
+ f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
+ f->dma_fence = fence;
+ f->value = point;
+ f++;
+ eb->num_fences++;
+ }
+
+ return 0;
+}
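
The chain node preallocated here is consumed at signal time. Mirroring signal_fence_array() at the end of this patch, the handoff is roughly (a sketch; ownership of chain_fence passes to the syncobj):

	if (f->chain_fence) {
		drm_syncobj_add_point(syncobj, f->chain_fence,
				      &eb->request->fence, f->value);
		f->chain_fence = NULL;	/* now owned by the syncobj */
	} else {
		drm_syncobj_replace_fence(syncobj, &eb->request->fence);
	}
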
+
+static int add_fence_array(struct i915_execbuffer *eb)
+{
+ struct drm_i915_gem_execbuffer2 *args = eb->args;
+ struct drm_i915_gem_exec_fence __user *user;
+ unsigned long num_fences = args->num_cliprects;
+ struct eb_fence *f;
+
+ if (!(args->flags & I915_EXEC_FENCE_ARRAY))
+ return 0;
+
+ if (!num_fences)
+ return 0;
+
+ /* Check multiplication overflow for access_ok() and krealloc() */
+ BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
+ if (num_fences > min_t(unsigned long,
+ ULONG_MAX / sizeof(*user),
+ SIZE_MAX / sizeof(*f) - eb->num_fences))
+ return -EINVAL;
+
+ user = u64_to_user_ptr(args->cliprects_ptr);
+ if (!access_ok(user, num_fences * sizeof(*user)))
+ return -EFAULT;
+
+ f = krealloc(eb->fences,
+ (eb->num_fences + num_fences) * sizeof(*f),
+ __GFP_NOWARN | GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ eb->fences = f;
+ f += eb->num_fences;
+ while (num_fences--) {
+ struct drm_i915_gem_exec_fence user_fence;
+ struct drm_syncobj *syncobj;
+ struct dma_fence *fence = NULL;
+
+ if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
+ return -EFAULT;
+
+ if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
+ return -EINVAL;
+
+ syncobj = drm_syncobj_find(eb->file, user_fence.handle);
if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n");
- err = -ENOENT;
- goto err;
+ return -ENOENT;
+ }
+
+ if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
+ fence = drm_syncobj_fence_get(syncobj);
+ if (!fence) {
+ DRM_DEBUG("Syncobj handle has no fence\n");
+ drm_syncobj_put(syncobj);
+ return -EINVAL;
+ }
}
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
- fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
+ f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
+ f->dma_fence = fence;
+ f->value = 0;
+ f->chain_fence = NULL;
+ f++;
+ eb->num_fences++;
}
- return fences;
-
-err:
- __free_fence_array(fences, n);
- return ERR_PTR(err);
+ return 0;
}
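A minimal userspace sketch of the legacy binary path serviced above, reusing the cliprects fields as I915_EXEC_FENCE_ARRAY has always done (the handle variables are placeholders):

	struct drm_i915_gem_exec_fence fences[2] = {
		{ .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT },
		{ .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
	};

	execbuf.flags |= I915_EXEC_FENCE_ARRAY;
	execbuf.num_cliprects = 2;
	execbuf.cliprects_ptr = (uintptr_t)fences;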
-static void
-put_fence_array(struct drm_i915_gem_execbuffer2 *args,
- struct drm_syncobj **fences)
+static void put_fence_array(struct eb_fence *fences, int num_fences)
{
if (fences)
- __free_fence_array(fences, args->num_cliprects);
+ __free_fence_array(fences, num_fences);
}
static int
-await_fence_array(struct i915_execbuffer *eb,
- struct drm_syncobj **fences)
+await_fence_array(struct i915_execbuffer *eb)
{
- const unsigned int nfences = eb->args->num_cliprects;
unsigned int n;
int err;
- for (n = 0; n < nfences; n++) {
+ for (n = 0; n < eb->num_fences; n++) {
struct drm_syncobj *syncobj;
- struct dma_fence *fence;
unsigned int flags;
- syncobj = ptr_unpack_bits(fences[n], &flags, 2);
- if (!(flags & I915_EXEC_FENCE_WAIT))
- continue;
+ syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
- fence = drm_syncobj_fence_get(syncobj);
- if (!fence)
- return -EINVAL;
+ if (!eb->fences[n].dma_fence)
+ continue;
- err = i915_request_await_dma_fence(eb->request, fence);
- dma_fence_put(fence);
+ err = i915_request_await_dma_fence(eb->request,
+ eb->fences[n].dma_fence);
if (err < 0)
return err;
}
@@ -2581,26 +3047,47 @@ await_fence_array(struct i915_execbuffer *eb,
return 0;
}
-static void
-signal_fence_array(struct i915_execbuffer *eb,
- struct drm_syncobj **fences)
+static void signal_fence_array(const struct i915_execbuffer *eb)
{
- const unsigned int nfences = eb->args->num_cliprects;
struct dma_fence * const fence = &eb->request->fence;
unsigned int n;
- for (n = 0; n < nfences; n++) {
+ for (n = 0; n < eb->num_fences; n++) {
struct drm_syncobj *syncobj;
unsigned int flags;
- syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+ syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
if (!(flags & I915_EXEC_FENCE_SIGNAL))
continue;
- drm_syncobj_replace_fence(syncobj, fence);
+ if (eb->fences[n].chain_fence) {
+ drm_syncobj_add_point(syncobj,
+ eb->fences[n].chain_fence,
+ fence,
+ eb->fences[n].value);
+ /*
+ * The chain's ownership is transferred to the
+ * timeline.
+ */
+ eb->fences[n].chain_fence = NULL;
+ } else {
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
}
}
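Two signaling modes are serviced above: binary syncobjs get the request fence directly, while timeline syncobjs consume the dma_fence_chain node preallocated in add_timeline_fence_array(). Condensed, per entry (a restatement of the branch above, not a new code path):

	if (f->chain_fence)	/* timeline: append fence at point f->value */
		drm_syncobj_add_point(syncobj, f->chain_fence, fence, f->value);
	else			/* binary: replace the current fence */
		drm_syncobj_replace_fence(syncobj, fence);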
+static int
+parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
+{
+ struct i915_execbuffer *eb = data;
+ struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
+
+ if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
+ return -EFAULT;
+
+ return add_timeline_fence_array(eb, &timeline_fences);
+}
+
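For reference, the extension struct copied in above has this shape per the i915 uapi headers accompanying this series (fence_count entries in each of the two user arrays):

	struct drm_i915_gem_execbuffer_ext_timeline_fences {
		struct i915_user_extension base;
		__u64 fence_count;	/* entries in handles_ptr/values_ptr */
		__u64 handles_ptr;	/* array of drm_i915_gem_exec_fence */
		__u64 values_ptr;	/* array of u64 timeline points */
	};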
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
struct i915_request *rq, *rn;
@@ -2642,12 +3129,37 @@ static void eb_request_add(struct i915_execbuffer *eb)
mutex_unlock(&tl->mutex);
}
+static const i915_user_extension_fn execbuf_extensions[] = {
+ [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
+};
+
+static int
+parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
+ struct i915_execbuffer *eb)
+{
+ if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
+ return 0;
+
+ /*
+ * The execbuf2 extension mechanism reuses cliprects_ptr. So we
+ * cannot have another flag also using it at the same time.
+ */
+ if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
+ return -EINVAL;
+
+ if (args->num_cliprects != 0)
+ return -EINVAL;
+
+ return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
+ execbuf_extensions,
+ ARRAY_SIZE(execbuf_extensions),
+ eb);
+}
+
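A userspace sketch of driving the extension chain (assuming the uapi names above; error handling elided): cliprects_ptr points at the first extension in the chain and num_cliprects must stay zero.

	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
		.base.next_extension = 0,	/* end of chain */
		.fence_count = nfences,
		.handles_ptr = (uintptr_t)handles,
		.values_ptr = (uintptr_t)points,
	};

	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
	execbuf.num_cliprects = 0;
	execbuf.cliprects_ptr = (uintptr_t)&ext;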
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec,
- struct drm_syncobj **fences)
+ struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_private *i915 = to_i915(dev);
struct i915_execbuffer eb;
@@ -2668,6 +3180,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
+ eb.reloc_pool = eb.batch_pool = NULL;
+ eb.reloc_context = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2677,6 +3193,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.batch_len = args->batch_len;
eb.trampoline = NULL;
+ eb.fences = NULL;
+ eb.num_fences = 0;
+
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (INTEL_GEN(i915) >= 11)
@@ -2694,14 +3213,24 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
+ err = parse_execbuf2_extensions(args, &eb);
+ if (err)
+ goto err_ext;
+
+ err = add_fence_array(&eb);
+ if (err)
+ goto err_ext;
+
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
if (args->flags & IN_FENCES) {
if ((args->flags & IN_FENCES) == IN_FENCES)
return -EINVAL;
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
- if (!in_fence)
- return -EINVAL;
+ if (!in_fence) {
+ err = -EINVAL;
+ goto err_ext;
+ }
}
#undef IN_FENCES
@@ -2723,11 +3252,19 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- err = eb_pin_engine(&eb, file, args);
+ err = eb_select_engine(&eb);
if (unlikely(err))
goto err_context;
- err = eb_relocate(&eb);
+ err = eb_lookup_vmas(&eb);
+ if (err) {
+ eb_release_vmas(&eb, true);
+ goto err_engine;
+ }
+
+ i915_gem_ww_ctx_init(&eb.ww, true);
+
+ err = eb_relocate_parse(&eb);
if (err) {
/*
* If the user expects the execobject.offset and
@@ -2740,54 +3277,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
- drm_dbg(&i915->drm,
- "Attempting to use self-modifying batch buffer\n");
- err = -EINVAL;
- goto err_vma;
- }
-
- if (range_overflows_t(u64,
- eb.batch_start_offset, eb.batch_len,
- eb.batch->vma->size)) {
- drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
- err = -EINVAL;
- goto err_vma;
- }
-
- if (eb.batch_len == 0)
- eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
-
- err = eb_parse(&eb);
- if (err)
- goto err_vma;
+ ww_acquire_done(&eb.ww.ctx);
- /*
- * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
- * batch" bit. Hence we need to pin secure batches into the global gtt.
- * hsw should have this fixed, but bdw mucks it up again. */
batch = eb.batch->vma;
- if (eb.batch_flags & I915_DISPATCH_SECURE) {
- struct i915_vma *vma;
-
- /*
- * So on first glance it looks freaky that we pin the batch here
- * outside of the reservation loop. But:
- * - The batch is already pinned into the relevant ppgtt, so we
- * already have the backing storage fully allocated.
- * - No other BO uses the global gtt (well contexts, but meh),
- * so we don't really have issues with multiple objects not
- * fitting due to fragmentation.
- * So this is actually safe.
- */
- vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_parse;
- }
-
- batch = vma;
- }
/* All GPU relocation batches must be submitted prior to the user rq */
GEM_BUG_ON(eb.reloc_cache.rq);
@@ -2796,7 +3288,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.request = i915_request_create(eb.context);
if (IS_ERR(eb.request)) {
err = PTR_ERR(eb.request);
- goto err_batch_unpin;
+ goto err_vma;
}
if (in_fence) {
@@ -2811,8 +3303,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_request;
}
- if (fences) {
- err = await_fence_array(&eb, fences);
+ if (eb.fences) {
+ err = await_fence_array(&eb);
if (err)
goto err_request;
}
@@ -2833,18 +3325,17 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = batch;
- if (batch->private)
- intel_gt_buffer_pool_mark_active(batch->private, eb.request);
+ if (eb.batch_pool)
+ intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb, batch);
err_request:
- add_to_client(eb.request, file);
i915_request_get(eb.request);
eb_request_add(&eb);
- if (fences)
- signal_fence_array(&eb, fences);
+ if (eb.fences)
+ signal_fence_array(&eb);
if (out_fence) {
if (err == 0) {
@@ -2858,16 +3349,21 @@ err_request:
}
i915_request_put(eb.request);
-err_batch_unpin:
- if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(batch);
-err_parse:
- if (batch->private)
- intel_gt_buffer_pool_put(batch->private);
err_vma:
+ eb_release_vmas(&eb, true);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- eb_unpin_engine(&eb);
+ WARN_ON(err == -EDEADLK);
+ i915_gem_ww_ctx_fini(&eb.ww);
+
+ if (eb.batch_pool)
+ intel_gt_buffer_pool_put(eb.batch_pool);
+ if (eb.reloc_pool)
+ intel_gt_buffer_pool_put(eb.reloc_pool);
+ if (eb.reloc_context)
+ intel_context_put(eb.reloc_context);
+err_engine:
+ eb_put_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
err_destroy:
@@ -2877,12 +3373,14 @@ err_out_fence:
put_unused_fd(out_fence_fd);
err_in_fence:
dma_fence_put(in_fence);
+err_ext:
+ put_fence_array(eb.fences, eb.num_fences);
return err;
}
static size_t eb_element_size(void)
{
- return sizeof(struct drm_i915_gem_exec_object2);
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
static bool check_buffer_count(size_t count)
@@ -2938,7 +3436,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
- exec2_list = kvmalloc_array(count, eb_element_size(),
+
+ /* Allocate extra slots for use by the command parser */
+ exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
drm_dbg(&i915->drm,
@@ -2971,7 +3471,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2_list[i].flags = 0;
}
- err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
+ err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
if (exec2.flags & __EXEC_HAS_RELOC) {
struct drm_i915_gem_exec_object __user *user_exec_list =
u64_to_user_ptr(args->buffers_ptr);
@@ -3003,7 +3503,6 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
- struct drm_syncobj **fences = NULL;
const size_t count = args->buffer_count;
int err;
@@ -3016,7 +3515,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- exec2_list = kvmalloc_array(count, eb_element_size(),
+ /* Allocate extra slots for use by the command parser */
+ exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
@@ -3031,15 +3531,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}
- if (args->flags & I915_EXEC_FENCE_ARRAY) {
- fences = get_fence_array(args, file);
- if (IS_ERR(fences)) {
- kvfree(exec2_list);
- return PTR_ERR(fences);
- }
- }
-
- err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
+ err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
/*
* Now that we have begun execution of the batchbuffer, we ignore
@@ -3080,7 +3572,6 @@ end:;
}
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
- put_fence_array(args, fences);
kvfree(exec2_list);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 753f82d87a31..3d69e51f3e4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -283,37 +283,46 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct intel_runtime_pm *rpm = &i915->runtime_pm;
struct i915_ggtt *ggtt = &i915->ggtt;
bool write = area->vm_flags & VM_WRITE;
+ struct i915_gem_ww_ctx ww;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int srcu;
int ret;
- /* Sanity check that we allow writing into this object */
- if (i915_gem_object_is_readonly(obj) && write)
- return VM_FAULT_SIGBUS;
-
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
- ret = i915_gem_object_pin_pages(obj);
+ wakeref = intel_runtime_pm_get(rpm);
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
if (ret)
- goto err;
+ goto err_rpm;
- wakeref = intel_runtime_pm_get(rpm);
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write) {
+ ret = -EFAULT;
+ goto err_rpm;
+ }
- ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+ ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err_rpm;
+ ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+ if (ret)
+ goto err_pages;
+
/* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK /* NOWARN */ |
- PIN_NOEVICT);
- if (IS_ERR(vma)) {
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
@@ -328,11 +337,11 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
* all hope that the hardware is able to track future writes.
*/
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma)) {
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
+ if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
/* The entire mappable GGTT is pinned? Unexpected! */
@@ -389,10 +398,16 @@ err_unpin:
__i915_vma_unpin(vma);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
+err_pages:
+ i915_gem_object_unpin_pages(obj);
err_rpm:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
intel_runtime_pm_put(rpm, wakeref);
- i915_gem_object_unpin_pages(obj);
-err:
return i915_error_to_vmf_fault(ret);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 9cf4ad78ece6..d46db8d8f38e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -110,20 +110,44 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
-static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ bool intr)
{
- dma_resv_lock(obj->base.resv, NULL);
+ int ret;
+
+ if (intr)
+ ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
+ else
+ ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
+
+ if (!ret && ww)
+ list_add_tail(&obj->obj_link, &ww->obj_list);
+ if (ret == -EALREADY)
+ ret = 0;
+
+ if (ret == -EDEADLK)
+ ww->contended = obj;
+
+ return ret;
}
-static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_trylock(obj->base.resv);
+ return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}
-static inline int
-i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
+static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww)
{
- return dma_resv_lock_interruptible(obj->base.resv, NULL);
+ WARN_ON(ww && !ww->intr);
+ return __i915_gem_object_lock(obj, ww, true);
+}
+
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
@@ -412,7 +436,6 @@ static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
- i915_gem_object_unlock(obj);
}
static inline struct intel_engine_cs *
@@ -435,6 +458,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
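The lock/backoff contract added above is what every converted caller in this series relies on; the canonical retry loop looks like this (a sketch assembled from the call sites elsewhere in this diff):

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true /* interruptible */);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err) {
		/* ... pin vmas, map pages, build and submit the request ... */
	}
	if (err == -EDEADLK) {
		/* Drop every lock on ww.obj_list, sleep on the contended object. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);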
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index bfdb32d46877..aee7ad3cc3c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -14,6 +14,7 @@
struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
struct i915_vma *vma,
+ struct i915_gem_ww_ctx *ww,
u32 value)
{
struct drm_i915_private *i915 = ce->vm->i915;
@@ -39,10 +40,24 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
goto out_pm;
}
+ err = i915_gem_object_lock(pool->obj, ww);
+ if (err)
+ goto out_put;
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
+ }
+
+ err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out_put;
+ goto out_unpin;
}
rem = vma->size;
@@ -84,19 +99,11 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
intel_gt_chipset_flush(ce->vm->gt);
- batch = i915_vma_instance(pool->obj, ce->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_put;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (unlikely(err))
- goto out_put;
-
batch->private = pool;
return batch;
+out_unpin:
+ i915_vma_unpin(batch);
out_put:
intel_gt_buffer_pool_put(pool);
out_pm:
@@ -108,11 +115,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
if (unlikely(err))
return err;
@@ -141,6 +146,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
{
+ struct i915_gem_ww_ctx ww;
struct i915_request *rq;
struct i915_vma *batch;
struct i915_vma *vma;
@@ -150,17 +156,28 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (unlikely(err))
- return err;
+ i915_gem_ww_ctx_init(&ww, true);
+ intel_engine_pm_get(ce->engine);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ goto out;
- batch = intel_emit_vma_fill_blt(ce, vma, value);
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+ if (err)
+ goto out_ctx;
+
+ batch = intel_emit_vma_fill_blt(ce, vma, &ww, value);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
- goto out_unpin;
+ goto out_vma;
}
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -170,11 +187,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
if (unlikely(err))
goto out_request;
- i915_vma_lock(vma);
err = move_obj_to_gpu(vma->obj, rq, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
@@ -193,8 +208,18 @@ out_request:
i915_request_add(rq);
out_batch:
intel_emit_vma_release(ce, batch);
-out_unpin:
+out_vma:
i915_vma_unpin(vma);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ intel_engine_pm_put(ce->engine);
return err;
}
@@ -210,6 +235,7 @@ static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
}
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *src,
struct i915_vma *dst)
{
@@ -236,10 +262,24 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
goto out_pm;
}
+ err = i915_gem_object_lock(pool->obj, ww);
+ if (err)
+ goto out_put;
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
+ }
+
+ err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto out_put;
+ goto out_unpin;
}
rem = src->size;
@@ -296,20 +336,11 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
i915_gem_object_unpin_map(pool->obj);
intel_gt_chipset_flush(ce->vm->gt);
-
- batch = i915_vma_instance(pool->obj, ce->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_put;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (unlikely(err))
- goto out_put;
-
batch->private = pool;
return batch;
+out_unpin:
+ i915_vma_unpin(batch);
out_put:
intel_gt_buffer_pool_put(pool);
out_pm:
@@ -321,10 +352,9 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct intel_context *ce)
{
- struct drm_gem_object *objs[] = { &src->base, &dst->base };
struct i915_address_space *vm = ce->vm;
struct i915_vma *vma[2], *batch;
- struct ww_acquire_ctx acquire;
+ struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err, i;
@@ -332,25 +362,36 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
if (IS_ERR(vma[0]))
return PTR_ERR(vma[0]);
- err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
- if (unlikely(err))
- return err;
-
vma[1] = i915_vma_instance(dst, vm, NULL);
if (IS_ERR(vma[1]))
- goto out_unpin_src;
+ return PTR_ERR(vma[1]);
- err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+ i915_gem_ww_ctx_init(&ww, true);
+ intel_engine_pm_get(ce->engine);
+retry:
+ err = i915_gem_object_lock(src, &ww);
+ if (!err)
+ err = i915_gem_object_lock(dst, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out;
+
+ err = i915_vma_pin_ww(vma[0], &ww, 0, 0, PIN_USER);
+ if (err)
+ goto out_ctx;
+
+ err = i915_vma_pin_ww(vma[1], &ww, 0, 0, PIN_USER);
if (unlikely(err))
goto out_unpin_src;
- batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+ batch = intel_emit_vma_copy_blt(ce, &ww, vma[0], vma[1]);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_unpin_dst;
}
- rq = intel_context_create_request(ce);
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -360,14 +401,10 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
if (unlikely(err))
goto out_request;
- err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
- if (unlikely(err))
- goto out_request;
-
for (i = 0; i < ARRAY_SIZE(vma); i++) {
err = move_obj_to_gpu(vma[i]->obj, rq, i);
if (unlikely(err))
- goto out_unlock;
+ goto out_request;
}
for (i = 0; i < ARRAY_SIZE(vma); i++) {
@@ -375,20 +412,19 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
err = i915_vma_move_to_active(vma[i], rq, flags);
if (unlikely(err))
- goto out_unlock;
+ goto out_request;
}
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
- goto out_unlock;
+ goto out_request;
}
err = rq->engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
-out_unlock:
- drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+
out_request:
if (unlikely(err))
i915_request_set_error_once(rq, err);
@@ -400,6 +436,16 @@ out_unpin_dst:
i915_vma_unpin(vma[1]);
out_unpin_src:
i915_vma_unpin(vma[0]);
+out_ctx:
+ intel_context_unpin(ce);
+out:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ intel_engine_pm_put(ce->engine);
return err;
}
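The drm_gem_lock_reservations() pair disappears because the ww context now covers both objects from the start; deadlock avoidance falls out of taking every reservation with the same acquire context (a condensed view of the sequence above):

	err = i915_gem_object_lock(src, &ww);
	if (!err)
		err = i915_gem_object_lock(dst, &ww);	/* same ww ctx */
	/* -EDEADLK from either lock unwinds through backoff and retries */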
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 8bcd336a90dc..2409fdcccf0e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -13,12 +13,15 @@
#include "i915_vma.h"
struct drm_i915_gem_object;
+struct i915_gem_ww_ctx;
struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
struct i915_vma *vma,
+ struct i915_gem_ww_ctx *ww,
u32 value);
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
struct i915_vma *src,
struct i915_vma *dst);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5335f799b548..d6711caa7f39 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -56,6 +56,8 @@ struct drm_i915_gem_object_ops {
void (*truncate)(struct drm_i915_gem_object *obj);
void (*writeback)(struct drm_i915_gem_object *obj);
+ int (*pread)(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *arg);
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
@@ -123,6 +125,15 @@ struct drm_i915_gem_object {
struct list_head lut_list;
spinlock_t lut_lock; /* guards lut_list */
+ /**
+ * @obj_link: Link into @i915_gem_ww_ctx.obj_list
+ *
+ * When we lock this object through i915_gem_object_lock() with a
+ * context, we add it to the list to ensure we can unlock everything
+ * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() is called.
+ */
+ struct list_head obj_link;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
union {
@@ -282,6 +293,7 @@ struct drm_i915_gem_object {
} userptr;
unsigned long scratch;
+ u64 encode;
void *gvt_info;
};
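The @obj_link bookkeeping implies an unlock-all helper on the ww side; a sketch of its assumed shape (the real helper lives with the i915_gem_ww_ctx code, outside this hunk):

	static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
	{
		struct drm_i915_gem_object *obj;

		while ((obj = list_first_entry_or_null(&ww->obj_list,
						       struct drm_i915_gem_object,
						       obj_link))) {
			list_del(&obj->obj_link);
			i915_gem_object_unlock(obj);
		}
	}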
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e8a083743e09..f60ca6dc911f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -162,8 +162,6 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
if (is_vmalloc_addr(ptr))
vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
}
struct sg_table *
@@ -234,50 +232,40 @@ unlock:
return err;
}
-static inline pte_t iomap_pte(resource_size_t base,
- dma_addr_t offset,
- pgprot_t prot)
-{
- return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
-}
-
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
+static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
{
- unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
- pte_t *stack[32], **mem;
- struct vm_struct *area;
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
+ struct page *stack[32], **pages = stack, *page;
+ struct sgt_iter iter;
pgprot_t pgprot;
-
- if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
- return NULL;
-
- /* A single page can always be kmapped */
- if (n_pte == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
-
- mem = stack;
- if (n_pte > ARRAY_SIZE(stack)) {
- /* Too big for stack -- allocate temporary array instead */
- mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return NULL;
- }
-
- area = alloc_vm_area(obj->base.size, mem);
- if (!area) {
- if (mem != stack)
- kvfree(mem);
- return NULL;
- }
+ void *vaddr;
switch (type) {
default:
MISSING_CASE(type);
fallthrough; /* to use PAGE_KERNEL anyway */
case I915_MAP_WB:
+ /*
+ * On 32b, highmem uses a finite set of indirect PTE (i.e.
+ * vmap) to provide virtual mappings of the high pages.
+ * As these are finite, map_new_virtual() must wait for some
+ * other kmap() to finish when it runs out. If we map a large
+ * number of objects, there is no method for it to tell us
+ * to release the mappings, and we deadlock.
+ *
+ * However, if we make an explicit vmap of the page, that
+ * uses a larger vmalloc arena, and also has the ability
+ * to tell us to release unwanted mappings. Most importantly,
+ * it will fail and propagate an error instead of waiting
+ * forever.
+ *
+ * So if the page is beyond the 32b boundary, make an explicit
+ * vmap.
+ */
+ if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
+ return page_address(sg_page(obj->mm.pages->sgl));
pgprot = PAGE_KERNEL;
break;
case I915_MAP_WC:
@@ -285,30 +273,50 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
break;
}
- if (i915_gem_object_has_struct_page(obj)) {
- struct sgt_iter iter;
- struct page *page;
- pte_t **ptes = mem;
+ if (n_pages > ARRAY_SIZE(stack)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+ }
- for_each_sgt_page(page, iter, sgt)
- **ptes++ = mk_pte(page, pgprot);
- } else {
- resource_size_t iomap;
- struct sgt_iter iter;
- pte_t **ptes = mem;
- dma_addr_t addr;
+ i = 0;
+ for_each_sgt_page(page, iter, obj->mm.pages)
+ pages[i++] = page;
+ vaddr = vmap(pages, n_pages, 0, pgprot);
+ if (pages != stack)
+ kvfree(pages);
+ return vaddr;
+}
- iomap = obj->mm.region->iomap.base;
- iomap -= obj->mm.region->region.start;
+static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ resource_size_t iomap = obj->mm.region->iomap.base -
+ obj->mm.region->region.start;
+ unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
+ unsigned long stack[32], *pfns = stack, i;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+ void *vaddr;
+
+ if (type != I915_MAP_WC)
+ return NULL;
- for_each_sgt_daddr(addr, iter, sgt)
- **ptes++ = iomap_pte(iomap, addr, pgprot);
+ if (n_pfn > ARRAY_SIZE(stack)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return NULL;
}
- if (mem != stack)
- kvfree(mem);
-
- return area->addr;
+ i = 0;
+ for_each_sgt_daddr(addr, iter, obj->mm.pages)
+ pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
+ vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
+ if (pfns != stack)
+ kvfree(pfns);
+ return vaddr;
}
/* get, pin, and map the pages of the object into kernel space */
@@ -360,7 +368,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
if (!ptr) {
- ptr = i915_gem_object_map(obj, type);
+ if (GEM_WARN_ON(type == I915_MAP_WC &&
+ !static_cpu_has(X86_FEATURE_PAT)))
+ ptr = NULL;
+ else if (i915_gem_object_has_struct_page(obj))
+ ptr = i915_gem_object_map_page(obj, type);
+ else
+ ptr = i915_gem_object_map_pfn(obj, type);
if (!ptr) {
err = -ENOMEM;
goto err_unpin;
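Callers see none of this split; they keep using the pin_map API unchanged, e.g. (a sketch using helpers that appear elsewhere in this diff; data/len are placeholders):

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, len);
	__i915_gem_object_flush_map(obj, 0, len);	/* flush CPU writes */
	i915_gem_object_unpin_map(obj);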
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 28147aab47b9..3a4dfe2ef1da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
vaddr, dma);
}
+static int
+phys_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *args)
+{
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ int err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ /*
+ * We manually control the domain here and pretend that it
+ * remains coherent, i.e. in the GTT domain, like shmem_pwrite.
+ */
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
+ if (copy_from_user(vaddr, user_data, args->size))
+ return -EFAULT;
+
+ drm_clflush_virt_range(vaddr, args->size);
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ return 0;
+}
+
+static int
+phys_pread(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pread *args)
+{
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+ int err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ drm_clflush_virt_range(vaddr, args->size);
+ if (copy_to_user(user_data, vaddr, args->size))
+ return -EFAULT;
+
+ return 0;
+}
+
static void phys_release(struct drm_i915_gem_object *obj)
{
fput(obj->base.filp);
@@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
+ .pread = phys_pread,
+ .pwrite = phys_pwrite,
+
.release = phys_release,
};
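With the hooks in place, the generic pread/pwrite ioctls can defer to the backend before falling back to the shmem/gtt paths; an assumed dispatch sketch (the caller here is hypothetical, sitting in the common ioctl path rather than this file):

	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		return ret;

	/* ... otherwise fall back to the generic read paths ... */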
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 3d215164dd5a..40d3e40500fa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -84,7 +84,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
drm_WARN_ON(&i915->drm,
i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 38113d3c0138..75e8b71c18b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -258,8 +258,8 @@ shmem_writeback(struct drm_i915_gem_object *obj)
for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
struct page *page;
- page = find_lock_entry(mapping, i);
- if (!page || xa_is_value(page))
+ page = find_lock_page(mapping, i);
+ if (!page)
continue;
if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0be5e8683337..84b2707d8b17 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -53,8 +53,10 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- return i915_gem_stolen_insert_node_in_range(i915, node, size,
- alignment, 0, U64_MAX);
+ return i915_gem_stolen_insert_node_in_range(i915, node,
+ size, alignment,
+ I915_GEM_STOLEN_BIAS,
+ U64_MAX);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index e15c0adad8af..61e028063f9f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -30,4 +30,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
resource_size_t stolen_offset,
resource_size_t size);
+#define I915_GEM_STOLEN_BIAS SZ_128K
+
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 540ef0551789..1929d6cf4150 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -9,6 +9,7 @@
#include <drm/drm_file.h>
#include "i915_drv.h"
+#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -35,9 +36,10 @@ int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct i915_request *request, *target = NULL;
+ struct i915_gem_context *ctx;
+ unsigned long idx;
long ret;
/* ABI: return -EIO if already wedged */
@@ -45,27 +47,54 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
- if (time_after_eq(request->emitted_jiffies, recent_enough))
- break;
+ rcu_read_lock();
+ xa_for_each(&file_priv->context_xa, idx, ctx) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
- if (target && xchg(&target->file_priv, NULL))
- list_del(&target->client_link);
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+ rcu_read_unlock();
- target = request;
- }
- if (target)
- i915_request_get(target);
- spin_unlock(&file_priv->mm.lock);
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx),
+ it) {
+ struct i915_request *rq, *target = NULL;
+
+ if (!ce->timeline)
+ continue;
+
+ mutex_lock(&ce->timeline->mutex);
+ list_for_each_entry_reverse(rq,
+ &ce->timeline->requests,
+ link) {
+ if (i915_request_completed(rq))
+ break;
- if (!target)
- return 0;
+ if (time_after(rq->emitted_jiffies,
+ recent_enough))
+ continue;
- ret = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- i915_request_put(target);
+ target = i915_request_get(rq);
+ break;
+ }
+ mutex_unlock(&ce->timeline->mutex);
+ if (!target)
+ continue;
+
+ ret = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(target);
+ if (ret < 0)
+ break;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_context_put(ctx);
+
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
return ret < 0 ? ret : 0;
}
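The new walk relies on a common RCU idiom: pin each context with a reference before leaving the RCU read section to sleep, then re-enter it to continue the xarray iteration. Distilled from the code above:

	rcu_read_lock();
	xa_for_each(&file_priv->context_xa, idx, ctx) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;	/* context already being freed */
		rcu_read_unlock();

		/* ... may sleep: walk timelines, wait on a request ... */

		i915_gem_context_put(ctx);
		rcu_read_lock();
	}
	rcu_read_unlock();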
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ff72ee2fd9cd..ffcaee74a249 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -249,7 +249,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 12b30075134a..f2eaed6aca3d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -403,6 +403,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
unsigned int sg_page_sizes;
+ struct scatterlist *sg;
int ret;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -410,13 +411,12 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOMEM);
alloc_table:
- ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
- 0, num_pages << PAGE_SHIFT,
- max_segment,
- GFP_KERNEL);
- if (ret) {
+ sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
+ num_pages << PAGE_SHIFT, max_segment,
+ NULL, 0, GFP_KERNEL);
+ if (IS_ERR(sg)) {
kfree(st);
- return ERR_PTR(ret);
+ return ERR_CAST(sg);
}
ret = i915_gem_gtt_prepare_pages(obj, st);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8291ede6902c..1f35e71429b4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -393,7 +393,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
*/
for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
- unsigned int combination = 0;
+ unsigned int combination = SZ_4K; /* Required for ppGTT */
for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
if (i & BIT(j))
@@ -947,7 +947,7 @@ static int gpu_write(struct intel_context *ce,
{
int err;
- i915_gem_object_lock(vma->obj);
+ i915_gem_object_lock(vma->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
i915_gem_object_unlock(vma->obj);
if (err)
@@ -964,9 +964,10 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
unsigned long n;
int err;
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
- return err;
+ goto err_unlock;
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
@@ -986,6 +987,8 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
}
i915_gem_object_finish_access(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
return err;
}
@@ -1614,7 +1617,7 @@ int i915_gem_huge_page_mock_selftests(void)
out_put:
i915_vm_put(&ppgtt->vm);
out_unlock:
- drm_dev_put(&dev_priv->drm);
+ mock_destroy_device(dev_priv);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 299c29e9ad86..4e36d4897ea6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -75,7 +75,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 87d7d8aa080f..7049a6bbc03d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -27,9 +27,10 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
u32 *cpu;
int err;
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
if (err)
- return err;
+ goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
@@ -46,7 +47,9 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
kunmap_atomic(map);
i915_gem_object_finish_access(ctx->obj);
- return 0;
+out:
+ i915_gem_object_unlock(ctx->obj);
+ return err;
}
static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
@@ -57,9 +60,10 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
u32 *cpu;
int err;
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
if (err)
- return err;
+ goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
@@ -73,7 +77,9 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
kunmap_atomic(map);
i915_gem_object_finish_access(ctx->obj);
- return 0;
+out:
+ i915_gem_object_unlock(ctx->obj);
+ return err;
}
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
@@ -82,7 +88,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
u32 __iomem *map;
int err = 0;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
i915_gem_object_unlock(ctx->obj);
if (err)
@@ -115,7 +121,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
u32 __iomem *map;
int err = 0;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
i915_gem_object_unlock(ctx->obj);
if (err)
@@ -147,7 +153,7 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
u32 *map;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
i915_gem_object_unlock(ctx->obj);
if (err)
@@ -170,7 +176,7 @@ static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
u32 *map;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
i915_gem_object_unlock(ctx->obj);
if (err)
@@ -193,27 +199,27 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
u32 *cs;
int err;
- i915_gem_object_lock(ctx->obj);
+ i915_gem_object_lock(ctx->obj, NULL);
err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
- i915_gem_object_unlock(ctx->obj);
if (err)
- return err;
+ goto out_unlock;
vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unlock;
+ }
rq = intel_engine_create_kernel_request(ctx->engine);
if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
+ err = PTR_ERR(rq);
+ goto out_unpin;
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
- i915_request_add(rq);
- i915_vma_unpin(vma);
- return PTR_ERR(cs);
+ err = PTR_ERR(cs);
+ goto out_rq;
}
if (INTEL_GEN(ctx->engine->i915) >= 8) {
@@ -234,14 +240,16 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
}
intel_ring_advance(rq, cs);
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- i915_vma_unpin(vma);
+out_rq:
i915_request_add(rq);
+out_unpin:
+ i915_vma_unpin(vma);
+out_unlock:
+ i915_gem_object_unlock(ctx->obj);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7ffc3c751432..d3f87dc4eda3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -461,9 +461,10 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
unsigned int n, m, need_flush;
int err;
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_prepare_write(obj, &need_flush);
if (err)
- return err;
+ goto out;
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
@@ -479,7 +480,9 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
i915_gem_object_finish_access(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
obj->write_domain = 0;
- return 0;
+out:
+ i915_gem_object_unlock(obj);
+ return err;
}
static noinline int cpu_check(struct drm_i915_gem_object *obj,
@@ -488,9 +491,10 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int n, m, needs_flush;
int err;
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
- return err;
+ goto out_unlock;
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
@@ -527,6 +531,8 @@ out_unmap:
}
i915_gem_object_finish_access(obj);
+out_unlock:
+ i915_gem_object_unlock(obj);
return err;
}
@@ -887,24 +893,15 @@ out_file:
return err;
}
-static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
+static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj;
u32 *cmd;
- int err;
- if (INTEL_GEN(vma->vm->i915) < 8)
- return ERR_PTR(-EINVAL);
+ GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);
- obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
+ cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
@@ -912,26 +909,12 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, 64);
- i915_gem_object_unpin_map(obj);
+ __i915_gem_object_flush_map(rpcs, 0, 64);
+ i915_gem_object_unpin_map(rpcs);
intel_gt_chipset_flush(vma->vm->gt);
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
+ return 0;
}
static int
@@ -939,52 +922,68 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
struct intel_context *ce,
struct i915_request **rq_out)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
struct i915_vma *batch;
struct i915_vma *vma;
+ struct drm_i915_gem_object *rpcs;
int err;
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
+ if (INTEL_GEN(i915) < 8)
+ return -EINVAL;
+
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
+ rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(rpcs))
+ return PTR_ERR(rpcs);
- batch = rpcs_query_batch(vma);
+ batch = i915_vma_instance(rpcs, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
- goto err_vma;
+ goto err_put;
}
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock(rpcs, &ww);
+ if (!err)
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+ if (err)
+ goto err_put;
+
+ err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER);
+ if (err)
+ goto err_vma;
+
+ err = rpcs_query_batch(rpcs, vma);
+ if (err)
+ goto err_batch;
+
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
- i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
if (err)
goto skip_request;
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1000,23 +999,24 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
- i915_vma_unpin_and_release(&batch, 0);
- i915_vma_unpin(vma);
-
*rq_out = i915_request_get(rq);
- i915_request_add(rq);
-
- return 0;
-
skip_request:
- i915_request_set_error_once(rq, err);
+ if (err)
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
err_batch:
- i915_vma_unpin_and_release(&batch, 0);
+ i915_vma_unpin(batch);
err_vma:
i915_vma_unpin(vma);
-
+err_put:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ i915_gem_object_put(rpcs);
return err;
}
@@ -1709,7 +1709,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_request_add(rq);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
@@ -1748,7 +1748,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
if (!vm)
return -ENODEV;
- page = vm->scratch[0].base.page;
+ page = __px_page(vm->scratch[0]);
if (!page) {
pr_err("No scratch page!\n");
return -EINVAL;
@@ -1914,8 +1914,8 @@ static int mock_context_barrier(void *arg)
return -ENOMEM;
counter = 0;
- err = context_barrier_task(ctx, 0,
- NULL, NULL, mock_barrier_task, &counter);
+ err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
+ mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1927,11 +1927,8 @@ static int mock_context_barrier(void *arg)
}
counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES,
- skip_unused_engines,
- NULL,
- mock_barrier_task,
- &counter);
+ err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+ NULL, NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1951,8 +1948,8 @@ static int mock_context_barrier(void *arg)
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
- err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, NULL, mock_barrier_task, &counter);
+ err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
+ mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;
@@ -1966,11 +1963,8 @@ static int mock_context_barrier(void *arg)
goto out;
counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES,
- skip_unused_engines,
- NULL,
- mock_barrier_task,
- &counter);
+ err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+ NULL, NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -2003,7 +1997,7 @@ int i915_gem_context_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 2a52b92586b9..0845ce1ae37c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -272,7 +272,7 @@ int i915_gem_dmabuf_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index a49016f8ee0d..e1d50a5a1477 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -32,46 +32,39 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ err = i915_gem_object_lock(obj, &eb->ww);
+ if (err)
+ return err;
+
+ err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
if (err)
return err;
/* 8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[0] * sizeof(u32),
- 0)) {
- err = -EIO;
- goto unpin_vma;
- }
+ err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
+ if (err <= 0)
+ goto reloc_err;
/* !8-Byte aligned */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[1] * sizeof(u32),
- 1)) {
- err = -EIO;
- goto unpin_vma;
- }
+ err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
+ if (err <= 0)
+ goto reloc_err;
/* Skip to the end of the cmd page */
- i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
+ i = PAGE_SIZE / sizeof(u32) - 1;
i -= eb->reloc_cache.rq_size;
memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
MI_NOOP, i);
eb->reloc_cache.rq_size += i;
- /* Force batch chaining */
- if (!__reloc_entry_gpu(eb, vma,
- offsets[2] * sizeof(u32),
- 2)) {
- err = -EIO;
- goto unpin_vma;
- }
+ /* Force next batch */
+ err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
+ if (err <= 0)
+ goto reloc_err;
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
- err = reloc_gpu_flush(&eb->reloc_cache);
- if (err)
- goto put_rq;
+ reloc_gpu_flush(eb, &eb->reloc_cache);
GEM_BUG_ON(eb->reloc_cache.rq);
err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
@@ -103,6 +96,11 @@ put_rq:
unpin_vma:
i915_vma_unpin(vma);
return err;
+
+reloc_err:
+ if (!err)
+ err = -EIO;
+ goto unpin_vma;
}
static int igt_gpu_reloc(void *arg)
@@ -124,6 +122,8 @@ static int igt_gpu_reloc(void *arg)
goto err_scratch;
}
+ intel_gt_pm_get(&eb.i915->gt);
+
for_each_uabi_engine(eb.engine, eb.i915) {
reloc_cache_init(&eb.reloc_cache, eb.i915);
memset(map, POISON_INUSE, 4096);
@@ -134,15 +134,29 @@ static int igt_gpu_reloc(void *arg)
err = PTR_ERR(eb.context);
goto err_pm;
}
+ eb.reloc_pool = NULL;
+ eb.reloc_context = NULL;
- err = intel_context_pin(eb.context);
- if (err)
- goto err_put;
+ i915_gem_ww_ctx_init(&eb.ww, false);
+retry:
+ err = intel_context_pin_ww(eb.context, &eb.ww);
+ if (!err) {
+ err = __igt_gpu_reloc(&eb, scratch);
+
+ intel_context_unpin(eb.context);
+ }
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&eb.ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&eb.ww);
- err = __igt_gpu_reloc(&eb, scratch);
+ if (eb.reloc_pool)
+ intel_gt_buffer_pool_put(eb.reloc_pool);
+ if (eb.reloc_context)
+ intel_context_put(eb.reloc_context);
- intel_context_unpin(eb.context);
-err_put:
intel_context_put(eb.context);
err_pm:
intel_engine_pm_put(eb.engine);
@@ -153,6 +167,7 @@ err_pm:
if (igt_flush_test(eb.i915))
err = -EIO;
+ intel_gt_pm_put(&eb.i915->gt);
err_scratch:
i915_gem_object_put(scratch);
return err;
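
The conversions in this selftest all follow the same wound/wait locking idiom the rest of the series introduces: initialise an i915_gem_ww_ctx, take object locks and pin through the _ww variants, and on -EDEADLK back off and retry instead of failing. Stripped to its skeleton (a sketch of the pattern as used above, not a standalone helper):

    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, false);       /* false: not interruptible */
    retry:
    err = i915_gem_object_lock(obj, &ww);
    if (!err)
            err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
    if (!err) {
            /* ... work with the pinned vma ... */
            i915_vma_unpin(vma);
    }
    if (err == -EDEADLK) {
            /* We lost a lock race: drop all ww locks and retry. */
            err = i915_gem_ww_ctx_backoff(&ww);
            if (!err)
                    goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
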
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9c7402ce5bf9..d27d87a678c8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -103,7 +103,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
@@ -188,7 +188,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
@@ -528,31 +528,42 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
int err;
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
if (err)
- return err;
+ goto err;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
+ err = PTR_ERR(rq);
+ goto err_unpin;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq,
EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
i915_request_add(rq);
+err_unpin:
i915_vma_unpin(vma);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
if (err)
return err;
}
@@ -1123,6 +1134,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
if (IS_ERR(vma)) {
@@ -1130,9 +1142,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
goto out_unmap;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
if (err)
- goto out_unmap;
+ goto out_ww;
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
@@ -1140,11 +1156,9 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
goto out_unpin;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
i915_request_get(rq);
@@ -1166,6 +1180,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
out_unpin:
i915_vma_unpin(vma);
+out_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
if (err)
goto out_unmap;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index faa5b6d91795..bf853c40ec65 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -85,7 +85,7 @@ int i915_gem_object_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 34932871b3a5..8cee68c6a6dc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -44,7 +44,7 @@ static int mock_phys_object(void *arg)
}
/* Make the object dirty so that put_pages must copy the data back */
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err) {
@@ -73,6 +73,6 @@ int i915_gem_phys_mock_selftests(void)
err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index debaf7b18ab5..be30b27e2926 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
sg = sg_next(sg);
}
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- err = -ENOMEM;
+ err = dma_map_sgtable(attachment->dev, st, dir, 0);
+ if (err)
goto err_st;
- }
return st;
@@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *st,
enum dma_data_direction dir)
{
- dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
+ dma_unmap_sgtable(attachment->dev, st, dir, 0);
sg_free_table(st);
kfree(st);
}
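
dma_map_sgtable() changes two conventions relative to the raw dma_map_sg() call it replaces: failure is reported as a negative errno rather than a zero mapped-entry count, and the number of mapped entries is stored in the sg_table itself, so the unmap side no longer passes nents by hand. Side by side (sketch):

    /* Old: zero mapped entries signals failure, caller picks the errno. */
    if (!dma_map_sg(dev, st->sgl, st->nents, dir))
            return ERR_PTR(-ENOMEM);
    ...
    dma_unmap_sg(dev, st->sgl, st->nents, dir);

    /* New: negative errno on failure, mapping state lives in st. */
    ret = dma_map_sgtable(dev, st, dir, 0);
    if (ret)
            return ERR_PTR(ret);
    ...
    dma_unmap_sgtable(dev, st, dir, 0);
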
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index cdc0b9c54305..c30adc05fa98 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -16,8 +16,10 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
+ dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
+
/* Caller needs to make sure the write completes if necessary */
- iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID,
ppgtt->pd_addr + pde);
}
@@ -79,7 +81,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch[0].encode;
+ const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
@@ -90,8 +92,6 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
- GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
-
num_entries -= count;
GEM_BUG_ON(count > atomic_read(&pt->used));
@@ -127,7 +127,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
- GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
+ GEM_BUG_ON(!pd->entry[act_pt]);
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
@@ -177,39 +177,36 @@ static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
mutex_unlock(&ppgtt->flush);
}
-static int gen6_alloc_va_range(struct i915_address_space *vm,
- u64 start, u64 length)
+static void gen6_alloc_va_range(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *pt, *alloc = NULL;
+ struct i915_page_table *pt;
bool flush = false;
u64 from = start;
unsigned int pde;
- int ret = 0;
spin_lock(&pd->lock);
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
- if (px_base(pt) == px_base(&vm->scratch[1])) {
+ if (!pt) {
spin_unlock(&pd->lock);
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind_out;
- }
+ pt = stash->pt[0];
+ __i915_gem_object_pin_pages(pt->base);
+ i915_gem_object_make_unshrinkable(pt->base);
- fill32_px(pt, vm->scratch[0].encode);
+ fill32_px(pt, vm->scratch[0]->encode);
spin_lock(&pd->lock);
- if (pd->entry[pde] == &vm->scratch[1]) {
+ if (!pd->entry[pde]) {
+ stash->pt[0] = pt->stash;
+ atomic_set(&pt->used, 0);
pd->entry[pde] = pt;
} else {
- alloc = pt;
pt = pd->entry[pde];
}
@@ -226,53 +223,51 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
gen6_flush_pd(ppgtt, from, start);
}
-
- goto out;
-
-unwind_out:
- gen6_ppgtt_clear_range(vm, from, start - from);
-out:
- if (alloc)
- free_px(vm, alloc);
- return ret;
}
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
- struct i915_page_directory * const pd = ppgtt->base.pd;
int ret;
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ ret = setup_scratch_page(vm);
if (ret)
return ret;
- vm->scratch[0].encode =
- vm->pte_encode(px_dma(&vm->scratch[0]),
+ vm->scratch[0]->encode =
+ vm->pte_encode(px_dma(vm->scratch[0]),
I915_CACHE_NONE, PTE_READ_ONLY);
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
- cleanup_scratch_page(vm);
- return -ENOMEM;
+ vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(vm->scratch[1])) {
+ ret = PTR_ERR(vm->scratch[1]);
+ goto err_scratch0;
}
- fill32_px(&vm->scratch[1], vm->scratch[0].encode);
- memset_p(pd->entry, &vm->scratch[1], I915_PDES);
+ ret = pin_pt_dma(vm, vm->scratch[1]);
+ if (ret)
+ goto err_scratch1;
+
+ fill32_px(vm->scratch[1], vm->scratch[0]->encode);
return 0;
+
+err_scratch1:
+ i915_gem_object_put(vm->scratch[1]);
+err_scratch0:
+ i915_gem_object_put(vm->scratch[0]);
+ return ret;
}
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
u32 pde;
gen6_for_all_pdes(pt, pd, pde)
- if (px_base(pt) != scratch)
- free_px(&ppgtt->base.vm, pt);
+ if (pt)
+ free_pt(&ppgtt->base.vm, pt);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
@@ -286,7 +281,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
mutex_destroy(&ppgtt->flush);
mutex_destroy(&ppgtt->pin_mutex);
- kfree(ppgtt->base.pd);
+
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
static int pd_vma_set_pages(struct i915_vma *vma)
@@ -302,28 +298,26 @@ static void pd_vma_clear_pages(struct i915_vma *vma)
vma->pages = NULL;
}
-static int pd_vma_bind(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
+static void pd_vma_bind(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
- px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
- return 0;
}
static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_dma * const scratch =
- px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
unsigned int pde;
@@ -332,11 +326,11 @@ static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
/* Free all no longer used page tables */
gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (px_base(pt) == scratch || atomic_read(&pt->used))
+ if (!pt || atomic_read(&pt->used))
continue;
- free_px(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch;
+ free_pt(&ppgtt->base.vm, pt);
+ pd->entry[pde] = NULL;
}
ppgtt->scan_for_unused_pt = false;
@@ -380,7 +374,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
return vma;
}
-int gen6_ppgtt_pin(struct i915_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
@@ -406,7 +400,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
*/
err = 0;
if (!atomic_read(&ppgtt->pin_count))
- err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
+ err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
if (!err)
atomic_inc(&ppgtt->pin_count);
mutex_unlock(&ppgtt->pin_mutex);
@@ -448,6 +442,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
mutex_init(&ppgtt->pin_mutex);
ppgtt_init(&ppgtt->base, gt);
+ ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
ppgtt->base.vm.top = 1;
ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
@@ -456,9 +451,10 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
+ ppgtt->base.pd = __alloc_pd(I915_PDES);
if (!ppgtt->base.pd) {
err = -ENOMEM;
goto err_free;
@@ -479,7 +475,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err_scratch:
free_scratch(&ppgtt->base.vm);
err_pd:
- kfree(ppgtt->base.pd);
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
err_free:
mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt);
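
Two conventions change together in this file: an unpopulated PDE is now a NULL entry instead of a pointer at the shared scratch page table, and page tables are no longer allocated (fallibly) under pd->lock but popped from an i915_vm_pt_stash filled before the VA-range walk, which is what lets gen6_alloc_va_range() drop its int return. The stash pop under the lock, in isolation (a simplified sketch of the pattern above; the real code pins and fills the table outside the lock):

    spin_lock(&pd->lock);
    if (!pd->entry[pde]) {                  /* NULL now means "scratch" */
            struct i915_page_table *pt = stash->pt[0];

            stash->pt[0] = pt->stash;       /* pop the preallocated list */
            atomic_set(&pt->used, 0);
            pd->entry[pde] = pt;
    }
    spin_unlock(&pd->lock);
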
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
index 72e481806c96..3357228f3304 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h
@@ -8,12 +8,15 @@
#include "intel_gtt.h"
+struct i915_gem_ww_ctx;
+
struct gen6_ppgtt {
struct i915_ppgtt base;
struct mutex flush;
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ u32 pp_dir;
atomic_t pin_count;
struct mutex pin_mutex;
@@ -66,7 +69,7 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
(pt = i915_pt_entry(pd, iter), true); \
++iter)
-int gen6_ppgtt_pin(struct i915_ppgtt *base);
+int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
void gen6_ppgtt_enable(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 699125928272..38c7069b7749 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -181,7 +181,7 @@ static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
} while (pde++, --count);
}
- free_px(vm, pd);
+ free_px(vm, &pd->pt, lvl);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -199,7 +199,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
struct i915_page_directory * const pd,
u64 start, const u64 end, int lvl)
{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
unsigned int idx, len;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
@@ -239,7 +239,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
vaddr = kmap_atomic_px(pt);
memset64(vaddr + gen8_pd_index(start, 0),
- vm->scratch[0].encode,
+ vm->scratch[0]->encode,
count);
kunmap_atomic(vaddr);
@@ -248,7 +248,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
}
if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
+ free_px(vm, pt, lvl);
} while (idx++, --len);
return start;
@@ -269,14 +269,12 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm,
start, start + length, vm->top);
}
-static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
- struct i915_page_directory * const pd,
- u64 * const start, const u64 end, int lvl)
+static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
{
- const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
- struct i915_page_table *alloc = NULL;
unsigned int idx, len;
- int ret = 0;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
@@ -297,49 +295,31 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
__func__, vm, lvl + 1, idx);
- pt = fetch_and_zero(&alloc);
- if (lvl) {
- if (!pt) {
- pt = &alloc_pd(vm)->pt;
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- fill_px(pt, vm->scratch[lvl].encode);
- } else {
- if (!pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto out;
- }
- }
-
- if (intel_vgpu_active(vm->i915) ||
- gen8_pt_count(*start, end) < I915_PDES)
- fill_px(pt, vm->scratch[lvl].encode);
- }
+ pt = stash->pt[!!lvl];
+ __i915_gem_object_pin_pages(pt->base);
+ i915_gem_object_make_unshrinkable(pt->base);
+
+ if (lvl ||
+ gen8_pt_count(*start, end) < I915_PDES ||
+ intel_vgpu_active(vm->i915))
+ fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
- if (likely(!pd->entry[idx]))
+ if (likely(!pd->entry[idx])) {
+ stash->pt[!!lvl] = pt->stash;
+ atomic_set(&pt->used, 0);
set_pd_entry(pd, idx, pt);
- else
- alloc = pt, pt = pd->entry[idx];
+ } else {
+ pt = pd->entry[idx];
+ }
}
if (lvl) {
atomic_inc(&pt->used);
spin_unlock(&pd->lock);
- ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
- start, end, lvl);
- if (unlikely(ret)) {
- if (release_pd_entry(pd, idx, pt, scratch))
- free_px(vm, pt);
- goto out;
- }
+ __gen8_ppgtt_alloc(vm, stash,
+ as_pd(pt), start, end, lvl);
spin_lock(&pd->lock);
atomic_dec(&pt->used);
@@ -359,18 +339,12 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
}
} while (idx++, --len);
spin_unlock(&pd->lock);
-out:
- if (alloc)
- free_px(vm, alloc);
- return ret;
}
-static int gen8_ppgtt_alloc(struct i915_address_space *vm,
- u64 start, u64 length)
+static void gen8_ppgtt_alloc(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length)
{
- u64 from;
- int err;
-
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(range_overflows(start, length, vm->total));
@@ -378,25 +352,9 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
GEM_BUG_ON(length == 0);
- from = start;
-
- err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
- &start, start + length, vm->top);
- if (unlikely(err && from != start))
- __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
- from, start, vm->top);
-
- return err;
-}
-static __always_inline void
-write_pte(gen8_pte_t *pte, const gen8_pte_t val)
-{
- /* Magic delays? Or can we refine these to flush all in one pass? */
- *pte = val;
- wmb(); /* cpu to cache */
- clflush(pte); /* cache to memory */
- wmb(); /* visible to all */
+ __gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
}
static __always_inline u64
@@ -415,8 +373,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
- write_pte(&vaddr[gen8_pd_index(idx, 0)],
- pte_encode | iter->dma);
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
@@ -439,10 +396,12 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = pdp->entry[gen8_pd_index(idx, 2)];
}
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
return idx;
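
With write_pte() removed, each PTE store becomes a plain memory write and the cache maintenance is batched: one clflush_cache_range() over the kmap'ed page before kunmap_atomic(), instead of a wmb/clflush/wmb sequence per entry. The shape of the change (sketch; the loop condition stands in for the real sg iterator):

    /* Before: flush every PTE individually. */
    *pte = val;
    wmb();                  /* cpu to cache */
    clflush(pte);           /* cache to memory */
    wmb();                  /* visible to all */

    /* After: fill the page, then flush it once. */
    while (have_more_ptes)
            vaddr[gen8_pd_index(idx++, 0)] = pte_encode | iter->dma;
    clflush_cache_range(vaddr, PAGE_SIZE);
    kunmap_atomic(vaddr);
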
@@ -498,7 +457,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
do {
GEM_BUG_ON(iter->sg->length < page_size);
- write_pte(&vaddr[index++], encode | iter->dma);
+ vaddr[index++] = encode | iter->dma;
start += page_size;
iter->dma += page_size;
@@ -523,6 +482,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
}
} while (rem >= page_size && index < I915_PDES);
+ clflush_cache_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
/*
@@ -554,7 +514,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch[0].encode;
+ encode = vma->vm->scratch[0]->encode;
vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
@@ -608,33 +568,44 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_order = clone->scratch_order;
- memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
- px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
+ for (i = 0; i <= vm->top; i++)
+ vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);
+
return 0;
}
- ret = setup_scratch_page(vm, __GFP_HIGHMEM);
+ ret = setup_scratch_page(vm);
if (ret)
return ret;
- vm->scratch[0].encode =
- gen8_pte_encode(px_dma(&vm->scratch[0]),
+ vm->scratch[0]->encode =
+ gen8_pte_encode(px_dma(vm->scratch[0]),
I915_CACHE_LLC, vm->has_read_only);
for (i = 1; i <= vm->top; i++) {
- if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ struct drm_i915_gem_object *obj;
+
+ obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(obj))
goto free_scratch;
- fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
- vm->scratch[i].encode =
- gen8_pde_encode(px_dma(&vm->scratch[i]),
- I915_CACHE_LLC);
+ ret = pin_pt_dma(vm, obj);
+ if (ret) {
+ i915_gem_object_put(obj);
+ goto free_scratch;
+ }
+
+ fill_px(obj, vm->scratch[i - 1]->encode);
+ obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);
+
+ vm->scratch[i] = obj;
}
return 0;
free_scratch:
- free_scratch(vm);
+ while (i--)
+ i915_gem_object_put(vm->scratch[i]);
return -ENOMEM;
}
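
The scratch levels now form a chain of GEM objects rather than embedded i915_page_scratch structs: scratch[0] is the dummy data page, and each scratch[i] is a page whose entries all carry scratch[i - 1]->encode, with its own ->encode being the PDE used one level up, so any unmapped address resolves down to the same page. Schematically (a sketch restating the loop above):

    /* scratch[0]: data page; encode = PTE -> scratch[0]           */
    /* scratch[i]: page of entries, all = scratch[i - 1]->encode;  */
    /*             encode = PDE -> scratch[i]                      */
    for (i = 1; i <= vm->top; i++) {
            fill_px(obj, vm->scratch[i - 1]->encode);
            obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC);
            vm->scratch[i] = obj;
    }
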
@@ -649,12 +620,20 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
struct i915_page_directory *pde;
+ int err;
pde = alloc_pd(vm);
if (IS_ERR(pde))
return PTR_ERR(pde);
- fill_px(pde, vm->scratch[1].encode);
+ err = pin_pt_dma(vm, pde->pt.base);
+ if (err) {
+ i915_gem_object_put(pde->pt.base);
+ free_pd(vm, pde);
+ return err;
+ }
+
+ fill_px(pde, vm->scratch[1]->encode);
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
@@ -668,21 +647,32 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
{
const unsigned int count = gen8_pd_top_count(vm);
struct i915_page_directory *pd;
+ int err;
- GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(count > I915_PDES);
- pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ pd = __alloc_pd(count);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
+ err = PTR_ERR(pd->pt.base);
+ pd->pt.base = NULL;
+ goto err_pd;
}
- fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ err = pin_pt_dma(vm, pd->pt.base);
+ if (err)
+ goto err_pd;
+
+ fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
atomic_inc(px_used(pd)); /* mark as pinned */
return pd;
+
+err_pd:
+ free_pd(vm, pd);
+ return ERR_PTR(err);
}
/*
@@ -703,6 +693,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt_init(ppgtt, gt);
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
+ ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
@@ -714,12 +705,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
*/
ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
- /*
- * There are only few exceptions for gen >=6. chv and bxt.
- * And we are not sure about the latter so play safe for now.
- */
- if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
- ppgtt->vm.pt_kmap_wc = true;
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
err = gen8_init_scratch(&ppgtt->vm);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 91786310c114..d8b206e53660 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_breadcrumbs.h"
+#include "intel_context.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -53,33 +55,65 @@ static void irq_disable(struct intel_engine_cs *engine)
spin_unlock(&engine->gt->irq_lock);
}
-static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
- struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
+ lockdep_assert_held(&b->irq_lock);
+
+ if (!b->irq_engine || b->irq_armed)
+ return;
+
+ if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+ return;
+
+ /*
+ * The breadcrumb irq will be disarmed on the interrupt after the
+ * waiters are signaled. This gives us a single interrupt window in
+ * which we can add a new waiter and avoid the cost of re-enabling
+ * the irq.
+ */
+ WRITE_ONCE(b->irq_armed, true);
+
+ /*
+ * Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference. This is tracked
+ * by i915->gt.awake; we can forgo holding our own wakeref
+ * for the interrupt because the breadcrumbs are disarmed
+ * before i915->gt.awake is released (when the driver is idle).
+ */
+ if (!b->irq_enabled++)
+ irq_enable(b->irq_engine);
+}
+
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
+{
lockdep_assert_held(&b->irq_lock);
+ if (!b->irq_engine || !b->irq_armed)
+ return;
+
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
- irq_disable(engine);
+ irq_disable(b->irq_engine);
WRITE_ONCE(b->irq_armed, false);
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async(b->irq_engine->gt);
}
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+static void add_signaling_context(struct intel_breadcrumbs *b,
+ struct intel_context *ce)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned long flags;
-
- if (!READ_ONCE(b->irq_armed))
- return;
+ intel_context_get(ce);
+ list_add_tail(&ce->signal_link, &b->signalers);
+ if (list_is_first(&ce->signal_link, &b->signalers))
+ __intel_breadcrumbs_arm_irq(b);
+}
- spin_lock_irqsave(&b->irq_lock, flags);
- if (b->irq_armed)
- __intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
+static void remove_signaling_context(struct intel_breadcrumbs *b,
+ struct intel_context *ce)
+{
+ list_del(&ce->signal_link);
+ intel_context_put(ce);
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -90,6 +124,9 @@ static inline bool __request_completed(const struct i915_request *rq)
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
+ if (rq->context != ce)
+ return false;
+
if (!list_is_last(&rq->signal_link, &ce->signals) &&
i915_seqno_passed(rq->fence.seqno,
list_next_entry(rq, signal_link)->fence.seqno))
@@ -133,25 +170,21 @@ __dma_fence_signal__notify(struct dma_fence *fence,
static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
{
- struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
-
- if (unlikely(intel_engine_is_virtual(engine)))
- engine = intel_virtual_engine_get_sibling(engine, 0);
-
- intel_engine_add_retire(engine, tl);
+ if (b->irq_engine)
+ intel_engine_add_retire(b->irq_engine, tl);
}
-static void __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq, struct list_head *signals)
{
- GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- if (!__dma_fence_signal(&rq->fence))
- return;
+ if (!__dma_fence_signal(&rq->fence)) {
+ i915_request_put(rq);
+ return false;
+ }
- i915_request_get(rq);
list_add_tail(&rq->signal_link, signals);
+ return true;
}
static void signal_irq_work(struct irq_work *work)
@@ -164,7 +197,7 @@ static void signal_irq_work(struct irq_work *work)
spin_lock(&b->irq_lock);
- if (b->irq_armed && list_empty(&b->signalers))
+ if (list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
list_splice_init(&b->signaled_requests, &signal);
@@ -197,8 +230,8 @@ static void signal_irq_work(struct irq_work *work)
/* Advance the list to the first incomplete request */
__list_del_many(&ce->signals, pos);
if (&ce->signals == pos) { /* now empty */
- list_del_init(&ce->signal_link);
add_retire(b, ce->timeline);
+ remove_signaling_context(b, ce);
}
}
}
@@ -220,116 +253,89 @@ static void signal_irq_work(struct irq_work *work)
}
}
-static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+struct intel_breadcrumbs *
+intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
{
- struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
-
- lockdep_assert_held(&b->irq_lock);
- if (b->irq_armed)
- return true;
-
- if (!intel_gt_pm_get_if_awake(engine->gt))
- return false;
-
- /*
- * The breadcrumb irq will be disarmed on the interrupt after the
- * waiters are signaled. This gives us a single interrupt window in
- * which we can add a new waiter and avoid the cost of re-enabling
- * the irq.
- */
- WRITE_ONCE(b->irq_armed, true);
-
- /*
- * Since we are waiting on a request, the GPU should be busy
- * and should have its own rpm reference. This is tracked
- * by i915->gt.awake, we can forgo holding our own wakref
- * for the interrupt as before i915->gt.awake is released (when
- * the driver is idle) we disarm the breadcrumbs.
- */
-
- if (!b->irq_enabled++)
- irq_enable(engine);
+ struct intel_breadcrumbs *b;
- return true;
-}
-
-void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
spin_lock_init(&b->irq_lock);
INIT_LIST_HEAD(&b->signalers);
INIT_LIST_HEAD(&b->signaled_requests);
init_irq_work(&b->irq_work, signal_irq_work);
+
+ b->irq_engine = irq_engine;
+
+ return b;
}
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
+void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
+ if (!b->irq_engine)
+ return;
+
spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_enabled)
- irq_enable(engine);
+ irq_enable(b->irq_engine);
else
- irq_disable(engine);
+ irq_disable(b->irq_engine);
spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
- struct intel_context *ce)
+void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;
- spin_lock_irqsave(&b->irq_lock, flags);
- if (!list_empty(&ce->signals)) {
- struct i915_request *rq, *next;
-
- /* Queue for executing the signal callbacks in the irq_work */
- list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
- GEM_BUG_ON(rq->engine != engine);
- GEM_BUG_ON(!__request_completed(rq));
-
- __signal_request(rq, &b->signaled_requests);
- }
+ if (!READ_ONCE(b->irq_armed))
+ return;
- INIT_LIST_HEAD(&ce->signals);
- list_del_init(&ce->signal_link);
+ spin_lock_irqsave(&b->irq_lock, flags);
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
+ if (!list_empty(&b->signalers))
irq_work_queue(&b->irq_work);
- }
- spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
{
+ kfree(b);
}
-bool i915_request_enable_breadcrumb(struct i915_request *rq)
+static void insert_breadcrumb(struct i915_request *rq,
+ struct intel_breadcrumbs *b)
{
- lockdep_assert_held(&rq->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
- return true;
+ struct intel_context *ce = rq->context;
+ struct list_head *pos;
- if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- struct intel_context *ce = rq->context;
- struct list_head *pos;
-
- spin_lock(&b->irq_lock);
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+ return;
- if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
- goto unlock;
+ i915_request_get(rq);
- if (!__intel_breadcrumbs_arm_irq(b))
- goto unlock;
+ /*
+ * If the request is already completed, we can transfer it
+ * straight onto a signaled list, and queue the irq worker for
+ * its signal completion.
+ */
+ if (__request_completed(rq)) {
+ if (__signal_request(rq, &b->signaled_requests))
+ irq_work_queue(&b->irq_work);
+ return;
+ }
+ if (list_empty(&ce->signals)) {
+ add_signaling_context(b, ce);
+ pos = &ce->signals;
+ } else {
/*
* We keep the seqno in retirement order, so we can break
* inside intel_engine_signal_breadcrumbs as soon as we've
@@ -351,24 +357,75 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
break;
}
- list_add(&rq->signal_link, pos);
- if (pos == &ce->signals) /* catch transitions from empty list */
- list_move_tail(&ce->signal_link, &b->signalers);
- GEM_BUG_ON(!check_signal_order(ce, rq));
+ }
+ list_add(&rq->signal_link, pos);
+ GEM_BUG_ON(!check_signal_order(ce, rq));
+ set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ /* Check after attaching to irq, interrupt may have already fired. */
+ if (__request_completed(rq))
+ irq_work_queue(&b->irq_work);
+}
- set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-unlock:
+bool i915_request_enable_breadcrumb(struct i915_request *rq)
+{
+ struct intel_breadcrumbs *b;
+
+ /* Serialises with i915_request_retire() using rq->lock */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ return true;
+
+ /*
+ * Peek at i915_request_submit()/i915_request_unsubmit() status.
+ *
+ * If the request is not yet active (and not signaled), we will
+ * attach the breadcrumb later.
+ */
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ return true;
+
+ /*
+ * rq->engine is locked by rq->engine->active.lock. That, however,
+ * is not known until after rq->engine has been dereferenced and
+ * the lock acquired. Hence we acquire the lock and then validate
+ * that rq->engine still matches the lock we hold for it.
+ *
+ * Here, we are using the breadcrumb lock as a proxy for the
+ * rq->engine->active.lock, and we know that since the breadcrumb
+ * will be serialised within i915_request_submit/i915_request_unsubmit,
+ * the engine cannot change while active as long as we hold the
+ * breadcrumb lock on that engine.
+ *
+ * From the dma_fence_enable_signaling() path, we are outside of the
+ * request submit/unsubmit path, and so we must be more careful to
+ * acquire the right lock.
+ */
+ b = READ_ONCE(rq->engine)->breadcrumbs;
+ spin_lock(&b->irq_lock);
+ while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) {
spin_unlock(&b->irq_lock);
+ b = READ_ONCE(rq->engine)->breadcrumbs;
+ spin_lock(&b->irq_lock);
}
- return !__request_completed(rq);
+ /*
+ * Now that we are finally serialised with request submit/unsubmit,
+ * [with b->irq_lock] and with i915_request_retire() [via checking
+ * SIGNALED with rq->lock] confirm the request is indeed active. If
+ * it is no longer active, the breadcrumb will be attached upon
+ * i915_request_submit().
+ */
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ insert_breadcrumb(rq, b);
+
+ spin_unlock(&b->irq_lock);
+
+ return true;
}
void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
- lockdep_assert_held(&rq->lock);
+ struct intel_breadcrumbs *b = rq->engine->breadcrumbs;
/*
* We must wait for b->irq_lock so that we know the interrupt handler
@@ -382,23 +439,19 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
list_del(&rq->signal_link);
if (list_empty(&ce->signals))
- list_del_init(&ce->signal_link);
+ remove_signaling_context(b, ce);
clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ i915_request_put(rq);
}
spin_unlock(&b->irq_lock);
}
-void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
- struct drm_printer *p)
+static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct intel_context *ce;
struct i915_request *rq;
- if (list_empty(&b->signalers))
- return;
-
drm_printf(p, "Signals:\n");
spin_lock_irq(&b->irq_lock);
@@ -414,3 +467,17 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
}
spin_unlock_irq(&b->irq_lock);
}
+
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p)
+{
+ struct intel_breadcrumbs *b;
+
+ b = engine->breadcrumbs;
+ if (!b)
+ return;
+
+ drm_printf(p, "IRQ: %s\n", enableddisabled(b->irq_armed));
+ if (!list_empty(&b->signalers))
+ print_signals(b, p);
+}
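
The irq lifecycle is now driven entirely by the signalers list rather than by explicit engine park/unpark calls: the irq is armed when the first signaling context is added and disarmed when the irq worker finds the list empty. Condensed from the hunks above (sketch):

    /* add_signaling_context(): arm on the empty -> non-empty edge */
    list_add_tail(&ce->signal_link, &b->signalers);
    if (list_is_first(&ce->signal_link, &b->signalers))
            __intel_breadcrumbs_arm_irq(b);

    /* signal_irq_work(): disarm once no signalers remain */
    if (list_empty(&b->signalers))
            __intel_breadcrumbs_disarm_irq(b);
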
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
new file mode 100644
index 000000000000..ed3d1deabfbd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BREADCRUMBS__
+#define __INTEL_BREADCRUMBS__
+
+#include <linux/irq_work.h>
+
+#include "intel_engine_types.h"
+
+struct drm_printer;
+struct i915_request;
+struct intel_breadcrumbs;
+
+struct intel_breadcrumbs *
+intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
+void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
+
+void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
+void intel_breadcrumbs_park(struct intel_breadcrumbs *b);
+
+static inline void
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+{
+ irq_work_queue(&engine->breadcrumbs->irq_work);
+}
+
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p);
+
+bool i915_request_enable_breadcrumb(struct i915_request *request);
+void i915_request_cancel_breadcrumb(struct i915_request *request);
+
+#endif /* __INTEL_BREADCRUMBS__ */
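
Callers switch from an intel_breadcrumbs struct embedded in the engine to a heap allocation owned through this header; as the intel_engine_cs.c hunks further below show, setup and teardown reduce to (outline):

    /* engine_setup_common() */
    engine->breadcrumbs = intel_breadcrumbs_create(engine);
    if (!engine->breadcrumbs)
            return -ENOMEM;

    /* intel_engine_cleanup_common() */
    intel_breadcrumbs_free(engine->breadcrumbs);
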
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
new file mode 100644
index 000000000000..8e53b9942695
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BREADCRUMBS_TYPES__
+#define __INTEL_BREADCRUMBS_TYPES__
+
+#include <linux/irq_work.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, then reducing
+ * the overhead of waking that client is much preferred.
+ */
+struct intel_breadcrumbs {
+ spinlock_t irq_lock; /* protects the lists used in hardirq context */
+
+ /* Not all breadcrumbs are attached to physical HW */
+ struct intel_engine_cs *irq_engine;
+
+ struct list_head signalers;
+ struct list_head signaled_requests;
+
+ struct irq_work irq_work; /* for use from inside irq_lock */
+
+ unsigned int irq_enabled;
+
+ bool irq_armed;
+};
+
+#endif /* __INTEL_BREADCRUMBS_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 52db2bde44a3..92a3f25c4006 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -93,57 +93,210 @@ static void intel_context_active_release(struct intel_context *ce)
i915_active_release(&ce->active);
}
-int __intel_context_do_pin(struct intel_context *ce)
+static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
+{
+ unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
+ int err;
+
+ err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
+ if (err)
+ return err;
+
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unpin;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ i915_vma_make_unshrinkable(vma);
+ vma->obj->mm.dirty = true;
+
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static void __context_unpin_state(struct i915_vma *vma)
+{
+ i915_vma_make_shrinkable(vma);
+ i915_active_release(&vma->active);
+ __i915_vma_unpin(vma);
+}
+
+static int __ring_active(struct intel_ring *ring,
+ struct i915_gem_ww_ctx *ww)
+{
+ int err;
+
+ err = intel_ring_pin(ring, ww);
+ if (err)
+ return err;
+
+ err = i915_active_acquire(&ring->vma->active);
+ if (err)
+ goto err_pin;
+
+ return 0;
+
+err_pin:
+ intel_ring_unpin(ring);
+ return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+ i915_active_release(&ring->vma->active);
+ intel_ring_unpin(ring);
+}
+
+static int intel_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
{
int err;
+ CE_TRACE(ce, "active\n");
+
+ err = __ring_active(ce->ring, ww);
+ if (err)
+ return err;
+
+ err = intel_timeline_pin(ce->timeline, ww);
+ if (err)
+ goto err_ring;
+
+ if (!ce->state)
+ return 0;
+
+ err = __context_pin_state(ce->state, ww);
+ if (err)
+ goto err_timeline;
+
+ return 0;
+
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
+err_ring:
+ __ring_retire(ce->ring);
+ return err;
+}
+
+static void intel_context_post_unpin(struct intel_context *ce)
+{
+ if (ce->state)
+ __context_unpin_state(ce->state);
+
+ intel_timeline_unpin(ce->timeline);
+ __ring_retire(ce->ring);
+}
+
+int __intel_context_do_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ bool handoff = false;
+ void *vaddr;
+ int err = 0;
+
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
return err;
}
- err = i915_active_acquire(&ce->active);
+ /*
+ * We always pin the context/ring/timeline here, to ensure a pin
+ * refcount for __intel_context_active(), which prevents a lock
+ * inversion of ce->pin_mutex vs dma_resv_lock().
+ */
+
+ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
+ if (!err && ce->ring->vma->obj)
+ err = i915_gem_object_lock(ce->ring->vma->obj, ww);
+ if (!err && ce->state)
+ err = i915_gem_object_lock(ce->state->obj, ww);
+ if (!err)
+ err = intel_context_pre_pin(ce, ww);
if (err)
return err;
- if (mutex_lock_interruptible(&ce->pin_mutex)) {
- err = -EINTR;
- goto out_release;
- }
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ goto err_ctx_unpin;
+
+ err = ce->ops->pre_pin(ce, ww, &vaddr);
+ if (err)
+ goto err_release;
+
+ err = mutex_lock_interruptible(&ce->pin_mutex);
+ if (err)
+ goto err_post_unpin;
if (unlikely(intel_context_is_closed(ce))) {
err = -ENOENT;
- goto out_unlock;
+ goto err_unlock;
}
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
- goto out_unlock;
+ goto err_unlock;
- err = ce->ops->pin(ce);
- if (unlikely(err))
- goto err_active;
+ err = ce->ops->pin(ce, vaddr);
+ if (err) {
+ intel_context_active_release(ce);
+ goto err_unlock;
+ }
CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
i915_ggtt_offset(ce->ring->vma),
ce->ring->head, ce->ring->tail);
+ handoff = true;
smp_mb__before_atomic(); /* flush pin before it is visible */
atomic_inc(&ce->pin_count);
}
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- goto out_unlock;
-err_active:
- intel_context_active_release(ce);
-out_unlock:
+err_unlock:
mutex_unlock(&ce->pin_mutex);
-out_release:
+err_post_unpin:
+ if (!handoff)
+ ce->ops->post_unpin(ce);
+err_release:
i915_active_release(&ce->active);
+err_ctx_unpin:
+ intel_context_post_unpin(ce);
+
+ /*
+ * Unlock the hwsp_ggtt object since it's shared.
+ * In principle we can unlock all the global state locked above
+ * since it's pinned and doesn't need fencing, and will
+ * thus remain resident until it is explicitly unpinned.
+ */
+ i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
+
+ return err;
+}
+
+int __intel_context_do_pin(struct intel_context *ce)
+{
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = __intel_context_do_pin_ww(ce, &ww);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
return err;
}
@@ -154,6 +307,7 @@ void intel_context_unpin(struct intel_context *ce)
CE_TRACE(ce, "unpin\n");
ce->ops->unpin(ce);
+ ce->ops->post_unpin(ce);
/*
* Once released, we may asynchronously drop the active reference.
@@ -166,65 +320,6 @@ void intel_context_unpin(struct intel_context *ce)
intel_context_put(ce);
}
-static int __context_pin_state(struct i915_vma *vma)
-{
- unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
- int err;
-
- err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
- if (err)
- return err;
-
- err = i915_active_acquire(&vma->active);
- if (err)
- goto err_unpin;
-
- /*
- * And mark it as a globally pinned object to let the shrinker know
- * it cannot reclaim the object until we release it.
- */
- i915_vma_make_unshrinkable(vma);
- vma->obj->mm.dirty = true;
-
- return 0;
-
-err_unpin:
- i915_vma_unpin(vma);
- return err;
-}
-
-static void __context_unpin_state(struct i915_vma *vma)
-{
- i915_vma_make_shrinkable(vma);
- i915_active_release(&vma->active);
- __i915_vma_unpin(vma);
-}
-
-static int __ring_active(struct intel_ring *ring)
-{
- int err;
-
- err = intel_ring_pin(ring);
- if (err)
- return err;
-
- err = i915_active_acquire(&ring->vma->active);
- if (err)
- goto err_pin;
-
- return 0;
-
-err_pin:
- intel_ring_unpin(ring);
- return err;
-}
-
-static void __ring_retire(struct intel_ring *ring)
-{
- i915_active_release(&ring->vma->active);
- intel_ring_unpin(ring);
-}
-
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
@@ -235,48 +330,29 @@ static void __intel_context_retire(struct i915_active *active)
intel_context_get_avg_runtime_ns(ce));
set_bit(CONTEXT_VALID_BIT, &ce->flags);
- if (ce->state)
- __context_unpin_state(ce->state);
-
- intel_timeline_unpin(ce->timeline);
- __ring_retire(ce->ring);
-
+ intel_context_post_unpin(ce);
intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- int err;
-
- CE_TRACE(ce, "active\n");
intel_context_get(ce);
- err = __ring_active(ce->ring);
- if (err)
- goto err_put;
+ /* everything should already be activated by intel_context_pre_pin() */
+ GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
+ __intel_ring_pin(ce->ring);
- err = intel_timeline_pin(ce->timeline);
- if (err)
- goto err_ring;
+ __intel_timeline_pin(ce->timeline);
- if (!ce->state)
- return 0;
-
- err = __context_pin_state(ce->state);
- if (err)
- goto err_timeline;
+ if (ce->state) {
+ GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
+ __i915_vma_pin(ce->state);
+ i915_vma_make_unshrinkable(ce->state);
+ }
return 0;
-
-err_timeline:
- intel_timeline_unpin(ce->timeline);
-err_ring:
- __ring_retire(ce->ring);
-err_put:
- intel_context_put(ce);
- return err;
}
void
@@ -382,15 +458,38 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
+ struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err;
- err = intel_context_pin(ce);
- if (unlikely(err))
- return ERR_PTR(err);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = intel_context_pin_ww(ce, &ww);
+ if (!err) {
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+ } else if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ rq = ERR_PTR(err);
+ } else {
+ rq = ERR_PTR(err);
+ }
+
+ i915_gem_ww_ctx_fini(&ww);
- rq = i915_request_create(ce);
- intel_context_unpin(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ /*
+ * timeline->mutex should be the inner lock, but is used as the outer lock.
+ * Hack around this to shut up lockdep in selftests.
+ */
+ lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
+ mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
+ mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+ rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
return rq;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 07be021882cc..fda2eba81e22 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -25,6 +25,8 @@
##__VA_ARGS__); \
} while (0)
+struct i915_gem_ww_ctx;
+
void intel_context_init(struct intel_context *ce,
struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);
@@ -81,6 +83,8 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
}
int __intel_context_do_pin(struct intel_context *ce);
+int __intel_context_do_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww);
static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
@@ -95,6 +99,15 @@ static inline int intel_context_pin(struct intel_context *ce)
return __intel_context_do_pin(ce);
}
+static inline int intel_context_pin_ww(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (likely(intel_context_pin_if_active(ce)))
+ return 0;
+
+ return __intel_context_do_pin_ww(ce, ww);
+}
+
static inline void __intel_context_pin(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_pinned(ce));
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 4954b0df4864..552cb57a2e8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -23,6 +23,7 @@
DECLARE_EWMA(runtime, 3, 8);
struct i915_gem_context;
+struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_context;
struct intel_ring;
@@ -30,8 +31,10 @@ struct intel_ring;
struct intel_context_ops {
int (*alloc)(struct intel_context *ce);
- int (*pin)(struct intel_context *ce);
+ int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
+ int (*pin)(struct intel_context *ce, void *vaddr);
void (*unpin)(struct intel_context *ce);
+ void (*post_unpin)(struct intel_context *ce);
void (*enter)(struct intel_context *ce);
void (*exit)(struct intel_context *ce);
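
The pin hook is split three ways: pre_pin() does everything that may fail or need the ww context and hands back the mapped vaddr, pin() merely installs it, and post_unpin() undoes pre_pin() after unpin(). A backend therefore wires up paired hooks; a sketch with hypothetical example_* names (the real gen backends follow in later hunks of the series):

    static const struct intel_context_ops example_context_ops = {
            .alloc      = example_context_alloc,
            .pre_pin    = example_context_pre_pin,    /* fallible, ww-aware */
            .pin        = example_context_pin,        /* consumes vaddr */
            .unpin      = example_context_unpin,
            .post_unpin = example_context_post_unpin, /* mirrors pre_pin */
            .enter      = example_context_enter,
            .exit       = example_context_exit,
    };
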
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index a9249a23903a..760fefdfe392 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -223,26 +223,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
void intel_engine_init_execlists(struct intel_engine_cs *engine);
-void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
-
-static inline void
-intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
-{
- irq_work_queue(&engine->breadcrumbs.irq_work);
-}
-
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-
-void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
- struct intel_context *ce);
-
-void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
- struct drm_printer *p);
-
static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
memset(batch, 0, 6 * sizeof(u32));
@@ -265,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u
}
static inline u32 *
-__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
{
- /* We're using qword write, offset should be aligned to 8 bytes. */
- GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
- /* w/a for post sync ops following a GPGPU operation we
- * need a prior CS_STALL, which is emitted by the flush
- * following the batch.
- */
*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
- *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
- *cs++ = gtt_offset;
+ *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+ *cs++ = offset;
*cs++ = 0;
*cs++ = value;
- /* We're thrashing one dword of HWS. */
- *cs++ = 0;
+ *cs++ = 0; /* We're thrashing one extra dword. */
return cs;
}
@@ -288,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 f
static inline u32*
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
- return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ 0,
+ flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
}
static inline u32*
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
- return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+ /* We're using qword write, offset should be aligned to 8 bytes. */
+ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+ return __gen8_emit_write_rcs(cs,
+ value,
+ gtt_offset,
+ flags0,
+ flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+ *cs++ = (MI_FLUSH_DW + 1) | flags;
+ *cs++ = gtt_offset;
+ *cs++ = 0;
+ *cs++ = value;
+
+ return cs;
}
static inline u32 *
@@ -305,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
- *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = value;
-
- return cs;
+ return __gen8_emit_flush_dw(cs,
+ value,
+ gtt_offset | MI_FLUSH_DW_USE_GTT,
+ flags | MI_FLUSH_DW_OP_STOREDW);
}
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
@@ -357,4 +352,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
return intel_engine_has_preemption(engine);
}
+static inline bool
+intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ return false;
+
+ return READ_ONCE(engine->props.heartbeat_interval_ms);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
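
After the refactor, __gen8_emit_write_rcs() is the one generic six-dword PIPE_CONTROL emitter, and the GGTT specifics (the qword alignment check and PIPE_CONTROL_GLOBAL_GTT_IVB) live only in the thin wrappers. A caller-side sketch of a seqno write through the gen8 wrapper (illustrative; mirrors the usual breadcrumb emission, and hwsp_offset here is an assumed, 8-byte-aligned GGTT offset):

    u32 *cs;

    cs = intel_ring_begin(rq, 6);   /* GFX_OP_PIPE_CONTROL(6): six dwords */
    if (IS_ERR(cs))
            return PTR_ERR(cs);

    cs = gen8_emit_ggtt_write_rcs(cs, rq->fence.seqno,
                                  hwsp_offset,  /* assumption, see above */
                                  PIPE_CONTROL_CS_STALL);
    intel_ring_advance(rq, cs);
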
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 26087dd79782..efdeb7b7b2a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,6 +28,7 @@
#include "i915_drv.h"
+#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
@@ -370,7 +371,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
- engine->gt->info.vdbox_sfc_access & engine->mask) ||
+ (engine->gt->info.vdbox_sfc_access &
+ BIT(engine->instance))) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@@ -634,7 +636,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
else
flags = PIN_HIGH;
- return i915_ggtt_pin(vma, 0, flags);
+ return i915_ggtt_pin(vma, NULL, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -700,8 +702,13 @@ static int engine_setup_common(struct intel_engine_cs *engine)
if (err)
return err;
+ engine->breadcrumbs = intel_breadcrumbs_create(engine);
+ if (!engine->breadcrumbs) {
+ err = -ENOMEM;
+ goto err_status;
+ }
+
intel_engine_init_active(engine, ENGINE_PHYSICAL);
- intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
@@ -716,6 +723,10 @@ static int engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_ctx_wa(engine);
return 0;
+
+err_status:
+ cleanup_status_page(engine);
+ return err;
}
struct measure_breadcrumb {
@@ -785,9 +796,11 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
}
static struct intel_context *
-create_kernel_context(struct intel_engine_cs *engine)
+create_pinned_context(struct intel_engine_cs *engine,
+ unsigned int hwsp,
+ struct lock_class_key *key,
+ const char *name)
{
- static struct lock_class_key kernel;
struct intel_context *ce;
int err;
@@ -796,6 +809,7 @@ create_kernel_context(struct intel_engine_cs *engine)
return ce;
__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+ ce->timeline = page_pack_bits(NULL, hwsp);
err = intel_context_pin(ce); /* perma-pin so it is always available */
if (err) {
@@ -809,11 +823,20 @@ create_kernel_context(struct intel_engine_cs *engine)
* should we need to inject GPU operations during their request
* construction.
*/
- lockdep_set_class(&ce->timeline->mutex, &kernel);
+ lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
return ce;
}
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+
+ return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
+ &kernel, "kernel_context");
+}
+
/**
 * intel_engines_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -902,9 +925,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
cleanup_status_page(engine);
+ intel_breadcrumbs_free(engine->breadcrumbs);
intel_engine_fini_retire(engine);
- intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
if (engine->default_state)
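
[Note] engine_setup_common() now has a fallible allocation after init_status_page(), so the patch adds the classic goto-based unwind: one label per acquired resource, released in reverse order. A self-contained userspace sketch of the same shape (names and allocations are placeholders, not the driver's):

    #include <errno.h>
    #include <stdlib.h>

    struct engine {
            void *status_page;
            void *breadcrumbs;
    };

    static int init_status_page(struct engine *e)
    {
            e->status_page = malloc(64);
            return e->status_page ? 0 : -ENOMEM;
    }

    static void cleanup_status_page(struct engine *e)
    {
            free(e->status_page);
    }

    static int engine_setup(struct engine *e)
    {
            int err;

            err = init_status_page(e);
            if (err)
                    return err;

            e->breadcrumbs = malloc(64); /* stands in for intel_breadcrumbs_create() */
            if (!e->breadcrumbs) {
                    err = -ENOMEM;
                    goto err_status;
            }

            return 0;

    err_status:
            cleanup_status_page(e);     /* unwind in reverse order of setup */
            return err;
    }
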
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 8ffdf676c0a0..5067d0524d4b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
}
+static int __intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ lockdep_assert_held(&ce->timeline->mutex);
+ GEM_BUG_ON(!intel_engine_has_preemption(engine));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+ idle_pulse(engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+
+ return 0;
+}
+
+static unsigned long set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ unsigned long old;
+
+ old = xchg(&engine->props.heartbeat_interval_ms, delay);
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+
+ return old;
+}
+
int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
unsigned long delay)
{
- int err;
+ struct intel_context *ce = engine->kernel_context;
+ int err = 0;
- /* Send one last pulse before to cleanup persistent hogs */
- if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
- err = intel_engine_pulse(engine);
- if (err)
- return err;
- }
+ if (!delay && !intel_engine_has_preempt_reset(engine))
+ return -ENODEV;
+
+ intel_engine_pm_get(engine);
+
+ err = mutex_lock_interruptible(&ce->timeline->mutex);
+ if (err)
+ goto out_rpm;
- WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+ if (delay != engine->props.heartbeat_interval_ms) {
+ unsigned long saved = set_heartbeat(engine, delay);
- if (intel_engine_pm_get_if_awake(engine)) {
- if (delay)
- intel_engine_unpark_heartbeat(engine);
- else
- intel_engine_park_heartbeat(engine);
- intel_engine_pm_put(engine);
+ /* recheck current execution */
+ if (intel_engine_has_preemption(engine)) {
+ err = __intel_engine_pulse(engine);
+ if (err)
+ set_heartbeat(engine, saved);
+ }
}
- return 0;
+ mutex_unlock(&ce->timeline->mutex);
+
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
}
int intel_engine_pulse(struct intel_engine_cs *engine)
{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
struct intel_context *ce = engine->kernel_context;
- struct i915_request *rq;
int err;
if (!intel_engine_has_preemption(engine))
@@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- if (mutex_lock_interruptible(&ce->timeline->mutex)) {
- err = -EINTR;
- goto out_rpm;
- }
-
- intel_context_enter(ce);
- rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
- intel_context_exit(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_unlock;
+ err = -EINTR;
+ if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = __intel_engine_pulse(engine);
+ mutex_unlock(&ce->timeline->mutex);
}
- __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
- idle_pulse(engine, rq);
-
- __i915_request_commit(rq);
- __i915_request_queue(rq, &attr);
- GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
- err = 0;
-
-out_unlock:
- mutex_unlock(&ce->timeline->mutex);
-out_rpm:
intel_engine_pm_put(engine);
return err;
}
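
[Note] set_heartbeat() above relies on xchg() so the interval update is atomic and the caller gets the previous value back, letting intel_engine_set_heartbeat() roll back if the confirming pulse fails. A minimal userspace sketch of that swap-and-restore pattern, using C11 atomics and a placeholder pulse:

    #include <stdatomic.h>

    static _Atomic unsigned long heartbeat_ms;

    static int send_pulse(void)
    {
            return 0;       /* placeholder for __intel_engine_pulse() */
    }

    /* Swap in the new interval and hand back the previous one, like xchg(). */
    static unsigned long set_interval(unsigned long delay)
    {
            return atomic_exchange(&heartbeat_ms, delay);
    }

    static int try_set_interval(unsigned long delay)
    {
            unsigned long saved = set_interval(delay);
            int err = send_pulse();

            if (err)
                    set_interval(saved); /* pulse failed: restore the old value */
            return err;
    }
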
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 8ec3eecf3e39..f7b2e07e2229 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
+#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
@@ -247,7 +248,7 @@ static int __engine_park(struct intel_wakeref *wf)
call_idle_barriers(engine); /* cleanup after wedging */
intel_engine_park_heartbeat(engine);
- intel_engine_disarm_breadcrumbs(engine);
+ intel_breadcrumbs_park(engine->breadcrumbs);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 8de92fd7d392..ee6312601c56 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -22,6 +22,7 @@
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
+#include "intel_breadcrumbs_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
@@ -277,7 +278,7 @@ struct intel_engine_execlists {
*
* Note these register may be either mmio or HWSP shadow.
*/
- u32 *csb_status;
+ u64 *csb_status;
/**
* @csb_size: context status buffer FIFO size
@@ -373,34 +374,8 @@ struct intel_engine_cs {
*/
struct ewma__engine_latency latency;
- /* Rather than have every client wait upon all user interrupts,
- * with the herd waking after every interrupt and each doing the
- * heavyweight seqno dance, we delegate the task (of being the
- * bottom-half of the user interrupt) to the first client. After
- * every interrupt, we wake up one client, who does the heavyweight
- * coherent seqno read and either goes back to sleep (if incomplete),
- * or wakes up all the completed clients in parallel, before then
- * transferring the bottom-half status to the next client in the queue.
- *
- * Compared to walking the entire list of waiters in a single dedicated
- * bottom-half, we reduce the latency of the first waiter by avoiding
- * a context switch, but incur additional coherent seqno reads when
- * following the chain of request breadcrumbs. Since it is most likely
- * that we have a single client waiting on each seqno, then reducing
- * the overhead of waking that client is much preferred.
- */
- struct intel_breadcrumbs {
- spinlock_t irq_lock;
- struct list_head signalers;
-
- struct list_head signaled_requests;
-
- struct irq_work irq_work; /* for use from inside irq_lock */
-
- unsigned int irq_enabled;
-
- bool irq_armed;
- } breadcrumbs;
+ /* Keep track of all the seqnos used, a trail of breadcrumbs */
+ struct intel_breadcrumbs *breadcrumbs;
struct intel_engine_pmu {
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 99e28d9021e8..81c05f551b9c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -78,8 +78,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
int ret;
- stash_init(&i915->mm.wc_stash);
-
/*
* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
@@ -232,7 +230,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
/* Fill the allocated but "unused" space beyond the end of the buffer */
while (gte < end)
- gen8_set_pte(gte++, vm->scratch[0].encode);
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -283,7 +281,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
/* Fill the allocated but "unused" space beyond the end of the buffer */
while (gte < end)
- iowrite32(vm->scratch[0].encode, gte++);
+ iowrite32(vm->scratch[0]->encode, gte++);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -303,7 +301,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0].encode;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -401,7 +399,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->scratch[0].encode;
+ scratch_pte = vm->scratch[0]->encode;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
}
@@ -436,16 +434,17 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
-static int ggtt_bind_vma(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
- return 0;
+ return;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
@@ -454,8 +453,6 @@ static int ggtt_bind_vma(struct i915_address_space *vm,
vm->insert_entries(vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- return 0;
}
static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
@@ -568,31 +565,25 @@ err:
return ret;
}
-static int aliasing_gtt_bind_vma(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
- int ret;
/* Currently applicable only to VLV */
pte_flags = 0;
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias;
-
- ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags);
- if (ret)
- return ret;
- }
+ if (flags & I915_VMA_LOCAL_BIND)
+ ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
+ stash, vma, cache_level, flags);
if (flags & I915_VMA_GLOBAL_BIND)
vm->insert_entries(vm, vma, cache_level, pte_flags);
-
- return 0;
}
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
@@ -607,6 +598,7 @@ static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
+ struct i915_vm_pt_stash stash = {};
struct i915_ppgtt *ppgtt;
int err;
@@ -619,15 +611,21 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
}
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
+ if (err)
+ goto err_ppgtt;
+
+ err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ if (err)
+ goto err_stash;
+
/*
* Note we only pre-allocate as far as the end of the global
* GTT. On 48b / 4-level page-tables, the difference is very,
* very significant! We have to preallocate as GVT/vgpu does
* not like the page directory disappearing.
*/
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
- if (err)
- goto err_ppgtt;
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
ggtt->alias = ppgtt;
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
@@ -638,8 +636,11 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
return 0;
+err_stash:
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
i915_vm_put(&ppgtt->vm);
return err;
@@ -715,18 +716,11 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- struct pagevec *pvec;
fini_aliasing_ppgtt(ggtt);
intel_ggtt_fini_fences(ggtt);
ggtt_cleanup_hw(ggtt);
-
- pvec = &i915->mm.wc_stash.pvec;
- if (pvec->nr) {
- set_pages_array_wb(pvec->pages, pvec->nr);
- __pagevec_release(pvec);
- }
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -789,7 +783,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->vm);
if (ret) {
drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -797,8 +791,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
- ggtt->vm.scratch[0].encode =
- ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
I915_CACHE_NONE, 0);
return 0;
@@ -824,7 +818,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm);
+ free_scratch(vm);
}
static struct resource pci_resource(struct pci_dev *pdev, int bar)
@@ -852,6 +846,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
@@ -1000,6 +996,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
ggtt->vm.clear_range = gen6_ggtt_clear_range;
@@ -1050,6 +1048,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->gmadr =
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+
ggtt->do_idle_maps = needs_idle_maps(i915);
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
@@ -1165,11 +1165,6 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-static unsigned int clear_bind(struct i915_vma *vma)
-{
- return atomic_fetch_and(~I915_VMA_BIND_MASK, &vma->flags);
-}
-
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
@@ -1187,11 +1182,13 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- unsigned int was_bound = clear_bind(vma);
+ unsigned int was_bound =
+ atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
- WARN_ON(i915_vma_bind(vma,
- obj ? obj->cache_level : 0,
- was_bound, NULL));
+ GEM_BUG_ON(!was_bound);
+ vma->ops->bind_vma(&ggtt->vm, NULL, vma,
+ obj ? obj->cache_level : 0,
+ was_bound);
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
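
[Note] The init_aliasing_ppgtt() hunk above shows the new allocate-then-commit flow: everything that can fail is gathered into a page-table stash up front, after which allocate_va_range() becomes infallible (it now returns void). In outline, using the stash API the patch itself introduces (simplified, not compilable standalone):

    int example_preallocate(struct i915_ppgtt *ppgtt, u64 total)
    {
            struct i915_vm_pt_stash stash = {};
            int err;

            err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, total);
            if (err)
                    return err;

            err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
            if (err)
                    goto out;

            /* cannot fail: every page table it needs is already in the stash */
            ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, total);
            err = 0;
    out:
            i915_vm_free_pt_stash(&ppgtt->vm, &stash); /* releases any leftovers */
            return err;
    }
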
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index e0755f1a904b..39b428c5049c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -356,7 +356,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
goto err_unref;
}
- ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
+ ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
if (ret)
goto err_unref;
@@ -406,21 +406,20 @@ static int __engines_record_defaults(struct intel_gt *gt)
/* We must be able to switch to something! */
GEM_BUG_ON(!engine->kernel_context);
- err = intel_renderstate_init(&so, engine);
- if (err)
- goto out;
-
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
- rq = intel_context_create_request(ce);
+ err = intel_renderstate_init(&so, ce);
+ if (err)
+ goto err;
+
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- intel_context_put(ce);
- goto out;
+ goto err_fini;
}
err = intel_engine_emit_ctx_wa(rq);
@@ -434,9 +433,13 @@ static int __engines_record_defaults(struct intel_gt *gt)
err_rq:
requests[id] = i915_request_get(rq);
i915_request_add(rq);
- intel_renderstate_fini(&so);
- if (err)
+err_fini:
+ intel_renderstate_fini(&so, ce);
+err:
+ if (err) {
+ intel_context_put(ce);
goto out;
+ }
}
/* Flush the default context image to memory, and enable powersaving. */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 418ae184cecf..104cb30e8c13 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -35,39 +35,65 @@ static void node_free(struct intel_gt_buffer_pool_node *node)
{
i915_gem_object_put(node->obj);
i915_active_fini(&node->active);
- kfree(node);
+ kfree_rcu(node, rcu);
}
-static void pool_free_work(struct work_struct *wrk)
+static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
- struct intel_gt_buffer_pool *pool =
- container_of(wrk, typeof(*pool), work.work);
- struct intel_gt_buffer_pool_node *node, *next;
- unsigned long old = jiffies - HZ;
+ struct intel_gt_buffer_pool_node *node, *stale = NULL;
bool active = false;
- LIST_HEAD(stale);
int n;
/* Free buffers that have not been used in the past second */
- spin_lock_irq(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
struct list_head *list = &pool->cache_list[n];
- /* Most recent at head; oldest at tail */
- list_for_each_entry_safe_reverse(node, next, list, link) {
- if (time_before(node->age, old))
- break;
+ if (list_empty(list))
+ continue;
+
+ if (spin_trylock_irq(&pool->lock)) {
+ struct list_head *pos;
+
+ /* Most recent at head; oldest at tail */
+ list_for_each_prev(pos, list) {
+ unsigned long age;
+
+ node = list_entry(pos, typeof(*node), link);
+
+ age = READ_ONCE(node->age);
+ if (!age || jiffies - age < keep)
+ break;
+
+ /* Check we are the first to claim this node */
+ if (!xchg(&node->age, 0))
+ break;
- list_move(&node->link, &stale);
+ node->free = stale;
+ stale = node;
+ }
+ if (!list_is_last(pos, list))
+ __list_del_many(pos, list);
+
+ spin_unlock_irq(&pool->lock);
}
+
active |= !list_empty(list);
}
- spin_unlock_irq(&pool->lock);
- list_for_each_entry_safe(node, next, &stale, link)
+ while ((node = stale)) {
+ stale = stale->free;
node_free(node);
+ }
+
+ return active;
+}
+
+static void pool_free_work(struct work_struct *wrk)
+{
+ struct intel_gt_buffer_pool *pool =
+ container_of(wrk, typeof(*pool), work.work);
- if (active)
+ if (pool_free_older_than(pool, HZ))
schedule_delayed_work(&pool->work,
round_jiffies_up_relative(HZ));
}
@@ -108,9 +134,10 @@ static void pool_retire(struct i915_active *ref)
/* Return this object to the shrinker pool */
i915_gem_object_make_purgeable(node->obj);
+ GEM_BUG_ON(node->age);
spin_lock_irqsave(&pool->lock, flags);
- node->age = jiffies;
- list_add(&node->link, list);
+ list_add_rcu(&node->link, list);
+ WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
spin_unlock_irqrestore(&pool->lock, flags);
schedule_delayed_work(&pool->work,
@@ -129,6 +156,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
if (!node)
return ERR_PTR(-ENOMEM);
+ node->age = 0;
node->pool = pool;
i915_active_init(&node->active, pool_active, pool_retire);
@@ -151,20 +179,30 @@ intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
struct list_head *list;
- unsigned long flags;
int ret;
size = PAGE_ALIGN(size);
list = bucket_for_size(pool, size);
- spin_lock_irqsave(&pool->lock, flags);
- list_for_each_entry(node, list, link) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, list, link) {
+ unsigned long age;
+
if (node->obj->base.size < size)
continue;
- list_del(&node->link);
- break;
+
+ age = READ_ONCE(node->age);
+ if (!age)
+ continue;
+
+ if (cmpxchg(&node->age, age, 0) == age) {
+ spin_lock_irq(&pool->lock);
+ list_del_rcu(&node->link);
+ spin_unlock_irq(&pool->lock);
+ break;
+ }
}
- spin_unlock_irqrestore(&pool->lock, flags);
+ rcu_read_unlock();
if (&node->link == list) {
node = node_create(pool, size);
@@ -192,28 +230,13 @@ void intel_gt_init_buffer_pool(struct intel_gt *gt)
INIT_DELAYED_WORK(&pool->work, pool_free_work);
}
-static void pool_free_imm(struct intel_gt_buffer_pool *pool)
-{
- int n;
-
- spin_lock_irq(&pool->lock);
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- struct intel_gt_buffer_pool_node *node, *next;
- struct list_head *list = &pool->cache_list[n];
-
- list_for_each_entry_safe(node, next, list, link)
- node_free(node);
- INIT_LIST_HEAD(list);
- }
- spin_unlock_irq(&pool->lock);
-}
-
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
do {
- pool_free_imm(pool);
+ while (pool_free_older_than(pool, 0))
+ ;
} while (cancel_delayed_work_sync(&pool->work));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
index e28bdda771ed..bcf1658c9633 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
@@ -25,7 +25,11 @@ struct intel_gt_buffer_pool_node {
struct i915_active active;
struct drm_i915_gem_object *obj;
struct list_head link;
- struct intel_gt_buffer_pool *pool;
+ union {
+ struct intel_gt_buffer_pool *pool;
+ struct intel_gt_buffer_pool_node *free;
+ struct rcu_head rcu;
+ };
unsigned long age;
};
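
[Note] The union added above works because the three members are live at mutually exclusive stages of the node's life: owned by the pool while cached, chained on a private reclaim list while stale, and handed to kfree_rcu() once freed. A userspace analogue of overlapping such lifecycle fields (the callback struct merely stands in for rcu_head):

    struct pool;                            /* opaque owner type */
    struct cb { void (*fn)(struct cb *); }; /* stands in for struct rcu_head */

    struct node {
            union {
                    struct pool *pool;  /* while cached in the pool */
                    struct node *free;  /* while chained for reclaim */
                    struct cb rcu;      /* while awaiting deferred free */
            };
    };
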
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index b05da68e52f4..257063a57101 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -8,6 +8,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 2a72cce63fd9..3f1114b58b01 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -11,160 +11,24 @@
#include "intel_gt.h"
#include "intel_gtt.h"
-void stash_init(struct pagestash *stash)
+struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
- pagevec_init(&stash->pvec);
- spin_lock_init(&stash->lock);
-}
-
-static struct page *stash_pop_page(struct pagestash *stash)
-{
- struct page *page = NULL;
-
- spin_lock(&stash->lock);
- if (likely(stash->pvec.nr))
- page = stash->pvec.pages[--stash->pvec.nr];
- spin_unlock(&stash->lock);
-
- return page;
-}
-
-static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
-{
- unsigned int nr;
-
- spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
-
- nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
- memcpy(stash->pvec.pages + stash->pvec.nr,
- pvec->pages + pvec->nr - nr,
- sizeof(pvec->pages[0]) * nr);
- stash->pvec.nr += nr;
-
- spin_unlock(&stash->lock);
-
- pvec->nr -= nr;
-}
-
-static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
-{
- struct pagevec stack;
- struct page *page;
-
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- page = stash_pop_page(&vm->free_pages);
- if (page)
- return page;
-
- if (!vm->pt_kmap_wc)
- return alloc_page(gfp);
-
- /* Look in our global stash of WC pages... */
- page = stash_pop_page(&vm->i915->mm.wc_stash);
- if (page)
- return page;
-
- /*
- * Otherwise batch allocate pages to amortize cost of set_pages_wc.
- *
- * We have to be careful as page allocation may trigger the shrinker
- * (via direct reclaim) which will fill up the WC stash underneath us.
- * So we add our WB pages into a temporary pvec on the stack and merge
- * them into the WC stash after all the allocations are complete.
- */
- pagevec_init(&stack);
- do {
- struct page *page;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- break;
-
- stack.pages[stack.nr++] = page;
- } while (pagevec_space(&stack));
-
- if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
- page = stack.pages[--stack.nr];
-
- /* Merge spare WC pages to the global stash */
- if (stack.nr)
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
-
- /* Push any surplus WC pages onto the local VM stash */
- if (stack.nr)
- stash_push_pagevec(&vm->free_pages, &stack);
- }
-
- /* Return unwanted leftovers */
- if (unlikely(stack.nr)) {
- WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
- __pagevec_release(&stack);
- }
-
- return page;
+ return i915_gem_object_create_internal(vm->i915, sz);
}
-static void vm_free_pages_release(struct i915_address_space *vm,
- bool immediate)
+int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- struct pagevec *pvec = &vm->free_pages.pvec;
- struct pagevec stack;
-
- lockdep_assert_held(&vm->free_pages.lock);
- GEM_BUG_ON(!pagevec_count(pvec));
-
- if (vm->pt_kmap_wc) {
- /*
- * When we use WC, first fill up the global stash and then
- * only if full immediately free the overflow.
- */
- stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
-
- /*
- * As we have made some room in the VM's free_pages,
- * we can wait for it to fill again. Unless we are
- * inside i915_address_space_fini() and must
- * immediately release the pages!
- */
- if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
- return;
+ int err;
- /*
- * We have to drop the lock to allow ourselves to sleep,
- * so take a copy of the pvec and clear the stash for
- * others to use it as we sleep.
- */
- stack = *pvec;
- pagevec_reinit(pvec);
- spin_unlock(&vm->free_pages.lock);
-
- pvec = &stack;
- set_pages_array_wb(pvec->pages, pvec->nr);
-
- spin_lock(&vm->free_pages.lock);
- }
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
- __pagevec_release(pvec);
-}
-
-static void vm_free_page(struct i915_address_space *vm, struct page *page)
-{
- /*
- * On !llc, we need to change the pages back to WB. We only do so
- * in bulk, so we rarely need to change the page attributes here,
- * but doing so requires a stop_machine() from deep inside arch/x86/mm.
- * To make detection of the possible sleep more likely, use an
- * unconditional might_sleep() for everybody.
- */
- might_sleep();
- spin_lock(&vm->free_pages.lock);
- while (!pagevec_space(&vm->free_pages.pvec))
- vm_free_pages_release(vm, false);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
- pagevec_add(&vm->free_pages.pvec, page);
- spin_unlock(&vm->free_pages.lock);
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
}
void __i915_vm_close(struct i915_address_space *vm)
@@ -194,14 +58,7 @@ void __i915_vm_close(struct i915_address_space *vm)
void i915_address_space_fini(struct i915_address_space *vm)
{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
drm_mm_takedown(&vm->mm);
-
mutex_destroy(&vm->mutex);
}
@@ -246,8 +103,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
drm_mm_init(&vm->mm, 0, vm->total);
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
- stash_init(&vm->free_pages);
-
INIT_LIST_HEAD(&vm->bound_list);
}
@@ -264,64 +119,50 @@ void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
-static int __setup_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- gfp_t gfp)
-{
- p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
- if (unlikely(!p->page))
- return -ENOMEM;
-
- p->daddr = dma_map_page_attrs(vm->dma,
- p->page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
- vm_free_page(vm, p->page);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
- return __setup_page_dma(vm, p, __GFP_HIGHMEM);
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return sg_dma_address(p->mm.pages->sgl);
}
-void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p)
+struct page *__px_page(struct drm_i915_gem_object *p)
{
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- vm_free_page(vm, p->page);
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return sg_page(p->mm.pages->sgl);
}
void
-fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
+fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
+ struct page *page = __px_page(p);
+ void *vaddr;
+
+ vaddr = kmap(page);
+ memset64(vaddr, val, count);
+ clflush_cache_range(vaddr, PAGE_SIZE);
+ kunmap(page);
}
-static void poison_scratch_page(struct page *page, unsigned long size)
+static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return;
+ struct sgt_iter sgt;
+ struct page *page;
+ u8 val;
- GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+ val = 0;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ val = POISON_FREE;
- do {
+ for_each_sgt_page(page, sgt, scratch->mm.pages) {
void *vaddr;
vaddr = kmap(page);
- memset(vaddr, POISON_FREE, PAGE_SIZE);
+ memset(vaddr, val, PAGE_SIZE);
kunmap(page);
-
- page = pfn_to_page(page_to_pfn(page) + 1);
- size -= PAGE_SIZE;
- } while (size);
+ }
}
-int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
+int setup_scratch_page(struct i915_address_space *vm)
{
unsigned long size;
@@ -338,21 +179,27 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_4lvl(vm) &&
- HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
size = I915_GTT_PAGE_SIZE_64K;
- gfp |= __GFP_NOWARN;
- }
- gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
do {
- unsigned int order = get_order(size);
- struct page *page;
- dma_addr_t addr;
+ struct drm_i915_gem_object *obj;
- page = alloc_pages(gfp, order);
- if (unlikely(!page))
+ obj = vm->alloc_pt_dma(vm, size);
+ if (IS_ERR(obj))
goto skip;
+ if (pin_pt_dma(vm, obj))
+ goto skip_obj;
+
+ /* We need a single contiguous page for our scratch */
+ if (obj->mm.page_sizes.sg < size)
+ goto skip_obj;
+
+ /* And it needs to be correspondingly aligned */
+ if (__px_dma(obj) & (size - 1))
+ goto skip_obj;
+
/*
* Use a non-zero scratch page for debugging.
*
@@ -362,61 +209,28 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* should it ever be accidentally used, the effect should be
* fairly benign.
*/
- poison_scratch_page(page, size);
-
- addr = dma_map_page_attrs(vm->dma,
- page, 0, size,
- PCI_DMA_BIDIRECTIONAL,
- DMA_ATTR_SKIP_CPU_SYNC |
- DMA_ATTR_NO_WARN);
- if (unlikely(dma_mapping_error(vm->dma, addr)))
- goto free_page;
-
- if (unlikely(!IS_ALIGNED(addr, size)))
- goto unmap_page;
-
- vm->scratch[0].base.page = page;
- vm->scratch[0].base.daddr = addr;
- vm->scratch_order = order;
+ poison_scratch_page(obj);
+
+ vm->scratch[0] = obj;
+ vm->scratch_order = get_order(size);
return 0;
-unmap_page:
- dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
-free_page:
- __free_pages(page, order);
+skip_obj:
+ i915_gem_object_put(obj);
skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
size = I915_GTT_PAGE_SIZE_4K;
- gfp &= ~__GFP_NOWARN;
} while (1);
}
-void cleanup_scratch_page(struct i915_address_space *vm)
-{
- struct i915_page_dma *p = px_base(&vm->scratch[0]);
- unsigned int order = vm->scratch_order;
-
- dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, order);
-}
-
void free_scratch(struct i915_address_space *vm)
{
int i;
- if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
- return;
-
- for (i = 1; i <= vm->top; i++) {
- if (!px_dma(&vm->scratch[i]))
- break;
- cleanup_page_dma(vm, px_base(&vm->scratch[i]));
- }
-
- cleanup_scratch_page(vm);
+ for (i = 0; i <= vm->top; i++)
+ i915_gem_object_put(vm->scratch[i]);
}
void gtt_write_workarounds(struct intel_gt *gt)
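
[Note] setup_scratch_page() above keeps its try-large-then-fall-back loop, but the checks move to the GEM object: the scratch must be physically contiguous and naturally aligned, otherwise it retries at 4K. A self-contained sketch of that fallback shape, where aligned_alloc() stands in for the GEM allocation plus the DMA-address checks:

    #include <stddef.h>
    #include <stdlib.h>

    #define SZ_64K  (64 * 1024)
    #define SZ_4K   (4 * 1024)

    static void *alloc_scratch(size_t *out_size)
    {
            size_t size = SZ_64K;   /* prefer the large scratch when supported */

            for (;;) {
                    void *p = aligned_alloc(size, size);

                    if (p) {
                            *out_size = size;
                            return p;
                    }

                    if (size == SZ_4K)
                            return NULL;    /* maps to -ENOMEM in the driver */
                    size = SZ_4K;           /* retry at the minimum page size */
            }
    }
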
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index f2b75078e05f..c13c650ced22 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -134,38 +134,29 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
+enum i915_cache_level;
+
+struct drm_i915_file_private;
+struct drm_i915_gem_object;
struct i915_fence_reg;
+struct i915_vma;
+struct intel_gt;
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
-struct i915_page_dma {
- struct page *page;
+struct i915_page_table {
+ struct drm_i915_gem_object *base;
union {
- dma_addr_t daddr;
-
- /*
- * For gen6/gen7 only. This is the offset in the GGTT
- * where the page directory entries for PPGTT begin
- */
- u32 ggtt_offset;
+ atomic_t used;
+ struct i915_page_table *stash;
};
};
-struct i915_page_scratch {
- struct i915_page_dma base;
- u64 encode;
-};
-
-struct i915_page_table {
- struct i915_page_dma base;
- atomic_t used;
-};
-
struct i915_page_directory {
struct i915_page_table pt;
spinlock_t lock;
- void *entry[512];
+ void **entry;
};
#define __px_choose_expr(x, type, expr, other) \
@@ -176,12 +167,14 @@ struct i915_page_directory {
other)
#define px_base(px) \
- __px_choose_expr(px, struct i915_page_dma *, __x, \
- __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
- __px_choose_expr(px, struct i915_page_table *, &__x->base, \
- __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
- (void)0))))
-#define px_dma(px) (px_base(px)->daddr)
+ __px_choose_expr(px, struct drm_i915_gem_object *, __x, \
+ __px_choose_expr(px, struct i915_page_table *, __x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
+ (void)0)))
+
+struct page *__px_page(struct drm_i915_gem_object *p);
+dma_addr_t __px_dma(struct drm_i915_gem_object *p);
+#define px_dma(px) (__px_dma(px_base(px)))
#define px_pt(px) \
__px_choose_expr(px, struct i915_page_table *, __x, \
@@ -189,19 +182,18 @@ struct i915_page_directory {
(void)0))
#define px_used(px) (&px_pt(px)->used)
-enum i915_cache_level;
-
-struct drm_i915_file_private;
-struct drm_i915_gem_object;
-struct i915_vma;
-struct intel_gt;
+struct i915_vm_pt_stash {
+ /* preallocated chains of page tables/directories */
+ struct i915_page_table *pt[2];
+};
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
- int (*bind_vma)(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
+ void (*bind_vma)(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
/*
* Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page.
@@ -213,13 +205,6 @@ struct i915_vma_ops {
void (*clear_pages)(struct i915_vma *vma);
};
-struct pagestash {
- spinlock_t lock;
- struct pagevec pvec;
-};
-
-void stash_init(struct pagestash *stash);
-
struct i915_address_space {
struct kref ref;
struct rcu_work rcu;
@@ -256,33 +241,33 @@ struct i915_address_space {
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
- struct i915_page_scratch scratch[4];
- unsigned int scratch_order;
- unsigned int top;
-
+ struct drm_i915_gem_object *scratch[4];
/**
* List of vma currently bound.
*/
struct list_head bound_list;
- struct pagestash free_pages;
-
/* Global GTT */
bool is_ggtt:1;
- /* Some systems require uncached updates of the page directories */
- bool pt_kmap_wc:1;
-
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
+ u8 top;
+ u8 pd_shift;
+ u8 scratch_order;
+
+ struct drm_i915_gem_object *
+ (*alloc_pt_dma)(struct i915_address_space *vm, int sz);
+
u64 (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
- int (*allocate_va_range)(struct i915_address_space *vm,
- u64 start, u64 length);
+ void (*allocate_va_range)(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
u64 start, u64 length);
void (*insert_page)(struct i915_address_space *vm,
@@ -490,9 +475,9 @@ i915_pd_entry(const struct i915_page_directory * const pdp,
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
- struct i915_page_dma *pt = ppgtt->pd->entry[n];
+ struct i915_page_table *pt = ppgtt->pd->entry[n];
- return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
+ return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
@@ -517,13 +502,10 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
-void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
-
-#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
+#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))
void
-fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
+fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do { \
@@ -531,47 +513,51 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);
fill_px((px), v__ << 32 | v__); \
} while (0)
-int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
-void cleanup_scratch_page(struct i915_address_space *vm);
+int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);
+struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
-struct i915_page_directory *__alloc_pd(size_t sz);
+struct i915_page_directory *__alloc_pd(int npde);
-void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);
+int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-#define free_px(vm, px) free_pd(vm, px_base(px))
+void free_px(struct i915_address_space *vm,
+ struct i915_page_table *pt, int lvl);
+#define free_pt(vm, px) free_px(vm, px, 0)
+#define free_pd(vm, px) free_px(vm, px_pt(px), 1)
void
__set_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- struct i915_page_dma * const to,
+ struct i915_page_table *pt,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level));
#define set_pd_entry(pd, idx, to) \
- __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+ __set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)
void
clear_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- const struct i915_page_scratch * const scratch);
+ const struct drm_i915_gem_object * const scratch);
bool
release_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
struct i915_page_table * const pt,
- const struct i915_page_scratch * const scratch);
+ const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);
-int ppgtt_bind_vma(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
+void ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma *vma);
@@ -579,6 +565,14 @@ void gtt_write_workarounds(struct intel_gt *gt);
void setup_private_pat(struct intel_uncore *uncore);
+int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 size);
+int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash);
+void i915_vm_free_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash);
+
static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9eeaca957a7e..f82c6dd1de18 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -137,6 +137,7 @@
#include "i915_perf.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
@@ -1139,29 +1140,14 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
/* Check in case we rollback so far we wrap [size/2] */
if (intel_ring_direction(rq->ring,
- intel_ring_wrap(rq->ring,
- rq->tail),
- rq->ring->tail) > 0)
+ rq->tail,
+ rq->ring->tail + 8) > 0)
rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
- /*
- * Decouple the virtual breadcrumb before moving it
- * back to the virtual engine -- we don't want the
- * request to complete in the background and try
- * and cancel the breadcrumb on the virtual engine
- * (instead of the old engine where it is linked)!
- */
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &rq->fence.flags)) {
- spin_lock_nested(&rq->lock,
- SINGLE_DEPTH_NESTING);
- i915_request_cancel_breadcrumb(rq);
- spin_unlock(&rq->lock);
- }
WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
@@ -1819,16 +1805,31 @@ static bool virtual_matches(const struct virtual_engine *ve,
return true;
}
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
+static void virtual_xfer_context(struct virtual_engine *ve,
+ struct intel_engine_cs *engine)
{
+ unsigned int n;
+
+ if (likely(engine == ve->siblings[0]))
+ return;
+
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ engine);
+
/*
- * All the outstanding signals on ve->siblings[0] must have
- * been completed, just pending the interrupt handler. As those
- * signals still refer to the old sibling (via rq->engine), we must
- * transfer those to the old irq_worker to keep our locking
- * consistent.
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
*/
- intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
+ }
}
#define for_each_waiter(p__, rq__) \
@@ -2279,38 +2280,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
WRITE_ONCE(rq->engine, engine);
- if (engine != ve->siblings[0]) {
- u32 *regs = ve->context.lrc_reg_state;
- unsigned int n;
-
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
-
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(regs,
- engine);
-
- if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve);
-
+ if (__i915_request_submit(rq)) {
/*
- * Move the bound engine to the top of the list
- * for future execution. We then kick this
- * tasklet first before checking others, so that
- * we preferentially reuse this set of bound
- * registers.
+ * Only after we confirm that we will submit
+ * this request (i.e. it has not already
+ * completed), do we want to update the context.
+ *
+ * This serves two purposes. It avoids
+ * unnecessary work if we are resubmitting an
+ * already completed request after timeslicing.
+ * But more importantly, it prevents us altering
+ * ve->siblings[] on an idle context, where
+ * we may be using ve->siblings[] in
+ * virtual_context_enter / virtual_context_exit.
*/
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n],
- ve->siblings[0]);
- break;
- }
- }
-
+ virtual_xfer_context(ve, engine);
GEM_BUG_ON(ve->siblings[0] != engine);
- }
- if (__i915_request_submit(rq)) {
submit = true;
last = rq;
}
@@ -2477,7 +2463,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
}
static inline void
-invalidate_csb_entries(const u32 *first, const u32 *last)
+invalidate_csb_entries(const u64 *first, const u64 *last)
{
clflush((void *)first);
clflush((void *)last);
@@ -2509,14 +2495,25 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
* bits 47-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*/
-static inline bool
-gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
-{
- u32 lower_dw = csb[0];
- u32 upper_dw = csb[1];
- bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
- bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
- bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+static inline bool gen12_csb_parse(const u64 *csb)
+{
+ bool ctx_away_valid;
+ bool new_queue;
+ u64 entry;
+
+ /* HSD#22011248461 */
+ entry = READ_ONCE(*csb);
+ if (unlikely(entry == -1)) {
+ preempt_disable();
+ if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 50))
+ GEM_WARN_ON("50us CSB timeout");
+ preempt_enable();
+ }
+ WRITE_ONCE(*(u64 *)csb, -1);
+
+ ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(entry));
+ new_queue =
+ lower_32_bits(entry) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
/*
* The context switch detail is not guaranteed to be 5 when a preemption
@@ -2526,7 +2523,7 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* would require some extra handling, but we don't support that.
*/
if (!ctx_away_valid || new_queue) {
- GEM_BUG_ON(!ctx_to_valid);
+ GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(entry)));
return true;
}
@@ -2535,12 +2532,11 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* context switch on an unsuccessful wait instruction since we always
* use polling mode.
*/
- GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(entry)));
return false;
}
-static inline bool
-gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+static inline bool gen8_csb_parse(const u64 *csb)
{
return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
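
[Note] The gen12 CSB parser above now reads each entry as a single u64 and treats all-ones as "write not yet visible" (the HSD#22011248461 workaround): reset_csb_pointers() seeds the buffer with -1, and the reader spins briefly, bounded to 50us in the driver, before consuming and re-poisoning the slot. A userspace sketch of that sentinel protocol (simplified; no timeout handling):

    #include <stdatomic.h>
    #include <stdint.h>

    #define CSB_INVALID UINT64_MAX  /* buffer is seeded with -1 */

    static uint64_t read_csb_entry(_Atomic uint64_t *slot)
    {
            uint64_t entry;

            /* Spin until the 64-bit write from the producer has landed. */
            while ((entry = atomic_load_explicit(slot,
                                                 memory_order_acquire)) ==
                   CSB_INVALID)
                    ;

            /* Re-poison so a stale value is never parsed twice. */
            atomic_store_explicit(slot, CSB_INVALID, memory_order_release);
            return entry;
    }
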
@@ -2548,7 +2544,7 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
static void process_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- const u32 * const buf = execlists->csb_status;
+ const u64 * const buf = execlists->csb_status;
const u8 num_entries = execlists->csb_size;
u8 head, tail;
@@ -2629,12 +2625,14 @@ static void process_csb(struct intel_engine_cs *engine)
*/
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
- head, buf[2 * head + 0], buf[2 * head + 1]);
+ head,
+ upper_32_bits(buf[head]),
+ lower_32_bits(buf[head]));
if (INTEL_GEN(engine->i915) >= 12)
- promote = gen12_csb_parse(execlists, buf + 2 * head);
+ promote = gen12_csb_parse(buf + head);
else
- promote = gen8_csb_parse(execlists, buf + 2 * head);
+ promote = gen8_csb_parse(buf + head);
if (promote) {
struct i915_request * const *old = execlists->active;
@@ -2662,6 +2660,9 @@ static void process_csb(struct intel_engine_cs *engine)
smp_wmb(); /* complete the seqlock */
WRITE_ONCE(execlists->active, execlists->inflight);
+ /* XXX Magic delay for tgl */
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
WRITE_ONCE(execlists->pending[0], NULL);
} else {
if (GEM_WARN_ON(!*execlists->active)) {
@@ -3316,7 +3317,10 @@ static void execlists_context_unpin(struct intel_context *ce)
{
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
+}
+static void execlists_context_post_unpin(struct intel_context *ce)
+{
i915_gem_object_unpin_map(ce->state->obj);
}
@@ -3478,20 +3482,24 @@ __execlists_update_reg_state(const struct intel_context *ce,
}
static int
-__execlists_context_pin(struct intel_context *ce,
- struct intel_engine_cs *engine)
+execlists_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww, void **vaddr)
{
- void *vaddr;
-
GEM_BUG_ON(!ce->state);
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(engine->i915) |
+ *vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(ce->engine->i915) |
I915_MAP_OVERRIDE);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
+ return PTR_ERR_OR_ZERO(*vaddr);
+}
+
+static int
+__execlists_context_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr)
+{
ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
__execlists_update_reg_state(ce, engine, ce->ring->tail);
@@ -3499,9 +3507,9 @@ __execlists_context_pin(struct intel_context *ce,
return 0;
}
-static int execlists_context_pin(struct intel_context *ce)
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
{
- return __execlists_context_pin(ce, ce->engine);
+ return __execlists_context_pin(ce, ce->engine, vaddr);
}
static int execlists_context_alloc(struct intel_context *ce)
@@ -3527,8 +3535,10 @@ static void execlists_context_reset(struct intel_context *ce)
static const struct intel_context_ops execlists_context_ops = {
.alloc = execlists_context_alloc,
+ .pre_pin = execlists_context_pre_pin,
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
+ .post_unpin = execlists_context_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
@@ -3537,6 +3547,19 @@ static const struct intel_context_ops execlists_context_ops = {
.destroy = execlists_context_destroy,
};
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+ const struct intel_timeline_cacheline *cl;
+
+ /* Before the request is executed, the timeline/cacheline is fixed */
+
+ cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+ if (cl)
+ return cl->ggtt_offset;
+
+ return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
@@ -3559,7 +3582,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = i915_request_timeline(rq)->hwsp_offset;
+ *cs++ = hwsp_offset(rq);
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -3892,7 +3915,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_ggtt_pin(vma, 0, PIN_HIGH);
+ err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
if (err)
goto err;
@@ -4009,6 +4032,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
WRITE_ONCE(*execlists->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
+ /* Check that the GPU does indeed update the CSB entries! */
+ memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
invalidate_csb_entries(&execlists->csb_status[0],
&execlists->csb_status[reset_value]);
@@ -4133,7 +4158,7 @@ static int execlists_resume(struct intel_engine_cs *engine)
{
intel_mocs_init_engine(engine);
- intel_engine_reset_breadcrumbs(engine);
+ intel_breadcrumbs_reset(engine->breadcrumbs);
if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
struct drm_printer p = drm_debug_printer(__func__);
@@ -4562,7 +4587,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
vf_flush_wa = true;
/* WaForGAMHang:kbl */
- if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
dc_flush_wa = true;
}
@@ -4764,14 +4789,21 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
intel_engine_mask_t aux_inv = 0;
u32 cmd, *cs;
+ cmd = 4;
+ if (mode & EMIT_INVALIDATE)
+ cmd += 2;
if (mode & EMIT_INVALIDATE)
aux_inv = request->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 2 * hweight8(aux_inv) + 2;
- cs = intel_ring_begin(request,
- 4 + (aux_inv ? 2 * hweight8(aux_inv) + 2 : 0));
+ cs = intel_ring_begin(request, cmd);
if (IS_ERR(cs))
return PTR_ERR(cs);
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(true);
+
cmd = MI_FLUSH_DW + 1;
/* We always require a command barrier so that subsequent
@@ -4804,6 +4836,10 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
}
*cs++ = MI_NOOP;
}
+
+ if (mode & EMIT_INVALIDATE)
+ *cs++ = preparser_disable(false);
+
intel_ring_advance(request, cs);
return 0;
@@ -4863,11 +4899,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
- u32 addr = i915_request_active_timeline(request)->hwsp_offset;
-
- return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+ return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -4886,7 +4920,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
@@ -4898,7 +4932,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -4960,7 +4994,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
{
- return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+ /* XXX Stalling flush before seqno write; post-sync op not used */
+ cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+ return gen12_emit_fini_breadcrumb_tail(rq, cs);
}
static u32 *
@@ -4968,7 +5004,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen12_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- i915_request_active_timeline(request)->hwsp_offset,
+ hwsp_offset(request),
PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
@@ -5150,7 +5186,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
}
execlists->csb_status =
- &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+ (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
@@ -5302,6 +5338,14 @@ populate_lr_context(struct intel_context *ce,
return 0;
}
+static struct intel_timeline *pinned_timeline(struct intel_context *ce)
+{
+ struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
+
+ return intel_timeline_create_from_engine(ce->engine,
+ page_unmask_bits(tl));
+}
+
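[Editor's note] pinned_timeline() assumes the engine's fixed HWSP offset was smuggled into the low bits of ce->timeline before allocation; i915 packs such values with the ptr_pack_bits()/page_pack_bits() helpers in i915_utils.h. A sketch of the assumed encode side (the actual call site is elsewhere in this series):

	/* record only the offset; the backing vma comes from the engine later */
	ce->timeline = page_pack_bits(NULL, I915_GEM_HWS_SEQNO_ADDR);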
static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine)
{
@@ -5332,19 +5376,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
goto error_deref_obj;
}
- if (!ce->timeline) {
+ if (!page_mask_bits(ce->timeline)) {
struct intel_timeline *tl;
- struct i915_vma *hwsp;
/*
* Use the static global HWSP for the kernel context, and
* a dynamically allocated cacheline for everyone else.
*/
- hwsp = NULL;
- if (unlikely(intel_context_is_barrier(ce)))
- hwsp = engine->status_page.vma;
-
- tl = intel_timeline_create(engine->gt, hwsp);
+ if (unlikely(ce->timeline))
+ tl = pinned_timeline(ce);
+ else
+ tl = intel_timeline_create(engine->gt);
if (IS_ERR(tl)) {
ret = PTR_ERR(tl);
goto error_deref_obj;
@@ -5450,12 +5492,12 @@ static int virtual_context_alloc(struct intel_context *ce)
return __execlists_context_alloc(ce, ve->siblings[0]);
}
-static int virtual_context_pin(struct intel_context *ce)
+static int virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
/* Note: we must use a real engine class for setting up reg state */
- return __execlists_context_pin(ce, ve->siblings[0]);
+ return __execlists_context_pin(ce, ve->siblings[0], vaddr);
}
static void virtual_context_enter(struct intel_context *ce)
@@ -5483,8 +5525,10 @@ static void virtual_context_exit(struct intel_context *ce)
static const struct intel_context_ops virtual_context_ops = {
.alloc = virtual_context_alloc,
+ .pre_pin = execlists_context_pre_pin,
.pin = virtual_context_pin,
.unpin = execlists_context_unpin,
+ .post_unpin = execlists_context_post_unpin,
.enter = virtual_context_enter,
.exit = virtual_context_exit,
@@ -5718,9 +5762,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
- intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
- ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
@@ -5737,6 +5779,12 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
intel_context_init(&ve->context, &ve->base);
+ ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!ve->base.breadcrumbs) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 632e08a4592b..b8f56e62158e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -234,11 +234,17 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
L3_1_UC)
static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
- /* Base - Error (Reserved for Non-Use) */
- MOCS_ENTRY(0, 0x0, 0x0),
- /* Base - Reserved */
- MOCS_ENTRY(1, 0x0, 0x0),
-
+ /*
+ * NOTE:
+ * Reserved and unspecified MOCS indices have been set to (L3 + LLC).
+ * These reserved entries should never be used; they may be changed
+ * to lower-performance variants with better coherency in the future
+ * if more entries are needed. We are programming index I915_MOCS_PTE(1)
+ * only; __init_mocs_table() takes care of programming the unused
+ * indices with this entry.
+ */
+ MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
GEN11_MOCS_ENTRIES,
/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
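[Editor's note] The NOTE above leans on the table-init path back-filling every undefined index with this one entry. A hedged sketch of that behaviour (defined() and write_mocs_reg() are hypothetical helper names; the real logic lives in __init_mocs_table()):

	for (i = 0; i < n_entries; i++) {
		/* explicitly defined entries keep their value, the rest fall back to entry 1 */
		unsigned int src = defined(table, i) ? i : 1;

		write_mocs_reg(i, table[src].control_value);	/* hypothetical */
	}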
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index f0862e924d11..46d9aceda64c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -18,7 +18,8 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm)
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, &pt->base))) {
+ pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pt->base)) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
@@ -27,14 +28,20 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm)
return pt;
}
-struct i915_page_directory *__alloc_pd(size_t sz)
+struct i915_page_directory *__alloc_pd(int count)
{
struct i915_page_directory *pd;
- pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
+ pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
+ pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
+ if (unlikely(!pd->entry)) {
+ kfree(pd);
+ return NULL;
+ }
+
spin_lock_init(&pd->lock);
return pd;
}
@@ -43,11 +50,13 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = __alloc_pd(sizeof(*pd));
+ pd = __alloc_pd(I915_PDES);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
+ kfree(pd->entry);
kfree(pd);
return ERR_PTR(-ENOMEM);
}
@@ -55,41 +64,52 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
-void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
- cleanup_page_dma(vm, pd);
- kfree(pd);
+ BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));
+
+ if (lvl) {
+ struct i915_page_directory *pd =
+ container_of(pt, typeof(*pd), pt);
+ kfree(pd->entry);
+ }
+
+ if (pt->base)
+ i915_gem_object_put(pt->base);
+
+ kfree(pt);
}
static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
+write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(pdma->page);
+ u64 * const vaddr = kmap_atomic(__px_page(pdma));
vaddr[idx] = encoded_entry;
+ clflush_cache_range(&vaddr[idx], sizeof(u64));
kunmap_atomic(vaddr);
}
void
__set_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- struct i915_page_dma * const to,
+ struct i915_page_table * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
/* Each thread pre-pins the pd, and we may have a thread per pde. */
- GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);
atomic_inc(px_used(pd));
pd->entry[idx] = to;
- write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+ write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}
void
clear_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- const struct i915_page_scratch * const scratch)
+ const struct drm_i915_gem_object * const scratch)
{
GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
@@ -102,7 +122,7 @@ bool
release_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
struct i915_page_table * const pt,
- const struct i915_page_scratch * const scratch)
+ const struct drm_i915_gem_object * const scratch)
{
bool free = false;
@@ -155,19 +175,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
return ppgtt;
}
-int ppgtt_bind_vma(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+void ppgtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
u32 pte_flags;
- int err;
if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
- err = vm->allocate_va_range(vm, vma->node.start, vma->size);
- if (err)
- return err;
-
+ vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
@@ -178,8 +195,6 @@ int ppgtt_bind_vma(struct i915_address_space *vm,
vm->insert_entries(vm, vma, cache_level, pte_flags);
wmb();
-
- return 0;
}
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
@@ -188,12 +203,93 @@ void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
vm->clear_range(vm, vma->node.start, vma->size);
}
+static unsigned long pd_count(u64 size, int shift)
+{
+ /* Beware later misalignment */
+ return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
+}
+
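[Editor's note] A worked example of the over-allocation in pd_count(): a plain ceiling divide, (size + BIT(shift) - 1) >> shift, assumes an aligned start. Since the eventual start address may be misaligned, the range can straddle one extra boundary, hence the doubled rounding term:

	/* shift = 21: one page table spans 512 * 4KiB = 2MiB */
	/* size = 4MiB from an aligned start   -> spans 2 tables */
	/* size = 4MiB from a worst-case start -> spans 3 tables */
	pd_count(SZ_4M, 21);	/* (4M + 2 * (2M - 1)) >> 21 == 3 */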
+int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ u64 size)
+{
+ unsigned long count;
+ int shift, n;
+
+ shift = vm->pd_shift;
+ if (!shift)
+ return 0;
+
+ count = pd_count(size, shift);
+ while (count--) {
+ struct i915_page_table *pt;
+
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ i915_vm_free_pt_stash(vm, stash);
+ return PTR_ERR(pt);
+ }
+
+ pt->stash = stash->pt[0];
+ stash->pt[0] = pt;
+ }
+
+ for (n = 1; n < vm->top; n++) {
+ shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
+ count = pd_count(size, shift);
+ while (count--) {
+ struct i915_page_directory *pd;
+
+ pd = alloc_pd(vm);
+ if (IS_ERR(pd)) {
+ i915_vm_free_pt_stash(vm, stash);
+ return PTR_ERR(pd);
+ }
+
+ pd->pt.stash = stash->pt[1];
+ stash->pt[1] = &pd->pt;
+ }
+ }
+
+ return 0;
+}
+
+int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash)
+{
+ struct i915_page_table *pt;
+ int n, err;
+
+ for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+ for (pt = stash->pt[n]; pt; pt = pt->stash) {
+ err = pin_pt_dma(vm, pt->base);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void i915_vm_free_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash)
+{
+ struct i915_page_table *pt;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+ while ((pt = stash->pt[n])) {
+ stash->pt[n] = pt->stash;
+ free_px(vm, pt, n);
+ }
+ }
+}
+
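[Editor's note] Taken together, the three stash helpers above are presumably driven by the vma bind path roughly as follows (a sketch under that assumption; the exact call sites are elsewhere in this series):

	struct i915_vm_pt_stash stash = {};
	int err;

	err = i915_vm_alloc_pt_stash(vm, &stash, vma->size);	/* reserve worst case */
	if (err)
		return err;

	err = i915_vm_pin_pt_stash(vm, &stash);	/* back the stash with dma pages */
	if (err == 0)
		ppgtt_bind_vma(vm, &stash, vma, cache_level, flags);	/* consumes from stash */

	i915_vm_free_pt_stash(vm, &stash);	/* release whatever was not consumed */
	return err;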
int ppgtt_set_pages(struct i915_vma *vma)
{
GEM_BUG_ON(vma->pages);
vma->pages = vma->obj->mm.pages;
-
vma->page_sizes = vma->obj->mm.page_sizes;
return 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 1bfad589c63b..ea2a77c7b469 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
+#include "gt/intel_context.h"
#include "intel_ring.h"
static const struct intel_renderstate_rodata *
@@ -157,33 +158,47 @@ out:
#undef OUT_BATCH
int intel_renderstate_init(struct intel_renderstate *so,
- struct intel_engine_cs *engine)
+ struct intel_context *ce)
{
- struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine = ce->engine;
+ struct drm_i915_gem_object *obj = NULL;
int err;
memset(so, 0, sizeof(*so));
so->rodata = render_state_get_rodata(engine);
- if (!so->rodata)
- return 0;
+ if (so->rodata) {
+ if (so->rodata->batch_items * 4 > PAGE_SIZE)
+ return -EINVAL;
+
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(so->vma)) {
+ err = PTR_ERR(so->vma);
+ goto err_obj;
+ }
+ }
- if (so->rodata->batch_items * 4 > PAGE_SIZE)
- return -EINVAL;
+ i915_gem_ww_ctx_init(&so->ww, true);
+retry:
+ err = intel_context_pin_ww(ce, &so->ww);
+ if (err)
+ goto err_fini;
- obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ /* return early if there's nothing to set up */
+ if (!err && !so->rodata)
+ return 0;
- so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(so->vma)) {
- err = PTR_ERR(so->vma);
- goto err_obj;
- }
+ err = i915_gem_object_lock(so->vma->obj, &so->ww);
+ if (err)
+ goto err_context;
err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto err_obj;
+ goto err_context;
err = render_state_setup(so, engine->i915);
if (err)
@@ -193,8 +208,18 @@ int intel_renderstate_init(struct intel_renderstate *so,
err_unpin:
i915_vma_unpin(so->vma);
+err_context:
+ intel_context_unpin(ce);
+err_fini:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&so->ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&so->ww);
err_obj:
- i915_gem_object_put(obj);
+ if (obj)
+ i915_gem_object_put(obj);
so->vma = NULL;
return err;
}
@@ -208,11 +233,9 @@ int intel_renderstate_emit(struct intel_renderstate *so,
if (!so->vma)
return 0;
- i915_vma_lock(so->vma);
err = i915_request_await_object(rq, so->vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(so->vma, rq, 0);
- i915_vma_unlock(so->vma);
if (err)
return err;
@@ -233,7 +256,17 @@ int intel_renderstate_emit(struct intel_renderstate *so,
return 0;
}
-void intel_renderstate_fini(struct intel_renderstate *so)
+void intel_renderstate_fini(struct intel_renderstate *so,
+ struct intel_context *ce)
{
- i915_vma_unpin_and_release(&so->vma, 0);
+ if (so->vma) {
+ i915_vma_unpin(so->vma);
+ i915_vma_close(so->vma);
+ }
+
+ intel_context_unpin(ce);
+ i915_gem_ww_ctx_fini(&so->ww);
+
+ if (so->vma)
+ i915_gem_object_put(so->vma->obj);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 5700be69a05a..713aa1e86c80 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -25,9 +25,10 @@
#define _INTEL_RENDERSTATE_H_
#include <linux/types.h>
+#include "i915_gem.h"
struct i915_request;
-struct intel_engine_cs;
+struct intel_context;
struct i915_vma;
struct intel_renderstate_rodata {
@@ -49,6 +50,7 @@ extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
struct intel_renderstate {
+ struct i915_gem_ww_ctx ww;
const struct intel_renderstate_rodata *rodata;
struct i915_vma *vma;
u32 batch_offset;
@@ -58,9 +60,10 @@ struct intel_renderstate {
};
int intel_renderstate_init(struct intel_renderstate *so,
- struct intel_engine_cs *engine);
+ struct intel_context *ce);
int intel_renderstate_emit(struct intel_renderstate *so,
struct i915_request *rq);
-void intel_renderstate_fini(struct intel_renderstate *so);
+void intel_renderstate_fini(struct intel_renderstate *so,
+ struct intel_context *ce);
#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 46a5ceffc22f..ac36b67fb46b 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -15,6 +15,7 @@
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
+#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index bdb324167ef3..4034a4bac7f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -21,7 +21,13 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
return space;
}
-int intel_ring_pin(struct intel_ring *ring)
+void __intel_ring_pin(struct intel_ring *ring)
+{
+ GEM_BUG_ON(!atomic_read(&ring->pin_count));
+ atomic_inc(&ring->pin_count);
+}
+
+int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
struct i915_vma *vma = ring->vma;
unsigned int flags;
@@ -39,7 +45,7 @@ int intel_ring_pin(struct intel_ring *ring)
else
flags |= PIN_HIGH;
- ret = i915_ggtt_pin(vma, 0, flags);
+ ret = i915_ggtt_pin(vma, ww, 0, flags);
if (unlikely(ret))
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index cc0ebca65167..1700579bdc93 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -21,7 +21,8 @@ int intel_ring_cacheline_align(struct i915_request *rq);
unsigned int intel_ring_update_space(struct intel_ring *ring);
-int intel_ring_pin(struct intel_ring *ring);
+void __intel_ring_pin(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 898593ca4889..16b48e72c369 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
+#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_reset.h"
@@ -201,16 +202,18 @@ static struct i915_address_space *vm_alias(struct i915_address_space *vm)
return vm;
}
+static u32 pp_dir(struct i915_address_space *vm)
+{
+ return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
+}
+
static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
if (vm) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
- ENGINE_WRITE(engine, RING_PP_DIR_BASE,
- px_base(ppgtt->pd)->ggtt_offset << 10);
+ ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
}
}
@@ -255,7 +258,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
else
ring_setup_status_page(engine);
- intel_engine_reset_breadcrumbs(engine);
+ intel_breadcrumbs_reset(engine->breadcrumbs);
/* Enforce ordering by reading HEAD register back */
ENGINE_POSTING_READ(engine, RING_HEAD);
@@ -474,14 +477,16 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
-static int __context_pin_ppgtt(struct intel_context *ce)
+static int ring_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww,
+ void **unused)
{
struct i915_address_space *vm;
int err = 0;
vm = vm_alias(ce->vm);
if (vm)
- err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
+ err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
return err;
}
@@ -497,6 +502,10 @@ static void __context_unpin_ppgtt(struct intel_context *ce)
static void ring_context_unpin(struct intel_context *ce)
{
+}
+
+static void ring_context_post_unpin(struct intel_context *ce)
+{
__context_unpin_ppgtt(ce);
}
@@ -584,9 +593,9 @@ static int ring_context_alloc(struct intel_context *ce)
return 0;
}
-static int ring_context_pin(struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce, void *unused)
{
- return __context_pin_ppgtt(ce);
+ return 0;
}
static void ring_context_reset(struct intel_context *ce)
@@ -597,8 +606,10 @@ static void ring_context_reset(struct intel_context *ce)
static const struct intel_context_ops ring_context_ops = {
.alloc = ring_context_alloc,
+ .pre_pin = ring_context_pre_pin,
.pin = ring_context_pin,
.unpin = ring_context_unpin,
+ .post_unpin = ring_context_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
@@ -608,7 +619,7 @@ static const struct intel_context_ops ring_context_ops = {
};
static int load_pd_dir(struct i915_request *rq,
- const struct i915_ppgtt *ppgtt,
+ struct i915_address_space *vm,
u32 valid)
{
const struct intel_engine_cs * const engine = rq->engine;
@@ -624,7 +635,7 @@ static int load_pd_dir(struct i915_request *rq,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
+ *cs++ = pp_dir(vm);
/* Stall until the page table load is complete? */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
@@ -826,7 +837,7 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
* post-sync op, this extra pass appears vital before a
* mm switch!
*/
- ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
+ ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
if (ret)
return ret;
@@ -1250,14 +1261,15 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
return -ENODEV;
}
- timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
+ timeline = intel_timeline_create_from_engine(engine,
+ I915_GEM_HWS_SEQNO_ADDR);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- err = intel_timeline_pin(timeline);
+ err = intel_timeline_pin(timeline, NULL);
if (err)
goto err_timeline;
@@ -1267,7 +1279,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
goto err_timeline_unpin;
}
- err = intel_ring_pin(ring);
+ err = intel_ring_pin(ring, NULL);
if (err)
goto err_ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 97ba14ad52e4..e6a00eea0631 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 46d20f5f3ddc..7ea94d201fe6 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
return cl;
}
-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl,
+ u32 ggtt_offset)
{
- if (cl)
- i915_active_acquire(&cl->active);
+ if (!cl)
+ return;
+
+ cl->ggtt_offset = ggtt_offset;
+ i915_active_acquire(&cl->active);
}
static void cacheline_release(struct intel_timeline_cacheline *cl)
@@ -215,7 +219,8 @@ static void cacheline_free(struct intel_timeline_cacheline *cl)
static int intel_timeline_init(struct intel_timeline *timeline,
struct intel_gt *gt,
- struct i915_vma *hwsp)
+ struct i915_vma *hwsp,
+ unsigned int offset)
{
void *vaddr;
@@ -246,8 +251,7 @@ static int intel_timeline_init(struct intel_timeline *timeline,
vaddr = page_mask_bits(cl->vaddr);
} else {
- timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
-
+ timeline->hwsp_offset = offset;
vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -297,7 +301,9 @@ static void intel_timeline_fini(struct intel_timeline *timeline)
}
struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
+__intel_timeline_create(struct intel_gt *gt,
+ struct i915_vma *global_hwsp,
+ unsigned int offset)
{
struct intel_timeline *timeline;
int err;
@@ -306,7 +312,7 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = intel_timeline_init(timeline, gt, global_hwsp);
+ err = intel_timeline_init(timeline, gt, global_hwsp, offset);
if (err) {
kfree(timeline);
return ERR_PTR(err);
@@ -315,14 +321,20 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
return timeline;
}
-int intel_timeline_pin(struct intel_timeline *tl)
+void __intel_timeline_pin(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_inc(&tl->pin_count);
+}
+
+int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
int err;
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
+ err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
if (err)
return err;
@@ -332,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(tl->hwsp_cacheline);
+ cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
if (atomic_fetch_inc(&tl->pin_count)) {
cacheline_release(tl->hwsp_cacheline);
__i915_vma_unpin(tl->hwsp_ggtt);
@@ -465,7 +477,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
goto err_rollback;
}
- err = i915_ggtt_pin(vma, 0, PIN_HIGH);
+ err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
if (err) {
__idle_hwsp_free(vma->private, cacheline);
goto err_rollback;
@@ -484,7 +496,9 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
+ err = i915_active_ref(&tl->hwsp_cacheline->active,
+ tl->fence_context,
+ &rq->fence);
if (err)
goto err_cacheline;
@@ -505,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
- cacheline_acquire(cl);
+ cacheline_acquire(cl, tl->hwsp_offset);
tl->hwsp_cacheline = cl;
*seqno = timeline_advance(tl);
@@ -563,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
if (err)
goto out;
- *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
- ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
-
+ *hwsp = cl->ggtt_offset;
out:
i915_active_release(&cl->active);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 4298b9ac7327..9882cd911d8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -29,10 +29,27 @@
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_timeline_types.h"
struct intel_timeline *
-intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+__intel_timeline_create(struct intel_gt *gt,
+ struct i915_vma *global_hwsp,
+ unsigned int offset);
+
+static inline struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt)
+{
+ return __intel_timeline_create(gt, NULL, 0);
+}
+
+static inline struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ return __intel_timeline_create(engine->gt,
+ engine->status_page.vma,
+ offset);
+}
static inline struct intel_timeline *
intel_timeline_get(struct intel_timeline *timeline)
@@ -71,7 +88,8 @@ static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
}
-int intel_timeline_pin(struct intel_timeline *tl);
+void __intel_timeline_pin(struct intel_timeline *tl);
+int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww);
void intel_timeline_enter(struct intel_timeline *tl);
int intel_timeline_get_seqno(struct intel_timeline *tl,
struct i915_request *rq,
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 02181c5020db..4474f487f589 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
struct intel_timeline_hwsp *hwsp;
void *vaddr;
+ u32 ggtt_offset;
+
struct rcu_head rcu;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5726cd0a37e0..6c580d0d9ea8 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -52,6 +52,37 @@
* - Public functions to init or apply the given workaround type.
*/
+/*
+ * KBL revision ID ordering is bizarre; higher revision IDs map to lower
+ * steppings in some cases. So rather than test against the revision ID
+ * directly, let's map it into our own range of increasing IDs that we
+ * can test against in a regular manner.
+ */
+
+const struct i915_rev_steppings kbl_revids[] = {
+ [0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
+ [1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
+ [2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
+ [3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
+ [4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
+ [5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
+ [6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
+ [7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
+};
+
+const struct i915_rev_steppings tgl_uy_revids[] = {
+ [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_A0 },
+ [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_C0 },
+ [2] = { .gt_stepping = TGL_REVID_B1, .disp_stepping = TGL_REVID_C0 },
+ [3] = { .gt_stepping = TGL_REVID_C0, .disp_stepping = TGL_REVID_D0 },
+};
+
+/* The same GT stepping in tgl_uy_revids and tgl_revids does not mean the same HW */
+const struct i915_rev_steppings tgl_revids[] = {
+ [0] = { .gt_stepping = TGL_REVID_A0, .disp_stepping = TGL_REVID_B0 },
+ [1] = { .gt_stepping = TGL_REVID_B0, .disp_stepping = TGL_REVID_D0 },
+};
+
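[Editor's note] A minimal sketch of how such a stepping table can be consulted; the real IS_KBL_GT_REVID()/IS_TGL_UY_GT_REVID() macros live in i915_drv.h, so take this helper (including its out-of-range handling) as illustrative only:

	static inline bool kbl_gt_stepping_in_range(struct drm_i915_private *i915,
						    u8 since, u8 until)
	{
		u8 rev = INTEL_REVID(i915);
		u8 gt;

		if (rev >= ARRAY_SIZE(kbl_revids))
			return false;	/* unknown revision: guess at the policy */

		gt = kbl_revids[rev].gt_stepping;
		return gt >= since && gt <= until;
	}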
static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
wal->name = name;
@@ -470,7 +501,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -596,8 +627,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
-static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
- struct i915_wa_list *wal)
+static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
{
/*
* Wa_1409142259:tgl
@@ -607,12 +638,28 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
* Wa_1409207793:tgl
* Wa_1409178076:tgl
* Wa_1408979724:tgl
+ * Wa_14010443199:rkl
+ * Wa_14010698770:rkl
*/
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+ /* WaDisableGPGPUMidThreadPreemption:gen12 */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
+}
+
+static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ gen12_ctx_workarounds_init(engine, wal);
+
/*
- * Wa_1604555607:gen12 and Wa_1608008084:gen12
+ * Wa_1604555607:tgl,rkl
+ *
+ * Note that the implementation of this workaround is further modified
+ * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
* FF_MODE2 register will return the wrong value when read. The default
* value for this register is zero for all fields and there are no bit
* masks. So instead of doing a RMW we should just write the GS Timer
@@ -623,11 +670,6 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128,
0);
-
- /* WaDisableGPGPUMidThreadPreemption:tgl */
- WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
- GEN9_PREEMPT_GPGPU_LEVEL_MASK,
- GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
static void
@@ -642,8 +684,10 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
wa_init_start(wal, name, engine->name);
- if (IS_GEN(i915, 12))
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915))
tgl_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 12))
+ gen12_ctx_workarounds_init(engine, wal);
else if (IS_GEN(i915, 11))
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
@@ -995,7 +1039,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1176,18 +1220,25 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+gen12_gt_workarounds_init(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
{
wa_init_mcr(i915, wal);
+}
+
+static void
+tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ gen12_gt_workarounds_init(i915, wal);
/* Wa_1409420604:tgl */
- if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1607087056:tgl also known as BUG:1409180338 */
- if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1196,8 +1247,10 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (IS_GEN(i915, 12))
+ if (IS_TIGERLAKE(i915))
tgl_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 12))
+ gen12_gt_workarounds_init(i915, wal);
else if (IS_GEN(i915, 11))
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
@@ -1620,7 +1673,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
/*
* Wa_1607138336:tgl
* Wa_1607063988:tgl
@@ -1630,18 +1683,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
/*
- * Wa_1607030317:tgl
- * Wa_1607186500:tgl
- * Wa_1607297627:tgl there is 3 entries for this WA on BSpec, 2
- * of then says it is fixed on B0 the other one says it is
- * permanent
- */
- wa_masked_en(wal,
- GEN6_RC_SLEEP_PSMI_CONTROL,
- GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
- GEN8_RC_SEMA_IDLE_MSG_DISABLE);
-
- /*
* Wa_1606679103:tgl
* (see also Wa_1606682166:icl)
*/
@@ -1654,22 +1695,17 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
VSUNIT_CLKGATE_DIS_TGL);
}
- if (IS_TIGERLAKE(i915)) {
- /* Wa_1606931601:tgl */
+ if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl,rkl */
wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
- /* Wa_1409804808:tgl */
+ /* Wa_1409804808:tgl,rkl */
wa_masked_en(wal, GEN7_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
/*
* Wa_1409085225:tgl
- * Wa_14010229206:tgl
+ * Wa_14010229206:tgl,rkl
*/
wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
@@ -1677,9 +1713,37 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1407928979:tgl A*
* Wa_18011464164:tgl B0+
* Wa_22010931296:tgl B0+
+ * Wa_14010919138:rkl,tgl
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl,rkl there are multiple entries for this
+ * WA in the BSpec; some indicate this is an A0-only WA,
+ * others indicate it applies to all steppings.
+ */
+ wa_masked_en(wal,
+ GEN6_RC_SLEEP_PSMI_CONTROL,
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+
+ /*
+ * Wa_1606700617:tgl
+ * Wa_22010271021:tgl,rkl
+ */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+ }
+
+ if (IS_GEN(i915, 12)) {
+ /* Wa_1406941453:gen12 */
+ wa_masked_en(wal,
+ GEN10_SAMPLER_MODE,
+ ENABLE_SMALLPL);
}
if (IS_GEN(i915, 11)) {
@@ -1898,7 +1962,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
@@ -2045,6 +2109,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
const struct i915_wa *wa;
struct i915_request *rq;
struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
unsigned int i;
u32 *results;
int err;
@@ -2057,29 +2122,34 @@ static int engine_wa_list_verify(struct intel_context *ce,
return PTR_ERR(vma);
intel_engine_pm_get(ce->engine);
- rq = intel_context_create_request(ce);
- intel_engine_pm_put(ce->engine);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err == 0)
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto err_pm;
+
+ rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_vma;
+ goto err_unpin;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err) {
- i915_request_add(rq);
- goto err_vma;
- }
-
- err = wa_list_srm(rq, wal, vma);
- if (err)
- goto err_vma;
+ if (err == 0)
+ err = wa_list_srm(rq, wal, vma);
i915_request_get(rq);
+ if (err)
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
+
+ if (err)
+ goto err_rq;
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
goto err_rq;
@@ -2104,7 +2174,16 @@ static int engine_wa_list_verify(struct intel_context *ce,
err_rq:
i915_request_put(rq);
-err_vma:
+err_unpin:
+ intel_context_unpin(ce);
+err_pm:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ intel_engine_pm_put(ce->engine);
i915_vma_unpin(vma);
i915_vma_put(vma);
return err;
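[Editor's note] The lock/backoff dance above is the ww-mutex idiom this series threads through every pin path; distilled, with the error handling abbreviated and names as used in the hunk:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	/* ... work performed while the objects stay locked ... */
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);	/* unlock all, wait, retry */
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);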
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index b8dd3cbc8696..dfd1cfb8a7ec 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -131,6 +131,10 @@ static void mock_context_unpin(struct intel_context *ce)
{
}
+static void mock_context_post_unpin(struct intel_context *ce)
+{
+}
+
static void mock_context_destroy(struct kref *ref)
{
struct intel_context *ce = container_of(ref, typeof(*ce), ref);
@@ -152,8 +156,7 @@ static int mock_context_alloc(struct intel_context *ce)
if (!ce->ring)
return -ENOMEM;
- GEM_BUG_ON(ce->timeline);
- ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+ ce->timeline = intel_timeline_create(ce->engine->gt);
if (IS_ERR(ce->timeline)) {
kfree(ce->engine);
return PTR_ERR(ce->timeline);
@@ -164,7 +167,13 @@ static int mock_context_alloc(struct intel_context *ce)
return 0;
}
-static int mock_context_pin(struct intel_context *ce)
+static int mock_context_pre_pin(struct intel_context *ce,
+ struct i915_gem_ww_ctx *ww, void **unused)
+{
+ return 0;
+}
+
+static int mock_context_pin(struct intel_context *ce, void *unused)
{
return 0;
}
@@ -176,8 +185,10 @@ static void mock_context_reset(struct intel_context *ce)
static const struct intel_context_ops mock_context_ops = {
.alloc = mock_context_alloc,
+ .pre_pin = mock_context_pre_pin,
.pin = mock_context_pin,
.unpin = mock_context_unpin,
+ .post_unpin = mock_context_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
@@ -261,11 +272,12 @@ static void mock_engine_release(struct intel_engine_cs *engine)
GEM_BUG_ON(timer_pending(&mock->hw_delay));
+ intel_breadcrumbs_free(engine->breadcrumbs);
+
intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
intel_engine_fini_retire(engine);
- intel_engine_fini_breadcrumbs(engine);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -323,20 +335,26 @@ int mock_engine_init(struct intel_engine_cs *engine)
struct intel_context *ce;
intel_engine_init_active(engine, ENGINE_MOCK);
- intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
+ engine->breadcrumbs = intel_breadcrumbs_create(NULL);
+ if (!engine->breadcrumbs)
+ return -ENOMEM;
+
ce = create_kernel_context(engine);
if (IS_ERR(ce))
goto err_breadcrumbs;
+ /* We insist that the kernel context uses the status_page */
+ engine->status_page.vma = ce->timeline->hwsp_ggtt;
+
engine->kernel_context = ce;
return 0;
err_breadcrumbs:
- intel_engine_fini_breadcrumbs(engine);
+ intel_breadcrumbs_free(engine->breadcrumbs);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 52af1cee9a94..1f4020e906a8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -68,6 +68,8 @@ static int context_sync(struct intel_context *ce)
} while (!err);
mutex_unlock(&tl->mutex);
+ /* Wait for all barriers to complete (remote CPU) before we check */
+ i915_active_unlock_wait(&ce->active);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 73243ba59c7d..e73854dd2fe0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -47,7 +47,10 @@ static int pulse_active(struct i915_active *active)
static void pulse_free(struct kref *kref)
{
- kfree(container_of(kref, struct pulse, kref));
+ struct pulse *p = container_of(kref, typeof(*p), kref);
+
+ i915_active_fini(&p->active);
+ kfree(p);
}
static void pulse_put(struct pulse *p)
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 3fc5de961280..95d41c01d0e0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -2729,7 +2729,7 @@ static int create_gang(struct intel_engine_cs *engine,
i915_gem_object_put(obj);
intel_context_put(ce);
- rq->client_link.next = &(*prev)->client_link;
+ rq->mock.link.next = &(*prev)->mock.link;
*prev = rq;
return 0;
@@ -2970,8 +2970,7 @@ static int live_preempt_gang(void *arg)
}
while (rq) { /* wait for each rq from highest to lowest prio */
- struct i915_request *n =
- list_next_entry(rq, client_link);
+ struct i915_request *n = list_next_entry(rq, mock.link);
if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
@@ -3090,7 +3089,7 @@ static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
return vma;
}
- err = i915_ggtt_pin(vma, 0, 0);
+ err = i915_ggtt_pin(vma, NULL, 0, 0);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
@@ -4997,6 +4996,7 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
{
struct intel_context *ce;
struct i915_request *rq;
+ struct i915_gem_ww_ctx ww;
enum {
RING_START_IDX = 0,
RING_TAIL_IDX,
@@ -5011,7 +5011,11 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
if (IS_ERR(ce))
return PTR_ERR(ce);
- err = intel_context_pin(ce);
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(scratch->obj, &ww);
+ if (!err)
+ err = intel_context_pin_ww(ce, &ww);
if (err)
goto err_put;
@@ -5040,11 +5044,9 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
- i915_vma_lock(scratch);
err = i915_request_await_object(rq, scratch->obj, true);
if (!err)
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
i915_request_get(rq);
i915_request_add(rq);
@@ -5081,6 +5083,12 @@ err_rq:
err_unpin:
intel_context_unpin(ce);
err_put:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
intel_context_put(ce);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 35406ecdf0b2..ef5aeebbeeb0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -3,9 +3,203 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/crc32.h>
+
+#include "gem/i915_gem_stolen.h"
+
+#include "i915_memcpy.h"
#include "i915_selftest.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
+#include "selftests/igt_spinner.h"
+
+static int
+__igt_reset_stolen(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ const char *msg)
+{
+ struct i915_ggtt *ggtt = &gt->i915->ggtt;
+ const struct resource *dsm = &gt->i915->dsm;
+ resource_size_t num_pages, page;
+ struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ long max, count;
+ void *tmp;
+ u32 *crc;
+ int err;
+
+ if (!drm_mm_node_allocated(&ggtt->error_capture))
+ return 0;
+
+ num_pages = resource_size(dsm) >> PAGE_SHIFT;
+ if (!num_pages)
+ return 0;
+
+ crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
+ if (!crc)
+ return -ENOMEM;
+
+ tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_crc;
+ }
+
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ err = igt_spinner_init(&spin, gt);
+ if (err)
+ goto err_lock;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ if (!(mask & engine->mask))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err_spin;
+ }
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_spin;
+ }
+ i915_request_add(rq);
+ }
+
+ for (page = 0; page < num_pages; page++) {
+ dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
+ void __iomem *s;
+ void *in;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma,
+ ggtt->error_capture.start,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
+ page << PAGE_SHIFT,
+ ((page + 1) << PAGE_SHIFT) - 1))
+ memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ in = s;
+ if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = tmp;
+ crc[page] = crc32_le(0, in, PAGE_SIZE);
+
+ io_mapping_unmap(s);
+ }
+ mb();
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+
+ if (mask == ALL_ENGINES) {
+ intel_gt_reset(gt, mask, NULL);
+ } else {
+ for_each_engine(engine, gt, id) {
+ if (mask & engine->mask)
+ intel_engine_reset(engine, NULL);
+ }
+ }
+
+ max = -1;
+ count = 0;
+ for (page = 0; page < num_pages; page++) {
+ dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
+ void __iomem *s;
+ void *in;
+ u32 x;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma,
+ ggtt->error_capture.start,
+ I915_CACHE_NONE, 0);
+ mb();
+
+ s = io_mapping_map_wc(&ggtt->iomap,
+ ggtt->error_capture.start,
+ PAGE_SIZE);
+
+ in = s;
+ if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE))
+ in = tmp;
+ x = crc32_le(0, in, PAGE_SIZE);
+
+ if (x != crc[page] &&
+ !__drm_mm_interval_first(&gt->i915->mm.stolen,
+ page << PAGE_SHIFT,
+ ((page + 1) << PAGE_SHIFT) - 1)) {
+ pr_debug("unused stolen page %pa modified by GPU reset\n",
+ &page);
+ if (count++ == 0)
+ igt_hexdump(in, PAGE_SIZE);
+ max = page;
+ }
+
+ io_mapping_unmap(s);
+ }
+ mb();
+ ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+
+ if (count > 0) {
+ pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
+ msg, count, max);
+ }
+ if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
+ pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
+ msg, I915_GEM_STOLEN_BIAS);
+ err = -EINVAL;
+ }
+
+err_spin:
+ igt_spinner_fini(&spin);
+
+err_lock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
+
+ kfree(tmp);
+err_crc:
+ kfree(crc);
+ return err;
+}
+
+static int igt_reset_device_stolen(void *arg)
+{
+ return __igt_reset_stolen(arg, ALL_ENGINES, "device");
+}
+
+static int igt_reset_engines_stolen(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __igt_reset_stolen(gt, engine->mask, engine->name);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
static int igt_global_reset(void *arg)
{
@@ -164,6 +358,8 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_reset_device_stolen),
+ SUBTEST(igt_reset_engines_stolen),
SUBTEST(igt_wedged_reset),
SUBTEST(igt_atomic_reset),
SUBTEST(igt_atomic_engine_reset),
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 8624f5d2a1f3..3540ba9bd459 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -77,20 +77,20 @@ create_spin_counter(struct intel_engine_cs *engine,
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- return vma;
+ err = PTR_ERR(vma);
+ goto err_put;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err) {
- i915_vma_put(vma);
- return ERR_PTR(err);
- }
+ if (err)
+ goto err_unlock;
+
+ i915_vma_lock(vma);
base = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(base)) {
- i915_gem_object_put(obj);
- return ERR_CAST(base);
+ err = PTR_ERR(base);
+ goto err_unpin;
}
cs = base;
@@ -134,6 +134,14 @@ create_spin_counter(struct intel_engine_cs *engine,
*cancel = base + loop;
*counter = srm ? memset32(base + end, 0, 1) : NULL;
return vma;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_unlock:
+ i915_vma_unlock(vma);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
}
static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
@@ -639,7 +647,6 @@ int live_rps_frequency_cs(void *arg)
goto err_vma;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
@@ -647,7 +654,6 @@ int live_rps_frequency_cs(void *arg)
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
i915_request_add(rq);
if (err)
goto err_vma;
@@ -700,7 +706,7 @@ int live_rps_frequency_cs(void *arg)
f = act; /* may skip ahead [pcu granularity] */
}
- err = -EINVAL;
+ err = -EINTR; /* ignore error, continue on with test */
}
err_vma:
@@ -708,6 +714,7 @@ err_vma:
i915_gem_object_flush_map(vma->obj);
i915_gem_object_unpin_map(vma->obj);
i915_vma_unpin(vma);
+ i915_vma_unlock(vma);
i915_vma_put(vma);
st_engine_heartbeat_enable(engine);
@@ -781,7 +788,6 @@ int live_rps_frequency_srm(void *arg)
goto err_vma;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
@@ -789,7 +795,6 @@ int live_rps_frequency_srm(void *arg)
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
i915_request_add(rq);
if (err)
goto err_vma;
@@ -841,7 +846,7 @@ int live_rps_frequency_srm(void *arg)
f = act; /* may skip ahead [pcu granularity] */
}
- err = -EINVAL;
+ err = -EINTR; /* ignore error, continue on with test */
}
err_vma:
@@ -849,6 +854,7 @@ err_vma:
i915_gem_object_flush_map(vma->obj);
i915_gem_object_unpin_map(vma->obj);
i915_vma_unpin(vma);
+ i915_vma_unlock(vma);
i915_vma_put(vma);
st_engine_heartbeat_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index fb5b7d3498a6..19c2cb166e7c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -72,7 +72,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = intel_timeline_create(state->gt, NULL);
+ tl = intel_timeline_create(state->gt);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -158,7 +158,7 @@ out:
__mock_hwsp_record(&state, na, NULL);
kfree(state.history);
err_put:
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
@@ -455,7 +455,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
struct i915_request *rq;
int err;
- err = intel_timeline_pin(tl);
+ err = intel_timeline_pin(tl, NULL);
if (err) {
rq = ERR_PTR(err);
goto out;
@@ -487,11 +487,11 @@ checked_intel_timeline_create(struct intel_gt *gt)
{
struct intel_timeline *tl;
- tl = intel_timeline_create(gt, NULL);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl))
return tl;
- if (*tl->hwsp_seqno != tl->seqno) {
+ if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
*tl->hwsp_seqno, tl->seqno);
intel_timeline_put(tl);
@@ -561,9 +561,9 @@ static int live_hwsp_engine(void *arg)
for (n = 0; n < count; n++) {
struct intel_timeline *tl = timelines[n];
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
- n, tl->hwsp_offset, *tl->hwsp_seqno);
+ if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
+ n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
@@ -633,9 +633,9 @@ out:
for (n = 0; n < count; n++) {
struct intel_timeline *tl = timelines[n];
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n",
- n, tl->hwsp_offset, *tl->hwsp_seqno);
+ if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
+ n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
@@ -660,14 +660,14 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
- tl = intel_timeline_create(gt, NULL);
+ tl = intel_timeline_create(gt);
if (IS_ERR(tl))
return PTR_ERR(tl);
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
- err = intel_timeline_pin(tl);
+ err = intel_timeline_pin(tl, NULL);
if (err)
goto out_free;
@@ -733,7 +733,8 @@ static int live_hwsp_wrap(void *arg)
goto out;
}
- if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
+ READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
*hwsp_seqno[0], *hwsp_seqno[1],
seqno[0], seqno[1]);
@@ -966,9 +967,10 @@ static int live_hwsp_recycle(void *arg)
break;
}
- if (*tl->hwsp_seqno != count) {
- pr_err("Invalid seqno stored in timeline %lu @ tl->hwsp_offset, found 0x%x\n",
- count, *tl->hwsp_seqno);
+ if (READ_ONCE(*tl->hwsp_seqno) != count) {
+ GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n",
+ count, tl->fence_context,
+ tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
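[Editor's note] The switch to READ_ONCE() in these selftests matters because *tl->hwsp_seqno is written by the GPU behind the compiler's back, so a plain load may be hoisted out of the check or torn. The assumed pattern, in miniature:

	static bool hwsp_holds(const struct intel_timeline *tl, u32 expected)
	{
		/* the GPU owns the HWSP; force a fresh, untorn load each time */
		return READ_ONCE(*tl->hwsp_seqno) == expected;
	}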
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index febc9e6692ba..61a0532d0f3d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -214,7 +214,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
return PTR_ERR(results);
err = 0;
- i915_gem_object_lock(results);
+ i915_gem_object_lock(results, NULL);
intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 43c7acbdc79d..f011ea42487e 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,40 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
-static size_t shmem_npte(struct file *file)
-{
- return file->f_mapping->host->i_size >> PAGE_SHIFT;
-}
-
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
- unsigned long pfn;
-
- vunmap(ptr);
-
- for (pfn = 0; pfn < n_pte; pfn++) {
- struct page *page;
-
- page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
- GFP_KERNEL);
- if (!WARN_ON(IS_ERR(page))) {
- put_page(page);
- put_page(page);
- }
- }
-}
-
void *shmem_pin_map(struct file *file)
{
- const size_t n_pte = shmem_npte(file);
- pte_t *stack[32], **ptes, **mem;
- struct vm_struct *area;
- unsigned long pfn;
-
- mem = stack;
- if (n_pte > ARRAY_SIZE(stack)) {
- mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return NULL;
- }
+ struct page **pages;
+ size_t n_pages, i;
+ void *vaddr;
- area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
- if (!area) {
- if (mem != stack)
- kvfree(mem);
+ n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return NULL;
- }
- ptes = mem;
- for (pfn = 0; pfn < n_pte; pfn++) {
- struct page *page;
-
- page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
- GFP_KERNEL);
- if (IS_ERR(page))
+ for (i = 0; i < n_pages; i++) {
+ pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+ GFP_KERNEL);
+ if (IS_ERR(pages[i]))
goto err_page;
-
- **ptes++ = mk_pte(page, PAGE_KERNEL);
}
- if (mem != stack)
- kvfree(mem);
-
+ vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ if (!vaddr)
+ goto err_page;
mapping_set_unevictable(file->f_mapping);
- return area->addr;
-
+ return vaddr;
err_page:
- if (mem != stack)
- kvfree(mem);
-
- __shmem_unpin_map(file, area->addr, pfn);
+	while (i--)
+ put_page(pages[i]);
+ kvfree(pages);
return NULL;
}
void shmem_unpin_map(struct file *file, void *ptr)
{
mapping_clear_unevictable(file->f_mapping);
- __shmem_unpin_map(file, ptr, shmem_npte(file));
+ vfree(ptr);
}
static int __shmem_rw(struct file *file, loff_t off,
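The rewrite above replaces the open-coded alloc_vm_area() plus manual PTE filling with vmap() and VM_MAP_PUT_PAGES, which hands ownership of the page references (and of the pages[] array itself) to the mapping, so a single vfree() in shmem_unpin_map() unmaps, drops the references, and frees the array. A condensed sketch of the ownership transfer, with hypothetical names:

/* Sketch: map pinned pages and let vfree() release everything.
 * With VM_MAP_PUT_PAGES, vfree(vaddr) calls put_page() on each
 * mapped page and frees the pages[] array, so the caller must not
 * kvfree() it after a successful vmap().
 */
static void *map_and_transfer(struct page **pages, unsigned int n_pages)
{
	void *vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);

	if (!vaddr)
		return NULL;	/* on failure the caller still owns pages[] */

	return vaddr;		/* later: a single vfree(vaddr) */
}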
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 861657897c0f..942c7c187adb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -677,7 +677,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- ret = i915_ggtt_pin(vma, 0, flags);
+ ret = i915_ggtt_pin(vma, NULL, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 59b27aba15c6..80e8b6c3bc8c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -51,8 +51,8 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* Note that RKL uses the same firmware as TGL.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 5, 0)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index f1940939260a..16b582cb97ed 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -936,7 +936,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return -EFAULT;
}
- if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
@@ -976,7 +976,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* inhibit context will restore with correct values
*/
if (IS_GEN(s->engine->i915, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
@@ -992,8 +992,6 @@ static int cmd_reg_handler(struct parser_exec_state *s,
}
}
- /* TODO: Update the global mask if this MMIO is a masked-MMIO */
- intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
}
@@ -1923,6 +1921,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (ret)
goto err_unmap;
+ i915_gem_object_unlock(bb->obj);
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
@@ -2982,7 +2981,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto put_obj;
}
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (ret) {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ff7f2515a6fe..9831361f181e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -256,11 +256,11 @@ struct intel_gvt_mmio {
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
-#define F_CMD_ACCESSED (1 << 5)
-/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
-/* This reg is saved/restored in context */
-#define F_IN_CTX (1 << 7)
+/* This reg is in GVT's mmio save-restore list and in hardware
+ * logical context image
+ */
+#define F_SR_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -597,39 +597,42 @@ static inline void intel_gvt_mmio_set_accessed(
}
/**
- * intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
+ * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by command
* @gvt: a GVT device
* @offset: register offset
*
+ * Returns:
+ * True if the MMIO can be accessed by GPU commands
*/
-static inline bool intel_gvt_mmio_is_cmd_access(
+static inline bool intel_gvt_mmio_is_cmd_accessible(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
- * intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
+ * intel_gvt_mmio_set_cmd_accessible -
+ * mark an MMIO as accessible by command
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline bool intel_gvt_mmio_is_unalign(
+static inline void intel_gvt_mmio_set_cmd_accessible(
struct intel_gvt *gvt, unsigned int offset)
{
- return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}
/**
- * intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
+ * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline void intel_gvt_mmio_set_cmd_accessed(
+static inline bool intel_gvt_mmio_is_unalign(
struct intel_gvt *gvt, unsigned int offset)
{
- gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
@@ -648,30 +651,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
}
/**
- * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * intel_gvt_mmio_is_sr_in_ctx -
+ * check if an MMIO has the F_SR_IN_CTX mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * True if a MMIO has a in-context mask, false if it isn't.
+ * True if the MMIO has the F_SR_IN_CTX mask, false otherwise.
*
*/
-static inline bool intel_gvt_mmio_is_in_ctx(
+static inline bool intel_gvt_mmio_is_sr_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
- return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}
/**
- * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * intel_gvt_mmio_set_sr_in_ctx -
+ * mark an MMIO as being in GVT's mmio save-restore list and also
+ * in the hardware logical context image
* @gvt: a GVT device
* @offset: register offset
*
*/
-static inline void intel_gvt_mmio_set_in_ctx(
+static inline void intel_gvt_mmio_set_sr_in_ctx(
struct intel_gvt *gvt, unsigned int offset)
{
- gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}
void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
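All of the accessors above share one scheme: a byte of attribute flags per 4-byte register offset, tested or OR-ed in place. A generic sketch of that pattern (names and flag value are illustrative, not GVT's):

/* Sketch: per-register attribute bytes indexed by dword offset. */
#define EX_FLAG_CMD_ACCESS	(1 << 3)	/* hypothetical flag bit */

static inline bool ex_attr_test(const u8 *attrs, unsigned int offset, u8 flag)
{
	return attrs[offset >> 2] & flag;	/* offset is in bytes */
}

static inline void ex_attr_set(u8 *attrs, unsigned int offset, u8 flag)
{
	attrs[offset >> 2] |= flag;
}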
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 05f3bc98d242..eb342a759943 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1489,7 +1489,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
const struct intel_engine_cs *engine =
intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+ if (value != 0 &&
+ !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
@@ -1650,6 +1651,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
return 0;
}
+/**
+ * FixMe:
+ * If the guest fills a non-privileged batch buffer on ApolloLake/Broxton as
+ * Mesa i965 did:
+ * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
+ * Due to the missing flush of the batch buffer filled by the VM vCPU, the
+ * host GPU hangs when executing these MI_BATCH_BUFFER commands.
+ * Temporarily work around this by setting the SNOOP bit for PAT3, which is
+ * used by the PPGTT PML4 PTE: PAT(0) PCD(1) PWT(1).
+ * Performance is still expected to be low and will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u64 pat =
+ GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+ return 0;
+}
+
static int guc_status_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
@@ -1892,7 +1921,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
- MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+ MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
@@ -1900,7 +1929,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
- MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
+
MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
gamw_echo_dev_rw_ia_write);
@@ -1927,11 +1957,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
- MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
- MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
+ MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_CTL, D_ALL, 0, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, 0, mmio_read_from_hw, NULL);
+ MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) _MMIO((base) + 0x29c)
@@ -2686,7 +2716,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+ MMIO_RING_GM(RING_BBADDR, D_ALL, NULL, NULL);
MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2771,7 +2801,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, 0,
mmio_read_from_hw, NULL);
#define RING_REG(base) _MMIO((base) + 0xd0)
@@ -2785,7 +2815,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) _MMIO((base) + 0x234)
- MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+ MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
@@ -2811,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+ MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
@@ -2820,7 +2850,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
+ MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2921,7 +2951,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
MMIO_D(DC_STATE_EN, D_SKL_PLUS);
MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
@@ -3137,8 +3167,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
+ MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
return 0;
}
@@ -3312,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
+ MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+ 0, 0, D_BXT, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
+
return 0;
}
@@ -3357,7 +3399,10 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
-/* Special MMIO blocks. */
+/* Special MMIO blocks. Registers in MMIO block ranges should not be command
+ * accessible (should have no F_CMD_ACCESS flag); otherwise, cmd_reg_handler
+ * in cmd_parser.c needs to be updated.
+ */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
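bxt_ppat_low_write() above packs eight PAT entries into a 64-bit value and mirrors only the low half into the vreg. A sketch of the packing, assuming the usual gen8 layout of one attribute byte per PAT index (macro and flag names here are placeholders):

/* Sketch: gen8-style PPAT packing. */
#define EX_PPAT(i, x)	((u64)(x) << ((i) * 8))
#define EX_SNOOP	(1 << 6)	/* hypothetical snoop attribute */

static u32 ex_ppat_low(void)
{
	u64 pat = EX_PPAT(0, EX_SNOOP) | EX_PPAT(3, EX_SNOOP);

	/* PAT indices 0-3 occupy the low dword, 4-7 the high dword */
	return lower_32_bits(pat);
}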
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 291993615af9..b6811f6a230d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -251,6 +251,9 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ /* uc reset: hw expects GS_MIA_IN_RESET */
+ vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+
if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 86a60bdf0818..afe574d6b3b5 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -595,7 +595,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
- intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
}
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3c3b9842bbbd..aed2ef6466a2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -403,6 +403,14 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
+static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
+{
+ struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
+
+ /* This is not a good idea */
+ sg->dma_address = addr;
+}
+
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct intel_context *ce)
{
@@ -411,7 +419,7 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
+ set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
} else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
@@ -421,7 +429,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
shadow ppgtt. */
if (!pd)
break;
- px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
+
+ set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
}
}
}
@@ -1240,13 +1249,13 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- px_dma(ppgtt->pd) = s->i915_context_pml4;
+ set_dma_address(ppgtt->pd, s->i915_context_pml4);
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
- px_dma(pd) = s->i915_context_pdps[i];
+ set_dma_address(pd, s->i915_context_pdps[i]);
}
}
}
@@ -1268,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->gt, id)
- intel_context_unpin(s->shadow[id]);
+ intel_context_put(s->shadow[id]);
kmem_cache_destroy(s->workloads);
}
@@ -1360,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->ring = __intel_context_ring_size(ring_size);
}
- ret = intel_context_pin(ce);
- intel_context_put(ce);
- if (ret)
- goto out_shadow_ctx;
-
s->shadow[i] = ce;
}
@@ -1396,7 +1400,6 @@ out_shadow_ctx:
if (IS_ERR(s->shadow[i]))
break;
- intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
}
i915_vm_put(&ppgtt->vm);
@@ -1470,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu_submission *s = &workload->vgpu->submission;
+ intel_context_unpin(s->shadow[workload->engine->id]);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
@@ -1715,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
return ERR_PTR(ret);
}
+ ret = intel_context_pin(s->shadow[engine->id]);
+ if (ret) {
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
return workload;
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index d960d0be5bd2..10a865f3dc09 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -28,12 +28,14 @@ static struct i915_global_active {
} global;
struct active_node {
+ struct rb_node node;
struct i915_active_fence base;
struct i915_active *ref;
- struct rb_node node;
u64 timeline;
};
+#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)
+
static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
@@ -81,7 +83,7 @@ static void *active_debug_hint(void *addr)
return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}
-static struct debug_obj_descr active_debug_desc = {
+static const struct debug_obj_descr active_debug_desc = {
.name = "i915_active",
.debug_hint = active_debug_hint,
};
@@ -128,8 +130,8 @@ static inline void debug_active_assert(struct i915_active *ref) { }
static void
__active_retire(struct i915_active *ref)
{
+ struct rb_root root = RB_ROOT;
struct active_node *it, *n;
- struct rb_root root;
unsigned long flags;
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -141,9 +143,25 @@ __active_retire(struct i915_active *ref)
GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
debug_active_deactivate(ref);
- root = ref->tree;
- ref->tree = RB_ROOT;
- ref->cache = NULL;
+ /* Even if we have not used the cache, we may still have a barrier */
+ if (!ref->cache)
+ ref->cache = fetch_node(ref->tree.rb_node);
+
+ /* Keep the MRU cached node for reuse */
+ if (ref->cache) {
+ /* Discard all other nodes in the tree */
+ rb_erase(&ref->cache->node, &ref->tree);
+ root = ref->tree;
+
+ /* Rebuild the tree with only the cached node */
+ rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
+ rb_insert_color(&ref->cache->node, &ref->tree);
+ GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);
+
+ /* Make the cached node available for reuse with any timeline */
+ if (IS_ENABLED(CONFIG_64BIT))
+ ref->cache->timeline = 0; /* needs cmpxchg(u64) */
+ }
spin_unlock_irqrestore(&ref->tree_lock, flags);
@@ -154,6 +172,7 @@ __active_retire(struct i915_active *ref)
/* ... except if you wait on it, you must manage your own references! */
wake_up_var(ref);
+ /* Finally free the discarded timeline tree */
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
GEM_BUG_ON(i915_active_fence_isset(&it->base));
kmem_cache_free(global.slab_cache, it);
@@ -216,12 +235,11 @@ excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
active_retire(container_of(cb, struct i915_active, excl.cb));
}
-static struct i915_active_fence *
-active_instance(struct i915_active *ref, struct intel_timeline *tl)
+static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
- struct active_node *node, *prealloc;
- struct rb_node **p, *parent;
- u64 idx = tl->fence_context;
+ struct active_node *it;
+
+ GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */
/*
* We track the most recently used timeline to skip a rbtree search
@@ -230,8 +248,59 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
* after the previous activity has been retired, or if it matches the
* current timeline.
*/
- node = READ_ONCE(ref->cache);
- if (node && node->timeline == idx)
+ it = READ_ONCE(ref->cache);
+ if (it) {
+ u64 cached = READ_ONCE(it->timeline);
+
+ /* Once claimed, this slot will only belong to this idx */
+ if (cached == idx)
+ return it;
+
+#ifdef CONFIG_64BIT /* for cmpxchg(u64) */
+ /*
+ * An unclaimed cache [.timeline=0] can only be claimed once.
+ *
+ * If the value is already non-zero, some other thread has
+ * claimed the cache and we know that it does not match our
+ * idx. If, and only if, the timeline is currently zero is it
+ * worth competing to claim it atomically for ourselves (for
+ * only the winner of that race will cmpxchg return the old
+ * value of 0).
+ */
+ if (!cached && !cmpxchg(&it->timeline, 0, idx))
+ return it;
+#endif
+ }
+
+ BUILD_BUG_ON(offsetof(typeof(*it), node));
+
+ /* While active, the tree can only be built; not destroyed */
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ it = fetch_node(ref->tree.rb_node);
+ while (it) {
+ if (it->timeline < idx) {
+ it = fetch_node(it->node.rb_right);
+ } else if (it->timeline > idx) {
+ it = fetch_node(it->node.rb_left);
+ } else {
+ WRITE_ONCE(ref->cache, it);
+ break;
+ }
+ }
+
+ /* NB: If the tree rotated beneath us, we may miss our target. */
+ return it;
+}
+
+static struct i915_active_fence *
+active_instance(struct i915_active *ref, u64 idx)
+{
+ struct active_node *node, *prealloc;
+ struct rb_node **p, *parent;
+
+ node = __active_lookup(ref, idx);
+ if (likely(node))
return &node->base;
/* Preallocate a replacement, just in case */
@@ -268,10 +337,9 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
rb_insert_color(&node->node, &ref->tree);
out:
- ref->cache = node;
+ WRITE_ONCE(ref->cache, node);
spin_unlock_irq(&ref->tree_lock);
- BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
}
@@ -353,69 +421,116 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
return ____active_del_barrier(ref, node, barrier_to_engine(node));
}
-int i915_active_ref(struct i915_active *ref,
- struct intel_timeline *tl,
- struct dma_fence *fence)
+static bool
+replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
+{
+ if (!is_barrier(active)) /* proto-node used by our idle barrier? */
+ return false;
+
+ /*
+ * This request is on the kernel_context timeline, and so
+ * we can use it to substitute for the pending idle-barrier
+ * request that we want to emit on the kernel_context.
+ */
+ __active_del_barrier(ref, node_from_active(active));
+ return true;
+}
+
+int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
struct i915_active_fence *active;
int err;
- lockdep_assert_held(&tl->mutex);
-
/* Prevent reaping in case we malloc/wait while building the tree */
err = i915_active_acquire(ref);
if (err)
return err;
- active = active_instance(ref, tl);
+ active = active_instance(ref, idx);
if (!active) {
err = -ENOMEM;
goto out;
}
- if (is_barrier(active)) { /* proto-node used by our idle barrier */
- /*
- * This request is on the kernel_context timeline, and so
- * we can use it to substitute for the pending idle-barrer
- * request that we want to emit on the kernel_context.
- */
- __active_del_barrier(ref, node_from_active(active));
+ if (replace_barrier(ref, active)) {
RCU_INIT_POINTER(active->fence, NULL);
atomic_dec(&ref->count);
}
if (!__i915_active_fence_set(active, fence))
- atomic_inc(&ref->count);
+ __i915_active_acquire(ref);
out:
i915_active_release(ref);
return err;
}
-struct dma_fence *
-i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+static struct dma_fence *
+__i915_active_set_fence(struct i915_active *ref,
+ struct i915_active_fence *active,
+ struct dma_fence *fence)
{
struct dma_fence *prev;
- /* We expect the caller to manage the exclusive timeline ordering */
- GEM_BUG_ON(i915_active_is_idle(ref));
+ if (replace_barrier(ref, active)) {
+ RCU_INIT_POINTER(active->fence, fence);
+ return NULL;
+ }
rcu_read_lock();
- prev = __i915_active_fence_set(&ref->excl, f);
+ prev = __i915_active_fence_set(active, fence);
if (prev)
prev = dma_fence_get_rcu(prev);
else
- atomic_inc(&ref->count);
+ __i915_active_acquire(ref);
rcu_read_unlock();
return prev;
}
+static struct i915_active_fence *
+__active_fence(struct i915_active *ref, u64 idx)
+{
+ struct active_node *it;
+
+ it = __active_lookup(ref, idx);
+ if (unlikely(!it)) { /* Contention with parallel tree builders! */
+ spin_lock_irq(&ref->tree_lock);
+ it = __active_lookup(ref, idx);
+ spin_unlock_irq(&ref->tree_lock);
+ }
+ GEM_BUG_ON(!it); /* slot must be preallocated */
+
+ return &it->base;
+}
+
+struct dma_fence *
+__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
+{
+ /* Only valid while active, see i915_active_acquire_for_context() */
+ return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
+}
+
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+ /* We expect the caller to manage the exclusive timeline ordering */
+ return __i915_active_set_fence(ref, &ref->excl, f);
+}
+
bool i915_active_acquire_if_busy(struct i915_active *ref)
{
debug_active_assert(ref);
return atomic_add_unless(&ref->count, 1, 0);
}
+static void __i915_active_activate(struct i915_active *ref)
+{
+ spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+ if (!atomic_fetch_inc(&ref->count))
+ debug_active_activate(ref);
+ spin_unlock_irq(&ref->tree_lock);
+}
+
int i915_active_acquire(struct i915_active *ref)
{
int err;
@@ -423,19 +538,19 @@ int i915_active_acquire(struct i915_active *ref)
if (i915_active_acquire_if_busy(ref))
return 0;
+ if (!ref->active) {
+ __i915_active_activate(ref);
+ return 0;
+ }
+
err = mutex_lock_interruptible(&ref->mutex);
if (err)
return err;
if (likely(!i915_active_acquire_if_busy(ref))) {
- if (ref->active)
- err = ref->active(ref);
- if (!err) {
- spin_lock_irq(&ref->tree_lock); /* __active_retire() */
- debug_active_activate(ref);
- atomic_inc(&ref->count);
- spin_unlock_irq(&ref->tree_lock);
- }
+ err = ref->active(ref);
+ if (!err)
+ __i915_active_activate(ref);
}
mutex_unlock(&ref->mutex);
@@ -443,6 +558,24 @@ int i915_active_acquire(struct i915_active *ref)
return err;
}
+int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
+{
+ struct i915_active_fence *active;
+ int err;
+
+ err = i915_active_acquire(ref);
+ if (err)
+ return err;
+
+ active = active_instance(ref, idx);
+ if (!active) {
+ i915_active_release(ref);
+ return -ENOMEM;
+ }
+
+ return 0; /* return with active ref */
+}
+
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
@@ -651,16 +784,16 @@ int i915_sw_fence_await_active(struct i915_sw_fence *fence,
return await_active(ref, flags, sw_await_fence, fence, fence);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
debug_active_fini(ref);
GEM_BUG_ON(atomic_read(&ref->count));
GEM_BUG_ON(work_pending(&ref->work));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
mutex_destroy(&ref->mutex);
+
+ if (ref->cache)
+ kmem_cache_free(global.slab_cache, ref->cache);
}
-#endif
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
@@ -674,7 +807,6 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
if (RB_EMPTY_ROOT(&ref->tree))
return NULL;
- spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
/*
@@ -700,9 +832,9 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
prev = p;
if (node->timeline < idx)
- p = p->rb_right;
+ p = READ_ONCE(p->rb_right);
else
- p = p->rb_left;
+ p = READ_ONCE(p->rb_left);
}
/*
@@ -739,14 +871,13 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
goto match;
}
- spin_unlock_irq(&ref->tree_lock);
-
return NULL;
match:
+ spin_lock_irq(&ref->tree_lock);
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
if (p == &ref->cache->node)
- ref->cache = NULL;
+ WRITE_ONCE(ref->cache, NULL);
spin_unlock_irq(&ref->tree_lock);
return rb_entry(p, struct active_node, node);
@@ -758,7 +889,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
intel_engine_mask_t tmp, mask = engine->mask;
struct llist_node *first = NULL, *last = NULL;
struct intel_gt *gt = engine->gt;
- int err;
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -778,13 +908,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct llist_node *prev = first;
struct active_node *node;
+ rcu_read_lock();
node = reuse_idle_barrier(ref, idx);
+ rcu_read_unlock();
if (!node) {
node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
- if (!node) {
- err = ENOMEM;
+ if (!node)
goto unwind;
- }
RCU_INIT_POINTER(node->base.fence, NULL);
node->base.cb.func = node_retire;
@@ -804,7 +934,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
*/
RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
node->base.cb.node.prev = (void *)engine;
- atomic_inc(&ref->count);
+ __i915_active_acquire(ref);
}
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
@@ -832,7 +962,7 @@ unwind:
kmem_cache_free(global.slab_cache, node);
}
- return err;
+ return -ENOMEM;
}
void i915_active_acquire_barrier(struct i915_active *ref)
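The cache-slot handover in __active_lookup() above depends on cmpxchg() granting an unclaimed slot (timeline == 0) to exactly one contender. A stand-alone sketch of that claim-once idiom, assuming a 64-bit build as the #ifdef in the patch does:

/* Sketch: lock-free, claim-exactly-once slot.
 * Only the thread whose cmpxchg() observes the old value 0 wins;
 * every other contender sees non-zero and must allocate elsewhere.
 */
static bool ex_claim_slot(u64 *slot, u64 idx)
{
	return READ_ONCE(*slot) == 0 && cmpxchg(slot, 0, idx) == 0;
}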
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index cf4058150966..fb165d3f01cf 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -163,14 +163,16 @@ void __i915_active_init(struct i915_active *ref,
__i915_active_init(ref, active, retire, &__mkey, &__wkey); \
} while (0)
-int i915_active_ref(struct i915_active *ref,
- struct intel_timeline *tl,
- struct dma_fence *fence);
+struct dma_fence *
+__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
+int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence);
static inline int
i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
- return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
+ return i915_active_ref(ref,
+ i915_request_timeline(rq)->fence_context,
+ &rq->fence);
}
struct dma_fence *
@@ -198,7 +200,9 @@ int i915_request_await_active(struct i915_request *rq,
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
+int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);
+
void i915_active_release(struct i915_active *ref);
static inline void __i915_active_acquire(struct i915_active *ref)
@@ -213,11 +217,7 @@ i915_active_is_idle(const struct i915_active *ref)
return !atomic_read(&ref->count);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref);
-#else
-static inline void i915_active_fini(struct i915_active *ref) { }
-#endif
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine);
@@ -231,4 +231,19 @@ struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref);
+static inline int __i915_request_await_exclusive(struct i915_request *rq,
+ struct i915_active *active)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ fence = i915_active_fence_get(&active->excl);
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ return err;
+}
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 5ac4a999f05a..e88970256e8e 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
- u32 offset, u32 length)
+ unsigned long offset, unsigned long length)
{
bool needs_clflush;
void *dst, *src;
@@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
+ unsigned long x, n;
void *ptr;
- int x, n;
/*
* We can avoid clflushing partial cachelines before the write
@@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
ptr = dst;
x = offset_in_page(offset);
for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min_t(int, length, PAGE_SIZE - x);
+ int len = min(length, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
*/
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
- u32 batch_offset,
- u32 batch_length,
+ unsigned long batch_offset,
+ unsigned long batch_length,
struct i915_vma *shadow,
bool trampoline)
{
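The widening above (u32 to unsigned long for batch offset and length) closes a truncation hole: min_t(int, ...) casts both operands to int first, so a length above INT_MAX wraps before the comparison. A small sketch of the hazard being removed, with a hypothetical helper:

/* Sketch: clamp a large batch length to the space left in a page. */
static int ex_page_len(unsigned long length, unsigned long x)
{
	/* min_t(int, length, PAGE_SIZE - x) would chop a 3 GiB length
	 * to a (possibly negative) int before comparing; with matching
	 * unsigned long operands, min() compares losslessly and the
	 * result always fits in an int.
	 */
	return min(length, PAGE_SIZE - x);
}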
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 784219962193..ea469168cd44 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m,
}
i915_gem_context_unlock_engines(ctx);
+ mutex_lock(&ctx->mutex);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
struct file_stats stats = {
.vm = rcu_access_pointer(ctx->vm),
@@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats);
}
+ mutex_unlock(&ctx->mutex);
spin_lock(&i915->gem.contexts.lock);
list_safe_reset_next(ctx, cn, link);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5fd5af4bc855..acc32066cec3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,7 +58,6 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
-#include "display/intel_psr.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -216,125 +215,6 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-/* part #1: call before irq install */
-static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
-{
- int ret;
-
- if (i915_inject_probe_failure(i915))
- return -ENODEV;
-
- if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
- ret = drm_vblank_init(&i915->drm,
- INTEL_NUM_PIPES(i915));
- if (ret)
- return ret;
- }
-
- intel_bios_init(i915);
-
- ret = intel_vga_register(i915);
- if (ret)
- goto cleanup_bios;
-
- intel_power_domains_init_hw(i915, false);
-
- intel_csr_ucode_init(i915);
-
- ret = intel_modeset_init_noirq(i915);
- if (ret)
- goto cleanup_vga_client_pw_domain_csr;
-
- return 0;
-
-cleanup_vga_client_pw_domain_csr:
- intel_csr_ucode_fini(i915);
- intel_power_domains_driver_remove(i915);
- intel_vga_unregister(i915);
-cleanup_bios:
- intel_bios_driver_remove(i915);
- return ret;
-}
-
-/* part #2: call after irq install */
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
-{
- int ret;
-
- /* Important: The output setup functions called by modeset_init need
- * working irqs for e.g. gmbus and dp aux transfers. */
- ret = intel_modeset_init(i915);
- if (ret)
- goto out;
-
- ret = i915_gem_init(i915);
- if (ret)
- goto cleanup_modeset;
-
- intel_overlay_setup(i915);
-
- if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
- return 0;
-
- ret = intel_fbdev_init(&i915->drm);
- if (ret)
- goto cleanup_gem;
-
- /* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
-
- intel_init_ipc(i915);
-
- intel_psr_set_force_mode_changed(i915->psr.dp);
-
- return 0;
-
-cleanup_gem:
- i915_gem_suspend(i915);
- i915_gem_driver_remove(i915);
- i915_gem_driver_release(i915);
-cleanup_modeset:
- /* FIXME */
- intel_modeset_driver_remove(i915);
- intel_irq_uninstall(i915);
- intel_modeset_driver_remove_noirq(i915);
-out:
- return ret;
-}
-
-/* part #1: call before irq uninstall */
-static void i915_driver_modeset_remove(struct drm_i915_private *i915)
-{
- intel_modeset_driver_remove(i915);
-}
-
-/* part #2: call after irq uninstall */
-static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
-{
- intel_csr_ucode_fini(i915);
-
- intel_power_domains_driver_remove(i915);
-
- intel_vga_unregister(i915);
-
- intel_bios_driver_remove(i915);
-}
-
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
-{
- /*
- * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
- * CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(dev_priv)) {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
- DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
- }
-}
-
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
/*
@@ -392,7 +272,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
- pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
@@ -463,7 +343,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_pm_setup(dev_priv);
- intel_init_dpio(dev_priv);
ret = intel_power_domains_init(dev_priv);
if (ret < 0)
goto err_gem;
@@ -798,7 +677,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm,
"Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -821,7 +700,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -988,7 +867,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe_noirq(i915);
+ ret = intel_modeset_init_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
@@ -996,10 +875,18 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset;
- ret = i915_driver_modeset_probe(i915);
- if (ret < 0)
+ ret = intel_modeset_init_nogem(i915);
+ if (ret)
goto out_cleanup_irq;
+ ret = i915_gem_init(i915);
+ if (ret)
+ goto out_cleanup_modeset2;
+
+ ret = intel_modeset_init(i915);
+ if (ret)
+ goto out_cleanup_gem;
+
i915_driver_register(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1010,10 +897,20 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+out_cleanup_gem:
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
+out_cleanup_modeset2:
+ /* FIXME clean up the error path */
+ intel_modeset_driver_remove(i915);
+ intel_irq_uninstall(i915);
+ intel_modeset_driver_remove_noirq(i915);
+ goto out_cleanup_modeset;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- i915_driver_modeset_remove_noirq(i915);
+ intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -1045,7 +942,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- i915_driver_modeset_remove(i915);
+ intel_modeset_driver_remove(i915);
intel_irq_uninstall(i915);
@@ -1054,7 +951,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- i915_driver_modeset_remove_noirq(i915);
+ intel_modeset_driver_remove_nogem(i915);
i915_driver_hw_remove(i915);
@@ -1075,6 +972,7 @@ static void i915_driver_release(struct drm_device *dev)
intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
i915_driver_mmio_release(dev_priv);
@@ -1119,7 +1017,6 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
i915_gem_context_close(file);
- i915_gem_release(dev, file);
kfree_rcu(file_priv, rcu);
@@ -1846,7 +1743,8 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_GEM |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e4f7f6518945..8426d5974669 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,6 +33,8 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
+#include <asm/hypervisor.h>
+
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -108,18 +110,11 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200715"
-#define DRIVER_TIMESTAMP 1594811881
+#define DRIVER_DATE "20200917"
+#define DRIVER_TIMESTAMP 1600375437
struct drm_i915_gem_object;
-/*
- * The code assumes that the hpd_pins below have consecutive values and
- * starting with HPD_PORT_A, the HPD pin associated with any port can be
- * retrieved by adding the corresponding port (or phy) enum value to
- * HPD_PORT_A in most cases. For example:
- * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
- */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -131,10 +126,12 @@ enum hpd_pin {
HPD_PORT_C,
HPD_PORT_D,
HPD_PORT_E,
- HPD_PORT_F,
- HPD_PORT_G,
- HPD_PORT_H,
- HPD_PORT_I,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
HPD_NUM_PINS
};
@@ -203,11 +200,6 @@ struct drm_i915_file_private {
struct rcu_head rcu;
};
- struct {
- spinlock_t lock;
- struct list_head request_list;
- } mm;
-
struct xarray context_xa;
struct xarray vm_xa;
@@ -506,6 +498,7 @@ struct i915_psr {
bool link_standby;
bool colorimetry_support;
bool psr2_enabled;
+ bool psr2_sel_fetch_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
@@ -541,13 +534,9 @@ struct intel_gmbus {
struct i915_suspend_saved_registers {
u32 saveDSPARB;
- u32 saveFBC_CONTROL;
- u32 saveCACHE_MODE_0;
- u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -592,11 +581,6 @@ struct i915_gem_mm {
atomic_t free_count;
/**
- * Small stash of WC pages
- */
- struct pagestash wc_stash;
-
- /**
* tmpfs instance used for shmem backed objects
*/
struct vfsmount *gemfs;
@@ -1029,8 +1013,6 @@ struct drm_i915_private {
*/
u8 active_pipes;
- int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
-
struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -1045,6 +1027,14 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
/*
+ * HTI (aka HDPORT) state read during initial hw readout. Most
+ * platforms don't have HTI, so this will just stay 0. Those that do
+ * will use this later to figure out which PLLs and PHYs are unavailable
+ * for driver usage.
+ */
+ u32 hti_state;
+
+ /*
* edram size in MB.
* Cannot be determined by PCIID. You must always read a register.
*/
@@ -1489,6 +1479,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_TGL_U(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)
+
+#define IS_TGL_Y(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
+
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
#define SKL_REVID_C0 0x2
@@ -1509,14 +1505,34 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_BXT_REVID(dev_priv, since, until) \
(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
-#define KBL_REVID_A0 0x0
-#define KBL_REVID_B0 0x1
-#define KBL_REVID_C0 0x2
-#define KBL_REVID_D0 0x3
-#define KBL_REVID_E0 0x4
+enum {
+ KBL_REVID_A0,
+ KBL_REVID_B0,
+ KBL_REVID_B1,
+ KBL_REVID_C0,
+ KBL_REVID_D0,
+ KBL_REVID_D1,
+ KBL_REVID_E0,
+ KBL_REVID_F0,
+ KBL_REVID_G0,
+};
-#define IS_KBL_REVID(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+struct i915_rev_steppings {
+ u8 gt_stepping;
+ u8 disp_stepping;
+};
+
+/* Defined in intel_workarounds.c */
+extern const struct i915_rev_steppings kbl_revids[];
+
+#define IS_KBL_GT_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && \
+ kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
+ kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
+#define IS_KBL_DISP_REVID(dev_priv, since, until) \
+ (IS_KABYLAKE(dev_priv) && \
+ kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
+ kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
@@ -1547,12 +1563,41 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_EHL_REVID(p, since, until) \
(IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
-#define TGL_REVID_A0 0x0
-#define TGL_REVID_B0 0x1
-#define TGL_REVID_C0 0x2
+enum {
+ TGL_REVID_A0,
+ TGL_REVID_B0,
+ TGL_REVID_B1,
+ TGL_REVID_C0,
+ TGL_REVID_D0,
+};
+
+extern const struct i915_rev_steppings tgl_uy_revids[];
+extern const struct i915_rev_steppings tgl_revids[];
+
+static inline const struct i915_rev_steppings *
+tgl_revids_get(struct drm_i915_private *dev_priv)
+{
+ if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
+ return tgl_uy_revids;
+ else
+ return tgl_revids;
+}
-#define IS_TGL_REVID(p, since, until) \
- (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+#define IS_TGL_DISP_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && \
+ tgl_revids_get(p)->disp_stepping >= (since) && \
+ tgl_revids_get(p)->disp_stepping <= (until))
+
+#define IS_TGL_UY_GT_REVID(p, since, until) \
+ ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
+ tgl_uy_revids->gt_stepping >= (since) && \
+ tgl_uy_revids->gt_stepping <= (until))
+
+#define IS_TGL_GT_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && \
+ !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
+ tgl_revids->gt_stepping >= (since) && \
+ tgl_revids->gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
@@ -1665,6 +1710,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
+#define HAS_PSR2_SEL_FETCH(dev_priv) (INTEL_GEN(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
@@ -1716,7 +1762,9 @@ static inline bool intel_vtd_active(void)
if (intel_iommu_gfx_mapped)
return true;
#endif
- return false;
+
+ /* Running as a guest, we assume the host is enforcing VT-d */
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
}
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@ -1790,11 +1838,18 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
}
struct i915_vma * __must_check
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_ggtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+static inline struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags);
+ u64 size, u64 alignment, u64 flags)
+{
+ return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
+}
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
@@ -1831,7 +1886,6 @@ void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -1899,8 +1953,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,
- u32 batch_offset,
- u32 batch_length,
+ unsigned long batch_offset,
+ unsigned long batch_length,
struct i915_vma *shadow,
bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
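The new KBL/TGL macros above stop comparing raw PCI revision ids and instead map them through stepping tables, so GT and display workarounds can be keyed independently. A hedged usage sketch (the helpers are hypothetical; the macros are the ones defined above):

/* Sketch: stepping-keyed workaround application. */
static void ex_apply_kbl_workarounds(struct drm_i915_private *i915)
{
	/* GT-side fix limited to early Kaby Lake GT steppings */
	if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0))
		ex_apply_kbl_gt_wa(i915);	/* hypothetical helper */

	/* display-side fix keyed on the display stepping instead */
	if (IS_KBL_DISP_REVID(i915, KBL_REVID_A0, KBL_REVID_B0))
		ex_apply_kbl_disp_wa(i915);	/* hypothetical helper */
}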
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9aa3066cb75d..58276694c848 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -180,30 +180,6 @@ try_again:
}
static int
-i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
-{
- void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
- char __user *user_data = u64_to_user_ptr(args->data_ptr);
-
- /*
- * We manually control the domain here and pretend that it
- * remains coherent i.e. in the GTT domain, like shmem_pwrite.
- */
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
-
- if (copy_from_user(vaddr, user_data, args->size))
- return -EFAULT;
-
- drm_clflush_virt_range(vaddr, args->size);
- intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
-
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
- return 0;
-}
-
-static int
i915_gem_create(struct drm_file *file,
struct intel_memory_region *mr,
u64 *size_p,
@@ -335,12 +311,20 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
u64 remain;
int ret;
- ret = i915_gem_object_prepare_read(obj, &needs_clflush);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
+ ret = i915_gem_object_prepare_read(obj, &needs_clflush);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ return ret;
+ }
+
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
+ i915_gem_object_unlock(obj);
+
if (!fence)
return -ENOMEM;
@@ -420,7 +404,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
goto out_unpin;
@@ -519,6 +503,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pread(obj, args->offset, args->size);
+ ret = -ENODEV;
+ if (obj->ops->pread)
+ ret = obj->ops->pread(obj, args);
+ if (ret != -ENODEV)
+ goto out;
+
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
@@ -619,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- ret = i915_gem_object_lock_interruptible(obj);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
goto out_unpin;
@@ -734,12 +724,20 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
u64 remain;
int ret;
- ret = i915_gem_object_prepare_write(obj, &needs_clflush);
+ ret = i915_gem_object_lock_interruptible(obj, NULL);
if (ret)
return ret;
+ ret = i915_gem_object_prepare_write(obj, &needs_clflush);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ return ret;
+ }
+
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
+ i915_gem_object_unlock(obj);
+
if (!fence)
return -ENOMEM;
@@ -850,8 +848,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret == -EFAULT || ret == -ENOSPC) {
if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
- else
- ret = i915_gem_phys_pwrite(obj, args, file);
}
i915_gem_object_unpin_pages(obj);
@@ -946,11 +942,10 @@ static void discard_ggtt_vma(struct i915_vma *vma)
}
struct i915_vma *
-i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags)
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_ggtt_view *view,
+ u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
@@ -1016,7 +1011,7 @@ new_vma:
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -1290,7 +1285,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
drm_WARN_ON(&i915->drm,
i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
@@ -1301,21 +1296,6 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
return 0;
}
-void i915_gem_release(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_request *request;
-
- /* Clean up our request list when the client is going away, so that
- * later retire_requests won't dereference our soon-to-be-gone
- * file_priv.
- */
- spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_link)
- request->file_priv = NULL;
- spin_unlock(&file_priv->mm.lock);
-}
-
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
@@ -1331,9 +1311,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
file_priv->dev_priv = i915;
file_priv->file = file;
- spin_lock_init(&file_priv->mm.lock);
- INIT_LIST_HEAD(&file_priv->mm.request_list);
-
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
@@ -1344,6 +1321,58 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
return ret;
}
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
+{
+ ww_acquire_init(&ww->ctx, &reservation_ww_class);
+ INIT_LIST_HEAD(&ww->obj_list);
+ ww->intr = intr;
+ ww->contended = NULL;
+}
+
+static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
+{
+ struct drm_i915_gem_object *obj;
+
+ while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
+ list_del(&obj->obj_link);
+ i915_gem_object_unlock(obj);
+ }
+}
+
+void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
+{
+ list_del(&obj->obj_link);
+ i915_gem_object_unlock(obj);
+}
+
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
+{
+ i915_gem_ww_ctx_unlock_all(ww);
+ WARN_ON(ww->contended);
+ ww_acquire_fini(&ww->ctx);
+}
+
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
+{
+ int ret = 0;
+
+ if (WARN_ON(!ww->contended))
+ return -EINVAL;
+
+ i915_gem_ww_ctx_unlock_all(ww);
+ if (ww->intr)
+ ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
+ else
+ dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
+
+ if (!ret)
+ list_add_tail(&ww->contended->obj_link, &ww->obj_list);
+
+ ww->contended = NULL;
+
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index f333e88a2b6e..a4cad3f154ca 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -116,4 +116,16 @@ static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
return test_bit(TASKLET_STATE_SCHED, &t->state);
}
+struct i915_gem_ww_ctx {
+ struct ww_acquire_ctx ctx;
+ struct list_head obj_list;
+ bool intr;
+ struct drm_i915_gem_object *contended;
+};
+
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
+void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
+
#endif /* __I915_GEM_H__ */
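
The ww context declared above is consumed in a fixed retry pattern: take object locks under the context, and on -EDEADLK call i915_gem_ww_ctx_backoff(), which drops every held lock, slow-locks the contended object, and lets the caller start over. A minimal sketch of that loop, where do_work_locked() is a hypothetical stand-in for the caller's body:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible slow-locks */
retry:
	err = i915_gem_object_lock(obj, &ww);	/* tracked on ww.obj_list */
	if (!err)
		err = do_work_locked(obj);	/* hypothetical; may return -EDEADLK */
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);		/* unlocks anything still held */

The oa_pin_context() and emit_oa_config() hunks in i915_perf.c below follow exactly this shape.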
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 421613219ae9..f96032c60a12 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -132,6 +132,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
+ case I915_PARAM_HAS_EXEC_TIMELINE_FENCES:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3e6cbb0d1150..cf6e47adfde6 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ cond_resched();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c,
if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
+ cond_resched();
return 0;
}
@@ -1309,7 +1312,7 @@ capture_vma(struct intel_engine_capture_vma *next,
}
strcpy(c->name, name);
- c->vma = i915_vma_get(vma);
+ c->vma = vma; /* reference held while active */
c->next = next;
return c;
@@ -1399,7 +1402,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
compress));
i915_active_release(&vma->active);
- i915_vma_put(vma);
capture = this->next;
kfree(this);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1fa67700d8f4..759f523c6a6b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -41,6 +41,7 @@
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
@@ -131,40 +132,24 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
};
static const u32 hpd_gen11[HPD_NUM_PINS] = {
- [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
- [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
- [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
- [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
-};
-
-static const u32 hpd_gen12[HPD_NUM_PINS] = {
- [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
- [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
- [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
- [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
- [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
- [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
+ [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
+ [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
+ [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
+ [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
+ [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
+ [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
- [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
- [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
- [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
- [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
-};
-
-static const u32 hpd_tgp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
- [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
- [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
- [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
- [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
- [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
- [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
- [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
+ [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+ [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+ [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
@@ -180,9 +165,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
return;
}
- if (INTEL_GEN(dev_priv) >= 12)
- hpd->hpd = hpd_gen12;
- else if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11)
hpd->hpd = hpd_gen11;
else if (IS_GEN9_LP(dev_priv))
hpd->hpd = hpd_bxt;
@@ -196,9 +179,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
return;
- if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv))
- hpd->pch_hpd = hpd_tgp;
- else if (HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
+ if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
+ HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
hpd->pch_hpd = hpd_spt;
@@ -1048,33 +1030,17 @@ out:
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
- case HPD_PORT_C:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
- case HPD_PORT_D:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
- case HPD_PORT_E:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
- case HPD_PORT_F:
- return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
- default:
- return false;
- }
-}
-
-static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
- switch (pin) {
- case HPD_PORT_D:
+ case HPD_PORT_TC1:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
- case HPD_PORT_E:
+ case HPD_PORT_TC2:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
- case HPD_PORT_F:
+ case HPD_PORT_TC3:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
- case HPD_PORT_G:
+ case HPD_PORT_TC4:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
- case HPD_PORT_H:
+ case HPD_PORT_TC5:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
- case HPD_PORT_I:
+ case HPD_PORT_TC6:
return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
default:
return false;
@@ -1112,33 +1078,17 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
- case HPD_PORT_C:
- return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
- case HPD_PORT_D:
- return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
- case HPD_PORT_E:
- return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
- case HPD_PORT_F:
- return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
- default:
- return false;
- }
-}
-
-static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
- switch (pin) {
- case HPD_PORT_D:
+ case HPD_PORT_TC1:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
- case HPD_PORT_E:
+ case HPD_PORT_TC2:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
- case HPD_PORT_F:
+ case HPD_PORT_TC3:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
- case HPD_PORT_G:
+ case HPD_PORT_TC4:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
- case HPD_PORT_H:
+ case HPD_PORT_TC5:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
- case HPD_PORT_I:
+ case HPD_PORT_TC6:
return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
default:
return false;
@@ -1892,19 +1842,16 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
- bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
if (HAS_PCH_TGP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
- tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
} else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
} else if (HAS_PCH_MCC(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
} else {
drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
"Unrecognized PCH type 0x%x\n",
@@ -1912,7 +1859,6 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
- tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
}
if (ddi_hotplug_trigger) {
@@ -1936,7 +1882,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
dev_priv->hotplug.pch_hpd,
- tc_port_hotplug_long_detect);
+ icp_tc_port_hotplug_long_detect);
}
if (pin_mask)
@@ -2184,12 +2130,6 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
- long_pulse_detect_func long_pulse_detect;
-
- if (INTEL_GEN(dev_priv) >= 12)
- long_pulse_detect = gen12_port_hotplug_long_detect;
- else
- long_pulse_detect = gen11_port_hotplug_long_detect;
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2200,7 +2140,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
dev_priv->hotplug.hpd,
- long_pulse_detect);
+ gen11_port_hotplug_long_detect);
}
if (trigger_tbt) {
@@ -2212,7 +2152,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
dev_priv->hotplug.hpd,
- long_pulse_detect);
+ gen11_port_hotplug_long_detect);
}
if (pin_mask)
@@ -3047,6 +2987,18 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
return enabled_irqs;
}
+static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
+ const u32 hpd[HPD_NUM_PINS])
+{
+ struct intel_encoder *encoder;
+ u32 hotplug_irqs = 0;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder)
+ hotplug_irqs |= hpd[encoder->hpd_pin];
+
+ return hotplug_irqs;
+}
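
For illustration (hand-worked, not part of the patch): with encoders registered on HPD_PORT_A and HPD_PORT_TC1 and the hpd_icp table selected, the loop reduces to

	hotplug_irqs = SDE_DDI_HOTPLUG_ICP(PORT_A) |
		       SDE_TC_HOTPLUG_ICP(PORT_TC1);

so only pins that actually have an encoder are unmasked, where the removed SDE_HOTPLUG_MASK-style constants covered every port unconditionally.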
+
static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
@@ -3076,50 +3028,50 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- if (HAS_PCH_IBX(dev_priv))
- hotplug_irqs = SDE_HOTPLUG_MASK;
- else
- hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
ibx_hpd_detection_setup(dev_priv);
}
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
- u32 ddi_hotplug_enable_mask,
- u32 tc_hotplug_enable_mask)
+static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 enable_mask)
{
u32 hotplug;
hotplug = I915_READ(SHOTPLUG_CTL_DDI);
- hotplug |= ddi_hotplug_enable_mask;
+ hotplug |= enable_mask;
I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+}
- if (tc_hotplug_enable_mask) {
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
- hotplug |= tc_hotplug_enable_mask;
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
- }
+static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 enable_mask)
+{
+ u32 hotplug;
+
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= enable_mask;
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
- u32 sde_ddi_mask, u32 sde_tc_mask,
u32 ddi_enable_mask, u32 tc_enable_mask)
{
u32 hotplug_irqs, enabled_irqs;
- hotplug_irqs = sde_ddi_mask | sde_tc_mask;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
+ icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
+ if (tc_enable_mask)
+ icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
}
/*
@@ -3129,7 +3081,6 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
- SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
}
@@ -3141,7 +3092,6 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
icp_hpd_irq_setup(dev_priv,
- SDE_DDI_MASK_TGP, 0,
TGP_DDI_HPD_ENABLE_MASK, 0);
}
@@ -3153,14 +3103,18 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
+ GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}
@@ -3170,7 +3124,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 val;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
val = I915_READ(GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
@@ -3181,10 +3135,10 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+ icp_hpd_irq_setup(dev_priv,
TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+ icp_hpd_irq_setup(dev_priv,
ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
}
@@ -3220,8 +3174,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3248,22 +3202,13 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- if (INTEL_GEN(dev_priv) >= 8) {
- hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ if (INTEL_GEN(dev_priv) >= 8)
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-
- ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
- } else {
- hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
-
+ else
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
- }
ilk_hpd_detection_setup(dev_priv);
@@ -3312,7 +3257,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3533,17 +3478,18 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
- if (HAS_PCH_TGP(dev_priv))
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK);
- else if (HAS_PCH_JSP(dev_priv))
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
- else if (HAS_PCH_MCC(dev_priv))
- icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE(PORT_TC1));
- else
- icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK);
+ if (HAS_PCH_TGP(dev_priv)) {
+ icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
+ icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
+ } else if (HAS_PCH_JSP(dev_priv)) {
+ icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
+ } else if (HAS_PCH_MCC(dev_priv)) {
+ icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
+ icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
+ } else {
+ icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
+ icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
+ }
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8d8db9ff0a48..7f139ea4a90b 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -102,6 +102,11 @@ i915_param_named(psr_safest_params, bool, 0400,
"is helpful to detect if PSR issues are related to bad values set in "
" VBT. (0=use VBT parameters, 1=use safest parameters)");
+i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
+ "Enable PSR2 selective fetch "
+ "(0=disabled, 1=enabled) "
+ "Default: 0");
+
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 53fb5ba8fbed..330c03e2b4f7 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -54,6 +54,7 @@ struct drm_printer;
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0600) \
+ param(bool, enable_psr2_sel_fetch, false, 0600) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2338f92ce490..fb5e30de78c2 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -389,6 +389,7 @@ static const struct intel_device_info ilk_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
+ .has_rps = true,
.display.has_fbc = 1,
};
@@ -890,6 +891,7 @@ static const struct intel_device_info rkl_info = {
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.require_force_probe = 1,
+ .display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index c6f6370283cf..e94976976571 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1195,24 +1195,39 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
- int err;
+ struct i915_gem_ww_ctx ww;
+ int err = -ENODEV;
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
if (ce->engine != stream->engine) /* first match! */
continue;
- /*
- * As the ID is the gtt offset of the context's vma we
- * pin the vma to ensure the ID remains fixed.
- */
- err = intel_context_pin(ce);
- if (err == 0) {
- stream->pinned_ctx = ce;
- break;
- }
+ err = 0;
+ break;
}
i915_gem_context_unlock_engines(ctx);
+ if (err)
+ return ERR_PTR(err);
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ /*
+ * As the ID is the gtt offset of the context's vma we
+ * pin the vma to ensure the ID remains fixed.
+ */
+ err = intel_context_pin_ww(ce, &ww);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err)
+ return ERR_PTR(err);
+
+ stream->pinned_ctx = ce;
return stream->pinned_ctx;
}
@@ -1923,15 +1938,22 @@ emit_oa_config(struct i915_perf_stream *stream,
{
struct i915_request *rq;
struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
int err;
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
return PTR_ERR(vma);
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err)
+ goto err;
+
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto err_vma_put;
+ goto err;
intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
@@ -1953,11 +1975,9 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_add_request;
}
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, 0);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
if (err)
goto err_add_request;
@@ -1971,7 +1991,14 @@ err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
-err_vma_put:
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+
+ i915_gem_ww_ctx_fini(&ww);
i915_vma_put(vma);
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4e796ff4d7d0..d805d4da6181 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1382,7 +1382,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DPIO_CMNRST (1 << 0)
#define DPIO_PHY(pipe) ((pipe) >> 1)
-#define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy])
/*
* Per pipe/PLL DPIO regs
@@ -1898,6 +1897,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define PWR_DOWN_LN_3_1_0 (0xb << 4)
#define PWR_DOWN_LN_MASK (0xf << 4)
#define PWR_DOWN_LN_SHIFT 4
+#define EDP4K2K_MODE_OVRD_EN (1 << 3)
+#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2)
#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
#define ICL_LANE_ENABLE_AUX (1 << 0)
@@ -2919,6 +2920,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
+#define HDPORT_STATE _MMIO(0x45050)
+#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
+#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
+#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
+#define HDPORT_ENABLED REG_BIT(0)
+
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
*/
@@ -7752,32 +7759,20 @@ enum {
#define GEN11_DE_HPD_IMR _MMIO(0x44474)
#define GEN11_DE_HPD_IIR _MMIO(0x44478)
#define GEN11_DE_HPD_IER _MMIO(0x4447c)
-#define GEN12_TC6_HOTPLUG (1 << 21)
-#define GEN12_TC5_HOTPLUG (1 << 20)
-#define GEN11_TC4_HOTPLUG (1 << 19)
-#define GEN11_TC3_HOTPLUG (1 << 18)
-#define GEN11_TC2_HOTPLUG (1 << 17)
-#define GEN11_TC1_HOTPLUG (1 << 16)
#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \
- GEN12_TC5_HOTPLUG | \
- GEN11_TC4_HOTPLUG | \
- GEN11_TC3_HOTPLUG | \
- GEN11_TC2_HOTPLUG | \
- GEN11_TC1_HOTPLUG)
-#define GEN12_TBT6_HOTPLUG (1 << 5)
-#define GEN12_TBT5_HOTPLUG (1 << 4)
-#define GEN11_TBT4_HOTPLUG (1 << 3)
-#define GEN11_TBT3_HOTPLUG (1 << 2)
-#define GEN11_TBT2_HOTPLUG (1 << 1)
-#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC_HOTPLUG(PORT_TC6) | \
+ GEN11_TC_HOTPLUG(PORT_TC5) | \
+ GEN11_TC_HOTPLUG(PORT_TC4) | \
+ GEN11_TC_HOTPLUG(PORT_TC3) | \
+ GEN11_TC_HOTPLUG(PORT_TC2) | \
+ GEN11_TC_HOTPLUG(PORT_TC1))
#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \
- GEN12_TBT5_HOTPLUG | \
- GEN11_TBT4_HOTPLUG | \
- GEN11_TBT3_HOTPLUG | \
- GEN11_TBT2_HOTPLUG | \
- GEN11_TBT1_HOTPLUG)
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT_HOTPLUG(PORT_TC6) | \
+ GEN11_TBT_HOTPLUG(PORT_TC5) | \
+ GEN11_TBT_HOTPLUG(PORT_TC4) | \
+ GEN11_TBT_HOTPLUG(PORT_TC3) | \
+ GEN11_TBT_HOTPLUG(PORT_TC2) | \
+ GEN11_TBT_HOTPLUG(PORT_TC1))
#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
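
The parameterized macros are bit-identical to the per-port defines they replace; expanding by hand (tc_port values count from PORT_TC1 == 0):

	GEN11_TC_HOTPLUG(PORT_TC3)  == 1 << (2 + 16) == 1 << 18	/* was GEN11_TC3_HOTPLUG */
	GEN11_TBT_HOTPLUG(PORT_TC5) == 1 << 4			/* was GEN12_TBT5_HOTPLUG */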
@@ -7870,6 +7865,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
@@ -8711,6 +8707,7 @@ enum {
#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
@@ -9217,8 +9214,8 @@ enum {
#define DISPLAY_IPS_CONTROL 0x19
#define TGL_PCODE_TCCOLD 0x26
#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
-#define TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ 0
-#define TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
/* See also IPS_CTL */
#define IPS_PCODE_CONTROL (1 << 30)
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
@@ -9305,6 +9302,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
+#define ENABLE_SMALLPL REG_BIT(15)
#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
/* IVYBRIDGE DPF */
@@ -10277,12 +10275,18 @@ enum skl_power_gate {
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) REG_BIT((phy) + 10)
#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
(tc_port) + 12 : \
(tc_port) - PORT_TC4 + 21))
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) _PICK(phy, 0, 2, 4, 27)
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) \
+ (3 << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) \
+ ((pll) << RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
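
_PICK() indexes a compound-literal array, so the RKL shift is a lookup table in macro form; assuming the combo PHYs A..D enumerate to 0..3, it expands to:

	RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(PHY_A) == 0
	RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(PHY_B) == 2
	RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(PHY_C) == 4
	RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(PHY_D) == 27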
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
@@ -10503,19 +10507,21 @@ enum skl_power_gate {
#define _TGL_DPLL0_CFGCR0 0x164284
#define _TGL_DPLL1_CFGCR0 0x16428C
-/* TODO: add DPLL4 */
#define _TGL_TBTPLL_CFGCR0 0x16429C
#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
_TGL_DPLL1_CFGCR0, \
_TGL_TBTPLL_CFGCR0)
+#define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0)
#define _TGL_DPLL0_CFGCR1 0x164288
#define _TGL_DPLL1_CFGCR1 0x164290
-/* TODO: add DPLL4 */
#define _TGL_TBTPLL_CFGCR1 0x1642A0
#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
_TGL_DPLL1_CFGCR1, \
_TGL_TBTPLL_CFGCR1)
+#define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1)
#define _DKL_PHY1_BASE 0x168000
#define _DKL_PHY2_BASE 0x169000
@@ -12336,4 +12342,10 @@ enum skl_power_gate {
#define DSB_ENABLE (1 << 31)
#define DSB_STATUS (1 << 0)
+#define TGL_ROOT_DEVICE_ID 0x9A00
+#define TGL_ROOT_DEVICE_MASK 0xFF00
+#define TGL_ROOT_DEVICE_SKU_MASK 0xF
+#define TGL_ROOT_DEVICE_SKU_ULX 0x2
+#define TGL_ROOT_DEVICE_SKU_ULT 0x4
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 781a6783affe..0e813819b041 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,7 @@
#include <linux/sched/signal.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"
@@ -186,48 +187,34 @@ static void irq_execute_cb_hook(struct irq_work *wrk)
irq_execute_cb(wrk);
}
-static void __notify_execute_cb(struct i915_request *rq)
+static __always_inline void
+__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{
struct execute_cb *cb, *cn;
- lockdep_assert_held(&rq->lock);
-
- GEM_BUG_ON(!i915_request_is_active(rq));
if (llist_empty(&rq->execute_cb))
return;
- llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode)
- irq_work_queue(&cb->work);
-
- /*
- * XXX Rollback on __i915_request_unsubmit()
- *
- * In the future, perhaps when we have an active time-slicing scheduler,
- * it will be interesting to unsubmit parallel execution and remove
- * busywaits from the GPU until their master is restarted. This is
- * quite hairy, we have to carefully rollback the fence and do a
- * preempt-to-idle cycle on the target engine, all the while the
- * master execute_cb may refire.
- */
- init_llist_head(&rq->execute_cb);
+ llist_for_each_entry_safe(cb, cn,
+ llist_del_all(&rq->execute_cb),
+ work.llnode)
+ fn(&cb->work);
}
-static inline void
-remove_from_client(struct i915_request *request)
+static void __notify_execute_cb_irq(struct i915_request *rq)
{
- struct drm_i915_file_private *file_priv;
+ __notify_execute_cb(rq, irq_work_queue);
+}
- if (!READ_ONCE(request->file_priv))
- return;
+static bool irq_work_imm(struct irq_work *wrk)
+{
+ wrk->func(wrk);
+ return false;
+}
- rcu_read_lock();
- file_priv = xchg(&request->file_priv, NULL);
- if (file_priv) {
- spin_lock(&file_priv->mm.lock);
- list_del(&request->client_link);
- spin_unlock(&file_priv->mm.lock);
- }
- rcu_read_unlock();
+static void __notify_execute_cb_imm(struct i915_request *rq)
+{
+ __notify_execute_cb(rq, irq_work_imm);
}
static void free_capture_list(struct i915_request *request)
@@ -274,9 +261,16 @@ static void remove_from_engine(struct i915_request *rq)
locked = engine;
}
list_del_init(&rq->sched.link);
+
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+
+ /* Prevent further __await_execution() registering a cb, then flush */
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
spin_unlock_irq(&locked->active.lock);
+
+ __notify_execute_cb_imm(rq);
}
bool i915_request_retire(struct i915_request *rq)
@@ -288,6 +282,7 @@ bool i915_request_retire(struct i915_request *rq)
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
+ i915_request_mark_complete(rq);
/*
* We know the GPU must have read the request to have
@@ -305,32 +300,30 @@ bool i915_request_retire(struct i915_request *rq)
__i915_request_fill(rq, POISON_FREE);
rq->ring->head = rq->postfix;
+ if (!i915_request_signaled(rq)) {
+ spin_lock_irq(&rq->lock);
+ dma_fence_signal_locked(&rq->fence);
+ spin_unlock_irq(&rq->lock);
+ }
+
+ if (i915_request_has_waitboost(rq)) {
+ GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ atomic_dec(&rq->engine->gt->rps.num_waiters);
+ }
+
/*
* We only loosely track inflight requests across preemption,
* and so we may find ourselves attempting to retire a _completed_
* request that we have removed from the HW and put back on a run
* queue.
+ *
+ * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
+ * after removing the breadcrumb and signaling it, so that we do not
+ * inadvertently attach the breadcrumb to a completed request.
*/
remove_from_engine(rq);
-
- spin_lock_irq(&rq->lock);
- i915_request_mark_complete(rq);
- if (!i915_request_signaled(rq))
- dma_fence_signal_locked(&rq->fence);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- i915_request_cancel_breadcrumb(rq);
- if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
- atomic_dec(&rq->engine->gt->rps.num_waiters);
- }
- if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
- set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
- __notify_execute_cb(rq);
- }
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
- spin_unlock_irq(&rq->lock);
- remove_from_client(rq);
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
@@ -357,12 +350,6 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
-static void __llist_add(struct llist_node *node, struct llist_head *head)
-{
- node->next = head->first;
- head->first = node;
-}
-
static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
@@ -460,18 +447,24 @@ __await_execution(struct i915_request *rq,
cb->work.func = irq_execute_cb_hook;
}
- spin_lock_irq(&signal->lock);
- if (i915_request_is_active(signal) || __request_in_flight(signal)) {
- if (hook) {
- hook(rq, &signal->fence);
- i915_request_put(signal);
- }
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
- } else {
- __llist_add(&cb->work.llnode, &signal->execute_cb);
+ /*
+ * Register the callback first, then see if the signaler is already
+ * active. This ensures that if we race with the
+ * __notify_execute_cb from i915_request_submit() and we are not
+ * included in that list, we get a second bite of the cherry and
+ * execute it ourselves. After this point, a future
+ * i915_request_submit() will notify us.
+ *
+ * In i915_request_retire() we set the ACTIVE bit on a completed
+ * request (then flush the execute_cb). So by registering the
+ * callback first, then checking the ACTIVE bit, we serialise with
+ * the completed/retired request.
+ */
+ if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+ if (i915_request_is_active(signal) ||
+ __request_in_flight(signal))
+ __notify_execute_cb_imm(signal);
}
- spin_unlock_irq(&signal->lock);
return 0;
}
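
Schematically, the ordering the comment above relies on (a sketch of one valid interleaving, not code from the patch):

	/* signaler (submit/retire)          waiter (__await_execution)
	 *   1. set I915_FENCE_FLAG_ACTIVE     1. llist_add(cb, &signal->execute_cb)
	 *   2. llist_del_all() + run cbs      2. if ACTIVE or in flight, run cbs
	 *
	 * Whichever side acts second observes the other's write, and because
	 * llist_del_all() makes the flush idempotent, no callback is lost and
	 * none runs twice.
	 */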
@@ -549,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
+ if (unlikely(intel_context_is_closed(request->context) &&
+ !intel_engine_has_heartbeat(engine)))
+ intel_context_set_banned(request->context);
+
if (unlikely(intel_context_is_banned(request->context)))
i915_request_set_error_once(request, -EIO);
+
if (unlikely(fatal_error(request->fence.error)))
__i915_request_skip(request);
@@ -587,19 +585,21 @@ xfer:
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
}
- /* We may be recursing from the signal callback of another i915 fence */
- if (!i915_request_signaled(request)) {
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
- __notify_execute_cb(request);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &request->fence.flags) &&
- !i915_request_enable_breadcrumb(request))
- intel_engine_signal_breadcrumbs(engine);
+ /*
+ * XXX Rollback bonded-execution on __i915_request_unsubmit()?
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ __notify_execute_cb_irq(request);
- spin_unlock(&request->lock);
- GEM_BUG_ON(!llist_empty(&request->execute_cb));
- }
+ /* We may be recursing from the signal callback of another i915 fence */
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ i915_request_enable_breadcrumb(request);
return result;
}
@@ -621,27 +621,27 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ /*
+ * Only unwind in reverse order, required so that the per-context list
+ * is kept in seqno/ring order.
+ */
RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
/*
- * Only unwind in reverse order, required so that the per-context list
- * is kept in seqno/ring order.
+ * Before we remove this breadcrumb from the signal list, we have
+ * to ensure that a concurrent dma_fence_enable_signaling() does not
+ * attach itself. We first mark the request as no longer active and
+ * make sure that is visible to other cores, and then remove the
+ * breadcrumb if attached.
*/
-
- /* We may be recursing from the signal callback of another i915 fence */
- spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+ clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
- GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
- clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
-
- spin_unlock(&request->lock);
-
/* We've already spun, don't charge on resubmitting. */
if (request->sched.semaphores && i915_request_started(request))
request->sched.semaphores = 0;
@@ -778,7 +778,6 @@ static void __i915_request_ctor(void *arg)
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
- rq->file_priv = NULL;
rq->capture_list = NULL;
init_llist_head(&rq->execute_cb);
@@ -868,7 +867,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
/* No zalloc, everything must be cleared after use */
rq->batch = NULL;
- GEM_BUG_ON(rq->file_priv);
GEM_BUG_ON(rq->capture_list);
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
@@ -1661,7 +1659,7 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq, int state)
+static bool __i915_spin_request(struct i915_request * const rq, int state)
{
unsigned long timeout_ns;
unsigned int cpu;
@@ -1694,7 +1692,7 @@ static bool __i915_spin_request(const struct i915_request * const rq, int state)
timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
timeout_ns += local_clock_ns(&cpu);
do {
- if (i915_request_completed(rq))
+ if (dma_fence_is_signaled(&rq->fence))
return true;
if (signal_pending_state(state, current))
@@ -1718,7 +1716,7 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct request_wait *wait = container_of(cb, typeof(*wait), cb);
- wake_up_process(wait->tsk);
+ wake_up_process(fetch_and_zero(&wait->tsk));
}
/**
@@ -1787,10 +1785,8 @@ long i915_request_wait(struct i915_request *rq,
* duration, which we currently lack.
*/
if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
- __i915_spin_request(rq, state)) {
- dma_fence_signal(&rq->fence);
+ __i915_spin_request(rq, state))
goto out;
- }
/*
* This client is about to stall waiting for the GPU. In many cases
@@ -1804,25 +1800,36 @@ long i915_request_wait(struct i915_request *rq,
* but at a cost of spending more power processing the workload
* (bad for battery).
*/
- if (flags & I915_WAIT_PRIORITY) {
- if (!i915_request_started(rq) &&
- INTEL_GEN(rq->engine->i915) >= 6)
- intel_rps_boost(rq);
- }
+ if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
+ intel_rps_boost(rq);
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
goto out;
+ /*
+ * Flush the submission tasklet, but only if it may help this request.
+ *
+ * We sometimes experience some latency between the HW interrupts and
+ * tasklet execution (mostly due to ksoftirqd latency, but it can also
+ * be due to lazy CS events), so let's run the tasklet manually if there
+ * is a chance it may submit this request. If the request is not ready
+ * to run, as it is waiting for other fences to be signaled, flushing
+ * the tasklet is busy work without any advantage for this client.
+ *
+ * If the HW is being lazy, this is the last chance before we go to
+ * sleep to catch any pending events. We will check periodically in
+ * the heartbeat to flush the submission tasklets as a last resort
+ * for unhappy HW.
+ */
+ if (i915_request_is_ready(rq))
+ intel_engine_flush_submission(rq->engine);
+
for (;;) {
set_current_state(state);
- if (i915_request_completed(rq)) {
- dma_fence_signal(&rq->fence);
+ if (dma_fence_is_signaled(&rq->fence))
break;
- }
-
- intel_engine_flush_submission(rq->engine);
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@@ -1838,7 +1845,9 @@ long i915_request_wait(struct i915_request *rq,
}
__set_current_state(TASK_RUNNING);
- dma_fence_remove_callback(&rq->fence, &wait.cb);
+ if (READ_ONCE(wait.tsk))
+ dma_fence_remove_callback(&rq->fence, &wait.cb);
+ GEM_BUG_ON(!list_empty(&wait.cb.node));
out:
mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
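
The wait teardown above is a small ownership handshake: request_wait_wake() consumes wait.tsk via fetch_and_zero() (i915's xchg-to-NULL helper), so only a waiter whose callback never fired detaches it; an invoked callback has already been unlinked by dma_fence, which the GEM_BUG_ON asserts. Condensed (a sketch, not patch code):

	wait.tsk = current;
	dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake);
	...					/* sleep until woken or timed out */
	if (READ_ONCE(wait.tsk))		/* callback never ran */
		dma_fence_remove_callback(&rq->fence, &wait.cb);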
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 590762820761..16b721080195 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -284,10 +284,6 @@ struct i915_request {
/** timeline->request entry for this request */
struct list_head link;
- struct drm_i915_file_private *file_priv;
- /** file_priv list entry for this request */
- struct list_head client_link;
-
I915_SELFTEST_DECLARE(struct {
struct list_head link;
unsigned long delay;
@@ -365,10 +361,6 @@ void i915_request_submit(struct i915_request *request);
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
-/* Note: part of the intel_breadcrumbs family */
-bool i915_request_enable_breadcrumb(struct i915_request *request);
-void i915_request_cancel_breadcrumb(struct i915_request *request);
-
long i915_request_wait(struct i915_request *rq,
unsigned int flags,
long timeout)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ed2be3489f8e..7b64e7137270 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,17 +34,25 @@
static void i915_save_display(struct drm_i915_private *dev_priv)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
/* Display arbitration control */
if (INTEL_GEN(dev_priv) <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
- /* save FBC interval */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
- dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ if (IS_GEN(dev_priv, 4))
+ pci_read_config_word(pdev, GCDGMBUS,
+ &dev_priv->regfile.saveGCDGMBUS);
}
static void i915_restore_display(struct drm_i915_private *dev_priv)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
+ if (IS_GEN(dev_priv, 4))
+ pci_write_config_word(pdev, GCDGMBUS,
+ dev_priv->regfile.saveGCDGMBUS);
+
/* Display arbitration */
if (INTEL_GEN(dev_priv) <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
@@ -52,31 +60,17 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
/* only restore FBC info on the platform that supports FBC */
intel_fbc_global_disable(dev_priv);
- /* restore FBC interval */
- if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
- I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
-
intel_vga_redisable(dev_priv);
+
+ intel_gmbus_reset(dev_priv);
}
int i915_save_state(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
i915_save_display(dev_priv);
- if (IS_GEN(dev_priv, 4))
- pci_read_config_word(pdev, GCDGMBUS,
- &dev_priv->regfile.saveGCDGMBUS);
-
- /* Cache mode state */
- if (INTEL_GEN(dev_priv) < 7)
- dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
- /* Memory Arbitration state */
- dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
@@ -102,22 +96,10 @@ int i915_save_state(struct drm_i915_private *dev_priv)
int i915_restore_state(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- if (IS_GEN(dev_priv, 4))
- pci_write_config_word(pdev, GCDGMBUS,
- dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev_priv);
- /* Cache mode state */
- if (INTEL_GEN(dev_priv) < 7)
- I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
- 0xffff0000);
-
- /* Memory arbitration state */
- I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
-
/* Scratch space */
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
@@ -138,7 +120,5 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- intel_gmbus_reset(dev_priv);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 4cd2038cbe35..038d4c6884c5 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -34,7 +34,7 @@ static void *i915_sw_fence_debug_hint(void *addr)
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
-static struct debug_obj_descr i915_sw_fence_debug_descr = {
+static const struct debug_obj_descr i915_sw_fence_debug_descr = {
.name = "i915_sw_fence",
.debug_hint = i915_sw_fence_debug_hint,
};
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bc64f773dcdb..caa9b041616b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -291,6 +291,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma_work {
struct dma_fence_work base;
+ struct i915_address_space *vm;
+ struct i915_vm_pt_stash stash;
struct i915_vma *vma;
struct drm_i915_gem_object *pinned;
struct i915_sw_dma_fence_cb cb;
@@ -302,21 +304,23 @@ static int __vma_bind(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
struct i915_vma *vma = vw->vma;
- int err;
-
- err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags);
- if (err)
- atomic_or(I915_VMA_ERROR, &vma->flags);
- return err;
+ vma->ops->bind_vma(vw->vm, &vw->stash,
+ vma, vw->cache_level, vw->flags);
+ return 0;
}
static void __vma_release(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- if (vw->pinned)
+ if (vw->pinned) {
__i915_gem_object_unpin_pages(vw->pinned);
+ i915_gem_object_put(vw->pinned);
+ }
+
+ i915_vm_free_pt_stash(vw->vm, &vw->stash);
+ i915_vm_put(vw->vm);
}
static const struct dma_fence_work_ops bind_ops = {
@@ -376,7 +380,6 @@ int i915_vma_bind(struct i915_vma *vma,
{
u32 bind_flags;
u32 vma_flags;
- int ret;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
@@ -430,12 +433,10 @@ int i915_vma_bind(struct i915_vma *vma,
if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
- work->pinned = vma->obj;
+ work->pinned = i915_gem_object_get(vma->obj);
}
} else {
- ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags);
- if (ret)
- return ret;
+ vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
atomic_or(bind_flags, &vma->flags);
@@ -853,13 +854,19 @@ static void vma_unbind_pages(struct i915_vma *vma)
__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
-int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
+#ifdef CONFIG_PROVE_LOCKING
+ if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
+ WARN_ON(!ww);
+#endif
+
BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
@@ -873,16 +880,32 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (err)
return err;
+ if (flags & PIN_GLOBAL)
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
if (flags & vma->vm->bind_async_flags) {
work = i915_vma_work();
if (!work) {
err = -ENOMEM;
- goto err_pages;
+ goto err_rpm;
}
- }
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ work->vm = i915_vm_get(vma->vm);
+
+ /* Allocate enough page directories to cover the PTEs in use */
+ if (vma->vm->allocate_va_range) {
+ err = i915_vm_alloc_pt_stash(vma->vm,
+ &work->stash,
+ vma->size);
+ if (err)
+ goto err_fence;
+
+ err = i915_vm_pin_pt_stash(vma->vm,
+ &work->stash);
+ if (err)
+ goto err_fence;
+ }
+ }
/*
* Differentiate between user/kernel vma inside the aliasing-ppgtt.
@@ -971,9 +994,9 @@ err_unlock:
err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
+err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
-err_pages:
vma_put_pages(vma);
return err;
}
@@ -989,7 +1012,8 @@ static void flush_idle_contexts(struct intel_gt *gt)
intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
-int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags)
{
struct i915_address_space *vm = vma->vm;
int err;
@@ -997,7 +1021,7 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
do {
- err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
if (err != -ENOSPC) {
if (!err) {
err = i915_vma_wait_for_bind(vma);
@@ -1167,6 +1191,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
+static int
+__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
+{
+ return __i915_request_await_exclusive(rq, &vma->active);
+}
+
int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
int err;
@@ -1174,8 +1204,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active,
- I915_ACTIVE_AWAIT_EXCL);
+ err = __i915_request_await_bind(rq, vma);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index d0d01f909548..5b3a3c653454 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -237,8 +237,17 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
}
int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
-int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags);
+i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u64 size, u64 alignment, u64 flags);
+
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ return i915_vma_pin_ww(vma, NULL, size, alignment, flags);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
+ u32 align, unsigned int flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 40c590db3c76..adc836f15fde 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -346,6 +346,25 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
mask = BIT(INTEL_SUBPLATFORM_PORTF);
}
+ if (IS_TIGERLAKE(i915)) {
+ struct pci_dev *root, *pdev = i915->drm.pdev;
+
+ root = list_first_entry(&pdev->bus->devices, typeof(*root), bus_list);
+
+ drm_WARN_ON(&i915->drm, mask);
+ drm_WARN_ON(&i915->drm, (root->device & TGL_ROOT_DEVICE_MASK) !=
+ TGL_ROOT_DEVICE_ID);
+
+ switch (root->device & TGL_ROOT_DEVICE_SKU_MASK) {
+ case TGL_ROOT_DEVICE_SKU_ULX:
+ mask = BIT(INTEL_SUBPLATFORM_ULX);
+ break;
+ case TGL_ROOT_DEVICE_SKU_ULT:
+ mask = BIT(INTEL_SUBPLATFORM_ULT);
+ break;
+ }
+ }
+
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
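
Decoding the i915_reg.h masks by hand: a hypothetical root device ID of 0x9A04 passes the (device & TGL_ROOT_DEVICE_MASK) == TGL_ROOT_DEVICE_ID check (0x9A04 & 0xFF00 == 0x9A00), and its SKU nibble (0x9A04 & TGL_ROOT_DEVICE_SKU_MASK == 0x4 == TGL_ROOT_DEVICE_SKU_ULT) selects INTEL_SUBPLATFORM_ULT; a nibble of 0x2 would select ULX, and any other value leaves the subplatform mask empty.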
@@ -497,6 +516,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
S32_MAX),
USEC_PER_SEC));
}
+
+ if (!HAS_DISPLAY(dev_priv)) {
+ dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
+ DRIVER_ATOMIC);
+ memset(&info->display, 0, sizeof(info->display));
+ memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
+ memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
+ }
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index fd2385457ab6..6a3d607218aa 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -146,6 +146,7 @@ enum intel_ppgtt_type {
func(has_gmch); \
func(has_hdcp); \
func(has_hotplug); \
+ func(has_hti); \
func(has_ipc); \
func(has_modular_fia); \
func(has_overlay); \
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 6b5e9d88646d..180e1078ef7c 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -87,7 +87,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
}
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ if (size > mem->mm.size)
return -E2BIG;
n_pages = size >> ilog2(mem->mm.chunk_size);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cfabbe0481ab..34e0d22d456b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -100,12 +100,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
-
- if (IS_SKYLAKE(dev_priv)) {
- /* WaDisableDopClockGating */
- I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
- & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- }
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7142,7 +7136,7 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
/* Wa_1409825376:tgl (pre-prod)*/
- if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
TGL_VRH_GATING_DIS);
@@ -7223,12 +7217,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableSDEUnitClockGating:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/* WaDisableGamClockGating:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
@@ -7251,6 +7245,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
+ /* WaDisableDopClockGating:skl */
+ I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
/* WAC6entrylatency:skl */
I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
FBC_LLC_FULLY_OPEN);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 916ccd1c0e96..5b3279262123 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -231,9 +231,21 @@ void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
SB_CRWRDA_NP, reg, &val);
}
+static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(i915))
+ return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
+ else
+ return IOSF_PORT_DPIO;
+}
+
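Editor's note: the helper above replaces the cached dpio_phy_iosf_port[] table with a computed lookup. A minimal standalone sketch of the same selection logic follows; the port values mirror i915's IOSF_PORT_DPIO definitions but are included here purely for illustration:

/* Standalone sketch of the PHY -> IOSF port mapping introduced above. */
#include <stdbool.h>
#include <stdio.h>

enum dpio_phy { DPIO_PHY0, DPIO_PHY1 };
enum { IOSF_PORT_DPIO = 0x12, IOSF_PORT_DPIO_2 = 0x1a }; /* illustrative values */

static unsigned int phy_iosf_port(bool is_cherryview, enum dpio_phy phy)
{
	/* CHV: the x2 PHY (PHY0) sits behind IOSF_PORT_DPIO_2 */
	if (is_cherryview)
		return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
	/* VLV: a single DPIO port serves both PHY instances */
	return IOSF_PORT_DPIO;
}

int main(void)
{
	printf("CHV PHY0 -> %#x\n", phy_iosf_port(true, DPIO_PHY0));
	printf("VLV PHY0 -> %#x\n", phy_iosf_port(false, DPIO_PHY0));
	return 0;
}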
u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
- int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
u32 val = 0;
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
@@ -252,7 +264,7 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
void vlv_dpio_write(struct drm_i915_private *i915,
enum pipe pipe, int reg, u32 val)
{
- int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8d5a933e6af6..97ded2a59cf4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1209,6 +1209,18 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
spin_unlock(&uncore->debug->lock);
}
+#define __vgpu_read(x) \
+static u##x \
+vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
+ u##x val = __raw_uncore_read##x(uncore, reg); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val; \
+}
+__vgpu_read(8)
+__vgpu_read(16)
+__vgpu_read(32)
+__vgpu_read(64)
+
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
assert_rpm_wakelock_held(uncore->rpm);
@@ -1414,6 +1426,16 @@ __gen_reg_write_funcs(gen8);
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
+#define __vgpu_write(x) \
+static void \
+vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ __raw_uncore_write##x(uncore, reg, val); \
+}
+__vgpu_write(8)
+__vgpu_write(16)
+__vgpu_write(32)
+
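Editor's note: the __vgpu_read()/__vgpu_write() macros above stamp out one raw accessor per register width; under a vGPU the hypervisor traps the MMIO access, so no forcewake handling is needed. A standalone sketch of the same token-pasting idiom, with a byte array standing in for the MMIO window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char regs[256];	/* pretend MMIO window */

#define DEFINE_READ(x)						\
static uint##x##_t read##x(unsigned int ofs)			\
{								\
	uint##x##_t val;					\
	memcpy(&val, &regs[ofs], sizeof(val));			\
	return val;						\
}
DEFINE_READ(8)
DEFINE_READ(16)
DEFINE_READ(32)

int main(void)
{
	regs[4] = 0xab;
	printf("%#x\n", (unsigned int)read8(4));	/* 0xab */
	printf("%#x\n", (unsigned int)read32(4));	/* 0xab on a little-endian host */
	return 0;
}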
#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
@@ -1735,7 +1757,10 @@ static void uncore_raw_init(struct intel_uncore *uncore)
{
GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
- if (IS_GEN(uncore->i915, 5)) {
+ if (intel_vgpu_active(uncore->i915)) {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
+ } else if (IS_GEN(uncore->i915, 5)) {
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
} else {
@@ -1993,13 +2018,14 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
unsigned int slow_timeout_ms,
u32 *out_value)
{
- u32 reg_value;
+ u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
/* Catch any overuse of this function */
might_sleep_if(slow_timeout_ms);
GEM_BUG_ON(fast_timeout_us > 20000);
+ GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
ret = -ETIMEDOUT;
if (fast_timeout_us && fast_timeout_us <= 20000)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 88d400b9df88..23a6132c5f4e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -199,11 +199,52 @@ out:
return err;
}
+static int igt_gem_ww_ctx(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *obj2;
+ struct i915_gem_ww_ctx ww;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj2)) {
+		err = PTR_ERR(obj2);
+ goto put1;
+ }
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ /* Lock the objects, twice for good measure (-EALREADY handling) */
+ err = i915_gem_object_lock(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj, &ww);
+ if (!err)
+ err = i915_gem_object_lock_interruptible(obj2, &ww);
+ if (!err)
+ err = i915_gem_object_lock(obj2, &ww);
+
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ i915_gem_object_put(obj2);
+put1:
+ i915_gem_object_put(obj);
+ return err;
+}
+
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_gem_suspend),
SUBTEST(igt_gem_hibernate),
+ SUBTEST(igt_gem_ww_ctx),
};
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 028baae9631f..f88473d396f4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -536,7 +536,7 @@ int i915_gem_evict_mock_selftests(void)
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, &i915->gt);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 0016ffc7d914..c53a222e3dec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -172,35 +172,45 @@ static int igt_ppgtt_alloc(void *arg)
/* Check we can allocate the entire range */
for (size = 4096; size <= limit; size <<= 2) {
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
+ struct i915_vm_pt_stash stash = {};
+
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
+ if (err)
+ goto err_ppgtt_cleanup;
+
+ err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
if (err) {
- if (err == -ENOMEM) {
- pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
- size, ilog2(size));
- err = 0; /* virtual space too large! */
- }
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
}
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
cond_resched();
ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
+
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
}
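Editor's note: every hunk in this file follows the same new protocol: page-table memory is reserved up front via i915_vm_alloc_pt_stash() and pinned with i915_vm_pin_pt_stash(), so that allocate_va_range() itself becomes infallible, and leftovers are returned with i915_vm_free_pt_stash(). A standalone analogue of that reserve/commit/release pattern (names are illustrative, not the driver's API):

#include <stdio.h>
#include <stdlib.h>

struct stash { void *mem; };

static int stash_alloc(struct stash *s, size_t bytes)
{
	s->mem = malloc(bytes);		/* the only fallible step, done early */
	return s->mem ? 0 : -1;
}

static void commit(struct stash *s)
{
	/* infallible: only consumes memory already sitting in the stash */
	printf("committing with %p\n", s->mem);
}

static void stash_free(struct stash *s)
{
	free(s->mem);			/* returns whatever was not consumed */
	s->mem = NULL;
}

int main(void)
{
	struct stash s = { 0 };

	if (stash_alloc(&s, 4096))
		return 1;
	commit(&s);
	stash_free(&s);
	return 0;
}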
/* Check we can incrementally allocate the entire range */
for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
- err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
- last, size - last);
+ struct i915_vm_pt_stash stash = {};
+
+ err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
+ if (err)
+ goto err_ppgtt_cleanup;
+
+ err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
if (err) {
- if (err == -ENOMEM) {
- pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
- last, size - last, ilog2(size));
- err = 0; /* virtual space too large! */
- }
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
}
+ ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
+ last, size - last);
cond_resched();
+
+ i915_vm_free_pt_stash(&ppgtt->vm, &stash);
}
err_ppgtt_cleanup:
@@ -284,9 +294,23 @@ static int lowlevel_hole(struct i915_address_space *vm,
break;
}
- if (vm->allocate_va_range &&
- vm->allocate_va_range(vm, addr, BIT_ULL(size)))
- break;
+ if (vm->allocate_va_range) {
+ struct i915_vm_pt_stash stash = {};
+
+ if (i915_vm_alloc_pt_stash(vm, &stash,
+ BIT_ULL(size)))
+ break;
+
+ if (i915_vm_pin_pt_stash(vm, &stash)) {
+ i915_vm_free_pt_stash(vm, &stash);
+ break;
+ }
+
+ vm->allocate_va_range(vm, &stash,
+ addr, BIT_ULL(size));
+
+ i915_vm_free_pt_stash(vm, &stash);
+ }
mock_vma->pages = obj->mm.pages;
mock_vma->node.size = BIT_ULL(size);
@@ -1703,7 +1727,7 @@ int i915_gem_gtt_mock_selftests(void)
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
@@ -1881,6 +1905,7 @@ static int igt_cs_tlb(void *arg)
continue;
while (!__igt_timeout(end_time, NULL)) {
+ struct i915_vm_pt_stash stash = {};
struct i915_request *rq;
u64 offset;
@@ -1888,10 +1913,6 @@ static int igt_cs_tlb(void *arg)
0, vm->total - PAGE_SIZE,
chunk_size, PAGE_SIZE);
- err = vm->allocate_va_range(vm, offset, chunk_size);
- if (err)
- goto end;
-
memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
vma = i915_vma_instance(bbe, vm, NULL);
@@ -1904,6 +1925,20 @@ static int igt_cs_tlb(void *arg)
if (err)
goto end;
+ err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
+ if (err)
+ goto end;
+
+ err = i915_vm_pin_pt_stash(vm, &stash);
+ if (err) {
+ i915_vm_free_pt_stash(vm, &stash);
+ goto end;
+ }
+
+ vm->allocate_va_range(vm, &stash, offset, chunk_size);
+
+ i915_vm_free_pt_stash(vm, &stash);
+
/* Prime the TLB with the dummy pages */
for (i = 0; i < count; i++) {
vma->node.start = offset + i * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index c2d001d9c0ec..debbac660519 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg)
}
/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
- scratch = kmap(ce->vm->scratch[0].base.page);
+ scratch = kmap(__px_page(ce->vm->scratch[0]));
memset(scratch, POISON_FREE, PAGE_SIZE);
rq = intel_context_create_request(ce);
@@ -405,7 +405,7 @@ static int live_noa_gpr(void *arg)
out_rq:
i915_request_put(rq);
out_ce:
- kunmap(ce->vm->scratch[0].base.page);
+ kunmap(__px_page(ce->vm->scratch[0]));
intel_context_put(ce);
out:
stream_destroy(stream);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 57dd6f5122ee..64bbb8288249 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -331,7 +331,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
if (!wait) {
i915_sw_fence_commit(submit);
heap_fence_put(submit);
- err = ENOMEM;
+ err = -ENOMEM;
break;
}
@@ -527,7 +527,7 @@ int i915_request_mock_selftests(void)
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
@@ -862,6 +862,8 @@ static int live_all_engines(void *arg)
goto out_free;
}
+ i915_vma_lock(batch);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
request[idx] = intel_engine_create_kernel_request(engine);
@@ -872,11 +874,9 @@ static int live_all_engines(void *arg)
goto out_request;
}
- i915_vma_lock(batch);
err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
err = i915_vma_move_to_active(batch, request[idx], 0);
- i915_vma_unlock(batch);
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
@@ -891,6 +891,8 @@ static int live_all_engines(void *arg)
idx++;
}
+ i915_vma_unlock(batch);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
if (i915_request_completed(request[idx])) {
@@ -981,12 +983,13 @@ static int live_sequential_engines(void *arg)
goto out_free;
}
+ i915_vma_lock(batch);
request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
__func__, engine->name, err);
- goto out_request;
+ goto out_unlock;
}
if (prev) {
@@ -996,16 +999,14 @@ static int live_sequential_engines(void *arg)
i915_request_add(request[idx]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
- goto out_request;
+ goto out_unlock;
}
}
- i915_vma_lock(batch);
err = i915_request_await_object(request[idx],
batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, request[idx], 0);
- i915_vma_unlock(batch);
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
@@ -1020,6 +1021,11 @@ static int live_sequential_engines(void *arg)
prev = request[idx];
idx++;
+
+out_unlock:
+ i915_vma_unlock(batch);
+ if (err)
+ goto out_request;
}
idx = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index af89c7fc8f59..1b6125e4c1ac 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -841,7 +841,7 @@ int i915_vma_mock_selftests(void)
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
@@ -892,7 +892,7 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int x, y;
int err;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 6e80d99048e4..0aeba8e3af28 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -261,6 +261,82 @@ err_close_objects:
return err;
}
+static int igt_mock_splintered_region(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ unsigned int expected_order;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+	 * Sanity check that we can still allocate everything even if
+	 * mm.max_order != mm.size, i.e. our starting address space size is
+	 * not a power-of-two.
+ */
+
+ size = (SZ_4G - 1) & PAGE_MASK;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ if (mem->mm.size != size) {
+ pr_err("%s size mismatch(%llu != %llu)\n",
+ __func__, mem->mm.size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ expected_order = get_order(rounddown_pow_of_two(size));
+ if (mem->mm.max_order != expected_order) {
+ pr_err("%s order mismatch(%u != %u)\n",
+ __func__, mem->mm.max_order, expected_order);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+ close_objects(mem, &objects);
+
+ /*
+	 * While we should be able to allocate everything without any flag
+	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
+	 * actually limited to the largest power-of-two of the region size,
+	 * i.e. max_order, due to the inner workings of the buddy allocator.
+	 * So make sure that does indeed hold true.
+ */
+
+ obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (!IS_ERR(obj)) {
+ pr_err("%s too large contiguous allocation was not rejected\n",
+ __func__);
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ pr_err("%s largest possible contiguous allocation failed\n",
+ __func__);
+ err = PTR_ERR(obj);
+ goto out_close;
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
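Editor's note: for concreteness, the sizing this test checks works out as follows, assuming 4 KiB pages (PAGE_SHIFT == 12): size = (4 GiB - 1) & PAGE_MASK = 4 GiB - 4 KiB, the largest power-of-two that fits is 2 GiB, and expected_order = get_order(2 GiB) = 31 - 12 = 19, so a contiguous allocation is capped at 2 GiB even though the region is nearly 4 GiB. The same arithmetic as a standalone check (GCC builtins assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* 4 KiB pages assumed */
	uint64_t size = ((1ULL << 32) - 1) & ~((1ULL << page_shift) - 1);
	uint64_t pow2 = 1ULL << (63 - __builtin_clzll(size));	/* rounddown_pow_of_two */
	unsigned int order = 63 - __builtin_clzll(pow2 >> page_shift);	/* get_order */

	/* prints: size=4294963200 pow2=2147483648 order=19 */
	printf("size=%llu pow2=%llu order=%u\n",
	       (unsigned long long)size, (unsigned long long)pow2, order);
	return 0;
}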
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -509,7 +585,7 @@ static int igt_lmem_write_cpu(void *arg)
if (err)
goto out_unpin;
- i915_gem_object_lock(obj);
+ i915_gem_object_lock(obj, NULL);
err = i915_gem_object_set_to_wc_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
@@ -522,9 +598,9 @@ static int igt_lmem_write_cpu(void *arg)
goto out_unpin;
}
- /* We want to throw in a random width/align */
- bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
- sizeof(u32));
+	/* A random offset, a multiple of sizeof(u32), in [64, PAGE_SIZE - 64] */
+ bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
+ GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));
i = 0;
do {
@@ -771,6 +847,7 @@ int intel_memory_region_mock_selftests(void)
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
+ SUBTEST(igt_mock_splintered_region),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;
@@ -791,7 +868,7 @@ int intel_memory_region_mock_selftests(void)
intel_memory_region_put(mem);
out_unref:
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 397c313a8b69..b6c42fd872ad 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,8 +79,6 @@ static void mock_device_release(struct drm_device *dev)
out:
i915_params_free(&i915->params);
- put_device(&i915->drm.pdev->dev);
- i915->drm.pdev = NULL;
}
static struct drm_driver mock_driver = {
@@ -123,17 +121,10 @@ struct drm_i915_private *mock_gem_device(void)
#endif
struct drm_i915_private *i915;
struct pci_dev *pdev;
- int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
return NULL;
- i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
- if (!i915) {
- kfree(pdev);
- return NULL;
- }
-
device_initialize(&pdev->dev);
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
pdev->dev.release = release_dev;
@@ -144,8 +135,23 @@ struct drm_i915_private *mock_gem_device(void)
/* HACK to disable iommu for the fake device; force identity mapping */
pdev->dev.iommu = &fake_iommu;
#endif
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ put_device(&pdev->dev);
+ return NULL;
+ }
+
+ i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915)) {
+ pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
+ devres_release_group(&pdev->dev, NULL);
+ put_device(&pdev->dev);
+
+ return NULL;
+ }
pci_set_drvdata(pdev, i915);
+ i915->drm.pdev = pdev;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -153,16 +159,6 @@ struct drm_i915_private *mock_gem_device(void)
if (pm_runtime_enabled(&pdev->dev))
WARN_ON(pm_runtime_get_sync(&pdev->dev));
- err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
- if (err) {
- pr_err("Failed to initialise mock GEM device: err=%d\n", err);
- put_device(&pdev->dev);
- kfree(i915);
-
- return NULL;
- }
- i915->drm.pdev = pdev;
- drmm_add_final_kfree(&i915->drm, i915);
i915_params_copy(&i915->params, &i915_modparams);
@@ -222,7 +218,15 @@ err_drv:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
- drm_dev_put(&i915->drm);
+ mock_destroy_device(i915);
return NULL;
}
+
+void mock_destroy_device(struct drm_i915_private *i915)
+{
+ struct device *dev = i915->drm.dev;
+
+ devres_release_group(dev, NULL);
+ put_device(dev);
+}
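Editor's note: mock_gem_device() now opens an anonymous devres group before devm_drm_dev_alloc(), so mock_destroy_device() can tear down every managed allocation with a single devres_release_group() instead of drm_dev_put(). A kernel-style sketch of that pairing (error handling trimmed; priv is a hypothetical managed allocation):

	/* open an anonymous group; everything devm_* after this is tracked */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);	/* group-tracked */

	/* on teardown: releases priv and every other resource in the group */
	devres_release_group(dev, NULL);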
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
index b5dc4e394555..953cfe4fab34 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -7,4 +7,6 @@ struct drm_i915_private;
struct drm_i915_private *mock_gem_device(void);
void mock_device_flush(struct drm_i915_private *i915);
+void mock_destroy_device(struct drm_i915_private *i915);
+
#endif /* !__MOCK_GEM_DEVICE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index b173086411ef..7270fc8ca801 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -38,14 +38,14 @@ static void mock_insert_entries(struct i915_address_space *vm,
{
}
-static int mock_bind_ppgtt(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void mock_bind_ppgtt(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
- return 0;
}
static void mock_unbind_ppgtt(struct i915_address_space *vm,
@@ -74,9 +74,12 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+ ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+
ppgtt->vm.clear_range = mock_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
ppgtt->vm.insert_entries = mock_insert_entries;
@@ -90,13 +93,12 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
return ppgtt;
}
-static int mock_bind_ggtt(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void mock_bind_ggtt(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
- return 0;
}
static void mock_unbind_ggtt(struct i915_address_space *vm,
@@ -116,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+
ggtt->vm.clear_range = mock_clear_range;
ggtt->vm.insert_page = mock_insert_page;
ggtt->vm.insert_entries = mock_insert_entries;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 09660f5a0a4c..979d96f27c43 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -24,7 +24,7 @@ mock_object_create(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ if (size > mem->mm.size)
return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc();
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 207bf7409dfb..6231048aa5aa 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -39,3 +39,5 @@ config DRM_IMX_HDMI
depends on DRM_IMX
help
Choose this if you want to use HDMI on i.MX6.
+
+source "drivers/gpu/drm/imx/dcss/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 21cdcc2faabc..b644deffe948 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
+obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
new file mode 100644
index 000000000000..2b17a964ff05
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -0,0 +1,9 @@
+config DRM_IMX_DCSS
+ tristate "i.MX8MQ DCSS"
+ select IMX_IRQSTEER
+ select DRM_KMS_CMA_HELPER
+ select VIDEOMODE_HELPERS
+ depends on DRM && ARCH_MXC && ARM64
+ help
+	  Choose this if you have an NXP i.MX8MQ based system and want to use the
+	  Display Controller Subsystem. This option enables DCSS support.
diff --git a/drivers/gpu/drm/imx/dcss/Makefile b/drivers/gpu/drm/imx/dcss/Makefile
new file mode 100644
index 000000000000..8c7c8da42792
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/Makefile
@@ -0,0 +1,6 @@
+imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \
+ dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \
+ dcss-plane.o
+
+obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o
+
diff --git a/drivers/gpu/drm/imx/dcss/dcss-blkctl.c b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
new file mode 100644
index 000000000000..c9b54bb2692d
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-blkctl.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_BLKCTL_RESET_CTRL 0x00
+#define B_CLK_RESETN BIT(0)
+#define APB_CLK_RESETN BIT(1)
+#define P_CLK_RESETN BIT(2)
+#define RTR_CLK_RESETN BIT(4)
+#define DCSS_BLKCTL_CONTROL0 0x10
+#define HDMI_MIPI_CLK_SEL BIT(0)
+#define DISPMIX_REFCLK_SEL_POS 4
+#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4)
+#define DISPMIX_PIXCLK_SEL BIT(8)
+#define HDMI_SRC_SECURE_EN BIT(16)
+
+struct dcss_blkctl {
+ struct dcss_dev *dcss;
+ void __iomem *base_reg;
+};
+
+void dcss_blkctl_cfg(struct dcss_blkctl *blkctl)
+{
+ if (blkctl->dcss->hdmi_output)
+ dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
+ else
+ dcss_writel(DISPMIX_PIXCLK_SEL,
+ blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
+
+ dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN,
+ blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
+}
+
+int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
+{
+ struct dcss_blkctl *blkctl;
+
+ blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL);
+ if (!blkctl)
+ return -ENOMEM;
+
+ blkctl->base_reg = ioremap(blkctl_base, SZ_4K);
+ if (!blkctl->base_reg) {
+ dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
+ kfree(blkctl);
+ return -ENOMEM;
+ }
+
+ dcss->blkctl = blkctl;
+ blkctl->dcss = dcss;
+
+ dcss_blkctl_cfg(blkctl);
+
+ return 0;
+}
+
+void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
+{
+ if (blkctl->base_reg)
+ iounmap(blkctl->base_reg);
+
+ kfree(blkctl);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
new file mode 100644
index 000000000000..36abff0890b2
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static int dcss_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = crtc->dev->dev_private;
+
+ dcss_dtg_vblank_irq_enable(dcss->dtg, true);
+
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
+
+ enable_irq(dcss_crtc->irq);
+
+ return 0;
+}
+
+static void dcss_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ disable_irq_nosync(dcss_crtc->irq);
+
+ dcss_dtg_vblank_irq_enable(dcss->dtg, false);
+
+ if (dcss_crtc->disable_ctxld_kick_irq)
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
+}
+
+static const struct drm_crtc_funcs dcss_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = dcss_enable_vblank,
+ .disable_vblank = dcss_disable_vblank,
+};
+
+static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (dcss_dtg_is_enabled(dcss->dtg))
+ dcss_ctxld_enable(dcss->ctxld);
+}
+
+static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
+ struct videomode vm;
+
+ drm_display_mode_to_videomode(mode, &vm);
+
+ pm_runtime_get_sync(dcss->dev);
+
+ vm.pixelclock = mode->crtc_clock * 1000;
+
+ dcss_ss_subsam_set(dcss->ss);
+ dcss_dtg_css_set(dcss->dtg);
+
+ if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) {
+ dcss_dtg_sync_set(dcss->dtg, &vm);
+ dcss_ss_sync_set(dcss->ss, &vm,
+ mode->flags & DRM_MODE_FLAG_PHSYNC,
+ mode->flags & DRM_MODE_FLAG_PVSYNC);
+ }
+
+ dcss_enable_dtg_and_ss(dcss);
+
+ dcss_ctxld_enable(dcss->ctxld);
+
+ /* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. */
+ dcss_crtc->disable_ctxld_kick_irq = true;
+}
+
+static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
+ base);
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
+
+ drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
+
+ reinit_completion(&dcss->disable_completion);
+
+ dcss_disable_dtg_and_ss(dcss);
+
+ dcss_ctxld_enable(dcss->ctxld);
+
+ if (!drm_mode_equal(mode, old_mode) || !crtc->state->active)
+ if (!wait_for_completion_timeout(&dcss->disable_completion,
+ msecs_to_jiffies(100)))
+ dev_err(dcss->dev, "Shutting off DTG timed out.\n");
+
+ /*
+ * Do not shut off CTXLD kick interrupt when shutting VBLANK off. It
+	 * will be needed to commit the last changes before going to suspend.
+ */
+ dcss_crtc->disable_ctxld_kick_irq = false;
+
+ drm_crtc_vblank_off(crtc);
+
+ pm_runtime_mark_last_busy(dcss->dev);
+ pm_runtime_put_autosuspend(dcss->dev);
+}
+
+static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
+ .atomic_begin = dcss_crtc_atomic_begin,
+ .atomic_flush = dcss_crtc_atomic_flush,
+ .atomic_enable = dcss_crtc_atomic_enable,
+ .atomic_disable = dcss_crtc_atomic_disable,
+};
+
+static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
+{
+ struct dcss_crtc *dcss_crtc = dev_id;
+ struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
+
+ if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
+ return IRQ_NONE;
+
+ if (dcss_ctxld_is_flushed(dcss->ctxld))
+ drm_crtc_handle_vblank(&dcss_crtc->base);
+
+ dcss_dtg_vblank_irq_clear(dcss->dtg);
+
+ return IRQ_HANDLED;
+}
+
+int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
+{
+ struct dcss_dev *dcss = drm->dev_private;
+ struct platform_device *pdev = to_platform_device(dcss->dev);
+ int ret;
+
+ crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
+ DRM_PLANE_TYPE_PRIMARY, 0);
+ if (IS_ERR(crtc->plane[0]))
+ return PTR_ERR(crtc->plane[0]);
+
+ crtc->base.port = dcss->of_port;
+
+ drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
+ NULL, &dcss_crtc_funcs, NULL);
+ if (ret) {
+ dev_err(dcss->dev, "failed to init crtc\n");
+ return ret;
+ }
+
+ crtc->irq = platform_get_irq_byname(pdev, "vblank");
+ if (crtc->irq < 0)
+ return crtc->irq;
+
+ ret = request_irq(crtc->irq, dcss_crtc_irq_handler,
+ 0, "dcss_drm", crtc);
+ if (ret) {
+ dev_err(dcss->dev, "irq request failed with %d.\n", ret);
+ return ret;
+ }
+
+ disable_irq(crtc->irq);
+
+ return 0;
+}
+
+void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
+{
+ free_irq(crtc->irq, crtc);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ctxld.c b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
new file mode 100644
index 000000000000..3a84cb3209c4
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-ctxld.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_CTXLD_CONTROL_STATUS 0x0
+#define CTXLD_ENABLE BIT(0)
+#define ARB_SEL BIT(1)
+#define RD_ERR_EN BIT(2)
+#define DB_COMP_EN BIT(3)
+#define SB_HP_COMP_EN BIT(4)
+#define SB_LP_COMP_EN BIT(5)
+#define DB_PEND_SB_REC_EN BIT(6)
+#define SB_PEND_DISP_ACTIVE_EN BIT(7)
+#define AHB_ERR_EN BIT(8)
+#define RD_ERR BIT(16)
+#define DB_COMP BIT(17)
+#define SB_HP_COMP BIT(18)
+#define SB_LP_COMP BIT(19)
+#define DB_PEND_SB_REC BIT(20)
+#define SB_PEND_DISP_ACTIVE BIT(21)
+#define AHB_ERR BIT(22)
+#define DCSS_CTXLD_DB_BASE_ADDR 0x10
+#define DCSS_CTXLD_DB_COUNT 0x14
+#define DCSS_CTXLD_SB_BASE_ADDR 0x18
+#define DCSS_CTXLD_SB_COUNT 0x1C
+#define SB_HP_COUNT_POS 0
+#define SB_HP_COUNT_MASK 0xffff
+#define SB_LP_COUNT_POS 16
+#define SB_LP_COUNT_MASK 0xffff0000
+#define DCSS_AHB_ERR_ADDR 0x20
+
+#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
+#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
+
+/* The following sizes are in context loader entries, 8 bytes each. */
+#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
+#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
+#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
+#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
+ CTXLD_SB_HP_CTX_ENTRIES)
+
+/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
+static u16 dcss_ctxld_ctx_size[3] = {
+ CTXLD_DB_CTX_ENTRIES,
+ CTXLD_SB_HP_CTX_ENTRIES,
+ CTXLD_SB_LP_CTX_ENTRIES
+};
+
+/* this represents an entry in the context loader map */
+struct dcss_ctxld_item {
+ u32 val;
+ u32 ofs;
+};
+
+#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
+
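Editor's note: since each dcss_ctxld_item is 8 bytes ({u32 val; u32 ofs}), the entry counts above translate to the following DMA footprint per buffer, as a back-of-the-envelope check (and everything is double-buffered, so the driver keeps two of each):

	DB ring:  1024 entries * 8 B           =   8 KiB
	SB ring: (20000 + 10240) entries * 8 B = 241,920 B (~236 KiB)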
+struct dcss_ctxld {
+ struct device *dev;
+ void __iomem *ctxld_reg;
+ int irq;
+ bool irq_en;
+
+ struct dcss_ctxld_item *db[2];
+ struct dcss_ctxld_item *sb_hp[2];
+ struct dcss_ctxld_item *sb_lp[2];
+
+ dma_addr_t db_paddr[2];
+ dma_addr_t sb_paddr[2];
+
+ u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
+ u8 current_ctx;
+
+ bool in_use;
+ bool armed;
+
+	spinlock_t lock; /* protects concurrent access to private data */
+};
+
+static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
+{
+ struct dcss_ctxld *ctxld = data;
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
+ u32 irq_status;
+
+ irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ if (irq_status & CTXLD_IRQ_COMPLETION &&
+ !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
+ ctxld->in_use = false;
+
+ if (dcss && dcss->disable_callback)
+ dcss->disable_callback(dcss);
+ } else if (irq_status & CTXLD_IRQ_ERROR) {
+ /*
+ * Except for throwing an error message and clearing the status
+ * register, there's not much we can do here.
+ */
+ dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
+ irq_status);
+ dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
+ ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
+ }
+
+ dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
+ ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
+ if (ctxld->irq < 0)
+ return ctxld->irq;
+
+ ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
+ 0, "dcss_ctxld", ctxld);
+ if (ret) {
+ dev_err(ctxld->dev, "ctxld: irq request failed.\n");
+ return ret;
+ }
+
+ ctxld->irq_en = true;
+
+ return 0;
+}
+
+static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
+{
+ dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
+ DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
+ ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+}
+
+static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
+{
+ struct dcss_ctxld_item *ctx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (ctxld->db[i]) {
+ dma_free_coherent(ctxld->dev,
+ CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
+ ctxld->db[i], ctxld->db_paddr[i]);
+ ctxld->db[i] = NULL;
+ ctxld->db_paddr[i] = 0;
+ }
+
+ if (ctxld->sb_hp[i]) {
+ dma_free_coherent(ctxld->dev,
+ CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
+ ctxld->sb_hp[i], ctxld->sb_paddr[i]);
+ ctxld->sb_hp[i] = NULL;
+ ctxld->sb_paddr[i] = 0;
+ }
+ }
+}
+
+static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
+{
+ struct dcss_ctxld_item *ctx;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ ctx = dma_alloc_coherent(ctxld->dev,
+ CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
+ &ctxld->db_paddr[i], GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctxld->db[i] = ctx;
+
+ ctx = dma_alloc_coherent(ctxld->dev,
+ CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
+ &ctxld->sb_paddr[i], GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctxld->sb_hp[i] = ctx;
+ ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
+ }
+
+ return 0;
+}
+
+int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
+{
+ struct dcss_ctxld *ctxld;
+ int ret;
+
+ ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
+ if (!ctxld)
+ return -ENOMEM;
+
+ dcss->ctxld = ctxld;
+ ctxld->dev = dcss->dev;
+
+ spin_lock_init(&ctxld->lock);
+
+ ret = dcss_ctxld_alloc_ctx(ctxld);
+ if (ret) {
+ dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
+ goto err;
+ }
+
+ ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
+ if (!ctxld->ctxld_reg) {
+ dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
+ if (ret)
+ goto err_irq;
+
+ dcss_ctxld_hw_cfg(ctxld);
+
+ return 0;
+
+err_irq:
+ iounmap(ctxld->ctxld_reg);
+
+err:
+ dcss_ctxld_free_ctx(ctxld);
+ kfree(ctxld);
+
+ return ret;
+}
+
+void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
+{
+ free_irq(ctxld->irq, ctxld);
+
+ if (ctxld->ctxld_reg)
+ iounmap(ctxld->ctxld_reg);
+
+ dcss_ctxld_free_ctx(ctxld);
+ kfree(ctxld);
+}
+
+static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
+{
+ int curr_ctx = ctxld->current_ctx;
+ u32 db_base, sb_base, sb_count;
+ u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
+
+ if (!dcss)
+ return 0;
+
+ dcss_dpr_write_sysctrl(dcss->dpr);
+
+ dcss_scaler_write_sclctrl(dcss->scaler);
+
+ sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
+ sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
+ db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];
+
+ /* make sure SB_LP context area comes after SB_HP */
+ if (sb_lp_cnt &&
+ ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
+ struct dcss_ctxld_item *sb_lp_adjusted;
+
+ sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;
+
+ memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
+ sb_lp_cnt * CTX_ITEM_SIZE);
+ }
+
+ db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;
+
+ dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
+ dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);
+
+ if (sb_hp_cnt)
+ sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
+ ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
+ else
+ sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;
+
+ sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;
+
+ dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
+ dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);
+
+ /* enable the context loader */
+ dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
+
+ ctxld->in_use = true;
+
+ /*
+ * Toggle the current context to the alternate one so that any updates
+ * in the modules' settings take place there.
+ */
+ ctxld->current_ctx ^= 1;
+
+ ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;
+
+ return 0;
+}
+
+int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
+{
+ spin_lock_irq(&ctxld->lock);
+ ctxld->armed = true;
+ spin_unlock_irq(&ctxld->lock);
+
+ return 0;
+}
+
+void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctxld->lock, flags);
+ if (ctxld->armed && !ctxld->in_use) {
+ ctxld->armed = false;
+ dcss_ctxld_enable_locked(ctxld);
+ }
+ spin_unlock_irqrestore(&ctxld->lock, flags);
+}
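Editor's note: dcss_ctxld_enable() only arms a pending flush; the actual commit happens in dcss_ctxld_kick(), called from the DTG interrupt path, and only once the previous load has finished. A minimal standalone model of that deferred-commit handshake (locking reduced to plain flags for brevity):

#include <stdbool.h>
#include <stdio.h>

struct loader { bool armed, in_use; };

static void arm(struct loader *l)
{
	l->armed = true;		/* request a flush, don't do it yet */
}

static void kick(struct loader *l)	/* called from the vblank path */
{
	if (l->armed && !l->in_use) {
		l->armed = false;
		l->in_use = true;	/* hardware now owns the context */
		printf("context flushed to hardware\n");
	}
}

int main(void)
{
	struct loader l = { 0 };

	arm(&l);
	kick(&l);	/* flushes */
	kick(&l);	/* no-op: still in_use until the completion IRQ clears it */
	return 0;
}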
+
+void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
+ u32 reg_ofs)
+{
+ int curr_ctx = ctxld->current_ctx;
+ struct dcss_ctxld_item *ctx[] = {
+ [CTX_DB] = ctxld->db[curr_ctx],
+ [CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
+ [CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
+ };
+ int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
+
+ if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
+ WARN_ON(1);
+ return;
+ }
+
+ ctx[ctx_id][item_idx].val = val;
+ ctx[ctx_id][item_idx].ofs = reg_ofs;
+ ctxld->ctx_size[curr_ctx][ctx_id] += 1;
+}
+
+void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
+ u32 val, u32 reg_ofs)
+{
+ spin_lock_irq(&ctxld->lock);
+ dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
+ spin_unlock_irq(&ctxld->lock);
+}
+
+bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
+{
+ return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
+ ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
+}
+
+int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
+{
+ dcss_ctxld_hw_cfg(ctxld);
+
+ if (!ctxld->irq_en) {
+ enable_irq(ctxld->irq);
+ ctxld->irq_en = true;
+ }
+
+ return 0;
+}
+
+int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
+{
+ int ret = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(500);
+
+ if (!dcss_ctxld_is_flushed(ctxld)) {
+ dcss_ctxld_kick(ctxld);
+
+ while (!time_after(jiffies, timeout) && ctxld->in_use)
+ msleep(20);
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irq(&ctxld->lock);
+
+ if (ctxld->irq_en) {
+ disable_irq_nosync(ctxld->irq);
+ ctxld->irq_en = false;
+ }
+
+ /* reset context region and sizes */
+ ctxld->current_ctx = 0;
+ ctxld->ctx_size[0][CTX_DB] = 0;
+ ctxld->ctx_size[0][CTX_SB_HP] = 0;
+ ctxld->ctx_size[0][CTX_SB_LP] = 0;
+
+ spin_unlock_irq(&ctxld->lock);
+
+ return ret;
+}
+
+void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
+{
+ lockdep_assert_held(&ctxld->lock);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
new file mode 100644
index 000000000000..c849533ca83e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_modeset_helper.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static void dcss_clocks_enable(struct dcss_dev *dcss)
+{
+ clk_prepare_enable(dcss->axi_clk);
+ clk_prepare_enable(dcss->apb_clk);
+ clk_prepare_enable(dcss->rtrm_clk);
+ clk_prepare_enable(dcss->dtrc_clk);
+ clk_prepare_enable(dcss->pix_clk);
+}
+
+static void dcss_clocks_disable(struct dcss_dev *dcss)
+{
+ clk_disable_unprepare(dcss->pix_clk);
+ clk_disable_unprepare(dcss->dtrc_clk);
+ clk_disable_unprepare(dcss->rtrm_clk);
+ clk_disable_unprepare(dcss->apb_clk);
+ clk_disable_unprepare(dcss->axi_clk);
+}
+
+static void dcss_disable_dtg_and_ss_cb(void *data)
+{
+ struct dcss_dev *dcss = data;
+
+ dcss->disable_callback = NULL;
+
+ dcss_ss_shutoff(dcss->ss);
+ dcss_dtg_shutoff(dcss->dtg);
+
+ complete(&dcss->disable_completion);
+}
+
+void dcss_disable_dtg_and_ss(struct dcss_dev *dcss)
+{
+ dcss->disable_callback = dcss_disable_dtg_and_ss_cb;
+}
+
+void dcss_enable_dtg_and_ss(struct dcss_dev *dcss)
+{
+ if (dcss->disable_callback)
+ dcss->disable_callback = NULL;
+
+ dcss_dtg_enable(dcss->dtg);
+ dcss_ss_enable(dcss->ss);
+}
+
+static int dcss_submodules_init(struct dcss_dev *dcss)
+{
+ int ret = 0;
+ u32 base_addr = dcss->start_addr;
+ const struct dcss_type_data *devtype = dcss->devtype;
+
+ dcss_clocks_enable(dcss);
+
+ ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
+ if (ret)
+ return ret;
+
+ ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
+ if (ret)
+ goto ctxld_err;
+
+ ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
+ if (ret)
+ goto dtg_err;
+
+ ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
+ if (ret)
+ goto ss_err;
+
+ ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
+ if (ret)
+ goto dpr_err;
+
+ ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
+ if (ret)
+ goto scaler_err;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+
+scaler_err:
+ dcss_dpr_exit(dcss->dpr);
+
+dpr_err:
+ dcss_ss_exit(dcss->ss);
+
+ss_err:
+ dcss_dtg_exit(dcss->dtg);
+
+dtg_err:
+ dcss_ctxld_exit(dcss->ctxld);
+
+ctxld_err:
+ dcss_blkctl_exit(dcss->blkctl);
+
+ dcss_clocks_disable(dcss);
+
+ return ret;
+}
+
+static void dcss_submodules_stop(struct dcss_dev *dcss)
+{
+ dcss_clocks_enable(dcss);
+ dcss_scaler_exit(dcss->scaler);
+ dcss_dpr_exit(dcss->dpr);
+ dcss_ss_exit(dcss->ss);
+ dcss_dtg_exit(dcss->dtg);
+ dcss_ctxld_exit(dcss->ctxld);
+ dcss_blkctl_exit(dcss->blkctl);
+ dcss_clocks_disable(dcss);
+}
+
+static int dcss_clks_init(struct dcss_dev *dcss)
+{
+ int i;
+ struct {
+ const char *id;
+ struct clk **clk;
+ } clks[] = {
+ {"apb", &dcss->apb_clk},
+ {"axi", &dcss->axi_clk},
+ {"pix", &dcss->pix_clk},
+ {"rtrm", &dcss->rtrm_clk},
+ {"dtrc", &dcss->dtrc_clk},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ *clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
+ if (IS_ERR(*clks[i].clk)) {
+ dev_err(dcss->dev, "failed to get %s clock\n",
+ clks[i].id);
+ return PTR_ERR(*clks[i].clk);
+ }
+ }
+
+ return 0;
+}
+
+static void dcss_clks_release(struct dcss_dev *dcss)
+{
+ devm_clk_put(dcss->dev, dcss->dtrc_clk);
+ devm_clk_put(dcss->dev, dcss->rtrm_clk);
+ devm_clk_put(dcss->dev, dcss->pix_clk);
+ devm_clk_put(dcss->dev, dcss->axi_clk);
+ devm_clk_put(dcss->dev, dcss->apb_clk);
+}
+
+struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ struct resource *res;
+ struct dcss_dev *dcss;
+ const struct dcss_type_data *devtype;
+
+ devtype = of_device_get_match_data(dev);
+ if (!devtype) {
+ dev_err(dev, "no device match found\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "cannot get memory resource\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
+ if (!dcss)
+ return ERR_PTR(-ENOMEM);
+
+ dcss->dev = dev;
+ dcss->devtype = devtype;
+ dcss->hdmi_output = hdmi_output;
+
+ ret = dcss_clks_init(dcss);
+ if (ret) {
+ dev_err(dev, "clocks initialization failed\n");
+ goto err;
+ }
+
+ dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!dcss->of_port) {
+ dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
+ ret = -ENODEV;
+ goto clks_err;
+ }
+
+ dcss->start_addr = res->start;
+
+ ret = dcss_submodules_init(dcss);
+ if (ret) {
+ dev_err(dev, "submodules initialization failed\n");
+ goto clks_err;
+ }
+
+ init_completion(&dcss->disable_completion);
+
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_enable(dev);
+
+ return dcss;
+
+clks_err:
+ dcss_clks_release(dcss);
+
+err:
+ kfree(dcss);
+
+ return ERR_PTR(ret);
+}
+
+void dcss_dev_destroy(struct dcss_dev *dcss)
+{
+ if (!pm_runtime_suspended(dcss->dev)) {
+ dcss_ctxld_suspend(dcss->ctxld);
+ dcss_clocks_disable(dcss);
+ }
+
+ pm_runtime_disable(dcss->dev);
+
+ dcss_submodules_stop(dcss);
+
+ dcss_clks_release(dcss);
+
+ kfree(dcss);
+}
+
+#ifdef CONFIG_PM_SLEEP
+int dcss_dev_suspend(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
+ struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
+ int ret;
+
+ drm_bridge_connector_disable_hpd(kms->connector);
+
+ drm_mode_config_helper_suspend(ddev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = dcss_ctxld_suspend(dcss->ctxld);
+ if (ret)
+ return ret;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+}
+
+int dcss_dev_resume(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
+ struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
+
+ if (pm_runtime_suspended(dev)) {
+ drm_mode_config_helper_resume(ddev);
+ return 0;
+ }
+
+ dcss_clocks_enable(dcss);
+
+ dcss_blkctl_cfg(dcss->blkctl);
+
+ dcss_ctxld_resume(dcss->ctxld);
+
+ drm_mode_config_helper_resume(ddev);
+
+ drm_bridge_connector_enable_hpd(kms->connector);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+int dcss_dev_runtime_suspend(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+ int ret;
+
+ ret = dcss_ctxld_suspend(dcss->ctxld);
+ if (ret)
+ return ret;
+
+ dcss_clocks_disable(dcss);
+
+ return 0;
+}
+
+int dcss_dev_runtime_resume(struct device *dev)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
+
+ dcss_clocks_enable(dcss);
+
+ dcss_blkctl_cfg(dcss->blkctl);
+
+ dcss_ctxld_resume(dcss->ctxld);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
new file mode 100644
index 000000000000..c642ae17837f
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP.
+ */
+
+#ifndef __DCSS_PRV_H__
+#define __DCSS_PRV_H__
+
+#include <drm/drm_fourcc.h>
+#include <linux/io.h>
+#include <video/videomode.h>
+
+#define SET 0x04
+#define CLR 0x08
+#define TGL 0x0C
+
+#define dcss_writel(v, c) writel((v), (c))
+#define dcss_readl(c) readl(c)
+#define dcss_set(v, c) writel((v), (c) + SET)
+#define dcss_clr(v, c) writel((v), (c) + CLR)
+#define dcss_toggle(v, c) writel((v), (c) + TGL)
+
+static inline void dcss_update(u32 v, u32 m, void __iomem *c)
+{
+ writel((readl(c) & ~(m)) | (v), (c));
+}
+
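Editor's note: the SET/CLR/TGL offsets follow the register-alias scheme common on i.MX blocks: writing a mask to base+0x04/0x08/0x0C sets, clears, or toggles exactly those bits without a read-modify-write; dcss_update() is the fallback for multi-bit field updates. A usage sketch, borrowing register names from dcss-dpr.c further below (base stands for a hypothetical mapped channel base):

	dcss_set(RUN_EN, base + DCSS_DPR_SYSTEM_CTRL0);	/* set one bit, leave the rest */
	dcss_clr(RUN_EN, base + DCSS_DPR_SYSTEM_CTRL0);	/* clear it again */
	dcss_update(PIX_SIZE_32 << PIX_SIZE_POS, PIX_SIZE_MASK,	/* multi-bit field */
		    base + DCSS_DPR_MODE_CTRL0);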
+#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg}
+
+enum {
+ DCSS_IMX8MQ = 0,
+};
+
+struct dcss_type_data {
+ const char *name;
+ u32 blkctl_ofs;
+ u32 ctxld_ofs;
+ u32 rdsrc_ofs;
+ u32 wrscl_ofs;
+ u32 dtg_ofs;
+ u32 scaler_ofs;
+ u32 ss_ofs;
+ u32 dpr_ofs;
+ u32 dtrc_ofs;
+ u32 dec400d_ofs;
+ u32 hdr10_ofs;
+};
+
+struct dcss_debug_reg {
+ char *name;
+ u32 ofs;
+};
+
+enum dcss_ctxld_ctx_type {
+ CTX_DB,
+ CTX_SB_HP, /* high-priority */
+ CTX_SB_LP, /* low-priority */
+};
+
+struct dcss_dev {
+ struct device *dev;
+ const struct dcss_type_data *devtype;
+ struct device_node *of_port;
+
+ u32 start_addr;
+
+ struct dcss_blkctl *blkctl;
+ struct dcss_ctxld *ctxld;
+ struct dcss_dpr *dpr;
+ struct dcss_dtg *dtg;
+ struct dcss_ss *ss;
+ struct dcss_hdr10 *hdr10;
+ struct dcss_scaler *scaler;
+ struct dcss_dtrc *dtrc;
+ struct dcss_dec400d *dec400d;
+ struct dcss_wrscl *wrscl;
+ struct dcss_rdsrc *rdsrc;
+
+ struct clk *apb_clk;
+ struct clk *axi_clk;
+ struct clk *pix_clk;
+ struct clk *rtrm_clk;
+ struct clk *dtrc_clk;
+ struct clk *pll_src_clk;
+ struct clk *pll_phy_ref_clk;
+
+ bool hdmi_output;
+
+ void (*disable_callback)(void *data);
+ struct completion disable_completion;
+};
+
+struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
+struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
+struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
+void dcss_dev_destroy(struct dcss_dev *dcss);
+int dcss_dev_runtime_suspend(struct device *dev);
+int dcss_dev_runtime_resume(struct device *dev);
+int dcss_dev_suspend(struct device *dev);
+int dcss_dev_resume(struct device *dev);
+void dcss_enable_dtg_and_ss(struct dcss_dev *dcss);
+void dcss_disable_dtg_and_ss(struct dcss_dev *dcss);
+
+/* BLKCTL */
+int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
+void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
+void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
+
+/* CTXLD */
+int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
+void dcss_ctxld_exit(struct dcss_ctxld *ctxld);
+void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
+ u32 val, u32 reg_idx);
+int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld);
+int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld);
+void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctlxd, u32 ctx_id, u32 val,
+ u32 reg_ofs);
+void dcss_ctxld_kick(struct dcss_ctxld *ctxld);
+bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld);
+int dcss_ctxld_enable(struct dcss_ctxld *ctxld);
+void dcss_ctxld_register_completion(struct dcss_ctxld *ctxld,
+ struct completion *dis_completion);
+void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld);
+
+/* DPR */
+int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base);
+void dcss_dpr_exit(struct dcss_dpr *dpr);
+void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr);
+void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres);
+void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
+ u32 chroma_base_addr, u16 pitch);
+void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en);
+void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
+ const struct drm_format_info *format, u64 modifier);
+void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation);
+
+/* DTG */
+int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base);
+void dcss_dtg_exit(struct dcss_dtg *dtg);
+bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg);
+void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en);
+void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg);
+void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm);
+void dcss_dtg_css_set(struct dcss_dtg *dtg);
+void dcss_dtg_enable(struct dcss_dtg *dtg);
+void dcss_dtg_shutoff(struct dcss_dtg *dtg);
+bool dcss_dtg_is_enabled(struct dcss_dtg *dtg);
+void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en);
+bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha);
+void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
+ const struct drm_format_info *format, int alpha);
+void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
+ int px, int py, int pw, int ph);
+void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en);
+
+/* SUBSAM */
+int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base);
+void dcss_ss_exit(struct dcss_ss *ss);
+void dcss_ss_enable(struct dcss_ss *ss);
+void dcss_ss_shutoff(struct dcss_ss *ss);
+void dcss_ss_subsam_set(struct dcss_ss *ss);
+void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
+ bool phsync, bool pvsync);
+
+/* SCALER */
+int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
+void dcss_scaler_exit(struct dcss_scaler *scl);
+void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
+ const struct drm_format_info *format,
+ int src_xres, int src_yres, int dst_xres, int dst_yres,
+ u32 vrefresh_hz);
+void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en);
+int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
+ int *min, int *max);
+void dcss_scaler_write_sclctrl(struct dcss_scaler *scl);
+
+#endif /* __DCSS_PRV_H__ */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dpr.c b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
new file mode 100644
index 000000000000..df9dab949bf2
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dpr.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_DPR_SYSTEM_CTRL0 0x000
+#define RUN_EN BIT(0)
+#define SOFT_RESET BIT(1)
+#define REPEAT_EN BIT(2)
+#define SHADOW_LOAD_EN BIT(3)
+#define SW_SHADOW_LOAD_SEL BIT(4)
+#define BCMD2AXI_MSTR_ID_CTRL BIT(16)
+#define DCSS_DPR_IRQ_MASK 0x020
+#define DCSS_DPR_IRQ_MASK_STATUS 0x030
+#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040
+#define IRQ_DPR_CTRL_DONE BIT(0)
+#define IRQ_DPR_RUN BIT(1)
+#define IRQ_DPR_SHADOW_LOADED BIT(2)
+#define IRQ_AXI_READ_ERR BIT(3)
+#define DPR2RTR_YRGB_FIFO_OVFL BIT(4)
+#define DPR2RTR_UV_FIFO_OVFL BIT(5)
+#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6)
+#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7)
+#define DCSS_DPR_MODE_CTRL0 0x050
+#define RTR_3BUF_EN BIT(0)
+#define RTR_4LINE_BUF_EN BIT(1)
+#define TILE_TYPE_POS 2
+#define TILE_TYPE_MASK GENMASK(4, 2)
+#define YUV_EN BIT(6)
+#define COMP_2PLANE_EN BIT(7)
+#define PIX_SIZE_POS 8
+#define PIX_SIZE_MASK GENMASK(9, 8)
+#define PIX_LUMA_UV_SWAP BIT(10)
+#define PIX_UV_SWAP BIT(11)
+#define B_COMP_SEL_POS 12
+#define B_COMP_SEL_MASK GENMASK(13, 12)
+#define G_COMP_SEL_POS 14
+#define G_COMP_SEL_MASK GENMASK(15, 14)
+#define R_COMP_SEL_POS 16
+#define R_COMP_SEL_MASK GENMASK(17, 16)
+#define A_COMP_SEL_POS 18
+#define A_COMP_SEL_MASK GENMASK(19, 18)
+#define DCSS_DPR_FRAME_CTRL0 0x070
+#define HFLIP_EN BIT(0)
+#define VFLIP_EN BIT(1)
+#define ROT_ENC_POS 2
+#define ROT_ENC_MASK GENMASK(3, 2)
+#define ROT_FLIP_ORDER_EN BIT(4)
+#define PITCH_POS 16
+#define PITCH_MASK GENMASK(31, 16)
+#define DCSS_DPR_FRAME_1P_CTRL0 0x090
+#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0
+#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0
+#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0
+#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0
+#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0
+#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100
+#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110
+#define DCSS_DPR_STATUS_CTRL0 0x130
+#define STATUS_MUX_SEL_MASK GENMASK(2, 0)
+#define STATUS_SRC_SEL_POS 16
+#define STATUS_SRC_SEL_MASK GENMASK(18, 16)
+#define DCSS_DPR_STATUS_CTRL1 0x140
+#define DCSS_DPR_RTRAM_CTRL0 0x200
+#define NUM_ROWS_ACTIVE BIT(0)
+#define THRES_HIGH_POS 1
+#define THRES_HIGH_MASK GENMASK(3, 1)
+#define THRES_LOW_POS 4
+#define THRES_LOW_MASK GENMASK(6, 4)
+#define ABORT_SEL BIT(7)
+
+enum dcss_tile_type {
+ TILE_LINEAR = 0,
+ TILE_GPU_STANDARD,
+ TILE_GPU_SUPER,
+ TILE_VPU_YUV420,
+ TILE_VPU_VP9,
+};
+
+enum dcss_pix_size {
+ PIX_SIZE_8,
+ PIX_SIZE_16,
+ PIX_SIZE_32,
+};
+
+struct dcss_dpr_ch {
+ struct dcss_dpr *dpr;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ struct drm_format_info format;
+ enum dcss_pix_size pix_size;
+ enum dcss_tile_type tile;
+ bool rtram_4line_en;
+ bool rtram_3buf_en;
+
+ u32 frame_ctrl;
+ u32 mode_ctrl;
+ u32 sys_ctrl;
+ u32 rtram_ctrl;
+
+ bool sys_ctrl_chgd;
+
+ int ch_num;
+ int irq;
+};
+
+struct dcss_dpr {
+ struct device *dev;
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ struct dcss_dpr_ch ch[3];
+};
+
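+/*
+ * All DPR register writes go through the context loader: they are queued
+ * in the CTX_SB_HP context and only take effect in hardware on the next
+ * context load kick.
+ */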
+static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs)
+{
+ struct dcss_dpr *dpr = ch->dpr;
+
+ dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs);
+}
+
+static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
+{
+ struct dcss_dpr_ch *ch;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ch = &dpr->ch[i];
+
+ ch->base_ofs = dpr_base + i * 0x1000;
+
+ ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ if (!ch->base_reg) {
+ dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
+ i);
+ return -ENOMEM;
+ }
+
+ ch->dpr = dpr;
+ ch->ch_num = i;
+
+ dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK);
+ }
+
+ return 0;
+}
+
+int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
+{
+ struct dcss_dpr *dpr;
+
+ dpr = kzalloc(sizeof(*dpr), GFP_KERNEL);
+ if (!dpr)
+ return -ENOMEM;
+
+ dcss->dpr = dpr;
+ dpr->dev = dcss->dev;
+ dpr->ctxld = dcss->ctxld;
+ dpr->ctx_id = CTX_SB_HP;
+
+ if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (dpr->ch[i].base_reg)
+ iounmap(dpr->ch[i].base_reg);
+ }
+
+ kfree(dpr);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void dcss_dpr_exit(struct dcss_dpr *dpr)
+{
+ int ch_no;
+
+ /* stop DPR on all channels */
+ for (ch_no = 0; ch_no < 3; ch_no++) {
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
+
+ dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
+
+ if (ch->base_reg)
+ iounmap(ch->base_reg);
+ }
+
+ kfree(dpr);
+}
+
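+/*
+ * DPR fetches in 64-byte units, so the programmed width must be rounded
+ * up to a multiple of the pixels-per-64-bytes value for the given pixel
+ * size and tiling. For example, a 1366-pixel-wide linear 32bpp buffer
+ * (16 pixels per 64 bytes) is programmed as 1376 pixels wide.
+ */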
+static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
+ u32 pix_format)
+{
+	static const u8 pix_in_64byte_map[3][5] = {
+ /* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */
+ { 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */
+ { 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */
+ { 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */
+ };
+ u32 offset;
+ u32 div_64byte_mod, pix_in_64byte;
+
+ pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile];
+
+ div_64byte_mod = pix_wide % pix_in_64byte;
+ offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod);
+
+ return pix_wide + offset;
+}
+
+static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high,
+ u32 pix_format)
+{
+ u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8;
+ u32 offset, pix_y_mod;
+
+ pix_y_mod = pix_high % num_rows_buf;
+ offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0;
+
+ return pix_high + offset;
+}
+
+void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+ u32 pix_format = ch->format.format;
+ u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR;
+ int plane, max_planes = 1;
+ u32 pix_x_wide, pix_y_high;
+
+ if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21)
+ max_planes = 2;
+
+ for (plane = 0; plane < max_planes; plane++) {
+ yres = plane == 1 ? yres >> 1 : yres;
+
+ pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format);
+ pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format);
+
+ dcss_dpr_write(ch, pix_x_wide,
+ DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap);
+ dcss_dpr_write(ch, pix_y_high,
+ DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap);
+
+ dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap);
+ }
+}
+
+void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
+ u32 chroma_base_addr, u16 pitch)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR);
+
+ dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR);
+
+ ch->frame_ctrl &= ~PITCH_MASK;
+ ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK);
+}
+
+static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel,
+ int g_sel, int b_sel)
+{
+ u32 sel;
+
+ sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) |
+ ((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) |
+ ((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) |
+ ((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK);
+
+ ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK |
+ G_COMP_SEL_MASK | B_COMP_SEL_MASK);
+ ch->mode_ctrl |= sel;
+}
+
+static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch,
+ const struct drm_format_info *format)
+{
+ u32 val;
+
+ switch (format->format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ val = PIX_SIZE_8;
+ break;
+
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ val = PIX_SIZE_16;
+ break;
+
+ default:
+ val = PIX_SIZE_32;
+ break;
+ }
+
+ ch->pix_size = val;
+
+ ch->mode_ctrl &= ~PIX_SIZE_MASK;
+ ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK);
+}
+
+static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap)
+{
+ ch->mode_ctrl &= ~PIX_UV_SWAP;
+ ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0);
+}
+
+static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap)
+{
+ ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP;
+ ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0);
+}
+
+static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en)
+{
+ ch->mode_ctrl &= ~COMP_2PLANE_EN;
+ ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0);
+}
+
+static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en)
+{
+ ch->mode_ctrl &= ~YUV_EN;
+ ch->mode_ctrl |= (en ? YUV_EN : 0);
+}
+
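+/*
+ * Enabling/disabling does not touch SYSTEM_CTRL0 directly: the new value
+ * is cached and sys_ctrl_chgd is set, so that dcss_dpr_write_sysctrl()
+ * can flush it from the CTXLD kick interrupt, in sync with the rest of
+ * the frame's context.
+ */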
+void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+ u32 sys_ctrl;
+
+ sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0);
+
+ if (en) {
+ dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0);
+ dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0);
+ dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0);
+ }
+
+ if (ch->sys_ctrl != sys_ctrl)
+ ch->sys_ctrl_chgd = true;
+
+ ch->sys_ctrl = sys_ctrl;
+}
+
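+/*
+ * Each selector picks the byte lane of the 32-bit pixel that feeds the
+ * corresponding output component, with 0 being the least significant
+ * byte. E.g. little-endian ARGB8888 stores B in byte 0, so b_sel = 0.
+ */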
+struct rgb_comp_sel {
+ u32 drm_format;
+ int a_sel;
+ int r_sel;
+ int g_sel;
+ int b_sel;
+};
+
+static struct rgb_comp_sel comp_sel_map[] = {
+ {DRM_FORMAT_ARGB8888, 3, 2, 1, 0},
+ {DRM_FORMAT_XRGB8888, 3, 2, 1, 0},
+ {DRM_FORMAT_ABGR8888, 3, 0, 1, 2},
+ {DRM_FORMAT_XBGR8888, 3, 0, 1, 2},
+ {DRM_FORMAT_RGBA8888, 0, 3, 2, 1},
+ {DRM_FORMAT_RGBX8888, 0, 3, 2, 1},
+ {DRM_FORMAT_BGRA8888, 0, 1, 2, 3},
+ {DRM_FORMAT_BGRX8888, 0, 1, 2, 3},
+};
+
+static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel,
+ int *b_sel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) {
+ if (comp_sel_map[i].drm_format == pix_fmt) {
+ *a_sel = comp_sel_map[i].a_sel;
+ *r_sel = comp_sel_map[i].r_sel;
+ *g_sel = comp_sel_map[i].g_sel;
+ *b_sel = comp_sel_map[i].b_sel;
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format)
+{
+ u32 val, mask;
+
+ switch (pix_format) {
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV12:
+ ch->rtram_3buf_en = true;
+ ch->rtram_4line_en = false;
+ break;
+
+ default:
+ ch->rtram_3buf_en = true;
+ ch->rtram_4line_en = true;
+ break;
+ }
+
+ val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0);
+ val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0);
+ mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN;
+
+ ch->mode_ctrl &= ~mask;
+ ch->mode_ctrl |= (val & mask);
+
+ val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE);
+ val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK;
+ val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK;
+ mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE;
+
+ ch->rtram_ctrl &= ~mask;
+ ch->rtram_ctrl |= (val & mask);
+}
+
+static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch,
+ const struct drm_format_info *format)
+{
+ int a_sel, r_sel, g_sel, b_sel;
+ bool uv_swap, y_uv_swap;
+
+ switch (format->format) {
+ case DRM_FORMAT_YVYU:
+ uv_swap = true;
+ y_uv_swap = true;
+ break;
+
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV21:
+ uv_swap = true;
+ y_uv_swap = false;
+ break;
+
+ case DRM_FORMAT_YUYV:
+ uv_swap = false;
+ y_uv_swap = true;
+ break;
+
+ default:
+ uv_swap = false;
+ y_uv_swap = false;
+ break;
+ }
+
+ dcss_dpr_uv_swap(ch, uv_swap);
+
+ dcss_dpr_y_uv_swap(ch, y_uv_swap);
+
+ if (!format->is_yuv) {
+ if (!to_comp_sel(format->format, &a_sel, &r_sel,
+ &g_sel, &b_sel)) {
+ dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel);
+ } else {
+ dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0);
+ }
+ } else {
+ dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0);
+ }
+}
+
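+/*
+ * Only channel 0 (the graphics channel) supports GPU tile modifiers;
+ * channels 1 and 2 are treated as linear regardless of the modifier.
+ */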
+static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier)
+{
+ switch (ch->ch_num) {
+ case 0:
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ ch->tile = TILE_LINEAR;
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_TILED:
+ ch->tile = TILE_GPU_STANDARD;
+ break;
+ case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+ ch->tile = TILE_GPU_SUPER;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ break;
+ case 1:
+ case 2:
+ ch->tile = TILE_LINEAR;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ ch->mode_ctrl &= ~TILE_TYPE_MASK;
+ ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK);
+}
+
+void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
+ const struct drm_format_info *format, u64 modifier)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ ch->format = *format;
+
+ dcss_dpr_yuv_en(ch, format->is_yuv);
+
+ dcss_dpr_pix_size_set(ch, format);
+
+ dcss_dpr_setup_components(ch, format);
+
+ dcss_dpr_2plane_en(ch, format->num_planes == 2);
+
+ dcss_dpr_rtram_set(ch, format->format);
+
+ dcss_dpr_tile_set(ch, modifier);
+}
+
+/* This function will be called from interrupt context. */
+void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr)
+{
+ int chnum;
+
+ dcss_ctxld_assert_locked(dpr->ctxld);
+
+ for (chnum = 0; chnum < 3; chnum++) {
+ struct dcss_dpr_ch *ch = &dpr->ch[chnum];
+
+ if (ch->sys_ctrl_chgd) {
+ dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id,
+ ch->sys_ctrl,
+ ch->base_ofs +
+ DCSS_DPR_SYSTEM_CTRL0);
+ ch->sys_ctrl_chgd = false;
+ }
+ }
+}
+
+void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation)
+{
+ struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
+
+ ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK);
+
+ ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0;
+ ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0;
+
+ if (rotation & DRM_MODE_ROTATE_90)
+ ch->frame_ctrl |= 1 << ROT_ENC_POS;
+ else if (rotation & DRM_MODE_ROTATE_180)
+ ch->frame_ctrl |= 2 << ROT_ENC_POS;
+ else if (rotation & DRM_MODE_ROTATE_270)
+ ch->frame_ctrl |= 3 << ROT_ENC_POS;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
new file mode 100644
index 000000000000..8dc2f85c514b
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <drm/drm_of.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+struct dcss_drv {
+ struct dcss_dev *dcss;
+ struct dcss_kms_dev *kms;
+};
+
+struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? mdrv->dcss : NULL;
+}
+
+struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? &mdrv->kms->base : NULL;
+}
+
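+/*
+ * The DCSS output type is inferred from the remote endpoint: if the
+ * first port is connected to the NWL MIPI DSI host, assume a DSI panel;
+ * otherwise assume HDMI.
+ */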
+static int dcss_drv_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *remote;
+ struct dcss_drv *mdrv;
+ int err = 0;
+ bool hdmi_output = true;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ remote = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!remote)
+ return -ENODEV;
+
+ hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi");
+
+ of_node_put(remote);
+
+ mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL);
+ if (!mdrv)
+ return -ENOMEM;
+
+ mdrv->dcss = dcss_dev_create(dev, hdmi_output);
+ if (IS_ERR(mdrv->dcss)) {
+ err = PTR_ERR(mdrv->dcss);
+ goto err;
+ }
+
+ dev_set_drvdata(dev, mdrv);
+
+ mdrv->kms = dcss_kms_attach(mdrv->dcss);
+ if (IS_ERR(mdrv->kms)) {
+ err = PTR_ERR(mdrv->kms);
+ goto dcss_shutoff;
+ }
+
+ return 0;
+
+dcss_shutoff:
+ dcss_dev_destroy(mdrv->dcss);
+
+ dev_set_drvdata(dev, NULL);
+
+err:
+ kfree(mdrv);
+ return err;
+}
+
+static int dcss_drv_platform_remove(struct platform_device *pdev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
+
+ if (!mdrv)
+ return 0;
+
+ dcss_kms_detach(mdrv->kms);
+ dcss_dev_destroy(mdrv->dcss);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ kfree(mdrv);
+
+ return 0;
+}
+
+static struct dcss_type_data dcss_types[] = {
+ [DCSS_IMX8MQ] = {
+ .name = "DCSS_IMX8MQ",
+ .blkctl_ofs = 0x2F000,
+ .ctxld_ofs = 0x23000,
+ .dtg_ofs = 0x20000,
+ .scaler_ofs = 0x1C000,
+ .ss_ofs = 0x1B000,
+ .dpr_ofs = 0x18000,
+ },
+};
+
+static const struct of_device_id dcss_of_match[] = {
+ { .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, dcss_of_match);
+
+static const struct dev_pm_ops dcss_dev_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
+ SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
+ dcss_dev_runtime_resume, NULL)
+};
+
+static struct platform_driver dcss_platform_driver = {
+ .probe = dcss_drv_platform_probe,
+ .remove = dcss_drv_platform_remove,
+ .driver = {
+ .name = "imx-dcss",
+ .of_match_table = dcss_of_match,
+ .pm = &dcss_dev_pm,
+ },
+};
+
+module_platform_driver(dcss_platform_driver);
+
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
+MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
new file mode 100644
index 000000000000..30de00540f63
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_DTG_TC_CONTROL_STATUS 0x00
+#define CH3_EN BIT(0)
+#define CH2_EN BIT(1)
+#define CH1_EN BIT(2)
+#define OVL_DATA_MODE BIT(3)
+#define BLENDER_VIDEO_ALPHA_SEL BIT(7)
+#define DTG_START BIT(8)
+#define DBY_MODE_EN BIT(9)
+#define CH1_ALPHA_SEL BIT(10)
+#define CSS_PIX_COMP_SWAP_POS 12
+#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12)
+#define DEFAULT_FG_ALPHA_POS 24
+#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24)
+#define DCSS_DTG_TC_DTG 0x04
+#define DCSS_DTG_TC_DISP_TOP 0x08
+#define DCSS_DTG_TC_DISP_BOT 0x0C
+#define DCSS_DTG_TC_CH1_TOP 0x10
+#define DCSS_DTG_TC_CH1_BOT 0x14
+#define DCSS_DTG_TC_CH2_TOP 0x18
+#define DCSS_DTG_TC_CH2_BOT 0x1C
+#define DCSS_DTG_TC_CH3_TOP 0x20
+#define DCSS_DTG_TC_CH3_BOT 0x24
+#define TC_X_POS 0
+#define TC_X_MASK GENMASK(12, 0)
+#define TC_Y_POS 16
+#define TC_Y_MASK GENMASK(28, 16)
+#define DCSS_DTG_TC_CTXLD 0x28
+#define TC_CTXLD_DB_Y_POS 0
+#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0)
+#define TC_CTXLD_SB_Y_POS 16
+#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16)
+#define DCSS_DTG_TC_CH1_BKRND 0x2C
+#define DCSS_DTG_TC_CH2_BKRND 0x30
+#define BKRND_R_Y_COMP_POS 20
+#define BKRND_R_Y_COMP_MASK GENMASK(29, 20)
+#define BKRND_G_U_COMP_POS 10
+#define BKRND_G_U_COMP_MASK GENMASK(19, 10)
+#define BKRND_B_V_COMP_POS 0
+#define BKRND_B_V_COMP_MASK GENMASK(9, 0)
+#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38
+#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C
+#define DCSS_DTG_BLENDER_DBY_BDP 0x40
+#define DCSS_DTG_BLENDER_BKRND_I 0x44
+#define DCSS_DTG_BLENDER_BKRND_P 0x48
+#define DCSS_DTG_BLENDER_BKRND_T 0x4C
+#define DCSS_DTG_LINE0_INT 0x50
+#define DCSS_DTG_LINE1_INT 0x54
+#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58
+#define DCSS_DTG_INT_STATUS 0x5C
+#define DCSS_DTG_INT_CONTROL 0x60
+#define DCSS_DTG_TC_CH3_BKRND 0x64
+#define DCSS_DTG_INT_MASK 0x68
+#define LINE0_IRQ BIT(0)
+#define LINE1_IRQ BIT(1)
+#define LINE2_IRQ BIT(2)
+#define LINE3_IRQ BIT(3)
+#define DCSS_DTG_LINE2_INT 0x6C
+#define DCSS_DTG_LINE3_INT 0x70
+#define DCSS_DTG_DBY_OL 0x74
+#define DCSS_DTG_DBY_BL 0x78
+#define DCSS_DTG_DBY_EL 0x7C
+
+struct dcss_dtg {
+ struct device *dev;
+ struct dcss_ctxld *ctxld;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ u32 ctx_id;
+
+ bool in_use;
+
+ u32 dis_ulc_x;
+ u32 dis_ulc_y;
+
+ u32 control_status;
+ u32 alpha;
+ u32 alpha_cfg;
+
+ int ctxld_kick_irq;
+ bool ctxld_kick_irq_en;
+};
+
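+/*
+ * While the DTG is off, write the register directly so the value takes
+ * effect immediately; in all cases, also queue it in the double-buffered
+ * context so it survives the next context load.
+ */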
+static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
+{
+ if (!dtg->in_use)
+ dcss_writel(val, dtg->base_reg + ofs);
+
+ dcss_ctxld_write(dtg->ctxld, dtg->ctx_id,
+ val, dtg->base_ofs + ofs);
+}
+
+static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
+{
+ struct dcss_dtg *dtg = data;
+ u32 status;
+
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+
+ if (!(status & LINE0_IRQ))
+ return IRQ_NONE;
+
+ dcss_ctxld_kick(dtg->ctxld);
+
+ dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
+
+ return IRQ_HANDLED;
+}
+
+static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
+ if (dtg->ctxld_kick_irq < 0)
+ return dtg->ctxld_kick_irq;
+
+ dcss_update(0, LINE0_IRQ | LINE1_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_MASK);
+
+ ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler,
+ 0, "dcss_ctxld_kick", dtg);
+ if (ret) {
+ dev_err(dtg->dev, "dtg: irq request failed.\n");
+ return ret;
+ }
+
+ disable_irq(dtg->ctxld_kick_irq);
+
+ dtg->ctxld_kick_irq_en = false;
+
+ return 0;
+}
+
+int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
+{
+ int ret = 0;
+ struct dcss_dtg *dtg;
+
+ dtg = kzalloc(sizeof(*dtg), GFP_KERNEL);
+ if (!dtg)
+ return -ENOMEM;
+
+ dcss->dtg = dtg;
+ dtg->dev = dcss->dev;
+ dtg->ctxld = dcss->ctxld;
+
+ dtg->base_reg = ioremap(dtg_base, SZ_4K);
+ if (!dtg->base_reg) {
+ dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ dtg->base_ofs = dtg_base;
+ dtg->ctx_id = CTX_DB;
+
+ dtg->alpha = 255;
+
+ dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
+ ((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
+
+ ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ iounmap(dtg->base_reg);
+
+err_ioremap:
+ kfree(dtg);
+
+ return ret;
+}
+
+void dcss_dtg_exit(struct dcss_dtg *dtg)
+{
+ free_irq(dtg->ctxld_kick_irq, dtg);
+
+ if (dtg->base_reg)
+ iounmap(dtg->base_reg);
+
+ kfree(dtg);
+}
+
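+/*
+ * Program the DTG frame geometry and the pixel clock from the video
+ * mode. For illustration, with standard 1080p timings (hsync_len 44,
+ * hback_porch 148), the active area starts at
+ * dis_ulc_x = 44 + 148 - 1 = 191.
+ */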
+void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
+{
+ struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev);
+ u16 dtg_lrc_x, dtg_lrc_y;
+ u16 dis_ulc_x, dis_ulc_y;
+ u16 dis_lrc_x, dis_lrc_y;
+ u32 sb_ctxld_trig, db_ctxld_trig;
+ u32 pixclock = vm->pixelclock;
+ u32 actual_clk;
+
+ dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
+ vm->vactive - 1;
+ dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
+ dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
+ dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
+ dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
+ vm->vactive - 1;
+
+ clk_disable_unprepare(dcss->pix_clk);
+ clk_set_rate(dcss->pix_clk, vm->pixelclock);
+ clk_prepare_enable(dcss->pix_clk);
+
+ actual_clk = clk_get_rate(dcss->pix_clk);
+ if (pixclock != actual_clk) {
+ dev_info(dtg->dev,
+ "Pixel clock set to %u kHz instead of %u kHz.\n",
+ (actual_clk / 1000), (pixclock / 1000));
+ }
+
+ dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
+ DCSS_DTG_TC_DTG);
+ dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
+ DCSS_DTG_TC_DISP_TOP);
+ dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
+ DCSS_DTG_TC_DISP_BOT);
+
+ dtg->dis_ulc_x = dis_ulc_x;
+ dtg->dis_ulc_y = dis_ulc_y;
+
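+	/* trigger SB context load at line 0, DB context load at 99% of frame */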
+ sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
+ TC_CTXLD_SB_Y_MASK;
+ db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
+ TC_CTXLD_DB_Y_MASK;
+
+ dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD);
+
+ /* vblank trigger */
+ dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);
+
+ /* CTXLD trigger */
+ dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
+}
+
+void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
+ int px, int py, int pw, int ph)
+{
+ u16 p_ulc_x, p_ulc_y;
+ u16 p_lrc_x, p_lrc_y;
+
+ p_ulc_x = dtg->dis_ulc_x + px;
+ p_ulc_y = dtg->dis_ulc_y + py;
+ p_lrc_x = p_ulc_x + pw;
+ p_lrc_y = p_ulc_y + ph;
+
+ if (!px && !py && !pw && !ph) {
+ dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
+ dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
+ } else {
+ dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
+ DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
+ dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
+ DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
+ }
+}
+
+bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
+{
+ if (ch_num)
+ return false;
+
+ return alpha != dtg->alpha;
+}
+
+void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
+ const struct drm_format_info *format, int alpha)
+{
+	/* global alpha is only relevant for the graphics channel (0) */
+ if (ch_num)
+ return;
+
+	/*
+	 * Use global alpha if the pixel format has no alpha channel or if
+	 * the user explicitly chose a non-opaque global alpha value.
+	 */
+ if (!format->has_alpha || alpha != 255)
+ dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK;
+ else /* use per-pixel alpha otherwise */
+ dtg->alpha_cfg = CH1_ALPHA_SEL;
+
+ dtg->alpha = alpha;
+}
+
+void dcss_dtg_css_set(struct dcss_dtg *dtg)
+{
+ dtg->control_status |=
+ (0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
+}
+
+void dcss_dtg_enable(struct dcss_dtg *dtg)
+{
+ dtg->control_status |= DTG_START;
+
+ dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
+ dtg->control_status |= dtg->alpha_cfg;
+
+ dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->in_use = true;
+}
+
+void dcss_dtg_shutoff(struct dcss_dtg *dtg)
+{
+ dtg->control_status &= ~DTG_START;
+
+ dcss_writel(dtg->control_status,
+ dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->in_use = false;
+}
+
+bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
+{
+ return dtg->in_use;
+}
+
+void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
+{
+ u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
+ u32 control_status;
+
+ control_status = dtg->control_status & ~ch_en_map[ch_num];
+ control_status |= en ? ch_en_map[ch_num] : 0;
+
+ control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
+ control_status |= dtg->alpha_cfg;
+
+ if (dtg->control_status != control_status)
+ dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS);
+
+ dtg->control_status = control_status;
+}
+
+void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
+{
+ u32 status;
+ u32 mask = en ? LINE1_IRQ : 0;
+
+ if (en) {
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+ dcss_writel(status & LINE1_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_CONTROL);
+ }
+
+ dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
+}
+
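+/*
+ * Any stale LINE0 status is cleared before unmasking, so that enabling
+ * does not fire an immediate, spurious CTXLD kick; ctxld_kick_irq_en
+ * keeps the enable_irq()/disable_irq() calls balanced.
+ */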
+void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
+{
+ u32 status;
+ u32 mask = en ? LINE0_IRQ : 0;
+
+ if (en) {
+ status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
+
+ if (!dtg->ctxld_kick_irq_en) {
+ dcss_writel(status & LINE0_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_CONTROL);
+ enable_irq(dtg->ctxld_kick_irq);
+ dtg->ctxld_kick_irq_en = true;
+ dcss_update(mask, LINE0_IRQ,
+ dtg->base_reg + DCSS_DTG_INT_MASK);
+ }
+
+ return;
+ }
+
+ if (!dtg->ctxld_kick_irq_en)
+ return;
+
+ disable_irq_nosync(dtg->ctxld_kick_irq);
+ dtg->ctxld_kick_irq_en = false;
+
+ dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
+}
+
+void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
+{
+ dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
+}
+
+bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
+{
+ return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
new file mode 100644
index 000000000000..135a62366ab8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
+
+static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_driver dcss_kms_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &dcss_cma_fops,
+ .name = "imx-dcss",
+ .desc = "i.MX8MQ Display Subsystem",
+ .date = "20190917",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
+{
+ struct drm_mode_config *config = &kms->base.mode_config;
+
+ drm_mode_config_init(&kms->base);
+
+ config->min_width = 1;
+ config->min_height = 1;
+ config->max_width = 4096;
+ config->max_height = 4096;
+ config->allow_fb_modifiers = true;
+ config->normalize_zpos = true;
+
+ config->funcs = &dcss_drm_mode_config_funcs;
+ config->helper_private = &dcss_mode_config_helpers;
+}
+
+static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms)
+{
+ struct drm_device *ddev = &kms->base;
+ struct drm_encoder *encoder = &kms->encoder;
+	struct drm_crtc *crtc = &kms->crtc.base;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
+ &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (!bridge) {
+		dev_err(ddev->dev, "No bridge found.\n");
+ return -ENODEV;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_encoder_init(&kms->base, encoder,
+ &dcss_kms_simple_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret);
+ return ret;
+ }
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ dev_err(ddev->dev, "Unable to attach bridge %pOF\n",
+ bridge->of_node);
+ return ret;
+ }
+
+ kms->connector = drm_bridge_connector_init(ddev, encoder);
+ if (IS_ERR(kms->connector)) {
+ dev_err(ddev->dev, "Unable to create bridge connector.\n");
+ return PTR_ERR(kms->connector);
+ }
+
+ drm_connector_attach_encoder(kms->connector, encoder);
+
+ return 0;
+}
+
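+/*
+ * Attach order: mode config and vblank support first, then the
+ * encoder/bridge connector and the CRTC; the DRM device is registered
+ * only once the whole pipeline is in place, and fbdev is set up last.
+ */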
+struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
+{
+ struct dcss_kms_dev *kms;
+ struct drm_device *drm;
+ struct dcss_crtc *crtc;
+ int ret;
+
+ kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver,
+ struct dcss_kms_dev, base);
+ if (IS_ERR(kms))
+ return kms;
+
+ drm = &kms->base;
+ crtc = &kms->crtc;
+
+ drm->dev_private = dcss;
+
+ dcss_kms_mode_config_init(kms);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto cleanup_mode_config;
+
+ drm->irq_enabled = true;
+
+ ret = dcss_kms_bridge_connector_init(kms);
+ if (ret)
+ goto cleanup_mode_config;
+
+ ret = dcss_crtc_init(crtc, drm);
+ if (ret)
+ goto cleanup_mode_config;
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ drm_bridge_connector_enable_hpd(kms->connector);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto cleanup_crtc;
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return kms;
+
+cleanup_crtc:
+ drm_bridge_connector_disable_hpd(kms->connector);
+ drm_kms_helper_poll_fini(drm);
+ dcss_crtc_deinit(crtc, drm);
+
+cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+
+ return ERR_PTR(ret);
+}
+
+void dcss_kms_detach(struct dcss_kms_dev *kms)
+{
+ struct drm_device *drm = &kms->base;
+
+ drm_dev_unregister(drm);
+ drm_bridge_connector_disable_hpd(kms->connector);
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_crtc_vblank_off(&kms->crtc.base);
+ drm->irq_enabled = false;
+ drm_mode_config_cleanup(drm);
+ dcss_crtc_deinit(&kms->crtc, drm);
+ drm->dev_private = NULL;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.h b/drivers/gpu/drm/imx/dcss/dcss-kms.h
new file mode 100644
index 000000000000..dfe5dd99eea3
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP.
+ */
+
+#ifndef _DCSS_KMS_H_
+#define _DCSS_KMS_H_
+
+#include <drm/drm_encoder.h>
+
+struct dcss_plane {
+ struct drm_plane base;
+
+ int ch_num;
+};
+
+struct dcss_crtc {
+ struct drm_crtc base;
+ struct drm_crtc_state *state;
+
+ struct dcss_plane *plane[3];
+
+ int irq;
+
+ bool disable_ctxld_kick_irq;
+};
+
+struct dcss_kms_dev {
+ struct drm_device base;
+ struct dcss_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
+};
+
+struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss);
+void dcss_kms_detach(struct dcss_kms_dev *kms);
+int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
+void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
+struct dcss_plane *dcss_plane_init(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type,
+ unsigned int zpos);
+
+#endif /* _DCSS_KMS_H_ */
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
new file mode 100644
index 000000000000..961d671f171b
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "dcss-dev.h"
+#include "dcss-kms.h"
+
+static const u32 dcss_common_formats[] = {
+ /* RGB */
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+};
+
+static const u64 dcss_video_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const u64 dcss_graphics_format_modifiers[] = {
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
+{
+ return container_of(p, struct dcss_plane, base);
+}
+
+static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
+{
+ return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
+ ((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR);
+}
+
+static void dcss_plane_destroy(struct drm_plane *plane)
+{
+ struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
+ base);
+
+ drm_plane_cleanup(plane);
+ kfree(dcss_plane);
+}
+
+static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB2101010:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
+ default:
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+ }
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+ default:
+ return false;
+ }
+}
+
+static const struct drm_plane_funcs dcss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dcss_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = dcss_plane_format_mod_supported,
+};
+
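+/*
+ * Full 90/270 degree rotation is only possible for non-YUV GPU-tiled
+ * buffers; linear RGB and NV12/NV21 are limited to 180 degree rotation
+ * and reflections.
+ */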
+static bool dcss_plane_can_rotate(const struct drm_format_info *format,
+ bool mod_present, u64 modifier,
+ unsigned int rotation)
+{
+ bool linear_format = !mod_present ||
+ (mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
+ u32 supported_rotation = DRM_MODE_ROTATE_0;
+
+ if (!format->is_yuv && linear_format)
+ supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_MASK;
+ else if (!format->is_yuv &&
+ modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
+ supported_rotation = DRM_MODE_ROTATE_MASK |
+ DRM_MODE_REFLECT_MASK;
+ else if (format->is_yuv && linear_format &&
+ (format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21))
+ supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_MASK;
+
+ return !!(rotation & supported_rotation);
+}
+
+static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
+{
+ if (src_w < 64 &&
+ (pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21))
+ return false;
+ else if (src_w < 32 &&
+ (pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
+ pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
+ return false;
+
+ return src_w >= 16 && src_h >= 8;
+}
+
+static int dcss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = state->fb;
+ bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_crtc_state *crtc_state;
+ int hdisplay, vdisplay;
+ int min, max;
+ int ret;
+
+ if (!fb || !state->crtc)
+ return 0;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ WARN_ON(!cma_obj);
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+
+ hdisplay = crtc_state->adjusted_mode.hdisplay;
+ vdisplay = crtc_state->adjusted_mode.vdisplay;
+
+ if (!dcss_plane_is_source_size_allowed(state->src_w >> 16,
+ state->src_h >> 16,
+ fb->format->format)) {
+ DRM_DEBUG_KMS("Source plane size is not allowed!\n");
+ return -EINVAL;
+ }
+
+ dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
+ &min, &max);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min, max, !is_primary_plane,
+ false);
+ if (ret)
+ return ret;
+
+ if (!state->visible)
+ return 0;
+
+ if (!dcss_plane_can_rotate(fb->format,
+ !!(fb->flags & DRM_MODE_FB_MODIFIERS),
+ fb->modifier,
+ state->rotation)) {
+ DRM_DEBUG_KMS("requested rotation is not allowed!\n");
+ return -EINVAL;
+ }
+
+ if ((state->crtc_x < 0 || state->crtc_y < 0 ||
+ state->crtc_x + state->crtc_w > hdisplay ||
+ state->crtc_y + state->crtc_h > vdisplay) &&
+ !dcss_plane_fb_is_linear(fb)) {
+ DRM_DEBUG_KMS("requested cropping operation is not allowed!\n");
+ return -EINVAL;
+ }
+
+ if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
+ !plane->funcs->format_mod_supported(plane,
+ fb->format->format,
+ fb->modifier)) {
+ DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
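+/*
+ * Plane source coordinates (src.x1/y1) are in 16.16 fixed point. The
+ * >> 16 shifts extract the integer pixel position; the >> 17 shifts
+ * halve it, for the 2x2 subsampled NV12/NV21 chroma plane and for the
+ * 2-pixel macropixels of the packed 4:2:2 formats.
+ */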
+static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
+{
+ struct drm_plane *plane = &dcss_plane->base;
+ struct drm_plane_state *state = plane->state;
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ unsigned long p1_ba = 0, p2_ba = 0;
+
+ if (!format->is_yuv ||
+ format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21)
+ p1_ba = cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * (state->src.y1 >> 16) +
+ format->char_per_block[0] * (state->src.x1 >> 16);
+ else if (format->format == DRM_FORMAT_UYVY ||
+ format->format == DRM_FORMAT_VYUY ||
+ format->format == DRM_FORMAT_YUYV ||
+ format->format == DRM_FORMAT_YVYU)
+ p1_ba = cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * (state->src.y1 >> 16) +
+ 2 * format->char_per_block[0] * (state->src.x1 >> 17);
+
+ if (format->format == DRM_FORMAT_NV12 ||
+ format->format == DRM_FORMAT_NV21)
+ p2_ba = cma_obj->paddr + fb->offsets[1] +
+ (((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
+ (state->src.x1 >> 17)) << 1);
+
+ dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
+ fb->pitches[0]);
+}
+
+static bool dcss_plane_needs_setup(struct drm_plane_state *state,
+ struct drm_plane_state *old_state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+
+ return state->crtc_x != old_state->crtc_x ||
+ state->crtc_y != old_state->crtc_y ||
+ state->crtc_w != old_state->crtc_w ||
+ state->crtc_h != old_state->crtc_h ||
+ state->src_x != old_state->src_x ||
+ state->src_y != old_state->src_y ||
+ state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ fb->format->format != old_fb->format->format ||
+ fb->modifier != old_fb->modifier ||
+ state->rotation != old_state->rotation;
+}
+
+static void dcss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+ struct drm_framebuffer *fb = state->fb;
+ u32 pixel_format;
+ struct drm_crtc_state *crtc_state;
+ bool modifiers_present;
+ u32 src_w, src_h, dst_w, dst_h;
+ struct drm_rect src, dst;
+ bool enable = true;
+
+ if (!fb || !state->crtc || !state->visible)
+ return;
+
+ pixel_format = state->fb->format->format;
+ crtc_state = state->crtc->state;
+ modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
+
+ if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
+ !dcss_plane_needs_setup(state, old_state)) {
+ dcss_plane_atomic_set_base(dcss_plane);
+ return;
+ }
+
+ src = plane->state->src;
+ dst = plane->state->dst;
+
+	/* the width and height after clipping */
+ src_w = drm_rect_width(&src) >> 16;
+ src_h = drm_rect_height(&src) >> 16;
+ dst_w = drm_rect_width(&dst);
+ dst_h = drm_rect_height(&dst);
+
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ modifiers_present = false;
+
+ dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format,
+ modifiers_present ? fb->modifier :
+ DRM_FORMAT_MOD_LINEAR);
+ dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
+ dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
+ state->rotation);
+
+ dcss_plane_atomic_set_base(dcss_plane);
+
+ dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
+ state->fb->format, src_w, src_h,
+ dst_w, dst_h,
+ drm_mode_vrefresh(&crtc_state->mode));
+
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
+ dst.x1, dst.y1, dst_w, dst_h);
+ dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
+ fb->format, state->alpha >> 8);
+
+ if (!dcss_plane->ch_num && (state->alpha >> 8) == 0)
+ enable = false;
+
+ dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
+ dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);
+
+ if (!enable)
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
+ 0, 0, 0, 0);
+
+ dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
+}
+
+static void dcss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dcss_plane *dcss_plane = to_dcss_plane(plane);
+ struct dcss_dev *dcss = plane->dev->dev_private;
+
+ dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
+ dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
+ dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
+ dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
+}
+
+static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
+ .atomic_check = dcss_plane_atomic_check,
+ .atomic_update = dcss_plane_atomic_update,
+ .atomic_disable = dcss_plane_atomic_disable,
+};
+
+struct dcss_plane *dcss_plane_init(struct drm_device *drm,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type,
+ unsigned int zpos)
+{
+ struct dcss_plane *dcss_plane;
+ const u64 *format_modifiers = dcss_video_format_modifiers;
+ int ret;
+
+ if (zpos > 2)
+ return ERR_PTR(-EINVAL);
+
+ dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
+ if (!dcss_plane) {
+ DRM_ERROR("failed to allocate plane\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ format_modifiers = dcss_graphics_format_modifiers;
+
+ ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
+ &dcss_plane_funcs, dcss_common_formats,
+ ARRAY_SIZE(dcss_common_formats),
+ format_modifiers, type, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(dcss_plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);
+
+ ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_create_rotation_property(&dcss_plane->base,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ dcss_plane->ch_num = zpos;
+
+ return dcss_plane;
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
new file mode 100644
index 000000000000..cd21905de580
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ *
+ * Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_SCALER_CTRL 0x00
+#define SCALER_EN BIT(0)
+#define REPEAT_EN BIT(4)
+#define SCALE2MEM_EN BIT(8)
+#define MEM2OFIFO_EN BIT(12)
+#define DCSS_SCALER_OFIFO_CTRL 0x04
+#define OFIFO_LOW_THRES_POS 0
+#define OFIFO_LOW_THRES_MASK GENMASK(9, 0)
+#define OFIFO_HIGH_THRES_POS 16
+#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16)
+#define UNDERRUN_DETECT_CLR BIT(26)
+#define LOW_THRES_DETECT_CLR BIT(27)
+#define HIGH_THRES_DETECT_CLR BIT(28)
+#define UNDERRUN_DETECT_EN BIT(29)
+#define LOW_THRES_DETECT_EN BIT(30)
+#define HIGH_THRES_DETECT_EN BIT(31)
+#define DCSS_SCALER_SDATA_CTRL 0x08
+#define YUV_EN BIT(0)
+#define RTRAM_8LINES BIT(1)
+#define Y_UV_BYTE_SWAP BIT(4)
+#define A2R10G10B10_FORMAT_POS 8
+#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8)
+#define DCSS_SCALER_BIT_DEPTH 0x0C
+#define LUM_BIT_DEPTH_POS 0
+#define LUM_BIT_DEPTH_MASK GENMASK(1, 0)
+#define CHR_BIT_DEPTH_POS 4
+#define CHR_BIT_DEPTH_MASK GENMASK(5, 4)
+#define DCSS_SCALER_SRC_FORMAT 0x10
+#define DCSS_SCALER_DST_FORMAT 0x14
+#define FORMAT_MASK GENMASK(1, 0)
+#define DCSS_SCALER_SRC_LUM_RES 0x18
+#define DCSS_SCALER_SRC_CHR_RES 0x1C
+#define DCSS_SCALER_DST_LUM_RES 0x20
+#define DCSS_SCALER_DST_CHR_RES 0x24
+#define WIDTH_POS 0
+#define WIDTH_MASK GENMASK(11, 0)
+#define HEIGHT_POS 16
+#define HEIGHT_MASK GENMASK(27, 16)
+#define DCSS_SCALER_V_LUM_START 0x48
+#define V_START_MASK GENMASK(15, 0)
+#define DCSS_SCALER_V_LUM_INC 0x4C
+#define V_INC_MASK GENMASK(15, 0)
+#define DCSS_SCALER_H_LUM_START 0x50
+#define H_START_MASK GENMASK(18, 0)
+#define DCSS_SCALER_H_LUM_INC 0x54
+#define H_INC_MASK GENMASK(15, 0)
+#define DCSS_SCALER_V_CHR_START 0x58
+#define DCSS_SCALER_V_CHR_INC 0x5C
+#define DCSS_SCALER_H_CHR_START 0x60
+#define DCSS_SCALER_H_CHR_INC 0x64
+#define DCSS_SCALER_COEF_VLUM 0x80
+#define DCSS_SCALER_COEF_HLUM 0x140
+#define DCSS_SCALER_COEF_VCHR 0x200
+#define DCSS_SCALER_COEF_HCHR 0x300
+
+struct dcss_scaler_ch {
+ void __iomem *base_reg;
+ u32 base_ofs;
+ struct dcss_scaler *scl;
+
+ u32 sdata_ctrl;
+ u32 scaler_ctrl;
+
+ bool scaler_ctrl_chgd;
+
+ u32 c_vstart;
+ u32 c_hstart;
+};
+
+struct dcss_scaler {
+ struct device *dev;
+
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ struct dcss_scaler_ch ch[3];
+};
+
+/* scaler coefficients generator */
+#define PSC_FRAC_BITS 30
+#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS)
+#define PSC_BITS_FOR_PHASE 4
+#define PSC_NUM_PHASES 16
+#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1)
+#define PSC_NUM_TAPS 7
+#define PSC_NUM_TAPS_RGBA 5
+#define PSC_COEFF_PRECISION 10
+#define PSC_PHASE_FRACTION_BITS 13
+#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1)
+#define PSC_Q_FRACTION 19
+#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1))
+
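+/*
+ * The generator below works in signed Q19 fixed point (PSC_Q_FRACTION
+ * fractional bits), i.e. 1.0 is represented as 1 << 19; mult_q() and
+ * div_q() keep results in the same format, with rounding.
+ */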
+/**
+ * mult_q() - Performs fixed-point multiplication.
+ * @A: multiplier
+ * @B: multiplicand
+ */
+static int mult_q(int A, int B)
+{
+ int result;
+ s64 temp;
+
+	temp = (s64)A * (s64)B;
+ temp += PSC_Q_ROUND_OFFSET;
+ result = (int)(temp >> PSC_Q_FRACTION);
+ return result;
+}
+
+/**
+ * div_q() - Performs fixed-point division.
+ * @A: dividend
+ * @B: divisor
+ */
+static int div_q(int A, int B)
+{
+ int result;
+ s64 temp;
+
+	temp = (s64)A << PSC_Q_FRACTION;
+ if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0))
+ temp += B / 2;
+ else
+ temp -= B / 2;
+
+ result = (int)(temp / B);
+ return result;
+}
+
+/**
+ * exp_approx_q() - Compute approximation to exp(x) function using Taylor
+ * series.
+ * @x: fixed-point argument of exp function
+ */
+static int exp_approx_q(int x)
+{
+ int sum = 1 << PSC_Q_FRACTION;
+ int term = 1 << PSC_Q_FRACTION;
+
+ term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION));
+ sum += term;
+ term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION));
+ sum += term;
+
+ return sum;
+}
+
+/**
+ * dcss_scaler_gaussian_filter() - Generate gaussian prototype filter.
+ * @fc_q: fixed-point cutoff frequency normalized to range [0, 1]
+ * @use_5_taps: indicates whether to use 5 taps or 7 taps
+ * @phase0_identity: replace phase 0 with an identity filter when true
+ * @coef: output filter coefficients
+ */
+static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
+ bool phase0_identity,
+ int coef[][PSC_NUM_TAPS])
+{
+ int sigma_q, g0_q, g1_q, g2_q;
+ int tap_cnt1, tap_cnt2, tap_idx, phase_cnt;
+ int mid;
+ int phase;
+ int i;
+ int taps;
+
+ if (use_5_taps)
+ for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
+ coef[phase][0] = 0;
+ coef[phase][PSC_NUM_TAPS - 1] = 0;
+ }
+
+ /* seed coefficient scanner */
+ taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS;
+ mid = (PSC_NUM_PHASES * taps) / 2 - 1;
+ phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2;
+ tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
+ tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
+
+ /* seed gaussian filter generator */
+ sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q);
+ g0_q = 1 << PSC_Q_FRACTION;
+ g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET,
+ mult_q(sigma_q, sigma_q)));
+ g2_q = mult_q(g1_q, g1_q);
+ coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q;
+
+ for (i = 0; i < mid; i++) {
+ phase_cnt++;
+ tap_cnt1--;
+ tap_cnt2++;
+
+ g0_q = mult_q(g0_q, g1_q);
+ g1_q = mult_q(g1_q, g2_q);
+
+ if ((phase_cnt & PSC_PHASE_MASK) <= 8) {
+ tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE;
+ coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q;
+ }
+ if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) {
+ tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE;
+ coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q;
+ }
+ }
+
+ phase_cnt++;
+ tap_cnt1--;
+ coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0;
+
+ /* override phase 0 with identity filter if specified */
+ if (phase0_identity)
+ for (i = 0; i < PSC_NUM_TAPS; i++)
+ coef[0][i] = i == (PSC_NUM_TAPS >> 1) ?
+ (1 << PSC_COEFF_PRECISION) : 0;
+
+ /* normalize coef */
+ for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
+ int sum = 0;
+ s64 ll_temp;
+
+ for (i = 0; i < PSC_NUM_TAPS; i++)
+ sum += coef[phase][i];
+ for (i = 0; i < PSC_NUM_TAPS; i++) {
+ ll_temp = coef[phase][i];
+ ll_temp <<= PSC_COEFF_PRECISION;
+ ll_temp += sum >> 1;
+ ll_temp /= sum;
+ coef[phase][i] = (int)ll_temp;
+ }
+ }
+}
+
+/**
+ * dcss_scaler_filter_design() - Compute filter coefficients using
+ *				 Gaussian filter.
+ * @src_length: length of input
+ * @dst_length: length of output
+ * @use_5_taps: true for 5 taps per phase, false for 7 taps
+ * @phase0_identity: replace phase 0 with an identity filter when true
+ * @coef: output coefficients
+ */
+static void dcss_scaler_filter_design(int src_length, int dst_length,
+ bool use_5_taps, bool phase0_identity,
+ int coef[][PSC_NUM_TAPS])
+{
+ int fc_q;
+
+ /* compute cutoff frequency */
+ if (dst_length >= src_length)
+ fc_q = div_q(1, PSC_NUM_PHASES);
+ else
+ fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
+
+ /* compute gaussian filter coefficients */
+ dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
+}
+
+static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
+{
+ struct dcss_scaler *scl = ch->scl;
+
+ dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs);
+}
+
+static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
+ unsigned long scaler_base)
+{
+ struct dcss_scaler_ch *ch;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ch = &scl->ch[i];
+
+ ch->base_ofs = scaler_base + i * 0x400;
+
+ ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
+ if (!ch->base_reg) {
+ dev_err(scl->dev, "scaler: unable to remap ch base\n");
+ return -ENOMEM;
+ }
+
+ ch->scl = scl;
+ }
+
+ return 0;
+}
+
+int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
+{
+ struct dcss_scaler *scaler;
+
+ scaler = kzalloc(sizeof(*scaler), GFP_KERNEL);
+ if (!scaler)
+ return -ENOMEM;
+
+ dcss->scaler = scaler;
+ scaler->dev = dcss->dev;
+ scaler->ctxld = dcss->ctxld;
+ scaler->ctx_id = CTX_SB_HP;
+
+ if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (scaler->ch[i].base_reg)
+ iounmap(scaler->ch[i].base_reg);
+ }
+
+ kfree(scaler);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void dcss_scaler_exit(struct dcss_scaler *scl)
+{
+ int ch_no;
+
+ for (ch_no = 0; ch_no < 3; ch_no++) {
+ struct dcss_scaler_ch *ch = &scl->ch[ch_no];
+
+ dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
+
+ if (ch->base_reg)
+ iounmap(ch->base_reg);
+ }
+
+ kfree(scl);
+}
+
+void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+ u32 scaler_ctrl;
+
+ scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0;
+
+ if (en)
+ dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL);
+
+ if (ch->scaler_ctrl != scaler_ctrl)
+ ch->scaler_ctrl_chgd = true;
+
+ ch->scaler_ctrl = scaler_ctrl;
+}
+
+static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en)
+{
+ ch->sdata_ctrl &= ~YUV_EN;
+ ch->sdata_ctrl |= en ? YUV_EN : 0;
+}
+
+static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en)
+{
+ ch->sdata_ctrl &= ~RTRAM_8LINES;
+ ch->sdata_ctrl |= en ? RTRAM_8LINES : 0;
+}
+
+static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth)
+{
+ u32 val;
+
+ val = depth == 30 ? 2 : 0;
+
+ dcss_scaler_write(ch,
+ ((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) |
+ ((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK),
+ DCSS_SCALER_BIT_DEPTH);
+}
+
+enum buffer_format {
+ BUF_FMT_YUV420,
+ BUF_FMT_YUV422,
+ BUF_FMT_ARGB8888_YUV444,
+};
+
+enum chroma_location {
+ PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0,
+ PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1,
+ PSC_LOC_HORZ_0_VERT_0 = 2,
+ PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3,
+ PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4,
+ PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5
+};
+
+static void dcss_scaler_format_set(struct dcss_scaler_ch *ch,
+ enum buffer_format src_fmt,
+ enum buffer_format dst_fmt)
+{
+ dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT);
+ dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT);
+}
+
+static void dcss_scaler_res_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres,
+ int dst_xres, int dst_yres,
+ u32 pix_format, enum buffer_format dst_format)
+{
+ u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres;
+ u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres;
+ bool src_is_444 = true;
+
+ lsrc_xres = src_xres;
+ csrc_xres = src_xres;
+ lsrc_yres = src_yres;
+ csrc_yres = src_yres;
+ ldst_xres = dst_xres;
+ cdst_xres = dst_xres;
+ ldst_yres = dst_yres;
+ cdst_yres = dst_yres;
+
+ if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY ||
+ pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) {
+ csrc_xres >>= 1;
+ src_is_444 = false;
+ } else if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21) {
+ csrc_xres >>= 1;
+ csrc_yres >>= 1;
+ src_is_444 = false;
+ }
+
+ if (dst_format == BUF_FMT_YUV422)
+ cdst_xres >>= 1;
+
+ /* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */
+ if (src_is_444 && dst_format == BUF_FMT_YUV422) {
+ lsrc_yres--;
+ csrc_yres--;
+ }
+
+ dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_SRC_LUM_RES);
+ dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_SRC_CHR_RES);
+ dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_DST_LUM_RES);
+ dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
+ (((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
+ DCSS_SCALER_DST_CHR_RES);
+}
+
+#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos))
+#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor))
+
+struct dcss_scaler_factors {
+ int downscale;
+ int upscale;
+};
+
+static const struct dcss_scaler_factors dcss_scaler_factors[] = {
+ {3, 8}, {5, 8}, {5, 8},
+};
+
+static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres,
+ int dst_xres, int dst_yres,
+ u32 src_format, u32 dst_format,
+ enum chroma_location src_chroma_loc)
+{
+ int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres;
+ u32 l_vinc, l_hinc, c_vinc, c_hinc;
+ u32 c_vstart, c_hstart;
+
+ src_c_xres = src_xres;
+ src_c_yres = src_yres;
+ dst_c_xres = dst_xres;
+ dst_c_yres = dst_yres;
+
+ c_vstart = 0;
+ c_hstart = 0;
+
+ /* adjustments for source chroma location */
+ if (src_format == BUF_FMT_YUV420) {
+ /* vertical input chroma position adjustment */
+ switch (src_chroma_loc) {
+ case PSC_LOC_HORZ_0_VERT_1_OVER_4:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
+ /*
+ * move chroma up to first luma line
+ * (1/4 chroma input line spacing)
+ */
+ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
+ break;
+ case PSC_LOC_HORZ_0_VERT_1_OVER_2:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
+ /*
+ * move chroma up to first luma line
+ * (1/2 chroma input line spacing)
+ */
+ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1));
+ break;
+ default:
+ break;
+ }
+ /* horizontal input chroma position adjustment */
+ switch (src_chroma_loc) {
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_0:
+ case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
+ /* move chroma left 1/4 chroma input sample spacing */
+ c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* adjustments to chroma resolution */
+ if (src_format == BUF_FMT_YUV420) {
+ src_c_xres >>= 1;
+ src_c_yres >>= 1;
+ } else if (src_format == BUF_FMT_YUV422) {
+ src_c_xres >>= 1;
+ }
+
+ if (dst_format == BUF_FMT_YUV422)
+ dst_c_xres >>= 1;
+
+ l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres;
+ c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres;
+ l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres;
+ c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres;
+
+ /* save chroma start phase */
+ ch->c_vstart = c_vstart;
+ ch->c_hstart = c_hstart;
+
+ dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START);
+ dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC);
+
+ dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START);
+ dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC);
+
+ dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START);
+ dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC);
+
+ dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START);
+ dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC);
+}
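The increments written above are rounded divisions in 3.13 fixed point, where 8192 represents 1.0. A worked example for a 1080-line source scaled to 720 output lines:

	l_vinc = ((1080 << 13) + (720 >> 1)) / 720
	       = (8847360 + 360) / 720
	       = 12288                  /* 1.5 in 3.13 fixed point */

so the vertical source position advances 1.5 source lines per output line. The chroma start offsets saved above are quarter- or half-line negative phases in the (presumably matching) PSC_PHASE_FRACTION_BITS fixed-point format.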
+
+int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
+ int *min, int *max)
+{
+ *min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16);
+ *max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16);
+
+ return 0;
+}
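With fp_pos = 16, channel 0's factors ({.downscale = 3, .upscale = 8}) evaluate to:

	*min = upscale_fp(8, 16)   = (1 << 16) / 8 = 0x2000    /* ratio 1/8 */
	*max = downscale_fp(3, 16) = 3 << 16       = 0x30000   /* ratio 3/1 */

i.e. the channel accepts 16.16 fixed-point src/dst scaling ratios from 8x upscaling down to 3x downscaling.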
+
+static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch,
+ int base_addr,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, phase;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++) {
+ dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 |
+ (coef[i][2] & 0xfff) << 4 |
+ (coef[i][3] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 |
+ (coef[i][4] & 0xfff) << 8 |
+ (coef[i][5] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+
+ /* reverse both phase and tap orderings */
+ for (phase = (PSC_NUM_PHASES >> 1) - 1;
+ i < PSC_NUM_PHASES; i++, phase--) {
+ dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 |
+ (coef[phase][4] & 0xfff) << 4 |
+ (coef[phase][3] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 |
+ (coef[phase][2] & 0xfff) << 8 |
+ (coef[phase][1] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+}
+
+static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch,
+ int base_addr,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, phase;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++) {
+ dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 |
+ (coef[i][1] & 0xfff) << 4 |
+ (coef[i][2] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 |
+ (coef[i][3] & 0xfff) << 8 |
+ (coef[i][4] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 |
+ (coef[i][5] & 0xfff) << 12 |
+ (coef[i][6] & 0xfff)),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+
+ /* reverse both phase and tap orderings */
+ for (phase = (PSC_NUM_PHASES >> 1) - 1;
+ i < PSC_NUM_PHASES; i++, phase--) {
+ dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 |
+ (coef[phase][5] & 0xfff) << 4 |
+ (coef[phase][4] & 0xf00) >> 8),
+ base_addr + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 |
+ (coef[phase][3] & 0xfff) << 8 |
+ (coef[phase][2] & 0xff0) >> 4),
+ base_addr + 0x40 + i * sizeof(u32));
+ dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 |
+ (coef[phase][1] & 0xfff) << 12 |
+ (coef[phase][0] & 0xfff)),
+ base_addr + 0x80 + i * sizeof(u32));
+ }
+}
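Both unrolled loops implement the same layout: the 12-bit taps of each phase are concatenated MSB first into the low 28 bits of three consecutive 32-bit registers (84 payload bits in 96). A compact bit-by-bit equivalent for one seven-tap phase, illustrative only and not the driver's code:

/* pack seven signed 12-bit taps into words[0..2], MSB first, bits 27:0 */
static void dcss_pack_taps_sketch(const int taps[7], u32 words[3])
{
	int i, w, bit = 0;

	words[0] = words[1] = words[2] = 0;

	for (i = 0; i < 7; i++) {
		for (w = 11; w >= 0; w--, bit++) {
			if ((taps[i] >> w) & 1)
				words[bit / 28] |= 1U << (27 - bit % 28);
		}
	}
}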
+
+static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
+ enum buffer_format src_format,
+ enum buffer_format dst_format,
+ bool use_5_taps,
+ int src_xres, int src_yres, int dst_xres,
+ int dst_yres)
+{
+ int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
+ bool program_5_taps = use_5_taps ||
+ (dst_format == BUF_FMT_YUV422 &&
+ src_format == BUF_FMT_ARGB8888_YUV444);
+
+ /* horizontal luma */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ src_xres == dst_xres, coef);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
+
+ /* vertical luma */
+ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
+ src_yres == dst_yres, coef);
+
+ if (program_5_taps)
+ dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+ else
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+
+ /* adjust chroma resolution */
+ if (src_format != BUF_FMT_ARGB8888_YUV444)
+ src_xres >>= 1;
+ if (src_format == BUF_FMT_YUV420)
+ src_yres >>= 1;
+ if (dst_format != BUF_FMT_ARGB8888_YUV444)
+ dst_xres >>= 1;
+ if (dst_format == BUF_FMT_YUV420) /* should not happen */
+ dst_yres >>= 1;
+
+ /* horizontal chroma */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ (src_xres == dst_xres) && (ch->c_hstart == 0),
+ coef);
+
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
+
+ /* vertical chroma */
+ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
+ (src_yres == dst_yres) && (ch->c_vstart == 0),
+ coef);
+ if (program_5_taps)
+ dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
+ else
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
+}
+
+static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
+ int src_xres, int src_yres, int dst_xres,
+ int dst_yres)
+{
+ int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
+
+ /* horizontal RGB */
+ dcss_scaler_filter_design(src_xres, dst_xres, false,
+ src_xres == dst_xres, coef);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
+
+ /* vertical RGB */
+ dcss_scaler_filter_design(src_yres, dst_yres, false,
+ src_yres == dst_yres, coef);
+ dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
+}
+
+static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
+ const struct drm_format_info *format)
+{
+ u32 a2r10g10b10_format;
+
+ if (format->is_yuv)
+ return;
+
+ ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK;
+
+ if (format->depth != 30)
+ return;
+
+ switch (format->format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_XRGB2101010:
+ a2r10g10b10_format = 0;
+ break;
+
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR2101010:
+ a2r10g10b10_format = 5;
+ break;
+
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_RGBX1010102:
+ a2r10g10b10_format = 6;
+ break;
+
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_BGRX1010102:
+ a2r10g10b10_format = 11;
+ break;
+
+ default:
+ a2r10g10b10_format = 0;
+ break;
+ }
+
+ ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
+}
+
+void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
+ const struct drm_format_info *format,
+ int src_xres, int src_yres, int dst_xres, int dst_yres,
+ u32 vrefresh_hz)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+ unsigned int pixel_depth = 0;
+ bool rtr_8line_en = false;
+ bool use_5_taps = false;
+ enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444;
+ enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444;
+ u32 pix_format = format->format;
+
+ if (format->is_yuv) {
+ dcss_scaler_yuv_enable(ch, true);
+
+ if (pix_format == DRM_FORMAT_NV12 ||
+ pix_format == DRM_FORMAT_NV21) {
+ rtr_8line_en = true;
+ src_format = BUF_FMT_YUV420;
+ } else if (pix_format == DRM_FORMAT_UYVY ||
+ pix_format == DRM_FORMAT_VYUY ||
+ pix_format == DRM_FORMAT_YUYV ||
+ pix_format == DRM_FORMAT_YVYU) {
+ src_format = BUF_FMT_YUV422;
+ }
+
+ use_5_taps = !rtr_8line_en;
+ } else {
+ dcss_scaler_yuv_enable(ch, false);
+
+ pixel_depth = format->depth;
+ }
+
+ dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres,
+ dst_yres, src_format, dst_format,
+ PSC_LOC_HORZ_0_VERT_1_OVER_4);
+
+ if (format->is_yuv)
+ dcss_scaler_yuv_coef_set(ch, src_format, dst_format,
+ use_5_taps, src_xres, src_yres,
+ dst_xres, dst_yres);
+ else
+ dcss_scaler_rgb_coef_set(ch, src_xres, src_yres,
+ dst_xres, dst_yres);
+
+ dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en);
+ dcss_scaler_bit_depth_set(ch, pixel_depth);
+ dcss_scaler_set_rgb10_order(ch, format);
+ dcss_scaler_format_set(ch, src_format, dst_format);
+ dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres,
+ pix_format, dst_format);
+}
+
+/* This function will be called from interrupt context. */
+void dcss_scaler_write_sclctrl(struct dcss_scaler *scl)
+{
+ int chnum;
+
+ dcss_ctxld_assert_locked(scl->ctxld);
+
+ for (chnum = 0; chnum < 3; chnum++) {
+ struct dcss_scaler_ch *ch = &scl->ch[chnum];
+
+ if (ch->scaler_ctrl_chgd) {
+ dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id,
+ ch->scaler_ctrl,
+ ch->base_ofs +
+ DCSS_SCALER_CTRL);
+ ch->scaler_ctrl_chgd = false;
+ }
+ }
+}
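dcss_scaler_ch_enable() and dcss_scaler_write_sclctrl() together form a small shadow-register scheme: the setup path only records the desired control word and marks it dirty, and the IRQ-safe context-loader path commits it along with the rest of the frame. A generic sketch of the pattern, illustrative only:

#include <linux/types.h>

struct shadow_reg {
	u32 val;
	bool dirty;
};

/* setup path: record only, no MMIO */
static void shadow_reg_set(struct shadow_reg *r, u32 val)
{
	if (r->val != val)
		r->dirty = true;
	r->val = val;
}

/* commit/IRQ path: perform the deferred write exactly once */
static void shadow_reg_flush(struct shadow_reg *r,
			     void (*commit)(u32 val, void *ctx), void *ctx)
{
	if (r->dirty) {
		commit(r->val, ctx);
		r->dirty = false;
	}
}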
diff --git a/drivers/gpu/drm/imx/dcss/dcss-ss.c b/drivers/gpu/drm/imx/dcss/dcss-ss.c
new file mode 100644
index 000000000000..8ddf08da911b
--- /dev/null
+++ b/drivers/gpu/drm/imx/dcss/dcss-ss.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dcss-dev.h"
+
+#define DCSS_SS_SYS_CTRL 0x00
+#define RUN_EN BIT(0)
+#define DCSS_SS_DISPLAY 0x10
+#define LRC_X_POS 0
+#define LRC_X_MASK GENMASK(12, 0)
+#define LRC_Y_POS 16
+#define LRC_Y_MASK GENMASK(28, 16)
+#define DCSS_SS_HSYNC 0x20
+#define DCSS_SS_VSYNC 0x30
+#define SYNC_START_POS 0
+#define SYNC_START_MASK GENMASK(12, 0)
+#define SYNC_END_POS 16
+#define SYNC_END_MASK GENMASK(28, 16)
+#define SYNC_POL BIT(31)
+#define DCSS_SS_DE_ULC 0x40
+#define ULC_X_POS 0
+#define ULC_X_MASK GENMASK(12, 0)
+#define ULC_Y_POS 16
+#define ULC_Y_MASK GENMASK(28, 16)
+#define ULC_POL BIT(31)
+#define DCSS_SS_DE_LRC 0x50
+#define DCSS_SS_MODE 0x60
+#define PIPE_MODE_POS 0
+#define PIPE_MODE_MASK GENMASK(1, 0)
+#define DCSS_SS_COEFF 0x70
+#define HORIZ_A_POS 0
+#define HORIZ_A_MASK GENMASK(3, 0)
+#define HORIZ_B_POS 4
+#define HORIZ_B_MASK GENMASK(7, 4)
+#define HORIZ_C_POS 8
+#define HORIZ_C_MASK GENMASK(11, 8)
+#define HORIZ_H_NORM_POS 12
+#define HORIZ_H_NORM_MASK GENMASK(14, 12)
+#define VERT_A_POS 16
+#define VERT_A_MASK GENMASK(19, 16)
+#define VERT_B_POS 20
+#define VERT_B_MASK GENMASK(23, 20)
+#define VERT_C_POS 24
+#define VERT_C_MASK GENMASK(27, 24)
+#define VERT_H_NORM_POS 28
+#define VERT_H_NORM_MASK GENMASK(30, 28)
+#define DCSS_SS_CLIP_CB 0x80
+#define DCSS_SS_CLIP_CR 0x90
+#define CLIP_MIN_POS 0
+#define CLIP_MIN_MASK GENMASK(9, 0)
+#define CLIP_MAX_POS			16
+#define CLIP_MAX_MASK GENMASK(23, 16)
+#define DCSS_SS_INTER_MODE 0xA0
+#define INT_EN BIT(0)
+#define VSYNC_SHIFT BIT(1)
+
+struct dcss_ss {
+ struct device *dev;
+ void __iomem *base_reg;
+ u32 base_ofs;
+
+ struct dcss_ctxld *ctxld;
+ u32 ctx_id;
+
+ bool in_use;
+};
+
+static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
+{
+ if (!ss->in_use)
+ dcss_writel(val, ss->base_reg + ofs);
+
+ dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
+ ss->base_ofs + ofs);
+}
+
+int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
+{
+ struct dcss_ss *ss;
+
+ ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+ if (!ss)
+ return -ENOMEM;
+
+ dcss->ss = ss;
+ ss->dev = dcss->dev;
+ ss->ctxld = dcss->ctxld;
+
+ ss->base_reg = ioremap(ss_base, SZ_4K);
+ if (!ss->base_reg) {
+ dev_err(dcss->dev, "ss: unable to remap ss base\n");
+ kfree(ss);
+ return -ENOMEM;
+ }
+
+ ss->base_ofs = ss_base;
+ ss->ctx_id = CTX_SB_HP;
+
+ return 0;
+}
+
+void dcss_ss_exit(struct dcss_ss *ss)
+{
+ /* stop SS */
+ dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
+
+ if (ss->base_reg)
+ iounmap(ss->base_reg);
+
+ kfree(ss);
+}
+
+void dcss_ss_subsam_set(struct dcss_ss *ss)
+{
+ dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF);
+ dcss_ss_write(ss, 0, DCSS_SS_MODE);
+ dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
+ dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
+}
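Decoding the magic numbers against the field definitions at the top of this file: each 16-bit half of 0x41614161 is 0x4161, i.e. a symmetric [1 6 1] subsampling kernel with a normalization-shift field of 4, programmed identically for the horizontal and vertical directions:

	0x4161 = 0b0100_0001_0110_0001
	    A      (bits  3:0)  = 1
	    B      (bits  7:4)  = 6
	    C      (bits 11:8)  = 1
	    H_NORM (bits 14:12) = 4

and 0x03ff0000 sets the Cb/Cr clip range to a minimum of 0 and a maximum of 0x3ff.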
+
+void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
+ bool phsync, bool pvsync)
+{
+ u16 lrc_x, lrc_y;
+ u16 hsync_start, hsync_end;
+ u16 vsync_start, vsync_end;
+ u16 de_ulc_x, de_ulc_y;
+ u16 de_lrc_x, de_lrc_y;
+
+ lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
+ vm->vactive - 1;
+
+ dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);
+
+ hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
+ vm->hactive - 1;
+ hsync_end = vm->hsync_len - 1;
+
+ dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
+ ((u32)hsync_end << SYNC_END_POS) | hsync_start,
+ DCSS_SS_HSYNC);
+
+ vsync_start = vm->vfront_porch - 1;
+ vsync_end = vm->vfront_porch + vm->vsync_len - 1;
+
+ dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
+ ((u32)vsync_end << SYNC_END_POS) | vsync_start,
+ DCSS_SS_VSYNC);
+
+ de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
+ de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;
+
+ dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
+ DCSS_SS_DE_ULC);
+
+ de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
+ de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
+ vm->vactive - 1;
+
+ dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
+}
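A worked example with a typical CTA-861 1920x1080@60 mode (hactive 1920, hfront 88, hsync 44, hback 148; vactive 1080, vfront 4, vsync 5, vback 36):

	lrc_x       = 88 + 148 + 44 + 1920 - 1 = 2199   /* htotal 2200 */
	lrc_y       = 4 + 36 + 5 + 1080 - 1    = 1124   /* vtotal 1125 */
	hsync_start = 2199                               /* same as lrc_x */
	hsync_end   = 44 - 1                   = 43
	vsync_start = 4 - 1                    = 3
	vsync_end   = 4 + 5 - 1                = 8
	de_ulc_x    = 44 + 148 - 1             = 191
	de_ulc_y    = 5 + 4 + 36               = 45
	de_lrc_x    = 44 + 148 + 1920 - 1      = 2111
	de_lrc_y    = 5 + 4 + 36 + 1080 - 1    = 1124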
+
+void dcss_ss_enable(struct dcss_ss *ss)
+{
+ dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
+ ss->in_use = true;
+}
+
+void dcss_ss_shutoff(struct dcss_ss *ss)
+{
+ dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
+ ss->in_use = false;
+}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 71d84c7a5378..d07b39b8afd2 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -111,10 +111,6 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
return 0;
}
-static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -140,7 +136,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
.enable = dw_hdmi_imx_encoder_enable,
- .disable = dw_hdmi_imx_encoder_disable,
.atomic_check = dw_hdmi_imx_atomic_check,
};
@@ -219,15 +214,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
+ ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
+ if (ret)
+ return ret;
ret = dw_hdmi_imx_parse_dt(hdmi);
if (ret < 0)
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 7d00c49fd5a5..9bf5ad6d18a2 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -212,7 +213,9 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
@@ -251,7 +254,6 @@ err_poll_fini:
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
err_kms:
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
return ret;
@@ -267,11 +269,9 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
- drm_mode_config_cleanup(drm);
+ drm_dev_put(drm);
dev_set_drvdata(dev, NULL);
-
- drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8791d60be92e..41e2978cb1eb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -62,7 +62,6 @@ struct imx_ldb_channel {
struct i2c_adapter *ddc;
int chno;
void *edid;
- int edid_len;
struct drm_display_mode mode;
int mode_valid;
u32 bus_format;
@@ -455,13 +454,6 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
- if (imx_ldb_ch->panel) {
- ret = drm_panel_attach(imx_ldb_ch->panel,
- &imx_ldb_ch->connector);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -543,15 +535,14 @@ static int imx_ldb_panel_ddc(struct device *dev,
}
if (!channel->ddc) {
+ int edid_len;
+
/* if no DDC available, fallback to hardcoded EDID */
dev_dbg(dev, "no ddc available\n");
- edidp = of_get_property(child, "edid",
- &channel->edid_len);
+ edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
- channel->edid = kmemdup(edidp,
- channel->edid_len,
- GFP_KERNEL);
+ channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
@@ -702,9 +693,6 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
for (i = 0; i < 2; i++) {
struct imx_ldb_channel *channel = &imx_ldb->channel[i];
- if (channel->panel)
- drm_panel_detach(channel->panel);
-
kfree(channel->edid);
i2c_put_adapter(channel->ddc);
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 813bb6156a68..2a8d2e32e7b4 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <video/imx-ipu-v3.h>
@@ -104,8 +103,6 @@ struct imx_tve {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
- spinlock_t lock; /* register lock */
- bool enabled;
int mode;
int di_hsync_pin;
int di_vsync_pin;
@@ -129,30 +126,10 @@ static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
return container_of(e, struct imx_tve, encoder);
}
-static void tve_lock(void *__tve)
-__acquires(&tve->lock)
-{
- struct imx_tve *tve = __tve;
-
- spin_lock(&tve->lock);
-}
-
-static void tve_unlock(void *__tve)
-__releases(&tve->lock)
-{
- struct imx_tve *tve = __tve;
-
- spin_unlock(&tve->lock);
-}
-
static void tve_enable(struct imx_tve *tve)
{
- if (!tve->enabled) {
- tve->enabled = true;
- clk_prepare_enable(tve->clk);
- regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_EN, TVE_EN);
- }
+ clk_prepare_enable(tve->clk);
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);
/* clear interrupt status register */
regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
@@ -169,11 +146,8 @@ static void tve_enable(struct imx_tve *tve)
static void tve_disable(struct imx_tve *tve)
{
- if (tve->enabled) {
- tve->enabled = false;
- regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
- clk_disable_unprepare(tve->clk);
- }
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
+ clk_disable_unprepare(tve->clk);
}
static int tve_setup_tvout(struct imx_tve *tve)
@@ -500,8 +474,7 @@ static struct regmap_config tve_regmap_config = {
.readable_reg = imx_tve_readable_reg,
- .lock = tve_lock,
- .unlock = tve_unlock,
+ .fast_io = true,
.max_register = 0xdc,
};
@@ -511,7 +484,7 @@ static const char * const imx_tve_modes[] = {
[TVE_MODE_VGA] = "vga",
};
-static const int of_get_tve_mode(struct device_node *np)
+static int of_get_tve_mode(struct device_node *np)
{
const char *bm;
int ret, i;
@@ -544,7 +517,6 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
memset(tve, 0, sizeof(*tve));
tve->dev = dev;
- spin_lock_init(&tve->lock);
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index a831b5bd1613..2eb8df4697df 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -28,7 +28,6 @@ struct imx_parallel_display {
struct drm_bridge bridge;
struct device *dev;
void *edid;
- int edid_len;
u32 bus_format;
u32 bus_flags;
struct drm_display_mode mode;
@@ -41,11 +40,6 @@ static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
return container_of(c, struct imx_parallel_display, connector);
}
-static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
-{
- return container_of(e, struct imx_parallel_display, encoder);
-}
-
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
return container_of(b, struct imx_parallel_display, bridge);
@@ -289,9 +283,6 @@ static int imx_pd_register(struct drm_device *drm,
DRM_MODE_CONNECTOR_DPI);
}
- if (imxpd->panel)
- drm_panel_attach(imxpd->panel, &imxpd->connector);
-
if (imxpd->next_bridge) {
ret = drm_bridge_attach(encoder, imxpd->next_bridge,
&imxpd->bridge, 0);
@@ -313,6 +304,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
+ int edid_len;
int ret;
u32 bus_format = 0;
const char *fmt;
@@ -326,9 +318,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (ret && ret != -ENODEV)
return ret;
- edidp = of_get_property(np, "edid", &imxpd->edid_len);
+ edidp = of_get_property(np, "edid", &edid_len);
if (edidp)
- imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
+ imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -352,20 +344,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
return 0;
}
-static void imx_pd_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
-
- if (imxpd->panel)
- drm_panel_detach(imxpd->panel);
-
- kfree(imxpd->edid);
-}
-
static const struct component_ops imx_pd_ops = {
.bind = imx_pd_bind,
- .unbind = imx_pd_unbind,
};
static int imx_pd_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index b7074161ccf0..a3d1617d7c67 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -199,26 +199,20 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
- long rate;
-
- if (!drm_atomic_crtc_needs_modeset(state))
- return 0;
-
- if (state->mode.hdisplay > priv->soc_info->max_width ||
- state->mode.vdisplay > priv->soc_info->max_height)
- return -EINVAL;
- rate = clk_round_rate(priv->pix_clk,
- state->adjusted_mode.clock * 1000);
- if (rate < 0)
- return rate;
-
- if (priv->soc_info->has_osd) {
+ if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
+ if (IS_ERR(f1_state))
+ return PTR_ERR(f1_state);
+
f0_state = drm_atomic_get_plane_state(state->state, &priv->f0);
+ if (IS_ERR(f0_state))
+ return PTR_ERR(f0_state);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) {
ipu_state = drm_atomic_get_plane_state(state->state, priv->ipu_plane);
+ if (IS_ERR(ipu_state))
+ return PTR_ERR(ipu_state);
/* IPU and F1 planes cannot be enabled at the same time. */
if (f1_state->fb && ipu_state->fb) {
@@ -235,6 +229,24 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static enum drm_mode_status
+ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
+ long rate;
+
+ if (mode->hdisplay > priv->soc_info->max_width)
+ return MODE_BAD_HVALUE;
+ if (mode->vdisplay > priv->soc_info->max_height)
+ return MODE_BAD_VVALUE;
+
+ rate = clk_round_rate(priv->pix_clk, mode->clock * 1000);
+ if (rate < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *oldstate)
{
@@ -648,6 +660,7 @@ static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = {
.atomic_begin = ingenic_drm_crtc_atomic_begin,
.atomic_flush = ingenic_drm_crtc_atomic_flush,
.atomic_check = ingenic_drm_crtc_atomic_check,
+ .mode_valid = ingenic_drm_crtc_mode_valid,
};
static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 7a0a8bd865d3..fc8c6e970ee3 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -35,6 +35,7 @@ struct soc_info {
const u32 *formats;
size_t num_formats;
bool has_bicubic;
+ bool manual_restart;
void (*set_coefs)(struct ingenic_ipu *ipu, unsigned int reg,
unsigned int sharpness, bool downscale,
@@ -48,6 +49,7 @@ struct ingenic_ipu {
struct regmap *map;
struct clk *clk;
const struct soc_info *soc_info;
+ bool clk_enabled;
unsigned int num_w, num_h, denom_w, denom_h;
@@ -287,12 +289,23 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
const struct drm_format_info *finfo;
u32 ctrl, stride = 0, coef_index = 0, format = 0;
bool needs_modeset, upscaling_w, upscaling_h;
+ int err;
if (!state || !state->fb)
return;
finfo = drm_format_info(state->fb->format->format);
+ if (!ipu->clk_enabled) {
+ err = clk_enable(ipu->clk);
+ if (err) {
+ dev_err(ipu->dev, "Unable to enable clock: %d\n", err);
+ return;
+ }
+
+ ipu->clk_enabled = true;
+ }
+
/* Reset all the registers if needed */
needs_modeset = drm_atomic_crtc_needs_modeset(state->crtc->state);
if (needs_modeset) {
@@ -577,6 +590,11 @@ static void ingenic_ipu_plane_atomic_disable(struct drm_plane *plane,
regmap_clear_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CHIP_EN);
ingenic_drm_plane_disable(ipu->master, plane);
+
+ if (ipu->clk_enabled) {
+ clk_disable(ipu->clk);
+ ipu->clk_enabled = false;
+ }
}
static const struct drm_plane_helper_funcs ingenic_ipu_plane_helper_funcs = {
@@ -645,7 +663,8 @@ static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg)
unsigned int dummy;
/* dummy read allows CPU to reconfigure IPU */
- regmap_read(ipu->map, JZ_REG_IPU_STATUS, &dummy);
+ if (ipu->soc_info->manual_restart)
+ regmap_read(ipu->map, JZ_REG_IPU_STATUS, &dummy);
/* ACK interrupt */
regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0);
@@ -656,7 +675,8 @@ static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg)
regmap_write(ipu->map, JZ_REG_IPU_V_ADDR, ipu->addr_v);
/* Run IPU for the new frame */
- regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RUN);
+ if (ipu->soc_info->manual_restart)
+ regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RUN);
drm_crtc_handle_vblank(crtc);
@@ -758,9 +778,9 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
drm_object_attach_property(&plane->base, ipu->sharpness_prop,
ipu->sharpness);
- err = clk_prepare_enable(ipu->clk);
+ err = clk_prepare(ipu->clk);
if (err) {
- dev_err(dev, "Unable to enable clock\n");
+ dev_err(dev, "Unable to prepare clock\n");
return err;
}
@@ -772,7 +792,7 @@ static void ingenic_ipu_unbind(struct device *dev,
{
struct ingenic_ipu *ipu = dev_get_drvdata(dev);
- clk_disable_unprepare(ipu->clk);
+ clk_unprepare(ipu->clk);
}
static const struct component_ops ingenic_ipu_ops = {
@@ -792,10 +812,16 @@ static int ingenic_ipu_remove(struct platform_device *pdev)
}
static const u32 jz4725b_ipu_formats[] = {
+ /*
+ * While officially supported, packed YUV 4:2:2 formats can cause
+ * random hardware crashes on JZ4725B under certain circumstances.
+ * It seems to happen with some specific resize ratios.
+ * Until a proper workaround or fix is found, disable these formats.
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
+ */
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
@@ -806,6 +832,7 @@ static const struct soc_info jz4725b_soc_info = {
.formats = jz4725b_ipu_formats,
.num_formats = ARRAY_SIZE(jz4725b_ipu_formats),
.has_bicubic = false,
+ .manual_restart = true,
.set_coefs = jz4725b_set_coefs,
};
@@ -831,6 +858,7 @@ static const struct soc_info jz4760_soc_info = {
.formats = jz4760_ipu_formats,
.num_formats = ARRAY_SIZE(jz4760_ipu_formats),
.has_bicubic = true,
+ .manual_restart = false,
.set_coefs = jz4760_set_coefs,
};
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 155f2b4b4030..11223fe348df 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
return ret;
if (bo->base.sgt) {
- dma_unmap_sg(dev, bo->base.sgt->sgl,
- bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(bo->base.sgt);
} else {
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
@@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
}
}
- dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+ ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret) {
+ sg_free_table(&sgt);
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+ return ret;
+ }
*bo->base.sgt = sgt;
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 5b92fb82674a..2b2739adc7f5 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
if (err)
goto err_out1;
- for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
bo_va->node.start + offset);
if (err)
@@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
mutex_lock(&vm->lock);
base = bo_va->node.start + (pageoff << PAGE_SHIFT);
- for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
- bo->base.sgt->nents, pageoff) {
+ for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
base + offset);
if (err)
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 4d2290f88edb..c271e5bf042e 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
+#include <linux/regulator/consumer.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@@ -89,7 +90,7 @@ void mcde_display_irq(struct mcde *mcde)
* the update function is called, then we disable the
* flow on the channel once we get the TE IRQ.
*/
- if (mcde->oneshot_mode) {
+ if (mcde->flow_mode == MCDE_COMMAND_ONESHOT_FLOW) {
spin_lock(&mcde->flow_lock);
if (--mcde->flow_active == 0) {
dev_dbg(mcde->dev, "TE0 IRQ\n");
@@ -333,7 +334,7 @@ static void mcde_configure_overlay(struct mcde *mcde, enum mcde_overlay ovl,
enum mcde_extsrc src,
enum mcde_channel ch,
const struct drm_display_mode *mode,
- u32 format)
+ u32 format, int cpp)
{
u32 val;
u32 conf1;
@@ -342,6 +343,7 @@ static void mcde_configure_overlay(struct mcde *mcde, enum mcde_overlay ovl,
u32 ljinc;
u32 cr;
u32 comp;
+ u32 pixel_fetcher_watermark;
switch (ovl) {
case MCDE_OVERLAY_0:
@@ -426,8 +428,33 @@ static void mcde_configure_overlay(struct mcde *mcde, enum mcde_overlay ovl,
format);
break;
}
- /* The default watermark level for overlay 0 is 48 */
- val |= 48 << MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT;
+
+ /*
+ * Pixel fetch watermark level is max 0x1FFF pixels.
+	 * Three basic rules should be followed:
+	 * 1. The value should be at least 256 bits.
+	 * 2. The sum of all active overlays' pixel fetch watermark levels,
+	 *    each multiplied by its bits per pixel, should be lower than
+	 *    the size of input_fifo_size in bits.
+	 * 3. The value should be a multiple of a line (256 bits).
+ */
+ switch (cpp) {
+ case 2:
+ pixel_fetcher_watermark = 128;
+ break;
+ case 3:
+ pixel_fetcher_watermark = 96;
+ break;
+ case 4:
+ pixel_fetcher_watermark = 48;
+ break;
+ default:
+ pixel_fetcher_watermark = 48;
+ break;
+ }
+ dev_dbg(mcde->dev, "pixel fetcher watermark level %d pixels\n",
+ pixel_fetcher_watermark);
+ val |= pixel_fetcher_watermark << MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT;
writel(val, mcde->regs + conf2);
/* Number of bytes to fetch per line */
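Checking the chosen levels against the three rules: every value is a multiple of a 256-bit line and well above the 256-bit minimum:

	cpp = 2: 128 px * 16 bpp = 2048 bits = 8 * 256
	cpp = 3:  96 px * 24 bpp = 2304 bits = 9 * 256
	cpp = 4:  48 px * 32 bpp = 1536 bits = 6 * 256

Rule 2 additionally depends on input_fifo_size and on how many overlays are active at once, which is configuration specific and not checked here.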
@@ -498,19 +525,47 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
}
/* Set up channel 0 sync (based on chnl_update_registers()) */
- if (mcde->video_mode || mcde->te_sync)
+ switch (mcde->flow_mode) {
+ case MCDE_COMMAND_ONESHOT_FLOW:
+ /* Oneshot is achieved with software sync */
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
+ << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+ break;
+ case MCDE_COMMAND_TE_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
- else
- val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ break;
+ case MCDE_COMMAND_BTA_TE_FLOW:
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
+ << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
+ /*
+ * TODO:
+ * The vendor driver uses the formatter as sync source
+		 * for BTA TE mode. Try using TE if you have a panel
+		 * that uses this mode.
+ */
+ val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
+ << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ break;
+ case MCDE_VIDEO_TE_FLOW:
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
-
- if (mcde->te_sync)
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
- else
+ break;
+ case MCDE_VIDEO_FORMATTER_FLOW:
+ val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
+ << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
+ break;
+ default:
+ dev_err(mcde->dev, "unknown flow mode %d\n",
+ mcde->flow_mode);
+ break;
+ }
writel(val, mcde->regs + sync);
@@ -825,6 +880,14 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
u32 formatter_frame;
u32 pkt_div;
u32 val;
+ int ret;
+
+ /* This powers up the entire MCDE block and the DSI hardware */
+ ret = regulator_enable(mcde->epod);
+ if (ret) {
+ dev_err(drm->dev, "can't re-enable EPOD regulator\n");
+ return;
+ }
dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
mode->hdisplay, mode->vdisplay,
@@ -835,6 +898,26 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
return;
}
+ /* Set up the main control, watermark level at 7 */
+ val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
+ /* 24 bits DPI: connect LSB Ch B to D[0:7] */
+ val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
+ /* TV out: connect LSB Ch B to D[8:15] */
+ val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+ /* Don't care about this muxing */
+ val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
+ /* 24 bits DPI: connect MID Ch B to D[24:31] */
+ val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
+ /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */
+ val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
+ /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
+ writel(val, mcde->regs + MCDE_CONF0);
+
+ /* Clear any pending interrupts */
+ mcde_display_disable_irqs(mcde);
+ writel(0, mcde->regs + MCDE_IMSCERR);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+
dev_info(drm->dev, "output in %s mode, format %dbpp\n",
(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
"VIDEO" : "CMD",
@@ -904,7 +987,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
* channel 0
*/
mcde_configure_overlay(mcde, MCDE_OVERLAY_0, MCDE_EXTSRC_0,
- MCDE_CHANNEL_0, mode, format);
+ MCDE_CHANNEL_0, mode, format, cpp);
/*
* Configure pixel-per-line and line-per-frame for channel 0 and then
@@ -916,11 +999,25 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
fifo_wtrmrk);
+ /*
+ * This brings up the DSI bridge which is tightly connected
+ * to the MCDE DSI formatter.
+ *
+ * FIXME: if we want to use another formatter, such as DPI,
+ * we need to be more elaborate here and select the appropriate
+ * bridge.
+ */
+ mcde_dsi_enable(mcde->bridge);
+
/* Configure the DSI formatter 0 for the DSI panel output */
mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
formatter_frame, pkt_size);
- if (mcde->te_sync) {
+ switch (mcde->flow_mode) {
+ case MCDE_COMMAND_TE_FLOW:
+ case MCDE_COMMAND_BTA_TE_FLOW:
+ case MCDE_VIDEO_TE_FLOW:
+		/* We are using TE in some combination */
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val = MCDE_VSCRC_VSPOL;
else
@@ -930,16 +1027,31 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
val = readl(mcde->regs + MCDE_CRC);
val |= MCDE_CRC_SYCEN0;
writel(val, mcde->regs + MCDE_CRC);
+ break;
+ default:
+ /* No TE capture */
+ break;
}
drm_crtc_vblank_on(crtc);
- if (mcde->video_mode)
- /*
- * Keep FIFO permanently enabled in video mode,
- * otherwise MCDE will stop feeding data to the panel.
- */
+ /*
+	 * In oneshot mode the flow is started only when the display
+	 * is given an update, and disabled again immediately after.
+	 * For all other modes (command or video) we start the FIFO
+	 * flow right here, which is necessary for the hardware to
+	 * behave correctly.
+ */
+ if (mcde->flow_mode != MCDE_COMMAND_ONESHOT_FLOW) {
mcde_enable_fifo(mcde, MCDE_FIFO_A);
+ dev_dbg(mcde->dev, "started MCDE video FIFO flow\n");
+ }
+
+ /* Enable MCDE with automatic clock gating */
+ val = readl(mcde->regs + MCDE_CR);
+ val |= MCDE_CR_MCDEEN | MCDE_CR_AUTOCLKG_EN;
+ writel(val, mcde->regs + MCDE_CR);
dev_info(drm->dev, "MCDE display is enabled\n");
}
@@ -950,12 +1062,16 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event;
+ int ret;
drm_crtc_vblank_off(crtc);
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
+ /* This disables the DSI bridge */
+ mcde_dsi_disable(mcde->bridge);
+
event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
@@ -965,43 +1081,47 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
spin_unlock_irq(&crtc->dev->event_lock);
}
+ ret = regulator_disable(mcde->epod);
+ if (ret)
+ dev_err(drm->dev, "can't disable EPOD regulator\n");
+ /* Make sure we are powered down (before we may power up again) */
+ usleep_range(50000, 70000);
+
dev_info(drm->dev, "MCDE display is disabled\n");
}
-static void mcde_display_send_one_frame(struct mcde *mcde)
+static void mcde_start_flow(struct mcde *mcde)
{
- /* Request a TE ACK */
- if (mcde->te_sync)
+ /* Request a TE ACK only in TE+BTA mode */
+ if (mcde->flow_mode == MCDE_COMMAND_BTA_TE_FLOW)
mcde_dsi_te_request(mcde->mdsi);
/* Enable FIFO A flow */
mcde_enable_fifo(mcde, MCDE_FIFO_A);
- if (mcde->te_sync) {
+ /*
+ * If oneshot mode is enabled, the flow will be disabled
+ * when the TE0 IRQ arrives in the interrupt handler. Otherwise
+ * updates are continuously streamed to the display after this
+ * point.
+ */
+
+ if (mcde->flow_mode == MCDE_COMMAND_ONESHOT_FLOW) {
+ /* Trigger a software sync out on channel 0 */
+ writel(MCDE_CHNLXSYNCHSW_SW_TRIG,
+ mcde->regs + MCDE_CHNL0SYNCHSW);
+
/*
- * If oneshot mode is enabled, the flow will be disabled
- * when the TE0 IRQ arrives in the interrupt handler. Otherwise
- * updates are continuously streamed to the display after this
- * point.
+ * Disable FIFO A flow again: since we are using TE sync we
+ * need to wait for the FIFO to drain before we continue
+		 * so repeated calls to this function will not cause a mess
+		 * in the hardware by pushing updates while updates are
+		 * already in progress.
*/
- dev_dbg(mcde->dev, "sent TE0 framebuffer update\n");
- return;
+ mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
}
- /* Trigger a software sync out on channel 0 */
- writel(MCDE_CHNLXSYNCHSW_SW_TRIG,
- mcde->regs + MCDE_CHNL0SYNCHSW);
-
- /*
- * Disable FIFO A flow again: since we are using TE sync we
- * need to wait for the FIFO to drain before we continue
- * so repeated calls to this function will not cause a mess
- * in the hardware by pushing updates will updates are going
- * on already.
- */
- mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
-
- dev_dbg(mcde->dev, "sent SW framebuffer update\n");
+ dev_dbg(mcde->dev, "started MCDE FIFO flow\n");
}
static void mcde_set_extsrc(struct mcde *mcde, u32 buffer_address)
@@ -1060,15 +1180,13 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
- if (!mcde->video_mode) {
- /*
- * Send a single frame using software sync if the flow
- * is not active yet.
- */
- if (mcde->flow_active == 0)
- mcde_display_send_one_frame(mcde);
- }
- dev_info_once(mcde->dev, "sent first display update\n");
+ dev_info_once(mcde->dev, "first update of display contents\n");
+ /*
+ * Usually the flow is already active, unless we are in
+ * oneshot mode, then we need to kick the flow right here.
+ */
+ if (mcde->flow_active == 0)
+ mcde_start_flow(mcde);
} else {
/*
	 * If an update is received before the MCDE is enabled
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 679c2c4e6d9d..8253e2f9993e 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -9,6 +9,61 @@
#ifndef _MCDE_DRM_H_
#define _MCDE_DRM_H_
+/* Shared basic registers */
+#define MCDE_CR 0x00000000
+#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_SHIFT 0
+#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_MASK 0x0000003F
+#define MCDE_CR_IFIFOCTRLEN BIT(15)
+#define MCDE_CR_UFRECOVERY_MODE_V422 BIT(16)
+#define MCDE_CR_WRAP_MODE_V422_SHIFT BIT(17)
+#define MCDE_CR_AUTOCLKG_EN BIT(30)
+#define MCDE_CR_MCDEEN BIT(31)
+
+#define MCDE_CONF0 0x00000004
+#define MCDE_CONF0_SYNCMUX0 BIT(0)
+#define MCDE_CONF0_SYNCMUX1 BIT(1)
+#define MCDE_CONF0_SYNCMUX2 BIT(2)
+#define MCDE_CONF0_SYNCMUX3 BIT(3)
+#define MCDE_CONF0_SYNCMUX4 BIT(4)
+#define MCDE_CONF0_SYNCMUX5 BIT(5)
+#define MCDE_CONF0_SYNCMUX6 BIT(6)
+#define MCDE_CONF0_SYNCMUX7 BIT(7)
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
+#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
+#define MCDE_CONF0_OUTMUX0_SHIFT 16
+#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
+#define MCDE_CONF0_OUTMUX1_SHIFT 19
+#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
+#define MCDE_CONF0_OUTMUX2_SHIFT 22
+#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
+#define MCDE_CONF0_OUTMUX3_SHIFT 25
+#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
+#define MCDE_CONF0_OUTMUX4_SHIFT 28
+#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
+
+#define MCDE_SSP 0x00000008
+#define MCDE_AIS 0x00000100
+#define MCDE_IMSCERR 0x00000110
+#define MCDE_RISERR 0x00000120
+#define MCDE_MISERR 0x00000130
+#define MCDE_SISERR 0x00000140
+
+enum mcde_flow_mode {
+ /* One-shot mode: flow stops after one frame */
+ MCDE_COMMAND_ONESHOT_FLOW,
+ /* Command mode with tearing effect (TE) IRQ sync */
+ MCDE_COMMAND_TE_FLOW,
+ /*
+ * Command mode with bus turn-around (BTA) and tearing effect
+ * (TE) IRQ sync.
+ */
+ MCDE_COMMAND_BTA_TE_FLOW,
+ /* Video mode with tearing effect (TE) sync IRQ */
+ MCDE_VIDEO_TE_FLOW,
+ /* Video mode with the formatter itself as sync source */
+ MCDE_VIDEO_FORMATTER_FLOW,
+};
+
struct mcde {
struct drm_device drm;
struct device *dev;
@@ -18,9 +73,7 @@ struct mcde {
struct drm_simple_display_pipe pipe;
struct mipi_dsi_device *mdsi;
s16 stride;
- bool te_sync;
- bool video_mode;
- bool oneshot_mode;
+ enum mcde_flow_mode flow_mode;
unsigned int flow_active;
spinlock_t flow_lock; /* Locks the channel flow control */
@@ -36,8 +89,16 @@ struct mcde {
#define to_mcde(dev) container_of(dev, struct mcde, drm)
+static inline bool mcde_flow_is_video(struct mcde *mcde)
+{
+ return (mcde->flow_mode == MCDE_VIDEO_TE_FLOW ||
+ mcde->flow_mode == MCDE_VIDEO_FORMATTER_FLOW);
+}
+
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
+void mcde_dsi_enable(struct drm_bridge *bridge);
+void mcde_dsi_disable(struct drm_bridge *bridge);
extern struct platform_driver mcde_dsi_driver;
void mcde_display_irq(struct mcde *mcde);
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 82137ab76cfc..92f8bd907193 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -63,6 +63,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -82,44 +83,6 @@
#define DRIVER_DESC "DRM module for MCDE"
-#define MCDE_CR 0x00000000
-#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_SHIFT 0
-#define MCDE_CR_IFIFOEMPTYLINECOUNT_V422_MASK 0x0000003F
-#define MCDE_CR_IFIFOCTRLEN BIT(15)
-#define MCDE_CR_UFRECOVERY_MODE_V422 BIT(16)
-#define MCDE_CR_WRAP_MODE_V422_SHIFT BIT(17)
-#define MCDE_CR_AUTOCLKG_EN BIT(30)
-#define MCDE_CR_MCDEEN BIT(31)
-
-#define MCDE_CONF0 0x00000004
-#define MCDE_CONF0_SYNCMUX0 BIT(0)
-#define MCDE_CONF0_SYNCMUX1 BIT(1)
-#define MCDE_CONF0_SYNCMUX2 BIT(2)
-#define MCDE_CONF0_SYNCMUX3 BIT(3)
-#define MCDE_CONF0_SYNCMUX4 BIT(4)
-#define MCDE_CONF0_SYNCMUX5 BIT(5)
-#define MCDE_CONF0_SYNCMUX6 BIT(6)
-#define MCDE_CONF0_SYNCMUX7 BIT(7)
-#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12
-#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000
-#define MCDE_CONF0_OUTMUX0_SHIFT 16
-#define MCDE_CONF0_OUTMUX0_MASK 0x00070000
-#define MCDE_CONF0_OUTMUX1_SHIFT 19
-#define MCDE_CONF0_OUTMUX1_MASK 0x00380000
-#define MCDE_CONF0_OUTMUX2_SHIFT 22
-#define MCDE_CONF0_OUTMUX2_MASK 0x01C00000
-#define MCDE_CONF0_OUTMUX3_SHIFT 25
-#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000
-#define MCDE_CONF0_OUTMUX4_SHIFT 28
-#define MCDE_CONF0_OUTMUX4_MASK 0x70000000
-
-#define MCDE_SSP 0x00000008
-#define MCDE_AIS 0x00000100
-#define MCDE_IMSCERR 0x00000110
-#define MCDE_RISERR 0x00000120
-#define MCDE_MISERR 0x00000130
-#define MCDE_SISERR 0x00000140
-
#define MCDE_PID 0x000001FC
#define MCDE_PID_METALFIX_VERSION_SHIFT 0
#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
@@ -293,7 +256,6 @@ static int mcde_probe(struct platform_device *pdev)
struct component_match *match = NULL;
struct resource *res;
u32 pid;
- u32 val;
int irq;
int ret;
int i;
@@ -305,9 +267,6 @@ static int mcde_probe(struct platform_device *pdev)
mcde->dev = dev;
platform_set_drvdata(pdev, drm);
- /* Enable continuous updates: this is what Linux' framebuffer expects */
- mcde->oneshot_mode = false;
-
/* First obtain and turn on the main power */
mcde->epod = devm_regulator_get(dev, "epod");
if (IS_ERR(mcde->epod)) {
@@ -405,27 +364,7 @@ static int mcde_probe(struct platform_device *pdev)
goto clk_disable;
}
- /* Set up the main control, watermark level at 7 */
- val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
- /* 24 bits DPI: connect LSB Ch B to D[0:7] */
- val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
- /* TV out: connect LSB Ch B to D[8:15] */
- val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
- /* Don't care about this muxing */
- val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
- /* 24 bits DPI: connect MID Ch B to D[24:31] */
- val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
- /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */
- val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
- /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
- writel(val, mcde->regs + MCDE_CONF0);
-
- /* Enable automatic clock gating */
- val = readl(mcde->regs + MCDE_CR);
- val |= MCDE_CR_MCDEEN | MCDE_CR_AUTOCLKG_EN;
- writel(val, mcde->regs + MCDE_CR);
-
- /* Clear any pending interrupts */
+ /* Disable and clear any pending interrupts */
mcde_display_disable_irqs(mcde);
writel(0, mcde->regs + MCDE_IMSCERR);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
@@ -455,12 +394,34 @@ static int mcde_probe(struct platform_device *pdev)
ret = PTR_ERR(match);
goto clk_disable;
}
+
+ /*
+ * Perform an invasive reset of the MCDE and all blocks by
+	 * cutting the power to the subsystem, then bringing it back up
+ * later when we enable the display as a result of
+ * component_master_add_with_match().
+ */
+ ret = regulator_disable(mcde->epod);
+ if (ret) {
+ dev_err(dev, "can't disable EPOD regulator\n");
+ return ret;
+ }
+ /* Wait 50 ms so we are sure we cut the power */
+ usleep_range(50000, 70000);
+
ret = component_master_add_with_match(&pdev->dev, &mcde_drm_comp_ops,
match);
if (ret) {
dev_err(dev, "failed to add component master\n");
- goto clk_disable;
+ /*
+ * The EPOD regulator is already disabled at this point so some
+		 * special error path code is needed
+ */
+ clk_disable_unprepare(mcde->mcde_clk);
+ regulator_disable(mcde->vana);
+ return ret;
}
+
return 0;
clk_disable:
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 981923caa7e6..2314c8122992 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -43,6 +43,7 @@ struct mcde_dsi {
struct drm_bridge *bridge_out;
struct mipi_dsi_host dsi_host;
struct mipi_dsi_device *mdsi;
+ const struct drm_display_mode *mode;
struct clk *hs_clk;
struct clk *lp_clk;
unsigned long hs_freq;
@@ -148,9 +149,22 @@ static void mcde_dsi_attach_to_mcde(struct mcde_dsi *d)
{
d->mcde->mdsi = d->mdsi;
- d->mcde->video_mode = !!(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO);
- /* Enable use of the TE signal for all command mode panels */
- d->mcde->te_sync = !d->mcde->video_mode;
+ /*
+ * Select the way the DSI data flow is pushing to the display:
+ * currently we just support video or command mode depending
+ * on the type of display. Video mode defaults to using the
+ * formatter itself for synchronization (stateless video panel).
+ *
+ * FIXME: add flags to struct mipi_dsi_device .flags to indicate
+ * displays that require BTA (bus turn around) so we can handle
+ * such displays as well. Figure out how to properly handle
+ * single frame on-demand updates with DRM for command mode
+ * displays (MCDE_COMMAND_ONESHOT_FLOW).
+ */
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
+ d->mcde->flow_mode = MCDE_VIDEO_FORMATTER_FLOW;
+ else
+ d->mcde->flow_mode = MCDE_COMMAND_TE_FLOW;
}
static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
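As a sketch of the FIXME above: if the DSI core ever gained a per-device capability flag for bus turn-around (MIPI_DSI_MODE_BTA below is an assumed name, not an existing flag in <drm/drm_mipi_dsi.h>), the selection could grow a third case:

	/* hypothetical: MIPI_DSI_MODE_BTA does not exist upstream */
	if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
		d->mcde->flow_mode = MCDE_VIDEO_FORMATTER_FLOW;
	else if (d->mdsi->mode_flags & MIPI_DSI_MODE_BTA)
		d->mcde->flow_mode = MCDE_COMMAND_BTA_TE_FLOW;
	else
		d->mcde->flow_mode = MCDE_COMMAND_TE_FLOW;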
@@ -194,79 +208,16 @@ static int mcde_dsi_host_detach(struct mipi_dsi_host *host,
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
-static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
+static int mcde_dsi_execute_transfer(struct mcde_dsi *d,
+ const struct mipi_dsi_msg *msg)
{
- struct mcde_dsi *d = host_to_mcde_dsi(host);
const u32 loop_delay_us = 10; /* us */
- const u8 *tx = msg->tx_buf;
u32 loop_counter;
size_t txlen = msg->tx_len;
size_t rxlen = msg->rx_len;
+ int i;
u32 val;
int ret;
- int i;
-
- if (txlen > 16) {
- dev_err(d->dev,
- "dunno how to write more than 16 bytes yet\n");
- return -EIO;
- }
- if (rxlen > 4) {
- dev_err(d->dev,
- "dunno how to read more than 4 bytes yet\n");
- return -EIO;
- }
-
- dev_dbg(d->dev,
- "message to channel %d, write %zd bytes read %zd bytes\n",
- msg->channel, txlen, rxlen);
-
- /* Command "nature" */
- if (MCDE_DSI_HOST_IS_READ(msg->type))
- /* MCTL_MAIN_DATA_CTL already set up */
- val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ;
- else
- val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE;
- /*
- * More than 2 bytes will not fit in a single packet, so it's
- * time to set the "long not short" bit. One byte is used by
- * the MIPI DCS command leaving just one byte for the payload
- * in a short package.
- */
- if (mipi_dsi_packet_format_is_long(msg->type))
- val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
- val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
- val |= txlen << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
- val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
- val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
- writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
-
- /* MIPI DCS command is part of the data */
- if (txlen > 0) {
- val = 0;
- for (i = 0; i < 4 && i < txlen; i++)
- val |= tx[i] << (i * 8);
- }
- writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
- if (txlen > 4) {
- val = 0;
- for (i = 0; i < 4 && (i + 4) < txlen; i++)
- val |= tx[i + 4] << (i * 8);
- writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
- }
- if (txlen > 8) {
- val = 0;
- for (i = 0; i < 4 && (i + 8) < txlen; i++)
- val |= tx[i + 8] << (i * 8);
- writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
- }
- if (txlen > 12) {
- val = 0;
- for (i = 0; i < 4 && (i + 12) < txlen; i++)
- val |= tx[i + 12] << (i * 8);
- writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
- }
writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
@@ -283,6 +234,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
if (!loop_counter) {
dev_err(d->dev, "DSI read timeout!\n");
+			/* Fail here; the caller retries the transfer */
return -ETIME;
}
} else {
@@ -293,6 +245,7 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
if (!loop_counter) {
+			/* Fail here; the caller retries the transfer */
dev_err(d->dev, "DSI write timeout!\n");
return -ETIME;
}
@@ -334,6 +287,93 @@ static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
ret = rdsz;
}
+ /* Successful transmission */
+ return ret;
+}
+
+static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mcde_dsi *d = host_to_mcde_dsi(host);
+ const u8 *tx = msg->tx_buf;
+ size_t txlen = msg->tx_len;
+ size_t rxlen = msg->rx_len;
+ unsigned int retries = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ if (txlen > 16) {
+ dev_err(d->dev,
+ "dunno how to write more than 16 bytes yet\n");
+ return -EIO;
+ }
+ if (rxlen > 4) {
+ dev_err(d->dev,
+ "dunno how to read more than 4 bytes yet\n");
+ return -EIO;
+ }
+
+ dev_dbg(d->dev,
+ "message to channel %d, write %zd bytes read %zd bytes\n",
+ msg->channel, txlen, rxlen);
+
+ /* Command "nature" */
+ if (MCDE_DSI_HOST_IS_READ(msg->type))
+ /* MCTL_MAIN_DATA_CTL already set up */
+ val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ;
+ else
+ val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE;
+ /*
+ * More than 2 bytes will not fit in a single packet, so it's
+ * time to set the "long not short" bit. One byte is used by
+ * the MIPI DCS command leaving just one byte for the payload
+ * in a short package.
+ */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
+ val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
+ val |= txlen << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
+ val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
+ val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
+ writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
+
+ /* MIPI DCS command is part of the data */
+ if (txlen > 0) {
+ val = 0;
+ for (i = 0; i < 4 && i < txlen; i++)
+ val |= tx[i] << (i * 8);
+ }
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
+ if (txlen > 4) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 4) < txlen; i++)
+ val |= tx[i + 4] << (i * 8);
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
+ }
+ if (txlen > 8) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 8) < txlen; i++)
+ val |= tx[i + 8] << (i * 8);
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
+ }
+ if (txlen > 12) {
+ val = 0;
+ for (i = 0; i < 4 && (i + 12) < txlen; i++)
+ val |= tx[i + 12] << (i * 8);
+ writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
+ }
+
+ while (retries < 3) {
+ ret = mcde_dsi_execute_transfer(d, msg);
+ if (ret >= 0)
+ break;
+ retries++;
+ }
+ if (ret < 0 && retries)
+ dev_err(d->dev, "gave up after %d retries\n", retries);
+
+ /* Clear any errors */
writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
@@ -799,10 +839,11 @@ static void mcde_dsi_start(struct mcde_dsi *d)
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
/*
- * If we enable low-power mode here, with
- * val |= DSI_CMD_MODE_CTL_IF1_LP_EN
- * then display updates become really slow.
+ * Enable low-power mode only when the panel asks for it:
+ * display updates become really slow in LP mode.
 */
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_LPM)
+ val |= DSI_CMD_MODE_CTL_IF1_LP_EN;
val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
writel(val, d->regs + DSI_CMD_MODE_CTL);
@@ -811,23 +852,11 @@ static void mcde_dsi_start(struct mcde_dsi *d)
dev_info(d->dev, "DSI link enabled\n");
}
-
-static void mcde_dsi_bridge_enable(struct drm_bridge *bridge)
-{
- struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
- u32 val;
-
- if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- /* Enable video mode */
- val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
- val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
- writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
- }
-
- dev_info(d->dev, "enable DSI master\n");
-};
-
-static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+/*
+ * Notice that this is called from inside the display controller
+ * and not from the bridge callbacks.
+ */
+void mcde_dsi_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
unsigned long hs_freq, lp_freq;
@@ -871,7 +900,25 @@ static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
dev_info(d->dev, "DSI HS clock rate %lu Hz\n",
d->hs_freq);
+ /* Assert RESET through the PRCMU, active low */
+ /* FIXME: which DSI block? */
+ regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
+
+ usleep_range(100, 200);
+
+ /* De-assert RESET again */
+ regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN,
+ PRCM_DSI_SW_RESET_DSI0_SW_RESETN);
+
+ /* Start up the hardware */
+ mcde_dsi_start(d);
+
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ /* Set up the video mode from the DRM mode */
+ mcde_dsi_setup_video_mode(d, d->mode);
+
/* Put IF1 into video mode */
val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
@@ -887,17 +934,25 @@ static void mcde_dsi_bridge_pre_enable(struct drm_bridge *bridge)
val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
writel(val, d->regs + DSI_VID_MODE_STS_CTL);
+
+ /* Enable video mode */
+ val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
+ val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
+ writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
} else {
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
/*
- * If we enable low-power mode here with
- * val |= DSI_CMD_MODE_CTL_IF1_LP_EN
- * the display updates become really slow.
+ * Enable low-power mode only when the panel asks for it:
+ * display updates become really slow in LP mode.
 */
+ if (d->mdsi->mode_flags & MIPI_DSI_MODE_LPM)
+ val |= DSI_CMD_MODE_CTL_IF1_LP_EN;
val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
writel(val, d->regs + DSI_CMD_MODE_CTL);
}
+
+ dev_info(d->dev, "enabled MCDE DSI master\n");
}
static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
@@ -911,13 +966,12 @@ static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
return;
}
+ d->mode = mode;
+
dev_info(d->dev, "set DSI master to %dx%d %u Hz %s mode\n",
mode->hdisplay, mode->vdisplay, mode->clock * 1000,
(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
);
-
- if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
- mcde_dsi_setup_video_mode(d, mode);
}
static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
@@ -961,14 +1015,15 @@ static void mcde_dsi_wait_for_video_mode_stop(struct mcde_dsi *d)
}
}
-static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
+/*
+ * Notice that this is called from inside the display controller
+ * and not from the bridge callbacks.
+ */
+void mcde_dsi_disable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
u32 val;
- /* Disable all error interrupts */
- writel(0, d->regs + DSI_VID_MODE_STS_CTL);
-
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
/* Stop video mode */
val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
@@ -980,7 +1035,14 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
mcde_dsi_wait_for_command_mode_stop(d);
}
- /* Stop clocks */
+ /*
+ * Stop the clocks and terminate DSI traffic only at this point, so
+ * that the panel driver can still send shutdown commands over DSI
+ * direct writes before the link goes down.
+ */
+
+ /* Disable all error interrupts */
+ writel(0, d->regs + DSI_VID_MODE_STS_CTL);
clk_disable_unprepare(d->hs_clk);
clk_disable_unprepare(d->lp_clk);
}
@@ -1010,9 +1072,6 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
.attach = mcde_dsi_bridge_attach,
.mode_set = mcde_dsi_bridge_mode_set,
- .disable = mcde_dsi_bridge_disable,
- .enable = mcde_dsi_bridge_enable,
- .pre_enable = mcde_dsi_bridge_pre_enable,
};
static int mcde_dsi_bind(struct device *dev, struct device *master,
@@ -1048,21 +1107,6 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
return PTR_ERR(d->lp_clk);
}
- /* Assert RESET through the PRCMU, active low */
- /* FIXME: which DSI block? */
- regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
- PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
-
- usleep_range(100, 200);
-
- /* De-assert RESET again */
- regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
- PRCM_DSI_SW_RESET_DSI0_SW_RESETN,
- PRCM_DSI_SW_RESET_DSI0_SW_RESETN);
-
- /* Start up the hardware */
- mcde_dsi_start(d);
-
 /* Look for a panel as a child of this node */
for_each_available_child_of_node(dev->of_node, child) {
panel = of_drm_find_panel(child);
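The mcde_dsi.c changes above split the old transfer function in two: mcde_dsi_execute_transfer() programs the command FIFO and waits for completion, while mcde_dsi_host_transfer() keeps the packet setup and wraps the execution in a bounded retry loop. A minimal standalone sketch of that retry pattern follows; do_transfer() and MAX_TRIES are hypothetical stand-ins, not mcde symbols.

#include <stdio.h>

#define MAX_TRIES 3

/* stand-in for mcde_dsi_execute_transfer(); returns < 0 on failure */
static int do_transfer(void)
{
	return -1;	/* pretend the hardware timed out */
}

static int transfer_with_retries(void)
{
	unsigned int tries = 0;
	int ret = -1;

	while (tries < MAX_TRIES) {
		ret = do_transfer();
		if (ret >= 0)
			break;
		tries++;
	}
	if (ret < 0)
		fprintf(stderr, "gave up after %u tries\n", tries);

	return ret;
}

int main(void)
{
	return transfer_with_retries() < 0 ? 1 : 0;
}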
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index aa74aac3cbcc..65cd03a4be29 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -24,6 +24,6 @@ config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
select SND_SOC_HDMI_CODEC if SND_SOC
- select GENERIC_PHY
+ select PHY_MTK_HDMI
help
DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index b7a82ed5788f..77b0fd86063d 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -19,9 +19,6 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi.o \
- mtk_hdmi_ddc.o \
- mtk_mt2701_hdmi_phy.o \
- mtk_mt8173_hdmi_phy.o \
- mtk_hdmi_phy.o
+ mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index d4f0fb7ad312..cf11c4850b40 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -64,7 +64,8 @@ enum mtk_dpi_out_color_format {
struct mtk_dpi {
struct mtk_ddp_comp ddp_comp;
struct drm_encoder encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
void __iomem *regs;
struct device *dev;
struct clk *engine_clk;
@@ -83,9 +84,9 @@ struct mtk_dpi {
int refcount;
};
-static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
+static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b)
{
- return container_of(e, struct mtk_dpi, encoder);
+ return container_of(b, struct mtk_dpi, bridge);
}
enum mtk_dpi_polarity {
@@ -521,50 +522,53 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
return 0;
}
-static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
{
- return true;
+ drm_encoder_cleanup(encoder);
}
-static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
+ .destroy = mtk_dpi_encoder_destroy,
+};
+
+static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+ return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+ &dpi->bridge, flags);
+}
+
+static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
drm_mode_copy(&dpi->mode, adjusted_mode);
}
-static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
{
- struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
mtk_dpi_power_off(dpi);
}
-static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
{
- struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+ struct mtk_dpi *dpi = bridge_to_dpi(bridge);
mtk_dpi_power_on(dpi);
mtk_dpi_set_display_mode(dpi, &dpi->mode);
}
-static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = {
- .mode_fixup = mtk_dpi_encoder_mode_fixup,
- .mode_set = mtk_dpi_encoder_mode_set,
- .disable = mtk_dpi_encoder_disable,
- .enable = mtk_dpi_encoder_enable,
- .atomic_check = mtk_dpi_atomic_check,
+static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
+ .attach = mtk_dpi_bridge_attach,
+ .mode_set = mtk_dpi_bridge_mode_set,
+ .disable = mtk_dpi_bridge_disable,
+ .enable = mtk_dpi_bridge_enable,
};
static void mtk_dpi_start(struct mtk_ddp_comp *comp)
@@ -605,12 +609,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
goto err_unregister;
}
- drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs);
- /* Currently DPI0 is fixed to be driven by OVL1 */
- dpi->encoder.possible_crtcs = BIT(1);
+ dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
- ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
+ ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
@@ -770,11 +772,11 @@ static int mtk_dpi_probe(struct platform_device *pdev)
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- NULL, &dpi->bridge);
+ NULL, &dpi->next_bridge);
if (ret)
return ret;
- dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node);
+ dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
if (comp_id < 0) {
@@ -791,8 +793,15 @@ static int mtk_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpi);
+ dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
+ dpi->bridge.of_node = dev->of_node;
+ dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
+
+ drm_bridge_add(&dpi->bridge);
+
ret = component_add(dev, &mtk_dpi_component_ops);
if (ret) {
+ drm_bridge_remove(&dpi->bridge);
dev_err(dev, "Failed to add component: %d\n", ret);
return ret;
}
@@ -802,7 +811,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
static int mtk_dpi_remove(struct platform_device *pdev)
{
+ struct mtk_dpi *dpi = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &mtk_dpi_component_ops);
+ drm_bridge_remove(&dpi->bridge);
return 0;
}
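The mtk_dpi.c conversion above replaces the encoder helper callbacks with an embedded struct drm_bridge plus a bridge_to_dpi() helper. Recovering the containing object from the embedded member is the standard container_of() pattern; a self-contained sketch with illustrative types (not the DRM structs) follows.

#include <stddef.h>
#include <stdio.h>

struct bridge {
	const void *funcs;
};

struct dpi {
	int id;
	struct bridge bridge;	/* embedded by value, not a pointer */
};

/* same arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dpi *bridge_to_dpi(struct bridge *b)
{
	return container_of(b, struct dpi, bridge);
}

int main(void)
{
	struct dpi d = { .id = 7 };
	struct bridge *b = &d.bridge;	/* what a bridge callback receives */

	printf("recovered id: %d\n", bridge_to_dpi(b)->id);
	return 0;
}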
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 4d29568be3f5..ac038572164d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -481,7 +481,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
- cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+ cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 526648885b97..8eba44be3a8a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -13,6 +13,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include <drm/drm_print.h>
+
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
@@ -412,6 +414,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
+static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+ const enum mtk_ddp_comp_id *path,
+ unsigned int path_len)
+{
+ unsigned int i;
+
+ if (path == NULL)
+ return false;
+
+ for (i = 0U; i < path_len; i++)
+ if (ddp_comp.id == path[i])
+ return true;
+
+ return false;
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
@@ -427,6 +445,26 @@ int mtk_ddp_comp_get_id(struct device_node *node,
return -EINVAL;
}
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+ struct mtk_ddp_comp ddp_comp)
+{
+ struct mtk_drm_private *private = drm->dev_private;
+ unsigned int ret = 0;
+
+ if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path,
+ private->data->main_len))
+ ret = BIT(0);
+ else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
+ private->data->ext_len))
+ ret = BIT(1);
+ else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
+ private->data->third_len))
+ ret = BIT(2);
+ else
+ DRM_INFO("Failed to find comp in ddp table\n");
+
+ return ret;
+}
+
int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
const struct mtk_ddp_comp_funcs *funcs)
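mtk_drm_find_possible_crtc_by_comp() above maps a display component to a CRTC bitmask by scanning the main, external and third pipeline paths in order and returning BIT(0), BIT(1) or BIT(2) for the first path that contains the component. A simplified, self-contained model of that lookup, using plain ints in place of the mtk types, follows.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

static bool comp_in_path(int comp_id, const int *path, unsigned int len)
{
	unsigned int i;

	if (!path)
		return false;
	for (i = 0; i < len; i++)
		if (path[i] == comp_id)
			return true;
	return false;
}

static unsigned int possible_crtcs(int comp_id, const int *const paths[],
				   const unsigned int lens[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (comp_in_path(comp_id, paths[i], lens[i]))
			return BIT(i);	/* CRTC index == path index */
	return 0;	/* not found: no CRTC can drive this component */
}

int main(void)
{
	const int main_path[] = { 1, 2, 3 }, ext_path[] = { 4, 5 };
	const int *const paths[] = { main_path, ext_path };
	const unsigned int lens[] = { 3, 2 };

	printf("possible_crtcs(5) = 0x%x\n", possible_crtcs(5, paths, lens, 2));
	return 0;
}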
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index debe36395fe7..1d9e00b69462 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -202,6 +202,8 @@ static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+ struct mtk_ddp_comp ddp_comp);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
const struct mtk_ddp_comp_funcs *funcs);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 2d982740b1a4..59c85c63b7cc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -73,6 +73,19 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = {
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DSI0,
+};
+
static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
@@ -126,6 +139,14 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
.shadow_register = true,
};
+static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
+ .main_path = mt7623_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
+ .ext_path = mt7623_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext),
+ .shadow_register = true,
+};
+
static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
.main_path = mt2712_mtk_ddp_main,
.main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
@@ -424,6 +445,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
static const struct of_device_id mtk_drm_of_ids[] = {
{ .compatible = "mediatek,mt2701-mmsys",
.data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt7623-mmsys",
+ .data = &mt7623_mmsys_driver_data},
{ .compatible = "mediatek,mt2712-mmsys",
.data = &mt2712_mmsys_driver_data},
{ .compatible = "mediatek,mt8173-mmsys",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 6190cc3b7b0d..0583e557ad37 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -212,46 +212,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
struct mtk_drm_gem_obj *mtk_gem;
- int ret;
- struct scatterlist *s;
- unsigned int i;
- dma_addr_t expected;
- mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+ /* check if the entries in the sg_table are contiguous */
+ if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
+ DRM_ERROR("sg_table is not contiguous");
+ return ERR_PTR(-EINVAL);
+ }
+ mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
if (IS_ERR(mtk_gem))
return ERR_CAST(mtk_gem);
- expected = sg_dma_address(sg->sgl);
- for_each_sg(sg->sgl, s, sg->nents, i) {
- if (!sg_dma_len(s))
- break;
-
- if (sg_dma_address(s) != expected) {
- DRM_ERROR("sg_table is not contiguous");
- ret = -EINVAL;
- goto err_gem_free;
- }
- expected = sg_dma_address(s) + sg_dma_len(s);
- }
-
mtk_gem->dma_addr = sg_dma_address(sg->sgl);
mtk_gem->sg = sg;
return &mtk_gem->base;
-
-err_gem_free:
- kfree(mtk_gem);
- return ERR_PTR(ret);
}
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
struct sg_table *sgt;
- struct sg_page_iter iter;
unsigned int npages;
- unsigned int i = 0;
if (mtk_gem->kvaddr)
return mtk_gem->kvaddr;
@@ -265,11 +247,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
if (!mtk_gem->pages)
goto out;
- for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
- mtk_gem->pages[i++] = sg_page_iter_page(&iter);
- if (i > npages)
- break;
- }
+ drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages);
+
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
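The mtk_drm_gem.c hunk above replaces an open-coded scatterlist walk with drm_prime_get_contiguous_size(). Conceptually the helper sums entry lengths for as long as each entry starts exactly where the previous one ended, and the import is rejected when that contiguous prefix is shorter than the dma-buf. A plain-C model of the computation, with a simplified entry type, follows.

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t dma_addr;
	uint64_t len;
};

static uint64_t contiguous_size(const struct entry *e, unsigned int n)
{
	uint64_t expected = n ? e[0].dma_addr : 0;
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (e[i].dma_addr != expected)
			break;		/* gap: stop counting */
		total += e[i].len;
		expected = e[i].dma_addr + e[i].len;
	}
	return total;
}

int main(void)
{
	const struct entry sg[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x1000 },
	};

	/* prints 0x2000: the third entry is discontiguous, so not counted */
	printf("contiguous size: 0x%llx\n",
	       (unsigned long long)contiguous_size(sg, 3));
	return 0;
}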
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 80b7a082e874..4a188a942c38 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -969,11 +969,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
return ret;
}
- /*
- * Currently display data paths are statically assigned to a crtc each.
- * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
- */
- dsi->encoder.possible_crtcs = 1;
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index a97725680d4e..97a1ff529a1d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
@@ -145,11 +146,16 @@ struct hdmi_audio_param {
struct hdmi_codec_params codec_params;
};
+struct mtk_hdmi_conf {
+ bool tz_disabled;
+};
+
struct mtk_hdmi {
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector conn;
struct device *dev;
+ const struct mtk_hdmi_conf *conf;
struct phy *phy;
struct device *cec_dev;
struct i2c_adapter *ddc_adpt;
@@ -234,7 +240,6 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
{
struct arm_smccc_res res;
- struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy);
/*
* MT8173 HDMI hardware has an output control bit to enable/disable HDMI
@@ -242,7 +247,7 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
* The ARM trusted firmware provides an API for the HDMI driver to set
* this control bit to enable HDMI output in supervisor mode.
*/
- if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled)
+ if (hdmi->conf && hdmi->conf->tz_disabled)
regmap_update_bits(hdmi->sys_regmap,
hdmi->sys_offset + HDMI_SYS_CFG20,
0x80008005, enable ? 0x80000005 : 0x8000);
@@ -1733,6 +1738,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
return -ENOMEM;
hdmi->dev = dev;
+ hdmi->conf = of_device_get_match_data(dev);
ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
if (ret)
@@ -1813,8 +1819,16 @@ static int mtk_hdmi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
mtk_hdmi_suspend, mtk_hdmi_resume);
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
+ .tz_disabled = true,
+};
+
static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
- { .compatible = "mediatek,mt8173-hdmi", },
+ { .compatible = "mediatek,mt2701-hdmi",
+ .data = &mtk_hdmi_conf_mt2701,
+ },
+ { .compatible = "mediatek,mt8173-hdmi",
+ },
{}
};
@@ -1829,7 +1843,6 @@ static struct platform_driver mtk_hdmi_driver = {
};
static struct platform_driver * const mtk_hdmi_drivers[] = {
- &mtk_hdmi_phy_driver,
&mtk_hdmi_ddc_driver,
&mtk_cec_driver,
&mtk_hdmi_driver,
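The mtk_hdmi.c change above moves the tz_disabled quirk out of the PHY and into a per-compatible struct mtk_hdmi_conf, fetched once at probe via of_device_get_match_data(). The underlying pattern is an OF match table whose entries carry a pointer to const configuration data; a small sketch with illustrative names (not the mtk symbols) follows.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct chip_conf {
	bool tz_disabled;
};

static const struct chip_conf conf_a = { .tz_disabled = true };

struct of_entry {
	const char *compatible;
	const void *data;
};

static const struct of_entry of_ids[] = {
	{ "vendor,chip-a", &conf_a },
	{ "vendor,chip-b", NULL },	/* no quirks: NULL conf */
};

/* stand-in for of_device_get_match_data() */
static const struct chip_conf *get_conf(const char *compat)
{
	unsigned int i;

	for (i = 0; i < sizeof(of_ids) / sizeof(of_ids[0]); i++)
		if (!strcmp(of_ids[i].compatible, compat))
			return of_ids[i].data;
	return NULL;
}

int main(void)
{
	const struct chip_conf *conf = get_conf("vendor,chip-a");

	/* mirror the driver's NULL-tolerant test: conf && conf->tz_disabled */
	printf("tz_disabled: %d\n", conf && conf->tz_disabled);
	return 0;
}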
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
index bb3653de6bd1..472bf141c92b 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.h
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -5,7 +5,6 @@
*/
#ifndef _MTK_HDMI_CTRL_H
#define _MTK_HDMI_CTRL_H
-#include "mtk_hdmi_phy.h"
struct platform_driver;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 93be766715c9..eec59658a938 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,13 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MGAG200
- tristate "Kernel modesetting driver for MGA G200 server engines"
+ tristate "Matrox G200"
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
- This is a KMS driver for the MGA G200 server chips, it
- does not support the original MGA G200 or any of the desktop
- chips. It requires 0.3.0 of the modesetting userspace driver,
- and a version of mga driver that will fail on KMS enabled
- devices.
-
+ This is a KMS driver for Matrox G200 chips. It supports the original
+ MGA G200 desktop chips as well as the server variants. It requires
+ version 0.3.0 of the modesetting userspace driver, and a version of
+ the mga driver that will fail on KMS-enabled devices.
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index e19660f4a637..771b26aeee19 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -9,6 +9,7 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -36,6 +37,7 @@ static struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+ .gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
};
@@ -43,18 +45,66 @@ static struct drm_driver mgag200_driver = {
* DRM device
*/
-static int mgag200_device_init(struct mga_device *mdev, unsigned long flags)
+static bool mgag200_has_sgram(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
- int ret, option;
+ u32 option;
+ int ret;
- mdev->flags = mgag200_flags_from_driver_data(flags);
- mdev->type = mgag200_type_from_driver_data(flags);
+ ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret))
+ return false;
- pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
- mdev->has_sdram = !(option & (1 << 14));
+ return !!(option & PCI_MGA_OPTION_HARDPWMSK);
+}
- /* BAR 0 is the framebuffer, BAR 1 contains registers */
+static int mgag200_regs_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ u32 option, option2;
+ u8 crtcext3;
+
+ switch (mdev->type) {
+ case G200_PCI:
+ case G200_AGP:
+ if (mgag200_has_sgram(mdev))
+ option = 0x4049cd21;
+ else
+ option = 0x40499121;
+ option2 = 0x00008000;
+ break;
+ case G200_SE_A:
+ case G200_SE_B:
+ option = 0x40049120;
+ if (mgag200_has_sgram(mdev))
+ option |= PCI_MGA_OPTION_HARDPWMSK;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ case G200_EW3:
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ case G200_EH3:
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ default:
+ option = 0;
+ option2 = 0;
+ }
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ /* BAR 1 contains registers */
mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
@@ -68,12 +118,163 @@ static int mgag200_device_init(struct mga_device *mdev, unsigned long flags)
if (mdev->rmmio == NULL)
return -ENOMEM;
- /* stash G200 SE model number for later use */
- if (IS_G200_SE(mdev)) {
- mdev->unique_rev_id = RREG32(0x1e24);
- drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
- mdev->unique_rev_id);
+ RREG_ECRT(0x03, crtcext3);
+ crtcext3 |= MGAREG_CRTCEXT3_MGAMODE;
+ WREG_ECRT(0x03, crtcext3);
+
+ return 0;
+}
+
+static void mgag200_g200_interpret_bios(struct mga_device *mdev,
+ const unsigned char *bios,
+ size_t size)
+{
+ static const char matrox[] = {'M', 'A', 'T', 'R', 'O', 'X'};
+ static const unsigned int expected_length[6] = {
+ 0, 64, 64, 64, 128, 128
+ };
+ struct drm_device *dev = &mdev->base;
+ const unsigned char *pins;
+ unsigned int pins_len, version;
+ int offset;
+ int tmp;
+
+ /* Test for MATROX string. */
+ if (size < 45 + sizeof(matrox))
+ return;
+ if (memcmp(&bios[45], matrox, sizeof(matrox)) != 0)
+ return;
+
+ /* Get the PInS offset. */
+ if (size < MGA_BIOS_OFFSET + 2)
+ return;
+ offset = (bios[MGA_BIOS_OFFSET + 1] << 8) | bios[MGA_BIOS_OFFSET];
+
+ /* Get PInS data structure. */
+
+ if (size < offset + 6)
+ return;
+ pins = bios + offset;
+ if (pins[0] == 0x2e && pins[1] == 0x41) {
+ version = pins[5];
+ pins_len = pins[2];
+ } else {
+ version = 1;
+ pins_len = pins[0] + (pins[1] << 8);
+ }
+
+ if (version < 1 || version > 5) {
+ drm_warn(dev, "Unknown BIOS PInS version: %d\n", version);
+ return;
+ }
+ if (pins_len != expected_length[version]) {
+ drm_warn(dev, "Unexpected BIOS PInS size: %d expected: %d\n",
+ pins_len, expected_length[version]);
+ return;
}
+ if (size < offset + pins_len)
+ return;
+
+ drm_dbg_kms(dev, "MATROX BIOS PInS version %d size: %d found\n",
+ version, pins_len);
+
+ /* Extract the clock values */
+
+ switch (version) {
+ case 1:
+ tmp = pins[24] + (pins[25] << 8);
+ if (tmp)
+ mdev->model.g200.pclk_max = tmp * 10;
+ break;
+ case 2:
+ if (pins[41] != 0xff)
+ mdev->model.g200.pclk_max = (pins[41] + 100) * 1000;
+ break;
+ case 3:
+ if (pins[36] != 0xff)
+ mdev->model.g200.pclk_max = (pins[36] + 100) * 1000;
+ if (pins[52] & 0x20)
+ mdev->model.g200.ref_clk = 14318;
+ break;
+ case 4:
+ if (pins[39] != 0xff)
+ mdev->model.g200.pclk_max = pins[39] * 4 * 1000;
+ if (pins[92] & 0x01)
+ mdev->model.g200.ref_clk = 14318;
+ break;
+ case 5:
+ tmp = pins[4] ? 8000 : 6000;
+ if (pins[123] != 0xff)
+ mdev->model.g200.pclk_min = pins[123] * tmp;
+ if (pins[38] != 0xff)
+ mdev->model.g200.pclk_max = pins[38] * tmp;
+ if (pins[110] & 0x01)
+ mdev->model.g200.ref_clk = 14318;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mgag200_g200_init_refclk(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ unsigned char __iomem *rom;
+ unsigned char *bios;
+ size_t size;
+
+ mdev->model.g200.pclk_min = 50000;
+ mdev->model.g200.pclk_max = 230000;
+ mdev->model.g200.ref_clk = 27050;
+
+ rom = pci_map_rom(dev->pdev, &size);
+ if (!rom)
+ return;
+
+ bios = vmalloc(size);
+ if (!bios)
+ goto out;
+ memcpy_fromio(bios, rom, size);
+
+ if (size != 0 && bios[0] == 0x55 && bios[1] == 0xaa)
+ mgag200_g200_interpret_bios(mdev, bios, size);
+
+ drm_dbg_kms(dev, "pclk_min: %ld pclk_max: %ld ref_clk: %ld\n",
+ mdev->model.g200.pclk_min, mdev->model.g200.pclk_max,
+ mdev->model.g200.ref_clk);
+
+ vfree(bios);
+out:
+ pci_unmap_rom(dev->pdev, rom);
+}
+
+static void mgag200_g200se_init_unique_id(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+
+ /* stash G200 SE model number for later use */
+ mdev->model.g200se.unique_rev_id = RREG32(0x1e24);
+
+ drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+ mdev->model.g200se.unique_rev_id);
+}
+
+static int mgag200_device_init(struct mga_device *mdev, unsigned long flags)
+{
+ struct drm_device *dev = &mdev->base;
+ int ret;
+
+ mdev->flags = mgag200_flags_from_driver_data(flags);
+ mdev->type = mgag200_type_from_driver_data(flags);
+
+ ret = mgag200_regs_init(mdev);
+ if (ret)
+ return ret;
+
+ if (mdev->type == G200_PCI || mdev->type == G200_AGP)
+ mgag200_g200_init_refclk(mdev);
+ else if (IS_G200_SE(mdev))
+ mgag200_g200se_init_unique_id(mdev);
ret = mgag200_mm_init(mdev);
if (ret)
@@ -116,6 +317,8 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
*/
static const struct pci_device_id mgag200_pciidlist[] = {
+ { PCI_VENDOR_ID_MATROX, 0x520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_PCI },
+ { PCI_VENDOR_ID_MATROX, 0x521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_AGP },
{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
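mgag200_g200_interpret_bios() above locates the PInS block by reading the little-endian 16-bit offset stored at MGA_BIOS_OFFSET, distinguishes the v1 header (16-bit length first) from the v2+ header (0x2e 0x41 magic), and validates the version against a table of expected lengths. A minimal standalone version of that header parse follows; the I/O and error reporting are simplified, and BIOS_OFFSET is a local stand-in.

#include <stddef.h>
#include <stdint.h>

#define BIOS_OFFSET 0x7ffc	/* mirrors MGA_BIOS_OFFSET */

static const unsigned int expected_length[6] = { 0, 64, 64, 64, 128, 128 };

/* returns 0 and fills version/len on success, -1 on a malformed image */
static int find_pins(const uint8_t *bios, size_t size,
		     unsigned int *version, unsigned int *len)
{
	const uint8_t *pins;
	size_t offset;

	if (size < BIOS_OFFSET + 2)
		return -1;
	offset = bios[BIOS_OFFSET] | ((size_t)bios[BIOS_OFFSET + 1] << 8);
	if (size < offset + 6)
		return -1;
	pins = bios + offset;

	if (pins[0] == 0x2e && pins[1] == 0x41) {	/* v2+ header */
		*version = pins[5];
		*len = pins[2];
	} else {					/* v1: length first */
		*version = 1;
		*len = pins[0] | (pins[1] << 8);
	}

	if (*version < 1 || *version > 5)
		return -1;
	if (*len != expected_length[*version] || size < offset + *len)
		return -1;
	return 0;
}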
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 3817520bfefc..749a075fe9e4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -38,6 +38,8 @@
#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define MGA_BIOS_OFFSET 0x7ffc
+
#define ATTR_INDEX 0x1fc0
#define ATTR_DATA 0x1fc1
@@ -129,6 +131,8 @@ struct mga_mc {
};
enum mga_type {
+ G200_PCI,
+ G200_AGP,
G200_SE_A,
G200_SE_B,
G200_WB,
@@ -161,14 +165,23 @@ struct mga_device {
size_t vram_fb_available;
enum mga_type type;
- int has_sdram;
int bpp_shifts[4];
int fb_mtrr;
- /* SE model number stored in reg 0x1e24 */
- u32 unique_rev_id;
+ union {
+ struct {
+ long ref_clk;
+ long pclk_min;
+ long pclk_max;
+ } g200;
+ struct {
+ /* SE model number stored in reg 0x1e24 */
+ u32 unique_rev_id;
+ } g200se;
+ } model;
+
struct mga_connector connector;
struct drm_simple_display_pipe display_pipe;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index 7b69392bcb89..641f1aa992be 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -90,9 +90,17 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr)
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
+ u8 misc;
resource_size_t start, len;
int ret;
+ WREG_ECRT(0x04, 0x00);
+
+ misc = RREG8(MGA_MISC_IN);
+ misc |= MGAREG_MISC_RAMMAPEN |
+ MGAREG_MISC_HIGH_PG_SEL;
+ WREG8(MGA_MISC_OUT, misc);
+
/* BAR 0 is VRAM */
start = pci_resource_start(dev->pdev, 0);
len = pci_resource_len(dev->pdev, 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index e0d037a7413c..38672f9e5c4f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -9,7 +9,6 @@
*/
#include <linux/delay.h>
-#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
@@ -109,10 +108,82 @@ static inline void mga_wait_busy(struct mga_device *mdev)
} while ((status & 0x01) && time_before(jiffies, timeout));
}
+/*
+ * PLL setup
+ */
+
+static int mgag200_g200_set_plls(struct mga_device *mdev, long clock)
+{
+ struct drm_device *dev = &mdev->base;
+ const int post_div_max = 7;
+ const int in_div_min = 1;
+ const int in_div_max = 6;
+ const int feed_div_min = 7;
+ const int feed_div_max = 127;
+ u8 testm, testn;
+ u8 n = 0, m = 0, p, s;
+ long f_vco;
+ long computed;
+ long delta, tmp_delta;
+ long ref_clk = mdev->model.g200.ref_clk;
+ long p_clk_min = mdev->model.g200.pclk_min;
+ long p_clk_max = mdev->model.g200.pclk_max;
+
+ if (clock > p_clk_max) {
+ drm_err(dev, "Pixel Clock %ld too high\n", clock);
+ return 1;
+ }
+
+ if (clock < p_clk_min >> 3)
+ clock = p_clk_min >> 3;
+
+ f_vco = clock;
+ for (p = 0;
+ p <= post_div_max && f_vco < p_clk_min;
+ p = (p << 1) + 1, f_vco <<= 1)
+ ;
+
+ delta = clock;
+
+ for (testm = in_div_min; testm <= in_div_max; testm++) {
+ for (testn = feed_div_min; testn <= feed_div_max; testn++) {
+ computed = ref_clk * (testn + 1) / (testm + 1);
+ if (computed < f_vco)
+ tmp_delta = f_vco - computed;
+ else
+ tmp_delta = computed - f_vco;
+ if (tmp_delta < delta) {
+ delta = tmp_delta;
+ m = testm;
+ n = testn;
+ }
+ }
+ }
+ f_vco = ref_clk * (n + 1) / (m + 1);
+ if (f_vco < 100000)
+ s = 0;
+ else if (f_vco < 140000)
+ s = 1;
+ else if (f_vco < 180000)
+ s = 2;
+ else
+ s = 3;
+
+ drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
+ clock, f_vco, m, n, p, s);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3)));
+
+ return 0;
+}
+
#define P_ARRAY_SIZE 9
static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
{
+ u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
unsigned int vcomax, vcomin, pllreffreq;
unsigned int delta, tmpdelta, permitteddelta;
unsigned int testp, testm, testn;
@@ -122,7 +193,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
unsigned int fvv;
unsigned int i;
- if (mdev->unique_rev_id <= 0x03) {
+ if (unique_rev_id <= 0x03) {
m = n = p = 0;
vcomax = 320000;
@@ -220,7 +291,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
WREG_DAC(MGA1064_PIX_PLLC_N, n);
WREG_DAC(MGA1064_PIX_PLLC_P, p);
- if (mdev->unique_rev_id >= 0x04) {
+ if (unique_rev_id >= 0x04) {
WREG_DAC(0x1a, 0x09);
msleep(20);
WREG_DAC(0x1a, 0x01);
@@ -717,6 +788,9 @@ static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock)
u8 misc;
switch(mdev->type) {
+ case G200_PCI:
+ case G200_AGP:
+ return mgag200_g200_set_plls(mdev, clock);
case G200_SE_A:
case G200_SE_B:
return mga_g200se_set_plls(mdev, clock);
@@ -877,45 +951,6 @@ static void mgag200_set_startadd(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
-static void mgag200_set_pci_regs(struct mga_device *mdev)
-{
- uint32_t option = 0, option2 = 0;
- struct drm_device *dev = &mdev->base;
-
- switch (mdev->type) {
- case G200_SE_A:
- case G200_SE_B:
- if (mdev->has_sdram)
- option = 0x40049120;
- else
- option = 0x4004d120;
- option2 = 0x00008000;
- break;
- case G200_WB:
- case G200_EW3:
- option = 0x41049120;
- option2 = 0x0000b000;
- break;
- case G200_EV:
- option = 0x00000120;
- option2 = 0x0000b000;
- break;
- case G200_EH:
- case G200_EH3:
- option = 0x00000120;
- option2 = 0x0000b000;
- break;
- case G200_ER:
- break;
- }
-
- if (option)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
-
- if (option2)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
-}
-
static void mgag200_set_dac_regs(struct mga_device *mdev)
{
size_t i;
@@ -933,6 +968,12 @@ static void mgag200_set_dac_regs(struct mga_device *mdev)
};
switch (mdev->type) {
+ case G200_PCI:
+ case G200_AGP:
+ dacvalue[MGA1064_SYS_PLL_M] = 0x04;
+ dacvalue[MGA1064_SYS_PLL_N] = 0x2D;
+ dacvalue[MGA1064_SYS_PLL_P] = 0x19;
+ break;
case G200_SE_A:
case G200_SE_B:
dacvalue[MGA1064_VREF_CTL] = 0x03;
@@ -986,9 +1027,8 @@ static void mgag200_set_dac_regs(struct mga_device *mdev)
static void mgag200_init_regs(struct mga_device *mdev)
{
- u8 crtc11, crtcext3, crtcext4, misc;
+ u8 crtc11, misc;
- mgag200_set_pci_regs(mdev);
mgag200_set_dac_regs(mdev);
WREG_SEQ(2, 0x0f);
@@ -1002,14 +1042,6 @@ static void mgag200_init_regs(struct mga_device *mdev)
WREG_CRT(14, 0);
WREG_CRT(15, 0);
- RREG_ECRT(0x03, crtcext3);
-
- crtcext3 |= BIT(7); /* enable MGA mode */
- crtcext4 = 0x00;
-
- WREG_ECRT(0x03, crtcext3);
- WREG_ECRT(0x04, crtcext4);
-
RREG_CRT(0x11, crtc11);
crtc11 &= ~(MGAREG_CRTC11_CRTCPROTECT |
MGAREG_CRTC11_VINTEN |
@@ -1023,9 +1055,7 @@ static void mgag200_init_regs(struct mga_device *mdev)
WREG_ECRT(0x34, 0x5);
misc = RREG8(MGA_MISC_IN);
- misc |= MGAREG_MISC_IOADSEL |
- MGAREG_MISC_RAMMAPEN |
- MGAREG_MISC_HIGH_PG_SEL;
+ misc |= MGAREG_MISC_IOADSEL;
WREG8(MGA_MISC_OUT, misc);
}
@@ -1234,12 +1264,13 @@ static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
const struct drm_display_mode *mode,
const struct drm_framebuffer *fb)
{
+ u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
unsigned int hiprilvl;
u8 crtcext6;
- if (mdev->unique_rev_id >= 0x04) {
+ if (unique_rev_id >= 0x04) {
hiprilvl = 0;
- } else if (mdev->unique_rev_id >= 0x02) {
+ } else if (unique_rev_id >= 0x02) {
unsigned int bpp;
unsigned long mb;
@@ -1264,7 +1295,7 @@ static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
else
hiprilvl = 5;
- } else if (mdev->unique_rev_id >= 0x01) {
+ } else if (unique_rev_id >= 0x01) {
hiprilvl = 3;
} else {
hiprilvl = 4;
@@ -1388,7 +1419,9 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
int bpp = 32;
if (IS_G200_SE(mdev)) {
- if (mdev->unique_rev_id == 0x01) {
+ u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
+
+ if (unique_rev_id == 0x01) {
if (mode->hdisplay > 1600)
return MODE_VIRTUAL_X;
if (mode->vdisplay > 1200)
@@ -1396,7 +1429,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (24400 * 1024))
return MODE_BANDWIDTH;
- } else if (mdev->unique_rev_id == 0x02) {
+ } else if (unique_rev_id == 0x02) {
if (mode->hdisplay > 1920)
return MODE_VIRTUAL_X;
if (mode->vdisplay > 1200)
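mgag200_g200_set_plls() above first shifts the target clock up into the VCO range by picking a post divider, then exhaustively scans the input divider m (1..6) and feedback divider n (7..127) for the pair whose ref_clk * (n + 1) / (m + 1) lands closest to the target VCO frequency. The core of that search, isolated as plain C with the driver's divider bounds:

#include <stdio.h>
#include <stdlib.h>

/* returns the smallest |f_vco - target| found; best m/n via out params */
static long pll_search(long ref_clk, long target,
		       unsigned int *best_m, unsigned int *best_n)
{
	long delta = target;	/* worst-case starting distance */
	unsigned int m, n;

	for (m = 1; m <= 6; m++) {		/* input divider */
		for (n = 7; n <= 127; n++) {	/* feedback divider */
			long f = ref_clk * (n + 1) / (m + 1);
			long d = labs(f - target);

			if (d < delta) {
				delta = d;
				*best_m = m;
				*best_n = n;
			}
		}
	}
	return delta;
}

int main(void)
{
	unsigned int m = 0, n = 0;
	long delta = pll_search(27050, 230000, &m, &n);

	printf("m=%u n=%u delta=%ld kHz\n", m, n, delta);
	return 0;
}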
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index c3b7bcad52ed..977be0565c06 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -256,6 +256,8 @@
#define MGAREG_CRTCEXT1_VSYNCOFF BIT(5)
#define MGAREG_CRTCEXT1_HSYNCOFF BIT(4)
+#define MGAREG_CRTCEXT3_MGAMODE BIT(7)
+
/* Cursor X and Y position */
#define MGA_CURPOSXL 0x3c0c
#define MGA_CURPOSXH 0x3c0d
@@ -282,6 +284,8 @@
#define PCI_MGA_OPTION2 0x50
#define PCI_MGA_OPTION3 0x54
+#define PCI_MGA_OPTION_HARDPWMSK BIT(14)
+
#define RAMDAC_OFFSET 0x3c00
/* TVP3026 direct registers */
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 6deaa7d01654..e5816b498494 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -6,8 +6,8 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
- depends on INTERCONNECT || !INTERCONNECT
depends on QCOM_OCMEM || QCOM_OCMEM=n
+ select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
@@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP
help
Choose this option to enable HDCP state machine
+config DRM_MSM_DP
+ bool "Enable DisplayPort support in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Compile in support for the DisplayPort driver in the MSM DRM
+ driver. This option enables external DP display support; the DP
+ display can be either the primary or a secondary display on the
+ device.
+
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
@@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY
default y
help
Choose this option if DSI PHY on SDM845 is used on the platform.
+
+config DRM_MSM_DSI_7NM_PHY
+ bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SM8150/SM8250 is used on the
+ platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 42f8aae28b31..340682cd0f32 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -2,6 +2,7 @@
ccflags-y := -I $(srctree)/$(src)
ccflags-y += -I $(srctree)/$(src)/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
msm-y := \
adreno/adreno_device.o \
@@ -95,10 +96,23 @@ msm-y := \
msm_gpu_tracepoints.o \
msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ dp/dp_debug.o
msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+msm-$(CONFIG_DRM_MSM_DP) += dp/dp_aux.o \
+ dp/dp_catalog.o \
+ dp/dp_ctrl.o \
+ dp/dp_display.o \
+ dp/dp_drm.o \
+ dp/dp_hpd.o \
+ dp/dp_link.o \
+ dp/dp_panel.o \
+ dp/dp_parser.o \
+ dp/dp_power.o \
+ dp/dp_audio.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
@@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
@@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 48fa49f69d6d..7e82c41a85f1 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -10,6 +10,48 @@ extern bool hang_debug;
static void a2xx_dump(struct msm_gpu *gpu);
static bool a2xx_idle(struct msm_gpu *gpu);
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (priv->lastctx == submit->queue->ctx)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
static bool a2xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- gpu->funcs->flush(gpu, ring);
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a2xx_idle(gpu);
}
@@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
return aspace;
}
-/* Register offset defines for A2XX - copy of A3XX */
-static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
static const struct adreno_gpu_funcs funcs = {
.base = {
@@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a2xx_recover,
- .submit = adreno_submit,
- .flush = adreno_flush,
+ .submit = a2xx_submit,
.active_ring = adreno_active_ring,
.irq = a2xx_irq,
.destroy = a2xx_destroy,
@@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = a2xx_create_address_space,
+ .get_rptr = a2xx_get_rptr,
},
};
@@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->registers = a220_registers;
- adreno_gpu->reg_offsets = a2xx_register_offsets;
-
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
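The a2xx changes above (and the matching a3xx/a4xx hunks below) pull command-stream emission out of the generic adreno_submit() into per-generation submit() hooks, and replace the register-offset indirection table with a direct get_rptr() callback that refreshes the shared read-pointer shadow from the hardware. A sketch of that callback shape, with stand-in types, an illustrative register offset, and a stubbed register read:

#include <stdint.h>
#include <stdio.h>

struct memptrs {
	uint32_t rptr;	/* shadow shared with the rest of the driver */
};

struct ring {
	struct memptrs *memptrs;
};

#define REG_CP_RB_RPTR 0x1c4	/* illustrative offset, not a real one */

/* stand-in for gpu_read(); a real implementation does MMIO */
static uint32_t reg_read(uint32_t reg)
{
	(void)reg;
	return 42;
}

static uint32_t get_rptr(struct ring *ring)
{
	ring->memptrs->rptr = reg_read(REG_CP_RB_RPTR);
	return ring->memptrs->rptr;
}

int main(void)
{
	struct memptrs mp = { 0 };
	struct ring ring = { .memptrs = &mp };

	printf("rptr = %u\n", get_rptr(&ring));
	return 0;
}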
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index f6471145a7a6..f29c77d9cd42 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -28,6 +28,61 @@ extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
static bool a3xx_idle(struct msm_gpu *gpu);
+static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (priv->lastctx == submit->queue->ctx)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+
+#if 0
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+#endif
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
static bool a3xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu, ring);
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a3xx_idle(gpu);
}
@@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
-/* Register offset defines for A3XX */
-static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
static const struct adreno_gpu_funcs funcs = {
.base = {
@@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
- .submit = adreno_submit,
- .flush = adreno_flush,
+ .submit = a3xx_submit,
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
@@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a3xx_get_rptr,
},
};
@@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
adreno_gpu->registers = a3xx_registers;
- adreno_gpu->reg_offsets = a3xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 954753600625..2b93b33b05e4 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -22,6 +22,54 @@ extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
static bool a4xx_idle(struct msm_gpu *gpu);
+static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (priv->lastctx == submit->queue->ctx)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+}
+
/*
* a4xx_enable_hwcg() - Program the clock control registers
* @device: The adreno device pointer
@@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu, ring);
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
return a4xx_idle(gpu);
}
@@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
-/* Register offset defines for A4XX, in order of enum adreno_regs */
-static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
- REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-};
-
static void a4xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
@@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
+static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
- .submit = adreno_submit,
- .flush = adreno_flush,
+ .submit = a4xx_submit,
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
@@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
};
@@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
a4xx_registers;
- adreno_gpu->reg_offsets = a4xx_register_offsets;
/* if needed, allocate gmem: */
- if (adreno_is_a4xx(adreno_gpu)) {
- ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
- &a4xx_gpu->ocmem);
- if (ret)
- goto fail;
- }
+ ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+ &a4xx_gpu->ocmem);
+ if (ret)
+ goto fail;
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 68eddac7771c..fc2c905b6c9e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -11,7 +11,7 @@
#include "a5xx_gpu.h"
-static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
}
-
- return 0;
}
-static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
}
-
- return 0;
}
-static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x\n", i,
gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
}
-
- return 0;
}
-static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
{
int i;
@@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
val[0], val[1], val[2], val[3]);
}
-
- return 0;
}
static int show(struct seq_file *m, void *arg)
@@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg)
struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_printer p = drm_seq_file_printer(m);
- int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
node->info_ent->data;
- return show(priv->gpu, &p);
+ show(priv->gpu, &p);
+ return 0;
}
#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 91726da82ed6..d6804a802355 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ bool sync)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
+ /*
+ * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+ * the rptr shadow
+ */
+ if (a5xx_gpu->has_whereami && sync) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+ }
+
spin_lock_irqsave(&ring->lock, flags);
/* Copy the shadow to the actual register */
@@ -43,8 +54,7 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
-static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
@@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == ctx)
+ if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
- a5xx_flush(gpu, ring);
+ a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
/* we might not necessarily have a cmd from userspace to
@@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
msm_gpu_retire(gpu);
}
-static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
priv->lastctx = NULL;
- a5xx_submit_in_rb(gpu, submit, ctx);
+ a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == ctx)
+ if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* Set bit 0 to trigger an interrupt on preempt complete */
OUT_RING(ring, 0x01);
- a5xx_flush(gpu, ring);
+ /* A WHERE_AM_I packet is not needed after a YIELD */
+ a5xx_flush(gpu, ring, false);
/* Check to see if we need to start preemption */
a5xx_preempt_trigger(gpu);
@@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu, ring);
+ a5xx_flush(gpu, ring, true);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
@@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
OUT_RING(ring, 0x01);
OUT_RING(ring, 0x01);
- gpu->funcs->flush(gpu, ring);
+	/* The WHERE_AM_I packet is not needed after a YIELD is issued */
+ a5xx_flush(gpu, ring, false);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+ struct drm_gem_object *obj)
+{
+ u32 *buf = msm_gem_get_vaddr_active(obj);
+
+ if (IS_ERR(buf))
+ return;
+
+ /*
+	 * A lowest nibble of 0xa in the first dword indicates that this
+	 * microcode has been patched. The version lives in the third dword
+	 * (buf[2]); we only care about the patchlevel, its lowest nibble.
+ */
+ if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+ a5xx_gpu->has_whereami = true;
+
+ msm_gem_put_vaddr(obj);
+}
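(To make the a5xx_ucode_check_version() test concrete, a worked example with invented dword values, illustrative only:

	/* Hypothetical patched PFP header:
	 *   buf[0] = 0x016dd00a  ->  (buf[0] & 0xf) == 0xa  (patched image)
	 *   buf[2] = 0x00000001  ->  (buf[2] & 0xf) >= 1    (patchlevel 1)
	 * so has_whereami is set for this firmware. */
)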
+
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+ a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
+ /*
+ * If the microcode supports the WHERE_AM_I opcode then we can use that
+ * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+ * can't safely use the RPTR shadow or preemption. In either case, the
+ * RPTR shadow should be disabled in hardware.
+ */
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+ /* Disable preemption if WHERE_AM_I isn't available */
+ if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+ } else {
+ /* Create a privileged buffer for the RPTR shadow */
+ if (!a5xx_gpu->shadow_bo) {
+ a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a5xx_gpu->shadow_bo,
+ &a5xx_gpu->shadow_iova);
+
+ if (IS_ERR(a5xx_gpu->shadow))
+ return PTR_ERR(a5xx_gpu->shadow);
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+ }
+
a5xx_preempt_hw_init(gpu);
/* Disable the interrupts through the initial bringup stage */
@@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
- gpu->funcs->flush(gpu, gpu->rb[0]);
+ a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
@@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb[0], 0x00000000);
- gpu->funcs->flush(gpu, gpu->rb[0]);
+ a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else if (ret == -ENODEV) {
@@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
+ if (a5xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->shadow_bo);
+ }
+
adreno_gpu_cleanup(adreno_gpu);
kfree(a5xx_gpu);
}
@@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
-static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- REG_A5XX_CP_RB_RPTR_ADDR_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
-};
-
static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
@@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
+static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami)
+ return a5xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
.submit = a5xx_submit,
- .flush = a5xx_flush,
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
@@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
.create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
};
@@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
gpu = &adreno_gpu->base;
adreno_gpu->registers = a5xx_registers;
- adreno_gpu->reg_offsets = a5xx_register_offsets;
a5xx_gpu->lm_leakage = 0x4E001A;
check_speed_bin(&pdev->dev);
- /* Restricting nr_rings to 1 to temporarily disable preemption */
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 1e5b1a15a70f..c7187bcc5e90 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -37,6 +37,13 @@ struct a5xx_gpu {
atomic_t preempt_state;
struct timer_list preempt_timer;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ /* True if the microcode supports the WHERE_AM_I opcode */
+ bool has_whereami;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT;
}
+#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
+ ((ring)->id * sizeof(uint32_t)))
+
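(A sketch of the layout this macro implies, assuming the four rings configured in a5xx_gpu_init() below: each ring owns one u32 slot in the shadow BO, e.g.

	/* ring 0: shadow_iova + 0x0   <- CP writes that ring's rptr here
	 * ring 1: shadow_iova + 0x4
	 * ring 2: shadow_iova + 0x8
	 * ring 3: shadow_iova + 0xc
	 * On the CPU side, a5xx_get_rptr() reads a5xx_gpu->shadow[ring->id]. */
)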
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
@@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu);
void a5xx_preempt_irq(struct msm_gpu *gpu);
void a5xx_preempt_fini(struct msm_gpu *gpu);
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
+
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 321a8061fd32..f176a6f3eff6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- gpu->funcs->flush(gpu, ring);
+ a5xx_flush(gpu, ring, true);
if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 9f3fe177b00e..7e04509c4e1f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
ptr->info = 0;
ptr->data = 0;
- ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
- ptr->rptr_addr = rbmemptr(ring, rptr);
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+
+ ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
ptr->counter = counters_iova;
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e1c7bcd1b1eb..491fee410daf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -11,6 +11,7 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
@@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
+ trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
/*
* This can get called from devfreq while the hardware is idle. Don't
* bring up the power if it isn't already active
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 66a95e22b7b3..948f3656c20c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
+ /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+ if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+ }
+
spin_lock_irqsave(&ring->lock, flags);
/* Copy the shadow to the actual register */
@@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
OUT_RING(ring, upper_32_bits(iova));
}
-static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+ struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+ phys_addr_t ttbr;
+ u32 asid;
+ u64 memptr = rbmemptr(ring, ttbr0);
+
+ if (ctx == a6xx_gpu->cur_ctx)
+ return;
+
+ if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ return;
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+ OUT_RING(ring,
+ CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+ CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+
+ /*
+ * Write the new TTBR0 to the memstore. This is good for debugging.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, lower_32_bits(ttbr));
+ OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+
+ /*
+ * And finally, trigger a uche flush to be sure there isn't anything
+ * lingering in that part of the GPU
+ */
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, 0x31);
+
+ a6xx_gpu->cur_ctx = ctx;
+}
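(The CP_MEM_WRITE above mirrors the programmed TTBR0/ASID into the memstore purely for debugging. A hedged helper, not part of the patch, that reverses the packing when inspecting a memstore dump:

	/* Inverse of the packing above: dword0 = lower_32_bits(ttbr),
	 * dword1 = (asid << 16) | upper_32_bits(ttbr); assumes the TTBR
	 * fits in 48 bits, as it does for these pagetables. */
	static void example_decode_ttbr0(u32 dword0, u32 dword1,
					 u64 *ttbr, u32 *asid)
	{
		*ttbr = ((u64)(dword1 & 0xffff) << 32) | dword0;
		*asid = dword1 >> 16;
	}
)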
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
@@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
rbmemptr_stats(ring, index, cpcycles_start));
@@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == ctx)
+ if (priv->lastctx == submit->queue->ctx)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+ struct drm_gem_object *obj)
+{
+ u32 *buf = msm_gem_get_vaddr_active(obj);
+
+ if (IS_ERR(buf))
+ return;
+
+ /*
+	 * A lowest nibble of 0xa in the first dword indicates that this
+	 * microcode has been patched. The version lives in the third dword
+	 * (buf[2]); we only care about the patchlevel, its lowest nibble.
+	 *
+	 * Otherwise check that the firmware version is at least 1.90, the
+	 * first version that had this fix built in.
+ */
+ if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+ a6xx_gpu->has_whereami = true;
+	else if ((buf[0] & 0xfff) >= 0x190)
+ a6xx_gpu->has_whereami = true;
+
+ msm_gem_put_vaddr(obj);
+}
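(The a6xx variant also accepts unpatched firmware past a version floor. A worked example with an invented SQE header dword:

	/* buf[0] = 0x00000191 -> (buf[0] & 0xfff) = 0x191, i.e. v1.91,
	 * which passes the 1.90-or-newer check, so has_whereami is set
	 * even though the image carries no 0xa patch marker. */
)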
+
static int a6xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+ a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
gpu->rb[0]->iova);
- gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
- MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+	/*
+	 * Targets that support expanded APRIV can use the RPTR shadow from
+	 * hardware, but all others need to disable the feature. Targets that
+	 * support the WHERE_AM_I opcode can use that instead.
+	 */
+ if (adreno_gpu->base.hw_apriv)
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+ else
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /*
+ * Expanded APRIV and targets that support WHERE_AM_I both need a
+ * privileged buffer to store the RPTR shadow
+ */
+
+ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+ if (!a6xx_gpu->shadow_bo) {
+ a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a6xx_gpu->shadow_bo,
+ &a6xx_gpu->shadow_iova);
+
+ if (IS_ERR(a6xx_gpu->shadow))
+ return PTR_ERR(a6xx_gpu->shadow);
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+ REG_A6XX_CP_RB_RPTR_ADDR_HI,
+ shadowptr(a6xx_gpu, gpu->rb[0]));
+ }
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
+ a6xx_gpu->cur_ctx = NULL;
+
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}
-static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_A6XX_CP_RB_RPTR_ADDR_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- REG_A6XX_CP_RB_RPTR_ADDR_HI),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
-};
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
gpu->needs_hw_init = true;
+ trace_msm_gpu_resume(0);
+
ret = a6xx_gmu_resume(a6xx_gpu);
if (ret)
return ret;
@@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ trace_msm_gpu_suspend(0);
+
devfreq_suspend_device(gpu->devfreq.devfreq);
return a6xx_gmu_stop(a6xx_gpu);
@@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
+ if (a6xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->shadow_bo);
+ }
+
a6xx_gmu_remove(a6xx_gpu);
adreno_gpu_cleanup(adreno_gpu);
@@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
+static struct msm_gem_address_space *
+a6xx_create_private_address_space(struct msm_gpu *gpu)
+{
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ return msm_gem_address_space_create(mmu,
+ "gpu", 0x100000000ULL, 0x1ffffffffULL);
+}
+
+static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ return a6xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = a6xx_pm_resume,
.recover = a6xx_recover,
.submit = a6xx_submit,
- .flush = a6xx_flush,
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
@@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_state_put = a6xx_gpu_state_put,
#endif
.create_address_space = adreno_iommu_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct adreno_info *info;
struct device_node *node;
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
@@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
gpu = &adreno_gpu->base;
adreno_gpu->registers = NULL;
- adreno_gpu->reg_offsets = a6xx_register_offsets;
- if (adreno_is_a650(adreno_gpu))
+ /*
+ * We need to know the platform type before calling into adreno_gpu_init
+ * so that the hw_apriv flag can be correctly set. Snoop into the info
+ * and grab the revision number
+ */
+ info = adreno_info(config->rev);
+
+ if (info && info->revn == 650)
adreno_gpu->base.hw_apriv = true;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 03ba60d5b07f..3eeebf6a754b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -19,8 +19,15 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
+ struct msm_file_private *cur_ctx;
struct a6xx_gmu gmu;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ bool has_whereami;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
return true;
}
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+ ((_ring)->id * sizeof(uint32_t)))
+
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index b12f5b4a1bea..e9ede19193b0 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
int i;
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
- sizeof(a6xx_state->indexed_regs));
+ sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 9eeb46bf2a5d..58e03b20e1c7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
int ret;
if (pdev)
- gpu = platform_get_drvdata(pdev);
+ gpu = dev_to_gpu(&pdev->dev);
if (!gpu) {
dev_err_once(dev->dev, "no GPU device was found\n");
@@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(gpu);
}
- dev_set_drvdata(dev, gpu);
-
return 0;
}
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
- struct msm_gpu *gpu = dev_get_drvdata(dev);
+ struct msm_gpu *gpu = dev_to_gpu(dev);
pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
@@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = {
#ifdef CONFIG_PM
static int adreno_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_gpu *gpu = platform_get_drvdata(pdev);
+ struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
static int adreno_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_gpu *gpu = platform_get_drvdata(pdev);
+ struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_suspend(gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 862dd35b27d3..458b5b26d3c2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -189,12 +189,27 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
+ u64 start, size;
- aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xffffffff - SZ_16M);
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
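(For a sense of the numbers in adreno_iommu_create_address_space(), an editorial sketch; the aperture values are assumed, not from the patch:

	/* With a hypothetical 48-bit aperture [0x0, 0xffffffffffff]: */
	u64 start = max_t(u64, SZ_16M, 0x0);          /* -> 0x1000000 */
	u64 size  = 0xffffffffffffULL - start + 1;    /* aperture_end - start + 1 */
	/* start & GENMASK_ULL(48, 0) then clamps the base to 49 address bits. */
)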
@@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
- return ring->memptrs->rptr = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ return gpu->funcs->get_rptr(gpu, ring);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu)
}
}
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = submit->ring;
- unsigned i;
-
- for (i = 0; i < submit->nr_cmds; i++) {
- switch (submit->cmd[i].type) {
- case MSM_SUBMIT_CMD_IB_TARGET_BUF:
- /* ignore IB-targets */
- break;
- case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- /* ignore if there has not been a ctx switch: */
- if (priv->lastctx == ctx)
- break;
- fallthrough;
- case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
- CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
- OUT_PKT2(ring);
- break;
- }
- }
-
- OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
- OUT_RING(ring, submit->seqno);
-
- if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
- /* Flush HLSQ lazy updates to make sure there is nothing
- * pending for indirect loads after the timestamp has
- * passed:
- */
- OUT_PKT3(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, HLSQ_FLUSH);
- }
-
- /* wait for idle before cache flush/interrupt */
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
-
- if (!adreno_is_a2xx(adreno_gpu)) {
- /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
- } else {
- /* BIT(31) means something else on a2xx */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
- }
-
-#if 0
- if (adreno_is_a3xx(adreno_gpu)) {
- /* Dummy set-constant to trigger context rollover */
- OUT_PKT3(ring, CP_SET_CONSTANT, 2);
- OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
- OUT_RING(ring, 0x00000000);
- }
-#endif
-
- gpu->funcs->flush(gpu, ring);
-}
-
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
/* Copy the shadow to the actual register */
@@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* ensure writes to ringbuffer have hit system memory: */
mb();
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
+ gpu_write(gpu, reg, wptr);
}
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e55abae365b5..c3775f79525a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -17,29 +17,8 @@
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
-#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
-#define REG_SKIP ~0
-#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
-
extern bool snapshot_debugbus;
-/**
- * adreno_regs: List of registers that are used in across all
- * 3D devices. Each device type has different offset value for the same
- * register, so an array of register offsets are declared for every device
- * and are indexed by the enumeration values defined in this enum
- */
-enum adreno_regs {
- REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI,
- REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- REG_ADRENO_CP_RB_RPTR,
- REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_REGISTER_MAX,
-};
-
enum {
ADRENO_FW_PM4 = 0,
ADRENO_FW_SQE = 0, /* a6xx */
@@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu)
return gpu->revn == 225;
}
-static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
-{
- return (gpu->revn >= 300) && (gpu->revn < 400);
-}
-
static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
return gpu->revn == 305;
@@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}
-static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
-{
- return (gpu->revn >= 400) && (gpu->revn < 500);
-}
-
static inline int adreno_is_a405(struct adreno_gpu *gpu)
{
return gpu->revn == 405;
@@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
@@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
-/*
- * adreno_reg_check() - Checks the validity of a register enum
- * @gpu: Pointer to struct adreno_gpu
- * @offset_name: The register enum that is checked
- */
-static inline bool adreno_reg_check(struct adreno_gpu *gpu,
- enum adreno_regs offset_name)
-{
- BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
-
- /*
- * REG_SKIP is a special value that tell us that the register in
- * question isn't implemented on target but don't trigger a BUG(). This
- * is used to cleanly implement adreno_gpu_write64() and
- * adreno_gpu_read64() in a generic fashion
- */
- if (gpu->reg_offsets[offset_name] == REG_SKIP)
- return false;
-
- return true;
-}
-
-static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
- enum adreno_regs offset_name)
-{
- u32 reg = gpu->reg_offsets[offset_name];
- u32 val = 0;
- if(adreno_reg_check(gpu,offset_name))
- val = gpu_read(&gpu->base, reg - 1);
- return val;
-}
-
-static inline void adreno_gpu_write(struct adreno_gpu *gpu,
- enum adreno_regs offset_name, u32 data)
-{
- u32 reg = gpu->reg_offsets[offset_name];
- if(adreno_reg_check(gpu, offset_name))
- gpu_write(&gpu->base, reg - 1, data);
-}
-
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
-static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
- enum adreno_regs lo, enum adreno_regs hi, u64 data)
-{
- adreno_gpu_write(gpu, lo, lower_32_bits(data));
- adreno_gpu_write(gpu, hi, upper_32_bits(data));
-}
-
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 3931eecadaff..59bb8c1ffce6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN_DATA5_OFFSET = 46,
CP_SET_CTXSWITCH_IB = 85,
CP_REG_WRITE = 109,
+ CP_WHERE_AM_I = 98,
};
enum adreno_state_block {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index f1bc6a1af7a7..84ea09d9692f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
-static int __prefix ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __prefix ## _show, inode->i_private); \
-} \
-static const struct file_operations __prefix ## _fops = { \
- .owner = THIS_MODULE, \
- .open = __prefix ## _open, \
- .release = single_release, \
- .read = seq_read, \
- .llseek = seq_lseek, \
-}
-
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_irq *irq_obj = s->private;
@@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
return 0;
}
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
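(For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially what the removed DEFINE_DPU_DEBUGFS_SEQ_FOPS macro open-coded; paraphrased from the kernel header of this era:

	#define DEFINE_SHOW_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		return single_open(file, __name ## _show, inode->i_private);	\
	}									\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	}
)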
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index b36919d95362..393858ef8a83 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -30,6 +30,74 @@ enum dpu_perf_mode {
DPU_PERF_MODE_MAX
};
+/**
+ * _dpu_core_perf_calc_bw() - calculate BW per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * Return: aggregated BW for all planes in the crtc.
+ */
+static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ u64 crtc_plane_bw = 0;
+ u32 bw_factor;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_plane_bw += pstate->plane_fetch_bw;
+ }
+
+ bw_factor = kms->catalog->perf.bw_inefficiency_factor;
+ if (bw_factor) {
+ crtc_plane_bw *= bw_factor;
+ do_div(crtc_plane_bw, 100);
+ }
+
+ return crtc_plane_bw;
+}
+
+/**
+ * _dpu_core_perf_calc_clk() - calculate clock per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: max clk for all planes in the crtc.
+ */
+static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+ struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ u64 crtc_clk;
+ u32 clk_factor;
+
+ mode = &state->adjusted_mode;
+
+ crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_clk = max(pstate->plane_clk, crtc_clk);
+ }
+
+ clk_factor = kms->catalog->perf.clk_inefficiency_factor;
+ if (clk_factor) {
+ crtc_clk *= clk_factor;
+ do_div(crtc_clk, 100);
+ }
+
+ return crtc_clk;
+}
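(Both inefficiency factors are percentages, with 100 meaning no padding. A worked example using the sc7180 values added further down in this patch, clk_inefficiency_factor = 105 and bw_inefficiency_factor = 120:

	/* bw:  1,000,000 kB/s of aggregated plane fetch bandwidth becomes
	 *      1000000 * 120 / 100 = 1,200,000 kB/s after padding.
	 * clk: a 200,000,000 Hz worst-case plane clock becomes
	 *      200000000 * 105 / 100 = 210,000,000 Hz. */
)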
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
@@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
dpu_cstate = to_dpu_crtc_state(state);
memset(perf, 0, sizeof(struct dpu_core_perf_params));
- if (!dpu_cstate->bw_control) {
- perf->bw_ctl = kms->catalog->perf.max_bw_high *
- 1000ULL;
- perf->max_per_pipe_ib = perf->bw_ctl;
- perf->core_clk_rate = kms->perf.max_core_clk_rate;
- } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
perf->bw_ctl = 0;
perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
@@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
perf->bw_ctl = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ } else {
+ perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
+ perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
}
DPU_DEBUG(
@@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
tmp_cstate->bw_control);
- /*
- * For bw check only use the bw if the
- * atomic property has been already set
- */
- if (tmp_cstate->bw_control)
+
bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
@@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
- if (!dpu_cstate->bw_control) {
- DPU_DEBUG("bypass bandwidth check\n");
- } else if (!threshold) {
+ if (!threshold) {
DPU_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
@@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
struct dpu_crtc_state *dpu_cstate;
- int ret = 0;
+ int i, ret = 0;
+ u64 avg_bw;
+
+ if (!kms->num_paths)
+ return -EINVAL;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
@@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
dpu_cstate->new_perf.max_per_pipe_ib);
- DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
- dpu_cstate->new_perf.bw_ctl);
+ perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl, kms->num_paths);
}
}
+
+ avg_bw = perf.bw_ctl;
+	do_div(avg_bw, (kms->num_paths * 1000)); /* Bps_to_icc */
+
+ for (i = 0; i < kms->num_paths; i++)
+ icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
return ret;
}
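(The do_div() converts the aggregate from bytes per second to the kBps unit icc_set_bw() expects, averaged over the interconnect paths. A worked example with invented values:

	/* kms->num_paths = 2, perf.bw_ctl = 2,400,000,000 B/s:
	 *   avg_bw = 2400000000 / (2 * 1000) = 1,200,000 kBps per path,
	 * voted on every path together with perf.max_per_pipe_ib. */
)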
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index c2729f71e2fa..f56414a06ec4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -265,11 +265,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return INTF_MODE_NONE;
- }
-
/*
* TODO: This function is called from dpu debugfs and as part of atomic
* check. When called from debugfs, the crtc->mutex must be held to
@@ -297,7 +292,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
dpu_crtc->vblank_cb_time = ktime_get();
else
dpu_crtc->vblank_cb_count++;
- _dpu_crtc_complete_flip(crtc);
drm_crtc_handle_vblank(crtc);
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
@@ -402,6 +396,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
+ _dpu_crtc_complete_flip(crtc);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
@@ -421,8 +416,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
-
- drm_mode_debug_printmodeline(adj_mode);
}
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -457,7 +450,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_pcc_cfg cfg;
struct dpu_hw_ctl *ctl;
- struct dpu_hw_mixer *lm;
struct dpu_hw_dspp *dspp;
int i;
@@ -467,7 +459,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
- lm = mixer[i].hw_lm;
dspp = mixer[i].hw_dspp;
if (!dspp || !dspp->ops.setup_pcc)
@@ -496,16 +487,8 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
- struct drm_device *dev;
- unsigned long flags;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
if (!crtc->state->enable) {
DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
@@ -515,21 +498,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(crtc->state);
- dev = crtc->dev;
-
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- if (dpu_crtc->event) {
- WARN_ON(dpu_crtc->event);
- } else {
- spin_lock_irqsave(&dev->event_lock, flags);
- dpu_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
@@ -583,14 +553,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
- if (dpu_crtc->event) {
- DPU_DEBUG("already received dpu_crtc->event\n");
- } else {
- spin_lock_irqsave(&dev->event_lock, flags);
- dpu_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ WARN_ON(dpu_crtc->event);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/*
* If no mixers has been allocated in dpu_crtc_atomic_check(),
@@ -635,14 +602,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc_state *cstate;
-
- if (!crtc || !state) {
- DPU_ERROR("invalid argument(s)\n");
- return;
- }
-
- cstate = to_dpu_crtc_state(state);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
DPU_DEBUG("crtc%d\n", crtc->base.id);
@@ -731,14 +691,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate, *old_cstate;
+ struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
- if (!crtc || !crtc->state) {
- DPU_ERROR("invalid argument(s)\n");
- return NULL;
- }
-
- old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
DPU_ERROR("failed to allocate state\n");
@@ -754,19 +708,12 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
unsigned long flags;
bool release_bandwidth = false;
- if (!crtc || !crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(crtc->state);
-
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
/* Disable/save vblank irq handling */
@@ -825,19 +772,13 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
static void dpu_crtc_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
bool request_bandwidth = false;
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
pm_runtime_get_sync(crtc->dev->dev);
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
- dpu_crtc = to_dpu_crtc(crtc);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
/* in video mode, we hold an extra bandwidth reference
@@ -873,15 +814,15 @@ struct plane_state {
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct plane_state *pstates;
- struct dpu_crtc_state *cstate;
const struct drm_plane_state *pstate;
struct drm_plane *plane;
struct drm_display_mode *mode;
- int cnt = 0, rc = 0, mixer_width, i, z_pos;
+ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
int multirect_count = 0;
@@ -889,16 +830,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
int left_zpos_cnt = 0, right_zpos_cnt = 0;
struct drm_rect crtc_rect = { 0 };
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
-
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(state);
-
if (!state->enable || !state->active) {
DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, state->enable, state->active);
@@ -914,9 +847,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = mode->hdisplay / cstate->num_mixers;
+ if (cstate->num_mixers) {
+ mixer_width = mode->hdisplay / cstate->num_mixers;
- _dpu_crtc_setup_lm_bounds(crtc, state);
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+ }
crtc_rect.x2 = mode->hdisplay;
crtc_rect.y2 = mode->vdisplay;
@@ -1242,23 +1177,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
return 0;
}
-static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, _dpu_debugfs_status_show, inode->i_private);
-}
-
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
-static int __prefix ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __prefix ## _show, inode->i_private); \
-} \
-static const struct file_operations __prefix ## _fops = { \
- .owner = THIS_MODULE, \
- .open = __prefix ## _open, \
- .release = single_release, \
- .read = seq_read, \
- .llseek = seq_lseek, \
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
@@ -1275,25 +1194,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
return 0;
}
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- static const struct file_operations debugfs_status_fops = {
- .open = _dpu_debugfs_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
debugfs_create_file("status", 0400,
dpu_crtc->debugfs_root,
- dpu_crtc, &debugfs_status_fops);
+ dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
dpu_crtc->debugfs_root,
&dpu_crtc->base,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index bd6def436c65..f7f5c258b553 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
+ msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
conn = conn_iter;
@@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
@@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int ret = 0;
+ struct msm_drm_private *priv;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+ priv = drm_enc->dev->dev_private;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
@@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+ ret = msm_dp_display_enable(priv->dp,
+ drm_enc);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
+ ret);
+ goto out;
+ }
+ }
dpu_enc->enabled = true;
out:
@@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+ if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+ DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
+ }
+
dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
phys->ops.disable(phys);
}
+
/* after phys waits for frame-done, should be no more frames pending */
if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
@@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+ if (msm_dp_display_disable(priv->dp, drm_enc))
+ DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
+ }
+
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
return 0;
}
-static int _dpu_encoder_debugfs_status_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, _dpu_encoder_status_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
int i;
- static const struct file_operations debugfs_status_fops = {
- .open = _dpu_encoder_debugfs_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
char name[DPU_NAME_SIZE];
if (!drm_enc->dev) {
@@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
/* don't error check these */
debugfs_create_file("status", 0600,
- dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+ dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i]->ops.late_register)
@@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
{
int ret = 0;
int i = 0;
- enum dpu_intf_type intf_type;
+ enum dpu_intf_type intf_type = INTF_NONE;
struct dpu_enc_phys_init_params phys_params;
if (!dpu_enc) {
@@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
break;
- default:
- DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
- return -EINVAL;
+ case DRM_MODE_ENCODER_TMDS:
+ intf_type = INTF_DP;
+ break;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index b5a49050d131..805e059b50b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params(
* display_v_end -= mode->hsync_start - mode->hdisplay;
* }
*/
+	/* For DP/eDP, shift timings to align them to the bottom right */
+ if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
+ (phys_enc->hw_intf->cap->type == INTF_EDP)) {
+ timing->h_back_porch += timing->h_front_porch;
+ timing->h_front_porch = 0;
+ timing->v_back_porch += timing->v_front_porch;
+ timing->v_front_porch = 0;
+ }
}
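(A worked example of the shift, mode values invented:

	/* h_front_porch = 48, h_back_porch = 80  ->  h_back_porch = 128,
	 * h_front_porch = 0; v_front_porch folds into v_back_porch the same
	 * way. Line and frame totals are unchanged; the active region simply
	 * moves toward the bottom right of the scanout. */
)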
static u32 get_horizontal_total(const struct intf_timing_params *timing)
@@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
struct dpu_hw_ctl *hw_ctl;
unsigned long lock_flags;
u32 flush_register = 0;
- int new_cnt = -1, old_cnt = -1;
hw_ctl = phys_enc->hw_ctl;
@@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
phys_enc);
- old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+ atomic_read(&phys_enc->pending_kickoff_cnt);
/*
* only decrement the pending flush count if we've actually flushed
@@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
- new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
- -1, 0);
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/* Signal any waiting atomic commit thread */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 97d122eee96d..60b304b72b7c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
.max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
- .min_dram_ib = 800000,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
.danger_lut_tbl = {0xff, 0xffff, 0x0},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
@@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
{.rd_enable = 1, .wr_enable = 1},
{.rd_enable = 1, .wr_enable = 0}
},
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
};
static const struct dpu_perf_cfg sm8150_perf_data = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 1b7a9213a756..3544af1a45c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg {
* @downscaling_prefill_lines downscaling latency in lines
* @amortizable_threshold minimum y position for traffic shaping prefill
* @min_prefill_lines minimum pipeline latency in lines
+ * @clk_inefficiency_factor DPU src clock inefficiency factor
+ * @bw_inefficiency_factor DPU axi bus bw inefficiency factor
* @safe_lut_tbl: LUT tables for safe signals
* @danger_lut_tbl: LUT tables for danger signals
* @qos_lut_tbl: LUT tables for QoS signals
@@ -683,6 +685,8 @@ struct dpu_perf_cfg {
u32 downscaling_prefill_lines;
u32 amortizable_threshold;
u32 min_prefill_lines;
+ u32 clk_inefficiency_factor;
+ u32 bw_inefficiency_factor;
u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index c0a4d4e16d82..d93c44f6996d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s,
return 0;
}
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
-static int __prefix ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __prefix ## _show, inode->i_private); \
-} \
-static const struct file_operations __prefix ## _fops = { \
- .owner = THIS_MODULE, \
- .open = __prefix ## _open, \
- .release = single_release, \
- .read = seq_read, \
- .llseek = seq_lseek, \
-}
-
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, true);
}
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
return _dpu_danger_signal_status(s, false);
}
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
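
For reference, DEFINE_SHOW_ATTRIBUTE() from include/linux/seq_file.h expands to roughly the boilerplate that the deleted DEFINE_DPU_DEBUGFS_SEQ_FOPS() macro open-coded:

#define DEFINE_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __name ## _show, inode->i_private); \
} \
 \
static const struct file_operations __name ## _fops = { \
	.owner   = THIS_MODULE, \
	.open    = __name ## _open, \
	.read    = seq_read, \
	.llseek  = seq_lseek, \
	.release = single_release, \
}
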
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
@@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
if (!p)
return -EINVAL;
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
+ if (priv->dp)
+ msm_dp_debugfs_init(priv->dp, minor);
+
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
return 0;
}
+static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
+{
+ struct icc_path *path0;
+ struct icc_path *path1;
+ struct drm_device *dev = dpu_kms->dev;
+
+ path0 = of_icc_get(dev->dev, "mdp0-mem");
+ path1 = of_icc_get(dev->dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ dpu_kms->path[0] = path0;
+ dpu_kms->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ dpu_kms->path[1] = path1;
+ dpu_kms->num_paths++;
+ }
+ return 0;
+}
+
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
return dpu_crtc_vblank(crtc, true);
@@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return rc;
}
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int rc = 0;
+
+ if (!priv->dp)
+ return rc;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ rc = msm_dp_modeset_init(priv->dp, dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+ return rc;
+}
+
/**
* _dpu_kms_setup_displays - create encoders, bridges and connectors
* for underlying displays
@@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
- /**
- * Extend this function to initialize other
- * types of displays
- */
+ int rc = 0;
- return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+ rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
- /* TODO: No support for DSI swap */
- for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (priv->dsi[i]) {
- info.h_tile_instance[info.num_of_h_tiles] = i;
- info.num_of_h_tiles++;
+ switch (info.intf_type) {
+ case DRM_MODE_ENCODER_DSI:
+ /* TODO: No support for DSI swap */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
}
- }
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ info.num_of_h_tiles = 1;
+ break;
+ }
rc = dpu_encoder_setup(encoder->dev, encoder, &info);
if (rc)
@@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms)
dpu_core_irq_preinstall(dpu_kms);
}
+static int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms || !dpu_kms->dev)
+ return -EINVAL;
+
+ priv = dpu_kms->dev->dev_private;
+ if (!priv)
+ return -EINVAL;
+
+ msm_dp_irq_postinstall(priv->dp);
+
+ return 0;
+}
+
static void dpu_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms)
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.enable_commit = dpu_kms_enable_commit,
@@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_vbif_init_memtypes(dpu_kms);
+ if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
@@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev)
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
- int rc = -1;
+ int i, rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
@@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
+ for (i = 0; i < dpu_kms->num_paths; i++)
+ icc_set_bw(dpu_kms->path[i], 0, 0);
+
return rc;
}
@@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
ddev = dpu_kms->dev;
+
+ /* Min vote of BW is required before turning on AXI clk */
+ for (i = 0; i < dpu_kms->num_paths; i++)
+ icc_set_bw(dpu_kms->path[i], 0,
+ dpu_kms->catalog->perf.min_dram_ib);
+
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index e140cd633071..1c0e4c0c9ffb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -8,6 +8,8 @@
#ifndef __DPU_KMS_H__
#define __DPU_KMS_H__
+#include <linux/interconnect.h>
+
#include <drm/drm_drv.h>
#include "msm_drv.h"
@@ -140,6 +142,8 @@ struct dpu_kms {
* when disabled.
*/
atomic_t bandwidth_ref;
+ struct icc_path *path[2];
+ u32 num_paths;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 7d3fdbb00e7e..cd4078807db1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -8,7 +8,6 @@
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include "dpu_kms.h"
-#include <linux/interconnect.h>
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
@@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev)
DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
- ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
- if (ret)
- return ret;
+ if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+ ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+ if (ret)
+ return ret;
+ }
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 29e373d2e7b5..7ea90d25a3b6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -132,6 +132,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
}
/**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @plane: Pointer to drm plane.
+ * @fb: Pointer to framebuffer associated with the plane.
+ * Result: Updates calculated bandwidth in the plane state.
+ * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
+ * Prefill BW Equation: line src bytes * line_time
+ */
+static void _dpu_plane_calc_bw(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ const struct dpu_format *fmt = NULL;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ int src_width, src_height, dst_height, fps;
+ u64 plane_prefill_bw;
+ u64 plane_bw;
+ u32 hw_latency_lines;
+ u64 scale_factor;
+ int vbp, vpw;
+
+ pstate = to_dpu_plane_state(plane->state);
+ mode = &plane->state->crtc->mode;
+
+ fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
+
+ src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ fps = drm_mode_vrefresh(mode);
+ vbp = mode->vtotal - mode->vsync_end;
+ vpw = mode->vsync_end - mode->vsync_start;
+ hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;
+ scale_factor = src_height > dst_height ?
+ mult_frac(src_height, 1, dst_height) : 1;
+
+ plane_bw =
+ src_width * mode->vtotal * fps * fmt->bpp *
+ scale_factor;
+
+ plane_prefill_bw =
+ src_width * hw_latency_lines * fps * fmt->bpp *
+ scale_factor * mode->vtotal;
+
+ do_div(plane_prefill_bw, vbp + vpw);
+
+ pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
+}
+
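
A worked instance of the bandwidth equation above, assuming hypothetical 1080p@60 timings (vtotal = 1125, vbp + vpw = 40), a full-screen unscaled XRGB8888 layer (fmt->bpp = 4 bytes/pixel) and min_prefill_lines = 24:

/* plane_bw         = 1920 * 1125 * 60 * 4           = 518,400,000 B/s
 * plane_prefill_bw = 1920 * 24 * 60 * 4 * 1125 / 40 = 311,040,000 B/s
 * plane_fetch_bw   = max(plane_bw, plane_prefill_bw) ~= 518 MB/s
 */
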
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @plane: Pointer to drm plane.
+ * Result: Updates calculated clock in the plane state.
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h)
+ */
+static void _dpu_plane_calc_clk(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ int dst_width, src_height, dst_height, fps;
+
+ pstate = to_dpu_plane_state(plane->state);
+ mode = &plane->state->crtc->mode;
+
+ src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ fps = drm_mode_vrefresh(mode);
+
+ pstate->plane_clk =
+ dst_width * mode->vtotal * fps;
+
+ if (src_height > dst_height) {
+ pstate->plane_clk *= src_height;
+ do_div(pstate->plane_clk, dst_height);
+ }
+}
+
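
And a worked instance of the clock equation for the same hypothetical mode:

/* no downscale:          plane_clk = 1920 * 1125 * 60 = 129,600,000 Hz
 * 2:1 vertical downscale: src_h = 2 * dst_h doubles it to ~259.2 MHz
 */
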
+/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
* @fmt: Pointer to source buffer format
@@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
}
_dpu_plane_set_qos_remap(plane);
+
+ _dpu_plane_calc_bw(plane, fb);
+
+ _dpu_plane_calc_clk(plane);
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 456949713e90..ca83b8753d59 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -25,6 +25,8 @@
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
* @cdp_cfg: CDP configuration
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
*/
struct dpu_plane_state {
struct drm_plane_state base;
@@ -39,6 +41,8 @@ struct dpu_plane_state {
struct dpu_hw_pixel_ext pixel_ext;
struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+ u64 plane_fetch_bw;
+ u64 plane_clk;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index 5d8956055286..88645dbc3785 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-/* not ironically named at all.. no, really.. */
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
- struct drm_device *dev = mdp4_dtv_encoder->base.dev;
- struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
-
- if (!dtv_pdata) {
- DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
- return;
- }
-
- if (dtv_pdata->bus_scale_table) {
- mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
- dtv_pdata->bus_scale_table);
- DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
- DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
- if (dtv_pdata->lcdc_power_save)
- dtv_pdata->lcdc_power_save(1);
- }
-}
-
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
- if (mdp4_dtv_encoder->bsc) {
- msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
- mdp4_dtv_encoder->bsc = 0;
- }
-}
-
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
-{
- if (mdp4_dtv_encoder->bsc) {
- DBG("set bus scaling: %d", idx);
- msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
- }
-}
-#else
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
-#endif
-
static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- bs_fini(mdp4_dtv_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_dtv_encoder);
}
@@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
- bs_set(mdp4_dtv_encoder, 0);
-
mdp4_dtv_encoder->enabled = false;
}
@@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
MDP4_DMA_CONFIG_PACK(0x21));
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
- bs_set(mdp4_dtv_encoder, 1);
-
DBG("setting mdp_clk=%lu", pc);
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
@@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
goto fail;
}
- bs_init(mdp4_dtv_encoder);
-
return encoder;
fail:
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 18933bd81c77..e8ee92ab7956 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
}
#endif
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-/* bus scaling data is associated with extra pointless platform devices,
- * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
- * to find their pdata to make the bus-scaling stuff work.
- */
-static inline void *mdp4_find_pdata(const char *devname)
-{
- struct device *dev;
- dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
- return dev ? dev->platform_data : NULL;
-}
-#endif
-
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 871f3514ef69..10eb3e5b218e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
- struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
- struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
-
- if (!lcdc_pdata) {
- DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
- return;
- }
-
- if (lcdc_pdata->bus_scale_table) {
- mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
- lcdc_pdata->bus_scale_table);
- DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
- }
-}
-
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
- if (mdp4_lcdc_encoder->bsc) {
- msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc);
- mdp4_lcdc_encoder->bsc = 0;
- }
-}
-
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx)
-{
- if (mdp4_lcdc_encoder->bsc) {
- DBG("set bus scaling: %d", idx);
- msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx);
- }
-}
-#else
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {}
-#endif
-
static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
- bs_fini(mdp4_lcdc_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp4_lcdc_encoder);
}
@@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
- bs_set(mdp4_lcdc_encoder, 0);
-
mdp4_lcdc_encoder->enabled = false;
}
@@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
- bs_set(mdp4_lcdc_encoder, 1);
-
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
@@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
}
mdp4_lcdc_encoder->regs[2] = reg;
- bs_init(mdp4_lcdc_encoder);
-
return encoder;
fail:
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index c7df71e2fafc..7288041dd86a 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -50,14 +50,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
struct drm_panel *panel = mdp4_lvds_connector->panel;
int ret = 0;
- if (panel) {
- drm_panel_attach(panel, connector);
-
+ if (panel)
ret = drm_panel_get_modes(panel, connector);
- drm_panel_detach(panel);
- }
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index eeef41fcd4e1..ff2c1d583c79 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
-{
- if (mdp5_cmd_enc->bsc) {
- DBG("set bus scaling: %d", idx);
- /* HACK: scaling down, and then immediately back up
- * seems to leave things broken (underflow).. so
- * never disable:
- */
- idx = 1;
- msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
- }
-}
-#else
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
-#endif
-
#define VSYNC_CLK_RATE 19200000
static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
- bs_set(mdp5_cmd_enc, 0);
-
mdp5_cmd_enc->enabled = false;
}
@@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
- bs_set(mdp5_cmd_enc, 1);
if (pingpong_tearcheck_enable(encoder))
return;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index f48827283c2b..79d67c495780 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <mach/msm_bus.h>
-#include <mach/msm_bus_board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
- { \
- .src = MSM_BUS_MASTER_MDP_PORT0, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ab = (ab_val), \
- .ib = (ib_val), \
- }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
- MDP_BUS_VECTOR_ENTRY(0, 0),
- MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[0],
-}, {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
- .usecase = mdp_bus_usecases,
- .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
- .name = "mdss_mdp",
-};
-
-static void bs_init(struct mdp5_encoder *mdp5_encoder)
-{
- mdp5_encoder->bsc = msm_bus_scale_register_client(
- &mdp_bus_scale_table);
- DBG("bus scale client: %08x", mdp5_encoder->bsc);
-}
-
-static void bs_fini(struct mdp5_encoder *mdp5_encoder)
-{
- if (mdp5_encoder->bsc) {
- msm_bus_scale_unregister_client(mdp5_encoder->bsc);
- mdp5_encoder->bsc = 0;
- }
-}
-
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
-{
- if (mdp5_encoder->bsc) {
- DBG("set bus scaling: %d", idx);
- /* HACK: scaling down, and then immediately back up
- * seems to leave things broken (underflow).. so
- * never disable:
- */
- idx = 1;
- msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
- }
-}
-#else
-static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
-#endif
-
static void mdp5_encoder_destroy(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- bs_fini(mdp5_encoder);
drm_encoder_cleanup(encoder);
kfree(mdp5_encoder);
}
@@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
*/
mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
- bs_set(mdp5_encoder, 0);
-
mdp5_encoder->enabled = false;
}
@@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
if (WARN_ON(mdp5_encoder->enabled))
return;
- bs_set(mdp5_encoder, 1);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
@@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
- bs_init(mdp5_encoder);
-
return encoder;
fail:
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
new file mode 100644
index 000000000000..82a8673ab8da
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/of_platform.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "dp_catalog.h"
+#include "dp_audio.h"
+#include "dp_panel.h"
+#include "dp_display.h"
+
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
+struct dp_audio_private {
+ struct platform_device *audio_pdev;
+ struct platform_device *pdev;
+ struct dp_catalog *catalog;
+ struct dp_panel *panel;
+
+ bool engine_on;
+ u32 channels;
+
+ struct dp_audio dp_audio;
+};
+
+static u8 dp_audio_get_g0_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[3];
+ g[1] = c[0] ^ c[3];
+ g[2] = c[1];
+ g[3] = c[2];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static u8 dp_audio_get_g1_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[0] ^ c[3];
+ g[1] = c[0] ^ c[1] ^ c[3];
+ g[2] = c[1] ^ c[2];
+ g[3] = c[2] ^ c[3];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static u8 dp_audio_calculate_parity(u32 data)
+{
+ u8 x0 = 0;
+ u8 x1 = 0;
+ u8 ci = 0;
+ u8 iData = 0;
+ u8 i = 0;
+ u8 parity_byte;
+ u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+ for (i = 0; i < num_byte; i++) {
+ iData = (data >> i*4) & 0xF;
+
+ ci = iData ^ x1;
+ x1 = x0 ^ dp_audio_get_g1_value(ci);
+ x0 = dp_audio_get_g0_value(ci);
+ }
+
+ parity_byte = x1 | (x0 << 4);
+
+ return parity_byte;
+}
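
The g0/g1 helpers above form the nibble-wise generator of the ECC applied to SDP header bytes (note that num_byte actually counts 4-bit nibbles, not bytes). Tracing the code by hand for the Audio_Stream SDP type value 0x02 programmed in dp_audio_stream_sdp() below:

/* dp_audio_calculate_parity(0x02) processes two nibbles:
 *   nibble 0: ci = 0x2 ^ 0x0 = 0x2 -> x1 = g1(0x2) = 0x6, x0 = g0(0x2) = 0x4
 *   nibble 1: ci = 0x0 ^ 0x6 = 0x6 -> x1 = 0x4 ^ g1(0x6) = 0xe, x0 = g0(0x6) = 0xc
 * parity = x1 | (x0 << 4) = 0xce
 */
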
+
+static u32 dp_audio_get_header(struct dp_catalog *catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header)
+{
+ catalog->sdp_type = sdp;
+ catalog->sdp_header = header;
+ dp_catalog_audio_get_header(catalog);
+
+ return catalog->audio_data;
+}
+
+static void dp_audio_set_header(struct dp_catalog *catalog,
+ u32 data,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header)
+{
+ catalog->sdp_type = sdp;
+ catalog->sdp_header = header;
+ catalog->audio_data = data;
+ dp_catalog_audio_set_header(catalog);
+}
+
+static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x02;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+ new_value = value;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = audio->channels - 1;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x1;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x17;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = (0x0 | (0x11 << 2));
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x84;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x1b;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = (0x0 | (0x11 << 2));
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ new_value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x05;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x0F;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = 0x0;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x06;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x0F;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+}
+
+static void dp_audio_setup_sdp(struct dp_audio_private *audio)
+{
+ dp_catalog_audio_config_sdp(audio->catalog);
+
+ dp_audio_stream_sdp(audio);
+ dp_audio_timestamp_sdp(audio);
+ dp_audio_infoframe_sdp(audio);
+ dp_audio_copy_management_sdp(audio);
+ dp_audio_isrc_sdp(audio);
+}
+
+static void dp_audio_setup_acr(struct dp_audio_private *audio)
+{
+ u32 select = 0;
+ struct dp_catalog *catalog = audio->catalog;
+
+ switch (audio->dp_audio.bw_code) {
+ case DP_LINK_BW_1_62:
+ select = 0;
+ break;
+ case DP_LINK_BW_2_7:
+ select = 1;
+ break;
+ case DP_LINK_BW_5_4:
+ select = 2;
+ break;
+ case DP_LINK_BW_8_1:
+ select = 3;
+ break;
+ default:
+ DRM_DEBUG_DP("Unknown link rate\n");
+ select = 0;
+ break;
+ }
+
+ catalog->audio_data = select;
+ dp_catalog_audio_config_acr(catalog);
+}
+
+static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 safe_to_exit_level = 0;
+
+ switch (audio->dp_audio.lane_count) {
+ case 1:
+ safe_to_exit_level = 14;
+ break;
+ case 2:
+ safe_to_exit_level = 8;
+ break;
+ case 4:
+ safe_to_exit_level = 5;
+ break;
+ default:
+ DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n",
+ safe_to_exit_level);
+ safe_to_exit_level = 14;
+ break;
+ }
+
+ catalog->audio_data = safe_to_exit_level;
+ dp_catalog_audio_sfe_level(catalog);
+}
+
+static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+{
+ struct dp_catalog *catalog = audio->catalog;
+
+ catalog->audio_data = enable;
+ dp_catalog_audio_enable(catalog);
+
+ audio->engine_on = enable;
+}
+
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+{
+ struct dp_audio *dp_audio;
+ struct msm_dp *dp_display;
+
+ if (!pdev) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dp_audio = dp_display->dp_audio;
+
+ if (!dp_audio) {
+ DRM_ERROR("invalid dp_audio data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return container_of(dp_audio, struct dp_audio_private, dp_audio);
+}
+
+static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ if (!pdev) {
+ pr_err("invalid input\n");
+ return -ENODEV;
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ pr_err("invalid input\n");
+ return -ENODEV;
+ }
+
+ return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+}
+
+static int dp_audio_get_eld(struct device *dev,
+ void *data, uint8_t *buf, size_t len)
+{
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+
+ if (!pdev) {
+ DRM_ERROR("invalid input\n");
+ return -ENODEV;
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return -ENODEV;
+ }
+
+ memcpy(buf, dp_display->connector->eld,
+ min(sizeof(dp_display->connector->eld), len));
+
+ return 0;
+}
+
+int dp_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ int rc = 0;
+ struct dp_audio_private *audio;
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ dp_display = platform_get_drvdata(pdev);
+
+ /*
+ * There could be cases where the sound card is opened before,
+ * or even when, DP is not connected. This can cause unclocked
+ * register access, as the audio subsystem relies on the DP
+ * driver to maintain the correct clock state. To protect against
+ * such cases, check the connection status and bail out if not
+ * connected.
+ */
+ if (!dp_display->power_on) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ audio = dp_audio_get_data(pdev);
+ if (IS_ERR(audio)) {
+ rc = PTR_ERR(audio);
+ goto end;
+ }
+
+ audio->channels = params->channels;
+
+ dp_audio_setup_sdp(audio);
+ dp_audio_setup_acr(audio);
+ dp_audio_safe_to_exit_level(audio);
+ dp_audio_enable(audio, true);
+ dp_display->audio_enabled = true;
+
+end:
+ return rc;
+}
+
+static void dp_audio_shutdown(struct device *dev, void *data)
+{
+ struct dp_audio_private *audio;
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ dp_display = platform_get_drvdata(pdev);
+ audio = dp_audio_get_data(pdev);
+ if (IS_ERR(audio)) {
+ DRM_ERROR("failed to get audio data\n");
+ return;
+ }
+
+ /*
+ * If audio was not enabled, there is no need to execute the
+ * shutdown sequence and we can bail out early. This also makes
+ * sure we don't cause an unclocked access when the audio
+ * subsystem calls this without DP being connected; is_connected
+ * cannot be used here as it is set to false earlier than this
+ * call.
+ */
+ if (!dp_display->audio_enabled)
+ return;
+
+ dp_audio_enable(audio, false);
+ /* signal the dp display to safely shutdown clocks */
+ dp_display_signal_audio_complete(dp_display);
+}
+
+static const struct hdmi_codec_ops dp_audio_codec_ops = {
+ .hw_params = dp_audio_hw_params,
+ .audio_shutdown = dp_audio_shutdown,
+ .get_eld = dp_audio_get_eld,
+ .hook_plugged_cb = dp_audio_hook_plugged_cb,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &dp_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
+int dp_register_audio_driver(struct device *dev,
+ struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio_priv;
+
+ audio_priv = container_of(dp_audio,
+ struct dp_audio_private, dp_audio);
+
+ audio_priv->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
+}
+
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+ struct dp_panel *panel,
+ struct dp_catalog *catalog)
+{
+ int rc = 0;
+ struct dp_audio_private *audio;
+ struct dp_audio *dp_audio;
+
+ if (!pdev || !panel || !catalog) {
+ DRM_ERROR("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
+ if (!audio) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ audio->pdev = pdev;
+ audio->panel = panel;
+ audio->catalog = catalog;
+
+ dp_audio = &audio->dp_audio;
+
+ dp_catalog_audio_init(catalog);
+
+ return dp_audio;
+error:
+ return ERR_PTR(rc);
+}
+
+void dp_audio_put(struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio;
+
+ if (!dp_audio)
+ return;
+
+ audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+ devm_kfree(&audio->pdev->dev, audio);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
new file mode 100644
index 000000000000..84e5f4a5d26b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUDIO_H_
+#define _DP_AUDIO_H_
+
+#include <linux/platform_device.h>
+
+#include "dp_panel.h"
+#include "dp_catalog.h"
+#include <sound/hdmi-codec.h>
+
+/**
+ * struct dp_audio
+ * @lane_count: number of lanes configured in current session
+ * @bw_code: link rate's bandwidth code for current session
+ */
+struct dp_audio {
+ u32 lane_count;
+ u32 bw_code;
+};
+
+/**
+ * dp_audio_get()
+ *
+ * Creates an instance of dp_audio.
+ *
+ * @pdev: caller's platform device instance.
+ * @panel: an instance of dp_panel module.
+ * @catalog: an instance of dp_catalog module.
+ *
+ * Returns an error pointer in case of failure, otherwise
+ * a pointer to the newly created dp_audio instance.
+ */
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+ struct dp_panel *panel,
+ struct dp_catalog *catalog);
+
+/**
+ * dp_register_audio_driver()
+ *
+ * Registers DP device with hdmi_codec interface.
+ *
+ * @dev: DP device instance.
+ * @dp_audio: an instance of dp_audio module.
+ *
+ * Returns the error code in case of failure, otherwise
+ * zero on success.
+ */
+int dp_register_audio_driver(struct device *dev,
+ struct dp_audio *dp_audio);
+
+/**
+ * dp_audio_put()
+ *
+ * Cleans the dp_audio instance.
+ *
+ * @dp_audio: an instance of dp_audio.
+ */
+void dp_audio_put(struct dp_audio *dp_audio);
+
+int dp_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+
+#endif /* _DP_AUDIO_H_ */
+
+
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
new file mode 100644
index 000000000000..19b35ae3e927
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_aux.h"
+
+#define DP_AUX_ENUM_STR(x) #x
+
+struct dp_aux_private {
+ struct device *dev;
+ struct dp_catalog *catalog;
+
+ struct mutex mutex;
+ struct completion comp;
+
+ u32 aux_error_num;
+ u32 retry_cnt;
+ bool cmd_busy;
+ bool native;
+ bool read;
+ bool no_send_addr;
+ bool no_send_stop;
+ u32 offset;
+ u32 segment;
+ u32 isr;
+
+ struct drm_dp_aux dp_aux;
+};
+
+static const char *dp_aux_get_error(u32 aux_error)
+{
+ switch (aux_error) {
+ case DP_AUX_ERR_NONE:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
+ case DP_AUX_ERR_ADDR:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
+ case DP_AUX_ERR_TOUT:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
+ case DP_AUX_ERR_NACK:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
+ case DP_AUX_ERR_DEFER:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
+ case DP_AUX_ERR_NACK_DEFER:
+ return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
+ default:
+ return "unknown";
+ }
+}
+
+static u32 dp_aux_write(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 data[4], reg, len;
+ u8 *msgdata = msg->buffer;
+ int const AUX_CMD_FIFO_LEN = 128;
+ int i = 0;
+
+ if (aux->read)
+ len = 4;
+ else
+ len = msg->size + 4;
+
+ /*
+ * The command FIFO is only 144 bytes deep; limit the buffer
+ * length to 128 bytes here.
+ */
+ if (len > AUX_CMD_FIFO_LEN) {
+ DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
+ return 0;
+ }
+
+ /* Pack cmd and write to HW */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ if (aux->read)
+ data[0] |= BIT(4); /* R/W */
+
+ data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
+ data[2] = msg->address & 0xff; /* addr[7:0] */
+ data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
+
+ for (i = 0; i < len; i++) {
+ reg = (i < 4) ? data[i] : msgdata[i - 4];
+ /* index = 0, write */
+ reg = (((reg) << DP_AUX_DATA_OFFSET)
+ & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE;
+ if (i == 0)
+ reg |= DP_AUX_DATA_INDEX_WRITE;
+ aux->catalog->aux_data = reg;
+ dp_catalog_aux_write_data(aux->catalog);
+ }
+
+ dp_catalog_aux_clear_trans(aux->catalog, false);
+ dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+
+ reg = 0; /* Transaction number == 1 */
+ if (!aux->native) { /* i2c */
+ reg |= DP_AUX_TRANS_CTRL_I2C;
+
+ if (aux->no_send_addr)
+ reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
+
+ if (aux->no_send_stop)
+ reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
+ }
+
+ reg |= DP_AUX_TRANS_CTRL_GO;
+ aux->catalog->aux_data = reg;
+ dp_catalog_aux_write_trans(aux->catalog);
+
+ return len;
+}
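
As an example of the header packing above (the transaction itself is hypothetical): a 16-byte native AUX read at DPCD address 0x00200 yields the following four command bytes:

/* data[0] = ((0x00200 >> 16) & 0xf) | BIT(4) = 0x10  (addr[19:16] | read)
 * data[1] = (0x00200 >> 8) & 0xff            = 0x02  (addr[15:8])
 * data[2] = 0x00200 & 0xff                   = 0x00  (addr[7:0])
 * data[3] = (16 - 1) & 0xff                  = 0x0f  (len[7:0])
 */
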
+
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 ret, len, timeout;
+ int aux_timeout_ms = HZ/4; /* 250 ms, expressed in jiffies */
+
+ reinit_completion(&aux->comp);
+
+ len = dp_aux_write(aux, msg);
+ if (len == 0) {
+ DRM_ERROR("DP AUX write failed\n");
+ return -EINVAL;
+ }
+
+ timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+ if (!timeout) {
+ DRM_ERROR("aux %s timeout\n", (aux->read ? "read" : "write"));
+ return -ETIMEDOUT;
+ }
+
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+ ret = len;
+ } else {
+ DRM_ERROR_RATELIMITED("aux err: %s\n",
+ dp_aux_get_error(aux->aux_error_num));
+
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 data;
+ u8 *dp;
+ u32 i, actual_i;
+ u32 len = msg->size;
+
+ dp_catalog_aux_clear_trans(aux->catalog, true);
+
+ data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
+ data |= DP_AUX_DATA_READ; /* read */
+
+ aux->catalog->aux_data = data;
+ dp_catalog_aux_write_data(aux->catalog);
+
+ dp = msg->buffer;
+
+ /* discard first byte */
+ data = dp_catalog_aux_read_data(aux->catalog);
+
+ for (i = 0; i < len; i++) {
+ data = dp_catalog_aux_read_data(aux->catalog);
+ *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
+
+ actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
+ if (i != actual_i)
+ DRM_ERROR("Index mismatch: expected %d, found %d\n",
+ i, actual_i);
+ }
+}
+
+static void dp_aux_native_handler(struct dp_aux_private *aux)
+{
+ u32 isr = aux->isr;
+
+ if (isr & DP_INTR_AUX_I2C_DONE)
+ aux->aux_error_num = DP_AUX_ERR_NONE;
+ else if (isr & DP_INTR_WRONG_ADDR)
+ aux->aux_error_num = DP_AUX_ERR_ADDR;
+ else if (isr & DP_INTR_TIMEOUT)
+ aux->aux_error_num = DP_AUX_ERR_TOUT;
+ if (isr & DP_INTR_NACK_DEFER)
+ aux->aux_error_num = DP_AUX_ERR_NACK;
+ if (isr & DP_INTR_AUX_ERROR) {
+ aux->aux_error_num = DP_AUX_ERR_PHY;
+ dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ }
+
+ complete(&aux->comp);
+}
+
+static void dp_aux_i2c_handler(struct dp_aux_private *aux)
+{
+ u32 isr = aux->isr;
+
+ if (isr & DP_INTR_AUX_I2C_DONE) {
+ if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
+ aux->aux_error_num = DP_AUX_ERR_NACK;
+ else
+ aux->aux_error_num = DP_AUX_ERR_NONE;
+ } else {
+ if (isr & DP_INTR_WRONG_ADDR)
+ aux->aux_error_num = DP_AUX_ERR_ADDR;
+ else if (isr & DP_INTR_TIMEOUT)
+ aux->aux_error_num = DP_AUX_ERR_TOUT;
+ if (isr & DP_INTR_NACK_DEFER)
+ aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+ if (isr & DP_INTR_I2C_NACK)
+ aux->aux_error_num = DP_AUX_ERR_NACK;
+ if (isr & DP_INTR_I2C_DEFER)
+ aux->aux_error_num = DP_AUX_ERR_DEFER;
+ if (isr & DP_INTR_AUX_ERROR) {
+ aux->aux_error_num = DP_AUX_ERR_PHY;
+ dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ }
+ }
+
+ complete(&aux->comp);
+}
+
+static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *input_msg)
+{
+ u32 edid_address = 0x50;
+ u32 segment_address = 0x30;
+ bool i2c_read = input_msg->request &
+ (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+ u8 *data;
+
+ if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
+ (input_msg->address != segment_address)))
+ return;
+
+
+ data = input_msg->buffer;
+ if (input_msg->address == segment_address)
+ aux->segment = *data;
+ else
+ aux->offset = *data;
+}
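
In other words, the function snoops i2c-over-AUX writes to the EDID segment pointer (i2c address 0x30) and the EDID word offset (i2c address 0x50). A hypothetical read of block 2 of a four-block EDID would be tracked as:

/* i2c write to 0x30, payload 0x01 -> aux->segment = 1
 * i2c write to 0x50, payload 0x00 -> aux->offset  = 0
 * subsequent i2c reads from 0x50 then fetch 16 bytes at a time
 * starting from segment 1, offset 0 (EDID bytes 256..271, ...)
 */
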
+
+/**
+ * dp_aux_transfer_helper() - helper function for EDID read transactions
+ *
+ * @aux: DP AUX private structure
+ * @input_msg: input message from DRM upstream APIs
+ * @send_seg: send the segment to sink
+ *
+ * return: void
+ *
+ * This helper function is used to fix EDID reads for non-compliant
+ * sinks that do not handle the i2c middle-of-transaction flag correctly.
+ */
+static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *input_msg,
+ bool send_seg)
+{
+ struct drm_dp_aux_msg helper_msg;
+ u32 message_size = 0x10;
+ u32 segment_address = 0x30;
+ u32 const edid_block_length = 0x80;
+ bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
+ bool i2c_read = input_msg->request &
+ (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+ if (!i2c_mot || !i2c_read || (input_msg->size == 0))
+ return;
+
+ /*
+ * Sending the segment value and EDID offset will be performed
+ * from the DRM upstream EDID driver for each block. Avoid
+ * duplicate AUX transactions related to this while reading the
+ * first 16 bytes of each block.
+ */
+ if (!(aux->offset % edid_block_length) || !send_seg)
+ goto end;
+
+ aux->read = false;
+ aux->cmd_busy = true;
+ aux->no_send_addr = true;
+ aux->no_send_stop = true;
+
+ /*
+ * Send the segment address for every i2c read in which the
+ * middle-of-transaction flag is set. This is required to support EDID
+ * reads of more than 2 blocks as the segment address is reset to 0
+ * since we are overriding the middle-of-transaction flag for read
+ * transactions.
+ */
+
+ if (aux->segment) {
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = segment_address;
+ helper_msg.buffer = &aux->segment;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ }
+
+ /*
+ * Send the offset address for every i2c read in which the
+ * middle-of-transaction flag is set. This will ensure that the sink
+ * will update its read pointer and return the correct portion of the
+ * EDID buffer in the subsequent i2c read transaction triggered in the
+ * native AUX transfer function.
+ */
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = input_msg->address;
+ helper_msg.buffer = &aux->offset;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+
+end:
+ aux->offset += message_size;
+ if (aux->offset == 0x80 || aux->offset == 0x100)
+ aux->segment = 0x0; /* reset segment at end of block */
+}
+
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel, via dp_catalog_aux_reset(), if the
+ * transfer fails, e.g. when the wait for completion times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ ssize_t ret;
+ int const aux_cmd_native_max = 16;
+ int const aux_cmd_i2c_max = 128;
+ int const retry_count = 5;
+ struct dp_aux_private *aux = container_of(dp_aux,
+ struct dp_aux_private, dp_aux);
+
+ mutex_lock(&aux->mutex);
+
+ aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+
+ /* Ignore address only message */
+ if ((msg->size == 0) || (msg->buffer == NULL)) {
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ ret = msg->size;
+ goto unlock_exit;
+ }
+
+ /* msg sanity check */
+ if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+ (msg->size > aux_cmd_i2c_max)) {
+ DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
+ __func__, msg->size, msg->request);
+ ret = -EINVAL;
+ goto unlock_exit;
+ }
+
+ dp_aux_update_offset_and_segment(aux, msg);
+ dp_aux_transfer_helper(aux, msg, true);
+
+ aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+ aux->cmd_busy = true;
+
+ if (aux->read) {
+ aux->no_send_addr = true;
+ aux->no_send_stop = false;
+ } else {
+ aux->no_send_addr = true;
+ aux->no_send_stop = true;
+ }
+
+ ret = dp_aux_cmd_fifo_tx(aux, msg);
+
+ if (ret < 0) {
+ if (aux->native) {
+ aux->retry_cnt++;
+ if (!(aux->retry_cnt % retry_count))
+ dp_catalog_aux_update_cfg(aux->catalog);
+ dp_catalog_aux_reset(aux->catalog);
+ }
+ usleep_range(400, 500); /* at least 400us to next try */
+ goto unlock_exit;
+ }
+
+ if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+ if (aux->read)
+ dp_aux_cmd_fifo_rx(aux, msg);
+
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ } else {
+ /* Reply defer to retry */
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ }
+
+ /* Return requested size for success or retry */
+ ret = msg->size;
+ aux->retry_cnt = 0;
+
+unlock_exit:
+ aux->cmd_busy = false;
+ mutex_unlock(&aux->mutex);
+ return ret;
+}
+
+void dp_aux_isr(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->isr = dp_catalog_aux_get_irq(aux->catalog);
+
+ if (!aux->cmd_busy)
+ return;
+
+ if (aux->native)
+ dp_aux_native_handler(aux);
+ else
+ dp_aux_i2c_handler(aux);
+}
+
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ dp_catalog_aux_update_cfg(aux->catalog);
+ dp_catalog_aux_reset(aux->catalog);
+}
+
+void dp_aux_init(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ dp_catalog_aux_enable(aux->catalog, true);
+ aux->retry_cnt = 0;
+}
+
+void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ dp_catalog_aux_enable(aux->catalog, false);
+}
+
+int dp_aux_register(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+ int ret;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->dp_aux.name = "dpu_dp_aux";
+ aux->dp_aux.dev = aux->dev;
+ aux->dp_aux.transfer = dp_aux_transfer;
+ ret = drm_dp_aux_register(&aux->dp_aux);
+ if (ret) {
+ DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+{
+ drm_dp_aux_unregister(dp_aux);
+}
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
+{
+ struct dp_aux_private *aux;
+
+ if (!catalog) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&aux->comp);
+ aux->cmd_busy = false;
+ mutex_init(&aux->mutex);
+
+ aux->dev = dev;
+ aux->catalog = catalog;
+ aux->retry_cnt = 0;
+
+ return &aux->dp_aux;
+}
+
+void dp_aux_put(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux)
+ return;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ mutex_destroy(&aux->mutex);
+
+ devm_kfree(aux->dev, aux);
+}
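+
+/*
+ * Lifecycle sketch (hedged; ordering inferred from the functions above):
+ *
+ *	dp_aux = dp_aux_get(dev, catalog);
+ *	dp_aux_register(dp_aux);
+ *	dp_aux_init(dp_aux);
+ *	...
+ *	dp_aux_deinit(dp_aux);
+ *	dp_aux_unregister(dp_aux);
+ *	dp_aux_put(dp_aux);
+ */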
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
new file mode 100644
index 000000000000..f8b8ba919465
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUX_H_
+#define _DP_AUX_H_
+
+#include "dp_catalog.h"
+#include <drm/drm_dp_helper.h>
+
+#define DP_AUX_ERR_NONE 0
+#define DP_AUX_ERR_ADDR -1
+#define DP_AUX_ERR_TOUT -2
+#define DP_AUX_ERR_NACK -3
+#define DP_AUX_ERR_DEFER -4
+#define DP_AUX_ERR_NACK_DEFER -5
+#define DP_AUX_ERR_PHY -6
+
+int dp_aux_register(struct drm_dp_aux *dp_aux);
+void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+void dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_init(struct drm_dp_aux *dp_aux);
+void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog);
+void dp_aux_put(struct drm_dp_aux *aux);
+
+#endif /* _DP_AUX_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644
index 000000000000..b15b4ce4ba35
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/rational.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_catalog.h"
+#include "dp_reg.h"
+
+#define POLLING_SLEEP_US 1000
+#define POLLING_TIMEOUT_US 10000
+
+#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
+
+#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000
+#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200
+#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200
+#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200
+#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400
+#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00
+#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000
+#define MSM_DP_CONTROLLER_P0_SIZE 0x0400
+
+#define DP_INTERRUPT_STATUS1 \
+ (DP_INTR_AUX_I2C_DONE| \
+ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+ DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+ (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+ DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
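+
+/*
+ * Layout note (inferred from the BIT() spacing of the per-source
+ * interrupt bits): each interrupt source occupies three consecutive
+ * bits of REG_DP_INTR_STATUS*; bit n is the raw status, bit n+1 (the
+ * ACK shift) acknowledges it and bit n+2 (the MASK shift) unmasks it.
+ */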
+
+struct dp_catalog_private {
+ struct device *dev;
+ struct dp_io *io;
+ u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct dp_catalog dp_catalog;
+ u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
+};
+
+static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+{
+ offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+ return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_aux(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+	/*
+	 * To make sure AUX register writes happen before any other
+	 * operation, this function uses writel() instead of
+	 * writel_relaxed().
+	 */
+ writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
+{
+ offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+ return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+	/*
+	 * To make sure AHB register writes happen before any other
+	 * operation, this function uses writel() instead of
+	 * writel_relaxed().
+	 */
+ writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_p0(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ offset += MSM_DP_CONTROLLER_P0_OFFSET;
+	/*
+	 * To make sure P0 interface register writes happen before any
+	 * other operation, this function uses writel() instead of
+	 * writel_relaxed().
+	 */
+ writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+ u32 offset)
+{
+ offset += MSM_DP_CONTROLLER_P0_OFFSET;
+ return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+{
+ offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+ return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_link(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+	/*
+	 * To make sure link register writes happen before any other
+	 * operation, this function uses writel() instead of
+	 * writel_relaxed().
+	 */
+ writel(data, catalog->io->dp_controller.base + offset);
+}
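+
+/*
+ * The accessors above take offsets relative to their block base; a
+ * typical read-modify-write sequence (sketch, mirroring
+ * dp_catalog_aux_enable() below) is:
+ *
+ *	u32 aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+ *
+ *	aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ *	dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ */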
+
+/* aux related catalog functions */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_aux(catalog, REG_DP_AUX_DATA);
+}
+
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
+ return 0;
+}
+
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
+ return 0;
+}
+
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+{
+ u32 data;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (read) {
+ data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+ data &= ~DP_AUX_TRANS_CTRL_GO;
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ } else {
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+ }
+ return 0;
+}
+
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+ return 0;
+}
+
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+{
+ u32 aux_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+ aux_ctrl |= DP_AUX_CTRL_RESET;
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ aux_ctrl &= ~DP_AUX_CTRL_RESET;
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+{
+ u32 aux_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+ if (enable) {
+ dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+ dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+ aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ } else {
+ aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+ }
+
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dp_io *dp_io = catalog->io;
+ struct phy *phy = dp_io->phy;
+
+ phy_calibrate(phy);
+}
+
+static void dump_regs(void __iomem *base, int len)
+{
+ int i;
+ u32 x0, x4, x8, xc;
+ u32 addr_off = 0;
+
+ len = DIV_ROUND_UP(len, 16);
+ for (i = 0; i < len; i++) {
+ x0 = readl_relaxed(base + addr_off);
+ x4 = readl_relaxed(base + addr_off + 0x04);
+ x8 = readl_relaxed(base + addr_off + 0x08);
+ xc = readl_relaxed(base + addr_off + 0x0c);
+
+ pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
+ addr_off += 16;
+ }
+}
+
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
+{
+ u32 offset, len;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ pr_info("AHB regs\n");
+ offset = MSM_DP_CONTROLLER_AHB_OFFSET;
+ len = MSM_DP_CONTROLLER_AHB_SIZE;
+ dump_regs(catalog->io->dp_controller.base + offset, len);
+
+ pr_info("AUXCLK regs\n");
+ offset = MSM_DP_CONTROLLER_AUX_OFFSET;
+ len = MSM_DP_CONTROLLER_AUX_SIZE;
+ dump_regs(catalog->io->dp_controller.base + offset, len);
+
+ pr_info("LCLK regs\n");
+ offset = MSM_DP_CONTROLLER_LINK_OFFSET;
+ len = MSM_DP_CONTROLLER_LINK_SIZE;
+ dump_regs(catalog->io->dp_controller.base + offset, len);
+
+ pr_info("P0CLK regs\n");
+ offset = MSM_DP_CONTROLLER_P0_OFFSET;
+ len = MSM_DP_CONTROLLER_P0_SIZE;
+ dump_regs(catalog->io->dp_controller.base + offset, len);
+}
+
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 intr, intr_ack;
+
+ intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+ intr &= ~DP_INTERRUPT_STATUS1_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS1)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+ DP_INTERRUPT_STATUS1_MASK);
+
+ return intr;
+}
+
+/* controller related catalog functions */
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+ u32 dp_tu, u32 valid_boundary,
+ u32 valid_boundary2)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+ dp_write_link(catalog, REG_DP_TU, dp_tu);
+ dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+}
+
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+}
+
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+
+ dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+}
+
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 ln_mapping;
+
+ ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+ ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+ ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+ ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+ dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ ln_mapping);
+}
+
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+ bool enable)
+{
+ u32 mainlink_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (enable) {
+ /*
+ * To make sure link reg writes happens before other operation,
+ * dp_write_link() function uses writel()
+ */
+ mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+
+ mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+ DP_MAINLINK_CTRL_ENABLE);
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+ DP_MAINLINK_FB_BOUNDARY_SEL);
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ } else {
+ mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ }
+}
+
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+ u32 colorimetry_cfg,
+ u32 test_bits_depth)
+{
+ u32 misc_val;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+ /* clear bpp bits */
+ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+ misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+ misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+ /* Configure clock to synchronous mode */
+ misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+ DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val);
+ dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+}
+
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+ u32 rate, u32 stream_rate_khz,
+ bool fixed_nvid)
+{
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+ u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+ u32 const link_rate_hbr2 = 540000;
+ u32 const link_rate_hbr3 = 810000;
+ unsigned long den, num;
+
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (rate == link_rate_hbr3)
+ pixel_div = 6;
+	else if (rate == 162000 || rate == 270000)
+ pixel_div = 2;
+ else if (rate == link_rate_hbr2)
+ pixel_div = 4;
+ else
+ DRM_ERROR("Invalid pixel mux divider\n");
+
+ dispcc_input_rate = (rate * 10) / pixel_div;
+
+ rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+ (unsigned long)(1 << 16) - 1,
+ (unsigned long)(1 << 16) - 1, &den, &num);
+
+ den = ~(den - num);
+ den = den & 0xFFFF;
+ pixel_m = num;
+ pixel_n = den;
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+ if (nvid < nvid_fixed) {
+ u32 temp;
+
+ temp = (nvid_fixed / nvid) * nvid;
+ mvid = (nvid_fixed / nvid) * mvid;
+ nvid = temp;
+ }
+
+ if (link_rate_hbr2 == rate)
+ nvid *= 2;
+
+ if (link_rate_hbr3 == rate)
+ nvid *= 3;
+
+ DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+ dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+ dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+}
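+
+/*
+ * Worked example (hedged; values illustrative): for HBR (rate = 270000
+ * kHz) driving a 1080p60 stream (stream_rate_khz = 148500), pixel_div
+ * is 2 and dispcc_input_rate = 1350000. rational_best_approximation()
+ * reduces 1350000/148500 to 100/11, giving mvid = 55 and nvid = 100,
+ * which the fixed-N scaling in the function turns into mvid = 17985
+ * and nvid = 32700; 17985/32700 = 0.55 = 148500/270000, the required
+ * M/N ratio.
+ */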
+
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
+ u32 pattern)
+{
+ int bit, ret;
+ u32 data;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ bit = BIT(pattern - 1);
+ DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern);
+ dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+
+ bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+ /* Poll for mainlink ready status */
+ ret = readx_poll_timeout(readl, catalog->io->dp_controller.base +
+ MSM_DP_CONTROLLER_LINK_OFFSET +
+ REG_DP_MAINLINK_READY,
+ data, data & bit,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("set pattern for link_train=%d failed\n", pattern);
+ return ret;
+ }
+ return 0;
+}
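+
+/*
+ * Usage note (hedged): callers pass the 1-based link training pattern
+ * number, so pattern 1 programs BIT(0) into REG_DP_STATE_CTRL and the
+ * function then polls REG_DP_MAINLINK_READY for the matching ready bit.
+ */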
+
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+{
+ u32 sw_reset;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+
+ sw_reset |= DP_SW_RESET;
+ dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ sw_reset &= ~DP_SW_RESET;
+ dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+}
+
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+{
+ u32 data;
+ int ret;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ /* Poll for mainlink ready status */
+ ret = readl_poll_timeout(catalog->io->dp_controller.base +
+ MSM_DP_CONTROLLER_LINK_OFFSET +
+ REG_DP_MAINLINK_READY,
+ data, data & DP_MAINLINK_READY_FOR_VIDEO,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("mainlink not ready\n");
+ return false;
+ }
+
+ return true;
+}
+
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+ bool enable)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (enable) {
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+ DP_INTERRUPT_STATUS1_MASK);
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ DP_INTERRUPT_STATUS2_MASK);
+ } else {
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+ }
+}
+
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+ u32 intr_mask, bool en)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+
+ config = (en ? config | intr_mask : config & ~intr_mask);
+
+ dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+ config & DP_DP_HPD_INT_MASK);
+}
+
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+ /* enable HPD interrupts */
+ dp_catalog_hpd_config_intr(dp_catalog,
+ DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
+ | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
+ /* Configure REFTIMER and enable it */
+ reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
+ dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+ /* Enable HPD */
+ dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ int isr = 0;
+
+ isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+ (isr & DP_DP_HPD_INT_MASK));
+
+ return isr;
+}
+
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 intr, intr_ack;
+
+ intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+ intr &= ~DP_INTERRUPT_STATUS2_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS2)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+ return intr;
+}
+
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+ DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+ dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+}
+
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
+ u8 v_level, u8 p_level)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dp_io *dp_io = catalog->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+ /* TODO: Update for all lanes instead of just first one */
+ opts_dp->voltage[0] = v_level;
+ opts_dp->pre[0] = p_level;
+ opts_dp->set_voltages = 1;
+ phy_configure(phy, &dp_io->phy_opts);
+ opts_dp->set_voltages = 0;
+
+ return 0;
+}
+
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+ u32 pattern)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 value = 0x0;
+
+ /* Make sure to clear the current pattern before starting a new one */
+ dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+
+ switch (pattern) {
+ case DP_PHY_TEST_PATTERN_D10_2:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ value &= ~(1 << 16);
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+ /* 00111110000011111000001111100000 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
+ /* 00001111100000111110000011111000 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
+ /* 1111100000111110 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+
+ value = DP_HBR2_ERM_PATTERN;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value |= DP_MAINLINK_CTRL_ENABLE;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ break;
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+ DP_MAINLINK_CTRL_ENABLE);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+ break;
+ default:
+ DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+ break;
+ }
+}
+
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+}
+
+/* panel related catalog functions */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
+ dp_catalog->total);
+ dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
+ dp_catalog->sync_start);
+ dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
+ dp_catalog->width_blanking);
+ dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+ return 0;
+}
+
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+ struct drm_display_mode *drm_mode)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
+
+	/* TPG config parameters */
+ hsync_period = drm_mode->htotal;
+ vsync_period = drm_mode->vtotal;
+
+ display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+ hsync_period);
+ display_v_end = ((vsync_period - (drm_mode->vsync_start -
+ drm_mode->vdisplay))
+ * hsync_period) - 1;
+
+ display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+ display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+ hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+ hsync_end_x = hsync_period - (drm_mode->hsync_start -
+ drm_mode->hdisplay) - 1;
+
+ v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+ hsync_ctl = (hsync_period << 16) |
+ (drm_mode->hsync_end - drm_mode->hsync_start);
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+ dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ hsync_period);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ hsync_period);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+ DP_TPG_CHECKERED_RECT_PATTERN);
+ dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+ DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+ DP_TPG_VIDEO_CONFIG_RGB);
+ dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+ DP_BIST_ENABLE_DPBIST_EN);
+ dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+ DP_TIMING_ENGINE_EN_EN);
+ DRM_DEBUG_DP("%s: enabled tpg\n", __func__);
+}
+
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+ dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+ struct dp_catalog_private *catalog;
+
+ if (!io) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+ if (!catalog)
+ return ERR_PTR(-ENOMEM);
+
+ catalog->dev = dev;
+ catalog->io = io;
+
+ return &catalog->dp_catalog;
+}
+
+void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ enum dp_catalog_audio_sdp_type sdp;
+ enum dp_catalog_audio_header_type header;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_map = catalog->audio_map;
+ sdp = dp_catalog->sdp_type;
+ header = dp_catalog->sdp_header;
+
+ dp_catalog->audio_data = dp_read_link(catalog,
+ sdp_map[sdp][header]);
+}
+
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ enum dp_catalog_audio_sdp_type sdp;
+ enum dp_catalog_audio_header_type header;
+ u32 data;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_map = catalog->audio_map;
+ sdp = dp_catalog->sdp_type;
+ header = dp_catalog->sdp_header;
+ data = dp_catalog->audio_data;
+
+ dp_write_link(catalog, sdp_map[sdp][header], data);
+}
+
+void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 acr_ctrl, select;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ select = dp_catalog->audio_data;
+ acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+ DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+
+ dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+}
+
+void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ bool enable;
+ u32 audio_ctrl;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ enable = !!dp_catalog->audio_data;
+ audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl);
+
+ dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+	/* make sure the audio engine enable/disable write goes through */
+ wmb();
+}
+
+void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 sdp_cfg = 0;
+ u32 sdp_cfg2 = 0;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg);
+
+ dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+
+ sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2);
+
+ dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
+}
+
+void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+
+ static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
+ {
+ MMSS_DP_AUDIO_STREAM_0,
+ MMSS_DP_AUDIO_STREAM_1,
+ MMSS_DP_AUDIO_STREAM_1,
+ },
+ {
+ MMSS_DP_AUDIO_TIMESTAMP_0,
+ MMSS_DP_AUDIO_TIMESTAMP_1,
+ MMSS_DP_AUDIO_TIMESTAMP_1,
+ },
+ {
+ MMSS_DP_AUDIO_INFOFRAME_0,
+ MMSS_DP_AUDIO_INFOFRAME_1,
+ MMSS_DP_AUDIO_INFOFRAME_1,
+ },
+ {
+ MMSS_DP_AUDIO_COPYMANAGEMENT_0,
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+ },
+ {
+ MMSS_DP_AUDIO_ISRC_0,
+ MMSS_DP_AUDIO_ISRC_1,
+ MMSS_DP_AUDIO_ISRC_1,
+ },
+ };
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ catalog->audio_map = sdp_map;
+}
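+
+/*
+ * Usage sketch (hedged): callers select a cell of the map through the
+ * public struct before invoking the accessors, e.g.
+ *
+ *	dp_catalog->sdp_type = DP_AUDIO_SDP_STREAM;
+ *	dp_catalog->sdp_header = DP_AUDIO_SDP_HEADER_1;
+ *	dp_catalog_audio_get_header(dp_catalog);
+ *
+ * after which the register value is left in dp_catalog->audio_data.
+ */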
+
+void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 mainlink_levels, safe_to_exit_level;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ safe_to_exit_level = dp_catalog->audio_data;
+ mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels &= 0xFE0;
+ mainlink_levels |= safe_to_exit_level;
+
+ DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ mainlink_levels, safe_to_exit_level);
+
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644
index 000000000000..4b7666f1fe6f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include <drm/drm_modes.h>
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD BIT(0)
+#define DP_INTR_AUX_I2C_DONE BIT(3)
+#define DP_INTR_WRONG_ADDR BIT(6)
+#define DP_INTR_TIMEOUT BIT(9)
+#define DP_INTR_NACK_DEFER BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK BIT(18)
+#define DP_INTR_I2C_DEFER BIT(21)
+#define DP_INTR_PLL_UNLOCKED BIT(24)
+#define DP_INTR_AUX_ERROR BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
+#define DP_INTR_FRAME_END BIT(6)
+#define DP_INTR_CRC_UPDATED BIT(9)
+
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+ PHY_AUX_CFG0,
+ PHY_AUX_CFG1,
+ PHY_AUX_CFG2,
+ PHY_AUX_CFG3,
+ PHY_AUX_CFG4,
+ PHY_AUX_CFG5,
+ PHY_AUX_CFG6,
+ PHY_AUX_CFG7,
+ PHY_AUX_CFG8,
+ PHY_AUX_CFG9,
+ PHY_AUX_CFG_MAX,
+};
+
+enum dp_catalog_audio_sdp_type {
+ DP_AUDIO_SDP_STREAM,
+ DP_AUDIO_SDP_TIMESTAMP,
+ DP_AUDIO_SDP_INFOFRAME,
+ DP_AUDIO_SDP_COPYMANAGEMENT,
+ DP_AUDIO_SDP_ISRC,
+ DP_AUDIO_SDP_MAX,
+};
+
+enum dp_catalog_audio_header_type {
+ DP_AUDIO_SDP_HEADER_1,
+ DP_AUDIO_SDP_HEADER_2,
+ DP_AUDIO_SDP_HEADER_3,
+ DP_AUDIO_SDP_HEADER_MAX,
+};
+
+struct dp_catalog {
+ u32 aux_data;
+ u32 total;
+ u32 sync_start;
+ u32 width_blanking;
+ u32 dp_active;
+ enum dp_catalog_audio_sdp_type sdp_type;
+ enum dp_catalog_audio_header_type sdp_header;
+ u32 audio_data;
+};
+
+/* AUX APIs */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+
+/* DP Controller APIs */
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+ u32 stream_rate_khz, bool fixed_nvid);
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern);
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+ u32 intr_mask, bool en);
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
+ u8 p_level);
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+ u32 dp_tu, u32 valid_boundary,
+ u32 valid_boundary2);
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+ u32 pattern);
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+
+/* DP Panel APIs */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+ struct drm_display_mode *drm_mode);
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+
+/* DP Audio APIs */
+void dp_catalog_audio_get_header(struct dp_catalog *catalog);
+void dp_catalog_audio_set_header(struct dp_catalog *catalog);
+void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
+void dp_catalog_audio_enable(struct dp_catalog *catalog);
+void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
+void dp_catalog_audio_init(struct dp_catalog *catalog);
+void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644
index 000000000000..2e3e1917351f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -0,0 +1,1869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_ctrl.h"
+#include "dp_link.h"
+
+#define DP_KHZ_TO_HZ 1000
+#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
+#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
+
+#define MR_LINK_TRAINING1 0x8
+#define MR_LINK_SYMBOL_ERM 0x80
+#define MR_LINK_PRBS7 0x100
+#define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4 0x40
+
+enum {
+ DP_TRAINING_NONE,
+ DP_TRAINING_1,
+ DP_TRAINING_2,
+};
+
+struct dp_tu_calc_input {
+	u64 lclk; /* link clock in MHz: 162, 270, 540 or 810 */
+ u64 pclk_khz; /* in KHz */
+ u64 hactive; /* active h-width */
+ u64 hporch; /* bp + fp + pulse */
+	int nlanes; /* number of lanes */
+ int bpp; /* bits */
+ int pixel_enc; /* 444, 420, 422 */
+ int dsc_en; /* dsc on/off */
+ int async_en; /* async mode */
+ int fec_en; /* fec */
+ int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
+ int num_of_dsc_slices; /* number of slices per line */
+};
+
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+ u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+ struct dp_ctrl dp_ctrl;
+ struct device *dev;
+ struct drm_dp_aux *aux;
+ struct dp_panel *panel;
+ struct dp_link *link;
+ struct dp_power *power;
+ struct dp_parser *parser;
+ struct dp_catalog *catalog;
+
+ struct completion idle_comp;
+ struct completion video_comp;
+};
+
+struct dp_cr_status {
+ u8 lane_0_1;
+ u8 lane_2_3;
+};
+
+#define DP_LANE0_1_CR_DONE 0x11
+
+static int dp_aux_link_configure(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 values[2];
+ int err;
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->num_lanes;
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
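+
+/*
+ * DPCD note: the two bytes written above land in consecutive DPCD
+ * registers, values[0] in DP_LINK_BW_SET (0x100) and values[1] in
+ * DP_LANE_COUNT_SET (0x101), which is why one two-byte write starting
+ * at DP_LINK_BW_SET configures both.
+ */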
+
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ reinit_completion(&ctrl->idle_comp);
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+
+ if (!wait_for_completion_timeout(&ctrl->idle_comp,
+ IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
+ pr_warn("PUSH_IDLE pattern timedout\n");
+
+ pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+ u32 config = 0, tbd;
+ u8 *dpcd = ctrl->panel->dpcd;
+
+ /* Default-> LSCLK DIV: 1/4 LCLK */
+ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
+
+ /* Scrambler reset enable */
+ if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+ config |= DP_CONFIGURATION_CTRL_ASSR;
+
+ tbd = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+
+ if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+ pr_debug("BIT_DEPTH not set. Configure default\n");
+ tbd = DP_TEST_BIT_DEPTH_8;
+ }
+
+ config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+ /* Num of Lanes */
+ config |= ((ctrl->link->link_params.num_lanes - 1)
+ << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT);
+
+ if (drm_dp_enhanced_frame_cap(dpcd))
+ config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+ config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */
+
+ /* sync clock & static Mvid */
+ config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN;
+ config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK;
+
+ dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+}
+
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+ u32 cc, tb;
+
+ dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+ dp_ctrl_config_ctrl(ctrl);
+
+ tb = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+ cc = dp_link_get_colorimetry_config(ctrl->link);
+ dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+ dp_panel_timing_cfg(ctrl->panel);
+}
+
+/*
+ * The structure and the few functions below are an IP/hardware-specific
+ * implementation of the transfer unit (TU) calculation. Most of it is
+ * intentionally left without inline comments.
+ */
+struct tu_algo_data {
+ s64 lclk_fp;
+ s64 pclk_fp;
+ s64 lwidth;
+ s64 lwidth_fp;
+ s64 hbp_relative_to_pclk;
+ s64 hbp_relative_to_pclk_fp;
+ int nlanes;
+ int bpp;
+ int pixelEnc;
+ int dsc_en;
+ int async_en;
+ int bpc;
+
+ uint delay_start_link_extra_pixclk;
+ int extra_buffer_margin;
+ s64 ratio_fp;
+ s64 original_ratio_fp;
+
+ s64 err_fp;
+ s64 n_err_fp;
+ s64 n_n_err_fp;
+ int tu_size;
+ int tu_size_desired;
+ int tu_size_minus1;
+
+ int valid_boundary_link;
+ s64 resulting_valid_fp;
+ s64 total_valid_fp;
+ s64 effective_valid_fp;
+ s64 effective_valid_recorded_fp;
+ int n_tus;
+ int n_tus_per_lane;
+ int paired_tus;
+ int remainder_tus;
+ int remainder_tus_upper;
+ int remainder_tus_lower;
+ int extra_bytes;
+ int filler_size;
+ int delay_start_link;
+
+ int extra_pclk_cycles;
+ int extra_pclk_cycles_in_link_clk;
+ s64 ratio_by_tu_fp;
+ s64 average_valid2_fp;
+ int new_valid_boundary_link;
+ int remainder_symbols_exist;
+ int n_symbols;
+ s64 n_remainder_symbols_per_lane_fp;
+ s64 last_partial_tu_fp;
+ s64 TU_ratio_err_fp;
+
+ int n_tus_incl_last_incomplete_tu;
+ int extra_pclk_cycles_tmp;
+ int extra_pclk_cycles_in_link_clk_tmp;
+ int extra_required_bytes_new_tmp;
+ int filler_size_tmp;
+ int lower_filler_size_tmp;
+ int delay_start_link_tmp;
+
+ bool boundary_moderation_en;
+ int boundary_mod_lower_err;
+ int upper_boundary_count;
+ int lower_boundary_count;
+ int i_upper_boundary_count;
+ int i_lower_boundary_count;
+ int valid_lower_boundary_link;
+ int even_distribution_BF;
+ int even_distribution_legacy;
+ int even_distribution;
+ int min_hblank_violated;
+ s64 delay_start_time_fp;
+ s64 hbp_time_fp;
+ s64 hactive_time_fp;
+ s64 diff_abs_fp;
+
+ s64 ratio;
+};
+
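+/*
+ * Three-way compare of two 32.32 fixed-point values with the sign
+ * handling done by hand: returns 0 if a == b, 1 if a > b and 2 if
+ * a < b.
+ */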
+static int _tu_param_compare(s64 a, s64 b)
+{
+ u32 a_sign;
+ u32 b_sign;
+ s64 a_temp, b_temp, minus_1;
+
+ if (a == b)
+ return 0;
+
+ minus_1 = drm_fixp_from_fraction(-1, 1);
+
+ a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
+
+ b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
+
+ if (a_sign > b_sign)
+ return 2;
+ else if (b_sign > a_sign)
+ return 1;
+
+ if (!a_sign && !b_sign) { /* positive */
+ if (a > b)
+ return 1;
+ else
+ return 2;
+ } else { /* negative */
+ a_temp = drm_fixp_mul(a, minus_1);
+ b_temp = drm_fixp_mul(b, minus_1);
+
+ if (a_temp > b_temp)
+ return 2;
+ else
+ return 1;
+ }
+}
+
+static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+ struct tu_algo_data *tu)
+{
+ int nlanes = in->nlanes;
+ int dsc_num_slices = in->num_of_dsc_slices;
+ int dsc_num_bytes = 0;
+ int numerator;
+ s64 pclk_dsc_fp;
+ s64 dwidth_dsc_fp;
+ s64 hbp_dsc_fp;
+
+ int tot_num_eoc_symbols = 0;
+ int tot_num_hor_bytes = 0;
+ int tot_num_dummy_bytes = 0;
+ int dwidth_dsc_bytes = 0;
+ int eoc_bytes = 0;
+
+ s64 temp1_fp, temp2_fp, temp3_fp;
+
+ tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1);
+ tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000);
+ tu->lwidth = in->hactive;
+ tu->hbp_relative_to_pclk = in->hporch;
+ tu->nlanes = in->nlanes;
+ tu->bpp = in->bpp;
+ tu->pixelEnc = in->pixel_enc;
+ tu->dsc_en = in->dsc_en;
+ tu->async_en = in->async_en;
+ tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1);
+ tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
+
+ if (tu->pixelEnc == 420) {
+ temp1_fp = drm_fixp_from_fraction(2, 1);
+ tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
+ tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
+		tu->hbp_relative_to_pclk_fp =
+			drm_fixp_div(tu->hbp_relative_to_pclk_fp, temp1_fp);
+ }
+
+ if (tu->pixelEnc == 422) {
+ switch (tu->bpp) {
+ case 24:
+ tu->bpp = 16;
+ tu->bpc = 8;
+ break;
+ case 30:
+ tu->bpp = 20;
+ tu->bpc = 10;
+ break;
+ default:
+ tu->bpp = 16;
+ tu->bpc = 8;
+ break;
+ }
+ } else {
+		tu->bpc = tu->bpp / 3;
+ }
+
+ if (!in->dsc_en)
+ goto fec_check;
+
+ temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
+ temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
+ temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
+
+ temp1_fp = drm_fixp_from_fraction(8, 1);
+ temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+
+ numerator = drm_fixp2int(temp3_fp);
+
+ dsc_num_bytes = numerator / dsc_num_slices;
+ eoc_bytes = dsc_num_bytes % nlanes;
+ tot_num_eoc_symbols = nlanes * dsc_num_slices;
+ tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices;
+ tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
+
+ if (dsc_num_bytes == 0)
+ pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes);
+
+ dwidth_dsc_bytes = (tot_num_hor_bytes +
+ tot_num_eoc_symbols +
+ (eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
+
+ dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
+
+ temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
+ temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
+ pclk_dsc_fp = temp1_fp;
+
+ temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
+ temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
+ hbp_dsc_fp = temp2_fp;
+
+ /* output */
+ tu->pclk_fp = pclk_dsc_fp;
+ tu->lwidth_fp = dwidth_dsc_fp;
+ tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
+
+fec_check:
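+	/*
+	 * Hedged: DP 1.4 FEC costs roughly 2.4% of link bandwidth, hence
+	 * the usable link clock is scaled by ~0.976 below.
+	 */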
+ if (in->fec_en) {
+ temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */
+ tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
+ }
+}
+
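+/*
+ * One step of the boundary-moderation search (summary inferred from the
+ * code): for the current candidate tu_size and upper/lower boundary
+ * counts, derive the per-TU valid symbol counts and error terms, and
+ * record the candidate as the new best when it meets the hblank and
+ * delay_start constraints.
+ */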
+static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
+{
+ s64 temp1_fp, temp2_fp, temp, temp1, temp2;
+ int compare_result_1, compare_result_2, compare_result_3;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+
+ tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+ temp = (tu->i_upper_boundary_count *
+ tu->new_valid_boundary_link +
+ tu->i_lower_boundary_count *
+ (tu->new_valid_boundary_link-1));
+ tu->average_valid2_fp = drm_fixp_from_fraction(temp,
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count));
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = tu->lwidth_fp;
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+ tu->n_tus = drm_fixp2int(temp2_fp);
+ if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+ tu->n_tus += 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu->n_remainder_symbols_per_lane_fp = temp2_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ tu->last_partial_tu_fp =
+ drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
+ temp1_fp);
+
+ if (tu->n_remainder_symbols_per_lane_fp != 0)
+ tu->remainder_symbols_exist = 1;
+ else
+ tu->remainder_symbols_exist = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
+ tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
+
+ tu->paired_tus = (int)((tu->n_tus_per_lane) /
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count));
+
+ tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count);
+
+ if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
+ tu->remainder_tus_upper = tu->i_upper_boundary_count;
+ tu->remainder_tus_lower = tu->remainder_tus -
+ tu->i_upper_boundary_count;
+ } else {
+ tu->remainder_tus_upper = tu->remainder_tus;
+ tu->remainder_tus_lower = 0;
+ }
+
+ temp = tu->paired_tus * (tu->i_upper_boundary_count *
+ tu->new_valid_boundary_link +
+ tu->i_lower_boundary_count *
+ (tu->new_valid_boundary_link - 1)) +
+ (tu->remainder_tus_upper *
+ tu->new_valid_boundary_link) +
+ (tu->remainder_tus_lower *
+ (tu->new_valid_boundary_link - 1));
+ tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
+
+ if (tu->remainder_symbols_exist) {
+ temp1_fp = tu->total_valid_fp +
+ tu->n_remainder_symbols_per_lane_fp;
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+ temp2_fp = temp2_fp + tu->last_partial_tu_fp;
+ temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
+ } else {
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+ temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
+ }
+ tu->effective_valid_fp = temp1_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
+
+ tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = tu->lwidth_fp;
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+
+ if (temp2_fp)
+ tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
+ else
+ tu->n_tus_incl_last_incomplete_tu = 0;
+
+ temp1 = 0;
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = tu->average_valid2_fp - temp2_fp;
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ temp1 = drm_fixp2int_ceil(temp1_fp);
+
+ temp = tu->i_upper_boundary_count * tu->nlanes;
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp2_fp)
+ temp2 = drm_fixp2int_ceil(temp2_fp);
+ else
+ temp2 = 0;
+ tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
+
+ temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
+ temp2_fp = drm_fixp_from_fraction(
+ tu->extra_required_bytes_new_tmp, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles_tmp = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
+ temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+ temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp1_fp)
+ tu->extra_pclk_cycles_in_link_clk_tmp =
+ drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles_in_link_clk_tmp = 0;
+
+ tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
+
+ tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
+
+ tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
+ tu->lower_filler_size_tmp +
+ tu->extra_buffer_margin;
+
+ temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
+ tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+ compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
+ if (compare_result_1 == 2)
+ compare_result_1 = 1;
+ else
+ compare_result_1 = 0;
+
+ compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
+ if (compare_result_2 == 2)
+ compare_result_2 = 1;
+ else
+ compare_result_2 = 0;
+
+ compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
+ tu->delay_start_time_fp);
+ if (compare_result_3 == 2)
+ compare_result_3 = 0;
+ else
+ compare_result_3 = 1;
+
+ if (((tu->even_distribution == 1) ||
+ ((tu->even_distribution_BF == 0) &&
+ (tu->even_distribution_legacy == 0))) &&
+ tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
+ compare_result_2 &&
+ (compare_result_1 || (tu->min_hblank_violated == 1)) &&
+ (tu->new_valid_boundary_link - 1) > 0 &&
+ compare_result_3 &&
+ (tu->delay_start_link_tmp <= 1023)) {
+ tu->upper_boundary_count = tu->i_upper_boundary_count;
+ tu->lower_boundary_count = tu->i_lower_boundary_count;
+ tu->err_fp = tu->n_n_err_fp;
+ tu->boundary_moderation_en = true;
+ tu->tu_size_desired = tu->tu_size;
+ tu->valid_boundary_link = tu->new_valid_boundary_link;
+ tu->effective_valid_recorded_fp = tu->effective_valid_fp;
+ tu->even_distribution_BF = 1;
+ tu->delay_start_link = tu->delay_start_link_tmp;
+ } else if (tu->boundary_mod_lower_err == 0) {
+ compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
+ tu->diff_abs_fp);
+ if (compare_result_1 == 2)
+ tu->boundary_mod_lower_err = 1;
+ }
+}
+
+static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
+ struct dp_vc_tu_mapping_table *tu_table)
+{
+ struct tu_algo_data tu;
+ int compare_result_1, compare_result_2;
+ u64 temp = 0;
+ s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
+
+ s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
+ s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
+ s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
+ s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
+
+ u8 DP_BRUTE_FORCE = 1;
+ s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
+ uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
+ uint HBLANK_MARGIN = 4;
+
+ memset(&tu, 0, sizeof(tu));
+
+ dp_panel_update_tu_timings(in, &tu);
+
+ tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
+
+ temp1_fp = drm_fixp_from_fraction(4, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp);
+ temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp);
+ tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+ temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp);
+
+ tu.original_ratio_fp = tu.ratio_fp;
+ tu.boundary_moderation_en = false;
+ tu.upper_boundary_count = 0;
+ tu.lower_boundary_count = 0;
+ tu.i_upper_boundary_count = 0;
+ tu.i_lower_boundary_count = 0;
+ tu.valid_lower_boundary_link = 0;
+ tu.even_distribution_BF = 0;
+ tu.even_distribution_legacy = 0;
+ tu.even_distribution = 0;
+ tu.delay_start_time_fp = 0;
+
+ tu.err_fp = drm_fixp_from_fraction(1000, 1);
+ tu.n_err_fp = 0;
+ tu.n_n_err_fp = 0;
+
+ tu.ratio = drm_fixp2int(tu.ratio_fp);
+ temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+ div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp);
+ if (temp2_fp != 0 &&
+ !tu.ratio && tu.dsc_en == 0) {
+ tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp);
+ tu.ratio = drm_fixp2int(tu.ratio_fp);
+ if (tu.ratio)
+ tu.ratio_fp = drm_fixp_from_fraction(1, 1);
+ }
+
+ if (tu.ratio > 1)
+ tu.ratio = 1;
+
+ if (tu.ratio == 1)
+ goto tu_size_calc;
+
+ compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp);
+ if (!compare_result_1 || compare_result_1 == 1)
+ compare_result_1 = 1;
+ else
+ compare_result_1 = 0;
+
+ compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp);
+ if (!compare_result_2 || compare_result_2 == 2)
+ compare_result_2 = 1;
+ else
+ compare_result_2 = 0;
+
+ if (tu.dsc_en && compare_result_1 && compare_result_2) {
+ HBLANK_MARGIN += 4;
+ DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n",
+ HBLANK_MARGIN);
+ }
+
+tu_size_calc:
+ for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+ temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+ temp = drm_fixp2int_ceil(temp2_fp);
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ tu.n_err_fp = temp1_fp - temp2_fp;
+
+ if (tu.n_err_fp < tu.err_fp) {
+ tu.err_fp = tu.n_err_fp;
+ tu.tu_size_desired = tu.tu_size;
+ }
+ }
+
+ tu.tu_size_minus1 = tu.tu_size_desired - 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+ temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+ tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+ temp2_fp = tu.lwidth_fp;
+ temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu.n_tus = drm_fixp2int(temp2_fp);
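+ /* round n_tus up when the fractional part is within 2^-20 of the next integer */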
+ if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+ tu.n_tus += 1;
+
+ tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0;
+ DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n",
+ tu.valid_boundary_link, tu.n_tus);
+
+ temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+ temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ temp = drm_fixp2int(temp2_fp);
+ if (temp && temp2_fp)
+ tu.extra_bytes = drm_fixp2int_ceil(temp2_fp);
+ else
+ tu.extra_bytes = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1);
+ temp2_fp = drm_fixp_from_fraction(8, tu.bpp);
+ temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp && temp1_fp)
+ tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu.extra_pclk_cycles = drm_fixp2int(temp1_fp);
+
+ temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+ temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
+
+ tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link;
+
+ temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+ tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+
+ tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk +
+ tu.filler_size + tu.extra_buffer_margin;
+
+ tu.resulting_valid_fp =
+ drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+
+ temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+ temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+ tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+
+ temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
+ temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp;
+ tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+ tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+ compare_result_1 = _tu_param_compare(tu.hbp_time_fp,
+ tu.delay_start_time_fp);
+ if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
+ tu.min_hblank_violated = 1;
+
+ tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp);
+
+ compare_result_2 = _tu_param_compare(tu.hactive_time_fp,
+ tu.delay_start_time_fp);
+ if (compare_result_2 == 2)
+ tu.min_hblank_violated = 1;
+
+ tu.delay_start_time_fp = 0;
+
+ /*
+ * Brute-force search: sweep the TU size and the upper/lower boundary
+ * counts until boundary moderation yields an acceptable error.
+ */
+
+ tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
+ tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp;
+
+ temp = drm_fixp2int(tu.diff_abs_fp);
+ if (!temp && tu.diff_abs_fp <= 0xffff)
+ tu.diff_abs_fp = 0;
+
+ /* if (diff_abs < 0) diff_abs *= -1; plain negation keeps the fixed-point scale */
+ if (tu.diff_abs_fp < 0)
+ tu.diff_abs_fp = -tu.diff_abs_fp;
+
+ tu.boundary_mod_lower_err = 0;
+ if ((tu.diff_abs_fp != 0 &&
+ ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
+ (tu.even_distribution_legacy == 0) ||
+ (DP_BRUTE_FORCE == 1))) ||
+ (tu.min_hblank_violated == 1)) {
+ do {
+ tu.err_fp = drm_fixp_from_fraction(1000, 1);
+
+ temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+ temp2_fp = drm_fixp_from_fraction(
+ tu.delay_start_link_extra_pixclk, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu.extra_buffer_margin =
+ drm_fixp2int_ceil(temp1_fp);
+ else
+ tu.extra_buffer_margin = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+ temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu.n_symbols = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu.n_symbols = 0;
+
+ for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+ for (tu.i_upper_boundary_count = 1;
+ tu.i_upper_boundary_count <= 15;
+ tu.i_upper_boundary_count++) {
+ for (tu.i_lower_boundary_count = 1;
+ tu.i_lower_boundary_count <= 15;
+ tu.i_lower_boundary_count++) {
+ _tu_valid_boundary_calc(&tu);
+ }
+ }
+ }
+ tu.delay_start_link_extra_pixclk--;
+ } while (tu.boundary_moderation_en != true &&
+ tu.boundary_mod_lower_err == 1 &&
+ tu.delay_start_link_extra_pixclk != 0);
+
+ if (tu.boundary_moderation_en == true) {
+ temp1_fp = drm_fixp_from_fraction(
+ (tu.upper_boundary_count *
+ tu.valid_boundary_link +
+ tu.lower_boundary_count *
+ (tu.valid_boundary_link - 1)), 1);
+ temp2_fp = drm_fixp_from_fraction(
+ (tu.upper_boundary_count +
+ tu.lower_boundary_count), 1);
+ tu.resulting_valid_fp =
+ drm_fixp_div(temp1_fp, temp2_fp);
+
+ temp1_fp = drm_fixp_from_fraction(
+ tu.tu_size_desired, 1);
+ tu.ratio_by_tu_fp =
+ drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+
+ tu.valid_lower_boundary_link =
+ tu.valid_boundary_link - 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+ temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp,
+ tu.resulting_valid_fp);
+ tu.n_tus = drm_fixp2int(temp2_fp);
+
+ tu.tu_size_minus1 = tu.tu_size_desired - 1;
+ tu.even_distribution_BF = 1;
+
+ temp1_fp =
+ drm_fixp_from_fraction(tu.tu_size_desired, 1);
+ temp2_fp =
+ drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+ tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+ }
+ }
+
+ temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp);
+
+ if (temp2_fp)
+ temp = drm_fixp2int_ceil(temp2_fp);
+ else
+ temp = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+ temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+ temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+ temp = drm_fixp2int(temp2_fp);
+
+ if (tu.async_en)
+ tu.delay_start_link += (int)temp;
+
+ temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+ tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+ /* OUTPUTS */
+ tu_table->valid_boundary_link = tu.valid_boundary_link;
+ tu_table->delay_start_link = tu.delay_start_link;
+ tu_table->boundary_moderation_en = tu.boundary_moderation_en;
+ tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link;
+ tu_table->upper_boundary_count = tu.upper_boundary_count;
+ tu_table->lower_boundary_count = tu.lower_boundary_count;
+ tu_table->tu_size_minus1 = tu.tu_size_minus1;
+
+ DRM_DEBUG_DP("TU: valid_boundary_link: %d\n",
+ tu_table->valid_boundary_link);
+ DRM_DEBUG_DP("TU: delay_start_link: %d\n",
+ tu_table->delay_start_link);
+ DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n",
+ tu_table->boundary_moderation_en);
+ DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n",
+ tu_table->valid_lower_boundary_link);
+ DRM_DEBUG_DP("TU: upper_boundary_count: %d\n",
+ tu_table->upper_boundary_count);
+ DRM_DEBUG_DP("TU: lower_boundary_count: %d\n",
+ tu_table->lower_boundary_count);
+ DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
+}
+
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+ struct dp_vc_tu_mapping_table *tu_table)
+{
+ struct dp_tu_calc_input in;
+ struct drm_display_mode *drm_mode;
+
+ drm_mode = &ctrl->panel->dp_mode.drm_mode;
+
+ in.lclk = ctrl->link->link_params.rate / 1000;
+ in.pclk_khz = drm_mode->clock;
+ in.hactive = drm_mode->hdisplay;
+ in.hporch = drm_mode->htotal - drm_mode->hdisplay;
+ in.nlanes = ctrl->link->link_params.num_lanes;
+ in.bpp = ctrl->panel->dp_mode.bpp;
+ in.pixel_enc = 444;
+ in.dsc_en = 0;
+ in.async_en = 0;
+ in.fec_en = 0;
+ in.num_of_dsc_slices = 0;
+ in.compress_ratio = 100;
+
+ _dp_ctrl_calc_tu(&in, tu_table);
+}
+
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table tu_calc_table;
+
+ dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
+ dp_tu |= tu_calc_table.tu_size_minus1;
+ valid_boundary |= tu_calc_table.valid_boundary_link;
+ valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+ valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+ valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+ if (tu_calc_table.boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ dp_tu, valid_boundary, valid_boundary2);
+
+ dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+ dp_tu, valid_boundary, valid_boundary2);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
+ if (!wait_for_completion_timeout(&ctrl->video_comp,
+ WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
+ DRM_ERROR("wait4video timedout\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
+}
+
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+ struct dp_link *link = ctrl->link;
+ int ret = 0, lane, lane_cnt;
+ u8 buf[4];
+ u32 max_level_reached = 0;
+ u32 voltage_swing_level = link->phy_params.v_level;
+ u32 pre_emphasis_level = link->phy_params.p_level;
+
+ ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ voltage_swing_level, pre_emphasis_level);
+
+ if (ret)
+ return ret;
+
+ if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DRM_DEBUG_DP("max. voltage swing level reached %d\n",
+ voltage_swing_level);
+ max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+ }
+
+ if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+ DRM_DEBUG_DP("max. pre-emphasis level reached %d\n",
+ pre_emphasis_level);
+ max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ lane_cnt = ctrl->link->link_params.num_lanes;
+ for (lane = 0; lane < lane_cnt; lane++)
+ buf[lane] = voltage_swing_level | pre_emphasis_level
+ | max_level_reached;
+
+ DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level
+ | pre_emphasis_level);
+ ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
+ buf, lane_cnt);
+ if (ret == lane_cnt)
+ ret = 0;
+
+ return ret;
+}
+
+static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+ u8 pattern)
+{
+ u8 buf;
+ int ret = 0;
+
+ DRM_DEBUG_DP("sink: pattern=%x\n", pattern);
+
+ buf = pattern;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4)
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
+ return ret == 1;
+}
+
+static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+ u8 *link_status)
+{
+ int len = 0;
+ u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
+ u32 link_status_read_max_retries = 100;
+
+ while (--link_status_read_max_retries) {
+ len = drm_dp_dpcd_read_link_status(ctrl->aux,
+ link_status);
+ if (len != DP_LINK_STATUS_SIZE) {
+ DRM_ERROR("DP link status read failed, err: %d\n", len);
+ return len;
+ }
+
+ if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+ struct dp_cr_status *cr, int *training_step)
+{
+ int tries, old_v_level, ret = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int const maximum_retries = 4;
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ *training_step = DP_TRAINING_1;
+
+ ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+
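+ /*
+ * Clock-recovery loop: the retry budget restarts whenever the sink
+ * requests a new voltage swing level, and training aborts once the
+ * maximum swing has been reached.
+ */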
+ tries = 0;
+ old_v_level = ctrl->link->phy_params.v_level;
+ for (tries = 0; tries < maximum_retries; tries++) {
+ drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
+
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
+ return ret;
+
+ cr->lane_0_1 = link_status[0];
+ cr->lane_2_3 = link_status[1];
+
+ if (drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
+ return 0;
+ }
+
+ if (ctrl->link->phy_params.v_level >=
+ DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DRM_ERROR_RATELIMITED("max v_level reached\n");
+ return -EAGAIN;
+ }
+
+ if (old_v_level != ctrl->link->phy_params.v_level) {
+ tries = 0;
+ old_v_level = ctrl->link->phy_params.v_level;
+ }
+
+ DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n");
+
+ dp_link_adjust_levels(ctrl->link, link_status);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ DRM_ERROR("max tries reached\n");
+ return -ETIMEDOUT;
+}
+
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
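+ /* step down one rate: HBR3 (8.1G) -> HBR2 (5.4G) -> HBR (2.7G) -> RBR (1.62G) */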
+ switch (ctrl->link->link_params.rate) {
+ case 810000:
+ ctrl->link->link_params.rate = 540000;
+ break;
+ case 540000:
+ ctrl->link->link_params.rate = 270000;
+ break;
+ case 270000:
+ ctrl->link->link_params.rate = 162000;
+ break;
+ case 162000:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret)
+ DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
+
+ return ret;
+}
+
+static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+{
+ if (ctrl->link->link_params.num_lanes == 1)
+ return -1;
+
+ ctrl->link->link_params.num_lanes /= 2;
+ ctrl->link->link_params.rate = ctrl->panel->link_info.rate;
+
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
+ return 0;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+}
+
+static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+ struct dp_cr_status *cr, int *training_step)
+{
+ int tries = 0, ret = 0;
+ char pattern;
+ int const maximum_retries = 5;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ *training_step = DP_TRAINING_2;
+
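+ /* prefer TPS3 for channel equalization when the sink supports it */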
+ if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+
+ ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
+ if (ret)
+ return ret;
+
+ dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+
+ for (tries = 0; tries <= maximum_retries; tries++) {
+ drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
+ return ret;
+ cr->lane_0_1 = link_status[0];
+ cr->lane_2_3 = link_status[1];
+
+ if (drm_dp_channel_eq_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
+ return 0;
+ }
+
+ dp_link_adjust_levels(ctrl->link, link_status);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+ struct dp_cr_status *cr, int *training_step)
+{
+ int ret = 0;
+ u8 encoding = DP_SET_ANSI_8B10B;
+ struct dp_link_info link_info = {0};
+
+ dp_ctrl_config_ctrl(ctrl);
+
+ link_info.num_lanes = ctrl->link->link_params.num_lanes;
+ link_info.rate = ctrl->link->link_params.rate;
+ link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+
+ dp_aux_link_configure(ctrl->aux, &link_info);
+ drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ &encoding, 1);
+
+ ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+ if (ret) {
+ DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ goto end;
+ }
+
+ /* print success info as this is a result of a user-initiated action */
+ DRM_DEBUG_DP("link training #1 successful\n");
+
+ ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+ if (ret) {
+ DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ goto end;
+ }
+
+ /* print success info as this is a result of a user-initiated action */
+ DRM_DEBUG_DP("link training #2 successful\n");
+
+end:
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+ struct dp_cr_status *cr, int *training_step)
+{
+ int ret = 0;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+ return ret;
+
+ /*
+ * As part of previous calls, the DP controller state might have
+ * transitioned to PUSH_IDLE. A soft reset is required before a
+ * link training pattern can be transmitted.
+ */
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ ret = dp_ctrl_link_train(ctrl, cr, training_step);
+
+ return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+ enum dp_pm_type module, char *name, unsigned long rate)
+{
+ u32 num = ctrl->parser->mp[module].num_clk;
+ struct dss_clk *cfg = ctrl->parser->mp[module].clk_config;
+
+ while (num && strcmp(cfg->clk_name, name)) {
+ num--;
+ cfg++;
+ }
+
+ DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name);
+
+ if (num)
+ cfg->rate = rate;
+ else
+ DRM_ERROR("%s clock doesn't exit to set rate %lu\n",
+ name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ struct dp_io *dp_io = &ctrl->parser->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+ opts_dp->lanes = ctrl->link->link_params.num_lanes;
+ opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+ dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
+ ctrl->link->link_params.rate * 1000);
+
+ phy_configure(phy, &dp_io->phy_opts);
+ phy_power_on(phy);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+ if (ret)
+ DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
+
+ DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+ ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+ return ret;
+}
+
+static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
+ ctrl->dp_ctrl.pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret)
+ DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
+
+ DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+ ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+ return ret;
+}
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+
+ if (!dp_ctrl) {
+ DRM_ERROR("Invalid input data\n");
+ return -EINVAL;
+ }
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ ctrl->dp_ctrl.orientation = flip;
+
+ dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ phy_init(phy);
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
+
+ return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @dp_ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ if (!dp_ctrl) {
+ DRM_ERROR("Invalid input data\n");
+ return;
+ }
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
+
+ DRM_DEBUG_DP("Host deinitialized successfully\n");
+}
+
+static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
+{
+ u8 *dpcd = ctrl->panel->dpcd;
+ u32 edid_quirks = 0;
+
+ edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
+ /*
+ * For a better interop experience, use a fixed NVID=0x8000
+ * whenever connected to a VGA dongle downstream.
+ */
+ if (drm_dp_is_branch(dpcd))
+ return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
+ DP_DPCD_QUIRK_CONSTANT_N));
+
+ return false;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ struct dp_io *dp_io = &ctrl->parser->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ opts_dp->lanes = ctrl->link->link_params.num_lanes;
+ phy_configure(phy, &dp_io->phy_opts);
+ /*
+ * Disable and re-enable the mainlink clock since the
+ * link clock might have been adjusted as part of the
+ * link maintenance.
+ */
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
+ return ret;
+ }
+ phy_power_off(phy);
+ /* hw recommended delay before re-enabling clocks */
+ msleep(20);
+
+ ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ struct dp_cr_status cr;
+ int training_step = DP_TRAINING_NONE;
+
+ dp_ctrl_push_idle(&ctrl->dp_ctrl);
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+ ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ if (ret)
+ goto end;
+
+ dp_ctrl_clear_training_pattern(ctrl);
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+ ret = dp_ctrl_wait4video_ready(ctrl);
+end:
+ return ret;
+}
+
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
+ if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+ DRM_DEBUG_DP("no test pattern selected by sink\n");
+ return ret;
+ }
+
+ /*
+ * The global reset needs the DP link clocks to be running, so
+ * issue it just before the link and core clocks are disabled.
+ */
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to disable DP controller\n");
+ return ret;
+ }
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (!ret)
+ ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+ else
+ DRM_ERROR("failed to enable DP link controller\n");
+
+ return ret;
+}
+
+static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+{
+ bool success = false;
+ u32 pattern_sent = 0x0;
+ u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
+
+ DRM_DEBUG_DP("request: 0x%x\n", pattern_requested);
+
+ if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ ctrl->link->phy_params.v_level,
+ ctrl->link->phy_params.p_level)) {
+ DRM_ERROR("Failed to set v/p levels\n");
+ return false;
+ }
+ dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ dp_ctrl_update_vx_px(ctrl);
+ dp_link_send_test_response(ctrl->link);
+
+ pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+
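+ /* confirm that the pattern the hardware reports matches what the sink requested */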
+ switch (pattern_sent) {
+ case MR_LINK_TRAINING1:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_D10_2);
+ break;
+ case MR_LINK_SYMBOL_ERM:
+ success = ((pattern_requested ==
+ DP_PHY_TEST_PATTERN_ERROR_COUNT) ||
+ (pattern_requested ==
+ DP_PHY_TEST_PATTERN_CP2520));
+ break;
+ case MR_LINK_PRBS7:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_PRBS7);
+ break;
+ case MR_LINK_CUSTOM80:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_80BIT_CUSTOM);
+ break;
+ case MR_LINK_TRAINING4:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_SEL_MASK);
+ break;
+ default:
+ success = false;
+ }
+
+ DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed",
+ pattern_requested);
+ return success;
+}
+
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ u32 sink_request = 0x0;
+
+ if (!dp_ctrl) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ sink_request = ctrl->link->sink_request;
+
+ if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+ DRM_DEBUG_DP("PHY_TEST_PATTERN request\n");
+ if (dp_ctrl_process_phy_test_request(ctrl)) {
+ DRM_ERROR("process phy_test_req failed\n");
+ return;
+ }
+ }
+
+ if (sink_request & DP_LINK_STATUS_UPDATED) {
+ if (dp_ctrl_link_maintenance(ctrl)) {
+ DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+ return;
+ }
+ }
+
+ if (sink_request & DP_TEST_LINK_TRAINING) {
+ dp_link_send_test_response(ctrl->link);
+ if (dp_ctrl_link_maintenance(ctrl)) {
+ DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+ return;
+ }
+ }
+}
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+{
+ int rc = 0;
+ struct dp_ctrl_private *ctrl;
+ u32 rate = 0;
+ int link_train_max_retries = 5;
+ u32 const phy_cts_pixel_clk_khz = 148500;
+ struct dp_cr_status cr;
+ int training_step;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ rate = ctrl->panel->link_info.rate;
+
+ dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+ DRM_DEBUG_DP("using phy test link parameters\n");
+ if (!ctrl->panel->dp_mode.drm_mode.clock)
+ ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+ } else {
+ ctrl->link->link_params.rate = rate;
+ ctrl->link->link_params.num_lanes =
+ ctrl->panel->link_info.num_lanes;
+ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ }
+
+ DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ ctrl->link->link_params.rate,
+ ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+ rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (rc)
+ return rc;
+
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
+ while (--link_train_max_retries &&
+ !atomic_read(&ctrl->dp_ctrl.aborted)) {
+ rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ if (rc) {
+ DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+ rc);
+ break;
+ }
+
+ training_step = DP_TRAINING_NONE;
+ rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+ if (rc == 0) {
+ /* training completed successfully */
+ break;
+ } else if (training_step == DP_TRAINING_1) {
+ /* link train_1 failed */
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ if (rc < 0) { /* already in RBR = 1.6G */
+ if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+ /*
+ * some lanes are ready,
+ * reduce lane number
+ */
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+ if (rc < 0) { /* lane == 1 already */
+ /* end with failure */
+ break;
+ }
+ } else {
+ /* end with failure */
+ break; /* lane == 1 already */
+ }
+ }
+ } else if (training_step == DP_TRAINING_2) {
+ /* link train_2 failed, lower lane rate */
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+ if (rc < 0) {
+ /* end with failure */
+ break; /* lane == 1 already */
+ }
+ }
+ }
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+ return rc;
+
+ /* stop txing train pattern */
+ dp_ctrl_clear_training_pattern(ctrl);
+
+ /*
+ * keep transmitting the idle pattern until video is ready,
+ * so the main link does not lose sync
+ */
+ if (rc == 0) /* link train successfully */
+ dp_ctrl_push_idle(dp_ctrl);
+
+ return rc;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+{
+ u32 rate = 0;
+ int ret = 0;
+ bool mainlink_ready = false;
+ struct dp_ctrl_private *ctrl;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ rate = ctrl->panel->link_info.rate;
+
+ ctrl->link->link_params.rate = rate;
+ ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
+ ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+ DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ ctrl->link->link_params.rate,
+ ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+ if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
+ ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
+ goto end;
+ }
+ }
+
+ ret = dp_ctrl_enable_stream_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ goto end;
+ }
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+ dp_ctrl_send_phy_test_pattern(ctrl);
+ return 0;
+ }
+
+ /*
+ * Set up transfer unit values and set controller state to send
+ * video.
+ */
+ dp_ctrl_configure_source_params(ctrl);
+
+ dp_catalog_ctrl_config_msa(ctrl->catalog,
+ ctrl->link->link_params.rate,
+ ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
+
+ reinit_completion(&ctrl->video_comp);
+
+ dp_ctrl_setup_tr_unit(ctrl);
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+ ret = dp_ctrl_wait4video_ready(ctrl);
+ if (ret)
+ return ret;
+
+ mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+
+end:
+ return ret;
+}
+
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret = 0;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+ if (ret)
+ DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret)
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+
+ phy_power_off(phy);
+ phy_exit(phy);
+
+ DRM_DEBUG_DP("DP off done\n");
+ return ret;
+}
+
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ u32 isr;
+
+ if (!dp_ctrl)
+ return;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+
+ if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
+ DRM_DEBUG_DP("dp_video_ready\n");
+ complete(&ctrl->video_comp);
+ }
+
+ if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
+ DRM_DEBUG_DP("idle_patterns_sent\n");
+ complete(&ctrl->idle_comp);
+ }
+}
+
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+ struct dp_panel *panel, struct drm_dp_aux *aux,
+ struct dp_power *power, struct dp_catalog *catalog,
+ struct dp_parser *parser)
+{
+ struct dp_ctrl_private *ctrl;
+
+ if (!dev || !panel || !aux ||
+ !link || !catalog) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ DRM_ERROR("Mem allocation failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_completion(&ctrl->idle_comp);
+ init_completion(&ctrl->video_comp);
+
+ /* in parameters */
+ ctrl->parser = parser;
+ ctrl->panel = panel;
+ ctrl->power = power;
+ ctrl->aux = aux;
+ ctrl->link = link;
+ ctrl->catalog = catalog;
+ ctrl->dev = dev;
+
+ return &ctrl->dp_ctrl;
+}
+
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644
index 000000000000..f60ba93c8678
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+ bool orientation;
+ atomic_t aborted;
+ u32 pixel_rate;
+};
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+ struct dp_panel *panel, struct drm_dp_aux *aux,
+ struct dp_power *power, struct dp_catalog *catalog,
+ struct dp_parser *parser);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
new file mode 100644
index 000000000000..84670bcdcfea
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_file.h>
+
+#include "dp_parser.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_ctrl.h"
+#include "dp_debug.h"
+#include "dp_display.h"
+
+#define DEBUG_NAME "msm_dp"
+
+struct dp_debug_private {
+ struct dentry *root;
+
+ struct dp_usbpd *usbpd;
+ struct dp_link *link;
+ struct dp_panel *panel;
+ struct drm_connector **connector;
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ struct dp_debug dp_debug;
+};
+
+static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
+{
+ if (rc >= *max_size) {
+ DRM_ERROR("buffer overflow\n");
+ return -EINVAL;
+ }
+ *len += rc;
+ *max_size = SZ_4K - *len;
+
+ return 0;
+}
+
+static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
+ size_t count, loff_t *ppos)
+{
+ struct dp_debug_private *debug = file->private_data;
+ char *buf;
+ u32 len = 0, rc = 0;
+ u64 lclk = 0;
+ u32 max_size = SZ_4K;
+ u32 link_params_rate;
+ struct drm_display_mode *drm_mode;
+
+ if (!debug)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ drm_mode = &debug->panel->dp_mode.drm_mode;
+
+ rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
+ debug->panel->max_pclk_khz);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\tdrm_dp_link\n\t\trate = %u\n",
+ debug->panel->link_info.rate);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tnum_lanes = %u\n",
+ debug->panel->link_info.num_lanes);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tcapabilities = %lu\n",
+ debug->panel->link_info.capabilities);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tback_porch = %dx%d\n",
+ drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->vtotal - drm_mode->vsync_end);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tfront_porch = %dx%d\n",
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->vsync_start - drm_mode->vdisplay);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tsync_width = %dx%d\n",
+ drm_mode->hsync_end - drm_mode->hsync_start,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tactive_low = %dx%d\n",
+ debug->panel->dp_mode.h_active_low,
+ debug->panel->dp_mode.v_active_low);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\th_skew = %d\n",
+ drm_mode->hskew);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\trefresh rate = %d\n",
+ drm_mode_vrefresh(drm_mode));
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tpixel clock khz = %d\n",
+ drm_mode->clock);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tbpp = %d\n",
+ debug->panel->dp_mode.bpp);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ /* Link Information */
+ rc = snprintf(buf + len, max_size,
+ "\tdp_link:\n\t\ttest_requested = %d\n",
+ debug->link->sink_request);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tnum_lanes = %d\n",
+ debug->link->link_params.num_lanes);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ link_params_rate = debug->link->link_params.rate;
+ rc = snprintf(buf + len, max_size,
+ "\t\tbw_code = %d\n",
+ drm_dp_link_rate_to_bw_code(link_params_rate));
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ lclk = debug->link->link_params.rate * 1000;
+ rc = snprintf(buf + len, max_size,
+ "\t\tlclk = %lld\n", lclk);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tv_level = %d\n",
+ debug->link->phy_params.v_level);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ rc = snprintf(buf + len, max_size,
+ "\t\tp_level = %d\n",
+ debug->link->phy_params.p_level);
+ if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+ goto error;
+
+ if (copy_to_user(user_buff, buf, len))
+ goto error;
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+ error:
+ kfree(buf);
+ return -EINVAL;
+}
+
+static int dp_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev;
+ struct dp_debug_private *debug;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u32 bpc;
+
+ debug = m->private;
+ dev = debug->drm_dev;
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected) {
+ bpc = debug->link->test_video.test_bit_depth;
+ seq_printf(m, "hdisplay: %d\n",
+ debug->link->test_video.test_h_width);
+ seq_printf(m, "vdisplay: %d\n",
+ debug->link->test_video.test_v_height);
+ seq_printf(m, "bpc: %u\n",
+ dp_link_bit_depth_to_bpc(bpc));
+ } else {
+ seq_puts(m, "0");
+ }
+ }
+
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+
+static int dp_test_type_show(struct seq_file *m, void *data)
+{
+ struct dp_debug_private *debug = m->private;
+ struct drm_device *dev = debug->drm_dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected)
+ seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+ else
+ seq_puts(m, "0");
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+
+static ssize_t dp_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct dp_debug_private *debug;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ int val = 0;
+
+ debug = ((struct seq_file *)file->private_data)->private;
+ dev = debug->drm_dev;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected) {
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ debug->panel->video_test = true;
+ else
+ debug->panel->video_test = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int dp_test_active_show(struct seq_file *m, void *data)
+{
+ struct dp_debug_private *debug = m->private;
+ struct drm_device *dev = debug->drm_dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected) {
+ if (debug->panel->video_test)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else {
+ seq_puts(m, "0");
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int dp_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, dp_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations dp_debug_fops = {
+ .open = simple_open,
+ .read = dp_debug_read_info,
+};
+
+static const struct file_operations test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = dp_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dp_test_active_write
+};
+
+static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+{
+ int rc = 0;
+ struct dp_debug_private *debug = container_of(dp_debug,
+ struct dp_debug_private, dp_debug);
+ struct dentry *file;
+ struct dentry *test_active;
+ struct dentry *test_data, *test_type;
+
+ file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
+ debug, &dp_debug_fops);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ }
+
+ test_active = debugfs_create_file("msm_dp_test_active", 0444,
+ minor->debugfs_root,
+ debug, &test_active_fops);
+ if (IS_ERR_OR_NULL(test_active)) {
+ rc = PTR_ERR(test_active);
+ DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ }
+
+ test_data = debugfs_create_file("msm_dp_test_data", 0444,
+ minor->debugfs_root,
+ debug, &dp_test_data_fops);
+ if (IS_ERR_OR_NULL(test_data)) {
+ rc = PTR_ERR(test_data);
+ DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ }
+
+ test_type = debugfs_create_file("msm_dp_test_type", 0444,
+ minor->debugfs_root,
+ debug, &dp_test_type_fops);
+ if (IS_ERR_OR_NULL(test_type)) {
+ rc = PTR_ERR(test_type);
+ DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ }
+
+ debug->root = minor->debugfs_root;
+
+ return rc;
+}
+
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector **connector, struct drm_minor *minor)
+{
+ int rc = 0;
+ struct dp_debug_private *debug;
+ struct dp_debug *dp_debug;
+
+ if (!dev || !panel || !usbpd || !link) {
+ DRM_ERROR("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
+ if (!debug) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ debug->dp_debug.debug_en = false;
+ debug->usbpd = usbpd;
+ debug->link = link;
+ debug->panel = panel;
+ debug->dev = dev;
+ debug->drm_dev = minor->dev;
+ debug->connector = connector;
+
+ dp_debug = &debug->dp_debug;
+ dp_debug->vdisplay = 0;
+ dp_debug->hdisplay = 0;
+ dp_debug->vrefresh = 0;
+
+ rc = dp_debug_init(dp_debug, minor);
+ if (rc) {
+ devm_kfree(dev, debug);
+ goto error;
+ }
+
+ return dp_debug;
+ error:
+ return ERR_PTR(rc);
+}
+
+static int dp_debug_deinit(struct dp_debug *dp_debug)
+{
+ struct dp_debug_private *debug;
+
+ if (!dp_debug)
+ return -EINVAL;
+
+ debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+ debugfs_remove_recursive(debug->root);
+
+ return 0;
+}
+
+void dp_debug_put(struct dp_debug *dp_debug)
+{
+ struct dp_debug_private *debug;
+
+ if (!dp_debug)
+ return;
+
+ debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+ dp_debug_deinit(dp_debug);
+
+ devm_kfree(debug->dev, debug);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
new file mode 100644
index 000000000000..7eaedfbb149c
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode enabled
+ * @vdisplay: used to filter out vdisplay value
+ * @hdisplay: used to filter out hdisplay value
+ * @vrefresh: used to filter out vrefresh value
+ * @tpg_state: specifies whether tpg feature is enabled
+ */
+struct dp_debug {
+ bool debug_en;
+ int aspect_ratio;
+ int vdisplay;
+ int hdisplay;
+ int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: double pointer to display connector
+ * @minor: pointer to drm minor number after device registration
+ * return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated with existing modules
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector **connector,
+ struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector **connector, struct drm_minor *minor)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 000000000000..e175aa3fd3a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_hpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_reg.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+#include "dp_drm.h"
+#include "dp_audio.h"
+#include "dp_debug.h"
+
+static struct msm_dp *g_dp_display;
+#define HPD_STRING_SIZE 30
+
+enum {
+ ISR_DISCONNECTED,
+ ISR_CONNECT_PENDING,
+ ISR_CONNECTED,
+ ISR_HPD_REPLUG_COUNT,
+ ISR_IRQ_HPD_PULSE_COUNT,
+ ISR_HPD_LO_GLITCH_COUNT,
+};
+
+/* event thread connection state */
+enum {
+ ST_DISCONNECTED,
+ ST_CONNECT_PENDING,
+ ST_CONNECTED,
+ ST_DISCONNECT_PENDING,
+ ST_SUSPEND_PENDING,
+ ST_SUSPENDED,
+};
+
+enum {
+ EV_NO_EVENT,
+ /* hpd events */
+ EV_HPD_INIT_SETUP,
+ EV_HPD_PLUG_INT,
+ EV_IRQ_HPD_INT,
+ EV_HPD_REPLUG_INT,
+ EV_HPD_UNPLUG_INT,
+ EV_USER_NOTIFICATION,
+ EV_CONNECT_PENDING_TIMEOUT,
+ EV_DISCONNECT_PENDING_TIMEOUT,
+};
+
+#define EVENT_TIMEOUT (HZ/10) /* 100ms */
+#define DP_EVENT_Q_MAX 8
+
+#define DP_TIMEOUT_5_SECOND (5000/EVENT_TIMEOUT)
+#define DP_TIMEOUT_NONE 0
+
+#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
+
+struct dp_event {
+ u32 event_id;
+ u32 data;
+ u32 delay;
+};
+
+struct dp_display_private {
+ char *name;
+ int irq;
+
+ /* state variables */
+ bool core_initialized;
+ bool hpd_irq_on;
+ bool audio_supported;
+
+ struct platform_device *pdev;
+ struct dentry *root;
+
+ struct dp_usbpd *usbpd;
+ struct dp_parser *parser;
+ struct dp_power *power;
+ struct dp_catalog *catalog;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_panel *panel;
+ struct dp_ctrl *ctrl;
+ struct dp_debug *debug;
+
+ struct dp_usbpd_cb usbpd_cb;
+ struct dp_display_mode dp_mode;
+ struct msm_dp dp_display;
+
+ /* wait for audio signaling */
+ struct completion audio_comp;
+
+ /* event related only access by event thread */
+ struct mutex event_mutex;
+ wait_queue_head_t event_q;
+ atomic_t hpd_state;
+ u32 event_pndx;
+ u32 event_gndx;
+ struct dp_event event_list[DP_EVENT_Q_MAX];
+ spinlock_t event_lock;
+
+ struct completion resume_comp;
+
+ struct dp_audio *audio;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+ {.compatible = "qcom,sc7180-dp"},
+ {}
+};
+
+static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+ u32 data, u32 delay)
+{
+ unsigned long flag;
+ struct dp_event *todo;
+ int pndx;
+
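+ /*
+ * Ring buffer of pending events: the queue is full when advancing the
+ * producer index would collide with the consumer index, so one slot
+ * always stays empty.
+ */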
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ pndx = dp_priv->event_pndx + 1;
+ pndx %= DP_EVENT_Q_MAX;
+ if (pndx == dp_priv->event_gndx) {
+ pr_err("event_q is full: pndx=%d gndx=%d\n",
+ dp_priv->event_pndx, dp_priv->event_gndx);
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ return -EPERM;
+ }
+ todo = &dp_priv->event_list[dp_priv->event_pndx++];
+ dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo->event_id = event;
+ todo->data = data;
+ todo->delay = delay;
+ wake_up(&dp_priv->event_q);
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ return 0;
+}
+
+static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+{
+ unsigned long flag;
+ struct dp_event *todo;
+ u32 gndx;
+
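+ /* entries are not dequeued here; matching ones are masked to EV_NO_EVENT in place */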
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ if (dp_priv->event_pndx == dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ return -ENOENT;
+ }
+
+ gndx = dp_priv->event_gndx;
+ while (dp_priv->event_pndx != gndx) {
+ todo = &dp_priv->event_list[gndx];
+ if (todo->event_id == event) {
+ todo->event_id = EV_NO_EVENT; /* deleted */
+ todo->delay = 0;
+ }
+ gndx++;
+ gndx %= DP_EVENT_Q_MAX;
+ }
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ return 0;
+}
+
+void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ complete_all(&dp->audio_comp);
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+ struct drm_device *drm;
+ struct msm_drm_private *priv;
+
+ drm = dev_get_drvdata(master);
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+ if (!dp) {
+ DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+ return -EINVAL;
+ }
+
+ dp->dp_display.drm_dev = drm;
+ priv = drm->dev_private;
+ priv->dp = &(dp->dp_display);
+
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ DRM_ERROR("device tree parsing failed\n");
+ goto end;
+ }
+
+ rc = dp_aux_register(dp->aux);
+ if (rc) {
+ DRM_ERROR("DRM DP AUX register failed\n");
+ goto end;
+ }
+
+ rc = dp_power_client_init(dp->power);
+ if (rc) {
+ DRM_ERROR("Power client create failed\n");
+ goto end;
+ }
+
+ rc = dp_register_audio_driver(dev, dp->audio);
+ if (rc)
+ DRM_ERROR("Audio registration Dp failed\n");
+
+end:
+ return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dp_display_private *dp;
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+ if (!dp) {
+ DRM_ERROR("Invalid DP driver data\n");
+ return;
+ }
+
+ dp_power_client_deinit(dp->power);
+ dp_aux_unregister(dp->aux);
+ priv->dp = NULL;
+}
+
+static const struct component_ops dp_display_comp_ops = {
+ .bind = dp_display_bind,
+ .unbind = dp_display_unbind,
+};
+
+static bool dp_display_is_ds_bridge(struct dp_panel *panel)
+{
+ return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT);
+}
+
+static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
+{
+ return dp_display_is_ds_bridge(dp->panel) &&
+ (dp->link->sink_count == 0);
+}
+
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+ bool hpd)
+{
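+ /* the encoder mode only needs to be set once, hence the function-local static flag */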
+ static bool encoder_mode_set;
+ struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if ((hpd && dp->dp_display.is_connected) ||
+ (!hpd && !dp->dp_display.is_connected)) {
+ DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
+ return 0;
+ }
+
+ /* reset video pattern flag on disconnect */
+ if (!hpd)
+ dp->panel->video_test = false;
+
+ dp->dp_display.is_connected = hpd;
+
+ if (dp->dp_display.is_connected && dp->dp_display.encoder
+ && !encoder_mode_set
+ && kms->funcs->set_encoder_mode) {
+ kms->funcs->set_encoder_mode(kms,
+ dp->dp_display.encoder, false);
+ DRM_DEBUG_DP("set_encoder_mode() Completed\n");
+ encoder_mode_set = true;
+ }
+
+ dp_display_send_hpd_event(&dp->dp_display);
+
+ return 0;
+}
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+ int rc = 0;
+ struct edid *edid;
+
+ dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
+
+ rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
+ if (rc)
+ goto end;
+
+ dp_link_process_request(dp->link);
+
+ edid = dp->panel->edid;
+
+ dp->audio_supported = drm_detect_monitor_audio(edid);
+ dp_panel_handle_sink_request(dp->panel);
+
+ dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+ dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+
+ rc = dp_ctrl_on_link(dp->ctrl);
+ if (rc) {
+ DRM_ERROR("failed to complete DP link training\n");
+ goto end;
+ }
+
+ dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+
+end:
+ return rc;
+}
+
+static void dp_display_host_init(struct dp_display_private *dp)
+{
+ bool flip = false;
+
+ if (dp->core_initialized) {
+ DRM_DEBUG_DP("DP core already initialized\n");
+ return;
+ }
+
+ if (dp->usbpd->orientation == ORIENTATION_CC2)
+ flip = true;
+
+ dp_power_init(dp->power, flip);
+ dp_ctrl_host_init(dp->ctrl, flip);
+ dp_aux_init(dp->aux);
+ dp->core_initialized = true;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dev) {
+ DRM_ERROR("invalid dev\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+ if (!dp) {
+ DRM_ERROR("no driver data found\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ dp_display_host_init(dp);
+
+ /*
+ * Put the sink into normal operation mode (D0) before
+ * reading the DPCD.
+ */
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+ rc = dp_display_process_hpd_high(dp);
+end:
+ return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dev) {
+ DRM_ERROR("invalid dev\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+ if (!dp) {
+ DRM_ERROR("no driver data found\n");
+ rc = -ENODEV;
+ return rc;
+ }
+
+ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+
+ return rc;
+}
+
+static void dp_display_handle_video_request(struct dp_display_private *dp)
+{
+ if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+ dp->panel->video_test = true;
+ dp_link_send_test_response(dp->link);
+ }
+}
+
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+{
+ u32 sink_request;
+
+ sink_request = dp->link->sink_request;
+
+ if (sink_request & DS_PORT_STATUS_CHANGED) {
+ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ if (dp_display_is_sink_count_zero(dp)) {
+ DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+ return 0;
+ }
+
+ return dp_display_process_hpd_high(dp);
+ }
+
+ dp_ctrl_handle_sink_request(dp->ctrl);
+
+ if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+ dp_display_handle_video_request(dp);
+
+ return 0;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dev) {
+ DRM_ERROR("invalid dev\n");
+ return -EINVAL;
+ }
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+ if (!dp) {
+ DRM_ERROR("no driver data found\n");
+ return -ENODEV;
+ }
+
+ /* check for any test request issued by sink */
+ rc = dp_link_process_request(dp->link);
+ if (!rc)
+ dp_display_handle_irq_hpd(dp);
+
+ return rc;
+}
+
+static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+{
+ struct dp_usbpd *hpd = dp->usbpd;
+ u32 state;
+ u32 tout = DP_TIMEOUT_5_SECOND;
+ int ret;
+
+ if (!hpd)
+ return 0;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = atomic_read(&dp->hpd_state);
+ if (state == ST_SUSPEND_PENDING) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_DISCONNECT_PENDING) {
+ /* wait until ST_DISCONNECTED */
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_SUSPENDED)
+ tout = DP_TIMEOUT_NONE;
+
+ atomic_set(&dp->hpd_state, ST_CONNECT_PENDING);
+
+ hpd->hpd_high = 1;
+
+ ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+ if (ret) { /* failed */
+ hpd->hpd_high = 0;
+ atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+ }
+
+ /* start sanity checking */
+ dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+
+ mutex_unlock(&dp->event_mutex);
+
+ /* uevent will complete connection part */
+ return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data);
+static int dp_display_disable(struct dp_display_private *dp, u32 data);
+
+static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+ u32 state;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = atomic_read(&dp->hpd_state);
+ if (state == ST_CONNECT_PENDING) {
+ dp_display_enable(dp, 0);
+ atomic_set(&dp->hpd_state, ST_CONNECTED);
+ }
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+ bool plugged)
+{
+ if (dp_display->plugged_cb && dp_display->codec_dev)
+ dp_display->plugged_cb(dp_display->codec_dev, plugged);
+}
+
+static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+{
+ struct dp_usbpd *hpd = dp->usbpd;
+ u32 state;
+
+ if (!hpd)
+ return 0;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = atomic_read(&dp->hpd_state);
+ if (state == ST_SUSPEND_PENDING) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until CONNECTED */
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING);
+
+ /* disable HPD plug interrupt until disconnect is done */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
+ | DP_DP_IRQ_HPD_INT_MASK, false);
+
+ hpd->hpd_high = 0;
+
+ /*
+ * We don't need separate work for disconnect as
+ * connect/attention interrupts are disabled
+ */
+ dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
+
+ /* start sanity checking */
+ dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+
+ /* signal the disconnect event early to ensure proper teardown */
+ dp_display_handle_plugged_change(g_dp_display, false);
+ reinit_completion(&dp->audio_comp);
+
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_IRQ_HPD_INT_MASK, true);
+
+ /* uevent will complete disconnection part */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+}
+
+static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+ u32 state;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = atomic_read(&dp->hpd_state);
+ if (state == ST_DISCONNECT_PENDING) {
+ dp_display_disable(dp, 0);
+ atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+ }
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+{
+ u32 state;
+
+ mutex_lock(&dp->event_mutex);
+
+ /* irq_hpd can happen at either connected or disconnected state */
+ state = atomic_read(&dp->hpd_state);
+ if (state == ST_SUSPEND_PENDING) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ dp_display_usbpd_attention_cb(&dp->pdev->dev);
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+ dp_debug_put(dp->debug);
+ dp_ctrl_put(dp->ctrl);
+ dp_panel_put(dp->panel);
+ dp_aux_put(dp->aux);
+ dp_audio_put(dp->audio);
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+ int rc = 0;
+ struct device *dev = &dp->pdev->dev;
+ struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+ struct dp_panel_in panel_in = {
+ .dev = dev,
+ };
+
+ /* Callback APIs used for cable status change event */
+ cb->configure = dp_display_usbpd_configure_cb;
+ cb->disconnect = dp_display_usbpd_disconnect_cb;
+ cb->attention = dp_display_usbpd_attention_cb;
+
+ dp->usbpd = dp_hpd_get(dev, cb);
+ if (IS_ERR(dp->usbpd)) {
+ rc = PTR_ERR(dp->usbpd);
+ DRM_ERROR("failed to initialize hpd, rc = %d\n", rc);
+ dp->usbpd = NULL;
+ goto error;
+ }
+
+ dp->parser = dp_parser_get(dp->pdev);
+ if (IS_ERR(dp->parser)) {
+ rc = PTR_ERR(dp->parser);
+ DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
+ dp->parser = NULL;
+ goto error;
+ }
+
+ dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+ if (IS_ERR(dp->catalog)) {
+ rc = PTR_ERR(dp->catalog);
+ DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
+ dp->catalog = NULL;
+ goto error;
+ }
+
+ dp->power = dp_power_get(dp->parser);
+ if (IS_ERR(dp->power)) {
+ rc = PTR_ERR(dp->power);
+ DRM_ERROR("failed to initialize power, rc = %d\n", rc);
+ dp->power = NULL;
+ goto error;
+ }
+
+ dp->aux = dp_aux_get(dev, dp->catalog);
+ if (IS_ERR(dp->aux)) {
+ rc = PTR_ERR(dp->aux);
+ DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
+ dp->aux = NULL;
+ goto error;
+ }
+
+ dp->link = dp_link_get(dev, dp->aux);
+ if (IS_ERR(dp->link)) {
+ rc = PTR_ERR(dp->link);
+ DRM_ERROR("failed to initialize link, rc = %d\n", rc);
+ dp->link = NULL;
+ goto error_link;
+ }
+
+ panel_in.aux = dp->aux;
+ panel_in.catalog = dp->catalog;
+ panel_in.link = dp->link;
+
+ dp->panel = dp_panel_get(&panel_in);
+ if (IS_ERR(dp->panel)) {
+ rc = PTR_ERR(dp->panel);
+ DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
+ dp->panel = NULL;
+ goto error_link;
+ }
+
+ dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+ dp->power, dp->catalog, dp->parser);
+ if (IS_ERR(dp->ctrl)) {
+ rc = PTR_ERR(dp->ctrl);
+ DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
+ dp->ctrl = NULL;
+ goto error_ctrl;
+ }
+
+ dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
+ if (IS_ERR(dp->audio)) {
+ rc = PTR_ERR(dp->audio);
+ pr_err("failed to initialize audio, rc = %d\n", rc);
+ dp->audio = NULL;
+ goto error_audio;
+ }
+
+ return rc;
+
+error_audio:
+ dp_ctrl_put(dp->ctrl);
+error_ctrl:
+ dp_panel_put(dp->panel);
+error_link:
+ dp_aux_put(dp->aux);
+error:
+ return rc;
+}
+
+static int dp_display_set_mode(struct msm_dp *dp_display,
+ struct dp_display_mode *mode)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ dp->panel->dp_mode.drm_mode = mode->drm_mode;
+ dp->panel->dp_mode.bpp = mode->bpp;
+ dp->panel->dp_mode.capabilities = mode->capabilities;
+ dp_panel_init_panel_info(dp->panel);
+ return 0;
+}
+
+static int dp_display_prepare(struct msm_dp *dp)
+{
+ return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data)
+{
+ int rc = 0;
+ struct msm_dp *dp_display;
+
+ dp_display = g_dp_display;
+
+ if (dp_display->power_on) {
+ DRM_DEBUG_DP("Link already setup, return\n");
+ return 0;
+ }
+
+ rc = dp_ctrl_on_stream(dp->ctrl);
+ if (!rc)
+ dp_display->power_on = true;
+
+	/* complete resume_comp regardless of whether it is armed */
+ complete(&dp->resume_comp);
+ return rc;
+}
+
+static int dp_display_post_enable(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ u32 rate;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ rate = dp->link->link_params.rate;
+
+ if (dp->audio_supported) {
+ dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate);
+ dp->audio->lane_count = dp->link->link_params.num_lanes;
+ }
+
+ /* signal the connect event late to synchronize video and display */
+ dp_display_handle_plugged_change(dp_display, true);
+ return 0;
+}
+
+static int dp_display_disable(struct dp_display_private *dp, u32 data)
+{
+ struct msm_dp *dp_display;
+
+ dp_display = g_dp_display;
+
+ if (!dp_display->power_on)
+ return -EINVAL;
+
+ /* wait only if audio was enabled */
+ if (dp_display->audio_enabled) {
+ if (!wait_for_completion_timeout(&dp->audio_comp,
+ HZ * 5))
+ DRM_ERROR("audio comp timeout\n");
+ }
+
+ dp_display->audio_enabled = false;
+
+ dp_ctrl_off(dp->ctrl);
+
+ dp->core_initialized = false;
+
+ dp_display->power_on = false;
+
+ return 0;
+}
+
+static int dp_display_unprepare(struct msm_dp *dp)
+{
+ return 0;
+}
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+ hdmi_codec_plugged_cb fn, struct device *codec_dev)
+{
+ bool plugged;
+
+ dp_display->plugged_cb = fn;
+ dp_display->codec_dev = codec_dev;
+ plugged = dp_display->is_connected;
+ dp_display_handle_plugged_change(dp_display, plugged);
+
+ return 0;
+}
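+
+/*
+ * Usage sketch (illustrative only, not part of this patch): an
+ * hdmi-codec based audio driver could register for plug notifications
+ * as below; my_codec_plugged_cb and codec_dev are hypothetical names.
+ *
+ *	static void my_codec_plugged_cb(struct device *dev, bool plugged)
+ *	{
+ *		// start or stop audio streaming on plug state change
+ *	}
+ *
+ *	dp_display_set_plugged_cb(dp_display, my_codec_plugged_cb, codec_dev);
+ */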
+
+int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz)
+{
+ const u32 num_components = 3, default_bpp = 24;
+ struct dp_display_private *dp_display;
+ struct dp_link_info *link_info;
+ u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+
+ if (!dp || !mode_pclk_khz || !dp->connector) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ link_info = &dp_display->panel->link_info;
+
+ mode_bpp = dp->connector->display_info.bpc * num_components;
+ if (!mode_bpp)
+ mode_bpp = default_bpp;
+
+ mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+ mode_bpp, mode_pclk_khz);
+
+ mode_rate_khz = mode_pclk_khz * mode_bpp;
+ supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ if (mode_rate_khz > supported_rate_khz)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
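+
+/*
+ * Worked example (illustrative, assuming link_info->rate is the per-lane
+ * symbol rate in kHz): 1080p60 with mode_pclk_khz = 148500 at 24 bpp gives
+ * mode_rate_khz = 148500 * 24 = 3564000, while a 2-lane HBR link supports
+ * 2 * 270000 * 8 = 4320000, so the mode is reported as MODE_OK.
+ */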
+
+int dp_display_get_modes(struct msm_dp *dp,
+ struct dp_display_mode *dp_mode)
+{
+ struct dp_display_private *dp_display;
+ int ret = 0;
+
+ if (!dp) {
+ DRM_ERROR("invalid params\n");
+ return 0;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ ret = dp_panel_get_modes(dp_display->panel,
+ dp->connector, dp_mode);
+ if (dp_mode->drm_mode.clock)
+ dp->max_pclk_khz = dp_mode->drm_mode.clock;
+ return ret;
+}
+
+bool dp_display_check_video_test(struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ return dp_display->panel->video_test;
+}
+
+int dp_display_get_test_bpp(struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ if (!dp) {
+ DRM_ERROR("invalid params\n");
+ return 0;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ return dp_link_bit_depth_to_bpp(
+ dp_display->link->test_video.test_bit_depth);
+}
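+
+/*
+ * Example (illustrative): a sink requesting DP_TEST_BIT_DEPTH_8 uses
+ * 8 bits per component, so dp_link_bit_depth_to_bpp() is expected to
+ * return 24 for the three RGB components.
+ */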
+
+static void dp_display_config_hpd(struct dp_display_private *dp)
+{
+ dp_display_host_init(dp);
+ dp_catalog_ctrl_hpd_config(dp->catalog);
+
+	/*
+	 * Enable the interrupt only this once: DP clocks stay on during
+	 * disconnect and the interrupt is never disabled.
+	 */
+ enable_irq(dp->irq);
+}
+
+static int hpd_event_thread(void *data)
+{
+ struct dp_display_private *dp_priv;
+ unsigned long flag;
+ struct dp_event *todo;
+ int timeout_mode = 0;
+
+ dp_priv = (struct dp_display_private *)data;
+
+ while (1) {
+ if (timeout_mode) {
+ wait_event_timeout(dp_priv->event_q,
+ (dp_priv->event_pndx == dp_priv->event_gndx),
+ EVENT_TIMEOUT);
+ } else {
+ wait_event_interruptible(dp_priv->event_q,
+ (dp_priv->event_pndx != dp_priv->event_gndx));
+ }
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ todo = &dp_priv->event_list[dp_priv->event_gndx];
+ if (todo->delay) {
+ struct dp_event *todo_next;
+
+ dp_priv->event_gndx++;
+ dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+
+			/* re-enter the delayed event into the queue */
+ todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
+ dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo_next->event_id = todo->event_id;
+ todo_next->data = todo->data;
+ todo_next->delay = todo->delay - 1;
+
+ /* clean up older event */
+ todo->event_id = EV_NO_EVENT;
+ todo->delay = 0;
+
+ /* switch to timeout mode */
+ timeout_mode = 1;
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ continue;
+ }
+
+ /* timeout with no events in q */
+ if (dp_priv->event_pndx == dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ continue;
+ }
+
+ dp_priv->event_gndx++;
+ dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ timeout_mode = 0;
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ switch (todo->event_id) {
+ case EV_HPD_INIT_SETUP:
+ dp_display_config_hpd(dp_priv);
+ break;
+ case EV_HPD_PLUG_INT:
+ dp_hpd_plug_handle(dp_priv, todo->data);
+ break;
+ case EV_HPD_UNPLUG_INT:
+ dp_hpd_unplug_handle(dp_priv, todo->data);
+ break;
+ case EV_IRQ_HPD_INT:
+ dp_irq_hpd_handle(dp_priv, todo->data);
+ break;
+ case EV_HPD_REPLUG_INT:
+ /* do nothing */
+ break;
+ case EV_USER_NOTIFICATION:
+ dp_display_send_hpd_notification(dp_priv,
+ todo->data);
+ break;
+ case EV_CONNECT_PENDING_TIMEOUT:
+ dp_connect_pending_timeout(dp_priv,
+ todo->data);
+ break;
+ case EV_DISCONNECT_PENDING_TIMEOUT:
+ dp_disconnect_pending_timeout(dp_priv,
+ todo->data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
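+
+/*
+ * Illustrative trace of the delay handling above: an event queued with
+ * delay = 2 is requeued twice (delay 1, then 0), with the thread waiting
+ * up to EVENT_TIMEOUT between passes, before its handler runs. This is
+ * how dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1) defers a plug event by
+ * roughly one timeout period.
+ */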
+
+static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+{
+ init_waitqueue_head(&dp_priv->event_q);
+ spin_lock_init(&dp_priv->event_lock);
+
+ kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+}
+
+static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+{
+ struct dp_display_private *dp = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 hpd_isr_status;
+
+ if (!dp) {
+ DRM_ERROR("invalid data\n");
+ return IRQ_NONE;
+ }
+
+ hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+
+ if (hpd_isr_status & 0x0F) {
+ /* hpd related interrupts */
+ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
+ hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ }
+
+ if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
+ /* delete connect pending event first */
+ dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+ }
+
+ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
+ dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+
+ if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ }
+
+ /* DP controller isr */
+ dp_ctrl_isr(dp->ctrl);
+
+ /* DP aux isr */
+ dp_aux_isr(dp->aux);
+
+ return ret;
+}
+
+int dp_display_request_irq(struct msm_dp *dp_display)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+	if (!dp->irq) {
+		DRM_ERROR("failed to get irq\n");
+		return -EINVAL;
+	}
+
+ rc = devm_request_irq(&dp->pdev->dev, dp->irq,
+ dp_display_irq_handler,
+ IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+ if (rc < 0) {
+ DRM_ERROR("failed to request IRQ%u: %d\n",
+ dp->irq, rc);
+ return rc;
+ }
+ disable_irq(dp->irq);
+
+ return 0;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!pdev || !pdev->dev.of_node) {
+ DRM_ERROR("pdev not found\n");
+ return -ENODEV;
+ }
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->pdev = pdev;
+ dp->name = "drm_dp";
+
+ rc = dp_init_sub_modules(dp);
+ if (rc) {
+ DRM_ERROR("init sub module failed\n");
+ return -EPROBE_DEFER;
+ }
+
+ mutex_init(&dp->event_mutex);
+
+ init_completion(&dp->resume_comp);
+
+ g_dp_display = &dp->dp_display;
+
+ /* Store DP audio handle inside DP display */
+ g_dp_display->dp_audio = dp->audio;
+
+ init_completion(&dp->audio_comp);
+
+ platform_set_drvdata(pdev, g_dp_display);
+
+ rc = component_add(&pdev->dev, &dp_display_comp_ops);
+ if (rc) {
+ DRM_ERROR("component add failed, rc=%d\n", rc);
+ dp_display_deinit_sub_modules(dp);
+ }
+
+ return rc;
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(g_dp_display,
+ struct dp_display_private, dp_display);
+
+ dp_display_deinit_sub_modules(dp);
+
+ component_del(&pdev->dev, &dp_display_comp_ops);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int dp_pm_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int dp_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dp_display_private *dp = platform_get_drvdata(pdev);
+
+ if (!dp) {
+ DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+ return -EINVAL;
+ }
+
+ atomic_set(&dp->hpd_state, ST_SUSPENDED);
+
+ return 0;
+}
+
+static int dp_pm_prepare(struct device *dev)
+{
+ return 0;
+}
+
+static void dp_pm_complete(struct device *dev)
+{
+}
+
+static const struct dev_pm_ops dp_pm_ops = {
+ .suspend = dp_pm_suspend,
+ .resume = dp_pm_resume,
+ .prepare = dp_pm_prepare,
+ .complete = dp_pm_complete,
+};
+
+static struct platform_driver dp_display_driver = {
+ .probe = dp_display_probe,
+ .remove = dp_display_remove,
+ .driver = {
+ .name = "msm-dp-display",
+ .of_match_table = dp_dt_match,
+ .suppress_bind_attrs = true,
+ .pm = &dp_pm_ops,
+ },
+};
+
+int __init msm_dp_register(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&dp_display_driver);
+ if (ret)
+		DRM_ERROR("DP display driver register failed\n");
+
+ return ret;
+}
+
+void __exit msm_dp_unregister(void)
+{
+ platform_driver_unregister(&dp_display_driver);
+}
+
+void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ if (!dp_display)
+ return;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ dp_hpd_event_setup(dp);
+
+ dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+}
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+{
+ struct dp_display_private *dp;
+ struct device *dev;
+ int rc;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dev = &dp->pdev->dev;
+
+ dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
+ dp->link, &dp->dp_display.connector,
+ minor);
+ if (IS_ERR(dp->debug)) {
+ rc = PTR_ERR(dp->debug);
+ DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
+ dp->debug = NULL;
+ }
+}
+
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
+ return -EINVAL;
+
+ priv = dev->dev_private;
+ dp_display->drm_dev = dev;
+
+ ret = dp_display_request_irq(dp_display);
+ if (ret) {
+ DRM_ERROR("request_irq failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_display->encoder = encoder;
+
+ dp_display->connector = dp_drm_connector_init(dp_display);
+ if (IS_ERR(dp_display->connector)) {
+ ret = PTR_ERR(dp_display->connector);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp connector: %d\n", ret);
+ dp_display->connector = NULL;
+ return ret;
+ }
+
+ priv->connectors[priv->num_connectors++] = dp_display->connector;
+ return 0;
+}
+
+static int dp_display_wait4resume_done(struct dp_display_private *dp)
+{
+ int ret = 0;
+
+ reinit_completion(&dp->resume_comp);
+ if (!wait_for_completion_timeout(&dp->resume_comp,
+ WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) {
+		DRM_ERROR("wait4resume_done timed out\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
+}
+
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+ int rc = 0;
+ struct dp_display_private *dp_display;
+ u32 state;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ if (!dp_display->dp_mode.drm_mode.clock) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dp_display->event_mutex);
+
+ rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+ if (rc) {
+ DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
+ mutex_unlock(&dp_display->event_mutex);
+ return rc;
+ }
+
+ rc = dp_display_prepare(dp);
+ if (rc) {
+ DRM_ERROR("DP display prepare failed, rc=%d\n", rc);
+ mutex_unlock(&dp_display->event_mutex);
+ return rc;
+ }
+
+ state = atomic_read(&dp_display->hpd_state);
+ if (state == ST_SUSPENDED) {
+ /* start link training */
+ dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0);
+ mutex_unlock(&dp_display->event_mutex);
+
+ /* wait until dp interface is up */
+ goto resume_done;
+ }
+
+ dp_display_enable(dp_display, 0);
+
+ rc = dp_display_post_enable(dp);
+ if (rc) {
+ DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
+ dp_display_disable(dp_display, 0);
+ dp_display_unprepare(dp);
+ }
+
+ dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
+
+ if (state == ST_SUSPEND_PENDING)
+ dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
+
+ /* completed connection */
+ atomic_set(&dp_display->hpd_state, ST_CONNECTED);
+
+ mutex_unlock(&dp_display->event_mutex);
+
+ return rc;
+
+resume_done:
+ dp_display_wait4resume_done(dp_display);
+ return rc;
+}
+
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ dp_ctrl_push_idle(dp_display->ctrl);
+
+ return 0;
+}
+
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+ int rc = 0;
+ u32 state;
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ mutex_lock(&dp_display->event_mutex);
+
+ dp_display_disable(dp_display, 0);
+
+ rc = dp_display_unprepare(dp);
+ if (rc)
+ DRM_ERROR("DP display unprepare failed, rc=%d\n", rc);
+
+ dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
+
+ state = atomic_read(&dp_display->hpd_state);
+ if (state == ST_DISCONNECT_PENDING) {
+ /* completed disconnection */
+ atomic_set(&dp_display->hpd_state, ST_DISCONNECTED);
+ } else {
+ atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING);
+ }
+
+ mutex_unlock(&dp_display->event_mutex);
+ return rc;
+}
+
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+
+ if (dp_display_check_video_test(dp))
+ dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+ else /* Default num_components per pixel = 3 */
+ dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+
+ if (!dp_display->dp_mode.bpp)
+ dp_display->dp_mode.bpp = 24; /* Default bpp */
+
+ drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+
+ dp_display->dp_mode.v_active_low =
+ !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+
+ dp_display->dp_mode.h_active_low =
+ !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644
index 000000000000..6092ba1ed85e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include "dp_panel.h"
+#include <sound/hdmi-codec.h>
+
+struct msm_dp {
+ struct drm_device *drm_dev;
+ struct device *codec_dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ bool is_connected;
+ bool audio_enabled;
+ bool power_on;
+
+ hdmi_codec_plugged_cb plugged_cb;
+
+ u32 max_pclk_khz;
+
+ u32 max_dp_lanes;
+ struct dp_audio *dp_audio;
+};
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+ hdmi_codec_plugged_cb fn, struct device *codec_dev);
+int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz);
+int dp_display_get_modes(struct msm_dp *dp_display,
+ struct dp_display_mode *dp_mode);
+int dp_display_request_irq(struct msm_dp *dp_display);
+bool dp_display_check_video_test(struct msm_dp *dp_display);
+int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644
index 000000000000..764f4b81017e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_drm.h"
+
+struct dp_connector {
+ struct drm_connector base;
+ struct msm_dp *dp_display;
+};
+#define to_dp_connector(x) container_of(x, struct dp_connector, base)
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * Returns: Connector 'is connected' status
+ */
+static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+ bool force)
+{
+ struct msm_dp *dp;
+
+ dp = to_dp_connector(conn)->dp_display;
+
+ DRM_DEBUG_DP("is_connected = %s\n",
+ (dp->is_connected) ? "true" : "false");
+
+ return (dp->is_connected) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * Returns: Number of modes added
+ */
+static int dp_connector_get_modes(struct drm_connector *connector)
+{
+ int rc = 0;
+ struct msm_dp *dp;
+ struct dp_display_mode *dp_mode = NULL;
+ struct drm_display_mode *m, drm_mode;
+
+ if (!connector)
+ return 0;
+
+ dp = to_dp_connector(connector)->dp_display;
+
+ dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
+ if (!dp_mode)
+ return 0;
+
+	/* the pluggable case assumes EDID is read on HPD assertion */
+	if (dp->is_connected) {
+		/*
+		 * The get_modes() function might return one mode that is stored
+		 * in dp_mode when a compliance test is in progress. If not, the
+		 * return value is equal to the total number of modes supported
+		 * by the sink.
+		 */
+ rc = dp_display_get_modes(dp, dp_mode);
+ if (rc <= 0) {
+ DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
+ kfree(dp_mode);
+ return rc;
+ }
+ if (dp_mode->drm_mode.clock) { /* valid DP mode */
+ memset(&drm_mode, 0x0, sizeof(drm_mode));
+ drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
+ m = drm_mode_duplicate(connector->dev, &drm_mode);
+ if (!m) {
+ DRM_ERROR("failed to add mode %ux%u\n",
+ drm_mode.hdisplay,
+ drm_mode.vdisplay);
+ kfree(dp_mode);
+ return 0;
+ }
+ drm_mode_probed_add(connector, m);
+ }
+ } else {
+ DRM_DEBUG_DP("No sink connected\n");
+ }
+ kfree(dp_mode);
+ return rc;
+}
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+static enum drm_mode_status dp_connector_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct msm_dp *dp_disp;
+
+ dp_disp = to_dp_connector(connector)->dp_display;
+
+	if (!dp_disp->max_pclk_khz ||
+ (dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
+ (mode->clock > dp_disp->max_pclk_khz))
+ return MODE_BAD;
+
+ return dp_display_validate_mode(dp_disp, mode->clock);
+}
+
+static const struct drm_connector_funcs dp_connector_funcs = {
+ .detect = dp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
+ .get_modes = dp_connector_get_modes,
+ .mode_valid = dp_connector_mode_valid,
+};
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+{
+ struct drm_connector *connector = NULL;
+ struct dp_connector *dp_connector;
+ int ret;
+
+ dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
+ sizeof(*dp_connector),
+ GFP_KERNEL);
+ if (!dp_connector)
+ return ERR_PTR(-ENOMEM);
+
+ dp_connector->dp_display = dp_display;
+
+ connector = &dp_connector->base;
+
+ ret = drm_connector_init(dp_display->drm_dev, connector,
+ &dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_connector_helper_add(connector, &dp_connector_helper_funcs);
+
+	/*
+	 * Enable HPD so that hpd events are handled on cable connect.
+	 */
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_attach_encoder(connector, dp_display->encoder);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 000000000000..c27bfceefdf0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644
index 000000000000..5b8fe32022b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS 0x10
+#define DP_USBPD_VDM_CONFIGURE 0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION 0x0
+#define USB_C_DP_SID 0xFF01
+
+struct dp_hpd_private {
+ struct device *dev;
+ struct dp_usbpd_cb *dp_cb;
+ struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+ int rc = 0;
+ struct dp_hpd_private *hpd_priv;
+
+ hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+ dp_usbpd);
+
+ dp_usbpd->hpd_high = hpd;
+
+	if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+			|| !hpd_priv->dp_cb->disconnect) {
+ pr_err("hpd dp_cb not initialized\n");
+ return -EINVAL;
+ }
+ if (hpd)
+ hpd_priv->dp_cb->configure(hpd_priv->dev);
+ else
+ hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+ return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+ struct dp_hpd_private *dp_hpd;
+
+ if (!cb) {
+ pr_err("invalid cb data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+ if (!dp_hpd)
+ return ERR_PTR(-ENOMEM);
+
+ dp_hpd->dev = dev;
+ dp_hpd->dp_cb = cb;
+
+ dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+ return &dp_hpd->dp_usbpd;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644
index 000000000000..5bc5bb64680f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+//#include <linux/usb/usbpd.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high.
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ * @debug_en: bool to specify debug mode
+ * @connect: simulate disconnect or connect for debug mode
+ */
+struct dp_usbpd {
+ enum plug_orientation orientation;
+ bool low_pow_st;
+ bool adaptor_dp_en;
+ bool multi_func;
+ bool usb_config_req;
+ bool exit_dp_mode;
+ bool hpd_high;
+ bool hpd_irq;
+ bool alt_mode_cfg_done;
+ bool debug_en;
+
+ int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+};
+
+/**
+ * struct dp_usbpd_cb - callback functions provided by the client
+ *
+ * @configure: called by usbpd module when PD communication has
+ * been completed and the usb peripheral has been configured on
+ * dp mode.
+ * @disconnect: notify the cable disconnect issued by usb.
+ * @attention: notify any attention message issued by usb.
+ */
+struct dp_usbpd_cb {
+ int (*configure)(struct device *dev);
+ int (*disconnect)(struct device *dev);
+ int (*attention)(struct device *dev);
+};
+
+/**
+ * dp_hpd_get() - setup hpd module
+ *
+ * @dev: device instance of the caller
+ * @cb: struct containing callback function pointers.
+ *
+ * This function allows the client to initialize the usbpd
+ * module. The module will communicate with HPD module.
+ */
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
+
+int dp_hpd_register(struct dp_usbpd *dp_usbpd);
+void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
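+
+/*
+ * Debug usage sketch (illustrative, not part of the driver API contract):
+ * with debug_en set, a connect or disconnect can be simulated through
+ * the connect() hook, e.g.:
+ *
+ *	struct dp_usbpd *usbpd = dp_hpd_get(dev, &cb);
+ *	usbpd->connect(usbpd, true);	// simulate cable plug
+ *	usbpd->connect(usbpd, false);	// simulate cable unplug
+ */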
+
+#endif /* _DP_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644
index 000000000000..c811da515fb3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -0,0 +1,1228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <drm/drm_print.h>
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_TEST_REQUEST_MASK 0x7F
+
+enum audio_sample_rate {
+ AUDIO_SAMPLE_RATE_32_KHZ = 0x00,
+ AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01,
+ AUDIO_SAMPLE_RATE_48_KHZ = 0x02,
+ AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03,
+ AUDIO_SAMPLE_RATE_96_KHZ = 0x04,
+ AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05,
+ AUDIO_SAMPLE_RATE_192_KHZ = 0x06,
+};
+
+enum audio_pattern_type {
+ AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00,
+ AUDIO_TEST_PATTERN_SAWTOOTH = 0x01,
+};
+
+struct dp_link_request {
+ u32 test_requested;
+ u32 test_link_rate;
+ u32 test_lane_count;
+};
+
+struct dp_link_private {
+ u32 prev_sink_count;
+ struct device *dev;
+ struct drm_dp_aux *aux;
+ struct dp_link dp_link;
+
+ struct dp_link_request request;
+ struct mutex psm_mutex;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+};
+
+static int dp_aux_link_power_up(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 value;
+ int err;
+
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int dp_aux_link_power_down(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 value;
+ int err;
+
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+ int ret = 0;
+ u8 data;
+ u32 const max_audio_period = 0xA;
+
+ /* TEST_AUDIO_PERIOD_CH_XX */
+ if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) {
+ DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Period - Bits 3:0 */
+ data = data & 0xF;
+ if ((int)data > max_audio_period) {
+		DRM_ERROR("invalid test_audio_period = 0x%x at 0x%x\n", data, addr);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = data;
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+ int ret = 0;
+ struct dp_link_test_audio *req = &link->dp_link.test_audio;
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_1 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_2 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret);
+
+ /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_3 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_4 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_5 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_6 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_7 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_8 = ret;
+ DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+ int const max_audio_pattern_type = 0x1;
+
+ rlen = drm_dp_dpcd_readb(link->aux,
+ DP_TEST_AUDIO_PATTERN_TYPE, &data);
+ if (rlen < 0) {
+		DRM_ERROR("failed to read audio pattern type. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Audio Pattern Type - Bits 7:0 */
+ if ((int)data > max_audio_pattern_type) {
+ DRM_ERROR("invalid audio pattern type = 0x%x\n", data);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ link->dp_link.test_audio.test_audio_pattern_type = data;
+ DRM_DEBUG_DP("audio pattern type = 0x%x\n", data);
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+ int const max_audio_sampling_rate = 0x6;
+ int const max_audio_channel_count = 0x8;
+ int sampling_rate = 0x0;
+ int channel_count = 0x0;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Sampling Rate - Bits 3:0 */
+ sampling_rate = data & 0xF;
+ if (sampling_rate > max_audio_sampling_rate) {
+ DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n",
+ sampling_rate, max_audio_sampling_rate);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Channel Count - Bits 7:4 */
+ channel_count = ((data & 0xF0) >> 4) + 1;
+ if (channel_count > max_audio_channel_count) {
+ DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n",
+ channel_count, max_audio_channel_count);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+ link->dp_link.test_audio.test_audio_channel_count = channel_count;
+ DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n",
+ sampling_rate, channel_count);
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+ int ret = 0;
+
+ ret = dp_link_parse_audio_mode(link);
+ if (ret)
+ goto exit;
+
+ ret = dp_link_parse_audio_pattern_type(link);
+ if (ret)
+ goto exit;
+
+ ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+ return ret;
+}
+
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+ switch (pattern) {
+ case DP_NO_TEST_PATTERN:
+ case DP_COLOR_RAMP:
+ case DP_BLACK_AND_WHITE_VERTICAL_LINES:
+ case DP_COLOR_SQUARE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+ /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ case DP_TEST_BIT_DEPTH_8:
+ case DP_TEST_BIT_DEPTH_10:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+ int addr, int len, u32 *val)
+{
+ u8 bp[2];
+ int rlen;
+
+ if (len != 2)
+ return -EINVAL;
+
+	/* read the 2-byte (big endian) timing parameter at the given address */
+ rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+ if (rlen < len) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+
+ *val = bp[1] | (bp[0] << 8);
+
+ return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+ int addr, int len,
+ u32 *val1, u32 *val2)
+{
+ u8 bp[2];
+ int rlen;
+
+ if (len != 2)
+ return -EINVAL;
+
+	/* read the 2-byte sync polarity/width field at the given address */
+ rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+ if (rlen < len) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+
+ *val1 = (bp[0] & BIT(7)) >> 7;
+ *val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+ return 0;
+}
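+
+/*
+ * Worked example (illustrative): for DP_TEST_HSYNC_HI, bp = {0xAC, 0x40}
+ * yields *val1 = (0xAC & BIT(7)) >> 7 = 1 (the sync polarity) and
+ * *val2 = 0x40 | ((0xAC & 0x7F) << 8) = 0x2C40 (the sync width).
+ */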
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+ int addr, u32 *val)
+{
+ u8 bp;
+ u32 len = 1;
+ int rlen;
+
+ rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
+ if (rlen < 1) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+ *val = bp;
+
+ return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern params from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video test pattern and the test
+ * bit depth requested by the sink, and if the parsed values are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+ int ret = 0;
+ ssize_t rlen;
+ u8 bp;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link video pattern. rlen=%zd\n",
+ rlen);
+ return rlen;
+ }
+
+ if (!dp_link_is_video_pattern_valid(bp)) {
+ DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ link->dp_link.test_video.test_video_pattern = bp;
+
+ /* Read the requested color bit depth and dynamic range (Byte 0x232) */
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Dynamic Range */
+ link->dp_link.test_video.test_dyn_range =
+ (bp & DP_TEST_DYNAMIC_RANGE_CEA);
+
+ /* Color bit depth */
+ bp &= DP_TEST_BIT_DEPTH_MASK;
+ if (!dp_link_is_bit_depth_valid(bp)) {
+ DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ link->dp_link.test_video.test_bit_depth = bp;
+
+ /* resolution timing params */
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+ &link->dp_link.test_video.test_h_total);
+ if (ret) {
+ DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+ &link->dp_link.test_video.test_v_total);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+ &link->dp_link.test_video.test_h_start);
+ if (ret) {
+ DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+ &link->dp_link.test_video.test_v_start);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+ &link->dp_link.test_video.test_hsync_pol,
+ &link->dp_link.test_video.test_hsync_width);
+ if (ret) {
+ DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+ &link->dp_link.test_video.test_vsync_pol,
+ &link->dp_link.test_video.test_vsync_width);
+ if (ret) {
+ DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+ &link->dp_link.test_video.test_h_width);
+ if (ret) {
+ DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+ &link->dp_link.test_video.test_v_height);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_height\n");
+ return ret;
+ }
+
+	ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+		&link->dp_link.test_video.test_rr_d);
+	if (ret) {
+		DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
+		return ret;
+	}
+	link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+
+ ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+ &link->dp_link.test_video.test_rr_n);
+ if (ret) {
+ DRM_ERROR("failed to parse test_rr_n\n");
+ return ret;
+ }
+
+ DRM_DEBUG_DP("link video pattern = 0x%x\n"
+ "link dynamic range = 0x%x\n"
+ "link bit depth = 0x%x\n"
+ "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
+ "TEST_H_START = %d, TEST_V_START = %d\n"
+ "TEST_HSYNC_POL = %d\n"
+ "TEST_HSYNC_WIDTH = %d\n"
+ "TEST_VSYNC_POL = %d\n"
+ "TEST_VSYNC_WIDTH = %d\n"
+ "TEST_H_WIDTH = %d\n"
+ "TEST_V_HEIGHT = %d\n"
+ "TEST_REFRESH_DENOMINATOR = %d\n"
+ "TEST_REFRESH_NUMERATOR = %d\n",
+ link->dp_link.test_video.test_video_pattern,
+ link->dp_link.test_video.test_dyn_range,
+ link->dp_link.test_video.test_bit_depth,
+ link->dp_link.test_video.test_h_total,
+ link->dp_link.test_video.test_v_total,
+ link->dp_link.test_video.test_h_start,
+ link->dp_link.test_video.test_v_start,
+ link->dp_link.test_video.test_hsync_pol,
+ link->dp_link.test_video.test_hsync_width,
+ link->dp_link.test_video.test_vsync_pol,
+ link->dp_link.test_video.test_vsync_width,
+ link->dp_link.test_video.test_h_width,
+ link->dp_link.test_video.test_v_height,
+ link->dp_link.test_video.test_rr_d,
+ link->dp_link.test_video.test_rr_n);
+
+ return ret;
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if these parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+ u8 bp;
+ ssize_t rlen;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ if (!is_link_rate_valid(bp)) {
+ DRM_ERROR("invalid link rate = 0x%x\n", bp);
+ return -EINVAL;
+ }
+
+ link->request.test_link_rate = bp;
+ DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate);
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
+ return rlen;
+ }
+ bp &= DP_MAX_LANE_COUNT_MASK;
+
+ if (!is_lane_count_valid(bp)) {
+ DRM_ERROR("invalid lane count = 0x%x\n", bp);
+ return -EINVAL;
+ }
+
+ link->request.test_lane_count = bp;
+ DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count);
+ return 0;
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+ u8 data;
+ ssize_t rlen;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
+ &data);
+ if (rlen < 0) {
+		DRM_ERROR("failed to read phy test pattern. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+
+ DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data);
+
+ switch (data) {
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ case DP_PHY_TEST_PATTERN_NONE:
+ case DP_PHY_TEST_PATTERN_D10_2:
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_PHY_TEST_PATTERN_CP2520:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test request bitmask read from the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+ u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
+ DP_TEST_LINK_AUDIO_PATTERN |
+ DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+
+ return ((link & video_audio_test) &&
+ !(link & ~video_audio_test));
+}
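+
+/*
+ * Examples (illustrative): DP_TEST_LINK_VIDEO_PATTERN |
+ * DP_TEST_LINK_AUDIO_PATTERN is accepted, while mixing in an unrelated
+ * bit such as DP_TEST_LINK_TRAINING is rejected by the bitmask check.
+ */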
+
+/**
+ * dp_link_parse_request() - parses test request parameters from the sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+
+	/*
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+ */
+ rlen = drm_dp_dpcd_readb(link->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
+ if (rlen < 0) {
+ DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ DRM_DEBUG_DP("device service irq vector = 0x%x\n", data);
+
+ if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
+ DRM_DEBUG_DP("no test requested\n");
+ return 0;
+ }
+
+	/*
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+ */
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
+ if (rlen < 0) {
+ DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
+		DRM_DEBUG_DP("test 0x%x not supported\n", data);
+ goto end;
+ }
+
+ DRM_DEBUG_DP("Test:(0x%x) requested\n", data);
+ link->request.test_requested = data;
+ if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ ret = dp_link_parse_phy_test_params(link);
+ if (ret)
+ goto end;
+ ret = dp_link_parse_link_training_params(link);
+ if (ret)
+ goto end;
+ }
+
+ if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
+ ret = dp_link_parse_link_training_params(link);
+ if (ret)
+ goto end;
+ }
+
+ if (dp_link_is_video_audio_test_requested(
+ link->request.test_requested)) {
+ ret = dp_link_parse_video_pattern_params(link);
+ if (ret)
+ goto end;
+
+ ret = dp_link_parse_audio_pattern_params(link);
+ }
+end:
+ /*
+ * Send a DP_TEST_ACK if all link parameters are valid, otherwise send
+ * a DP_TEST_NAK.
+ */
+ if (ret) {
+ link->dp_link.test_response = DP_TEST_NAK;
+ } else {
+ if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
+ link->dp_link.test_response = DP_TEST_ACK;
+ else
+ link->dp_link.test_response =
+ DP_TEST_EDID_CHECKSUM_WRITE;
+ }
+
+ return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+ ssize_t rlen;
+ bool cp_ready;
+
+ struct dp_link_private *link = container_of(dp_link,
+ struct dp_link_private, dp_link);
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+ &link->dp_link.sink_count);
+ if (rlen < 0) {
+ DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
+ link->dp_link.sink_count =
+ DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+ DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n",
+ link->dp_link.sink_count, cp_ready);
+ return 0;
+}
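+
+/*
+ * Worked example (illustrative): a raw DP_SINK_COUNT byte of 0x41 has
+ * DP_SINK_CP_READY (bit 6) set and decodes via DP_GET_SINK_COUNT() to
+ * a sink_count of 1.
+ */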
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+ int len = 0;
+
+ link->prev_sink_count = link->dp_link.sink_count;
+ dp_link_parse_sink_count(&link->dp_link);
+
+ len = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (len < DP_LINK_STATUS_SIZE)
+ DRM_ERROR("DP link status read failed\n");
+ dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+ if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+ return -EINVAL;
+
+ DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+ DP_TEST_LINK_TRAINING,
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+
+ link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->dp_link.link_params.rate = link->request.test_link_rate;
+
+ return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return false;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+ dp_link->test_response);
+
+ return ret == 1;
+}
+
+int dp_link_psm_config(struct dp_link *dp_link,
+ struct dp_link_info *link_info, bool enable)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ mutex_lock(&link->psm_mutex);
+ if (enable)
+ ret = dp_aux_link_power_down(link->aux, link_info);
+ else
+ ret = dp_aux_link_power_up(link->aux, link_info);
+
+ if (ret)
+ DRM_ERROR("Failed to %s low power mode\n", enable ?
+ "enter" : "exit");
+ else
+ dp_link->psm_enabled = enable;
+
+ mutex_unlock(&link->psm_mutex);
+ return ret;
+}
+
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return false;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
+ checksum);
+ return ret == 1;
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+ int ret = 0;
+
+ DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dp_get_adjust_request_voltage(link->link_status, 0),
+ drm_dp_get_adjust_request_voltage(link->link_status, 1),
+ drm_dp_get_adjust_request_voltage(link->link_status, 2),
+ drm_dp_get_adjust_request_voltage(link->link_status, 3));
+
+ DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
+
+	/*
+ * Update the voltage and pre-emphasis levels as per DPCD request
+ * vector.
+ */
+ DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n",
+ link->dp_link.phy_params.v_level,
+ link->dp_link.phy_params.p_level);
+ link->dp_link.phy_params.v_level =
+ drm_dp_get_adjust_request_voltage(link->link_status, 0);
+ link->dp_link.phy_params.p_level =
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
+ DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
+ link->dp_link.phy_params.v_level,
+ link->dp_link.phy_params.p_level);
+
+ return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy link pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy link pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+ struct dp_link_private *link)
+{
+ int ret = 0;
+
+ if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
+ DRM_DEBUG_DP("no phy test\n");
+ return -EINVAL;
+ }
+
+ if (!is_link_rate_valid(link->request.test_link_rate) ||
+ !is_lane_count_valid(link->request.test_lane_count)) {
+		DRM_ERROR("Invalid: link rate = 0x%x, lane count = 0x%x\n",
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n",
+ link->dp_link.link_params.rate,
+ link->dp_link.link_params.num_lanes);
+
+ DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n",
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+
+ link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->dp_link.link_params.rate = link->request.test_link_rate;
+
+ ret = dp_link_parse_vx_px(link);
+
+ if (ret)
+ DRM_ERROR("parse_vx_px failed. ret=%d\n", ret);
+
+ return ret;
+}
+
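+/*
+ * The link status bytes are cached starting at DP_LANE0_1_STATUS, so a
+ * DPCD address is translated into an index into that array by subtracting
+ * the base address.
+ */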
+static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+ if (!(get_link_status(link->link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED) &
+ DP_LINK_STATUS_UPDATED) ||
+ (drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.link_params.num_lanes) &&
+ drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.link_params.num_lanes)))
+ return -EINVAL;
+
+	DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
+			drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.link_params.num_lanes),
+			drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.link_params.num_lanes));
+
+ return 0;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+ if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
+ DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+ goto reset;
+
+ if (link->prev_sink_count == link->dp_link.sink_count)
+ return -EINVAL;
+
+reset:
+ /* reset prev_sink_count */
+ link->prev_sink_count = link->dp_link.sink_count;
+
+ return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+ return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+ && !(link->request.test_requested &
+ DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+ return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+ link->request = (const struct dp_link_request){ 0 };
+ link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
+ link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+ link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
+ link->dp_link.phy_params.phy_test_pattern_sel = 0;
+ link->dp_link.sink_request = 0;
+ link->dp_link.test_response = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+int dp_link_process_request(struct dp_link *dp_link)
+{
+ int ret = 0;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ dp_link_reset_data(link);
+
+ dp_link_parse_sink_status_field(link);
+
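+	/*
+	 * The checks below run in priority order: EDID read, downstream
+	 * port change, link training, PHY test pattern, then link status
+	 * update. Each helper returns 0 only when its request is pending,
+	 * so the first match sets the corresponding sink_request bit and
+	 * returns.
+	 */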
+ if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
+ dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+ return ret;
+ }
+
+ ret = dp_link_process_ds_port_status_change(link);
+ if (!ret) {
+ dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+ return ret;
+ }
+
+ ret = dp_link_process_link_training_request(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+ return ret;
+ }
+
+ ret = dp_link_process_phy_test_pattern_request(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+ return ret;
+ }
+
+ ret = dp_link_process_link_status_update(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ return ret;
+ }
+
+ if (dp_link_is_video_pattern_requested(link)) {
+ ret = 0;
+ dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ }
+
+ if (dp_link_is_audio_pattern_requested(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+ u32 cc;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ /*
+	 * Unless a video pattern CTS test is ongoing, use RGB_VESA.
+	 * Only RGB_VESA and RGB_CEA are supported for now.
+ */
+ if (dp_link_is_video_pattern_requested(link))
+ cc = link->dp_link.test_video.test_dyn_range;
+ else
+ cc = DP_TEST_DYNAMIC_RANGE_VESA;
+
+ return cc;
+}
+
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+{
+ int i;
+ int v_max = 0, p_max = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ /* use the max level across lanes */
+ for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+ u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
+ i);
+ DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
+ i, data_v, data_p);
+ if (v_max < data_v)
+ v_max = data_v;
+ if (p_max < data_p)
+ p_max = data_p;
+ }
+
+ dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	/*
+	 * Adjust the voltage swing and pre-emphasis level combination to
+	 * within the allowable range.
+	 */
+ if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n",
+ dp_link->phy_params.v_level,
+ DP_TRAIN_VOLTAGE_SWING_MAX);
+ dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+ }
+
+ if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+ DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+ DP_TRAIN_PRE_EMPHASIS_MAX);
+ dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+ }
+
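+	/*
+	 * Clamp the combination as well: per the usual DP swing/pre-emphasis
+	 * combination limits, the highest pre-emphasis levels are not
+	 * required at the highest voltage swing, so cap pre-emphasis at
+	 * level 1 once the swing is already at level 2.
+	 */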
+ if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+ && (dp_link->phy_params.v_level ==
+ DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+ DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+ DP_TRAIN_PRE_EMPHASIS_LVL_1);
+ dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+ }
+
+ DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n",
+ dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+
+ return 0;
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+ u32 tbd;
+
+ /*
+ * Few simplistic rules and assumptions made here:
+ * 1. Test bit depth is bit depth per color component
+ * 2. Assume 3 color components
+ */
+ switch (bpp) {
+ case 18:
+ tbd = DP_TEST_BIT_DEPTH_6;
+ break;
+ case 24:
+ tbd = DP_TEST_BIT_DEPTH_8;
+ break;
+ case 30:
+ tbd = DP_TEST_BIT_DEPTH_10;
+ break;
+ default:
+ tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ break;
+ }
+
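+	/*
+	 * The DP_TEST_BIT_DEPTH_* values are encoded at the field's bit
+	 * position within the DPCD byte; shift them down so the caller gets
+	 * a plain index (this assumes DP_TEST_BIT_DEPTH_SHIFT matches that
+	 * bit position).
+	 */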
+ if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+ return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+{
+ struct dp_link_private *link;
+ struct dp_link *dp_link;
+
+ if (!dev || !aux) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+
+ link->dev = dev;
+ link->aux = aux;
+
+ mutex_init(&link->psm_mutex);
+ dp_link = &link->dp_link;
+
+ return dp_link;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644
index 000000000000..49811b6221e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int num_lanes;
+ unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+ DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
+ DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
+ DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
+ DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemphasis_level {
+ DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
+ DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
+ DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
+ DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+ u32 test_video_pattern;
+ u32 test_bit_depth;
+ u32 test_dyn_range;
+ u32 test_h_total;
+ u32 test_v_total;
+ u32 test_h_start;
+ u32 test_v_start;
+ u32 test_hsync_pol;
+ u32 test_hsync_width;
+ u32 test_vsync_pol;
+ u32 test_vsync_width;
+ u32 test_h_width;
+ u32 test_v_height;
+ u32 test_rr_d;
+ u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+ u32 test_audio_sampling_rate;
+ u32 test_audio_channel_count;
+ u32 test_audio_pattern_type;
+ u32 test_audio_period_ch_1;
+ u32 test_audio_period_ch_2;
+ u32 test_audio_period_ch_3;
+ u32 test_audio_period_ch_4;
+ u32 test_audio_period_ch_5;
+ u32 test_audio_period_ch_6;
+ u32 test_audio_period_ch_7;
+ u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+ u32 phy_test_pattern_sel;
+ u8 v_level;
+ u8 p_level;
+};
+
+struct dp_link {
+ u32 sink_request;
+ u32 test_response;
+ bool psm_enabled;
+
+ u8 sink_count;
+ struct dp_link_test_video test_video;
+ struct dp_link_test_audio test_audio;
+ struct dp_link_phy_params phy_params;
+ struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that the bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+ /*
+ * Few simplistic rules and assumptions made here:
+ * 1. Bit depth is per color component
+ * 2. If bit depth is unknown return 0
+ * 3. Assume 3 color components
+ */
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ return 18;
+ case DP_TEST_BIT_DEPTH_8:
+ return 24;
+ case DP_TEST_BIT_DEPTH_10:
+ return 30;
+ case DP_TEST_BIT_DEPTH_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ return 6;
+ case DP_TEST_BIT_DEPTH_8:
+ return 8;
+ case DP_TEST_BIT_DEPTH_10:
+ return 10;
+ case DP_TEST_BIT_DEPTH_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+ struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: DP AUX channel used to communicate with the sink
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 000000000000..18cec4fc5e0b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+struct dp_panel_private {
+ struct device *dev;
+ struct dp_panel dp_panel;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_catalog *catalog;
+ bool panel_on;
+ bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ size_t len;
+ ssize_t rlen;
+ struct dp_panel_private *panel;
+ struct dp_link_info *link_info;
+ u8 *dpcd, major = 0, minor = 0, temp;
+ u32 offset = DP_DPCD_REV;
+
+ dpcd = dp_panel->dpcd;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ link_info = &dp_panel->link_info;
+
+ rlen = drm_dp_dpcd_read(panel->aux, offset,
+ dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+ DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
+ goto end;
+ }
+
+ temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+ /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+ if (temp & BIT(7)) {
+ DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+ offset = DPRX_EXTENDED_DPCD_FIELD;
+ }
+
+ rlen = drm_dp_dpcd_read(panel->aux, offset,
+ dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+ DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
+ goto end;
+ }
+
+ link_info->revision = dpcd[DP_DPCD_REV];
+ major = (link_info->revision >> 4) & 0x0f;
+ minor = link_info->revision & 0x0f;
+
+ link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+ link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ if (link_info->num_lanes > dp_panel->max_dp_lanes)
+ link_info->num_lanes = dp_panel->max_dp_lanes;
+
+	/* Limit support up to HBR2 until HBR3 support is added */
+ if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+ link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+ DRM_DEBUG_DP("version: %d.%d\n", major, minor);
+ DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
+ DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
+
+ if (drm_dp_enhanced_frame_cap(dpcd))
+ link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+ dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
+ dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
+
+ if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
+ dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
+ dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
+ len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
+
+ rlen = drm_dp_dpcd_read(panel->aux,
+ DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
+ if (rlen < len) {
+ DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ return rc;
+}
+
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+ struct dp_link_info *link_info;
+ const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+ u32 bpp = 0, data_rate_khz = 0;
+
+ bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
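+	/*
+	 * Available bandwidth in kbit/s: drm_dp_bw_code_to_link_rate()
+	 * reports the per-lane symbol rate in kHz, and each 8b/10b symbol
+	 * carries 8 data bits, hence the factor of 8. The mode fits when
+	 * pclk (kHz) * bpp does not exceed this budget.
+	 */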
+ link_info = &dp_panel->link_info;
+ data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ while (bpp > min_supported_bpp) {
+ if (mode_pclk_khz * bpp <= data_rate_khz)
+ break;
+ bpp -= 6;
+ }
+
+ return bpp;
+}
+
+static int dp_panel_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int rc = 0;
+
+ if (edid) {
+ rc = drm_connector_update_edid_property(connector, edid);
+ if (rc) {
+ DRM_ERROR("failed to update edid property %d\n", rc);
+ return rc;
+ }
+ rc = drm_add_edid_modes(connector, edid);
+		DRM_DEBUG_DP("%s: %d modes added from EDID\n", __func__, rc);
+ return rc;
+ }
+
+ rc = drm_connector_update_edid_property(connector, NULL);
+ if (rc)
+ DRM_ERROR("failed to update edid property %d\n", rc);
+
+ return rc;
+}
+
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ int rc = 0, bw_code;
+	int rlen;
+	u8 count;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel || !connector) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ rc = dp_panel_read_dpcd(dp_panel);
+ bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ if (rc || !is_link_rate_valid(bw_code) ||
+ !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
+ (bw_code > dp_panel->max_bw_code)) {
+		DRM_ERROR("read dpcd failed %d\n", rc);
+		return rc ? rc : -EINVAL;
+ }
+
+ if (dp_panel->dfp_present) {
+ rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
+ &count, 1);
+ if (rlen == 1) {
+ count = DP_GET_SINK_COUNT(count);
+ if (!count) {
+ DRM_ERROR("no downstream ports connected\n");
+ panel->link->sink_count = 0;
+ rc = -ENOTCONN;
+ goto end;
+ }
+ }
+ }
+
+ kfree(dp_panel->edid);
+ dp_panel->edid = NULL;
+
+ dp_panel->edid = drm_get_edid(connector,
+ &panel->aux->ddc);
+ if (!dp_panel->edid) {
+ DRM_ERROR("panel edid read failed\n");
+
+ /* fail safe edid */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ if (drm_add_modes_noedid(connector, 640, 480))
+ drm_set_preferred_mode(connector, 640, 480);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ }
+
+ if (panel->aux_cfg_update_done) {
+ DRM_DEBUG_DP("read DPCD with updated AUX config\n");
+ rc = dp_panel_read_dpcd(dp_panel);
+ bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ if (rc || !is_link_rate_valid(bw_code) ||
+ !is_lane_count_valid(dp_panel->link_info.num_lanes)
+ || (bw_code > dp_panel->max_bw_code)) {
+			DRM_ERROR("read dpcd failed %d\n", rc);
+			return rc ? rc : -EINVAL;
+ }
+ panel->aux_cfg_update_done = false;
+ }
+end:
+ return rc;
+}
+
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+ struct dp_panel_private *panel;
+ u32 bpp = mode_edid_bpp;
+
+ if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+ DRM_ERROR("invalid input\n");
+ return 0;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (dp_panel->video_test)
+ bpp = dp_link_bit_depth_to_bpp(
+ panel->link->test_video.test_bit_depth);
+ else
+ bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+ mode_pclk_khz);
+
+ return bpp;
+}
+
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+ struct drm_connector *connector, struct dp_display_mode *mode)
+{
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (dp_panel->edid)
+ return dp_panel_update_modes(connector, dp_panel->edid);
+
+ return 0;
+}
+
+static u8 dp_panel_get_edid_checksum(struct edid *edid)
+{
+ struct edid *last_block;
+ u8 *raw_edid;
+ bool is_edid_corrupt;
+
+ if (!edid) {
+ DRM_ERROR("invalid edid input\n");
+ return 0;
+ }
+
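+	/*
+	 * Point at the last 128-byte block of the EDID (base block plus
+	 * extensions) and validate it before trusting its checksum byte.
+	 */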
+ raw_edid = (u8 *)edid;
+ raw_edid += (edid->extensions * EDID_LENGTH);
+ last_block = (struct edid *)raw_edid;
+
+ /* block type extension */
+ drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+ if (!is_edid_corrupt)
+ return last_block->checksum;
+
+ DRM_ERROR("Invalid block, no checksum\n");
+ return 0;
+}
+
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+ u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+
+ dp_link_send_edid_checksum(panel->link, checksum);
+ dp_link_send_test_response(panel->link);
+ }
+}
+
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+
+ if (!panel->panel_on) {
+ DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n");
+ return;
+ }
+
+ if (!enable) {
+ dp_catalog_panel_tpg_disable(catalog);
+ return;
+ }
+
+ DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__);
+ dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+}
+
+void dp_panel_dump_regs(struct dp_panel *dp_panel)
+{
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+
+ dp_catalog_dump_regs(catalog);
+}
+
+int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ u32 data, total_ver, total_hor;
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+ struct drm_display_mode *drm_mode;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+ drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+
+ DRM_DEBUG_DP("width=%d hporch= %d %d %d\n",
+ drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->hsync_end - drm_mode->hsync_start);
+
+ DRM_DEBUG_DP("height=%d vporch= %d %d %d\n",
+ drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
+ drm_mode->vsync_start - drm_mode->vdisplay,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+
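+	/*
+	 * The catalog timing registers pack the vertical value into the
+	 * upper 16 bits and the horizontal value into the lower 16 bits;
+	 * width_blanking additionally carries the vsync/hsync active-low
+	 * polarity flags in bits 31 and 15.
+	 */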
+ total_hor = drm_mode->htotal;
+
+ total_ver = drm_mode->vtotal;
+
+ data = total_ver;
+ data <<= 16;
+ data |= total_hor;
+
+ catalog->total = data;
+
+ data = (drm_mode->vtotal - drm_mode->vsync_start);
+ data <<= 16;
+ data |= (drm_mode->htotal - drm_mode->hsync_start);
+
+ catalog->sync_start = data;
+
+ data = drm_mode->vsync_end - drm_mode->vsync_start;
+ data <<= 16;
+ data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+ data |= drm_mode->hsync_end - drm_mode->hsync_start;
+ data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+
+ catalog->width_blanking = data;
+
+ data = drm_mode->vdisplay;
+ data <<= 16;
+ data |= drm_mode->hdisplay;
+
+ catalog->dp_active = data;
+
+ dp_catalog_panel_timing_cfg(catalog);
+ panel->panel_on = true;
+
+ return rc;
+}
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ struct drm_display_mode *drm_mode;
+
+ drm_mode = &dp_panel->dp_mode.drm_mode;
+
+	/*
+	 * Print the resolution info, as it results from a user-initiated
+	 * action (cable connection).
+	 */
+ DRM_DEBUG_DP("SET NEW RESOLUTION:\n");
+ DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay,
+ drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+ DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->hsync_end - drm_mode->hsync_start);
+ DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_mode->vtotal - drm_mode->vsync_end,
+ drm_mode->vsync_start - drm_mode->vdisplay,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+ DRM_DEBUG_DP("pixel clock (KHz)=(%d)\n", drm_mode->clock);
+ DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp);
+
+ dp_panel->dp_mode.bpp = max_t(u32, 18,
+ min_t(u32, dp_panel->dp_mode.bpp, 30));
+ DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
+
+ return rc;
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+{
+ struct dp_panel_private *panel;
+ struct dp_panel *dp_panel;
+
+ if (!in->dev || !in->catalog || !in->aux || !in->link) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return ERR_PTR(-ENOMEM);
+
+ panel->dev = in->dev;
+ panel->aux = in->aux;
+ panel->catalog = in->catalog;
+ panel->link = in->link;
+
+ dp_panel = &panel->dp_panel;
+ dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ panel->aux_cfg_update_done = false;
+
+ return dp_panel;
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+ if (!dp_panel)
+ return;
+
+ kfree(dp_panel->edid);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 000000000000..9023e5bb4b8b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include <drm/msm_drm.h>
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD 0x2200
+
+#define DP_DOWNSTREAM_PORTS 4
+#define DP_DOWNSTREAM_CAP_SIZE 4
+
+struct dp_display_mode {
+ struct drm_display_mode drm_mode;
+ u32 capabilities;
+ u32 bpp;
+ u32 h_active_low;
+ u32 v_active_low;
+};
+
+struct dp_panel_in {
+ struct device *dev;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+ u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+ u32 ds_port_cnt;
+ u32 dfp_present;
+
+ struct dp_link_info link_info;
+ struct drm_dp_desc desc;
+ struct edid *edid;
+ struct drm_connector *connector;
+ struct dp_display_mode dp_mode;
+ bool video_test;
+
+ u32 vic;
+ u32 max_pclk_khz;
+ u32 max_dp_lanes;
+
+ u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+ u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+ struct drm_connector *connector, struct dp_display_mode *mode);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: bandwidth code of the link rate requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+ return (bw_code == DP_LINK_BW_1_62 ||
+ bw_code == DP_LINK_BW_2_7 ||
+ bw_code == DP_LINK_BW_5_4 ||
+ bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+ return (lane_count == 1 ||
+ lane_count == 2 ||
+ lane_count == 4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644
index 000000000000..0519dd3ac3c3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drm_print.h>
+
+#include "dp_parser.h"
+#include "dp_reg.h"
+
+static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vdda-1p2", 21800, 4 }, /* 1.2 V */
+ {"vdda-0p9", 36000, 32 }, /* 0.9 V */
+ },
+};
+
+static int msm_dss_ioremap(struct platform_device *pdev,
+ struct dss_io_data *io_data)
+{
+ struct resource *res = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+		DRM_ERROR("%pS->%s: platform_get_resource failed\n",
+ __builtin_return_address(0), __func__);
+ return -ENODEV;
+ }
+
+ io_data->len = (u32)resource_size(res);
+ io_data->base = ioremap(res->start, io_data->len);
+ if (!io_data->base) {
+ DRM_ERROR("%pS->%s: ioremap failed\n",
+ __builtin_return_address(0), __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+ if (io_data->base) {
+ iounmap(io_data->base);
+ io_data->base = NULL;
+ }
+ io_data->len = 0;
+}
+
+static void dp_parser_unmap_io_resources(struct dp_parser *parser)
+{
+ struct dp_io *io = &parser->io;
+
+ msm_dss_iounmap(&io->dp_controller);
+}
+
+static int dp_parser_ctrl_res(struct dp_parser *parser)
+{
+ int rc = 0;
+ struct platform_device *pdev = parser->pdev;
+ struct dp_io *io = &parser->io;
+
+ rc = msm_dss_ioremap(pdev, &io->dp_controller);
+ if (rc) {
+ DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
+ goto err;
+ }
+
+ io->phy = devm_phy_get(&pdev->dev, "dp");
+ if (IS_ERR(io->phy)) {
+ rc = PTR_ERR(io->phy);
+ goto err;
+ }
+
+ return 0;
+err:
+ dp_parser_unmap_io_resources(parser);
+ return rc;
+}
+
+static int dp_parser_misc(struct dp_parser *parser)
+{
+ struct device_node *of_node = parser->pdev->dev.of_node;
+ int len = 0;
+ const char *data_lane_property = "data-lanes";
+
+ len = of_property_count_elems_of_size(of_node,
+ data_lane_property, sizeof(u32));
+ if (len < 0) {
+ DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
+ data_lane_property, DP_MAX_NUM_DP_LANES);
+ len = DP_MAX_NUM_DP_LANES;
+ }
+
+ parser->max_dp_lanes = len;
+ return 0;
+}
+
+static inline bool dp_parser_check_prefix(const char *clk_prefix,
+ const char *clk_name)
+{
+ return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
+}
+
+static int dp_parser_init_clk_data(struct dp_parser *parser)
+{
+ int num_clk, i, rc;
+ int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+ const char *clk_name;
+ struct device *dev = &parser->pdev->dev;
+ struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+ struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+ struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+ num_clk = of_property_count_strings(dev->of_node, "clock-names");
+ if (num_clk <= 0) {
+ DRM_ERROR("no clocks are defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(dev->of_node,
+ "clock-names", i, &clk_name);
+ if (rc < 0)
+ return rc;
+
+ if (dp_parser_check_prefix("core", clk_name))
+ core_clk_count++;
+
+ if (dp_parser_check_prefix("ctrl", clk_name))
+ ctrl_clk_count++;
+
+ if (dp_parser_check_prefix("stream", clk_name))
+ stream_clk_count++;
+ }
+
+ /* Initialize the CORE power module */
+ if (core_clk_count == 0) {
+ DRM_ERROR("no core clocks are defined\n");
+ return -EINVAL;
+ }
+
+ core_power->num_clk = core_clk_count;
+	core_power->clk_config = devm_kcalloc(dev, core_power->num_clk,
+			sizeof(struct dss_clk), GFP_KERNEL);
+	if (!core_power->clk_config)
+		return -ENOMEM;
+
+ /* Initialize the CTRL power module */
+ if (ctrl_clk_count == 0) {
+ DRM_ERROR("no ctrl clocks are defined\n");
+ return -EINVAL;
+ }
+
+ ctrl_power->num_clk = ctrl_clk_count;
+	ctrl_power->clk_config = devm_kcalloc(dev, ctrl_power->num_clk,
+			sizeof(struct dss_clk), GFP_KERNEL);
+	if (!ctrl_power->clk_config) {
+		ctrl_power->num_clk = 0;
+		return -ENOMEM;
+	}
+
+ /* Initialize the STREAM power module */
+ if (stream_clk_count == 0) {
+ DRM_ERROR("no stream (pixel) clocks are defined\n");
+ return -EINVAL;
+ }
+
+ stream_power->num_clk = stream_clk_count;
+	stream_power->clk_config = devm_kcalloc(dev, stream_power->num_clk,
+			sizeof(struct dss_clk), GFP_KERNEL);
+	if (!stream_power->clk_config) {
+		stream_power->num_clk = 0;
+		return -ENOMEM;
+	}
+
+ return 0;
+}
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+ int rc = 0, i = 0;
+ int num_clk = 0;
+ int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
+ int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+ const char *clk_name;
+ struct device *dev = &parser->pdev->dev;
+ struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+ struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+ struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+ rc = dp_parser_init_clk_data(parser);
+ if (rc) {
+ DRM_ERROR("failed to initialize power data %d\n", rc);
+ return -EINVAL;
+ }
+
+ core_clk_count = core_power->num_clk;
+ ctrl_clk_count = ctrl_power->num_clk;
+ stream_clk_count = stream_power->num_clk;
+
+ num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
+
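+	/*
+	 * Sort each clock into its power module by name prefix. Link and
+	 * pixel clocks need an explicit rate (DSS_CLK_PCLK); everything
+	 * else is treated as a gated bus clock (DSS_CLK_AHB).
+	 */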
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(dev->of_node, "clock-names",
+ i, &clk_name);
+ if (rc) {
+ DRM_ERROR("error reading clock-names %d\n", rc);
+ return rc;
+ }
+ if (dp_parser_check_prefix("core", clk_name) &&
+ core_clk_index < core_clk_count) {
+ struct dss_clk *clk =
+ &core_power->clk_config[core_clk_index];
+ strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+ clk->type = DSS_CLK_AHB;
+ core_clk_index++;
+ } else if (dp_parser_check_prefix("stream", clk_name) &&
+ stream_clk_index < stream_clk_count) {
+ struct dss_clk *clk =
+ &stream_power->clk_config[stream_clk_index];
+ strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+ clk->type = DSS_CLK_PCLK;
+ stream_clk_index++;
+ } else if (dp_parser_check_prefix("ctrl", clk_name) &&
+ ctrl_clk_index < ctrl_clk_count) {
+ struct dss_clk *clk =
+ &ctrl_power->clk_config[ctrl_clk_index];
+ strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+ ctrl_clk_index++;
+ if (dp_parser_check_prefix("ctrl_link", clk_name) ||
+ dp_parser_check_prefix("stream_pixel", clk_name))
+ clk->type = DSS_CLK_PCLK;
+ else
+ clk->type = DSS_CLK_AHB;
+ }
+ }
+
+ DRM_DEBUG_DP("clock parsing successful\n");
+
+ return 0;
+}
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+ int rc = 0;
+
+ if (!parser) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ rc = dp_parser_ctrl_res(parser);
+ if (rc)
+ return rc;
+
+ rc = dp_parser_misc(parser);
+ if (rc)
+ return rc;
+
+ rc = dp_parser_clock(parser);
+ if (rc)
+ return rc;
+
+	/*
+	 * Map the corresponding regulator information according to
+	 * version. Currently, since we only have one supported platform,
+	 * map the regulator directly.
+	 */
+ parser->regulator_cfg = &sdm845_dp_reg_cfg;
+
+ return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+ struct dp_parser *parser;
+
+ parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+ if (!parser)
+ return ERR_PTR(-ENOMEM);
+
+ parser->parse = dp_parser_parse;
+ parser->pdev = pdev;
+
+ return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644
index 000000000000..34b49628bbaf
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+
+#include "dpu_io_util.h"
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ 675000
+#define DP_MAX_NUM_DP_LANES 4
+
+enum dp_pm_type {
+ DP_CORE_PM,
+ DP_CTRL_PM,
+ DP_STREAM_PM,
+ DP_PHY_PM,
+ DP_MAX_PM
+};
+
+struct dss_io_data {
+ u32 len;
+ void __iomem *base;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+ switch (module) {
+ case DP_CORE_PM: return "DP_CORE_PM";
+ case DP_CTRL_PM: return "DP_CTRL_PM";
+ case DP_STREAM_PM: return "DP_STREAM_PM";
+ case DP_PHY_PM: return "DP_PHY_PM";
+ default: return "???";
+ }
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node: reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+ struct device_node *ctrl_node;
+ struct device_node *phy_node;
+ bool is_active;
+ const char *name;
+ const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: handle to the DP PHY
+ * @phy_opts: configuration options passed to the DP PHY
+ */
+struct dp_io {
+ struct dss_io_data dp_controller;
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+ struct pinctrl *pin;
+ struct pinctrl_state *state_active;
+ struct pinctrl_state *state_hpd_active;
+ struct pinctrl_state *state_suspend;
+};
+
+#define DP_DEV_REGULATOR_MAX 4
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+ char name[32];
+ int enable_load;
+ int disable_load;
+};
+
+struct dp_regulator_cfg {
+ int num;
+ struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @io: controller's mapped memory and PHY handle
+ * @disp_data: controller's display related data
+ * @regulator_cfg: regulator configuration for the platform
+ * @max_dp_lanes: maximum number of DP lanes supported
+ * @parse: function to be called by client to parse device tree.
+ */
+struct dp_parser {
+ struct platform_device *pdev;
+ struct dss_module_power mp[DP_MAX_PM];
+ struct dp_pinctrl pinctrl;
+ struct dp_io io;
+ struct dp_display_data disp_data;
+ const struct dp_regulator_cfg *regulator_cfg;
+ u32 max_dp_lanes;
+
+ int (*parse)(struct dp_parser *parser);
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ *
+ * This function provides client capability to parse the
+ * device tree and populate the data structures. The data
+ * related to clocks, regulators, pin-control and others
+ * can be parsed using this module.
+ */
+struct dp_parser *dp_parser_get(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
new file mode 100644
index 000000000000..17c1fc6a2d44
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include "dp_power.h"
+#include "msm_drv.h"
+
+struct dp_power_private {
+ struct dp_parser *parser;
+ struct platform_device *pdev;
+ struct clk *link_clk_src;
+ struct clk *pixel_provider;
+ struct clk *link_provider;
+ struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX];
+
+ struct dp_power dp_power;
+};
+
+static void dp_power_regulator_disable(struct dp_power_private *power)
+{
+ struct regulator_bulk_data *s = power->supplies;
+ const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+ int num = power->parser->regulator_cfg->num;
+ int i;
+
+ DBG("");
+ for (i = num - 1; i >= 0; i--)
+ if (regs[i].disable_load >= 0)
+ regulator_set_load(s[i].consumer,
+ regs[i].disable_load);
+
+ regulator_bulk_disable(num, s);
+}
+
+static int dp_power_regulator_enable(struct dp_power_private *power)
+{
+ struct regulator_bulk_data *s = power->supplies;
+ const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+ int num = power->parser->regulator_cfg->num;
+ int ret, i;
+
+ DBG("");
+ for (i = 0; i < num; i++) {
+ if (regs[i].enable_load >= 0) {
+ ret = regulator_set_load(s[i].consumer,
+ regs[i].enable_load);
+ if (ret < 0) {
+ pr_err("regulator %d set op mode failed, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ ret = regulator_bulk_enable(num, s);
+ if (ret < 0) {
+ pr_err("regulator enable failed, %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ regulator_set_load(s[i].consumer, regs[i].disable_load);
+ return ret;
+}
+
+static int dp_power_regulator_init(struct dp_power_private *power)
+{
+ struct regulator_bulk_data *s = power->supplies;
+ const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+ struct platform_device *pdev = power->pdev;
+ int num = power->parser->regulator_cfg->num;
+ int i, ret;
+
+ for (i = 0; i < num; i++)
+ s[i].supply = regs[i].name;
+
+ ret = devm_regulator_bulk_get(&pdev->dev, num, s);
+ if (ret < 0) {
+ pr_err("%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dp_power_clk_init(struct dp_power_private *power)
+{
+ int rc = 0;
+ struct dss_module_power *core, *ctrl, *stream;
+ struct device *dev = &power->pdev->dev;
+
+ core = &power->parser->mp[DP_CORE_PM];
+ ctrl = &power->parser->mp[DP_CTRL_PM];
+ stream = &power->parser->mp[DP_STREAM_PM];
+
+ rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+ if (rc) {
+ DRM_ERROR("failed to get %s clk. err=%d\n",
+ dp_parser_pm_name(DP_CORE_PM), rc);
+ return rc;
+ }
+
+ rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+ if (rc) {
+ DRM_ERROR("failed to get %s clk. err=%d\n",
+ dp_parser_pm_name(DP_CTRL_PM), rc);
+ msm_dss_put_clk(core->clk_config, core->num_clk);
+ return -ENODEV;
+ }
+
+	rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
+	if (rc) {
+		DRM_ERROR("failed to get %s clk. err=%d\n",
+			dp_parser_pm_name(DP_STREAM_PM), rc);
+		msm_dss_put_clk(core->clk_config, core->num_clk);
+		msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+		return -ENODEV;
+	}
+
+ return 0;
+}
+
+static int dp_power_clk_deinit(struct dp_power_private *power)
+{
+ struct dss_module_power *core, *ctrl, *stream;
+
+ core = &power->parser->mp[DP_CORE_PM];
+ ctrl = &power->parser->mp[DP_CTRL_PM];
+ stream = &power->parser->mp[DP_STREAM_PM];
+
+ if (!core || !ctrl || !stream) {
+ DRM_ERROR("invalid power_data\n");
+ return -EINVAL;
+ }
+
+ msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+ msm_dss_put_clk(core->clk_config, core->num_clk);
+ msm_dss_put_clk(stream->clk_config, stream->num_clk);
+ return 0;
+}
+
+static int dp_power_clk_set_rate(struct dp_power_private *power,
+ enum dp_pm_type module, bool enable)
+{
+ int rc = 0;
+ struct dss_module_power *mp = &power->parser->mp[module];
+
+ if (enable) {
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ if (rc) {
+ DRM_ERROR("failed to set clks rate.\n");
+ return rc;
+ }
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+ if (rc) {
+		DRM_ERROR("failed to %s clks, err: %d\n",
+			enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+ if (pm_type == DP_CORE_PM)
+ return dp_power->core_clks_on;
+
+ if (pm_type == DP_CTRL_PM)
+ return dp_power->link_clks_on;
+
+ if (pm_type == DP_STREAM_PM)
+ return dp_power->stream_clks_on;
+
+ return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+ enum dp_pm_type pm_type, bool enable)
+{
+ int rc = 0;
+ struct dp_power_private *power;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+ pm_type != DP_STREAM_PM) {
+ DRM_ERROR("unsupported power module: %s\n",
+ dp_parser_pm_name(pm_type));
+ return -EINVAL;
+ }
+
+ if (enable) {
+ if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+ DRM_DEBUG_DP("core clks already enabled\n");
+ return 0;
+ }
+
+ if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+			DRM_DEBUG_DP("link clks already enabled\n");
+ return 0;
+ }
+
+ if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+ DRM_DEBUG_DP("pixel clks already enabled\n");
+ return 0;
+ }
+
+ if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+ DRM_DEBUG_DP("Enable core clks before link clks\n");
+
+ rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+ if (rc) {
+ DRM_ERROR("fail to enable clks: %s. err=%d\n",
+ dp_parser_pm_name(DP_CORE_PM), rc);
+ return rc;
+ }
+ dp_power->core_clks_on = true;
+ }
+ }
+
+ rc = dp_power_clk_set_rate(power, pm_type, enable);
+ if (rc) {
+ DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
+ enable ? "enable" : "disable",
+ dp_parser_pm_name(pm_type), rc);
+ return rc;
+ }
+
+ if (pm_type == DP_CORE_PM)
+ dp_power->core_clks_on = enable;
+ else if (pm_type == DP_STREAM_PM)
+ dp_power->stream_clks_on = enable;
+ else
+ dp_power->link_clks_on = enable;
+
+ DRM_DEBUG_DP("%s clocks for %s\n",
+ enable ? "enable" : "disable",
+ dp_parser_pm_name(pm_type));
+	DRM_DEBUG_DP("stream_clks:%s link_clks:%s core_clks:%s\n",
+ dp_power->stream_clks_on ? "on" : "off",
+ dp_power->link_clks_on ? "on" : "off",
+ dp_power->core_clks_on ? "on" : "off");
+
+ return 0;
+}
+
+int dp_power_client_init(struct dp_power *dp_power)
+{
+ int rc = 0;
+ struct dp_power_private *power;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return -EINVAL;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ pm_runtime_enable(&power->pdev->dev);
+
+ rc = dp_power_regulator_init(power);
+ if (rc) {
+ DRM_ERROR("failed to init regulators %d\n", rc);
+ goto error;
+ }
+
+ rc = dp_power_clk_init(power);
+ if (rc) {
+ DRM_ERROR("failed to init clocks %d\n", rc);
+ goto error;
+ }
+ return 0;
+
+error:
+ pm_runtime_disable(&power->pdev->dev);
+ return rc;
+}
+
+void dp_power_client_deinit(struct dp_power *dp_power)
+{
+ struct dp_power_private *power;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ dp_power_clk_deinit(power);
+ pm_runtime_disable(&power->pdev->dev);
+}
+
+int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+ int rc = 0;
+ struct dp_power_private *power = NULL;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return -EINVAL;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ pm_runtime_get_sync(&power->pdev->dev);
+ rc = dp_power_regulator_enable(power);
+ if (rc) {
+ DRM_ERROR("failed to enable regulators, %d\n", rc);
+ goto exit;
+ }
+
+ rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+ if (rc) {
+ DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ dp_power_regulator_disable(power);
+exit:
+ pm_runtime_put_sync(&power->pdev->dev);
+ return rc;
+}
+
+int dp_power_deinit(struct dp_power *dp_power)
+{
+ struct dp_power_private *power;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+ dp_power_regulator_disable(power);
+ pm_runtime_put_sync(&power->pdev->dev);
+ return 0;
+}
+
+struct dp_power *dp_power_get(struct dp_parser *parser)
+{
+ struct dp_power_private *power;
+ struct dp_power *dp_power;
+
+ if (!parser) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return ERR_PTR(-ENOMEM);
+
+ power->parser = parser;
+ power->pdev = parser->pdev;
+
+ dp_power = &power->dp_power;
+
+ return dp_power;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644
index 000000000000..76743d755833
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: true if the core clocks are enabled
+ * @link_clks_on: true if the link (ctrl) clocks are enabled
+ * @stream_clks_on: true if the stream (pixel) clocks are enabled
+ */
+struct dp_power {
+ bool core_clks_on;
+ bool link_clks_on;
+ bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API will turn on the regulators and configure the
+ * aux/hpd GPIOs.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * @enable: true to enable, false to disable
+ * return: 0 on success, error code on failure
+ *
+ * This API will set the clock rates and then enable the DP clocks
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+ bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provides
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 000000000000..43042ff90a19
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_REG_H_
+#define _DP_REG_H_
+
+/* DP_TX Registers */
+#define REG_DP_HW_VERSION (0x00000000)
+
+#define REG_DP_SW_RESET (0x00000010)
+#define DP_SW_RESET (0x00000001)
+
+#define REG_DP_PHY_CTRL (0x00000014)
+#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001)
+#define DP_PHY_CTRL_SW_RESET (0x00000004)
+
+#define REG_DP_CLK_CTRL (0x00000018)
+#define REG_DP_CLK_ACTIVE (0x0000001C)
+#define REG_DP_INTR_STATUS (0x00000020)
+#define REG_DP_INTR_STATUS2 (0x00000024)
+#define REG_DP_INTR_STATUS3 (0x00000028)
+
+#define REG_DP_DP_HPD_CTRL (0x00000000)
+#define DP_DP_HPD_CTRL_HPD_EN (0x00000001)
+
+#define REG_DP_DP_HPD_INT_STATUS (0x00000004)
+
+#define REG_DP_DP_HPD_INT_ACK (0x00000008)
+#define DP_DP_HPD_PLUG_INT_ACK (0x00000001)
+#define DP_DP_IRQ_HPD_INT_ACK (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008)
+
+#define REG_DP_DP_HPD_INT_MASK (0x0000000C)
+#define DP_DP_HPD_PLUG_INT_MASK (0x00000001)
+#define DP_DP_IRQ_HPD_INT_MASK (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008)
+#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \
+ DP_DP_IRQ_HPD_INT_MASK | \
+ DP_DP_HPD_REPLUG_INT_MASK | \
+ DP_DP_HPD_UNPLUG_INT_MASK)
+#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000)
+#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000)
+#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000)
+#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000)
+
+#define REG_DP_DP_HPD_REFTIMER (0x00000018)
+#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16)
+
+#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C)
+#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020)
+#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA)
+#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0)
+
+#define REG_DP_AUX_CTRL (0x00000030)
+#define DP_AUX_CTRL_ENABLE (0x00000001)
+#define DP_AUX_CTRL_RESET (0x00000002)
+
+#define REG_DP_AUX_DATA (0x00000034)
+#define DP_AUX_DATA_READ (0x00000001)
+#define DP_AUX_DATA_WRITE (0x00000000)
+#define DP_AUX_DATA_OFFSET (0x00000008)
+#define DP_AUX_DATA_INDEX_OFFSET (0x00000010)
+#define DP_AUX_DATA_MASK (0x0000ff00)
+#define DP_AUX_DATA_INDEX_WRITE (0x80000000)
+
+#define REG_DP_AUX_TRANS_CTRL (0x00000038)
+#define DP_AUX_TRANS_CTRL_I2C (0x00000100)
+#define DP_AUX_TRANS_CTRL_GO (0x00000200)
+#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400)
+#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800)
+
+#define REG_DP_TIMEOUT_COUNT (0x0000003C)
+#define REG_DP_AUX_LIMITS (0x00000040)
+#define REG_DP_AUX_STATUS (0x00000044)
+
+#define DP_DPCD_CP_IRQ (0x201)
+#define DP_DPCD_RXSTATUS (0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM (0x000000A0)
+
+#define REG_DP_MAINLINK_CTRL (0x00000000)
+#define DP_MAINLINK_CTRL_ENABLE (0x00000001)
+#define DP_MAINLINK_CTRL_RESET (0x00000002)
+#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010)
+#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000)
+
+#define REG_DP_STATE_CTRL (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008)
+#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010)
+#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020)
+#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040)
+#define DP_STATE_CTRL_SEND_VIDEO (0x00000080)
+#define DP_STATE_CTRL_PUSH_IDLE (0x00000100)
+
+#define REG_DP_CONFIGURATION_CTRL (0x00000008)
+#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK (0x00000001)
+#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002)
+#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004)
+#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010)
+#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040)
+#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080)
+#define DP_CONFIGURATION_CTRL_BPC (0x00000100)
+#define DP_CONFIGURATION_CTRL_ASSR (0x00000400)
+#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04)
+#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D)
+
+#define REG_DP_SOFTWARE_MVID (0x00000010)
+#define REG_DP_SOFTWARE_NVID (0x00000018)
+#define REG_DP_TOTAL_HOR_VER (0x0000001C)
+#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020)
+#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024)
+#define REG_DP_ACTIVE_HOR_VER (0x00000028)
+
+#define REG_DP_MISC1_MISC0 (0x0000002C)
+#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001)
+#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
+#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+
+#define REG_DP_VALID_BOUNDARY (0x00000030)
+#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
+
+#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038)
+#define LANE0_MAPPING_SHIFT (0x00000000)
+#define LANE1_MAPPING_SHIFT (0x00000002)
+#define LANE2_MAPPING_SHIFT (0x00000004)
+#define LANE3_MAPPING_SHIFT (0x00000006)
+
+#define REG_DP_MAINLINK_READY (0x00000040)
+#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001)
+#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003)
+
+#define REG_DP_MAINLINK_LEVELS (0x00000044)
+#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002)
+
+
+#define REG_DP_TU (0x0000004C)
+
+#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
+#define DP_HBR2_ERM_PATTERN (0x00010000)
+
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8)
+
+#define MMSS_DP_MISC1_MISC0 (0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098)
+
+#define MMSS_DP_PSR_CRC_RG (0x00000154)
+#define MMSS_DP_PSR_CRC_B (0x00000158)
+
+#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180)
+
+#define MMSS_DP_AUDIO_CFG (0x00000200)
+#define MMSS_DP_AUDIO_STATUS (0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
+
+#define MMSS_DP_SDP_CFG (0x00000228)
+#define MMSS_DP_SDP_CFG2 (0x0000022C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
+
+#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
+
+#define MMSS_DP_EXTENSION_0 (0x00000250)
+#define MMSS_DP_EXTENSION_1 (0x00000254)
+#define MMSS_DP_EXTENSION_2 (0x00000258)
+#define MMSS_DP_EXTENSION_3 (0x0000025C)
+#define MMSS_DP_EXTENSION_4 (0x00000260)
+#define MMSS_DP_EXTENSION_5 (0x00000264)
+#define MMSS_DP_EXTENSION_6 (0x00000268)
+#define MMSS_DP_EXTENSION_7 (0x0000026C)
+#define MMSS_DP_EXTENSION_8 (0x00000270)
+#define MMSS_DP_EXTENSION_9 (0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0 (0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2 (0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0)
+
+#define MMSS_DP_GENERIC0_0 (0x00000300)
+#define MMSS_DP_GENERIC0_1 (0x00000304)
+#define MMSS_DP_GENERIC0_2 (0x00000308)
+#define MMSS_DP_GENERIC0_3 (0x0000030C)
+#define MMSS_DP_GENERIC0_4 (0x00000310)
+#define MMSS_DP_GENERIC0_5 (0x00000314)
+#define MMSS_DP_GENERIC0_6 (0x00000318)
+#define MMSS_DP_GENERIC0_7 (0x0000031C)
+#define MMSS_DP_GENERIC0_8 (0x00000320)
+#define MMSS_DP_GENERIC0_9 (0x00000324)
+#define MMSS_DP_GENERIC1_0 (0x00000328)
+#define MMSS_DP_GENERIC1_1 (0x0000032C)
+#define MMSS_DP_GENERIC1_2 (0x00000330)
+#define MMSS_DP_GENERIC1_3 (0x00000334)
+#define MMSS_DP_GENERIC1_4 (0x00000338)
+#define MMSS_DP_GENERIC1_5 (0x0000033C)
+#define MMSS_DP_GENERIC1_6 (0x00000340)
+#define MMSS_DP_GENERIC1_7 (0x00000344)
+#define MMSS_DP_GENERIC1_8 (0x00000348)
+#define MMSS_DP_GENERIC1_9 (0x0000034C)
+
+#define MMSS_DP_VSCEXT_0 (0x000002D0)
+#define MMSS_DP_VSCEXT_1 (0x000002D4)
+#define MMSS_DP_VSCEXT_2 (0x000002D8)
+#define MMSS_DP_VSCEXT_3 (0x000002DC)
+#define MMSS_DP_VSCEXT_4 (0x000002E0)
+#define MMSS_DP_VSCEXT_5 (0x000002E4)
+#define MMSS_DP_VSCEXT_6 (0x000002E8)
+#define MMSS_DP_VSCEXT_7 (0x000002EC)
+#define MMSS_DP_VSCEXT_8 (0x000002F0)
+#define MMSS_DP_VSCEXT_9 (0x000002F4)
+
+#define MMSS_DP_BIST_ENABLE (0x00000000)
+#define DP_BIST_ENABLE_DPBIST_EN (0x00000001)
+
+#define MMSS_DP_TIMING_ENGINE_EN (0x00000010)
+#define DP_TIMING_ENGINE_EN_EN (0x00000001)
+
+#define MMSS_DP_INTF_CONFIG (0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL (0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL (0x00000058)
+
+#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060)
+#define MMSS_DP_DSC_DTO (0x0000007C)
+#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100)
+
+#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064)
+#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001)
+#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004)
+
+#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088)
+
+#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
+#define REG_DP_PHY_AUX_BIST_CFG (0x00000050)
+#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL (0x0A0)
+#define DP_HDCP_STATUS (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV (0x098)
+#define DP_HDCP_SW_LOWER_AKSV (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0 (0x350)
+#define DP_HDCP_ENTROPY_CTRL1 (0x35C)
+#define DP_HDCP_SHA_STATUS (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3 (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4 (0x0A8)
+#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
+
+#endif /* _DP_REG_H_ */
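
Note: the shift and mask macros above are meant to be combined when programming a
register such as REG_DP_CONFIGURATION_CTRL. A minimal sketch of how a caller might
compose the value (the dp_write() helper and the exact lane/bpc field encodings are
assumptions for illustration, not defined by this patch):

	u32 config = 0;

	config |= (3 << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT); /* assumed: 4 lanes encoded as 3 */
	config |= (1 << DP_CONFIGURATION_CTRL_BPC_SHIFT);          /* assumed: 8 bpc encoding */
	config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
	dp_write(REG_DP_CONFIGURATION_CTRL, config);               /* hypothetical MMIO helper */
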
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 4de771d6f0be..78ef5d4ed922 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -30,6 +30,8 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
MSM_DSI_PHY_10NM,
+ MSM_DSI_PHY_7NM,
+ MSM_DSI_PHY_7NM_V4_1,
MSM_DSI_PHY_MAX
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 8e536e060070..50eb4d1b8fdd 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000
#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
+
+#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
+
+#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
+
+#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
+
+static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
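+/*
+ * Note: the REG_DSI_7nm_PHY_LN_* helpers above stride by 0x80 per logical
+ * lane, so e.g. REG_DSI_7nm_PHY_LN_CFG2(3) = 0x00000008 + 0x80 * 3 = 0x0188.
+ */
+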
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
+
+#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
+
+#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
+
+#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
+
+#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
+
+#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
+
+#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
+
+#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
+
+#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
+
+#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
+
+#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
+
+#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
+
+#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
+
+#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
+
+#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
+
+#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
+
+#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
+
+#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
+
+#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
+
+#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
+
+#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
+
+#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
+
+#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
+
+#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
+
+#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
+
+#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
+
+#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
+
+#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index f892f2cbe8bb..b2ff68a15791 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
-
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index efd469d1db45..ade9b609c7d9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -21,6 +21,8 @@
#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4b363bd7ddff..1d28dfba2c9b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -328,7 +328,6 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
* In dual DSI mode, we have one connector that can be
* attached to the drm_panel.
*/
- drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel, connector);
if (!num)
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 009f5b843dd1..e8c1a727179c 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
return 0;
}
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt_clk_prep = 50;
+ s32 pcnt_clk_zero = 2;
+ s32 pcnt_clk_trail = 30;
+ s32 pcnt_hs_prep = 50;
+ s32 pcnt_hs_zero = 10;
+ s32 pcnt_hs_trail = 30;
+ s32 pcnt_hs_exit = 10;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ hb_en = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+ /* TODO: verify these calculations against the latest downstream driver.
+ * Everything except clk_post/clk_pre uses the calculations from v3, based
+ * on the downstream driver having the same calculations for v3 and v4.
+ */
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
+
+ /* recommended min
+ * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
+ */
+ temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
+
+ /* recommended min
+ * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
+ * val2 = (16 * bit_clk_ns)
+ * final = roundup(val1/val2, 0) - 1
+ */
+ temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
+ timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
+
+ return 0;
+}
+
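+/*
+ * Worked example for the arithmetic above (illustrative numbers only): for
+ * bitclk_rate = 1500000000 Hz,
+ * ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000)
+ *    = 1000000 * 1000 / 1500000 = 666
+ * i.e. one unit interval (~0.667 ns) carried at coeff = 1000 precision, which
+ * keeps all of the timing calculations in integer arithmetic.
+ */
+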
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
@@ -508,6 +604,12 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-10nm-8998",
.data = &dsi_phy_10nm_8998_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+ { .compatible = "qcom,dsi-phy-7nm",
+ .data = &dsi_phy_7nm_cfgs },
+ { .compatible = "qcom,dsi-phy-7nm-8150",
+ .data = &dsi_phy_7nm_8150_cfgs },
+#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index ef8672d7b123..d2bd74b6f357 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
struct msm_dsi_dphy_timing {
- u32 clk_pre;
- u32 clk_post;
u32 clk_zero;
u32 clk_trail;
u32 clk_prepare;
@@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644
index 000000000000..255b5f5ab2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -0,0 +1,255 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+ const u8 *tx_dctrl = tx_dctrl_0;
+ void __iomem *lane_base = phy->lane_base;
+
+ if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+ tx_dctrl = tx_dctrl_1;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ /*
+ * Disable LPRX and CDRX for all lanes; later on, it will be
+ * enabled only for the physical data lane corresponding
+ * to the logical data lane 0
+ */
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+ }
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+ }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ bool less_than_1500_mhz;
+ u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* TODO: CPHY enable path (this is for DPHY only) */
+
+ /* Alter PHY configurations if the data rate is 1.5 GHz or less */
+ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+ if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+ glbl_str_swi_cal_sel_ctrl = 0x00;
+ glbl_hstx_str_ctrl_0 = 0x88;
+ } else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+ glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+ glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+ glbl_rescode_top_ctrl = 0x03;
+ glbl_rescode_bot_ctrl = 0x3c;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* program CMN_CTRL_4 for minor_ver 2 chipsets */
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+ data = data & (0xf0);
+ if (data == 0x20)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+ glbl_str_swi_cal_sel_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+ glbl_hstx_str_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+ glbl_rescode_top_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+ glbl_rescode_bot_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+ timing->shared_timings.clk_post);
+
+ /* DSI lane settings */
+ dsi_phy_hw_v4_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ /* TODO */
+}
+
+static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+
+ phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+ "DSI_PHY_LANE");
+ if (IS_ERR(phy->lane_base)) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+ .type = MSM_DSI_PHY_7NM_V4_1,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .init = dsi_7nm_phy_init,
+ },
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+ .type = MSM_DSI_PHY_7NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .init = dsi_7nm_phy_init,
+ },
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 4a4aa3c61d71..a45fe95aff49 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_10NM:
pll = msm_dsi_pll_10nm_init(pdev, id);
break;
+ case MSM_DSI_PHY_7NM:
+ case MSM_DSI_PHY_7NM_V4_1:
+ pll = msm_dsi_pll_7nm_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index c6a3623f905d..3405982a092c 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENODEV);
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
new file mode 100644
index 000000000000..de0dfb815125
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
@@ -0,0 +1,904 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 7nm - clock diagram (e.g. DSI0). TODO: update the diagram for CPHY
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
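+
+/*
+ * Worked example of the tree above (assumed rates, for illustration only):
+ * with dsi0vco_clk = 3 GHz, out_div = 1 and bit clock divider divl_3_0 = 1,
+ * dsi0_pll_bit_clk = 3 GHz and dsi0_phy_pll_out_byteclk = 3 GHz / 8 = 375 MHz.
+ */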
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
+
+#define VCO_REF_CLK_RATE 19200000
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize_low;
+ u32 ssc_stepsize_high;
+ u32 ssc_div_per_low;
+ u32 ssc_div_per_high;
+ u32 ssc_adjper_low;
+ u32 ssc_adjper_high;
+ u32 ssc_control;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ bool enable_ssc;
+ bool ssc_center;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct pll_7nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+
+ void __iomem *phy_cmn_mmio;
+ void __iomem *mmio;
+
+ u64 vco_ref_clk_rate;
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ int vco_delay;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+
+ /* private clocks: */
+ struct clk_hw *out_div_clk_hw;
+ struct clk_hw *bit_clk_hw;
+ struct clk_hw *byte_clk_hw;
+ struct clk_hw *by_2_bit_clk_hw;
+ struct clk_hw *post_out_div_clk_hw;
+ struct clk_hw *pclk_mux_hw;
+ struct clk_hw *out_dsiclk_hw;
+
+ /* clock-provider: */
+ struct clk_hw_onecell_data *hw_data;
+
+ struct pll_7nm_cached_state cached_state;
+
+ enum msm_dsi_phy_usecase uc;
+ struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = pll->vco_ref_clk_rate;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 4800;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+
+ /* TODO: ssc enable */
+ config->enable_ssc = false;
+ config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 fref = pll->vco_ref_clk_rate;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+ regs->pll_clock_inverters = 0x28;
+ else if (pll_freq <= 1000000000ULL)
+ regs->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ regs->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3020000000ULL)
+ regs->pll_clock_inverters = 0x00;
+ else
+ regs->pll_clock_inverters = 0x40;
+
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
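+
+/*
+ * Worked example for dsi_pll_calc_dec_frac() (illustrative numbers): with
+ * vco_current_rate = 1.5 GHz, fref = 19.2 MHz and the prescaler enabled,
+ * divider = 38400000 and multiplier = 2^18 = 262144, so
+ *   dec_multiple = 1500000000 * 262144 / 38400000 = 10240000
+ *   dec  = 10240000 / 262144 = 39
+ *   frac = 10240000 % 262144 = 16384
+ * i.e. the doubled reference is multiplied by 39 + 16384/262144 = 39.0625.
+ */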
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = regs->frac_div_start_low |
+ (regs->frac_div_start_mid << 8) |
+ (regs->frac_div_start_high << 16);
+ ssc_step_size = regs->decimal_div_start;
+ ssc_step_size *= (1 << config->frac_bits);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ regs->ssc_div_per_low = ssc_per & 0xFF;
+ regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+ regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+ regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+ regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+ regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+ regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ regs->decimal_div_start, frac, config->frac_bits);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
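+
+/*
+ * Worked example with the defaults from dsi_pll_setup_config() (illustrative):
+ * ref_freq = 19.2 MHz, ssc_freq = 31500, ssc_offset = 4800, ssc_adj_per = 2.
+ *   ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 304
+ * which is then aligned down by ssc_mod = 305 % 3 = 2 to ssc_per = 302.
+ * Reusing dec = 39, frac = 16384 from a 1.5 GHz VCO:
+ *   ssc_step_size = 10240000 * 4800 * 3 / 303 / 1000000 ~= 487 (0x1e7)
+ * which is then split into the low/high step size register fields.
+ */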
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ if (pll->pll_configuration.enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ regs->ssc_stepsize_low);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ regs->ssc_stepsize_high);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ regs->ssc_div_per_low);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ regs->ssc_div_per_high);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+ regs->ssc_adjper_low);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+ regs->ssc_adjper_high);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | regs->ssc_control);
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+ if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+ if (pll->vco_current_rate >= 3100000000ULL)
+ analog_controls_five_1 = 0x03;
+
+ if (pll->vco_current_rate < 1520000000ULL)
+ vco_config_1 = 0x08;
+ else if (pll->vco_current_rate < 2990000000ULL)
+ vco_config_1 = 0x01;
+ }
+
+ pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+ analog_controls_five_1);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+ pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
+
+ if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ if (pll->slave)
+ pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+ pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
+ parent_rate);
+
+ pll_7nm->vco_current_rate = rate;
+ pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+ dsi_pll_setup_config(pll_7nm);
+
+ dsi_pll_calc_dec_frac(pll_7nm);
+
+ dsi_pll_calc_ssc(pll_7nm);
+
+ dsi_pll_commit(pll_7nm);
+
+ dsi_pll_config_hzindep_reg(pll_7nm);
+
+ dsi_pll_ssc_commit(pll_7nm);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->mmio +
+ REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+ pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+ /*
+ * Reset the PHY digital domain. This would be needed when
+ * coming out of a CX or analog rail power collapse while
+ * ensuring that the pads maintain LP00 or LP11 state
+ */
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+ wmb(); /* Ensure that the reset is asserted */
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+ wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+ /* Start PLL */
+ pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_7nm_lock_status(pll_7nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_7nm->id);
+ goto error;
+ }
+
+ pll->pll_on = true;
+
+ /*
+ * assert power on reset for PHY digital in case the PLL is
+ * enabled after a CX or analog domain power collapse. This needs
+ * to be done before enabling the global clk.
+ */
+ dsi_pll_phy_dig_reset(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+ dsi_pll_enable_global_clk(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+ pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL,
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL.
+ */
+ dsi_pll_disable_global_clk(pll_7nm);
+ pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_7nm);
+ if (pll_7nm->slave) {
+ dsi_pll_disable_global_clk(pll_7nm->slave);
+ dsi_pll_disable_sub(pll_7nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ void __iomem *base = pll_7nm->mmio;
+ u64 ref_clk = pll_7nm->vco_ref_clk_rate;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ * 2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
+ */
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_7nm->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
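+
+/*
+ * Worked example (the inverse of the dec/frac example above, illustrative):
+ * with dec = 39, frac = 16384 and ref_clk = 19.2 MHz,
+ *   pll_freq = 39 * 38400000 + 38400000 * 16384 / 262144
+ *            = 1497600000 + 2400000 = 1500000000
+ * recovering the 1.5 GHz VCO rate that was programmed.
+ */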
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_7nm_vco_set_rate,
+ .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+ .prepare = dsi_pll_7nm_vco_prepare,
+ .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = pll_read(pll_7nm->mmio +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+ u32 val;
+
+ val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+ DBG("DSI PLL%d", pll_7nm->id);
+
+ return 0;
+}
+
+static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ void __iomem *base = pll_7nm->phy_cmn_mmio;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_7nm->id);
+
+ switch (uc) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ pll_7nm->uc = uc;
+
+ return 0;
+}
+
+static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
+
+ DBG("DSI PLL%d", pll_7nm->id);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+ if (pixel_clk_provider)
+ *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+ return 0;
+}
+
+static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+ struct device *dev = &pll_7nm->pdev->dev;
+
+ DBG("DSI PLL%d", pll_7nm->id);
+ of_clk_del_provider(dev->of_node);
+
+ clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+ clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+ clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+ clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+ clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+ clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+ clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+ clk_hw_unregister(&pll_7nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
+{
+ char clk_name[32], parent[32], vco_name[32];
+ char parent2[32], parent3[32], parent4[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_7nm_vco,
+ };
+ struct device *dev = &pll_7nm->pdev->dev;
+ struct clk_hw_onecell_data *hw_data;
+ struct clk_hw *hw;
+ int ret;
+
+ DBG("DSI%d", pll_7nm->id);
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+ NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
+ pll_7nm->base.clk_hw.init = &vco_init;
+
+ ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
+
+ hw = clk_hw_register_divider(dev, clk_name,
+ parent, CLK_SET_RATE_PARENT,
+ pll_7nm->mmio +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_base_clk_hw;
+ }
+
+ pll_7nm->out_div_clk_hw = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT,
+ pll_7nm->phy_cmn_mmio +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_7nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_out_div_clk_hw;
+ }
+
+ pll_7nm->bit_clk_hw = hw;
+
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_bit_clk_hw;
+ }
+
+ pll_7nm->byte_clk_hw = hw;
+ hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 2);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_byte_clk_hw;
+ }
+
+ pll_7nm->by_2_bit_clk_hw = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 4);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_by_2_bit_clk_hw;
+ }
+
+ pll_7nm->post_out_div_clk_hw = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+
+ hw = clk_hw_register_mux(dev, clk_name,
+ ((const char *[]){
+ parent, parent2, parent3, parent4
+ }), 4, 0, pll_7nm->phy_cmn_mmio +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_post_out_div_clk_hw;
+ }
+
+ pll_7nm->pclk_mux_hw = hw;
+
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ 0, pll_7nm->phy_cmn_mmio +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_7nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_pclk_mux_hw;
+ }
+
+ pll_7nm->out_dsiclk_hw = hw;
+ hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+ hw_data->num = NUM_PROVIDED_CLKS;
+ pll_7nm->hw_data = hw_data;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ pll_7nm->hw_data);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+ goto err_dsiclk_hw;
+ }
+
+ return 0;
+
+err_dsiclk_hw:
+ clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+ clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+err_bit_clk_hw:
+ clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+err_out_div_clk_hw:
+ clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+err_base_clk_hw:
+ clk_hw_unregister(&pll_7nm->base.clk_hw);
+
+ return ret;
+}
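pll_7nm_register() uses the kernel's standard goto-unwind idiom: each error label undoes exactly the registrations that succeeded before the failure, in reverse order, and dsi_pll_7nm_destroy() above mirrors the same sequence for a clean teardown. Reduced to a sketch (register_a()/register_b() are hypothetical helpers):

    static int register_all(struct device *dev)
    {
            int ret;

            ret = register_a(dev);
            if (ret)
                    return ret;

            ret = register_b(dev);
            if (ret)
                    goto err_a;

            return 0;

    err_a:
            unregister_a(dev);
            return ret;
    }

Adding a clock to the chain means adding one registration step plus one matching label, which keeps the unwind provably complete.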
+
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+ struct dsi_pll_7nm *pll_7nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+ if (!pll_7nm)
+ return ERR_PTR(-ENOMEM);
+
+ DBG("DSI PLL%d", id);
+
+ pll_7nm->pdev = pdev;
+ pll_7nm->id = id;
+ pll_7nm_list[id] = pll_7nm;
+
+ pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&pll_7nm->postdiv_lock);
+
+ pll = &pll_7nm->base;
+ pll->min_rate = 1000000000UL;
+ pll->max_rate = 3500000000UL;
+ if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+ pll->min_rate = 600000000UL;
+ pll->max_rate = (unsigned long)5000000000ULL;
+ /* workaround for max rate overflowing on 32-bit builds: */
+ pll->max_rate = max(pll->max_rate, 0xffffffffUL);
+ }
+ pll->get_provider = dsi_pll_7nm_get_provider;
+ pll->destroy = dsi_pll_7nm_destroy;
+ pll->save_state = dsi_pll_7nm_save_state;
+ pll->restore_state = dsi_pll_7nm_restore_state;
+ pll->set_usecase = dsi_pll_7nm_set_usecase;
+
+ pll_7nm->vco_delay = 1;
+
+ ret = pll_7nm_register(pll_7nm);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_pll_save_state(pll);
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 79333842f70a..49685571dc0e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
- if (!dev->dma_parms) {
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- if (!dev->dma_parms) {
- ret = -ENOMEM;
- goto err_msm_uninit;
- }
- }
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, UINT_MAX);
msm_gem_shrinker_init(ddev);
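Two things changed in the hunk above: the driver core now pre-allocates dev->dma_parms for platform devices, so the open-coded allocation is dead weight, and UINT_MAX (numerically the same as DMA_BIT_MASK(32) once truncated to this 32-bit field) states the real intent that there is no segment-size limit for this device.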
@@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
+ kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
- ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
+ ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
return 0;
@@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
static void context_close(struct msm_file_private *ctx)
{
msm_submitqueue_close(ctx);
- kfree(ctx);
+ msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
- struct drm_gem_object *obj, uint64_t *iova)
+ struct drm_file *file, struct drm_gem_object *obj,
+ uint64_t *iova)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
- if (!priv->gpu)
+ if (!ctx->aspace)
return -EINVAL;
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
- return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
+ return msm_gem_get_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
args->value = msm_gem_mmap_offset(obj);
break;
case MSM_INFO_GET_IOVA:
- ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+ ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
break;
case MSM_INFO_SET_NAME:
/* length check should leave room for terminating null: */
@@ -1358,6 +1352,7 @@ static int __init msm_drm_register(void)
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
+ msm_dp_register();
adreno_register();
return platform_driver_register(&msm_platform_driver);
}
@@ -1366,6 +1361,7 @@ static void __exit msm_drm_unregister(void)
{
DBG("fini");
platform_driver_unregister(&msm_platform_driver);
+ msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
msm_edp_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index af259b0573ea..b9dd8f8f4887 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -57,6 +57,7 @@ struct msm_file_private {
struct list_head submitqueues;
int queueid;
struct msm_gem_address_space *aspace;
+ struct kref ref;
};
enum msm_mdp_plane_property {
@@ -159,6 +160,8 @@ struct msm_drm_private {
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
+ struct msm_dp *dp;
+
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
@@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
+
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace);
+
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
@@ -302,9 +309,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
-void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
-void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
+void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
@@ -378,6 +384,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
}
#endif
+#ifdef CONFIG_DRM_MSM_DP
+int __init msm_dp_register(void);
+void __exit msm_dp_unregister(void);
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder);
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void msm_dp_irq_postinstall(struct msm_dp *dp_display);
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
+
+#else
+static inline int __init msm_dp_register(void)
+{
+ return -EINVAL;
+}
+static inline void __exit msm_dp_unregister(void)
+{
+}
+static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline int msm_dp_display_enable(struct msm_dp *dp,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline int msm_dp_display_disable(struct msm_dp *dp,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline void msm_dp_display_mode_set(struct msm_dp *dp,
+ struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+}
+
+static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
+ struct drm_minor *minor)
+{
+}
+
+#endif
+
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
void __init msm_dpu_register(void);
@@ -398,8 +461,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
-static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
- const char *fmt, ...) {}
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit,
+ const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
@@ -419,7 +483,8 @@ struct msm_gpu_submitqueue;
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm,
+ struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
struct drm_msm_submitqueue_query *args);
@@ -428,6 +493,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
void msm_submitqueue_destroy(struct kref *kref);
+static inline void __msm_file_private_destroy(struct kref *kref)
+{
+ struct msm_file_private *ctx = container_of(kref,
+ struct msm_file_private, ref);
+
+ msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx);
+}
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+ kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+ struct msm_file_private *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
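These inlines give struct msm_file_private a conventional kref lifecycle: context_init() starts the count at one, each submitqueue takes an extra reference (see msm_submitqueue_create() below), and the release callback runs only when the last holder drops out, which is what lets a queue, and the private address space it uses, safely outlive the file that created them. The same pattern in self-contained form (struct obj is hypothetical):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref ref;
    };

    static void obj_release(struct kref *kref)
    {
            kfree(container_of(kref, struct obj, ref));
    }

    static void obj_put(struct obj *obj)
    {
            /* the final put invokes obj_release() */
            kref_put(&obj->ref, obj_release);
    }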
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b2f49152b4d4..04be4cfcccc1 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
@@ -52,26 +53,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
- dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- dma_map_sg(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- }
+ dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
- if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
- dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- dma_unmap_sg(dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- }
+ dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
@@ -126,7 +115,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
msm_obj->pages = p;
- msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
@@ -753,31 +742,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
return 0;
}
-void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- msm_obj->gpu = gpu;
- if (exclusive)
- dma_resv_add_excl_fence(obj->resv, fence);
- else
- dma_resv_add_shared_fence(obj->resv, fence);
- list_del_init(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+
+ if (!atomic_fetch_inc(&msm_obj->active_count)) {
+ msm_obj->gpu = gpu;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+ }
}
-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+void msm_gem_active_put(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- msm_obj->gpu = NULL;
- list_del_init(&msm_obj->mm_list);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ if (!atomic_dec_return(&msm_obj->active_count)) {
+ msm_obj->gpu = NULL;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ }
}
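The move_to_active/inactive rework replaces a boolean notion of "on the GPU" with a counter, so an object referenced by several in-flight submits only migrates between lists on the 0->1 and 1->0 edges. The two atomics are picked to make those edges cheap to detect, since atomic_fetch_inc() returns the old value and atomic_dec_return() returns the new one:

    if (!atomic_fetch_inc(&count))
            first_user();   /* old value was 0: 0 -> 1 transition */

    if (!atomic_dec_return(&count))
            last_user();    /* new value is 0: 1 -> 0 transition */

(first_user()/last_user() stand in for the list moves above.)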
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -852,11 +841,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
- vma->aspace != NULL ? vma->aspace->name : NULL,
- vma->iova, vma->mapped ? "mapped" : "unmapped",
+ list_for_each_entry(vma, &msm_obj->vmas, list) {
+ const char *name, *comm;
+ if (vma->aspace) {
+ struct msm_gem_address_space *aspace = vma->aspace;
+ struct task_struct *task =
+ get_pid_task(aspace->pid, PIDTYPE_PID);
+ if (task) {
+ comm = kstrdup(task->comm, GFP_KERNEL);
+ } else {
+ comm = NULL;
+ }
+ name = aspace->name;
+ } else {
+ name = comm = NULL;
+ }
+ seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->aspace, vma->iova,
+ vma->mapped ? "mapped" : "unmapped",
vma->inuse);
+ kfree(comm);
+ }
seq_puts(m, "\n");
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 972490b14ba5..a1bf741b9b89 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,11 @@ struct msm_gem_address_space {
spinlock_t lock; /* Protects drm_mm node allocation/removal */
struct msm_mmu *mmu;
struct kref kref;
+
+ /* For address spaces associated with a specific process, this
+ * will be non-NULL:
+ */
+ struct pid *pid;
};
struct msm_gem_vma {
@@ -83,12 +88,14 @@ struct msm_gem_object {
struct mutex lock; /* Protects resources associated with bo */
char name[32]; /* Identifier to print for the debugfs files */
+
+ atomic_t active_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
static inline bool is_active(struct msm_gem_object *msm_obj)
{
- return msm_obj->gpu != NULL;
+ return atomic_read(&msm_obj->active_count);
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -142,6 +149,7 @@ struct msm_gem_submit {
bool valid; /* true if no cmdstream patching needed */
bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
+ struct msm_file_private *ctx;
unsigned int nr_cmds;
unsigned int nr_bos;
u32 ident; /* A "identifier" for the submit for logging */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index d7c8948427fe..515ef80816a0 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return NULL;
- return drm_prime_pages_to_sg(msm_obj->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 722d61668a97..482576d7a39a 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -6,6 +6,7 @@
#include "msm_drv.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
@@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&dev->struct_mutex);
if (freed > 0)
- pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+ trace_msm_gem_purge(freed << PAGE_SHIFT);
return freed;
}
@@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
*(unsigned long *)ptr += unmapped;
if (unmapped > 0)
- pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+ trace_msm_gem_purge_vmaps(unmapped);
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 8cb9aa15ff90..aa5c60a7132d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -27,7 +27,7 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
+ struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
@@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return NULL;
submit->dev = dev;
- submit->aspace = aspace;
+ submit->aspace = queue->ctx->aspace;
submit->gpu = gpu;
submit->fence = NULL;
submit->cmd = (void *)&submit->bos[nr_bos];
@@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
+ submit = submit_create(dev, gpu, queue, args->nr_bos,
args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
@@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- msm_gpu_submit(gpu, submit, ctx);
+ msm_gpu_submit(gpu, submit);
args->fence = submit->fence->seqno;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 5f6a11211b64..f914ddbaea89 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref)
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
+ put_pid(aspace->pid);
kfree(aspace);
}
@@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+{
+ if (!IS_ERR_OR_NULL(aspace))
+ kref_get(&aspace->kref);
+
+ return aspace;
+}
+
/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
@@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, prot);
- if (ret)
+ if (ret) {
vma->mapped = false;
+ vma->inuse--;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 57ddc9438351..55d16489d0f3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -24,7 +24,7 @@
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
opp = devfreq_recommended_opp(dev, freq, flags);
@@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
if (IS_ERR(opp))
return PTR_ERR(opp);
+ trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
if (gpu->funcs->gpu_set_freq)
gpu->funcs->gpu_set_freq(gpu, opp);
else
@@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct msm_gpu *gpu = dev_to_gpu(dev);
ktime_t time;
if (gpu->funcs->gpu_get_freq)
@@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev,
static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
- struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct msm_gpu *gpu = dev_to_gpu(dev);
if (gpu->funcs->gpu_get_freq)
*freq = gpu->funcs->gpu_get_freq(gpu);
@@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int ret;
DBG("%s", gpu->name);
+ trace_msm_gpu_resume(0);
ret = enable_pwrrail(gpu);
if (ret)
@@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
int ret;
DBG("%s", gpu->name);
+ trace_msm_gpu_suspend(0);
devfreq_suspend_device(gpu->devfreq.devfreq);
@@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work)
struct msm_ringbuffer *ring = gpu->rb[i];
list_for_each_entry(submit, &ring->submits, node)
- gpu->funcs->submit(gpu, submit, NULL);
+ gpu->funcs->submit(gpu, submit);
}
}
@@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- /* move to inactive: */
- msm_gem_move_to_inactive(&msm_obj->base);
+
+ msm_gem_active_put(&msm_obj->base);
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
drm_gem_object_put_locked(&msm_obj->base);
}
@@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct drm_gem_object *drm_obj = &msm_obj->base;
uint64_t iova;
/* can't happen yet.. but when we add 2d support we'll have
@@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+ dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+ dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
+
+ msm_gem_active_get(drm_obj, gpu);
}
- gpu->funcs->submit(gpu, submit, ctx);
- priv->lastctx = ctx;
+ gpu->funcs->submit(gpu, submit);
+ priv->lastctx = submit->queue->ctx;
hangcheck_timer_reset(gpu);
}
@@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
+/* Return a new address space for a msm_drm_private instance */
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+{
+ struct msm_gem_address_space *aspace = NULL;
+ if (!gpu)
+ return NULL;
+
+ /*
+ * If the target doesn't support private address spaces then return
+ * the global one.
+ */
+ if (gpu->funcs->create_private_address_space) {
+ aspace = gpu->funcs->create_private_address_space(gpu);
+ if (!IS_ERR(aspace))
+ aspace->pid = get_pid(task_pid(task));
+ }
+
+ if (IS_ERR_OR_NULL(aspace))
+ aspace = msm_gem_address_space_get(gpu->aspace);
+
+ return aspace;
+}
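The fallback leans on the kernel's ERR_PTR conventions: the new create_private_address_space() hook may return a valid pointer, an encoded errno, or not exist at all, and IS_ERR_OR_NULL() folds the failure cases into "share the GPU's global address space". msm_gem_address_space_get() (added in msm_gem_vma.c above) tolerates the same tri-state, which keeps this function free of special cases. The convention in miniature (create_thing()/get_default_thing() are hypothetical):

    struct thing *t = create_thing();

    if (IS_ERR_OR_NULL(t))          /* ERR_PTR(-errno) or feature absent */
            t = get_default_thing();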
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->gpu_cx = NULL;
gpu->pdev = pdev;
- platform_set_drvdata(pdev, gpu);
+ platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 37cffac4cbe3..6c9e1fdc1a76 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -7,6 +7,7 @@
#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__
+#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
@@ -45,8 +46,7 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+ void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
@@ -66,6 +66,9 @@ struct msm_gpu_funcs {
void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
struct msm_gem_address_space *(*create_address_space)
(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct msm_gem_address_space *(*create_private_address_space)
+ (struct msm_gpu *gpu);
+ uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
struct msm_gpu {
@@ -74,6 +77,8 @@ struct msm_gpu {
struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
+ struct adreno_smmu_priv adreno_smmu;
+
/* performance counters (hw & sw): */
spinlock_t perf_lock;
bool perfcntr_active;
@@ -144,6 +149,12 @@ struct msm_gpu {
bool hw_apriv;
};
+static inline struct msm_gpu *dev_to_gpu(struct device *dev)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+ return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
+}
+
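dev_to_gpu() is the counterpart of the platform_set_drvdata() change in msm_gpu_init(): drvdata now points at the embedded adreno_smmu member so the arm-smmu side can reach the private interface, and the GPU code gets its own structure back with container_of(), which simply subtracts the member's offset from the pointer. In isolation:

    struct inner {
            int x;
    };

    struct outer {
            int a;
            struct inner member;
    };

    static struct outer *outer_of(struct inner *p)
    {
            return container_of(p, struct outer, member);
    }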
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
@@ -184,6 +195,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 prio;
int faults;
+ struct msm_file_private *ctx;
struct list_head node;
struct kref ref;
};
@@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 122b84789238..03e0c2536b94 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired,
__entry->start_ticks, __entry->end_ticks)
);
+
+TRACE_EVENT(msm_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ ),
+ TP_fast_assign(
+ /* trace freq in MHz to match intel_gpu_freq_change, to make life easier
+ * for userspace
+ */
+ __entry->freq = DIV_ROUND_UP(freq, 1000000);
+ ),
+ TP_printk("new_freq=%u", __entry->freq)
+);
+
+
+TRACE_EVENT(msm_gmu_freq_change,
+ TP_PROTO(u32 freq, u32 perf_index),
+ TP_ARGS(freq, perf_index),
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ __field(u32, perf_index)
+ ),
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->perf_index = perf_index;
+ ),
+ TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index)
+);
+
+
+TRACE_EVENT(msm_gem_purge,
+ TP_PROTO(u32 bytes),
+ TP_ARGS(bytes),
+ TP_STRUCT__entry(
+ __field(u32, bytes)
+ ),
+ TP_fast_assign(
+ __entry->bytes = bytes;
+ ),
+ TP_printk("Purging %u bytes", __entry->bytes)
+);
+
+
+TRACE_EVENT(msm_gem_purge_vmaps,
+ TP_PROTO(u32 unmapped),
+ TP_ARGS(unmapped),
+ TP_STRUCT__entry(
+ __field(u32, unmapped)
+ ),
+ TP_fast_assign(
+ __entry->unmapped = unmapped;
+ ),
+ TP_printk("Purging %u vmaps", __entry->unmapped)
+);
+
+
+TRACE_EVENT(msm_gpu_suspend,
+ TP_PROTO(int dummy),
+ TP_ARGS(dummy),
+ TP_STRUCT__entry(
+ __field(u32, dummy)
+ ),
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+ TP_printk("%u", __entry->dummy)
+);
+
+
+TRACE_EVENT(msm_gpu_resume,
+ TP_PROTO(int dummy),
+ TP_ARGS(dummy),
+ TP_STRUCT__entry(
+ __field(u32, dummy)
+ ),
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+ TP_printk("%u", __entry->dummy)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 310a31b05faa..379496186c7f 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
- struct scatterlist *sg;
+ struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
- unsigned i, j;
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
prot_bits |= 2;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- dma_addr_t addr = sg->dma_address;
- for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
- gpummu->table[idx] = addr | prot_bits;
- addr += GPUMMU_PAGE_SIZE;
- }
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+ int i;
+
+ for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+ gpummu->table[idx++] = (addr + i) | prot_bits;
}
/* we can improve by deferring flush for multiple map() */
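The rewritten loop iterates DMA pages rather than raw scatterlist entries: for_each_sgtable_dma_page() hands back each PAGE_SIZE chunk of the mapped range, so the inner loop only splits a page into GPUMMU_PAGE_SIZE slots and the manual sg->dma_address/sg->length bookkeeping disappears. The iterator on its own (sketch):

    #include <linux/scatterlist.h>

    static void walk_dma_pages(struct sg_table *sgt)
    {
            struct sg_dma_page_iter iter;

            for_each_sgtable_dma_page(sgt, &iter, 0) {
                    dma_addr_t addr = sg_page_iter_dma_address(&iter);
                    /* addr covers one PAGE_SIZE of the DMA mapping */
            }
    }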
@@ -102,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
}
gpummu->gpu = gpu;
- msm_mmu_init(&gpummu->base, dev, &funcs);
+ msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
return &gpummu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3a381a9674c9..22ac7c692a81 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -4,15 +4,210 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/adreno-smmu-priv.h>
+#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
+ atomic_t pagetables;
};
+
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+struct msm_iommu_pagetable {
+ struct msm_mmu base;
+ struct msm_mmu *parent;
+ struct io_pgtable_ops *pgtbl_ops;
+ phys_addr_t ttbr;
+ u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+ return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+ size_t size)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ size_t unmapped = 0, len = size;
+
+ /* Unmap the block one page at a time */
+ while (size) {
+ unmapped += ops->unmap(ops, iova, 4096, NULL);
+ iova += 4096;
+ size -= 4096;
+ }
+
+ iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+ return (unmapped == len) ? 0 : -EINVAL; /* 'size' was consumed by the loop */
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+ struct sg_table *sgt, size_t len, int prot)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ struct scatterlist *sg;
+ size_t mapped = 0;
+ u64 addr = iova;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t size = sg->length;
+ phys_addr_t phys = sg_phys(sg);
+
+ /* Map the block one page at a time */
+ while (size) {
+ if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+ msm_iommu_pagetable_unmap(mmu, iova, mapped);
+ return -EINVAL;
+ }
+
+ phys += 4096;
+ addr += 4096;
+ size -= 4096;
+ mapped += 4096;
+ }
+ }
+
+ return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+ struct adreno_smmu_priv *adreno_smmu =
+ dev_get_drvdata(pagetable->parent->dev);
+
+ /*
+ * If this is the last attached pagetable for the parent,
+ * disable TTBR0 in the arm-smmu driver
+ */
+ if (atomic_dec_return(&iommu->pagetables) == 0)
+ adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+
+ free_io_pgtable_ops(pagetable->pgtbl_ops);
+ kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+ phys_addr_t *ttbr, int *asid)
+{
+ struct msm_iommu_pagetable *pagetable;
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (ttbr)
+ *ttbr = pagetable->ttbr;
+
+ if (asid)
+ *asid = pagetable->asid;
+
+ return 0;
+}
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+ .map = msm_iommu_pagetable_map,
+ .unmap = msm_iommu_pagetable_unmap,
+ .destroy = msm_iommu_pagetable_destroy,
+};
+
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+ .tlb_flush_all = msm_iommu_tlb_flush_all,
+ .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+ .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
+ .tlb_add_page = msm_iommu_tlb_add_page,
+};
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+ struct msm_iommu *iommu = to_msm_iommu(parent);
+ struct msm_iommu_pagetable *pagetable;
+ const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+ struct io_pgtable_cfg ttbr0_cfg;
+ int ret;
+
+ /* Get the pagetable configuration from the domain */
+ if (adreno_smmu->cookie)
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+ if (!ttbr1_cfg)
+ return ERR_PTR(-ENODEV);
+
+ pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+ if (!pagetable)
+ return ERR_PTR(-ENOMEM);
+
+ msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+ MSM_MMU_IOMMU_PAGETABLE);
+
+ /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
+ ttbr0_cfg = *ttbr1_cfg;
+
+ /* The incoming cfg will have the TTBR1 quirk enabled */
+ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+ ttbr0_cfg.tlb = &null_tlb_ops;
+
+ pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+ &ttbr0_cfg, iommu->domain);
+
+ if (!pagetable->pgtbl_ops) {
+ kfree(pagetable);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * If this is the first pagetable that we've allocated, send it back to
+ * the arm-smmu driver as a trigger to set up TTBR0
+ */
+ if (atomic_inc_return(&iommu->pagetables) == 1) {
+ ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+ if (ret) {
+ free_io_pgtable_ops(pagetable->pgtbl_ops);
+ kfree(pagetable);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /* Needed later for TLB flush */
+ pagetable->parent = parent;
+ pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+ /*
+ * TODO we would like each set of page tables to have a unique ASID
+ * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
+ * end up flushing the ASID used for TTBR1 pagetables, which is not
+ * what we want. So for now just use the same ASID as TTBR1.
+ */
+ pagetable->asid = 0;
+
+ return &pagetable->base;
+}
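This function is the heart of per-process pagetables: the SMMU keeps its kernel-managed TTBR1 table for the global address space, while each GPU context gets a private TTBR0 table whose TTBR/ASID the GPU switches on context switch. The contract with the arm-smmu driver is the small callback struct introduced alongside this series, roughly (see include/linux/adreno-smmu-priv.h):

    struct adreno_smmu_priv {
            const void *cookie;
            const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
            int (*set_ttbr0_cfg)(const void *cookie,
                                 const struct io_pgtable_cfg *cfg);
    };

get_ttbr1_cfg() supplies the template cloned above, and set_ttbr0_cfg() arms (cfg != NULL) or disarms (cfg == NULL) TTBR0 translation, driven by the first/last transitions of the pagetables counter.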
+
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
@@ -36,7 +231,11 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
- ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+ /* The arm-smmu driver expects the addresses to be sign extended */
+ if (iova & BIT_ULL(48))
+ iova |= GENMASK_ULL(63, 49);
+
+ ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
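The sign extension reflects how arm-smmu splits a 49-bit input range between TTBR0 (lower half) and TTBR1 (upper half): an IOVA with bit 48 set belongs to the upper half and must be presented in canonical form, with bits 63:49 copied from bit 48. Worked through on a concrete value:

    u64 iova = 0x0001000000000000ULL;       /* bit 48 set */

    if (iova & BIT_ULL(48))
            iova |= GENMASK_ULL(63, 49);    /* mask = 0xfffe000000000000 */

    /* iova is now 0xffff000000000000 */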
@@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
+ if (iova & BIT_ULL(48))
+ iova |= GENMASK_ULL(63, 49);
+
iommu_unmap(iommu->domain, iova, len);
return 0;
@@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
- msm_mmu_init(&iommu->base, dev, &funcs);
+ msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
+ atomic_set(&iommu->pagetables, 0);
+
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
kfree(iommu);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 3a534ee59bf6..61ade89d9e48 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -17,18 +17,26 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
};
+enum msm_mmu_type {
+ MSM_MMU_GPUMMU,
+ MSM_MMU_IOMMU,
+ MSM_MMU_IOMMU_PAGETABLE,
+};
+
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags);
void *arg;
+ enum msm_mmu_type type;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
- const struct msm_mmu_funcs *funcs)
+ const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
{
mmu->dev = dev;
mmu->funcs = funcs;
+ mmu->type = type;
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
@@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error);
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
+ int *asid);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 7764373d0ed2..0987d6bf848c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -31,6 +31,7 @@ struct msm_rbmemptrs {
volatile uint32_t fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
+ volatile u64 ttbr0;
};
struct msm_ringbuffer {
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index a1d94be7883a..c3d206105d28 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref)
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
+ msm_file_private_put(queue->ctx);
+
kfree(queue);
}
@@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
- list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+ list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
+ list_del(&entry->node);
msm_submitqueue_put(entry);
+ }
}
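Unlinking each queue before dropping the reference avoids leaving a freed entry chained on the context's list: msm_submitqueue_put() may free the entry, and the _safe iterator only guards the element currently being removed, not stale neighbours left behind. The generic shape (entry_put() is hypothetical):

    struct entry *e, *tmp;

    list_for_each_entry_safe(e, tmp, &head, node) {
            list_del(&e->node);
            entry_put(e);   /* may free e; tmp keeps the walk valid */
    }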
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
@@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
write_lock(&ctx->queuelock);
+ queue->ctx = msm_file_private_get(ctx);
queue->id = ctx->queueid++;
if (id)
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 0dca8f27169e..0143d539f8f8 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -5,7 +5,7 @@ config DRM_MXS
Choose this option to select drivers for MXS FB devices
config DRM_MXSFB
- tristate "i.MX23/i.MX28/i.MX6SX MXSFB LCD controller"
+ tristate "i.MX (e)LCDIF LCD controller"
depends on DRM && OF
depends on COMMON_CLK
select DRM_MXS
@@ -13,8 +13,10 @@ config DRM_MXSFB
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
+ select DRM_PANEL_BRIDGE
help
- Choose this option if you have an i.MX23/i.MX28/i.MX6SX MXSFB
- LCD controller.
+ Choose this option if you have an LCDIF or eLCDIF LCD controller.
+ Those devices are found in various i.MX SoCs (including i.MX23,
+ i.MX28, i.MX6SX, i.MX7 and i.MX8M).
If M is selected the module will be called mxsfb.
diff --git a/drivers/gpu/drm/mxsfb/Makefile b/drivers/gpu/drm/mxsfb/Makefile
index ff6e358088fa..26d153896d72 100644
--- a/drivers/gpu/drm/mxsfb/Makefile
+++ b/drivers/gpu/drm/mxsfb/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-mxsfb-y := mxsfb_drv.o mxsfb_crtc.o mxsfb_out.o
+mxsfb-y := mxsfb_drv.o mxsfb_kms.o
obj-$(CONFIG_DRM_MXSFB) += mxsfb.o
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
deleted file mode 100644
index b69ace8bf526..000000000000
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ /dev/null
@@ -1,343 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Marek Vasut <marex@denx.de>
- *
- * This code is based on drivers/video/fbdev/mxsfb.c :
- * Copyright (C) 2010 Juergen Beisert, Pengutronix
- * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
-
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "mxsfb_drv.h"
-#include "mxsfb_regs.h"
-
-#define MXS_SET_ADDR 0x4
-#define MXS_CLR_ADDR 0x8
-#define MODULE_CLKGATE BIT(30)
-#define MODULE_SFTRST BIT(31)
-/* 1 second delay should be plenty of time for block reset */
-#define RESET_TIMEOUT 1000000
-
-static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
-{
- return (val & mxsfb->devdata->hs_wdth_mask) <<
- mxsfb->devdata->hs_wdth_shift;
-}
-
-/* Setup the MXSFB registers for decoding the pixels out of the framebuffer */
-static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
-{
- struct drm_crtc *crtc = &mxsfb->pipe.crtc;
- struct drm_device *drm = crtc->dev;
- const u32 format = crtc->primary->state->fb->format->format;
- u32 ctrl, ctrl1;
-
- ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;
-
- /*
- * WARNING: The bus width, CTRL_SET_BUS_WIDTH(), is configured to
- * match the selected mode here. This differs from the original
- * MXSFB driver, which had the option to configure the bus width
- * to arbitrary value. This limitation should not pose an issue.
- */
-
- /* CTRL1 contains IRQ config and status bits, preserve those. */
- ctrl1 = readl(mxsfb->base + LCDC_CTRL1);
- ctrl1 &= CTRL1_CUR_FRAME_DONE_IRQ_EN | CTRL1_CUR_FRAME_DONE_IRQ;
-
- switch (format) {
- case DRM_FORMAT_RGB565:
- dev_dbg(drm->dev, "Setting up RGB565 mode\n");
- ctrl |= CTRL_SET_WORD_LENGTH(0);
- ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
- break;
- case DRM_FORMAT_XRGB8888:
- dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
- ctrl |= CTRL_SET_WORD_LENGTH(3);
- /* Do not use packed pixels = one pixel per word instead. */
- ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
- break;
- default:
- dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
- return -EINVAL;
- }
-
- writel(ctrl1, mxsfb->base + LCDC_CTRL1);
- writel(ctrl, mxsfb->base + LCDC_CTRL);
-
- return 0;
-}
-
-static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
-{
- struct drm_crtc *crtc = &mxsfb->pipe.crtc;
- struct drm_device *drm = crtc->dev;
- u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- u32 reg;
-
- reg = readl(mxsfb->base + LCDC_CTRL);
-
- if (mxsfb->connector->display_info.num_bus_formats)
- bus_format = mxsfb->connector->display_info.bus_formats[0];
-
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
- bus_format);
-
- reg &= ~CTRL_BUS_WIDTH_MASK;
- switch (bus_format) {
- case MEDIA_BUS_FMT_RGB565_1X16:
- reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
- break;
- default:
- dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
- break;
- }
- writel(reg, mxsfb->base + LCDC_CTRL);
-}
-
-static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
-{
- u32 reg;
-
- if (mxsfb->clk_disp_axi)
- clk_prepare_enable(mxsfb->clk_disp_axi);
- clk_prepare_enable(mxsfb->clk);
-
- /* If it was disabled, re-enable the mode again */
- writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
-
- /* Enable the SYNC signals first, then the DMA engine */
- reg = readl(mxsfb->base + LCDC_VDCTRL4);
- reg |= VDCTRL4_SYNC_SIGNALS_ON;
- writel(reg, mxsfb->base + LCDC_VDCTRL4);
-
- writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
-}
-
-static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
-{
- u32 reg;
-
- /*
- * Even if we disable the controller here, it will still continue
- * until its FIFOs are running out of data
- */
- writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_CLR);
-
- readl_poll_timeout(mxsfb->base + LCDC_CTRL, reg, !(reg & CTRL_RUN),
- 0, 1000);
-
- reg = readl(mxsfb->base + LCDC_VDCTRL4);
- reg &= ~VDCTRL4_SYNC_SIGNALS_ON;
- writel(reg, mxsfb->base + LCDC_VDCTRL4);
-
- clk_disable_unprepare(mxsfb->clk);
- if (mxsfb->clk_disp_axi)
- clk_disable_unprepare(mxsfb->clk_disp_axi);
-}
-
-/*
- * Clear the bit and poll it cleared. This is usually called with
- * a reset address and mask being either SFTRST(bit 31) or CLKGATE
- * (bit 30).
- */
-static int clear_poll_bit(void __iomem *addr, u32 mask)
-{
- u32 reg;
-
- writel(mask, addr + MXS_CLR_ADDR);
- return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
-}
-
-static int mxsfb_reset_block(void __iomem *reset_addr)
-{
- int ret;
-
- ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
- if (ret)
- return ret;
-
- writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
-
- ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
- if (ret)
- return ret;
-
- return clear_poll_bit(reset_addr, MODULE_CLKGATE);
-}
-
-static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
-{
- struct drm_framebuffer *fb = mxsfb->pipe.plane.state->fb;
- struct drm_gem_cma_object *gem;
-
- if (!fb)
- return 0;
-
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- if (!gem)
- return 0;
-
- return gem->paddr;
-}
-
-static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
-{
- struct drm_device *drm = mxsfb->pipe.crtc.dev;
- struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- u32 bus_flags = mxsfb->connector->display_info.bus_flags;
- u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
- int err;
-
- /*
- * It seems, you can't re-program the controller if it is still
- * running. This may lead to shifted pictures (FIFO issue?), so
- * first stop the controller and drain its FIFOs.
- */
-
- /* Mandatory eLCDIF reset as per the Reference Manual */
- err = mxsfb_reset_block(mxsfb->base);
- if (err)
- return;
-
- /* Clear the FIFOs */
- writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
-
- err = mxsfb_set_pixel_fmt(mxsfb);
- if (err)
- return;
-
- clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
-
- if (mxsfb->bridge && mxsfb->bridge->timings)
- bus_flags = mxsfb->bridge->timings->input_bus_flags;
-
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
- m->crtc_clock,
- (int)(clk_get_rate(mxsfb->clk) / 1000));
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
- bus_flags);
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
-
- writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
- TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
- mxsfb->base + mxsfb->devdata->transfer_count);
-
- vsync_pulse_len = m->crtc_vsync_end - m->crtc_vsync_start;
-
- vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* Always in DOTCLOCK mode */
- VDCTRL0_VSYNC_PERIOD_UNIT |
- VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
- VDCTRL0_SET_VSYNC_PULSE_WIDTH(vsync_pulse_len);
- if (m->flags & DRM_MODE_FLAG_PHSYNC)
- vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
- if (m->flags & DRM_MODE_FLAG_PVSYNC)
- vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
- /* Make sure Data Enable is high active by default */
- if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
- vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- /*
- * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric,
- * controllers VDCTRL0_DOTCLK is display centric.
- * Drive on positive edge -> display samples on falling edge
- * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
- */
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
- vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
-
- writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
-
- mxsfb_set_bus_fmt(mxsfb);
-
- /* Frame length in lines. */
- writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
-
- /* Line length in units of clocks or pixels. */
- hsync_pulse_len = m->crtc_hsync_end - m->crtc_hsync_start;
- writel(set_hsync_pulse_width(mxsfb, hsync_pulse_len) |
- VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
- mxsfb->base + LCDC_VDCTRL2);
-
- writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
- SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
- mxsfb->base + LCDC_VDCTRL3);
-
- writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
- mxsfb->base + LCDC_VDCTRL4);
-}
-
-void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb)
-{
- dma_addr_t paddr;
-
- mxsfb_enable_axi_clk(mxsfb);
- mxsfb_crtc_mode_set_nofb(mxsfb);
-
- /* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = mxsfb_get_fb_paddr(mxsfb);
- if (paddr) {
- writel(paddr, mxsfb->base + mxsfb->devdata->cur_buf);
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
- }
-
- mxsfb_enable_controller(mxsfb);
-}
-
-void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb)
-{
- mxsfb_disable_controller(mxsfb);
- mxsfb_disable_axi_clk(mxsfb);
-}
-
-void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
- struct drm_plane_state *state)
-{
- struct drm_simple_display_pipe *pipe = &mxsfb->pipe;
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_pending_vblank_event *event;
- dma_addr_t paddr;
-
- spin_lock_irq(&crtc->dev->event_lock);
- event = crtc->state->event;
- if (event) {
- crtc->state->event = NULL;
-
- if (drm_crtc_vblank_get(crtc) == 0) {
- drm_crtc_arm_vblank_event(crtc, event);
- } else {
- drm_crtc_send_vblank_event(crtc, event);
- }
- }
- spin_unlock_irq(&crtc->dev->event_lock);
-
- paddr = mxsfb_get_fb_paddr(mxsfb);
- if (paddr) {
- mxsfb_enable_axi_clk(mxsfb);
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
- mxsfb_disable_axi_clk(mxsfb);
- }
-}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 508764fccd27..35122aef037b 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -9,30 +9,25 @@
*/
#include <linux/clk.h>
-#include <linux/component.h>
#include <linux/dma-mapping.h>
-#include <linux/list.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/dma-resv.h>
-#include <linux/spinlock.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_mode_config.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
@@ -41,6 +36,11 @@
enum mxsfb_devtype {
MXSFB_V3,
MXSFB_V4,
+ /*
+ * Starting at i.MX6 the hardware version register is gone, use the
+ * i.MX family number as the version.
+ */
+ MXSFB_V6,
};
static const struct mxsfb_devdata mxsfb_devdata[] = {
@@ -48,38 +48,28 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
.transfer_count = LCDC_V3_TRANSFER_COUNT,
.cur_buf = LCDC_V3_CUR_BUF,
.next_buf = LCDC_V3_NEXT_BUF,
- .debug0 = LCDC_V3_DEBUG0,
.hs_wdth_mask = 0xff,
.hs_wdth_shift = 24,
- .ipversion = 3,
+ .has_overlay = false,
},
[MXSFB_V4] = {
.transfer_count = LCDC_V4_TRANSFER_COUNT,
.cur_buf = LCDC_V4_CUR_BUF,
.next_buf = LCDC_V4_NEXT_BUF,
- .debug0 = LCDC_V4_DEBUG0,
.hs_wdth_mask = 0x3fff,
.hs_wdth_shift = 18,
- .ipversion = 4,
+ .has_overlay = false,
+ },
+ [MXSFB_V6] = {
+ .transfer_count = LCDC_V4_TRANSFER_COUNT,
+ .cur_buf = LCDC_V4_CUR_BUF,
+ .next_buf = LCDC_V4_NEXT_BUF,
+ .hs_wdth_mask = 0x3fff,
+ .hs_wdth_shift = 18,
+ .has_overlay = true,
},
};
-static const uint32_t mxsfb_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565
-};
-
-static const uint64_t mxsfb_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static struct mxsfb_drm_private *
-drm_pipe_to_mxsfb_drm_private(struct drm_simple_display_pipe *pipe)
-{
- return container_of(pipe, struct mxsfb_drm_private, pipe);
-}
-
void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
if (mxsfb->clk_axi)
@@ -92,8 +82,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_axi);
}
+static struct drm_framebuffer *
+mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
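+	/*
+	 * The controller has no separate scanout pitch setting, so only
+	 * accept framebuffers whose pitch is exactly the width times the
+	 * bytes per pixel of the format.
+	 */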
+ if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
+ dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
+}
+
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
+ .fb_create = mxsfb_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -102,101 +110,51 @@ static const struct drm_mode_config_helper_funcs mxsfb_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
-static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
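+/*
+ * Locate the panel or bridge connected to the LCDIF output; a panel is
+ * wrapped into a DPI panel-bridge, and the result is attached to the
+ * encoder.
+ */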
+static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb)
{
- struct drm_connector *connector;
- struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
- struct drm_device *drm = pipe->plane.dev;
-
- if (!mxsfb->connector) {
- list_for_each_entry(connector,
- &drm->mode_config.connector_list,
- head)
- if (connector->encoder == &mxsfb->pipe.encoder) {
- mxsfb->connector = connector;
- break;
- }
- }
-
- if (!mxsfb->connector) {
- dev_warn(drm->dev, "No connector attached, using default\n");
- mxsfb->connector = &mxsfb->panel_connector;
- }
-
- pm_runtime_get_sync(drm->dev);
- drm_panel_prepare(mxsfb->panel);
- mxsfb_crtc_enable(mxsfb);
- drm_panel_enable(mxsfb->panel);
-}
+ struct drm_device *drm = mxsfb->drm;
+ struct drm_connector_list_iter iter;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret;
-static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
-{
- struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
- struct drm_device *drm = pipe->plane.dev;
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_pending_vblank_event *event;
-
- drm_panel_disable(mxsfb->panel);
- mxsfb_crtc_disable(mxsfb);
- drm_panel_unprepare(mxsfb->panel);
- pm_runtime_put_sync(drm->dev);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
- spin_lock_irq(&drm->event_lock);
- event = crtc->state->event;
- if (event) {
- crtc->state->event = NULL;
- drm_crtc_send_vblank_event(crtc, event);
+ if (panel) {
+ bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
}
- spin_unlock_irq(&drm->event_lock);
-
- if (mxsfb->connector != &mxsfb->panel_connector)
- mxsfb->connector = NULL;
-}
-static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
-{
- struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ if (!bridge)
+ return -ENODEV;
- mxsfb_plane_atomic_update(mxsfb, plane_state);
-}
+ ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0);
+ if (ret) {
+ DRM_DEV_ERROR(drm->dev,
+ "failed to attach bridge: %d\n", ret);
+ return ret;
+ }
-static int mxsfb_pipe_enable_vblank(struct drm_simple_display_pipe *pipe)
-{
- struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ mxsfb->bridge = bridge;
- /* Clear and enable VBLANK IRQ */
- mxsfb_enable_axi_clk(mxsfb);
- writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
- mxsfb_disable_axi_clk(mxsfb);
+ /*
+	 * Get hold of the connector. This is a bit of a hack until the bridge
+	 * API gives us bus flags and formats.
+ */
+ drm_connector_list_iter_begin(drm, &iter);
+ mxsfb->connector = drm_connector_list_iter_next(&iter);
+ drm_connector_list_iter_end(&iter);
return 0;
}
-static void mxsfb_pipe_disable_vblank(struct drm_simple_display_pipe *pipe)
-{
- struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
-
- /* Disable and clear VBLANK IRQ */
- mxsfb_enable_axi_clk(mxsfb);
- writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- mxsfb_disable_axi_clk(mxsfb);
-}
-
-static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
- .enable = mxsfb_pipe_enable,
- .disable = mxsfb_pipe_disable,
- .update = mxsfb_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
- .enable_vblank = mxsfb_pipe_enable_vblank,
- .disable_vblank = mxsfb_pipe_disable_vblank,
-};
-
-static int mxsfb_load(struct drm_device *drm)
+static int mxsfb_load(struct drm_device *drm,
+ const struct mxsfb_devdata *devdata)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct mxsfb_drm_private *mxsfb;
@@ -207,8 +165,9 @@ static int mxsfb_load(struct drm_device *drm)
if (!mxsfb)
return -ENOMEM;
+ mxsfb->drm = drm;
drm->dev_private = mxsfb;
- mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+ mxsfb->devdata = devdata;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mxsfb->base = devm_ioremap_resource(drm->dev, res);
@@ -233,50 +192,28 @@ static int mxsfb_load(struct drm_device *drm)
pm_runtime_enable(drm->dev);
- ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (ret < 0) {
- dev_err(drm->dev, "Failed to initialise vblank\n");
- goto err_vblank;
- }
-
/* Modeset init */
drm_mode_config_init(drm);
- ret = mxsfb_create_output(drm);
+ ret = mxsfb_kms_init(mxsfb);
if (ret < 0) {
- dev_err(drm->dev, "Failed to create outputs\n");
+ dev_err(drm->dev, "Failed to initialize KMS pipeline\n");
goto err_vblank;
}
- ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
- mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
- mxsfb_modifiers, mxsfb->connector);
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
- dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ dev_err(drm->dev, "Failed to initialise vblank\n");
goto err_vblank;
}
- /*
- * Attach panel only if there is one.
- * If there is no panel attach, it must be a bridge. In this case, we
- * need a reference to its connector for a proper initialization.
- * We will do this check in pipe->enable(), since the connector won't
- * be attached to an encoder until then.
- */
+ /* Start with vertical blanking interrupt reporting disabled. */
+ drm_crtc_vblank_off(&mxsfb->crtc);
- if (mxsfb->panel) {
- ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
- goto err_vblank;
- }
- } else if (mxsfb->bridge) {
- ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
- mxsfb->bridge);
- if (ret) {
- dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
- goto err_vblank;
- }
+ ret = mxsfb_attach_bridge(mxsfb);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
@@ -294,7 +231,7 @@ static int mxsfb_load(struct drm_device *drm)
if (ret < 0) {
dev_err(drm->dev, "Failed to install IRQ handler\n");
- goto err_irq;
+ goto err_vblank;
}
drm_kms_helper_poll_init(drm);
@@ -305,8 +242,6 @@ static int mxsfb_load(struct drm_device *drm)
return 0;
-err_irq:
- drm_panel_detach(mxsfb->panel);
err_vblank:
pm_runtime_disable(drm->dev);
@@ -327,11 +262,13 @@ static void mxsfb_unload(struct drm_device *drm)
pm_runtime_disable(drm->dev);
}
-static void mxsfb_irq_preinstall(struct drm_device *drm)
+static void mxsfb_irq_disable(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- mxsfb_pipe_disable_vblank(&mxsfb->pipe);
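+	/*
+	 * Register access needs the AXI clock running, so gate it on around
+	 * the vblank disable call.
+	 */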
+ mxsfb_enable_axi_clk(mxsfb);
+ mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+ mxsfb_disable_axi_clk(mxsfb);
}
static irqreturn_t mxsfb_irq_handler(int irq, void *data)
@@ -340,17 +277,13 @@ static irqreturn_t mxsfb_irq_handler(int irq, void *data)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
u32 reg;
- mxsfb_enable_axi_clk(mxsfb);
-
reg = readl(mxsfb->base + LCDC_CTRL1);
if (reg & CTRL1_CUR_FRAME_DONE_IRQ)
- drm_crtc_handle_vblank(&mxsfb->pipe.crtc);
+ drm_crtc_handle_vblank(&mxsfb->crtc);
writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
- mxsfb_disable_axi_clk(mxsfb);
-
return IRQ_HANDLED;
}
@@ -359,8 +292,8 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver mxsfb_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = mxsfb_irq_handler,
- .irq_preinstall = mxsfb_irq_preinstall,
- .irq_uninstall = mxsfb_irq_preinstall,
+ .irq_preinstall = mxsfb_irq_disable,
+ .irq_uninstall = mxsfb_irq_disable,
DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
@@ -370,18 +303,10 @@ static struct drm_driver mxsfb_driver = {
.minor = 0,
};
-static const struct platform_device_id mxsfb_devtype[] = {
- { .name = "imx23-fb", .driver_data = MXSFB_V3, },
- { .name = "imx28-fb", .driver_data = MXSFB_V4, },
- { .name = "imx6sx-fb", .driver_data = MXSFB_V4, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
-
static const struct of_device_id mxsfb_dt_ids[] = {
- { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
- { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
- { .compatible = "fsl,imx6sx-lcdif", .data = &mxsfb_devtype[2], },
+ { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devdata[MXSFB_V3], },
+ { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devdata[MXSFB_V4], },
+ { .compatible = "fsl,imx6sx-lcdif", .data = &mxsfb_devdata[MXSFB_V6], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
@@ -396,14 +321,11 @@ static int mxsfb_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
- if (of_id)
- pdev->id_entry = of_id->data;
-
drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- ret = mxsfb_load(drm);
+ ret = mxsfb_load(drm, of_id->data);
if (ret)
goto err_free;
@@ -457,7 +379,6 @@ static const struct dev_pm_ops mxsfb_pm_ops = {
static struct platform_driver mxsfb_platform_driver = {
.probe = mxsfb_probe,
.remove = mxsfb_remove,
- .id_table = mxsfb_devtype,
.driver = {
.name = "mxsfb",
.of_match_table = mxsfb_dt_ids,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index 0b65b5194a9c..399d23e91ed1 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -8,14 +8,20 @@
#ifndef __MXSFB_DRV_H__
#define __MXSFB_DRV_H__
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane.h>
+
+struct clk;
+
struct mxsfb_devdata {
- unsigned int transfer_count;
- unsigned int cur_buf;
- unsigned int next_buf;
- unsigned int debug0;
- unsigned int hs_wdth_mask;
- unsigned int hs_wdth_shift;
- unsigned int ipversion;
+ unsigned int transfer_count;
+ unsigned int cur_buf;
+ unsigned int next_buf;
+ unsigned int hs_wdth_mask;
+ unsigned int hs_wdth_shift;
+ bool has_overlay;
};
struct mxsfb_drm_private {
@@ -26,22 +32,26 @@ struct mxsfb_drm_private {
struct clk *clk_axi;
struct clk *clk_disp_axi;
- struct drm_simple_display_pipe pipe;
- struct drm_connector panel_connector;
+ struct drm_device *drm;
+ struct {
+ struct drm_plane primary;
+ struct drm_plane overlay;
+ } planes;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector *connector;
- struct drm_panel *panel;
struct drm_bridge *bridge;
};
-int mxsfb_setup_crtc(struct drm_device *dev);
-int mxsfb_create_output(struct drm_device *dev);
+static inline struct mxsfb_drm_private *
+to_mxsfb_drm_private(struct drm_device *drm)
+{
+ return drm->dev_private;
+}
void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb);
void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb);
-void mxsfb_crtc_enable(struct mxsfb_drm_private *mxsfb);
-void mxsfb_crtc_disable(struct mxsfb_drm_private *mxsfb);
-void mxsfb_plane_atomic_update(struct mxsfb_drm_private *mxsfb,
- struct drm_plane_state *state);
+int mxsfb_kms_init(struct mxsfb_drm_private *mxsfb);
#endif /* __MXSFB_DRV_H__ */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
new file mode 100644
index 000000000000..b721b8b262ce
--- /dev/null
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This code is based on drivers/video/fbdev/mxsfb.c :
+ * Copyright (C) 2010 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "mxsfb_drv.h"
+#include "mxsfb_regs.h"
+
+/* 1 second timeout should be plenty of time for the block reset */
+#define RESET_TIMEOUT 1000000
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+
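+/*
+ * Pack the hsync pulse width into the version-specific field position used
+ * in the VDCTRL2 register.
+ */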
+static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
+{
+ return (val & mxsfb->devdata->hs_wdth_mask) <<
+ mxsfb->devdata->hs_wdth_shift;
+}
+
+/*
+ * Set up the MXSFB registers to decode the pixels out of the framebuffer and
+ * output them on the bus.
+ */
+static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_device *drm = mxsfb->drm;
+ const u32 format = mxsfb->crtc.primary->state->fb->format->format;
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ u32 ctrl, ctrl1;
+
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
+
+ ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;
+
+ /* CTRL1 contains IRQ config and status bits, preserve those. */
+ ctrl1 = readl(mxsfb->base + LCDC_CTRL1);
+ ctrl1 &= CTRL1_CUR_FRAME_DONE_IRQ_EN | CTRL1_CUR_FRAME_DONE_IRQ;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl |= CTRL_WORD_LENGTH_16;
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl |= CTRL_WORD_LENGTH_24;
+		/* Do not use packed pixels; one pixel per word instead. */
+ ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
+ break;
+ }
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ ctrl |= CTRL_BUS_WIDTH_16;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ ctrl |= CTRL_BUS_WIDTH_18;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ ctrl |= CTRL_BUS_WIDTH_24;
+ break;
+ default:
+ dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+ break;
+ }
+
+ writel(ctrl1, mxsfb->base + LCDC_CTRL1);
+ writel(ctrl, mxsfb->base + LCDC_CTRL);
+}
+
+static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ if (mxsfb->clk_disp_axi)
+ clk_prepare_enable(mxsfb->clk_disp_axi);
+ clk_prepare_enable(mxsfb->clk);
+
+ /* If it was disabled, re-enable the mode again */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
+
+ /* Enable the SYNC signals first, then the DMA engine */
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg |= VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
+}
+
+static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
+{
+ u32 reg;
+
+ /*
+	 * Even if we disable the controller here, it will keep running
+	 * until its FIFOs have run out of data.
+ */
+ writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_CLR);
+
+ readl_poll_timeout(mxsfb->base + LCDC_CTRL, reg, !(reg & CTRL_RUN),
+ 0, 1000);
+
+ reg = readl(mxsfb->base + LCDC_VDCTRL4);
+ reg &= ~VDCTRL4_SYNC_SIGNALS_ON;
+ writel(reg, mxsfb->base + LCDC_VDCTRL4);
+
+ clk_disable_unprepare(mxsfb->clk);
+ if (mxsfb->clk_disp_axi)
+ clk_disable_unprepare(mxsfb->clk_disp_axi);
+}
+
+/*
+ * Clear the bit and poll until it reads back as cleared. This is usually
+ * called with a reset register address and a mask of either SFTRST (bit 31)
+ * or CLKGATE (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ u32 reg;
+
+ writel(mask, addr + REG_CLR);
+ return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
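+/*
+ * Run the eLCDIF reset sequence: release SFTRST, ungate the clock, then
+ * release SFTRST and CLKGATE again, polling each bit until it reads back
+ * as cleared.
+ */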
+static int mxsfb_reset_block(struct mxsfb_drm_private *mxsfb)
+{
+ int ret;
+
+ ret = clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_SFTRST);
+ if (ret)
+ return ret;
+
+ writel(CTRL_CLKGATE, mxsfb->base + LCDC_CTRL + REG_CLR);
+
+ ret = clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_SFTRST);
+ if (ret)
+ return ret;
+
+ return clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_CLKGATE);
+}
+
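+/*
+ * Return the DMA address of the framebuffer's first plane, or 0 if the
+ * plane has no framebuffer or backing GEM object.
+ */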
+static dma_addr_t mxsfb_get_fb_paddr(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_gem_cma_object *gem;
+
+ if (!fb)
+ return 0;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!gem)
+ return 0;
+
+ return gem->paddr;
+}
+
+static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_device *drm = mxsfb->crtc.dev;
+ struct drm_display_mode *m = &mxsfb->crtc.state->adjusted_mode;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
+ u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
+ int err;
+
+ /*
+	 * It seems the controller can't be reprogrammed while it is still
+	 * running. Doing so may lead to shifted pictures (a FIFO issue?), so
+	 * first stop the controller and drain its FIFOs.
+ */
+
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ err = mxsfb_reset_block(mxsfb);
+ if (err)
+ return;
+
+ /* Clear the FIFOs */
+ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
+
+ if (mxsfb->devdata->has_overlay)
+ writel(0, mxsfb->base + LCDC_AS_CTRL);
+
+ mxsfb_set_formats(mxsfb);
+
+ clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
+ writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
+ TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
+ mxsfb->base + mxsfb->devdata->transfer_count);
+
+ vsync_pulse_len = m->crtc_vsync_end - m->crtc_vsync_start;
+
+ vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* Always in DOTCLOCK mode */
+ VDCTRL0_VSYNC_PERIOD_UNIT |
+ VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+ VDCTRL0_SET_VSYNC_PULSE_WIDTH(vsync_pulse_len);
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+ /* Make sure Data Enable is high active by default */
+ if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
+ vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+ /*
+	 * The DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller-centric,
+	 * while the controller's VDCTRL0_DOTCLK is display-centric:
+ * Drive on positive edge -> display samples on falling edge
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ */
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
+
+ writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+
+ /* Frame length in lines. */
+ writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
+
+ /* Line length in units of clocks or pixels. */
+ hsync_pulse_len = m->crtc_hsync_end - m->crtc_hsync_start;
+ writel(set_hsync_pulse_width(mxsfb, hsync_pulse_len) |
+ VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
+ mxsfb->base + LCDC_VDCTRL2);
+
+ writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
+ SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
+ mxsfb->base + LCDC_VDCTRL3);
+
+ writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
+ mxsfb->base + LCDC_VDCTRL4);
+}
+
+static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ bool has_primary = state->plane_mask &
+ drm_plane_mask(crtc->primary);
+
+ /* The primary plane has to be enabled when the CRTC is active. */
+ if (state->active && !has_primary)
+ return -EINVAL;
+
+	/* TODO: Is this needed? */
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_pending_vblank_event *event;
+
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ if (!event)
+ return;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
+ struct drm_device *drm = mxsfb->drm;
+ dma_addr_t paddr;
+
+ pm_runtime_get_sync(drm->dev);
+ mxsfb_enable_axi_clk(mxsfb);
+
+ drm_crtc_vblank_on(crtc);
+
+ mxsfb_crtc_mode_set_nofb(mxsfb);
+
+ /* Write cur_buf as well to avoid an initial corrupt frame */
+ paddr = mxsfb_get_fb_paddr(crtc->primary);
+ if (paddr) {
+ writel(paddr, mxsfb->base + mxsfb->devdata->cur_buf);
+ writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ }
+
+ mxsfb_enable_controller(mxsfb);
+}
+
+static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
+ struct drm_device *drm = mxsfb->drm;
+ struct drm_pending_vblank_event *event;
+
+ mxsfb_disable_controller(mxsfb);
+
+ spin_lock_irq(&drm->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&drm->event_lock);
+
+ drm_crtc_vblank_off(crtc);
+
+ mxsfb_disable_axi_clk(mxsfb);
+ pm_runtime_put_sync(drm->dev);
+}
+
+static int mxsfb_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
+
+ /* Clear and enable VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);
+
+ return 0;
+}
+
+static void mxsfb_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+}
+
+static const struct drm_crtc_helper_funcs mxsfb_crtc_helper_funcs = {
+ .atomic_check = mxsfb_crtc_atomic_check,
+ .atomic_flush = mxsfb_crtc_atomic_flush,
+ .atomic_enable = mxsfb_crtc_atomic_enable,
+ .atomic_disable = mxsfb_crtc_atomic_disable,
+};
+
+static const struct drm_crtc_funcs mxsfb_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = mxsfb_crtc_enable_vblank,
+ .disable_vblank = mxsfb_crtc_disable_vblank,
+};
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static const struct drm_encoder_funcs mxsfb_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/* -----------------------------------------------------------------------------
+ * Planes
+ */
+
+static int mxsfb_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
+ &mxsfb->crtc);
+
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_pstate)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+ dma_addr_t paddr;
+
+ paddr = mxsfb_get_fb_paddr(plane);
+ if (paddr)
+ writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+}
+
+static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_pstate)
+{
+ struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
+ struct drm_plane_state *state = plane->state;
+ dma_addr_t paddr;
+ u32 ctrl;
+
+ paddr = mxsfb_get_fb_paddr(plane);
+ if (!paddr) {
+ writel(0, mxsfb->base + LCDC_AS_CTRL);
+ return;
+ }
+
+ /*
+	 * HACK: The hardware seems to output 64 bytes of data of unknown
+	 * origin and only then proceed with the framebuffer. Until the reason
+	 * is understood, live with the 16 initial invalid pixels on the first
+	 * line and start 64 bytes into the framebuffer.
+ */
+ paddr += 64;
+
+ writel(paddr, mxsfb->base + LCDC_AS_NEXT_BUF);
+
+ /*
+ * If the plane was previously disabled, write LCDC_AS_BUF as well to
+ * provide the first buffer.
+ */
+ if (!old_pstate->fb)
+ writel(paddr, mxsfb->base + LCDC_AS_BUF);
+
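+	/*
+	 * Enable the overlay at full global alpha; the format switch below
+	 * selects embedded (per-pixel) or override (global) alpha control.
+	 */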
+ ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);
+
+ switch (state->fb->format->format) {
+ case DRM_FORMAT_XRGB4444:
+ ctrl |= AS_CTRL_FORMAT_RGB444 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ ctrl |= AS_CTRL_FORMAT_ARGB4444 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ ctrl |= AS_CTRL_FORMAT_RGB555 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ ctrl |= AS_CTRL_FORMAT_ARGB1555 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
+ break;
+ case DRM_FORMAT_RGB565:
+ ctrl |= AS_CTRL_FORMAT_RGB565 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ ctrl |= AS_CTRL_FORMAT_RGB888 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ ctrl |= AS_CTRL_FORMAT_ARGB8888 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
+ break;
+ }
+
+ writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
+}
+
+static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
+ .atomic_check = mxsfb_plane_atomic_check,
+ .atomic_update = mxsfb_plane_primary_atomic_update,
+};
+
+static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
+ .atomic_check = mxsfb_plane_atomic_check,
+ .atomic_update = mxsfb_plane_overlay_atomic_update,
+};
+
+static const struct drm_plane_funcs mxsfb_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t mxsfb_primary_plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint32_t mxsfb_overlay_plane_formats[] = {
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static const uint64_t mxsfb_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
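+/*
+ * Create the primary plane, the optional overlay plane, the CRTC and the
+ * encoder, with the primary plane wired to the CRTC.
+ */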
+int mxsfb_kms_init(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_encoder *encoder = &mxsfb->encoder;
+ struct drm_crtc *crtc = &mxsfb->crtc;
+ int ret;
+
+ drm_plane_helper_add(&mxsfb->planes.primary,
+ &mxsfb_plane_primary_helper_funcs);
+ ret = drm_universal_plane_init(mxsfb->drm, &mxsfb->planes.primary, 1,
+ &mxsfb_plane_funcs,
+ mxsfb_primary_plane_formats,
+ ARRAY_SIZE(mxsfb_primary_plane_formats),
+ mxsfb_modifiers, DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (mxsfb->devdata->has_overlay) {
+ drm_plane_helper_add(&mxsfb->planes.overlay,
+ &mxsfb_plane_overlay_helper_funcs);
+ ret = drm_universal_plane_init(mxsfb->drm,
+ &mxsfb->planes.overlay, 1,
+ &mxsfb_plane_funcs,
+ mxsfb_overlay_plane_formats,
+ ARRAY_SIZE(mxsfb_overlay_plane_formats),
+ mxsfb_modifiers, DRM_PLANE_TYPE_OVERLAY,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &mxsfb_crtc_helper_funcs);
+ ret = drm_crtc_init_with_planes(mxsfb->drm, crtc,
+ &mxsfb->planes.primary, NULL,
+ &mxsfb_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ return drm_encoder_init(mxsfb->drm, encoder, &mxsfb_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
deleted file mode 100644
index 9eca1605d11d..000000000000
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Marek Vasut <marex@denx.de>
- */
-
-#include <linux/of_graph.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "mxsfb_drv.h"
-
-static struct mxsfb_drm_private *
-drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
-{
- return container_of(connector, struct mxsfb_drm_private,
- panel_connector);
-}
-
-static int mxsfb_panel_get_modes(struct drm_connector *connector)
-{
- struct mxsfb_drm_private *mxsfb =
- drm_connector_to_mxsfb_drm_private(connector);
-
- if (mxsfb->panel)
- return drm_panel_get_modes(mxsfb->panel, connector);
-
- return 0;
-}
-
-static const struct
-drm_connector_helper_funcs mxsfb_panel_connector_helper_funcs = {
- .get_modes = mxsfb_panel_get_modes,
-};
-
-static enum drm_connector_status
-mxsfb_panel_connector_detect(struct drm_connector *connector, bool force)
-{
- struct mxsfb_drm_private *mxsfb =
- drm_connector_to_mxsfb_drm_private(connector);
-
- if (mxsfb->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static void mxsfb_panel_connector_destroy(struct drm_connector *connector)
-{
- struct mxsfb_drm_private *mxsfb =
- drm_connector_to_mxsfb_drm_private(connector);
-
- if (mxsfb->panel)
- drm_panel_detach(mxsfb->panel);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
- .detect = mxsfb_panel_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mxsfb_panel_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int mxsfb_create_output(struct drm_device *drm)
-{
- struct mxsfb_drm_private *mxsfb = drm->dev_private;
- int ret;
-
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
- &mxsfb->panel, &mxsfb->bridge);
- if (ret)
- return ret;
-
- if (mxsfb->panel) {
- mxsfb->connector = &mxsfb->panel_connector;
- mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector->polled = 0;
- drm_connector_helper_add(mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
index 932d7ea08fd5..55d28a27f912 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -27,52 +27,61 @@
#define LCDC_VDCTRL4 0xb0
#define LCDC_V4_DEBUG0 0x1d0
#define LCDC_V3_DEBUG0 0x1f0
-
-#define CTRL_SFTRST (1 << 31)
-#define CTRL_CLKGATE (1 << 30)
-#define CTRL_BYPASS_COUNT (1 << 19)
-#define CTRL_VSYNC_MODE (1 << 18)
-#define CTRL_DOTCLK_MODE (1 << 17)
-#define CTRL_DATA_SELECT (1 << 16)
-#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
-#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
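+/* Overlay ("AS", alpha surface) plane registers */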
+#define LCDC_AS_CTRL 0x210
+#define LCDC_AS_BUF 0x220
+#define LCDC_AS_NEXT_BUF 0x230
+#define LCDC_AS_CLRKEYLOW 0x240
+#define LCDC_AS_CLRKEYHIGH 0x250
+
+#define CTRL_SFTRST BIT(31)
+#define CTRL_CLKGATE BIT(30)
+#define CTRL_BYPASS_COUNT BIT(19)
+#define CTRL_VSYNC_MODE BIT(18)
+#define CTRL_DOTCLK_MODE BIT(17)
+#define CTRL_DATA_SELECT BIT(16)
+#define CTRL_BUS_WIDTH_16 (0 << 10)
+#define CTRL_BUS_WIDTH_8 (1 << 10)
+#define CTRL_BUS_WIDTH_18 (2 << 10)
+#define CTRL_BUS_WIDTH_24 (3 << 10)
#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
-#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
-#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
-#define CTRL_MASTER (1 << 5)
-#define CTRL_DF16 (1 << 3)
-#define CTRL_DF18 (1 << 2)
-#define CTRL_DF24 (1 << 1)
-#define CTRL_RUN (1 << 0)
-
-#define CTRL1_FIFO_CLEAR (1 << 21)
+#define CTRL_WORD_LENGTH_16 (0 << 8)
+#define CTRL_WORD_LENGTH_8 (1 << 8)
+#define CTRL_WORD_LENGTH_18 (2 << 8)
+#define CTRL_WORD_LENGTH_24 (3 << 8)
+#define CTRL_MASTER BIT(5)
+#define CTRL_DF16 BIT(3)
+#define CTRL_DF18 BIT(2)
+#define CTRL_DF24 BIT(1)
+#define CTRL_RUN BIT(0)
+
+#define CTRL1_FIFO_CLEAR BIT(21)
#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
-#define CTRL1_CUR_FRAME_DONE_IRQ_EN (1 << 13)
-#define CTRL1_CUR_FRAME_DONE_IRQ (1 << 9)
+#define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13)
+#define CTRL1_CUR_FRAME_DONE_IRQ BIT(9)
#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff)
-#define VDCTRL0_ENABLE_PRESENT (1 << 28)
-#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27)
-#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26)
-#define VDCTRL0_DOTCLK_ACT_FALLING (1 << 25)
-#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24)
-#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21)
-#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20)
-#define VDCTRL0_HALF_LINE (1 << 19)
-#define VDCTRL0_HALF_LINE_MODE (1 << 18)
+#define VDCTRL0_ENABLE_PRESENT BIT(28)
+#define VDCTRL0_VSYNC_ACT_HIGH BIT(27)
+#define VDCTRL0_HSYNC_ACT_HIGH BIT(26)
+#define VDCTRL0_DOTCLK_ACT_FALLING BIT(25)
+#define VDCTRL0_ENABLE_ACT_HIGH BIT(24)
+#define VDCTRL0_VSYNC_PERIOD_UNIT BIT(21)
+#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT BIT(20)
+#define VDCTRL0_HALF_LINE BIT(19)
+#define VDCTRL0_HALF_LINE_MODE BIT(18)
#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff)
#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff)
-#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29)
-#define VDCTRL3_VSYNC_ONLY (1 << 28)
+#define VDCTRL3_MUX_SYNC_SIGNALS BIT(29)
+#define VDCTRL3_VSYNC_ONLY BIT(28)
#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16)
#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff)
#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff)
@@ -80,28 +89,32 @@
#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */
#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */
-#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18)
+#define VDCTRL4_SYNC_SIGNALS_ON BIT(18)
#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff)
-#define DEBUG0_HSYNC (1 < 26)
-#define DEBUG0_VSYNC (1 < 25)
+#define DEBUG0_HSYNC BIT(26)
+#define DEBUG0_VSYNC BIT(25)
+
+#define AS_CTRL_PS_DISABLE BIT(23)
+#define AS_CTRL_ALPHA_INVERT BIT(20)
+#define AS_CTRL_ALPHA(a) (((a) & 0xff) << 8)
+#define AS_CTRL_FORMAT_RGB565 (0xe << 4)
+#define AS_CTRL_FORMAT_RGB444 (0xd << 4)
+#define AS_CTRL_FORMAT_RGB555 (0xc << 4)
+#define AS_CTRL_FORMAT_ARGB4444 (0x9 << 4)
+#define AS_CTRL_FORMAT_ARGB1555 (0x8 << 4)
+#define AS_CTRL_FORMAT_RGB888 (0x4 << 4)
+#define AS_CTRL_FORMAT_ARGB8888 (0x0 << 4)
+#define AS_CTRL_ENABLE_COLORKEY BIT(3)
+#define AS_CTRL_ALPHA_CTRL_ROP (3 << 1)
+#define AS_CTRL_ALPHA_CTRL_MULTIPLY (2 << 1)
+#define AS_CTRL_ALPHA_CTRL_OVERRIDE (1 << 1)
+#define AS_CTRL_ALPHA_CTRL_EMBEDDED (0 << 1)
+#define AS_CTRL_AS_ENABLE BIT(0)
#define MXSFB_MIN_XRES 120
#define MXSFB_MIN_YRES 120
#define MXSFB_MAX_XRES 0xffff
#define MXSFB_MAX_YRES 0xffff
-#define RED 0
-#define GREEN 1
-#define BLUE 2
-#define TRANSP 3
-
-#define STMLCDIF_8BIT 1 /* pixel data bus to the display is of 8 bit width */
-#define STMLCDIF_16BIT 0 /* pixel data bus to the display is of 16 bit width */
-#define STMLCDIF_18BIT 2 /* pixel data bus to the display is of 18 bit width */
-#define STMLCDIF_24BIT 3 /* pixel data bus to the display is of 24 bit width */
-
-#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
-
#endif /* __MXSFB_REGS_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6416b6907aeb..f9e962fd94d0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENOMEM;
if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
goto fail_free;
}
@@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
&nv_crtc->cursor.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index ffdd447d8706..22d10f328559 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -419,7 +419,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index f9f4482c79b5..42687ea2a4ca 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -184,7 +184,8 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ struct nouveau_connector *nv_connector =
+ nv04_encoder_get_connector(nv_encoder);
if (!nv_connector->native_mode ||
nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
@@ -478,7 +479,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
@@ -591,7 +592,7 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
struct nouveau_connector *connector =
- nouveau_encoder_connector_get(nv_encoder);
+ nv04_encoder_get_connector(nv_encoder);
if (connector && connector->native_mode)
call_lvds_script(dev, nv_encoder->dcb, head,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 900ab69df7e8..7739f46470d3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -35,9 +35,28 @@
#include <nvif/if0004.h>
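+/*
+ * Look up the connector currently driven by this encoder by walking the
+ * connector list under the connector list iterator.
+ */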
+struct nouveau_connector *
+nv04_encoder_get_connector(struct nouveau_encoder *encoder)
+{
+ struct drm_device *dev = to_drm_encoder(encoder)->dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct nouveau_connector *nv_connector = NULL;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->encoder == to_drm_encoder(encoder))
+ nv_connector = nouveau_connector(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return nv_connector;
+}
+
static void
-nv04_display_fini(struct drm_device *dev, bool suspend)
+nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_display *disp = nv04_display(dev);
struct drm_crtc *crtc;
@@ -49,6 +68,9 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+ if (!runtime)
+ cancel_work_sync(&drm->hpd_work);
+
if (!suspend)
return;
@@ -112,7 +134,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
@@ -122,7 +144,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!nv_crtc->cursor.nvbo)
continue;
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 495d3284e876..5ace5e906949 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -6,6 +6,8 @@
#include "nouveau_display.h"
+struct nouveau_encoder;
+
enum nv04_fp_display_regs {
FP_DISPLAY_END,
FP_TOTAL,
@@ -93,6 +95,8 @@ nv04_display(struct drm_device *dev)
/* nv04_display.c */
int nv04_display_create(struct drm_device *);
+struct nouveau_connector *
+nv04_encoder_get_connector(struct nouveau_encoder *nv_encoder);
/* nv04_crtc.c */
int nv04_crtc_create(struct drm_device *, int index);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 193ba9498f3d..37e63e98cd08 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -142,7 +142,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
@@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index b701a4d8fe76..3ba7b59580d5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -172,7 +172,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
- nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 3a9489ed6544..be28e7bd7490 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -599,7 +599,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- nouveau_encoder_connector_get(nv_encoder)->base.name,
+ nv04_encoder_get_connector(nv_encoder)->base.name,
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 498622c0c670..f75088186fba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -44,6 +44,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
int core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_read_caps(struct nv50_disp *disp);
int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
int core507d_update(struct nv50_core *, u32 *, bool);
@@ -55,6 +56,7 @@ extern const struct nv50_outp_func pior507d;
int core827d_new(struct nouveau_drm *, s32, struct nv50_core **);
int core907d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp);
extern const struct nv50_outp_func dac907d;
extern const struct nv50_outp_func sor907d;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index 248edf69e168..e6f16a7750f0 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -78,19 +78,56 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
}
int
-core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+core507d_read_caps(struct nv50_disp *disp)
{
struct nvif_push *push = disp->core->chan.push;
int ret;
- if ((ret = PUSH_WAIT(push, 2)))
+ ret = PUSH_WAIT(push, 6);
+ if (ret)
return ret;
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |
+ NVVAL(NV507D, SET_NOTIFIER_CONTROL, OFFSET, NV50_DISP_CORE_NTFY >> 2) |
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, ENABLE));
+
PUSH_MTHD(push, NV507D, GET_CAPABILITIES, 0x00000000);
+
+ PUSH_MTHD(push, NV507D, SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
+
return PUSH_KICK(push);
}
int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
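+	/*
+	 * Clear the notifier DONE flag, kick off the capabilities read, then
+	 * poll for up to two seconds for the hardware to signal completion.
+	 */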
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1,
+ NVDEF(NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV_DISP_CORE_NOTIFIER_1, CAPABILITIES_1, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
+int
core507d_init(struct nv50_core *core)
{
struct nvif_push *push = core->chan.push;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index b17c03529c78..8564d4dffaff 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -22,11 +22,45 @@
#include "core.h"
#include "head.h"
+#include <nvif/push507c.h>
+#include <nvif/timer.h>
+
+#include <nvhw/class/cl907d.h>
+
+#include "nouveau_bo.h"
+
+int
+core907d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ struct nv50_core *core = disp->core;
+ struct nouveau_bo *bo = disp->sync;
+ s64 time;
+ int ret;
+
+ NVBO_WR32(bo, NV50_DISP_CORE_NTFY, NV907D_CORE_NOTIFIER_3, CAPABILITIES_4,
+ NVDEF(NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, FALSE));
+
+ ret = core507d_read_caps(disp);
+ if (ret < 0)
+ return ret;
+
+ time = nvif_msec(core->chan.base.device, 2000ULL,
+ if (NVBO_TD32(bo, NV50_DISP_CORE_NTFY,
+ NV907D_CORE_NOTIFIER_3, CAPABILITIES_4, DONE, ==, TRUE))
+ break;
+ usleep_range(1, 2);
+ );
+ if (time < 0)
+ NV_ERROR(drm, "core caps notifier timeout\n");
+
+ return 0;
+}
+
static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
- .caps_init = core507d_caps_init,
+ .caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index 66846f372080..1cd3a2a35dfb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,7 +26,7 @@ static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
- .caps_init = core507d_caps_init,
+ .caps_init = core907d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 1ed242070001..36d6b6093d16 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -417,11 +417,45 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
return 0;
}
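+/*
+ * Helpers to look up the connector bound to this encoder in the new or old
+ * atomic state; they return NULL if no connector is assigned.
+ */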
+struct nouveau_connector *
+nv50_outp_get_new_connector(struct nouveau_encoder *outp,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_encoder *encoder = to_drm_encoder(outp);
+ int i;
+
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->best_encoder == encoder)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
+struct nouveau_connector *
+nv50_outp_get_old_connector(struct nouveau_encoder *outp,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_encoder *encoder = to_drm_encoder(outp);
+ int i;
+
+ for_each_old_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->best_encoder == encoder)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
/******************************************************************************
* DAC
*****************************************************************************/
static void
-nv50_dac_disable(struct drm_encoder *encoder)
+nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
@@ -433,7 +467,7 @@ nv50_dac_disable(struct drm_encoder *encoder)
}
static void
-nv50_dac_enable(struct drm_encoder *encoder)
+nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -491,8 +525,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
- .enable = nv50_dac_enable,
- .disable = nv50_dac_disable,
+ .atomic_enable = nv50_dac_enable,
+ .atomic_disable = nv50_dac_disable,
.detect = nv50_dac_detect
};
@@ -558,16 +592,31 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
- struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
struct nouveau_crtc *nv_crtc;
+ struct drm_connector_list_iter conn_iter;
int ret = 0;
*enabled = false;
+
drm_for_each_encoder(encoder, drm->dev) {
+ struct nouveau_connector *nv_connector = NULL;
+
nv_encoder = nouveau_encoder(encoder);
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->state->best_encoder == encoder) {
+ nv_connector = nouveau_connector(connector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ if (!nv_connector)
+ continue;
+
nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+ if (!nv_crtc || nv_encoder->or != port ||
nv_crtc->index != dev_id)
continue;
*enabled = nv_encoder->audio;
@@ -578,6 +627,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
}
break;
}
+
return ret;
}
@@ -671,7 +721,8 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
}
static void
-nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+ struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -692,7 +743,7 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
(0x0100 << nv_crtc->index),
};
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
@@ -729,7 +780,8 @@ nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
}
static void
-nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
+nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
+ struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -758,7 +810,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
int ret;
int size;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
@@ -804,7 +856,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
- nv50_audio_enable(encoder, mode);
+ nv50_audio_enable(encoder, state, mode);
/* If SCDC is supported by the downstream monitor, update
* divider / scrambling settings to what we programmed above.
@@ -833,16 +885,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
-struct nv50_mstm {
- struct nouveau_encoder *outp;
-
- struct drm_dp_mst_topology_mgr mgr;
-
- bool modified;
- bool disabled;
- int links;
-};
-
struct nv50_mstc {
struct nv50_mstm *mstm;
struct drm_dp_mst_port *port;
@@ -1013,7 +1055,7 @@ nv50_dp_bpc_to_depth(unsigned int bpc)
}
static void
-nv50_msto_enable(struct drm_encoder *encoder)
+nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_head *head = nv50_head(encoder->crtc);
struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
@@ -1059,7 +1101,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
}
static void
-nv50_msto_disable(struct drm_encoder *encoder)
+nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
@@ -1076,8 +1118,8 @@ nv50_msto_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
- .disable = nv50_msto_disable,
- .enable = nv50_msto_enable,
+ .atomic_disable = nv50_msto_disable,
+ .atomic_enable = nv50_msto_enable,
.atomic_check = nv50_msto_atomic_check,
};
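
The move from .enable/.disable to their atomic_ variants changes the callback
signature: as used throughout this patch, the helpers now receive the in-flight
atomic state,

	void (*atomic_enable)(struct drm_encoder *encoder,
			      struct drm_atomic_state *state);
	void (*atomic_disable)(struct drm_encoder *encoder,
			       struct drm_atomic_state *state);

which is what lets the enable/disable paths resolve their connector from the
committed state (nv50_outp_get_new_connector()/nv50_outp_get_old_connector())
instead of chasing stale legacy pointers.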
@@ -1222,7 +1264,10 @@ nv50_mstc_detect(struct drm_connector *connector,
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port);
+ if (ret != connector_status_connected)
+ goto out;
+out:
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
@@ -1371,41 +1416,51 @@ nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
};
-void
-nv50_mstm_service(struct nv50_mstm *mstm)
+bool
+nv50_mstm_service(struct nouveau_drm *drm,
+ struct nouveau_connector *nv_connector,
+ struct nv50_mstm *mstm)
{
- struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
- bool handled = true;
- int ret;
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ bool handled = true, ret = true;
+ int rc;
u8 esi[8] = {};
- if (!aux)
- return;
-
while (handled) {
- ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
- if (ret != 8) {
- drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
- return;
+ rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
+ if (rc != 8) {
+ ret = false;
+ break;
}
drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
if (!handled)
break;
- drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
+ rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
+ 3);
+ if (rc != 3) {
+ ret = false;
+ break;
+ }
}
+
+ if (!ret)
+ NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
+ nv_connector->base.name, rc);
+
+ return ret;
}
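
With the bool return, callers can react to a sink that stops answering ESI
reads/writes. A minimal hypothetical caller (not the patch's actual
nouveau_dp_irq(), which also handles CEC and the hpd_irq_lock):

	static void
	handle_dp_irq(struct nouveau_drm *drm,
		      struct nouveau_connector *nv_connector,
		      struct nv50_mstm *mstm)
	{
		if (mstm && mstm->is_mst &&
		    !nv50_mstm_service(drm, nv_connector, mstm))
			nv50_mstm_remove(mstm); /* sink gone, drop MST */
	}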
void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
- if (mstm)
- drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ mstm->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}
static int
-nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
+nv50_mstm_enable(struct nv50_mstm *mstm, int state)
{
struct nouveau_encoder *outp = mstm->outp;
struct {
@@ -1420,106 +1475,85 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
};
struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
struct nvif_object *disp = &drm->display->disp.object;
- int ret;
-
- if (dpcd >= 0x12) {
- /* Even if we're enabling MST, start with disabling the
- * branching unit to clear any sink-side MST topology state
- * that wasn't set by us
- */
- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
- if (ret < 0)
- return ret;
-
- if (state) {
- /* Now, start initializing */
- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
- DP_MST_EN);
- if (ret < 0)
- return ret;
- }
- }
return nvif_mthd(disp, 0, &args, sizeof(args));
}
int
-nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
+nv50_mstm_detect(struct nouveau_encoder *outp)
{
+ struct nv50_mstm *mstm = outp->dp.mstm;
struct drm_dp_aux *aux;
int ret;
- bool old_state, new_state;
- u8 mstm_ctrl;
- if (!mstm)
+ if (!mstm || !mstm->can_mst)
return 0;
- mutex_lock(&mstm->mgr.lock);
-
- old_state = mstm->mgr.mst_state;
- new_state = old_state;
aux = mstm->mgr.aux;
- if (old_state) {
- /* Just check that the MST hub is still as we expect it */
- ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
- if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
- DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
- new_state = false;
- }
- } else if (dpcd[0] >= 0x12) {
- ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
- if (ret < 0)
- goto probe_error;
-
- if (!(dpcd[1] & DP_MST_CAP))
- dpcd[0] = 0x11;
- else
- new_state = allow;
- }
-
- if (new_state == old_state) {
- mutex_unlock(&mstm->mgr.lock);
- return new_state;
- }
-
- ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
- if (ret)
- goto probe_error;
-
- mutex_unlock(&mstm->mgr.lock);
+ /* Clear any leftover MST state we didn't set ourselves by first
+ * disabling MST if it was already enabled
+ */
+ ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
+ if (ret < 0)
+ return ret;
- ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
+ /* And start enabling */
+ ret = nv50_mstm_enable(mstm, true);
if (ret)
- return nv50_mstm_enable(mstm, dpcd[0], 0);
+ return ret;
- return new_state;
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
+ if (ret) {
+ nv50_mstm_enable(mstm, false);
+ return ret;
+ }
-probe_error:
- mutex_unlock(&mstm->mgr.lock);
- return ret;
+ mstm->is_mst = true;
+ return 1;
}
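
On the DPCD side the sequence above is just two writes to DP_MSTM_CTRL: clear
it to flush any sink-side topology state, then set DP_MST_EN. As a standalone
sketch, assuming a valid struct drm_dp_aux:

	#include <drm/drm_dp_helper.h>

	static int dp_branch_enable_mst(struct drm_dp_aux *aux)
	{
		ssize_t ret;

		/* Clear stale sink-side MST state first. */
		ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
		if (ret < 0)
			return ret;

		ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, DP_MST_EN);
		return ret < 0 ? ret : 0;
	}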
static void
-nv50_mstm_fini(struct nv50_mstm *mstm)
+nv50_mstm_fini(struct nouveau_encoder *outp)
{
- if (mstm && mstm->mgr.mst_state)
+ struct nv50_mstm *mstm = outp->dp.mstm;
+
+ if (!mstm)
+ return;
+
+ /* Don't change the MST state of this connector until we've finished
+ * resuming, since we can't safely grab hpd_irq_lock in our resume
+ * path to protect mstm->is_mst without potentially deadlocking
+ */
+ mutex_lock(&outp->dp.hpd_irq_lock);
+ mstm->suspended = true;
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+
+ if (mstm->is_mst)
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
+nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
{
- int ret;
+ struct nv50_mstm *mstm = outp->dp.mstm;
+ int ret = 0;
- if (!mstm || !mstm->mgr.mst_state)
+ if (!mstm)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
- if (ret == -1) {
- drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
- drm_kms_helper_hotplug_event(mstm->mgr.dev);
+ if (mstm->is_mst) {
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
+ if (ret == -1)
+ nv50_mstm_remove(mstm);
}
+
+ mutex_lock(&outp->dp.hpd_irq_lock);
+ mstm->suspended = false;
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+
+ if (ret == -1)
+ drm_kms_helper_hotplug_event(mstm->mgr.dev);
}
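
The suspended flag pairs with the HPD/IRQ side: any path servicing MST
interrupts is expected to take hpd_irq_lock, see suspended == true, and bail
out so the resume path can restore the topology without racing. A sketch of
that check (pattern only, not code from the patch):

	mutex_lock(&outp->dp.hpd_irq_lock);
	if (outp->dp.mstm && outp->dp.mstm->suspended) {
		mutex_unlock(&outp->dp.hpd_irq_lock);
		return;	/* nv50_mstm_init() will reprobe for us */
	}
	/* ... service the ESI/short-pulse interrupt ... */
	mutex_unlock(&outp->dp.hpd_irq_lock);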
static void
@@ -1541,17 +1575,6 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
int ret;
- u8 dpcd;
-
- /* This is a workaround for some monitors not functioning
- * correctly in MST mode on initial module load. I think
- * some bad interaction with the VBIOS may be responsible.
- *
- * A good ol' off and on again seems to work here ;)
- */
- ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
- if (ret >= 0 && dpcd >= 0x12)
- drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
return -ENOMEM;
@@ -1590,23 +1613,27 @@ nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
}
static void
-nv50_sor_disable(struct drm_encoder *encoder)
+nv50_sor_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nouveau_connector *nv_connector =
+ nv50_outp_get_old_connector(nv_encoder, state);
nv_encoder->crtc = NULL;
if (nv_crtc) {
- struct nvkm_i2c_aux *aux = nv_encoder->aux;
+ struct drm_dp_aux *aux = &nv_connector->aux;
u8 pwr;
- if (aux) {
- int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+
if (ret == 0) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D3;
- nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
}
@@ -1618,7 +1645,7 @@ nv50_sor_disable(struct drm_encoder *encoder)
}
static void
-nv50_sor_enable(struct drm_encoder *encoder)
+nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1642,7 +1669,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
- nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
nv_encoder->crtc = encoder->crtc;
if ((disp->disp->object.oclass == GT214_DISP ||
@@ -1669,7 +1696,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
}
- nv50_hdmi_enable(&nv_encoder->base.base, mode);
+ nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
break;
case DCB_OUTPUT_LVDS:
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
@@ -1710,7 +1737,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- nv50_audio_enable(encoder, mode);
+ nv50_audio_enable(encoder, state, mode);
break;
default:
BUG();
@@ -1723,8 +1750,8 @@ nv50_sor_enable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
- .enable = nv50_sor_enable,
- .disable = nv50_sor_disable,
+ .atomic_enable = nv50_sor_enable,
+ .atomic_disable = nv50_sor_disable,
};
static void
@@ -1733,6 +1760,10 @@ nv50_sor_destroy(struct drm_encoder *encoder)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
+
kfree(encoder);
}
@@ -1792,6 +1823,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+ mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
@@ -1839,7 +1872,7 @@ nv50_pior_atomic_check(struct drm_encoder *encoder,
}
static void
-nv50_pior_disable(struct drm_encoder *encoder)
+nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
@@ -1851,7 +1884,7 @@ nv50_pior_disable(struct drm_encoder *encoder)
}
static void
-nv50_pior_enable(struct drm_encoder *encoder)
+nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -1887,14 +1920,14 @@ nv50_pior_enable(struct drm_encoder *encoder)
}
core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh);
- nv_encoder->crtc = encoder->crtc;
+ nv_encoder->crtc = &nv_crtc->base;
}
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
- .enable = nv50_pior_enable,
- .disable = nv50_pior_disable,
+ .atomic_enable = nv50_pior_enable,
+ .atomic_disable = nv50_pior_disable,
};
static void
@@ -2035,6 +2068,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_atomic_helper_calc_timestamping_constants(state);
if (atom->lock_core)
mutex_lock(&disp->mutex);
@@ -2083,7 +2117,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
outp->clr.mask, outp->set.mask);
if (outp->clr.mask) {
- help->disable(encoder);
+ help->atomic_disable(encoder, state);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
nv50_disp_atomic_commit_wndw(state, interlock);
@@ -2122,7 +2156,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
outp->set.mask, outp->clr.mask);
if (outp->set.mask) {
- help->enable(encoder);
+ help->atomic_enable(encoder, state);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
@@ -2490,9 +2524,9 @@ nv50_disp_func = {
*****************************************************************************/
static void
-nv50_display_fini(struct drm_device *dev, bool suspend)
+nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
- struct nouveau_encoder *nv_encoder;
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
struct drm_plane *plane;
@@ -2504,11 +2538,12 @@ nv50_display_fini(struct drm_device *dev, bool suspend)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
- nv_encoder = nouveau_encoder(encoder);
- nv50_mstm_fini(nv_encoder->dp.mstm);
- }
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
+ nv50_mstm_fini(nouveau_encoder(encoder));
}
+
+ if (!runtime)
+ cancel_work_sync(&drm->hpd_work);
}
static int
@@ -2525,7 +2560,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm, runtime);
+ nv50_mstm_init(nv_encoder, runtime);
}
}
@@ -2587,10 +2622,11 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 447ecc9fec42..0356474ad6f6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return 0;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
index 2e444bac701d..6a463f308b64 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl507d.h
@@ -32,7 +32,10 @@
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_DONE_TRUE 0x00000001
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_R0 15:1
#define NV_DISP_CORE_NOTIFIER_1_COMPLETION_0_TIMESTAMP 29:16
-
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1 0x00000001
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE 0:0
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_FALSE 0x00000000
+#define NV_DISP_CORE_NOTIFIER_1_CAPABILITIES_1_DONE_TRUE 0x00000001
// class methods
#define NV507D_UPDATE (0x00000080)
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
index 34bc3eafac7d..79aff6ff3138 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h
@@ -24,6 +24,10 @@
#ifndef _cl907d_h_
#define _cl907d_h_
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000
+#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0
#define NV907D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 21537ca1dd39..9a5be6f32424 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
- ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
+ false);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7806278dce57..56b335a55966 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -44,6 +44,9 @@
#include <nvif/if500b.h>
#include <nvif/if900b.h>
+static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+ struct ttm_resource *reg);
+
/*
* NV10-NV40 tiling helpers
*/
@@ -137,6 +140,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct nouveau_bo *nvbo = nouveau_bo(bo);
WARN_ON(nvbo->pin_refcnt > 0);
+ nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
/*
@@ -158,8 +162,7 @@ roundup_64(u64 x, u32 y)
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->client.device;
@@ -192,7 +195,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
@@ -218,7 +221,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
* into in nouveau_gem_new().
*/
- if (flags & TTM_PL_FLAG_UNCACHED) {
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
@@ -258,9 +261,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT) &&
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
@@ -287,13 +290,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
}
nvbo->page = vmm->page[pi].shift;
- nouveau_bo_fixup_align(nvbo, flags, align, size);
+ nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
}
int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
@@ -303,7 +306,8 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
- nouveau_bo_placement_set(nvbo, flags, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
+ INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
&nvbo->placement, align >> PAGE_SHIFT, false,
@@ -318,19 +322,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct dma_resv *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@@ -339,27 +343,48 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+ uint32_t domain, uint32_t flags)
{
*n = 0;
- if (type & TTM_PL_FLAG_VRAM)
- pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
- if (type & TTM_PL_FLAG_TT)
- pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
- if (type & TTM_PL_FLAG_SYSTEM)
- pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ struct nvif_mmu *mmu = &drm->client.mmu;
+
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+ /* Some BARs do not support being ioremapped WC */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
+ pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = flags;
+
+ if (drm->agp.bridge)
+ pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[(*n)++].flags = flags;
+ }
}
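
After this rework the uapi domain bits map straight onto TTM memory types:
NOUVEAU_GEM_DOMAIN_VRAM -> TTM_PL_VRAM, _GART -> TTM_PL_TT, _CPU ->
TTM_PL_SYSTEM, with _COHERENT folded into the caching flags. A typical
allocation now reads (sketch; size and drm assumed in scope):

	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(&drm->client, size, 0,
			     NOUVEAU_GEM_DOMAIN_VRAM, /* -> TTM_PL_VRAM */
			     0, 0, NULL, NULL, &nvbo);
	if (ret == 0)
		ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);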
static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
@@ -386,26 +411,28 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
}
void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+ uint32_t busy)
{
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_placement *pl = &nvbo->placement;
uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
TTM_PL_MASK_CACHING) |
(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement,
- type, flags);
+ set_placement_list(drm, nvbo->placements, &pl->num_placement,
+ domain, flags);
pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- type | busy, flags);
+ set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
+ domain | busy, flags);
- set_placement_range(nvbo, type);
+ set_placement_range(nvbo, domain);
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
@@ -417,7 +444,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- memtype == TTM_PL_FLAG_VRAM && contig) {
+ domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
if (!nvbo->contig) {
nvbo->contig = true;
force = true;
@@ -426,10 +453,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (nvbo->pin_refcnt) {
- if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ bool error = evict;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+ break;
+ case TTM_PL_TT:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
+ default:
+ break;
+ }
+
+ if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
+ bo->mem.mem_type, domain);
ret = -EBUSY;
}
nvbo->pin_refcnt++;
@@ -437,14 +476,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (evict) {
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
}
nvbo->pin_refcnt++;
- nouveau_bo_placement_set(nvbo, memtype, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
/* drop pin_refcnt temporarily, so we don't trip the assertion
* in nouveau_bo_move() that makes sure we're not trying to
@@ -490,7 +529,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ref)
goto out;
- nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+ break;
+ case TTM_PL_TT:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+ break;
+ default:
+ break;
+ }
ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
@@ -574,6 +622,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
PAGE_SIZE, DMA_FROM_DEVICE);
}
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ list_del_init(&nvbo->io_reserve_lru);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
@@ -647,63 +715,33 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
+nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+ struct ttm_resource *reg)
{
+#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvif_mmu *mmu = &drm->client.mmu;
-
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = 0;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
-
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- /* Some BARs do not support being ioremapped WC */
- const u8 type = mmu->type[drm->ttm.type_vram].type;
- if (type & NVIF_MEM_UNCACHED) {
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
- }
-
- man->func = &nouveau_vram_manager;
- man->use_io_reserve_lru = true;
- } else {
- man->func = &ttm_bo_manager_func;
- }
- break;
- case TTM_PL_TT:
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
- man->func = &nouveau_gart_manager;
- else
- if (!drm->agp.bridge)
- man->func = &nv04_gart_manager;
- else
- man->func = &ttm_bo_manager_func;
+#endif
+ if (!reg)
+ return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+ if (drm->agp.bridge)
+ return ttm_agp_bind(ttm, reg);
+#endif
+ return nouveau_sgdma_bind(bdev, ttm, reg);
+}
- if (drm->agp.bridge) {
- man->flags = 0;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- } else {
- man->flags = 0;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- }
+static void
+nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
- break;
- default:
- return -EINVAL;
+ if (drm->agp.bridge) {
+ ttm_agp_unbind(ttm);
+ return;
}
- return 0;
+#endif
+ nouveau_sgdma_unbind(bdev, ttm);
}
static void
@@ -713,11 +751,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
- TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ NOUVEAU_GEM_DOMAIN_CPU);
break;
default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
break;
}
@@ -726,7 +764,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *reg)
+ struct ttm_resource *reg)
{
struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
struct nouveau_mem *new_mem = nouveau_mem(reg);
@@ -758,7 +796,7 @@ done:
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+ bool no_wait_gpu, struct ttm_resource *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
@@ -768,7 +806,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
/* create temporary vmas for the transfer and attach them to the
* old nvkm_mem node, these will get cleaned up after ttm has
- * destroyed the ttm_mem_reg
+ * destroyed the ttm_resource
*/
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_reg);
@@ -785,7 +823,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
if (ret == 0) {
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
- evict,
+ evict, false,
new_reg);
nouveau_fence_unref(&fence);
}
@@ -804,7 +842,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
s32 oclass;
int (*exec)(struct nouveau_channel *,
struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -865,16 +903,17 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+ bool no_wait_gpu, struct ttm_resource *new_reg)
{
struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ .mem_type = TTM_PL_TT,
+ .flags = TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
- struct ttm_mem_reg tmp_reg;
+ struct ttm_resource tmp_reg;
int ret;
placement.num_placement = placement.num_busy_placement = 1;
@@ -886,7 +925,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ if (ret)
+ goto out;
+
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
if (ret)
goto out;
@@ -896,22 +939,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
- ttm_bo_mem_put(bo, &tmp_reg);
+ ttm_resource_free(bo, &tmp_reg);
return ret;
}
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+ bool no_wait_gpu, struct ttm_resource *new_reg)
{
struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+ .mem_type = TTM_PL_TT,
+ .flags = TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
- struct ttm_mem_reg tmp_reg;
+ struct ttm_resource tmp_reg;
int ret;
placement.num_placement = placement.num_busy_placement = 1;
@@ -932,13 +976,13 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
out:
- ttm_bo_mem_put(bo, &tmp_reg);
+ ttm_resource_free(bo, &tmp_reg);
return ret;
}
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
- struct ttm_mem_reg *new_reg)
+ struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -948,6 +992,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
if (bo->destroy != nouveau_bo_del_ttm)
return;
+ nouveau_bo_del_io_reserve_lru(bo);
+
if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
mem->mem.page == nvbo->page) {
list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -970,7 +1016,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
}
static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
struct nouveau_drm_tile **new_tile)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -1006,11 +1052,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_reg)
+ struct ttm_resource *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_reg = &bo->mem;
+ struct ttm_resource *old_reg = &bo->mem;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
@@ -1029,9 +1075,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Fake bo copy. */
if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
- BUG_ON(bo->mem.mm_node != NULL);
- bo->mem = *new_reg;
- new_reg->mm_node = NULL;
+ ttm_bo_move_null(bo, new_reg);
goto out;
}
@@ -1078,38 +1122,60 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
filp->private_data);
}
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+ struct ttm_resource *reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ switch (reg->mem_type) {
+ case TTM_PL_TT:
+ if (mem->kind)
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ case TTM_PL_VRAM:
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
- reg->bus.addr = NULL;
- reg->bus.offset = 0;
- reg->bus.size = reg->num_pages << PAGE_SHIFT;
- reg->bus.base = 0;
- reg->bus.is_iomem = false;
-
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
- return 0;
+ ret = 0;
+ goto out;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- reg->bus.offset = reg->start << PAGE_SHIFT;
- reg->bus.base = drm->agp.base;
+ reg->bus.offset = (reg->start << PAGE_SHIFT) +
+ drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
- if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+ if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+ !mem->kind) {
/* untiled */
+ ret = 0;
break;
+ }
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
- reg->bus.offset = reg->start << PAGE_SHIFT;
- reg->bus.base = device->func->resource_addr(device, 1);
+ reg->bus.offset = (reg->start << PAGE_SHIFT) +
+ device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
union {
@@ -1118,7 +1184,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
} args;
u64 handle, length;
u32 argc = 0;
- int ret;
switch (mem->mem.object.oclass) {
case NVIF_CLASS_MEM_NV50:
@@ -1144,39 +1209,46 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
&handle, &length);
if (ret != 1) {
if (WARN_ON(ret == 0))
- return -EINVAL;
- return ret;
+ ret = -EINVAL;
+ goto out;
}
- reg->bus.base = 0;
reg->bus.offset = handle;
+ ret = 0;
}
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+
+out:
+ if (ret == -ENOSPC) {
+ struct nouveau_bo *nvbo;
+
+ nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+ typeof(*nvbo),
+ io_reserve_lru);
+ if (nvbo) {
+ list_del_init(&nvbo->io_reserve_lru);
+ drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+ bdev->dev_mapping);
+ nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+ goto retry;
+ }
+
+ }
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
+ return ret;
}
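
The retry loop above turns the BAR aperture into a small managed cache: on
-ENOSPC the least-recently-used buffer on io_reserve_lru is unmapped
(drm_vma_node_unmap() kills any CPU mappings, so the next fault re-reserves
it), its I/O reservation is freed, and the reservation is retried. The generic
shape, with invented placeholder helpers:

	/* pattern sketch only; try_reserve()/evict_coldest() stand in
	 * for the driver-specific steps above */
	mutex_lock(&lock);
	do {
		ret = try_reserve(obj);
	} while (ret == -ENOSPC && evict_coldest(&lru));
	mutex_unlock(&lock);

Holding io_reserve_mutex across the whole reserve/evict/retry cycle is what
keeps this safe against the free path below, which takes the same mutex.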
static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_mem *mem = nouveau_mem(reg);
- if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
- switch (reg->mem_type) {
- case TTM_PL_TT:
- if (mem->kind)
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- case TTM_PL_VRAM:
- nvif_object_unmap_handle(&mem->mem.object);
- break;
- default:
- break;
- }
- }
+ mutex_lock(&drm->ttm.io_reserve_mutex);
+ nouveau_ttm_io_mem_free_locked(drm, reg);
+ mutex_unlock(&drm->ttm.io_reserve_mutex);
}
static int
@@ -1197,7 +1269,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
@@ -1221,37 +1294,36 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->busy_placements[i].lpfn = mappable;
}
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
return nouveau_bo_validate(nvbo, false, false);
}
static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
- if (ttm->state != tt_unpopulated)
+ if (ttm_tt_is_populated(ttm))
return 0;
if (slave && ttm->sg) {
/* make userspace faulting work */
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
ttm_dma->dma_address, ttm->num_pages);
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
- drm = nouveau_bdev(ttm->bdev);
+ drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- return ttm_agp_tt_populate(ttm, ctx);
+ return ttm_pool_populate(ttm, ctx);
}
#endif
@@ -1260,51 +1332,27 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
return ttm_dma_populate((void *)ttm, dev, ctx);
}
#endif
-
- r = ttm_pool_populate(ttm, ctx);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- dma_addr_t addr;
-
- addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, addr)) {
- while (i--) {
- dma_unmap_page(dev, ttm_dma->dma_address[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- ttm_dma->dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
-
- ttm_dma->dma_address[i] = addr;
- }
- return 0;
+ return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
}
static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (slave)
return;
- drm = nouveau_bdev(ttm->bdev);
+ drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- ttm_agp_tt_unpopulate(ttm);
+ ttm_pool_unpopulate(ttm);
return;
}
#endif
@@ -1316,14 +1364,23 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (ttm_dma->dma_address[i]) {
- dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- }
- }
+ ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+}
- ttm_pool_unpopulate(ttm);
+static void
+nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ if (drm->agp.bridge) {
+ ttm_agp_unbind(ttm);
+ ttm_tt_destroy_common(bdev, ttm);
+ ttm_agp_destroy(ttm);
+ return;
+ }
+#endif
+ nouveau_sgdma_destroy(bdev, ttm);
}
void
@@ -1341,7 +1398,9 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .init_mem_type = nouveau_bo_init_mem_type,
+ .ttm_tt_bind = &nouveau_ttm_tt_bind,
+ .ttm_tt_unbind = &nouveau_ttm_tt_unbind,
+ .ttm_tt_destroy = &nouveau_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
.move_notify = nouveau_bo_move_ntfy,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 52489ce7d029..2a23c8207436 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -18,6 +18,7 @@ struct nouveau_bo {
bool force_coherent;
struct ttm_bo_kmap_obj kmap;
struct list_head head;
+ struct list_head io_reserve_lru;
/* protected by ttm_bo_reserve() */
struct drm_file *reserved_by;
@@ -76,10 +77,10 @@ extern struct ttm_bo_driver nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
- u32 flags, u32 tile_mode, u32 tile_flags);
-int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+ u32 domain, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
-int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
@@ -96,6 +97,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
@@ -119,13 +122,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
}
static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
struct nouveau_bo **pnvbo)
{
- int ret = nouveau_bo_new(cli, size, align, flags,
+ int ret = nouveau_bo_new(cli, size, align, domain,
0, 0, NULL, NULL, pnvbo);
if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, flags, true);
+ ret = nouveau_bo_pin(*pnvbo, domain, true);
if (ret == 0) {
ret = nouveau_bo_map(*pnvbo);
if (ret == 0)
@@ -139,28 +142,28 @@ nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
int nv04_bo_move_init(struct nouveau_channel *, u32);
int nv04_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nv50_bo_move_init(struct nouveau_channel *, u32);
int nv50_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nv84_bo_move_exec(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nva3_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nvc0_bo_move_init(struct nouveau_channel *, u32);
int nvc0_bo_move_m2mf(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nvc0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
int nve0_bo_move_init(struct nouveau_channel *, u32);
int nve0_bo_move_copy(struct nouveau_channel *, struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
#define NVBO_WR32_(b,o,dr,f) nouveau_bo_wr32((b), (o)/4 + (dr), (f))
#define NVBO_RD32_(b,o,dr) nouveau_bo_rd32((b), (o)/4 + (dr))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
index bf7ae2cecaf6..7390132129fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -36,7 +36,7 @@
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *reg)
+ struct nouveau_channel *chan, struct ttm_resource *reg)
{
if (reg->mem_type == TTM_PL_TT)
return NvDmaTT;
@@ -45,7 +45,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nvif_push *push = chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
index f9b9b85abe44..4c75c7b3804c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -37,7 +37,7 @@
int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
index 1b5fd78ddcba..ed6c09d67840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -34,7 +34,7 @@
int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
index f0df172b029e..dec29b2d8bb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -38,7 +38,7 @@
int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
index 52fefb37064c..776b04976cdf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -36,7 +36,7 @@
int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nvif_push *push = chan->chan.push;
struct nouveau_mem *mem = nouveau_mem(old_reg);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
index 34b79d561c7f..8499f58213e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -31,7 +31,7 @@
int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
index 394e29012e50..575212472e7a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
+++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -36,7 +36,7 @@
int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
- struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
+ struct ttm_resource *old_reg, struct ttm_resource *new_reg)
{
struct nouveau_mem *mem = nouveau_mem(old_reg);
struct nvif_push *push = chan->chan.push;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b80e4ebf14a6..8f099601d2f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
- target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
if (nouveau_vram_pushbuf)
- target = TTM_PL_FLAG_VRAM;
+ target = NOUVEAU_GEM_DOMAIN_VRAM;
ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7674025a4bfe..8b4b3688c7ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -391,20 +391,6 @@ find_encoder(struct drm_connector *connector, int type)
return NULL;
}
-struct nouveau_connector *
-nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
-{
- struct drm_device *dev = to_drm_encoder(encoder)->dev;
- struct drm_connector *drm_connector;
-
- list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
- if (drm_connector->encoder == to_drm_encoder(encoder))
- return nouveau_connector(drm_connector);
- }
-
- return NULL;
-}
-
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
@@ -435,7 +421,8 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_DP:
- ret = nouveau_dp_detect(nv_encoder);
+ ret = nouveau_dp_detect(nouveau_connector(connector),
+ nv_encoder);
if (ret == NOUVEAU_DP_MST)
return NULL;
else if (ret == NOUVEAU_DP_SST)
@@ -541,6 +528,19 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
}
}
+static void
+nouveau_connector_set_edid(struct nouveau_connector *nv_connector,
+ struct edid *edid)
+{
+ if (nv_connector->edid != edid) {
+ struct edid *old_edid = nv_connector->edid;
+
+ drm_connector_update_edid_property(&nv_connector->base, edid);
+ kfree(old_edid);
+ nv_connector->edid = edid;
+ }
+}
+
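
The helper keeps the EDID property update and the kfree() of the previous
block in one place; detect paths then reduce to (illustrative use, mirroring
the hunks below):

	struct edid *new_edid = drm_get_edid(connector, i2c);

	/* updates the EDID property and frees the previous block */
	nouveau_connector_set_edid(nv_connector, new_edid);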
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector, bool force)
{
@@ -554,13 +554,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
- /* Cleanup the previous EDID block. */
- if (nv_connector->edid) {
- drm_connector_update_edid_property(connector, NULL);
- kfree(nv_connector->edid);
- nv_connector->edid = NULL;
- }
-
/* Outputs are only polled while runtime active, so resuming the
* device here is unnecessary (and would deadlock upon runtime suspend
* because it waits for polling to finish). We do, however, want to
@@ -573,22 +566,23 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_autosuspend(dev->dev);
+ nouveau_connector_set_edid(nv_connector, NULL);
return conn_status;
}
}
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
+ struct edid *new_edid;
+
if ((vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
nv_connector->type == DCB_CONNECTOR_LVDS)
- nv_connector->edid = drm_get_edid_switcheroo(connector,
- i2c);
+ new_edid = drm_get_edid_switcheroo(connector, i2c);
else
- nv_connector->edid = drm_get_edid(connector, i2c);
+ new_edid = drm_get_edid(connector, i2c);
- drm_connector_update_edid_property(connector,
- nv_connector->edid);
+ nouveau_connector_set_edid(nv_connector, new_edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
connector->name);
@@ -622,6 +616,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
conn_status = connector_status_connected;
drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
goto out;
+ } else {
+ nouveau_connector_set_edid(nv_connector, NULL);
}
nv_encoder = nouveau_connector_of_detect(connector);
@@ -646,10 +642,11 @@ detect_analog:
conn_status = connector_status_connected;
goto out;
}
-
}
out:
+ if (!nv_connector->edid)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -664,24 +661,20 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
+ struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- /* Cleanup the previous EDID block. */
- if (nv_connector->edid) {
- drm_connector_update_edid_property(connector, NULL);
- kfree(nv_connector->edid);
- nv_connector->edid = NULL;
- }
-
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder)
- return connector_status_disconnected;
+ goto out;
/* Try retrieving EDID via DDC */
if (!drm->vbios.fp_no_ddc) {
status = nouveau_connector_detect(connector, force);
- if (status == connector_status_connected)
+ if (status == connector_status_connected) {
+ edid = nv_connector->edid;
goto out;
+ }
}
/* On some laptops (Sony, I'm looking at you) there appears to
@@ -694,7 +687,8 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
- if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+ edid = nouveau_acpi_edid(dev, connector);
+ if (edid) {
status = connector_status_connected;
goto out;
}
@@ -714,12 +708,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* stored for the panel.
*/
if (!drm->vbios.fp_no_ddc) {
- struct edid *edid =
- (struct edid *)nouveau_bios_embedded_edid(dev);
+ edid = (struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
- nv_connector->edid =
- kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
- if (nv_connector->edid)
+ edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ if (edid)
status = connector_status_connected;
}
}
@@ -732,7 +724,7 @@ out:
status = connector_status_unknown;
#endif
- drm_connector_update_edid_property(connector, nv_connector->edid);
+ nouveau_connector_set_edid(nv_connector, edid);
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
@@ -1035,29 +1027,6 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
return 112000 * duallink_scale;
}
-enum drm_mode_status
-nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
- const unsigned min_clock,
- const unsigned max_clock,
- unsigned int *clock_out)
-{
- unsigned int clock = mode->clock;
-
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
- DRM_MODE_FLAG_3D_FRAME_PACKING)
- clock *= 2;
-
- if (clock < min_clock)
- return MODE_CLOCK_LOW;
- if (clock > max_clock)
- return MODE_CLOCK_HIGH;
-
- if (clock_out)
- *clock_out = clock;
-
- return MODE_OK;
-}
-
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1065,7 +1034,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
- unsigned min_clock = 25000, max_clock = min_clock;
+ unsigned int min_clock = 25000, max_clock = min_clock, clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1094,8 +1063,15 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
- return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
- NULL);
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static struct drm_encoder *
@@ -1150,59 +1126,39 @@ nouveau_connector_funcs_lvds = {
.early_unregister = nouveau_connector_early_unregister,
};
+void
+nouveau_connector_hpd(struct drm_connector *connector)
+{
+ struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ u32 mask = drm_connector_mask(connector);
+
+ mutex_lock(&drm->hpd_lock);
+ if (!(drm->hpd_pending & mask)) {
+ drm->hpd_pending |= mask;
+ schedule_work(&drm->hpd_work);
+ }
+ mutex_unlock(&drm->hpd_lock);
+}
+
static int
nouveau_connector_hotplug(struct nvif_notify *notify)
{
struct nouveau_connector *nv_connector =
container_of(notify, typeof(*nv_connector), hpd);
struct drm_connector *connector = &nv_connector->base;
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
- const char *name = connector->name;
- struct nouveau_encoder *nv_encoder;
- int ret;
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
-
+ nouveau_dp_irq(drm, nv_connector);
return NVIF_NOTIFY_KEEP;
}
- ret = pm_runtime_get(drm->dev->dev);
- if (ret == 0) {
- /* We can't block here if there's a pending PM request
- * running, as we'll deadlock nouveau_display_fini() when it
- * calls nvif_put() on our nvif_notify struct. So, simply
- * defer the hotplug event until the device finishes resuming
- */
- NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
- name);
- schedule_work(&drm->hpd_work);
-
- pm_runtime_put_noidle(drm->dev->dev);
- return NVIF_NOTIFY_KEEP;
- } else if (ret != 1 && ret != -EACCES) {
- NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
- name, ret);
- return NVIF_NOTIFY_DROP;
- }
-
- if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", connector->name);
+ nouveau_connector_hpd(connector);
- pm_runtime_mark_last_busy(drm->dev->dev);
- pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
}
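The connector hunks above shrink the notify callback to the bare minimum: the hotplug/IRQ path only records the connector bit in drm->hpd_pending and kicks hpd_work, while all runtime-PM handling and reprobing moves into the worker (reworked in nouveau_display.c below). A minimal sketch of the consumer side of this pattern, using hypothetical example_* names but the same hpd_lock/hpd_pending fields:

static void example_hpd_work(struct work_struct *work)
{
	struct example_drm *drm = container_of(work, typeof(*drm), hpd_work);
	u32 pending;

	/* The worker may sleep, so it is the right place to hold a
	 * runtime-PM reference; the notify path must not block.
	 */
	pm_runtime_get_sync(drm->dev);

	/* Snapshot and clear the pending mask under the lock. */
	mutex_lock(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	mutex_unlock(&drm->hpd_lock);

	if (pending)
		example_reprobe_connectors(drm, pending); /* hypothetical */

	pm_runtime_put_sync(drm->dev);
}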
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index d6de5cb8e223..d0b859c4a80e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -187,6 +187,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
struct drm_connector *
nouveau_connector_create(struct drm_device *, const struct dcb_output *);
+void nouveau_connector_hpd(struct drm_connector *connector);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5f31b11ac2e7..bceb48a2dfca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -457,16 +457,70 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
} \
} while(0)
+void
+nouveau_display_hpd_resume(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ mutex_lock(&drm->hpd_lock);
+ drm->hpd_pending = ~0;
+ mutex_unlock(&drm->hpd_lock);
+
+ schedule_work(&drm->hpd_work);
+}
+
static void
nouveau_display_hpd_work(struct work_struct *work)
{
struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
+ struct drm_device *dev = drm->dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u32 pending;
+ bool changed = false;
+
+ pm_runtime_get_sync(dev->dev);
- pm_runtime_get_sync(drm->dev->dev);
+ mutex_lock(&drm->hpd_lock);
+ pending = drm->hpd_pending;
+ drm->hpd_pending = 0;
+ mutex_unlock(&drm->hpd_lock);
- drm_helper_hpd_irq_event(drm->dev);
+ /* Nothing to do, exit early without updating the last busy counter */
+ if (!pending)
+ goto noop;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ enum drm_connector_status old_status = connector->status;
+ u64 old_epoch_counter = connector->epoch_counter;
+
+ if (!(pending & drm_connector_mask(connector)))
+ continue;
+
+ connector->status = drm_helper_probe_detect(connector, NULL,
+ false);
+ if (old_epoch_counter == connector->epoch_counter)
+ continue;
+
+ changed = true;
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status),
+ old_epoch_counter, connector->epoch_counter);
+ }
+
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(dev);
pm_runtime_mark_last_busy(drm->dev->dev);
+noop:
pm_runtime_put_sync(drm->dev->dev);
}
@@ -490,12 +544,11 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
*/
pm_runtime_put_autosuspend(drm->dev->dev);
} else if (ret == 0) {
- /* This may be the only indication we receive
- * of a connector hotplug on a runtime
- * suspended GPU, schedule hpd_work to check.
+ /* We've started resuming the GPU already, so
+ * it will handle scheduling a full reprobe
+ * itself
*/
NV_DEBUG(drm, "ACPI requested connector reprobe\n");
- schedule_work(&drm->hpd_work);
pm_runtime_put_noidle(drm->dev->dev);
} else {
NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
@@ -569,7 +622,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
- disp->fini(dev, suspend);
+ disp->fini(dev, runtime, suspend);
}
static void
@@ -686,6 +739,7 @@ nouveau_display_create(struct drm_device *dev)
}
INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
+ mutex_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
register_acpi_notifier(&drm->acpi_nb);
@@ -705,9 +759,10 @@ void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
#ifdef CONFIG_ACPI
- unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb);
+ unregister_acpi_notifier(&drm->acpi_nb);
#endif
drm_kms_helper_poll_fini(dev);
@@ -719,6 +774,7 @@ nouveau_display_destroy(struct drm_device *dev)
nvif_disp_dtor(&disp->disp);
nouveau_drm(dev)->display = NULL;
+ mutex_destroy(&drm->hpd_lock);
kfree(disp);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e0d900441d6..616c43427059 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -18,7 +18,7 @@ struct nouveau_display {
void *priv;
void (*dtor)(struct drm_device *);
int (*init)(struct drm_device *, bool resume, bool runtime);
- void (*fini)(struct drm_device *, bool suspend);
+ void (*fini)(struct drm_device *, bool suspend, bool runtime);
struct nvif_disp disp;
@@ -45,6 +45,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
+void nouveau_display_hpd_resume(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 4e8112fde3e6..92987daa5e17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -101,7 +101,7 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
{
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
- chunk->pagemap.res.start;
+ chunk->pagemap.range.start;
return chunk->bo->offset + off;
}
@@ -249,17 +249,19 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->drm = drm;
chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
- chunk->pagemap.res = *res;
+ chunk->pagemap.range.start = res->start;
+ chunk->pagemap.range.end = res->end;
+ chunk->pagemap.nr_range = 1;
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
goto out_bo_free;
@@ -273,7 +275,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);
- pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+ pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
page = pfn_to_page(pfn_first);
spin_lock(&drm->dmem->lock);
for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
@@ -294,8 +296,7 @@ out_bo_unpin:
out_bo_free:
nouveau_bo_ref(NULL, &chunk->bo);
out_release:
- release_mem_region(chunk->pagemap.res.start,
- resource_size(&chunk->pagemap.res));
+ release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
kfree(chunk);
out:
@@ -346,7 +347,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry(chunk, &drm->dmem->chunks, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
}
@@ -382,8 +383,8 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &chunk->bo);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
- release_mem_region(chunk->pagemap.res.start,
- resource_size(&chunk->pagemap.res));
+ release_mem_region(chunk->pagemap.range.start,
+ range_len(&chunk->pagemap.range));
kfree(chunk);
}
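The pagemap hunks above track the dev_pagemap conversion from an embedded struct resource to a struct range plus nr_range. A minimal sketch of filling the new fields from a freshly requested region, with error handling elided (request_free_mem_region() can fail and must be checked with IS_ERR() in real code):

	struct resource *res;

	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "example-dmem");
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.range.start = res->start;
	/* struct range is inclusive: range_len() == end - start + 1 */
	chunk->pagemap.range.end = res->end;
	chunk->pagemap.nr_range = 1;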
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 8a0f7994e1ae..040ed88d362d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -36,50 +36,123 @@ MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
static int nouveau_mst = 1;
module_param_named(mst, nouveau_mst, int, 0400);
-static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
+static bool
+nouveau_dp_has_sink_count(struct drm_connector *connector,
+ struct nouveau_encoder *outp)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- u8 buf[3];
+ return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
+}
- if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
- return;
+static enum drm_connector_status
+nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
+ struct nouveau_encoder *outp)
+{
+ struct drm_connector *connector = &nv_connector->base;
+ struct drm_dp_aux *aux = &nv_connector->aux;
+ struct nv50_mstm *mstm = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+ int ret;
+ u8 *dpcd = outp->dp.dpcd;
+
+ ret = drm_dp_read_dpcd_caps(aux, dpcd);
+ if (ret < 0)
+ goto out;
+
+ ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));
+ if (ret < 0)
+ goto out;
+
+ if (nouveau_mst) {
+ mstm = outp->dp.mstm;
+ if (mstm)
+ mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);
+ }
+
+ if (nouveau_dp_has_sink_count(connector, outp)) {
+ ret = drm_dp_read_sink_count(aux);
+ if (ret < 0)
+ goto out;
- if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
- NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
+ outp->dp.sink_count = ret;
- if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
- NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
- buf[0], buf[1], buf[2]);
+ /*
+ * Dongle connected, but no display. Don't bother reading
+ * downstream port info
+ */
+ if (!outp->dp.sink_count)
+ return connector_status_disconnected;
+ }
+ ret = drm_dp_read_downstream_info(aux, dpcd,
+ outp->dp.downstream_ports);
+ if (ret < 0)
+ goto out;
+
+ status = connector_status_connected;
+out:
+ if (status != connector_status_connected) {
+ /* Clear any cached info */
+ outp->dp.sink_count = 0;
+ }
+ return status;
}
int
-nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
+nouveau_dp_detect(struct nouveau_connector *nv_connector,
+ struct nouveau_encoder *nv_encoder)
{
struct drm_device *dev = nv_encoder->base.base.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_i2c_aux *aux;
- u8 dpcd[8];
- int ret;
+ struct drm_connector *connector = &nv_connector->base;
+ struct nv50_mstm *mstm = nv_encoder->dp.mstm;
+ enum drm_connector_status status;
+ u8 *dpcd = nv_encoder->dp.dpcd;
+ int ret = NOUVEAU_DP_NONE;
+
+ /* If we've already read the DPCD on an eDP device, we don't need to
+ * reread it as it won't change
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ dpcd[DP_DPCD_REV] != 0)
+ return NOUVEAU_DP_SST;
- aux = nv_encoder->aux;
- if (!aux)
- return -ENODEV;
+ mutex_lock(&nv_encoder->dp.hpd_irq_lock);
+ if (mstm) {
+ /* If we're not ready to handle MST state changes yet, just
+ * report the last status of the connector. We'll reprobe it
+ * once we've resumed.
+ */
+ if (mstm->suspended) {
+ if (mstm->is_mst)
+ ret = NOUVEAU_DP_MST;
+ else if (connector->status ==
+ connector_status_connected)
+ ret = NOUVEAU_DP_SST;
- ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
- if (ret)
- return ret;
+ goto out;
+ }
+ }
- nv_encoder->dp.link_bw = 27000 * dpcd[1];
- nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+ status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder);
+ if (status == connector_status_disconnected)
+ goto out;
+
+ /* If we're in MST mode, we're done here */
+ if (mstm && mstm->can_mst && mstm->is_mst) {
+ ret = NOUVEAU_DP_MST;
+ goto out;
+ }
+
+ nv_encoder->dp.link_bw = 27000 * dpcd[DP_MAX_LINK_RATE];
+ nv_encoder->dp.link_nr =
+ dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
+ nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
+ dpcd[DP_DPCD_REV]);
NV_DEBUG(drm, "encoder: %dx%d\n",
- nv_encoder->dcb->dpconf.link_nr,
- nv_encoder->dcb->dpconf.link_bw);
+ nv_encoder->dcb->dpconf.link_nr,
+ nv_encoder->dcb->dpconf.link_bw);
if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
@@ -87,23 +160,68 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
NV_DEBUG(drm, "maximum: %dx%d\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+ nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
- nouveau_dp_probe_oui(dev, aux, dpcd);
+ if (mstm && mstm->can_mst) {
+ ret = nv50_mstm_detect(nv_encoder);
+ if (ret == 1) {
+ ret = NOUVEAU_DP_MST;
+ goto out;
+ } else if (ret != 0) {
+ goto out;
+ }
+ }
+ ret = NOUVEAU_DP_SST;
- ret = nv50_mstm_detect(nv_encoder->dp.mstm, dpcd, nouveau_mst);
- if (ret == 1)
- return NOUVEAU_DP_MST;
- if (ret == 0)
- return NOUVEAU_DP_SST;
+out:
+ if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
+ nv50_mstm_remove(mstm);
+
+ mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
return ret;
}
+void nouveau_dp_irq(struct nouveau_drm *drm,
+ struct nouveau_connector *nv_connector)
+{
+ struct drm_connector *connector = &nv_connector->base;
+ struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP);
+ struct nv50_mstm *mstm;
+ int ret;
+ bool send_hpd = false;
+
+ if (!outp)
+ return;
+
+ mstm = outp->dp.mstm;
+ NV_DEBUG(drm, "service %s\n", connector->name);
+
+ mutex_lock(&outp->dp.hpd_irq_lock);
+
+ if (mstm && mstm->is_mst) {
+ if (!nv50_mstm_service(drm, nv_connector, mstm))
+ send_hpd = true;
+ } else {
+ drm_dp_cec_irq(&nv_connector->aux);
+
+ if (nouveau_dp_has_sink_count(connector, outp)) {
+ ret = drm_dp_read_sink_count(&nv_connector->aux);
+ if (ret != outp->dp.sink_count)
+ send_hpd = true;
+ if (ret >= 0)
+ outp->dp.sink_count = ret;
+ }
+ }
+
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+
+ if (send_hpd)
+ nouveau_connector_hpd(connector);
+}
+
/* TODO:
* - Use the minimum possible BPC here, once we add support for the max bpc
* property.
- * - Validate the mode against downstream port caps (see
- * drm_dp_downstream_max_clock())
* - Validate against the DP caps advertised by the GPU (we don't check these
* yet)
*/
@@ -113,19 +231,30 @@ nv50_dp_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned *out_clock)
{
- const unsigned min_clock = 25000;
- unsigned max_clock, clock;
- enum drm_mode_status ret;
+ const unsigned int min_clock = 25000;
+ unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
+ const u8 bpp = connector->display_info.bpc * 3;
if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
return MODE_NO_INTERLACE;
- max_clock = outp->dp.link_nr * outp->dp.link_bw;
- clock = mode->clock * (connector->display_info.bpc * 3) / 10;
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ max_rate = outp->dp.link_nr * outp->dp.link_bw;
+ mode_rate = DIV_ROUND_UP(clock * bpp, 8);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+
+ ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports);
+ if (ds_max_dotclock && clock > ds_max_dotclock)
+ return MODE_CLOCK_HIGH;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
- ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
- &clock);
if (out_clock)
*out_clock = clock;
- return ret;
+
+ return MODE_OK;
}
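Both sides of the mode_rate comparison above use the same unit, kB/s: dp.link_bw is 27000 times the DPCD link-rate code, i.e. the per-lane payload bandwidth after 8b/10b coding, and mode_rate is clock (kHz) times bpp over 8. A worked example as a standalone sketch, with illustrative values:

	/* 1920x1080@60: dotclock ~148500 kHz, 8 bpc => bpp = 24 */
	unsigned int clock = 148500, bpp = 8 * 3;
	/* 2 lanes at HBR: DPCD link-rate code 0x0a (2.7 Gb/s per lane) */
	unsigned int max_rate = 2 * 27000 * 0x0a;		/* 540000 kB/s */
	unsigned int mode_rate = DIV_ROUND_UP(clock * bpp, 8);	/* 445500 kB/s */
	/* mode_rate <= max_rate, so this mode fits on the link. */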
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 22d246acc5e5..42fc5c813a9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -953,7 +953,7 @@ nouveau_pmops_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev, false);
/* Monitors may have been connected / disconnected during suspend */
- schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+ nouveau_display_hpd_resume(drm_dev);
return ret;
}
@@ -1036,7 +1036,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
- schedule_work(&nouveau_drm(drm_dev)->hpd_work);
+ nouveau_display_hpd_resume(drm_dev);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index ae76a5865a5a..b8025507a9e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -157,13 +157,15 @@ struct nouveau_drm {
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
+ struct ttm_resource *, struct ttm_resource *);
struct nouveau_channel *chan;
struct nvif_object copy;
int mtrr;
int type_vram;
int type_host[2];
int type_ncoh[2];
+ struct mutex io_reserve_mutex;
+ struct list_head io_reserve_lru;
} ttm;
/* GEM interface support */
@@ -198,6 +200,8 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct work_struct hpd_work;
+ struct mutex hpd_lock;
+ u32 hpd_pending;
struct work_struct fbcon_work;
int fbcon_new_state;
#ifdef CONFIG_ACPI
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index a72c412ac8b1..21937f1c7dd9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -33,6 +33,7 @@
#include <drm/drm_dp_mst_helper.h>
#include "dispnv04/disp.h"
struct nv50_head_atom;
+struct nouveau_connector;
#define NV_DPMS_CLEARED 0x80
@@ -64,6 +65,17 @@ struct nouveau_encoder {
struct nv50_mstm *mstm;
int link_nr;
int link_bw;
+
+ /* Protects DP state that needs to be accessed outside
+ * connector reprobing contexts
+ */
+ struct mutex hpd_irq_lock;
+
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ struct drm_dp_desc desc;
+
+ u8 sink_count;
} dp;
};
@@ -77,6 +89,21 @@ struct nouveau_encoder {
struct nv50_head_atom *, u8 proto, u8 depth);
};
+struct nv50_mstm {
+ struct nouveau_encoder *outp;
+
+ struct drm_dp_mst_topology_mgr mgr;
+
+ /* Protected under nouveau_encoder->dp.hpd_irq_lock */
+ bool can_mst;
+ bool is_mst;
+ bool suspended;
+
+ bool modified;
+ bool disabled;
+ int links;
+};
+
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type);
@@ -100,20 +127,29 @@ get_slave_funcs(struct drm_encoder *enc)
/* nouveau_dp.c */
enum nouveau_dp_status {
+ NOUVEAU_DP_NONE,
NOUVEAU_DP_SST,
NOUVEAU_DP_MST,
};
-int nouveau_dp_detect(struct nouveau_encoder *);
+int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *);
+void nouveau_dp_irq(struct nouveau_drm *drm,
+ struct nouveau_connector *nv_connector);
enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
struct nouveau_encoder *,
const struct drm_display_mode *,
unsigned *clock);
struct nouveau_connector *
-nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-
-int nv50_mstm_detect(struct nv50_mstm *, u8 dpcd[8], int allow);
-void nv50_mstm_remove(struct nv50_mstm *);
-void nv50_mstm_service(struct nv50_mstm *);
+nv50_outp_get_new_connector(struct nouveau_encoder *outp,
+ struct drm_atomic_state *state);
+struct nouveau_connector *
+nv50_outp_get_old_connector(struct nouveau_encoder *outp,
+ struct drm_atomic_state *state);
+
+int nv50_mstm_detect(struct nouveau_encoder *encoder);
+void nv50_mstm_remove(struct nv50_mstm *mstm);
+bool nv50_mstm_service(struct nouveau_drm *drm,
+ struct nouveau_connector *nv_connector,
+ struct nv50_mstm *mstm);
#endif /* __NOUVEAU_ENCODER_H__ */
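The new dp.hpd_irq_lock is what lets nouveau_dp_irq() and nouveau_dp_detect() examine and update mstm->can_mst/is_mst/suspended without racing each other. Presumably it is initialized once when the encoder is created; a hedged sketch (the exact init site is not shown in this diff):

	mutex_init(&nv_encoder->dp.hpd_irq_lock);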
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index fad8030ec1f8..24ec5339efb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
if (ret)
goto out_unref;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
@@ -378,8 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = nvbo->bo.mem.bus.base +
- nvbo->bo.mem.bus.offset;
+ info->fix.smem_start = nvbo->bo.mem.bus.offset;
info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 81f111ad3f4f..549bc67feabb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -176,20 +176,12 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
- u32 flags = 0;
int ret;
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
+ if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
+ domain |= NOUVEAU_GEM_DOMAIN_CPU;
- if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
- flags |= TTM_PL_FLAG_UNCACHED;
-
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
@@ -198,11 +190,12 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ drm_gem_object_release(&nvbo->bo.base);
+ kfree(nvbo);
return ret;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+ ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ret;
@@ -296,32 +289,28 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
- uint32_t pref_flags = 0, valid_flags = 0;
+ uint32_t pref_domains = 0;
if (!domains)
return -EINVAL;
- if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
- valid_flags |= TTM_PL_FLAG_VRAM;
-
- if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
- valid_flags |= TTM_PL_FLAG_TT;
+ valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->mem.mem_type == TTM_PL_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->mem.mem_type == TTM_PL_TT)
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
- nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+ nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
return 0;
}
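Condensed, the preference logic above says: keep the buffer where it currently resides if that placement is still requested, otherwise prefer VRAM over GART. Restated as a sketch with hypothetical helpers:

static u32 example_pick_domain(u32 domains, bool in_vram, bool in_gart)
{
	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && in_vram)
		return NOUVEAU_GEM_DOMAIN_VRAM;	/* already in VRAM, stay */
	if ((domains & NOUVEAU_GEM_DOMAIN_GART) && in_gart)
		return NOUVEAU_GEM_DOMAIN_GART;	/* already in GART, stay */
	return (domains & NOUVEAU_GEM_DOMAIN_VRAM) ?
		NOUVEAU_GEM_DOMAIN_VRAM : NOUVEAU_GEM_DOMAIN_GART;
}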
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index e5fae57fffbd..9dfcce1b9846 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
}
int
-nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
@@ -130,7 +130,7 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
}
int
-nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
@@ -173,7 +173,7 @@ nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
}
void
-nouveau_mem_del(struct ttm_mem_reg *reg)
+nouveau_mem_del(struct ttm_resource *reg)
{
struct nouveau_mem *mem = nouveau_mem(reg);
if (!mem)
@@ -185,7 +185,7 @@ nouveau_mem_del(struct ttm_mem_reg *reg)
int
nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
- struct ttm_mem_reg *reg)
+ struct ttm_resource *reg)
{
struct nouveau_mem *mem;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index f6d039e73812..3fe1cfed57a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -7,7 +7,7 @@ struct ttm_dma_tt;
#include <nvif/vmm.h>
static inline struct nouveau_mem *
-nouveau_mem(struct ttm_mem_reg *reg)
+nouveau_mem(struct ttm_resource *reg)
{
return reg->mm_node;
}
@@ -21,10 +21,10 @@ struct nouveau_mem {
};
int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
- struct ttm_mem_reg *);
-void nouveau_mem_del(struct ttm_mem_reg *);
-int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+ struct ttm_resource *);
+void nouveau_mem_del(struct ttm_resource *);
+int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index bae6a3eccee0..b2ecb91f8ddc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages;
- return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
}
void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
@@ -64,14 +64,12 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
struct nouveau_bo *nvbo;
struct dma_resv *robj = attach->dmabuf->resv;
u64 size = attach->dmabuf->size;
- u32 flags = 0;
int align = 0;
int ret;
- flags = TTM_PL_FLAG_TT;
-
dma_resv_lock(robj, NULL);
- nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+ nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
+ NOUVEAU_GEM_DOMAIN_GART, 0, 0);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
@@ -88,7 +86,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
goto unlock;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+ sg, robj);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
@@ -108,7 +107,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c3ccf661b7a6..806d9ec310f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,87 +14,65 @@ struct nouveau_sgdma_be {
struct nouveau_mem *mem;
};
-static void
-nouveau_sgdma_destroy(struct ttm_tt *ttm)
+void
+nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
+ nouveau_sgdma_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
ttm_dma_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
-static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
+int
+nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
+ if (nvbe->mem)
+ return 0;
+
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
- ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
- if (ret) {
- nouveau_mem_fini(mem);
- return ret;
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ if (ret) {
+ nouveau_mem_fini(mem);
+ return ret;
+ }
}
nvbe->mem = mem;
return 0;
}
-static void
-nv04_sgdma_unbind(struct ttm_tt *ttm)
+void
+nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nouveau_mem_fini(nvbe->mem);
-}
-
-static struct ttm_backend_func nv04_sgdma_backend = {
- .bind = nv04_sgdma_bind,
- .unbind = nv04_sgdma_unbind,
- .destroy = nouveau_sgdma_destroy
-};
-
-static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nouveau_mem *mem = nouveau_mem(reg);
- int ret;
-
- ret = nouveau_mem_host(reg, &nvbe->ttm);
- if (ret)
- return ret;
-
- nvbe->mem = mem;
- return 0;
+ if (nvbe->mem) {
+ nouveau_mem_fini(nvbe->mem);
+ nvbe->mem = NULL;
+ }
}
-static struct ttm_backend_func nv50_sgdma_backend = {
- .bind = nv50_sgdma_bind,
- .unbind = nv04_sgdma_unbind,
- .destroy = nouveau_sgdma_destroy
-};
-
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
- struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
- nvbe->ttm.ttm.func = &nv04_sgdma_backend;
- else
- nvbe->ttm.ttm.func = &nv50_sgdma_backend;
-
if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
kfree(nvbe);
return NULL;
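With the per-generation backend funcs removed, bind/unbind/destroy are now called directly by the driver, and destroy unbinds unconditionally; the nvbe->mem checks above are what make repeated or unbalanced calls safe. The guard pattern, as a generic sketch with hypothetical names:

static int example_bind(struct example_tt *tt)
{
	if (tt->bound)		/* binding twice must be a no-op */
		return 0;
	/* ... map the pages ... */
	tt->bound = true;
	return 0;
}

static void example_unbind(struct example_tt *tt)
{
	if (!tt->bound)		/* destroy path may call this while unbound */
		return;
	/* ... unmap the pages ... */
	tt->bound = false;
}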
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 2df1c0460559..4f69e4c3dafd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -105,11 +105,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
- unsigned long addr, end, size;
+ unsigned long addr, end;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
- args->va_end &= PAGE_MASK;
+ args->va_end = ALIGN(args->va_end, PAGE_SIZE);
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
@@ -118,8 +118,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
- if (!args->npages)
- return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
@@ -151,12 +149,6 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (args->stride)
return -EINVAL;
- size = ((unsigned long)args->npages) << PAGE_SHIFT;
- if ((args->va_start + size) <= args->va_start)
- return -EINVAL;
- if ((args->va_start + size) > args->va_end)
- return -EINVAL;
-
/*
 * Ok, we are asked to do something sane; for now we only support migrate
* commands but we will add things like memory policy (what to do on
@@ -171,7 +163,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
return -EINVAL;
}
- for (addr = args->va_start, end = args->va_start + size; addr < end;) {
+ for (addr = args->va_start, end = args->va_end; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
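The bind hunks above derive the range purely from va_start/va_end: the end address is now rounded up with ALIGN() instead of being truncated by PAGE_MASK, so a partial trailing page stays inside the range. A quick worked example, assuming 4 KiB pages:

	/* old: truncation loses the partial last page */
	/*   0x2001 & PAGE_MASK        == 0x2000 */
	/* new: rounding up keeps it */
	/*   ALIGN(0x2001, PAGE_SIZE)  == 0x3000 */
	/* an already page-aligned end address is unchanged */
	/*   ALIGN(0x2000, PAGE_SIZE)  == 0x2000 */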
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e89ea052cf71..427341753441 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -31,35 +31,17 @@
#include <core/tegra.h>
-static int
-nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- return 0;
-}
-
-static int
-nouveau_manager_fini(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
static void
-nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
+nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
nouveau_mem_del(reg);
}
-static void
-nouveau_manager_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
static int
-nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *reg)
+ struct ttm_resource *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -81,19 +63,16 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
return 0;
}
-const struct ttm_mem_type_manager_func nouveau_vram_manager = {
- .init = nouveau_manager_init,
- .takedown = nouveau_manager_fini,
- .get_node = nouveau_vram_manager_new,
- .put_node = nouveau_manager_del,
- .debug = nouveau_manager_debug,
+const struct ttm_resource_manager_func nouveau_vram_manager = {
+ .alloc = nouveau_vram_manager_new,
+ .free = nouveau_manager_del,
};
static int
-nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+nouveau_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *reg)
+ struct ttm_resource *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -107,19 +86,16 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
return 0;
}
-const struct ttm_mem_type_manager_func nouveau_gart_manager = {
- .init = nouveau_manager_init,
- .takedown = nouveau_manager_fini,
- .get_node = nouveau_gart_manager_new,
- .put_node = nouveau_manager_del,
- .debug = nouveau_manager_debug
+const struct ttm_resource_manager_func nouveau_gart_manager = {
+ .alloc = nouveau_gart_manager_new,
+ .free = nouveau_manager_del,
};
static int
-nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+nv04_gart_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *reg)
+ struct ttm_resource *reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -142,12 +118,41 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
return 0;
}
-const struct ttm_mem_type_manager_func nv04_gart_manager = {
- .init = nouveau_manager_init,
- .takedown = nouveau_manager_fini,
- .get_node = nv04_gart_manager_new,
- .put_node = nouveau_manager_del,
- .debug = nouveau_manager_debug
+const struct ttm_resource_manager_func nv04_gart_manager = {
+ .alloc = nv04_gart_manager_new,
+ .free = nouveau_manager_del,
+};
+
+static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ nouveau_bo_del_io_reserve_lru(bo);
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+ nouveau_bo_add_io_reserve_lru(bo);
+
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
+
+static const struct vm_operations_struct nouveau_ttm_vm_ops = {
+ .fault = nouveau_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
};
int
@@ -155,8 +160,14 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+ int ret;
- return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &nouveau_ttm_vm_ops;
+ return 0;
}
static int
@@ -180,6 +191,87 @@ nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
return 0;
}
+static int
+nouveau_ttm_init_vram(struct nouveau_drm *drm)
+{
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
+
+ if (!man)
+ return -ENOMEM;
+
+ man->func = &nouveau_vram_manager;
+
+ ttm_resource_manager_init(man,
+ drm->gem.vram_available >> PAGE_SHIFT);
+ ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+ } else {
+ return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
+ drm->gem.vram_available >> PAGE_SHIFT);
+ }
+}
+
+static void
+nouveau_ttm_fini_vram(struct nouveau_drm *drm)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ttm_resource_manager_set_used(man, false);
+ ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
+ kfree(man);
+ } else
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
+}
+
+static int
+nouveau_ttm_init_gtt(struct nouveau_drm *drm)
+{
+ struct ttm_resource_manager *man;
+ unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
+ const struct ttm_resource_manager_func *func = NULL;
+
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ func = &nouveau_gart_manager;
+ else if (!drm->agp.bridge)
+ func = &nv04_gart_manager;
+ else
+ return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
+ size_pages);
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return -ENOMEM;
+
+ man->func = func;
+ man->use_tt = true;
+ ttm_resource_manager_init(man, size_pages);
+ ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+static void
+nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
+{
+ struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
+ drm->agp.bridge)
+		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
+ else {
+ ttm_resource_manager_set_used(man, false);
+ ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
+ kfree(man);
+ }
+}
+
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
@@ -237,8 +329,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
device->func->resource_size(device, 1));
- ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
- drm->gem.vram_available >> PAGE_SHIFT);
+ ret = nouveau_ttm_init_vram(drm);
if (ret) {
NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
return ret;
@@ -254,13 +345,15 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->gem.gart_available = drm->agp.size;
}
- ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
- drm->gem.gart_available >> PAGE_SHIFT);
+ ret = nouveau_ttm_init_gtt(drm);
if (ret) {
NV_ERROR(drm, "GART mm init failed, %d\n", ret);
return ret;
}
+ mutex_init(&drm->ttm.io_reserve_mutex);
+ INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
+
NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
return 0;
@@ -271,8 +364,8 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
{
struct nvkm_device *device = nvxx_device(&drm->client.device);
- ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
+ nouveau_ttm_fini_vram(drm);
+ nouveau_ttm_fini_gtt(drm);
ttm_bo_device_release(&drm->ttm.bdev);
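The init/fini helpers above follow the driver-side ttm_resource_manager lifecycle that replaces ttm_bo_init_mm()/ttm_bo_clean_mm(). Sketched end to end for a driver-allocated manager, with error handling elided:

	struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

	/* bring-up */
	man->func = &my_manager_funcs;		/* driver's alloc/free hooks */
	ttm_resource_manager_init(man, size_in_pages);
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, man);
	ttm_resource_manager_set_used(man, true);	/* open for allocations */

	/* teardown, in reverse order */
	ttm_resource_manager_set_used(man, false);	/* stop new allocations */
	ttm_resource_manager_force_list_clean(bdev, man); /* evict leftovers */
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(bdev, TTM_PL_VRAM, NULL);
	kfree(man);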
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 085280754b3e..69552049bb96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -8,9 +8,9 @@ nouveau_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct nouveau_drm, ttm.bdev);
}
-extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
-extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
-extern const struct ttm_mem_type_manager_func nv04_gart_manager;
+extern const struct ttm_resource_manager_func nouveau_vram_manager;
+extern const struct ttm_resource_manager_func nouveau_gart_manager;
+extern const struct ttm_resource_manager_func nv04_gart_manager;
struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo,
u32 page_flags);
@@ -22,4 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
+int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
+void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index cd1e87a528a4..1253fdec712d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -78,7 +78,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+ struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + reg->size - 1;
int ret = 0;
@@ -130,10 +130,11 @@ nv17_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index ebb740686b44..447238e3cbe7 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
{
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+ struct ttm_resource *reg = &priv->bo->bo.mem;
u32 start = reg->start * PAGE_SIZE;
u32 limit = start + reg->size - 1;
int ret;
@@ -81,10 +81,11 @@ nv50_fence_create(struct nouveau_drm *drm)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 7ed36b3a6b7d..7c9c928c3196 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -209,12 +209,13 @@ nv84_fence_create(struct nouveau_drm *drm)
mutex_init(&priv->mutex);
 /* Use VRAM if there is any; otherwise fall back to system memory */
- domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
- /*
- * fences created in sysmem must be non-cached or we
- * will lose CPU/GPU coherency!
- */
- TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ domain = drm->client.device.info.ram_size != 0 ?
+ NOUVEAU_GEM_DOMAIN_VRAM :
+ /*
+ * fences created in sysmem must be non-cached or we
+ * will lose CPU/GPU coherency!
+ */
+ NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
domain, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index dcb70677d0ac..7851bec5f0e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2924,17 +2924,34 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}
+/* returns true if the GPU is in the CPU's native byte order */
static inline bool
nvkm_device_endianness(struct nvkm_device *device)
{
- u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
#ifdef __BIG_ENDIAN
- if (!boot1)
- return false;
+ const bool big_endian = true;
#else
- if (boot1)
- return false;
+ const bool big_endian = false;
#endif
+
+ /* Read NV_PMC_BOOT_1, and assume a non-functional endian switch if it
+ * doesn't contain the expected values.
+ */
+ u32 pmc_boot_1 = nvkm_rd32(device, 0x000004);
+ if (pmc_boot_1 && pmc_boot_1 != 0x01000001)
+ return !big_endian; /* Assume GPU is LE in this case. */
+
+ /* 0 means LE and 0x01000001 means BE GPU. Condition is true when
+ * the GPU and CPU endianness don't match.
+ */
+ if (big_endian == !pmc_boot_1) {
+ nvkm_wr32(device, 0x000004, 0x01000001);
+ nvkm_rd32(device, 0x000000);
+ if (nvkm_rd32(device, 0x000004) != (big_endian ? 0x01000001 : 0x00000000))
+ return !big_endian; /* Assume GPU is LE on any unexpected read-back. */
+ }
+
+ /* CPU/GPU endianness should (hopefully) match. */
return true;
}
@@ -2987,14 +3004,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
if (detect) {
/* switch mmio to cpu's native endianness */
if (!nvkm_device_endianness(device)) {
- nvkm_wr32(device, 0x000004, 0x01000001);
- nvkm_rd32(device, 0x000000);
- if (!nvkm_device_endianness(device)) {
- nvdev_error(device,
- "GPU not supported on big-endian\n");
- ret = -ENOSYS;
- goto done;
- }
+ nvdev_error(device,
+ "Couldn't switch GPU to CPU's endianness\n");
+ ret = -ENOSYS;
+ goto done;
}
boot0 = nvkm_rd32(device, 0x000000);
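For reference, the rewritten check reduces to a small decision table on NV_PMC_BOOT_1 (offset 0x000004):

	/* read value   CPU   action
	 * ----------   ---   ------
	 * 0x00000000   LE    native order, done
	 * 0x01000001   BE    native order, done
	 * 0x00000000   BE    write 0x01000001 to flip the switch, then
	 * 0x01000001   LE    re-read and expect the native-order value
	 *                    (0x01000001 on BE, 0x00000000 on LE)
	 * other        any   endian switch assumed broken, treat GPU as LE
	 */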
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 985f2990ab0d..13d4d7ac0697 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -594,8 +594,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- imem->attrs = DMA_ATTR_NON_CONSISTENT |
- DMA_ATTR_WEAK_ORDERING |
+ imem->attrs = DMA_ATTR_WEAK_ORDERING |
DMA_ATTR_WRITE_COMBINE;
nvkm_info(&imem->base.subdev, "using DMA API\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 2578c95570f6..a14fbf06cb30 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 4d4c1fabd0a1..b738d9750686 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -24,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index bd12eae0cb31..5c027c81760f 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -781,7 +781,7 @@ static int venc_probe_of(struct venc_device *venc)
venc->type = OMAP_DSS_VENC_TYPE_SVIDEO;
break;
default:
- dev_err(&venc->pdev->dev, "bad channel propert '%d'\n",
+ dev_err(&venc->pdev->dev, "bad channel property '%d'\n",
channels);
r = -EINVAL;
goto err;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 4526967978b7..53d5e184ee77 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -349,13 +349,6 @@ static int omap_modeset_init(struct drm_device *dev)
drm_connector_attach_encoder(pipe->connector, encoder);
- if (pipe->output->panel) {
- ret = drm_panel_attach(pipe->output->panel,
- pipe->connector);
- if (ret < 0)
- return ret;
- }
-
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
@@ -394,18 +387,8 @@ static int omap_modeset_init(struct drm_device *dev)
static void omap_modeset_fini(struct drm_device *ddev)
{
- struct omap_drm_private *priv = ddev->dev_private;
- unsigned int i;
-
omap_drm_irq_uninstall(ddev);
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_drm_pipeline *pipe = &priv->pipes[i];
-
- if (pipe->output->panel)
- drm_panel_detach(pipe->output->panel);
- }
-
drm_mode_config_cleanup(ddev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index d0d12d5dd76c..f67f223c6479 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1297,10 +1297,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
- struct sg_page_iter iter;
struct page **pages;
unsigned int npages;
- unsigned int i = 0;
+ int ret;
npages = DIV_ROUND_UP(size, PAGE_SIZE);
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
@@ -1311,14 +1310,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
}
omap_obj->pages = pages;
-
- for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
- pages[i++] = sg_page_iter_page(&iter);
- if (i > npages)
- break;
- }
-
- if (WARN_ON(i != npages)) {
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL,
+ npages);
+ if (ret) {
omap_gem_free_object(obj);
obj = ERR_PTR(-ENOMEM);
goto done;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index de2f2a452be5..b9dbedf8f15e 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -217,6 +217,17 @@ config DRM_PANEL_NOVATEK_NT39016
Say Y here if you want to enable support for the panels built
around the Novatek NT39016 display controller.
+config DRM_PANEL_MANTIX_MLAF057WE51
+ tristate "Mantix MLAF057WE51-X MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Mantix
+ MLAF057WE51-X MIPI DSI panel, as used e.g. in the Librem 5. It
+ has a resolution of 720x1440 pixels, a built-in backlight and a
+ touch controller.
+
config DRM_PANEL_OLIMEX_LCD_OLINUXINO
tristate "Olimex LCD-OLinuXino panel"
depends on OF
@@ -313,13 +324,30 @@ config DRM_PANEL_SAMSUNG_S6E63J0X03
select VIDEOMODE_HELPERS
config DRM_PANEL_SAMSUNG_S6E63M0
- tristate "Samsung S6E63M0 RGB/SPI panel"
+ tristate "Samsung S6E63M0 RGB panel"
depends on OF
- depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for Samsung S6E63M0
- AMOLED LCD panel.
+ AMOLED LCD panel. This panel can be accessed using SPI or
+ DSI.
+
+config DRM_PANEL_SAMSUNG_S6E63M0_SPI
+ tristate "Samsung S6E63M0 RGB SPI interface"
+ depends on SPI
+ depends on DRM_PANEL_SAMSUNG_S6E63M0
+ default DRM_PANEL_SAMSUNG_S6E63M0
+ help
+ Say Y here if you want to be able to access the Samsung
+ S6E63M0 panel using SPI.
+
+config DRM_PANEL_SAMSUNG_S6E63M0_DSI
+ tristate "Samsung S6E63M0 RGB DSI interface"
+ depends on DRM_MIPI_DSI
+ depends on DRM_PANEL_SAMSUNG_S6E63M0
+ help
+ Say Y here if you want to be able to access the Samsung
+ S6E63M0 panel using DSI.
config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index e45ceac6286f..2ba560bca61d 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
+obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
@@ -33,6 +34,8 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 47b37fef7ee8..abb0788843c6 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -349,7 +349,9 @@ static int versatile_panel_probe(struct platform_device *pdev)
drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&vpanel->panel);
+ drm_panel_add(&vpanel->panel);
+
+ return 0;
}
static const struct of_device_id versatile_panel_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index 9a5b7644d756..e95bc9f60b3f 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -315,11 +315,7 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
return ret;
}
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0) {
- dev_err(dev, "Failed to add panel: %d\n", ret);
- return ret;
- }
+ drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 7c27bd5e3486..42854bd37fd5 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -19,7 +19,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <video/mipi_display.h>
@@ -93,8 +92,7 @@ static int boe_panel_disable(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_off(pinfo->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
return err;
}
@@ -113,13 +111,11 @@ static int boe_panel_unprepare(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_off(pinfo->link);
if (err < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
err = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
if (err < 0)
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
/* sleep_mode_delay: 1ms - 2ms */
usleep_range(1000, 2000);
@@ -163,15 +159,13 @@ static int boe_panel_prepare(struct drm_panel *panel)
/* send init code */
err = send_mipi_cmds(panel, pinfo->desc->on_cmds);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to send DCS Init Code: %d\n",
- err);
+ dev_err(panel->dev, "failed to send DCS Init Code: %d\n", err);
goto poweroff;
}
err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
goto poweroff;
}
@@ -180,8 +174,7 @@ static int boe_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_on(pinfo->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
goto poweroff;
}
@@ -209,8 +202,7 @@ static int boe_panel_enable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(pinfo->link);
if (ret < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
- ret);
+ dev_err(panel->dev, "failed to set display on: %d\n", ret);
return ret;
}
@@ -228,8 +220,8 @@ static int boe_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
+ dev_err(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
@@ -865,8 +857,7 @@ static int panel_add(struct panel_info *pinfo)
if (IS_ERR(pinfo->pp18_gpio)) {
ret = PTR_ERR(pinfo->pp18_gpio);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to get pp18 gpio: %d\n",
- ret);
+ dev_err(dev, "failed to get pp18 gpio: %d\n", ret);
return ret;
}
@@ -874,8 +865,7 @@ static int panel_add(struct panel_info *pinfo)
if (IS_ERR(pinfo->pp33_gpio)) {
ret = PTR_ERR(pinfo->pp33_gpio);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to get pp33 gpio: %d\n",
- ret);
+ dev_err(dev, "failed to get pp33 gpio: %d\n", ret);
return ret;
}
@@ -883,8 +873,7 @@ static int panel_add(struct panel_info *pinfo)
if (IS_ERR(pinfo->enable_gpio)) {
ret = PTR_ERR(pinfo->enable_gpio);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to get enable gpio: %d\n",
- ret);
+ dev_err(dev, "failed to get enable gpio: %d\n", ret);
return ret;
}
@@ -895,7 +884,9 @@ static int panel_add(struct panel_info *pinfo)
if (ret)
return ret;
- return drm_panel_add(&pinfo->base);
+ drm_panel_add(&pinfo->base);
+
+ return 0;
}
static int panel_probe(struct mipi_dsi_device *dsi)
@@ -935,18 +926,15 @@ static int panel_remove(struct mipi_dsi_device *dsi)
err = boe_panel_disable(&pinfo->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
err = boe_panel_unprepare(&pinfo->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
err = mipi_dsi_detach(dsi);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_remove(&pinfo->base);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index e320aa30b9ae..db9d0b86d542 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -43,6 +44,7 @@ struct boe_panel {
const struct panel_desc *desc;
+ enum drm_panel_orientation orientation;
struct regulator *pp1800;
struct regulator *avee;
struct regulator *avdd;
@@ -740,6 +742,7 @@ static int boe_panel_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = boe->desc->size.width_mm;
connector->display_info.height_mm = boe->desc->size.height_mm;
connector->display_info.bpc = boe->desc->bpc;
+ drm_connector_set_panel_orientation(connector, boe->orientation);
return 1;
}
@@ -779,6 +782,11 @@ static int boe_panel_add(struct boe_panel *boe)
drm_panel_init(&boe->base, dev, &boe_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
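+ /* Optional "rotation" DT property, passed to the connector in boe_panel_get_modes() */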
+ err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
+ if (err < 0) {
+ dev_err(dev, "%pOF: failed to get orientation: %d\n", dev->of_node, err);
+ return err;
+ }
err = drm_panel_of_backlight(&boe->base);
if (err)
@@ -787,7 +795,9 @@ static int boe_panel_add(struct boe_panel *boe)
boe->base.funcs = &boe_panel_funcs;
boe->base.dev = &boe->dsi->dev;
- return drm_panel_add(&boe->base);
+ drm_panel_add(&boe->base);
+
+ return 0;
}
static int boe_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index e9675514d77b..bc36aa3c1123 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -22,7 +22,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
/* Manufacturer specific commands sent via DSI */
#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
@@ -89,7 +88,7 @@ static int kd35t133_init_sequence(struct kd35t133 *ctx)
0xa9, 0x51, 0x2c, 0x82);
mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
- DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ dev_dbg(dev, "Panel init sequence done\n");
return 0;
}
@@ -104,13 +103,11 @@ static int kd35t133_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
return ret;
}
@@ -131,18 +128,16 @@ static int kd35t133_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vdd);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable vdd supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable iovcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vdd;
}
@@ -156,7 +151,7 @@ static int kd35t133_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
goto disable_iovcc;
}
@@ -164,14 +159,13 @@ static int kd35t133_prepare(struct drm_panel *panel)
ret = kd35t133_init_sequence(ctx);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
goto disable_iovcc;
}
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
goto disable_iovcc;
}
@@ -210,9 +204,9 @@ static int kd35t133_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -244,7 +238,7 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
@@ -252,9 +246,7 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vdd)) {
ret = PTR_ERR(ctx->vdd);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request vdd regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request vdd regulator: %d\n", ret);
return ret;
}
@@ -262,9 +254,7 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request iovcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
@@ -288,7 +278,7 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -303,13 +293,11 @@ static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
static int kd35t133_remove(struct mipi_dsi_device *dsi)
@@ -321,8 +309,7 @@ static int kd35t133_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 54610651ecdb..2a602aee61c3 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -13,7 +13,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define K101_IM2BA02_INIT_CMD_LEN 2
@@ -374,13 +373,11 @@ static int k101_im2ba02_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
if (ret < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(panel->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
if (ret < 0)
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
msleep(200);
@@ -416,10 +413,10 @@ static int k101_im2ba02_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
if (!mode) {
- DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
- k101_im2ba02_default_mode.hdisplay,
- k101_im2ba02_default_mode.vdisplay,
- drm_mode_vrefresh(&k101_im2ba02_default_mode));
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ k101_im2ba02_default_mode.hdisplay,
+ k101_im2ba02_default_mode.vdisplay,
+ drm_mode_vrefresh(&k101_im2ba02_default_mode));
return -ENOMEM;
}
@@ -460,13 +457,13 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
if (ret < 0) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+ dev_err(&dsi->dev, "Couldn't get regulators\n");
return ret;
}
ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(ctx->reset);
}
@@ -477,9 +474,7 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 19a6274b10f5..581661b506f8 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -7,7 +7,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
@@ -118,13 +117,11 @@ static int feiyang_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
if (ret < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(panel->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
if (ret < 0)
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
/* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */
msleep(200);
@@ -165,10 +162,10 @@ static int feiyang_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &feiyang_default_mode);
if (!mode) {
- DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
- feiyang_default_mode.hdisplay,
- feiyang_default_mode.vdisplay,
- drm_mode_vrefresh(&feiyang_default_mode));
+ dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ feiyang_default_mode.hdisplay,
+ feiyang_default_mode.vdisplay,
+ drm_mode_vrefresh(&feiyang_default_mode));
return -ENOMEM;
}
@@ -204,19 +201,19 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get dvdd regulator\n");
+ dev_err(&dsi->dev, "Couldn't get dvdd regulator\n");
return PTR_ERR(ctx->dvdd);
}
ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
if (IS_ERR(ctx->avdd)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get avdd regulator\n");
+ dev_err(&dsi->dev, "Couldn't get avdd regulator\n");
return PTR_ERR(ctx->avdd);
}
ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(ctx->reset);
}
@@ -224,9 +221,7 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 67a64d1999f6..074e18559b9f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -33,7 +33,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define ILI9322_CHIP_ID 0x00
#define ILI9322_CHIP_ID_MAGIC 0x96
@@ -683,7 +682,7 @@ static int ili9322_get_modes(struct drm_panel *panel,
break;
}
if (!mode) {
- DRM_ERROR("bad mode or failed to add mode\n");
+ dev_err(panel->dev, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
@@ -892,7 +891,9 @@ static int ili9322_probe(struct spi_device *spi)
drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&ili->panel);
+ drm_panel_add(&ili->panel);
+
+ return 0;
}
static int ili9322_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ed8635a6fbd..0145129d7c66 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -10,6 +10,7 @@
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -20,14 +21,6 @@
#include <video/mipi_display.h>
-struct ili9881c {
- struct drm_panel panel;
- struct mipi_dsi_device *dsi;
-
- struct regulator *power;
- struct gpio_desc *reset;
-};
-
enum ili9881c_op {
ILI9881C_SWITCH_PAGE,
ILI9881C_COMMAND,
@@ -45,6 +38,21 @@ struct ili9881c_instr {
} arg;
};
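+/*
+ * Per-variant data: the vendor init sequence and the default display
+ * mode, selected via OF match data in ili9881c_dsi_probe().
+ */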
+struct ili9881c_desc {
+ const struct ili9881c_instr *init;
+ const size_t init_length;
+ const struct drm_display_mode *mode;
+};
+
+struct ili9881c {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ const struct ili9881c_desc *desc;
+
+ struct regulator *power;
+ struct gpio_desc *reset;
+};
+
#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
{ \
.op = ILI9881C_SWITCH_PAGE, \
@@ -64,7 +72,7 @@ struct ili9881c_instr {
}, \
}
-static const struct ili9881c_instr ili9881c_init[] = {
+static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
ILI9881C_COMMAND_INSTR(0x02, 0x00),
@@ -252,6 +260,199 @@ static const struct ili9881c_instr ili9881c_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
};
+static const struct ili9881c_instr k101_im2byl02_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x08),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0A, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0B, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0D, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0E, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1E, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1F, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x06),
+ ILI9881C_COMMAND_INSTR(0x23, 0x01),
+ ILI9881C_COMMAND_INSTR(0x24, 0x88),
+ ILI9881C_COMMAND_INSTR(0x25, 0x88),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x3B),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x00), /* GPWR1/2 non overlap time 2.62us */
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3A, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3D, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xAB),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5A, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
+ ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
+ ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
+ ILI9881C_COMMAND_INSTR(0x5E, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5F, 0x01),
+ ILI9881C_COMMAND_INSTR(0x60, 0x01),
+ ILI9881C_COMMAND_INSTR(0x61, 0x06),
+ ILI9881C_COMMAND_INSTR(0x62, 0x06),
+ ILI9881C_COMMAND_INSTR(0x63, 0x07),
+ ILI9881C_COMMAND_INSTR(0x64, 0x07),
+ ILI9881C_COMMAND_INSTR(0x65, 0x00),
+ ILI9881C_COMMAND_INSTR(0x66, 0x00),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x05),
+ ILI9881C_COMMAND_INSTR(0x6A, 0x05),
+ ILI9881C_COMMAND_INSTR(0x6B, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6D, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x70, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x71, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x72, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x73, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x01),
+ ILI9881C_COMMAND_INSTR(0x76, 0x01),
+ ILI9881C_COMMAND_INSTR(0x77, 0x06),
+ ILI9881C_COMMAND_INSTR(0x78, 0x06),
+ ILI9881C_COMMAND_INSTR(0x79, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7A, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7B, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7C, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7D, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7E, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7F, 0x05),
+ ILI9881C_COMMAND_INSTR(0x80, 0x05),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x85, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x86, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x87, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x89, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x */
+ ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */
+ ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */
+ ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */
+ ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */
+ ILI9881C_COMMAND_INSTR(0x43, 0x66),
+ ILI9881C_COMMAND_INSTR(0x53, 0x4C),
+ ILI9881C_COMMAND_INSTR(0x50, 0x87),
+ ILI9881C_COMMAND_INSTR(0x51, 0x82),
+ ILI9881C_COMMAND_INSTR(0x60, 0x15),
+ ILI9881C_COMMAND_INSTR(0x61, 0x01),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x63, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */
+ ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */
+ ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */
+ ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */
+ ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */
+ ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */
+ ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */
+ ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */
+ ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */
+ ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */
+ ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */
+ ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */
+ ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */
+ ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */
+ ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */
+ ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */
+ ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */
+ ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */
+ ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */
+ ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */
+ ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */
+ ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */
+ ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */
+ ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */
+ ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */
+ ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */
+ ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */
+ ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */
+ ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -311,8 +512,8 @@ static int ili9881c_prepare(struct drm_panel *panel)
gpiod_set_value(ctx->reset, 0);
msleep(20);
- for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
- const struct ili9881c_instr *instr = &ili9881c_init[i];
+ for (i = 0; i < ctx->desc->init_length; i++) {
+ const struct ili9881c_instr *instr = &ctx->desc->init[i];
if (instr->op == ILI9881C_SWITCH_PAGE)
ret = ili9881c_switch_page(ctx, instr->arg.page);
@@ -368,7 +569,7 @@ static int ili9881c_unprepare(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode bananapi_default_mode = {
+static const struct drm_display_mode lhr050h41_default_mode = {
.clock = 62000,
.hdisplay = 720,
@@ -380,6 +581,26 @@ static const struct drm_display_mode bananapi_default_mode = {
.vsync_start = 1280 + 10,
.vsync_end = 1280 + 10 + 10,
.vtotal = 1280 + 10 + 10 + 20,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct drm_display_mode k101_im2byl02_default_mode = {
+ .clock = 69700,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 6,
+ .hsync_end = 800 + 6 + 15,
+ .htotal = 800 + 6 + 15 + 16,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 8,
+ .vsync_end = 1280 + 8 + 48,
+ .vtotal = 1280 + 8 + 48 + 52,
+
+ .width_mm = 135,
+ .height_mm = 217,
};
static int ili9881c_get_modes(struct drm_panel *panel,
@@ -388,12 +609,12 @@ static int ili9881c_get_modes(struct drm_panel *panel,
struct ili9881c *ctx = panel_to_ili9881c(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, &bananapi_default_mode);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
- bananapi_default_mode.hdisplay,
- bananapi_default_mode.vdisplay,
- drm_mode_vrefresh(&bananapi_default_mode));
+ ctx->desc->mode->hdisplay,
+ ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
return -ENOMEM;
}
@@ -402,8 +623,8 @@ static int ili9881c_get_modes(struct drm_panel *panel,
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- connector->display_info.width_mm = 62;
- connector->display_info.height_mm = 110;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
return 1;
}
@@ -426,6 +647,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return -ENOMEM;
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
+ ctx->desc = of_device_get_match_data(&dsi->dev);
drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -446,9 +668,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
dsi->format = MIPI_DSI_FMT_RGB888;
@@ -467,8 +687,21 @@ static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
return 0;
}
+static const struct ili9881c_desc lhr050h41_desc = {
+ .init = lhr050h41_init,
+ .init_length = ARRAY_SIZE(lhr050h41_init),
+ .mode = &lhr050h41_default_mode,
+};
+
+static const struct ili9881c_desc k101_im2byl02_desc = {
+ .init = k101_im2byl02_init,
+ .init_length = ARRAY_SIZE(k101_im2byl02_init),
+ .mode = &k101_im2byl02_default_mode,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
- { .compatible = "bananapi,lhr050h41" },
+ { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
+ { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index fdf030f4cf92..aea316225391 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -17,7 +17,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct panel_init_cmd {
size_t len;
@@ -85,13 +84,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_off(innolux->link);
if (err < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
return err;
}
@@ -147,8 +144,7 @@ static int innolux_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_generic_write(innolux->link, cmd->data,
cmd->len);
if (err < 0) {
- dev_err(panel->dev,
- "failed to write command %u\n", i);
+ dev_err(panel->dev, "failed to write command %u\n", i);
goto poweroff;
}
@@ -159,8 +155,7 @@ static int innolux_panel_prepare(struct drm_panel *panel)
*/
err = mipi_dsi_dcs_nop(innolux->link);
if (err < 0) {
- dev_err(panel->dev,
- "failed to send DCS nop: %d\n", err);
+ dev_err(panel->dev, "failed to send DCS nop: %d\n", err);
goto poweroff;
}
}
@@ -168,8 +163,7 @@ static int innolux_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
goto poweroff;
}
@@ -178,8 +172,7 @@ static int innolux_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_on(innolux->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
goto poweroff;
}
@@ -398,8 +391,8 @@ static int innolux_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
- DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
- m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
@@ -475,9 +468,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
if (err)
return err;
- err = drm_panel_add(&innolux->base);
- if (err < 0)
- return err;
+ drm_panel_add(&innolux->base);
mipi_dsi_set_drvdata(dsi, innolux);
innolux->link = dsi;
@@ -514,17 +505,15 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
err = drm_panel_unprepare(&innolux->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
err = drm_panel_disable(&innolux->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
err = mipi_dsi_detach(dsi);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
innolux_panel_del(innolux);
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 1e3fd6633981..733010b5e4f5 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -440,9 +440,9 @@ static int jdi_panel_add(struct jdi_panel *jdi)
drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&jdi->base);
+ drm_panel_add(&jdi->base);
- return ret;
+ return 0;
}
static void jdi_panel_del(struct jdi_panel *jdi)
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 0d397af23afe..86e4213e8bb1 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -16,7 +16,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct kingdisplay_panel {
struct drm_panel base;
@@ -191,8 +190,7 @@ static int kingdisplay_panel_disable(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_off(kingdisplay->link);
if (err < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
kingdisplay->enabled = false;
@@ -209,8 +207,7 @@ static int kingdisplay_panel_unprepare(struct drm_panel *panel)
err = mipi_dsi_dcs_enter_sleep_mode(kingdisplay->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
return err;
}
@@ -255,16 +252,14 @@ static int kingdisplay_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_generic_write(kingdisplay->link, &init_code[i],
sizeof(struct kingdisplay_panel_cmd));
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed write init cmds: %d\n",
- err);
+ dev_err(panel->dev, "failed write init cmds: %d\n", err);
goto poweroff;
}
}
err = mipi_dsi_dcs_exit_sleep_mode(kingdisplay->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
- err);
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
goto poweroff;
}
@@ -273,8 +268,7 @@ static int kingdisplay_panel_prepare(struct drm_panel *panel)
err = mipi_dsi_dcs_set_display_on(kingdisplay->link);
if (err < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
- err);
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
goto poweroff;
}
@@ -290,8 +284,7 @@ poweroff:
regulator_err = regulator_disable(kingdisplay->supply);
if (regulator_err)
- DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
- regulator_err);
+ dev_err(panel->dev, "failed to disable regulator: %d\n", regulator_err);
return err;
}
@@ -327,9 +320,9 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -382,7 +375,9 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
if (err)
return err;
- return drm_panel_add(&kingdisplay->base);
+ drm_panel_add(&kingdisplay->base);
+
+ return 0;
}
static void kingdisplay_panel_del(struct kingdisplay_panel *kingdisplay)
@@ -421,17 +416,15 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
err = drm_panel_unprepare(&kingdisplay->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
err = drm_panel_disable(&kingdisplay->base);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
err = mipi_dsi_detach(dsi);
if (err < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
- err);
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
kingdisplay_panel_del(kingdisplay);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index eaa9da3ebbea..ed0d5f959037 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -17,7 +17,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct ltk050h3146w_cmd {
char cmd;
@@ -314,8 +313,7 @@ static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
return ret;
}
@@ -360,8 +358,7 @@ static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
ret = ltk050h3146w_a2_select_page(ctx, page);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
- page, ret);
+ dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret);
return ret;
}
@@ -369,9 +366,7 @@ static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
ret = mipi_dsi_generic_write(dsi, &cmds[i],
sizeof(struct ltk050h3146w_cmd));
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "failed to write page %d init cmds: %d\n",
- page, ret);
+ dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret);
return ret;
}
}
@@ -405,15 +400,14 @@ static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
ret = ltk050h3146w_a2_select_page(ctx, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+ dev_err(ctx->dev, "failed to select page 0: %d\n", ret);
return ret;
}
/* vendor code called this without param, where there should be one */
ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
return ret;
}
@@ -452,15 +446,13 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to set display off: %d\n", ret);
return ret;
}
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
return ret;
}
@@ -481,17 +473,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vci);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable vci supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable iovcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vci;
}
@@ -502,14 +492,13 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
ret = ctx->panel_desc->init(ctx);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
goto disable_iovcc;
}
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
goto disable_iovcc;
}
@@ -518,7 +507,7 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
goto disable_iovcc;
}
@@ -577,7 +566,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
@@ -585,9 +574,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vci)) {
ret = PTR_ERR(ctx->vci);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request vci regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request vci regulator: %d\n", ret);
return ret;
}
@@ -595,9 +582,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request iovcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
@@ -621,7 +606,7 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -636,13 +621,11 @@ static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
@@ -654,8 +637,7 @@ static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 0f6a248c47a5..3c00e4f8f803 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -20,7 +20,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct ltk500hd1829 {
struct device *dev;
@@ -278,13 +277,11 @@ static int ltk500hd1829_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
- DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(panel->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
}
/* 120ms to enter sleep mode */
@@ -310,14 +307,12 @@ static int ltk500hd1829_prepare(struct drm_panel *panel)
ret = regulator_enable(ctx->vcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable vci supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable iovcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vcc;
}
@@ -333,16 +328,14 @@ static int ltk500hd1829_prepare(struct drm_panel *panel)
ret = mipi_dsi_generic_write(dsi, &init_code[i],
sizeof(struct ltk500hd1829_cmd));
if (ret < 0) {
- DRM_DEV_ERROR(panel->dev,
- "failed to write init cmds: %d\n", ret);
+ dev_err(panel->dev, "failed to write init cmds: %d\n", ret);
goto disable_iovcc;
}
}
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
- ret);
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", ret);
goto disable_iovcc;
}
@@ -351,8 +344,7 @@ static int ltk500hd1829_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
- ret);
+ dev_err(panel->dev, "failed to set display on: %d\n", ret);
goto disable_iovcc;
}
@@ -389,9 +381,9 @@ static int ltk500hd1829_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(ctx->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -423,7 +415,7 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
@@ -431,9 +423,7 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vcc)) {
ret = PTR_ERR(ctx->vcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request vcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
return ret;
}
@@ -441,9 +431,7 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request iovcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
@@ -467,7 +455,7 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -482,13 +470,11 @@ static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
@@ -500,8 +486,7 @@ static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
- ret);
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 14456b9cd5c0..f3183b68704f 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -198,7 +198,9 @@ static int lb035q02_probe(struct spi_device *spi)
drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&lcd->panel);
+ drm_panel_add(&lcd->panel);
+
+ return 0;
}
static int lb035q02_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index aedc485d0727..8e5160af1de5 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -261,7 +261,9 @@ static int lg4573_probe(struct spi_device *spi)
drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&ctx->panel);
+ drm_panel_add(&ctx->panel);
+
+ return 0;
}
static int lg4573_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 5ce3f4a2b7a1..66c7d765b8f7 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -37,6 +37,8 @@ struct panel_lvds {
struct gpio_desc *enable_gpio;
struct gpio_desc *reset_gpio;
+
+ enum drm_panel_orientation orientation;
};
static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
@@ -99,6 +101,7 @@ static int panel_lvds_get_modes(struct drm_panel *panel,
connector->display_info.bus_flags = lvds->data_mirror
? DRM_BUS_FLAG_DATA_LSB_TO_MSB
: DRM_BUS_FLAG_DATA_MSB_TO_LSB;
+ drm_connector_set_panel_orientation(connector, lvds->orientation);
return 1;
}
@@ -116,6 +119,12 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
const char *mapping;
int ret;
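+ /* Optional "rotation" DT property; if absent, the orientation is left unknown */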
+ ret = of_drm_get_panel_orientation(np, &lvds->orientation);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%pOF: failed to get orientation: %d\n", np, ret);
+ return ret;
+ }
+
ret = of_get_display_timing(np, "panel-timing", &timing);
if (ret < 0) {
dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
@@ -227,9 +236,7 @@ static int panel_lvds_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = drm_panel_add(&lvds->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&lvds->panel);
dev_set_drvdata(lvds->dev, lvds);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
new file mode 100644
index 000000000000..0c5f22e95c2d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mantix MLAF057WE51 5.7" MIPI-DSI panel driver
+ *
+ * Copyright (C) Purism SPC 2020
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DRV_NAME "panel-mantix-mlaf057we51"
+
+/* Manufacturer specific commands sent via DSI */
+#define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
+#define MANTIX_CMD_INT_CANCEL 0x4C
+
+struct mantix {
+ struct device *dev;
+ struct drm_panel panel;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *tp_rstn_gpio;
+
+ struct regulator *avdd;
+ struct regulator *avee;
+ struct regulator *vddi;
+};
+
+static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
+{
+ return container_of(panel, struct mantix, panel);
+}
+
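+/*
+ * On a write error this helper returns from the *calling* function,
+ * so it must only be used where an early return is safe.
+ */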
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int mantix_init_sequence(struct mantix *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor.
+ */
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
+
+ dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
+ dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
+
+ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
+ dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ msleep(20);
+
+ dev_dbg(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int mantix_enable(struct drm_panel *panel)
+{
+ struct mantix *ctx = panel_to_mantix(panel);
+ struct device *dev = ctx->dev;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ int ret;
+
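+ /* The vendor init sequence is sent before taking the panel out of sleep mode */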
+ ret = mantix_init_sequence(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode\n");
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+ usleep_range(10000, 12000);
+
+ ret = mipi_dsi_turn_on_peripheral(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to turn on peripheral\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mantix_disable(struct drm_panel *panel)
+{
+ struct mantix *ctx = panel_to_mantix(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
+
+ return 0;
+}
+
+static int mantix_unprepare(struct drm_panel *panel)
+{
+ struct mantix *ctx = panel_to_mantix(panel);
+
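+ /* Roughly the reverse of mantix_prepare(): assert the resets, then drop the supplies */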
+ gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_disable(ctx->avee);
+ regulator_disable(ctx->avdd);
+ /* T11 */
+ usleep_range(5000, 6000);
+ regulator_disable(ctx->vddi);
+ /* T14 */
+ msleep(50);
+
+ return 0;
+}
+
+static int mantix_prepare(struct drm_panel *panel)
+{
+ struct mantix *ctx = panel_to_mantix(panel);
+ int ret;
+
+ /* Focaltech FT8006P, section 7.3.1 and 7.3.4 */
+ dev_dbg(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vddi);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to enable vddi supply: %d\n", ret);
+ return ret;
+ }
+
+ /* T1 + T2 */
+ usleep_range(8000, 10000);
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to enable avdd supply: %d\n", ret);
+ return ret;
+ }
+
+ /* T2d */
+ usleep_range(3500, 4000);
+ ret = regulator_enable(ctx->avee);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to enable avee supply: %d\n", ret);
+ return ret;
+ }
+
+ /* T3 + T4 + time for voltage to become stable: */
+ usleep_range(6000, 7000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ gpiod_set_value_cansleep(ctx->tp_rstn_gpio, 0);
+
+ /* T6 */
+ msleep(50);
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 45,
+ .hsync_end = 720 + 45 + 14,
+ .htotal = 720 + 45 + 14 + 25,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 130,
+ .vsync_end = 1440 + 130 + 8,
+ .vtotal = 1440 + 130 + 8 + 106,
+ .clock = 85298,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
+static int mantix_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct mantix *ctx = panel_to_mantix(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs mantix_drm_funcs = {
+ .disable = mantix_disable,
+ .unprepare = mantix_unprepare,
+ .prepare = mantix_prepare,
+ .enable = mantix_enable,
+ .get_modes = mantix_get_modes,
+};
+
+static int mantix_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct mantix *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->tp_rstn_gpio = devm_gpiod_get(dev, "mantix,tp-rstn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->tp_rstn_gpio)) {
+ dev_err(dev, "cannot get tp-rstn gpio\n");
+ return PTR_ERR(ctx->tp_rstn_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ctx->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(ctx->avdd))
+ return dev_err_probe(dev, PTR_ERR(ctx->avdd), "Failed to request avdd regulator\n");
+
+ ctx->avee = devm_regulator_get(dev, "avee");
+ if (IS_ERR(ctx->avee))
+ return dev_err_probe(dev, PTR_ERR(ctx->avee), "Failed to request avee regulator\n");
+
+ ctx->vddi = devm_regulator_get(dev, "vddi");
+ if (IS_ERR(ctx->vddi))
+ return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");
+
+ drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed (%d). Is host ready?\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode),
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ return 0;
+}
+
+static void mantix_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_unprepare(&ctx->panel);
+ drm_panel_disable(&ctx->panel);
+}
+
+static int mantix_remove(struct mipi_dsi_device *dsi)
+{
+ struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mantix_shutdown(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id mantix_of_match[] = {
+ { .compatible = "mantix,mlaf057we51-x" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mantix_of_match);
+
+static struct mipi_dsi_driver mantix_driver = {
+ .probe = mantix_probe,
+ .remove = mantix_remove,
+ .shutdown = mantix_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = mantix_of_match,
+ },
+};
+module_mipi_dsi_driver(mantix_driver);
+
+MODULE_AUTHOR("Guido Günther <agx@sigxcpu.org>");
+MODULE_DESCRIPTION("DRM driver for Mantix MLAF057WE51-X MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index f894971c1c7c..6e5ab1debc8b 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -207,7 +207,9 @@ static int nl8048_probe(struct spi_device *spi)
drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&lcd->panel);
+ drm_panel_add(&lcd->panel);
+
+ return 0;
}
static int nl8048_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index e98d54df00e7..b9a0e56f33e2 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -35,7 +35,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
#define MCS_CMD_READ_ID1 0xDA
@@ -376,6 +375,10 @@ struct nt35510 {
};
/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_mtp_read_param[] = { 0xAA, 0x55, 0x25, 0x01 };
+static const u8 nt35510_mauc_mtp_read_setting[] = { 0x01, 0x02, 0x00, 0x20,
+ 0x33, 0x13, 0x00, 0x40,
+ 0x00, 0x00, 0x23, 0x02 };
static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
static const u8 nt35510_vgh_on[] = { 0x01 };
@@ -400,9 +403,7 @@ static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
chunk = 15;
ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
if (ret < 0) {
- DRM_DEV_ERROR(nt->dev,
- "error sending DCS command seq cmd %02x\n",
- cmd);
+ dev_err(nt->dev, "error sending DCS command seq cmd %02x\n", cmd);
return ret;
}
cmdwritten += chunk;
@@ -414,16 +415,13 @@ static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
chunk = 15;
ret = mipi_dsi_generic_write(dsi, seqp, chunk);
if (ret < 0) {
- DRM_DEV_ERROR(nt->dev,
- "error sending generic write seq %02x\n",
- cmd);
+ dev_err(nt->dev, "error sending generic write seq %02x\n", cmd);
return ret;
}
cmdwritten += chunk;
seqp += chunk;
}
- DRM_DEV_DEBUG(nt->dev, "sent command %02x %02x bytes\n",
- cmd, cmdlen);
+ dev_dbg(nt->dev, "sent command %02x %02x bytes\n", cmd, cmdlen);
return 0;
}
@@ -435,17 +433,17 @@ static int nt35510_read_id(struct nt35510 *nt)
ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
if (ret < 0) {
- DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+ dev_err(nt->dev, "could not read MTP ID1\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
if (ret < 0) {
- DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+ dev_err(nt->dev, "could not read MTP ID2\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
if (ret < 0) {
- DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+ dev_err(nt->dev, "could not read MTP ID3\n");
return ret;
}
@@ -454,9 +452,7 @@ static int nt35510_read_id(struct nt35510 *nt)
* ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
* version.
*/
- DRM_DEV_INFO(nt->dev,
- "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
- id1, id2, id3);
+ dev_info(nt->dev, "MTP ID manufacturer: %02x version: %02x driver: %02x\n", id1, id2, id3);
return 0;
}
@@ -657,7 +653,7 @@ static int nt35510_set_brightness(struct backlight_device *bl)
u8 brightness = bl->props.brightness;
int ret;
- DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+ dev_dbg(nt->dev, "set brightness %d\n", brightness);
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&brightness,
sizeof(brightness));
@@ -698,6 +694,18 @@ static int nt35510_power_on(struct nt35510 *nt)
usleep_range(120000, 140000);
}
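+	/* Set up MTP read parameters so the panel ID can be read back below */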
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MTP_READ_PARAM,
+ ARRAY_SIZE(nt35510_mauc_mtp_read_param),
+ nt35510_mauc_mtp_read_param);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MTP_READ_SETTING,
+ ARRAY_SIZE(nt35510_mauc_mtp_read_setting),
+ nt35510_mauc_mtp_read_setting);
+ if (ret)
+ return ret;
+
ret = nt35510_read_id(nt);
if (ret)
return ret;
@@ -780,8 +788,7 @@ static int nt35510_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret) {
- DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
- ret);
+ dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
return ret;
}
usleep_range(10000, 20000);
@@ -789,8 +796,7 @@ static int nt35510_unprepare(struct drm_panel *panel)
/* Enter sleep mode */
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
- ret);
+ dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
return ret;
}
@@ -817,8 +823,7 @@ static int nt35510_prepare(struct drm_panel *panel)
/* Exit sleep mode */
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
- ret);
+ dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
return ret;
}
/* Up to 120 ms */
@@ -826,8 +831,7 @@ static int nt35510_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
- DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
- ret);
+ dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
return ret;
}
/* Some 10 ms */
@@ -848,7 +852,7 @@ static int nt35510_get_modes(struct drm_panel *panel,
info->height_mm = nt->conf->height_mm;
mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
if (!mode) {
- DRM_ERROR("bad mode or failed to add mode\n");
+ dev_err(panel->dev, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
@@ -947,7 +951,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
&nt35510_bl_ops, NULL);
if (IS_ERR(bl)) {
- DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ dev_err(dev, "failed to register backlight device\n");
return PTR_ERR(bl);
}
bl->props.max_brightness = 255;
@@ -956,9 +960,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
nt->panel.backlight = bl;
}
- ret = drm_panel_add(&nt->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&nt->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 91df050ba3f6..f8151fe3ac9a 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -6,7 +6,6 @@
* Copyright (C) 2019, Paul Cercueil <paul@crapouillou.net>
*/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -57,14 +56,11 @@ struct nt39016_panel_info {
struct nt39016 {
struct drm_panel drm_panel;
- struct device *dev;
struct regmap *map;
struct regulator *supply;
const struct nt39016_panel_info *panel_info;
struct gpio_desc *reset_gpio;
-
- struct backlight_device *backlight;
};
static inline struct nt39016 *to_nt39016(struct drm_panel *panel)
@@ -127,7 +123,7 @@ static int nt39016_prepare(struct drm_panel *drm_panel)
err = regulator_enable(panel->supply);
if (err) {
- dev_err(panel->dev, "Failed to enable power supply: %d", err);
+ dev_err(drm_panel->dev, "Failed to enable power supply: %d\n", err);
return err;
}
@@ -146,7 +142,7 @@ static int nt39016_prepare(struct drm_panel *drm_panel)
err = regmap_multi_reg_write(panel->map, nt39016_panel_regs,
ARRAY_SIZE(nt39016_panel_regs));
if (err) {
- dev_err(panel->dev, "Failed to init registers: %d", err);
+ dev_err(drm_panel->dev, "Failed to init registers: %d\n", err);
goto err_disable_regulator;
}
@@ -176,18 +172,16 @@ static int nt39016_enable(struct drm_panel *drm_panel)
ret = regmap_write(panel->map, NT39016_REG_SYSTEM,
NT39016_SYSTEM_RESET_N | NT39016_SYSTEM_STANDBY);
if (ret) {
- dev_err(panel->dev, "Unable to enable panel: %d", ret);
+ dev_err(drm_panel->dev, "Unable to enable panel: %d\n", ret);
return ret;
}
- if (panel->backlight) {
+ if (drm_panel->backlight) {
/* Wait for the picture to be ready before enabling backlight */
msleep(150);
-
- ret = backlight_enable(panel->backlight);
}
- return ret;
+ return 0;
}
static int nt39016_disable(struct drm_panel *drm_panel)
@@ -195,12 +189,10 @@ static int nt39016_disable(struct drm_panel *drm_panel)
struct nt39016 *panel = to_nt39016(drm_panel);
int err;
- backlight_disable(panel->backlight);
-
err = regmap_write(panel->map, NT39016_REG_SYSTEM,
NT39016_SYSTEM_RESET_N);
if (err) {
- dev_err(panel->dev, "Unable to disable panel: %d", err);
+ dev_err(drm_panel->dev, "Unable to disable panel: %d\n", err);
return err;
}
@@ -259,7 +251,6 @@ static int nt39016_probe(struct spi_device *spi)
if (!panel)
return -ENOMEM;
- panel->dev = dev;
spi_set_drvdata(spi, panel);
panel->panel_info = of_device_get_match_data(dev);
@@ -268,13 +259,13 @@ static int nt39016_probe(struct spi_device *spi)
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply)) {
- dev_err(dev, "Failed to get power supply");
+ dev_err(dev, "Failed to get power supply\n");
return PTR_ERR(panel->supply);
}
panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(panel->reset_gpio)) {
- dev_err(dev, "Failed to get reset GPIO");
+ dev_err(dev, "Failed to get reset GPIO\n");
return PTR_ERR(panel->reset_gpio);
}
@@ -282,33 +273,28 @@ static int nt39016_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3 | SPI_3WIRE;
err = spi_setup(spi);
if (err) {
- dev_err(dev, "Failed to setup SPI");
+ dev_err(dev, "Failed to setup SPI\n");
return err;
}
panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config);
if (IS_ERR(panel->map)) {
- dev_err(dev, "Failed to init regmap");
+ dev_err(dev, "Failed to init regmap\n");
return PTR_ERR(panel->map);
}
- panel->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(panel->backlight)) {
- err = PTR_ERR(panel->backlight);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get backlight handle");
- return err;
- }
-
drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
DRM_MODE_CONNECTOR_DPI);
- err = drm_panel_add(&panel->drm_panel);
- if (err < 0) {
- dev_err(dev, "Failed to register panel");
+ err = drm_panel_of_backlight(&panel->drm_panel);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get backlight handle\n");
return err;
}
+ drm_panel_add(&panel->drm_panel);
+
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index ecd76b5391d3..cb5cb27462df 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -283,7 +283,9 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (ret)
return ret;
- return drm_panel_add(&lcd->panel);
+ drm_panel_add(&lcd->panel);
+
+ return 0;
}
static int lcd_olinuxino_remove(struct i2c_client *client)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index d956522f32ee..b6e377aa1131 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -17,7 +17,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255
@@ -97,7 +96,7 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
if (mipi_dsi_dcs_write_buffer(dsi, data, len) < 0)
- DRM_WARN("mipi dsi dcs write buffer failed\n");
+ dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n");
}
static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
@@ -313,7 +312,7 @@ static int otm8009a_prepare(struct drm_panel *panel)
ret = regulator_enable(ctx->supply);
if (ret < 0) {
- DRM_ERROR("failed to enable supply: %d\n", ret);
+ dev_err(panel->dev, "failed to enable supply: %d\n", ret);
return ret;
}
@@ -355,9 +354,9 @@ static int otm8009a_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_ERROR("failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -390,7 +389,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
u8 data[2];
if (!ctx->prepared) {
- DRM_DEBUG("lcd not ready yet for setting its backlight!\n");
+ dev_dbg(&bd->dev, "lcd not ready yet for setting its backlight!\n");
return -ENXIO;
}
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 83e5aa47f0d6..45b975dee587 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -164,7 +164,9 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (ret)
return ret;
- return drm_panel_add(&osd101t2587->base);
+ drm_panel_add(&osd101t2587->base);
+
+ return 0;
}
static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 627dfcf8adb4..3c20beeb1781 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -206,7 +206,9 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
if (ret)
return ret;
- return drm_panel_add(&wuxga_nt->base);
+ drm_panel_add(&wuxga_nt->base);
+
+ return 0;
}
static void wuxga_nt_panel_del(struct wuxga_nt_panel *wuxga_nt)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index e50ee26474cf..5e9ccefb88f6 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -361,7 +361,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
struct rpi_touchscreen *ts;
struct device_node *endpoint, *dsi_host_node;
struct mipi_dsi_host *host;
- int ret, ver;
+ int ver;
struct mipi_dsi_device_info info = {
.type = RPI_DSI_DRIVER_NAME,
.channel = 0,
@@ -429,9 +429,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
*/
- ret = drm_panel_add(&ts->base);
- if (ret)
- return ret;
+ drm_panel_add(&ts->base);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 57ff2b1f6361..572547d1aa83 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -19,7 +19,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
/* Panel specific color-format bits */
#define COL_FMT_16BPP 0x55
@@ -329,7 +328,7 @@ static int rad_panel_enable(struct drm_panel *panel)
ret = rad_panel_push_cmd_list(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to send MCS (%d)\n", ret);
+ dev_err(dev, "Failed to send MCS (%d)\n", ret);
goto fail;
}
@@ -341,7 +340,7 @@ static int rad_panel_enable(struct drm_panel *panel)
/* Software reset */
ret = mipi_dsi_dcs_soft_reset(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to do Software Reset (%d)\n", ret);
+ dev_err(dev, "Failed to do Software Reset (%d)\n", ret);
goto fail;
}
@@ -350,33 +349,32 @@ static int rad_panel_enable(struct drm_panel *panel)
/* Set DSI mode */
ret = mipi_dsi_generic_write(dsi, (u8[]){ 0xC2, 0x0B }, 2);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set DSI mode (%d)\n", ret);
+ dev_err(dev, "Failed to set DSI mode (%d)\n", ret);
goto fail;
}
/* Set tear ON */
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set tear ON (%d)\n", ret);
+ dev_err(dev, "Failed to set tear ON (%d)\n", ret);
goto fail;
}
/* Set tear scanline */
ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x380);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set tear scanline (%d)\n", ret);
+ dev_err(dev, "Failed to set tear scanline (%d)\n", ret);
goto fail;
}
/* Set pixel format */
ret = mipi_dsi_dcs_set_pixel_format(dsi, color_format);
- DRM_DEV_DEBUG_DRIVER(dev, "Interface color format set to 0x%x\n",
- color_format);
+ dev_dbg(dev, "Interface color format set to 0x%x\n", color_format);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set pixel format (%d)\n", ret);
+ dev_err(dev, "Failed to set pixel format (%d)\n", ret);
goto fail;
}
/* Exit sleep mode */
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to exit sleep mode (%d)\n", ret);
+ dev_err(dev, "Failed to exit sleep mode (%d)\n", ret);
goto fail;
}
@@ -384,7 +382,7 @@ static int rad_panel_enable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set display ON (%d)\n", ret);
+ dev_err(dev, "Failed to set display ON (%d)\n", ret);
goto fail;
}
@@ -418,7 +416,7 @@ static int rad_panel_disable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to set display OFF (%d)\n", ret);
+ dev_err(dev, "Failed to set display OFF (%d)\n", ret);
return ret;
}
@@ -426,7 +424,7 @@ static int rad_panel_disable(struct drm_panel *panel)
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to enter sleep mode (%d)\n", ret);
+ dev_err(dev, "Failed to enter sleep mode (%d)\n", ret);
return ret;
}
@@ -442,9 +440,9 @@ static int rad_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -554,8 +552,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
panel->dsi = dsi;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
- MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO;
ret = of_property_read_u32(np, "video-mode", &video_mode);
if (!ret) {
@@ -609,9 +606,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
- ret = drm_panel_add(&panel->panel);
- if (ret)
- return ret;
+ drm_panel_add(&panel->panel);
ret = mipi_dsi_attach(dsi);
if (ret)
@@ -628,8 +623,7 @@ static int rad_panel_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret)
- DRM_DEV_ERROR(dev, "Failed to detach from host (%d)\n",
- ret);
+ dev_err(dev, "Failed to detach from host (%d)\n", ret);
drm_panel_remove(&rad->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 81ae8be62d15..f908eeafb1af 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -17,7 +17,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
/*** Manufacturer Command Set ***/
#define MCS_CMD_MODE_SW 0xFE /* CMD Mode Switch */
@@ -110,8 +109,7 @@ static void rm68200_dcs_write_buf(struct rm68200 *ctx, const void *data,
err = mipi_dsi_dcs_write_buffer(dsi, data, len);
if (err < 0)
- DRM_ERROR_RATELIMITED("MIPI DSI DCS write buffer failed: %d\n",
- err);
+ dev_err_ratelimited(ctx->dev, "MIPI DSI DCS write buffer failed: %d\n", err);
}
static void rm68200_dcs_write_cmd(struct rm68200 *ctx, u8 cmd, u8 value)
@@ -121,7 +119,7 @@ static void rm68200_dcs_write_cmd(struct rm68200 *ctx, u8 cmd, u8 value)
err = mipi_dsi_dcs_write(dsi, cmd, &value, 1);
if (err < 0)
- DRM_ERROR_RATELIMITED("MIPI DSI DCS write failed: %d\n", err);
+ dev_err_ratelimited(ctx->dev, "MIPI DSI DCS write failed: %d\n", err);
}
#define dcs_write_seq(ctx, seq...) \
@@ -256,11 +254,11 @@ static int rm68200_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
- DRM_WARN("failed to set display off: %d\n", ret);
+ dev_warn(panel->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret)
- DRM_WARN("failed to enter sleep mode: %d\n", ret);
+ dev_warn(panel->dev, "failed to enter sleep mode: %d\n", ret);
msleep(120);
@@ -287,7 +285,7 @@ static int rm68200_prepare(struct drm_panel *panel)
ret = regulator_enable(ctx->supply);
if (ret < 0) {
- DRM_ERROR("failed to enable supply: %d\n", ret);
+ dev_err(ctx->dev, "failed to enable supply: %d\n", ret);
return ret;
}
@@ -336,9 +334,9 @@ static int rm68200_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_ERROR("failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index a7b0b3e39e1a..535c8d1cca21 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -23,7 +23,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct rb070d30_panel {
struct drm_panel panel;
@@ -50,7 +49,7 @@ static int rb070d30_panel_prepare(struct drm_panel *panel)
ret = regulator_enable(ctx->supply);
if (ret < 0) {
- DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
+ dev_err(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
return ret;
}
@@ -117,9 +116,8 @@ static int rb070d30_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(&ctx->dsi->dev,
- "Failed to add mode " DRM_MODE_FMT "\n",
- DRM_MODE_ARG(&default_mode));
+ dev_err(&ctx->dsi->dev, "Failed to add mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&default_mode));
return -EINVAL;
}
@@ -166,13 +164,13 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(ctx->gpios.reset);
}
ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.power)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our power GPIO\n");
return PTR_ERR(ctx->gpios.power);
}
@@ -182,7 +180,7 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
*/
ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.updn)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our updn GPIO\n");
return PTR_ERR(ctx->gpios.updn);
}
@@ -192,7 +190,7 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
*/
ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.shlr)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our shlr GPIO\n");
return PTR_ERR(ctx->gpios.shlr);
}
@@ -200,9 +198,7 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
dsi->format = MIPI_DSI_FMT_RGB888;
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9bb2e8c7934a..f484147fc3a6 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -21,7 +21,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
/* Manufacturer Command Set */
#define MCS_MANPWR 0xb0
@@ -269,7 +268,7 @@ static int ld9040_get_modes(struct drm_panel *panel,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode\n");
+ dev_err(panel->dev, "failed to create a new display mode\n");
return 0;
}
@@ -354,7 +353,9 @@ static int ld9040_probe(struct spi_device *spi)
drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&ctx->panel);
+ drm_panel_add(&ctx->panel);
+
+ return 0;
}
static int ld9040_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f02645d396ac..4aac0d1573dd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -7,7 +7,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -55,8 +54,7 @@ static int s6d16d0_unprepare(struct drm_panel *panel)
/* Enter sleep mode */
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to enter sleep mode (%d)\n",
- ret);
+ dev_err(s6->dev, "failed to enter sleep mode (%d)\n", ret);
return ret;
}
@@ -75,7 +73,7 @@ static int s6d16d0_prepare(struct drm_panel *panel)
ret = regulator_enable(s6->supply);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to enable supply (%d)\n", ret);
+ dev_err(s6->dev, "failed to enable supply (%d)\n", ret);
return ret;
}
@@ -90,15 +88,13 @@ static int s6d16d0_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_tear_on(dsi,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
- ret);
+ dev_err(s6->dev, "failed to enable vblank TE (%d)\n", ret);
return ret;
}
/* Exit sleep mode and power on */
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to exit sleep mode (%d)\n",
- ret);
+ dev_err(s6->dev, "failed to exit sleep mode (%d)\n", ret);
return ret;
}
@@ -113,8 +109,7 @@ static int s6d16d0_enable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to turn display on (%d)\n",
- ret);
+ dev_err(s6->dev, "failed to turn display on (%d)\n", ret);
return ret;
}
@@ -129,8 +124,7 @@ static int s6d16d0_disable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret) {
- DRM_DEV_ERROR(s6->dev, "failed to turn display off (%d)\n",
- ret);
+ dev_err(s6->dev, "failed to turn display off (%d)\n", ret);
return ret;
}
@@ -144,7 +138,7 @@ static int s6d16d0_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &samsung_s6d16d0_mode);
if (!mode) {
- DRM_ERROR("bad mode or failed to add mode\n");
+ dev_err(panel->dev, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
@@ -204,17 +198,14 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(s6->reset_gpio)) {
ret = PTR_ERR(s6->reset_gpio);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
- ret);
+ dev_err(dev, "failed to request GPIO (%d)\n", ret);
return ret;
}
drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&s6->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&s6->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 80ef122e7466..1d1c79a18613 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -18,7 +18,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define S6E3HA2_MIN_BRIGHTNESS 0
#define S6E3HA2_MAX_BRIGHTNESS 100
@@ -651,7 +650,7 @@ static int s6e3ha2_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
- DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
drm_mode_vrefresh(ctx->desc->mode));
return -ENOMEM;
@@ -733,9 +732,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- goto unregister_backlight;
+ drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
@@ -745,8 +742,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
remove_panel:
drm_panel_remove(&ctx->panel);
-
-unregister_backlight:
backlight_device_unregister(ctx->bl_dev);
return ret;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 1247656d73bf..b962c817fb30 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -19,7 +19,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define MCS_LEVEL2_KEY 0xf0
#define MCS_MTP_KEY 0xf1
@@ -406,7 +405,7 @@ static int s6e63j0x03_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
drm_mode_vrefresh(&default_mode));
return -ENOMEM;
@@ -479,9 +478,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- goto unregister_backlight;
+ drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
@@ -491,8 +488,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
remove_panel:
drm_panel_remove(&ctx->panel);
-
-unregister_backlight:
backlight_device_unregister(ctx->bl_dev);
return ret;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
new file mode 100644
index 000000000000..eec74c10ddda
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DSI interface to the Samsung S6E63M0 panel.
+ * (C) 2019 Linus Walleij
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+
+#include "panel-samsung-s6e63m0.h"
+
+#define MCS_GLOBAL_PARAM 0xb0
+#define S6E63M0_DSI_MAX_CHUNK 15 /* CMD + 15 bytes max */
+
+static int s6e63m0_dsi_dcs_read(struct device *dev, const u8 cmd, u8 *data)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, cmd, data, 1);
+ if (ret < 0) {
+ dev_err(dev, "could not read DCS CMD %02x\n", cmd);
+ return ret;
+ }
+
+ dev_info(dev, "DSI read CMD %02x = %02x\n", cmd, *data);
+
+ return 0;
+}
+
+static int s6e63m0_dsi_dcs_write(struct device *dev, const u8 *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ const u8 *seqp = data;
+ u8 cmd;
+ u8 cmdwritten;
+ int remain;
+ int chunk;
+ int ret;
+
+ dev_info(dev, "DSI writing dcs seq: %*ph\n", (int)len, data);
+
+ /* Pick out and skip past the DCS command */
+ cmd = *seqp;
+ seqp++;
+ cmdwritten = 0;
+ remain = len - 1;
+ chunk = remain;
+
+ /* Send max S6E63M0_DSI_MAX_CHUNK bytes at a time */
+ if (chunk > S6E63M0_DSI_MAX_CHUNK)
+ chunk = S6E63M0_DSI_MAX_CHUNK;
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ dev_err(dev, "error sending DCS command seq cmd %02x\n", cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+
+ while (cmdwritten < remain) {
+ chunk = remain - cmdwritten;
+ if (chunk > S6E63M0_DSI_MAX_CHUNK)
+ chunk = S6E63M0_DSI_MAX_CHUNK;
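+		/* MCS_GLOBAL_PARAM carries the running byte offset so the next chunk continues the sequence */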
+ ret = mipi_dsi_dcs_write(dsi, MCS_GLOBAL_PARAM, &cmdwritten, 1);
+ if (ret < 0) {
+ dev_err(dev, "error sending CMD %02x global param %02x\n",
+ cmd, cmdwritten);
+ return ret;
+ }
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ dev_err(dev, "error sending CMD %02x chunk\n", cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+ }
+ dev_info(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten);
+
+ usleep_range(8000, 9000);
+
+ return 0;
+}
+
+static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->hs_rate = 349440000;
+ dsi->lp_rate = 9600000;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_MODE_VIDEO_BURST;
+
+ ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
+ true);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ s6e63m0_remove(dev);
+
+ return ret;
+}
+
+static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ mipi_dsi_detach(dsi);
+ return s6e63m0_remove(&dsi->dev);
+}
+
+static const struct of_device_id s6e63m0_dsi_of_match[] = {
+ { .compatible = "samsung,s6e63m0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e63m0_dsi_of_match);
+
+static struct mipi_dsi_driver s6e63m0_dsi_driver = {
+ .probe = s6e63m0_dsi_probe,
+ .remove = s6e63m0_dsi_remove,
+ .driver = {
+ .name = "panel-samsung-s6e63m0",
+ .of_match_table = s6e63m0_dsi_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e63m0_dsi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>");
+MODULE_DESCRIPTION("s6e63m0 LCD DSI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
new file mode 100644
index 000000000000..d298d780220d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-spi.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
+#include "panel-samsung-s6e63m0.h"
+
+#define DATA_MASK 0x100
+
+static int s6e63m0_spi_dcs_read(struct device *dev, const u8 cmd, u8 *data)
+{
+ /*
+ * FIXME: implement reading DCS commands over SPI so we can
+ * properly identify which physical panel is connected.
+ */
+ *data = 0;
+
+ return 0;
+}
+
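+/* One 9-bit word per transfer: bit 8 (DATA_MASK) marks data bytes vs. the command byte */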
+static int s6e63m0_spi_write_word(struct device *dev, u16 data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = &data,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(spi, &msg);
+}
+
+static int s6e63m0_spi_dcs_write(struct device *dev, const u8 *data, size_t len)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "SPI writing dcs seq: %*ph\n", (int)len, data);
+ ret = s6e63m0_spi_write_word(dev, *data);
+
+ while (!ret && --len) {
+ ++data;
+ ret = s6e63m0_spi_write_word(dev, *data | DATA_MASK);
+ }
+
+ if (ret) {
+ dev_err(dev, "SPI error %d writing dcs seq: %*ph\n", ret,
+ (int)len, data);
+ }
+
+ usleep_range(300, 310);
+
+ return ret;
+}
+
+static int s6e63m0_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ int ret;
+
+ spi->bits_per_word = 9;
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+ return s6e63m0_probe(dev, s6e63m0_spi_dcs_read, s6e63m0_spi_dcs_write,
+ false);
+}
+
+static int s6e63m0_spi_remove(struct spi_device *spi)
+{
+ return s6e63m0_remove(&spi->dev);
+}
+
+static const struct of_device_id s6e63m0_spi_of_match[] = {
+ { .compatible = "samsung,s6e63m0" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e63m0_spi_of_match);
+
+static struct spi_driver s6e63m0_spi_driver = {
+ .probe = s6e63m0_spi_probe,
+ .remove = s6e63m0_spi_remove,
+ .driver = {
+ .name = "panel-samsung-s6e63m0",
+ .of_match_table = s6e63m0_spi_of_match,
+ },
+};
+module_spi_driver(s6e63m0_spi_driver);
+
+MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>");
+MODULE_DESCRIPTION("s6e63m0 LCD SPI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 64421347bfd4..3eee67e2d86a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -10,32 +10,40 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include "panel-samsung-s6e63m0.h"
+
/* Manufacturer Command Set */
#define MCS_ELVSS_ON 0xb1
#define MCS_MIECTL1 0xc0
#define MCS_BCMODE 0xc1
+#define MCS_ERROR_CHECK 0xd5
+#define MCS_READ_ID1 0xda
+#define MCS_READ_ID2 0xdb
+#define MCS_READ_ID3 0xdc
+#define MCS_LEVEL_2_KEY 0xf0
+#define MCS_MTP_KEY 0xf1
#define MCS_DISCTL 0xf2
#define MCS_SRCCTL 0xf6
#define MCS_IFCTL 0xf7
#define MCS_PANELCTL 0xF8
#define MCS_PGAMMACTL 0xfa
+#define S6E63M0_LCD_ID_VALUE_M2 0xA4
+#define S6E63M0_LCD_ID_VALUE_SM2 0xB4
+#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6
+
#define NUM_GAMMA_LEVELS 11
#define GAMMA_TABLE_COUNT 23
-#define DATA_MASK 0x100
-
#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
/* array of gamma tables for gamma value 2.2 */
@@ -88,8 +96,11 @@ static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
struct s6e63m0 {
struct device *dev;
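+	/* Bus-specific DCS read/write hooks supplied by the SPI or DSI front-end */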
+ int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val);
+ int (*dcs_write)(struct device *dev, const u8 *data, size_t len);
struct drm_panel panel;
struct backlight_device *bl_dev;
+ u8 lcd_type;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -135,43 +146,20 @@ static int s6e63m0_clear_error(struct s6e63m0 *ctx)
return ret;
}
-static int s6e63m0_spi_write_word(struct s6e63m0 *ctx, u16 data)
+static void s6e63m0_dcs_read(struct s6e63m0 *ctx, const u8 cmd, u8 *data)
{
- struct spi_device *spi = to_spi_device(ctx->dev);
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = &data,
- };
- struct spi_message msg;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
+ if (ctx->error < 0)
+ return;
- return spi_sync(spi, &msg);
+ ctx->error = ctx->dcs_read(ctx->dev, cmd, data);
}
static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len)
{
- int ret = 0;
-
if (ctx->error < 0 || len == 0)
return;
- DRM_DEV_DEBUG(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
- ret = s6e63m0_spi_write_word(ctx, *data);
-
- while (!ret && --len) {
- ++data;
- ret = s6e63m0_spi_write_word(ctx, *data | DATA_MASK);
- }
-
- if (ret) {
- DRM_DEV_ERROR(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
- (int)len, data);
- ctx->error = ret;
- }
-
- usleep_range(300, 310);
+ ctx->error = ctx->dcs_write(ctx->dev, data, len);
}
#define s6e63m0_dcs_write_seq_static(ctx, seq ...) \
@@ -180,6 +168,43 @@ static void s6e63m0_dcs_write(struct s6e63m0 *ctx, const u8 *data, size_t len)
s6e63m0_dcs_write(ctx, d, ARRAY_SIZE(d)); \
})
+static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
+{
+ u8 id1, id2, id3;
+ int ret;
+
+ s6e63m0_dcs_read(ctx, MCS_READ_ID1, &id1);
+ s6e63m0_dcs_read(ctx, MCS_READ_ID2, &id2);
+ s6e63m0_dcs_read(ctx, MCS_READ_ID3, &id3);
+
+ ret = s6e63m0_clear_error(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "error checking LCD type (%d)\n", ret);
+ ctx->lcd_type = 0x00;
+ return ret;
+ }
+
+ dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
+
+ /* We attempt to detect what panel is mounted on the controller */
+ switch (id2) {
+ case S6E63M0_LCD_ID_VALUE_M2:
+ dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n");
+ break;
+ case S6E63M0_LCD_ID_VALUE_SM2:
+ case S6E63M0_LCD_ID_VALUE_SM2_1:
+ dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n");
+ break;
+ default:
+ dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2);
+ break;
+ }
+
+ ctx->lcd_type = id2;
+
+ return 0;
+}
+
static void s6e63m0_init(struct s6e63m0 *ctx)
{
s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
@@ -251,8 +276,6 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
s6e63m0_dcs_write_seq_static(ctx, MCS_ELVSS_ON,
0x0b);
-
- s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
}
static int s6e63m0_power_on(struct s6e63m0 *ctx)
@@ -265,6 +288,9 @@ static int s6e63m0_power_on(struct s6e63m0 *ctx)
msleep(25);
+ /* Be sure to send a reset pulse */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ msleep(5);
gpiod_set_value(ctx->reset_gpio, 0);
msleep(120);
@@ -294,8 +320,10 @@ static int s6e63m0_disable(struct drm_panel *panel)
backlight_disable(ctx->bl_dev);
+ s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(10);
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
- msleep(200);
+ msleep(120);
ctx->enabled = false;
@@ -333,6 +361,15 @@ static int s6e63m0_prepare(struct drm_panel *panel)
if (ret < 0)
return ret;
+ /* Magic to unlock level 2 control of the display */
+ s6e63m0_dcs_write_seq_static(ctx, MCS_LEVEL_2_KEY, 0x5a, 0x5a);
+ /* Magic to unlock MTP reading */
+ s6e63m0_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0x5a, 0x5a);
+
+ ret = s6e63m0_check_lcd_type(ctx);
+ if (ret < 0)
+ return ret;
+
s6e63m0_init(ctx);
ret = s6e63m0_clear_error(ctx);
@@ -352,7 +389,15 @@ static int s6e63m0_enable(struct drm_panel *panel)
if (ctx->enabled)
return 0;
+ s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(120);
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(10);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_ERROR_CHECK,
+ 0xE7, 0x14, 0x60, 0x17, 0x0A, 0x49, 0xC3,
+ 0x8F, 0x19, 0x64, 0x91, 0x84, 0x76, 0x20,
+ 0x0F, 0x00);
backlight_enable(ctx->bl_dev);
@@ -368,9 +413,9 @@ static int s6e63m0_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_ERROR("failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -425,16 +470,17 @@ static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
&props);
if (IS_ERR(ctx->bl_dev)) {
ret = PTR_ERR(ctx->bl_dev);
- DRM_DEV_ERROR(dev, "error registering backlight device (%d)\n",
- ret);
+ dev_err(dev, "error registering backlight device (%d)\n", ret);
}
return ret;
}
-static int s6e63m0_probe(struct spi_device *spi)
+int s6e63m0_probe(struct device *dev,
+ int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val),
+ int (*dcs_write)(struct device *dev, const u8 *data, size_t len),
+ bool dsi_mode)
{
- struct device *dev = &spi->dev;
struct s6e63m0 *ctx;
int ret;
@@ -442,7 +488,9 @@ static int s6e63m0_probe(struct spi_device *spi)
if (!ctx)
return -ENOMEM;
- spi_set_drvdata(spi, ctx);
+ ctx->dcs_read = dcs_read;
+ ctx->dcs_write = dcs_write;
+ dev_set_drvdata(dev, ctx);
ctx->dev = dev;
ctx->enabled = false;
@@ -453,59 +501,39 @@ static int s6e63m0_probe(struct spi_device *spi)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
ctx->supplies);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to get regulators: %d\n", ret);
+ dev_err(dev, "failed to get regulators: %d\n", ret);
return ret;
}
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset-gpios %ld\n",
- PTR_ERR(ctx->reset_gpio));
+ dev_err(dev, "cannot get reset-gpios %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
- spi->bits_per_word = 9;
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "spi setup failed.\n");
- return ret;
- }
-
drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ dsi_mode ? DRM_MODE_CONNECTOR_DSI :
DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
return ret;
- return drm_panel_add(&ctx->panel);
+ drm_panel_add(&ctx->panel);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(s6e63m0_probe);
-static int s6e63m0_remove(struct spi_device *spi)
+int s6e63m0_remove(struct device *dev)
{
- struct s6e63m0 *ctx = spi_get_drvdata(spi);
+ struct s6e63m0 *ctx = dev_get_drvdata(dev);
drm_panel_remove(&ctx->panel);
return 0;
}
-
-static const struct of_device_id s6e63m0_of_match[] = {
- { .compatible = "samsung,s6e63m0" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, s6e63m0_of_match);
-
-static struct spi_driver s6e63m0_driver = {
- .probe = s6e63m0_probe,
- .remove = s6e63m0_remove,
- .driver = {
- .name = "panel-samsung-s6e63m0",
- .of_match_table = s6e63m0_of_match,
- },
-};
-module_spi_driver(s6e63m0_driver);
+EXPORT_SYMBOL_GPL(s6e63m0_remove);
MODULE_AUTHOR("Paweł Chmiel <pawel.mikolaj.chmiel@gmail.com>");
MODULE_DESCRIPTION("s6e63m0 LCD Driver");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h
new file mode 100644
index 000000000000..c669fec91763
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _PANEL_SAMSUNG_S6E63M0_H
+#define _PANEL_SAMSUNG_S6E63M0_H
+
+int s6e63m0_probe(struct device *dev,
+ int (*dcs_read)(struct device *dev, const u8 cmd, u8 *val),
+ int (*dcs_write)(struct device *dev, const u8 *data,
+ size_t len),
+ bool dsi_mode);
+int s6e63m0_remove(struct device *dev);
+
+#endif /* _PANEL_SAMSUNG_S6E63M0_H */
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 485eabecfcc9..ea63799ff2a1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -242,11 +242,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0) {
- dev_err(dev, "Failed to add panel: %d\n", ret);
- return ret;
- }
+ drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 8a028d2bd0d6..527371120266 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -25,7 +25,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define LDI_MTP_LENGTH 24
#define GAMMA_LEVEL_NUM 25
@@ -928,7 +927,7 @@ static int s6e8aa0_get_modes(struct drm_panel *panel,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode\n");
+ dev_err(panel->dev, "failed to create a new display mode\n");
return 0;
}
@@ -1020,9 +1019,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index e417dc4921c2..0ee508576231 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -258,9 +258,7 @@ static int seiko_panel_probe(struct device *dev,
if (err)
return err;
- err = drm_panel_add(&panel->base);
- if (err < 0)
- return err;
+ drm_panel_add(&panel->base);
dev_set_drvdata(dev, panel);
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index f07324b705b3..f8cd2a42ed13 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -325,7 +325,9 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (ret)
return ret;
- return drm_panel_add(&sharp->base);
+ drm_panel_add(&sharp->base);
+
+ return 0;
}
static void sharp_panel_del(struct sharp_panel *sharp)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index d7bf13b9e1d6..94992f45113a 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -187,7 +187,9 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
DRM_MODE_CONNECTOR_DPI);
- return drm_panel_add(&lcd->panel);
+ drm_panel_add(&lcd->panel);
+
+ return 0;
}
static int ls037v7dw01_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index b2e58935529c..16dbf0f353ed 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -261,7 +261,9 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
if (ret)
return ret;
- return drm_panel_add(&sharp_nt->base);
+ drm_panel_add(&sharp_nt->base);
+
+ return 0;
}
static void sharp_nt_panel_del(struct sharp_nt_panel *sharp_nt)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index cb6550d37e85..2be358fb46f7 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -112,6 +112,8 @@ struct panel_simple {
struct gpio_desc *hpd_gpio;
struct drm_display_mode override_mode;
+
+ enum drm_panel_orientation orientation;
};
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
@@ -371,6 +373,9 @@ static int panel_simple_get_modes(struct drm_panel *panel,
/* add hard-coded panel modes */
num += panel_simple_get_non_edid_modes(p, connector);
+ /* set up connector's "panel orientation" property */
+ drm_connector_set_panel_orientation(connector, p->orientation);
+
return num;
}
@@ -500,6 +505,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
struct panel_simple *panel;
struct display_timing dt;
struct device_node *ddc;
+ int connector_type;
+ u32 bus_flags;
int err;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -530,6 +537,12 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return err;
}
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return err;
+ }
+
ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
if (ddc) {
panel->ddc = of_find_i2c_adapter_by_node(ddc);
@@ -549,8 +562,14 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel_simple_parse_panel_timing_node(dev, panel, &dt);
}
- if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* Catch common mistakes for LVDS panels. */
+ connector_type = desc->connector_type;
+ /* Catch common mistakes for panels. */
+ switch (connector_type) {
+ case 0:
+ dev_warn(dev, "Specify missing connector_type\n");
+ connector_type = DRM_MODE_CONNECTOR_DPI;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
WARN_ON(desc->bus_flags &
~(DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
@@ -564,18 +583,48 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
desc->bpc != 8);
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ if (desc->bus_format == 0)
+ dev_warn(dev, "Specify missing bus_format\n");
+ if (desc->bpc != 6 && desc->bpc != 8)
+ dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ if (desc->bpc != 6 && desc->bpc != 8)
+ dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
+ break;
+ case DRM_MODE_CONNECTOR_DPI:
+ bus_flags = DRM_BUS_FLAG_DE_LOW |
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_DATA_MSB_TO_LSB |
+ DRM_BUS_FLAG_DATA_LSB_TO_MSB |
+ DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
+ if (desc->bus_flags & ~bus_flags)
+ dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
+ if (!(desc->bus_flags & bus_flags))
+ dev_warn(dev, "Specify missing bus_flags\n");
+ if (desc->bus_format == 0)
+ dev_warn(dev, "Specify missing bus_format\n");
+ if (desc->bpc != 6 && desc->bpc != 8)
+ dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
+ break;
+ default:
+ dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
+ connector_type = DRM_MODE_CONNECTOR_DPI;
+ break;
}
- drm_panel_init(&panel->base, dev, &panel_simple_funcs,
- desc->connector_type);
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
err = drm_panel_of_backlight(&panel->base);
if (err)
goto free_ddc;
- err = drm_panel_add(&panel->base);
- if (err < 0)
- goto free_ddc;
+ drm_panel_add(&panel->base);
dev_set_drvdata(dev, panel);
@@ -610,6 +659,32 @@ static void panel_simple_shutdown(struct device *dev)
drm_panel_unprepare(&panel->base);
}
+static const struct drm_display_mode ampire_am_1280800n3tzqw_t00h_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 40,
+ .hsync_end = 1280 + 40 + 80,
+ .htotal = 1280 + 40 + 80 + 40,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 10,
+ .vtotal = 800 + 3 + 10 + 10,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am_1280800n3tzqw_t00h = {
+ .modes = &ampire_am_1280800n3tzqw_t00h_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1191,10 +1266,14 @@ static const struct drm_display_mode boe_hv070wsa_mode = {
static const struct panel_desc boe_hv070wsa = {
.modes = &boe_hv070wsa_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 154,
.height = 90,
},
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
@@ -1414,6 +1493,36 @@ static const struct panel_desc cdtech_s070wv95_ct16 = {
},
};
+static const struct display_timing chefree_ch101olhlwh_002_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 65, 80, 95 },
+ .hback_porch = { 64, 79, 94 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 7, 11, 14 },
+ .vback_porch = { 7, 11, 14 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc chefree_ch101olhlwh_002 = {
+ .timings = &chefree_ch101olhlwh_002_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 135,
+ },
+ .delay = {
+ .enable = 200,
+ .disable = 200,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
.clock = 66770,
.hdisplay = 800,
@@ -2258,6 +2367,34 @@ static const struct panel_desc ivo_m133nwf4_r0 = {
.connector_type = DRM_MODE_CONNECTOR_eDP,
};
+static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
+ .clock = 81000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 32,
+ .htotal = 1366 + 40 + 32 + 62,
+ .vdisplay = 768,
+ .vsync_start = 768 + 5,
+ .vsync_end = 768 + 5 + 5,
+ .vtotal = 768 + 5 + 5 + 122,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
+ .modes = &kingdisplay_kd116n21_30nv_a010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2941,12 +3078,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
static const struct panel_desc ortustech_com43h4m85ulc = {
.modes = &ortustech_com43h4m85ulc_mode,
.num_modes = 1,
- .bpc = 8,
+ .bpc = 6,
.size = {
.width = 56,
.height = 93,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
@@ -3000,6 +3137,31 @@ static const struct panel_desc pda_91_00156_a0 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
+ .clock = 24750,
+ .hdisplay = 800,
+ .hsync_start = 800 + 54,
+ .hsync_end = 800 + 54 + 2,
+ .htotal = 800 + 54 + 2 + 44,
+ .vdisplay = 480,
+ .vsync_start = 480 + 49,
+ .vsync_end = 480 + 49 + 2,
+ .vtotal = 480 + 49 + 2 + 22,
+};
+
+static const struct panel_desc powertip_ph800480t013_idf02 = {
+ .modes = &powertip_ph800480t013_idf02_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode qd43003c0_40_mode = {
.clock = 9000,
@@ -3301,22 +3463,36 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
-static const struct display_timing sharp_ls020b1dd01d_timing = {
- .pixelclock = { 2000000, 4200000, 5000000 },
- .hactive = { 240, 240, 240 },
- .hfront_porch = { 66, 66, 66 },
- .hback_porch = { 1, 1, 1 },
- .hsync_len = { 1, 1, 1 },
- .vactive = { 160, 160, 160 },
- .vfront_porch = { 52, 52, 52 },
- .vback_porch = { 6, 6, 6 },
- .vsync_len = { 10, 10, 10 },
- .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_LOW,
+static const struct drm_display_mode sharp_ls020b1dd01d_modes[] = {
+ { /* 50 Hz */
+ .clock = 3000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 58,
+ .hsync_end = 240 + 58 + 1,
+ .htotal = 240 + 58 + 1 + 1,
+ .vdisplay = 160,
+ .vsync_start = 160 + 24,
+ .vsync_end = 160 + 24 + 10,
+ .vtotal = 160 + 24 + 10 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ { /* 60 Hz */
+ .clock = 3000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 8,
+ .hsync_end = 240 + 8 + 1,
+ .htotal = 240 + 8 + 1 + 1,
+ .vdisplay = 160,
+ .vsync_start = 160 + 24,
+ .vsync_end = 160 + 24 + 10,
+ .vtotal = 160 + 24 + 10 + 6,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
};
static const struct panel_desc sharp_ls020b1dd01d = {
- .timings = &sharp_ls020b1dd01d_timing,
- .num_timings = 1,
+ .modes = sharp_ls020b1dd01d_modes,
+ .num_modes = ARRAY_SIZE(sharp_ls020b1dd01d_modes),
.bpc = 6,
.size = {
.width = 42,
@@ -3725,6 +3901,9 @@ static const struct panel_desc arm_rtsm = {
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am-1280800n3tzqw-t00h",
+ .data = &ampire_am_1280800n3tzqw_t00h,
+ }, {
.compatible = "ampire,am-480272h3tmqw-t01h",
.data = &ampire_am_480272h3tmqw_t01h,
}, {
@@ -3821,6 +4000,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "cdtech,s070wv95-ct16",
.data = &cdtech_s070wv95_ct16,
}, {
+ .compatible = "chefree,ch101olhlwh-002",
+ .data = &chefree_ch101olhlwh_002,
+ }, {
.compatible = "chunghwa,claa070wp03xg",
.data = &chunghwa_claa070wp03xg,
}, {
@@ -3923,6 +4105,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ivo,m133nwf4-r0",
.data = &ivo_m133nwf4_r0,
}, {
+ .compatible = "kingdisplay,kd116n21-30nv-a010",
+ .data = &kingdisplay_kd116n21_30nv_a010,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -4013,6 +4198,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
+ .compatible = "powertip,ph800480t013-idf02",
+ .data = &powertip_ph800480t013_idf02,
+ }, {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
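
The four panel additions above (Ampire, Chefree, Kingdisplay, Powertip) all follow the same recipe: a display mode or timing table, a panel_desc, and an of_device_id entry. A minimal sketch of that recipe, using a hypothetical "acme,xyz1024" panel whose name and timings are illustrative only, not part of this patch:

static const struct drm_display_mode acme_xyz1024_mode = {
	.clock = 65000,
	.hdisplay = 1024,
	.hsync_start = 1024 + 24,
	.hsync_end = 1024 + 24 + 136,
	.htotal = 1024 + 24 + 136 + 160,
	.vdisplay = 768,
	.vsync_start = 768 + 3,
	.vsync_end = 768 + 3 + 6,
	.vtotal = 768 + 3 + 6 + 29,
};

static const struct panel_desc acme_xyz1024 = {
	.modes = &acme_xyz1024_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {		/* physical size in mm */
		.width = 304,
		.height = 228,
	},
	/* probe now warns when any of these three are left unset */
	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.connector_type = DRM_MODE_CONNECTOR_DPI,
};

	/* plus a platform_of_match entry, kept in alphabetical order: */
	{
		.compatible = "acme,xyz1024",
		.data = &acme_xyz1024,
	},
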
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 692041ae4eb6..4d2a149b202c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -7,7 +7,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
@@ -269,10 +268,9 @@ static int st7701_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, desc_mode);
if (!mode) {
- DRM_DEV_ERROR(&st7701->dsi->dev,
- "failed to add mode %ux%ux@%u\n",
- desc_mode->hdisplay, desc_mode->vdisplay,
- drm_mode_vrefresh(desc_mode));
+ dev_err(&st7701->dsi->dev, "failed to add mode %ux%u@%u\n",
+ desc_mode->hdisplay, desc_mode->vdisplay,
+ drm_mode_vrefresh(desc_mode));
return -ENOMEM;
}
@@ -358,7 +356,7 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
st7701->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(st7701->reset)) {
- DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
return PTR_ERR(st7701->reset);
}
@@ -380,9 +378,7 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- ret = drm_panel_add(&st7701->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&st7701->panel);
mipi_dsi_set_drvdata(dsi, st7701);
st7701->dsi = dsi;
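
The st7701 hunks above are the first of a mechanical conversion that repeats through every panel driver below: the drm_print.h wrappers already take a struct device pointer, so each call can be replaced one-for-one by the core dev_*() helper. Schematically (a sketch, not a hunk from this series):

	/* before: drm_print.h wrappers */
	DRM_DEV_ERROR(&dsi->dev, "reset failed: %d\n", ret);
	DRM_DEV_DEBUG_DRIVER(&dsi->dev, "panel ready\n");

	/* after: same device pointer, same format string */
	dev_err(&dsi->dev, "reset failed: %d\n", ret);
	dev_dbg(&dsi->dev, "panel ready\n");

One behavioral nuance: dev_dbg() is gated by dynamic debug rather than the drm.debug category mask, so the debug lines move from one filtering mechanism to the other.
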
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 8996ced2b721..c22e7c49e077 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -22,7 +22,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define DRV_NAME "panel-sitronix-st7703"
@@ -364,8 +363,7 @@ static int st7703_enable(struct drm_panel *panel)
ret = ctx->desc->init_sequence(ctx);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
return ret;
}
@@ -373,7 +371,7 @@ static int st7703_enable(struct drm_panel *panel)
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
return ret;
}
@@ -384,7 +382,7 @@ static int st7703_enable(struct drm_panel *panel)
if (ret)
return ret;
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Panel init sequence done\n");
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
return 0;
}
@@ -397,13 +395,11 @@ static int st7703_disable(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev,
- "Failed to turn off the display: %d\n", ret);
+ dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enter sleep mode: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
return 0;
}
@@ -431,17 +427,15 @@ static int st7703_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable vcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable iovcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vcc;
}
@@ -467,9 +461,9 @@ static int st7703_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
- DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
- ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
- drm_mode_vrefresh(ctx->desc->mode));
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
return -ENOMEM;
}
@@ -496,7 +490,7 @@ static int allpixelson_set(void *data, u64 val)
struct st7703 *ctx = data;
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Setting all pixels on\n");
+ dev_dbg(ctx->dev, "Setting all pixels on\n");
dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
msleep(val * 1000);
/* Reset the panel to get video back */
@@ -537,7 +531,7 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
@@ -554,18 +548,14 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vcc)) {
ret = PTR_ERR(ctx->vcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request vcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request vcc regulator: %d\n", ret);
return ret;
}
ctx->iovcc = devm_regulator_get(dev, "iovcc");
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request iovcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
@@ -580,17 +570,15 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "mipi_dsi_attach failed (%d). Is host ready?\n",
- ret);
+ dev_err(dev, "mipi_dsi_attach failed (%d). Is host ready?\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
- DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
- ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
- drm_mode_vrefresh(ctx->desc->mode),
- mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+ dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode),
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
st7703_debugfs_init(ctx);
return 0;
@@ -603,13 +591,11 @@ static void st7703_shutdown(struct mipi_dsi_device *dsi)
ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
static int st7703_remove(struct mipi_dsi_device *dsi)
@@ -621,8 +607,7 @@ static int st7703_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 3513ae40efa8..61e565524542 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -382,9 +382,7 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = drm_panel_add(&ctx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&ctx->panel);
return 0;
}
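
As in the st7701 and st7789v hunks above, callers throughout this series stop checking the return value of drm_panel_add(): the function has become infallible (it only links the panel into the global panel list), so a probe tail that used to need an unwind label collapses to a plain call. Schematically:

	/* before: the call could fail and had to be unwound */
	ret = drm_panel_add(&ctx->panel);
	if (ret < 0)
		goto free_resources;

	/* after: drm_panel_add() returns void and cannot fail */
	drm_panel_add(&ctx->panel);
	return 0;
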
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index 97a1b4790d3c..065efae213f5 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -20,7 +20,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#define ACX424_DCS_READ_ID1 0xDA
#define ACX424_DCS_READ_ID2 0xDB
@@ -110,13 +109,11 @@ static int acx424akp_set_brightness(struct backlight_device *bl)
SCALE_FACTOR_NS_DIV_MHZ);
/* Set up PWM dutycycle ONE byte (differs from the standard) */
- DRM_DEV_DEBUG(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
+ dev_dbg(acx->dev, "calculated duty cycle %02x\n", pwm_ratio);
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
&pwm_ratio, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to set display PWM ratio (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to set display PWM ratio (%d)\n", ret);
return ret;
}
@@ -132,40 +129,30 @@ static int acx424akp_set_brightness(struct backlight_device *bl)
par = 0xaa;
ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to unlock CMD 2 (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to unlock CMD 2 (%d)\n", ret);
return ret;
}
par = 0x01;
ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to enter page 1 (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to enter page 1 (%d)\n", ret);
return ret;
}
par = 0x01;
ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to disable MTP reload (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to disable MTP reload (%d)\n", ret);
return ret;
}
ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to set PWM divisor (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to set PWM divisor (%d)\n", ret);
return ret;
}
par = 0xaa;
ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to lock CMD 2 (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to lock CMD 2 (%d)\n", ret);
return ret;
}
@@ -174,9 +161,7 @@ static int acx424akp_set_brightness(struct backlight_device *bl)
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&par, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev,
- "failed to enable display backlight (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to enable display backlight (%d)\n", ret);
return ret;
}
@@ -196,22 +181,22 @@ static int acx424akp_read_id(struct acx424akp *acx)
ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID1, &vendor, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev, "could not vendor ID byte\n");
+ dev_err(acx->dev, "could not read vendor ID byte\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID2, &version, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev, "could not read device version byte\n");
+ dev_err(acx->dev, "could not read device version byte\n");
return ret;
}
ret = mipi_dsi_dcs_read(dsi, ACX424_DCS_READ_ID3, &panel, 1);
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev, "could not read panel ID byte\n");
+ dev_err(acx->dev, "could not read panel ID byte\n");
return ret;
}
if (vendor == 0x00) {
- DRM_DEV_ERROR(acx->dev, "device vendor ID is zero\n");
+ dev_err(acx->dev, "device vendor ID is zero\n");
return -ENODEV;
}
@@ -220,14 +205,12 @@ static int acx424akp_read_id(struct acx424akp *acx)
case DISPLAY_SONY_ACX424AKP_ID1:
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
- DRM_DEV_INFO(acx->dev,
- "MTP vendor: %02x, version: %02x, panel: %02x\n",
- vendor, version, panel);
+ dev_info(acx->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
break;
default:
- DRM_DEV_INFO(acx->dev,
- "unknown vendor: %02x, version: %02x, panel: %02x\n",
- vendor, version, panel);
+ dev_info(acx->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ vendor, version, panel);
break;
}
@@ -240,7 +223,7 @@ static int acx424akp_power_on(struct acx424akp *acx)
ret = regulator_enable(acx->supply);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to enable supply (%d)\n", ret);
+ dev_err(acx->dev, "failed to enable supply (%d)\n", ret);
return ret;
}
@@ -276,7 +259,7 @@ static int acx424akp_prepare(struct drm_panel *panel)
ret = acx424akp_read_id(acx);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to read panel ID (%d)\n", ret);
+ dev_err(acx->dev, "failed to read panel ID (%d)\n", ret);
goto err_power_off;
}
@@ -284,8 +267,7 @@ static int acx424akp_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_tear_on(dsi,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to enable vblank TE (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to enable vblank TE (%d)\n", ret);
goto err_power_off;
}
@@ -302,23 +284,21 @@ static int acx424akp_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_write(dsi, ACX424_DCS_SET_MDDI,
&mddi, sizeof(mddi));
if (ret < 0) {
- DRM_DEV_ERROR(acx->dev, "failed to set MDDI (%d)\n", ret);
+ dev_err(acx->dev, "failed to set MDDI (%d)\n", ret);
goto err_power_off;
}
/* Exit sleep mode */
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to exit sleep mode (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to exit sleep mode (%d)\n", ret);
goto err_power_off;
}
msleep(140);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to turn display on (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to turn display on (%d)\n", ret);
goto err_power_off;
}
if (acx->video_mode) {
@@ -351,24 +331,20 @@ static int acx424akp_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
&par, 1);
if (ret) {
- DRM_DEV_ERROR(acx->dev,
- "failed to disable display backlight (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to disable display backlight (%d)\n", ret);
return ret;
}
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to turn display off (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to turn display off (%d)\n", ret);
return ret;
}
/* Enter sleep mode */
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret) {
- DRM_DEV_ERROR(acx->dev, "failed to enter sleep mode (%d)\n",
- ret);
+ dev_err(acx->dev, "failed to enter sleep mode (%d)\n", ret);
return ret;
}
msleep(85);
@@ -418,7 +394,7 @@ static int acx424akp_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev,
&sony_acx424akp_cmd_mode);
if (!mode) {
- DRM_ERROR("bad mode or failed to add mode\n");
+ dev_err(panel->dev, "bad mode or failed to add mode\n");
return -EINVAL;
}
drm_mode_set_name(mode);
@@ -486,8 +462,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(acx->reset_gpio)) {
ret = PTR_ERR(acx->reset_gpio);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
- ret);
+ dev_err(dev, "failed to request GPIO (%d)\n", ret);
return ret;
}
@@ -497,16 +472,14 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
acx->bl = devm_backlight_device_register(dev, "acx424akp", dev, acx,
&acx424akp_bl_ops, NULL);
if (IS_ERR(acx->bl)) {
- DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ dev_err(dev, "failed to register backlight device\n");
return PTR_ERR(acx->bl);
}
acx->bl->props.max_brightness = 1023;
acx->bl->props.brightness = 512;
acx->bl->props.power = FB_BLANK_POWERDOWN;
- ret = drm_panel_add(&acx->panel);
- if (ret < 0)
- return ret;
+ drm_panel_add(&acx->panel);
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index fc6a7e451abe..e95fdfb16b6c 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -650,12 +650,7 @@ static int acx565akm_probe(struct spi_device *spi)
drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
DRM_MODE_CONNECTOR_DPI);
- ret = drm_panel_add(&lcd->panel);
- if (ret < 0) {
- if (lcd->has_bc)
- acx565akm_backlight_cleanup(lcd);
- return ret;
- }
+ drm_panel_add(&lcd->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 58d683cc5215..037c14fd6bac 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -350,7 +350,9 @@ static int td028ttec1_probe(struct spi_device *spi)
if (ret)
return ret;
- return drm_panel_add(&lcd->panel);
+ drm_panel_add(&lcd->panel);
+
+ return 0;
}
static int td028ttec1_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 9b2a356c4d9a..49e6c9386258 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -460,11 +460,7 @@ static int td043mtea1_probe(struct spi_device *spi)
drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
DRM_MODE_CONNECTOR_DPI);
- ret = drm_panel_add(&lcd->panel);
- if (ret < 0) {
- sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
- return ret;
- }
+ drm_panel_add(&lcd->panel);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index c7a2f0ae5ba5..d57ed75a977c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -12,7 +12,6 @@
*/
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <linux/bitops.h>
#include <linux/delay.h>
@@ -238,7 +237,7 @@ static u8 tpg110_readwrite_reg(struct tpg110 *tpg, bool write,
spi_message_add_tail(&t[1], &m);
ret = spi_sync(tpg->spi, &m);
if (ret) {
- DRM_DEV_ERROR(tpg->dev, "SPI message error %d\n", ret);
+ dev_err(tpg->dev, "SPI message error %d\n", ret);
return ret;
}
if (write)
@@ -265,18 +264,18 @@ static int tpg110_startup(struct tpg110 *tpg)
/* De-assert the reset signal */
gpiod_set_value_cansleep(tpg->grestb, 0);
usleep_range(1000, 2000);
- DRM_DEV_DEBUG(tpg->dev, "de-asserted GRESTB\n");
+ dev_dbg(tpg->dev, "de-asserted GRESTB\n");
/* Test display communication */
tpg110_write_reg(tpg, TPG110_TEST, 0x55);
val = tpg110_read_reg(tpg, TPG110_TEST);
if (val != 0x55) {
- DRM_DEV_ERROR(tpg->dev, "failed communication test\n");
+ dev_err(tpg->dev, "failed communication test\n");
return -ENODEV;
}
val = tpg110_read_reg(tpg, TPG110_CHIPID);
- DRM_DEV_INFO(tpg->dev, "TPG110 chip ID: %d version: %d\n",
+ dev_info(tpg->dev, "TPG110 chip ID: %d version: %d\n",
val >> 4, val & 0x0f);
/* Show display resolution */
@@ -284,27 +283,25 @@ static int tpg110_startup(struct tpg110 *tpg)
val &= TPG110_RES_MASK;
switch (val) {
case TPG110_RES_400X240_D:
- DRM_DEV_INFO(tpg->dev,
- "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)\n");
+ dev_info(tpg->dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)\n");
break;
case TPG110_RES_480X272_D:
- DRM_DEV_INFO(tpg->dev,
- "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)\n");
+ dev_info(tpg->dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)\n");
break;
case TPG110_RES_480X640:
- DRM_DEV_INFO(tpg->dev, "480x640 RGB\n");
+ dev_info(tpg->dev, "480x640 RGB\n");
break;
case TPG110_RES_480X272:
- DRM_DEV_INFO(tpg->dev, "480x272 RGB\n");
+ dev_info(tpg->dev, "480x272 RGB\n");
break;
case TPG110_RES_640X480:
- DRM_DEV_INFO(tpg->dev, "640x480 RGB\n");
+ dev_info(tpg->dev, "640x480 RGB\n");
break;
case TPG110_RES_800X480:
- DRM_DEV_INFO(tpg->dev, "800x480 RGB\n");
+ dev_info(tpg->dev, "800x480 RGB\n");
break;
default:
- DRM_DEV_ERROR(tpg->dev, "ILLEGAL RESOLUTION 0x%02x\n", val);
+ dev_err(tpg->dev, "ILLEGAL RESOLUTION 0x%02x\n", val);
break;
}
@@ -322,13 +319,12 @@ static int tpg110_startup(struct tpg110 *tpg)
}
}
if (i == ARRAY_SIZE(tpg110_modes)) {
- DRM_DEV_ERROR(tpg->dev, "unsupported mode (%02x) detected\n",
- val);
+ dev_err(tpg->dev, "unsupported mode (%02x) detected\n", val);
return -ENODEV;
}
val = tpg110_read_reg(tpg, TPG110_CTRL2);
- DRM_DEV_INFO(tpg->dev, "resolution and standby is controlled by %s\n",
+ dev_info(tpg->dev, "resolution and standby is controlled by %s\n",
(val & TPG110_CTRL2_RES_PM_CTRL) ? "software" : "hardware");
/* Take control over resolution and standby */
val |= TPG110_CTRL2_RES_PM_CTRL;
@@ -414,15 +410,15 @@ static int tpg110_probe(struct spi_device *spi)
/* We get the physical display dimensions from the DT */
ret = of_property_read_u32(np, "width-mm", &tpg->width);
if (ret)
- DRM_DEV_ERROR(dev, "no panel width specified\n");
+ dev_err(dev, "no panel width specified\n");
ret = of_property_read_u32(np, "height-mm", &tpg->height);
if (ret)
- DRM_DEV_ERROR(dev, "no panel height specified\n");
+ dev_err(dev, "no panel height specified\n");
/* This asserts the GRESTB signal, putting the display into reset */
tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH);
if (IS_ERR(tpg->grestb)) {
- DRM_DEV_ERROR(dev, "no GRESTB GPIO\n");
+ dev_err(dev, "no GRESTB GPIO\n");
return -ENODEV;
}
@@ -430,7 +426,7 @@ static int tpg110_probe(struct spi_device *spi)
spi->mode |= SPI_3WIRE_HIZ;
ret = spi_setup(spi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "spi setup failed.\n");
+ dev_err(dev, "spi setup failed.\n");
return ret;
}
tpg->spi = spi;
@@ -448,7 +444,9 @@ static int tpg110_probe(struct spi_device *spi)
spi_set_drvdata(spi, tpg);
- return drm_panel_add(&tpg->panel);
+ drm_panel_add(&tpg->panel);
+
+ return 0;
}
static int tpg110_remove(struct spi_device *spi)
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 9b9c167b8dc8..b24b92d93ea5 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -17,7 +17,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
static const char * const regulator_names[] = {
"vdda",
@@ -231,9 +230,7 @@ static int truly_dcs_write(struct drm_panel *panel, u32 command)
for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
ret = mipi_dsi_dcs_write(ctx->dsi[i], command, NULL, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "cmd 0x%x failed for dsi = %d\n",
- command, i);
+ dev_err(ctx->dev, "cmd 0x%x failed for dsi = %d\n", command, i);
}
}
@@ -250,8 +247,7 @@ static int truly_dcs_write_buf(struct drm_panel *panel,
for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
ret = mipi_dsi_dcs_write_buffer(ctx->dsi[i], buf, size);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "failed to tx cmd [%d], err: %d\n", i, ret);
+ dev_err(ctx->dev, "failed to tx cmd [%d], err: %d\n", i, ret);
return ret;
}
}
@@ -300,16 +296,14 @@ static int truly_nt35597_power_off(struct truly_nt35597 *ctx)
ret = regulator_set_load(ctx->supplies[i].consumer,
regulator_disable_loads[i]);
if (ret) {
- DRM_DEV_ERROR(ctx->dev,
- "regulator_set_load failed %d\n", ret);
+ dev_err(ctx->dev, "regulator_set_load failed %d\n", ret);
return ret;
}
}
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret) {
- DRM_DEV_ERROR(ctx->dev,
- "regulator_bulk_disable failed %d\n", ret);
+ dev_err(ctx->dev, "regulator_bulk_disable failed %d\n", ret);
}
return ret;
}
@@ -325,8 +319,7 @@ static int truly_nt35597_disable(struct drm_panel *panel)
if (ctx->backlight) {
ret = backlight_disable(ctx->backlight);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev, "backlight disable failed %d\n",
- ret);
+ dev_err(ctx->dev, "backlight disable failed %d\n", ret);
}
ctx->enabled = false;
@@ -346,9 +339,7 @@ static int truly_nt35597_unprepare(struct drm_panel *panel)
ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_OFF);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "set_display_off cmd failed ret = %d\n",
- ret);
+ dev_err(ctx->dev, "set_display_off cmd failed ret = %d\n", ret);
}
/* 120ms delay required here as per DCS spec */
@@ -356,13 +347,12 @@ static int truly_nt35597_unprepare(struct drm_panel *panel)
ret = truly_dcs_write(panel, MIPI_DCS_ENTER_SLEEP_MODE);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "enter_sleep cmd failed ret = %d\n", ret);
+ dev_err(ctx->dev, "enter_sleep cmd failed ret = %d\n", ret);
}
ret = truly_nt35597_power_off(ctx);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev, "power_off failed ret = %d\n", ret);
+ dev_err(ctx->dev, "power_off failed ret = %d\n", ret);
ctx->prepared = false;
return ret;
@@ -396,18 +386,14 @@ static int truly_nt35597_prepare(struct drm_panel *panel)
panel_on_cmds[i].size,
panel_on_cmds[i].commands);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "cmd set tx failed i = %d ret = %d\n",
- i, ret);
+ dev_err(ctx->dev, "cmd set tx failed i = %d ret = %d\n", i, ret);
goto power_off;
}
}
ret = truly_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "exit_sleep_mode cmd failed ret = %d\n",
- ret);
+ dev_err(ctx->dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
goto power_off;
}
@@ -416,8 +402,7 @@ static int truly_nt35597_prepare(struct drm_panel *panel)
ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_ON);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "set_display_on cmd failed ret = %d\n", ret);
+ dev_err(ctx->dev, "set_display_on cmd failed ret = %d\n", ret);
goto power_off;
}
@@ -430,7 +415,7 @@ static int truly_nt35597_prepare(struct drm_panel *panel)
power_off:
if (truly_nt35597_power_off(ctx))
- DRM_DEV_ERROR(ctx->dev, "power_off failed\n");
+ dev_err(ctx->dev, "power_off failed\n");
return ret;
}
@@ -445,8 +430,7 @@ static int truly_nt35597_enable(struct drm_panel *panel)
if (ctx->backlight) {
ret = backlight_enable(ctx->backlight);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev, "backlight enable failed %d\n",
- ret);
+ dev_err(ctx->dev, "backlight enable failed %d\n", ret);
}
ctx->enabled = true;
@@ -464,8 +448,7 @@ static int truly_nt35597_get_modes(struct drm_panel *panel,
config = ctx->config;
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_DEV_ERROR(ctx->dev,
- "failed to create a new display mode\n");
+ dev_err(ctx->dev, "failed to create a new display mode\n");
return 0;
}
@@ -501,15 +484,13 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
- PTR_ERR(ctx->reset_gpio));
+ dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
ctx->mode_gpio = devm_gpiod_get(dev, "mode", GPIOD_OUT_LOW);
if (IS_ERR(ctx->mode_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get mode gpio %ld\n",
- PTR_ERR(ctx->mode_gpio));
+ dev_err(dev, "cannot get mode gpio %ld\n", PTR_ERR(ctx->mode_gpio));
return PTR_ERR(ctx->mode_gpio);
}
@@ -584,22 +565,21 @@ static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
if (!dsi1) {
- DRM_DEV_ERROR(dev,
- "failed to get remote node for dsi1_device\n");
+ dev_err(dev, "failed to get remote node for dsi1_device\n");
return -ENODEV;
}
dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
of_node_put(dsi1);
if (!dsi1_host) {
- DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+ dev_err(dev, "failed to find dsi host\n");
return -EPROBE_DEFER;
}
/* register the second DSI device */
dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
if (IS_ERR(dsi1_device)) {
- DRM_DEV_ERROR(dev, "failed to create dsi device\n");
+ dev_err(dev, "failed to create dsi device\n");
return PTR_ERR(dsi1_device);
}
@@ -611,7 +591,7 @@ static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
ret = truly_nt35597_panel_add(ctx);
if (ret) {
- DRM_DEV_ERROR(dev, "failed to add panel\n");
+ dev_err(dev, "failed to add panel\n");
goto err_panel_add;
}
@@ -623,8 +603,7 @@ static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ret = mipi_dsi_attach(dsi_dev);
if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "dsi attach failed i = %d\n", i);
+ dev_err(dev, "dsi attach failed i = %d\n", i);
goto err_dsi_attach;
}
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index a12976b497ce..eb43503ec97b 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -14,7 +14,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
struct visionox_rm69299 {
struct drm_panel panel;
@@ -69,16 +68,14 @@ static int visionox_rm69299_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
if (ret < 0)
- DRM_DEV_ERROR(ctx->panel.dev,
- "set_display_off cmd failed ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);
/* 120ms delay required here as per DCS spec */
msleep(120);
ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "enter_sleep cmd failed ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
}
ret = visionox_rm69299_power_off(ctx);
@@ -103,36 +100,31 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "cmd set tx 0 failed, ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
goto power_off;
}
ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "cmd set tx 1 failed, ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
goto power_off;
}
ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "cmd set tx 2 failed, ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
goto power_off;
}
ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "cmd set tx 3 failed, ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
goto power_off;
}
ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "exit_sleep_mode cmd failed ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
goto power_off;
}
@@ -141,8 +133,7 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "set_display_on cmd failed ret = %d\n", ret);
+ dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
goto power_off;
}
@@ -179,8 +170,7 @@ static int visionox_rm69299_get_modes(struct drm_panel *panel,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_DEV_ERROR(ctx->panel.dev,
- "failed to create a new display mode\n");
+ dev_err(ctx->panel.dev, "failed to create a new display mode\n");
return 0;
}
@@ -225,8 +215,7 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
- PTR_ERR(ctx->reset_gpio));
+ dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
}
@@ -242,23 +231,19 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+ dev_err(dev, "dsi attach failed ret = %d\n", ret);
goto err_dsi_attach;
}
ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
if (ret) {
- DRM_DEV_ERROR(dev,
- "regulator set load failed for vdda supply ret = %d\n",
- ret);
+ dev_err(dev, "regulator set load failed for vdda supply ret = %d\n", ret);
goto err_set_load;
}
ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
if (ret) {
- DRM_DEV_ERROR(dev,
- "regulator set load failed for vdd3p3 supply ret = %d\n",
- ret);
+ dev_err(dev, "regulator set load failed for vdd3p3 supply ret = %d\n", ret);
goto err_set_load;
}
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 06341deb60ca..55172d63a922 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -12,7 +12,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
-#include <drm/drm_print.h>
#include <video/display_timing.h>
#include <video/mipi_display.h>
@@ -135,7 +134,7 @@ static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
msleep(60);
- DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ dev_dbg(dev, "Panel init sequence done\n");
return 0;
}
@@ -150,13 +149,11 @@ static int xpp055c272_unprepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0)
- DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to set display off: %d\n", ret);
ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
- ret);
+ dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
return ret;
}
@@ -177,17 +174,15 @@ static int xpp055c272_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
- DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vci);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable vci supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev,
- "Failed to enable iovcc supply: %d\n", ret);
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vci;
}
@@ -201,14 +196,13 @@ static int xpp055c272_prepare(struct drm_panel *panel)
ret = xpp055c272_init_sequence(ctx);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
goto disable_iovcc;
}
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
goto disable_iovcc;
}
@@ -217,7 +211,7 @@ static int xpp055c272_prepare(struct drm_panel *panel)
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
goto disable_iovcc;
}
@@ -256,9 +250,9 @@ static int xpp055c272_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
- DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -290,7 +284,7 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
- DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ dev_err(dev, "cannot get reset gpio\n");
return PTR_ERR(ctx->reset_gpio);
}
@@ -298,9 +292,7 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vci)) {
ret = PTR_ERR(ctx->vci);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request vci regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request vci regulator: %d\n", ret);
return ret;
}
@@ -308,9 +300,7 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->iovcc)) {
ret = PTR_ERR(ctx->iovcc);
if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev,
- "Failed to request iovcc regulator: %d\n",
- ret);
+ dev_err(dev, "Failed to request iovcc regulator: %d\n", ret);
return ret;
}
@@ -334,7 +324,7 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -349,13 +339,11 @@ static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
static int xpp055c272_remove(struct mipi_dsi_device *dsi)
@@ -367,8 +355,7 @@ static int xpp055c272_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
- DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
- ret);
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 413987038fbf..8ab025d0035f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -1,20 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
+
+#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
-#include "panfrost_features.h"
-#include "panfrost_issues.h"
-#include "panfrost_gpu.h"
-#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
+static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = pfdevfreq->time_last_update;
+
+ if (pfdevfreq->busy_count > 0)
+ pfdevfreq->busy_time += ktime_sub(now, last);
+ else
+ pfdevfreq->idle_time += ktime_sub(now, last);
+
+ pfdevfreq->time_last_update = now;
+}
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
@@ -34,30 +43,37 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
return 0;
}
-static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
+static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
- pfdev->devfreq.busy_time = 0;
- pfdev->devfreq.idle_time = 0;
- pfdev->devfreq.time_last_update = ktime_get();
+ pfdevfreq->busy_time = 0;
+ pfdevfreq->idle_time = 0;
+ pfdevfreq->time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
-
- panfrost_devfreq_update_utilization(pfdev);
+ struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+ unsigned long irqflags;
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
- pfdev->devfreq.idle_time));
- status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
+ spin_lock_irqsave(&pfdevfreq->lock, irqflags);
+
+ panfrost_devfreq_update_utilization(pfdevfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
+ pfdevfreq->idle_time));
+
+ status->busy_time = ktime_to_ns(pfdevfreq->busy_time);
+
+ panfrost_devfreq_reset(pfdevfreq);
- panfrost_devfreq_reset(pfdev);
+ spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
- dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
- status->total_time,
+ dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
status->busy_time / (status->total_time / 100),
status->current_frequency / 1000 / 1000);
@@ -77,21 +93,43 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
unsigned long cur_freq;
struct device *dev = &pfdev->pdev->dev;
struct devfreq *devfreq;
+ struct opp_table *opp_table;
struct thermal_cooling_device *cooling;
+ struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+
+ opp_table = dev_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
+ pfdev->comp->num_supplies);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ /* Continue if the optional regulator is missing */
+ if (ret != -ENODEV) {
+ DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
+ goto err_fini;
+ }
+ } else {
+ pfdevfreq->regulators_opp_table = opp_table;
+ }
ret = dev_pm_opp_of_add_table(dev);
- if (ret == -ENODEV) /* Optional, continue without devfreq */
- return 0;
- else if (ret)
- return ret;
+ if (ret) {
+ /* Optional, continue without devfreq */
+ if (ret == -ENODEV)
+ ret = 0;
+ goto err_fini;
+ }
+ pfdevfreq->opp_of_table_added = true;
+
+ spin_lock_init(&pfdevfreq->lock);
- panfrost_devfreq_reset(pfdev);
+ panfrost_devfreq_reset(pfdevfreq);
cur_freq = clk_get_rate(pfdev->clock);
opp = devfreq_recommended_opp(dev, &cur_freq, 0);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_fini;
+ }
panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
@@ -100,75 +138,94 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
if (IS_ERR(devfreq)) {
DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
- dev_pm_opp_of_remove_table(dev);
- return PTR_ERR(devfreq);
+ ret = PTR_ERR(devfreq);
+ goto err_fini;
}
- pfdev->devfreq.devfreq = devfreq;
+ pfdevfreq->devfreq = devfreq;
cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
if (IS_ERR(cooling))
DRM_DEV_INFO(dev, "Failed to register cooling device\n");
else
- pfdev->devfreq.cooling = cooling;
+ pfdevfreq->cooling = cooling;
return 0;
+
+err_fini:
+ panfrost_devfreq_fini(pfdev);
+ return ret;
}
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
- if (pfdev->devfreq.cooling)
- devfreq_cooling_unregister(pfdev->devfreq.cooling);
- dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
+ struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+
+ if (pfdevfreq->cooling) {
+ devfreq_cooling_unregister(pfdevfreq->cooling);
+ pfdevfreq->cooling = NULL;
+ }
+
+ if (pfdevfreq->opp_of_table_added) {
+ dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
+ pfdevfreq->opp_of_table_added = false;
+ }
+
+ if (pfdevfreq->regulators_opp_table) {
+ dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
+ pfdevfreq->regulators_opp_table = NULL;
+ }
}
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- if (!pfdev->devfreq.devfreq)
+ struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+
+ if (!pfdevfreq->devfreq)
return;
- panfrost_devfreq_reset(pfdev);
+ panfrost_devfreq_reset(pfdevfreq);
- devfreq_resume_device(pfdev->devfreq.devfreq);
+ devfreq_resume_device(pfdevfreq->devfreq);
}
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
- if (!pfdev->devfreq.devfreq)
+ struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
+
+ if (!pfdevfreq->devfreq)
return;
- devfreq_suspend_device(pfdev->devfreq.devfreq);
+ devfreq_suspend_device(pfdevfreq->devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
+void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
- ktime_t now;
- ktime_t last;
+ unsigned long irqflags;
- if (!pfdev->devfreq.devfreq)
+ if (!pfdevfreq->devfreq)
return;
- now = ktime_get();
- last = pfdev->devfreq.time_last_update;
+ spin_lock_irqsave(&pfdevfreq->lock, irqflags);
- if (atomic_read(&pfdev->devfreq.busy_count) > 0)
- pfdev->devfreq.busy_time += ktime_sub(now, last);
- else
- pfdev->devfreq.idle_time += ktime_sub(now, last);
+ panfrost_devfreq_update_utilization(pfdevfreq);
- pfdev->devfreq.time_last_update = now;
-}
+ pfdevfreq->busy_count++;
-void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
-{
- panfrost_devfreq_update_utilization(pfdev);
- atomic_inc(&pfdev->devfreq.busy_count);
+ spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}
-void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
+void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
- int count;
+ unsigned long irqflags;
+
+ if (!pfdevfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&pfdevfreq->lock, irqflags);
+
+ panfrost_devfreq_update_utilization(pfdevfreq);
+
+ WARN_ON(--pfdevfreq->busy_count < 0);
- panfrost_devfreq_update_utilization(pfdev);
- count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
- WARN_ON(count < 0);
+ spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}
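
The rework above replaces the atomic busy_count with a spinlock that covers all four bookkeeping fields together, so the elapsed interval is always credited to the state the GPU was actually in before the counter flips. Condensed to its essentials, the technique is (a sketch, not the driver function itself):

static void record_transition(struct panfrost_devfreq *df, bool to_busy)
{
	unsigned long flags;
	ktime_t now;

	spin_lock_irqsave(&df->lock, flags);

	/* Credit the interval that just ended to the state we are leaving. */
	now = ktime_get();
	if (df->busy_count > 0)
		df->busy_time += ktime_sub(now, df->time_last_update);
	else
		df->idle_time += ktime_sub(now, df->time_last_update);
	df->time_last_update = now;

	/* Only then flip the state for the interval that starts now. */
	if (to_busy)
		df->busy_count++;
	else
		WARN_ON(--df->busy_count < 0);

	spin_unlock_irqrestore(&df->lock, flags);
}
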
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index 0611beffc8d0..db6ea48e21f9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -4,13 +4,39 @@
#ifndef __PANFROST_DEVFREQ_H__
#define __PANFROST_DEVFREQ_H__
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct panfrost_device;
+
+struct panfrost_devfreq {
+ struct devfreq *devfreq;
+ struct opp_table *regulators_opp_table;
+ struct thermal_cooling_device *cooling;
+ bool opp_of_table_added;
+
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ int busy_count;
+ /*
+ * Protect busy_time, idle_time, time_last_update and busy_count
+ * because these can be updated concurrently between multiple jobs.
+ */
+ spinlock_t lock;
+};
+
int panfrost_devfreq_init(struct panfrost_device *pfdev);
void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_busy(struct panfrost_devfreq *devfreq);
+void panfrost_devfreq_record_idle(struct panfrost_devfreq *devfreq);
#endif /* __PANFROST_DEVFREQ_H__ */
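
Since the record helpers now take the devfreq sub-structure directly, call sites only need the embedded member. A hypothetical job-slot call site (illustrative; the real callers live in panfrost_job.c and are updated elsewhere in the series):

	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	panfrost_devfreq_record_busy(pfdevfreq);	/* job submitted to hardware */
	/* ... job runs; the completion interrupt fires ... */
	panfrost_devfreq_record_idle(pfdevfreq);	/* slot is idle again */
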
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index b172087eee6a..e6896733838a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -90,9 +90,11 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret, i;
- if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
- "Too many supplies in compatible structure.\n"))
- return -EINVAL;
+ pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies,
+ sizeof(*pfdev->regulators),
+ GFP_KERNEL);
+ if (!pfdev->regulators)
+ return -ENOMEM;
for (i = 0; i < pfdev->comp->num_supplies; i++)
pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
@@ -119,8 +121,10 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- regulator_bulk_disable(pfdev->comp->num_supplies,
- pfdev->regulators);
+ if (!pfdev->regulators)
+ return;
+
+ regulator_bulk_disable(pfdev->comp->num_supplies, pfdev->regulators);
}
static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
@@ -214,58 +218,70 @@ int panfrost_device_init(struct panfrost_device *pfdev)
return err;
}
- err = panfrost_regulator_init(pfdev);
- if (err)
- goto err_out0;
+ err = panfrost_devfreq_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(pfdev->dev, "devfreq init failed %d\n", err);
+ goto out_clk;
+ }
+
+ /* OPP will handle regulators */
+ if (!pfdev->pfdevfreq.opp_of_table_added) {
+ err = panfrost_regulator_init(pfdev);
+ if (err)
+ goto out_devfreq;
+ }
err = panfrost_reset_init(pfdev);
if (err) {
dev_err(pfdev->dev, "reset init failed %d\n", err);
- goto err_out1;
+ goto out_regulator;
}
err = panfrost_pm_domain_init(pfdev);
if (err)
- goto err_out2;
+ goto out_reset;
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
- goto err_out3;
+ goto out_pm_domain;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto err_out3;
+ goto out_pm_domain;
err = panfrost_mmu_init(pfdev);
if (err)
- goto err_out4;
+ goto out_gpu;
err = panfrost_job_init(pfdev);
if (err)
- goto err_out5;
+ goto out_mmu;
err = panfrost_perfcnt_init(pfdev);
if (err)
- goto err_out6;
+ goto out_job;
return 0;
-err_out6:
+out_job:
panfrost_job_fini(pfdev);
-err_out5:
+out_mmu:
panfrost_mmu_fini(pfdev);
-err_out4:
+out_gpu:
panfrost_gpu_fini(pfdev);
-err_out3:
+out_pm_domain:
panfrost_pm_domain_fini(pfdev);
-err_out2:
+out_reset:
panfrost_reset_fini(pfdev);
-err_out1:
+out_regulator:
panfrost_regulator_fini(pfdev);
-err_out0:
+out_devfreq:
+ panfrost_devfreq_fini(pfdev);
+out_clk:
panfrost_clk_fini(pfdev);
return err;
}
@@ -278,6 +294,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_gpu_fini(pfdev);
panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
+ panfrost_devfreq_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
}
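
The relabelled error path follows the usual kernel convention: each out_* label undoes exactly the step it is named after, so inserting a new init step (here devfreq) costs one new label instead of renumbering err_out0 through err_out6. The shape in miniature, with generic placeholder names:

	err = init_a(pfdev);
	if (err)
		return err;
	err = init_b(pfdev);
	if (err)
		goto out_a;
	err = init_c(pfdev);
	if (err)
		goto out_b;
	return 0;

out_b:
	fini_b(pfdev);	/* unwind strictly in reverse order of init */
out_a:
	fini_a(pfdev);
	return err;
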
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index c30c719a8059..2e9cbd1c4a58 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -13,6 +13,8 @@
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
+#include "panfrost_devfreq.h"
+
struct panfrost_device;
struct panfrost_mmu;
struct panfrost_job_slot;
@@ -20,7 +22,6 @@ struct panfrost_job;
struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
-#define MAX_REGULATORS 2
#define MAX_PM_DOMAINS 3
struct panfrost_features {
@@ -69,6 +70,9 @@ struct panfrost_compatible {
int num_pm_domains;
/* Only required if num_pm_domains > 1. */
const char * const *pm_domain_names;
+
+ /* Vendor implementation quirks callback */
+ void (*vendor_quirk)(struct panfrost_device *pfdev);
};
struct panfrost_device {
@@ -79,7 +83,7 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
- struct regulator_bulk_data regulators[MAX_REGULATORS];
+ struct regulator_bulk_data *regulators;
struct reset_control *rstc;
/* pm_domains for devices with more than one. */
struct device *pm_domain_devs[MAX_PM_DOMAINS];
@@ -107,14 +111,7 @@ struct panfrost_device {
struct list_head shrinker_list;
struct shrinker shrinker;
- struct {
- struct devfreq *devfreq;
- struct thermal_cooling_device *cooling;
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- atomic_t busy_count;
- } devfreq;
+ struct panfrost_devfreq pfdevfreq;
};
struct panfrost_mmu {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index ada51df9a7a3..0fc084110e5b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -14,7 +14,6 @@
#include <drm/drm_utils.h>
#include "panfrost_device.h"
-#include "panfrost_devfreq.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
@@ -606,13 +605,6 @@ static int panfrost_probe(struct platform_device *pdev)
goto err_out0;
}
- err = panfrost_devfreq_init(pfdev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Fatal error during devfreq init\n");
- goto err_out1;
- }
-
pm_runtime_set_active(pfdev->dev);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_enable(pfdev->dev);
@@ -625,17 +617,16 @@ static int panfrost_probe(struct platform_device *pdev)
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out2;
+ goto err_out1;
panfrost_gem_shrinker_init(ddev);
return 0;
-err_out2:
- pm_runtime_disable(pfdev->dev);
- panfrost_devfreq_fini(pfdev);
err_out1:
+ pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
err_out0:
drm_dev_put(ddev);
return err;
@@ -650,10 +641,9 @@ static int panfrost_remove(struct platform_device *pdev)
panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
- panfrost_devfreq_fini(pfdev);
- panfrost_device_fini(pfdev);
- pm_runtime_put_sync_suspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ pm_runtime_set_suspended(pfdev->dev);
drm_dev_put(ddev);
return 0;
@@ -667,7 +657,18 @@ static const struct panfrost_compatible default_data = {
.pm_domain_names = NULL,
};
+static const struct panfrost_compatible amlogic_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies),
+ .supply_names = default_supplies,
+ .vendor_quirk = panfrost_gpu_amlogic_quirk,
+};
+
static const struct of_device_id dt_match[] = {
+ /* Listed first so they match before the generic compatibles */
+ { .compatible = "amlogic,meson-gxm-mali",
+ .data = &amlogic_data, },
+ { .compatible = "amlogic,meson-g12a-mali",
+ .data = &amlogic_data, },
{ .compatible = "arm,mali-t604", .data = &default_data, },
{ .compatible = "arm,mali-t624", .data = &default_data, },
{ .compatible = "arm,mali-t628", .data = &default_data, },
@@ -677,6 +678,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-t830", .data = &default_data, },
{ .compatible = "arm,mali-t860", .data = &default_data, },
{ .compatible = "arm,mali-t880", .data = &default_data, },
+ { .compatible = "arm,mali-bifrost", .data = &default_data, },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
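
The amlogic entries use the standard of_device_id .data mechanism to hang per-compatible driver data, including the new vendor_quirk hook, off the match table. A hedged sketch of how a probe routine retrieves it; of_device_get_match_data() is the real helper, everything prefixed my_ is hypothetical:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_match_data {
	void (*vendor_quirk)(struct platform_device *pdev);
};

static void my_vendor_quirk(struct platform_device *pdev)
{
	/* vendor-specific register setup would go here */
}

static const struct my_match_data my_vendor_data = {
	.vendor_quirk = my_vendor_quirk,
};

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,soc-ip", .data = &my_vendor_data },
	{ .compatible = "generic,ip" },	/* .data stays NULL */
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_match_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (data && data->vendor_quirk)
		data->vendor_quirk(pdev);
	return 0;
}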
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 33355dd302f1..62d4d710a571 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
for (i = 0; i < n_sgt; i++) {
if (bo->sgts[i].sgl) {
- dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
- bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
+ DMA_BIDIRECTIONAL, 0);
sg_free_table(&bo->sgts[i]);
}
}
@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
struct panfrost_gem_mapping *mapping;
- mutex_lock(&bo->mappings.lock);
list_for_each_entry(mapping, &bo->mappings.list, node)
panfrost_gem_teardown_mapping(mapping);
- mutex_unlock(&bo->mappings.lock);
}
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index b3517ff9630c..8088d5fd8480 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 288e46c40673..1b9f68d8e9aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ bool ret = false;
if (atomic_read(&bo->gpu_usecount))
return false;
- if (!mutex_trylock(&shmem->pages_lock))
+ if (!mutex_trylock(&bo->mappings.lock))
return false;
- panfrost_gem_teardown_mappings(bo);
+ if (!mutex_trylock(&shmem->pages_lock))
+ goto unlock_mappings;
+
+ panfrost_gem_teardown_mappings_locked(bo);
drm_gem_shmem_purge_locked(obj);
+ ret = true;
mutex_unlock(&shmem->pages_lock);
- return true;
+
+unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
+ return ret;
}
static unsigned long
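
The purge rework above takes bo->mappings.lock with mutex_trylock() before touching the pages lock: a shrinker runs in reclaim context, so blocking on a lock that an allocating path may hold risks deadlock, and contended objects are simply skipped. A sketch of the idiom with hypothetical lock names and helpers:

#include <linux/mutex.h>
#include <linux/types.h>

struct my_bo {
	struct mutex mappings_lock;
	struct mutex pages_lock;
};

void my_teardown_mappings_locked(struct my_bo *bo);	/* hypothetical */
void my_purge_pages_locked(struct my_bo *bo);		/* hypothetical */

static bool my_try_purge(struct my_bo *bo)
{
	bool ret = false;

	if (!mutex_trylock(&bo->mappings_lock))
		return false;			/* contended: skip object */

	if (!mutex_trylock(&bo->pages_lock))
		goto unlock_mappings;		/* contended: skip object */

	my_teardown_mappings_locked(bo);	/* mappings_lock is held */
	my_purge_pages_locked(bo);
	ret = true;

	mutex_unlock(&bo->pages_lock);
unlock_mappings:
	mutex_unlock(&bo->mappings_lock);
	return ret;
}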
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index f2c1ddc41a9b..2aae636f1cf5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "panfrost_device.h"
#include "panfrost_features.h"
@@ -75,6 +76,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
return 0;
}
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
+{
+ /*
+ * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
+ * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
+ * to operate correctly.
+ */
+ gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
+ gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
+}
+
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
u32 quirks = 0;
@@ -135,6 +147,10 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
if (quirks)
gpu_write(pfdev, GPU_JM_CONFIG, quirks);
+
+ /* Apply platform-specific quirks here */
+ if (pfdev->comp->vendor_quirk)
+ pfdev->comp->vendor_quirk(pfdev);
}
#define MAX_HW_REVS 6
@@ -304,16 +320,18 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
int ret;
u32 val;
+ panfrost_gpu_init_quirks(pfdev);
+
/* Just turn on everything for now */
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
- val, val == pfdev->features.l2_present, 100, 1000);
+ val, val == pfdev->features.l2_present, 100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
- val, val == pfdev->features.shader_present, 100, 1000);
+ val, val == pfdev->features.shader_present, 100, 20000);
if (ret)
dev_err(pfdev->dev, "error powering up gpu shader");
@@ -343,6 +361,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
dma_set_mask_and_coherent(pfdev->dev,
DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+ dma_set_max_seg_size(pfdev->dev, UINT_MAX);
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
if (irq <= 0)
@@ -355,7 +374,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return err;
}
- panfrost_gpu_init_quirks(pfdev);
panfrost_gpu_power_on(pfdev);
return 0;
@@ -368,7 +386,16 @@ void panfrost_gpu_fini(struct panfrost_device *pfdev)
u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
- if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
- return gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
+ u32 flush_id;
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
+ /* Flush reduction only makes sense when the GPU is kept powered on between jobs */
+ if (pm_runtime_get_if_in_use(pfdev->dev)) {
+ flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
+ pm_runtime_put(pfdev->dev);
+ return flush_id;
+ }
+ }
+
return 0;
}
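
pm_runtime_get_if_in_use() only takes a reference when the device is already active, which is what lets the flush-id read above avoid waking a suspended GPU. One subtlety worth noting: it returns a negative errno when runtime PM is disabled, 0 when the device is suspended, and a positive value on success; a sketch that checks the positive case explicitly (read_reg_if_powered() is hypothetical):

#include <linux/io.h>
#include <linux/pm_runtime.h>

/* Read a register only if the device is already powered; return 0 as a
 * harmless default when it is suspended. */
static u32 read_reg_if_powered(struct device *dev, void __iomem *reg)
{
	u32 val = 0;

	if (pm_runtime_get_if_in_use(dev) > 0) {
		val = readl(reg);
		pm_runtime_put(dev);	/* drop the reference we took */
	}
	return val;
}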
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 4112412087b2..468c51e7e46d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
+
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 360146f6f3d9..30e7b7196dab 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -145,7 +145,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
u64 jc_head = job->jc;
int ret;
- panfrost_devfreq_record_busy(pfdev);
+ panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
@@ -410,7 +410,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
for (i = 0; i < NUM_JOB_SLOTS; i++) {
if (pfdev->jobs[i]) {
pm_runtime_put_noidle(pfdev->dev);
- panfrost_devfreq_record_idle(pfdev);
+ panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
pfdev->jobs[i] = NULL;
}
}
@@ -478,7 +478,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_idle(pfdev);
+ panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -581,10 +581,6 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
- /* Check whether the hardware is idle */
- if (atomic_read(&pfdev->devfreq.busy_count))
- return false;
-
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index e8f7b11352d2..776448c527ea 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
u64 start_iova = iova;
- for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
+ for_each_sgtable_dma_sg(sgt, sgl, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
@@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
if (ret)
goto err_pages;
- if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
- ret = -EINVAL;
+ ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret)
goto err_map;
- }
mmu_map_sg(pfdev, bomapping->mmu, addr,
IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
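
mmu_map_sg() now iterates with for_each_sgtable_dma_sg() and the fault path maps with dma_map_sgtable(); the sgtable helpers keep the mapped-entry count (nents) separate from the allocated count (orig_nents), so callers can no longer confuse the two. A sketch of the pairing (map_and_walk() is hypothetical, the DMA calls are the real API):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_and_walk(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sgl;
	unsigned int i;
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* negative errno, nothing left mapped */

	/* Walks only the DMA-mapped entries, never orig_nents. */
	for_each_sgtable_dma_sg(sgt, sgl, i) {
		dma_addr_t addr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		/* program addr/len into device page tables here */
		(void)addr;
		(void)len;
	}

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}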
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index ec4695cf3caf..fdbc8d949135 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -83,11 +83,13 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
- return ret;
+ goto err_put_pm;
bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ goto err_put_pm;
+ }
/* Map the perfcnt buf in the address space attached to file_priv. */
ret = panfrost_gem_open(&bo->base, file_priv);
@@ -168,6 +170,8 @@ err_close_bo:
panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put(&bo->base);
+err_put_pm:
+ pm_runtime_put(pfdev->dev);
return ret;
}
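
The new err_put_pm label exists because pm_runtime_get_sync() increments the usage counter even when it fails, so every error path after the call still owes a put. A sketch of the required pairing (do_hw_work() is a hypothetical payload):

#include <linux/pm_runtime.h>

int do_hw_work(struct device *dev);	/* hypothetical */

static int run_with_power(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto out_put;	/* counter was bumped despite the error */

	ret = do_hw_work(dev);

out_put:
	pm_runtime_put(dev);
	return ret;
}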
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index ea38ac60581c..eddaa62ad8b0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -51,6 +51,10 @@
#define GPU_STATUS 0x34
#define GPU_STATUS_PRFCNT_ACTIVE BIT(2)
#define GPU_LATEST_FLUSH_ID 0x38
+#define GPU_PWR_KEY 0x50 /* (WO) Power manager key register */
+#define GPU_PWR_KEY_UNLOCK 0x2968A819
+#define GPU_PWR_OVERRIDE0 0x54 /* (RW) Power manager override settings */
+#define GPU_PWR_OVERRIDE1 0x58 /* (RW) Power manager override settings */
#define GPU_FAULT_STATUS 0x3C
#define GPU_FAULT_ADDRESS_LO 0x40
#define GPU_FAULT_ADDRESS_HI 0x44
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 96e58fda75d8..46b0d1c4a16c 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -10,18 +10,11 @@
*/
/**
- * DOC: ARM PrimeCell PL111 CLCD Driver
+ * DOC: ARM PrimeCell PL110 and PL111 CLCD Driver
*
- * The PL111 is a simple LCD controller that can support TFT and STN
- * displays. This driver exposes a standard KMS interface for them.
- *
- * This driver uses the same Device Tree binding as the fbdev CLCD
- * driver. While the fbdev driver supports panels that may be
- * connected to the CLCD internally to the CLCD driver, in DRM the
- * panels get split out to drivers/gpu/drm/panels/. This means that,
- * in converting from using fbdev to using DRM, you also need to write
- * a panel driver (which may be as simple as an entry in
- * panel-simple.c).
+ * The PL110/PL111 is a simple LCD controller that can support TFT
+ * and STN displays. This driver exposes a standard KMS interface
+ * for them.
*
* The driver currently doesn't expose the cursor. The DRM API for
* cursors requires support for 64x64 ARGB8888 cursor images, while
@@ -29,16 +22,13 @@
* cursors. While one could imagine trying to hack something together
* to look at the ARGB8888 image and program something reasonable in
* monochrome, we just don't expose the cursor at all, and leave cursor
- * support to the X11 software cursor layer.
+ * support to the application software cursor layer.
*
* TODO:
*
* - Fix race between setting plane base address and getting IRQ for
* vsync firing the pageflip completion.
*
- * - Use the "max-memory-bandwidth" DT property to filter the
- * supported formats.
- *
* - Read back hardware state at boot to skip reprogramming the
* hardware when doing a no-op modeset.
*
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 798f9dd7ad75..54e3c3a97440 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -588,7 +588,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
{
int ret;
- ret = qxl_bo_reserve(surf, false);
+ ret = qxl_bo_reserve(surf);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 099dca48b0ff..6063f3a15329 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -26,6 +26,7 @@
#include <linux/crc32.h>
#include <linux/delay.h>
+#include <drm/drm_drv.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -162,7 +163,8 @@ static void qxl_update_offset_props(struct qxl_device *qdev)
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
struct drm_device *dev = &qdev->ddev;
- int status, retries;
+ struct drm_modeset_acquire_ctx ctx;
+ int status, retries, ret;
for (retries = 0; retries < 10; retries++) {
status = qxl_display_copy_rom_client_monitors_config(qdev);
@@ -183,9 +185,9 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
return;
}
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
qxl_update_offset_props(qdev);
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
if (!drm_helper_hpd_irq_event(dev)) {
/* notify that the monitor configuration changed, so the
   client can adjust to the new, arbitrary resolution */
@@ -403,18 +405,17 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct qxl_device *qdev = to_qxl(fb->dev);
struct drm_clip_rect norect;
struct qxl_bo *qobj;
+ struct drm_modeset_acquire_ctx ctx;
bool is_primary;
- int inc = 1;
+ int inc = 1, ret;
- drm_modeset_lock_all(fb->dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(fb->dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
qobj = gem_to_qxl_bo(fb->obj[0]);
/* if we aren't primary surface ignore this */
is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary;
- if (!is_primary) {
- drm_modeset_unlock_all(fb->dev);
- return 0;
- }
+ if (!is_primary)
+ goto out_lock_end;
if (!num_clips) {
num_clips = 1;
@@ -430,7 +431,8 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
clips, num_clips, inc, 0);
- drm_modeset_unlock_all(fb->dev);
+out_lock_end:
+ DRM_MODESET_LOCK_ALL_END(fb->dev, ctx, ret);
return 0;
}
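
DRM_MODESET_LOCK_ALL_BEGIN/END replace drm_modeset_lock_all() with an acquire context that backs off and retries on -EDEADLK; the code between the two macros may therefore run more than once, and early exits must jump to the END label (as the out_lock_end label above does) rather than return. A minimal sketch, matching the three-argument END form used in this patch:

#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

static int touch_modeset_state(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);

	/* ... modify connector/CRTC state under the acquired locks;
	 * this section may be retried after a lock back-off ... */

	DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
	return ret;
}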
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13872b882775..6e7f16f4cec7 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -96,7 +96,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto disable_pci;
- if (is_vga(pdev)) {
+ if (is_vga(pdev) && pdev->revision < 5) {
ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
if (ret) {
DRM_ERROR("can't get legacy vga ioports\n");
@@ -127,7 +127,7 @@ modeset_cleanup:
unload:
qxl_device_fini(qdev);
put_vga:
- if (is_vga(pdev))
+ if (is_vga(pdev) && pdev->revision < 5)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
@@ -155,7 +155,7 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
- if (is_vga(pdev))
+ if (is_vga(pdev) && pdev->revision < 5)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 9691449aefdb..aae90a9ee1db 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -350,7 +350,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 8f605d5cc149..5cea6eea72ab 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -322,7 +322,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
qobj = gem_to_qxl_bo(gobj);
- ret = qxl_bo_reserve(qobj, false);
+ ret = qxl_bo_reserve(qobj);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 80e7a17aaddd..2bc364412e8b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -64,16 +64,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
- if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+ if (domain == QXL_GEM_DOMAIN_VRAM) {
+ qbo->placements[c].mem_type = TTM_PL_VRAM;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ }
if (domain == QXL_GEM_DOMAIN_SURFACE) {
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
+ qbo->placements[c].mem_type = TTM_PL_PRIV;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c].mem_type = TTM_PL_VRAM;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ }
+ if (domain == QXL_GEM_DOMAIN_CPU) {
+ qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+ }
+ if (!c) {
+ qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
}
- if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
- if (!c)
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
@@ -167,6 +175,7 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
+ unsigned long offset;
void *rptr;
int ret;
struct io_mapping *map;
@@ -178,9 +187,8 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
-
- return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+ offset = bo->tbo.mem.start << PAGE_SHIFT;
+ return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
rptr = bo->kptr + (page_offset * PAGE_SIZE);
@@ -284,7 +292,7 @@ int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
- r = qxl_bo_reserve(bo, false);
+ r = qxl_bo_reserve(bo);
if (r)
return r;
@@ -302,7 +310,7 @@ int qxl_bo_unpin(struct qxl_bo *bo)
{
int r;
- r = qxl_bo_reserve(bo, false);
+ r = qxl_bo_reserve(bo);
if (r)
return r;
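
With the ttm_resource rework, a placement names its target memory type in the new .mem_type field instead of encoding it in TTM_PL_FLAG_* bits, as the converted qxl placements above show. A sketch of a single system-memory placement in the new style, under the assumption of the same TTM version this series targets:

#include <drm/ttm/ttm_placement.h>

/* One system-memory placement, any caching attribute accepted. */
static const struct ttm_place sys_place = {
	.fpfn = 0,			/* no range restriction */
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,	/* was TTM_PL_FLAG_SYSTEM in .flags */
	.flags = TTM_PL_MASK_CACHING,
};

static const struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_place,
	.num_busy_placement = 1,
	.busy_placement = &sys_place,
};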
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 21fa81048f4f..6b434e5ef795 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -27,11 +27,11 @@
#include "qxl_drv.h"
-static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+static inline int qxl_bo_reserve(struct qxl_bo *bo)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct drm_device *ddev = bo->tbo.base.dev;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index bf9dc451583a..fd691fff8394 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,31 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = 0;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- case TTM_PL_PRIV:
- /* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
- return 0;
-}
-
static void qxl_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@@ -80,7 +55,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_MASK_CACHING
};
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
@@ -96,29 +72,22 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
}
int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct qxl_device *qdev = qxl_get_qdev(bdev);
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
-
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_VRAM:
mem->bus.is_iomem = true;
- mem->bus.base = qdev->vram_base;
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
break;
case TTM_PL_PRIV:
mem->bus.is_iomem = true;
- mem->bus.base = qdev->surfaceram_base;
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset = (mem->start << PAGE_SHIFT) +
+ qdev->surfaceram_base;
break;
default:
return -EINVAL;
@@ -135,8 +104,9 @@ struct qxl_ttm_tt {
u64 offset;
};
-static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
{
struct qxl_ttm_tt *gtt = (void *)ttm;
@@ -149,25 +119,22 @@ static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
return -1;
}
-static void qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
/* Not implemented */
}
-static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct qxl_ttm_tt *gtt = (void *)ttm;
+ ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func qxl_backend_func = {
- .bind = &qxl_ttm_backend_bind,
- .unbind = &qxl_ttm_backend_unbind,
- .destroy = &qxl_ttm_backend_destroy,
-};
-
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
@@ -178,7 +145,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
- gtt->ttm.func = &qxl_backend_func;
gtt->qdev = qdev;
if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
@@ -187,21 +153,11 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm;
}
-static void qxl_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
int ret;
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
@@ -209,7 +165,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
return ret;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- qxl_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
return ttm_bo_move_memcpy(bo, ctx, new_mem);
@@ -217,7 +173,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct qxl_bo *qbo;
struct qxl_device *qdev;
@@ -233,7 +189,9 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .init_mem_type = &qxl_init_mem_type,
+ .ttm_tt_bind = &qxl_ttm_backend_bind,
+ .ttm_tt_destroy = &qxl_ttm_backend_destroy,
+ .ttm_tt_unbind = &qxl_ttm_backend_unbind,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
@@ -241,6 +199,13 @@ static struct ttm_bo_driver qxl_bo_driver = {
.move_notify = &qxl_bo_move_notify,
};
+static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
+ unsigned int type,
+ uint64_t size)
+{
+ return ttm_range_man_init(&qdev->mman.bdev, type, false, size);
+}
+
int qxl_ttm_init(struct qxl_device *qdev)
{
int r;
@@ -258,14 +223,13 @@ int qxl_ttm_init(struct qxl_device *qdev)
}
/* NOTE: this includes the framebuffer (aka surface 0) */
num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
- r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
- num_io_pages);
+ r = qxl_ttm_init_mem_type(qdev, TTM_PL_VRAM, num_io_pages);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
- qdev->surfaceram_size / PAGE_SIZE);
+ r = qxl_ttm_init_mem_type(qdev, TTM_PL_PRIV,
+ qdev->surfaceram_size / PAGE_SIZE);
if (r) {
DRM_ERROR("Failed initializing Surfaces heap.\n");
return r;
@@ -281,8 +245,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
void qxl_ttm_fini(struct qxl_device *qdev)
{
- ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+ ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
+ ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
DRM_INFO("qxl: ttm finalized\n");
}
@@ -293,12 +257,10 @@ void qxl_ttm_fini(struct qxl_device *qdev)
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+ struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&ttm_bo_glob.lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_resource_manager_debug(man, &p);
return 0;
}
#endif
@@ -319,9 +281,9 @@ void qxl_ttm_debugfs_init(struct qxl_device *qdev)
qxl_mem_types_list[i].show = &qxl_mm_dump_table;
qxl_mem_types_list[i].driver_features = 0;
if (i == 0)
- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
else
- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
+ qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
}
qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b7c3fb2bfb54..a6d8de01194a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2815,10 +2815,12 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
-extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
+ struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
-extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
-extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
+extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
+extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
+bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@ -2857,7 +2859,7 @@ int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 33ae1b883268..21ce2f9502c0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -160,7 +160,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].allowed_domains = domain;
}
- if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+ if (radeon_ttm_tt_has_userptr(p->rdev, p->relocs[i].robj->tbo.ttm)) {
uint32_t domain = p->relocs[i].preferred_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f178ba321715..3808a753127b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -72,8 +72,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
- ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
- &rdev->gart.table_addr);
+ ptr = dma_alloc_coherent(&rdev->pdev->dev, rdev->gart.table_size,
+ &rdev->gart.table_addr, GFP_KERNEL);
if (ptr == NULL) {
return -ENOMEM;
}
@@ -85,7 +85,6 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
}
#endif
rdev->gart.ptr = ptr;
- memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
return 0;
}
@@ -110,9 +109,8 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
- pci_free_consistent(rdev->pdev, rdev->gart.table_size,
- (void *)rdev->gart.ptr,
- rdev->gart.table_addr);
+ dma_free_coherent(&rdev->pdev->dev, rdev->gart.table_size,
+ (void *)rdev->gart.ptr, rdev->gart.table_addr);
rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0;
}
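
The pci_alloc_consistent()/pci_free_consistent() wrappers are thin legacy shims over the generic DMA API; dma_alloc_coherent() also returns zeroed memory, which is why the follow-up memset() is dropped above. A sketch of the modern pairing (table_alloc/table_free are hypothetical wrappers):

#include <linux/dma-mapping.h>

static void *table_alloc(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Zeroed, coherent allocation; no memset() needed afterwards. */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}

static void table_free(struct device *dev, size_t size, void *cpu,
			dma_addr_t dma)
{
	dma_free_coherent(dev, size, cpu, dma);
}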
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 44157ada9b0e..e5c4271e64ed 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -224,9 +224,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
- man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
args->vram_size = (u64)man->size << PAGE_SHIFT;
args->vram_visible = rdev->mc.visible_vram_size;
@@ -331,7 +331,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto handle_lockup;
bo = gem_to_radeon_bo(gobj);
- r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+ r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
if (r)
goto release_object;
@@ -420,7 +420,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+ if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
drm_gem_object_put(gobj);
return -EPERM;
}
@@ -721,7 +721,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = -EPERM;
- if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+ if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
goto out;
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index f93829f08a4d..97b9b6dd6dd3 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -53,7 +53,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
struct ttm_operation_ctx ctx = { false, false };
long r;
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))
return true;
if (!mmu_notifier_range_blockable(range))
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f3dee01250da..316e35d3f8a9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,58 +112,58 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
rbo->placements[c].fpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ rbo->placements[c].mem_type = TTM_PL_VRAM;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_UNCACHED;
}
rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_VRAM;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_UNCACHED;
}
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_TT;
+ rbo->placements[c].mem_type = TTM_PL_TT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_TT;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_TT;
+ TTM_PL_FLAG_UNCACHED;
} else {
rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_TT;
+ rbo->placements[c].mem_type = TTM_PL_TT;
+ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_UNCACHED;
} else {
rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
}
}
if (!c) {
rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
}
rbo->placement.num_placement = c;
@@ -171,7 +171,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
for (i = 0; i < c; ++i) {
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
!rbo->placements[i].fpfn)
rbo->placements[i].lpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -331,7 +331,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+ if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
return -EPERM;
if (bo->pin_count) {
@@ -360,7 +360,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
!(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
(!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
bo->placements[i].lpfn =
@@ -775,7 +775,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct radeon_bo *rbo;
@@ -824,7 +824,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < rbo->placement.num_placement; i++) {
/* Force into visible VRAM */
- if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
+ if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
(!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
rbo->placements[i].lpfn = lpfn;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 60275b822f79..44b47241ee42 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -165,7 +165,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem);
+ struct ttm_resource *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c5d6fda0d75..05c4196a8212 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -712,6 +712,31 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
+static ssize_t radeon_hwmon_show_sclk(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct radeon_device *rdev = dev_get_drvdata(dev);
+ struct drm_device *ddev = rdev->ddev;
+ u32 sclk = 0;
+
+ /* Can't get clock frequency when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
+ if (rdev->asic->dpm.get_current_sclk)
+ sclk = radeon_dpm_get_current_sclk(rdev);
+
+ /* Value returned by dpm is in 10 kHz units; convert it to Hz for
+ hwmon */
+ sclk *= 10000;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", sclk);
+}
+
+static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL,
+ 0);
+
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -721,6 +746,7 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr,
+ &sensor_dev_attr_freq1_input.dev_attr.attr,
NULL
};
@@ -738,7 +764,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_freq1_input.dev_attr.attr))
return 0;
/* Skip fan attributes if fan is not present */
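
The new freq1_input attribute follows the hwmon sysfs convention that frequencies are reported in Hz, while radeon's dpm interface hands back 10 kHz units, hence the multiply by 10000 above. A small sketch of the conversion (dpm_to_hz() is hypothetical):

#include <linux/types.h>

/* dpm reports the clock in 10 kHz units; hwmon wants Hz. */
static u64 dpm_to_hz(u32 sclk_10khz)
{
	/* e.g. 30000 (300 MHz in dpm units) -> 300000000 Hz */
	return (u64)sclk_10khz * 10000;
}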
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b906e8fbd5f3..b9de0e51c0be 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages;
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
}
void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
@@ -121,7 +121,7 @@ struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct radeon_bo *bo = gem_to_radeon_bo(gobj);
- if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+ if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
return ERR_PTR(-EPERM);
return drm_gem_prime_export(gobj, flags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 004344dce140..36150b7f31a9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -56,6 +56,10 @@
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
+static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem);
+
struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
struct radeon_mman *mman;
@@ -66,53 +70,16 @@ struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
+static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
- struct radeon_device *rdev;
-
- rdev = radeon_get_rdev(bdev);
+ return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
+ false, rdev->mc.real_vram_size >> PAGE_SHIFT);
+}
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-#if IS_ENABLED(CONFIG_AGP)
- if (rdev->flags & RADEON_IS_AGP) {
- if (!rdev->ddev->agp) {
- DRM_ERROR("AGP is not enabled for memory type %u\n",
- (unsigned)type);
- return -EINVAL;
- }
- if (!rdev->ddev->agp->cant_use_aperture)
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- }
-#endif
- break;
- case TTM_PL_VRAM:
- /* "On-card" video ram */
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
+static int radeon_ttm_init_gtt(struct radeon_device *rdev)
+{
+ return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
+ true, rdev->mc.gtt_size >> PAGE_SHIFT);
}
static void radeon_evict_flags(struct ttm_buffer_object *bo,
@@ -121,7 +88,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_MASK_CACHING
};
struct radeon_bo *rbo;
@@ -152,7 +120,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
RADEON_GEM_DOMAIN_GTT);
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
- if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
+ if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
if (rbo->placements[i].fpfn < fpfn)
rbo->placements[i].fpfn = fpfn;
} else {
@@ -174,27 +142,18 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
- if (radeon_ttm_tt_has_userptr(bo->ttm))
+ if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))
return -EPERM;
return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
filp->private_data);
}
-static void radeon_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
static int radeon_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ struct ttm_resource *new_mem,
+ struct ttm_resource *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -241,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (IS_ERR(fence))
return PTR_ERR(fence);
- r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
radeon_fence_unref(&fence);
return r;
}
@@ -249,11 +208,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
@@ -266,7 +225,8 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
if (unlikely(r)) {
return r;
@@ -277,7 +237,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
+ r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ if (unlikely(r)) {
+ goto out_cleanup;
+ }
+
+ r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -287,18 +252,18 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
}
r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -311,7 +276,8 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = TTM_PL_MASK_CACHING;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
if (unlikely(r)) {
return r;
@@ -325,17 +291,17 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
+ ttm_resource_free(bo, &tmp_mem);
return r;
}
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct radeon_device *rdev;
struct radeon_bo *rbo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
int r;
r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
@@ -349,7 +315,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- radeon_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT &&
@@ -357,7 +323,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
/* bind is enough */
- radeon_move_null(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
@@ -392,18 +358,11 @@ memcpy:
return 0;
}
-static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct radeon_device *rdev = radeon_get_rdev(bdev);
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
@@ -412,8 +371,8 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
/* RADEON_IS_AGP is set only if AGP is active */
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = rdev->mc.agp_base;
+ mem->bus.offset = (mem->start << PAGE_SHIFT) +
+ rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
}
#endif
@@ -421,9 +380,9 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
- if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+ if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
return -EINVAL;
- mem->bus.base = rdev->mc.aper_base;
+ mem->bus.offset += rdev->mc.aper_base;
mem->bus.is_iomem = true;
#ifdef __alpha__
/*
@@ -432,12 +391,10 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
*/
if (mem->placement & TTM_PL_FLAG_WC)
mem->bus.addr =
- ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
+ ioremap_wc(mem->bus.offset, bus_size);
else
mem->bus.addr =
- ioremap(mem->bus.base + mem->bus.offset,
- mem->bus.size);
+ ioremap(mem->bus.offset, bus_size);
if (!mem->bus.addr)
return -ENOMEM;
@@ -447,7 +404,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
* It then can be used to build PTEs for VRAM
* access, as done in ttm_bo_vm_fault().
*/
- mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+ mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
rdev->ddev->hose->dense_mem_base;
#endif
break;
@@ -462,18 +419,18 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
*/
struct radeon_ttm_tt {
struct ttm_dma_tt ttm;
- struct radeon_device *rdev;
u64 offset;
uint64_t userptr;
struct mm_struct *usermm;
uint32_t userflags;
+ bool bound;
};
/* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
- struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned pinned = 0;
int r;
@@ -532,9 +489,9 @@ release_pages:
return r;
}
-static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
- struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
@@ -561,16 +518,28 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
}
-static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
+
+ return (gtt->bound);
+}
+
+static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
+{
+ struct radeon_ttm_tt *gtt = (void*)ttm;
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
RADEON_GART_PAGE_WRITE;
int r;
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
- radeon_ttm_tt_pin_userptr(ttm);
+ radeon_ttm_tt_pin_userptr(bdev, ttm);
flags &= ~RADEON_GART_PAGE_WRITE;
}
@@ -581,40 +550,43 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
}
if (ttm->caching_state == tt_cached)
flags |= RADEON_GART_PAGE_SNOOP;
- r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+ r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
+ gtt->bound = true;
return 0;
}
-static void radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
- radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+ if (!gtt->bound)
+ return;
+
+ radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
if (gtt->userptr)
- radeon_ttm_tt_unpin_userptr(ttm);
+ radeon_ttm_tt_unpin_userptr(bdev, ttm);
+ gtt->bound = false;
}
-static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
+ radeon_ttm_backend_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
+
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func radeon_backend_func = {
- .bind = &radeon_ttm_backend_bind,
- .unbind = &radeon_ttm_backend_unbind,
- .destroy = &radeon_ttm_backend_destroy,
-};
-
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
@@ -633,8 +605,6 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
if (gtt == NULL) {
return NULL;
}
- gtt->ttm.ttm.func = &radeon_backend_func;
- gtt->rdev = rdev;
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
@@ -642,18 +612,25 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm.ttm;
}
-static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
+static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
+ struct ttm_tt *ttm)
{
- if (!ttm || ttm->func != &radeon_backend_func)
+#if IS_ENABLED(CONFIG_AGP)
+ if (rdev->flags & RADEON_IS_AGP)
return NULL;
- return (struct radeon_ttm_tt *)ttm;
+#endif
+
+ if (!ttm)
+ return NULL;
+ return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
}
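Note: with the per-tt backend function table removed (see the radeon_backend_func deletion above), ttm->func can no longer identify radeon-owned TTs, so the helper keys off the device type instead: on AGP hardware the tt comes from the AGP backend and is not a radeon_ttm_tt, hence the NULL return. A usage sketch (illustrative):

	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (!gtt)
		return;		/* NULL or AGP-backed tt: not a radeon_ttm_tt */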
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx)
+static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
- struct radeon_device *rdev;
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+ struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
@@ -662,21 +639,20 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
- rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_populate(ttm, ctx);
+ return ttm_pool_populate(ttm, ctx);
}
#endif
@@ -689,10 +665,10 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}
-static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
- struct radeon_device *rdev;
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+ struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
@@ -704,10 +680,9 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- ttm_agp_tt_unpopulate(ttm);
+ ttm_pool_unpopulate(ttm);
return;
}
#endif
@@ -722,10 +697,11 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}
-int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
+ struct ttm_tt *ttm, uint64_t addr,
uint32_t flags)
{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+ struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
if (gtt == NULL)
return -EINVAL;
@@ -736,9 +712,69 @@ int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
return 0;
}
-bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+ if (rdev->flags & RADEON_IS_AGP)
+ return ttm_agp_is_bound(ttm);
+#endif
+ return radeon_ttm_backend_is_bound(ttm);
+}
+
+static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+#endif
+
+ if (!bo_mem)
+ return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+ if (rdev->flags & RADEON_IS_AGP)
+ return ttm_agp_bind(ttm, bo_mem);
+#endif
+
+ return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
+}
+
+static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+#if IS_ENABLED(CONFIG_AGP)
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_unbind(ttm);
+ return;
+ }
+#endif
+ radeon_ttm_backend_unbind(bdev, ttm);
+}
+
+static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_unbind(ttm);
+ ttm_tt_destroy_common(bdev, ttm);
+ ttm_agp_destroy(ttm);
+ return;
+ }
+#endif
+ radeon_ttm_backend_destroy(bdev, ttm);
+}
+
+bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
+ struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
if (gtt == NULL)
return false;
@@ -746,9 +782,10 @@ bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr;
}
-bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
+ struct ttm_tt *ttm)
{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+ struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
if (gtt == NULL)
return false;
@@ -760,7 +797,9 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .init_mem_type = &radeon_init_mem_type,
+ .ttm_tt_bind = &radeon_ttm_tt_bind,
+ .ttm_tt_unbind = &radeon_ttm_tt_unbind,
+ .ttm_tt_destroy = &radeon_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
@@ -785,8 +824,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
rdev->mman.initialized = true;
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
- rdev->mc.real_vram_size >> PAGE_SHIFT);
+
+ r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
@@ -811,8 +850,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
- rdev->mc.gtt_size >> PAGE_SHIFT);
+
+ r = radeon_ttm_init_gtt(rdev);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -843,8 +882,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
}
radeon_bo_unref(&rdev->stolen_vga_memory);
}
- ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+ ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
+ ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
rdev->mman.initialized = false;
@@ -855,12 +894,12 @@ void radeon_ttm_fini(struct radeon_device *rdev)
* isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
if (!rdev->mman.initialized)
return;
- man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
man->size = size >> PAGE_SHIFT;
}
@@ -914,7 +953,7 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
unsigned ttm_pl = *(int*)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
+ struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index f60fae0aed11..27b14eff532c 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -188,7 +188,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
return NULL;
- /* we definately need to flush */
+ /* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
/* skip over VMID 0, since it is the system VM */
@@ -911,7 +911,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
@@ -942,7 +942,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
- if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+ if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 800721153d51..58557c2263a7 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -117,7 +117,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
if (r)
return r;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = (rdev->uvd.gpu_addr >> 3) + 16;
size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
@@ -360,7 +360,7 @@ int uvd_v1_0_start(struct radeon_device *rdev)
/* Set the write pointer delay */
WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
- /* programm the 4GB memory segment for rptr and ring buffer */
+ /* program the 4GB memory segment for rptr and ring buffer */
WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
(0x7 << 16) | (0x1 << 31));
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 23b18edda20e..6266167886d9 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -109,7 +109,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
if (r)
return r;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
addr = rdev->uvd.gpu_addr >> 3;
size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
index dc54fa4aaea8..f9e97fa63674 100644
--- a/drivers/gpu/drm/radeon/uvd_v4_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -40,7 +40,7 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
uint64_t addr;
uint32_t size;
- /* programm the VCPU memory controller bits 0-27 */
+ /* program the VCPU memory controller bits 0-27 */
/* skip over the header of the new firmware format */
if (rdev->uvd.fw_header_present)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index f65d1489dc50..b47e74421e34 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -22,11 +22,11 @@ config DRM_RCAR_CMM
Enable support for R-Car Color Management Module (CMM).
config DRM_RCAR_DW_HDMI
- tristate "R-Car DU Gen3 HDMI Encoder Support"
+ tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
depends on DRM && OF
select DRM_DW_HDMI
help
- Enable support for R-Car Gen3 internal HDMI encoder.
+ Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
@@ -49,3 +49,4 @@ config DRM_RCAR_VSP
config DRM_RCAR_WRITEBACK
bool
default y if ARM64
+ depends on DRM_RCAR_DU
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index f53b0ec71085..447be991fa25 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -186,6 +186,35 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.lvds_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a774e1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774E1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 1,
.features = RCAR_DU_FEATURE_INTERLACED
@@ -216,8 +245,9 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
- * R8A7790 has one RGB output, two LVDS outputs and one
- * (currently unsupported) TCON output.
+ * R8A7742 and R8A7790 each have one RGB output and two LVDS
+ * outputs. Additionally R8A7790 supports one TCON output
+ * (currently unsupported by the driver).
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2) | BIT(1) | BIT(0),
@@ -443,6 +473,7 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
};
static const struct of_device_id rcar_du_of_table[] = {
+ { .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
@@ -450,6 +481,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
{ .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
+ { .compatible = "renesas,du-r8a774e1", .data = &rcar_du_r8a774e1_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -458,6 +490,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7794", .data = &rcar_du_r8a7794_info },
{ .compatible = "renesas,du-r8a7795", .data = &rcar_du_r8a7795_info },
{ .compatible = "renesas,du-r8a7796", .data = &rcar_du_r8a7796_info },
+ { .compatible = "renesas,du-r8a77961", .data = &rcar_du_r8a7796_info },
{ .compatible = "renesas,du-r8a77965", .data = &rcar_du_r8a77965_info },
{ .compatible = "renesas,du-r8a77970", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 482329102f19..72dda446355f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -40,6 +40,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -47,6 +48,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -61,6 +63,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
@@ -68,6 +71,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
@@ -75,6 +79,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
+ .hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -82,6 +87,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
+ .hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -89,6 +95,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
+ .hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -96,6 +103,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
+ .hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
@@ -103,6 +111,7 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
+ .hsub = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
},
@@ -115,156 +124,187 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
.v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
.v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA4444,
.v4l2 = V4L2_PIX_FMT_RGBA444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX4444,
.v4l2 = V4L2_PIX_FMT_RGBX444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR4444,
.v4l2 = V4L2_PIX_FMT_ABGR444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR4444,
.v4l2 = V4L2_PIX_FMT_XBGR444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA4444,
.v4l2 = V4L2_PIX_FMT_BGRA444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX4444,
.v4l2 = V4L2_PIX_FMT_BGRX444,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA5551,
.v4l2 = V4L2_PIX_FMT_RGBA555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX5551,
.v4l2 = V4L2_PIX_FMT_RGBX555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR1555,
.v4l2 = V4L2_PIX_FMT_ABGR555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR1555,
.v4l2 = V4L2_PIX_FMT_XBGR555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA5551,
.v4l2 = V4L2_PIX_FMT_BGRA555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX5551,
.v4l2 = V4L2_PIX_FMT_BGRX555,
.bpp = 16,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
.v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
.v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBA8888,
.v4l2 = V4L2_PIX_FMT_BGRA32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_RGBX8888,
.v4l2 = V4L2_PIX_FMT_BGRX32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_ABGR8888,
.v4l2 = V4L2_PIX_FMT_RGBA32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_XBGR8888,
.v4l2 = V4L2_PIX_FMT_RGBX32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
.v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
.v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
.v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_NV61,
.v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
.v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_YVU420,
.v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV422,
.v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_YVU422,
.v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
+ .hsub = 2,
}, {
.fourcc = DRM_FORMAT_YUV444,
.v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
+ .hsub = 1,
}, {
.fourcc = DRM_FORMAT_YVU444,
.v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
+ .hsub = 1,
},
};
@@ -311,6 +351,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
+ unsigned int chroma_pitch;
unsigned int max_pitch;
unsigned int align;
unsigned int i;
@@ -353,10 +394,19 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
+ /*
+ * Calculate the chroma plane(s) pitch using the horizontal subsampling
+	 * factor. For semi-planar formats, the U and V planes are combined, so
+	 * the pitch must be doubled.
+ */
+ chroma_pitch = mode_cmd->pitches[0] / format->hsub;
+ if (format->planes == 2)
+ chroma_pitch *= 2;
+
for (i = 1; i < format->planes; ++i) {
- if (mode_cmd->pitches[i] != mode_cmd->pitches[0]) {
+ if (mode_cmd->pitches[i] != chroma_pitch) {
dev_dbg(dev->dev,
- "luma and chroma pitches do not match\n");
+ "luma and chroma pitches are not compatible\n");
return ERR_PTR(-EINVAL);
}
}
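Note: a worked example of the pitch rule above, with illustrative values. For NV12 (two planes, hsub = 2) a 1920-byte luma pitch gives an expected chroma pitch of 1920 / 2 * 2 = 1920, because the interleaved U and V samples double the subsampled width; for three-plane YUV420 the same luma pitch yields 960 per chroma plane:

	unsigned int luma_pitch = 1920;			/* example value */
	unsigned int chroma_pitch;

	chroma_pitch = luma_pitch / format->hsub;	/* 960 for hsub == 2 */
	if (format->planes == 2)
		chroma_pitch *= 2;			/* semi-planar: U and V interleaved */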
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index 0346504d8c59..8f5fff176754 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -22,6 +22,7 @@ struct rcar_du_format_info {
u32 v4l2;
unsigned int bpp;
unsigned int planes;
+ unsigned int hsub;
unsigned int pnmr;
unsigned int edf;
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f1a81c9b184d..f6a69aa116e6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -13,6 +13,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
@@ -197,9 +198,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
goto fail;
ret = vsp1_du_map_sg(vsp->vsp, sgt);
- if (!ret) {
+ if (ret) {
sg_free_table(sgt);
- ret = -ENOMEM;
goto fail;
}
}
@@ -279,7 +279,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
if (plane->state->visible)
rcar_du_vsp_plane_setup(rplane);
- else
+ else if (old_state->crtc)
vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
rplane->index, NULL);
}
@@ -341,6 +341,13 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
};
+static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res)
+{
+ struct rcar_du_vsp *vsp = res;
+
+ put_device(vsp->vsp);
+}
+
int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
unsigned int crtcs)
{
@@ -357,6 +364,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
vsp->vsp = &pdev->dev;
+ ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
+ if (ret < 0)
+ return ret;
+
ret = vsp1_du_init(vsp->vsp);
if (ret < 0)
return ret;
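Note: drmm_add_action() registers a DRM-managed release action, so the put_device() in rcar_du_vsp_cleanup() runs automatically when the drm_device is released and no explicit unwind is needed on later error paths. The shape of the pattern, names as in the hunk above (illustrative):

	ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp);
	if (ret < 0)
		return ret;
	/* from here on, put_device(vsp->vsp) happens at drm_device release */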
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index ab0d49618cf9..70dbbe44bb23 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -677,15 +677,11 @@ static int rcar_lvds_attach(struct drm_bridge *bridge,
if (ret < 0)
return ret;
- return drm_panel_attach(lvds->panel, connector);
+ return 0;
}
static void rcar_lvds_detach(struct drm_bridge *bridge)
{
- struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
-
- if (lvds->panel)
- drm_panel_detach(lvds->panel);
}
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
@@ -982,11 +978,13 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = {
};
static const struct of_device_id rcar_lvds_of_table[] = {
+ { .compatible = "renesas,r8a7742-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
+ { .compatible = "renesas,r8a774e1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b9275ba7c5a5..62e5d0970525 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -36,8 +36,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
rk_obj->dma_addr = rk_obj->mm.start;
- ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
- rk_obj->sgt->nents, prot);
+ ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
+ prot);
if (ret < rk_obj->base.size) {
DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
ret, rk_obj->base.size);
@@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
- rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+ rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
+ rk_obj->pages, rk_obj->num_pages);
if (IS_ERR(rk_obj->sgt)) {
ret = PTR_ERR(rk_obj->sgt);
goto err_put_pages;
@@ -98,11 +99,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
* TODO: Replace this by drm_clflush_sg() once it can be implemented
* without relying on symbols that are not exported.
*/
- for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
+ for_each_sgtable_sg(rk_obj->sgt, s, i)
sg_dma_address(s) = sg_phys(s);
- dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
- DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
return 0;
@@ -350,8 +350,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj)
if (private->domain) {
rockchip_gem_iommu_unmap(rk_obj);
} else {
- dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
- rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(drm->dev, rk_obj->sgt,
+ DMA_BIDIRECTIONAL, 0);
}
drm_prime_gem_destroy(obj, rk_obj->sgt);
} else {
@@ -442,7 +442,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
int ret;
if (rk_obj->pages)
- return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+ return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
@@ -460,23 +460,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
return sgt;
}
-static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
- int count)
-{
- struct scatterlist *s;
- dma_addr_t expected = sg_dma_address(sgt->sgl);
- unsigned int i;
- unsigned long size = 0;
-
- for_each_sg(sgt->sgl, s, count, i) {
- if (sg_dma_address(s) != expected)
- break;
- expected = sg_dma_address(s) + sg_dma_len(s);
- size += sg_dma_len(s);
- }
- return size;
-}
-
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
struct dma_buf_attachment *attach,
@@ -493,15 +476,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm,
struct sg_table *sg,
struct rockchip_gem_object *rk_obj)
{
- int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
- DMA_BIDIRECTIONAL);
- if (!count)
- return -EINVAL;
+ int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
+ if (err)
+ return err;
- if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
+ if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
- dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
return -EINVAL;
}
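Note: drm_prime_get_contiguous_size() subsumes the rockchip_sg_get_contiguous_size() helper deleted above; a sketch of the equivalent walk over the DMA-mapped entries, mirroring the removed code (illustrative):

	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned long size = 0;
	unsigned int i;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;			/* gap: mapping is not contiguous */
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	/* size < dmabuf->size means the DMA mapping left a hole */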
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 63f967902c2d..f292c6a6e20f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -634,13 +634,6 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
"failed to attach encoder: %d\n", ret);
goto err_free_connector;
}
-
- ret = drm_panel_attach(lvds->panel, connector);
- if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach panel: %d\n", ret);
- goto err_free_connector;
- }
} else {
ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
if (ret) {
@@ -676,8 +669,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master,
encoder_funcs = lvds->soc_data->helper_funcs;
encoder_funcs->disable(&lvds->encoder);
- if (lvds->panel)
- drm_panel_detach(lvds->panel);
pm_runtime_disable(dev);
drm_connector_cleanup(&lvds->connector);
drm_encoder_cleanup(&lvds->encoder);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 8b45c3a1b84e..69de2c76731f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -101,7 +101,7 @@ static void drm_sched_fence_free(struct rcu_head *rcu)
/**
* drm_sched_fence_release_scheduled - callback that fence can be freed
*
- * @fence: fence
+ * @f: fence
*
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 96f763d888af..9a0d77a68018 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -625,7 +625,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
return NULL;
/* Kernel run queue has higher priority than normal run queue*/
- for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
if (entity)
break;
@@ -852,7 +852,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->name = name;
sched->timeout = timeout;
sched->hang_limit = hang_limit;
- for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
drm_sched_rq_init(sched, &sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
index bd990d178765..1d696ec001cf 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -5,6 +5,8 @@
#define PREFIX_STR "[drm_dp_mst_helper]"
+#include <linux/random.h>
+
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
@@ -237,6 +239,21 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused)
in.u.i2c_write.bytes = data;
DO_TEST();
+ in.req_type = DP_QUERY_STREAM_ENC_STATUS;
+ in.u.enc_status.stream_id = 1;
+ DO_TEST();
+ get_random_bytes(in.u.enc_status.client_id,
+ sizeof(in.u.enc_status.client_id));
+ DO_TEST();
+ in.u.enc_status.stream_event = 3;
+ DO_TEST();
+ in.u.enc_status.valid_stream_event = 0;
+ DO_TEST();
+ in.u.enc_status.stream_behavior = 3;
+ DO_TEST();
+ in.u.enc_status.valid_stream_behavior = 1;
+ DO_TEST();
+
#undef DO_TEST
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index de4af7735c46..ddb4184f0726 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -389,8 +389,6 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
dvo->panel = of_drm_find_panel(dvo->panel_node);
if (IS_ERR(dvo->panel))
dvo->panel = NULL;
- else
- drm_panel_attach(dvo->panel, connector);
}
if (dvo->panel)
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index ed5d86617802..77497b45f9a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -768,7 +769,7 @@ static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
.vblank_quirk = sun4i_backend_vblank_quirk,
};
-static struct regmap_config sun4i_backend_regmap_config = {
+static const struct regmap_config sun4i_backend_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -810,8 +811,13 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
* because of an old DT, we need to set the DMA offset by hand
* on our device since the RAM mapping is at 0 for the DMA bus,
* unlike the CPU.
+ *
+ * XXX(hch): this has no business in a driver and needs to move
+ * to the device tree.
*/
- drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ return ret;
}
backend->engine.node = dev->of_node;
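Note: dma_direct_set_offset(dev, cpu_start, dma_start, size) installs a fixed CPU-to-bus address translation window, replacing the raw dma_pfn_offset assignment. Under the values used above, and assuming PHYS_OFFSET is 0x40000000 (the usual sunxi DRAM base; an assumption for illustration), the mapping works out as:

	/* bus address = CPU physical address - PHYS_OFFSET, first 4 GiB only:
	 * a buffer at CPU physical 0x41000000 is programmed into the display
	 * backend as bus address 0x01000000. */
	ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET /* cpu_start */,
				    0 /* dma_start */, SZ_4G /* window size */);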
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 1568f68f9a9e..6825ef46f43f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -35,7 +35,7 @@ static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
};
-static struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
+static const struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ec2a032e07b9..edb60ae0a9b7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -407,6 +407,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
uint64_t modifier = fb->modifier;
+ unsigned int ch1_phase_idx;
u32 out_fmt_val;
u32 in_fmt_val, in_mod_val, in_ps_val;
unsigned int i;
@@ -442,18 +443,19 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* I have no idea what this does exactly, but it seems to be
* related to the scaler FIR filter phase parameters.
*/
+ ch1_phase_idx = (format->num_planes > 1) ? 1 : 0;
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZPHASE_REG,
- frontend->data->ch_phase[0].horzphase);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZPHASE_REG,
- frontend->data->ch_phase[1].horzphase);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE0_REG,
- frontend->data->ch_phase[0].vertphase[0]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE0_REG,
- frontend->data->ch_phase[1].vertphase[0]);
+ frontend->data->ch_phase[ch1_phase_idx]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG,
- frontend->data->ch_phase[0].vertphase[1]);
+ frontend->data->ch_phase[0]);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG,
- frontend->data->ch_phase[1].vertphase[1]);
+ frontend->data->ch_phase[ch1_phase_idx]);
/*
* Checking the input format is sufficient since we currently only
@@ -545,7 +547,7 @@ int sun4i_frontend_enable(struct sun4i_frontend *frontend)
}
EXPORT_SYMBOL(sun4i_frontend_enable);
-static struct regmap_config sun4i_frontend_regmap_config = {
+static const struct regmap_config sun4i_frontend_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -687,30 +689,12 @@ static const struct dev_pm_ops sun4i_frontend_pm_ops = {
};
static const struct sun4i_frontend_data sun4i_a10_frontend = {
- .ch_phase = {
- {
- .horzphase = 0,
- .vertphase = { 0, 0 },
- },
- {
- .horzphase = 0xfc000,
- .vertphase = { 0xfc000, 0xfc000 },
- },
- },
+ .ch_phase = { 0x000, 0xfc000 },
.has_coef_rdy = true,
};
static const struct sun4i_frontend_data sun8i_a33_frontend = {
- .ch_phase = {
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- {
- .horzphase = 0x400,
- .vertphase = { 0x400, 0x400 },
- },
- },
+ .ch_phase = { 0x400, 0xfc400 },
.has_coef_access_ctrl = true,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 0c382c1ddb0f..2e7b76e50c2b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -115,11 +115,7 @@ struct reset_control;
struct sun4i_frontend_data {
bool has_coef_access_ctrl;
bool has_coef_rdy;
-
- struct {
- u32 horzphase;
- u32 vertphase[2];
- } ch_phase[2];
+ u32 ch_phase[2];
};
struct sun4i_frontend {
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index ffda3184aa12..ac570437172e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -47,16 +47,13 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(lvds->panel, connector);
}
-static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
+static const struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
.get_modes = sun4i_lvds_get_modes,
};
static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
- struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
-
- drm_panel_detach(lvds->panel);
drm_connector_cleanup(connector);
}
@@ -141,12 +138,6 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
-
- ret = drm_panel_attach(lvds->panel, &lvds->connector);
- if (ret) {
- dev_err(drm->dev, "Couldn't attach our panel\n");
- goto err_cleanup_connector;
- }
}
if (bridge) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 5a7d43939ae6..e172426eb7e9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -138,16 +138,13 @@ out:
return MODE_OK;
}
-static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
+static const struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
};
static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
- struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
-
- drm_panel_detach(rgb->panel);
drm_connector_cleanup(connector);
}
@@ -183,7 +180,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
}
}
-static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
+static const struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
.mode_valid = sun4i_rgb_mode_valid,
@@ -233,12 +230,6 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
-
- ret = drm_panel_attach(rgb->panel, &rgb->connector);
- if (ret) {
- dev_err(drm->dev, "Couldn't attach our panel\n");
- goto err_cleanup_connector;
- }
}
if (rgb->bridge) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e40c542254f6..eaaf5d70e352 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -474,9 +474,7 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal * 2) |
SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
- reg = SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0 |
- SUN4I_TCON0_LVDS_IF_DATA_POL_NORMAL |
- SUN4I_TCON0_LVDS_IF_CLK_POL_NORMAL;
+ reg = SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0;
if (sun4i_tcon_get_pixel_depth(encoder) == 24)
reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS;
else
@@ -825,7 +823,7 @@ static int sun4i_tcon_init_irq(struct device *dev,
return 0;
}
-static struct regmap_config sun4i_tcon_regmap_config = {
+static const struct regmap_config sun4i_tcon_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 63f4428ac3bf..cb91bc11a0c7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -468,7 +468,7 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
}
-static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
+static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.disable = sun4i_tv_disable,
.enable = sun4i_tv_enable,
.mode_set = sun4i_tv_mode_set,
@@ -504,7 +504,7 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.get_modes = sun4i_tv_comp_get_modes,
.mode_valid = sun4i_tv_comp_mode_valid,
};
@@ -523,7 +523,7 @@ static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static struct regmap_config sun4i_tv_regmap_config = {
+static const struct regmap_config sun4i_tv_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index de8a11abd66a..4f5efcace68e 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -820,7 +820,7 @@ static int sun6i_dsi_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(dsi->panel, connector);
}
-static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
.get_modes = sun6i_dsi_get_modes,
};
@@ -973,7 +973,6 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
dsi->panel = panel;
dsi->device = device;
- drm_panel_attach(dsi->panel, &dsi->connector);
drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -985,12 +984,10 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
- struct drm_panel *panel = dsi->panel;
dsi->panel = NULL;
dsi->device = NULL;
- drm_panel_detach(panel);
drm_kms_helper_hotplug_event(dsi->drm);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 156d00e5165b..35c2133724e2 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -534,7 +534,7 @@ void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
}
}
-static struct regmap_config sun8i_hdmi_phy_regmap_config = {
+static const struct regmap_config sun8i_hdmi_phy_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index c3304028e3dc..5b42cf25cc86 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -303,7 +303,7 @@ static const struct sunxi_engine_ops sun8i_engine_ops = {
.layers_init = sun8i_layers_init,
};
-static struct regmap_config sun8i_mixer_regmap_config = {
+static const struct regmap_config sun8i_mixer_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 54f937a7d5e7..816ad4ce8996 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -298,7 +298,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
true, zpos, old_zpos);
}
-static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index c0147af6a840..76393fc976fe 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -401,7 +401,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
true, zpos, old_zpos);
}
-static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index b25443255be6..f38de08e0c95 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
#include <linux/gpio/consumer.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
@@ -116,6 +117,7 @@ struct tegra_output {
struct device_node *of_node;
struct device *dev;
+ struct drm_bridge *bridge;
struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 3820e8dff14b..5691ef1b0e58 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -694,11 +694,11 @@ static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
- err = tegra_mipi_calibrate(dsi->mipi);
+ err = tegra_mipi_start_calibration(dsi->mipi);
if (err < 0)
return err;
- return tegra_mipi_wait(dsi->mipi);
+ return tegra_mipi_finish_calibration(dsi->mipi);
}
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
@@ -1498,10 +1498,8 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
if (IS_ERR(output->panel))
output->panel = NULL;
- if (output->panel && output->connector.dev) {
- drm_panel_attach(output->panel, &output->connector);
+ if (output->panel && output->connector.dev)
drm_helper_hpd_irq_event(output->connector.dev);
- }
}
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 723df142a981..a2bac20ff19d 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
* the SG table needs to be copied to avoid overwriting any
* other potential users of the original SG table.
*/
- err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
- GFP_KERNEL);
+ err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
+ obj->sgt->orig_nents, GFP_KERNEL);
if (err < 0)
goto free;
} else {
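Note: once a table is DMA-mapped, sgt->nents holds the (possibly coalesced) count of DMA segments while orig_nents keeps the CPU-side entry count, so a copy must walk orig_nents to visit every page. Illustrative:

	/* nents may shrink when an IOMMU coalesces entries; orig_nents
	 * is the only count that covers the whole CPU-side table. */
	err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
				     obj->sgt->orig_nents, GFP_KERNEL);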
@@ -196,8 +196,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
- bo->sgt->nents, prot);
+ bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
err = -ENOMEM;
@@ -264,8 +263,7 @@ free:
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
- dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_FROM_DEVICE);
+ dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -284,18 +282,15 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
- bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto put_pages;
}
- err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_FROM_DEVICE);
- if (err == 0) {
- err = -EFAULT;
+ err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+ if (err)
goto free_sgt;
- }
return 0;
@@ -571,7 +566,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
goto free;
}
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (dma_map_sgtable(attach->dev, sgt, dir, 0))
goto free;
return sgt;
@@ -590,7 +585,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct tegra_bo *bo = to_tegra_bo(gem);
if (bo->pages)
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -609,8 +604,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
struct drm_device *drm = gem->dev;
if (bo->pages)
- dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_FROM_DEVICE);
+ dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
return 0;
}
@@ -623,8 +617,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
struct drm_device *drm = gem->dev;
if (bo->pages)
- dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
return 0;
}
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index e36e5e7c2f69..5a4fd0dbf4cf 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
@@ -99,27 +100,38 @@ int tegra_output_probe(struct tegra_output *output)
if (!output->of_node)
output->of_node = output->dev->of_node;
+ err = drm_of_find_panel_or_bridge(output->of_node, -1, -1,
+ &output->panel, &output->bridge);
+ if (err && err != -ENODEV)
+ return err;
+
panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
if (panel) {
+ /*
+		 * Don't mix the nvidia,panel phandle with the OF graph in a
+		 * device tree.
+ */
+ WARN_ON(output->panel || output->bridge);
+
output->panel = of_drm_find_panel(panel);
+ of_node_put(panel);
+
if (IS_ERR(output->panel))
return PTR_ERR(output->panel);
-
- of_node_put(panel);
}
output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
if (ddc) {
- output->ddc = of_find_i2c_adapter_by_node(ddc);
+ output->ddc = of_get_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
if (!output->ddc) {
err = -EPROBE_DEFER;
-			of_node_put(ddc);
return err;
}
-
- of_node_put(ddc);
}
output->hpd_gpio = devm_gpiod_get_from_of_node(output->dev,
@@ -173,19 +185,12 @@ void tegra_output_remove(struct tegra_output *output)
free_irq(output->hpd_irq, output);
if (output->ddc)
- put_device(&output->ddc->dev);
+ i2c_put_adapter(output->ddc);
}
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
int connector_type;
- int err;
-
- if (output->panel) {
- err = drm_panel_attach(output->panel, &output->connector);
- if (err < 0)
- return err;
- }
/*
* The connector is now registered and ready to receive hotplug events
@@ -220,9 +225,6 @@ void tegra_output_exit(struct tegra_output *output)
*/
if (output->hpd_gpio)
disable_irq(output->hpd_irq);
-
- if (output->panel)
- drm_panel_detach(output->panel);
}
void tegra_output_find_possible_crtcs(struct tegra_output *output,
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 4cd0461cc508..539d14935728 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
}
if (sgt) {
- err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE);
- if (err == 0) {
- err = -ENOMEM;
+ err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
+ if (err)
goto unpin;
- }
/*
* The display controller needs contiguous memory, so
@@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
* map its SG table to a single contiguous chunk of
* I/O virtual memory.
*/
- if (err > 1) {
+ if (sgt->nents > 1) {
err = -EINVAL;
goto unpin;
}
@@ -166,8 +163,7 @@ unpin:
struct sg_table *sgt = state->sgt[i];
if (sgt)
- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE);
+ dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
host1x_bo_unpin(dc->dev, &bo->base, sgt);
state->iova[i] = DMA_MAPPING_ERROR;
@@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
struct sg_table *sgt = state->sgt[i];
if (sgt)
- dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE);
+ dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0);
host1x_bo_unpin(dc->dev, &bo->base, sgt);
state->iova[i] = DMA_MAPPING_ERROR;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 0562a7eb793f..4142a56ca764 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -7,7 +7,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
@@ -85,45 +85,13 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
tegra_dc_writel(dc, table[i].value, table[i].offset);
}
-static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = tegra_output_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = tegra_output_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static enum drm_mode_status
-tegra_rgb_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /*
- * FIXME: For now, always assume that the mode is okay. There are
- * unresolved issues with clk_round_rate(), which doesn't always
- * reliably report whether a frequency can be set or not.
- */
- return MODE_OK;
-}
-
-static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
- .get_modes = tegra_output_connector_get_modes,
- .mode_valid = tegra_rgb_connector_mode_valid,
-};
-
static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
- if (output->panel)
- drm_panel_disable(output->panel);
-
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
tegra_dc_commit(rgb->dc);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
}
static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
@@ -132,9 +100,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
struct tegra_rgb *rgb = to_rgb(output);
u32 value;
- if (output->panel)
- drm_panel_prepare(output->panel);
-
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -156,9 +121,6 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS);
tegra_dc_commit(rgb->dc);
-
- if (output->panel)
- drm_panel_enable(output->panel);
}
static int
@@ -267,24 +229,68 @@ int tegra_dc_rgb_remove(struct tegra_dc *dc)
int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
{
struct tegra_output *output = dc->rgb;
+ struct drm_connector *connector;
int err;
if (!dc->rgb)
return -ENODEV;
- drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(&output->connector,
- &tegra_rgb_connector_helper_funcs);
- output->connector.dpms = DRM_MODE_DPMS_OFF;
-
drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
- drm_connector_attach_encoder(&output->connector,
- &output->encoder);
- drm_connector_register(&output->connector);
+ /*
+	 * Wrap a directly-connected panel into a DRM bridge in order to
+	 * let the DRM core handle the panel for us.
+ */
+ if (output->panel) {
+ output->bridge = devm_drm_panel_bridge_add(output->dev,
+ output->panel);
+ if (IS_ERR(output->bridge)) {
+ dev_err(output->dev,
+ "failed to wrap panel into bridge: %pe\n",
+ output->bridge);
+ return PTR_ERR(output->bridge);
+ }
+
+ output->panel = NULL;
+ }
+
+ /*
+	 * Tegra devices that have an LVDS panel utilize an LVDS encoder
+	 * bridge to convert up to 28 LCD LVTTL lanes into 5/4 LVDS lanes
+	 * that go to the display panel's receiver.
+	 *
+	 * The encoder usually has a power-down control which needs to be
+	 * enabled in order to transmit data to the panel. Historically,
+	 * devices using an older device-tree version didn't model the
+	 * bridge, assuming that the encoder is turned ON by default, while
+	 * today's DRM allows us to model the LVDS encoder properly.
+	 *
+	 * Newer device-trees utilize the LVDS encoder bridge, which
+	 * provides us with a connector and handles the display panel.
+	 *
+	 * For older device-trees we wrapped the panel into the panel-bridge
+	 * above.
+ */
+ if (output->bridge) {
+ err = drm_bridge_attach(&output->encoder, output->bridge,
+ NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (err) {
+ dev_err(output->dev, "failed to attach bridge: %d\n",
+ err);
+ return err;
+ }
+
+ connector = drm_bridge_connector_init(drm, &output->encoder);
+ if (IS_ERR(connector)) {
+ dev_err(output->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ return PTR_ERR(connector);
+ }
+
+ drm_connector_attach_encoder(connector, &output->encoder);
+ }
err = tegra_output_init(drm, output);
if (err < 0) {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 45b5258c77a2..e88a17c2937f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3728,7 +3728,12 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
- sor->output.ddc = &sor->aux->ddc;
+ if (get_device(&sor->aux->ddc.dev)) {
+ if (try_module_get(sor->aux->ddc.owner))
+ sor->output.ddc = &sor->aux->ddc;
+ else
+ put_device(&sor->aux->ddc.dev);
+ }
}
if (!sor->aux) {
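The reference taken above has to be balanced on teardown. A minimal sketch of
that cleanup, assuming it lives in the driver's remove path (the put side is
not part of this hunk); the field names follow struct i2c_adapter:

	/* Hypothetical cleanup balancing get_device()/try_module_get() above. */
	if (sor->output.ddc) {
		module_put(sor->output.ddc->owner);
		put_device(&sor->output.ddc->dev);
	}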
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index c3ece2c9d1c8..b669168ae7cb 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
@@ -302,6 +303,8 @@ struct dispc_device {
u32 num_fourccs;
u32 memory_bandwidth_limit;
+
+ struct dispc_errata errata;
};
static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
@@ -2641,6 +2644,19 @@ static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
return 0;
}
+static void dispc_init_errata(struct dispc_device *dispc)
+{
+ static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
+ { .family = "AM65X", .revision = "SR1.0" },
+ { /* sentinel */ }
+ };
+
+ if (soc_device_match(am65x_sr10_soc_devices)) {
+ dispc->errata.i2000 = true;
+ dev_info(dispc->dev, "WA for erratum i2000: YUV formats disabled\n");
+ }
+}
+
int dispc_init(struct tidss_device *tidss)
{
struct device *dev = tidss->dev;
@@ -2664,19 +2680,27 @@ int dispc_init(struct tidss_device *tidss)
if (!dispc)
return -ENOMEM;
+ dispc->tidss = tidss;
+ dispc->dev = dev;
+ dispc->feat = feat;
+
+ dispc_init_errata(dispc);
+
dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
sizeof(*dispc->fourccs), GFP_KERNEL);
if (!dispc->fourccs)
return -ENOMEM;
num_fourccs = 0;
- for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (dispc->errata.i2000 &&
+ dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc)) {
+ continue;
+ }
dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+ }
dispc->num_fourccs = num_fourccs;
- dispc->tidss = tidss;
- dispc->dev = dev;
- dispc->feat = feat;
dispc_common_regmap = dispc->feat->common_regs;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 5984e0de2cd9..e49432f0abf5 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,6 +46,10 @@ struct dispc_features_scaling {
u32 xinc_max;
};
+struct dispc_errata {
+ bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
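Beyond filtering the fourcc list at init time, the new flag can be consulted
wherever formats are validated at runtime. A minimal sketch, where
dispc_format_supported() is a hypothetical helper name, not part of this patch:

	/* Hypothetical helper: gate YUV formats on erratum i2000. */
	static bool dispc_format_supported(struct dispc_device *dispc, u32 fourcc)
	{
		if (dispc->errata.i2000 && dispc_fourcc_is_yuv(fourcc))
			return false;

		return true;
	}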
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index caea2a099496..90c0da88cc98 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
+ ttm_resource.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 38f1351140e2..a98fd795b752 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -48,7 +48,7 @@ struct ttm_agp_backend {
struct agp_bridge_data *bridge;
};
-static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
@@ -57,6 +57,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
unsigned i;
+ if (agp_be->mem)
+ return 0;
+
mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
@@ -81,8 +84,9 @@ static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
}
+EXPORT_SYMBOL(ttm_agp_bind);
-static void ttm_agp_unbind(struct ttm_tt *ttm)
+void ttm_agp_unbind(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
@@ -95,8 +99,20 @@ static void ttm_agp_unbind(struct ttm_tt *ttm)
agp_be->mem = NULL;
}
}
+EXPORT_SYMBOL(ttm_agp_unbind);
+
+bool ttm_agp_is_bound(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+ if (!ttm)
+ return false;
+
+ return (agp_be->mem != NULL);
+}
+EXPORT_SYMBOL(ttm_agp_is_bound);
-static void ttm_agp_destroy(struct ttm_tt *ttm)
+void ttm_agp_destroy(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
@@ -105,12 +121,7 @@ static void ttm_agp_destroy(struct ttm_tt *ttm)
ttm_tt_fini(ttm);
kfree(agp_be);
}
-
-static struct ttm_backend_func ttm_agp_func = {
- .bind = ttm_agp_bind,
- .unbind = ttm_agp_unbind,
- .destroy = ttm_agp_destroy,
-};
+EXPORT_SYMBOL(ttm_agp_destroy);
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
struct agp_bridge_data *bridge,
@@ -124,7 +135,6 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
kfree(agp_be);
@@ -134,18 +144,3 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
return &agp_be->ttm;
}
EXPORT_SYMBOL(ttm_agp_tt_create);
-
-int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
- if (ttm->state != tt_unpopulated)
- return 0;
-
- return ttm_pool_populate(ttm, ctx);
-}
-EXPORT_SYMBOL(ttm_agp_tt_populate);
-
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
-}
-EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
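With the ttm_backend_func table removed, drivers are expected to route the
now-exported AGP helpers through their ttm_bo_driver hooks instead. A rough
sketch of that wiring, with the my_* names being placeholders:

	static int my_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
				  struct ttm_resource *mem)
	{
		return ttm_agp_bind(ttm, mem);
	}

	static void my_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
	{
		ttm_agp_unbind(ttm);
	}

	static struct ttm_bo_driver my_driver = {
		/* ... */
		.ttm_tt_bind = my_ttm_tt_bind,
		.ttm_tt_unbind = my_ttm_tt_unbind,
	};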
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cc6a4e7551e3..eb4b7df02ca0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -64,51 +64,22 @@ static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
kfree(bo);
}
-static inline int ttm_mem_type_from_place(const struct ttm_place *place,
- uint32_t *mem_type)
-{
- int pos;
-
- pos = ffs(place->flags & TTM_PL_MASK_MEM);
- if (unlikely(!pos))
- return -EINVAL;
-
- *mem_type = pos - 1;
- return 0;
-}
-
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
- int mem_type)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-
- drm_printf(p, " has_type: %d\n", man->has_type);
- drm_printf(p, " use_type: %d\n", man->use_type);
- drm_printf(p, " flags: 0x%08X\n", man->flags);
- drm_printf(p, " size: %llu\n", man->size);
- drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
- drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
- if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, p);
-}
-
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct drm_printer p = drm_debug_printer(TTM_PFX);
- int i, ret, mem_type;
+ struct ttm_resource_manager *man;
+ int i, mem_type;
drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
- ret = ttm_mem_type_from_place(&placement->placement[i],
- &mem_type);
- if (ret)
- return;
+ mem_type = placement->placement[i].mem_type;
drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
i, placement->placement[i].flags, mem_type);
- ttm_mem_type_debug(bo->bdev, &p, mem_type);
+ man = ttm_manager_type(bo->bdev, mem_type);
+ ttm_resource_manager_debug(man, &p);
}
}
@@ -138,17 +109,11 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
.default_attrs = ttm_bo_global_attrs
};
-
-static inline uint32_t ttm_bo_type_flags(unsigned type)
-{
- return 1 << (type);
-}
-
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
if (!list_empty(&bo->lru))
return;
@@ -156,10 +121,10 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (mem->placement & TTM_PL_FLAG_NO_EVICT)
return;
- man = &bdev->man[mem->mem_type];
+ man = ttm_manager_type(bdev, mem->mem_type);
list_add_tail(&bo->lru, &man->lru[bo->priority]);
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+ if (man->use_tt && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
@@ -223,7 +188,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
if (!pos->first)
continue;
@@ -231,14 +196,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- man = &pos->first->bdev->man[TTM_PL_TT];
+ man = ttm_manager_type(pos->first->bdev, TTM_PL_TT);
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
&pos->last->lru);
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
if (!pos->first)
continue;
@@ -246,7 +211,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- man = &pos->first->bdev->man[TTM_PL_VRAM];
+ man = ttm_manager_type(pos->first->bdev, TTM_PL_VRAM);
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
&pos->last->lru);
}
@@ -268,38 +233,38 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, bool evict,
+ struct ttm_resource *mem, bool evict,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
- struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+ struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
int ret;
- ret = ttm_mem_io_lock(old_man, true);
- if (unlikely(ret != 0))
- goto out_err;
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(old_man);
+ ttm_bo_unmap_virtual(bo);
/*
* Create and bind a ttm if required.
*/
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
- if (bo->ttm == NULL) {
- bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
- ret = ttm_tt_create(bo, zero);
- if (ret)
- goto out_err;
- }
+ if (new_man->use_tt) {
+ /* Zero init the new TTM structure if the old location should
+ * have used one as well.
+ */
+ ret = ttm_tt_create(bo, old_man->use_tt);
+ if (ret)
+ goto out_err;
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(bo->ttm, mem, ctx);
+ ret = ttm_tt_populate(bdev, bo->ttm, ctx);
+ if (ret)
+ goto out_err;
+
+ ret = ttm_bo_tt_bind(bo, mem);
if (ret)
goto out_err;
}
@@ -315,8 +280,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (bdev->driver->move_notify)
bdev->driver->move_notify(bo, evict, mem);
- if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ if (old_man->use_tt && new_man->use_tt)
ret = ttm_bo_move_ttm(bo, ctx, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, ctx, mem);
@@ -334,17 +298,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
moved:
- bo->evicted = false;
-
ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
return 0;
out_err:
- new_man = &bdev->man[bo->mem.mem_type];
- if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
+ new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+ if (!new_man->use_tt)
+ ttm_bo_tt_destroy(bo);
return ret;
}
@@ -362,9 +322,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
if (bo->bdev->driver->move_notify)
bo->bdev->driver->move_notify(bo, false, NULL);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- ttm_bo_mem_put(bo, &bo->mem);
+ ttm_bo_tt_destroy(bo);
+ ttm_resource_free(bo, &bo->mem);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -552,7 +511,6 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
size_t acc_size = bo->acc_size;
int ret;
@@ -570,9 +528,7 @@ static void ttm_bo_release(struct kref *kref)
bo->bdev->driver->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
+ ttm_mem_io_free(bdev, &bo->mem);
}
if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -643,7 +599,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_reg evict_mem;
+ struct ttm_resource evict_mem;
struct ttm_placement placement;
int ret = 0;
@@ -654,17 +610,16 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
bdev->driver->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
- ret = ttm_bo_pipeline_gutting(bo);
- if (ret)
- return ret;
+ ttm_bo_wait(bo, false, false);
+ ttm_bo_cleanup_memtype_use(bo);
return ttm_tt_create(bo, false);
}
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.bus.io_reserved_vm = false;
- evict_mem.bus.io_reserved_count = 0;
+ evict_mem.bus.offset = 0;
+ evict_mem.bus.addr = NULL;
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
@@ -680,10 +635,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
- ttm_bo_mem_put(bo, &evict_mem);
- goto out;
+ ttm_resource_free(bo, &evict_mem);
}
- bo->evicted = true;
out:
return ret;
}
@@ -694,7 +647,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
+ if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
(place->lpfn && place->lpfn <= bo->mem.start))
return false;
@@ -769,14 +722,13 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return r == -EDEADLK ? -EBUSY : r;
}
-static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- uint32_t mem_type,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
@@ -842,38 +794,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
-static int ttm_bo_mem_get(struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
- mem->mm_node = NULL;
- if (!man->func || !man->func->get_node)
- return 0;
-
- return man->func->get_node(man, bo, place, mem);
-}
-
-void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
-
- if (!man->func || !man->func->put_node)
- return;
-
- man->func->put_node(man, mem);
- mem->mm_node = NULL;
- mem->mem_type = TTM_PL_SYSTEM;
-}
-EXPORT_SYMBOL(ttm_bo_mem_put);
-
/**
* Add the last move fence to the BO and reserve a new shared slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
- struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem,
+ struct ttm_resource_manager *man,
+ struct ttm_resource *mem,
bool no_wait_gpu)
{
struct dma_fence *fence;
@@ -910,22 +836,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
struct ww_acquire_ctx *ticket;
int ret;
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
- ret = ttm_bo_mem_get(bo, place, mem);
+ ret = ttm_resource_alloc(bo, place, mem);
if (likely(!ret))
break;
if (unlikely(ret != -ENOSPC))
return ret;
- ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
+ ret = ttm_mem_evict_first(bdev, man, place, ctx,
ticket);
if (unlikely(ret != 0))
return ret;
@@ -934,7 +860,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
-static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
uint32_t cur_placement,
uint32_t proposed_placement)
{
@@ -947,8 +873,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
if ((cur_placement & caching) != 0)
result |= (cur_placement & caching);
- else if ((man->default_caching & caching) != 0)
- result |= man->default_caching;
else if ((TTM_PL_FLAG_CACHED & caching) != 0)
result |= TTM_PL_FLAG_CACHED;
else if ((TTM_PL_FLAG_WC & caching) != 0)
@@ -959,25 +883,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
- uint32_t mem_type,
- const struct ttm_place *place,
- uint32_t *masked_placement)
-{
- uint32_t cur_flags = ttm_bo_type_flags(mem_type);
-
- if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
- return false;
-
- if ((place->flags & man->available_caching) == 0)
- return false;
-
- cur_flags |= (place->flags & man->available_caching);
-
- *masked_placement = cur_flags;
- return true;
-}
-
/**
* ttm_bo_mem_placement - check if placement is compatible
* @bo: BO to find memory for
@@ -991,34 +896,22 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
*/
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
- uint32_t mem_type = TTM_PL_SYSTEM;
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
uint32_t cur_flags = 0;
- int ret;
-
- ret = ttm_mem_type_from_place(place, &mem_type);
- if (ret)
- return ret;
-
- man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- return -EBUSY;
- if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ man = ttm_manager_type(bdev, place->mem_type);
+ if (!man || !ttm_resource_manager_used(man))
return -EBUSY;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
- /*
- * Use the access and other non-mapping-related flag bits from
- * the memory placement flags to the current flags
- */
- ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
+ cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ place->flags);
+ cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
- mem->mem_type = mem_type;
+ mem->mem_type = place->mem_type;
mem->placement = cur_flags;
spin_lock(&ttm_bo_glob.lru_lock);
@@ -1039,7 +932,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
struct ttm_operation_ctx *ctx)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -1052,25 +945,23 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man;
ret = ttm_bo_mem_placement(bo, place, mem, ctx);
- if (ret == -EBUSY)
- continue;
if (ret)
- goto error;
+ continue;
type_found = true;
- ret = ttm_bo_mem_get(bo, place, mem);
+ ret = ttm_resource_alloc(bo, place, mem);
if (ret == -ENOSPC)
continue;
if (unlikely(ret))
goto error;
- man = &bdev->man[mem->mem_type];
+ man = ttm_manager_type(bdev, mem->mem_type);
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
- ttm_bo_mem_put(bo, mem);
+ ttm_resource_free(bo, mem);
if (ret == -EBUSY)
continue;
@@ -1083,10 +974,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->busy_placement[i];
ret = ttm_bo_mem_placement(bo, place, mem, ctx);
- if (ret == -EBUSY)
- continue;
if (ret)
- goto error;
+ continue;
type_found = true;
ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
@@ -1105,9 +994,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
}
return ret;
@@ -1119,15 +1006,15 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
int ret = 0;
- struct ttm_mem_reg mem;
+ struct ttm_resource mem;
dma_resv_assert_held(bo->base.resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
- mem.bus.io_reserved_vm = false;
- mem.bus.io_reserved_count = 0;
+ mem.bus.offset = 0;
+ mem.bus.addr = NULL;
mem.mm_node = NULL;
/*
@@ -1139,13 +1026,13 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
out_unlock:
if (ret)
- ttm_bo_mem_put(bo, &mem);
+ ttm_resource_free(bo, &mem);
return ret;
}
static bool ttm_bo_places_compat(const struct ttm_place *places,
unsigned num_placement,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
uint32_t *new_flags)
{
unsigned i;
@@ -1159,7 +1046,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
+ (mem->mem_type == heap->mem_type) &&
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
@@ -1168,7 +1055,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
}
bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
uint32_t *new_flags)
{
if (ttm_bo_places_compat(placement->placement, placement->num_placement,
@@ -1214,17 +1101,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (ret)
return ret;
} else {
- /*
- * Use the access and other non-mapping-related flag bits from
- * the compatible memory placement flags to the active flags
- */
- ttm_flag_masked(&bo->mem.placement, new_flags,
- ~TTM_PL_MASK_MEMTYPE);
+ bo->mem.placement &= TTM_PL_MASK_CACHING;
+ bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
}
/*
* We might need to add a TTM.
*/
- if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
ret = ttm_tt_create(bo, true);
if (ret)
return ret;
@@ -1276,7 +1159,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
- INIT_LIST_HEAD(&bo->io_reserve_lru);
bo->bdev = bdev;
bo->type = type;
bo->num_pages = num_pages;
@@ -1285,10 +1167,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->mem.bus.io_reserved_vm = false;
- bo->mem.bus.io_reserved_count = 0;
+ bo->mem.bus.offset = 0;
+ bo->mem.bus.addr = NULL;
bo->moving = NULL;
- bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+ bo->mem.placement = TTM_PL_FLAG_CACHED;
bo->acc_size = acc_size;
bo->sg = sg;
if (resv) {
@@ -1335,9 +1217,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return ret;
}
@@ -1371,9 +1251,9 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_init);
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
+static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
{
unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
size_t size = 0;
@@ -1383,7 +1263,6 @@ size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
-EXPORT_SYMBOL(ttm_bo_acc_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
@@ -1426,144 +1305,24 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_create);
-static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- unsigned mem_type)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false,
- .flags = TTM_OPT_FLAG_FORCE_ALLOC
- };
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = &ttm_bo_glob;
- struct dma_fence *fence;
- int ret;
- unsigned i;
-
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&glob->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&glob->lru_lock);
- }
- }
- spin_unlock(&glob->lru_lock);
-
- spin_lock(&man->move_lock);
- fence = dma_fence_get(man->move);
- spin_unlock(&man->move_lock);
-
- if (fence) {
- ret = dma_fence_wait(fence, false);
- dma_fence_put(fence);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
- struct ttm_mem_type_manager *man;
- int ret = -EINVAL;
-
- if (mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory type %d\n", mem_type);
- return ret;
- }
- man = &bdev->man[mem_type];
-
- if (!man->has_type) {
- pr_err("Trying to take down uninitialized memory manager type %u\n",
- mem_type);
- return ret;
- }
-
- man->use_type = false;
- man->has_type = false;
-
- ret = 0;
- if (mem_type > 0) {
- ret = ttm_bo_force_list_clean(bdev, mem_type);
- if (ret) {
- pr_err("Cleanup eviction failed\n");
- return ret;
- }
-
- ret = (*man->func->takedown)(man);
- }
-
- dma_fence_put(man->move);
- man->move = NULL;
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_clean_mm);
-
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
pr_err("Illegal memory manager memory type %u\n", mem_type);
return -EINVAL;
}
- if (!man->has_type) {
+ if (!man) {
pr_err("Memory type %u has not been initialized\n", mem_type);
return 0;
}
- return ttm_bo_force_list_clean(bdev, mem_type);
+ return ttm_resource_manager_force_list_clean(bdev, man);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
-int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_size)
-{
- int ret;
- struct ttm_mem_type_manager *man;
- unsigned i;
-
- BUG_ON(type >= TTM_NUM_MEM_TYPES);
- man = &bdev->man[type];
- BUG_ON(man->has_type);
- man->use_io_reserve_lru = false;
- mutex_init(&man->io_reserve_mutex);
- spin_lock_init(&man->move_lock);
- INIT_LIST_HEAD(&man->io_reserve_lru);
-
- ret = bdev->driver->init_mem_type(bdev, type, man);
- if (ret)
- return ret;
- man->bdev = bdev;
-
- if (type != TTM_PL_SYSTEM) {
- ret = (*man->func->init)(man, p_size);
- if (ret)
- return ret;
- }
- man->has_type = true;
- man->use_type = true;
- man->size = p_size;
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
- INIT_LIST_HEAD(&man->lru[i]);
- man->move = NULL;
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_init_mm);
-
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
@@ -1628,21 +1387,12 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
- unsigned i = TTM_NUM_MEM_TYPES;
- struct ttm_mem_type_manager *man;
-
- while (i--) {
- man = &bdev->man[i];
- if (man->has_type) {
- man->use_type = false;
- if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
- ret = -EBUSY;
- pr_err("DRM memory manager type %d is not clean\n",
- i);
- }
- man->has_type = false;
- }
- }
+ unsigned i;
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
@@ -1655,7 +1405,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
- if (list_empty(&bdev->man[0].lru[0]))
+ if (list_empty(&man->lru[0]))
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
@@ -1666,6 +1416,21 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
}
EXPORT_SYMBOL(ttm_bo_device_release);
+static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
+{
+ struct ttm_resource_manager *man = &bdev->sysman;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ man->use_tt = true;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
@@ -1684,15 +1449,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->driver = driver;
- memset(bdev->man, 0, sizeof(bdev->man));
-
- /*
- * Initialize the system memory buffer type.
- * Other types need to be driver / IOCTL initialized.
- */
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
- if (unlikely(ret != 0))
- goto out_no_sys;
+ ttm_bo_init_sysman(bdev);
bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
@@ -1704,9 +1461,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
mutex_unlock(&ttm_global_mutex);
return 0;
-out_no_sys:
- ttm_bo_global_release();
- return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1714,25 +1468,13 @@ EXPORT_SYMBOL(ttm_bo_device_init);
* buffer object vm functions.
*/
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
- ttm_mem_io_free_vm(bo);
-}
-
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- ttm_mem_io_lock(man, false);
- ttm_bo_unmap_virtual_locked(bo);
- ttm_mem_io_unlock(man);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+ ttm_mem_io_free(bdev, &bo->mem);
}
-
-
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
@@ -1812,11 +1554,11 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
if (bo->mem.mem_type != TTM_PL_SYSTEM ||
bo->ttm->caching_state != tt_cached) {
struct ttm_operation_ctx ctx = { false, false };
- struct ttm_mem_reg evict_mem;
+ struct ttm_resource evict_mem;
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ evict_mem.placement = TTM_PL_FLAG_CACHED;
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
@@ -1842,7 +1584,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
if (bo->bdev->driver->swap_notify)
bo->bdev->driver->swap_notify(bo);
- ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
out:
/**
@@ -1867,3 +1609,22 @@ void ttm_bo_swapout_all(void)
while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
+{
+ if (bo->ttm == NULL)
+ return;
+
+ ttm_tt_destroy(bo->bdev, bo->ttm);
+ bo->ttm = NULL;
+}
+
+int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
+{
+ return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
+}
+
+void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
+{
+ bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
+}
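ttm_mem_evict_first() is now exported and takes a resource manager rather than
a memory type index. A caller sketch, assuming eviction from VRAM:

	struct ttm_resource_manager *man = ttm_manager_type(bdev, TTM_PL_VRAM);

	ret = ttm_mem_evict_first(bdev, man, place, &ctx, ticket);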
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e6c8bd254055..fb2a25f8408f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -47,15 +47,15 @@ struct ttm_transfer_obj {
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
- ttm_bo_mem_put(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->mem);
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -67,10 +67,8 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
return ret;
}
- ttm_tt_unbind(ttm);
+ ttm_bo_tt_unbind(bo);
ttm_bo_free_old_node(bo);
- ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
- TTM_PL_MASK_MEM);
old_mem->mem_type = TTM_PL_SYSTEM;
}
@@ -79,146 +77,70 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(ttm, new_mem, ctx);
+
+ ret = ttm_tt_populate(bo->bdev, ttm, ctx);
if (unlikely(ret != 0))
return ret;
- }
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+ ret = ttm_bo_tt_bind(bo, new_mem);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ ttm_bo_assign_mem(bo, new_mem);
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
-int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
-{
- if (likely(!man->use_io_reserve_lru))
- return 0;
-
- if (interruptible)
- return mutex_lock_interruptible(&man->io_reserve_mutex);
-
- mutex_lock(&man->io_reserve_mutex);
- return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
-{
- if (likely(!man->use_io_reserve_lru))
- return;
-
- mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
-{
- struct ttm_buffer_object *bo;
-
- bo = list_first_entry_or_null(&man->io_reserve_lru,
- struct ttm_buffer_object,
- io_reserve_lru);
- if (!bo)
- return -ENOSPC;
-
- list_del_init(&bo->io_reserve_lru);
- ttm_bo_unmap_virtual_locked(bo);
- return 0;
-}
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- int ret;
-
- if (mem->bus.io_reserved_count++)
+ if (mem->bus.offset || mem->bus.addr)
return 0;
+ mem->bus.is_iomem = false;
if (!bdev->driver->io_mem_reserve)
return 0;
-retry:
- ret = bdev->driver->io_mem_reserve(bdev, mem);
- if (ret == -ENOSPC) {
- ret = ttm_mem_io_evict(man);
- if (ret == 0)
- goto retry;
- }
- return ret;
+ return bdev->driver->io_mem_reserve(bdev, mem);
}
void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- if (--mem->bus.io_reserved_count)
+ if (!mem->bus.offset && !mem->bus.addr)
return;
- if (!bdev->driver->io_mem_free)
- return;
+ if (bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
- bdev->driver->io_mem_free(bdev, mem);
+ mem->bus.offset = 0;
+ mem->bus.addr = NULL;
}
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
- struct ttm_mem_reg *mem = &bo->mem;
- int ret;
-
- if (mem->bus.io_reserved_vm)
- return 0;
-
- ret = ttm_mem_io_reserve(bo->bdev, mem);
- if (unlikely(ret != 0))
- return ret;
- mem->bus.io_reserved_vm = true;
- if (man->use_io_reserve_lru)
- list_add_tail(&bo->io_reserve_lru,
- &man->io_reserve_lru);
- return 0;
-}
-
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_mem_reg *mem = &bo->mem;
-
- if (!mem->bus.io_reserved_vm)
- return;
-
- mem->bus.io_reserved_vm = false;
- list_del_init(&bo->io_reserve_lru);
- ttm_mem_io_free(bo->bdev, mem);
-}
-
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
+static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+ struct ttm_resource *mem,
void **virtual)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret;
void *addr;
*virtual = NULL;
- (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bdev, mem);
- ttm_mem_io_unlock(man);
if (ret || !mem->bus.is_iomem)
return ret;
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
+ size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
+ addr = ioremap_wc(mem->bus.offset, bus_size);
else
- addr = ioremap(mem->bus.base + mem->bus.offset,
- mem->bus.size);
+ addr = ioremap(mem->bus.offset, bus_size);
if (!addr) {
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
return -ENOMEM;
}
}
@@ -226,19 +148,13 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev,
return 0;
}
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
+static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+ struct ttm_resource *mem,
void *virtual)
{
- struct ttm_mem_type_manager *man;
-
- man = &bdev->man[mem->mem_type];
-
if (virtual && mem->bus.addr == NULL)
iounmap(virtual);
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
- ttm_mem_io_unlock(man);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -300,13 +216,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg old_copy = *old_mem;
+ struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
@@ -319,10 +235,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ret)
return ret;
- ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+ ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
- ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+ ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
if (ret)
goto out;
@@ -336,7 +252,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
- (ttm == NULL || (ttm->state == tt_unpopulated &&
+ (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
@@ -346,7 +262,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* TTM might be null for moves within the same region.
*/
if (ttm) {
- ret = ttm_tt_populate(ttm, ctx);
+ ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
goto out1;
}
@@ -381,24 +297,22 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
mb();
out2:
old_copy = *old_mem;
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
- if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(ttm);
- bo->ttm = NULL;
- }
+ ttm_bo_assign_mem(bo, new_mem);
+
+ if (!man->use_tt)
+ ttm_bo_tt_destroy(bo);
out1:
- ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+ ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
- ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+ ttm_resource_iounmap(bdev, &old_copy, old_iomap);
/*
* On error, keep the mm node!
*/
if (!ret)
- ttm_bo_mem_put(bo, &old_copy);
+ ttm_resource_free(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -452,7 +366,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
- INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
@@ -502,7 +415,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_resource *mem = &bo->mem;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
@@ -510,12 +423,10 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->placement & TTM_PL_FLAG_WC)
- map->virtual = ioremap_wc(bo->mem.bus.base +
- bo->mem.bus.offset + offset,
+ map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap(bo->mem.bus.base +
- bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
@@ -526,7 +437,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_resource *mem = &bo->mem;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
@@ -537,7 +448,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
BUG_ON(!ttm);
- ret = ttm_tt_populate(ttm, &ctx);
+ ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
if (ret)
return ret;
@@ -567,8 +478,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
unsigned long offset, size;
int ret;
@@ -579,9 +488,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
- (void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
- ttm_mem_io_unlock(man);
if (ret)
return ret;
if (!bo->mem.bus.is_iomem) {
@@ -596,10 +503,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
- struct ttm_buffer_object *bo = map->bo;
- struct ttm_mem_type_manager *man =
- &bo->bdev->man[bo->mem.mem_type];
-
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
@@ -617,167 +520,116 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
default:
BUG();
}
- (void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
- ttm_mem_io_unlock(man);
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence,
- bool evict,
- struct ttm_mem_reg *new_mem)
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+ bool dst_use_tt)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
- struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- struct ttm_buffer_object *ghost_obj;
-
- dma_resv_add_excl_fence(bo->base.resv, fence);
- if (evict) {
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
- return ret;
-
- if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- ttm_bo_free_old_node(bo);
- } else {
- /**
- * This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
- */
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
-
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
- if (ret)
- return ret;
-
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
-
- /**
- * If we're not moving to fixed memory, the TTM object
- * needs to stay alive. Otherwhise hang it on the ghost
- * bo to be unbound and destroyed.
- */
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ghost_obj->ttm = NULL;
- else
- bo->ttm = NULL;
-
- dma_resv_unlock(&ghost_obj->base._resv);
- ttm_bo_put(ghost_obj);
- }
-
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+ ret = ttm_bo_wait(bo, false, false);
+ if (ret)
+ return ret;
+ if (!dst_use_tt)
+ ttm_bo_tt_destroy(bo);
+ ttm_bo_free_old_node(bo);
return 0;
}
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- struct ttm_mem_reg *new_mem)
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool dst_use_tt)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
- struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
-
+ struct ttm_buffer_object *ghost_obj;
int ret;
- dma_resv_add_excl_fence(bo->base.resv, fence);
-
- if (!evict) {
- struct ttm_buffer_object *ghost_obj;
-
- /**
- * This should help pipeline ordinary buffer moves.
- *
- * Hang old buffer memory on a new buffer object,
- * and leave it to be released when the GPU
- * operation has completed.
- */
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
- ret = ttm_buffer_object_transfer(bo, &ghost_obj);
- if (ret)
- return ret;
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
- /**
- * If we're not moving to fixed memory, the TTM object
- * needs to stay alive. Otherwhise hang it on the ghost
- * bo to be unbound and destroyed.
- */
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
- if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
- ghost_obj->ttm = NULL;
- else
- bo->ttm = NULL;
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
- dma_resv_unlock(&ghost_obj->base._resv);
- ttm_bo_put(ghost_obj);
+ if (dst_use_tt)
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
- } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+ dma_resv_unlock(&ghost_obj->base._resv);
+ ttm_bo_put(ghost_obj);
+ return 0;
+}
- /**
- * BO doesn't have a TTM we need to bind/unbind. Just remember
- * this eviction and free up the allocation
- */
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+ struct dma_fence *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
- spin_lock(&from->move_lock);
- if (!from->move || dma_fence_is_later(fence, from->move)) {
- dma_fence_put(from->move);
- from->move = dma_fence_get(fence);
- }
- spin_unlock(&from->move_lock);
+ /**
+ * The BO doesn't have a TTM that we need to bind/unbind. Just
+ * remember this eviction and free up the allocation.
+ */
+ spin_lock(&from->move_lock);
+ if (!from->move || dma_fence_is_later(fence, from->move)) {
+ dma_fence_put(from->move);
+ from->move = dma_fence_get(fence);
+ }
+ spin_unlock(&from->move_lock);
- ttm_bo_free_old_node(bo);
+ ttm_bo_free_old_node(bo);
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get(fence);
+}
- } else {
- /**
- * Last resort, wait for the move to be completed.
- *
- * Should never happen in pratice.
- */
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence,
+ bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret = 0;
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
- return ret;
+ dma_resv_add_excl_fence(bo->base.resv, fence);
+ if (!evict)
+ ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+ else if (!from->use_tt && pipeline)
+ ttm_bo_move_pipeline_evict(bo, fence);
+ else
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
- if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- ttm_bo_free_old_node(bo);
- }
+ if (ret)
+ return ret;
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
+ ttm_bo_assign_mem(bo, new_mem);
return 0;
}
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
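ttm_bo_pipeline_move() is folded into ttm_bo_move_accel_cleanup(), which gains
a pipeline parameter. A sketch of the migration for former callers of the
pipelined path:

	/* Previously: ttm_bo_pipeline_move(bo, fence, evict, new_mem); */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);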
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4732dcc80e11..98a006fc30a5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,8 +101,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
if (bdev->driver->io_mem_pfn)
return bdev->driver->io_mem_pfn(bo, page_offset);
- return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
- + page_offset;
+ return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
}
/**
@@ -281,8 +280,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgoff_t i;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
/*
* Refuse to fault imported pages. This should be handled
@@ -308,9 +305,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
}
if (bo->moving != moving) {
- spin_lock(&ttm_bo_glob.lru_lock);
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
}
dma_fence_put(moving);
}
@@ -323,24 +318,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(ret != 0))
return ret;
- err = ttm_mem_io_lock(man, true);
+ err = ttm_mem_io_reserve(bdev, &bo->mem);
if (unlikely(err != 0))
- return VM_FAULT_NOPAGE;
- err = ttm_mem_io_reserve_vm(bo);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ return VM_FAULT_SIGBUS;
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);
- if (unlikely(page_offset >= bo->num_pages)) {
- ret = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
+ if (unlikely(page_offset >= bo->num_pages))
+ return VM_FAULT_SIGBUS;
prot = ttm_io_prot(bo->mem.placement, prot);
if (!bo->mem.bus.is_iomem) {
@@ -352,21 +340,17 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
};
ttm = bo->ttm;
- if (ttm_tt_populate(bo->ttm, &ctx)) {
- ret = VM_FAULT_OOM;
- goto out_io_unlock;
- }
+ if (ttm_tt_populate(bdev, bo->ttm, &ctx))
+ return VM_FAULT_OOM;
} else {
/* Iomem should not be marked encrypted */
prot = pgprot_decrypted(prot);
}
/* We don't prefault on huge faults. Yet. */
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) {
- ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset,
- fault_page_size, prot);
- goto out_io_unlock;
- }
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
+ return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
+ fault_page_size, prot);
/*
* Speculatively prefault a number of pages. Only error on
@@ -378,8 +362,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
- ret = VM_FAULT_OOM;
- goto out_io_unlock;
+ return VM_FAULT_OOM;
} else if (unlikely(!page)) {
break;
}
@@ -406,7 +389,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
/* Never error on prefaulted PTEs */
if (unlikely((ret & VM_FAULT_ERROR))) {
if (i == 0)
- goto out_io_unlock;
+ return VM_FAULT_NOPAGE;
else
break;
}
@@ -415,9 +398,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(++page_offset >= page_last))
break;
}
- ret = VM_FAULT_NOPAGE;
-out_io_unlock:
- ttm_mem_io_unlock(man);
return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1797f04c0534..8a8f1a6a83a6 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
+ ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
@@ -119,13 +119,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ttm_eu_backoff_reservation_reverse(list, entry);
if (ret == -EDEADLK) {
- if (intr) {
- ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- } else {
- dma_resv_lock_slow(bo->base.resv, ticket);
- ret = 0;
- }
+ ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
}
if (!ret && entry->num_shared)
@@ -133,8 +127,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry->num_shared);
if (unlikely(ret != 0)) {
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index acd63b70d814..89d50f38c0f2 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -259,7 +259,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
return false;
}
-/**
+/*
* At this point we only support a single shrink callback.
* Extend this if needed, perhaps using a linked list of callbacks.
* Note that this function is reentrant:
@@ -554,7 +554,6 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
return false;
}
-EXPORT_SYMBOL(ttm_check_under_lowerlimit);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -682,9 +681,3 @@ size_t ttm_round_pot(size_t size)
return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
-
-uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
-{
- return glob->zone_kernel->max_mem;
-}
-EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b40a4678c296..14660f723f71 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1044,7 +1044,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
put_pages:
ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
ttm->caching_state);
- ttm->state = tt_unpopulated;
+ ttm_tt_set_unpopulated(ttm);
}
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -1053,7 +1053,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
unsigned i;
int ret;
- if (ttm->state != tt_unpopulated)
+ if (ttm_tt_is_populated(ttm))
return 0;
if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
@@ -1083,7 +1083,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
}
}
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
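These hunks replace direct pokes at ttm->state with accessors. Plausible definitions of those helpers, assuming the tt_unpopulated/tt_unbound/tt_bound state enum still backs them (the real ones live in a header outside this diff):

static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
	return tt->state == tt_unbound || tt->state == tt_bound;
}

static inline void ttm_tt_set_populated(struct ttm_tt *tt)
{
	tt->state = tt_unbound;
}

static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
{
	tt->state = tt_unpopulated;
}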
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index faefaaef7909..5e2df11685e7 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -894,7 +894,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
unsigned i;
int ret;
- if (ttm->state != tt_unpopulated)
+ if (ttm_tt_is_populated(ttm))
return 0;
if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
@@ -982,7 +982,7 @@ skip_huge:
}
}
- ttm->state = tt_unbound;
+ ttm_tt_set_populated(ttm);
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
@@ -1076,7 +1076,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
ttm_dma_page_pool_free(pool, npages, false);
- ttm->state = tt_unpopulated;
+ ttm_tt_set_unpopulated(ttm);
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index facd3049c3aa..1da0e277c511 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -44,16 +44,22 @@
*/
struct ttm_range_manager {
+ struct ttm_resource_manager manager;
struct drm_mm mm;
spinlock_t lock;
};
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct ttm_range_manager, manager);
+}
+
+static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct ttm_range_manager *rman = to_range_manager(man);
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
@@ -89,10 +95,10 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
return ret;
}
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void ttm_range_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct ttm_range_manager *rman = to_range_manager(man);
if (mem->mm_node) {
spin_lock(&rman->lock);
@@ -104,53 +110,73 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
}
}
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+static const struct ttm_resource_manager_func ttm_range_manager_func;
+
+int ttm_range_man_init(struct ttm_bo_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size)
{
+ struct ttm_resource_manager *man;
struct ttm_range_manager *rman;
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
if (!rman)
return -ENOMEM;
+ man = &rman->manager;
+ man->use_tt = use_tt;
+
+ man->func = &ttm_range_manager_func;
+
+ ttm_resource_manager_init(man, p_size);
+
drm_mm_init(&rman->mm, 0, p_size);
spin_lock_init(&rman->lock);
- man->priv = rman;
+
+ ttm_set_driver_manager(bdev, type, &rman->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
+EXPORT_SYMBOL(ttm_range_man_init);
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+int ttm_range_man_fini(struct ttm_bo_device *bdev,
+ unsigned type)
{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
+ struct ttm_range_manager *rman = to_range_manager(man);
struct drm_mm *mm = &rman->mm;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_force_list_clean(bdev, man);
+ if (ret)
+ return ret;
spin_lock(&rman->lock);
- if (drm_mm_clean(mm)) {
- drm_mm_takedown(mm);
- spin_unlock(&rman->lock);
- kfree(rman);
- man->priv = NULL;
- return 0;
- }
+ drm_mm_clean(mm);
+ drm_mm_takedown(mm);
spin_unlock(&rman->lock);
- return -EBUSY;
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(bdev, type, NULL);
+ kfree(rman);
+ return 0;
}
+EXPORT_SYMBOL(ttm_range_man_fini);
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
+static void ttm_range_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
{
- struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct ttm_range_manager *rman = to_range_manager(man);
spin_lock(&rman->lock);
drm_mm_print(&rman->mm, printer);
spin_unlock(&rman->lock);
}
-const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
- .init = ttm_bo_man_init,
- .takedown = ttm_bo_man_takedown,
- .get_node = ttm_bo_man_get_node,
- .put_node = ttm_bo_man_put_node,
- .debug = ttm_bo_man_debug
+static const struct ttm_resource_manager_func ttm_range_manager_func = {
+ .alloc = ttm_range_man_alloc,
+ .free = ttm_range_man_free,
+ .debug = ttm_range_man_debug
};
-EXPORT_SYMBOL(ttm_bo_manager_func);
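With init and takedown now exported as ttm_range_man_init()/ttm_range_man_fini(), drivers stop registering the func table themselves. A sketch of the driver-side usage; the my_ names and the use of TTM_PL_VRAM are purely illustrative:

/* Bring up a range-managed, CPU-invisible VRAM heap (no TT backing). */
static int my_vram_mgr_init(struct ttm_bo_device *bdev,
			    unsigned long vram_pages)
{
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false, vram_pages);
}

static void my_vram_mgr_fini(struct ttm_bo_device *bdev)
{
	/* unlike the old takedown, this evicts instead of returning -EBUSY */
	WARN_ON(ttm_range_man_fini(bdev, TTM_PL_VRAM));
}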
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
new file mode 100644
index 000000000000..b325b9264203
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+int ttm_resource_alloc(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res)
+{
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, res->mem_type);
+
+ res->mm_node = NULL;
+ if (!man->func || !man->func->alloc)
+ return 0;
+
+ return man->func->alloc(man, bo, place, res);
+}
+
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
+{
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, res->mem_type);
+
+ if (man->func && man->func->free)
+ man->func->free(man, res);
+
+ res->mm_node = NULL;
+ res->mem_type = TTM_PL_SYSTEM;
+}
+EXPORT_SYMBOL(ttm_resource_free);
+
+/**
+ * ttm_resource_manager_init
+ *
+ * @man: memory manager object to init
+ * @p_size: size of the managed area in pages.
+ *
+ * Initialise core parts of a manager object.
+ */
+void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ unsigned long p_size)
+{
+ unsigned i;
+
+ spin_lock_init(&man->move_lock);
+ man->size = p_size;
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ INIT_LIST_HEAD(&man->lru[i]);
+ man->move = NULL;
+}
+EXPORT_SYMBOL(ttm_resource_manager_init);
+
+/**
+ * ttm_resource_manager_force_list_clean
+ *
+ * @bdev: device to use
+ * @man: manager to use
+ *
+ * Force all the objects out of a memory manager until clean.
+ * Part of memory manager cleanup sequence.
+ */
+int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false,
+ .flags = TTM_OPT_FLAG_FORCE_ALLOC
+ };
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ struct dma_fence *fence;
+ int ret;
+ unsigned i;
+
+ /*
+ * Can't use standard list traversal since we're unlocking.
+ */
+
+ spin_lock(&glob->lru_lock);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ while (!list_empty(&man->lru[i])) {
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
+ NULL);
+ if (ret)
+ return ret;
+ spin_lock(&glob->lru_lock);
+ }
+ }
+ spin_unlock(&glob->lru_lock);
+
+ spin_lock(&man->move_lock);
+ fence = dma_fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
+
+/**
+ * ttm_resource_manager_debug
+ *
+ * @man: manager type to dump.
+ * @p: printer to use for debug.
+ */
+void ttm_resource_manager_debug(struct ttm_resource_manager *man,
+ struct drm_printer *p)
+{
+ drm_printf(p, " use_type: %d\n", man->use_type);
+ drm_printf(p, " use_tt: %d\n", man->use_tt);
+ drm_printf(p, " size: %llu\n", man->size);
+ if (man->func && man->func->debug)
+ (*man->func->debug)(man, p);
+}
+EXPORT_SYMBOL(ttm_resource_manager_debug);
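The dispatch contract in the new file: the resource's mem_type selects the manager, a manager without an ->alloc hook (the system domain) trivially succeeds with a NULL mm_node, and freeing always parks the resource back in TTM_PL_SYSTEM. An illustrative caller, names assumed:

static int my_try_place_in_vram(struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource *res)
{
	int ret;

	res->mem_type = TTM_PL_VRAM;	/* selects the VRAM manager */
	ret = ttm_resource_alloc(bo, place, res);
	if (ret)
		return ret;		/* e.g. -ENOSPC from drm_mm */

	/* shown only to illustrate the pairing: */
	ttm_resource_free(bo, res);	/* res->mem_type is TTM_PL_SYSTEM again */
	return 0;
}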
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3437711ddb43..f43fa69a1e65 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -50,6 +50,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
dma_resv_assert_held(bo->base.resv);
+ if (bo->ttm)
+ return 0;
+
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
@@ -67,7 +70,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
page_flags |= TTM_PAGE_FLAG_SG;
break;
default:
- bo->ttm = NULL;
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
@@ -154,7 +156,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
if (ttm->caching_state == c_state)
return 0;
- if (ttm->state == tt_unpopulated) {
+ if (!ttm_tt_is_populated(ttm)) {
/* Change caching but don't populate */
ttm->caching_state = c_state;
return 0;
@@ -205,33 +207,31 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-void ttm_tt_destroy(struct ttm_tt *ttm)
+void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
- if (ttm == NULL)
- return;
-
- ttm_tt_unbind(ttm);
-
- if (ttm->state == tt_unbound)
- ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(bdev, ttm);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
- ttm->func->destroy(ttm);
+}
+EXPORT_SYMBOL(ttm_tt_destroy_common);
+
+void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+{
+ bdev->driver->ttm_tt_destroy(bdev, ttm);
}
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- ttm->bdev = bo->bdev;
ttm->num_pages = bo->num_pages;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
- ttm->state = tt_unpopulated;
+ ttm_tt_set_unpopulated(ttm);
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
}
@@ -306,39 +306,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
- if (ttm->state == tt_bound) {
- ttm->func->unbind(ttm);
- ttm->state = tt_unbound;
- }
-}
-
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
- struct ttm_operation_ctx *ctx)
-{
- int ret = 0;
-
- if (!ttm)
- return -EINVAL;
-
- if (ttm->state == tt_bound)
- return 0;
-
- ret = ttm_tt_populate(ttm, ctx);
- if (ret)
- return ret;
-
- ret = ttm->func->bind(ttm, bo_mem);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->state = tt_bound;
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_tt_bind);
-
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
@@ -381,7 +348,8 @@ out_err:
return ret;
}
-int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -390,7 +358,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
int i;
int ret = -ENOMEM;
- BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
if (!persistent_swap_storage) {
@@ -427,7 +394,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
put_page(to_page);
}
- ttm_tt_unpopulate(ttm);
+ ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
@@ -441,7 +408,7 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_tt *ttm)
+static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
pgoff_t i;
@@ -449,24 +416,29 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm)
return;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
+ ttm->pages[i]->mapping = bdev->dev_mapping;
}
-int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+int ttm_tt_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
int ret;
- if (ttm->state != tt_unpopulated)
+ if (!ttm)
+ return -EINVAL;
+
+ if (ttm_tt_is_populated(ttm))
return 0;
- if (ttm->bdev->driver->ttm_tt_populate)
- ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
+ if (bdev->driver->ttm_tt_populate)
+ ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
else
ret = ttm_pool_populate(ttm, ctx);
if (!ret)
- ttm_tt_add_mapping(ttm);
+ ttm_tt_add_mapping(bdev, ttm);
return ret;
}
+EXPORT_SYMBOL(ttm_tt_populate);
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
@@ -482,14 +454,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
}
}
-void ttm_tt_unpopulate(struct ttm_tt *ttm)
+void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
- if (ttm->state == tt_unpopulated)
+ if (!ttm_tt_is_populated(ttm))
return;
ttm_tt_clear_mapping(ttm);
- if (ttm->bdev->driver->ttm_tt_unpopulate)
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ if (bdev->driver->ttm_tt_unpopulate)
+ bdev->driver->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_unpopulate(ttm);
}
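ttm_tt_destroy() is now a driver hook, with the backend-independent teardown split out into ttm_tt_destroy_common(). A sketch of the callback a driver would wire up as bdev->driver->ttm_tt_destroy; struct my_tt is an assumed subclass:

struct my_tt {
	struct ttm_tt ttm;
	/* driver-private bind state would live here */
};

static void my_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct my_tt *tt = container_of(ttm, struct my_tt, ttm);

	ttm_tt_destroy_common(bdev, ttm);	/* unpopulate, drop swap file */
	ttm_tt_fini(ttm);
	kfree(tt);
}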
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 82a7dfdd14c2..9f7c26193831 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -358,18 +358,7 @@ static struct platform_driver v3d_platform_driver = {
},
};
-static int __init v3d_drm_register(void)
-{
- return platform_driver_register(&v3d_platform_driver);
-}
-
-static void __exit v3d_drm_unregister(void)
-{
- platform_driver_unregister(&v3d_platform_driver);
-}
-
-module_init(v3d_drm_register);
-module_exit(v3d_drm_unregister);
+module_platform_driver(v3d_platform_driver);
MODULE_ALIAS("platform:v3d-drm");
MODULE_DESCRIPTION("Broadcom V3D DRM Driver");
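module_platform_driver() generates exactly the boilerplate the hunk above deletes; roughly, per include/linux/platform_device.h, with the function names derived from the driver variable:

static int __init v3d_platform_driver_init(void)
{
	return platform_driver_register(&v3d_platform_driver);
}
module_init(v3d_platform_driver_init);

static void __exit v3d_platform_driver_exit(void)
{
	platform_driver_unregister(&v3d_platform_driver);
}
module_exit(v3d_platform_driver_exit);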
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 915f8bfdb58c..182c586525eb 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -568,7 +568,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
- kfree(bin);
v3d_job_put(&render->base);
kfree(bin);
return ret;
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 3b81ea28c0bb..5a453532901f 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
- unsigned int count;
- struct scatterlist *sgl;
+ struct sg_dma_page_iter dma_iter;
- for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
- u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
+ for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
+ dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
+ u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
- BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >=
+ BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
BIT(24));
-
- for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++)
+ for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
v3d->pt[page++] = pte + i;
}
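for_each_sgtable_dma_page() iterates the mapped (DMA) side of the table in fixed PAGE_SIZE steps, which is why the per-segment length computation above becomes a constant. A minimal sketch of the iterator's shape:

#include <linux/scatterlist.h>

static void my_walk_dma_pages(struct sg_table *sgt)
{
	struct sg_dma_page_iter dma_iter;

	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		/* one page-aligned, PAGE_SIZE-sized chunk per iteration */
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		(void)addr;	/* e.g. program one PTE per chunk */
	}
}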
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index d9a5af62af89..4fcc0a542b8a 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -397,11 +397,13 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
vbox_crtc->cursor_enabled = true;
- /* pinning is done in prepare/cleanup framebuffer */
- src = drm_gem_vram_kmap(gbo, true, NULL);
+ src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
+ /*
+ * BUG: we should have pinned the BO in prepare_fb().
+ */
mutex_unlock(&vbox->hw_mutex);
- DRM_WARN("Could not kmap cursor bo, skipping update\n");
+ DRM_WARN("Could not map cursor bo, skipping update\n");
return;
}
@@ -414,7 +416,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
data_size = width * height * 4 + mask_size;
copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
- drm_gem_vram_kunmap(gbo);
+ drm_gem_vram_vunmap(gbo, src);
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
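drm_gem_vram_vmap() pins and maps in a single call, and vunmap undoes both, which is what replaces the separate kmap bookkeeping here. A sketch of the required pairing; the helper name and arguments are assumed:

static int blit_cursor(struct drm_gem_vram_object *gbo, void *dst, size_t len)
{
	void *src = drm_gem_vram_vmap(gbo);

	if (IS_ERR(src))
		return PTR_ERR(src);

	memcpy(dst, src, len);
	drm_gem_vram_vunmap(gbo, src);	/* must receive the vmap'ed address */
	return 0;
}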
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index b303703bc7f3..d0163e18e9ca 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -12,6 +12,7 @@ vc4-y := \
vc4_kms.o \
vc4_gem.o \
vc4_hdmi.o \
+ vc4_hdmi_phy.o \
vc4_vec.o \
vc4_hvs.o \
vc4_irq.o \
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 74ceebd62fbc..cc74a3f3a07a 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
- struct drm_printer p = drm_info_printer(vc4->dev->dev);
+ struct drm_printer p = drm_info_printer(vc4->base.dev);
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, bo_cache.time_work);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
mutex_lock(&vc4->bo_lock);
vc4_bo_cache_free_old(dev);
@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
- return 0;
+ return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}
-void vc4_bo_cache_destroy(struct drm_device *dev)
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int i;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 6d8fa6118fc1..482219fb4db2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -65,6 +65,20 @@ static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_HACT_ACT),
};
+static unsigned int
+vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
+{
+ u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ /* Top/base are supposed to be 4-pixel aligned, but the
+ * Raspberry Pi firmware fills the low bits (which are
+ * presumably ignored).
+ */
+ u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ return top - base + 4;
+}
+
static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
@@ -74,6 +88,8 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int cob_size;
u32 val;
int fifo_lines;
int vblank_lines;
@@ -89,7 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
* Read vertical scanline which is currently composed for our
* pixelvalve by the HVS, and also the scaler status.
*/
- val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+ val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel));
/* Get optional system timestamp after query. */
if (etime)
@@ -109,8 +125,9 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
*hpos += mode->crtc_htotal / 2;
}
+ cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel);
/* This is the offset we need for translating hvs -> pv scanout pos. */
- fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+ fifo_lines = cob_size / mode->crtc_hdisplay;
if (fifo_lines > 0)
ret = true;
@@ -189,10 +206,22 @@ void vc4_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
-static u32 vc4_get_fifo_full_level(u32 format)
+static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
- static const u32 fifo_len_bytes = 64;
+ const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
+ const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+ u32 fifo_len_bytes = pv_data->fifo_depth;
+ /*
+ * Pixels are pulled from the HVS if the number of bytes is
+ * lower than the FIFO full level.
+ *
+ * The latency of the pixel fetch mechanism is 6 pixels, so we
+ * need to convert those 6 pixels to bytes, depending on the
+ * format, and then subtract that from the length of the FIFO
+ * to make sure we never end up in a situation where the FIFO
+ * is full.
+ */
switch (format) {
case PV_CONTROL_FORMAT_DSIV_16:
case PV_CONTROL_FORMAT_DSIC_16:
@@ -202,10 +231,30 @@ static u32 vc4_get_fifo_full_level(u32 format)
case PV_CONTROL_FORMAT_24:
case PV_CONTROL_FORMAT_DSIV_24:
default:
+ /*
+ * For some reason, pixelvalve4 doesn't work with
+ * the usual formula and only works with a value of 32.
+ */
+ if (crtc_data->hvs_output == 5)
+ return 32;
+
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
+static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc,
+ u32 format)
+{
+ u32 level = vc4_get_fifo_full_level(vc4_crtc, format);
+ u32 ret = 0;
+
+ ret |= VC4_SET_FIELD((level >> 6),
+ PV5_CONTROL_FIFO_LEVEL_HIGH);
+
+ return ret | VC4_SET_FIELD(level & 0x3f,
+ PV_CONTROL_FIFO_LEVEL);
+}
+
/*
* Returns the encoder attached to the CRTC.
*
@@ -230,11 +279,23 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
return NULL;
}
+static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ /* The PV needs to be disabled before it can be flushed */
+ CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
+ CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
+}
+
static void vc4_crtc_config_pv(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct drm_crtc_state *state = crtc->state;
struct drm_display_mode *mode = &state->adjusted_mode;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -242,24 +303,29 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc)
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
+ u8 ppc = pv_data->pixels_per_clock;
+ bool debug_dump_regs = false;
- /* Reset the PV fifo. */
- CRTC_WRITE(PV_CONTROL, 0);
- CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN);
- CRTC_WRITE(PV_CONTROL, 0);
+ if (debug_dump_regs) {
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
+ }
+
+ vc4_crtc_pixelvalve_reset(crtc);
CRTC_WRITE(PV_HORZA,
- VC4_SET_FIELD((mode->htotal -
- mode->hsync_end) * pixel_rep,
+ VC4_SET_FIELD((mode->htotal - mode->hsync_end) * pixel_rep / ppc,
PV_HORZA_HBP) |
- VC4_SET_FIELD((mode->hsync_end -
- mode->hsync_start) * pixel_rep,
+ VC4_SET_FIELD((mode->hsync_end - mode->hsync_start) * pixel_rep / ppc,
PV_HORZA_HSYNC));
+
CRTC_WRITE(PV_HORZB,
- VC4_SET_FIELD((mode->hsync_start -
- mode->hdisplay) * pixel_rep,
+ VC4_SET_FIELD((mode->hsync_start - mode->hdisplay) * pixel_rep / ppc,
PV_HORZB_HFP) |
- VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE));
+ VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc,
+ PV_HORZB_HACTIVE));
CRTC_WRITE(PV_VERTA,
VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
@@ -306,35 +372,20 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc)
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- CRTC_WRITE(PV_CONTROL,
+ if (vc4->hvs->hvs5)
+ CRTC_WRITE(PV_MUX_CFG,
+ VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
+ PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
+
+ CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
+ vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
- VC4_SET_FIELD(vc4_get_fifo_full_level(format),
- PV_CONTROL_FIFO_LEVEL) |
VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) |
PV_CONTROL_CLR_AT_START |
PV_CONTROL_TRIGGER_UNDERFLOW |
PV_CONTROL_WAIT_HSTART |
VC4_SET_FIELD(vc4_encoder->clock_select,
- PV_CONTROL_CLK_SELECT) |
- PV_CONTROL_FIFO_CLR |
- PV_CONTROL_EN);
-}
-
-static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- bool debug_dump_regs = false;
-
- if (debug_dump_regs) {
- struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
- dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
- drm_crtc_index(crtc));
- drm_print_regset32(&p, &vc4_crtc->regset);
- }
-
- vc4_crtc_config_pv(crtc);
-
- vc4_hvs_mode_set_nofb(crtc);
+ PV_CONTROL_CLK_SELECT));
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
@@ -352,24 +403,86 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
+static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel)
+{
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int ret;
+
+ CRTC_WRITE(PV_V_CONTROL,
+ CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
+ ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
+ WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");
+
+ /*
+ * This delay is needed to avoid to get a pixel stuck in an
+ * unflushable FIFO between the pixelvalve and the HDMI
+ * controllers on the BCM2711.
+ *
+ * Timing is fairly sensitive here, so mdelay is the safest
+ * approach.
+ *
+ * If this were to be reworked: the stuck pixel happens on a
+ * BCM2711 with high probability when changing modes, so a
+ * script that changes modes regularly should trigger the bug
+ * in fewer than 10 attempts. It manifests itself as every
+ * pixel being shifted by one to the right, and thus the
+ * last pixel of a line actually being displayed as the first
+ * pixel on the next line.
+ */
+ mdelay(20);
+
+ if (vc4_encoder && vc4_encoder->post_crtc_disable)
+ vc4_encoder->post_crtc_disable(encoder);
+
+ vc4_crtc_pixelvalve_reset(crtc);
+ vc4_hvs_stop_channel(dev, channel);
+
+ if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
+ vc4_encoder->post_crtc_powerdown(encoder);
+
+ return 0;
+}
+
+int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ int channel;
+
+ if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2711-pixelvalve2") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2711-pixelvalve4")))
+ return 0;
+
+ if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
+ return 0;
+
+ if (!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN))
+ return 0;
+
+ channel = vc4_hvs_get_fifo_from_output(drm, vc4_crtc->data->hvs_output);
+ if (channel < 0)
+ return 0;
+
+ return vc4_crtc_disable(crtc, channel);
+}
+
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
struct drm_device *dev = crtc->dev;
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- int ret;
require_hvs_enabled(dev);
/* Disable vblank irq handling before crtc is disabled. */
drm_crtc_vblank_off(crtc);
- CRTC_WRITE(PV_V_CONTROL,
- CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
- ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
- WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");
-
- vc4_hvs_atomic_disable(crtc, old_state);
+ vc4_crtc_disable(crtc, old_vc4_state->assigned_channel);
/*
* Make sure we issue a vblank event after disabling the CRTC if
@@ -390,6 +503,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
require_hvs_enabled(dev);
@@ -400,11 +515,24 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
vc4_hvs_atomic_enable(crtc, old_state);
+ if (vc4_encoder->pre_crtc_configure)
+ vc4_encoder->pre_crtc_configure(encoder);
+
+ vc4_crtc_config_pv(crtc);
+
+ CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN);
+
+ if (vc4_encoder->pre_crtc_enable)
+ vc4_encoder->pre_crtc_enable(encoder);
+
/* When feeding the transposer block the pixelvalve is unneeded and
* should not be enabled.
*/
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+
+ if (vc4_encoder->post_crtc_enable)
+ vc4_encoder->post_crtc_enable(encoder);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -499,7 +627,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
- u32 chan = vc4_crtc->channel;
+ u32 chan = vc4_state->assigned_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -516,7 +644,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
* the CRTC and encoder already reconfigured, leading to
* underruns. This can be seen when reconfiguring the CRTC.
*/
- vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
+ vc4_hvs_unmask_underrun(dev, chan);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -698,6 +826,7 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
old_vc4_state = to_vc4_crtc_state(crtc->state);
vc4_state->feed_txp = old_vc4_state->feed_txp;
vc4_state->margins = old_vc4_state->margins;
+ vc4_state->assigned_channel = old_vc4_state->assigned_channel;
__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
return &vc4_state->base;
@@ -723,11 +852,19 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
void vc4_crtc_reset(struct drm_crtc *crtc)
{
+ struct vc4_crtc_state *vc4_crtc_state;
+
if (crtc->state)
vc4_crtc_destroy_state(crtc, crtc->state);
- crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
- if (crtc->state)
- __drm_atomic_helper_crtc_reset(crtc, crtc->state);
+
+ vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
+ if (!vc4_crtc_state) {
+ crtc->state = NULL;
+ return;
+ }
+
+ vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
+ __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
@@ -747,7 +884,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
- .mode_set_nofb = vc4_crtc_mode_set_nofb,
.mode_valid = vc4_crtc_mode_valid,
.atomic_check = vc4_crtc_atomic_check,
.atomic_flush = vc4_hvs_atomic_flush,
@@ -758,9 +894,12 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
- .hvs_channel = 0,
+ .hvs_available_channels = BIT(0),
+ .hvs_output = 0,
},
.debugfs_name = "crtc0_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
@@ -769,9 +908,12 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
static const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
- .hvs_channel = 2,
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
},
.debugfs_name = "crtc1_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
@@ -780,19 +922,94 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
static const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
- .hvs_channel = 1,
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 1,
},
.debugfs_name = "crtc2_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
.encoder_types = {
- [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI0,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};
+static const struct vc4_pv_data bcm2711_pv0_data = {
+ .base = {
+ .hvs_available_channels = BIT(0),
+ .hvs_output = 0,
+ },
+ .debugfs_name = "crtc0_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_DSI0,
+ [1] = VC4_ENCODER_TYPE_DPI,
+ },
+};
+
+static const struct vc4_pv_data bcm2711_pv1_data = {
+ .base = {
+ .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
+ .hvs_output = 3,
+ },
+ .debugfs_name = "crtc1_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_DSI1,
+ [1] = VC4_ENCODER_TYPE_SMI,
+ },
+};
+
+static const struct vc4_pv_data bcm2711_pv2_data = {
+ .base = {
+ .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
+ .hvs_output = 4,
+ },
+ .debugfs_name = "crtc2_regs",
+ .fifo_depth = 256,
+ .pixels_per_clock = 2,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI0,
+ },
+};
+
+static const struct vc4_pv_data bcm2711_pv3_data = {
+ .base = {
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 1,
+ },
+ .debugfs_name = "crtc3_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_VEC,
+ },
+};
+
+static const struct vc4_pv_data bcm2711_pv4_data = {
+ .base = {
+ .hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
+ .hvs_output = 5,
+ },
+ .debugfs_name = "crtc4_regs",
+ .fifo_depth = 64,
+ .pixels_per_clock = 2,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI1,
+ },
+};
+
static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
{ .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data },
+ { .compatible = "brcm,bcm2711-pixelvalve0", .data = &bcm2711_pv0_data },
+ { .compatible = "brcm,bcm2711-pixelvalve1", .data = &bcm2711_pv1_data },
+ { .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
+ { .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
+ { .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
{}
};
@@ -819,26 +1036,11 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
}
}
-static void
-vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
-{
- struct drm_device *drm = vc4_crtc->base.dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
- /* Top/base are supposed to be 4-pixel aligned, but the
- * Raspberry Pi firmware fills the low bits (which are
- * presumably ignored).
- */
- u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
- u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
-
- vc4_crtc->cob_size = top - base + 4;
-}
-
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs)
{
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_plane *primary_plane;
unsigned int i;
@@ -858,15 +1060,17 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
crtc_funcs, NULL);
drm_crtc_helper_add(crtc, crtc_helper_funcs);
- vc4_crtc->channel = vc4_crtc->data->hvs_channel;
- drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
- drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
- /* We support CTM, but only for one CRTC at a time. It's therefore
- * implemented as private driver state in vc4_kms, not here.
- */
- drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
- vc4_crtc_get_cob_allocation(vc4_crtc);
+ if (!vc4->hvs->hvs5) {
+ drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
+
+ drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
+
+ /* We support CTM, but only for one CRTC at a time. It's therefore
+ * implemented as private driver state in vc4_kms, not here.
+ */
+ drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
+ }
for (i = 0; i < crtc->gamma_size; i++) {
vc4_crtc->lut_r[i] = i;
@@ -915,7 +1119,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
+ vc4_crtc_irq_handler,
+ IRQF_SHARED,
+ "vc4 crtc", vc4_crtc);
if (ret)
goto err_destroy_planes;
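The switch to IRQF_SHARED presumably reflects pixelvalves sharing an interrupt line on the BCM2711; a shared handler must tolerate interrupts it did not raise and needs the unique dev_id already passed above. A sketch of the shape such a handler takes, mirroring the existing vc4_crtc_irq_handler contract (details assumed):

static irqreturn_t my_crtc_irq_handler(int irq, void *data)
{
	struct vc4_crtc *vc4_crtc = data;	/* unique dev_id */
	u32 stat = CRTC_READ(PV_INTSTAT);

	if (!(stat & PV_INT_VFP_START))
		return IRQ_NONE;	/* another device on the shared line */

	CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);	/* ack ours only */
	return IRQ_HANDLED;
}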
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 38343d2fb4fb..839610f8092a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -252,41 +252,42 @@ static int vc4_drm_bind(struct device *dev)
struct drm_device *drm;
struct vc4_dev *vc4;
struct device_node *node;
+ struct drm_crtc *crtc;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
- if (!vc4)
- return -ENOMEM;
-
/* If VC4 V3D is missing, don't advertise render nodes. */
node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
if (!node || !of_device_is_available(node))
vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
of_node_put(node);
- drm = drm_dev_alloc(&vc4_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+ if (IS_ERR(vc4))
+ return PTR_ERR(vc4);
+
+ drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- vc4->dev = drm;
- drm->dev_private = vc4;
INIT_LIST_HEAD(&vc4->debugfs_list);
mutex_init(&vc4->bin_bo_lock);
ret = vc4_bo_cache_init(drm);
if (ret)
- goto dev_put;
+ return ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
- vc4_gem_init(drm);
+ ret = vc4_gem_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(dev, drm);
if (ret)
- goto gem_destroy;
+ return ret;
ret = vc4_plane_create_additional_planes(drm);
if (ret)
@@ -298,6 +299,9 @@ static int vc4_drm_bind(struct device *dev)
if (ret < 0)
goto unbind_all;
+ drm_for_each_crtc(crtc, drm)
+ vc4_crtc_disable_at_boot(crtc);
+
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto unbind_all;
@@ -308,29 +312,17 @@ static int vc4_drm_bind(struct device *dev)
unbind_all:
component_unbind_all(dev, drm);
-gem_destroy:
- vc4_gem_destroy(drm);
- vc4_bo_cache_destroy(drm);
-dev_put:
- drm_dev_put(drm);
+
return ret;
}
static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
-
- drm_mode_config_cleanup(drm);
-
- drm_atomic_private_obj_fini(&vc4->load_tracker);
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
-
- drm_dev_put(drm);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -368,6 +360,7 @@ static int vc4_platform_drm_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_of_match[] = {
+ { .compatible = "brcm,bcm2711-vc5", },
{ .compatible = "brcm,bcm2835-vc4", },
{ .compatible = "brcm,cygnus-vc4", },
{},
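devm_drm_dev_alloc() allocates the driver structure with the drm_device embedded at the named member and ties its lifetime to the parent device via devres, which is what lets the error unwinding above collapse into plain returns. The pattern in isolation, with my_ names illustrative:

struct my_dev {
	struct drm_device base;		/* the member named in the macro */
	/* driver state follows */
};

static int my_bind(struct device *dev, struct drm_driver *drv)
{
	struct my_dev *my;

	my = devm_drm_dev_alloc(dev, drv, struct my_dev, base);
	if (IS_ERR(my))
		return PTR_ERR(my);

	/* no drm_dev_put() on failure paths: devres drops the reference */
	return drm_dev_register(&my->base, 0);
}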
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fa19160c801f..19b75bebd35f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,6 +14,7 @@
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -71,9 +72,8 @@ struct vc4_perfmon {
};
struct vc4_dev {
- struct drm_device *dev;
+ struct drm_device base;
- struct vc4_hdmi *hdmi;
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
struct vc4_dpi *dpi;
@@ -201,6 +201,9 @@ struct vc4_dev {
int power_refcount;
+ /* Set to true when the load tracker is supported. */
+ bool load_tracker_available;
+
/* Set to true when the load tracker is active. */
bool load_tracker_enabled;
@@ -232,7 +235,7 @@ struct vc4_dev {
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
- return (struct vc4_dev *)dev->dev_private;
+ return container_of(dev, struct vc4_dev, base);
}
struct vc4_bo {
@@ -285,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return (struct vc4_bo *)bo;
+ return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -298,7 +301,7 @@ struct vc4_fence {
static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
- return (struct vc4_fence *)fence;
+ return container_of(fence, struct vc4_fence, base);
}
struct vc4_seqno_cb {
@@ -320,6 +323,8 @@ struct vc4_hvs {
void __iomem *regs;
u32 __iomem *dlist;
+ struct clk *core_clk;
+
/* Memory manager for CRTCs to allocate space in the display
* list. Units are dwords.
*/
@@ -329,7 +334,11 @@ struct vc4_hvs {
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
+
struct debugfs_regset32 regset;
+
+	/* HVS version 5 flag; version 5 requires updated dlist structures */
+ bool hvs5;
};
struct vc4_plane {
@@ -339,7 +348,7 @@ struct vc4_plane {
static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
- return (struct vc4_plane *)plane;
+ return container_of(plane, struct vc4_plane, base);
}
enum vc4_scaling_mode {
@@ -415,12 +424,13 @@ struct vc4_plane_state {
static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
- return (struct vc4_plane_state *)state;
+ return container_of(state, struct vc4_plane_state, base);
}
enum vc4_encoder_type {
VC4_ENCODER_TYPE_NONE,
- VC4_ENCODER_TYPE_HDMI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_DSI1,
@@ -432,6 +442,13 @@ struct vc4_encoder {
struct drm_encoder base;
enum vc4_encoder_type type;
u32 clock_select;
+
+ void (*pre_crtc_configure)(struct drm_encoder *encoder);
+ void (*pre_crtc_enable)(struct drm_encoder *encoder);
+ void (*post_crtc_enable)(struct drm_encoder *encoder);
+
+ void (*post_crtc_disable)(struct drm_encoder *encoder);
+ void (*post_crtc_powerdown)(struct drm_encoder *encoder);
};
static inline struct vc4_encoder *
@@ -441,13 +458,22 @@ to_vc4_encoder(struct drm_encoder *encoder)
}
struct vc4_crtc_data {
- /* Which channel of the HVS this pixelvalve sources from. */
- int hvs_channel;
+ /* Bitmask of channels (FIFOs) of the HVS that the output can source from */
+ unsigned int hvs_available_channels;
+
+ /* Which output of the HVS this pixelvalve sources from. */
+ int hvs_output;
};
struct vc4_pv_data {
struct vc4_crtc_data base;
+ /* Depth of the PixelValve FIFO in bytes */
+ unsigned int fifo_depth;
+
+ /* Number of pixels output per clock period */
+ u8 pixels_per_clock;
+
enum vc4_encoder_type encoder_types[4];
const char *debugfs_name;
@@ -462,14 +488,9 @@ struct vc4_crtc {
/* Timestamp at start of vblank irq - unaffected by lock delays. */
ktime_t t_vblank;
- /* Which HVS channel we're using for our CRTC. */
- int channel;
-
u8 lut_r[256];
u8 lut_g[256];
u8 lut_b[256];
- /* Size in pixels of the COB memory allocated to this CRTC. */
- u32 cob_size;
struct drm_pending_vblank_event *event;
@@ -479,7 +500,7 @@ struct vc4_crtc {
static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
- return (struct vc4_crtc *)crtc;
+ return container_of(crtc, struct vc4_crtc, base);
}
static inline const struct vc4_crtc_data *
@@ -502,6 +523,7 @@ struct vc4_crtc_state {
struct drm_mm_node mm;
bool feed_txp;
bool txp_armed;
+ unsigned int assigned_channel;
struct {
unsigned int left;
@@ -511,10 +533,12 @@ struct vc4_crtc_state {
} margins;
};
+#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
+
static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
- return (struct vc4_crtc_state *)crtc_state;
+ return container_of(crtc_state, struct vc4_crtc_state, base);
}
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
@@ -786,7 +810,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
-void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -794,6 +817,7 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
+int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs);
@@ -850,8 +874,7 @@ extern struct platform_driver vc4_dsi_driver;
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
-void vc4_gem_init(struct drm_device *dev);
-void vc4_gem_destroy(struct drm_device *dev);
+int vc4_gem_init(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
@@ -888,11 +911,12 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
+void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output);
+int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc);
void vc4_hvs_dump_state(struct drm_device *dev);
void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 9f01ddd5b932..b641252939d8 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work)
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, hangcheck.reset_work);
- vc4_save_hang_state(vc4->dev);
+ vc4_save_hang_state(&vc4->base);
- vc4_reset(vc4->dev);
+ vc4_reset(&vc4->base);
}
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
list_del(&exec->head);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
spin_lock_irqsave(&vc4->job_lock, irqflags);
}
@@ -1258,13 +1258,13 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return 0;
fail:
- vc4_complete_exec(vc4->dev, exec);
+ vc4_complete_exec(&vc4->base, exec);
return ret;
}
-void
-vc4_gem_init(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused);
+int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1285,10 +1285,11 @@ vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->purgeable.list);
mutex_init(&vc4->purgeable.lock);
+
+ return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
-void
-vc4_gem_destroy(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
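vc4_gem_init() now registers its teardown through drmm_add_action_or_reset(), which runs the action when the drm_device is released, or runs it immediately and returns the error if registration fails. The idiom in isolation, names assumed:

static void my_state_release(struct drm_device *dev, void *unused)
{
	/* mirror of everything my_state_init() set up */
}

static int my_state_init(struct drm_device *dev)
{
	/* ... initialise lists, locks, timers ... */

	return drmm_add_action_or_reset(dev, my_state_release, NULL);
}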
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6339c6f0f571..95779d50cca0 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -43,177 +43,101 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
+#include <linux/reset.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "media/cec.h"
#include "vc4_drv.h"
+#include "vc4_hdmi.h"
+#include "vc4_hdmi_regs.h"
#include "vc4_regs.h"
-#define HSM_CLOCK_FREQ 163682864
-#define CEC_CLOCK_FREQ 40000
-#define CEC_CLOCK_DIV (HSM_CLOCK_FREQ / CEC_CLOCK_FREQ)
-
-/* HDMI audio information */
-struct vc4_hdmi_audio {
- struct snd_soc_card card;
- struct snd_soc_dai_link link;
- struct snd_soc_dai_link_component cpu;
- struct snd_soc_dai_link_component codec;
- struct snd_soc_dai_link_component platform;
- int samplerate;
- int channels;
- struct snd_dmaengine_dai_dma_data dma_data;
- struct snd_pcm_substream *substream;
-};
+#define VC5_HDMI_HORZA_HFP_SHIFT 16
+#define VC5_HDMI_HORZA_HFP_MASK VC4_MASK(28, 16)
+#define VC5_HDMI_HORZA_VPOS BIT(15)
+#define VC5_HDMI_HORZA_HPOS BIT(14)
+#define VC5_HDMI_HORZA_HAP_SHIFT 0
+#define VC5_HDMI_HORZA_HAP_MASK VC4_MASK(13, 0)
-/* General HDMI hardware state. */
-struct vc4_hdmi {
- struct platform_device *pdev;
-
- struct drm_encoder *encoder;
- struct drm_connector *connector;
+#define VC5_HDMI_HORZB_HBP_SHIFT 16
+#define VC5_HDMI_HORZB_HBP_MASK VC4_MASK(26, 16)
+#define VC5_HDMI_HORZB_HSP_SHIFT 0
+#define VC5_HDMI_HORZB_HSP_MASK VC4_MASK(10, 0)
- struct vc4_hdmi_audio audio;
+#define VC5_HDMI_VERTA_VSP_SHIFT 24
+#define VC5_HDMI_VERTA_VSP_MASK VC4_MASK(28, 24)
+#define VC5_HDMI_VERTA_VFP_SHIFT 16
+#define VC5_HDMI_VERTA_VFP_MASK VC4_MASK(22, 16)
+#define VC5_HDMI_VERTA_VAL_SHIFT 0
+#define VC5_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0)
- struct i2c_adapter *ddc;
- void __iomem *hdmicore_regs;
- void __iomem *hd_regs;
- int hpd_gpio;
- bool hpd_active_low;
+#define VC5_HDMI_VERTB_VSPO_SHIFT 16
+#define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16)
- struct cec_adapter *cec_adap;
- struct cec_msg cec_rx_msg;
- bool cec_tx_ok;
- bool cec_irq_was_rx;
+# define VC4_HD_M_SW_RST BIT(2)
+# define VC4_HD_M_ENABLE BIT(0)
- struct clk *pixel_clock;
- struct clk *hsm_clock;
+#define CEC_CLOCK_FREQ 40000
+#define VC4_HSM_MID_CLOCK 149985000
- struct debugfs_regset32 hdmi_regset;
- struct debugfs_regset32 hd_regset;
-};
+static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
-#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset)
-#define HDMI_WRITE(offset, val) writel(val, vc4->hdmi->hdmicore_regs + offset)
-#define HD_READ(offset) readl(vc4->hdmi->hd_regs + offset)
-#define HD_WRITE(offset, val) writel(val, vc4->hdmi->hd_regs + offset)
+ drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
+ drm_print_regset32(&p, &vc4_hdmi->hd_regset);
-/* VC4 HDMI encoder KMS struct */
-struct vc4_hdmi_encoder {
- struct vc4_encoder base;
- bool hdmi_monitor;
- bool limited_rgb_range;
-};
+ return 0;
+}
-static inline struct vc4_hdmi_encoder *
-to_vc4_hdmi_encoder(struct drm_encoder *encoder)
+static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
- return container_of(encoder, struct vc4_hdmi_encoder, base.base);
-}
+ HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST);
+ udelay(1);
+ HDMI_WRITE(HDMI_M_CTL, 0);
-/* VC4 HDMI connector KMS struct */
-struct vc4_hdmi_connector {
- struct drm_connector base;
+ HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_ENABLE);
- /* Since the connector is attached to just the one encoder,
- * this is the reference to it so we can do the best_encoder()
- * hook.
- */
- struct drm_encoder *encoder;
-};
+ HDMI_WRITE(HDMI_SW_RESET_CONTROL,
+ VC4_HDMI_SW_RESET_HDMI |
+ VC4_HDMI_SW_RESET_FORMAT_DETECT);
-static inline struct vc4_hdmi_connector *
-to_vc4_hdmi_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct vc4_hdmi_connector, base);
+ HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
}
-static const struct debugfs_reg32 hdmi_regs[] = {
- VC4_REG32(VC4_HDMI_CORE_REV),
- VC4_REG32(VC4_HDMI_SW_RESET_CONTROL),
- VC4_REG32(VC4_HDMI_HOTPLUG_INT),
- VC4_REG32(VC4_HDMI_HOTPLUG),
- VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP),
- VC4_REG32(VC4_HDMI_MAI_CONFIG),
- VC4_REG32(VC4_HDMI_MAI_FORMAT),
- VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG),
- VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG),
- VC4_REG32(VC4_HDMI_HORZA),
- VC4_REG32(VC4_HDMI_HORZB),
- VC4_REG32(VC4_HDMI_FIFO_CTL),
- VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL),
- VC4_REG32(VC4_HDMI_VERTA0),
- VC4_REG32(VC4_HDMI_VERTA1),
- VC4_REG32(VC4_HDMI_VERTB0),
- VC4_REG32(VC4_HDMI_VERTB1),
- VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL),
- VC4_REG32(VC4_HDMI_TX_PHY_CTL0),
-
- VC4_REG32(VC4_HDMI_CEC_CNTRL_1),
- VC4_REG32(VC4_HDMI_CEC_CNTRL_2),
- VC4_REG32(VC4_HDMI_CEC_CNTRL_3),
- VC4_REG32(VC4_HDMI_CEC_CNTRL_4),
- VC4_REG32(VC4_HDMI_CEC_CNTRL_5),
- VC4_REG32(VC4_HDMI_CPU_STATUS),
- VC4_REG32(VC4_HDMI_CPU_MASK_STATUS),
-
- VC4_REG32(VC4_HDMI_CEC_RX_DATA_1),
- VC4_REG32(VC4_HDMI_CEC_RX_DATA_2),
- VC4_REG32(VC4_HDMI_CEC_RX_DATA_3),
- VC4_REG32(VC4_HDMI_CEC_RX_DATA_4),
- VC4_REG32(VC4_HDMI_CEC_TX_DATA_1),
- VC4_REG32(VC4_HDMI_CEC_TX_DATA_2),
- VC4_REG32(VC4_HDMI_CEC_TX_DATA_3),
- VC4_REG32(VC4_HDMI_CEC_TX_DATA_4),
-};
-
-static const struct debugfs_reg32 hd_regs[] = {
- VC4_REG32(VC4_HD_M_CTL),
- VC4_REG32(VC4_HD_MAI_CTL),
- VC4_REG32(VC4_HD_MAI_THR),
- VC4_REG32(VC4_HD_MAI_FMT),
- VC4_REG32(VC4_HD_MAI_SMP),
- VC4_REG32(VC4_HD_VID_CTL),
- VC4_REG32(VC4_HD_CSC_CTL),
- VC4_REG32(VC4_HD_FRAME_COUNT),
-};
-
-static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_hdmi *hdmi = vc4->hdmi;
- struct drm_printer p = drm_seq_file_printer(m);
+ reset_control_reset(vc4_hdmi->reset);
- drm_print_regset32(&p, &hdmi->hdmi_regset);
- drm_print_regset32(&p, &hdmi->hd_regset);
+ HDMI_WRITE(HDMI_DVP_CTL, 0);
- return 0;
+ HDMI_WRITE(HDMI_CLOCK_STOP,
+ HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
}
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
- if (vc4->hdmi->hpd_gpio) {
- if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^
- vc4->hdmi->hpd_active_low)
+ if (vc4_hdmi->hpd_gpio) {
+ if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
+ vc4_hdmi->hpd_active_low)
return connector_status_connected;
- cec_phys_addr_invalidate(vc4->hdmi->cec_adap);
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return connector_status_disconnected;
}
- if (drm_probe_ddc(vc4->hdmi->ddc))
+ if (drm_probe_ddc(vc4_hdmi->ddc))
return connector_status_connected;
- if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
+ if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
return connector_status_connected;
- cec_phys_addr_invalidate(vc4->hdmi->cec_adap);
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
return connector_status_disconnected;
}
@@ -225,17 +149,13 @@ static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct vc4_hdmi_connector *vc4_connector =
- to_vc4_hdmi_connector(connector);
- struct drm_encoder *encoder = vc4_connector->encoder;
- struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
- struct drm_device *dev = connector->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ struct vc4_hdmi_encoder *vc4_encoder = &vc4_hdmi->encoder;
int ret = 0;
struct edid *edid;
- edid = drm_get_edid(connector, vc4->hdmi->ddc);
- cec_s_phys_addr_from_edid(vc4->hdmi->cec_adap, edid);
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
return -ENODEV;
@@ -267,32 +187,23 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
.get_modes = vc4_hdmi_connector_get_modes,
};
-static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
- struct drm_encoder *encoder,
- struct i2c_adapter *ddc)
+static int vc4_hdmi_connector_init(struct drm_device *dev,
+ struct vc4_hdmi *vc4_hdmi)
{
- struct drm_connector *connector;
- struct vc4_hdmi_connector *hdmi_connector;
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
int ret;
- hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
- GFP_KERNEL);
- if (!hdmi_connector)
- return ERR_PTR(-ENOMEM);
- connector = &hdmi_connector->base;
-
- hdmi_connector->encoder = encoder;
-
drm_connector_init_with_ddc(dev, connector,
&vc4_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
- ddc);
+ vc4_hdmi->ddc);
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/* Create and attach TV margin props to this connector. */
ret = drm_mode_create_tv_margin_properties(dev);
if (ret)
- return ERR_PTR(ret);
+ return ret;
drm_connector_attach_tv_margin_properties(connector);
@@ -304,35 +215,37 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
drm_connector_attach_encoder(connector, encoder);
- return connector;
+ return 0;
}
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
enum hdmi_infoframe_type type)
{
- struct drm_device *dev = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = type - 0x80;
- HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
- HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
+ HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
- return wait_for(!(HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+ return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
}
static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
- struct drm_device *dev = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
u32 packet_id = frame->any.type - 0x80;
- u32 packet_reg = VC4_HDMI_RAM_PACKET(packet_id);
+ const struct vc4_hdmi_register *ram_packet_start =
+ &vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
+ u32 packet_reg = ram_packet_start->offset + VC4_HDMI_PACKET_STRIDE * packet_id;
+ void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi,
+ ram_packet_start->reg);
uint8_t buffer[VC4_HDMI_PACKET_STRIDE];
ssize_t len, i;
int ret;
- WARN_ONCE(!(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+ WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE),
"Packet RAM has to be on to store the packet.");
@@ -347,23 +260,23 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
}
for (i = 0; i < len; i += 7) {
- HDMI_WRITE(packet_reg,
- buffer[i + 0] << 0 |
- buffer[i + 1] << 8 |
- buffer[i + 2] << 16);
+ writel(buffer[i + 0] << 0 |
+ buffer[i + 1] << 8 |
+ buffer[i + 2] << 16,
+ base + packet_reg);
packet_reg += 4;
- HDMI_WRITE(packet_reg,
- buffer[i + 3] << 0 |
- buffer[i + 4] << 8 |
- buffer[i + 5] << 16 |
- buffer[i + 6] << 24);
+ writel(buffer[i + 3] << 0 |
+ buffer[i + 4] << 8 |
+ buffer[i + 5] << 16 |
+ buffer[i + 6] << 24,
+ base + packet_reg);
packet_reg += 4;
}
- HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
- HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
- ret = wait_for((HDMI_READ(VC4_HDMI_RAM_PACKET_STATUS) &
+ HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id));
+ ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) &
BIT(packet_id)), 100);
if (ret)
DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
@@ -371,24 +284,24 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
- struct vc4_dev *vc4 = encoder->dev->dev_private;
- struct vc4_hdmi *hdmi = vc4->hdmi;
- struct drm_connector_state *cstate = hdmi->connector->state;
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_connector_state *cstate = connector->state;
struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- hdmi->connector, mode);
+ connector, mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- hdmi->connector, mode,
+ connector, mode,
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
@@ -416,9 +329,7 @@ static void vc4_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
{
- struct drm_device *drm = encoder->dev;
- struct vc4_dev *vc4 = drm->dev_private;
- struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
union hdmi_infoframe frame;
int ret;
@@ -427,45 +338,139 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
frame.audio.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
frame.audio.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
frame.audio.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
- frame.audio.channels = hdmi->audio.channels;
+ frame.audio.channels = vc4_hdmi->audio.channels;
vc4_hdmi_write_infoframe(encoder, &frame);
}
static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
vc4_hdmi_set_avi_infoframe(encoder);
vc4_hdmi_set_spd_infoframe(encoder);
+ /*
+ * If audio was streaming, we need to re-enable the audio
+ * infoframe here during encoder_enable.
+ */
+ if (vc4_hdmi->audio.streaming)
+ vc4_hdmi_set_audio_infoframe(encoder);
}
-static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
+
+ HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) |
+ VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_CLRSYNC);
+
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
+}
+
+static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
int ret;
- HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG, 0);
+ if (vc4_hdmi->variant->phy_disable)
+ vc4_hdmi->variant->phy_disable(vc4_hdmi);
- HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
- HD_WRITE(VC4_HD_VID_CTL,
- HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
- clk_disable_unprepare(hdmi->pixel_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
- ret = pm_runtime_put(&hdmi->pdev->dev);
+ ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
}
-static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
+{
+ u32 csc_ctl;
+
+ csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
+ VC4_HD_CSC_CTL_ORDER);
+
+ if (enable) {
+ /* CEA VICs other than #1 require limited range RGB
+ * output unless overridden by an AVI infoframe.
+ * Apply a colorspace conversion to squash 0-255 down
+ * to 16-235. The matrix here is:
+ *
+ * [ 0 0 0.8594 16]
+ * [ 0 0.8594 0 16]
+ * [ 0.8594 0 0 16]
+ * [ 0 0 0 1]
+ */
+ csc_ctl |= VC4_HD_CSC_CTL_ENABLE;
+ csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC;
+ csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
+ VC4_HD_CSC_CTL_MODE);
+
+ HDMI_WRITE(HDMI_CSC_12_11, (0x000 << 16) | 0x000);
+ HDMI_WRITE(HDMI_CSC_14_13, (0x100 << 16) | 0x6e0);
+ HDMI_WRITE(HDMI_CSC_22_21, (0x6e0 << 16) | 0x000);
+ HDMI_WRITE(HDMI_CSC_24_23, (0x100 << 16) | 0x000);
+ HDMI_WRITE(HDMI_CSC_32_31, (0x000 << 16) | 0x6e0);
+ HDMI_WRITE(HDMI_CSC_34_33, (0x100 << 16) | 0x000);
+ }
+
+ /* The RGB order applies even when CSC is disabled. */
+ HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
+}
+
+static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable)
+{
+ u32 csc_ctl;
+
+ csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, USE_RGB_TO_YCBCR */
+
+ if (enable) {
+ /* CEA VICs other than #1 require limited range RGB
+ * output unless overridden by an AVI infoframe.
+ * Apply a colorspace conversion to squash 0-255 down
+ * to 16-235. The matrix here is:
+ *
+ * [ 0.8594 0 0 16]
+ * [ 0 0.8594 0 16]
+ * [ 0 0 0.8594 16]
+ * [ 0 0 0 1]
+ * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
+ */
+ HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x1b80);
+ HDMI_WRITE(HDMI_CSC_14_13, (0x0400 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_22_21, (0x1b80 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_24_23, (0x0400 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_34_33, (0x0400 << 16) | 0x1b80);
+ } else {
+ /* Still use the matrix for full range, but make it unity.
+ * Matrix is signed 2p13 fixed point, with signed 9p6 offsets
+ */
+ HDMI_WRITE(HDMI_CSC_12_11, (0x0000 << 16) | 0x2000);
+ HDMI_WRITE(HDMI_CSC_14_13, (0x0000 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_22_21, (0x2000 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_24_23, (0x0000 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_32_31, (0x0000 << 16) | 0x0000);
+ HDMI_WRITE(HDMI_CSC_34_33, (0x0000 << 16) | 0x2000);
+ }
+
+ HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
+}
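The fixed-point encodings described in the comments above can be checked with
a small standalone sketch (assumed helper names, not driver code): the VC5
matrix entries are signed 2p13, the offsets signed 9p6, so 0.8594 encodes as
0x1b80, an offset of 16 as 0x0400, and unity as 0x2000:

	#include <stdint.h>
	#include <stdio.h>

	static int16_t to_s2p13(double v) { return (int16_t)(v * (1 << 13)); }
	static int16_t to_s9p6(double v)  { return (int16_t)(v * (1 << 6)); }

	int main(void)
	{
		/* (235 - 16) / 255 = 0.8588...; the driver rounds to 0.8594. */
		printf("scale  = 0x%04x\n", (unsigned)(uint16_t)to_s2p13(0.8594)); /* 0x1b80 */
		printf("offset = 0x%04x\n", (unsigned)(uint16_t)to_s9p6(16.0));    /* 0x0400 */
		printf("unity  = 0x%04x\n", (unsigned)(uint16_t)to_s2p13(1.0));    /* 0x2000 */
		return 0;
	}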
+
+static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_hdmi *hdmi = vc4->hdmi;
- bool debug_dump_regs = false;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -483,213 +488,285 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
mode->crtc_vsync_end -
interlaced,
VC4_HDMI_VERTB_VBP));
- u32 csc_ctl;
+
+ HDMI_WRITE(HDMI_HORZA,
+ (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
+ (hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
+ VC4_SET_FIELD(mode->hdisplay * pixel_rep,
+ VC4_HDMI_HORZA_HAP));
+
+ HDMI_WRITE(HDMI_HORZB,
+ VC4_SET_FIELD((mode->htotal -
+ mode->hsync_end) * pixel_rep,
+ VC4_HDMI_HORZB_HBP) |
+ VC4_SET_FIELD((mode->hsync_end -
+ mode->hsync_start) * pixel_rep,
+ VC4_HDMI_HORZB_HSP) |
+ VC4_SET_FIELD((mode->hsync_start -
+ mode->hdisplay) * pixel_rep,
+ VC4_HDMI_HORZB_HFP));
+
+ HDMI_WRITE(HDMI_VERTA0, verta);
+ HDMI_WRITE(HDMI_VERTA1, verta);
+
+ HDMI_WRITE(HDMI_VERTB0, vertb_even);
+ HDMI_WRITE(HDMI_VERTB1, vertb);
+}
+
+static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode)
+{
+ bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
+ bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1;
+ u32 verta = (VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
+ VC5_HDMI_VERTA_VSP) |
+ VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
+ VC5_HDMI_VERTA_VFP) |
+ VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
+ u32 vertb = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_HDMI_VERTB_VBP));
+ u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ VC4_SET_FIELD(mode->crtc_vtotal -
+ mode->crtc_vsync_end -
+ interlaced,
+ VC4_HDMI_VERTB_VBP));
+
+ HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021);
+ HDMI_WRITE(HDMI_HORZA,
+ (vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) |
+ (hsync_pos ? VC5_HDMI_HORZA_HPOS : 0) |
+ VC4_SET_FIELD(mode->hdisplay * pixel_rep,
+ VC5_HDMI_HORZA_HAP) |
+ VC4_SET_FIELD((mode->hsync_start -
+ mode->hdisplay) * pixel_rep,
+ VC5_HDMI_HORZA_HFP));
+
+ HDMI_WRITE(HDMI_HORZB,
+ VC4_SET_FIELD((mode->htotal -
+ mode->hsync_end) * pixel_rep,
+ VC5_HDMI_HORZB_HBP) |
+ VC4_SET_FIELD((mode->hsync_end -
+ mode->hsync_start) * pixel_rep,
+ VC5_HDMI_HORZB_HSP));
+
+ HDMI_WRITE(HDMI_VERTA0, verta);
+ HDMI_WRITE(HDMI_VERTA1, verta);
+
+ HDMI_WRITE(HDMI_VERTB0, vertb_even);
+ HDMI_WRITE(HDMI_VERTB1, vertb);
+
+ HDMI_WRITE(HDMI_CLOCK_STOP, 0);
+}
+
+static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
+{
+ u32 drift;
+ int ret;
+
+ drift = HDMI_READ(HDMI_FIFO_CTL);
+ drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
+
+ HDMI_WRITE(HDMI_FIFO_CTL,
+ drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
+ HDMI_WRITE(HDMI_FIFO_CTL,
+ drift | VC4_HDMI_FIFO_CTL_RECENTER);
+ usleep_range(1000, 1100);
+ HDMI_WRITE(HDMI_FIFO_CTL,
+ drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
+ HDMI_WRITE(HDMI_FIFO_CTL,
+ drift | VC4_HDMI_FIFO_CTL_RECENTER);
+
+ ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) &
+ VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
+ WARN_ONCE(ret, "Timeout waiting for "
+ "VC4_HDMI_FIFO_CTL_RECENTER_DONE");
+}
+
+static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ unsigned long pixel_rate, hsm_rate;
int ret;
- ret = pm_runtime_get_sync(&hdmi->pdev->dev);
+ ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
return;
}
- ret = clk_set_rate(hdmi->pixel_clock,
- mode->clock * 1000 *
- ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1));
+ pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1);
+ ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate: %d\n", ret);
return;
}
- ret = clk_prepare_enable(hdmi->pixel_clock);
+ ret = clk_prepare_enable(vc4_hdmi->pixel_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel clock: %d\n", ret);
return;
}
- HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL,
- VC4_HDMI_SW_RESET_HDMI |
- VC4_HDMI_SW_RESET_FORMAT_DETECT);
-
- HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL, 0);
-
- /* PHY should be in reset, like
- * vc4_hdmi_encoder_disable() does.
+ /*
+ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
+ * be faster than pixel clock, infinitesimally faster, tested in
+ * simulation. Otherwise, exact value is unimportant for HDMI
+ * operation." This conflicts with bcm2835's vc4 documentation, which
+ * states HSM's clock has to be at least 108% of the pixel clock.
+ *
+ * Real life tests reveal that vc4's firmware statement holds up, and
+ * users are able to use pixel clocks closer to HSM's, namely for
+ * 1920x1200@60Hz. So it was decided to leave a 1% margin between
+ * both clocks, which for RPi0-3 implies a maximum pixel clock of
+ * 162MHz.
+ *
+ * Additionally, the AXI clock needs to be at least 25% of
+ * pixel clock, but HSM ends up being the limiting factor.
*/
- HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+ hsm_rate = max_t(unsigned long, 120000000, (pixel_rate / 100) * 101);
+ ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
+ if (ret) {
+ DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+ return;
+ }
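Worked through for a common 1080p60 mode (values assumed purely for
illustration), the clamped 1% margin comes out as follows:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pixel_rate = 148500000;            /* 148.5 MHz */
		unsigned long margin = (pixel_rate / 100) * 101; /* 149985000 */
		unsigned long hsm_rate = margin > 120000000 ? margin : 120000000;

		printf("hsm >= %lu Hz\n", hsm_rate);
		return 0;
	}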
- HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
- if (debug_dump_regs) {
- struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+ /*
+ * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
+ * at 300MHz.
+ */
+ ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock,
+ (hsm_rate > VC4_HSM_MID_CLOCK ? 150000000 : 75000000));
+ if (ret) {
+ DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
+ }
- dev_info(&hdmi->pdev->dev, "HDMI regs before:\n");
- drm_print_regset32(&p, &hdmi->hdmi_regset);
- drm_print_regset32(&p, &hdmi->hd_regset);
+ ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
+ clk_disable_unprepare(vc4_hdmi->pixel_clock);
+ return;
}
- HD_WRITE(VC4_HD_VID_CTL, 0);
+ if (vc4_hdmi->variant->reset)
+ vc4_hdmi->variant->reset(vc4_hdmi);
- HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
- HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ if (vc4_hdmi->variant->phy_init)
+ vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+
+ HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT |
VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS);
- HDMI_WRITE(VC4_HDMI_HORZA,
- (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) |
- (hsync_pos ? VC4_HDMI_HORZA_HPOS : 0) |
- VC4_SET_FIELD(mode->hdisplay * pixel_rep,
- VC4_HDMI_HORZA_HAP));
-
- HDMI_WRITE(VC4_HDMI_HORZB,
- VC4_SET_FIELD((mode->htotal -
- mode->hsync_end) * pixel_rep,
- VC4_HDMI_HORZB_HBP) |
- VC4_SET_FIELD((mode->hsync_end -
- mode->hsync_start) * pixel_rep,
- VC4_HDMI_HORZB_HSP) |
- VC4_SET_FIELD((mode->hsync_start -
- mode->hdisplay) * pixel_rep,
- VC4_HDMI_HORZB_HFP));
-
- HDMI_WRITE(VC4_HDMI_VERTA0, verta);
- HDMI_WRITE(VC4_HDMI_VERTA1, verta);
-
- HDMI_WRITE(VC4_HDMI_VERTB0, vertb_even);
- HDMI_WRITE(VC4_HDMI_VERTB1, vertb);
-
- HD_WRITE(VC4_HD_VID_CTL,
- (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
- (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
+ if (vc4_hdmi->variant->set_timings)
+ vc4_hdmi->variant->set_timings(vc4_hdmi, mode);
+}
- csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR,
- VC4_HD_CSC_CTL_ORDER);
+static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_encoder->hdmi_monitor &&
- drm_default_rgb_quant_range(mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED) {
- /* CEA VICs other than #1 requre limited range RGB
- * output unless overridden by an AVI infoframe.
- * Apply a colorspace conversion to squash 0-255 down
- * to 16-235. The matrix here is:
- *
- * [ 0 0 0.8594 16]
- * [ 0 0.8594 0 16]
- * [ 0.8594 0 0 16]
- * [ 0 0 0 1]
- */
- csc_ctl |= VC4_HD_CSC_CTL_ENABLE;
- csc_ctl |= VC4_HD_CSC_CTL_RGB2YCC;
- csc_ctl |= VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
- VC4_HD_CSC_CTL_MODE);
+ drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) {
+ if (vc4_hdmi->variant->csc_setup)
+ vc4_hdmi->variant->csc_setup(vc4_hdmi, true);
- HD_WRITE(VC4_HD_CSC_12_11, (0x000 << 16) | 0x000);
- HD_WRITE(VC4_HD_CSC_14_13, (0x100 << 16) | 0x6e0);
- HD_WRITE(VC4_HD_CSC_22_21, (0x6e0 << 16) | 0x000);
- HD_WRITE(VC4_HD_CSC_24_23, (0x100 << 16) | 0x000);
- HD_WRITE(VC4_HD_CSC_32_31, (0x000 << 16) | 0x6e0);
- HD_WRITE(VC4_HD_CSC_34_33, (0x100 << 16) | 0x000);
vc4_encoder->limited_rgb_range = true;
} else {
+ if (vc4_hdmi->variant->csc_setup)
+ vc4_hdmi->variant->csc_setup(vc4_hdmi, false);
+
vc4_encoder->limited_rgb_range = false;
}
- /* The RGB order applies even when CSC is disabled. */
- HD_WRITE(VC4_HD_CSC_CTL, csc_ctl);
-
- HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
+ HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
+}
- if (debug_dump_regs) {
- struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+ bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
+ bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
+ int ret;
- dev_info(&hdmi->pdev->dev, "HDMI regs after:\n");
- drm_print_regset32(&p, &hdmi->hdmi_regset);
- drm_print_regset32(&p, &hdmi->hd_regset);
- }
+ HDMI_WRITE(HDMI_VID_CTL,
+ VC4_HD_VID_CTL_ENABLE |
+ VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
+ VC4_HD_VID_CTL_FRAME_COUNTER_RESET |
+ (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
+ (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
- HD_WRITE(VC4_HD_VID_CTL,
- HD_READ(VC4_HD_VID_CTL) |
- VC4_HD_VID_CTL_ENABLE |
- VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
- VC4_HD_VID_CTL_FRAME_COUNTER_RESET);
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_BLANKPIX);
if (vc4_encoder->hdmi_monitor) {
- HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
- HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
- ret = wait_for(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
} else {
- HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
- HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+ HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
+ HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
~(VC4_HDMI_RAM_PACKET_ENABLE));
- HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
- HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(HDMI_SCHEDULER_CONTROL) &
~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI);
- ret = wait_for(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000);
WARN_ONCE(ret, "Timeout waiting for "
"!VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE\n");
}
if (vc4_encoder->hdmi_monitor) {
- u32 drift;
-
- WARN_ON(!(HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) &
+ WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) &
VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE));
- HDMI_WRITE(VC4_HDMI_SCHEDULER_CONTROL,
- HDMI_READ(VC4_HDMI_SCHEDULER_CONTROL) |
+ HDMI_WRITE(HDMI_SCHEDULER_CONTROL,
+ HDMI_READ(HDMI_SCHEDULER_CONTROL) |
VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT);
- HDMI_WRITE(VC4_HDMI_RAM_PACKET_CONFIG,
+ HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
VC4_HDMI_RAM_PACKET_ENABLE);
vc4_hdmi_set_infoframes(encoder);
-
- drift = HDMI_READ(VC4_HDMI_FIFO_CTL);
- drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK;
-
- HDMI_WRITE(VC4_HDMI_FIFO_CTL,
- drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
- HDMI_WRITE(VC4_HDMI_FIFO_CTL,
- drift | VC4_HDMI_FIFO_CTL_RECENTER);
- usleep_range(1000, 1100);
- HDMI_WRITE(VC4_HDMI_FIFO_CTL,
- drift & ~VC4_HDMI_FIFO_CTL_RECENTER);
- HDMI_WRITE(VC4_HDMI_FIFO_CTL,
- drift | VC4_HDMI_FIFO_CTL_RECENTER);
-
- ret = wait_for(HDMI_READ(VC4_HDMI_FIFO_CTL) &
- VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
- WARN_ONCE(ret, "Timeout waiting for "
- "VC4_HDMI_FIFO_CTL_RECENTER_DONE");
}
+
+ vc4_hdmi_recenter_fifo(vc4_hdmi);
+}
+
+static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
}
static enum drm_mode_status
-vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
+vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
- /*
- * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
- * be faster than pixel clock, infinitesimally faster, tested in
- * simulation. Otherwise, exact value is unimportant for HDMI
- * operation." This conflicts with bcm2835's vc4 documentation, which
- * states HSM's clock has to be at least 108% of the pixel clock.
- *
- * Real life tests reveal that vc4's firmware statement holds up, and
- * users are able to use pixel clocks closer to HSM's, namely for
- * 1920x1200@60Hz. So it was decided to have leave a 1% margin between
- * both clocks. Which, for RPi0-3 implies a maximum pixel clock of
- * 162MHz.
- *
- * Additionally, the AXI clock needs to be at least 25% of
- * pixel clock, but HSM ends up being the limiting factor.
- */
- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+
+ if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -701,34 +778,54 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.enable = vc4_hdmi_encoder_enable,
};
+static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
+{
+ int i;
+ u32 channel_map = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (channel_mask & BIT(i))
+ channel_map |= i << (3 * i);
+ }
+ return channel_map;
+}
+
+static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
+{
+ int i;
+ u32 channel_map = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (channel_mask & BIT(i))
+ channel_map |= i << (4 * i);
+ }
+ return channel_map;
+}
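The two helpers build an identity channel map at different field strides:
3 bits per slot on VC4, 4 bits per slot on VC5. A standalone sketch (assumed
stereo mask, not driver code) shows the resulting register values:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t map_channels(uint32_t mask, unsigned int stride)
	{
		uint32_t map = 0;
		unsigned int i;

		for (i = 0; i < 8; i++)
			if (mask & (1u << i))
				map |= i << (stride * i);
		return map;
	}

	int main(void)
	{
		printf("vc4: 0x%02x\n", map_channels(0x3, 3)); /* 0 << 0 | 1 << 3 = 0x08 */
		printf("vc5: 0x%02x\n", map_channels(0x3, 4)); /* 0 << 0 | 1 << 4 = 0x10 */
		return 0;
	}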
+
/* HDMI audio codec callbacks */
-static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *hdmi)
+static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi)
{
- struct drm_device *drm = hdmi->encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- u32 hsm_clock = clk_get_rate(hdmi->hsm_clock);
+ u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
unsigned long n, m;
- rational_best_approximation(hsm_clock, hdmi->audio.samplerate,
+ rational_best_approximation(hsm_clock, vc4_hdmi->audio.samplerate,
VC4_HD_MAI_SMP_N_MASK >>
VC4_HD_MAI_SMP_N_SHIFT,
(VC4_HD_MAI_SMP_M_MASK >>
VC4_HD_MAI_SMP_M_SHIFT) + 1,
&n, &m);
- HD_WRITE(VC4_HD_MAI_SMP,
- VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
- VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
+ HDMI_WRITE(HDMI_MAI_SMP,
+ VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
+ VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
}
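rational_best_approximation() searches for the best n/m close to
audio_clock_hz / samplerate within the register field widths, so the MAI
block recovers samplerate ~= clock * m / n. A naive userspace stand-in
(clock rate and field bounds assumed purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static void best_ratio(uint64_t num, uint64_t den, uint64_t max_n,
			       uint64_t max_m, uint64_t *bn, uint64_t *bm)
	{
		double target = (double)num / den, best = 1e18;
		uint64_t m;

		for (m = 1; m <= max_m; m++) {
			uint64_t n = (uint64_t)(target * m + 0.5);
			double err;

			if (n > max_n)
				break;
			err = target - (double)n / m;
			if (err < 0)
				err = -err;
			if (err < best) {
				best = err;
				*bn = n;
				*bm = m;
			}
		}
	}

	int main(void)
	{
		uint64_t n = 0, m = 0;

		/* Legacy 163.68 MHz HSM rate vs 48 kHz audio (assumed). */
		best_ratio(163682819, 48000, 0xffffff, 0xff, &n, &m);
		printf("n=%llu m=%llu\n", (unsigned long long)n,
		       (unsigned long long)m);
		return 0;
	}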
-static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi)
+static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi)
{
- struct drm_encoder *encoder = hdmi->encoder;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
struct drm_crtc *crtc = encoder->crtc;
- struct drm_device *drm = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- u32 samplerate = hdmi->audio.samplerate;
+ u32 samplerate = vc4_hdmi->audio.samplerate;
u32 n, cts;
u64 tmp;
@@ -737,7 +834,7 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi)
do_div(tmp, 128 * samplerate);
cts = tmp;
- HDMI_WRITE(VC4_HDMI_CRP_CFG,
+ HDMI_WRITE(HDMI_CRP_CFG,
VC4_HDMI_CRP_CFG_EXTERNAL_CTS_EN |
VC4_SET_FIELD(n, VC4_HDMI_CRP_CFG_N));
@@ -746,8 +843,8 @@ static void vc4_hdmi_set_n_cts(struct vc4_hdmi *hdmi)
* providing a CTS_1 value. The two CTS values are alternated
* between based on the period fields
*/
- HDMI_WRITE(VC4_HDMI_CTS_0, cts);
- HDMI_WRITE(VC4_HDMI_CTS_1, cts);
+ HDMI_WRITE(HDMI_CTS_0, cts);
+ HDMI_WRITE(HDMI_CTS_1, cts);
}
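The CTS computation above follows the HDMI audio clock regeneration relation
CTS = f_pixel * N / (128 * fs). For 1080p60 at 48 kHz with the
spec-recommended N of 6144 (values assumed for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pixel_hz = 148500000, n = 6144, fs = 48000;
		uint64_t cts = pixel_hz * n / (128 * fs); /* = 148500 */

		printf("CTS = %llu\n", (unsigned long long)cts);
		return 0;
	}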
static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
@@ -760,26 +857,25 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static int vc4_hdmi_audio_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct vc4_hdmi *hdmi = dai_to_hdmi(dai);
- struct drm_encoder *encoder = hdmi->encoder;
- struct vc4_dev *vc4 = to_vc4_dev(encoder->dev);
+ struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct drm_connector *connector = &vc4_hdmi->connector;
int ret;
- if (hdmi->audio.substream && hdmi->audio.substream != substream)
+ if (vc4_hdmi->audio.substream && vc4_hdmi->audio.substream != substream)
return -EINVAL;
- hdmi->audio.substream = substream;
+ vc4_hdmi->audio.substream = substream;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
- if (!encoder->crtc || !(HDMI_READ(VC4_HDMI_RAM_PACKET_CONFIG) &
+ if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
- ret = snd_pcm_hw_constraint_eld(substream->runtime,
- hdmi->connector->eld);
+ ret = snd_pcm_hw_constraint_eld(substream->runtime, connector->eld);
if (ret)
return ret;
@@ -791,34 +887,33 @@ static int vc4_hdmi_audio_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
-static void vc4_hdmi_audio_reset(struct vc4_hdmi *hdmi)
+static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
{
- struct drm_encoder *encoder = hdmi->encoder;
- struct drm_device *drm = encoder->dev;
- struct device *dev = &hdmi->pdev->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct device *dev = &vc4_hdmi->pdev->dev;
int ret;
+ vc4_hdmi->audio.streaming = false;
ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO);
if (ret)
dev_err(dev, "Failed to stop audio infoframe: %d\n", ret);
- HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_RESET);
- HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_ERRORF);
- HD_WRITE(VC4_HD_MAI_CTL, VC4_HD_MAI_CTL_FLUSH);
+ HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET);
+ HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF);
+ HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH);
}
static void vc4_hdmi_audio_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct vc4_hdmi *hdmi = dai_to_hdmi(dai);
+ struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
- if (substream != hdmi->audio.substream)
+ if (substream != vc4_hdmi->audio.substream)
return;
- vc4_hdmi_audio_reset(hdmi);
+ vc4_hdmi_audio_reset(vc4_hdmi);
- hdmi->audio.substream = NULL;
+ vc4_hdmi->audio.substream = NULL;
}
/* HDMI audio codec callbacks */
@@ -826,72 +921,68 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct vc4_hdmi *hdmi = dai_to_hdmi(dai);
- struct drm_encoder *encoder = hdmi->encoder;
- struct drm_device *drm = encoder->dev;
- struct device *dev = &hdmi->pdev->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
+ struct device *dev = &vc4_hdmi->pdev->dev;
u32 audio_packet_config, channel_mask;
- u32 channel_map, i;
+ u32 channel_map;
- if (substream != hdmi->audio.substream)
+ if (substream != vc4_hdmi->audio.substream)
return -EINVAL;
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
params_rate(params), params_width(params),
params_channels(params));
- hdmi->audio.channels = params_channels(params);
- hdmi->audio.samplerate = params_rate(params);
+ vc4_hdmi->audio.channels = params_channels(params);
+ vc4_hdmi->audio.samplerate = params_rate(params);
- HD_WRITE(VC4_HD_MAI_CTL,
- VC4_HD_MAI_CTL_RESET |
- VC4_HD_MAI_CTL_FLUSH |
- VC4_HD_MAI_CTL_DLATE |
- VC4_HD_MAI_CTL_ERRORE |
- VC4_HD_MAI_CTL_ERRORF);
+ HDMI_WRITE(HDMI_MAI_CTL,
+ VC4_HD_MAI_CTL_RESET |
+ VC4_HD_MAI_CTL_FLUSH |
+ VC4_HD_MAI_CTL_DLATE |
+ VC4_HD_MAI_CTL_ERRORE |
+ VC4_HD_MAI_CTL_ERRORF);
- vc4_hdmi_audio_set_mai_clock(hdmi);
+ vc4_hdmi_audio_set_mai_clock(vc4_hdmi);
+ /* The B frame identifier should match the value used by alsa-lib (8) */
audio_packet_config =
VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT |
VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS |
- VC4_SET_FIELD(0xf, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER);
+ VC4_SET_FIELD(0x8, VC4_HDMI_AUDIO_PACKET_B_FRAME_IDENTIFIER);
- channel_mask = GENMASK(hdmi->audio.channels - 1, 0);
+ channel_mask = GENMASK(vc4_hdmi->audio.channels - 1, 0);
audio_packet_config |= VC4_SET_FIELD(channel_mask,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold. This logic mimics the firmware's. */
- if (hdmi->audio.samplerate > 96000) {
- HD_WRITE(VC4_HD_MAI_THR,
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
- } else if (hdmi->audio.samplerate > 48000) {
- HD_WRITE(VC4_HD_MAI_THR,
- VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
+ if (vc4_hdmi->audio.samplerate > 96000) {
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
+ } else if (vc4_hdmi->audio.samplerate > 48000) {
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x14, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x12, VC4_HD_MAI_THR_DREQLOW));
} else {
- HD_WRITE(VC4_HD_MAI_THR,
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW));
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_DREQLOW));
}
- HDMI_WRITE(VC4_HDMI_MAI_CONFIG,
+ HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
VC4_SET_FIELD(channel_mask, VC4_HDMI_MAI_CHANNEL_MASK));
- channel_map = 0;
- for (i = 0; i < 8; i++) {
- if (channel_mask & BIT(i))
- channel_map |= i << (3 * i);
- }
+ channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask);
+ HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map);
+ HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config);
+ vc4_hdmi_set_n_cts(vc4_hdmi);
- HDMI_WRITE(VC4_HDMI_MAI_CHANNEL_MAP, channel_map);
- HDMI_WRITE(VC4_HDMI_AUDIO_PACKET_CONFIG, audio_packet_config);
- vc4_hdmi_set_n_cts(hdmi);
+ vc4_hdmi_set_audio_infoframe(encoder);
return 0;
}
@@ -899,30 +990,31 @@ static int vc4_hdmi_audio_hw_params(struct snd_pcm_substream *substream,
static int vc4_hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
- struct vc4_hdmi *hdmi = dai_to_hdmi(dai);
- struct drm_encoder *encoder = hdmi->encoder;
- struct drm_device *drm = encoder->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- vc4_hdmi_set_audio_infoframe(encoder);
- HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0,
- HDMI_READ(VC4_HDMI_TX_PHY_CTL0) &
- ~VC4_HDMI_TX_PHY_RNG_PWRDN);
- HD_WRITE(VC4_HD_MAI_CTL,
- VC4_SET_FIELD(hdmi->audio.channels,
- VC4_HD_MAI_CTL_CHNUM) |
- VC4_HD_MAI_CTL_ENABLE);
+ vc4_hdmi->audio.streaming = true;
+
+ if (vc4_hdmi->variant->phy_rng_enable)
+ vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
+
+ HDMI_WRITE(HDMI_MAI_CTL,
+ VC4_SET_FIELD(vc4_hdmi->audio.channels,
+ VC4_HD_MAI_CTL_CHNUM) |
+ VC4_HD_MAI_CTL_ENABLE);
break;
case SNDRV_PCM_TRIGGER_STOP:
- HD_WRITE(VC4_HD_MAI_CTL,
- VC4_HD_MAI_CTL_DLATE |
- VC4_HD_MAI_CTL_ERRORE |
- VC4_HD_MAI_CTL_ERRORF);
- HDMI_WRITE(VC4_HDMI_TX_PHY_CTL0,
- HDMI_READ(VC4_HDMI_TX_PHY_CTL0) |
- VC4_HDMI_TX_PHY_RNG_PWRDN);
+ HDMI_WRITE(HDMI_MAI_CTL,
+ VC4_HD_MAI_CTL_DLATE |
+ VC4_HD_MAI_CTL_ERRORE |
+ VC4_HD_MAI_CTL_ERRORF);
+
+ if (vc4_hdmi->variant->phy_rng_disable)
+ vc4_hdmi->variant->phy_rng_disable(vc4_hdmi);
+
+ vc4_hdmi->audio.streaming = false;
+
break;
default:
break;
@@ -943,10 +1035,11 @@ static int vc4_hdmi_audio_eld_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct vc4_hdmi *hdmi = snd_component_to_hdmi(component);
+ struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component);
+ struct drm_connector *connector = &vc4_hdmi->connector;
uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- uinfo->count = sizeof(hdmi->connector->eld);
+ uinfo->count = sizeof(connector->eld);
return 0;
}
@@ -955,10 +1048,11 @@ static int vc4_hdmi_audio_eld_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct vc4_hdmi *hdmi = snd_component_to_hdmi(component);
+ struct vc4_hdmi *vc4_hdmi = snd_component_to_hdmi(component);
+ struct drm_connector *connector = &vc4_hdmi->connector;
- memcpy(ucontrol->value.bytes.data, hdmi->connector->eld,
- sizeof(hdmi->connector->eld));
+ memcpy(ucontrol->value.bytes.data, connector->eld,
+ sizeof(connector->eld));
return 0;
}
@@ -983,6 +1077,7 @@ static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = {
};
static const struct snd_soc_component_driver vc4_hdmi_audio_component_drv = {
+ .name = "vc4-hdmi-codec-dai-component",
.controls = vc4_hdmi_audio_controls,
.num_controls = ARRAY_SIZE(vc4_hdmi_audio_controls),
.dapm_widgets = vc4_hdmi_audio_widgets,
@@ -1023,9 +1118,9 @@ static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
static int vc4_hdmi_audio_cpu_dai_probe(struct snd_soc_dai *dai)
{
- struct vc4_hdmi *hdmi = dai_to_hdmi(dai);
+ struct vc4_hdmi *vc4_hdmi = dai_to_hdmi(dai);
- snd_soc_dai_init_dma_data(dai, &hdmi->audio.dma_data, NULL);
+ snd_soc_dai_init_dma_data(dai, &vc4_hdmi->audio.dma_data, NULL);
return 0;
}
@@ -1051,12 +1146,15 @@ static const struct snd_dmaengine_pcm_config pcm_conf = {
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
-static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
+static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
- struct snd_soc_dai_link *dai_link = &hdmi->audio.link;
- struct snd_soc_card *card = &hdmi->audio.card;
- struct device *dev = &hdmi->pdev->dev;
+ const struct vc4_hdmi_register *mai_data =
+ &vc4_hdmi->variant->registers[HDMI_MAI_DATA];
+ struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link;
+ struct snd_soc_card *card = &vc4_hdmi->audio.card;
+ struct device *dev = &vc4_hdmi->pdev->dev;
const __be32 *addr;
+ int index;
int ret;
if (!of_find_property(dev->of_node, "dmas", NULL)) {
@@ -1065,6 +1163,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
return 0;
}
+ if (mai_data->reg != VC4_HD) {
+ WARN_ONCE(true, "MAI isn't in the HD block\n");
+ return -EINVAL;
+ }
+
/*
* Get the physical address of VC4_HD_MAI_DATA. We need to retrieve
* the bus address specified in the DT, because the physical address
@@ -1072,10 +1175,16 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
* for DMA transfers.
* This VC/MMU should probably be exposed to avoid this kind of hacks.
*/
- addr = of_get_address(dev->of_node, 1, NULL, NULL);
- hdmi->audio.dma_data.addr = be32_to_cpup(addr) + VC4_HD_MAI_DATA;
- hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- hdmi->audio.dma_data.maxburst = 2;
+ index = of_property_match_string(dev->of_node, "reg-names", "hd");
+ /* Before BCM2711, we don't have a named register range */
+ if (index < 0)
+ index = 1;
+
+ addr = of_get_address(dev->of_node, index, NULL, NULL);
+
+ vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
+ vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ vc4_hdmi->audio.dma_data.maxburst = 2;
ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0);
if (ret) {
@@ -1098,9 +1207,9 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
return ret;
}
- dai_link->cpus = &hdmi->audio.cpu;
- dai_link->codecs = &hdmi->audio.codec;
- dai_link->platforms = &hdmi->audio.platform;
+ dai_link->cpus = &vc4_hdmi->audio.cpu;
+ dai_link->codecs = &vc4_hdmi->audio.codec;
+ dai_link->platforms = &vc4_hdmi->audio.platform;
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
@@ -1115,7 +1224,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
card->dai_link = dai_link;
card->num_links = 1;
- card->name = "vc4-hdmi";
+ card->name = vc4_hdmi->variant->card_name;
card->dev = dev;
card->owner = THIS_MODULE;
@@ -1126,7 +1235,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
* now stored in card->drvdata and should be retrieved with
* snd_soc_card_get_drvdata() if needed.
*/
- snd_soc_card_set_drvdata(card, hdmi);
+ snd_soc_card_set_drvdata(card, vc4_hdmi);
ret = devm_snd_soc_register_card(dev, card);
if (ret)
dev_err(dev, "Could not register sound card: %d\n", ret);
@@ -1138,35 +1247,35 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
{
- struct vc4_dev *vc4 = priv;
- struct vc4_hdmi *hdmi = vc4->hdmi;
-
- if (hdmi->cec_irq_was_rx) {
- if (hdmi->cec_rx_msg.len)
- cec_received_msg(hdmi->cec_adap, &hdmi->cec_rx_msg);
- } else if (hdmi->cec_tx_ok) {
- cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_OK,
+ struct vc4_hdmi *vc4_hdmi = priv;
+
+ if (vc4_hdmi->cec_irq_was_rx) {
+ if (vc4_hdmi->cec_rx_msg.len)
+ cec_received_msg(vc4_hdmi->cec_adap,
+ &vc4_hdmi->cec_rx_msg);
+ } else if (vc4_hdmi->cec_tx_ok) {
+ cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
} else {
/*
* This CEC implementation makes 1 retry, so if we
* get a NACK, then that means it made 2 attempts.
*/
- cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_NACK,
+ cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK,
0, 2, 0, 0);
}
return IRQ_HANDLED;
}
-static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1)
+static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
{
- struct cec_msg *msg = &vc4->hdmi->cec_rx_msg;
+ struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
unsigned int i;
msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
for (i = 0; i < msg->len; i += 4) {
- u32 val = HDMI_READ(VC4_HDMI_CEC_RX_DATA_1 + i);
+ u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
msg->msg[i] = val & 0xff;
msg->msg[i + 1] = (val >> 8) & 0xff;
@@ -1177,38 +1286,37 @@ static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1)
static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
{
- struct vc4_dev *vc4 = priv;
- struct vc4_hdmi *hdmi = vc4->hdmi;
- u32 stat = HDMI_READ(VC4_HDMI_CPU_STATUS);
+ struct vc4_hdmi *vc4_hdmi = priv;
+ u32 stat = HDMI_READ(HDMI_CEC_CPU_STATUS);
u32 cntrl1, cntrl5;
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
- hdmi->cec_rx_msg.len = 0;
- cntrl1 = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
- cntrl5 = HDMI_READ(VC4_HDMI_CEC_CNTRL_5);
- hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
- if (hdmi->cec_irq_was_rx) {
- vc4_cec_read_msg(vc4, cntrl1);
+ vc4_hdmi->cec_rx_msg.len = 0;
+ cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
+ cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5);
+ vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
+ if (vc4_hdmi->cec_irq_was_rx) {
+ vc4_cec_read_msg(vc4_hdmi, cntrl1);
cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1);
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
} else {
- hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
+ vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
}
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1);
- HDMI_WRITE(VC4_HDMI_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, cntrl1);
+ HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC);
return IRQ_WAKE_THREAD;
}
static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct vc4_dev *vc4 = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
- u32 val = HDMI_READ(VC4_HDMI_CEC_CNTRL_5);
+ u32 val = HDMI_READ(HDMI_CEC_CNTRL_5);
val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
@@ -1217,30 +1325,30 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
if (enable) {
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val |
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val);
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_2,
- ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
- ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
- ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
- ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
- ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_3,
- ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
- ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
- ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
- ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_4,
- ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
- ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
- ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
- ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
-
- HDMI_WRITE(VC4_HDMI_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val);
+ HDMI_WRITE(HDMI_CEC_CNTRL_2,
+ ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
+ ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
+ ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
+ ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
+ ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
+ HDMI_WRITE(HDMI_CEC_CNTRL_3,
+ ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
+ ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
+ ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
+ ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
+ HDMI_WRITE(HDMI_CEC_CNTRL_4,
+ ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
+ ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
+ ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
+ ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
+
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
} else {
- HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val |
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+ HDMI_WRITE(HDMI_CEC_CNTRL_5, val |
VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
}
return 0;
@@ -1248,10 +1356,10 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
- struct vc4_dev *vc4 = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1,
- (HDMI_READ(VC4_HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
+ HDMI_WRITE(HDMI_CEC_CNTRL_1,
+ (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
return 0;
}
@@ -1259,25 +1367,25 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct vc4_dev *vc4 = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
u32 val;
unsigned int i;
for (i = 0; i < msg->len; i += 4)
- HDMI_WRITE(VC4_HDMI_CEC_TX_DATA_1 + i,
+ HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
(msg->msg[i]) |
(msg->msg[i + 1] << 8) |
(msg->msg[i + 2] << 16) |
(msg->msg[i + 3] << 24));
- val = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
+ val = HDMI_READ(HDMI_CEC_CNTRL_1);
val &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val);
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
val &= ~VC4_HDMI_CEC_MESSAGE_LENGTH_MASK;
val |= (msg->len - 1) << VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT;
val |= VC4_HDMI_CEC_START_XMIT_BEGIN;
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val);
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
return 0;
}
@@ -1286,61 +1394,275 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
.adap_log_addr = vc4_hdmi_cec_adap_log_addr,
.adap_transmit = vc4_hdmi_cec_adap_transmit,
};
-#endif
-static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
-#ifdef CONFIG_DRM_VC4_HDMI_CEC
struct cec_connector_info conn_info;
-#endif
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
- struct vc4_hdmi *hdmi;
- struct vc4_hdmi_encoder *vc4_hdmi_encoder;
- struct device_node *ddc_node;
+ struct platform_device *pdev = vc4_hdmi->pdev;
u32 value;
int ret;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
+ if (!vc4_hdmi->variant->cec_available)
+ return 0;
+
+ vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
+ vc4_hdmi, "vc4",
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
+ ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
+ if (ret < 0)
+ return ret;
+
+ cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
+ cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
+
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
+ value = HDMI_READ(HDMI_CEC_CNTRL_1);
+ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+ /*
+ * Set the logical address to Unregistered and set the clock
+ * divider: the hsm_clock rate and this divider setting will
+ * give a 40 kHz CEC clock.
+ */
+ value |= VC4_HDMI_CEC_ADDR_MASK |
+ (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
+ ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
+ if (ret)
+ goto err_delete_cec_adap;
+
+ ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
+ if (ret < 0)
+ goto err_delete_cec_adap;
+
+ return 0;
+
+err_delete_cec_adap:
+ cec_delete_adapter(vc4_hdmi->cec_adap);
+
+ return ret;
+}
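On the 40 kHz figure mentioned in the comment above: with the legacy fixed
HSM rate of 163682819 Hz on BCM2835 (assumed here, since this rework lets
the HSM rate float), the divide-by-4091 lands close to it:

	#include <stdio.h>

	int main(void)
	{
		printf("cec clk ~ %u Hz\n", 163682819u / 4091u); /* ~40010 Hz */
		return 0;
	}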
+
+static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
+{
+ cec_unregister_adapter(vc4_hdmi->cec_adap);
+}
+#else
+static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+{
+ return 0;
+}
+
+static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {}
+
+#endif
+
+static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
+ struct debugfs_regset32 *regset,
+ enum vc4_hdmi_regs reg)
+{
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ struct debugfs_reg32 *regs, *new_regs;
+ unsigned int count = 0;
+ unsigned int i;
+
+ regs = kcalloc(variant->num_registers, sizeof(*regs),
+ GFP_KERNEL);
+ if (!regs)
return -ENOMEM;
- vc4_hdmi_encoder = devm_kzalloc(dev, sizeof(*vc4_hdmi_encoder),
- GFP_KERNEL);
- if (!vc4_hdmi_encoder)
+ for (i = 0; i < variant->num_registers; i++) {
+ const struct vc4_hdmi_register *field = &variant->registers[i];
+
+ if (field->reg != reg)
+ continue;
+
+ regs[count].name = field->name;
+ regs[count].offset = field->offset;
+ count++;
+ }
+
+ new_regs = krealloc(regs, count * sizeof(*regs), GFP_KERNEL);
+ if (!new_regs)
return -ENOMEM;
- vc4_hdmi_encoder->base.type = VC4_ENCODER_TYPE_HDMI;
- hdmi->encoder = &vc4_hdmi_encoder->base.base;
-
- hdmi->pdev = pdev;
- hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0);
- if (IS_ERR(hdmi->hdmicore_regs))
- return PTR_ERR(hdmi->hdmicore_regs);
-
- hdmi->hd_regs = vc4_ioremap_regs(pdev, 1);
- if (IS_ERR(hdmi->hd_regs))
- return PTR_ERR(hdmi->hd_regs);
-
- hdmi->hdmi_regset.base = hdmi->hdmicore_regs;
- hdmi->hdmi_regset.regs = hdmi_regs;
- hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs);
- hdmi->hd_regset.base = hdmi->hd_regs;
- hdmi->hd_regset.regs = hd_regs;
- hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs);
-
- hdmi->pixel_clock = devm_clk_get(dev, "pixel");
- if (IS_ERR(hdmi->pixel_clock)) {
- ret = PTR_ERR(hdmi->pixel_clock);
+
+ regset->base = __vc4_hdmi_get_field_base(vc4_hdmi, reg);
+ regset->regs = new_regs;
+ regset->nregs = count;
+
+ return 0;
+}
+
+static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ vc4_hdmi->hdmicore_regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(vc4_hdmi->hdmicore_regs))
+ return PTR_ERR(vc4_hdmi->hdmicore_regs);
+
+ vc4_hdmi->hd_regs = vc4_ioremap_regs(pdev, 1);
+ if (IS_ERR(vc4_hdmi->hd_regs))
+ return PTR_ERR(vc4_hdmi->hd_regs);
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ if (ret)
+ return ret;
+
+ ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ if (ret)
+ return ret;
+
+ vc4_hdmi->pixel_clock = devm_clk_get(dev, "pixel");
+ if (IS_ERR(vc4_hdmi->pixel_clock)) {
+ ret = PTR_ERR(vc4_hdmi->pixel_clock);
if (ret != -EPROBE_DEFER)
DRM_ERROR("Failed to get pixel clock\n");
return ret;
}
- hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
- if (IS_ERR(hdmi->hsm_clock)) {
+
+ vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(vc4_hdmi->hsm_clock)) {
DRM_ERROR("Failed to get HDMI state machine clock\n");
- return PTR_ERR(hdmi->hsm_clock);
+ return PTR_ERR(vc4_hdmi->hsm_clock);
}
+ vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
+
+ return 0;
+}
+
+static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!vc4_hdmi->hdmicore_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->hd_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->hd_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->cec_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->csc_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->dvp_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->phy_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->ram_regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm");
+ if (!res)
+ return -ENODEV;
+
+ vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!vc4_hdmi->rm_regs)
+ return -ENOMEM;
+
+ vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(vc4_hdmi->hsm_clock)) {
+ DRM_ERROR("Failed to get HDMI state machine clock\n");
+ return PTR_ERR(vc4_hdmi->hsm_clock);
+ }
+
+ vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
+ if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
+ DRM_ERROR("Failed to get pixel bvb clock\n");
+ return PTR_ERR(vc4_hdmi->pixel_bvb_clock);
+ }
+
+ vc4_hdmi->audio_clock = devm_clk_get(dev, "audio");
+ if (IS_ERR(vc4_hdmi->audio_clock)) {
+ DRM_ERROR("Failed to get audio clock\n");
+ return PTR_ERR(vc4_hdmi->audio_clock);
+ }
+
+ vc4_hdmi->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(vc4_hdmi->reset)) {
+ DRM_ERROR("Failed to get HDMI reset line\n");
+ return PTR_ERR(vc4_hdmi->reset);
+ }
+
+ return 0;
+}
+
+static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_hdmi *vc4_hdmi;
+ struct drm_encoder *encoder;
+ struct device_node *ddc_node;
+ u32 value;
+ int ret;
+
+ vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
+ if (!vc4_hdmi)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, vc4_hdmi);
+ encoder = &vc4_hdmi->encoder.base.base;
+ vc4_hdmi->encoder.base.type = variant->encoder_type;
+ vc4_hdmi->encoder.base.pre_crtc_configure = vc4_hdmi_encoder_pre_crtc_configure;
+ vc4_hdmi->encoder.base.pre_crtc_enable = vc4_hdmi_encoder_pre_crtc_enable;
+ vc4_hdmi->encoder.base.post_crtc_enable = vc4_hdmi_encoder_post_crtc_enable;
+ vc4_hdmi->encoder.base.post_crtc_disable = vc4_hdmi_encoder_post_crtc_disable;
+ vc4_hdmi->encoder.base.post_crtc_powerdown = vc4_hdmi_encoder_post_crtc_powerdown;
+ vc4_hdmi->pdev = pdev;
+ vc4_hdmi->variant = variant;
+
+ ret = variant->init_resources(vc4_hdmi);
+ if (ret)
+ return ret;
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
@@ -1348,123 +1670,62 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
- hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ vc4_hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
- if (!hdmi->ddc) {
+ if (!vc4_hdmi->ddc) {
DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
}
- /* This is the rate that is set by the firmware. The number
- * needs to be a bit higher than the pixel clock rate
- * (generally 148.5Mhz).
- */
- ret = clk_set_rate(hdmi->hsm_clock, HSM_CLOCK_FREQ);
- if (ret) {
- DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto err_put_i2c;
- }
-
- ret = clk_prepare_enable(hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
- ret);
- goto err_put_i2c;
- }
-
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
enum of_gpio_flags hpd_gpio_flags;
- hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
- "hpd-gpios", 0,
- &hpd_gpio_flags);
- if (hdmi->hpd_gpio < 0) {
- ret = hdmi->hpd_gpio;
+ vc4_hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
+ "hpd-gpios", 0,
+ &hpd_gpio_flags);
+ if (vc4_hdmi->hpd_gpio < 0) {
+ ret = vc4_hdmi->hpd_gpio;
goto err_unprepare_hsm;
}
- hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
+ vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
- vc4->hdmi = hdmi;
-
- /* HDMI core must be enabled. */
- if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) {
- HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
- udelay(1);
- HD_WRITE(VC4_HD_M_CTL, 0);
-
- HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE);
- }
pm_runtime_enable(dev);
- drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
- hdmi->connector =
- vc4_hdmi_connector_init(drm, hdmi->encoder, hdmi->ddc);
- if (IS_ERR(hdmi->connector)) {
- ret = PTR_ERR(hdmi->connector);
+ ret = vc4_hdmi_connector_init(drm, vc4_hdmi);
+ if (ret)
goto err_destroy_encoder;
- }
-#ifdef CONFIG_DRM_VC4_HDMI_CEC
- hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4, "vc4",
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO, 1);
- ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
- if (ret < 0)
- goto err_destroy_conn;
- cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
- cec_s_conn_info(hdmi->cec_adap, &conn_info);
-
- HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
- value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
- value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
- /*
- * Set the logical address to Unregistered and set the clock
- * divider: the hsm_clock rate and this divider setting will
- * give a 40 kHz CEC clock.
- */
- value |= VC4_HDMI_CEC_ADDR_MASK |
- (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
- HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, value);
- ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4);
+ ret = vc4_hdmi_cec_init(vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
- ret = cec_register_adapter(hdmi->cec_adap, dev);
- if (ret < 0)
- goto err_delete_cec_adap;
-#endif
+ goto err_destroy_conn;
- ret = vc4_hdmi_audio_init(hdmi);
+ ret = vc4_hdmi_audio_init(vc4_hdmi);
if (ret)
- goto err_destroy_encoder;
+ goto err_free_cec;
- vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi);
+ vc4_debugfs_add_file(drm, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs,
+ vc4_hdmi);
return 0;
-#ifdef CONFIG_DRM_VC4_HDMI_CEC
-err_delete_cec_adap:
- cec_delete_adapter(hdmi->cec_adap);
+err_free_cec:
+ vc4_hdmi_cec_exit(vc4_hdmi);
err_destroy_conn:
- vc4_hdmi_connector_destroy(hdmi->connector);
-#endif
+ vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
err_destroy_encoder:
- drm_encoder_cleanup(hdmi->encoder);
+ drm_encoder_cleanup(encoder);
err_unprepare_hsm:
- clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
-err_put_i2c:
- put_device(&hdmi->ddc->dev);
+ put_device(&vc4_hdmi->ddc->dev);
return ret;
}
@@ -1472,20 +1733,39 @@ err_put_i2c:
static void vc4_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
- struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct vc4_hdmi *vc4_hdmi;
- cec_unregister_adapter(hdmi->cec_adap);
- vc4_hdmi_connector_destroy(hdmi->connector);
- drm_encoder_cleanup(hdmi->encoder);
+ /*
+ * ASoC makes it a bit hard to retrieve a pointer to the
+ * vc4_hdmi structure. Registering the card overwrites our
+ * device drvdata with a pointer to the snd_soc_card structure,
+ * which can then be used to retrieve whatever drvdata we want
+ * to associate with it.
+ *
+ * However, that doesn't work when we never register an ASoC
+ * card (for example because an old DT is missing the dmas
+ * properties): the card isn't registered, so the device
+ * drvdata is never overwritten.
+ *
+ * We can handle both cases by making sure the snd_soc_card
+ * pointer and the vc4_hdmi structure live at the same memory
+ * address, so either interpretation of drvdata is valid.
+ */
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
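+
+ /*
+ * A minimal sketch of what the BUILD_BUG_ONs above guarantee:
+ * with both offsets at zero, the same drvdata pointer is valid
+ * under either interpretation, e.g.:
+ *
+ * struct snd_soc_card *card = dev_get_drvdata(dev);
+ * struct vc4_hdmi *hdmi = (struct vc4_hdmi *)card;
+ */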
+ vc4_hdmi = dev_get_drvdata(dev);
- clk_disable_unprepare(hdmi->hsm_clock);
- pm_runtime_disable(dev);
+ kfree(vc4_hdmi->hdmi_regset.regs);
+ kfree(vc4_hdmi->hd_regset.regs);
- put_device(&hdmi->ddc->dev);
+ vc4_hdmi_cec_exit(vc4_hdmi);
+ vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
+ drm_encoder_cleanup(&vc4_hdmi->encoder.base.base);
+
+ pm_runtime_disable(dev);
- vc4->hdmi = NULL;
+ put_device(&vc4_hdmi->ddc->dev);
}
static const struct component_ops vc4_hdmi_ops = {
@@ -1504,8 +1784,80 @@ static int vc4_hdmi_dev_remove(struct platform_device *pdev)
return 0;
}
+static const struct vc4_hdmi_variant bcm2835_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI0,
+ .debugfs_name = "hdmi_regs",
+ .card_name = "vc4-hdmi",
+ .max_pixel_clock = 162000000,
+ .cec_available = true,
+ .registers = vc4_hdmi_fields,
+ .num_registers = ARRAY_SIZE(vc4_hdmi_fields),
+
+ .init_resources = vc4_hdmi_init_resources,
+ .csc_setup = vc4_hdmi_csc_setup,
+ .reset = vc4_hdmi_reset,
+ .set_timings = vc4_hdmi_set_timings,
+ .phy_init = vc4_hdmi_phy_init,
+ .phy_disable = vc4_hdmi_phy_disable,
+ .phy_rng_enable = vc4_hdmi_phy_rng_enable,
+ .phy_rng_disable = vc4_hdmi_phy_rng_disable,
+ .channel_map = vc4_hdmi_channel_map,
+};
+
+static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI0,
+ .debugfs_name = "hdmi0_regs",
+ .card_name = "vc4-hdmi-0",
+ .max_pixel_clock = 297000000,
+ .registers = vc5_hdmi_hdmi0_fields,
+ .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc5_hdmi_phy_init,
+ .phy_disable = vc5_hdmi_phy_disable,
+ .phy_rng_enable = vc5_hdmi_phy_rng_enable,
+ .phy_rng_disable = vc5_hdmi_phy_rng_disable,
+ .channel_map = vc5_hdmi_channel_map,
+};
+
+static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI1,
+ .debugfs_name = "hdmi1_regs",
+ .card_name = "vc4-hdmi-1",
+ .max_pixel_clock = 297000000,
+ .registers = vc5_hdmi_hdmi1_fields,
+ .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_1,
+ PHY_LANE_0,
+ PHY_LANE_CK,
+ PHY_LANE_2,
+ },
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc5_hdmi_phy_init,
+ .phy_disable = vc5_hdmi_phy_disable,
+ .phy_rng_enable = vc5_hdmi_phy_rng_enable,
+ .phy_rng_disable = vc5_hdmi_phy_rng_disable,
+ .channel_map = vc5_hdmi_channel_map,
+};
+
static const struct of_device_id vc4_hdmi_dt_match[] = {
- { .compatible = "brcm,bcm2835-hdmi" },
+ { .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant },
+ { .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant },
+ { .compatible = "brcm,bcm2711-hdmi1", .data = &bcm2711_hdmi1_variant },
{}
};
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
new file mode 100644
index 000000000000..63c6f8bddf1d
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -0,0 +1,184 @@
+#ifndef _VC4_HDMI_H_
+#define _VC4_HDMI_H_
+
+#include <drm/drm_connector.h>
+#include <media/cec.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/soc.h>
+
+#include "vc4_drv.h"
+
+/* VC4 HDMI encoder KMS struct */
+struct vc4_hdmi_encoder {
+ struct vc4_encoder base;
+ bool hdmi_monitor;
+ bool limited_rgb_range;
+};
+
+static inline struct vc4_hdmi_encoder *
+to_vc4_hdmi_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_hdmi_encoder, base.base);
+}
+
+struct drm_display_mode;
+
+struct vc4_hdmi;
+struct vc4_hdmi_register;
+
+enum vc4_hdmi_phy_channel {
+ PHY_LANE_0 = 0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+};
+
+struct vc4_hdmi_variant {
+ /* Encoder Type for that controller */
+ enum vc4_encoder_type encoder_type;
+
+ /* ALSA card name */
+ const char *card_name;
+
+ /* Filename to expose the registers in debugfs */
+ const char *debugfs_name;
+
+ /* Set to true when CEC support is available */
+ bool cec_available;
+
+ /* Maximum pixel clock supported by the controller (in Hz) */
+ unsigned long long max_pixel_clock;
+
+ /* List of the registers available on that variant */
+ const struct vc4_hdmi_register *registers;
+
+ /* Number of registers on that variant */
+ unsigned int num_registers;
+
+ /* BCM2711 Only.
+ * The variants don't map the lanes in the same order in the
+ * PHY, so this is an array mapping the HDMI channel (index)
+ * to the PHY lane (value).
+ */
+ enum vc4_hdmi_phy_channel phy_lane_mapping[4];
+
+ /* Callback to get the resources (memory region, interrupts,
+ * clocks, etc) for that variant.
+ */
+ int (*init_resources)(struct vc4_hdmi *vc4_hdmi);
+
+ /* Callback to reset the HDMI block */
+ void (*reset)(struct vc4_hdmi *vc4_hdmi);
+
+ /* Callback to enable / disable the CSC */
+ void (*csc_setup)(struct vc4_hdmi *vc4_hdmi, bool enable);
+
+ /* Callback to configure the video timings in the HDMI block */
+ void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode);
+
+ /* Callback to initialize the PHY according to the mode */
+ void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode);
+
+ /* Callback to disable the PHY */
+ void (*phy_disable)(struct vc4_hdmi *vc4_hdmi);
+
+ /* Callback to enable the RNG in the PHY */
+ void (*phy_rng_enable)(struct vc4_hdmi *vc4_hdmi);
+
+ /* Callback to disable the RNG in the PHY */
+ void (*phy_rng_disable)(struct vc4_hdmi *vc4_hdmi);
+
+ /* Callback to get channel map */
+ u32 (*channel_map)(struct vc4_hdmi *vc4_hdmi, u32 channel_mask);
+};
+
+/* HDMI audio information */
+struct vc4_hdmi_audio {
+ struct snd_soc_card card;
+ struct snd_soc_dai_link link;
+ struct snd_soc_dai_link_component cpu;
+ struct snd_soc_dai_link_component codec;
+ struct snd_soc_dai_link_component platform;
+ int samplerate;
+ int channels;
+ struct snd_dmaengine_dai_dma_data dma_data;
+ struct snd_pcm_substream *substream;
+
+ bool streaming;
+};
+
+/* General HDMI hardware state. */
+struct vc4_hdmi {
+ struct vc4_hdmi_audio audio;
+
+ struct platform_device *pdev;
+ const struct vc4_hdmi_variant *variant;
+
+ struct vc4_hdmi_encoder encoder;
+ struct drm_connector connector;
+
+ struct i2c_adapter *ddc;
+ void __iomem *hdmicore_regs;
+ void __iomem *hd_regs;
+
+ /* VC5 Only */
+ void __iomem *cec_regs;
+ /* VC5 Only */
+ void __iomem *csc_regs;
+ /* VC5 Only */
+ void __iomem *dvp_regs;
+ /* VC5 Only */
+ void __iomem *phy_regs;
+ /* VC5 Only */
+ void __iomem *ram_regs;
+ /* VC5 Only */
+ void __iomem *rm_regs;
+
+ int hpd_gpio;
+ bool hpd_active_low;
+
+ struct cec_adapter *cec_adap;
+ struct cec_msg cec_rx_msg;
+ bool cec_tx_ok;
+ bool cec_irq_was_rx;
+
+ struct clk *pixel_clock;
+ struct clk *hsm_clock;
+ struct clk *audio_clock;
+ struct clk *pixel_bvb_clock;
+
+ struct reset_control *reset;
+
+ struct debugfs_regset32 hdmi_regset;
+ struct debugfs_regset32 hd_regset;
+};
+
+static inline struct vc4_hdmi *
+connector_to_vc4_hdmi(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_hdmi, connector);
+}
+
+static inline struct vc4_hdmi *
+encoder_to_vc4_hdmi(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi_encoder *_encoder = to_vc4_hdmi_encoder(encoder);
+
+ return container_of(_encoder, struct vc4_hdmi, encoder);
+}
+
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode);
+void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
+void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
+void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
+
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_display_mode *mode);
+void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
+void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
+void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
+
+#endif /* _VC4_HDMI_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
new file mode 100644
index 000000000000..057796b54c51
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015 Broadcom
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "vc4_hdmi.h"
+#include "vc4_regs.h"
+#include "vc4_hdmi_regs.h"
+
+#define VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB BIT(5)
+#define VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB BIT(4)
+#define VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET BIT(3)
+#define VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET BIT(2)
+#define VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET BIT(1)
+#define VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET BIT(0)
+
+#define VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN BIT(4)
+
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_SHIFT 29
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP_MASK VC4_MASK(31, 29)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_SHIFT 24
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV_MASK VC4_MASK(28, 24)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_SHIFT 21
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP_MASK VC4_MASK(23, 21)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_SHIFT 16
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV_MASK VC4_MASK(20, 16)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_SHIFT 13
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP_MASK VC4_MASK(15, 13)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_SHIFT 8
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV_MASK VC4_MASK(12, 8)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_SHIFT 5
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP_MASK VC4_MASK(7, 5)
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_SHIFT 0
+#define VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV_MASK VC4_MASK(4, 0)
+
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_SHIFT 15
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2_MASK VC4_MASK(19, 15)
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_SHIFT 10
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1_MASK VC4_MASK(14, 10)
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_SHIFT 5
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0_MASK VC4_MASK(9, 5)
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_SHIFT 0
+#define VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK_MASK VC4_MASK(4, 0)
+
+#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_SHIFT 16
+#define VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN_MASK VC4_MASK(19, 16)
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_SHIFT 12
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2_MASK VC4_MASK(15, 12)
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_SHIFT 8
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1_MASK VC4_MASK(11, 8)
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_SHIFT 4
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0_MASK VC4_MASK(7, 4)
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_SHIFT 0
+#define VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK_MASK VC4_MASK(3, 0)
+
+#define VC4_HDMI_TX_PHY_CTL_3_RP_SHIFT 17
+#define VC4_HDMI_TX_PHY_CTL_3_RP_MASK VC4_MASK(19, 17)
+#define VC4_HDMI_TX_PHY_CTL_3_RZ_SHIFT 12
+#define VC4_HDMI_TX_PHY_CTL_3_RZ_MASK VC4_MASK(16, 12)
+#define VC4_HDMI_TX_PHY_CTL_3_CP1_SHIFT 10
+#define VC4_HDMI_TX_PHY_CTL_3_CP1_MASK VC4_MASK(11, 10)
+#define VC4_HDMI_TX_PHY_CTL_3_CP_SHIFT 8
+#define VC4_HDMI_TX_PHY_CTL_3_CP_MASK VC4_MASK(9, 8)
+#define VC4_HDMI_TX_PHY_CTL_3_CZ_SHIFT 6
+#define VC4_HDMI_TX_PHY_CTL_3_CZ_MASK VC4_MASK(7, 6)
+#define VC4_HDMI_TX_PHY_CTL_3_ICP_SHIFT 0
+#define VC4_HDMI_TX_PHY_CTL_3_ICP_MASK VC4_MASK(5, 0)
+
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE BIT(13)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VC_RANGE_EN BIT(12)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_LOW BIT(11)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_EMULATE_VC_HIGH BIT(10)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_SHIFT 9
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL_MASK VC4_MASK(9, 9)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_FB_DIV2 BIT(8)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_POST_DIV2 BIT(7)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN BIT(6)
+#define VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK BIT(5)
+
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_SHIFT 16
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_CPP_MASK VC4_MASK(27, 16)
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_SHIFT 14
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY_MASK VC4_MASK(15, 14)
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE BIT(13)
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_SHIFT 11
+#define VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL_MASK VC4_MASK(12, 11)
+
+#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_SHIFT 8
+#define VC4_HDMI_TX_PHY_CLK_DIV_VCO_MASK VC4_MASK(15, 8)
+
+#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_SHIFT 0
+#define VC4_HDMI_TX_PHY_PLL_CFG_PDIV_MASK VC4_MASK(3, 0)
+
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_MASK VC4_MASK(13, 12)
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL_SHIFT 12
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_MASK VC4_MASK(9, 8)
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL_SHIFT 8
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_MASK VC4_MASK(5, 4)
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL_SHIFT 4
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_MASK VC4_MASK(1, 0)
+#define VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL_SHIFT 0
+
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK VC4_MASK(27, 0)
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_SHIFT 0
+
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK VC4_MASK(27, 0)
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_SHIFT 0
+
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_MASK VC4_MASK(31, 16)
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD_SHIFT 16
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_MASK VC4_MASK(15, 0)
+#define VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD_SHIFT 0
+
+#define VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS BIT(19)
+#define VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR BIT(17)
+#define VC4_HDMI_RM_CONTROL_FREE_RUN BIT(4)
+
+#define VC4_HDMI_RM_OFFSET_ONLY BIT(31)
+#define VC4_HDMI_RM_OFFSET_OFFSET_SHIFT 0
+#define VC4_HDMI_RM_OFFSET_OFFSET_MASK VC4_MASK(30, 0)
+
+#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24
+#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24)
+
+#define OSCILLATOR_FREQUENCY 54000000
+
+void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+{
+ /* The PHY should already be in reset, as left by
+ * vc4_hdmi_encoder_disable().
+ */
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
+}
+
+void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16);
+}
+
+void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ HDMI_READ(HDMI_TX_PHY_CTL_0) &
+ ~VC4_HDMI_TX_PHY_RNG_PWRDN);
+}
+
+void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ HDMI_READ(HDMI_TX_PHY_CTL_0) |
+ VC4_HDMI_TX_PHY_RNG_PWRDN);
+}
+
+static unsigned long long
+phy_get_vco_freq(unsigned long long clock, u8 *vco_sel, u8 *vco_div)
+{
+ unsigned long long vco_freq = clock;
+ unsigned int _vco_div = 0;
+ unsigned int _vco_sel = 0;
+
+ while (vco_freq < 3000000000ULL) {
+ _vco_div++;
+ vco_freq = clock * _vco_div * 10;
+ }
+
+ if (vco_freq > 4500000000ULL)
+ _vco_sel = 1;
+
+ *vco_sel = _vco_sel;
+ *vco_div = _vco_div;
+
+ return vco_freq;
+}
+
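+/*
+ * Worked example: for a 148.5 MHz pixel clock, the loop above
+ * settles on _vco_div = 3 (148.5 MHz * 3 * 10 = 4.455 GHz, the
+ * first multiple at or above 3 GHz), and since 4.455 GHz does not
+ * exceed 4.5 GHz, _vco_sel stays 0.
+ */
+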
+static u8 phy_get_cp_current(unsigned long long vco_freq)
+{
+ if (vco_freq < 3700000000ULL)
+ return 0x1c;
+
+ return 0x18;
+}
+
+static u32 phy_get_rm_offset(unsigned long long vco_freq)
+{
+ unsigned long long fref = OSCILLATOR_FREQUENCY;
+ u64 offset = 0;
+
+ /* RM offset is stored as 9.22 format */
+ offset = vco_freq * 2;
+ offset = offset << 22;
+ do_div(offset, fref);
+ offset >>= 2;
+
+ return offset;
+}
+
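+/*
+ * The shifts above compute vco_freq / (2 * fref) as a .22 fixed-point
+ * value: e.g. with a 4.455 GHz VCO and the 54 MHz oscillator the
+ * ratio is 41.25, giving an offset of 41.25 * 2^22 = 173015040.
+ */
+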
+static u8 phy_get_vco_gain(unsigned long long vco_freq)
+{
+ if (vco_freq < 3350000000ULL)
+ return 0xf;
+
+ if (vco_freq < 3700000000ULL)
+ return 0xc;
+
+ if (vco_freq < 4050000000ULL)
+ return 0x6;
+
+ if (vco_freq < 4800000000ULL)
+ return 0x5;
+
+ if (vco_freq < 5200000000ULL)
+ return 0x7;
+
+ return 0x2;
+}
+
+struct phy_lane_settings {
+ struct {
+ u8 preemphasis;
+ u8 main_driver;
+ } amplitude;
+
+ u8 res_sel_data;
+ u8 term_res_sel_data;
+};
+
+struct phy_settings {
+ unsigned long long min_rate;
+ unsigned long long max_rate;
+ struct phy_lane_settings channel[3];
+ struct phy_lane_settings clock;
+};
+
+static const struct phy_settings vc5_hdmi_phy_settings[] = {
+ {
+ 0, 50000000,
+ {
+ {{0x0, 0x0A}, 0x12, 0x0},
+ {{0x0, 0x0A}, 0x12, 0x0},
+ {{0x0, 0x0A}, 0x12, 0x0}
+ },
+ {{0x0, 0x0A}, 0x18, 0x0},
+ },
+ {
+ 50000001, 75000000,
+ {
+ {{0x0, 0x09}, 0x12, 0x0},
+ {{0x0, 0x09}, 0x12, 0x0},
+ {{0x0, 0x09}, 0x12, 0x0}
+ },
+ {{0x0, 0x0C}, 0x18, 0x3},
+ },
+ {
+ 75000001, 165000000,
+ {
+ {{0x0, 0x09}, 0x12, 0x0},
+ {{0x0, 0x09}, 0x12, 0x0},
+ {{0x0, 0x09}, 0x12, 0x0}
+ },
+ {{0x0, 0x0C}, 0x18, 0x3},
+ },
+ {
+ 165000001, 250000000,
+ {
+ {{0x0, 0x0F}, 0x12, 0x1},
+ {{0x0, 0x0F}, 0x12, 0x1},
+ {{0x0, 0x0F}, 0x12, 0x1}
+ },
+ {{0x0, 0x0C}, 0x18, 0x3},
+ },
+ {
+ 250000001, 340000000,
+ {
+ {{0x2, 0x0D}, 0x12, 0x1},
+ {{0x2, 0x0D}, 0x12, 0x1},
+ {{0x2, 0x0D}, 0x12, 0x1}
+ },
+ {{0x0, 0x0C}, 0x18, 0xF},
+ },
+ {
+ 340000001, 450000000,
+ {
+ {{0x0, 0x1B}, 0x12, 0xF},
+ {{0x0, 0x1B}, 0x12, 0xF},
+ {{0x0, 0x1B}, 0x12, 0xF}
+ },
+ {{0x0, 0x0A}, 0x12, 0xF},
+ },
+ {
+ 450000001, 600000000,
+ {
+ {{0x0, 0x1C}, 0x12, 0xF},
+ {{0x0, 0x1C}, 0x12, 0xF},
+ {{0x0, 0x1C}, 0x12, 0xF}
+ },
+ {{0x0, 0x0B}, 0x13, 0xF},
+ },
+};
+
+static const struct phy_settings *phy_get_settings(unsigned long long tmds_rate)
+{
+ unsigned int count = ARRAY_SIZE(vc5_hdmi_phy_settings);
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct phy_settings *s = &vc5_hdmi_phy_settings[i];
+
+ if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate)
+ return s;
+ }
+
+ /*
+ * If the TMDS rate exceeds our highest table entry, fall back
+ * to the highest entry anyway.
+ */
+ return &vc5_hdmi_phy_settings[count - 1];
+}
+
+static const struct phy_lane_settings *
+phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
+ unsigned long long tmds_rate)
+{
+ const struct phy_settings *settings = phy_get_settings(tmds_rate);
+
+ if (chan == PHY_LANE_CK)
+ return &settings->clock;
+
+ return &settings->channel[chan];
+}
+
+static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f);
+ HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10));
+}
+
+void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode)
+{
+ const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings;
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ unsigned long long pixel_freq = mode->clock * 1000;
+ unsigned long long vco_freq;
+ unsigned char word_sel;
+ u8 vco_sel, vco_div;
+
+ vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div);
+
+ vc5_hdmi_reset_phy(vc4_hdmi);
+
+ HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
+ VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_RESET_CTL) &
+ ~VC4_HDMI_TX_PHY_RESET_CTL_TX_0_RESET &
+ ~VC4_HDMI_TX_PHY_RESET_CTL_TX_1_RESET &
+ ~VC4_HDMI_TX_PHY_RESET_CTL_TX_2_RESET &
+ ~VC4_HDMI_TX_PHY_RESET_CTL_TX_CK_RESET);
+
+ HDMI_WRITE(HDMI_RM_CONTROL,
+ HDMI_READ(HDMI_RM_CONTROL) |
+ VC4_HDMI_RM_CONTROL_EN_FREEZE_COUNTERS |
+ VC4_HDMI_RM_CONTROL_EN_LOAD_INTEGRATOR |
+ VC4_HDMI_RM_CONTROL_FREE_RUN);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
+ (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1) &
+ ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT_MASK) |
+ VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1_MIN_LIMIT));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
+ (HDMI_READ(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2) &
+ ~VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT_MASK) |
+ VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2_MAX_LIMIT));
+
+ HDMI_WRITE(HDMI_RM_OFFSET,
+ VC4_SET_FIELD(phy_get_rm_offset(vco_freq),
+ VC4_HDMI_RM_OFFSET_OFFSET) |
+ VC4_HDMI_RM_OFFSET_ONLY);
+
+ HDMI_WRITE(HDMI_TX_PHY_CLK_DIV,
+ VC4_SET_FIELD(vco_div, VC4_HDMI_TX_PHY_CLK_DIV_VCO));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
+ VC4_SET_FIELD(0xe147, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_HOLD_THRESHOLD) |
+ VC4_SET_FIELD(0xe14, VC4_HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4_STABLE_THRESHOLD));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_0,
+ VC4_HDMI_TX_PHY_PLL_CTL_0_ENA_VCO_CLK |
+ VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_CONT_EN |
+ VC4_HDMI_TX_PHY_PLL_CTL_0_MASH11_MODE |
+ VC4_SET_FIELD(vco_sel, VC4_HDMI_TX_PHY_PLL_CTL_0_VCO_SEL));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CTL_1,
+ HDMI_READ(HDMI_TX_PHY_PLL_CTL_1) |
+ VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_ENABLE |
+ VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_PLL_CTL_1_POST_RST_SEL) |
+ VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CTL_1_FREQ_DOUBLER_DELAY) |
+ VC4_SET_FIELD(0x8a, VC4_HDMI_TX_PHY_PLL_CTL_1_CPP));
+
+ HDMI_WRITE(HDMI_RM_FORMAT,
+ HDMI_READ(HDMI_RM_FORMAT) |
+ VC4_SET_FIELD(2, VC4_HDMI_RM_FORMAT_SHIFT));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CFG,
+ HDMI_READ(HDMI_TX_PHY_PLL_CFG) |
+ VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_PLL_CFG_PDIV));
+
+ if (pixel_freq >= 340000000)
+ word_sel = 3;
+ else
+ word_sel = 0;
+ HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel);
+
+ HDMI_WRITE(HDMI_TX_PHY_CTL_3,
+ VC4_SET_FIELD(phy_get_cp_current(vco_freq),
+ VC4_HDMI_TX_PHY_CTL_3_ICP) |
+ VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP) |
+ VC4_SET_FIELD(1, VC4_HDMI_TX_PHY_CTL_3_CP1) |
+ VC4_SET_FIELD(3, VC4_HDMI_TX_PHY_CTL_3_CZ) |
+ VC4_SET_FIELD(4, VC4_HDMI_TX_PHY_CTL_3_RP) |
+ VC4_SET_FIELD(6, VC4_HDMI_TX_PHY_CTL_3_RZ));
+
+ chan0_settings =
+ phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0],
+ pixel_freq);
+ chan1_settings =
+ phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1],
+ pixel_freq);
+ chan2_settings =
+ phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2],
+ pixel_freq);
+ clock_settings =
+ phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK],
+ pixel_freq);
+
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ VC4_SET_FIELD(chan0_settings->amplitude.preemphasis,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_PREEMP) |
+ VC4_SET_FIELD(chan0_settings->amplitude.main_driver,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_0_MAINDRV) |
+ VC4_SET_FIELD(chan1_settings->amplitude.preemphasis,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_PREEMP) |
+ VC4_SET_FIELD(chan1_settings->amplitude.main_driver,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_1_MAINDRV) |
+ VC4_SET_FIELD(chan2_settings->amplitude.preemphasis,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_PREEMP) |
+ VC4_SET_FIELD(chan2_settings->amplitude.main_driver,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_2_MAINDRV) |
+ VC4_SET_FIELD(clock_settings->amplitude.preemphasis,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_PREEMP) |
+ VC4_SET_FIELD(clock_settings->amplitude.main_driver,
+ VC4_HDMI_TX_PHY_CTL_0_PREEMP_CK_MAINDRV));
+
+ HDMI_WRITE(HDMI_TX_PHY_CTL_1,
+ HDMI_READ(HDMI_TX_PHY_CTL_1) |
+ VC4_SET_FIELD(chan0_settings->res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA0) |
+ VC4_SET_FIELD(chan1_settings->res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA1) |
+ VC4_SET_FIELD(chan2_settings->res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_1_RES_SEL_DATA2) |
+ VC4_SET_FIELD(clock_settings->res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_1_RES_SEL_CK));
+
+ HDMI_WRITE(HDMI_TX_PHY_CTL_2,
+ VC4_SET_FIELD(chan0_settings->term_res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA0) |
+ VC4_SET_FIELD(chan1_settings->term_res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA1) |
+ VC4_SET_FIELD(chan2_settings->term_res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELDATA2) |
+ VC4_SET_FIELD(clock_settings->term_res_sel_data,
+ VC4_HDMI_TX_PHY_CTL_2_TERM_RES_SELCK) |
+ VC4_SET_FIELD(phy_get_vco_gain(vco_freq),
+ VC4_HDMI_TX_PHY_CTL_2_VCO_GAIN));
+
+ HDMI_WRITE(HDMI_TX_PHY_CHANNEL_SWAP,
+ VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_0],
+ VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX0_OUT_SEL) |
+ VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_1],
+ VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX1_OUT_SEL) |
+ VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_2],
+ VC4_HDMI_TX_PHY_CHANNEL_SWAP_TX2_OUT_SEL) |
+ VC4_SET_FIELD(variant->phy_lane_mapping[PHY_LANE_CK],
+ VC4_HDMI_TX_PHY_CHANNEL_SWAP_TXCK_OUT_SEL));
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_RESET_CTL) &
+ ~(VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
+ VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB));
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_RESET_CTL) |
+ VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB |
+ VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB);
+}
+
+void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
+{
+ vc5_hdmi_reset_phy(vc4_hdmi);
+}
+
+void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) &
+ ~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
+}
+
+void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
+{
+ HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) |
+ VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
new file mode 100644
index 000000000000..7c6b4818f245
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -0,0 +1,442 @@
+#ifndef _VC4_HDMI_REGS_H_
+#define _VC4_HDMI_REGS_H_
+
+#include "vc4_hdmi.h"
+
+#define VC4_HDMI_PACKET_STRIDE 0x24
+
+enum vc4_hdmi_regs {
+ VC4_INVALID = 0,
+ VC4_HDMI,
+ VC4_HD,
+ VC5_CEC,
+ VC5_CSC,
+ VC5_DVP,
+ VC5_PHY,
+ VC5_RAM,
+ VC5_RM,
+};
+
+enum vc4_hdmi_field {
+ HDMI_AUDIO_PACKET_CONFIG,
+ HDMI_CEC_CNTRL_1,
+ HDMI_CEC_CNTRL_2,
+ HDMI_CEC_CNTRL_3,
+ HDMI_CEC_CNTRL_4,
+ HDMI_CEC_CNTRL_5,
+ HDMI_CEC_CPU_CLEAR,
+ HDMI_CEC_CPU_MASK_CLEAR,
+ HDMI_CEC_CPU_MASK_SET,
+ HDMI_CEC_CPU_MASK_STATUS,
+ HDMI_CEC_CPU_STATUS,
+
+ /*
+ * CEC transmit/receive data registers. The first byte is the
+ * low byte of the 32-bit register; the MSB of each byte is
+ * transmitted first.
+ */
+ HDMI_CEC_RX_DATA_1,
+ HDMI_CEC_RX_DATA_2,
+ HDMI_CEC_RX_DATA_3,
+ HDMI_CEC_RX_DATA_4,
+ HDMI_CEC_TX_DATA_1,
+ HDMI_CEC_TX_DATA_2,
+ HDMI_CEC_TX_DATA_3,
+ HDMI_CEC_TX_DATA_4,
+ HDMI_CLOCK_STOP,
+ HDMI_CORE_REV,
+ HDMI_CRP_CFG,
+ HDMI_CSC_12_11,
+ HDMI_CSC_14_13,
+ HDMI_CSC_22_21,
+ HDMI_CSC_24_23,
+ HDMI_CSC_32_31,
+ HDMI_CSC_34_33,
+ HDMI_CSC_CTL,
+
+ /*
+ * 20-bit fields containing CTS values to be transmitted if
+ * !EXTERNAL_CTS_EN
+ */
+ HDMI_CTS_0,
+ HDMI_CTS_1,
+ HDMI_DVP_CTL,
+ HDMI_FIFO_CTL,
+ HDMI_FRAME_COUNT,
+ HDMI_HORZA,
+ HDMI_HORZB,
+ HDMI_HOTPLUG,
+ HDMI_HOTPLUG_INT,
+
+ /*
+ * 3 bits per field, each field mapping the corresponding MAI
+ * bus channel to the given HDMI channel.
+ */
+ HDMI_MAI_CHANNEL_MAP,
+ HDMI_MAI_CONFIG,
+ HDMI_MAI_CTL,
+
+ /*
+ * Register for DMAing in audio data to be transported over
+ * the MAI bus to the Falcon core.
+ */
+ HDMI_MAI_DATA,
+
+ /* Format header to be placed on the MAI data. Unused. */
+ HDMI_MAI_FMT,
+
+ /* Last received format word on the MAI bus. */
+ HDMI_MAI_FORMAT,
+ HDMI_MAI_SMP,
+ HDMI_MAI_THR,
+ HDMI_M_CTL,
+ HDMI_RAM_PACKET_CONFIG,
+ HDMI_RAM_PACKET_START,
+ HDMI_RAM_PACKET_STATUS,
+ HDMI_RM_CONTROL,
+ HDMI_RM_FORMAT,
+ HDMI_RM_OFFSET,
+ HDMI_SCHEDULER_CONTROL,
+ HDMI_SW_RESET_CONTROL,
+ HDMI_TX_PHY_CHANNEL_SWAP,
+ HDMI_TX_PHY_CLK_DIV,
+ HDMI_TX_PHY_CTL_0,
+ HDMI_TX_PHY_CTL_1,
+ HDMI_TX_PHY_CTL_2,
+ HDMI_TX_PHY_CTL_3,
+ HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
+ HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
+ HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
+ HDMI_TX_PHY_PLL_CFG,
+ HDMI_TX_PHY_PLL_CTL_0,
+ HDMI_TX_PHY_PLL_CTL_1,
+ HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_TX_PHY_RESET_CTL,
+ HDMI_TX_PHY_TMDS_CLK_WORD_SEL,
+ HDMI_VEC_INTERFACE_XBAR,
+ HDMI_VERTA0,
+ HDMI_VERTA1,
+ HDMI_VERTB0,
+ HDMI_VERTB1,
+ HDMI_VID_CTL,
+};
+
+struct vc4_hdmi_register {
+ char *name;
+ enum vc4_hdmi_regs reg;
+ unsigned int offset;
+};
+
+#define _VC4_REG(_base, _reg, _offset) \
+ [_reg] = { \
+ .name = #_reg, \
+ .reg = _base, \
+ .offset = _offset, \
+ }
+
+#define VC4_HD_REG(reg, offset) _VC4_REG(VC4_HD, reg, offset)
+#define VC4_HDMI_REG(reg, offset) _VC4_REG(VC4_HDMI, reg, offset)
+#define VC5_CEC_REG(reg, offset) _VC4_REG(VC5_CEC, reg, offset)
+#define VC5_CSC_REG(reg, offset) _VC4_REG(VC5_CSC, reg, offset)
+#define VC5_DVP_REG(reg, offset) _VC4_REG(VC5_DVP, reg, offset)
+#define VC5_PHY_REG(reg, offset) _VC4_REG(VC5_PHY, reg, offset)
+#define VC5_RAM_REG(reg, offset) _VC4_REG(VC5_RAM, reg, offset)
+#define VC5_RM_REG(reg, offset) _VC4_REG(VC5_RM, reg, offset)
+
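+/*
+ * For example, the first entry below, VC4_HD_REG(HDMI_M_CTL, 0x000c),
+ * expands to:
+ *
+ * [HDMI_M_CTL] = { .name = "HDMI_M_CTL", .reg = VC4_HD, .offset = 0x000c, }
+ */
+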
+static const struct vc4_hdmi_register vc4_hdmi_fields[] = {
+ VC4_HD_REG(HDMI_M_CTL, 0x000c),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0014),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0018),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x001c),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x0020),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x002c),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0038),
+ VC4_HD_REG(HDMI_CSC_CTL, 0x0040),
+ VC4_HD_REG(HDMI_CSC_12_11, 0x0044),
+ VC4_HD_REG(HDMI_CSC_14_13, 0x0048),
+ VC4_HD_REG(HDMI_CSC_22_21, 0x004c),
+ VC4_HD_REG(HDMI_CSC_24_23, 0x0050),
+ VC4_HD_REG(HDMI_CSC_32_31, 0x0054),
+ VC4_HD_REG(HDMI_CSC_34_33, 0x0058),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0068),
+
+ VC4_HDMI_REG(HDMI_CORE_REV, 0x0000),
+ VC4_HDMI_REG(HDMI_SW_RESET_CONTROL, 0x0004),
+ VC4_HDMI_REG(HDMI_HOTPLUG_INT, 0x0008),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x000c),
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x005c),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0090),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0094),
+ VC4_HDMI_REG(HDMI_MAI_FORMAT, 0x0098),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x009c),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x00a0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x00a4),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x00a8),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x00ac),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x00b0),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x00c0),
+ VC4_HDMI_REG(HDMI_HORZA, 0x00c4),
+ VC4_HDMI_REG(HDMI_HORZB, 0x00c8),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x00cc),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x00d0),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x00d4),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x00d8),
+ VC4_HDMI_REG(HDMI_CEC_CNTRL_1, 0x00e8),
+ VC4_HDMI_REG(HDMI_CEC_CNTRL_2, 0x00ec),
+ VC4_HDMI_REG(HDMI_CEC_CNTRL_3, 0x00f0),
+ VC4_HDMI_REG(HDMI_CEC_CNTRL_4, 0x00f4),
+ VC4_HDMI_REG(HDMI_CEC_CNTRL_5, 0x00f8),
+ VC4_HDMI_REG(HDMI_CEC_TX_DATA_1, 0x00fc),
+ VC4_HDMI_REG(HDMI_CEC_TX_DATA_2, 0x0100),
+ VC4_HDMI_REG(HDMI_CEC_TX_DATA_3, 0x0104),
+ VC4_HDMI_REG(HDMI_CEC_TX_DATA_4, 0x0108),
+ VC4_HDMI_REG(HDMI_CEC_RX_DATA_1, 0x010c),
+ VC4_HDMI_REG(HDMI_CEC_RX_DATA_2, 0x0110),
+ VC4_HDMI_REG(HDMI_CEC_RX_DATA_3, 0x0114),
+ VC4_HDMI_REG(HDMI_CEC_RX_DATA_4, 0x0118),
+ VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
+ VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
+ VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
+ VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
+};
+
+static const struct vc4_hdmi_register vc5_hdmi_hdmi0_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0014),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0018),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x001c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0020),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0044),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0cc),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d0),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0e4),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0e8),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0ec),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020),
+ VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+};
+
+static const struct vc4_hdmi_register vc5_hdmi_hdmi1_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0034),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0038),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x003c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0040),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0048),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x074),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0b8),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0bc),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0c4),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0c8),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0cc),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d0),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e0),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0e4),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0e8),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0ec),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x0f8),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f0),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERDOWN_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_3, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_0, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CTL_1, 0x020),
+ VC5_PHY_REG(HDMI_TX_PHY_CLK_DIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x034),
+ VC5_PHY_REG(HDMI_TX_PHY_CHANNEL_SWAP, 0x04c),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1, 0x050),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4, 0x05c),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+};
+
+static inline
+void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi,
+ enum vc4_hdmi_regs reg)
+{
+ switch (reg) {
+ case VC4_HD:
+ return hdmi->hd_regs;
+
+ case VC4_HDMI:
+ return hdmi->hdmicore_regs;
+
+ case VC5_CSC:
+ return hdmi->csc_regs;
+
+ case VC5_CEC:
+ return hdmi->cec_regs;
+
+ case VC5_DVP:
+ return hdmi->dvp_regs;
+
+ case VC5_PHY:
+ return hdmi->phy_regs;
+
+ case VC5_RAM:
+ return hdmi->ram_regs;
+
+ case VC5_RM:
+ return hdmi->rm_regs;
+
+ default:
+ return NULL;
+ }
+}
+
+static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi,
+ enum vc4_hdmi_field reg)
+{
+ const struct vc4_hdmi_register *field;
+ const struct vc4_hdmi_variant *variant = hdmi->variant;
+ void __iomem *base;
+
+ if (reg >= variant->num_registers) {
+ dev_warn(&hdmi->pdev->dev,
+ "Invalid register ID %u\n", reg);
+ return 0;
+ }
+
+ field = &variant->registers[reg];
+ base = __vc4_hdmi_get_field_base(hdmi, field->reg);
+ if (!base) {
+ dev_warn(&hdmi->pdev->dev,
+ "Unknown register ID %u\n", reg);
+ return 0;
+ }
+
+ return readl(base + field->offset);
+}
+#define HDMI_READ(reg) vc4_hdmi_read(vc4_hdmi, reg)
+
+static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
+ enum vc4_hdmi_field reg,
+ u32 value)
+{
+ const struct vc4_hdmi_register *field;
+ const struct vc4_hdmi_variant *variant = hdmi->variant;
+ void __iomem *base;
+
+ if (reg >= variant->num_registers) {
+ dev_warn(&hdmi->pdev->dev,
+ "Invalid register ID %u\n", reg);
+ return;
+ }
+
+ field = &variant->registers[reg];
+ base = __vc4_hdmi_get_field_base(hdmi, field->reg);
+ if (!base)
+ return;
+
+ writel(value, base + field->offset);
+}
+#define HDMI_WRITE(reg, val) vc4_hdmi_write(vc4_hdmi, reg, val)
+
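+/*
+ * Read-modify-write usage sketch: both macros expect a local variable
+ * named vc4_hdmi to be in scope, and "flags" below is only a
+ * placeholder:
+ *
+ * HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | flags);
+ */
+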
+#endif /* _VC4_HDMI_REGS_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2d2bf59c0503..b72b2bd05a81 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -19,6 +19,8 @@
* each CRTC.
*/
+#include <linux/bitfield.h>
+#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>
@@ -160,6 +162,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
u32 i;
/* The LUT memory is laid out with each HVS channel in order,
@@ -168,7 +171,7 @@ static void vc4_hvs_lut_load(struct drm_crtc *crtc)
*/
HVS_WRITE(SCALER_GAMADDR,
SCALER_GAMADDR_AUTOINC |
- (vc4_crtc->channel * 3 * crtc->gamma_size));
+ (vc4_state->assigned_channel * 3 * crtc->gamma_size));
for (i = 0; i < crtc->gamma_size; i++)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
@@ -194,6 +197,135 @@ static void vc4_hvs_update_gamma_lut(struct drm_crtc *crtc)
vc4_hvs_lut_load(crtc);
}
+int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 reg;
+ int ret;
+
+ if (!vc4->hvs->hvs5)
+ return output;
+
+ switch (output) {
+ case 0:
+ return 0;
+
+ case 1:
+ return 1;
+
+ case 2:
+ reg = HVS_READ(SCALER_DISPECTRL);
+ ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
+ if (ret == 0)
+ return 2;
+
+ return 0;
+
+ case 3:
+ reg = HVS_READ(SCALER_DISPCTRL);
+ ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
+
+ return ret;
+
+ case 4:
+ reg = HVS_READ(SCALER_DISPEOLN);
+ ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
+
+ return ret;
+
+ case 5:
+ reg = HVS_READ(SCALER_DISPDITHER);
+ ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
+
+ return ret;
+
+ default:
+ return -EPIPE;
+ }
+}
+
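+/*
+ * For example, a SCALER_DISPCTRL_DSP3_MUX field of 0 means output 3
+ * is fed by FIFO 0, while the value 3 means the output is disabled,
+ * hence the -EPIPE above.
+ */
+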
+static int vc4_hvs_init_channel(struct vc4_dev *vc4, struct drm_crtc *crtc,
+ struct drm_display_mode *mode, bool oneshot)
+{
+ struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int chan = vc4_crtc_state->assigned_channel;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 dispbkgndx;
+ u32 dispctrl;
+
+ HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
+ HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
+ HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
+
+ /* Turn on the scaler, which will wait for vstart to start
+ * compositing.
+ * When feeding the transposer, we should operate in oneshot
+ * mode.
+ */
+ dispctrl = SCALER_DISPCTRLX_ENABLE;
+
+ if (!vc4->hvs->hvs5)
+ dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ SCALER_DISPCTRLX_WIDTH) |
+ VC4_SET_FIELD(mode->vdisplay,
+ SCALER_DISPCTRLX_HEIGHT) |
+ (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
+ else
+ dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ SCALER5_DISPCTRLX_WIDTH) |
+ VC4_SET_FIELD(mode->vdisplay,
+ SCALER5_DISPCTRLX_HEIGHT) |
+ (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
+
+ HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);
+
+ dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
+ dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
+
+ HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
+ SCALER_DISPBKGND_AUTOHS |
+ ((!vc4->hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+
+ /* Reload the LUT, since the SRAMs would have been disabled if
+ * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
+ */
+ vc4_hvs_lut_load(crtc);
+
+ return 0;
+}
+
+void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
+ return;
+
+ HVS_WRITE(SCALER_DISPCTRLX(chan),
+ HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
+ HVS_WRITE(SCALER_DISPCTRLX(chan),
+ HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);
+
+ /* Once we leave, the scaler should be disabled and its fifo empty. */
+ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);
+
+ WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
+ SCALER_DISPSTATX_MODE) !=
+ SCALER_DISPSTATX_MODE_DISABLED);
+
+ WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
+ (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
+ SCALER_DISPSTATX_EMPTY);
+}
+
int vc4_hvs_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -248,12 +380,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
crtc->state->event = NULL;
}
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
}
}
@@ -263,59 +395,22 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool oneshot = vc4_state->feed_txp;
- u32 dispctrl;
vc4_hvs_update_dlist(crtc);
-
- /* Turn on the scaler, which will wait for vstart to start
- * compositing.
- * When feeding the transposer, we should operate in oneshot
- * mode.
- */
- dispctrl = SCALER_DISPCTRLX_ENABLE;
- dispctrl |= VC4_SET_FIELD(mode->hdisplay,
- SCALER_DISPCTRLX_WIDTH) |
- VC4_SET_FIELD(mode->vdisplay,
- SCALER_DISPCTRLX_HEIGHT) |
- (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
-
- HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), dispctrl);
+ vc4_hvs_init_channel(vc4, crtc, mode, oneshot);
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- u32 chan = vc4_crtc->channel;
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
+ unsigned int chan = vc4_state->assigned_channel;
- if (HVS_READ(SCALER_DISPCTRLX(chan)) &
- SCALER_DISPCTRLX_ENABLE) {
- HVS_WRITE(SCALER_DISPCTRLX(chan),
- SCALER_DISPCTRLX_RESET);
-
- /* While the docs say that reset is self-clearing, it
- * seems it doesn't actually.
- */
- HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
- }
-
- /* Once we leave, the scaler should be disabled and its fifo empty. */
-
- WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);
-
- WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
- SCALER_DISPSTATX_MODE) !=
- SCALER_DISPSTATX_MODE_DISABLED);
-
- WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
- (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
- SCALER_DISPSTATX_EMPTY);
+ vc4_hvs_stop_channel(dev, chan);
}
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
@@ -323,7 +418,6 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
struct vc4_plane_state *vc4_plane_state;
@@ -365,8 +459,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
/* This sets a black background color fill, as is the case
* with other DRM drivers.
*/
- HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
- HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) |
+ HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel),
+ HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel)) |
SCALER_DISPBKGND_FILL);
/* Only update DISPLIST if the CRTC was already running and is not
@@ -380,7 +474,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
vc4_hvs_update_dlist(crtc);
if (crtc->state->color_mgmt_changed) {
- u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel));
+ u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_state->assigned_channel));
if (crtc->state->gamma_lut) {
vc4_hvs_update_gamma_lut(crtc);
@@ -392,7 +486,7 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
*/
dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
}
- HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx);
+ HVS_WRITE(SCALER_DISPBKGNDX(vc4_state->assigned_channel), dispbkgndx);
}
if (debug_dump_regs) {
@@ -401,50 +495,6 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
}
}
-void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
- struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
-
- if (vc4_crtc->data->hvs_channel == 2) {
- u32 dispctrl;
- u32 dsp3_mux;
-
- /*
- * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
- * FIFO X'.
- * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
- *
- * DSP3 is connected to FIFO2 unless the transposer is
- * enabled. In this case, FIFO 2 is directly accessed by the
- * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
- * route.
- */
- if (vc4_state->feed_txp)
- dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
- else
- dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
-
- dispctrl = HVS_READ(SCALER_DISPCTRL) &
- ~SCALER_DISPCTRL_DSP3_MUX_MASK;
- HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
- }
-
- HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
- SCALER_DISPBKGND_AUTOHS |
- SCALER_DISPBKGND_GAMMA |
- (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
-
- /* Reload the LUT, since the SRAMs would have been disabled if
- * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
- */
- vc4_hvs_lut_load(crtc);
-}
-
void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -510,7 +560,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
@@ -521,6 +571,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->pdev = pdev;
+ if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs"))
+ hvs->hvs5 = true;
+
hvs->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
@@ -529,7 +582,24 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
hvs->regset.regs = hvs_regs;
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
- hvs->dlist = hvs->regs + SCALER_DLIST_START;
+ if (hvs->hvs5) {
+ hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hvs->core_clk)) {
+ dev_err(&pdev->dev, "Couldn't get core clock\n");
+ return PTR_ERR(hvs->core_clk);
+ }
+
+ ret = clk_prepare_enable(hvs->core_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the core clock\n");
+ return ret;
+ }
+ }
+
+ if (!hvs->hvs5)
+ hvs->dlist = hvs->regs + SCALER_DLIST_START;
+ else
+ hvs->dlist = hvs->regs + SCALER5_DLIST_START;
spin_lock_init(&hvs->mm_lock);
@@ -547,7 +617,12 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ if (!hvs->hvs5)
+ /* 96kB */
+ drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+ else
+ /* 70k words */
+ drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
@@ -604,7 +679,8 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = drm->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
@@ -612,6 +688,8 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
drm_mm_takedown(&vc4->hvs->dlist_mm);
drm_mm_takedown(&vc4->hvs->lbm_mm);
+ clk_disable_unprepare(hvs->core_clk);
+
vc4->hvs = NULL;
}
@@ -632,6 +710,7 @@ static int vc4_hvs_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_hvs_dt_match[] = {
+ { .compatible = "brcm,bcm2711-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{}
};
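
The register programming in this driver follows one read-modify-write idiom throughout: HVS_READ the register, clear the target field with its _MASK, OR in the new value shifted into place, and HVS_WRITE the result. A minimal standalone sketch of that idiom in plain C (the field position below is hypothetical, not the real SCALER_DISPCTRL layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 2-bit mux field; the real register layout differs. */
    #define DSP3_MUX_SHIFT 18
    #define DSP3_MUX_MASK  (0x3u << DSP3_MUX_SHIFT)

    static uint32_t set_dsp3_mux(uint32_t reg, uint32_t mux)
    {
            /* Clear the field, then OR in the new value shifted into place. */
            return (reg & ~DSP3_MUX_MASK) |
                   ((mux << DSP3_MUX_SHIFT) & DSP3_MUX_MASK);
    }

    int main(void)
    {
            /* 3 disables DSP3; values below 3 select a FIFO to route. */
            printf("0x%08x\n", set_dsp3_mux(0xffffffffu, 3));
            return 0;
    }
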
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 08318e69061b..2b951cae04ad 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -11,6 +11,8 @@
* crtc, HDMI encoder).
*/
+#include <linux/clk.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -49,7 +51,7 @@ static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
struct drm_device *dev = state->dev;
- struct vc4_dev *vc4 = dev->dev_private;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_private_state *priv_state;
int ret;
@@ -91,6 +93,29 @@ static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
.atomic_destroy_state = vc4_ctm_destroy_state,
};
+static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+}
+
+static int vc4_ctm_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_ctm_state *ctm_state;
+
+ drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+ ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+ if (!ctm_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
+ &vc4_ctm_state_funcs);
+
+ return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+}
+
/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
@@ -144,22 +169,130 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
+static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ u32 dispctrl;
+ u32 dsp3_mux;
+
+ if (!crtc_state->active)
+ continue;
+
+ if (vc4_state->assigned_channel != 2)
+ continue;
+
+ /*
+ * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+ * FIFO X'.
+ * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+ *
+ * DSP3 is connected to FIFO2 unless the transposer is
+ * enabled. In this case, FIFO 2 is directly accessed by the
+ * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+ * route.
+ */
+ if (vc4_state->feed_txp)
+ dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+ else
+ dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL) &
+ ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+ }
+}
+
+static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned char dsp2_mux = 0;
+ unsigned char dsp3_mux = 3;
+ unsigned char dsp4_mux = 3;
+ unsigned char dsp5_mux = 3;
+ unsigned int i;
+ u32 reg;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+ if (!crtc_state->active)
+ continue;
+
+ switch (vc4_crtc->data->hvs_output) {
+ case 2:
+ dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ break;
+
+ case 3:
+ dsp3_mux = vc4_state->assigned_channel;
+ break;
+
+ case 4:
+ dsp4_mux = vc4_state->assigned_channel;
+ break;
+
+ case 5:
+ dsp5_mux = vc4_state->assigned_channel;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ reg = HVS_READ(SCALER_DISPECTRL);
+ HVS_WRITE(SCALER_DISPECTRL,
+ (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+ VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ HVS_WRITE(SCALER_DISPCTRL,
+ (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+ VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ HVS_WRITE(SCALER_DISPEOLN,
+ (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+ VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ HVS_WRITE(SCALER_DISPDITHER,
+ (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+ VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
+}
+
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc;
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
int i;
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state;
+
+ if (!new_crtc_state->commit)
continue;
- vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
- vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+ vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+ vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
+ if (vc4->hvs->hvs5)
+ clk_set_min_rate(hvs->core_clk, 500000000);
+
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
@@ -168,6 +301,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
vc4_ctm_commit(vc4, state);
+ if (vc4->hvs->hvs5)
+ vc5_hvs_pv_muxing_commit(vc4, state);
+ else
+ vc4_hvs_pv_muxing_commit(vc4, state);
+
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
@@ -182,6 +320,9 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
+ if (vc4->hvs->hvs5)
+ clk_set_min_rate(hvs->core_clk, 0);
+
drm_atomic_state_put(state);
up(&vc4->async_modeset);
@@ -374,8 +515,11 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
/* CTM is being enabled or the matrix changed. */
if (new_crtc_state->ctm) {
+ struct vc4_crtc_state *vc4_crtc_state =
+ to_vc4_crtc_state(new_crtc_state);
+
/* fifo is 1-based since 0 disables CTM. */
- int fifo = to_vc4_crtc(crtc)->channel + 1;
+ int fifo = vc4_crtc_state->assigned_channel + 1;
/* Check userland isn't trying to turn on CTM for more
* than one CRTC at a time.
@@ -415,6 +559,9 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
struct drm_plane *plane;
int i;
+ if (!vc4->load_tracker_available)
+ return 0;
+
priv_state = drm_atomic_get_private_obj_state(state,
&vc4->load_tracker);
if (IS_ERR(priv_state))
@@ -485,10 +632,115 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
+static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ if (!vc4->load_tracker_available)
+ return;
+
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
+}
+
+static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ if (!vc4->load_tracker_available)
+ return 0;
+
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
+ &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
+ return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+}
+
+#define NUM_OUTPUTS 6
+#define NUM_CHANNELS 3
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
- int ret;
+ unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ /*
+ * Since the HVS FIFOs are shared across all the pixelvalves and
+ * the TXP (and thus all the CRTCs), we need to pull the current
+ * state of all the enabled CRTCs so that an update to a single
+ * CRTC still keeps the previous FIFOs enabled and assigned to
+ * the same CRTCs, instead of evaluating only the CRTC being
+ * modified.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc->state->enable)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct vc4_crtc_state *new_vc4_crtc_state =
+ to_vc4_crtc_state(new_crtc_state);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ unsigned int matching_channels;
+
+ if (old_crtc_state->enable && !new_crtc_state->enable)
+ new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
+ unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+ continue;
+ }
+
+ /*
+ * The problem we have to solve here is that we have
+ * up to 7 encoders, connected to up to 6 CRTCs.
+ *
+ * Those CRTCs, depending on the instance, can be
+ * routed to 1, 2 or 3 HVS FIFOs, and we need to change
+ * the muxing between FIFOs and outputs in the HVS
+ * accordingly.
+ *
+ * It would be pretty hard to come up with an
+ * algorithm that would generically solve
+ * this. However, the current routing trees we support
+ * allow us to simplify the problem a bit.
+ *
+ * Indeed, with the currently supported layouts, if we
+ * assign the FIFOs in ascending CRTC index order, we
+ * can't fall into the situation where an earlier CRTC
+ * with multiple possible routes takes the only channel
+ * a later CRTC could have used.
+ *
+ * If a future layout breaks that assumption, we will
+ * need something smarter, but it works so far.
+ */
+ matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+ if (matching_channels) {
+ unsigned int channel = ffs(matching_channels) - 1;
+
+ new_vc4_crtc_state->assigned_channel = channel;
+ unassigned_channels &= ~BIT(channel);
+ } else {
+ return -EINVAL;
+ }
+ }
ret = vc4_ctm_atomic_check(dev, state);
if (ret < 0)
@@ -510,14 +762,18 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_ctm_state *ctm_state;
- struct vc4_load_tracker_state *load_state;
+ bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
+ "brcm,bcm2711-vc5");
int ret;
- /* Start with the load tracker enabled. Can be disabled through the
- * debugfs load_tracker file.
- */
- vc4->load_tracker_enabled = true;
+ if (!is_vc5) {
+ vc4->load_tracker_available = true;
+
+ /* Start with the load tracker enabled. Can be
+ * disabled through the debugfs load_tracker file.
+ */
+ vc4->load_tracker_enabled = true;
+ }
sema_init(&vc4->async_modeset, 1);
@@ -531,30 +787,26 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ if (is_vc5) {
+ dev->mode_config.max_width = 7680;
+ dev->mode_config.max_height = 7680;
+ } else {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ }
+
dev->mode_config.funcs = &vc4_mode_funcs;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
dev->mode_config.allow_fb_modifiers = true;
- drm_modeset_lock_init(&vc4->ctm_state_lock);
-
- ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
- if (!ctm_state)
- return -ENOMEM;
-
- drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
- &vc4_ctm_state_funcs);
-
- load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
- if (!load_state) {
- drm_atomic_private_obj_fini(&vc4->ctm_manager);
- return -ENOMEM;
- }
+ ret = vc4_ctm_obj_init(vc4);
+ if (ret)
+ return ret;
- drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
- &vc4_load_tracker_state_funcs);
+ ret = vc4_load_tracker_obj_init(vc4);
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
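
For reference, the channel-assignment loop added to vc4_atomic_check() reduces to a greedy walk over a bitmask of free FIFOs: in ascending CRTC order, each enabled CRTC takes the lowest unassigned channel it can reach, and the commit is rejected if none remains. A self-contained sketch of that policy (userspace C; the availability masks are made-up stand-ins for hvs_available_channels):

    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    #define NUM_CHANNELS 3

    /* Give each CRTC the lowest free channel it can use; -1 if stuck. */
    static int assign_channels(const unsigned int *available, int n, int *out)
    {
            unsigned int unassigned = (1u << NUM_CHANNELS) - 1;

            for (int i = 0; i < n; i++) {
                    unsigned int matching = unassigned & available[i];

                    if (!matching)
                            return -1;

                    out[i] = ffs(matching) - 1;
                    unassigned &= ~(1u << out[i]);
            }
            return 0;
    }

    int main(void)
    {
            /* Hypothetical routing: CRTC0 may use any FIFO, CRTC1 only FIFO2. */
            unsigned int available[] = { 0x7, 0x4 };
            int chan[2];

            if (!assign_channels(available, 2, chan))
                    printf("crtc0 -> %d, crtc1 -> %d\n", chan[0], chan[1]);
            return 0;
    }
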
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d040d9f12c6d..6b39cc2ca18d 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -32,45 +32,60 @@ static const struct hvs_format {
u32 drm; /* DRM_FORMAT_* */
u32 hvs; /* HVS_FORMAT_* */
u32 pixel_order;
+ u32 pixel_order_hvs5;
} hvs_formats[] = {
{
- .drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .drm = DRM_FORMAT_XRGB8888,
+ .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
- .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .drm = DRM_FORMAT_ARGB8888,
+ .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
- .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .drm = DRM_FORMAT_ABGR8888,
+ .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
- .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .drm = DRM_FORMAT_XBGR8888,
+ .hvs = HVS_PIXEL_FORMAT_RGBA8888,
.pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
},
{
- .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+ .drm = DRM_FORMAT_RGB565,
+ .hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
},
{
- .drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
+ .drm = DRM_FORMAT_BGR565,
+ .hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
},
{
- .drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ .drm = DRM_FORMAT_ARGB1555,
+ .hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
},
{
- .drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ .drm = DRM_FORMAT_XRGB1555,
+ .hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
},
{
- .drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888,
+ .drm = DRM_FORMAT_RGB888,
+ .hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
},
{
- .drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888,
+ .drm = DRM_FORMAT_BGR888,
+ .hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
},
{
@@ -422,10 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
- /* This is the worst case number. One of the two sizes will
- * be used depending on the scaling configuration.
- */
- u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
+ u32 pix_per_line;
u32 lbm;
/* LBM is not needed when there's no vertical scaling. */
@@ -433,6 +445,18 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
vc4_state->y_scaling[1] == VC4_SCALING_NONE)
return 0;
+ /*
+ * This can be further optimized in the RGB/YUV444 case if the PPF
+ * decimation factor is between 0.5 and 1.0 by using crtc_w.
+ *
+ * It's not an issue though, since in that case src_w[0] is going
+ * to be greater than or equal to crtc_w.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
+ pix_per_line = vc4_state->crtc_w;
+ else
+ pix_per_line = vc4_state->src_w[0];
+
if (!vc4_state->is_yuv) {
if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
lbm = pix_per_line * 8;
@@ -492,6 +516,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
struct vc4_plane_state *vc4_state;
struct drm_crtc_state *crtc_state;
unsigned int vscale_factor;
+ struct vc4_dev *vc4;
+
+ vc4 = to_vc4_dev(state->plane->dev);
+ if (!vc4->load_tracker_available)
+ return;
vc4_state = to_vc4_plane_state(state);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
@@ -563,7 +592,9 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
- lbm_size, 32, 0, 0);
+ lbm_size,
+ vc4->hvs->hvs5 ? 64 : 32,
+ 0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
if (ret)
@@ -776,35 +807,6 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return -EINVAL;
}
- /* Control word */
- vc4_dlist_write(vc4_state,
- SCALER_CTL0_VALID |
- (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
- (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
- VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
- (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
- (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
- VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
- (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
- VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
- VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
-
- /* Position Word 0: Image Positions and Alpha Value */
- vc4_state->pos0_offset = vc4_state->dlist_count;
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
- VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
- VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
-
- /* Position Word 1: Scaled Image Dimensions. */
- if (!vc4_state->is_unity) {
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(vc4_state->crtc_w,
- SCALER_POS1_SCL_WIDTH) |
- VC4_SET_FIELD(vc4_state->crtc_h,
- SCALER_POS1_SCL_HEIGHT));
- }
-
/* Don't waste cycles mixing with plane alpha if the set alpha
* is opaque or there is no per-pixel alpha information.
* In any case we use the alpha property value as the fixed alpha.
@@ -812,20 +814,120 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
- /* Position Word 2: Source Image Size, Alpha */
- vc4_state->pos2_offset = vc4_state->dlist_count;
- vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(fb->format->has_alpha ?
- SCALER_POS2_ALPHA_MODE_PIPELINE :
- SCALER_POS2_ALPHA_MODE_FIXED,
- SCALER_POS2_ALPHA_MODE) |
- (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
- (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
- VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
- VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
+ if (!vc4->hvs->hvs5) {
+ /* Control word */
+ vc4_dlist_write(vc4_state,
+ SCALER_CTL0_VALID |
+ (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
+ (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
+ VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
+ (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
+ (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
+ VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
+
+ /* Position Word 0: Image Positions and Alpha Value */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
+ VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
+ VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
+
+ /* Position Word 1: Scaled Image Dimensions. */
+ if (!vc4_state->is_unity) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_w,
+ SCALER_POS1_SCL_WIDTH) |
+ VC4_SET_FIELD(vc4_state->crtc_h,
+ SCALER_POS1_SCL_HEIGHT));
+ }
+
+ /* Position Word 2: Source Image Size, Alpha */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->format->has_alpha ?
+ SCALER_POS2_ALPHA_MODE_PIPELINE :
+ SCALER_POS2_ALPHA_MODE_FIXED,
+ SCALER_POS2_ALPHA_MODE) |
+ (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
+ (fb->format->has_alpha ?
+ SCALER_POS2_ALPHA_PREMULT : 0) |
+ VC4_SET_FIELD(vc4_state->src_w[0],
+ SCALER_POS2_WIDTH) |
+ VC4_SET_FIELD(vc4_state->src_h[0],
+ SCALER_POS2_HEIGHT));
+
+ /* Position Word 3: Context. Written by the HVS. */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
- /* Position Word 3: Context. Written by the HVS. */
- vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ } else {
+ u32 hvs_pixel_order = format->pixel_order;
+
+ if (format->pixel_order_hvs5)
+ hvs_pixel_order = format->pixel_order_hvs5;
+
+ /* Control word */
+ vc4_dlist_write(vc4_state,
+ SCALER_CTL0_VALID |
+ (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) |
+ (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+ VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
+ (vc4_state->is_unity ?
+ SCALER5_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
+ VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) |
+ SCALER5_CTL0_ALPHA_EXPAND |
+ SCALER5_CTL0_RGB_EXPAND);
+
+ /* Position Word 0: Image Positions and Alpha Value */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ (rotation & DRM_MODE_REFLECT_Y ?
+ SCALER5_POS0_VFLIP : 0) |
+ VC4_SET_FIELD(vc4_state->crtc_x,
+ SCALER_POS0_START_X) |
+ (rotation & DRM_MODE_REFLECT_X ?
+ SCALER5_POS0_HFLIP : 0) |
+ VC4_SET_FIELD(vc4_state->crtc_y,
+ SCALER5_POS0_START_Y)
+ );
+
+ /* Control Word 2 */
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(state->alpha >> 4,
+ SCALER5_CTL2_ALPHA) |
+ (fb->format->has_alpha ?
+ SCALER5_CTL2_ALPHA_PREMULT : 0) |
+ (mix_plane_alpha ?
+ SCALER5_CTL2_ALPHA_MIX : 0) |
+ VC4_SET_FIELD(fb->format->has_alpha ?
+ SCALER5_CTL2_ALPHA_MODE_PIPELINE :
+ SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE)
+ );
+
+ /* Position Word 1: Scaled Image Dimensions. */
+ if (!vc4_state->is_unity) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_w,
+ SCALER_POS1_SCL_WIDTH) |
+ VC4_SET_FIELD(vc4_state->crtc_h,
+ SCALER_POS1_SCL_HEIGHT));
+ }
+
+ /* Position Word 2: Source Image Size */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->src_w[0],
+ SCALER5_POS2_WIDTH) |
+ VC4_SET_FIELD(vc4_state->src_h[0],
+ SCALER5_POS2_HEIGHT));
+
+ /* Position Word 3: Context. Written by the HVS. */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+ }
/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
@@ -1203,6 +1305,10 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
default:
return false;
}
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
@@ -1255,6 +1361,8 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
&vc4_plane_funcs,
formats, ARRAY_SIZE(formats),
modifiers, type, NULL);
+ if (ret)
+ return ERR_PTR(ret);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
@@ -1283,7 +1391,7 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* modest number of planes to expose, that should hopefully
* still cover any sane usecase.
*/
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 16; i++) {
struct drm_plane *plane =
vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
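
The vc4_lbm_size() change earlier in this file picks the line-buffer width from either the destination or the source width depending on the horizontal scaling mode. A toy version of just that choice (illustrative values only, not the full LBM size formula):

    #include <stdint.h>
    #include <stdio.h>

    enum scaling { SCALING_NONE, SCALING_TPZ, SCALING_PPF };

    /* TPZ horizontal scaling reads at most crtc_w pixels per line; any
     * other mode is bounded by the source width, as argued in the
     * comment added to vc4_lbm_size().
     */
    static uint32_t pix_per_line(enum scaling x_scaling,
                                 uint32_t src_w, uint32_t crtc_w)
    {
            return x_scaling == SCALING_TPZ ? crtc_w : src_w;
    }

    int main(void)
    {
            printf("%u\n", pix_per_line(SCALING_TPZ, 1920, 1280)); /* 1280 */
            printf("%u\n", pix_per_line(SCALING_PPF, 1920, 1280)); /* 1920 */
            return 0;
    }
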
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 324462cc9cd4..be2c32a519b3 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -129,6 +129,8 @@
#define V3D_ERRSTAT 0x00f20
#define PV_CONTROL 0x00
+# define PV5_CONTROL_FIFO_LEVEL_HIGH_MASK VC4_MASK(26, 25)
+# define PV5_CONTROL_FIFO_LEVEL_HIGH_SHIFT 25
# define PV_CONTROL_FORMAT_MASK VC4_MASK(23, 21)
# define PV_CONTROL_FORMAT_SHIFT 21
# define PV_CONTROL_FORMAT_24 0
@@ -208,6 +210,11 @@
#define PV_HACT_ACT 0x30
+#define PV_MUX_CFG 0x34
+# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_MASK VC4_MASK(5, 2)
+# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2
+# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8
+
#define SCALER_CHANNELS_COUNT 3
#define SCALER_DISPCTRL 0x00000000
@@ -286,9 +293,19 @@
#define SCALER_DISPID 0x00000008
#define SCALER_DISPECTRL 0x0000000c
+# define SCALER_DISPECTRL_DSP2_MUX_SHIFT 31
+# define SCALER_DISPECTRL_DSP2_MUX_MASK VC4_MASK(31, 31)
+
#define SCALER_DISPPROF 0x00000010
+
#define SCALER_DISPDITHER 0x00000014
+# define SCALER_DISPDITHER_DSP5_MUX_SHIFT 30
+# define SCALER_DISPDITHER_DSP5_MUX_MASK VC4_MASK(31, 30)
+
#define SCALER_DISPEOLN 0x00000018
+# define SCALER_DISPEOLN_DSP4_MUX_SHIFT 30
+# define SCALER_DISPEOLN_DSP4_MUX_MASK VC4_MASK(31, 30)
+
#define SCALER_DISPLIST0 0x00000020
#define SCALER_DISPLIST1 0x00000024
#define SCALER_DISPLIST2 0x00000028
@@ -327,6 +344,20 @@
# define SCALER_DISPCTRLX_HEIGHT_MASK VC4_MASK(11, 0)
# define SCALER_DISPCTRLX_HEIGHT_SHIFT 0
+# define SCALER5_DISPCTRLX_WIDTH_MASK VC4_MASK(28, 16)
+# define SCALER5_DISPCTRLX_WIDTH_SHIFT 16
+/* Generates a single frame when VSTART is seen and stops at the last
+ * pixel read from the FIFO.
+ */
+# define SCALER5_DISPCTRLX_ONESHOT BIT(15)
+/* Processes a single context in the dlist and then task switches,
+ * instead of processing an entire line.
+ */
+# define SCALER5_DISPCTRLX_ONECTX_MASK VC4_MASK(14, 13)
+# define SCALER5_DISPCTRLX_ONECTX_SHIFT 13
+# define SCALER5_DISPCTRLX_HEIGHT_MASK VC4_MASK(12, 0)
+# define SCALER5_DISPCTRLX_HEIGHT_SHIFT 0
+
#define SCALER_DISPBKGND0 0x00000044
# define SCALER_DISPBKGND_AUTOHS BIT(31)
# define SCALER_DISPBKGND_INTERLACE BIT(30)
@@ -460,32 +491,18 @@
#define SCALER_DLIST_START 0x00002000
#define SCALER_DLIST_SIZE 0x00004000
-#define VC4_HDMI_CORE_REV 0x000
+#define SCALER5_DLIST_START 0x00004000
-#define VC4_HDMI_SW_RESET_CONTROL 0x004
# define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1)
# define VC4_HDMI_SW_RESET_HDMI BIT(0)
-#define VC4_HDMI_HOTPLUG_INT 0x008
-
-#define VC4_HDMI_HOTPLUG 0x00c
# define VC4_HDMI_HOTPLUG_CONNECTED BIT(0)
-/* 3 bits per field, where each field maps from that corresponding MAI
- * bus channel to the given HDMI channel.
- */
-#define VC4_HDMI_MAI_CHANNEL_MAP 0x090
-
-#define VC4_HDMI_MAI_CONFIG 0x094
# define VC4_HDMI_MAI_CONFIG_FORMAT_REVERSE BIT(27)
# define VC4_HDMI_MAI_CONFIG_BIT_REVERSE BIT(26)
# define VC4_HDMI_MAI_CHANNEL_MASK_MASK VC4_MASK(15, 0)
# define VC4_HDMI_MAI_CHANNEL_MASK_SHIFT 0
-/* Last received format word on the MAI bus. */
-#define VC4_HDMI_MAI_FORMAT 0x098
-
-#define VC4_HDMI_AUDIO_PACKET_CONFIG 0x09c
# define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_SAMPLE_FLAT BIT(29)
# define VC4_HDMI_AUDIO_PACKET_ZERO_DATA_ON_INACTIVE_CHANNELS BIT(24)
# define VC4_HDMI_AUDIO_PACKET_FORCE_SAMPLE_PRESENT BIT(19)
@@ -499,12 +516,8 @@
# define VC4_HDMI_AUDIO_PACKET_CEA_MASK_MASK VC4_MASK(7, 0)
# define VC4_HDMI_AUDIO_PACKET_CEA_MASK_SHIFT 0
-#define VC4_HDMI_RAM_PACKET_CONFIG 0x0a0
# define VC4_HDMI_RAM_PACKET_ENABLE BIT(16)
-#define VC4_HDMI_RAM_PACKET_STATUS 0x0a4
-
-#define VC4_HDMI_CRP_CFG 0x0a8
/* When set, the CTS_PERIOD counts based on MAI bus sync pulse instead
* of pixel clock.
*/
@@ -518,23 +531,12 @@
# define VC4_HDMI_CRP_CFG_N_MASK VC4_MASK(19, 0)
# define VC4_HDMI_CRP_CFG_N_SHIFT 0
-/* 20-bit fields containing CTS values to be transmitted if !EXTERNAL_CTS_EN */
-#define VC4_HDMI_CTS_0 0x0ac
-#define VC4_HDMI_CTS_1 0x0b0
-/* 20-bit fields containing number of clocks to send CTS0/1 before
- * switching to the other one.
- */
-#define VC4_HDMI_CTS_PERIOD_0 0x0b4
-#define VC4_HDMI_CTS_PERIOD_1 0x0b8
-
-#define VC4_HDMI_HORZA 0x0c4
# define VC4_HDMI_HORZA_VPOS BIT(14)
# define VC4_HDMI_HORZA_HPOS BIT(13)
/* Horizontal active pixels (hdisplay). */
# define VC4_HDMI_HORZA_HAP_MASK VC4_MASK(12, 0)
# define VC4_HDMI_HORZA_HAP_SHIFT 0
-#define VC4_HDMI_HORZB 0x0c8
/* Horizontal pack porch (htotal - hsync_end). */
# define VC4_HDMI_HORZB_HBP_MASK VC4_MASK(29, 20)
# define VC4_HDMI_HORZB_HBP_SHIFT 20
@@ -545,7 +547,6 @@
# define VC4_HDMI_HORZB_HFP_MASK VC4_MASK(9, 0)
# define VC4_HDMI_HORZB_HFP_SHIFT 0
-#define VC4_HDMI_FIFO_CTL 0x05c
# define VC4_HDMI_FIFO_CTL_RECENTER_DONE BIT(14)
# define VC4_HDMI_FIFO_CTL_USE_EMPTY BIT(13)
# define VC4_HDMI_FIFO_CTL_ON_VB BIT(7)
@@ -558,15 +559,12 @@
# define VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N BIT(0)
# define VC4_HDMI_FIFO_VALID_WRITE_MASK 0xefff
-#define VC4_HDMI_SCHEDULER_CONTROL 0x0c0
# define VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT BIT(15)
# define VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS BIT(5)
# define VC4_HDMI_SCHEDULER_CONTROL_VERT_ALWAYS_KEEPOUT BIT(3)
# define VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE BIT(1)
# define VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI BIT(0)
-#define VC4_HDMI_VERTA0 0x0cc
-#define VC4_HDMI_VERTA1 0x0d4
/* Vertical sync pulse (vsync_end - vsync_start). */
# define VC4_HDMI_VERTA_VSP_MASK VC4_MASK(24, 20)
# define VC4_HDMI_VERTA_VSP_SHIFT 20
@@ -577,8 +575,6 @@
# define VC4_HDMI_VERTA_VAL_MASK VC4_MASK(12, 0)
# define VC4_HDMI_VERTA_VAL_SHIFT 0
-#define VC4_HDMI_VERTB0 0x0d0
-#define VC4_HDMI_VERTB1 0x0d8
/* Vertical sync pulse offset (for interlaced) */
# define VC4_HDMI_VERTB_VSPO_MASK VC4_MASK(21, 9)
# define VC4_HDMI_VERTB_VSPO_SHIFT 9
@@ -586,7 +582,6 @@
# define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0)
# define VC4_HDMI_VERTB_VBP_SHIFT 0
-#define VC4_HDMI_CEC_CNTRL_1 0x0e8
/* Set when the transmission has ended. */
# define VC4_HDMI_CEC_TX_EOM BIT(31)
/* If set, transmission was acked on the 1st or 2nd attempt (only one
@@ -627,7 +622,6 @@
/* Set these fields to how many bit clock cycles get to that many
* microseconds.
*/
-#define VC4_HDMI_CEC_CNTRL_2 0x0ec
# define VC4_HDMI_CEC_CNT_TO_1500_US_MASK VC4_MASK(30, 24)
# define VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT 24
# define VC4_HDMI_CEC_CNT_TO_1300_US_MASK VC4_MASK(23, 17)
@@ -639,7 +633,6 @@
# define VC4_HDMI_CEC_CNT_TO_400_US_MASK VC4_MASK(4, 0)
# define VC4_HDMI_CEC_CNT_TO_400_US_SHIFT 0
-#define VC4_HDMI_CEC_CNTRL_3 0x0f0
# define VC4_HDMI_CEC_CNT_TO_2750_US_MASK VC4_MASK(31, 24)
# define VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT 24
# define VC4_HDMI_CEC_CNT_TO_2400_US_MASK VC4_MASK(23, 16)
@@ -649,7 +642,6 @@
# define VC4_HDMI_CEC_CNT_TO_1700_US_MASK VC4_MASK(7, 0)
# define VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT 0
-#define VC4_HDMI_CEC_CNTRL_4 0x0f4
# define VC4_HDMI_CEC_CNT_TO_4300_US_MASK VC4_MASK(31, 24)
# define VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT 24
# define VC4_HDMI_CEC_CNT_TO_3900_US_MASK VC4_MASK(23, 16)
@@ -659,7 +651,6 @@
# define VC4_HDMI_CEC_CNT_TO_3500_US_MASK VC4_MASK(7, 0)
# define VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT 0
-#define VC4_HDMI_CEC_CNTRL_5 0x0f8
# define VC4_HDMI_CEC_TX_SW_RESET BIT(27)
# define VC4_HDMI_CEC_RX_SW_RESET BIT(26)
# define VC4_HDMI_CEC_PAD_SW_RESET BIT(25)
@@ -672,39 +663,11 @@
# define VC4_HDMI_CEC_CNT_TO_4500_US_MASK VC4_MASK(7, 0)
# define VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT 0
-/* Transmit data, first byte is low byte of the 32-bit reg. MSB of
- * each byte transmitted first.
- */
-#define VC4_HDMI_CEC_TX_DATA_1 0x0fc
-#define VC4_HDMI_CEC_TX_DATA_2 0x100
-#define VC4_HDMI_CEC_TX_DATA_3 0x104
-#define VC4_HDMI_CEC_TX_DATA_4 0x108
-#define VC4_HDMI_CEC_RX_DATA_1 0x10c
-#define VC4_HDMI_CEC_RX_DATA_2 0x110
-#define VC4_HDMI_CEC_RX_DATA_3 0x114
-#define VC4_HDMI_CEC_RX_DATA_4 0x118
-
-#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
-
-#define VC4_HDMI_TX_PHY_CTL0 0x2c4
# define VC4_HDMI_TX_PHY_RNG_PWRDN BIT(25)
-/* Interrupt status bits */
-#define VC4_HDMI_CPU_STATUS 0x340
-#define VC4_HDMI_CPU_SET 0x344
-#define VC4_HDMI_CPU_CLEAR 0x348
# define VC4_HDMI_CPU_CEC BIT(6)
# define VC4_HDMI_CPU_HOTPLUG BIT(0)
-#define VC4_HDMI_CPU_MASK_STATUS 0x34c
-#define VC4_HDMI_CPU_MASK_SET 0x350
-#define VC4_HDMI_CPU_MASK_CLEAR 0x354
-
-#define VC4_HDMI_GCP(x) (0x400 + ((x) * 0x4))
-#define VC4_HDMI_RAM_PACKET(x) (0x400 + ((x) * 0x24))
-#define VC4_HDMI_PACKET_STRIDE 0x24
-
-#define VC4_HD_M_CTL 0x00c
/* Debug: Current receive value on the CEC pad. */
# define VC4_HD_CECRXD BIT(9)
/* Debug: Override CEC output to 0. */
@@ -714,7 +677,6 @@
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
-#define VC4_HD_MAI_CTL 0x014
/* Set when audio stream is received at a slower rate than the
* sampling period, so MAI fifo goes empty. Write 1 to clear.
*/
@@ -739,7 +701,6 @@
/* Single-shot reset bit. Read value is undefined. */
# define VC4_HD_MAI_CTL_RESET BIT(0)
-#define VC4_HD_MAI_THR 0x018
# define VC4_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 24)
# define VC4_HD_MAI_THR_PANICHIGH_SHIFT 24
# define VC4_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 16)
@@ -749,31 +710,23 @@
# define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0)
# define VC4_HD_MAI_THR_DREQLOW_SHIFT 0
-/* Format header to be placed on the MAI data. Unused. */
-#define VC4_HD_MAI_FMT 0x01c
-
-/* Register for DMAing in audio data to be transported over the MAI
- * bus to the Falcon core.
- */
-#define VC4_HD_MAI_DATA 0x020
-
/* Divider from HDMI HSM clock to MAI serial clock. Sampling period
* converges to N / (M + 1) cycles.
*/
-#define VC4_HD_MAI_SMP 0x02c
# define VC4_HD_MAI_SMP_N_MASK VC4_MASK(31, 8)
# define VC4_HD_MAI_SMP_N_SHIFT 8
# define VC4_HD_MAI_SMP_M_MASK VC4_MASK(7, 0)
# define VC4_HD_MAI_SMP_M_SHIFT 0
-#define VC4_HD_VID_CTL 0x038
# define VC4_HD_VID_CTL_ENABLE BIT(31)
# define VC4_HD_VID_CTL_UNDERFLOW_ENABLE BIT(30)
# define VC4_HD_VID_CTL_FRAME_COUNTER_RESET BIT(29)
# define VC4_HD_VID_CTL_VSYNC_LOW BIT(28)
# define VC4_HD_VID_CTL_HSYNC_LOW BIT(27)
+# define VC4_HD_VID_CTL_CLRSYNC BIT(24)
+# define VC4_HD_VID_CTL_CLRRGB BIT(23)
+# define VC4_HD_VID_CTL_BLANKPIX BIT(18)
-#define VC4_HD_CSC_CTL 0x040
# define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5)
# define VC4_HD_CSC_CTL_ORDER_SHIFT 5
# define VC4_HD_CSC_CTL_ORDER_RGB 0
@@ -791,14 +744,7 @@
# define VC4_HD_CSC_CTL_RGB2YCC BIT(1)
# define VC4_HD_CSC_CTL_ENABLE BIT(0)
-#define VC4_HD_CSC_12_11 0x044
-#define VC4_HD_CSC_14_13 0x048
-#define VC4_HD_CSC_22_21 0x04c
-#define VC4_HD_CSC_24_23 0x050
-#define VC4_HD_CSC_32_31 0x054
-#define VC4_HD_CSC_34_33 0x058
-
-#define VC4_HD_FRAME_COUNT 0x068
+# define VC4_DVP_HT_CLOCK_STOP_PIXEL BIT(1)
/* HVS display list information. */
#define HVS_BOOTLOADER_DLIST_END 32
@@ -825,6 +771,8 @@ enum hvs_pixel_format {
HVS_PIXEL_FORMAT_PALETTE = 13,
HVS_PIXEL_FORMAT_YUV444_RGB = 14,
HVS_PIXEL_FORMAT_AYUV444_RGB = 15,
+ HVS_PIXEL_FORMAT_RGBA1010102 = 16,
+ HVS_PIXEL_FORMAT_YCBCR_10BIT = 17,
};
/* Note: the LSB is the rightmost character shown. Only valid for
@@ -879,6 +827,10 @@ enum hvs_pixel_format {
#define SCALER_CTL0_RGBA_EXPAND_MSB 2
#define SCALER_CTL0_RGBA_EXPAND_ROUND 3
+#define SCALER5_CTL0_ALPHA_EXPAND BIT(12)
+
+#define SCALER5_CTL0_RGB_EXPAND BIT(11)
+
#define SCALER_CTL0_SCL1_MASK VC4_MASK(10, 8)
#define SCALER_CTL0_SCL1_SHIFT 8
@@ -896,10 +848,13 @@ enum hvs_pixel_format {
/* Set to indicate no scaling. */
#define SCALER_CTL0_UNITY BIT(4)
+#define SCALER5_CTL0_UNITY BIT(15)
#define SCALER_CTL0_PIXEL_FORMAT_MASK VC4_MASK(3, 0)
#define SCALER_CTL0_PIXEL_FORMAT_SHIFT 0
+#define SCALER5_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0)
+
#define SCALER_POS0_FIXED_ALPHA_MASK VC4_MASK(31, 24)
#define SCALER_POS0_FIXED_ALPHA_SHIFT 24
@@ -909,12 +864,48 @@ enum hvs_pixel_format {
#define SCALER_POS0_START_X_MASK VC4_MASK(11, 0)
#define SCALER_POS0_START_X_SHIFT 0
+#define SCALER5_POS0_START_Y_MASK VC4_MASK(27, 16)
+#define SCALER5_POS0_START_Y_SHIFT 16
+
+#define SCALER5_POS0_START_X_MASK VC4_MASK(13, 0)
+#define SCALER5_POS0_START_X_SHIFT 0
+
+#define SCALER5_POS0_VFLIP BIT(31)
+#define SCALER5_POS0_HFLIP BIT(15)
+
+#define SCALER5_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30)
+#define SCALER5_CTL2_ALPHA_MODE_SHIFT 30
+#define SCALER5_CTL2_ALPHA_MODE_PIPELINE 0
+#define SCALER5_CTL2_ALPHA_MODE_FIXED 1
+#define SCALER5_CTL2_ALPHA_MODE_FIXED_NONZERO 2
+#define SCALER5_CTL2_ALPHA_MODE_FIXED_OVER_0x07 3
+
+#define SCALER5_CTL2_ALPHA_PREMULT BIT(29)
+
+#define SCALER5_CTL2_ALPHA_MIX BIT(28)
+
+#define SCALER5_CTL2_ALPHA_LOC BIT(25)
+
+#define SCALER5_CTL2_MAP_SEL_MASK VC4_MASK(18, 17)
+#define SCALER5_CTL2_MAP_SEL_SHIFT 17
+
+#define SCALER5_CTL2_GAMMA BIT(16)
+
+#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4)
+#define SCALER5_CTL2_ALPHA_SHIFT 4
+
#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS1_SCL_HEIGHT_SHIFT 16
#define SCALER_POS1_SCL_WIDTH_MASK VC4_MASK(11, 0)
#define SCALER_POS1_SCL_WIDTH_SHIFT 0
+#define SCALER5_POS1_SCL_HEIGHT_MASK VC4_MASK(28, 16)
+#define SCALER5_POS1_SCL_HEIGHT_SHIFT 16
+
+#define SCALER5_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0)
+#define SCALER5_POS1_SCL_WIDTH_SHIFT 0
+
#define SCALER_POS2_ALPHA_MODE_MASK VC4_MASK(31, 30)
#define SCALER_POS2_ALPHA_MODE_SHIFT 30
#define SCALER_POS2_ALPHA_MODE_PIPELINE 0
@@ -930,6 +921,12 @@ enum hvs_pixel_format {
#define SCALER_POS2_WIDTH_MASK VC4_MASK(11, 0)
#define SCALER_POS2_WIDTH_SHIFT 0
+#define SCALER5_POS2_HEIGHT_MASK VC4_MASK(28, 16)
+#define SCALER5_POS2_HEIGHT_SHIFT 16
+
+#define SCALER5_POS2_WIDTH_MASK VC4_MASK(12, 0)
+#define SCALER5_POS2_WIDTH_SHIFT 0
+
/* Color Space Conversion words. Some values are S2.8 signed
* integers, except that the 2 integer bits map as {0x0: 0, 0x1: 1,
* 0x2: 2, 0x3: -1}
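
All of the definitions above are consumed through the VC4_MASK()/VC4_SET_FIELD() helpers declared elsewhere in vc4_regs.h. A simplified userspace imitation of how such mask/shift pairs compose (these macros only mimic the kernel ones):

    #include <stdint.h>
    #include <stdio.h>

    /* GENMASK-style mask over bits [high:low]. */
    #define MASK(high, low) \
            ((uint32_t)(((1ull << ((high) - (low) + 1)) - 1) << (low)))
    /* Pack a value into a field via token-pasted _SHIFT/_MASK names. */
    #define SET_FIELD(value, field) \
            (((uint32_t)(value) << field##_SHIFT) & field##_MASK)

    #define POS2_WIDTH_MASK  MASK(12, 0)
    #define POS2_WIDTH_SHIFT 0

    int main(void)
    {
            /* Pack a 1920-pixel source width into a POS2-style word. */
            printf("0x%08x\n", SET_FIELD(1920, POS2_WIDTH));
            return 0;
    }
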
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index a7c3af0005a0..849dcafbfff1 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -436,7 +436,6 @@ static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = {
.atomic_flush = vc4_hvs_atomic_flush,
.atomic_enable = vc4_txp_atomic_enable,
.atomic_disable = vc4_txp_atomic_disable,
- .mode_set_nofb = vc4_hvs_mode_set_nofb,
};
static irqreturn_t vc4_txp_interrupt(int irq, void *data)
@@ -452,7 +451,8 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
}
static const struct vc4_crtc_data vc4_txp_crtc_data = {
- .hvs_channel = 2,
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
};
static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index f7ab979721b3..65d0dac69b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
- struct drm_device *dev = vc4->dev;
+ struct drm_device *dev = &vc4->base;
unsigned long irqflags;
int slot;
uint64_t seqno = 0;
@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
INIT_LIST_HEAD(&list);
while (true) {
- struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
+ struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) {
@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
struct vc4_v3d *v3d = dev_get_drvdata(dev);
struct vc4_dev *vc4 = v3d->vc4;
- vc4_irq_uninstall(vc4->dev);
+ vc4_irq_uninstall(&vc4->base);
clk_disable_unprepare(v3d->clk);
@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev)
if (ret != 0)
return ret;
- vc4_v3d_init_hw(vc4->dev);
+ vc4_v3d_init_hw(&vc4->base);
/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->dev->irq);
- vc4_irq_postinstall(vc4->dev);
+ enable_irq(vc4->base.irq);
+ vc4_irq_postinstall(&vc4->base);
return 0;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a775feda1cc7..cb884c890065 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -321,7 +321,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+ return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
@@ -401,16 +401,8 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
return 0;
}
-static void vgem_release(struct drm_device *dev)
-{
- struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
-
- platform_device_unregister(vgem->platform);
-}
-
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
- .release = vgem_release,
.open = vgem_open,
.postclose = vgem_postclose,
.gem_free_object_unlocked = vgem_gem_free_object,
@@ -442,48 +434,49 @@ static struct drm_driver vgem_driver = {
static int __init vgem_init(void)
{
int ret;
+ struct platform_device *pdev;
- vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
- if (!vgem_device)
- return -ENOMEM;
+ pdev = platform_device_register_simple("vgem", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- vgem_device->platform =
- platform_device_register_simple("vgem", -1, NULL, 0);
- if (IS_ERR(vgem_device->platform)) {
- ret = PTR_ERR(vgem_device->platform);
- goto out_free;
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_unregister;
}
- dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
+ dma_coerce_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64));
- ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
- &vgem_device->platform->dev);
- if (ret)
- goto out_unregister;
- drmm_add_final_kfree(&vgem_device->drm, vgem_device);
+
+ vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
+ struct vgem_device, drm);
+ if (IS_ERR(vgem_device)) {
+ ret = PTR_ERR(vgem_device);
+ goto out_devres;
+ }
+ vgem_device->platform = pdev;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_put;
+ goto out_devres;
return 0;
-out_put:
- drm_dev_put(&vgem_device->drm);
- return ret;
-
+out_devres:
+ devres_release_group(&pdev->dev, NULL);
out_unregister:
- platform_device_unregister(vgem_device->platform);
-out_free:
- kfree(vgem_device);
+ platform_device_unregister(pdev);
return ret;
}
static void __exit vgem_exit(void)
{
+ struct platform_device *pdev = vgem_device->platform;
+
drm_dev_unregister(&vgem_device->drm);
- drm_dev_put(&vgem_device->drm);
+ devres_release_group(&pdev->dev, NULL);
+ platform_device_unregister(pdev);
}
module_init(vgem_init);
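
The reworked vgem_init() above is a classic goto-unwind ladder: each failure path releases exactly what was acquired before it, and vgem_exit() tears the same resources down in reverse order. A runnable skeleton of the pattern (plain allocations stand in for the platform device, the devres group, and the DRM device):

    #include <stdlib.h>

    static int demo_init(void)
    {
            void *pdev, *group, *ddev;

            pdev = malloc(1);       /* platform_device_register_simple() */
            if (!pdev)
                    return -1;

            group = malloc(1);      /* devres_open_group() */
            if (!group)
                    goto out_unregister;

            ddev = malloc(1);       /* devm_drm_dev_alloc() */
            if (!ddev)
                    goto out_devres;

            free(ddev);
            free(group);
            free(pdev);
            return 0;

    out_devres:
            free(group);            /* devres_release_group() */
    out_unregister:
            free(pdev);             /* platform_device_unregister() */
            return -1;
    }

    int main(void)
    {
            return demo_init();
    }
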
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index eff3047052d4..b925b8b1da16 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
- depends on DRM && VIRTIO && MMU
+ depends on DRM && VIRTIO && VIRTIO_MENU && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
+ select VIRTIO_DMA_SHARED_BUFFER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 3221520f61f0..d5b0c543bd6d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -48,6 +48,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
virtio_add_bool(m, "indirect", vgdev->has_indirect);
+ virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index afd0f9200f90..f84b7e61311b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -172,8 +172,6 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
if (width == 0 || height == 0) {
- width = XRES_DEF;
- height = YRES_DEF;
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
} else {
DRM_DEBUG("add mode: %dx%d\n", width, height);
@@ -327,11 +325,14 @@ static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
- int i;
+ int i, ret;
+
+ ret = drmm_mode_config_init(vgdev->ddev);
+ if (ret)
+ return ret;
- drm_mode_config_init(vgdev->ddev);
vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
@@ -345,6 +346,7 @@ void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
vgdev_output_init(vgdev, i);
drm_mode_config_reset(vgdev->ddev);
+ return 0;
}
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
@@ -353,5 +355,4 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
- drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ab4bed78e656..b039f493bda9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -165,6 +165,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_VIRGL,
#endif
VIRTIO_GPU_F_EDID,
+ VIRTIO_GPU_F_RESOURCE_UUID,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -202,6 +203,8 @@ static struct drm_driver driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_mmap = drm_gem_prime_mmap,
+ .gem_prime_export = virtgpu_gem_prime_export,
+ .gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_create_object = virtio_gpu_create_object,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index fbc04272db4f..55c34b4fc3e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -49,6 +49,10 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
+#define UUID_INITIALIZING 0
+#define UUID_INITIALIZED 1
+#define UUID_INITIALIZATION_FAILED 2
+
struct virtio_gpu_object_params {
uint32_t format;
uint32_t width;
@@ -71,6 +75,9 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
bool dumb;
bool created;
+
+ int uuid_state;
+ uuid_t uuid;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
@@ -200,6 +207,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
bool has_indirect;
+ bool has_resource_assign_uuid;
struct work_struct config_changed_work;
@@ -210,6 +218,9 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
+
+ /* protects resource state when exporting */
+ spinlock_t resource_export_lock;
};
struct virtio_gpu_fpriv {
@@ -336,8 +347,12 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+
/* virtgpu_display.c */
-void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtgpu_plane.c */
@@ -367,6 +382,12 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
/* virtgpu_prime.c */
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+ int flags);
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf);
+int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
+ uuid_t *uuid);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 24ffacac99e4..c30c75ee83fc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -154,9 +154,8 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
struct virtio_gpu_object_array *objs;
- size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
- objs = kmalloc(size, GFP_KERNEL);
+ objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
if (!objs)
return NULL;
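
The struct_size() conversion above replaces an open-coded sizeof multiplication that could overflow for a huge nents. A userspace approximation of what the kernel helper computes (struct_size() itself lives in <linux/overflow.h>; this sketch only mirrors its saturating behaviour):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj_array {
            uint32_t nents;
            void *objs[];           /* flexible array member */
    };

    /* header + n * element, saturating to SIZE_MAX on overflow so a
     * later allocation fails instead of silently under-allocating.
     */
    static size_t obj_array_size(uint32_t nents)
    {
            size_t bytes;

            if (__builtin_mul_overflow((size_t)nents, sizeof(void *), &bytes) ||
                __builtin_add_overflow(bytes, sizeof(struct obj_array), &bytes))
                    return SIZE_MAX;
            return bytes;
    }

    int main(void)
    {
            struct obj_array *a = malloc(obj_array_size(4));

            if (!a)
                    return 1;
            a->nents = 4;
            free(a);
            return 0;
    }
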
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 4d944a0dff3e..eed57a931309 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -80,8 +80,10 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
+ spin_unlock(&vgdev->display_info_lock);
return;
}
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
@@ -103,7 +105,7 @@ int virtio_gpu_init(struct drm_device *dev)
/* this will expand later */
struct virtqueue *vqs[2];
u32 num_scanouts, num_capsets;
- int ret;
+ int ret = 0;
if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
return -ENODEV;
@@ -118,6 +120,7 @@ int virtio_gpu_init(struct drm_device *dev)
vgdev->dev = dev->dev;
spin_lock_init(&vgdev->display_info_lock);
+ spin_lock_init(&vgdev->resource_export_lock);
ida_init(&vgdev->ctx_id_ida);
ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
@@ -146,6 +149,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
vgdev->has_indirect = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+ vgdev->has_resource_assign_uuid = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
@@ -180,7 +186,11 @@ int virtio_gpu_init(struct drm_device *dev)
num_capsets, &num_capsets);
DRM_INFO("number of cap sets: %d\n", num_capsets);
- virtio_gpu_modeset_init(vgdev);
+ ret = virtio_gpu_modeset_init(vgdev);
+ if (ret) {
+ DRM_ERROR("modeset init failed\n");
+ goto err_scanouts;
+ }
virtio_device_ready(vgdev->vdev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 842f8b61aa89..00d6b95e259d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -72,9 +72,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
if (shmem->pages) {
if (shmem->mapped) {
- dma_unmap_sg(vgdev->vdev->dev.parent,
- shmem->pages->sgl, shmem->mapped,
- DMA_TO_DEVICE);
+ dma_unmap_sgtable(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE, 0);
shmem->mapped = 0;
}
@@ -164,13 +163,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
}
if (use_dma_api) {
- shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- shmem->pages->sgl,
- shmem->pages->nents,
- DMA_TO_DEVICE);
- *nents = shmem->mapped;
+ ret = dma_map_sgtable(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE, 0);
+ if (ret)
+ return ret;
+ *nents = shmem->mapped = shmem->pages->nents;
} else {
- *nents = shmem->pages->nents;
+ *nents = shmem->pages->orig_nents;
}
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
@@ -180,13 +179,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
return -ENOMEM;
}
- for_each_sg(shmem->pages->sgl, sg, *nents, si) {
- (*ents)[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- (*ents)[si].length = cpu_to_le32(sg->length);
- (*ents)[si].padding = 0;
+ if (use_dma_api) {
+ for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+ (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
+ (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
+ (*ents)[si].padding = 0;
+ }
+ } else {
+ for_each_sgtable_sg(shmem->pages, sg, si) {
+ (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
}
+
return 0;
}
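
The dma_map_sgtable()/for_each_sgtable_dma_sg() conversion above matters because, after mapping, the DMA view of a scatter-gather table may contain fewer (IOMMU-coalesced) entries than the CPU page list, so each view has to be walked with its own iterator and length. A toy userspace model of that distinction (the struct below is an illustration, not the kernel scatterlist):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_sg {
            uint64_t cpu_addr;
            uint32_t cpu_len;
            uint64_t dma_addr;
            uint32_t dma_len;       /* 0 if coalesced into a prior entry */
    };

    static void fill_ents(const struct toy_sg *sg, int nents, int use_dma_api)
    {
            for (int i = 0; i < nents; i++) {
                    if (use_dma_api) {
                            if (!sg[i].dma_len)
                                    continue;       /* not in the DMA view */
                            printf("ent: addr=0x%llx len=%u\n",
                                   (unsigned long long)sg[i].dma_addr,
                                   sg[i].dma_len);
                    } else {
                            printf("ent: addr=0x%llx len=%u\n",
                                   (unsigned long long)sg[i].cpu_addr,
                                   sg[i].cpu_len);
                    }
            }
    }

    int main(void)
    {
            struct toy_sg sg[2] = {
                    { 0x1000, 4096, 0x80000, 8192 }, /* two pages coalesced */
                    { 0x3000, 4096, 0, 0 },
            };

            fill_ents(sg, 2, 1);    /* DMA view: one 8 KiB entry */
            fill_ents(sg, 2, 0);    /* CPU view: two 4 KiB entries */
            return 0;
    }
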
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 050d24c39a8f..acd14ef73d56 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -23,12 +23,102 @@
*/
#include <drm/drm_prime.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
-/* Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with virtgpu
- */
+static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
+ uuid_t *uuid)
+{
+ struct drm_gem_object *obj = buf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+
+ wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
+ if (bo->uuid_state != UUID_INITIALIZED)
+ return -ENODEV;
+
+ uuid_copy(uuid, &bo->uuid);
+
+ return 0;
+}
+
+const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+ .ops = {
+ .cache_sgt_mapping = true,
+ .attach = virtio_dma_buf_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+ },
+ .device_attach = drm_gem_map_attach,
+ .get_uuid = virtgpu_virtio_get_uuid,
+};
+
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+ int flags)
+{
+ struct dma_buf *buf;
+ struct drm_device *dev = obj->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_object_array *objs;
+ int ret = 0;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (vgdev->has_resource_assign_uuid) {
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return ERR_PTR(-ENOMEM);
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+ if (ret)
+ return ERR_PTR(ret);
+ virtio_gpu_notify(vgdev);
+ } else {
+ bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ }
+
+ exp_info.ops = &virtgpu_dmabuf_ops.ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+ exp_info.resv = obj->resv;
+
+ buf = virtio_dma_buf_export(&exp_info);
+ if (IS_ERR(buf))
+ return buf;
+
+ drm_dev_get(dev);
+ drm_gem_object_get(obj);
+
+ return buf;
+}
+
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buf)
+{
+ struct drm_gem_object *obj;
+
+ if (buf->ops == &virtgpu_dmabuf_ops.ops) {
+ obj = buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own GEM object takes a
+ * reference on the GEM object itself instead of on the dmabuf's f_count.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ return drm_gem_prime_import(dev, buf);
+}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
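
virtgpu_gem_prime_import() above adds a self-import fast path: when the dma-buf's ops pointer identifies it as one of this driver's own exports for the same device, the original GEM object is re-referenced directly instead of being imported through an sg-table. A toy model of that identity check (all types and names here are stand-ins):

    #include <stdio.h>

    struct toy_buf { const void *ops; void *priv; };
    struct toy_obj { void *dev; int refcount; };

    static const int my_ops;        /* identity tag, like &virtgpu_dmabuf_ops.ops */

    static struct toy_obj *prime_import(void *dev, struct toy_buf *buf)
    {
            if (buf->ops == (const void *)&my_ops) {
                    struct toy_obj *obj = buf->priv;

                    if (obj->dev == dev) {
                            obj->refcount++;        /* drm_gem_object_get() */
                            return obj;
                    }
            }
            return NULL;    /* caller falls back to the generic import path */
    }

    int main(void)
    {
            struct toy_obj obj = { .dev = (void *)0x1, .refcount = 1 };
            struct toy_buf buf = { .ops = &my_ops, .priv = &obj };

            if (prime_import((void *)0x1, &buf))
                    printf("self-import, refcount now %d\n", obj.refcount);
            return 0;
    }
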
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 53af60d484a4..07945ca238e2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return NULL;
}
- for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
pg = vmalloc_to_page(data);
if (!pg) {
sg_free_table(sgt);
@@ -320,13 +320,13 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
-static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_fence *fence,
- int elemcnt,
- struct scatterlist **sgs,
- int outcnt,
- int incnt)
+static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
int ret, idx;
@@ -335,7 +335,7 @@ static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
free_vbuf(vgdev, vbuf);
- return;
+ return -1;
}
if (vgdev->has_indirect)
@@ -373,15 +373,16 @@ again:
spin_unlock(&vgdev->ctrlq.qlock);
drm_dev_exit(idx);
+ return 0;
}
-static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_fence *fence)
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence)
{
struct scatterlist *sgs[3], vcmd, vout, vresp;
struct sg_table *sgt = NULL;
- int elemcnt = 0, outcnt = 0, incnt = 0;
+ int elemcnt = 0, outcnt = 0, incnt = 0, ret;
/* set up vcmd */
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
@@ -398,7 +399,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (!sgt) {
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
- return;
+ return -1;
}
elemcnt += sg_ents;
@@ -419,13 +420,14 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
incnt++;
}
- virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
- incnt);
+ ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
kfree(sgt);
}
+ return ret;
}
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
@@ -444,10 +446,10 @@ void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
virtqueue_notify(vgdev->ctrlq.vq);
}
-static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
+ return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -534,6 +536,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ int ret;
cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
virtio_gpu_cmd_unref_cb);
@@ -543,7 +546,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
vbuf->resp_cb_data = bo;
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ if (ret < 0)
+ virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -603,9 +608,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
- dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- shmem->pages->sgl, shmem->pages->nents,
- DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -684,9 +688,13 @@ static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
int i = le32_to_cpu(cmd->capset_index);
spin_lock(&vgdev->display_info_lock);
- vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
- vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
- vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ if (vgdev->capsets) {
+ vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+ vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+ vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+ } else {
+ DRM_ERROR("invalid capset memory.");
+ }
spin_unlock(&vgdev->display_info_lock);
wake_up(&vgdev->resp_wq);
}
@@ -1019,9 +1027,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
- dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- shmem->pages->sgl, shmem->pages->nents,
- DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1107,3 +1114,58 @@ void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
memcpy(cur_p, &output->cursor, sizeof(output->cursor));
virtio_gpu_queue_cursor(vgdev, vbuf);
}
+
+static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *obj =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_resource_uuid *resp =
+ (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->resource_export_lock);
+ WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
+ obj->uuid_state == UUID_INITIALIZING) {
+ memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
+ obj->uuid_state = UUID_INITIALIZED;
+ } else {
+ obj->uuid_state = UUID_INITIALIZATION_FAILED;
+ }
+ spin_unlock(&vgdev->resource_export_lock);
+
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_resource_assign_uuid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_resource_uuid *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ spin_lock(&vgdev->resource_export_lock);
+ bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ spin_unlock(&vgdev->resource_export_lock);
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ vbuf->objs = objs;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
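
The command above completes asynchronously: the response callback fills in
obj->uuid under resource_export_lock and wakes vgdev->resp_wq, so anyone who
needs the UUID sleeps until uuid_state leaves UUID_INITIALIZING. A minimal
sketch of such a wait, using only names introduced by this patch (the driver
does the equivalent in its dma-buf get_uuid hook, which is outside this hunk):

	/* Sketch: block until the host has assigned (or failed to assign) a UUID. */
	static int wait_for_uuid(struct virtio_gpu_device *vgdev,
				 struct virtio_gpu_object *bo, uuid_t *out)
	{
		wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);

		if (bo->uuid_state != UUID_INITIALIZED)
			return -ENODEV;

		uuid_copy(out, &bo->uuid);
		return 0;
	}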
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 0b767d7efa24..333d3cead0e3 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
-vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_composer.o
+vkms-y := \
+ vkms_drv.o \
+ vkms_plane.o \
+ vkms_output.o \
+ vkms_crtc.o \
+ vkms_gem.o \
+ vkms_composer.o \
+ vkms_writeback.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 4af2f19480f4..33c031f27c2c 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -9,60 +9,92 @@
#include "vkms_drv.h"
+static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
+ const struct vkms_composer *composer)
+{
+ u32 pixel;
+ int src_offset = composer->offset + (y * composer->pitch)
+ + (x * composer->cpp);
+
+ pixel = *(u32 *)&buffer[src_offset];
+
+ return pixel;
+}
+
/**
* compute_crc - Compute CRC value on output frame
*
- * @vaddr_out: address to final framebuffer
+ * @vaddr: address to final framebuffer
* @composer: framebuffer's metadata
*
* returns CRC value computed using crc32 on the visible portion of
- * the final framebuffer at vaddr_out
+ * the final framebuffer at vaddr
*/
-static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
+static uint32_t compute_crc(const u8 *vaddr,
+ const struct vkms_composer *composer)
{
- int i, j, src_offset;
+ int x, y;
+ u32 crc = 0, pixel = 0;
int x_src = composer->src.x1 >> 16;
int y_src = composer->src.y1 >> 16;
int h_src = drm_rect_height(&composer->src) >> 16;
int w_src = drm_rect_width(&composer->src) >> 16;
- u32 crc = 0;
-
- for (i = y_src; i < y_src + h_src; ++i) {
- for (j = x_src; j < x_src + w_src; ++j) {
- src_offset = composer->offset
- + (i * composer->pitch)
- + (j * composer->cpp);
- /* XRGB format ignores Alpha channel */
- memset(vaddr_out + src_offset + 24, 0, 8);
- crc = crc32_le(crc, vaddr_out + src_offset,
- sizeof(u32));
+
+ for (y = y_src; y < y_src + h_src; ++y) {
+ for (x = x_src; x < x_src + w_src; ++x) {
+ pixel = get_pixel_from_buffer(x, y, vaddr, composer);
+ crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
}
}
return crc;
}
+static u8 blend_channel(u8 src, u8 dst, u8 alpha)
+{
+ u32 pre_blend;
+ u8 new_color;
+
+ pre_blend = (src * 255 + dst * (255 - alpha));
+
+ /* Faster div by 255 */
+ new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);
+
+ return new_color;
+}
+
+static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
+{
+ u8 alpha;
+
+ alpha = argb_src[3];
+ argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
+ argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
+ argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
+ /* Opaque primary */
+ argb_dst[3] = 0xFF;
+}
+
/**
* blend - blend value at vaddr_src with value at vaddr_dst
* @vaddr_dst: destination address
* @vaddr_src: source address
- * @dest_composer: destination framebuffer's metadata
+ * @dst_composer: destination framebuffer's metadata
* @src_composer: source framebuffer's metadata
*
- * Blend value at vaddr_src with value at vaddr_dst.
- * Currently, this function write value of vaddr_src on value
- * at vaddr_dst using buffer's metadata to locate the new values
- * from vaddr_src and their destination at vaddr_dst.
- *
- * TODO: Use the alpha value to blend vaddr_src with vaddr_dst
- * instead of overwriting it.
+ * Blend the vaddr_src value with the vaddr_dst value using the pre-multiplied
+ * alpha blending equation, since DRM currently assumes that the pixel color
+ * values have already been pre-multiplied with the alpha channel values. For
+ * more information, see drm_plane_create_blend_mode_property(). This function
+ * uses the buffers' metadata to locate the new composite values at vaddr_dst.
*/
static void blend(void *vaddr_dst, void *vaddr_src,
- struct vkms_composer *dest_composer,
+ struct vkms_composer *dst_composer,
struct vkms_composer *src_composer)
{
int i, j, j_dst, i_dst;
int offset_src, offset_dst;
+ u8 *pixel_dst, *pixel_src;
int x_src = src_composer->src.x1 >> 16;
int y_src = src_composer->src.y1 >> 16;
@@ -77,15 +109,16 @@ static void blend(void *vaddr_dst, void *vaddr_src,
for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
- offset_dst = dest_composer->offset
- + (i_dst * dest_composer->pitch)
- + (j_dst++ * dest_composer->cpp);
+ offset_dst = dst_composer->offset
+ + (i_dst * dst_composer->pitch)
+ + (j_dst++ * dst_composer->cpp);
offset_src = src_composer->offset
+ (i * src_composer->pitch)
+ (j * src_composer->cpp);
- memcpy(vaddr_dst + offset_dst,
- vaddr_src + offset_src, sizeof(u32));
+ pixel_src = (u8 *)(vaddr_src + offset_src);
+ pixel_dst = (u8 *)(vaddr_dst + offset_dst);
+ alpha_blending(pixel_src, pixel_dst);
}
i_dst++;
}
@@ -108,35 +141,31 @@ static void compose_cursor(struct vkms_composer *cursor_composer,
primary_composer, cursor_composer);
}
-static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
- struct vkms_composer *cursor_composer)
+static int compose_planes(void **vaddr_out,
+ struct vkms_composer *primary_composer,
+ struct vkms_composer *cursor_composer)
{
struct drm_framebuffer *fb = &primary_composer->fb;
struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
- void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
- u32 crc = 0;
- if (!vaddr_out) {
- DRM_ERROR("Failed to allocate memory for output frame.");
- return 0;
+ if (!*vaddr_out) {
+ *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+ if (!*vaddr_out) {
+ DRM_ERROR("Cannot allocate memory for output frame.");
+ return -ENOMEM;
+ }
}
- if (WARN_ON(!vkms_obj->vaddr)) {
- kfree(vaddr_out);
- return crc;
- }
+ if (WARN_ON(!vkms_obj->vaddr))
+ return -EINVAL;
- memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+ memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
if (cursor_composer)
- compose_cursor(cursor_composer, primary_composer, vaddr_out);
-
- crc = compute_crc(vaddr_out, primary_composer);
+ compose_cursor(cursor_composer, primary_composer, *vaddr_out);
- kfree(vaddr_out);
-
- return crc;
+ return 0;
}
/**
@@ -157,14 +186,17 @@ void vkms_composer_worker(struct work_struct *work)
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
struct vkms_composer *primary_composer = NULL;
struct vkms_composer *cursor_composer = NULL;
+ bool crc_pending, wb_pending;
+ void *vaddr_out = NULL;
u32 crc32 = 0;
u64 frame_start, frame_end;
- bool crc_pending;
+ int ret;
spin_lock_irq(&out->composer_lock);
frame_start = crtc_state->frame_start;
frame_end = crtc_state->frame_end;
crc_pending = crtc_state->crc_pending;
+ wb_pending = crtc_state->wb_pending;
crtc_state->frame_start = 0;
crtc_state->frame_end = 0;
crtc_state->crc_pending = false;
@@ -183,8 +215,29 @@ void vkms_composer_worker(struct work_struct *work)
if (crtc_state->num_active_planes == 2)
cursor_composer = crtc_state->active_planes[1]->composer;
- if (primary_composer)
- crc32 = _vkms_get_crc(primary_composer, cursor_composer);
+ if (!primary_composer)
+ return;
+
+ if (wb_pending)
+ vaddr_out = crtc_state->active_writeback;
+
+ ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
+ if (ret) {
+ if (ret == -EINVAL && !wb_pending)
+ kfree(vaddr_out);
+ return;
+ }
+
+ crc32 = compute_crc(vaddr_out, primary_composer);
+
+ if (wb_pending) {
+ drm_writeback_signal_completion(&out->wb_connector, 0);
+ spin_lock_irq(&out->composer_lock);
+ crtc_state->wb_pending = false;
+ spin_unlock_irq(&out->composer_lock);
+ } else {
+ kfree(vaddr_out);
+ }
/*
* The worker can fall behind the vblank hrtimer, make sure we catch up.
@@ -233,6 +286,22 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
return 0;
}
+void vkms_set_composer(struct vkms_output *out, bool enabled)
+{
+ bool old_enabled;
+
+ if (enabled)
+ drm_crtc_vblank_get(&out->crtc);
+
+ spin_lock_irq(&out->lock);
+ old_enabled = out->composer_enabled;
+ out->composer_enabled = enabled;
+ spin_unlock_irq(&out->lock);
+
+ if (old_enabled)
+ drm_crtc_vblank_put(&out->crtc);
+}
+
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
@@ -241,9 +310,7 @@ int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
ret = vkms_crc_parse_source(src_name, &enabled);
- spin_lock_irq(&out->lock);
- out->composer_enabled = enabled;
- spin_unlock_irq(&out->lock);
+ vkms_set_composer(out, enabled);
return ret;
}
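
Worth spelling out why the "Faster div by 255" shortcut in blend_channel()
above is safe: with pre-multiplied alpha each color channel satisfies
src <= alpha, so pre_blend = src * 255 + dst * (255 - alpha) never exceeds
255 * 255 = 65025, and (v + ((v + 257) >> 8)) >> 8 equals v / 255 exactly for
every v in that range. A standalone userspace check (plain C, not kernel
code) that verifies the identity exhaustively:

	#include <assert.h>

	int main(void)
	{
		unsigned int v;

		/* src <= alpha for pre-multiplied pixels, so v <= 255 * 255. */
		for (v = 0; v <= 255u * 255u; v++)
			assert(((v + ((v + 257) >> 8)) >> 8) == v / 255);
		return 0;
	}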
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index ac85e17428f8..09c012d54d58 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -86,6 +86,11 @@ static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 57a8a397d5e8..cb0b6230c22c 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -61,9 +61,6 @@ static void vkms_release(struct drm_device *dev)
{
struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
- platform_device_unregister(vkms->platform);
- drm_atomic_helper_shutdown(&vkms->drm);
- drm_mode_config_cleanup(&vkms->drm);
destroy_workqueue(vkms->output.composer_workq);
}
@@ -144,30 +141,31 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
static int __init vkms_init(void)
{
int ret;
+ struct platform_device *pdev;
- vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL);
- if (!vkms_device)
- return -ENOMEM;
+ pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- vkms_device->platform =
- platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
- if (IS_ERR(vkms_device->platform)) {
- ret = PTR_ERR(vkms_device->platform);
- goto out_free;
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_unregister;
}
- ret = drm_dev_init(&vkms_device->drm, &vkms_driver,
- &vkms_device->platform->dev);
- if (ret)
- goto out_unregister;
- drmm_add_final_kfree(&vkms_device->drm, vkms_device);
+ vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver,
+ struct vkms_device, drm);
+ if (IS_ERR(vkms_device)) {
+ ret = PTR_ERR(vkms_device);
+ goto out_devres;
+ }
+ vkms_device->platform = pdev;
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
if (ret) {
DRM_ERROR("Could not initialize DMA support\n");
- goto out_put;
+ goto out_devres;
}
vkms_device->drm.irq_enabled = true;
@@ -175,39 +173,41 @@ static int __init vkms_init(void)
ret = drm_vblank_init(&vkms_device->drm, 1);
if (ret) {
DRM_ERROR("Failed to vblank\n");
- goto out_put;
+ goto out_devres;
}
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_put;
+ goto out_devres;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_put;
+ goto out_devres;
return 0;
-out_put:
- drm_dev_put(&vkms_device->drm);
- return ret;
-
+out_devres:
+ devres_release_group(&pdev->dev, NULL);
out_unregister:
- platform_device_unregister(vkms_device->platform);
-out_free:
- kfree(vkms_device);
+ platform_device_unregister(pdev);
return ret;
}
static void __exit vkms_exit(void)
{
+ struct platform_device *pdev;
+
if (!vkms_device) {
DRM_INFO("vkms_device is NULL.\n");
return;
}
+ pdev = vkms_device->platform;
+
drm_dev_unregister(&vkms_device->drm);
- drm_dev_put(&vkms_device->drm);
+ drm_atomic_helper_shutdown(&vkms_device->drm);
+ devres_release_group(&pdev->dev, NULL);
+ platform_device_unregister(pdev);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index f4036bb0b9a8..380a8f27e156 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -8,6 +8,7 @@
#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_writeback.h>
#define XRES_MIN 20
#define YRES_MIN 20
@@ -52,9 +53,11 @@ struct vkms_crtc_state {
int num_active_planes;
/* stack of active planes for crc computation, should be in z order */
struct vkms_plane_state **active_planes;
+ void *active_writeback;
- /* below three are protected by vkms_output.composer_lock */
+ /* below four are protected by vkms_output.composer_lock */
bool crc_pending;
+ bool wb_pending;
u64 frame_start;
u64 frame_end;
};
@@ -63,6 +66,7 @@ struct vkms_output {
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
+ struct drm_writeback_connector wb_connector;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
@@ -143,5 +147,9 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
+void vkms_set_composer(struct vkms_output *out, bool enabled);
+
+/* Writeback */
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 85afb77e97f0..4a1848b0318f 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -80,6 +80,10 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
goto err_attach;
}
+ ret = vkms_enable_writeback_connector(vkmsdev);
+ if (ret)
+ DRM_ERROR("Failed to init writeback connector\n");
+
drm_mode_config_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
new file mode 100644
index 000000000000..094fa4aa061d
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "vkms_drv.h"
+#include <drm/drm_fourcc.h>
+#include <drm/drm_writeback.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+static const u32 vkms_wb_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_connector_funcs vkms_wb_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+ fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ if (fb->format->format != vkms_wb_formats[0]) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("Invalid pixel format %s\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs vkms_wb_encoder_helper_funcs = {
+ .atomic_check = vkms_wb_encoder_atomic_check,
+};
+
+static int vkms_wb_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job)
+{
+ struct vkms_gem_object *vkms_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (!job->fb)
+ return 0;
+
+ gem_obj = drm_gem_fb_get_obj(job->fb, 0);
+ ret = vkms_gem_vmap(gem_obj);
+ if (ret) {
+ DRM_ERROR("vmap failed: %d\n", ret);
+ return ret;
+ }
+
+ vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ job->priv = vkms_obj->vaddr;
+
+ return 0;
+}
+
+static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct drm_gem_object *gem_obj;
+ struct vkms_device *vkmsdev;
+
+ if (!job->fb)
+ return;
+
+ gem_obj = drm_gem_fb_get_obj(job->fb, 0);
+ vkms_gem_vunmap(gem_obj);
+
+ vkmsdev = drm_device_to_vkms_device(gem_obj->dev);
+ vkms_set_composer(&vkmsdev->output, false);
+}
+
+static void vkms_wb_atomic_commit(struct drm_connector *conn,
+ struct drm_connector_state *state)
+{
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_writeback_connector *wb_conn = &output->wb_connector;
+ struct drm_connector_state *conn_state = wb_conn->base.state;
+ struct vkms_crtc_state *crtc_state = output->composer_state;
+
+ if (!conn_state)
+ return;
+
+ vkms_set_composer(&vkmsdev->output, true);
+
+ spin_lock_irq(&output->composer_lock);
+ crtc_state->active_writeback = conn_state->writeback_job->priv;
+ crtc_state->wb_pending = true;
+ spin_unlock_irq(&output->composer_lock);
+ drm_writeback_queue_job(wb_conn, state);
+}
+
+static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
+ .get_modes = vkms_wb_connector_get_modes,
+ .prepare_writeback_job = vkms_wb_prepare_job,
+ .cleanup_writeback_job = vkms_wb_cleanup_job,
+ .atomic_commit = vkms_wb_atomic_commit,
+};
+
+int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
+{
+ struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;
+
+ vkmsdev->output.wb_connector.encoder.possible_crtcs = 1;
+ drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
+
+ return drm_writeback_connector_init(&vkmsdev->drm, wb,
+ &vkms_wb_connector_funcs,
+ &vkms_wb_encoder_helper_funcs,
+ vkms_wb_formats,
+ ARRAY_SIZE(vkms_wb_formats));
+}
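
Once the connector is registered, the writeback machinery above is driven
entirely from userspace through the standard atomic properties on the
writeback connector ("CRTC_ID", "WRITEBACK_FB_ID", "WRITEBACK_OUT_FENCE_PTR").
A minimal libdrm sketch, assuming a hypothetical prop_id() helper that maps a
property name to its object-specific id:

	/* Sketch: queue one writeback of the current frame into out_fb_id. */
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	int out_fence = -1;

	drmModeAtomicAddProperty(req, wb_conn_id,
				 prop_id(fd, wb_conn_id, "CRTC_ID"), crtc_id);
	drmModeAtomicAddProperty(req, wb_conn_id,
				 prop_id(fd, wb_conn_id, "WRITEBACK_FB_ID"), out_fb_id);
	drmModeAtomicAddProperty(req, wb_conn_id,
				 prop_id(fd, wb_conn_id, "WRITEBACK_OUT_FENCE_PTR"),
				 (uint64_t)(uintptr_t)&out_fence);

	if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL) == 0) {
		/* out_fence signals once vkms_composer_worker finishes the copy. */
	}
	drmModeAtomicFree(req);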
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 1629427d5734..e8d66182cd7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -464,14 +464,14 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
dma_resv_assert_held(src->base.resv);
- if (dst->ttm->state == tt_unpopulated) {
- ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+ if (!ttm_tt_is_populated(dst->ttm)) {
+ ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
if (ret)
return ret;
}
- if (src->ttm->state == tt_unpopulated) {
- ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
+ if (!ttm_tt_is_populated(src->ttm)) {
+ ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 1e59c019affa..813f1b148094 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -354,10 +354,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
pl.fpfn = 0;
pl.lpfn = 0;
- pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
- | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+ pl.mem_type = bo->mem.mem_type;
+ pl.flags = bo->mem.placement;
if (pin)
pl.flags |= TTM_PL_FLAG_NO_EVICT;
+ else
+ pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
@@ -1135,14 +1137,14 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
* vmw_bo_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
* Detaches cached maps and device bindings that require that the
* buffer doesn't move.
*/
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct vmw_buffer_object *vbo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index fb39826f72c1..31e3e5c9f362 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -620,6 +620,28 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
return ret;
}
+static int vmw_vram_manager_init(struct vmw_private *dev_priv)
+{
+ int ret;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ret = vmw_thp_init(dev_priv);
+#else
+ ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
+ dev_priv->vram_size >> PAGE_SHIFT);
+#endif
+ ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
+ return ret;
+}
+
+static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ vmw_thp_fini(dev_priv);
+#else
+ ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
+#endif
+}
+
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
struct vmw_private *dev_priv;
@@ -864,18 +886,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* Enable VRAM, but initially don't use it until SVGA is enabled and
* unhidden.
*/
- ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
- (dev_priv->vram_size >> PAGE_SHIFT));
+
+ ret = vmw_vram_manager_init(dev_priv);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ /*
+ * "Guest Memory Regions" is an aperture like feature with
+ * one slot per bo. There is an upper limit of the number of
+ * slots as well as the bo size.
+ */
dev_priv->has_gmr = true;
+ /* TODO: This is most likely not correct */
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- VMW_PL_GMR) != 0) {
+ refuse_dma ||
+ vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
@@ -883,8 +910,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
- VMW_PL_MOB) != 0) {
+
+ if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
DRM_INFO("No MOB memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
@@ -961,10 +988,10 @@ out_no_fifo:
vmw_kms_close(dev_priv);
out_no_kms:
if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
if (dev_priv->has_gmr)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
+ vmw_vram_manager_fini(dev_priv);
out_no_vram:
(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
@@ -1012,12 +1039,12 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_overlay_close(dev_priv);
if (dev_priv->has_gmr)
- (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
- (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_release_device_early(dev_priv);
if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
+ vmw_vram_manager_fini(dev_priv);
(void) ttm_bo_device_release(&dev_priv->bdev);
drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
@@ -1159,10 +1186,12 @@ static void vmw_master_drop(struct drm_device *dev,
*/
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
spin_lock(&dev_priv->svga_lock);
- if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ if (!ttm_resource_manager_used(man)) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+ ttm_resource_manager_set_used(man, true);
}
spin_unlock(&dev_priv->svga_lock);
}
@@ -1188,9 +1217,11 @@ void vmw_svga_enable(struct vmw_private *dev_priv)
*/
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
spin_lock(&dev_priv->svga_lock);
- if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ if (ttm_resource_manager_used(man)) {
+ ttm_resource_manager_set_used(man, false);
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
@@ -1207,6 +1238,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
*/
void vmw_svga_disable(struct vmw_private *dev_priv)
{
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
/*
* Disabling SVGA will turn off device modesetting capabilities, so
* notify KMS about that so that it doesn't cache atomic state that
@@ -1222,8 +1254,8 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
vmw_kms_lost_device(dev_priv->dev);
ttm_write_lock(&dev_priv->reservation_sem, false);
spin_lock(&dev_priv->svga_lock);
- if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ if (ttm_resource_manager_used(man)) {
+ ttm_resource_manager_set_used(man, false);
spin_unlock(&dev_priv->svga_lock);
if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
DRM_ERROR("Failed evicting VRAM buffers.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3596f3923ea3..1523b51a7284 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -82,9 +82,7 @@
VMWGFX_NUM_GB_SCREEN_TARGET)
#define VMW_PL_GMR (TTM_PL_PRIV + 0)
-#define VMW_PL_FLAG_GMR (TTM_PL_FLAG_PRIV << 0)
#define VMW_PL_MOB (TTM_PL_PRIV + 1)
-#define VMW_PL_FLAG_MOB (TTM_PL_FLAG_PRIV << 1)
#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
@@ -793,7 +791,7 @@ extern void vmw_resource_unreserve(struct vmw_resource *res,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
@@ -878,7 +876,7 @@ extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
+ struct ttm_resource *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
extern struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
@@ -1019,10 +1017,12 @@ extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
-extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
+extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ unsigned long bo_size,
+ struct ttm_buffer_object **bo_p);
+
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
unsigned long p_offs);
@@ -1219,7 +1219,8 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
* GMR Id manager
*/
-extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);
+void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/**
* Prime - vmwgfx_prime.c
@@ -1518,9 +1519,8 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
/* Transparent hugepage support - vmwgfx_thp.c */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern const struct ttm_mem_type_manager_func vmw_thp_func;
-#else
-#define vmw_thp_func ttm_bo_manager_func
+extern int vmw_thp_init(struct vmw_private *dev_priv);
+void vmw_thp_fini(struct vmw_private *dev_priv);
#endif
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index f8bdd4ea294a..551042489036 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
struct vmwgfx_gmrid_man {
+ struct ttm_resource_manager manager;
spinlock_t lock;
struct ida gmr_ida;
uint32_t max_gmr_ids;
@@ -44,13 +45,17 @@ struct vmwgfx_gmrid_man {
uint32_t used_gmr_pages;
};
-static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct vmwgfx_gmrid_man, manager);
+}
+
+static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct vmwgfx_gmrid_man *gman =
- (struct vmwgfx_gmrid_man *)man->priv;
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
int id;
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
@@ -79,11 +84,10 @@ nospace:
return -ENOSPC;
}
-static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct vmwgfx_gmrid_man *gman =
- (struct vmwgfx_gmrid_man *)man->priv;
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
if (mem->mm_node) {
ida_free(&gman->gmr_ida, mem->start);
@@ -94,22 +98,28 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
}
}
-static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
+
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
- struct vmw_private *dev_priv =
- container_of(man->bdev, struct vmw_private, bdev);
+ struct ttm_resource_manager *man;
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
if (unlikely(!gman))
return -ENOMEM;
+ man = &gman->manager;
+
+ man->func = &vmw_gmrid_manager_func;
+ /* TODO: This is most likely not correct */
+ man->use_tt = true;
+ ttm_resource_manager_init(man, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);
- switch (p_size) {
+ switch (type) {
case VMW_PL_GMR:
gman->max_gmr_ids = dev_priv->max_gmr_ids;
gman->max_gmr_pages = dev_priv->max_gmr_pages;
@@ -121,32 +131,29 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
default:
BUG();
}
- man->priv = (void *) gman;
+ ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
+ ttm_resource_manager_set_used(man, true);
return 0;
}
-static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
- struct vmwgfx_gmrid_man *gman =
- (struct vmwgfx_gmrid_man *)man->priv;
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
+ struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
- if (gman) {
- ida_destroy(&gman->gmr_ida);
- kfree(gman);
- }
- return 0;
-}
+ ttm_resource_manager_set_used(man, false);
+
+ ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+
+ ttm_resource_manager_cleanup(man);
+
+ ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
+ ida_destroy(&gman->gmr_ida);
+ kfree(gman);
-static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
- drm_printf(printer, "No debug info available for the GMR id manager\n");
}
-const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
- .init = vmw_gmrid_man_init,
- .takedown = vmw_gmrid_man_takedown,
- .get_node = vmw_gmrid_man_get_node,
- .put_node = vmw_gmrid_man_put_node,
- .debug = vmw_gmrid_man_debug
+static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
+ .alloc = vmw_gmrid_man_get_node,
+ .free = vmw_gmrid_man_put_node,
};
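
The conversion above is the template for the rest of this series: the old
opaque man->priv pointer is replaced by embedding struct ttm_resource_manager
in the driver's own manager and recovering the container with container_of().
Distilled to a sketch (my_man is a placeholder, not a vmwgfx type):

	struct my_man {
		struct ttm_resource_manager manager;  /* embedded, not pointed to */
		/* driver-private bookkeeping follows */
	};

	static struct my_man *to_my_man(struct ttm_resource_manager *man)
	{
		/* Type-safe replacement for the old (struct my_man *)man->priv. */
		return container_of(man, struct my_man, manager);
	}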
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index e8eb42933ca2..7f95ed6aa224 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -238,10 +238,6 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables = batch->otables;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
SVGAOTableType i;
int ret;
@@ -255,24 +251,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
bo_size += otables[i].size;
}
- ret = ttm_bo_create(&dev_priv->bdev, bo_size,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &batch->otable_bo);
-
- if (unlikely(ret != 0))
- goto out_no_bo;
-
- ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
- BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
- if (unlikely(ret != 0))
- goto out_unreserve;
- ret = vmw_bo_map_dma(batch->otable_bo);
+ ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
if (unlikely(ret != 0))
- goto out_unreserve;
-
- ttm_bo_unreserve(batch->otable_bo);
+ return ret;
offset = 0;
for (i = 0; i < batch->num_otables; ++i) {
@@ -289,8 +270,6 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
return 0;
-out_unreserve:
- ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
for (i = 0; i < batch->num_otables; ++i) {
if (batch->otables[i].enabled)
@@ -300,7 +279,6 @@ out_no_setup:
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
-out_no_bo:
return ret;
}
@@ -432,41 +410,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
{
- int ret;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
-
BUG_ON(mob->pt_bo != NULL);
- ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &mob->pt_bo);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
-
- BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
- if (unlikely(ret != 0))
- goto out_unreserve;
- ret = vmw_bo_map_dma(mob->pt_bo);
- if (unlikely(ret != 0))
- goto out_unreserve;
-
- ttm_bo_unreserve(mob->pt_bo);
-
- return 0;
-
-out_unreserve:
- ttm_bo_unreserve(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
-
- return ret;
+ return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e9f448a5ebb3..15b5bde69324 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -24,7 +24,7 @@
*
*/
-#include <linux/frame.h>
+#include <linux/objtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -599,4 +599,3 @@ out_open:
return -EINVAL;
}
-
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c8441030637a..c0f156078dda 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -855,7 +855,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
* states from the device.
*/
void vmw_query_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index c8b9335bccd8..c8427998fa35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -16,14 +16,22 @@
* @lock: Manager lock.
*/
struct vmw_thp_manager {
+ struct ttm_resource_manager manager;
struct drm_mm mm;
spinlock_t lock;
};
+static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct vmw_thp_manager, manager);
+}
+
+static const struct ttm_resource_manager_func vmw_thp_func;
+
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long align_pages,
const struct ttm_place *place,
- struct ttm_mem_reg *mem,
+ struct ttm_resource *mem,
unsigned long lpfn,
enum drm_mm_insert_mode mode)
{
@@ -38,12 +46,12 @@ static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
return -ENOSPC;
}
-static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
+static int vmw_thp_get_node(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
- struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node;
unsigned long align_pages;
@@ -100,10 +108,10 @@ found_unlock:
-static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void vmw_thp_put_node(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
{
- struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct vmw_thp_manager *rman = to_thp_manager(man);
if (mem->mm_node) {
spin_lock(&rman->lock);
@@ -115,8 +123,7 @@ static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
}
}
-static int vmw_thp_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int vmw_thp_init(struct vmw_private *dev_priv)
{
struct vmw_thp_manager *rman;
@@ -124,43 +131,51 @@ static int vmw_thp_init(struct ttm_mem_type_manager *man,
if (!rman)
return -ENOMEM;
- drm_mm_init(&rman->mm, 0, p_size);
+ ttm_resource_manager_init(&rman->manager,
+ dev_priv->vram_size >> PAGE_SHIFT);
+
+ rman->manager.func = &vmw_thp_func;
+ drm_mm_init(&rman->mm, 0, rman->manager.size);
spin_lock_init(&rman->lock);
- man->priv = rman;
+
+ ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
+ ttm_resource_manager_set_used(&rman->manager, true);
return 0;
}
-static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+void vmw_thp_fini(struct vmw_private *dev_priv)
{
- struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+ struct vmw_thp_manager *rman = to_thp_manager(man);
struct drm_mm *mm = &rman->mm;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+ ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+ if (ret)
+ return;
spin_lock(&rman->lock);
- if (drm_mm_clean(mm)) {
- drm_mm_takedown(mm);
- spin_unlock(&rman->lock);
- kfree(rman);
- man->priv = NULL;
- return 0;
- }
+ drm_mm_takedown(mm);
spin_unlock(&rman->lock);
- return -EBUSY;
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
+ kfree(rman);
}
-static void vmw_thp_debug(struct ttm_mem_type_manager *man,
+static void vmw_thp_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
- struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
+ struct vmw_thp_manager *rman = to_thp_manager(man);
spin_lock(&rman->lock);
drm_mm_print(&rman->mm, printer);
spin_unlock(&rman->lock);
}
-const struct ttm_mem_type_manager_func vmw_thp_func = {
- .init = vmw_thp_init,
- .takedown = vmw_thp_takedown,
- .get_node = vmw_thp_get_node,
- .put_node = vmw_thp_put_node,
+static const struct ttm_resource_manager_func vmw_thp_func = {
+ .alloc = vmw_thp_get_node,
+ .free = vmw_thp_put_node,
.debug = vmw_thp_debug
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index ab524ab3b0b4..73116ec70ba5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -33,49 +33,57 @@
static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED
};
static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_CACHED
};
static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED
};
static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_MOB,
+ .flags = TTM_PL_FLAG_CACHED
};
static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .mem_type = VMW_PL_MOB,
+ .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
struct ttm_placement vmw_vram_placement = {
@@ -89,11 +97,13 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED
}
};
@@ -101,11 +111,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED
}
};
@@ -120,12 +132,14 @@ static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT
}
};
@@ -169,19 +183,23 @@ static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_VRAM,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_MOB,
+ .flags = TTM_PL_FLAG_CACHED
}
};
@@ -189,15 +207,18 @@ static const struct ttm_place nonfixed_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_GMR,
+ .flags = TTM_PL_FLAG_CACHED
}, {
.fpfn = 0,
.lpfn = 0,
- .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ .mem_type = VMW_PL_MOB,
+ .flags = TTM_PL_FLAG_CACHED
}
};
@@ -246,6 +267,7 @@ struct vmw_ttm_tt {
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
bool mapped;
+ bool bound;
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
@@ -362,8 +384,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->dev->dev;
- dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
@@ -383,16 +404,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
struct device *dev = vmw_tt->dev_priv->dev->dev;
- int ret;
-
- ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(ret == 0))
- return -ENOMEM;
-
- vmw_tt->sgt.nents = ret;
- return 0;
+ return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
/**
@@ -419,6 +432,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
int ret = 0;
static size_t sgl_size;
static size_t sgt_size;
+ struct scatterlist *sg;
if (vmw_tt->mapped)
return 0;
@@ -441,18 +455,20 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- ret = __sg_alloc_table_from_pages
- (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
- (unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->dev->dev),
- GFP_KERNEL);
- if (unlikely(ret != 0))
+ sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+ vsgt->num_pages, 0,
+ (unsigned long) vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->dev->dev),
+ NULL, 0, GFP_KERNEL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
goto out_sg_alloc_fail;
+ }
- if (vsgt->num_pages > vmw_tt->sgt.nents) {
+ if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
uint64_t over_alloc =
sgl_size * (vsgt->num_pages -
- vmw_tt->sgt.nents);
+ vmw_tt->sgt.orig_nents);
ttm_mem_global_free(glob, over_alloc);
vmw_tt->sg_alloc_size -= over_alloc;
@@ -519,43 +535,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
vmw_tt->mapped = false;
}
-
-/**
- * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- * Note that the buffer object must be either pinned or reserved before
- * calling this function.
- */
-int vmw_bo_map_dma(struct ttm_buffer_object *bo)
-{
- struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
- return vmw_ttm_map_dma(vmw_tt);
-}
-
-
-/**
- * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- */
-void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
-{
- struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
- vmw_ttm_unmap_dma(vmw_tt);
-}
-
-
/**
* vmw_bo_sg_table - Return a struct vmw_sg_table object for a
* TTM buffer object
@@ -576,11 +555,18 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
}
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int vmw_ttm_bind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- int ret;
+ int ret = 0;
+
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (vmw_be->bound)
+ return 0;
ret = vmw_ttm_map_dma(vmw_be);
if (unlikely(ret != 0))
@@ -591,8 +577,9 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
switch (bo_mem->mem_type) {
case VMW_PL_GMR:
- return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+ ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id);
+ break;
case VMW_PL_MOB:
if (unlikely(vmw_be->mob == NULL)) {
vmw_be->mob =
@@ -601,20 +588,26 @@ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return -ENOMEM;
}
- return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+ ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
+ break;
default:
BUG();
}
- return 0;
+ vmw_be->bound = true;
+ return ret;
}
-static void vmw_ttm_unbind(struct ttm_tt *ttm)
+static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ if (!vmw_be->bound)
+ return;
+
switch (vmw_be->mem_type) {
case VMW_PL_GMR:
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
@@ -628,14 +621,17 @@ static void vmw_ttm_unbind(struct ttm_tt *ttm)
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
+ vmw_be->bound = false;
}
-static void vmw_ttm_destroy(struct ttm_tt *ttm)
+static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ vmw_ttm_unbind(bdev, ttm);
+ ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
ttm_dma_tt_fini(&vmw_be->dma_ttm);
@@ -649,7 +645,8 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
}
-static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+static int vmw_ttm_populate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct vmw_ttm_tt *vmw_tt =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -657,7 +654,7 @@ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
int ret;
- if (ttm->state != tt_unpopulated)
+ if (ttm_tt_is_populated(ttm))
return 0;
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
@@ -677,7 +674,8 @@ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
return ret;
}
-static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm.ttm);
@@ -701,12 +699,6 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
-static struct ttm_backend_func vmw_ttm_func = {
- .bind = vmw_ttm_bind,
- .unbind = vmw_ttm_unbind,
- .destroy = vmw_ttm_destroy,
-};
-
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
@@ -717,7 +709,6 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
if (!vmw_be)
return NULL;
- vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
vmw_be->mob = NULL;
@@ -734,40 +725,6 @@ out_no_init:
return NULL;
}
-static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->available_caching = TTM_PL_FLAG_CACHED;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- /* "On-card" video ram */
- man->func = &vmw_thp_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED;
- man->available_caching = TTM_PL_FLAG_CACHED;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case VMW_PL_GMR:
- case VMW_PL_MOB:
- /*
- * "Guest Memory Regions" is an aperture like feature with
- * one slot per bo. There is an upper limit of the number of
- * slots as well as the bo size.
- */
- man->func = &vmw_gmrid_manager_func;
- man->available_caching = TTM_PL_FLAG_CACHED;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
static void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
@@ -782,24 +739,18 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return vmw_user_bo_verify_access(bo, tfile);
}
-static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
- mem->bus.addr = NULL;
- mem->bus.is_iomem = false;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
-
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
case VMW_PL_GMR:
case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = dev_priv->vram_start;
+ mem->bus.offset = (mem->start << PAGE_SHIFT) +
+ dev_priv->vram_start;
mem->bus.is_iomem = true;
break;
default:
@@ -812,7 +763,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
* Calls move_notify for all subsystems needing it.
@@ -820,7 +771,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
*/
static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *mem)
+ struct ttm_resource *mem)
{
vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
@@ -843,7 +794,9 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .init_mem_type = vmw_init_mem_type,
+ .ttm_tt_bind = &vmw_ttm_bind,
+ .ttm_tt_unbind = &vmw_ttm_unbind,
+ .ttm_tt_destroy = &vmw_ttm_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
@@ -852,3 +805,38 @@ struct ttm_bo_driver vmw_bo_driver = {
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
+
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ unsigned long bo_size,
+ struct ttm_buffer_object **bo_p)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ struct ttm_buffer_object *bo;
+ int ret;
+
+ ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, &bo);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, true, NULL);
+ BUG_ON(ret != 0);
+ ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
+ if (likely(ret == 0)) {
+ struct vmw_ttm_tt *vmw_tt =
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ ret = vmw_ttm_map_dma(vmw_tt);
+ }
+
+ ttm_bo_unreserve(bo);
+
+ if (likely(ret == 0))
+ *bo_p = bo;
+ return ret;
+}
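
With ttm_backend_func gone, the bind/unbind/destroy hooks live in struct
ttm_bo_driver and the driver now tracks its own binding state, so that
vmw_ttm_unbind() is safe to call unconditionally from vmw_ttm_destroy().
A minimal sketch of that idempotent pattern, with hypothetical names
(my_tt, my_tt_bind and my_tt_unbind are illustrative, not the vmwgfx API):

	struct my_tt {
		struct ttm_dma_tt dma_ttm;
		bool bound;	/* set on successful bind, cleared on unbind */
	};

	static int my_tt_bind(struct my_tt *tt)
	{
		if (tt->bound)			/* rebinding is a no-op */
			return 0;
		/* ... program the aperture (GMR/MOB) here ... */
		tt->bound = true;
		return 0;
	}

	static void my_tt_unbind(struct my_tt *tt)
	{
		if (!tt->bound)			/* destroy may call us anyway */
			return;
		/* ... tear down the mapping here ... */
		tt->bound = false;
	}
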
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 534daf37c97e..2f464ef2d53e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -180,7 +180,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
if (!xen_obj->pages)
return ERR_PTR(-ENOMEM);
- return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+ return drm_prime_pages_to_sg(gem_obj->dev,
+ xen_obj->pages, xen_obj->num_pages);
}
struct drm_gem_object *
@@ -217,7 +218,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
return ERR_PTR(ret);
DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
- size, sgt->nents);
+ size, sgt->orig_nents);
return &xen_obj->base;
}
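
The switch from sgt->nents to sgt->orig_nents in the debug print matters
because the two fields diverge once the table is DMA-mapped: orig_nents is
the number of CPU-side entries the table was built with, while nents is the
(possibly smaller) number of IOMMU-coalesced DMA segments. A hedged sketch
of the convention (get_sgt() is a hypothetical source of the table):

	struct sg_table *sgt = get_sgt();
	struct scatterlist *sg;
	size_t total = 0, dma_len = 0;
	int i;

	/* orig_nents: CPU-side entries; always valid after allocation. */
	for_each_sgtable_sg(sgt, sg, i)
		total += sg->length;

	/* nents: DMA segments; only valid after a successful
	 * dma_map_sgtable(), and possibly smaller than orig_nents. */
	for_each_sgtable_dma_sg(sgt, sg, i)
		dma_len += sg_dma_len(sg);
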
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index a455cfc1bee5..98bd48f13fd1 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -242,12 +242,6 @@ static const u32 scaling_factors_565[] = {
ZYNQMP_DISP_AV_BUF_5BIT_SF,
};
-static const u32 scaling_factors_666[] = {
- ZYNQMP_DISP_AV_BUF_6BIT_SF,
- ZYNQMP_DISP_AV_BUF_6BIT_SF,
- ZYNQMP_DISP_AV_BUF_6BIT_SF,
-};
-
static const u32 scaling_factors_888[] = {
ZYNQMP_DISP_AV_BUF_8BIT_SF,
ZYNQMP_DISP_AV_BUF_8BIT_SF,
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 26328c76305b..8e69303aad3f 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -111,7 +111,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
/* Initialize mode config, vblank and the KMS poll helper. */
ret = drmm_mode_config_init(drm);
if (ret < 0)
- goto err_dev_put;
+ return ret;
drm->mode_config.funcs = &zynqmp_dpsub_mode_config_funcs;
drm->mode_config.min_width = 0;
@@ -121,7 +121,7 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
ret = drm_vblank_init(drm, 1);
if (ret)
- goto err_dev_put;
+ return ret;
drm->irq_enabled = 1;
@@ -154,8 +154,6 @@ static int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
err_poll_fini:
drm_kms_helper_poll_fini(drm);
-err_dev_put:
- drm_dev_put(drm);
return ret;
}
@@ -208,27 +206,16 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
int ret;
/* Allocate private data. */
- dpsub = kzalloc(sizeof(*dpsub), GFP_KERNEL);
- if (!dpsub)
- return -ENOMEM;
+ dpsub = devm_drm_dev_alloc(&pdev->dev, &zynqmp_dpsub_drm_driver,
+ struct zynqmp_dpsub, drm);
+ if (IS_ERR(dpsub))
+ return PTR_ERR(dpsub);
dpsub->dev = &pdev->dev;
platform_set_drvdata(pdev, dpsub);
dma_set_mask(dpsub->dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT));
- /*
- * Initialize the DRM device early, as the DRM core mandates usage of
- * the managed memory helpers tied to the DRM device.
- */
- ret = drm_dev_init(&dpsub->drm, &zynqmp_dpsub_drm_driver, &pdev->dev);
- if (ret < 0) {
- kfree(dpsub);
- return ret;
- }
-
- drmm_add_final_kfree(&dpsub->drm, dpsub);
-
/* Try the reserved memory. Proceed if there's none. */
of_reserved_mem_device_init(&pdev->dev);
@@ -286,8 +273,6 @@ static int zynqmp_dpsub_remove(struct platform_device *pdev)
clk_disable_unprepare(dpsub->apb_clk);
of_reserved_mem_device_release(&pdev->dev);
- drm_dev_put(drm);
-
return 0;
}
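
The probe conversion replaces the manual drm_dev_init() /
drmm_add_final_kfree() / drm_dev_put() dance with devm_drm_dev_alloc(),
which allocates the driver structure with the drm_device embedded in it and
ties the final reference to the parent device, so no error path needs
drm_dev_put() anymore. A minimal sketch of the pattern (my_dev and
my_drm_driver are illustrative):

	struct my_dev {
		struct drm_device drm;	/* must be embedded for devm_drm_dev_alloc() */
		struct device *dev;
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_dev *mdev;

		/* Allocates my_dev, initializes mdev->drm, and registers a
		 * devres action that drops the last reference automatically. */
		mdev = devm_drm_dev_alloc(&pdev->dev, &my_drm_driver,
					  struct my_dev, drm);
		if (IS_ERR(mdev))
			return PTR_ERR(mdev);

		mdev->dev = &pdev->dev;
		return 0;	/* no drm_dev_put() on any path */
	}
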
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 89b6c14b7392..82d0a60ba3f7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
- if (!err) {
- err = -ENOMEM;
+ err = dma_map_sgtable(dev, sgt, dir, 0);
+ if (err)
goto unpin;
- }
job->unpins[job->num_unpins].dev = dev;
job->unpins[job->num_unpins].dir = dir;
@@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
}
if (host->domain) {
- for_each_sg(sgt->sgl, sg, sgt->nents, j)
+ for_each_sgtable_sg(sgt, sg, j)
gather_size += sg->length;
gather_size = iova_align(&host->iova, gather_size);
@@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto put;
}
- err = iommu_map_sg(host->domain,
+ err = iommu_map_sgtable(host->domain,
iova_dma_addr(&host->iova, alloc),
- sgt->sgl, sgt->nents, IOMMU_READ);
+ sgt, IOMMU_READ);
if (err == 0) {
__free_iova(&host->iova, alloc);
err = -EINVAL;
@@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
job->unpins[job->num_unpins].size = gather_size;
phys_addr = iova_dma_addr(&host->iova, alloc);
} else if (sgt) {
- err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
+ err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
+ if (err)
goto put;
- }
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].dev = host->dev;
@@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job)
}
if (unpin->dev && sgt)
- dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
- unpin->dir);
+ dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);
host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
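
Note that the conversion also flips the error convention: dma_map_sg()
returns the number of mapped entries and 0 on failure (the caller picks an
errno), while dma_map_sgtable() returns 0 on success and a negative errno on
failure, updating sgt->nents internally. A side-by-side sketch, with dev,
sgt, dir and err as in pin_job() above and n a local count:

	/* Old style: zero return means failure. */
	n = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (!n)
		return -ENOMEM;

	/* New style: negative errno on failure, sgt->nents set on success. */
	err = dma_map_sgtable(dev, sgt, dir, 0);
	if (err)
		return err;
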
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index e606464aa43c..2efe12dde8bc 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -293,19 +293,13 @@ int tegra_mipi_disable(struct tegra_mipi_device *dev)
}
EXPORT_SYMBOL(tegra_mipi_disable);
-int tegra_mipi_wait(struct tegra_mipi_device *device)
+int tegra_mipi_finish_calibration(struct tegra_mipi_device *device)
{
struct tegra_mipi *mipi = device->mipi;
void __iomem *status_reg = mipi->regs + (MIPI_CAL_STATUS << 2);
u32 value;
int err;
- err = clk_enable(device->mipi->clk);
- if (err < 0)
- return err;
-
- mutex_lock(&device->mipi->lock);
-
err = readl_relaxed_poll_timeout(status_reg, value,
!(value & MIPI_CAL_STATUS_ACTIVE) &&
(value & MIPI_CAL_STATUS_DONE), 50,
@@ -315,9 +309,9 @@ int tegra_mipi_wait(struct tegra_mipi_device *device)
return err;
}
-EXPORT_SYMBOL(tegra_mipi_wait);
+EXPORT_SYMBOL(tegra_mipi_finish_calibration);
-int tegra_mipi_calibrate(struct tegra_mipi_device *device)
+int tegra_mipi_start_calibration(struct tegra_mipi_device *device)
{
const struct tegra_mipi_soc *soc = device->mipi->soc;
unsigned int i;
@@ -381,12 +375,16 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
value |= MIPI_CAL_CTRL_START;
tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
- mutex_unlock(&device->mipi->lock);
- clk_disable(device->mipi->clk);
+ /*
+ * Wait at least 72us to let the calibration logic finish the
+ * calibration sequence before waiting for the pads' idle state
+ * to apply the results.
+ */
+ usleep_range(75, 80);
return 0;
}
-EXPORT_SYMBOL(tegra_mipi_calibrate);
+EXPORT_SYMBOL(tegra_mipi_start_calibration);
static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
{ .data = MIPI_CAL_CONFIG_CSIA },
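
Splitting tegra_mipi_calibrate() into start/finish halves lets a DSI/CSI
driver trigger calibration, bring up its pads, and only then poll for the
DONE status; the 72us settling delay now lives at the end of the start half.
A hedged usage sketch (error handling trimmed; dsi->mipi stands in for the
caller's tegra_mipi_device):

	err = tegra_mipi_start_calibration(dsi->mipi);
	if (err < 0)
		return err;

	/* ... enable the pads so the calibration results can be applied ... */

	err = tegra_mipi_finish_calibration(dsi->mipi);
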
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index b3dae9ec1a38..d166ee262ce4 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -133,73 +133,6 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
-bool ipu_pixelformat_is_planar(u32 pixelformat)
-{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_YUV422P:
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- return true;
- }
-
- return false;
-}
-EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
-
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
-{
- switch (mbus_code & 0xf000) {
- case 0x1000:
- return IPUV3_COLORSPACE_RGB;
- case 0x2000:
- return IPUV3_COLORSPACE_YUV;
- default:
- return IPUV3_COLORSPACE_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
-
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
-{
- switch (pixelformat) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
- case V4L2_PIX_FMT_YUV422P:
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- /*
- * for the planar YUV formats, the stride passed to
- * cpmem must be the stride in bytes of the Y plane.
- * And all the planar YUV formats have an 8-bit
- * Y component.
- */
- return (8 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- return (16 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_BGR24:
- case V4L2_PIX_FMT_RGB24:
- return (24 * pixel_stride) >> 3;
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_XBGR32:
- case V4L2_PIX_FMT_XRGB32:
- return (32 * pixel_stride) >> 3;
- default:
- break;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
-
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
bool hflip, bool vflip)
{
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index 58ea374d8aaa..9ec949a438ef 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -620,7 +620,7 @@ static struct attribute *interface_common_attrs[] = {
static umode_t interface_unipro_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gb_interface *intf = to_gb_interface(dev);
switch (intf->type) {
@@ -635,7 +635,7 @@ static umode_t interface_unipro_is_visible(struct kobject *kobj,
static umode_t interface_greybus_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gb_interface *intf = to_gb_interface(dev);
switch (intf->type) {
@@ -649,7 +649,7 @@ static umode_t interface_greybus_is_visible(struct kobject *kobj,
static umode_t interface_power_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gb_interface *intf = to_gb_interface(dev);
switch (intf->type) {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 05315b434276..612629678c84 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -397,6 +397,15 @@ config HID_GOOGLE_HAMMER
help
Say Y here if you have a Google Hammer device.
+config HID_VIVALDI
+ tristate "Vivaldi Keyboard"
+ depends on HID
+ help
+ Say Y here if you want to enable support for Vivaldi keyboards.
+
+ Vivaldi keyboards use a vendor-specific (Google) HID usage to report
+ how the keys in the top row are physically ordered.
+
config HID_GT683R
tristate "MSI GT68xR LED support"
depends on LEDS_CLASS && USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index d8ea4b8c95af..4acb583c92a6 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
+obj-$(CONFIG_HID_VIVALDI) += hid-vivaldi.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index a9c2de95c5e2..3feaece13ade 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -526,7 +526,7 @@ static int u1_init(struct hid_device *hdev, struct alps_dev *pri_data)
ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y,
&sen_line_num_y, 0, true);
- if (ret < 0) {
+ if (ret < 0) {
dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret);
goto exit;
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index e82f604d33e9..6b8f0d004d34 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -503,6 +503,8 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS),
.driver_data = APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index d2ecc9c45255..56172fe6995c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -814,6 +814,13 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
+
+ if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage[i] ==
+ (HID_UP_GOOGLEVENDOR | 0x0001))
+ parser->device->group =
+ HID_GROUP_VIVALDI;
}
static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
@@ -920,7 +927,7 @@ static int hid_scan_report(struct hid_device *hid)
/**
* hid_parse_report - parse device report
*
- * @device: hid device
+ * @hid: hid device
* @start: report start
* @size: report size
*
@@ -945,7 +952,7 @@ static const char * const hid_report_names[] = {
/**
* hid_validate_values - validate existing device report's value indexes
*
- * @device: hid device
+ * @hid: hid device
* @type: which report type to examine
* @id: which report ID to examine (0 for first)
* @field_index: which report field to examine
@@ -1444,7 +1451,7 @@ static int search(__s32 *array, __s32 value, unsigned n)
* hid_match_report - check if driver's raw_event should be called
*
* @hid: hid device
- * @report_type: type to match against
+ * @report: hid report to match against
*
* compare hid->driver->report_table->report_type to report->type
*/
@@ -2120,7 +2127,7 @@ struct hid_dynid {
/**
* store_new_id - add a new HID device ID to this driver and re-probe devices
- * @driver: target device driver
+ * @drv: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
*
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f64517bc33e2..21e15627a461 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1235,6 +1235,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct cp2112_device *dev;
u8 buf[3];
struct cp2112_smbus_config_report config;
+ struct gpio_irq_chip *girq;
int ret;
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -1338,6 +1339,15 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
+ girq = &dev->gc.irq;
+ girq->chip = &cp2112_gpio_irqchip;
+ /* The event comes from the outside so no parent handler */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
hid_err(hdev, "error registering gpio chip\n");
@@ -1353,17 +1363,8 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
- ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(dev->gc.parent, "failed to add IRQ chip\n");
- goto err_sysfs_remove;
- }
-
return ret;
-err_sysfs_remove:
- sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 9453147d020d..d7eaf9100370 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1101,11 +1101,6 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
set_current_state(TASK_INTERRUPTIBLE);
while (kfifo_is_empty(&list->hid_debug_fifo)) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -1122,6 +1117,11 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
goto out;
}
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
/* allow O_NONBLOCK from other threads */
mutex_unlock(&list->read_mutex);
schedule();
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 0b6ee1dee625..978ee2aab2d4 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -104,8 +104,8 @@ struct synthhid_input_report {
#pragma pack(pop)
-#define INPUTVSC_SEND_RING_BUFFER_SIZE (40 * 1024)
-#define INPUTVSC_RECV_RING_BUFFER_SIZE (40 * 1024)
+#define INPUTVSC_SEND_RING_BUFFER_SIZE VMBUS_RING_SIZE(36 * 1024)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE VMBUS_RING_SIZE(36 * 1024)
enum pipe_prot_msg_type {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 74fc1df6e3c2..d69842f79fc6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -727,6 +727,8 @@
#define USB_DEVICE_ID_LENOVO_TP10UBKBD 0x6062
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
+#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
+#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
@@ -1123,6 +1125,7 @@
#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 88e19996427e..9770db624bfa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -797,7 +797,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x3b: /* Battery Strength */
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;
case 0x3c: /* Invert */
map_key_clear(BTN_TOOL_RUBBER);
@@ -1059,7 +1059,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_DC_BATTERYSTRENGTH:
hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR;
- goto ignore;
+ return;
}
goto unknown;
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 6c55682c5974..044a93f3c117 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -44,6 +44,10 @@ static const struct hid_device_id ite_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_SYNAPTICS,
USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ /* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 38ee25a813b9..72fb6e54a50a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -820,7 +820,7 @@ static void logi_dj_recv_queue_unknown_work(struct dj_receiver_dev *djrcv_dev)
{
struct dj_workitem workitem = { .type = WORKITEM_TYPE_UNKNOWN };
- /* Rate limit queries done because of unhandeled reports to 2/sec */
+ /* Rate limit queries done because of unhandled reports to 2/sec */
if (time_before(jiffies, djrcv_dev->last_query + HZ / 2))
return;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e3152155c4b8..d670bcd57bde 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1973,6 +1973,18 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
+ /* Lenovo X1 TAB Gen 2 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB) },
+
+ /* Lenovo X1 TAB Gen 3 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB3) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 34da38d5b0cd..d6faa0e00f95 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -59,10 +59,10 @@ int picolcd_raw_cir(struct picolcd_data *data,
for (i = 0; i+1 < sz; i += 2) {
w = (raw_data[i] << 8) | (raw_data[i+1]);
rawir.pulse = !!(w & 0x8000);
- rawir.duration = US_TO_NS(rawir.pulse ? (65536 - w) : w);
+ rawir.duration = rawir.pulse ? (65536 - w) : w;
/* Quirk!! - see above */
- if (i == 0 && rawir.duration > 15000000)
- rawir.duration -= 15000000;
+ if (i == 0 && rawir.duration > 15000)
+ rawir.duration -= 15000;
ir_raw_event_store(data->rc_dev, &rawir);
}
ir_raw_event_handle(data->rc_dev);
@@ -114,8 +114,8 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
rdev->dev.parent = &data->hdev->dev;
rdev->driver_name = PICOLCD_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
- rdev->timeout = MS_TO_NS(100);
- rdev->rx_resolution = US_TO_NS(1);
+ rdev->timeout = MS_TO_US(100);
+ rdev->rx_resolution = 1;
ret = rc_register_device(rdev);
if (ret)
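
A quick unit check on this conversion: rc-core durations are now carried in
microseconds, so the 15 ms leading-gap quirk threshold shrinks from
15000000 ns to 15000 us, the timeout becomes MS_TO_US(100) = 100000 us, and
rx_resolution drops from US_TO_NS(1) = 1000 ns to a plain 1 us. The raw
sample w itself needs no scaling: the old code wrapped it in US_TO_NS(), so
it was already a microsecond count.
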
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 7f41213d5ae3..311eee599ce9 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -720,7 +720,7 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
if (data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)
- rmi_hid_pdata.f30_data.disable = true;
+ rmi_hid_pdata.gpio_data.disable = true;
data->xport.dev = hdev->dev.parent;
data->xport.pdata = rmi_hid_pdata;
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 2ff4c8e366ff..1ca64481145e 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -294,31 +294,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference, old_profile;
+ struct kone_settings *settings = (struct kone_settings *)buf;
/* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings))
return -EINVAL;
mutex_lock(&kone->kone_lock);
- difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings));
+ difference = memcmp(settings, &kone->settings,
+ sizeof(struct kone_settings));
if (difference) {
- retval = kone_set_settings(usb_dev,
- (struct kone_settings const *)buf);
- if (retval) {
- mutex_unlock(&kone->kone_lock);
- return retval;
+ if (settings->startup_profile < 1 ||
+ settings->startup_profile > 5) {
+ retval = -EINVAL;
+ goto unlock;
}
+ retval = kone_set_settings(usb_dev, settings);
+ if (retval)
+ goto unlock;
+
old_profile = kone->settings.startup_profile;
- memcpy(&kone->settings, buf, sizeof(struct kone_settings));
+ memcpy(&kone->settings, settings, sizeof(struct kone_settings));
kone_profile_activated(kone, kone->settings.startup_profile);
if (kone->settings.startup_profile != old_profile)
kone_profile_report(kone, kone->settings.startup_profile);
}
+unlock:
mutex_unlock(&kone->kone_lock);
+ if (retval)
+ return retval;
+
return sizeof(struct kone_settings);
}
static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
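
The rework follows a validate-then-commit shape: the startup profile is
range-checked before anything is written, the hardware write must succeed,
and only then is the cached copy updated, so a rejected write leaves
kone->settings untouched. A minimal sketch of the shape (names hypothetical):

	static ssize_t store_settings(const struct kone_settings *new,
				      struct kone_settings *cached)
	{
		if (new->startup_profile < 1 || new->startup_profile > 5)
			return -EINVAL;		/* reject before any side effect */

		if (hw_write_settings(new))	/* hypothetical device write */
			return -EIO;

		*cached = *new;			/* commit only after success */
		return sizeof(*cached);
	}
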
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
new file mode 100644
index 000000000000..cd7ada48b1d9
--- /dev/null
+++ b/drivers/hid/hid-vivaldi.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HID support for Vivaldi Keyboard
+ *
+ * Copyright 2020 Google LLC.
+ * Author: Sean O'Brien <seobrien@chromium.org>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#define MIN_FN_ROW_KEY 1
+#define MAX_FN_ROW_KEY 24
+#define HID_VD_FN_ROW_PHYSMAP 0x00000001
+#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
+
+static struct hid_driver hid_vivaldi;
+
+struct vivaldi_data {
+ u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
+ int max_function_row_key;
+};
+
+static ssize_t function_row_physmap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
+ ssize_t size = 0;
+ int i;
+
+ if (!drvdata->max_function_row_key)
+ return 0;
+
+ for (i = 0; i < drvdata->max_function_row_key; i++)
+ size += sprintf(buf + size, "%02X ",
+ drvdata->function_row_physmap[i]);
+ size += sprintf(buf + size, "\n");
+ return size;
+}
+
+static DEVICE_ATTR_RO(function_row_physmap);
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_function_row_physmap.attr,
+ NULL
+};
+
+static const struct attribute_group input_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
+static int vivaldi_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct vivaldi_data *drvdata;
+ int ret;
+
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, drvdata);
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static void vivaldi_feature_mapping(struct hid_device *hdev,
+ struct hid_field *field,
+ struct hid_usage *usage)
+{
+ struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
+ int fn_key;
+ int ret;
+ u32 report_len;
+ u8 *buf;
+
+ if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
+ return;
+
+ fn_key = (usage->hid & HID_USAGE);
+ if (fn_key < MIN_FN_ROW_KEY || fn_key > MAX_FN_ROW_KEY)
+ return;
+ if (fn_key > drvdata->max_function_row_key)
+ drvdata->max_function_row_key = fn_key;
+
+ buf = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ report_len = hid_report_len(field->report);
+ ret = hid_hw_raw_request(hdev, field->report->id, buf,
+ report_len, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ dev_warn(&hdev->dev, "failed to fetch feature %d\n",
+ field->report->id);
+ goto out;
+ }
+
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf,
+ report_len, 0);
+ if (ret) {
+ dev_warn(&hdev->dev, "failed to report feature %d\n",
+ field->report->id);
+ goto out;
+ }
+
+ drvdata->function_row_physmap[fn_key - MIN_FN_ROW_KEY] =
+ field->value[usage->usage_index];
+
+out:
+ kfree(buf);
+}
+
+static int vivaldi_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group);
+}
+
+static const struct hid_device_id vivaldi_table[] = {
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_VIVALDI, HID_ANY_ID,
+ HID_ANY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, vivaldi_table);
+
+static struct hid_driver hid_vivaldi = {
+ .name = "hid-vivaldi",
+ .id_table = vivaldi_table,
+ .probe = vivaldi_probe,
+ .feature_mapping = vivaldi_feature_mapping,
+ .input_configured = vivaldi_input_configured,
+};
+
+module_hid_driver(hid_vivaldi);
+
+MODULE_AUTHOR("Sean O'Brien");
+MODULE_DESCRIPTION("HID vivaldi driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index e484c3618dec..41012681cafd 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1586,7 +1586,7 @@ struct wiiproto_handler {
void (*func)(struct wiimote_data *wdata, const __u8 *payload);
};
-static struct wiiproto_handler handlers[] = {
+static const struct wiiproto_handler handlers[] = {
{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
{ .id = WIIPROTO_REQ_STATUS, .size = 2, .func = handler_status_K },
{ .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
@@ -1618,19 +1618,19 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
struct wiimote_data *wdata = hid_get_drvdata(hdev);
- struct wiiproto_handler *h;
+ const struct wiiproto_handler *h;
int i;
unsigned long flags;
if (size < 1)
return -EINVAL;
- spin_lock_irqsave(&wdata->state.lock, flags);
-
for (i = 0; handlers[i].id; ++i) {
h = &handlers[i];
if (h->id == raw_data[0] && h->size < size) {
+ spin_lock_irqsave(&wdata->state.lock, flags);
h->func(wdata, &raw_data[1]);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
break;
}
}
@@ -1639,8 +1639,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
size);
- spin_unlock_irqrestore(&wdata->state.lock, flags);
-
return 0;
}
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index dbd04492825d..786e3e9af1c9 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -323,7 +323,7 @@ static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
* @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
* @reportID: the report ID
* @buf: the actual data to transfer, without the report ID
- * @len: size of buf
+ * @data_len: size of buf
* @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
*/
static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
@@ -935,6 +935,14 @@ static void i2c_hid_acpi_fix_up_power(struct device *dev)
acpi_device_fix_up_power(adev);
}
+static void i2c_hid_acpi_enable_wakeup(struct device *dev)
+{
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev, false);
+ }
+}
+
static const struct acpi_device_id i2c_hid_acpi_match[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
@@ -949,6 +957,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
}
static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
+
+static inline void i2c_hid_acpi_enable_wakeup(struct device *dev) {}
#endif
#ifdef CONFIG_OF
@@ -1076,6 +1086,8 @@ static int i2c_hid_probe(struct i2c_client *client,
i2c_hid_acpi_fix_up_power(&client->dev);
+ i2c_hid_acpi_enable_wakeup(&client->dev);
+
device_enable_async_suspend(&client->dev);
/* Make sure there is something at this address */
@@ -1268,6 +1280,7 @@ static struct i2c_driver i2c_hid_driver = {
.driver = {
.name = "i2c_hid",
.pm = &i2c_hid_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
.of_match_table = of_match_ptr(i2c_hid_of_match),
},
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 8f8dfdf64833..a45ac7fa417b 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -755,7 +755,7 @@ static int _ish_hw_reset(struct ishtp_device *dev)
csr |= PCI_D3hot;
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
- mdelay(pdev->d3_delay);
+ mdelay(pdev->d3hot_delay);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index c47c3328a0f4..bba29cd36d29 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -502,8 +502,6 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
struct module *owner)
{
- int err;
-
if (!ishtp_device_ready)
return -ENODEV;
@@ -511,11 +509,7 @@ int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
driver->driver.owner = owner;
driver->driver.bus = &ishtp_cl_bus_type;
- err = driver_register(&driver->driver);
- if (err)
- return err;
-
- return 0;
+ return driver_register(&driver->driver);
}
EXPORT_SYMBOL(ishtp_cl_driver_register);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 83dfec327c42..1bd0eb71559c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2773,7 +2773,9 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
if (report->type != HID_INPUT_REPORT)
return -1;
- if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ if (WACOM_PAD_FIELD(field))
+ return 0;
+ else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
wacom_wac_pen_report(hdev, report);
else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
wacom_wac_finger_report(hdev, report);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 3ebda7707e46..fbdda9938039 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -22,20 +22,97 @@
#include "hyperv_vmbus.h"
-#define NUM_PAGES_SPANNED(addr, len) \
-((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
+/*
+ * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
+ *
+ * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
+ *
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
+ * (because of the alignment requirement), however, the hypervisor only
+ * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
+ * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
+ * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
+ * total size that the guest uses minus twice the gap size.
+ */
+static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
+{
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ return size;
+ case HV_GPADL_RING:
+ /* The size of a ringbuffer must be page-aligned */
+ BUG_ON(size % PAGE_SIZE);
+ /*
+ * Two things to notice here:
+ * 1) We're processing two ring buffers as a unit
+ * 2) In the first guest-size page of each of the two ring buffers,
+ * only the first HV_HYP_PAGE_SIZE is used; the rest is skipped.
+ * So we effectively subtract out two guest-size pages, and add
+ * back two Hyper-V size pages.
+ */
+ return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ }
+ BUG();
+ return 0;
+}
-static unsigned long virt_to_hvpfn(void *addr)
+/*
+ * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in units of
+ * HV_HYP_PAGE) in a ring gpadl based on the
+ * offset in the guest
+ *
+ * @offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ */
+static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
{
- phys_addr_t paddr;
- if (is_vmalloc_addr(addr))
- paddr = page_to_phys(vmalloc_to_page(addr)) +
- offset_in_page(addr);
- else
- paddr = __pa(addr);
+ /*
+ * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
+ * header (because of the alignment requirement), however, the
+ * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
+ * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
+ *
+ * And to calculate the effective send offset in gpadl, we need to
+ * subtract this gap.
+ */
+ return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
+}
- return paddr >> PAGE_SHIFT;
+/*
+ * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
+ * the gpadl
+ *
+ * @type: the type of the gpadl
+ * @kbuffer: the pointer to the gpadl in the guest
+ * @size: the total size (in bytes) of the gpadl
+ * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
+ * virtual address space of the guest
+ * @i: the index
+ */
+static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset, int i)
+{
+ int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
+ unsigned long delta = 0UL;
+
+ switch (type) {
+ case HV_GPADL_BUFFER:
+ break;
+ case HV_GPADL_RING:
+ if (i == 0)
+ delta = 0;
+ else if (i <= send_idx)
+ delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
+ else
+ delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
}
/*
@@ -112,160 +189,6 @@ int vmbus_alloc_ring(struct vmbus_channel *newchannel,
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
-static int __vmbus_open(struct vmbus_channel *newchannel,
- void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
-{
- struct vmbus_channel_open_channel *open_msg;
- struct vmbus_channel_msginfo *open_info = NULL;
- struct page *page = newchannel->ringbuffer_page;
- u32 send_pages, recv_pages;
- unsigned long flags;
- int err;
-
- if (userdatalen > MAX_USER_DEFINED_BYTES)
- return -EINVAL;
-
- send_pages = newchannel->ringbuffer_send_offset;
- recv_pages = newchannel->ringbuffer_pagecount - send_pages;
-
- if (newchannel->state != CHANNEL_OPEN_STATE)
- return -EINVAL;
-
- newchannel->state = CHANNEL_OPENING_STATE;
- newchannel->onchannel_callback = onchannelcallback;
- newchannel->channel_callback_context = context;
-
- err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
- if (err)
- goto error_clean_ring;
-
- err = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_pages], recv_pages);
- if (err)
- goto error_clean_ring;
-
- /* Establish the gpadl for the ring buffer */
- newchannel->ringbuffer_gpadlhandle = 0;
-
- err = vmbus_establish_gpadl(newchannel,
- page_address(newchannel->ringbuffer_page),
- (send_pages + recv_pages) << PAGE_SHIFT,
- &newchannel->ringbuffer_gpadlhandle);
- if (err)
- goto error_clean_ring;
-
- /* Create and init the channel open message */
- open_info = kmalloc(sizeof(*open_info) +
- sizeof(struct vmbus_channel_open_channel),
- GFP_KERNEL);
- if (!open_info) {
- err = -ENOMEM;
- goto error_free_gpadl;
- }
-
- init_completion(&open_info->waitevent);
- open_info->waiting_channel = newchannel;
-
- open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
- open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
- open_msg->openid = newchannel->offermsg.child_relid;
- open_msg->child_relid = newchannel->offermsg.child_relid;
- open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
- open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
-
- if (userdatalen)
- memcpy(open_msg->userdata, userdata, userdatalen);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_add_tail(&open_info->msglistentry,
- &vmbus_connection.chn_msg_list);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- if (newchannel->rescind) {
- err = -ENODEV;
- goto error_free_info;
- }
-
- err = vmbus_post_msg(open_msg,
- sizeof(struct vmbus_channel_open_channel), true);
-
- trace_vmbus_open(open_msg, err);
-
- if (err != 0)
- goto error_clean_msglist;
-
- wait_for_completion(&open_info->waitevent);
-
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&open_info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
- if (newchannel->rescind) {
- err = -ENODEV;
- goto error_free_info;
- }
-
- if (open_info->response.open_result.status) {
- err = -EAGAIN;
- goto error_free_info;
- }
-
- newchannel->state = CHANNEL_OPENED_STATE;
- kfree(open_info);
- return 0;
-
-error_clean_msglist:
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
- list_del(&open_info->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-error_free_info:
- kfree(open_info);
-error_free_gpadl:
- vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- newchannel->ringbuffer_gpadlhandle = 0;
-error_clean_ring:
- hv_ringbuffer_cleanup(&newchannel->outbound);
- hv_ringbuffer_cleanup(&newchannel->inbound);
- newchannel->state = CHANNEL_OPEN_STATE;
- return err;
-}
-
-/*
- * vmbus_connect_ring - Open the channel but reuse ring buffer
- */
-int vmbus_connect_ring(struct vmbus_channel *newchannel,
- void (*onchannelcallback)(void *context), void *context)
-{
- return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
-}
-EXPORT_SYMBOL_GPL(vmbus_connect_ring);
-
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel,
- u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
- void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
-{
- int err;
-
- err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
- recv_ringbuffer_size);
- if (err)
- return err;
-
- err = __vmbus_open(newchannel, userdata, userdatalen,
- onchannelcallback, context);
- if (err)
- vmbus_free_ring(newchannel);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(vmbus_open);
-
/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id)
@@ -317,7 +240,8 @@ EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
-static int create_gpadl_header(void *kbuffer, u32 size,
+static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
struct vmbus_channel_msginfo **msginfo)
{
int i;
@@ -330,7 +254,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;
- pagecount = size >> PAGE_SHIFT;
+ pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
/* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -357,10 +281,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * i);
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
pfnsum = pfncount;
@@ -411,8 +335,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * (pfnsum + i));
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
/* add to msg header */
list_add_tail(&msgbody->msglistentry,
@@ -438,10 +362,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
gpadl_header->range_buflen = sizeof(struct gpa_range) +
pagecount * sizeof(u64);
gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = size;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
- kbuffer + PAGE_SIZE * i);
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
*msginfo = msgheader;
}
@@ -454,15 +378,20 @@ nomem:
}
/*
- * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
*
* @channel: a channel
+ * @type: the type of the corresponding GPADL, only meaningful for the guest.
* @kbuffer: from kmalloc or vmalloc
* @size: page-size multiple
+ * @send_offset: the offset (in bytes) where the send ring buffer starts,
+ * should be 0 for BUFFER type gpadl
* @gpadl_handle: some funky thing
*/
-int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- u32 size, u32 *gpadl_handle)
+static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
+ enum hv_gpadl_type type, void *kbuffer,
+ u32 size, u32 send_offset,
+ u32 *gpadl_handle)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
@@ -476,7 +405,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
next_gpadl_handle =
(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
- ret = create_gpadl_header(kbuffer, size, &msginfo);
+ ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
if (ret)
return ret;
@@ -557,8 +486,184 @@ cleanup:
kfree(msginfo);
return ret;
}
+
+/*
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
+ *
+ * @channel: a channel
+ * @kbuffer: from kmalloc or vmalloc
+ * @size: page-size multiple
+ * @gpadl_handle: some funky thing
+ */
+int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+ u32 size, u32 *gpadl_handle)
+{
+ return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
+ 0U, gpadl_handle);
+}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
+static int __vmbus_open(struct vmbus_channel *newchannel,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ struct vmbus_channel_open_channel *open_msg;
+ struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
+ unsigned long flags;
+ int err;
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
+ return -EINVAL;
+
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+ if (newchannel->state != CHANNEL_OPEN_STATE)
+ return -EINVAL;
+
+ newchannel->state = CHANNEL_OPENING_STATE;
+ newchannel->onchannel_callback = onchannelcallback;
+ newchannel->channel_callback_context = context;
+
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (err)
+ goto error_clean_ring;
+
+ err = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_pages], recv_pages);
+ if (err)
+ goto error_clean_ring;
+
+ /* Establish the gpadl for the ring buffer */
+ newchannel->ringbuffer_gpadlhandle = 0;
+
+ err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
+ page_address(newchannel->ringbuffer_page),
+ (send_pages + recv_pages) << PAGE_SHIFT,
+ newchannel->ringbuffer_send_offset << PAGE_SHIFT,
+ &newchannel->ringbuffer_gpadlhandle);
+ if (err)
+ goto error_clean_ring;
+
+ /* Create and init the channel open message */
+ open_info = kmalloc(sizeof(*open_info) +
+ sizeof(struct vmbus_channel_open_channel),
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+ goto error_free_gpadl;
+ }
+
+ init_completion(&open_info->waitevent);
+ open_info->waiting_channel = newchannel;
+
+ open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
+ open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+ open_msg->openid = newchannel->offermsg.child_relid;
+ open_msg->child_relid = newchannel->offermsg.child_relid;
+ open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ /*
+ * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
+ * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
+ * here we convert it to HV_HYP_PAGE.
+ */
+ open_msg->downstream_ringbuffer_pageoffset =
+ hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
+ open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);
+
+ if (userdatalen)
+ memcpy(open_msg->userdata, userdata, userdatalen);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_add_tail(&open_info->msglistentry,
+ &vmbus_connection.chn_msg_list);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_info;
+ }
+
+ err = vmbus_post_msg(open_msg,
+ sizeof(struct vmbus_channel_open_channel), true);
+
+ trace_vmbus_open(open_msg, err);
+
+ if (err != 0)
+ goto error_clean_msglist;
+
+ wait_for_completion(&open_info->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_info;
+ }
+
+ if (open_info->response.open_result.status) {
+ err = -EAGAIN;
+ goto error_free_info;
+ }
+
+ newchannel->state = CHANNEL_OPENED_STATE;
+ kfree(open_info);
+ return 0;
+
+error_clean_msglist:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+error_free_info:
+ kfree(open_info);
+error_free_gpadl:
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+ newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
+ hv_ringbuffer_cleanup(&newchannel->outbound);
+ hv_ringbuffer_cleanup(&newchannel->inbound);
+ newchannel->state = CHANNEL_OPEN_STATE;
+ return err;
+}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+ u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+ recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+ onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vmbus_open);
+
/*
* vmbus_teardown_gpadl - Teardown the specified GPADL handle
*/
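
A worked instance of the gpadl size math above, assuming a 64 KiB guest
PAGE_SIZE (as on some ARM64 configurations) against the fixed 4 KiB
HV_HYP_PAGE_SIZE; the figures are illustrative only:

	/* Two 128 KiB rings: the guest allocates 256 KiB, but Hyper-V sees
	 * only 4 KiB of each ring's 64 KiB header page. */
	u32 guest_size = 2 * 128 * 1024;	/* 262144 bytes */
	u32 gap        = (64 - 4) * 1024;	/* 61440 bytes per ring */
	u32 hv_size    = guest_size - 2 * gap;	/* 139264 = 34 HV pages */

	/* With 4 KiB guest pages the gap is zero and both sizes coincide. */
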
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 75a8638ff68b..f202ac7f4b3d 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -165,7 +165,7 @@ void hv_synic_enable_regs(unsigned int cpu)
hv_get_simp(simp.as_uint64);
simp.simp_enabled = 1;
simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
- >> PAGE_SHIFT;
+ >> HV_HYP_PAGE_SHIFT;
hv_set_simp(simp.as_uint64);
@@ -173,14 +173,14 @@ void hv_synic_enable_regs(unsigned int cpu)
hv_get_siefp(siefp.as_uint64);
siefp.siefp_enabled = 1;
siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
- >> PAGE_SHIFT;
+ >> HV_HYP_PAGE_SHIFT;
hv_set_siefp(siefp.as_uint64);
/* Setup the shared SINT. */
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ shared_sint.vector = hv_get_vector();
shared_sint.masked = false;
shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 0f50295d0214..eb56e09ae15f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -726,7 +726,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
- (HA_CHUNK << PAGE_SHIFT));
+ (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE);
if (ret) {
pr_err("hot_add memory failed error is %d\n", ret);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a4e8d96513c2..05566ecdbe4b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -500,6 +500,9 @@ static void heartbeat_onchannelcallback(void *context)
}
}
+#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
+
static int util_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -530,8 +533,8 @@ static int util_probe(struct hv_device *dev,
hv_set_drvdata(dev, srv);
- ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
- 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
if (ret)
goto error;
@@ -590,8 +593,8 @@ static int util_resume(struct hv_device *dev)
return ret;
}
- ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
- 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
+ HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
dev->channel);
return ret;
}
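
The fixed "4 * HV_HYP_PAGE_SIZE" rings are re-expressed as
VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE). As I read the hyperv headers (treat
the exact definition as an assumption), VMBUS_RING_SIZE() page-aligns the
requested payload plus the struct hv_ring_buffer header, so on 4 KiB pages
the 12 KiB payload still yields a 16 KiB ring; the change makes the usable
payload explicit instead of folding the header into the raw page count.
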
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 946d0aba101f..4fad3e6745e5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -48,6 +48,10 @@ static int hyperv_cpuhp_online;
static void *hv_panic_page;
+/* Values parsed from ACPI DSDT */
+static int vmbus_irq;
+int vmbus_interrupt;
+
/*
* Boolean to control whether to report panic messages over Hyper-V.
*
@@ -83,7 +87,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
void *args)
{
- struct die_args *die = (struct die_args *)args;
+ struct die_args *die = args;
struct pt_regs *regs = die->regs;
/* Don't notify Hyper-V if the die event is other than oops */
@@ -1347,7 +1351,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
+ add_interrupt_randomness(hv_get_vector(), 0);
}
/*
@@ -1430,7 +1434,9 @@ static int vmbus_bus_init(void)
if (ret)
return ret;
- hv_setup_vmbus_irq(vmbus_isr);
+ ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
+ if (ret)
+ goto err_setup;
ret = hv_synic_alloc();
if (ret)
@@ -1505,7 +1511,7 @@ err_cpuhp:
hv_synic_free();
err_alloc:
hv_remove_vmbus_irq();
-
+err_setup:
bus_unregister(&hv_bus);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
@@ -2070,6 +2076,7 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
struct resource *new_res;
struct resource **old_res = &hyperv_mmio;
struct resource **prev_res = NULL;
+ struct resource r;
switch (res->type) {
@@ -2088,6 +2095,23 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
end = res->data.address64.address.maximum;
break;
+ /*
+ * The IRQ information is needed only on ARM64, which Hyper-V
+ * sets up in the extended format. IRQ information is present
+ * on x86/x64 in the non-extended format but it is not used by
+ * Linux. So don't bother checking for the non-extended format.
+ */
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ if (!acpi_dev_resource_interrupt(res, 0, &r)) {
+ pr_err("Unable to parse Hyper-V ACPI interrupt\n");
+ return AE_ERROR;
+ }
+ /* ARM64 INTID for VMbus */
+ vmbus_interrupt = res->data.extended_irq.interrupts[0];
+ /* Linux IRQ number */
+ vmbus_irq = r.start;
+ return AE_OK;
+
default:
/* Unused resource type */
return AE_OK;
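The two globals filled in above feed different consumers: vmbus_interrupt is the raw INTID as the hypervisor knows it, while vmbus_irq is the Linux IRQ number derived from the parsed resource and handed to hv_setup_vmbus_irq() earlier in this diff. A minimal sketch of what an architecture-side hv_setup_vmbus_irq() could look like; this is an assumption for illustration, not the real per-arch implementation (which may use per-CPU IRQ plumbing instead):

#include <linux/interrupt.h>

static void (*vmbus_handler)(void);

static irqreturn_t vmbus_irq_thunk(int irq, void *dev_id)
{
	vmbus_handler();	/* vmbus_isr() from the hunk above */
	return IRQ_HANDLED;
}

int hv_setup_vmbus_irq(int irq, void (*handler)(void))
{
	vmbus_handler = handler;
	/* Claim the Linux IRQ parsed out of the ACPI extended-IRQ resource. */
	return request_irq(irq, vmbus_irq_thunk, IRQF_NOBALANCING,
			   "Hyper-V VMbus", NULL);
}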
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8dc28b26916e..a850e4f0e0bd 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1080,7 +1080,7 @@ config SENSORS_MCP3021
will be called mcp3021.
config SENSORS_MLXREG_FAN
- tristate "Mellanox Mellanox FAN driver"
+ tristate "Mellanox FAN driver"
depends on MELLANOX_PLATFORM
imply THERMAL
select REGMAP
@@ -1112,6 +1112,16 @@ config SENSORS_MENF21BMC_HWMON
This driver can also be built as a module. If so the module
will be called menf21bmc_hwmon.
+config SENSORS_MR75203
+ tristate "Moortec Semiconductor MR75203 PVT Controller"
+ select REGMAP_MMIO
+ help
+	  If you say yes here you get support for the Moortec MR75203
+	  PVT controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called mr75203.
+
config SENSORS_ADCXX
tristate "National Semiconductor ADCxxxSxxx"
depends on SPI_MASTER
@@ -1479,6 +1489,16 @@ config SENSORS_RASPBERRYPI_HWMON
This driver can also be built as a module. If so, the module
will be called raspberrypi-hwmon.
+config SENSORS_SL28CPLD
+ tristate "Kontron sl28cpld hardware monitoring driver"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ help
+ If you say yes here you get support for the fan supervisor of the
+ sl28cpld board management controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called sl28cpld-hwmon.
+
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GPIOLIB || COMPILE_TEST
@@ -2064,6 +2084,17 @@ config SENSORS_XGENE
If you say yes here you get support for the temperature
and power sensors for APM X-Gene SoC.
+config SENSORS_INTEL_M10_BMC_HWMON
+ tristate "Intel MAX10 BMC Hardware Monitoring"
+ depends on MFD_INTEL_M10_BMC
+ help
+	  This driver provides support for the hardware monitoring functionality
+	  on the Intel MAX10 BMC chip.
+
+	  This BMC chip is used on Intel FPGA PCIe Acceleration Cards (PAC). Its
+ sensors monitor various telemetry data of different components on the
+ card, e.g. board temperature, FPGA core temperature/voltage/current.
+
if ACPI
comment "ACPI drivers"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index a8f4b35b136b..9db2903b61e5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
obj-$(CONFIG_SENSORS_INA3221) += ina3221.o
+obj-$(CONFIG_SENSORS_INTEL_M10_BMC_HWMON) += intel-m10-bmc-hwmon.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
@@ -142,6 +143,7 @@ obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
+obj-$(CONFIG_SENSORS_MR75203) += mr75203.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
@@ -159,6 +161,7 @@ obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
+obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index a529f2efc790..6a765755d061 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -169,8 +169,7 @@ static struct attribute *ad7414_attrs[] = {
ATTRIBUTE_GROUPS(ad7414);
-static int ad7414_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+static int ad7414_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ad7414_data *data;
@@ -222,7 +221,7 @@ static struct i2c_driver ad7414_driver = {
.name = "ad7414",
.of_match_table = of_match_ptr(ad7414_of_match),
},
- .probe = ad7414_probe,
+ .probe_new = ad7414_probe,
.id_table = ad7414_id,
};
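The ad7414 hunk above is the first of many identical conversions in this series: the i2c probe callback drops the i2c_device_id argument (.probe becomes .probe_new), and drivers that needed id->driver_data recover it via i2c_match_id(), as ad7418 does next. A minimal self-contained sketch of the pattern; the "foo" driver and its single id entry are hypothetical:

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id foo_id[];	/* forward declaration */

static int foo_probe(struct i2c_client *client)
{
	/* Variant data formerly delivered via the removed id argument;
	 * assumes the client was matched through the id table below. */
	unsigned long variant = i2c_match_id(foo_id, client)->driver_data;

	dev_info(&client->dev, "probing variant %lu\n", variant);
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.probe_new	= foo_probe,	/* was: .probe, two arguments */
	.id_table	= foo_id,
};
module_i2c_driver(foo_driver);
MODULE_LICENSE("GPL");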
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index 74542b8ad8ef..d618f6b2f382 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -230,8 +230,9 @@ static void ad7418_init_client(struct i2c_client *client)
}
}
-static int ad7418_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id ad7418_id[];
+
+static int ad7418_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter = client->adapter;
@@ -254,7 +255,7 @@ static int ad7418_probe(struct i2c_client *client,
if (dev->of_node)
data->type = (enum chips)of_device_get_match_data(dev);
else
- data->type = id->driver_data;
+ data->type = i2c_match_id(ad7418_id, client)->driver_data;
switch (data->type) {
case ad7416:
@@ -305,7 +306,7 @@ static struct i2c_driver ad7418_driver = {
.name = "ad7418",
.of_match_table = ad7418_dt_ids,
},
- .probe = ad7418_probe,
+ .probe_new = ad7418_probe,
.id_table = ad7418_id,
};
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 571d5454c6b2..6c9a906631b8 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -427,8 +427,7 @@ static int adc128_init_client(struct adc128_data *data)
return 0;
}
-static int adc128_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adc128_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct regulator *regulator;
@@ -524,7 +523,7 @@ static struct i2c_driver adc128_driver = {
.name = "adc128d818",
.of_match_table = of_match_ptr(adc128_of_match),
},
- .probe = adc128_probe,
+ .probe_new = adc128_probe,
.remove = adc128_remove,
.id_table = adc128_id,
.detect = adc128_detect,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index c45046241a1c..71deb2cd20f5 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -425,8 +425,9 @@ static void adm1021_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04);
}
-static int adm1021_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id adm1021_id[];
+
+static int adm1021_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct adm1021_data *data;
@@ -437,7 +438,7 @@ static int adm1021_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
- data->type = id->driver_data;
+ data->type = i2c_match_id(adm1021_id, client)->driver_data;
mutex_init(&data->update_lock);
/* Initialize the ADM1021 chip */
@@ -472,7 +473,7 @@ static struct i2c_driver adm1021_driver = {
.driver = {
.name = "adm1021",
},
- .probe = adm1021_probe,
+ .probe_new = adm1021_probe,
.id_table = adm1021_id,
.detect = adm1021_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index ed15185fa60f..de51e01c061b 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -517,8 +517,7 @@ static void adm1025_init_client(struct i2c_client *client)
(reg&0x7E)|0x01);
}
-static int adm1025_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adm1025_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -560,7 +559,7 @@ static struct i2c_driver adm1025_driver = {
.driver = {
.name = "adm1025",
},
- .probe = adm1025_probe,
+ .probe_new = adm1025_probe,
.id_table = adm1025_id,
.detect = adm1025_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index af77096724fd..49cefbadb156 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1816,8 +1816,7 @@ static void adm1026_init_client(struct i2c_client *client)
}
}
-static int adm1026_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adm1026_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -1860,7 +1859,7 @@ static struct i2c_driver adm1026_driver = {
.driver = {
.name = "adm1026",
},
- .probe = adm1026_probe,
+ .probe_new = adm1026_probe,
.id_table = adm1026_id,
.detect = adm1026_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index f7752a5bef31..50b1df7b008c 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -352,8 +352,7 @@ static int adm1029_init_client(struct i2c_client *client)
return 1;
}
-static int adm1029_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adm1029_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct adm1029_data *data;
@@ -390,7 +389,7 @@ static struct i2c_driver adm1029_driver = {
.driver = {
.name = "adm1029",
},
- .probe = adm1029_probe,
+ .probe_new = adm1029_probe,
.id_table = adm1029_id,
.detect = adm1029_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 7723a338446d..b538ace2d292 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -1022,8 +1022,9 @@ static void adm1031_init_client(struct i2c_client *client)
data->update_interval = update_intervals[i];
}
-static int adm1031_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id adm1031_id[];
+
+static int adm1031_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -1035,7 +1036,7 @@ static int adm1031_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
data->client = client;
- data->chip_type = id->driver_data;
+ data->chip_type = i2c_match_id(adm1031_id, client)->driver_data;
mutex_init(&data->update_lock);
if (data->chip_type == adm1030)
@@ -1068,7 +1069,7 @@ static struct i2c_driver adm1031_driver = {
.driver = {
.name = "adm1031",
},
- .probe = adm1031_probe,
+ .probe_new = adm1031_probe,
.id_table = adm1031_id,
.detect = adm1031_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
index d314223a404a..6e8bb661894b 100644
--- a/drivers/hwmon/adm1177.c
+++ b/drivers/hwmon/adm1177.c
@@ -196,8 +196,7 @@ static void adm1177_remove(void *data)
regulator_disable(st->reg);
}
-static int adm1177_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adm1177_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -277,7 +276,7 @@ static struct i2c_driver adm1177_driver = {
.name = "adm1177",
.of_match_table = adm1177_dt_ids,
},
- .probe = adm1177_probe,
+ .probe_new = adm1177_probe,
.id_table = adm1177_id,
};
module_i2c_driver(adm1177_driver);
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 496d47490e10..cc3e0184e720 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -38,6 +38,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
+#include <linux/regmap.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
@@ -123,6 +124,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
/* per client data */
struct adm9240_data {
struct i2c_client *client;
+ struct regmap *regmap;
struct mutex update_lock;
char valid;
unsigned long last_updated_measure;
@@ -143,68 +145,141 @@ struct adm9240_data {
};
/* write new fan div, callers must hold data->update_lock */
-static void adm9240_write_fan_div(struct i2c_client *client, int nr,
+static int adm9240_write_fan_div(struct adm9240_data *data, int nr,
u8 fan_div)
{
- u8 reg, old, shift = (nr + 2) * 2;
+ unsigned int reg, old, shift = (nr + 2) * 2;
+ int err;
- reg = i2c_smbus_read_byte_data(client, ADM9240_REG_VID_FAN_DIV);
+ err = regmap_read(data->regmap, ADM9240_REG_VID_FAN_DIV, &reg);
+ if (err < 0)
+ return err;
old = (reg >> shift) & 3;
reg &= ~(3 << shift);
reg |= (fan_div << shift);
- i2c_smbus_write_byte_data(client, ADM9240_REG_VID_FAN_DIV, reg);
- dev_dbg(&client->dev,
+ err = regmap_write(data->regmap, ADM9240_REG_VID_FAN_DIV, reg);
+ if (err < 0)
+ return err;
+ dev_dbg(&data->client->dev,
"fan%d clock divider changed from %u to %u\n",
nr + 1, 1 << old, 1 << fan_div);
+
+ return 0;
+}
+
+static int adm9240_update_measure(struct adm9240_data *data)
+{
+ unsigned int val;
+ u8 regs[2];
+ int err;
+ int i;
+
+ err = regmap_bulk_read(data->regmap, ADM9240_REG_IN(0), &data->in[0], 6);
+ if (err < 0)
+ return err;
+ err = regmap_bulk_read(data->regmap, ADM9240_REG_INT(0), &regs, 2);
+ if (err < 0)
+ return err;
+
+ data->alarms = regs[0] | regs[1] << 8;
+
+ /*
+ * read temperature: assume temperature changes less than
+ * 0.5'C per two measurement cycles thus ignore possible
+ * but unlikely aliasing error on lsb reading. --Grant
+ */
+ err = regmap_read(data->regmap, ADM9240_REG_TEMP, &val);
+ if (err < 0)
+ return err;
+ data->temp = val << 8;
+ err = regmap_read(data->regmap, ADM9240_REG_TEMP_CONF, &val);
+ if (err < 0)
+ return err;
+ data->temp |= val;
+
+ err = regmap_bulk_read(data->regmap, ADM9240_REG_FAN(0),
+ &data->fan[0], 2);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < 2; i++) { /* read fans */
+ /* adjust fan clock divider on overflow */
+ if (data->valid && data->fan[i] == 255 &&
+ data->fan_div[i] < 3) {
+
+ err = adm9240_write_fan_div(data, i,
+ ++data->fan_div[i]);
+ if (err < 0)
+ return err;
+
+ /* adjust fan_min if active, but not to 0 */
+ if (data->fan_min[i] < 255 &&
+ data->fan_min[i] >= 2)
+ data->fan_min[i] /= 2;
+ }
+ }
+
+ return 0;
+}
+
+static int adm9240_update_config(struct adm9240_data *data)
+{
+ unsigned int val;
+ int i;
+ int err;
+
+ for (i = 0; i < 6; i++) {
+ err = regmap_raw_read(data->regmap, ADM9240_REG_IN_MIN(i),
+ &data->in_min[i], 1);
+ if (err < 0)
+ return err;
+ err = regmap_raw_read(data->regmap, ADM9240_REG_IN_MAX(i),
+ &data->in_max[i], 1);
+ if (err < 0)
+ return err;
+ }
+ err = regmap_bulk_read(data->regmap, ADM9240_REG_FAN_MIN(0),
+ &data->fan_min[0], 2);
+ if (err < 0)
+ return err;
+ err = regmap_bulk_read(data->regmap, ADM9240_REG_TEMP_MAX(0),
+ &data->temp_max[0], 2);
+ if (err < 0)
+ return err;
+
+ /* read fan divs and 5-bit VID */
+ err = regmap_read(data->regmap, ADM9240_REG_VID_FAN_DIV, &val);
+ if (err < 0)
+ return err;
+ data->fan_div[0] = (val >> 4) & 3;
+ data->fan_div[1] = (val >> 6) & 3;
+ data->vid = val & 0x0f;
+ err = regmap_read(data->regmap, ADM9240_REG_VID4, &val);
+ if (err < 0)
+ return err;
+ data->vid |= (val & 1) << 4;
+ /* read analog out */
+ err = regmap_raw_read(data->regmap, ADM9240_REG_ANALOG_OUT,
+ &data->aout, 1);
+
+ return err;
}
static struct adm9240_data *adm9240_update_device(struct device *dev)
{
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int i;
+ int err;
mutex_lock(&data->update_lock);
/* minimum measurement cycle: 1.75 seconds */
if (time_after(jiffies, data->last_updated_measure + (HZ * 7 / 4))
|| !data->valid) {
-
- for (i = 0; i < 6; i++) { /* read voltages */
- data->in[i] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_IN(i));
- }
- data->alarms = i2c_smbus_read_byte_data(client,
- ADM9240_REG_INT(0)) |
- i2c_smbus_read_byte_data(client,
- ADM9240_REG_INT(1)) << 8;
-
- /*
- * read temperature: assume temperature changes less than
- * 0.5'C per two measurement cycles thus ignore possible
- * but unlikely aliasing error on lsb reading. --Grant
- */
- data->temp = (i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP) << 8) |
- i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF);
-
- for (i = 0; i < 2; i++) { /* read fans */
- data->fan[i] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_FAN(i));
-
- /* adjust fan clock divider on overflow */
- if (data->valid && data->fan[i] == 255 &&
- data->fan_div[i] < 3) {
-
- adm9240_write_fan_div(client, i,
- ++data->fan_div[i]);
-
- /* adjust fan_min if active, but not to 0 */
- if (data->fan_min[i] < 255 &&
- data->fan_min[i] >= 2)
- data->fan_min[i] /= 2;
- }
+ err = adm9240_update_measure(data);
+ if (err < 0) {
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ return ERR_PTR(err);
}
data->last_updated_measure = jiffies;
}
@@ -212,33 +287,12 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
/* minimum config reading cycle: 300 seconds */
if (time_after(jiffies, data->last_updated_config + (HZ * 300))
|| !data->valid) {
-
- for (i = 0; i < 6; i++) {
- data->in_min[i] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_IN_MIN(i));
- data->in_max[i] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_IN_MAX(i));
- }
- for (i = 0; i < 2; i++) {
- data->fan_min[i] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_FAN_MIN(i));
+ err = adm9240_update_config(data);
+ if (err < 0) {
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ return ERR_PTR(err);
}
- data->temp_max[0] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_MAX(0));
- data->temp_max[1] = i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_MAX(1));
-
- /* read fan divs and 5-bit VID */
- i = i2c_smbus_read_byte_data(client, ADM9240_REG_VID_FAN_DIV);
- data->fan_div[0] = (i >> 4) & 3;
- data->fan_div[1] = (i >> 6) & 3;
- data->vid = i & 0x0f;
- data->vid |= (i2c_smbus_read_byte_data(client,
- ADM9240_REG_VID4) & 1) << 4;
- /* read analog out */
- data->aout = i2c_smbus_read_byte_data(client,
- ADM9240_REG_ANALOG_OUT);
-
data->last_updated_config = jiffies;
data->valid = 1;
}
@@ -253,6 +307,10 @@ static ssize_t temp1_input_show(struct device *dev,
struct device_attribute *dummy, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
@@ -261,6 +319,10 @@ static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->temp_max[attr->index] * 1000);
}
@@ -269,7 +331,6 @@ static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
long val;
int err;
@@ -279,10 +340,10 @@ static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->temp_max[attr->index] = TEMP_TO_REG(val);
- i2c_smbus_write_byte_data(client, ADM9240_REG_TEMP_MAX(attr->index),
- data->temp_max[attr->index]);
+ err = regmap_write(data->regmap, ADM9240_REG_TEMP_MAX(attr->index),
+ data->temp_max[attr->index]);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
static DEVICE_ATTR_RO(temp1_input);
@@ -295,6 +356,10 @@ static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index],
attr->index));
}
@@ -304,6 +369,10 @@ static ssize_t in_min_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index],
attr->index));
}
@@ -313,6 +382,10 @@ static ssize_t in_max_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index],
attr->index));
}
@@ -323,7 +396,6 @@ static ssize_t in_min_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int err;
@@ -333,10 +405,10 @@ static ssize_t in_min_store(struct device *dev,
mutex_lock(&data->update_lock);
data->in_min[attr->index] = IN_TO_REG(val, attr->index);
- i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MIN(attr->index),
- data->in_min[attr->index]);
+ err = regmap_write(data->regmap, ADM9240_REG_IN_MIN(attr->index),
+ data->in_min[attr->index]);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
static ssize_t in_max_store(struct device *dev,
@@ -345,7 +417,6 @@ static ssize_t in_max_store(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
int err;
@@ -355,10 +426,10 @@ static ssize_t in_max_store(struct device *dev,
mutex_lock(&data->update_lock);
data->in_max[attr->index] = IN_TO_REG(val, attr->index);
- i2c_smbus_write_byte_data(client, ADM9240_REG_IN_MAX(attr->index),
- data->in_max[attr->index]);
+ err = regmap_write(data->regmap, ADM9240_REG_IN_MAX(attr->index),
+ data->in_max[attr->index]);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
@@ -386,6 +457,10 @@ static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
1 << data->fan_div[attr->index]));
}
@@ -395,6 +470,10 @@ static ssize_t fan_min_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[attr->index],
1 << data->fan_div[attr->index]));
}
@@ -404,6 +483,10 @@ static ssize_t fan_div_show(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", 1 << data->fan_div[attr->index]);
}
@@ -469,13 +552,13 @@ static ssize_t fan_min_store(struct device *dev,
if (new_div != data->fan_div[nr]) {
data->fan_div[nr] = new_div;
- adm9240_write_fan_div(client, nr, new_div);
+ adm9240_write_fan_div(data, nr, new_div);
}
- i2c_smbus_write_byte_data(client, ADM9240_REG_FAN_MIN(nr),
- data->fan_min[nr]);
+ err = regmap_write(data->regmap, ADM9240_REG_FAN_MIN(nr),
+ data->fan_min[nr]);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
@@ -490,6 +573,10 @@ static ssize_t alarms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%u\n", data->alarms);
}
static DEVICE_ATTR_RO(alarms);
@@ -499,6 +586,10 @@ static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
@@ -516,6 +607,10 @@ static ssize_t cpu0_vid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR_RO(cpu0_vid);
@@ -525,6 +620,10 @@ static ssize_t aout_output_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", AOUT_FROM_REG(data->aout));
}
@@ -533,7 +632,6 @@ static ssize_t aout_output_store(struct device *dev,
const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
long val;
int err;
@@ -543,9 +641,9 @@ static ssize_t aout_output_store(struct device *dev,
mutex_lock(&data->update_lock);
data->aout = AOUT_TO_REG(val);
- i2c_smbus_write_byte_data(client, ADM9240_REG_ANALOG_OUT, data->aout);
+ err = regmap_write(data->regmap, ADM9240_REG_ANALOG_OUT, data->aout);
mutex_unlock(&data->update_lock);
- return count;
+ return err < 0 ? err : count;
}
static DEVICE_ATTR_RW(aout_output);
@@ -553,17 +651,19 @@ static ssize_t alarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
unsigned long val;
+ int err;
if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
- i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80);
+ err = regmap_write(data->regmap, ADM9240_REG_CHASSIS_CLEAR, 0x80);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
- dev_dbg(&client->dev, "chassis intrusion latch cleared\n");
+ if (err < 0)
+ return err;
+ dev_dbg(&data->client->dev, "chassis intrusion latch cleared\n");
return count;
}
@@ -662,11 +762,18 @@ static int adm9240_detect(struct i2c_client *new_client,
return 0;
}
-static void adm9240_init_client(struct i2c_client *client)
+static int adm9240_init_client(struct i2c_client *client, struct adm9240_data *data)
{
- struct adm9240_data *data = i2c_get_clientdata(client);
- u8 conf = i2c_smbus_read_byte_data(client, ADM9240_REG_CONFIG);
- u8 mode = i2c_smbus_read_byte_data(client, ADM9240_REG_TEMP_CONF) & 3;
+ u8 conf, mode;
+ int err;
+
+ err = regmap_raw_read(data->regmap, ADM9240_REG_CONFIG, &conf, 1);
+ if (err < 0)
+ return err;
+ err = regmap_raw_read(data->regmap, ADM9240_REG_TEMP_CONF, &mode, 1);
+ if (err < 0)
+ return err;
+ mode &= 3;
data->vrm = vid_which_vrm(); /* need this to report vid as mV */
@@ -682,44 +789,67 @@ static void adm9240_init_client(struct i2c_client *client)
int i;
for (i = 0; i < 6; i++) {
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_IN_MIN(i), 0);
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_IN_MAX(i), 255);
+ err = regmap_write(data->regmap,
+ ADM9240_REG_IN_MIN(i), 0);
+ if (err < 0)
+ return err;
+ err = regmap_write(data->regmap,
+ ADM9240_REG_IN_MAX(i), 255);
+ if (err < 0)
+ return err;
+ }
+ for (i = 0; i < 2; i++) {
+ err = regmap_write(data->regmap,
+ ADM9240_REG_FAN_MIN(i), 255);
+ if (err < 0)
+ return err;
+ }
+ for (i = 0; i < 2; i++) {
+ err = regmap_write(data->regmap,
+ ADM9240_REG_TEMP_MAX(i), 127);
+ if (err < 0)
+ return err;
}
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_FAN_MIN(0), 255);
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_FAN_MIN(1), 255);
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_TEMP_MAX(0), 127);
- i2c_smbus_write_byte_data(client,
- ADM9240_REG_TEMP_MAX(1), 127);
/* start measurement cycle */
- i2c_smbus_write_byte_data(client, ADM9240_REG_CONFIG, 1);
+ err = regmap_write(data->regmap, ADM9240_REG_CONFIG, 1);
+ if (err < 0)
+ return err;
dev_info(&client->dev,
"cold start: config was 0x%02x mode %u\n", conf, mode);
}
+
+ return 0;
}
-static int adm9240_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static const struct regmap_config adm9240_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int adm9240_probe(struct i2c_client *new_client)
{
struct device *dev = &new_client->dev;
struct device *hwmon_dev;
struct adm9240_data *data;
+ int err;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- i2c_set_clientdata(new_client, data);
data->client = new_client;
mutex_init(&data->update_lock);
+ data->regmap = devm_regmap_init_i2c(new_client, &adm9240_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
- adm9240_init_client(new_client);
+ err = adm9240_init_client(new_client, data);
+ if (err < 0)
+ return err;
hwmon_dev = devm_hwmon_device_register_with_groups(dev,
new_client->name,
@@ -741,7 +871,7 @@ static struct i2c_driver adm9240_driver = {
.driver = {
.name = "adm9240",
},
- .probe = adm9240_probe,
+ .probe_new = adm9240_probe,
.id_table = adm9240_id,
.detect = adm9240_detect,
.address_list = normal_i2c,
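A note on the regmap config introduced in the adm9240 conversion above: with reg_bits = val_bits = 8 and use_single_read/use_single_write set, regmap decomposes the bulk helpers into one-byte transfers, so the driver keeps the SMBus byte-data access pattern the chip expects while gaining uniform error propagation. Illustrative before/after, assuming the six input registers sit at consecutive addresses:

/* Before: per-register byte reads, errors silently discarded.
 *
 *	for (i = 0; i < 6; i++)
 *		data->in[i] = i2c_smbus_read_byte_data(client,
 *						       ADM9240_REG_IN(i));
 *
 * After: one call that regmap splits into single-byte reads (because
 * use_single_read is set), with a propagated error code.
 *
 *	err = regmap_bulk_read(data->regmap, ADM9240_REG_IN(0),
 *			       &data->in[0], 6);
 *	if (err < 0)
 *		return err;
 */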
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index d895b73fde6f..7246198f0901 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -99,8 +99,9 @@ static const struct regmap_config ads2830_regmap_config = {
.val_bits = 8,
};
-static int ads7828_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id ads7828_device_ids[];
+
+static int ads7828_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ads7828_platform_data *pdata = dev_get_platdata(dev);
@@ -141,7 +142,7 @@ static int ads7828_probe(struct i2c_client *client,
chip = (enum ads7828_chips)
of_device_get_match_data(&client->dev);
else
- chip = id->driver_data;
+ chip = i2c_match_id(ads7828_device_ids, client)->driver_data;
/* Bound Vref with min/max values */
vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
@@ -207,7 +208,7 @@ static struct i2c_driver ads7828_driver = {
},
.id_table = ads7828_device_ids,
- .probe = ads7828_probe,
+ .probe_new = ads7828_probe,
};
module_i2c_driver(ads7828_driver);
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 80f8a4673315..9d80895d0266 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -39,8 +39,7 @@ static const struct adt7x10_ops adt7410_i2c_ops = {
.write_byte = adt7410_i2c_write_byte,
};
-static int adt7410_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7410_i2c_probe(struct i2c_client *client)
{
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
@@ -67,7 +66,7 @@ static struct i2c_driver adt7410_driver = {
.name = "adt7410",
.pm = ADT7X10_DEV_PM_OPS,
},
- .probe = adt7410_i2c_probe,
+ .probe_new = adt7410_i2c_probe,
.remove = adt7410_i2c_remove,
.id_table = adt7410_ids,
.address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 5a839cc2ed1c..fad74aa62b64 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -666,8 +666,7 @@ static const struct hwmon_chip_info adt7411_chip_info = {
.info = adt7411_info,
};
-static int adt7411_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7411_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct adt7411_data *data;
@@ -707,7 +706,7 @@ static struct i2c_driver adt7411_driver = {
.driver = {
.name = "adt7411",
},
- .probe = adt7411_probe,
+ .probe_new = adt7411_probe,
.id_table = adt7411_id,
.detect = adt7411_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 208813158bb4..e75bbd87ad09 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -1787,8 +1787,7 @@ static int adt7462_detect(struct i2c_client *client,
return 0;
}
-static int adt7462_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7462_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct adt7462_data *data;
@@ -1820,7 +1819,7 @@ static struct i2c_driver adt7462_driver = {
.driver = {
.name = "adt7462",
},
- .probe = adt7462_probe,
+ .probe_new = adt7462_probe,
.id_table = adt7462_id,
.detect = adt7462_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index a30f34cf512c..740f39a54ab0 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1217,8 +1217,7 @@ static void adt7470_init_client(struct i2c_client *client)
}
}
-static int adt7470_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7470_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct adt7470_data *data;
@@ -1276,7 +1275,7 @@ static struct i2c_driver adt7470_driver = {
.driver = {
.name = "adt7470",
},
- .probe = adt7470_probe,
+ .probe_new = adt7470_probe,
.remove = adt7470_remove,
.id_table = adt7470_id,
.detect = adt7470_detect,
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 054080443b47..9d5b019651f2 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1539,8 +1539,7 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
return 0;
}
-static int adt7475_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adt7475_probe(struct i2c_client *client)
{
enum chips chip;
static const char * const names[] = {
@@ -1554,6 +1553,7 @@ static int adt7475_probe(struct i2c_client *client,
struct device *hwmon_dev;
int i, ret = 0, revision, group_num = 0;
u8 config3;
+ const struct i2c_device_id *id = i2c_match_id(adt7475_id, client);
data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
@@ -1728,7 +1728,7 @@ static struct i2c_driver adt7475_driver = {
.name = "adt7475",
.of_match_table = of_match_ptr(adt7475_of_match),
},
- .probe = adt7475_probe,
+ .probe_new = adt7475_probe,
.id_table = adt7475_id,
.detect = adt7475_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 013fb056b1d0..6b1ce2242c61 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -900,8 +900,7 @@ static int amc6821_init_client(struct i2c_client *client)
return 0;
}
-static int amc6821_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int amc6821_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct amc6821_data *data;
@@ -940,7 +939,7 @@ static struct i2c_driver amc6821_driver = {
.driver = {
.name = "amc6821",
},
- .probe = amc6821_probe,
+ .probe_new = amc6821_probe,
.id_table = amc6821_id,
.detect = amc6821_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 29603742c858..3197cda7bcd9 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -35,7 +35,6 @@
struct sensor_accumulator {
u64 energy_ctr;
u64 prev_value;
- char label[10];
};
struct amd_energy_data {
@@ -47,11 +46,13 @@ struct amd_energy_data {
struct mutex lock;
/* An accumulator for each core and socket */
struct sensor_accumulator *accums;
+ unsigned int timeout_ms;
/* Energy Status Units */
- u64 energy_units;
+ int energy_units;
int nr_cpus;
int nr_socks;
int core_id;
+ char (*label)[10];
};
static int amd_energy_read_labels(struct device *dev,
@@ -61,7 +62,7 @@ static int amd_energy_read_labels(struct device *dev,
{
struct amd_energy_data *data = dev_get_drvdata(dev);
- *str = data->accums[channel].label;
+ *str = data->label[channel];
return 0;
}
@@ -73,108 +74,67 @@ static void get_energy_units(struct amd_energy_data *data)
data->energy_units = (rapl_units & AMD_ENERGY_UNIT_MASK) >> 8;
}
-static void accumulate_socket_delta(struct amd_energy_data *data,
- int sock, int cpu)
+static void accumulate_delta(struct amd_energy_data *data,
+ int channel, int cpu, u32 reg)
{
- struct sensor_accumulator *s_accum;
+ struct sensor_accumulator *accum;
u64 input;
mutex_lock(&data->lock);
- rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
+ rdmsrl_safe_on_cpu(cpu, reg, &input);
input &= AMD_ENERGY_MASK;
- s_accum = &data->accums[data->nr_cpus + sock];
- if (input >= s_accum->prev_value)
- s_accum->energy_ctr +=
- input - s_accum->prev_value;
+ accum = &data->accums[channel];
+ if (input >= accum->prev_value)
+ accum->energy_ctr +=
+ input - accum->prev_value;
else
- s_accum->energy_ctr += UINT_MAX -
- s_accum->prev_value + input;
+ accum->energy_ctr += UINT_MAX -
+ accum->prev_value + input;
- s_accum->prev_value = input;
+ accum->prev_value = input;
mutex_unlock(&data->lock);
}
-static void accumulate_core_delta(struct amd_energy_data *data)
+static void read_accumulate(struct amd_energy_data *data)
{
- struct sensor_accumulator *c_accum;
- u64 input;
- int cpu;
+ int sock, scpu, cpu;
+
+ for (sock = 0; sock < data->nr_socks; sock++) {
+ scpu = cpumask_first_and(cpu_online_mask,
+ cpumask_of_node(sock));
+
+ accumulate_delta(data, data->nr_cpus + sock,
+ scpu, ENERGY_PKG_MSR);
+ }
- mutex_lock(&data->lock);
if (data->core_id >= data->nr_cpus)
data->core_id = 0;
cpu = data->core_id;
+ if (cpu_online(cpu))
+ accumulate_delta(data, cpu, cpu, ENERGY_CORE_MSR);
- if (!cpu_online(cpu))
- goto out;
-
- rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
- input &= AMD_ENERGY_MASK;
-
- c_accum = &data->accums[cpu];
-
- if (input >= c_accum->prev_value)
- c_accum->energy_ctr +=
- input - c_accum->prev_value;
- else
- c_accum->energy_ctr += UINT_MAX -
- c_accum->prev_value + input;
-
- c_accum->prev_value = input;
-
-out:
data->core_id++;
- mutex_unlock(&data->lock);
-}
-
-static void read_accumulate(struct amd_energy_data *data)
-{
- int sock;
-
- for (sock = 0; sock < data->nr_socks; sock++) {
- int cpu;
-
- cpu = cpumask_first_and(cpu_online_mask,
- cpumask_of_node(sock));
-
- accumulate_socket_delta(data, sock, cpu);
- }
-
- accumulate_core_delta(data);
}
static void amd_add_delta(struct amd_energy_data *data, int ch,
- int cpu, long *val, bool is_core)
+ int cpu, long *val, u32 reg)
{
- struct sensor_accumulator *s_accum, *c_accum;
+ struct sensor_accumulator *accum;
u64 input;
mutex_lock(&data->lock);
- if (!is_core) {
- rdmsrl_safe_on_cpu(cpu, ENERGY_PKG_MSR, &input);
- input &= AMD_ENERGY_MASK;
-
- s_accum = &data->accums[ch];
- if (input >= s_accum->prev_value)
- input += s_accum->energy_ctr -
- s_accum->prev_value;
- else
- input += UINT_MAX - s_accum->prev_value +
- s_accum->energy_ctr;
- } else {
- rdmsrl_safe_on_cpu(cpu, ENERGY_CORE_MSR, &input);
- input &= AMD_ENERGY_MASK;
+ rdmsrl_safe_on_cpu(cpu, reg, &input);
+ input &= AMD_ENERGY_MASK;
- c_accum = &data->accums[ch];
- if (input >= c_accum->prev_value)
- input += c_accum->energy_ctr -
- c_accum->prev_value;
- else
- input += UINT_MAX - c_accum->prev_value +
- c_accum->energy_ctr;
- }
+ accum = &data->accums[ch];
+ if (input >= accum->prev_value)
+ input += accum->energy_ctr -
+ accum->prev_value;
+ else
+ input += UINT_MAX - accum->prev_value +
+ accum->energy_ctr;
/* Energy consumed = (1/(2^ESU) * RAW * 1000000UL) μJoules */
*val = div64_ul(input * 1000000UL, BIT(data->energy_units));
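A worked instance of the conversion in this hunk, with illustrative numbers (an energy-status-unit of 16, i.e. 2^-16 J, about 15.3 uJ per count, matches the default resolution cited elsewhere in this patch):

/*
 *	energy_units = 16, raw counter delta = 4096:
 *
 *	val = 4096 * 1000000 / 2^16
 *	    = 4096000000 / 65536
 *	    = 62500 microjoules (62.5 mJ)
 *
 * div64_ul() is needed because the intermediate product overflows
 * 32-bit arithmetic long before the counter itself wraps.
 */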
@@ -187,20 +147,22 @@ static int amd_energy_read(struct device *dev,
u32 attr, int channel, long *val)
{
struct amd_energy_data *data = dev_get_drvdata(dev);
+ u32 reg;
int cpu;
if (channel >= data->nr_cpus) {
cpu = cpumask_first_and(cpu_online_mask,
cpumask_of_node
(channel - data->nr_cpus));
- amd_add_delta(data, channel, cpu, val, false);
+ reg = ENERGY_PKG_MSR;
} else {
cpu = channel;
if (!cpu_online(cpu))
return -ENODEV;
- amd_add_delta(data, channel, cpu, val, true);
+ reg = ENERGY_CORE_MSR;
}
+ amd_add_delta(data, channel, cpu, val, reg);
return 0;
}
@@ -209,12 +171,13 @@ static umode_t amd_energy_is_visible(const void *_data,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
- return 0444;
+ return 0440;
}
static int energy_accumulator(void *p)
{
struct amd_energy_data *data = (struct amd_energy_data *)p;
+ unsigned int timeout = data->timeout_ms;
while (!kthread_should_stop()) {
/*
@@ -227,14 +190,7 @@ static int energy_accumulator(void *p)
if (kthread_should_stop())
break;
- /*
- * On a 240W system, with default resolution the
- * Socket Energy status register may wrap around in
- * 2^32*15.3 e-6/240 = 273.8041 secs (~4.5 mins)
- *
- * let us accumulate for every 100secs
- */
- schedule_timeout(msecs_to_jiffies(100000));
+ schedule_timeout(msecs_to_jiffies(timeout));
}
return 0;
}
@@ -247,12 +203,13 @@ static const struct hwmon_ops amd_energy_ops = {
static int amd_create_sensor(struct device *dev,
struct amd_energy_data *data,
- u8 type, u32 config)
+ enum hwmon_sensor_types type, u32 config)
{
struct hwmon_channel_info *info = &data->energy_info;
struct sensor_accumulator *accums;
int i, num_siblings, cpus, sockets;
u32 *s_config;
+ char (*label_l)[10];
/* Identify the number of siblings per core */
num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
@@ -276,21 +233,25 @@ static int amd_create_sensor(struct device *dev,
if (!accums)
return -ENOMEM;
+ label_l = devm_kcalloc(dev, cpus + sockets,
+ sizeof(*label_l), GFP_KERNEL);
+ if (!label_l)
+ return -ENOMEM;
+
info->type = type;
info->config = s_config;
data->nr_cpus = cpus;
data->nr_socks = sockets;
data->accums = accums;
+ data->label = label_l;
for (i = 0; i < cpus + sockets; i++) {
s_config[i] = config;
if (i < cpus)
- scnprintf(accums[i].label, 10,
- "Ecore%03u", i);
+ scnprintf(label_l[i], 10, "Ecore%03u", i);
else
- scnprintf(accums[i].label, 10,
- "Esocket%u", (i - cpus));
+ scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
return 0;
@@ -301,6 +262,7 @@ static int amd_energy_probe(struct platform_device *pdev)
struct device *hwmon_dev;
struct amd_energy_data *data;
struct device *dev = &pdev->dev;
+ int ret;
data = devm_kzalloc(dev,
sizeof(struct amd_energy_data), GFP_KERNEL);
@@ -313,8 +275,10 @@ static int amd_energy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, data);
/* Populate per-core energy reporting */
data->info[0] = &data->energy_info;
- amd_create_sensor(dev, data, hwmon_energy,
- HWMON_E_INPUT | HWMON_E_LABEL);
+ ret = amd_create_sensor(dev, data, hwmon_energy,
+ HWMON_E_INPUT | HWMON_E_LABEL);
+ if (ret)
+ return ret;
mutex_init(&data->lock);
get_energy_units(data);
@@ -326,11 +290,15 @@ static int amd_energy_probe(struct platform_device *pdev)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
+ /*
+ * On a system with peak wattage of 250W
+ * timeout = 2 ^ 32 / 2 ^ energy_units / 250 secs
+ */
+ data->timeout_ms = 1000 *
+ BIT(min(28, 31 - data->energy_units)) / 250;
+
data->wrap_accumulate = kthread_run(energy_accumulator, data,
"%s", dev_name(hwmon_dev));
- if (IS_ERR(data->wrap_accumulate))
- return PTR_ERR(data->wrap_accumulate);
-
return PTR_ERR_OR_ZERO(data->wrap_accumulate);
}
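Worked numbers for the new adaptive timeout, again taking energy_units = 16 for illustration:

/*
 *	timeout_ms = 1000 * 2^min(28, 31 - 16) / 250
 *		   = 1000 * 32768 / 250
 *		   = 131072 ms  (~131 s)
 *
 * At a sustained 250 W the 32-bit MSR wraps after
 * 2^32 / 2^16 / 250 ~= 262 s, so sampling every ~131 s -- half the
 * wrap period -- keeps the accumulator safely ahead of the wrap,
 * replacing the removed fixed 100 s period with one that tracks the
 * actual resolution.
 */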
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index a18887990f4a..79b498f816fe 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -32,6 +32,7 @@
#include <linux/hwmon.h>
#include <linux/workqueue.h>
#include <linux/err.h>
+#include <linux/bits.h>
/* data port used by Apple SMC */
#define APPLESMC_DATA_PORT 0x300
@@ -42,10 +43,13 @@
#define APPLESMC_MAX_DATA_LENGTH 32
-/* wait up to 128 ms for a status change. */
-#define APPLESMC_MIN_WAIT 0x0010
-#define APPLESMC_RETRY_WAIT 0x0100
-#define APPLESMC_MAX_WAIT 0x20000
+/* Apple SMC status bits */
+#define SMC_STATUS_AWAITING_DATA BIT(0) /* SMC has data waiting to be read */
+#define SMC_STATUS_IB_CLOSED BIT(1) /* Will ignore any input */
+#define SMC_STATUS_BUSY BIT(2) /* Command in progress */
+
+/* Initial wait is 8us */
+#define APPLESMC_MIN_WAIT 0x0008
#define APPLESMC_READ_CMD 0x10
#define APPLESMC_WRITE_CMD 0x11
@@ -151,65 +155,84 @@ static unsigned int key_at_index;
static struct workqueue_struct *applesmc_led_wq;
/*
- * wait_read - Wait for a byte to appear on SMC port. Callers must
- * hold applesmc_lock.
+ * Wait for specific status bits with a mask on the SMC.
+ * Used before all transactions.
+ * This does 10 fast loops of 8us then exponentially backs off for a
+ * minimum total wait of 262ms. Depending on usleep_range this could
+ * run out past 500ms.
*/
-static int wait_read(void)
+
+static int wait_status(u8 val, u8 mask)
{
- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
u8 status;
int us;
+ int i;
- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- usleep_range(us, us * 16);
+ us = APPLESMC_MIN_WAIT;
+ for (i = 0; i < 24 ; i++) {
status = inb(APPLESMC_CMD_PORT);
- /* read: wait for smc to settle */
- if (status & 0x01)
+ if ((status & mask) == val)
return 0;
- /* timeout: give up */
- if (time_after(jiffies, end))
- break;
+ usleep_range(us, us * 2);
+ if (i > 9)
+ us <<= 1;
}
-
- pr_warn("wait_read() fail: 0x%02x\n", status);
return -EIO;
}
-/*
- * send_byte - Write to SMC port, retrying when necessary. Callers
- * must hold applesmc_lock.
- */
+/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */
+
static int send_byte(u8 cmd, u16 port)
{
- u8 status;
- int us;
- unsigned long end = jiffies + (APPLESMC_MAX_WAIT * HZ) / USEC_PER_SEC;
+ int status;
+
+ status = wait_status(0, SMC_STATUS_IB_CLOSED);
+ if (status)
+ return status;
+ /*
+	 * This needs to be a separate read looking for bit 0x04
+	 * after bit 0x02 falls. If consolidated with the wait above,
+	 * this extra read may not happen if the status reports both bits
+	 * simultaneously, and the extra read appears to be required.
+ */
+ status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY);
+ if (status)
+ return status;
outb(cmd, port);
- for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- usleep_range(us, us * 16);
- status = inb(APPLESMC_CMD_PORT);
- /* write: wait for smc to settle */
- if (status & 0x02)
- continue;
- /* ready: cmd accepted, return */
- if (status & 0x04)
- return 0;
- /* timeout: give up */
- if (time_after(jiffies, end))
- break;
- /* busy: long wait and resend */
- udelay(APPLESMC_RETRY_WAIT);
- outb(cmd, port);
- }
-
- pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
- return -EIO;
+ return 0;
}
+/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */
+
static int send_command(u8 cmd)
{
- return send_byte(cmd, APPLESMC_CMD_PORT);
+ int ret;
+
+ ret = wait_status(0, SMC_STATUS_IB_CLOSED);
+ if (ret)
+ return ret;
+ outb(cmd, APPLESMC_CMD_PORT);
+ return 0;
+}
+
+/*
+ * Based on logic from the Apple driver. This is issued before any interaction.
+ * If busy is stuck high, issue a read command to reset the SMC state machine.
+ * If busy is stuck high after the command then the SMC is jammed.
+ */
+
+static int smc_sane(void)
+{
+ int ret;
+
+ ret = wait_status(0, SMC_STATUS_BUSY);
+ if (!ret)
+ return ret;
+ ret = send_command(APPLESMC_READ_CMD);
+ if (ret)
+ return ret;
+ return wait_status(0, SMC_STATUS_BUSY);
}
static int send_argument(const char *key)
@@ -226,6 +249,11 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
{
u8 status, data = 0;
int i;
+ int ret;
+
+ ret = smc_sane();
+ if (ret)
+ return ret;
if (send_command(cmd) || send_argument(key)) {
pr_warn("%.4s: read arg fail\n", key);
@@ -239,7 +267,8 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
}
for (i = 0; i < len; i++) {
- if (wait_read()) {
+ if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY,
+ SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) {
pr_warn("%.4s: read data[%d] fail\n", key, i);
return -EIO;
}
@@ -250,19 +279,24 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
for (i = 0; i < 16; i++) {
udelay(APPLESMC_MIN_WAIT);
status = inb(APPLESMC_CMD_PORT);
- if (!(status & 0x01))
+ if (!(status & SMC_STATUS_AWAITING_DATA))
break;
data = inb(APPLESMC_DATA_PORT);
}
if (i)
pr_warn("flushed %d bytes, last value is: %d\n", i, data);
- return 0;
+ return wait_status(0, SMC_STATUS_BUSY);
}
static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
{
int i;
+ int ret;
+
+ ret = smc_sane();
+ if (ret)
+ return ret;
if (send_command(cmd) || send_argument(key)) {
pr_warn("%s: write arg fail\n", key);
@@ -281,7 +315,7 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
}
}
- return 0;
+ return wait_status(0, SMC_STATUS_BUSY);
}
static int read_register_count(unsigned int *count)
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 4c609e23a4ef..ba9fcf6f9264 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -205,8 +205,7 @@ struct asb100_data {
static int asb100_read_value(struct i2c_client *client, u16 reg);
static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
-static int asb100_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int asb100_probe(struct i2c_client *client);
static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int asb100_remove(struct i2c_client *client);
@@ -224,7 +223,7 @@ static struct i2c_driver asb100_driver = {
.driver = {
.name = "asb100",
},
- .probe = asb100_probe,
+ .probe_new = asb100_probe,
.remove = asb100_remove,
.id_table = asb100_id,
.detect = asb100_detect,
@@ -775,8 +774,7 @@ static int asb100_detect(struct i2c_client *client,
return 0;
}
-static int asb100_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int asb100_probe(struct i2c_client *client)
{
int err;
struct asb100_data *data;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 9e14e2829ee9..600ffc7e1900 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1087,7 +1087,7 @@ static void asc7621_init_client(struct i2c_client *client)
}
static int
-asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
+asc7621_probe(struct i2c_client *client)
{
struct asc7621_data *data;
int i, err;
@@ -1193,7 +1193,7 @@ static struct i2c_driver asc7621_driver = {
.driver = {
.name = "asc7621",
},
- .probe = asc7621_probe,
+ .probe_new = asc7621_probe,
.remove = asc7621_remove,
.id_table = asc7621_id,
.detect = asc7621_detect,
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 79b8df258371..1e08a5431f12 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -244,8 +244,7 @@ static struct attribute *atxp1_attrs[] = {
};
ATTRIBUTE_GROUPS(atxp1);
-static int atxp1_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int atxp1_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct atxp1_data *data;
@@ -288,7 +287,7 @@ static struct i2c_driver atxp1_driver = {
.driver = {
.name = "atxp1",
},
- .probe = atxp1_probe,
+ .probe_new = atxp1_probe,
.id_table = atxp1_id,
};
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 94698cae0497..3e1d56585b91 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
@@ -476,6 +477,7 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
long *val)
{
struct pvt_cache *cache = &pvt->cache[type];
+ unsigned long timeout;
u32 data;
int ret;
@@ -499,7 +501,14 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID, 0);
pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
- wait_for_completion(&cache->conversion);
+ /*
+	 * Wait with a timeout since, if the sensor is suddenly powered down,
+	 * the request will never complete and the caller would hang on this
+	 * procedure until the power is back up again. Multiply the timeout
+	 * by a factor of two to prevent a false timeout.
+ */
+ timeout = 2 * usecs_to_jiffies(ktime_to_us(pvt->timeout));
+ ret = wait_for_completion_timeout(&cache->conversion, timeout);
pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_DVALID,
@@ -509,6 +518,9 @@ static int pvt_read_data(struct pvt_hwmon *pvt, enum pvt_sensor_type type,
mutex_unlock(&pvt->iface_mtx);
+ if (!ret)
+ return -ETIMEDOUT;
+
if (type == PVT_TEMP)
*val = pvt_calc_poly(&poly_N_to_temp, data);
else
@@ -654,44 +666,16 @@ static int pvt_write_trim(struct pvt_hwmon *pvt, long val)
static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
{
- unsigned long rate;
- ktime_t kt;
- u32 data;
-
- rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
- if (!rate)
- return -ENODEV;
-
- /*
- * Don't bother with mutex here, since we just read data from MMIO.
- * We also have to scale the ticks timeout up to compensate the
- * ms-ns-data translations.
- */
- data = readl(pvt->regs + PVT_TTIMEOUT) + 1;
+ int ret;
- /*
- * Calculate ref-clock based delay (Ttotal) between two consecutive
- * data samples of the same sensor. So we first must calculate the
- * delay introduced by the internal ref-clock timer (Tref * Fclk).
- * Then add the constant timeout cuased by each conversion latency
- * (Tmin). The basic formulae for each conversion is following:
- * Ttotal = Tref * Fclk + Tmin
- * Note if alarms are enabled the sensors are polled one after
- * another, so in order to have the delay being applicable for each
- * sensor the requested value must be equally redistirbuted.
- */
-#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
- kt = ktime_set(PVT_SENSORS_NUM * (u64)data, 0);
- kt = ktime_divns(kt, rate);
- kt = ktime_add_ns(kt, PVT_SENSORS_NUM * PVT_TOUT_MIN);
-#else
- kt = ktime_set(data, 0);
- kt = ktime_divns(kt, rate);
- kt = ktime_add_ns(kt, PVT_TOUT_MIN);
-#endif
+ ret = mutex_lock_interruptible(&pvt->iface_mtx);
+ if (ret)
+ return ret;
/* Return the result in msec as hwmon sysfs interface requires. */
- *val = ktime_to_ms(kt);
+ *val = ktime_to_ms(pvt->timeout);
+
+ mutex_unlock(&pvt->iface_mtx);
return 0;
}
@@ -699,7 +683,7 @@ static int pvt_read_timeout(struct pvt_hwmon *pvt, long *val)
static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
{
unsigned long rate;
- ktime_t kt;
+ ktime_t kt, cache;
u32 data;
int ret;
@@ -712,7 +696,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
* between all available sensors to have the requested delay
* applicable to each individual sensor.
*/
- kt = ms_to_ktime(val);
+ cache = kt = ms_to_ktime(val);
#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
kt = ktime_divns(kt, PVT_SENSORS_NUM);
#endif
@@ -741,6 +725,7 @@ static int pvt_write_timeout(struct pvt_hwmon *pvt, long val)
return ret;
pvt_set_tout(pvt, data);
+ pvt->timeout = cache;
mutex_unlock(&pvt->iface_mtx);
@@ -982,10 +967,52 @@ static int pvt_request_clks(struct pvt_hwmon *pvt)
return 0;
}
-static void pvt_init_iface(struct pvt_hwmon *pvt)
+static int pvt_check_pwr(struct pvt_hwmon *pvt)
{
+ unsigned long tout;
+ int ret = 0;
+ u32 data;
+
+ /*
+	 * Test out the sensor conversion functionality. If it is not done in
+	 * time then the domain must have been unpowered and we won't be able
+	 * to use the device later in this driver.
+	 * Note: if the power source is lost during normal driver operation,
+	 * the data read procedure will either return -ETIMEDOUT (for the
+	 * alarm-less driver configuration) or just stop the repeated
+	 * conversion. In the latter case, alas, we won't be able to detect
+	 * the problem.
+ */
+ pvt_update(pvt->regs + PVT_INTR_MASK, PVT_INTR_ALL, PVT_INTR_ALL);
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, PVT_CTRL_EN);
+ pvt_set_tout(pvt, 0);
+ readl(pvt->regs + PVT_DATA);
+
+ tout = PVT_TOUT_MIN / NSEC_PER_USEC;
+ usleep_range(tout, 2 * tout);
+
+ data = readl(pvt->regs + PVT_DATA);
+ if (!(data & PVT_DATA_VALID)) {
+ ret = -ENODEV;
+ dev_err(pvt->dev, "Sensor is powered down\n");
+ }
+
+ pvt_update(pvt->regs + PVT_CTRL, PVT_CTRL_EN, 0);
+
+ return ret;
+}
+
+static int pvt_init_iface(struct pvt_hwmon *pvt)
+{
+ unsigned long rate;
u32 trim, temp;
+ rate = clk_get_rate(pvt->clks[PVT_CLOCK_REF].clk);
+ if (!rate) {
+ dev_err(pvt->dev, "Invalid reference clock rate\n");
+ return -ENODEV;
+ }
+
/*
* Make sure all interrupts and controller are disabled so not to
* accidentally have ISR executed before the driver data is fully
@@ -1000,12 +1027,37 @@ static void pvt_init_iface(struct pvt_hwmon *pvt)
pvt_set_mode(pvt, pvt_info[pvt->sensor].mode);
pvt_set_tout(pvt, PVT_TOUT_DEF);
+ /*
+ * Preserve the current ref-clock based delay (Ttotal) between the
+ * sensor data samples in the driver data so as not to recalculate it
+ * each time on the data requests and timeout reads. It consists of the
+ * delay introduced by the internal ref-clock timer (N / Fclk) and the
+ * constant timeout caused by each conversion latency (Tmin):
+ * Ttotal = N / Fclk + Tmin
+ * If alarms are enabled the sensors are polled one after another and
+ * in order to get the next measurement of a particular sensor the
+ * caller will have to wait for at most until all the others are
+ * polled. In that case the formulae will look a bit different:
+ * Ttotal = 5 * (N / Fclk + Tmin)
+ */
+#if defined(CONFIG_SENSORS_BT1_PVT_ALARMS)
+ pvt->timeout = ktime_set(PVT_SENSORS_NUM * PVT_TOUT_DEF, 0);
+ pvt->timeout = ktime_divns(pvt->timeout, rate);
+ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_SENSORS_NUM * PVT_TOUT_MIN);
+#else
+ pvt->timeout = ktime_set(PVT_TOUT_DEF, 0);
+ pvt->timeout = ktime_divns(pvt->timeout, rate);
+ pvt->timeout = ktime_add_ns(pvt->timeout, PVT_TOUT_MIN);
+#endif
+
trim = PVT_TRIM_DEF;
if (!of_property_read_u32(pvt->dev->of_node,
"baikal,pvt-temp-offset-millicelsius", &temp))
trim = pvt_calc_trim(temp);
pvt_set_trim(pvt, trim);
+
+ return 0;
}
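
For reference, the arithmetic above can be checked standalone. A minimal
sketch, assuming a 1.8 MHz reference clock and placeholder values for the
PVT_* constants (the real ones live in bt1-pvt.h):

	/* Standalone check of Ttotal = 5 * (N / Fclk + Tmin); all values
	 * below are assumptions for illustration, not the driver's.
	 */
	#include <stdio.h>

	#define PVT_SENSORS_NUM	5
	#define PVT_TOUT_DEF	60000		/* assumed ref-clock ticks */
	#define PVT_TOUT_MIN	52000ULL	/* assumed latency, ns */

	int main(void)
	{
		unsigned long long rate = 1800000;	/* assumed Fclk, Hz */
		unsigned long long ns;

		ns = PVT_SENSORS_NUM * PVT_TOUT_DEF * 1000000000ULL / rate +
		     PVT_SENSORS_NUM * PVT_TOUT_MIN;
		printf("conversion timeout: %llu ms\n", ns / 1000000);
		return 0;
	}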
static int pvt_request_irq(struct pvt_hwmon *pvt)
@@ -1109,7 +1161,13 @@ static int pvt_probe(struct platform_device *pdev)
if (ret)
return ret;
- pvt_init_iface(pvt);
+ ret = pvt_check_pwr(pvt);
+ if (ret)
+ return ret;
+
+ ret = pvt_init_iface(pvt);
+ if (ret)
+ return ret;
ret = pvt_request_irq(pvt);
if (ret)
diff --git a/drivers/hwmon/bt1-pvt.h b/drivers/hwmon/bt1-pvt.h
index 5eac73e94885..93b8dd5e7c94 100644
--- a/drivers/hwmon/bt1-pvt.h
+++ b/drivers/hwmon/bt1-pvt.h
@@ -10,6 +10,7 @@
#include <linux/completion.h>
#include <linux/hwmon.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
@@ -201,6 +202,7 @@ struct pvt_cache {
* if alarms are disabled).
* @sensor: current PVT sensor the data conversion is being performed for.
* @cache: data cache descriptor.
+ * @timeout: conversion timeout cache.
*/
struct pvt_hwmon {
struct device *dev;
@@ -214,6 +216,7 @@ struct pvt_hwmon {
struct mutex iface_mtx;
enum pvt_sensor_type sensor;
struct pvt_cache cache[PVT_SENSORS_NUM];
+ ktime_t timeout;
};
/*
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index c3472b73fa79..c1e4cfb40c3d 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2461,8 +2461,9 @@ static int dme1737_i2c_detect(struct i2c_client *client,
return 0;
}
-static int dme1737_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id dme1737_id[];
+
+static int dme1737_i2c_probe(struct i2c_client *client)
{
struct dme1737_data *data;
struct device *dev = &client->dev;
@@ -2473,7 +2474,7 @@ static int dme1737_i2c_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->type = id->driver_data;
+ data->type = i2c_match_id(dme1737_id, client)->driver_data;
data->client = client;
data->name = client->name;
mutex_init(&data->update_lock);
@@ -2529,7 +2530,7 @@ static struct i2c_driver dme1737_i2c_driver = {
.driver = {
.name = "dme1737",
},
- .probe = dme1737_i2c_probe,
+ .probe_new = dme1737_i2c_probe,
.remove = dme1737_i2c_remove,
.id_table = dme1737_id,
.detect = dme1737_i2c_detect,
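
The same mechanical conversion repeats for most of the i2c drivers below:
the probe callback drops its second i2c_device_id argument, and drivers
that still need the per-chip driver_data recover it via i2c_match_id()
against a forward-declared id table. A distilled sketch of the pattern
(all "foo" names are placeholders):

	#include <linux/i2c.h>
	#include <linux/module.h>

	static const struct i2c_device_id foo_id[];	/* forward declaration */

	static int foo_probe(struct i2c_client *client)
	{
		/* driver_data used to arrive via the removed argument */
		unsigned long kind = i2c_match_id(foo_id, client)->driver_data;

		(void)kind;	/* chip-specific setup would use it here */
		return 0;
	}

	static const struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe_new	= foo_probe,	/* was: .probe = foo_probe */
		.id_table	= foo_id,
	};
	module_i2c_driver(foo_driver);
	MODULE_LICENSE("GPL");

Drivers whose probe never used the id (ds620, emc2103, ...) simply drop
the argument and need no i2c_match_id() call.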
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 541bed8732b7..e1d742bfc74c 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -342,8 +342,9 @@ static const struct attribute_group ds1621_group = {
};
__ATTRIBUTE_GROUPS(ds1621);
-static int ds1621_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id ds1621_id[];
+
+static int ds1621_probe(struct i2c_client *client)
{
struct ds1621_data *data;
struct device *hwmon_dev;
@@ -355,7 +356,7 @@ static int ds1621_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
- data->kind = id->driver_data;
+ data->kind = i2c_match_id(ds1621_id, client)->driver_data;
data->client = client;
/* Initialize the DS1621 chip */
@@ -383,7 +384,7 @@ static struct i2c_driver ds1621_driver = {
.driver = {
.name = "ds1621",
},
- .probe = ds1621_probe,
+ .probe_new = ds1621_probe,
.id_table = ds1621_id,
};
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 8f1fc83ac37b..9ec722798c4a 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -211,8 +211,7 @@ static struct attribute *ds620_attrs[] = {
ATTRIBUTE_GROUPS(ds620);
-static int ds620_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds620_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -246,7 +245,7 @@ static struct i2c_driver ds620_driver = {
.driver = {
.name = "ds620",
},
- .probe = ds620_probe,
+ .probe_new = ds620_probe,
.id_table = ds620_id,
};
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index e9c0bbc2caa9..314838272049 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -386,11 +386,13 @@ static const struct regmap_config emc1403_regmap_config = {
.volatile_reg = emc1403_regmap_is_volatile,
};
-static int emc1403_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id emc1403_idtable[];
+
+static int emc1403_probe(struct i2c_client *client)
{
struct thermal_data *data;
struct device *hwmon_dev;
+ const struct i2c_device_id *id = i2c_match_id(emc1403_idtable, client);
data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
GFP_KERNEL);
@@ -452,7 +454,7 @@ static struct i2c_driver sensor_emc1403 = {
.name = "emc1403",
},
.detect = emc1403_detect,
- .probe = emc1403_probe,
+ .probe_new = emc1403_probe,
.id_table = emc1403_idtable,
.address_list = emc1403_address_list,
};
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 924c02c1631d..e4c95ca9e19f 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -551,7 +551,7 @@ static const struct attribute_group emc2103_temp4_group = {
};
static int
-emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id)
+emc2103_probe(struct i2c_client *client)
{
struct emc2103_data *data;
struct device *hwmon_dev;
@@ -653,7 +653,7 @@ static struct i2c_driver emc2103_driver = {
.driver = {
.name = "emc2103",
},
- .probe = emc2103_probe,
+ .probe_new = emc2103_probe,
.id_table = emc2103_ids,
.detect = emc2103_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index df0f7292e214..ec5c98702bf5 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -444,8 +444,7 @@ static int emc6w201_detect(struct i2c_client *client,
return 0;
}
-static int emc6w201_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int emc6w201_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct emc6w201_data *data;
@@ -475,7 +474,7 @@ static struct i2c_driver emc6w201_driver = {
.driver = {
.name = "emc6w201",
},
- .probe = emc6w201_probe,
+ .probe_new = emc6w201_probe,
.id_table = emc6w201_id,
.detect = emc6w201_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index eb847a7d6b83..3e567be60fb1 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -113,8 +113,7 @@ struct f75375_data {
static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int f75375_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int f75375_probe(struct i2c_client *client);
static int f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
@@ -130,7 +129,7 @@ static struct i2c_driver f75375_driver = {
.driver = {
.name = "f75375",
},
- .probe = f75375_probe,
+ .probe_new = f75375_probe,
.remove = f75375_remove,
.id_table = f75375_id,
.detect = f75375_detect,
@@ -814,8 +813,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
}
-static int f75375_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int f75375_probe(struct i2c_client *client)
{
struct f75375_data *data;
struct f75375s_platform_data *f75375s_pdata =
@@ -832,7 +830,7 @@ static int f75375_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->kind = id->driver_data;
+ data->kind = i2c_match_id(f75375_id, client)->driver_data;
err = sysfs_create_group(&client->dev.kobj, &f75375_group);
if (err)
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 4136643d8e0c..5191cd85a8d1 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -214,8 +214,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
* Functions declarations
*/
-static int fschmd_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int fschmd_probe(struct i2c_client *client);
static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int fschmd_remove(struct i2c_client *client);
@@ -242,7 +241,7 @@ static struct i2c_driver fschmd_driver = {
.driver = {
.name = "fschmd",
},
- .probe = fschmd_probe,
+ .probe_new = fschmd_probe,
.remove = fschmd_remove,
.id_table = fschmd_id,
.detect = fschmd_detect,
@@ -1081,15 +1080,14 @@ static int fschmd_detect(struct i2c_client *client,
return 0;
}
-static int fschmd_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int fschmd_probe(struct i2c_client *client)
{
struct fschmd_data *data;
const char * const names[7] = { "Poseidon", "Hermes", "Scylla",
"Heracles", "Heimdall", "Hades", "Syleus" };
const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
int i, err;
- enum chips kind = id->driver_data;
+ enum chips kind = i2c_match_id(fschmd_id, client)->driver_data;
data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL);
if (!data)
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index 371ce7745f5e..ef88a156efc2 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -752,7 +752,7 @@ static int fts_remove(struct i2c_client *client)
return 0;
}
-static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int fts_probe(struct i2c_client *client)
{
u8 revision;
struct fts_data *data;
@@ -819,7 +819,7 @@ static struct i2c_driver fts_driver = {
.name = "ftsteutates",
},
.id_table = fts_id,
- .probe = fts_probe,
+ .probe_new = fts_probe,
.remove = fts_remove,
.detect = fts_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 31beedcb420f..a692f7b2f6f7 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -170,8 +170,7 @@ ATTRIBUTE_GROUPS(g760a);
* new-style driver model code
*/
-static int g760a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int g760a_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct g760a_data *data;
@@ -207,7 +206,7 @@ static struct i2c_driver g760a_driver = {
.driver = {
.name = "g760a",
},
- .probe = g760a_probe,
+ .probe_new = g760a_probe,
.id_table = g760a_id,
};
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 5f0f34631580..64a0599b2da5 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1033,7 +1033,7 @@ static inline int g762_fan_init(struct device *dev)
data->fan_cmd1);
}
-static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int g762_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -1079,7 +1079,7 @@ static struct i2c_driver g762_driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(g762_dt_match),
},
- .probe = g762_probe,
+ .probe_new = g762_probe,
.id_table = g762_id,
};
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 4964beeea542..7aaee5a48243 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -611,8 +611,7 @@ static void gl518_init_client(struct i2c_client *client)
gl518_write_value(client, GL518_REG_CONF, 0x40 | regvalue);
}
-static int gl518_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gl518_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -653,7 +652,7 @@ static struct i2c_driver gl518_driver = {
.driver = {
.name = "gl518sm",
},
- .probe = gl518_probe,
+ .probe_new = gl518_probe,
.id_table = gl518_id,
.detect = gl518_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 4689e01cb56d..4ae1295cc3ea 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -854,8 +854,7 @@ static void gl520_init_client(struct i2c_client *client)
gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask);
}
-static int gl520_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gl520_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -896,7 +895,7 @@ static struct i2c_driver gl520_driver = {
.driver = {
.name = "gl520sm",
},
- .probe = gl520_probe,
+ .probe_new = gl520_probe,
.id_table = gl520_id,
.detect = gl520_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index c6d4567f3952..1fe37418ff46 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -17,6 +17,7 @@
#define GSC_HWMON_MAX_TEMP_CH 16
#define GSC_HWMON_MAX_IN_CH 16
+#define GSC_HWMON_MAX_FAN_CH 16
#define GSC_HWMON_RESOLUTION 12
#define GSC_HWMON_VREF 2500
@@ -27,11 +28,14 @@ struct gsc_hwmon_data {
struct regmap *regmap;
const struct gsc_hwmon_channel *temp_ch[GSC_HWMON_MAX_TEMP_CH];
const struct gsc_hwmon_channel *in_ch[GSC_HWMON_MAX_IN_CH];
+ const struct gsc_hwmon_channel *fan_ch[GSC_HWMON_MAX_FAN_CH];
u32 temp_config[GSC_HWMON_MAX_TEMP_CH + 1];
u32 in_config[GSC_HWMON_MAX_IN_CH + 1];
+ u32 fan_config[GSC_HWMON_MAX_FAN_CH + 1];
struct hwmon_channel_info temp_info;
struct hwmon_channel_info in_info;
- const struct hwmon_channel_info *info[3];
+ struct hwmon_channel_info fan_info;
+ const struct hwmon_channel_info *info[4];
struct hwmon_chip_info chip;
};
@@ -155,6 +159,9 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_temp:
ch = hwmon->temp_ch[channel];
break;
+ case hwmon_fan:
+ ch = hwmon->fan_ch[channel];
+ break;
default:
return -EOPNOTSUPP;
}
@@ -187,6 +194,9 @@ gsc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
/* adjust by uV offset */
tmp += ch->mvoffset;
break;
+ case mode_fan:
+ tmp *= 30; /* convert to revolutions per minute */
+ break;
case mode_voltage_24bit:
case mode_voltage_16bit:
/* no adjustment needed */
@@ -211,6 +221,9 @@ gsc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
*buf = hwmon->temp_ch[channel]->name;
break;
+ case hwmon_fan:
+ *buf = hwmon->fan_ch[channel]->name;
+ break;
default:
return -ENOTSUPP;
}
@@ -304,7 +317,7 @@ static int gsc_hwmon_probe(struct platform_device *pdev)
struct gsc_hwmon_platform_data *pdata = dev_get_platdata(dev);
struct gsc_hwmon_data *hwmon;
const struct attribute_group **groups;
- int i, i_in, i_temp;
+ int i, i_in, i_temp, i_fan;
if (!pdata) {
pdata = gsc_hwmon_get_devtree_pdata(dev);
@@ -324,7 +337,7 @@ static int gsc_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(hwmon->regmap))
return PTR_ERR(hwmon->regmap);
- for (i = 0, i_in = 0, i_temp = 0; i < hwmon->pdata->nchannels; i++) {
+ for (i = 0, i_in = 0, i_temp = 0, i_fan = 0; i < hwmon->pdata->nchannels; i++) {
const struct gsc_hwmon_channel *ch = &pdata->channels[i];
switch (ch->mode) {
@@ -338,6 +351,16 @@ static int gsc_hwmon_probe(struct platform_device *pdev)
HWMON_T_LABEL;
i_temp++;
break;
+ case mode_fan:
+ if (i_fan == GSC_HWMON_MAX_FAN_CH) {
+ dev_err(gsc->dev, "too many fan channels\n");
+ return -EINVAL;
+ }
+ hwmon->fan_ch[i_fan] = ch;
+ hwmon->fan_config[i_fan] = HWMON_F_INPUT |
+ HWMON_F_LABEL;
+ i_fan++;
+ break;
case mode_voltage_24bit:
case mode_voltage_16bit:
case mode_voltage_raw:
@@ -361,10 +384,13 @@ static int gsc_hwmon_probe(struct platform_device *pdev)
hwmon->chip.info = hwmon->info;
hwmon->info[0] = &hwmon->temp_info;
hwmon->info[1] = &hwmon->in_info;
+ hwmon->info[2] = &hwmon->fan_info;
hwmon->temp_info.type = hwmon_temp;
hwmon->temp_info.config = hwmon->temp_config;
hwmon->in_info.type = hwmon_in;
hwmon->in_info.config = hwmon->in_config;
+ hwmon->fan_info.type = hwmon_fan;
+ hwmon->fan_info.config = hwmon->fan_config;
groups = pdata->fan_base ? gsc_hwmon_groups : NULL;
hwmon_dev = devm_hwmon_device_register_with_info(dev,
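
Adding the third channel type follows the usual with-info layout: a
zero-terminated config array, one more hwmon_channel_info slot, and a
NULL sentinel in info[]. A minimal sketch of just that wiring (names
mirror the driver, values are illustrative):

	#include <linux/hwmon.h>

	#define MAX_FAN_CH 16

	static u32 fan_config[MAX_FAN_CH + 1];	/* trailing 0 terminates */
	static struct hwmon_channel_info fan_info;
	static const struct hwmon_channel_info *info[4];	/* temp, in, fan, NULL */

	static void setup_fan_channels(int nfans)
	{
		int i;

		for (i = 0; i < nfans && i < MAX_FAN_CH; i++)
			fan_config[i] = HWMON_F_INPUT | HWMON_F_LABEL;

		fan_info.type = hwmon_fan;
		fan_info.config = fan_config;
		info[2] = &fan_info;	/* slots 0/1 hold temp and in info */
	}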
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 018df6074f7b..d9394e19fea8 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -204,8 +204,7 @@ static struct attribute *hih6130_attrs[] = {
ATTRIBUTE_GROUPS(hih6130);
-static int hih6130_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int hih6130_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct hih6130 *hih6130;
@@ -250,7 +249,7 @@ static struct i2c_driver hih6130_driver = {
.name = "hih6130",
.of_match_table = of_match_ptr(hih6130_of_match),
},
- .probe = hih6130_probe,
+ .probe_new = hih6130_probe,
.id_table = hih6130_id,
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3f596a5328da..6c684058bfdf 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -431,6 +431,8 @@ static const char * const hwmon_temp_attr_templates[] = {
[hwmon_temp_lowest] = "temp%d_lowest",
[hwmon_temp_highest] = "temp%d_highest",
[hwmon_temp_reset_history] = "temp%d_reset_history",
+ [hwmon_temp_rated_min] = "temp%d_rated_min",
+ [hwmon_temp_rated_max] = "temp%d_rated_max",
};
static const char * const hwmon_in_attr_templates[] = {
@@ -450,6 +452,8 @@ static const char * const hwmon_in_attr_templates[] = {
[hwmon_in_max_alarm] = "in%d_max_alarm",
[hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm",
[hwmon_in_crit_alarm] = "in%d_crit_alarm",
+ [hwmon_in_rated_min] = "in%d_rated_min",
+ [hwmon_in_rated_max] = "in%d_rated_max",
};
static const char * const hwmon_curr_attr_templates[] = {
@@ -469,6 +473,8 @@ static const char * const hwmon_curr_attr_templates[] = {
[hwmon_curr_max_alarm] = "curr%d_max_alarm",
[hwmon_curr_lcrit_alarm] = "curr%d_lcrit_alarm",
[hwmon_curr_crit_alarm] = "curr%d_crit_alarm",
+ [hwmon_curr_rated_min] = "curr%d_rated_min",
+ [hwmon_curr_rated_max] = "curr%d_rated_max",
};
static const char * const hwmon_power_attr_templates[] = {
@@ -501,6 +507,8 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_max_alarm] = "power%d_max_alarm",
[hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm",
[hwmon_power_crit_alarm] = "power%d_crit_alarm",
+ [hwmon_power_rated_min] = "power%d_rated_min",
+ [hwmon_power_rated_max] = "power%d_rated_max",
};
static const char * const hwmon_energy_attr_templates[] = {
@@ -519,6 +527,8 @@ static const char * const hwmon_humidity_attr_templates[] = {
[hwmon_humidity_max_hyst] = "humidity%d_max_hyst",
[hwmon_humidity_alarm] = "humidity%d_alarm",
[hwmon_humidity_fault] = "humidity%d_fault",
+ [hwmon_humidity_rated_min] = "humidity%d_rated_min",
+ [hwmon_humidity_rated_max] = "humidity%d_rated_max",
};
static const char * const hwmon_fan_attr_templates[] = {
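
A driver opts into the new rated attributes by setting the matching bits
in its channel config and serving them from .read(). A sketch under the
assumption that the HWMON_I_RATED_* bits from the same series are
available (the "bar" driver name and values are made up):

	#include <linux/hwmon.h>

	static const struct hwmon_channel_info *bar_info[] = {
		HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_RATED_MIN |
				       HWMON_I_RATED_MAX),
		NULL
	};

	static int bar_read(struct device *dev, enum hwmon_sensor_types type,
			    u32 attr, int channel, long *val)
	{
		switch (attr) {
		case hwmon_in_input:
			*val = 12000;	/* mV, assumed reading */
			return 0;
		case hwmon_in_rated_min:
			*val = 10800;	/* mV, assumed rating */
			return 0;
		case hwmon_in_rated_max:
			*val = 13200;	/* mV, assumed rating */
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}

The templates above then materialize as in%d_rated_min and
in%d_rated_max in sysfs.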
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index 08ee3a64a026..f4c7b5f76359 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -531,8 +531,7 @@ static int ina209_init_client(struct i2c_client *client,
return 0;
}
-static int ina209_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ina209_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct ina209_data *data;
@@ -597,7 +596,7 @@ static struct i2c_driver ina209_driver = {
.name = "ina209",
.of_match_table = of_match_ptr(ina209_of_match),
},
- .probe = ina209_probe,
+ .probe_new = ina209_probe,
.remove = ina209_remove,
.id_table = ina209_id,
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 0fc6d5857993..ca97f9e931bc 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -614,8 +614,9 @@ static const struct attribute_group ina226_group = {
.attrs = ina226_attrs,
};
-static int ina2xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id ina2xx_id[];
+
+static int ina2xx_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ina2xx_data *data;
@@ -627,7 +628,7 @@ static int ina2xx_probe(struct i2c_client *client,
if (client->dev.of_node)
chip = (enum ina2xx_ids)of_device_get_match_data(&client->dev);
else
- chip = id->driver_data;
+ chip = i2c_match_id(ina2xx_id, client)->driver_data;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -717,7 +718,7 @@ static struct i2c_driver ina2xx_driver = {
.name = "ina2xx",
.of_match_table = of_match_ptr(ina2xx_of_match),
},
- .probe = ina2xx_probe,
+ .probe_new = ina2xx_probe,
.id_table = ina2xx_id,
};
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 81e155692aba..41fb17e0d641 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -822,8 +822,7 @@ static int ina3221_probe_from_dt(struct device *dev, struct ina3221_data *ina)
return 0;
}
-static int ina3221_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ina3221_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ina3221_data *ina;
@@ -1016,7 +1015,7 @@ static const struct i2c_device_id ina3221_ids[] = {
MODULE_DEVICE_TABLE(i2c, ina3221_ids);
static struct i2c_driver ina3221_i2c_driver = {
- .probe = ina3221_probe,
+ .probe_new = ina3221_probe,
.remove = ina3221_remove,
.driver = {
.name = INA3221_DRIVER_NAME,
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
new file mode 100644
index 000000000000..17d5e6b91c8a
--- /dev/null
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX 10 BMC HWMON Driver
+ *
+ * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+ *
+ */
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+struct m10bmc_sdata {
+ unsigned int reg_input;
+ unsigned int reg_max;
+ unsigned int reg_crit;
+ unsigned int reg_hyst;
+ unsigned int reg_min;
+ unsigned int multiplier;
+ const char *label;
+};
+
+struct m10bmc_hwmon_board_data {
+ const struct m10bmc_sdata *tables[hwmon_max];
+ const struct hwmon_channel_info **hinfo;
+};
+
+struct m10bmc_hwmon {
+ struct device *dev;
+ struct hwmon_chip_info chip;
+ char *hw_name;
+ struct intel_m10bmc *m10bmc;
+ const struct m10bmc_hwmon_board_data *bdata;
+};
+
+static const struct m10bmc_sdata n3000bmc_temp_tbl[] = {
+ { 0x100, 0x104, 0x108, 0x10c, 0x0, 500, "Board Temperature" },
+ { 0x110, 0x114, 0x118, 0x0, 0x0, 500, "FPGA Die Temperature" },
+ { 0x11c, 0x124, 0x120, 0x0, 0x0, 500, "QSFP0 Temperature" },
+ { 0x12c, 0x134, 0x130, 0x0, 0x0, 500, "QSFP1 Temperature" },
+ { 0x168, 0x0, 0x0, 0x0, 0x0, 500, "Retimer A Temperature" },
+ { 0x16c, 0x0, 0x0, 0x0, 0x0, 500, "Retimer A SerDes Temperature" },
+ { 0x170, 0x0, 0x0, 0x0, 0x0, 500, "Retimer B Temperature" },
+ { 0x174, 0x0, 0x0, 0x0, 0x0, 500, "Retimer B SerDes Temperature" },
+};
+
+static const struct m10bmc_sdata n3000bmc_in_tbl[] = {
+ { 0x128, 0x0, 0x0, 0x0, 0x0, 1, "QSFP0 Supply Voltage" },
+ { 0x138, 0x0, 0x0, 0x0, 0x0, 1, "QSFP1 Supply Voltage" },
+ { 0x13c, 0x0, 0x0, 0x0, 0x0, 1, "FPGA Core Voltage" },
+ { 0x144, 0x0, 0x0, 0x0, 0x0, 1, "12V Backplane Voltage" },
+ { 0x14c, 0x0, 0x0, 0x0, 0x0, 1, "1.2V Voltage" },
+ { 0x150, 0x0, 0x0, 0x0, 0x0, 1, "12V AUX Voltage" },
+ { 0x158, 0x0, 0x0, 0x0, 0x0, 1, "1.8V Voltage" },
+ { 0x15c, 0x0, 0x0, 0x0, 0x0, 1, "3.3V Voltage" },
+};
+
+static const struct m10bmc_sdata n3000bmc_curr_tbl[] = {
+ { 0x140, 0x0, 0x0, 0x0, 0x0, 1, "FPGA Core Current" },
+ { 0x148, 0x0, 0x0, 0x0, 0x0, 1, "12V Backplane Current" },
+ { 0x154, 0x0, 0x0, 0x0, 0x0, 1, "12V AUX Current" },
+};
+
+static const struct m10bmc_sdata n3000bmc_power_tbl[] = {
+ { 0x160, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
+};
+
+static const struct hwmon_channel_info *n3000bmc_hinfo[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_LABEL),
+ NULL
+};
+
+static const struct m10bmc_hwmon_board_data n3000bmc_hwmon_bdata = {
+ .tables = {
+ [hwmon_temp] = n3000bmc_temp_tbl,
+ [hwmon_in] = n3000bmc_in_tbl,
+ [hwmon_curr] = n3000bmc_curr_tbl,
+ [hwmon_power] = n3000bmc_power_tbl,
+ },
+
+ .hinfo = n3000bmc_hinfo,
+};
+
+static umode_t
+m10bmc_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static const struct m10bmc_sdata *
+find_sensor_data(struct m10bmc_hwmon *hw, enum hwmon_sensor_types type,
+ int channel)
+{
+ const struct m10bmc_sdata *tbl;
+
+ tbl = hw->bdata->tables[type];
+ if (!tbl)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return &tbl[channel];
+}
+
+static int do_sensor_read(struct m10bmc_hwmon *hw,
+ const struct m10bmc_sdata *data,
+ unsigned int regoff, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = m10bmc_sys_read(hw->m10bmc, regoff, &regval);
+ if (ret)
+ return ret;
+
+ /*
+ * The BMC firmware returns 0xdeadbeef if the sensor value is invalid
+ * at that time. This usually happens on sensor channels which connect
+ * to external pluggable modules, e.g. QSFP temperature and voltage.
+ * When the QSFP is unplugged from its cage, the driver will read
+ * 0xdeadbeef from those registers.
+ */
+ if (regval == 0xdeadbeef)
+ return -ENODATA;
+
+ *val = regval * data->multiplier;
+
+ return 0;
+}
+
+static int m10bmc_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct m10bmc_hwmon *hw = dev_get_drvdata(dev);
+ unsigned int reg = 0, reg_hyst = 0;
+ const struct m10bmc_sdata *data;
+ long hyst, value;
+ int ret;
+
+ data = find_sensor_data(hw, type, channel);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = data->reg_input;
+ break;
+ case hwmon_temp_max_hyst:
+ reg_hyst = data->reg_hyst;
+ fallthrough;
+ case hwmon_temp_max:
+ reg = data->reg_max;
+ break;
+ case hwmon_temp_crit_hyst:
+ reg_hyst = data->reg_hyst;
+ fallthrough;
+ case hwmon_temp_crit:
+ reg = data->reg_crit;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ reg = data->reg_input;
+ break;
+ case hwmon_in_max:
+ reg = data->reg_max;
+ break;
+ case hwmon_in_crit:
+ reg = data->reg_crit;
+ break;
+ case hwmon_in_min:
+ reg = data->reg_min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = data->reg_input;
+ break;
+ case hwmon_curr_max:
+ reg = data->reg_max;
+ break;
+ case hwmon_curr_crit:
+ reg = data->reg_crit;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ reg = data->reg_input;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!reg)
+ return -EOPNOTSUPP;
+
+ ret = do_sensor_read(hw, data, reg, &value);
+ if (ret)
+ return ret;
+
+ if (reg_hyst) {
+ ret = do_sensor_read(hw, data, reg_hyst, &hyst);
+ if (ret)
+ return ret;
+
+ value -= hyst;
+ }
+
+ *val = value;
+
+ return 0;
+}
+
+static int m10bmc_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct m10bmc_hwmon *hw = dev_get_drvdata(dev);
+ const struct m10bmc_sdata *data;
+
+ data = find_sensor_data(hw, type, channel);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ *str = data->label;
+
+ return 0;
+}
+
+static const struct hwmon_ops m10bmc_hwmon_ops = {
+ .is_visible = m10bmc_hwmon_is_visible,
+ .read = m10bmc_hwmon_read,
+ .read_string = m10bmc_hwmon_read_string,
+};
+
+static int m10bmc_hwmon_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct intel_m10bmc *m10bmc = dev_get_drvdata(pdev->dev.parent);
+ struct device *hwmon_dev, *dev = &pdev->dev;
+ struct m10bmc_hwmon *hw;
+ int i;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ hw->dev = dev;
+ hw->m10bmc = m10bmc;
+ hw->bdata = (const struct m10bmc_hwmon_board_data *)id->driver_data;
+
+ hw->chip.info = hw->bdata->hinfo;
+ hw->chip.ops = &m10bmc_hwmon_ops;
+
+ hw->hw_name = devm_kstrdup(dev, id->name, GFP_KERNEL);
+ if (!hw->hw_name)
+ return -ENOMEM;
+
+ for (i = 0; hw->hw_name[i]; i++)
+ if (hwmon_is_bad_char(hw->hw_name[i]))
+ hw->hw_name[i] = '_';
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, hw->hw_name,
+ hw, &hw->chip, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct platform_device_id intel_m10bmc_hwmon_ids[] = {
+ {
+ .name = "n3000bmc-hwmon",
+ .driver_data = (unsigned long)&n3000bmc_hwmon_bdata,
+ },
+ { }
+};
+
+static struct platform_driver intel_m10bmc_hwmon_driver = {
+ .probe = m10bmc_hwmon_probe,
+ .driver = {
+ .name = "intel-m10-bmc-hwmon",
+ },
+ .id_table = intel_m10bmc_hwmon_ids,
+};
+module_platform_driver(intel_m10bmc_hwmon_driver);
+
+MODULE_DEVICE_TABLE(platform, intel_m10bmc_hwmon_ids);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel MAX 10 BMC hardware monitor");
+MODULE_LICENSE("GPL");
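
Supporting another MAX 10 based board would reuse everything above: a new
set of sensor tables plus one more platform id entry. A sketch extending
the file, with a made-up "n5000" board name and placeholder register
offsets:

	static const struct m10bmc_sdata n5000bmc_temp_tbl[] = {
		{ 0x100, 0x104, 0x108, 0x10c, 0x0, 500, "Board Temperature" },
	};

	static const struct hwmon_channel_info *n5000bmc_hinfo[] = {
		HWMON_CHANNEL_INFO(temp,
				   HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
				   HWMON_T_LABEL),
		NULL
	};

	static const struct m10bmc_hwmon_board_data n5000bmc_hwmon_bdata = {
		.tables = { [hwmon_temp] = n5000bmc_temp_tbl, },
		.hinfo = n5000bmc_hinfo,
	};

	/* plus one more entry in intel_m10bmc_hwmon_ids[]:
	 * { .name = "n5000bmc-hwmon",
	 *   .driver_data = (unsigned long)&n5000bmc_hwmon_bdata },
	 */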
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e3f1ebee7130..4a03d010ec5a 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -458,7 +458,7 @@ static const struct hwmon_chip_info jc42_chip_info = {
.info = jc42_info,
};
-static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int jc42_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -581,7 +581,7 @@ static struct i2c_driver jc42_driver = {
.pm = JC42_DEV_PM_OPS,
.of_match_table = of_match_ptr(jc42_of_ids),
},
- .probe = jc42_probe,
+ .probe_new = jc42_probe,
.remove = jc42_remove,
.id_table = jc42_id,
.detect = jc42_detect,
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 8f12995ec133..a250481b5a97 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -21,7 +21,6 @@
*/
#include <linux/bitops.h>
-#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/init.h>
@@ -73,22 +72,35 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-/* F17h M01h Access througn SMN */
-#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+/* Common for Zen CPU families (Family 17h and 18h) */
+#define ZEN_REPORTED_TEMP_CTRL_OFFSET 0x00059800
-#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4))
-#define F17H_M70H_CCD_TEMP_VALID BIT(11)
-#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0)
+#define ZEN_CCD_TEMP(x) (0x00059954 + ((x) * 4))
+#define ZEN_CCD_TEMP_VALID BIT(11)
+#define ZEN_CCD_TEMP_MASK GENMASK(10, 0)
-#define F17H_M01H_SVI 0x0005A000
-#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc)
-#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10)
+#define ZEN_CUR_TEMP_SHIFT 21
+#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
-#define CUR_TEMP_SHIFT 21
-#define CUR_TEMP_RANGE_SEL_MASK BIT(19)
+#define ZEN_SVI_BASE 0x0005A000
-#define CFACTOR_ICORE 1000000 /* 1A / LSB */
-#define CFACTOR_ISOC 250000 /* 0.25A / LSB */
+/* F17h thermal registers through SMN */
+#define F17H_M01H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0xc)
+#define F17H_M01H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
+#define F17H_M31H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
+#define F17H_M31H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
+
+#define F17H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define F17H_M01H_CFACTOR_ISOC 250000 /* 0.25A / LSB */
+#define F17H_M31H_CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define F17H_M31H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
+
+/* F19h thermal registers through SMN */
+#define F19H_M01_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
+#define F19H_M01_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
+
+#define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
+#define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
struct k10temp_data {
struct pci_dev *pdev;
@@ -168,10 +180,10 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
-static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
amd_smn_read(amd_pci_dev_to_node_id(pdev),
- F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+ ZEN_REPORTED_TEMP_CTRL_OFFSET, regval);
}
static long get_raw_temp(struct k10temp_data *data)
@@ -180,7 +192,7 @@ static long get_raw_temp(struct k10temp_data *data)
long temp;
data->read_tempreg(data->pdev, &regval);
- temp = (regval >> CUR_TEMP_SHIFT) * 125;
+ temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
if (regval & data->temp_adjust_mask)
temp -= 49000;
return temp;
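
Worked example of the decode above, assuming a raw register whose
CUR_TEMP field reads 768 with the range-select bit set: 768 * 125 =
96000 m°C, minus the 49000 m°C range adjustment, gives 47000 m°C. As a
standalone sketch:

	#include <stdio.h>
	#include <stdint.h>

	#define ZEN_CUR_TEMP_SHIFT		21
	#define ZEN_CUR_TEMP_RANGE_SEL_MASK	(1u << 19)

	int main(void)
	{
		/* made-up register value: CUR_TEMP = 768, range bit set */
		uint32_t regval = (768u << ZEN_CUR_TEMP_SHIFT) |
				  ZEN_CUR_TEMP_RANGE_SEL_MASK;
		long temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;	/* m°C */

		if (regval & ZEN_CUR_TEMP_RANGE_SEL_MASK)
			temp -= 49000;
		printf("%ld m°C\n", temp);	/* prints 47000 */
		return 0;
	}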
@@ -288,8 +300,8 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
break;
case 2 ... 9: /* Tccd{1-8} */
amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- F17H_M70H_CCD_TEMP(channel - 2), &regval);
- *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000;
+ ZEN_CCD_TEMP(channel - 2), &regval);
+ *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
break;
default:
return -EOPNOTSUPP;
@@ -416,76 +428,6 @@ static bool has_erratum_319(struct pci_dev *pdev)
(boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
-#ifdef CONFIG_DEBUG_FS
-
-static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev,
- u32 addr, int count)
-{
- u32 reg;
- int i;
-
- for (i = 0; i < count; i++) {
- if (!(i & 3))
- seq_printf(s, "0x%06x: ", addr + i * 4);
- amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg);
- seq_printf(s, "%08x ", reg);
- if ((i & 3) == 3)
- seq_puts(s, "\n");
- }
-}
-
-static int svi_show(struct seq_file *s, void *unused)
-{
- struct k10temp_data *data = s->private;
-
- k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(svi);
-
-static int thm_show(struct seq_file *s, void *unused)
-{
- struct k10temp_data *data = s->private;
-
- k10temp_smn_regs_show(s, data->pdev,
- F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(thm);
-
-static void k10temp_debugfs_cleanup(void *ddir)
-{
- debugfs_remove_recursive(ddir);
-}
-
-static void k10temp_init_debugfs(struct k10temp_data *data)
-{
- struct dentry *debugfs;
- char name[32];
-
- /* Only show debugfs data for Family 17h/18h CPUs */
- if (!data->is_zen)
- return;
-
- scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev));
-
- debugfs = debugfs_create_dir(name, NULL);
- if (debugfs) {
- debugfs_create_file("svi", 0444, debugfs, data, &svi_fops);
- debugfs_create_file("thm", 0444, debugfs, data, &thm_fops);
- devm_add_action_or_reset(&data->pdev->dev,
- k10temp_debugfs_cleanup, debugfs);
- }
-}
-
-#else
-
-static void k10temp_init_debugfs(struct k10temp_data *data)
-{
-}
-
-#endif
-
static const struct hwmon_channel_info *k10temp_info[] = {
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_MAX |
@@ -528,8 +470,8 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
for (i = 0; i < limit; i++) {
amd_smn_read(amd_pci_dev_to_node_id(pdev),
- F17H_M70H_CCD_TEMP(i), &regval);
- if (regval & F17H_M70H_CCD_TEMP_VALID)
+ ZEN_CCD_TEMP(i), &regval);
+ if (regval & ZEN_CCD_TEMP_VALID)
data->show_temp |= BIT(TCCD_BIT(i));
}
}
@@ -565,8 +507,8 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
- data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK;
- data->read_tempreg = read_tempreg_nb_f17;
+ data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+ data->read_tempreg = read_tempreg_nb_zen;
data->show_temp |= BIT(TDIE_BIT); /* show Tdie */
data->is_zen = true;
@@ -578,17 +520,33 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->show_current = !is_threadripper() && !is_epyc();
data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
- data->cfactor[0] = CFACTOR_ICORE;
- data->cfactor[1] = CFACTOR_ISOC;
+ data->cfactor[0] = F17H_M01H_CFACTOR_ICORE;
+ data->cfactor[1] = F17H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 4);
break;
case 0x31: /* Zen2 Threadripper */
case 0x71: /* Zen2 */
data->show_current = !is_threadripper() && !is_epyc();
- data->cfactor[0] = CFACTOR_ICORE;
- data->cfactor[1] = CFACTOR_ISOC;
- data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1;
- data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0;
+ data->cfactor[0] = F17H_M31H_CFACTOR_ICORE;
+ data->cfactor[1] = F17H_M31H_CFACTOR_ISOC;
+ data->svi_addr[0] = F17H_M31H_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F17H_M31H_SVI_TEL_PLANE1;
+ k10temp_get_ccd_support(pdev, data, 8);
+ break;
+ }
+ } else if (boot_cpu_data.x86 == 0x19) {
+ data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+ data->read_tempreg = read_tempreg_nb_zen;
+ data->show_temp |= BIT(TDIE_BIT);
+ data->is_zen = true;
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x0 ... 0x1: /* Zen3 */
+ data->show_current = true;
+ data->svi_addr[0] = F19H_M01_SVI_TEL_PLANE0;
+ data->svi_addr[1] = F19H_M01_SVI_TEL_PLANE1;
+ data->cfactor[0] = F19H_M01H_CFACTOR_ICORE;
+ data->cfactor[1] = F19H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
@@ -610,12 +568,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
&k10temp_chip_info,
NULL);
- if (IS_ERR(hwmon_dev))
- return PTR_ERR(hwmon_dev);
-
- k10temp_init_debugfs(data);
-
- return 0;
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct pci_device_id k10temp_id_table[] = {
@@ -634,6 +587,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index ce5b0598524c..c83eb2fd80eb 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -417,8 +417,7 @@ static const struct attribute_group pem_fan_group = {
.attrs = pem_fan_attributes,
};
-static int pem_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pem_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -512,7 +511,7 @@ static struct i2c_driver pem_driver = {
.driver = {
.name = "lineage_pem",
},
- .probe = pem_probe,
+ .probe_new = pem_probe,
.id_table = pem_id,
};
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 60a817f58db9..50f67265c71d 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1087,8 +1087,9 @@ static void lm63_init_client(struct lm63_data *data)
(data->config_fan & 0x20) ? "manual" : "auto");
}
-static int lm63_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id lm63_id[];
+
+static int lm63_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -1106,7 +1107,7 @@ static int lm63_probe(struct i2c_client *client,
if (client->dev.of_node)
data->kind = (enum chips)of_device_get_match_data(&client->dev);
else
- data->kind = id->driver_data;
+ data->kind = i2c_match_id(lm63_id, client)->driver_data;
if (data->kind == lm64)
data->temp2_offset = 16000;
@@ -1163,7 +1164,7 @@ static struct i2c_driver lm63_driver = {
.name = "lm63",
.of_match_table = of_match_ptr(lm63_of_match),
},
- .probe = lm63_probe,
+ .probe_new = lm63_probe,
.id_table = lm63_id,
.detect = lm63_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 733c48bf6c98..beb0d61bcd82 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -190,7 +190,7 @@ ATTRIBUTE_GROUPS(lm73);
/* device probe and removal */
static int
-lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
+lm73_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -277,7 +277,7 @@ static struct i2c_driver lm73_driver = {
.name = "lm73",
.of_match_table = lm73_of_match,
},
- .probe = lm73_probe,
+ .probe_new = lm73_probe,
.id_table = lm73_ids,
.detect = lm73_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index ba0be48aeadd..e447febd121a 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
+#include <linux/regulator/consumer.h>
#include "lm75.h"
/*
@@ -101,6 +102,7 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
struct lm75_data {
struct i2c_client *client;
struct regmap *regmap;
+ struct regulator *vs;
u8 orig_conf;
u8 current_conf;
u8 resolution; /* In bits, 9 to 16 */
@@ -534,6 +536,13 @@ static const struct regmap_config lm75_regmap_config = {
.use_single_write = true,
};
+static void lm75_disable_regulator(void *data)
+{
+ struct lm75_data *lm75 = data;
+
+ regulator_disable(lm75->vs);
+}
+
static void lm75_remove(void *data)
{
struct lm75_data *lm75 = data;
@@ -542,8 +551,9 @@ static void lm75_remove(void *data)
i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
}
-static int
-lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static const struct i2c_device_id lm75_ids[];
+
+static int lm75_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -554,7 +564,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->dev.of_node)
kind = (enum lm75_type)of_device_get_match_data(&client->dev);
else
- kind = id->driver_data;
+ kind = i2c_match_id(lm75_ids, client)->driver_data;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
@@ -567,6 +577,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->client = client;
data->kind = kind;
+ data->vs = devm_regulator_get(dev, "vs");
+ if (IS_ERR(data->vs))
+ return PTR_ERR(data->vs);
+
data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
@@ -581,6 +595,17 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->sample_time = data->params->default_sample_time;
data->resolution = data->params->default_resolution;
+ /* Enable the power */
+ err = regulator_enable(data->vs);
+ if (err) {
+ dev_err(dev, "failed to enable regulator: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(dev, lm75_disable_regulator, data);
+ if (err)
+ return err;
+
/* Cache original configuration */
status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
@@ -893,7 +918,7 @@ static struct i2c_driver lm75_driver = {
.of_match_table = of_match_ptr(lm75_of_match),
.pm = LM75_DEV_PM_OPS,
},
- .probe = lm75_probe,
+ .probe_new = lm75_probe,
.id_table = lm75_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
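
The regulator handling added to lm75 above is the standard devm pattern:
get, enable, then register a disable action so the unwind runs
automatically on probe failure or unbind. Distilled into a sketch (the
"baz" names are placeholders):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	static void baz_disable_vs(void *data)
	{
		regulator_disable(data);	/* runs on unbind/probe error */
	}

	static int baz_enable_vs(struct device *dev)
	{
		struct regulator *vs;
		int err;

		vs = devm_regulator_get(dev, "vs");
		if (IS_ERR(vs))
			return PTR_ERR(vs);

		err = regulator_enable(vs);
		if (err)
			return err;

		/* pair the enable with an automatic disable */
		return devm_add_action_or_reset(dev, baz_disable_vs, vs);
	}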
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 671a962fde29..7570c9d50ddc 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -315,7 +315,7 @@ static void lm77_init_client(struct i2c_client *client)
lm77_write_value(client, LM77_REG_CONF, conf & 0xfe);
}
-static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int lm77_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -348,7 +348,7 @@ static struct i2c_driver lm77_driver = {
.driver = {
.name = "lm77",
},
- .probe = lm77_probe,
+ .probe_new = lm77_probe,
.id_table = lm77_id,
.detect = lm77_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 2119461ec43a..1aa35ca0c6fe 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -627,8 +627,9 @@ static int lm78_i2c_detect(struct i2c_client *client,
return -ENODEV;
}
-static int lm78_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id lm78_i2c_id[];
+
+static int lm78_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -639,7 +640,7 @@ static int lm78_i2c_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
- data->type = id->driver_data;
+ data->type = i2c_match_id(lm78_i2c_id, client)->driver_data;
/* Initialize the LM78 chip */
lm78_init_device(data);
@@ -661,7 +662,7 @@ static struct i2c_driver lm78_driver = {
.driver = {
.name = "lm78",
},
- .probe = lm78_i2c_probe,
+ .probe_new = lm78_i2c_probe,
.id_table = lm78_i2c_id,
.detect = lm78_i2c_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 80520cef7617..ac4adb44b224 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -591,8 +591,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int lm80_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm80_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -641,7 +640,7 @@ static struct i2c_driver lm80_driver = {
.driver = {
.name = "lm80",
},
- .probe = lm80_probe,
+ .probe_new = lm80_probe,
.id_table = lm80_id,
.detect = lm80_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 8fefca9bbbb7..2ff5ecce608e 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -317,8 +317,9 @@ static int lm83_detect(struct i2c_client *new_client,
return 0;
}
-static int lm83_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id lm83_id[];
+
+static int lm83_probe(struct i2c_client *new_client)
{
struct device *hwmon_dev;
struct lm83_data *data;
@@ -338,7 +339,7 @@ static int lm83_probe(struct i2c_client *new_client,
* declare 1 and 3 common, and then 2 and 4 only for the LM83.
*/
data->groups[0] = &lm83_group;
- if (id->driver_data == lm83)
+ if (i2c_match_id(lm83_id, new_client)->driver_data == lm83)
data->groups[1] = &lm83_group_opt;
hwmon_dev = devm_hwmon_device_register_with_groups(&new_client->dev,
@@ -363,7 +364,7 @@ static struct i2c_driver lm83_driver = {
.driver = {
.name = "lm83",
},
- .probe = lm83_probe,
+ .probe_new = lm83_probe,
.id_table = lm83_id,
.detect = lm83_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index cff0aa505a78..c7bf5de7b70f 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1544,7 +1544,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static const struct i2c_device_id lm85_id[];
+
+static int lm85_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -1559,7 +1561,7 @@ static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->dev.of_node)
data->type = (enum chips)of_device_get_match_data(&client->dev);
else
- data->type = id->driver_data;
+ data->type = i2c_match_id(lm85_id, client)->driver_data;
mutex_init(&data->update_lock);
/* Fill in the chip specific driver values */
@@ -1696,7 +1698,7 @@ static struct i2c_driver lm85_driver = {
.name = "lm85",
.of_match_table = of_match_ptr(lm85_of_match),
},
- .probe = lm85_probe,
+ .probe_new = lm85_probe,
.id_table = lm85_id,
.detect = lm85_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index c96c4d807e38..b2d820125bb6 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -912,7 +912,7 @@ static int lm87_init_client(struct i2c_client *client)
return 0;
}
-static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int lm87_probe(struct i2c_client *client)
{
struct lm87_data *data;
struct device *hwmon_dev;
@@ -994,7 +994,7 @@ static struct i2c_driver lm87_driver = {
.name = "lm87",
.of_match_table = lm87_of_match,
},
- .probe = lm87_probe,
+ .probe_new = lm87_probe,
.id_table = lm87_id,
.detect = lm87_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7bdc664af55b..ebbfd5f352c0 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1779,8 +1779,7 @@ static const struct hwmon_ops lm90_ops = {
.write = lm90_write,
};
-static int lm90_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm90_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter = client->adapter;
@@ -1816,7 +1815,7 @@ static int lm90_probe(struct i2c_client *client,
if (client->dev.of_node)
data->kind = (enum chips)of_device_get_match_data(&client->dev);
else
- data->kind = id->driver_data;
+ data->kind = i2c_match_id(lm90_id, client)->driver_data;
if (data->kind == adm1032) {
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
client->flags &= ~I2C_CLIENT_PEC;
@@ -1952,7 +1951,7 @@ static struct i2c_driver lm90_driver = {
.name = "lm90",
.of_match_table = of_match_ptr(lm90_of_match),
},
- .probe = lm90_probe,
+ .probe_new = lm90_probe,
.alert = lm90_alert,
.id_table = lm90_id,
.detect = lm90_detect,
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 84347db5edf3..9bf278cf0bd0 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -292,8 +292,7 @@ static int lm92_detect(struct i2c_client *new_client,
return 0;
}
-static int lm92_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static int lm92_probe(struct i2c_client *new_client)
{
struct device *hwmon_dev;
struct lm92_data *data;
@@ -331,7 +330,7 @@ static struct i2c_driver lm92_driver = {
.driver = {
.name = "lm92",
},
- .probe = lm92_probe,
+ .probe_new = lm92_probe,
.id_table = lm92_id,
.detect = lm92_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index cea8ea323271..78d6dfaf145b 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2583,8 +2583,7 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int lm93_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm93_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lm93_data *data;
@@ -2636,7 +2635,7 @@ static struct i2c_driver lm93_driver = {
.driver = {
.name = "lm93",
},
- .probe = lm93_probe,
+ .probe_new = lm93_probe,
.id_table = lm93_id,
.detect = lm93_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 8a2a2a490496..ac169a994ae0 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -677,8 +677,9 @@ static int lm95234_init_client(struct i2c_client *client)
return 0;
}
-static int lm95234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id lm95234_id[];
+
+static int lm95234_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lm95234_data *data;
@@ -698,7 +699,7 @@ static int lm95234_probe(struct i2c_client *client,
return err;
data->groups[0] = &lm95234_common_group;
- if (id->driver_data == lm95234)
+ if (i2c_match_id(lm95234_id, client)->driver_data == lm95234)
data->groups[1] = &lm95234_group;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -719,7 +720,7 @@ static struct i2c_driver lm95234_driver = {
.driver = {
.name = DRVNAME,
},
- .probe = lm95234_probe,
+ .probe_new = lm95234_probe,
.id_table = lm95234_id,
.detect = lm95234_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8d66d6e3c0fc..00dbc170c8c6 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -432,8 +432,7 @@ static const struct hwmon_chip_info lm95241_chip_info = {
.info = lm95241_info,
};
-static int lm95241_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm95241_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lm95241_data *data;
@@ -469,7 +468,7 @@ static struct i2c_driver lm95241_driver = {
.driver = {
.name = DEVNAME,
},
- .probe = lm95241_probe,
+ .probe_new = lm95241_probe,
.id_table = lm95241_id,
.detect = lm95241_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 057614e664e1..29388fcf5f74 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -547,8 +547,7 @@ static const struct hwmon_chip_info lm95245_chip_info = {
.info = lm95245_info,
};
-static int lm95245_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm95245_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lm95245_data *data;
@@ -598,7 +597,7 @@ static struct i2c_driver lm95245_driver = {
.name = "lm95245",
.of_match_table = of_match_ptr(lm95245_of_match),
},
- .probe = lm95245_probe,
+ .probe_new = lm95245_probe,
.id_table = lm95245_id,
.detect = lm95245_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index 2818276ed3d6..ba9c868a8641 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -445,8 +445,7 @@ static const struct regmap_config ltc2945_regmap_config = {
.max_register = LTC2945_MIN_ADIN_THRES_L,
};
-static int ltc2945_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc2945_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -478,7 +477,7 @@ static struct i2c_driver ltc2945_driver = {
.driver = {
.name = "ltc2945",
},
- .probe = ltc2945_probe,
+ .probe_new = ltc2945_probe,
.id_table = ltc2945_id,
};
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
index cf6074b110ae..ad0dfd3efbf8 100644
--- a/drivers/hwmon/ltc2947-i2c.c
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -15,8 +15,7 @@ static const struct regmap_config ltc2947_regmap_config = {
.val_bits = 8,
};
-static int ltc2947_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ltc2947_probe(struct i2c_client *i2c)
{
struct regmap *map;
@@ -39,7 +38,7 @@ static struct i2c_driver ltc2947_driver = {
.of_match_table = ltc2947_of_match,
.pm = &ltc2947_pm_ops,
},
- .probe = ltc2947_probe,
+ .probe_new = ltc2947_probe,
.id_table = ltc2947_id,
};
module_i2c_driver(ltc2947_driver);
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
index 53ff5051774c..78b191b26bb2 100644
--- a/drivers/hwmon/ltc2990.c
+++ b/drivers/hwmon/ltc2990.c
@@ -200,8 +200,7 @@ static const struct attribute_group ltc2990_group = {
};
__ATTRIBUTE_GROUPS(ltc2990);
-static int ltc2990_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int ltc2990_i2c_probe(struct i2c_client *i2c)
{
int ret;
struct device *hwmon_dev;
@@ -269,7 +268,7 @@ static struct i2c_driver ltc2990_i2c_driver = {
.driver = {
.name = "ltc2990",
},
- .probe = ltc2990_i2c_probe,
+ .probe_new = ltc2990_i2c_probe,
.id_table = ltc2990_i2c_id,
};
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 67a529b7ba18..321f54e237bd 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -154,8 +154,7 @@ static struct attribute *ltc4151_attrs[] = {
};
ATTRIBUTE_GROUPS(ltc4151);
-static int ltc4151_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4151_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -206,7 +205,7 @@ static struct i2c_driver ltc4151_driver = {
.name = "ltc4151",
.of_match_table = of_match_ptr(ltc4151_match),
},
- .probe = ltc4151_probe,
+ .probe_new = ltc4151_probe,
.id_table = ltc4151_id,
};
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index f783ac19675e..7cef3cb2962b 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -218,8 +218,7 @@ static struct attribute *ltc4215_attrs[] = {
};
ATTRIBUTE_GROUPS(ltc4215);
-static int ltc4215_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4215_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -256,7 +255,7 @@ static struct i2c_driver ltc4215_driver = {
.driver = {
.name = "ltc4215",
},
- .probe = ltc4215_probe,
+ .probe_new = ltc4215_probe,
.id_table = ltc4215_id,
};
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
index d15485e93fb8..3efce6d1cb88 100644
--- a/drivers/hwmon/ltc4222.c
+++ b/drivers/hwmon/ltc4222.c
@@ -177,8 +177,7 @@ static const struct regmap_config ltc4222_regmap_config = {
.max_register = LTC4222_ADC_CONTROL,
};
-static int ltc4222_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4222_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -211,7 +210,7 @@ static struct i2c_driver ltc4222_driver = {
.driver = {
.name = "ltc4222",
},
- .probe = ltc4222_probe,
+ .probe_new = ltc4222_probe,
.id_table = ltc4222_id,
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 244a83d675cd..5088d28b3a7c 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -440,8 +440,7 @@ static bool ltc4245_use_extra_gpios(struct i2c_client *client)
return false;
}
-static int ltc4245_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4245_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct ltc4245_data *data;
@@ -480,7 +479,7 @@ static struct i2c_driver ltc4245_driver = {
.driver = {
.name = "ltc4245",
},
- .probe = ltc4245_probe,
+ .probe_new = ltc4245_probe,
.id_table = ltc4245_id,
};
diff --git a/drivers/hwmon/ltc4260.c b/drivers/hwmon/ltc4260.c
index 8b8fd4a313ee..d0beb43abf3f 100644
--- a/drivers/hwmon/ltc4260.c
+++ b/drivers/hwmon/ltc4260.c
@@ -141,8 +141,7 @@ static const struct regmap_config ltc4260_regmap_config = {
.max_register = LTC4260_ADIN,
};
-static int ltc4260_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4260_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -174,7 +173,7 @@ static struct i2c_driver ltc4260_driver = {
.driver = {
.name = "ltc4260",
},
- .probe = ltc4260_probe,
+ .probe_new = ltc4260_probe,
.id_table = ltc4260_id,
};
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index c415829ffbf5..1dab84b52df5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -190,8 +190,7 @@ static struct attribute *ltc4261_attrs[] = {
};
ATTRIBUTE_GROUPS(ltc4261);
-static int ltc4261_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc4261_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -234,7 +233,7 @@ static struct i2c_driver ltc4261_driver = {
.driver = {
.name = "ltc4261",
},
- .probe = ltc4261_probe,
+ .probe_new = ltc4261_probe,
.id_table = ltc4261_id,
};
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 49b7e0b6d1bb..a26226e7bc37 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -493,8 +493,9 @@ static const struct attribute_group max16065_max_group = {
.is_visible = max16065_secondary_is_visible,
};
-static int max16065_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id max16065_id[];
+
+static int max16065_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct max16065_data *data;
@@ -504,6 +505,7 @@ static int max16065_probe(struct i2c_client *client,
bool have_secondary; /* true if chip has secondary limits */
bool secondary_is_max = false; /* secondary limits reflect max */
int groups = 0;
+ const struct i2c_device_id *id = i2c_match_id(max16065_id, client);
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_READ_WORD_DATA))
@@ -598,7 +600,7 @@ static struct i2c_driver max16065_driver = {
.driver = {
.name = "max16065",
},
- .probe = max16065_probe,
+ .probe_new = max16065_probe,
.id_table = max16065_id,
};
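
Drivers such as max16065 that still key behavior off the id table forward-declare the table above the probe function and look the matched entry up at run time. A hedged sketch of that idiom, with a hypothetical mydrv table and mydrv_init() helper:

static const struct i2c_device_id mydrv_id[];	/* table defined further down */

static int mydrv_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_match_id(mydrv_id, client);

	/* id can be NULL when the device was instantiated via OF/ACPI
	 * and no id-table entry matched the client name. */
	if (!id)
		return -ENODEV;

	return mydrv_init(client, id->driver_data);
}
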
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 87c6665bab3a..8bd941cae4d1 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -261,8 +261,7 @@ static void max1619_init_client(struct i2c_client *client)
config & 0xBF); /* run */
}
-static int max1619_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static int max1619_probe(struct i2c_client *new_client)
{
struct max1619_data *data;
struct device *hwmon_dev;
@@ -306,7 +305,7 @@ static struct i2c_driver max1619_driver = {
.name = "max1619",
.of_match_table = of_match_ptr(max1619_of_match),
},
- .probe = max1619_probe,
+ .probe_new = max1619_probe,
.id_table = max1619_id,
.detect = max1619_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index fb6d17287365..5c41c78f0458 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -391,8 +391,9 @@ static int max1668_detect(struct i2c_client *client,
return 0;
}
-static int max1668_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id max1668_id[];
+
+static int max1668_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -407,7 +408,7 @@ static int max1668_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
- data->type = id->driver_data;
+ data->type = i2c_match_id(max1668_id, client)->driver_data;
mutex_init(&data->update_lock);
/* sysfs hooks */
@@ -434,7 +435,7 @@ static struct i2c_driver max1668_driver = {
.driver = {
.name = "max1668",
},
- .probe = max1668_probe,
+ .probe_new = max1668_probe,
.id_table = max1668_id,
.detect = max1668_detect,
.address_list = max1668_addr_list,
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index eb22a34dc36b..23598b8b8793 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -292,7 +292,7 @@ static void max31730_remove(void *data)
}
static int
-max31730_probe(struct i2c_client *client, const struct i2c_device_id *id)
+max31730_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -427,7 +427,7 @@ static struct i2c_driver max31730_driver = {
.of_match_table = of_match_ptr(max31730_of_match),
.pm = &max31730_pm_ops,
},
- .probe = max31730_probe,
+ .probe_new = max31730_probe,
.id_table = max31730_ids,
.detect = max31730_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 117fb79ef294..86e6c71db685 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -448,8 +448,7 @@ static int max31790_init_client(struct i2c_client *client,
return 0;
}
-static int max31790_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max31790_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -491,7 +490,7 @@ MODULE_DEVICE_TABLE(i2c, max31790_id);
static struct i2c_driver max31790_driver = {
.class = I2C_CLASS_HWMON,
- .probe = max31790_probe,
+ .probe_new = max31790_probe,
.driver = {
.name = "max31790",
},
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index a8bb5de14230..367855d5edae 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -477,8 +477,7 @@ static const struct hwmon_chip_info max6621_chip_info = {
.info = max6621_info,
};
-static int max6621_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max6621_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct max6621_data *data;
@@ -555,7 +554,7 @@ static struct i2c_driver max6621_driver = {
.name = MAX6621_DRV_NAME,
.of_match_table = of_match_ptr(max6621_of_match),
},
- .probe = max6621_probe,
+ .probe_new = max6621_probe,
.id_table = max6621_id,
};
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 2d56e97aa5fa..b71899c641fa 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -516,8 +516,7 @@ static int max6639_detect(struct i2c_client *client,
return 0;
}
-static int max6639_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max6639_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct max6639_data *data;
@@ -581,7 +580,7 @@ static struct i2c_driver max6639_driver = {
.name = "max6639",
.pm = &max6639_pm_ops,
},
- .probe = max6639_probe,
+ .probe_new = max6639_probe,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 5ab6fdb53b96..23d93142b0b3 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -264,8 +264,7 @@ static struct attribute *max6642_attrs[] = {
};
ATTRIBUTE_GROUPS(max6642);
-static int max6642_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max6642_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct max6642_data *data;
@@ -302,7 +301,7 @@ static struct i2c_driver max6642_driver = {
.driver = {
.name = "max6642",
},
- .probe = max6642_probe,
+ .probe_new = max6642_probe,
.id_table = max6642_id,
.detect = max6642_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 3d9d371c35b5..cc7f2980fe83 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -757,8 +757,9 @@ static const struct hwmon_chip_info max6650_chip_info = {
.info = max6650_info,
};
-static int max6650_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id max6650_id[];
+
+static int max6650_probe(struct i2c_client *client)
{
struct thermal_cooling_device *cooling_dev;
struct device *dev = &client->dev;
@@ -775,7 +776,8 @@ static int max6650_probe(struct i2c_client *client,
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->nr_fans = of_id ? (int)(uintptr_t)of_id->data : id->driver_data;
+ data->nr_fans = of_id ? (int)(uintptr_t)of_id->data :
+ i2c_match_id(max6650_id, client)->driver_data;
/*
* Initialize the max6650 chip
@@ -817,7 +819,7 @@ static struct i2c_driver max6650_driver = {
.name = "max6650",
.of_match_table = of_match_ptr(max6650_dt_match),
},
- .probe = max6650_probe,
+ .probe_new = max6650_probe,
.id_table = max6650_id,
};
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 58781d999caa..fc3241101178 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -685,8 +685,9 @@ done:
return 0;
}
-static int max6697_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id max6697_id[];
+
+static int max6697_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
@@ -704,7 +705,7 @@ static int max6697_probe(struct i2c_client *client,
if (client->dev.of_node)
data->type = (enum chips)of_device_get_match_data(&client->dev);
else
- data->type = id->driver_data;
+ data->type = i2c_match_id(max6697_id, client)->driver_data;
data->chip = &max6697_chip_data[data->type];
data->client = client;
mutex_init(&data->update_lock);
@@ -785,7 +786,7 @@ static struct i2c_driver max6697_driver = {
.name = "max6697",
.of_match_table = of_match_ptr(max6697_of_match),
},
- .probe = max6697_probe,
+ .probe_new = max6697_probe,
.id_table = max6697_id,
};
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 4e8f995dc773..ce2780768074 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -100,8 +100,9 @@ static ssize_t in0_input_show(struct device *dev,
static DEVICE_ATTR_RO(in0_input);
-static int mcp3021_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id mcp3021_id[];
+
+static int mcp3021_probe(struct i2c_client *client)
{
int err;
struct mcp3021_data *data = NULL;
@@ -132,7 +133,7 @@ static int mcp3021_probe(struct i2c_client *client,
data->vdd = MCP3021_VDD_REF_DEFAULT;
}
- switch (id->driver_data) {
+ switch (i2c_match_id(mcp3021_id, client)->driver_data) {
case mcp3021:
data->sar_shift = MCP3021_SAR_SHIFT;
data->sar_mask = MCP3021_SAR_MASK;
@@ -197,7 +198,7 @@ static struct i2c_driver mcp3021_driver = {
.name = "mcp3021",
.of_match_table = of_match_ptr(of_mcp3021_match),
},
- .probe = mcp3021_probe,
+ .probe_new = mcp3021_probe,
.remove = mcp3021_remove,
.id_table = mcp3021_id,
};
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
new file mode 100644
index 000000000000..18da5a25e89a
--- /dev/null
+++ b/drivers/hwmon/mr75203.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MaxLinear, Inc.
+ *
+ * This driver is a hardware monitoring driver for the PVT controller
+ * (MR75203), which is used to configure and control the Moortec embedded
+ * analog IP that provides multiple embedded temperature sensor (TS),
+ * voltage monitor (VM) and process detector (PD) modules.
+ */
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* PVT Common register */
+#define PVT_IP_CONFIG 0x04
+#define TS_NUM_MSK GENMASK(4, 0)
+#define TS_NUM_SFT 0
+#define PD_NUM_MSK GENMASK(12, 8)
+#define PD_NUM_SFT 8
+#define VM_NUM_MSK GENMASK(20, 16)
+#define VM_NUM_SFT 16
+#define CH_NUM_MSK GENMASK(31, 24)
+#define CH_NUM_SFT 24
+
+/* Macro Common Register */
+#define CLK_SYNTH 0x00
+#define CLK_SYNTH_LO_SFT 0
+#define CLK_SYNTH_HI_SFT 8
+#define CLK_SYNTH_HOLD_SFT 16
+#define CLK_SYNTH_EN BIT(24)
+#define CLK_SYS_CYCLES_MAX 514
+#define CLK_SYS_CYCLES_MIN 2
+#define HZ_PER_MHZ 1000000L
+
+#define SDIF_DISABLE 0x04
+
+#define SDIF_STAT 0x08
+#define SDIF_BUSY BIT(0)
+#define SDIF_LOCK BIT(1)
+
+#define SDIF_W 0x0c
+#define SDIF_PROG BIT(31)
+#define SDIF_WRN_W BIT(27)
+#define SDIF_WRN_R 0x00
+#define SDIF_ADDR_SFT 24
+
+#define SDIF_HALT 0x10
+#define SDIF_CTRL 0x14
+#define SDIF_SMPL_CTRL 0x20
+
+/* TS & PD Individual Macro Register */
+#define COM_REG_SIZE 0x40
+
+#define SDIF_DONE(n) (COM_REG_SIZE + 0x14 + 0x40 * (n))
+#define SDIF_SMPL_DONE BIT(0)
+
+#define SDIF_DATA(n) (COM_REG_SIZE + 0x18 + 0x40 * (n))
+#define SAMPLE_DATA_MSK GENMASK(15, 0)
+
+#define HILO_RESET(n) (COM_REG_SIZE + 0x2c + 0x40 * (n))
+
+/* VM Individual Macro Register */
+#define VM_COM_REG_SIZE 0x200
+#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
+#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+
+/* SDA Slave Register */
+#define IP_CTRL 0x00
+#define IP_RST_REL BIT(1)
+#define IP_RUN_CONT BIT(3)
+#define IP_AUTO BIT(8)
+#define IP_VM_MODE BIT(10)
+
+#define IP_CFG 0x01
+#define CFG0_MODE_2 BIT(0)
+#define CFG0_PARALLEL_OUT 0
+#define CFG0_12_BIT 0
+#define CFG1_VOL_MEAS_MODE 0
+#define CFG1_PARALLEL_OUT 0
+#define CFG1_14_BIT 0
+
+#define IP_DATA 0x03
+
+#define IP_POLL 0x04
+#define VM_CH_INIT BIT(20)
+#define VM_CH_REQ BIT(21)
+
+#define IP_TMR 0x05
+#define POWER_DELAY_CYCLE_256 0x80
+#define POWER_DELAY_CYCLE_64 0x40
+
+#define PVT_POLL_DELAY_US 20
+#define PVT_POLL_TIMEOUT_US 20000
+#define PVT_H_CONST 100000
+#define PVT_CAL5_CONST 2047
+#define PVT_G_CONST 40000
+#define PVT_CONV_BITS 10
+#define PVT_N_CONST 90
+#define PVT_R_CONST 245805
+
+struct pvt_device {
+ struct regmap *c_map;
+ struct regmap *t_map;
+ struct regmap *p_map;
+ struct regmap *v_map;
+ struct clk *clk;
+ struct reset_control *rst;
+ u32 t_num;
+ u32 p_num;
+ u32 v_num;
+ u32 ip_freq;
+ u8 *vm_idx;
+};
+
+static umode_t pvt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (attr == hwmon_temp_input)
+ return 0444;
+ break;
+ case hwmon_in:
+ if (attr == hwmon_in_input)
+ return 0444;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct pvt_device *pvt = dev_get_drvdata(dev);
+ struct regmap *t_map = pvt->t_map;
+ u32 stat, nbs;
+ int ret;
+ u64 tmp;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = regmap_read_poll_timeout(t_map, SDIF_DONE(channel),
+ stat, stat & SDIF_SMPL_DONE,
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(t_map, SDIF_DATA(channel), &nbs);
+ if (ret < 0)
+ return ret;
+
+ nbs &= SAMPLE_DATA_MSK;
+
+ /*
+ * Convert the raw sample to a temperature in
+ * millidegrees Celsius, as hwmon expects
+ */
+ tmp = nbs * PVT_H_CONST;
+ do_div(tmp, PVT_CAL5_CONST);
+ *val = tmp - PVT_G_CONST - pvt->ip_freq;
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct pvt_device *pvt = dev_get_drvdata(dev);
+ struct regmap *v_map = pvt->v_map;
+ u32 n, stat;
+ u8 vm_idx;
+ int ret;
+
+ if (channel >= pvt->v_num)
+ return -EINVAL;
+
+ vm_idx = pvt->vm_idx[channel];
+
+ switch (attr) {
+ case hwmon_in_input:
+ ret = regmap_read_poll_timeout(v_map, VM_SDIF_DONE(vm_idx),
+ stat, stat & SDIF_SMPL_DONE,
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
+ if (ret < 0)
+ return ret;
+
+ n &= SAMPLE_DATA_MSK;
+ /* Convert the N bitstream count into voltage */
+ *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int pvt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_temp:
+ return pvt_read_temp(dev, attr, channel, val);
+ case hwmon_in:
+ return pvt_read_in(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const u32 pvt_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info pvt_chip = {
+ .type = hwmon_chip,
+ .config = pvt_chip_config,
+};
+
+static struct hwmon_channel_info pvt_temp = {
+ .type = hwmon_temp,
+};
+
+static struct hwmon_channel_info pvt_in = {
+ .type = hwmon_in,
+};
+
+static const struct hwmon_ops pvt_hwmon_ops = {
+ .is_visible = pvt_is_visible,
+ .read = pvt_read,
+};
+
+static struct hwmon_chip_info pvt_chip_info = {
+ .ops = &pvt_hwmon_ops,
+};
+
+static int pvt_init(struct pvt_device *pvt)
+{
+ u16 sys_freq, key, middle, low = 4, high = 8;
+ struct regmap *t_map = pvt->t_map;
+ struct regmap *p_map = pvt->p_map;
+ struct regmap *v_map = pvt->v_map;
+ u32 t_num = pvt->t_num;
+ u32 p_num = pvt->p_num;
+ u32 v_num = pvt->v_num;
+ u32 clk_synth, val;
+ int ret;
+
+ sys_freq = clk_get_rate(pvt->clk) / HZ_PER_MHZ;
+ while (high >= low) {
+ middle = (low + high + 1) / 2;
+ key = DIV_ROUND_CLOSEST(sys_freq, middle);
+ if (key > CLK_SYS_CYCLES_MAX) {
+ low = middle + 1;
+ continue;
+ } else if (key < CLK_SYS_CYCLES_MIN) {
+ high = middle - 1;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ /*
+ * The system supports 'clk_sys' to 'clk_ip' frequency ratios
+ * from 2:1 to 512:1
+ */
+ key = clamp_val(key, CLK_SYS_CYCLES_MIN, CLK_SYS_CYCLES_MAX) - 2;
+
+ clk_synth = ((key + 1) >> 1) << CLK_SYNTH_LO_SFT |
+ (key >> 1) << CLK_SYNTH_HI_SFT |
+ (key >> 1) << CLK_SYNTH_HOLD_SFT | CLK_SYNTH_EN;
+
+ pvt->ip_freq = sys_freq * 100 / (key + 2);
+
+ if (t_num) {
+ ret = regmap_write(t_map, SDIF_SMPL_CTRL, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(t_map, SDIF_HALT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(t_map, CLK_SYNTH, clk_synth);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(t_map, SDIF_DISABLE, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = CFG0_MODE_2 | CFG0_PARALLEL_OUT | CFG0_12_BIT |
+ IP_CFG << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(t_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = POWER_DELAY_CYCLE_256 | IP_TMR << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(t_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = IP_RST_REL | IP_RUN_CONT | IP_AUTO |
+ IP_CTRL << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(t_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (p_num) {
+ ret = regmap_write(p_map, SDIF_HALT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(p_map, SDIF_DISABLE, BIT(p_num) - 1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(p_map, CLK_SYNTH, clk_synth);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (v_num) {
+ ret = regmap_write(v_map, SDIF_SMPL_CTRL, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(v_map, SDIF_HALT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(v_map, CLK_SYNTH, clk_synth);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(v_map, SDIF_DISABLE, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
+ CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = POWER_DELAY_CYCLE_64 | IP_TMR << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = IP_RST_REL | IP_RUN_CONT | IP_AUTO | IP_VM_MODE |
+ IP_CTRL << SDIF_ADDR_SFT |
+ SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct regmap_config pvt_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int pvt_get_regmap(struct platform_device *pdev, char *reg_name,
+ struct pvt_device *pvt)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap **reg_map;
+ void __iomem *io_base;
+
+ if (!strcmp(reg_name, "common"))
+ reg_map = &pvt->c_map;
+ else if (!strcmp(reg_name, "ts"))
+ reg_map = &pvt->t_map;
+ else if (!strcmp(reg_name, "pd"))
+ reg_map = &pvt->p_map;
+ else if (!strcmp(reg_name, "vm"))
+ reg_map = &pvt->v_map;
+ else
+ return -EINVAL;
+
+ io_base = devm_platform_ioremap_resource_byname(pdev, reg_name);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ pvt_regmap_config.name = reg_name;
+ *reg_map = devm_regmap_init_mmio(dev, io_base, &pvt_regmap_config);
+ if (IS_ERR(*reg_map)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(*reg_map);
+ }
+
+ return 0;
+}
+
+static void pvt_clk_disable(void *data)
+{
+ struct pvt_device *pvt = data;
+
+ clk_disable_unprepare(pvt->clk);
+}
+
+static int pvt_clk_enable(struct device *dev, struct pvt_device *pvt)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pvt->clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, pvt_clk_disable, pvt);
+}
+
+static void pvt_reset_control_assert(void *data)
+{
+ struct pvt_device *pvt = data;
+
+ reset_control_assert(pvt->rst);
+}
+
+static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt)
+{
+ int ret;
+
+ ret = reset_control_deassert(pvt->rst);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, pvt_reset_control_assert, pvt);
+}
+
+static int mr75203_probe(struct platform_device *pdev)
+{
+ const struct hwmon_channel_info **pvt_info;
+ u32 ts_num, vm_num, pd_num, val, index, i;
+ struct device *dev = &pdev->dev;
+ u32 *temp_config, *in_config;
+ struct device *hwmon_dev;
+ struct pvt_device *pvt;
+ int ret;
+
+ pvt = devm_kzalloc(dev, sizeof(*pvt), GFP_KERNEL);
+ if (!pvt)
+ return -ENOMEM;
+
+ ret = pvt_get_regmap(pdev, "common", pvt);
+ if (ret)
+ return ret;
+
+ pvt->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pvt->clk))
+ return dev_err_probe(dev, PTR_ERR(pvt->clk), "failed to get clock\n");
+
+ ret = pvt_clk_enable(dev, pvt);
+ if (ret) {
+ dev_err(dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ pvt->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(pvt->rst))
+ return dev_err_probe(dev, PTR_ERR(pvt->rst),
+ "failed to get reset control\n");
+
+ ret = pvt_reset_control_deassert(dev, pvt);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot deassert reset control\n");
+
+ ret = regmap_read(pvt->c_map, PVT_IP_CONFIG, &val);
+ if (ret < 0)
+ return ret;
+
+ ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
+ pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
+ vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ pvt->t_num = ts_num;
+ pvt->p_num = pd_num;
+ pvt->v_num = vm_num;
+ val = 0;
+ if (ts_num)
+ val++;
+ if (vm_num)
+ val++;
+ if (!val)
+ return -ENODEV;
+
+ pvt_info = devm_kcalloc(dev, val + 2, sizeof(*pvt_info), GFP_KERNEL);
+ if (!pvt_info)
+ return -ENOMEM;
+ pvt_info[0] = &pvt_chip;
+ index = 1;
+
+ if (ts_num) {
+ ret = pvt_get_regmap(pdev, "ts", pvt);
+ if (ret)
+ return ret;
+
+ temp_config = devm_kcalloc(dev, ts_num + 1,
+ sizeof(*temp_config), GFP_KERNEL);
+ if (!temp_config)
+ return -ENOMEM;
+
+ memset32(temp_config, HWMON_T_INPUT, ts_num);
+ pvt_temp.config = temp_config;
+ pvt_info[index++] = &pvt_temp;
+ }
+
+ if (pd_num) {
+ ret = pvt_get_regmap(pdev, "pd", pvt);
+ if (ret)
+ return ret;
+ }
+
+ if (vm_num) {
+ u32 num = vm_num;
+
+ ret = pvt_get_regmap(pdev, "vm", pvt);
+ if (ret)
+ return ret;
+
+ pvt->vm_idx = devm_kcalloc(dev, vm_num, sizeof(*pvt->vm_idx),
+ GFP_KERNEL);
+ if (!pvt->vm_idx)
+ return -ENOMEM;
+
+ ret = device_property_read_u8_array(dev, "intel,vm-map",
+ pvt->vm_idx, vm_num);
+ if (ret) {
+ num = 0;
+ } else {
+ for (i = 0; i < vm_num; i++)
+ if (pvt->vm_idx[i] >= vm_num ||
+ pvt->vm_idx[i] == 0xff) {
+ num = i;
+ break;
+ }
+ }
+
+ /*
+ * In case the intel,vm-map property is not defined, we assume
+ * incremental channel numbers.
+ */
+ for (i = num; i < vm_num; i++)
+ pvt->vm_idx[i] = i;
+
+ in_config = devm_kcalloc(dev, num + 1,
+ sizeof(*in_config), GFP_KERNEL);
+ if (!in_config)
+ return -ENOMEM;
+
+ memset32(in_config, HWMON_I_INPUT, num);
+ in_config[num] = 0;
+ pvt_in.config = in_config;
+
+ pvt_info[index++] = &pvt_in;
+ }
+
+ ret = pvt_init(pvt);
+ if (ret) {
+ dev_err(dev, "failed to init pvt: %d\n", ret);
+ return ret;
+ }
+
+ pvt_chip_info.info = pvt_info;
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "pvt",
+ pvt,
+ &pvt_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id moortec_pvt_of_match[] = {
+ { .compatible = "moortec,mr75203" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, moortec_pvt_of_match);
+
+static struct platform_driver moortec_pvt_driver = {
+ .driver = {
+ .name = "moortec-pvt",
+ .of_match_table = moortec_pvt_of_match,
+ },
+ .probe = mr75203_probe,
+};
+module_platform_driver(moortec_pvt_driver);
+
+MODULE_LICENSE("GPL v2");
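
For reference, the temperature path in pvt_read_temp() above boils down to a single fixed-point conversion. A standalone sketch of that arithmetic, assuming the constants defined at the top of the file and that the result is in millidegrees Celsius as hwmon expects (the helper name is illustrative only):

/* Mirror of the conversion in pvt_read_temp(): scale the sample by
 * H/CAL5, then subtract the G offset and the clk_ip frequency term
 * computed in pvt_init(). */
static long pvt_sample_to_mc(u32 nbs, u32 ip_freq)
{
	u64 tmp;

	nbs &= SAMPLE_DATA_MSK;		/* keep the 16-bit sample field */
	tmp = (u64)nbs * PVT_H_CONST;
	do_div(tmp, PVT_CAL5_CONST);

	return (long)tmp - PVT_G_CONST - ip_freq;
}
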
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 570df8eb5272..604af2f6103a 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1056,8 +1056,7 @@ static int nct7802_init_chip(struct nct7802_data *data)
return regmap_update_bits(data->regmap, REG_VMON_ENABLE, 0x03, 0x03);
}
-static int nct7802_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nct7802_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct nct7802_data *data;
@@ -1101,7 +1100,7 @@ static struct i2c_driver nct7802_driver = {
.name = DRVNAME,
},
.detect = nct7802_detect,
- .probe = nct7802_probe,
+ .probe_new = nct7802_probe,
.id_table = nct7802_idtable,
.address_list = nct7802_address_list,
};
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 242ff8bee78d..b1c837fc407a 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -1009,8 +1009,7 @@ static const struct watchdog_ops nct7904_wdt_ops = {
.get_timeleft = nct7904_wdt_get_timeleft,
};
-static int nct7904_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int nct7904_probe(struct i2c_client *client)
{
struct nct7904_data *data;
struct device *hwmon_dev;
@@ -1172,7 +1171,7 @@ static struct i2c_driver nct7904_driver = {
.driver = {
.name = "nct7904",
},
- .probe = nct7904_probe,
+ .probe_new = nct7904_probe,
.id_table = nct7904_id,
.detect = nct7904_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index 76fb7870c7d3..0cf8588be35a 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -203,8 +203,7 @@ static int p8_i2c_occ_send_cmd(struct occ *occ, u8 *cmd)
return 0;
}
-static int p8_i2c_occ_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int p8_i2c_occ_probe(struct i2c_client *client)
{
struct occ *occ;
struct p8_i2c_occ *ctx = devm_kzalloc(&client->dev, sizeof(*ctx),
@@ -245,7 +244,7 @@ static struct i2c_driver p8_i2c_occ_driver = {
.name = "occ-hwmon",
.of_match_table = p8_i2c_occ_of_match,
},
- .probe = p8_i2c_occ_probe,
+ .probe_new = p8_i2c_occ_probe,
.remove = p8_i2c_occ_remove,
};
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index b7a3a292123d..a97a51005c61 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -179,8 +179,7 @@ static const struct attribute_group pcf8591_attr_group_opt = {
* Real code
*/
-static int pcf8591_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf8591_probe(struct i2c_client *client)
{
struct pcf8591_data *data;
int err;
@@ -295,7 +294,7 @@ static struct i2c_driver pcf8591_driver = {
.driver = {
.name = "pcf8591",
},
- .probe = pcf8591_probe,
+ .probe_new = pcf8591_probe,
.remove = pcf8591_remove,
.id_table = pcf8591_id,
};
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index e35db489b76f..a25faf69fce3 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -26,6 +26,17 @@ config SENSORS_PMBUS
This driver can also be built as a module. If so, the module will
be called pmbus.
+config SENSORS_ADM1266
+ tristate "Analog Devices ADM1266 Sequencer"
+ select CRC8
+ depends on GPIOLIB
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices ADM1266 Cascadable Super Sequencer.
+
+ This driver can also be built as a module. If so, the module will
+ be called adm1266.
+
config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
help
@@ -200,6 +211,15 @@ config SENSORS_MAX8688
This driver can also be built as a module. If so, the module will
be called max8688.
+config SENSORS_MP2975
+ tristate "MPS MP2975"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2975 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2975.
+
config SENSORS_PXE1610
tristate "Infineon PXE1610"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index c4b15db996ad..4c97ad0bd791 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_ADM1266) += adm1266.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
new file mode 100644
index 000000000000..c7b373ba92f2
--- /dev/null
+++ b/drivers/hwmon/pmbus/adm1266.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADM1266 - Cascadable Super Sequencer with Margin
+ * Control and Fault Recording
+ *
+ * Copyright 2020 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include "pmbus.h"
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#define ADM1266_BLACKBOX_CONFIG 0xD3
+#define ADM1266_PDIO_CONFIG 0xD4
+#define ADM1266_READ_STATE 0xD9
+#define ADM1266_READ_BLACKBOX 0xDE
+#define ADM1266_SET_RTC 0xDF
+#define ADM1266_GPIO_CONFIG 0xE1
+#define ADM1266_BLACKBOX_INFO 0xE6
+#define ADM1266_PDIO_STATUS 0xE9
+#define ADM1266_GPIO_STATUS 0xEA
+
+/* ADM1266 GPIO defines */
+#define ADM1266_GPIO_NR 9
+#define ADM1266_GPIO_FUNCTIONS(x) FIELD_GET(BIT(0), x)
+#define ADM1266_GPIO_INPUT_EN(x) FIELD_GET(BIT(2), x)
+#define ADM1266_GPIO_OUTPUT_EN(x) FIELD_GET(BIT(3), x)
+#define ADM1266_GPIO_OPEN_DRAIN(x) FIELD_GET(BIT(4), x)
+
+/* ADM1266 PDIO defines */
+#define ADM1266_PDIO_NR 16
+#define ADM1266_PDIO_PIN_CFG(x) FIELD_GET(GENMASK(15, 13), x)
+#define ADM1266_PDIO_GLITCH_FILT(x) FIELD_GET(GENMASK(12, 9), x)
+#define ADM1266_PDIO_OUT_CFG(x) FIELD_GET(GENMASK(2, 0), x)
+
+#define ADM1266_BLACKBOX_OFFSET 0
+#define ADM1266_BLACKBOX_SIZE 64
+
+#define ADM1266_PMBUS_BLOCK_MAX 255
+
+struct adm1266_data {
+ struct pmbus_driver_info info;
+ struct gpio_chip gc;
+ const char *gpio_names[ADM1266_GPIO_NR + ADM1266_PDIO_NR];
+ struct i2c_client *client;
+ struct dentry *debugfs_dir;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
+ u8 *dev_mem;
+ struct mutex buf_mutex;
+ u8 write_buf[ADM1266_PMBUS_BLOCK_MAX + 1] ____cacheline_aligned;
+ u8 read_buf[ADM1266_PMBUS_BLOCK_MAX + 1] ____cacheline_aligned;
+};
+
+static const struct nvmem_cell_info adm1266_nvmem_cells[] = {
+ {
+ .name = "blackbox",
+ .offset = ADM1266_BLACKBOX_OFFSET,
+ .bytes = 2048,
+ },
+};
+
+DECLARE_CRC8_TABLE(pmbus_crc_table);
+
+/*
+ * Differs from a plain Block Read in that it sends data and waits for the
+ * slave to return a value dependent on that data. The protocol is simply a
+ * Block Write followed by a Block Read, without the Read-Block command
+ * field and without the Write-Block STOP bit.
+ */
+static int adm1266_pmbus_block_xfer(struct adm1266_data *data, u8 cmd, u8 w_len, u8 *data_w,
+ u8 *data_r)
+{
+ struct i2c_client *client = data->client;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_DMA_SAFE,
+ .buf = data->write_buf,
+ .len = w_len + 2,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_DMA_SAFE,
+ .buf = data->read_buf,
+ .len = ADM1266_PMBUS_BLOCK_MAX + 2,
+ }
+ };
+ u8 addr;
+ u8 crc;
+ int ret;
+
+ mutex_lock(&data->buf_mutex);
+
+ msgs[0].buf[0] = cmd;
+ msgs[0].buf[1] = w_len;
+ memcpy(&msgs[0].buf[2], data_w, w_len);
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret != 2) {
+ if (ret >= 0)
+ ret = -EPROTO;
+
+ mutex_unlock(&data->buf_mutex);
+
+ return ret;
+ }
+
+ if (client->flags & I2C_CLIENT_PEC) {
+ addr = i2c_8bit_addr_from_msg(&msgs[0]);
+ crc = crc8(pmbus_crc_table, &addr, 1, 0);
+ crc = crc8(pmbus_crc_table, msgs[0].buf, msgs[0].len, crc);
+
+ addr = i2c_8bit_addr_from_msg(&msgs[1]);
+ crc = crc8(pmbus_crc_table, &addr, 1, crc);
+ crc = crc8(pmbus_crc_table, msgs[1].buf, msgs[1].buf[0] + 1, crc);
+
+ if (crc != msgs[1].buf[msgs[1].buf[0] + 1]) {
+ mutex_unlock(&data->buf_mutex);
+ return -EBADMSG;
+ }
+ }
+
+ memcpy(data_r, &msgs[1].buf[1], msgs[1].buf[0]);
+
+ ret = msgs[1].buf[0];
+ mutex_unlock(&data->buf_mutex);
+
+ return ret;
+}
+
+static const unsigned int adm1266_gpio_mapping[ADM1266_GPIO_NR][2] = {
+ {1, 0},
+ {2, 1},
+ {3, 2},
+ {4, 8},
+ {5, 9},
+ {6, 10},
+ {7, 11},
+ {8, 6},
+ {9, 7},
+};
+
+static const char *adm1266_names[ADM1266_GPIO_NR + ADM1266_PDIO_NR] = {
+ "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5", "GPIO6", "GPIO7", "GPIO8",
+ "GPIO9", "PDIO1", "PDIO2", "PDIO3", "PDIO4", "PDIO5", "PDIO6",
+ "PDIO7", "PDIO8", "PDIO9", "PDIO10", "PDIO11", "PDIO12", "PDIO13",
+ "PDIO14", "PDIO15", "PDIO16",
+};
+
+static int adm1266_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct adm1266_data *data = gpiochip_get_data(chip);
+ u8 read_buf[I2C_SMBUS_BLOCK_MAX + 1];
+ unsigned long pins_status;
+ unsigned int pmbus_cmd;
+ int ret;
+
+ if (offset < ADM1266_GPIO_NR)
+ pmbus_cmd = ADM1266_GPIO_STATUS;
+ else
+ pmbus_cmd = ADM1266_PDIO_STATUS;
+
+ ret = i2c_smbus_read_block_data(data->client, pmbus_cmd, read_buf);
+ if (ret < 0)
+ return ret;
+
+ pins_status = read_buf[0] + (read_buf[1] << 8);
+ if (offset < ADM1266_GPIO_NR)
+ return test_bit(adm1266_gpio_mapping[offset][1], &pins_status);
+
+ return test_bit(offset - ADM1266_GPIO_NR, &pins_status);
+}
+
+static int adm1266_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct adm1266_data *data = gpiochip_get_data(chip);
+ u8 read_buf[ADM1266_PMBUS_BLOCK_MAX + 1];
+ unsigned long status;
+ unsigned int gpio_nr;
+ int ret;
+
+ ret = i2c_smbus_read_block_data(data->client, ADM1266_GPIO_STATUS, read_buf);
+ if (ret < 0)
+ return ret;
+
+ status = read_buf[0] + (read_buf[1] << 8);
+
+ *bits = 0;
+ for_each_set_bit(gpio_nr, mask, ADM1266_GPIO_NR) {
+ if (test_bit(adm1266_gpio_mapping[gpio_nr][1], &status))
+ set_bit(gpio_nr, bits);
+ }
+
+ ret = i2c_smbus_read_block_data(data->client, ADM1266_PDIO_STATUS, read_buf);
+ if (ret < 0)
+ return ret;
+
+ status = read_buf[0] + (read_buf[1] << 8);
+
+ for_each_set_bit_from(gpio_nr, mask, ADM1266_GPIO_NR + ADM1266_PDIO_NR) {
+ if (test_bit(gpio_nr - ADM1266_GPIO_NR, &status))
+ set_bit(gpio_nr, bits);
+ }
+
+ return 0;
+}
+
+static void adm1266_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct adm1266_data *data = gpiochip_get_data(chip);
+ u8 read_buf[ADM1266_PMBUS_BLOCK_MAX + 1];
+ unsigned long gpio_config;
+ unsigned long pdio_config;
+ unsigned long pin_cfg;
+ u8 write_cmd;
+ int ret;
+ int i;
+
+ for (i = 0; i < ADM1266_GPIO_NR; i++) {
+ write_cmd = adm1266_gpio_mapping[i][1];
+ ret = adm1266_pmbus_block_xfer(data, ADM1266_GPIO_CONFIG, 1, &write_cmd, read_buf);
+ if (ret != 2)
+ return;
+
+ gpio_config = read_buf[0];
+ seq_puts(s, adm1266_names[i]);
+
+ seq_puts(s, " ( ");
+ if (!ADM1266_GPIO_FUNCTIONS(gpio_config)) {
+ seq_puts(s, "high-Z )\n");
+ continue;
+ }
+ if (ADM1266_GPIO_INPUT_EN(gpio_config))
+ seq_puts(s, "input ");
+ if (ADM1266_GPIO_OUTPUT_EN(gpio_config))
+ seq_puts(s, "output ");
+ if (ADM1266_GPIO_OPEN_DRAIN(gpio_config))
+ seq_puts(s, "open-drain )\n");
+ else
+ seq_puts(s, "push-pull )\n");
+ }
+
+ write_cmd = 0xFF;
+ ret = adm1266_pmbus_block_xfer(data, ADM1266_PDIO_CONFIG, 1, &write_cmd, read_buf);
+ if (ret != 32)
+ return;
+
+ for (i = 0; i < ADM1266_PDIO_NR; i++) {
+ seq_puts(s, adm1266_names[ADM1266_GPIO_NR + i]);
+
+ pdio_config = read_buf[2 * i];
+ pdio_config += (read_buf[2 * i + 1] << 8);
+ pin_cfg = ADM1266_PDIO_PIN_CFG(pdio_config);
+
+ seq_puts(s, " ( ");
+ if (!pin_cfg || pin_cfg > 5) {
+ seq_puts(s, "high-Z )\n");
+ continue;
+ }
+
+ if (pin_cfg & BIT(0))
+ seq_puts(s, "output ");
+
+ if (pin_cfg & BIT(1))
+ seq_puts(s, "input ");
+
+ seq_puts(s, ")\n");
+ }
+}
+
+static int adm1266_config_gpio(struct adm1266_data *data)
+{
+ const char *name = dev_name(&data->client->dev);
+ char *gpio_name;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(data->gpio_names); i++) {
+ gpio_name = devm_kasprintf(&data->client->dev, GFP_KERNEL, "adm1266-%x-%s",
+ data->client->addr, adm1266_names[i]);
+ if (!gpio_name)
+ return -ENOMEM;
+
+ data->gpio_names[i] = gpio_name;
+ }
+
+ data->gc.label = name;
+ data->gc.parent = &data->client->dev;
+ data->gc.owner = THIS_MODULE;
+ data->gc.base = -1;
+ data->gc.names = data->gpio_names;
+ data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
+ data->gc.get = adm1266_gpio_get;
+ data->gc.get_multiple = adm1266_gpio_get_multiple;
+ data->gc.dbg_show = adm1266_gpio_dbg_show;
+
+ ret = devm_gpiochip_add_data(&data->client->dev, &data->gc, data);
+ if (ret)
+ dev_err(&data->client->dev, "GPIO registering failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int adm1266_state_read(struct seq_file *s, void *pdata)
+{
+ struct device *dev = s->private;
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, ADM1266_READ_STATE);
+ if (ret < 0)
+ return ret;
+
+ seq_printf(s, "%d\n", ret);
+
+ return 0;
+}
+
+static void adm1266_init_debugfs(struct adm1266_data *data)
+{
+ struct dentry *root;
+
+ root = pmbus_get_debugfs_dir(data->client);
+ if (!root)
+ return;
+
+ data->debugfs_dir = debugfs_create_dir(data->client->name, root);
+ if (!data->debugfs_dir)
+ return;
+
+ debugfs_create_devm_seqfile(&data->client->dev, "sequencer_state", data->debugfs_dir,
+ adm1266_state_read);
+}
+
+static int adm1266_nvmem_read_blackbox(struct adm1266_data *data, u8 *read_buff)
+{
+ int record_count;
+ char index;
+ u8 buf[5];
+ int ret;
+
+ ret = i2c_smbus_read_block_data(data->client, ADM1266_BLACKBOX_INFO, buf);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 4)
+ return -EIO;
+
+ record_count = buf[3];
+
+ for (index = 0; index < record_count; index++) {
+ ret = adm1266_pmbus_block_xfer(data, ADM1266_READ_BLACKBOX, 1, &index, read_buff);
+ if (ret < 0)
+ return ret;
+
+ if (ret != ADM1266_BLACKBOX_SIZE)
+ return -EIO;
+
+ read_buff += ADM1266_BLACKBOX_SIZE;
+ }
+
+ return 0;
+}
+
+static int adm1266_nvmem_read(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct adm1266_data *data = priv;
+ int ret;
+
+ if (offset + bytes > data->nvmem_config.size)
+ return -EINVAL;
+
+ if (offset == 0) {
+ memset(data->dev_mem, 0, data->nvmem_config.size);
+
+ ret = adm1266_nvmem_read_blackbox(data, data->dev_mem);
+ if (ret) {
+ dev_err(&data->client->dev, "Could not read blackbox!\n");
+ return ret;
+ }
+ }
+
+ memcpy(val, data->dev_mem + offset, bytes);
+
+ return 0;
+}
+
+static int adm1266_config_nvmem(struct adm1266_data *data)
+{
+ data->nvmem_config.name = dev_name(&data->client->dev);
+ data->nvmem_config.dev = &data->client->dev;
+ data->nvmem_config.root_only = true;
+ data->nvmem_config.read_only = true;
+ data->nvmem_config.owner = THIS_MODULE;
+ data->nvmem_config.reg_read = adm1266_nvmem_read;
+ data->nvmem_config.cells = adm1266_nvmem_cells;
+ data->nvmem_config.ncells = ARRAY_SIZE(adm1266_nvmem_cells);
+ data->nvmem_config.priv = data;
+ data->nvmem_config.stride = 1;
+ data->nvmem_config.word_size = 1;
+ data->nvmem_config.size = adm1266_nvmem_cells[0].bytes;
+
+ data->dev_mem = devm_kzalloc(&data->client->dev, data->nvmem_config.size, GFP_KERNEL);
+ if (!data->dev_mem)
+ return -ENOMEM;
+
+ data->nvmem = devm_nvmem_register(&data->client->dev, &data->nvmem_config);
+ if (IS_ERR(data->nvmem)) {
+ dev_err(&data->client->dev, "Could not register nvmem!\n");
+ return PTR_ERR(data->nvmem);
+ }
+
+ return 0;
+}
+
+static int adm1266_set_rtc(struct adm1266_data *data)
+{
+ time64_t kt;
+ char write_buf[6];
+ int i;
+
+ kt = ktime_get_seconds();
+
+ memset(write_buf, 0, sizeof(write_buf));
+
+ for (i = 0; i < 4; i++)
+ write_buf[2 + i] = (kt >> (i * 8)) & 0xFF;
+
+ return i2c_smbus_write_block_data(data->client, ADM1266_SET_RTC, sizeof(write_buf),
+ write_buf);
+}
+
+static int adm1266_probe(struct i2c_client *client)
+{
+ struct adm1266_data *data;
+ int ret;
+ int i;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1266_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->info.pages = 17;
+ data->info.format[PSC_VOLTAGE_OUT] = linear;
+ for (i = 0; i < data->info.pages; i++)
+ data->info.func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+ crc8_populate_msb(pmbus_crc_table, 0x7);
+ mutex_init(&data->buf_mutex);
+
+ ret = adm1266_config_gpio(data);
+ if (ret < 0)
+ return ret;
+
+ ret = adm1266_set_rtc(data);
+ if (ret < 0)
+ return ret;
+
+ ret = adm1266_config_nvmem(data);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_do_probe(client, &data->info);
+ if (ret)
+ return ret;
+
+ adm1266_init_debugfs(data);
+
+ return 0;
+}
+
+static const struct of_device_id adm1266_of_match[] = {
+ { .compatible = "adi,adm1266" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adm1266_of_match);
+
+static const struct i2c_device_id adm1266_id[] = {
+ { "adm1266", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adm1266_id);
+
+static struct i2c_driver adm1266_driver = {
+ .driver = {
+ .name = "adm1266",
+ .of_match_table = adm1266_of_match,
+ },
+ .probe_new = adm1266_probe,
+ .remove = pmbus_do_remove,
+ .id_table = adm1266_id,
+};
+
+module_i2c_driver(adm1266_driver);
+
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1266");
+MODULE_LICENSE("GPL v2");
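
The PEC validation in adm1266_pmbus_block_xfer() above is worth spelling out: SMBus PEC is a single CRC-8 (polynomial 0x7, MSB first, as populated in adm1266_probe()) computed across both halves of the transfer. A condensed sketch of the check, assuming msgs[] is filled in exactly as in the driver:

u8 addr, crc;

addr = i2c_8bit_addr_from_msg(&msgs[0]);	/* write-phase address byte */
crc = crc8(pmbus_crc_table, &addr, 1, 0);
crc = crc8(pmbus_crc_table, msgs[0].buf, msgs[0].len, crc);

addr = i2c_8bit_addr_from_msg(&msgs[1]);	/* read-phase address byte */
crc = crc8(pmbus_crc_table, &addr, 1, crc);
crc = crc8(pmbus_crc_table, msgs[1].buf, msgs[1].buf[0] + 1, crc);

/* buf[0] holds the returned byte count; the PEC byte follows it */
if (crc != msgs[1].buf[msgs[1].buf[0] + 1])
	return -EBADMSG;
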
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 651846650a9c..e7997f37b266 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -462,8 +462,7 @@ static const struct i2c_device_id adm1275_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
-static int adm1275_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adm1275_probe(struct i2c_client *client)
{
s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
@@ -506,10 +505,10 @@ static int adm1275_probe(struct i2c_client *client,
return -ENODEV;
}
- if (id->driver_data != mid->driver_data)
+ if (strcmp(client->name, mid->name) != 0)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
- id->name, mid->name);
+ client->name, mid->name);
if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
mid->driver_data == adm1293 || mid->driver_data == adm1294)
@@ -790,14 +789,14 @@ static int adm1275_probe(struct i2c_client *client,
info->R[PSC_TEMPERATURE] = coefficients[tindex].R;
}
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
},
- .probe = adm1275_probe,
+ .probe_new = adm1275_probe,
.remove = pmbus_do_remove,
.id_table = adm1275_id,
};
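
From adm1275 onward, the PMBus conversions additionally drop the id argument of pmbus_do_probe(), which after this series derives what it needs from the client itself. For drivers with a single static info table the change is mechanical; a minimal sketch with a hypothetical mydrv_info:

static int mydrv_probe(struct i2c_client *client)
{
	/* pmbus_do_probe() no longer takes an i2c_device_id; after this
	 * series it resolves the device from the client internally. */
	return pmbus_do_probe(client, &mydrv_info);
}
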
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
index f236e18f45a5..2c5b853d6c7f 100644
--- a/drivers/hwmon/pmbus/bel-pfe.c
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -87,12 +87,13 @@ static struct pmbus_driver_info pfe_driver_info[] = {
},
};
-static int pfe_pmbus_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id pfe_device_id[];
+
+static int pfe_pmbus_probe(struct i2c_client *client)
{
int model;
- model = (int)id->driver_data;
+ model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
/*
* PFE3000-12-069RA devices may not stay in page 0 during device
@@ -104,7 +105,7 @@ static int pfe_pmbus_probe(struct i2c_client *client,
i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
}
- return pmbus_do_probe(client, id, &pfe_driver_info[model]);
+ return pmbus_do_probe(client, &pfe_driver_info[model]);
}
static const struct i2c_device_id pfe_device_id[] = {
@@ -119,7 +120,7 @@ static struct i2c_driver pfe_pmbus_driver = {
.driver = {
.name = "bel-pfe",
},
- .probe = pfe_pmbus_probe,
+ .probe_new = pfe_pmbus_probe,
.remove = pmbus_do_remove,
.id_table = pfe_device_id,
};
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 7d300f2f338d..2fb7540ee952 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -91,6 +91,8 @@ struct ibm_cffps {
struct led_classdev led;
};
+static const struct i2c_device_id ibm_cffps_id[];
+
#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
@@ -473,8 +475,7 @@ static struct pmbus_platform_data ibm_cffps_pdata = {
.flags = PMBUS_SKIP_STATUS_CHECK,
};
-static int ibm_cffps_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ibm_cffps_probe(struct i2c_client *client)
{
int i, rc;
enum versions vs = cffps_unknown;
@@ -482,11 +483,15 @@ static int ibm_cffps_probe(struct i2c_client *client,
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
const void *md = of_device_get_match_data(&client->dev);
+ const struct i2c_device_id *id;
- if (md)
+ if (md) {
vs = (enum versions)md;
- else if (id)
- vs = (enum versions)id->driver_data;
+ } else {
+ id = i2c_match_id(ibm_cffps_id, client);
+ if (id)
+ vs = (enum versions)id->driver_data;
+ }
if (vs == cffps_unknown) {
u16 ccin_revision = 0;
@@ -519,7 +524,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
}
client->dev.platform_data = &ibm_cffps_pdata;
- rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
+ rc = pmbus_do_probe(client, &ibm_cffps_info[vs]);
if (rc)
return rc;
@@ -611,7 +616,7 @@ static struct i2c_driver ibm_cffps_driver = {
.name = "ibm-cffps",
.of_match_table = ibm_cffps_of_match,
},
- .probe = ibm_cffps_probe,
+ .probe_new = ibm_cffps_probe,
.remove = pmbus_do_remove,
.id_table = ibm_cffps_id,
};
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
index 42e01549184a..be493182174d 100644
--- a/drivers/hwmon/pmbus/inspur-ipsps.c
+++ b/drivers/hwmon/pmbus/inspur-ipsps.c
@@ -190,11 +190,10 @@ static struct pmbus_platform_data ipsps_pdata = {
.flags = PMBUS_SKIP_STATUS_CHECK,
};
-static int ipsps_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ipsps_probe(struct i2c_client *client)
{
client->dev.platform_data = &ipsps_pdata;
- return pmbus_do_probe(client, id, &ipsps_info);
+ return pmbus_do_probe(client, &ipsps_info);
}
static const struct i2c_device_id ipsps_id[] = {
@@ -216,7 +215,7 @@ static struct i2c_driver ipsps_driver = {
.name = "inspur-ipsps",
.of_match_table = of_match_ptr(ipsps_of_match),
},
- .probe = ipsps_probe,
+ .probe_new = ipsps_probe,
.remove = pmbus_do_remove,
.id_table = ipsps_id,
};
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 3eea3e006a96..5fadb1def49f 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -67,8 +67,7 @@ static int ir35221_read_word_data(struct i2c_client *client, int page,
return ret;
}
-static int ir35221_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ir35221_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
u8 buf[I2C_SMBUS_BLOCK_MAX];
@@ -123,7 +122,7 @@ static int ir35221_probe(struct i2c_client *client,
| PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP;
info->func[1] = info->func[0];
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id ir35221_id[] = {
@@ -137,7 +136,7 @@ static struct i2c_driver ir35221_driver = {
.driver = {
.name = "ir35221",
},
- .probe = ir35221_probe,
+ .probe_new = ir35221_probe,
.remove = pmbus_do_remove,
.id_table = ir35221_id,
};
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 1820f5077f66..9ac563ce7dd8 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -35,10 +35,9 @@ static struct pmbus_driver_info ir38064_info = {
| PMBUS_HAVE_POUT,
};
-static int ir38064_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ir38064_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &ir38064_info);
+ return pmbus_do_probe(client, &ir38064_info);
}
static const struct i2c_device_id ir38064_id[] = {
@@ -53,7 +52,7 @@ static struct i2c_driver ir38064_driver = {
.driver = {
.name = "ir38064",
},
- .probe = ir38064_probe,
+ .probe_new = ir38064_probe,
.remove = pmbus_do_remove,
.id_table = ir38064_id,
};
diff --git a/drivers/hwmon/pmbus/irps5401.c b/drivers/hwmon/pmbus/irps5401.c
index d37daa001fb3..44aeafcbd56c 100644
--- a/drivers/hwmon/pmbus/irps5401.c
+++ b/drivers/hwmon/pmbus/irps5401.c
@@ -38,10 +38,9 @@ static struct pmbus_driver_info irps5401_info = {
.func[4] = IRPS5401_LDO_FUNC,
};
-static int irps5401_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int irps5401_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &irps5401_info);
+ return pmbus_do_probe(client, &irps5401_info);
}
static const struct i2c_device_id irps5401_id[] = {
@@ -55,7 +54,7 @@ static struct i2c_driver irps5401_driver = {
.driver = {
.name = "irps5401",
},
- .probe = irps5401_probe,
+ .probe_new = irps5401_probe,
.remove = pmbus_do_remove,
.id_table = irps5401_id,
};
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 58aa95a3c010..7cad76e07f70 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -72,6 +72,8 @@ enum variants {
raa_dmpvr2_hv,
};
+static const struct i2c_device_id raa_dmpvr_id[];
+
static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
int page,
char *buf)
@@ -218,8 +220,7 @@ static struct pmbus_driver_info raa_dmpvr_info = {
| PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
};
-static int isl68137_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int isl68137_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
@@ -228,7 +229,7 @@ static int isl68137_probe(struct i2c_client *client,
return -ENOMEM;
memcpy(info, &raa_dmpvr_info, sizeof(*info));
- switch (id->driver_data) {
+ switch (i2c_match_id(raa_dmpvr_id, client)->driver_data) {
case raa_dmpvr1_2rail:
info->pages = 2;
info->R[PSC_VOLTAGE_IN] = 3;
@@ -267,7 +268,7 @@ static int isl68137_probe(struct i2c_client *client,
return -ENODEV;
}
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id raa_dmpvr_id[] = {
@@ -322,7 +323,7 @@ static struct i2c_driver isl68137_driver = {
.driver = {
.name = "isl68137",
},
- .probe = isl68137_probe,
+ .probe_new = isl68137_probe,
.remove = pmbus_do_remove,
.id_table = raa_dmpvr_id,
};
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 9e4cf0800186..429172a42902 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -211,6 +211,8 @@ struct lm25066_data {
#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
+static const struct i2c_device_id lm25066_id[];
+
static int lm25066_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -416,8 +418,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
return ret;
}
-static int lm25066_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm25066_probe(struct i2c_client *client)
{
int config;
struct lm25066_data *data;
@@ -437,7 +438,7 @@ static int lm25066_probe(struct i2c_client *client,
if (config < 0)
return config;
- data->id = id->driver_data;
+ data->id = i2c_match_id(lm25066_id, client)->driver_data;
info = &data->info;
info->pages = 1;
@@ -487,7 +488,7 @@ static int lm25066_probe(struct i2c_client *client,
info->b[PSC_POWER] = coeff[PSC_POWER].b;
}
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id lm25066_id[] = {
@@ -506,7 +507,7 @@ static struct i2c_driver lm25066_driver = {
.driver = {
.name = "lm25066",
},
- .probe = lm25066_probe,
+ .probe_new = lm25066_probe,
.remove = pmbus_do_remove,
.id_table = lm25066_id,
};
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 7b0e6b37e247..9a024cf70145 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -649,12 +649,12 @@ static int ltc2978_get_id(struct i2c_client *client)
return -ENODEV;
}
-static int ltc2978_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc2978_probe(struct i2c_client *client)
{
int i, chip_id;
struct ltc2978_data *data;
struct pmbus_driver_info *info;
+ const struct i2c_device_id *id;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_WORD_DATA))
@@ -670,11 +670,13 @@ static int ltc2978_probe(struct i2c_client *client,
return chip_id;
data->id = chip_id;
+ id = i2c_match_id(ltc2978_id, client);
if (data->id != id->driver_data)
dev_warn(&client->dev,
- "Device mismatch: Configured %s, detected %s\n",
+ "Device mismatch: Configured %s (%d), detected %d\n",
id->name,
- ltc2978_id[data->id].name);
+ (int) id->driver_data,
+ chip_id);
info = &data->info;
info->write_word_data = ltc2978_write_word_data;
@@ -832,7 +834,7 @@ static int ltc2978_probe(struct i2c_client *client,
}
#endif
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
@@ -872,7 +874,7 @@ static struct i2c_driver ltc2978_driver = {
.name = "ltc2978",
.of_match_table = of_match_ptr(ltc2978_of_match),
},
- .probe = ltc2978_probe,
+ .probe_new = ltc2978_probe,
.remove = pmbus_do_remove,
.id_table = ltc2978_id,
};
diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c
index 3036263e0a66..8328fb367ad6 100644
--- a/drivers/hwmon/pmbus/ltc3815.c
+++ b/drivers/hwmon/pmbus/ltc3815.c
@@ -178,8 +178,7 @@ static struct pmbus_driver_info ltc3815_info = {
.write_word_data = ltc3815_write_word_data,
};
-static int ltc3815_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ltc3815_probe(struct i2c_client *client)
{
int chip_id;
@@ -193,14 +192,14 @@ static int ltc3815_probe(struct i2c_client *client,
if ((chip_id & LTC3815_ID_MASK) != LTC3815_ID)
return -ENODEV;
- return pmbus_do_probe(client, id, &ltc3815_info);
+ return pmbus_do_probe(client, &ltc3815_info);
}
static struct i2c_driver ltc3815_driver = {
.driver = {
.name = "ltc3815",
},
- .probe = ltc3815_probe,
+ .probe_new = ltc3815_probe,
.remove = pmbus_do_remove,
.id_table = ltc3815_id,
};
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 288e93f74c28..26e7f5ef9d7f 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -85,10 +85,9 @@ static struct pmbus_driver_info max16064_info = {
.write_word_data = max16064_write_word_data,
};
-static int max16064_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max16064_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &max16064_info);
+ return pmbus_do_probe(client, &max16064_info);
}
static const struct i2c_device_id max16064_id[] = {
@@ -103,7 +102,7 @@ static struct i2c_driver max16064_driver = {
.driver = {
.name = "max16064",
},
- .probe = max16064_probe,
+ .probe_new = max16064_probe,
.remove = pmbus_do_remove,
.id_table = max16064_id,
};
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index 51cdfaf9023c..71bb74e27a5c 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -239,8 +239,7 @@ static void max16601_remove(void *_data)
i2c_unregister_device(data->vsa);
}
-static int max16601_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max16601_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
@@ -288,7 +287,7 @@ static int max16601_probe(struct i2c_client *client,
data->info = max16601_info;
- return pmbus_do_probe(client, id, &data->info);
+ return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max16601_id[] = {
@@ -302,7 +301,7 @@ static struct i2c_driver max16601_driver = {
.driver = {
.name = "max16601",
},
- .probe = max16601_probe,
+ .probe_new = max16601_probe,
.remove = pmbus_do_remove,
.id_table = max16601_id,
};
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
index a151a2b588a5..be83b98411c7 100644
--- a/drivers/hwmon/pmbus/max20730.c
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -8,6 +8,7 @@
*/
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -26,16 +27,370 @@ enum chips {
max20743
};
+enum {
+ MAX20730_DEBUGFS_VOUT_MIN = 0,
+ MAX20730_DEBUGFS_FREQUENCY,
+ MAX20730_DEBUGFS_PG_DELAY,
+ MAX20730_DEBUGFS_INTERNAL_GAIN,
+ MAX20730_DEBUGFS_BOOT_VOLTAGE,
+ MAX20730_DEBUGFS_OUT_V_RAMP_RATE,
+ MAX20730_DEBUGFS_OC_PROTECT_MODE,
+ MAX20730_DEBUGFS_SS_TIMING,
+ MAX20730_DEBUGFS_IMAX,
+ MAX20730_DEBUGFS_OPERATION,
+ MAX20730_DEBUGFS_ON_OFF_CONFIG,
+ MAX20730_DEBUGFS_SMBALERT_MASK,
+ MAX20730_DEBUGFS_VOUT_MODE,
+ MAX20730_DEBUGFS_VOUT_COMMAND,
+ MAX20730_DEBUGFS_VOUT_MAX,
+ MAX20730_DEBUGFS_NUM_ENTRIES
+};
+
struct max20730_data {
enum chips id;
struct pmbus_driver_info info;
struct mutex lock; /* Used to protect against parallel writes */
u16 mfr_devset1;
+ u16 mfr_devset2;
+ u16 mfr_voutmin;
+ u32 vout_voltage_divider[2];
};
#define to_max20730_data(x) container_of(x, struct max20730_data, info)
+#define VOLT_FROM_REG(val) DIV_ROUND_CLOSEST((val), 1 << 9)
+
+#define PMBUS_SMB_ALERT_MASK 0x1B
+
+#define MAX20730_MFR_VOUT_MIN 0xd1
#define MAX20730_MFR_DEVSET1 0xd2
+#define MAX20730_MFR_DEVSET2 0xd3
+
+#define MAX20730_MFR_VOUT_MIN_MASK GENMASK(9, 0)
+#define MAX20730_MFR_VOUT_MIN_BIT_POS 0
+
+#define MAX20730_MFR_DEVSET1_RGAIN_MASK (BIT(13) | BIT(14))
+#define MAX20730_MFR_DEVSET1_OTP_MASK (BIT(11) | BIT(12))
+#define MAX20730_MFR_DEVSET1_VBOOT_MASK (BIT(8) | BIT(9))
+#define MAX20730_MFR_DEVSET1_OCP_MASK (BIT(5) | BIT(6))
+#define MAX20730_MFR_DEVSET1_FSW_MASK GENMASK(4, 2)
+#define MAX20730_MFR_DEVSET1_TSTAT_MASK (BIT(0) | BIT(1))
+
+#define MAX20730_MFR_DEVSET1_RGAIN_BIT_POS 13
+#define MAX20730_MFR_DEVSET1_OTP_BIT_POS 11
+#define MAX20730_MFR_DEVSET1_VBOOT_BIT_POS 8
+#define MAX20730_MFR_DEVSET1_OCP_BIT_POS 5
+#define MAX20730_MFR_DEVSET1_FSW_BIT_POS 2
+#define MAX20730_MFR_DEVSET1_TSTAT_BIT_POS 0
+
+#define MAX20730_MFR_DEVSET2_IMAX_MASK GENMASK(10, 8)
+#define MAX20730_MFR_DEVSET2_VRATE (BIT(6) | BIT(7))
+#define MAX20730_MFR_DEVSET2_OCPM_MASK BIT(5)
+#define MAX20730_MFR_DEVSET2_SS_MASK (BIT(0) | BIT(1))
+
+#define MAX20730_MFR_DEVSET2_IMAX_BIT_POS 8
+#define MAX20730_MFR_DEVSET2_VRATE_BIT_POS 6
+#define MAX20730_MFR_DEVSET2_OCPM_BIT_POS 5
+#define MAX20730_MFR_DEVSET2_SS_BIT_POS 0
+
+#define DEBUG_FS_DATA_MAX 16
+
+struct max20730_debugfs_data {
+ struct i2c_client *client;
+ int debugfs_entries[MAX20730_DEBUGFS_NUM_ENTRIES];
+};
+
+#define to_psu(x, y) container_of((x), \
+ struct max20730_debugfs_data, debugfs_entries[(y)])
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret, len;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct max20730_debugfs_data *psu = to_psu(idxp, idx);
+ const struct pmbus_driver_info *info;
+ const struct max20730_data *data;
+ char tbuf[DEBUG_FS_DATA_MAX] = { 0 };
+ u16 val;
+
+ info = pmbus_get_driver_info(psu->client);
+ data = to_max20730_data(info);
+
+ switch (idx) {
+ case MAX20730_DEBUGFS_VOUT_MIN:
+ ret = VOLT_FROM_REG(data->mfr_voutmin * 10000);
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d.%d\n",
+ ret / 10000, ret % 10000);
+ break;
+ case MAX20730_DEBUGFS_FREQUENCY:
+ val = (data->mfr_devset1 & MAX20730_MFR_DEVSET1_FSW_MASK)
+ >> MAX20730_MFR_DEVSET1_FSW_BIT_POS;
+
+ if (val == 0)
+ ret = 400;
+ else if (val == 1)
+ ret = 500;
+ else if (val == 2 || val == 3)
+ ret = 600;
+ else if (val == 4)
+ ret = 700;
+ else if (val == 5)
+ ret = 800;
+ else
+ ret = 900;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_PG_DELAY:
+ val = (data->mfr_devset1 & MAX20730_MFR_DEVSET1_TSTAT_MASK)
+ >> MAX20730_MFR_DEVSET1_TSTAT_BIT_POS;
+
+ if (val == 0)
+ len = strlcpy(tbuf, "2000\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "125\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "62.5\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "32\n", DEBUG_FS_DATA_MAX);
+ break;
+ case MAX20730_DEBUGFS_INTERNAL_GAIN:
+ val = (data->mfr_devset1 & MAX20730_MFR_DEVSET1_RGAIN_MASK)
+ >> MAX20730_MFR_DEVSET1_RGAIN_BIT_POS;
+
+ if (data->id == max20734) {
+ /* AN6209 */
+ if (val == 0)
+ len = strlcpy(tbuf, "0.8\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "3.2\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "1.6\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "6.4\n", DEBUG_FS_DATA_MAX);
+ } else if (data->id == max20730 || data->id == max20710) {
+ /* AN6042 or AN6140 */
+ if (val == 0)
+ len = strlcpy(tbuf, "0.9\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "3.6\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "1.8\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "7.2\n", DEBUG_FS_DATA_MAX);
+ } else if (data->id == max20743) {
+ /* AN6042 */
+ if (val == 0)
+ len = strlcpy(tbuf, "0.45\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "1.8\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "0.9\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "3.6\n", DEBUG_FS_DATA_MAX);
+ } else {
+ len = strlcpy(tbuf, "Not supported\n", DEBUG_FS_DATA_MAX);
+ }
+ break;
+ case MAX20730_DEBUGFS_BOOT_VOLTAGE:
+ val = (data->mfr_devset1 & MAX20730_MFR_DEVSET1_VBOOT_MASK)
+ >> MAX20730_MFR_DEVSET1_VBOOT_BIT_POS;
+
+ if (val == 0)
+ len = strlcpy(tbuf, "0.6484\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "0.8984\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "1.0\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ break;
+ case MAX20730_DEBUGFS_OUT_V_RAMP_RATE:
+ val = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_VRATE)
+ >> MAX20730_MFR_DEVSET2_VRATE_BIT_POS;
+
+ if (val == 0)
+ len = strlcpy(tbuf, "4\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "2\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "1\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ break;
+ case MAX20730_DEBUGFS_OC_PROTECT_MODE:
+ ret = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_OCPM_MASK)
+ >> MAX20730_MFR_DEVSET2_OCPM_BIT_POS;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_SS_TIMING:
+ val = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_SS_MASK)
+ >> MAX20730_MFR_DEVSET2_SS_BIT_POS;
+
+ if (val == 0)
+ len = strlcpy(tbuf, "0.75\n", DEBUG_FS_DATA_MAX);
+ else if (val == 1)
+ len = strlcpy(tbuf, "1.5\n", DEBUG_FS_DATA_MAX);
+ else if (val == 2)
+ len = strlcpy(tbuf, "3\n", DEBUG_FS_DATA_MAX);
+ else
+ len = strlcpy(tbuf, "6\n", DEBUG_FS_DATA_MAX);
+ break;
+ case MAX20730_DEBUGFS_IMAX:
+ ret = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_IMAX_MASK)
+ >> MAX20730_MFR_DEVSET2_IMAX_BIT_POS;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_OPERATION:
+ ret = i2c_smbus_read_byte_data(psu->client, PMBUS_OPERATION);
+ if (ret < 0)
+ return ret;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_ON_OFF_CONFIG:
+ ret = i2c_smbus_read_byte_data(psu->client, PMBUS_ON_OFF_CONFIG);
+ if (ret < 0)
+ return ret;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_SMBALERT_MASK:
+ ret = i2c_smbus_read_word_data(psu->client,
+ PMBUS_SMB_ALERT_MASK);
+ if (ret < 0)
+ return ret;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_VOUT_MODE:
+ ret = i2c_smbus_read_byte_data(psu->client, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX, "%d\n", ret);
+ break;
+ case MAX20730_DEBUGFS_VOUT_COMMAND:
+ ret = i2c_smbus_read_word_data(psu->client, PMBUS_VOUT_COMMAND);
+ if (ret < 0)
+ return ret;
+
+ ret = VOLT_FROM_REG(ret * 10000);
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX,
+ "%d.%d\n", ret / 10000, ret % 10000);
+ break;
+ case MAX20730_DEBUGFS_VOUT_MAX:
+ ret = i2c_smbus_read_word_data(psu->client, PMBUS_VOUT_MAX);
+ if (ret < 0)
+ return ret;
+
+ ret = VOLT_FROM_REG(ret * 10000);
+ len = scnprintf(tbuf, DEBUG_FS_DATA_MAX,
+ "%d.%d\n", ret / 10000, ret % 10000);
+ break;
+ default:
+ len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos, tbuf, len);
+}
+
+static const struct file_operations max20730_fops = {
+ .llseek = noop_llseek,
+ .read = max20730_debugfs_read,
+ .write = NULL,
+ .open = simple_open,
+};
+
+static int max20730_init_debugfs(struct i2c_client *client,
+ struct max20730_data *data)
+{
+ int ret, i;
+ struct dentry *debugfs;
+ struct dentry *max20730_dir;
+ struct max20730_debugfs_data *psu;
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET2);
+ if (ret < 0)
+ return ret;
+ data->mfr_devset2 = ret;
+
+ ret = i2c_smbus_read_word_data(client, MAX20730_MFR_VOUT_MIN);
+ if (ret < 0)
+ return ret;
+ data->mfr_voutmin = ret;
+
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return -ENOMEM;
+ psu->client = client;
+
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return -ENOENT;
+
+ max20730_dir = debugfs_create_dir(client->name, debugfs);
+ if (!max20730_dir)
+ return -ENOENT;
+
+ for (i = 0; i < MAX20730_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("vout_min", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_VOUT_MIN],
+ &max20730_fops);
+ debugfs_create_file("frequency", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_FREQUENCY],
+ &max20730_fops);
+ debugfs_create_file("power_good_delay", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_PG_DELAY],
+ &max20730_fops);
+ debugfs_create_file("internal_gain", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_INTERNAL_GAIN],
+ &max20730_fops);
+ debugfs_create_file("boot_voltage", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_BOOT_VOLTAGE],
+ &max20730_fops);
+ debugfs_create_file("out_voltage_ramp_rate", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_OUT_V_RAMP_RATE],
+ &max20730_fops);
+ debugfs_create_file("oc_protection_mode", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_OC_PROTECT_MODE],
+ &max20730_fops);
+ debugfs_create_file("soft_start_timing", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_SS_TIMING],
+ &max20730_fops);
+ debugfs_create_file("imax", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_IMAX],
+ &max20730_fops);
+ debugfs_create_file("operation", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_OPERATION],
+ &max20730_fops);
+ debugfs_create_file("on_off_config", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_ON_OFF_CONFIG],
+ &max20730_fops);
+ debugfs_create_file("smbalert_mask", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_SMBALERT_MASK],
+ &max20730_fops);
+ debugfs_create_file("vout_mode", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_VOUT_MODE],
+ &max20730_fops);
+ debugfs_create_file("vout_command", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_VOUT_COMMAND],
+ &max20730_fops);
+ debugfs_create_file("vout_max", 0444, max20730_dir,
+ &psu->debugfs_entries[MAX20730_DEBUGFS_VOUT_MAX],
+ &max20730_fops);
+
+ return 0;
+}
+#else
+static int max20730_init_debugfs(struct i2c_client *client,
+ struct max20730_data *data)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct i2c_device_id max20730_id[];
/*
* Convert discrete value to direct data format. Strictly speaking, all passed
@@ -114,6 +469,14 @@ static int max20730_read_word_data(struct i2c_client *client, int page,
max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3];
ret = val_to_direct(max_c, PSC_CURRENT_OUT, info);
break;
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret > 0 && data->vout_voltage_divider[0] && data->vout_voltage_divider[1]) {
+ u64 temp = DIV_ROUND_CLOSEST_ULL((u64)ret * data->vout_voltage_divider[1],
+ data->vout_voltage_divider[0]);
+ ret = clamp_val(temp, 0, 0xffff);
+ }
+ break;
default:
ret = -ENODATA;
break;
@@ -295,8 +658,7 @@ static const struct pmbus_driver_info max20730_info[] = {
},
};
-static int max20730_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max20730_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 buf[I2C_SMBUS_BLOCK_MAX + 1];
@@ -356,7 +718,7 @@ static int max20730_probe(struct i2c_client *client,
if (client->dev.of_node)
chip_id = (enum chips)of_device_get_match_data(dev);
else
- chip_id = id->driver_data;
+ chip_id = i2c_match_id(max20730_id, client)->driver_data;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -364,13 +726,31 @@ static int max20730_probe(struct i2c_client *client,
data->id = chip_id;
mutex_init(&data->lock);
memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info));
+ if (of_property_read_u32_array(client->dev.of_node, "vout-voltage-divider",
+ data->vout_voltage_divider,
+ ARRAY_SIZE(data->vout_voltage_divider)) != 0)
+ memset(data->vout_voltage_divider, 0, sizeof(data->vout_voltage_divider));
+ if (data->vout_voltage_divider[1] < data->vout_voltage_divider[0]) {
+ dev_err(dev,
+ "The total resistance of voltage divider is less than output resistance\n");
+ return -EINVAL;
+ }
ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1);
if (ret < 0)
return ret;
data->mfr_devset1 = ret;
- return pmbus_do_probe(client, id, &data->info);
+ ret = pmbus_do_probe(client, &data->info);
+ if (ret < 0)
+ return ret;
+
+ ret = max20730_init_debugfs(client, data);
+ if (ret)
+ dev_warn(dev, "Failed to register debugfs: %d\n",
+ ret);
+
+ return 0;
}
static const struct i2c_device_id max20730_id[] = {
@@ -398,7 +778,7 @@ static struct i2c_driver max20730_driver = {
.name = "max20730",
.of_match_table = max20730_of_match,
},
- .probe = max20730_probe,
+ .probe_new = max20730_probe,
.remove = pmbus_do_remove,
.id_table = max20730_id,
};
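
The new vout-voltage-divider handling above rescales the raw READ_VOUT value by the ratio of total to output resistance, using 64-bit math before clamping back to 16 bits. A hedged worked example, with purely illustrative divider values:

#include <linux/kernel.h>
#include <linux/minmax.h>

/*
 * Sketch of the divider math above, not driver code. With the DT
 * property vout-voltage-divider = <1000 2000> (output resistance
 * 1000, total resistance 2000, hypothetical values), the chip
 * senses half of the real output voltage, so the raw reading is
 * scaled back up by total/output = 2.
 */
static u16 scale_vout(u16 raw)
{
	u64 scaled = DIV_ROUND_CLOSEST_ULL((u64)raw * 2000, 1000);

	return clamp_val(scaled, 0, 0xffff);	/* scale_vout(0x0800) == 0x1000 */
}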
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index da3c38cb9a5c..921e92d82aec 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -26,10 +26,9 @@ static struct pmbus_driver_info max20751_info = {
PMBUS_HAVE_POUT,
};
-static int max20751_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max20751_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &max20751_info);
+ return pmbus_do_probe(client, &max20751_info);
}
static const struct i2c_device_id max20751_id[] = {
@@ -43,7 +42,7 @@ static struct i2c_driver max20751_driver = {
.driver = {
.name = "max20751",
},
- .probe = max20751_probe,
+ .probe_new = max20751_probe,
.remove = pmbus_do_remove,
.id_table = max20751_id,
};
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index d9aa5c873d21..839b957bc03e 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -324,8 +324,7 @@ static int max31785_configure_dual_tach(struct i2c_client *client,
return 0;
}
-static int max31785_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max31785_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
@@ -354,7 +353,7 @@ static int max31785_probe(struct i2c_client *client,
if (ret == MAX31785A) {
dual_tach = true;
} else if (ret == MAX31785) {
- if (!strcmp("max31785a", id->name))
+ if (!strcmp("max31785a", client->name))
dev_warn(dev, "Expected max3175a, found max31785: cannot provide secondary tachometer readings\n");
} else {
return -ENODEV;
@@ -366,7 +365,7 @@ static int max31785_probe(struct i2c_client *client,
return ret;
}
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id max31785_id[] = {
@@ -390,7 +389,7 @@ static struct i2c_driver max31785_driver = {
.name = "max31785",
.of_match_table = max31785_of_match,
},
- .probe = max31785_probe,
+ .probe_new = max31785_probe,
.remove = pmbus_do_remove,
.id_table = max31785_id,
};
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 18b4e071067f..f4cb196aaaf3 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -31,6 +31,13 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
#define MAX34440_STATUS_OT_FAULT BIT(5)
#define MAX34440_STATUS_OT_WARN BIT(6)
+/*
+ * The whole max344* family has IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
+ * swapped from the standard pmbus spec addresses.
+ */
+#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
+#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
+
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
@@ -41,6 +48,8 @@ struct max34440_data {
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
+static const struct i2c_device_id max34440_id[];
+
static int max34440_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -49,6 +58,14 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
const struct max34440_data *data = to_max34440_data(info);
switch (reg) {
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase,
+ MAX34440_IOUT_OC_FAULT_LIMIT);
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase,
+ MAX34440_IOUT_OC_WARN_LIMIT);
+ break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
MAX34440_MFR_VOUT_MIN);
@@ -115,6 +132,14 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
int ret;
switch (reg) {
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
+ word);
+ break;
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
+ word);
+ break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_POUT_PEAK, 0);
@@ -388,7 +413,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -419,7 +443,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
@@ -455,14 +478,12 @@ static struct pmbus_driver_info max34440_info[] = {
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
- .read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
};
-static int max34440_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max34440_probe(struct i2c_client *client)
{
struct max34440_data *data;
int rv;
@@ -471,8 +492,8 @@ static int max34440_probe(struct i2c_client *client,
GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->id = id->driver_data;
- data->info = max34440_info[id->driver_data];
+ data->id = i2c_match_id(max34440_id, client)->driver_data;
+ data->info = max34440_info[data->id];
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
@@ -480,7 +501,7 @@ static int max34440_probe(struct i2c_client *client,
return rv;
}
- return pmbus_do_probe(client, id, &data->info);
+ return pmbus_do_probe(client, &data->info);
}
static const struct i2c_device_id max34440_id[] = {
@@ -499,7 +520,7 @@ static struct i2c_driver max34440_driver = {
.driver = {
.name = "max34440",
},
- .probe = max34440_probe,
+ .probe_new = max34440_probe,
.remove = pmbus_do_remove,
.id_table = max34440_id,
};
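
The swapped-limit quirk described in the comment above has to be applied symmetrically in the read and write paths; condensed, the remap amounts to the following sketch (not a drop-in replacement for the hunks above):

#include "pmbus.h"

/* Map a standard PMBus command to the max344x swapped address. */
static int max34440_remap_oc_limit(int reg)
{
	switch (reg) {
	case PMBUS_IOUT_OC_FAULT_LIMIT:
		return MAX34440_IOUT_OC_FAULT_LIMIT;	/* 0x4A */
	case PMBUS_IOUT_OC_WARN_LIMIT:
		return MAX34440_IOUT_OC_WARN_LIMIT;	/* 0x46 */
	default:
		return reg;
	}
}

Both max34440_read_word_data() and max34440_write_word_data() can then delegate to the pmbus core helpers with the remapped register.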
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 643ccfc05106..4b2239a6afd3 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -165,10 +165,9 @@ static struct pmbus_driver_info max8688_info = {
.write_word_data = max8688_write_word_data,
};
-static int max8688_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max8688_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &max8688_info);
+ return pmbus_do_probe(client, &max8688_info);
}
static const struct i2c_device_id max8688_id[] = {
@@ -183,7 +182,7 @@ static struct i2c_driver max8688_driver = {
.driver = {
.name = "max8688",
},
- .probe = max8688_probe,
+ .probe_new = max8688_probe,
.remove = pmbus_do_remove,
.id_table = max8688_id,
};
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
new file mode 100644
index 000000000000..1c3e2a9453b1
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers
+ *
+ * Copyright (C) 2020 Nvidia Technologies Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+/* Vendor specific registers. */
+#define MP2975_MFR_APS_HYS_R2 0x0d
+#define MP2975_MFR_SLOPE_TRIM3 0x1d
+#define MP2975_MFR_VR_MULTI_CONFIG_R1 0x0d
+#define MP2975_MFR_VR_MULTI_CONFIG_R2 0x1d
+#define MP2975_MFR_APS_DECAY_ADV 0x56
+#define MP2975_MFR_DC_LOOP_CTRL 0x59
+#define MP2975_MFR_OCP_UCP_PHASE_SET 0x65
+#define MP2975_MFR_VR_CONFIG1 0x68
+#define MP2975_MFR_READ_CS1_2 0x82
+#define MP2975_MFR_READ_CS3_4 0x83
+#define MP2975_MFR_READ_CS5_6 0x84
+#define MP2975_MFR_READ_CS7_8 0x85
+#define MP2975_MFR_READ_CS9_10 0x86
+#define MP2975_MFR_READ_CS11_12 0x87
+#define MP2975_MFR_READ_IOUT_PK 0x90
+#define MP2975_MFR_READ_POUT_PK 0x91
+#define MP2975_MFR_READ_VREF_R1 0xa1
+#define MP2975_MFR_READ_VREF_R2 0xa3
+#define MP2975_MFR_OVP_TH_SET 0xe5
+#define MP2975_MFR_UVP_SET 0xe6
+
+#define MP2975_VOUT_FORMAT BIT(15)
+#define MP2975_VID_STEP_SEL_R1 BIT(4)
+#define MP2975_IMVP9_EN_R1 BIT(13)
+#define MP2975_VID_STEP_SEL_R2 BIT(3)
+#define MP2975_IMVP9_EN_R2 BIT(12)
+#define MP2975_PRT_THRES_DIV_OV_EN BIT(14)
+#define MP2975_DRMOS_KCS GENMASK(13, 12)
+#define MP2975_PROT_DEV_OV_OFF 10
+#define MP2975_PROT_DEV_OV_ON 5
+#define MP2975_SENSE_AMPL BIT(11)
+#define MP2975_SENSE_AMPL_UNIT 1
+#define MP2975_SENSE_AMPL_HALF 2
+#define MP2975_VIN_UV_LIMIT_UNIT 8
+
+#define MP2975_MAX_PHASE_RAIL1 8
+#define MP2975_MAX_PHASE_RAIL2 4
+#define MP2975_PAGE_NUM 2
+
+#define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_PHASE_VIRTUAL)
+
+struct mp2975_data {
+ struct pmbus_driver_info info;
+ int vout_scale;
+ int vid_step[MP2975_PAGE_NUM];
+ int vref[MP2975_PAGE_NUM];
+ int vref_off[MP2975_PAGE_NUM];
+ int vout_max[MP2975_PAGE_NUM];
+ int vout_ov_fixed[MP2975_PAGE_NUM];
+ int vout_format[MP2975_PAGE_NUM];
+ int curr_sense_gain[MP2975_PAGE_NUM];
+};
+
+#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
+
+static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * Enforce VOUT direct format, since the device allows setting
+ * different formats for different rails. Conversion from VID to
+ * direct is handled internally by the driver when necessary.
+ */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int
+mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
+ u16 mask)
+{
+ int ret = pmbus_read_word_data(client, page, phase, reg);
+
+ return (ret > 0) ? ret & mask : ret;
+}
+
+static int
+mp2975_vid2direct(int vrf, int val)
+{
+ switch (vrf) {
+ case vr12:
+ if (val >= 0x01)
+ return 250 + (val - 1) * 5;
+ break;
+ case vr13:
+ if (val >= 0x01)
+ return 500 + (val - 1) * 10;
+ break;
+ case imvp9:
+ if (val >= 0x01)
+ return 200 + (val - 1) * 10;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mp2975_read_phase(struct i2c_client *client, struct mp2975_data *data,
+ int page, int phase, u8 reg)
+{
+ int ph_curr, ret;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (!((phase + 1) % MP2975_PAGE_NUM))
+ ret >>= 8;
+ ret &= 0xff;
+
+ /*
+ * Output value is calculated as: (READ_CSx / 80 - 1.23) / (Kcs * Rcs)
+ * where:
+ * - Kcs is the DrMOS current sense gain of the power stage, which is
+ * obtained from register MP2975_MFR_VR_CONFIG1, bits 13-12, with
+ * the following DrMOS selection (data->curr_sense_gain[page]):
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A.
+ * - Rcs is the internal phase current sense resistor, a constant
+ * 1kΩ.
+ */
+ ph_curr = ret * 100 - 9800;
+
+ /*
+ * Phase current sensing as provided by the device is not accurate
+ * at light load, because sampling of the current bit weight has a
+ * large deviation there. To handle this, the phase current is
+ * reported as the maximum of the value calculated above and the
+ * total rail current divided by the number of phases.
+ */
+ ret = pmbus_read_word_data(client, page, phase, PMBUS_READ_IOUT);
+ if (ret < 0)
+ return ret;
+
+ return max_t(int, DIV_ROUND_CLOSEST(ret, data->info.phases[page]),
+ DIV_ROUND_CLOSEST(ph_curr, data->curr_sense_gain[page]));
+}
+
+static int
+mp2975_read_phases(struct i2c_client *client, struct mp2975_data *data,
+ int page, int phase)
+{
+ int ret;
+
+ if (page) {
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS7_8);
+ break;
+ case 2 ... 3:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS9_10);
+ break;
+ case 4 ... 5:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS11_12);
+ break;
+ default:
+ return -ENODATA;
+ }
+ } else {
+ switch (phase) {
+ case 0 ... 1:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS1_2);
+ break;
+ case 2 ... 3:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS3_4);
+ break;
+ case 4 ... 5:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS5_6);
+ break;
+ case 6 ... 7:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS7_8);
+ break;
+ case 8 ... 9:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS9_10);
+ break;
+ case 10 ... 11:
+ ret = mp2975_read_phase(client, data, page, phase,
+ MP2975_MFR_READ_CS11_12);
+ break;
+ default:
+ return -ENODATA;
+ }
+ }
+ return ret;
+}
+
+static int mp2975_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2975_data *data = to_mp2975_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret, MP2975_VIN_UV_LIMIT_UNIT);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+ * The register provides two over-voltage protection thresholds,
+ * for fixed (ovp2) and tracking (ovp1) modes. The minimum of
+ * these two values is reported as the over-voltage fault
+ * alarm.
+ */
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2975_MFR_OVP_TH_SET,
+ GENMASK(2, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = min_t(int, data->vout_max[page] + 50 * (ret + 1),
+ data->vout_ov_fixed[page]);
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2975_MFR_UVP_SET,
+ GENMASK(2, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(data->vref[page] * 10 - 50 *
+ (ret + 1) * data->vout_scale, 10);
+ break;
+ case PMBUS_READ_VOUT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(11, 0));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * READ_VOUT can be provided in VID or direct format. The
+ * format type is specified by bit 15 of the register
+ * MP2975_MFR_DC_LOOP_CTRL. The driver enforces VOUT direct
+ * format, since the device allows different formats for
+ * different rails and all VOUT limit registers are provided
+ * in direct format. If the format is VID, convert to
+ * direct.
+ */
+ if (data->vout_format[page] == vid)
+ ret = mp2975_vid2direct(info->vrm_version[page], ret);
+ break;
+ case PMBUS_VIRT_READ_POUT_MAX:
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2975_MFR_READ_POUT_PK,
+ GENMASK(12, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret, 4);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2975_MFR_READ_IOUT_PK,
+ GENMASK(12, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret, 4);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = mp2975_read_phases(client, data, page, phase);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_UT_WARN_LIMIT:
+ case PMBUS_UT_FAULT_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_LV_FAULT_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ return -ENXIO;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
+static int mp2975_identify_multiphase_rail2(struct i2c_client *client)
+{
+ int ret;
+
+ /*
+ * Identify multiphase configuration for rail 2 - can be from 0 to 4.
+ * If the phase count is zero, only page zero is supported.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ /* Identify multiphase for rail 2 - could be from 0 to 4. */
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_VR_MULTI_CONFIG_R2);
+ if (ret < 0)
+ return ret;
+
+ ret &= GENMASK(2, 0);
+ return (ret >= 4) ? 4 : ret;
+}
+
+static void mp2975_set_phase_rail1(struct pmbus_driver_info *info)
+{
+ int i;
+
+ for (i = 0 ; i < info->phases[0]; i++)
+ info->pfunc[i] = PMBUS_HAVE_IOUT;
+}
+
+static void
+mp2975_set_phase_rail2(struct pmbus_driver_info *info, int num_phases)
+{
+ int i;
+
+ /* Set phases for rail 2 from upper to lower. */
+ for (i = 1; i <= num_phases; i++)
+ info->pfunc[MP2975_MAX_PHASE_RAIL1 - i] = PMBUS_HAVE_IOUT;
+}
+
+static int
+mp2975_identify_multiphase(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int num_phases2, ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ /* Identify multiphase for rail 1 - could be from 1 to 8. */
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_VR_MULTI_CONFIG_R1);
+ if (ret <= 0)
+ return ret;
+
+ info->phases[0] = ret & GENMASK(3, 0);
+
+ /*
+ * The device provides a total of 8 PWM pins, which can be
+ * distributed between rail 1 and rail 2 in different phase-count
+ * configurations. Rail 1 can use up to 8 phases, while rail 2 can
+ * use at most 4. When rail 1's phase count is configured as 0,
+ * rail 1 operates in 1-phase DCM. When rail 2's phase count is
+ * configured as 0, rail 2 is disabled.
+ */
+ if (info->phases[0] > MP2975_MAX_PHASE_RAIL1)
+ return -EINVAL;
+
+ mp2975_set_phase_rail1(info);
+ num_phases2 = min(MP2975_MAX_PHASE_RAIL1 - info->phases[0],
+ MP2975_MAX_PHASE_RAIL2);
+ if (info->phases[1] && info->phases[1] <= num_phases2)
+ mp2975_set_phase_rail2(info, num_phases2);
+
+ return 0;
+}
+
+static int
+mp2975_identify_vid(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info, u32 reg, int page,
+ u32 imvp_bit, u32 vr_bit)
+{
+ int ret;
+
+ /* Identify VID mode and step selection. */
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ if (ret & imvp_bit) {
+ info->vrm_version[page] = imvp9;
+ data->vid_step[page] = MP2975_PROT_DEV_OV_OFF;
+ } else if (ret & vr_bit) {
+ info->vrm_version[page] = vr12;
+ data->vid_step[page] = MP2975_PROT_DEV_OV_ON;
+ } else {
+ info->vrm_version[page] = vr13;
+ data->vid_step[page] = MP2975_PROT_DEV_OV_OFF;
+ }
+
+ return 0;
+}
+
+static int
+mp2975_identify_rails_vid(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ /* Identify VID mode for rail 1. */
+ ret = mp2975_identify_vid(client, data, info,
+ MP2975_MFR_VR_MULTI_CONFIG_R1, 0,
+ MP2975_IMVP9_EN_R1, MP2975_VID_STEP_SEL_R1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify VID mode for rail 2, if connected. */
+ if (info->phases[1])
+ ret = mp2975_identify_vid(client, data, info,
+ MP2975_MFR_VR_MULTI_CONFIG_R2, 1,
+ MP2975_IMVP9_EN_R2,
+ MP2975_VID_STEP_SEL_R2);
+ return ret;
+}
+
+static int
+mp2975_current_sense_gain_get(struct i2c_client *client,
+ struct mp2975_data *data)
+{
+ int i, ret;
+
+ /*
+ * Obtain the DrMOS current sense gain of the power stage from the
+ * register MP2975_MFR_VR_CONFIG1, bits 13-12. The value is selected as follows:
+ * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A. Other
+ * values are invalid.
+ */
+ for (i = 0 ; i < data->info.pages; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_read_word_data(client,
+ MP2975_MFR_VR_CONFIG1);
+ if (ret < 0)
+ return ret;
+
+ switch ((ret & MP2975_DRMOS_KCS) >> 12) {
+ case 0:
+ data->curr_sense_gain[i] = 50;
+ break;
+ case 1:
+ data->curr_sense_gain[i] = 85;
+ break;
+ case 2:
+ data->curr_sense_gain[i] = 97;
+ break;
+ default:
+ data->curr_sense_gain[i] = 100;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mp2975_vref_get(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 3);
+ if (ret < 0)
+ return ret;
+
+ /* Get voltage reference value for rail 1. */
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_READ_VREF_R1);
+ if (ret < 0)
+ return ret;
+
+ data->vref[0] = ret * data->vid_step[0];
+
+ /* Get voltage reference value for rail 2, if connected. */
+ if (data->info.pages == MP2975_PAGE_NUM) {
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_READ_VREF_R2);
+ if (ret < 0)
+ return ret;
+
+ data->vref[1] = ret * data->vid_step[1];
+ }
+ return 0;
+}
+
+static int
+mp2975_vref_offset_get(struct i2c_client *client, struct mp2975_data *data,
+ int page)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_OVP_TH_SET);
+ if (ret < 0)
+ return ret;
+
+ switch ((ret & GENMASK(5, 3)) >> 3) {
+ case 1:
+ data->vref_off[page] = 140;
+ break;
+ case 2:
+ data->vref_off[page] = 220;
+ break;
+ case 4:
+ data->vref_off[page] = 400;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+mp2975_vout_max_get(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info, int page)
+{
+ int ret;
+
+ /* Get maximum reference voltage of VID-DAC in VID format. */
+ ret = i2c_smbus_read_word_data(client, PMBUS_VOUT_MAX);
+ if (ret < 0)
+ return ret;
+
+ data->vout_max[page] = mp2975_vid2direct(info->vrm_version[page], ret &
+ GENMASK(8, 0));
+ return 0;
+}
+
+static int
+mp2975_identify_vout_format(struct i2c_client *client,
+ struct mp2975_data *data, int page)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_DC_LOOP_CTRL);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MP2975_VOUT_FORMAT)
+ data->vout_format[page] = vid;
+ else
+ data->vout_format[page] = direct;
+ return 0;
+}
+
+static int
+mp2975_vout_ov_scale_get(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int thres_dev, sense_ampl, ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Get the divider for the over- and under-voltage protection
+ * threshold configuration from the Advanced Options of Auto Phase
+ * Shedding and Decay register.
+ */
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_APS_DECAY_ADV);
+ if (ret < 0)
+ return ret;
+ thres_dev = ret & MP2975_PRT_THRES_DIV_OV_EN ? MP2975_PROT_DEV_OV_ON :
+ MP2975_PROT_DEV_OV_OFF;
+
+ /* Select the gain of remote sense amplifier. */
+ ret = i2c_smbus_read_word_data(client, PMBUS_VOUT_SCALE_LOOP);
+ if (ret < 0)
+ return ret;
+ sense_ampl = ret & MP2975_SENSE_AMPL ? MP2975_SENSE_AMPL_HALF :
+ MP2975_SENSE_AMPL_UNIT;
+
+ data->vout_scale = sense_ampl * thres_dev;
+
+ return 0;
+}
+
+static int
+mp2975_vout_per_rail_config_get(struct i2c_client *client,
+ struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int i, ret;
+
+ for (i = 0; i < data->info.pages; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ /* Obtain voltage reference offsets. */
+ ret = mp2975_vref_offset_get(client, data, i);
+ if (ret < 0)
+ return ret;
+
+ /* Obtain maximum voltage values. */
+ ret = mp2975_vout_max_get(client, data, info, i);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Get the VOUT format for the READ_VOUT command: VID or direct.
+ * Pages on the same device can be configured with different
+ * formats.
+ */
+ ret = mp2975_identify_vout_format(client, data, i);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the fixed over-voltage value. Thresholds are provided as a
+ * fixed value and a tracking value; the minimum of the two is
+ * exposed as the over-voltage critical threshold.
+ */
+ data->vout_ov_fixed[i] = data->vref[i] +
+ DIV_ROUND_CLOSEST(data->vref_off[i] *
+ data->vout_scale,
+ 10);
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info mp2975_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .m[PSC_POWER] = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
+ .read_byte_data = mp2975_read_byte_data,
+ .read_word_data = mp2975_read_word_data,
+};
+
+static int mp2975_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2975_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2975_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2975_info, sizeof(*info));
+ info = &data->info;
+
+ /* Identify multiphase configuration for rail 2. */
+ ret = mp2975_identify_multiphase_rail2(client);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /* Two rails are connected. */
+ data->info.pages = MP2975_PAGE_NUM;
+ data->info.phases[1] = ret;
+ data->info.func[1] = MP2975_RAIL2_FUNC;
+ }
+
+ /* Identify multiphase configuration. */
+ ret = mp2975_identify_multiphase(client, data, info);
+ if (ret)
+ return ret;
+
+ /* Identify VID setting per rail. */
+ ret = mp2975_identify_rails_vid(client, data, info);
+ if (ret < 0)
+ return ret;
+
+ /* Obtain current sense gain of power stage. */
+ ret = mp2975_current_sense_gain_get(client, data);
+ if (ret)
+ return ret;
+
+ /* Obtain voltage reference values. */
+ ret = mp2975_vref_get(client, data, info);
+ if (ret)
+ return ret;
+
+ /* Obtain vout over-voltage scales. */
+ ret = mp2975_vout_ov_scale_get(client, data, info);
+ if (ret < 0)
+ return ret;
+
+ /* Obtain offsets, maximum and format for vout. */
+ ret = mp2975_vout_per_rail_config_get(client, data, info);
+ if (ret)
+ return ret;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp2975_id[] = {
+ {"mp2975", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, mp2975_id);
+
+static const struct of_device_id __maybe_unused mp2975_of_match[] = {
+ {.compatible = "mps,mp2975"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2975_of_match);
+
+static struct i2c_driver mp2975_driver = {
+ .driver = {
+ .name = "mp2975",
+ .of_match_table = of_match_ptr(mp2975_of_match),
+ },
+ .probe_new = mp2975_probe,
+ .remove = pmbus_do_remove,
+ .id_table = mp2975_id,
+};
+
+module_i2c_driver(mp2975_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2975 device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 6d384e8ee1db..20f1af9165c2 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -20,6 +20,8 @@ struct pmbus_device_info {
u32 flags;
};
+static const struct i2c_device_id pmbus_id[];
+
/*
* Find sensor groups and status registers on each page.
*/
@@ -159,8 +161,7 @@ abort:
return ret;
}
-static int pmbus_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pmbus_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
struct pmbus_platform_data *pdata = NULL;
@@ -171,7 +172,7 @@ static int pmbus_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- device_info = (struct pmbus_device_info *)id->driver_data;
+ device_info = (struct pmbus_device_info *)i2c_match_id(pmbus_id, client)->driver_data;
if (device_info->flags & PMBUS_SKIP_STATUS_CHECK) {
pdata = devm_kzalloc(dev, sizeof(struct pmbus_platform_data),
GFP_KERNEL);
@@ -185,7 +186,7 @@ static int pmbus_probe(struct i2c_client *client,
info->identify = pmbus_identify;
dev->platform_data = pdata;
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct pmbus_device_info pmbus_info_one = {
@@ -236,7 +237,7 @@ static struct i2c_driver pmbus_driver = {
.driver = {
.name = "pmbus",
},
- .probe = pmbus_probe,
+ .probe_new = pmbus_probe,
.remove = pmbus_do_remove,
.id_table = pmbus_id,
};
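
The cast in pmbus_probe() works because the generic pmbus id table stores a pointer in driver_data rather than an enum value. Roughly, under that assumption (the entry shown is illustrative; the real table lists many devices):

static const struct i2c_device_id pmbus_id[] = {
	{ "pmbus", (kernel_ulong_t)&pmbus_info_one },
	{ }
};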
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 18e06fc6c53f..88a5df2633fb 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -119,9 +119,22 @@ enum pmbus_regs {
PMBUS_MFR_DATE = 0x9D,
PMBUS_MFR_SERIAL = 0x9E,
+ PMBUS_MFR_VIN_MIN = 0xA0,
+ PMBUS_MFR_VIN_MAX = 0xA1,
+ PMBUS_MFR_IIN_MAX = 0xA2,
+ PMBUS_MFR_PIN_MAX = 0xA3,
+ PMBUS_MFR_VOUT_MIN = 0xA4,
+ PMBUS_MFR_VOUT_MAX = 0xA5,
+ PMBUS_MFR_IOUT_MAX = 0xA6,
+ PMBUS_MFR_POUT_MAX = 0xA7,
+
PMBUS_IC_DEVICE_ID = 0xAD,
PMBUS_IC_DEVICE_REV = 0xAE,
+ PMBUS_MFR_MAX_TEMP_1 = 0xC0,
+ PMBUS_MFR_MAX_TEMP_2 = 0xC1,
+ PMBUS_MFR_MAX_TEMP_3 = 0xC2,
+
/*
* Virtual registers.
* Useful to support attributes which are not supported by standard PMBus
@@ -476,8 +489,7 @@ int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
void pmbus_clear_faults(struct i2c_client *client);
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
-int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info);
+int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info);
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 44535add3a4a..b0e2820a2d57 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -16,7 +16,6 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <linux/jiffies.h>
#include <linux/pmbus.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -27,21 +26,6 @@
* with each call to krealloc
*/
#define PMBUS_ATTR_ALLOC_SIZE 32
-
-/*
- * Index into status register array, per status register group
- */
-#define PB_STATUS_BASE 0
-#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
-#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
-#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
-#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
-#define PB_STATUS_TEMP_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
-#define PB_STATUS_INPUT_BASE (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
-#define PB_STATUS_VMON_BASE (PB_STATUS_INPUT_BASE + 1)
-
-#define PB_NUM_STATUS_REG (PB_STATUS_VMON_BASE + 1)
-
#define PMBUS_NAME_SIZE 24
struct pmbus_sensor {
@@ -77,6 +61,21 @@ struct pmbus_label {
#define to_pmbus_label(_attr) \
container_of(_attr, struct pmbus_label, attribute)
+/* Macros for converting between sensor index and register/page/status mask */
+
+#define PB_STATUS_MASK 0xffff
+#define PB_REG_SHIFT 16
+#define PB_REG_MASK 0x3ff
+#define PB_PAGE_SHIFT 26
+#define PB_PAGE_MASK 0x3f
+
+#define pb_reg_to_index(page, reg, mask) (((page) << PB_PAGE_SHIFT) | \
+ ((reg) << PB_REG_SHIFT) | (mask))
+
+#define pb_index_to_page(index) (((index) >> PB_PAGE_SHIFT) & PB_PAGE_MASK)
+#define pb_index_to_reg(index) (((index) >> PB_REG_SHIFT) & PB_REG_MASK)
+#define pb_index_to_mask(index) ((index) & PB_STATUS_MASK)
+
struct pmbus_data {
struct device *dev;
struct device *hwmon_dev;
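
The packing macros above budget 6 bits for the page, 10 for the register, and 16 for the status mask within a single attribute index. A worked example (the mask value is hypothetical):

/*
 * pb_reg_to_index(1, 0x7A, 0x0010)	-- 0x7A is PMBUS_STATUS_VOUT
 *   == (1 << 26) | (0x7A << 16) | 0x0010
 *   == 0x047a0010
 * and decoding recovers each field:
 * pb_index_to_page(0x047a0010) == 1
 * pb_index_to_reg(0x047a0010)  == 0x7a
 * pb_index_to_mask(0x047a0010) == 0x0010
 */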
@@ -97,14 +96,6 @@ struct pmbus_data {
struct pmbus_sensor *sensors;
struct mutex update_lock;
- bool valid;
- unsigned long last_updated; /* in jiffies */
-
- /*
- * A single status register covers multiple attributes,
- * so we keep them all together.
- */
- u16 status[PB_NUM_STATUS_REG];
bool has_status_word; /* device uses STATUS_WORD register */
int (*read_status)(struct i2c_client *client, int page);
@@ -143,8 +134,10 @@ static const int pmbus_fan_command_registers[] = {
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor *sensor;
- data->valid = false;
+ for (sensor = data->sensors; sensor; sensor = sensor->next)
+ sensor->data = -ENODATA;
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
@@ -560,68 +553,29 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
-static struct _pmbus_status {
- u32 func;
- u16 base;
- u16 reg;
-} pmbus_status[] = {
- { PMBUS_HAVE_STATUS_VOUT, PB_STATUS_VOUT_BASE, PMBUS_STATUS_VOUT },
- { PMBUS_HAVE_STATUS_IOUT, PB_STATUS_IOUT_BASE, PMBUS_STATUS_IOUT },
- { PMBUS_HAVE_STATUS_TEMP, PB_STATUS_TEMP_BASE,
- PMBUS_STATUS_TEMPERATURE },
- { PMBUS_HAVE_STATUS_FAN12, PB_STATUS_FAN_BASE, PMBUS_STATUS_FAN_12 },
- { PMBUS_HAVE_STATUS_FAN34, PB_STATUS_FAN34_BASE, PMBUS_STATUS_FAN_34 },
-};
-
-static struct pmbus_data *pmbus_update_device(struct device *dev)
+static int pmbus_get_status(struct i2c_client *client, int page, int reg)
{
- struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- struct pmbus_sensor *sensor;
-
- mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- int i, j;
-
- for (i = 0; i < info->pages; i++) {
- data->status[PB_STATUS_BASE + i]
- = data->read_status(client, i);
- for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
- struct _pmbus_status *s = &pmbus_status[j];
-
- if (!(info->func[i] & s->func))
- continue;
- data->status[s->base + i]
- = _pmbus_read_byte_data(client, i,
- s->reg);
- }
- }
+ int status;
- if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
- data->status[PB_STATUS_INPUT_BASE]
- = _pmbus_read_byte_data(client, 0,
- PMBUS_STATUS_INPUT);
-
- if (info->func[0] & PMBUS_HAVE_STATUS_VMON)
- data->status[PB_STATUS_VMON_BASE]
- = _pmbus_read_byte_data(client, 0,
- PMBUS_VIRT_STATUS_VMON);
-
- for (sensor = data->sensors; sensor; sensor = sensor->next) {
- if (!data->valid || sensor->update)
- sensor->data
- = _pmbus_read_word_data(client,
- sensor->page,
- sensor->phase,
- sensor->reg);
- }
- pmbus_clear_faults(client);
- data->last_updated = jiffies;
- data->valid = 1;
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ status = data->read_status(client, page);
+ break;
+ default:
+ status = _pmbus_read_byte_data(client, page, reg);
+ break;
}
- mutex_unlock(&data->update_lock);
- return data;
+ if (status < 0)
+ pmbus_clear_faults(client);
+ return status;
+}
+
+static void pmbus_update_sensor_data(struct i2c_client *client, struct pmbus_sensor *sensor)
+{
+ if (sensor->data < 0 || sensor->update)
+ sensor->data = _pmbus_read_word_data(client, sensor->page,
+ sensor->phase, sensor->reg);
}
/*
@@ -919,38 +873,51 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
* If a negative value is stored in any of the referenced registers, this value
* reflects an error code which will be returned.
*/
-static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
+static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
int index)
{
+ struct pmbus_data *data = i2c_get_clientdata(client);
struct pmbus_sensor *s1 = b->s1;
struct pmbus_sensor *s2 = b->s2;
- u16 reg = (index >> 16) & 0xffff;
- u16 mask = index & 0xffff;
+ u16 mask = pb_index_to_mask(index);
+ u8 page = pb_index_to_page(index);
+ u16 reg = pb_index_to_reg(index);
int ret, status;
u16 regval;
- status = data->status[reg];
- if (status < 0)
- return status;
+ mutex_lock(&data->update_lock);
+ status = pmbus_get_status(client, page, reg);
+ if (status < 0) {
+ ret = status;
+ goto unlock;
+ }
+
+ if (s1)
+ pmbus_update_sensor_data(client, s1);
+ if (s2)
+ pmbus_update_sensor_data(client, s2);
regval = status & mask;
- if (!s1 && !s2) {
- ret = !!regval;
- } else if (!s1 || !s2) {
- WARN(1, "Bad boolean descriptor %p: s1=%p, s2=%p\n", b, s1, s2);
- return 0;
- } else {
+ if (s1 && s2) {
s64 v1, v2;
- if (s1->data < 0)
- return s1->data;
- if (s2->data < 0)
- return s2->data;
+ if (s1->data < 0) {
+ ret = s1->data;
+ goto unlock;
+ }
+ if (s2->data < 0) {
+ ret = s2->data;
+ goto unlock;
+ }
v1 = pmbus_reg2data(data, s1);
v2 = pmbus_reg2data(data, s2);
ret = !!(regval && v1 >= v2);
+ } else {
+ ret = !!regval;
}
+unlock:
+ mutex_unlock(&data->update_lock);
return ret;
}
@@ -959,10 +926,10 @@ static ssize_t pmbus_show_boolean(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pmbus_boolean *boolean = to_pmbus_boolean(attr);
- struct pmbus_data *data = pmbus_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
int val;
- val = pmbus_get_boolean(data, boolean, attr->index);
+ val = pmbus_get_boolean(client, boolean, attr->index);
if (val < 0)
return val;
return snprintf(buf, PAGE_SIZE, "%d\n", val);
@@ -971,13 +938,19 @@ static ssize_t pmbus_show_boolean(struct device *dev,
static ssize_t pmbus_show_sensor(struct device *dev,
struct device_attribute *devattr, char *buf)
{
- struct pmbus_data *data = pmbus_update_device(dev);
+ struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ ssize_t ret;
+ mutex_lock(&data->update_lock);
+ pmbus_update_sensor_data(client, sensor);
if (sensor->data < 0)
- return sensor->data;
-
- return snprintf(buf, PAGE_SIZE, "%lld\n", pmbus_reg2data(data, sensor));
+ ret = sensor->data;
+ else
+ ret = snprintf(buf, PAGE_SIZE, "%lld\n", pmbus_reg2data(data, sensor));
+ mutex_unlock(&data->update_lock);
+ return ret;
}
static ssize_t pmbus_set_sensor(struct device *dev,
@@ -1018,9 +991,9 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
{
if (data->num_attributes >= data->max_attributes - 1) {
int new_max_attrs = data->max_attributes + PMBUS_ATTR_ALLOC_SIZE;
- void *new_attrs = krealloc(data->group.attrs,
- new_max_attrs * sizeof(void *),
- GFP_KERNEL);
+ void *new_attrs = devm_krealloc(data->dev, data->group.attrs,
+ new_max_attrs * sizeof(void *),
+ GFP_KERNEL);
if (!new_attrs)
return -ENOMEM;
data->group.attrs = new_attrs;
@@ -1068,11 +1041,14 @@ static int pmbus_add_boolean(struct pmbus_data *data,
const char *name, const char *type, int seq,
struct pmbus_sensor *s1,
struct pmbus_sensor *s2,
- u16 reg, u16 mask)
+ u8 page, u16 reg, u16 mask)
{
struct pmbus_boolean *boolean;
struct sensor_device_attribute *a;
+ if (WARN((s1 && !s2) || (!s1 && s2), "Bad s1/s2 parameters\n"))
+ return -EINVAL;
+
boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
if (!boolean)
return -ENOMEM;
@@ -1084,7 +1060,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
boolean->s1 = s1;
boolean->s2 = s2;
pmbus_attr_init(a, boolean->name, 0444, pmbus_show_boolean, NULL,
- (reg << 16) | mask);
+ pb_reg_to_index(page, reg, mask));
return pmbus_add_attribute(data, &a->dev_attr.attr);
}
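
The page/register/mask triple now travels through the sysfs attribute
index as one packed integer. The pb_* helpers are defined earlier in
pmbus_core.c and are not part of this hunk; a hypothetical reconstruction
consistent with the accessors used above (16-bit mask in the low bits,
then an 8-bit register and an 8-bit page) would be:

/* Sketch only; the real macros live earlier in pmbus_core.c. */
#define PB_STATUS_MASK	0xffff
#define PB_REG_SHIFT	16
#define PB_REG_MASK	0xff
#define PB_PAGE_SHIFT	24
#define PB_PAGE_MASK	0xff

#define pb_index_to_mask(index)	((index) & PB_STATUS_MASK)
#define pb_index_to_reg(index)	(((index) >> PB_REG_SHIFT) & PB_REG_MASK)
#define pb_index_to_page(index)	(((index) >> PB_PAGE_SHIFT) & PB_PAGE_MASK)
#define pb_reg_to_index(page, reg, mask) \
	(((page) << PB_PAGE_SHIFT) | ((reg) << PB_REG_SHIFT) | (mask))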
@@ -1121,6 +1097,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
sensor->class = class;
sensor->update = update;
sensor->convert = convert;
+ sensor->data = -ENODATA;
pmbus_dev_attr_init(a, sensor->name,
readonly ? 0444 : 0644,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1201,7 +1178,7 @@ struct pmbus_sensor_attr {
bool compare; /* true if compare function needed */
u32 func; /* sensor mask */
u32 sfunc; /* sensor status mask */
- int sbase; /* status base register */
+ int sreg; /* status register */
const struct pmbus_limit_attr *limit;/* limit registers */
};
@@ -1239,7 +1216,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
: NULL,
attr->compare ? l->low ? base : curr
: NULL,
- attr->sbase + page, l->sbit);
+ page, attr->sreg, l->sbit);
if (ret)
return ret;
have_alarm = 1;
@@ -1289,7 +1266,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
pmbus_check_status_register(client, page)) {
ret = pmbus_add_boolean(data, name, "alarm", index,
NULL, NULL,
- PB_STATUS_BASE + page,
+ page, PMBUS_STATUS_WORD,
attr->gbit);
if (ret)
return ret;
@@ -1404,6 +1381,12 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_VIN_HISTORY,
.attr = "reset_history",
+ }, {
+ .reg = PMBUS_MFR_VIN_MIN,
+ .attr = "rated_min",
+ }, {
+ .reg = PMBUS_MFR_VIN_MAX,
+ .attr = "rated_max",
},
};
@@ -1467,7 +1450,13 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_VOUT_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_VOUT_MIN,
+ .attr = "rated_min",
+ }, {
+ .reg = PMBUS_MFR_VOUT_MAX,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_sensor_attr voltage_attributes[] = {
@@ -1477,7 +1466,7 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.label = "vin",
.func = PMBUS_HAVE_VIN,
.sfunc = PMBUS_HAVE_STATUS_INPUT,
- .sbase = PB_STATUS_INPUT_BASE,
+ .sreg = PMBUS_STATUS_INPUT,
.gbit = PB_STATUS_VIN_UV,
.limit = vin_limit_attrs,
.nlimit = ARRAY_SIZE(vin_limit_attrs),
@@ -1487,7 +1476,7 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.label = "vmon",
.func = PMBUS_HAVE_VMON,
.sfunc = PMBUS_HAVE_STATUS_VMON,
- .sbase = PB_STATUS_VMON_BASE,
+ .sreg = PMBUS_VIRT_STATUS_VMON,
.limit = vmon_limit_attrs,
.nlimit = ARRAY_SIZE(vmon_limit_attrs),
}, {
@@ -1502,7 +1491,7 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
.paged = true,
.func = PMBUS_HAVE_VOUT,
.sfunc = PMBUS_HAVE_STATUS_VOUT,
- .sbase = PB_STATUS_VOUT_BASE,
+ .sreg = PMBUS_STATUS_VOUT,
.gbit = PB_STATUS_VOUT_OV,
.limit = vout_limit_attrs,
.nlimit = ARRAY_SIZE(vout_limit_attrs),
@@ -1537,7 +1526,10 @@ static const struct pmbus_limit_attr iin_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_IIN_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_IIN_MAX,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_limit_attr iout_limit_attrs[] = {
@@ -1571,7 +1563,10 @@ static const struct pmbus_limit_attr iout_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_IOUT_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_IOUT_MAX,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_sensor_attr current_attributes[] = {
@@ -1581,7 +1576,7 @@ static const struct pmbus_sensor_attr current_attributes[] = {
.label = "iin",
.func = PMBUS_HAVE_IIN,
.sfunc = PMBUS_HAVE_STATUS_INPUT,
- .sbase = PB_STATUS_INPUT_BASE,
+ .sreg = PMBUS_STATUS_INPUT,
.gbit = PB_STATUS_INPUT,
.limit = iin_limit_attrs,
.nlimit = ARRAY_SIZE(iin_limit_attrs),
@@ -1592,7 +1587,7 @@ static const struct pmbus_sensor_attr current_attributes[] = {
.paged = true,
.func = PMBUS_HAVE_IOUT,
.sfunc = PMBUS_HAVE_STATUS_IOUT,
- .sbase = PB_STATUS_IOUT_BASE,
+ .sreg = PMBUS_STATUS_IOUT,
.gbit = PB_STATUS_IOUT_OC,
.limit = iout_limit_attrs,
.nlimit = ARRAY_SIZE(iout_limit_attrs),
@@ -1622,7 +1617,10 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_PIN_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_PIN_MAX,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_limit_attr pout_limit_attrs[] = {
@@ -1656,7 +1654,10 @@ static const struct pmbus_limit_attr pout_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_POUT_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_POUT_MAX,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_sensor_attr power_attributes[] = {
@@ -1666,7 +1667,7 @@ static const struct pmbus_sensor_attr power_attributes[] = {
.label = "pin",
.func = PMBUS_HAVE_PIN,
.sfunc = PMBUS_HAVE_STATUS_INPUT,
- .sbase = PB_STATUS_INPUT_BASE,
+ .sreg = PMBUS_STATUS_INPUT,
.gbit = PB_STATUS_INPUT,
.limit = pin_limit_attrs,
.nlimit = ARRAY_SIZE(pin_limit_attrs),
@@ -1677,7 +1678,7 @@ static const struct pmbus_sensor_attr power_attributes[] = {
.paged = true,
.func = PMBUS_HAVE_POUT,
.sfunc = PMBUS_HAVE_STATUS_IOUT,
- .sbase = PB_STATUS_IOUT_BASE,
+ .sreg = PMBUS_STATUS_IOUT,
.limit = pout_limit_attrs,
.nlimit = ARRAY_SIZE(pout_limit_attrs),
}
@@ -1720,7 +1721,10 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
}, {
.reg = PMBUS_VIRT_RESET_TEMP_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_MAX_TEMP_1,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_limit_attr temp_limit_attrs2[] = {
@@ -1758,7 +1762,10 @@ static const struct pmbus_limit_attr temp_limit_attrs2[] = {
}, {
.reg = PMBUS_VIRT_RESET_TEMP2_HISTORY,
.attr = "reset_history",
- }
+ }, {
+ .reg = PMBUS_MFR_MAX_TEMP_2,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_limit_attr temp_limit_attrs3[] = {
@@ -1784,7 +1791,10 @@ static const struct pmbus_limit_attr temp_limit_attrs3[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_TEMP_OT_FAULT,
- }
+ }, {
+ .reg = PMBUS_MFR_MAX_TEMP_3,
+ .attr = "rated_max",
+ },
};
static const struct pmbus_sensor_attr temp_attributes[] = {
@@ -1796,7 +1806,7 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.compare = true,
.func = PMBUS_HAVE_TEMP,
.sfunc = PMBUS_HAVE_STATUS_TEMP,
- .sbase = PB_STATUS_TEMP_BASE,
+ .sreg = PMBUS_STATUS_TEMPERATURE,
.gbit = PB_STATUS_TEMPERATURE,
.limit = temp_limit_attrs,
.nlimit = ARRAY_SIZE(temp_limit_attrs),
@@ -1808,7 +1818,7 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.compare = true,
.func = PMBUS_HAVE_TEMP2,
.sfunc = PMBUS_HAVE_STATUS_TEMP,
- .sbase = PB_STATUS_TEMP_BASE,
+ .sreg = PMBUS_STATUS_TEMPERATURE,
.gbit = PB_STATUS_TEMPERATURE,
.limit = temp_limit_attrs2,
.nlimit = ARRAY_SIZE(temp_limit_attrs2),
@@ -1820,7 +1830,7 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.compare = true,
.func = PMBUS_HAVE_TEMP3,
.sfunc = PMBUS_HAVE_STATUS_TEMP,
- .sbase = PB_STATUS_TEMP_BASE,
+ .sreg = PMBUS_STATUS_TEMPERATURE,
.gbit = PB_STATUS_TEMPERATURE,
.limit = temp_limit_attrs3,
.nlimit = ARRAY_SIZE(temp_limit_attrs3),
@@ -1945,19 +1955,19 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if ((info->func[page] & pmbus_fan_status_flags[f]) &&
pmbus_check_byte_register(client,
page, pmbus_fan_status_registers[f])) {
- int base;
+ int reg;
if (f > 1) /* fan 3, 4 */
- base = PB_STATUS_FAN34_BASE + page;
+ reg = PMBUS_STATUS_FAN_34;
else
- base = PB_STATUS_FAN_BASE + page;
+ reg = PMBUS_STATUS_FAN_12;
ret = pmbus_add_boolean(data, "fan",
- "alarm", index, NULL, NULL, base,
+ "alarm", index, NULL, NULL, page, reg,
PB_FAN_FAN1_WARNING >> (f & 1));
if (ret)
return ret;
ret = pmbus_add_boolean(data, "fan",
- "fault", index, NULL, NULL, base,
+ "fault", index, NULL, NULL, page, reg,
PB_FAN_FAN1_FAULT >> (f & 1));
if (ret)
return ret;
@@ -2006,8 +2016,11 @@ static ssize_t pmbus_show_samples(struct device *dev,
int val;
struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
val = _pmbus_read_word_data(client, reg->page, 0xff, reg->attr->reg);
+ mutex_unlock(&data->update_lock);
if (val < 0)
return val;
@@ -2346,6 +2359,42 @@ static int pmbus_debugfs_get_status(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status,
NULL, "0x%04llx\n");
+static int pmbus_debugfs_get_pec(void *data, u64 *val)
+{
+ struct i2c_client *client = data;
+
+ *val = !!(client->flags & I2C_CLIENT_PEC);
+
+ return 0;
+}
+
+static int pmbus_debugfs_set_pec(void *data, u64 val)
+{
+ int rc;
+ struct i2c_client *client = data;
+
+ if (!val) {
+ client->flags &= ~I2C_CLIENT_PEC;
+ return 0;
+ }
+
+ if (val != 1)
+ return -EINVAL;
+
+ rc = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+ if (rc < 0)
+ return rc;
+
+ if (!(rc & PB_CAPABILITY_ERROR_CHECK))
+ return -EOPNOTSUPP;
+
+ client->flags |= I2C_CLIENT_PEC;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_pec, pmbus_debugfs_get_pec,
+ pmbus_debugfs_set_pec, "%llu\n");
+
static int pmbus_init_debugfs(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -2374,6 +2423,9 @@ static int pmbus_init_debugfs(struct i2c_client *client,
if (!entries)
return -ENOMEM;
+ debugfs_create_file("pec", 0664, data->debugfs, client,
+ &pmbus_debugfs_ops_pec);
+
for (i = 0; i < data->info->pages; ++i) {
/* Check accessibility of status register if it's not page 0 */
if (!i || pmbus_check_status_register(client, i)) {
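
The new pec attribute gives userspace a runtime switch for SMBus packet
error checking: reading returns whether I2C_CLIENT_PEC is currently set,
writing 0 clears it unconditionally, and writing 1 succeeds only if the
device advertises PB_CAPABILITY_ERROR_CHECK. A hedged userspace sketch
(the debugfs path is an example and depends on the hwmon device name):

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the actual pmbus debugfs dir. */
	FILE *f = fopen("/sys/kernel/debug/pmbus/hwmon0/pec", "w");

	if (!f)
		return 1;
	/* "1" enables PEC (fails with EOPNOTSUPP if the device lacks
	 * PB_CAPABILITY_ERROR_CHECK); "0" disables it.
	 */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}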
@@ -2488,8 +2540,7 @@ static int pmbus_init_debugfs(struct i2c_client *client,
}
#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
-int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info)
+int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
@@ -2534,7 +2585,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
ret = pmbus_find_attributes(client, data);
if (ret)
- goto out_kfree;
+ return ret;
/*
* If there are no attributes, something is wrong.
@@ -2542,35 +2593,27 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
*/
if (!data->num_attributes) {
dev_err(dev, "No attributes found\n");
- ret = -ENODEV;
- goto out_kfree;
+ return -ENODEV;
}
data->groups[0] = &data->group;
memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
- data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
+ data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name, data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
- ret = PTR_ERR(data->hwmon_dev);
dev_err(dev, "Failed to register hwmon device\n");
- goto out_kfree;
+ return PTR_ERR(data->hwmon_dev);
}
ret = pmbus_regulator_register(data);
if (ret)
- goto out_unregister;
+ return ret;
ret = pmbus_init_debugfs(client, data);
if (ret)
dev_warn(dev, "Failed to register debugfs\n");
return 0;
-
-out_unregister:
- hwmon_device_unregister(data->hwmon_dev);
-out_kfree:
- kfree(data->group.attrs);
- return ret;
}
EXPORT_SYMBOL_GPL(pmbus_do_probe);
@@ -2580,8 +2623,6 @@ int pmbus_do_remove(struct i2c_client *client)
debugfs_remove_recursive(data->debugfs);
- hwmon_device_unregister(data->hwmon_dev);
- kfree(data->group.attrs);
return 0;
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index 517584cff3de..fa5c5dd29b7a 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -78,8 +78,7 @@ static struct pmbus_driver_info pxe1610_info = {
.identify = pxe1610_identify,
};
-static int pxe1610_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pxe1610_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
u8 buf[I2C_SMBUS_BLOCK_MAX];
@@ -115,7 +114,7 @@ static int pxe1610_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id pxe1610_id[] = {
@@ -131,7 +130,7 @@ static struct i2c_driver pxe1610_driver = {
.driver = {
.name = "pxe1610",
},
- .probe = pxe1610_probe,
+ .probe_new = pxe1610_probe,
.remove = pmbus_do_remove,
.id_table = pxe1610_id,
};
diff --git a/drivers/hwmon/pmbus/tps40422.c b/drivers/hwmon/pmbus/tps40422.c
index 2b83dcda964a..edbdfa809d51 100644
--- a/drivers/hwmon/pmbus/tps40422.c
+++ b/drivers/hwmon/pmbus/tps40422.c
@@ -25,10 +25,9 @@ static struct pmbus_driver_info tps40422_info = {
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
};
-static int tps40422_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tps40422_probe(struct i2c_client *client)
{
- return pmbus_do_probe(client, id, &tps40422_info);
+ return pmbus_do_probe(client, &tps40422_info);
}
static const struct i2c_device_id tps40422_id[] = {
@@ -43,7 +42,7 @@ static struct i2c_driver tps40422_driver = {
.driver = {
.name = "tps40422",
},
- .probe = tps40422_probe,
+ .probe_new = tps40422_probe,
.remove = pmbus_do_remove,
.id_table = tps40422_id,
};
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 157c99ffb52b..db2bdf2a1f02 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -34,6 +34,8 @@ enum chips {
#define TPS53681_MFR_SPECIFIC_20 0xe4 /* Number of phases, per page */
+static const struct i2c_device_id tps53679_id[];
+
static int tps53679_identify_mode(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -183,8 +185,7 @@ static struct pmbus_driver_info tps53679_info = {
.pfunc[5] = PMBUS_HAVE_IOUT,
};
-static int tps53679_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tps53679_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
@@ -193,7 +194,7 @@ static int tps53679_probe(struct i2c_client *client,
if (dev->of_node)
chip_id = (enum chips)of_device_get_match_data(dev);
else
- chip_id = id->driver_data;
+ chip_id = i2c_match_id(tps53679_id, client)->driver_data;
info = devm_kmemdup(dev, &tps53679_info, sizeof(*info), GFP_KERNEL);
if (!info)
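
With probe_new() there is no i2c_device_id argument, so drivers that still
need per-chip driver_data recover it via i2c_match_id(), which matches on
the client name. A self-contained sketch of the idea (table entries and
names hypothetical):

#include <stdio.h>
#include <string.h>

struct id_entry {
	const char *name;
	unsigned long driver_data;
};

/* Stand-in for an i2c_device_id table, terminated by a NULL name. */
static const struct id_entry ids[] = {
	{ "tps53647", 0 },
	{ "tps53679", 1 },
	{ NULL, 0 },
};

/* Rough equivalent of i2c_match_id(ids, client)->driver_data. */
static unsigned long match_driver_data(const char *client_name)
{
	const struct id_entry *id;

	for (id = ids; id->name; id++)
		if (!strcmp(id->name, client_name))
			return id->driver_data;
	return 0;
}

int main(void)
{
	printf("%lu\n", match_driver_data("tps53679")); /* prints 1 */
	return 0;
}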
@@ -220,7 +221,7 @@ static int tps53679_probe(struct i2c_client *client,
return -ENODEV;
}
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id tps53679_id[] = {
@@ -249,7 +250,7 @@ static struct i2c_driver tps53679_driver = {
.name = "tps53679",
.of_match_table = of_match_ptr(tps53679_of_match),
},
- .probe = tps53679_probe,
+ .probe_new = tps53679_probe,
.remove = pmbus_do_remove,
.id_table = tps53679_id,
};
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 81f4c4f166cd..f8017993e2b4 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -487,8 +487,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client,
}
#endif /* CONFIG_DEBUG_FS */
-static int ucd9000_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ucd9000_probe(struct i2c_client *client)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
struct ucd9000_data *data;
@@ -523,12 +522,12 @@ static int ucd9000_probe(struct i2c_client *client,
if (client->dev.of_node)
chip = (enum chips)of_device_get_match_data(&client->dev);
else
- chip = id->driver_data;
+ chip = mid->driver_data;
- if (chip != ucd9000 && chip != mid->driver_data)
+ if (chip != ucd9000 && strcmp(client->name, mid->name) != 0)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
- id->name, mid->name);
+ client->name, mid->name);
data = devm_kzalloc(&client->dev, sizeof(struct ucd9000_data),
GFP_KERNEL);
@@ -603,7 +602,7 @@ static int ucd9000_probe(struct i2c_client *client,
ucd9000_probe_gpio(client, mid, data);
- ret = pmbus_do_probe(client, mid, info);
+ ret = pmbus_do_probe(client, info);
if (ret)
return ret;
@@ -621,7 +620,7 @@ static struct i2c_driver ucd9000_driver = {
.name = "ucd9000",
.of_match_table = of_match_ptr(ucd9000_of_match),
},
- .probe = ucd9000_probe,
+ .probe_new = ucd9000_probe,
.remove = pmbus_do_remove,
.id_table = ucd9000_id,
};
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 7c04745a9709..e111e25e1619 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -71,8 +71,7 @@ static const struct of_device_id __maybe_unused ucd9200_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ucd9200_of_match);
-static int ucd9200_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ucd9200_probe(struct i2c_client *client)
{
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
struct pmbus_driver_info *info;
@@ -106,12 +105,12 @@ static int ucd9200_probe(struct i2c_client *client,
if (client->dev.of_node)
chip = (enum chips)of_device_get_match_data(&client->dev);
else
- chip = id->driver_data;
+ chip = mid->driver_data;
- if (chip != ucd9200 && chip != mid->driver_data)
+ if (chip != ucd9200 && strcmp(client->name, mid->name) != 0)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
- id->name, mid->name);
+ client->name, mid->name);
info = devm_kzalloc(&client->dev, sizeof(struct pmbus_driver_info),
GFP_KERNEL);
@@ -192,7 +191,7 @@ static int ucd9200_probe(struct i2c_client *client,
if (mid->driver_data == ucd9240)
info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12;
- return pmbus_do_probe(client, mid, info);
+ return pmbus_do_probe(client, info);
}
/* This is the driver that will be inserted */
@@ -201,7 +200,7 @@ static struct i2c_driver ucd9200_driver = {
.name = "ucd9200",
.of_match_table = of_match_ptr(ucd9200_of_match),
},
- .probe = ucd9200_probe,
+ .probe_new = ucd9200_probe,
.remove = pmbus_do_remove,
.id_table = ucd9200_id,
};
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index d5103fc9e269..c95ac934fde4 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -127,8 +127,7 @@ static struct pmbus_driver_info xdpe122_info = {
.read_word_data = xdpe122_read_word_data,
};
-static int xdpe122_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int xdpe122_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
@@ -137,7 +136,7 @@ static int xdpe122_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- return pmbus_do_probe(client, id, info);
+ return pmbus_do_probe(client, info);
}
static const struct i2c_device_id xdpe122_id[] = {
@@ -160,7 +159,7 @@ static struct i2c_driver xdpe122_driver = {
.name = "xdpe12284",
.of_match_table = of_match_ptr(xdpe122_of_match),
},
- .probe = xdpe122_probe,
+ .probe_new = xdpe122_probe,
.remove = pmbus_do_remove,
.id_table = xdpe122_id,
};
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 3a827d0a881d..e8bda340482b 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -301,8 +301,7 @@ static const struct i2c_device_id zl6100_id[] = {
};
MODULE_DEVICE_TABLE(i2c, zl6100_id);
-static int zl6100_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int zl6100_probe(struct i2c_client *client)
{
int ret;
struct zl6100_data *data;
@@ -333,10 +332,10 @@ static int zl6100_probe(struct i2c_client *client,
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
- if (id->driver_data != mid->driver_data)
+ if (strcmp(client->name, mid->name) != 0)
dev_notice(&client->dev,
"Device mismatch: Configured %s, detected %s\n",
- id->name, mid->name);
+ client->name, mid->name);
data = devm_kzalloc(&client->dev, sizeof(struct zl6100_data),
GFP_KERNEL);
@@ -389,14 +388,14 @@ static int zl6100_probe(struct i2c_client *client,
info->write_word_data = zl6100_write_word_data;
info->write_byte = zl6100_write_byte;
- return pmbus_do_probe(client, mid, info);
+ return pmbus_do_probe(client, info);
}
static struct i2c_driver zl6100_driver = {
.driver = {
.name = "zl6100",
},
- .probe = zl6100_probe,
+ .probe_new = zl6100_probe,
.remove = pmbus_do_remove,
.id_table = zl6100_id,
};
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index a5d1a890d0be..9e086338dcba 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -297,8 +297,7 @@ static struct attribute *powr1220_attrs[] = {
ATTRIBUTE_GROUPS(powr1220);
-static int powr1220_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int powr1220_probe(struct i2c_client *client)
{
struct powr1220_data *data;
struct device *hwmon_dev;
@@ -331,7 +330,7 @@ static struct i2c_driver powr1220_driver = {
.driver = {
.name = "powr1220",
},
- .probe = powr1220_probe,
+ .probe_new = powr1220_probe,
.id_table = powr1220_ids,
};
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 17bb64299bfd..1f63807c0399 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -54,16 +54,18 @@ static irqreturn_t pulse_handler(int irq, void *dev_id)
static void sample_timer(struct timer_list *t)
{
struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
+ unsigned int delta = ktime_ms_delta(ktime_get(), ctx->sample_start);
int pulses;
- u64 tmp;
- pulses = atomic_read(&ctx->pulses);
- atomic_sub(pulses, &ctx->pulses);
- tmp = (u64)pulses * ktime_ms_delta(ktime_get(), ctx->sample_start) * 60;
- do_div(tmp, ctx->pulses_per_revolution * 1000);
- ctx->rpm = tmp;
+ if (delta) {
+ pulses = atomic_read(&ctx->pulses);
+ atomic_sub(pulses, &ctx->pulses);
+ ctx->rpm = (unsigned int)(pulses * 1000 * 60) /
+ (ctx->pulses_per_revolution * delta);
+
+ ctx->sample_start = ktime_get();
+ }
- ctx->sample_start = ktime_get();
mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
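
The reworked math above computes RPM directly in 32-bit arithmetic:
pulses * 1000 * 60 divided by (pulses_per_revolution * delta), with delta
in milliseconds. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* 47 pulses in a ~1000 ms window, 2 pulses per revolution. */
	unsigned int pulses = 47, delta_ms = 1000, ppr = 2;
	unsigned int rpm = (pulses * 1000 * 60) / (ppr * delta_ms);

	printf("%u RPM\n", rpm); /* prints "1410 RPM" */
	return 0;
}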
@@ -293,14 +295,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
mutex_init(&ctx->lock);
ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
- if (IS_ERR(ctx->pwm)) {
- ret = PTR_ERR(ctx->pwm);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Could not get PWM: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(ctx->pwm))
+ return dev_err_probe(dev, PTR_ERR(ctx->pwm), "Could not get PWM\n");
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b490fe3d2ee8..f2703c5460d0 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -20,7 +20,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <plat/adc.h>
+#include <linux/soc/samsung/s3c-adc.h>
#include <linux/platform_data/hwmon-s3c.h>
struct s3c_hwmon_attr {
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index d421e691318b..09ce30cba54b 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -202,8 +202,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
}
}
- if (nr_count[hwmon_temp])
- nr_count[hwmon_chip]++, nr_types++;
+ if (nr_count[hwmon_temp]) {
+ nr_count[hwmon_chip]++;
+ nr_types++;
+ }
scmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*scmi_hwmon_chan),
GFP_KERNEL);
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 8ea5534455f2..7d18ce5d3839 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -250,8 +250,7 @@ static struct attribute *sht21_attrs[] = {
ATTRIBUTE_GROUPS(sht21);
-static int sht21_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sht21_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -286,7 +285,7 @@ MODULE_DEVICE_TABLE(i2c, sht21_id);
static struct i2c_driver sht21_driver = {
.driver.name = "sht21",
- .probe = sht21_probe,
+ .probe_new = sht21_probe,
.id_table = sht21_id,
};
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 7364764baaeb..3f279aa1cee5 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -662,8 +662,9 @@ static struct attribute *sts3x_attrs[] = {
ATTRIBUTE_GROUPS(sht3x);
ATTRIBUTE_GROUPS(sts3x);
-static int sht3x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id sht3x_ids[];
+
+static int sht3x_probe(struct i2c_client *client)
{
int ret;
struct sht3x_data *data;
@@ -715,7 +716,7 @@ static int sht3x_probe(struct i2c_client *client,
if (ret)
return ret;
- if (id->driver_data == sts3x)
+ if (i2c_match_id(sht3x_ids, client)->driver_data == sts3x)
attribute_groups = sts3x_groups;
else
attribute_groups = sht3x_groups;
@@ -742,7 +743,7 @@ MODULE_DEVICE_TABLE(i2c, sht3x_ids);
static struct i2c_driver sht3x_i2c_driver = {
.driver.name = "sht3x",
- .probe = sht3x_probe,
+ .probe_new = sht3x_probe,
.id_table = sht3x_ids,
};
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index a0078ccede03..18546ebc8e9f 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_data/shtc1.h>
+#include <linux/of.h>
/* commands (high precision mode) */
static const unsigned char shtc1_cmd_measure_blocking_hpm[] = { 0x7C, 0xA2 };
@@ -185,17 +186,19 @@ static void shtc1_select_command(struct shtc1_data *data)
}
}
-static int shtc1_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id shtc1_id[];
+
+static int shtc1_probe(struct i2c_client *client)
{
int ret;
u16 id_reg;
char id_reg_buf[2];
struct shtc1_data *data;
struct device *hwmon_dev;
- enum shtcx_chips chip = id->driver_data;
+ enum shtcx_chips chip = i2c_match_id(shtc1_id, client)->driver_data;
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) {
dev_err(dev, "plain i2c transactions not supported\n");
@@ -233,8 +236,14 @@ static int shtc1_probe(struct i2c_client *client,
data->client = client;
data->chip = chip;
- if (client->dev.platform_data)
- data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+ if (np) {
+ data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+ } else {
+ if (client->dev.platform_data)
+ data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+ }
+
shtc1_select_command(data);
mutex_init(&data->update_lock);
@@ -257,9 +266,20 @@ static const struct i2c_device_id shtc1_id[] = {
};
MODULE_DEVICE_TABLE(i2c, shtc1_id);
+static const struct of_device_id shtc1_of_match[] = {
+ { .compatible = "sensirion,shtc1" },
+ { .compatible = "sensirion,shtw1" },
+ { .compatible = "sensirion,shtc3" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, shtc1_of_match);
+
static struct i2c_driver shtc1_i2c_driver = {
- .driver.name = "shtc1",
- .probe = shtc1_probe,
+ .driver = {
+ .name = "shtc1",
+ .of_match_table = shtc1_of_match,
+ },
+ .probe_new = shtc1_probe,
.id_table = shtc1_id,
};
diff --git a/drivers/hwmon/sl28cpld-hwmon.c b/drivers/hwmon/sl28cpld-hwmon.c
new file mode 100644
index 000000000000..e48f58ec5b9c
--- /dev/null
+++ b/drivers/hwmon/sl28cpld-hwmon.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld hardware monitoring driver
+ *
+ * Copyright 2020 Kontron Europe GmbH
+ */
+
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define FAN_INPUT 0x00
+#define FAN_SCALE_X8 BIT(7)
+#define FAN_VALUE_MASK GENMASK(6, 0)
+
+struct sl28cpld_hwmon {
+ struct regmap *regmap;
+ u32 offset;
+};
+
+static umode_t sl28cpld_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int sl28cpld_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *input)
+{
+ struct sl28cpld_hwmon *hwmon = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = regmap_read(hwmon->regmap, hwmon->offset + FAN_INPUT,
+ &value);
+ if (ret)
+ return ret;
+ /*
+ * The register has a 7-bit value and 1 bit which indicates the
+ * scale. If the MSB is set, the lower 7 bits have to be
+ * multiplied by 8 to get the correct reading.
+ */
+ if (value & FAN_SCALE_X8)
+ value = FIELD_GET(FAN_VALUE_MASK, value) << 3;
+
+ /*
+ * The counter period is 1000ms and the sysfs specification
+ * says we should assume 2 pulses per revolution.
+ */
+ value *= 60 / 2;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *input = value;
+ return 0;
+}
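
To make the decoding above concrete, here is a worked example with a
hypothetical raw register value: 0x90 has the scale bit set and a 7-bit
count of 0x10 (16), so the pulse count is 16 * 8 = 128 per 1000 ms window,
and 128 * 60 / 2 = 3840 RPM.

#include <stdio.h>

#define FAN_SCALE_X8	0x80	/* BIT(7) */
#define FAN_VALUE_MASK	0x7f	/* GENMASK(6, 0) */

int main(void)
{
	unsigned int value = 0x90;	/* hypothetical register reading */

	if (value & FAN_SCALE_X8)
		value = (value & FAN_VALUE_MASK) << 3;
	value *= 60 / 2;	/* 1 s window, 2 pulses per revolution */

	printf("%u RPM\n", value);	/* prints "3840 RPM" */
	return 0;
}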
+
+static const u32 sl28cpld_hwmon_fan_config[] = {
+ HWMON_F_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info sl28cpld_hwmon_fan = {
+ .type = hwmon_fan,
+ .config = sl28cpld_hwmon_fan_config,
+};
+
+static const struct hwmon_channel_info *sl28cpld_hwmon_info[] = {
+ &sl28cpld_hwmon_fan,
+ NULL
+};
+
+static const struct hwmon_ops sl28cpld_hwmon_ops = {
+ .is_visible = sl28cpld_hwmon_is_visible,
+ .read = sl28cpld_hwmon_read,
+};
+
+static const struct hwmon_chip_info sl28cpld_hwmon_chip_info = {
+ .ops = &sl28cpld_hwmon_ops,
+ .info = sl28cpld_hwmon_info,
+};
+
+static int sl28cpld_hwmon_probe(struct platform_device *pdev)
+{
+ struct sl28cpld_hwmon *hwmon;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!hwmon->regmap)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &hwmon->offset);
+ if (ret)
+ return -EINVAL;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "sl28cpld_hwmon", hwmon,
+ &sl28cpld_hwmon_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ dev_err(&pdev->dev, "failed to register as hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id sl28cpld_hwmon_of_match[] = {
+ { .compatible = "kontron,sl28cpld-fan" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_hwmon_of_match);
+
+static struct platform_driver sl28cpld_hwmon_driver = {
+ .probe = sl28cpld_hwmon_probe,
+ .driver = {
+ .name = "sl28cpld-fan",
+ .of_match_table = sl28cpld_hwmon_of_match,
+ },
+};
+module_platform_driver(sl28cpld_hwmon_driver);
+
+MODULE_DESCRIPTION("sl28cpld Hardware Monitoring Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index af01f763f7d1..b6cbe9810a1b 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -562,8 +562,9 @@ static struct attribute *smm665_attrs[] = {
ATTRIBUTE_GROUPS(smm665);
-static int smm665_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id smm665_id[];
+
+static int smm665_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct smm665_data *data;
@@ -585,7 +586,7 @@ static int smm665_probe(struct i2c_client *client,
mutex_init(&data->update_lock);
data->client = client;
- data->type = id->driver_data;
+ data->type = i2c_match_id(smm665_id, client)->driver_data;
data->cmdreg = i2c_new_dummy_device(adapter, (client->addr & ~SMM665_REGMASK)
| SMM665_CMDREG_BASE);
if (IS_ERR(data->cmdreg))
@@ -694,7 +695,7 @@ static struct i2c_driver smm665_driver = {
.driver = {
.name = "smm665",
},
- .probe = smm665_probe,
+ .probe_new = smm665_probe,
.remove = smm665_remove,
.id_table = smm665_id,
};
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6cbb119e3d0e..03a87aa2017a 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -587,8 +587,7 @@ static int smsc47m192_detect(struct i2c_client *client,
return 0;
}
-static int smsc47m192_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int smsc47m192_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -629,7 +628,7 @@ static struct i2c_driver smsc47m192_driver = {
.driver = {
.name = "smsc47m192",
},
- .probe = smsc47m192_probe,
+ .probe_new = smsc47m192_probe,
.id_table = smsc47m192_id,
.detect = smsc47m192_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/sparx5-temp.c b/drivers/hwmon/sparx5-temp.c
index 1a2b1026b026..98be48e3a22a 100644
--- a/drivers/hwmon/sparx5-temp.c
+++ b/drivers/hwmon/sparx5-temp.c
@@ -56,7 +56,7 @@ static int s5_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_input:
stat = readl_relaxed(hwmon->base + TEMP_STAT);
if (!(stat & TEMP_STAT_VALID))
- return -EIO;
+ return -EAGAIN;
value = stat & TEMP_STAT_TEMP;
/*
* From register documentation:
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 35b353c2b0a1..6928be6dbe4e 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -762,8 +762,7 @@ static struct attribute *stts751_attrs[] = {
};
ATTRIBUTE_GROUPS(stts751);
-static int stts751_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int stts751_probe(struct i2c_client *client)
{
struct stts751_priv *priv;
int ret;
@@ -822,7 +821,7 @@ static struct i2c_driver stts751_driver = {
.name = DEVNAME,
.of_match_table = of_match_ptr(stts751_of_match),
},
- .probe = stts751_probe,
+ .probe_new = stts751_probe,
.id_table = stts751_id,
.detect = stts751_detect,
.alert = stts751_alert,
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index 3e3b8c61bd76..a52ca72af120 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -446,8 +446,7 @@ ATTRIBUTE_GROUPS(tc654);
* device probe and removal
*/
-static int tc654_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tc654_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc654_data *data;
@@ -488,7 +487,7 @@ static struct i2c_driver tc654_driver = {
.driver = {
.name = "tc654",
},
- .probe = tc654_probe,
+ .probe_new = tc654_probe,
.id_table = tc654_id,
};
diff --git a/drivers/hwmon/tc74.c b/drivers/hwmon/tc74.c
index fcf638ed16a9..ace55da97fc2 100644
--- a/drivers/hwmon/tc74.c
+++ b/drivers/hwmon/tc74.c
@@ -103,8 +103,7 @@ static struct attribute *tc74_attrs[] = {
ATTRIBUTE_GROUPS(tc74);
-static int tc74_probe(struct i2c_client *client,
- const struct i2c_device_id *dev_id)
+static int tc74_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc74_data *data;
@@ -161,7 +160,7 @@ static struct i2c_driver tc74_driver = {
.driver = {
.name = "tc74",
},
- .probe = tc74_probe,
+ .probe_new = tc74_probe,
.id_table = tc74_id,
};
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 3f5a983d9289..fde5e2d0825a 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -377,8 +377,9 @@ static void thmc50_init_client(struct thmc50_data *data)
i2c_smbus_write_byte_data(client, THMC50_REG_CONF, config);
}
-static int thmc50_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id thmc50_id[];
+
+static int thmc50_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct thmc50_data *data;
@@ -390,7 +391,7 @@ static int thmc50_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
- data->type = id->driver_data;
+ data->type = i2c_match_id(thmc50_id, client)->driver_data;
mutex_init(&data->update_lock);
thmc50_init_client(data);
@@ -419,7 +420,7 @@ static struct i2c_driver thmc50_driver = {
.driver = {
.name = "thmc50",
},
- .probe = thmc50_probe,
+ .probe_new = thmc50_probe,
.id_table = thmc50_id,
.detect = thmc50_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5fe35e5b2f73..e867a0c2e539 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -189,8 +189,7 @@ static const struct regmap_config tmp102_regmap_config = {
.use_single_write = true,
};
-static int tmp102_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp102_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -304,7 +303,7 @@ static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = of_match_ptr(tmp102_of_match),
.driver.pm = &tmp102_dev_pm_ops,
- .probe = tmp102_probe,
+ .probe_new = tmp102_probe,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index 49851533935e..a7e202cc8323 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -109,8 +109,7 @@ static const struct regmap_config tmp103_regmap_config = {
.volatile_reg = tmp103_regmap_is_volatile,
};
-static int tmp103_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp103_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -172,7 +171,7 @@ static struct i2c_driver tmp103_driver = {
.of_match_table = of_match_ptr(tmp103_of_match),
.pm = &tmp103_dev_pm_ops,
},
- .probe = tmp103_probe,
+ .probe_new = tmp103_probe,
.id_table = tmp103_id,
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index fe587d4f9b2d..5435664c3f6e 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -323,8 +323,7 @@ static const struct regmap_config tmp108_regmap_config = {
.use_single_write = true,
};
-static int tmp108_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp108_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -433,7 +432,7 @@ static struct i2c_driver tmp108_driver = {
.pm = &tmp108_dev_pm_ops,
.of_match_table = of_match_ptr(tmp108_of_ids),
},
- .probe = tmp108_probe,
+ .probe_new = tmp108_probe,
.id_table = tmp108_i2c_ids,
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index fa361d9949db..9dc210b55e69 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -683,8 +683,7 @@ static int tmp401_detect(struct i2c_client *client,
return 0;
}
-static int tmp401_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp401_probe(struct i2c_client *client)
{
static const char * const names[] = {
"TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
@@ -700,7 +699,7 @@ static int tmp401_probe(struct i2c_client *client,
data->client = client;
mutex_init(&data->update_lock);
- data->kind = id->driver_data;
+ data->kind = i2c_match_id(tmp401_id, client)->driver_data;
/* Initialize the TMP401 chip */
status = tmp401_init_client(data, client);
@@ -736,7 +735,7 @@ static struct i2c_driver tmp401_driver = {
.driver = {
.name = "tmp401",
},
- .probe = tmp401_probe,
+ .probe_new = tmp401_probe,
.id_table = tmp401_id,
.detect = tmp401_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 83a4fab151d2..ede66ea6a730 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -279,8 +279,7 @@ static const struct hwmon_ops tmp421_ops = {
.read = tmp421_read,
};
-static int tmp421_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp421_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -296,7 +295,7 @@ static int tmp421_probe(struct i2c_client *client,
data->channels = (unsigned long)
of_device_get_match_data(&client->dev);
else
- data->channels = id->driver_data;
+ data->channels = i2c_match_id(tmp421_id, client)->driver_data;
data->client = client;
err = tmp421_init_client(client);
@@ -327,7 +326,7 @@ static struct i2c_driver tmp421_driver = {
.name = "tmp421",
.of_match_table = of_match_ptr(tmp421_of_match),
},
- .probe = tmp421_probe,
+ .probe_new = tmp421_probe,
.id_table = tmp421_id,
.detect = tmp421_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 23908dc5611b..47bbe47e062f 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -192,7 +192,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
/*
* The valus is read in voltage in the chip but reported as
* current to the user.
- * 2's compliment number shifted by one to four depending
+ * 2's complement number shifted by one to four depending
* on the pga gain setting. 1lsb = 10uV
*/
*val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
@@ -709,8 +709,7 @@ static int tmp51x_configure(struct device *dev, struct tmp51x_data *data)
return 0;
}
-static int tmp51x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tmp51x_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tmp51x_data *data;
@@ -724,7 +723,7 @@ static int tmp51x_probe(struct i2c_client *client,
if (client->dev.of_node)
data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
else
- data->id = id->driver_data;
+ data->id = i2c_match_id(tmp51x_id, client)->driver_data;
ret = tmp51x_configure(dev, data);
if (ret < 0) {
@@ -751,7 +750,7 @@ static int tmp51x_probe(struct i2c_client *client,
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- dev_dbg(dev, "power monitor %s\n", id->name);
+ dev_dbg(dev, "power monitor %s\n", client->name);
return 0;
}
@@ -761,7 +760,7 @@ static struct i2c_driver tmp51x_driver = {
.name = "tmp51x",
.of_match_table = of_match_ptr(tmp51x_of_match),
},
- .probe = tmp51x_probe,
+ .probe_new = tmp51x_probe,
.id_table = tmp51x_id,
};
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 5a5120121e50..3964ceab2817 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1951,8 +1951,12 @@ static int w83627ehf_probe(struct platform_device *pdev)
data,
&w83627ehf_chip_info,
w83627ehf_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto exit_release;
+ }
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ return 0;
exit_release:
release_region(res->start, IOREGION_LENGTH);
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index 96b695b32572..88d11dc5feb9 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -259,8 +259,7 @@ static const struct regmap_config w83773_regmap_config = {
.val_bits = 8,
};
-static int w83773_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int w83773_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -296,7 +295,7 @@ static struct i2c_driver w83773_driver = {
.name = "w83773g",
.of_match_table = of_match_ptr(w83773_of_match),
},
- .probe = w83773_probe,
+ .probe_new = w83773_probe,
.id_table = w83773_id,
};
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index d833a4f16c47..e84aa5604e64 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1192,8 +1192,9 @@ static void w83781d_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &w83781d_group_other);
}
-static int
-w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static const struct i2c_device_id w83781d_ids[];
+
+static int w83781d_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct w83781d_data *data;
@@ -1207,7 +1208,7 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
mutex_init(&data->lock);
mutex_init(&data->update_lock);
- data->type = id->driver_data;
+ data->type = i2c_match_id(w83781d_ids, client)->driver_data;
data->client = client;
/* attach secondary i2c lm75-like clients */
@@ -1575,7 +1576,7 @@ static struct i2c_driver w83781d_driver = {
.driver = {
.name = "w83781d",
},
- .probe = w83781d_probe,
+ .probe_new = w83781d_probe,
.remove = w83781d_remove,
.id_table = w83781d_ids,
.detect = w83781d_detect,
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index aad8d4da5802..37b25a1474c4 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -315,8 +315,7 @@ struct w83791d_data {
u8 vrm; /* hwmon-vid */
};
-static int w83791d_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int w83791d_probe(struct i2c_client *client);
static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
@@ -342,7 +341,7 @@ static struct i2c_driver w83791d_driver = {
.driver = {
.name = "w83791d",
},
- .probe = w83791d_probe,
+ .probe_new = w83791d_probe,
.remove = w83791d_remove,
.id_table = w83791d_id,
.detect = w83791d_detect,
@@ -1346,8 +1345,7 @@ static int w83791d_detect(struct i2c_client *client,
return 0;
}
-static int w83791d_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int w83791d_probe(struct i2c_client *client)
{
struct w83791d_data *data;
struct device *dev = &client->dev;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 7fc8a1160c8f..abd5c3a722b9 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -286,8 +286,7 @@ struct w83792d_data {
u8 sf2_levels[3][4]; /* Smart FanII: Fan1,2,3 duty cycle levels */
};
-static int w83792d_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int w83792d_probe(struct i2c_client *client);
static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83792d_remove(struct i2c_client *client);
@@ -310,7 +309,7 @@ static struct i2c_driver w83792d_driver = {
.driver = {
.name = "w83792d",
},
- .probe = w83792d_probe,
+ .probe_new = w83792d_probe,
.remove = w83792d_remove,
.id_table = w83792d_id,
.detect = w83792d_detect,
@@ -1359,7 +1358,7 @@ w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
}
static int
-w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
+w83792d_probe(struct i2c_client *client)
{
struct w83792d_data *data;
struct device *dev = &client->dev;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 3f59f2a1a5e3..e7d0484eabe4 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -283,8 +283,7 @@ static void w83793_release_resources(struct kref *ref)
static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
-static int w83793_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int w83793_probe(struct i2c_client *client);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
@@ -303,7 +302,7 @@ static struct i2c_driver w83793_driver = {
.driver = {
.name = "w83793",
},
- .probe = w83793_probe,
+ .probe_new = w83793_probe,
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
@@ -1646,8 +1645,7 @@ static int w83793_detect(struct i2c_client *client,
return 0;
}
-static int w83793_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int w83793_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
static const int watchdog_minors[] = {
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 6d52b530b429..621b05afa837 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2134,8 +2134,9 @@ static void w83795_apply_temp_config(struct w83795_data *data, u8 config,
}
}
-static int w83795_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const struct i2c_device_id w83795_id[];
+
+static int w83795_probe(struct i2c_client *client)
{
int i;
u8 tmp;
@@ -2148,7 +2149,7 @@ static int w83795_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, data);
- data->chip_type = id->driver_data;
+ data->chip_type = i2c_match_id(w83795_id, client)->driver_data;
data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
mutex_init(&data->update_lock);
@@ -2256,7 +2257,7 @@ static struct i2c_driver w83795_driver = {
.driver = {
.name = "w83795",
},
- .probe = w83795_probe,
+ .probe_new = w83795_probe,
.remove = w83795_remove,
.id_table = w83795_id,
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 6f6d925cf017..656a77102ca6 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -62,8 +62,7 @@ static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
* Functions declaration
*/
-static int w83l785ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int w83l785ts_probe(struct i2c_client *client);
static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83l785ts_remove(struct i2c_client *client);
@@ -85,7 +84,7 @@ static struct i2c_driver w83l785ts_driver = {
.driver = {
.name = "w83l785ts",
},
- .probe = w83l785ts_probe,
+ .probe_new = w83l785ts_probe,
.remove = w83l785ts_remove,
.id_table = w83l785ts_id,
.detect = w83l785ts_detect,
@@ -163,8 +162,7 @@ static int w83l785ts_detect(struct i2c_client *client,
return 0;
}
-static int w83l785ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int w83l785ts_probe(struct i2c_client *client)
{
struct w83l785ts_data *data;
struct device *dev = &client->dev;
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index ce98ec8794e2..542afff1423b 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -706,7 +706,7 @@ static void w83l786ng_init_client(struct i2c_client *client)
}
static int
-w83l786ng_probe(struct i2c_client *client, const struct i2c_device_id *id)
+w83l786ng_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct w83l786ng_data *data;
@@ -752,7 +752,7 @@ static struct i2c_driver w83l786ng_driver = {
.driver = {
.name = "w83l786ng",
},
- .probe = w83l786ng_probe,
+ .probe_new = w83l786ng_probe,
.id_table = w83l786ng_id,
.detect = w83l786ng_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 02dbb5ca3bcf..c1198245461d 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -3,7 +3,7 @@
# Coresight configuration
#
menuconfig CORESIGHT
- bool "CoreSight Tracing Support"
+ tristate "CoreSight Tracing Support"
depends on ARM || ARM64
depends on OF || ACPI
select ARM_AMBA
@@ -15,17 +15,24 @@ menuconfig CORESIGHT
specification and configure the right series of components when a
trace source gets enabled.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight.
+
if CORESIGHT
config CORESIGHT_LINKS_AND_SINKS
- bool "CoreSight Link and Sink drivers"
+ tristate "CoreSight Link and Sink drivers"
help
This enables support for CoreSight link and sink drivers that are
responsible for transporting and collecting the trace data
respectively. Link and sinks are dynamically aggregated with a trace
entity at run time to form a complete trace path.
+ To compile these drivers as modules, choose M here: the
+ modules will be called coresight-funnel and coresight-replicator.
+
config CORESIGHT_LINK_AND_SINK_TMC
- bool "Coresight generic TMC driver"
+ tristate "Coresight generic TMC driver"
+
depends on CORESIGHT_LINKS_AND_SINKS
help
This enables support for the Trace Memory Controller driver.
@@ -34,8 +41,11 @@ config CORESIGHT_LINK_AND_SINK_TMC
complies with the generic implementation of the component without
special enhancement or added features.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-tmc.
+
config CORESIGHT_CATU
- bool "Coresight Address Translation Unit (CATU) driver"
+ tristate "Coresight Address Translation Unit (CATU) driver"
depends on CORESIGHT_LINK_AND_SINK_TMC
help
Enable support for the Coresight Address Translation Unit (CATU).
@@ -45,8 +55,11 @@ config CORESIGHT_CATU
by looking up the provided table. CATU can also be used in pass-through
mode where the address is not translated.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-catu.
+
config CORESIGHT_SINK_TPIU
- bool "Coresight generic TPIU driver"
+ tristate "Coresight generic TPIU driver"
depends on CORESIGHT_LINKS_AND_SINKS
help
This enables support for the Trace Port Interface Unit driver,
@@ -56,16 +69,22 @@ config CORESIGHT_SINK_TPIU
connected to an external host for use cases capturing more traces than
the on-board coresight memory can handle.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-tpiu.
+
config CORESIGHT_SINK_ETBV10
- bool "Coresight ETBv1.0 driver"
+ tristate "Coresight ETBv1.0 driver"
depends on CORESIGHT_LINKS_AND_SINKS
help
This enables support for the Embedded Trace Buffer version 1.0 driver
that complies with the generic implementation of the component without
special enhancement or added features.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-etb10.
+
config CORESIGHT_SOURCE_ETM3X
- bool "CoreSight Embedded Trace Macrocell 3.x driver"
+ tristate "CoreSight Embedded Trace Macrocell 3.x driver"
depends on !ARM64
select CORESIGHT_LINKS_AND_SINKS
help
@@ -74,8 +93,11 @@ config CORESIGHT_SOURCE_ETM3X
This is primarily useful for instruction level tracing. Depending
on the ETM version, data tracing may also be available.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-etm3x.
+
config CORESIGHT_SOURCE_ETM4X
- bool "CoreSight Embedded Trace Macrocell 4.x driver"
+ tristate "CoreSight Embedded Trace Macrocell 4.x driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
select PID_IN_CONTEXTIDR
@@ -85,8 +107,11 @@ config CORESIGHT_SOURCE_ETM4X
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-etm4x.
+
config CORESIGHT_STM
- bool "CoreSight System Trace Macrocell driver"
+ tristate "CoreSight System Trace Macrocell driver"
depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
select CORESIGHT_LINKS_AND_SINKS
select STM
@@ -96,6 +121,9 @@ config CORESIGHT_STM
logging useful software events or data coming from various entities
in the system, possibly running different OSs
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-stm.
+
config CORESIGHT_CPU_DEBUG
tristate "CoreSight CPU Debug driver"
depends on ARM || ARM64
@@ -110,8 +138,11 @@ config CORESIGHT_CPU_DEBUG
properly, please refer to Documentation/trace/coresight/coresight-cpu-debug.rst
for detailed description and the example for usage.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-cpu-debug.
+
config CORESIGHT_CTI
- bool "CoreSight Cross Trigger Interface (CTI) driver"
+ tristate "CoreSight Cross Trigger Interface (CTI) driver"
depends on ARM || ARM64
help
This driver provides support for CoreSight CTI and CTM components.
@@ -122,6 +153,9 @@ config CORESIGHT_CTI
halt compared to disabling sources and sinks normally in driver
software.
+ To compile this driver as a module, choose M here: the
+ module will be called coresight-cti.
+
config CORESIGHT_CTI_INTEGRATION_REGS
bool "Access CTI CoreSight Integration Registers"
depends on CORESIGHT_CTI
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 19497d1d92bf..f20e357758d1 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,22 +2,24 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o \
- coresight-platform.o coresight-sysfs.o
-obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
- coresight-tmc-etf.o \
- coresight-tmc-etr.o
+obj-$(CONFIG_CORESIGHT) += coresight.o
+coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
+ coresight-sysfs.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
+ coresight-tmc-etr.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
- coresight-etm3x-sysfs.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
- coresight-etm4x-sysfs.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o
+coresight-etm3x-y := coresight-etm3x-core.o coresight-etm-cp14.o \
+ coresight-etm3x-sysfs.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
+coresight-etm4x-y := coresight-etm4x-core.o coresight-etm4x-sysfs.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
-obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o \
- coresight-cti-platform.o \
- coresight-cti-sysfs.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
+ coresight-cti-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 1801804a7762..99430f6cf5a5 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -358,7 +358,7 @@ static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
return 0;
}
-const struct etr_buf_operations etr_catu_buf_ops = {
+static const struct etr_buf_operations etr_catu_buf_ops = {
.alloc = catu_alloc_etr_buf,
.free = catu_free_etr_buf,
.sync = catu_sync_etr_buf,
@@ -567,11 +567,21 @@ out:
return ret;
}
+static int __exit catu_remove(struct amba_device *adev)
+{
+ struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
static struct amba_id catu_ids[] = {
CS_AMBA_ID(0x000bb9ee),
{},
};
+MODULE_DEVICE_TABLE(amba, catu_ids);
+
static struct amba_driver catu_driver = {
.drv = {
.name = "coresight-catu",
@@ -579,7 +589,30 @@ static struct amba_driver catu_driver = {
.suppress_bind_attrs = true,
},
.probe = catu_probe,
+ .remove = catu_remove,
.id_table = catu_ids,
};
-builtin_amba_driver(catu_driver);
+static int __init catu_init(void)
+{
+ int ret;
+
+ ret = amba_driver_register(&catu_driver);
+ if (ret)
+ pr_info("Error registering catu driver\n");
+ tmc_etr_set_catu_ops(&etr_catu_buf_ops);
+ return ret;
+}
+
+static void __exit catu_exit(void)
+{
+ tmc_etr_remove_catu_ops();
+ amba_driver_unregister(&catu_driver);
+}
+
+module_init(catu_init);
+module_exit(catu_exit);
+
+MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
+MODULE_DESCRIPTION("Arm CoreSight Address Translation Unit (CATU) Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 80ceee3c739c..6160c2d75a56 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -108,6 +108,4 @@ static inline bool coresight_is_catu_device(struct coresight_device *csdev)
return true;
}
-extern const struct etr_buf_operations etr_catu_buf_ops;
-
#endif
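
With the extern removed, the CATU buffer operations reach the TMC driver only through tmc_etr_set_catu_ops()/tmc_etr_remove_catu_ops(), so neither module needs a link-time symbol from the other. A hedged sketch of what the receiving side might look like (the real implementation lives in the TMC driver and may differ in detail):

#include <linux/module.h>

/* Sketch only: struct etr_buf_operations is defined in coresight-tmc.h. */
static const struct etr_buf_operations *etr_catu_ops;

void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
{
	etr_catu_ops = catu;
}
EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);

void tmc_etr_remove_catu_ops(void)
{
	etr_catu_ops = NULL;
}
EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
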
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight-core.c
index e9c90f2de34a..cc9e8025c533 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -53,7 +53,22 @@ static struct list_head *stm_path;
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
*/
-const u32 barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
+const u32 coresight_barrier_pkt[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
+EXPORT_SYMBOL_GPL(coresight_barrier_pkt);
+
+static const struct cti_assoc_op *cti_assoc_ops;
+
+void coresight_set_cti_ops(const struct cti_assoc_op *cti_op)
+{
+ cti_assoc_ops = cti_op;
+}
+EXPORT_SYMBOL_GPL(coresight_set_cti_ops);
+
+void coresight_remove_cti_ops(void)
+{
+ cti_assoc_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(coresight_remove_cti_ops);
static int coresight_id_match(struct device *dev, void *data)
{
@@ -179,6 +194,7 @@ int coresight_claim_device_unlocked(void __iomem *base)
coresight_clear_claim_tags(base);
return -EBUSY;
}
+EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
int coresight_claim_device(void __iomem *base)
{
@@ -190,6 +206,7 @@ int coresight_claim_device(void __iomem *base)
return rc;
}
+EXPORT_SYMBOL_GPL(coresight_claim_device);
/*
* coresight_disclaim_device_unlocked : Clear the claim tags for the device.
@@ -208,6 +225,7 @@ void coresight_disclaim_device_unlocked(void __iomem *base)
*/
WARN_ON_ONCE(1);
}
+EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
void coresight_disclaim_device(void __iomem *base)
{
@@ -215,6 +233,7 @@ void coresight_disclaim_device(void __iomem *base)
coresight_disclaim_device_unlocked(base);
CS_LOCK(base);
}
+EXPORT_SYMBOL_GPL(coresight_disclaim_device);
/* enable or disable an associated CTI device of the supplied CS device */
static int
@@ -222,16 +241,32 @@ coresight_control_assoc_ectdev(struct coresight_device *csdev, bool enable)
{
int ect_ret = 0;
struct coresight_device *ect_csdev = csdev->ect_dev;
+ struct module *mod;
if (!ect_csdev)
return 0;
+ if ((!ect_ops(ect_csdev)->enable) || (!ect_ops(ect_csdev)->disable))
+ return 0;
+ mod = ect_csdev->dev.parent->driver->owner;
if (enable) {
- if (ect_ops(ect_csdev)->enable)
+ if (try_module_get(mod)) {
ect_ret = ect_ops(ect_csdev)->enable(ect_csdev);
+ if (ect_ret) {
+ module_put(mod);
+ } else {
+ get_device(ect_csdev->dev.parent);
+ csdev->ect_enabled = true;
+ }
+ } else
+ ect_ret = -ENODEV;
} else {
- if (ect_ops(ect_csdev)->disable)
+ if (csdev->ect_enabled) {
ect_ret = ect_ops(ect_csdev)->disable(ect_csdev);
+ put_device(ect_csdev->dev.parent);
+ module_put(mod);
+ csdev->ect_enabled = false;
+ }
}
/* output warning if ECT enable is preventing trace operation */
@@ -253,6 +288,7 @@ void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
csdev->ect_dev = ect_csdev;
mutex_unlock(&coresight_mutex);
}
+EXPORT_SYMBOL_GPL(coresight_set_assoc_ectdev_mutex);
static int coresight_enable_sink(struct coresight_device *csdev,
u32 mode, void *data)
@@ -467,6 +503,7 @@ void coresight_disable_path(struct list_head *path)
{
coresight_disable_path_from(path, NULL);
}
+EXPORT_SYMBOL_GPL(coresight_disable_path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
{
@@ -540,50 +577,46 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
-static int coresight_enabled_sink(struct device *dev, const void *data)
+static struct coresight_device *
+coresight_find_enabled_sink(struct coresight_device *csdev)
{
- const bool *reset = data;
- struct coresight_device *csdev = to_coresight_device(dev);
+ int i;
+	struct coresight_device *sink = NULL;
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
- csdev->activated) {
- /*
- * Now that we have a handle on the sink for this session,
- * disable the sysFS "enable_sink" flag so that possible
- * concurrent perf session that wish to use another sink don't
- * trip on it. Doing so has no ramification for the current
- * session.
- */
- if (*reset)
- csdev->activated = false;
+ csdev->activated)
+ return csdev;
- return 1;
+ /*
+ * Recursively explore each port found on this element.
+ */
+ for (i = 0; i < csdev->pdata->nr_outport; i++) {
+ struct coresight_device *child_dev;
+
+ child_dev = csdev->pdata->conns[i].child_dev;
+ if (child_dev)
+ sink = coresight_find_enabled_sink(child_dev);
+ if (sink)
+ return sink;
}
- return 0;
+ return NULL;
}
/**
- * coresight_get_enabled_sink - returns the first enabled sink found on the bus
- * @deactivate: Whether the 'enable_sink' flag should be reset
- *
- * When operated from perf the deactivate parameter should be set to 'true'.
- * That way the "enabled_sink" flag of the sink that was selected can be reset,
- * allowing for other concurrent perf sessions to choose a different sink.
+ * coresight_get_enabled_sink - return the first enabled sink found via a
+ * connection-based search starting from the source reference
*
- * When operated from sysFS users have full control and as such the deactivate
- * parameter should be set to 'false', hence mandating users to explicitly
- * clear the flag.
+ * @source: Coresight source device reference
*/
-struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+struct coresight_device *
+coresight_get_enabled_sink(struct coresight_device *source)
{
- struct device *dev = NULL;
-
- dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
- coresight_enabled_sink);
+ if (!source)
+ return NULL;
- return dev ? to_coresight_device(dev) : NULL;
+ return coresight_find_enabled_sink(source);
}
static int coresight_sink_by_id(struct device *dev, const void *data)
@@ -627,13 +660,45 @@ struct coresight_device *coresight_get_sink_by_id(u32 id)
return dev ? to_coresight_device(dev) : NULL;
}
+/**
+ * coresight_get_ref - Helper function to increase the reference count of
+ * the module and device.
+ * Return true on success, with the device powered up.
+ * Return false if the module reference could not be taken.
+ */
+static inline bool coresight_get_ref(struct coresight_device *csdev)
+{
+ struct device *dev = csdev->dev.parent;
+
+ /* Make sure the driver can't be removed */
+ if (!try_module_get(dev->driver->owner))
+ return false;
+ /* Make sure the device can't go away */
+ get_device(dev);
+ pm_runtime_get_sync(dev);
+ return true;
+}
+
+/**
+ * coresight_put_ref - Helper function to decrease the reference count of
+ * the module and device, and power off the device.
+ */
+static inline void coresight_put_ref(struct coresight_device *csdev)
+{
+ struct device *dev = csdev->dev.parent;
+
+ pm_runtime_put(dev);
+ put_device(dev);
+ module_put(dev->driver->owner);
+}
+
/*
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
* don't appear on the trace path, they should be handled along with
* the master device.
*/
-static void coresight_grab_device(struct coresight_device *csdev)
+static int coresight_grab_device(struct coresight_device *csdev)
{
int i;
@@ -642,9 +707,20 @@ static void coresight_grab_device(struct coresight_device *csdev)
child = csdev->pdata->conns[i].child_dev;
if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
- pm_runtime_get_sync(child->dev.parent);
+ if (!coresight_get_ref(child))
+ goto err;
}
- pm_runtime_get_sync(csdev->dev.parent);
+ if (coresight_get_ref(csdev))
+ return 0;
+err:
+ for (i--; i >= 0; i--) {
+ struct coresight_device *child;
+
+ child = csdev->pdata->conns[i].child_dev;
+ if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
+ coresight_put_ref(child);
+ }
+ return -ENODEV;
}
/*
@@ -655,13 +731,13 @@ static void coresight_drop_device(struct coresight_device *csdev)
{
int i;
- pm_runtime_put(csdev->dev.parent);
+ coresight_put_ref(csdev);
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_device *child;
child = csdev->pdata->conns[i].child_dev;
if (child && child->type == CORESIGHT_DEV_TYPE_HELPER)
- pm_runtime_put(child->dev.parent);
+ coresight_put_ref(child);
}
}
@@ -680,7 +756,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink,
struct list_head *path)
{
- int i;
+ int i, ret;
bool found = false;
struct coresight_node *node;
@@ -710,11 +786,14 @@ out:
* is tell the PM runtime core we need this element and add a node
* for it.
*/
+ ret = coresight_grab_device(csdev);
+ if (ret)
+ return ret;
+
node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- coresight_grab_device(csdev);
node->csdev = csdev;
list_add(&node->link, path);
@@ -988,11 +1067,7 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
- /*
- * Search for a valid sink for this session but don't reset the
- * "enable_sink" flag in sysFS. Users get to do that explicitly.
- */
- sink = coresight_get_enabled_sink(false);
+ sink = coresight_get_enabled_sink(csdev);
if (!sink) {
ret = -EINVAL;
goto out;
@@ -1188,7 +1263,6 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
- cti_remove_assoc_from_csdev(csdev);
fwnode_handle_put(csdev->dev.fwnode);
kfree(csdev->refcnt);
kfree(csdev);
@@ -1376,16 +1450,7 @@ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
return -EAGAIN;
}
-
-struct bus_type coresight_bustype = {
- .name = "coresight",
-};
-
-static int __init coresight_init(void)
-{
- return bus_register(&coresight_bustype);
-}
-postcore_initcall(coresight_init);
+EXPORT_SYMBOL_GPL(coresight_timeout);
/*
* coresight_release_platform_data: Release references to the devices connected
@@ -1498,8 +1563,8 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
ret = coresight_fixup_device_conns(csdev);
if (!ret)
ret = coresight_fixup_orphan_conns(csdev);
- if (!ret)
- cti_add_assoc_to_csdev(csdev);
+ if (!ret && cti_assoc_ops && cti_assoc_ops->add)
+ cti_assoc_ops->add(csdev);
mutex_unlock(&coresight_mutex);
if (ret) {
@@ -1522,6 +1587,8 @@ void coresight_unregister(struct coresight_device *csdev)
{
etm_perf_del_symlink_sink(csdev);
/* Remove references to that device in the topology */
+ if (cti_assoc_ops && cti_assoc_ops->remove)
+ cti_assoc_ops->remove(csdev);
coresight_remove_conns(csdev);
coresight_clear_default_sink(csdev);
coresight_release_platform_data(csdev, csdev->pdata);
@@ -1552,6 +1619,7 @@ bool coresight_loses_context_with_cpu(struct device *dev)
return fwnode_property_present(dev_fwnode(dev),
"arm,coresight-loses-context-with-cpu");
}
+EXPORT_SYMBOL_GPL(coresight_loses_context_with_cpu);
/*
* coresight_alloc_device_name - Get an index for a given device in the
@@ -1592,3 +1660,36 @@ done:
return name;
}
EXPORT_SYMBOL_GPL(coresight_alloc_device_name);
+
+struct bus_type coresight_bustype = {
+ .name = "coresight",
+};
+
+static int __init coresight_init(void)
+{
+ int ret;
+
+ ret = bus_register(&coresight_bustype);
+ if (ret)
+ return ret;
+
+ ret = etm_perf_init();
+ if (ret)
+ bus_unregister(&coresight_bustype);
+
+ return ret;
+}
+
+static void __exit coresight_exit(void)
+{
+ etm_perf_exit();
+ bus_unregister(&coresight_bustype);
+}
+
+module_init(coresight_init);
+module_exit(coresight_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight tracer driver");
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 7e642fb3ed15..e1d232411d8d 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -665,6 +665,8 @@ static const struct amba_id debug_ids[] = {
{},
};
+MODULE_DEVICE_TABLE(amba, debug_ids);
+
static struct amba_driver debug_driver = {
.drv = {
.name = "coresight-cpu-debug",
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 3ccc703dc940..d28eae93e55c 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -86,22 +86,16 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void cti_enable_hw_smp_call(void *info)
-{
- struct cti_drvdata *drvdata = info;
-
- cti_write_all_hw_regs(drvdata);
-}
-
/* write regs to hardware and enable */
static int cti_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct device *dev = &drvdata->csdev->dev;
+ unsigned long flags;
int rc = 0;
pm_runtime_get_sync(dev->parent);
- spin_lock(&drvdata->spinlock);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
/* no need to do anything if enabled or unpowered */
if (config->hw_enabled || !config->hw_powered)
@@ -112,19 +106,11 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
if (rc)
goto cti_err_not_enabled;
- if (drvdata->ctidev.cpu >= 0) {
- rc = smp_call_function_single(drvdata->ctidev.cpu,
- cti_enable_hw_smp_call,
- drvdata, 1);
- if (rc)
- goto cti_err_not_enabled;
- } else {
- cti_write_all_hw_regs(drvdata);
- }
+ cti_write_all_hw_regs(drvdata);
config->hw_enabled = true;
atomic_inc(&drvdata->config.enable_req_count);
- spin_unlock(&drvdata->spinlock);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
cti_state_unchanged:
@@ -132,7 +118,7 @@ cti_state_unchanged:
/* cannot enable due to error */
cti_err_not_enabled:
- spin_unlock(&drvdata->spinlock);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
return rc;
}
@@ -141,9 +127,7 @@ cti_err_not_enabled:
static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
- struct device *dev = &drvdata->csdev->dev;
- pm_runtime_get_sync(dev->parent);
spin_lock(&drvdata->spinlock);
config->hw_powered = true;
@@ -163,7 +147,6 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
/* did not re-enable due to no claim / no request */
cti_hp_not_enabled:
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(dev->parent);
}
/* disable hardware */
@@ -511,12 +494,15 @@ static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
return !link_err;
}
-static void cti_remove_sysfs_link(struct cti_trig_con *tc)
+static void cti_remove_sysfs_link(struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc)
{
struct coresight_sysfs_link link_info;
+ link_info.orig = drvdata->csdev;
link_info.orig_name = tc->con_dev_name;
link_info.target = tc->con_dev;
+ link_info.target_name = dev_name(&drvdata->csdev->dev);
coresight_remove_sysfs_link(&link_info);
}
@@ -556,7 +542,7 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
* This will set the association if CTI declared before the CS device.
* (called from coresight_register() with coresight_mutex locked).
*/
-void cti_add_assoc_to_csdev(struct coresight_device *csdev)
+static void cti_add_assoc_to_csdev(struct coresight_device *csdev)
{
struct cti_drvdata *ect_item;
struct cti_device *ctidev;
@@ -589,13 +575,12 @@ void cti_add_assoc_to_csdev(struct coresight_device *csdev)
cti_add_done:
mutex_unlock(&ect_mutex);
}
-EXPORT_SYMBOL_GPL(cti_add_assoc_to_csdev);
/*
* Removing the associated devices is easier.
* A CTI will not have a value for csdev->ect_dev.
*/
-void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
+static void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
{
struct cti_drvdata *ctidrv;
struct cti_trig_con *tc;
@@ -606,8 +591,8 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
ctidrv = csdev_to_cti_drvdata(csdev->ect_dev);
ctidev = &ctidrv->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
- if (tc->con_dev == csdev->ect_dev) {
- cti_remove_sysfs_link(tc);
+ if (tc->con_dev == csdev) {
+ cti_remove_sysfs_link(ctidrv, tc);
tc->con_dev = NULL;
break;
}
@@ -616,7 +601,15 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
}
mutex_unlock(&ect_mutex);
}
-EXPORT_SYMBOL_GPL(cti_remove_assoc_from_csdev);
+
+/*
+ * Operations to add and remove an associated CTI.
+ * Registered with the coresight core driver as callback functions.
+ */
+static struct cti_assoc_op cti_assoc_ops = {
+ .add = cti_add_assoc_to_csdev,
+ .remove = cti_remove_assoc_from_csdev
+};
/*
* Update the cross references where the associated device was found
@@ -651,7 +644,7 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
if (tc->con_dev) {
coresight_set_assoc_ectdev_mutex(tc->con_dev,
NULL);
- cti_remove_sysfs_link(tc);
+ cti_remove_sysfs_link(drvdata, tc);
tc->con_dev = NULL;
}
}
@@ -742,7 +735,8 @@ static int cti_dying_cpu(unsigned int cpu)
spin_lock(&drvdata->spinlock);
drvdata->config.hw_powered = false;
- coresight_disclaim_device(drvdata->base);
+ if (drvdata->config.hw_enabled)
+ coresight_disclaim_device(drvdata->base);
spin_unlock(&drvdata->spinlock);
return 0;
}
@@ -828,7 +822,6 @@ static void cti_device_release(struct device *dev)
struct cti_drvdata *ect_item, *ect_tmp;
mutex_lock(&ect_mutex);
- cti_remove_conn_xrefs(drvdata);
cti_pm_release(drvdata);
/* remove from the list */
@@ -843,6 +836,18 @@ static void cti_device_release(struct device *dev)
if (drvdata->csdev_release)
drvdata->csdev_release(dev);
}
+static int __exit cti_remove(struct amba_device *adev)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ mutex_lock(&ect_mutex);
+ cti_remove_conn_xrefs(drvdata);
+ mutex_unlock(&ect_mutex);
+
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -963,6 +968,8 @@ static const struct amba_id cti_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, cti_ids);
+
static struct amba_driver cti_driver = {
.drv = {
.name = "coresight-cti",
@@ -970,6 +977,30 @@ static struct amba_driver cti_driver = {
.suppress_bind_attrs = true,
},
.probe = cti_probe,
+ .remove = cti_remove,
.id_table = cti_ids,
};
-builtin_amba_driver(cti_driver);
+
+static int __init cti_init(void)
+{
+ int ret;
+
+ ret = amba_driver_register(&cti_driver);
+ if (ret)
+ pr_info("Error registering cti driver\n");
+ coresight_set_cti_ops(&cti_assoc_ops);
+ return ret;
+}
+
+static void __exit cti_exit(void)
+{
+ coresight_remove_cti_ops();
+ amba_driver_unregister(&cti_driver);
+}
+
+module_init(cti_init);
+module_exit(cti_exit);
+
+MODULE_AUTHOR("Mike Leach <mike.leach@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight CTI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 392757f3a019..7ff7e7780bbf 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -1065,6 +1065,13 @@ static int cti_create_con_sysfs_attr(struct device *dev,
}
eattr->var = con;
con->con_attrs[attr_idx] = &eattr->attr.attr;
+ /*
+ * Initialize the dynamically allocated attribute
+ * to avoid LOCKDEP splat. See include/linux/sysfs.h
+ * for more details.
+ */
+ sysfs_attr_init(con->con_attrs[attr_idx]);
+
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 03e3f2590191..248cc82c838e 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -525,7 +525,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
cur = buf->cur;
offset = buf->offset;
- barrier = barrier_pkt;
+ barrier = coresight_barrier_pkt;
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
@@ -801,6 +801,21 @@ err_misc_register:
return ret;
}
+static int __exit etb_remove(struct amba_device *adev)
+{
+ struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ /*
+	 * Since misc_open() holds a refcount on the f_ops, which is
+	 * the etb fops in this case, the device stays around until the
+	 * last file handle to it is closed.
+ */
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
@@ -835,6 +850,8 @@ static const struct amba_id etb_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, etb_ids);
+
static struct amba_driver etb_driver = {
.drv = {
.name = "coresight-etb10",
@@ -844,6 +861,13 @@ static struct amba_driver etb_driver = {
},
.probe = etb_probe,
+ .remove = etb_remove,
.id_table = etb_ids,
};
-builtin_amba_driver(etb_driver);
+
+module_amba_driver(etb_driver);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight Embedded Trace Buffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 1a3169e69bb1..bdc34ca449f7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -126,10 +126,10 @@ static void free_sink_buffer(struct etm_event_data *event_data)
cpumask_t *mask = &event_data->mask;
struct coresight_device *sink;
- if (WARN_ON(cpumask_empty(mask)))
+ if (!event_data->snk_config)
return;
- if (!event_data->snk_config)
+ if (WARN_ON(cpumask_empty(mask)))
return;
cpu = cpumask_first(mask);
@@ -210,7 +210,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
u32 id;
int cpu = event->cpu;
cpumask_t *mask;
- struct coresight_device *sink;
+ struct coresight_device *sink = NULL;
struct etm_event_data *event_data = NULL;
event_data = alloc_event_data(cpu);
@@ -222,8 +222,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (event->attr.config2) {
id = (u32)event->attr.config2;
sink = coresight_get_sink_by_id(id);
- } else {
- sink = coresight_get_enabled_sink(true);
}
mask = &event_data->mask;
@@ -321,6 +319,16 @@ static void etm_event_start(struct perf_event *event, int flags)
if (!event_data)
goto fail;
+ /*
+ * Check if this ETM is allowed to trace, as decided
+ * at etm_setup_aux(). This could be due to an unreachable
+ * sink from this ETM. We can't do much in this case if
+ * the sink was specified or hinted to the driver. For
+ * now, simply don't record anything on this ETM.
+ */
+ if (!cpumask_test_cpu(cpu, &event_data->mask))
+ goto fail_end_stop;
+
path = etm_event_cpu_path(event_data, cpu);
/* We need a sink, no need to continue without one */
sink = coresight_get_sink(path);
@@ -517,6 +525,7 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link)
return 0;
}
+EXPORT_SYMBOL_GPL(etm_perf_symlink);
static ssize_t etm_perf_sink_name_show(struct device *dev,
struct device_attribute *dattr,
@@ -590,7 +599,7 @@ void etm_perf_del_symlink_sink(struct coresight_device *csdev)
csdev->ea = NULL;
}
-static int __init etm_perf_init(void)
+int __init etm_perf_init(void)
{
int ret;
@@ -617,4 +626,8 @@ static int __init etm_perf_init(void)
return ret;
}
-device_initcall(etm_perf_init);
+
+void __exit etm_perf_exit(void)
+{
+ perf_pmu_unregister(&etm_pmu);
+}
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 015213abe00a..3e4f2ad5e193 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -57,7 +57,7 @@ struct etm_event_data {
struct list_head * __percpu *path;
};
-#ifdef CONFIG_CORESIGHT
+#if IS_ENABLED(CONFIG_CORESIGHT)
int etm_perf_symlink(struct coresight_device *csdev, bool link);
int etm_perf_add_symlink_sink(struct coresight_device *csdev);
void etm_perf_del_symlink_sink(struct coresight_device *csdev);
@@ -82,4 +82,7 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
#endif /* CONFIG_CORESIGHT */
+int __init etm_perf_init(void);
+void __exit etm_perf_exit(void);
+
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index bf22dcfd3327..47f610b1c2b1 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -40,8 +40,6 @@
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
-/* The number of ETM/PTM currently registered */
-static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];
static enum cpuhp_state hp_online;
@@ -782,6 +780,42 @@ static void etm_init_trace_id(struct etm_drvdata *drvdata)
drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
}
+static int __init etm_hp_setup(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "arm/coresight:starting",
+ etm_starting_cpu, etm_dying_cpu);
+
+ if (ret)
+ return ret;
+
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+ "arm/coresight:online",
+ etm_online_cpu, NULL);
+
+ /* HP dyn state ID returned in ret on success */
+ if (ret > 0) {
+ hp_online = ret;
+ return 0;
+ }
+
+ /* failed dyn state - remove others */
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+
+ return ret;
+}
+
+static void etm_hp_clear(void)
+{
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ if (hp_online) {
+ cpuhp_remove_state_nocalls(hp_online);
+ hp_online = 0;
+ }
+}
+
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -823,39 +857,20 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (!desc.name)
return -ENOMEM;
- cpus_read_lock();
- etmdrvdata[drvdata->cpu] = drvdata;
-
if (smp_call_function_single(drvdata->cpu,
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm_count++) {
- cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
- "arm/coresight:starting",
- etm_starting_cpu, etm_dying_cpu);
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
- "arm/coresight:online",
- etm_online_cpu, NULL);
- if (ret < 0)
- goto err_arch_supported;
- hp_online = ret;
- }
- cpus_read_unlock();
-
- if (etm_arch_supported(drvdata->arch) == false) {
- ret = -EINVAL;
- goto err_arch_supported;
- }
+ if (etm_arch_supported(drvdata->arch) == false)
+ return -EINVAL;
etm_init_trace_id(drvdata);
etm_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto err_arch_supported;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
adev->dev.platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
@@ -865,17 +880,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
desc.groups = coresight_etm_groups;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto err_arch_supported;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
ret = etm_perf_symlink(drvdata->csdev, true);
if (ret) {
coresight_unregister(drvdata->csdev);
- goto err_arch_supported;
+ return ret;
}
+ etmdrvdata[drvdata->cpu] = drvdata;
+
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev,
"%s initialized\n", (char *)coresight_get_uci_data(id));
@@ -885,14 +900,40 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
}
return 0;
+}
-err_arch_supported:
- if (--etm_count == 0) {
- cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
- if (hp_online)
- cpuhp_remove_state_nocalls(hp_online);
- }
- return ret;
+static void __exit clear_etmdrvdata(void *info)
+{
+ int cpu = *(int *)info;
+
+ etmdrvdata[cpu] = NULL;
+}
+
+static int __exit etm_remove(struct amba_device *adev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ etm_perf_symlink(drvdata->csdev, false);
+
+ /*
+	 * Take the hotplug lock to avoid racing between etm_remove() and
+	 * the CPU hotplug callbacks.
+ */
+ cpus_read_lock();
+ /*
+	 * The readers of etmdrvdata[] are the CPU hotplug and PM
+	 * notification callbacks. Changing etmdrvdata[i] on CPU i
+	 * ensures these callbacks see a consistent view within a
+	 * single invocation.
+ */
+ if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
+ etmdrvdata[drvdata->cpu] = NULL;
+
+ cpus_read_unlock();
+
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
}
#ifdef CONFIG_PM
@@ -937,6 +978,8 @@ static const struct amba_id etm_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, etm_ids);
+
static struct amba_driver etm_driver = {
.drv = {
.name = "coresight-etm3x",
@@ -945,6 +988,39 @@ static struct amba_driver etm_driver = {
.suppress_bind_attrs = true,
},
.probe = etm_probe,
+ .remove = etm_remove,
.id_table = etm_ids,
};
-builtin_amba_driver(etm_driver);
+
+static int __init etm_init(void)
+{
+ int ret;
+
+ ret = etm_hp_setup();
+
+ /* etm_hp_setup() does its own cleanup - exit on error */
+ if (ret)
+ return ret;
+
+ ret = amba_driver_register(&etm_driver);
+ if (ret) {
+ pr_err("Error registering etm3x driver\n");
+ etm_hp_clear();
+ }
+
+ return ret;
+}
+
+static void __exit etm_exit(void)
+{
+ amba_driver_unregister(&etm_driver);
+ etm_hp_clear();
+}
+
+module_init(etm_init);
+module_exit(etm_exit);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 96425e818fc2..abd706b216ac 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -48,12 +48,11 @@ module_param(pm_save_enable, int, 0444);
MODULE_PARM_DESC(pm_save_enable,
"Save/restore state on power down: 1 = never, 2 = self-hosted");
-/* The number of ETMv4 currently registered */
-static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default_config(struct etmv4_config *config);
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
struct perf_event *event);
+static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
@@ -743,8 +742,14 @@ static void etm4_init_arch_data(void *info)
* The number of resource pairs conveyed by the HW starts at 0, i.e. a
* value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
* As such, add 1 to the value of NUMRSPAIR for a better representation.
+ *
+ * For ETM v4.3 and later, 0x0 means 0, and no pairs are available -
+ * the default TRUE and FALSE resource selectors are omitted.
+ * Otherwise for values 0x1 and above the number is N + 1 as per v4.2.
*/
- drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
+ drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
+ if ((drvdata->arch < ETM4X_ARCH_4V3) || (drvdata->nr_resource > 0))
+ drvdata->nr_resource += 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
* comparator control for tracing. Read any status regs as these
@@ -785,6 +790,22 @@ static void etm4_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
+/* Set ELx trace filter access in the TRCVICTLR register */
+static void etm4_set_victlr_access(struct etmv4_config *config)
+{
+ u64 access_type;
+
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK | ETM_EXLEVEL_NS_VICTLR_MASK);
+
+ /*
+ * TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering
+ * bits in vinst_ctrl, same bit pattern as TRCACATRn values returned by
+ * etm4_get_access_type() but with a relative shift in this register.
+ */
+ access_type = etm4_get_access_type(config) << ETM_EXLEVEL_LSHIFT_TRCVICTLR;
+ config->vinst_ctrl |= (u32)access_type;
+}
+
static void etm4_set_default_config(struct etmv4_config *config)
{
/* disable all events tracing */
@@ -802,6 +823,9 @@ static void etm4_set_default_config(struct etmv4_config *config)
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
config->vinst_ctrl = BIT(0);
+
+ /* TRCVICTLR::EXLEVEL_NS:EXLEVELS: Set kernel / user filtering */
+ etm4_set_victlr_access(config);
}
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
@@ -1066,7 +1090,7 @@ out:
void etm4_config_trace_mode(struct etmv4_config *config)
{
- u32 addr_acc, mode;
+ u32 mode;
mode = config->mode;
mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
@@ -1078,15 +1102,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
return;
- addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
- /* clear default config */
- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
- ETM_EXLEVEL_NS_HYP);
-
- addr_acc |= etm4_get_ns_access_type(config);
-
- config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
- config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
+ etm4_set_victlr_access(config);
}
static int etm4_online_cpu(unsigned int cpu)
@@ -1183,7 +1199,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
- for (i = 0; i < drvdata->nrseqstate; i++)
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
@@ -1227,7 +1243,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
- state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR1);
+ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
@@ -1288,7 +1304,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
- for (i = 0; i < drvdata->nrseqstate; i++)
+ for (i = 0; i < drvdata->nrseqstate - 1; i++)
writel_relaxed(state->trcseqevr[i],
drvdata->base + TRCSEQEVRn(i));
@@ -1337,7 +1353,7 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
- writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR1);
+ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
@@ -1397,28 +1413,25 @@ static struct notifier_block etm4_cpu_pm_nb = {
.notifier_call = etm4_cpu_pm_notify,
};
-/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
-static int etm4_pm_setup_cpuslocked(void)
+/* Setup PM. Deals with error conditions and counts */
+static int __init etm4_pm_setup(void)
{
int ret;
- if (etm4_count++)
- return 0;
-
ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
if (ret)
- goto reduce_count;
+ return ret;
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
- "arm/coresight4:starting",
- etm4_starting_cpu, etm4_dying_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "arm/coresight4:starting",
+ etm4_starting_cpu, etm4_dying_cpu);
if (ret)
goto unregister_notifier;
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
- "arm/coresight4:online",
- etm4_online_cpu, NULL);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "arm/coresight4:online",
+ etm4_online_cpu, NULL);
/* HP dyn state ID returned in ret on success */
if (ret > 0) {
@@ -1427,21 +1440,15 @@ static int etm4_pm_setup_cpuslocked(void)
}
/* failed dyn state - remove others */
- cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
unregister_notifier:
cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
-
-reduce_count:
- --etm4_count;
return ret;
}
static void etm4_pm_clear(void)
{
- if (--etm4_count != 0)
- return;
-
cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online) {
@@ -1497,35 +1504,20 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (!desc.name)
return -ENOMEM;
- cpus_read_lock();
- etmdrvdata[drvdata->cpu] = drvdata;
-
if (smp_call_function_single(drvdata->cpu,
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- ret = etm4_pm_setup_cpuslocked();
- cpus_read_unlock();
-
- /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
- if (ret) {
- etmdrvdata[drvdata->cpu] = NULL;
- return ret;
- }
-
- if (etm4_arch_supported(drvdata->arch) == false) {
- ret = -EINVAL;
- goto err_arch_supported;
- }
+ if (etm4_arch_supported(drvdata->arch) == false)
+ return -EINVAL;
etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto err_arch_supported;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
adev->dev.platform_data = pdata;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
@@ -1535,17 +1527,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
desc.groups = coresight_etmv4_groups;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto err_arch_supported;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
ret = etm_perf_symlink(drvdata->csdev, true);
if (ret) {
coresight_unregister(drvdata->csdev);
- goto err_arch_supported;
+ return ret;
}
+ etmdrvdata[drvdata->cpu] = drvdata;
+
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
@@ -1556,11 +1548,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
}
return 0;
-
-err_arch_supported:
- etmdrvdata[drvdata->cpu] = NULL;
- etm4_pm_clear();
- return ret;
}
static struct amba_cs_uci_id uci_id_etm4[] = {
@@ -1572,6 +1559,40 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
}
};
+static void __exit clear_etmdrvdata(void *info)
+{
+ int cpu = *(int *)info;
+
+ etmdrvdata[cpu] = NULL;
+}
+
+static int __exit etm4_remove(struct amba_device *adev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ etm_perf_symlink(drvdata->csdev, false);
+
+ /*
+	 * Take the hotplug lock to avoid racing between etm4_remove() and
+	 * the CPU hotplug callbacks.
+ */
+ cpus_read_lock();
+ /*
+	 * The readers of etmdrvdata[] are the CPU hotplug and PM
+	 * notification callbacks. Changing etmdrvdata[i] on CPU i
+	 * ensures these callbacks see a consistent view within a
+	 * single invocation.
+ */
+ if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
+ etmdrvdata[drvdata->cpu] = NULL;
+
+ cpus_read_unlock();
+
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
@@ -1586,15 +1607,53 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
+ CS_AMBA_UCI_ID(0x000b6d01, uci_id_etm4),/* HiSilicon-Hip08 */
+ CS_AMBA_UCI_ID(0x000b6d02, uci_id_etm4),/* HiSilicon-Hip09 */
{},
};
+MODULE_DEVICE_TABLE(amba, etm4_ids);
+
static struct amba_driver etm4x_driver = {
.drv = {
.name = "coresight-etm4x",
+ .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = etm4_probe,
+ .remove = etm4_remove,
.id_table = etm4_ids,
};
-builtin_amba_driver(etm4x_driver);
+
+static int __init etm4x_init(void)
+{
+ int ret;
+
+ ret = etm4_pm_setup();
+
+ /* etm4_pm_setup() does its own cleanup - exit on error */
+ if (ret)
+ return ret;
+
+ ret = amba_driver_register(&etm4x_driver);
+ if (ret) {
+ pr_err("Error registering etm4x driver\n");
+ etm4_pm_clear();
+ }
+
+ return ret;
+}
+
+static void __exit etm4x_exit(void)
+{
+ amba_driver_unregister(&etm4x_driver);
+ etm4_pm_clear();
+}
+
+module_init(etm4x_init);
+module_exit(etm4x_exit);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace v4.x driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index b673e738bc9a..989ce7b8ade7 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -206,7 +206,7 @@ static ssize_t reset_store(struct device *dev,
* each trace run.
*/
config->vinst_ctrl = BIT(0);
- if (drvdata->nr_addr_cmp == true) {
+ if (drvdata->nr_addr_cmp > 0) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
config->vinst_ctrl |= BIT(9);
@@ -236,7 +236,7 @@ static ssize_t reset_store(struct device *dev,
}
config->res_idx = 0x0;
- for (i = 0; i < drvdata->nr_resource; i++)
+ for (i = 2; i < 2 * drvdata->nr_resource; i++)
config->res_ctrl[i] = 0x0;
config->ss_idx = 0x0;
@@ -1663,8 +1663,11 @@ static ssize_t res_idx_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
- /* Resource selector pair 0 is always implemented and reserved */
- if ((val == 0) || (val >= drvdata->nr_resource))
+ /*
+	 * Resource selector pair 0 is always implemented and reserved;
+	 * hence an idx of 0 or 1 is illegal.
+ */
+ if ((val < 2) || (val >= 2 * drvdata->nr_resource))
return -EINVAL;
/*
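
The res_idx_store() check above follows from nr_resource counting selector pairs: each pair provides two selectors, and selectors 0/1 form the reserved TRUE/FALSE pair, so a valid index lies in [2, 2 * nr_resource). A stand-alone check mirroring that rule:

#include <stdbool.h>
#include <stdio.h>

static bool res_idx_valid(unsigned long val, int nr_resource)
{
	return val >= 2 && val < 2UL * nr_resource;
}

int main(void)
{
	printf("%d\n", res_idx_valid(1, 4));	/* 0: reserved pair */
	printf("%d\n", res_idx_valid(2, 4));	/* 1: first usable */
	printf("%d\n", res_idx_valid(8, 4));	/* 0: out of range */
	return 0;
}
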
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index b8283e1d6d88..eefc7371c6c4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -192,11 +192,17 @@
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)
+/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
+#define ETM_EXLEVEL_LSHIFT_TRCVICTLR 8
+
/* secure / non secure masks - TRCVICTLR, IDR3 */
#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
/* NS MON (EL3) mode never implemented */
#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+/* Interpretation of resource numbers changes with the ETM v4.3 architecture */
+#define ETM4X_ARCH_4V3 0x43
+
/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 900690a9f7f0..af40814ce560 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -274,6 +274,15 @@ out_disable_clk:
return ret;
}
+static int __exit funnel_remove(struct device *dev)
+{
+ struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
+
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int funnel_runtime_suspend(struct device *dev)
{
@@ -319,29 +328,41 @@ static int static_funnel_probe(struct platform_device *pdev)
return ret;
}
+static int __exit static_funnel_remove(struct platform_device *pdev)
+{
+ funnel_remove(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
static const struct of_device_id static_funnel_match[] = {
{.compatible = "arm,coresight-static-funnel"},
{}
};
+MODULE_DEVICE_TABLE(of, static_funnel_match);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_funnel_ids[] = {
{"ARMHC9FE", 0},
{},
};
+
+MODULE_DEVICE_TABLE(acpi, static_funnel_ids);
#endif
static struct platform_driver static_funnel_driver = {
.probe = static_funnel_probe,
+ .remove = static_funnel_remove,
.driver = {
.name = "coresight-static-funnel",
+ .owner = THIS_MODULE,
.of_match_table = static_funnel_match,
.acpi_match_table = ACPI_PTR(static_funnel_ids),
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(static_funnel_driver);
static int dynamic_funnel_probe(struct amba_device *adev,
const struct amba_id *id)
@@ -349,6 +370,11 @@ static int dynamic_funnel_probe(struct amba_device *adev,
return funnel_probe(&adev->dev, &adev->res);
}
+static int __exit dynamic_funnel_remove(struct amba_device *adev)
+{
+ return funnel_remove(&adev->dev);
+}
+
static const struct amba_id dynamic_funnel_ids[] = {
{
.id = 0x000bb908,
@@ -362,6 +388,8 @@ static const struct amba_id dynamic_funnel_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
+
static struct amba_driver dynamic_funnel_driver = {
.drv = {
.name = "coresight-dynamic-funnel",
@@ -370,6 +398,39 @@ static struct amba_driver dynamic_funnel_driver = {
.suppress_bind_attrs = true,
},
.probe = dynamic_funnel_probe,
+ .remove = dynamic_funnel_remove,
.id_table = dynamic_funnel_ids,
};
-builtin_amba_driver(dynamic_funnel_driver);
+
+static int __init funnel_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&static_funnel_driver);
+ if (ret) {
+ pr_info("Error registering platform driver\n");
+ return ret;
+ }
+
+ ret = amba_driver_register(&dynamic_funnel_driver);
+ if (ret) {
+ pr_info("Error registering amba driver\n");
+ platform_driver_unregister(&static_funnel_driver);
+ }
+
+ return ret;
+}
+
+static void __exit funnel_exit(void)
+{
+ platform_driver_unregister(&static_funnel_driver);
+ amba_driver_unregister(&dynamic_funnel_driver);
+}
+
+module_init(funnel_init);
+module_exit(funnel_exit);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight Funnel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index bfd44231d7ad..3629b7885aca 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -75,6 +75,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode)
}
return csdev;
}
+EXPORT_SYMBOL_GPL(coresight_find_csdev_by_fwnode);
#ifdef CONFIG_OF
static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
@@ -711,11 +712,11 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return dir;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- if (ptr->outport > pdata->nr_outport)
- pdata->nr_outport = ptr->outport;
+ if (ptr->outport >= pdata->nr_outport)
+ pdata->nr_outport = ptr->outport + 1;
ptr++;
} else {
- WARN_ON(pdata->nr_inport == ptr->child_port);
+ WARN_ON(pdata->nr_inport == ptr->child_port + 1);
/*
* We do not track input port connections for a device.
* However we need the highest port number described,
@@ -723,8 +724,8 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
* record for an output connection. Hence, do not move
* the ptr for input connections
*/
- if (ptr->child_port > pdata->nr_inport)
- pdata->nr_inport = ptr->child_port;
+ if (ptr->child_port >= pdata->nr_inport)
+ pdata->nr_inport = ptr->child_port + 1;
}
}
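
The port-count fix above follows from ports being numbered from 0: the count is the highest index plus one, so a single connection on port 0 must still yield a count of 1. A stand-alone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int nr_outport = 0, outport = 0;	/* port 0 in use */

	/* the fixed comparison: count = highest index + 1 */
	if (outport >= nr_outport)
		nr_outport = outport + 1;
	printf("nr_outport = %u\n", nr_outport);	/* 1, not 0 */
	return 0;
}
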
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index f2dc625ea585..65a29293b6cb 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -66,8 +66,8 @@ static DEVICE_ATTR_RO(name)
#define coresight_simple_reg64(type, name, lo_off, hi_off) \
__coresight_simple_func(type, NULL, name, lo_off, hi_off)
-extern const u32 barrier_pkt[4];
-#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(barrier_pkt))
+extern const u32 coresight_barrier_pkt[4];
+#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(coresight_barrier_pkt))
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
@@ -104,10 +104,9 @@ struct cs_buffers {
static inline void coresight_insert_barrier_packet(void *buf)
{
if (buf)
- memcpy(buf, barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
+ memcpy(buf, coresight_barrier_pkt, CORESIGHT_BARRIER_PKT_SIZE);
}
-
static inline void CS_LOCK(void __iomem *addr)
{
do {
@@ -148,7 +147,8 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct coresight_device *
+coresight_get_enabled_sink(struct coresight_device *source);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
coresight_find_default_sink(struct coresight_device *csdev);
@@ -165,7 +165,7 @@ int coresight_make_links(struct coresight_device *orig,
void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn);
-#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
+#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val);
extern int etm_writel_cp14(u32 off, u32 val);
#else
@@ -173,15 +173,13 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#endif
-#ifdef CONFIG_CORESIGHT_CTI
-extern void cti_add_assoc_to_csdev(struct coresight_device *csdev);
-extern void cti_remove_assoc_from_csdev(struct coresight_device *csdev);
+struct cti_assoc_op {
+ void (*add)(struct coresight_device *csdev);
+ void (*remove)(struct coresight_device *csdev);
+};
-#else
-static inline void cti_add_assoc_to_csdev(struct coresight_device *csdev) {}
-static inline void
-cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
-#endif
+extern void coresight_set_cti_ops(const struct cti_assoc_op *cti_op);
+extern void coresight_remove_cti_ops(void);
/*
* Macros and inline functions to handle CoreSight UCI data and driver
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 78acf29c49ca..62afdde0e5ea 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -291,6 +291,14 @@ out_disable_clk:
return ret;
}
+static int __exit replicator_remove(struct device *dev)
+{
+ struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+
+ coresight_unregister(drvdata->csdev);
+ return 0;
+}
+
static int static_replicator_probe(struct platform_device *pdev)
{
int ret;
@@ -310,6 +318,13 @@ static int static_replicator_probe(struct platform_device *pdev)
return ret;
}
+static int __exit static_replicator_remove(struct platform_device *pdev)
+{
+ replicator_remove(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -343,24 +358,29 @@ static const struct of_device_id static_replicator_match[] = {
{}
};
+MODULE_DEVICE_TABLE(of, static_replicator_match);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_replicator_acpi_ids[] = {
{"ARMHC985", 0}, /* ARM CoreSight Static Replicator */
{}
};
+
+MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids);
#endif
static struct platform_driver static_replicator_driver = {
.probe = static_replicator_probe,
+ .remove = static_replicator_remove,
.driver = {
.name = "coresight-static-replicator",
+ .owner = THIS_MODULE,
.of_match_table = of_match_ptr(static_replicator_match),
.acpi_match_table = ACPI_PTR(static_replicator_acpi_ids),
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(static_replicator_driver);
static int dynamic_replicator_probe(struct amba_device *adev,
const struct amba_id *id)
@@ -368,19 +388,60 @@ static int dynamic_replicator_probe(struct amba_device *adev,
return replicator_probe(&adev->dev, &adev->res);
}
+static int __exit dynamic_replicator_remove(struct amba_device *adev)
+{
+ return replicator_remove(&adev->dev);
+}
+
static const struct amba_id dynamic_replicator_ids[] = {
CS_AMBA_ID(0x000bb909),
CS_AMBA_ID(0x000bb9ec), /* Coresight SoC-600 */
{},
};
+MODULE_DEVICE_TABLE(amba, dynamic_replicator_ids);
+
static struct amba_driver dynamic_replicator_driver = {
.drv = {
.name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
+ .owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = dynamic_replicator_probe,
+ .remove = dynamic_replicator_remove,
.id_table = dynamic_replicator_ids,
};
-builtin_amba_driver(dynamic_replicator_driver);
+
+static int __init replicator_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&static_replicator_driver);
+ if (ret) {
+ pr_info("Error registering platform driver\n");
+ return ret;
+ }
+
+ ret = amba_driver_register(&dynamic_replicator_driver);
+ if (ret) {
+ pr_info("Error registering amba driver\n");
+ platform_driver_unregister(&static_replicator_driver);
+ }
+
+ return ret;
+}
+
+static void __exit replicator_exit(void)
+{
+ platform_driver_unregister(&static_replicator_driver);
+ amba_driver_unregister(&dynamic_replicator_driver);
+}
+
+module_init(replicator_init);
+module_exit(replicator_exit);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight Replicator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 673d2f56ed1e..b0ad912651a9 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -412,6 +412,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
void __iomem *ch_addr;
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
+ unsigned int stm_flags;
if (!(drvdata && local_read(&drvdata->mode)))
return -EACCES;
@@ -421,8 +422,9 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
ch_addr = stm_channel_addr(drvdata, channel);
- flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
- flags |= test_bit(channel, drvdata->chs.guaranteed) ?
+ stm_flags = (flags & STP_PACKET_TIMESTAMPED) ?
+ STM_FLAG_TIMESTAMPED : 0;
+ stm_flags |= test_bit(channel, drvdata->chs.guaranteed) ?
STM_FLAG_GUARANTEED : 0;
if (size > drvdata->write_bytes)
@@ -432,7 +434,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
switch (packet) {
case STP_PACKET_FLAG:
- ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+ ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, stm_flags);
/*
* The generic STM core sets a size of '0' on flag packets.
@@ -444,7 +446,8 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
break;
case STP_PACKET_DATA:
- ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, flags);
+ stm_flags |= (flags & STP_PACKET_MARKED) ? STM_FLAG_MARKED : 0;
+ ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, stm_flags);
stm_send(ch_addr, payload, size,
drvdata->write_bytes);
break;
@@ -948,6 +951,17 @@ stm_unregister:
return ret;
}
+static int __exit stm_remove(struct amba_device *adev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+
+ stm_unregister_device(&drvdata->stm);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int stm_runtime_suspend(struct device *dev)
{
@@ -980,6 +994,8 @@ static const struct amba_id stm_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, stm_ids);
+
static struct amba_driver stm_driver = {
.drv = {
.name = "coresight-stm",
@@ -988,7 +1004,12 @@ static struct amba_driver stm_driver = {
.suppress_bind_attrs = true,
},
.probe = stm_probe,
+ .remove = stm_remove,
.id_table = stm_ids,
};
-builtin_amba_driver(stm_driver);
+module_amba_driver(stm_driver);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_DESCRIPTION("Arm CoreSight System Trace Macrocell driver");
+MODULE_LICENSE("GPL v2");
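The stm_flags rework above fixes a predicate bug: STP_PACKET_TIMESTAMPED is a single bit in a flags word, so an equality test matches only when it is the sole bit set. A small illustration (bit values hypothetical, standing in for the STP_PACKET_* flags):

	#include <linux/bits.h>
	#include <linux/types.h>

	#define EX_TIMESTAMPED	BIT(0)	/* stands in for STP_PACKET_TIMESTAMPED */
	#define EX_MARKED	BIT(1)	/* stands in for STP_PACKET_MARKED */

	static bool wants_timestamp(unsigned int flags)
	{
		/*
		 * With flags == (EX_TIMESTAMPED | EX_MARKED), the old test
		 * "flags == EX_TIMESTAMPED" is false and the timestamp
		 * request is silently lost; the mask test honours the bit.
		 */
		return flags & EX_TIMESTAMPED;
	}

The same hunk also starts honouring STP_PACKET_MARKED for data packets by folding it into stm_flags.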
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index 82afeaf2ccc4..34d2a2d31d00 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -102,6 +102,7 @@ int coresight_add_sysfs_link(struct coresight_sysfs_link *info)
return ret;
}
+EXPORT_SYMBOL_GPL(coresight_add_sysfs_link);
void coresight_remove_sysfs_link(struct coresight_sysfs_link *info)
{
@@ -122,6 +123,7 @@ void coresight_remove_sysfs_link(struct coresight_sysfs_link *info)
info->orig->nr_links--;
info->target->nr_links--;
}
+EXPORT_SYMBOL_GPL(coresight_remove_sysfs_link);
/*
* coresight_make_links: Make a link for a connection from a @orig
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 9ca3aaafcfbc..5653e0945c74 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -559,6 +559,21 @@ out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
+static int __exit tmc_remove(struct amba_device *adev)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ /*
+ * Since misc_open() holds a refcount on the f_ops, which is
+ * etb fops in this case, the device is there until the last
+ * file handle to this device is closed.
+ */
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb961),
/* Coresight SoC 600 TMC-ETR/ETS */
@@ -570,6 +585,8 @@ static const struct amba_id tmc_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, tmc_ids);
+
static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
@@ -578,6 +595,12 @@ static struct amba_driver tmc_driver = {
},
.probe = tmc_probe,
.shutdown = tmc_shutdown,
+ .remove = tmc_remove,
.id_table = tmc_ids,
};
-builtin_amba_driver(tmc_driver);
+
+module_amba_driver(tmc_driver);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 6375504ba8b0..44402d413ebb 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -519,7 +519,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
cur = buf->cur;
offset = buf->offset;
- barrier = barrier_pkt;
+ barrier = coresight_barrier_pkt;
/* for every byte to read */
for (i = 0; i < to_read; i += 4) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index b29c2db94d96..714f9e867e5f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -255,6 +255,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
}
+EXPORT_SYMBOL_GPL(tmc_free_sg_table);
/*
* Alloc pages for the table. Since this will be used by the device,
@@ -340,6 +341,7 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
return sg_table;
}
+EXPORT_SYMBOL_GPL(tmc_alloc_sg_table);
/*
* tmc_sg_table_sync_data_range: Sync the data buffer written
@@ -360,6 +362,7 @@ void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
PAGE_SIZE, DMA_FROM_DEVICE);
}
}
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_data_range);
/* tmc_sg_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
@@ -372,6 +375,7 @@ void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
dma_sync_single_for_device(real_dev, table_pages->daddrs[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
+EXPORT_SYMBOL_GPL(tmc_sg_table_sync_table);
/*
* tmc_sg_table_get_data: Get the buffer pointer for data @offset
@@ -401,6 +405,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
return len;
}
+EXPORT_SYMBOL_GPL(tmc_sg_table_get_data);
#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
@@ -766,6 +771,7 @@ tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
return NULL;
}
+EXPORT_SYMBOL_GPL(tmc_etr_get_catu_device);
static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
struct etr_buf *etr_buf)
@@ -788,10 +794,21 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
- [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
- ? &etr_catu_buf_ops : NULL,
+ [ETR_MODE_CATU] = NULL,
};
+void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
+{
+ etr_buf_ops[ETR_MODE_CATU] = catu;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_set_catu_ops);
+
+void tmc_etr_remove_catu_ops(void)
+{
+ etr_buf_ops[ETR_MODE_CATU] = NULL;
+}
+EXPORT_SYMBOL_GPL(tmc_etr_remove_catu_ops);
+
static inline int tmc_etr_mode_alloc_buf(int mode,
struct tmc_drvdata *drvdata,
struct etr_buf *etr_buf, int node,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6e8d2dc33d17..b91ec7dde7bc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -326,4 +326,7 @@ tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
+void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
+void tmc_etr_remove_catu_ops(void);
+
#endif
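The two hooks declared above exist because etr_buf_ops[] can no longer point at etr_catu_buf_ops at build time once CATU is allowed to be a module. The expected flow is for the CATU driver to install its buffer operations on load and remove them on unload; a hedged sketch (the CATU-side symbols are assumptions inferred from the hooks' signatures):

	static int __init catu_init(void)
	{
		int ret;

		ret = amba_driver_register(&catu_driver);	/* assumed driver object */
		if (!ret)
			tmc_etr_set_catu_ops(&etr_catu_buf_ops); /* assumed ops table */
		return ret;
	}

	static void __exit catu_exit(void)
	{
		tmc_etr_remove_catu_ops();
		amba_driver_unregister(&catu_driver);
	}

	module_init(catu_init);
	module_exit(catu_exit);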
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index f8583e4032a6..566c57e03596 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -173,6 +173,15 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return PTR_ERR(drvdata->csdev);
}
+static int __exit tpiu_remove(struct amba_device *adev)
+{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int tpiu_runtime_suspend(struct device *dev)
{
@@ -216,6 +225,8 @@ static const struct amba_id tpiu_ids[] = {
{ 0, 0},
};
+MODULE_DEVICE_TABLE(amba, tpiu_ids);
+
static struct amba_driver tpiu_driver = {
.drv = {
.name = "coresight-tpiu",
@@ -224,6 +235,13 @@ static struct amba_driver tpiu_driver = {
.suppress_bind_attrs = true,
},
.probe = tpiu_probe,
+ .remove = tpiu_remove,
.id_table = tpiu_ids,
};
-builtin_amba_driver(tpiu_driver);
+
+module_amba_driver(tpiu_driver);
+
+MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
+MODULE_DESCRIPTION("Arm CoreSight TPIU (Trace Port Interface Unit) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 21fdf0b93516..52acd77438ed 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -263,6 +263,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Alder Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Alder Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index d0e92a8a045c..aad594fe79cc 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -71,7 +71,7 @@ config STM_SOURCE_HEARTBEAT
config STM_SOURCE_FTRACE
tristate "Copy the output from kernel Ftrace to STM engine"
- depends on FUNCTION_TRACER
+ depends on TRACING
help
This option can be used to copy the output from kernel Ftrace
to STM engine. Enabling this option will introduce a slight
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index ce868e095410..3bb606dfa634 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -37,8 +37,10 @@ static void notrace
stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+ /* This is called from trace system with preemption disabled */
+ unsigned int cpu = smp_processor_id();
- stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
+ stm_source_write(&stm->data, STM_FTRACE_CHAN + cpu, buf, len);
}
static int stm_ftrace_link(struct stm_source_data *data)
@@ -46,6 +48,8 @@ static int stm_ftrace_link(struct stm_source_data *data)
struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
sf->ftrace.write = stm_ftrace_write;
+ sf->ftrace.flags = TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT
+ | TRACE_EXPORT_MARKER;
return register_ftrace_export(&sf->ftrace);
}
@@ -61,6 +65,7 @@ static int __init stm_ftrace_init(void)
{
int ret;
+ stm_ftrace.data.nr_chans = roundup_pow_of_two(num_possible_cpus());
ret = stm_source_register_device(NULL, &stm_ftrace.data);
if (ret)
pr_err("Failed to register stm_source - ftrace.\n");
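Two changes cooperate in this hunk: the ftrace source now reserves one STM channel per possible CPU, rounded up to a power of two (e.g. 6 possible CPUs reserve roundup_pow_of_two(6) == 8 channels), and each write goes to the channel owned by the executing CPU, so concurrent traces from different CPUs no longer interleave within a single channel. A sketch of the channel selection, not part of the patch:

	static unsigned int channel_for_this_cpu(void)
	{
		/* Preemption is off in the write path, so the CPU id is stable. */
		return STM_FTRACE_CHAN + smp_processor_id();
	}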
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index bae1dc08ec9a..438905e2a1d0 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -101,7 +101,6 @@ source "drivers/i2c/busses/Kconfig"
config I2C_STUB
tristate "I2C/SMBus Test Stub"
depends on m
- default 'n'
help
This module may be useful to developers of SMBus client drivers,
especially for certain kinds of sensor chips.
@@ -126,6 +125,14 @@ config I2C_SLAVE_EEPROM
This backend makes Linux behave like an I2C EEPROM. Please read
Documentation/i2c/slave-eeprom-backend.rst for further details.
+config I2C_SLAVE_TESTUNIT
+ tristate "I2C slave testunit driver"
+ help
+ This backend can be used to trigger test cases for I2C bus masters
+ which require a remote device with certain capabilities, e.g.
+ multi-master, SMBus Host Notify, etc. Please read
+ Documentation/i2c/slave-testunit-backend.rst for further details.
+
endif
config I2C_DEBUG_CORE
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index bed6ba63c983..c1d493dc9bac 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
+obj-$(CONFIG_I2C_SLAVE_TESTUNIT) += i2c-slave-testunit.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 293e7a0760e7..a97a9d058198 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -147,6 +147,7 @@ config I2C_I801
Tiger Lake (PCH)
Jasper Lake (SOC)
Emmitsburg (PCH)
+ Alder Lake (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -730,6 +731,19 @@ config I2C_LPC2K
This driver can also be built as a module. If so, the module
will be called i2c-lpc2k.
+config I2C_MLXBF
+ tristate "Mellanox BlueField I2C controller"
+ depends on MELLANOX_PLATFORM && ARM64
+ help
+ Enabling this option will add I2C SMBus support for the Mellanox
+ BlueField system.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-mlxbf.
+
+ This driver implements an I2C SMBus host controller and enables both
+ master and slave functions.
+
config I2C_MESON
tristate "Amlogic Meson I2C controller"
depends on ARCH_MESON || COMPILE_TEST
@@ -840,7 +854,6 @@ config I2C_PASEMI
config I2C_PCA_PLATFORM
tristate "PCA9564/PCA9665 as platform device"
select I2C_ALGOPCA
- default n
help
This driver supports a memory mapped Philips PCA9564/PCA9665
parallel bus to I2C bus controller.
@@ -1026,6 +1039,7 @@ config I2C_STM32F7
tristate "STMicroelectronics STM32F7 I2C support"
depends on ARCH_STM32 || COMPILE_TEST
select I2C_SLAVE
+ select I2C_SMBUS
help
Enable this option to add support for STM32 I2C controller embedded
in STM32F7 SoCs.
@@ -1181,6 +1195,8 @@ config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
+ select I2C_SMBUS
+ select RESET_CONTROLLER if ARCH_RCAR_GEN3
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
@@ -1240,7 +1256,6 @@ config I2C_TAOS_EVM
depends on TTY
select SERIO
select SERIO_SERPORT
- default n
help
This supports TAOS evaluation modules on serial port. In order to
use this driver, you will need the inputattach tool, which is part
@@ -1324,7 +1339,6 @@ config I2C_PCA_ISA
tristate "PCA9564/PCA9665 on an ISA bus"
depends on ISA
select I2C_ALGOPCA
- default n
help
This driver supports ISA boards using the Philips PCA9564/PCA9665
parallel bus to I2C bus controller.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 19aff0e45cb5..683c49faca05 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -140,6 +140,7 @@ obj-$(CONFIG_I2C_BRCMSTB) += i2c-brcmstb.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_ICY) += i2c-icy.o
+obj-$(CONFIG_I2C_MLXBF) += i2c-mlxbf.o
obj-$(CONFIG_I2C_MLXCPLD) += i2c-mlxcpld.o
obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 17df9e8845b6..506433bc0ff2 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -155,7 +155,7 @@ static int i2c_amd_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct amd_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
int i;
struct i2c_msg *pmsg;
- int err;
+ int err = 0;
/* the adapter might have been deleted while waiting for the bus lock */
if (unlikely(!i2c_dev->common.mp2_dev))
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 5dc519516292..37443edbf754 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -421,11 +421,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c_dev->regs);
mclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(mclk)) {
- if (PTR_ERR(mclk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get clock\n");
- return PTR_ERR(mclk);
- }
+ if (IS_ERR(mclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
+ "Could not get clock\n");
i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
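dev_err_probe() folds the open-coded -EPROBE_DEFER special case into one call: it returns the error unchanged, logs through dev_err() only when the error is not a deferral, and records the deferral reason (visible in debugfs) otherwise. A simplified behavioural sketch, not the real implementation:

	static int dev_err_probe_sketch(const struct device *dev, int err,
					const char *msg)
	{
		if (err != -EPROBE_DEFER)
			dev_err(dev, "error %pe: %s\n", ERR_PTR(err), msg);
		else
			dev_dbg(dev, "deferred: %s\n", msg); /* reason also saved */
		return err;
	}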
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 44974b53a626..0d15f4c1e9f7 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
u32 raw_stat, stat, enabled, tmp;
u8 val = 0, slave_activity;
- regmap_read(dev->map, DW_IC_INTR_STAT, &stat);
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat);
regmap_read(dev->map, DW_IC_STATUS, &tmp);
@@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0;
+ stat = i2c_dw_read_clear_intrbits_slave(dev);
dev_dbg(dev->dev,
"%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n",
enabled, slave_activity, raw_stat, stat);
- if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET))
- i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val);
+ if (stat & DW_IC_INTR_RX_FULL) {
+ if (dev->status != STATUS_WRITE_IN_PROGRESS) {
+ dev->status = STATUS_WRITE_IN_PROGRESS;
+ i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED,
+ &val);
+ }
+
+ regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
+ val = tmp;
+ if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
+ &val))
+ dev_vdbg(dev->dev, "Byte %X acked!", val);
+ }
if (stat & DW_IC_INTR_RD_REQ) {
if (slave_activity) {
- if (stat & DW_IC_INTR_RX_FULL) {
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
- val = tmp;
-
- if (!i2c_slave_event(dev->slave,
- I2C_SLAVE_WRITE_RECEIVED,
- &val)) {
- dev_vdbg(dev->dev, "Byte %X acked!",
- val);
- }
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- } else {
- regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
- regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- }
+ regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp);
+
+ dev->status = STATUS_READ_IN_PROGRESS;
if (!i2c_slave_event(dev->slave,
I2C_SLAVE_READ_REQUESTED,
&val))
@@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED,
&val))
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp);
-
- i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
- return 1;
}
- if (stat & DW_IC_INTR_RX_FULL) {
- regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
- val = tmp;
- if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED,
- &val))
- dev_vdbg(dev->dev, "Byte %X acked!", val);
- } else {
+ if (stat & DW_IC_INTR_STOP_DET) {
+ dev->status = STATUS_IDLE;
i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
- stat = i2c_dw_read_clear_intrbits_slave(dev);
}
return 1;
@@ -230,7 +217,6 @@ static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
struct dw_i2c_dev *dev = dev_id;
int ret;
- i2c_dw_read_clear_intrbits_slave(dev);
ret = i2c_dw_irq_handler_slave(dev);
if (ret > 0)
complete(&dev->cmd_complete);
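The rewritten handler replaces guesswork over interrupt-bit combinations with an explicit per-transfer state in dev->status: the first RX_FULL byte of a master write raises WRITE_REQUESTED exactly once, RD_REQ switches to the read state, and STOP_DET returns to idle. Condensed to its skeleton (names as in the hunk above):

	if (stat & DW_IC_INTR_RX_FULL) {
		if (dev->status != STATUS_WRITE_IN_PROGRESS) {
			dev->status = STATUS_WRITE_IN_PROGRESS;
			i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val);
		}
		/* ...then deliver the byte as I2C_SLAVE_WRITE_RECEIVED... */
	}
	if (stat & DW_IC_INTR_RD_REQ)
		dev->status = STATUS_READ_IN_PROGRESS;
	if (stat & DW_IC_INTR_STOP_DET) {
		dev->status = STATUS_IDLE;
		i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val);
	}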
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index 838ce0947191..f6e13ceeb2b3 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -332,21 +332,15 @@ static int efm32_i2c_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to determine base address\n");
- return -ENODEV;
- }
+ ddata->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
if (resource_size(res) < 0x42) {
dev_err(&pdev->dev, "memory resource too small\n");
return -EINVAL;
}
- ddata->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ddata->base))
- return PTR_ERR(ddata->base);
-
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
if (!ret)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index bffca729e1c7..ae90713443fa 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
* Tiger Lake-H (PCH) 0x43a3 32 hard yes yes yes
* Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
* Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes
+ * Alder Lake-S (PCH) 0x7aa3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -228,6 +229,7 @@
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -1081,6 +1083,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
{ 0, }
};
@@ -1274,6 +1277,7 @@ static const struct {
/*
* Additional individual entries were added after verification.
*/
+ { "Latitude 5480", 0x29 },
{ "Vostro V131", 0x1d },
};
@@ -1767,6 +1771,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
+ case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0ab5381aa012..c98529c76348 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1159,11 +1159,9 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Get I2C clock */
i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2c_imx->clk)) {
- if (PTR_ERR(i2c_imx->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "can't get I2C clock\n");
- return PTR_ERR(i2c_imx->clk);
- }
+ if (IS_ERR(i2c_imx->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c_imx->clk),
+ "can't get I2C clock\n");
ret = clk_prepare_enable(i2c_imx->clk);
if (ret) {
@@ -1171,14 +1169,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
return ret;
}
- /* Request IRQ */
- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
- pdev->name, i2c_imx);
- if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", irq);
- goto clk_disable;
- }
-
/* Init queue */
init_waitqueue_head(&i2c_imx->queue);
@@ -1197,6 +1187,14 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret < 0)
goto rpm_disable;
+ /* Request IRQ */
+ ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
+ pdev->name, i2c_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+ goto rpm_disable;
+ }
+
/* Set up clock divider */
i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(pdev->dev.of_node,
@@ -1239,13 +1237,12 @@ static int i2c_imx_probe(struct platform_device *pdev)
clk_notifier_unregister:
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ free_irq(irq, i2c_imx);
rpm_disable:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
-
-clk_disable:
clk_disable_unprepare(i2c_imx->clk);
return ret;
}
@@ -1253,7 +1250,7 @@ clk_disable:
static int i2c_imx_remove(struct platform_device *pdev)
{
struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
- int ret;
+ int irq, ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
@@ -1273,6 +1270,9 @@ static int i2c_imx_remove(struct platform_device *pdev)
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0)
+ free_irq(irq, i2c_imx);
clk_disable_unprepare(i2c_imx->clk);
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 2f95e25a10f7..a35a27c320e7 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -77,6 +77,7 @@
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
#define PCI_DEVICE_ID_INTEL_CDF_SMT 0x18ac
#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
+#define PCI_DEVICE_ID_INTEL_EBG_SMT 0x1bff
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -176,14 +177,12 @@ struct ismt_priv {
u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
};
-/**
- * ismt_ids - PCI device IDs supported by this driver
- */
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
@@ -197,6 +196,8 @@ MODULE_PARM_DESC(bus_speed, "Bus Speed in kHz (0 = BIOS default)");
/**
* __ismt_desc_dump() - dump the contents of a specific descriptor
+ * @dev: the iSMT device
+ * @desc: the iSMT hardware descriptor
*/
static void __ismt_desc_dump(struct device *dev, const struct ismt_desc *desc)
{
@@ -628,11 +629,6 @@ static u32 ismt_func(struct i2c_adapter *adap)
I2C_FUNC_SMBUS_PEC;
}
-/**
- * smbus_algorithm - the adapter algorithm and supported functionality
- * @smbus_xfer: the adapter algorithm
- * @functionality: functionality supported by the adapter
- */
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = ismt_access,
.functionality = ismt_func,
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index ba831df6661e..cb4a25ebb890 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -752,6 +752,7 @@ static const struct ingenic_i2c_config x1000_i2c_config = {
};
static const struct of_device_id jz4780_i2c_of_matches[] = {
+ { .compatible = "ingenic,jz4770-i2c", .data = &jz4780_i2c_config },
{ .compatible = "ingenic,jz4780-i2c", .data = &jz4780_i2c_config },
{ .compatible = "ingenic,x1000-i2c", .data = &x1000_i2c_config },
{ /* sentinel */ }
@@ -856,7 +857,7 @@ static struct platform_driver jz4780_i2c_driver = {
.remove = jz4780_i2c_remove,
.driver = {
.name = "jz4780-i2c",
- .of_match_table = of_match_ptr(jz4780_i2c_of_matches),
+ .of_match_table = jz4780_i2c_of_matches,
},
};
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
new file mode 100644
index 000000000000..33574d40ea9c
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -0,0 +1,2474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mellanox BlueField I2C bus driver
+ *
+ * Copyright (C) 2020 Mellanox Technologies, Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+/* Defines what functionality is present. */
+#define MLXBF_I2C_FUNC_SMBUS_BLOCK \
+ (I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL)
+
+#define MLXBF_I2C_FUNC_SMBUS_DEFAULT \
+ (I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | \
+ I2C_FUNC_SMBUS_PROC_CALL)
+
+#define MLXBF_I2C_FUNC_ALL \
+ (MLXBF_I2C_FUNC_SMBUS_DEFAULT | MLXBF_I2C_FUNC_SMBUS_BLOCK | \
+ I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SLAVE)
+
+#define MLXBF_I2C_SMBUS_MAX 3
+
+/* Shared resources info in BlueField platforms. */
+
+#define MLXBF_I2C_COALESCE_TYU_ADDR 0x02801300
+#define MLXBF_I2C_COALESCE_TYU_SIZE 0x010
+
+#define MLXBF_I2C_GPIO_TYU_ADDR 0x02802000
+#define MLXBF_I2C_GPIO_TYU_SIZE 0x100
+
+#define MLXBF_I2C_COREPLL_TYU_ADDR 0x02800358
+#define MLXBF_I2C_COREPLL_TYU_SIZE 0x008
+
+#define MLXBF_I2C_COREPLL_YU_ADDR 0x02800c30
+#define MLXBF_I2C_COREPLL_YU_SIZE 0x00c
+
+#define MLXBF_I2C_SHARED_RES_MAX 3
+
+/*
+ * Note that the following SMBus, CAUSE, GPIO and PLL register addresses
+ * refer to their respective offsets relative to the corresponding
+ * memory-mapped region whose addresses are specified in the DT, the
+ * ACPI tables, or the constants above.
+ */
+
+/*
+ * SMBus Master core clock frequency. Timing configurations are
+ * strongly dependent on the core clock frequency of the SMBus
+ * Master. Default value is set to 400MHz.
+ */
+#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
+/* Reference clock for Bluefield - 156 MHz. */
+#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
+
+/* Constant used to determine the PLL frequency. */
+#define MLNXBF_I2C_COREPLL_CONST 16384
+
+/* PLL registers. */
+#define MLXBF_I2C_CORE_PLL_REG0 0x0
+#define MLXBF_I2C_CORE_PLL_REG1 0x4
+#define MLXBF_I2C_CORE_PLL_REG2 0x8
+
+/* OR cause register. */
+#define MLXBF_I2C_CAUSE_OR_EVTEN0 0x14
+#define MLXBF_I2C_CAUSE_OR_CLEAR 0x18
+
+/* Arbiter Cause Register. */
+#define MLXBF_I2C_CAUSE_ARBITER 0x1c
+
+/*
+ * Cause Status flags. Note that those bits might be considered
+ * as interrupt enabled bits.
+ */
+
+/* Transaction ended with STOP. */
+#define MLXBF_I2C_CAUSE_TRANSACTION_ENDED BIT(0)
+/* Master arbitration lost. */
+#define MLXBF_I2C_CAUSE_M_ARBITRATION_LOST BIT(1)
+/* Unexpected start detected. */
+#define MLXBF_I2C_CAUSE_UNEXPECTED_START BIT(2)
+/* Unexpected stop detected. */
+#define MLXBF_I2C_CAUSE_UNEXPECTED_STOP BIT(3)
+/* Wait for transfer continuation. */
+#define MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA BIT(4)
+/* Failed to generate STOP. */
+#define MLXBF_I2C_CAUSE_PUT_STOP_FAILED BIT(5)
+/* Failed to generate START. */
+#define MLXBF_I2C_CAUSE_PUT_START_FAILED BIT(6)
+/* Clock toggle completed. */
+#define MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE BIT(7)
+/* Transfer timeout occurred. */
+#define MLXBF_I2C_CAUSE_M_FW_TIMEOUT BIT(8)
+/* Master busy bit reset. */
+#define MLXBF_I2C_CAUSE_M_GW_BUSY_FALL BIT(9)
+
+#define MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK GENMASK(9, 0)
+
+#define MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR \
+ (MLXBF_I2C_CAUSE_M_ARBITRATION_LOST | \
+ MLXBF_I2C_CAUSE_UNEXPECTED_START | \
+ MLXBF_I2C_CAUSE_UNEXPECTED_STOP | \
+ MLXBF_I2C_CAUSE_PUT_STOP_FAILED | \
+ MLXBF_I2C_CAUSE_PUT_START_FAILED | \
+ MLXBF_I2C_CAUSE_CLK_TOGGLE_DONE | \
+ MLXBF_I2C_CAUSE_M_FW_TIMEOUT)
+
+/*
+ * Slave cause status flags. Note that those bits might be considered
+ * as interrupt enabled bits.
+ */
+
+/* Write transaction received successfully. */
+#define MLXBF_I2C_CAUSE_WRITE_SUCCESS BIT(0)
+/* Read transaction received, waiting for response. */
+#define MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE BIT(13)
+/* Slave busy bit reset. */
+#define MLXBF_I2C_CAUSE_S_GW_BUSY_FALL BIT(18)
+
+#define MLXBF_I2C_CAUSE_SLAVE_ARBITER_BITS_MASK GENMASK(20, 0)
+
+/* Cause coalesce registers. */
+#define MLXBF_I2C_CAUSE_COALESCE_0 0x00
+#define MLXBF_I2C_CAUSE_COALESCE_1 0x04
+#define MLXBF_I2C_CAUSE_COALESCE_2 0x08
+
+#define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT MLXBF_I2C_SMBUS_MAX
+#define MLXBF_I2C_CAUSE_YU_SLAVE_BIT 1
+
+/* Functional enable register. */
+#define MLXBF_I2C_GPIO_0_FUNC_EN_0 0x28
+/* Force OE enable register. */
+#define MLXBF_I2C_GPIO_0_FORCE_OE_EN 0x30
+/*
+ * Note that SMBus GWs are on GPIOs 30:25. Two pins are used to control
+ * SDA/SCL lines:
+ *
+ * SMBUS GW0 -> bits[26:25]
+ * SMBUS GW1 -> bits[28:27]
+ * SMBUS GW2 -> bits[30:29]
+ */
+#define MLXBF_I2C_GPIO_SMBUS_GW_PINS(num) (25 + ((num) << 1))
+
+/* Note that the SMBus GW number (num) can be 0, 1 or 2. */
+#define MLXBF_I2C_GPIO_SMBUS_GW_MASK(num) \
+ (0xffffffff & (~(0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num))))
+
+#define MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(num, val) \
+ ((val) & MLXBF_I2C_GPIO_SMBUS_GW_MASK(num))
+
+#define MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(num, val) \
+ ((val) | (0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num)))
+
+/* SMBus timing parameters. */
+#define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00
+#define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04
+#define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10
+#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
+#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
+
+enum {
+ MLXBF_I2C_TIMING_100KHZ = 100000,
+ MLXBF_I2C_TIMING_400KHZ = 400000,
+ MLXBF_I2C_TIMING_1000KHZ = 1000000,
+};
+
+/*
+ * Defines SMBus operating frequency and core clock frequency.
+ * According to ADB files, default values are compliant with 100KHz SMBus
+ * @ 400MHz core clock. The driver should be able to calculate core
+ * frequency based on PLL parameters.
+ */
+#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
+
+/* Core PLL TYU configuration. */
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
+
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
+
+/* Core PLL YU configuration. */
+#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
+#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
+
+#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
+#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
+#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
+
+/* Core PLL frequency. */
+static u64 mlxbf_i2c_corepll_frequency;
+
+/* SMBus Master GW. */
+#define MLXBF_I2C_SMBUS_MASTER_GW 0x200
+/* Number of bytes received and sent. */
+#define MLXBF_I2C_SMBUS_RS_BYTES 0x300
+/* Packet error check (PEC) value. */
+#define MLXBF_I2C_SMBUS_MASTER_PEC 0x304
+/* Status bits (ACK/NACK/FW Timeout). */
+#define MLXBF_I2C_SMBUS_MASTER_STATUS 0x308
+/* SMBus Master Finite State Machine. */
+#define MLXBF_I2C_SMBUS_MASTER_FSM 0x310
+
+/*
+ * When enabled, the master will issue a stop condition in case of
+ * timeout while waiting for FW response.
+ */
+#define MLXBF_I2C_SMBUS_EN_FW_TIMEOUT 0x31c
+
+/* SMBus master GW control bits offset in MLXBF_I2C_SMBUS_MASTER_GW[31:3]. */
+#define MLXBF_I2C_MASTER_LOCK_BIT BIT(31) /* Lock bit. */
+#define MLXBF_I2C_MASTER_BUSY_BIT BIT(30) /* Busy bit. */
+#define MLXBF_I2C_MASTER_START_BIT BIT(29) /* Control start. */
+#define MLXBF_I2C_MASTER_CTL_WRITE_BIT BIT(28) /* Control write phase. */
+#define MLXBF_I2C_MASTER_CTL_READ_BIT BIT(19) /* Control read phase. */
+#define MLXBF_I2C_MASTER_STOP_BIT BIT(3) /* Control stop. */
+
+#define MLXBF_I2C_MASTER_ENABLE \
+ (MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \
+ MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT)
+
+#define MLXBF_I2C_MASTER_ENABLE_WRITE \
+ (MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT)
+
+#define MLXBF_I2C_MASTER_ENABLE_READ \
+ (MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_READ_BIT)
+
+#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT 12 /* Slave address shift. */
+#define MLXBF_I2C_MASTER_WRITE_SHIFT 21 /* Control write bytes shift. */
+#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT 20 /* Send PEC byte shift. */
+#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT 11 /* Parse expected bytes shift. */
+#define MLXBF_I2C_MASTER_READ_SHIFT 4 /* Control read bytes shift. */
+
+/* SMBus master GW Data descriptor. */
+#define MLXBF_I2C_MASTER_DATA_DESC_ADDR 0x280
+#define MLXBF_I2C_MASTER_DATA_DESC_SIZE 0x80 /* Size in bytes. */
+
+/* Maximum bytes to read/write per SMBus transaction. */
+#define MLXBF_I2C_MASTER_DATA_R_LENGTH MLXBF_I2C_MASTER_DATA_DESC_SIZE
+#define MLXBF_I2C_MASTER_DATA_W_LENGTH (MLXBF_I2C_MASTER_DATA_DESC_SIZE - 1)
+
+/* All bytes were transmitted. */
+#define MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE BIT(0)
+/* NACK received. */
+#define MLXBF_I2C_SMBUS_STATUS_NACK_RCV BIT(1)
+/* Slave's byte count >128 bytes. */
+#define MLXBF_I2C_SMBUS_STATUS_READ_ERR BIT(2)
+/* Timeout occurred. */
+#define MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT BIT(3)
+
+#define MLXBF_I2C_SMBUS_MASTER_STATUS_MASK GENMASK(3, 0)
+
+#define MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR \
+ (MLXBF_I2C_SMBUS_STATUS_NACK_RCV | \
+ MLXBF_I2C_SMBUS_STATUS_READ_ERR | \
+ MLXBF_I2C_SMBUS_STATUS_FW_TIMEOUT)
+
+#define MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK BIT(31)
+#define MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK BIT(15)
+
+/* SMBus slave GW. */
+#define MLXBF_I2C_SMBUS_SLAVE_GW 0x400
+/* Number of bytes received and sent from/to master. */
+#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x500
+/* Packet error check (PEC) value. */
+#define MLXBF_I2C_SMBUS_SLAVE_PEC 0x504
+/* SMBus slave Finite State Machine (FSM). */
+#define MLXBF_I2C_SMBUS_SLAVE_FSM 0x510
+/*
+ * Should be set when all raised causes are handled; cleared by HW on
+ * every new cause.
+ */
+#define MLXBF_I2C_SMBUS_SLAVE_READY 0x52c
+
+/* SMBus slave GW control bits offset in MLXBF_I2C_SMBUS_SLAVE_GW[31:19]. */
+#define MLXBF_I2C_SLAVE_BUSY_BIT BIT(30) /* Busy bit. */
+#define MLXBF_I2C_SLAVE_WRITE_BIT BIT(29) /* Control write enable. */
+
+#define MLXBF_I2C_SLAVE_ENABLE \
+ (MLXBF_I2C_SLAVE_BUSY_BIT | MLXBF_I2C_SLAVE_WRITE_BIT)
+
+#define MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT 22 /* Number of bytes to write. */
+#define MLXBF_I2C_SLAVE_SEND_PEC_SHIFT 21 /* Send PEC byte shift. */
+
+/* SMBus slave GW Data descriptor. */
+#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR 0x480
+#define MLXBF_I2C_SLAVE_DATA_DESC_SIZE 0x80 /* Size in bytes. */
+
+/* SMBus slave configuration registers. */
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG 0x514
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT 16
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT 7
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK GENMASK(6, 0)
+
+#define MLXBF_I2C_SLAVE_ADDR_ENABLED(addr) \
+ ((addr) & (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT))
+
+/*
+ * Timeout is given in microseconds. Note also that timeout handling is not
+ * exact.
+ */
+#define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */
+
+/* Encapsulates timing parameters. */
+struct mlxbf_i2c_timings {
+ u16 scl_high; /* Clock high period. */
+ u16 scl_low; /* Clock low period. */
+ u8 sda_rise; /* Data rise time. */
+ u8 sda_fall; /* Data fall time. */
+ u8 scl_rise; /* Clock rise time. */
+ u8 scl_fall; /* Clock fall time. */
+ u16 hold_start; /* Hold time after (REPEATED) START. */
+ u16 hold_data; /* Data hold time. */
+ u16 setup_start; /* REPEATED START condition setup time. */
+ u16 setup_stop; /* STOP condition setup time. */
+ u16 setup_data; /* Data setup time. */
+ u16 pad; /* Padding. */
+ u16 buf; /* Bus free time between STOP and START. */
+ u16 thigh_max; /* Thigh max. */
+ u32 timeout; /* Detect clock low timeout. */
+};
+
+enum {
+ MLXBF_I2C_F_READ = BIT(0),
+ MLXBF_I2C_F_WRITE = BIT(1),
+ MLXBF_I2C_F_NORESTART = BIT(3),
+ MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
+ MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
+ MLXBF_I2C_F_SMBUS_PEC = BIT(6),
+ MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+};
+
+struct mlxbf_i2c_smbus_operation {
+ u32 flags;
+ u32 length; /* Buffer length in bytes. */
+ u8 *buffer;
+};
+
+#define MLXBF_I2C_SMBUS_OP_CNT_1 1
+#define MLXBF_I2C_SMBUS_OP_CNT_2 2
+#define MLXBF_I2C_SMBUS_OP_CNT_3 3
+#define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3
+
+struct mlxbf_i2c_smbus_request {
+ u8 slave;
+ u8 operation_cnt;
+ struct mlxbf_i2c_smbus_operation operation[MLXBF_I2C_SMBUS_MAX_OP_CNT];
+};
+
+struct mlxbf_i2c_resource {
+ void __iomem *io;
+ struct resource *params;
+ struct mutex *lock; /* Mutex to protect mlxbf_i2c_resource. */
+ u8 type;
+};
+
+/* List of chip resources that are being accessed by the driver. */
+enum {
+ MLXBF_I2C_SMBUS_RES,
+ MLXBF_I2C_MST_CAUSE_RES,
+ MLXBF_I2C_SLV_CAUSE_RES,
+ MLXBF_I2C_COALESCE_RES,
+ MLXBF_I2C_COREPLL_RES,
+ MLXBF_I2C_GPIO_RES,
+ MLXBF_I2C_END_RES,
+};
+
+/* Helper macro to define I2C resource parameters. */
+#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
+ { \
+ .start = (addr), \
+ .end = (addr) + (size) - 1, \
+ .name = (str) \
+ }
+
+static struct resource mlxbf_i2c_coalesce_tyu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COALESCE_TYU_ADDR,
+ MLXBF_I2C_COALESCE_TYU_SIZE,
+ "COALESCE_MEM");
+static struct resource mlxbf_i2c_corepll_tyu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_TYU_ADDR,
+ MLXBF_I2C_COREPLL_TYU_SIZE,
+ "COREPLL_MEM");
+static struct resource mlxbf_i2c_corepll_yu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_YU_ADDR,
+ MLXBF_I2C_COREPLL_YU_SIZE,
+ "COREPLL_MEM");
+static struct resource mlxbf_i2c_gpio_tyu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_GPIO_TYU_ADDR,
+ MLXBF_I2C_GPIO_TYU_SIZE,
+ "GPIO_MEM");
+
+static struct mutex mlxbf_i2c_coalesce_lock;
+static struct mutex mlxbf_i2c_corepll_lock;
+static struct mutex mlxbf_i2c_gpio_lock;
+
+/* Mellanox BlueField chip type. */
+enum mlxbf_i2c_chip_type {
+ MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
+ MLXBF_I2C_CHIP_TYPE_2, /* Mallanox BlueField-2 chip. */
+};
+
+struct mlxbf_i2c_chip_info {
+ enum mlxbf_i2c_chip_type type;
+ /* Chip shared resources that are being used by the I2C controller. */
+ struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];
+
+ /* Callback to calculate the core PLL frequency. */
+ u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);
+};
+
+struct mlxbf_i2c_priv {
+ const struct mlxbf_i2c_chip_info *chip;
+ struct i2c_adapter adap;
+ struct mlxbf_i2c_resource *smbus;
+ struct mlxbf_i2c_resource *mst_cause;
+ struct mlxbf_i2c_resource *slv_cause;
+ struct mlxbf_i2c_resource *coalesce;
+ u64 frequency; /* Core frequency in Hz. */
+ int bus; /* Physical bus identifier. */
+ int irq;
+ struct i2c_client *slave;
+};
+
+static struct mlxbf_i2c_resource mlxbf_i2c_coalesce_res[] = {
+ [MLXBF_I2C_CHIP_TYPE_1] = {
+ .params = &mlxbf_i2c_coalesce_tyu_params,
+ .lock = &mlxbf_i2c_coalesce_lock,
+ .type = MLXBF_I2C_COALESCE_RES
+ },
+ {}
+};
+
+static struct mlxbf_i2c_resource mlxbf_i2c_corepll_res[] = {
+ [MLXBF_I2C_CHIP_TYPE_1] = {
+ .params = &mlxbf_i2c_corepll_tyu_params,
+ .lock = &mlxbf_i2c_corepll_lock,
+ .type = MLXBF_I2C_COREPLL_RES
+ },
+ [MLXBF_I2C_CHIP_TYPE_2] = {
+ .params = &mlxbf_i2c_corepll_yu_params,
+ .lock = &mlxbf_i2c_corepll_lock,
+ .type = MLXBF_I2C_COREPLL_RES,
+ }
+};
+
+static struct mlxbf_i2c_resource mlxbf_i2c_gpio_res[] = {
+ [MLXBF_I2C_CHIP_TYPE_1] = {
+ .params = &mlxbf_i2c_gpio_tyu_params,
+ .lock = &mlxbf_i2c_gpio_lock,
+ .type = MLXBF_I2C_GPIO_RES
+ },
+ {}
+};
+
+static u8 mlxbf_i2c_bus_count;
+
+static struct mutex mlxbf_i2c_bus_lock;
+
+/* Polling interval in microseconds. */
+#define MLXBF_I2C_POLL_FREQ_IN_USEC 200
+
+#define MLXBF_I2C_SHIFT_0 0
+#define MLXBF_I2C_SHIFT_8 8
+#define MLXBF_I2C_SHIFT_16 16
+#define MLXBF_I2C_SHIFT_24 24
+
+#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
+#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+
+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
+
+/*
+ * Function to poll a set of bits at a specific address; it checks whether
+ * the bits are equal to zero when eq_zero is set to 'true', and not equal
+ * to zero when eq_zero is set to 'false'.
+ * Note that the timeout is given in microseconds.
+ */
+static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
+ bool eq_zero, u32 timeout)
+{
+ u32 bits;
+
+ timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1;
+
+ do {
+ bits = readl(io + addr) & mask;
+ if (eq_zero ? bits == 0 : bits != 0)
+ return eq_zero ? 1 : bits;
+ udelay(MLXBF_I2C_POLL_FREQ_IN_USEC);
+ } while (timeout-- != 0);
+
+ return 0;
+}
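+
+/*
+ * Usage sketch (this mirrors the busy-bit wait used further below): a
+ * zero return means the polled condition was not met before the
+ * timeout expired.
+ *
+ *	done = mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ *				MLXBF_I2C_MASTER_BUSY_BIT, true,
+ *				MLXBF_I2C_SMBUS_TIMEOUT);
+ */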
+
+/*
+ * SW must make sure that the SMBus Master GW is idle before starting
+ * a transaction. Accordingly, this function polls the Master FSM stop
+ * bit; it returns true once the bit clears, false if it is still
+ * asserted when the timeout expires.
+ */
+static bool mlxbf_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
+{
+ u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK;
+ u32 addr = MLXBF_I2C_SMBUS_MASTER_FSM;
+ u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT;
+
+ if (mlxbf_smbus_poll(priv->smbus->io, addr, mask, true, timeout))
+ return true;
+
+ return false;
+}
+
+static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
+ u32 cause_status)
+{
+ /*
+ * When transaction ended with STOP, all bytes were transmitted,
+ * and no NACK received, then the transaction ended successfully.
+ * On the other hand, when the GW is configured with the stop bit
+ * de-asserted then the SMBus expects the following GW configuration
+ * for transfer continuation.
+ */
+ if ((cause_status & MLXBF_I2C_CAUSE_WAIT_FOR_FW_DATA) ||
+ ((cause_status & MLXBF_I2C_CAUSE_TRANSACTION_ENDED) &&
+ (master_status & MLXBF_I2C_SMBUS_STATUS_BYTE_CNT_DONE) &&
+ !(master_status & MLXBF_I2C_SMBUS_STATUS_NACK_RCV)))
+ return true;
+
+ return false;
+}
+
+/*
+ * Poll SMBus master status and return transaction status,
+ * i.e. whether succeeded or failed. I2C and SMBus fault codes
+ * are returned as negative numbers from most calls, with zero
+ * or some positive number indicating a non-fault return.
+ */
+static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
+{
+ u32 master_status_bits;
+ u32 cause_status_bits;
+
+ /*
+ * GW busy bit is raised by the driver and cleared by the HW
+ * when the transaction is completed. The busy bit is a good
+ * indicator of transaction status. So poll the busy bit, and
+ * then read the cause and master status bits to determine if
+ * errors occurred during the transaction.
+ */
+ mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ MLXBF_I2C_MASTER_BUSY_BIT, true,
+ MLXBF_I2C_SMBUS_TIMEOUT);
+
+ /* Read cause status bits. */
+ cause_status_bits = readl(priv->mst_cause->io +
+ MLXBF_I2C_CAUSE_ARBITER);
+ cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK;
+
+ /*
+ * Parse both Cause and Master GW bits, then return transaction status.
+ */
+
+ master_status_bits = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_MASTER_STATUS);
+ master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;
+
+ if (mlxbf_i2c_smbus_transaction_success(master_status_bits,
+ cause_status_bits))
+ return 0;
+
+ /*
+ * In case of timeout on GW busy, the ISR will clear busy bit but
+ * transaction ended bits cause will not be set so the transaction
+ * fails. Then, we must check Master GW status bits.
+ */
+ if ((master_status_bits & MLXBF_I2C_SMBUS_MASTER_STATUS_ERROR) &&
+ (cause_status_bits & (MLXBF_I2C_CAUSE_TRANSACTION_ENDED |
+ MLXBF_I2C_CAUSE_M_GW_BUSY_FALL)))
+ return -EIO;
+
+ if (cause_status_bits & MLXBF_I2C_CAUSE_MASTER_STATUS_ERROR)
+ return -EAGAIN;
+
+ return -ETIMEDOUT;
+}
+
+static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
+ const u8 *data, u8 length, u32 addr)
+{
+ u8 offset, aligned_length;
+ u32 data32;
+
+ aligned_length = round_up(length, 4);
+
+ /*
+ * Copy data bytes from 4-byte aligned source buffer.
+ * Data copied to the Master GW Data Descriptor MUST be shifted
+ * left so the data starts at the MSB of the descriptor registers
+ * as required by the underlying hardware. Enable byte swapping
+ * when writing data bytes to the 32 * 32-bit HW Data registers
+ * a.k.a Master GW Data Descriptor.
+ */
+ for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
+ data32 = *((u32 *)(data + offset));
+ iowrite32be(data32, priv->smbus->io + addr + offset);
+ }
+}
+
+static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
+ u8 *data, u8 length, u32 addr)
+{
+ u32 data32, mask;
+ u8 byte, offset;
+
+ mask = sizeof(u32) - 1;
+
+ /*
+ * Data bytes in the Master GW Data Descriptor are shifted left
+ * so the data starts at the MSB of the descriptor registers as
+ * set by the underlying hardware. Enable byte swapping while
+ * reading data bytes from the 32 * 32-bit HW Data registers
+ * a.k.a Master GW Data Descriptor.
+ */
+
+ for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
+ data32 = ioread32be(priv->smbus->io + addr + offset);
+ *((u32 *)(data + offset)) = data32;
+ }
+
+ if (!(length & mask))
+ return;
+
+ data32 = ioread32be(priv->smbus->io + addr + offset);
+
+ for (byte = 0; byte < (length & mask); byte++) {
+ data[offset + byte] = data32 & GENMASK(7, 0);
+ data32 = ror32(data32, MLXBF_I2C_SHIFT_8);
+ }
+}
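+
+/*
+ * Worked example (little-endian CPU): for length = 6 the loop above
+ * copies one full word, then one big-endian word is read for the tail;
+ * byte 4 is taken from bits [7:0], ror32() by 8 exposes byte 5, and the
+ * last two bytes of that word are discarded.
+ */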
+
+static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
+ u8 len, u8 block_en, u8 pec_en, bool read)
+{
+ u32 command;
+
+ /* Set Master GW control word. */
+ if (read) {
+ command = MLXBF_I2C_MASTER_ENABLE_READ;
+ command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT);
+ } else {
+ command = MLXBF_I2C_MASTER_ENABLE_WRITE;
+ command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT);
+ }
+ command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT);
+ command |= rol32(block_en, MLXBF_I2C_MASTER_PARSE_EXP_SHIFT);
+ command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);
+
+ /* Clear status bits. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
+ /* Set the cause data. */
+ writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ /* Zero PEC byte. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
+ /* Zero byte count. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES);
+
+ /* GW activation. */
+ writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
+
+ /*
+ * Poll master status and check status bits. An ACK is sent when
+ * completing writing data to the bus (Master 'byte_count_done' bit
+ * is set to 1).
+ */
+ return mlxbf_i2c_smbus_check_status(priv);
+}
+
+static int
+mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
+ struct mlxbf_i2c_smbus_request *request)
+{
+ u8 data_desc[MLXBF_I2C_MASTER_DATA_DESC_SIZE] = { 0 };
+ u8 op_idx, data_idx, data_len, write_len, read_len;
+ struct mlxbf_i2c_smbus_operation *operation;
+ u8 read_en, write_en, block_en, pec_en;
+ u8 slave, flags, addr;
+ u8 *read_buf;
+ int ret = 0;
+
+ if (request->operation_cnt > MLXBF_I2C_SMBUS_MAX_OP_CNT)
+ return -EINVAL;
+
+ read_buf = NULL;
+ data_idx = 0;
+ read_en = 0;
+ write_en = 0;
+ write_len = 0;
+ read_len = 0;
+ block_en = 0;
+ pec_en = 0;
+ slave = request->slave & GENMASK(6, 0);
+ addr = slave << 1;
+
+ /* First of all, check whether the HW is idle. */
+ if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv)))
+ return -EBUSY;
+
+ /* Set first byte. */
+ data_desc[data_idx++] = addr;
+
+ for (op_idx = 0; op_idx < request->operation_cnt; op_idx++) {
+ operation = &request->operation[op_idx];
+ flags = operation->flags;
+
+ /*
+ * Note that read and write operations might be handled by a
+ * single command. If the MLXBF_I2C_F_SMBUS_OPERATION is set
+ * then write command byte and set the optional SMBus specific
+ * bits such as block_en and pec_en. These bits MUST be
+ * submitted by the first operation only.
+ */
+ if (op_idx == 0 && flags & MLXBF_I2C_F_SMBUS_OPERATION) {
+ block_en = flags & MLXBF_I2C_F_SMBUS_BLOCK;
+ pec_en = flags & MLXBF_I2C_F_SMBUS_PEC;
+ }
+
+ if (flags & MLXBF_I2C_F_WRITE) {
+ write_en = 1;
+ write_len += operation->length;
+ memcpy(data_desc + data_idx,
+ operation->buffer, operation->length);
+ data_idx += operation->length;
+ }
+ /*
+ * We assume that read operations are performed only once per
+ * SMBus transaction. *TBD* protect this statement so it won't
+ * be executed twice? or return an error if we try to read more
+ * than once?
+ */
+ if (flags & MLXBF_I2C_F_READ) {
+ read_en = 1;
+ /* Subtract 1 as required by HW. */
+ read_len = operation->length - 1;
+ read_buf = operation->buffer;
+ }
+ }
+
+ /* Set Master GW data descriptor. */
+ data_len = write_len + 1; /* Add one byte for the slave address. */
+ /*
+ * Note that data_len cannot be 0. Indeed, the slave address byte
+ * must be written to the data registers.
+ */
+ mlxbf_i2c_smbus_write_data(priv, (const u8 *)data_desc, data_len,
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+
+ if (write_en) {
+ ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
+ pec_en, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (read_en) {
+ /* Write slave address to Master GW data descriptor. */
+ mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
+ pec_en, 1);
+ if (!ret) {
+ /* Get Master GW data descriptor. */
+ mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+
+ /* Get data from Master GW data descriptor. */
+ memcpy(read_buf, data_desc, read_len + 1);
+ }
+
+ /*
+ * After a read operation the SMBus FSM ps (present state)
+ * needs to be 'manually' reset. This should be removed in the
+ * next tag integration.
+ */
+ writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
+ priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
+ }
+
+ return ret;
+}
+
+/* I2C SMBus protocols. */
+
+static void
+mlxbf_i2c_smbus_quick_command(struct mlxbf_i2c_smbus_request *request,
+ u8 read)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1;
+
+ request->operation[0].length = 0;
+ request->operation[0].flags = MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= read ? MLXBF_I2C_F_READ : 0;
+}
+
+static void mlxbf_i2c_smbus_byte_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *data, bool read, bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_1;
+
+ request->operation[0].length = 1;
+ request->operation[0].length += pec_check;
+
+ request->operation[0].flags = MLXBF_I2C_F_SMBUS_OPERATION;
+ request->operation[0].flags |= read ?
+ MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+
+ request->operation[0].buffer = data;
+}
+
+static void
+mlxbf_i2c_smbus_data_byte_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, bool read, bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ request->operation[1].length = 1;
+ request->operation[1].length += pec_check;
+ request->operation[1].flags = read ?
+ MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
+ request->operation[1].buffer = data;
+}
+
+static void
+mlxbf_i2c_smbus_data_word_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, bool read, bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ request->operation[1].length = 2;
+ request->operation[1].length += pec_check;
+ request->operation[1].flags = read ?
+ MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
+ request->operation[1].buffer = data;
+}
+
+static void
+mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, u8 *data_len, bool read,
+ bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ /*
+ * As specified in the standard, the max number of bytes to read/write
+ * per block operation is 32 bytes. In Golan code, the controller can
+ * read up to 128 bytes and write up to 127 bytes.
+ */
+ request->operation[1].length =
+ (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
+ I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;
+ request->operation[1].flags = read ?
+ MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
+ /*
+ * Skip the first data byte, which corresponds to the number of bytes
+ * to read/write.
+ */
+ request->operation[1].buffer = data + 1;
+
+ *data_len = request->operation[1].length;
+
+ /* Set the number of bytes to read. This will be used by userspace. */
+ if (read)
+ data[0] = *data_len;
+}
+
+static void mlxbf_i2c_smbus_block_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, u8 *data_len,
+ bool read, bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_2;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ request->operation[1].length =
+ (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
+ I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;
+ request->operation[1].flags = read ?
+ MLXBF_I2C_F_READ : MLXBF_I2C_F_WRITE;
+ request->operation[1].buffer = data + 1;
+
+ *data_len = request->operation[1].length;
+
+ /* Set the number of bytes to read. This will be used by userspace. */
+ if (read)
+ data[0] = *data_len;
+}
+
+static void
+mlxbf_i2c_smbus_process_call_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, bool pec_check)
+{
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
+ request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ request->operation[1].length = 2;
+ request->operation[1].flags = MLXBF_I2C_F_WRITE;
+ request->operation[1].buffer = data;
+
+ request->operation[2].length = 3;
+ request->operation[2].flags = MLXBF_I2C_F_READ;
+ request->operation[2].buffer = data;
+}
+
+static void
+mlxbf_i2c_smbus_blk_process_call_func(struct mlxbf_i2c_smbus_request *request,
+ u8 *command, u8 *data, u8 *data_len,
+ bool pec_check)
+{
+ u32 length;
+
+ request->operation_cnt = MLXBF_I2C_SMBUS_OP_CNT_3;
+
+ request->operation[0].length = 1;
+ request->operation[0].flags =
+ MLXBF_I2C_F_SMBUS_OPERATION | MLXBF_I2C_F_WRITE;
+ request->operation[0].flags |= MLXBF_I2C_F_SMBUS_BLOCK;
+ request->operation[0].flags |= (pec_check) ? MLXBF_I2C_F_SMBUS_PEC : 0;
+ request->operation[0].buffer = command;
+
+ length = (*data_len + pec_check > I2C_SMBUS_BLOCK_MAX) ?
+ I2C_SMBUS_BLOCK_MAX : *data_len + pec_check;
+
+ request->operation[1].length = length - pec_check;
+ request->operation[1].flags = MLXBF_I2C_F_WRITE;
+ request->operation[1].buffer = data;
+
+ request->operation[2].length = length;
+ request->operation[2].flags = MLXBF_I2C_F_READ;
+ request->operation[2].buffer = data;
+
+ *data_len = length; /* including PEC byte. */
+}
+
+/* Initialization functions. */
+
+static bool mlxbf_i2c_has_chip_type(struct mlxbf_i2c_priv *priv, u8 type)
+{
+ return priv->chip->type == type;
+}
+
+static struct mlxbf_i2c_resource *
+mlxbf_i2c_get_shared_resource(struct mlxbf_i2c_priv *priv, u8 type)
+{
+ const struct mlxbf_i2c_chip_info *chip = priv->chip;
+ struct mlxbf_i2c_resource *res;
+ u8 res_idx = 0;
+
+ for (res_idx = 0; res_idx < MLXBF_I2C_SHARED_RES_MAX; res_idx++) {
+ res = chip->shared_res[res_idx];
+ if (res && res->type == type)
+ return res;
+ }
+
+ return NULL;
+}
+
+static int mlxbf_i2c_init_resource(struct platform_device *pdev,
+ struct mlxbf_i2c_resource **res,
+ u8 type)
+{
+ struct mlxbf_i2c_resource *tmp_res;
+ struct device *dev = &pdev->dev;
+
+ if (!res || *res || type >= MLXBF_I2C_END_RES)
+ return -EINVAL;
+
+ tmp_res = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource),
+ GFP_KERNEL);
+ if (!tmp_res)
+ return -ENOMEM;
+
+ tmp_res->params = platform_get_resource(pdev, IORESOURCE_MEM, type);
+ if (!tmp_res->params) {
+ devm_kfree(dev, tmp_res);
+ return -EIO;
+ }
+
+ tmp_res->io = devm_ioremap_resource(dev, tmp_res->params);
+ if (IS_ERR(tmp_res->io)) {
+ devm_kfree(dev, tmp_res);
+ return PTR_ERR(tmp_res->io);
+ }
+
+ tmp_res->type = type;
+
+ *res = tmp_res;
+
+ return 0;
+}
+
+static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds,
+ bool minimum)
+{
+ u64 frequency;
+ u32 ticks;
+
+ /*
+ * Compute ticks as follows:
+ *
+ * Ticks
+ * Time = --------- x 10^9 => Ticks = Time x Frequency x 10^-9
+ * Frequency
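+ *
+ * A worked example with illustrative numbers: at a 400 MHz core
+ * frequency, 5000 ns yields 5000 x 400000000 / 10^9 = 2000 ticks.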
+ */
+ frequency = priv->frequency;
+ ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ;
+ /*
+ * The number of ticks is rounded down; if 'minimum' is set, add one
+ * tick to guarantee the minimum requested time.
+ */
+ if (minimum)
+ ticks++;
+
+ return ticks;
+}
+
+static u32 mlxbf_i2c_set_timer(struct mlxbf_i2c_priv *priv, u64 nsec, bool opt,
+ u32 mask, u8 shift)
+{
+ u32 val = (mlxbf_i2c_get_ticks(priv, nsec, opt) & mask) << shift;
+
+ return val;
+}
+
+static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
+ const struct mlxbf_i2c_timings *timings)
+{
+ u32 timer;
+
+ timer = mlxbf_i2c_set_timer(priv, timings->scl_high,
+ false, MLXBF_I2C_MASK_16,
+ MLXBF_I2C_SHIFT_0);
+ timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
+ false, MLXBF_I2C_MASK_16,
+ MLXBF_I2C_SHIFT_16);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);
+
+ timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
+ MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0);
+ timer |= mlxbf_i2c_set_timer(priv, timings->sda_fall, false,
+ MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_8);
+ timer |= mlxbf_i2c_set_timer(priv, timings->scl_rise, false,
+ MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
+ timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
+ MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);
+
+ timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
+ timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
+
+ timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
+ timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
+ writel(timer, priv->smbus->io +
+ MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);
+
+ timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
+
+ timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
+ timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
+ MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
+
+ timer = timings->timeout;
+ writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
+}
+
+enum mlxbf_i2c_timings_config {
+ MLXBF_I2C_TIMING_CONFIG_100KHZ,
+ MLXBF_I2C_TIMING_CONFIG_400KHZ,
+ MLXBF_I2C_TIMING_CONFIG_1000KHZ,
+};
+
+/*
+ * Note that the mlxbf_i2c_timings->timeout value is not related to the
+ * bus frequency; it reflects the time the driver allows for data
+ * transmission to complete before aborting the transaction.
+ */
+static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = {
+ [MLXBF_I2C_TIMING_CONFIG_100KHZ] = {
+ .scl_high = 4810,
+ .scl_low = 5000,
+ .hold_start = 4000,
+ .setup_start = 4800,
+ .setup_stop = 4000,
+ .setup_data = 250,
+ .sda_rise = 50,
+ .sda_fall = 50,
+ .scl_rise = 50,
+ .scl_fall = 50,
+ .hold_data = 300,
+ .buf = 20000,
+ .thigh_max = 5000,
+ .timeout = 106500
+ },
+ [MLXBF_I2C_TIMING_CONFIG_400KHZ] = {
+ .scl_high = 1011,
+ .scl_low = 1300,
+ .hold_start = 600,
+ .setup_start = 700,
+ .setup_stop = 600,
+ .setup_data = 100,
+ .sda_rise = 50,
+ .sda_fall = 50,
+ .scl_rise = 50,
+ .scl_fall = 50,
+ .hold_data = 300,
+ .buf = 20000,
+ .thigh_max = 5000,
+ .timeout = 106500
+ },
+ [MLXBF_I2C_TIMING_CONFIG_1000KHZ] = {
+ .scl_high = 600,
+ .scl_low = 1300,
+ .hold_start = 600,
+ .setup_start = 600,
+ .setup_stop = 600,
+ .setup_data = 100,
+ .sda_rise = 50,
+ .sda_fall = 50,
+ .scl_rise = 50,
+ .scl_fall = 50,
+ .hold_data = 300,
+ .buf = 20000,
+ .thigh_max = 5000,
+ .timeout = 106500
+ }
+};
+
+static int mlxbf_i2c_init_timings(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ enum mlxbf_i2c_timings_config config_idx;
+ struct device *dev = &pdev->dev;
+ u32 config_khz;
+
+ int ret;
+
+ ret = device_property_read_u32(dev, "clock-frequency", &config_khz);
+ if (ret < 0)
+ config_khz = MLXBF_I2C_TIMING_100KHZ;
+
+ switch (config_khz) {
+ default:
+ /* Default setting is 100 KHz. */
+ pr_warn("Illegal value %d: defaulting to 100 KHz\n",
+ config_khz);
+ fallthrough;
+ case MLXBF_I2C_TIMING_100KHZ:
+ config_idx = MLXBF_I2C_TIMING_CONFIG_100KHZ;
+ break;
+
+ case MLXBF_I2C_TIMING_400KHZ:
+ config_idx = MLXBF_I2C_TIMING_CONFIG_400KHZ;
+ break;
+
+ case MLXBF_I2C_TIMING_1000KHZ:
+ config_idx = MLXBF_I2C_TIMING_CONFIG_1000KHZ;
+ break;
+ }
+
+ mlxbf_i2c_set_timings(priv, &mlxbf_i2c_timings[config_idx]);
+
+ return 0;
+}
+
+static int mlxbf_i2c_get_gpio(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *gpio_res;
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+ resource_size_t size;
+
+ gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES);
+ if (!gpio_res)
+ return -EPERM;
+
+ /*
+ * The GPIO region in TYU space is shared among I2C busses.
+ * This function MUST be serialized to avoid racing when
+ * claiming the memory region and/or setting up the GPIO.
+ */
+ lockdep_assert_held(gpio_res->lock);
+
+ /* Check whether the memory map exists. */
+ if (gpio_res->io)
+ return 0;
+
+ params = gpio_res->params;
+ size = resource_size(params);
+
+ if (!devm_request_mem_region(dev, params->start, size, params->name))
+ return -EFAULT;
+
+ /* devm_ioremap() returns NULL on failure, not an ERR_PTR value. */
+ gpio_res->io = devm_ioremap(dev, params->start, size);
+ if (!gpio_res->io) {
+ devm_release_mem_region(dev, params->start, size);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mlxbf_i2c_release_gpio(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *gpio_res;
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+
+ gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES);
+ if (!gpio_res)
+ return 0;
+
+ mutex_lock(gpio_res->lock);
+
+ if (gpio_res->io) {
+ /* Release the GPIO resource. */
+ params = gpio_res->params;
+ devm_iounmap(dev, gpio_res->io);
+ devm_release_mem_region(dev, params->start,
+ resource_size(params));
+ }
+
+ mutex_unlock(gpio_res->lock);
+
+ return 0;
+}
+
+static int mlxbf_i2c_get_corepll(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *corepll_res;
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+ resource_size_t size;
+
+ corepll_res = mlxbf_i2c_get_shared_resource(priv,
+ MLXBF_I2C_COREPLL_RES);
+ if (!corepll_res)
+ return -EPERM;
+
+ /*
+ * The COREPLL region in TYU space is shared among I2C busses.
+ * This function MUST be serialized to avoid racing when
+ * claiming the memory region.
+ */
+ lockdep_assert_held(corepll_res->lock);
+
+ /* Check whether the memory map exists. */
+ if (corepll_res->io)
+ return 0;
+
+ params = corepll_res->params;
+ size = resource_size(params);
+
+ if (!devm_request_mem_region(dev, params->start, size, params->name))
+ return -EFAULT;
+
+ /* devm_ioremap() returns NULL on failure, not an ERR_PTR value. */
+ corepll_res->io = devm_ioremap(dev, params->start, size);
+ if (!corepll_res->io) {
+ devm_release_mem_region(dev, params->start, size);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mlxbf_i2c_release_corepll(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *corepll_res;
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+
+ corepll_res = mlxbf_i2c_get_shared_resource(priv,
+ MLXBF_I2C_COREPLL_RES);
+
+ mutex_lock(corepll_res->lock);
+
+ if (corepll_res->io) {
+ /* Release the CorePLL resource. */
+ params = corepll_res->params;
+ devm_iounmap(dev, corepll_res->io);
+ devm_release_mem_region(dev, params->start,
+ resource_size(params));
+ }
+
+ mutex_unlock(corepll_res->lock);
+
+ return 0;
+}
+
+static int mlxbf_i2c_init_master(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *gpio_res;
+ struct device *dev = &pdev->dev;
+ u32 config_reg;
+ int ret;
+
+ /* This configuration is only needed for BlueField 1. */
+ if (!mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1))
+ return 0;
+
+ gpio_res = mlxbf_i2c_get_shared_resource(priv, MLXBF_I2C_GPIO_RES);
+ if (!gpio_res)
+ return -EPERM;
+
+ /*
+ * The GPIO region in TYU space is shared among I2C busses.
+ * This function MUST be serialized to avoid racing when
+ * claiming the memory region and/or setting up the GPIO.
+ */
+
+ mutex_lock(gpio_res->lock);
+
+ ret = mlxbf_i2c_get_gpio(pdev, priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get gpio resource");
+ mutex_unlock(gpio_res->lock);
+ return ret;
+ }
+
+ /*
+ * TYU - Configuration for GPIO pins. Those pins must be asserted in
+ * MLXBF_I2C_GPIO_0_FUNC_EN_0, i.e. GPIO 0 is controlled by HW, and must
+ * be reset in MLXBF_I2C_GPIO_0_FORCE_OE_EN, i.e. GPIO_OE will be driven
+ * instead of HW_OE.
+ * For now, we do not reset the GPIO state when the driver is removed.
+ * First, it is not necessary to disable the bus since we keep using
+ * the same busses. Second, some busses might be shared between Linux
+ * and platform firmware; disabling the bus might compromise system
+ * functionality.
+ */
+ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
+ config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus,
+ config_reg);
+ writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0);
+
+ config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
+ config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus,
+ config_reg);
+ writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN);
+
+ mutex_unlock(gpio_res->lock);
+
+ return 0;
+}
+
+static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+{
+ u64 core_frequency, pad_frequency;
+ u8 core_od, core_r;
+ u32 corepll_val;
+ u16 core_f;
+
+ pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
+
+ corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+
+ /* Get Core PLL configuration bits. */
+ core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
+ core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
+ core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+
+ /*
+ * Compute PLL output frequency as follows:
+ *
+ * CORE_F + 1
+ * PLL_OUT_FREQ = PLL_IN_FREQ * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ *
+ * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
+ * and PadFrequency, respectively.
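+ *
+ * A worked example with illustrative register values: CORE_F = 199,
+ * CORE_R = 4 and CORE_OD = 3 give a ratio of 200 / (5 * 4) = 10,
+ * i.e. the PLL output runs at ten times the pad frequency.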
+ */
+ core_frequency = pad_frequency * (++core_f);
+ core_frequency /= (++core_r) * (++core_od);
+
+ return core_frequency;
+}
+
+static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+{
+ u32 corepll_reg1_val, corepll_reg2_val;
+ u64 corepll_frequency, pad_frequency;
+ u8 core_od, core_r;
+ u32 core_f;
+
+ pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
+
+ corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
+ corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
+
+ /* Get Core PLL configuration bits */
+ core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
+ core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
+ core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
+ MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+
+ /*
+ * Compute PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * PLL_OUT_FREQ = PLL_IN_FREQ * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ *
+ * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
+ * and PadFrequency, respectively.
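+ *
+ * A worked example with illustrative register values: CORE_F = 81920,
+ * CORE_R = 0 and CORE_OD = 0 give (81920 / 16384) / (1 * 1) = 5,
+ * i.e. the PLL output runs at five times the pad frequency.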
+ */
+ corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency /= (++core_r) * (++core_od);
+
+ return corepll_frequency;
+}
+
+static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ const struct mlxbf_i2c_chip_info *chip = priv->chip;
+ struct mlxbf_i2c_resource *corepll_res;
+ struct device *dev = &pdev->dev;
+ u64 *freq = &priv->frequency;
+ int ret;
+
+ corepll_res = mlxbf_i2c_get_shared_resource(priv,
+ MLXBF_I2C_COREPLL_RES);
+ if (!corepll_res)
+ return -EPERM;
+
+ /*
+ * Check whether the TYU core clock frequency is already set. The
+ * core frequency is the same for all I2C busses; when the first
+ * device gets probed, the frequency is computed and stored in a
+ * globally visible variable. A frequency of 0 means it has not
+ * been determined yet.
+ */
+ mutex_lock(corepll_res->lock);
+ if (!mlxbf_i2c_corepll_frequency) {
+ if (!chip->calculate_freq) {
+ mutex_unlock(corepll_res->lock);
+ return -EPERM;
+ }
+
+ ret = mlxbf_i2c_get_corepll(pdev, priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get corePLL resource");
+ mutex_unlock(corepll_res->lock);
+ return ret;
+ }
+
+ mlxbf_i2c_corepll_frequency = chip->calculate_freq(corepll_res);
+ }
+ mutex_unlock(corepll_res->lock);
+
+ *freq = mlxbf_i2c_corepll_frequency;
+
+ return 0;
+}
+
+static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
+{
+ u32 slave_reg, slave_reg_tmp, slave_reg_avail, slave_addr_mask;
+ u8 reg, reg_cnt, byte, addr_tmp, reg_avail, byte_avail;
+ bool avail, disabled;
+
+ disabled = false;
+ avail = false;
+
+ if (!priv)
+ return -EPERM;
+
+ reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
+ slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
+
+ /*
+ * Read the slave registers. There are 4 * 32-bit slave registers.
+ * Each slave register can hold up to 4 * 8-bit slave configuration
+ * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
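+ *
+ * For example (hypothetical value), a configuration byte of 0xd0
+ * encodes slave address 0x50 (bits 6:0) with the enable bit (bit 7)
+ * set.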
+ */
+ for (reg = 0; reg < reg_cnt; reg++) {
+ slave_reg = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
+ /*
+ * Each register holds 4 slave addresses. So, we have to keep
+ * the byte order consistent with the value read in order to
+ * update the register correctly, if needed.
+ */
+ slave_reg_tmp = slave_reg;
+ for (byte = 0; byte < 4; byte++) {
+ addr_tmp = slave_reg_tmp & GENMASK(7, 0);
+
+ /*
+ * Mark the first available slave address slot, i.e. its
+ * enabled bit should be unset. This slot might be used
+ * later on to register our slave.
+ */
+ if (!avail && !MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp)) {
+ avail = true;
+ reg_avail = reg;
+ byte_avail = byte;
+ slave_reg_avail = slave_reg;
+ }
+
+ /*
+ * Parse slave address bytes and check whether the
+ * slave address already exists and is enabled,
+ * i.e. its most significant bit is set.
+ */
+ if ((addr_tmp & slave_addr_mask) == addr) {
+ if (MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp))
+ return 0;
+ disabled = true;
+ break;
+ }
+
+ /* Parse next byte. */
+ slave_reg_tmp >>= 8;
+ }
+
+ /* Exit the loop if the slave address is found. */
+ if (disabled)
+ break;
+ }
+
+ if (!avail && !disabled)
+ return -EINVAL; /* No room for a new slave address. */
+
+ if (avail && !disabled) {
+ reg = reg_avail;
+ byte = byte_avail;
+ /* Set the slave address. */
+ slave_reg_avail &= ~(slave_addr_mask << (byte * 8));
+ slave_reg_avail |= addr << (byte * 8);
+ slave_reg = slave_reg_avail;
+ }
+
+ /* Enable the slave address and update the register. */
+ slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8);
+ writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ reg * 0x4);
+
+ return 0;
+}
+
+static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
+{
+ u32 slave_reg, slave_reg_tmp, slave_addr_mask;
+ u8 addr, addr_tmp, reg, reg_cnt, slave_byte;
+ struct i2c_client *client = priv->slave;
+ bool exist;
+
+ exist = false;
+
+ addr = client->addr;
+ reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
+ slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
+
+ /*
+ * Read the slave registers. There are 4 * 32-bit slave registers.
+ * Each slave register can hold up to 4 * 8-bit slave configuration
+ * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
+ */
+ for (reg = 0; reg < reg_cnt; reg++) {
+ slave_reg = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
+
+ /* Check whether the address slots are empty. */
+ if (slave_reg == 0)
+ continue;
+
+ /*
+ * Each register holds 4 slave addresses. So, we have to keep
+ * the byte order consistent with the value read in order to
+ * update the register correctly, if needed.
+ */
+ slave_reg_tmp = slave_reg;
+ slave_byte = 0;
+ while (slave_reg_tmp != 0) {
+ addr_tmp = slave_reg_tmp & slave_addr_mask;
+ /*
+ * Parse slave address bytes and check whether the
+ * slave address already exists.
+ */
+ if (addr_tmp == addr) {
+ exist = true;
+ break;
+ }
+
+ /* Parse next byte. */
+ slave_reg_tmp >>= 8;
+ slave_byte += 1;
+ }
+
+ /* Exit the loop if the slave address is found. */
+ if (exist)
+ break;
+ }
+
+ if (!exist)
+ return 0; /* Slave is not registered, nothing to do. */
+
+ /* Cleanup the slave address slot. */
+ slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8));
+ writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ reg * 0x4);
+
+ return 0;
+}
+
+static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *coalesce_res;
+ struct resource *params;
+ resource_size_t size;
+ int ret = 0;
+
+ /*
+ * Unlike on the BlueField-1 platform, the coalesce registers are a
+ * dedicated resource on later BlueField generations.
+ */
+ if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) {
+ coalesce_res = mlxbf_i2c_get_shared_resource(priv,
+ MLXBF_I2C_COALESCE_RES);
+ if (!coalesce_res)
+ return -EPERM;
+
+ /*
+ * The Cause Coalesce group in TYU space is shared among
+ * I2C busses. This function MUST be serialized to avoid
+ * racing when claiming the memory region.
+ */
+ lockdep_assert_held(coalesce_res->lock);
+
+ /* Check whether the memory map exists. */
+ if (coalesce_res->io) {
+ priv->coalesce = coalesce_res;
+ return 0;
+ }
+
+ params = coalesce_res->params;
+ size = resource_size(params);
+
+ if (!request_mem_region(params->start, size, params->name))
+ return -EFAULT;
+
+ /* ioremap() returns NULL on failure, not an ERR_PTR value. */
+ coalesce_res->io = ioremap(params->start, size);
+ if (!coalesce_res->io) {
+ release_mem_region(params->start, size);
+ return -ENOMEM;
+ }
+
+ priv->coalesce = coalesce_res;
+
+ } else {
+ ret = mlxbf_i2c_init_resource(pdev, &priv->coalesce,
+ MLXBF_I2C_COALESCE_RES);
+ }
+
+ return ret;
+}
+
+static int mlxbf_i2c_release_coalesce(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct mlxbf_i2c_resource *coalesce_res;
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+ resource_size_t size;
+
+ coalesce_res = priv->coalesce;
+
+ if (coalesce_res->io) {
+ params = coalesce_res->params;
+ size = resource_size(params);
+ if (mlxbf_i2c_has_chip_type(priv, MLXBF_I2C_CHIP_TYPE_1)) {
+ mutex_lock(coalesce_res->lock);
+ iounmap(coalesce_res->io);
+ release_mem_region(params->start, size);
+ mutex_unlock(coalesce_res->lock);
+ } else {
+ devm_release_mem_region(dev, params->start, size);
+ }
+ }
+
+ return 0;
+}
+
+static int mlxbf_i2c_init_slave(struct platform_device *pdev,
+ struct mlxbf_i2c_priv *priv)
+{
+ struct device *dev = &pdev->dev;
+ u32 int_reg;
+ int ret;
+
+ /* Reset FSM. */
+ writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
+
+ /*
+ * Enable slave cause interrupt bits. Drive
+ * MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE and
+ * MLXBF_I2C_CAUSE_WRITE_SUCCESS; these are raised when an external
+ * master issues a Read or a Write, respectively. But clear all
+ * interrupts first.
+ */
+ writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE;
+ int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS;
+ writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0);
+
+ /* Finally, set the 'ready' bit to start handling transactions. */
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+
+ /* Initialize the cause coalesce resource. */
+ ret = mlxbf_i2c_init_coalesce(pdev, priv);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize cause coalesce\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
+ bool *write)
+{
+ const struct mlxbf_i2c_chip_info *chip = priv->chip;
+ u32 coalesce0_reg, cause_reg;
+ u8 slave_shift, is_set;
+
+ *write = false;
+ *read = false;
+
+ slave_shift = chip->type != MLXBF_I2C_CHIP_TYPE_1 ?
+ MLXBF_I2C_CAUSE_YU_SLAVE_BIT :
+ priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT;
+
+ coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0);
+ is_set = coalesce0_reg & (1 << slave_shift);
+
+ if (!is_set)
+ return false;
+
+ /* Check the source of the interrupt, i.e. whether a Read or Write. */
+ cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER);
+ if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE)
+ *read = true;
+ else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS)
+ *write = true;
+
+ /* Clear cause bits. */
+ writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+
+ return true;
+}
+
+static bool mlxbf_smbus_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
+ u32 timeout)
+{
+ u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL;
+ u32 addr = MLXBF_I2C_CAUSE_ARBITER;
+
+ if (mlxbf_smbus_poll(priv->slv_cause->io, addr, mask, false, timeout))
+ return true;
+
+ return false;
+}
+
+/* Send byte to 'external' smbus master. */
+static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+{
+ u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
+ u8 write_size, pec_en, addr, byte, value, byte_cnt, desc_size;
+ struct i2c_client *slave = priv->slave;
+ u32 control32, data32;
+ int ret;
+
+ if (!slave)
+ return -EINVAL;
+
+ addr = 0;
+ byte = 0;
+ desc_size = MLXBF_I2C_SLAVE_DATA_DESC_SIZE;
+
+ /*
+ * Read bytes received from the external master. These bytes should
+ * be located in the first data descriptor register of the slave GW.
+ * These bytes are the slave address byte and the internal register
+ * address, if supplied.
+ */
+ if (recv_bytes > 0) {
+ data32 = ioread32be(priv->smbus->io +
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+
+ /* Parse the received bytes. */
+ switch (recv_bytes) {
+ case 2:
+ byte = (data32 >> 8) & GENMASK(7, 0);
+ fallthrough;
+ case 1:
+ addr = (data32 & GENMASK(7, 0)) >> 1;
+ }
+
+ /* Check whether it's our slave address. */
+ if (slave->addr != addr)
+ return -EINVAL;
+ }
+
+ /*
+ * I2C read transactions may start with a WRITE followed by a READ.
+ * Indeed, most slave devices expect the internal register address
+ * to follow the slave address byte. So, write that byte first,
+ * and then send the requested data bytes to the master.
+ */
+ if (recv_bytes > 1) {
+ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ value = byte;
+ ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
+ &value);
+ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Now, send data to the master; currently, the driver supports
+ * READ_BYTE, READ_WORD and BLOCK READ protocols. Note that the
+ * hardware can send up to 128 bytes per transfer. That is the
+ * size of its data registers.
+ */
+ i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
+
+ for (byte_cnt = 0; byte_cnt < desc_size; byte_cnt++) {
+ data_desc[byte_cnt] = value;
+ i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
+ }
+
+ /* Send a stop condition to the backend. */
+ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+
+ /* Handle the actual transfer. */
+
+ /* Set the number of bytes to write to master. */
+ write_size = (byte_cnt - 1) & 0x7f;
+
+ /* Write data to Slave GW data descriptor. */
+ mlxbf_i2c_smbus_write_data(priv, data_desc, byte_cnt,
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+
+ pec_en = 0; /* Disable PEC since it is not supported. */
+
+ /* Prepare control word. */
+ control32 = MLXBF_I2C_SLAVE_ENABLE;
+ control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT);
+ control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT);
+
+ writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW);
+
+ /*
+ * Wait until the transfer is completed; the driver will wait
+ * until the GW is idle; a cause bit rises on the fall of GW busy.
+ */
+ mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+
+ /* Release the Slave GW. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+
+ return 0;
+}
+
+/* Receive bytes from 'external' smbus master. */
+static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+{
+ u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
+ struct i2c_client *slave = priv->slave;
+ u8 value, byte, addr;
+ int ret = 0;
+
+ if (!slave)
+ return -EINVAL;
+
+ /* Read data from Slave GW data descriptor. */
+ mlxbf_i2c_smbus_read_data(priv, data_desc, recv_bytes,
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+
+ /* Check whether it's our slave address. */
+ addr = data_desc[0] >> 1;
+ if (slave->addr != addr)
+ return -EINVAL;
+
+ /*
+ * Notify the slave backend; another I2C master wants to write data
+ * to us. This event is sent once the slave address and the write bit
+ * are detected.
+ */
+ i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+
+ /* Send the received data to the slave backend. */
+ for (byte = 1; byte < recv_bytes; byte++) {
+ value = data_desc[byte];
+ ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
+ &value);
+ if (ret < 0)
+ break;
+ }
+
+ /* Send a stop condition to the backend. */
+ i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+
+ /* Release the Slave GW. */
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+
+ return ret;
+}
+
+static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
+{
+ struct mlxbf_i2c_priv *priv = ptr;
+ bool read, write, irq_is_set;
+ u32 rw_bytes_reg;
+ u8 recv_bytes;
+
+ /*
+ * Read TYU interrupt register and determine the source of the
+ * interrupt. Based on the source of the interrupt, one of the
+ * following actions is performed:
+ * - Receive data and send response to master.
+ * - Send data and release slave GW.
+ *
+ * Handle read/write transaction only. CRmaster and Iarp requests
+ * are ignored for now.
+ */
+ irq_is_set = mlxbf_i2c_has_coalesce(priv, &read, &write);
+ if (!irq_is_set || (!read && !write)) {
+ /* Nothing to do here, interrupt was not from this device. */
+ return IRQ_NONE;
+ }
+
+ /*
+ * The MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES register holds the number
+ * of bytes from/to the master, 8 bits each. If the lower 8 bits are
+ * set, the master expects to read N bytes from the slave; if the
+ * upper 8 bits are set, the slave expects N bytes from the master.
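+ *
+ * For example (hypothetical value), reading 0x0200 here means the
+ * external master wrote 2 bytes, so
+ * recv_bytes = (0x0200 >> 8) & 0xff = 2.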
+ */
+ rw_bytes_reg = readl(priv->smbus->io +
+ MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);
+
+ /*
+ * For now, the slave supports 128-byte transfers. Discard remaining
+ * data bytes if the master wrote more than
+ * MLXBF_I2C_SLAVE_DATA_DESC_SIZE, i.e. the actual size of the slave
+ * data descriptor.
+ *
+ * Note that we will never expect to transfer more than 128 bytes; as
+ * specified in the SMBus standard, block transactions cannot exceed
+ * 32 bytes.
+ */
+ recv_bytes = recv_bytes > MLXBF_I2C_SLAVE_DATA_DESC_SIZE ?
+ MLXBF_I2C_SLAVE_DATA_DESC_SIZE : recv_bytes;
+
+ if (read)
+ mlxbf_smbus_irq_send(priv, recv_bytes);
+ else
+ mlxbf_smbus_irq_recv(priv, recv_bytes);
+
+ return IRQ_HANDLED;
+}
+
+/* Return negative errno on error. */
+static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size,
+ union i2c_smbus_data *data)
+{
+ struct mlxbf_i2c_smbus_request request = { 0 };
+ struct mlxbf_i2c_priv *priv;
+ bool read, pec;
+ u8 byte_cnt;
+
+ request.slave = addr;
+
+ read = (read_write == I2C_SMBUS_READ);
+ /* Here 'flags' carries client flags, so test I2C_CLIENT_PEC. */
+ pec = flags & I2C_CLIENT_PEC;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ mlxbf_i2c_smbus_quick_command(&request, read);
+ dev_dbg(&adap->dev, "smbus quick, slave 0x%02x\n", addr);
+ break;
+
+ case I2C_SMBUS_BYTE:
+ mlxbf_i2c_smbus_byte_func(&request,
+ read ? &data->byte : &command, read,
+ pec);
+ dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n",
+ read ? "read" : "write", addr);
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte,
+ read, pec);
+ dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n",
+ read ? "read" : "write", command, addr);
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ mlxbf_i2c_smbus_data_word_func(&request, &command,
+ (u8 *)&data->word, read, pec);
+ dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n",
+ read ? "read" : "write", command, addr);
+ break;
+
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ byte_cnt = data->block[0];
+ mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block,
+ &byte_cnt, read, pec);
+ dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
+ read ? "read" : "write", byte_cnt, command, addr);
+ break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ byte_cnt = read ? I2C_SMBUS_BLOCK_MAX : data->block[0];
+ mlxbf_i2c_smbus_block_func(&request, &command, data->block,
+ &byte_cnt, read, pec);
+ dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n",
+ read ? "read" : "write", byte_cnt, command, addr);
+ break;
+
+ /* Note: the 'size' argument carries I2C_SMBUS_* transaction types. */
+ case I2C_SMBUS_PROC_CALL:
+ mlxbf_i2c_smbus_process_call_func(&request, &command,
+ (u8 *)&data->word, pec);
+ dev_dbg(&adap->dev, "process call, wr/rd at 0x%02x, slave 0x%02x.\n",
+ command, addr);
+ break;
+
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ byte_cnt = data->block[0];
+ mlxbf_i2c_smbus_blk_process_call_func(&request, &command,
+ data->block, &byte_cnt,
+ pec);
+ dev_dbg(&adap->dev, "block process call, wr/rd %d bytes, slave 0x%02x.\n",
+ byte_cnt, addr);
+ break;
+
+ default:
+ dev_dbg(&adap->dev, "Unsupported I2C/SMBus command %d\n",
+ size);
+ return -EOPNOTSUPP;
+ }
+
+ priv = i2c_get_adapdata(adap);
+
+ return mlxbf_i2c_smbus_start_transaction(priv, &request);
+}
+
+static int mlxbf_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ int ret;
+
+ if (priv->slave)
+ return -EBUSY;
+
+ /*
+ * Ten-bit chip addresses and Packet Error Checking (PEC) are not
+ * supported.
+ */
+ if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC))
+ return -EAFNOSUPPORT;
+
+ ret = mlxbf_slave_enable(priv, slave->addr);
+ if (ret < 0)
+ return ret;
+
+ priv->slave = slave;
+
+ return 0;
+}
+
+static int mlxbf_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ int ret;
+
+ WARN_ON(!priv->slave);
+
+ /* Unregister slave, i.e. disable the slave address in hardware. */
+ ret = mlxbf_slave_disable(priv);
+ if (ret < 0)
+ return ret;
+
+ priv->slave = NULL;
+
+ return 0;
+}
+
+static u32 mlxbf_i2c_functionality(struct i2c_adapter *adap)
+{
+ return MLXBF_I2C_FUNC_ALL;
+}
+
+static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
+ [MLXBF_I2C_CHIP_TYPE_1] = {
+ .type = MLXBF_I2C_CHIP_TYPE_1,
+ .shared_res = {
+ [0] = &mlxbf_i2c_coalesce_res[MLXBF_I2C_CHIP_TYPE_1],
+ [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
+ [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
+ },
+ .calculate_freq = mlxbf_calculate_freq_from_tyu
+ },
+ [MLXBF_I2C_CHIP_TYPE_2] = {
+ .type = MLXBF_I2C_CHIP_TYPE_2,
+ .shared_res = {
+ [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
+ },
+ .calculate_freq = mlxbf_calculate_freq_from_yu
+ }
+};
+
+static const struct i2c_algorithm mlxbf_i2c_algo = {
+ .smbus_xfer = mlxbf_i2c_smbus_xfer,
+ .functionality = mlxbf_i2c_functionality,
+ .reg_slave = mlxbf_i2c_reg_slave,
+ .unreg_slave = mlxbf_i2c_unreg_slave,
+};
+
+static struct i2c_adapter_quirks mlxbf_i2c_quirks = {
+ .max_read_len = MLXBF_I2C_MASTER_DATA_R_LENGTH,
+ .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH,
+};
+
+static const struct of_device_id mlxbf_i2c_dt_ids[] = {
+ {
+ .compatible = "mellanox,i2c-mlxbf1",
+ .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1]
+ },
+ {
+ .compatible = "mellanox,i2c-mlxbf2",
+ .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2]
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
+ { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
+ { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids);
+
+static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
+{
+ const struct acpi_device_id *aid;
+ struct acpi_device *adev;
+ unsigned long bus_id = 0;
+ const char *uid;
+ int ret;
+
+ if (acpi_disabled)
+ return -ENOENT;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENXIO;
+
+ aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev);
+ if (!aid)
+ return -ENODEV;
+
+ priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data;
+
+ uid = acpi_device_uid(adev);
+ if (!uid || !(*uid)) {
+ dev_err(dev, "Cannot retrieve UID\n");
+ return -ENODEV;
+ }
+
+ ret = kstrtoul(uid, 0, &bus_id);
+ if (!ret)
+ priv->bus = bus_id;
+
+ return ret;
+}
+#else
+static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
+static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
+{
+ const struct of_device_id *oid;
+ int bus_id = -1;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ oid = of_match_node(mlxbf_i2c_dt_ids, dev->of_node);
+ if (!oid)
+ return -ENODEV;
+
+ priv->chip = oid->data;
+
+ bus_id = of_alias_get_id(dev->of_node, "i2c");
+ if (bus_id >= 0)
+ priv->bus = bus_id;
+ }
+
+ if (bus_id < 0) {
+ dev_err(dev, "Cannot get bus id");
+ return bus_id;
+ }
+
+ return 0;
+}
+
+static int mlxbf_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mlxbf_i2c_priv *priv;
+ struct i2c_adapter *adap;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = mlxbf_i2c_acpi_probe(dev, priv);
+ /* Fall back to device-tree probing when ACPI is unavailable. */
+ if (ret == -ENOENT || ret == -ENXIO)
+ ret = mlxbf_i2c_of_probe(dev, priv);
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
+ MLXBF_I2C_SMBUS_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch smbus resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause,
+ MLXBF_I2C_MST_CAUSE_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch cause master resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->slv_cause,
+ MLXBF_I2C_SLV_CAUSE_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch cause slave resource info");
+ return ret;
+ }
+
+ adap = &priv->adap;
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ adap->algo = &mlxbf_i2c_algo;
+ adap->quirks = &mlxbf_i2c_quirks;
+ adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
+ adap->nr = priv->bus;
+
+ snprintf(adap->name, sizeof(adap->name), "i2c%d", adap->nr);
+ i2c_set_adapdata(adap, priv);
+
+ /* Read Core PLL frequency. */
+ ret = mlxbf_i2c_calculate_corepll_freq(pdev, priv);
+ if (ret < 0) {
+ dev_err(dev, "cannot get core clock frequency\n");
+ /* Set to default value. */
+ priv->frequency = MLXBF_I2C_COREPLL_FREQ;
+ }
+
+ /*
+ * Initialize master.
+ * Note that a physical bus might be shared between Linux and firmware
+ * (e.g., ATF). In that case the bus is already initialized and ready,
+ * and re-initialization would be unnecessary. Detecting this requires
+ * additional knowledge about the physical busses; since an extra
+ * initialization does not really hurt, keep the code as is.
+ */
+ ret = mlxbf_i2c_init_master(pdev, priv);
+ if (ret < 0) {
+ dev_err(dev, "failed to initialize smbus master %d",
+ priv->bus);
+ return ret;
+ }
+
+ mlxbf_i2c_init_timings(pdev, priv);
+
+ mlxbf_i2c_init_slave(pdev, priv);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
+ dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get irq %d\n", irq);
+ return ret;
+ }
+
+ priv->irq = irq;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mlxbf_i2c_bus_lock);
+ mlxbf_i2c_bus_count++;
+ mutex_unlock(&mlxbf_i2c_bus_lock);
+
+ return 0;
+}
+
+static int mlxbf_i2c_remove(struct platform_device *pdev)
+{
+ struct mlxbf_i2c_priv *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct resource *params;
+
+ params = priv->smbus->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->mst_cause->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->slv_cause->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ /*
+ * Release shared resources. This should be done when releasing
+ * the I2C controller.
+ */
+ mutex_lock(&mlxbf_i2c_bus_lock);
+ if (--mlxbf_i2c_bus_count == 0) {
+ mlxbf_i2c_release_coalesce(pdev, priv);
+ mlxbf_i2c_release_corepll(pdev, priv);
+ mlxbf_i2c_release_gpio(pdev, priv);
+ }
+ mutex_unlock(&mlxbf_i2c_bus_lock);
+
+ devm_free_irq(dev, priv->irq, priv);
+
+ i2c_del_adapter(&priv->adap);
+
+ return 0;
+}
+
+static struct platform_driver mlxbf_i2c_driver = {
+ .probe = mlxbf_i2c_probe,
+ .remove = mlxbf_i2c_remove,
+ .driver = {
+ .name = "i2c-mlxbf",
+ .of_match_table = mlxbf_i2c_dt_ids,
+#ifdef CONFIG_ACPI
+ .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
+#endif /* CONFIG_ACPI */
+ },
+};
+
+static int __init mlxbf_i2c_init(void)
+{
+ mutex_init(&mlxbf_i2c_coalesce_lock);
+ mutex_init(&mlxbf_i2c_corepll_lock);
+ mutex_init(&mlxbf_i2c_gpio_lock);
+
+ mutex_init(&mlxbf_i2c_bus_lock);
+
+ return platform_driver_register(&mlxbf_i2c_driver);
+}
+module_init(mlxbf_i2c_init);
+
+static void __exit mlxbf_i2c_exit(void)
+{
+ platform_driver_unregister(&mlxbf_i2c_driver);
+
+ mutex_destroy(&mlxbf_i2c_bus_lock);
+
+ mutex_destroy(&mlxbf_i2c_gpio_lock);
+ mutex_destroy(&mlxbf_i2c_corepll_lock);
+ mutex_destroy(&mlxbf_i2c_coalesce_lock);
+}
+module_exit(mlxbf_i2c_exit);
+
+MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver");
+MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 0cbdfbe605b5..33de99b7bc20 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+
mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
/* Set ioconfig */
@@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
-
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
}
static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 8d9d4ffdcd24..e0e45fc19b8f 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -496,11 +496,10 @@ static irqreturn_t
mv64xxx_i2c_intr(int irq, void *dev_id)
{
struct mv64xxx_i2c_data *drv_data = dev_id;
- unsigned long flags;
u32 status;
irqreturn_t rc = IRQ_NONE;
- spin_lock_irqsave(&drv_data->lock, flags);
+ spin_lock(&drv_data->lock);
if (drv_data->offload_enabled)
rc = mv64xxx_i2c_intr_offload(drv_data);
@@ -517,7 +516,7 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
rc = IRQ_HANDLED;
}
- spin_unlock_irqrestore(&drv_data->lock, flags);
+ spin_unlock(&drv_data->lock);
return rc;
}
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index f480105000b8..f9a69b109e5c 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -125,8 +125,7 @@ static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
put_unaligned_be16(val, data);
break;
case 3:
- put_unaligned_be16(val >> 8, data);
- data[2] = val;
+ put_unaligned_be24(val, data);
break;
case 4:
put_unaligned_be32(val, data);
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index a163b8f308c1..9918b2a0b909 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -165,10 +165,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev)
{
struct owl_i2c_dev *i2c_dev = _dev;
struct i2c_msg *msg = i2c_dev->msg;
- unsigned long flags;
unsigned int stat, fifostat;
- spin_lock_irqsave(&i2c_dev->lock, flags);
+ spin_lock(&i2c_dev->lock);
i2c_dev->err = 0;
@@ -214,7 +213,7 @@ stop:
OWL_I2C_STAT_IRQP, true);
complete_all(&i2c_dev->msg_complete);
- spin_unlock_irqrestore(&i2c_dev->lock, flags);
+ spin_unlock(&i2c_dev->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index dead5db3315a..8b4c35f47a70 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -210,9 +210,8 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
u32 dma;
u32 val;
struct i2c_msg *cur;
- unsigned long flags;
- spin_lock_irqsave(&gi2c->lock, flags);
+ spin_lock(&gi2c->lock);
m_stat = readl_relaxed(base + SE_GENI_M_IRQ_STATUS);
rx_st = readl_relaxed(base + SE_GENI_RX_FIFO_STATUS);
dm_tx_st = readl_relaxed(base + SE_DMA_TX_IRQ_STAT);
@@ -294,7 +293,7 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
complete(&gi2c->done);
- spin_unlock_irqrestore(&gi2c->lock, flags);
+ spin_unlock(&gi2c->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index c7c543483b08..217def2d7cb4 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -19,7 +19,9 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -105,10 +107,11 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
+#define ID_P_HOST_NOTIFY BIT(28)
#define ID_P_REP_AFTER_RD BIT(29)
#define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED BIT(31)
-#define ID_P_MASK GENMASK(31, 29)
+#define ID_P_MASK GENMASK(31, 28)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@@ -140,14 +143,13 @@ struct rcar_i2c_priv {
struct reset_control *rstc;
int irq;
+
+ struct i2c_client *host_notify_client;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
#define rcar_i2c_is_recv(p) ((p)->msg->flags & I2C_M_RD)
-#define LOOP_TIMEOUT 1024
-
-
static void rcar_i2c_write(struct rcar_i2c_priv *priv, int reg, u32 val)
{
writel(val, priv->io + reg);
@@ -221,18 +223,18 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
{
- int i;
+ int ret;
+ u32 val;
- for (i = 0; i < LOOP_TIMEOUT; i++) {
- /* make sure that bus is not busy */
- if (!(rcar_i2c_read(priv, ICMCR) & FSDA))
- return 0;
- udelay(1);
+ ret = readl_poll_timeout(priv->io + ICMCR, val, !(val & FSDA), 10,
+ priv->adap.timeout);
+ if (ret) {
+ /* Waiting did not help, try to recover */
+ priv->recovery_icmcr = MDBS | OBPC | FSDA | FSCL;
+ ret = i2c_recover_bus(&priv->adap);
}
- /* Waiting did not help, try to recover */
- priv->recovery_icmcr = MDBS | OBPC | FSDA | FSCL;
- return i2c_recover_bus(&priv->adap);
+ return ret;
}
static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
@@ -760,20 +762,14 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
/* I2C is a special case, we need to poll the status of a reset */
static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
{
- int i, ret;
+ int ret;
ret = reset_control_reset(priv->rstc);
if (ret)
return ret;
- for (i = 0; i < LOOP_TIMEOUT; i++) {
- ret = reset_control_status(priv->rstc);
- if (ret == 0)
- return 0;
- udelay(1);
- }
-
- return -ETIMEDOUT;
+ return read_poll_timeout_atomic(reset_control_status, ret, ret == 0, 1,
+ 100, false, priv->rstc);
}
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
@@ -884,14 +880,21 @@ static int rcar_unreg_slave(struct i2c_client *slave)
static u32 rcar_i2c_func(struct i2c_adapter *adap)
{
+ struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
+
/*
* This HW can't do:
* I2C_SMBUS_QUICK (setting FSB during START didn't work)
* I2C_M_NOSTART (automatically sends address after START)
* I2C_M_IGNORE_NAK (automatically sends STOP after NAK)
*/
- return I2C_FUNC_I2C | I2C_FUNC_SLAVE |
- (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ u32 func = I2C_FUNC_I2C | I2C_FUNC_SLAVE |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+
+ if (priv->flags & ID_P_HOST_NOTIFY)
+ func |= I2C_FUNC_SMBUS_HOST_NOTIFY;
+
+ return func;
}
static const struct i2c_algorithm rcar_i2c_algo = {
@@ -991,6 +994,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
else
pm_runtime_put(dev);
+ if (of_property_read_bool(dev->of_node, "smbus"))
+ priv->flags |= ID_P_HOST_NOTIFY;
priv->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
@@ -1005,10 +1010,20 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_disable;
+ if (priv->flags & ID_P_HOST_NOTIFY) {
+ priv->host_notify_client = i2c_new_slave_host_notify_device(adap);
+ if (IS_ERR(priv->host_notify_client)) {
+ ret = PTR_ERR(priv->host_notify_client);
+ goto out_del_device;
+ }
+ }
+
dev_info(dev, "probed\n");
return 0;
+ out_del_device:
+ i2c_del_adapter(&priv->adap);
out_pm_put:
pm_runtime_put(dev);
out_pm_disable:
@@ -1021,6 +1036,8 @@ static int rcar_i2c_remove(struct platform_device *pdev)
struct rcar_i2c_priv *priv = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ if (priv->host_notify_client)
+ i2c_free_slave_host_notify_device(priv->host_notify_client);
i2c_del_adapter(&priv->adap);
rcar_i2c_release_dma(priv);
if (priv->flags & ID_P_PM_BLOCKED)
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 8e3cc85d1921..819ab4ee517e 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1312,18 +1312,13 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
}
- if (IS_ERR(i2c->clk)) {
- ret = PTR_ERR(i2c->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get bus clk: %d\n", ret);
- return ret;
- }
- if (IS_ERR(i2c->pclk)) {
- ret = PTR_ERR(i2c->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get periph clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk),
+ "Can't get bus clk\n");
+
+ if (IS_ERR(i2c->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ "Can't get periph clk\n");
ret = clk_prepare(i2c->clk);
if (ret < 0) {
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index cab725559999..bdd60770779a 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -129,6 +129,7 @@ struct sh_mobile_i2c_data {
int sr;
bool send_stop;
bool stop_after_dma;
+ bool atomic_xfer;
struct resource *res;
struct dma_chan *dma_tx;
@@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op
ret = iic_rd(pd, ICDR);
break;
case OP_RX_STOP: /* enable DTE interrupt, issue stop */
- iic_wr(pd, ICIC,
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ if (!pd->atomic_xfer)
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
break;
case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
- iic_wr(pd, ICIC,
- ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
+ if (!pd->atomic_xfer)
+ iic_wr(pd, ICIC,
+ ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
ret = iic_rd(pd, ICDR);
iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
break;
@@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
if (wakeup) {
pd->sr |= SW_DONE;
- wake_up(&pd->wait);
+ if (!pd->atomic_xfer)
+ wake_up(&pd->wait);
}
/* defeat write posting to avoid spurious WAIT interrupts */
@@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
pd->pos = -1;
pd->sr = 0;
+ if (pd->atomic_xfer)
+ return;
+
pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8);
if (pd->dma_buf)
sh_mobile_i2c_xfer_dma(pd);
@@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd)
return i ? 0 : -ETIMEDOUT;
}
-static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs,
- int num)
+static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
+ struct i2c_msg *msgs, int num)
{
- struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
struct i2c_msg *msg;
int err = 0;
int i;
- long timeout;
+ long time_left;
/* Wake up device and enable clock */
pm_runtime_get_sync(pd->dev);
@@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
if (do_start)
i2c_op(pd, OP_START);
- /* The interrupt handler takes care of the rest... */
- timeout = wait_event_timeout(pd->wait,
- pd->sr & (ICSR_TACK | SW_DONE),
- adapter->timeout);
-
- /* 'stop_after_dma' tells if DMA transfer was complete */
- i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+ if (pd->atomic_xfer) {
+ unsigned long j = jiffies + pd->adap.timeout;
+
+ time_left = time_before_eq(jiffies, j);
+ while (time_left &&
+ !(pd->sr & (ICSR_TACK | SW_DONE))) {
+ unsigned char sr = iic_rd(pd, ICSR);
+
+ if (sr & (ICSR_AL | ICSR_TACK |
+ ICSR_WAIT | ICSR_DTE)) {
+ sh_mobile_i2c_isr(0, pd);
+ udelay(150);
+ } else {
+ cpu_relax();
+ }
+ time_left = time_before_eq(jiffies, j);
+ }
+ } else {
+ /* The interrupt handler takes care of the rest... */
+ time_left = wait_event_timeout(pd->wait,
+ pd->sr & (ICSR_TACK | SW_DONE),
+ pd->adap.timeout);
+
+ /* 'stop_after_dma' tells if DMA xfer was complete */
+ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg,
+ pd->stop_after_dma);
+ }
- if (!timeout) {
+ if (!time_left) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
sh_mobile_i2c_cleanup_dma(pd);
@@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
return err ?: num;
}
+static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+ pd->atomic_xfer = false;
+ return sh_mobile_xfer(pd, msgs, num);
+}
+
+static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
+
+ pd->atomic_xfer = true;
+ return sh_mobile_xfer(pd, msgs, num);
+}
+
static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
}
static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
- .functionality = sh_mobile_i2c_func,
- .master_xfer = sh_mobile_i2c_xfer,
+ .functionality = sh_mobile_i2c_func,
+ .master_xfer = sh_mobile_i2c_xfer,
+ .master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
};
static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index 3f69a3bb6119..157c64e27d0b 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -26,8 +26,9 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "can't request DMA tx channel\n");
+ if (ret != -ENODEV)
+ ret = dev_err_probe(dev, ret,
+ "can't request DMA tx channel\n");
goto fail_al;
}
@@ -46,8 +47,9 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
dma->chan_rx = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan_rx)) {
ret = PTR_ERR(dma->chan_rx);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "can't request DMA rx channel\n");
+ if (ret != -ENODEV)
+ ret = dev_err_probe(dev, ret,
+ "can't request DMA rx channel\n");
goto fail_tx;
}
@@ -76,8 +78,6 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
- if (ret != -EPROBE_DEFER)
- dev_info(dev, "can't use DMA\n");
return ERR_PTR(ret);
}
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 48e269284369..937c2c8fd349 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -797,10 +797,8 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rst)) {
- ret = PTR_ERR(rst);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Error: Missing reset ctrl\n");
-
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "Error: Missing reset ctrl\n");
goto clk_free;
}
reset_control_assert(rst);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index bff3479fe122..f41f51a176a1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -50,6 +51,7 @@
/* STM32F7 I2C control 1 */
#define STM32F7_I2C_CR1_PECEN BIT(23)
+#define STM32F7_I2C_CR1_SMBHEN BIT(20)
#define STM32F7_I2C_CR1_WUPEN BIT(18)
#define STM32F7_I2C_CR1_SBC BIT(16)
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
@@ -150,7 +152,12 @@
#define STM32F7_I2C_MAX_LEN 0xff
#define STM32F7_I2C_DMA_LEN_MIN 0x16
-#define STM32F7_I2C_MAX_SLAVE 0x2
+enum {
+ STM32F7_SLAVE_HOSTNOTIFY,
+ STM32F7_SLAVE_7_10_BITS_ADDR,
+ STM32F7_SLAVE_7_BITS_ADDR,
+ STM32F7_I2C_MAX_SLAVE
+};
#define STM32F7_I2C_DNF_DEFAULT 0
#define STM32F7_I2C_DNF_MAX 16
@@ -301,6 +308,8 @@ struct stm32f7_i2c_msg {
* @fmp_creg: register address for clearing Fast Mode Plus bits
* @fmp_mask: mask for Fast Mode Plus bits in set register
* @wakeup_src: boolean to know if the device is a wakeup source
+ * @smbus_mode: states that the controller is configured in SMBus mode
+ * @host_notify_client: SMBus host-notify client
*/
struct stm32f7_i2c_dev {
struct i2c_adapter adap;
@@ -327,6 +336,8 @@ struct stm32f7_i2c_dev {
u32 fmp_creg;
u32 fmp_mask;
bool wakeup_src;
+ bool smbus_mode;
+ struct i2c_client *host_notify_client;
};
/*
@@ -1321,11 +1332,20 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
int i;
/*
- * slave[0] supports 7-bit and 10-bit slave address
- * slave[1] supports 7-bit slave address only
+ * slave[STM32F7_SLAVE_HOSTNOTIFY] supports only the SMBus Host address (0x08)
+ * slave[STM32F7_SLAVE_7_10_BITS_ADDR] supports 7-bit and 10-bit slave address
+ * slave[STM32F7_SLAVE_7_BITS_ADDR] supports 7-bit slave address only
*/
- for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
- if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
+ if (i2c_dev->smbus_mode && (slave->addr == 0x08)) {
+ if (i2c_dev->slave[STM32F7_SLAVE_HOSTNOTIFY])
+ goto fail;
+ *id = STM32F7_SLAVE_HOSTNOTIFY;
+ return 0;
+ }
+
+ for (i = STM32F7_I2C_MAX_SLAVE - 1; i > STM32F7_SLAVE_HOSTNOTIFY; i--) {
+ if ((i == STM32F7_SLAVE_7_BITS_ADDR) &&
+ (slave->flags & I2C_CLIENT_TEN))
continue;
if (!i2c_dev->slave[i]) {
*id = i;
@@ -1333,6 +1353,7 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
}
}
+fail:
dev_err(dev, "Slave 0x%x could not be registered\n", slave->addr);
return -EINVAL;
@@ -1776,7 +1797,13 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
if (!stm32f7_i2c_is_slave_registered(i2c_dev))
stm32f7_i2c_enable_wakeup(i2c_dev, true);
- if (id == 0) {
+ switch (id) {
+ case 0:
+ /* Slave SMBus Host */
+ i2c_dev->slave[id] = slave;
+ break;
+
+ case 1:
/* Configure Own Address 1 */
oar1 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR1);
oar1 &= ~STM32F7_I2C_OAR1_MASK;
@@ -1789,7 +1816,9 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
oar1 |= STM32F7_I2C_OAR1_OA1EN;
i2c_dev->slave[id] = slave;
writel_relaxed(oar1, i2c_dev->base + STM32F7_I2C_OAR1);
- } else if (id == 1) {
+ break;
+
+ case 2:
/* Configure Own Address 2 */
oar2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_OAR2);
oar2 &= ~STM32F7_I2C_OAR2_MASK;
@@ -1802,7 +1831,10 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
oar2 |= STM32F7_I2C_OAR2_OA2EN;
i2c_dev->slave[id] = slave;
writel_relaxed(oar2, i2c_dev->base + STM32F7_I2C_OAR2);
- } else {
+ break;
+
+ default:
+ dev_err(dev, "I2C slave id not supported\n");
ret = -ENODEV;
goto pm_free;
}
@@ -1843,10 +1875,10 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
if (ret < 0)
return ret;
- if (id == 0) {
+ if (id == 1) {
mask = STM32F7_I2C_OAR1_OA1EN;
stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR1, mask);
- } else {
+ } else if (id == 2) {
mask = STM32F7_I2C_OAR2_OA2EN;
stm32f7_i2c_clr_bits(base + STM32F7_I2C_OAR2, mask);
}
@@ -1911,14 +1943,51 @@ static int stm32f7_i2c_setup_fm_plus_bits(struct platform_device *pdev,
&i2c_dev->fmp_mask);
}
+static int stm32f7_i2c_enable_smbus_host(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct i2c_adapter *adap = &i2c_dev->adap;
+ void __iomem *base = i2c_dev->base;
+ struct i2c_client *client;
+
+ client = i2c_new_slave_host_notify_device(adap);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ i2c_dev->host_notify_client = client;
+
+ /* Enable SMBus Host address */
+ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_SMBHEN);
+
+ return 0;
+}
+
+static void stm32f7_i2c_disable_smbus_host(struct stm32f7_i2c_dev *i2c_dev)
+{
+ void __iomem *base = i2c_dev->base;
+
+ if (i2c_dev->host_notify_client) {
+ /* Disable SMBus Host address */
+ stm32f7_i2c_clr_bits(base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_SMBHEN);
+ i2c_free_slave_host_notify_device(i2c_dev->host_notify_client);
+ }
+}
+
static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SLAVE |
- I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
- I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_PEC |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+
+ u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SLAVE |
+ I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_PEC |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
+
+ if (i2c_dev->smbus_mode)
+ func |= I2C_FUNC_SMBUS_HOST_NOTIFY;
+
+ return func;
}
static const struct i2c_algorithm stm32f7_i2c_algo = {
@@ -1968,11 +2037,9 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
"wakeup-source");
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2c_dev->clk)) {
- if (PTR_ERR(i2c_dev->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get controller clock\n");
- return PTR_ERR(i2c_dev->clk);
- }
+ if (IS_ERR(i2c_dev->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->clk),
+ "Failed to get controller clock\n");
ret = clk_prepare_enable(i2c_dev->clk);
if (ret) {
@@ -1982,10 +2049,8 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
rst = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rst)) {
- ret = PTR_ERR(rst);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Error: Missing reset ctrl\n");
-
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
+ "Error: Missing reset ctrl\n");
goto clk_free;
}
reset_control_assert(rst);
@@ -2052,14 +2117,13 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
STM32F7_I2C_TXDR,
STM32F7_I2C_RXDR);
- if (PTR_ERR(i2c_dev->dma) == -ENODEV)
- i2c_dev->dma = NULL;
- else if (IS_ERR(i2c_dev->dma)) {
+ if (IS_ERR(i2c_dev->dma)) {
ret = PTR_ERR(i2c_dev->dma);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to request dma error %i\n", ret);
- goto fmp_clear;
+ /* DMA support is optional, only report other errors */
+ if (ret != -ENODEV)
+ goto fmp_clear;
+ dev_dbg(i2c_dev->dev, "No DMA option: fallback using interrupts\n");
+ i2c_dev->dma = NULL;
}
if (i2c_dev->wakeup_src) {
@@ -2084,10 +2148,22 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
stm32f7_i2c_hw_config(i2c_dev);
+ i2c_dev->smbus_mode = of_property_read_bool(pdev->dev.of_node, "smbus");
+
ret = i2c_add_adapter(adap);
if (ret)
goto pm_disable;
+ if (i2c_dev->smbus_mode) {
+ ret = stm32f7_i2c_enable_smbus_host(i2c_dev);
+ if (ret) {
+ dev_err(i2c_dev->dev,
+ "failed to enable SMBus Host-Notify protocol (%d)\n",
+ ret);
+ goto i2c_adapter_remove;
+ }
+ }
+
dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr);
pm_runtime_mark_last_busy(i2c_dev->dev);
@@ -2095,6 +2171,9 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return 0;
+i2c_adapter_remove:
+ i2c_del_adapter(adap);
+
pm_disable:
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
@@ -2126,6 +2205,8 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
{
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ stm32f7_i2c_disable_smbus_host(i2c_dev);
+
i2c_del_adapter(&i2c_dev->adap);
pm_runtime_get_sync(i2c_dev->dev);
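The ordering in the stm32f7 changes matters: the Host-Notify pseudo-client is created only after i2c_add_adapter() succeeds (with an i2c_adapter_remove unwind label for the failure case), and it is freed in remove before i2c_del_adapter(). A minimal sketch of the probe-side sequence; the foo_* names are hypothetical, while the i2c-smbus helpers are the real core API:

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

struct foo_adap {
	struct i2c_adapter adap;
	struct i2c_client *host_notify_client;
};

static int foo_setup_host_notify(struct foo_adap *priv)
{
	struct i2c_client *client;
	int ret;

	ret = i2c_add_adapter(&priv->adap);
	if (ret)
		return ret;

	/* reserves slave address 0x08 and routes alerts to the core */
	client = i2c_new_slave_host_notify_device(&priv->adap);
	if (IS_ERR(client)) {
		i2c_del_adapter(&priv->adap);
		return PTR_ERR(client);
	}

	priv->host_notify_client = client;

	return 0;
}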
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 00d3e4d7a01e..6f08c0c3238d 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -136,7 +136,7 @@
/* configuration load timeout in microseconds */
#define I2C_CONFIG_LOAD_TIMEOUT 1000000
-/* Packet header size in bytes */
+/* packet header size in bytes */
#define I2C_PACKET_HEADER_SIZE 12
/*
@@ -148,11 +148,10 @@
#define I2C_PIO_MODE_PREFERRED_LEN 32
/*
- * msg_end_type: The bus control which need to be send at end of transfer.
- * @MSG_END_STOP: Send stop pulse at end of transfer.
- * @MSG_END_REPEAT_START: Send repeat start at end of transfer.
- * @MSG_END_CONTINUE: The following on message is coming and so do not send
- * stop or repeat start.
+ * msg_end_type: The bus control which needs to be sent at end of transfer.
+ * @MSG_END_STOP: Send stop pulse.
+ * @MSG_END_REPEAT_START: Send repeat-start.
+ * @MSG_END_CONTINUE: Don't send stop or repeat-start.
*/
enum msg_end_type {
MSG_END_STOP,
@@ -161,13 +160,10 @@ enum msg_end_type {
};
/**
- * struct tegra_i2c_hw_feature : Different HW support on Tegra
- * @has_continue_xfer_support: Continue transfer supports.
+ * struct tegra_i2c_hw_feature : per hardware generation features
+ * @has_continue_xfer_support: continue-transfer supported
* @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer
- * complete interrupt per packet basis.
- * @has_single_clk_source: The I2C controller has single clock source. Tegra30
- * and earlier SoCs have two clock sources i.e. div-clk and
- * fast-clk.
+ * completion interrupt on a per-packet basis.
* @has_config_load_reg: Has the config load register to load the new
* configuration.
* @clk_divisor_hs_mode: Clock divisor in HS mode.
@@ -187,7 +183,7 @@ enum msg_end_type {
* @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
* provides additional features and allows for longer messages to
* be transferred in one go.
- * @quirks: i2c adapter quirks for limiting write/read transfer size and not
+ * @quirks: I2C adapter quirks for limiting write/read transfer size and not
* allowing 0 length transfers.
* @supports_bus_clear: Bus Clear support to recover from bus hang during
* SDA stuck low from device for some unknown reasons.
@@ -208,22 +204,21 @@ enum msg_end_type {
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
bool has_per_pkt_xfer_complete_irq;
- bool has_single_clk_source;
bool has_config_load_reg;
- int clk_divisor_hs_mode;
- int clk_divisor_std_mode;
- int clk_divisor_fast_mode;
- u16 clk_divisor_fast_plus_mode;
+ u32 clk_divisor_hs_mode;
+ u32 clk_divisor_std_mode;
+ u32 clk_divisor_fast_mode;
+ u32 clk_divisor_fast_plus_mode;
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
const struct i2c_adapter_quirks *quirks;
bool supports_bus_clear;
bool has_apb_dma;
- u8 tlow_std_mode;
- u8 thigh_std_mode;
- u8 tlow_fast_fastplus_mode;
- u8 thigh_fast_fastplus_mode;
+ u32 tlow_std_mode;
+ u32 thigh_std_mode;
+ u32 tlow_fast_fastplus_mode;
+ u32 thigh_fast_fastplus_mode;
u32 setup_hold_time_std_mode;
u32 setup_hold_time_fast_fast_plus_mode;
u32 setup_hold_time_hs_mode;
@@ -236,7 +231,8 @@ struct tegra_i2c_hw_feature {
* @hw: Tegra I2C HW feature
* @adapter: core I2C layer adapter information
* @div_clk: clock reference for div clock of I2C controller
- * @fast_clk: clock reference for fast clock of I2C controller
+ * @clocks: array of I2C controller clocks
+ * @nclocks: number of clocks in the array
* @rst: reset control for the I2C controller
* @base: ioremapped registers cookie
* @base_phys: physical base address of the I2C controller
@@ -248,101 +244,103 @@ struct tegra_i2c_hw_feature {
* @msg_err: error code for completed message
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
- * @msg_read: identifies read transfers
+ * @msg_read: indicates that the transfer is a read access
* @bus_clk_rate: current I2C bus clock rate
- * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
- * @is_multimaster_mode: track if I2C controller is in multi-master mode
+ * @multimaster_mode: indicates that I2C controller is in multi-master mode
* @tx_dma_chan: DMA transmit channel
* @rx_dma_chan: DMA receive channel
* @dma_phys: handle to DMA resources
* @dma_buf: pointer to allocated DMA buffer
* @dma_buf_size: DMA buffer size
- * @is_curr_dma_xfer: indicates active DMA transfer
+ * @dma_mode: indicates active DMA transfer
* @dma_complete: DMA completion notifier
- * @is_curr_atomic_xfer: indicates active atomic transfer
+ * @atomic_mode: indicates active atomic transfer
*/
struct tegra_i2c_dev {
struct device *dev;
- const struct tegra_i2c_hw_feature *hw;
struct i2c_adapter adapter;
- struct clk *div_clk;
- struct clk *fast_clk;
- struct clk *slow_clk;
+
+ const struct tegra_i2c_hw_feature *hw;
struct reset_control *rst;
- void __iomem *base;
+ unsigned int cont_id;
+ unsigned int irq;
+
phys_addr_t base_phys;
- int cont_id;
- int irq;
- int is_dvc;
- bool is_vi;
+ void __iomem *base;
+
+ struct clk_bulk_data clocks[2];
+ unsigned int nclocks;
+
+ struct clk *div_clk;
+ u32 bus_clk_rate;
+
struct completion msg_complete;
+ size_t msg_buf_remaining;
int msg_err;
u8 *msg_buf;
- size_t msg_buf_remaining;
- int msg_read;
- u32 bus_clk_rate;
- u16 clk_divisor_non_hs_mode;
- bool is_multimaster_mode;
+
+ struct completion dma_complete;
struct dma_chan *tx_dma_chan;
struct dma_chan *rx_dma_chan;
- dma_addr_t dma_phys;
- u32 *dma_buf;
unsigned int dma_buf_size;
- bool is_curr_dma_xfer;
- struct completion dma_complete;
- bool is_curr_atomic_xfer;
-};
+ dma_addr_t dma_phys;
+ void *dma_buf;
-static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit);
+ bool multimaster_mode;
+ bool atomic_mode;
+ bool dma_mode;
+ bool msg_read;
+ bool is_dvc;
+ bool is_vi;
+};
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
- unsigned long reg)
+ unsigned int reg)
{
writel_relaxed(val, i2c_dev->base + reg);
}
-static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
{
return readl_relaxed(i2c_dev->base + reg);
}
/*
- * i2c_writel and i2c_readl will offset the register if necessary to talk
- * to the I2C block inside the DVC block
+ * If necessary, i2c_writel() and i2c_readl() will offset the register
+ * in order to talk to the I2C block inside the DVC block.
*/
-static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
- unsigned long reg)
+static u32 tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
{
if (i2c_dev->is_dvc)
reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
else if (i2c_dev->is_vi)
reg = 0xc00 + (reg << 2);
+
return reg;
}
-static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
- unsigned long reg)
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
{
writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
- /* Read back register to make sure that register writes completed */
+ /* read back register to make sure that register writes completed */
if (reg != I2C_TX_FIFO)
readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
}
-static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
{
return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
}
static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
- unsigned long reg, int len)
+ unsigned int reg, unsigned int len)
{
writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
- unsigned long reg, int len)
+ unsigned int reg, unsigned int len)
{
readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
}
@@ -377,21 +375,27 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len)
struct dma_chan *chan;
dev_dbg(i2c_dev->dev, "starting DMA for length: %zu\n", len);
+
reinit_completion(&i2c_dev->dma_complete);
+
dir = i2c_dev->msg_read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
chan = i2c_dev->msg_read ? i2c_dev->rx_dma_chan : i2c_dev->tx_dma_chan;
+
dma_desc = dmaengine_prep_slave_single(chan, i2c_dev->dma_phys,
len, dir, DMA_PREP_INTERRUPT |
DMA_CTRL_ACK);
if (!dma_desc) {
- dev_err(i2c_dev->dev, "failed to get DMA descriptor\n");
+ dev_err(i2c_dev->dev, "failed to get %s DMA descriptor\n",
+ i2c_dev->msg_read ? "RX" : "TX");
return -EINVAL;
}
dma_desc->callback = tegra_i2c_dma_complete;
dma_desc->callback_param = i2c_dev;
+
dmaengine_submit(dma_desc);
dma_async_issue_pending(chan);
+
return 0;
}
@@ -417,15 +421,15 @@ static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
{
struct dma_chan *chan;
- u32 *dma_buf;
dma_addr_t dma_phys;
+ u32 *dma_buf;
int err;
if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi)
return 0;
if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
- dev_dbg(i2c_dev->dev, "Support for APB DMA not enabled!\n");
+ dev_dbg(i2c_dev->dev, "DMA support not enabled\n");
return 0;
}
@@ -445,16 +449,20 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
i2c_dev->tx_dma_chan = chan;
+ i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
+ I2C_PACKET_HEADER_SIZE;
+
dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
&dma_phys, GFP_KERNEL | __GFP_NOWARN);
if (!dma_buf) {
- dev_err(i2c_dev->dev, "failed to allocate the DMA buffer\n");
+ dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n");
err = -ENOMEM;
goto err_out;
}
i2c_dev->dma_buf = dma_buf;
i2c_dev->dma_phys = dma_phys;
+
return 0;
err_out:
@@ -468,171 +476,12 @@ err_out:
return err;
}
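Note the buffer sizing introduced above: the bounce buffer must fit the largest allowed write plus the 12-byte packet header that gets prepended in DMA mode, hence max_write_len + I2C_PACKET_HEADER_SIZE. A sketch of the allocation under those assumptions (foo_* names hypothetical):

#include <linux/dma-mapping.h>

#define FOO_PACKET_HEADER_SIZE	12	/* mirrors I2C_PACKET_HEADER_SIZE */

struct foo_dma_buf {
	void *dma_buf;
	dma_addr_t dma_phys;
	unsigned int dma_buf_size;
};

static int foo_alloc_dma_buf(struct device *dev, struct foo_dma_buf *priv,
			     size_t max_write_len)
{
	priv->dma_buf_size = max_write_len + FOO_PACKET_HEADER_SIZE;

	/* coherent buffer shared between CPU (header/payload) and DMA */
	priv->dma_buf = dma_alloc_coherent(dev, priv->dma_buf_size,
					   &priv->dma_phys,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!priv->dma_buf)
		return -ENOMEM;

	return 0;
}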
-static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
-{
- unsigned long timeout = jiffies + HZ;
- unsigned int offset;
- u32 mask, val;
-
- if (i2c_dev->hw->has_mst_fifo) {
- mask = I2C_MST_FIFO_CONTROL_TX_FLUSH |
- I2C_MST_FIFO_CONTROL_RX_FLUSH;
- offset = I2C_MST_FIFO_CONTROL;
- } else {
- mask = I2C_FIFO_CONTROL_TX_FLUSH |
- I2C_FIFO_CONTROL_RX_FLUSH;
- offset = I2C_FIFO_CONTROL;
- }
-
- val = i2c_readl(i2c_dev, offset);
- val |= mask;
- i2c_writel(i2c_dev, val, offset);
-
- while (i2c_readl(i2c_dev, offset) & mask) {
- if (time_after(jiffies, timeout)) {
- dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
- return -ETIMEDOUT;
- }
- usleep_range(1000, 2000);
- }
- return 0;
-}
-
-static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
-{
- u32 val;
- int rx_fifo_avail;
- u8 *buf = i2c_dev->msg_buf;
- size_t buf_remaining = i2c_dev->msg_buf_remaining;
- int words_to_transfer;
-
- /*
- * Catch overflow due to message fully sent
- * before the check for RX FIFO availability.
- */
- if (WARN_ON_ONCE(!(i2c_dev->msg_buf_remaining)))
- return -EINVAL;
-
- if (i2c_dev->hw->has_mst_fifo) {
- val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
- rx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_RX, val);
- } else {
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- rx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_RX, val);
- }
-
- /* Rounds down to not include partial word at the end of buf */
- words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
- if (words_to_transfer > rx_fifo_avail)
- words_to_transfer = rx_fifo_avail;
-
- i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
-
- buf += words_to_transfer * BYTES_PER_FIFO_WORD;
- buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
- rx_fifo_avail -= words_to_transfer;
-
- /*
- * If there is a partial word at the end of buf, handle it manually to
- * prevent overwriting past the end of buf
- */
- if (rx_fifo_avail > 0 && buf_remaining > 0) {
- /*
- * buf_remaining > 3 check not needed as rx_fifo_avail == 0
- * when (words_to_transfer was > rx_fifo_avail) earlier
- * in this function.
- */
- val = i2c_readl(i2c_dev, I2C_RX_FIFO);
- val = cpu_to_le32(val);
- memcpy(buf, &val, buf_remaining);
- buf_remaining = 0;
- rx_fifo_avail--;
- }
-
- /* RX FIFO must be drained, otherwise it's an Overflow case. */
- if (WARN_ON_ONCE(rx_fifo_avail))
- return -EINVAL;
-
- i2c_dev->msg_buf_remaining = buf_remaining;
- i2c_dev->msg_buf = buf;
-
- return 0;
-}
-
-static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
-{
- u32 val;
- int tx_fifo_avail;
- u8 *buf = i2c_dev->msg_buf;
- size_t buf_remaining = i2c_dev->msg_buf_remaining;
- int words_to_transfer;
-
- if (i2c_dev->hw->has_mst_fifo) {
- val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
- tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val);
- } else {
- val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
- tx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_TX, val);
- }
-
- /* Rounds down to not include partial word at the end of buf */
- words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-
- /* It's very common to have < 4 bytes, so optimize that case. */
- if (words_to_transfer) {
- if (words_to_transfer > tx_fifo_avail)
- words_to_transfer = tx_fifo_avail;
-
- /*
- * Update state before writing to FIFO. If this casues us
- * to finish writing all bytes (AKA buf_remaining goes to 0) we
- * have a potential for an interrupt (PACKET_XFER_COMPLETE is
- * not maskable). We need to make sure that the isr sees
- * buf_remaining as 0 and doesn't call us back re-entrantly.
- */
- buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
- tx_fifo_avail -= words_to_transfer;
- i2c_dev->msg_buf_remaining = buf_remaining;
- i2c_dev->msg_buf = buf +
- words_to_transfer * BYTES_PER_FIFO_WORD;
- barrier();
-
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
- buf += words_to_transfer * BYTES_PER_FIFO_WORD;
- }
-
- /*
- * If there is a partial word at the end of buf, handle it manually to
- * prevent reading past the end of buf, which could cross a page
- * boundary and fault.
- */
- if (tx_fifo_avail > 0 && buf_remaining > 0) {
- /*
- * buf_remaining > 3 check not needed as tx_fifo_avail == 0
- * when (words_to_transfer was > tx_fifo_avail) earlier
- * in this function for non-zero words_to_transfer.
- */
- memcpy(&val, buf, buf_remaining);
- val = le32_to_cpu(val);
-
- /* Again update before writing to FIFO to make sure isr sees. */
- i2c_dev->msg_buf_remaining = 0;
- i2c_dev->msg_buf = NULL;
- barrier();
-
- i2c_writel(i2c_dev, val, I2C_TX_FIFO);
- }
-
- return 0;
-}
-
/*
* One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
* block. This block is identical to the rest of the I2C blocks, except that
* it only supports master mode, it has registers moved around, and it needs
* some extra init to get it into I2C mode. The register moves are handled
- * by i2c_readl and i2c_writel
+ * by i2c_readl() and i2c_writel().
*/
static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
{
@@ -648,140 +497,112 @@ static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
-static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
+static void tegra_i2c_vi_init(struct tegra_i2c_dev *i2c_dev)
{
- struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- int ret;
-
- ret = pinctrl_pm_select_default_state(i2c_dev->dev);
- if (ret)
- return ret;
+ u32 value;
- ret = clk_enable(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev,
- "Enabling fast clk failed, err %d\n", ret);
- return ret;
- }
+ value = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, 2) |
+ FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, 4);
+ i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_0);
- ret = clk_enable(i2c_dev->slow_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable slow clock: %d\n", ret);
- goto disable_fast_clk;
- }
+ value = FIELD_PREP(I2C_INTERFACE_TIMING_TBUF, 4) |
+ FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STO, 7) |
+ FIELD_PREP(I2C_INTERFACE_TIMING_THD_STA, 4) |
+ FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STA, 4);
+ i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_1);
- ret = clk_enable(i2c_dev->div_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev,
- "Enabling div clk failed, err %d\n", ret);
- goto disable_slow_clk;
- }
+ value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_THIGH, 3) |
+ FIELD_PREP(I2C_HS_INTERFACE_TIMING_TLOW, 8);
+ i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_0);
- /*
- * VI I2C device is attached to VE power domain which goes through
- * power ON/OFF during PM runtime resume/suspend. So, controller
- * should go through reset and need to re-initialize after power
- * domain ON.
- */
- if (i2c_dev->is_vi) {
- ret = tegra_i2c_init(i2c_dev, true);
- if (ret)
- goto disable_div_clk;
- }
+ value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STO, 11) |
+ FIELD_PREP(I2C_HS_INTERFACE_TIMING_THD_STA, 11) |
+ FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STA, 11);
+ i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_1);
- return 0;
+ value = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND;
+ i2c_writel(i2c_dev, value, I2C_BUS_CLEAR_CNFG);
-disable_div_clk:
- clk_disable(i2c_dev->div_clk);
-disable_slow_clk:
- clk_disable(i2c_dev->slow_clk);
-disable_fast_clk:
- clk_disable(i2c_dev->fast_clk);
- return ret;
+ i2c_writel(i2c_dev, 0x0, I2C_TLOW_SEXT);
}
-static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
+static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
+ u32 reg, u32 mask, u32 delay_us,
+ u32 timeout_us)
{
- struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
+ u32 val;
- clk_disable(i2c_dev->div_clk);
- clk_disable(i2c_dev->slow_clk);
- clk_disable(i2c_dev->fast_clk);
+ if (!i2c_dev->atomic_mode)
+ return readl_relaxed_poll_timeout(addr, val, !(val & mask),
+ delay_us, timeout_us);
- return pinctrl_pm_select_idle_state(i2c_dev->dev);
+ return readl_relaxed_poll_timeout_atomic(addr, val, !(val & mask),
+ delay_us, timeout_us);
}
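tegra_i2c_poll_register() folds the two iopoll variants behind one helper: readl_relaxed_poll_timeout() sleeps between reads and must not be used in atomic context, while the _atomic flavour busy-waits. A standalone illustration of the same split; the timeout values here are arbitrary, not the driver's:

#include <linux/iopoll.h>

/* wait until all bits in 'mask' have cleared at 'addr' */
static int foo_wait_bits_clear(void __iomem *addr, u32 mask, bool atomic)
{
	u32 val;

	if (atomic)
		return readl_relaxed_poll_timeout_atomic(addr, val,
							 !(val & mask),
							 10, 10000);

	return readl_relaxed_poll_timeout(addr, val, !(val & mask),
					  10, 10000);
}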
-static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
{
- unsigned long reg_offset;
- void __iomem *addr;
- u32 val;
+ u32 mask, val, offset;
int err;
- if (i2c_dev->hw->has_config_load_reg) {
- reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
- addr = i2c_dev->base + reg_offset;
- i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
+ if (i2c_dev->hw->has_mst_fifo) {
+ mask = I2C_MST_FIFO_CONTROL_TX_FLUSH |
+ I2C_MST_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_MST_FIFO_CONTROL;
+ } else {
+ mask = I2C_FIFO_CONTROL_TX_FLUSH |
+ I2C_FIFO_CONTROL_RX_FLUSH;
+ offset = I2C_FIFO_CONTROL;
+ }
- if (i2c_dev->is_curr_atomic_xfer)
- err = readl_relaxed_poll_timeout_atomic(
- addr, val, val == 0, 1000,
- I2C_CONFIG_LOAD_TIMEOUT);
- else
- err = readl_relaxed_poll_timeout(
- addr, val, val == 0, 1000,
- I2C_CONFIG_LOAD_TIMEOUT);
+ val = i2c_readl(i2c_dev, offset);
+ val |= mask;
+ i2c_writel(i2c_dev, val, offset);
- if (err) {
- dev_warn(i2c_dev->dev,
- "timeout waiting for config load\n");
- return err;
- }
+ err = tegra_i2c_poll_register(i2c_dev, offset, mask, 1000, 1000000);
+ if (err) {
+ dev_err(i2c_dev->dev, "failed to flush FIFO\n");
+ return err;
}
return 0;
}
-static void tegra_i2c_vi_init(struct tegra_i2c_dev *i2c_dev)
+static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
{
- u32 value;
-
- value = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, 2) |
- FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, 4);
- i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_0);
-
- value = FIELD_PREP(I2C_INTERFACE_TIMING_TBUF, 4) |
- FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STO, 7) |
- FIELD_PREP(I2C_INTERFACE_TIMING_THD_STA, 4) |
- FIELD_PREP(I2C_INTERFACE_TIMING_TSU_STA, 4);
- i2c_writel(i2c_dev, value, I2C_INTERFACE_TIMING_1);
+ int err;
- value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_THIGH, 3) |
- FIELD_PREP(I2C_HS_INTERFACE_TIMING_TLOW, 8);
- i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_0);
+ if (!i2c_dev->hw->has_config_load_reg)
+ return 0;
- value = FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STO, 11) |
- FIELD_PREP(I2C_HS_INTERFACE_TIMING_THD_STA, 11) |
- FIELD_PREP(I2C_HS_INTERFACE_TIMING_TSU_STA, 11);
- i2c_writel(i2c_dev, value, I2C_HS_INTERFACE_TIMING_1);
+ i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
- value = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND;
- i2c_writel(i2c_dev, value, I2C_BUS_CLEAR_CNFG);
+ err = tegra_i2c_poll_register(i2c_dev, I2C_CONFIG_LOAD, 0xffffffff,
+ 1000, I2C_CONFIG_LOAD_TIMEOUT);
+ if (err) {
+ dev_err(i2c_dev->dev, "failed to load config\n");
+ return err;
+ }
- i2c_writel(i2c_dev, 0x0, I2C_TLOW_SEXT);
+ return 0;
}
-static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
- u32 val;
+ u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
int err;
- u32 clk_divisor, clk_multiplier;
- u32 tsu_thd;
- u8 tlow, thigh;
- reset_control_assert(i2c_dev->rst);
- udelay(2);
- reset_control_deassert(i2c_dev->rst);
+ /*
+ * The reset shouldn't ever fail in practice. A failure would be a
+ * sign of a severe problem that needs to be resolved. Still, we don't
+ * want to fail the initialization completely, because this may break
+ * kernel boot-up, since voltage regulators are programmed over I2C.
+ * Hence, emit a noisy warning on error; it won't stay unnoticed, and
+ * it won't hose the machine entirely.
+ */
+ err = reset_control_reset(i2c_dev->rst);
+ WARN_ON_ONCE(err);
if (i2c_dev->is_dvc)
tegra_dvc_init(i2c_dev);
@@ -798,24 +619,33 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
if (i2c_dev->is_vi)
tegra_i2c_vi_init(i2c_dev);
- /* Make sure clock divisor programmed correctly */
- clk_divisor = FIELD_PREP(I2C_CLK_DIVISOR_HSMODE,
- i2c_dev->hw->clk_divisor_hs_mode) |
- FIELD_PREP(I2C_CLK_DIVISOR_STD_FAST_MODE,
- i2c_dev->clk_divisor_non_hs_mode);
- i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
-
- if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
- i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
+ switch (i2c_dev->bus_clk_rate) {
+ case I2C_MAX_STANDARD_MODE_FREQ + 1 ... I2C_MAX_FAST_MODE_PLUS_FREQ:
+ default:
tlow = i2c_dev->hw->tlow_fast_fastplus_mode;
thigh = i2c_dev->hw->thigh_fast_fastplus_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode;
- } else {
+
+ if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ)
+ non_hs_mode = i2c_dev->hw->clk_divisor_fast_plus_mode;
+ else
+ non_hs_mode = i2c_dev->hw->clk_divisor_fast_mode;
+ break;
+
+ case 0 ... I2C_MAX_STANDARD_MODE_FREQ:
tlow = i2c_dev->hw->tlow_std_mode;
thigh = i2c_dev->hw->thigh_std_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_std_mode;
+ non_hs_mode = i2c_dev->hw->clk_divisor_std_mode;
+ break;
}
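The switch above leans on GCC's case-range extension ("case low ... high:"), which the kernel uses freely; note that default: shares the fast/fast-plus arm, so any rate above Fast Mode Plus still gets fast-mode timings rather than falling into undefined territory. A tiny illustration of the construct:

/* GNU C case ranges, as used for bus_clk_rate above */
static const char *foo_speed_name(unsigned int hz)
{
	switch (hz) {
	case 0 ... 100000:
		return "standard";
	case 100001 ... 1000000:
	default:
		return "fast / fast-plus";
	}
}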
+ /* make sure clock divisor programmed correctly */
+ clk_divisor = FIELD_PREP(I2C_CLK_DIVISOR_HSMODE,
+ i2c_dev->hw->clk_divisor_hs_mode) |
+ FIELD_PREP(I2C_CLK_DIVISOR_STD_FAST_MODE, non_hs_mode);
+ i2c_writel(i2c_dev, clk_divisor, I2C_CLK_DIVISOR);
+
if (i2c_dev->hw->has_interface_timing_reg) {
val = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, thigh) |
FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, tlow);
@@ -823,22 +653,19 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
}
/*
- * configure setup and hold times only when tsu_thd is non-zero.
- * otherwise, preserve the chip default values
+ * Configure setup and hold times only when tsu_thd is non-zero.
+ * Otherwise, preserve the chip default values.
*/
if (i2c_dev->hw->has_interface_timing_reg && tsu_thd)
i2c_writel(i2c_dev, tsu_thd, I2C_INTERFACE_TIMING_1);
- if (!clk_reinit) {
- clk_multiplier = (tlow + thigh + 2);
- clk_multiplier *= (i2c_dev->clk_divisor_non_hs_mode + 1);
- err = clk_set_rate(i2c_dev->div_clk,
- i2c_dev->bus_clk_rate * clk_multiplier);
- if (err) {
- dev_err(i2c_dev->dev,
- "failed changing clock rate: %d\n", err);
- return err;
- }
+ clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1);
+
+ err = clk_set_rate(i2c_dev->div_clk,
+ i2c_dev->bus_clk_rate * clk_multiplier);
+ if (err) {
+ dev_err(i2c_dev->dev, "failed to set div-clk rate: %d\n", err);
+ return err;
}
if (!i2c_dev->is_dvc && !i2c_dev->is_vi) {
@@ -854,7 +681,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
if (err)
return err;
- if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
+ if (i2c_dev->multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
err = tegra_i2c_wait_for_config_load(i2c_dev);
@@ -870,7 +697,7 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
/*
* NACK interrupt is generated before the I2C controller generates
- * the STOP condition on the bus. So wait for 2 clock periods
+ * the STOP condition on the bus. So, wait for 2 clock periods
* before disabling the controller so that the STOP condition has
* been delivered properly.
*/
@@ -883,16 +710,145 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
return tegra_i2c_wait_for_config_load(i2c_dev);
}
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ unsigned int words_to_transfer, rx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ u32 val;
+
+ /*
+ * Catch overflow due to message fully sent before the check for
+ * RX FIFO availability.
+ */
+ if (WARN_ON_ONCE(!(i2c_dev->msg_buf_remaining)))
+ return -EINVAL;
+
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ rx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_RX, val);
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_RX, val);
+ }
+
+ /* round down to exclude partial word at the end of buffer */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ rx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buffer, handle it
+ * manually to prevent overwriting past the end of buffer.
+ */
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ /*
+ * buf_remaining > 3 check not needed as rx_fifo_avail == 0
+ * when (words_to_transfer was > rx_fifo_avail) earlier
+ * in this function.
+ */
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ val = cpu_to_le32(val);
+ memcpy(buf, &val, buf_remaining);
+ buf_remaining = 0;
+ rx_fifo_avail--;
+ }
+
+ /* RX FIFO must be drained, otherwise it's an Overflow case. */
+ if (WARN_ON_ONCE(rx_fifo_avail))
+ return -EINVAL;
+
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ unsigned int words_to_transfer, tx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ u32 val;
+
+ if (i2c_dev->hw->has_mst_fifo) {
+ val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
+ tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val);
+ } else {
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_TX, val);
+ }
+
+ /* round down to exclude partial word at the end of buffer */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+
+ /*
+ * This hunk pushes 4 bytes at a time into the TX FIFO.
+ *
+ * It's very common to have < 4 bytes, hence there is no word
+ * to push if we have less than 4 bytes to transfer.
+ */
+ if (words_to_transfer) {
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ /*
+ * Update state before writing to FIFO. Note that this may
+ * cause us to finish writing all bytes (AKA buf_remaining
+ * goes to 0), hence we have a potential for an interrupt
+ * (PACKET_XFER_COMPLETE is not maskable), but GIC interrupt
+ * is disabled at this point.
+ */
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ }
+
+ /*
+ * If there is a partial word at the end of buffer, handle it manually
+ * to prevent reading past the end of buffer, which could cross a page
+ * boundary and fault.
+ */
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ /*
+ * buf_remaining > 3 check not needed as tx_fifo_avail == 0
+ * when (words_to_transfer was > tx_fifo_avail) earlier
+ * in this function for non-zero words_to_transfer.
+ */
+ memcpy(&val, buf, buf_remaining);
+ val = le32_to_cpu(val);
+
+ i2c_dev->msg_buf_remaining = 0;
+ i2c_dev->msg_buf = NULL;
+
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ }
+
+ return 0;
+}
+
static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
{
- u32 status;
const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
struct tegra_i2c_dev *i2c_dev = dev_id;
+ u32 status;
status = i2c_readl(i2c_dev, I2C_INT_STATUS);
if (status == 0) {
- dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
+ dev_warn(i2c_dev->dev, "IRQ status 0 %08x %08x %08x\n",
i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
i2c_readl(i2c_dev, I2C_STATUS),
i2c_readl(i2c_dev, I2C_CNFG));
@@ -900,7 +856,7 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
goto err;
}
- if (unlikely(status & status_err)) {
+ if (status & status_err) {
tegra_i2c_disable_packet_mode(i2c_dev);
if (status & I2C_INT_NO_ACK)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
@@ -910,13 +866,13 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
}
/*
- * I2C transfer is terminated during the bus clear so skip
+ * I2C transfer is terminated during the bus clear, so skip
* processing the other interrupts.
*/
if (i2c_dev->hw->supports_bus_clear && (status & I2C_INT_BUS_CLR_DONE))
goto err;
- if (!i2c_dev->is_curr_dma_xfer) {
+ if (!i2c_dev->dma_mode) {
if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
if (tegra_i2c_empty_rx_fifo(i2c_dev)) {
/*
@@ -946,11 +902,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
* During message read XFER_COMPLETE interrupt is triggered prior to
* DMA completion and during message write XFER_COMPLETE interrupt is
* triggered after DMA completion.
- * PACKETS_XFER_COMPLETE indicates completion of all bytes of transfer.
+ *
+ * PACKETS_XFER_COMPLETE indicates completion of all bytes of transfer,
* so forcing msg_buf_remaining to 0 in DMA mode.
*/
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
- if (i2c_dev->is_curr_dma_xfer)
+ if (i2c_dev->dma_mode)
i2c_dev->msg_buf_remaining = 0;
/*
* Underflow error condition: XFER_COMPLETE before message
@@ -964,17 +921,23 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
}
goto done;
err:
- /* An error occurred, mask all interrupts */
- tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
- I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
- I2C_INT_RX_FIFO_DATA_REQ);
+ /* mask all interrupts on error */
+ tegra_i2c_mask_irq(i2c_dev,
+ I2C_INT_NO_ACK |
+ I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE |
+ I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+
if (i2c_dev->hw->supports_bus_clear)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
+
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+
if (i2c_dev->is_dvc)
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
- if (i2c_dev->is_curr_dma_xfer) {
+ if (i2c_dev->dma_mode) {
if (i2c_dev->msg_read)
dmaengine_terminate_async(i2c_dev->rx_dma_chan);
else
@@ -991,19 +954,17 @@ done:
static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
size_t len)
{
- u32 val, reg;
- u8 dma_burst;
struct dma_slave_config slv_config = {0};
+ u32 val, reg, dma_burst, reg_offset;
struct dma_chan *chan;
- int ret;
- unsigned long reg_offset;
+ int err;
if (i2c_dev->hw->has_mst_fifo)
reg = I2C_MST_FIFO_CONTROL;
else
reg = I2C_FIFO_CONTROL;
- if (i2c_dev->is_curr_dma_xfer) {
+ if (i2c_dev->dma_mode) {
if (len & 0xF)
dma_burst = 1;
else if (len & 0x10)
@@ -1014,6 +975,7 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->msg_read) {
chan = i2c_dev->rx_dma_chan;
reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_RX_FIFO);
+
slv_config.src_addr = i2c_dev->base_phys + reg_offset;
slv_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
slv_config.src_maxburst = dma_burst;
@@ -1025,6 +987,7 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
} else {
chan = i2c_dev->tx_dma_chan;
reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_TX_FIFO);
+
slv_config.dst_addr = i2c_dev->base_phys + reg_offset;
slv_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
slv_config.dst_maxburst = dma_burst;
@@ -1036,13 +999,13 @@ static void tegra_i2c_config_fifo_trig(struct tegra_i2c_dev *i2c_dev,
}
slv_config.device_fc = true;
- ret = dmaengine_slave_config(chan, &slv_config);
- if (ret < 0) {
- dev_err(i2c_dev->dev, "DMA slave config failed: %d\n",
- ret);
+ err = dmaengine_slave_config(chan, &slv_config);
+ if (err) {
+ dev_err(i2c_dev->dev, "DMA config failed: %d\n", err);
dev_err(i2c_dev->dev, "falling back to PIO\n");
+
tegra_i2c_release_dma(i2c_dev);
- i2c_dev->is_curr_dma_xfer = false;
+ i2c_dev->dma_mode = false;
} else {
goto out;
}
@@ -1058,10 +1021,9 @@ out:
i2c_writel(i2c_dev, val, reg);
}
-static unsigned long
-tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
- struct completion *complete,
- unsigned int timeout_ms)
+static unsigned long tegra_i2c_poll_completion(struct tegra_i2c_dev *i2c_dev,
+ struct completion *complete,
+ unsigned int timeout_ms)
{
ktime_t ktime = ktime_get();
ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms);
@@ -1085,16 +1047,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
return 0;
}
-static unsigned long
-tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
- struct completion *complete,
- unsigned int timeout_ms)
+static unsigned long tegra_i2c_wait_completion(struct tegra_i2c_dev *i2c_dev,
+ struct completion *complete,
+ unsigned int timeout_ms)
{
unsigned long ret;
- if (i2c_dev->is_curr_atomic_xfer) {
- ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete,
- timeout_ms);
+ if (i2c_dev->atomic_mode) {
+ ret = tegra_i2c_poll_completion(i2c_dev, complete, timeout_ms);
} else {
enable_irq(i2c_dev->irq);
ret = wait_for_completion_timeout(complete,
@@ -1112,8 +1072,7 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
* needs to be checked after timeout.
*/
if (ret == 0)
- ret = tegra_i2c_poll_completion_timeout(i2c_dev,
- complete, 0);
+ ret = tegra_i2c_poll_completion(i2c_dev, complete, 0);
}
return ret;
@@ -1122,60 +1081,134 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
{
struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ u32 val, time_left;
int err;
- unsigned long time_left;
- u32 reg;
reinit_completion(&i2c_dev->msg_complete);
- reg = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND |
+
+ val = FIELD_PREP(I2C_BC_SCLK_THRESHOLD, 9) | I2C_BC_STOP_COND |
I2C_BC_TERMINATE;
- i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG);
- if (i2c_dev->hw->has_config_load_reg) {
- err = tegra_i2c_wait_for_config_load(i2c_dev);
- if (err)
- return err;
- }
+ i2c_writel(i2c_dev, val, I2C_BUS_CLEAR_CNFG);
+
+ err = tegra_i2c_wait_for_config_load(i2c_dev);
+ if (err)
+ return err;
- reg |= I2C_BC_ENABLE;
- i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG);
+ val |= I2C_BC_ENABLE;
+ i2c_writel(i2c_dev, val, I2C_BUS_CLEAR_CNFG);
tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
- time_left = tegra_i2c_wait_completion_timeout(
- i2c_dev, &i2c_dev->msg_complete, 50);
+ time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete, 50);
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
+
if (time_left == 0) {
- dev_err(i2c_dev->dev, "timed out for bus clear\n");
+ dev_err(i2c_dev->dev, "failed to clear bus\n");
return -ETIMEDOUT;
}
- reg = i2c_readl(i2c_dev, I2C_BUS_CLEAR_STATUS);
- if (!(reg & I2C_BC_STATUS)) {
- dev_err(i2c_dev->dev,
- "un-recovered arbitration lost\n");
+ val = i2c_readl(i2c_dev, I2C_BUS_CLEAR_STATUS);
+ if (!(val & I2C_BC_STATUS)) {
+ dev_err(i2c_dev->dev, "un-recovered arbitration lost\n");
return -EIO;
}
return -EAGAIN;
}
+static void tegra_i2c_push_packet_header(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg,
+ enum msg_end_type end_state)
+{
+ u32 *dma_buf = i2c_dev->dma_buf;
+ u32 packet_header;
+
+ packet_header = FIELD_PREP(PACKET_HEADER0_HEADER_SIZE, 0) |
+ FIELD_PREP(PACKET_HEADER0_PROTOCOL,
+ PACKET_HEADER0_PROTOCOL_I2C) |
+ FIELD_PREP(PACKET_HEADER0_CONT_ID, i2c_dev->cont_id) |
+ FIELD_PREP(PACKET_HEADER0_PACKET_ID, 1);
+
+ if (i2c_dev->dma_mode && !i2c_dev->msg_read)
+ *dma_buf++ = packet_header;
+ else
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+
+ if (i2c_dev->dma_mode && !i2c_dev->msg_read)
+ *dma_buf++ = packet_header;
+ else
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = I2C_HEADER_IE_ENABLE;
+
+ if (end_state == MSG_END_CONTINUE)
+ packet_header |= I2C_HEADER_CONTINUE_XFER;
+ else if (end_state == MSG_END_REPEAT_START)
+ packet_header |= I2C_HEADER_REPEAT_START;
+
+ if (msg->flags & I2C_M_TEN) {
+ packet_header |= msg->addr;
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ } else {
+ packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ }
+
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+
+ if (i2c_dev->dma_mode && !i2c_dev->msg_read)
+ *dma_buf++ = packet_header;
+ else
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+}
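The header words are assembled with FIELD_PREP() from <linux/bitfield.h>, which shifts a value into the position defined by a GENMASK()-style constant; FIELD_GET() is its inverse. A self-contained example with a hypothetical 4-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define FOO_HDR_PROTOCOL	GENMASK(7, 4)	/* hypothetical field */

static u32 foo_pack_protocol(u32 hdr, u32 proto)
{
	/* place 'proto' into bits 7:4 of the header word */
	return hdr | FIELD_PREP(FOO_HDR_PROTOCOL, proto);
}

static u32 foo_unpack_protocol(u32 hdr)
{
	/* extract bits 7:4 again */
	return FIELD_GET(FOO_HDR_PROTOCOL, hdr);
}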
+
+static int tegra_i2c_error_recover(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg)
+{
+ if (i2c_dev->msg_err == I2C_ERR_NONE)
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+
+ /* start recovery upon arbitration loss in single master mode */
+ if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) {
+ if (!i2c_dev->multimaster_mode)
+ return i2c_recover_bus(&i2c_dev->adapter);
+
+ return -EAGAIN;
+ }
+
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
+
static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
struct i2c_msg *msg,
enum msg_end_type end_state)
{
- u32 packet_header;
- u32 int_mask;
- unsigned long time_left;
+ unsigned long time_left, xfer_time = 100;
size_t xfer_size;
- u32 *buffer = NULL;
- int err = 0;
- bool dma;
- u16 xfer_time = 100;
+ u32 int_mask;
+ int err;
- tegra_i2c_flush_fifos(i2c_dev);
+ err = tegra_i2c_flush_fifos(i2c_dev);
+ if (err)
+ return err;
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
i2c_dev->msg_err = I2C_ERR_NONE;
- i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ i2c_dev->msg_read = !!(msg->flags & I2C_M_RD);
reinit_completion(&i2c_dev->msg_complete);
if (i2c_dev->msg_read)
@@ -1184,93 +1217,52 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
- i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_PREFERRED_LEN) &&
- i2c_dev->dma_buf &&
- !i2c_dev->is_curr_atomic_xfer;
+
+ i2c_dev->dma_mode = xfer_size > I2C_PIO_MODE_PREFERRED_LEN &&
+ i2c_dev->dma_buf && !i2c_dev->atomic_mode;
+
tegra_i2c_config_fifo_trig(i2c_dev, xfer_size);
- dma = i2c_dev->is_curr_dma_xfer;
+
/*
* Transfer time in mSec = Total bits / transfer rate
* Total bits = 9 bits per byte (including ACK bit) + Start & stop bits
*/
xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
- i2c_dev->bus_clk_rate);
+ i2c_dev->bus_clk_rate);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
tegra_i2c_unmask_irq(i2c_dev, int_mask);
- if (dma) {
+
+ if (i2c_dev->dma_mode) {
if (i2c_dev->msg_read) {
dma_sync_single_for_device(i2c_dev->dev,
i2c_dev->dma_phys,
- xfer_size,
- DMA_FROM_DEVICE);
+ xfer_size, DMA_FROM_DEVICE);
+
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
- if (err < 0) {
- dev_err(i2c_dev->dev,
- "starting RX DMA failed, err %d\n",
- err);
+ if (err)
return err;
- }
-
} else {
dma_sync_single_for_cpu(i2c_dev->dev,
i2c_dev->dma_phys,
- xfer_size,
- DMA_TO_DEVICE);
- buffer = i2c_dev->dma_buf;
+ xfer_size, DMA_TO_DEVICE);
}
}
- packet_header = FIELD_PREP(PACKET_HEADER0_HEADER_SIZE, 0) |
- FIELD_PREP(PACKET_HEADER0_PROTOCOL,
- PACKET_HEADER0_PROTOCOL_I2C) |
- FIELD_PREP(PACKET_HEADER0_CONT_ID, i2c_dev->cont_id) |
- FIELD_PREP(PACKET_HEADER0_PACKET_ID, 1);
- if (dma && !i2c_dev->msg_read)
- *buffer++ = packet_header;
- else
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
-
- packet_header = msg->len - 1;
- if (dma && !i2c_dev->msg_read)
- *buffer++ = packet_header;
- else
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
-
- packet_header = I2C_HEADER_IE_ENABLE;
- if (end_state == MSG_END_CONTINUE)
- packet_header |= I2C_HEADER_CONTINUE_XFER;
- else if (end_state == MSG_END_REPEAT_START)
- packet_header |= I2C_HEADER_REPEAT_START;
- if (msg->flags & I2C_M_TEN) {
- packet_header |= msg->addr;
- packet_header |= I2C_HEADER_10BIT_ADDR;
- } else {
- packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
- }
- if (msg->flags & I2C_M_IGNORE_NAK)
- packet_header |= I2C_HEADER_CONT_ON_NAK;
- if (msg->flags & I2C_M_RD)
- packet_header |= I2C_HEADER_READ;
- if (dma && !i2c_dev->msg_read)
- *buffer++ = packet_header;
- else
- i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ tegra_i2c_push_packet_header(i2c_dev, msg, end_state);
if (!i2c_dev->msg_read) {
- if (dma) {
- memcpy(buffer, msg->buf, msg->len);
+ if (i2c_dev->dma_mode) {
+ memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
+ msg->buf, msg->len);
+
dma_sync_single_for_device(i2c_dev->dev,
i2c_dev->dma_phys,
- xfer_size,
- DMA_TO_DEVICE);
+ xfer_size, DMA_TO_DEVICE);
+
err = tegra_i2c_dma_submit(i2c_dev, xfer_size);
- if (err < 0) {
- dev_err(i2c_dev->dev,
- "starting TX DMA failed, err %d\n",
- err);
+ if (err)
return err;
- }
} else {
tegra_i2c_fill_tx_fifo(i2c_dev);
}
@@ -1278,7 +1270,8 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (i2c_dev->hw->has_per_pkt_xfer_complete_irq)
int_mask |= I2C_INT_PACKET_XFER_COMPLETE;
- if (!dma) {
+
+ if (!i2c_dev->dma_mode) {
if (msg->flags & I2C_M_RD)
int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
else if (i2c_dev->msg_buf_remaining)
@@ -1286,12 +1279,13 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
}
tegra_i2c_unmask_irq(i2c_dev, int_mask);
- dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
+ dev_dbg(i2c_dev->dev, "unmasked IRQ: %02x\n",
i2c_readl(i2c_dev, I2C_INT_MASK));
- if (dma) {
- time_left = tegra_i2c_wait_completion_timeout(
- i2c_dev, &i2c_dev->dma_complete, xfer_time);
+ if (i2c_dev->dma_mode) {
+ time_left = tegra_i2c_wait_completion(i2c_dev,
+ &i2c_dev->dma_complete,
+ xfer_time);
/*
* Synchronize DMA first, since dmaengine_terminate_sync()
@@ -1307,29 +1301,28 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_dev->tx_dma_chan);
if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
- dev_err(i2c_dev->dev, "DMA transfer timeout\n");
- tegra_i2c_init(i2c_dev, true);
+ dev_err(i2c_dev->dev, "DMA transfer timed out\n");
+ tegra_i2c_init(i2c_dev);
return -ETIMEDOUT;
}
if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
dma_sync_single_for_cpu(i2c_dev->dev,
i2c_dev->dma_phys,
- xfer_size,
- DMA_FROM_DEVICE);
- memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf,
- msg->len);
+ xfer_size, DMA_FROM_DEVICE);
+
+ memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, msg->len);
}
}
- time_left = tegra_i2c_wait_completion_timeout(
- i2c_dev, &i2c_dev->msg_complete, xfer_time);
+ time_left = tegra_i2c_wait_completion(i2c_dev, &i2c_dev->msg_complete,
+ xfer_time);
tegra_i2c_mask_irq(i2c_dev, int_mask);
if (time_left == 0) {
- dev_err(i2c_dev->dev, "i2c transfer timed out\n");
- tegra_i2c_init(i2c_dev, true);
+ dev_err(i2c_dev->dev, "I2C transfer timed out\n");
+ tegra_i2c_init(i2c_dev);
return -ETIMEDOUT;
}
@@ -1337,37 +1330,25 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
time_left, completion_done(&i2c_dev->msg_complete),
i2c_dev->msg_err);
- i2c_dev->is_curr_dma_xfer = false;
- if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
- return 0;
+ i2c_dev->dma_mode = false;
- tegra_i2c_init(i2c_dev, true);
- /* start recovery upon arbitration loss in single master mode */
- if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) {
- if (!i2c_dev->is_multimaster_mode)
- return i2c_recover_bus(&i2c_dev->adapter);
- return -EAGAIN;
- }
-
- if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
- if (msg->flags & I2C_M_IGNORE_NAK)
- return 0;
- return -EREMOTEIO;
- }
+ err = tegra_i2c_error_recover(i2c_dev, msg);
+ if (err)
+ return err;
- return -EIO;
+ return 0;
}
static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
int num)
{
struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
- int i;
- int ret;
+ int i, ret;
ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0) {
dev_err(i2c_dev->dev, "runtime resume failed %d\n", ret);
+ pm_runtime_put_noidle(i2c_dev->dev);
return ret;
}
@@ -1375,6 +1356,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
enum msg_end_type end_type = MSG_END_STOP;
if (i < (num - 1)) {
+ /* check whether a follow-up message is coming */
if (msgs[i + 1].flags & I2C_M_NOSTART)
end_type = MSG_END_CONTINUE;
else
@@ -1396,9 +1378,9 @@ static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap,
struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
int ret;
- i2c_dev->is_curr_atomic_xfer = true;
+ i2c_dev->atomic_mode = true;
ret = tegra_i2c_xfer(adap, msgs, num);
- i2c_dev->is_curr_atomic_xfer = false;
+ i2c_dev->atomic_mode = false;
return ret;
}
@@ -1411,22 +1393,8 @@ static u32 tegra_i2c_func(struct i2c_adapter *adap)
if (i2c_dev->hw->has_continue_xfer_support)
ret |= I2C_FUNC_NOSTART;
- return ret;
-}
-static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
-{
- struct device_node *np = i2c_dev->dev->of_node;
- int ret;
- bool multi_mode;
-
- ret = of_property_read_u32(np, "clock-frequency",
- &i2c_dev->bus_clk_rate);
- if (ret)
- i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ; /* default clock rate */
-
- multi_mode = of_property_read_bool(np, "multi-master");
- i2c_dev->is_multimaster_mode = multi_mode;
+ return ret;
}
static const struct i2c_algorithm tegra_i2c_algo = {
@@ -1454,7 +1422,6 @@ static struct i2c_bus_recovery_info tegra_i2c_recovery_info = {
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
.has_per_pkt_xfer_complete_irq = false,
- .has_single_clk_source = false,
.clk_divisor_hs_mode = 3,
.clk_divisor_std_mode = 0,
.clk_divisor_fast_mode = 0,
@@ -1479,7 +1446,6 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = false,
- .has_single_clk_source = false,
.clk_divisor_hs_mode = 3,
.clk_divisor_std_mode = 0,
.clk_divisor_fast_mode = 0,
@@ -1504,7 +1470,6 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = true,
- .has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_mode = 0x19,
.clk_divisor_fast_mode = 0x19,
@@ -1529,7 +1494,6 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = true,
- .has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_mode = 0x19,
.clk_divisor_fast_mode = 0x19,
@@ -1554,7 +1518,6 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = true,
- .has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_mode = 0x19,
.clk_divisor_fast_mode = 0x19,
@@ -1579,7 +1542,6 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra186_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = true,
- .has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_mode = 0x16,
.clk_divisor_fast_mode = 0x19,
@@ -1604,7 +1566,6 @@ static const struct tegra_i2c_hw_feature tegra186_i2c_hw = {
static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_continue_xfer_support = true,
.has_per_pkt_xfer_complete_irq = true,
- .has_single_clk_source = true,
.clk_divisor_hs_mode = 1,
.clk_divisor_std_mode = 0x4f,
.clk_divisor_fast_mode = 0x3c,
@@ -1626,7 +1587,6 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_interface_timing_reg = true,
};
-/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
@@ -1641,223 +1601,196 @@ static const struct of_device_id tegra_i2c_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
-static int tegra_i2c_probe(struct platform_device *pdev)
+static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
{
- struct device *dev = &pdev->dev;
- struct tegra_i2c_dev *i2c_dev;
- struct resource *res;
- struct clk *div_clk;
- struct clk *fast_clk;
- void __iomem *base;
- phys_addr_t base_phys;
- int irq;
- int ret;
+ struct device_node *np = i2c_dev->dev->of_node;
+ bool multi_mode;
+ int err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_phys = res->start;
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ err = of_property_read_u32(np, "clock-frequency",
+ &i2c_dev->bus_clk_rate);
+ if (err)
+ i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "no irq resource\n");
- return -EINVAL;
- }
- irq = res->start;
+ multi_mode = of_property_read_bool(np, "multi-master");
+ i2c_dev->multimaster_mode = multi_mode;
+
+ if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
+ i2c_dev->is_dvc = true;
+
+ if (of_device_is_compatible(np, "nvidia,tegra210-i2c-vi"))
+ i2c_dev->is_vi = true;
+}
+
+static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
+{
+ int err;
+
+ i2c_dev->clocks[i2c_dev->nclocks++].id = "div-clk";
+
+ if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw)
+ i2c_dev->clocks[i2c_dev->nclocks++].id = "fast-clk";
- div_clk = devm_clk_get(&pdev->dev, "div-clk");
- if (IS_ERR(div_clk)) {
- if (PTR_ERR(div_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "missing controller clock\n");
+ if (i2c_dev->is_vi)
+ i2c_dev->clocks[i2c_dev->nclocks++].id = "slow";
+
+ err = devm_clk_bulk_get(i2c_dev->dev, i2c_dev->nclocks,
+ i2c_dev->clocks);
+ if (err)
+ return err;
+
+ err = clk_bulk_prepare(i2c_dev->nclocks, i2c_dev->clocks);
+ if (err)
+ return err;
+
+ i2c_dev->div_clk = i2c_dev->clocks[0].clk;
+
+ if (!i2c_dev->multimaster_mode)
+ return 0;
- return PTR_ERR(div_clk);
+ err = clk_enable(i2c_dev->div_clk);
+ if (err) {
+ dev_err(i2c_dev->dev, "failed to enable div-clk: %d\n", err);
+ goto unprepare_clocks;
}
+ return 0;
+
+unprepare_clocks:
+ clk_bulk_unprepare(i2c_dev->nclocks, i2c_dev->clocks);
+
+ return err;
+}
+
+static void tegra_i2c_release_clocks(struct tegra_i2c_dev *i2c_dev)
+{
+ if (i2c_dev->multimaster_mode)
+ clk_disable(i2c_dev->div_clk);
+
+ clk_bulk_unprepare(i2c_dev->nclocks, i2c_dev->clocks);
+}
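tegra_i2c_init_clocks() above keeps all clocks prepared from probe time onward, but only keeps the divider clock enabled, and only in multi-master mode. A condensed sketch of that prepare/enable split, assuming (as in the driver above) that clks[0] is the divider clock; error handling is trimmed to the essentials:

#include <linux/clk.h>

static int foo_clocks_up(struct device *dev, struct clk_bulk_data *clks,
			 int nclks, bool multimaster)
{
	int err;

	err = devm_clk_bulk_get(dev, nclks, clks);
	if (err)
		return err;

	/* prepare may sleep, so do it once here rather than per transfer */
	err = clk_bulk_prepare(nclks, clks);
	if (err)
		return err;

	if (!multimaster)
		return 0;

	/* enable is atomic-safe; the divider clock stays on permanently */
	err = clk_enable(clks[0].clk);
	if (err)
		clk_bulk_unprepare(nclks, clks);

	return err;
}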
+
+static int tegra_i2c_init_hardware(struct tegra_i2c_dev *i2c_dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(i2c_dev->dev);
+ if (ret < 0)
+ dev_err(i2c_dev->dev, "runtime resume failed: %d\n", ret);
+ else
+ ret = tegra_i2c_init(i2c_dev);
+
+ pm_runtime_put(i2c_dev->dev);
+
+ return ret;
+}
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct resource *res;
+ int err;
+
i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev)
return -ENOMEM;
- i2c_dev->base = base;
- i2c_dev->base_phys = base_phys;
- i2c_dev->div_clk = div_clk;
- i2c_dev->adapter.algo = &tegra_i2c_algo;
- i2c_dev->adapter.retries = 1;
- i2c_dev->adapter.timeout = 6 * HZ;
- i2c_dev->irq = irq;
+ platform_set_drvdata(pdev, i2c_dev);
+
+ init_completion(&i2c_dev->msg_complete);
+ init_completion(&i2c_dev->dma_complete);
+
+ i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
- i2c_dev->rst = devm_reset_control_get_exclusive(&pdev->dev, "i2c");
- if (IS_ERR(i2c_dev->rst)) {
- dev_err(&pdev->dev, "missing controller reset\n");
- return PTR_ERR(i2c_dev->rst);
- }
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(i2c_dev->base))
+ return PTR_ERR(i2c_dev->base);
- tegra_i2c_parse_dt(i2c_dev);
-
- i2c_dev->hw = of_device_get_match_data(&pdev->dev);
- i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
- "nvidia,tegra20-i2c-dvc");
- i2c_dev->is_vi = of_device_is_compatible(dev->of_node,
- "nvidia,tegra210-i2c-vi");
- i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
- i2c_dev->dma_buf_size = i2c_dev->adapter.quirks->max_write_len +
- I2C_PACKET_HEADER_SIZE;
- init_completion(&i2c_dev->msg_complete);
- init_completion(&i2c_dev->dma_complete);
+ i2c_dev->base_phys = res->start;
- if (!i2c_dev->hw->has_single_clk_source) {
- fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
- if (IS_ERR(fast_clk)) {
- dev_err(&pdev->dev, "missing fast clock\n");
- return PTR_ERR(fast_clk);
- }
- i2c_dev->fast_clk = fast_clk;
- }
+ err = platform_get_irq(pdev, 0);
+ if (err < 0)
+ return err;
- if (i2c_dev->is_vi) {
- i2c_dev->slow_clk = devm_clk_get(dev, "slow");
- if (IS_ERR(i2c_dev->slow_clk)) {
- if (PTR_ERR(i2c_dev->slow_clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get slow clock: %ld\n",
- PTR_ERR(i2c_dev->slow_clk));
+ i2c_dev->irq = err;
- return PTR_ERR(i2c_dev->slow_clk);
- }
- }
+ /* the interrupt will be enabled during transfer time */
+ irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
- platform_set_drvdata(pdev, i2c_dev);
+ err = devm_request_irq(i2c_dev->dev, i2c_dev->irq, tegra_i2c_isr,
+ IRQF_NO_SUSPEND, dev_name(i2c_dev->dev),
+ i2c_dev);
+ if (err)
+ return err;
- ret = clk_prepare(i2c_dev->fast_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
- return ret;
+ i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
+ if (IS_ERR(i2c_dev->rst)) {
+ dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
+ "failed to get reset control\n");
+ return PTR_ERR(i2c_dev->rst);
}
- ret = clk_prepare(i2c_dev->slow_clk);
- if (ret < 0) {
- dev_err(dev, "failed to prepare slow clock: %d\n", ret);
- goto unprepare_fast_clk;
- }
+ tegra_i2c_parse_dt(i2c_dev);
- if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ &&
- i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_PLUS_FREQ)
- i2c_dev->clk_divisor_non_hs_mode =
- i2c_dev->hw->clk_divisor_fast_plus_mode;
- else if (i2c_dev->bus_clk_rate > I2C_MAX_STANDARD_MODE_FREQ &&
- i2c_dev->bus_clk_rate <= I2C_MAX_FAST_MODE_FREQ)
- i2c_dev->clk_divisor_non_hs_mode =
- i2c_dev->hw->clk_divisor_fast_mode;
- else
- i2c_dev->clk_divisor_non_hs_mode =
- i2c_dev->hw->clk_divisor_std_mode;
+ err = tegra_i2c_init_clocks(i2c_dev);
+ if (err)
+ return err;
- ret = clk_prepare(i2c_dev->div_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret);
- goto unprepare_slow_clk;
- }
+ err = tegra_i2c_init_dma(i2c_dev);
+ if (err)
+ goto release_clocks;
/*
- * VI I2C is in VE power domain which is not always on and not
- * an IRQ safe. So, IRQ safe device can't be attached to a non-IRQ
- * safe domain as it prevents powering off the PM domain.
- * Also, VI I2C device don't need to use runtime IRQ safe as it will
- * not be used for atomic transfers.
+ * VI I2C is in the VE power domain, which is not always ON and not
+ * IRQ-safe. Thus, an IRQ-safe device shouldn't be attached to a
+ * non-IRQ-safe domain because this prevents powering off the power
+ * domain.
+ *
+ * The VI I2C device also shouldn't be marked as IRQ-safe because it
+ * won't be used for atomic transfers.
*/
if (!i2c_dev->is_vi)
- pm_runtime_irq_safe(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- ret = tegra_i2c_runtime_resume(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto unprepare_div_clk;
- }
- } else {
- ret = pm_runtime_get_sync(i2c_dev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "runtime resume failed\n");
- goto disable_rpm;
- }
- }
+ pm_runtime_irq_safe(i2c_dev->dev);
- if (i2c_dev->is_multimaster_mode) {
- ret = clk_enable(i2c_dev->div_clk);
- if (ret < 0) {
- dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
- ret);
- goto put_rpm;
- }
- }
+ pm_runtime_enable(i2c_dev->dev);
- if (i2c_dev->hw->supports_bus_clear)
- i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
-
- ret = tegra_i2c_init_dma(i2c_dev);
- if (ret < 0)
- goto disable_div_clk;
-
- ret = tegra_i2c_init(i2c_dev, false);
- if (ret) {
- dev_err(&pdev->dev, "Failed to initialize i2c controller\n");
- goto release_dma;
- }
-
- irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
-
- ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr,
- IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c_dev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
- goto release_dma;
- }
+ err = tegra_i2c_init_hardware(i2c_dev);
+ if (err)
+ goto release_rpm;
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
+ i2c_dev->adapter.dev.of_node = i2c_dev->dev->of_node;
+ i2c_dev->adapter.dev.parent = i2c_dev->dev;
+ i2c_dev->adapter.retries = 1;
+ i2c_dev->adapter.timeout = 6 * HZ;
+ i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
i2c_dev->adapter.owner = THIS_MODULE;
i2c_dev->adapter.class = I2C_CLASS_DEPRECATED;
- strlcpy(i2c_dev->adapter.name, dev_name(&pdev->dev),
- sizeof(i2c_dev->adapter.name));
- i2c_dev->adapter.dev.parent = &pdev->dev;
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->adapter.nr = pdev->id;
- i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
-
- ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
- if (ret)
- goto release_dma;
-
- pm_runtime_put(&pdev->dev);
- return 0;
-
-release_dma:
- tegra_i2c_release_dma(i2c_dev);
-
-disable_div_clk:
- if (i2c_dev->is_multimaster_mode)
- clk_disable(i2c_dev->div_clk);
+ if (i2c_dev->hw->supports_bus_clear)
+ i2c_dev->adapter.bus_recovery_info = &tegra_i2c_recovery_info;
-put_rpm:
- if (pm_runtime_enabled(&pdev->dev))
- pm_runtime_put_sync(&pdev->dev);
- else
- tegra_i2c_runtime_suspend(&pdev->dev);
+ strlcpy(i2c_dev->adapter.name, dev_name(i2c_dev->dev),
+ sizeof(i2c_dev->adapter.name));
-disable_rpm:
- if (pm_runtime_enabled(&pdev->dev))
- pm_runtime_disable(&pdev->dev);
+ err = i2c_add_numbered_adapter(&i2c_dev->adapter);
+ if (err)
+ goto release_rpm;
-unprepare_div_clk:
- clk_unprepare(i2c_dev->div_clk);
+ return 0;
-unprepare_slow_clk:
- clk_unprepare(i2c_dev->slow_clk);
+release_rpm:
+ pm_runtime_disable(i2c_dev->dev);
-unprepare_fast_clk:
- clk_unprepare(i2c_dev->fast_clk);
+ tegra_i2c_release_dma(i2c_dev);
+release_clocks:
+ tegra_i2c_release_clocks(i2c_dev);
- return ret;
+ return err;
}
static int tegra_i2c_remove(struct platform_device *pdev)
@@ -1865,33 +1798,69 @@ static int tegra_i2c_remove(struct platform_device *pdev)
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
+ pm_runtime_disable(i2c_dev->dev);
- if (i2c_dev->is_multimaster_mode)
- clk_disable(i2c_dev->div_clk);
+ tegra_i2c_release_dma(i2c_dev);
+ tegra_i2c_release_clocks(i2c_dev);
- pm_runtime_disable(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- tegra_i2c_runtime_suspend(&pdev->dev);
+ return 0;
+}
- clk_unprepare(i2c_dev->div_clk);
- clk_unprepare(i2c_dev->slow_clk);
- clk_unprepare(i2c_dev->fast_clk);
+static int __maybe_unused tegra_i2c_runtime_resume(struct device *dev)
+{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+ int err;
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err)
+ return err;
+
+ err = clk_bulk_enable(i2c_dev->nclocks, i2c_dev->clocks);
+ if (err)
+ return err;
+
+ /*
+ * The VI I2C device is attached to the VE power domain, which goes
+ * through power ON/OFF during runtime PM resume/suspend, meaning that
+ * the controller needs to be re-initialized after power ON.
+ */
+ if (i2c_dev->is_vi) {
+ err = tegra_i2c_init(i2c_dev);
+ if (err)
+ goto disable_clocks;
+ }
- tegra_i2c_release_dma(i2c_dev);
return 0;
+
+disable_clocks:
+ clk_bulk_disable(i2c_dev->nclocks, i2c_dev->clocks);
+
+ return err;
+}
+
+static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev)
+{
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
+
+ clk_bulk_disable(i2c_dev->nclocks, i2c_dev->clocks);
+
+ return pinctrl_pm_select_idle_state(dev);
}
static int __maybe_unused tegra_i2c_suspend(struct device *dev)
{
struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- int err = 0;
+ int err;
i2c_mark_adapter_suspended(&i2c_dev->adapter);
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev)) {
err = tegra_i2c_runtime_suspend(dev);
+ if (err)
+ return err;
+ }
- return err;
+ return 0;
}
static int __maybe_unused tegra_i2c_resume(struct device *dev)
@@ -1907,7 +1876,7 @@ static int __maybe_unused tegra_i2c_resume(struct device *dev)
if (err)
return err;
- err = tegra_i2c_init(i2c_dev, false);
+ err = tegra_i2c_init(i2c_dev);
if (err)
return err;
@@ -1934,17 +1903,16 @@ static const struct dev_pm_ops tegra_i2c_pm = {
};
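For context, a sketch of how the __maybe_unused callbacks above are conventionally wired into the tegra_i2c_pm table just closed here; the table body itself is outside the visible hunks, so the exact macros used may differ:

static const struct dev_pm_ops tegra_i2c_pm = {
	SET_RUNTIME_PM_OPS(tegra_i2c_runtime_suspend,
			   tegra_i2c_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_i2c_suspend, tegra_i2c_resume)
};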
static struct platform_driver tegra_i2c_driver = {
- .probe = tegra_i2c_probe,
- .remove = tegra_i2c_remove,
- .driver = {
- .name = "tegra-i2c",
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+ .driver = {
+ .name = "tegra-i2c",
.of_match_table = tegra_i2c_of_match,
- .pm = &tegra_i2c_pm,
+ .pm = &tegra_i2c_pm,
},
};
-
module_platform_driver(tegra_i2c_driver);
-MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver");
+MODULE_DESCRIPTION("NVIDIA Tegra I2C Bus Controller driver");
MODULE_AUTHOR("Colin Cross");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 90c1c362394d..087b2951942e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,34 +46,36 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
- * @dev: Pointer to device structure
- * @base: Memory base of the HW registers
- * @wait: Wait queue for callers
- * @adap: Kernel adapter representation
- * @tx_msg: Messages from above to be sent
- * @lock: Mutual exclusion
- * @tx_pos: Current pos in TX message
- * @nmsgs: Number of messages in tx_msg
- * @state: See STATE_
- * @rx_msg: Current RX message
- * @rx_pos: Position within current RX message
+ * @dev: Pointer to device structure
+ * @base: Memory base of the HW registers
+ * @wait: Wait queue for callers
+ * @adap: Kernel adapter representation
+ * @tx_msg: Messages from above to be sent
+ * @lock: Mutual exclusion
+ * @tx_pos: Current pos in TX message
+ * @nmsgs: Number of messages in tx_msg
+ * @rx_msg: Current RX message
+ * @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
- * @clk: Pointer to AXI4-lite input clock
+ * @clk: Pointer to AXI4-lite input clock
+ * @state: See STATE_
+ * @singlemaster: Indicates bus is single master
*/
struct xiic_i2c {
- struct device *dev;
- void __iomem *base;
- wait_queue_head_t wait;
- struct i2c_adapter adap;
- struct i2c_msg *tx_msg;
- struct mutex lock;
- unsigned int tx_pos;
- unsigned int nmsgs;
- enum xilinx_i2c_state state;
- struct i2c_msg *rx_msg;
- int rx_pos;
- enum xiic_endian endianness;
+ struct device *dev;
+ void __iomem *base;
+ wait_queue_head_t wait;
+ struct i2c_adapter adap;
+ struct i2c_msg *tx_msg;
+ struct mutex lock;
+ unsigned int tx_pos;
+ unsigned int nmsgs;
+ struct i2c_msg *rx_msg;
+ int rx_pos;
+ enum xiic_endian endianness;
struct clk *clk;
+ enum xilinx_i2c_state state;
+ bool singlemaster;
};
@@ -526,6 +528,15 @@ static int xiic_busy(struct xiic_i2c *i2c)
if (i2c->tx_msg)
return -EBUSY;
+ /* In single-master mode the bus can only be busy when it is in use
+ * by this driver. If the register reports the bus busy for some other
+ * reason, we should ignore it, since the bus would never be released
+ * and I2C would be stuck forever.
+ */
+ if (i2c->singlemaster)
+ return 0;
+
/* for instance, if the previous transfer was terminated due to a TX error
* it might be that the bus is on its way to becoming available,
* so give it at most 3 ms to wake
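A condensed sketch of the resulting busy policy, folding the bounded wait into one routine; foo_bus_busy() is a hypothetical status-register check, and the retry figures merely echo the 3 ms noted in the comment above:

static int foo_xiic_busy(struct xiic_i2c *i2c)
{
	int tries;

	if (i2c->tx_msg)
		return -EBUSY;		/* a transfer is already queued */

	/* single master: nobody else can legitimately hold the bus */
	if (i2c->singlemaster)
		return 0;

	/* give a foreign master at most ~3 ms to release the bus */
	for (tries = 0; tries < 3; tries++) {
		if (!foo_bus_busy(i2c))	/* hypothetical register check */
			return 0;
		usleep_range(1000, 1100);
	}

	return -EBUSY;
}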
@@ -811,6 +822,9 @@ static int xiic_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
+ i2c->singlemaster =
+ of_property_read_bool(pdev->dev.of_node, "single-master");
+
/*
* Detect endianness
* Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index e627d7b2790f..37c510d9347a 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -264,6 +264,7 @@ static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
acpi_status status;
+ acpi_handle handle;
if (!has_acpi_companion(&adap->dev))
return;
@@ -274,6 +275,15 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
adap, NULL);
if (ACPI_FAILURE(status))
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+
+ if (!adap->dev.parent)
+ return;
+
+ handle = ACPI_HANDLE(adap->dev.parent);
+ if (!handle)
+ return;
+
+ acpi_walk_dep_device_list(handle);
}
static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
@@ -719,7 +729,6 @@ int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
return -ENOMEM;
}
- acpi_walk_dep_device_list(handle);
return 0;
}
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
new file mode 100644
index 000000000000..c288102de324
--- /dev/null
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I2C slave mode testunit
+ *
+ * Copyright (C) 2020 by Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2020 by Renesas Electronics Corporation
+ */
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h> /* FIXME: is system_long_wq the best choice? */
+
+#define TU_CUR_VERSION 0x01
+
+enum testunit_cmds {
+ TU_CMD_READ_BYTES = 1, /* save 0 for ABORT, RESET or similar */
+ TU_CMD_HOST_NOTIFY,
+ TU_NUM_CMDS
+};
+
+enum testunit_regs {
+ TU_REG_CMD,
+ TU_REG_DATAL,
+ TU_REG_DATAH,
+ TU_REG_DELAY,
+ TU_NUM_REGS
+};
+
+enum testunit_flags {
+ TU_FLAG_IN_PROCESS,
+};
+
+struct testunit_data {
+ unsigned long flags;
+ u8 regs[TU_NUM_REGS];
+ u8 reg_idx;
+ struct i2c_client *client;
+ struct delayed_work worker;
+};
+
+static void i2c_slave_testunit_work(struct work_struct *work)
+{
+ struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);
+ struct i2c_msg msg;
+ u8 msgbuf[256];
+ int ret = 0;
+
+ msg.addr = I2C_CLIENT_END;
+ msg.buf = msgbuf;
+
+ switch (tu->regs[TU_REG_CMD]) {
+ case TU_CMD_READ_BYTES:
+ msg.addr = tu->regs[TU_REG_DATAL];
+ msg.flags = I2C_M_RD;
+ msg.len = tu->regs[TU_REG_DATAH];
+ break;
+
+ case TU_CMD_HOST_NOTIFY:
+ msg.addr = 0x08;
+ msg.flags = 0;
+ msg.len = 3;
+ msgbuf[0] = tu->client->addr;
+ msgbuf[1] = tu->regs[TU_REG_DATAL];
+ msgbuf[2] = tu->regs[TU_REG_DATAH];
+ break;
+
+ default:
+ break;
+ }
+
+ if (msg.addr != I2C_CLIENT_END) {
+ ret = i2c_transfer(tu->client->adapter, &msg, 1);
+ /* convert '0 msgs transferred' to errno */
+ ret = (ret == 0) ? -EIO : ret;
+ }
+
+ if (ret < 0)
+ dev_err(&tu->client->dev, "CMD%02X failed (%d)\n", tu->regs[TU_REG_CMD], ret);
+
+ clear_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+}
+
+static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct testunit_data *tu = i2c_get_clientdata(client);
+ int ret = 0;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
+ return -EBUSY;
+
+ if (tu->reg_idx < TU_NUM_REGS)
+ tu->regs[tu->reg_idx] = *val;
+ else
+ ret = -EMSGSIZE;
+
+ if (tu->reg_idx <= TU_NUM_REGS)
+ tu->reg_idx++;
+
+ /* TU_REG_CMD always written at this point */
+ if (tu->regs[TU_REG_CMD] >= TU_NUM_CMDS)
+ ret = -EINVAL;
+
+ break;
+
+ case I2C_SLAVE_STOP:
+ if (tu->reg_idx == TU_NUM_REGS) {
+ set_bit(TU_FLAG_IN_PROCESS, &tu->flags);
+ queue_delayed_work(system_long_wq, &tu->worker,
+ msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY]));
+ }
+ fallthrough;
+
+ case I2C_SLAVE_WRITE_REQUESTED:
+ tu->reg_idx = 0;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ *val = TU_CUR_VERSION;
+ break;
+ }
+
+ return ret;
+}
+
+static int i2c_slave_testunit_probe(struct i2c_client *client)
+{
+ struct testunit_data *tu;
+
+ tu = devm_kzalloc(&client->dev, sizeof(struct testunit_data), GFP_KERNEL);
+ if (!tu)
+ return -ENOMEM;
+
+ tu->client = client;
+ i2c_set_clientdata(client, tu);
+ INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);
+
+ return i2c_slave_register(client, i2c_slave_testunit_slave_cb);
+};
+
+static int i2c_slave_testunit_remove(struct i2c_client *client)
+{
+ struct testunit_data *tu = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&tu->worker);
+ i2c_slave_unregister(client);
+ return 0;
+}
+
+static const struct i2c_device_id i2c_slave_testunit_id[] = {
+ { "slave-testunit", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, i2c_slave_testunit_id);
+
+static struct i2c_driver i2c_slave_testunit_driver = {
+ .driver = {
+ .name = "i2c-slave-testunit",
+ },
+ .probe_new = i2c_slave_testunit_probe,
+ .remove = i2c_slave_testunit_remove,
+ .id_table = i2c_slave_testunit_id,
+};
+module_i2c_driver(i2c_slave_testunit_driver);
+
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_DESCRIPTION("I2C slave mode test unit");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index dc0108287ccf..d3d06e3b4f3b 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -197,6 +197,113 @@ EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
module_i2c_driver(smbalert_driver);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+#define SMBUS_HOST_NOTIFY_LEN 3
+struct i2c_slave_host_notify_status {
+ u8 index;
+ u8 addr;
+};
+
+static int i2c_slave_host_notify_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct i2c_slave_host_notify_status *status = client->dev.platform_data;
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_RECEIVED:
+ /* We only retrieve the first byte received (addr)
+ * since there is currently no support for retrieving the data
+ * parameter from the client.
+ */
+ if (status->index == 0)
+ status->addr = *val;
+ if (status->index < U8_MAX)
+ status->index++;
+ break;
+ case I2C_SLAVE_STOP:
+ if (status->index == SMBUS_HOST_NOTIFY_LEN)
+ i2c_handle_smbus_host_notify(client->adapter,
+ status->addr);
+ fallthrough;
+ case I2C_SLAVE_WRITE_REQUESTED:
+ status->index = 0;
+ break;
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_READ_PROCESSED:
+ *val = 0xff;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * i2c_new_slave_host_notify_device - get a client for SMBus host-notify support
+ * @adapter: the target adapter
+ * Context: can sleep
+ *
+ * Set up handling of the SMBus host-notify protocol on a given I2C bus segment.
+ *
+ * Handling is done by creating a slave device with a callback that processes
+ * data received via the SMBus host-notify address (0x08).
+ *
+ * This returns the client, which should be ultimately freed using
+ * i2c_free_slave_host_notify_device(); or an ERR_PTR to indicate an error.
+ */
+struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter)
+{
+ struct i2c_board_info host_notify_board_info = {
+ I2C_BOARD_INFO("smbus_host_notify", 0x08),
+ .flags = I2C_CLIENT_SLAVE,
+ };
+ struct i2c_slave_host_notify_status *status;
+ struct i2c_client *client;
+ int ret;
+
+ status = kzalloc(sizeof(struct i2c_slave_host_notify_status),
+ GFP_KERNEL);
+ if (!status)
+ return ERR_PTR(-ENOMEM);
+
+ host_notify_board_info.platform_data = status;
+
+ client = i2c_new_client_device(adapter, &host_notify_board_info);
+ if (IS_ERR(client)) {
+ kfree(status);
+ return client;
+ }
+
+ ret = i2c_slave_register(client, i2c_slave_host_notify_cb);
+ if (ret) {
+ i2c_unregister_device(client);
+ kfree(status);
+ return ERR_PTR(ret);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(i2c_new_slave_host_notify_device);
+
+/**
+ * i2c_free_slave_host_notify_device - free the client for SMBus host-notify
+ * support
+ * @client: the client to free
+ * Context: can sleep
+ *
+ * Free the i2c_client allocated via i2c_new_slave_host_notify_device().
+ */
+void i2c_free_slave_host_notify_device(struct i2c_client *client)
+{
+ if (IS_ERR_OR_NULL(client))
+ return;
+
+ i2c_slave_unregister(client);
+ kfree(client->dev.platform_data);
+ i2c_unregister_device(client);
+}
+EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device);
+#endif
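A sketch of the intended consumer, assuming an adapter driver (foo_* names are illustrative) whose hardware lacks native Host Notify support:

struct foo_adapter {
	struct i2c_adapter adap;
	struct i2c_client *host_notify_client;
};

static int foo_setup_host_notify(struct foo_adapter *foo)
{
	struct i2c_client *client;

	client = i2c_new_slave_host_notify_device(&foo->adap);
	if (IS_ERR(client))
		return PTR_ERR(client);

	foo->host_notify_client = client;
	return 0;
}

static void foo_teardown_host_notify(struct foo_adapter *foo)
{
	/* tolerates ERR/NULL pointers, per the helper above */
	i2c_free_slave_host_notify_device(foo->host_notify_client);
}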
+
/*
* SPD is not part of SMBus but we include it here for convenience as the
* target systems are the same.
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index f830535cff12..d3acd8d66c32 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -85,18 +85,14 @@ static int i2c_mux_probe(struct platform_device *pdev)
return -ENOMEM;
mux->control = devm_mux_control_get(dev, NULL);
- if (IS_ERR(mux->control)) {
- if (PTR_ERR(mux->control) != -EPROBE_DEFER)
- dev_err(dev, "failed to get control-mux\n");
- return PTR_ERR(mux->control);
- }
+ if (IS_ERR(mux->control))
+ return dev_err_probe(dev, PTR_ERR(mux->control),
+ "failed to get control-mux\n");
parent = mux_parent_adapter(dev);
- if (IS_ERR(parent)) {
- if (PTR_ERR(parent) != -EPROBE_DEFER)
- dev_err(dev, "failed to get i2c-parent adapter\n");
- return PTR_ERR(parent);
- }
+ if (IS_ERR(parent))
+ return dev_err_probe(dev, PTR_ERR(parent),
+ "failed to get i2c-parent adapter\n");
children = of_get_child_count(np);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index b59a62f8d7a6..0e0679f65cf7 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -171,13 +171,9 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
sizeof(mux->data));
} else {
ret = i2c_mux_reg_probe_dt(mux, pdev);
- if (ret == -EPROBE_DEFER)
- return ret;
-
- if (ret < 0) {
- dev_err(&pdev->dev, "Error parsing device tree");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Error parsing device tree");
}
parent = i2c_get_adapter(mux->data.parent);
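Both mux conversions rely on the same dev_err_probe() contract: it returns the error code it is handed, logs at error level for real failures, and for -EPROBE_DEFER logs only at debug level while recording the deferral reason. A generic sketch (the "core" clock is illustrative):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get core clock\n");

	return 0;
}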
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 97f2e29265da..1c6b78ad5ade 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1373,7 +1373,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
enum i3c_addr_slot_status status;
int ret;
- if (dev->info.dyn_addr != old_dyn_addr) {
+ if (dev->info.dyn_addr != old_dyn_addr &&
+ (!dev->boardinfo ||
+ dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.dyn_addr);
if (status != I3C_ADDR_SLOT_FREE)
@@ -1432,33 +1434,49 @@ static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
master->ops->detach_i2c_dev(dev);
}
-static void i3c_master_pre_assign_dyn_addr(struct i3c_dev_desc *dev)
+static int i3c_master_early_i3c_dev_add(struct i3c_master_controller *master,
+ struct i3c_dev_boardinfo *boardinfo)
{
- struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_device_info info = {
+ .static_addr = boardinfo->static_addr,
+ };
+ struct i3c_dev_desc *i3cdev;
int ret;
- if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr ||
- !dev->boardinfo->static_addr)
- return;
+ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(i3cdev))
+ return -ENOMEM;
+
+ i3cdev->boardinfo = boardinfo;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
- ret = i3c_master_setdasa_locked(master, dev->info.static_addr,
- dev->boardinfo->init_dyn_addr);
+ ret = i3c_master_setdasa_locked(master, i3cdev->info.static_addr,
+ i3cdev->boardinfo->init_dyn_addr);
if (ret)
- return;
+ goto err_detach_dev;
- dev->info.dyn_addr = dev->boardinfo->init_dyn_addr;
- ret = i3c_master_reattach_i3c_dev(dev, 0);
+ i3cdev->info.dyn_addr = i3cdev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(i3cdev, 0);
if (ret)
goto err_rstdaa;
- ret = i3c_master_retrieve_dev_info(dev);
+ ret = i3c_master_retrieve_dev_info(i3cdev);
if (ret)
goto err_rstdaa;
- return;
+ return 0;
err_rstdaa:
- i3c_master_rstdaa_locked(master, dev->boardinfo->init_dyn_addr);
+ i3c_master_rstdaa_locked(master, i3cdev->boardinfo->init_dyn_addr);
+err_detach_dev:
+ i3c_master_detach_i3c_dev(i3cdev);
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
}
static void
@@ -1625,8 +1643,8 @@ static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
* This function follows all initialisation steps described in the I3C
* specification:
*
- * 1. Attach I2C and statically defined I3C devs to the master so that the
- * master can fill its internal device table appropriately
+ * 1. Attach I2C devs to the master so that the master can fill its internal
+ * device table appropriately
*
* 2. Call &i3c_master_controller_ops->bus_init() method to initialize
* the master controller. That's usually where the bus mode is selected
@@ -1638,8 +1656,10 @@ static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
*
* 4. Disable all slave events.
*
- * 5. Pre-assign dynamic addresses requested by the FW with SETDASA for I3C
- * devices that have a static address
+ * 5. Reserve address slots for I3C devices with init_dyn_addr. If those
+ * devices also have a static_addr, try to pre-assign the dynamic addresses
+ * requested by the FW with SETDASA and attach the corresponding statically
+ * defined I3C devices to the master.
*
* 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
* remaining I3C devices
@@ -1653,7 +1673,6 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
enum i3c_addr_slot_status status;
struct i2c_dev_boardinfo *i2cboardinfo;
struct i3c_dev_boardinfo *i3cboardinfo;
- struct i3c_dev_desc *i3cdev;
struct i2c_dev_desc *i2cdev;
int ret;
@@ -1685,34 +1704,6 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_detach_devs;
}
}
- list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
- struct i3c_device_info info = {
- .static_addr = i3cboardinfo->static_addr,
- };
-
- if (i3cboardinfo->init_dyn_addr) {
- status = i3c_bus_get_addr_slot_status(&master->bus,
- i3cboardinfo->init_dyn_addr);
- if (status != I3C_ADDR_SLOT_FREE) {
- ret = -EBUSY;
- goto err_detach_devs;
- }
- }
-
- i3cdev = i3c_master_alloc_i3c_dev(master, &info);
- if (IS_ERR(i3cdev)) {
- ret = PTR_ERR(i3cdev);
- goto err_detach_devs;
- }
-
- i3cdev->boardinfo = i3cboardinfo;
-
- ret = i3c_master_attach_i3c_dev(master, i3cdev);
- if (ret) {
- i3c_master_free_i3c_dev(i3cdev);
- goto err_detach_devs;
- }
- }
/*
* Now execute the controller specific ->bus_init() routine, which
@@ -1749,11 +1740,43 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
goto err_bus_cleanup;
/*
- * Pre-assign dynamic address and retrieve device information if
- * needed.
+ * Reserve init_dyn_addr first, and then try to pre-assign dynamic
+ * address and retrieve device information if needed.
+ * If pre-assigning the dynamic address fails, setting the dynamic address to
+ * the requested init_dyn_addr is retried after DAA is done in
+ * i3c_master_add_i3c_dev_locked().
*/
- i3c_bus_for_each_i3cdev(&master->bus, i3cdev)
- i3c_master_pre_assign_dyn_addr(i3cdev);
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+
+ /*
+ * We don't reserve a dynamic address for devices that
+ * don't explicitly request one.
+ */
+ if (!i3cboardinfo->init_dyn_addr)
+ continue;
+
+ ret = i3c_bus_get_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr);
+ if (ret != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_rstdaa;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+
+ /*
+ * Only try to create/attach devices that have a static
+ * address. Other devices will be created/attached when
+ * DAA happens, and the requested dynamic address will
+ * be set using SETNEWDA once those devices become
+ * addressable.
+ */
+
+ if (i3cboardinfo->static_addr)
+ i3c_master_early_i3c_dev_add(master, i3cboardinfo);
+ }
ret = i3c_master_do_daa(master);
if (ret)
@@ -1782,6 +1805,21 @@ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
i3c_master_detach_free_devs(master);
}
+static void i3c_master_attach_boardinfo(struct i3c_dev_desc *i3cdev)
+{
+ struct i3c_master_controller *master = i3cdev->common.master;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+ if (i3cdev->info.pid != i3cboardinfo->pid)
+ continue;
+
+ i3cdev->boardinfo = i3cboardinfo;
+ i3cdev->info.static_addr = i3cboardinfo->static_addr;
+ return;
+ }
+}
+
static struct i3c_dev_desc *
i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
{
@@ -1837,10 +1875,10 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
if (ret)
goto err_detach_dev;
+ i3c_master_attach_boardinfo(newdev);
+
olddev = i3c_master_search_i3c_dev_duplicate(newdev);
if (olddev) {
- newdev->boardinfo = olddev->boardinfo;
- newdev->info.static_addr = olddev->info.static_addr;
newdev->dev = olddev->dev;
if (newdev->dev)
newdev->dev->desc = newdev;
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 3fee8bd7fe20..3f2226928fe0 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1635,8 +1635,10 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
sizeof(*master->ibi.slots),
GFP_KERNEL);
- if (!master->ibi.slots)
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
goto err_disable_sysclk;
+ }
writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
writel(MST_INT_IBIR_THR, master->regs + MST_IER);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 973ed4b684ce..19abf11c84c8 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -744,9 +744,10 @@ config BLK_DEV_MAC_IDE
depends on MAC
help
This is the IDE driver for the on-board IDE interface on some m68k
- Macintosh models. It supports both the `Quadra style' (used in
- Quadra/ Centris 630 and Performa 588 models) and `Powerbook style'
- (used in the Powerbook 150 and 190 models) IDE interface.
+ Macintosh models, namely Quadra/Centris 630, Performa 588 and
+ Powerbook 150. The IDE interface on the Powerbook 190 is not
+ supported by this driver and requires BLK_DEV_PLATFORM or
+ PATA_PLATFORM.
Say Y if you have such a Macintosh model and want to use IDE
devices (hard disks, CD-ROM drives, etc.) that are connected to the
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 212bb2d8bf34..25d2d88e82ad 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1611,7 +1611,11 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
struct cdrom_info *info;
int rc = -ENXIO;
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev)) {
+ info = ide_drv_g(bdev->bd_disk, cdrom_info);
+
+ ide_cd_read_toc(info->drive);
+ }
mutex_lock(&ide_cd_mutex);
info = ide_cd_get(bdev->bd_disk);
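The change above illustrates the pattern that replaces the old check_disk_change() call throughout these IDE hunks: bdev_check_media_change() only reports whether the media changed, and the driver itself performs whatever revalidation it needs. As a generic sketch (foo_* names are illustrative):

static int foo_open(struct block_device *bdev, fmode_t mode)
{
	struct foo_device *foo = bdev->bd_disk->private_data;

	if (bdev_check_media_change(bdev))
		foo_revalidate(foo);	/* driver-specific re-read, e.g. TOC */

	/* ... normal open path ... */
	return 0;
}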
@@ -1753,15 +1757,6 @@ static unsigned int idecd_check_events(struct gendisk *disk,
return cdrom_check_events(&info->devinfo, clearing);
}
-static int idecd_revalidate_disk(struct gendisk *disk)
-{
- struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
-
- ide_cd_read_toc(info->drive);
-
- return 0;
-}
-
static const struct block_device_operations idecd_ops = {
.owner = THIS_MODULE,
.open = idecd_open,
@@ -1770,7 +1765,6 @@ static const struct block_device_operations idecd_ops = {
.compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
idecd_compat_ioctl : NULL,
.check_events = idecd_check_events,
- .revalidate_disk = idecd_revalidate_disk
};
/* module options */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 1d3407d7e095..34b9441084f8 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -739,12 +739,9 @@ static void ide_disk_setup(ide_drive_t *drive)
set_wcache(drive, 1);
if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
- (drive->head == 0 || drive->head > 16)) {
+ (drive->head == 0 || drive->head > 16))
printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
drive->name, drive->head);
- drive->dev_flags &= ~IDE_DFLAG_ATTACH;
- } else
- drive->dev_flags |= IDE_DFLAG_ATTACH;
}
static void ide_disk_flush(ide_drive_t *drive)
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index af7503b47dbe..f5a2870aaf54 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -516,8 +516,6 @@ static void ide_floppy_setup(ide_drive_t *drive)
(void) ide_floppy_get_capacity(drive);
ide_proc_register_driver(drive, floppy->driver);
-
- drive->dev_flags |= IDE_DFLAG_ATTACH;
}
static void ide_floppy_flush(ide_drive_t *drive)
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 05c26986637b..e2b6c82586ce 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -225,8 +225,12 @@ static int ide_gd_open(struct block_device *bdev, fmode_t mode)
* and the door_lock is irrelevant at this point.
*/
drive->disk_ops->set_doorlock(drive, disk, 1);
- drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
- check_disk_change(bdev);
+ if (__invalidate_device(bdev, true))
+ pr_warn("VFS: busy inodes on changed media %s\n",
+ bdev->bd_disk->disk_name);
+ drive->disk_ops->get_capacity(drive);
+ set_capacity(disk, ide_gd_capacity(drive));
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
} else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
ret = -EBUSY;
goto out_put_idkp;
@@ -284,32 +288,6 @@ static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static unsigned int ide_gd_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
- bool ret;
-
- /* do not scan partitions twice if this is a removable device */
- if (drive->dev_flags & IDE_DFLAG_ATTACH) {
- drive->dev_flags &= ~IDE_DFLAG_ATTACH;
- return 0;
- }
-
- /*
- * The following is used to force revalidation on the first open on
- * removeable devices, and never gets reported to userland as
- * DISK_EVENT_FLAG_UEVENT isn't set in genhd->event_flags.
- * This is intended as removable ide disk can't really detect
- * MEDIA_CHANGE events.
- */
- ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
- drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
-
- return ret ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
static void ide_gd_unlock_native_capacity(struct gendisk *disk)
{
struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -320,18 +298,6 @@ static void ide_gd_unlock_native_capacity(struct gendisk *disk)
disk_ops->unlock_native_capacity(drive);
}
-static int ide_gd_revalidate_disk(struct gendisk *disk)
-{
- struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
- ide_drive_t *drive = idkp->drive;
-
- if (ide_gd_check_events(disk, 0))
- drive->disk_ops->get_capacity(drive);
-
- set_capacity(disk, ide_gd_capacity(drive));
- return 0;
-}
-
static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -364,9 +330,7 @@ static const struct block_device_operations ide_gd_ops = {
.compat_ioctl = ide_gd_compat_ioctl,
#endif
.getgeo = ide_gd_getgeo,
- .check_events = ide_gd_check_events,
.unlock_native_capacity = ide_gd_unlock_native_capacity,
- .revalidate_disk = ide_gd_revalidate_disk
};
static int ide_gd_probe(ide_drive_t *drive)
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 09491098047b..58994da10c06 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -49,7 +49,7 @@ read_val:
return err >= 0 ? put_user_long(err, arg) : err;
set_val:
- if (bdev != bdev->bd_contains)
+ if (bdev_is_partition(bdev))
err = -EINVAL;
else {
if (!capable(CAP_SYS_ADMIN))
@@ -257,7 +257,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
switch (cmd) {
case HDIO_OBSOLETE_IDENTITY:
case HDIO_GET_IDENTITY:
- if (bdev != bdev->bd_contains)
+ if (bdev_is_partition(bdev))
return -EINVAL;
return ide_get_identity_ioctl(drive, cmd, argp);
case HDIO_GET_NICE:
diff --git a/drivers/ide/macide.c b/drivers/ide/macide.c
index adc5fe9daafc..8d2bf73bc548 100644
--- a/drivers/ide/macide.c
+++ b/drivers/ide/macide.c
@@ -18,10 +18,11 @@
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/macintosh.h>
-#include <asm/macints.h>
-#include <asm/mac_baboon.h>
+
+#define DRV_NAME "mac_ide"
#define IDE_BASE 0x50F1A000 /* Base address of IDE controller */
@@ -100,42 +101,61 @@ static const char *mac_ide_name[] =
* Probe for a Macintosh IDE interface
*/
-static int __init macide_init(void)
+static int mac_ide_probe(struct platform_device *pdev)
{
- unsigned long base;
- int irq;
+ struct resource *mem, *irq;
struct ide_hw hw, *hws[] = { &hw };
struct ide_port_info d = macide_port_info;
+ struct ide_host *host;
+ int rc;
if (!MACH_IS_MAC)
return -ENODEV;
- switch (macintosh_config->ide_type) {
- case MAC_IDE_QUADRA:
- base = IDE_BASE;
- irq = IRQ_NUBUS_F;
- break;
- case MAC_IDE_PB:
- base = IDE_BASE;
- irq = IRQ_NUBUS_C;
- break;
- case MAC_IDE_BABOON:
- base = BABOON_BASE;
- d.port_ops = NULL;
- irq = IRQ_BABOON_1;
- break;
- default:
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq)
return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start,
+ resource_size(mem), DRV_NAME)) {
+ dev_err(&pdev->dev, "resources busy\n");
+ return -EBUSY;
}
printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
mac_ide_name[macintosh_config->ide_type - 1]);
- macide_setup_ports(&hw, base, irq);
+ macide_setup_ports(&hw, mem->start, irq->start);
- return ide_host_add(&d, hws, 1, NULL);
+ rc = ide_host_add(&d, hws, 1, &host);
+ if (rc)
+ return rc;
+
+ platform_set_drvdata(pdev, host);
+ return 0;
}
-module_init(macide_init);
+static int mac_ide_remove(struct platform_device *pdev)
+{
+ struct ide_host *host = platform_get_drvdata(pdev);
+
+ ide_host_remove(host);
+ return 0;
+}
+
+static struct platform_driver mac_ide_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = mac_ide_probe,
+ .remove = mac_ide_remove,
+};
+
+module_platform_driver(mac_ide_driver);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 9a810e4a7946..01bace49a962 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -8,7 +8,7 @@
*/
/*
- * intel_idle is a cpuidle driver that loads on specific Intel processors
+ * intel_idle is a cpuidle driver that loads on all Intel CPUs with MWAIT
* in lieu of the legacy ACPI processor_idle driver. The intent is to
* make Linux more efficient on these processors, as intel_idle knows
* more than ACPI, as well as make Linux more immune to ACPI BIOS bugs.
@@ -20,7 +20,11 @@
* All CPUs have same idle states as boot CPU
*
* Chipset BM_STS (bus master status) bit is a NOP
- * for preventing entry into deep C-stats
+ * for preventing entry into deep C-states
+ *
+ * CPU will flush caches as needed when entering a C-state via MWAIT
+ * (in contrast to entering ACPI C3, in which case the WBINVD
+ * instruction needs to be executed to flush the caches)
*/
/*
@@ -1212,14 +1216,13 @@ static bool __init intel_idle_acpi_cst_extract(void)
if (!intel_idle_cst_usable())
continue;
- if (!acpi_processor_claim_cst_control()) {
- acpi_state_table.count = 0;
- return false;
- }
+ if (!acpi_processor_claim_cst_control())
+ break;
return true;
}
+ acpi_state_table.count = 0;
pr_debug("ACPI _CST not found or not usable\n");
return false;
}
@@ -1236,7 +1239,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
- if (intel_idle_max_cstate_reached(cstate))
+ if (intel_idle_max_cstate_reached(cstate - 1))
break;
cx = &acpi_state_table.states[cstate];
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index d5c073a8aa3e..267553386c71 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -63,7 +63,7 @@ config IIO_SW_TRIGGER
using the API provided.
config IIO_TRIGGERED_EVENT
- tristate
+ tristate "Enable triggered events support"
select IIO_TRIGGER
help
Provides helper functions for setting up triggered events.
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 59a24c355a1a..f955cccb3e77 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -281,34 +281,15 @@ static int adis16201_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
return ret;
ret = adis_initial_startup(st);
if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16201_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver adis16201_driver = {
@@ -316,7 +297,6 @@ static struct spi_driver adis16201_driver = {
.name = "adis16201",
},
.probe = adis16201_probe,
- .remove = adis16201_remove,
};
module_spi_driver(adis16201_driver);
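This and the adis16209 conversion below land on the same probe-only shape: every resource is device-managed, and devm teardown runs in reverse registration order on unbind, which is why devm_iio_device_register() must come last (userspace loses access before the buffer and trigger go away). A sketch with an illustrative devm_foo_setup() helper:

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&spi->dev,
					  sizeof(struct foo_state));
	if (!indio_dev)
		return -ENOMEM;

	ret = devm_foo_setup(&spi->dev, indio_dev);	/* illustrative */
	if (ret)
		return ret;

	/* registered last, so it is unwound first on unbind */
	return devm_iio_device_register(&spi->dev, indio_dev);
}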
diff --git a/drivers/iio/accel/adis16209.c b/drivers/iio/accel/adis16209.c
index 3d5538e2f76e..4a841aec6268 100644
--- a/drivers/iio/accel/adis16209.c
+++ b/drivers/iio/accel/adis16209.c
@@ -291,33 +291,15 @@ static int adis16209_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
return ret;
ret = adis_initial_startup(st);
if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16209_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static struct spi_driver adis16209_driver = {
@@ -325,7 +307,6 @@ static struct spi_driver adis16209_driver = {
.name = "adis16209",
},
.probe = adis16209_probe,
- .remove = adis16209_remove,
};
module_spi_driver(adis16209_driver);
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index e7e316b75e87..aed2a4930fb0 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -5,6 +5,7 @@
* Copyright 2018 Analog Devices Inc.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -113,6 +114,11 @@
#define ADXL372_STATUS_1_AWAKE(x) (((x) >> 6) & 0x1)
#define ADXL372_STATUS_1_ERR_USR_REGS(x) (((x) >> 7) & 0x1)
+/* ADXL372_STATUS_2 */
+#define ADXL372_STATUS_2_INACT(x) (((x) >> 4) & 0x1)
+#define ADXL372_STATUS_2_ACT(x) (((x) >> 5) & 0x1)
+#define ADXL372_STATUS_2_AC2(x) (((x) >> 6) & 0x1)
+
/* ADXL372_INT1_MAP */
#define ADXL372_INT1_MAP_DATA_RDY_MSK BIT(0)
#define ADXL372_INT1_MAP_DATA_RDY_MODE(x) (((x) & 0x1) << 0)
@@ -131,8 +137,17 @@
#define ADXL372_INT1_MAP_LOW_MSK BIT(7)
#define ADXL372_INT1_MAP_LOW_MODE(x) (((x) & 0x1) << 7)
+/* ADX372_THRESH */
+#define ADXL372_THRESH_VAL_H_MSK GENMASK(10, 3)
+#define ADXL372_THRESH_VAL_H_SEL(x) FIELD_GET(ADXL372_THRESH_VAL_H_MSK, x)
+#define ADXL372_THRESH_VAL_L_MSK GENMASK(2, 0)
+#define ADXL372_THRESH_VAL_L_SEL(x) FIELD_GET(ADXL372_THRESH_VAL_L_MSK, x)
+
/* The ADXL372 includes a deep, 512 sample FIFO buffer */
#define ADXL372_FIFO_SIZE 512
+#define ADXL372_X_AXIS_EN(x) ((x) & BIT(0))
+#define ADXL372_Y_AXIS_EN(x) ((x) & BIT(1))
+#define ADXL372_Z_AXIS_EN(x) ((x) & BIT(2))
/*
* At +/- 200g with 12-bit resolution, scale is computed as:
@@ -222,6 +237,20 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
{ BIT(0) | BIT(1) | BIT(2), ADXL372_XYZ_FIFO },
};
+static const struct iio_event_spec adxl372_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD) | BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_shared_by_all = BIT(IIO_EV_INFO_PERIOD) | BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
#define ADXL372_ACCEL_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
.address = reg, \
@@ -239,6 +268,8 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
.shift = 4, \
.endianness = IIO_BE, \
}, \
+ .event_spec = adxl372_events, \
+ .num_event_specs = ARRAY_SIZE(adxl372_events) \
}
static const struct iio_chan_spec adxl372_channels[] = {
@@ -252,8 +283,10 @@ struct adxl372_state {
struct device *dev;
struct regmap *regmap;
struct iio_trigger *dready_trig;
+ struct iio_trigger *peak_datardy_trig;
enum adxl372_fifo_mode fifo_mode;
enum adxl372_fifo_format fifo_format;
+ unsigned int fifo_axis_mask;
enum adxl372_op_mode op_mode;
enum adxl372_act_proc_mode act_proc_mode;
enum adxl372_odr odr;
@@ -261,10 +294,12 @@ struct adxl372_state {
u32 act_time_ms;
u32 inact_time_ms;
u8 fifo_set_size;
- u8 int1_bitmask;
- u8 int2_bitmask;
+ unsigned long int1_bitmask;
+ unsigned long int2_bitmask;
u16 watermark;
__be16 fifo_buf[ADXL372_FIFO_SIZE];
+ bool peak_fifo_mode_en;
+ struct mutex threshold_m; /* lock for threshold */
};
static const unsigned long adxl372_channel_masks[] = {
@@ -276,6 +311,46 @@ static const unsigned long adxl372_channel_masks[] = {
0
};
+static ssize_t adxl372_read_threshold_value(struct iio_dev *indio_dev, unsigned int addr,
+ u16 *threshold)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+ __be16 raw_regval;
+ u16 regval;
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap, addr, &raw_regval, sizeof(raw_regval));
+ if (ret < 0)
+ return ret;
+
+ regval = be16_to_cpu(raw_regval);
+ regval >>= 5;
+
+ *threshold = regval;
+
+ return 0;
+}
+
+static ssize_t adxl372_write_threshold_value(struct iio_dev *indio_dev, unsigned int addr,
+ u16 threshold)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->threshold_m);
+ ret = regmap_write(st->regmap, addr, ADXL372_THRESH_VAL_H_SEL(threshold));
+ if (ret < 0)
+ goto unlock;
+
+ ret = regmap_update_bits(st->regmap, addr + 1, GENMASK(7, 5),
+ ADXL372_THRESH_VAL_L_SEL(threshold) << 5);
+
+unlock:
+ mutex_unlock(&st->threshold_m);
+
+ return ret;
+}
+
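A worked example of the 11-bit threshold packing implemented above, runnable as plain C: value 0x4AB keeps bits 10:3 (0x95) in the high register, while bits 2:0 (0b011) land in bits 7:5 of the low register (0x60); the read path's big-endian fetch followed by a right shift of 5 recovers the original value.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t thr = 0x4AB;			/* 11-bit threshold */
	uint8_t h = (thr >> 3) & 0xff;		/* THRESH_VAL_H: bits 10:3 */
	uint8_t l = (thr & 0x7) << 5;		/* THRESH_VAL_L: bits 7:5  */

	/* mirrors adxl372_read_threshold_value(): be16 fetch, then >> 5 */
	assert(((uint16_t)(h << 8 | l) >> 5) == thr);

	return 0;
}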
static int adxl372_read_axis(struct adxl372_state *st, u8 addr)
{
__be16 regval;
@@ -453,8 +528,8 @@ static int adxl372_set_inactivity_time_ms(struct adxl372_state *st,
}
static int adxl372_set_interrupts(struct adxl372_state *st,
- unsigned char int1_bitmask,
- unsigned char int2_bitmask)
+ unsigned long int1_bitmask,
+ unsigned long int2_bitmask)
{
int ret;
@@ -523,6 +598,39 @@ static int adxl372_get_status(struct adxl372_state *st,
return ret;
}
+static void adxl372_arrange_axis_data(struct adxl372_state *st, __be16 *sample)
+{
+ __be16 axis_sample[3];
+ int i = 0;
+
+ memset(axis_sample, 0, 3 * sizeof(__be16));
+ if (ADXL372_X_AXIS_EN(st->fifo_axis_mask))
+ axis_sample[i++] = sample[0];
+ if (ADXL372_Y_AXIS_EN(st->fifo_axis_mask))
+ axis_sample[i++] = sample[1];
+ if (ADXL372_Z_AXIS_EN(st->fifo_axis_mask))
+ axis_sample[i++] = sample[2];
+
+ memcpy(sample, axis_sample, 3 * sizeof(__be16));
+}
+
+static void adxl372_push_event(struct iio_dev *indio_dev, s64 timestamp, u8 status2)
+{
+ unsigned int ev_dir = IIO_EV_DIR_NONE;
+
+ if (ADXL372_STATUS_2_ACT(status2))
+ ev_dir = IIO_EV_DIR_RISING;
+
+ if (ADXL372_STATUS_2_INACT(status2))
+ ev_dir = IIO_EV_DIR_FALLING;
+
+ if (ev_dir != IIO_EV_DIR_NONE)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_THRESH, ev_dir),
+ timestamp);
+}
+
static irqreturn_t adxl372_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -536,6 +644,8 @@ static irqreturn_t adxl372_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
+ adxl372_push_event(indio_dev, iio_get_time_ns(indio_dev), status2);
+
if (st->fifo_mode != ADXL372_FIFO_BYPASSED &&
ADXL372_STATUS_1_FIFO_FULL(status1)) {
/*
@@ -554,8 +664,12 @@ static irqreturn_t adxl372_trigger_handler(int irq, void *p)
goto err;
/* Each sample is 2 bytes */
- for (i = 0; i < fifo_entries; i += st->fifo_set_size)
+ for (i = 0; i < fifo_entries; i += st->fifo_set_size) {
+ /* filter peak detection data */
+ if (st->peak_fifo_mode_en)
+ adxl372_arrange_axis_data(st, &st->fifo_buf[i]);
iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
+ }
}
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -723,6 +837,129 @@ static int adxl372_write_raw(struct iio_dev *indio_dev,
}
}
+static int adxl372_read_event_value(struct iio_dev *indio_dev, const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+ unsigned int addr;
+ u16 raw_value;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ addr = ADXL372_X_THRESH_ACT_H + 2 * chan->scan_index;
+ ret = adxl372_read_threshold_value(indio_dev, addr, &raw_value);
+ if (ret < 0)
+ return ret;
+ *val = raw_value * ADXL372_USCALE;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ addr = ADXL372_X_THRESH_INACT_H + 2 * chan->scan_index;
+ ret = adxl372_read_threshold_value(indio_dev, addr, &raw_value);
+ if (ret < 0)
+ return ret;
+ *val = raw_value * ADXL372_USCALE;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = st->act_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ *val = st->inact_time_ms;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl372_write_event_value(struct iio_dev *indio_dev, const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+ unsigned int val_ms;
+ unsigned int addr;
+ u16 raw_val;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ raw_val = DIV_ROUND_UP(val * 1000000, ADXL372_USCALE);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ addr = ADXL372_X_THRESH_ACT_H + 2 * chan->scan_index;
+ return adxl372_write_threshold_value(indio_dev, addr, raw_val);
+ case IIO_EV_DIR_FALLING:
+ addr = ADXL372_X_THRESH_INACT_H + 2 * chan->scan_index;
+ return adxl372_write_threshold_value(indio_dev, addr, raw_val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ val_ms = val * 1000 + DIV_ROUND_UP(val2, 1000);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl372_set_activity_time_ms(st, val_ms);
+ case IIO_EV_DIR_FALLING:
+ return adxl372_set_inactivity_time_ms(st, val_ms);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl372_read_event_config(struct iio_dev *indio_dev, const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return FIELD_GET(ADXL372_INT1_MAP_ACT_MSK, st->int1_bitmask);
+ case IIO_EV_DIR_FALLING:
+ return FIELD_GET(ADXL372_INT1_MAP_INACT_MSK, st->int1_bitmask);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl372_write_event_config(struct iio_dev *indio_dev, const struct iio_chan_spec *chan,
+ enum iio_event_type type, enum iio_event_direction dir,
+ int state)
+{
+ struct adxl372_state *st = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ set_mask_bits(&st->int1_bitmask, ADXL372_INT1_MAP_ACT_MSK,
+ ADXL372_INT1_MAP_ACT_MODE(state));
+ break;
+ case IIO_EV_DIR_FALLING:
+ set_mask_bits(&st->int1_bitmask, ADXL372_INT1_MAP_INACT_MSK,
+ ADXL372_INT1_MAP_INACT_MODE(state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adxl372_set_interrupts(st, st->int1_bitmask, 0);
+}
+
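A standalone sketch of the bookkeeping used by adxl372_read_event_config() and adxl372_write_event_config() above: set_mask_bits() atomically performs *p = (*p & ~mask) | bits, so the cached int1_bitmask doubles as the enable state later read back with FIELD_GET(). The mask value here is illustrative, not the ADXL372's:

#include <stdio.h>

#define ACT_MSK		(1u << 5)	/* illustrative bit position */
#define ACT_MODE(x)	((x) ? ACT_MSK : 0)

/* Non-atomic stand-in for the kernel's set_mask_bits() */
static void set_mask_bits(unsigned int *p, unsigned int mask, unsigned int bits)
{
	*p = (*p & ~mask) | bits;
}

int main(void)
{
	unsigned int int1_bitmask = 0;

	set_mask_bits(&int1_bitmask, ACT_MSK, ACT_MODE(1));
	printf("activity enabled: %u\n", !!(int1_bitmask & ACT_MSK));	/* 1 */
	set_mask_bits(&int1_bitmask, ACT_MSK, ACT_MODE(0));
	printf("activity enabled: %u\n", !!(int1_bitmask & ACT_MSK));	/* 0 */
	return 0;
}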
static ssize_t adxl372_show_filter_freq_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -795,7 +1032,8 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
unsigned int mask;
int i, ret;
- ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0);
+ st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK;
+ ret = adxl372_set_interrupts(st, st->int1_bitmask, 0);
if (ret < 0)
return ret;
@@ -810,13 +1048,22 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
return -EINVAL;
st->fifo_format = adxl372_axis_lookup_table[i].fifo_format;
+ st->fifo_axis_mask = adxl372_axis_lookup_table[i].bits;
st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
indio_dev->masklength);
+
+ /* Configure the FIFO to store sets of impact event peaks. */
+ if (st->peak_fifo_mode_en) {
+ st->fifo_set_size = 3;
+ st->fifo_format = ADXL372_XYZ_PEAK_FIFO;
+ }
+
/*
* The 512 FIFO samples can be allotted in several ways, such as:
* 170 sample sets of concurrent 3-axis data
* 256 sample sets of concurrent 2-axis data (user selectable)
* 512 sample sets of single-axis data
+ * 170 sets of impact event peaks (x, y, z)
*/
if ((st->watermark * st->fifo_set_size) > ADXL372_FIFO_SIZE)
st->watermark = (ADXL372_FIFO_SIZE / st->fifo_set_size);
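Worked example of the clamp above: in peak mode each FIFO set is three samples, so the watermark is capped at 512 / 3 = 170 sets; two-axis scans cap at 256 sets, and a single-axis scan can use all 512 entries as one-sample sets.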
@@ -826,7 +1073,8 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
ret = adxl372_configure_fifo(st);
if (ret < 0) {
st->fifo_mode = ADXL372_FIFO_BYPASSED;
- adxl372_set_interrupts(st, 0, 0);
+ st->int1_bitmask &= ~ADXL372_INT1_MAP_FIFO_FULL_MSK;
+ adxl372_set_interrupts(st, st->int1_bitmask, 0);
return ret;
}
@@ -837,7 +1085,8 @@ static int adxl372_buffer_predisable(struct iio_dev *indio_dev)
{
struct adxl372_state *st = iio_priv(indio_dev);
- adxl372_set_interrupts(st, 0, 0);
+ st->int1_bitmask &= ~ADXL372_INT1_MAP_FIFO_FULL_MSK;
+ adxl372_set_interrupts(st, st->int1_bitmask, 0);
st->fifo_mode = ADXL372_FIFO_BYPASSED;
adxl372_configure_fifo(st);
@@ -854,12 +1103,11 @@ static int adxl372_dready_trig_set_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct adxl372_state *st = iio_priv(indio_dev);
- unsigned long int mask = 0;
if (state)
- mask = ADXL372_INT1_MAP_FIFO_FULL_MSK;
+ st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK;
- return adxl372_set_interrupts(st, mask, 0);
+ return adxl372_set_interrupts(st, st->int1_bitmask, 0);
}
static int adxl372_validate_trigger(struct iio_dev *indio_dev,
@@ -867,7 +1115,7 @@ static int adxl372_validate_trigger(struct iio_dev *indio_dev,
{
struct adxl372_state *st = iio_priv(indio_dev);
- if (st->dready_trig != trig)
+ if (st->dready_trig != trig && st->peak_datardy_trig != trig)
return -EINVAL;
return 0;
@@ -878,6 +1126,25 @@ static const struct iio_trigger_ops adxl372_trigger_ops = {
.set_trigger_state = adxl372_dready_trig_set_state,
};
+static int adxl372_peak_dready_trig_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct adxl372_state *st = iio_priv(indio_dev);
+
+ if (state)
+ st->int1_bitmask |= ADXL372_INT1_MAP_FIFO_FULL_MSK;
+
+ st->peak_fifo_mode_en = state;
+
+ return adxl372_set_interrupts(st, st->int1_bitmask, 0);
+}
+
+static const struct iio_trigger_ops adxl372_peak_data_trigger_ops = {
+ .validate_device = &iio_trigger_validate_own_device,
+ .set_trigger_state = adxl372_peak_dready_trig_set_state,
+};
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("400 800 1600 3200 6400");
static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
0444, adxl372_show_filter_freq_avail, NULL, 0);
@@ -897,6 +1164,10 @@ static const struct iio_info adxl372_info = {
.attrs = &adxl372_attrs_group,
.read_raw = adxl372_read_raw,
.write_raw = adxl372_write_raw,
+ .read_event_config = adxl372_read_event_config,
+ .write_event_config = adxl372_write_event_config,
+ .read_event_value = adxl372_read_event_value,
+ .write_event_value = adxl372_write_event_value,
.debugfs_reg_access = &adxl372_reg_access,
.hwfifo_set_watermark = adxl372_set_watermark,
};
@@ -925,6 +1196,8 @@ int adxl372_probe(struct device *dev, struct regmap *regmap,
st->regmap = regmap;
st->irq = irq;
+ mutex_init(&st->threshold_m);
+
indio_dev->channels = adxl372_channels;
indio_dev->num_channels = ARRAY_SIZE(adxl372_channels);
indio_dev->available_scan_masks = adxl372_channel_masks;
@@ -955,13 +1228,27 @@ int adxl372_probe(struct device *dev, struct regmap *regmap,
if (st->dready_trig == NULL)
return -ENOMEM;
+ st->peak_datardy_trig = devm_iio_trigger_alloc(dev,
+ "%s-dev%d-peak",
+ indio_dev->name,
+ indio_dev->id);
+ if (!st->peak_datardy_trig)
+ return -ENOMEM;
+
st->dready_trig->ops = &adxl372_trigger_ops;
+ st->peak_datardy_trig->ops = &adxl372_peak_data_trigger_ops;
st->dready_trig->dev.parent = dev;
+ st->peak_datardy_trig->dev.parent = dev;
iio_trigger_set_drvdata(st->dready_trig, indio_dev);
+ iio_trigger_set_drvdata(st->peak_datardy_trig, indio_dev);
ret = devm_iio_trigger_register(dev, st->dready_trig);
if (ret < 0)
return ret;
+ ret = devm_iio_trigger_register(dev, st->peak_datardy_trig);
+ if (ret < 0)
+ return ret;
+
indio_dev->trig = iio_trigger_get(st->dready_trig);
ret = devm_request_threaded_irq(dev, st->irq,
diff --git a/drivers/iio/accel/adxl372_i2c.c b/drivers/iio/accel/adxl372_i2c.c
index e1affe480c77..9a07ab3d151a 100644
--- a/drivers/iio/accel/adxl372_i2c.c
+++ b/drivers/iio/accel/adxl372_i2c.c
@@ -6,6 +6,7 @@
*/
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -46,9 +47,16 @@ static const struct i2c_device_id adxl372_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adxl372_i2c_id);
+static const struct of_device_id adxl372_of_match[] = {
+ { .compatible = "adi,adxl372" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxl372_of_match);
+
static struct i2c_driver adxl372_i2c_driver = {
.driver = {
.name = "adxl372_i2c",
+ .of_match_table = adxl372_of_match,
},
.probe = adxl372_i2c_probe,
.id_table = adxl372_i2c_id,
diff --git a/drivers/iio/accel/adxl372_spi.c b/drivers/iio/accel/adxl372_spi.c
index 3ef7e3a4804e..1f1352fee99a 100644
--- a/drivers/iio/accel/adxl372_spi.c
+++ b/drivers/iio/accel/adxl372_spi.c
@@ -40,8 +40,8 @@ static const struct spi_device_id adxl372_spi_id[] = {
MODULE_DEVICE_TABLE(spi, adxl372_spi_id);
static const struct of_device_id adxl372_of_match[] = {
- { .compatible = "adi,adxl372" },
- { },
+ { .compatible = "adi,adxl372" },
+ { }
};
MODULE_DEVICE_TABLE(of, adxl372_of_match);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 5b7a467c7b27..6b74c2b04c15 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -673,7 +673,7 @@ static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
};
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
- IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
+ IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
{ }
@@ -1000,19 +1000,15 @@ static int bma180_probe(struct i2c_client *client,
return ret;
data->vdd_supply = devm_regulator_get(dev, "vdd");
- if (IS_ERR(data->vdd_supply)) {
- if (PTR_ERR(data->vdd_supply) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get vdd regulator %d\n",
- (int)PTR_ERR(data->vdd_supply));
- return PTR_ERR(data->vdd_supply);
- }
+ if (IS_ERR(data->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(data->vdd_supply),
+ "Failed to get vdd regulator\n");
+
data->vddio_supply = devm_regulator_get(dev, "vddio");
- if (IS_ERR(data->vddio_supply)) {
- if (PTR_ERR(data->vddio_supply) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get vddio regulator %d\n",
- (int)PTR_ERR(data->vddio_supply));
- return PTR_ERR(data->vddio_supply);
- }
+ if (IS_ERR(data->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(data->vddio_supply),
+ "Failed to get vddio regulator\n");
+
/* Typical voltage 2.4V these are min and max */
ret = regulator_set_voltage(data->vdd_supply, 1620000, 3600000);
if (ret)
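All of these dev_err_probe() conversions lean on the same contract, roughly sketched below (simplified; the real helper also records the deferral reason so it appears in debugfs' devices_deferred):

#include <linux/device.h>
#include <linux/errno.h>

static inline int err_probe_sketch(struct device *dev, int err, const char *what)
{
	if (err != -EPROBE_DEFER)
		dev_err(dev, "error %d: %s\n", err, what);
	else
		dev_dbg(dev, "deferred: %s\n", what);
	return err;	/* lets callers write 'return dev_err_probe(...)' */
}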
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index da8b36cc8628..3c9b0c6954e6 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -2,16 +2,18 @@
/**
* BMA220 Digital triaxial acceleration sensor driver
*
- * Copyright (c) 2016, Intel Corporation.
+ * Copyright (c) 2016,2020 Intel Corporation.
*/
-#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/spi/spi.h>
+
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/spi/spi.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -23,14 +25,13 @@
#define BMA220_REG_SUSPEND 0x18
#define BMA220_CHIP_ID 0xDD
-#define BMA220_READ_MASK 0x80
-#define BMA220_RANGE_MASK 0x03
+#define BMA220_READ_MASK BIT(7)
+#define BMA220_RANGE_MASK GENMASK(1, 0)
#define BMA220_DATA_SHIFT 2
#define BMA220_SUSPEND_SLEEP 0xFF
#define BMA220_SUSPEND_WAKE 0x00
#define BMA220_DEVICE_NAME "bma220"
-#define BMA220_SCALE_AVAILABLE "0.623 1.248 2.491 4.983"
#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \
.type = IIO_ACCEL, \
@@ -55,19 +56,8 @@ enum bma220_axis {
AXIS_Z,
};
-static IIO_CONST_ATTR(in_accel_scale_available, BMA220_SCALE_AVAILABLE);
-
-static struct attribute *bma220_attributes[] = {
- &iio_const_attr_in_accel_scale_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group bma220_attribute_group = {
- .attrs = bma220_attributes,
-};
-
-static const int bma220_scale_table[][4] = {
- {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000}
+static const int bma220_scale_table[][2] = {
+ {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000},
};
struct bma220_data {
@@ -182,10 +172,26 @@ static int bma220_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int bma220_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)bma220_scale_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = ARRAY_SIZE(bma220_scale_table) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
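With IIO_VAL_INT_PLUS_MICRO the available list is consumed as flattened {integer, micro} pairs, which is why *length above is twice ARRAY_SIZE(bma220_scale_table); the core then renders in_accel_scale_available as "0.623000 1.248000 2.491000 4.983000", matching the const attribute string this patch deletes.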
static const struct iio_info bma220_info = {
.read_raw = bma220_read_raw,
.write_raw = bma220_write_raw,
- .attrs = &bma220_attribute_group,
+ .read_avail = bma220_read_avail,
};
static int bma220_init(struct spi_device *spi)
@@ -198,10 +204,12 @@ static int bma220_init(struct spi_device *spi)
/* Make sure the chip is powered on */
ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret == BMA220_SUSPEND_WAKE)
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
if (ret < 0)
return ret;
- else if (ret == BMA220_SUSPEND_WAKE)
- return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret == BMA220_SUSPEND_WAKE)
+ return -EBUSY;
return 0;
}
@@ -212,10 +220,12 @@ static int bma220_deinit(struct spi_device *spi)
/* Make sure the chip is powered off */
ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret == BMA220_SUSPEND_SLEEP)
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
if (ret < 0)
return ret;
- else if (ret == BMA220_SUSPEND_SLEEP)
- return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret == BMA220_SUSPEND_SLEEP)
+ return -EBUSY;
return 0;
}
@@ -245,7 +255,7 @@ static int bma220_probe(struct spi_device *spi)
indio_dev->available_scan_masks = bma220_accel_scan_masks;
ret = bma220_init(data->spi_device);
- if (ret < 0)
+ if (ret)
return ret;
ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
@@ -278,56 +288,43 @@ static int bma220_remove(struct spi_device *spi)
return bma220_deinit(spi);
}
-#ifdef CONFIG_PM_SLEEP
-static int bma220_suspend(struct device *dev)
+static __maybe_unused int bma220_suspend(struct device *dev)
{
- struct bma220_data *data =
- iio_priv(spi_get_drvdata(to_spi_device(dev)));
+ struct bma220_data *data = iio_priv(dev_get_drvdata(dev));
/* The chip can be suspended/woken up by a simple register read. */
return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
}
-static int bma220_resume(struct device *dev)
+static __maybe_unused int bma220_resume(struct device *dev)
{
- struct bma220_data *data =
- iio_priv(spi_get_drvdata(to_spi_device(dev)));
+ struct bma220_data *data = iio_priv(dev_get_drvdata(dev));
return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
}
-
static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
-#define BMA220_PM_OPS (&bma220_pm_ops)
-#else
-#define BMA220_PM_OPS NULL
-#endif
-
static const struct spi_device_id bma220_spi_id[] = {
{"bma220", 0},
{}
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id bma220_acpi_id[] = {
{"BMA0220", 0},
{}
};
-
MODULE_DEVICE_TABLE(spi, bma220_spi_id);
-#endif
static struct spi_driver bma220_driver = {
.driver = {
.name = "bma220_spi",
- .pm = BMA220_PM_OPS,
- .acpi_match_table = ACPI_PTR(bma220_acpi_id),
+ .pm = &bma220_pm_ops,
+ .acpi_match_table = bma220_acpi_id,
},
.probe = bma220_probe,
.remove = bma220_remove,
.id_table = bma220_spi_id,
};
-
module_spi_driver(bma220_driver);
MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index b6f3471b62dc..8f1232c38e0d 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -215,7 +215,7 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
return -ENOMEM;
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
- cros_ec_sensors_capture, NULL);
+ cros_ec_sensors_capture, NULL, false);
if (ret)
return ret;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 853febc29488..bf1d2c8afdbd 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1543,22 +1543,14 @@ static int mma8452_probe(struct i2c_client *client,
data->chip_info = match->data;
data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(data->vdd_reg)) {
- if (PTR_ERR(data->vdd_reg) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "failed to get VDD regulator!\n");
- return PTR_ERR(data->vdd_reg);
- }
+ if (IS_ERR(data->vdd_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vdd_reg),
+ "failed to get VDD regulator!\n");
data->vddio_reg = devm_regulator_get(&client->dev, "vddio");
- if (IS_ERR(data->vddio_reg)) {
- if (PTR_ERR(data->vddio_reg) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "failed to get VDDIO regulator!\n");
- return PTR_ERR(data->vddio_reg);
- }
+ if (IS_ERR(data->vddio_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vddio_reg),
+ "failed to get VDDIO regulator!\n");
ret = regulator_enable(data->vdd_reg);
if (ret) {
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index d94dc800b842..91ae90514aff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -340,7 +340,7 @@ config AXP288_ADC
config BCM_IPROC_ADC
tristate "Broadcom IPROC ADC driver"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on (ARCH_BCM_IPROC && OF) || COMPILE_TEST
depends on MFD_SYSCON
default ARCH_BCM_CYGNUS
help
@@ -863,7 +863,7 @@ config RN5T618_ADC
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
- depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on RESET_CONTROLLER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 62fde2aad282..2301a0e27f23 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -20,8 +20,6 @@
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
-#include <linux/platform_data/ad7291.h>
-
/*
* Simplified handling
*
@@ -465,7 +463,6 @@ static const struct iio_info ad7291_info = {
static int ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct ad7291_platform_data *pdata = client->dev.platform_data;
struct ad7291_chip_info *chip;
struct iio_dev *indio_dev;
int ret;
@@ -475,16 +472,6 @@ static int ad7291_probe(struct i2c_client *client,
return -ENOMEM;
chip = iio_priv(indio_dev);
- if (pdata && pdata->use_external_ref) {
- chip->reg = devm_regulator_get(&client->dev, "vref");
- if (IS_ERR(chip->reg))
- return PTR_ERR(chip->reg);
-
- ret = regulator_enable(chip->reg);
- if (ret)
- return ret;
- }
-
mutex_init(&chip->state_lock);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -495,8 +482,21 @@ static int ad7291_probe(struct i2c_client *client,
AD7291_T_SENSE_MASK | /* Tsense always enabled */
AD7291_ALERT_POLARITY; /* set irq polarity low level */
- if (pdata && pdata->use_external_ref)
+ chip->reg = devm_regulator_get_optional(&client->dev, "vref");
+ if (IS_ERR(chip->reg)) {
+ if (PTR_ERR(chip->reg) != -ENODEV)
+ return PTR_ERR(chip->reg);
+
+ chip->reg = NULL;
+ }
+
+ if (chip->reg) {
+ ret = regulator_enable(chip->reg);
+ if (ret)
+ return ret;
+
chip->command |= AD7291_EXT_REF;
+ }
indio_dev->name = id->name;
indio_dev->channels = ad7291_channels;
@@ -567,9 +567,16 @@ static const struct i2c_device_id ad7291_id[] = {
MODULE_DEVICE_TABLE(i2c, ad7291_id);
+static const struct of_device_id ad7291_of_match[] = {
+ { .compatible = "adi,ad7291" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad7291_of_match);
+
static struct i2c_driver ad7291_driver = {
.driver = {
.name = KBUILD_MODNAME,
+ .of_match_table = ad7291_of_match,
},
.probe = ad7291_probe,
.remove = ad7291_remove,
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 2eafbe7ac7c7..ab204e9199e9 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -310,8 +310,10 @@ static int ad7292_probe(struct spi_device *spi)
for_each_available_child_of_node(spi->dev.of_node, child) {
diff_channels = of_property_read_bool(child, "diff-channels");
- if (diff_channels)
+ if (diff_channels) {
+ of_node_put(child);
break;
+ }
}
if (diff_channels) {
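The added of_node_put() follows the standard child-iterator rule: for_each_available_child_of_node() takes a reference on each child and drops it when advancing, so breaking out early leaves one reference the loop body must drop itself. In miniature (wants_this_child() is a placeholder predicate):

#include <linux/of.h>

bool wants_this_child(struct device_node *child);	/* placeholder */

static bool demo_scan(struct device_node *np)
{
	struct device_node *child;
	bool found = false;

	for_each_available_child_of_node(np, child) {
		if (wants_this_child(child)) {
			found = true;
			of_node_put(child);	/* balance the iterator's get */
			break;
		}
	}
	return found;	/* a normal loop exit already dropped all refs */
}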
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index d9566a83988a..5d597e5050f6 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -39,7 +39,7 @@ static const struct ad7949_adc_spec ad7949_adc_spec[] = {
* struct ad7949_adc_chip - AD ADC chip
* @lock: protects write sequences
* @vref: regulator generating Vref
- * @iio_dev: reference to iio structure
+ * @indio_dev: reference to iio structure
* @spi: reference to spi structure
* @resolution: resolution of the chip
* @cfg: copy of the configuration register
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 1e8fd83b9bc2..19a45dd43796 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -77,6 +77,22 @@
#define AN877_ADC_DCO_DELAY_ENABLE 0x80
/*
+ * Analog Devices AD9265 16-Bit, 125/105/80 MSPS ADC
+ */
+
+#define CHIPID_AD9265 0x64
+#define AD9265_DEF_OUTPUT_MODE 0x40
+#define AD9265_REG_VREF_MASK 0xC0
+
+/*
+ * Analog Devices AD9434 12-Bit, 370/500 MSPS ADC
+ */
+
+#define CHIPID_AD9434 0x6A
+#define AD9434_DEF_OUTPUT_MODE 0x00
+#define AD9434_REG_VREF_MASK 0xC0
+
+/*
* Analog Devices AD9467 16-Bit, 200/250 MSPS ADC
*/
@@ -85,9 +101,20 @@
#define AD9467_REG_VREF_MASK 0x0F
enum {
+ ID_AD9265,
+ ID_AD9434,
ID_AD9467,
};
+struct ad9467_chip_info {
+ struct adi_axi_adc_chip_info axi_adc_info;
+ unsigned int default_output_mode;
+ unsigned int vref_mask;
+};
+
+#define to_ad9467_chip_info(_info) \
+ container_of(_info, struct ad9467_chip_info, axi_adc_info)
+
struct ad9467_state {
struct spi_device *spi;
struct clk *clk;
@@ -149,6 +176,17 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
return 0;
}
+static const unsigned int ad9265_scale_table[][2] = {
+ {1250, 0x00}, {1500, 0x40}, {1750, 0x80}, {2000, 0xC0},
+};
+
+static const unsigned int ad9434_scale_table[][2] = {
+ {1600, 0x1C}, {1580, 0x1D}, {1550, 0x1E}, {1520, 0x1F}, {1500, 0x00},
+ {1470, 0x01}, {1440, 0x02}, {1420, 0x03}, {1390, 0x04}, {1360, 0x05},
+ {1340, 0x06}, {1310, 0x07}, {1280, 0x08}, {1260, 0x09}, {1230, 0x0A},
+ {1200, 0x0B}, {1180, 0x0C},
+};
+
static const unsigned int ad9467_scale_table[][2] = {
{2000, 0}, {2100, 6}, {2200, 7},
{2300, 8}, {2400, 9}, {2500, 10},
@@ -182,39 +220,63 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
}, \
}
+static const struct iio_chan_spec ad9434_channels[] = {
+ AD9467_CHAN(0, 0, 12, 'S'),
+};
+
static const struct iio_chan_spec ad9467_channels[] = {
AD9467_CHAN(0, 0, 16, 'S'),
};
-static const struct adi_axi_adc_chip_info ad9467_chip_tbl[] = {
+static const struct ad9467_chip_info ad9467_chip_tbl[] = {
+ [ID_AD9265] = {
+ .axi_adc_info = {
+ .id = CHIPID_AD9265,
+ .max_rate = 125000000UL,
+ .scale_table = ad9265_scale_table,
+ .num_scales = ARRAY_SIZE(ad9265_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
+ },
+ .default_output_mode = AD9265_DEF_OUTPUT_MODE,
+ .vref_mask = AD9265_REG_VREF_MASK,
+ },
+ [ID_AD9434] = {
+ .axi_adc_info = {
+ .id = CHIPID_AD9434,
+ .max_rate = 500000000UL,
+ .scale_table = ad9434_scale_table,
+ .num_scales = ARRAY_SIZE(ad9434_scale_table),
+ .channels = ad9434_channels,
+ .num_channels = ARRAY_SIZE(ad9434_channels),
+ },
+ .default_output_mode = AD9434_DEF_OUTPUT_MODE,
+ .vref_mask = AD9434_REG_VREF_MASK,
+ },
[ID_AD9467] = {
- .id = CHIPID_AD9467,
- .max_rate = 250000000UL,
- .scale_table = ad9467_scale_table,
- .num_scales = ARRAY_SIZE(ad9467_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
+ .axi_adc_info = {
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
+ },
+ .default_output_mode = AD9467_DEF_OUTPUT_MODE,
+ .vref_mask = AD9467_REG_VREF_MASK,
},
};
static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
{
const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
- unsigned int i, vref_val, vref_mask;
+ unsigned int i, vref_val;
vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
- switch (info->id) {
- case CHIPID_AD9467:
- vref_mask = AD9467_REG_VREF_MASK;
- break;
- default:
- vref_mask = 0xFFFF;
- break;
- }
-
- vref_val &= vref_mask;
+ vref_val &= info1->vref_mask;
for (i = 0; i < info->num_scales; i++) {
if (vref_val == info->scale_table[i][1])
@@ -316,18 +378,6 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
return ad9467_outputmode_set(st->spi, st->output_mode);
}
-static int ad9467_setup(struct ad9467_state *st, unsigned int chip_id)
-{
- switch (chip_id) {
- case CHIPID_AD9467:
- st->output_mode = AD9467_DEF_OUTPUT_MODE |
- AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static void ad9467_clk_disable(void *data)
{
struct ad9467_state *st = data;
@@ -337,7 +387,7 @@ static void ad9467_clk_disable(void *data)
static int ad9467_probe(struct spi_device *spi)
{
- const struct adi_axi_adc_chip_info *info;
+ const struct ad9467_chip_info *info;
struct adi_axi_adc_conv *conv;
struct ad9467_state *st;
unsigned int id;
@@ -386,11 +436,12 @@ static int ad9467_probe(struct spi_device *spi)
spi_set_drvdata(spi, st);
- conv->chip_info = info;
+ conv->chip_info = &info->axi_adc_info;
id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
if (id != conv->chip_info->id) {
- dev_err(&spi->dev, "Unrecognized CHIP_ID 0x%X\n", id);
+ dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
+ id, conv->chip_info->id);
return -ENODEV;
}
@@ -399,10 +450,15 @@ static int ad9467_probe(struct spi_device *spi)
conv->read_raw = ad9467_read_raw;
conv->preenable_setup = ad9467_preenable_setup;
- return ad9467_setup(st, id);
+ st->output_mode = info->default_output_mode |
+ AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+
+ return 0;
}
static const struct of_device_id ad9467_of_match[] = {
+ { .compatible = "adi,ad9265", .data = &ad9467_chip_tbl[ID_AD9265], },
+ { .compatible = "adi,ad9434", .data = &ad9467_chip_tbl[ID_AD9434], },
{ .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
{}
};
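The table rework uses the usual embedding idiom: the generic adi_axi_adc_chip_info lives as the first member of a driver-private wrapper, and container_of() recovers the wrapper from the generic pointer the shared core hands back. In miniature (the struct names here are illustrative):

#include <linux/kernel.h>

struct generic_info {
	unsigned int id;
};

struct wrapped_info {
	struct generic_info base;	/* all the shared core sees */
	unsigned int vref_mask;		/* driver-private extras */
};

#define to_wrapped_info(p) container_of(p, struct wrapped_info, base)

static unsigned int get_vref_mask(const struct generic_info *info)
{
	return to_wrapped_info(info)->vref_mask;
}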
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 86b6b65916ee..9109da2d2e15 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -276,7 +276,7 @@ static struct attribute *adi_axi_adc_attributes[] = {
static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adi_axi_adc_state *st = iio_priv(indio_dev);
struct adi_axi_adc_conv *conv = &st->client->conv;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index de9583d6cddd..b917a4714a9c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -884,7 +884,7 @@ static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
AT91_SAMA5D2_MAX_CHAN_IDX + 1);
}
-static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
+static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
{
int ret;
u8 bit;
@@ -901,7 +901,7 @@ static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
/* we continue with the triggered buffer */
ret = at91_adc_dma_start(indio_dev);
if (ret) {
- dev_err(&indio_dev->dev, "buffer postenable failed\n");
+ dev_err(&indio_dev->dev, "buffer prepare failed\n");
return ret;
}
@@ -989,7 +989,6 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
}
static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
- .preenable = &at91_adc_buffer_preenable,
.postdisable = &at91_adc_buffer_postdisable,
};
@@ -1563,6 +1562,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
if (val > AT91_HWFIFO_MAX_SIZE)
return -EINVAL;
@@ -1586,7 +1586,15 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
else if (val > 1)
at91_adc_dma_init(to_platform_device(&indio_dev->dev));
- return 0;
+ /*
+ * We can start the DMA only after setting the watermark and
+ * having the DMA initialization completed
+ */
+ ret = at91_adc_buffer_prepare(indio_dev);
+ if (ret)
+ at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+
+ return ret;
}
static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
@@ -1764,17 +1772,13 @@ static int at91_adc_probe(struct platform_device *pdev)
mutex_init(&st->lock);
INIT_WORK(&st->touch_st.workq, at91_adc_workq_handler);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
+ st->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(st->base))
+ return PTR_ERR(st->base);
/* if we plan to use DMA, we need the physical address of the regs */
st->dma_st.phys_addr = res->start;
- st->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(st->base))
- return PTR_ERR(st->base);
-
st->irq = platform_get_irq(pdev, 0);
if (st->irq <= 0) {
if (!st->irq)
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 798ff2d89691..3e0c0233b431 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -9,10 +9,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
@@ -67,7 +67,7 @@ struct axp_data;
struct axp20x_adc_iio {
struct regmap *regmap;
- struct axp_data *data;
+ const struct axp_data *data;
};
enum axp20x_adc_channel_v {
@@ -670,15 +670,15 @@ static int axp20x_probe(struct platform_device *pdev)
info->regmap = axp20x_dev->regmap;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (!pdev->dev.of_node) {
+ if (!dev_fwnode(&pdev->dev)) {
const struct platform_device_id *id;
id = platform_get_device_id(pdev);
- info->data = (struct axp_data *)id->driver_data;
+ info->data = (const struct axp_data *)id->driver_data;
} else {
struct device *dev = &pdev->dev;
- info->data = (struct axp_data *)of_device_get_match_data(dev);
+ info->data = device_get_match_data(dev);
}
indio_dev->name = platform_get_device_id(pdev)->name;
@@ -742,7 +742,7 @@ static int axp20x_remove(struct platform_device *pdev)
static struct platform_driver axp20x_adc_driver = {
.driver = {
.name = "axp20x-adc",
- .of_match_table = of_match_ptr(axp20x_adc_of_match),
+ .of_match_table = axp20x_adc_of_match,
},
.id_table = axp20x_adc_id_match,
.probe = axp20x_probe,
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 936da32faa9d..44e1e53ada72 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -4,7 +4,7 @@
*/
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -617,7 +617,7 @@ static struct platform_driver iproc_adc_driver = {
.remove = iproc_adc_remove,
.driver = {
.name = "iproc-static-adc",
- .of_match_table = of_match_ptr(iproc_adc_of_match),
+ .of_match_table = iproc_adc_of_match,
},
};
module_platform_driver(iproc_adc_driver);
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index 2a4fd3bb64cf..d73eac36153f 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -348,11 +348,9 @@ static int envelope_detector_probe(struct platform_device *pdev)
indio_dev->num_channels = 1;
env->dac = devm_iio_channel_get(dev, "dac");
- if (IS_ERR(env->dac)) {
- if (PTR_ERR(env->dac) != -EPROBE_DEFER)
- dev_err(dev, "failed to get dac input channel\n");
- return PTR_ERR(env->dac);
- }
+ if (IS_ERR(env->dac))
+ return dev_err_probe(dev, PTR_ERR(env->dac),
+ "failed to get dac input channel\n");
env->comp_irq = platform_get_irq_byname(pdev, "comp");
if (env->comp_irq < 0)
@@ -360,11 +358,9 @@ static int envelope_detector_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
0, "envelope-detector", env);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request interrupt\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+
env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 7d23b6c33284..99f4404e9fd1 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -138,6 +138,16 @@ struct exynos_adc {
bool read_ts;
u32 ts_x;
u32 ts_y;
+
+ /*
+ * Lock to protect against concurrent access to the completion
+ * callback during a manual conversion. This driver waits on a
+ * completion callback for the conversion result, so in the meantime
+ * no other read request (or conversion start) may be issued;
+ * otherwise it would interfere with the current conversion result.
+ */
+ struct mutex lock;
};
struct exynos_adc_data {
@@ -542,7 +552,7 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
reinit_completion(&info->completion);
/* Select the channel to be used and Trigger conversion */
@@ -562,7 +572,7 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return ret;
}
@@ -573,7 +583,7 @@ static int exynos_read_s3c64xx_ts(struct iio_dev *indio_dev, int *x, int *y)
unsigned long timeout;
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
info->read_ts = true;
reinit_completion(&info->completion);
@@ -598,7 +608,7 @@ static int exynos_read_s3c64xx_ts(struct iio_dev *indio_dev, int *x, int *y)
}
info->read_ts = false;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return ret;
}
@@ -844,13 +854,9 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
info->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(info->vdd)) {
- if (PTR_ERR(info->vdd) != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed getting regulator, err = %ld\n",
- PTR_ERR(info->vdd));
- return PTR_ERR(info->vdd);
- }
+ if (IS_ERR(info->vdd))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->vdd),
+ "failed getting regulator");
ret = regulator_enable(info->vdd);
if (ret)
@@ -872,6 +878,8 @@ static int exynos_adc_probe(struct platform_device *pdev)
indio_dev->channels = exynos_adc_iio_channels;
indio_dev->num_channels = info->data->num_channels;
+ mutex_init(&info->lock);
+
ret = request_irq(info->irq, exynos_adc_isr,
0, dev_name(&pdev->dev), info);
if (ret < 0) {
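Dropping indio_dev->mlock here follows the wider IIO effort to treat mlock as core-internal state: a driver that needs to serialize its own conversions is expected to define and document a private lock, as the new comment on struct exynos_adc's lock member does.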
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index 8cb51cf7a816..ab5139e911c3 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -40,6 +40,15 @@ struct mx25_gcq_priv {
int irq;
struct regulator *vref[4];
u32 channel_vref_mv[MX25_NUM_CFGS];
+ /*
+ * Lock to protect the device state during a potential concurrent
+ * read access from userspace. Reading a raw value requires a sequence
+ * of register writes, then a wait for a completion callback,
+ * and finally a register read, during which userspace could issue
+ * another read request. This lock prevents a new read access from
+ * starting before the previous one has finished.
+ */
+ struct mutex lock;
};
#define MX25_CQG_CHAN(chan, id) {\
@@ -137,9 +146,9 @@ static int mx25_gcq_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&priv->lock);
ret = mx25_gcq_get_raw_value(&indio_dev->dev, chan, priv, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&priv->lock);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -314,6 +323,8 @@ static int mx25_gcq_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
}
+ mutex_init(&priv->lock);
+
init_completion(&priv->completed);
ret = mx25_gcq_setup_cfgs(pdev, priv);
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
index 9b8fd9c32364..2a485c8a1940 100644
--- a/drivers/iio/adc/ltc2497-core.c
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -180,13 +180,9 @@ int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
return ret;
ddata->ref = devm_regulator_get(dev, "vref");
- if (IS_ERR(ddata->ref)) {
- if (PTR_ERR(ddata->ref) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get vref regulator: %pe\n",
- ddata->ref);
-
- return PTR_ERR(ddata->ref);
- }
+ if (IS_ERR(ddata->ref))
+ return dev_err_probe(dev, PTR_ERR(ddata->ref),
+ "Failed to get vref regulator\n");
ret = regulator_enable(ddata->ref);
if (ret < 0) {
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 1a9189ba69ae..e03988698755 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -719,11 +719,8 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
if (ret == -ENODEV)
return 0;
- if (ret != -EPROBE_DEFER)
- dev_err(indio_dev->dev.parent,
- "failed to get temperature_calib cell\n");
-
- return ret;
+ return dev_err_probe(indio_dev->dev.parent, ret,
+ "failed to get temperature_calib cell\n");
}
priv->tsc_regmap =
@@ -1153,16 +1150,13 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
{
.compatible = "amlogic,meson8-saradc",
.data = &meson_sar_adc_meson8_data,
- },
- {
+ }, {
.compatible = "amlogic,meson8b-saradc",
.data = &meson_sar_adc_meson8b_data,
- },
- {
+ }, {
.compatible = "amlogic,meson8m2-saradc",
.data = &meson_sar_adc_meson8m2_data,
- },
- {
+ }, {
.compatible = "amlogic,meson-gxbb-saradc",
.data = &meson_sar_adc_gxbb_data,
}, {
@@ -1178,7 +1172,7 @@ static const struct of_device_id meson_sar_adc_of_match[] = {
.compatible = "amlogic,meson-g12a-saradc",
.data = &meson_sar_adc_g12a_data,
},
- {},
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_sar_adc_of_match);
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 1ca6570be66a..889b88768b63 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -834,18 +834,7 @@ static struct platform_driver palmas_gpadc_driver = {
.of_match_table = of_palmas_gpadc_match_tbl,
},
};
-
-static int __init palmas_gpadc_init(void)
-{
- return platform_driver_register(&palmas_gpadc_driver);
-}
-module_init(palmas_gpadc_init);
-
-static void __exit palmas_gpadc_exit(void)
-{
- platform_driver_unregister(&palmas_gpadc_driver);
-}
-module_exit(palmas_gpadc_exit);
+module_platform_driver(palmas_gpadc_driver);
MODULE_DESCRIPTION("palmas GPADC driver");
MODULE_AUTHOR("Pradeep Goudagunta<pgoudagunta@nvidia.com>");
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index d2c1419e72a0..9f38cf3c7dc2 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3);
break;
default:
- return -EINVAL;
+ goto err_e_inval;
}
/*
@@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Failed to get child reg property of ADC \"%pOFn\".\n",
child);
- return ret;
+ goto err_of_node_put;
}
/* Channel number is too high. */
@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Only %i channels supported with %pOFn, but reg = <%i>.\n",
num_channels, child, reg);
- return -EINVAL;
+ goto err_e_inval;
}
}
@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
dev_err(dev,
"Channel %i uses different ADC mode than the rest.\n",
reg);
- return -EINVAL;
+ goto err_e_inval;
}
/* Channel is valid, grab the regulator. */
@@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
if (IS_ERR(vref)) {
dev_dbg(dev, "Channel %i 'vref' supply not connected.\n",
reg);
- return PTR_ERR(vref);
+ ret = PTR_ERR(vref);
+ goto err_of_node_put;
}
priv->vref[reg] = vref;
@@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
* attached to the GyroADC at a time, so if we found it,
* we can stop parsing here.
*/
- if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A)
+ if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) {
+ of_node_put(child);
break;
+ }
}
if (first) {
@@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
}
return 0;
+
+err_e_inval:
+ ret = -EINVAL;
+err_of_node_put:
+ of_node_put(child);
+ return ret;
}
static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev)
@@ -495,12 +504,9 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
priv->clk = devm_clk_get(dev, "fck");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get IF clock (ret=%i)\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Failed to get IF clock\n");
ret = rcar_gyroadc_parse_subdevs(indio_dev);
if (ret)
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 0e2068ec068b..cd870c089182 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -582,11 +582,9 @@ static int stm32_adc_core_switches_probe(struct device *dev,
priv->syscfg = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(priv->syscfg)) {
ret = PTR_ERR(priv->syscfg);
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Can't probe syscfg: %d\n", ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Can't probe syscfg\n");
+
priv->syscfg = NULL;
}
@@ -596,12 +594,9 @@ static int stm32_adc_core_switches_probe(struct device *dev,
priv->booster = devm_regulator_get_optional(dev, "booster");
if (IS_ERR(priv->booster)) {
ret = PTR_ERR(priv->booster);
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "can't get booster %d\n",
- ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret, "can't get booster\n");
+
priv->booster = NULL;
}
}
@@ -612,11 +607,9 @@ static int stm32_adc_core_switches_probe(struct device *dev,
priv->vdd = devm_regulator_get_optional(dev, "vdd");
if (IS_ERR(priv->vdd)) {
ret = PTR_ERR(priv->vdd);
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "can't get vdd %d\n", ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret, "can't get vdd\n");
+
priv->vdd = NULL;
}
}
@@ -669,42 +662,24 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->common.phys_base = res->start;
priv->vdda = devm_regulator_get(&pdev->dev, "vdda");
- if (IS_ERR(priv->vdda)) {
- ret = PTR_ERR(priv->vdda);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "vdda get failed, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->vdda))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->vdda),
+ "vdda get failed\n");
priv->vref = devm_regulator_get(&pdev->dev, "vref");
- if (IS_ERR(priv->vref)) {
- ret = PTR_ERR(priv->vref);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "vref get failed, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->vref),
+ "vref get failed\n");
- priv->aclk = devm_clk_get(&pdev->dev, "adc");
- if (IS_ERR(priv->aclk)) {
- ret = PTR_ERR(priv->aclk);
- if (ret != -ENOENT) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get 'adc' clock\n");
- return ret;
- }
- priv->aclk = NULL;
- }
+ priv->aclk = devm_clk_get_optional(&pdev->dev, "adc");
+ if (IS_ERR(priv->aclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->aclk),
+ "Can't get 'adc' clock\n");
- priv->bclk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(priv->bclk)) {
- ret = PTR_ERR(priv->bclk);
- if (ret != -ENOENT) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get 'bus' clock\n");
- return ret;
- }
- priv->bclk = NULL;
- }
+ priv->bclk = devm_clk_get_optional(&pdev->dev, "bus");
+ if (IS_ERR(priv->bclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->bclk),
+ "Can't get 'bus' clock\n");
ret = stm32_adc_core_switches_probe(dev, priv);
if (ret)
@@ -794,6 +769,13 @@ static int stm32_adc_core_runtime_resume(struct device *dev)
{
return stm32_adc_core_hw_start(dev);
}
+
+static int stm32_adc_core_runtime_idle(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+
+ return 0;
+}
#endif
static const struct dev_pm_ops stm32_adc_core_pm_ops = {
@@ -801,7 +783,7 @@ static const struct dev_pm_ops stm32_adc_core_pm_ops = {
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(stm32_adc_core_runtime_suspend,
stm32_adc_core_runtime_resume,
- NULL)
+ stm32_adc_core_runtime_idle)
};
static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
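devm_clk_get_optional() folds the old -ENOENT special case away: when the clock is simply not described it returns NULL instead of an error, and the clk API treats a NULL clock as a no-op dummy. The resulting shape, condensed into a sketch:

#include <linux/clk.h>
#include <linux/device.h>

static int demo_get_adc_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, "adc");

	if (IS_ERR(clk))	/* real failures only, incl. -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Can't get 'adc' clock\n");

	*out = clk;	/* may be NULL: clk_prepare_enable(NULL) is a no-op */
	return 0;
}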
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 3eb9ebe8372f..b3f31f147347 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1805,13 +1805,9 @@ static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
adc->dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(adc->dma_chan)) {
ret = PTR_ERR(adc->dma_chan);
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "DMA channel request failed with %d\n",
- ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "DMA channel request failed with\n");
/* DMA is optional: fall back to IRQ mode */
adc->dma_chan = NULL;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 5e10fb4f3704..9234f14167b7 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -293,6 +293,7 @@ static int stm32_dfsdm_compute_osrs(struct stm32_dfsdm_filter *fl,
max >>= flo->rshift;
}
flo->max = (s32)max;
+ flo->bits = bits;
pr_debug("%s: fast %d, fosr %d, iosr %d, res 0x%llx/%d bits, rshift %d, lshift %d\n",
__func__, fast, flo->fosr, flo->iosr,
@@ -476,6 +477,9 @@ static int stm32_dfsdm_channels_configure(struct iio_dev *indio_dev,
if (!flo->res)
return -EINVAL;
+ dev_dbg(&indio_dev->dev, "Actual sample resolution: %d bits",
+ min(flo->bits, (u32)DFSDM_DATA_RES - 1));
+
for_each_set_bit(bit, &adc->smask,
sizeof(adc->smask) * BITS_PER_BYTE) {
chan = indio_dev->channels + bit;
@@ -1473,13 +1477,9 @@ static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
/* Optionally request DMA */
ret = stm32_dfsdm_dma_request(dev, indio_dev);
if (ret) {
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "DMA channel request failed with %d\n",
- ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "DMA channel request failed with\n");
dev_dbg(dev, "No DMA support\n");
return 0;
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 26e2011c5868..42a7377704a4 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -226,16 +226,13 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
if (!node)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get memory resource\n");
- return -ENODEV;
- }
- priv->dfsdm.phys_base = res->start;
- priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+ priv->dfsdm.base = devm_platform_get_and_ioremap_resource(pdev, 0,
+ &res);
if (IS_ERR(priv->dfsdm.base))
return PTR_ERR(priv->dfsdm.base);
+ priv->dfsdm.phys_base = res->start;
+
/*
* "dfsdm" clock is mandatory for DFSDM peripheral clocking.
* "dfsdm" or "audio" clocks can be used as source clock for
@@ -243,12 +240,9 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
* on use case.
*/
priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "Failed to get clock\n");
priv->aclk = devm_clk_get(&pdev->dev, "audio");
if (IS_ERR(priv->aclk))
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
index 5dbdae4ed881..4afc1f528b78 100644
--- a/drivers/iio/adc/stm32-dfsdm.h
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -249,6 +249,7 @@ enum stm32_dfsdm_sinc_order {
* @rshift: output sample right shift (hardware shift)
* @lshift: output sample left shift (software shift)
* @res: output sample resolution
+ * @bits: output sample resolution in bits
* @max: output sample maximum positive value
*/
struct stm32_dfsdm_filter_osr {
@@ -257,6 +258,7 @@ struct stm32_dfsdm_filter_osr {
unsigned int rshift;
unsigned int lshift;
u64 res;
+ u32 bits;
s32 max;
};
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index cf63983a54d9..b64718daa201 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -19,7 +19,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -158,17 +157,7 @@ static int adc081c_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
- if (ACPI_COMPANION(&client->dev)) {
- const struct acpi_device_id *ad_id;
-
- ad_id = acpi_match_device(client->dev.driver->acpi_match_table,
- &client->dev);
- if (!ad_id)
- return -ENODEV;
- model = &adcxx1c_models[ad_id->driver_data];
- } else {
- model = &adcxx1c_models[id->driver_data];
- }
+ model = &adcxx1c_models[id->driver_data];
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
@@ -243,21 +232,10 @@ static const struct of_device_id adc081c_of_match[] = {
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id adc081c_acpi_match[] = {
- { "ADC081C", ADC081C },
- { "ADC101C", ADC101C },
- { "ADC121C", ADC121C },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
-#endif
-
static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = adc081c_of_match,
- .acpi_match_table = ACPI_PTR(adc081c_acpi_match),
},
.probe = adc081c_probe,
.remove = adc081c_remove,
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index c7a085dce1f4..0261b3cfc92b 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -29,6 +29,12 @@ struct adc0832 {
struct regulator *reg;
struct mutex lock;
u8 mux_bits;
+ /*
+ * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp
+ * May be shorter if not all channels are enabled, subject
+ * to the timestamp remaining 8-byte aligned.
+ */
+ u8 data[24] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@@ -200,7 +206,6 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc0832 *adc = iio_priv(indio_dev);
- u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
int scan_index;
int i = 0;
@@ -218,10 +223,10 @@ static irqreturn_t adc0832_trigger_handler(int irq, void *p)
goto out;
}
- data[i] = ret;
+ adc->data[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
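Moving the bounce buffer off the stack is about alignment: iio_push_to_buffers_with_timestamp() stores an s64 timestamp at the next 8-byte boundary past the scan data, so the buffer must be 8-byte aligned and sized for samples plus padding plus the timestamp. A stack u8 array carries no such alignment guarantee, while a state-struct member can state it explicitly (field name illustrative):

struct demo_state {
	/* ...other driver state... */
	u8 data[24] __aligned(8);	/* 16 x u8 samples, s64 timestamp at offset 16 */
};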
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index 9b9b27415c93..183b2245e89b 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -20,6 +20,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -299,13 +300,11 @@ static int adc108s102_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id adc108s102_of_match[] = {
{ .compatible = "ti,adc108s102" },
{ }
};
MODULE_DEVICE_TABLE(of, adc108s102_of_match);
-#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id adc108s102_acpi_ids[] = {
@@ -324,7 +323,7 @@ MODULE_DEVICE_TABLE(spi, adc108s102_id);
static struct spi_driver adc108s102_driver = {
.driver = {
.name = "adc108s102",
- .of_match_table = of_match_ptr(adc108s102_of_match),
+ .of_match_table = adc108s102_of_match,
.acpi_match_table = ACPI_PTR(adc108s102_acpi_ids),
},
.probe = adc108s102_probe,
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index e485719cd2c4..fcd5d39dd03e 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -47,6 +47,12 @@ struct adc12138 {
struct completion complete;
/* The number of cclk periods for the S/H's acquisition time */
unsigned int acquisition_time;
+ /*
+ * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
+ * Less may be needed if not all channels are enabled, as long as
+ * the 8-byte alignment of the timestamp is maintained.
+ */
+ __be16 data[20] __aligned(8);
u8 tx_buf[2] ____cacheline_aligned;
u8 rx_buf[2];
@@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc12138 *adc = iio_priv(indio_dev);
- __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
__be16 trash;
int ret;
int scan_index;
@@ -345,7 +350,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
reinit_completion(&adc->complete);
ret = adc12138_start_and_read_conv(adc, scan_chan,
- i ? &data[i - 1] : &trash);
+ i ? &adc->data[i - 1] : &trash);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to start conversion\n");
@@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
if (i) {
- ret = adc12138_read_conv_data(adc, &data[i - 1]);
+ ret = adc12138_read_conv_data(adc, &adc->data[i - 1]);
if (ret) {
dev_warn(&adc->spi->dev,
"failed to get conversion data\n");
@@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index e86f55ce093f..3143f35a6509 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -220,7 +221,7 @@ MODULE_DEVICE_TABLE(acpi, adc128_acpi_match);
static struct spi_driver adc128_driver = {
.driver = {
.name = "adc128s052",
- .of_match_table = of_match_ptr(adc128_of_match),
+ .of_match_table = adc128_of_match,
.acpi_match_table = ACPI_PTR(adc128_acpi_match),
},
.probe = adc128_probe,
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index d0b7ef296afb..f93c34fe5873 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1092,6 +1092,7 @@ MODULE_DEVICE_TABLE(of, xadc_of_match_table);
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int *conf)
{
+ struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
struct iio_chan_spec *channels, *chan;
struct device_node *chan_node, *child;
@@ -1136,7 +1137,8 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
- channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
+ channels = devm_kmemdup(dev, xadc_channels,
+ sizeof(xadc_channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
@@ -1172,8 +1174,9 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
of_node_put(chan_node);
indio_dev->num_channels = num_channels;
- indio_dev->channels = krealloc(channels, sizeof(*channels) *
- num_channels, GFP_KERNEL);
+ indio_dev->channels = devm_krealloc(dev, channels,
+ sizeof(*channels) * num_channels,
+ GFP_KERNEL);
/* If we can't resize the channels array, just use the original */
if (!indio_dev->channels)
indio_dev->channels = channels;
@@ -1225,14 +1228,14 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
if (ret)
- goto err_device_free;
+ return ret;
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = iio_triggered_buffer_setup(indio_dev,
&iio_pollfunc_store_time, &xadc_trigger_handler,
&xadc_buffer_ops);
if (ret)
- goto err_device_free;
+ return ret;
xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
if (IS_ERR(xadc->convst_trigger)) {
@@ -1350,8 +1353,6 @@ err_free_convst_trigger:
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
-err_device_free:
- kfree(indio_dev->channels);
return ret;
}
@@ -1371,7 +1372,6 @@ static int xadc_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
kfree(xadc->data);
- kfree(indio_dev->channels);
return 0;
}
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 69c0f277ada0..e42ea2b1707d 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -276,11 +276,9 @@ static int rescale_probe(struct platform_device *pdev)
int ret;
source = devm_iio_channel_get(dev, NULL);
- if (IS_ERR(source)) {
- if (PTR_ERR(source) != -EPROBE_DEFER)
- dev_err(dev, "failed to get source channel\n");
- return PTR_ERR(source);
- }
+ if (IS_ERR(source))
+ return dev_err_probe(dev, PTR_ERR(source),
+ "failed to get source channel\n");
sizeof_ext_info = iio_get_channel_ext_info_count(source);
if (sizeof_ext_info) {
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
index 9b02c9a2bc8a..5eb1357a9c78 100644
--- a/drivers/iio/amplifiers/Kconfig
+++ b/drivers/iio/amplifiers/Kconfig
@@ -18,6 +18,7 @@ config AD8366
AD8366 Dual-Digital Variable Gain Amplifier (VGA)
ADA4961 BiCMOS RF Digital Gain Amplifier (DGA)
ADL5240 Digitally controlled variable gain amplifier (VGA)
+ HMC1119 0.25 dB LSB, 7-Bit, Silicon Digital Attenuator
To compile this driver as a module, choose M here: the
module will be called ad8366.
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 582708924e4f..9efa692151f0 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -201,12 +201,9 @@ static int hmc425a_probe(struct platform_device *pdev)
st->gain = st->chip_info->default_gain;
st->gpios = devm_gpiod_get_array(&pdev->dev, "ctrl", GPIOD_OUT_LOW);
- if (IS_ERR(st->gpios)) {
- ret = PTR_ERR(st->gpios);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get gpios\n");
- return ret;
- }
+ if (IS_ERR(st->gpios))
+ return dev_err_probe(&pdev->dev, PTR_ERR(st->gpios),
+ "failed to get gpios\n");
if (st->gpios->ndescs != st->chip_info->num_gpios) {
dev_err(&pdev->dev, "%d GPIOs needed to operate\n",
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 63f265c8b466..047b931591a9 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -11,7 +11,7 @@ config IIO_BUFFER_CB
usage. That is, those where the data is pushed to the consumer.
config IIO_BUFFER_DMA
- tristate
+ tristate "Industrial I/O DMA buffer infrastructure"
help
Provides the generic IIO DMA buffer infrastructure that can be used by
drivers for devices with DMA support to implement the IIO buffer.
@@ -20,13 +20,13 @@ config IIO_BUFFER_DMA
infrastructure.
config IIO_BUFFER_DMAENGINE
- tristate
+ tristate "Industrial I/O DMA buffer integration with DMAEngine"
select IIO_BUFFER_DMA
help
Provides a bonding of the generic IIO DMA buffer infrastructure with the
- DMAengine framework. This can be used by converter drivers with a DMA port
+ DMAEngine framework. This can be used by converter drivers with a DMA port
connected to an external DMA controller which is supported by the
- DMAengine framework.
+ DMAEngine framework.
Should be selected by drivers that want to use this functionality.
@@ -48,7 +48,7 @@ config IIO_KFIFO_BUF
often to read from the buffer.
config IIO_TRIGGERED_BUFFER
- tristate
+ tristate "Industrial I/O triggered buffer support"
select IIO_TRIGGER
select IIO_KFIFO_BUF
help
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 6dedf12b69a4..93b4e9e6bb55 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -45,7 +45,8 @@ static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}
-static void iio_dmaengine_buffer_block_done(void *data)
+static void iio_dmaengine_buffer_block_done(void *data,
+ const struct dmaengine_result *result)
{
struct iio_dma_buffer_block *block = data;
unsigned long flags;
@@ -53,6 +54,7 @@ static void iio_dmaengine_buffer_block_done(void *data)
spin_lock_irqsave(&block->queue->list_lock, flags);
list_del(&block->head);
spin_unlock_irqrestore(&block->queue->list_lock, flags);
+ block->bytes_used -= result->residue;
iio_dma_buffer_block_done(block);
}
@@ -74,7 +76,7 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
if (!desc)
return -ENOMEM;
- desc->callback = iio_dmaengine_buffer_block_done;
+ desc->callback_result = iio_dmaengine_buffer_block_done;
desc->callback_param = block;
cookie = dmaengine_submit(desc);
@@ -157,7 +159,7 @@ static const struct attribute *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer, iio_dmaengine_buffer_free() should be used to
* release it.
*/
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
struct dmaengine_buffer *dmaengine_buffer;
@@ -209,7 +211,6 @@ err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
/**
* iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -217,7 +218,7 @@ EXPORT_SYMBOL(iio_dmaengine_buffer_alloc);
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
@@ -227,7 +228,6 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
-EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
{
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index 8c1b64fd424a..97be3669c554 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/i2c.h>
@@ -177,7 +178,7 @@ MODULE_DEVICE_TABLE(of, ams_iaqcore_dt_ids);
static struct i2c_driver ams_iaqcore_driver = {
.driver = {
.name = "ams-iaq-core",
- .of_match_table = of_match_ptr(ams_iaqcore_dt_ids),
+ .of_match_table = ams_iaqcore_dt_ids,
},
.probe = ams_iaqcore_probe,
.id_table = ams_iaqcore_id,
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
index 8b72bb012363..b1bacfe3c3ce 100644
--- a/drivers/iio/chemical/atlas-ezo-sensor.c
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -16,10 +16,13 @@
#include <linux/iio/iio.h>
#define ATLAS_EZO_DRV_NAME "atlas-ezo-sensor"
-#define ATLAS_CO2_INT_TIME_IN_MS 950
+#define ATLAS_INT_TIME_IN_MS 950
+#define ATLAS_INT_HUM_TIME_IN_MS 350
enum {
ATLAS_CO2_EZO,
+ ATLAS_O2_EZO,
+ ATLAS_HUM_EZO,
};
struct atlas_ezo_device {
@@ -38,15 +41,37 @@ struct atlas_ezo_data {
u8 buffer[8];
};
+#define ATLAS_CONCENTRATION_CHANNEL(_modifier) \
+ { \
+ .type = IIO_CONCENTRATION, \
+ .modified = 1,\
+ .channel2 = _modifier, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = 0, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
static const struct iio_chan_spec atlas_co2_ezo_channels[] = {
+ ATLAS_CONCENTRATION_CHANNEL(IIO_MOD_CO2),
+};
+
+static const struct iio_chan_spec atlas_o2_ezo_channels[] = {
+ ATLAS_CONCENTRATION_CHANNEL(IIO_MOD_O2),
+};
+
+static const struct iio_chan_spec atlas_hum_ezo_channels[] = {
{
- .type = IIO_CONCENTRATION,
- .modified = 1,
- .channel2 = IIO_MOD_CO2,
+ .type = IIO_HUMIDITYRELATIVE,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
- .scan_type = {
+ .scan_type = {
.sign = 'u',
.realbits = 32,
.storagebits = 32,
@@ -59,10 +84,30 @@ static struct atlas_ezo_device atlas_ezo_devices[] = {
[ATLAS_CO2_EZO] = {
.channels = atlas_co2_ezo_channels,
.num_channels = 1,
- .delay = ATLAS_CO2_INT_TIME_IN_MS,
+ .delay = ATLAS_INT_TIME_IN_MS,
+ },
+ [ATLAS_O2_EZO] = {
+ .channels = atlas_o2_ezo_channels,
+ .num_channels = 1,
+ .delay = ATLAS_INT_TIME_IN_MS,
+ },
+ [ATLAS_HUM_EZO] = {
+ .channels = atlas_hum_ezo_channels,
+ .num_channels = 1,
+ .delay = ATLAS_INT_HUM_TIME_IN_MS,
},
};
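+/* remove the decimal point in place, e.g. "3.71" becomes "371" */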
+static void atlas_ezo_sanitize(char *buf)
+{
+ char *ptr = strchr(buf, '.');
+
+ if (!ptr)
+ return;
+
+ memmove(ptr, ptr + 1, strlen(ptr));
+}
+
static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -96,6 +141,9 @@ static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
return -EBUSY;
}
+ /* remove the decimal point so the value parses as a fixed-point integer */
+ atlas_ezo_sanitize(data->buffer + 2);
+
ret = kstrtol(data->buffer + 1, 10, &tmp);
*val = tmp;
@@ -105,9 +153,27 @@ static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
return ret ? ret : IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 100; /* 0.0001 */
- return IIO_VAL_INT_PLUS_MICRO;
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ *val = 10;
+ return IIO_VAL_INT;
+ case IIO_CONCENTRATION:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* IIO_CONCENTRATION modifiers */
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ *val = 0;
+ *val2 = 100; /* 0.0001 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MOD_O2:
+ *val = 100;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
}
return 0;
@@ -119,12 +185,16 @@ static const struct iio_info atlas_info = {
static const struct i2c_device_id atlas_ezo_id[] = {
{ "atlas-co2-ezo", ATLAS_CO2_EZO },
+ { "atlas-o2-ezo", ATLAS_O2_EZO },
+ { "atlas-hum-ezo", ATLAS_HUM_EZO },
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_ezo_id);
static const struct of_device_id atlas_ezo_dt_ids[] = {
{ .compatible = "atlas,co2-ezo", .data = (void *)ATLAS_CO2_EZO, },
+ { .compatible = "atlas,o2-ezo", .data = (void *)ATLAS_O2_EZO, },
+ { .compatible = "atlas,hum-ezo", .data = (void *)ATLAS_HUM_EZO, },
{}
};
MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 43069636fcd5..cdab9d04dedd 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -620,7 +620,6 @@ static int atlas_probe(struct i2c_client *client,
{
struct atlas_data *data;
struct atlas_device *chip;
- const struct of_device_id *of_id;
struct iio_trigger *trig;
struct iio_dev *indio_dev;
int ret;
@@ -629,11 +628,10 @@ static int atlas_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
- of_id = of_match_device(atlas_dt_ids, &client->dev);
- if (!of_id)
+ if (!dev_fwnode(&client->dev))
chip = &atlas_devices[id->driver_data];
else
- chip = &atlas_devices[(unsigned long)of_id->data];
+ chip = &atlas_devices[(unsigned long)device_get_match_data(&client->dev)];
indio_dev->info = &atlas_info;
indio_dev->name = ATLAS_DRV_NAME;
@@ -775,7 +773,7 @@ static const struct dev_pm_ops atlas_pm_ops = {
static struct i2c_driver atlas_driver = {
.driver = {
.name = ATLAS_DRV_NAME,
- .of_match_table = of_match_ptr(atlas_dt_ids),
+ .of_match_table = atlas_dt_ids,
.pm = &atlas_pm_ops,
},
.probe = atlas_probe,
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index eac76972f83e..4d0d798c7cd3 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -705,13 +705,8 @@ int scd30_probe(struct device *dev, int irq, const char *name, void *priv,
indio_dev->available_scan_masks = scd30_scan_masks;
state->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(state->vdd)) {
- if (PTR_ERR(state->vdd) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(dev, "failed to get regulator\n");
- return PTR_ERR(state->vdd);
- }
+ if (IS_ERR(state->vdd))
+ return dev_err_probe(dev, PTR_ERR(state->vdd), "failed to get regulator\n");
ret = regulator_enable(state->vdd);
if (ret)
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 2c4086c48136..1029c457be15 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -20,9 +20,9 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -227,6 +227,7 @@ static int sgp_verify_buffer(const struct sgp_data *data,
* @cmd: SGP Command to issue
* @buf: Raw data buffer to use
* @word_count: Num words to read, excluding CRC bytes
+ * @duration_us: Time taken by the sensor to take a reading and for the data to be ready.
*
* Return: 0 on success, negative error otherwise.
*/
@@ -409,6 +410,7 @@ static int sgp_read_raw(struct iio_dev *indio_dev,
static int sgp_check_compat(struct sgp_data *data,
unsigned int product_id)
{
+ struct device *dev = &data->client->dev;
const struct sgp_version *supported_versions;
u16 ix, num_fs;
u16 product, generation, major, minor;
@@ -416,21 +418,20 @@ static int sgp_check_compat(struct sgp_data *data,
/* driver does not match product */
generation = SGP_VERS_GEN(data);
if (generation != 0) {
- dev_err(&data->client->dev,
+ dev_err(dev,
"incompatible product generation %d != 0", generation);
return -ENODEV;
}
product = SGP_VERS_PRODUCT(data);
if (product != product_id) {
- dev_err(&data->client->dev,
- "sensor reports a different product: 0x%04hx\n",
+ dev_err(dev, "sensor reports a different product: 0x%04hx\n",
product);
return -ENODEV;
}
if (SGP_VERS_RESERVED(data))
- dev_warn(&data->client->dev, "reserved bit is set\n");
+ dev_warn(dev, "reserved bit is set\n");
/* engineering samples are not supported: no interface guarantees */
if (SGP_VERS_ENG_BIT(data))
@@ -456,8 +457,7 @@ static int sgp_check_compat(struct sgp_data *data,
minor >= supported_versions[ix].minor)
return 0;
}
- dev_err(&data->client->dev, "unsupported sgp version: %d.%d\n",
- major, minor);
+ dev_err(dev, "unsupported sgp version: %d.%d\n", major, minor);
return -ENODEV;
}
@@ -499,19 +499,18 @@ static const struct of_device_id sgp_dt_ids[] = {
static int sgp_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct sgp_data *data;
- const struct of_device_id *of_id;
unsigned long product_id;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- of_id = of_match_device(sgp_dt_ids, &client->dev);
- if (of_id)
- product_id = (unsigned long)of_id->data;
+ if (dev_fwnode(dev))
+ product_id = (unsigned long)device_get_match_data(dev);
else
product_id = id->driver_data;
@@ -541,9 +540,9 @@ static int sgp_probe(struct i2c_client *client,
sgp_init(data);
- ret = devm_iio_device_register(&client->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(&client->dev, "failed to register iio device\n");
+ dev_err(dev, "failed to register iio device\n");
return ret;
}
@@ -576,7 +575,7 @@ MODULE_DEVICE_TABLE(of, sgp_dt_ids);
static struct i2c_driver sgp_driver = {
.driver = {
.name = "sgp30",
- .of_match_table = of_match_ptr(sgp_dt_ids),
+ .of_match_table = sgp_dt_ids,
},
.probe = sgp_probe,
.remove = sgp_remove,
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 5586eb8e12cd..23b22a5f5c1c 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -10,8 +10,7 @@
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -352,12 +351,12 @@ MODULE_DEVICE_TABLE(of, vz89x_dt_ids);
static int vz89x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct vz89x_data *data;
- const struct of_device_id *of_id;
int chip_id;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
@@ -370,11 +369,10 @@ static int vz89x_probe(struct i2c_client *client,
else
return -EOPNOTSUPP;
- of_id = of_match_device(vz89x_dt_ids, &client->dev);
- if (!of_id)
+ if (!dev_fwnode(dev))
chip_id = id->driver_data;
else
- chip_id = (unsigned long)of_id->data;
+ chip_id = (unsigned long)device_get_match_data(dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
@@ -383,13 +381,13 @@ static int vz89x_probe(struct i2c_client *client,
mutex_init(&data->lock);
indio_dev->info = &vz89x_info;
- indio_dev->name = dev_name(&client->dev);
+ indio_dev->name = dev_name(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->chip->channels;
indio_dev->num_channels = data->chip->num_channels;
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static const struct i2c_device_id vz89x_id[] = {
@@ -402,7 +400,7 @@ MODULE_DEVICE_TABLE(i2c, vz89x_id);
static struct i2c_driver vz89x_driver = {
.driver = {
.name = "vz89x",
- .of_match_table = of_match_ptr(vz89x_dt_ids),
+ .of_match_table = vz89x_dt_ids,
},
.probe = vz89x_probe,
.id_table = vz89x_id,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
index af801e203623..752f59037715 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
@@ -97,7 +97,8 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL,
+ NULL, false);
if (ret)
return ret;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 130ab8ce0269..dee1191de752 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -73,7 +73,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
st->core.param.sensor_offset.flags = 0;
ret = cros_ec_motion_send_host_cmd(&st->core, 0);
- if (ret == -EPROTO) {
+ if (ret == -EPROTO || ret == -EOPNOTSUPP) {
/* Reading calibscale is not supported on older EC. */
*val = 1;
*val2 = 0;
@@ -236,12 +236,11 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_push_data,
+ true);
if (ret)
return ret;
- iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
-
indio_dev->info = &ec_sensors_info;
state = iio_priv(indio_dev);
for (channel = state->channels, i = CROS_EC_SENSOR_X;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 1bc6efa47316..c62cacc04672 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -177,12 +177,11 @@ static ssize_t hwfifo_watermark_max_show(struct device *dev,
static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
-const struct attribute *cros_ec_sensor_fifo_attributes[] = {
+static const struct attribute *cros_ec_sensor_fifo_attributes[] = {
&iio_dev_attr_hwfifo_timeout.dev_attr.attr,
&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
NULL,
};
-EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes);
int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
s16 *data,
@@ -241,6 +240,7 @@ static void cros_ec_sensors_core_clean(void *arg)
* for backward compatibility.
* @push_data: function to call when cros_ec_sensorhub receives
* a sample for that sensor.
+ * @has_hw_fifo: set to true if this device has and uses a hardware FIFO
*
* Return: 0 on success, -errno on failure.
*/
@@ -248,7 +248,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
bool physical_device,
cros_ec_sensors_capture_t trigger_capture,
- cros_ec_sensorhub_push_data_cb_t push_data)
+ cros_ec_sensorhub_push_data_cb_t push_data,
+ bool has_hw_fifo)
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
@@ -361,6 +362,10 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
NULL);
if (ret)
return ret;
+
+ if (has_hw_fifo)
+ iio_buffer_set_attrs(indio_dev->buffer,
+ cros_ec_sensor_fifo_attributes);
}
}
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index a94dbcf491ce..1aee87100038 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -503,7 +503,8 @@ static int ssp_probe(struct spi_device *spi)
return -ENODEV;
}
- ret = mfd_add_devices(&spi->dev, -1, sensorhub_sensor_devs,
+ ret = mfd_add_devices(&spi->dev, PLATFORM_DEVID_NONE,
+ sensorhub_sensor_devs,
ARRAY_SIZE(sensorhub_sensor_devs), NULL, 0, NULL);
if (ret < 0) {
dev_err(&spi->dev, "mfd add devices fail\n");
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index fef503f8012d..82abd4d6886c 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -68,8 +68,8 @@ enum ad5064_regmap_type {
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
* @internal_vref: internal reference voltage. 0 if the chip has no
- internal vref.
- * @channel: channel specification
+ * internal vref.
+ * @channels: channel specification
* @num_channels: number of channels
* @regmap_type: register map layout variant
*/
@@ -98,6 +98,7 @@ typedef int (*ad5064_write_func)(struct ad5064_state *st, unsigned int cmd,
* @use_internal_vref: set to true if the internal reference voltage should be
* used.
* @write: register write callback
+ * @lock: maintain consistency between the cached and device state
* @data: i2c/spi transfer buffers
*/
@@ -111,7 +112,6 @@ struct ad5064_state {
bool use_internal_vref;
ad5064_write_func write;
- /* Lock used to maintain consistency between cached and dev state */
struct mutex lock;
/*
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 935a6177569f..d87e21016863 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -478,13 +479,11 @@ static const struct spi_device_id ad5446_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
-#ifdef CONFIG_OF
static const struct of_device_id ad5446_of_ids[] = {
{ .compatible = "ti,dac7512" },
{ }
};
MODULE_DEVICE_TABLE(of, ad5446_of_ids);
-#endif
static int ad5446_spi_probe(struct spi_device *spi)
{
@@ -502,7 +501,7 @@ static int ad5446_spi_remove(struct spi_device *spi)
static struct spi_driver ad5446_spi_driver = {
.driver = {
.name = "ad5446",
- .of_match_table = of_match_ptr(ad5446_of_ids),
+ .of_match_table = ad5446_of_ids,
},
.probe = ad5446_spi_probe,
.remove = ad5446_spi_remove,
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 1fd75c02a7cd..0405e92b9e8c 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -374,36 +374,36 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
{
struct ad5592r_state *st = iio_priv(iio_dev);
u16 read_val;
- int ret;
+ int ret, mult;
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&st->lock);
-
if (!chan->output) {
+ mutex_lock(&st->lock);
ret = st->ops->read_adc(st, chan->channel, &read_val);
+ mutex_unlock(&st->lock);
if (ret)
- goto unlock;
+ return ret;
if ((read_val >> 12 & 0x7) != (chan->channel & 0x7)) {
dev_err(st->dev, "Error while reading channel %u\n",
chan->channel);
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
read_val &= GENMASK(11, 0);
} else {
+ mutex_lock(&st->lock);
read_val = st->cached_dac[chan->channel];
+ mutex_unlock(&st->lock);
}
dev_dbg(st->dev, "Channel %u read: 0x%04hX\n",
chan->channel, read_val);
*val = (int) read_val;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = ad5592r_get_vref(st);
@@ -412,24 +412,24 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
*val = div_s64_rem(tmp, 1000000000LL, val2);
return IIO_VAL_INT_PLUS_MICRO;
- } else {
- int mult;
+ }
- mutex_lock(&st->lock);
+ mutex_lock(&st->lock);
- if (chan->output)
- mult = !!(st->cached_gp_ctrl &
- AD5592R_REG_CTRL_DAC_RANGE);
- else
- mult = !!(st->cached_gp_ctrl &
- AD5592R_REG_CTRL_ADC_RANGE);
+ if (chan->output)
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_DAC_RANGE);
+ else
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_ADC_RANGE);
- *val *= ++mult;
+ mutex_unlock(&st->lock);
- *val2 = chan->scan_type.realbits;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- }
- break;
+ *val *= ++mult;
+
+ *val2 = chan->scan_type.realbits;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
ret = ad5592r_get_vref(st);
@@ -439,15 +439,13 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
*val = (-34365 * 25) / ret;
else
*val = (-75365 * 25) / ret;
- ret = IIO_VAL_INT;
- break;
+
+ mutex_unlock(&st->lock);
+
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
-
-unlock:
- mutex_unlock(&st->lock);
- return ret;
}
static int ad5592r_write_raw_get_fmt(struct iio_dev *indio_dev,
@@ -486,7 +484,7 @@ static const struct iio_chan_spec_ext_info ad5592r_ext_info[] = {
{
.name = "scale_available",
.read = ad5592r_show_scale_available,
- .shared = true,
+ .shared = IIO_SHARED_BY_TYPE,
},
{},
};
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 49308ad13c4b..41f651500668 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -10,9 +10,8 @@
#include <linux/bitops.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/acpi.h>
#define AD5592R_GPIO_READBACK_EN BIT(10)
#define AD5592R_LDAC_READBACK_EN BIT(6)
@@ -157,8 +156,8 @@ MODULE_DEVICE_TABLE(acpi, ad5592r_acpi_match);
static struct spi_driver ad5592r_spi_driver = {
.driver = {
.name = "ad5592r",
- .of_match_table = of_match_ptr(ad5592r_of_match),
- .acpi_match_table = ACPI_PTR(ad5592r_acpi_match),
+ .of_match_table = ad5592r_of_match,
+ .acpi_match_table = ad5592r_acpi_match,
},
.probe = ad5592r_spi_probe,
.remove = ad5592r_spi_remove,
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 1fbe9c019c7f..5b4df36fdc2a 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -11,8 +11,7 @@
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#define AD5593R_MODE_CONF (0 << 4)
#define AD5593R_MODE_DAC_WRITE (1 << 4)
@@ -124,8 +123,8 @@ MODULE_DEVICE_TABLE(acpi, ad5593r_acpi_match);
static struct i2c_driver ad5593r_driver = {
.driver = {
.name = "ad5593r",
- .of_match_table = of_match_ptr(ad5593r_of_match),
- .acpi_match_table = ACPI_PTR(ad5593r_acpi_match),
+ .of_match_table = ad5593r_of_match,
+ .acpi_match_table = ad5593r_acpi_match,
},
.probe = ad5593r_i2c_probe,
.remove = ad5593r_i2c_remove,
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 56cf9344d187..148d9541f517 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -206,12 +206,12 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
}
#define DECLARE_AD5693_CHANNELS(name, bits, _shift) \
-static struct iio_chan_spec name[] = { \
+static const struct iio_chan_spec name[] = { \
AD5868_CHANNEL(0, 0, bits, _shift), \
}
#define DECLARE_AD5686_CHANNELS(name, bits, _shift) \
-static struct iio_chan_spec name[] = { \
+static const struct iio_chan_spec name[] = { \
AD5868_CHANNEL(0, 1, bits, _shift), \
AD5868_CHANNEL(1, 2, bits, _shift), \
AD5868_CHANNEL(2, 4, bits, _shift), \
@@ -219,7 +219,7 @@ static struct iio_chan_spec name[] = { \
}
#define DECLARE_AD5676_CHANNELS(name, bits, _shift) \
-static struct iio_chan_spec name[] = { \
+static const struct iio_chan_spec name[] = { \
AD5868_CHANNEL(0, 0, bits, _shift), \
AD5868_CHANNEL(1, 1, bits, _shift), \
AD5868_CHANNEL(2, 2, bits, _shift), \
@@ -231,7 +231,7 @@ static struct iio_chan_spec name[] = { \
}
#define DECLARE_AD5679_CHANNELS(name, bits, _shift) \
-static struct iio_chan_spec name[] = { \
+static const struct iio_chan_spec name[] = { \
AD5868_CHANNEL(0, 0, bits, _shift), \
AD5868_CHANNEL(1, 1, bits, _shift), \
AD5868_CHANNEL(2, 2, bits, _shift), \
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 52009b5eef88..a15f2970577e 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -104,7 +104,7 @@ typedef int (*ad5686_read_func)(struct ad5686_state *st, u8 addr);
struct ad5686_chip_info {
u16 int_vref_mv;
unsigned int num_channels;
- struct iio_chan_spec *channels;
+ const struct iio_chan_spec *channels;
enum ad5686_regmap_type regmap_type;
};
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 4460aa57a33f..2e46def9d8ee 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
@@ -29,6 +30,9 @@
* @spi: the device for this driver instance
* @config: cached config register value
* @dac_cache: current DAC raw value (chip does not support readback)
+ * @vdd_reg: reference to VDD regulator
+ * @vref_reg: reference to VREF regulator
+ * @lock: protect writes and cache updates
* @data: spi transfer buffer
*/
@@ -287,7 +291,7 @@ MODULE_DEVICE_TABLE(spi, ad7303_spi_ids);
static struct spi_driver ad7303_driver = {
.driver = {
.name = "ad7303",
- .of_match_table = of_match_ptr(ad7303_spi_of_match),
+ .of_match_table = ad7303_spi_of_match,
},
.probe = ad7303_probe,
.remove = ad7303_remove,
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 1a9609eda5c5..5d1819448102 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -184,18 +184,14 @@ static int dpot_dac_probe(struct platform_device *pdev)
indio_dev->num_channels = 1;
dac->vref = devm_regulator_get(dev, "vref");
- if (IS_ERR(dac->vref)) {
- if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get vref regulator\n");
- return PTR_ERR(dac->vref);
- }
+ if (IS_ERR(dac->vref))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dac->vref),
+ "failed to get vref regulator\n");
dac->dpot = devm_iio_channel_get(dev, "dpot");
- if (IS_ERR(dac->dpot)) {
- if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
- dev_err(dev, "failed to get dpot input channel\n");
- return PTR_ERR(dac->dpot);
- }
+ if (IS_ERR(dac->dpot))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dac->dpot),
+ "failed to get dpot input channel\n");
ret = iio_get_channel_type(dac->dpot, &type);
if (ret < 0)
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index ee174d224110..beb9a15b7c74 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -16,8 +16,8 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -357,29 +357,16 @@ static const struct iio_info mcp4725_info = {
.attrs = &mcp4725_attribute_group,
};
-#ifdef CONFIG_OF
static int mcp4725_probe_dt(struct device *dev,
struct mcp4725_platform_data *pdata)
{
- struct device_node *np = dev->of_node;
-
- if (!np)
- return -ENODEV;
-
/* check if the vref-supply is defined */
- pdata->use_vref = of_property_read_bool(np, "vref-supply");
+ pdata->use_vref = device_property_read_bool(dev, "vref-supply");
pdata->vref_buffered =
- of_property_read_bool(np, "microchip,vref-buffered");
+ device_property_read_bool(dev, "microchip,vref-buffered");
return 0;
}
-#else
-static int mcp4725_probe_dt(struct device *dev,
- struct mcp4725_platform_data *platform_data)
-{
- return -ENODEV;
-}
-#endif
static int mcp4725_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -398,8 +385,8 @@ static int mcp4725_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- if (client->dev.of_node)
- data->id = (enum chip_id)of_device_get_match_data(&client->dev);
+ if (dev_fwnode(&client->dev))
+ data->id = (enum chip_id)device_get_match_data(&client->dev);
else
data->id = id->driver_data;
pdata = dev_get_platdata(&client->dev);
@@ -519,7 +506,6 @@ static const struct i2c_device_id mcp4725_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp4725_id);
-#ifdef CONFIG_OF
static const struct of_device_id mcp4725_of_match[] = {
{
.compatible = "microchip,mcp4725",
@@ -532,12 +518,11 @@ static const struct of_device_id mcp4725_of_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, mcp4725_of_match);
-#endif
static struct i2c_driver mcp4725_driver = {
.driver = {
.name = MCP4725_DRV_NAME,
- .of_match_table = of_match_ptr(mcp4725_of_match),
+ .of_match_table = mcp4725_of_match,
.pm = &mcp4725_pm_ops,
},
.probe = mcp4725_probe,
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 7e5809ba0dee..906436780347 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -150,10 +150,7 @@ static int stm32_dac_probe(struct platform_device *pdev)
rst = devm_reset_control_get_optional_exclusive(dev, NULL);
if (rst) {
if (IS_ERR(rst)) {
- ret = PTR_ERR(rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "reset get failed, %d\n", ret);
-
+ ret = dev_err_probe(dev, PTR_ERR(rst), "reset get failed\n");
goto err_hw_stop;
}
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 092c796fa3d9..12dec68c16f7 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -26,9 +26,12 @@
/**
* struct stm32_dac - private data of DAC driver
* @common: reference to DAC common data
+ * @lock: lock to protect against potential races when reading
+ * and updating CR, to keep it in sync with pm_runtime
*/
struct stm32_dac {
struct stm32_dac_common *common;
+ struct mutex lock;
};
static int stm32_dac_is_enabled(struct iio_dev *indio_dev, int channel)
@@ -58,10 +61,10 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
int ret;
/* already enabled / disabled ? */
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&dac->lock);
ret = stm32_dac_is_enabled(indio_dev, ch);
if (ret < 0 || enable == !!ret) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&dac->lock);
return ret < 0 ? ret : 0;
}
@@ -69,13 +72,13 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&dac->lock);
return ret;
}
}
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&dac->lock);
if (ret < 0) {
dev_err(&indio_dev->dev, "%s failed\n", en ?
"Enable" : "Disable");
@@ -327,6 +330,8 @@ static int stm32_dac_probe(struct platform_device *pdev)
indio_dev->info = &stm32_dac_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ mutex_init(&dac->lock);
+
ret = stm32_dac_chan_of_init(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 86bfb1c3f9b9..de33c1fc6e0b 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -324,7 +325,6 @@ static int ti_dac_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id ti_dac_of_id[] = {
{ .compatible = "ti,dac082s085" },
{ .compatible = "ti,dac102s085" },
@@ -335,7 +335,6 @@ static const struct of_device_id ti_dac_of_id[] = {
{ }
};
MODULE_DEVICE_TABLE(of, ti_dac_of_id);
-#endif
static const struct spi_device_id ti_dac_spi_id[] = {
{ "dac082s085", dual_8bit },
@@ -351,7 +350,7 @@ MODULE_DEVICE_TABLE(spi, ti_dac_spi_id);
static struct spi_driver ti_dac_driver = {
.driver = {
.name = "ti-dac082s085",
- .of_match_table = of_match_ptr(ti_dac_of_id),
+ .of_match_table = ti_dac_of_id,
},
.probe = ti_dac_probe,
.remove = ti_dac_remove,
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 00fc7db8eb65..d3295767a079 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -18,8 +18,7 @@
#include <linux/iio/iio.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/regulator/consumer.h>
enum chip_id {
@@ -47,8 +46,8 @@ struct dac5571_data {
struct mutex lock;
struct regulator *vref;
u16 val[4];
- bool powerdown;
- u8 powerdown_mode;
+ bool powerdown[4];
+ u8 powerdown_mode[4];
struct dac5571_spec const *spec;
int (*dac5571_cmd)(struct dac5571_data *data, int channel, u16 val);
int (*dac5571_pwrdwn)(struct dac5571_data *data, int channel, u8 pwrdwn);
@@ -125,7 +124,7 @@ static int dac5571_get_powerdown_mode(struct iio_dev *indio_dev,
{
struct dac5571_data *data = iio_priv(indio_dev);
- return data->powerdown_mode;
+ return data->powerdown_mode[chan->channel];
}
static int dac5571_set_powerdown_mode(struct iio_dev *indio_dev,
@@ -135,17 +134,17 @@ static int dac5571_set_powerdown_mode(struct iio_dev *indio_dev,
struct dac5571_data *data = iio_priv(indio_dev);
int ret = 0;
- if (data->powerdown_mode == mode)
+ if (data->powerdown_mode[chan->channel] == mode)
return 0;
mutex_lock(&data->lock);
- if (data->powerdown) {
+ if (data->powerdown[chan->channel]) {
ret = data->dac5571_pwrdwn(data, chan->channel,
DAC5571_POWERDOWN(mode));
if (ret)
goto out;
}
- data->powerdown_mode = mode;
+ data->powerdown_mode[chan->channel] = mode;
out:
mutex_unlock(&data->lock);
@@ -167,7 +166,7 @@ static ssize_t dac5571_read_powerdown(struct iio_dev *indio_dev,
{
struct dac5571_data *data = iio_priv(indio_dev);
- return sprintf(buf, "%d\n", data->powerdown);
+ return sprintf(buf, "%d\n", data->powerdown[chan->channel]);
}
static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
@@ -183,19 +182,20 @@ static ssize_t dac5571_write_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- if (data->powerdown == powerdown)
+ if (data->powerdown[chan->channel] == powerdown)
return len;
mutex_lock(&data->lock);
if (powerdown)
ret = data->dac5571_pwrdwn(data, chan->channel,
- DAC5571_POWERDOWN(data->powerdown_mode));
+ DAC5571_POWERDOWN(data->powerdown_mode[chan->channel]));
else
- ret = data->dac5571_cmd(data, chan->channel, data->val[0]);
+ ret = data->dac5571_cmd(data, chan->channel,
+ data->val[chan->channel]);
if (ret)
goto out;
- data->powerdown = powerdown;
+ data->powerdown[chan->channel] = powerdown;
out:
mutex_unlock(&data->lock);
@@ -209,9 +209,9 @@ static const struct iio_chan_spec_ext_info dac5571_ext_info[] = {
.name = "powerdown",
.read = dac5571_read_powerdown,
.write = dac5571_write_powerdown,
- .shared = IIO_SHARED_BY_TYPE,
+ .shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &dac5571_powerdown_mode),
IIO_ENUM_AVAILABLE("powerdown_mode", &dac5571_powerdown_mode),
{},
};
@@ -276,7 +276,7 @@ static int dac5571_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << data->spec->resolution) || val < 0)
return -EINVAL;
- if (data->powerdown)
+ if (data->powerdown[chan->channel])
return -EBUSY;
mutex_lock(&data->lock);
@@ -383,7 +383,6 @@ static int dac5571_remove(struct i2c_client *i2c)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id dac5571_of_id[] = {
{.compatible = "ti,dac5571"},
{.compatible = "ti,dac6571"},
@@ -397,7 +396,6 @@ static const struct of_device_id dac5571_of_id[] = {
{}
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
-#endif
static const struct i2c_device_id dac5571_id[] = {
{"dac5571", single_8bit},
@@ -416,7 +414,7 @@ MODULE_DEVICE_TABLE(i2c, dac5571_id);
static struct i2c_driver dac5571_driver = {
.driver = {
.name = "ti-dac5571",
- .of_match_table = of_match_ptr(dac5571_of_id),
+ .of_match_table = dac5571_of_id,
},
.probe = dac5571_probe,
.remove = dac5571_remove,
diff --git a/drivers/iio/dac/ti-dac7612.c b/drivers/iio/dac/ti-dac7612.c
index 07c9f39d54f1..4c0f4b5e9ff4 100644
--- a/drivers/iio/dac/ti-dac7612.c
+++ b/drivers/iio/dac/ti-dac7612.c
@@ -23,6 +23,14 @@ struct dac7612 {
uint16_t cache[2];
/*
+ * Lock to protect the state of the device from potential concurrent
+ * write accesses from userspace. The write operation requires an
+ * SPI write followed by a GPIO toggle, so the lock protects
+ * the integrity of the entire sequence of operations.
+ */
+ struct mutex lock;
+
+ /*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
@@ -101,9 +109,9 @@ static int dac7612_write_raw(struct iio_dev *iio_dev,
if (val == priv->cache[chan->channel])
return 0;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&priv->lock);
ret = dac7612_cmd_single(priv, chan->channel, val);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&priv->lock);
return ret;
}
@@ -145,6 +153,8 @@ static int dac7612_probe(struct spi_device *spi)
iio_dev->num_channels = ARRAY_SIZE(priv->cache);
iio_dev->name = spi_get_device_id(spi)->name;
+ mutex_init(&priv->lock);
+
for (i = 0; i < ARRAY_SIZE(priv->cache); i++) {
ret = dac7612_cmd_single(priv, i, 0);
if (ret)
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index ee85d596e528..5a0072727ba4 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (c) 2011 Jonathan Cameron
*
* Companion module to the iio simple dummy example driver.
@@ -27,11 +27,13 @@
#define IIO_EVENTGEN_NO 10
/**
+ * struct iio_dummy_eventgen - event generator specific state
* @regs: irq regs we are faking
* @lock: protect the evgen state
* @inuse: mask of which irqs are connected
* @irq_sim: interrupt simulator
* @base: base of irq range
+ * @irq_sim_domain: irq simulator domain
*/
struct iio_dummy_eventgen {
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 334e1d779d6d..bdb0bc3b12dd 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -969,6 +969,13 @@ static int ad9523_setup(struct iio_dev *indio_dev)
return 0;
}
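+/* devm action: disable the regulator when the device is released */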
+static void ad9523_reg_disable(void *data)
+{
+ struct regulator *reg = data;
+
+ regulator_disable(reg);
+}
+
static int ad9523_probe(struct spi_device *spi)
{
struct ad9523_platform_data *pdata = spi->dev.platform_data;
@@ -994,21 +1001,22 @@ static int ad9523_probe(struct spi_device *spi)
ret = regulator_enable(st->reg);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad9523_reg_disable,
+ st->reg);
+ if (ret)
+ return ret;
}
st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
GPIOD_OUT_HIGH);
- if (IS_ERR(st->pwrdown_gpio)) {
- ret = PTR_ERR(st->pwrdown_gpio);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(st->reset_gpio)) {
- ret = PTR_ERR(st->reset_gpio);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
if (st->reset_gpio) {
udelay(1);
@@ -1017,10 +1025,8 @@ static int ad9523_probe(struct spi_device *spi)
st->sync_gpio = devm_gpiod_get_optional(&spi->dev, "sync",
GPIOD_OUT_HIGH);
- if (IS_ERR(st->sync_gpio)) {
- ret = PTR_ERR(st->sync_gpio);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->sync_gpio))
+ return PTR_ERR(st->sync_gpio);
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
@@ -1035,34 +1041,9 @@ static int ad9523_probe(struct spi_device *spi)
ret = ad9523_setup(indio_dev);
if (ret < 0)
- goto error_disable_reg;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_disable_reg;
-
- dev_info(&spi->dev, "probed %s\n", indio_dev->name);
-
- return 0;
-
-error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad9523_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad9523_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct spi_device_id ad9523_id[] = {
@@ -1076,7 +1057,6 @@ static struct spi_driver ad9523_driver = {
.name = "ad9523",
},
.probe = ad9523_probe,
- .remove = ad9523_remove,
.id_table = ad9523_id,
};
module_spi_driver(ad9523_driver);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 409c9c47161e..82c050a3899d 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -48,6 +48,13 @@ struct adf4350_state {
unsigned long regs_hw[6];
unsigned long long freq_req;
/*
+ * Lock to protect the state of the device from potential concurrent
+ * writes. The device is configured via a sequence of SPI writes,
+ * and this lock is meant to prevent the start of another sequence
+ * before the previous one has finished.
+ */
+ struct mutex lock;
+ /*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
@@ -99,7 +106,7 @@ static int adf4350_reg_access(struct iio_dev *indio_dev,
if (reg > ADF4350_REG5)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (readval == NULL) {
st->regs[reg] = writeval & ~(BIT(0) | BIT(1) | BIT(2));
ret = adf4350_sync_config(st);
@@ -107,7 +114,7 @@ static int adf4350_reg_access(struct iio_dev *indio_dev,
*readval = st->regs_hw[reg];
ret = 0;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -254,7 +261,7 @@ static ssize_t adf4350_write(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)private) {
case ADF4350_FREQ:
ret = adf4350_set_freq(st, readin);
@@ -295,7 +302,7 @@ static ssize_t adf4350_write(struct iio_dev *indio_dev,
default:
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -309,7 +316,7 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
unsigned long long val;
int ret = 0;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)private) {
case ADF4350_FREQ:
val = (u64)((st->r0_int * st->r1_mod) + st->r0_fract) *
@@ -338,7 +345,7 @@ static ssize_t adf4350_read(struct iio_dev *indio_dev,
ret = -EINVAL;
val = 0;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret < 0 ? ret : sprintf(buf, "%llu\n", val);
}
@@ -539,6 +546,8 @@ static int adf4350_probe(struct spi_device *spi)
indio_dev->channels = &adf4350_chan;
indio_dev->num_channels = 1;
+ mutex_init(&st->lock);
+
st->chspc = pdata->channel_spacing;
if (clk) {
st->clk = clk;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 6daeddf37f60..5824f2edf975 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -41,6 +41,18 @@ config ADIS16260
This driver can also be built as a module. If so, the module
will be called adis16260.
+config ADXRS290
+ tristate "Analog Devices ADXRS290 Dual-Axis MEMS Gyroscope SPI driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADXRS290 programmable
+ digital output gyroscope.
+
+ This driver can also be built as a module. If so, the module will be
+ called adxrs290.
+
config ADXRS450
tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
depends on SPI
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index 45cbd5dc644e..0319b397dc3f 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ADIS16080) += adis16080.o
obj-$(CONFIG_ADIS16130) += adis16130.o
obj-$(CONFIG_ADIS16136) += adis16136.o
obj-$(CONFIG_ADIS16260) += adis16260.o
+obj-$(CONFIG_ADXRS290) += adxrs290.o
obj-$(CONFIG_ADXRS450) += adxrs450.o
obj-$(CONFIG_BMG160) += bmg160_core.o
obj-$(CONFIG_BMG160_I2C) += bmg160_i2c.o
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index 6e5e2d98943c..e2f4d943e220 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -38,7 +38,7 @@ struct adis16080_chip_info {
* @us: actual spi_device to write data
* @info: chip specific parameters
* @buf: transmit or receive buffer
- * @lock lock to protect buffer during reads
+ * @lock: lock to protect buffer during reads
**/
struct adis16080_state {
struct spi_device *us;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index d8a96f6bbae2..a11ae9db0d11 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -523,6 +523,11 @@ static const struct adis16136_chip_info adis16136_chip_info[] = {
},
};
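+/* devm action wrapper so adis16136_stop_device() runs on driver detach */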
+static void adis16136_stop(void *data)
+{
+ adis16136_stop_device(data);
+}
+
static int adis16136_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -552,38 +557,23 @@ static int adis16136_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(&adis16136->adis, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(&adis16136->adis, indio_dev, NULL);
if (ret)
return ret;
ret = adis16136_initial_setup(indio_dev);
if (ret)
- goto error_cleanup_buffer;
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, adis16136_stop, indio_dev);
if (ret)
- goto error_stop_device;
-
- adis16136_debugfs_init(indio_dev);
-
- return 0;
-
-error_stop_device:
- adis16136_stop_device(indio_dev);
-error_cleanup_buffer:
- adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
- return ret;
-}
-
-static int adis16136_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16136 *adis16136 = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- adis16136_stop_device(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
- adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
+ adis16136_debugfs_init(indio_dev);
return 0;
}
@@ -603,7 +593,6 @@ static struct spi_driver adis16136_driver = {
},
.id_table = adis16136_ids,
.probe = adis16136_probe,
- .remove = adis16136_remove,
};
module_spi_driver(adis16136_driver);
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index e638d56e1574..e7c9a3e31c45 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -359,6 +359,11 @@ static const struct adis_data adis16260_data = {
BIT(ADIS16260_DIAG_STAT_POWER_LOW_BIT),
};
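+/* devm action wrapper so adis16260_stop_device() runs on driver detach */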
+static void adis16260_stop(void *data)
+{
+ adis16260_stop_device(data);
+}
+
static int adis16260_probe(struct spi_device *spi)
{
const struct spi_device_id *id;
@@ -390,35 +395,20 @@ static int adis16260_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(&adis16260->adis, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(&adis16260->adis, indio_dev, NULL);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(&adis16260->adis);
if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
- return ret;
-}
-
-static int adis16260_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16260 *adis16260 = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- adis16260_stop_device(indio_dev);
- adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, adis16260_stop, indio_dev);
+ if (ret)
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
/*
@@ -441,7 +431,6 @@ static struct spi_driver adis16260_driver = {
.name = "adis16260",
},
.probe = adis16260_probe,
- .remove = adis16260_remove,
.id_table = adis16260_id,
};
module_spi_driver(adis16260_driver);
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
new file mode 100644
index 000000000000..ca6fc234076e
--- /dev/null
+++ b/drivers/iio/gyro/adxrs290.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ADXRS290 SPI Gyroscope Driver
+ *
+ * Copyright (C) 2020 Nishant Malpani <nish.malpani25@gmail.com>
+ * Copyright (C) 2020 Analog Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define ADXRS290_ADI_ID 0xAD
+#define ADXRS290_MEMS_ID 0x1D
+#define ADXRS290_DEV_ID 0x92
+
+#define ADXRS290_REG_ADI_ID 0x00
+#define ADXRS290_REG_MEMS_ID 0x01
+#define ADXRS290_REG_DEV_ID 0x02
+#define ADXRS290_REG_REV_ID 0x03
+#define ADXRS290_REG_SN0 0x04 /* Serial Number Registers, 4 bytes */
+#define ADXRS290_REG_DATAX0 0x08 /* Roll Rate o/p Data Regs, 2 bytes */
+#define ADXRS290_REG_DATAY0 0x0A /* Pitch Rate o/p Data Regs, 2 bytes */
+#define ADXRS290_REG_TEMP0 0x0C
+#define ADXRS290_REG_POWER_CTL 0x10
+#define ADXRS290_REG_FILTER 0x11
+#define ADXRS290_REG_DATA_RDY 0x12
+
+#define ADXRS290_READ BIT(7)
+#define ADXRS290_TSM BIT(0)
+#define ADXRS290_MEASUREMENT BIT(1)
+#define ADXRS290_DATA_RDY_OUT BIT(0)
+#define ADXRS290_SYNC_MASK GENMASK(1, 0)
+#define ADXRS290_SYNC(x) FIELD_PREP(ADXRS290_SYNC_MASK, x)
+#define ADXRS290_LPF_MASK GENMASK(2, 0)
+#define ADXRS290_LPF(x) FIELD_PREP(ADXRS290_LPF_MASK, x)
+#define ADXRS290_HPF_MASK GENMASK(7, 4)
+#define ADXRS290_HPF(x) FIELD_PREP(ADXRS290_HPF_MASK, x)
+
+#define ADXRS290_READ_REG(reg) (ADXRS290_READ | (reg))
+
+#define ADXRS290_MAX_TRANSITION_TIME_MS 100
+
+enum adxrs290_mode {
+ ADXRS290_MODE_STANDBY,
+ ADXRS290_MODE_MEASUREMENT,
+};
+
+enum adxrs290_scan_index {
+ ADXRS290_IDX_X,
+ ADXRS290_IDX_Y,
+ ADXRS290_IDX_TEMP,
+ ADXRS290_IDX_TS,
+};
+
+struct adxrs290_state {
+ struct spi_device *spi;
+ /* Serialize reads and their subsequent processing */
+ struct mutex lock;
+ enum adxrs290_mode mode;
+ unsigned int lpf_3db_freq_idx;
+ unsigned int hpf_3db_freq_idx;
+ struct iio_trigger *dready_trig;
+ /* Ensure correct alignment of timestamp when present */
+ struct {
+ s16 channels[3];
+ s64 ts __aligned(8);
+ } buffer;
+};
+
+/*
+ * Available cut-off frequencies of the low pass filter in Hz.
+ * The integer part and fractional part are represented separately.
+ */
+static const int adxrs290_lpf_3db_freq_hz_table[][2] = {
+ [0] = {480, 0},
+ [1] = {320, 0},
+ [2] = {160, 0},
+ [3] = {80, 0},
+ [4] = {56, 600000},
+ [5] = {40, 0},
+ [6] = {28, 300000},
+ [7] = {20, 0},
+};
+
+/*
+ * Available cut-off frequencies of the high pass filter in Hz.
+ * The integer part and fractional part are represented separately.
+ */
+static const int adxrs290_hpf_3db_freq_hz_table[][2] = {
+ [0] = {0, 0},
+ [1] = {0, 11000},
+ [2] = {0, 22000},
+ [3] = {0, 44000},
+ [4] = {0, 87000},
+ [5] = {0, 175000},
+ [6] = {0, 350000},
+ [7] = {0, 700000},
+ [8] = {1, 400000},
+ [9] = {2, 800000},
+ [10] = {11, 300000},
+};
+
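+/* read a 16-bit angular-rate register pair, serialized by the state lock */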
+static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *val)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret = 0;
+ int temp;
+
+ mutex_lock(&st->lock);
+ temp = spi_w8r16(st->spi, cmd);
+ if (temp < 0) {
+ ret = temp;
+ goto err_unlock;
+ }
+
+ *val = temp;
+
+err_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
+{
+ const u8 cmd = ADXRS290_READ_REG(ADXRS290_REG_TEMP0);
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret = 0;
+ int temp;
+
+ mutex_lock(&st->lock);
+ temp = spi_w8r16(st->spi, cmd);
+ if (temp < 0) {
+ ret = temp;
+ goto err_unlock;
+ }
+
+ /* extract the lower 12 bits of the temperature reading */
+ *val = temp & 0x0FFF;
+
+err_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int adxrs290_get_3db_freq(struct iio_dev *indio_dev, u8 *val, u8 *val2)
+{
+ const u8 cmd = ADXRS290_READ_REG(ADXRS290_REG_FILTER);
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret = 0;
+ short temp;
+
+ mutex_lock(&st->lock);
+ temp = spi_w8r8(st->spi, cmd);
+ if (temp < 0) {
+ ret = temp;
+ goto err_unlock;
+ }
+
+ *val = FIELD_GET(ADXRS290_LPF_MASK, temp);
+ *val2 = FIELD_GET(ADXRS290_HPF_MASK, temp);
+
+err_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
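+/* single-register write: one address byte followed by one value byte */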
+static int adxrs290_spi_write_reg(struct spi_device *spi, const u8 reg,
+ const u8 val)
+{
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+
+ return spi_write_then_read(spi, buf, ARRAY_SIZE(buf), NULL, 0);
+}
+
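+/* find the index of an {integer, fractional} Hz pair in a filter table */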
+static int adxrs290_find_match(const int (*freq_tbl)[2], const int n,
+ const int val, const int val2)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (freq_tbl[i][0] == val && freq_tbl[i][1] == val2)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int adxrs290_set_filter_freq(struct iio_dev *indio_dev,
+ const unsigned int lpf_idx,
+ const unsigned int hpf_idx)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ u8 val;
+
+ val = ADXRS290_HPF(hpf_idx) | ADXRS290_LPF(lpf_idx);
+
+ return adxrs290_spi_write_reg(st->spi, ADXRS290_REG_FILTER, val);
+}
+
+static int adxrs290_set_mode(struct iio_dev *indio_dev, enum adxrs290_mode mode)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int val, ret;
+
+ if (st->mode == mode)
+ return 0;
+
+ mutex_lock(&st->lock);
+
+ ret = spi_w8r8(st->spi, ADXRS290_READ_REG(ADXRS290_REG_POWER_CTL));
+ if (ret < 0)
+ goto out_unlock;
+
+ val = ret;
+
+ switch (mode) {
+ case ADXRS290_MODE_STANDBY:
+ val &= ~ADXRS290_MEASUREMENT;
+ break;
+ case ADXRS290_MODE_MEASUREMENT:
+ val |= ADXRS290_MEASUREMENT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = adxrs290_spi_write_reg(st->spi, ADXRS290_REG_POWER_CTL, val);
+ if (ret < 0) {
+ dev_err(&st->spi->dev, "unable to set mode: %d\n", ret);
+ goto out_unlock;
+ }
+
+ /* update cached mode */
+ st->mode = mode;
+
+out_unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static void adxrs290_chip_off_action(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ adxrs290_set_mode(indio_dev, ADXRS290_MODE_STANDBY);
+}
+
+static int adxrs290_initial_setup(struct iio_dev *indio_dev)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ ret = adxrs290_spi_write_reg(spi, ADXRS290_REG_POWER_CTL,
+ ADXRS290_MEASUREMENT | ADXRS290_TSM);
+ if (ret < 0)
+ return ret;
+
+ st->mode = ADXRS290_MODE_MEASUREMENT;
+
+ return devm_add_action_or_reset(&spi->dev, adxrs290_chip_off_action,
+ indio_dev);
+}
+
+static int adxrs290_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ unsigned int t;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ ret = adxrs290_get_rate_data(indio_dev,
+ ADXRS290_READ_REG(chan->address),
+ val);
+ if (ret < 0)
+ break;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ ret = adxrs290_get_temp_data(indio_dev, val);
+ if (ret < 0)
+ break;
+
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ /* 1 LSB = 0.005 degrees/sec */
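+			/*
+			 * In rad/s: 0.005 deg/s * pi/180 = 87.266e-6 rad/s,
+			 * i.e. 87266 nrad/s per LSB.
+			 */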
+ *val = 0;
+ *val2 = 87266;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ /* 1 LSB = 0.1 degrees Celsius */
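+			/* The IIO scale yields milli-degrees, hence 100 */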
+ *val = 100;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ t = st->lpf_3db_freq_idx;
+ *val = adxrs290_lpf_3db_freq_hz_table[t][0];
+ *val2 = adxrs290_lpf_3db_freq_hz_table[t][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ t = st->hpf_3db_freq_idx;
+ *val = adxrs290_hpf_3db_freq_hz_table[t][0];
+ *val2 = adxrs290_hpf_3db_freq_hz_table[t][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int adxrs290_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret, lpf_idx, hpf_idx;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ lpf_idx = adxrs290_find_match(adxrs290_lpf_3db_freq_hz_table,
+ ARRAY_SIZE(adxrs290_lpf_3db_freq_hz_table),
+ val, val2);
+ if (lpf_idx < 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* caching the updated state of the low-pass filter */
+ st->lpf_3db_freq_idx = lpf_idx;
+ /* retrieving the current state of the high-pass filter */
+ hpf_idx = st->hpf_3db_freq_idx;
+ ret = adxrs290_set_filter_freq(indio_dev, lpf_idx, hpf_idx);
+ break;
+
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ hpf_idx = adxrs290_find_match(adxrs290_hpf_3db_freq_hz_table,
+ ARRAY_SIZE(adxrs290_hpf_3db_freq_hz_table),
+ val, val2);
+ if (hpf_idx < 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* caching the updated state of the high-pass filter */
+ st->hpf_3db_freq_idx = hpf_idx;
+ /* retrieving the current state of the low-pass filter */
+ lpf_idx = st->lpf_3db_freq_idx;
+ ret = adxrs290_set_filter_freq(indio_dev, lpf_idx, hpf_idx);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int adxrs290_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)adxrs290_lpf_3db_freq_hz_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(adxrs290_lpf_3db_freq_hz_table) * 2;
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)adxrs290_hpf_3db_freq_hz_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(adxrs290_hpf_3db_freq_hz_table) * 2;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxrs290_reg_access_rw(struct spi_device *spi, unsigned int reg,
+ unsigned int *readval)
+{
+ int ret;
+
+ ret = spi_w8r8(spi, ADXRS290_READ_REG(reg));
+ if (ret < 0)
+ return ret;
+
+ *readval = ret;
+
+ return 0;
+}
+
+static int adxrs290_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return adxrs290_reg_access_rw(st->spi, reg, readval);
+ else
+ return adxrs290_spi_write_reg(st->spi, reg, writeval);
+}
+
+static int adxrs290_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret;
+ u8 val;
+
+ val = state ? ADXRS290_SYNC(ADXRS290_DATA_RDY_OUT) : 0;
+
+ ret = adxrs290_spi_write_reg(st->spi, ADXRS290_REG_DATA_RDY, val);
+ if (ret < 0)
+		dev_err(&st->spi->dev, "failed to update data ready interrupt\n");
+
+ return ret;
+}
+
+static int adxrs290_reset_trig(struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ int val;
+
+ /*
+	 * The data-ready interrupt is reset after a read of the data
+	 * registers. Reading only the 16-bit DATAY registers is enough, as
+	 * that marks the end of a data-register read and resets the
+	 * interrupt line.
+ */
+ adxrs290_get_rate_data(indio_dev,
+ ADXRS290_READ_REG(ADXRS290_REG_DATAY0), &val);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops adxrs290_trigger_ops = {
+ .set_trigger_state = &adxrs290_data_rdy_trigger_set_state,
+ .validate_device = &iio_trigger_validate_own_device,
+ .try_reenable = &adxrs290_reset_trig,
+};
+
+static irqreturn_t adxrs290_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ u8 tx = ADXRS290_READ_REG(ADXRS290_REG_DATAX0);
+ int ret;
+
+ mutex_lock(&st->lock);
+
+	/* bulk capture of the X, Y and temperature words, starting at reg DATAX0 */
+ ret = spi_write_then_read(st->spi, &tx, sizeof(tx), st->buffer.channels,
+ sizeof(st->buffer.channels));
+ if (ret < 0)
+ goto out_unlock_notify;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+ pf->timestamp);
+
+out_unlock_notify:
+ mutex_unlock(&st->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+#define ADXRS290_ANGL_VEL_CHANNEL(reg, axis) { \
+ .type = IIO_ANGL_VEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = ADXRS290_IDX_##axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+static const struct iio_chan_spec adxrs290_channels[] = {
+ ADXRS290_ANGL_VEL_CHANNEL(ADXRS290_REG_DATAX0, X),
+ ADXRS290_ANGL_VEL_CHANNEL(ADXRS290_REG_DATAY0, Y),
+ {
+ .type = IIO_TEMP,
+ .address = ADXRS290_REG_TEMP0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = ADXRS290_IDX_TEMP,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(ADXRS290_IDX_TS),
+};
+
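+/*
+ * The trigger handler burst-reads X, Y and temperature in one go, so
+ * only the all-or-nothing scan mask is advertised.
+ */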
+static const unsigned long adxrs290_avail_scan_masks[] = {
+ BIT(ADXRS290_IDX_X) | BIT(ADXRS290_IDX_Y) | BIT(ADXRS290_IDX_TEMP),
+ 0
+};
+
+static const struct iio_info adxrs290_info = {
+ .read_raw = &adxrs290_read_raw,
+ .write_raw = &adxrs290_write_raw,
+ .read_avail = &adxrs290_read_avail,
+ .debugfs_reg_access = &adxrs290_reg_access,
+};
+
+static int adxrs290_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct adxrs290_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!st->spi->irq) {
+ dev_info(&st->spi->dev, "no irq, using polling\n");
+ return 0;
+ }
+
+ st->dready_trig = devm_iio_trigger_alloc(&st->spi->dev, "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!st->dready_trig)
+ return -ENOMEM;
+
+ st->dready_trig->dev.parent = &st->spi->dev;
+ st->dready_trig->ops = &adxrs290_trigger_ops;
+ iio_trigger_set_drvdata(st->dready_trig, indio_dev);
+
+ ret = devm_request_irq(&st->spi->dev, st->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ IRQF_ONESHOT, "adxrs290_irq", st->dready_trig);
+ if (ret < 0)
+ return dev_err_probe(&st->spi->dev, ret,
+ "request irq %d failed\n", st->spi->irq);
+
+ ret = devm_iio_trigger_register(&st->spi->dev, st->dready_trig);
+ if (ret) {
+ dev_err(&st->spi->dev, "iio trigger register failed\n");
+ return ret;
+ }
+
+ indio_dev->trig = iio_trigger_get(st->dready_trig);
+
+ return 0;
+}
+
+static int adxrs290_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adxrs290_state *st;
+ u8 val, val2;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ indio_dev->name = "adxrs290";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adxrs290_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxrs290_channels);
+ indio_dev->info = &adxrs290_info;
+ indio_dev->available_scan_masks = adxrs290_avail_scan_masks;
+
+ mutex_init(&st->lock);
+
+ val = spi_w8r8(spi, ADXRS290_READ_REG(ADXRS290_REG_ADI_ID));
+ if (val != ADXRS290_ADI_ID) {
+ dev_err(&spi->dev, "Wrong ADI ID 0x%02x\n", val);
+ return -ENODEV;
+ }
+
+ val = spi_w8r8(spi, ADXRS290_READ_REG(ADXRS290_REG_MEMS_ID));
+ if (val != ADXRS290_MEMS_ID) {
+ dev_err(&spi->dev, "Wrong MEMS ID 0x%02x\n", val);
+ return -ENODEV;
+ }
+
+ val = spi_w8r8(spi, ADXRS290_READ_REG(ADXRS290_REG_DEV_ID));
+ if (val != ADXRS290_DEV_ID) {
+ dev_err(&spi->dev, "Wrong DEV ID 0x%02x\n", val);
+ return -ENODEV;
+ }
+
+ /* default mode the gyroscope starts in */
+ st->mode = ADXRS290_MODE_STANDBY;
+
+ /* switch to measurement mode and switch on the temperature sensor */
+ ret = adxrs290_initial_setup(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /* max transition time to measurement mode */
+ msleep(ADXRS290_MAX_TRANSITION_TIME_MS);
+
+ ret = adxrs290_get_3db_freq(indio_dev, &val, &val2);
+ if (ret < 0)
+ return ret;
+
+ st->lpf_3db_freq_idx = val;
+ st->hpf_3db_freq_idx = val2;
+
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &adxrs290_trigger_handler, NULL);
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "iio triggered buffer setup failed\n");
+
+ ret = adxrs290_probe_trigger(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct of_device_id adxrs290_of_match[] = {
+ { .compatible = "adi,adxrs290" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adxrs290_of_match);
+
+static struct spi_driver adxrs290_driver = {
+ .driver = {
+ .name = "adxrs290",
+ .of_match_table = adxrs290_of_match,
+ },
+ .probe = adxrs290_probe,
+};
+module_spi_driver(adxrs290_driver);
+
+MODULE_AUTHOR("Nishant Malpani <nish.malpani25@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADXRS290 Gyroscope SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index d3fbe9d86467..1c3c1bd53374 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct itg3200 *st = iio_priv(indio_dev);
- __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
-
- int ret = itg3200_read_all_channels(st->i2c, buf);
+ /*
+	 * Ensure correct alignment and padding, including for the
+	 * timestamp that may be inserted.
+ */
+ struct {
+ __be16 buf[ITG3200_SCAN_ELEMENTS];
+ s64 ts __aligned(8);
+ } scan;
+
+ int ret = itg3200_read_all_channels(st->i2c, scan.buf);
if (ret < 0)
goto error_ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index d9b2ed80882a..b35557a54ee2 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -2,7 +2,7 @@
/*
* max30102.c - Support for MAX30102 heart rate and pulse oximeter sensor
*
- * Copyright (C) 2017 Matt Ranostay <matt@ranostay.consulting>
+ * Copyright (C) 2017 Matt Ranostay <matt.ranostay@konsulko.com>
*
* Support for MAX30105 optical particle sensor
* Copyright (C) 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
@@ -19,7 +19,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -323,11 +323,10 @@ static int max30102_get_current_idx(unsigned int val, int *reg)
static int max30102_led_init(struct max30102_data *data)
{
struct device *dev = &data->client->dev;
- struct device_node *np = dev->of_node;
unsigned int val;
int reg, ret;
- ret = of_property_read_u32(np, "maxim,red-led-current-microamp", &val);
+ ret = device_property_read_u32(dev, "maxim,red-led-current-microamp", &val);
if (ret) {
dev_info(dev, "no red-led-current-microamp set\n");
@@ -346,7 +345,7 @@ static int max30102_led_init(struct max30102_data *data)
return ret;
if (data->chip_id == max30105) {
- ret = of_property_read_u32(np,
+ ret = device_property_read_u32(dev,
"maxim,green-led-current-microamp", &val);
if (ret) {
dev_info(dev, "no green-led-current-microamp set\n");
@@ -368,7 +367,7 @@ static int max30102_led_init(struct max30102_data *data)
return ret;
}
- ret = of_property_read_u32(np, "maxim,ir-led-current-microamp", &val);
+ ret = device_property_read_u32(dev, "maxim,ir-led-current-microamp", &val);
if (ret) {
dev_info(dev, "no ir-led-current-microamp set\n");
@@ -624,7 +623,7 @@ MODULE_DEVICE_TABLE(of, max30102_dt_ids);
static struct i2c_driver max30102_driver = {
.driver = {
.name = MAX30102_DRV_NAME,
- .of_match_table = of_match_ptr(max30102_dt_ids),
+ .of_match_table = max30102_dt_ids,
},
.probe = max30102_probe,
.remove = max30102_remove,
@@ -632,6 +631,6 @@ static struct i2c_driver max30102_driver = {
};
module_i2c_driver(max30102_driver);
-MODULE_AUTHOR("Matt Ranostay <matt@ranostay.consulting>");
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
MODULE_DESCRIPTION("MAX30102 heart rate/pulse oximeter and MAX30105 particle sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6c5507a6cd74..6549fcf6db69 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -38,6 +38,16 @@ config HDC100X
To compile this driver as a module, choose M here: the module
will be called hdc100x.
+config HDC2010
+ tristate "TI HDC2010 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the Texas Instruments
+ HDC2010 and HDC2080 relative humidity and temperature sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hdc2010.
+
config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index ae4204995017..f19ff3de97c5 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
+obj-$(CONFIG_HDC2010) += hdc2010.o
obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 071cb2b12bb6..2a957f19048e 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
@@ -417,7 +418,7 @@ MODULE_DEVICE_TABLE(of, hdc100x_dt_ids);
static struct i2c_driver hdc100x_driver = {
.driver = {
.name = "hdc100x",
- .of_match_table = of_match_ptr(hdc100x_dt_ids),
+ .of_match_table = hdc100x_dt_ids,
},
.probe = hdc100x_probe,
.id_table = hdc100x_id,
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
new file mode 100644
index 000000000000..83f5b9f60780
--- /dev/null
+++ b/drivers/iio/humidity/hdc2010.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * hdc2010.c - Support for the TI HDC2010 and HDC2080
+ * temperature + relative humidity sensors
+ *
+ * Copyright (C) 2020 Norphonic AS
+ * Author: Eugene Zaikonnikov <ez@norphonic.com>
+ *
+ * Datasheet: https://www.ti.com/product/HDC2010/datasheet
+ * Datasheet: https://www.ti.com/product/HDC2080/datasheet
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define HDC2010_REG_TEMP_LOW 0x00
+#define HDC2010_REG_TEMP_HIGH 0x01
+#define HDC2010_REG_HUMIDITY_LOW 0x02
+#define HDC2010_REG_HUMIDITY_HIGH 0x03
+#define HDC2010_REG_INTERRUPT_DRDY 0x04
+#define HDC2010_REG_TEMP_MAX 0x05
+#define HDC2010_REG_HUMIDITY_MAX 0x06
+#define HDC2010_REG_INTERRUPT_EN 0x07
+#define HDC2010_REG_TEMP_OFFSET_ADJ 0x08
+#define HDC2010_REG_HUMIDITY_OFFSET_ADJ 0x09
+#define HDC2010_REG_TEMP_THR_L 0x0a
+#define HDC2010_REG_TEMP_THR_H 0x0b
+#define HDC2010_REG_RH_THR_L 0x0c
+#define HDC2010_REG_RH_THR_H 0x0d
+#define HDC2010_REG_RESET_DRDY_INT_CONF 0x0e
+#define HDC2010_REG_MEASUREMENT_CONF 0x0f
+
+#define HDC2010_MEAS_CONF GENMASK(2, 1)
+#define HDC2010_MEAS_TRIG BIT(0)
+#define HDC2010_HEATER_EN BIT(3)
+#define HDC2010_AMM GENMASK(6, 4)
+
+struct hdc2010_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ u8 measurement_config;
+ u8 interrupt_config;
+ u8 drdy_config;
+};
+
+enum hdc2010_addr_groups {
+ HDC2010_GROUP_TEMP = 0,
+ HDC2010_GROUP_HUMIDITY,
+};
+
+struct hdc2010_reg_record {
+ unsigned long primary;
+ unsigned long peak;
+};
+
+static const struct hdc2010_reg_record hdc2010_reg_translation[] = {
+ [HDC2010_GROUP_TEMP] = {
+ .primary = HDC2010_REG_TEMP_LOW,
+ .peak = HDC2010_REG_TEMP_MAX,
+ },
+ [HDC2010_GROUP_HUMIDITY] = {
+ .primary = HDC2010_REG_HUMIDITY_LOW,
+ .peak = HDC2010_REG_HUMIDITY_MAX,
+ },
+};
+
+static IIO_CONST_ATTR(out_current_heater_raw_available, "0 1");
+
+static struct attribute *hdc2010_attributes[] = {
+ &iio_const_attr_out_current_heater_raw_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hdc2010_attribute_group = {
+ .attrs = hdc2010_attributes,
+};
+
+static const struct iio_chan_spec hdc2010_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = HDC2010_GROUP_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PEAK) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .address = HDC2010_GROUP_HUMIDITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PEAK) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .extend_name = "heater",
+ .output = 1,
+ },
+};
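+
+/*
+ * The heater is modelled as an output current channel with the "heater"
+ * extension, i.e. it shows up as out_current_heater_raw in sysfs, taking
+ * the 0/1 values listed in out_current_heater_raw_available above.
+ */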
+
+static int hdc2010_update_drdy_config(struct hdc2010_data *data,
+ char mask, char val)
+{
+ u8 tmp = (~mask & data->drdy_config) | val;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ HDC2010_REG_RESET_DRDY_INT_CONF, tmp);
+ if (ret)
+ return ret;
+
+ data->drdy_config = tmp;
+
+ return 0;
+}
+
+static int hdc2010_get_prim_measurement_word(struct hdc2010_data *data,
+ struct iio_chan_spec const *chan)
+{
+ struct i2c_client *client = data->client;
+ s32 ret;
+
+ ret = i2c_smbus_read_word_data(client,
+ hdc2010_reg_translation[chan->address].primary);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Could not read sensor measurement word\n");
+
+ return ret;
+}
+
+static int hdc2010_get_peak_measurement_byte(struct hdc2010_data *data,
+ struct iio_chan_spec const *chan)
+{
+ struct i2c_client *client = data->client;
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(client,
+ hdc2010_reg_translation[chan->address].peak);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Could not read sensor measurement byte\n");
+
+ return ret;
+}
+
+static int hdc2010_get_heater_status(struct hdc2010_data *data)
+{
+ return !!(data->drdy_config & HDC2010_HEATER_EN);
+}
+
+static int hdc2010_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct hdc2010_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret;
+
+ if (chan->type == IIO_CURRENT) {
+ *val = hdc2010_get_heater_status(data);
+ return IIO_VAL_INT;
+ }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&data->lock);
+ ret = hdc2010_get_prim_measurement_word(data, chan);
+ mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_PEAK: {
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&data->lock);
+ ret = hdc2010_get_peak_measurement_byte(data, chan);
+ mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+		/* Scale up the value so we can use the same offset as RAW */
+ *val = ret * 256;
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val2 = 65536;
+ if (chan->type == IIO_TEMP)
+ *val = 165000;
+ else
+ *val = 100000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_OFFSET:
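+		/*
+		 * With a scale of 165 / 2^16 degC per LSB, this offset
+		 * corresponds to -40 degC: -40 * 65536 / 165 = -15887.515151.
+		 */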
+ *val = -15887;
+ *val2 = 515151;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hdc2010_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hdc2010_data *data = iio_priv(indio_dev);
+ int new, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_CURRENT || val2 != 0)
+ return -EINVAL;
+
+ switch (val) {
+ case 1:
+ new = HDC2010_HEATER_EN;
+ break;
+ case 0:
+ new = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+ ret = hdc2010_update_drdy_config(data, HDC2010_HEATER_EN, new);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info hdc2010_info = {
+ .read_raw = hdc2010_read_raw,
+ .write_raw = hdc2010_write_raw,
+ .attrs = &hdc2010_attribute_group,
+};
+
+static int hdc2010_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct hdc2010_data *data;
+ u8 tmp;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ /*
+	 * As the DEVICE ID register does not differentiate between the
+	 * HDC2010 and the HDC2080, the name is hardcoded.
+ */
+ indio_dev->name = "hdc2010";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &hdc2010_info;
+
+ indio_dev->channels = hdc2010_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hdc2010_channels);
+
+ /* Enable Automatic Measurement Mode at 5Hz */
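+	/* Writing the full AMM field (0b111) selects the fastest 5 Hz rate */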
+ ret = hdc2010_update_drdy_config(data, HDC2010_AMM, HDC2010_AMM);
+ if (ret)
+ return ret;
+
+ /*
+ * We enable both temp and humidity measurement.
+	 * However, the measurement won't start even in AMM until triggered.
+ */
+ tmp = (data->measurement_config & ~HDC2010_MEAS_CONF) |
+ HDC2010_MEAS_TRIG;
+
+ ret = i2c_smbus_write_byte_data(client, HDC2010_REG_MEASUREMENT_CONF, tmp);
+ if (ret) {
+ dev_warn(&client->dev, "Unable to set up measurement\n");
+ if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
+ dev_warn(&client->dev, "Unable to restore default AMM\n");
+ return ret;
+ }
+
+ data->measurement_config = tmp;
+
+ return iio_device_register(indio_dev);
+}
+
+static int hdc2010_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct hdc2010_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Disable Automatic Measurement Mode */
+ if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
+ dev_warn(&client->dev, "Unable to restore default AMM\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id hdc2010_id[] = {
+ { "hdc2010" },
+ { "hdc2080" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hdc2010_id);
+
+static const struct of_device_id hdc2010_dt_ids[] = {
+ { .compatible = "ti,hdc2010" },
+ { .compatible = "ti,hdc2080" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hdc2010_dt_ids);
+
+static struct i2c_driver hdc2010_driver = {
+ .driver = {
+ .name = "hdc2010",
+ .of_match_table = hdc2010_dt_ids,
+ },
+ .probe = hdc2010_probe,
+ .remove = hdc2010_remove,
+ .id_table = hdc2010_id,
+};
+module_i2c_driver(hdc2010_driver);
+
+MODULE_AUTHOR("Eugene Zaikonnikov <ez@norphonic.com>");
+MODULE_DESCRIPTION("TI HDC2010 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 4f5d9d1c05ab..36df2a102ca4 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -247,7 +248,7 @@ static struct i2c_driver htu21_driver = {
.id_table = htu21_id,
.driver = {
.name = "htu21",
- .of_match_table = of_match_ptr(htu21_of_match),
+ .of_match_table = htu21_of_match,
},
};
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index a09b5773d377..ab6537f136ba 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -153,7 +154,7 @@ MODULE_DEVICE_TABLE(of, si7020_dt_ids);
static struct i2c_driver si7020_driver = {
.driver = {
.name = "si7020",
- .of_match_table = of_match_ptr(si7020_dt_ids),
+ .of_match_table = si7020_dt_ids,
},
.probe = si7020_probe,
.id_table = si7020_id,
diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h
index 9d1a92cc6480..374816bc3e73 100644
--- a/drivers/iio/iio_core_trigger.h
+++ b/drivers/iio/iio_core_trigger.h
@@ -30,7 +30,7 @@ int iio_trigger_detach_poll_func(struct iio_trigger *trig,
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
-static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+static inline int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
return 0;
}
@@ -39,7 +39,7 @@ static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
* iio_device_unregister_trigger_consumer() - reverse the registration process
* @indio_dev: iio_dev associated with the device that consumed the trigger
**/
-static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
+static inline void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
}
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 1ebe3e50d3e6..54af2ed664f6 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -173,6 +173,8 @@ struct adis16400_chip_info {
* @variant: chip variant info
* @filt_int: integer part of requested filter frequency
* @adis: adis device
+ * @avail_scan_mask: NULL terminated array of bitmaps of channels
+ * that must be enabled together
**/
struct adis16400_state {
struct adis16400_chip_info *variant;
@@ -317,11 +319,6 @@ enum adis16400_chip_variant {
ADIS16448,
};
-static struct adis_burst adis16400_burst = {
- .en = true,
- .reg_cmd = ADIS16400_GLOB_CMD,
-};
-
static int adis16334_get_freq(struct adis16400_state *st)
{
int ret;
@@ -947,7 +944,7 @@ static const char * const adis16400_status_error_msgs[] = {
[ADIS16400_DIAG_STAT_POWER_LOW] = "Power supply below 4.75V",
};
-#define ADIS16400_DATA(_timeouts) \
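+/*
+ * _burst_len is the burst-read payload in bytes (e.g. 24 = 12 16-bit
+ * words on the ADIS16400); 0 disables burst mode for that variant.
+ */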
+#define ADIS16400_DATA(_timeouts, _burst_len) \
{ \
.msc_ctrl_reg = ADIS16400_MSC_CTRL, \
.glob_cmd_reg = ADIS16400_GLOB_CMD, \
@@ -973,6 +970,8 @@ static const char * const adis16400_status_error_msgs[] = {
BIT(ADIS16400_DIAG_STAT_POWER_HIGH) | \
BIT(ADIS16400_DIAG_STAT_POWER_LOW), \
.timeouts = (_timeouts), \
+ .burst_reg_cmd = ADIS16400_GLOB_CMD, \
+ .burst_len = (_burst_len) \
}
static const struct adis_timeout adis16300_timeouts = {
@@ -1023,7 +1022,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16300_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts, 18),
},
[ADIS16334] = {
.channels = adis16334_channels,
@@ -1036,7 +1035,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 67850, /* 25 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
- .adis_data = ADIS16400_DATA(&adis16334_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16334_timeouts, 0),
},
[ADIS16350] = {
.channels = adis16350_channels,
@@ -1048,7 +1047,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.flags = ADIS16400_NO_BURST | ADIS16400_HAS_SLOW_MODE,
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16300_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts, 0),
},
[ADIS16360] = {
.channels = adis16350_channels,
@@ -1061,7 +1060,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16300_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts, 28),
},
[ADIS16362] = {
.channels = adis16350_channels,
@@ -1074,7 +1073,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16362_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16362_timeouts, 28),
},
[ADIS16364] = {
.channels = adis16350_channels,
@@ -1087,7 +1086,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16362_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16362_timeouts, 28),
},
[ADIS16367] = {
.channels = adis16350_channels,
@@ -1100,7 +1099,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16300_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16300_timeouts, 28),
},
[ADIS16400] = {
.channels = adis16400_channels,
@@ -1112,7 +1111,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 25000000 / 140000, /* 25 C = 0x00 */
.set_freq = adis16400_set_freq,
.get_freq = adis16400_get_freq,
- .adis_data = ADIS16400_DATA(&adis16400_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16400_timeouts, 24),
},
[ADIS16445] = {
.channels = adis16445_channels,
@@ -1126,7 +1125,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
- .adis_data = ADIS16400_DATA(&adis16445_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16445_timeouts, 16),
},
[ADIS16448] = {
.channels = adis16448_channels,
@@ -1140,7 +1139,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
.set_freq = adis16334_set_freq,
.get_freq = adis16334_get_freq,
- .adis_data = ADIS16400_DATA(&adis16448_timeouts),
+ .adis_data = ADIS16400_DATA(&adis16448_timeouts, 24),
}
};
@@ -1164,6 +1163,12 @@ static void adis16400_setup_chan_mask(struct adis16400_state *st)
st->avail_scan_mask[0] |= BIT(ch->scan_index);
}
}
+
+static void adis16400_stop(void *data)
+{
+ adis16400_stop_device(data);
+}
+
static int adis16400_probe(struct spi_device *spi)
{
struct adis16400_state *st;
@@ -1190,9 +1195,6 @@ static int adis16400_probe(struct spi_device *spi)
if (!(st->variant->flags & ADIS16400_NO_BURST)) {
adis16400_setup_chan_mask(st);
indio_dev->available_scan_masks = st->avail_scan_mask;
- st->adis.burst = &adis16400_burst;
- if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- st->adis.burst_extra_len = sizeof(u16);
}
adis16400_data = &st->variant->adis_data;
@@ -1201,37 +1203,24 @@ static int adis16400_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev,
- adis16400_trigger_handler);
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, adis16400_trigger_handler);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adis16400_initial_setup(indio_dev);
if (ret)
- goto error_cleanup_buffer;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer;
-
- adis16400_debugfs_init(indio_dev);
- return 0;
-
-error_cleanup_buffer:
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- return ret;
-}
-
-static int adis16400_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16400_state *st = iio_priv(indio_dev);
+ return ret;
- iio_device_unregister(indio_dev);
- adis16400_stop_device(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, adis16400_stop, indio_dev);
+ if (ret)
+ return ret;
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+ adis16400_debugfs_init(indio_dev);
return 0;
}
@@ -1261,7 +1250,6 @@ static struct spi_driver adis16400_driver = {
},
.id_table = adis16400_id,
.probe = adis16400_probe,
- .remove = adis16400_remove,
};
module_spi_driver(adis16400_driver);
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index b26a5f1bc51a..74a161e39733 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -403,7 +403,7 @@ static int adis16460_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
if (ret)
return ret;
@@ -411,31 +411,15 @@ static int adis16460_probe(struct spi_device *spi)
ret = __adis_initial_startup(&st->adis);
if (ret)
- goto error_cleanup_buffer;
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
- goto error_cleanup_buffer;
+ return ret;
adis16460_debugfs_init(indio_dev);
return 0;
-
-error_cleanup_buffer:
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- return ret;
-}
-
-static int adis16460_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16460 *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-
- return 0;
}
static const struct spi_device_id adis16460_ids[] = {
@@ -457,7 +441,6 @@ static struct spi_driver adis16460_driver = {
},
.id_table = adis16460_ids,
.probe = adis16460_probe,
- .remove = adis16460_remove,
};
module_spi_driver(adis16460_driver);
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 35d10ccb66c2..197d48240991 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -565,6 +565,9 @@ static int adis16475_enable_irq(struct adis *adis, bool enable)
BIT(ADIS16475_DIAG_STAT_CLK), \
.enable_irq = adis16475_enable_irq, \
.timeouts = (_timeouts), \
+ .burst_reg_cmd = ADIS16475_REG_GLOB_CMD, \
+ .burst_len = ADIS16475_BURST_MAX_DATA, \
+ .burst_max_len = ADIS16475_BURST32_MAX_DATA \
}
static const struct adis16475_sync adis16475_sync_mode[] = {
@@ -910,20 +913,6 @@ static const struct iio_info adis16475_info = {
.debugfs_reg_access = adis_debugfs_reg_access,
};
-static struct adis_burst adis16475_burst = {
- .en = true,
- .reg_cmd = ADIS16475_REG_GLOB_CMD,
- /*
- * adis_update_scan_mode_burst() sets the burst length in respect with
- * the number of channels and allocates 16 bits for each. However,
- * adis1647x devices also need space for DIAG_STAT, DATA_CNTR or
- * TIME_STAMP (depending on the clock mode but for us these bytes are
- * don't care...) and CRC.
- */
- .extra_len = 3 * sizeof(u16),
- .burst_max_len = ADIS16475_BURST32_MAX_DATA,
-};
-
static bool adis16475_validate_crc(const u8 *buffer, u16 crc,
const bool burst32)
{
@@ -1279,7 +1268,6 @@ static int adis16475_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
- st->adis.burst = &adis16475_burst;
st->info = device_get_match_data(&spi->dev);
if (!st->info)
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 1eb4f98076f1..dfe86c589325 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1212,6 +1212,16 @@ static int adis16480_get_ext_clocks(struct adis16480 *st)
return 0;
}
+static void adis16480_stop(void *data)
+{
+ adis16480_stop_device(data);
+}
+
+static void adis16480_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -1245,18 +1255,26 @@ static int adis16480_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, adis16480_stop, indio_dev);
+ if (ret)
+ return ret;
+
ret = adis16480_config_irq_pin(spi->dev.of_node, st);
if (ret)
- goto error_stop_device;
+ return ret;
ret = adis16480_get_ext_clocks(st);
if (ret)
- goto error_stop_device;
+ return ret;
if (!IS_ERR_OR_NULL(st->ext_clk)) {
ret = adis16480_ext_clk_config(st, spi->dev.of_node, true);
if (ret)
- goto error_stop_device;
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, adis16480_clk_disable, st->ext_clk);
+ if (ret)
+ return ret;
st->clk_freq = clk_get_rate(st->ext_clk);
st->clk_freq *= 1000; /* micro */
@@ -1264,39 +1282,17 @@ static int adis16480_probe(struct spi_device *spi)
st->clk_freq = st->chip_info->int_clk;
}
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
if (ret)
- goto error_clk_disable_unprepare;
+ return ret;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
- goto error_cleanup_buffer;
+ return ret;
adis16480_debugfs_init(indio_dev);
return 0;
-
-error_cleanup_buffer:
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-error_clk_disable_unprepare:
- clk_disable_unprepare(st->ext_clk);
-error_stop_device:
- adis16480_stop_device(indio_dev);
- return ret;
-}
-
-static int adis16480_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16480 *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis16480_stop_device(indio_dev);
-
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- clk_disable_unprepare(st->ext_clk);
-
- return 0;
}
static const struct spi_device_id adis16480_ids[] = {
@@ -1338,7 +1334,6 @@ static struct spi_driver adis16480_driver = {
},
.id_table = adis16480_ids,
.probe = adis16480_probe,
- .remove = adis16480_remove,
};
module_spi_driver(adis16480_driver);
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 5b4225ee09b9..ac354321f63a 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -26,12 +26,10 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
unsigned int burst_length, burst_max_length;
u8 *tx;
- /* All but the timestamp channel */
- burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
- burst_length += adis->burst->extra_len + adis->burst_extra_len;
+ burst_length = adis->data->burst_len + adis->burst_extra_len;
- if (adis->burst->burst_max_len)
- burst_max_length = adis->burst->burst_max_len;
+ if (adis->data->burst_max_len)
+ burst_max_length = adis->data->burst_max_len;
else
burst_max_length = burst_length;
@@ -47,7 +45,7 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
}
tx = adis->buffer + burst_max_length;
- tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
+ tx[0] = ADIS_READ_REG(adis->data->burst_reg_cmd);
tx[1] = 0;
adis->xfer[0].tx_buf = tx;
@@ -76,7 +74,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
kfree(adis->xfer);
kfree(adis->buffer);
- if (adis->burst && adis->burst->en)
+ if (adis->data->burst_len)
return adis_update_scan_mode_burst(indio_dev, scan_mask);
scan_count = indio_dev->scan_bytes / 2;
@@ -170,48 +168,6 @@ static void adis_buffer_cleanup(void *arg)
}
/**
- * adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
- * @adis: The adis device.
- * @indio_dev: The IIO device.
- * @trigger_handler: Optional trigger handler, may be NULL.
- *
- * Returns 0 on success, a negative error code otherwise.
- *
- * This function sets up the buffer and trigger for a adis devices. If
- * 'trigger_handler' is NULL the default trigger handler will be used. The
- * default trigger handler will simply read the registers assigned to the
- * currently active channels.
- *
- * adis_cleanup_buffer_and_trigger() should be called to free the resources
- * allocated by this function.
- */
-int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irqreturn_t (*trigger_handler)(int, void *))
-{
- int ret;
-
- if (!trigger_handler)
- trigger_handler = adis_trigger_handler;
-
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- trigger_handler, NULL);
- if (ret)
- return ret;
-
- if (adis->spi->irq) {
- ret = adis_probe_trigger(adis, indio_dev);
- if (ret)
- goto error_buffer_cleanup;
- }
- return 0;
-
-error_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
- return ret;
-}
-EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
-
-/**
* devm_adis_setup_buffer_and_trigger() - Sets up buffer and trigger for
* the managed adis device
* @adis: The adis device
@@ -220,7 +176,10 @@ EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
*
* Returns 0 on success, a negative error code otherwise.
*
- * This function perfoms exactly the same as adis_setup_buffer_and_trigger()
+ * This function sets up the buffer and trigger for an adis device. If
+ * 'trigger_handler' is NULL, the default trigger handler will be used. The
+ * default trigger handler will simply read the registers assigned to the
+ * currently active channels.
*/
int
devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
@@ -248,20 +207,3 @@ devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger);
-/**
- * adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
- * @adis: The adis device.
- * @indio_dev: The IIO device.
- *
- * Frees resources allocated by adis_setup_buffer_and_trigger()
- */
-void adis_cleanup_buffer_and_trigger(struct adis *adis,
- struct iio_dev *indio_dev)
-{
- if (adis->spi->irq)
- adis_remove_trigger(adis);
- kfree(adis->buffer);
- kfree(adis->xfer);
- iio_triggered_buffer_cleanup(indio_dev);
-}
-EXPORT_SYMBOL_GPL(adis_cleanup_buffer_and_trigger);
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 8afe71947c00..64e0ba51cb18 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -55,53 +55,6 @@ static int adis_validate_irq_flag(struct adis *adis)
return 0;
}
-/**
- * adis_probe_trigger() - Sets up trigger for a adis device
- * @adis: The adis device
- * @indio_dev: The IIO device
- *
- * Returns 0 on success or a negative error code
- *
- * adis_remove_trigger() should be used to free the trigger.
- */
-int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
-{
- int ret;
-
- adis->trig = iio_trigger_alloc("%s-dev%d", indio_dev->name,
- indio_dev->id);
- if (adis->trig == NULL)
- return -ENOMEM;
-
- adis_trigger_setup(adis);
-
- ret = adis_validate_irq_flag(adis);
- if (ret)
- return ret;
-
- ret = request_irq(adis->spi->irq,
- &iio_trigger_generic_data_rdy_poll,
- adis->irq_flag,
- indio_dev->name,
- adis->trig);
- if (ret)
- goto error_free_trig;
-
- ret = iio_trigger_register(adis->trig);
-
- indio_dev->trig = iio_trigger_get(adis->trig);
- if (ret)
- goto error_free_irq;
-
- return 0;
-
-error_free_irq:
- free_irq(adis->spi->irq, adis->trig);
-error_free_trig:
- iio_trigger_free(adis->trig);
- return ret;
-}
-EXPORT_SYMBOL_GPL(adis_probe_trigger);
/**
* devm_adis_probe_trigger() - Sets up trigger for a managed adis device
@@ -137,16 +90,3 @@ int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(devm_adis_probe_trigger);
-/**
- * adis_remove_trigger() - Remove trigger for a adis devices
- * @adis: The adis device
- *
- * Removes the trigger previously registered with adis_probe_trigger().
- */
-void adis_remove_trigger(struct adis *adis)
-{
- iio_trigger_unregister(adis->trig);
- free_irq(adis->spi->irq, adis->trig);
- iio_trigger_free(adis->trig);
-}
-EXPORT_SYMBOL_GPL(adis_remove_trigger);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 3fee3947f772..18a1898e3e34 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -1475,22 +1475,14 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
}
st->vdd_supply = devm_regulator_get(dev, "vdd");
- if (IS_ERR(st->vdd_supply)) {
- if (PTR_ERR(st->vdd_supply) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get vdd regulator %d\n",
- (int)PTR_ERR(st->vdd_supply));
-
- return PTR_ERR(st->vdd_supply);
- }
+ if (IS_ERR(st->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(st->vdd_supply),
+ "Failed to get vdd regulator\n");
st->vddio_supply = devm_regulator_get(dev, "vddio");
- if (IS_ERR(st->vddio_supply)) {
- if (PTR_ERR(st->vddio_supply) != -EPROBE_DEFER)
- dev_err(dev, "Failed to get vddio regulator %d\n",
- (int)PTR_ERR(st->vddio_supply));
-
- return PTR_ERR(st->vddio_supply);
- }
+ if (IS_ERR(st->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(st->vddio_supply),
+ "Failed to get vddio regulator\n");
result = regulator_enable(st->vdd_supply);
if (result) {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index cd38b3fccc7b..eb522b38acf3 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -122,6 +122,13 @@ struct inv_mpu6050_chip_config {
u8 user_ctrl;
};
+/*
+ * Maximum of 6 + 6 + 2 + 7 (for MPU9x50) = 21, rounded up to 24, plus 8
+ * for the timestamp. May be less if fewer channels are enabled, as long
+ * as the timestamp remains 8-byte aligned.
+ */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 32
+
/**
* struct inv_mpu6050_hw - Other important hardware information.
* @whoami: Self identification byte from WHO_AM_I register
@@ -165,6 +172,7 @@ struct inv_mpu6050_hw {
* @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
* @magn_orient: magnetometer sensor chip orientation if available.
* @suspended_sensors: sensors mask of sensors turned off for suspend
+ * @data: DMA-safe buffer used for bulk reads.
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -190,6 +198,7 @@ struct inv_mpu6050_state {
s32 magn_raw_to_gauss[3];
struct iio_mount_matrix magn_orient;
unsigned int suspended_sensors;
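+	/* Keep the DMA-safe buffer from sharing a cacheline with other fields */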
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] ____cacheline_aligned;
};
/*register and associated bit definition*/
@@ -334,9 +343,6 @@ struct inv_mpu6050_state {
#define INV_ICM20608_TEMP_OFFSET 8170
#define INV_ICM20608_TEMP_SCALE 3059976
-/* 6 + 6 + 2 + 7 (for MPU9x50) = 21 round up to 24 and plus 8 */
-#define INV_MPU6050_OUTPUT_DATA_SIZE 32
-
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_ACTIVE_HIGH 0x00
#define INV_MPU6050_ACTIVE_LOW 0x80
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index b533fa2dad0a..45c37525c2f1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/math64.h>
-#include <asm/unaligned.h>
#include "inv_mpu_iio.h"
/**
@@ -121,7 +120,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
size_t bytes_per_datum;
int result;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
u16 fifo_count;
s64 timestamp;
int int_status;
@@ -160,11 +158,11 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
* read fifo_count register to know how many bytes are inside the FIFO
* right now
*/
- result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
- INV_MPU6050_FIFO_COUNT_BYTE);
+ result = regmap_bulk_read(st->map, st->reg->fifo_count_h,
+ st->data, INV_MPU6050_FIFO_COUNT_BYTE);
if (result)
goto end_session;
- fifo_count = get_unaligned_be16(&data[0]);
+ fifo_count = be16_to_cpup((__be16 *)&st->data[0]);
/*
* Handle fifo overflow by resetting fifo.
@@ -181,8 +179,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
nb = fifo_count / bytes_per_datum;
inv_mpu6050_update_period(st, pf->timestamp, nb);
for (i = 0; i < nb; ++i) {
- result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
- data, bytes_per_datum);
+ result = regmap_noinc_read(st->map, st->reg->fifo_r_w,
+ st->data, bytes_per_datum);
if (result)
goto flush_fifo;
/* skip first samples if needed */
@@ -191,7 +189,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
continue;
}
timestamp = inv_mpu6050_get_timestamp(st);
- iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data, timestamp);
}
end_session:
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index d80ba2e688ed..9275346a9cc1 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -383,6 +383,7 @@ struct st_lsm6dsx_sensor {
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
* @orientation: sensor chip orientation relative to main hardware.
+ * @scan: Temporary buffers used to align data before iio_push_to_buffers()
*/
struct st_lsm6dsx_hw {
struct device *dev;
@@ -411,6 +412,11 @@ struct st_lsm6dsx_hw {
const struct st_lsm6dsx_settings *settings;
struct iio_mount_matrix orientation;
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan[3];
};
static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 7de10bd636ea..12ed0a2e55e4 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -353,9 +353,6 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
- u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
- u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
- u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
bool reset_ts = false;
__le16 fifo_status;
s64 ts = 0;
@@ -416,19 +413,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
- memcpy(gyro_buff, &hw->buff[offset],
- ST_LSM6DSX_SAMPLE_SIZE);
- offset += ST_LSM6DSX_SAMPLE_SIZE;
+ memcpy(hw->scan[ST_LSM6DSX_ID_GYRO].channels,
+ &hw->buff[offset],
+ sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels));
+ offset += sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels);
}
if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
- memcpy(acc_buff, &hw->buff[offset],
- ST_LSM6DSX_SAMPLE_SIZE);
- offset += ST_LSM6DSX_SAMPLE_SIZE;
+ memcpy(hw->scan[ST_LSM6DSX_ID_ACC].channels,
+ &hw->buff[offset],
+ sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels));
+ offset += sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels);
}
if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
- memcpy(ext_buff, &hw->buff[offset],
- ST_LSM6DSX_SAMPLE_SIZE);
- offset += ST_LSM6DSX_SAMPLE_SIZE;
+ memcpy(hw->scan[ST_LSM6DSX_ID_EXT0].channels,
+ &hw->buff[offset],
+ sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels));
+ offset += sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels);
}
if (ts_sip-- > 0) {
@@ -458,19 +458,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_GYRO],
- gyro_buff, gyro_sensor->ts_ref + ts);
+ &hw->scan[ST_LSM6DSX_ID_GYRO],
+ gyro_sensor->ts_ref + ts);
gyro_sip--;
}
if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_ACC],
- acc_buff, acc_sensor->ts_ref + ts);
+ &hw->scan[ST_LSM6DSX_ID_ACC],
+ acc_sensor->ts_ref + ts);
acc_sip--;
}
if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
iio_push_to_buffers_with_timestamp(
hw->iio_devs[ST_LSM6DSX_ID_EXT0],
- ext_buff, ext_sensor->ts_ref + ts);
+ &hw->scan[ST_LSM6DSX_ID_EXT0],
+ ext_sensor->ts_ref + ts);
ext_sip--;
}
sip++;
@@ -555,7 +558,14 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
{
u16 pattern_len = hw->sip * ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
u16 fifo_len, fifo_diff_mask;
- u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE], tag;
+ /*
+	 * Alignment needed because this buffer is ultimately passed to
+	 * iio_push_to_buffers_with_timestamp(), which requires an
+	 * 8-byte-aligned buffer so that a naturally aligned timestamp
+	 * can be inserted.
+ */
+ u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE] __aligned(8);
+ u8 tag;
bool reset_ts = false;
int i, err, read_len;
__le16 fifo_status;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 346c24281d26..42f485634d04 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -157,10 +157,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x20,
.mask = GENMASK(4, 3),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(732000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -169,9 +169,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(4, 3),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 3,
},
},
@@ -259,10 +259,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -270,10 +270,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -425,10 +425,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -436,10 +436,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -600,10 +600,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -611,10 +611,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -816,10 +816,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -827,10 +827,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -1021,10 +1021,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -1032,10 +1032,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -1200,10 +1200,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_G_TO_M_S_2(61), 0x0 },
- .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
- .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
- .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+ .fs_avl[0] = { IIO_G_TO_M_S_2(61000), 0x0 },
+ .fs_avl[1] = { IIO_G_TO_M_S_2(122000), 0x2 },
+ .fs_avl[2] = { IIO_G_TO_M_S_2(244000), 0x3 },
+ .fs_avl[3] = { IIO_G_TO_M_S_2(488000), 0x1 },
.fs_len = 4,
},
[ST_LSM6DSX_ID_GYRO] = {
@@ -1211,10 +1211,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x11,
.mask = GENMASK(3, 2),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
- .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750000), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500000), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000000), 0x2 },
+ .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000000), 0x3 },
.fs_len = 4,
},
},
@@ -1598,7 +1598,7 @@ static int st_lsm6dsx_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = sensor->gain;
- ret = IIO_VAL_INT_PLUS_MICRO;
+ ret = IIO_VAL_INT_PLUS_NANO;
break;
default:
ret = -EINVAL;
@@ -1836,13 +1836,31 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
fs_table = &hw->settings->fs_table[sensor->id];
for (i = 0; i < fs_table->fs_len; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09u ",
fs_table->fs_avl[i].gain);
buf[len - 1] = '\n';
return len;
}
+static int st_lsm6dsx_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ case IIO_ACCEL:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(st_lsm6dsx_sysfs_sampling_frequency_avail);
static IIO_DEVICE_ATTR(in_accel_scale_available, 0444,
st_lsm6dsx_sysfs_scale_avail, NULL, 0);
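With the gain table now stored in nano-units, the format change works in both directions: read_raw() reports IIO_VAL_INT_PLUS_NANO so the scale is printed with nine fractional digits, and the new write_raw_get_fmt() callback makes the core parse scale writes the same way. A rough sketch of the effect, assuming a written value of 0.000061:

	/*
	 * Sketch: parsing of "echo 0.000061 > in_accel_scale".
	 * IIO_VAL_INT_PLUS_NANO  -> write_raw() sees val = 0, val2 = 61000
	 * IIO_VAL_INT_PLUS_MICRO -> write_raw() sees val = 0, val2 = 61
	 * The nano format preserves the precision of the new gain values.
	 */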
@@ -1868,6 +1886,7 @@ static const struct iio_info st_lsm6dsx_acc_info = {
.read_event_config = st_lsm6dsx_read_event_config,
.write_event_config = st_lsm6dsx_write_event_config,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
+ .write_raw_get_fmt = st_lsm6dsx_write_raw_get_fmt,
};
static struct attribute *st_lsm6dsx_gyro_attributes[] = {
@@ -1885,6 +1904,7 @@ static const struct iio_info st_lsm6dsx_gyro_info = {
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
+ .write_raw_get_fmt = st_lsm6dsx_write_raw_get_fmt,
};
static int st_lsm6dsx_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index ed83471dc7dd..8c8d8870ca07 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -313,6 +313,8 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_sensor *sensor, u8 addr,
err = st_lsm6dsx_shub_read_output(hw, data,
len & ST_LS6DSX_READ_OP_MASK);
+ if (err < 0)
+ return err;
st_lsm6dsx_shub_master_enable(sensor, false);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a7d7e5143ed2..a4f6bb96d4f4 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1264,26 +1264,14 @@ static struct attribute *iio_buffer_attrs[] = {
&dev_attr_data_available.attr,
};
-int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
+static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
+ struct iio_dev *indio_dev)
{
struct iio_dev_attr *p;
struct attribute **attr;
- struct iio_buffer *buffer = indio_dev->buffer;
int ret, i, attrn, attrcount;
const struct iio_chan_spec *channels;
- channels = indio_dev->channels;
- if (channels) {
- int ml = indio_dev->masklength;
-
- for (i = 0; i < indio_dev->num_channels; i++)
- ml = max(ml, channels[i].scan_index + 1);
- indio_dev->masklength = ml;
- }
-
- if (!buffer)
- return 0;
-
attrcount = 0;
if (buffer->attrs) {
while (buffer->attrs[attrcount] != NULL)
@@ -1367,19 +1355,45 @@ error_cleanup_dynamic:
return ret;
}
-void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
struct iio_buffer *buffer = indio_dev->buffer;
+ const struct iio_chan_spec *channels;
+ int i;
+
+ channels = indio_dev->channels;
+ if (channels) {
+ int ml = indio_dev->masklength;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ ml = max(ml, channels[i].scan_index + 1);
+ indio_dev->masklength = ml;
+ }
if (!buffer)
- return;
+ return 0;
+
+ return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
+}
+static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
+{
bitmap_free(buffer->scan_mask);
kfree(buffer->buffer_group.attrs);
kfree(buffer->scan_el_group.attrs);
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}
+void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
+ return;
+
+ __iio_buffer_free_sysfs_and_mask(buffer);
+}
+
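Note that the masklength computation stays in the outer iio_buffer_alloc_sysfs_and_mask() so it runs even when no buffer is attached. It is derived from the highest scan_index, not from the channel count; a quick sketch of the arithmetic, assuming scan indices 0, 1 and 3:

	/*
	 * Sketch: masklength = max(scan_index) + 1.
	 * scan_index values {0, 1, 3} -> ml = max(1, 2, 4) = 4,
	 * leaving bit 2 unused in the scan mask.
	 */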
/**
* iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
* @indio_dev: the iio device
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index cdcd16f19500..261d3b17edc9 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -133,6 +133,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PM10] = "pm10",
[IIO_MOD_ETHANOL] = "ethanol",
[IIO_MOD_H2] = "h2",
+ [IIO_MOD_O2] = "o2",
};
/* relies on pairs of these shared then separate */
@@ -165,10 +166,11 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
+ [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
};
#if defined(CONFIG_DEBUG_FS)
-/**
+/*
* There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
* iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
*/
@@ -1523,6 +1525,7 @@ struct device_type iio_device_type = {
/**
* iio_device_alloc() - allocate an iio_dev from a driver
+ * @parent: Parent device.
* @sizeof_priv: Space to allocate for private structure.
**/
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 2ab4d4c44427..99ba657b8568 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -477,6 +477,7 @@ static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_event_interface *ev_int;
struct iio_dev_attr *p;
int ret = 0, attrcount_orig = 0, attrcount, attrn;
struct attribute **attr;
@@ -485,14 +486,15 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
iio_check_for_dynamic_events(indio_dev)))
return 0;
- iio_dev_opaque->event_interface =
- kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
- if (iio_dev_opaque->event_interface == NULL)
+ ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
+ if (ev_int == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&iio_dev_opaque->event_interface->dev_attr_list);
+ iio_dev_opaque->event_interface = ev_int;
- iio_setup_ev_int(iio_dev_opaque->event_interface);
+ INIT_LIST_HEAD(&ev_int->dev_attr_list);
+
+ iio_setup_ev_int(ev_int);
if (indio_dev->info->event_attrs != NULL) {
attr = indio_dev->info->event_attrs->attrs;
while (*attr++ != NULL)
@@ -506,34 +508,29 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
attrcount += ret;
}
- iio_dev_opaque->event_interface->group.name = iio_event_group_name;
- iio_dev_opaque->event_interface->group.attrs = kcalloc(attrcount + 1,
- sizeof(iio_dev_opaque->event_interface->group.attrs[0]),
- GFP_KERNEL);
- if (iio_dev_opaque->event_interface->group.attrs == NULL) {
+ ev_int->group.name = iio_event_group_name;
+ ev_int->group.attrs = kcalloc(attrcount + 1,
+ sizeof(ev_int->group.attrs[0]),
+ GFP_KERNEL);
+ if (ev_int->group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_setup_event_lines;
}
if (indio_dev->info->event_attrs)
- memcpy(iio_dev_opaque->event_interface->group.attrs,
+ memcpy(ev_int->group.attrs,
indio_dev->info->event_attrs->attrs,
- sizeof(iio_dev_opaque->event_interface->group.attrs[0])
- *attrcount_orig);
+ sizeof(ev_int->group.attrs[0]) * attrcount_orig);
attrn = attrcount_orig;
/* Add all elements from the list. */
- list_for_each_entry(p,
- &iio_dev_opaque->event_interface->dev_attr_list,
- l)
- iio_dev_opaque->event_interface->group.attrs[attrn++] =
- &p->dev_attr.attr;
- indio_dev->groups[indio_dev->groupcounter++] =
- &iio_dev_opaque->event_interface->group;
+ list_for_each_entry(p, &ev_int->dev_attr_list, l)
+ ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] = &ev_int->group;
return 0;
error_free_setup_event_lines:
- iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list);
- kfree(iio_dev_opaque->event_interface);
+ iio_free_chan_devattr_list(&ev_int->dev_attr_list);
+ kfree(ev_int);
iio_dev_opaque->event_interface = NULL;
return ret;
}
@@ -557,10 +554,12 @@ void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
- if (iio_dev_opaque->event_interface == NULL)
+ if (ev_int == NULL)
return;
- iio_free_chan_devattr_list(&iio_dev_opaque->event_interface->dev_attr_list);
- kfree(iio_dev_opaque->event_interface->group.attrs);
- kfree(iio_dev_opaque->event_interface);
+ iio_free_chan_devattr_list(&ev_int->dev_attr_list);
+ kfree(ev_int->group.attrs);
+ kfree(ev_int);
+ iio_dev_opaque->event_interface = NULL;
}
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 6f16357fd732..583bb51f65a7 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -516,7 +516,8 @@ static void iio_trig_subirqunmask(struct irq_data *d)
trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
-static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
+static __printf(1, 0)
+struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
struct iio_trigger *trig;
int i;
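The __printf(1, 0) annotation lets the compiler check the format string in argument 1, while the 0 marks that the variadic arguments arrive as a va_list and cannot be checked. A hedged sketch of the same pattern on a hypothetical helper:

	/* Sketch: format checking for a helper that takes a va_list. */
	static __printf(1, 0)
	void log_va(const char *fmt, va_list args)
	{
		vprintk(fmt, args);	/* args already packaged by the caller */
	}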
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 182bd18c4bb2..cade6dc0305b 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -86,6 +86,21 @@ config APDS9960
To compile this driver as a module, choose M here: the
module will be called apds9960
+config AS73211
+ tristate "AMS AS73211 XYZ color sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the AMS AS73211
+ JENCOLOR(R) Digital XYZ Sensor.
+
+ For triggered measurements, you will need an additional trigger driver
+ like IIO_HRTIMER_TRIGGER or IIO_SYSFS_TRIGGER.
+
+ This driver can also be built as a module. If so, the module
+ will be called as73211.
+
config BH1750
tristate "ROHM BH1750 ambient light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index d1c8aa30b9a8..ea376deaca54 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_AL3010) += al3010.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
+obj-$(CONFIG_AS73211) += as73211.o
obj-$(CONFIG_BH1750) += bh1750.o
obj-$(CONFIG_BH1780) += bh1780.o
obj-$(CONFIG_CM32181) += cm32181.o
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
new file mode 100644
index 000000000000..7b32dfaee9b3
--- /dev/null
+++ b/drivers/iio/light/as73211.c
@@ -0,0 +1,800 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for AMS AS73211 JENCOLOR(R) Digital XYZ Sensor
+ *
+ * Author: Christian Eggers <ceggers@arri.de>
+ *
+ * Copyright (c) 2020 ARRI Lighting
+ *
+ * Color light sensor with 16-bit channels for x, y, z and temperature;
+ * 7-bit I2C slave address 0x74 .. 0x77.
+ *
+ * Datasheet: https://ams.com/documents/20143/36005/AS73211_DS000556_3-01.pdf
+ */
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+
+#define HZ_PER_KHZ 1000
+
+#define AS73211_DRV_NAME "as73211"
+
+/* AS73211 configuration registers */
+#define AS73211_REG_OSR 0x0
+#define AS73211_REG_AGEN 0x2
+#define AS73211_REG_CREG1 0x6
+#define AS73211_REG_CREG2 0x7
+#define AS73211_REG_CREG3 0x8
+
+/* AS73211 output register bank */
+#define AS73211_OUT_OSR_STATUS 0
+#define AS73211_OUT_TEMP 1
+#define AS73211_OUT_MRES1 2
+#define AS73211_OUT_MRES2 3
+#define AS73211_OUT_MRES3 4
+
+#define AS73211_OSR_SS BIT(7)
+#define AS73211_OSR_PD BIT(6)
+#define AS73211_OSR_SW_RES BIT(3)
+#define AS73211_OSR_DOS_MASK GENMASK(2, 0)
+#define AS73211_OSR_DOS_CONFIG FIELD_PREP(AS73211_OSR_DOS_MASK, 0x2)
+#define AS73211_OSR_DOS_MEASURE FIELD_PREP(AS73211_OSR_DOS_MASK, 0x3)
+
+#define AS73211_AGEN_DEVID_MASK GENMASK(7, 4)
+#define AS73211_AGEN_DEVID(x) FIELD_PREP(AS73211_AGEN_DEVID_MASK, (x))
+#define AS73211_AGEN_MUT_MASK GENMASK(3, 0)
+#define AS73211_AGEN_MUT(x) FIELD_PREP(AS73211_AGEN_MUT_MASK, (x))
+
+#define AS73211_CREG1_GAIN_MASK GENMASK(7, 4)
+#define AS73211_CREG1_GAIN_1 11
+#define AS73211_CREG1_TIME_MASK GENMASK(3, 0)
+
+#define AS73211_CREG3_CCLK_MASK GENMASK(1, 0)
+
+#define AS73211_OSR_STATUS_OUTCONVOF BIT(15)
+#define AS73211_OSR_STATUS_MRESOF BIT(14)
+#define AS73211_OSR_STATUS_ADCOF BIT(13)
+#define AS73211_OSR_STATUS_LDATA BIT(12)
+#define AS73211_OSR_STATUS_NDATA BIT(11)
+#define AS73211_OSR_STATUS_NOTREADY BIT(10)
+
+#define AS73211_SAMPLE_FREQ_BASE 1024000
+
+#define AS73211_SAMPLE_TIME_NUM 15
+#define AS73211_SAMPLE_TIME_MAX_MS BIT(AS73211_SAMPLE_TIME_NUM - 1)
+
+/* Available sample frequencies are 1.024MHz multiplied by powers of two. */
+static const int as73211_samp_freq_avail[] = {
+ AS73211_SAMPLE_FREQ_BASE * 1,
+ AS73211_SAMPLE_FREQ_BASE * 2,
+ AS73211_SAMPLE_FREQ_BASE * 4,
+ AS73211_SAMPLE_FREQ_BASE * 8,
+};
+
+static const int as73211_hardwaregain_avail[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048,
+};
+
+/**
+ * struct as73211_data - Instance data for one AS73211
+ * @client: I2C client.
+ * @osr: Cached Operational State Register.
+ * @creg1: Cached Configuration Register 1.
+ * @creg2: Cached Configuration Register 2.
+ * @creg3: Cached Configuration Register 3.
+ * @mutex: Keeps cached registers in sync with the device.
+ * @completion: Completion to wait for interrupt.
+ * @int_time_avail: Available integration times (depend on sampling frequency).
+ */
+struct as73211_data {
+ struct i2c_client *client;
+ u8 osr;
+ u8 creg1;
+ u8 creg2;
+ u8 creg3;
+ struct mutex mutex;
+ struct completion completion;
+ int int_time_avail[AS73211_SAMPLE_TIME_NUM * 2];
+};
+
+#define AS73211_COLOR_CHANNEL(_color, _si, _addr) { \
+ .type = IIO_INTENSITY, \
+ .modified = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) | \
+ BIT(IIO_CHAN_INFO_INT_TIME), \
+ .channel2 = IIO_MOD_##_color, \
+ .address = _addr, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+#define AS73211_OFFSET_TEMP_INT (-66)
+#define AS73211_OFFSET_TEMP_MICRO 900000
+#define AS73211_SCALE_TEMP_INT 0
+#define AS73211_SCALE_TEMP_MICRO 50000
+
+#define AS73211_SCALE_X 277071108 /* nW/m^2 */
+#define AS73211_SCALE_Y 298384270 /* nW/m^2 */
+#define AS73211_SCALE_Z 160241927 /* nW/m^2 */
+
+/* Channel order MUST match the device's result register order */
+#define AS73211_SCAN_INDEX_TEMP 0
+#define AS73211_SCAN_INDEX_X 1
+#define AS73211_SCAN_INDEX_Y 2
+#define AS73211_SCAN_INDEX_Z 3
+#define AS73211_SCAN_INDEX_TS 4
+
+#define AS73211_SCAN_MASK_COLOR ( \
+ BIT(AS73211_SCAN_INDEX_X) | \
+ BIT(AS73211_SCAN_INDEX_Y) | \
+ BIT(AS73211_SCAN_INDEX_Z))
+
+#define AS73211_SCAN_MASK_ALL ( \
+ BIT(AS73211_SCAN_INDEX_TEMP) | \
+ AS73211_SCAN_MASK_COLOR)
+
+static const struct iio_chan_spec as73211_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = AS73211_OUT_TEMP,
+ .scan_index = AS73211_SCAN_INDEX_TEMP,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ }
+ },
+ AS73211_COLOR_CHANNEL(X, AS73211_SCAN_INDEX_X, AS73211_OUT_MRES1),
+ AS73211_COLOR_CHANNEL(Y, AS73211_SCAN_INDEX_Y, AS73211_OUT_MRES2),
+ AS73211_COLOR_CHANNEL(Z, AS73211_SCAN_INDEX_Z, AS73211_OUT_MRES3),
+ IIO_CHAN_SOFT_TIMESTAMP(AS73211_SCAN_INDEX_TS),
+};
+
+static unsigned int as73211_integration_time_1024cyc(struct as73211_data *data)
+{
+ /*
+ * Return integration time in units of 1024 clock cycles. Integration time
+ * in CREG1 is in powers of 2 (x 1024 cycles).
+ */
+ return BIT(FIELD_GET(AS73211_CREG1_TIME_MASK, data->creg1));
+}
+
+static unsigned int as73211_integration_time_us(struct as73211_data *data,
+ unsigned int integration_time_1024cyc)
+{
+ /*
+ * f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz)
+ * t_cycl is configured in CREG1 in powers of 2 (x 1024 cycles)
+ * t_int_us = 1 / (f_samp) * t_cycl * US_PER_SEC
+ * = 1 / (2^CREG3_CCLK * 1,024,000) * 2^CREG1_CYCLES * 1,024 * US_PER_SEC
+ * = 2^(-CREG3_CCLK) * 2^CREG1_CYCLES * 1,000
+ * In order to get rid of negative exponents, we extend the "fraction"
+ * by 2^3 (CREG3_CCLK,max = 3)
+ * t_int_us = 2^(3-CREG3_CCLK) * 2^CREG1_CYCLES * 125
+ */
+ return BIT(3 - FIELD_GET(AS73211_CREG3_CCLK_MASK, data->creg3)) *
+ integration_time_1024cyc * 125;
+}
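+/*
+ * Worked example, assuming CREG3_CCLK = 0 (1.024 MHz) and
+ * CREG1_CYCLES = 6 (64 x 1024 clock cycles):
+ * t_int_us = 2^(3-0) * 2^6 * 125 = 8 * 64 * 125 = 64000 us,
+ * i.e. 65536 cycles at 1.024 MHz take 64 ms.
+ */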
+
+static void as73211_integration_time_calc_avail(struct as73211_data *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(data->int_time_avail) / 2; i++) {
+ unsigned int time_us = as73211_integration_time_us(data, BIT(i));
+
+ data->int_time_avail[i * 2 + 0] = time_us / USEC_PER_SEC;
+ data->int_time_avail[i * 2 + 1] = time_us % USEC_PER_SEC;
+ }
+}
+
+static unsigned int as73211_gain(struct as73211_data *data)
+{
+ /* gain can be calculated from CREG1 as 2^(11 - CREG1_GAIN) */
+ return BIT(AS73211_CREG1_GAIN_1 - FIELD_GET(AS73211_CREG1_GAIN_MASK, data->creg1));
+}
+
+/* must be called with as73211_data::mutex held. */
+static int as73211_req_data(struct as73211_data *data)
+{
+ unsigned int time_us = as73211_integration_time_us(data,
+ as73211_integration_time_1024cyc(data));
+ struct device *dev = &data->client->dev;
+ union i2c_smbus_data smbus_data;
+ u16 osr_status;
+ int ret;
+
+ if (data->client->irq)
+ reinit_completion(&data->completion);
+
+ /*
+ * During measurement, there should be no traffic on the i2c bus as the
+ * electrical noise would disturb the measurement process.
+ */
+ i2c_lock_bus(data->client->adapter, I2C_LOCK_SEGMENT);
+
+ data->osr &= ~AS73211_OSR_DOS_MASK;
+ data->osr |= AS73211_OSR_DOS_MEASURE | AS73211_OSR_SS;
+
+ smbus_data.byte = data->osr;
+ ret = __i2c_smbus_xfer(data->client->adapter, data->client->addr,
+ data->client->flags, I2C_SMBUS_WRITE,
+ AS73211_REG_OSR, I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0) {
+ i2c_unlock_bus(data->client->adapter, I2C_LOCK_SEGMENT);
+ return ret;
+ }
+
+ /*
+ * Reset AS73211_OSR_SS (it is self-clearing) in order to avoid
+ * unintentionally triggering further measurements later.
+ */
+ data->osr &= ~AS73211_OSR_SS;
+
+ /*
+ * Add 33% extra margin for the timeout, since fclk,min = fclk,typ - 27%.
+ */
+ time_us += time_us / 3;
+ if (data->client->irq) {
+ ret = wait_for_completion_timeout(&data->completion, usecs_to_jiffies(time_us));
+ if (!ret) {
+ dev_err(dev, "timeout waiting for READY IRQ\n");
+ i2c_unlock_bus(data->client->adapter, I2C_LOCK_SEGMENT);
+ return -ETIMEDOUT;
+ }
+ } else {
+ /* Wait integration time */
+ usleep_range(time_us, 2 * time_us);
+ }
+
+ i2c_unlock_bus(data->client->adapter, I2C_LOCK_SEGMENT);
+
+ ret = i2c_smbus_read_word_data(data->client, AS73211_OUT_OSR_STATUS);
+ if (ret < 0)
+ return ret;
+
+ osr_status = ret;
+ if (osr_status != (AS73211_OSR_DOS_MEASURE | AS73211_OSR_STATUS_NDATA)) {
+ if (osr_status & AS73211_OSR_SS) {
+ dev_err(dev, "%s() Measurement has not stopped\n", __func__);
+ return -ETIME;
+ }
+ if (osr_status & AS73211_OSR_STATUS_NOTREADY) {
+ dev_err(dev, "%s() Data is not ready\n", __func__);
+ return -ENODATA;
+ }
+ if (!(osr_status & AS73211_OSR_STATUS_NDATA)) {
+ dev_err(dev, "%s() No new data available\n", __func__);
+ return -ENODATA;
+ }
+ if (osr_status & AS73211_OSR_STATUS_LDATA) {
+ dev_err(dev, "%s() Result buffer overrun\n", __func__);
+ return -ENOBUFS;
+ }
+ if (osr_status & AS73211_OSR_STATUS_ADCOF) {
+ dev_err(dev, "%s() ADC overflow\n", __func__);
+ return -EOVERFLOW;
+ }
+ if (osr_status & AS73211_OSR_STATUS_MRESOF) {
+ dev_err(dev, "%s() Measurement result overflow\n", __func__);
+ return -EOVERFLOW;
+ }
+ if (osr_status & AS73211_OSR_STATUS_OUTCONVOF) {
+ dev_err(dev, "%s() Timer overflow\n", __func__);
+ return -EOVERFLOW;
+ }
+ dev_err(dev, "%s() Unexpected status value\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct as73211_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = as73211_req_data(data);
+ if (ret < 0) {
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ }
+
+ ret = i2c_smbus_read_word_data(data->client, chan->address);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = AS73211_OFFSET_TEMP_INT;
+ *val2 = AS73211_OFFSET_TEMP_MICRO;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = AS73211_SCALE_TEMP_INT;
+ *val2 = AS73211_SCALE_TEMP_MICRO;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case IIO_INTENSITY: {
+ unsigned int scale;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ scale = AS73211_SCALE_X;
+ break;
+ case IIO_MOD_Y:
+ scale = AS73211_SCALE_Y;
+ break;
+ case IIO_MOD_Z:
+ scale = AS73211_SCALE_Z;
+ break;
+ default:
+ return -EINVAL;
+ }
+ scale /= as73211_gain(data);
+ scale /= as73211_integration_time_1024cyc(data);
+ *val = scale;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }}
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /* f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz) */
+ *val = BIT(FIELD_GET(AS73211_CREG3_CCLK_MASK, data->creg3)) *
+ AS73211_SAMPLE_FREQ_BASE;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ *val = as73211_gain(data);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_INT_TIME: {
+ unsigned int time_us;
+
+ mutex_lock(&data->mutex);
+ time_us = as73211_integration_time_us(data, as73211_integration_time_1024cyc(data));
+ mutex_unlock(&data->mutex);
+ *val = time_us / USEC_PER_SEC;
+ *val2 = time_us % USEC_PER_SEC;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }}
+}
+
+static int as73211_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long mask)
+{
+ struct as73211_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *length = ARRAY_SIZE(as73211_samp_freq_avail);
+ *vals = as73211_samp_freq_avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ *length = ARRAY_SIZE(as73211_hardwaregain_avail);
+ *vals = as73211_hardwaregain_avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(data->int_time_avail);
+ *vals = data->int_time_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int _as73211_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan __always_unused,
+ int val, int val2, long mask)
+{
+ struct as73211_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ int reg_bits, freq_kHz = val / HZ_PER_KHZ; /* 1024, 2048, ... */
+
+ /* val must be 1024 * 2^x */
+ if (val < 0 || (freq_kHz * HZ_PER_KHZ) != val ||
+ !is_power_of_2(freq_kHz) || val2)
+ return -EINVAL;
+
+ /* f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz (=2^10)) */
+ reg_bits = ilog2(freq_kHz) - 10;
+ if (!FIELD_FIT(AS73211_CREG3_CCLK_MASK, reg_bits))
+ return -EINVAL;
+
+ data->creg3 &= ~AS73211_CREG3_CCLK_MASK;
+ data->creg3 |= FIELD_PREP(AS73211_CREG3_CCLK_MASK, reg_bits);
+ as73211_integration_time_calc_avail(data);
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_CREG3, data->creg3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ }
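+ /*
+ * Worked example, assuming a write of 2048000 Hz:
+ * freq_kHz = 2048 (a power of two), so
+ * reg_bits = ilog2(2048) - 10 = 1, selecting
+ * f_samp = 2^1 x 1.024 MHz = 2.048 MHz in CREG3.
+ */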
+ case IIO_CHAN_INFO_HARDWAREGAIN: {
+ unsigned int reg_bits;
+
+ if (val < 0 || !is_power_of_2(val) || val2)
+ return -EINVAL;
+
+ /* gain can be calculated from CREG1 as 2^(11 - CREG1_GAIN) */
+ reg_bits = AS73211_CREG1_GAIN_1 - ilog2(val);
+ if (!FIELD_FIT(AS73211_CREG1_GAIN_MASK, reg_bits))
+ return -EINVAL;
+
+ data->creg1 &= ~AS73211_CREG1_GAIN_MASK;
+ data->creg1 |= FIELD_PREP(AS73211_CREG1_GAIN_MASK, reg_bits);
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_CREG1, data->creg1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ }
+ case IIO_CHAN_INFO_INT_TIME: {
+ int val_us = val * USEC_PER_SEC + val2;
+ int time_ms;
+ int reg_bits;
+
+ /* f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz) */
+ int f_samp_1_024mhz = BIT(FIELD_GET(AS73211_CREG3_CCLK_MASK, data->creg3));
+
+ /*
+ * time_ms = time_us * US_PER_MS * f_samp_1_024mhz / MHZ_PER_HZ
+ * = time_us * f_samp_1_024mhz / 1000
+ */
+ time_ms = (val_us * f_samp_1_024mhz) / 1000; /* 1 ms, 2 ms, ... (power of two) */
+ if (time_ms < 0 || !is_power_of_2(time_ms) || time_ms > AS73211_SAMPLE_TIME_MAX_MS)
+ return -EINVAL;
+
+ reg_bits = ilog2(time_ms);
+ if (!FIELD_FIT(AS73211_CREG1_TIME_MASK, reg_bits))
+ return -EINVAL; /* not possible due to previous tests */
+
+ data->creg1 &= ~AS73211_CREG1_TIME_MASK;
+ data->creg1 |= FIELD_PREP(AS73211_CREG1_TIME_MASK, reg_bits);
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_CREG1, data->creg1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+ default:
+ return -EINVAL;
+ }}
+}
+
+static int as73211_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct as73211_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ goto error_unlock;
+
+ /* Need to switch to config mode ... */
+ if ((data->osr & AS73211_OSR_DOS_MASK) != AS73211_OSR_DOS_CONFIG) {
+ data->osr &= ~AS73211_OSR_DOS_MASK;
+ data->osr |= AS73211_OSR_DOS_CONFIG;
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_OSR, data->osr);
+ if (ret < 0)
+ goto error_release;
+ }
+
+ ret = _as73211_write_raw(indio_dev, chan, val, val2, mask);
+
+error_release:
+ iio_device_release_direct_mode(indio_dev);
+error_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static irqreturn_t as73211_ready_handler(int irq __always_unused, void *priv)
+{
+ struct as73211_data *data = iio_priv(priv);
+
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct as73211_data *data = iio_priv(indio_dev);
+ struct {
+ __le16 chan[4];
+ s64 ts __aligned(8);
+ } scan;
+ int data_result, ret;
+
+ mutex_lock(&data->mutex);
+
+ data_result = as73211_req_data(data);
+ if (data_result < 0 && data_result != -EOVERFLOW)
+ goto done; /* don't push any data for errors other than EOVERFLOW */
+
+ if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) {
+ /* Optimization for reading all (color + temperature) channels */
+ u8 addr = as73211_channels[0].address;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = data->client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = data->client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(scan.chan),
+ .buf = (u8 *)&scan.chan,
+ },
+ };
+
+ ret = i2c_transfer(data->client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ goto done;
+ } else {
+ /* Optimization for reading only color channels */
+
+ /* AS73211 starts reading at address 2 */
+ ret = i2c_master_recv(data->client,
+ (char *)&scan.chan[1], 3 * sizeof(scan.chan[1]));
+ if (ret < 0)
+ goto done;
+ }
+
+ if (data_result) {
+ /*
+ * Saturate all channels (in case of overflows). Temperature channel
+ * is not affected by overflows.
+ */
+ scan.chan[1] = cpu_to_le16(U16_MAX);
+ scan.chan[2] = cpu_to_le16(U16_MAX);
+ scan.chan[3] = cpu_to_le16(U16_MAX);
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev));
+
+done:
+ mutex_unlock(&data->mutex);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info as73211_info = {
+ .read_raw = as73211_read_raw,
+ .read_avail = as73211_read_avail,
+ .write_raw = as73211_write_raw,
+};
+
+static int as73211_power(struct iio_dev *indio_dev, bool state)
+{
+ struct as73211_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ data->osr &= ~AS73211_OSR_PD;
+ else
+ data->osr |= AS73211_OSR_PD;
+
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_OSR, data->osr);
+
+ mutex_unlock(&data->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void as73211_power_disable(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ as73211_power(indio_dev, false);
+}
+
+static int as73211_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct as73211_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+
+ indio_dev->info = &as73211_info;
+ indio_dev->name = AS73211_DRV_NAME;
+ indio_dev->channels = as73211_channels;
+ indio_dev->num_channels = ARRAY_SIZE(as73211_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
+ if (ret < 0)
+ return ret;
+ data->osr = ret;
+
+ /* reset device */
+ data->osr |= AS73211_OSR_SW_RES;
+ ret = i2c_smbus_write_byte_data(data->client, AS73211_REG_OSR, data->osr);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
+ if (ret < 0)
+ return ret;
+ data->osr = ret;
+
+ /*
+ * Reading AGEN is only possible after reset (AGEN is not available if
+ * device is in measurement mode).
+ */
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_AGEN);
+ if (ret < 0)
+ return ret;
+
+ /* At the time of writing this driver, only DEVID 2 and MUT 1 are known. */
+ if ((ret & AS73211_AGEN_DEVID_MASK) != AS73211_AGEN_DEVID(2) ||
+ (ret & AS73211_AGEN_MUT_MASK) != AS73211_AGEN_MUT(1))
+ return -ENODEV;
+
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_CREG1);
+ if (ret < 0)
+ return ret;
+ data->creg1 = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_CREG2);
+ if (ret < 0)
+ return ret;
+ data->creg2 = ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_CREG3);
+ if (ret < 0)
+ return ret;
+ data->creg3 = ret;
+ as73211_integration_time_calc_avail(data);
+
+ ret = as73211_power(indio_dev, true);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, as73211_power_disable, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, as73211_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL,
+ as73211_ready_handler,
+ IRQF_ONESHOT,
+ client->name, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int __maybe_unused as73211_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ return as73211_power(indio_dev, false);
+}
+
+static int __maybe_unused as73211_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+ return as73211_power(indio_dev, true);
+}
+
+static SIMPLE_DEV_PM_OPS(as73211_pm_ops, as73211_suspend, as73211_resume);
+
+static const struct of_device_id as73211_of_match[] = {
+ { .compatible = "ams,as73211" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, as73211_of_match);
+
+static const struct i2c_device_id as73211_id[] = {
+ { "as73211", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, as73211_id);
+
+static struct i2c_driver as73211_driver = {
+ .driver = {
+ .name = AS73211_DRV_NAME,
+ .of_match_table = as73211_of_match,
+ .pm = &as73211_pm_ops,
+ },
+ .probe_new = as73211_probe,
+ .id_table = as73211_id,
+};
+module_i2c_driver(as73211_driver);
+
+MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
+MODULE_DESCRIPTION("AS73211 XYZ True Color Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index fed79ba27fda..75d6b5fcf2cc 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -182,12 +182,11 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_push_data,
+ true);
if (ret)
return ret;
- iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
-
indio_dev->info = &cros_ec_light_prox_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index d5e1cd27eb46..7ba7aa59437c 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -566,7 +566,7 @@ static int gp2ap002_probe(struct i2c_client *client,
/*
* Initialize the device and signal to runtime PM that now we are
- * definately up and using power.
+ * definitely up and using power.
*/
ret = gp2ap002_init(gp2ap002);
if (ret) {
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index ac8ad0f32689..2689867467a8 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -746,12 +746,9 @@ static int isl29018_probe(struct i2c_client *client,
chip->suspended = false;
chip->vcc_reg = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(chip->vcc_reg)) {
- err = PTR_ERR(chip->vcc_reg);
- if (err != -EPROBE_DEFER)
- dev_err(&client->dev, "failed to get VCC regulator!\n");
- return err;
- }
+ if (IS_ERR(chip->vcc_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(chip->vcc_reg),
+ "failed to get VCC regulator!\n");
err = regulator_enable(chip->vcc_reg);
if (err) {
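This and the similar probe() cleanups below rely on dev_err_probe(), which logs real failures at error level, logs -EPROBE_DEFER at debug level while recording the deferral reason, and returns the error code, all in one call. A minimal sketch of the pattern, with a placeholder supply name:

	struct regulator *reg;

	reg = devm_regulator_get(dev, "vcc");	/* "vcc" is a placeholder */
	if (IS_ERR(reg))
		/* one call replaces the old open-coded EPROBE_DEFER check */
		return dev_err_probe(dev, PTR_ERR(reg),
				     "failed to get VCC regulator\n");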
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 8f5f857c2e7d..b304801c7916 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -168,6 +168,7 @@ struct si1145_part_info {
* @part_info: Part information
* @trig: Pointer to iio trigger
* @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode
+ * @buffer: Used to pack data read from sensor.
*/
struct si1145_data {
struct i2c_client *client;
@@ -179,6 +180,14 @@ struct si1145_data {
bool autonomous;
struct iio_trigger *trig;
int meas_rate;
+ /*
+ * Ensure timestamp will be naturally aligned if present.
+ * Maximum buffer size (may be only partly used if not all
+ * channels are enabled):
+ * 6*2 bytes channels data + 4 bytes alignment +
+ * 8 bytes timestamp
+ */
+ u8 buffer[24] __aligned(8);
};
/*
@@ -440,12 +449,6 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct si1145_data *data = iio_priv(indio_dev);
- /*
- * Maximum buffer size:
- * 6*2 bytes channels data + 4 bytes alignment +
- * 8 bytes timestamp
- */
- u8 buffer[24];
int i, j = 0;
int ret;
u8 irq_status = 0;
@@ -478,7 +481,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
ret = i2c_smbus_read_i2c_block_data_or_emulated(
data->client, indio_dev->channels[i].address,
- sizeof(u16) * run, &buffer[j]);
+ sizeof(u16) * run, &data->buffer[j]);
if (ret < 0)
goto done;
j += run * sizeof(u16);
@@ -493,7 +496,7 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 735399405417..d79205361dfa 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -1776,14 +1776,8 @@ static int tsl2772_probe(struct i2c_client *clientp,
ret = devm_regulator_bulk_get(&clientp->dev,
ARRAY_SIZE(chip->supplies),
chip->supplies);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&clientp->dev,
- "Failed to get regulators: %d\n",
- ret);
-
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&clientp->dev, ret, "Failed to get regulators\n");
ret = regulator_bulk_enable(ARRAY_SIZE(chip->supplies), chip->supplies);
if (ret < 0) {
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index cbb44e401c0a..24b2f7b1fe44 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -12,6 +12,7 @@
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -843,15 +844,8 @@ static int ak8974_probe(struct i2c_client *i2c,
ret = devm_regulator_bulk_get(&i2c->dev,
ARRAY_SIZE(ak8974->regs),
ak8974->regs);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&i2c->dev, "cannot get regulators: %d\n", ret);
- else
- dev_dbg(&i2c->dev,
- "regulators unavailable, deferring probe\n");
-
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&i2c->dev, ret, "cannot get regulators\n");
ret = regulator_bulk_enable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
if (ret < 0) {
@@ -1058,7 +1052,7 @@ static struct i2c_driver ak8974_driver = {
.driver = {
.name = "ak8974",
.pm = &ak8974_dev_pm_ops,
- .of_match_table = of_match_ptr(ak8974_of_match),
+ .of_match_table = ak8974_of_match,
},
.probe = ak8974_probe,
.remove = ak8974_remove,
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 623766ff800b..d988b6ac3659 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -17,7 +18,6 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/gpio/consumer.h>
-#include <linux/acpi.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -779,7 +779,6 @@ static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id ak_acpi_match[] = {
{"AK8975", AK8975},
{"AK8963", AK8963},
@@ -791,7 +790,6 @@ static const struct acpi_device_id ak_acpi_match[] = {
{ }
};
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
-#endif
static void ak8975_fill_buffer(struct iio_dev *indio_dev)
{
@@ -1081,8 +1079,8 @@ static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
.pm = &ak8975_dev_pm_ops,
- .of_match_table = of_match_ptr(ak8975_of_match),
- .acpi_match_table = ACPI_PTR(ak_acpi_match),
+ .of_match_table = ak8975_of_match,
+ .acpi_match_table = ak_acpi_match,
},
.probe = ak8975_probe,
.remove = ak8975_remove,
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 1474ba63babe..780faea61d82 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -245,7 +245,7 @@ static const struct iio_enum hmc5843_meas_conf_enum = {
};
static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
- IIO_ENUM("meas_conf", true, &hmc5843_meas_conf_enum),
+ IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum),
IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
@@ -259,7 +259,7 @@ static const struct iio_enum hmc5983_meas_conf_enum = {
};
static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
- IIO_ENUM("meas_conf", true, &hmc5983_meas_conf_enum),
+ IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum),
IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4d305a21c379..838b13c8bb3d 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -476,22 +476,14 @@ static int mag3110_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(data->vdd_reg)) {
- if (PTR_ERR(data->vdd_reg) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "failed to get VDD regulator!\n");
- return PTR_ERR(data->vdd_reg);
- }
+ if (IS_ERR(data->vdd_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vdd_reg),
+ "failed to get VDD regulator!\n");
data->vddio_reg = devm_regulator_get(&client->dev, "vddio");
- if (IS_ERR(data->vddio_reg)) {
- if (PTR_ERR(data->vddio_reg) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_err(&client->dev, "failed to get VDDIO regulator!\n");
- return PTR_ERR(data->vddio_reg);
- }
+ if (IS_ERR(data->vddio_reg))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vddio_reg),
+ "failed to get VDDIO regulator!\n");
ret = regulator_enable(data->vdd_reg);
if (ret) {
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index 6910218fdb00..d54ae5cbe51b 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -354,11 +354,9 @@ static int mux_probe(struct platform_device *pdev)
return -ENODEV;
parent = devm_iio_channel_get(dev, "parent");
- if (IS_ERR(parent)) {
- if (PTR_ERR(parent) != -EPROBE_DEFER)
- dev_err(dev, "failed to get parent channel\n");
- return PTR_ERR(parent);
- }
+ if (IS_ERR(parent))
+ return dev_err_probe(dev, PTR_ERR(parent),
+ "failed to get parent channel\n");
sizeof_ext_info = iio_get_channel_ext_info_count(parent);
if (sizeof_ext_info) {
diff --git a/drivers/iio/potentiometer/ad5272.c b/drivers/iio/potentiometer/ad5272.c
index 933afcf7e925..70c45d346df0 100644
--- a/drivers/iio/potentiometer/ad5272.c
+++ b/drivers/iio/potentiometer/ad5272.c
@@ -15,6 +15,7 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#define AD5272_RDAC_WR 1
#define AD5272_RDAC_RD 2
@@ -192,7 +193,6 @@ static int ad5272_probe(struct i2c_client *client,
return devm_iio_device_register(dev, indio_dev);
}
-#if defined(CONFIG_OF)
static const struct of_device_id ad5272_dt_ids[] = {
{ .compatible = "adi,ad5272-020", .data = (void *)AD5272_020 },
{ .compatible = "adi,ad5272-050", .data = (void *)AD5272_050 },
@@ -202,7 +202,6 @@ static const struct of_device_id ad5272_dt_ids[] = {
{}
};
MODULE_DEVICE_TABLE(of, ad5272_dt_ids);
-#endif /* CONFIG_OF */
static const struct i2c_device_id ad5272_id[] = {
{ "ad5272-020", AD5272_020 },
@@ -217,7 +216,7 @@ MODULE_DEVICE_TABLE(i2c, ad5272_id);
static struct i2c_driver ad5272_driver = {
.driver = {
.name = "ad5272",
- .of_match_table = of_match_ptr(ad5272_dt_ids),
+ .of_match_table = ad5272_dt_ids,
},
.probe = ad5272_probe,
.id_table = ad5272_id,
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index 5c061ab8f46c..20b45407eaac 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -14,7 +14,7 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#define DS1803_MAX_POS 255
#define DS1803_WRITE(chan) (0xa8 | ((chan) + 1))
@@ -134,7 +134,6 @@ static int ds1803_probe(struct i2c_client *client,
return devm_iio_device_register(dev, indio_dev);
}
-#if defined(CONFIG_OF)
static const struct of_device_id ds1803_dt_ids[] = {
{ .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
{ .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
@@ -142,7 +141,6 @@ static const struct of_device_id ds1803_dt_ids[] = {
{}
};
MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
-#endif /* CONFIG_OF */
static const struct i2c_device_id ds1803_id[] = {
{ "ds1803-010", DS1803_010 },
@@ -155,7 +153,7 @@ MODULE_DEVICE_TABLE(i2c, ds1803_id);
static struct i2c_driver ds1803_driver = {
.driver = {
.name = "ds1803",
- .of_match_table = of_match_ptr(ds1803_dt_ids),
+ .of_match_table = ds1803_dt_ids,
},
.probe = ds1803_probe,
.id_table = ds1803_id,
diff --git a/drivers/iio/potentiometer/max5432.c b/drivers/iio/potentiometer/max5432.c
index 280de9c54471..aed3b6ab82a2 100644
--- a/drivers/iio/potentiometer/max5432.c
+++ b/drivers/iio/potentiometer/max5432.c
@@ -11,8 +11,8 @@
#include <linux/iio/iio.h>
#include <linux/limits.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
/* All chip variants have 32 wiper positions. */
#define MAX5432_MAX_POS 31
@@ -100,7 +100,7 @@ static int max5432_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->client = client;
- data->ohm = (unsigned long)of_device_get_match_data(dev);
+ data->ohm = (unsigned long)device_get_match_data(dev);
indio_dev->info = &max5432_info;
indio_dev->channels = max5432_channels;
@@ -122,7 +122,7 @@ MODULE_DEVICE_TABLE(of, max5432_dt_ids);
static struct i2c_driver max5432_driver = {
.driver = {
.name = "max5432",
- .of_match_table = of_match_ptr(max5432_dt_ids),
+ .of_match_table = max5432_dt_ids,
},
.probe = max5432_probe,
};
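The same conversion repeats across the potentiometer drivers below: device_get_match_data() works through fwnode, so one match table serves both device tree and ACPI (e.g. PRP0001) enumeration, and the of_match_ptr()/CONFIG_OF guards can go. Schematically, with hypothetical table names:

	/* Sketch: firmware-agnostic match, falling back to the I2C id table. */
	data->cfg = device_get_match_data(&client->dev);
	if (!data->cfg)
		data->cfg = &cfg_table[i2c_match_id(id_table, client)->driver_data];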
diff --git a/drivers/iio/potentiometer/max5481.c b/drivers/iio/potentiometer/max5481.c
index 5f5988189796..a88ed0eb3adc 100644
--- a/drivers/iio/potentiometer/max5481.c
+++ b/drivers/iio/potentiometer/max5481.c
@@ -7,12 +7,11 @@
* https://datasheets.maximintegrated.com/en/ds/MAX5481-MAX5484.pdf
*/
-#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
/* write wiper reg */
@@ -117,7 +116,6 @@ static const struct iio_info max5481_info = {
.write_raw = max5481_write_raw,
};
-#if defined(CONFIG_OF)
static const struct of_device_id max5481_match[] = {
{ .compatible = "maxim,max5481", .data = &max5481_cfg[max5481] },
{ .compatible = "maxim,max5482", .data = &max5481_cfg[max5482] },
@@ -126,7 +124,6 @@ static const struct of_device_id max5481_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, max5481_match);
-#endif
static int max5481_probe(struct spi_device *spi)
{
@@ -144,7 +141,7 @@ static int max5481_probe(struct spi_device *spi)
data->spi = spi;
- data->cfg = of_device_get_match_data(&spi->dev);
+ data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg)
data->cfg = &max5481_cfg[id->driver_data];
@@ -184,22 +181,10 @@ static const struct spi_device_id max5481_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, max5481_id_table);
-#if defined(CONFIG_ACPI)
-static const struct acpi_device_id max5481_acpi_match[] = {
- { "max5481", max5481 },
- { "max5482", max5482 },
- { "max5483", max5483 },
- { "max5484", max5484 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, max5481_acpi_match);
-#endif
-
static struct spi_driver max5481_driver = {
.driver = {
.name = "max5481",
- .of_match_table = of_match_ptr(max5481_match),
- .acpi_match_table = ACPI_PTR(max5481_acpi_match),
+ .of_match_table = max5481_match,
},
.probe = max5481_probe,
.remove = max5481_remove,
diff --git a/drivers/iio/potentiometer/mcp4018.c b/drivers/iio/potentiometer/mcp4018.c
index fd0579ad3c83..c0e171fec062 100644
--- a/drivers/iio/potentiometer/mcp4018.c
+++ b/drivers/iio/potentiometer/mcp4018.c
@@ -16,8 +16,8 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#define MCP4018_WIPER_MAX 127
@@ -116,8 +116,6 @@ static const struct i2c_device_id mcp4018_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp4018_id);
-#ifdef CONFIG_OF
-
#define MCP4018_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &mcp4018_cfg[cfg], \
@@ -140,8 +138,6 @@ static const struct of_device_id mcp4018_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mcp4018_of_match);
-#endif
-
static int mcp4018_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -161,7 +157,7 @@ static int mcp4018_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = of_device_get_match_data(dev);
+ data->cfg = device_get_match_data(dev);
if (!data->cfg)
data->cfg = &mcp4018_cfg[i2c_match_id(mcp4018_id, client)->driver_data];
@@ -176,7 +172,7 @@ static int mcp4018_probe(struct i2c_client *client)
static struct i2c_driver mcp4018_driver = {
.driver = {
.name = "mcp4018",
- .of_match_table = of_match_ptr(mcp4018_of_match),
+ .of_match_table = mcp4018_of_match,
},
.probe_new = mcp4018_probe,
.id_table = mcp4018_id,
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 2923ce250fc3..7c8c18ab8764 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -37,9 +37,9 @@
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#define MCP4131_WRITE (0x00 << 2)
@@ -252,7 +252,7 @@ static int mcp4131_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
data->spi = spi;
- data->cfg = of_device_get_match_data(&spi->dev);
+ data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg) {
devid = spi_get_device_id(spi)->driver_data;
data->cfg = &mcp4131_cfg[devid];
@@ -479,7 +479,7 @@ MODULE_DEVICE_TABLE(spi, mcp4131_id);
static struct spi_driver mcp4131_driver = {
.driver = {
.name = "mcp4131",
- .of_match_table = of_match_ptr(mcp4131_dt_ids),
+ .of_match_table = mcp4131_dt_ids,
},
.probe = mcp4131_probe,
.id_table = mcp4131_id,
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 95efc4b40514..c25f84b4a270 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -28,8 +28,8 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
@@ -275,8 +275,6 @@ static const struct i2c_device_id mcp4531_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
-#ifdef CONFIG_OF
-
#define MCP4531_COMPATIBLE(of_compatible, cfg) { \
.compatible = of_compatible, \
.data = &mcp4531_cfg[cfg], \
@@ -350,7 +348,6 @@ static const struct of_device_id mcp4531_of_match[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mcp4531_of_match);
-#endif
static int mcp4531_probe(struct i2c_client *client)
{
@@ -371,7 +368,7 @@ static int mcp4531_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = of_device_get_match_data(dev);
+ data->cfg = device_get_match_data(dev);
if (!data->cfg)
data->cfg = &mcp4531_cfg[i2c_match_id(mcp4531_id, client)->driver_data];
@@ -386,7 +383,7 @@ static int mcp4531_probe(struct i2c_client *client)
static struct i2c_driver mcp4531_driver = {
.driver = {
.name = "mcp4531",
- .of_match_table = of_match_ptr(mcp4531_of_match),
+ .of_match_table = mcp4531_of_match,
},
.probe_new = mcp4531_probe,
.id_table = mcp4531_id,
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 67ae635a05f3..f34ca769dc20 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -205,13 +205,12 @@ static const struct iio_info lmp91000_info = {
static int lmp91000_read_config(struct lmp91000_data *data)
{
struct device *dev = data->dev;
- struct device_node *np = dev->of_node;
unsigned int reg, val;
int i, ret;
- ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
+ ret = device_property_read_u32(dev, "ti,tia-gain-ohm", &val);
if (ret) {
- if (!of_property_read_bool(np, "ti,external-tia-resistor")) {
+ if (!device_property_read_bool(dev, "ti,external-tia-resistor")) {
dev_err(dev, "no ti,tia-gain-ohm defined and external resistor not specified\n");
return ret;
}
@@ -232,7 +231,7 @@ static int lmp91000_read_config(struct lmp91000_data *data)
return ret;
}
- ret = of_property_read_u32(np, "ti,rload-ohm", &val);
+ ret = device_property_read_u32(dev, "ti,rload-ohm", &val);
if (ret) {
val = 100;
dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
@@ -422,7 +421,7 @@ MODULE_DEVICE_TABLE(i2c, lmp91000_id);
static struct i2c_driver lmp91000_driver = {
.driver = {
.name = LMP91000_DRV_NAME,
- .of_match_table = of_match_ptr(lmp91000_of_match),
+ .of_match_table = lmp91000_of_match,
},
.probe = lmp91000_probe,
.remove = lmp91000_remove,
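device_property_read_u32() and friends read the same named property whether it comes from a device tree node or an ACPI _DSD package, so the driver no longer needs to touch dev->of_node at all. A short sketch against the bindings shown in the hunk above:

#include <linux/device.h>
#include <linux/property.h>

static int foo_read_config(struct device *dev)
{
	u32 gain;
	int ret;

	/* One call covers DT properties and ACPI _DSD entries alike. */
	ret = device_property_read_u32(dev, "ti,tia-gain-ohm", &gain);
	if (ret && !device_property_read_bool(dev, "ti,external-tia-resistor"))
		return ret;	/* no internal gain and no external resistor */

	return 0;
}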
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index f0938b6fbba0..aa043cb9ac42 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -139,12 +139,11 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
cros_ec_sensors_capture,
- cros_ec_sensors_push_data);
+ cros_ec_sensors_push_data,
+ true);
if (ret)
return ret;
- iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
-
indio_dev->info = &cros_ec_baro_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 90c0df068bbb..48759fc4bf18 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/crc8.h>
@@ -645,7 +646,7 @@ static struct i2c_driver icp10100_driver = {
.driver = {
.name = "icp10100",
.pm = &icp10100_pm,
- .of_match_table = of_match_ptr(icp10100_of_match),
+ .of_match_table = icp10100_of_match,
},
.probe = icp10100_probe,
.id_table = icp10100_id,
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 072c106dd66d..7c04f730430c 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/unaligned.h>
@@ -113,14 +113,12 @@ static int ms5611_i2c_remove(struct i2c_client *client)
return ms5611_remove(i2c_get_clientdata(client));
}
-#if defined(CONFIG_OF)
static const struct of_device_id ms5611_i2c_matches[] = {
{ .compatible = "meas,ms5611" },
{ .compatible = "meas,ms5607" },
{ }
};
MODULE_DEVICE_TABLE(of, ms5611_i2c_matches);
-#endif
static const struct i2c_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
@@ -132,7 +130,7 @@ MODULE_DEVICE_TABLE(i2c, ms5611_id);
static struct i2c_driver ms5611_driver = {
.driver = {
.name = "ms5611",
- .of_match_table = of_match_ptr(ms5611_i2c_matches)
+ .of_match_table = ms5611_i2c_matches,
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 4799aa57135e..45d3a7d5be8e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/unaligned.h>
@@ -115,14 +115,12 @@ static int ms5611_spi_remove(struct spi_device *spi)
return ms5611_remove(spi_get_drvdata(spi));
}
-#if defined(CONFIG_OF)
static const struct of_device_id ms5611_spi_matches[] = {
{ .compatible = "meas,ms5611" },
{ .compatible = "meas,ms5607" },
{ }
};
MODULE_DEVICE_TABLE(of, ms5611_spi_matches);
-#endif
static const struct spi_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
@@ -134,7 +132,7 @@ MODULE_DEVICE_TABLE(spi, ms5611_id);
static struct spi_driver ms5611_driver = {
.driver = {
.name = "ms5611",
- .of_match_table = of_match_ptr(ms5611_spi_matches)
+ .of_match_table = ms5611_spi_matches
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index 05e0ef7260d5..5b59a4137d32 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -192,7 +193,7 @@ static struct i2c_driver ms5637_driver = {
.id_table = ms5637_id,
.driver = {
.name = "ms5637",
- .of_match_table = of_match_ptr(ms5637_of_match),
+ .of_match_table = ms5637_of_match,
},
};
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index 1a65791ba279..95d9739444c4 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include "zpa2326.h"
/*
@@ -66,18 +66,16 @@ static const struct i2c_device_id zpa2326_i2c_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, zpa2326_i2c_ids);
-#if defined(CONFIG_OF)
static const struct of_device_id zpa2326_i2c_matches[] = {
{ .compatible = "murata,zpa2326" },
{ }
};
MODULE_DEVICE_TABLE(of, zpa2326_i2c_matches);
-#endif
static struct i2c_driver zpa2326_i2c_driver = {
.driver = {
.name = "zpa2326-i2c",
- .of_match_table = of_match_ptr(zpa2326_i2c_matches),
+ .of_match_table = zpa2326_i2c_matches,
.pm = ZPA2326_PM_OPS,
},
.probe = zpa2326_probe_i2c,
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index f37a4c738c75..85201a4bae44 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include "zpa2326.h"
/*
@@ -70,18 +70,16 @@ static const struct spi_device_id zpa2326_spi_ids[] = {
};
MODULE_DEVICE_TABLE(spi, zpa2326_spi_ids);
-#if defined(CONFIG_OF)
static const struct of_device_id zpa2326_spi_matches[] = {
{ .compatible = "murata,zpa2326" },
{ }
};
MODULE_DEVICE_TABLE(of, zpa2326_spi_matches);
-#endif
static struct spi_driver zpa2326_spi_driver = {
.driver = {
.name = "zpa2326-spi",
- .of_match_table = of_match_ptr(zpa2326_spi_matches),
+ .of_match_table = zpa2326_spi_matches,
.pm = ZPA2326_PM_OPS,
},
.probe = zpa2326_probe_spi,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index c339e7339ec8..b79ada839e01 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -352,19 +353,19 @@ static void as3935_stop_work(void *data)
static int as3935_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct iio_trigger *trig;
struct as3935_state *st;
- struct device_node *np = spi->dev.of_node;
int ret;
/* Be sure lightning event interrupt is specified */
if (!spi->irq) {
- dev_err(&spi->dev, "unable to get event interrupt\n");
+ dev_err(dev, "unable to get event interrupt\n");
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -374,27 +375,24 @@ static int as3935_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
- ret = of_property_read_u32(np,
+ ret = device_property_read_u32(dev,
"ams,tuning-capacitor-pf", &st->tune_cap);
if (ret) {
st->tune_cap = 0;
- dev_warn(&spi->dev,
- "no tuning-capacitor-pf set, defaulting to %d",
+ dev_warn(dev, "no tuning-capacitor-pf set, defaulting to %d",
st->tune_cap);
}
if (st->tune_cap > MAX_PF_CAP) {
- dev_err(&spi->dev,
- "wrong tuning-capacitor-pf setting of %d\n",
+ dev_err(dev, "wrong tuning-capacitor-pf setting of %d\n",
st->tune_cap);
return -EINVAL;
}
- ret = of_property_read_u32(np,
+ ret = device_property_read_u32(dev,
"ams,nflwdth", &st->nflwdth_reg);
if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
- dev_err(&spi->dev,
- "invalid nflwdth setting of %d\n",
+ dev_err(dev, "invalid nflwdth setting of %d\n",
st->nflwdth_reg);
return -EINVAL;
}
@@ -405,7 +403,7 @@ static int as3935_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &as3935_info;
- trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name, indio_dev->id);
if (!trig)
@@ -417,42 +415,42 @@ static int as3935_probe(struct spi_device *spi)
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &iio_interrupt_trigger_ops;
- ret = devm_iio_trigger_register(&spi->dev, trig);
+ ret = devm_iio_trigger_register(dev, trig);
if (ret) {
- dev_err(&spi->dev, "failed to register trigger\n");
+ dev_err(dev, "failed to register trigger\n");
return ret;
}
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
as3935_trigger_handler, NULL);
if (ret) {
- dev_err(&spi->dev, "cannot setup iio trigger\n");
+ dev_err(dev, "cannot setup iio trigger\n");
return ret;
}
calibrate_as3935(st);
INIT_DELAYED_WORK(&st->work, as3935_event_work);
- ret = devm_add_action(&spi->dev, as3935_stop_work, indio_dev);
+ ret = devm_add_action(dev, as3935_stop_work, indio_dev);
if (ret)
return ret;
- ret = devm_request_irq(&spi->dev, spi->irq,
+ ret = devm_request_irq(dev, spi->irq,
&as3935_interrupt_handler,
IRQF_TRIGGER_RISING,
- dev_name(&spi->dev),
+ dev_name(dev),
indio_dev);
if (ret) {
- dev_err(&spi->dev, "unable to request irq\n");
+ dev_err(dev, "unable to request irq\n");
return ret;
}
- ret = devm_iio_device_register(&spi->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret < 0) {
- dev_err(&spi->dev, "unable to register device\n");
+ dev_err(dev, "unable to register device\n");
return ret;
}
return 0;
@@ -473,7 +471,7 @@ MODULE_DEVICE_TABLE(spi, as3935_id);
static struct spi_driver as3935_driver = {
.driver = {
.name = "as3935",
- .of_match_table = of_match_ptr(as3935_of_match),
+ .of_match_table = as3935_of_match,
.pm = AS3935_PM_OPS,
},
.probe = as3935_probe,
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index a8e716dbd24e..c685f10b5ae4 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -360,7 +361,7 @@ static const struct dev_pm_ops lidar_pm_ops = {
static struct i2c_driver lidar_driver = {
.driver = {
.name = LIDAR_DRV_NAME,
- .of_match_table = of_match_ptr(lidar_dt_ids),
+ .of_match_table = lidar_dt_ids,
.pm = &lidar_pm_ops,
},
.probe = lidar_probe,
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index dc2e11b43431..6d3f4ab8c6b2 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -6,19 +6,21 @@
* Based on SX9500 driver and Semtech driver using the input framework
* <https://my.syncplicity.com/share/teouwsim8niiaud/
* linux-driver-SX9310_NoSmartHSensing>.
- * Reworked April 2019 by Evan Green <evgreen@chromium.org>
- * and January 2020 by Daniel Campello <campello@chromium.org>
+ * Reworked in April 2019 by Evan Green <evgreen@chromium.org>
+ * and in January 2020 by Daniel Campello <campello@chromium.org>.
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/pm.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/iio/buffer.h>
@@ -33,45 +35,44 @@
#define SX9310_REG_IRQ_SRC 0x00
#define SX9310_REG_STAT0 0x01
#define SX9310_REG_STAT1 0x02
+#define SX9310_REG_STAT1_COMPSTAT_MASK GENMASK(3, 0)
#define SX9310_REG_IRQ_MSK 0x03
#define SX9310_CONVDONE_IRQ BIT(3)
#define SX9310_FAR_IRQ BIT(5)
#define SX9310_CLOSE_IRQ BIT(6)
-#define SX9310_EVENT_IRQ (SX9310_FAR_IRQ | \
- SX9310_CLOSE_IRQ)
#define SX9310_REG_IRQ_FUNC 0x04
#define SX9310_REG_PROX_CTRL0 0x10
-#define SX9310_REG_PROX_CTRL0_PROXSTAT2 0x10
-#define SX9310_REG_PROX_CTRL0_EN_MASK 0x0F
+#define SX9310_REG_PROX_CTRL0_SENSOREN_MASK GENMASK(3, 0)
+#define SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK GENMASK(7, 4)
+#define SX9310_REG_PROX_CTRL0_SCANPERIOD_15MS 0x01
#define SX9310_REG_PROX_CTRL1 0x11
#define SX9310_REG_PROX_CTRL2 0x12
-#define SX9310_REG_PROX_CTRL2_COMBMODE_ALL 0x80
-#define SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC 0x04
+#define SX9310_REG_PROX_CTRL2_COMBMODE_CS1_CS2 (0x02 << 6)
+#define SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC (0x01 << 2)
#define SX9310_REG_PROX_CTRL3 0x13
-#define SX9310_REG_PROX_CTRL3_GAIN0_X8 0x0c
+#define SX9310_REG_PROX_CTRL3_GAIN0_X8 (0x03 << 2)
#define SX9310_REG_PROX_CTRL3_GAIN12_X4 0x02
#define SX9310_REG_PROX_CTRL4 0x14
#define SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST 0x07
#define SX9310_REG_PROX_CTRL5 0x15
-#define SX9310_REG_PROX_CTRL5_RANGE_SMALL 0xc0
-#define SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 0x04
+#define SX9310_REG_PROX_CTRL5_RANGE_SMALL (0x03 << 6)
+#define SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 (0x01 << 2)
#define SX9310_REG_PROX_CTRL5_RAWFILT_1P25 0x02
#define SX9310_REG_PROX_CTRL6 0x16
-#define SX9310_REG_PROX_CTRL6_COMP_COMMON 0x20
+#define SX9310_REG_PROX_CTRL6_AVGTHRESH_DEFAULT 0x20
#define SX9310_REG_PROX_CTRL7 0x17
-#define SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 0x08
+#define SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 (0x01 << 3)
#define SX9310_REG_PROX_CTRL7_AVGPOSFILT_512 0x05
#define SX9310_REG_PROX_CTRL8 0x18
#define SX9310_REG_PROX_CTRL9 0x19
-#define SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 0x40
-#define SX9310_REG_PROX_CTRL8_9_PTHRESH_96 0x88
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH_28 (0x08 << 3)
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH_96 (0x11 << 3)
#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900 0x03
#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500 0x05
#define SX9310_REG_PROX_CTRL10 0x1a
-#define SX9310_REG_PROX_CTRL10_HYST_6PCT 0x10
-#define SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 0x12
-#define SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8 0x03
+#define SX9310_REG_PROX_CTRL10_HYST_6PCT (0x01 << 4)
+#define SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_2 0x01
#define SX9310_REG_PROX_CTRL11 0x1b
#define SX9310_REG_PROX_CTRL12 0x1c
#define SX9310_REG_PROX_CTRL13 0x1d
@@ -82,8 +83,8 @@
#define SX9310_REG_PROX_CTRL18 0x22
#define SX9310_REG_PROX_CTRL19 0x23
#define SX9310_REG_SAR_CTRL0 0x2a
-#define SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES 0x40
-#define SX9310_REG_SAR_CTRL0_SARHYST_8 0x10
+#define SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES (0x02 << 5)
+#define SX9310_REG_SAR_CTRL0_SARHYST_8 (0x02 << 3)
#define SX9310_REG_SAR_CTRL1 0x2b
/* Each increment of the slope register is 0.0078125. */
#define SX9310_REG_SAR_CTRL1_SLOPE(_hnslope) (_hnslope / 78125)
@@ -91,39 +92,28 @@
#define SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT 0x3c
#define SX9310_REG_SENSOR_SEL 0x30
-
#define SX9310_REG_USE_MSB 0x31
#define SX9310_REG_USE_LSB 0x32
-
#define SX9310_REG_AVG_MSB 0x33
#define SX9310_REG_AVG_LSB 0x34
-
#define SX9310_REG_DIFF_MSB 0x35
#define SX9310_REG_DIFF_LSB 0x36
-
#define SX9310_REG_OFFSET_MSB 0x37
#define SX9310_REG_OFFSET_LSB 0x38
-
#define SX9310_REG_SAR_MSB 0x39
#define SX9310_REG_SAR_LSB 0x3a
-
-#define SX9310_REG_I2CADDR 0x40
+#define SX9310_REG_I2C_ADDR 0x40
#define SX9310_REG_PAUSE 0x41
#define SX9310_REG_WHOAMI 0x42
#define SX9310_WHOAMI_VALUE 0x01
#define SX9311_WHOAMI_VALUE 0x02
-
#define SX9310_REG_RESET 0x7f
#define SX9310_SOFT_RESET 0xde
-#define SX9310_SCAN_PERIOD_MASK GENMASK(7, 4)
-#define SX9310_SCAN_PERIOD_SHIFT 4
-
-#define SX9310_COMPSTAT_MASK GENMASK(3, 0)
/* 4 hardware channels, as defined in STAT0: COMB, CS2, CS1 and CS0. */
#define SX9310_NUM_CHANNELS 4
-#define SX9310_CHAN_ENABLED_MASK GENMASK(3, 0)
+static_assert(SX9310_NUM_CHANNELS < BITS_PER_LONG);
struct sx9310_data {
/* Serialize access to registers and channel configuration */
@@ -131,20 +121,24 @@ struct sx9310_data {
struct i2c_client *client;
struct iio_trigger *trig;
struct regmap *regmap;
+ struct regulator_bulk_data supplies[2];
/*
* Last reading of the proximity status for each channel.
* We only send an event to user space when this changes.
*/
- bool prox_stat[SX9310_NUM_CHANNELS];
+ unsigned long chan_prox_stat;
bool trigger_enabled;
- __be16 buffer[SX9310_NUM_CHANNELS +
- 4]; /* 64-bit data + 64-bit timestamp */
+ /* Ensure correct alignment of timestamp when present. */
+ struct {
+ __be16 channels[SX9310_NUM_CHANNELS];
+ s64 ts __aligned(8);
+ } buffer;
/* Remember enabled channels and sample rate during suspend. */
unsigned int suspend_ctrl0;
struct completion completion;
- unsigned int chan_read, chan_event;
- int channel_users[SX9310_NUM_CHANNELS];
- int whoami;
+ unsigned long chan_read;
+ unsigned long chan_event;
+ unsigned int whoami;
};
static const struct iio_event_spec sx9310_events[] = {
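The buffer rework above is the standard IIO recipe for feeding iio_push_to_buffers_with_timestamp(): wrap the scan data and the s64 timestamp in one struct and force the timestamp onto an 8-byte boundary, which a bare __be16 array cannot guarantee. A minimal sketch of the convention (channel count assumed):

#include <linux/types.h>

#define FOO_NUM_CHANNELS	4	/* assumption for the sketch */

struct foo_scan {
	/* Sample data first; the compiler pads as needed ... */
	__be16 channels[FOO_NUM_CHANNELS];
	/* ... then the timestamp, explicitly 8-byte aligned. */
	s64 ts __aligned(8);
};

/* Usage: iio_push_to_buffers_with_timestamp(indio_dev, &scan, timestamp); */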
@@ -251,7 +245,7 @@ static const struct regmap_range sx9310_readable_reg_ranges[] = {
regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SAR_LSB),
- regmap_reg_range(SX9310_REG_I2CADDR, SX9310_REG_WHOAMI),
+ regmap_reg_range(SX9310_REG_I2C_ADDR, SX9310_REG_WHOAMI),
regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
};
@@ -285,15 +279,16 @@ static const struct regmap_config sx9310_regmap_config = {
};
static int sx9310_update_chan_en(struct sx9310_data *data,
- unsigned int chan_read,
- unsigned int chan_event)
+ unsigned long chan_read,
+ unsigned long chan_event)
{
int ret;
+ unsigned long channels = chan_read | chan_event;
- if ((data->chan_read | data->chan_event) != (chan_read | chan_event)) {
+ if ((data->chan_read | data->chan_event) != channels) {
ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
- SX9310_CHAN_ENABLED_MASK,
- chan_read | chan_event);
+ SX9310_REG_PROX_CTRL0_SENSOREN_MASK,
+ channels);
if (ret)
return ret;
}
@@ -328,11 +323,15 @@ static int sx9310_put_event_channel(struct sx9310_data *data, int channel)
static int sx9310_enable_irq(struct sx9310_data *data, unsigned int irq)
{
+ if (!data->client->irq)
+ return 0;
return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, irq);
}
static int sx9310_disable_irq(struct sx9310_data *data, unsigned int irq)
{
+ if (!data->client->irq)
+ return 0;
return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, 0);
}
@@ -342,10 +341,10 @@ static int sx9310_read_prox_data(struct sx9310_data *data,
int ret;
ret = regmap_write(data->regmap, SX9310_REG_SENSOR_SEL, chan->channel);
- if (ret < 0)
+ if (ret)
return ret;
- return regmap_bulk_read(data->regmap, chan->address, val, 2);
+ return regmap_bulk_read(data->regmap, chan->address, val, sizeof(*val));
}
/*
@@ -358,10 +357,10 @@ static int sx9310_wait_for_sample(struct sx9310_data *data)
unsigned int val;
ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &val);
- if (ret < 0)
+ if (ret)
return ret;
- val = (val & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ val = FIELD_GET(SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK, val);
msleep(sx9310_scan_period_table[val]);
@@ -371,22 +370,22 @@ static int sx9310_wait_for_sample(struct sx9310_data *data)
static int sx9310_read_proximity(struct sx9310_data *data,
const struct iio_chan_spec *chan, int *val)
{
- int ret = 0;
+ int ret;
__be16 rawval;
mutex_lock(&data->mutex);
ret = sx9310_get_read_channel(data, chan->channel);
- if (ret < 0)
+ if (ret)
goto out;
ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
- if (ret < 0)
+ if (ret)
goto out_put_channel;
mutex_unlock(&data->mutex);
- if (data->client->irq > 0) {
+ if (data->client->irq) {
ret = wait_for_completion_interruptible(&data->completion);
reinit_completion(&data->completion);
} else {
@@ -395,22 +394,22 @@ static int sx9310_read_proximity(struct sx9310_data *data,
mutex_lock(&data->mutex);
- if (ret < 0)
+ if (ret)
goto out_disable_irq;
ret = sx9310_read_prox_data(data, chan, &rawval);
- if (ret < 0)
+ if (ret)
goto out_disable_irq;
*val = sign_extend32(be16_to_cpu(rawval),
- (chan->address == SX9310_REG_DIFF_MSB ? 11 : 15));
+ chan->address == SX9310_REG_DIFF_MSB ? 11 : 15);
ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
- if (ret < 0)
+ if (ret)
goto out_put_channel;
ret = sx9310_put_read_channel(data, chan->channel);
- if (ret < 0)
+ if (ret)
goto out;
mutex_unlock(&data->mutex);
@@ -430,12 +429,13 @@ out:
static int sx9310_read_samp_freq(struct sx9310_data *data, int *val, int *val2)
{
unsigned int regval;
- int ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+ int ret;
- if (ret < 0)
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+ if (ret)
return ret;
- regval = (regval & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ regval = FIELD_GET(SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK, regval);
*val = sx9310_samp_freq_table[regval].val;
*val2 = sx9310_samp_freq_table[regval].val2;
@@ -482,9 +482,10 @@ static int sx9310_set_samp_freq(struct sx9310_data *data, int val, int val2)
mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
- SX9310_SCAN_PERIOD_MASK,
- i << SX9310_SCAN_PERIOD_SHIFT);
+ ret = regmap_update_bits(
+ data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK,
+ FIELD_PREP(SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK, i));
mutex_unlock(&data->mutex);
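FIELD_GET() and FIELD_PREP() derive the shift from the GENMASK() definition itself, which is why the separate *_SHIFT constant and the manual shifting disappear in these hunks. A worked sketch on the scan-period field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define FOO_CTRL0_SCANPERIOD_MASK	GENMASK(7, 4)

static unsigned int foo_scanperiod_decode(unsigned int reg)
{
	/* Extracts bits 7:4 and right-justifies them, no explicit shift. */
	return FIELD_GET(FOO_CTRL0_SCANPERIOD_MASK, reg);
}

static unsigned int foo_scanperiod_encode(unsigned int idx)
{
	/* Places idx into bits 7:4, ready for regmap_update_bits(). */
	return FIELD_PREP(FOO_CTRL0_SCANPERIOD_MASK, idx);
}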
@@ -515,10 +516,9 @@ static irqreturn_t sx9310_irq_handler(int irq, void *private)
iio_trigger_poll(data->trig);
/*
- * Even if no event is enabled, we need to wake the thread to
- * clear the interrupt state by reading SX9310_REG_IRQ_SRC. It
- * is not possible to do that here because regmap_read takes a
- * mutex.
+ * Even if no event is enabled, we need to wake the thread to clear the
+ * interrupt state by reading SX9310_REG_IRQ_SRC.
+ * It is not possible to do that here because regmap_read takes a mutex.
*/
return IRQ_WAKE_THREAD;
}
@@ -529,32 +529,32 @@ static void sx9310_push_events(struct iio_dev *indio_dev)
unsigned int val, chan;
struct sx9310_data *data = iio_priv(indio_dev);
s64 timestamp = iio_get_time_ns(indio_dev);
+ unsigned long prox_changed;
/* Read proximity state on all channels */
ret = regmap_read(data->regmap, SX9310_REG_STAT0, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(&data->client->dev, "i2c transfer error in irq\n");
return;
}
- for (chan = 0; chan < SX9310_NUM_CHANNELS; chan++) {
+ /*
+ * Only iterate over channels whose proximity status changed and
+ * that have events enabled.
+ */
+ prox_changed = (data->chan_prox_stat ^ val) & data->chan_event;
+
+ for_each_set_bit(chan, &prox_changed, SX9310_NUM_CHANNELS) {
int dir;
u64 ev;
- bool new_prox = val & BIT(chan);
- if (!(data->chan_event & BIT(chan)))
- continue;
- if (new_prox == data->prox_stat[chan])
- /* No change on this channel. */
- continue;
-
- dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ dir = (val & BIT(chan)) ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
IIO_EV_TYPE_THRESH, dir);
iio_push_event(indio_dev, ev, timestamp);
- data->prox_stat[chan] = new_prox;
}
+ data->chan_prox_stat = val;
}
static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
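Computing (old ^ new) & enabled up front and walking the result with for_each_set_bit() visits only the channels that both toggled and have events enabled, replacing the per-channel if/continue chain. Sketch of the idiom (channel count assumed):

#include <linux/bitops.h>
#include <linux/printk.h>

#define FOO_NUM_CHANNELS	4	/* assumption for the sketch */

static void foo_report_changes(unsigned long old_stat, unsigned long new_stat,
			       unsigned long enabled)
{
	unsigned long changed = (old_stat ^ new_stat) & enabled;
	unsigned int chan;

	/* Iterates set bits only; untouched channels cost nothing. */
	for_each_set_bit(chan, &changed, FOO_NUM_CHANNELS)
		pr_debug("chan %u now %s\n", chan,
			 (new_stat & BIT(chan)) ? "near" : "far");
}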
@@ -567,12 +567,12 @@ static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
mutex_lock(&data->mutex);
ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
- if (ret < 0) {
+ if (ret) {
dev_err(&data->client->dev, "i2c transfer error in irq\n");
goto out;
}
- if (val & SX9310_EVENT_IRQ)
+ if (val & (SX9310_FAR_IRQ | SX9310_CLOSE_IRQ))
sx9310_push_events(indio_dev);
if (val & SX9310_CONVDONE_IRQ)
@@ -600,6 +600,7 @@ static int sx9310_write_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir, int state)
{
struct sx9310_data *data = iio_priv(indio_dev);
+ unsigned int eventirq = SX9310_FAR_IRQ | SX9310_CLOSE_IRQ;
int ret;
/* If the state hasn't changed, there's nothing to do. */
@@ -609,20 +610,20 @@ static int sx9310_write_event_config(struct iio_dev *indio_dev,
mutex_lock(&data->mutex);
if (state) {
ret = sx9310_get_event_channel(data, chan->channel);
- if (ret < 0)
+ if (ret)
goto out_unlock;
if (!(data->chan_event & ~BIT(chan->channel))) {
- ret = sx9310_enable_irq(data, SX9310_EVENT_IRQ);
- if (ret < 0)
+ ret = sx9310_enable_irq(data, eventirq);
+ if (ret)
sx9310_put_event_channel(data, chan->channel);
}
} else {
ret = sx9310_put_event_channel(data, chan->channel);
- if (ret < 0)
+ if (ret)
goto out_unlock;
if (!data->chan_event) {
- ret = sx9310_disable_irq(data, SX9310_EVENT_IRQ);
- if (ret < 0)
+ ret = sx9310_disable_irq(data, eventirq);
+ if (ret)
sx9310_get_event_channel(data, chan->channel);
}
}
@@ -634,7 +635,7 @@ out_unlock:
static struct attribute *sx9310_attributes[] = {
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- NULL,
+ NULL
};
static const struct attribute_group sx9310_attribute_group = {
@@ -661,7 +662,7 @@ static int sx9310_set_trigger_state(struct iio_trigger *trig, bool state)
ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
else if (!data->chan_read)
ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
- if (ret < 0)
+ if (ret)
goto out;
data->trigger_enabled = state;
@@ -690,13 +691,13 @@ static irqreturn_t sx9310_trigger_handler(int irq, void *private)
indio_dev->masklength) {
ret = sx9310_read_prox_data(data, &indio_dev->channels[bit],
&val);
- if (ret < 0)
+ if (ret)
goto out;
- data->buffer[i++] = val;
+ data->buffer.channels[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
pf->timestamp);
out:
@@ -710,13 +711,13 @@ out:
static int sx9310_buffer_preenable(struct iio_dev *indio_dev)
{
struct sx9310_data *data = iio_priv(indio_dev);
- unsigned int channels = 0;
+ unsigned long channels = 0;
int bit, ret;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength)
- channels |= BIT(indio_dev->channels[bit].channel);
+ __set_bit(indio_dev->channels[bit].channel, &channels);
ret = sx9310_update_chan_en(data, channels, data->chan_event);
mutex_unlock(&data->mutex);
@@ -744,89 +745,77 @@ struct sx9310_reg_default {
u8 def;
};
-#define SX_INIT(_reg, _def) \
- { \
- .reg = SX9310_REG_##_reg, \
- .def = _def, \
- }
-
static const struct sx9310_reg_default sx9310_default_regs[] = {
- SX_INIT(IRQ_MSK, 0x00),
- SX_INIT(IRQ_FUNC, 0x00),
+ { SX9310_REG_IRQ_MSK, 0x00 },
+ { SX9310_REG_IRQ_FUNC, 0x00 },
/*
* The lower 4 bits should not be set, as they enable sensor measurements.
* Turning the detection on before the configuration values are set to
* good values can cause the device to return erroneous readings.
*/
- SX_INIT(PROX_CTRL0, SX9310_REG_PROX_CTRL0_PROXSTAT2),
- SX_INIT(PROX_CTRL1, 0x00),
- SX_INIT(PROX_CTRL2, SX9310_REG_PROX_CTRL2_COMBMODE_ALL |
- SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC),
- SX_INIT(PROX_CTRL3, SX9310_REG_PROX_CTRL3_GAIN0_X8 |
- SX9310_REG_PROX_CTRL3_GAIN12_X4),
- SX_INIT(PROX_CTRL4, SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST),
- SX_INIT(PROX_CTRL5, SX9310_REG_PROX_CTRL5_RANGE_SMALL |
- SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 |
- SX9310_REG_PROX_CTRL5_RAWFILT_1P25),
- SX_INIT(PROX_CTRL6, SX9310_REG_PROX_CTRL6_COMP_COMMON),
- SX_INIT(PROX_CTRL7, SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 |
- SX9310_REG_PROX_CTRL7_AVGPOSFILT_512),
- SX_INIT(PROX_CTRL8, SX9310_REG_PROX_CTRL8_9_PTHRESH_96 |
- SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500),
- SX_INIT(PROX_CTRL9, SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 |
- SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900),
- SX_INIT(PROX_CTRL10, SX9310_REG_PROX_CTRL10_HYST_6PCT |
- SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 |
- SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8),
- SX_INIT(PROX_CTRL11, 0x00),
- SX_INIT(PROX_CTRL12, 0x00),
- SX_INIT(PROX_CTRL13, 0x00),
- SX_INIT(PROX_CTRL14, 0x00),
- SX_INIT(PROX_CTRL15, 0x00),
- SX_INIT(PROX_CTRL16, 0x00),
- SX_INIT(PROX_CTRL17, 0x00),
- SX_INIT(PROX_CTRL18, 0x00),
- SX_INIT(PROX_CTRL19, 0x00),
- SX_INIT(SAR_CTRL0, SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES |
- SX9310_REG_SAR_CTRL0_SARHYST_8),
- SX_INIT(SAR_CTRL1, SX9310_REG_SAR_CTRL1_SLOPE(10781250)),
- SX_INIT(SAR_CTRL2, SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT),
+ { SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL0_SCANPERIOD_15MS },
+ { SX9310_REG_PROX_CTRL1, 0x00 },
+ { SX9310_REG_PROX_CTRL2, SX9310_REG_PROX_CTRL2_COMBMODE_CS1_CS2 |
+ SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC },
+ { SX9310_REG_PROX_CTRL3, SX9310_REG_PROX_CTRL3_GAIN0_X8 |
+ SX9310_REG_PROX_CTRL3_GAIN12_X4 },
+ { SX9310_REG_PROX_CTRL4, SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST },
+ { SX9310_REG_PROX_CTRL5, SX9310_REG_PROX_CTRL5_RANGE_SMALL |
+ SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 |
+ SX9310_REG_PROX_CTRL5_RAWFILT_1P25 },
+ { SX9310_REG_PROX_CTRL6, SX9310_REG_PROX_CTRL6_AVGTHRESH_DEFAULT },
+ { SX9310_REG_PROX_CTRL7, SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 |
+ SX9310_REG_PROX_CTRL7_AVGPOSFILT_512 },
+ { SX9310_REG_PROX_CTRL8, SX9310_REG_PROX_CTRL8_9_PTHRESH_96 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500 },
+ { SX9310_REG_PROX_CTRL9, SX9310_REG_PROX_CTRL8_9_PTHRESH_28 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900 },
+ { SX9310_REG_PROX_CTRL10, SX9310_REG_PROX_CTRL10_HYST_6PCT |
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_2 },
+ { SX9310_REG_PROX_CTRL11, 0x00 },
+ { SX9310_REG_PROX_CTRL12, 0x00 },
+ { SX9310_REG_PROX_CTRL13, 0x00 },
+ { SX9310_REG_PROX_CTRL14, 0x00 },
+ { SX9310_REG_PROX_CTRL15, 0x00 },
+ { SX9310_REG_PROX_CTRL16, 0x00 },
+ { SX9310_REG_PROX_CTRL17, 0x00 },
+ { SX9310_REG_PROX_CTRL18, 0x00 },
+ { SX9310_REG_PROX_CTRL19, 0x00 },
+ { SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES |
+ SX9310_REG_SAR_CTRL0_SARHYST_8 },
+ { SX9310_REG_SAR_CTRL1, SX9310_REG_SAR_CTRL1_SLOPE(10781250) },
+ { SX9310_REG_SAR_CTRL2, SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT },
};
/* Activate all channels and perform an initial compensation. */
static int sx9310_init_compensation(struct iio_dev *indio_dev)
{
struct sx9310_data *data = iio_priv(indio_dev);
- int i, ret;
+ int ret;
unsigned int val;
unsigned int ctrl0;
ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &ctrl0);
- if (ret < 0)
+ if (ret)
return ret;
/* run the compensation phase on all channels */
ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
- ctrl0 | SX9310_REG_PROX_CTRL0_EN_MASK);
- if (ret < 0)
+ ctrl0 | SX9310_REG_PROX_CTRL0_SENSOREN_MASK);
+ if (ret)
return ret;
- for (i = 100; i >= 0; i--) {
- msleep(20);
- ret = regmap_read(data->regmap, SX9310_REG_STAT1, &val);
- if (ret < 0)
- goto out;
- if (!(val & SX9310_COMPSTAT_MASK))
- break;
- }
-
- if (i < 0) {
- dev_err(&data->client->dev,
- "initial compensation timed out: 0x%02x", val);
- ret = -ETIMEDOUT;
+ ret = regmap_read_poll_timeout(data->regmap, SX9310_REG_STAT1, val,
+ !(val & SX9310_REG_STAT1_COMPSTAT_MASK),
+ 20000, 2000000);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x\n",
+ val);
+ return ret;
}
-out:
regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
return ret;
}
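regmap_read_poll_timeout() collapses the removed msleep()-and-retry loop into one macro: it re-reads the register until the condition expression holds, sleeping sleep_us between reads, and returns -ETIMEDOUT once timeout_us expires. An equivalent sketch with the bounds used above:

#include <linux/bits.h>
#include <linux/regmap.h>

#define FOO_REG_STAT1		0x02		/* register from the hunk above */
#define FOO_COMPSTAT_MASK	GENMASK(3, 0)

/* Re-reads STAT1 every 20 ms until the compensation bits clear,
 * giving up with -ETIMEDOUT after 2 s. */
static int foo_wait_compensation(struct regmap *regmap)
{
	unsigned int val;

	return regmap_read_poll_timeout(regmap, FOO_REG_STAT1, val,
					!(val & FOO_COMPSTAT_MASK),
					20000, 2000000);
}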
@@ -839,21 +828,21 @@ static int sx9310_init_device(struct iio_dev *indio_dev)
unsigned int i, val;
ret = regmap_write(data->regmap, SX9310_REG_RESET, SX9310_SOFT_RESET);
- if (ret < 0)
+ if (ret)
return ret;
usleep_range(1000, 2000); /* power-up time is ~1ms. */
/* Clear reset interrupt state by reading SX9310_REG_IRQ_SRC. */
ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
- if (ret < 0)
+ if (ret)
return ret;
/* Program some sane defaults. */
for (i = 0; i < ARRAY_SIZE(sx9310_default_regs); i++) {
initval = &sx9310_default_regs[i];
ret = regmap_write(data->regmap, initval->reg, initval->def);
- if (ret < 0)
+ if (ret)
return ret;
}
@@ -862,24 +851,15 @@ static int sx9310_init_device(struct iio_dev *indio_dev)
static int sx9310_set_indio_dev_name(struct device *dev,
struct iio_dev *indio_dev,
- const struct i2c_device_id *id, int whoami)
+ unsigned int whoami)
{
- const struct acpi_device_id *acpi_id;
-
- /* id will be NULL when enumerated via ACPI */
- if (id) {
- if (id->driver_data != whoami)
- dev_err(dev, "WHOAMI does not match i2c_device_id: %s",
- id->name);
- } else if (ACPI_HANDLE(dev)) {
- acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!acpi_id)
- return -ENODEV;
- if (acpi_id->driver_data != whoami)
- dev_err(dev, "WHOAMI does not match acpi_device_id: %s",
- acpi_id->id);
- } else
+	unsigned long ddata;
+
+ ddata = (uintptr_t)device_get_match_data(dev);
+ if (ddata != whoami) {
+ dev_err(dev, "WHOAMI does not match device data: %u\n", whoami);
return -ENODEV;
+ }
switch (whoami) {
case SX9310_WHOAMI_VALUE:
@@ -889,26 +869,35 @@ static int sx9310_set_indio_dev_name(struct device *dev,
indio_dev->name = "sx9311";
break;
default:
- dev_err(dev, "unexpected WHOAMI response: %u", whoami);
+ dev_err(dev, "unexpected WHOAMI response: %u\n", whoami);
return -ENODEV;
}
return 0;
}
-static int sx9310_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static void sx9310_regulator_disable(void *_data)
+{
+ struct sx9310_data *data = _data;
+
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+}
+
+static int sx9310_probe(struct i2c_client *client)
{
int ret;
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct sx9310_data *data;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (indio_dev == NULL)
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
data->client = client;
+ data->supplies[0].supply = "vdd";
+ data->supplies[1].supply = "svdd";
mutex_init(&data->mutex);
init_completion(&data->completion);
@@ -916,19 +905,32 @@ static int sx9310_probe(struct i2c_client *client,
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies), data->supplies);
+ if (ret)
+ return ret;
+ /* Must wait for Tpor time after initial power up */
+ usleep_range(1000, 1100);
+
+ ret = devm_add_action_or_reset(dev, sx9310_regulator_disable, data);
+ if (ret)
+ return ret;
+
ret = regmap_read(data->regmap, SX9310_REG_WHOAMI, &data->whoami);
- if (ret < 0) {
- dev_err(&client->dev, "error in reading WHOAMI register: %d",
- ret);
+ if (ret) {
+ dev_err(dev, "error in reading WHOAMI register: %d", ret);
return ret;
}
- ret = sx9310_set_indio_dev_name(&client->dev, indio_dev, id,
- data->whoami);
- if (ret < 0)
+ ret = sx9310_set_indio_dev_name(dev, indio_dev, data->whoami);
+ if (ret)
return ret;
- ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev));
indio_dev->channels = sx9310_channels;
indio_dev->num_channels = ARRAY_SIZE(sx9310_channels);
indio_dev->info = &sx9310_info;
@@ -936,41 +938,41 @@ static int sx9310_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
ret = sx9310_init_device(indio_dev);
- if (ret < 0)
+ if (ret)
return ret;
if (client->irq) {
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = devm_request_threaded_irq(dev, client->irq,
sx9310_irq_handler,
sx9310_irq_thread_handler,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ IRQF_ONESHOT,
"sx9310_event", indio_dev);
- if (ret < 0)
+ if (ret)
return ret;
- data->trig =
- devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
- indio_dev->name, indio_dev->id);
+ data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
if (!data->trig)
return -ENOMEM;
- data->trig->dev.parent = &client->dev;
+ data->trig->dev.parent = dev;
data->trig->ops = &sx9310_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
- ret = devm_iio_trigger_register(&client->dev, data->trig);
+ ret = devm_iio_trigger_register(dev, data->trig);
if (ret)
return ret;
}
- ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
sx9310_trigger_handler,
&sx9310_buffer_setup_ops);
- if (ret < 0)
+ if (ret)
return ret;
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static int __maybe_unused sx9310_suspend(struct device *dev)
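regulator_bulk_enable() has no devm_ variant, so the probe path pairs it with devm_add_action_or_reset(): the disable callback runs when probe fails or the driver unbinds, in reverse order with the other devm resources, and the _or_reset() form invokes it immediately should the registration itself fail. Sketch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

struct foo_data {
	struct regulator_bulk_data supplies[2];
};

static void foo_regulator_disable(void *_data)
{
	struct foo_data *data = _data;

	regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
}

static int foo_power_up(struct device *dev, struct foo_data *data)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies),
				    data->supplies);
	if (ret)
		return ret;

	/* From here on, teardown is automatic on error or unbind. */
	return devm_add_action_or_reset(dev, foo_regulator_disable, data);
}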
@@ -985,11 +987,10 @@ static int __maybe_unused sx9310_suspend(struct device *dev)
mutex_lock(&data->mutex);
ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
&data->suspend_ctrl0);
-
if (ret)
goto out;
- ctrl0 = data->suspend_ctrl0 & ~SX9310_REG_PROX_CTRL0_EN_MASK;
+ ctrl0 = data->suspend_ctrl0 & ~SX9310_REG_PROX_CTRL0_SENSOREN_MASK;
ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
if (ret)
goto out;
@@ -1017,10 +1018,11 @@ static int __maybe_unused sx9310_resume(struct device *dev)
out:
mutex_unlock(&data->mutex);
+ if (ret)
+ return ret;
enable_irq(data->client->irq);
-
- return ret;
+ return 0;
}
static const struct dev_pm_ops sx9310_pm_ops = {
@@ -1030,32 +1032,39 @@ static const struct dev_pm_ops sx9310_pm_ops = {
static const struct acpi_device_id sx9310_acpi_match[] = {
{ "STH9310", SX9310_WHOAMI_VALUE },
{ "STH9311", SX9311_WHOAMI_VALUE },
- {},
+ {}
};
MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
static const struct of_device_id sx9310_of_match[] = {
- { .compatible = "semtech,sx9310" },
- { .compatible = "semtech,sx9311" },
- {},
+ { .compatible = "semtech,sx9310", (void *)SX9310_WHOAMI_VALUE },
+ { .compatible = "semtech,sx9311", (void *)SX9311_WHOAMI_VALUE },
+ {}
};
MODULE_DEVICE_TABLE(of, sx9310_of_match);
static const struct i2c_device_id sx9310_id[] = {
{ "sx9310", SX9310_WHOAMI_VALUE },
{ "sx9311", SX9311_WHOAMI_VALUE },
- {},
+ {}
};
MODULE_DEVICE_TABLE(i2c, sx9310_id);
static struct i2c_driver sx9310_driver = {
.driver = {
.name = "sx9310",
- .acpi_match_table = ACPI_PTR(sx9310_acpi_match),
- .of_match_table = of_match_ptr(sx9310_of_match),
+ .acpi_match_table = sx9310_acpi_match,
+ .of_match_table = sx9310_of_match,
.pm = &sx9310_pm_ops,
+
+ /*
+ * Lots of i2c transfers in probe + over 200 ms waiting in
+ * sx9310_init_compensation() mean a slow probe; prefer async
+ * so we don't delay boot if we're built into the kernel.
+ */
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = sx9310_probe,
+ .probe_new = sx9310_probe,
.id_table = sx9310_id,
};
module_i2c_driver(sx9310_driver);
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index 5fbda9475ba9..235e125aeb3a 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -4,18 +4,19 @@
*
* Copyright (C) 2016 STMicroelectronics Imaging Division.
* Copyright (C) 2018 Song Qiang <songqiang1304521@gmail.com>
+ * Copyright (C) 2020 Ivan Drobyshevskyi <drobyshevskyi@gmail.com>
*
* Datasheet available at
* <https://www.st.com/resource/en/datasheet/vl53l0x.pdf>
*
* Default 7-bit i2c slave address 0x29.
*
- * TODO: FIFO buffer, continuous mode, interrupts, range selection,
- * sensor ID check.
+ * TODO: FIFO buffer, continuous mode, range selection, sensor ID check.
*/
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
@@ -29,14 +30,72 @@
#define VL_REG_SYSRANGE_MODE_TIMED BIT(2)
#define VL_REG_SYSRANGE_MODE_HISTOGRAM BIT(3)
+#define VL_REG_SYSTEM_INTERRUPT_CONFIG_GPIO 0x0A
+#define VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY BIT(2)
+
+#define VL_REG_SYSTEM_INTERRUPT_CLEAR 0x0B
+
#define VL_REG_RESULT_INT_STATUS 0x13
#define VL_REG_RESULT_RANGE_STATUS 0x14
#define VL_REG_RESULT_RANGE_STATUS_COMPLETE BIT(0)
struct vl53l0x_data {
struct i2c_client *client;
+ struct completion completion;
};
+static irqreturn_t vl53l0x_handle_irq(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct vl53l0x_data *data = iio_priv(indio_dev);
+
+ complete(&data->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int vl53l0x_configure_irq(struct i2c_client *client,
+ struct iio_dev *indio_dev)
+{
+ struct vl53l0x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = devm_request_irq(&client->dev, client->irq, vl53l0x_handle_irq,
+ IRQF_TRIGGER_FALLING, indio_dev->name, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "devm_request_irq error: %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ VL_REG_SYSTEM_INTERRUPT_CONFIG_GPIO,
+ VL_REG_SYSTEM_INTERRUPT_GPIO_NEW_SAMPLE_READY);
+ if (ret < 0)
+ dev_err(&client->dev, "failed to configure IRQ: %d\n", ret);
+
+ return ret;
+}
+
+static void vl53l0x_clear_irq(struct vl53l0x_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ VL_REG_SYSTEM_INTERRUPT_CLEAR, 1);
+ if (ret < 0)
+ dev_err(dev, "failed to clear error irq: %d\n", ret);
+
+ ret = i2c_smbus_write_byte_data(data->client,
+ VL_REG_SYSTEM_INTERRUPT_CLEAR, 0);
+ if (ret < 0)
+ dev_err(dev, "failed to clear range irq: %d\n", ret);
+
+ ret = i2c_smbus_read_byte_data(data->client, VL_REG_RESULT_INT_STATUS);
+ if (ret < 0 || ret & 0x07)
+ dev_err(dev, "failed to clear irq: %d\n", ret);
+}
+
static int vl53l0x_read_proximity(struct vl53l0x_data *data,
const struct iio_chan_spec *chan,
int *val)
@@ -50,19 +109,31 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
if (ret < 0)
return ret;
- do {
- ret = i2c_smbus_read_byte_data(client,
- VL_REG_RESULT_RANGE_STATUS);
+ if (data->client->irq) {
+ reinit_completion(&data->completion);
+
+ ret = wait_for_completion_timeout(&data->completion, HZ/10);
if (ret < 0)
return ret;
+ else if (ret == 0)
+ return -ETIMEDOUT;
- if (ret & VL_REG_RESULT_RANGE_STATUS_COMPLETE)
- break;
+ vl53l0x_clear_irq(data);
+ } else {
+ do {
+ ret = i2c_smbus_read_byte_data(client,
+ VL_REG_RESULT_RANGE_STATUS);
+ if (ret < 0)
+ return ret;
- usleep_range(1000, 5000);
- } while (--tries);
- if (!tries)
- return -ETIMEDOUT;
+ if (ret & VL_REG_RESULT_RANGE_STATUS_COMPLETE)
+ break;
+
+ usleep_range(1000, 5000);
+ } while (--tries);
+ if (!tries)
+ return -ETIMEDOUT;
+ }
ret = i2c_smbus_read_i2c_block_data(client, VL_REG_RESULT_RANGE_STATUS,
12, buffer);
@@ -140,6 +211,17 @@ static int vl53l0x_probe(struct i2c_client *client)
indio_dev->num_channels = ARRAY_SIZE(vl53l0x_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+	/* Use of the interrupt is optional */
+ if (client->irq) {
+ int ret;
+
+ init_completion(&data->completion);
+
+ ret = vl53l0x_configure_irq(client, indio_dev);
+ if (ret)
+ return ret;
+ }
+
return devm_iio_device_register(&client->dev, indio_dev);
}
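The interrupt path above blocks on a completion that the IRQ handler signals, while the no-IRQ path keeps the original register-polling loop; wait_for_completion_timeout() returns the remaining jiffies on success and 0 on timeout. A condensed sketch of the split (polling helper assumed):

#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>

struct foo_data {
	struct i2c_client *client;
	struct completion completion;
};

static int foo_poll_status(struct foo_data *data);	/* fallback, as above */

static int foo_wait_ready(struct foo_data *data)
{
	if (data->client->irq) {
		/* The IRQ handler calls complete(&data->completion). */
		if (!wait_for_completion_timeout(&data->completion, HZ / 10))
			return -ETIMEDOUT;
		return 0;
	}
	/* No IRQ wired up: poll the status register instead. */
	return foo_poll_status(data);
}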
diff --git a/drivers/iio/resolver/ad2s1200.c b/drivers/iio/resolver/ad2s1200.c
index 6007abad116b..9746bd935628 100644
--- a/drivers/iio/resolver/ad2s1200.c
+++ b/drivers/iio/resolver/ad2s1200.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
@@ -192,7 +193,7 @@ MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(ad2s1200_of_match),
+ .of_match_table = ad2s1200_of_match,
},
.probe = ad2s1200_probe,
.id_table = ad2s1200_id,
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index 55ff28a0f1c7..3b5ba26d7d86 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -1285,18 +1285,20 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
ret = of_property_read_u32(child, "reg", &sensor.chan);
if (ret) {
dev_err(dev, "reg property must be given for child nodes\n");
- return ret;
+ goto put_child;
}
/* check if we have a valid channel */
if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
sensor.chan > LTC2983_MAX_CHANNELS_NR) {
+ ret = -EINVAL;
dev_err(dev,
"chan:%d must be from 1 to 20\n", sensor.chan);
- return -EINVAL;
+ goto put_child;
} else if (channel_avail_mask & BIT(sensor.chan)) {
+ ret = -EINVAL;
dev_err(dev, "chan:%d already in use\n", sensor.chan);
- return -EINVAL;
+ goto put_child;
}
ret = of_property_read_u32(child, "adi,sensor-type",
@@ -1304,7 +1306,7 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
if (ret) {
dev_err(dev,
"adi,sensor-type property must be given for child nodes\n");
- return ret;
+ goto put_child;
}
dev_dbg(dev, "Create new sensor, type %u, chan %u",
@@ -1334,13 +1336,15 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
} else {
dev_err(dev, "Unknown sensor type %d\n", sensor.type);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (IS_ERR(st->sensors[chan])) {
dev_err(dev, "Failed to create sensor %ld",
PTR_ERR(st->sensors[chan]));
- return PTR_ERR(st->sensors[chan]);
+ ret = PTR_ERR(st->sensors[chan]);
+ goto put_child;
}
/* set generic sensor parameters */
st->sensors[chan]->chan = sensor.chan;
@@ -1351,6 +1355,9 @@ static int ltc2983_parse_dt(struct ltc2983_data *st)
}
return 0;
+put_child:
+ of_node_put(child);
+ return ret;
}
static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
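for_each_available_child_of_node() takes a reference on each child it yields and drops it when advancing to the next one, so any early exit from the loop body must drop the current child's reference itself; that is what the new put_child label centralizes. Sketch of the shape:

#include <linux/device.h>
#include <linux/of.h>

static int foo_parse_dt(struct device *dev)
{
	struct device_node *child;
	int ret = 0;
	u32 reg;

	for_each_available_child_of_node(dev->of_node, child) {
		ret = of_property_read_u32(child, "reg", &reg);
		if (ret)
			goto put_child;	/* the loop won't drop this ref for us */
	}
	return 0;

put_child:
	of_node_put(child);
	return ret;
}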
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 51b812bcff2e..503fe54a0bb9 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -10,7 +10,9 @@
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/of.h>
@@ -58,6 +60,8 @@
/* Control register address - volatile */
#define MLX90632_REG_CONTROL 0x3001 /* Control Register address */
#define MLX90632_CFG_PWR_MASK GENMASK(2, 1) /* PowerMode Mask */
+#define MLX90632_CFG_MTYP_MASK GENMASK(8, 4) /* Meas select Mask */
+
/* PowerModes statuses */
#define MLX90632_PWR_STATUS(ctrl_val) (ctrl_val << 1)
#define MLX90632_PWR_STATUS_HALT MLX90632_PWR_STATUS(0) /* hold */
@@ -65,6 +69,18 @@
#define MLX90632_PWR_STATUS_STEP MLX90632_PWR_STATUS(2) /* step */
#define MLX90632_PWR_STATUS_CONTINUOUS MLX90632_PWR_STATUS(3) /* continuous */
+/* Measurement types */
+#define MLX90632_MTYP_MEDICAL 0
+#define MLX90632_MTYP_EXTENDED 17
+
+/* Measurement type select */
+#define MLX90632_MTYP_STATUS(ctrl_val) (ctrl_val << 4)
+#define MLX90632_MTYP_STATUS_MEDICAL MLX90632_MTYP_STATUS(MLX90632_MTYP_MEDICAL)
+#define MLX90632_MTYP_STATUS_EXTENDED MLX90632_MTYP_STATUS(MLX90632_MTYP_EXTENDED)
+
+/* I2C command register - volatile */
+#define MLX90632_REG_I2C_CMD 0x3005 /* I2C command Register address */
+
/* Device status register - volatile */
#define MLX90632_REG_STATUS 0x3fff /* Device status register */
#define MLX90632_STAT_BUSY BIT(10) /* Device busy indicator */
@@ -78,26 +94,53 @@
#define MLX90632_RAM_2(meas_num) (MLX90632_ADDR_RAM + 3 * meas_num + 1)
#define MLX90632_RAM_3(meas_num) (MLX90632_ADDR_RAM + 3 * meas_num + 2)
+/* Name important RAM_MEAS channels */
+#define MLX90632_RAM_DSP5_EXTENDED_AMBIENT_1 MLX90632_RAM_3(17)
+#define MLX90632_RAM_DSP5_EXTENDED_AMBIENT_2 MLX90632_RAM_3(18)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_1 MLX90632_RAM_1(17)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_2 MLX90632_RAM_2(17)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_3 MLX90632_RAM_1(18)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_4 MLX90632_RAM_2(18)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_5 MLX90632_RAM_1(19)
+#define MLX90632_RAM_DSP5_EXTENDED_OBJECT_6 MLX90632_RAM_2(19)
+
/* Magic constants */
#define MLX90632_ID_MEDICAL 0x0105 /* EEPROM DSPv5 Medical device id */
#define MLX90632_ID_CONSUMER 0x0205 /* EEPROM DSPv5 Consumer device id */
+#define MLX90632_ID_EXTENDED 0x0505 /* EEPROM DSPv5 Extended range device id */
+#define MLX90632_ID_MASK GENMASK(14, 0) /* DSP version and device ID in EE_VERSION */
#define MLX90632_DSP_VERSION 5 /* DSP version */
#define MLX90632_DSP_MASK GENMASK(7, 0) /* DSP version in EE_VERSION */
#define MLX90632_RESET_CMD 0x0006 /* Reset sensor (address or global) */
-#define MLX90632_REF_12 12LL /**< ResCtrlRef value of Ch 1 or Ch 2 */
-#define MLX90632_REF_3 12LL /**< ResCtrlRef value of Channel 3 */
-#define MLX90632_MAX_MEAS_NUM 31 /**< Maximum measurements in list */
-#define MLX90632_SLEEP_DELAY_MS 3000 /**< Autosleep delay */
+#define MLX90632_REF_12 12LL /* ResCtrlRef value of Ch 1 or Ch 2 */
+#define MLX90632_REF_3 12LL /* ResCtrlRef value of Channel 3 */
+#define MLX90632_MAX_MEAS_NUM 31 /* Maximum measurements in list */
+#define MLX90632_SLEEP_DELAY_MS 3000 /* Autosleep delay */
+#define MLX90632_EXTENDED_LIMIT 27000 /* Extended mode raw value limit */
+/**
+ * struct mlx90632_data - private data for the MLX90632 device
+ * @client: I2C client of the device
+ * @lock: Internal mutex for multiple reads for single measurement
+ * @regmap: Regmap of the device
+ * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1.
+ * @mtyp: Measurement type: physical sensor configuration for extended range
+ * calculations
+ * @object_ambient_temperature: Ambient temperature at the object (may differ
+ * from the ambient temperature of the sensor).
+ */
struct mlx90632_data {
struct i2c_client *client;
- struct mutex lock; /* Multiple reads for single measurement */
+ struct mutex lock;
struct regmap *regmap;
u16 emissivity;
+ u8 mtyp;
+ u32 object_ambient_temperature;
};
static const struct regmap_range mlx90632_volatile_reg_range[] = {
regmap_reg_range(MLX90632_REG_I2C_ADDR, MLX90632_REG_CONTROL),
+ regmap_reg_range(MLX90632_REG_I2C_CMD, MLX90632_REG_I2C_CMD),
regmap_reg_range(MLX90632_REG_STATUS, MLX90632_REG_STATUS),
regmap_reg_range(MLX90632_RAM_1(0),
MLX90632_RAM_3(MLX90632_MAX_MEAS_NUM)),
@@ -113,6 +156,7 @@ static const struct regmap_range mlx90632_read_reg_range[] = {
regmap_reg_range(MLX90632_EE_CTRL, MLX90632_EE_I2C_ADDR),
regmap_reg_range(MLX90632_EE_Ha, MLX90632_EE_Hb),
regmap_reg_range(MLX90632_REG_I2C_ADDR, MLX90632_REG_CONTROL),
+ regmap_reg_range(MLX90632_REG_I2C_CMD, MLX90632_REG_I2C_CMD),
regmap_reg_range(MLX90632_REG_STATUS, MLX90632_REG_STATUS),
regmap_reg_range(MLX90632_RAM_1(0),
MLX90632_RAM_3(MLX90632_MAX_MEAS_NUM)),
@@ -173,25 +217,19 @@ static s32 mlx90632_pwr_continuous(struct regmap *regmap)
*/
static int mlx90632_perform_measurement(struct mlx90632_data *data)
{
- int ret, tries = 100;
unsigned int reg_status;
+ int ret;
ret = regmap_update_bits(data->regmap, MLX90632_REG_STATUS,
MLX90632_STAT_DATA_RDY, 0);
if (ret < 0)
return ret;
- while (tries-- > 0) {
- ret = regmap_read(data->regmap, MLX90632_REG_STATUS,
- &reg_status);
- if (ret < 0)
- return ret;
- if (reg_status & MLX90632_STAT_DATA_RDY)
- break;
- usleep_range(10000, 11000);
- }
+ ret = regmap_read_poll_timeout(data->regmap, MLX90632_REG_STATUS, reg_status,
+ !(reg_status & MLX90632_STAT_DATA_RDY), 10000,
+ 100 * 10000);
- if (tries < 0) {
+ if (ret < 0) {
dev_err(&data->client->dev, "data not ready");
return -ETIMEDOUT;
}
@@ -199,6 +237,26 @@ static int mlx90632_perform_measurement(struct mlx90632_data *data)
return (reg_status & MLX90632_STAT_CYCLE_POS) >> 2;
}
+static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
+{
+ int ret;
+
+ if ((type != MLX90632_MTYP_MEDICAL) && (type != MLX90632_MTYP_EXTENDED))
+ return -EINVAL;
+
+ ret = regmap_write(regmap, MLX90632_REG_I2C_CMD, MLX90632_RESET_CMD);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
+ (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
+ (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
+ if (ret < 0)
+ return ret;
+
+ return mlx90632_pwr_continuous(regmap);
+}
+
static int mlx90632_channel_new_select(int perform_ret, uint8_t *channel_new,
uint8_t *channel_old)
{
@@ -300,6 +358,97 @@ read_unlock:
return ret;
}
+static int mlx90632_read_ambient_raw_extended(struct regmap *regmap,
+ s16 *ambient_new_raw, s16 *ambient_old_raw)
+{
+ unsigned int read_tmp;
+ int ret;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_AMBIENT_1, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *ambient_new_raw = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_AMBIENT_2, &read_tmp);
+ if (ret < 0)
+ return ret;
+ *ambient_old_raw = (s16)read_tmp;
+
+ return 0;
+}
+
+static int mlx90632_read_object_raw_extended(struct regmap *regmap, s16 *object_new_raw)
+{
+ unsigned int read_tmp;
+ s32 read;
+ int ret;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_1, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_2, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = read - (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_3, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = read - (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_4, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = (read + (s16)read_tmp) / 2;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_5, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = read + (s16)read_tmp;
+
+ ret = regmap_read(regmap, MLX90632_RAM_DSP5_EXTENDED_OBJECT_6, &read_tmp);
+ if (ret < 0)
+ return ret;
+ read = read + (s16)read_tmp;
+
+ if (read > S16_MAX || read < S16_MIN)
+ return -ERANGE;
+
+ *object_new_raw = read;
+
+ return 0;
+}
+
+static int mlx90632_read_all_channel_extended(struct mlx90632_data *data, s16 *object_new_raw,
+ s16 *ambient_new_raw, s16 *ambient_old_raw)
+{
+ s32 ret, meas;
+
+ mutex_lock(&data->lock);
+ ret = mlx90632_set_meas_type(data->regmap, MLX90632_MTYP_EXTENDED);
+ if (ret < 0)
+ goto read_unlock;
+
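+ /*
+ * Poll until the cycle position reaches 19, assumed (per the datasheet)
+ * to be the last position of the extended measurement sequence.
+ */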
+ ret = read_poll_timeout(mlx90632_perform_measurement, meas, meas == 19,
+ 50000, 800000, false, data);
+ if (ret != 0)
+ goto read_unlock;
+
+ ret = mlx90632_read_object_raw_extended(data->regmap, object_new_raw);
+ if (ret < 0)
+ goto read_unlock;
+
+ ret = mlx90632_read_ambient_raw_extended(data->regmap, ambient_new_raw, ambient_old_raw);
+
+read_unlock:
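+ /* Always restore the medical measurement type; the return value is
+ * deliberately ignored so that an earlier error, if any, is preserved.
+ */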
+ (void) mlx90632_set_meas_type(data->regmap, MLX90632_MTYP_MEDICAL);
+
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
static int mlx90632_read_ee_register(struct regmap *regmap, u16 reg_lsb,
s32 *reg_value)
{
@@ -354,9 +503,23 @@ static s64 mlx90632_preprocess_temp_obj(s16 object_new_raw, s16 object_old_raw,
return div64_s64((tmp << 19ULL), 1000LL);
}
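+/*
+ * Extended-mode counterpart of mlx90632_preprocess_temp_obj(): normalize the
+ * raw object reading against the ambient-derived IR reference VR_IR, using
+ * 64-bit fixed-point arithmetic throughout.
+ */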
+static s64 mlx90632_preprocess_temp_obj_extended(s16 object_new_raw, s16 ambient_new_raw,
+ s16 ambient_old_raw, s16 Ka)
+{
+ s64 VR_IR, kKa, tmp;
+
+ kKa = ((s64)Ka * 1000LL) >> 10ULL;
+ VR_IR = (s64)ambient_old_raw * 1000000LL +
+ kKa * div64_s64((s64)ambient_new_raw * 1000LL,
+ MLX90632_REF_3);
+ tmp = div64_s64(
+ div64_s64((s64) object_new_raw * 1000000000000LL, MLX90632_REF_12),
+ VR_IR);
+ return div64_s64(tmp << 19ULL, 1000LL);
+}
+
static s32 mlx90632_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw,
- s32 P_T, s32 P_R, s32 P_G, s32 P_O,
- s16 Gb)
+ s32 P_T, s32 P_R, s32 P_G, s32 P_O, s16 Gb)
{
s64 Asub, Bsub, Ablock, Bblock, Cblock, AMB, sum;
@@ -374,11 +537,11 @@ static s32 mlx90632_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw,
}
static s32 mlx90632_calc_temp_object_iteration(s32 prev_object_temp, s64 object,
- s64 TAdut, s32 Fa, s32 Fb,
+ s64 TAdut, s64 TAdut4, s32 Fa, s32 Fb,
s32 Ga, s16 Ha, s16 Hb,
u16 emissivity)
{
- s64 calcedKsTO, calcedKsTA, ir_Alpha, TAdut4, Alpha_corr;
+ s64 calcedKsTO, calcedKsTA, ir_Alpha, Alpha_corr;
s64 Ha_customer, Hb_customer;
Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL;
@@ -393,36 +556,66 @@ static s32 mlx90632_calc_temp_object_iteration(s32 prev_object_temp, s64 object,
Alpha_corr = emissivity * div64_s64(Alpha_corr, 100000LL);
Alpha_corr = div64_s64(Alpha_corr, 1000LL);
ir_Alpha = div64_s64((s64)object * 10000000LL, Alpha_corr);
- TAdut4 = (div64_s64(TAdut, 10000LL) + 27315) *
- (div64_s64(TAdut, 10000LL) + 27315) *
- (div64_s64(TAdut, 10000LL) + 27315) *
- (div64_s64(TAdut, 10000LL) + 27315);
return (int_sqrt64(int_sqrt64(ir_Alpha * 1000000000000LL + TAdut4))
- 27315 - Hb_customer) * 10;
}
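+/*
+ * (T / scale + 273.15 K)^4 in centi-Kelvin fixed point (27315 == 273.15 K);
+ * shared by the ambient (TAdut4) and reflected (Tr4) temperature paths.
+ */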
+static s64 mlx90632_calc_ta4(s64 TAdut, s64 scale)
+{
+ return (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315) *
+ (div64_s64(TAdut, scale) + 27315);
+}
+
static s32 mlx90632_calc_temp_object(s64 object, s64 ambient, s32 Ea, s32 Eb,
s32 Fa, s32 Fb, s32 Ga, s16 Ha, s16 Hb,
u16 tmp_emi)
{
- s64 kTA, kTA0, TAdut;
+ s64 kTA, kTA0, TAdut, TAdut4;
s64 temp = 25000;
s8 i;
kTA = (Ea * 1000LL) >> 16LL;
kTA0 = (Eb * 1000LL) >> 8LL;
TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 25 * 1000000LL;
+ TAdut4 = mlx90632_calc_ta4(TAdut, 10000LL);
/* Iterations of calculation as described in datasheet */
for (i = 0; i < 5; ++i) {
- temp = mlx90632_calc_temp_object_iteration(temp, object, TAdut,
+ temp = mlx90632_calc_temp_object_iteration(temp, object, TAdut, TAdut4,
Fa, Fb, Ga, Ha, Hb,
tmp_emi);
}
return temp;
}
+static s32 mlx90632_calc_temp_object_extended(s64 object, s64 ambient, s64 reflected,
+ s32 Ea, s32 Eb, s32 Fa, s32 Fb, s32 Ga,
+ s16 Ha, s16 Hb, u16 tmp_emi)
+{
+ s64 kTA, kTA0, TAdut, TAdut4, Tr4, TaTr4;
+ s64 temp = 25000;
+ s8 i;
+
+ kTA = (Ea * 1000LL) >> 16LL;
+ kTA0 = (Eb * 1000LL) >> 8LL;
+ TAdut = div64_s64((ambient - kTA0) * 1000000LL, kTA) + 25 * 1000000LL;
+ Tr4 = mlx90632_calc_ta4(reflected, 10);
+ TAdut4 = mlx90632_calc_ta4(TAdut, 10000LL);
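+ /*
+ * Emissivity-weighted mix of the reflected temperature (Tr4) and the
+ * ambient (TAdut4); tmp_emi is in thousandths, hence the * 1000.
+ */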
+ TaTr4 = Tr4 - div64_s64(Tr4 - TAdut4, tmp_emi) * 1000;
+
+ /* Iterations of calculation as described in datasheet */
+ for (i = 0; i < 5; ++i) {
+ temp = mlx90632_calc_temp_object_iteration(temp, object, TAdut, TaTr4,
+ Fa / 2, Fb, Ga, Ha, Hb,
+ tmp_emi);
+ }
+
+ return temp;
+}
+
static int mlx90632_calc_object_dsp105(struct mlx90632_data *data, int *val)
{
s32 ret;
@@ -470,6 +663,26 @@ static int mlx90632_calc_object_dsp105(struct mlx90632_data *data, int *val)
if (ret < 0)
return ret;
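+ /*
+ * If the medical-range reading clipped above MLX90632_EXTENDED_LIMIT and
+ * the EEPROM calibration supports it, repeat the measurement in extended
+ * mode and use the extended calculation path instead.
+ */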
+ if (object_new_raw > MLX90632_EXTENDED_LIMIT &&
+ data->mtyp == MLX90632_MTYP_EXTENDED) {
+ ret = mlx90632_read_all_channel_extended(data, &object_new_raw,
+ &ambient_new_raw, &ambient_old_raw);
+ if (ret < 0)
+ return ret;
+
+ /* Use extended mode calculations */
+ ambient = mlx90632_preprocess_temp_amb(ambient_new_raw,
+ ambient_old_raw, Gb);
+ object = mlx90632_preprocess_temp_obj_extended(object_new_raw,
+ ambient_new_raw,
+ ambient_old_raw, Ka);
+ *val = mlx90632_calc_temp_object_extended(object, ambient,
+ data->object_ambient_temperature,
+ Ea, Eb, Fa, Fb, Ga,
+ Ha, Hb, data->emissivity);
+ return 0;
+ }
+
ambient = mlx90632_preprocess_temp_amb(ambient_new_raw,
ambient_old_raw, Gb);
object = mlx90632_preprocess_temp_obj(object_new_raw,
@@ -548,7 +761,9 @@ static int mlx90632_read_raw(struct iio_dev *indio_dev,
*val2 = data->emissivity * 1000;
}
return IIO_VAL_INT_PLUS_MICRO;
-
+ case IIO_CHAN_INFO_CALIBAMBIENT:
+ *val = data->object_ambient_temperature;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -568,6 +783,9 @@ static int mlx90632_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
data->emissivity = val * 1000 + val2 / 1000;
return 0;
+ case IIO_CHAN_INFO_CALIBAMBIENT:
+ data->object_ambient_temperature = val;
+ return 0;
default:
return -EINVAL;
}
@@ -585,7 +803,7 @@ static const struct iio_chan_spec mlx90632_channels[] = {
.modified = 1,
.channel2 = IIO_MOD_TEMP_OBJECT,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_CALIBEMISSIVITY),
+ BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) | BIT(IIO_CHAN_INFO_CALIBAMBIENT),
},
};
@@ -643,6 +861,7 @@ static int mlx90632_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
mlx90632->client = client;
mlx90632->regmap = regmap;
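+ /* Default to the medical measurement type; the EEPROM probe below may switch to extended */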
+ mlx90632->mtyp = MLX90632_MTYP_MEDICAL;
mutex_init(&mlx90632->lock);
indio_dev->name = id->name;
@@ -662,15 +881,20 @@ static int mlx90632_probe(struct i2c_client *client,
dev_err(&client->dev, "read of version failed: %d\n", ret);
return ret;
}
+ read = read & MLX90632_ID_MASK;
if (read == MLX90632_ID_MEDICAL) {
dev_dbg(&client->dev,
"Detected Medical EEPROM calibration %x\n", read);
} else if (read == MLX90632_ID_CONSUMER) {
dev_dbg(&client->dev,
"Detected Consumer EEPROM calibration %x\n", read);
+ } else if (read == MLX90632_ID_EXTENDED) {
+ dev_dbg(&client->dev,
+ "Detected Extended range EEPROM calibration %x\n", read);
+ mlx90632->mtyp = MLX90632_MTYP_EXTENDED;
} else if ((read & MLX90632_DSP_MASK) == MLX90632_DSP_VERSION) {
dev_dbg(&client->dev,
- "Detected Unknown EEPROM calibration %x\n", read);
+ "Detected Unknown EEPROM calibration %x\n", read);
} else {
dev_err(&client->dev,
"Wrong DSP version %x (expected %x)\n",
@@ -679,6 +903,7 @@ static int mlx90632_probe(struct i2c_client *client,
}
mlx90632->emissivity = 1000;
+ mlx90632->object_ambient_temperature = 25000; /* 25 degrees Celsius, in milli degrees */
pm_runtime_disable(&client->dev);
ret = pm_runtime_set_active(&client->dev);
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index f90fe9e5617b..ad2b35c65548 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/bitops.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -578,7 +578,7 @@ MODULE_DEVICE_TABLE(i2c, tmp007_id);
static struct i2c_driver tmp007_driver = {
.driver = {
.name = "tmp007",
- .of_match_table = of_match_ptr(tmp007_of_match),
+ .of_match_table = tmp007_of_match,
.pm = &tmp007_pm_ops,
},
.probe = tmp007_probe,
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 2c631a1ca33b..bbfbad9a8767 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/stat.h>
@@ -222,7 +223,7 @@ static struct i2c_driver tsys01_driver = {
.id_table = tsys01_id,
.driver = {
.name = "tsys01",
- .of_match_table = of_match_ptr(tsys01_of_match),
+ .of_match_table = tsys01_of_match,
},
};
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 91b023341b77..32a51432ec4f 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -48,6 +48,7 @@ config INFINIBAND_ON_DEMAND_PAGING
depends on INFINIBAND_USER_MEM
select MMU_NOTIFIER
select INTERVAL_TREE
+ select HMM_MIRROR
default y
help
On demand paging support for the InfiniBand subsystem.
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 24cb71a16a28..ccf2670ef45e 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -17,7 +17,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
-ib_cm-y := cm.o
+ib_cm-y := cm.o cm_trace.o
iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 3a98439bba83..0abce004a959 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -647,13 +647,12 @@ static void process_one_req(struct work_struct *_work)
req->callback = NULL;
spin_lock_bh(&lock);
+ /*
+ * Although the work will normally have been canceled by the workqueue,
+ * it can still be requeued as long as it is on the req_list.
+ */
+ cancel_delayed_work(&req->work);
if (!list_empty(&req->list)) {
- /*
- * Although the work will normally have been canceled by the
- * workqueue, it can still be requeued as long as it is on the
- * req_list.
- */
- cancel_delayed_work(&req->work);
list_del_init(&req->list);
kfree(req);
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 5a76611e684a..8017c40dd110 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -133,7 +133,11 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
}
static const char * const gid_type_str[] = {
+ /* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
+ * user space compatibility reasons.
+ */
[IB_GID_TYPE_IB] = "IB/RoCE v1",
+ [IB_GID_TYPE_ROCE] = "IB/RoCE v1",
[IB_GID_TYPE_ROCE_UDP_ENCAP] = "RoCE v2",
};
@@ -1220,7 +1224,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
{
- const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
+ const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
struct ib_gid_table *table;
unsigned long flags;
@@ -1244,6 +1248,67 @@ done:
EXPORT_SYMBOL(rdma_get_gid_attr);
/**
+ * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
+ * @device: The device to query.
+ * @entries: Entries where GID entries are returned.
+ * @max_entries: Maximum number of entries that can be returned.
+ * The entries array must be allocated to hold max_entries entries.
+ *
+ * Returns the number of entries on success or an appropriate error code.
+ */
+ssize_t rdma_query_gid_table(struct ib_device *device,
+ struct ib_uverbs_gid_entry *entries,
+ size_t max_entries)
+{
+ const struct ib_gid_attr *gid_attr;
+ ssize_t num_entries = 0, ret;
+ struct ib_gid_table *table;
+ unsigned int port_num, i;
+ struct net_device *ndev;
+ unsigned long flags;
+
+ rdma_for_each_port(device, port_num) {
+ if (!rdma_ib_or_roce(device, port_num))
+ continue;
+
+ table = rdma_gid_table(device, port_num);
+ read_lock_irqsave(&table->rwlock, flags);
+ for (i = 0; i < table->sz; i++) {
+ if (!is_gid_entry_valid(table->data_vec[i]))
+ continue;
+ if (num_entries >= max_entries) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ gid_attr = &table->data_vec[i]->attr;
+
+ memcpy(&entries->gid, &gid_attr->gid,
+ sizeof(gid_attr->gid));
+ entries->gid_index = gid_attr->index;
+ entries->port_num = gid_attr->port_num;
+ entries->gid_type = gid_attr->gid_type;
+ ndev = rcu_dereference_protected(
+ gid_attr->ndev,
+ lockdep_is_held(&table->rwlock));
+ if (ndev)
+ entries->netdev_ifindex = ndev->ifindex;
+
+ num_entries++;
+ entries++;
+ }
+ read_unlock_irqrestore(&table->rwlock, flags);
+ }
+
+ return num_entries;
+err:
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_query_gid_table);
+
+/**
* rdma_put_gid_attr - Release reference to the GID attribute
* @attr: Pointer to the GID attribute whose reference
* needs to be released.
@@ -1299,7 +1364,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
struct ib_gid_table_entry *entry =
container_of(attr, struct ib_gid_table_entry, attr);
struct ib_device *device = entry->attr.device;
- struct net_device *ndev = ERR_PTR(-ENODEV);
+ struct net_device *ndev = ERR_PTR(-EINVAL);
u8 port_num = entry->attr.port_num;
struct ib_gid_table *table;
unsigned long flags;
@@ -1311,8 +1376,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
valid = is_gid_entry_valid(table->data_vec[attr->index]);
if (valid) {
ndev = rcu_dereference(attr->ndev);
- if (!ndev ||
- (ndev && ((READ_ONCE(ndev->flags) & IFF_UP) == 0)))
+ if (!ndev)
ndev = ERR_PTR(-ENODEV);
}
read_unlock_irqrestore(&table->rwlock, flags);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index fbc28f1a8b92..5740d1ba3568 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -27,6 +27,7 @@
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"
+#include "cm_trace.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
@@ -201,7 +202,6 @@ static struct attribute *cm_counter_default_attrs[] = {
struct cm_port {
struct cm_device *cm_dev;
struct ib_mad_agent *mad_agent;
- struct kobject port_obj;
u8 port_num;
struct list_head cm_priv_prim_list;
struct list_head cm_priv_altr_list;
@@ -1563,6 +1563,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
+ trace_icm_send_req(&cm_id_priv->id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
if (ret) {
@@ -1610,6 +1611,9 @@ static int cm_issue_rej(struct cm_port *port,
IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
}
+ trace_icm_issue_rej(
+ IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
+ IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
@@ -1961,6 +1965,7 @@ static void cm_dup_req_handler(struct cm_work *work,
}
spin_unlock_irq(&cm_id_priv->lock);
+ trace_icm_send_dup_req(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
@@ -2124,8 +2129,7 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
- pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id));
+ trace_icm_no_listener_err(&cm_id_priv->id);
cm_id_priv->id.state = IB_CM_IDLE;
ret = -EINVAL;
goto destroy;
@@ -2274,8 +2278,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
- pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
+ trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2289,6 +2292,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
+ trace_icm_send_rep(cm_id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2348,8 +2352,7 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
- pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
- be32_to_cpu(cm_id->local_id), cm_id->state);
+ trace_icm_send_cm_rtu_err(cm_id);
ret = -EINVAL;
goto error;
}
@@ -2361,6 +2364,7 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
+ trace_icm_send_rtu(cm_id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -2442,6 +2446,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
goto unlock;
spin_unlock_irq(&cm_id_priv->lock);
+ trace_icm_send_dup_rep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
@@ -2465,7 +2470,7 @@ static int cm_rep_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
- pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
+ trace_icm_remote_no_priv_err(
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
return -EINVAL;
}
@@ -2479,11 +2484,10 @@ static int cm_rep_handler(struct cm_work *work)
break;
default:
ret = -EINVAL;
- pr_debug(
- "%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
- __func__, cm_id_priv->id.state,
+ trace_icm_rep_unknown_err(
IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
- IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
+ IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
+ cm_id_priv->id.state);
spin_unlock_irq(&cm_id_priv->lock);
goto error;
}
@@ -2500,7 +2504,7 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
- pr_debug("%s: Failed to insert remote id %d\n", __func__,
+ trace_icm_insert_failed_err(
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
goto error;
}
@@ -2517,9 +2521,8 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
- pr_debug(
- "%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
- __func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
+ trace_icm_staleconn_err(
+ IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
if (cur_cm_id_priv) {
@@ -2646,9 +2649,7 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
return -EINVAL;
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_dreq_skipped(&cm_id_priv->id);
return -EINVAL;
}
@@ -2667,6 +2668,7 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
+ trace_icm_send_dreq(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
@@ -2722,10 +2724,7 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
return -EINVAL;
if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
- pr_debug(
- "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_send_drep_err(&cm_id_priv->id);
kfree(private_data);
return -EINVAL;
}
@@ -2740,6 +2739,7 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
+ trace_icm_send_drep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_free_msg(msg);
@@ -2789,6 +2789,9 @@ static int cm_issue_drep(struct cm_port *port,
IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
+ trace_icm_issue_drep(
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+ IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
@@ -2810,9 +2813,8 @@ static int cm_dreq_handler(struct cm_work *work)
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
- pr_debug(
- "%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
- __func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
+ trace_icm_no_priv_err(
+ IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
return -EINVAL;
}
@@ -2858,9 +2860,7 @@ static int cm_dreq_handler(struct cm_work *work)
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_dreq_unknown_err(&cm_id_priv->id);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2945,12 +2945,11 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
state);
break;
default:
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_send_unknown_rej_err(&cm_id_priv->id);
return -EINVAL;
}
+ trace_icm_send_rej(&cm_id_priv->id, reason);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_free_msg(msg);
@@ -3060,9 +3059,7 @@ static int cm_rej_handler(struct cm_work *work)
}
fallthrough;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_rej_unknown_err(&cm_id_priv->id);
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
@@ -3118,9 +3115,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
}
fallthrough;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_send_mra_unknown_err(&cm_id_priv->id);
ret = -EINVAL;
goto error1;
}
@@ -3133,6 +3128,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
msg_response, service_timeout,
private_data, private_data_len);
+ trace_icm_send_mra(cm_id);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto error2;
@@ -3229,9 +3225,7 @@ static int cm_mra_handler(struct cm_work *work)
counter[CM_MRA_COUNTER]);
fallthrough;
default:
- pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_mra_unknown_err(&cm_id_priv->id);
goto out;
}
@@ -3505,10 +3499,12 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id->state == IB_CM_IDLE)
+ if (cm_id->state == IB_CM_IDLE) {
+ trace_icm_send_sidr_req(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
- else
+ } else {
ret = -EINVAL;
+ }
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3670,6 +3666,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
param);
+ trace_icm_send_sidr_rep(&cm_id_priv->id);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_free_msg(msg);
@@ -3767,8 +3764,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
goto discard;
- pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
- state, ib_wc_status_msg(wc_status));
+ trace_icm_mad_send_err(state, wc_status);
switch (state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
@@ -3891,7 +3887,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
- pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
+ trace_icm_handler_err(work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3927,8 +3923,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
- pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
- be32_to_cpu(cm_id->local_id), cm_id->state);
+ trace_icm_establish_err(cm_id);
ret = -EINVAL;
break;
}
@@ -4125,9 +4120,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_qp_init_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -4175,9 +4168,7 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_qp_rtr_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -4237,9 +4228,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
- pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
- __func__, be32_to_cpu(cm_id_priv->id.local_id),
- cm_id_priv->id.state);
+ trace_icm_qp_rts_err(&cm_id_priv->id);
ret = -EINVAL;
break;
}
@@ -4295,20 +4284,6 @@ static struct kobj_type cm_counter_obj_type = {
.default_attrs = cm_counter_default_attrs
};
-static char *cm_devnode(struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
-}
-
-struct class cm_class = {
- .owner = THIS_MODULE,
- .name = "infiniband_cm",
- .devnode = cm_devnode,
-};
-EXPORT_SYMBOL(cm_class);
-
static int cm_create_port_fs(struct cm_port *port)
{
int i, ret;
@@ -4511,12 +4486,6 @@ static int __init ib_cm_init(void)
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
- ret = class_register(&cm_class);
- if (ret) {
- ret = -ENOMEM;
- goto error1;
- }
-
cm.wq = alloc_workqueue("ib_cm", 0, 1);
if (!cm.wq) {
ret = -ENOMEM;
@@ -4531,8 +4500,6 @@ static int __init ib_cm_init(void)
error3:
destroy_workqueue(cm.wq);
error2:
- class_unregister(&cm_class);
-error1:
return ret;
}
@@ -4553,7 +4520,6 @@ static void __exit ib_cm_cleanup(void)
kfree(timewait_info);
}
- class_unregister(&cm_class);
WARN_ON(!xa_empty(&cm.local_id_table));
}
diff --git a/drivers/infiniband/core/cm_trace.c b/drivers/infiniband/core/cm_trace.c
new file mode 100644
index 000000000000..8f3482f66338
--- /dev/null
+++ b/drivers/infiniband/core/cm_trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Trace points for the IB Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#include <rdma/rdma_cm.h>
+#include "cma_priv.h"
+
+#define CREATE_TRACE_POINTS
+
+#include "cm_trace.h"
diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h
new file mode 100644
index 000000000000..e9d282679ef1
--- /dev/null
+++ b/drivers/infiniband/core/cm_trace.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for the IB Connection Manager.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2020 Oracle and/or its affiliates.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ib_cma
+
+#if !defined(_TRACE_IB_CMA_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_IB_CMA_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_cm.h>
+#include <trace/events/rdma.h>
+
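+/*
+ * Each state/reason list below is expanded twice: first with
+ * TRACE_DEFINE_ENUM to export the enum values, then as { value, "name" }
+ * pairs for __print_symbolic().
+ */
+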
+/*
+ * enum ib_cm_state, from include/rdma/ib_cm.h
+ */
+#define IB_CM_STATE_LIST \
+ ib_cm_state(IDLE) \
+ ib_cm_state(LISTEN) \
+ ib_cm_state(REQ_SENT) \
+ ib_cm_state(REQ_RCVD) \
+ ib_cm_state(MRA_REQ_SENT) \
+ ib_cm_state(MRA_REQ_RCVD) \
+ ib_cm_state(REP_SENT) \
+ ib_cm_state(REP_RCVD) \
+ ib_cm_state(MRA_REP_SENT) \
+ ib_cm_state(MRA_REP_RCVD) \
+ ib_cm_state(ESTABLISHED) \
+ ib_cm_state(DREQ_SENT) \
+ ib_cm_state(DREQ_RCVD) \
+ ib_cm_state(TIMEWAIT) \
+ ib_cm_state(SIDR_REQ_SENT) \
+ ib_cm_state_end(SIDR_REQ_RCVD)
+
+#undef ib_cm_state
+#undef ib_cm_state_end
+#define ib_cm_state(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_state_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_STATE_LIST
+
+#undef ib_cm_state
+#undef ib_cm_state_end
+#define ib_cm_state(x) { IB_CM_##x, #x },
+#define ib_cm_state_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_state(x) \
+ __print_symbolic(x, IB_CM_STATE_LIST)
+
+/*
+ * enum ib_cm_lap_state, from include/rdma/ib_cm.h
+ */
+#define IB_CM_LAP_STATE_LIST \
+ ib_cm_lap_state(LAP_UNINIT) \
+ ib_cm_lap_state(LAP_IDLE) \
+ ib_cm_lap_state(LAP_SENT) \
+ ib_cm_lap_state(LAP_RCVD) \
+ ib_cm_lap_state(MRA_LAP_SENT) \
+ ib_cm_lap_state_end(MRA_LAP_RCVD)
+
+#undef ib_cm_lap_state
+#undef ib_cm_lap_state_end
+#define ib_cm_lap_state(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_lap_state_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_LAP_STATE_LIST
+
+#undef ib_cm_lap_state
+#undef ib_cm_lap_state_end
+#define ib_cm_lap_state(x) { IB_CM_##x, #x },
+#define ib_cm_lap_state_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_lap_state(x) \
+ __print_symbolic(x, IB_CM_LAP_STATE_LIST)
+
+/*
+ * enum ib_cm_rej_reason, from include/rdma/ib_cm.h
+ */
+#define IB_CM_REJ_REASON_LIST \
+ ib_cm_rej_reason(REJ_NO_QP) \
+ ib_cm_rej_reason(REJ_NO_EEC) \
+ ib_cm_rej_reason(REJ_NO_RESOURCES) \
+ ib_cm_rej_reason(REJ_TIMEOUT) \
+ ib_cm_rej_reason(REJ_UNSUPPORTED) \
+ ib_cm_rej_reason(REJ_INVALID_COMM_ID) \
+ ib_cm_rej_reason(REJ_INVALID_COMM_INSTANCE) \
+ ib_cm_rej_reason(REJ_INVALID_SERVICE_ID) \
+ ib_cm_rej_reason(REJ_INVALID_TRANSPORT_TYPE) \
+ ib_cm_rej_reason(REJ_STALE_CONN) \
+ ib_cm_rej_reason(REJ_RDC_NOT_EXIST) \
+ ib_cm_rej_reason(REJ_INVALID_GID) \
+ ib_cm_rej_reason(REJ_INVALID_LID) \
+ ib_cm_rej_reason(REJ_INVALID_SL) \
+ ib_cm_rej_reason(REJ_INVALID_TRAFFIC_CLASS) \
+ ib_cm_rej_reason(REJ_INVALID_HOP_LIMIT) \
+ ib_cm_rej_reason(REJ_INVALID_PACKET_RATE) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_GID) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_LID) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_SL) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_TRAFFIC_CLASS) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_HOP_LIMIT) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_PACKET_RATE) \
+ ib_cm_rej_reason(REJ_PORT_CM_REDIRECT) \
+ ib_cm_rej_reason(REJ_PORT_REDIRECT) \
+ ib_cm_rej_reason(REJ_INVALID_MTU) \
+ ib_cm_rej_reason(REJ_INSUFFICIENT_RESP_RESOURCES) \
+ ib_cm_rej_reason(REJ_CONSUMER_DEFINED) \
+ ib_cm_rej_reason(REJ_INVALID_RNR_RETRY) \
+ ib_cm_rej_reason(REJ_DUPLICATE_LOCAL_COMM_ID) \
+ ib_cm_rej_reason(REJ_INVALID_CLASS_VERSION) \
+ ib_cm_rej_reason(REJ_INVALID_FLOW_LABEL) \
+ ib_cm_rej_reason(REJ_INVALID_ALT_FLOW_LABEL) \
+ ib_cm_rej_reason_end(REJ_VENDOR_OPTION_NOT_SUPPORTED)
+
+#undef ib_cm_rej_reason
+#undef ib_cm_rej_reason_end
+#define ib_cm_rej_reason(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+#define ib_cm_rej_reason_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
+
+IB_CM_REJ_REASON_LIST
+
+#undef ib_cm_rej_reason
+#undef ib_cm_rej_reason_end
+#define ib_cm_rej_reason(x) { IB_CM_##x, #x },
+#define ib_cm_rej_reason_end(x) { IB_CM_##x, #x }
+
+#define show_ib_cm_rej_reason(x) \
+ __print_symbolic(x, IB_CM_REJ_REASON_LIST)
+
+DECLARE_EVENT_CLASS(icm_id_class,
+ TP_PROTO(
+ const struct ib_cm_id *cm_id
+ ),
+
+ TP_ARGS(cm_id),
+
+ TP_STRUCT__entry(
+ __field(const void *, cm_id) /* for eBPF scripts */
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ __field(unsigned long, state)
+ __field(unsigned long, lap_state)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = cm_id;
+ __entry->local_id = be32_to_cpu(cm_id->local_id);
+ __entry->remote_id = be32_to_cpu(cm_id->remote_id);
+ __entry->state = cm_id->state;
+ __entry->lap_state = cm_id->lap_state;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s lap_state=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state),
+ show_ib_cm_lap_state(__entry->lap_state)
+ )
+);
+
+#define DEFINE_CM_SEND_EVENT(name) \
+ DEFINE_EVENT(icm_id_class, \
+ icm_send_##name, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id))
+
+DEFINE_CM_SEND_EVENT(req);
+DEFINE_CM_SEND_EVENT(rep);
+DEFINE_CM_SEND_EVENT(dup_req);
+DEFINE_CM_SEND_EVENT(dup_rep);
+DEFINE_CM_SEND_EVENT(rtu);
+DEFINE_CM_SEND_EVENT(mra);
+DEFINE_CM_SEND_EVENT(sidr_req);
+DEFINE_CM_SEND_EVENT(sidr_rep);
+DEFINE_CM_SEND_EVENT(dreq);
+DEFINE_CM_SEND_EVENT(drep);
+
+TRACE_EVENT(icm_send_rej,
+ TP_PROTO(
+ const struct ib_cm_id *cm_id,
+ enum ib_cm_rej_reason reason
+ ),
+
+ TP_ARGS(cm_id, reason),
+
+ TP_STRUCT__entry(
+ __field(const void *, cm_id)
+ __field(u32, local_id)
+ __field(u32, remote_id)
+ __field(unsigned long, state)
+ __field(unsigned long, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->cm_id = cm_id;
+ __entry->local_id = be32_to_cpu(cm_id->local_id);
+ __entry->remote_id = be32_to_cpu(cm_id->remote_id);
+ __entry->state = cm_id->state;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s reason=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state),
+ show_ib_cm_rej_reason(__entry->reason)
+ )
+);
+
+#define DEFINE_CM_ERR_EVENT(name) \
+ DEFINE_EVENT(icm_id_class, \
+ icm_##name##_err, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id))
+
+DEFINE_CM_ERR_EVENT(send_cm_rtu);
+DEFINE_CM_ERR_EVENT(establish);
+DEFINE_CM_ERR_EVENT(no_listener);
+DEFINE_CM_ERR_EVENT(send_drep);
+DEFINE_CM_ERR_EVENT(dreq_unknown);
+DEFINE_CM_ERR_EVENT(send_unknown_rej);
+DEFINE_CM_ERR_EVENT(rej_unknown);
+DEFINE_CM_ERR_EVENT(send_mra_unknown);
+DEFINE_CM_ERR_EVENT(mra_unknown);
+DEFINE_CM_ERR_EVENT(qp_init);
+DEFINE_CM_ERR_EVENT(qp_rtr);
+DEFINE_CM_ERR_EVENT(qp_rts);
+
+DEFINE_EVENT(icm_id_class, \
+ icm_dreq_skipped, \
+ TP_PROTO( \
+ const struct ib_cm_id *cm_id \
+ ), \
+ TP_ARGS(cm_id) \
+);
+
+DECLARE_EVENT_CLASS(icm_local_class,
+ TP_PROTO(
+ unsigned int local_id,
+ unsigned int remote_id
+ ),
+
+ TP_ARGS(local_id, remote_id),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = local_id;
+ __entry->remote_id = remote_id;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u",
+ __entry->local_id, __entry->remote_id
+ )
+);
+
+#define DEFINE_CM_LOCAL_EVENT(name) \
+ DEFINE_EVENT(icm_local_class, \
+ icm_##name, \
+ TP_PROTO( \
+ unsigned int local_id, \
+ unsigned int remote_id \
+ ), \
+ TP_ARGS(local_id, remote_id))
+
+DEFINE_CM_LOCAL_EVENT(issue_rej);
+DEFINE_CM_LOCAL_EVENT(issue_drep);
+DEFINE_CM_LOCAL_EVENT(staleconn_err);
+DEFINE_CM_LOCAL_EVENT(no_priv_err);
+
+DECLARE_EVENT_CLASS(icm_remote_class,
+ TP_PROTO(
+ u32 remote_id
+ ),
+
+ TP_ARGS(remote_id),
+
+ TP_STRUCT__entry(
+ __field(u32, remote_id)
+ ),
+
+ TP_fast_assign(
+ __entry->remote_id = remote_id;
+ ),
+
+ TP_printk("remote_id=%u",
+ __entry->remote_id
+ )
+);
+
+#define DEFINE_CM_REMOTE_EVENT(name) \
+ DEFINE_EVENT(icm_remote_class, \
+ icm_##name, \
+ TP_PROTO( \
+ u32 remote_id \
+ ), \
+ TP_ARGS(remote_id))
+
+DEFINE_CM_REMOTE_EVENT(remote_no_priv_err);
+DEFINE_CM_REMOTE_EVENT(insert_failed_err);
+
+TRACE_EVENT(icm_send_rep_err,
+ TP_PROTO(
+ __be32 local_id,
+ enum ib_cm_state state
+ ),
+
+ TP_ARGS(local_id, state),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned long, state)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = be32_to_cpu(local_id);
+ __entry->state = state;
+ ),
+
+ TP_printk("local_id=%u state=%s",
+ __entry->local_id, show_ib_cm_state(__entry->state)
+ )
+);
+
+TRACE_EVENT(icm_rep_unknown_err,
+ TP_PROTO(
+ unsigned int local_id,
+ unsigned int remote_id,
+ enum ib_cm_state state
+ ),
+
+ TP_ARGS(local_id, remote_id, state),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local_id)
+ __field(unsigned int, remote_id)
+ __field(unsigned long, state)
+ ),
+
+ TP_fast_assign(
+ __entry->local_id = local_id;
+ __entry->remote_id = remote_id;
+ __entry->state = state;
+ ),
+
+ TP_printk("local_id=%u remote_id=%u state=%s",
+ __entry->local_id, __entry->remote_id,
+ show_ib_cm_state(__entry->state)
+ )
+);
+
+TRACE_EVENT(icm_handler_err,
+ TP_PROTO(
+ enum ib_cm_event_type event
+ ),
+
+ TP_ARGS(event),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, event)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event;
+ ),
+
+ TP_printk("unhandled event=%s",
+ rdma_show_ib_cm_event(__entry->event)
+ )
+);
+
+TRACE_EVENT(icm_mad_send_err,
+ TP_PROTO(
+ enum ib_cm_state state,
+ enum ib_wc_status wc_status
+ ),
+
+ TP_ARGS(state, wc_status),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(unsigned long, wc_status)
+ ),
+
+ TP_fast_assign(
+ __entry->state = state;
+ __entry->wc_status = wc_status;
+ ),
+
+ TP_printk("state=%s completion status=%s",
+ show_ib_cm_state(__entry->state),
+ rdma_show_wc_status(__entry->wc_status)
+ )
+);
+
+#endif /* _TRACE_IB_CMA_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/infiniband/core
+#define TRACE_INCLUDE_FILE cm_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5888311b2119..a77750b8954d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -68,6 +68,9 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
+static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
+ union ib_gid *mgid);
+
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
size_t index = event;
@@ -301,6 +304,10 @@ int cma_set_default_gid_type(struct cma_device *cma_dev,
if (!rdma_is_port_valid(cma_dev->device, port))
return -EINVAL;
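+ /* RoCE ports may be asked for IB_GID_TYPE_IB, since both types print
+ * as "IB/RoCE v1" to user space; treat that as IB_GID_TYPE_ROCE.
+ */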
+ if (default_gid_type == IB_GID_TYPE_IB &&
+ rdma_protocol_roce_eth_encap(cma_dev->device, port))
+ default_gid_type = IB_GID_TYPE_ROCE;
+
supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
if (!(supported_gids & 1 << default_gid_type))
@@ -345,13 +352,10 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
struct cma_multicast {
struct rdma_id_private *id_priv;
- union {
- struct ib_sa_multicast *ib;
- } multicast;
+ struct ib_sa_multicast *sa_mc;
struct list_head list;
void *context;
struct sockaddr_storage addr;
- struct kref mcref;
u8 join_state;
};
@@ -363,18 +367,6 @@ struct cma_work {
struct rdma_cm_event event;
};
-struct cma_ndev_work {
- struct work_struct work;
- struct rdma_id_private *id;
- struct rdma_cm_event event;
-};
-
-struct iboe_mcast_work {
- struct work_struct work;
- struct rdma_id_private *id;
- struct cma_multicast *mc;
-};
-
union cma_ip_addr {
struct in6_addr ip6;
struct {
@@ -404,23 +396,21 @@ struct cma_req_info {
u16 pkey;
};
-static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&id_priv->lock, flags);
- ret = (id_priv->state == comp);
- spin_unlock_irqrestore(&id_priv->lock, flags);
- return ret;
-}
-
static int cma_comp_exch(struct rdma_id_private *id_priv,
enum rdma_cm_state comp, enum rdma_cm_state exch)
{
unsigned long flags;
int ret;
+ /*
+ * The FSM uses a funny double locking where state is protected by both
+ * the handler_mutex and the spinlock. State is not allowed to change
+ * to/from a handler_mutex protected value without also holding
+ * handler_mutex.
+ */
+ if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
+ lockdep_assert_held(&id_priv->handler_mutex);
+
spin_lock_irqsave(&id_priv->lock, flags);
if ((ret = (id_priv->state == comp)))
id_priv->state = exch;
@@ -467,10 +457,8 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
list_add_tail(&id_priv->list, &cma_dev->id_list);
- if (id_priv->res.kern_name)
- rdma_restrack_kadd(&id_priv->res);
- else
- rdma_restrack_uadd(&id_priv->res);
+ rdma_restrack_add(&id_priv->res);
+
trace_cm_id_attach(id_priv, cma_dev->device);
}
@@ -483,14 +471,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_start_port(cma_dev->device)];
}
-static inline void release_mc(struct kref *kref)
-{
- struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
-
- kfree(mc->multicast.ib);
- kfree(mc);
-}
-
static void cma_release_dev(struct rdma_id_private *id_priv)
{
mutex_lock(&lock);
@@ -844,10 +824,10 @@ static void cma_id_put(struct rdma_id_private *id_priv)
complete(&id_priv->comp);
}
-struct rdma_cm_id *__rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_ucm_port_space ps,
- enum ib_qp_type qp_type, const char *caller)
+static struct rdma_id_private *
+__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
struct rdma_id_private *id_priv;
@@ -855,8 +835,6 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- rdma_restrack_set_task(&id_priv->res, caller);
- id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
id_priv->id.event_handler = event_handler;
@@ -876,9 +854,45 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
- return &id_priv->id;
+ rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
+ if (parent)
+ rdma_restrack_parent_name(&id_priv->res, &parent->res);
+
+ return id_priv;
}
-EXPORT_SYMBOL(__rdma_create_id);
+
+struct rdma_cm_id *
+__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type, const char *caller)
+{
+ struct rdma_id_private *ret;
+
+ ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ rdma_restrack_set_name(&ret->res, caller);
+ return &ret->id;
+}
+EXPORT_SYMBOL(__rdma_create_kernel_id);
+
+struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
+ void *context,
+ enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type)
+{
+ struct rdma_id_private *ret;
+
+ ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
+ ps, qp_type, NULL);
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ rdma_restrack_set_name(&ret->res, NULL);
+ return &ret->id;
+}
+EXPORT_SYMBOL(rdma_create_user_id);
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
@@ -1783,19 +1797,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv,
- struct cma_multicast *mc)
+static void destroy_mc(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
{
- struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
- struct net_device *ndev = NULL;
+ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
+ ib_sa_free_multicast(mc->sa_mc);
- if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (ndev) {
- cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
- dev_put(ndev);
+ if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
+ struct rdma_dev_addr *dev_addr =
+ &id_priv->id.route.addr.dev_addr;
+ struct net_device *ndev = NULL;
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net,
+ dev_addr->bound_dev_if);
+ if (ndev) {
+ union ib_gid mgid;
+
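+ /* Recompute the mgid from the saved join address so the IGMP
+ * leave can be sent without an SA multicast record.
+ */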
+ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
+ &mgid);
+ cma_igmp_send(ndev, &mgid, false);
+ dev_put(ndev);
+ }
}
- kref_put(&mc->mcref, release_mc);
+ kfree(mc);
}
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
@@ -1803,16 +1828,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
struct cma_multicast *mc;
while (!list_empty(&id_priv->mc_list)) {
- mc = container_of(id_priv->mc_list.next,
- struct cma_multicast, list);
+ mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
+ list);
list_del(&mc->list);
- if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
- id_priv->id.port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else {
- cma_leave_roce_mc_group(id_priv, mc);
- }
+ destroy_mc(id_priv, mc);
}
}
@@ -1821,7 +1840,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
{
cma_cancel_operation(id_priv, state);
- rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
@@ -1847,6 +1865,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
put_net(id_priv->id.route.addr.dev_addr.net);
+ rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -1949,13 +1968,15 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
{
struct rdma_id_private *id_priv = cm_id->context;
struct rdma_cm_event event = {};
+ enum rdma_cm_state state;
int ret;
mutex_lock(&id_priv->handler_mutex);
+ state = READ_ONCE(id_priv->state);
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- id_priv->state != RDMA_CM_CONNECT) ||
+ state != RDMA_CM_CONNECT) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- id_priv->state != RDMA_CM_DISCONNECT))
+ state != RDMA_CM_DISCONNECT))
goto out;
switch (ib_event->event) {
@@ -1965,7 +1986,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id,
event.status = -ETIMEDOUT;
break;
case IB_CM_REP_RECEIVED:
- if (cma_comp(id_priv, RDMA_CM_CONNECT) &&
+ if (state == RDMA_CM_CONNECT &&
(id_priv->id.qp_type != IB_QPT_UD)) {
trace_cm_send_mra(id_priv);
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
@@ -2043,14 +2064,15 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
int ret;
listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
- id = __rdma_create_id(listen_id->route.addr.dev_addr.net,
- listen_id->event_handler, listen_id->context,
- listen_id->ps, ib_event->param.req_rcvd.qp_type,
- listen_id_priv->res.kern_name);
- if (IS_ERR(id))
+ id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
+ listen_id->event_handler, listen_id->context,
+ listen_id->ps,
+ ib_event->param.req_rcvd.qp_type,
+ listen_id_priv);
+ if (IS_ERR(id_priv))
return NULL;
- id_priv = container_of(id, struct rdma_id_private, id);
+ id = &id_priv->id;
if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
(struct sockaddr *)&id->route.addr.dst_addr,
listen_id, ib_event, ss_family, service_id))
@@ -2104,13 +2126,13 @@ cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
int ret;
listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
- id = __rdma_create_id(net, listen_id->event_handler, listen_id->context,
- listen_id->ps, IB_QPT_UD,
- listen_id_priv->res.kern_name);
- if (IS_ERR(id))
+ id_priv = __rdma_create_id(net, listen_id->event_handler,
+ listen_id->context, listen_id->ps, IB_QPT_UD,
+ listen_id_priv);
+ if (IS_ERR(id_priv))
return NULL;
- id_priv = container_of(id, struct rdma_id_private, id);
+ id = &id_priv->id;
if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
(struct sockaddr *)&id->route.addr.dst_addr,
listen_id, ib_event, ss_family,
@@ -2184,7 +2206,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
}
mutex_lock(&listen_id->handler_mutex);
- if (listen_id->state != RDMA_CM_LISTEN) {
+ if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
ret = -ECONNABORTED;
goto err_unlock;
}
@@ -2226,8 +2248,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
goto net_dev_put;
}
- if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
- (conn_id->id.qp_type != IB_QPT_UD)) {
+ if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
+ conn_id->id.qp_type != IB_QPT_UD) {
trace_cm_send_mra(cm_id->context);
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
}
@@ -2288,7 +2310,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != RDMA_CM_CONNECT)
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
goto out;
switch (iw_event->event) {
@@ -2346,7 +2368,6 @@ out:
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct iw_cm_event *iw_event)
{
- struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event = {};
int ret = -ECONNABORTED;
@@ -2362,20 +2383,18 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
listen_id = cm_id->context;
mutex_lock(&listen_id->handler_mutex);
- if (listen_id->state != RDMA_CM_LISTEN)
+ if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
goto out;
/* Create a new RDMA id for the new IW CM ID */
- new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
- listen_id->id.event_handler,
- listen_id->id.context,
- RDMA_PS_TCP, IB_QPT_RC,
- listen_id->res.kern_name);
- if (IS_ERR(new_cm_id)) {
+ conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
+ listen_id->id.event_handler,
+ listen_id->id.context, RDMA_PS_TCP,
+ IB_QPT_RC, listen_id);
+ if (IS_ERR(conn_id)) {
ret = -ENOMEM;
goto out;
}
- conn_id = container_of(new_cm_id, struct rdma_id_private, id);
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
@@ -2480,7 +2499,6 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct cma_device *cma_dev)
{
struct rdma_id_private *dev_id_priv;
- struct rdma_cm_id *id;
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
@@ -2489,13 +2507,12 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
- id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
- id_priv->id.qp_type, id_priv->res.kern_name);
- if (IS_ERR(id))
+ dev_id_priv =
+ __rdma_create_id(net, cma_listen_handler, id_priv,
+ id_priv->id.ps, id_priv->id.qp_type, id_priv);
+ if (IS_ERR(dev_id_priv))
return;
- dev_id_priv = container_of(id, struct rdma_id_private, id);
-
dev_id_priv->state = RDMA_CM_ADDR_BOUND;
memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
rdma_addr_size(cma_src_addr(id_priv)));
@@ -2508,7 +2525,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
dev_id_priv->tos_set = id_priv->tos_set;
dev_id_priv->tos = id_priv->tos;
- ret = rdma_listen(id, id_priv->backlog);
+ ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
dev_warn(&cma_dev->device->dev,
"RDMA CMA: cma_listen_on_dev, error %d\n", ret);
@@ -2647,32 +2664,14 @@ static void cma_work_handler(struct work_struct *_work)
struct rdma_id_private *id_priv = work->id;
mutex_lock(&id_priv->handler_mutex);
- if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
goto out_unlock;
-
- if (cma_cm_event_handler(id_priv, &work->event)) {
- cma_id_put(id_priv);
- destroy_id_handler_unlock(id_priv);
- goto out_free;
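+ /* An old_state/new_state pair of 0/0 requests no state transition;
+ * this is how the former cma_ndev_work path is expressed after the
+ * merge into this handler.
+ */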
+ if (work->old_state != 0 || work->new_state != 0) {
+ if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+ goto out_unlock;
}
-out_unlock:
- mutex_unlock(&id_priv->handler_mutex);
- cma_id_put(id_priv);
-out_free:
- kfree(work);
-}
-
-static void cma_ndev_work_handler(struct work_struct *_work)
-{
- struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
- struct rdma_id_private *id_priv = work->id;
-
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state == RDMA_CM_DESTROYING ||
- id_priv->state == RDMA_CM_DEVICE_REMOVAL)
- goto out_unlock;
-
if (cma_cm_event_handler(id_priv, &work->event)) {
cma_id_put(id_priv);
destroy_id_handler_unlock(id_priv);
@@ -2683,6 +2682,8 @@ out_unlock:
mutex_unlock(&id_priv->handler_mutex);
cma_id_put(id_priv);
out_free:
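+ /* Multicast join events carry an ah_attr that must be released here */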
+ if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
+ rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
kfree(work);
}
@@ -3240,32 +3241,54 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return rdma_bind_addr(id, src_addr);
}
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, unsigned long timeout_ms)
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by the ULP; a previously bound ID will not be re-bound, and
+ * src_addr is then ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+ struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr)
{
- struct rdma_id_private *id_priv;
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- if (id_priv->state == RDMA_CM_IDLE) {
- ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret) {
- memset(cma_dst_addr(id_priv), 0,
- rdma_addr_size(dst_addr));
- return ret;
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+ /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
+ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+ if (ret)
+ goto err_dst;
+ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+ RDMA_CM_ADDR_QUERY))) {
+ ret = -EINVAL;
+ goto err_dst;
}
}
if (cma_family(id_priv) != dst_addr->sa_family) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_state;
}
+ return 0;
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
- return -EINVAL;
- }
+err_state:
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+err_dst:
+ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
+ return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+ if (ret)
+ return ret;
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
@@ -3297,7 +3320,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irqsave(&id_priv->lock, flags);
- if (reuse || id_priv->state == RDMA_CM_IDLE) {
+ if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
+ id_priv->state == RDMA_CM_IDLE) {
id_priv->reuseaddr = reuse;
ret = 0;
} else {
@@ -3491,8 +3515,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
if (id_priv == cur_id)
continue;
- if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
- cur_id->reuseaddr)
+ if (reuseaddr && cur_id->reuseaddr)
continue;
cur_addr = cma_src_addr(cur_id);
@@ -3533,18 +3556,6 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
return ret;
}
-static int cma_bind_listen(struct rdma_id_private *id_priv)
-{
- struct rdma_bind_list *bind_list = id_priv->bind_list;
- int ret = 0;
-
- mutex_lock(&lock);
- if (bind_list->owners.first->next)
- ret = cma_check_port(bind_list, id_priv, 0);
- mutex_unlock(&lock);
- return ret;
-}
-
static enum rdma_ucm_port_space
cma_select_inet_ps(struct rdma_id_private *id_priv)
{
@@ -3638,22 +3649,31 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
- if (id_priv->state == RDMA_CM_IDLE) {
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+ /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
id->route.addr.src_addr.ss_family = AF_INET;
ret = rdma_bind_addr(id, cma_src_addr(id_priv));
if (ret)
return ret;
+ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+ RDMA_CM_LISTEN)))
+ return -EINVAL;
}
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
- return -EINVAL;
-
+ /*
+ * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be
+ * reusable and has to be unique in the bind list.
+ */
if (id_priv->reuseaddr) {
- ret = cma_bind_listen(id_priv);
+ mutex_lock(&lock);
+ ret = cma_check_port(id_priv->bind_list, id_priv, 0);
+ if (!ret)
+ id_priv->reuseaddr = 0;
+ mutex_unlock(&lock);
if (ret)
goto err;
}
@@ -3678,6 +3698,10 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
return 0;
err:
id_priv->backlog = 0;
+ /*
+ * None of the failure paths that lead here allow the req_handler to
+ * have run.
+ */
cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
return ret;
}
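
For illustration, a minimal listener sketch under the new rules (my_handler and my_ctx are hypothetical names, not part of this patch); rdma_listen() on an idle ID now binds to the AF_INET wildcard address itself, so an explicit rdma_bind_addr() beforehand is optional:

	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, my_handler, my_ctx, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	/* Optionally rdma_bind_addr() first; otherwise the any-address. */
	ret = rdma_listen(id, 16);
	if (ret)
		rdma_destroy_id(id);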
@@ -3732,7 +3756,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev)
cma_release_dev(id_priv);
err1:
@@ -3781,7 +3804,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
int ret;
mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != RDMA_CM_CONNECT)
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
goto out;
switch (ib_event->event) {
@@ -4015,12 +4038,21 @@ out:
return ret;
}
-int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+/**
+ * rdma_connect_locked - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Same as rdma_connect() but can only be called from the
+ * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
+ */
+int rdma_connect_locked(struct rdma_cm_id *id,
+ struct rdma_conn_param *conn_param)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
return -EINVAL;
@@ -4039,13 +4071,37 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
else
ret = -ENOSYS;
if (ret)
- goto err;
-
+ goto err_state;
return 0;
-err:
+err_state:
cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
return ret;
}
+EXPORT_SYMBOL(rdma_connect_locked);
+
+/**
+ * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ *
+ * Users must have resolved a route for the rdma_cm_id to connect with by having
+ * called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP information
+ * for unconnected rdma_cm_id's. The actual operation is based on the
+ * rdma_cm_id's port space.
+ */
+int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ mutex_lock(&id_priv->handler_mutex);
+ ret = rdma_connect_locked(id, conn_param);
+ mutex_unlock(&id_priv->handler_mutex);
+ return ret;
+}
EXPORT_SYMBOL(rdma_connect);
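
For illustration, a hypothetical ULP handler sketch (my_ulp_cm_handler and its conn_param values are placeholders, not part of this patch): inside the RDMA_CM_EVENT_ROUTE_RESOLVED callback the handler_mutex is already held, so the connection must be initiated with rdma_connect_locked() rather than rdma_connect():

	static int my_ulp_cm_handler(struct rdma_cm_id *id,
				     struct rdma_cm_event *event)
	{
		struct rdma_conn_param param = { .retry_count = 7 };

		if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
			/* handler_mutex held here: use the _locked variant */
			return rdma_connect_locked(id, &param);
		return 0;
	}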
/**
@@ -4155,17 +4211,33 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}
-int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
- const char *caller)
+/**
+ * rdma_accept - Called to accept a connection request or response.
+ * @id: Connection identifier associated with the request.
+ * @conn_param: Information needed to establish the connection. This must be
+ * provided if accepting a connection request. If accepting a connection
+ * response, this parameter must be NULL.
+ *
+ * Typically, this routine is only called by the listener to accept a connection
+ * request. It must also be called on the active side of a connection if the
+ * user is performing their own QP transitions.
+ *
+ * In the case of error, a reject message is sent to the remote side and the
+ * state of the qp associated with the id is modified to error, such that any
+ * previously posted receive buffers would be flushed.
+ *
+ * This function is for use by kernel ULPs and must be called from under the
+ * handler callback.
+ */
+int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
int ret;
- id_priv = container_of(id, struct rdma_id_private, id);
-
- rdma_restrack_set_task(&id_priv->res, caller);
+ lockdep_assert_held(&id_priv->handler_mutex);
- if (!cma_comp(id_priv, RDMA_CM_CONNECT))
+ if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
return -EINVAL;
if (!id->qp && conn_param) {
@@ -4203,10 +4275,10 @@ reject:
rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
-EXPORT_SYMBOL(__rdma_accept);
+EXPORT_SYMBOL(rdma_accept);
-int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
- const char *caller, struct rdma_ucm_ece *ece)
+int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
@@ -4214,9 +4286,27 @@ int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
id_priv->ece.vendor_id = ece->vendor_id;
id_priv->ece.attr_mod = ece->attr_mod;
- return __rdma_accept(id, conn_param, caller);
+ return rdma_accept(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_accept_ece);
+
+void rdma_lock_handler(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ mutex_lock(&id_priv->handler_mutex);
+}
+EXPORT_SYMBOL(rdma_lock_handler);
+
+void rdma_unlock_handler(struct rdma_cm_id *id)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ mutex_unlock(&id_priv->handler_mutex);
}
-EXPORT_SYMBOL(__rdma_accept_ece);
+EXPORT_SYMBOL(rdma_unlock_handler);
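
For illustration, a sketch of how a caller outside the event handler would use these helpers (cm_id and conn_param are assumed to be set up elsewhere); rdma_accept() now asserts that the handler_mutex is held, so a non-handler context must take it explicitly:

	rdma_lock_handler(cm_id);
	ret = rdma_accept(cm_id, &conn_param);
	rdma_unlock_handler(cm_id);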
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
@@ -4299,63 +4389,66 @@ out:
}
EXPORT_SYMBOL(rdma_disconnect);
-static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ struct ib_sa_multicast *multicast,
+ struct rdma_cm_event *event,
+ struct cma_multicast *mc)
{
- struct rdma_id_private *id_priv;
- struct cma_multicast *mc = multicast->context;
- struct rdma_cm_event event = {};
- int ret = 0;
-
- id_priv = mc->id_priv;
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != RDMA_CM_ADDR_BOUND &&
- id_priv->state != RDMA_CM_ADDR_RESOLVED)
- goto out;
+ struct rdma_dev_addr *dev_addr;
+ enum ib_gid_type gid_type;
+ struct net_device *ndev;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
else
pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
status);
- mutex_lock(&id_priv->qp_mutex);
- if (!status && id_priv->id.qp) {
- status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- be16_to_cpu(multicast->rec.mlid));
- if (status)
- pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to attach QP. status %d\n",
- status);
+
+ event->status = status;
+ event->param.ud.private_data = mc->context;
+ if (status) {
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ return;
}
- mutex_unlock(&id_priv->qp_mutex);
- event.status = status;
- event.param.ud.private_data = mc->context;
- if (!status) {
- struct rdma_dev_addr *dev_addr =
- &id_priv->id.route.addr.dev_addr;
- struct net_device *ndev =
- dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- enum ib_gid_type gid_type =
- id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
- rdma_start_port(id_priv->cma_dev->device)];
-
- event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
- ret = ib_init_ah_from_mcmember(id_priv->id.device,
- id_priv->id.port_num,
- &multicast->rec,
- ndev, gid_type,
- &event.param.ud.ah_attr);
- if (ret)
- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ dev_addr = &id_priv->id.route.addr.dev_addr;
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ gid_type =
+ id_priv->cma_dev
+ ->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+
+ event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
+ if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
+ &multicast->rec, ndev, gid_type,
+ &event->param.ud.ah_attr)) {
+ event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ goto out;
+ }
- event.param.ud.qp_num = 0xFFFFFF;
- event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
- if (ndev)
- dev_put(ndev);
- } else
- event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
+ event->param.ud.qp_num = 0xFFFFFF;
+ event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
- ret = cma_cm_event_handler(id_priv, &event);
+out:
+ if (ndev)
+ dev_put(ndev);
+}
+
+static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+{
+ struct cma_multicast *mc = multicast->context;
+ struct rdma_id_private *id_priv = mc->id_priv;
+ struct rdma_cm_event event = {};
+ int ret = 0;
+
+ mutex_lock(&id_priv->handler_mutex);
+ if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
+ READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
+ goto out;
+ cma_make_mc_event(status, id_priv, multicast, &event, mc);
+ ret = cma_cm_event_handler(id_priv, &event);
rdma_destroy_ah_attr(&event.param.ud.ah_attr);
if (ret) {
destroy_id_handler_unlock(id_priv);
@@ -4445,23 +4538,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
IB_SA_MCMEMBER_REC_MTU |
IB_SA_MCMEMBER_REC_HOP_LIMIT;
- mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
- id_priv->id.port_num, &rec,
- comp_mask, GFP_KERNEL,
- cma_ib_mc_handler, mc);
- return PTR_ERR_OR_ZERO(mc->multicast.ib);
-}
-
-static void iboe_mcast_work_handler(struct work_struct *work)
-{
- struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
- struct cma_multicast *mc = mw->mc;
- struct ib_sa_multicast *m = mc->multicast.ib;
-
- mc->multicast.ib->context = mc;
- cma_ib_mc_handler(0, m);
- kref_put(&mc->mcref, release_mc);
- kfree(mw);
+ mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
+ id_priv->id.port_num, &rec, comp_mask,
+ GFP_KERNEL, cma_ib_mc_handler, mc);
+ return PTR_ERR_OR_ZERO(mc->sa_mc);
}
static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
@@ -4496,52 +4576,47 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
- struct iboe_mcast_work *work;
+ struct cma_work *work;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
+ struct ib_sa_multicast ib;
enum ib_gid_type gid_type;
bool send_only;
send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
- if (cma_zero_addr((struct sockaddr *)&mc->addr))
+ if (cma_zero_addr(addr))
return -EINVAL;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
- mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
- if (!mc->multicast.ib) {
- err = -ENOMEM;
- goto out1;
- }
-
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
+ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
- mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+ ib.rec.pkey = cpu_to_be16(0xffff);
if (id_priv->id.ps == RDMA_PS_UDP)
- mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+ ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev) {
err = -ENODEV;
- goto out2;
+ goto err_free;
}
- mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
- mc->multicast.ib->rec.hop_limit = 1;
- mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+ ib.rec.rate = iboe_get_rate(ndev);
+ ib.rec.hop_limit = 1;
+ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
if (addr->sa_family == AF_INET) {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
if (!send_only) {
- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+ err = cma_igmp_send(ndev, &ib.rec.mgid,
true);
}
}
@@ -4550,24 +4625,22 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
err = -ENOTSUPP;
}
dev_put(ndev);
- if (err || !mc->multicast.ib->rec.mtu) {
+ if (err || !ib.rec.mtu) {
if (!err)
err = -EINVAL;
- goto out2;
+ goto err_free;
}
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
- &mc->multicast.ib->rec.port_gid);
+ &ib.rec.port_gid);
work->id = id_priv;
- work->mc = mc;
- INIT_WORK(&work->work, iboe_mcast_work_handler);
- kref_get(&mc->mcref);
+ INIT_WORK(&work->work, cma_work_handler);
+ cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
+ /* Balances with cma_id_put() in cma_work_handler */
+ cma_id_get(id_priv);
queue_work(cma_wq, &work->work);
-
return 0;
-out2:
- kfree(mc->multicast.ib);
-out1:
+err_free:
kfree(work);
return err;
}
@@ -4575,19 +4648,21 @@ out1:
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
u8 join_state, void *context)
{
- struct rdma_id_private *id_priv;
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
struct cma_multicast *mc;
int ret;
- if (!id->device)
+ /* Not supported for kernel QPs */
+ if (WARN_ON(id->qp))
return -EINVAL;
- id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
- !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
+ /* ULP is calling this wrong. */
+ if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
+ READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
return -EINVAL;
- mc = kmalloc(sizeof *mc, GFP_KERNEL);
+ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
@@ -4597,7 +4672,6 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
mc->join_state = join_state;
if (rdma_protocol_roce(id->device, id->port_num)) {
- kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc);
if (ret)
goto out_err;
@@ -4629,25 +4703,14 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) {
- if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
- list_del(&mc->list);
- spin_unlock_irq(&id_priv->lock);
-
- if (id->qp)
- ib_detach_mcast(id->qp,
- &mc->multicast.ib->rec.mgid,
- be16_to_cpu(mc->multicast.ib->rec.mlid));
-
- BUG_ON(id_priv->cma_dev->device != id->device);
-
- if (rdma_cap_ib_mcast(id->device, id->port_num)) {
- ib_sa_free_multicast(mc->multicast.ib);
- kfree(mc);
- } else if (rdma_protocol_roce(id->device, id->port_num)) {
- cma_leave_roce_mc_group(id_priv, mc);
- }
- return;
- }
+ if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
+ continue;
+ list_del(&mc->list);
+ spin_unlock_irq(&id_priv->lock);
+
+ WARN_ON(id_priv->cma_dev->device != id->device);
+ destroy_mc(id_priv, mc);
+ return;
}
spin_unlock_irq(&id_priv->lock);
}
@@ -4656,7 +4719,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr;
- struct cma_ndev_work *work;
+ struct cma_work *work;
dev_addr = &id_priv->id.route.addr.dev_addr;
@@ -4669,7 +4732,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
if (!work)
return -ENOMEM;
- INIT_WORK(&work->work, cma_ndev_work_handler);
+ INIT_WORK(&work->work, cma_work_handler);
work->id = id_priv;
work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
cma_id_get(id_priv);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 3c1e2ca564fe..7ec4af2ed87a 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -123,16 +123,17 @@ static ssize_t default_roce_mode_store(struct config_item *item,
{
struct cma_device *cma_dev;
struct cma_dev_port_group *group;
- int gid_type = ib_cache_gid_parse_type_str(buf);
+ int gid_type;
ssize_t ret;
- if (gid_type < 0)
- return -EINVAL;
-
ret = cma_configfs_params_get(item, &cma_dev, &group);
if (ret)
return ret;
+ gid_type = ib_cache_gid_parse_type_str(buf);
+ if (gid_type < 0)
+ return -EINVAL;
+
ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
cma_configfs_params_put(cma_dev);
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index e6e20c36c538..e45264267bcc 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -17,46 +17,6 @@
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
-/*
- * enum ib_cm_event_type, from include/rdma/ib_cm.h
- */
-#define IB_CM_EVENT_LIST \
- ib_cm_event(REQ_ERROR) \
- ib_cm_event(REQ_RECEIVED) \
- ib_cm_event(REP_ERROR) \
- ib_cm_event(REP_RECEIVED) \
- ib_cm_event(RTU_RECEIVED) \
- ib_cm_event(USER_ESTABLISHED) \
- ib_cm_event(DREQ_ERROR) \
- ib_cm_event(DREQ_RECEIVED) \
- ib_cm_event(DREP_RECEIVED) \
- ib_cm_event(TIMEWAIT_EXIT) \
- ib_cm_event(MRA_RECEIVED) \
- ib_cm_event(REJ_RECEIVED) \
- ib_cm_event(LAP_ERROR) \
- ib_cm_event(LAP_RECEIVED) \
- ib_cm_event(APR_RECEIVED) \
- ib_cm_event(SIDR_REQ_ERROR) \
- ib_cm_event(SIDR_REQ_RECEIVED) \
- ib_cm_event_end(SIDR_REP_RECEIVED)
-
-#undef ib_cm_event
-#undef ib_cm_event_end
-
-#define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x);
-#define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x);
-
-IB_CM_EVENT_LIST
-
-#undef ib_cm_event
-#undef ib_cm_event_end
-
-#define ib_cm_event(x) { IB_CM_##x, #x },
-#define ib_cm_event_end(x) { IB_CM_##x, #x }
-
-#define rdma_show_ib_cm_event(x) \
- __print_symbolic(x, IB_CM_EVENT_LIST)
-
DECLARE_EVENT_CLASS(cma_fsm_class,
TP_PROTO(
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1e6a67b2c4a..e84b0fedaacb 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -44,6 +44,7 @@
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>
#include "mad_priv.h"
+#include "restrack.h"
/* Total number of ports combined across all struct ib_devices's */
#define RDMA_MAX_PORTS 8192
@@ -352,6 +353,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs);
+ rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
/*
* We don't track XRC QPs for now, because they don't have PD
* and more importantly they are created internally by the driver,
@@ -359,14 +361,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
*/
is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
- qp->res.type = RDMA_RESTRACK_QP;
- if (uobj)
- rdma_restrack_uadd(&qp->res);
- else
- rdma_restrack_kadd(&qp->res);
- } else
- qp->res.valid = false;
-
+ rdma_restrack_parent_name(&qp->res, &pd->res);
+ rdma_restrack_add(&qp->res);
+ }
return qp;
}
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 636166880442..e4ff0d3328b6 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -80,8 +80,9 @@ static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,
counter->device = dev;
counter->port = port;
- counter->res.type = RDMA_RESTRACK_COUNTER;
- counter->stats = dev->ops.counter_alloc_stats(counter);
+
+ rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER);
+ counter->stats = dev->ops.counter_alloc_stats(counter);
if (!counter->stats)
goto err_stats;
@@ -107,6 +108,7 @@ err_mode:
mutex_unlock(&port_counter->lock);
kfree(counter->stats);
err_stats:
+ rdma_restrack_put(&counter->res);
kfree(counter);
return NULL;
}
@@ -248,13 +250,8 @@ next:
static void rdma_counter_res_add(struct rdma_counter *counter,
struct ib_qp *qp)
{
- if (rdma_is_kernel_res(&qp->res)) {
- rdma_restrack_set_task(&counter->res, qp->res.kern_name);
- rdma_restrack_kadd(&counter->res);
- } else {
- rdma_restrack_attach_task(&counter->res, qp->res.task);
- rdma_restrack_uadd(&counter->res);
- }
+ rdma_restrack_parent_name(&counter->res, &qp->res);
+ rdma_restrack_add(&counter->res);
}
static void counter_release(struct kref *kref)
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index a92fc3f90bb5..12ebacf52958 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * __ib_alloc_cq_user - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
* @caller: module owner name.
- * @udata: Valid user data or NULL for kernel object
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector,
- enum ib_poll_context poll_ctx,
- const char *caller, struct ib_udata *udata)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
+ int comp_vector, enum ib_poll_context poll_ctx,
+ const char *caller)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -237,15 +235,13 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
if (!cq->wc)
goto out_free_cq;
- cq->res.type = RDMA_RESTRACK_CQ;
- rdma_restrack_set_task(&cq->res, caller);
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, caller);
ret = dev->ops.create_cq(cq, &cq_attr, NULL);
if (ret)
goto out_free_wc;
- rdma_restrack_kadd(&cq->res);
-
rdma_dim_init(cq);
switch (cq->poll_ctx) {
@@ -271,21 +267,22 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
goto out_destroy_cq;
}
+ rdma_restrack_add(&cq->res);
trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
return cq;
out_destroy_cq:
rdma_dim_destroy(cq);
- rdma_restrack_del(&cq->res);
- cq->device->ops.destroy_cq(cq, udata);
+ cq->device->ops.destroy_cq(cq, NULL);
out_free_wc:
+ rdma_restrack_put(&cq->res);
kfree(cq->wc);
out_free_cq:
kfree(cq);
trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(__ib_alloc_cq_user);
+EXPORT_SYMBOL(__ib_alloc_cq);
/**
* __ib_alloc_cq_any - allocate a completion queue
@@ -310,18 +307,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
atomic_inc_return(&counter) %
min_t(int, dev->num_comp_vectors, num_online_cpus());
- return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
- caller, NULL);
+ return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
+ caller);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);
/**
- * ib_free_cq_user - free a completion queue
+ * ib_free_cq - free a completion queue
* @cq: completion queue to free.
- * @udata: User data or NULL for kernel object
*/
-void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
+void ib_free_cq(struct ib_cq *cq)
{
+ int ret;
+
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
if (WARN_ON_ONCE(cq->cqe_used))
@@ -343,12 +341,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
rdma_dim_destroy(cq);
trace_cq_free(cq);
+ ret = cq->device->ops.destroy_cq(cq, NULL);
+ WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
rdma_restrack_del(&cq->res);
- cq->device->ops.destroy_cq(cq, udata);
kfree(cq->wc);
kfree(cq);
}
-EXPORT_SYMBOL(ib_free_cq_user);
+EXPORT_SYMBOL(ib_free_cq);
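
For illustration, the kernel-only flow after this change (dev is an assumed struct ib_device pointer); ib_alloc_cq() is the usual wrapper that supplies KBUILD_MODNAME as the caller:

	struct ib_cq *cq;

	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);
	/* ... post work requests using wr->wr_cqe ... */
	ib_free_cq(cq);		/* no udata parameter any more */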
void ib_cq_pool_init(struct ib_device *dev)
{
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 23ee65a9185f..a3b1fc84cdca 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1177,58 +1177,23 @@ out:
return ret;
}
-static void setup_dma_device(struct ib_device *device)
+static void setup_dma_device(struct ib_device *device,
+ struct device *dma_device)
{
- struct device *parent = device->dev.parent;
-
- WARN_ON_ONCE(device->dma_device);
-
-#ifdef CONFIG_DMA_OPS
- if (device->dev.dma_ops) {
- /*
- * The caller provided custom DMA operations. Copy the
- * DMA-related fields that are used by e.g. dma_alloc_coherent()
- * into device->dev.
- */
- device->dma_device = &device->dev;
- if (!device->dev.dma_mask) {
- if (parent)
- device->dev.dma_mask = parent->dma_mask;
- else
- WARN_ON_ONCE(true);
- }
- if (!device->dev.coherent_dma_mask) {
- if (parent)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
- else
- WARN_ON_ONCE(true);
- }
- } else
-#endif /* CONFIG_DMA_OPS */
- {
- /*
- * The caller did not provide custom DMA operations. Use the
- * DMA mapping operations of the parent device.
- */
- WARN_ON_ONCE(!parent);
- device->dma_device = parent;
- }
-
- if (!device->dev.dma_parms) {
- if (parent) {
- /*
- * The caller did not provide DMA parameters, so
- * 'parent' probably represents a PCI device. The PCI
- * core sets the maximum segment size to 64
- * KB. Increase this parameter to 2 GB.
- */
- device->dev.dma_parms = parent->dma_parms;
- dma_set_max_seg_size(device->dma_device, SZ_2G);
- } else {
- WARN_ON_ONCE(true);
- }
+ /*
+ * If the caller does not provide a DMA-capable device then the IB
+ * device will be used. In this case the caller should fully set up the
+ * ibdev for DMA. This usually means using dma_virt_ops.
+ */
+#ifdef CONFIG_DMA_VIRT_OPS
+ if (!dma_device) {
+ device->dev.dma_ops = &dma_virt_ops;
+ dma_device = &device->dev;
}
+#endif
+ WARN_ON(!dma_device);
+ device->dma_device = dma_device;
+ WARN_ON(!device->dma_device->dma_parms);
}
/*
@@ -1241,7 +1206,6 @@ static int setup_device(struct ib_device *device)
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
int ret;
- setup_dma_device(device);
ib_device_check_mandatory(device);
ret = setup_port_data(device);
@@ -1354,7 +1318,10 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
* ib_register_device - Register an IB device with IB core
* @device: Device to register
* @name: unique string device name. This may include a '%' which will
- * cause a unique index to be added to the passed device name.
+ * cause a unique index to be added to the passed device name.
+ * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
+ * device will be used. In this case the caller should fully set
+ * up the ibdev for DMA. This usually means using dma_virt_ops.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
@@ -1365,7 +1332,8 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
* asynchronously then the device pointer may become freed as soon as this
* function returns.
*/
-int ib_register_device(struct ib_device *device, const char *name)
+int ib_register_device(struct ib_device *device, const char *name,
+ struct device *dma_device)
{
int ret;
@@ -1373,6 +1341,7 @@ int ib_register_device(struct ib_device *device, const char *name)
if (ret)
return ret;
+ setup_dma_device(device, dma_device);
ret = setup_device(device);
if (ret)
return ret;
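
For illustration, two hypothetical callers of the new signature (mydev, swdev, pdev and the name strings are placeholders): a PCI-backed driver passes its DMA-capable parent device, while a software-only driver passes NULL and relies on dma_virt_ops:

	ret = ib_register_device(&mydev->ibdev, "mydrv%d", &pdev->dev);

	ret = ib_register_device(&swdev->ibdev, "sw%d", NULL);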
@@ -2697,7 +2666,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_counters);
SET_OBJ_SIZE(dev_ops, ib_cq);
+ SET_OBJ_SIZE(dev_ops, ib_mw);
SET_OBJ_SIZE(dev_ops, ib_pd);
+ SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
SET_OBJ_SIZE(dev_ops, ib_srq);
SET_OBJ_SIZE(dev_ops, ib_ucontext);
SET_OBJ_SIZE(dev_ops, ib_xrcd);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6d3ed7c6e19e..ffe11b03724c 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -130,17 +130,6 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
lockdep_assert_held(&ufile->hw_destroy_rwsem);
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
- if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
- reason = RDMA_REMOVE_ABORT;
- ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
- attrs);
- /*
- * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
- * ib_is_destroy_retryable, cleanup_retryable == false here.
- */
- WARN_ON(ret);
- }
-
if (reason == RDMA_REMOVE_ABORT) {
WARN_ON(!list_empty(&uobj->list));
WARN_ON(!uobj->context);
@@ -674,11 +663,22 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
bool hw_obj_valid)
{
struct ib_uverbs_file *ufile = uobj->ufile;
+ int ret;
+
+ if (hw_obj_valid) {
+ ret = uobj->uapi_object->type_class->destroy_hw(
+ uobj, RDMA_REMOVE_ABORT, attrs);
+ /*
+ * If the driver couldn't destroy the object then go ahead and
+ * commit it. Leaking objects that can't be destroyed is only
+ * done during FD close after the driver has a few more tries to
+ * destroy it.
+ */
+ if (WARN_ON(ret))
+ return rdma_alloc_commit_uobject(uobj, attrs);
+ }
- uverbs_destroy_uobject(uobj,
- hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
- RDMA_REMOVE_ABORT,
- attrs);
+ uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
@@ -889,14 +889,14 @@ void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
if (!ufile->ucontext)
goto done;
- ufile->ucontext->closing = true;
ufile->ucontext->cleanup_retryable = true;
while (!list_empty(&ufile->uobjects))
if (__uverbs_cleanup_ufile(ufile, reason)) {
/*
* No entry was cleaned-up successfully during this
- * iteration
+ * iteration. It is a driver bug to fail destruction.
*/
+ WARN_ON(!list_empty(&ufile->uobjects));
break;
}
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 62fbb0ae9cb4..4aeeaaed0f17 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -123,32 +123,6 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
}
EXPORT_SYMBOL(rdma_restrack_count);
-static void set_kern_name(struct rdma_restrack_entry *res)
-{
- struct ib_pd *pd;
-
- switch (res->type) {
- case RDMA_RESTRACK_QP:
- pd = container_of(res, struct ib_qp, res)->pd;
- if (!pd) {
- WARN_ONCE(true, "XRC QPs are not supported\n");
- /* Survive, despite the programmer's error */
- res->kern_name = " ";
- }
- break;
- case RDMA_RESTRACK_MR:
- pd = container_of(res, struct ib_mr, res)->pd;
- break;
- default:
- /* Other types set kern_name directly */
- pd = NULL;
- break;
- }
-
- if (pd)
- res->kern_name = pd->res.kern_name;
-}
-
static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
{
switch (res->type) {
@@ -173,36 +147,77 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
}
}
-void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- const char *caller)
+/**
+ * rdma_restrack_attach_task() - attach the task onto this resource,
+ * valid for user space restrack entries.
+ * @res: resource entry
+ * @task: the task to attach
+ */
+static void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
+ struct task_struct *task)
{
- if (caller) {
- res->kern_name = caller;
+ if (WARN_ON_ONCE(!task))
return;
- }
if (res->task)
put_task_struct(res->task);
- get_task_struct(current);
- res->task = current;
+ get_task_struct(task);
+ res->task = task;
+ res->user = true;
}
-EXPORT_SYMBOL(rdma_restrack_set_task);
/**
- * rdma_restrack_attach_task() - attach the task onto this resource
+ * rdma_restrack_set_name() - set the name or owning task for this resource
* @res: resource entry
- * @task: the task to attach, the current task will be used if it is NULL.
+ * @caller: kernel name, the current task will be used if the caller is NULL.
*/
-void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
- struct task_struct *task)
+void rdma_restrack_set_name(struct rdma_restrack_entry *res, const char *caller)
{
- if (res->task)
- put_task_struct(res->task);
- get_task_struct(task);
- res->task = task;
+ if (caller) {
+ res->kern_name = caller;
+ return;
+ }
+
+ rdma_restrack_attach_task(res, current);
+}
+EXPORT_SYMBOL(rdma_restrack_set_name);
+
+/**
+ * rdma_restrack_parent_name() - set the restrack name properties based
+ * on parent restrack
+ * @dst: destination resource entry
+ * @parent: parent resource entry
+ */
+void rdma_restrack_parent_name(struct rdma_restrack_entry *dst,
+ const struct rdma_restrack_entry *parent)
+{
+ if (rdma_is_kernel_res(parent))
+ dst->kern_name = parent->kern_name;
+ else
+ rdma_restrack_attach_task(dst, parent->task);
+}
+EXPORT_SYMBOL(rdma_restrack_parent_name);
+
+/**
+ * rdma_restrack_new() - Initialize a new restrack entry so that the _put()
+ * interface can release the memory automatically.
+ * @res: entry to initialize
+ * @type: restrack type
+ */
+void rdma_restrack_new(struct rdma_restrack_entry *res,
+ enum rdma_restrack_type type)
+{
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->type = type;
}
+EXPORT_SYMBOL(rdma_restrack_new);
-static void rdma_restrack_add(struct rdma_restrack_entry *res)
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
struct rdma_restrack_root *rt;
@@ -213,8 +228,6 @@ static void rdma_restrack_add(struct rdma_restrack_entry *res)
rt = &dev->res[res->type];
- kref_init(&res->kref);
- init_completion(&res->comp);
if (res->type == RDMA_RESTRACK_QP) {
/* Special case to ensure that LQPN points to right QP */
struct ib_qp *qp = container_of(res, struct ib_qp, res);
@@ -236,38 +249,7 @@ static void rdma_restrack_add(struct rdma_restrack_entry *res)
if (!ret)
res->valid = true;
}
-
-/**
- * rdma_restrack_kadd() - add kernel object to the reource tracking database
- * @res: resource entry
- */
-void rdma_restrack_kadd(struct rdma_restrack_entry *res)
-{
- res->task = NULL;
- set_kern_name(res);
- res->user = false;
- rdma_restrack_add(res);
-}
-EXPORT_SYMBOL(rdma_restrack_kadd);
-
-/**
- * rdma_restrack_uadd() - add user object to the reource tracking database
- * @res: resource entry
- */
-void rdma_restrack_uadd(struct rdma_restrack_entry *res)
-{
- if ((res->type != RDMA_RESTRACK_CM_ID) &&
- (res->type != RDMA_RESTRACK_COUNTER))
- res->task = NULL;
-
- if (!res->task)
- rdma_restrack_set_task(res, NULL);
- res->kern_name = NULL;
-
- res->user = true;
- rdma_restrack_add(res);
-}
-EXPORT_SYMBOL(rdma_restrack_uadd);
+EXPORT_SYMBOL(rdma_restrack_add);
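
For illustration, the split lifecycle these helpers enable, as used by the cq.c hunk earlier in this diff (cq, cq_attr and caller as in that file):

	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
	rdma_restrack_set_name(&cq->res, caller);
	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret) {
		rdma_restrack_put(&cq->res);	/* never published */
		goto out_free_wc;
	}
	rdma_restrack_add(&cq->res);		/* publish once valid */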
int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
{
@@ -305,6 +287,10 @@ static void restrack_release(struct kref *kref)
struct rdma_restrack_entry *res;
res = container_of(kref, struct rdma_restrack_entry, kref);
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
complete(&res->comp);
}
@@ -314,14 +300,23 @@ int rdma_restrack_put(struct rdma_restrack_entry *res)
}
EXPORT_SYMBOL(rdma_restrack_put);
+/**
+ * rdma_restrack_del() - delete object from the resource tracking database
+ * @res: resource entry
+ */
void rdma_restrack_del(struct rdma_restrack_entry *res)
{
struct rdma_restrack_entry *old;
struct rdma_restrack_root *rt;
struct ib_device *dev;
- if (!res->valid)
- goto out;
+ if (!res->valid) {
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
+ return;
+ }
dev = res_to_dev(res);
if (WARN_ON(!dev))
@@ -330,16 +325,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
rt = &dev->res[res->type];
old = xa_erase(&rt->xa, res->id);
+ if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP)
+ return;
WARN_ON(old != res);
res->valid = false;
rdma_restrack_put(res);
wait_for_completion(&res->comp);
-
-out:
- if (res->task) {
- put_task_struct(res->task);
- res->task = NULL;
- }
}
EXPORT_SYMBOL(rdma_restrack_del);
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
index d084e5f89849..6a04fc41f738 100644
--- a/drivers/infiniband/core/restrack.h
+++ b/drivers/infiniband/core/restrack.h
@@ -25,6 +25,12 @@ struct rdma_restrack_root {
int rdma_restrack_init(struct ib_device *dev);
void rdma_restrack_clean(struct ib_device *dev);
-void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
- struct task_struct *task);
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+void rdma_restrack_new(struct rdma_restrack_entry *res,
+ enum rdma_restrack_type type);
+void rdma_restrack_set_name(struct rdma_restrack_entry *res,
+ const char *caller);
+void rdma_restrack_parent_name(struct rdma_restrack_entry *dst,
+ const struct rdma_restrack_entry *parent);
#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c11e50510e49..914cddea525d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -59,7 +59,7 @@ struct ib_port {
struct gid_attr_group *gid_attr_group;
struct attribute_group gid_group;
struct attribute_group *pkey_group;
- struct attribute_group *pma_table;
+ const struct attribute_group *pma_table;
struct attribute_group *hw_stats_ag;
struct rdma_hw_stats *hw_stats;
u8 port_num;
@@ -387,7 +387,8 @@ static ssize_t _show_port_gid_attr(
gid_attr = rdma_get_gid_attr(p->ibdev, p->port_num, tab_attr->index);
if (IS_ERR(gid_attr))
- return PTR_ERR(gid_attr);
+ /* -EINVAL is returned for user space compatibility reasons. */
+ return -EINVAL;
ret = print(gid_attr, buf);
rdma_put_gid_attr(gid_attr);
@@ -653,17 +654,17 @@ static struct attribute *pma_attrs_noietf[] = {
NULL
};
-static struct attribute_group pma_group = {
+static const struct attribute_group pma_group = {
.name = "counters",
.attrs = pma_attrs
};
-static struct attribute_group pma_group_ext = {
+static const struct attribute_group pma_group_ext = {
.name = "counters",
.attrs = pma_attrs_ext
};
-static struct attribute_group pma_group_noietf = {
+static const struct attribute_group pma_group_noietf = {
.name = "counters",
.attrs = pma_attrs_noietf
};
@@ -778,8 +779,8 @@ err:
* Figure out which counter table to use depending on
* the device capabilities.
*/
-static struct attribute_group *get_counter_table(struct ib_device *dev,
- int port_num)
+static const struct attribute_group *get_counter_table(struct ib_device *dev,
+ int port_num)
{
struct ib_class_port_info cpi;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 1d184ea05eba..ffe2563ad345 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -80,7 +80,6 @@ struct ucma_file {
struct list_head ctx_list;
struct list_head event_list;
wait_queue_head_t poll_wait;
- struct workqueue_struct *close_wq;
};
struct ucma_context {
@@ -88,7 +87,7 @@ struct ucma_context {
struct completion comp;
refcount_t ref;
int events_reported;
- int backlog;
+ atomic_t backlog;
struct ucma_file *file;
struct rdma_cm_id *cm_id;
@@ -96,11 +95,6 @@ struct ucma_context {
u64 uid;
struct list_head list;
- struct list_head mc_list;
- /* mark that device is in process of destroying the internal HW
- * resources, protected by the ctx_table lock
- */
- int closing;
/* sync between removal event and id destroy, protected by file mut */
int destroying;
struct work_struct close_work;
@@ -113,23 +107,22 @@ struct ucma_multicast {
u64 uid;
u8 join_state;
- struct list_head list;
struct sockaddr_storage addr;
};
struct ucma_event {
struct ucma_context *ctx;
+ struct ucma_context *conn_req_ctx;
struct ucma_multicast *mc;
struct list_head list;
- struct rdma_cm_id *cm_id;
struct rdma_ucm_event_resp resp;
- struct work_struct close_work;
};
static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);
static const struct file_operations ucma_fops;
+static int __destroy_id(struct ucma_context *ctx);
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
@@ -139,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
ctx = xa_load(&ctx_table, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
- else if (ctx->file != file || !ctx->cm_id)
+ else if (ctx->file != file)
ctx = ERR_PTR(-EINVAL);
return ctx;
}
@@ -150,12 +143,9 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
xa_lock(&ctx_table);
ctx = _ucma_find_context(id, file);
- if (!IS_ERR(ctx)) {
- if (ctx->closing)
- ctx = ERR_PTR(-EIO);
- else
- refcount_inc(&ctx->ref);
- }
+ if (!IS_ERR(ctx))
+ if (!refcount_inc_not_zero(&ctx->ref))
+ ctx = ERR_PTR(-ENXIO);
xa_unlock(&ctx_table);
return ctx;
}
@@ -183,14 +173,6 @@ static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
return ctx;
}
-static void ucma_close_event_id(struct work_struct *work)
-{
- struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
-
- rdma_destroy_id(uevent_close->cm_id);
- kfree(uevent_close);
-}
-
static void ucma_close_id(struct work_struct *work)
{
struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
@@ -203,6 +185,14 @@ static void ucma_close_id(struct work_struct *work)
wait_for_completion(&ctx->comp);
/* No new events will be generated after destroying the id. */
rdma_destroy_id(ctx->cm_id);
+
+ /*
+ * At this point ctx->ref is zero so the only place the ctx can be is in
+ * a uevent or in __destroy_id(). Since the former doesn't touch
+ * ctx->cm_id and the latter synchronously cancels this work, there are
+ * no races with this store.
+ */
+ ctx->cm_id = NULL;
}
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
@@ -216,39 +206,23 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id);
refcount_set(&ctx->ref, 1);
init_completion(&ctx->comp);
- INIT_LIST_HEAD(&ctx->mc_list);
+ /* So that list_del() works even if ucma_finish_ctx() is never called */
+ INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
mutex_init(&ctx->mutex);
- if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
- goto error;
-
- list_add_tail(&ctx->list, &file->ctx_list);
+ if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
+ kfree(ctx);
+ return NULL;
+ }
return ctx;
-
-error:
- kfree(ctx);
- return NULL;
}
-static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
+static void ucma_finish_ctx(struct ucma_context *ctx)
{
- struct ucma_multicast *mc;
-
- mc = kzalloc(sizeof(*mc), GFP_KERNEL);
- if (!mc)
- return NULL;
-
- mc->ctx = ctx;
- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL))
- goto error;
-
- list_add_tail(&mc->list, &ctx->mc_list);
- return mc;
-
-error:
- kfree(mc);
- return NULL;
+ lockdep_assert_held(&ctx->file->mut);
+ list_add_tail(&ctx->list, &ctx->file->ctx_list);
+ xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
}
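
For illustration, the two-phase pattern this enables (a sketch assembled from the create path later in this diff): ucma_alloc_ctx() reserves the ID with a NULL entry so lookups in ctx_table fail until the context is complete, and ucma_finish_ctx() publishes it:

	ctx = ucma_alloc_ctx(file);		/* xa_alloc(..., NULL, ...) */
	ctx->cm_id = rdma_create_user_id(ucma_event_handler, ctx,
					 cmd.ps, qp_type);
	mutex_lock(&file->mut);
	ucma_finish_ctx(ctx);			/* xa_store() makes it visible */
	mutex_unlock(&file->mut);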
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
@@ -280,10 +254,15 @@ static void ucma_copy_ud_event(struct ib_device *device,
dst->qkey = src->qkey;
}
-static void ucma_set_event_context(struct ucma_context *ctx,
- struct rdma_cm_event *event,
- struct ucma_event *uevent)
+static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
+ struct rdma_cm_event *event)
{
+ struct ucma_event *uevent;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent)
+ return NULL;
+
uevent->ctx = ctx;
switch (event->event) {
case RDMA_CM_EVENT_MULTICAST_JOIN:
@@ -298,44 +277,56 @@ static void ucma_set_event_context(struct ucma_context *ctx,
uevent->resp.id = ctx->id;
break;
}
+ uevent->resp.event = event->event;
+ uevent->resp.status = event->status;
+ if (ctx->cm_id->qp_type == IB_QPT_UD)
+ ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
+ &event->param.ud);
+ else
+ ucma_copy_conn_event(&uevent->resp.param.conn,
+ &event->param.conn);
+
+ uevent->resp.ece.vendor_id = event->ece.vendor_id;
+ uevent->resp.ece.attr_mod = event->ece.attr_mod;
+ return uevent;
}
-/* Called with file->mut locked for the relevant context. */
-static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
+static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
{
- struct ucma_context *ctx = cm_id->context;
- struct ucma_event *con_req_eve;
- int event_found = 0;
+ struct ucma_context *listen_ctx = cm_id->context;
+ struct ucma_context *ctx;
+ struct ucma_event *uevent;
- if (ctx->destroying)
- return;
+ if (!atomic_add_unless(&listen_ctx->backlog, -1, 0))
+ return -ENOMEM;
+ ctx = ucma_alloc_ctx(listen_ctx->file);
+ if (!ctx)
+ goto err_backlog;
+ ctx->cm_id = cm_id;
- /* only if context is pointing to cm_id that it owns it and can be
- * queued to be closed, otherwise that cm_id is an inflight one that
- * is part of that context event list pending to be detached and
- * reattached to its new context as part of ucma_get_event,
- * handled separately below.
- */
- if (ctx->cm_id == cm_id) {
- xa_lock(&ctx_table);
- ctx->closing = 1;
- xa_unlock(&ctx_table);
- queue_work(ctx->file->close_wq, &ctx->close_work);
- return;
- }
+ uevent = ucma_create_uevent(listen_ctx, event);
+ if (!uevent)
+ goto err_alloc;
+ uevent->conn_req_ctx = ctx;
+ uevent->resp.id = ctx->id;
- list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
- if (con_req_eve->cm_id == cm_id &&
- con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- list_del(&con_req_eve->list);
- INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
- queue_work(ctx->file->close_wq, &con_req_eve->close_work);
- event_found = 1;
- break;
- }
- }
- if (!event_found)
- pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
+ ctx->cm_id->context = ctx;
+
+ mutex_lock(&ctx->file->mut);
+ ucma_finish_ctx(ctx);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+ return 0;
+
+err_alloc:
+ xa_erase(&ctx_table, ctx->id);
+ kfree(ctx);
+err_backlog:
+ atomic_inc(&listen_ctx->backlog);
+ /* Returning error causes the new ID to be destroyed */
+ return -ENOMEM;
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -343,66 +334,38 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
{
struct ucma_event *uevent;
struct ucma_context *ctx = cm_id->context;
- int ret = 0;
-
- uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
- if (!uevent)
- return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
- mutex_lock(&ctx->file->mut);
- uevent->cm_id = cm_id;
- ucma_set_event_context(ctx, event, uevent);
- uevent->resp.event = event->event;
- uevent->resp.status = event->status;
- if (cm_id->qp_type == IB_QPT_UD)
- ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
- &event->param.ud);
- else
- ucma_copy_conn_event(&uevent->resp.param.conn,
- &event->param.conn);
-
- uevent->resp.ece.vendor_id = event->ece.vendor_id;
- uevent->resp.ece.attr_mod = event->ece.attr_mod;
-
- if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- if (!ctx->backlog) {
- ret = -ENOMEM;
- kfree(uevent);
- goto out;
- }
- ctx->backlog--;
- } else if (!ctx->uid || ctx->cm_id != cm_id) {
- /*
- * We ignore events for new connections until userspace has set
- * their context. This can only happen if an error occurs on a
- * new connection before the user accepts it. This is okay,
- * since the accept will just fail later. However, we do need
- * to release the underlying HW resources in case of a device
- * removal event.
- */
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
- ucma_removal_event_handler(cm_id);
+ if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ return ucma_connect_event_handler(cm_id, event);
- kfree(uevent);
- goto out;
+ /*
+ * We ignore events for new connections until userspace has set their
+ * context. This can only happen if an error occurs on a new connection
+ * before the user accepts it. This is okay, since the accept will just
+ * fail later. However, we do need to release the underlying HW
+ * resources in case of a device removal event.
+ */
+ if (ctx->uid) {
+ uevent = ucma_create_uevent(ctx, event);
+ if (!uevent)
+ return 0;
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
}
- list_add_tail(&uevent->list, &ctx->file->event_list);
- wake_up_interruptible(&ctx->file->poll_wait);
- if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
- ucma_removal_event_handler(cm_id);
-out:
- mutex_unlock(&ctx->file->mut);
- return ret;
+ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
+ queue_work(system_unbound_wq, &ctx->close_work);
+ return 0;
}
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct ucma_context *ctx;
struct rdma_ucm_get_event cmd;
struct ucma_event *uevent;
- int ret = 0;
/*
* Old 32 bit user space does not send the 4 byte padding in the
@@ -429,35 +392,25 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
mutex_lock(&file->mut);
}
- uevent = list_entry(file->event_list.next, struct ucma_event, list);
-
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
- ctx = ucma_alloc_ctx(file);
- if (!ctx) {
- ret = -ENOMEM;
- goto done;
- }
- uevent->ctx->backlog++;
- ctx->cm_id = uevent->cm_id;
- ctx->cm_id->context = ctx;
- uevent->resp.id = ctx->id;
- }
+ uevent = list_first_entry(&file->event_list, struct ucma_event, list);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&uevent->resp,
min_t(size_t, out_len, sizeof(uevent->resp)))) {
- ret = -EFAULT;
- goto done;
+ mutex_unlock(&file->mut);
+ return -EFAULT;
}
list_del(&uevent->list);
uevent->ctx->events_reported++;
if (uevent->mc)
uevent->mc->events_reported++;
- kfree(uevent);
-done:
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ atomic_inc(&uevent->ctx->backlog);
mutex_unlock(&file->mut);
- return ret;
+
+ kfree(uevent);
+ return 0;
}
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
@@ -498,58 +451,60 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
if (ret)
return ret;
- mutex_lock(&file->mut);
ctx = ucma_alloc_ctx(file);
- mutex_unlock(&file->mut);
if (!ctx)
return -ENOMEM;
ctx->uid = cmd.uid;
- cm_id = __rdma_create_id(current->nsproxy->net_ns,
- ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
+ cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
goto err1;
}
+ ctx->cm_id = cm_id;
resp.id = ctx->id;
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp))) {
- ret = -EFAULT;
- goto err2;
+ xa_erase(&ctx_table, ctx->id);
+ __destroy_id(ctx);
+ return -EFAULT;
}
- ctx->cm_id = cm_id;
+ mutex_lock(&file->mut);
+ ucma_finish_ctx(ctx);
+ mutex_unlock(&file->mut);
return 0;
-err2:
- rdma_destroy_id(cm_id);
err1:
xa_erase(&ctx_table, ctx->id);
- mutex_lock(&file->mut);
- list_del(&ctx->list);
- mutex_unlock(&file->mut);
kfree(ctx);
return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
- struct ucma_multicast *mc, *tmp;
+ struct ucma_multicast *mc;
+ unsigned long index;
- mutex_lock(&ctx->file->mut);
- list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
- list_del(&mc->list);
- xa_erase(&multicast_table, mc->id);
+ xa_for_each(&multicast_table, index, mc) {
+ if (mc->ctx != ctx)
+ continue;
+ /*
+ * At this point mc->ctx->ref is 0, so no reader can look the mc up
+ * again; that alone is enough serialization to erase and free it.
+ */
+ xa_erase(&multicast_table, index);
kfree(mc);
}
- mutex_unlock(&ctx->file->mut);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
struct ucma_event *uevent, *tmp;
+ rdma_lock_handler(mc->ctx->cm_id);
+ mutex_lock(&mc->ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
if (uevent->mc != mc)
continue;
@@ -557,6 +512,8 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
list_del(&uevent->list);
kfree(uevent);
}
+ mutex_unlock(&mc->ctx->file->mut);
+ rdma_unlock_handler(mc->ctx->cm_id);
}
/*
@@ -564,10 +521,6 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
* this point, no new events will be reported from the hardware. However, we
* still need to cleanup the UCMA context for this ID. Specifically, there
* might be events that have not yet been consumed by the user space software.
- * These might include pending connect requests which we have not completed
- * processing. We cannot call rdma_destroy_id while holding the lock of the
- * context (file->mut), as it might cause a deadlock. We therefore extract all
- * relevant events from the context pending events list while holding the
- * mutex. After that we release them as needed.
+ * Such events are moved off the event list under the mutex and released.
*/
static int ucma_free_ctx(struct ucma_context *ctx)
@@ -576,31 +529,57 @@ static int ucma_free_ctx(struct ucma_context *ctx)
struct ucma_event *uevent, *tmp;
LIST_HEAD(list);
-
ucma_cleanup_multicast(ctx);
/* Cleanup events not yet reported to the user. */
mutex_lock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
- if (uevent->ctx == ctx)
+ if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
list_move_tail(&uevent->list, &list);
}
list_del(&ctx->list);
+ events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut);
+ /*
+ * If this was a listening ID then any connections spawned from it
+ * that have not been delivered to userspace are cleaned up too.
+ * Must be done outside any locks.
+ */
list_for_each_entry_safe(uevent, tmp, &list, list) {
list_del(&uevent->list);
- if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
- rdma_destroy_id(uevent->cm_id);
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+ uevent->conn_req_ctx != ctx)
+ __destroy_id(uevent->conn_req_ctx);
kfree(uevent);
}
- events_reported = ctx->events_reported;
mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
+static int __destroy_id(struct ucma_context *ctx)
+{
+ /*
+ * If the refcount is already 0 then ucma_close_id() has already
+ * destroyed the cm_id, otherwise holding the refcount keeps cm_id
+ * valid. Prevent queue_work() from being called.
+ */
+ if (refcount_inc_not_zero(&ctx->ref)) {
+ rdma_lock_handler(ctx->cm_id);
+ ctx->destroying = 1;
+ rdma_unlock_handler(ctx->cm_id);
+ ucma_put_ctx(ctx);
+ }
+
+ cancel_work_sync(&ctx->close_work);
+ /* At this point it's guaranteed that there is no inflight closing task */
+ if (ctx->cm_id)
+ ucma_close_id(&ctx->close_work);
+ return ucma_free_ctx(ctx);
+}
+
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
@@ -624,24 +603,7 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- mutex_lock(&ctx->file->mut);
- ctx->destroying = 1;
- mutex_unlock(&ctx->file->mut);
-
- flush_workqueue(ctx->file->close_wq);
- /* At this point it's guaranteed that there is no inflight
- * closing task */
- xa_lock(&ctx_table);
- if (!ctx->closing) {
- xa_unlock(&ctx_table);
- ucma_put_ctx(ctx);
- wait_for_completion(&ctx->comp);
- rdma_destroy_id(ctx->cm_id);
- } else {
- xa_unlock(&ctx_table);
- }
-
- resp.events_reported = ucma_free_ctx(ctx);
+ resp.events_reported = __destroy_id(ctx);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1124,10 +1086,12 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
- cmd.backlog : max_backlog;
+ if (cmd.backlog <= 0 || cmd.backlog > max_backlog)
+ cmd.backlog = max_backlog;
+ atomic_set(&ctx->backlog, cmd.backlog);
+
mutex_lock(&ctx->mutex);
- ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ ret = rdma_listen(ctx->cm_id, cmd.backlog);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1160,16 +1124,20 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
- mutex_lock(&file->mut);
mutex_lock(&ctx->mutex);
- ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
- mutex_unlock(&ctx->mutex);
- if (!ret)
+ rdma_lock_handler(ctx->cm_id);
+ ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece);
+ if (!ret) {
+ /* The uid must be set atomically with the handler */
ctx->uid = cmd.uid;
- mutex_unlock(&file->mut);
+ }
+ rdma_unlock_handler(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
} else {
mutex_lock(&ctx->mutex);
- ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
+ rdma_lock_handler(ctx->cm_id);
+ ret = rdma_accept_ece(ctx->cm_id, NULL, &ece);
+ rdma_unlock_handler(ctx->cm_id);
mutex_unlock(&ctx->mutex);
}
ucma_put_ctx(ctx);
@@ -1482,44 +1450,52 @@ static ssize_t ucma_process_join(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- mutex_lock(&file->mut);
- mc = ucma_alloc_multicast(ctx);
+ mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc) {
ret = -ENOMEM;
- goto err1;
+ goto err_put_ctx;
}
+
+ mc->ctx = ctx;
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
+
+ if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto err_free_mc;
+ }
+
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
mutex_unlock(&ctx->mutex);
if (ret)
- goto err2;
+ goto err_xa_erase;
resp.id = mc->id;
if (copy_to_user(u64_to_user_ptr(cmd->response),
&resp, sizeof(resp))) {
ret = -EFAULT;
- goto err3;
+ goto err_leave_multicast;
}
xa_store(&multicast_table, mc->id, mc, 0);
- mutex_unlock(&file->mut);
ucma_put_ctx(ctx);
return 0;
-err3:
+err_leave_multicast:
+ mutex_lock(&ctx->mutex);
rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
-err2:
+err_xa_erase:
xa_erase(&multicast_table, mc->id);
- list_del(&mc->list);
+err_free_mc:
kfree(mc);
-err1:
- mutex_unlock(&file->mut);
+err_put_ctx:
ucma_put_ctx(ctx);
return ret;
}
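/*
 * The join path above is the XArray reserve-then-publish idiom: xa_alloc()
 * with a NULL entry hands out an ID while lookups still see nothing, and
 * xa_store() publishes the object only once everything has succeeded. A
 * sketch with illustrative names:
 *
 *	u32 id;
 *	if (xa_alloc(&table, &id, NULL, xa_limit_32b, GFP_KERNEL))
 *		return -ENOMEM;		// id reserved, xa_load(id) == NULL
 *	if (setup(obj)) {
 *		xa_erase(&table, id);	// drop the reservation on failure
 *		return -EIO;
 *	}
 *	xa_store(&table, id, obj, GFP_KERNEL);	// now visible to lookups
 */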
@@ -1581,7 +1557,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = xa_load(&multicast_table, cmd.id);
if (!mc)
mc = ERR_PTR(-ENOENT);
- else if (mc->ctx->file != file)
+ else if (READ_ONCE(mc->ctx->file) != file)
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
@@ -1598,10 +1574,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
- mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc);
- list_del(&mc->list);
- mutex_unlock(&mc->ctx->file->mut);
ucma_put_ctx(mc->ctx);
resp.events_reported = mc->events_reported;
@@ -1614,45 +1587,15 @@ out:
return ret;
}
-static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
- /* Acquire mutex's based on pointer comparison to prevent deadlock. */
- if (file1 < file2) {
- mutex_lock(&file1->mut);
- mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
- } else {
- mutex_lock(&file2->mut);
- mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
- }
-}
-
-static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
-{
- if (file1 < file2) {
- mutex_unlock(&file2->mut);
- mutex_unlock(&file1->mut);
- } else {
- mutex_unlock(&file1->mut);
- mutex_unlock(&file2->mut);
- }
-}
-
-static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
-{
- struct ucma_event *uevent, *tmp;
-
- list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
- if (uevent->ctx == ctx)
- list_move_tail(&uevent->list, &file->event_list);
-}
-
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_migrate_id cmd;
struct rdma_ucm_migrate_resp resp;
+ struct ucma_event *uevent, *tmp;
struct ucma_context *ctx;
+ LIST_HEAD(event_list);
struct fd f;
struct ucma_file *cur_file;
int ret = 0;
@@ -1668,40 +1611,53 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
ret = -EINVAL;
goto file_put;
}
+ cur_file = f.file->private_data;
/* Validate current fd and prevent destruction of id. */
- ctx = ucma_get_ctx(f.file->private_data, cmd.id);
+ ctx = ucma_get_ctx(cur_file, cmd.id);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto file_put;
}
- cur_file = ctx->file;
- if (cur_file == new_file) {
- resp.events_reported = ctx->events_reported;
- goto response;
- }
-
+ rdma_lock_handler(ctx->cm_id);
/*
- * Migrate events between fd's, maintaining order, and avoiding new
- * events being added before existing events.
+ * ctx->file can only be changed under the handler & xa_lock. xa_load()
+ * must be checked again to ensure the ctx hasn't begun destruction
+ * since the ucma_get_ctx().
*/
- ucma_lock_files(cur_file, new_file);
xa_lock(&ctx_table);
-
- list_move_tail(&ctx->list, &new_file->ctx_list);
- ucma_move_events(ctx, new_file);
+ if (_ucma_find_context(cmd.id, cur_file) != ctx) {
+ xa_unlock(&ctx_table);
+ ret = -ENOENT;
+ goto err_unlock;
+ }
ctx->file = new_file;
+ xa_unlock(&ctx_table);
+
+ mutex_lock(&cur_file->mut);
+ list_del(&ctx->list);
+ /*
+ * At this point lock_handler() prevents addition of new uevents for
+ * this ctx.
+ */
+ list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list)
+ if (uevent->ctx == ctx)
+ list_move_tail(&uevent->list, &event_list);
resp.events_reported = ctx->events_reported;
+ mutex_unlock(&cur_file->mut);
- xa_unlock(&ctx_table);
- ucma_unlock_files(cur_file, new_file);
+ mutex_lock(&new_file->mut);
+ list_add_tail(&ctx->list, &new_file->ctx_list);
+ list_splice_tail(&event_list, &new_file->event_list);
+ mutex_unlock(&new_file->mut);
-response:
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
+err_unlock:
+ rdma_unlock_handler(ctx->cm_id);
ucma_put_ctx(ctx);
file_put:
fdput(f);
@@ -1801,13 +1757,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
if (!file)
return -ENOMEM;
- file->close_wq = alloc_ordered_workqueue("ucma_close_id",
- WQ_MEM_RECLAIM);
- if (!file->close_wq) {
- kfree(file);
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&file->event_list);
INIT_LIST_HEAD(&file->ctx_list);
init_waitqueue_head(&file->poll_wait);
@@ -1822,37 +1771,22 @@ static int ucma_open(struct inode *inode, struct file *filp)
static int ucma_close(struct inode *inode, struct file *filp)
{
struct ucma_file *file = filp->private_data;
- struct ucma_context *ctx, *tmp;
- mutex_lock(&file->mut);
- list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
- ctx->destroying = 1;
- mutex_unlock(&file->mut);
+	/*
+	 * All paths that touch the ctx_list or its entries starting from
+	 * write() are prevented by this being an FD release function. The
+	 * list_add_tail() in ucma_connect_event_handler() can run
+	 * concurrently, however it only adds to the list *after* a listening
+	 * ID. By only reading the first entry of the list, and relying on
+	 * __destroy_id() to block ucma_connect_event_handler(), no additional
+	 * locking is needed.
+	 */
+ while (!list_empty(&file->ctx_list)) {
+ struct ucma_context *ctx = list_first_entry(
+ &file->ctx_list, struct ucma_context, list);
xa_erase(&ctx_table, ctx->id);
- flush_workqueue(file->close_wq);
- /* At that step once ctx was marked as destroying and workqueue
- * was flushed we are safe from any inflights handlers that
- * might put other closing task.
- */
- xa_lock(&ctx_table);
- if (!ctx->closing) {
- xa_unlock(&ctx_table);
- ucma_put_ctx(ctx);
- wait_for_completion(&ctx->comp);
- /* rdma_destroy_id ensures that no event handlers are
- * inflight for that id before releasing it.
- */
- rdma_destroy_id(ctx->cm_id);
- } else {
- xa_unlock(&ctx_table);
- }
-
- ucma_free_ctx(ctx);
- mutex_lock(&file->mut);
+ __destroy_id(ctx);
}
- mutex_unlock(&file->mut);
- destroy_workqueue(file->close_wq);
kfree(file);
return 0;
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 831bff8d52e5..e9fecbdf391b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,6 +39,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -60,73 +61,6 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
sg_free_table(&umem->sg_head);
}
-/* ib_umem_add_sg_table - Add N contiguous pages to scatter table
- *
- * sg: current scatterlist entry
- * page_list: array of npage struct page pointers
- * npages: number of pages in page_list
- * max_seg_sz: maximum segment size in bytes
- * nents: [out] number of entries in the scatterlist
- *
- * Return new end of scatterlist
- */
-static struct scatterlist *ib_umem_add_sg_table(struct scatterlist *sg,
- struct page **page_list,
- unsigned long npages,
- unsigned int max_seg_sz,
- int *nents)
-{
- unsigned long first_pfn;
- unsigned long i = 0;
- bool update_cur_sg = false;
- bool first = !sg_page(sg);
-
- /* Check if new page_list is contiguous with end of previous page_list.
- * sg->length here is a multiple of PAGE_SIZE and sg->offset is 0.
- */
- if (!first && (page_to_pfn(sg_page(sg)) + (sg->length >> PAGE_SHIFT) ==
- page_to_pfn(page_list[0])))
- update_cur_sg = true;
-
- while (i != npages) {
- unsigned long len;
- struct page *first_page = page_list[i];
-
- first_pfn = page_to_pfn(first_page);
-
- /* Compute the number of contiguous pages we have starting
- * at i
- */
- for (len = 0; i != npages &&
- first_pfn + len == page_to_pfn(page_list[i]) &&
- len < (max_seg_sz >> PAGE_SHIFT);
- len++)
- i++;
-
- /* Squash N contiguous pages from page_list into current sge */
- if (update_cur_sg) {
- if ((max_seg_sz - sg->length) >= (len << PAGE_SHIFT)) {
- sg_set_page(sg, sg_page(sg),
- sg->length + (len << PAGE_SHIFT),
- 0);
- update_cur_sg = false;
- continue;
- }
- update_cur_sg = false;
- }
-
- /* Squash N contiguous pages into next sge or first sge */
- if (!first)
- sg = sg_next(sg);
-
- (*nents)++;
- sg_set_page(sg, first_page, len << PAGE_SHIFT, 0);
- first = false;
- }
-
- return sg;
-}
-
/**
* ib_umem_find_best_pgsz - Find best HW page size to use for this MR
*
@@ -146,18 +80,28 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long virt)
{
struct scatterlist *sg;
- unsigned int best_pg_bit;
unsigned long va, pgoff;
dma_addr_t mask;
int i;
+ /* rdma_for_each_block() has a bug if the page size is smaller than the
+ * page size used to build the umem. For now prevent smaller page sizes
+ * from being returned.
+ */
+ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
+
/* At minimum, drivers must support PAGE_SIZE or smaller */
if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
return 0;
- va = virt;
- /* max page size not to exceed MR length */
- mask = roundup_pow_of_two(umem->length);
+ umem->iova = va = virt;
+ /* The best result is the smallest page size that results in the minimum
+ * number of required pages. Compute the largest page size that could
+ * work based on VA address bits that don't change.
+ */
+ mask = pgsz_bitmap &
+ GENMASK(BITS_PER_LONG - 1,
+ bits_per((umem->length - 1 + virt) ^ virt));
/* offset into first SGL */
pgoff = umem->address & ~PAGE_MASK;
@@ -175,9 +119,14 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
mask |= va;
pgoff = 0;
}
- best_pg_bit = rdma_find_pg_bit(mask, pgsz_bitmap);
- return BIT_ULL(best_pg_bit);
+	/* The mask accumulates 1's in each position where the VA and physical
+	 * address differ, so the number of trailing zeros gives the largest
+	 * page size that can pass the VA through to the physical address.
+	 */
+ if (mask)
+ pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
+ return rounddown_pow_of_two(pgsz_bitmap);
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
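/*
 * Worked example for the mask arithmetic above (ignoring the length term
 * for brevity): a single-entry MR with VA 0x6000 mapped at DMA address
 * 0x9e000 gives mask |= 0x6000 ^ 0x9e000 = 0x98000, whose lowest set bit
 * is bit 15. GENMASK(15, 0) then discards every page size above 32KiB, so
 * a driver bitmap of 4K | 64K | 2M collapses to 4K and
 * rounddown_pow_of_two() returns 0x1000.
 */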
@@ -201,7 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
struct mm_struct *mm;
unsigned long npages;
int ret;
- struct scatterlist *sg;
+ struct scatterlist *sg = NULL;
unsigned int gup_flags = FOLL_WRITE;
/*
@@ -224,6 +173,11 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
umem->ibdev = device;
umem->length = size;
umem->address = addr;
+ /*
+ * Drivers should call ib_umem_find_best_pgsz() to set the iova
+ * correctly.
+ */
+ umem->iova = addr;
umem->writable = ib_access_writable(access);
umem->owning_mm = mm = current->mm;
mmgrab(mm);
@@ -251,15 +205,9 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
cur_base = addr & PAGE_MASK;
- ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
- if (ret)
- goto vma;
-
if (!umem->writable)
gup_flags |= FOLL_FORCE;
- sg = umem->sg_head.sgl;
-
while (npages) {
cond_resched();
ret = pin_user_pages_fast(cur_base,
@@ -271,15 +219,19 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
goto umem_release;
cur_base += ret * PAGE_SIZE;
- npages -= ret;
-
- sg = ib_umem_add_sg_table(sg, page_list, ret,
- dma_get_max_seg_size(device->dma_device),
- &umem->sg_nents);
+ npages -= ret;
+ sg = __sg_alloc_table_from_pages(
+ &umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
+ dma_get_max_seg_size(device->dma_device), sg, npages,
+ GFP_KERNEL);
+ umem->sg_nents = umem->sg_head.nents;
+ if (IS_ERR(sg)) {
+ unpin_user_pages_dirty_lock(page_list, ret, 0);
+ ret = PTR_ERR(sg);
+ goto umem_release;
+ }
}
- sg_mark_end(sg);
-
if (access & IB_ACCESS_RELAXED_ORDERING)
dma_attr |= DMA_ATTR_WEAK_ORDERING;
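/*
 * Note on the loop above: __sg_alloc_table_from_pages() is called once per
 * pin_user_pages_fast() batch and appends to the table built on earlier
 * passes ("sg" carries the running tail, "npages" the pages still
 * expected), coalescing physically contiguous pages up to the DMA layer's
 * max segment size. It replaces the open-coded ib_umem_add_sg_table()
 * removed earlier in this patch.
 */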
@@ -297,7 +249,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
umem_release:
__ib_umem_release(device, umem, 0);
-vma:
atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
free_page((unsigned long) page_list);
@@ -329,18 +280,6 @@ void ib_umem_release(struct ib_umem *umem)
}
EXPORT_SYMBOL(ib_umem_release);
-int ib_umem_page_count(struct ib_umem *umem)
-{
- int i, n = 0;
- struct scatterlist *sg;
-
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
- n += sg_dma_len(sg) >> PAGE_SHIFT;
-
- return n;
-}
-EXPORT_SYMBOL(ib_umem_page_count);
-
/*
* Copy from the given ib_umem's pages to the given buffer.
*
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index cc6b4befde7c..323f6cf00682 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -40,6 +40,7 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
+#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <rdma/ib_verbs.h>
@@ -60,7 +61,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
size_t page_size = 1UL << umem_odp->page_shift;
unsigned long start;
unsigned long end;
- size_t pages;
+ size_t ndmas, npfns;
start = ALIGN_DOWN(umem_odp->umem.address, page_size);
if (check_add_overflow(umem_odp->umem.address,
@@ -71,20 +72,21 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
if (unlikely(end < page_size))
return -EOVERFLOW;
- pages = (end - start) >> umem_odp->page_shift;
- if (!pages)
+ ndmas = (end - start) >> umem_odp->page_shift;
+ if (!ndmas)
return -EINVAL;
- umem_odp->page_list = kvcalloc(
- pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
- if (!umem_odp->page_list)
+ npfns = (end - start) >> PAGE_SHIFT;
+ umem_odp->pfn_list = kvcalloc(
+ npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
+ if (!umem_odp->pfn_list)
return -ENOMEM;
umem_odp->dma_list = kvcalloc(
- pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
+ ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
if (!umem_odp->dma_list) {
ret = -ENOMEM;
- goto out_page_list;
+ goto out_pfn_list;
}
ret = mmu_interval_notifier_insert(&umem_odp->notifier,
@@ -98,8 +100,8 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
out_dma_list:
kvfree(umem_odp->dma_list);
-out_page_list:
- kvfree(umem_odp->page_list);
+out_pfn_list:
+ kvfree(umem_odp->pfn_list);
return ret;
}
@@ -276,7 +278,7 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
mutex_unlock(&umem_odp->umem_mutex);
mmu_interval_notifier_remove(&umem_odp->notifier);
kvfree(umem_odp->dma_list);
- kvfree(umem_odp->page_list);
+ kvfree(umem_odp->pfn_list);
}
put_pid(umem_odp->tgid);
kfree(umem_odp);
@@ -287,87 +289,56 @@ EXPORT_SYMBOL(ib_umem_odp_release);
* Map for DMA and insert a single page into the on-demand paging page tables.
*
* @umem: the umem to insert the page to.
- * @page_index: index in the umem to add the page to.
+ * @dma_index: index in the umem to add the DMA address to.
* @page: the page struct to map and add.
* @access_mask: access permissions needed for this page.
- * @current_seq: sequence number for synchronization with invalidations.
- * the sequence number is taken from
- * umem_odp->notifiers_seq.
*
- * The function returns -EFAULT if the DMA mapping operation fails. It returns
- * -EAGAIN if a concurrent invalidation prevents us from updating the page.
+ * The function returns -EFAULT if the DMA mapping operation fails.
*
- * The page is released via put_page even if the operation failed. For on-demand
- * pinning, the page is released whenever it isn't stored in the umem.
*/
static int ib_umem_odp_map_dma_single_page(
struct ib_umem_odp *umem_odp,
- unsigned int page_index,
+ unsigned int dma_index,
struct page *page,
- u64 access_mask,
- unsigned long current_seq)
+ u64 access_mask)
{
struct ib_device *dev = umem_odp->umem.ibdev;
- dma_addr_t dma_addr;
- int ret = 0;
+ dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
- if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
- ret = -EAGAIN;
- goto out;
- }
- if (!(umem_odp->dma_list[page_index])) {
- dma_addr =
- ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
- DMA_BIDIRECTIONAL);
- if (ib_dma_mapping_error(dev, dma_addr)) {
- ret = -EFAULT;
- goto out;
- }
- umem_odp->dma_list[page_index] = dma_addr | access_mask;
- umem_odp->page_list[page_index] = page;
- umem_odp->npages++;
- } else if (umem_odp->page_list[page_index] == page) {
- umem_odp->dma_list[page_index] |= access_mask;
- } else {
+ if (*dma_addr) {
/*
- * This is a race here where we could have done:
- *
- * CPU0 CPU1
- * get_user_pages()
- * invalidate()
- * page_fault()
- * mutex_lock(umem_mutex)
- * page from GUP != page in ODP
- *
- * It should be prevented by the retry test above as reading
- * the seq number should be reliable under the
- * umem_mutex. Thus something is really not working right if
- * things get here.
+		 * If the page is already DMA mapped it means it went through
+		 * a non-invalidating transition, like read-only to writable.
+		 * Resync the flags.
*/
- WARN(true,
- "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem_odp->page_list[page_index], page);
- ret = -EAGAIN;
+ *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
+ return 0;
}
-out:
- put_page(page);
- return ret;
+ *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(dev, *dma_addr)) {
+ *dma_addr = 0;
+ return -EFAULT;
+ }
+ umem_odp->npages++;
+ *dma_addr |= access_mask;
+ return 0;
}
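/*
 * dma_list entries double as flag words: every mapped address is at least
 * PAGE_SIZE aligned, so the low bits are free to carry the ODP access
 * bits. A sketch of the packing, using the masks from this file:
 *
 *	entry = (dma & ODP_DMA_ADDR_MASK) | ODP_READ_ALLOWED_BIT;
 *	addr  = entry & ODP_DMA_ADDR_MASK;	// recover the DMA address
 *	writable = entry & ODP_WRITE_ALLOWED_BIT;
 */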
/**
- * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
+ * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
*
- * Pins the range of pages passed in the argument, and maps them to
- * DMA addresses. The DMA addresses of the mapped pages is updated in
- * umem_odp->dma_list.
+ * Maps the range passed in the argument to DMA addresses.
+ * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
+ * Upon success the ODP MR will be locked to let the caller complete its
+ * device page table update.
*
* Returns the number of pages mapped in success, negative error code
* for failure.
- * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
- * the function from completing its task.
- * An -ENOENT error code indicates that userspace process is being terminated
- * and mm was already destroyed.
* @umem_odp: the umem to map and pin
* @user_virt: the address from which we need to map.
* @bcnt: the minimal number of bytes to pin and map. The mapping might be
@@ -376,21 +347,19 @@ out:
* the return value.
* @access_mask: bit mask of the requested access permissions for the given
* range.
- * @current_seq: the MMU notifiers sequance value for synchronization with
- * invalidations. the sequance number is read from
- * umem_odp->notifiers_seq before calling this function
+ * @fault: whether faulting is required for the given range
*/
-int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
- u64 bcnt, u64 access_mask,
- unsigned long current_seq)
+int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
+ u64 bcnt, u64 access_mask, bool fault)
+ __acquires(&umem_odp->umem_mutex)
{
struct task_struct *owning_process = NULL;
struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
- struct page **local_page_list = NULL;
- u64 page_mask, off;
- int j, k, ret = 0, start_idx, npages = 0;
- unsigned int flags = 0, page_shift;
- phys_addr_t p = 0;
+ int pfn_index, dma_index, ret = 0, start_idx;
+ unsigned int page_shift, hmm_order, pfn_start_idx;
+ unsigned long num_pfns, current_seq;
+ struct hmm_range range = {};
+ unsigned long timeout;
if (access_mask == 0)
return -EINVAL;
@@ -399,15 +368,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
user_virt + bcnt > ib_umem_end(umem_odp))
return -EFAULT;
- local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
- if (!local_page_list)
- return -ENOMEM;
-
page_shift = umem_odp->page_shift;
- page_mask = ~(BIT(page_shift) - 1);
- off = user_virt & (~page_mask);
- user_virt = user_virt & page_mask;
- bcnt += off; /* Charge for the first page offset as well. */
/*
* owning_process is allowed to be NULL, this means somehow the mm is
@@ -420,99 +381,104 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
goto out_put_task;
}
- if (access_mask & ODP_WRITE_ALLOWED_BIT)
- flags |= FOLL_WRITE;
+ range.notifier = &umem_odp->notifier;
+ range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
+ range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
+ pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
+ num_pfns = (range.end - range.start) >> PAGE_SHIFT;
+ if (fault) {
+ range.default_flags = HMM_PFN_REQ_FAULT;
- start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
- k = start_idx;
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ range.default_flags |= HMM_PFN_REQ_WRITE;
+ }
- while (bcnt > 0) {
- const size_t gup_num_pages = min_t(size_t,
- ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
- PAGE_SIZE / sizeof(struct page *));
+ range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- mmap_read_lock(owning_mm);
- /*
- * Note: this might result in redundent page getting. We can
- * avoid this by checking dma_list to be 0 before calling
- * get_user_pages. However, this make the code much more
- * complex (and doesn't gain us much performance in most use
- * cases).
- */
- npages = get_user_pages_remote(owning_mm,
- user_virt, gup_num_pages,
- flags, local_page_list, NULL, NULL);
- mmap_read_unlock(owning_mm);
-
- if (npages < 0) {
- if (npages != -EAGAIN)
- pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
- else
- pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
- break;
- }
+retry:
+ current_seq = range.notifier_seq =
+ mmu_interval_read_begin(&umem_odp->notifier);
- bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
- mutex_lock(&umem_odp->umem_mutex);
- for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
- if (user_virt & ~page_mask) {
- p += PAGE_SIZE;
- if (page_to_phys(local_page_list[j]) != p) {
- ret = -EFAULT;
- break;
- }
- put_page(local_page_list[j]);
- continue;
- }
+ mmap_read_lock(owning_mm);
+ ret = hmm_range_fault(&range);
+ mmap_read_unlock(owning_mm);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY && !time_after(jiffies, timeout))
+ goto retry;
+ goto out_put_mm;
+ }
- ret = ib_umem_odp_map_dma_single_page(
- umem_odp, k, local_page_list[j],
- access_mask, current_seq);
- if (ret < 0) {
- if (ret != -EAGAIN)
- pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- else
- pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
- break;
- }
+ start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
+ dma_index = start_idx;
- p = page_to_phys(local_page_list[j]);
- k++;
- }
+ mutex_lock(&umem_odp->umem_mutex);
+ if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
mutex_unlock(&umem_odp->umem_mutex);
+ goto retry;
+ }
- if (ret < 0) {
+ for (pfn_index = 0; pfn_index < num_pfns;
+ pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
+
+ if (fault) {
/*
- * Release pages, remembering that the first page
- * to hit an error was already released by
- * ib_umem_odp_map_dma_single_page().
+ * Since we asked for hmm_range_fault() to populate
+ * pages it shouldn't return an error entry on success.
*/
- if (npages - (j + 1) > 0)
- release_pages(&local_page_list[j+1],
- npages - (j + 1));
+ WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+ WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+ } else {
+ if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
+ WARN_ON(umem_odp->dma_list[dma_index]);
+ continue;
+ }
+ access_mask = ODP_READ_ALLOWED_BIT;
+ if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+ }
+
+ hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
+		/* If a hugepage was detected and ODP wasn't set for it, the
+		 * umem page_shift will be used; the opposite case is an error.
+		 */
+ if (hmm_order + PAGE_SHIFT < page_shift) {
+ ret = -EINVAL;
+ ibdev_dbg(umem_odp->umem.ibdev,
+ "%s: un-expected hmm_order %d, page_shift %d\n",
+ __func__, hmm_order, page_shift);
break;
}
- }
- if (ret >= 0) {
- if (npages < 0 && k == start_idx)
- ret = npages;
- else
- ret = k - start_idx;
+ ret = ib_umem_odp_map_dma_single_page(
+ umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
+ access_mask);
+ if (ret < 0) {
+ ibdev_dbg(umem_odp->umem.ibdev,
+ "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
+ break;
+ }
}
+	/* upon success the lock should stay held for the caller */
+ if (!ret)
+ ret = dma_index - start_idx;
+ else
+ mutex_unlock(&umem_odp->umem_mutex);
+out_put_mm:
mmput(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
- free_page((unsigned long)local_page_list);
return ret;
}
-EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
+EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
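/*
 * The fault path above follows the standard mmu_interval_notifier retry
 * pattern; a minimal sketch with illustrative names:
 *
 * again:
 *	seq = mmu_interval_read_begin(&obj->notifier);
 *	// collect pfns via hmm_range_fault() without holding the lock
 *	mutex_lock(&obj->lock);
 *	if (mmu_interval_read_retry(&obj->notifier, seq)) {
 *		mutex_unlock(&obj->lock);
 *		goto again;	// an invalidation raced with the walk
 *	}
 *	// install the collected pfns under the lock, then return locked
 */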
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
+ dma_addr_t dma_addr;
+ dma_addr_t dma;
int idx;
u64 addr;
struct ib_device *dev = umem_odp->umem.ibdev;
@@ -521,20 +487,16 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
virt = max_t(u64, virt, ib_umem_start(umem_odp));
bound = min_t(u64, bound, ib_umem_end(umem_odp));
- /* Note that during the run of this function, the
- * notifiers_count of the MR is > 0, preventing any racing
- * faults from completion. We might be racing with other
- * invalidations, so we must make sure we free each page only
- * once. */
for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
- if (umem_odp->page_list[idx]) {
- struct page *page = umem_odp->page_list[idx];
- dma_addr_t dma = umem_odp->dma_list[idx];
- dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
+ dma = umem_odp->dma_list[idx];
- WARN_ON(!dma_addr);
+		/* The access flags guarantee a valid DMA address; a zero entry was never mapped */
+ if (dma) {
+ unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
+ struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
+ dma_addr = dma & ODP_DMA_ADDR_MASK;
ib_dma_unmap_page(dev, dma_addr,
BIT(umem_odp->page_shift),
DMA_BIDIRECTIONAL);
@@ -551,7 +513,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
*/
set_page_dirty(head_page);
}
- umem_odp->page_list[idx] = NULL;
umem_odp->dma_list[idx] = 0;
umem_odp->npages--;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 2fbc583d5bdd..418d133a8fb0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -218,10 +218,12 @@ int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs)
if (!ucontext)
return -ENOMEM;
- ucontext->res.type = RDMA_RESTRACK_CTX;
ucontext->device = ib_dev;
ucontext->ufile = ufile;
xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+
+ rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX);
+ rdma_restrack_set_name(&ucontext->res, NULL);
attrs->context = ucontext;
return 0;
}
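/*
 * The restrack conversion in this file follows one lifecycle at every call
 * site, sketched here from the surrounding hunks:
 *
 *	rdma_restrack_new(&obj->res, RDMA_RESTRACK_CTX); // init, not visible
 *	rdma_restrack_set_name(&obj->res, NULL);	 // NULL: user object
 *	err = device->ops.alloc_...(...);
 *	if (err) {
 *		rdma_restrack_put(&obj->res);	// never published
 *		kfree(obj);
 *	} else {
 *		rdma_restrack_add(&obj->res);	// visible via nldev
 *	}
 */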
@@ -250,7 +252,7 @@ int ib_init_ucontext(struct uverbs_attr_bundle *attrs)
if (ret)
goto err_uncharge;
- rdma_restrack_uadd(&ucontext->res);
+ rdma_restrack_add(&ucontext->res);
/*
* Make sure that ib_uverbs_get_ucontext() sees the pointer update
@@ -313,6 +315,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
err_uobj:
rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
+ rdma_restrack_put(&attrs->context->res);
kfree(attrs->context);
attrs->context = NULL;
return ret;
@@ -439,12 +442,14 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
pd->device = ib_dev;
pd->uobject = uobj;
atomic_set(&pd->usecnt, 0);
- pd->res.type = RDMA_RESTRACK_PD;
+
+ rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
+ rdma_restrack_set_name(&pd->res, NULL);
ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
if (ret)
goto err_alloc;
- rdma_restrack_uadd(&pd->res);
+ rdma_restrack_add(&pd->res);
uobj->object = pd;
uobj_finalize_uobj_create(uobj, attrs);
@@ -453,6 +458,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
return uverbs_response(attrs, &resp, sizeof(resp));
err_alloc:
+ rdma_restrack_put(&pd->res);
kfree(pd);
err:
uobj_alloc_abort(uobj, attrs);
@@ -742,9 +748,11 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
mr->sig_attrs = NULL;
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
- mr->res.type = RDMA_RESTRACK_MR;
mr->iova = cmd.hca_va;
- rdma_restrack_uadd(&mr->res);
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_set_name(&mr->res, NULL);
+ rdma_restrack_add(&mr->res);
uobj->object = mr;
uobj_put_obj_read(pd);
@@ -858,7 +866,7 @@ static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_alloc_mw cmd;
- struct ib_uverbs_alloc_mw_resp resp;
+ struct ib_uverbs_alloc_mw_resp resp = {};
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mw *mw;
@@ -884,15 +892,21 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
goto err_put;
}
- mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
- if (IS_ERR(mw)) {
- ret = PTR_ERR(mw);
+ mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
+ if (!mw) {
+ ret = -ENOMEM;
goto err_put;
}
- mw->device = pd->device;
- mw->pd = pd;
+ mw->device = ib_dev;
+ mw->pd = pd;
mw->uobject = uobj;
+ mw->type = cmd.mw_type;
+
+ ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
+ if (ret)
+ goto err_alloc;
+
atomic_inc(&pd->usecnt);
uobj->object = mw;
@@ -903,6 +917,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
resp.mw_handle = uobj->id;
return uverbs_response(attrs, &resp, sizeof(resp));
+err_alloc:
+ kfree(mw);
err_put:
uobj_put_obj_read(pd);
err_free:
@@ -994,12 +1010,14 @@ static int create_cq(struct uverbs_attr_bundle *attrs,
cq->event_handler = ib_uverbs_cq_event_handler;
cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);
- cq->res.type = RDMA_RESTRACK_CQ;
+
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, NULL);
ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
if (ret)
goto err_free;
- rdma_restrack_uadd(&cq->res);
+ rdma_restrack_add(&cq->res);
obj->uevent.uobject.object = cq;
obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
@@ -1013,6 +1031,7 @@ static int create_cq(struct uverbs_attr_bundle *attrs,
return uverbs_response(attrs, &resp, sizeof(resp));
err_free:
+ rdma_restrack_put(&cq->res);
kfree(cq);
err_file:
if (ev_file)
@@ -1237,8 +1256,21 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
bool has_sq = true;
struct ib_device *ib_dev;
- if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
- return -EPERM;
+ switch (cmd->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+ break;
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_DRIVER:
+ break;
+ default:
+ return -EINVAL;
+ }
obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs,
&ib_dev);
@@ -2985,11 +3017,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
{
struct ib_uverbs_ex_create_rwq_ind_table cmd;
struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj;
int err;
struct ib_rwq_ind_table_init_attr init_attr = {};
struct ib_rwq_ind_table *rwq_ind_tbl;
- struct ib_wq **wqs = NULL;
+ struct ib_wq **wqs = NULL;
u32 *wqs_handles = NULL;
struct ib_wq *wq = NULL;
int i, num_read_wqs;
@@ -3047,17 +3079,15 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
goto put_wqs;
}
- init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
- init_attr.ind_tbl = wqs;
-
- rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
- &attrs->driver_udata);
-
- if (IS_ERR(rwq_ind_tbl)) {
- err = PTR_ERR(rwq_ind_tbl);
+ rwq_ind_tbl = rdma_zalloc_drv_obj(ib_dev, ib_rwq_ind_table);
+ if (!rwq_ind_tbl) {
+ err = -ENOMEM;
goto err_uobj;
}
+ init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+ init_attr.ind_tbl = wqs;
+
rwq_ind_tbl->ind_tbl = wqs;
rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
rwq_ind_tbl->uobject = uobj;
@@ -3065,6 +3095,11 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
rwq_ind_tbl->device = ib_dev;
atomic_set(&rwq_ind_tbl->usecnt, 0);
+ err = ib_dev->ops.create_rwq_ind_table(rwq_ind_tbl, &init_attr,
+ &attrs->driver_udata);
+ if (err)
+ goto err_create;
+
for (i = 0; i < num_wq_handles; i++)
rdma_lookup_put_uobject(&wqs[i]->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
@@ -3076,6 +3111,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
resp.response_length = uverbs_response_length(attrs, sizeof(resp));
return uverbs_response(attrs, &resp, sizeof(resp));
+err_create:
+ kfree(rwq_ind_tbl);
err_uobj:
uobj_alloc_abort(uobj, attrs);
put_wqs:
@@ -3232,8 +3269,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
goto err_free;
}
- flow_id = qp->device->ops.create_flow(
- qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata);
+ flow_id = qp->device->ops.create_flow(qp, flow_attr,
+ &attrs->driver_udata);
if (IS_ERR(flow_id)) {
err = PTR_ERR(flow_id);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 37794d88b1f3..4bb7c642f80c 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -108,8 +108,11 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
int ret;
ret = mw->device->ops.dealloc_mw(mw);
- if (!ret)
- atomic_dec(&pd->usecnt);
+ if (ret)
+ return ret;
+
+ atomic_dec(&pd->usecnt);
+ kfree(mw);
return ret;
}
@@ -845,8 +848,6 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* will only be one mm, so no big deal.
*/
mmap_read_lock(mm);
- if (!mmget_still_valid(mm))
- goto skip_mm;
mutex_lock(&ufile->umap_lock);
list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
list) {
@@ -865,7 +866,6 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
}
}
mutex_unlock(&ufile->umap_lock);
- skip_mm:
mmap_read_unlock(mm);
mmput(mm);
}
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 08c39cfb1bd9..0658101fca00 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -81,12 +81,20 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
{
struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
- int ret;
+ u32 table_size = (1 << rwq_ind_tbl->log_ind_tbl_size);
+ int ret, i;
+
+ if (atomic_read(&rwq_ind_tbl->usecnt))
+ return -EBUSY;
- ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ ret = rwq_ind_tbl->device->ops.destroy_rwq_ind_table(rwq_ind_tbl);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
+ for (i = 0; i < table_size; i++)
+ atomic_dec(&ind_tbl[i]->usecnt);
+
+ kfree(rwq_ind_tbl);
kfree(ind_tbl);
return ret;
}
@@ -122,8 +130,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
if (ret)
return ret;
- ib_dealloc_pd_user(pd, &attrs->driver_udata);
- return 0;
+ return ib_dealloc_pd_user(pd, &attrs->driver_udata);
}
void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index c7e7438752bc..b3c6c066b601 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -46,7 +46,9 @@ static int uverbs_free_counters(struct ib_uobject *uobject,
if (ret)
return ret;
- counters->device->ops.destroy_counters(counters);
+ ret = counters->device->ops.destroy_counters(counters);
+ if (ret)
+ return ret;
kfree(counters);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index b1c7dacc02de..8dabd05988b2 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -33,6 +33,7 @@
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"
#include "uverbs.h"
+#include "restrack.h"
static int uverbs_free_cq(struct ib_uobject *uobject,
enum rdma_remove_reason why,
@@ -123,7 +124,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
cq->event_handler = ib_uverbs_cq_event_handler;
cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);
- cq->res.type = RDMA_RESTRACK_CQ;
+
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, NULL);
ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
if (ret)
@@ -131,7 +134,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
obj->uevent.uobject.object = cq;
obj->uevent.uobject.user_handle = user_handle;
- rdma_restrack_uadd(&cq->res);
+ rdma_restrack_add(&cq->res);
uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
@@ -139,6 +142,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return ret;
err_free:
+ rdma_restrack_put(&cq->res);
kfree(cq);
err_event_file:
if (obj->uevent.event_file)
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 75df2094a010..302f898c5833 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -3,11 +3,13 @@
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
*/
+#include <linux/overflow.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_ioctl.h>
#include <rdma/opa_addr.h>
+#include <rdma/ib_cache.h>
/*
* This ioctl method allows calling any defined write or write_ex
@@ -165,7 +167,8 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
resp->subnet_timeout = attr->subnet_timeout;
resp->init_type_reply = attr->init_type_reply;
resp->active_width = attr->active_width;
- resp->active_speed = attr->active_speed;
+	/* This ABI needs to be extended to report any speed greater than IB_SPEED_NDR */
+ resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
resp->phys_state = attr->phys_state;
resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
}
@@ -265,6 +268,169 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)(
return ucontext->device->ops.query_ucontext(ucontext, attrs);
}
+static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs,
+ struct ib_uverbs_gid_entry *entries,
+ size_t num_entries, size_t user_entry_size)
+{
+ const struct uverbs_attr *attr;
+ void __user *user_entries;
+ size_t copy_len;
+ int ret;
+ int i;
+
+ if (user_entry_size == sizeof(*entries)) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ entries, sizeof(*entries) * num_entries);
+ return ret;
+ }
+
+ copy_len = min_t(size_t, user_entry_size, sizeof(*entries));
+ attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ user_entries = u64_to_user_ptr(attr->ptr_attr.data);
+ for (i = 0; i < num_entries; i++) {
+ if (copy_to_user(user_entries, entries, copy_len))
+ return -EFAULT;
+
+ if (user_entry_size > sizeof(*entries)) {
+ if (clear_user(user_entries + sizeof(*entries),
+ user_entry_size - sizeof(*entries)))
+ return -EFAULT;
+ }
+
+ entries++;
+ user_entries += user_entry_size;
+ }
+
+ return uverbs_output_written(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES);
+}
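/*
 * copy_gid_entries_to_user() above is the usual forward/backward
 * compatible ABI copy for a user entry size that may differ from the
 * kernel's: copy min(user, kernel) bytes per entry and zero any user
 * tail, roughly:
 *
 *	copy_to_user(dst, src, min(usize, ksize));
 *	if (usize > ksize)
 *		clear_user(dst + ksize, usize - ksize);
 */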
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_gid_entry *entries;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+ size_t user_entry_size;
+ ssize_t num_entries;
+ size_t max_entries;
+ size_t num_bytes;
+ u32 flags;
+ int ret;
+
+ ret = uverbs_get_flags32(&flags, attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&user_entry_size, attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE);
+ if (ret)
+ return ret;
+
+ max_entries = uverbs_attr_ptr_get_array_size(
+ attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ user_entry_size);
+ if (max_entries <= 0)
+ return -EINVAL;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ if (check_mul_overflow(max_entries, sizeof(*entries), &num_bytes))
+ return -EINVAL;
+
+ entries = uverbs_zalloc(attrs, num_bytes);
+ if (!entries)
+ return -ENOMEM;
+
+ num_entries = rdma_query_gid_table(ib_dev, entries, max_entries);
+ if (num_entries < 0)
+ return -EINVAL;
+
+ ret = copy_gid_entries_to_user(attrs, entries, num_entries,
+ user_entry_size);
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+ &num_entries, sizeof(num_entries));
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_gid_entry entry = {};
+ const struct ib_gid_attr *gid_attr;
+ struct ib_ucontext *ucontext;
+ struct ib_device *ib_dev;
+ struct net_device *ndev;
+ u32 gid_index;
+ u32 port_num;
+ u32 flags;
+ int ret;
+
+ ret = uverbs_get_flags32(&flags, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&port_num, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_PORT);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&gid_index, attrs,
+ UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX);
+ if (ret)
+ return ret;
+
+ ucontext = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ucontext))
+ return PTR_ERR(ucontext);
+ ib_dev = ucontext->device;
+
+ if (!rdma_is_port_valid(ib_dev, port_num))
+ return -EINVAL;
+
+ gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
+ if (IS_ERR(gid_attr))
+ return PTR_ERR(gid_attr);
+
+ memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid));
+ entry.gid_index = gid_attr->index;
+ entry.port_num = gid_attr->port_num;
+ entry.gid_type = gid_attr->gid_type;
+
+ rcu_read_lock();
+ ndev = rdma_read_gid_attr_ndev_rcu(gid_attr);
+ if (IS_ERR(ndev)) {
+ if (PTR_ERR(ndev) != -ENODEV) {
+ ret = PTR_ERR(ndev);
+ rcu_read_unlock();
+ goto out;
+ }
+ } else {
+ entry.netdev_ifindex = ndev->ifindex;
+ }
+ rcu_read_unlock();
+
+ ret = uverbs_copy_to_struct_or_zero(
+ attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry,
+ sizeof(entry));
+out:
+ rdma_put_gid_attr(gid_attr);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
UVERBS_METHOD_GET_CONTEXT,
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
@@ -299,12 +465,38 @@ DECLARE_UVERBS_NAMED_METHOD(
reserved),
UA_MANDATORY));
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_GID_TABLE,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
+ UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QUERY_GID_ENTRY,
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry,
+ netdev_ifindex),
+ UA_MANDATORY));
+
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE,
&UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT),
&UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE),
&UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES),
&UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT),
- &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT));
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE),
+ &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY));
const struct uapi_definition uverbs_def_obj_device[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE),
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
index cad842ede077..f2e6a625724a 100644
--- a/drivers/infiniband/core/uverbs_std_types_wq.c
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
container_of(uobject, struct ib_uwq_object, uevent.uobject);
int ret;
- ret = ib_destroy_wq(wq, &attrs->driver_udata);
+ ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject))
return ret;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 307886737646..740f8454b6b4 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -272,15 +272,16 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
atomic_set(&pd->usecnt, 0);
pd->flags = flags;
- pd->res.type = RDMA_RESTRACK_PD;
- rdma_restrack_set_task(&pd->res, caller);
+ rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
+ rdma_restrack_set_name(&pd->res, caller);
ret = device->ops.alloc_pd(pd, NULL);
if (ret) {
+ rdma_restrack_put(&pd->res);
kfree(pd);
return ERR_PTR(ret);
}
- rdma_restrack_kadd(&pd->res);
+ rdma_restrack_add(&pd->res);
if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
pd->local_dma_lkey = device->local_dma_lkey;
@@ -329,7 +330,7 @@ EXPORT_SYMBOL(__ib_alloc_pd);
* exist. The caller is responsible to synchronously destroy them and
* guarantee no new allocations will happen.
*/
-void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
+int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
int ret;
@@ -343,9 +344,13 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
requires the caller to guarantee we can't race here. */
WARN_ON(atomic_read(&pd->usecnt));
+ ret = pd->device->ops.dealloc_pd(pd, udata);
+ if (ret)
+ return ret;
+
rdma_restrack_del(&pd->res);
- pd->device->ops.dealloc_pd(pd, udata);
kfree(pd);
+ return ret;
}
EXPORT_SYMBOL(ib_dealloc_pd_user);
@@ -728,7 +733,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
(struct in6_addr *)dgid);
return 0;
} else if (net_type == RDMA_NETWORK_IPV6 ||
- net_type == RDMA_NETWORK_IB) {
+		   net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
*dgid = hdr->ibgrh.dgid;
*sgid = hdr->ibgrh.sgid;
return 0;
@@ -964,18 +969,22 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
{
const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
struct ib_pd *pd;
+ int ret;
might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
pd = ah->pd;
- ah->device->ops.destroy_ah(ah, flags);
+ ret = ah->device->ops.destroy_ah(ah, flags);
+ if (ret)
+ return ret;
+
atomic_dec(&pd->usecnt);
if (sgid_attr)
rdma_put_gid_attr(sgid_attr);
kfree(ah);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah_user);
@@ -1060,10 +1069,14 @@ EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
+ int ret;
+
if (atomic_read(&srq->usecnt))
return -EBUSY;
- srq->device->ops.destroy_srq(srq, udata);
+ ret = srq->device->ops.destroy_srq(srq, udata);
+ if (ret)
+ return ret;
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
@@ -1072,7 +1085,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
atomic_dec(&srq->ext.cq->usecnt);
kfree(srq);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ib_destroy_srq_user);
@@ -1781,7 +1794,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
{
int rc;
u32 netdev_speed;
@@ -1984,16 +1997,18 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
cq->event_handler = event_handler;
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
- cq->res.type = RDMA_RESTRACK_CQ;
- rdma_restrack_set_task(&cq->res, caller);
+
+ rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
+ rdma_restrack_set_name(&cq->res, caller);
ret = device->ops.create_cq(cq, cq_attr, NULL);
if (ret) {
+ rdma_restrack_put(&cq->res);
kfree(cq);
return ERR_PTR(ret);
}
- rdma_restrack_kadd(&cq->res);
+ rdma_restrack_add(&cq->res);
return cq;
}
EXPORT_SYMBOL(__ib_create_cq);
@@ -2011,16 +2026,21 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
+ int ret;
+
if (WARN_ON_ONCE(cq->shared))
return -EOPNOTSUPP;
if (atomic_read(&cq->usecnt))
return -EBUSY;
+ ret = cq->device->ops.destroy_cq(cq, udata);
+ if (ret)
+ return ret;
+
rdma_restrack_del(&cq->res);
- cq->device->ops.destroy_cq(cq, udata);
kfree(cq);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ib_destroy_cq_user);
@@ -2059,8 +2079,10 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->pd = pd;
mr->dm = NULL;
atomic_inc(&pd->usecnt);
- mr->res.type = RDMA_RESTRACK_MR;
- rdma_restrack_kadd(&mr->res);
+
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
return mr;
}
@@ -2139,11 +2161,12 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
mr->need_inval = false;
- mr->res.type = RDMA_RESTRACK_MR;
- rdma_restrack_kadd(&mr->res);
mr->type = mr_type;
mr->sig_attrs = NULL;
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
out:
trace_mr_alloc(pd, mr_type, max_num_sg, mr);
return mr;
@@ -2199,11 +2222,12 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
mr->need_inval = false;
- mr->res.type = RDMA_RESTRACK_MR;
- rdma_restrack_kadd(&mr->res);
mr->type = IB_MR_TYPE_INTEGRITY;
mr->sig_attrs = sig_attrs;
+ rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
+ rdma_restrack_parent_name(&mr->res, &pd->res);
+ rdma_restrack_add(&mr->res);
out:
trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
return mr;
@@ -2328,13 +2352,17 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user);
*/
int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
+ int ret;
+
if (atomic_read(&xrcd->usecnt))
return -EBUSY;
WARN_ON(!xa_empty(&xrcd->tgt_qps));
- xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+ ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
+ if (ret)
+ return ret;
kfree(xrcd);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ib_dealloc_xrcd_user);
@@ -2378,25 +2406,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
EXPORT_SYMBOL(ib_create_wq);
/**
- * ib_destroy_wq - Destroys the specified user WQ.
+ * ib_destroy_wq_user - Destroys the specified user WQ.
* @wq: The WQ to destroy.
* @udata: Valid user data
*/
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
{
struct ib_cq *cq = wq->cq;
struct ib_pd *pd = wq->pd;
+ int ret;
if (atomic_read(&wq->usecnt))
return -EBUSY;
- wq->device->ops.destroy_wq(wq, udata);
+ ret = wq->device->ops.destroy_wq(wq, udata);
+ if (ret)
+ return ret;
+
atomic_dec(&pd->usecnt);
atomic_dec(&cq->usecnt);
-
- return 0;
+ return ret;
}
-EXPORT_SYMBOL(ib_destroy_wq);
+EXPORT_SYMBOL(ib_destroy_wq_user);
/**
* ib_modify_wq - Modifies the specified WQ.
@@ -2419,29 +2450,6 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
}
EXPORT_SYMBOL(ib_modify_wq);
-/*
- * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
- * @wq_ind_table: The Indirection Table to destroy.
-*/
-int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
-{
- int err, i;
- u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
- struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
-
- if (atomic_read(&rwq_ind_table->usecnt))
- return -EBUSY;
-
- err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
- if (!err) {
- for (i = 0; i < table_size; i++)
- atomic_dec(&ind_tbl[i]->usecnt);
- }
-
- return err;
-}
-EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
-
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct ib_mr_status *mr_status)
{
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index a300588634c5..b930ea3dab7a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -150,7 +150,7 @@ struct bnxt_re_dev {
struct delayed_work worker;
u8 cur_prio_map;
- u8 active_speed;
+ u16 active_speed;
u8 active_width;
/* FP Notification Queue (CQ & SRQ) */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 1d7a9ca5240c..cf3db9628397 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -532,7 +532,7 @@ fail:
}
/* Protection Domains */
-void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
+int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -542,6 +542,7 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
if (pd->qplib_pd.id)
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
&pd->qplib_pd);
+ return 0;
}
int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
@@ -601,13 +602,14 @@ fail:
}
/* Address Handles */
-void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
struct bnxt_re_dev *rdev = ah->rdev;
bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
!(flags & RDMA_DESTROY_AH_SLEEPABLE));
+ return 0;
}
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
@@ -938,9 +940,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
return PTR_ERR(umem);
qp->sumem = umem;
- qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
- qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
- qplib_qp->sq.sg_info.nmap = umem->nmap;
+ qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
qplib_qp->qp_handle = ureq.qp_handle;
@@ -953,9 +953,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
- qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
- qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
- qplib_qp->rq.sg_info.nmap = umem->nmap;
+ qplib_qp->rq.sg_info.umem = umem;
qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
}
@@ -1568,7 +1566,7 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
/* Shared Receive Queues */
-void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
@@ -1583,6 +1581,7 @@ void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
atomic_dec(&rdev->srq_count);
if (nq)
nq->budget--;
+ return 0;
}
static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
@@ -1608,9 +1607,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
return PTR_ERR(umem);
srq->umem = umem;
- qplib_srq->sg_info.sghead = umem->sg_head.sgl;
- qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
- qplib_srq->sg_info.nmap = umem->nmap;
+ qplib_srq->sg_info.umem = umem;
qplib_srq->sg_info.pgsize = PAGE_SIZE;
qplib_srq->sg_info.pgshft = PAGE_SHIFT;
qplib_srq->srq_handle = ureq.srq_handle;
@@ -2800,7 +2797,7 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
}
/* Completion Queues */
-void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct bnxt_re_cq *cq;
struct bnxt_qplib_nq *nq;
@@ -2816,6 +2813,7 @@ void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
atomic_dec(&rdev->cq_count);
nq->budget--;
kfree(cq->cql);
+ return 0;
}
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
@@ -2860,9 +2858,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = PTR_ERR(cq->umem);
goto fail;
}
- cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
- cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
- cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
+ cq->qplib_cq.sg_info.umem = cq->umem;
cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
@@ -3774,23 +3770,6 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
-static int bnxt_re_page_size_ok(int page_shift)
-{
- switch (page_shift) {
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
- case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
- return 1;
- default:
- return 0;
- }
-}
-
static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
int page_shift)
{
@@ -3798,7 +3777,7 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
u64 page_size = BIT_ULL(page_shift);
struct ib_block_iter biter;
- rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+ rdma_umem_for_each_dma_block(umem, &biter, page_size)
*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
return pbl_tbl - pbl_tbl_orig;
@@ -3814,7 +3793,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_mr *mr;
struct ib_umem *umem;
u64 *pbl_tbl = NULL;
- int umem_pgs, page_shift, rc;
+ unsigned long page_size;
+ int umem_pgs, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
@@ -3848,42 +3828,34 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->ib_umem = umem;
mr->qplib_mr.va = virt_addr;
- umem_pgs = ib_umem_page_count(umem);
- if (!umem_pgs) {
- ibdev_err(&rdev->ibdev, "umem is invalid!");
- rc = -EINVAL;
- goto free_umem;
- }
- mr->qplib_mr.total_size = length;
-
- pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
- if (!pbl_tbl) {
- rc = -ENOMEM;
- goto free_umem;
- }
-
- page_shift = __ffs(ib_umem_find_best_pgsz(umem,
- BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
- virt_addr));
-
- if (!bnxt_re_page_size_ok(page_shift)) {
+ page_size = ib_umem_find_best_pgsz(
+ umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
+ if (!page_size) {
ibdev_err(&rdev->ibdev, "umem page size unsupported!");
rc = -EFAULT;
- goto fail;
+ goto free_umem;
}
+ mr->qplib_mr.total_size = length;
- if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+ if (page_size == BNXT_RE_PAGE_SIZE_4K &&
length > BNXT_RE_MAX_MR_SIZE_LOW) {
ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
rc = -EINVAL;
- goto fail;
+ goto free_umem;
+ }
+
+ umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
+ pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
+ if (!pbl_tbl) {
+ rc = -ENOMEM;
+ goto free_umem;
}
/* Map umem buf ptrs to the PBL */
- umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, order_base_2(page_size));
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
- umem_pgs, false, 1 << page_shift);
+ umem_pgs, false, page_size);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register user MR");
goto fail;
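
Note: the reg_user_mr rework above is the general shape of the new umem page-size API. A minimal sketch of that pattern under assumed names (example_build_pbl is illustrative, not part of this patch): pick the largest device-supported page size, size the PBL by DMA blocks of that size, then walk the umem in those blocks.

    #include <linux/slab.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper mirroring the converted flow above. */
    static int example_build_pbl(struct ib_umem *umem, unsigned long pgsz_bitmap,
                                 u64 iova, u64 **pbl_out, u32 *npages_out)
    {
            struct ib_block_iter biter;
            unsigned long pg_sz;
            u64 *pbl;
            u32 i = 0;

            /* Largest page size from the bitmap that tiles the umem at iova. */
            pg_sz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, iova);
            if (!pg_sz)
                    return -EINVAL;

            *npages_out = ib_umem_num_dma_blocks(umem, pg_sz);
            pbl = kcalloc(*npages_out, sizeof(*pbl), GFP_KERNEL);
            if (!pbl)
                    return -ENOMEM;

            /* One DMA address per device-sized block, not per CPU page. */
            rdma_umem_for_each_dma_block(umem, &biter, pg_sz)
                    pbl[i++] = rdma_block_iter_dma_address(&biter);

            *pbl_out = pbl;
            return 0;
    }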
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 1daeb30e06fd..9a8130b79256 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -163,12 +163,12 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
+int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
@@ -176,7 +176,7 @@ int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-void bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
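
Note: these prototype changes follow one series-wide rule: destroy/dealloc callbacks now return int. A hedged sketch of the contract (names illustrative): returning 0 lets the core free the core-allocated container, while a non-zero value reports failure where the object can legitimately outlive the call (see the efa_destroy_ah() change below, which returns -EOPNOTSUPP for exactly that case).

    #include <rdma/ib_verbs.h>

    static int example_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
    {
            /* Tear down driver/HW state for ibcq here. */
            return 0;       /* 0: core may now free the ib_cq container */
    }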
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 53aee5a42ab8..04621ba8fa76 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -736,7 +736,8 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
if (ret)
return ret;
- return ib_register_device(ibdev, "bnxt_re%d");
+ dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
+ return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
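
Note: ib_register_device() gained a third parameter naming the struct device used for DMA mapping, and raising the DMA segment size cap first lets the umem code build maximally merged scatterlists. A minimal sketch, assuming a PCI-backed driver:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>
    #include <rdma/ib_verbs.h>

    static int example_register(struct ib_device *ibdev, struct pci_dev *pdev)
    {
            /* Let ib_umem coalesce SGEs up to UINT_MAX bytes each. */
            dma_set_max_seg_size(&pdev->dev, UINT_MAX);
            /* The DMA device is now passed explicitly at registration. */
            return ib_register_device(ibdev, "example_%d", &pdev->dev);
    }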
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f78da54a0bc5..995d4633b0a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -295,9 +295,9 @@ static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
}
}
-static void bnxt_qplib_service_nq(unsigned long data)
+static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
- struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
+ struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
struct bnxt_qplib_hwq *hwq = &nq->hwq;
int num_srqne_processed = 0;
int num_cqne_processed = 0;
@@ -448,8 +448,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
nq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&nq->nq_tasklet, bnxt_qplib_service_nq,
- (unsigned long)nq);
+ tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
else
tasklet_enable(&nq->nq_tasklet);
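
Note: the tasklet conversion here (repeated in qplib_rcfw.c and hfi1/sdma.c below) is mechanical: tasklet_setup() replaces tasklet_init() plus its (unsigned long) cookie, and the callback recovers its container with from_tasklet(). A self-contained sketch with made-up names:

    #include <linux/interrupt.h>

    struct example_ctx {
            struct tasklet_struct tl;
            int work_pending;
    };

    /* New-style callback: the tasklet itself is the argument. */
    static void example_tasklet_fn(struct tasklet_struct *t)
    {
            struct example_ctx *ctx = from_tasklet(ctx, t, tl);

            ctx->work_pending = 0;
    }

    static void example_init(struct example_ctx *ctx)
    {
            tasklet_setup(&ctx->tl, example_tasklet_fn);
    }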
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index f7736e34ac64..441eb421e5e5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -50,7 +50,7 @@
#include "qplib_sp.h"
#include "qplib_fp.h"
-static void bnxt_qplib_service_creq(unsigned long data);
+static void bnxt_qplib_service_creq(struct tasklet_struct *t);
/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -79,7 +79,7 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
goto done;
do {
mdelay(1); /* 1 msec */
- bnxt_qplib_service_creq((unsigned long)rcfw);
+ bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
} while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
return count ? 0 : -ETIMEDOUT;
@@ -370,9 +370,9 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
}
/* SP - CREQ Completion handlers */
-static void bnxt_qplib_service_creq(unsigned long data)
+static void bnxt_qplib_service_creq(struct tasklet_struct *t)
{
- struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
+ struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
@@ -687,8 +687,7 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
creq->msix_vec = msix_vector;
if (need_init)
- tasklet_init(&creq->creq_tasklet,
- bnxt_qplib_service_creq, (unsigned long)rcfw);
+ tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
else
tasklet_enable(&creq->creq_tasklet);
rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 7efa6e5dce62..fa7878336100 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -45,6 +45,9 @@
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
struct bnxt_qplib_sg_info *sginfo)
{
- struct scatterlist *sghead = sginfo->sghead;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
int i = 0;
- for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
- pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+ rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+ pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
pbl->pg_arr[i] = NULL;
pbl->pg_count++;
i++;
@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info *sginfo)
{
struct pci_dev *pdev = res->pdev;
- struct scatterlist *sghead;
bool is_umem = false;
u32 pages;
int i;
if (sginfo->nopte)
return 0;
- pages = sginfo->npages;
- sghead = sginfo->sghead;
+ if (sginfo->umem)
+ pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+ else
+ pages = sginfo->npages;
/* page ptr arrays */
pbl->pg_arr = vmalloc(pages * sizeof(void *));
if (!pbl->pg_arr)
@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;
- if (!sghead) {
+ if (!sginfo->umem) {
for (i = 0; i < pages; i++) {
pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
pbl->pg_size,
@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_sg_info sginfo = {};
u32 depth, stride, npbl, npde;
dma_addr_t *src_phys_ptr, **dst_virt_ptr;
- struct scatterlist *sghead = NULL;
struct bnxt_qplib_res *res;
struct pci_dev *pdev;
int i, rc, lvl;
res = hwq_attr->res;
pdev = res->pdev;
- sghead = hwq_attr->sginfo->sghead;
pg_size = hwq_attr->sginfo->pgsize;
hwq->level = PBL_LVL_MAX;
@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
aux_pages++;
}
- if (!sghead) {
+ if (!hwq_attr->sginfo->umem) {
hwq->is_user = false;
npages = (depth * stride) / pg_size + aux_pages;
if ((depth * stride) % pg_size)
@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
return -EINVAL;
hwq_attr->sginfo->npages = npages;
} else {
+ unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+ hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+
hwq->is_user = true;
- npages = hwq_attr->sginfo->npages;
+ npages = sginfo_num_pages;
npages = (npages * PAGE_SIZE) /
BIT_ULL(hwq_attr->sginfo->pgshft);
- if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+ if ((sginfo_num_pages * PAGE_SIZE) %
BIT_ULL(hwq_attr->sginfo->pgshft))
if (!npages)
npages++;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 9da470d1e4a3..7a1ab38b95da 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl {
};
struct bnxt_qplib_sg_info {
- struct scatterlist *sghead;
- u32 nmap;
+ struct ib_umem *umem;
u32 npages;
u32 pgshft;
u32 pgsize;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 1f288c73ccfc..8769e7aa097f 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -77,9 +77,9 @@ static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
-static int dack_mode = 1;
+static int dack_mode;
module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 352b8af1998a..28349ed50885 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -967,7 +967,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return !err || err == -ENODATA ? npolled : err;
}
-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
@@ -985,6 +985,7 @@ void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
chp->destroy_skb, chp->wr_waitp);
c4iw_put_wr_wait(chp->wr_waitp);
+ return 0;
}
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 2b2b009b371a..a27899402f59 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -985,21 +985,20 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+int c4iw_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
-void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
-void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 73936c3341b7..42234df896fb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -510,7 +510,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
__be64 *pages;
int shift, n, i;
int err = -ENOMEM;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
@@ -548,7 +548,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
shift = PAGE_SHIFT;
- n = ib_umem_num_pages(mhp->umem);
+ n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
err = alloc_pbl(mhp, n);
if (err)
goto err_umem_release;
@@ -561,8 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
i = n = 0;
- for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
- pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+ rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
+ pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
if (i == PAGE_SIZE / sizeof(*pages)) {
err = write_pbl(&mhp->rhp->rdev, pages,
mhp->attr.pbl_addr + (n << 3), i,
@@ -611,30 +611,23 @@ err_free_mhp:
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
+ struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
struct c4iw_dev *rhp;
struct c4iw_pd *php;
- struct c4iw_mw *mhp;
u32 mmid;
u32 stag = 0;
int ret;
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
+ if (ibmw->type != IB_MW_TYPE_1)
+ return -EINVAL;
- php = to_c4iw_pd(pd);
+ php = to_c4iw_pd(ibmw->pd);
rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
- if (!mhp->wr_waitp) {
- ret = -ENOMEM;
- goto free_mhp;
- }
+ if (!mhp->wr_waitp)
+ return -ENOMEM;
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
@@ -645,18 +638,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
if (ret)
goto free_skb;
+
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_MW;
mhp->attr.stag = stag;
mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
+ ibmw->rkey = stag;
if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
ret = -ENOMEM;
goto dealloc_win;
}
pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
- return &(mhp->ibmw);
+ return 0;
dealloc_win:
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
@@ -665,9 +659,7 @@ free_skb:
kfree_skb(mhp->dereg_skb);
free_wr_wait:
c4iw_put_wr_wait(mhp->wr_waitp);
-free_mhp:
- kfree(mhp);
- return ERR_PTR(ret);
+ return ret;
}
int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -684,8 +676,6 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
c4iw_put_wr_wait(mhp->wr_waitp);
- pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
- kfree(mhp);
return 0;
}
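
Note: alloc_mw becomes a core-allocated object here: the ib_mw container is zero-allocated by the core (sized via INIT_RDMA_OBJ_SIZE in the ops table, see provider.c below) and freed by the core after dealloc or on alloc failure, so the driver's kzalloc()/kfree() pair disappears. A hedged sketch of the pattern with illustrative names:

    #include <rdma/ib_verbs.h>

    struct example_mw {
            struct ib_mw ibmw;      /* core object embedded in the container */
            u32 stag;
    };

    static int example_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
    {
            struct example_mw *mw = container_of(ibmw, struct example_mw, ibmw);

            if (ibmw->type != IB_MW_TYPE_1)
                    return -EINVAL;         /* error unwinds; core frees mw */

            mw->stag = 0;                   /* driver fills only HW state */
            ibmw->rkey = mw->stag;
            return 0;
    }

    static const struct ib_device_ops example_ops = {
            .alloc_mw = example_alloc_mw,
            INIT_RDMA_OBJ_SIZE(ib_mw, example_mw, ibmw),
    };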
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 6c579d2d3997..8138c57a1e43 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -190,7 +190,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return ret;
}
-static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
+static int c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -202,6 +202,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
mutex_unlock(&rhp->rdev.stats.lock);
+ return 0;
}
static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
@@ -497,8 +498,10 @@ static const struct ib_device_ops c4iw_dev_ops = {
.query_qp = c4iw_ib_query_qp,
.reg_user_mr = c4iw_reg_user_mr,
.req_notify_cq = c4iw_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+
INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
};
@@ -567,7 +570,9 @@ void c4iw_register_device(struct work_struct *work)
ret = set_netdevs(&dev->ibdev, &dev->rdev);
if (ret)
goto err_dealloc_ctx;
- ret = ib_register_device(&dev->ibdev, "cxgb4_%d");
+ dma_set_max_seg_size(&dev->rdev.lldi.pdev->dev, UINT_MAX);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d",
+ &dev->rdev.lldi.pdev->dev);
if (ret)
goto err_dealloc_ctx;
return;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cbddb20c6121..f20379e4e2ec 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2797,7 +2797,7 @@ err_free_wr_wait:
return ret;
}
-void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_srq *srq;
@@ -2813,4 +2813,5 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
srq->wr_waitp);
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
c4iw_put_wr_wait(srq->wr_waitp);
+ return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 1889dd172a25..e5d9712e98c4 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -33,7 +33,8 @@ struct efa_irq {
char name[EFA_IRQNAME_SIZE];
};
-struct efa_sw_stats {
+/* Don't use anything other than atomic64 */
+struct efa_stats {
atomic64_t alloc_pd_err;
atomic64_t create_qp_err;
atomic64_t create_cq_err;
@@ -41,11 +42,6 @@ struct efa_sw_stats {
atomic64_t alloc_ucontext_err;
atomic64_t create_ah_err;
atomic64_t mmap_err;
-};
-
-/* Don't use anything other than atomic64 */
-struct efa_stats {
- struct efa_sw_stats sw_stats;
atomic64_t keep_alive_rcvd;
};
@@ -134,12 +130,12 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
-void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
@@ -156,7 +152,7 @@ void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void efa_destroy_ah(struct ib_ah *ibah, u32 flags);
+int efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 5484b08bbc5d..b199e4ac6cf9 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -61,6 +61,8 @@ enum efa_admin_qp_state {
enum efa_admin_get_stats_type {
EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
+ EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
};
enum efa_admin_get_stats_scope {
@@ -68,14 +70,6 @@ enum efa_admin_get_stats_scope {
EFA_ADMIN_GET_STATS_SCOPE_QUEUE = 1,
};
-enum efa_admin_modify_qp_mask_bits {
- EFA_ADMIN_QP_STATE_BIT = 0,
- EFA_ADMIN_CUR_QP_STATE_BIT = 1,
- EFA_ADMIN_QKEY_BIT = 2,
- EFA_ADMIN_SQ_PSN_BIT = 3,
- EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT = 4,
-};
-
/*
* QP allocation sizes, converted by fabric QueuePair (QP) create command
* from QP capabilities.
@@ -199,8 +193,14 @@ struct efa_admin_modify_qp_cmd {
struct efa_admin_aq_common_desc aq_common_desc;
/*
- * Mask indicating which fields should be updated see enum
- * efa_admin_modify_qp_mask_bits
+ * Mask indicating which fields should be updated
+ * 0 : qp_state
+ * 1 : cur_qp_state
+ * 2 : qkey
+ * 3 : sq_psn
+ * 4 : sq_drained_async_notify
+ * 5 : rnr_retry
+ * 31:6 : reserved
*/
u32 modify_mask;
@@ -222,8 +222,8 @@ struct efa_admin_modify_qp_cmd {
/* Enable async notification when SQ is drained */
u8 sq_drained_async_notify;
- /* MBZ */
- u8 reserved1;
+ /* Number of RNR retries (valid only for SRD QPs) */
+ u8 rnr_retry;
/* MBZ */
u16 reserved2;
@@ -258,8 +258,8 @@ struct efa_admin_query_qp_resp {
/* Indicates that draining is in progress */
u8 sq_draining;
- /* MBZ */
- u8 reserved1;
+ /* Number of RNR retries (valid only for SRD QPs) */
+ u8 rnr_retry;
/* MBZ */
u16 reserved2;
@@ -530,10 +530,36 @@ struct efa_admin_basic_stats {
u64 rx_drops;
};
+struct efa_admin_messages_stats {
+ u64 send_bytes;
+
+ u64 send_wrs;
+
+ u64 recv_bytes;
+
+ u64 recv_wrs;
+};
+
+struct efa_admin_rdma_read_stats {
+ u64 read_wrs;
+
+ u64 read_bytes;
+
+ u64 read_wr_err;
+
+ u64 read_resp_bytes;
+};
+
struct efa_admin_acq_get_stats_resp {
struct efa_admin_acq_common_desc acq_common_desc;
- struct efa_admin_basic_stats basic_stats;
+ union {
+ struct efa_admin_basic_stats basic_stats;
+
+ struct efa_admin_messages_stats messages_stats;
+
+ struct efa_admin_rdma_read_stats rdma_read_stats;
+ } u;
};
struct efa_admin_get_set_feature_common_desc {
@@ -576,7 +602,9 @@ struct efa_admin_feature_device_attr_desc {
/*
* 0 : rdma_read - If set, RDMA Read is supported on
* TX queues
- * 31:1 : reserved - MBZ
+ * 1 : rnr_retry - If set, RNR retry is supported on
+ * modify QP command
+ * 31:2 : reserved - MBZ
*/
u32 device_caps;
@@ -862,6 +890,14 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
+/* modify_qp_cmd */
+#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK BIT(0)
+#define EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE_MASK BIT(1)
+#define EFA_ADMIN_MODIFY_QP_CMD_QKEY_MASK BIT(2)
+#define EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN_MASK BIT(3)
+#define EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY_MASK BIT(4)
+#define EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY_MASK BIT(5)
+
/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
@@ -878,6 +914,7 @@ struct efa_admin_host_info {
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
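
Note: the enum of bit numbers is replaced by plain BIT()/GENMASK() mask macros, which compose directly with the generic kernel bitfield helpers. A small sketch of encoding and decoding such fields (the field names here are made up):

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define EX_PAGE_SHIFT_MASK      GENMASK(4, 0)   /* multi-bit field */
    #define EX_VIRT_MODE_MASK       BIT(7)          /* single-bit flag */

    static u32 ex_encode(u8 page_shift, bool virt)
    {
            return FIELD_PREP(EX_PAGE_SHIFT_MASK, page_shift) |
                   FIELD_PREP(EX_VIRT_MODE_MASK, virt);
    }

    static u8 ex_page_shift(u32 reg)
    {
            return FIELD_GET(EX_PAGE_SHIFT_MASK, reg);
    }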
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 6ac23627f65a..f752ef64159c 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -76,6 +76,7 @@ int efa_com_modify_qp(struct efa_com_dev *edev,
cmd.qkey = params->qkey;
cmd.sq_psn = params->sq_psn;
cmd.sq_drained_async_notify = params->sq_drained_async_notify;
+ cmd.rnr_retry = params->rnr_retry;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
@@ -121,6 +122,7 @@ int efa_com_query_qp(struct efa_com_dev *edev,
result->qkey = resp.qkey;
result->sq_draining = resp.sq_draining;
result->sq_psn = resp.sq_psn;
+ result->rnr_retry = resp.rnr_retry;
return 0;
}
@@ -750,11 +752,27 @@ int efa_com_get_stats(struct efa_com_dev *edev,
return err;
}
- result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes;
- result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts;
- result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes;
- result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts;
- result->basic_stats.rx_drops = resp.basic_stats.rx_drops;
+ switch (cmd.type) {
+ case EFA_ADMIN_GET_STATS_TYPE_BASIC:
+ result->basic_stats.tx_bytes = resp.u.basic_stats.tx_bytes;
+ result->basic_stats.tx_pkts = resp.u.basic_stats.tx_pkts;
+ result->basic_stats.rx_bytes = resp.u.basic_stats.rx_bytes;
+ result->basic_stats.rx_pkts = resp.u.basic_stats.rx_pkts;
+ result->basic_stats.rx_drops = resp.u.basic_stats.rx_drops;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_MESSAGES:
+ result->messages_stats.send_bytes = resp.u.messages_stats.send_bytes;
+ result->messages_stats.send_wrs = resp.u.messages_stats.send_wrs;
+ result->messages_stats.recv_bytes = resp.u.messages_stats.recv_bytes;
+ result->messages_stats.recv_wrs = resp.u.messages_stats.recv_wrs;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_RDMA_READ:
+ result->rdma_read_stats.read_wrs = resp.u.rdma_read_stats.read_wrs;
+ result->rdma_read_stats.read_bytes = resp.u.rdma_read_stats.read_bytes;
+ result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
+ result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
+ break;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 190bac23f585..eea4ebfbe6ec 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -47,6 +47,7 @@ struct efa_com_modify_qp_params {
u32 qkey;
u32 sq_psn;
u8 sq_drained_async_notify;
+ u8 rnr_retry;
};
struct efa_com_query_qp_params {
@@ -58,6 +59,7 @@ struct efa_com_query_qp_result {
u32 qkey;
u32 sq_draining;
u32 sq_psn;
+ u8 rnr_retry;
};
struct efa_com_destroy_qp_params {
@@ -238,8 +240,24 @@ struct efa_com_basic_stats {
u64 rx_drops;
};
+struct efa_com_messages_stats {
+ u64 send_bytes;
+ u64 send_wrs;
+ u64 recv_bytes;
+ u64 recv_wrs;
+};
+
+struct efa_com_rdma_read_stats {
+ u64 read_wrs;
+ u64 read_bytes;
+ u64 read_wr_err;
+ u64 read_resp_bytes;
+};
+
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
+ struct efa_com_messages_stats messages_stats;
+ struct efa_com_rdma_read_stats rdma_read_stats;
};
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 92d701146320..6faed3a81e08 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -331,7 +331,7 @@ static int efa_ib_device_add(struct efa_dev *dev)
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
- err = ib_register_device(&dev->ibdev, "efa_%d");
+ err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
goto err_release_doorbell_bar;
@@ -418,7 +418,7 @@ static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
err);
return err;
}
-
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 9e201f169289..191e0843f090 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -4,6 +4,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/log2.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
@@ -35,6 +36,14 @@ struct efa_user_mmap_entry {
op(EFA_RX_BYTES, "rx_bytes") \
op(EFA_RX_PKTS, "rx_pkts") \
op(EFA_RX_DROPS, "rx_drops") \
+ op(EFA_SEND_BYTES, "send_bytes") \
+ op(EFA_SEND_WRS, "send_wrs") \
+ op(EFA_RECV_BYTES, "recv_bytes") \
+ op(EFA_RECV_WRS, "recv_wrs") \
+ op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
+ op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
+ op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
+ op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
op(EFA_CMDS_ERR, "cmds_err") \
@@ -142,10 +151,9 @@ to_emmap(struct rdma_user_mmap_entry *rdma_entry)
return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}
-static inline bool is_rdma_read_cap(struct efa_dev *dev)
-{
- return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
-}
+#define EFA_DEV_CAP(dev, cap) \
+ ((dev)->dev_attr.device_caps & \
+ EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
#define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved))
@@ -221,9 +229,12 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rq_wr = dev_attr->max_rq_depth;
resp.max_rdma_size = dev_attr->max_rdma_size;
- if (is_rdma_read_cap(dev))
+ if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
+ if (EFA_DEV_CAP(dev, RNR_RETRY))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
+
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
@@ -269,7 +280,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
#define EFA_QUERY_QP_SUPP_MASK \
(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
- IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)
+ IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
ibdev_dbg(&dev->ibdev,
@@ -291,6 +302,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->sq_psn = result.sq_psn;
qp_attr->sq_draining = result.sq_draining;
qp_attr->port_num = 1;
+ qp_attr->rnr_retry = result.rnr_retry;
qp_attr->cap.max_send_wr = qp->max_send_wr;
qp_attr->cap.max_recv_wr = qp->max_recv_wr;
@@ -376,17 +388,18 @@ int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err_dealloc_pd:
efa_pd_dealloc(dev, result.pdn);
err_out:
- atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
+ atomic64_inc(&dev->stats.alloc_pd_err);
return err;
}
-void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_pd *pd = to_epd(ibpd);
ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
efa_pd_dealloc(dev, pd->pdn);
+ return 0;
}
static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
@@ -737,18 +750,130 @@ err_free_mapped:
err_free_qp:
kfree(qp);
err_out:
- atomic64_inc(&dev->stats.sw_stats.create_qp_err);
+ atomic64_inc(&dev->stats.create_qp_err);
return ERR_PTR(err);
}
+static const struct {
+ int valid;
+ enum ib_qp_attr_mask req_param;
+ enum ib_qp_attr_mask opt_param;
+} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_INIT] = {
+ .valid = 1,
+ .req_param = IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_QKEY,
+ },
+ },
+ [IB_QPS_INIT] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_INIT] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_PORT |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_RTR] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_QKEY,
+ },
+ },
+ [IB_QPS_RTR] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .req_param = IB_QP_SQ_PSN,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY |
+ IB_QP_RNR_RETRY,
+
+ }
+ },
+ [IB_QPS_RTS] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_SQD] = {
+ .valid = 1,
+ .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
+ },
+ },
+ [IB_QPS_SQD] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ },
+ [IB_QPS_SQD] = {
+ .valid = 1,
+ .opt_param = IB_QP_PKEY_INDEX |
+ IB_QP_QKEY,
+ }
+ },
+ [IB_QPS_SQE] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ [IB_QPS_RTS] = {
+ .valid = 1,
+ .opt_param = IB_QP_CUR_STATE |
+ IB_QP_QKEY,
+ }
+ },
+ [IB_QPS_ERR] = {
+ [IB_QPS_RESET] = { .valid = 1 },
+ [IB_QPS_ERR] = { .valid = 1 },
+ }
+};
+
+static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
+ enum ib_qp_state next_state,
+ enum ib_qp_attr_mask mask)
+{
+ enum ib_qp_attr_mask req_param, opt_param;
+
+ if (mask & IB_QP_CUR_STATE &&
+ cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
+ cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
+ return false;
+
+ if (!srd_qp_state_table[cur_state][next_state].valid)
+ return false;
+
+ req_param = srd_qp_state_table[cur_state][next_state].req_param;
+ opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
+
+ if ((mask & req_param) != req_param)
+ return false;
+
+ if (mask & ~(req_param | opt_param | IB_QP_STATE))
+ return false;
+
+ return true;
+}
+
static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
struct ib_qp_attr *qp_attr, int qp_attr_mask,
enum ib_qp_state cur_state,
enum ib_qp_state new_state)
{
+ int err;
+
#define EFA_MODIFY_QP_SUPP_MASK \
(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
- IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)
+ IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
+ IB_QP_RNR_RETRY)
if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
ibdev_dbg(&dev->ibdev,
@@ -757,8 +882,14 @@ static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
return -EOPNOTSUPP;
}
- if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
- qp_attr_mask)) {
+ if (qp->ibqp.qp_type == IB_QPT_DRIVER)
+ err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
+ qp_attr_mask);
+ else
+ err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
+ qp_attr_mask);
+
+ if (err) {
ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
return -EINVAL;
}
@@ -805,28 +936,36 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
params.qp_handle = qp->qp_handle;
if (qp_attr_mask & IB_QP_STATE) {
- params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
- BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
+ 1);
+ EFA_SET(&params.modify_mask,
+ EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
params.cur_qp_state = qp_attr->cur_qp_state;
params.qp_state = qp_attr->qp_state;
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
- params.modify_mask |=
- BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
+ EFA_SET(&params.modify_mask,
+ EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
}
if (qp_attr_mask & IB_QP_QKEY) {
- params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
params.qkey = qp_attr->qkey;
}
if (qp_attr_mask & IB_QP_SQ_PSN) {
- params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
params.sq_psn = qp_attr->sq_psn;
}
+ if (qp_attr_mask & IB_QP_RNR_RETRY) {
+ EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
+ 1);
+ params.rnr_retry = qp_attr->rnr_retry;
+ }
+
err = efa_com_modify_qp(&dev->edev, &params);
if (err)
return err;
@@ -843,7 +982,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
return efa_com_destroy_cq(&dev->edev, &params);
}
-void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibcq->device);
struct efa_cq *cq = to_ecq(ibcq);
@@ -856,6 +995,7 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
efa_destroy_cq_idx(dev, cq->cq_idx);
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
+ return 0;
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
@@ -996,7 +1136,7 @@ err_free_mapped:
DMA_FROM_DEVICE);
err_out:
- atomic64_inc(&dev->stats.sw_stats.create_cq_err);
+ atomic64_inc(&dev->stats.create_cq_err);
return err;
}
@@ -1013,8 +1153,7 @@ static int umem_to_page_list(struct efa_dev *dev,
ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
hp_cnt, pages_in_hp);
- rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
- BIT(hp_shift))
+ rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
return 0;
@@ -1026,7 +1165,7 @@ static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
struct page *pg;
int i;
- sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
+ sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
if (!sglist)
return NULL;
sg_init_table(sglist, page_cnt);
@@ -1370,7 +1509,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
supp_access_flags =
IB_ACCESS_LOCAL_WRITE |
- (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+ (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~supp_access_flags) {
@@ -1410,9 +1549,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_unmap;
}
- params.page_shift = __ffs(pg_sz);
- params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
- pg_sz);
+ params.page_shift = order_base_2(pg_sz);
+ params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
ibdev_dbg(&dev->ibdev,
"start %#llx length %#llx params.page_shift %u params.page_num %u\n",
@@ -1451,7 +1589,7 @@ err_unmap:
err_free:
kfree(mr);
err_out:
- atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
+ atomic64_inc(&dev->stats.reg_mr_err);
return ERR_PTR(err);
}
@@ -1569,19 +1707,17 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
resp.max_tx_batch = dev->dev_attr.max_tx_batch;
resp.min_sq_wr = dev->dev_attr.min_sq_depth;
- if (udata && udata->outlen) {
- err = ib_copy_to_udata(udata, &resp,
- min(sizeof(resp), udata->outlen));
- if (err)
- goto err_dealloc_uar;
- }
+ err = ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen));
+ if (err)
+ goto err_dealloc_uar;
return 0;
err_dealloc_uar:
efa_dealloc_uar(dev, result.uarn);
err_out:
- atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
+ atomic64_inc(&dev->stats.alloc_ucontext_err);
return err;
}
@@ -1614,7 +1750,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
ibdev_dbg(&dev->ibdev,
"pgoff[%#lx] does not have valid entry\n",
vma->vm_pgoff);
- atomic64_inc(&dev->stats.sw_stats.mmap_err);
+ atomic64_inc(&dev->stats.mmap_err);
return -EINVAL;
}
entry = to_emmap(rdma_entry);
@@ -1656,7 +1792,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
entry->address, rdma_entry->npages * PAGE_SIZE,
entry->mmap_flag, err);
- atomic64_inc(&dev->stats.sw_stats.mmap_err);
+ atomic64_inc(&dev->stats.mmap_err);
}
rdma_user_mmap_entry_put(rdma_entry);
@@ -1741,11 +1877,11 @@ int efa_create_ah(struct ib_ah *ibah,
err_destroy_ah:
efa_ah_destroy(dev, ah);
err_out:
- atomic64_inc(&dev->stats.sw_stats.create_ah_err);
+ atomic64_inc(&dev->stats.create_ah_err);
return err;
}
-void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
+int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct efa_dev *dev = to_edev(ibah->pd->device);
struct efa_ah *ah = to_eah(ibah);
@@ -1755,10 +1891,11 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
ibdev_dbg(&dev->ibdev,
"Destroy address handle is not supported in atomic context\n");
- return;
+ return -EOPNOTSUPP;
}
efa_ah_destroy(dev, ah);
+ return 0;
}
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
@@ -1774,13 +1911,15 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
struct efa_dev *dev = to_edev(ibdev);
+ struct efa_com_rdma_read_stats *rrs;
+ struct efa_com_messages_stats *ms;
struct efa_com_basic_stats *bs;
struct efa_com_stats_admin *as;
struct efa_stats *s;
int err;
- params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
+ params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
err = efa_com_get_stats(&dev->edev, &params, &result);
if (err)
@@ -1793,6 +1932,28 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[EFA_RX_PKTS] = bs->rx_pkts;
stats->value[EFA_RX_DROPS] = bs->rx_drops;
+ params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ ms = &result.messages_stats;
+ stats->value[EFA_SEND_BYTES] = ms->send_bytes;
+ stats->value[EFA_SEND_WRS] = ms->send_wrs;
+ stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
+ stats->value[EFA_RECV_WRS] = ms->recv_wrs;
+
+ params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ rrs = &result.rdma_read_stats;
+ stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
+ stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
+ stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
+ stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
+
as = &dev->edev.aq.stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
@@ -1801,13 +1962,14 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
- stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
- stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
- stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
- stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
- stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
- stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
- stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
+ stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
+ stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
+ stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
+ stats->value[EFA_ALLOC_UCONTEXT_ERR] =
+ atomic64_read(&s->alloc_ucontext_err);
+ stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
return ARRAY_SIZE(efa_stats_names);
}
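
Note: the stat names added near the top of this file ride on an X-macro: a single op() list expands once into enum indices and once into name strings, so stats->value[] indexing and the name table cannot drift apart. A generic sketch of the idiom:

    #define EX_STATS(op)                    \
            op(EX_TX_BYTES, "tx_bytes")     \
            op(EX_RX_BYTES, "rx_bytes")

    #define EX_STAT_ENUM(e, n) e,
    #define EX_STAT_NAME(e, n) n,

    enum ex_stat { EX_STATS(EX_STAT_ENUM) EX_NR_STATS };

    static const char *const ex_stat_names[] = { EX_STATS(EX_STAT_NAME) };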
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 014351ebbefa..9f71b9d706bd 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -97,41 +97,9 @@ static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
- u64 rx_packets = 0ull;
- u64 rx_bytes = 0ull;
- u64 tx_packets = 0ull;
- u64 tx_bytes = 0ull;
- int i;
netdev_stats_to_stats64(storage, &dev->stats);
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *stats;
- unsigned int start;
- u64 trx_packets;
- u64 trx_bytes;
- u64 ttx_packets;
- u64 ttx_bytes;
-
- stats = per_cpu_ptr(priv->netstats, i);
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- trx_packets = stats->rx_packets;
- trx_bytes = stats->rx_bytes;
- ttx_packets = stats->tx_packets;
- ttx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-
- rx_packets += trx_packets;
- rx_bytes += trx_bytes;
- tx_packets += ttx_packets;
- tx_bytes += ttx_bytes;
- }
-
- storage->rx_packets += rx_packets;
- storage->rx_bytes += rx_bytes;
- storage->tx_packets += tx_packets;
- storage->tx_bytes += tx_bytes;
+ dev_fetch_sw_netstats(storage, priv->netstats);
}
static const struct net_device_ops hfi1_ipoib_netdev_ops = {
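
Note: the open-coded per-CPU summation collapses into dev_fetch_sw_netstats(), which folds a struct pcpu_sw_netstats __percpu array into the rtnl stats using the same u64_stats seqcount loop. A minimal sketch, assuming a driver private struct that holds the per-CPU counters:

    #include <linux/netdevice.h>

    struct ex_priv {
            struct pcpu_sw_netstats __percpu *netstats;
    };

    static void ex_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *storage)
    {
            struct ex_priv *priv = netdev_priv(dev);

            /* Base counters first, then the per-CPU sw tx/rx totals. */
            netdev_stats_to_stats64(storage, &dev->stats);
            dev_fetch_sw_netstats(storage, priv->netstats);
    }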
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 04575c9afd61..a307d4c8b15a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -232,11 +232,11 @@ static const struct sdma_set_state_action sdma_action_table[] = {
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
-static void sdma_hw_clean_up_task(unsigned long);
+static void sdma_hw_clean_up_task(struct tasklet_struct *);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
-static void sdma_sw_clean_up_task(unsigned long);
+static void sdma_sw_clean_up_task(struct tasklet_struct *);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
@@ -545,9 +545,10 @@ static void sdma_err_progress_check(struct timer_list *t)
schedule_work(&sde->err_halt_worker);
}
-static void sdma_hw_clean_up_task(unsigned long opaque)
+static void sdma_hw_clean_up_task(struct tasklet_struct *t)
{
- struct sdma_engine *sde = (struct sdma_engine *)opaque;
+ struct sdma_engine *sde = from_tasklet(sde, t,
+ sdma_hw_clean_up_task);
u64 statuscsr;
while (1) {
@@ -604,9 +605,9 @@ static void sdma_flush_descq(struct sdma_engine *sde)
sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}
-static void sdma_sw_clean_up_task(unsigned long opaque)
+static void sdma_sw_clean_up_task(struct tasklet_struct *t)
{
- struct sdma_engine *sde = (struct sdma_engine *)opaque;
+ struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task);
unsigned long flags;
spin_lock_irqsave(&sde->tail_lock, flags);
@@ -1454,11 +1455,10 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->tail_csr =
get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
- tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
- (unsigned long)sde);
-
- tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
- (unsigned long)sde);
+ tasklet_setup(&sde->sdma_hw_clean_up_task,
+ sdma_hw_clean_up_task);
+ tasklet_setup(&sde->sdma_sw_clean_up_task,
+ sdma_sw_clean_up_task);
INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
INIT_WORK(&sde->flush_worker, sdma_field_flush);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 30865635b449..3591923abebb 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1424,7 +1424,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
/* see rate_show() in ib core/sysfs.c */
- props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
+ props->active_speed = opa_speed_to_ib(ppd->link_speed_active);
props->max_vl_num = ppd->vls_supported;
/* Once we are a "first class" citizen and have added the OPA MTUs to
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 5b2f9314edd3..75b06db60f7c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,6 +39,22 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
+static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
+{
+ u32 fl = ah_attr->grh.flow_label;
+ u16 sport;
+
+ if (!fl)
+ sport = get_random_u32() %
+ (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
+ else
+ sport = rdma_flow_label_to_udp_sport(fl);
+
+ return sport;
+}
+
int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
@@ -79,6 +95,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
ah->av.sl = rdma_ah_get_sl(ah_attr);
+ ah->av.flowlabel = grh->flow_label;
+ ah->av.udp_sport = get_ah_udp_sport(ah_attr);
return 0;
}
@@ -98,8 +116,3 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-
-void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
-{
- return;
-}
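
Note: get_ah_udp_sport() above concentrates the RoCEv2 source-port policy in one place: a flow label, when present, is hashed deterministically into the valid UDP sport range, otherwise a random in-range port is chosen. The same logic as a standalone sketch (helper name is illustrative):

    #include <linux/random.h>
    #include <rdma/ib_verbs.h>

    static u16 ex_udp_sport(u32 flow_label)
    {
            if (flow_label)
                    /* Deterministic: same flow label, same sport. */
                    return rdma_flow_label_to_udp_sport(flow_label);

            /* No label: pick any port in the RoCEv2 valid range. */
            return get_random_u32() %
                           (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
                            IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
                   IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
    }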
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a522cb2d29ea..a6b23dec1adc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -268,8 +268,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
}
/* convert system page cnt to hw page cnt */
- rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
- 1 << page_shift) {
+ rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
addr = rdma_block_iter_dma_address(&biter);
if (idx >= start) {
bufs[total++] = addr;
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index e87d616f7988..809b22aa5056 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -150,7 +150,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
int err;
buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
- buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
buf_attr.region_count = 1;
buf_attr.fixed_page = true;
@@ -224,6 +224,21 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
}
}
+static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+
+ if (udata) {
+ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
+ hr_cq->cqe_size = ucmd->cqe_size;
+ else
+ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
+ } else {
+ hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+ }
+}
+
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
@@ -258,7 +273,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
INIT_LIST_HEAD(&hr_cq->rq_list);
if (udata) {
- ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(sizeof(ucmd), udata->inlen));
if (ret) {
ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
ret);
@@ -266,6 +282,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
}
}
+ set_cqe_size(hr_cq, udata, &ucmd);
+
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
@@ -287,7 +305,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
/*
* For the QP created by kernel space, tptr value should be initialized
* to zero; For the QP created by user space, it will cause synchronous
- * problems if tptr is set to zero here, so we initialze it in user
+ * problems if tptr is set to zero here, so we initialize it in user
* space.
*/
if (!udata && hr_cq->tptr_addr)
@@ -311,7 +329,7 @@ err_cq_buf:
return ret;
}
-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -322,6 +340,7 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
free_cq_buf(hr_dev, hr_cq);
free_cq_db(hr_dev, hr_cq, udata);
free_cqc(hr_dev, hr_cq);
+ return 0;
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
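
Note: set_cqe_size() above shows the standard way to extend a user ABI: copy min(sizeof(cmd), udata->inlen) and use offsetofend() to detect whether old userspace supplied the new trailing field. A compact sketch of that compatibility check (struct layout and default are illustrative):

    #include <linux/stddef.h>
    #include <rdma/ib_verbs.h>

    struct ex_create_cq_cmd {
            __aligned_u64 buf_addr;
            __u32 cqe_size;         /* field appended to the ABI later */
            __u32 reserved;
    };

    static u32 ex_cqe_size(struct ib_udata *udata, struct ex_create_cq_cmd *cmd)
    {
            /* Old userspace stops short of the new field: fall back. */
            if (udata->inlen >= offsetofend(typeof(*cmd), cqe_size))
                    return cmd->cqe_size;
            return 32;      /* legacy default, e.g. HNS_ROCE_V2_CQE_SIZE */
    }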
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 6edcbdcd8f43..6d2acff69f98 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -37,8 +37,8 @@
#define DRV_NAME "hns_roce"
-/* hip08 is a pci device */
#define PCI_REVISION_ID_HIP08 0x21
+#define PCI_REVISION_ID_HIP09 0x30
#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
@@ -57,7 +57,6 @@
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
-#define HNS_ROCE_MAX_SGE_NUM 2
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
@@ -76,15 +75,18 @@
#define HNS_ROCE_CEQ 0
#define HNS_ROCE_AEQ 1
-#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
+#define HNS_ROCE_CEQE_SIZE 0x4
+#define HNS_ROCE_AEQE_SIZE 0x10
-#define HNS_ROCE_SL_SHIFT 28
-#define HNS_ROCE_TCLASS_SHIFT 20
-#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
+#define HNS_ROCE_V3_EQE_SIZE 0x40
+
+#define HNS_ROCE_V2_CQE_SIZE 32
+#define HNS_ROCE_V3_CQE_SIZE 64
+
+#define HNS_ROCE_V2_QPC_SZ 256
+#define HNS_ROCE_V3_QPC_SZ 512
#define HNS_ROCE_MAX_PORTS 6
-#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_SGE_SIZE 16
@@ -112,8 +114,6 @@
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32
-#define HNS_ROCE_PCI_BAR_NUM 2
-
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
@@ -467,6 +467,7 @@ struct hns_roce_cq {
void __iomem *cq_db_l;
u16 *tptr_addr;
int arm_sn;
+ int cqe_size;
unsigned long cqn;
u32 vector;
atomic_t refcount;
@@ -535,17 +536,18 @@ struct hns_roce_raq_table {
};
struct hns_roce_av {
- u8 port;
- u8 gid_index;
- u8 stat_rate;
- u8 hop_limit;
- u32 flowlabel;
- u8 sl;
- u8 tclass;
- u8 dgid[HNS_ROCE_GID_SIZE];
- u8 mac[ETH_ALEN];
- u16 vlan_id;
- bool vlan_en;
+ u8 port;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ u32 flowlabel;
+ u16 udp_sport;
+ u8 sl;
+ u8 tclass;
+ u8 dgid[HNS_ROCE_GID_SIZE];
+ u8 mac[ETH_ALEN];
+ u16 vlan_id;
+ bool vlan_en;
};
struct hns_roce_ah {
@@ -655,6 +657,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+ enum ib_mtu path_mtu;
+ u32 max_inline_data;
/* 0: flush needed, 1: unneeded */
unsigned long flush_flag;
@@ -678,7 +682,8 @@ enum {
};
struct hns_roce_ceqe {
- __le32 comp;
+ __le32 comp;
+ __le32 rsv[15];
};
struct hns_roce_aeqe {
@@ -715,6 +720,7 @@ struct hns_roce_aeqe {
u8 rsv0;
} __packed cmd;
} event;
+ __le32 rsv[12];
};
struct hns_roce_eq {
@@ -791,15 +797,15 @@ struct hns_roce_caps {
int num_pds;
int reserved_pds;
u32 mtt_entry_sz;
- u32 cq_entry_sz;
+ u32 cqe_sz;
u32 page_size_cap;
u32 reserved_lkey;
int mtpt_entry_sz;
- int qpc_entry_sz;
+ int qpc_sz;
int irrl_entry_sz;
int trrl_entry_sz;
int cqc_entry_sz;
- int sccc_entry_sz;
+ int sccc_sz;
int qpc_timer_entry_sz;
int cqc_timer_entry_sz;
int srqc_entry_sz;
@@ -809,6 +815,8 @@ struct hns_roce_caps {
u32 pbl_hop_num;
int aeqe_depth;
int ceqe_depth;
+ u32 aeqe_size;
+ u32 ceqe_size;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 qpc_timer_bt_num;
@@ -930,7 +938,7 @@ struct hns_roce_hw {
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
struct ib_udata *udata);
- void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
+ int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
@@ -1178,10 +1186,13 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
+static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1200,8 +1211,7 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
- struct ib_udata *udata);
+int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
@@ -1220,7 +1230,7 @@ int hns_roce_create_srq(struct ib_srq *srq,
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
-void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
@@ -1247,7 +1257,7 @@ int to_hr_qp_type(int qp_type);
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c8db6f8ae018..7487cf3d2c37 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -338,8 +338,8 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
void __iomem *bt_cmd;
__le32 bt_cmd_val[2];
__le32 bt_cmd_h = 0;
- __le32 bt_cmd_l = 0;
- u64 bt_ba = 0;
+ __le32 bt_cmd_l;
+ u64 bt_ba;
int ret = 0;
/* Find the HEM(Hardware Entry Memory) entry */
@@ -1027,7 +1027,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.cqc_timer_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->cqc_timer_table);
- if (hr_dev->caps.sccc_entry_sz)
+ if (hr_dev->caps.sccc_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
if (hr_dev->caps.trrl_entry_sz)
@@ -1404,7 +1404,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
- int ret = 0;
+ int ret;
int unit;
int i;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index aeb3a6fa7d47..5f4d8a32ed6d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -70,15 +70,15 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_sq_db sq_db = {};
- int ps_opcode = 0, i = 0;
+ int ps_opcode, i;
unsigned long flags = 0;
void *wqe = NULL;
__le32 doorbell[2];
- u32 wqe_idx = 0;
- int nreq = 0;
int ret = 0;
- u8 *smac;
int loopback;
+ u32 wqe_idx;
+ int nreq;
+ u8 *smac;
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_RC)) {
@@ -271,7 +271,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
break;
case IB_WR_LOCAL_INV:
- break;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
case IB_WR_LSO:
@@ -888,7 +887,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
u32 odb_ext_mod;
u32 sdb_evt_mod;
u32 odb_evt_mod;
- int ret = 0;
+ int ret;
memset(db, 0, sizeof(*db));
@@ -1148,8 +1147,8 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv = hr_dev->priv;
struct hns_roce_raq_table *raq = &priv->raq_table;
struct device *dev = &hr_dev->pdev->dev;
- int raq_shift = 0;
dma_addr_t addr;
+ int raq_shift;
__le32 tmp;
u32 val;
int ret;
@@ -1360,7 +1359,7 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv = hr_dev->priv;
struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- int ret = 0;
+ int ret;
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
if (!free_mr->free_mr_wq) {
@@ -1440,8 +1439,8 @@ static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
- int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
+ int i;
hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
@@ -1471,12 +1470,12 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
- caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
+ caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE;
caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
- caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
+ caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE;
caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
@@ -1643,7 +1642,7 @@ static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
unsigned long timeout)
{
u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
- unsigned long end = 0;
+ unsigned long end;
u32 status = 0;
end = msecs_to_jiffies(timeout) + jiffies;
@@ -1671,7 +1670,7 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
{
unsigned long flags;
u32 *p = NULL;
- u8 gid_idx = 0;
+ u8 gid_idx;
gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
@@ -1897,8 +1896,7 @@ static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(hr_cq->mtr.kmem,
- n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -2445,7 +2443,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox;
struct device *dev = &hr_dev->pdev->dev;
- int ret = 0;
+ int ret;
if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
new_state >= HNS_ROCE_QP_NUM_STATE ||
@@ -3394,7 +3392,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_qp_context *context;
- int tmp_qp_state = 0;
+ int tmp_qp_state;
int ret = 0;
int state;
@@ -3572,7 +3570,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
return 0;
}
-static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
@@ -3603,6 +3601,7 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
}
wait_time++;
}
+ return 0;
}
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
@@ -3775,8 +3774,7 @@ static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE;
+ unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
return (struct hns_roce_aeqe *)((u8 *)
(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
@@ -3881,8 +3879,7 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE;
+ unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
return (struct hns_roce_ceqe *)((u8 *)
(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
@@ -3934,7 +3931,7 @@ static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
{
struct hns_roce_eq *eq = eq_ptr;
struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work = 0;
+ int int_work;
if (eq->type_flag == HNS_ROCE_CEQ)
/* CEQ irq routine, CEQ is pulse irq, not clear */
@@ -4132,9 +4129,9 @@ static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
struct device *dev = &hr_dev->pdev->dev;
dma_addr_t tmp_dma_addr;
- u32 eqconsindx_val = 0;
u32 eqcuridx_val = 0;
- u32 eqshift_val = 0;
+ u32 eqconsindx_val;
+ u32 eqshift_val;
__le32 tmp2 = 0;
__le32 tmp1 = 0;
__le32 tmp = 0;
@@ -4253,7 +4250,7 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
CEQ_REG_OFFSET * i;
eq->entries = hr_dev->caps.ceqe_depth;
eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->eqe_size = HNS_ROCE_CEQE_SIZE;
} else {
/* AEQ */
eq_table->eqc_base[i] = hr_dev->reg_base +
@@ -4263,7 +4260,7 @@ static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
ROCEE_CAEP_AEQE_CONS_IDX_REG;
eq->entries = hr_dev->caps.aeqe_depth;
eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->eqe_size = HNS_ROCE_AEQE_SIZE;
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 52307b2c7100..ffd0156080f5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -68,13 +68,13 @@
#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
-#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
+#define HNS_ROCE_V1_QPC_SIZE 256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
-#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V1_CQE_SIZE 32
#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 4cda95ed1fbe..6d30850696c5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -153,6 +153,67 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
+static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
+ const struct ib_send_wr *wr,
+ unsigned int *sge_idx, u32 msg_len)
+{
+ struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
+ unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
+ unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
+ unsigned int left_len_in_pg;
+ unsigned int idx = *sge_idx;
+ unsigned int i = 0;
+ unsigned int len;
+ void *addr;
+ void *dseg;
+
+ if (msg_len > ext_sge_sz) {
+ ibdev_err(ibdev,
+ "no enough extended sge space for inline data.\n");
+ return -EINVAL;
+ }
+
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
+ len = wr->sg_list[0].length;
+ addr = (void *)(unsigned long)(wr->sg_list[0].addr);
+
+ /* When copying data to the extended SGE space, the remaining length in
+ * the current page may not be long enough for the user's SGE, so the
+ * data should be split into several parts: one in the first page, and
+ * the others in the subsequent pages.
+ */
+ while (1) {
+ if (len <= left_len_in_pg) {
+ memcpy(dseg, addr, len);
+
+ idx += len / dseg_len;
+
+ i++;
+ if (i >= wr->num_sge)
+ break;
+
+ dseg += len;
+ left_len_in_pg -= len;
+ len = wr->sg_list[i].length;
+ addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+ } else {
+ memcpy(dseg, addr, left_len_in_pg);
+
+ len -= left_len_in_pg;
+ addr += left_len_in_pg;
+ idx += left_len_in_pg / dseg_len;
+ dseg = hns_roce_get_extend_sge(qp,
+ idx & (qp->sge.sge_cnt - 1));
+ left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
+ }
+ }
+
+ *sge_idx = idx;
+
+ return 0;
+}
+
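The loop above tracks how much room remains in the current hardware page and splits each SGE across page boundaries as needed (dseg must advance by the bytes just copied before len is reloaded from the next SGE, hence the ordering fixed above). A simplified standalone model of the same split logic, where a flat destination buffer stands in for the extended SGE pages; this is an illustration, not the driver code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define PG_SIZE 4096u

/* Copy src[0..len) to dst starting at dst_off, never letting a single
 * memcpy cross a PG_SIZE boundary: fill the rest of the current page,
 * then continue in whole pages.
 */
static void copy_split_by_page(uint8_t *dst, size_t dst_off,
			       const uint8_t *src, size_t len)
{
	size_t left_in_pg = PG_SIZE - (dst_off % PG_SIZE);

	while (len > left_in_pg) {
		memcpy(dst + dst_off, src, left_in_pg);
		dst_off += left_in_pg;	/* now page-aligned */
		src += left_in_pg;
		len -= left_in_pg;
		left_in_pg = PG_SIZE;
	}
	memcpy(dst + dst_off, src, len);
}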
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind, unsigned int valid_num_sge)
{
@@ -177,73 +238,115 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
*sge_ind = idx;
}
+static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ int mtu = ib_mtu_enum_to_int(qp->path_mtu);
+
+ if (len > qp->max_inline_data || len > mtu) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
+ len, qp->max_inline_data, mtu);
+ return false;
+ }
+
+ return true;
+}
+
+static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ unsigned int *sge_idx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
+ u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned int curr_idx = *sge_idx;
+ void *dseg = rc_sq_wqe;
+ unsigned int i;
+ int ret;
+
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
+ ibdev_err(ibdev, "invalid inline parameters!\n");
+ return -EINVAL;
+ }
+
+ if (!check_inl_data_len(qp, msg_len))
+ return -EINVAL;
+
+ dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+
+ if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
+ roce_set_bit(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dseg, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ dseg += wr->sg_list[i].length;
+ }
+ } else {
+ roce_set_bit(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);
+
+ ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
+ if (ret)
+ return ret;
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ curr_idx - *sge_idx);
+ }
+
+ *sge_idx = curr_idx;
+
+ return 0;
+}
+
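For context, this path is reached from userspace through the ordinary rdma-core verbs. A hedged usage sketch, assuming qp was created with a sufficient cap.max_inline_data and that len fits within it:

#include <stdint.h>
#include <infiniband/verbs.h>

/* Post a small payload inline: the CPU copies it into the WQE (or the
 * extended SGE area), so buf needs no registered MR and may be reused
 * as soon as ibv_post_send() returns.
 */
static int post_inline_send(struct ibv_qp *qp, void *buf, uint32_t len)
{
	struct ibv_sge sge = {
		.addr = (uintptr_t)buf,
		.length = len,
		.lkey = 0,	/* ignored for inline sends */
	};
	struct ibv_send_wr wr = {
		.wr_id = 1,
		.sg_list = &sge,
		.num_sge = 1,
		.opcode = IBV_WR_SEND,
		.send_flags = IBV_SEND_INLINE | IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}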
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
unsigned int *sge_ind,
unsigned int valid_num_sge)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg =
(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
- struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
- void *wqe = dseg;
int j = 0;
int i;
- if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
- if (unlikely(le32_to_cpu(rc_sq_wqe->msg_len) >
- hr_dev->caps.max_sq_inline)) {
- ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len,
- hr_dev->caps.max_sq_inline);
- return -EINVAL;
- }
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
- if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
- ibdev_err(ibdev, "Not support inline data!\n");
- return -EINVAL;
- }
+ if (wr->send_flags & IB_SEND_INLINE)
+ return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
+ if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *)wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
}
-
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
- 1);
} else {
- if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
- for (i = 0; i < wr->num_sge; i++) {
- if (likely(wr->sg_list[i].length)) {
- set_data_seg_v2(dseg, wr->sg_list + i);
- dseg++;
- }
+ for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ j++;
}
- } else {
- roce_set_field(rc_sq_wqe->byte_20,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE;
- i++) {
- if (likely(wr->sg_list[i].length)) {
- set_data_seg_v2(dseg, wr->sg_list + i);
- dseg++;
- j++;
- }
- }
-
- set_extend_sge(qp, wr, sge_ind, valid_num_sge);
}
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
+ set_extend_sge(qp, wr, sge_ind, valid_num_sge);
}
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
+
return 0;
}
@@ -292,6 +395,33 @@ static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
return valid_num;
}
+static __le32 get_immtdata(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+ default:
+ return 0;
+ }
+}
+
+static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+ const struct ib_send_wr *wr)
+{
+ u32 ib_op = wr->opcode;
+
+ if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+
+ ud_sq_wqe->immtdata = get_immtdata(wr);
+
+ roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
+
+ return 0;
+}
+
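get_immtdata() converts the immediate from wire order to WQE order: the IB spec carries imm_data big-endian, while hns WQE fields are little-endian, so cpu_to_le32(be32_to_cpu(...)) is required. A standalone illustration of why that composed pair is a plain byte reversal on any host:

#include <stdint.h>

/* Spelled-out byte reversal. */
static uint32_t bswap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* On a little-endian host be32_to_cpu() swaps and cpu_to_le32() is a
 * no-op; on a big-endian host it is the other way around. Composed,
 * both hosts perform exactly one reversal, so the wire-order immediate
 * always lands little-endian in the WQE.
 */
static uint32_t be32_to_le32(uint32_t be)
{
	return bswap32(be);
}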
static inline int set_ud_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
@@ -305,10 +435,15 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
u32 msg_len = 0;
bool loopback;
u8 *smac;
+ int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+ ret = set_ud_opcode(ud_sq_wqe, wr);
+ if (WARN_ON(ret))
+ return ret;
+
roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
@@ -329,23 +464,8 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
roce_set_bit(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
- roce_set_field(ud_sq_wqe->byte_4,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
- V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
-
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- default:
- ud_sq_wqe->immtdata = 0;
- break;
- }
-
/* Set sig attr */
roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
@@ -369,7 +489,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
curr_idx & (qp->sge.sge_cnt - 1));
roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
- V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
qp->qkey : ud_wr(wr)->remote_qkey);
roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
@@ -402,6 +522,46 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
return 0;
}
+static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ const struct ib_send_wr *wr)
+{
+ u32 ib_op = wr->opcode;
+
+ rc_sq_wqe->immtdata = get_immtdata(wr);
+
+ switch (ib_op) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
+ break;
+ case IB_WR_REG_MR:
+ set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ fallthrough;
+ case IB_WR_SEND_WITH_INV:
+ rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
+
+ return 0;
+}
static inline int set_rc_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
@@ -411,25 +571,16 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
unsigned int curr_idx = *sge_idx;
unsigned int valid_num_sge;
u32 msg_len = 0;
- int ret = 0;
+ int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
- break;
- case IB_WR_SEND_WITH_INV:
- rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- rc_sq_wqe->immtdata = 0;
- break;
- }
+ ret = set_rc_opcode(rc_sq_wqe, wr);
+ if (WARN_ON(ret))
+ return ret;
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
(wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
@@ -443,33 +594,6 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
owner_bit);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_LOCAL_INV:
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
- rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- case IB_WR_REG_MR:
- set_frmr_seg(rc_sq_wqe, reg_wr(wr));
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
- break;
- default:
- break;
- }
-
- roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- to_hr_opcode(wr->opcode));
-
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
@@ -1682,7 +1806,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
- caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
+ caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -1690,7 +1814,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
- caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
+ caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
@@ -1739,6 +1863,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1760,19 +1886,26 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->cqc_timer_buf_pg_sz = 0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
caps->sccc_ba_pg_sz = 0;
caps->sccc_buf_pg_sz = 0;
caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
+
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+ caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+ }
}
static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
int *buf_page_size, int *bt_page_size, u32 hem_type)
{
u64 obj_per_chunk;
- int bt_chunk_size = 1 << PAGE_SHIFT;
- int buf_chunk_size = 1 << PAGE_SHIFT;
- int obj_per_chunk_default = buf_chunk_size / obj_size;
+ u64 bt_chunk_size = PAGE_SIZE;
+ u64 buf_chunk_size = PAGE_SIZE;
+ u64 obj_per_chunk_default = buf_chunk_size / obj_size;
*buf_page_size = 0;
*bt_page_size = 0;
@@ -1855,7 +1988,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
- caps->cq_entry_sz = resp_a->cq_entry_sz;
+ caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
caps->irrl_entry_sz = resp_b->irrl_entry_sz;
@@ -1863,9 +1996,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->cqc_entry_sz = resp_b->cqc_entry_sz;
caps->srqc_entry_sz = resp_b->srqc_entry_sz;
caps->idx_entry_sz = resp_b->idx_entry_sz;
- caps->sccc_entry_sz = resp_b->scc_ctx_entry_sz;
+ caps->sccc_sz = resp_b->sccc_sz;
caps->max_mtu = resp_b->max_mtu;
- caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz);
+ caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
caps->min_cqes = resp_b->min_cqes;
caps->min_wqes = resp_b->min_wqes;
caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
@@ -1958,6 +2091,8 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
caps->mtt_ba_pg_sz = 0;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
@@ -1981,7 +2116,15 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
- calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
+ caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
+ caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
+ caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
+ }
+
+ calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
HEM_TYPE_QPC);
calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
@@ -1998,7 +2141,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
+ calc_pg_sz(caps->num_qps, caps->sccc_sz,
caps->sccc_hop_num, caps->sccc_bt_num,
&caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
HEM_TYPE_SCCC);
@@ -2018,6 +2161,56 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
return 0;
}
+static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_entry_size *cfg_size =
+ (struct hns_roce_cfg_entry_size *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
+ false);
+
+ cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
+ cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc;
+ struct hns_roce_cfg_entry_size *cfg_size =
+ (struct hns_roce_cfg_entry_size *)desc.data;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
+ false);
+
+ cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
+ cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
+{
+ int ret;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ return 0;
+
+ ret = hns_roce_config_qpc_size(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hns_roce_config_sccc_size(hr_dev);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -2090,9 +2283,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
}
ret = hns_roce_v2_set_bt(hr_dev);
- if (ret)
- dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
- ret);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Configure bt attribute failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Configure the size of QPC, SCCC, etc. */
+ ret = hns_roce_config_entry_size(hr_dev);
return ret;
}
@@ -2757,8 +2955,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(hr_cq->mtr.kmem,
- n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2858,6 +3055,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
+ roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M,
+ V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size ==
+ HNS_ROCE_V3_CQE_SIZE ? 1 : 0);
+
cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
roce_set_field(cq_context->byte_16_hop_addr,
@@ -3025,7 +3226,8 @@ out:
}
static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
- struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+ struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
+ struct ib_wc *wc)
{
static const struct {
u32 cqe_status;
@@ -3066,7 +3268,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
- sizeof(*cqe), false);
+ cq->cqe_size, false);
/*
* For hns ROCEE, GENERAL_ERR is an error type that is not defined in
@@ -3163,7 +3365,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
}
- get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+ get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
if (unlikely(wc->status != IB_WC_SUCCESS))
return 0;
@@ -3514,16 +3716,21 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_cmd_mailbox *mailbox;
+ int qpc_size;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- memcpy(mailbox->buf, context, sizeof(*context) * 2);
+ /* The QPC size of HIP08 is only 256B, which is half that of HIP09 */
+ qpc_size = hr_dev->caps.qpc_sz;
+ memcpy(mailbox->buf, context, qpc_size);
+ memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
HNS_ROCE_CMD_MODIFY_QPC,
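The mailbox for HNS_ROCE_CMD_MODIFY_QPC thus carries [context | qpc_mask] back to back, qpc_sz bytes each (256B on HIP08, 512B on HIP09 per this series). Per the comment later in this patch, a mask bit of 0 marks a bit as "modify" and 1 as "keep"; how the hardware combines the two halves is inferred in the sketch below, not stated by the patch:

#include <stddef.h>
#include <stdint.h>

/* Inferred semantics of the context/mask pair: take modified bits from
 * ctx (mask bit 0) and preserved bits from the current QPC (mask bit 1).
 */
static void apply_masked_update(uint32_t *qpc, const uint32_t *ctx,
				const uint32_t *mask, size_t qpc_sz)
{
	size_t i;

	for (i = 0; i < qpc_sz / sizeof(uint32_t); i++)
		qpc[i] = (ctx[i] & ~mask[i]) | (qpc[i] & mask[i]);
}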
@@ -3641,9 +3848,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQ_EN_S, 1);
}
- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
- V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
-
roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
hr_qp->access_flags = attr->qp_access_flags;
@@ -3954,6 +4158,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu mtu;
+ u8 lp_pktn_ini;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4052,6 +4257,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_52_DMAC_S, 0);
mtu = get_mtu(ibqp, attr);
+ hr_qp->path_mtu = mtu;
if (attr_mask & IB_QP_PATH_MTU) {
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
@@ -4061,13 +4267,21 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
}
#define MAX_LP_MSG_LEN 65536
- /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
+
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, lp_pktn_ini);
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, lp_pktn_ini);
+ roce_set_field(qpc_mask->byte_172_sq_psn,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
+
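Since MTU * (2 ^ LP_PKTN_INI) must stay within 64KB, LP_PKTN_INI = ilog2(65536 / mtu), and the new code reuses that value as the ACK request frequency. A quick standalone check of the values this produces:

#include <stdio.h>

/* Integer log2, sufficient for the power-of-two quotients below. */
static unsigned int ilog2u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	static const unsigned int mtus[] = { 256, 512, 1024, 2048, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		/* e.g. mtu 4096: 65536 / 4096 = 16 -> lp_pktn_ini = 4 */
		printf("mtu %u -> lp_pktn_ini %u\n", mtus[i],
		       ilog2u(65536 / mtus[i]));
	return 0;
}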
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
@@ -4164,6 +4378,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
+static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ return rdma_flow_label_to_udp_sport(fl);
+}
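rdma_calc_flow_label() and rdma_flow_label_to_udp_sport() are existing RDMA core helpers; the point of this hunk is to derive ECMP entropy for RoCEv2 from the connection instead of the fixed 0x12b7 source port used before. The sketch below mirrors the label-to-port folding as understood here and is illustrative, not the canonical implementation:

#include <stdint.h>

#define ROCE_V2_UDP_SPORT_MIN 0xC000u	/* dynamic/private port range */

/* Fold a 20-bit IPv6 flow label into 14 bits and pin the result into
 * the 0xC000-0xFFFF range, so equal labels always map to equal ports.
 */
static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
	uint32_t fl_low = fl & 0x03fff;
	uint32_t fl_high = fl & 0xfc000;

	fl_low ^= fl_high >> 14;

	return (uint16_t)(fl_low | ROCE_V2_UDP_SPORT_MIN);
}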
+
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4227,7 +4449,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S,
- is_udp ? 0x12b7 : 0);
+ is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) : 0);
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S, 0);
@@ -4259,11 +4482,19 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
V2_QPC_BYTE_28_FL_S, 0);
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
+ ibdev_err(ibdev,
+ "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
+ hr_qp->sl, MAX_SERVICE_LEVEL);
+ return -EINVAL;
+ }
+
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
- V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+ V2_QPC_BYTE_28_SL_S, hr_qp->sl);
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
return 0;
}
@@ -4309,7 +4540,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
}
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- memset(qpc_mask, 0, sizeof(*qpc_mask));
+ memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
@@ -4532,8 +4763,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- memset(context, 0, sizeof(*context));
- memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+ memset(context, 0, hr_dev->caps.qpc_sz);
+ memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
+
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask);
if (ret)
@@ -4583,7 +4815,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
- ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
+ ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
goto out;
@@ -4646,7 +4878,7 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
if (ret)
goto out;
- memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+ memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -4759,7 +4991,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
V2_QPC_BYTE_212_RETRY_CNT_M,
V2_QPC_BYTE_212_RETRY_CNT_S);
- qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
+ qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S);
done:
qp_attr->cur_qp_state = qp_attr->qp_state;
@@ -4775,6 +5009,7 @@ done:
}
qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
out:
mutex_unlock(&hr_qp->mutex);
@@ -5004,6 +5239,10 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
struct hns_roce_cmd_mailbox *mailbox;
int ret;
+ /* Resizing SRQs is not supported yet */
+ if (srq_attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
if (srq_attr_mask & IB_SRQ_LIMIT) {
if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL;
@@ -5233,7 +5472,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
aeqe = hns_roce_buf_offset(eq->mtr.kmem,
(eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE);
+ eq->eqe_size);
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
@@ -5333,7 +5572,8 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
ceqe = hns_roce_buf_offset(eq->mtr.kmem,
(eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE);
+ eq->eqe_size);
+
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5374,7 +5614,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
{
struct hns_roce_eq *eq = eq_ptr;
struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work = 0;
+ int int_work;
if (eq->type_flag == HNS_ROCE_CEQ)
/* Completion event interrupt */
@@ -5609,14 +5849,16 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
- /* set nex_eqe_ba[43:12] */
- roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
- /* set nex_eqe_ba[63:44] */
- roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
+ roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
+ HNS_ROCE_EQC_EQE_SIZE_S,
+ eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
+
return 0;
}
@@ -5807,7 +6049,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
eq->type_flag = HNS_ROCE_CEQ;
eq->entries = hr_dev->caps.ceqe_depth;
- eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->eqe_size = hr_dev->caps.ceqe_size;
eq->irq = hr_dev->irq[i + other_num + aeq_num];
eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
@@ -5816,7 +6058,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
eq->type_flag = HNS_ROCE_AEQ;
eq->entries = hr_dev->caps.aeqe_depth;
- eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->eqe_size = hr_dev->caps.aeqe_size;
eq->irq = hr_dev->irq[i - comp_num + other_num];
eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index ac29be43b6bd..29c9dd4bcbc6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -60,6 +60,7 @@
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
#define HNS_ROCE_V2_MAX_IRQ_NUM 65
@@ -77,7 +78,6 @@
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
-#define HNS_ROCE_V2_QPC_ENTRY_SZ 256
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
@@ -86,8 +86,10 @@
#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_IDX_ENTRY_SZ 4
-#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
-#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+
+#define HNS_ROCE_V2_SCCC_SZ 32
+#define HNS_ROCE_V3_SCCC_SZ 64
+
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
@@ -229,6 +231,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM = 0x8408,
+ HNS_ROCE_OPC_CFG_ENTRY_SIZE = 0x8409,
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
HNS_ROCE_OPC_POST_MB = 0x8504,
@@ -309,6 +312,9 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_8_CQN_S 0
#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
+#define V2_CQC_BYTE_8_CQE_SIZE_S 27
+#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27)
+
#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
@@ -512,6 +518,7 @@ struct hns_roce_v2_qp_context {
__le32 byte_248_ack_psn;
__le32 byte_252_err_txcqn;
__le32 byte_256_sqflush_rqcqe;
+ __le32 ext[64];
};
#define V2_QPC_BYTE_4_TST_S 0
@@ -896,6 +903,7 @@ struct hns_roce_v2_cqe {
u8 smac[4];
__le32 byte_28;
__le32 byte_32;
+ __le32 rsv[8];
};
#define V2_CQE_BYTE_4_OPCODE_S 0
@@ -1187,6 +1195,8 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+#define V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S 31
+
struct hns_roce_wqe_frmr_seg {
__le32 pbl_size;
__le32 mode_buf_pg_sz;
@@ -1537,6 +1547,18 @@ struct hns_roce_cfg_sgid_tb {
__le32 vf_sgid_h;
__le32 vf_sgid_type_rsv;
};
+
+enum {
+ HNS_ROCE_CFG_QPC_SIZE = BIT(0),
+ HNS_ROCE_CFG_SCCC_SIZE = BIT(1),
+};
+
+struct hns_roce_cfg_entry_size {
+ __le32 type;
+ __le32 rsv[4];
+ __le32 size;
+};
+
#define CFG_SGID_TB_TABLE_IDX_S 0
#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0)
@@ -1571,7 +1593,7 @@ struct hns_roce_query_pf_caps_a {
u8 max_sq_desc_sz;
u8 max_rq_desc_sz;
u8 max_srq_desc_sz;
- u8 cq_entry_sz;
+ u8 cqe_sz;
};
struct hns_roce_query_pf_caps_b {
@@ -1581,9 +1603,9 @@ struct hns_roce_query_pf_caps_b {
u8 cqc_entry_sz;
u8 srqc_entry_sz;
u8 idx_entry_sz;
- u8 scc_ctx_entry_sz;
+ u8 sccc_sz;
u8 max_mtu;
- __le16 qpc_entry_sz;
+ __le16 qpc_sz;
__le16 qpc_timer_entry_sz;
__le16 cqc_timer_entry_sz;
u8 min_cqes;
@@ -1777,8 +1799,8 @@ struct hns_roce_eq_context {
__le32 byte_28;
__le32 byte_32;
__le32 byte_36;
- __le32 nxt_eqe_ba0;
- __le32 nxt_eqe_ba1;
+ __le32 byte_40;
+ __le32 byte_44;
__le32 rsv[5];
};
@@ -1920,6 +1942,9 @@ struct hns_roce_eq_context {
#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
+#define HNS_ROCE_EQC_EQE_SIZE_S 20
+#define HNS_ROCE_EQC_EQE_SIZE_M GENMASK(21, 20)
+
#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
@@ -1941,6 +1966,8 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+#define MAX_SERVICE_LEVEL 0x7
+
struct hns_roce_wqe_atomic_seg {
__le64 fetchadd_swap_data;
__le64 cmp_data;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 5907cfd878a6..afeffafc59f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -141,8 +141,8 @@ static int hns_roce_netdev_event(struct notifier_block *self,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct hns_roce_ib_iboe *iboe = NULL;
struct hns_roce_dev *hr_dev = NULL;
- u8 port = 0;
- int ret = 0;
+ int ret;
+ u8 port;
hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
iboe = &hr_dev->iboe;
@@ -323,6 +323,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
mutex_init(&context->page_mutex);
}
+ resp.cqe_size = hr_dev->caps.cqe_sz;
+
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
goto error_fail_copy_to_udata;
@@ -454,6 +456,8 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
static const struct ib_device_ops hns_roce_dev_mw_ops = {
.alloc_mw = hns_roce_alloc_mw,
.dealloc_mw = hns_roce_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};
static const struct ib_device_ops hns_roce_dev_frmr_ops = {
@@ -545,7 +549,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (ret)
return ret;
}
- ret = ib_register_device(ib_dev, "hns_%d");
+ dma_set_max_seg_size(dev, UINT_MAX);
+ ret = ib_register_device(ib_dev, "hns_%d", dev);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
@@ -587,7 +592,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
- HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
+ HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev, "Failed to init QP context memory, aborting.\n");
@@ -638,11 +643,11 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
- if (hr_dev->caps.sccc_entry_sz) {
+ if (hr_dev->caps.sccc_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC,
- hr_dev->caps.sccc_entry_sz,
+ hr_dev->caps.sccc_sz,
hr_dev->caps.num_qps, 1);
if (ret) {
dev_err(dev,
@@ -682,7 +687,7 @@ err_unmap_qpc_timer:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);
err_unmap_ctx:
- if (hr_dev->caps.sccc_entry_sz)
+ if (hr_dev->caps.sccc_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
err_unmap_srq:
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index e5df3884b41d..7f81a695e9af 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -589,28 +589,22 @@ err_table:
return ret;
}
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
- struct hns_roce_mw *mw;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_mw *mw = to_hr_mw(ibmw);
unsigned long index = 0;
int ret;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
/* Allocate a key for mw from bitmap */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
if (ret)
- goto err_bitmap;
+ return ret;
mw->rkey = hw_index_to_key(index);
- mw->ibmw.rkey = mw->rkey;
- mw->ibmw.type = type;
- mw->pdn = to_hr_pd(ib_pd)->pdn;
+ ibmw->rkey = mw->rkey;
+ mw->pdn = to_hr_pd(ibmw->pd)->pdn;
mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
@@ -619,15 +613,11 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
if (ret)
goto err_mw;
- return &mw->ibmw;
+ return 0;
err_mw:
hns_roce_mw_free(hr_dev, mw);
-
-err_bitmap:
- kfree(mw);
-
- return ERR_PTR(ret);
+ return ret;
}
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
@@ -636,8 +626,6 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
struct hns_roce_mw *mw = to_hr_mw(ibmw);
hns_roce_mw_free(hr_dev, mw);
- kfree(mw);
-
return 0;
}
@@ -707,19 +695,6 @@ static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
return size;
}
-static inline int mtr_umem_page_count(struct ib_umem *umem,
- unsigned int page_shift)
-{
- int count = ib_umem_page_count(umem);
-
- if (page_shift >= PAGE_SHIFT)
- count >>= page_shift - PAGE_SHIFT;
- else
- count <<= PAGE_SHIFT - page_shift;
-
- return count;
-}
-
static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
unsigned int page_shift)
{
@@ -767,13 +742,11 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_udata *udata, unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned int max_pg_shift = buf_attr->page_shift;
- unsigned int best_pg_shift = 0;
+ unsigned int best_pg_shift;
int all_pg_count = 0;
size_t direct_size;
size_t total_size;
- unsigned long tmp;
- int ret = 0;
+ int ret;
total_size = mtr_bufs_size(buf_attr);
if (total_size < 1) {
@@ -782,6 +755,9 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
if (udata) {
+ unsigned long pgsz_bitmap;
+ unsigned long page_size;
+
mtr->kmem = NULL;
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
@@ -790,15 +766,17 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
PTR_ERR(mtr->umem));
return -ENOMEM;
}
- if (buf_attr->fixed_page) {
- best_pg_shift = max_pg_shift;
- } else {
- tmp = GENMASK(max_pg_shift, 0);
- ret = ib_umem_find_best_pgsz(mtr->umem, tmp, user_addr);
- best_pg_shift = (ret <= PAGE_SIZE) ?
- PAGE_SHIFT : ilog2(ret);
- }
- all_pg_count = mtr_umem_page_count(mtr->umem, best_pg_shift);
+ if (buf_attr->fixed_page)
+ pgsz_bitmap = 1 << buf_attr->page_shift;
+ else
+ pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);
+
+ page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
+ user_addr);
+ if (!page_size)
+ return -EINVAL;
+ best_pg_shift = order_base_2(page_size);
+ all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
ret = 0;
} else {
mtr->umem = NULL;
@@ -808,16 +786,15 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOMEM;
}
direct_size = mtr_kmem_direct_size(is_direct, total_size,
- max_pg_shift);
+ buf_attr->page_shift);
ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
- mtr->kmem, max_pg_shift);
+ mtr->kmem, buf_attr->page_shift);
if (ret) {
ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
goto err_alloc_mem;
- } else {
- best_pg_shift = max_pg_shift;
- all_pg_count = mtr->kmem->npages;
}
+ best_pg_shift = buf_attr->page_shift;
+ all_pg_count = mtr->kmem->npages;
}
/* must bigger than minimum hardware page shift */
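On the pgsz_bitmap passed to ib_umem_find_best_pgsz() above: bit N set means a 2^N-byte page size is acceptable, so 1 << page_shift pins exactly one size for the fixed_page case, while GENMASK(page_shift, PAGE_SHIFT) allows anything from the system page size up to the cap. A sketch of picking the best size from such a bitmap (illustrative; the real helper also accounts for the umem's IOVA and SGL layout):

#include <stdint.h>

/* Return the largest page size enabled in pgsz_bitmap that also divides
 * both the buffer start and its length; 0 if none qualifies.
 */
static uint64_t best_pgsz(uint64_t pgsz_bitmap, uint64_t va, uint64_t len)
{
	uint64_t limit = (va | len) & ~((va | len) - 1); /* lowest set bit */
	int bit;

	for (bit = 63; bit >= 0; bit--) {
		uint64_t pgsz = UINT64_C(1) << bit;

		if ((pgsz_bitmap & pgsz) && pgsz <= limit)
			return pgsz;
	}
	return 0;	/* caller treats this as -EINVAL */
}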
@@ -967,7 +944,7 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
unsigned int *buf_page_shift)
{
struct hns_roce_buf_region *r;
- unsigned int page_shift = 0;
+ unsigned int page_shift;
int page_cnt = 0;
size_t buf_size;
int region_cnt;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index b10c50b8736e..98f69496adb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -82,9 +82,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+ return 0;
}
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index c063c450c715..6c081dd985fc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -41,8 +41,6 @@
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>
-#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
-
static void flush_work_handle(struct work_struct *work)
{
struct hns_roce_work *flush_work = container_of(work,
@@ -288,7 +286,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
}
}
- if (hr_dev->caps.sccc_entry_sz) {
+ if (hr_dev->caps.sccc_sz) {
/* Alloc memory for SCC CTX */
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
@@ -551,10 +549,9 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
int ret;
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
- cap->max_send_sge > hr_dev->caps.max_sq_sg ||
- cap->max_inline_data > hr_dev->caps.max_sq_inline) {
+ cap->max_send_sge > hr_dev->caps.max_sq_sg) {
ibdev_err(ibdev,
- "failed to check SQ WR, SGE or inline num, ret = %d.\n",
+ "failed to check SQ WR or SGE num, ret = %d.\n",
-EINVAL);
return -EINVAL;
}
@@ -577,9 +574,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
- /* We don't support inline sends for kernel QPs (yet) */
- cap->max_inline_data = 0;
-
return 0;
}
@@ -847,6 +841,11 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->ibqp.qp_type = init_attr->qp_type;
+ if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
+ init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
+
+ hr_qp->max_inline_data = init_attr->cap.max_inline_data;
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
@@ -1014,53 +1013,32 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
int ret;
switch (init_attr->qp_type) {
- case IB_QPT_RC: {
- hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
- if (!hr_qp)
- return ERR_PTR(-ENOMEM);
-
- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_qp);
- if (ret) {
- ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
- hr_qp->qpn, ret);
- kfree(hr_qp);
- return ERR_PTR(ret);
- }
-
+ case IB_QPT_RC:
+ case IB_QPT_GSI:
break;
+ default:
+ ibdev_err(ibdev, "not support QP type %d\n",
+ init_attr->qp_type);
+ return ERR_PTR(-EOPNOTSUPP);
}
- case IB_QPT_GSI: {
- /* Userspace is not allowed to create special QPs: */
- if (udata) {
- ibdev_err(ibdev, "not support usr space GSI\n");
- return ERR_PTR(-EINVAL);
- }
- hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
- if (!hr_qp)
- return ERR_PTR(-ENOMEM);
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
+ return ERR_PTR(-ENOMEM);
+ if (init_attr->qp_type == IB_QPT_GSI) {
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
-
- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_qp);
- if (ret) {
- ibdev_err(ibdev, "Create GSI QP failed!\n");
- kfree(hr_qp);
- return ERR_PTR(ret);
- }
-
- break;
- }
- default:{
- ibdev_err(ibdev, "not support QP type %d\n",
- init_attr->qp_type);
- return ERR_PTR(-EOPNOTSUPP);
- }
}
+ ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
+ if (ret) {
+ ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
+ init_attr->qp_type, ret);
+ ibdev_err(ibdev, "Create GSI QP failed!\n");
+ kfree(hr_qp);
+ return ERR_PTR(ret);
+ }
return &hr_qp->ibqp;
}
@@ -1161,8 +1139,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
mutex_lock(&hr_qp->mutex);
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
+ if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
+ goto out;
+
+ cur_state = hr_qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
if (ibqp->uobject &&
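
Note the behavioral change in set_qp_param() above: an oversized max_inline_data request is now clamped to caps.max_sq_inline and echoed back to the caller, rather than failing the create as the removed set_kernel_sq_size() check did. The clamp pattern in isolation (field names mirror the driver, the helper itself is hypothetical):

#include <stdio.h>

/* Sketch: clamp a requested attribute to the device capability
 * instead of rejecting the verb, as the hns create-QP path now does. */
struct caps { unsigned int max_sq_inline; };
struct qp_cap { unsigned int max_inline_data; };

static void clamp_inline(const struct caps *caps, struct qp_cap *cap)
{
	if (cap->max_inline_data > caps->max_sq_inline)
		cap->max_inline_data = caps->max_sq_inline;
}

int main(void)
{
	struct caps caps = { .max_sq_inline = 32 };
	struct qp_cap cap = { .max_inline_data = 128 };

	clamp_inline(&caps, &cap);
	printf("granted inline: %u\n", cap.max_inline_data);	/* 32 */
	return 0;
}
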
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index b9e2dbd372b6..8caf74e44efd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -285,7 +285,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_ib_create_srq ucmd = {};
- int ret = 0;
+ int ret;
u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */
@@ -363,7 +363,7 @@ err_buf_alloc:
return ret;
}
-void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
@@ -372,6 +372,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
free_srq_idx(hr_dev, srq);
free_srq_wrid(srq);
free_srq_buf(hr_dev, srq);
+ return 0;
}
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 25747b85a79c..832b80de004f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -409,8 +409,8 @@ static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
}
/* i40iw.c */
-void i40iw_add_ref(struct ib_qp *);
-void i40iw_rem_ref(struct ib_qp *);
+void i40iw_qp_add_ref(struct ib_qp *ibqp);
+void i40iw_qp_rem_ref(struct ib_qp *ibqp);
struct ib_qp *i40iw_get_qp(struct ib_device *, int);
void i40iw_flush_wqes(struct i40iw_device *iwdev,
@@ -554,9 +554,8 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
bool wait);
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
- struct i40iw_qp *iwqp,
- u32 qp_num);
+void i40iw_free_qp_resources(struct i40iw_qp *iwqp);
+
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
struct i40iw_dma_mem *memptr,
u32 size, u32 mask);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index a3b95805c154..3053c345a5a3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2322,7 +2322,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
iwqp = cm_node->iwqp;
if (iwqp) {
iwqp->cm_node = NULL;
- i40iw_rem_ref(&iwqp->ibqp);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
cm_node->iwqp = NULL;
} else if (cm_node->qhash_set) {
i40iw_get_addr_info(cm_node, &nfo);
@@ -3452,7 +3452,7 @@ void i40iw_cm_disconn(struct i40iw_qp *iwqp)
kfree(work);
return;
}
- i40iw_add_ref(&iwqp->ibqp);
+ i40iw_qp_add_ref(&iwqp->ibqp);
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
work->iwqp = iwqp;
@@ -3623,7 +3623,7 @@ static void i40iw_disconnect_worker(struct work_struct *work)
kfree(dwork);
i40iw_cm_disconn_true(iwqp);
- i40iw_rem_ref(&iwqp->ibqp);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
}
/**
@@ -3745,7 +3745,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->lsmm_size = accept.size + conn_param->private_data_len;
i40iw_cm_init_tsa_conn(iwqp, cm_node);
cm_id->add_ref(cm_id);
- i40iw_add_ref(&iwqp->ibqp);
+ i40iw_qp_add_ref(&iwqp->ibqp);
attr.qp_state = IB_QPS_RTS;
cm_node->qhash_set = false;
@@ -3908,7 +3908,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
- i40iw_add_ref(&iwqp->ibqp);
+ i40iw_qp_add_ref(&iwqp->ibqp);
if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
cm_node->state = I40IW_CM_STATE_SYN_SENT;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index e1085634b8d9..56fdc161f6f8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -313,7 +313,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
__func__, info->qp_cq_id);
continue;
}
- i40iw_add_ref(&iwqp->ibqp);
+ i40iw_qp_add_ref(&iwqp->ibqp);
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
qp = &iwqp->sc_qp;
spin_lock_irqsave(&iwqp->lock, flags);
@@ -426,7 +426,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
break;
}
if (info->qp)
- i40iw_rem_ref(&iwqp->ibqp);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
} while (1);
if (aeqcnt)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 58a433135a03..2408b279e4c2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -192,9 +192,9 @@ static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
* i40iw_dpc - tasklet for aeq and ceq 0
* @data: iwarp device
*/
-static void i40iw_dpc(unsigned long data)
+static void i40iw_dpc(struct tasklet_struct *t)
{
- struct i40iw_device *iwdev = (struct i40iw_device *)data;
+ struct i40iw_device *iwdev = from_tasklet(iwdev, t, dpc_tasklet);
if (iwdev->msix_shared)
i40iw_process_ceq(iwdev, iwdev->ceqlist);
@@ -206,9 +206,9 @@ static void i40iw_dpc(unsigned long data)
* i40iw_ceq_dpc - dpc handler for CEQ
* @data: data points to CEQ
*/
-static void i40iw_ceq_dpc(unsigned long data)
+static void i40iw_ceq_dpc(struct tasklet_struct *t)
{
- struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+ struct i40iw_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
struct i40iw_device *iwdev = iwceq->iwdev;
i40iw_process_ceq(iwdev, iwceq);
@@ -689,10 +689,10 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
enum i40iw_status_code status;
if (iwdev->msix_shared && !ceq_id) {
- tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
} else {
- tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
+ tasklet_setup(&iwceq->dpc_tasklet, i40iw_ceq_dpc);
status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
}
@@ -841,7 +841,7 @@ static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iw
u32 ret = 0;
if (!iwdev->msix_shared) {
- tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
}
if (ret) {
@@ -1573,7 +1573,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
status = i40iw_save_msix_info(iwdev, ldev);
if (status)
return status;
- iwdev->hw.dev_context = (void *)ldev->pcidev;
+ iwdev->hw.pcidev = ldev->pcidev;
iwdev->hw.hw_addr = ldev->hw_addr;
status = i40iw_allocate_dma_mem(&iwdev->hw,
&iwdev->obj_mem, 8192, 4096);
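
The tasklet_setup()/from_tasklet() conversion above drops the (unsigned long) casts: the callback now receives the tasklet pointer and recovers its owning structure with container_of(). A runnable userspace model of that recovery (from_tasklet() is container_of() under the hood):

#include <stddef.h>
#include <stdio.h>

/* Userspace model of from_tasklet(): the callback derives its owning
 * structure from the tasklet pointer, with no opaque data cast. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tasklet { void (*func)(struct tasklet *t); };

struct iwdev {
	int id;
	struct tasklet dpc_tasklet;
};

static void dpc(struct tasklet *t)
{
	struct iwdev *iwdev = container_of(t, struct iwdev, dpc_tasklet);

	printf("dpc ran for device %d\n", iwdev->id);
}

int main(void)
{
	struct iwdev dev = { .id = 3, .dpc_tasklet = { .func = dpc } };

	dev.dpc_tasklet.func(&dev.dpc_tasklet);
	return 0;
}
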
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 540aab5e502d..5f97643e22e5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -167,7 +167,7 @@ static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
*/
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
- struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct pci_dev *pcidev = hw->pcidev;
int i;
if (!chunk->pg_cnt)
@@ -193,7 +193,7 @@ static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
struct i40iw_chunk *chunk,
int pg_cnt)
{
- struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct pci_dev *pcidev = hw->pcidev;
struct page *page;
u8 *addr;
u32 size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 54c323c40d96..c3babf3cbb8e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -73,6 +73,7 @@ struct i40iw_pd_ops;
struct i40iw_priv_qp_ops;
struct i40iw_priv_cq_ops;
struct i40iw_hmc_ops;
+struct pci_dev;
enum i40iw_page_size {
I40IW_PAGE_SIZE_4K,
@@ -261,7 +262,7 @@ struct i40iw_vsi_pestat {
struct i40iw_hw {
u8 __iomem *hw_addr;
- void *dev_context;
+ struct pci_dev *pcidev;
struct i40iw_hmc_info hmc;
};
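
Adding the `struct pci_dev;` forward declaration is what lets i40iw_hw hold a typed pcidev pointer without including PCI headers, removing the (struct pci_dev *) casts at every use site. The technique in miniature (standalone sketch, not the driver's headers):

#include <stdio.h>

/* Sketch: a forward declaration supports pointer-only use in a
 * header, so callers get type checking and drop their casts. */
struct pci_dev;			/* incomplete type */

struct hw {
	struct pci_dev *pcidev;	/* was: void *dev_context */
};

struct pci_dev { int devfn; };	/* full definition elsewhere */

int main(void)
{
	struct pci_dev pdev = { .devfn = 0 };
	struct hw hw = { .pcidev = &pdev };

	printf("devfn %d\n", hw.pcidev->devfn);	/* no cast needed */
	return 0;
}
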
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index e07fb37af086..644f8c641aa0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -478,25 +478,6 @@ void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
}
/**
- * i40iw_free_qp - callback after destroy cqp completes
- * @cqp_request: cqp request for destroy qp
- * @num: not used
- */
-static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
-{
- struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
- struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
- struct i40iw_device *iwdev;
- u32 qp_num = iwqp->ibqp.qp_num;
-
- iwdev = iwqp->iwdev;
-
- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
- i40iw_rem_devusecount(iwdev);
-}
-
-/**
* i40iw_wait_event - wait for completion
* @iwdev: iwarp device
* @cqp_request: cqp request to wait
@@ -616,26 +597,23 @@ void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
}
/**
- * i40iw_add_ref - add refcount for qp
+ * i40iw_qp_add_ref - add refcount for qp
* @ibqp: iqarp qp
*/
-void i40iw_add_ref(struct ib_qp *ibqp)
+void i40iw_qp_add_ref(struct ib_qp *ibqp)
{
struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
- atomic_inc(&iwqp->refcount);
+ refcount_inc(&iwqp->refcount);
}
/**
- * i40iw_rem_ref - rem refcount for qp and free if 0
+ * i40iw_qp_rem_ref - rem refcount for qp and free if 0
* @ibqp: iqarp qp
*/
-void i40iw_rem_ref(struct ib_qp *ibqp)
+void i40iw_qp_rem_ref(struct ib_qp *ibqp)
{
struct i40iw_qp *iwqp;
- enum i40iw_status_code status;
- struct i40iw_cqp_request *cqp_request;
- struct cqp_commands_info *cqp_info;
struct i40iw_device *iwdev;
u32 qp_num;
unsigned long flags;
@@ -643,7 +621,7 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
iwqp = to_iwqp(ibqp);
iwdev = iwqp->iwdev;
spin_lock_irqsave(&iwdev->qptable_lock, flags);
- if (!atomic_dec_and_test(&iwqp->refcount)) {
+ if (!refcount_dec_and_test(&iwqp->refcount)) {
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
return;
}
@@ -651,25 +629,8 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
qp_num = iwqp->ibqp.qp_num;
iwdev->qp_table[qp_num] = NULL;
spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
- cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
- if (!cqp_request)
- return;
-
- cqp_request->callback_fcn = i40iw_free_qp;
- cqp_request->param = (void *)&iwqp->sc_qp;
- cqp_info = &cqp_request->info;
- cqp_info->cqp_cmd = OP_QP_DESTROY;
- cqp_info->post_sq = 1;
- cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
- cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
- cqp_info->in.u.qp_destroy.remove_hash_idx = true;
- status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (!status)
- return;
+ complete(&iwqp->free_qp);
- i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
- i40iw_rem_devusecount(iwdev);
}
/**
@@ -751,7 +712,7 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
u64 size,
u32 alignment)
{
- struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct pci_dev *pcidev = hw->pcidev;
if (!mem)
return I40IW_ERR_PARAM;
@@ -770,7 +731,7 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
*/
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
- struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct pci_dev *pcidev = hw->pcidev;
if (!mem || !mem->va)
return;
@@ -936,7 +897,7 @@ static void i40iw_terminate_timeout(struct timer_list *t)
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
- i40iw_rem_ref(&iwqp->ibqp);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
}
/**
@@ -948,7 +909,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
struct i40iw_qp *iwqp;
iwqp = (struct i40iw_qp *)qp->back_qp;
- i40iw_add_ref(&iwqp->ibqp);
+ i40iw_qp_add_ref(&iwqp->ibqp);
timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
iwqp->terminate_timer.expires = jiffies + HZ;
add_timer(&iwqp->terminate_timer);
@@ -964,7 +925,7 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
iwqp = (struct i40iw_qp *)qp->back_qp;
if (del_timer(&iwqp->terminate_timer))
- i40iw_rem_ref(&iwqp->ibqp);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
}
/**
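
With this change the last i40iw_qp_rem_ref() no longer queues an asynchronous CQP destroy; it only signals free_qp, and i40iw_destroy_qp() (below, in i40iw_verbs.c) waits on that completion before tearing the QP down synchronously. A userspace analogue of the handoff, with C11 atomics and pthreads standing in for refcount_t and struct completion:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Analogue of the new teardown: the last reference drop only
 * completes free_qp; the destroyer waits, then frees in its own
 * context. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct qp {
	atomic_int refcount;
	struct completion free_qp;
};

static void qp_rem_ref(struct qp *qp)
{
	if (atomic_fetch_sub(&qp->refcount, 1) == 1)
		complete(&qp->free_qp);		/* last ref gone */
}

static void *other_user(void *arg)
{
	qp_rem_ref(arg);			/* e.g. an AEQ handler path */
	return NULL;
}

int main(void)
{
	struct qp qp = {
		.refcount = 2,
		.free_qp = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER, 0 },
	};
	pthread_t t;

	pthread_create(&t, NULL, other_user, &qp);
	qp_rem_ref(&qp);			/* destroyer's own ref */
	wait_for_completion(&qp.free_qp);	/* now safe to free */
	pthread_join(t, NULL);
	printf("qp torn down\n");
	return 0;
}
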
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index b51339328a51..581ecbadf586 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -328,12 +328,13 @@ error:
* @ibpd: ptr of pd to be deallocated
* @udata: user data or null for kernel object
*/
-static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+static int i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct i40iw_pd *iwpd = to_iwpd(ibpd);
struct i40iw_device *iwdev = to_iwdev(ibpd->device);
i40iw_rem_pdusecount(iwpd, iwdev);
+ return 0;
}
/**
@@ -363,11 +364,11 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
* @iwqp: qp ptr (user or kernel)
* @qp_num: qp number assigned
*/
-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
- struct i40iw_qp *iwqp,
- u32 qp_num)
+void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
{
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ u32 qp_num = iwqp->ibqp.qp_num;
i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
@@ -379,7 +380,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.wrid_mem);
iwqp->kqp.wrid_mem = NULL;
- kfree(iwqp->allocated_buffer);
+ kfree(iwqp);
}
/**
@@ -401,6 +402,10 @@ static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct ib_qp_attr attr;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+
+ memset(&attr, 0, sizeof(attr));
iwqp->destroyed = 1;
@@ -415,7 +420,15 @@ static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
}
}
- i40iw_rem_ref(&iwqp->ibqp);
+ attr.qp_state = IB_QPS_ERR;
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ i40iw_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwqp);
+ i40iw_rem_devusecount(iwdev);
+
return 0;
}
@@ -524,7 +537,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
struct i40iw_create_qp_req req;
struct i40iw_create_qp_resp uresp;
u32 qp_num = 0;
- void *mem;
enum i40iw_status_code ret;
int err_code;
int sq_size;
@@ -566,16 +578,15 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
- mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
- if (!mem)
+ iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+ if (!iwqp)
return ERR_PTR(-ENOMEM);
- iwqp = (struct i40iw_qp *)mem;
- iwqp->allocated_buffer = mem;
qp = &iwqp->sc_qp;
qp->back_qp = (void *)iwqp;
qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+ iwqp->iwdev = iwdev;
iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
if (i40iw_allocate_dma_mem(dev->hw,
@@ -600,7 +611,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
goto error;
}
- iwqp->iwdev = iwdev;
iwqp->iwpd = iwpd;
iwqp->ibqp.qp_num = qp_num;
qp = &iwqp->sc_qp;
@@ -714,7 +724,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
goto error;
}
- i40iw_add_ref(&iwqp->ibqp);
+ refcount_set(&iwqp->refcount, 1);
spin_lock_init(&iwqp->lock);
iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
iwdev->qp_table[qp_num] = iwqp;
@@ -736,10 +746,11 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
}
init_completion(&iwqp->sq_drained);
init_completion(&iwqp->rq_drained);
+ init_completion(&iwqp->free_qp);
return &iwqp->ibqp;
error:
- i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_free_qp_resources(iwqp);
return ERR_PTR(err_code);
}
@@ -1052,7 +1063,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
* @ib_cq: cq pointer
* @udata: user data or NULL for kernel object
*/
-static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct i40iw_cq *iwcq;
struct i40iw_device *iwdev;
@@ -1064,6 +1075,7 @@ static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources(iwdev, iwcq);
i40iw_rem_devusecount(iwdev);
+ return 0;
}
/**
@@ -1320,8 +1332,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
if (iwmr->type == IW_MEMREG_TYPE_QP)
iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
- rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
- iwmr->page_size) {
+ rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
*pbl = rdma_block_iter_dma_address(&biter);
pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
}
@@ -1744,15 +1755,12 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
struct i40iw_mr *iwmr;
struct ib_umem *region;
struct i40iw_mem_reg_req req;
- u64 pbl_depth = 0;
u32 stag = 0;
u16 access;
- u64 region_length;
bool use_pbles = false;
unsigned long flags;
int err = -ENOSYS;
int ret;
- int pg_shift;
if (!udata)
return ERR_PTR(-EOPNOTSUPP);
@@ -1787,18 +1795,13 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (req.reg_type == IW_MEMREG_TYPE_MEM)
iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
virt);
-
- region_length = region->length + (start & (iwmr->page_size - 1));
- pg_shift = ffs(iwmr->page_size) - 1;
- pbl_depth = region_length >> pg_shift;
- pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
iwmr->length = region->length;
iwpbl->user_base = virt;
palloc = &iwpbl->pble_alloc;
iwmr->type = req.reg_type;
- iwmr->page_cnt = (u32)pbl_depth;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
switch (req.reg_type) {
case IW_MEMREG_TYPE_QP:
@@ -2636,13 +2639,13 @@ static const struct ib_device_ops i40iw_dev_ops = {
.get_hw_stats = i40iw_get_hw_stats,
.get_port_immutable = i40iw_port_immutable,
.iw_accept = i40iw_accept,
- .iw_add_ref = i40iw_add_ref,
+ .iw_add_ref = i40iw_qp_add_ref,
.iw_connect = i40iw_connect,
.iw_create_listen = i40iw_create_listen,
.iw_destroy_listen = i40iw_destroy_listen,
.iw_get_qp = i40iw_get_qp,
.iw_reject = i40iw_reject,
- .iw_rem_ref = i40iw_rem_ref,
+ .iw_rem_ref = i40iw_qp_rem_ref,
.map_mr_sg = i40iw_map_mr_sg,
.mmap = i40iw_mmap,
.modify_qp = i40iw_modify_qp,
@@ -2668,7 +2671,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
{
struct i40iw_ib_device *iwibdev;
struct net_device *netdev = iwdev->netdev;
- struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
+ struct pci_dev *pcidev = iwdev->hw.pcidev;
iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
if (!iwibdev) {
@@ -2758,7 +2761,8 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
if (ret)
goto error;
- ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
+ dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev);
if (ret)
goto error;
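
The deleted pbl_depth arithmetic is exactly what ib_umem_num_dma_blocks() computes: the byte range, extended by the offset of `start` within the first block, rounded up to whole blocks. A standalone sketch of that math, assuming (as the driver's SZ_4K/SZ_2M choices guarantee) a power-of-two block size:

#include <stdio.h>

typedef unsigned long long u64;

/* Sketch of the block count the removed open-coded math produced:
 * round the start down and the end up to block boundaries. */
static u64 num_dma_blocks(u64 iova, u64 length, u64 pgsz)
{
	u64 start = iova & ~(pgsz - 1);
	u64 end = (iova + length + pgsz - 1) & ~(pgsz - 1);

	return (end - start) / pgsz;
}

int main(void)
{
	/* 6000 bytes starting 100 bytes into a 4K page -> 2 blocks */
	printf("%llu\n", num_dma_blocks(4096 + 100, 6000, 4096));
	return 0;
}
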
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 331bc21cbcc7..bab71f3e5637 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -139,7 +139,7 @@ struct i40iw_qp {
struct i40iw_qp_host_ctx_info ctx_info;
struct i40iwarp_offload_info iwarp_info;
void *allocated_buffer;
- atomic_t refcount;
+ refcount_t refcount;
struct iw_cm_id *cm_id;
void *cm_node;
struct ib_mr *lsmm_mr;
@@ -174,5 +174,6 @@ struct i40iw_qp {
struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
struct completion rq_drained;
+ struct completion free_qp;
};
#endif
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5f8f8d5c0ce0..7321d6ab5fe1 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -232,8 +232,3 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-
-void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
-{
- return;
-}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index b591861934b3..4aff1c8298b1 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -54,11 +54,20 @@ struct id_map_entry {
struct delayed_work timeout;
};
+struct rej_tmout_entry {
+ int slave;
+ u32 rem_pv_cm_id;
+ struct delayed_work timeout;
+ struct xarray *xa_rej_tmout;
+};
+
struct cm_generic_msg {
struct ib_mad_hdr hdr;
__be32 local_comm_id;
__be32 remote_comm_id;
+ unsigned char unused[2];
+ __be16 rej_reason;
};
struct cm_sidr_generic_msg {
@@ -280,11 +289,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
if (!sriov->is_going_down && !id->scheduled_delete) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ } else if (id->scheduled_delete) {
+ /* Adjust timeout if already scheduled */
+ mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);
}
+#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
struct ib_mad *mad)
{
@@ -293,8 +306,10 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
int pv_cm_id = -1;
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
- mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+ mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
+ mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
+ (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
sl_cm_id = get_local_comm_id(mad);
id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
if (id)
@@ -314,8 +329,8 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
}
if (!id) {
- pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
- slave_id, sl_cm_id);
+ pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
+ slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
return -EINVAL;
}
@@ -327,11 +342,94 @@ cont:
return 0;
}
+static void rej_tmout_timeout(struct work_struct *work)
+{
+ struct delayed_work *delay = to_delayed_work(work);
+ struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
+ struct rej_tmout_entry *deleted;
+
+ deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);
+
+ if (deleted != item)
+ pr_debug("deleted(%p) != item(%p)\n", deleted, item);
+
+ kfree(item);
+}
+
+static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
+{
+ struct rej_tmout_entry *item;
+ struct rej_tmout_entry *old;
+ int ret = 0;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
+
+ if (item) {
+ if (xa_err(item))
+ ret = xa_err(item);
+ else
+ /* If a retry, adjust delayed work */
+ mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ goto err_or_exists;
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ item = kmalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
+ item->slave = slave;
+ item->rem_pv_cm_id = rem_pv_cm_id;
+ item->xa_rej_tmout = &sriov->xa_rej_tmout;
+
+ old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
+ if (old) {
+ pr_debug(
+ "Non-null old entry (%p) or error (%d) when inserting\n",
+ old, xa_err(old));
+ kfree(item);
+ return xa_err(old);
+ }
+
+ schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+
+ return 0;
+
+err_or_exists:
+ xa_unlock(&sriov->xa_rej_tmout);
+ return ret;
+}
+
+static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
+{
+ struct rej_tmout_entry *item;
+ int slave;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
+
+ if (!item || xa_err(item)) {
+ pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
+ rem_pv_cm_id, xa_err(item));
+ slave = !item ? -ENOENT : xa_err(item);
+ } else {
+ slave = item->slave;
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ return slave;
+}
+
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
struct ib_mad *mad)
{
+ struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
+ u32 rem_pv_cm_id = get_local_comm_id(mad);
u32 pv_cm_id;
struct id_map_entry *id;
+ int sts;
if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
@@ -347,6 +445,13 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
be64_to_cpu(gid.global.interface_id));
return -ENOENT;
}
+
+ sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
+ if (sts)
+ /* Even if this fails, we pass on the REQ to the slave */
+ pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
+ rem_pv_cm_id, *slave, sts);
+
return 0;
}
@@ -354,7 +459,14 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
if (!id) {
- pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
+ if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
+ REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
+ *slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);
+
+ return (*slave < 0) ? *slave : 0;
+ }
+ pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
+ pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
return -ENOENT;
}
@@ -375,6 +487,34 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT;
xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
+ xa_init(&dev->sriov.xa_rej_tmout);
+}
+
+static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
+{
+ struct rej_tmout_entry *item;
+ bool flush_needed = false;
+ unsigned long id;
+ int cnt = 0;
+
+ xa_lock(&sriov->xa_rej_tmout);
+ xa_for_each(&sriov->xa_rej_tmout, id, item) {
+ if (slave < 0 || slave == item->slave) {
+ mod_delayed_work(system_wq, &item->timeout, 0);
+ flush_needed = true;
+ ++cnt;
+ }
+ }
+ xa_unlock(&sriov->xa_rej_tmout);
+
+ if (flush_needed) {
+ flush_scheduled_work();
+ pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
+ cnt, slave);
+ }
+
+ if (slave < 0)
+ WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}
/* slave = -1 ==> all slaves */
@@ -444,4 +584,6 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
list_del(&map->list);
kfree(map);
}
+
+ rej_tmout_xa_cleanup(sriov, slave);
}
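
alloc_rej_tmout() publishes entries with xa_cmpxchg(..., NULL, item, ...) so a racing inserter for the same rem_pv_cm_id loses cleanly rather than overwriting, and rej_tmout_timeout() deletes with the same primitive so it only removes the entry it armed. The insert-if-absent idea, modeled on a single slot with a C11 compare-exchange:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of xa_cmpxchg(xa, id, NULL, item): install item only if the
 * slot is still empty; the loser of a race sees the winner's entry. */
struct entry { int slave; };

static _Atomic(struct entry *) slot;	/* one xarray index, simplified */

static struct entry *insert_if_absent(struct entry *item)
{
	struct entry *expected = NULL;

	if (atomic_compare_exchange_strong(&slot, &expected, item))
		return NULL;		/* we installed it */
	return expected;		/* already present */
}

int main(void)
{
	struct entry *a = malloc(sizeof(*a));
	struct entry *b = malloc(sizeof(*b));

	printf("first insert: %s\n", insert_if_absent(a) ? "lost" : "won");
	printf("second insert: %s\n", insert_if_absent(b) ? "lost" : "won");
	free(b);
	free(a);
	return 0;
}
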
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 8a3436994f80..e9b5a4d57fb1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -149,7 +149,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- n = ib_umem_page_count(*umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
@@ -475,7 +474,7 @@ out:
return err;
}
-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(cq->device);
struct mlx4_ib_cq *mcq = to_mcq(cq);
@@ -495,6 +494,7 @@ void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
mlx4_db_free(dev->dev, &mcq->db);
}
ib_umem_release(mcq->umem);
+ return 0;
}
static void dump_cqe(void *cqe)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index abe68708d6d6..8bd16474708f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -500,6 +500,13 @@ static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
sgid, dgid);
}
+static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
+{
+ int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
+
+ return (qpn >= proxy_start && qpn <= proxy_start + 1);
+}
+
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, struct ib_wc *wc,
struct ib_grh *grh, struct ib_mad *mad)
@@ -520,8 +527,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
u16 cached_pkey;
u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
- if (dest_qpt > IB_QPT_GSI)
+ if (dest_qpt > IB_QPT_GSI) {
+ pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
return -EINVAL;
+ }
tun_ctx = dev->sriov.demux[port-1].tun[slave];
@@ -538,12 +547,20 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
if (dest_qpt) {
u16 pkey_ix;
ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
- if (ret)
+ if (ret) {
+ pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ wc->pkey_index, ret);
return -EINVAL;
+ }
ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
- if (ret)
+ if (ret) {
+ pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ cached_pkey, ret);
return -EINVAL;
+ }
tun_pkey_ix = pkey_ix;
} else
tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
@@ -715,7 +732,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
if (err)
- pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
slave, err);
return 0;
}
@@ -794,7 +812,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
if (err)
- pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+ pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
slave, err);
return 0;
}
@@ -807,27 +826,6 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp) {
- pr_debug("received MAD: port:%d slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x tid:%016llx cls:%x mtd:%x atr:%x\n",
- port_num,
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- be64_to_cpu(in_mad->mad_hdr.tid),
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
- if (in_wc->wc_flags & IB_WC_GRH) {
- pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
- be64_to_cpu(in_grh->sgid.global.subnet_prefix),
- be64_to_cpu(in_grh->sgid.global.interface_id));
- pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
- be64_to_cpu(in_grh->dgid.global.subnet_prefix),
- be64_to_cpu(in_grh->dgid.global.interface_id));
- }
- }
-
slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
@@ -1299,6 +1297,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
+static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
+{
+ unsigned long flags;
+ struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
+ struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
+
+ spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
+ if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
+ queue_work(ctx->wi_wq, &ctx->work);
+ spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
+}
+
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_demux_pv_qp *tun_qp,
int index)
@@ -1341,14 +1351,6 @@ static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
return ret;
}
-static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
-{
- int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
-
- return (qpn >= proxy_start && qpn <= proxy_start + 1);
-}
-
-
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
enum ib_qp_type dest_qpt, u16 pkey_index,
u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
@@ -1401,10 +1403,10 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
spin_lock(&sqp->tx_lock);
if (sqp->tx_ix_head - sqp->tx_ix_tail >=
- (MLX4_NUM_TUNNEL_BUFS - 1))
+ (MLX4_NUM_WIRE_BUFS - 1))
ret = -EAGAIN;
else
- wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
+ wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
spin_unlock(&sqp->tx_lock);
if (ret)
goto out;
@@ -1484,6 +1486,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
u16 vlan_id;
u8 qos;
u8 *dmac;
+ int sts;
/* Get slave that sent this packet */
if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1580,13 +1583,17 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
&vlan_id, &qos))
rdma_ah_set_sl(&ah_attr, qos);
- mlx4_ib_send_to_wire(dev, slave, ctx->port,
- is_proxy_qp0(dev, wc->src_qp, slave) ?
- IB_QPT_SMI : IB_QPT_GSI,
- be16_to_cpu(tunnel->hdr.pkey_index),
- be32_to_cpu(tunnel->hdr.remote_qpn),
- be32_to_cpu(tunnel->hdr.qkey),
- &ah_attr, wc->smac, vlan_id, &tunnel->mad);
+ sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
+ is_proxy_qp0(dev, wc->src_qp, slave) ?
+ IB_QPT_SMI : IB_QPT_GSI,
+ be16_to_cpu(tunnel->hdr.pkey_index),
+ be32_to_cpu(tunnel->hdr.remote_qpn),
+ be32_to_cpu(tunnel->hdr.qkey),
+ &ah_attr, wc->smac, vlan_id, &tunnel->mad);
+ if (sts)
+ pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
+ is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+ slave, sts);
}
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1595,19 +1602,20 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
int i;
struct mlx4_ib_demux_pv_qp *tun_qp;
int rx_buf_size, tx_buf_size;
+ const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return -EINVAL;
tun_qp = &ctx->qp[qp_type];
- tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+ tun_qp->ring = kcalloc(nmbr_bufs,
sizeof(struct mlx4_ib_buf),
GFP_KERNEL);
if (!tun_qp->ring)
return -ENOMEM;
- tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+ tun_qp->tx_ring = kcalloc(nmbr_bufs,
sizeof (struct mlx4_ib_tun_tx_buf),
GFP_KERNEL);
if (!tun_qp->tx_ring) {
@@ -1624,7 +1632,7 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
if (!tun_qp->ring[i].addr)
goto err;
@@ -1638,7 +1646,7 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
}
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
tun_qp->tx_ring[i].buf.addr =
kmalloc(tx_buf_size, GFP_KERNEL);
if (!tun_qp->tx_ring[i].buf.addr)
@@ -1669,7 +1677,7 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- i = MLX4_NUM_TUNNEL_BUFS;
+ i = nmbr_bufs;
err:
while (i > 0) {
--i;
@@ -1690,6 +1698,7 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
int i;
struct mlx4_ib_demux_pv_qp *tun_qp;
int rx_buf_size, tx_buf_size;
+ const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return;
@@ -1704,13 +1713,13 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
@@ -1744,9 +1753,6 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
case IB_WC_SEND:
- pr_debug("received tunnel send completion:"
- "wrid=0x%llx, status=0x%x\n",
- wc.wr_id, wc.status);
rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
@@ -1793,6 +1799,7 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
struct ib_qp_attr attr;
int qp_attr_mask_INIT;
+ const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (qp_type > IB_QPT_GSI)
return -EINVAL;
@@ -1803,8 +1810,8 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
qp_init_attr.init_attr.send_cq = ctx->cq;
qp_init_attr.init_attr.recv_cq = ctx->cq;
qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
- qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
+ qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
+ qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
qp_init_attr.init_attr.cap.max_send_sge = 1;
qp_init_attr.init_attr.cap.max_recv_sge = 1;
if (create_tun) {
@@ -1866,7 +1873,7 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
goto err_qp;
}
- for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+ for (i = 0; i < nmbr_bufs; i++) {
ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
if (ret) {
pr_err(" mlx4_ib_post_pv_buf error"
@@ -1902,8 +1909,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
switch (wc.opcode) {
case IB_WC_SEND:
kfree(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
- sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+ (MLX4_NUM_WIRE_BUFS - 1)].ah);
+ sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
sqp->tx_ix_tail++;
@@ -1912,13 +1919,13 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
case IB_WC_RECV:
mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
(sqp->ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
+ (MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
grh = &(((struct mlx4_mad_rcv_buf *)
(sqp->ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
+ (MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)))
+ (MLX4_NUM_WIRE_BUFS - 1)))
pr_err("Failed reposting SQP "
"buf:%lld\n", wc.wr_id);
break;
@@ -1931,8 +1938,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
ctx->slave, wc.status, wc.wr_id);
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
kfree(sqp->tx_ring[wc.wr_id &
- (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
- sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+ (MLX4_NUM_WIRE_BUFS - 1)].ah);
+ sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
= NULL;
spin_lock(&sqp->tx_lock);
sqp->tx_ix_tail++;
@@ -1972,6 +1979,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
{
int ret, cq_size;
struct ib_cq_init_attr cq_attr = {};
+ const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
if (ctx->state != DEMUX_PV_STATE_DOWN)
return -EEXIST;
@@ -1996,12 +2004,13 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
goto err_out_qp0;
}
- cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
+ cq_size = 2 * nmbr_bufs;
if (ctx->has_smi)
cq_size *= 2;
cq_attr.cqe = cq_size;
- ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
+ ctx->cq = ib_create_cq(ctx->ib_dev,
+ create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
@@ -2038,6 +2047,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
+ ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
if (ret) {
@@ -2181,7 +2191,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg;
}
- snprintf(name, sizeof name, "mlx4_ibt%d", port);
+ snprintf(name, sizeof(name), "mlx4_ibt%d", port);
ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port);
@@ -2189,7 +2199,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_wq;
}
- snprintf(name, sizeof name, "mlx4_ibud%d", port);
+ snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
+ ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+ if (!ctx->wi_wq) {
+ pr_err("Failed to create wire WQ for port %d\n", port);
+ ret = -ENOMEM;
+ goto err_wiwq;
+ }
+
+ snprintf(name, sizeof(name), "mlx4_ibud%d", port);
ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port);
@@ -2200,6 +2218,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
return 0;
err_udwq:
+ destroy_workqueue(ctx->wi_wq);
+ ctx->wi_wq = NULL;
+
+err_wiwq:
destroy_workqueue(ctx->wq);
ctx->wq = NULL;
@@ -2247,12 +2269,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
}
flush_workqueue(ctx->wq);
+ flush_workqueue(ctx->wi_wq);
for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
free_pv_object(dev, i, ctx->port);
}
kfree(ctx->tun);
destroy_workqueue(ctx->ud_wq);
+ destroy_workqueue(ctx->wi_wq);
destroy_workqueue(ctx->wq);
}
}
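
Splitting MLX4_NUM_WIRE_BUFS from MLX4_NUM_TUNNEL_BUFS is safe because every ring index is reduced with `& (N - 1)` against the matching constant; head and tail stay free-running counters, which only works while N is a power of two. A scaled-down sketch of the scheme:

#include <stdio.h>

#define NUM_WIRE_BUFS 8		/* power of two, shrunk for the demo */

/* Free-running head/tail counters with power-of-two masking, as the
 * sqp tx ring uses: occupancy is head - tail, the slot index is
 * head & (N - 1), and a full ring maps to -EAGAIN in the driver. */
static unsigned int head, tail;

static int post(void)
{
	if (head - tail >= NUM_WIRE_BUFS - 1)
		return -1;		/* ring full */
	return (int)(++head & (NUM_WIRE_BUFS - 1));
}

int main(void)
{
	int ix, n = 0;

	while ((ix = post()) >= 0) {
		printf("slot %d\n", ix);
		n++;
	}
	printf("posted %d before full\n", n);	/* NUM_WIRE_BUFS - 1 */
	return 0;
}
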
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bd4f975e7f9a..cd0fba6b0964 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1215,9 +1215,10 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
+ return 0;
}
static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
@@ -1256,11 +1257,12 @@ err2:
return err;
}
-static void mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
ib_destroy_cq(to_mxrcd(xrcd)->cq);
ib_dealloc_pd(to_mxrcd(xrcd)->pd);
mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
+ return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
@@ -1533,23 +1535,11 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
int default_flow;
- static const u16 __mlx4_domain[] = {
- [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
- [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
- [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
- [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
- };
-
if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
pr_err("Invalid priority value %d\n", flow_attr->priority);
return -EINVAL;
}
- if (domain >= IB_FLOW_DOMAIN_NUM) {
- pr_err("Invalid domain value %d\n", domain);
- return -EINVAL;
- }
-
if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
return -EINVAL;
@@ -1558,8 +1548,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
return PTR_ERR(mailbox);
ctrl = mailbox->buf;
- ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
- flow_attr->priority);
+ ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
ctrl->port = flow_attr->port;
ctrl->qpn = cpu_to_be32(qp->qp_num);
@@ -1701,8 +1690,8 @@ static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr,
- int domain, struct ib_udata *udata)
+ struct ib_flow_attr *flow_attr,
+ struct ib_udata *udata)
{
int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
@@ -1768,8 +1757,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
}
while (i < ARRAY_SIZE(type) && type[i]) {
- err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
- &mflow->reg_id[i].id);
+ err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
+ type[i], &mflow->reg_id[i].id);
if (err)
goto err_create_flow;
if (is_bonded) {
@@ -1778,7 +1767,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
*/
flow_attr->port = 2;
err = __mlx4_ib_create_flow(qp, flow_attr,
- domain, type[j],
+ MLX4_DOMAIN_UVERBS, type[j],
&mflow->reg_id[j].mirror);
flow_attr->port = 1;
if (err)
@@ -2589,11 +2578,16 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
.destroy_wq = mlx4_ib_destroy_wq,
.modify_wq = mlx4_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
};
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
.alloc_mw = mlx4_ib_alloc_mw,
.dealloc_mw = mlx4_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
};
static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
@@ -2847,7 +2841,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_steer_free_bitmap;
rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
- if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
+ &dev->persist->pdev->dev))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
@@ -2989,10 +2984,8 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
/* Add an empty rule for IB L2 */
memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
- err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
- IB_FLOW_DOMAIN_NIC,
- MLX4_FS_REGULAR,
- &mqp->reg_id);
+ err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
+ MLX4_FS_REGULAR, &mqp->reg_id);
} else {
err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 38e87a700a2a..58df06492d69 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -233,7 +233,8 @@ enum mlx4_ib_mad_ifc_flags {
};
enum {
- MLX4_NUM_TUNNEL_BUFS = 256,
+ MLX4_NUM_TUNNEL_BUFS = 512,
+ MLX4_NUM_WIRE_BUFS = 2048,
};
struct mlx4_ib_tunnel_header {
@@ -298,6 +299,26 @@ struct mlx4_ib_rss {
u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
};
+enum {
+ /*
+ * Largest possible UD header: send with GRH and immediate
+ * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+ * tag. (LRH would only use 8 bytes, so Ethernet is the
+ * biggest case)
+ */
+ MLX4_IB_UD_HEADER_SIZE = 82,
+ MLX4_IB_LSO_HEADER_SPARE = 128,
+};
+
+struct mlx4_ib_sqp {
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
+ struct ib_qp *roce_v2_gsi;
+};
+
struct mlx4_ib_qp {
union {
struct ib_qp ibqp;
@@ -343,7 +364,10 @@ struct mlx4_ib_qp {
struct mlx4_wqn_range *wqn_range;
/* Number of RSS QP parents that uses this WQ */
u32 rss_usecnt;
- struct mlx4_ib_rss *rss_ctx;
+ union {
+ struct mlx4_ib_rss *rss_ctx;
+ struct mlx4_ib_sqp *sqp;
+ };
};
struct mlx4_ib_srq {
@@ -366,6 +390,10 @@ struct mlx4_ib_ah {
union mlx4_ext_av av;
};
+struct mlx4_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+};
+
/****************************************/
/* alias guid support */
/****************************************/
@@ -454,6 +482,7 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_pd *pd;
struct work_struct work;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct mlx4_ib_demux_pv_qp qp[2];
};
@@ -461,6 +490,7 @@ struct mlx4_ib_demux_ctx {
struct ib_device *ib_dev;
int port;
struct workqueue_struct *wq;
+ struct workqueue_struct *wi_wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
atomic64_t subnet_prefix;
@@ -492,6 +522,7 @@ struct mlx4_ib_sriov {
spinlock_t id_map_lock;
struct rb_root sl_id_map;
struct list_head cm_list;
+ struct xarray xa_rej_tmout;
};
struct gid_cache_context {
@@ -725,8 +756,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
@@ -736,7 +766,7 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
@@ -747,14 +777,17 @@ int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
@@ -890,15 +923,18 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+static inline int
+mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
+{
+ return 0;
+}
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
int *num_of_mtts);
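
Folding rss_ctx and the relocated mlx4_ib_sqp behind a union works because a QP is at most one of RSS or special, so the two pointers never coexist; the QP type acts as the discriminator. A small sketch of the pattern:

#include <stdio.h>
#include <stdlib.h>

/* Sketch: mutually exclusive per-QP state shares storage in an
 * anonymous union, discriminated by the QP kind. */
enum qp_kind { QP_RSS, QP_SQP };

struct rss_ctx { unsigned int flags; };
struct sqp_ctx { int pkey_index; };

struct qp {
	enum qp_kind kind;
	union {
		struct rss_ctx *rss;
		struct sqp_ctx *sqp;
	};
};

int main(void)
{
	struct qp qp = { .kind = QP_SQP };

	qp.sqp = malloc(sizeof(*qp.sqp));
	if (!qp.sqp)
		return 1;
	qp.sqp->pkey_index = 0;

	if (qp.kind == QP_SQP)
		printf("special QP, pkey index %d\n", qp.sqp->pkey_index);
	free(qp.sqp);
	return 0;
}
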
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 1d5ef0de12c9..426fed005d53 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -271,6 +271,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
u64 total_len = 0;
int i;
+ *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
+
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
/*
* Initialization - save the first chunk start as the
@@ -421,7 +423,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_free;
}
- n = ib_umem_page_count(mr->umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -511,7 +512,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mmr->umem = NULL;
goto release_mpt_entry;
}
- n = ib_umem_page_count(mmr->umem);
+ n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
shift = PAGE_SHIFT;
err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
@@ -610,37 +611,27 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_mw *mw;
+ struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+ struct mlx4_ib_mw *mw = to_mmw(ibmw);
int err;
- mw = kmalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
- to_mlx4_type(type), &mw->mmw);
+ err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+ to_mlx4_type(ibmw->type), &mw->mmw);
if (err)
- goto err_free;
+ return err;
err = mlx4_mw_enable(dev->dev, &mw->mmw);
if (err)
goto err_mw;
- mw->ibmw.rkey = mw->mmw.key;
-
- return &mw->ibmw;
+ ibmw->rkey = mw->mmw.key;
+ return 0;
err_mw:
mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
- kfree(mw);
-
- return ERR_PTR(err);
+ return err;
}
int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
@@ -648,8 +639,6 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
struct mlx4_ib_mw *mw = to_mmw(ibmw);
mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
- kfree(mw);
-
return 0;
}
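
The alloc_mw rework pairs with the INIT_RDMA_OBJ_SIZE(ib_mw, ...) hookup in main.c: the core now allocates the driver container around the embedded ib_mw, and the driver callback merely initializes it and returns int, which is why the kmalloc/kfree pair disappears. A sketch of the pattern with simplified types (the real allocator lives in the RDMA core):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the core-allocated object pattern: the "core" sizes and
 * allocates the container; the driver initializes in place. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_mw { unsigned int rkey; };

struct drv_mw {			/* driver container, core-sized */
	struct ib_mw ibmw;	/* embedded base, like mlx4_ib_mw */
	int hw_handle;
};

static int drv_alloc_mw(struct ib_mw *ibmw)
{
	struct drv_mw *mw = container_of(ibmw, struct drv_mw, ibmw);

	mw->hw_handle = 42;	/* stand-in for mlx4_mw_alloc() */
	ibmw->rkey = 0x1234;
	return 0;		/* on failure, no kfree needed here */
}

int main(void)
{
	struct drv_mw *mw = calloc(1, sizeof(*mw));	/* "core" alloc */

	if (!mw || drv_alloc_mw(&mw->ibmw)) {
		free(mw);
		return 1;
	}
	printf("rkey 0x%x handle %d\n", mw->ibmw.rkey, mw->hw_handle);
	free(mw);		/* core frees on destroy, not the driver */
	return 0;
}
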
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2975f350b9fd..5cb8e602294c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -68,27 +68,6 @@ enum {
};
enum {
- /*
- * Largest possible UD header: send with GRH and immediate
- * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
- * tag. (LRH would only use 8 bytes, so Ethernet is the
- * biggest case)
- */
- MLX4_IB_UD_HEADER_SIZE = 82,
- MLX4_IB_LSO_HEADER_SPARE = 128,
-};
-
-struct mlx4_ib_sqp {
- struct mlx4_ib_qp qp;
- int pkey_index;
- u32 qkey;
- u32 send_psn;
- struct ib_ud_header ud_header;
- u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
- struct ib_qp *roce_v2_gsi;
-};
-
-enum {
MLX4_IB_MIN_SQ_STRIDE = 6,
MLX4_IB_CACHE_LINE_SIZE = 64,
};
@@ -123,11 +102,6 @@ enum mlx4_ib_source_type {
MLX4_IB_RWQ_SRC = 1,
};
-static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
-{
- return container_of(mqp, struct mlx4_ib_sqp, qp);
-}
-
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
if (!mlx4_is_master(dev->dev))
@@ -656,8 +630,6 @@ static int create_qp_rss(struct mlx4_ib_dev *dev,
if (err)
goto err_qpn;
- mutex_init(&qp->mutex);
-
INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules);
@@ -696,80 +668,72 @@ err_qpn:
return err;
}
-static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_qp *qp;
struct mlx4_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
int err;
if (!udata) {
pr_debug("RSS QP with NULL udata\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (udata->outlen)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
sizeof(ucmd.reserved1);
if (udata->inlen < required_cmd_sz) {
pr_debug("invalid inlen\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
pr_debug("copy failed\n");
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (ucmd.comp_mask || ucmd.reserved1)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
udata->inlen - sizeof(ucmd))) {
pr_debug("inlen is not supported\n");
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
pr_debug("RSS QP with unsupported QP type %d\n",
init_attr->qp_type);
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->create_flags) {
pr_debug("RSS QP doesn't support create flags\n");
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (init_attr->send_cq || init_attr->cap.max_send_wr) {
pr_debug("RSS QP with unsupported send attributes\n");
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
-
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
+ if (err)
+ return err;
qp->ibqp.qp_num = qp->mqp.qpn;
-
- return &qp->ibqp;
+ return 0;
}
/*
@@ -873,7 +837,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
@@ -922,7 +885,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
goto err;
}
- n = ib_umem_page_count(qp->umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
@@ -989,13 +951,11 @@ err:
static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn,
- struct mlx4_ib_qp **caller_qp)
+ struct mlx4_ib_qp *qp)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
int qpn;
int err;
- struct mlx4_ib_sqp *sqp = NULL;
- struct mlx4_ib_qp *qp;
struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx4_ib_ucontext, ibucontext);
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -1043,27 +1003,18 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
sqpn = qpn;
}
- if (!*caller_qp) {
- if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
- (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
- if (!sqp)
- return -ENOMEM;
- qp = &sqp->qp;
- } else {
- qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
- if (!qp)
- return -ENOMEM;
- }
- qp->pri.vid = 0xFFFF;
- qp->alt.vid = 0xFFFF;
- } else
- qp = *caller_qp;
+ if (init_attr->qp_type == IB_QPT_SMI ||
+ init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
+ qp_type == MLX4_IB_QPT_GSI ||
+ (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
+ MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
+ qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
+ if (!qp->sqp)
+ return -ENOMEM;
+ }
qp->mlx4_ib_qp_type = qp_type;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
@@ -1117,7 +1068,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
goto err;
}
- n = ib_umem_page_count(qp->umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
@@ -1239,9 +1189,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->mqp.event = mlx4_ib_qp_event;
- if (!*caller_qp)
- *caller_qp = qp;
-
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq));
@@ -1293,10 +1240,7 @@ err_db:
mlx4_db_free(dev->dev, &qp->db);
err:
- if (!sqp && !*caller_qp)
- kfree(qp);
- kfree(sqp);
-
+ kfree(qp->sqp);
return err;
}
@@ -1410,7 +1354,6 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
mlx4_qp_free(dev->dev, &qp->mqp);
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
del_gid_entries(qp);
- kfree(qp->rss_ctx);
}
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
@@ -1529,17 +1472,16 @@ static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
}
-static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx4_ib_qp *qp = NULL;
int err;
int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
if (init_attr->rwq_ind_tbl)
- return _mlx4_ib_create_qp_rss(pd, init_attr, udata);
+ return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
@@ -1551,16 +1493,16 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
MLX4_IB_QP_CREATE_ROCE_V2_GSI))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
if (init_attr->qp_type != IB_QPT_UD)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (init_attr->create_flags) {
if (udata && init_attr->create_flags & ~(sup_u_create_flags))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_CREATE_ROCE_V2_GSI |
@@ -1570,7 +1512,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
init_attr->qp_type > IB_QPT_GSI) ||
(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
init_attr->qp_type != IB_QPT_GSI))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
switch (init_attr->qp_type) {
@@ -1581,53 +1523,43 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
fallthrough;
case IB_QPT_XRC_INI:
if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
- return ERR_PTR(-ENOSYS);
+ return -ENOSYS;
init_attr->recv_cq = init_attr->send_cq;
fallthrough;
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ case IB_QPT_UD:
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- fallthrough;
- case IB_QPT_UD:
- {
- err = create_qp_common(pd, init_attr, udata, 0, &qp);
- if (err) {
- kfree(qp);
- return ERR_PTR(err);
- }
+ err = create_qp_common(pd, init_attr, udata, 0, qp);
+ if (err)
+ return err;
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
-
break;
- }
case IB_QPT_SMI:
case IB_QPT_GSI:
{
int sqpn;
- /* Userspace is not allowed to create special QPs: */
- if (udata)
- return ERR_PTR(-EINVAL);
if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
1, 1, &sqpn, 0,
MLX4_RES_USAGE_DRIVER);
if (res)
- return ERR_PTR(res);
+ return res;
} else {
sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
}
- err = create_qp_common(pd, init_attr, udata, sqpn, &qp);
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+ err = create_qp_common(pd, init_attr, udata, sqpn, qp);
if (err)
- return ERR_PTR(err);
+ return err;
qp->port = init_attr->port_num;
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
@@ -1636,25 +1568,33 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
}
default:
/* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
-
- return &qp->ibqp;
+ return 0;
}
struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
- struct ib_qp *ibqp;
struct mlx4_ib_dev *dev = to_mdev(device);
+ struct mlx4_ib_qp *qp;
+ int ret;
- ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
- if (!IS_ERR(ibqp) &&
- (init_attr->qp_type == IB_QPT_GSI) &&
+ mutex_init(&qp->mutex);
+ ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
+ if (ret) {
+ kfree(qp);
+ return ERR_PTR(ret);
+ }
+
+ if (init_attr->qp_type == IB_QPT_GSI &&
!(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
- struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
+ struct mlx4_ib_sqp *sqp = qp->sqp;
int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
if (is_eth &&
@@ -1666,14 +1606,14 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
sqp->roce_v2_gsi = NULL;
} else {
- sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
- sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
+ to_mqp(sqp->roce_v2_gsi)->flags |=
+ MLX4_IB_ROCE_V2_GSI_QP;
}
init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
}
}
- return ibqp;
+ return &qp->ibqp;
}
static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
@@ -1700,10 +1640,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
}
- if (is_sqp(dev, mqp))
- kfree(to_msqp(mqp));
- else
- kfree(mqp);
+ kfree(mqp->sqp);
+ kfree(mqp);
return 0;
}
@@ -1713,7 +1651,7 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
struct mlx4_ib_qp *mqp = to_mqp(qp);
if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
- struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+ struct mlx4_ib_sqp *sqp = mqp->sqp;
if (sqp->roce_v2_gsi)
ib_destroy_qp(sqp->roce_v2_gsi);
@@ -2575,7 +2513,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
- store_sqp_attrs(to_msqp(qp), attr, attr_mask);
+ store_sqp_attrs(qp->sqp, attr, attr_mask);
/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -2852,7 +2790,7 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
- struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+ struct mlx4_ib_sqp *sqp = mqp->sqp;
int err = 0;
if (sqp->roce_v2_gsi)
@@ -2877,12 +2815,13 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
return -EINVAL;
}
-static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
- struct ib_device *ib_dev = &mdev->ib_dev;
+ struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+ struct ib_device *ib_dev = qp->ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->ah);
@@ -2904,12 +2843,12 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
/* for proxy-qp0 sends, need to add in size of tunnel header */
/* for tunnel-qp0 sends, tunnel header is already in s/g list */
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
send_size += sizeof (struct mlx4_ib_tunnel_header);
ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
sqp->ud_header.lrh.service_level =
be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
sqp->ud_header.lrh.destination_lid =
@@ -2926,26 +2865,26 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = 0;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+ err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
if (err)
return err;
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
- if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
else
sqp->ud_header.bth.destination_qpn =
- cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);
+ cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
if (mlx4_is_master(mdev->dev)) {
- if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
return -EINVAL;
} else {
- if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+ if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
return -EINVAL;
}
sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
sqp->ud_header.immediate_present = 0;
@@ -3029,10 +2968,11 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
}
#define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct ib_device *ib_dev = sqp->qp.ibqp.device;
+ struct mlx4_ib_sqp *sqp = qp->sqp;
+ struct ib_device *ib_dev = qp->ibqp.device;
struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_ctrl_seg *ctrl = wqe;
@@ -3056,7 +2996,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
for (i = 0; i < wr->wr.num_sge; ++i)
send_size += wr->wr.sg_list[i].length;
- is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+ is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
is_grh = mlx4_ib_ah_grh_present(ah);
if (is_eth) {
enum ib_gid_type gid_type;
@@ -3070,9 +3010,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
if (err)
return err;
} else {
- err = fill_gid_by_hw_index(ibdev, sqp->qp.port,
- ah->av.ib.gid_index,
- &sgid, &gid_type);
+ err = fill_gid_by_hw_index(ibdev, qp->port,
+ ah->av.ib.gid_index, &sgid,
+ &gid_type);
if (!err) {
is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
if (is_udp) {
@@ -3117,13 +3057,18 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
* indexes don't necessarily match the hw ones, so
* we must use our own cache
*/
- sqp->ud_header.grh.source_gid.global.subnet_prefix =
- cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
- demux[sqp->qp.port - 1].
- subnet_prefix)));
- sqp->ud_header.grh.source_gid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
+ sqp->ud_header.grh.source_gid.global
+ .subnet_prefix =
+ cpu_to_be64(atomic64_read(
+ &(to_mdev(ib_dev)
+ ->sriov
+ .demux[qp->port - 1]
+ .subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global
+ .interface_id =
+ to_mdev(ib_dev)
+ ->sriov.demux[qp->port - 1]
+ .guid_cache[ah->av.ib.gid_index];
} else {
sqp->ud_header.grh.source_gid =
ah->ibah.sgid_attr->gid;
@@ -3155,10 +3100,13 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
if (!is_eth) {
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
- (sqp->ud_header.lrh.destination_lid ==
- IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
- (sqp->ud_header.lrh.service_level << 8));
+ mlx->flags |=
+ cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+ (sqp->ud_header.lrh.destination_lid ==
+ IB_LID_PERMISSIVE ?
+ MLX4_WQE_MLX_SLR :
+ 0) |
+ (sqp->ud_header.lrh.service_level << 8));
if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
mlx->flags |= cpu_to_be32(0x1); /* force loopback */
mlx->rlid = sqp->ud_header.lrh.destination_lid;
@@ -3204,21 +3152,23 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 :
- sl_to_vl(to_mdev(ib_dev),
- sqp->ud_header.lrh.service_level,
- sqp->qp.port);
- if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+ sqp->ud_header.lrh.virtual_lane =
+ !qp->ibqp.qp_num ?
+ 15 :
+ sl_to_vl(to_mdev(ib_dev),
+ sqp->ud_header.lrh.service_level,
+ qp->port);
+ if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
return -EINVAL;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
}
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- if (!sqp->qp.ibqp.qp_num)
- err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+ if (!qp->ibqp.qp_num)
+ err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
&pkey);
else
- err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+ err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
&pkey);
if (err)
return err;
@@ -3228,7 +3178,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
sqp->qkey : wr->remote_qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
@@ -3551,14 +3501,14 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
- struct mlx4_ib_sqp *sqp = to_msqp(qp);
+ struct mlx4_ib_sqp *sqp = qp->sqp;
if (sqp->roce_v2_gsi) {
struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
enum ib_gid_type gid_type;
union ib_gid gid;
- if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
+ if (!fill_gid_by_hw_index(mdev, qp->port,
ah->av.ib.gid_index,
&gid, &gid_type))
qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
@@ -3678,8 +3628,8 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case MLX4_IB_QPT_TUN_SMI_OWNER:
- err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
- ctrl, &seglen);
+ err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+ &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -3715,8 +3665,8 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case MLX4_IB_QPT_PROXY_SMI_OWNER:
- err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
- ctrl, &seglen);
+ err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+ &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -3749,8 +3699,7 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
- err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
- &seglen);
+ err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -4172,6 +4121,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (!qp)
return ERR_PTR(-ENOMEM);
+ mutex_init(&qp->mutex);
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
@@ -4327,7 +4277,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
return err;
}
-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4338,36 +4288,35 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
kfree(qp);
+ return 0;
}
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct ib_rwq_ind_table *rwq_ind_table;
struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+ struct ib_device *device = rwq_ind_table->device;
unsigned int base_wqn;
size_t min_resp_len;
- int i;
- int err;
+ int i, err = 0;
if (udata->inlen > 0 &&
!ib_is_udata_cleared(udata, 0,
udata->inlen))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
if (udata->outlen && udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (ind_tbl_size >
device->attrs.rss_caps.max_rwq_indirection_table_size) {
pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
ind_tbl_size,
device->attrs.rss_caps.max_rwq_indirection_table_size);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
base_wqn = init_attr->ind_tbl[0]->wq_num;
@@ -4375,39 +4324,23 @@ struct ib_rwq_ind_table
if (base_wqn % ind_tbl_size) {
pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
base_wqn);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
for (i = 1; i < ind_tbl_size; i++) {
if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
pr_debug("indirection table's WQNs aren't consecutive\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
- rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
- if (!rwq_ind_table)
- return ERR_PTR(-ENOMEM);
-
if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) +
sizeof(resp.response_length);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
- if (err)
- goto err;
}
- return rwq_ind_table;
-
-err:
- kfree(rwq_ind_table);
- return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
- kfree(ib_rwq_ind_tbl);
- return 0;
+ return err;
}
struct mlx4_ib_drain_cqe {
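
Editor's note: a subtle part of the qp.c conversion above is that mutex_init() moves out of create_qp_rss(), create_rq() and create_qp_common() into the one place that now allocates the QP, mlx4_ib_create_qp() (and mlx4_ib_create_wq() for WQs), so the lock is initialized exactly once before any creation path runs. A sketch of the resulting flow, assuming hypothetical names and a pthread mutex in place of the kernel one:

#include <pthread.h>
#include <stdlib.h>

struct qp { pthread_mutex_t mutex; int qpn; };

/* per-type helper: reports -errno, no longer touches the mutex */
static int create_qp_common(struct qp *qp)
{
	qp->qpn = 7; /* hardware setup would go here */
	return 0;
}

static struct qp *create_qp(void)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	if (!qp)
		return NULL;
	pthread_mutex_init(&qp->mutex, NULL); /* once, before any path */
	if (create_qp_common(qp)) {
		free(qp); /* single error-unwind point */
		return NULL;
	}
	return qp;
}

int main(void)
{
	free(create_qp());
	return 0;
}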
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8f9d5035142d..bf618529e734 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -115,8 +115,9 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
- PAGE_SHIFT, &srq->mtt);
+ err = mlx4_mtt_init(
+ dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
+ PAGE_SHIFT, &srq->mtt);
if (err)
goto err_buf;
@@ -260,7 +261,7 @@ int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
return 0;
}
-void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(srq->device);
struct mlx4_ib_srq *msrq = to_msrq(srq);
@@ -282,6 +283,7 @@ void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
mlx4_db_free(dev->dev, &msrq->db);
}
ib_umem_release(msrq->umem);
+ return 0;
}
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
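
Editor's note: the srq.c, mr.c and qp.c hunks replace ib_umem_page_count() with ib_umem_num_dma_blocks(umem, PAGE_SIZE). Assuming the usual definition (round the start IOVA down and the end up to the block size), the count can be sketched in plain C as:

#include <stdint.h>
#include <stdio.h>

#define BLOCK 4096ULL /* PAGE_SIZE stand-in */

static uint64_t num_blocks(uint64_t iova, uint64_t length)
{
	uint64_t first = iova & ~(BLOCK - 1);                       /* down */
	uint64_t last = (iova + length + BLOCK - 1) & ~(BLOCK - 1); /* up   */

	return (last - first) / BLOCK;
}

int main(void)
{
	/* 4096 bytes starting 100 bytes into a page span two pages */
	printf("%llu\n", (unsigned long long)num_blocks(100, 4096));
	return 0;
}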
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 59e5ec39b447..505bc47fd575 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -106,8 +106,8 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
int err;
struct mlx5_ib_create_ah_resp resp = {};
- u32 min_resp_len = offsetof(typeof(resp), dmac) +
- sizeof(resp.dmac);
+ u32 min_resp_len =
+ offsetofend(struct mlx5_ib_create_ah_resp, dmac);
if (udata->outlen < min_resp_len)
return -EINVAL;
@@ -147,8 +147,3 @@ int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
return 0;
}
-
-void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
-{
- return;
-}
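
Editor's note: offsetofend(), used above in place of the open-coded offsetof() + sizeof() pair, yields the offset one past the named member. A self-contained sketch with a stand-in layout (not the real mlx5_ib_create_ah_resp):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct resp {
	uint32_t response_length;
	uint8_t dmac[6];
};

int main(void)
{
	/* a reply is usable only if it covers everything up to and
	 * including dmac: 4 bytes of length + 6 bytes of MAC = 10 */
	assert(offsetofend(struct resp, dmac) == 10);
	return 0;
}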
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index ebb2f108b64f..234f29912ba9 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -168,14 +168,14 @@ void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
mlx5_cmd_exec_in(dev, destroy_tis, in);
}
-void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
MLX5_SET(destroy_rqt_in, in, uid, uid);
- mlx5_cmd_exec_in(dev, destroy_rqt, in);
+ return mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
@@ -209,14 +209,14 @@ void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
-void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
- mlx5_cmd_exec_in(dev, dealloc_pd, in);
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
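
Editor's note: mlx5_cmd_destroy_rqt() and mlx5_cmd_dealloc_pd() switch from void to int above so the firmware status is no longer silently dropped. The shape of the change, sketched with a stub command executor:

#include <errno.h>
#include <stdio.h>

/* stand-in for mlx5_cmd_exec_in(): 0 on success, -errno from firmware */
static int cmd_exec(int fw_ok)
{
	return fw_ok ? 0 : -EIO;
}

/* before: void dealloc_pd(...) { cmd_exec(...); }  -- status lost;
 * after: the status is propagated so callers can act on a failure */
static int dealloc_pd(int fw_ok)
{
	return cmd_exec(fw_ok);
}

int main(void)
{
	printf("dealloc_pd: %d\n", dealloc_pd(1));
	return 0;
}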
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 1d192a8ca87d..88ea6ef8f2cb 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -44,10 +44,10 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
-void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
-void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
u16 uid);
void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 145f3cb40ccb..70c8fd67ee2f 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -117,7 +117,7 @@ err_bound:
return ret;
}
-static void mlx5_ib_destroy_counters(struct ib_counters *counters)
+static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
@@ -125,6 +125,7 @@ static void mlx5_ib_destroy_counters(struct ib_counters *counters)
if (mcounters->hw_cntrs_hndl)
mlx5_fc_destroy(to_mdev(counters->device)->mdev,
mcounters->hw_cntrs_hndl);
+ return 0;
}
static int mlx5_ib_create_counters(struct ib_counters *counters,
@@ -456,12 +457,12 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
}
- cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);
+ cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
if (!cnts->names)
return -ENOMEM;
cnts->offsets = kcalloc(num_counters,
- sizeof(cnts->offsets), GFP_KERNEL);
+ sizeof(*cnts->offsets), GFP_KERNEL);
if (!cnts->offsets)
goto err_names;
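
Editor's note: the counters.c hunk replaces sizeof(cnts->names) (the size of the pointer) with sizeof(*cnts->names) (the size of one element). For these particular pointer-typed arrays the two happen to coincide on common ABIs, so this is idiom hardening rather than a live overflow; kcalloc(n, sizeof(*p), ...) is the form that stays correct if the element type ever changes. Illustrated with a type where the sizes genuinely differ:

#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	uint64_t *offsets;

	/* sizeof(offsets) is the pointer size (4 on a 32-bit target),
	 * while sizeof(*offsets) is the element size (8): only the
	 * latter guarantees room for the writes below. */
	offsets = calloc(16, sizeof(*offsets));
	if (!offsets)
		return 1;
	for (int i = 0; i < 16; i++)
		offsets[i] = i;
	free(offsets);
	return 0;
}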
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dceb0eb2bed1..fb62f1d04afa 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -168,7 +168,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
{
enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
- struct mlx5_ib_srq *srq;
+ struct mlx5_ib_srq *srq = NULL;
struct mlx5_ib_wq *wq;
u16 wqe_ctr;
u8 roce_packet_type;
@@ -180,7 +180,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
if (qp->ibqp.xrcd) {
msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
- srq = to_mibsrq(msrq);
+ if (msrq)
+ srq = to_mibsrq(msrq);
} else {
srq = to_msrq(qp->ibqp.srq);
}
@@ -254,7 +255,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (roce_packet_type) {
case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
- wc->network_hdr_type = RDMA_NETWORK_IB;
+ wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
break;
case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
wc->network_hdr_type = RDMA_NETWORK_IPV6;
@@ -1023,16 +1024,21 @@ err_cqb:
return err;
}
-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
+ int ret;
+
+ ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ if (ret)
+ return ret;
- mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
if (udata)
destroy_cq_user(mcq, udata);
else
destroy_cq_kernel(dev, mcq);
+ return 0;
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
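
Editor's note: mlx5_ib_destroy_cq() now destroys the hardware CQ first and returns its error before touching software state, so a failed destroy leaves the object intact and retryable. A sketch of that ordering:

#include <errno.h>
#include <stdlib.h>

struct cq { void *sw_state; int hw_ok; };

/* stand-in for mlx5_core_destroy_cq() */
static int hw_destroy(struct cq *cq)
{
	return cq->hw_ok ? 0 : -EBUSY;
}

static int destroy_cq(struct cq *cq)
{
	int ret = hw_destroy(cq);

	if (ret)
		return ret; /* software state survives a failed destroy */
	free(cq->sw_state);
	free(cq);
	return 0;
}

int main(void)
{
	struct cq *cq = calloc(1, sizeof(*cq));

	if (!cq)
		return 1;
	cq->hw_ok = 1;
	return destroy_cq(cq);
}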
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index e9cfb9a2ef41..492cfe063bca 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -136,12 +136,9 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
#define LAST_COUNTERS_FIELD counters
/* Field is the last supported field */
-#define FIELDS_NOT_SUPPORTED(filter, field)\
- memchr_inv((void *)&filter.field +\
- sizeof(filter.field), 0,\
- sizeof(filter) -\
- offsetof(typeof(filter), field) -\
- sizeof(filter.field))
+#define FIELDS_NOT_SUPPORTED(filter, field) \
+ memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \
+ sizeof(filter) - offsetofend(typeof(filter), field))
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
bool is_egress,
@@ -767,6 +764,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_namespace *ns = NULL;
+ enum mlx5_flow_namespace_type fn_type;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_table *ft;
int max_table_size;
@@ -780,11 +778,9 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
log_max_ft_size));
esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- enum mlx5_flow_namespace_type fn_type;
-
- if (flow_is_multicast_only(flow_attr) &&
- !dont_trap)
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ if (flow_is_multicast_only(flow_attr) && !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
priority = ib_prio_to_core_prio(flow_attr->priority,
@@ -797,12 +793,11 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
if (!dev->is_rep && !esw_encap &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- reformat_l3_tunnel_to_l2))
+ reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
} else {
- max_table_size =
- BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
- log_max_ft_size));
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
+ dev->mdev, log_max_ft_size));
fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
prio = &dev->flow_db->egress_prios[priority];
if (!dev->is_rep && !esw_encap &&
@@ -812,27 +807,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
num_entries = MLX5_FS_MAX_ENTRIES;
num_groups = MLX5_FS_MAX_TYPES;
- } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
- flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
ns = mlx5_get_flow_namespace(dev->mdev,
MLX5_FLOW_NAMESPACE_LEFTOVERS);
- build_leftovers_ft_param(&priority,
- &num_entries,
- &num_groups);
+ build_leftovers_ft_param(&priority, &num_entries, &num_groups);
prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
- } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
if (!MLX5_CAP_FLOWTABLE(dev->mdev,
allow_sniffer_and_nic_rx_shared_tir))
return ERR_PTR(-EOPNOTSUPP);
- ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
- MLX5_FLOW_NAMESPACE_SNIFFER_RX :
- MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+ ns = mlx5_get_flow_namespace(
+ dev->mdev, ft_type == MLX5_IB_FT_RX ?
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX);
prio = &dev->flow_db->sniffer[ft_type];
priority = 0;
num_entries = 1;
num_groups = 1;
+ break;
+ default:
+ break;
}
if (!ns)
@@ -954,7 +953,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);
- if (dev->is_rep) {
+ if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
struct mlx5_eswitch_rep *rep;
rep = dev->port[flow_attr->port - 1].rep;
@@ -1116,6 +1115,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
int err;
static const struct ib_flow_attr flow_attr = {
.num_of_specs = 0,
+ .type = IB_FLOW_ATTR_SNIFFER,
.size = sizeof(flow_attr)
};
@@ -1143,10 +1143,8 @@ err:
return ERR_PTR(err);
}
-
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
- int domain,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -1162,8 +1160,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
int underlay_qpn;
if (udata && udata->inlen) {
- min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
- sizeof(ucmd_hdr.reserved);
+ min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
if (udata->inlen < min_ucmd_sz)
return ERR_PTR(-EOPNOTSUPP);
@@ -1197,10 +1194,9 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto free_ucmd;
}
- if (domain != IB_FLOW_DOMAIN_USER ||
- flow_attr->port > dev->num_ports ||
- (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
- IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ if (flow_attr->port > dev->num_ports ||
+ (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
err = -EINVAL;
goto free_ucmd;
}
@@ -1245,19 +1241,22 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->tir_num = mqp->raw_packet_qp.rq.tirn;
}
- if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
mqp->underlay_qpn :
0;
handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
underlay_qpn, ucmd);
- } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
- flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
- handler = create_leftovers_rule(dev, ft_prio, flow_attr,
- dst);
- } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
- } else {
+ break;
+ default:
err = -EINVAL;
goto destroy_ft;
}
@@ -1305,39 +1304,47 @@ _get_flow_table(struct mlx5_ib_dev *dev,
esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- log_max_ft_size));
+ switch (fs_matcher->ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2) &&
!esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
- if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
+ !esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB:
max_table_size = BIT(
MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
+ reformat_l3_tunnel_to_l2) &&
esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
priority = FDB_BYPASS_PATH;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
- max_table_size =
- BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
- log_max_ft_size));
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
priority = fs_matcher->priority;
- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
- max_table_size =
- BIT(MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
- log_max_ft_size));
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
priority = fs_matcher->priority;
+ break;
+ default:
+ break;
}
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
@@ -1346,16 +1353,24 @@ _get_flow_table(struct mlx5_ib_dev *dev,
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
+ switch (fs_matcher->ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
prio = &dev->flow_db->prios[priority];
- else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
prio = &dev->flow_db->egress_prios[priority];
- else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB:
prio = &dev->flow_db->fdb;
- else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
prio = &dev->flow_db->rdma_rx[priority];
- else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
prio = &dev->flow_db->rdma_tx[priority];
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
if (!prio)
return ERR_PTR(-EINVAL);
@@ -1488,20 +1503,25 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
goto unlock;
}
- if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
+ switch (dest_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
dst[dst_num].type = dest_type;
dst[dst_num++].tir_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
dst[dst_num++].ft_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ break;
+ default:
+ break;
}
-
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dst[dst_num].counter_id = counter_id;
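
Editor's note: the rewritten FIELDS_NOT_SUPPORTED() above relies on the identity offsetof(t, field) + sizeof(field) == offsetofend(t, field): it scans everything after the last supported field for a nonzero byte. A userspace sketch (memcmp against a zeroed copy stands in for the kernel's memchr_inv):

#include <stddef.h>
#include <string.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct filter { unsigned int proto; unsigned int port; unsigned int rsvd[4]; };

/* nonzero if any byte past the last supported field ('port') is set */
static int fields_not_supported(const struct filter *f)
{
	static const struct filter zero;
	size_t off = offsetofend(struct filter, port);

	return memcmp((const char *)f + off, (const char *)&zero + off,
		      sizeof(*f) - off) != 0;
}

int main(void)
{
	struct filter f = { .proto = 6, .port = 80 };

	return fields_not_supported(&f); /* 0: reserved tail is clear */
}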
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 40d418153891..7fcad9135276 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -35,44 +35,19 @@
struct mlx5_ib_gsi_wr {
struct ib_cqe cqe;
struct ib_wc wc;
- int send_flags;
bool completed:1;
};
-struct mlx5_ib_gsi_qp {
- struct ib_qp ibqp;
- struct ib_qp *rx_qp;
- u8 port_num;
- struct ib_qp_cap cap;
- enum ib_sig_type sq_sig_type;
- /* Serialize qp state modifications */
- struct mutex mutex;
- struct ib_cq *cq;
- struct mlx5_ib_gsi_wr *outstanding_wrs;
- u32 outstanding_pi, outstanding_ci;
- int num_qps;
- /* Protects access to the tx_qps. Post send operations synchronize
- * with tx_qp creation in setup_qp(). Also protects the
- * outstanding_wrs array and indices.
- */
- spinlock_t lock;
- struct ib_qp **tx_qps;
-};
-
-static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
-{
- return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
-}
-
static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
{
return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}
/* Call with gsi->lock locked */
-static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+static void generate_completions(struct mlx5_ib_qp *mqp)
{
- struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct ib_cq *gsi_cq = mqp->ibqp.send_cq;
struct mlx5_ib_gsi_wr *wr;
u32 index;
@@ -83,10 +58,7 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
if (!wr->completed)
break;
- if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
- wr->send_flags & IB_SEND_SIGNALED)
- WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
-
+ WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
wr->completed = false;
}
@@ -98,6 +70,7 @@ static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
struct mlx5_ib_gsi_wr *wr =
container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+ struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);
u64 wr_id;
unsigned long flags;
@@ -106,19 +79,19 @@ static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
wr_id = wr->wc.wr_id;
wr->wc = *wc;
wr->wc.wr_id = wr_id;
- wr->wc.qp = &gsi->ibqp;
+ wr->wc.qp = &mqp->ibqp;
- generate_completions(gsi);
+ generate_completions(mqp);
spin_unlock_irqrestore(&gsi->lock, flags);
}
-struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr)
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_gsi_qp *gsi;
- struct ib_qp_init_attr hw_init_attr = *init_attr;
- const u8 port_num = init_attr->port_num;
+ struct ib_qp_init_attr hw_init_attr = *attr;
+ const u8 port_num = attr->port_num;
int num_qps = 0;
int ret;
@@ -130,26 +103,19 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
num_qps = MLX5_MAX_PORTS;
}
- gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
- if (!gsi)
- return ERR_PTR(-ENOMEM);
-
+ gsi = &mqp->gsi;
gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
- if (!gsi->tx_qps) {
- ret = -ENOMEM;
- goto err_free;
- }
+ if (!gsi->tx_qps)
+ return -ENOMEM;
- gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
- sizeof(*gsi->outstanding_wrs),
- GFP_KERNEL);
+ gsi->outstanding_wrs =
+ kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
if (!gsi->outstanding_wrs) {
ret = -ENOMEM;
goto err_free_tx;
}
- mutex_init(&gsi->mutex);
-
mutex_lock(&dev->devr.mutex);
if (dev->devr.ports[port_num - 1].gsi) {
@@ -161,12 +127,10 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
gsi->num_qps = num_qps;
spin_lock_init(&gsi->lock);
- gsi->cap = init_attr->cap;
- gsi->sq_sig_type = init_attr->sq_sig_type;
- gsi->ibqp.qp_num = 1;
+ gsi->cap = attr->cap;
gsi->port_num = port_num;
- gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+ gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
IB_POLL_SOFTIRQ);
if (IS_ERR(gsi->cq)) {
mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
@@ -182,19 +146,31 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
hw_init_attr.cap.max_send_sge = 0;
hw_init_attr.cap.max_inline_data = 0;
}
- gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+
+ gsi->rx_qp = mlx5_ib_create_qp(pd, &hw_init_attr, NULL);
if (IS_ERR(gsi->rx_qp)) {
mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
PTR_ERR(gsi->rx_qp));
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
+ gsi->rx_qp->device = pd->device;
+ gsi->rx_qp->pd = pd;
+ gsi->rx_qp->real_qp = gsi->rx_qp;
+
+ gsi->rx_qp->qp_type = hw_init_attr.qp_type;
+ gsi->rx_qp->send_cq = hw_init_attr.send_cq;
+ gsi->rx_qp->recv_cq = hw_init_attr.recv_cq;
+ gsi->rx_qp->event_handler = hw_init_attr.event_handler;
+ spin_lock_init(&gsi->rx_qp->mr_lock);
+ INIT_LIST_HEAD(&gsi->rx_qp->rdma_mrs);
+ INIT_LIST_HEAD(&gsi->rx_qp->sig_mrs);
- dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+ dev->devr.ports[attr->port_num - 1].gsi = gsi;
mutex_unlock(&dev->devr.mutex);
- return &gsi->ibqp;
+ return 0;
err_destroy_cq:
ib_free_cq(gsi->cq);
@@ -203,23 +179,19 @@ err_free_wrs:
kfree(gsi->outstanding_wrs);
err_free_tx:
kfree(gsi->tx_qps);
-err_free:
- kfree(gsi);
- return ERR_PTR(ret);
+ return ret;
}
-int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
{
- struct mlx5_ib_dev *dev = to_mdev(qp->device);
- struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
const int port_num = gsi->port_num;
int qp_index;
int ret;
- mlx5_ib_dbg(dev, "destroying GSI QP\n");
-
mutex_lock(&dev->devr.mutex);
- ret = ib_destroy_qp(gsi->rx_qp);
+ ret = mlx5_ib_destroy_qp(gsi->rx_qp, NULL);
if (ret) {
mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
ret);
@@ -241,7 +213,7 @@ int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
kfree(gsi->outstanding_wrs);
kfree(gsi->tx_qps);
- kfree(gsi);
+ kfree(mqp);
return 0;
}
@@ -259,7 +231,6 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
.max_send_sge = gsi->cap.max_send_sge,
.max_inline_data = gsi->cap.max_inline_data,
},
- .sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
.create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
};
@@ -370,56 +341,54 @@ err_destroy_qp:
static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
u16 qp_index;
+ mutex_lock(&dev->devr.mutex);
for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
setup_qp(gsi, qp_index);
+ mutex_unlock(&dev->devr.mutex);
}
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
- struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
int ret;
mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
- mutex_lock(&gsi->mutex);
ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
if (ret) {
mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
- goto unlock;
+ return ret;
}
if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
setup_qps(gsi);
-
-unlock:
- mutex_unlock(&gsi->mutex);
-
- return ret;
+ return 0;
}
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask,
struct ib_qp_init_attr *qp_init_attr)
{
- struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
int ret;
- mutex_lock(&gsi->mutex);
ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
qp_init_attr->cap = gsi->cap;
- mutex_unlock(&gsi->mutex);
-
return ret;
}
/* Call with gsi->lock locked */
-static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_qp *mqp,
struct ib_ud_wr *wr, struct ib_wc *wc)
{
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
struct mlx5_ib_gsi_wr *gsi_wr;
@@ -448,22 +417,21 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
}
/* Call with gsi->lock locked */
-static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
- struct ib_ud_wr *wr)
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr)
{
struct ib_wc wc = {
{ .wr_id = wr->wr.wr_id },
.status = IB_WC_SUCCESS,
.opcode = IB_WC_SEND,
- .qp = &gsi->ibqp,
+ .qp = &mqp->ibqp,
};
int ret;
- ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
+ ret = mlx5_ib_add_outstanding_wr(mqp, wr, &wc);
if (ret)
return ret;
- generate_completions(gsi);
+ generate_completions(mqp);
return 0;
}
@@ -490,7 +458,8 @@ static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
- struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
struct ib_qp *tx_qp;
unsigned long flags;
int ret;
@@ -503,14 +472,14 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
spin_lock_irqsave(&gsi->lock, flags);
tx_qp = get_tx_qp(gsi, &cur_wr);
if (!tx_qp) {
- ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
+ ret = mlx5_ib_gsi_silent_drop(mqp, &cur_wr);
if (ret)
goto err;
spin_unlock_irqrestore(&gsi->lock, flags);
continue;
}
- ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
+ ret = mlx5_ib_add_outstanding_wr(mqp, &cur_wr, NULL);
if (ret)
goto err;
@@ -534,7 +503,8 @@ err:
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
- struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
return ib_post_recv(gsi->rx_qp, wr, bad_wr);
}
@@ -544,7 +514,5 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
if (!gsi)
return;
- mutex_lock(&gsi->mutex);
setup_qps(gsi);
- mutex_unlock(&gsi->mutex);
}
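
Editor's note: gsi.c no longer allocates a standalone mlx5_ib_gsi_qp; the struct is embedded in mlx5_ib_qp (see the mlx5_ib.h hunk below), so handle_single_completion() can recover the owning QP with container_of() instead of keeping a private ibqp copy. The pattern in miniature:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gsi { int port; };
struct qp { int qpn; struct gsi gsi; };

static struct qp *gsi_to_qp(struct gsi *g)
{
	return container_of(g, struct qp, gsi);
}

int main(void)
{
	struct qp q = { .qpn = 1 };

	return gsi_to_qp(&q.gsi)->qpn == 1 ? 0 : 1;
}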
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d60d63221b14..246e3cbe0b2c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -326,8 +326,8 @@ out:
spin_unlock(&port->mp.mpi_lock);
}
-static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
- u8 *active_width)
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
+ u16 *active_speed, u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
@@ -384,7 +384,7 @@ static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
-static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
@@ -436,7 +436,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
-static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
u8 *active_width, bool ext)
{
return ext ?
@@ -546,7 +546,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
@@ -561,7 +561,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
}
switch (gid_type) {
- case IB_GID_TYPE_IB:
+ case IB_GID_TYPE_ROCE:
roce_version = MLX5_ROCE_VERSION_1;
break;
case IB_GID_TYPE_ROCE_UDP_ENCAP:
@@ -840,7 +840,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
/* We support 'Gappy' memory registration too */
props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
}
- props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ /* IB_WR_REG_MR always requires changing the entity size with UMR */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) {
props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
/* At this stage no support for signature handover */
@@ -1175,32 +1177,24 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
return 0;
}
-enum mlx5_ib_width {
- MLX5_IB_WIDTH_1X = 1 << 0,
- MLX5_IB_WIDTH_2X = 1 << 1,
- MLX5_IB_WIDTH_4X = 1 << 2,
- MLX5_IB_WIDTH_8X = 1 << 3,
- MLX5_IB_WIDTH_12X = 1 << 4
-};
-
-static void translate_active_width(struct ib_device *ibdev, u8 active_width,
- u8 *ib_width)
+static void translate_active_width(struct ib_device *ibdev, u16 active_width,
+ u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- if (active_width & MLX5_IB_WIDTH_1X)
+ if (active_width & MLX5_PTYS_WIDTH_1X)
*ib_width = IB_WIDTH_1X;
- else if (active_width & MLX5_IB_WIDTH_2X)
+ else if (active_width & MLX5_PTYS_WIDTH_2X)
*ib_width = IB_WIDTH_2X;
- else if (active_width & MLX5_IB_WIDTH_4X)
+ else if (active_width & MLX5_PTYS_WIDTH_4X)
*ib_width = IB_WIDTH_4X;
- else if (active_width & MLX5_IB_WIDTH_8X)
+ else if (active_width & MLX5_PTYS_WIDTH_8X)
*ib_width = IB_WIDTH_8X;
- else if (active_width & MLX5_IB_WIDTH_12X)
+ else if (active_width & MLX5_PTYS_WIDTH_12X)
*ib_width = IB_WIDTH_12X;
else {
mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
- (int)active_width);
+ active_width);
*ib_width = IB_WIDTH_4X;
}
@@ -1277,7 +1271,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
u16 max_mtu;
u16 oper_mtu;
int err;
- u8 ib_link_width_oper;
+ u16 ib_link_width_oper;
u8 vl_hw_cap;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
@@ -1310,16 +1304,13 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
props->port_cap_flags2 = rep->cap_mask2;
- err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
+ err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
+ &props->active_speed, port);
if (err)
goto out;
translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
- err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
- if (err)
- goto out;
-
mlx5_query_port_max_mtu(mdev, &max_mtu, port);
props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
@@ -2354,7 +2345,9 @@ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
return -EPERM;
if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
return -EOPNOTSUPP;
break;
}
@@ -2569,12 +2562,12 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
+ return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
@@ -2699,9 +2692,7 @@ static void pkey_change_handler(struct work_struct *work)
container_of(work, struct mlx5_ib_port_resources,
pkey_change_work);
- mutex_lock(&ports->devr->mutex);
mlx5_ib_gsi_pkey_change(ports->gsi);
- mutex_unlock(&ports->devr->mutex);
}
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
@@ -3127,11 +3118,9 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s1->usecnt, 0);
- for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
INIT_WORK(&devr->ports[port].pkey_change_work,
pkey_change_handler);
- devr->ports[port].devr = devr;
- }
return 0;
@@ -3316,7 +3305,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
+ err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+ &dev->port[port_num].roce.nb);
if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL;
return err;
@@ -3328,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
+ &dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL;
}
}
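Switching to the _net notifier variants scopes the callback to the network namespace that owns the mlx5 device. Reading of the intent, hedged as an assumption: netdev events from unrelated namespaces never invoke mlx5_netdev_event at all, instead of being filtered inside the handler.

/*
 * Sketch of the difference (assumption about intent, not patch code):
 * the scoped registration only delivers events from the owning netns.
 */
register_netdevice_notifier_net(mlx5_core_net(dev->mdev), &nb); /* scoped */
register_netdevice_notifier(&nb);	/* global: every namespace */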
@@ -4098,6 +4089,8 @@ static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
.alloc_mw = mlx5_ib_alloc_mw,
.dealloc_mw = mlx5_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
};
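INIT_RDMA_OBJ_SIZE records the size of the driver container and the offset of the embedded ib_mw, letting the RDMA core allocate the MW object itself (the matching driver-side change in mlx5_ib_alloc_mw appears later in this diff). A rough sketch of the core-side flow this enables, stated as an assumption about ib_core internals:

/*
 * Rough sketch (assumption about ib_core internals, not patch code):
 * the core allocates the registered container and passes the embedded
 * ib_mw down, so the driver no longer kzalloc()s its own mlx5_ib_mw.
 */
struct ib_mw *mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
if (!mw)
	return -ENOMEM;
mw->device = ib_dev;
ret = ib_dev->ops.alloc_mw(mw, udata);	/* driver fills in the rest */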
static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
@@ -4268,6 +4261,9 @@ static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
.destroy_wq = mlx5_ib_destroy_wq,
.get_netdev = mlx5_ib_get_netdev,
.modify_wq = mlx5_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
};
static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
@@ -4386,7 +4382,7 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
name = "mlx5_%d";
else
name = "mlx5_bond_%d";
- return ib_register_device(&dev->ib_dev, name);
+ return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
}
static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index c19ec9fd8a63..13de3d2edd34 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -169,8 +169,8 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int access_flags)
{
return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
- ib_umem_num_pages(umem), pas,
- access_flags);
+ ib_umem_num_dma_blocks(umem, PAGE_SIZE),
+ pas, access_flags);
}
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 5287fc868662..b1f2b34e5955 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -384,6 +384,22 @@ struct mlx5_ib_dct {
u32 *in;
};
+struct mlx5_ib_gsi_qp {
+ struct ib_qp *rx_qp;
+ u8 port_num;
+ struct ib_qp_cap cap;
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+ * outstanding_wrs array and indices.
+ */
+ spinlock_t lock;
+ struct ib_qp **tx_qps;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
@@ -391,6 +407,7 @@ struct mlx5_ib_qp {
struct mlx5_ib_raw_packet_qp raw_packet_qp;
struct mlx5_ib_rss_qp rss_qp;
struct mlx5_ib_dct dct;
+ struct mlx5_ib_gsi_qp gsi;
};
struct mlx5_frag_buf buf;
@@ -693,10 +710,7 @@ struct mlx5_mr_cache {
unsigned long last_add;
};
-struct mlx5_ib_gsi_qp;
-
struct mlx5_ib_port_resources {
- struct mlx5_ib_resources *devr;
struct mlx5_ib_gsi_qp *gsi;
struct work_struct pkey_change_work;
};
@@ -1119,13 +1133,16 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
+static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
@@ -1148,7 +1165,7 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@ -1163,8 +1180,7 @@ int mlx5_ib_advise_mr(struct ib_pd *pd,
struct ib_sge *sg_list,
u32 num_sge,
struct uverbs_attr_bundle *attrs);
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
@@ -1193,7 +1209,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad *in, struct ib_mad *out,
size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
@@ -1229,7 +1245,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- unsigned int entry);
+ unsigned int entry, int access_flags);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
@@ -1238,12 +1254,12 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
@@ -1267,6 +1283,7 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -1288,6 +1305,10 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
return -EOPNOTSUPP;
}
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
@@ -1318,9 +1339,9 @@ void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
-struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr);
-int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr);
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
@@ -1358,7 +1379,7 @@ static inline void init_query_mad(struct ib_smp *mad)
static inline int is_qp1(enum ib_qp_type qp_type)
{
- return qp_type == MLX5_IB_QPT_HW_GSI;
+ return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
@@ -1442,25 +1463,54 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, u32 bfregn,
bool dyn_bfreg);
-static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
- bool do_modify_atomic, int access_flags)
+static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
+ size_t length)
{
+ /*
+ * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
+ * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
+ * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
+ * can never be enabled without this capability. Simplify this weird
+ * quirky hardware by just saying it can't use PAS lists with UMR at
+ * all.
+ */
if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
return false;
- if (do_modify_atomic &&
+ /*
+ * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
+ * used.
+ */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
+ return false;
+ return true;
+}
+
+/*
+ * true if an existing MR can be reconfigured to new access_flags using UMR.
+ * Older HW cannot use UMR to update certain elements of the MKC. See
+ * umr_check_mkey_mask() and get_umr_update_access_mask().
+ */
+static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
MLX5_CAP_GEN(dev->mdev, atomic) &&
MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
return false;
- if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
!MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
return false;
- if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
- MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
- !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
return false;
return true;
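Because mlx5_ib_can_reconfig_with_umr() takes the XOR of current and target access flags, only capabilities for bits that actually change are consulted; a flag set on both sides can never block the UMR path. A worked example under that reading:

/*
 * Worked example (illustrative): adding remote atomics to an MR that
 * already had local write flips exactly one bit, so only the atomic
 * capabilities are checked and the relaxed-ordering tests are skipped.
 */
unsigned int cur = IB_ACCESS_LOCAL_WRITE;
unsigned int tgt = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_ATOMIC;
unsigned int diffs = cur ^ tgt;	/* == IB_ACCESS_REMOTE_ATOMIC */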
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3e6f2f9c6655..b261797b258f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,6 +50,29 @@ enum {
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
static void
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
u32 *in)
@@ -100,7 +123,8 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
+static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
+ u64 length)
{
return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
@@ -152,12 +176,12 @@ static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
mr->cache_ent = ent;
mr->dev = ent->dev;
+ set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
MLX5_SET(mkc, mkc, log_page_size, ent->page);
return mr;
@@ -534,7 +558,7 @@ static void cache_work_func(struct work_struct *work)
/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- unsigned int entry)
+ unsigned int entry, int access_flags)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
@@ -544,6 +568,10 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
entry >= ARRAY_SIZE(cache->ent)))
return ERR_PTR(-EINVAL);
+ /* Matches access in alloc_cache_mr() */
+ if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
+ return ERR_PTR(-EOPNOTSUPP);
+
ent = &cache->ent[entry];
spin_lock_irq(&ent->lock);
if (list_empty(&ent->head)) {
@@ -558,6 +586,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
}
+ mr->access_flags = access_flags;
return mr;
}
@@ -730,8 +759,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
- !dev->is_rep &&
- mlx5_core_is_pf(dev->mdev))
+ !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ mlx5_ib_can_load_pas_with_umr(dev, 0))
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
@@ -774,29 +803,6 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
return 0;
}
-static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
- struct ib_pd *pd)
-{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
-
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
- !!(acc & IB_ACCESS_RELAXED_ORDERING));
-
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
-}
-
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -979,6 +985,11 @@ alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
if (!ent)
return ERR_PTR(-E2BIG);
+
+ /* Matches access in alloc_cache_mr() */
+ if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = get_cache_mr(ent);
if (!mr) {
mr = create_cache_mr(ent);
@@ -1181,38 +1192,31 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
goto err_1;
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
+ if (populate) {
+ if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
+ err = -EINVAL;
+ goto err_2;
+ }
mlx5_ib_populate_pas(dev, umem, page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ }
/* The pg_access bit allows setting the access flags
* in the page list submitted with the command. */
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
+ populate ? pd : dev->umrc.pd);
MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
- MLX5_SET(mkc, mkc, relaxed_ordering_write,
- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
- !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
- MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET64(mkc, mkc, start_addr, virt_addr);
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(virt_addr, length, page_shift));
MLX5_SET(mkc, mkc, log_page_size, page_shift);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(virt_addr, length, page_shift));
@@ -1308,7 +1312,8 @@ int mlx5_ib_advise_mr(struct ib_pd *pd,
struct uverbs_attr_bundle *attrs)
{
if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
- advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
return -EOPNOTSUPP;
return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
@@ -1353,7 +1358,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
- bool use_umr;
+ bool xlt_with_umr;
struct ib_umem *umem;
int page_shift;
int npages;
@@ -1367,6 +1372,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
+ xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, length);
+ /* ODP requires xlt update via umr to work. */
+ if (!xlt_with_umr && (access_flags & IB_ACCESS_ON_DEMAND))
+ return ERR_PTR(-EINVAL);
+
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
length == U64_MAX) {
if (virt_addr != start)
@@ -1387,28 +1397,17 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
-
- if (order <= mr_cache_max_order(dev) && use_umr) {
+ if (xlt_with_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
- if (PTR_ERR(mr) == -EAGAIN) {
- mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
+ if (IS_ERR(mr))
mr = NULL;
- }
- } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
- if (access_flags & IB_ACCESS_ON_DEMAND) {
- err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
- goto error;
- }
- use_umr = false;
}
if (!mr) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, !use_umr);
+ page_shift, access_flags, !xlt_with_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1422,15 +1421,16 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
set_mr_fields(dev, mr, npages, length, access_flags);
- if (use_umr) {
+ if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
+ /*
+ * If the MR was created with reg_create then it will be
+ * configured properly but left disabled. It is safe to go ahead
+ * and configure it again via UMR while enabling it.
+ */
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
- if (access_flags & IB_ACCESS_ON_DEMAND)
- update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
-
err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
update_xlt_flags);
-
if (err) {
dereg_mr(dev, mr);
return ERR_PTR(err);
@@ -1448,6 +1448,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
dereg_mr(dev, mr);
return ERR_PTR(err);
}
+
+ err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
+ if (err) {
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+ }
}
return &mr->ibmr;
@@ -1555,8 +1561,11 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
- (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
+ if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
+ access_flags) ||
+ !mlx5_ib_can_load_pas_with_umr(dev, len) ||
+ (flags & IB_MR_REREG_TRANS &&
+ !mlx5_ib_pas_fits_in_mr(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
*/
@@ -1727,9 +1736,9 @@ static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ /* This is only used from the kernel, so setting the PD is OK. */
+ set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
@@ -1973,12 +1982,11 @@ struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
max_num_meta_sg);
}
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- struct mlx5_ib_mw *mw = NULL;
+ struct mlx5_ib_mw *mw = to_mmw(ibmw);
u32 *in = NULL;
void *mkc;
int ndescs;
@@ -1991,21 +1999,20 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
if (err)
- return ERR_PTR(err);
+ return err;
if (req.comp_mask || req.reserved1 || req.reserved2)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (udata->inlen > sizeof(req) &&
!ib_is_udata_cleared(udata, sizeof(req),
udata->inlen - sizeof(req)))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
- mw = kzalloc(sizeof(*mw), GFP_KERNEL);
in = kzalloc(inlen, GFP_KERNEL);
- if (!mw || !in) {
+ if (!in) {
err = -ENOMEM;
goto free;
}
@@ -2014,11 +2021,11 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
- MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
@@ -2026,17 +2033,15 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
goto free;
mw->mmkey.type = MLX5_MKEY_MW;
- mw->ibmw.rkey = mw->mmkey.key;
+ ibmw->rkey = mw->mmkey.key;
mw->ndescs = ndescs;
- resp.response_length = min(offsetof(typeof(resp), response_length) +
- sizeof(resp.response_length), udata->outlen);
+ resp.response_length =
+ min(offsetofend(typeof(resp), response_length), udata->outlen);
if (resp.response_length) {
err = ib_copy_to_udata(udata, &resp, resp.response_length);
- if (err) {
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
- goto free;
- }
+ if (err)
+ goto free_mkey;
}
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
@@ -2048,21 +2053,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
kfree(in);
- return &mw->ibmw;
+ return 0;
free_mkey:
mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
- kfree(mw);
kfree(in);
- return ERR_PTR(err);
+ return err;
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
struct mlx5_ib_dev *dev = to_mdev(mw->device);
struct mlx5_ib_mw *mmw = to_mmw(mw);
- int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
@@ -2073,11 +2076,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
synchronize_srcu(&dev->odp_srcu);
}
- err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
- if (err)
- return err;
- kfree(mmw);
- return 0;
+ return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index cfd7efab114e..5c853ec1b0d8 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -382,7 +382,7 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
memset(caps, 0, sizeof(*caps));
if (!MLX5_CAP_GEN(dev->mdev, pg) ||
- !mlx5_ib_can_use_umr(dev, true, 0))
+ !mlx5_ib_can_load_pas_with_umr(dev, 0))
return;
caps->general_caps = IB_ODP_SUPPORT;
@@ -476,12 +476,12 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
if (IS_ERR(odp))
return ERR_CAST(odp);
- ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
+ ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY,
+ imr->access_flags);
if (IS_ERR(mr))
goto out_umem;
mr->ibmr.pd = imr->ibmr.pd;
- mr->access_flags = imr->access_flags;
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
@@ -540,14 +540,13 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
+ imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
if (IS_ERR(imr)) {
err = PTR_ERR(imr);
goto out_umem;
}
imr->ibmr.pd = &pd->ibpd;
- imr->access_flags = access_flags;
imr->mmkey.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
@@ -666,15 +665,21 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
}
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
+#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
+#define MLX5_PF_FLAGS_ENABLE BIT(3)
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
u64 user_va, size_t bcnt, u32 *bytes_mapped,
u32 flags)
{
int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- unsigned long current_seq;
u64 access_mask;
u64 start_idx;
+ bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
+ u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
page_shift = odp->page_shift;
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
@@ -683,25 +688,15 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
- current_seq = mmu_interval_read_begin(&odp->notifier);
-
- np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
- current_seq);
+ np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
if (np < 0)
return np;
- mutex_lock(&odp->umem_mutex);
- if (!mmu_interval_read_retry(&odp->notifier, current_seq)) {
- /*
- * No need to check whether the MTTs really belong to
- * this MR, since ib_umem_odp_map_dma_pages already
- * checks this.
- */
- ret = mlx5_ib_update_xlt(mr, start_idx, np,
- page_shift, MLX5_IB_UPD_XLT_ATOMIC);
- } else {
- ret = -EAGAIN;
- }
+ /*
+ * No need to check whether the MTTs really belong to this MR, since
+ * ib_umem_odp_map_dma_and_lock already checks this.
+ */
+ ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
mutex_unlock(&odp->umem_mutex);
if (ret < 0) {
@@ -836,6 +831,20 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
flags);
}
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+{
+ u32 flags = MLX5_PF_FLAGS_SNAPSHOT;
+ int ret;
+
+ if (enable)
+ flags |= MLX5_PF_FLAGS_ENABLE;
+
+ ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem),
+ mr->umem->address, mr->umem->length, NULL,
+ flags);
+ return ret >= 0 ? 0 : ret;
+}
+
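mlx5_ib_init_odp_mr() reuses the page-fault machinery at registration time: MLX5_PF_FLAGS_SNAPSHOT maps whatever pages are already present without faulting new ones in, and MLX5_PF_FLAGS_ENABLE additionally turns the mkey on via MLX5_IB_UPD_XLT_ENABLE. The two call shapes, mirroring the mlx5_ib_reg_user_mr() caller earlier in this diff:

/* Illustrative call shapes (mirrors the reg_user_mr() caller above): */
mlx5_ib_init_odp_mr(mr, true);	/* snapshot the XLT and enable the mkey */
mlx5_ib_init_odp_mr(mr, false);	/* snapshot only; mkey enabled elsewhere */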
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -1862,6 +1871,9 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
+
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5758dbe64045..600e056798c0 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1477,7 +1477,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
resp->tirn = rq->tirn;
resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
- if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
resp->tir_icm_addr = MLX5_GET(
create_tir_out, out, icm_address_31_0);
resp->tir_icm_addr |=
@@ -1739,7 +1740,8 @@ create_tir:
if (mucontext->devx_uid) {
params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
params->resp.tirn = qp->rss_qp.tirn;
- if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
params->resp.tir_icm_addr =
MLX5_GET(create_tir_out, out, icm_address_31_0);
params->resp.tir_icm_addr |=
@@ -2409,6 +2411,9 @@ static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 uidx = params->uidx;
void *dctc;
+ if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
+ return -EOPNOTSUPP;
+
qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
if (!qp->dct.in)
return -ENOMEM;
@@ -2506,18 +2511,6 @@ static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
}
- switch (attr->qp_type) {
- case IB_QPT_SMI:
- case MLX5_IB_QPT_HW_GSI:
- case MLX5_IB_QPT_REG_UMR:
- case IB_QPT_GSI:
- mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
- attr->qp_type);
- return -EINVAL;
- default:
- break;
- }
-
/*
* We don't need to see this warning: it means that kernel code is
* missing an ib_pd. Placed here to catch developers' mistakes.
@@ -2780,21 +2773,23 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto out;
}
- if (qp->type == MLX5_IB_QPT_DCT) {
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
err = create_dct(dev, pd, qp, params);
- goto out;
- }
-
- if (qp->type == IB_QPT_XRC_TGT) {
+ break;
+ case IB_QPT_XRC_TGT:
err = create_xrc_tgt_qp(dev, qp, params);
- goto out;
+ break;
+ case IB_QPT_GSI:
+ err = mlx5_ib_create_gsi(pd, qp, params->attr);
+ break;
+ default:
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
}
- if (params->udata)
- err = create_user_qp(dev, pd, qp, params);
- else
- err = create_kernel_qp(dev, pd, qp, params);
-
out:
if (err) {
mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
@@ -2934,9 +2929,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
if (err)
return ERR_PTR(err);
- if (attr->qp_type == IB_QPT_GSI)
- return mlx5_ib_gsi_create_qp(pd, attr);
-
params.udata = udata;
params.uidx = MLX5_IB_DEFAULT_UIDX;
params.attr = attr;
@@ -3005,9 +2997,14 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
return &qp->ibqp;
destroy_qp:
- if (qp->type == MLX5_IB_QPT_DCT) {
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
mlx5_ib_destroy_dct(qp);
- } else {
+ break;
+ case IB_QPT_GSI:
+ mlx5_ib_destroy_gsi(qp);
+ break;
+ default:
/*
* The lines below are a temporary solution until QP allocation
* is moved under IB/core responsibility.
@@ -3032,7 +3029,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
struct mlx5_ib_qp *mqp = to_mqp(qp);
if (unlikely(qp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_destroy_qp(qp);
+ return mlx5_ib_destroy_gsi(mqp);
if (mqp->type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
@@ -3088,20 +3085,44 @@ enum {
MLX5_PATH_FLAG_COUNTER = 1 << 2,
};
+static int ib_to_mlx5_rate_map(u8 rate)
+{
+ switch (rate) {
+ case IB_RATE_PORT_CURRENT:
+ return 0;
+ case IB_RATE_56_GBPS:
+ return 1;
+ case IB_RATE_25_GBPS:
+ return 2;
+ case IB_RATE_100_GBPS:
+ return 3;
+ case IB_RATE_200_GBPS:
+ return 4;
+ case IB_RATE_50_GBPS:
+ return 5;
+ default:
+ return rate + MLX5_STAT_RATE_OFFSET;
+ }
+}
+
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
+ u32 stat_rate_support;
+
if (rate == IB_RATE_PORT_CURRENT)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
return -EINVAL;
+ stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
while (rate != IB_RATE_PORT_CURRENT &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support))
--rate;
- return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
+ return ib_to_mlx5_rate_map(rate);
}
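With ib_to_mlx5_rate_map() the extended rates get fixed low indices in stat_rate_support, while legacy rates keep their historical rate + MLX5_STAT_RATE_OFFSET encoding. A worked capability check under that mapping:

/*
 * Worked example (illustrative): IB_RATE_50_GBPS maps to index 5, so
 * it is usable when bit 5 of stat_rate_support is set; a legacy rate
 * such as IB_RATE_10_GBPS still tests bit (rate + MLX5_STAT_RATE_OFFSET).
 */
u32 sup = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
bool rate_ok = (1 << ib_to_mlx5_rate_map(IB_RATE_50_GBPS)) & sup;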
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
@@ -3643,14 +3664,12 @@ static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
MLX5_MAX_PORTS + 1;
}
-static bool qp_supports_affinity(struct ib_qp *qp)
+static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
{
- if ((qp->qp_type == IB_QPT_RC) ||
- (qp->qp_type == IB_QPT_UD) ||
- (qp->qp_type == IB_QPT_UC) ||
- (qp->qp_type == IB_QPT_RAW_PACKET) ||
- (qp->qp_type == IB_QPT_XRC_INI) ||
- (qp->qp_type == IB_QPT_XRC_TGT))
+ if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
+ (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
+ (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI))
return true;
return false;
}
@@ -3668,7 +3687,7 @@ static unsigned int get_tx_affinity(struct ib_qp *qp,
unsigned int tx_affinity;
if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
- qp_supports_affinity(qp)))
+ qp_supports_affinity(mqp)))
return 0;
if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
@@ -4161,7 +4180,11 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, rae, 1);
}
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
- MLX5_SET(dctc, dctc, port, attr->port_num);
+ if (mlx5_lag_is_active(dev->mdev))
+ MLX5_SET(dctc, dctc, port,
+ get_tx_affinity_rr(dev, udata));
+ else
+ MLX5_SET(dctc, dctc, port, attr->port_num);
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
@@ -4716,12 +4739,12 @@ int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
}
-void mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
- mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
+ return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
}
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
@@ -4921,8 +4944,8 @@ static int prepare_user_rq(struct ib_pd *pd,
int err;
size_t required_cmd_sz;
- required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
- + sizeof(ucmd.single_stride_log_num_of_bytes);
+ required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
+ single_stride_log_num_of_bytes);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
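The repeated offsetof(...) + sizeof(...) to offsetofend(...) conversions in this file are purely cosmetic; the identity being relied on, spelled out:

/*
 * Identity behind the conversion (illustrative):
 *	offsetofend(T, m) == offsetof(T, m) + sizeof_field(T, m)
 * so required_cmd_sz still covers everything up to and including
 * single_stride_log_num_of_bytes, exactly as before.
 */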
@@ -5006,7 +5029,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
if (!udata)
return ERR_PTR(-ENOSYS);
- min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
if (udata->outlen && udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);
@@ -5036,8 +5059,8 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
rwq->ibwq.wq_num = rwq->core_qp.qpn;
rwq->ibwq.state = IB_WQS_RESET;
if (udata->outlen) {
- resp.response_length = offsetof(typeof(resp), response_length) +
- sizeof(resp.response_length);
+ resp.response_length = offsetofend(
+ struct mlx5_ib_create_wq_resp, response_length);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto err_copy;
@@ -5056,22 +5079,27 @@ err:
return ERR_PTR(err);
}
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ int ret;
- mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ if (ret)
+ return ret;
destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
+ return 0;
}
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata)
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
{
- struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
+ to_mrwq_ind_table(ib_rwq_ind_table);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
int sz = 1 << init_attr->log_ind_tbl_size;
struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
size_t min_resp_len;
@@ -5084,30 +5112,25 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
if (udata->inlen > 0 &&
!ib_is_udata_cleared(udata, 0,
udata->inlen))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (init_attr->log_ind_tbl_size >
MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
init_attr->log_ind_tbl_size,
MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ min_resp_len =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
if (udata->outlen && udata->outlen < min_resp_len)
- return ERR_PTR(-EINVAL);
-
- rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
- if (!rwq_ind_tbl)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- goto err;
- }
+ if (!in)
+ return -ENOMEM;
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
@@ -5122,26 +5145,24 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
kvfree(in);
-
if (err)
- goto err;
+ return err;
rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
if (udata->outlen) {
- resp.response_length = offsetof(typeof(resp), response_length) +
- sizeof(resp.response_length);
+ resp.response_length =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
+ response_length);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto err_copy;
}
- return &rwq_ind_tbl->ib_rwq_ind_tbl;
+ return 0;
err_copy:
mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
-err:
- kfree(rwq_ind_tbl);
- return ERR_PTR(err);
+ return err;
}
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
@@ -5149,10 +5170,7 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
- mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
-
- kfree(rwq_ind_tbl);
- return 0;
+ return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
}
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
@@ -5169,7 +5187,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
void *rqc;
void *in;
- required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
if (udata->inlen < required_cmd_sz)
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index ba899df44c5b..5d4e140db99c 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
- struct mlx5_core_qp *rq);
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index 7c3968ef9cd1..c683d7000168 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -576,11 +576,12 @@ err_destroy_rq:
return err;
}
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
- struct mlx5_core_qp *rq)
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq)
{
destroy_resource_common(dev, rq);
destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ return 0;
}
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7e10cbcb6d5c..e2f720eec1e1 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -389,24 +389,21 @@ out_box:
return ret;
}
-void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
+ int ret;
+
+ ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
+ if (ret)
+ return ret;
- mlx5_cmd_destroy_srq(dev, &msrq->msrq);
-
- if (srq->uobject) {
- mlx5_ib_db_unmap_user(
- rdma_udata_to_drv_context(
- udata,
- struct mlx5_ib_ucontext,
- ibucontext),
- &msrq->db);
- ib_umem_release(msrq->umem);
- } else {
+ if (udata)
+ destroy_srq_user(srq->pd, msrq, udata);
+ else
destroy_srq_kernel(dev, msrq);
- }
+ return 0;
}
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
index af197c36d757..2c3627b2509d 100644
--- a/drivers/infiniband/hw/mlx5/srq.h
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -56,7 +56,7 @@ struct mlx5_srq_table {
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in);
-void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out);
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 37aaacebd3f2..db889ec3fd48 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -590,22 +590,32 @@ err_destroy_srq_split:
return err;
}
-void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *tmp;
int err;
- tmp = xa_erase_irq(&table->array, srq->srqn);
- if (!tmp || tmp != srq)
- return;
+ /* Delete entry, but leave index occupied */
+ tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
+ if (WARN_ON(tmp != srq))
+ return xa_err(tmp) ?: -EINVAL;
err = destroy_srq_split(dev, srq);
- if (err)
- return;
+ if (err) {
+ /*
+ * No need to check the return value here: we are storing the
+ * entry back into its pre-allocated xarray slot, which cannot
+ * fail at this stage.
+ */
+ xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
+ return err;
+ }
+ xa_erase_irq(&table->array, srq->srqn);
mlx5_core_res_put(&srq->common);
wait_for_completion(&srq->common.free);
+ return 0;
}
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
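The cmpxchg to XA_ZERO_ENTRY hides the SRQ from lookups while keeping its index allocated, which is what makes the rollback store above safe: re-inserting into a reserved slot needs no allocation. The pattern in isolation, as a generic sketch with a hypothetical teardown() helper:

/*
 * Generic sketch of the reserve-then-erase pattern (illustrative;
 * teardown() is hypothetical). The hidden slot stays allocated, so
 * the rollback cmpxchg cannot fail with -ENOMEM.
 */
old = xa_cmpxchg_irq(&xa, id, obj, XA_ZERO_ENTRY, 0);	/* hide entry */
if (old != obj)
	return xa_err(old) ?: -EINVAL;
err = teardown(obj);
if (err) {
	xa_cmpxchg_irq(&xa, id, XA_ZERO_ENTRY, obj, 0);	/* undo hide */
	return err;
}
xa_erase_irq(&xa, id);					/* release index */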
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 43880973a512..d6038fb6c50c 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -398,7 +398,8 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
seg->status = MLX5_MKEY_STATUS_FREE;
}
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+static void set_reg_mkey_segment(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *seg,
const struct ib_send_wr *wr)
{
const struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -414,10 +415,12 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, seg, lr, 1);
- MLX5_SET(mkc, seg, relaxed_ordering_write,
- !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
- MLX5_SET(mkc, seg, relaxed_ordering_read,
- !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ MLX5_SET(mkc, seg, relaxed_ordering_write,
+ !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ MLX5_SET(mkc, seg, relaxed_ordering_read,
+ !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
if (umrwr->pd)
MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
@@ -863,13 +866,11 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
- if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Fast update of %s for MR is disabled\n",
- (MLX5_CAP_GEN(dev->mdev,
- umr_modify_entity_size_disabled)) ?
- "entity size" :
- "atomic access");
+ /* Matches access in mlx5_set_umr_free_mkey() */
+ if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
+ mlx5_ib_warn(
+ to_mdev(qp->ibqp.device),
+ "Fast update for MR access flags is not possible\n");
return -EINVAL;
}
@@ -1263,7 +1264,7 @@ static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- set_reg_mkey_segment(*seg, wr);
+ set_reg_mkey_segment(dev, *seg, wr);
*seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 7550e9d03dec..9dbbf4d16796 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -548,7 +548,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
struct ib_qp_cap *cap,
int qpn,
int port,
- struct mthca_sqp *sqp,
+ struct mthca_qp *qp,
struct ib_udata *udata);
void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
int mthca_create_ah(struct mthca_dev *dev,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9fa2f9164a47..c4d9cdc4ee97 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -373,9 +373,10 @@ static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
+ return 0;
}
static int mthca_ah_create(struct ib_ah *ibah,
@@ -389,9 +390,10 @@ static int mthca_ah_create(struct ib_ah *ibah,
init_attr->ah_attr, ah);
}
-static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
+static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
+ return 0;
}
static int mthca_create_srq(struct ib_srq *ibsrq,
@@ -440,7 +442,7 @@ static int mthca_create_srq(struct ib_srq *ibsrq,
return 0;
}
-static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
if (udata) {
struct mthca_ucontext *context =
@@ -454,6 +456,7 @@ static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
}
mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
+ return 0;
}
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
@@ -532,13 +535,14 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case IB_QPT_GSI:
{
- /* Don't allow userspace to create special QPs */
- if (udata)
- return ERR_PTR(-EINVAL);
-
- qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
+ qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
+ if (!qp->sqp) {
+ kfree(qp);
+ return ERR_PTR(-ENOMEM);
+ }
qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
@@ -547,7 +551,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
to_mcq(init_attr->recv_cq),
init_attr->sq_sig_type, &init_attr->cap,
qp->ibqp.qp_num, init_attr->port_num,
- to_msqp(qp), udata);
+ qp, udata);
break;
}
default:
@@ -556,6 +560,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
}
if (err) {
+ kfree(qp->sqp);
kfree(qp);
return ERR_PTR(err);
}
@@ -588,7 +593,8 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
to_mqp(qp)->rq.db_index);
}
mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
- kfree(qp);
+ kfree(to_mqp(qp)->sqp);
+ kfree(to_mqp(qp));
return 0;
}
@@ -789,7 +795,7 @@ out:
return ret;
}
-static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
if (udata) {
struct mthca_ucontext *context =
@@ -808,6 +814,7 @@ static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
to_mcq(cq)->set_ci_db_index);
}
mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
+ return 0;
}
static inline u32 convert_access(int acc)
@@ -846,7 +853,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
struct mthca_ucontext *context = rdma_udata_to_drv_context(
udata, struct mthca_ucontext, ibucontext);
struct mthca_mr *mr;
@@ -877,7 +884,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err;
}
- n = ib_umem_num_pages(mr->umem);
+ n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);
mr->mtt = mthca_alloc_mtt(dev, n);
if (IS_ERR(mr->mtt)) {
@@ -895,8 +902,8 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- pages[i++] = sg_page_iter_dma_address(&sg_iter);
+ rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
+ pages[i++] = rdma_block_iter_dma_address(&biter);
/*
* Be friendly to write_mtt and pass it chunks
@@ -1199,7 +1206,7 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
- ret = ib_register_device(&dev->ib_dev, "mthca%d");
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 84c64bff0d92..8a77483bb33c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -240,6 +240,16 @@ struct mthca_wq {
__be32 *db;
};
+struct mthca_sqp {
+ int pkey_index;
+ u32 qkey;
+ u32 send_psn;
+ struct ib_ud_header ud_header;
+ int header_buf_size;
+ void *header_buf;
+ dma_addr_t header_dma;
+};
+
struct mthca_qp {
struct ib_qp ibqp;
int refcount;
@@ -265,17 +275,7 @@ struct mthca_qp {
wait_queue_head_t wait;
struct mutex mutex;
-};
-
-struct mthca_sqp {
- struct mthca_qp qp;
- int pkey_index;
- u32 qkey;
- u32 send_psn;
- struct ib_ud_header ud_header;
- int header_buf_size;
- void *header_buf;
- dma_addr_t header_dma;
+ struct mthca_sqp *sqp;
};
static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -313,9 +313,4 @@ static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
return container_of(ibqp, struct mthca_qp, ibqp);
}
-static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
-{
- return container_of(qp, struct mthca_sqp, qp);
-}
-
#endif /* MTHCA_PROVIDER_H */
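The mthca change inverts the old embedding: mthca_sqp used to contain the mthca_qp (so to_msqp() was a container_of()), whereas now the special-QP state is a separately allocated member of mthca_qp. The accessor change this implies:

/*
 * Before (illustrative): to_msqp(qp) == container_of(qp, struct
 * mthca_sqp, qp); the QP was embedded inside the special-QP state.
 * After: the state hangs off the QP and is allocated on demand.
 */
struct mthca_sqp *sqp = qp->sqp;	/* set only for SMI/GSI QPs */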
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c6e95d0d760a..08a2a7afafd3 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -809,7 +809,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
qp->alt_port = attr->alt_port_num;
if (is_sqp(dev, qp))
- store_attrs(to_msqp(qp), attr, attr_mask);
+ store_attrs(qp->sqp, attr, attr_mask);
/*
* If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -1368,39 +1368,40 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
struct ib_qp_cap *cap,
int qpn,
int port,
- struct mthca_sqp *sqp,
+ struct mthca_qp *qp,
struct ib_udata *udata)
{
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;
- sqp->qp.transport = MLX;
- err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+ qp->transport = MLX;
+ err = mthca_set_qp_size(dev, cap, pd, qp);
if (err)
return err;
- sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
- sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
- &sqp->header_dma, GFP_KERNEL);
- if (!sqp->header_buf)
+ qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
+ qp->sqp->header_buf =
+ dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ &qp->sqp->header_dma, GFP_KERNEL);
+ if (!qp->sqp->header_buf)
return -ENOMEM;
spin_lock_irq(&dev->qp_table.lock);
if (mthca_array_get(&dev->qp_table.qp, mqpn))
err = -EBUSY;
else
- mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+ mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
spin_unlock_irq(&dev->qp_table.lock);
if (err)
goto err_out;
- sqp->qp.port = port;
- sqp->qp.qpn = mqpn;
- sqp->qp.transport = MLX;
+ qp->port = port;
+ qp->qpn = mqpn;
+ qp->transport = MLX;
err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
- send_policy, &sqp->qp, udata);
+ send_policy, qp, udata);
if (err)
goto err_out_free;
@@ -1421,10 +1422,9 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
mthca_unlock_cqs(send_cq, recv_cq);
- err_out:
- dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
- sqp->header_buf, sqp->header_dma);
-
+err_out:
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
return err;
}
@@ -1487,20 +1487,19 @@ void mthca_free_qp(struct mthca_dev *dev,
if (is_sqp(dev, qp)) {
atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
- dma_free_coherent(&dev->pdev->dev,
- to_msqp(qp)->header_buf_size,
- to_msqp(qp)->header_buf,
- to_msqp(qp)->header_dma);
+ dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+ qp->sqp->header_buf, qp->sqp->header_dma);
} else
mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
-static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
- int ind, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
+ const struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
+ struct mthca_sqp *sqp = qp->sqp;
int header_size;
int err;
u16 pkey;
@@ -1513,7 +1512,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
if (err)
return err;
mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
- mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+ mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
(sqp->ud_header.lrh.destination_lid ==
IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
(sqp->ud_header.lrh.service_level << 8));
@@ -1534,29 +1533,29 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
return -EINVAL;
}
- sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+ sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
- if (!sqp->qp.ibqp.qp_num)
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- sqp->pkey_index, &pkey);
+ if (!qp->ibqp.qp_num)
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
+ &pkey);
else
- ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
- wr->pkey_index, &pkey);
+ ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
+ &pkey);
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
sqp->qkey : wr->remote_qkey);
- sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+ sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
header_size = ib_ud_header_pack(&sqp->ud_header,
sqp->header_buf +
ind * MTHCA_UD_HEADER_SIZE);
data->byte_count = cpu_to_be32(header_size);
- data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+ data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
data->addr = cpu_to_be64(sqp->header_dma +
ind * MTHCA_UD_HEADER_SIZE);
@@ -1735,9 +1734,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;
@@ -2065,9 +2064,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
break;
case MLX:
- err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
- wqe - sizeof (struct mthca_next_seg),
- wqe);
+ err = build_mlx_header(
+ dev, qp, ind, ud_wr(wr),
+ wqe - sizeof(struct mthca_next_seg), wqe);
if (err) {
*bad_wr = wr;
goto out;
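
Editor's note: the mthca hunks above invert the old relationship between mthca_qp and mthca_sqp. Previously the special-QP state embedded the generic QP and to_msqp() recovered it with container_of(); now the generic QP carries a pointer to separately allocated special-QP state. A minimal sketch under simplified struct definitions (not the full driver types):

    /* before: downcast via container_of() */
    struct mthca_sqp {
            struct mthca_qp qp;            /* embedded generic QP */
            int             pkey_index;
    };
    /* to_msqp(qp) == container_of(qp, struct mthca_sqp, qp) */

    /* after: plain pointer, no cast and no embedding constraint */
    struct mthca_qp {
            /* ... */
            struct mthca_sqp *sqp;         /* NULL for ordinary QPs */
    };
    /* access becomes qp->sqp->pkey_index */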
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index fcfe0e82197a..5eb61c110090 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -185,7 +185,6 @@ struct ocrdma_hw_mr {
u32 num_pbes;
u32 pbl_size;
u32 pbe_size;
- u64 fbo;
u64 va;
};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 6eea02b18968..699a8b719ed6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -215,12 +215,13 @@ av_err:
return status;
}
-void ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
ocrdma_free_av(dev, ah);
+ return 0;
}
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 8b73b3489f3a..35cf2e2ff391 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -53,7 +53,7 @@ enum {
int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index e07bf0b2209a..c51c3f40700e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1962,6 +1962,7 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
int i;
struct ocrdma_reg_nsmr *cmd;
struct ocrdma_reg_nsmr_rsp *rsp;
+ u64 fbo = hwmr->va & (hwmr->pbe_size - 1);
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
if (!cmd)
@@ -1987,8 +1988,8 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
cmd->totlen_low = hwmr->len;
cmd->totlen_high = upper_32_bits(hwmr->len);
- cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
- cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
+ cmd->fbo_low = lower_32_bits(fbo);
+ cmd->fbo_high = upper_32_bits(fbo);
cmd->va_loaddr = (u32) hwmr->va;
cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
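
Editor's note: with the cached hwmr->fbo field gone, the first-byte offset is recomputed from the VA at mailbox time. A hedged sketch of the arithmetic, assuming pbe_size is a power of two (which the mask trick requires) and using illustrative values:

    u64 va       = 0x12345678abcdULL;     /* MR start address      */
    u32 pbe_size = 4096;                  /* bytes covered per PBE */
    u64 fbo      = va & (pbe_size - 1);   /* offset into first PBE */

    cmd->fbo_low  = lower_32_bits(fbo);   /* bits 31..0            */
    cmd->fbo_high = upper_32_bits(fbo);   /* bits 63..32           */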
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index d8c47d24d6d6..9b96661a7143 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -255,7 +255,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
if (ret)
return ret;
- return ib_register_device(&dev->ibdev, "ocrdma%d");
+ dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "ocrdma%d",
+ &dev->nic_info.pdev->dev);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
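
Editor's note: this two-line pattern recurs for qedr, usnic and pvrdma later in the diff: the driver caps the DMA segment size on its PCI device, then hands that device to ib_register_device(), whose signature now takes the DMA-capable struct device as a third argument. Sketch with a hypothetical driver:

    /* hypothetical registration, mirroring the pattern above */
    dma_set_max_seg_size(&pdev->dev, UINT_MAX);
    return ib_register_device(&dev->ibdev, "foo%d", &pdev->dev);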
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c1751c9a0f62..7350fe16f164 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -112,7 +112,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
}
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
- u8 *ib_speed, u8 *ib_width)
+ u16 *ib_speed, u8 *ib_width)
{
int status;
u8 speed;
@@ -664,7 +664,7 @@ exit:
return status;
}
-void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
@@ -682,10 +682,11 @@ void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (is_ucontext_pd(uctx, pd)) {
ocrdma_release_ucontext_pd(uctx);
- return;
+ return 0;
}
}
_ocrdma_dealloc_pd(dev, pd);
+ return 0;
}
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
@@ -810,14 +811,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return status;
}
-static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
- u32 num_pbes)
+static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
{
struct ocrdma_pbe *pbe;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
- struct ib_umem *umem = mr->umem;
- int pbe_cnt, total_num_pbes = 0;
+ int pbe_cnt;
u64 pg_addr;
if (!mr->hwmr.num_pbes)
@@ -826,19 +825,14 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
pbe = (struct ocrdma_pbe *)pbl_tbl->va;
pbe_cnt = 0;
- for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+ rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
/* store the page address in pbe */
- pg_addr = sg_page_iter_dma_address(&sg_iter);
+ pg_addr = rdma_block_iter_dma_address(&biter);
pbe->pa_lo = cpu_to_le32(pg_addr);
pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
pbe_cnt += 1;
- total_num_pbes += 1;
pbe++;
- /* if done building pbes, issue the mbx cmd. */
- if (total_num_pbes == num_pbes)
- return;
-
/* if the given pbl is full storing the pbes,
* move to next pbl.
*/
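
Editor's note: build_user_pbes() is one of several loops in this diff converted from the per-CPU-page SG iterator to the DMA block iterator. A hedged sketch of the new idiom (iterator names are from include/rdma/ib_umem.h; the loop body is illustrative):

    struct ib_block_iter biter;

    rdma_umem_for_each_dma_block(umem, &biter, PAGE_SIZE) {
            u64 addr = rdma_block_iter_dma_address(&biter);
            /* one iteration per PAGE_SIZE block; the iterator stops
             * at the end of the umem by itself, so the old
             * total_num_pbes == num_pbes early return goes away.
             */
    }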
@@ -857,7 +851,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
- u32 num_pbes;
pd = get_ocrdma_pd(ibpd);
@@ -872,13 +865,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = -EFAULT;
goto umem_err;
}
- num_pbes = ib_umem_page_count(mr->umem);
- status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+ status = ocrdma_get_pbl_info(
+ dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
if (status)
goto umem_err;
mr->hwmr.pbe_size = PAGE_SIZE;
- mr->hwmr.fbo = ib_umem_offset(mr->umem);
mr->hwmr.va = usr_addr;
mr->hwmr.len = len;
mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
@@ -889,7 +881,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
if (status)
goto umem_err;
- build_user_pbes(dev, mr, num_pbes);
+ build_user_pbes(dev, mr);
status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
if (status)
goto mbx_err;
@@ -1056,7 +1048,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
spin_unlock_irqrestore(&cq->cq_lock, flags);
}
-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
struct ocrdma_eq *eq = NULL;
@@ -1081,6 +1073,7 @@ void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
+ return 0;
}
static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1857,7 +1850,7 @@ int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
return status;
}
-void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct ocrdma_srq *srq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
@@ -1872,6 +1865,7 @@ void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
+ return 0;
}
/* unprivileged verbs and their support functions. */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index df8e3b923a44..425d554e7f3f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -67,12 +67,12 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
struct ib_qp *ocrdma_create_qp(struct ib_pd *,
struct ib_qp_init_attr *attrs,
@@ -92,7 +92,7 @@ int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
enum ib_srq_attr_mask, struct ib_udata *);
int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
-void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_recv_wr);
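
Editor's note: the void-to-int conversions in ocrdma (and in qedr, usnic and pvrdma below) all have the same shape: the ib_device_ops destroy callbacks now return a status, and drivers whose teardown cannot fail simply return 0. Sketch with a hypothetical driver:

    int foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
    {
            foo_hw_release_cq(to_foo_cq(ibcq));  /* cannot fail here  */
            return 0;                            /* was 'void' before */
    }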
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index d85f992bac29..967641662b24 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -177,6 +177,8 @@ static int qedr_iw_register_device(struct qedr_dev *dev)
}
static const struct ib_device_ops qedr_roce_dev_ops = {
+ .alloc_xrcd = qedr_alloc_xrcd,
+ .dealloc_xrcd = qedr_dealloc_xrcd,
.get_port_immutable = qedr_roce_port_immutable,
.query_pkey = qedr_query_pkey,
};
@@ -186,6 +188,10 @@ static void qedr_roce_register_device(struct qedr_dev *dev)
dev->ibdev.node_type = RDMA_NODE_IB_CA;
ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
+
+ dev->ibdev.uverbs_cmd_mask |= QEDR_UVERBS(OPEN_XRCD) |
+ QEDR_UVERBS(CLOSE_XRCD) |
+ QEDR_UVERBS(CREATE_XSRQ);
}
static const struct ib_device_ops qedr_dev_ops = {
@@ -232,6 +238,7 @@ static const struct ib_device_ops qedr_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};
@@ -286,7 +293,8 @@ static int qedr_register_device(struct qedr_dev *dev)
if (rc)
return rc;
- return ib_register_device(&dev->ibdev, "qedr%d");
+ dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}
/* This function allocates fast-path status block memory */
@@ -602,7 +610,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
/* Part 2 - check capabilities */
- page_size = ~dev->attr.page_size_caps + 1;
+ page_size = ~qed_attr->page_size_caps + 1;
if (page_size > PAGE_SIZE) {
DP_ERR(dev,
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
@@ -705,6 +713,18 @@ static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
event.event = IB_EVENT_SRQ_ERR;
event_type = EVENT_TYPE_SRQ;
break;
+ case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
default:
DP_ERR(dev, "unsupported event %d on handle=%llx\n",
e_code, roce_handle64);
@@ -1026,6 +1046,13 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
case QEDE_CHANGE_ADDR:
qedr_mac_address_change(dev);
break;
+ case QEDE_CHANGE_MTU:
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (dev->ndev->mtu != dev->iwarp_max_mtu)
+ DP_NOTICE(dev,
+ "Mtu was changed from %d to %d. This will not take affect for iWARP until qedr is reloaded\n",
+ dev->iwarp_max_mtu, dev->ndev->mtu);
+ break;
default:
pr_err("Event not supported\n");
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 460292179b32..9dde70373a55 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -310,6 +310,11 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
+struct qedr_xrcd {
+ struct ib_xrcd ibxrcd;
+ u16 xrcd_id;
+};
+
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -361,6 +366,7 @@ struct qedr_srq {
struct ib_umem *prod_umem;
u16 srq_id;
u32 srq_limit;
+ bool is_xrc;
/* lock to protect srq recv post */
spinlock_t lock;
};
@@ -573,6 +579,11 @@ static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
return container_of(ibpd, struct qedr_pd, ibpd);
}
+static inline struct qedr_xrcd *get_qedr_xrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct qedr_xrcd, ibxrcd);
+}
+
static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct qedr_cq, ibcq);
@@ -598,6 +609,28 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+static inline bool qedr_qp_has_srq(struct qedr_qp *qp)
+{
+ return qp->srq;
+}
+
+static inline bool qedr_qp_has_sq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_TGT)
+ return 0;
+
+ return 1;
+}
+
+static inline bool qedr_qp_has_rq(struct qedr_qp *qp)
+{
+ if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT || qedr_qp_has_srq(qp))
+ return 0;
+
+ return 1;
+}
+
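+ /* Editor's note -- derived truth table for the helpers above
+  * (RQ is additionally absent when an SRQ is attached):
+  *
+  *   qp_type          has SQ   has RQ
+  *   IB_QPT_RC        yes      yes
+  *   IB_QPT_GSI       no       no
+  *   IB_QPT_XRC_INI   yes      no
+  *   IB_QPT_XRC_TGT   no       no
+  */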
static inline struct qedr_user_mmap_entry *
get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 97fc7dd353b0..c4bc58736e48 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle);
cm_id->rem_ref(cm_id);
+ kfree(listener);
return rc;
}
@@ -736,7 +737,7 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc = 0;
+ int rc;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
@@ -759,8 +760,10 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ord = conn_param->ord;
if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
- &qp->iwarp_cm_flags))
+ &qp->iwarp_cm_flags)) {
+ rc = -EINVAL;
goto err; /* QP already destroyed */
+ }
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
if (rc) {
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b49bef94637e..019642ff24a7 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -136,6 +136,8 @@ int qedr_query_device(struct ib_device *ibdev,
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ attr->device_cap_flags |= IB_DEVICE_XRC;
attr->max_send_sge = qattr->max_sge;
attr->max_recv_sge = qattr->max_sge;
attr->max_sge_rd = qattr->max_sge;
@@ -157,13 +159,13 @@ int qedr_query_device(struct ib_device *ibdev,
attr->local_ca_ack_delay = qattr->dev_ack_delay;
attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
- attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_pkeys = qattr->max_pkey;
attr->max_ah = qattr->max_ah;
return 0;
}
-static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
u8 *ib_width)
{
switch (speed) {
@@ -231,15 +233,16 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
attr->max_mtu = IB_MTU_4096;
- attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
attr->lid = 0;
attr->lmc = 0;
attr->sm_lid = 0;
attr->sm_sl = 0;
attr->ip_gids = true;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
attr->gid_tbl_len = 1;
} else {
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
attr->gid_tbl_len = QEDR_MAX_SGID;
attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
}
@@ -471,15 +474,33 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_pd *pd = get_qedr_pd(ibpd);
DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+ return 0;
+}
+

+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
+
+ return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
}
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
+ u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
+
+ dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
+ return 0;
+}
static void qedr_free_pbl(struct qedr_dev *dev,
struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
@@ -600,11 +621,9 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
struct qedr_pbl_info *pbl_info, u32 pg_shift)
{
int pbe_cnt, total_num_pbes = 0;
- u32 fw_pg_cnt, fw_pg_per_umem_pg;
struct qedr_pbl *pbl_tbl;
- struct sg_dma_page_iter sg_iter;
+ struct ib_block_iter biter;
struct regpair *pbe;
- u64 pg_addr;
if (!pbl_info->num_pbes)
return;
@@ -625,32 +644,25 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
pbe_cnt = 0;
- fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
+ rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
+ u64 pg_addr = rdma_block_iter_dma_address(&biter);
- for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- pg_addr = sg_page_iter_dma_address(&sg_iter);
- for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
- pbe->lo = cpu_to_le32(pg_addr);
- pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+ pbe->lo = cpu_to_le32(pg_addr);
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
- pg_addr += BIT(pg_shift);
- pbe_cnt++;
- total_num_pbes++;
- pbe++;
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
- if (total_num_pbes == pbl_info->num_pbes)
- return;
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
- /* If the given pbl is full storing the pbes,
- * move to next pbl.
- */
- if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
- pbl_tbl++;
- pbe = (struct regpair *)pbl_tbl->va;
- pbe_cnt = 0;
- }
-
- fw_pg_cnt++;
+ /* If the given pbl is full storing the pbes, move to next pbl. */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
}
}
}
@@ -792,9 +804,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
return PTR_ERR(q->umem);
}
- fw_pages = ib_umem_page_count(q->umem) <<
- (PAGE_SHIFT - FW_PAGE_SHIFT);
-
+ fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
if (rc)
goto err0;
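
Editor's note: the fw_pages rewrite above is arithmetic, not behavioral. With illustrative numbers, assuming PAGE_SHIFT = 12 and qedr's FW_PAGE_SHIFT = 12:

    /* a 16 KiB umem:
     *   old: ib_umem_page_count() = 4, << (12 - 12)   -> 4
     *   new: ib_umem_num_dma_blocks(umem, 1 << 12)    -> 4
     * the helper also rounds by the block size rather than the CPU
     * page size, which is the point of the conversion.
     */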
@@ -999,7 +1009,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/* Generate doorbell address. */
cq->db.data.icid = cq->icid;
cq->db_addr = dev->db_addr + db_offset;
- cq->db.data.params = DB_AGG_CMD_SET <<
+ cq->db.data.params = DB_AGG_CMD_MAX <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
/* point to the very last element, passing it we will toggle */
@@ -1051,7 +1061,7 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qed_rdma_destroy_cq_out_params oparams;
@@ -1066,7 +1076,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
/* GSIs CQs are handled by driver, so they don't exist in the FW */
if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
- return;
+ return 0;
}
iparams.icid = cq->icid;
@@ -1114,6 +1124,7 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
* Since the destroy CQ ramrod has also been received on the EQ we can
* be certain that there's no event handler in process.
*/
+ return 0;
}
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
@@ -1146,7 +1157,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
SET_FIELD(qp_params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
break;
- case RDMA_NETWORK_IB:
+ case RDMA_NETWORK_ROCE_V1:
memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
@@ -1166,6 +1177,8 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
qp_params->roce_mode = ROCE_V2_IPV4;
break;
+ default:
+ return -EINVAL;
}
for (i = 0; i < 4; i++) {
@@ -1186,7 +1199,10 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
struct qedr_device_attr *qattr = &dev->attr;
/* QP0... attrs->qp_type == IB_QPT_GSI */
- if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ if (attrs->qp_type != IB_QPT_RC &&
+ attrs->qp_type != IB_QPT_GSI &&
+ attrs->qp_type != IB_QPT_XRC_INI &&
+ attrs->qp_type != IB_QPT_XRC_TGT) {
DP_DEBUG(dev, QEDR_MSG_QP,
"create qp: unsupported qp type=0x%x requested\n",
attrs->qp_type);
@@ -1221,12 +1237,20 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return -EINVAL;
}
- /* Unprivileged user space cannot create special QP */
- if (udata && attrs->qp_type == IB_QPT_GSI) {
- DP_ERR(dev,
- "create qp: userspace can't create special QPs of type=0x%x\n",
- attrs->qp_type);
- return -EINVAL;
+ /* verify consumer QPs are not trying to use GSI QP's CQ.
+ * TGT QP isn't associated with RQ/SQ
+ */
+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
+ (attrs->qp_type != IB_QPT_XRC_TGT)) {
+ struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
+ struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
+
+ if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
+ (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
+ DP_ERR(dev,
+ "create qp: consumer QP cannot use GSI CQs.\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -1248,8 +1272,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1291,8 +1315,12 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
int rc;
memset(uresp, 0, sizeof(*uresp));
- qedr_copy_sq_uresp(dev, uresp, qp);
- qedr_copy_rq_uresp(dev, uresp, qp);
+
+ if (qedr_qp_has_sq(qp))
+ qedr_copy_sq_uresp(dev, uresp, qp);
+
+ if (qedr_qp_has_rq(qp))
+ qedr_copy_rq_uresp(dev, uresp, qp);
uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp->qp_id = qp->qp_id;
@@ -1316,18 +1344,25 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
}
+
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
- qp->sq.max_sges = attrs->cap.max_send_sge;
qp->state = QED_ROCE_QP_STATE_RESET;
qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
- qp->sq_cq = get_qedr_cq(attrs->send_cq);
qp->dev = dev;
+ if (qedr_qp_has_sq(qp)) {
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ }
- if (attrs->srq) {
+ if (attrs->srq)
qp->srq = get_qedr_srq(attrs->srq);
- } else {
+
+ if (qedr_qp_has_rq(qp)) {
qp->rq_cq = get_qedr_cq(attrs->recv_cq);
qp->rq.max_sges = attrs->cap.max_recv_sge;
DP_DEBUG(dev, QEDR_MSG_QP,
@@ -1346,30 +1381,26 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
- int rc;
+ int rc = 0;
- qp->sq.db = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
- qp->sq.db_data.data.icid = qp->icid + 1;
- rc = qedr_db_recovery_add(dev, qp->sq.db,
- &qp->sq.db_data,
- DB_REC_WIDTH_32B,
- DB_REC_KERNEL);
- if (rc)
- return rc;
+ if (qedr_qp_has_sq(qp)) {
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ return rc;
+ }
- if (!qp->srq) {
+ if (qedr_qp_has_rq(qp)) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
-
- rc = qedr_db_recovery_add(dev, qp->rq.db,
- &qp->rq.db_data,
- DB_REC_WIDTH_32B,
- DB_REC_KERNEL);
- if (rc)
- qedr_db_recovery_del(dev, qp->sq.db,
- &qp->sq.db_data);
+ rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc && qedr_qp_has_sq(qp))
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
}
return rc;
@@ -1392,6 +1423,10 @@ static int qedr_check_srq_params(struct qedr_dev *dev,
DP_ERR(dev,
"create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
attrs->attr.max_sge, qattr->max_sge);
+ }
+
+ if (!udata && attrs->srq_type == IB_SRQT_XRC) {
+ DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
return -EINVAL;
}
@@ -1516,6 +1551,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
return -EINVAL;
srq->dev = dev;
+ srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
hw_srq = &srq->hw_srq;
spin_lock_init(&srq->lock);
@@ -1557,6 +1593,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
in_params.prod_pair_addr = phy_prod_pair_addr;
in_params.num_pages = page_cnt;
in_params.page_size = page_size;
+ if (srq->is_xrc) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
+ struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
+
+ in_params.is_xrc = 1;
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.cq_cid = cq->icid;
+ }
rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
if (rc)
@@ -1591,7 +1635,7 @@ err0:
return -EFAULT;
}
-void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct qed_rdma_destroy_srq_in_params in_params = {};
struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
@@ -1599,6 +1643,7 @@ void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
xa_erase_irq(&dev->srqs, srq->srq_id);
in_params.srq_id = srq->srq_id;
+ in_params.is_xrc = srq->is_xrc;
dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
if (ibsrq->uobject)
@@ -1609,6 +1654,7 @@ void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
DP_DEBUG(dev, QEDR_MSG_SRQ,
"destroy srq: destroyed srq with srq_id=0x%0x\n",
srq->srq_id);
+ return 0;
}
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@ -1649,6 +1695,20 @@ int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return 0;
}
+static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
+{
+ switch (ib_qp_type) {
+ case IB_QPT_RC:
+ return QED_RDMA_QP_TYPE_RC;
+ case IB_QPT_XRC_INI:
+ return QED_RDMA_QP_TYPE_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return QED_RDMA_QP_TYPE_XRC_TGT;
+ default:
+ return QED_RDMA_QP_TYPE_INVAL;
+ }
+}
+
static inline void
qedr_init_common_qp_in_params(struct qedr_dev *dev,
struct qedr_pd *pd,
@@ -1663,20 +1723,27 @@ qedr_init_common_qp_in_params(struct qedr_dev *dev,
params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
- params->pd = pd->pd_id;
- params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
- params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
params->stats_queue = 0;
- params->srq_id = 0;
- params->use_srq = false;
- if (!qp->srq) {
+ if (pd) {
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ }
+
+ if (qedr_qp_has_sq(qp))
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+
+ if (qedr_qp_has_rq(qp))
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
- } else {
+ if (qedr_qp_has_srq(qp)) {
params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
params->srq_id = qp->srq->srq_id;
params->use_srq = true;
+ } else {
+ params->srq_id = 0;
+ params->use_srq = false;
}
}
@@ -1690,8 +1757,10 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
"rq_len=%zd"
"\n",
qp,
- qp->usq.buf_addr,
- qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
+ qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
+ qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
+ qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
+ qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
}
static inline void
@@ -1717,11 +1786,15 @@ static void qedr_cleanup_user(struct qedr_dev *dev,
struct qedr_ucontext *ctx,
struct qedr_qp *qp)
{
- ib_umem_release(qp->usq.umem);
- qp->usq.umem = NULL;
+ if (qedr_qp_has_sq(qp)) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ }
- ib_umem_release(qp->urq.umem);
- qp->urq.umem = NULL;
+ if (qedr_qp_has_rq(qp)) {
+ ib_umem_release(qp->urq.umem);
+ qp->urq.umem = NULL;
+ }
if (rdma_protocol_roce(&dev->ibdev, 1)) {
qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
@@ -1756,28 +1829,38 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
{
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
- struct qedr_pd *pd = get_qedr_pd(ibpd);
- struct qedr_create_qp_uresp uresp;
- struct qedr_ucontext *ctx = pd ? pd->uctx : NULL;
- struct qedr_create_qp_ureq ureq;
+ struct qedr_create_qp_uresp uresp = {};
+ struct qedr_create_qp_ureq ureq = {};
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
- int rc = -EINVAL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_pd *pd = NULL;
+ int rc = 0;
qp->create_type = QEDR_QP_CREATE_USER;
- memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
- if (rc) {
- DP_ERR(dev, "Problem copying data from user space\n");
- return rc;
+
+ if (ibpd) {
+ pd = get_qedr_pd(ibpd);
+ ctx = pd->uctx;
}
- /* SQ - read access only (0) */
- rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, true, 0, alloc_and_init);
- if (rc)
- return rc;
+ if (udata) {
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return rc;
+ }
+ }
- if (!qp->srq) {
+ if (qedr_qp_has_sq(qp)) {
+ /* SQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
+ ureq.sq_len, true, 0, alloc_and_init);
+ if (rc)
+ return rc;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
@@ -1789,9 +1872,21 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
in_params.qp_handle_lo = ureq.qp_handle_lo;
in_params.qp_handle_hi = ureq.qp_handle_hi;
- in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
- in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
- if (!qp->srq) {
+
+ if (qp->qp_type == IB_QPT_XRC_TGT) {
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
+
+ in_params.xrcd_id = xrcd->xrcd_id;
+ in_params.qp_handle_lo = qp->qp_id;
+ in_params.use_srq = 1;
+ }
+
+ if (qedr_qp_has_sq(qp)) {
+ in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
+ in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ }
+
+ if (qedr_qp_has_rq(qp)) {
in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
}
@@ -1813,39 +1908,32 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
- if (rc)
- goto err;
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+ }
/* db offset was calculated in copy_qp_uresp, now set in the user q */
- ctx = pd->uctx;
- qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
- qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
-
- if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
-
- /* calculate the db_rec_db2 data since it is constant so no
- * need to reflect from user
- */
- qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
- qp->urq.db_rec_db2_data.data.value =
- cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+ if (qedr_qp_has_sq(qp)) {
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
}
- rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
- &qp->usq.db_rec_data->db_data,
- DB_REC_WIDTH_32B,
- DB_REC_USER);
- if (rc)
- goto err;
-
- rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
- &qp->urq.db_rec_data->db_data,
- DB_REC_WIDTH_32B,
- DB_REC_USER);
- if (rc)
- goto err;
+ if (qedr_qp_has_rq(qp)) {
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
@@ -1856,7 +1944,6 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
goto err;
}
qedr_qp_user_print(dev, qp);
-
return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -2112,16 +2199,47 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
return rc;
}
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ }
+
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
+ else
+ qedr_cleanup_kernel(dev, qp);
+
+ return 0;
+}
+
struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
- struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_xrcd *xrcd = NULL;
+ struct qedr_pd *pd = NULL;
+ struct qedr_dev *dev;
struct qedr_qp *qp;
struct ib_qp *ibqp;
int rc = 0;
+ if (attrs->qp_type == IB_QPT_XRC_TGT) {
+ xrcd = get_qedr_xrcd(attrs->xrcd);
+ dev = get_qedr_dev(xrcd->ibxrcd.device);
+ } else {
+ pd = get_qedr_pd(ibpd);
+ dev = get_qedr_dev(ibpd->device);
+ }
+
DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
udata ? "user library" : "kernel", pd);
@@ -2152,25 +2270,27 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
return ibqp;
}
- if (udata)
+ if (udata || xrcd)
rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
else
rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
if (rc)
- goto err;
+ goto out_free_qp;
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
- goto err;
+ goto out_free_qp_resources;
}
return &qp->ibqp;
-err:
+out_free_qp_resources:
+ qedr_free_qp_resources(dev, qp, udata);
+out_free_qp:
kfree(qp);
return ERR_PTR(-EFAULT);
@@ -2636,7 +2756,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+ qp_attr->cap.max_inline_data = dev->attr.max_inline;
qp_init_attr->cap = qp_attr->cap;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
@@ -2671,28 +2791,6 @@ err:
return rc;
}
-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
- struct ib_udata *udata)
-{
- struct qedr_ucontext *ctx =
- rdma_udata_to_drv_context(udata, struct qedr_ucontext,
- ibucontext);
- int rc;
-
- if (qp->qp_type != IB_QPT_GSI) {
- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
- if (rc)
- return rc;
- }
-
- if (qp->create_type == QEDR_QP_CREATE_USER)
- qedr_cleanup_user(dev, ctx, qp);
- else
- qedr_cleanup_kernel(dev, qp);
-
- return 0;
-}
-
int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
@@ -2752,6 +2850,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (rdma_protocol_iwarp(&dev->ibdev, 1))
qedr_iw_qp_rem_ref(&qp->ibqp);
+ else
+ kfree(qp);
return 0;
}
@@ -2766,11 +2866,12 @@ int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
return 0;
}
-void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
rdma_destroy_ah_attr(&ah->attr);
+ return 0;
}
static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
@@ -2861,7 +2962,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
goto err0;
}
- rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ rc = init_mr_info(dev, &mr->info,
+ ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
if (rc)
goto err1;
@@ -2888,10 +2990,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
mr->hw_mr.page_size_log = PAGE_SHIFT;
- mr->hw_mr.fbo = ib_umem_offset(mr->umem);
mr->hw_mr.length = len;
mr->hw_mr.vaddr = usr_addr;
- mr->hw_mr.zbva = false;
mr->hw_mr.phy_mr = false;
mr->hw_mr.dma_mr = false;
@@ -2984,10 +3084,8 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
mr->hw_mr.pbl_ptr = 0;
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
- mr->hw_mr.fbo = 0;
mr->hw_mr.length = 0;
mr->hw_mr.vaddr = 0;
- mr->hw_mr.zbva = false;
mr->hw_mr.phy_mr = true;
mr->hw_mr.dma_mr = false;
@@ -3765,10 +3863,10 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
* in first 4 bytes and need to update WQE producer in
* next 4 bytes.
*/
- srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
+ srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
/* Make sure sge producer is updated first */
dma_wmb();
- srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
+ srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
wr = wr->next;
}
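
Editor's note: the two cpu_to_le32() additions above fix endianness annotations -- the producer pair lives in adapter-visible memory (presumably declared __le32 in the qed HSI headers), while hw_srq keeps host-endian counters. Minimal sketch of the idiom (virt_prod is a placeholder for the shared producer struct):

    u32     val = hw_srq->sge_prod;        /* host-endian counter     */
    __le32 *le  = &virt_prod->sge_prod;    /* device-endian location  */
    *le = cpu_to_le32(val);                /* no-op on LE, swap on BE */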
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 39dd6286ba39..2672c32bc2f7 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -47,12 +47,13 @@ void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-
+int qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
struct ib_udata *);
@@ -67,12 +68,12 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 432d6d0fd7f4..ee211423058a 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -619,11 +619,11 @@ struct qib_pportdata {
/* LID mask control */
u8 lmc;
u8 link_width_supported;
- u8 link_speed_supported;
+ u16 link_speed_supported;
u8 link_width_enabled;
- u8 link_speed_enabled;
+ u16 link_speed_enabled;
u8 link_width_active;
- u8 link_speed_active;
+ u16 link_speed_active;
u8 vls_supported;
u8 vls_operational;
/* Rx Polarity inversion (compensate for ~tx on partner) */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a10eab89aee4..189a0ce6056a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1733,9 +1733,9 @@ done:
return;
}
-static void qib_error_tasklet(unsigned long data)
+static void qib_error_tasklet(struct tasklet_struct *t)
{
- struct qib_devdata *dd = (struct qib_devdata *)data;
+ struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
handle_7322_errors(dd);
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
@@ -3537,8 +3537,7 @@ try_intx:
for (i = 0; i < ARRAY_SIZE(redirect); i++)
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd->cspec->main_int_mask = mask;
- tasklet_init(&dd->error_tasklet, qib_error_tasklet,
- (unsigned long)dd);
+ tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index e7789e724f56..f83e331977f8 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2293,76 +2293,50 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- int ret;
-
*out_mad = *in_mad;
if (ccp->class_version != 2) {
ccp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *)ccp);
- goto bail;
+ return reply((struct ib_smp *)ccp);
}
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
case IB_CC_ATTR_CLASSPORTINFO:
- ret = cc_get_classportinfo(ccp, ibdev);
- goto bail;
-
+ return cc_get_classportinfo(ccp, ibdev);
case IB_CC_ATTR_CONGESTION_INFO:
- ret = cc_get_congestion_info(ccp, ibdev, port);
- goto bail;
-
+ return cc_get_congestion_info(ccp, ibdev, port);
case IB_CC_ATTR_CA_CONGESTION_SETTING:
- ret = cc_get_congestion_setting(ccp, ibdev, port);
- goto bail;
-
+ return cc_get_congestion_setting(ccp, ibdev, port);
case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- ret = cc_get_congestion_control_table(ccp, ibdev, port);
- goto bail;
-
- fallthrough;
+ return cc_get_congestion_control_table(ccp, ibdev, port);
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) ccp);
- goto bail;
+ return reply((struct ib_smp *) ccp);
}
-
case IB_MGMT_METHOD_SET:
switch (ccp->attr_id) {
case IB_CC_ATTR_CA_CONGESTION_SETTING:
- ret = cc_set_congestion_setting(ccp, ibdev, port);
- goto bail;
-
+ return cc_set_congestion_setting(ccp, ibdev, port);
case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- ret = cc_set_congestion_control_table(ccp, ibdev, port);
- goto bail;
-
- fallthrough;
+ return cc_set_congestion_control_table(ccp, ibdev, port);
default:
ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) ccp);
- goto bail;
+ return reply((struct ib_smp *) ccp);
}
-
case IB_MGMT_METHOD_GET_RESP:
/*
* The ib_mad module will call us to process responses
* before checking for other consumers.
* Just tell the caller to process it normally.
*/
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- case IB_MGMT_METHOD_TRAP:
- default:
- ccp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) ccp);
+ return IB_MAD_RESULT_SUCCESS;
}
-bail:
- return ret;
+ /* method is unsupported */
+ ccp->status |= IB_SMP_UNSUP_METHOD;
+ return reply((struct ib_smp *) ccp);
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 8f8d61736656..5e86cbf7d70e 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -62,7 +62,7 @@ static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
-static void sdma_sw_clean_up_task(unsigned long);
+static void sdma_sw_clean_up_task(struct tasklet_struct *);
static void unmap_desc(struct qib_pportdata *, unsigned);
static void sdma_get(struct qib_sdma_state *ss)
@@ -119,9 +119,10 @@ static void clear_sdma_activelist(struct qib_pportdata *ppd)
}
}
-static void sdma_sw_clean_up_task(unsigned long opaque)
+static void sdma_sw_clean_up_task(struct tasklet_struct *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
+ struct qib_pportdata *ppd = from_tasklet(ppd, t,
+ sdma_sw_clean_up_task);
unsigned long flags;
spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -436,8 +437,7 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
INIT_LIST_HEAD(&ppd->sdma_activelist);
- tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
- (unsigned long)ppd);
+ tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);
ret = dd->f_init_sdma_regs(ppd);
if (ret)
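
Editor's note: both qib hunks above are instances of the tree-wide tasklet API conversion: the callback now receives the tasklet pointer and recovers its owner with from_tasklet() (a container_of() wrapper), so the unsigned-long data cast disappears. Sketch with a hypothetical owner struct:

    struct foo {
            struct tasklet_struct task;
    };

    static void foo_task_fn(struct tasklet_struct *t)
    {
            struct foo *f = from_tasklet(f, t, task);  /* container_of */
            /* ... */
    }

    /* old: tasklet_init(&f->task, foo_task_fn, (unsigned long)f); */
    tasklet_setup(&f->task, foo_task_fn);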
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 662e7fc7f628..aa2e65fc5cd6 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -315,7 +315,6 @@ static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
if (err)
return err;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
return 0;
@@ -355,7 +354,6 @@ static const struct ib_device_ops usnic_dev_ops = {
.modify_qp = usnic_ib_modify_qp,
.query_device = usnic_ib_query_device,
.query_gid = usnic_ib_query_gid,
- .query_pkey = usnic_ib_query_pkey,
.query_port = usnic_ib_query_port,
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
@@ -427,7 +425,8 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (ret)
goto err_fwd_dealloc;
- if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d"))
+ dma_set_max_seg_size(&dev->dev, SZ_2G);
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index b8a77ce11590..9e961f8ffa10 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -367,7 +367,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props->port_cap_flags = 0;
props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
props->max_mtu = IB_MTU_4096;
@@ -437,16 +436,6 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
-int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- if (index > 0)
- return -EINVAL;
-
- *pkey = 0xffff;
- return 0;
-}
-
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
@@ -460,9 +449,10 @@ int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return 0;
}
-void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
+ return 0;
}
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
@@ -596,9 +586,9 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
}
-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
- return;
+ return 0;
}
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 2aedf78c13cf..11fe1ba6bbc9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -48,10 +48,8 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
-int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
-void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -60,7 +58,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 4f6cc0de7ef9..319546a39a0d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -142,7 +142,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cq;
}
- npages = ib_umem_page_count(cq->umem);
+ npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
} else {
/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
@@ -235,7 +235,7 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
* @cq: the completion queue to destroy.
* @udata: user data or null for kernel object
*/
-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct pvrdma_cq *vcq = to_vcq(cq);
union pvrdma_cmd_req req;
@@ -261,6 +261,7 @@ void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
pvrdma_free_cq(dev, vcq);
atomic_dec(&dev->num_cqs);
+ return 0;
}
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
@@ -375,7 +376,7 @@ retry:
* pvrdma_poll_cq - poll for work completion queue entries
* @ibcq: completion queue
* @num_entries: the maximum number of entries
- * @entry: pointer to work completion array
+ * @wc: pointer to work completion array
*
* @return: number of polled completion entries
*/
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 780fd2dfc07e..fa2a3fa0c3e4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -270,7 +270,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
spin_lock_init(&dev->srq_tbl_lock);
rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
if (ret)
goto err_srq_free;
@@ -854,7 +854,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
goto err_free_resource;
}
}
-
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
pci_set_master(pdev);
/* Map register space */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
index 7944c58ded0e..ba43ad07898c 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -182,17 +182,16 @@ int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
struct ib_umem *umem, u64 offset)
{
+ struct ib_block_iter biter;
u64 i = offset;
int ret = 0;
- struct sg_dma_page_iter sg_iter;
if (offset >= pdir->npages)
return -EINVAL;
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
-
- ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+ rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
+ ret = pvrdma_page_dir_insert_dma(
+ pdir, i, rdma_block_iter_dma_address(&biter));
if (ret)
goto exit;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 77a010e68208..e80848bfb3bd 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -133,7 +133,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- npages = ib_umem_num_pages(umem);
+ npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
npages);
@@ -270,6 +270,7 @@ freemr:
/**
* pvrdma_dereg_mr - deregister a memory region
* @ibmr: memory region
+ * @udata: pointer to user data
*
* @return: 0 on success.
*/
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 9a8f2a9507be..428256c55065 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -232,8 +232,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
switch (init_attr->qp_type) {
case IB_QPT_GSI:
if (init_attr->port_num == 0 ||
- init_attr->port_num > pd->device->phys_port_cnt ||
- udata) {
+ init_attr->port_num > pd->device->phys_port_cnt) {
dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
ret = -EINVAL;
goto err_qp;
@@ -298,9 +297,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
- qp->npages_send = ib_umem_page_count(qp->sumem);
+ qp->npages_send =
+ ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
if (!is_srq)
- qp->npages_recv = ib_umem_page_count(qp->rumem);
+ qp->npages_recv = ib_umem_num_dma_blocks(
+ qp->rumem, PAGE_SIZE);
else
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index d330decfb80a..082208f9aa90 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -90,7 +90,7 @@ int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
/**
* pvrdma_create_srq - create shared receive queue
- * @pd: protection domain
+ * @ibsrq: the IB shared receive queue
* @init_attr: shared receive queue attributes
* @udata: user data
*
@@ -152,7 +152,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->npages = ib_umem_page_count(srq->umem);
+ srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev,
@@ -240,7 +240,7 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
*
* @return: 0 for success.
*/
-void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
struct pvrdma_srq *vsrq = to_vsrq(srq);
union pvrdma_cmd_req req;
@@ -259,6 +259,7 @@ void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
ret);
pvrdma_free_srq(dev, vsrq);
+ return 0;
}
/**
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index ccbded2d26ce..fc412cbfd042 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -479,9 +479,9 @@ err:
* @pd: the protection domain to be released
* @udata: user data or null for kernel object
*
- * @return: 0 on success, otherwise errno.
+ * @return: Always 0
*/
-void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+int pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
union pvrdma_cmd_req req = {};
@@ -498,14 +498,14 @@ void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
ret);
atomic_dec(&dev->num_pds);
+ return 0;
}
/**
* pvrdma_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- * @udata: user data blob
- * @flags: create address handle flags (see enum rdma_create_ah_flags)
+ * @ibah: the IB address handle
+ * @init_attr: the attributes of the AH
+ * @udata: pointer to user data
*
* @return: 0 on success, otherwise errno.
*/
@@ -548,9 +548,10 @@ int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
* @flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
*
*/
-void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
struct pvrdma_dev *dev = to_vdev(ah->device);
atomic_dec(&dev->num_ahs);
+ return 0;
}
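The void -> int conversions running through pvrdma (and rdmavt below) track the ib_device_ops change that lets destroy callbacks report failure back to the core; drivers whose teardown cannot fail simply return 0. The shape of a converted verb, as a sketch with a hypothetical name:

static int my_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	/* driver teardown that cannot fail ... */

	return 0;	/* nonzero would tell the core the object is still live */
}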
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 699b20849a7e..97ed8f952f6e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -399,7 +399,7 @@ int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
@@ -411,19 +411,19 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
struct ib_udata *udata);
int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 75a04b1497c4..b938c4ffa99a 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -132,7 +132,7 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
*
* Return: 0 on success
*/
-void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
{
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
struct rvt_ah *ah = ibah_to_rvtah(ibah);
@@ -143,6 +143,7 @@ void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags)
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
rdma_destroy_ah_attr(&ah->attr);
+ return 0;
}
/**
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 40b7123fec76..5a85edd06491 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -52,7 +52,7 @@
int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
+int rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 04d2e72017fe..19248be14093 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -315,7 +315,7 @@ bail_wc:
*
* Called by ib_destroy_cq() in the generic verbs code.
*/
-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
struct rvt_dev_info *rdi = cq->rdi;
@@ -328,6 +328,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
vfree(cq->kqueue);
+ return 0;
}
/**
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
index 5e26a2eb19a4..feb01e7ee004 100644
--- a/drivers/infiniband/sw/rdmavt/cq.h
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -53,7 +53,7 @@
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 2f7c25fea44a..8490fdb9c91e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -499,7 +499,7 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
rvt_pr_err(rdi,
"%s timeout mr %p pd %p lkey %x refcount %ld\n",
t, mr, mr->pd, mr->lkey,
- atomic_long_read(&mr->refcount.count));
+ atomic_long_read(&mr->refcount.data->count));
rvt_get_mr(mr);
return -EBUSY;
}
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
index a403718f0b5e..01b7abf91520 100644
--- a/drivers/infiniband/sw/rdmavt/pd.c
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -95,11 +95,12 @@ bail:
*
* Return: always 0
*/
-void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock);
+ return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
index 71ba76d72b1d..06a6a38beedc 100644
--- a/drivers/infiniband/sw/rdmavt/pd.h
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -51,6 +51,6 @@
#include <rdma/rdma_vt.h>
int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index f547c115af03..64d98bf238ab 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -332,7 +332,7 @@ int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
* @ibsrq: srq object to destroy
*
*/
-void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
@@ -343,4 +343,5 @@ void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
if (srq->ip)
kref_put(&srq->ip->ref, rvt_release_mmap_info);
kvfree(srq->rq.kwq);
+ return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
index 6427d7d62a9a..d5a1a053b1b9 100644
--- a/drivers/infiniband/sw/rdmavt/srq.h
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -56,6 +56,6 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask,
struct ib_udata *udata);
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
#endif /* DEF_RVTSRQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index f904bb34477a..670a9623b46e 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -95,9 +95,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
if (!rdi)
return rdi;
- rdi->ports = kcalloc(nports,
- sizeof(struct rvt_ibport **),
- GFP_KERNEL);
+ rdi->ports = kcalloc(nports, sizeof(*rdi->ports), GFP_KERNEL);
if (!rdi->ports)
ib_dealloc_device(&rdi->ibdev);
@@ -526,6 +524,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
int rvt_register_device(struct rvt_dev_info *rdi)
{
int ret = 0, i;
+ u64 dma_mask;
if (!rdi)
return -EINVAL;
@@ -581,7 +580,11 @@ int rvt_register_device(struct rvt_dev_info *rdi)
spin_lock_init(&rdi->n_cqs_lock);
/* DMA Operations */
- rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;
+ rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
+ if (ret)
+ goto bail_wss;
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
@@ -629,7 +632,7 @@ int rvt_register_device(struct rvt_dev_info *rdi)
rdi->ibdev.num_comp_vectors = 1;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev));
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
goto bail_wss;
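rvt_register_device() no longer installs dma_virt_ops directly. Passing NULL as the new dma_device argument makes the core fall back to the ib_device's own struct device, which is why that device now needs dma_parms and a coherent mask of its own. A sketch of the pattern, assuming a 64-bit kernel:

	/* sketch: DMA setup for a software RDMA device */
	rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
	ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* NULL: let the core use rdi->ibdev.dev as the DMA device */
	ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev), NULL);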
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 77f2c7cd1216..95f0de0c8b49 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <rdma/rdma_netlink.h>
@@ -279,6 +252,12 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
struct rxe_dev *exists;
int err = 0;
+ if (is_vlan_dev(ndev)) {
+ pr_err("rxe creation allowed on top of a real device only\n");
+ err = -EPERM;
+ goto err;
+ }
+
exists = rxe_get_dev_from_net(ndev);
if (exists) {
ib_device_put(&exists->ib_dev);
@@ -305,13 +284,6 @@ static int __init rxe_module_init(void)
{
int err;
- /* initialize slab caches for managed objects */
- err = rxe_cache_init();
- if (err) {
- pr_err("unable to init object pools\n");
- return err;
- }
-
err = rxe_net_init();
if (err)
return err;
@@ -327,7 +299,6 @@ static void __exit rxe_module_exit(void)
rdma_link_unregister(&rxe_link_ops);
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_cache_exit();
rxe_initialized = false;
pr_info("unloaded\n");
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index cae1b0a24c85..623fd17df02d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_H
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 81ee756c19b8..df0d173d6acb 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
@@ -43,15 +16,24 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
struct rxe_port *port;
+ int type;
port = &rxe->port;
if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
- u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+ if (grh->sgid_index > port->attr.gid_tbl_len) {
+ pr_warn("invalid sgid index = %d\n",
+ grh->sgid_index);
+ return -EINVAL;
+ }
- if (sgid_index > port->attr.gid_tbl_len) {
- pr_warn("invalid sgid index = %d\n", sgid_index);
+ type = rdma_gid_attr_network_type(grh->sgid_attr);
+ if (type < RDMA_NETWORK_IPV4 ||
+ type > RDMA_NETWORK_IPV6) {
+ pr_warn("invalid network type for rdma_rxe = %d\n",
+ type);
return -EINVAL;
}
}
@@ -92,11 +74,29 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{
const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
+ int ibtype;
+ int type;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
&rdma_ah_read_grh(attr)->dgid);
- av->network_type = rdma_gid_attr_network_type(sgid_attr);
+
+ ibtype = rdma_gid_attr_network_type(sgid_attr);
+
+ switch (ibtype) {
+ case RDMA_NETWORK_IPV4:
+ type = RXE_NETWORK_TYPE_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ type = RXE_NETWORK_TYPE_IPV6;
+ break;
+ default:
+ /* not reached - checked in rxe_av_chk_attr */
+ type = 0;
+ break;
+ }
+
+ av->network_type = type;
}
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
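rxe_av_fill_ip_info() now stores rxe's own RXE_NETWORK_TYPE_* encoding in the AV instead of the core's enum rdma_network_type; anything other than IPv4/IPv6 was already rejected by rxe_av_chk_attr(), so the default arm is unreachable. The translation reads as a small helper (a sketch, not code from the patch):

/* core GID network type -> rxe's private AV encoding */
static int to_rxe_network_type(enum rdma_network_type t)
{
	switch (t) {
	case RDMA_NETWORK_IPV4:
		return RXE_NETWORK_TYPE_IPV4;
	case RDMA_NETWORK_IPV6:
		return RXE_NETWORK_TYPE_IPV6;
	default:
		return 0;	/* filtered out by rxe_av_chk_attr() */
	}
}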
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 7b4df0028388..0a1e6393250b 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
@@ -690,9 +663,8 @@ int rxe_completer(void *arg)
*/
/* there is nothing to retry in this case */
- if (!wqe || (wqe->state == wqe_state_posted)) {
+ if (!wqe || (wqe->state == wqe_state_posted))
goto exit;
- }
/* if we've started a retry, don't start another
* retry sequence, unless this is a timeout.
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index ad3090131126..43394c3f29d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/vmalloc.h>
#include "rxe.h"
@@ -66,9 +39,9 @@ err1:
return -EINVAL;
}
-static void rxe_send_complete(unsigned long data)
+static void rxe_send_complete(struct tasklet_struct *t)
{
- struct rxe_cq *cq = (struct rxe_cq *)data;
+ struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
unsigned long flags;
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -107,7 +80,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cq->is_dying = false;
- tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
+ tasklet_setup(&cq->comp_task, rxe_send_complete);
spin_lock_init(&cq->cq_lock);
cq->ibcq.cqe = cqe;
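tasklet_setup()/from_tasklet() replace the old unsigned-long data cookie with a typed back-pointer recovered through container_of(). A self-contained sketch of the pairing, with hypothetical my_obj/my_handler names:

#include <linux/interrupt.h>

struct my_obj {
	struct tasklet_struct task;
};

static void my_handler(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of(); the member name must match */
	struct my_obj *obj = from_tasklet(obj, t, task);

	(void)obj;	/* ... deferred work on obj ... */
}

static void my_obj_init(struct my_obj *obj)
{
	/* no (unsigned long) cast of the context pointer any more */
	tasklet_setup(&obj->task, my_handler);
}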
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
index ce003666b800..3b483b75dfe3 100644
--- a/drivers/infiniband/sw/rxe/rxe_hdr.h
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_HDR_H
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index 636edb5f4cf4..ac9154f0593d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.h b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
index 72c0d63c79e0..49ee6f96656d 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.h
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_HW_COUNTERS_H
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index 39e0be31aab1..66b2aad54bb7 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 39dc3bfa5d5d..0d758760b9ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_LOC_H
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 522a7942c56c..c02315aed8d1 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 7887f623f62c..035f226af133 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/module.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ce24144de16a..d2ce852447c1 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
@@ -79,13 +52,8 @@ static void rxe_mem_init(int access, struct rxe_mem *mem)
u32 lkey = mem->pelem.index << 8 | rxe_get_key();
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
- if (mem->pelem.pool->type == RXE_TYPE_MR) {
- mem->ibmr.lkey = lkey;
- mem->ibmr.rkey = rkey;
- }
-
- mem->lkey = lkey;
- mem->rkey = rkey;
+ mem->ibmr.lkey = lkey;
+ mem->ibmr.rkey = rkey;
mem->state = RXE_MEM_STATE_INVALID;
mem->type = RXE_MEM_TYPE_NONE;
mem->map_shift = ilog2(RXE_BUF_PER_MAP);
@@ -149,7 +117,7 @@ void rxe_mem_init_dma(struct rxe_pd *pd,
{
rxe_mem_init(access, mem);
- mem->pd = pd;
+ mem->ibmr.pd = &pd->ibpd;
mem->access = access;
mem->state = RXE_MEM_STATE_VALID;
mem->type = RXE_MEM_TYPE_DMA;
@@ -218,7 +186,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
}
}
- mem->pd = pd;
+ mem->ibmr.pd = &pd->ibpd;
mem->umem = umem;
mem->access = access;
mem->length = length;
@@ -248,7 +216,7 @@ int rxe_mem_init_fast(struct rxe_pd *pd,
if (err)
goto err1;
- mem->pd = pd;
+ mem->ibmr.pd = &pd->ibpd;
mem->max_buf = max_pages;
mem->state = RXE_MEM_STATE_FREE;
mem->type = RXE_MEM_TYPE_MR;
@@ -368,7 +336,7 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
memcpy(dest, src, length);
if (crcp)
- *crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
+ *crcp = rxe_crc32(to_rdev(mem->ibmr.device),
*crcp, dest, length);
return 0;
@@ -402,7 +370,7 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
memcpy(dest, src, bytes);
if (crcp)
- crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
+ crc = rxe_crc32(to_rdev(mem->ibmr.device),
crc, dest, bytes);
length -= bytes;
@@ -575,9 +543,9 @@ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
if (!mem)
return NULL;
- if (unlikely((type == lookup_local && mem->lkey != key) ||
- (type == lookup_remote && mem->rkey != key) ||
- mem->pd != pd ||
+ if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
+ (type == lookup_remote && mr_rkey(mem) != key) ||
+ mr_pd(mem) != pd ||
(access && !(access & mem->access)) ||
mem->state != RXE_MEM_STATE_VALID)) {
rxe_drop_ref(mem);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 0c3808611f95..34bef7d8e6b4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
@@ -120,7 +93,7 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
recv_sockets.sk6->sk, &fl6,
NULL);
- if (unlikely(IS_ERR(ndst))) {
+ if (IS_ERR(ndst)) {
pr_err_ratelimited("no route to %pI6\n", daddr);
return NULL;
}
@@ -160,14 +133,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
if (dst)
dst_release(dst);
- if (av->network_type == RDMA_NETWORK_IPV4) {
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
struct in_addr *saddr;
struct in_addr *daddr;
saddr = &av->sgid_addr._sockaddr_in.sin_addr;
daddr = &av->dgid_addr._sockaddr_in.sin_addr;
dst = rxe_find_route4(ndev, saddr, daddr);
- } else if (av->network_type == RDMA_NETWORK_IPV6) {
+ } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
struct in6_addr *saddr6;
struct in6_addr *daddr6;
@@ -469,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
if (IS_ERR(attr))
return NULL;
- if (av->network_type == RDMA_NETWORK_IPV4)
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) +
sizeof(struct iphdr);
else
@@ -496,7 +469,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
skb->dev = ndev;
rcu_read_unlock();
- if (av->network_type == RDMA_NETWORK_IPV4)
+ if (av->network_type == RXE_NETWORK_TYPE_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 2ca71d3d245c..45d80d00f86b 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_NET_H
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 4cf11063e0b5..0cb4b01fd910 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index 307604e9c78d..1041ac9a9233 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_OPCODE_H
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 2f381aeafcb5..25ab50d9b7c2 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_PARAM_H
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index fbcbac52290b..b374eb53e2fe 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
@@ -110,62 +83,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
return rxe_type_info[pool->type].name;
}
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
- int i;
- struct rxe_type_info *type;
-
- for (i = 0; i < cnt; i++) {
- type = &rxe_type_info[i];
- if (!(type->flags & RXE_POOL_NO_ALLOC)) {
- kmem_cache_destroy(type->cache);
- type->cache = NULL;
- }
- }
-}
-
-int rxe_cache_init(void)
-{
- int err;
- int i;
- size_t size;
- struct rxe_type_info *type;
-
- for (i = 0; i < RXE_NUM_TYPES; i++) {
- type = &rxe_type_info[i];
- size = ALIGN(type->size, RXE_POOL_ALIGN);
- if (!(type->flags & RXE_POOL_NO_ALLOC)) {
- type->cache =
- kmem_cache_create(type->name, size,
- RXE_POOL_ALIGN,
- RXE_POOL_CACHE_FLAGS, NULL);
- if (!type->cache) {
- pr_err("Unable to init kmem cache for %s\n",
- type->name);
- err = -ENOMEM;
- goto err1;
- }
- }
- }
-
- return 0;
-
-err1:
- rxe_cache_clean(i);
-
- return err;
-}
-
-void rxe_cache_exit(void)
-{
- rxe_cache_clean(RXE_NUM_TYPES);
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -406,7 +323,7 @@ void *rxe_alloc(struct rxe_pool *pool)
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- elem = kmem_cache_zalloc(pool_cache(pool),
+ elem = kzalloc(rxe_type_info[pool->type].size,
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
if (!elem)
@@ -468,7 +385,7 @@ void rxe_elem_release(struct kref *kref)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC))
- kmem_cache_free(pool_cache(pool), elem);
+ kfree(elem);
atomic_dec(&pool->num_elem);
ib_device_put(&pool->rxe->ib_dev);
rxe_pool_put(pool);
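With the per-type kmem_cache gone, pool objects come straight from the generic kmalloc caches, which is also what allowed the rxe_cache_init()/rxe_cache_exit() module hooks to be deleted earlier in this series; for objects allocated this rarely a dedicated slab buys little. The allocation pair reduces to the usual idiom (sketch):

	elem = kzalloc(rxe_type_info[pool->type].size,
		       (pool->flags & RXE_POOL_ATOMIC) ? GFP_ATOMIC : GFP_KERNEL);
	/* ... */
	kfree(elem);	/* was kmem_cache_free(pool_cache(pool), elem) */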
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 2f2cff1cbe43..432745ffc8d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_POOL_H
@@ -69,7 +42,6 @@ struct rxe_type_info {
u32 min_index;
size_t key_offset;
size_t key_size;
- struct kmem_cache *cache;
};
extern struct rxe_type_info rxe_type_info[];
@@ -113,12 +85,6 @@ struct rxe_pool {
size_t key_size;
};
-/* initialize slab caches for managed objects */
-int rxe_cache_init(void);
-
-/* cleanup slab caches for managed objects */
-void rxe_cache_exit(void);
-
/* initialize a pool of objects with given limit on
* number of elements. gets parameters from rxe_type_info
* pool elements will be allocated out of a slab cache
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 6c11c3aeeca6..656a5b4be847 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
@@ -628,9 +601,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
if (mask & IB_QP_QKEY)
qp->attr.qkey = attr->qkey;
- if (mask & IB_QP_AV) {
+ if (mask & IB_QP_AV)
rxe_init_av(&attr->ah_attr, &qp->pri_av);
- }
if (mask & IB_QP_ALT_PATH) {
rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 245040c3a35d..fa69241b1187 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 8ef17d617022..7d434a6837a7 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_QUEUE_H
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 7e123d3c4d09..c9984a28eecc 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
@@ -260,6 +233,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
struct rxe_mc_elem *mce;
struct rxe_qp *qp;
union ib_gid dgid;
+ struct sk_buff *per_qp_skb;
+ struct rxe_pkt_info *per_qp_pkt;
int err;
if (skb->protocol == htons(ETH_P_IP))
@@ -288,26 +263,44 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
if (err)
continue;
- /* if *not* the last qp in the list
- * increase the users of the skb then post to the next qp
+ /* For all but the last qp, create a clone of the skb
+ * and pass it to the qp; the last qp takes the original.
*/
if (mce->qp_list.next != &mcg->qp_list)
- skb_get(skb);
+ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
+ else
+ per_qp_skb = skb;
+
+ if (unlikely(!per_qp_skb))
+ continue;
- pkt->qp = qp;
+ per_qp_pkt = SKB_TO_PKT(per_qp_skb);
+ per_qp_pkt->qp = qp;
rxe_add_ref(qp);
- rxe_rcv_pkt(pkt, skb);
+ rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
}
spin_unlock_bh(&mcg->mcg_lock);
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+ return;
+
err1:
kfree_skb(skb);
}
-static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
+/**
+ * rxe_chk_dgid - validate destination IP address
+ * @rxe: rxe device that received packet
+ * @skb: the received packet buffer
+ *
+ * Accept any loopback packets.
+ * Otherwise extract the destination IP address from the packet and
+ * accept it if it is a multicast address or if it matches an SGID
+ * table entry.
+ */
+static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
const struct ib_gid_attr *gid_attr;
@@ -325,6 +318,9 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
}
+ if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
+ return 0;
+
gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
IB_GID_TYPE_ROCE_UDP_ENCAP,
1, skb->dev);
@@ -349,8 +345,8 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
goto drop;
- if (rxe_match_dgid(rxe, skb) < 0) {
- pr_warn_ratelimited("failed matching dgid\n");
+ if (rxe_chk_dgid(rxe, skb) < 0) {
+ pr_warn_ratelimited("failed checking dgid\n");
goto drop;
}
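
Two behavioural changes in the rxe_recv.c diff above deserve a note. Multicast delivery now gives every QP except the last its own skb_clone() rather than sharing one skb via skb_get(), so each QP owns the skb state it modifies (pkt->qp lives in skb->cb). And the renamed rxe_chk_dgid() accepts multicast destinations before consulting the SGID table, since multicast GIDs have no table entry. A hedged sketch of the clone-per-consumer pattern, with deliver_one() standing in as a hypothetical handler:

	/* Sketch: skb_clone() gives each consumer a private sk_buff
	 * (own header state and cb[]) that shares the packet data;
	 * skb_get() would only bump the user count on one shared skb.
	 */
	static void deliver(struct sk_buff *skb, bool last)
	{
		struct sk_buff *per_skb = last ? skb : skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!per_skb))
			return;		/* clone failed; skip this consumer */

		deliver_one(per_skb);	/* hypothetical per-consumer handler */
	}
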
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 34df2b55e650..af3923bf0a36 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
@@ -644,8 +617,8 @@ next_wqe:
rmr->state = RXE_MEM_STATE_VALID;
rmr->access = wqe->wr.wr.reg.access;
- rmr->lkey = wqe->wr.wr.reg.key;
- rmr->rkey = wqe->wr.wr.reg.key;
+ rmr->ibmr.lkey = wqe->wr.wr.reg.key;
+ rmr->ibmr.rkey = wqe->wr.wr.reg.key;
rmr->iova = wqe->wr.wr.reg.mr->iova;
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c4a8195bf670..c7e3b6a4af38 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/skbuff.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index d8459431534e..41b0d1e11baf 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index 2af31d421bfc..666202ddff48 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include "rxe.h"
@@ -78,6 +51,12 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
return -EINVAL;
}
+ if (is_vlan_dev(ndev)) {
+ pr_err("rxe creation allowed on top of a real device only\n");
+ err = -EPERM;
+ goto err;
+ }
+
exists = rxe_get_dev_from_net(ndev);
if (exists) {
ib_device_put(&exists->ib_dev);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index ecdac3f8fcc9..6951fdcb31bf 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
@@ -55,12 +28,12 @@ int __rxe_do_task(struct rxe_task *task)
* a second caller finds the task already running
* but looks just after the last call to func
*/
-void rxe_do_task(unsigned long data)
+void rxe_do_task(struct tasklet_struct *t)
{
int cont;
int ret;
unsigned long flags;
- struct rxe_task *task = (struct rxe_task *)data;
+ struct rxe_task *task = from_tasklet(task, t, tasklet);
spin_lock_irqsave(&task->state_lock, flags);
switch (task->state) {
@@ -123,7 +96,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
snprintf(task->name, sizeof(task->name), "%s", name);
task->destroyed = false;
- tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
+ tasklet_setup(&task->tasklet, rxe_do_task);
task->state = TASK_STATE_START;
spin_lock_init(&task->state_lock);
@@ -159,7 +132,7 @@ void rxe_run_task(struct rxe_task *task, int sched)
if (sched)
tasklet_schedule(&task->tasklet);
else
- rxe_do_task((unsigned long)task);
+ rxe_do_task(&task->tasklet);
}
void rxe_disable_task(struct rxe_task *task)
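
The rxe_task.c hunks follow the tree-wide tasklet API conversion: tasklet_setup() replaces tasklet_init() plus its cast-to-unsigned-long data argument, and the callback recovers its container with from_tasklet(), a type-checked wrapper around container_of(). A minimal sketch of the pattern, assuming a hypothetical demo_ctx container:

	#include <linux/interrupt.h>

	struct demo_ctx {			/* hypothetical container */
		struct tasklet_struct tasklet;
		int pending;
	};

	static void demo_func(struct tasklet_struct *t)
	{
		/* from_tasklet(var, t, field) == container_of(t, typeof(*var), field) */
		struct demo_ctx *ctx = from_tasklet(ctx, t, tasklet);

		ctx->pending = 0;
	}

	static void demo_init(struct demo_ctx *ctx)
	{
		tasklet_setup(&ctx->tasklet, demo_func);  /* no opaque data argument */
	}
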
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 08ff42d451c6..11d183fd3338 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_TASK_H
@@ -60,7 +33,7 @@ struct rxe_task {
/*
* init rxe_task structure
* arg => parameter to pass to fcn
- * fcn => function to call until it returns != 0
+ * func => function to call until it returns != 0
*/
int rxe_init_task(void *obj, struct rxe_task *task,
void *arg, int (*func)(void *), char *name);
@@ -80,7 +53,7 @@ int __rxe_do_task(struct rxe_task *task);
* work to do someone must reschedule the task before
* leaving
*/
-void rxe_do_task(unsigned long data);
+void rxe_do_task(struct tasklet_struct *t);
/* run a task, else schedule it to run as a tasklet. The decision
* to run or schedule the tasklet is based on the parameter sched.
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 8522e9a3e914..f9c832e82552 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/dma-mapping.h>
@@ -175,11 +148,12 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
-static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
rxe_drop_ref(pd);
+ return 0;
}
static int rxe_create_ah(struct ib_ah *ibah,
@@ -227,11 +201,12 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-static void rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
+static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
rxe_drop_ref(ah);
+ return 0;
}
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
@@ -365,7 +340,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
}
-static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
@@ -374,6 +349,7 @@ static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
rxe_drop_ref(srq->pd);
rxe_drop_ref(srq);
+ return 0;
}
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
@@ -803,13 +779,14 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}
-static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
rxe_cq_disable(cq);
rxe_drop_ref(cq);
+ return 0;
}
static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
@@ -944,7 +921,7 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct rxe_mem *mr = to_rmr(ibmr);
mr->state = RXE_MEM_STATE_ZOMBIE;
- rxe_drop_ref(mr->pd);
+ rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
return 0;
@@ -1141,6 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
+ u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1151,12 +1129,12 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_ops = &dma_virt_ops;
dev->dev.dma_parms = &rxe->dma_parms;
- rxe->dma_parms = (struct device_dma_parameters)
- { .max_segment_size = SZ_2G };
- dma_coerce_mask_and_coherent(&dev->dev,
- dma_get_required_mask(&dev->dev));
+ dma_set_max_seg_size(&dev->dev, UINT_MAX);
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
+ if (err)
+ return err;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
@@ -1205,7 +1183,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
rxe->tfm = tfm;
rdma_set_device_sysfs_group(dev, &rxe_attr_group);
- err = ib_register_device(dev, ibdev_name);
+ err = ib_register_device(dev, ibdev_name, NULL);
if (err)
pr_warn("%s failed with error %d\n", __func__, err);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index c664c7f36ab5..3414b341b709 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef RXE_VERBS_H
@@ -322,12 +295,8 @@ struct rxe_mem {
struct ib_mw ibmw;
};
- struct rxe_pd *pd;
struct ib_umem *umem;
- u32 lkey;
- u32 rkey;
-
enum rxe_mem_state state;
enum rxe_mem_type type;
u64 va;
@@ -465,6 +434,21 @@ static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
}
+static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
+{
+ return to_rpd(mr->ibmr.pd);
+}
+
+static inline u32 mr_lkey(struct rxe_mem *mr)
+{
+ return mr->ibmr.lkey;
+}
+
+static inline u32 mr_rkey(struct rxe_mem *mr)
+{
+ return mr->ibmr.rkey;
+}
+
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index d862bec84376..181e06c1c43d 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -69,7 +69,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
sdev->vendor_part_id = dev_id++;
- rv = ib_register_device(base_dev, name);
+ rv = ib_register_device(base_dev, name, NULL);
if (rv) {
pr_warn("siw: device registration error %d\n", rv);
return rv;
@@ -306,6 +306,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
struct siw_device *sdev = NULL;
struct ib_device *base_dev;
struct device *parent = netdev->dev.parent;
+ u64 dma_mask;
int rv;
if (!parent) {
@@ -382,10 +383,12 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
*/
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
- base_dev->dev.dma_ops = &dma_virt_ops;
base_dev->dev.dma_parms = &sdev->dma_parms;
- sdev->dma_parms = (struct device_dma_parameters)
- { .max_segment_size = SZ_2G };
+ dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
+ dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+ if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
+ goto error;
+
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index adafa1b8bebe..7cf3242ffb41 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -234,12 +234,13 @@ int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
return 0;
}
-void siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct siw_device *sdev = to_siw_dev(pd->device);
siw_dbg_pd(pd, "free PD\n");
atomic_dec(&sdev->num_pd);
+ return 0;
}
void siw_qp_get_ref(struct ib_qp *base_qp)
@@ -1055,7 +1056,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
return rv > 0 ? 0 : rv;
}
-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
struct siw_cq *cq = to_siw_cq(base_cq);
struct siw_device *sdev = to_siw_dev(base_cq->device);
@@ -1073,6 +1074,7 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
atomic_dec(&sdev->num_cq);
vfree(cq->queue);
+ return 0;
}
/*
@@ -1690,7 +1692,7 @@ int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
* QP anymore - the code trusts the RDMA core environment to keep track
* of QP references.
*/
-void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
+int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
struct siw_srq *srq = to_siw_srq(base_srq);
struct siw_device *sdev = to_siw_dev(base_srq->device);
@@ -1702,6 +1704,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
atomic_dec(&sdev->num_srq);
+ return 0;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index d9572275a6b6..637454529357 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -49,7 +49,7 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
union ib_gid *gid);
int siw_alloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
-void siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
+int siw_dealloc_pd(struct ib_pd *base_pd, struct ib_udata *udata);
struct ib_qp *siw_create_qp(struct ib_pd *base_pd,
struct ib_qp_init_attr *attr,
struct ib_udata *udata);
@@ -62,7 +62,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
-void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
+int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata);
int siw_poll_cq(struct ib_cq *base_cq, int num_entries, struct ib_wc *wc);
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags);
struct ib_mr *siw_reg_user_mr(struct ib_pd *base_pd, u64 start, u64 len,
@@ -78,7 +78,7 @@ int siw_create_srq(struct ib_srq *base_srq, struct ib_srq_init_attr *attr,
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask mask, struct ib_udata *udata);
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attr);
-void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
+int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
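
The siw prototypes above change the destroy verbs from void to int as part of an ib_core-wide conversion: a destroy callback may now report failure to the core, and a driver whose teardown cannot fail simply returns 0, as siw and rxe do throughout this series. Sketched shape of a converted callback:

	/* Sketch: a destroy verb after the void -> int conversion. */
	static int demo_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
	{
		/* ... tear down driver-private CQ state, which cannot fail ... */
		return 0;
	}
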
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7c41fb040f7c..8f0b598a46ec 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1647,17 +1647,13 @@ int ipoib_cm_dev_init(struct net_device *dev)
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- int ret;
if (!priv->cm.srq)
return;
ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
- ret = ib_destroy_srq(priv->cm.srq);
- if (ret)
- ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
-
+ ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 64c19f6fa931..12ba7a0fe0b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -124,35 +124,14 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static const struct seq_operations ipoib_mcg_seq_ops = {
+static const struct seq_operations ipoib_mcg_sops = {
.start = ipoib_mcg_seq_start,
.next = ipoib_mcg_seq_next,
.stop = ipoib_mcg_seq_stop,
.show = ipoib_mcg_seq_show,
};
-static int ipoib_mcg_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &ipoib_mcg_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations ipoib_mcg_fops = {
- .owner = THIS_MODULE,
- .open = ipoib_mcg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(ipoib_mcg);
static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
{
@@ -229,35 +208,14 @@ static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static const struct seq_operations ipoib_path_seq_ops = {
+static const struct seq_operations ipoib_path_sops = {
.start = ipoib_path_seq_start,
.next = ipoib_path_seq_next,
.stop = ipoib_path_seq_stop,
.show = ipoib_path_seq_show,
};
-static int ipoib_path_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &ipoib_path_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations ipoib_path_fops = {
- .owner = THIS_MODULE,
- .open = ipoib_path_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
+DEFINE_SEQ_ATTRIBUTE(ipoib_path);
void ipoib_create_debug_files(struct net_device *dev)
{
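
DEFINE_SEQ_ATTRIBUTE(name) generates the open/read/llseek/release boilerplate that the two deleted blocks wrote by hand, which is why the seq_operations tables above were renamed to the name##_sops pattern the macro expects. Roughly (an approximation of the macro expansion, not a verbatim copy), DEFINE_SEQ_ATTRIBUTE(ipoib_mcg) produces:

	static int ipoib_mcg_open(struct inode *inode, struct file *file)
	{
		int ret = seq_open(file, &ipoib_mcg_sops);

		if (!ret && inode->i_private) {
			struct seq_file *seq = file->private_data;

			/* preserve the i_private -> seq->private handoff */
			seq->private = inode->i_private;
		}
		return ret;
	}

	static const struct file_operations ipoib_mcg_fops = {
		.owner   = THIS_MODULE,
		.open    = ipoib_mcg_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = seq_release,
	};
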
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index f772fe8c5b66..abfab89423f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2480,6 +2480,8 @@ static struct net_device *ipoib_add_port(const char *format,
/* call event handler to ensure pkey in sync */
queue_work(ipoib_workqueue, &priv->flush_heavy);
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
result = register_netdev(ndev);
if (result) {
pr_warn("%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 38c984d16996..d5a90a66b45c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -144,6 +144,16 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return 0;
}
+static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->parent)
+ return;
+
+ unregister_netdevice_queue(dev, head);
+}
+
static size_t ipoib_get_size(const struct net_device *dev)
{
return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
@@ -158,6 +168,7 @@ static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.priv_size = sizeof(struct ipoib_dev_priv),
.setup = ipoib_setup_common,
.newlink = ipoib_new_child_link,
+ .dellink = ipoib_del_child_link,
.changelink = ipoib_changelink,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 30865605e098..4c50a87ed7cc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -195,6 +195,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
}
priv = ipoib_priv(ndev);
+ ndev->rtnl_link_ops = ipoib_get_link_ops();
+
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
if (result && ndev->reg_state == NETREG_UNINITIALIZED)
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2f3ebc0a75d9..2bd18b006893 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
- ret = rdma_connect(cma_id, &conn_param);
+ ret = rdma_connect_locked(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
goto failure;
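
rdma_connect_locked() exists for exactly this call-site shape: the connect is issued from inside an RDMA CM event handler (here, on route resolution), where the CM ID's handler mutex is already held, so plain rdma_connect(), which takes that mutex itself, would deadlock. The rtrs-clt hunk below makes the same substitution. Sketch of the rule:

	/* Sketch: pick the connect variant by calling context. */
	static int demo_on_route_resolved(struct rdma_cm_id *cm_id,
					  struct rdma_conn_param *param)
	{
		/*
		 * Inside the cm_id event handler the handler mutex is
		 * already held: use the _locked variant. From process
		 * context outside the handler, call rdma_connect().
		 */
		return rdma_connect_locked(cm_id, param);
	}
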
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 695f701dc43d..436e17f1d0e5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1141,12 +1141,7 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
* multiple data-outs on the same command can arrive -
* so post the buffer before hand
*/
- rc = isert_post_recv(isert_conn, rx_desc);
- if (rc) {
- isert_err("ib_post_recv failed with %d\n", rc);
- return rc;
- }
- return 0;
+ return isert_post_recv(isert_conn, rx_desc);
}
static int
@@ -1723,10 +1718,8 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
int ret;
ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
- if (ret) {
- isert_err("ib_post_recv failed with %d\n", ret);
+ if (ret)
return ret;
- }
ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
if (ret) {
@@ -2098,10 +2091,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
&isert_cmd->tx_desc.send_wr);
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
- if (rc) {
- isert_err("ib_post_recv failed with %d\n", rc);
+ if (rc)
return rc;
- }
chain_wr = &isert_cmd->tx_desc.send_wr;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 298b747d0330..ac4c49cbf153 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -312,7 +312,7 @@ static struct attribute *rtrs_clt_stats_attrs[] = {
NULL
};
-static struct attribute_group rtrs_clt_stats_attr_group = {
+static const struct attribute_group rtrs_clt_stats_attr_group = {
.attrs = rtrs_clt_stats_attrs,
};
@@ -388,7 +388,7 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
NULL,
};
-static struct attribute_group rtrs_clt_sess_attr_group = {
+static const struct attribute_group rtrs_clt_sess_attr_group = {
.attrs = rtrs_clt_sess_attrs,
};
@@ -460,7 +460,7 @@ static struct attribute *rtrs_clt_attrs[] = {
NULL,
};
-static struct attribute_group rtrs_clt_attr_group = {
+static const struct attribute_group rtrs_clt_attr_group = {
.attrs = rtrs_clt_attrs,
};
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 776e89231c52..f298adc02acb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
uuid_copy(&msg.sess_uuid, &sess->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
- err = rdma_connect(con->c.cm_id, &param);
+ err = rdma_connect_locked(con->c.cm_id, &param);
if (err)
- rtrs_err(clt, "rdma_connect(): %d\n", err);
+ rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
return err;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 0a93c87ef92b..b8e43dc4d95a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -115,7 +115,6 @@ struct rtrs_sess {
/* rtrs information unit */
struct rtrs_iu {
- struct list_head list;
struct ib_cqe cqe;
dma_addr_t dma_addr;
void *buf;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index cf6a2be61695..07fbb063555d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -135,7 +135,7 @@ static struct attribute *rtrs_srv_sess_attrs[] = {
NULL,
};
-static struct attribute_group rtrs_srv_sess_attr_group = {
+static const struct attribute_group rtrs_srv_sess_attr_group = {
.attrs = rtrs_srv_sess_attrs,
};
@@ -148,7 +148,7 @@ static struct attribute *rtrs_srv_stats_attrs[] = {
NULL,
};
-static struct attribute_group rtrs_srv_stats_attr_group = {
+static const struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 28f6414dfa3d..d6f93601712e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -16,6 +16,7 @@
#include "rtrs-srv.h"
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");
@@ -31,6 +32,7 @@ MODULE_LICENSE("GPL");
static struct rtrs_rdma_dev_pd dev_pd;
static mempool_t *chunk_pool;
struct class *rtrs_dev_class;
+static struct rtrs_srv_ib_ctx ib_ctx;
static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
@@ -2042,6 +2044,70 @@ static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
kfree(ctx);
}
+static int rtrs_srv_add_one(struct ib_device *device)
+{
+ struct rtrs_srv_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&ib_ctx.ib_dev_mutex);
+ if (ib_ctx.ib_dev_count)
+ goto out;
+
+ /*
+ * Since our CM IDs are NOT bound to any ib device we will create them
+ * only once
+ */
+ ctx = ib_ctx.srv_ctx;
+ ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
+ if (ret) {
+ /*
+ * We errored out here.
+ * According to the ib code, if we encounter an error here then the
+ * error code is ignored, and no more calls to our ops are made.
+ */
+ pr_err("Failed to initialize RDMA connection");
+ goto err_out;
+ }
+
+out:
+ /*
+ * Keep track of the number of ib devices added
+ */
+ ib_ctx.ib_dev_count++;
+
+err_out:
+ mutex_unlock(&ib_ctx.ib_dev_mutex);
+ return ret;
+}
+
+static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ mutex_lock(&ib_ctx.ib_dev_mutex);
+ ib_ctx.ib_dev_count--;
+
+ if (ib_ctx.ib_dev_count)
+ goto out;
+
+ /*
+ * Since our CM IDs are NOT bound to any ib device we will remove them
+ * only once, when the last device is removed
+ */
+ ctx = ib_ctx.srv_ctx;
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+
+out:
+ mutex_unlock(&ib_ctx.ib_dev_mutex);
+}
+
+static struct ib_client rtrs_srv_client = {
+ .name = "rtrs_server",
+ .add = rtrs_srv_add_one,
+ .remove = rtrs_srv_remove_one
+};
+
/**
* rtrs_srv_open() - open RTRS server context
* @ops: callback functions
@@ -2060,7 +2126,11 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
if (!ctx)
return ERR_PTR(-ENOMEM);
- err = rtrs_srv_rdma_init(ctx, port);
+ mutex_init(&ib_ctx.ib_dev_mutex);
+ ib_ctx.srv_ctx = ctx;
+ ib_ctx.port = port;
+
+ err = ib_register_client(&rtrs_srv_client);
if (err) {
free_srv_ctx(ctx);
return ERR_PTR(err);
@@ -2099,8 +2169,8 @@ static void close_ctx(struct rtrs_srv_ctx *ctx)
*/
void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
{
- rdma_destroy_id(ctx->cm_id_ip);
- rdma_destroy_id(ctx->cm_id_ib);
+ ib_unregister_client(&rtrs_srv_client);
+ mutex_destroy(&ib_ctx.ib_dev_mutex);
close_ctx(ctx);
free_srv_ctx(ctx);
}
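
Registering as an ib_client is what lets rtrs-srv defer CM ID creation until an RDMA device actually exists: ib_register_client() invokes .add once for every device already in the system and again for each later addition, .remove runs symmetrically, and the ib_dev_count/ib_dev_mutex pair turns those per-device calls into first-add/last-remove semantics. A skeleton of that pattern, where the demo_* names and globals are hypothetical:

	/* Sketch: ib_client with first-add / last-remove semantics. */
	static DEFINE_MUTEX(demo_mutex);
	static int demo_dev_count;

	static int demo_add_one(struct ib_device *device)
	{
		int ret = 0;

		mutex_lock(&demo_mutex);
		if (demo_dev_count++ == 0)
			ret = demo_global_init();	/* hypothetical one-time setup */
		if (ret)
			demo_dev_count--;
		mutex_unlock(&demo_mutex);
		return ret;
	}

	static void demo_remove_one(struct ib_device *device, void *client_data)
	{
		mutex_lock(&demo_mutex);
		if (--demo_dev_count == 0)
			demo_global_cleanup();		/* hypothetical teardown */
		mutex_unlock(&demo_mutex);
	}

	static struct ib_client demo_client = {
		.name   = "demo",
		.add    = demo_add_one,
		.remove = demo_remove_one,
	};
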
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index dc95b0932f0d..08b0b8a6eebe 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -118,6 +118,13 @@ struct rtrs_srv_ctx {
struct list_head srv_list;
};
+struct rtrs_srv_ib_ctx {
+ struct rtrs_srv_ctx *srv_ctx;
+ u16 port;
+ struct mutex ib_dev_mutex;
+ int ib_dev_count;
+};
+
extern struct class *rtrs_dev_class;
void close_sess(struct rtrs_srv_sess *sess);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0065eb17ae36..53a8becac827 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
/**
* srpt_unregister_mad_agent - unregister MAD callback functions
* @sdev: SRPT HCA pointer.
+ * @port_cnt: number of ports with a registered MAD agent
*
* Note: It is safe to call this function more than once for the same device.
*/
-static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
{
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
@@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
struct srpt_port *sport;
int i;
- for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ for (i = 1; i <= port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
if (sport->mad_agent) {
@@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device)
if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
- goto err_event;
+ i--;
+ goto err_port;
}
}
@@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device)
pr_debug("added %s.\n", dev_name(&device->dev));
return 0;
-err_event:
+err_port:
+ srpt_unregister_mad_agent(sdev, i);
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
@@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- srpt_unregister_mad_agent(sdev);
+ srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
ib_unregister_event_handler(&sdev->event_handler);
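
The srpt change fixes a classic partial-unwind bug: when MAD registration fails on port i, the old error path unregistered agents on all phys_port_cnt ports, including ports that never registered. Decrementing i before jumping to the new err_port label limits the cleanup to the ports that actually succeeded. The general shape, sketched with a hypothetical register_port()/unregister_port() pair:

	/* Sketch: unwind only the loop iterations that completed. */
	static int register_all(int nports)
	{
		int i, ret;

		for (i = 1; i <= nports; i++) {
			ret = register_port(i);
			if (ret) {
				while (--i >= 1)	/* undo ports 1..i-1 only */
					unregister_port(i);
				return ret;
			}
		}
		return 0;
	}
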
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 41435a699b53..bdeb010efee6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -256,6 +256,7 @@ enum rdma_ch_state {
* @rdma_cm: See below.
* @rdma_cm.cm_id: RDMA CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @cq_size: Number of CQEs in @cq.
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index e494295d1c7b..95f90699d2b1 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,7 +28,6 @@
struct evdev {
int open;
struct input_handle handle;
- wait_queue_head_t wait;
struct evdev_client __rcu *grab;
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
@@ -43,6 +42,7 @@ struct evdev_client {
unsigned int tail;
unsigned int packet_head; /* [future] position of the first element of next packet */
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+ wait_queue_head_t wait;
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
@@ -245,7 +245,6 @@ static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
ktime_t *ev_time)
{
- struct evdev *evdev = client->evdev;
const struct input_value *v;
struct input_event event;
struct timespec64 ts;
@@ -282,7 +281,7 @@ static void evdev_pass_values(struct evdev_client *client,
spin_unlock(&client->buffer_lock);
if (wakeup)
- wake_up_interruptible_poll(&evdev->wait,
+ wake_up_interruptible_poll(&client->wait,
EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
}
@@ -426,11 +425,11 @@ static void evdev_hangup(struct evdev *evdev)
struct evdev_client *client;
spin_lock(&evdev->client_lock);
- list_for_each_entry(client, &evdev->client_list, node)
+ list_for_each_entry(client, &evdev->client_list, node) {
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+ wake_up_interruptible_poll(&client->wait, EPOLLHUP | EPOLLERR);
+ }
spin_unlock(&evdev->client_lock);
-
- wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
}
static int evdev_release(struct inode *inode, struct file *file)
@@ -479,6 +478,7 @@ static int evdev_open(struct inode *inode, struct file *file)
if (!client)
return -ENOMEM;
+ init_waitqueue_head(&client->wait);
client->bufsize = bufsize;
spin_lock_init(&client->buffer_lock);
client->evdev = evdev;
@@ -595,7 +595,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
break;
if (!(file->f_flags & O_NONBLOCK)) {
- error = wait_event_interruptible(evdev->wait,
+ error = wait_event_interruptible(client->wait,
client->packet_head != client->tail ||
!evdev->exist || client->revoked);
if (error)
@@ -613,7 +613,7 @@ static __poll_t evdev_poll(struct file *file, poll_table *wait)
struct evdev *evdev = client->evdev;
__poll_t mask;
- poll_wait(file, &evdev->wait, wait);
+ poll_wait(file, &client->wait, wait);
if (evdev->exist && !client->revoked)
mask = EPOLLOUT | EPOLLWRNORM;
@@ -946,7 +946,7 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
client->revoked = true;
evdev_ungrab(evdev, client);
input_flush_device(&evdev->handle, file);
- wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
+ wake_up_interruptible_poll(&client->wait, EPOLLHUP | EPOLLERR);
return 0;
}
@@ -1358,7 +1358,6 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
INIT_LIST_HEAD(&evdev->client_list);
spin_lock_init(&evdev->client_lock);
mutex_init(&evdev->mutex);
- init_waitqueue_head(&evdev->wait);
evdev->exist = true;
dev_no = minor;
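
The evdev rework moves the wait_queue_head_t from the shared struct evdev into each struct evdev_client, so event delivery, revocation, and hangup wake only the sleepers of the client whose state changed (note the per-client wake added inside the evdev_hangup() list walk) instead of every reader on the device. The ownership pattern, sketched with a hypothetical demo_client:

	/* Sketch: one waitqueue per reader instead of per device. */
	struct demo_client {
		wait_queue_head_t wait;
		bool data_ready;
	};

	static void demo_client_init(struct demo_client *c)
	{
		init_waitqueue_head(&c->wait);
		c->data_ready = false;
	}

	static void demo_push(struct demo_client *c)
	{
		c->data_ready = true;
		/* wakes only this client's sleepers, not the whole device */
		wake_up_interruptible_poll(&c->wait, EPOLLIN | EPOLLRDNORM);
	}
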
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index f699538bdac4..44fe6f2f063c 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -323,11 +323,14 @@ static int adjust_dual(int *begin, int step, int *end, int eq, int mu)
p = begin + step;
s = p == end ? f + 1 : *p;
- for (; p != end; p += step)
- if (*p < f)
- s = f, f = *p;
- else if (*p < s)
+ for (; p != end; p += step) {
+ if (*p < f) {
+ s = f;
+ f = *p;
+ } else if (*p < s) {
s = *p;
+ }
+ }
c = (f + s + 1) / 2;
if (c == 0 || (c > mu && (!eq || mu > 0)))
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index eb031b7a4866..b080f0cfb068 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -42,6 +42,16 @@ config JOYSTICK_A3D
To compile this driver as a module, choose M here: the
module will be called a3d.
+config JOYSTICK_ADC
+ tristate "Simple joystick connected over ADC"
+ depends on IIO
+ select IIO_BUFFER_CB
+ help
+ Say Y here if you have a simple joystick connected over ADC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adc-joystick.
+
config JOYSTICK_ADI
tristate "Logitech ADI digital joysticks and gamepads"
select GAMEPORT
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 8656023f6ef5..58232b3057d3 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -6,6 +6,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_JOYSTICK_A3D) += a3d.o
+obj-$(CONFIG_JOYSTICK_ADC) += adc-joystick.o
obj-$(CONFIG_JOYSTICK_ADI) += adi.o
obj-$(CONFIG_JOYSTICK_AMIGA) += amijoy.o
obj-$(CONFIG_JOYSTICK_AS5011) += as5011.o
diff --git a/drivers/input/joystick/adc-joystick.c b/drivers/input/joystick/adc-joystick.c
new file mode 100644
index 000000000000..78ebca7d400a
--- /dev/null
+++ b/drivers/input/joystick/adc-joystick.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Input driver for joysticks connected over ADC.
+ * Copyright (c) 2019-2020 Artur Rojek <contact@artur-rojek.eu>
+ */
+#include <linux/ctype.h>
+#include <linux/input.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include <asm/unaligned.h>
+
+struct adc_joystick_axis {
+ u32 code;
+ s32 range[2];
+ s32 fuzz;
+ s32 flat;
+};
+
+struct adc_joystick {
+ struct input_dev *input;
+ struct iio_cb_buffer *buffer;
+ struct adc_joystick_axis *axes;
+ struct iio_channel *chans;
+ int num_chans;
+};
+
+static int adc_joystick_handle(const void *data, void *private)
+{
+ struct adc_joystick *joy = private;
+ enum iio_endian endianness;
+ int bytes, msb, val, idx, i;
+ const u16 *data_u16;
+ bool sign;
+
+ bytes = joy->chans[0].channel->scan_type.storagebits >> 3;
+
+ for (i = 0; i < joy->num_chans; ++i) {
+ idx = joy->chans[i].channel->scan_index;
+ endianness = joy->chans[i].channel->scan_type.endianness;
+ msb = joy->chans[i].channel->scan_type.realbits - 1;
+ sign = tolower(joy->chans[i].channel->scan_type.sign) == 's';
+
+ switch (bytes) {
+ case 1:
+ val = ((const u8 *)data)[idx];
+ break;
+ case 2:
+ data_u16 = (const u16 *)data + idx;
+
+ /*
+ * Data is aligned to the sample size by IIO core.
+ * Call `get_unaligned_xe16` to hide type casting.
+ */
+ if (endianness == IIO_BE)
+ val = get_unaligned_be16(data_u16);
+ else if (endianness == IIO_LE)
+ val = get_unaligned_le16(data_u16);
+ else /* IIO_CPU */
+ val = *data_u16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val >>= joy->chans[i].channel->scan_type.shift;
+ if (sign)
+ val = sign_extend32(val, msb);
+ else
+ val &= GENMASK(msb, 0);
+ input_report_abs(joy->input, joy->axes[i].code, val);
+ }
+
+ input_sync(joy->input);
+
+ return 0;
+}
+
+static int adc_joystick_open(struct input_dev *dev)
+{
+ struct adc_joystick *joy = input_get_drvdata(dev);
+ struct device *devp = &dev->dev;
+ int ret;
+
+ ret = iio_channel_start_all_cb(joy->buffer);
+ if (ret)
+ dev_err(devp, "Unable to start callback buffer: %d\n", ret);
+
+ return ret;
+}
+
+static void adc_joystick_close(struct input_dev *dev)
+{
+ struct adc_joystick *joy = input_get_drvdata(dev);
+
+ iio_channel_stop_all_cb(joy->buffer);
+}
+
+static void adc_joystick_cleanup(void *data)
+{
+ iio_channel_release_all_cb(data);
+}
+
+static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
+{
+ struct adc_joystick_axis *axes;
+ struct fwnode_handle *child;
+ int num_axes, error, i;
+
+ num_axes = device_get_child_node_count(dev);
+ if (!num_axes) {
+ dev_err(dev, "Unable to find child nodes\n");
+ return -EINVAL;
+ }
+
+ if (num_axes != joy->num_chans) {
+ dev_err(dev, "Got %d child nodes for %d channels\n",
+ num_axes, joy->num_chans);
+ return -EINVAL;
+ }
+
+ axes = devm_kmalloc_array(dev, num_axes, sizeof(*axes), GFP_KERNEL);
+ if (!axes)
+ return -ENOMEM;
+
+ device_for_each_child_node(dev, child) {
+ error = fwnode_property_read_u32(child, "reg", &i);
+ if (error) {
+ dev_err(dev, "reg invalid or missing\n");
+ goto err_fwnode_put;
+ }
+
+ if (i >= num_axes) {
+ error = -EINVAL;
+ dev_err(dev, "No matching axis for reg %d\n", i);
+ goto err_fwnode_put;
+ }
+
+ error = fwnode_property_read_u32(child, "linux,code",
+ &axes[i].code);
+ if (error) {
+ dev_err(dev, "linux,code invalid or missing\n");
+ goto err_fwnode_put;
+ }
+
+ error = fwnode_property_read_u32_array(child, "abs-range",
+ axes[i].range, 2);
+ if (error) {
+ dev_err(dev, "abs-range invalid or missing\n");
+ goto err_fwnode_put;
+ }
+
+ fwnode_property_read_u32(child, "abs-fuzz", &axes[i].fuzz);
+ fwnode_property_read_u32(child, "abs-flat", &axes[i].flat);
+
+ input_set_abs_params(joy->input, axes[i].code,
+ axes[i].range[0], axes[i].range[1],
+ axes[i].fuzz, axes[i].flat);
+ input_set_capability(joy->input, EV_ABS, axes[i].code);
+ }
+
+ joy->axes = axes;
+
+ return 0;
+
+err_fwnode_put:
+ fwnode_handle_put(child);
+ return error;
+}
+
+static int adc_joystick_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adc_joystick *joy;
+ struct input_dev *input;
+ int error;
+ int bits;
+ int i;
+
+ joy = devm_kzalloc(dev, sizeof(*joy), GFP_KERNEL);
+ if (!joy)
+ return -ENOMEM;
+
+ joy->chans = devm_iio_channel_get_all(dev);
+ if (IS_ERR(joy->chans)) {
+ error = PTR_ERR(joy->chans);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Unable to get IIO channels");
+ return error;
+ }
+
+ /* Count how many channels we got. The array is NULL-terminated. */
+ for (i = 0; joy->chans[i].indio_dev; i++) {
+ bits = joy->chans[i].channel->scan_type.storagebits;
+ if (!bits || bits > 16) {
+ dev_err(dev, "Unsupported channel storage size\n");
+ return -EINVAL;
+ }
+ if (bits != joy->chans[0].channel->scan_type.storagebits) {
+ dev_err(dev, "Channels must have equal storage size\n");
+ return -EINVAL;
+ }
+ }
+ joy->num_chans = i;
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Unable to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ joy->input = input;
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input->open = adc_joystick_open;
+ input->close = adc_joystick_close;
+
+ error = adc_joystick_set_axes(dev, joy);
+ if (error)
+ return error;
+
+ input_set_drvdata(input, joy);
+ error = input_register_device(input);
+ if (error) {
+ dev_err(dev, "Unable to register input device\n");
+ return error;
+ }
+
+ joy->buffer = iio_channel_get_all_cb(dev, adc_joystick_handle, joy);
+ if (IS_ERR(joy->buffer)) {
+ dev_err(dev, "Unable to allocate callback buffer\n");
+ return PTR_ERR(joy->buffer);
+ }
+
+ error = devm_add_action_or_reset(dev, adc_joystick_cleanup, joy->buffer);
+ if (error) {
+ dev_err(dev, "Unable to add action\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id adc_joystick_of_match[] = {
+ { .compatible = "adc-joystick", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adc_joystick_of_match);
+
+static struct platform_driver adc_joystick_driver = {
+ .driver = {
+ .name = "adc-joystick",
+ .of_match_table = adc_joystick_of_match,
+ },
+ .probe = adc_joystick_probe,
+};
+module_platform_driver(adc_joystick_driver);
+
+MODULE_DESCRIPTION("Input driver for joysticks connected over ADC");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index fc1793ca2f17..15d17c717081 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -348,7 +348,7 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev,
params->event_type = event_type;
ret = cros_ec_cmd_xfer_status(ec_dev, msg);
- if (ret == -ENOTSUPP) {
+ if (ret == -ENOPROTOOPT) {
/* With older ECs we just return 0 for everything */
memset(result, 0, result_size);
ret = 0;
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 7c70492d9d6b..f831f01501d5 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -250,8 +250,8 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
}
keypad->irq = platform_get_irq(pdev, 0);
- if (!keypad->irq) {
- err = -ENXIO;
+ if (keypad->irq < 0) {
+ err = keypad->irq;
goto failed_free;
}
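
This hunk and the omap4/twl4030/sun4i ones below fix the same pattern:
platform_get_irq() signals failure with a negative errno, so testing for zero
misses real errors, and substituting -ENXIO discards useful codes such as
-EPROBE_DEFER. The corrected idiom, sketched:

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            int irq;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* propagate errno, incl. -EPROBE_DEFER */

            /* ... request the irq, finish probing ... */
            return 0;
    }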
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 94c94d7f5155..d6c924032aaa 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -240,10 +240,8 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "no keyboard irq assigned\n");
- return -EINVAL;
- }
+ if (irq < 0)
+ return irq;
keypad_data = kzalloc(sizeof(struct omap4_keypad), GFP_KERNEL);
if (!keypad_data) {
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index af3a6824f1a4..77e0743a3cf8 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -50,7 +50,7 @@ struct twl4030_keypad {
bool autorepeat;
unsigned int n_rows;
unsigned int n_cols;
- unsigned int irq;
+ int irq;
struct device *dbg_dev;
struct input_dev *input;
@@ -376,10 +376,8 @@ static int twl4030_kp_probe(struct platform_device *pdev)
}
kp->irq = platform_get_irq(pdev, 0);
- if (!kp->irq) {
- dev_err(&pdev->dev, "no keyboard irq assigned\n");
- return -EINVAL;
- }
+ if (kp->irq < 0)
+ return kp->irq;
error = matrix_keypad_build_keymap(keymap_data, NULL,
TWL4030_MAX_ROWS,
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 08520b3a18b8..cae1a3fae83a 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
@@ -23,6 +24,7 @@ struct soc_button_info {
unsigned int event_code;
bool autorepeat;
bool wakeup;
+ bool active_low;
};
struct soc_device_data {
@@ -42,22 +44,65 @@ struct soc_button_data {
};
/*
+ * Some 2-in-1s which use the soc_button_array driver have this ugly issue in
+ * their DSDT where the _LID method modifies the irq-type settings of the GPIOs
+ * used for the power and home buttons. The intent of this AML code is to
+ * disable these buttons when the lid is closed.
+ * The AML does this by directly poking the GPIO controller's registers. This is
+ * problematic because when re-enabling the irq, which happens whenever _LID
+ * gets called with the lid open (e.g. on boot and on resume), it sets the
+ * irq-type to IRQ_TYPE_LEVEL_LOW, whereas the gpio-keys driver programs the
+ * type to, and expects it to be, IRQ_TYPE_EDGE_BOTH.
+ * To work around this we don't set gpio_keys_button.gpio on these 2-in-1s,
+ * instead we get the irq for the GPIO ourselves, configure it as
+ * IRQ_TYPE_LEVEL_LOW (to match how the _LID AML code configures it) and pass
+ * the irq in gpio_keys_button.irq. Below is a list of affected devices.
+ */
+static const struct dmi_system_id dmi_use_low_level_irq[] = {
+ {
+ /*
+ * Acer Switch 10 SW5-012. _LID method messes with home- and
+ * power-button GPIO IRQ settings. When (re-)enabling the irq
+ * it ORs in its own flags without clearing the previously set
+ * ones, leading to an irq-type of IRQ_TYPE_LEVEL_LOW |
+ * IRQ_TYPE_LEVEL_HIGH, causing a continuous interrupt storm.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ },
+ {
+ /*
+ * Acer One S1003. _LID method messes with power-button GPIO
+ * IRQ settings, leading to a non working power-button.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "One S1003"),
+ },
+ },
+ {} /* Terminating entry */
+};
+
+/*
* Get the Nth GPIO number from the ACPI object.
*/
-static int soc_button_lookup_gpio(struct device *dev, int acpi_index)
+static int soc_button_lookup_gpio(struct device *dev, int acpi_index,
+ int *gpio_ret, int *irq_ret)
{
struct gpio_desc *desc;
- int gpio;
desc = gpiod_get_index(dev, NULL, acpi_index, GPIOD_ASIS);
if (IS_ERR(desc))
return PTR_ERR(desc);
- gpio = desc_to_gpio(desc);
+ *gpio_ret = desc_to_gpio(desc);
+ *irq_ret = gpiod_to_irq(desc);
gpiod_put(desc);
- return gpio;
+ return 0;
}
static struct platform_device *
@@ -69,9 +114,8 @@ soc_button_device_create(struct platform_device *pdev,
struct platform_device *pd;
struct gpio_keys_button *gpio_keys;
struct gpio_keys_platform_data *gpio_keys_pdata;
+ int error, gpio, irq;
int n_buttons = 0;
- int gpio;
- int error;
for (info = button_info; info->name; info++)
if (info->autorepeat == autorepeat)
@@ -91,8 +135,8 @@ soc_button_device_create(struct platform_device *pdev,
if (info->autorepeat != autorepeat)
continue;
- gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
- if (!gpio_is_valid(gpio)) {
+ error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
+ if (error || irq < 0) {
/*
* Skip GPIO if not present. Note we deliberately
* ignore -EPROBE_DEFER errors here. On some devices
@@ -107,10 +151,18 @@ soc_button_device_create(struct platform_device *pdev,
continue;
}
+ /* See dmi_use_low_level_irq[] comment */
+ if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) {
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ gpio_keys[n_buttons].irq = irq;
+ gpio_keys[n_buttons].gpio = -ENOENT;
+ } else {
+ gpio_keys[n_buttons].gpio = gpio;
+ }
+
gpio_keys[n_buttons].type = info->event_type;
gpio_keys[n_buttons].code = info->event_code;
- gpio_keys[n_buttons].gpio = gpio;
- gpio_keys[n_buttons].active_low = 1;
+ gpio_keys[n_buttons].active_low = info->active_low;
gpio_keys[n_buttons].desc = info->name;
gpio_keys[n_buttons].wakeup = info->wakeup;
/* These devices often use cheap buttons, use 50 ms debounce */
@@ -173,6 +225,7 @@ static int soc_button_parse_btn_desc(struct device *dev,
}
info->event_type = EV_KEY;
+ info->active_low = true;
info->acpi_index =
soc_button_get_acpi_object_int(&desc->package.elements[1]);
upage = soc_button_get_acpi_object_int(&desc->package.elements[3]);
@@ -383,11 +436,11 @@ static int soc_button_probe(struct platform_device *pdev)
* Platforms"
*/
static const struct soc_button_info soc_button_PNP0C40[] = {
- { "power", 0, EV_KEY, KEY_POWER, false, true },
- { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
- { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
- { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
- { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
+ { "power", 0, EV_KEY, KEY_POWER, false, true, true },
+ { "home", 1, EV_KEY, KEY_LEFTMETA, false, true, true },
+ { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
+ { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
+ { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false, true },
{ }
};
@@ -395,6 +448,15 @@ static const struct soc_device_data soc_device_PNP0C40 = {
.button_info = soc_button_PNP0C40,
};
+static const struct soc_button_info soc_button_INT33D3[] = {
+ { "tablet_mode", 0, EV_SW, SW_TABLET_MODE, false, false, false },
+ { }
+};
+
+static const struct soc_device_data soc_device_INT33D3 = {
+ .button_info = soc_button_INT33D3,
+};
+
/*
* Special device check for Surface Book 2 and Surface Pro (2017).
* Both, the Surface Pro 4 (surfacepro3_button.c) and the above mentioned
@@ -444,9 +506,9 @@ static int soc_device_check_MSHW0040(struct device *dev)
* Obtained from DSDT/testing.
*/
static const struct soc_button_info soc_button_MSHW0040[] = {
- { "power", 0, EV_KEY, KEY_POWER, false, true },
- { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
- { "volume_down", 4, EV_KEY, KEY_VOLUMEDOWN, true, false },
+ { "power", 0, EV_KEY, KEY_POWER, false, true, true },
+ { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false, true },
+ { "volume_down", 4, EV_KEY, KEY_VOLUMEDOWN, true, false, true },
{ }
};
@@ -457,6 +519,8 @@ static const struct soc_device_data soc_device_MSHW0040 = {
static const struct acpi_device_id soc_button_acpi_match[] = {
{ "PNP0C40", (unsigned long)&soc_device_PNP0C40 },
+ { "INT33D3", (unsigned long)&soc_device_INT33D3 },
+ { "ID9001", (unsigned long)&soc_device_INT33D3 },
{ "ACPI0011", 0 },
/* Microsoft Surface Devices (5th and 6th generation) */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 4b81b2d0fe06..82577095e175 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -179,12 +179,14 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
- "LEN0099", /* X1 Extreme 1st */
+ "LEN0099", /* X1 Extreme Gen 1 / P1 Gen 1 */
"LEN009b", /* T580 */
+ "LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
+ "LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
@@ -1752,7 +1754,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
.kernel_tracking = false,
.topbuttonpad = topbuttonpad,
},
- .f30_data = {
+ .gpio_data = {
.buttonpad = SYN_CAP_CLICKPAD(info->ext_cap_0c),
.trackstick_buttons =
!!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10),
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index a212ff706f74..16119f760d11 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -100,6 +100,14 @@ config RMI4_F34
device via the firmware loader interface. This is triggered using a
sysfs attribute.
+config RMI4_F3A
+ bool "RMI4 Function 3A (GPIO)"
+ help
+ Say Y here if you want to add support for RMI4 function 3A.
+
+ Function 3A provides GPIO support for RMI4 devices. This includes
+ support for buttons on TouchPads and ClickPads.
+
config RMI4_F54
bool "RMI4 Function 54 (Analog diagnostics)"
depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index f17631656987..02f14c846861 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -10,6 +10,7 @@ rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
+rmi_core-$(CONFIG_RMI4_F3A) += rmi_f3a.o
rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
rmi_core-$(CONFIG_RMI4_F55) += rmi_f55.o
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index af706a583656..47d1b97ed6cf 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -365,6 +365,9 @@ static struct rmi_function_handler *fn_handlers[] = {
#ifdef CONFIG_RMI4_F34
&rmi_f34_handler,
#endif
+#ifdef CONFIG_RMI4_F3A
+ &rmi_f3a_handler,
+#endif
#ifdef CONFIG_RMI4_F54
&rmi_f54_handler,
#endif
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 65bfaa95e193..1c6c6086c0e5 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -135,6 +135,7 @@ extern struct rmi_function_handler rmi_f11_handler;
extern struct rmi_function_handler rmi_f12_handler;
extern struct rmi_function_handler rmi_f30_handler;
extern struct rmi_function_handler rmi_f34_handler;
+extern struct rmi_function_handler rmi_f3a_handler;
extern struct rmi_function_handler rmi_f54_handler;
extern struct rmi_function_handler rmi_f55_handler;
#endif
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index a90dad1d9ac7..35045f161dc2 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -168,17 +168,17 @@ static int rmi_f30_config(struct rmi_function *fn)
rmi_get_platform_data(fn->rmi_dev);
int error;
- /* can happen if f30_data.disable is set */
+ /* can happen if gpio_data.disable is set */
if (!f30)
return 0;
- if (pdata->f30_data.trackstick_buttons) {
+ if (pdata->gpio_data.trackstick_buttons) {
/* Try [re-]establish link to F03. */
f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
f30->trackstick_buttons = f30->f03 != NULL;
}
- if (pdata->f30_data.disable) {
+ if (pdata->gpio_data.disable) {
drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
} else {
/* Write Control Register values back to device */
@@ -245,10 +245,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
if (!rmi_f30_is_valid_button(i, f30->ctrl))
continue;
- if (pdata->f30_data.trackstick_buttons &&
+ if (pdata->gpio_data.trackstick_buttons &&
i >= TRACKSTICK_RANGE_START && i < TRACKSTICK_RANGE_END) {
f30->gpioled_key_map[i] = trackstick_button++;
- } else if (!pdata->f30_data.buttonpad || !button_mapped) {
+ } else if (!pdata->gpio_data.buttonpad || !button_mapped) {
f30->gpioled_key_map[i] = button;
input_set_capability(input, EV_KEY, button++);
button_mapped = true;
@@ -264,7 +264,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
* but I am not sure, so use only the pdata info and the number of
* mapped buttons.
*/
- if (pdata->f30_data.buttonpad || (button - BTN_LEFT == 1))
+ if (pdata->gpio_data.buttonpad || (button - BTN_LEFT == 1))
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
return 0;
@@ -372,7 +372,7 @@ static int rmi_f30_probe(struct rmi_function *fn)
struct f30_data *f30;
int error;
- if (pdata->f30_data.disable)
+ if (pdata->gpio_data.disable)
return 0;
if (!drv_data->input) {
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
index 74f7c6f214ff..8d7ec9d89b18 100644
--- a/drivers/input/rmi4/rmi_f34v7.c
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -1364,9 +1364,14 @@ int rmi_f34v7_probe(struct f34_data *f34)
f34->bl_version = 6;
} else if (f34->bootloader_id[1] == 7) {
f34->bl_version = 7;
+ } else if (f34->bootloader_id[1] == 8) {
+ f34->bl_version = 8;
} else {
- dev_err(&f34->fn->dev, "%s: Unrecognized bootloader version\n",
- __func__);
+ dev_err(&f34->fn->dev,
+ "%s: Unrecognized bootloader version: %d (%c) %d (%c)\n",
+ __func__,
+ f34->bootloader_id[0], f34->bootloader_id[0],
+ f34->bootloader_id[1], f34->bootloader_id[1]);
return -EINVAL;
}
diff --git a/drivers/input/rmi4/rmi_f3a.c b/drivers/input/rmi4/rmi_f3a.c
new file mode 100644
index 000000000000..0e8baed84dbb
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f3a.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020 Synaptics Incorporated
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define RMI_F3A_MAX_GPIO_COUNT 128
+#define RMI_F3A_MAX_REG_SIZE DIV_ROUND_UP(RMI_F3A_MAX_GPIO_COUNT, 8)
+
+/* Defs for Query 0 */
+#define RMI_F3A_GPIO_COUNT 0x7F
+
+#define RMI_F3A_DATA_REGS_MAX_SIZE RMI_F3A_MAX_REG_SIZE
+
+#define TRACKSTICK_RANGE_START 3
+#define TRACKSTICK_RANGE_END 6
+
+struct f3a_data {
+ /* Query Data */
+ u8 gpio_count;
+
+ u8 register_count;
+
+ u8 data_regs[RMI_F3A_DATA_REGS_MAX_SIZE];
+ u16 *gpio_key_map;
+
+ struct input_dev *input;
+
+ struct rmi_function *f03;
+ bool trackstick_buttons;
+};
+
+static void rmi_f3a_report_button(struct rmi_function *fn,
+ struct f3a_data *f3a, unsigned int button)
+{
+ u16 key_code = f3a->gpio_key_map[button];
+ bool key_down = !(f3a->data_regs[0] & BIT(button));
+
+ if (f3a->trackstick_buttons &&
+ button >= TRACKSTICK_RANGE_START &&
+ button <= TRACKSTICK_RANGE_END) {
+ rmi_f03_overwrite_button(f3a->f03, key_code, key_down);
+ } else {
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: call input report key (0x%04x) value (0x%02x)",
+ __func__, key_code, key_down);
+ input_report_key(f3a->input, key_code, key_down);
+ }
+}
+
+static irqreturn_t rmi_f3a_attention(int irq, void *ctx)
+{
+ struct rmi_function *fn = ctx;
+ struct f3a_data *f3a = dev_get_drvdata(&fn->dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+ int error;
+ int i;
+
+ if (drvdata->attn_data.data) {
+ if (drvdata->attn_data.size < f3a->register_count) {
+ dev_warn(&fn->dev,
+ "F3A interrupted, but data is missing\n");
+ return IRQ_HANDLED;
+ }
+ memcpy(f3a->data_regs, drvdata->attn_data.data,
+ f3a->register_count);
+ drvdata->attn_data.data += f3a->register_count;
+ drvdata->attn_data.size -= f3a->register_count;
+ } else {
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr,
+ f3a->data_regs, f3a->register_count);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: Failed to read F3a data registers: %d\n",
+ __func__, error);
+ return IRQ_RETVAL(error);
+ }
+ }
+
+ for (i = 0; i < f3a->gpio_count; i++)
+ if (f3a->gpio_key_map[i] != KEY_RESERVED)
+ rmi_f3a_report_button(fn, f3a, i);
+ if (f3a->trackstick_buttons)
+ rmi_f03_commit_buttons(f3a->f03);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_f3a_config(struct rmi_function *fn)
+{
+ struct f3a_data *f3a = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+
+ if (!f3a)
+ return 0;
+
+ if (pdata->gpio_data.trackstick_buttons) {
+ /* Try [re-]establish link to F03. */
+ f3a->f03 = rmi_find_function(fn->rmi_dev, 0x03);
+ f3a->trackstick_buttons = f3a->f03 != NULL;
+ }
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static bool rmi_f3a_is_valid_button(int button, struct f3a_data *f3a,
+ u8 *query1_regs, u8 *ctrl1_regs)
+{
+ /* gpio exist && direction input */
+ return (query1_regs[0] & BIT(button)) && !(ctrl1_regs[0] & BIT(button));
+}
+
+static int rmi_f3a_map_gpios(struct rmi_function *fn, struct f3a_data *f3a,
+ u8 *query1_regs, u8 *ctrl1_regs)
+{
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+ struct input_dev *input = f3a->input;
+ unsigned int button = BTN_LEFT;
+ unsigned int trackstick_button = BTN_LEFT;
+ bool button_mapped = false;
+ int i;
+ int button_count = min_t(u8, f3a->gpio_count, TRACKSTICK_RANGE_END);
+
+ f3a->gpio_key_map = devm_kcalloc(&fn->dev,
+ button_count,
+ sizeof(f3a->gpio_key_map[0]),
+ GFP_KERNEL);
+ if (!f3a->gpio_key_map) {
+ dev_err(&fn->dev, "Failed to allocate gpio map memory.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < button_count; i++) {
+ if (!rmi_f3a_is_valid_button(i, f3a, query1_regs, ctrl1_regs))
+ continue;
+
+ if (pdata->gpio_data.trackstick_buttons &&
+ i >= TRACKSTICK_RANGE_START &&
+ i < TRACKSTICK_RANGE_END) {
+ f3a->gpio_key_map[i] = trackstick_button++;
+ } else if (!pdata->gpio_data.buttonpad || !button_mapped) {
+ f3a->gpio_key_map[i] = button;
+ input_set_capability(input, EV_KEY, button++);
+ button_mapped = true;
+ }
+ }
+ input->keycode = f3a->gpio_key_map;
+ input->keycodesize = sizeof(f3a->gpio_key_map[0]);
+ input->keycodemax = f3a->gpio_count;
+
+ if (pdata->gpio_data.buttonpad || (button - BTN_LEFT == 1))
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+ return 0;
+}
+
+static int rmi_f3a_initialize(struct rmi_function *fn, struct f3a_data *f3a)
+{
+ u8 query1[RMI_F3A_MAX_REG_SIZE];
+ u8 ctrl1[RMI_F3A_MAX_REG_SIZE];
+ u8 buf;
+ int error;
+
+ error = rmi_read(fn->rmi_dev, fn->fd.query_base_addr, &buf);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read general info register: %d\n",
+ error);
+ return -ENODEV;
+ }
+
+ f3a->gpio_count = buf & RMI_F3A_GPIO_COUNT;
+ f3a->register_count = DIV_ROUND_UP(f3a->gpio_count, 8);
+
+ /* Query1 -> gpio exist */
+ error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr + 1,
+ query1, f3a->register_count);
+ if (error) {
+ dev_err(&fn->dev, "Failed to read query1 register\n");
+ return error;
+ }
+
+ /* Ctrl1 -> gpio direction */
+ error = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr + 1,
+ ctrl1, f3a->register_count);
+ if (error) {
+ dev_err(&fn->dev, "Failed to read control1 register\n");
+ return error;
+ }
+
+ error = rmi_f3a_map_gpios(fn, f3a, query1, ctrl1);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int rmi_f3a_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f3a_data *f3a;
+ int error;
+
+ if (!drv_data->input) {
+ dev_info(&fn->dev, "F3A: no input device found, ignoring\n");
+ return -ENXIO;
+ }
+
+ f3a = devm_kzalloc(&fn->dev, sizeof(*f3a), GFP_KERNEL);
+ if (!f3a)
+ return -ENOMEM;
+
+ f3a->input = drv_data->input;
+
+ error = rmi_f3a_initialize(fn, f3a);
+ if (error)
+ return error;
+
+ dev_set_drvdata(&fn->dev, f3a);
+ return 0;
+}
+
+struct rmi_function_handler rmi_f3a_handler = {
+ .driver = {
+ .name = "rmi4_f3a",
+ },
+ .func = 0x3a,
+ .probe = rmi_f3a_probe,
+ .config = rmi_f3a_config,
+ .attention = rmi_f3a_attention,
+};
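
The F3A data registers pack one GPIO per bit, active low: a pressed button
pulls its line to 0, hence key_down = !(data_regs[0] & BIT(button)) above. A
tiny standalone decode of that convention:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
            unsigned char data_regs0 = 0x06; /* bit 0 low = pressed */
            int button;

            for (button = 0; button < 3; button++)
                    printf("button %d: %s\n", button,
                           !(data_regs0 & BIT(button)) ? "down" : "up");
            return 0;
    }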
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 65f4e9d62a67..d36e89d6fc54 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(hil_mlc_unregister);
static LIST_HEAD(hil_mlcs);
static DEFINE_RWLOCK(hil_mlcs_lock);
static struct timer_list hil_mlcs_kicker;
-static int hil_mlcs_probe;
+static int hil_mlcs_probe, hil_mlc_stop;
static void hil_mlcs_process(unsigned long unused);
static DECLARE_TASKLET_DISABLED_OLD(hil_mlcs_tasklet, hil_mlcs_process);
@@ -702,9 +702,13 @@ static int hilse_donode(hil_mlc *mlc)
if (!mlc->ostarted) {
mlc->ostarted = 1;
mlc->opacket = pack;
- mlc->out(mlc);
+ rc = mlc->out(mlc);
nextidx = HILSEN_DOZE;
write_unlock_irqrestore(&mlc->lock, flags);
+ if (rc) {
+ hil_mlc_stop = 1;
+ return 1;
+ }
break;
}
mlc->ostarted = 0;
@@ -715,8 +719,13 @@ static int hilse_donode(hil_mlc *mlc)
case HILSE_CTS:
write_lock_irqsave(&mlc->lock, flags);
- nextidx = mlc->cts(mlc) ? node->bad : node->good;
+ rc = mlc->cts(mlc);
+ nextidx = rc ? node->bad : node->good;
write_unlock_irqrestore(&mlc->lock, flags);
+ if (rc) {
+ hil_mlc_stop = 1;
+ return 1;
+ }
break;
default:
@@ -780,6 +789,12 @@ static void hil_mlcs_process(unsigned long unused)
static void hil_mlcs_timer(struct timer_list *unused)
{
+ if (hil_mlc_stop) {
+ /* could not send packet - stop immediately. */
+ pr_warn(PREFIX "HIL seems stuck - Disabling HIL MLC.\n");
+ return;
+ }
+
hil_mlcs_probe = 1;
tasklet_schedule(&hil_mlcs_tasklet);
/* Re-insert the periodic task. */
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index 232d30c825bd..3e85e9039374 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -210,7 +210,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
priv->tseq[2] = 1;
priv->tseq[3] = 0;
priv->tseq[4] = 0;
- __hp_sdc_enqueue_transaction(&priv->trans);
+ return __hp_sdc_enqueue_transaction(&priv->trans);
busy:
return 1;
done:
@@ -219,7 +219,7 @@ static int hp_sdc_mlc_cts(hil_mlc *mlc)
return 0;
}
-static void hp_sdc_mlc_out(hil_mlc *mlc)
+static int hp_sdc_mlc_out(hil_mlc *mlc)
{
struct hp_sdc_mlc_priv_s *priv;
@@ -234,7 +234,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
do_data:
if (priv->emtestmode) {
up(&mlc->osem);
- return;
+ return 0;
}
/* Shouldn't be sending commands when loop may be busy */
BUG_ON(down_trylock(&mlc->csem));
@@ -296,7 +296,7 @@ static void hp_sdc_mlc_out(hil_mlc *mlc)
BUG_ON(down_trylock(&mlc->csem));
}
enqueue:
- hp_sdc_enqueue_transaction(&priv->trans);
+ return hp_sdc_enqueue_transaction(&priv->trans);
}
static int __init hp_sdc_mlc_init(void)
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index df4e9f6f4529..1a7b72a9016d 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -75,8 +75,8 @@ struct synth_kbd_keystroke {
#define HK_MAXIMUM_MESSAGE_SIZE 256
-#define KBD_VSC_SEND_RING_BUFFER_SIZE (40 * 1024)
-#define KBD_VSC_RECV_RING_BUFFER_SIZE (40 * 1024)
+#define KBD_VSC_SEND_RING_BUFFER_SIZE VMBUS_RING_SIZE(36 * 1024)
+#define KBD_VSC_RECV_RING_BUFFER_SIZE VMBUS_RING_SIZE(36 * 1024)
#define XTKBD_EMUL0 0xe0
#define XTKBD_EMUL1 0xe1
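
The new constants keep the total ring allocation unchanged: assuming the usual
definition VMBUS_RING_SIZE(payload) == PAGE_ALIGN(sizeof(struct hv_ring_buffer)
+ payload) with a one-page ring header, 36 KiB of payload still yields the old
40 KiB ring on 4 KiB pages, but the usable payload size is now explicit:

    #include <stdio.h>

    #define PAGE_SIZE       4096u
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define HEADER_SIZE     4096u   /* assumed sizeof(struct hv_ring_buffer) */
    #define VMBUS_RING_SIZE(payload) PAGE_ALIGN(HEADER_SIZE + (payload))

    int main(void)
    {
            printf("%u\n", VMBUS_RING_SIZE(36 * 1024)); /* 40960, i.e. 40 KiB */
            return 0;
    }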
diff --git a/drivers/input/serio/sun4i-ps2.c b/drivers/input/serio/sun4i-ps2.c
index a681a2c04e39..f15ed3dcdb9b 100644
--- a/drivers/input/serio/sun4i-ps2.c
+++ b/drivers/input/serio/sun4i-ps2.c
@@ -211,7 +211,6 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
struct sun4i_ps2data *drvdata;
struct serio *serio;
struct device *dev = &pdev->dev;
- unsigned int irq;
int error;
drvdata = kzalloc(sizeof(struct sun4i_ps2data), GFP_KERNEL);
@@ -264,14 +263,12 @@ static int sun4i_ps2_probe(struct platform_device *pdev)
writel(0, drvdata->reg_base + PS2_REG_GCTL);
/* Get IRQ for the device */
- irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(dev, "no IRQ found\n");
- error = -ENXIO;
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ error = drvdata->irq;
goto err_disable_clk;
}
- drvdata->irq = irq;
drvdata->serio = serio;
drvdata->dev = dev;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 35c867b2d9a7..f012fe746df0 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1322,4 +1322,16 @@ config TOUCHSCREEN_IQS5XX
To compile this driver as a module, choose M here: the
module will be called iqs5xx.
+config TOUCHSCREEN_ZINITIX
+ tristate "Zinitix touchscreen support"
+ depends on I2C
+ help
+ Say Y here if you have a touchscreen using a Zinitix bt541
+ or a compatible controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zinitix.
+
endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 30d1e1b42492..6233541e9173 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -111,3 +111,4 @@ obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_RASPBERRYPI_FW) += raspberrypi-ts.o
obj-$(CONFIG_TOUCHSCREEN_IQS5XX) += iqs5xx.o
+obj-$(CONFIG_TOUCHSCREEN_ZINITIX) += zinitix.o
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index b0bd5bb079be..50c348297e38 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -90,7 +90,7 @@
/* FW read command, 0x53 0x?? 0x0, 0x01 */
#define E_ELAN_INFO_FW_VER 0x00
#define E_ELAN_INFO_BC_VER 0x10
-#define E_ELAN_INFO_REK 0xE0
+#define E_ELAN_INFO_REK 0xD0
#define E_ELAN_INFO_TEST_VER 0xE0
#define E_ELAN_INFO_FW_ID 0xF0
#define E_INFO_OSR 0xD6
@@ -134,6 +134,7 @@ struct elants_data {
u8 bc_version;
u8 iap_version;
u16 hw_version;
+ u8 major_res;
unsigned int x_res; /* resolution in units/mm */
unsigned int y_res;
unsigned int x_max;
@@ -459,6 +460,9 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
rows = resp[2] + resp[6] + resp[10];
cols = resp[3] + resp[7] + resp[11];
+ /* Get report resolution value of ABS_MT_TOUCH_MAJOR */
+ ts->major_res = resp[16];
+
/* Process mm_to_pixel information */
error = elants_i2c_execute_command(client,
get_osr_cmd, sizeof(get_osr_cmd),
@@ -1325,6 +1329,8 @@ static int elants_i2c_probe(struct i2c_client *client,
0, MT_TOOL_PALM, 0, 0);
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
+ if (ts->major_res > 0)
+ input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
touchscreen_parse_properties(ts->input, true, &ts->prop);
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 9ed258854349..cd369f9ac5e6 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -315,9 +315,8 @@ static irqreturn_t adc_irq_fn(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int imx6ul_tsc_open(struct input_dev *input_dev)
+static int imx6ul_tsc_start(struct imx6ul_tsc *tsc)
{
- struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
int err;
err = clk_prepare_enable(tsc->adc_clk);
@@ -349,16 +348,29 @@ disable_adc_clk:
return err;
}
-static void imx6ul_tsc_close(struct input_dev *input_dev)
+static void imx6ul_tsc_stop(struct imx6ul_tsc *tsc)
{
- struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
-
imx6ul_tsc_disable(tsc);
clk_disable_unprepare(tsc->tsc_clk);
clk_disable_unprepare(tsc->adc_clk);
}
+
+static int imx6ul_tsc_open(struct input_dev *input_dev)
+{
+ struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+
+ return imx6ul_tsc_start(tsc);
+}
+
+static void imx6ul_tsc_close(struct input_dev *input_dev)
+{
+ struct imx6ul_tsc *tsc = input_get_drvdata(input_dev);
+
+ imx6ul_tsc_stop(tsc);
+}
+
static int imx6ul_tsc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -509,12 +521,8 @@ static int __maybe_unused imx6ul_tsc_suspend(struct device *dev)
mutex_lock(&input_dev->mutex);
- if (input_dev->users) {
- imx6ul_tsc_disable(tsc);
-
- clk_disable_unprepare(tsc->tsc_clk);
- clk_disable_unprepare(tsc->adc_clk);
- }
+ if (input_dev->users)
+ imx6ul_tsc_stop(tsc);
mutex_unlock(&input_dev->mutex);
@@ -530,22 +538,11 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
mutex_lock(&input_dev->mutex);
- if (input_dev->users) {
- retval = clk_prepare_enable(tsc->adc_clk);
- if (retval)
- goto out;
-
- retval = clk_prepare_enable(tsc->tsc_clk);
- if (retval) {
- clk_disable_unprepare(tsc->adc_clk);
- goto out;
- }
-
- retval = imx6ul_tsc_init(tsc);
- }
+ if (input_dev->users)
+ retval = imx6ul_tsc_start(tsc);
-out:
mutex_unlock(&input_dev->mutex);
+
return retval;
}
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index fe245439adee..e694a9b2b1e5 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -51,6 +51,7 @@
/* Touch relative info */
#define RM_MAX_RETRIES 3
+#define RM_RETRY_DELAY_MS 20
#define RM_MAX_TOUCH_NUM 10
#define RM_BOOT_DELAY_MS 100
@@ -136,83 +137,82 @@ struct raydium_data {
bool wake_irq_enabled;
};
-static int raydium_i2c_send(struct i2c_client *client,
- u8 addr, const void *data, size_t len)
+static int raydium_i2c_xfer(struct i2c_client *client,
+ u32 addr, void *data, size_t len, bool is_read)
{
- u8 *buf;
- int tries = 0;
- int ret;
-
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- buf[0] = addr;
- memcpy(buf + 1, data, len);
-
- do {
- ret = i2c_master_send(client, buf, len + 1);
- if (likely(ret == len + 1))
- break;
-
- msleep(20);
- } while (++tries < RM_MAX_RETRIES);
-
- kfree(buf);
-
- if (unlikely(ret != len + 1)) {
- if (ret >= 0)
- ret = -EIO;
- dev_err(&client->dev, "%s failed: %d\n", __func__, ret);
- return ret;
- }
+ struct raydium_bank_switch_header {
+ u8 cmd;
+ __be32 be_addr;
+ } __packed header = {
+ .cmd = RM_CMD_BANK_SWITCH,
+ .be_addr = cpu_to_be32(addr),
+ };
- return 0;
-}
+ u8 reg_addr = addr & 0xff;
-static int raydium_i2c_read(struct i2c_client *client,
- u8 addr, void *data, size_t len)
-{
struct i2c_msg xfer[] = {
{
.addr = client->addr,
+ .len = sizeof(header),
+ .buf = (u8 *)&header,
+ },
+ {
+ .addr = client->addr,
.len = 1,
- .buf = &addr,
+ .buf = &reg_addr,
},
{
.addr = client->addr,
- .flags = I2C_M_RD,
.len = len,
.buf = data,
+ .flags = is_read ? I2C_M_RD : 0,
}
};
+
+ /*
+ * If address is greater than 255, then RM_CMD_BANK_SWITCH needs to be
+ * sent first. Otherwise, skip the header, i.e. xfer[0].
+ */
+ int xfer_start_idx = (addr > 0xff) ? 0 : 1;
+ size_t xfer_count = ARRAY_SIZE(xfer) - xfer_start_idx;
int ret;
- ret = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
- if (unlikely(ret != ARRAY_SIZE(xfer)))
- return ret < 0 ? ret : -EIO;
+ ret = i2c_transfer(client->adapter, &xfer[xfer_start_idx], xfer_count);
+ if (likely(ret == xfer_count))
+ return 0;
+
+ return ret < 0 ? ret : -EIO;
+}
- return 0;
+static int raydium_i2c_send(struct i2c_client *client,
+ u32 addr, const void *data, size_t len)
+{
+ int tries = 0;
+ int error;
+
+ do {
+ error = raydium_i2c_xfer(client, addr, (void *)data, len,
+ false);
+ if (likely(!error))
+ return 0;
+
+ msleep(RM_RETRY_DELAY_MS);
+ } while (++tries < RM_MAX_RETRIES);
+
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
}
-static int raydium_i2c_read_message(struct i2c_client *client,
- u32 addr, void *data, size_t len)
+static int raydium_i2c_read(struct i2c_client *client,
+ u32 addr, void *data, size_t len)
{
- __be32 be_addr;
size_t xfer_len;
int error;
while (len) {
xfer_len = min_t(size_t, len, RM_MAX_READ_SIZE);
-
- be_addr = cpu_to_be32(addr);
-
- error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH,
- &be_addr, sizeof(be_addr));
- if (!error)
- error = raydium_i2c_read(client, addr & 0xff,
- data, xfer_len);
- if (error)
+ error = raydium_i2c_xfer(client, addr, data, xfer_len, true);
+ if (unlikely(error))
return error;
len -= xfer_len;
@@ -223,27 +223,13 @@ static int raydium_i2c_read_message(struct i2c_client *client,
return 0;
}
-static int raydium_i2c_send_message(struct i2c_client *client,
- u32 addr, const void *data, size_t len)
-{
- __be32 be_addr = cpu_to_be32(addr);
- int error;
-
- error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH,
- &be_addr, sizeof(be_addr));
- if (!error)
- error = raydium_i2c_send(client, addr & 0xff, data, len);
-
- return error;
-}
-
static int raydium_i2c_sw_reset(struct i2c_client *client)
{
const u8 soft_rst_cmd = 0x01;
int error;
- error = raydium_i2c_send_message(client, RM_RESET_MSG_ADDR,
- &soft_rst_cmd, sizeof(soft_rst_cmd));
+ error = raydium_i2c_send(client, RM_RESET_MSG_ADDR, &soft_rst_cmd,
+ sizeof(soft_rst_cmd));
if (error) {
dev_err(&client->dev, "software reset failed: %d\n", error);
return error;
@@ -295,9 +281,8 @@ static int raydium_i2c_query_ts_info(struct raydium_data *ts)
if (error)
continue;
- error = raydium_i2c_read_message(client,
- le32_to_cpu(query_bank_addr),
- &ts->info, sizeof(ts->info));
+ error = raydium_i2c_read(client, le32_to_cpu(query_bank_addr),
+ &ts->info, sizeof(ts->info));
if (error)
continue;
@@ -834,8 +819,8 @@ static irqreturn_t raydium_i2c_irq(int irq, void *_dev)
if (ts->boot_mode != RAYDIUM_TS_MAIN)
goto out;
- error = raydium_i2c_read_message(ts->client, ts->data_bank_addr,
- ts->report_data, ts->pkg_size);
+ error = raydium_i2c_read(ts->client, ts->data_bank_addr,
+ ts->report_data, ts->pkg_size);
if (error)
goto out;
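
The unified raydium_i2c_xfer() prepends a packed 5-byte bank-switch header (a
command byte plus the 32-bit address in big-endian order, as cpu_to_be32()
produces) only when the address does not fit in the single register byte. The
framing, sketched in plain C with a made-up command value:

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_BANK_SWITCH 0xAA    /* illustrative; the driver uses RM_CMD_BANK_SWITCH */

    static size_t build_header(uint32_t addr, uint8_t out[5])
    {
            if (addr <= 0xff)
                    return 0;       /* no bank switch, start at the register byte */

            out[0] = CMD_BANK_SWITCH;
            out[1] = addr >> 24;    /* big-endian address */
            out[2] = addr >> 16;
            out[3] = addr >> 8;
            out[4] = addr;
            return 5;
    }

    int main(void)
    {
            uint8_t hdr[5];
            uint32_t addr = 0x0001236c;

            printf("header bytes: %zu, register byte: 0x%02x\n",
                   build_header(addr, hdr), (unsigned int)(addr & 0xff));
            return 0;
    }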
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 82920ff46f72..2e70c0b79444 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -20,10 +20,43 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/adc.h>
-#include <plat/regs-adc.h>
+#include <linux/soc/samsung/s3c-adc.h>
#include <linux/platform_data/touchscreen-s3c2410.h>
+#define S3C2410_ADCCON (0x00)
+#define S3C2410_ADCTSC (0x04)
+#define S3C2410_ADCDLY (0x08)
+#define S3C2410_ADCDAT0 (0x0C)
+#define S3C2410_ADCDAT1 (0x10)
+#define S3C64XX_ADCUPDN (0x14)
+#define S3C2443_ADCMUX (0x18)
+#define S3C64XX_ADCCLRINT (0x18)
+#define S5P_ADCMUX (0x1C)
+#define S3C64XX_ADCCLRINTPNDNUP (0x20)
+
+/* ADCTSC Register Bits */
+#define S3C2443_ADCTSC_UD_SEN (1 << 8)
+#define S3C2410_ADCTSC_YM_SEN (1 << 7)
+#define S3C2410_ADCTSC_YP_SEN (1 << 6)
+#define S3C2410_ADCTSC_XM_SEN (1 << 5)
+#define S3C2410_ADCTSC_XP_SEN (1 << 4)
+#define S3C2410_ADCTSC_PULL_UP_DISABLE (1 << 3)
+#define S3C2410_ADCTSC_AUTO_PST (1 << 2)
+#define S3C2410_ADCTSC_XY_PST(x) (((x) & 0x3) << 0)
+
+/* ADCDAT0 Bits */
+#define S3C2410_ADCDAT0_UPDOWN (1 << 15)
+#define S3C2410_ADCDAT0_AUTO_PST (1 << 14)
+#define S3C2410_ADCDAT0_XY_PST (0x3 << 12)
+#define S3C2410_ADCDAT0_XPDATA_MASK (0x03FF)
+
+/* ADCDAT1 Bits */
+#define S3C2410_ADCDAT1_UPDOWN (1 << 15)
+#define S3C2410_ADCDAT1_AUTO_PST (1 << 14)
+#define S3C2410_ADCDAT1_XY_PST (0x3 << 12)
+#define S3C2410_ADCDAT1_YPDATA_MASK (0x03FF)
+
+
#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
#define INT_DOWN (0)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index df946869d4cd..9a64e1dbc04a 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -479,7 +479,7 @@ static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
mutex_lock(&sdata->mutex);
- if (value & sdata->hover_enabled)
+ if (value && sdata->hover_enabled)
goto out;
if (sdata->running)
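
The stmfts fix swaps a bitwise AND for a logical one: value & sdata->hover_enabled
is only true when both operands have bit 0 set, which is not the intended
"user asked to enable and it is already enabled". Illustrated:

    #include <stdio.h>

    int main(void)
    {
            unsigned long value = 2;        /* user wrote a nonzero value */
            int hover_enabled = 1;          /* feature already on */

            printf("bitwise: %lu\n", value & hover_enabled);  /* 0 - misses */
            printf("logical: %d\n", value && hover_enabled);  /* 1 - correct */
            return 0;
    }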
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
new file mode 100644
index 000000000000..1acc2eb2bcb3
--- /dev/null
+++ b/drivers/input/touchscreen/zinitix.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/* Register Map */
+
+#define BT541_SWRESET_CMD 0x0000
+#define BT541_WAKEUP_CMD 0x0001
+
+#define BT541_IDLE_CMD 0x0004
+#define BT541_SLEEP_CMD 0x0005
+
+#define BT541_CLEAR_INT_STATUS_CMD 0x0003
+#define BT541_CALIBRATE_CMD 0x0006
+#define BT541_SAVE_STATUS_CMD 0x0007
+#define BT541_SAVE_CALIBRATION_CMD 0x0008
+#define BT541_RECALL_FACTORY_CMD 0x000f
+
+#define BT541_THRESHOLD 0x0020
+
+#define BT541_LARGE_PALM_REJECT_AREA_TH 0x003F
+
+#define BT541_DEBUG_REG 0x0115 /* 0~7 */
+
+#define BT541_TOUCH_MODE 0x0010
+#define BT541_CHIP_REVISION 0x0011
+#define BT541_FIRMWARE_VERSION 0x0012
+
+#define ZINITIX_USB_DETECT 0x116
+
+#define BT541_MINOR_FW_VERSION 0x0121
+
+#define BT541_VENDOR_ID 0x001C
+#define BT541_HW_ID 0x0014
+
+#define BT541_DATA_VERSION_REG 0x0013
+#define BT541_SUPPORTED_FINGER_NUM 0x0015
+#define BT541_EEPROM_INFO 0x0018
+#define BT541_INITIAL_TOUCH_MODE 0x0019
+
+#define BT541_TOTAL_NUMBER_OF_X 0x0060
+#define BT541_TOTAL_NUMBER_OF_Y 0x0061
+
+#define BT541_DELAY_RAW_FOR_HOST 0x007f
+
+#define BT541_BUTTON_SUPPORTED_NUM 0x00B0
+#define BT541_BUTTON_SENSITIVITY 0x00B2
+#define BT541_DUMMY_BUTTON_SENSITIVITY 0x00C8
+
+#define BT541_X_RESOLUTION 0x00C0
+#define BT541_Y_RESOLUTION 0x00C1
+
+#define BT541_POINT_STATUS_REG 0x0080
+#define BT541_ICON_STATUS_REG 0x00AA
+
+#define BT541_POINT_COORD_REG (BT541_POINT_STATUS_REG + 2)
+
+#define BT541_AFE_FREQUENCY 0x0100
+#define BT541_DND_N_COUNT 0x0122
+#define BT541_DND_U_COUNT 0x0135
+
+#define BT541_RAWDATA_REG 0x0200
+
+#define BT541_EEPROM_INFO_REG 0x0018
+
+#define BT541_INT_ENABLE_FLAG 0x00f0
+#define BT541_PERIODICAL_INTERRUPT_INTERVAL 0x00f1
+
+#define BT541_BTN_WIDTH 0x016d
+
+#define BT541_CHECKSUM_RESULT 0x012c
+
+#define BT541_INIT_FLASH 0x01d0
+#define BT541_WRITE_FLASH 0x01d1
+#define BT541_READ_FLASH 0x01d2
+
+#define ZINITIX_INTERNAL_FLAG_02 0x011e
+#define ZINITIX_INTERNAL_FLAG_03 0x011f
+
+#define ZINITIX_I2C_CHECKSUM_WCNT 0x016a
+#define ZINITIX_I2C_CHECKSUM_RESULT 0x016c
+
+/* Interrupt & status register flags */
+
+#define BIT_PT_CNT_CHANGE BIT(0)
+#define BIT_DOWN BIT(1)
+#define BIT_MOVE BIT(2)
+#define BIT_UP BIT(3)
+#define BIT_PALM BIT(4)
+#define BIT_PALM_REJECT BIT(5)
+#define BIT_RESERVED_0 BIT(6)
+#define BIT_RESERVED_1 BIT(7)
+#define BIT_WEIGHT_CHANGE BIT(8)
+#define BIT_PT_NO_CHANGE BIT(9)
+#define BIT_REJECT BIT(10)
+#define BIT_PT_EXIST BIT(11)
+#define BIT_RESERVED_2 BIT(12)
+#define BIT_ERROR BIT(13)
+#define BIT_DEBUG BIT(14)
+#define BIT_ICON_EVENT BIT(15)
+
+#define SUB_BIT_EXIST BIT(0)
+#define SUB_BIT_DOWN BIT(1)
+#define SUB_BIT_MOVE BIT(2)
+#define SUB_BIT_UP BIT(3)
+#define SUB_BIT_UPDATE BIT(4)
+#define SUB_BIT_WAIT BIT(5)
+
+#define DEFAULT_TOUCH_POINT_MODE 2
+#define MAX_SUPPORTED_FINGER_NUM 5
+
+#define CHIP_ON_DELAY 15 // ms
+#define FIRMWARE_ON_DELAY 40 // ms
+
+struct point_coord {
+ __le16 x;
+ __le16 y;
+ u8 width;
+ u8 sub_status;
+ // currently unused, but needed as padding:
+ u8 minor_width;
+ u8 angle;
+};
+
+struct touch_event {
+ __le16 status;
+ u8 finger_cnt;
+ u8 time_stamp;
+ struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM];
+};
+
+struct bt541_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct touchscreen_properties prop;
+ struct regulator_bulk_data supplies[2];
+ u32 zinitix_mode;
+};
+
+static int zinitix_read_data(struct i2c_client *client,
+ u16 reg, void *values, size_t length)
+{
+ __le16 reg_le = cpu_to_le16(reg);
+ int ret;
+
+ /* A single i2c_transfer() transaction does not work here. */
+ ret = i2c_master_send(client, (u8 *)&reg_le, sizeof(reg_le));
+ if (ret != sizeof(reg_le))
+ return ret < 0 ? ret : -EIO;
+
+ ret = i2c_master_recv(client, (u8 *)values, length);
+ if (ret != length)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int zinitix_write_u16(struct i2c_client *client, u16 reg, u16 value)
+{
+ __le16 packet[2] = {cpu_to_le16(reg), cpu_to_le16(value)};
+ int ret;
+
+ ret = i2c_master_send(client, (u8 *)packet, sizeof(packet));
+ if (ret != sizeof(packet))
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
+{
+ __le16 reg_le = cpu_to_le16(reg);
+ int ret;
+
+ ret = i2c_master_send(client, (u8 *)&reg_le, sizeof(reg_le));
+ if (ret != sizeof(reg_le))
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static bool zinitix_init_touch(struct bt541_ts_data *bt541)
+{
+ struct i2c_client *client = bt541->client;
+ int i;
+ int error;
+
+ error = zinitix_write_cmd(client, BT541_SWRESET_CMD);
+ if (error) {
+ dev_err(&client->dev, "Failed to write reset command\n");
+ return error;
+ }
+
+ error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG, 0x0);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to reset interrupt enable flag\n");
+ return error;
+ }
+
+ /* initialize */
+ error = zinitix_write_u16(client, BT541_X_RESOLUTION,
+ bt541->prop.max_x);
+ if (error)
+ return error;
+
+ error = zinitix_write_u16(client, BT541_Y_RESOLUTION,
+ bt541->prop.max_y);
+ if (error)
+ return error;
+
+ error = zinitix_write_u16(client, BT541_SUPPORTED_FINGER_NUM,
+ MAX_SUPPORTED_FINGER_NUM);
+ if (error)
+ return error;
+
+ error = zinitix_write_u16(client, BT541_INITIAL_TOUCH_MODE,
+ bt541->zinitix_mode);
+ if (error)
+ return error;
+
+ error = zinitix_write_u16(client, BT541_TOUCH_MODE,
+ bt541->zinitix_mode);
+ if (error)
+ return error;
+
+ error = zinitix_write_u16(client, BT541_INT_ENABLE_FLAG,
+ BIT_PT_CNT_CHANGE | BIT_DOWN | BIT_MOVE |
+ BIT_UP);
+ if (error)
+ return error;
+
+ /* clear queue */
+ for (i = 0; i < 10; i++) {
+ zinitix_write_cmd(client, BT541_CLEAR_INT_STATUS_CMD);
+ udelay(10);
+ }
+
+ return 0;
+}
+
+static int zinitix_init_regulators(struct bt541_ts_data *bt541)
+{
+ struct i2c_client *client = bt541->client;
+ int error;
+
+ bt541->supplies[0].supply = "vdd";
+ bt541->supplies[1].supply = "vddo";
+ error = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(bt541->supplies),
+ bt541->supplies);
+ if (error < 0) {
+ dev_err(&client->dev, "Failed to get regulators: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541)
+{
+ int error;
+ struct i2c_client *client = bt541->client;
+
+ error = zinitix_write_u16(client, 0xc000, 0x0001);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to send power sequence(vendor cmd enable)\n");
+ return error;
+ }
+ udelay(10);
+
+ error = zinitix_write_cmd(client, 0xc004);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to send power sequence (intn clear)\n");
+ return error;
+ }
+ udelay(10);
+
+ error = zinitix_write_u16(client, 0xc002, 0x0001);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to send power sequence (nvm init)\n");
+ return error;
+ }
+ mdelay(2);
+
+ error = zinitix_write_u16(client, 0xc001, 0x0001);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to send power sequence (program start)\n");
+ return error;
+ }
+ msleep(FIRMWARE_ON_DELAY);
+
+ return 0;
+}
+
+static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot,
+ const struct point_coord *p)
+{
+ input_mt_slot(bt541->input_dev, slot);
+ input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(bt541->input_dev, &bt541->prop,
+ le16_to_cpu(p->x), le16_to_cpu(p->y), true);
+ input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width);
+}
+
+static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler)
+{
+ struct bt541_ts_data *bt541 = bt541_handler;
+ struct i2c_client *client = bt541->client;
+ struct touch_event touch_event;
+ int error;
+ int i;
+
+ memset(&touch_event, 0, sizeof(struct touch_event));
+
+ error = zinitix_read_data(bt541->client, BT541_POINT_STATUS_REG,
+ &touch_event, sizeof(struct touch_event));
+ if (error) {
+ dev_err(&client->dev, "Failed to read in touchpoint struct\n");
+ goto out;
+ }
+
+ for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++)
+ if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST)
+ zinitix_report_finger(bt541, i,
+ &touch_event.point_coord[i]);
+
+ input_mt_sync_frame(bt541->input_dev);
+ input_sync(bt541->input_dev);
+
+out:
+ zinitix_write_cmd(bt541->client, BT541_CLEAR_INT_STATUS_CMD);
+ return IRQ_HANDLED;
+}
+
+static int zinitix_start(struct bt541_ts_data *bt541)
+{
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(bt541->supplies),
+ bt541->supplies);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Failed to enable regulators: %d\n", error);
+ return error;
+ }
+
+ msleep(CHIP_ON_DELAY);
+
+ error = zinitix_send_power_on_sequence(bt541);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Error while sending power-on sequence: %d\n", error);
+ return error;
+ }
+
+ error = zinitix_init_touch(bt541);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Error while configuring touch IC\n");
+ return error;
+ }
+
+ enable_irq(bt541->client->irq);
+
+ return 0;
+}
+
+static int zinitix_stop(struct bt541_ts_data *bt541)
+{
+ int error;
+
+ disable_irq(bt541->client->irq);
+
+ error = regulator_bulk_disable(ARRAY_SIZE(bt541->supplies),
+ bt541->supplies);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Failed to disable regulators: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int zinitix_input_open(struct input_dev *dev)
+{
+ struct bt541_ts_data *bt541 = input_get_drvdata(dev);
+
+ return zinitix_start(bt541);
+}
+
+static void zinitix_input_close(struct input_dev *dev)
+{
+ struct bt541_ts_data *bt541 = input_get_drvdata(dev);
+
+ zinitix_stop(bt541);
+}
+
+static int zinitix_init_input_dev(struct bt541_ts_data *bt541)
+{
+ struct input_dev *input_dev;
+ int error;
+
+ input_dev = devm_input_allocate_device(&bt541->client->dev);
+ if (!input_dev) {
+ dev_err(&bt541->client->dev,
+ "Failed to allocate input device.");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(input_dev, bt541);
+ bt541->input_dev = input_dev;
+
+ input_dev->name = "Zinitix Capacitive TouchScreen";
+ input_dev->phys = "input/ts";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->open = zinitix_input_open;
+ input_dev->close = zinitix_input_close;
+
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(input_dev, true, &bt541->prop);
+ if (!bt541->prop.max_x || !bt541->prop.max_y) {
+ dev_err(&bt541->client->dev,
+ "Touchscreen-size-x and/or touchscreen-size-y not set in dts\n");
+ return -EINVAL;
+ }
+
+ error = input_mt_init_slots(input_dev, MAX_SUPPORTED_FINGER_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Failed to initialize MT slots: %d", error);
+ return error;
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&bt541->client->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int zinitix_ts_probe(struct i2c_client *client)
+{
+ struct bt541_ts_data *bt541;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "Failed to assert adapter's support for plain I2C.\n");
+ return -ENXIO;
+ }
+
+ bt541 = devm_kzalloc(&client->dev, sizeof(*bt541), GFP_KERNEL);
+ if (!bt541)
+ return -ENOMEM;
+
+ bt541->client = client;
+ i2c_set_clientdata(client, bt541);
+
+ error = zinitix_init_regulators(bt541);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize regulators: %d\n", error);
+ return error;
+ }
+
+ error = zinitix_init_input_dev(bt541);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize input device: %d\n", error);
+ return error;
+ }
+
+ error = device_property_read_u32(&client->dev, "zinitix,mode",
+ &bt541->zinitix_mode);
+ if (error < 0) {
+ /* fall back to mode 2 */
+ bt541->zinitix_mode = DEFAULT_TOUCH_POINT_MODE;
+ }
+
+ if (bt541->zinitix_mode != 2) {
+ /*
+ * If there are devices that don't support mode 2, support
+ * for other modes (0, 1) will be needed.
+ */
+ dev_err(&client->dev,
+ "Malformed zinitix,mode property, must be 2 (supplied: %d)\n",
+ bt541->zinitix_mode);
+ return -EINVAL;
+ }
+
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, zinitix_ts_irq_handler,
+ IRQF_ONESHOT, client->name, bt541);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused zinitix_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bt541_ts_data *bt541 = i2c_get_clientdata(client);
+
+ mutex_lock(&bt541->input_dev->mutex);
+
+ if (bt541->input_dev->users)
+ zinitix_stop(bt541);
+
+ mutex_unlock(&bt541->input_dev->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused zinitix_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bt541_ts_data *bt541 = i2c_get_clientdata(client);
+ int ret = 0;
+
+ mutex_lock(&bt541->input_dev->mutex);
+
+ if (bt541->input_dev->users)
+ ret = zinitix_start(bt541);
+
+ mutex_unlock(&bt541->input_dev->mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt541" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, zinitix_of_match);
+#endif
+
+static struct i2c_driver zinitix_ts_driver = {
+ .probe_new = zinitix_ts_probe,
+ .driver = {
+ .name = "Zinitix-TS",
+ .pm = &zinitix_pm_ops,
+ .of_match_table = of_match_ptr(zinitix_of_match),
+ },
+};
+module_i2c_driver(zinitix_ts_driver);
+
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_DESCRIPTION("Zinitix touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 4825c287ca13..d203520b0a56 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_core.o := -I$(src)
-icc-core-objs := core.o
+icc-core-objs := core.o bulk.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
obj-$(CONFIG_INTERCONNECT_IMX) += imx/
diff --git a/drivers/interconnect/bulk.c b/drivers/interconnect/bulk.c
new file mode 100644
index 000000000000..73e2c8d0a412
--- /dev/null
+++ b/drivers/interconnect/bulk.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/interconnect-provider.h>
+#include <linux/device.h>
+#include <linux/export.h>
+
+/**
+ * of_icc_bulk_get() - get interconnect paths
+ * @dev: the device requesting the path
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the table with the paths we want to get
+ *
+ * Returns 0 on success or negative errno otherwise.
+ */
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ int ret, i;
+
+ for (i = 0; i < num_paths; i++) {
+ paths[i].path = of_icc_get(dev, paths[i].name);
+ if (IS_ERR(paths[i].path)) {
+ ret = PTR_ERR(paths[i].path);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "of_icc_get() failed on path %s (%d)\n",
+ paths[i].name, ret);
+ paths[i].path = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ icc_bulk_put(i, paths);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_icc_bulk_get);
+
+/**
+ * icc_bulk_put() - put a list of interconnect paths
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the icc_bulk_data table with the paths being put
+ */
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+ while (--num_paths >= 0) {
+ icc_put(paths[num_paths].path);
+ paths[num_paths].path = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(icc_bulk_put);
+
+/**
+ * icc_bulk_set_bw() - set bandwidth to a set of paths
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the icc_bulk_data table containing the paths and bandwidth
+ *
+ * Returns 0 on success or negative errno otherwise.
+ */
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_paths; i++) {
+ ret = icc_set_bw(paths[i].path, paths[i].avg_bw, paths[i].peak_bw);
+ if (ret) {
+ pr_err("icc_set_bw() failed on path %s (%d)\n", paths[i].name, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_bulk_set_bw);
+
+/**
+ * icc_bulk_enable() - enable a previously disabled set of paths
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the icc_bulk_data table containing the paths and bandwidth
+ *
+ * Returns 0 on success or negative errno otherwise.
+ */
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ int ret, i;
+
+ for (i = 0; i < num_paths; i++) {
+ ret = icc_enable(paths[i].path);
+ if (ret) {
+ pr_err("icc_enable() failed on path %s (%d)\n", paths[i].name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ icc_bulk_disable(i, paths);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icc_bulk_enable);
+
+/**
+ * icc_bulk_disable() - disable a set of interconnect paths
+ * @num_paths: the number of icc_bulk_data
+ * @paths: the icc_bulk_data table containing the paths and bandwidth
+ */
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+ while (--num_paths >= 0)
+ icc_disable(paths[num_paths].path);
+}
+EXPORT_SYMBOL_GPL(icc_bulk_disable);
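+
+/*
+ * Illustrative consumer usage of the bulk API (an example sketch, not part
+ * of this patch; the path names and bandwidth values below are made up):
+ *
+ *	static struct icc_bulk_data paths[] = {
+ *		{ .name = "cpu-mem", .avg_bw = MBps_to_icc(100) },
+ *		{ .name = "cpu-config", .avg_bw = MBps_to_icc(1) },
+ *	};
+ *
+ *	ret = of_icc_bulk_get(dev, ARRAY_SIZE(paths), paths);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = icc_bulk_set_bw(ARRAY_SIZE(paths), paths);
+ *	ret = icc_bulk_enable(ARRAY_SIZE(paths), paths);
+ *	...
+ *	icc_bulk_disable(ARRAY_SIZE(paths), paths);
+ *	icc_bulk_put(ARRAY_SIZE(paths), paths);
+ */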
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index cf07491b7415..974a66725d09 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -26,6 +26,8 @@
static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
+static int providers_count;
+static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;
@@ -267,6 +269,12 @@ static int aggregate_requests(struct icc_node *node)
}
p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
+
+ /* during boot use the initial bandwidth as a floor value */
+ if (!synced_state) {
+ node->avg_bw = max(node->avg_bw, node->init_avg);
+ node->peak_bw = max(node->peak_bw, node->init_peak);
+ }
}
return 0;
@@ -342,12 +350,13 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
* Looks for interconnect provider under the node specified by @spec and if
* found, uses xlate function of the provider to map phandle args to node.
*
- * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
* on failure.
*/
-struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+ struct icc_node_data *data = NULL;
struct icc_provider *provider;
if (!spec)
@@ -355,14 +364,33 @@ struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
- if (provider->dev->of_node == spec->np)
- node = provider->xlate(spec, provider->data);
- if (!IS_ERR(node))
- break;
+ if (provider->dev->of_node == spec->np) {
+ if (provider->xlate_extended) {
+ data = provider->xlate_extended(spec, provider->data);
+ if (!IS_ERR(data)) {
+ node = data->node;
+ break;
+ }
+ } else {
+ node = provider->xlate(spec, provider->data);
+ if (!IS_ERR(node))
+ break;
+ }
+ }
}
mutex_unlock(&icc_lock);
- return node;
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+ if (!data) {
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+ data->node = node;
+ }
+
+ return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
@@ -409,7 +437,7 @@ EXPORT_SYMBOL_GPL(devm_of_icc_get);
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
struct icc_path *path;
- struct icc_node *src_node, *dst_node;
+ struct icc_node_data *src_data, *dst_data;
struct device_node *np;
struct of_phandle_args src_args, dst_args;
int ret;
@@ -447,39 +475,42 @@ struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
of_node_put(dst_args.np);
- src_node = of_icc_get_from_provider(&src_args);
+ src_data = of_icc_get_from_provider(&src_args);
- if (IS_ERR(src_node)) {
- if (PTR_ERR(src_node) != -EPROBE_DEFER)
- dev_err(dev, "error finding src node: %ld\n",
- PTR_ERR(src_node));
- return ERR_CAST(src_node);
+ if (IS_ERR(src_data)) {
+ dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
+ return ERR_CAST(src_data);
}
- dst_node = of_icc_get_from_provider(&dst_args);
+ dst_data = of_icc_get_from_provider(&dst_args);
- if (IS_ERR(dst_node)) {
- if (PTR_ERR(dst_node) != -EPROBE_DEFER)
- dev_err(dev, "error finding dst node: %ld\n",
- PTR_ERR(dst_node));
- return ERR_CAST(dst_node);
+ if (IS_ERR(dst_data)) {
+ dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
+ kfree(src_data);
+ return ERR_CAST(dst_data);
}
mutex_lock(&icc_lock);
- path = path_find(dev, src_node, dst_node);
+ path = path_find(dev, src_data->node, dst_data->node);
mutex_unlock(&icc_lock);
if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
- return path;
+ goto free_icc_data;
}
+ if (src_data->tag && src_data->tag == dst_data->tag)
+ icc_set_tag(path, src_data->tag);
+
path->name = kasprintf(GFP_KERNEL, "%s-%s",
- src_node->name, dst_node->name);
+ src_data->node->name, dst_data->node->name);
if (!path->name) {
kfree(path);
- return ERR_PTR(-ENOMEM);
+ path = ERR_PTR(-ENOMEM);
}
+free_icc_data:
+ kfree(src_data);
+ kfree(dst_data);
return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
@@ -931,6 +962,22 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->provider = provider;
list_add_tail(&node->node_list, &provider->nodes);
+ /* get the initial bandwidth values and sync them with hardware */
+ if (provider->get_bw) {
+ provider->get_bw(node, &node->init_avg, &node->init_peak);
+ } else {
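+ /* no get_bw() callback: keep the node maxed out until sync_state */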
+ node->init_avg = INT_MAX;
+ node->init_peak = INT_MAX;
+ }
+ node->avg_bw = node->init_avg;
+ node->peak_bw = node->init_peak;
+ if (provider->aggregate)
+ provider->aggregate(node, 0, node->init_avg, node->init_peak,
+ &node->avg_bw, &node->peak_bw);
+ provider->set(node, node);
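+ /* clear the requests; init_* still acts as a floor in aggregate_requests() */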
+ node->avg_bw = 0;
+ node->peak_bw = 0;
+
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
@@ -981,7 +1028,7 @@ int icc_provider_add(struct icc_provider *provider)
{
if (WARN_ON(!provider->set))
return -EINVAL;
- if (WARN_ON(!provider->xlate))
+ if (WARN_ON(!provider->xlate && !provider->xlate_extended))
return -EINVAL;
mutex_lock(&icc_lock);
@@ -1026,8 +1073,54 @@ int icc_provider_del(struct icc_provider *provider)
}
EXPORT_SYMBOL_GPL(icc_provider_del);
+static int of_count_icc_providers(struct device_node *np)
+{
+ struct device_node *child;
+ int count = 0;
+
+ for_each_available_child_of_node(np, child) {
+ if (of_property_read_bool(child, "#interconnect-cells"))
+ count++;
+ count += of_count_icc_providers(child);
+ }
+
+ return count;
+}
+
+void icc_sync_state(struct device *dev)
+{
+ struct icc_provider *p;
+ struct icc_node *n;
+ static int count;
+
+ count++;
+
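+ /* defer until all providers counted at init have reached sync_state */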
+ if (count < providers_count)
+ return;
+
+ mutex_lock(&icc_lock);
+ synced_state = true;
+ list_for_each_entry(p, &icc_providers, provider_list) {
+ dev_dbg(p->dev, "interconnect provider is in synced state\n");
+ list_for_each_entry(n, &p->nodes, node_list) {
+ if (n->init_avg || n->init_peak) {
+ aggregate_requests(n);
+ p->set(n, n);
+ }
+ }
+ }
+ mutex_unlock(&icc_lock);
+}
+EXPORT_SYMBOL_GPL(icc_sync_state);
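+
+/*
+ * Illustrative provider opt-in (a sketch; the driver and function names are
+ * hypothetical): a provider implements ->get_bw() so its boot-time votes can
+ * serve as a floor, and registers icc_sync_state() as its driver sync_state
+ * callback so the floor is dropped once all consumers have probed.
+ *
+ *	static struct platform_driver example_icc_driver = {
+ *		.probe = example_icc_probe,
+ *		.driver = {
+ *			.name = "example-icc",
+ *			.sync_state = icc_sync_state,
+ *		},
+ *	};
+ */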
+
static int __init icc_init(void)
{
+ struct device_node *root = of_find_node_by_path("/");
+
+ providers_count = of_count_icc_providers(root);
+ of_node_put(root);
+
icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
debugfs_create_file("interconnect_summary", 0444,
icc_debugfs_dir, NULL, &icc_summary_fops);
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index ac420f86008e..41dba7090c2a 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -184,10 +184,8 @@ static int imx_icc_register_nodes(struct icc_provider *provider,
node = imx_icc_node_add(provider, node_desc);
if (IS_ERR(node)) {
- ret = PTR_ERR(node);
- if (ret != -EPROBE_DEFER)
- dev_err(provider->dev, "failed to add %s: %d\n",
- node_desc->name, ret);
+ ret = dev_err_probe(provider->dev, PTR_ERR(node),
+ "failed to add %s\n", node_desc->name);
goto err;
}
provider_data->nodes[node->id] = node;
@@ -269,15 +267,10 @@ EXPORT_SYMBOL_GPL(imx_icc_register);
int imx_icc_unregister(struct platform_device *pdev)
{
struct icc_provider *provider = platform_get_drvdata(pdev);
- int ret;
imx_icc_unregister_nodes(provider);
- ret = icc_provider_del(provider);
- if (ret)
- return ret;
-
- return 0;
+ return icc_provider_del(provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a88f2f07bc27..a8f93ba265f8 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -65,5 +65,25 @@ config INTERCONNECT_QCOM_SDM845
This is a driver for the Qualcomm Network-on-Chip on sdm845-based
platforms.
+config INTERCONNECT_QCOM_SM8150
+ tristate "Qualcomm SM8150 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm8150-based
+ platforms.
+
+config INTERCONNECT_QCOM_SM8250
+ tristate "Qualcomm SM8250 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm8250-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 3a047fe6e45a..cf628f7990cd 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -8,6 +8,8 @@ qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
qnoc-sdm845-objs := sdm845.o
+qnoc-sm8150-objs := sm8150.o
+qnoc-sm8250-objs := sm8250.o
icc-smd-rpm-objs := smd-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -18,4 +20,6 @@ obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index 609db9c95fd7..887d13721e52 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -27,6 +27,7 @@ static DEFINE_MUTEX(bcm_voter_lock);
* @commit_list: list containing bcms to be committed to hardware
* @ws_list: list containing bcms that have different wake/sleep votes
* @voter_node: list of bcm voters
+ * @tcs_wait: mask for which buckets require TCS completion
*/
struct bcm_voter {
struct device *dev;
@@ -35,6 +36,7 @@ struct bcm_voter {
struct list_head commit_list;
struct list_head ws_list;
struct list_head voter_node;
+ u32 tcs_wait;
};
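+
+/*
+ * Illustrative DT usage of the new property (a sketch; the node label is
+ * made up, and the QCOM_ICC_TAG_* bucket masks come from
+ * dt-bindings/interconnect/qcom,icc.h):
+ *
+ *	apps_bcm_voter: bcm-voter {
+ *		compatible = "qcom,bcm-voter";
+ *		qcom,tcs-wait = <QCOM_ICC_TAG_ACTIVE_ONLY>;
+ *	};
+ */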
static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
@@ -83,10 +85,10 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
agg_peak[bucket] = max(agg_peak[bucket], temp);
}
- temp = agg_avg[bucket] * 1000ULL;
+ temp = agg_avg[bucket] * bcm->vote_scale;
bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);
- temp = agg_peak[bucket] * 1000ULL;
+ temp = agg_peak[bucket] * bcm->vote_scale;
bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
}
@@ -100,7 +102,7 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
}
static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
- u32 addr, bool commit)
+ u32 addr, bool commit, bool wait)
{
bool valid = true;
@@ -125,15 +127,16 @@ static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
 * Set the wait for completion flag on commands that need to be completed
* before the next command.
*/
- cmd->wait = commit;
+ cmd->wait = wait;
}
-static void tcs_list_gen(struct list_head *bcm_list, int bucket,
- struct tcs_cmd tcs_list[MAX_BCMS],
+static void tcs_list_gen(struct bcm_voter *voter, int bucket,
+ struct tcs_cmd tcs_list[MAX_VCD],
int n[MAX_VCD + 1])
{
+ struct list_head *bcm_list = &voter->commit_list;
struct qcom_icc_bcm *bcm;
- bool commit;
+ bool commit, wait;
size_t idx = 0, batch = 0, cur_vcd_size = 0;
memset(n, 0, sizeof(int) * (MAX_VCD + 1));
@@ -146,8 +149,11 @@ static void tcs_list_gen(struct list_head *bcm_list, int bucket,
commit = true;
cur_vcd_size = 0;
}
+
+ wait = commit && (voter->tcs_wait & BIT(bucket));
+
tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
- bcm->vote_y[bucket], bcm->addr, commit);
+ bcm->vote_y[bucket], bcm->addr, commit, wait);
idx++;
n[batch]++;
/*
@@ -272,8 +278,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
* Construct the command list based on a pre ordered list of BCMs
* based on VCD.
*/
- tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
-
+ tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
if (!commit_idx[0])
goto out;
@@ -309,7 +314,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
list_sort(NULL, &voter->commit_list, cmp_vcd);
- tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
+ tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
if (ret) {
@@ -317,7 +322,7 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
goto out;
}
- tcs_list_gen(&voter->commit_list, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
+ tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
if (ret) {
@@ -336,6 +341,7 @@ EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct bcm_voter *voter;
voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
@@ -343,7 +349,11 @@ static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
return -ENOMEM;
voter->dev = &pdev->dev;
- voter->np = pdev->dev.of_node;
+ voter->np = np;
+
+ if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
+ voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;
+
mutex_init(&voter->lock);
INIT_LIST_HEAD(&voter->commit_list);
INIT_LIST_HEAD(&voter->ws_list);
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 3ac5182c9ab2..bf01d09dba6c 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -6,6 +6,8 @@
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
@@ -77,6 +79,7 @@ EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
+ struct qcom_icc_node *qn;
struct icc_node *node;
if (!src)
@@ -85,6 +88,12 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
node = src;
qp = to_qcom_provider(node->provider);
+ qn = node->data;
+
+ qn->sum_avg[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->sum_avg[QCOM_ICC_BUCKET_AMC],
+ node->avg_bw);
+ qn->max_peak[QCOM_ICC_BUCKET_AMC] = max_t(u64, qn->max_peak[QCOM_ICC_BUCKET_AMC],
+ node->peak_bw);
qcom_icc_bcm_voter_commit(qp->voter);
@@ -92,6 +101,31 @@ int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
}
EXPORT_SYMBOL_GPL(qcom_icc_set);
+struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ node = of_icc_xlate_onecell(spec, data);
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ if (spec->args_count == 2)
+ ndata->tag = spec->args[1];
+
+ if (spec->args_count > 2)
+ pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
+
+ return ndata;
+}
+EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
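+
+/*
+ * With ->xlate_extended(), a consumer may append a path tag to each endpoint
+ * specifier. Illustrative DT snippet (a sketch; the node labels and endpoint
+ * IDs are examples, and QCOM_ICC_TAG_ALWAYS comes from
+ * dt-bindings/interconnect/qcom,icc.h):
+ *
+ *	interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ALWAYS
+ *			 &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ */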
+
/**
* qcom_icc_bcm_init - populates bcm aux data and connect qnodes
* @bcm: bcm to be initialized
@@ -136,6 +170,9 @@ int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
INIT_LIST_HEAD(&bcm->list);
INIT_LIST_HEAD(&bcm->ws_list);
+ if (!bcm->vote_scale)
+ bcm->vote_scale = 1000;
+
/* Link Qnodes to their respective BCMs */
for (i = 0; i < bcm->num_nodes; i++) {
qn = bcm->nodes[i];
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 903d25e61984..e5f61ab989e7 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -6,6 +6,8 @@
#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
#define __DRIVERS_INTERCONNECT_QCOM_ICC_RPMH_H__
+#include <dt-bindings/interconnect/qcom,icc.h>
+
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
@@ -44,22 +46,6 @@ struct bcm_db {
#define MAX_BCM_PER_NODE 3
#define MAX_VCD 10
-/*
- * The AMC bucket denotes constraints that are applied to hardware when
- * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
- * when the execution environment transitions between active and low power mode.
- */
-#define QCOM_ICC_BUCKET_AMC 0
-#define QCOM_ICC_BUCKET_WAKE 1
-#define QCOM_ICC_BUCKET_SLEEP 2
-#define QCOM_ICC_NUM_BUCKETS 3
-#define QCOM_ICC_TAG_AMC BIT(QCOM_ICC_BUCKET_AMC)
-#define QCOM_ICC_TAG_WAKE BIT(QCOM_ICC_BUCKET_WAKE)
-#define QCOM_ICC_TAG_SLEEP BIT(QCOM_ICC_BUCKET_SLEEP)
-#define QCOM_ICC_TAG_ACTIVE_ONLY (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
-#define QCOM_ICC_TAG_ALWAYS (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
- QCOM_ICC_TAG_SLEEP)
-
/**
* struct qcom_icc_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
@@ -94,6 +80,7 @@ struct qcom_icc_node {
* @addr: address offsets used when voting to RPMH
* @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm
* @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm
+ * @vote_scale: scaling factor for vote_x and vote_y
* @dirty: flag used to indicate whether the bcm needs to be committed
* @keepalive: flag used to indicate whether a keepalive is required
* @aux_data: auxiliary data used when calculating threshold values and
@@ -109,6 +96,7 @@ struct qcom_icc_bcm {
u32 addr;
u64 vote_x[QCOM_ICC_NUM_BUCKETS];
u64 vote_y[QCOM_ICC_NUM_BUCKETS];
+ u64 vote_scale;
bool dirty;
bool keepalive;
struct bcm_db aux_data;
@@ -143,6 +131,7 @@ struct qcom_icc_desc {
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
+struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev);
void qcom_icc_pre_aggregate(struct icc_node *node);
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 96fb9ff5ff2e..695f28789e98 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -16,17 +16,24 @@
#include "sc7180.h"
#include "sdm845.h"
+#include "sm8150.h"
+#include "sm8250.h"
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
#define LUT_L_VAL GENMASK(7, 0)
-#define LUT_ROW_SIZE 32
#define CLK_HW_DIV 2
-/* Register offsets */
+/* OSM Register offsets */
#define REG_ENABLE 0x0
-#define REG_FREQ_LUT 0x110
-#define REG_PERF_STATE 0x920
+#define OSM_LUT_ROW_SIZE 32
+#define OSM_REG_FREQ_LUT 0x110
+#define OSM_REG_PERF_STATE 0x920
+
+/* EPSS Register offsets */
+#define EPSS_LUT_ROW_SIZE 4
+#define EPSS_REG_FREQ_LUT 0x100
+#define EPSS_REG_PERF_STATE 0x320
#define OSM_L3_MAX_LINKS 1
@@ -36,6 +43,7 @@
struct qcom_osm_l3_icc_provider {
void __iomem *base;
unsigned int max_state;
+ unsigned int reg_perf_state;
unsigned long lut_tables[LUT_MAX_ENTRIES];
struct icc_provider provider;
};
@@ -57,12 +65,15 @@ struct qcom_icc_node {
};
struct qcom_icc_desc {
- struct qcom_icc_node **nodes;
+ const struct qcom_icc_node **nodes;
size_t num_nodes;
+ unsigned int lut_row_size;
+ unsigned int reg_freq_lut;
+ unsigned int reg_perf_state;
};
#define DEFINE_QNODE(_name, _id, _buswidth, ...) \
- static struct qcom_icc_node _name = { \
+ static const struct qcom_icc_node _name = { \
.name = #_name, \
.id = _id, \
.buswidth = _buswidth, \
@@ -73,7 +84,7 @@ struct qcom_icc_desc {
DEFINE_QNODE(sdm845_osm_apps_l3, SDM845_MASTER_OSM_L3_APPS, 16, SDM845_SLAVE_OSM_L3);
DEFINE_QNODE(sdm845_osm_l3, SDM845_SLAVE_OSM_L3, 16);
-static struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
+static const struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sdm845_osm_apps_l3,
[SLAVE_OSM_L3] = &sdm845_osm_l3,
};
@@ -81,12 +92,15 @@ static struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
static const struct qcom_icc_desc sdm845_icc_osm_l3 = {
.nodes = sdm845_osm_l3_nodes,
.num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes),
+ .lut_row_size = OSM_LUT_ROW_SIZE,
+ .reg_freq_lut = OSM_REG_FREQ_LUT,
+ .reg_perf_state = OSM_REG_PERF_STATE,
};
DEFINE_QNODE(sc7180_osm_apps_l3, SC7180_MASTER_OSM_L3_APPS, 16, SC7180_SLAVE_OSM_L3);
DEFINE_QNODE(sc7180_osm_l3, SC7180_SLAVE_OSM_L3, 16);
-static struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
+static const struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &sc7180_osm_apps_l3,
[SLAVE_OSM_L3] = &sc7180_osm_l3,
};
@@ -94,13 +108,48 @@ static struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
static const struct qcom_icc_desc sc7180_icc_osm_l3 = {
.nodes = sc7180_osm_l3_nodes,
.num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes),
+ .lut_row_size = OSM_LUT_ROW_SIZE,
+ .reg_freq_lut = OSM_REG_FREQ_LUT,
+ .reg_perf_state = OSM_REG_PERF_STATE,
+};
+
+DEFINE_QNODE(sm8150_osm_apps_l3, SM8150_MASTER_OSM_L3_APPS, 32, SM8150_SLAVE_OSM_L3);
+DEFINE_QNODE(sm8150_osm_l3, SM8150_SLAVE_OSM_L3, 32);
+
+static const struct qcom_icc_node *sm8150_osm_l3_nodes[] = {
+ [MASTER_OSM_L3_APPS] = &sm8150_osm_apps_l3,
+ [SLAVE_OSM_L3] = &sm8150_osm_l3,
+};
+
+static const struct qcom_icc_desc sm8150_icc_osm_l3 = {
+ .nodes = sm8150_osm_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sm8150_osm_l3_nodes),
+ .lut_row_size = OSM_LUT_ROW_SIZE,
+ .reg_freq_lut = OSM_REG_FREQ_LUT,
+ .reg_perf_state = OSM_REG_PERF_STATE,
+};
+
+DEFINE_QNODE(sm8250_epss_apps_l3, SM8250_MASTER_EPSS_L3_APPS, 32, SM8250_SLAVE_EPSS_L3);
+DEFINE_QNODE(sm8250_epss_l3, SM8250_SLAVE_EPSS_L3, 32);
+
+static const struct qcom_icc_node *sm8250_epss_l3_nodes[] = {
+ [MASTER_EPSS_L3_APPS] = &sm8250_epss_apps_l3,
+ [SLAVE_EPSS_L3_SHARED] = &sm8250_epss_l3,
+};
+
+static const struct qcom_icc_desc sm8250_icc_epss_l3 = {
+ .nodes = sm8250_epss_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sm8250_epss_l3_nodes),
+ .lut_row_size = EPSS_LUT_ROW_SIZE,
+ .reg_freq_lut = EPSS_REG_FREQ_LUT,
+ .reg_perf_state = EPSS_REG_PERF_STATE,
};
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_osm_l3_icc_provider *qp;
struct icc_provider *provider;
- struct qcom_icc_node *qn;
+ const struct qcom_icc_node *qn;
struct icc_node *n;
unsigned int index;
u32 agg_peak = 0;
@@ -124,7 +173,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
break;
}
- writel_relaxed(index, qp->base + REG_PERF_STATE);
+ writel_relaxed(index, qp->base + qp->reg_perf_state);
return 0;
}
@@ -145,7 +194,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
- struct qcom_icc_node **qnodes;
+ const struct qcom_icc_node **qnodes;
struct icc_node *node;
size_t num_nodes;
struct clk *clk;
@@ -179,9 +228,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
return -ENODEV;
}
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qp->reg_perf_state = desc->reg_perf_state;
+
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
- info = readl_relaxed(qp->base + REG_FREQ_LUT +
- i * LUT_ROW_SIZE);
+ info = readl_relaxed(qp->base + desc->reg_freq_lut +
+ i * desc->lut_row_size);
src = FIELD_GET(LUT_SRC, info);
lval = FIELD_GET(LUT_L_VAL, info);
if (src)
@@ -200,10 +255,6 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
}
qp->max_state = i;
- desc = device_get_match_data(&pdev->dev);
- if (!desc)
- return -EINVAL;
-
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
@@ -235,7 +286,8 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
}
node->name = qnodes[i]->name;
- node->data = qnodes[i];
+ /* Cast away const and add it back in qcom_icc_set() */
+ node->data = (void *)qnodes[i];
icc_node_add(node, provider);
for (j = 0; j < qnodes[i]->num_links; j++)
@@ -258,6 +310,8 @@ err:
static const struct of_device_id osm_l3_of_match[] = {
{ .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
{ .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
+ { .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 },
+ { .compatible = "qcom,sm8250-epss-l3", .data = &sm8250_icc_epss_l3 },
{ }
};
MODULE_DEVICE_TABLE(of, osm_l3_of_match);
@@ -268,6 +322,7 @@ static struct platform_driver osm_l3_driver = {
.driver = {
.name = "osm-l3",
.of_match_table = osm_l3_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(osm_l3_driver);
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index dcf493d07928..8d9044ed18ab 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -535,7 +535,7 @@ static int qnoc_probe(struct platform_device *pdev)
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
- provider->xlate = of_icc_xlate_onecell;
+ provider->xlate_extended = qcom_icc_xlate_extended;
INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
@@ -553,6 +553,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -576,9 +579,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
@@ -633,6 +633,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sc7180",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index f6c7b969520d..5304aea3b058 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -151,7 +151,7 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
@@ -469,7 +469,7 @@ static int qnoc_probe(struct platform_device *pdev)
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
- provider->xlate = of_icc_xlate_onecell;
+ provider->xlate_extended = qcom_icc_xlate_extended;
INIT_LIST_HEAD(&provider->nodes);
provider->data = data;
@@ -489,6 +489,9 @@ static int qnoc_probe(struct platform_device *pdev)
return ret;
}
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
for (i = 0; i < num_nodes; i++) {
size_t j;
@@ -512,9 +515,6 @@ static int qnoc_probe(struct platform_device *pdev)
}
data->num_nodes = num_nodes;
- for (i = 0; i < qp->num_bcms; i++)
- qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
-
platform_set_drvdata(pdev, qp);
return 0;
@@ -559,6 +559,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sdm845",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
new file mode 100644
index 000000000000..c76b2c7f9b10
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8150.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8150.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SM8150_MASTER_A1NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup0, SM8150_MASTER_QUP_0, 1, 4, SM8150_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_emac, SM8150_MASTER_EMAC, 1, 8, SM8150_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_ufs_mem, SM8150_MASTER_UFS_MEM, 1, 8, SM8150_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_usb3_0, SM8150_MASTER_USB3, 1, 8, SM8150_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_usb3_1, SM8150_MASTER_USB3_1, 1, 8, SM8150_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_a2noc_cfg, SM8150_MASTER_A2NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SM8150_MASTER_QDSS_BAM, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qspi, SM8150_MASTER_QSPI, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup1, SM8150_MASTER_QUP_1, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup2, SM8150_MASTER_QUP_2, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_sensorss_ahb, SM8150_MASTER_SENSORS_AHB, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_tsif, SM8150_MASTER_TSIF, 1, 4, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qnm_cnoc, SM8150_MASTER_CNOC_A2NOC, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_crypto, SM8150_MASTER_CRYPTO_CORE_0, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_ipa, SM8150_MASTER_IPA, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_pcie3_0, SM8150_MASTER_PCIE, 1, 8, SM8150_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_pcie3_1, SM8150_MASTER_PCIE_1, 1, 8, SM8150_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_qdss_etr, SM8150_MASTER_QDSS_ETR, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_sdc2, SM8150_MASTER_SDCC_2, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_sdc4, SM8150_MASTER_SDCC_4, 1, 8, SM8150_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SM8150_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SM8150_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SM8150_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qnm_npu, SM8150_MASTER_NPU, 1, 32, SM8150_SLAVE_CDSP_MEM_NOC);
+DEFINE_QNODE(qhm_spdm, SM8150_MASTER_SPDM, 1, 4, SM8150_SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qnm_snoc, SM8150_SNOC_CNOC_MAS, 1, 8, SM8150_SLAVE_TLMM_SOUTH, SM8150_SLAVE_CDSP_CFG, SM8150_SLAVE_SPSS_CFG, SM8150_SLAVE_CAMERA_CFG, SM8150_SLAVE_SDCC_4, SM8150_SLAVE_SDCC_2, SM8150_SLAVE_CNOC_MNOC_CFG, SM8150_SLAVE_EMAC_CFG, SM8150_SLAVE_UFS_MEM_CFG, SM8150_SLAVE_TLMM_EAST, SM8150_SLAVE_SSC_CFG, SM8150_SLAVE_SNOC_CFG, SM8150_SLAVE_NORTH_PHY_CFG, SM8150_SLAVE_QUP_0, SM8150_SLAVE_GLM, SM8150_SLAVE_PCIE_1_CFG, SM8150_SLAVE_A2NOC_CFG, SM8150_SLAVE_QDSS_CFG, SM8150_SLAVE_DISPLAY_CFG, SM8150_SLAVE_TCSR, SM8150_SLAVE_CNOC_DDRSS, SM8150_SLAVE_RBCPR_MMCX_CFG, SM8150_SLAVE_NPU_CFG, SM8150_SLAVE_PCIE_0_CFG, SM8150_SLAVE_GRAPHICS_3D_CFG, SM8150_SLAVE_VENUS_CFG, SM8150_SLAVE_TSIF, SM8150_SLAVE_IPA_CFG, SM8150_SLAVE_CLK_CTL, SM8150_SLAVE_AOP, SM8150_SLAVE_QUP_1, SM8150_SLAVE_AHB2PHY_SOUTH, SM8150_SLAVE_USB3_1, SM8150_SLAVE_SERVICE_CNOC, SM8150_SLAVE_UFS_CARD_CFG, SM8150_SLAVE_QUP_2, SM8150_SLAVE_RBCPR_CX_CFG, SM8150_SLAVE_TLMM_WEST, SM8150_SLAVE_A1NOC_CFG, SM8150_SLAVE_AOSS, SM8150_SLAVE_PRNG, SM8150_SLAVE_VSENSE_CTRL_CFG, SM8150_SLAVE_QSPI, SM8150_SLAVE_USB3, SM8150_SLAVE_SPDM_WRAPPER, SM8150_SLAVE_CRYPTO_0_CFG, SM8150_SLAVE_PIMEM_CFG, SM8150_SLAVE_TLMM_NORTH, SM8150_SLAVE_RBCPR_MX_CFG, SM8150_SLAVE_IMEM_CFG);
+DEFINE_QNODE(xm_qdss_dap, SM8150_MASTER_QDSS_DAP, 1, 8, SM8150_SLAVE_TLMM_SOUTH, SM8150_SLAVE_CDSP_CFG, SM8150_SLAVE_SPSS_CFG, SM8150_SLAVE_CAMERA_CFG, SM8150_SLAVE_SDCC_4, SM8150_SLAVE_SDCC_2, SM8150_SLAVE_CNOC_MNOC_CFG, SM8150_SLAVE_EMAC_CFG, SM8150_SLAVE_UFS_MEM_CFG, SM8150_SLAVE_TLMM_EAST, SM8150_SLAVE_SSC_CFG, SM8150_SLAVE_SNOC_CFG, SM8150_SLAVE_NORTH_PHY_CFG, SM8150_SLAVE_QUP_0, SM8150_SLAVE_GLM, SM8150_SLAVE_PCIE_1_CFG, SM8150_SLAVE_A2NOC_CFG, SM8150_SLAVE_QDSS_CFG, SM8150_SLAVE_DISPLAY_CFG, SM8150_SLAVE_TCSR, SM8150_SLAVE_CNOC_DDRSS, SM8150_SLAVE_CNOC_A2NOC, SM8150_SLAVE_RBCPR_MMCX_CFG, SM8150_SLAVE_NPU_CFG, SM8150_SLAVE_PCIE_0_CFG, SM8150_SLAVE_GRAPHICS_3D_CFG, SM8150_SLAVE_VENUS_CFG, SM8150_SLAVE_TSIF, SM8150_SLAVE_IPA_CFG, SM8150_SLAVE_CLK_CTL, SM8150_SLAVE_AOP, SM8150_SLAVE_QUP_1, SM8150_SLAVE_AHB2PHY_SOUTH, SM8150_SLAVE_USB3_1, SM8150_SLAVE_SERVICE_CNOC, SM8150_SLAVE_UFS_CARD_CFG, SM8150_SLAVE_QUP_2, SM8150_SLAVE_RBCPR_CX_CFG, SM8150_SLAVE_TLMM_WEST, SM8150_SLAVE_A1NOC_CFG, SM8150_SLAVE_AOSS, SM8150_SLAVE_PRNG, SM8150_SLAVE_VSENSE_CTRL_CFG, SM8150_SLAVE_QSPI, SM8150_SLAVE_USB3, SM8150_SLAVE_SPDM_WRAPPER, SM8150_SLAVE_CRYPTO_0_CFG, SM8150_SLAVE_PIMEM_CFG, SM8150_SLAVE_TLMM_NORTH, SM8150_SLAVE_RBCPR_MX_CFG, SM8150_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qhm_cnoc_dc_noc, SM8150_MASTER_CNOC_DC_NOC, 1, 4, SM8150_SLAVE_GEM_NOC_CFG, SM8150_SLAVE_LLCC_CFG);
+DEFINE_QNODE(acm_apps, SM8150_MASTER_AMPSS_M0, 2, 32, SM8150_SLAVE_ECC, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(acm_gpu_tcu, SM8150_MASTER_GPU_TCU, 1, 8, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(acm_sys_tcu, SM8150_MASTER_SYS_TCU, 1, 8, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qhm_gemnoc_cfg, SM8150_MASTER_GEM_NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_GEM_NOC, SM8150_SLAVE_MSS_PROC_MS_MPU_CFG);
+DEFINE_QNODE(qnm_cmpnoc, SM8150_MASTER_COMPUTE_NOC, 2, 32, SM8150_SLAVE_ECC, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_gpu, SM8150_MASTER_GRAPHICS_3D, 2, 32, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_mnoc_hf, SM8150_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8150_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SM8150_MASTER_MNOC_SF_MEM_NOC, 1, 32, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_pcie, SM8150_MASTER_GEM_NOC_PCIE_SNOC, 1, 16, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SM8150_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8150_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SM8150_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8150_SLAVE_LLCC);
+DEFINE_QNODE(qxm_ecc, SM8150_MASTER_ECC, 2, 32, SM8150_SLAVE_LLCC);
+DEFINE_QNODE(ipa_core_master, SM8150_MASTER_IPA_CORE, 1, 8, SM8150_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SM8150_MASTER_LLCC, 4, 4, SM8150_SLAVE_EBI_CH0);
+DEFINE_QNODE(qhm_mnoc_cfg, SM8150_MASTER_CNOC_MNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SM8150_MASTER_CAMNOC_HF0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SM8150_MASTER_CAMNOC_HF1, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SM8150_MASTER_CAMNOC_SF, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SM8150_MASTER_MDP_PORT0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SM8150_MASTER_MDP_PORT1, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SM8150_MASTER_ROTATOR, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SM8150_MASTER_VIDEO_P0, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, SM8150_MASTER_VIDEO_P1, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SM8150_MASTER_VIDEO_PROC, 1, 8, SM8150_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, SM8150_MASTER_SNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SM8150_A1NOC_SNOC_MAS, 1, 16, SM8150_SLAVE_SNOC_GEM_NOC_SF, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SM8150_A2NOC_SNOC_MAS, 1, 16, SM8150_SLAVE_SNOC_GEM_NOC_SF, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_PCIE_0, SM8150_SLAVE_PCIE_1, SM8150_SLAVE_TCU, SM8150_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_gemnoc, SM8150_MASTER_GEM_NOC_SNOC, 1, 8, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_TCU, SM8150_SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, SM8150_MASTER_PIMEM, 1, 8, SM8150_SLAVE_SNOC_GEM_NOC_GC, SM8150_SLAVE_OCIMEM);
+DEFINE_QNODE(xm_gic, SM8150_MASTER_GIC, 1, 8, SM8150_SLAVE_SNOC_GEM_NOC_GC, SM8150_SLAVE_OCIMEM);
+DEFINE_QNODE(qns_a1noc_snoc, SM8150_A1NOC_SNOC_SLV, 1, 16, SM8150_A1NOC_SNOC_MAS);
+DEFINE_QNODE(srvc_aggre1_noc, SM8150_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SM8150_A2NOC_SNOC_SLV, 1, 16, SM8150_A2NOC_SNOC_MAS);
+DEFINE_QNODE(qns_pcie_mem_noc, SM8150_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8150_MASTER_GEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SM8150_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SM8150_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qns_cdsp_mem_noc, SM8150_SLAVE_CDSP_MEM_NOC, 2, 32, SM8150_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(qhs_a1_noc_cfg, SM8150_SLAVE_A1NOC_CFG, 1, 4, SM8150_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SM8150_SLAVE_A2NOC_CFG, 1, 4, SM8150_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_ahb2phy_south, SM8150_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_aop, SM8150_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SM8150_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SM8150_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SM8150_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_dsp, SM8150_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SM8150_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mmcx, SM8150_SLAVE_RBCPR_MMCX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SM8150_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SM8150_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SM8150_SLAVE_CNOC_DDRSS, 1, 4, SM8150_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_display_cfg, SM8150_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_emac_cfg, SM8150_SLAVE_EMAC_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SM8150_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SM8150_SLAVE_GRAPHICS_3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SM8150_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SM8150_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SM8150_SLAVE_CNOC_MNOC_CFG, 1, 4, SM8150_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_npu_cfg, SM8150_SLAVE_NPU_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie0_cfg, SM8150_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie1_cfg, SM8150_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_phy_refgen_north, SM8150_SLAVE_NORTH_PHY_CFG, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SM8150_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SM8150_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SM8150_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SM8150_SLAVE_QSPI, 1, 4);
+DEFINE_QNODE(qhs_qupv3_east, SM8150_SLAVE_QUP_2, 1, 4);
+DEFINE_QNODE(qhs_qupv3_north, SM8150_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_qupv3_south, SM8150_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SM8150_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SM8150_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SM8150_SLAVE_SNOC_CFG, 1, 4, SM8150_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SM8150_SLAVE_SPDM_WRAPPER, 1, 4);
+DEFINE_QNODE(qhs_spss_cfg, SM8150_SLAVE_SPSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_ssc_cfg, SM8150_SLAVE_SSC_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SM8150_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_east, SM8150_SLAVE_TLMM_EAST, 1, 4);
+DEFINE_QNODE(qhs_tlmm_north, SM8150_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_south, SM8150_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_west, SM8150_SLAVE_TLMM_WEST, 1, 4);
+DEFINE_QNODE(qhs_tsif, SM8150_SLAVE_TSIF, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SM8150_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SM8150_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SM8150_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SM8150_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SM8150_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8150_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_cnoc_a2noc, SM8150_SLAVE_CNOC_A2NOC, 1, 8, SM8150_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SM8150_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_llcc, SM8150_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_memnoc, SM8150_SLAVE_GEM_NOC_CFG, 1, 4, SM8150_MASTER_GEM_NOC_CFG);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM8150_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_ecc, SM8150_SLAVE_ECC, 1, 32);
+DEFINE_QNODE(qns_gem_noc_snoc, SM8150_SLAVE_GEM_NOC_SNOC, 1, 8, SM8150_MASTER_GEM_NOC_SNOC);
+DEFINE_QNODE(qns_llcc, SM8150_SLAVE_LLCC, 4, 16, SM8150_MASTER_LLCC);
+DEFINE_QNODE(srvc_gemnoc, SM8150_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(ipa_core_slave, SM8150_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SM8150_SLAVE_EBI_CH0, 4, 4);
+DEFINE_QNODE(qns2_mem_noc, SM8150_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM8150_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SM8150_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8150_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SM8150_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SM8150_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SM8150_SNOC_CNOC_SLV, 1, 8, SM8150_SNOC_CNOC_MAS);
+DEFINE_QNODE(qns_gemnoc_gc, SM8150_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8150_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SM8150_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8150_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SM8150_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SM8150_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SM8150_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie_0, SM8150_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_pcie_1, SM8150_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SM8150_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SM8150_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_gem_noc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qxm_camnoc_sf, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_gpu_tcu, &acm_sys_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_sh5, "SH5", false, &acm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &qxs_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &xs_pcie_0, &xs_pcie_1);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
+DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
+DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_qup0,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_EMAC] = &xm_emac,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8150_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_qup0,
+ &bcm_sn14,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QSPI] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_SENSORS_AHB] = &qhm_sensorss_ahb,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_PCIE] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8150_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *camnoc_virt_bcms[] = {
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node *camnoc_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static struct qcom_icc_desc sm8150_camnoc_virt = {
+ .nodes = camnoc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
+ .bcms = camnoc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *compute_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co1,
+};
+
+static struct qcom_icc_node *compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
+};
+
+static struct qcom_icc_desc sm8150_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [SNOC_CNOC_MAS] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy_south,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_NPU_CFG] = &qhs_npu_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_NORTH_PHY_CFG] = &qhs_phy_refgen_north,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI] = &qhs_qspi,
+ [SLAVE_QUP_2] = &qhs_qupv3_east,
+ [SLAVE_QUP_1] = &qhs_qupv3_north,
+ [SLAVE_QUP_0] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_SSC_CFG] = &qhs_ssc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_EAST] = &qhs_tlmm_east,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm_west,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static struct qcom_icc_desc sm8150_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
+};
+
+static struct qcom_icc_desc sm8150_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+ &bcm_sh5,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_AMPSS_M0] = &acm_apps,
+ [MASTER_GPU_TCU] = &acm_gpu_tcu,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_GRAPHICS_3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_ECC] = &qxm_ecc,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_ECC] = &qns_ecc,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static struct qcom_icc_desc sm8150_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static struct qcom_icc_desc sm8150_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static struct qcom_icc_desc sm8150_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [MASTER_MDP_PORT1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static struct qcom_icc_desc sm8150_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn15,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn8,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
+ [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8150_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8150-aggre1-noc",
+ .data = &sm8150_aggre1_noc},
+ { .compatible = "qcom,sm8150-aggre2-noc",
+ .data = &sm8150_aggre2_noc},
+ { .compatible = "qcom,sm8150-camnoc-virt",
+ .data = &sm8150_camnoc_virt},
+ { .compatible = "qcom,sm8150-compute-noc",
+ .data = &sm8150_compute_noc},
+ { .compatible = "qcom,sm8150-config-noc",
+ .data = &sm8150_config_noc},
+ { .compatible = "qcom,sm8150-dc-noc",
+ .data = &sm8150_dc_noc},
+ { .compatible = "qcom,sm8150-gem-noc",
+ .data = &sm8150_gem_noc},
+ { .compatible = "qcom,sm8150-ipa-virt",
+ .data = &sm8150_ipa_virt},
+ { .compatible = "qcom,sm8150-mc-virt",
+ .data = &sm8150_mc_virt},
+ { .compatible = "qcom,sm8150-mmss-noc",
+ .data = &sm8150_mmss_noc},
+ { .compatible = "qcom,sm8150-system-noc",
+ .data = &sm8150_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8150",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SM8150 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
new file mode 100644
index 000000000000..97996f64d799
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8150.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8150_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8150_H
+
+#define SM8150_A1NOC_SNOC_MAS 0
+#define SM8150_A1NOC_SNOC_SLV 1
+#define SM8150_A2NOC_SNOC_MAS 2
+#define SM8150_A2NOC_SNOC_SLV 3
+#define SM8150_MASTER_A1NOC_CFG 4
+#define SM8150_MASTER_A2NOC_CFG 5
+#define SM8150_MASTER_AMPSS_M0 6
+#define SM8150_MASTER_CAMNOC_HF0 7
+#define SM8150_MASTER_CAMNOC_HF0_UNCOMP 8
+#define SM8150_MASTER_CAMNOC_HF1 9
+#define SM8150_MASTER_CAMNOC_HF1_UNCOMP 10
+#define SM8150_MASTER_CAMNOC_SF 11
+#define SM8150_MASTER_CAMNOC_SF_UNCOMP 12
+#define SM8150_MASTER_CNOC_A2NOC 13
+#define SM8150_MASTER_CNOC_DC_NOC 14
+#define SM8150_MASTER_CNOC_MNOC_CFG 15
+#define SM8150_MASTER_COMPUTE_NOC 16
+#define SM8150_MASTER_CRYPTO_CORE_0 17
+#define SM8150_MASTER_ECC 18
+#define SM8150_MASTER_EMAC 19
+#define SM8150_MASTER_GEM_NOC_CFG 20
+#define SM8150_MASTER_GEM_NOC_PCIE_SNOC 21
+#define SM8150_MASTER_GEM_NOC_SNOC 22
+#define SM8150_MASTER_GIC 23
+#define SM8150_MASTER_GPU_TCU 24
+#define SM8150_MASTER_GRAPHICS_3D 25
+#define SM8150_MASTER_IPA 26
+#define SM8150_MASTER_IPA_CORE 27
+#define SM8150_MASTER_LLCC 28
+#define SM8150_MASTER_MDP_PORT0 29
+#define SM8150_MASTER_MDP_PORT1 30
+#define SM8150_MASTER_MNOC_HF_MEM_NOC 31
+#define SM8150_MASTER_MNOC_SF_MEM_NOC 32
+#define SM8150_MASTER_NPU 33
+#define SM8150_MASTER_PCIE 34
+#define SM8150_MASTER_PCIE_1 35
+#define SM8150_MASTER_PIMEM 36
+#define SM8150_MASTER_QDSS_BAM 37
+#define SM8150_MASTER_QDSS_DAP 38
+#define SM8150_MASTER_QDSS_ETR 39
+#define SM8150_MASTER_QSPI 40
+#define SM8150_MASTER_QUP_0 41
+#define SM8150_MASTER_QUP_1 42
+#define SM8150_MASTER_QUP_2 43
+#define SM8150_MASTER_ROTATOR 44
+#define SM8150_MASTER_SDCC_2 45
+#define SM8150_MASTER_SDCC_4 46
+#define SM8150_MASTER_SENSORS_AHB 47
+#define SM8150_MASTER_SNOC_CFG 48
+#define SM8150_MASTER_SNOC_GC_MEM_NOC 49
+#define SM8150_MASTER_SNOC_SF_MEM_NOC 50
+#define SM8150_MASTER_SPDM 51
+#define SM8150_MASTER_SYS_TCU 52
+#define SM8150_MASTER_TSIF 53
+#define SM8150_MASTER_UFS_MEM 54
+#define SM8150_MASTER_USB3 55
+#define SM8150_MASTER_USB3_1 56
+#define SM8150_MASTER_VIDEO_P0 57
+#define SM8150_MASTER_VIDEO_P1 58
+#define SM8150_MASTER_VIDEO_PROC 59
+#define SM8150_SLAVE_A1NOC_CFG 60
+#define SM8150_SLAVE_A2NOC_CFG 61
+#define SM8150_SLAVE_AHB2PHY_SOUTH 62
+#define SM8150_SLAVE_ANOC_PCIE_GEM_NOC 63
+#define SM8150_SLAVE_AOP 64
+#define SM8150_SLAVE_AOSS 65
+#define SM8150_SLAVE_APPSS 66
+#define SM8150_SLAVE_CAMERA_CFG 67
+#define SM8150_SLAVE_CAMNOC_UNCOMP 68
+#define SM8150_SLAVE_CDSP_CFG 69
+#define SM8150_SLAVE_CDSP_MEM_NOC 70
+#define SM8150_SLAVE_CLK_CTL 71
+#define SM8150_SLAVE_CNOC_A2NOC 72
+#define SM8150_SLAVE_CNOC_DDRSS 73
+#define SM8150_SLAVE_CNOC_MNOC_CFG 74
+#define SM8150_SLAVE_CRYPTO_0_CFG 75
+#define SM8150_SLAVE_DISPLAY_CFG 76
+#define SM8150_SLAVE_EBI_CH0 77
+#define SM8150_SLAVE_ECC 78
+#define SM8150_SLAVE_EMAC_CFG 79
+#define SM8150_SLAVE_GEM_NOC_CFG 80
+#define SM8150_SLAVE_GEM_NOC_SNOC 81
+#define SM8150_SLAVE_GLM 82
+#define SM8150_SLAVE_GRAPHICS_3D_CFG 83
+#define SM8150_SLAVE_IMEM_CFG 84
+#define SM8150_SLAVE_IPA_CFG 85
+#define SM8150_SLAVE_IPA_CORE 86
+#define SM8150_SLAVE_LLCC 87
+#define SM8150_SLAVE_LLCC_CFG 88
+#define SM8150_SLAVE_MNOC_HF_MEM_NOC 89
+#define SM8150_SLAVE_MNOC_SF_MEM_NOC 90
+#define SM8150_SLAVE_MSS_PROC_MS_MPU_CFG 91
+#define SM8150_SLAVE_NORTH_PHY_CFG 92
+#define SM8150_SLAVE_NPU_CFG 93
+#define SM8150_SLAVE_OCIMEM 94
+#define SM8150_SLAVE_PCIE_0 95
+#define SM8150_SLAVE_PCIE_0_CFG 96
+#define SM8150_SLAVE_PCIE_1 97
+#define SM8150_SLAVE_PCIE_1_CFG 98
+#define SM8150_SLAVE_PIMEM 99
+#define SM8150_SLAVE_PIMEM_CFG 100
+#define SM8150_SLAVE_PRNG 101
+#define SM8150_SLAVE_QDSS_CFG 102
+#define SM8150_SLAVE_QDSS_STM 103
+#define SM8150_SLAVE_QSPI 104
+#define SM8150_SLAVE_QUP_0 105
+#define SM8150_SLAVE_QUP_1 106
+#define SM8150_SLAVE_QUP_2 107
+#define SM8150_SLAVE_RBCPR_CX_CFG 108
+#define SM8150_SLAVE_RBCPR_MMCX_CFG 109
+#define SM8150_SLAVE_RBCPR_MX_CFG 110
+#define SM8150_SLAVE_SDCC_2 111
+#define SM8150_SLAVE_SDCC_4 112
+#define SM8150_SLAVE_SERVICE_A1NOC 113
+#define SM8150_SLAVE_SERVICE_A2NOC 114
+#define SM8150_SLAVE_SERVICE_CNOC 115
+#define SM8150_SLAVE_SERVICE_GEM_NOC 116
+#define SM8150_SLAVE_SERVICE_MNOC 117
+#define SM8150_SLAVE_SERVICE_SNOC 118
+#define SM8150_SLAVE_SNOC_CFG 119
+#define SM8150_SLAVE_SNOC_GEM_NOC_GC 120
+#define SM8150_SLAVE_SNOC_GEM_NOC_SF 121
+#define SM8150_SLAVE_SPDM_WRAPPER 122
+#define SM8150_SLAVE_SPSS_CFG 123
+#define SM8150_SLAVE_SSC_CFG 124
+#define SM8150_SLAVE_TCSR 125
+#define SM8150_SLAVE_TCU 126
+#define SM8150_SLAVE_TLMM_EAST 127
+#define SM8150_SLAVE_TLMM_NORTH 128
+#define SM8150_SLAVE_TLMM_SOUTH 129
+#define SM8150_SLAVE_TLMM_WEST 130
+#define SM8150_SLAVE_TSIF 131
+#define SM8150_SLAVE_UFS_CARD_CFG 132
+#define SM8150_SLAVE_UFS_MEM_CFG 133
+#define SM8150_SLAVE_USB3 134
+#define SM8150_SLAVE_USB3_1 135
+#define SM8150_SLAVE_VENUS_CFG 136
+#define SM8150_SLAVE_VSENSE_CTRL_CFG 137
+#define SM8150_SNOC_CNOC_MAS 138
+#define SM8150_SNOC_CNOC_SLV 139
+#define SM8150_MASTER_OSM_L3_APPS 140
+#define SM8150_SLAVE_OSM_L3 141
+
+#endif
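These IDs double as indexes into the per-NoC node arrays registered by the provider, which is what lets of_icc_xlate_onecell() resolve a consumer's single-cell interconnect specifier. Roughly, and as an assumption condensed from the framework helper rather than code in this series, the translation amounts to:

/* Sketch of one-cell translation; the real helper is
 * of_icc_xlate_onecell() in the interconnect core.
 */
static struct icc_node *xlate_onecell_sketch(struct of_phandle_args *spec,
					     void *data)
{
	struct icc_onecell_data *icc_data = data; /* filled in by qnoc_probe() */
	unsigned int idx = spec->args[0];	  /* e.g. MASTER_LLCC */

	if (idx >= icc_data->num_nodes)
		return ERR_PTR(-EINVAL);

	return icc_data->nodes[idx];
}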
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
new file mode 100644
index 000000000000..cc558fec74e3
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8250.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8250.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SM8250_MASTER_A1NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qspi, SM8250_MASTER_QSPI_0, 1, 4, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup1, SM8250_MASTER_QUP_1, 1, 4, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup2, SM8250_MASTER_QUP_2, 1, 4, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_tsif, SM8250_MASTER_TSIF, 1, 4, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_pcie3_modem, SM8250_MASTER_PCIE_2, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1);
+DEFINE_QNODE(xm_sdc4, SM8250_MASTER_SDCC_4, 1, 8, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_ufs_mem, SM8250_MASTER_UFS_MEM, 1, 8, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_usb3_0, SM8250_MASTER_USB3, 1, 8, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(xm_usb3_1, SM8250_MASTER_USB3_1, 1, 8, SM8250_A1NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_a2noc_cfg, SM8250_MASTER_A2NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SM8250_MASTER_QDSS_BAM, 1, 4, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qhm_qup0, SM8250_MASTER_QUP_0, 1, 4, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qnm_cnoc, SM8250_MASTER_CNOC_A2NOC, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_crypto, SM8250_MASTER_CRYPTO_CORE_0, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qxm_ipa, SM8250_MASTER_IPA, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_pcie3_0, SM8250_MASTER_PCIE, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_pcie3_1, SM8250_MASTER_PCIE_1, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(xm_qdss_etr, SM8250_MASTER_QDSS_ETR, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_sdc2, SM8250_MASTER_SDCC_2, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(xm_ufs_card, SM8250_MASTER_UFS_CARD, 1, 8, SM8250_A2NOC_SNOC_SLV);
+DEFINE_QNODE(qnm_npu, SM8250_MASTER_NPU, 2, 32, SM8250_SLAVE_CDSP_MEM_NOC);
+DEFINE_QNODE(qnm_snoc, SM8250_SNOC_CNOC_MAS, 1, 8, SM8250_SLAVE_CDSP_CFG, SM8250_SLAVE_CAMERA_CFG, SM8250_SLAVE_TLMM_SOUTH, SM8250_SLAVE_TLMM_NORTH, SM8250_SLAVE_SDCC_4, SM8250_SLAVE_TLMM_WEST, SM8250_SLAVE_SDCC_2, SM8250_SLAVE_CNOC_MNOC_CFG, SM8250_SLAVE_UFS_MEM_CFG, SM8250_SLAVE_SNOC_CFG, SM8250_SLAVE_PDM, SM8250_SLAVE_CX_RDPM, SM8250_SLAVE_PCIE_1_CFG, SM8250_SLAVE_A2NOC_CFG, SM8250_SLAVE_QDSS_CFG, SM8250_SLAVE_DISPLAY_CFG, SM8250_SLAVE_PCIE_2_CFG, SM8250_SLAVE_TCSR, SM8250_SLAVE_DCC_CFG, SM8250_SLAVE_CNOC_DDRSS, SM8250_SLAVE_IPC_ROUTER_CFG, SM8250_SLAVE_PCIE_0_CFG, SM8250_SLAVE_RBCPR_MMCX_CFG, SM8250_SLAVE_NPU_CFG, SM8250_SLAVE_AHB2PHY_SOUTH, SM8250_SLAVE_AHB2PHY_NORTH, SM8250_SLAVE_GRAPHICS_3D_CFG, SM8250_SLAVE_VENUS_CFG, SM8250_SLAVE_TSIF, SM8250_SLAVE_IPA_CFG, SM8250_SLAVE_IMEM_CFG, SM8250_SLAVE_USB3, SM8250_SLAVE_SERVICE_CNOC, SM8250_SLAVE_UFS_CARD_CFG, SM8250_SLAVE_USB3_1, SM8250_SLAVE_LPASS, SM8250_SLAVE_RBCPR_CX_CFG, SM8250_SLAVE_A1NOC_CFG, SM8250_SLAVE_AOSS, SM8250_SLAVE_PRNG, SM8250_SLAVE_VSENSE_CTRL_CFG, SM8250_SLAVE_QSPI_0, SM8250_SLAVE_CRYPTO_0_CFG, SM8250_SLAVE_PIMEM_CFG, SM8250_SLAVE_RBCPR_MX_CFG, SM8250_SLAVE_QUP_0, SM8250_SLAVE_QUP_1, SM8250_SLAVE_QUP_2, SM8250_SLAVE_CLK_CTL);
+DEFINE_QNODE(xm_qdss_dap, SM8250_MASTER_QDSS_DAP, 1, 8, SM8250_SLAVE_CDSP_CFG, SM8250_SLAVE_CAMERA_CFG, SM8250_SLAVE_TLMM_SOUTH, SM8250_SLAVE_TLMM_NORTH, SM8250_SLAVE_SDCC_4, SM8250_SLAVE_TLMM_WEST, SM8250_SLAVE_SDCC_2, SM8250_SLAVE_CNOC_MNOC_CFG, SM8250_SLAVE_UFS_MEM_CFG, SM8250_SLAVE_SNOC_CFG, SM8250_SLAVE_PDM, SM8250_SLAVE_CX_RDPM, SM8250_SLAVE_PCIE_1_CFG, SM8250_SLAVE_A2NOC_CFG, SM8250_SLAVE_QDSS_CFG, SM8250_SLAVE_DISPLAY_CFG, SM8250_SLAVE_PCIE_2_CFG, SM8250_SLAVE_TCSR, SM8250_SLAVE_DCC_CFG, SM8250_SLAVE_CNOC_DDRSS, SM8250_SLAVE_IPC_ROUTER_CFG, SM8250_SLAVE_CNOC_A2NOC, SM8250_SLAVE_PCIE_0_CFG, SM8250_SLAVE_RBCPR_MMCX_CFG, SM8250_SLAVE_NPU_CFG, SM8250_SLAVE_AHB2PHY_SOUTH, SM8250_SLAVE_AHB2PHY_NORTH, SM8250_SLAVE_GRAPHICS_3D_CFG, SM8250_SLAVE_VENUS_CFG, SM8250_SLAVE_TSIF, SM8250_SLAVE_IPA_CFG, SM8250_SLAVE_IMEM_CFG, SM8250_SLAVE_USB3, SM8250_SLAVE_SERVICE_CNOC, SM8250_SLAVE_UFS_CARD_CFG, SM8250_SLAVE_USB3_1, SM8250_SLAVE_LPASS, SM8250_SLAVE_RBCPR_CX_CFG, SM8250_SLAVE_A1NOC_CFG, SM8250_SLAVE_AOSS, SM8250_SLAVE_PRNG, SM8250_SLAVE_VSENSE_CTRL_CFG, SM8250_SLAVE_QSPI_0, SM8250_SLAVE_CRYPTO_0_CFG, SM8250_SLAVE_PIMEM_CFG, SM8250_SLAVE_RBCPR_MX_CFG, SM8250_SLAVE_QUP_0, SM8250_SLAVE_QUP_1, SM8250_SLAVE_QUP_2, SM8250_SLAVE_CLK_CTL);
+DEFINE_QNODE(qhm_cnoc_dc_noc, SM8250_MASTER_CNOC_DC_NOC, 1, 4, SM8250_SLAVE_GEM_NOC_CFG, SM8250_SLAVE_LLCC_CFG);
+DEFINE_QNODE(alm_gpu_tcu, SM8250_MASTER_GPU_TCU, 1, 8, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(alm_sys_tcu, SM8250_MASTER_SYS_TCU, 1, 8, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(chm_apps, SM8250_MASTER_AMPSS_M0, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC, SM8250_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(qhm_gemnoc_cfg, SM8250_MASTER_GEM_NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_GEM_NOC_2, SM8250_SLAVE_SERVICE_GEM_NOC_1, SM8250_SLAVE_SERVICE_GEM_NOC);
+DEFINE_QNODE(qnm_cmpnoc, SM8250_MASTER_COMPUTE_NOC, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_gpu, SM8250_MASTER_GRAPHICS_3D, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_mnoc_hf, SM8250_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8250_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SM8250_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_pcie, SM8250_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SM8250_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8250_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SM8250_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC, SM8250_SLAVE_MEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(ipa_core_master, SM8250_MASTER_IPA_CORE, 1, 8, SM8250_SLAVE_IPA_CORE);
+DEFINE_QNODE(llcc_mc, SM8250_MASTER_LLCC, 4, 4, SM8250_SLAVE_EBI_CH0);
+DEFINE_QNODE(qhm_mnoc_cfg, SM8250_MASTER_CNOC_MNOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qnm_camnoc_hf, SM8250_MASTER_CAMNOC_HF, 2, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_icp, SM8250_MASTER_CAMNOC_ICP, 1, 8, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_camnoc_sf, SM8250_MASTER_CAMNOC_SF, 2, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video0, SM8250_MASTER_VIDEO_P0, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video1, SM8250_MASTER_VIDEO_P1, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qnm_video_cvp, SM8250_MASTER_VIDEO_PROC, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SM8250_MASTER_MDP_PORT0, 1, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SM8250_MASTER_MDP_PORT1, 1, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SM8250_MASTER_ROTATOR, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(amm_npu_sys, SM8250_MASTER_NPU_SYS, 4, 32, SM8250_SLAVE_NPU_COMPUTE_NOC);
+DEFINE_QNODE(amm_npu_sys_cdp_w, SM8250_MASTER_NPU_CDP, 2, 16, SM8250_SLAVE_NPU_COMPUTE_NOC);
+DEFINE_QNODE(qhm_cfg, SM8250_MASTER_NPU_NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_NPU_NOC, SM8250_SLAVE_ISENSE_CFG, SM8250_SLAVE_NPU_LLM_CFG, SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG, SM8250_SLAVE_NPU_CP, SM8250_SLAVE_NPU_TCM, SM8250_SLAVE_NPU_CAL_DP0, SM8250_SLAVE_NPU_CAL_DP1, SM8250_SLAVE_NPU_DPM);
+DEFINE_QNODE(qhm_snoc_cfg, SM8250_MASTER_SNOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SM8250_A1NOC_SNOC_MAS, 1, 16, SM8250_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_aggre2_noc, SM8250_A2NOC_SNOC_MAS, 1, 16, SM8250_SLAVE_SNOC_GEM_NOC_SF);
+DEFINE_QNODE(qnm_gemnoc, SM8250_MASTER_GEM_NOC_SNOC, 1, 16, SM8250_SLAVE_PIMEM, SM8250_SLAVE_OCIMEM, SM8250_SLAVE_APPSS, SM8250_SNOC_CNOC_SLV, SM8250_SLAVE_TCU, SM8250_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_gemnoc_pcie, SM8250_MASTER_GEM_NOC_PCIE_SNOC, 1, 8, SM8250_SLAVE_PCIE_2, SM8250_SLAVE_PCIE_0, SM8250_SLAVE_PCIE_1);
+DEFINE_QNODE(qxm_pimem, SM8250_MASTER_PIMEM, 1, 8, SM8250_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(xm_gic, SM8250_MASTER_GIC, 1, 8, SM8250_SLAVE_SNOC_GEM_NOC_GC);
+DEFINE_QNODE(qns_a1noc_snoc, SM8250_A1NOC_SNOC_SLV, 1, 16, SM8250_A1NOC_SNOC_MAS);
+DEFINE_QNODE(qns_pcie_modem_mem_noc, SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1, 1, 16, SM8250_MASTER_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(srvc_aggre1_noc, SM8250_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SM8250_A2NOC_SNOC_SLV, 1, 16, SM8250_A2NOC_SNOC_MAS);
+DEFINE_QNODE(qns_pcie_mem_noc, SM8250_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8250_MASTER_ANOC_PCIE_GEM_NOC);
+DEFINE_QNODE(srvc_aggre2_noc, SM8250_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_cdsp_mem_noc, SM8250_SLAVE_CDSP_MEM_NOC, 2, 32, SM8250_MASTER_COMPUTE_NOC);
+DEFINE_QNODE(qhs_a1_noc_cfg, SM8250_SLAVE_A1NOC_CFG, 1, 4, SM8250_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SM8250_SLAVE_A2NOC_CFG, 1, 4, SM8250_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_ahb2phy0, SM8250_SLAVE_AHB2PHY_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_ahb2phy1, SM8250_SLAVE_AHB2PHY_NORTH, 1, 4);
+DEFINE_QNODE(qhs_aoss, SM8250_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SM8250_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SM8250_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_dsp, SM8250_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SM8250_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mmcx, SM8250_SLAVE_RBCPR_MMCX_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_mx, SM8250_SLAVE_RBCPR_MX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SM8250_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_cx_rdpm, SM8250_SLAVE_CX_RDPM, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SM8250_SLAVE_DCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_ddrss_cfg, SM8250_SLAVE_CNOC_DDRSS, 1, 4, SM8250_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_display_cfg, SM8250_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SM8250_SLAVE_GRAPHICS_3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SM8250_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SM8250_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipc_router, SM8250_SLAVE_IPC_ROUTER_CFG, 1, 4);
+DEFINE_QNODE(qhs_lpass_cfg, SM8250_SLAVE_LPASS, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SM8250_SLAVE_CNOC_MNOC_CFG, 1, 4, SM8250_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_npu_cfg, SM8250_SLAVE_NPU_CFG, 1, 4, SM8250_MASTER_NPU_NOC_CFG);
+DEFINE_QNODE(qhs_pcie0_cfg, SM8250_SLAVE_PCIE_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie1_cfg, SM8250_SLAVE_PCIE_1_CFG, 1, 4);
+DEFINE_QNODE(qhs_pcie_modem_cfg, SM8250_SLAVE_PCIE_2_CFG, 1, 4);
+DEFINE_QNODE(qhs_pdm, SM8250_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SM8250_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SM8250_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SM8250_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qspi, SM8250_SLAVE_QSPI_0, 1, 4);
+DEFINE_QNODE(qhs_qup0, SM8250_SLAVE_QUP_0, 1, 4);
+DEFINE_QNODE(qhs_qup1, SM8250_SLAVE_QUP_1, 1, 4);
+DEFINE_QNODE(qhs_qup2, SM8250_SLAVE_QUP_2, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SM8250_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SM8250_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SM8250_SLAVE_SNOC_CFG, 1, 4, SM8250_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_tcsr, SM8250_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm0, SM8250_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm1, SM8250_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm2, SM8250_SLAVE_TLMM_WEST, 1, 4);
+DEFINE_QNODE(qhs_tsif, SM8250_SLAVE_TSIF, 1, 4);
+DEFINE_QNODE(qhs_ufs_card_cfg, SM8250_SLAVE_UFS_CARD_CFG, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SM8250_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SM8250_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_usb3_1, SM8250_SLAVE_USB3_1, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SM8250_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8250_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_cnoc_a2noc, SM8250_SLAVE_CNOC_A2NOC, 1, 8, SM8250_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SM8250_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_llcc, SM8250_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_memnoc, SM8250_SLAVE_GEM_NOC_CFG, 1, 4, SM8250_MASTER_GEM_NOC_CFG);
+DEFINE_QNODE(qns_gem_noc_snoc, SM8250_SLAVE_GEM_NOC_SNOC, 1, 16, SM8250_MASTER_GEM_NOC_SNOC);
+DEFINE_QNODE(qns_llcc, SM8250_SLAVE_LLCC, 4, 16, SM8250_MASTER_LLCC);
+DEFINE_QNODE(qns_sys_pcie, SM8250_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SM8250_MASTER_GEM_NOC_PCIE_SNOC);
+DEFINE_QNODE(srvc_even_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
+DEFINE_QNODE(srvc_odd_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
+DEFINE_QNODE(srvc_sys_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC, 1, 4);
+DEFINE_QNODE(ipa_core_slave, SM8250_SLAVE_IPA_CORE, 1, 8);
+DEFINE_QNODE(ebi, SM8250_SLAVE_EBI_CH0, 4, 4);
+DEFINE_QNODE(qns_mem_noc_hf, SM8250_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_sf, SM8250_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SM8250_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_cal_dp0, SM8250_SLAVE_NPU_CAL_DP0, 1, 4);
+DEFINE_QNODE(qhs_cal_dp1, SM8250_SLAVE_NPU_CAL_DP1, 1, 4);
+DEFINE_QNODE(qhs_cp, SM8250_SLAVE_NPU_CP, 1, 4);
+DEFINE_QNODE(qhs_dma_bwmon, SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
+DEFINE_QNODE(qhs_dpm, SM8250_SLAVE_NPU_DPM, 1, 4);
+DEFINE_QNODE(qhs_isense, SM8250_SLAVE_ISENSE_CFG, 1, 4);
+DEFINE_QNODE(qhs_llm, SM8250_SLAVE_NPU_LLM_CFG, 1, 4);
+DEFINE_QNODE(qhs_tcm, SM8250_SLAVE_NPU_TCM, 1, 4);
+DEFINE_QNODE(qns_npu_sys, SM8250_SLAVE_NPU_COMPUTE_NOC, 2, 32);
+DEFINE_QNODE(srvc_noc, SM8250_SLAVE_SERVICE_NPU_NOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SM8250_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SM8250_SNOC_CNOC_SLV, 1, 8, SM8250_SNOC_CNOC_MAS);
+DEFINE_QNODE(qns_gemnoc_gc, SM8250_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8250_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_gemnoc_sf, SM8250_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8250_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SM8250_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SM8250_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SM8250_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_pcie_0, SM8250_SLAVE_PCIE_0, 1, 8);
+DEFINE_QNODE(xs_pcie_1, SM8250_SLAVE_PCIE_1, 1, 8);
+DEFINE_QNODE(xs_pcie_modem, SM8250_SLAVE_PCIE_2, 1, 8);
+DEFINE_QNODE(xs_qdss_stm, SM8250_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SM8250_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
+DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2, &qhm_qup0);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp);
+DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
+DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_lpass_cfg, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pcie_modem_cfg, &qhs_pdm, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm0, &qhs_tlmm1, &qhs_tlmm2, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
+DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie_modem);
+DEFINE_QBCM(bcm_sn6, "SN6", false, &xs_pcie_0, &xs_pcie_1);
+DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
+DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc);
+DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc);
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_qup0,
+ &bcm_sn12,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_PCIE_2] = &xm_pcie3_modem,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC_1] = &qns_pcie_modem_mem_noc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8250_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_qup0,
+ &bcm_sn12,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_PCIE] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8250_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *compute_noc_bcms[] = {
+ &bcm_co0,
+ &bcm_co2,
+};
+
+static struct qcom_icc_node *compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
+};
+
+static struct qcom_icc_desc sm8250_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [SNOC_CNOC_MAS] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_NPU_CFG] = &qhs_npu_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie_modem_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm0,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm1,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm2,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static struct qcom_icc_desc sm8250_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
+};
+
+static struct qcom_icc_desc sm8250_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh4,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_AMPSS_M0] = &chm_apps,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_GRAPHICS_3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+};
+
+static struct qcom_icc_desc sm8250_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *ipa_virt_bcms[] = {
+ &bcm_ip0,
+};
+
+static struct qcom_icc_node *ipa_virt_nodes[] = {
+ [MASTER_IPA_CORE] = &ipa_core_master,
+ [SLAVE_IPA_CORE] = &ipa_core_slave,
+};
+
+static struct qcom_icc_desc sm8250_ipa_virt = {
+ .nodes = ipa_virt_nodes,
+ .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
+ .bcms = ipa_virt_bcms,
+ .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static struct qcom_icc_desc sm8250_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_P1] = &qnm_video1,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [MASTER_MDP_PORT1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static struct qcom_icc_desc sm8250_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *npu_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *npu_noc_nodes[] = {
+ [MASTER_NPU_SYS] = &amm_npu_sys,
+ [MASTER_NPU_CDP] = &amm_npu_sys_cdp_w,
+ [MASTER_NPU_NOC_CFG] = &qhm_cfg,
+ [SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
+ [SLAVE_NPU_CAL_DP1] = &qhs_cal_dp1,
+ [SLAVE_NPU_CP] = &qhs_cp,
+ [SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
+ [SLAVE_NPU_DPM] = &qhs_dpm,
+ [SLAVE_ISENSE_CFG] = &qhs_isense,
+ [SLAVE_NPU_LLM_CFG] = &qhs_llm,
+ [SLAVE_NPU_TCM] = &qhs_tcm,
+ [SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
+ [SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
+};
+
+static struct qcom_icc_desc sm8250_npu_noc = {
+ .nodes = npu_noc_nodes,
+ .num_nodes = ARRAY_SIZE(npu_noc_nodes),
+ .bcms = npu_noc_bcms,
+ .num_bcms = ARRAY_SIZE(npu_noc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn11,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn6,
+ &bcm_sn7,
+ &bcm_sn8,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
+ [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_2] = &xs_pcie_modem,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8250_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8250-aggre1-noc",
+ .data = &sm8250_aggre1_noc},
+ { .compatible = "qcom,sm8250-aggre2-noc",
+ .data = &sm8250_aggre2_noc},
+ { .compatible = "qcom,sm8250-compute-noc",
+ .data = &sm8250_compute_noc},
+ { .compatible = "qcom,sm8250-config-noc",
+ .data = &sm8250_config_noc},
+ { .compatible = "qcom,sm8250-dc-noc",
+ .data = &sm8250_dc_noc},
+ { .compatible = "qcom,sm8250-gem-noc",
+ .data = &sm8250_gem_noc},
+ { .compatible = "qcom,sm8250-ipa-virt",
+ .data = &sm8250_ipa_virt},
+ { .compatible = "qcom,sm8250-mc-virt",
+ .data = &sm8250_mc_virt},
+ { .compatible = "qcom,sm8250-mmss-noc",
+ .data = &sm8250_mmss_noc},
+ { .compatible = "qcom,sm8250-npu-noc",
+ .data = &sm8250_npu_noc},
+ { .compatible = "qcom,sm8250-system-noc",
+ .data = &sm8250_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8250",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SM8250 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/sm8250.h b/drivers/interconnect/qcom/sm8250.h
new file mode 100644
index 000000000000..b31fb431a20f
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8250.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm SM8250 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8250_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8250_H
+
+#define SM8250_A1NOC_SNOC_MAS 0
+#define SM8250_A1NOC_SNOC_SLV 1
+#define SM8250_A2NOC_SNOC_MAS 2
+#define SM8250_A2NOC_SNOC_SLV 3
+#define SM8250_MASTER_A1NOC_CFG 4
+#define SM8250_MASTER_A2NOC_CFG 5
+#define SM8250_MASTER_AMPSS_M0 6
+#define SM8250_MASTER_ANOC_PCIE_GEM_NOC 7
+#define SM8250_MASTER_CAMNOC_HF 8
+#define SM8250_MASTER_CAMNOC_ICP 9
+#define SM8250_MASTER_CAMNOC_SF 10
+#define SM8250_MASTER_CNOC_A2NOC 11
+#define SM8250_MASTER_CNOC_DC_NOC 12
+#define SM8250_MASTER_CNOC_MNOC_CFG 13
+#define SM8250_MASTER_COMPUTE_NOC 14
+#define SM8250_MASTER_CRYPTO_CORE_0 15
+#define SM8250_MASTER_GEM_NOC_CFG 16
+#define SM8250_MASTER_GEM_NOC_PCIE_SNOC 17
+#define SM8250_MASTER_GEM_NOC_SNOC 18
+#define SM8250_MASTER_GIC 19
+#define SM8250_MASTER_GPU_TCU 20
+#define SM8250_MASTER_GRAPHICS_3D 21
+#define SM8250_MASTER_IPA 22
+#define SM8250_MASTER_IPA_CORE 23
+#define SM8250_MASTER_LLCC 24
+#define SM8250_MASTER_MDP_PORT0 25
+#define SM8250_MASTER_MDP_PORT1 26
+#define SM8250_MASTER_MNOC_HF_MEM_NOC 27
+#define SM8250_MASTER_MNOC_SF_MEM_NOC 28
+#define SM8250_MASTER_NPU 29
+#define SM8250_MASTER_NPU_CDP 30
+#define SM8250_MASTER_NPU_NOC_CFG 31
+#define SM8250_MASTER_NPU_SYS 32
+#define SM8250_MASTER_PCIE 33
+#define SM8250_MASTER_PCIE_1 34
+#define SM8250_MASTER_PCIE_2 35
+#define SM8250_MASTER_PIMEM 36
+#define SM8250_MASTER_QDSS_BAM 37
+#define SM8250_MASTER_QDSS_DAP 38
+#define SM8250_MASTER_QDSS_ETR 39
+#define SM8250_MASTER_QSPI_0 40
+#define SM8250_MASTER_QUP_0 41
+#define SM8250_MASTER_QUP_1 42
+#define SM8250_MASTER_QUP_2 43
+#define SM8250_MASTER_ROTATOR 44
+#define SM8250_MASTER_SDCC_2 45
+#define SM8250_MASTER_SDCC_4 46
+#define SM8250_MASTER_SNOC_CFG 47
+#define SM8250_MASTER_SNOC_GC_MEM_NOC 48
+#define SM8250_MASTER_SNOC_SF_MEM_NOC 49
+#define SM8250_MASTER_SYS_TCU 50
+#define SM8250_MASTER_TSIF 51
+#define SM8250_MASTER_UFS_CARD 52
+#define SM8250_MASTER_UFS_MEM 53
+#define SM8250_MASTER_USB3 54
+#define SM8250_MASTER_USB3_1 55
+#define SM8250_MASTER_VIDEO_P0 56
+#define SM8250_MASTER_VIDEO_P1 57
+#define SM8250_MASTER_VIDEO_PROC 58
+#define SM8250_SLAVE_A1NOC_CFG 59
+#define SM8250_SLAVE_A2NOC_CFG 60
+#define SM8250_SLAVE_AHB2PHY_NORTH 61
+#define SM8250_SLAVE_AHB2PHY_SOUTH 62
+#define SM8250_SLAVE_ANOC_PCIE_GEM_NOC 63
+#define SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1 64
+#define SM8250_SLAVE_AOSS 65
+#define SM8250_SLAVE_APPSS 66
+#define SM8250_SLAVE_CAMERA_CFG 67
+#define SM8250_SLAVE_CDSP_CFG 68
+#define SM8250_SLAVE_CDSP_MEM_NOC 69
+#define SM8250_SLAVE_CLK_CTL 70
+#define SM8250_SLAVE_CNOC_A2NOC 71
+#define SM8250_SLAVE_CNOC_DDRSS 72
+#define SM8250_SLAVE_CNOC_MNOC_CFG 73
+#define SM8250_SLAVE_CRYPTO_0_CFG 74
+#define SM8250_SLAVE_CX_RDPM 75
+#define SM8250_SLAVE_DCC_CFG 76
+#define SM8250_SLAVE_DISPLAY_CFG 77
+#define SM8250_SLAVE_EBI_CH0 78
+#define SM8250_SLAVE_GEM_NOC_CFG 79
+#define SM8250_SLAVE_GEM_NOC_SNOC 80
+#define SM8250_SLAVE_GRAPHICS_3D_CFG 81
+#define SM8250_SLAVE_IMEM_CFG 82
+#define SM8250_SLAVE_IPA_CFG 83
+#define SM8250_SLAVE_IPA_CORE 84
+#define SM8250_SLAVE_IPC_ROUTER_CFG 85
+#define SM8250_SLAVE_ISENSE_CFG 86
+#define SM8250_SLAVE_LLCC 87
+#define SM8250_SLAVE_LLCC_CFG 88
+#define SM8250_SLAVE_LPASS 89
+#define SM8250_SLAVE_MEM_NOC_PCIE_SNOC 90
+#define SM8250_SLAVE_MNOC_HF_MEM_NOC 91
+#define SM8250_SLAVE_MNOC_SF_MEM_NOC 92
+#define SM8250_SLAVE_NPU_CAL_DP0 93
+#define SM8250_SLAVE_NPU_CAL_DP1 94
+#define SM8250_SLAVE_NPU_CFG 95
+#define SM8250_SLAVE_NPU_COMPUTE_NOC 96
+#define SM8250_SLAVE_NPU_CP 97
+#define SM8250_SLAVE_NPU_DPM 98
+#define SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG 99
+#define SM8250_SLAVE_NPU_LLM_CFG 100
+#define SM8250_SLAVE_NPU_TCM 101
+#define SM8250_SLAVE_OCIMEM 102
+#define SM8250_SLAVE_PCIE_0 103
+#define SM8250_SLAVE_PCIE_0_CFG 104
+#define SM8250_SLAVE_PCIE_1 105
+#define SM8250_SLAVE_PCIE_1_CFG 106
+#define SM8250_SLAVE_PCIE_2 107
+#define SM8250_SLAVE_PCIE_2_CFG 108
+#define SM8250_SLAVE_PDM 109
+#define SM8250_SLAVE_PIMEM 110
+#define SM8250_SLAVE_PIMEM_CFG 111
+#define SM8250_SLAVE_PRNG 112
+#define SM8250_SLAVE_QDSS_CFG 113
+#define SM8250_SLAVE_QDSS_STM 114
+#define SM8250_SLAVE_QSPI_0 115
+#define SM8250_SLAVE_QUP_0 116
+#define SM8250_SLAVE_QUP_1 117
+#define SM8250_SLAVE_QUP_2 118
+#define SM8250_SLAVE_RBCPR_CX_CFG 119
+#define SM8250_SLAVE_RBCPR_MMCX_CFG 120
+#define SM8250_SLAVE_RBCPR_MX_CFG 121
+#define SM8250_SLAVE_SDCC_2 122
+#define SM8250_SLAVE_SDCC_4 123
+#define SM8250_SLAVE_SERVICE_A1NOC 124
+#define SM8250_SLAVE_SERVICE_A2NOC 125
+#define SM8250_SLAVE_SERVICE_CNOC 126
+#define SM8250_SLAVE_SERVICE_GEM_NOC 127
+#define SM8250_SLAVE_SERVICE_GEM_NOC_1 128
+#define SM8250_SLAVE_SERVICE_GEM_NOC_2 129
+#define SM8250_SLAVE_SERVICE_MNOC 130
+#define SM8250_SLAVE_SERVICE_NPU_NOC 131
+#define SM8250_SLAVE_SERVICE_SNOC 132
+#define SM8250_SLAVE_SNOC_CFG 133
+#define SM8250_SLAVE_SNOC_GEM_NOC_GC 134
+#define SM8250_SLAVE_SNOC_GEM_NOC_SF 135
+#define SM8250_SLAVE_TCSR 136
+#define SM8250_SLAVE_TCU 137
+#define SM8250_SLAVE_TLMM_NORTH 138
+#define SM8250_SLAVE_TLMM_SOUTH 139
+#define SM8250_SLAVE_TLMM_WEST 140
+#define SM8250_SLAVE_TSIF 141
+#define SM8250_SLAVE_UFS_CARD_CFG 142
+#define SM8250_SLAVE_UFS_MEM_CFG 143
+#define SM8250_SLAVE_USB3 144
+#define SM8250_SLAVE_USB3_1 145
+#define SM8250_SLAVE_VENUS_CFG 146
+#define SM8250_SLAVE_VSENSE_CTRL_CFG 147
+#define SM8250_SNOC_CNOC_MAS 148
+#define SM8250_SNOC_CNOC_SLV 149
+#define SM8250_MASTER_EPSS_L3_APPS 150
+#define SM8250_SLAVE_EPSS_L3 151
+
+#endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index bef5d75e306b..04878caf6da4 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -232,7 +232,7 @@ config IPMMU_VMSA
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the R-Mobile
- APE6, R-Car Gen2, and R-Car Gen3 SoCs.
+ APE6, R-Car Gen{2,3} and RZ/G{1,2} SoCs.
If unsure, say N.
@@ -308,6 +308,16 @@ config ARM_SMMU_V3
Say Y here if your system includes an IOMMU device implementing
the ARM SMMUv3 architecture.
+config ARM_SMMU_V3_SVA
+ bool "Shared Virtual Addressing support for the ARM SMMUv3"
+ depends on ARM_SMMU_V3
+ help
+ Support for sharing process address spaces with devices using the
+ SMMUv3.
+
+ Say Y here if your system supports SVA extensions such as PCIe PASID
+ and PRI.
+
config S390_IOMMU
def_bool y if S390 && PCI
depends on S390 && PCI
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 57309716fd18..6b8cbdf71714 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -41,16 +41,25 @@ extern int amd_iommu_guest_ir;
struct iommu_domain;
extern bool amd_iommu_v2_supported(void);
+extern struct amd_iommu *get_amd_iommu(unsigned int idx);
+extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
+extern bool amd_iommu_pc_supported(void);
+extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
+extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+
extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
#ifdef CONFIG_IRQ_REMAP
@@ -66,7 +75,7 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
#define PPR_INVALID 0x1
#define PPR_FAILURE 0xf
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag);
static inline bool is_rd890_iommu(struct pci_dev *pdev)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 30a5d412255a..89647700bab2 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -93,6 +93,7 @@
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
#define FEATURE_EPHSUP (1ULL<<50)
+#define FEATURE_SNP (1ULL<<63)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
@@ -128,6 +129,8 @@
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
#define EVENT_TYPE_INV_PPR_REQ 0x9
+#define EVENT_TYPE_RMP_FAULT 0xd
+#define EVENT_TYPE_RMP_HW_ERR 0xe
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK_LO 0xffff
@@ -406,7 +409,11 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;
-#define MAX_IRQS_PER_TABLE 256
+/*
+ * AMD IOMMU hardware only supports 512 IRTEs despite
+ * the architectural limit of 2048 entries.
+ */
+#define MAX_IRQS_PER_TABLE 512
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
@@ -595,7 +602,8 @@ struct amd_iommu {
#endif
u32 flags;
- volatile u64 __aligned(8) cmd_sem;
+ volatile u64 *cmd_sem;
+ u64 cmd_sem_val;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1ba6b4cc56e8..82e4af8f09bb 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -359,6 +359,29 @@ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
&entry, sizeof(entry));
}
+static void iommu_set_cwwb_range(struct amd_iommu *iommu)
+{
+ u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 entry = start & PM_ADDR_MASK;
+
+ if (!iommu_feature(iommu, FEATURE_SNP))
+ return;
+
+ /* Note:
+ * Re-purpose Exclusion base/limit registers for Completion wait
+ * write-back base/limit.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+ &entry, sizeof(entry));
+
+ /* Note:
+ * Default to a 4-Kbyte region, which is specified by setting the
+ * base address equal to the limit address.
+ */
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+ &entry, sizeof(entry));
+}
+
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
@@ -813,6 +836,19 @@ static int iommu_init_ga(struct amd_iommu *iommu)
return ret;
}
+static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ iommu->cmd_sem = (void *)get_zeroed_page(GFP_KERNEL);
+
+ return iommu->cmd_sem ? 0 : -ENOMEM;
+}
+
+static void __init free_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem)
+ free_page((unsigned long)iommu->cmd_sem);
+}
+
static void iommu_enable_xt(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
@@ -1376,6 +1412,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
free_ppr_log(iommu);
@@ -1462,6 +1499,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
int ret;
raw_spin_lock_init(&iommu->lock);
+ iommu->cmd_sem_val = 0;
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
@@ -1539,6 +1577,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->mmio_base)
return -ENOMEM;
+ if (alloc_cwwb_sem(iommu))
+ return -ENOMEM;
+
if (alloc_command_buffer(iommu))
return -ENOMEM;
@@ -1576,7 +1617,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
/**
* get_highest_supported_ivhd_type - Look up the appropriate IVHD type
- * @ivrs Pointer to the IVRS header
+ * @ivrs: Pointer to the IVRS header
*
 * This function searches through all IVDB of the maximum supported IVHD
*/
@@ -1864,6 +1905,9 @@ static int __init amd_iommu_init_pci(void)
ret = iommu_init_pci(iommu);
if (ret)
break;
+
+ /* Need to set up range after PCI init */
+ iommu_set_cwwb_range(iommu);
}
/*
@@ -1927,7 +1971,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
-/**
+/*
* Setup the IntCapXT registers with interrupt routing information
* based on the PCI MSI capability block registers, accessed via
* MMIO MSI address low/hi and MSI data registers.
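Read together with the iommu.c hunks below, the CWWB changes replace the old zero-then-spin completion semaphore with a monotonically increasing counter that the hardware writes back to a dedicated page (which the exclusion-register repurposing above lets SNP-enabled IOMMUs treat as a sanctioned write-back region). Condensed from the hunks, with error handling elided, the new wait sequence is:

/* Sketch of the reworked completion wait, names as in this patch. */
raw_spin_lock_irqsave(&iommu->lock, flags);

data = ++iommu->cmd_sem_val;		  /* value unique to this wait */
build_completion_wait(&cmd, iommu, data); /* COMPL_WAIT stores data to *cmd_sem */
__iommu_queue_command_sync(iommu, &cmd, false);
wait_on_sem(iommu, data);		  /* poll until *iommu->cmd_sem == data */

raw_spin_unlock_irqrestore(&iommu->lock, flags);

Because a stale write-back from an earlier command can never match a newer counter value, a wait cannot complete spuriously.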
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 10e4200d3552..b9cf59443843 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
@@ -28,7 +28,6 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
-#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
@@ -486,6 +485,67 @@ static void dump_command(unsigned long phys_addr)
pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
}
+static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 spa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ spa = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
+
+ pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data && __ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ vmg_tag, spa, flags);
+ } else {
+ pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, spa, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
+static void amd_iommu_report_rmp_fault(volatile u32 *event)
+{
+ struct iommu_dev_data *dev_data = NULL;
+ int devid, flags_rmp, vmg_tag, flags;
+ struct pci_dev *pdev;
+ u64 gpa;
+
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
+ vmg_tag = (event[1]) & 0xFFFF;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ gpa = ((u64)event[3] << 32) | event[2];
+
+ pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
+ devid & 0xff);
+ if (pdev)
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+
+ if (dev_data && __ratelimit(&dev_data->rs)) {
+ pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ vmg_tag, gpa, flags_rmp, flags);
+ } else {
+ pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ vmg_tag, gpa, flags_rmp, flags);
+ }
+
+ if (pdev)
+ pci_dev_put(pdev);
+}
+
static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
u64 address, int flags)
{
@@ -513,10 +573,11 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
struct device *dev = iommu->iommu.dev;
- int type, devid, pasid, flags, tag;
+ int type, devid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
u64 address;
+ u32 pasid;
retry:
type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -577,6 +638,12 @@ retry:
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
break;
+ case EVENT_TYPE_RMP_FAULT:
+ amd_iommu_report_rmp_fault(event);
+ break;
+ case EVENT_TYPE_RMP_HW_ERR:
+ amd_iommu_report_rmp_hw_error(event);
+ break;
case EVENT_TYPE_INV_PPR_REQ:
pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
@@ -729,7 +796,21 @@ static void iommu_poll_ga_log(struct amd_iommu *iommu)
}
}
}
-#endif /* CONFIG_IRQ_REMAP */
+
+static void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
+{
+ if (!irq_remapping_enabled || !dev_is_pci(dev) ||
+ pci_dev_has_special_msi_domain(to_pci_dev(dev)))
+ return;
+
+ dev_set_msi_domain(dev, iommu->msi_domain);
+}
+
+#else /* CONFIG_IRQ_REMAP */
+static inline void
+amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
+#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
(MMIO_STATUS_EVT_INT_MASK | \
@@ -792,11 +873,11 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
*
****************************************************************************/
-static int wait_on_sem(volatile u64 *sem)
+static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
int i = 0;
- while (*sem == 0 && i < LOOP_TIMEOUT) {
+ while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
udelay(1);
i += 1;
}
@@ -827,16 +908,16 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
-static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+static void build_completion_wait(struct iommu_cmd *cmd,
+ struct amd_iommu *iommu,
+ u64 data)
{
- u64 paddr = iommu_virt_to_phys((void *)address);
-
- WARN_ON(address & 0x7ULL);
+ u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = 1;
+ cmd->data[2] = data;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
@@ -909,7 +990,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
-static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
@@ -927,7 +1008,7 @@ static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}
-static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int qdep, u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
@@ -947,7 +1028,7 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}
-static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int status, int tag, bool gn)
{
memset(cmd, 0, sizeof(*cmd));
@@ -1045,22 +1126,21 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
struct iommu_cmd cmd;
unsigned long flags;
int ret;
+ u64 data;
if (!iommu->need_sync)
return 0;
-
- build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
-
raw_spin_lock_irqsave(&iommu->lock, flags);
- iommu->cmd_sem = 0;
+ data = ++iommu->cmd_sem_val;
+ build_completion_wait(&cmd, iommu, data);
ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
goto out_unlock;
- ret = wait_on_sem(&iommu->cmd_sem);
+ ret = wait_on_sem(iommu, data);
out_unlock:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
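The rework above replaces a reset-to-zero semaphore with a monotonically increasing per-IOMMU sequence value: each COMPLETION_WAIT command carries a fresh value, and the waiter spins until the device has written exactly that value back, so a stale write-back from an earlier wait can never be mistaken for the current completion. A minimal single-threaded user-space model of the idea (names mirror the driver's but everything here is illustrative):

#include <stdint.h>
#include <stdio.h>

static volatile uint64_t cmd_sem;       /* write-back target, device-owned */
static uint64_t cmd_sem_val;            /* CPU-side sequence counter */

/* Queue a completion-wait carrying a unique, never-reused value. */
static uint64_t queue_completion_wait(void)
{
        uint64_t data = ++cmd_sem_val;

        /* real driver: build the command with 'data' and ring the queue;
         * here the "device" completes immediately */
        cmd_sem = data;
        return data;
}

static int wait_on_sem(uint64_t data)
{
        int i = 0;

        while (cmd_sem != data && i < 100000)   /* LOOP_TIMEOUT stand-in */
                i++;
        return i < 100000 ? 0 : -1;
}

int main(void)
{
        printf("wait -> %d\n", wait_on_sem(queue_completion_wait()));
        return 0;
}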
@@ -2157,6 +2237,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(dev);
} else {
+ amd_iommu_set_pci_msi_domain(dev, iommu);
iommu_dev = &iommu->iommu;
}
@@ -2786,7 +2867,7 @@ out:
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-static int __flush_pasid(struct protection_domain *domain, int pasid,
+static int __flush_pasid(struct protection_domain *domain, u32 pasid,
u64 address, bool size)
{
struct iommu_dev_data *dev_data;
@@ -2847,13 +2928,13 @@ out:
return ret;
}
-static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address)
{
return __flush_pasid(domain, pasid, address, false);
}
-int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
u64 address)
{
struct protection_domain *domain = to_pdomain(dom);
@@ -2868,13 +2949,13 @@ int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
}
EXPORT_SYMBOL(amd_iommu_flush_page);
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{
return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
true);
}
-int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
@@ -2888,7 +2969,7 @@ int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);
-static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
{
int index;
u64 *pte;
@@ -2920,7 +3001,7 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
return pte;
}
-static int __set_gcr3(struct protection_domain *domain, int pasid,
+static int __set_gcr3(struct protection_domain *domain, u32 pasid,
unsigned long cr3)
{
struct domain_pgtable pgtable;
@@ -2939,7 +3020,7 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
return __amd_iommu_flush_tlb(domain, pasid);
}
-static int __clear_gcr3(struct protection_domain *domain, int pasid)
+static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
{
struct domain_pgtable pgtable;
u64 *pte;
@@ -2957,7 +3038,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
return __amd_iommu_flush_tlb(domain, pasid);
}
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3)
{
struct protection_domain *domain = to_pdomain(dom);
@@ -2972,7 +3053,7 @@ int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
@@ -2986,7 +3067,7 @@ int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
-int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -3519,69 +3600,51 @@ static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
static int get_devid(struct irq_alloc_info *info)
{
- int devid = -1;
-
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
- devid = get_ioapic_devid(info->ioapic_id);
- break;
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ return get_ioapic_devid(info->devid);
case X86_IRQ_ALLOC_TYPE_HPET:
- devid = get_hpet_devid(info->hpet_id);
- break;
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- devid = get_device_id(&info->msi_dev->dev);
- break;
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return get_hpet_devid(info->devid);
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
+ return get_device_id(msi_desc_to_dev(info->desc));
default:
- BUG_ON(1);
- break;
+ WARN_ON_ONCE(1);
+ return -1;
}
-
- return devid;
}
-static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+static struct irq_domain *get_irq_domain_for_devid(struct irq_alloc_info *info,
+ int devid)
{
- struct amd_iommu *iommu;
- int devid;
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
- if (!info)
+ if (!iommu)
return NULL;
- devid = get_devid(info);
- if (devid >= 0) {
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu)
- return iommu->ir_domain;
+ switch (info->type) {
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return iommu->ir_domain;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
}
-
- return NULL;
}
static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
{
- struct amd_iommu *iommu;
int devid;
if (!info)
return NULL;
- switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- devid = get_device_id(&info->msi_dev->dev);
- if (devid < 0)
- return NULL;
-
- iommu = amd_iommu_rlookup_table[devid];
- if (iommu)
- return iommu->msi_domain;
- break;
- default:
- break;
- }
-
- return NULL;
+ devid = get_devid(info);
+ if (devid < 0)
+ return NULL;
+ return get_irq_domain_for_devid(info, devid);
}
struct irq_remap_ops amd_iommu_irq_ops = {
@@ -3590,7 +3653,6 @@ struct irq_remap_ops amd_iommu_irq_ops = {
.disable = amd_iommu_disable,
.reenable = amd_iommu_reenable,
.enable_faulting = amd_iommu_enable_faulting,
- .get_ir_irq_domain = get_ir_irq_domain,
.get_irq_domain = get_irq_domain,
};
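With get_ir_irq_domain gone, the lookup above is two orthogonal steps: map the allocation info to a device ID, then map that ID through the rlookup table to the per-IOMMU remapping domain, while PCI MSI parents are now set per device at probe time. A standalone model of that split (the types, table, and sizes are stand-ins, not the driver's):

#include <stdio.h>

enum alloc_type { IOAPIC_GET_PARENT, HPET_GET_PARENT, PCI_MSI };

struct iommu { const char *ir_domain; };

#define MAX_DEVID 16
static struct iommu *rlookup_table[MAX_DEVID];  /* devid -> iommu */

static const char *get_irq_domain(enum alloc_type type, int devid)
{
        struct iommu *iommu;

        if (devid < 0 || devid >= MAX_DEVID)
                return NULL;
        iommu = rlookup_table[devid];
        if (!iommu)
                return NULL;

        switch (type) {
        case IOAPIC_GET_PARENT:
        case HPET_GET_PARENT:
                return iommu->ir_domain;        /* remapping parent domain */
        default:
                return NULL;    /* PCI MSI parent is set on the device */
        }
}

int main(void)
{
        static struct iommu iommu0 = { "ir-domain-0" };

        rlookup_table[3] = &iommu0;
        printf("%s\n", get_irq_domain(IOAPIC_GET_PARENT, 3));
        return 0;
}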
@@ -3616,21 +3678,21 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
/* Setup IOAPIC entry */
- entry = info->ioapic_entry;
- info->ioapic_entry = NULL;
+ entry = info->ioapic.entry;
+ info->ioapic.entry = NULL;
memset(entry, 0, sizeof(*entry));
entry->vector = index;
entry->mask = 0;
- entry->trigger = info->ioapic_trigger;
- entry->polarity = info->ioapic_polarity;
+ entry->trigger = info->ioapic.trigger;
+ entry->polarity = info->ioapic.polarity;
/* Mask level triggered irqs. */
- if (info->ioapic_trigger)
+ if (info->ioapic.trigger)
entry->mask = 1;
break;
case X86_IRQ_ALLOC_TYPE_HPET:
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo = MSI_ADDR_BASE_LO;
msg->data = irte_info->index;
@@ -3674,15 +3736,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!info)
return -EINVAL;
- if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
- info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+ if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
+ info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
return -EINVAL;
/*
* With IRQ remapping enabled, don't need contiguous CPU vectors
* to support multiple MSI interrupts.
*/
- if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
devid = get_devid(info);
@@ -3710,15 +3772,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
iommu->irte_ops->set_allocated(table, i);
}
WARN_ON(table->min_index != 32);
- index = info->ioapic_pin;
+ index = info->ioapic.pin;
} else {
index = -ENOMEM;
}
- } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
- info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
- bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
+ bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
- index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ index = alloc_irq_index(devid, nr_irqs, align,
+ msi_desc_to_pci_dev(info->desc));
} else {
index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
@@ -3731,8 +3794,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
- cfg = irqd_cfg(irq_data);
- if (!irq_data || !cfg) {
+ cfg = irq_data ? irqd_cfg(irq_data) : NULL;
+ if (!cfg) {
ret = -EINVAL;
goto out_free_data;
}
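The two-line change just above fixes an evaluation-order bug: irqd_cfg(irq_data) ran before the NULL check on irq_data. Distilled to its essence (hypothetical names; the guard must come before the dependent call):

#include <stdio.h>

struct irq_data { int cfg; };

static int *irqd_cfg(struct irq_data *d)
{
        return &d->cfg;                 /* would crash if d were NULL */
}

static int lookup(struct irq_data *d)
{
        int *cfg = d ? irqd_cfg(d) : NULL;      /* guard first, then deref */
        return cfg ? 0 : -22;                   /* -EINVAL */
}

int main(void)
{
        printf("%d\n", lookup(NULL));
        return 0;
}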
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 0d175aed1d92..5ecc0bc608ec 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -40,7 +40,7 @@ struct pasid_state {
struct mmu_notifier mn; /* mmu_notifier handle */
struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
struct device_state *device_state; /* Link to our device_state */
- int pasid; /* PASID index */
+ u32 pasid; /* PASID index */
bool invalid; /* Used during setup and
teardown of the pasid */
spinlock_t lock; /* Protect pri_queues and
@@ -70,7 +70,7 @@ struct fault {
struct mm_struct *mm;
u64 address;
u16 devid;
- u16 pasid;
+ u32 pasid;
u16 tag;
u16 finish;
u16 flags;
@@ -150,7 +150,7 @@ static void put_device_state(struct device_state *dev_state)
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
- int pasid, bool alloc)
+ u32 pasid, bool alloc)
{
struct pasid_state **root, **ptr;
int level, index;
@@ -184,7 +184,7 @@ static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state
static int set_pasid_state(struct device_state *dev_state,
struct pasid_state *pasid_state,
- int pasid)
+ u32 pasid)
{
struct pasid_state **ptr;
unsigned long flags;
@@ -211,7 +211,7 @@ out_unlock:
return ret;
}
-static void clear_pasid_state(struct device_state *dev_state, int pasid)
+static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
{
struct pasid_state **ptr;
unsigned long flags;
@@ -229,7 +229,7 @@ out_unlock:
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
- int pasid)
+ u32 pasid)
{
struct pasid_state **ptr, *ret = NULL;
unsigned long flags;
@@ -594,7 +594,7 @@ static struct notifier_block ppr_nb = {
.notifier_call = ppr_notifier,
};
-int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
struct task_struct *task)
{
struct pasid_state *pasid_state;
@@ -615,7 +615,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
return -EINVAL;
ret = -EINVAL;
- if (pasid < 0 || pasid >= dev_state->max_pasids)
+ if (pasid >= dev_state->max_pasids)
goto out;
ret = -ENOMEM;
@@ -679,7 +679,7 @@ out:
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
-void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
{
struct pasid_state *pasid_state;
struct device_state *dev_state;
@@ -695,7 +695,7 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
if (dev_state == NULL)
return;
- if (pasid < 0 || pasid >= dev_state->max_pasids)
+ if (pasid >= dev_state->max_pasids)
goto out;
pasid_state = get_pasid_state(dev_state, pasid);
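Switching the PASID to u32 throughout also simplifies the bounds checks above: with a signed int the guard needed two comparisons, but an unsigned type cannot represent the negative half of the range, so a single upper-bound test suffices (a "negative" caller value would wrap to a huge number and still be rejected). A tiny standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static int check_pasid(uint32_t pasid, uint32_t max_pasids)
{
        /* one comparison replaces (pasid < 0 || pasid >= max_pasids) */
        return pasid >= max_pasids ? -22 : 0;   /* -EINVAL in the driver */
}

int main(void)
{
        printf("valid: %d, wrapped: %d\n",
               check_pasid(5, 1024),            /* 0 */
               check_pasid((uint32_t)-1, 1024)); /* -22 */
        return 0;
}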
diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile
index 569e24e9f162..54feb1ecccad 100644
--- a/drivers/iommu/arm/arm-smmu-v3/Makefile
+++ b/drivers/iommu/arm/arm-smmu-v3/Makefile
@@ -1,2 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
+obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
+arm_smmu_v3-objs-y += arm-smmu-v3.o
+arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
+arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
new file mode 100644
index 000000000000..9255c9600fb8
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the IOMMU SVA API for the ARM SMMUv3
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/slab.h>
+
+#include "arm-smmu-v3.h"
+#include "../../io-pgtable-arm.h"
+
+static DEFINE_MUTEX(sva_lock);
+
+/*
+ * Check if the CPU ASID is available on the SMMU side. If a private context
+ * descriptor is using it, try to replace it.
+ */
+static struct arm_smmu_ctx_desc *
+arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
+{
+ int ret;
+ u32 new_asid;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain;
+
+ cd = xa_load(&arm_smmu_asid_xa, asid);
+ if (!cd)
+ return NULL;
+
+ if (cd->mm) {
+ if (WARN_ON(cd->mm != mm))
+ return ERR_PTR(-EINVAL);
+ /* All devices bound to this mm use the same cd struct. */
+ refcount_inc(&cd->refs);
+ return cd;
+ }
+
+ smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+ smmu = smmu_domain->smmu;
+
+ ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(-ENOSPC);
+ /*
+ * Race with unmap: TLB invalidations will start targeting the new ASID,
+ * which isn't assigned yet. We'll do an invalidate-all on the old ASID
+ * later, so it doesn't matter.
+ */
+ cd->asid = new_asid;
+ /*
+ * Update ASID and invalidate CD in all associated masters. There will
+ * be some overlap between use of both ASIDs, until we invalidate the
+ * TLB.
+ */
+ arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+
+ /* Invalidate TLB entries previously associated with that context */
+ arm_smmu_tlb_inv_asid(smmu, asid);
+
+ xa_erase(&arm_smmu_asid_xa, asid);
+ return NULL;
+}
+
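arm_smmu_share_asid above resolves a collision between the CPU's choice of ASID and the SMMU's allocator: if a private (non-SVA) context already owns the wanted ASID, that context is migrated to a freshly allocated ASID, its CD rewritten, and the old ASID invalidated and freed for the mm to take over. A toy single-threaded model of that migration, with a plain array standing in for the xarray (everything here is illustrative):

#include <stdio.h>
#include <stddef.h>

#define NR_ASIDS 16
struct ctx { int private_ctx; };        /* private contexts can be migrated */
static struct ctx *asid_table[NR_ASIDS];

static int share_asid(int wanted)
{
        struct ctx *cd = asid_table[wanted];
        int new_asid;

        if (!cd)
                return wanted;          /* free: just take it */
        if (!cd->private_ctx)
                return -1;              /* already shared; model keeps it simple */

        for (new_asid = 1; new_asid < NR_ASIDS; new_asid++) {
                if (!asid_table[new_asid]) {
                        asid_table[new_asid] = cd;      /* migrate private ctx */
                        asid_table[wanted] = NULL;      /* old ASID now free */
                        /* real driver: rewrite the CD, then TLBI the old
                         * ASID; the caller then installs its own entry */
                        return wanted;
                }
        }
        return -1;                      /* allocator exhausted (-ENOSPC) */
}

int main(void)
{
        static struct ctx private = { .private_ctx = 1 };

        asid_table[5] = &private;
        printf("got asid %d\n", share_asid(5));
        return 0;
}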
+__maybe_unused
+static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
+{
+ u16 asid;
+ int err = 0;
+ u64 tcr, par, reg;
+ struct arm_smmu_ctx_desc *cd;
+ struct arm_smmu_ctx_desc *ret = NULL;
+
+ asid = arm64_mm_context_get(mm);
+ if (!asid)
+ return ERR_PTR(-ESRCH);
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd) {
+ err = -ENOMEM;
+ goto out_put_context;
+ }
+
+ refcount_set(&cd->refs, 1);
+
+ mutex_lock(&arm_smmu_asid_lock);
+ ret = arm_smmu_share_asid(mm, asid);
+ if (ret) {
+ mutex_unlock(&arm_smmu_asid_lock);
+ goto out_free_cd;
+ }
+
+ err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ if (err)
+ goto out_free_asid;
+
+ tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
+ FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
+ CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
+
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
+ break;
+ case SZ_16K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
+ break;
+ case SZ_64K:
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
+ break;
+ default:
+ WARN_ON(1);
+ err = -EINVAL;
+ goto out_free_asid;
+ }
+
+ reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
+
+ cd->ttbr = virt_to_phys(mm->pgd);
+ cd->tcr = tcr;
+ /*
+ * MAIR value is pretty much constant and global, so we can just get it
+ * from the current CPU register
+ */
+ cd->mair = read_sysreg(mair_el1);
+ cd->asid = asid;
+ cd->mm = mm;
+
+ return cd;
+
+out_free_asid:
+ arm_smmu_free_asid(cd);
+out_free_cd:
+ kfree(cd);
+out_put_context:
+ arm64_mm_context_put(mm);
+ return err < 0 ? ERR_PTR(err) : ret;
+}
+
+__maybe_unused
+static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
+{
+ if (arm_smmu_free_asid(cd)) {
+ /* Unpin ASID */
+ arm64_mm_context_put(cd->mm);
+ kfree(cd);
+ }
+}
+
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+ unsigned long reg, fld;
+ unsigned long oas;
+ unsigned long asid_bits;
+ u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;
+
+ if (vabits_actual == 52)
+ feat_mask |= ARM_SMMU_FEAT_VAX;
+
+ if ((smmu->features & feat_mask) != feat_mask)
+ return false;
+
+ if (!(smmu->pgsize_bitmap & PAGE_SIZE))
+ return false;
+
+ /*
+ * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
+ * not even pretending to support AArch32 here. Abort if the MMU outputs
+ * addresses larger than what we support.
+ */
+ reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ oas = id_aa64mmfr0_parange_to_phys_shift(fld);
+ if (smmu->oas < oas)
+ return false;
+
+ /* We can support bigger ASIDs than the CPU, but not smaller */
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+ asid_bits = fld ? 16 : 8;
+ if (smmu->asid_bits < asid_bits)
+ return false;
+
+ /*
+ * See max_pinned_asids in arch/arm64/mm/context.c. The following is
+ * generally the maximum number of bindable processes.
+ */
+ if (arm64_kernel_unmapped_at_el0())
+ asid_bits--;
+ dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
+ num_possible_cpus() - 2);
+
+ return true;
+}
+
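As a worked instance of the dev_dbg estimate above, assume 16 ASID bits, the kernel unmapped at EL0 (which costs one bit), and 8 possible CPUs; per-CPU pinned ASIDs and two reserved values are subtracted:

#include <stdio.h>

int main(void)
{
        int asid_bits = 16, ncpus = 8;  /* assumed configuration */

        asid_bits--;                    /* KPTI: one bit for kernel mappings */
        printf("%d shared contexts\n", (1 << asid_bits) - ncpus - 2);
        /* prints: 32758 shared contexts */
        return 0;
}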
+static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+ if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
+ return false;
+
+ /* SSID and IOPF support are mandatory for the moment */
+ return master->ssid_bits && arm_smmu_iopf_supported(master);
+}
+
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+ bool enabled;
+
+ mutex_lock(&sva_lock);
+ enabled = master->sva_enabled;
+ mutex_unlock(&sva_lock);
+ return enabled;
+}
+
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+ mutex_lock(&sva_lock);
+ master->sva_enabled = true;
+ mutex_unlock(&sva_lock);
+
+ return 0;
+}
+
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+ mutex_lock(&sva_lock);
+ if (!list_empty(&master->bonds)) {
+ dev_err(master->dev, "cannot disable SVA, device is bound\n");
+ mutex_unlock(&sva_lock);
+ return -EBUSY;
+ }
+ master->sva_enabled = false;
+ mutex_unlock(&sva_lock);
+
+ return 0;
+}
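The enable/disable pair above encodes a simple discipline: disabling SVA must fail while any bond (mm binding) is live, and the lock makes the bond check and the flag update atomic with respect to concurrent enable/disable calls. A toy user-space model of that rule (a counter stands in for the bonds list):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sva_lock = PTHREAD_MUTEX_INITIALIZER;
static int sva_enabled;
static int nr_bonds;

static int disable_sva(void)
{
        int ret = 0;

        pthread_mutex_lock(&sva_lock);
        if (nr_bonds)
                ret = -16;              /* -EBUSY: device is bound */
        else
                sva_enabled = 0;
        pthread_mutex_unlock(&sva_lock);
        return ret;
}

int main(void)
{
        sva_enabled = 1;
        nr_bonds = 1;
        printf("disable -> %d\n", disable_sva());
        return 0;
}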
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index c192544e874b..e634bbe60573 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -11,7 +11,6 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
-#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
@@ -19,7 +18,6 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
-#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -33,396 +31,17 @@
#include <linux/amba/bus.h>
-/* MMIO registers */
-#define ARM_SMMU_IDR0 0x0
-#define IDR0_ST_LVL GENMASK(28, 27)
-#define IDR0_ST_LVL_2LVL 1
-#define IDR0_STALL_MODEL GENMASK(25, 24)
-#define IDR0_STALL_MODEL_STALL 0
-#define IDR0_STALL_MODEL_FORCE 2
-#define IDR0_TTENDIAN GENMASK(22, 21)
-#define IDR0_TTENDIAN_MIXED 0
-#define IDR0_TTENDIAN_LE 2
-#define IDR0_TTENDIAN_BE 3
-#define IDR0_CD2L (1 << 19)
-#define IDR0_VMID16 (1 << 18)
-#define IDR0_PRI (1 << 16)
-#define IDR0_SEV (1 << 14)
-#define IDR0_MSI (1 << 13)
-#define IDR0_ASID16 (1 << 12)
-#define IDR0_ATS (1 << 10)
-#define IDR0_HYP (1 << 9)
-#define IDR0_COHACC (1 << 4)
-#define IDR0_TTF GENMASK(3, 2)
-#define IDR0_TTF_AARCH64 2
-#define IDR0_TTF_AARCH32_64 3
-#define IDR0_S1P (1 << 1)
-#define IDR0_S2P (1 << 0)
-
-#define ARM_SMMU_IDR1 0x4
-#define IDR1_TABLES_PRESET (1 << 30)
-#define IDR1_QUEUES_PRESET (1 << 29)
-#define IDR1_REL (1 << 28)
-#define IDR1_CMDQS GENMASK(25, 21)
-#define IDR1_EVTQS GENMASK(20, 16)
-#define IDR1_PRIQS GENMASK(15, 11)
-#define IDR1_SSIDSIZE GENMASK(10, 6)
-#define IDR1_SIDSIZE GENMASK(5, 0)
-
-#define ARM_SMMU_IDR3 0xc
-#define IDR3_RIL (1 << 10)
-
-#define ARM_SMMU_IDR5 0x14
-#define IDR5_STALL_MAX GENMASK(31, 16)
-#define IDR5_GRAN64K (1 << 6)
-#define IDR5_GRAN16K (1 << 5)
-#define IDR5_GRAN4K (1 << 4)
-#define IDR5_OAS GENMASK(2, 0)
-#define IDR5_OAS_32_BIT 0
-#define IDR5_OAS_36_BIT 1
-#define IDR5_OAS_40_BIT 2
-#define IDR5_OAS_42_BIT 3
-#define IDR5_OAS_44_BIT 4
-#define IDR5_OAS_48_BIT 5
-#define IDR5_OAS_52_BIT 6
-#define IDR5_VAX GENMASK(11, 10)
-#define IDR5_VAX_52_BIT 1
-
-#define ARM_SMMU_CR0 0x20
-#define CR0_ATSCHK (1 << 4)
-#define CR0_CMDQEN (1 << 3)
-#define CR0_EVTQEN (1 << 2)
-#define CR0_PRIQEN (1 << 1)
-#define CR0_SMMUEN (1 << 0)
-
-#define ARM_SMMU_CR0ACK 0x24
-
-#define ARM_SMMU_CR1 0x28
-#define CR1_TABLE_SH GENMASK(11, 10)
-#define CR1_TABLE_OC GENMASK(9, 8)
-#define CR1_TABLE_IC GENMASK(7, 6)
-#define CR1_QUEUE_SH GENMASK(5, 4)
-#define CR1_QUEUE_OC GENMASK(3, 2)
-#define CR1_QUEUE_IC GENMASK(1, 0)
-/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
-#define CR1_CACHE_NC 0
-#define CR1_CACHE_WB 1
-#define CR1_CACHE_WT 2
-
-#define ARM_SMMU_CR2 0x2c
-#define CR2_PTM (1 << 2)
-#define CR2_RECINVSID (1 << 1)
-#define CR2_E2H (1 << 0)
-
-#define ARM_SMMU_GBPA 0x44
-#define GBPA_UPDATE (1 << 31)
-#define GBPA_ABORT (1 << 20)
-
-#define ARM_SMMU_IRQ_CTRL 0x50
-#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
-#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
-#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
-
-#define ARM_SMMU_IRQ_CTRLACK 0x54
-
-#define ARM_SMMU_GERROR 0x60
-#define GERROR_SFM_ERR (1 << 8)
-#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
-#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
-#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
-#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
-#define GERROR_PRIQ_ABT_ERR (1 << 3)
-#define GERROR_EVTQ_ABT_ERR (1 << 2)
-#define GERROR_CMDQ_ERR (1 << 0)
-#define GERROR_ERR_MASK 0xfd
-
-#define ARM_SMMU_GERRORN 0x64
-
-#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
-#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
-#define ARM_SMMU_GERROR_IRQ_CFG2 0x74
-
-#define ARM_SMMU_STRTAB_BASE 0x80
-#define STRTAB_BASE_RA (1UL << 62)
-#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
-
-#define ARM_SMMU_STRTAB_BASE_CFG 0x88
-#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
-#define STRTAB_BASE_CFG_FMT_LINEAR 0
-#define STRTAB_BASE_CFG_FMT_2LVL 1
-#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
-#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)
-
-#define ARM_SMMU_CMDQ_BASE 0x90
-#define ARM_SMMU_CMDQ_PROD 0x98
-#define ARM_SMMU_CMDQ_CONS 0x9c
-
-#define ARM_SMMU_EVTQ_BASE 0xa0
-#define ARM_SMMU_EVTQ_PROD 0x100a8
-#define ARM_SMMU_EVTQ_CONS 0x100ac
-#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
-#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
-#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
-
-#define ARM_SMMU_PRIQ_BASE 0xc0
-#define ARM_SMMU_PRIQ_PROD 0x100c8
-#define ARM_SMMU_PRIQ_CONS 0x100cc
-#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
-#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
-#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
-
-#define ARM_SMMU_REG_SZ 0xe00
-
-/* Common MSI config fields */
-#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
-#define MSI_CFG2_SH GENMASK(5, 4)
-#define MSI_CFG2_MEMATTR GENMASK(3, 0)
-
-/* Common memory attribute values */
-#define ARM_SMMU_SH_NSH 0
-#define ARM_SMMU_SH_OSH 2
-#define ARM_SMMU_SH_ISH 3
-#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
-#define ARM_SMMU_MEMATTR_OIWB 0xf
-
-#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
-#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
-#define Q_OVERFLOW_FLAG (1U << 31)
-#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)
-#define Q_ENT(q, p) ((q)->base + \
- Q_IDX(&((q)->llq), p) * \
- (q)->ent_dwords)
-
-#define Q_BASE_RWA (1UL << 62)
-#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
-#define Q_BASE_LOG2SIZE GENMASK(4, 0)
-
-/* Ensure DMA allocations are naturally aligned */
-#ifdef CONFIG_CMA_ALIGNMENT
-#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
-#else
-#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
-#endif
-
-/*
- * Stream table.
- *
- * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 128k L1 entries,
- * 256 lazy entries per table (each table covers a PCI bus)
- */
-#define STRTAB_L1_SZ_SHIFT 20
-#define STRTAB_SPLIT 8
-
-#define STRTAB_L1_DESC_DWORDS 1
-#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
-#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
-
-#define STRTAB_STE_DWORDS 8
-#define STRTAB_STE_0_V (1UL << 0)
-#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
-#define STRTAB_STE_0_CFG_ABORT 0
-#define STRTAB_STE_0_CFG_BYPASS 4
-#define STRTAB_STE_0_CFG_S1_TRANS 5
-#define STRTAB_STE_0_CFG_S2_TRANS 6
-
-#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
-#define STRTAB_STE_0_S1FMT_LINEAR 0
-#define STRTAB_STE_0_S1FMT_64K_L2 2
-#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
-#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
-
-#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0)
-#define STRTAB_STE_1_S1DSS_TERMINATE 0x0
-#define STRTAB_STE_1_S1DSS_BYPASS 0x1
-#define STRTAB_STE_1_S1DSS_SSID0 0x2
-
-#define STRTAB_STE_1_S1C_CACHE_NC 0UL
-#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
-#define STRTAB_STE_1_S1C_CACHE_WT 2UL
-#define STRTAB_STE_1_S1C_CACHE_WB 3UL
-#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
-#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
-#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
-
-#define STRTAB_STE_1_S1STALLD (1UL << 27)
-
-#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
-#define STRTAB_STE_1_EATS_ABT 0UL
-#define STRTAB_STE_1_EATS_TRANS 1UL
-#define STRTAB_STE_1_EATS_S1CHK 2UL
-
-#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
-#define STRTAB_STE_1_STRW_NSEL1 0UL
-#define STRTAB_STE_1_STRW_EL2 2UL
-
-#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
-#define STRTAB_STE_1_SHCFG_INCOMING 1UL
-
-#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
-#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
-#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0)
-#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6)
-#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8)
-#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10)
-#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12)
-#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14)
-#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16)
-#define STRTAB_STE_2_S2AA64 (1UL << 51)
-#define STRTAB_STE_2_S2ENDI (1UL << 52)
-#define STRTAB_STE_2_S2PTW (1UL << 54)
-#define STRTAB_STE_2_S2R (1UL << 58)
-
-#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
-
-/*
- * Context descriptors.
- *
- * Linear: when less than 1024 SSIDs are supported
- * 2lvl: at most 1024 L1 entries,
- * 1024 lazy entries per table.
- */
-#define CTXDESC_SPLIT 10
-#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
-
-#define CTXDESC_L1_DESC_DWORDS 1
-#define CTXDESC_L1_DESC_V (1UL << 0)
-#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
-
-#define CTXDESC_CD_DWORDS 8
-#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
-#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
-#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
-#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
-#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
-#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
-#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
-
-#define CTXDESC_CD_0_ENDI (1UL << 15)
-#define CTXDESC_CD_0_V (1UL << 31)
-
-#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
-#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
-
-#define CTXDESC_CD_0_AA64 (1UL << 41)
-#define CTXDESC_CD_0_S (1UL << 44)
-#define CTXDESC_CD_0_R (1UL << 45)
-#define CTXDESC_CD_0_A (1UL << 46)
-#define CTXDESC_CD_0_ASET (1UL << 47)
-#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)
-
-#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
-
-/*
- * When the SMMU only supports linear context descriptor tables, pick a
- * reasonable size limit (64kB).
- */
-#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
-
-/* Command queue */
-#define CMDQ_ENT_SZ_SHIFT 4
-#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
-#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
-
-#define CMDQ_CONS_ERR GENMASK(30, 24)
-#define CMDQ_ERR_CERROR_NONE_IDX 0
-#define CMDQ_ERR_CERROR_ILL_IDX 1
-#define CMDQ_ERR_CERROR_ABT_IDX 2
-#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
-
-#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG
-
-/*
- * This is used to size the command queue and therefore must be at least
- * BITS_PER_LONG so that the valid_map works correctly (it relies on the
- * total number of queue entries being a multiple of BITS_PER_LONG).
- */
-#define CMDQ_BATCH_ENTRIES BITS_PER_LONG
-
-#define CMDQ_0_OP GENMASK_ULL(7, 0)
-#define CMDQ_0_SSV (1UL << 11)
-
-#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
-#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
-#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
-
-#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12)
-#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
-#define CMDQ_CFGI_1_LEAF (1UL << 0)
-#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
-
-#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
-#define CMDQ_TLBI_RANGE_NUM_MAX 31
-#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
-#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
-#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
-#define CMDQ_TLBI_1_LEAF (1UL << 0)
-#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
-#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
-#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
-#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
-
-#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
-#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
-#define CMDQ_ATC_0_GLOBAL (1UL << 9)
-#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
-#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
-
-#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
-#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
-#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
-#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
-
-#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
-#define CMDQ_SYNC_0_CS_NONE 0
-#define CMDQ_SYNC_0_CS_IRQ 1
-#define CMDQ_SYNC_0_CS_SEV 2
-#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
-#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
-#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
-#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
-
-/* Event queue */
-#define EVTQ_ENT_SZ_SHIFT 5
-#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
-#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
-
-#define EVTQ_0_ID GENMASK_ULL(7, 0)
-
-/* PRI queue */
-#define PRIQ_ENT_SZ_SHIFT 4
-#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
-#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
-
-#define PRIQ_0_SID GENMASK_ULL(31, 0)
-#define PRIQ_0_SSID GENMASK_ULL(51, 32)
-#define PRIQ_0_PERM_PRIV (1UL << 58)
-#define PRIQ_0_PERM_EXEC (1UL << 59)
-#define PRIQ_0_PERM_READ (1UL << 60)
-#define PRIQ_0_PERM_WRITE (1UL << 61)
-#define PRIQ_0_PRG_LAST (1UL << 62)
-#define PRIQ_0_SSID_V (1UL << 63)
-
-#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
-#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
-
-/* High-level queue structures */
-#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
-#define ARM_SMMU_POLL_SPIN_COUNT 10
-
-#define MSI_IOVA_BASE 0x8000000
-#define MSI_IOVA_LENGTH 0x100000
+#include "arm-smmu-v3.h"
static bool disable_bypass = 1;
-module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
+module_param(disable_bypass, bool, 0444);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-enum pri_resp {
- PRI_RESP_DENY = 0,
- PRI_RESP_FAIL = 1,
- PRI_RESP_SUCC = 2,
-};
+static bool disable_msipolling;
+module_param(disable_msipolling, bool, 0444);
+MODULE_PARM_DESC(disable_msipolling,
+ "Disable MSI-based polling for CMD_SYNC completion.");
enum arm_smmu_msi_index {
EVTQ_MSI_INDEX,
@@ -449,284 +68,13 @@ static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
},
};
-struct arm_smmu_cmdq_ent {
- /* Common fields */
- u8 opcode;
- bool substream_valid;
-
- /* Command-specific fields */
- union {
- #define CMDQ_OP_PREFETCH_CFG 0x1
- struct {
- u32 sid;
- u8 size;
- u64 addr;
- } prefetch;
-
- #define CMDQ_OP_CFGI_STE 0x3
- #define CMDQ_OP_CFGI_ALL 0x4
- #define CMDQ_OP_CFGI_CD 0x5
- #define CMDQ_OP_CFGI_CD_ALL 0x6
- struct {
- u32 sid;
- u32 ssid;
- union {
- bool leaf;
- u8 span;
- };
- } cfgi;
-
- #define CMDQ_OP_TLBI_NH_ASID 0x11
- #define CMDQ_OP_TLBI_NH_VA 0x12
- #define CMDQ_OP_TLBI_EL2_ALL 0x20
- #define CMDQ_OP_TLBI_S12_VMALL 0x28
- #define CMDQ_OP_TLBI_S2_IPA 0x2a
- #define CMDQ_OP_TLBI_NSNH_ALL 0x30
- struct {
- u8 num;
- u8 scale;
- u16 asid;
- u16 vmid;
- bool leaf;
- u8 ttl;
- u8 tg;
- u64 addr;
- } tlbi;
-
- #define CMDQ_OP_ATC_INV 0x40
- #define ATC_INV_SIZE_ALL 52
- struct {
- u32 sid;
- u32 ssid;
- u64 addr;
- u8 size;
- bool global;
- } atc;
-
- #define CMDQ_OP_PRI_RESP 0x41
- struct {
- u32 sid;
- u32 ssid;
- u16 grpid;
- enum pri_resp resp;
- } pri;
-
- #define CMDQ_OP_CMD_SYNC 0x46
- struct {
- u64 msiaddr;
- } sync;
- };
-};
-
-struct arm_smmu_ll_queue {
- union {
- u64 val;
- struct {
- u32 prod;
- u32 cons;
- };
- struct {
- atomic_t prod;
- atomic_t cons;
- } atomic;
- u8 __pad[SMP_CACHE_BYTES];
- } ____cacheline_aligned_in_smp;
- u32 max_n_shift;
-};
-
-struct arm_smmu_queue {
- struct arm_smmu_ll_queue llq;
- int irq; /* Wired interrupt */
-
- __le64 *base;
- dma_addr_t base_dma;
- u64 q_base;
-
- size_t ent_dwords;
-
- u32 __iomem *prod_reg;
- u32 __iomem *cons_reg;
-};
-
-struct arm_smmu_queue_poll {
- ktime_t timeout;
- unsigned int delay;
- unsigned int spin_cnt;
- bool wfe;
-};
-
-struct arm_smmu_cmdq {
- struct arm_smmu_queue q;
- atomic_long_t *valid_map;
- atomic_t owner_prod;
- atomic_t lock;
-};
-
-struct arm_smmu_cmdq_batch {
- u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
- int num;
-};
-
-struct arm_smmu_evtq {
- struct arm_smmu_queue q;
- u32 max_stalls;
-};
-
-struct arm_smmu_priq {
- struct arm_smmu_queue q;
-};
-
-/* High-level stream table and context descriptor structures */
-struct arm_smmu_strtab_l1_desc {
- u8 span;
-
- __le64 *l2ptr;
- dma_addr_t l2ptr_dma;
-};
-
-struct arm_smmu_ctx_desc {
- u16 asid;
- u64 ttbr;
- u64 tcr;
- u64 mair;
-};
-
-struct arm_smmu_l1_ctx_desc {
- __le64 *l2ptr;
- dma_addr_t l2ptr_dma;
-};
-
-struct arm_smmu_ctx_desc_cfg {
- __le64 *cdtab;
- dma_addr_t cdtab_dma;
- struct arm_smmu_l1_ctx_desc *l1_desc;
- unsigned int num_l1_ents;
-};
-
-struct arm_smmu_s1_cfg {
- struct arm_smmu_ctx_desc_cfg cdcfg;
- struct arm_smmu_ctx_desc cd;
- u8 s1fmt;
- u8 s1cdmax;
-};
-
-struct arm_smmu_s2_cfg {
- u16 vmid;
- u64 vttbr;
- u64 vtcr;
-};
-
-struct arm_smmu_strtab_cfg {
- __le64 *strtab;
- dma_addr_t strtab_dma;
- struct arm_smmu_strtab_l1_desc *l1_desc;
- unsigned int num_l1_ents;
-
- u64 strtab_base;
- u32 strtab_base_cfg;
-};
-
-/* An SMMUv3 instance */
-struct arm_smmu_device {
- struct device *dev;
- void __iomem *base;
- void __iomem *page1;
-
-#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
-#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
-#define ARM_SMMU_FEAT_TT_LE (1 << 2)
-#define ARM_SMMU_FEAT_TT_BE (1 << 3)
-#define ARM_SMMU_FEAT_PRI (1 << 4)
-#define ARM_SMMU_FEAT_ATS (1 << 5)
-#define ARM_SMMU_FEAT_SEV (1 << 6)
-#define ARM_SMMU_FEAT_MSI (1 << 7)
-#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
-#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
-#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
-#define ARM_SMMU_FEAT_STALLS (1 << 11)
-#define ARM_SMMU_FEAT_HYP (1 << 12)
-#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
-#define ARM_SMMU_FEAT_VAX (1 << 14)
-#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
- u32 features;
-
-#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
-#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
- u32 options;
-
- struct arm_smmu_cmdq cmdq;
- struct arm_smmu_evtq evtq;
- struct arm_smmu_priq priq;
-
- int gerr_irq;
- int combined_irq;
-
- unsigned long ias; /* IPA */
- unsigned long oas; /* PA */
- unsigned long pgsize_bitmap;
-
-#define ARM_SMMU_MAX_ASIDS (1 << 16)
- unsigned int asid_bits;
-
-#define ARM_SMMU_MAX_VMIDS (1 << 16)
- unsigned int vmid_bits;
- DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
-
- unsigned int ssid_bits;
- unsigned int sid_bits;
-
- struct arm_smmu_strtab_cfg strtab_cfg;
-
- /* IOMMU core code handle */
- struct iommu_device iommu;
-};
-
-/* SMMU private data for each master */
-struct arm_smmu_master {
- struct arm_smmu_device *smmu;
- struct device *dev;
- struct arm_smmu_domain *domain;
- struct list_head domain_head;
- u32 *sids;
- unsigned int num_sids;
- bool ats_enabled;
- unsigned int ssid_bits;
-};
-
-/* SMMU private data for an IOMMU domain */
-enum arm_smmu_domain_stage {
- ARM_SMMU_DOMAIN_S1 = 0,
- ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_NESTED,
- ARM_SMMU_DOMAIN_BYPASS,
-};
-
-struct arm_smmu_domain {
- struct arm_smmu_device *smmu;
- struct mutex init_mutex; /* Protects smmu pointer */
-
- struct io_pgtable_ops *pgtbl_ops;
- bool non_strict;
- atomic_t nr_ats_masters;
-
- enum arm_smmu_domain_stage stage;
- union {
- struct arm_smmu_s1_cfg s1_cfg;
- struct arm_smmu_s2_cfg s2_cfg;
- };
-
- struct iommu_domain domain;
-
- struct list_head devices;
- spinlock_t devices_lock;
-};
-
struct arm_smmu_option_prop {
u32 opt;
const char *prop;
};
-static DEFINE_XARRAY_ALLOC1(asid_xa);
+DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
+DEFINE_MUTEX(arm_smmu_asid_lock);
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
@@ -804,7 +152,7 @@ static void queue_sync_cons_out(struct arm_smmu_queue *q)
* Ensure that all CPU accesses (reads and writes) to the queue
* are complete before we update the cons pointer.
*/
- mb();
+ __iomb();
writel_relaxed(q->llq.cons, q->cons_reg);
}
@@ -816,8 +164,15 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)
static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
+ u32 prod;
int ret = 0;
- u32 prod = readl_relaxed(q->prod_reg);
+
+ /*
+ * We can't use the _relaxed() variant here, as we must prevent
+ * speculative reads of the queue before we have determined that
+ * prod has indeed moved.
+ */
+ prod = readl(q->prod_reg);
if (Q_OVF(prod) != Q_OVF(q->llq.prod))
ret = -EOVERFLOW;
@@ -867,7 +222,7 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
*dst++ = cpu_to_le64(*src++);
}
-static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
+static void queue_read(u64 *dst, __le64 *src, size_t n_dwords)
{
int i;
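The prototype flip above is an endianness-annotation fix: the queue lives in little-endian DMA memory (__le64) and the destination is a CPU-order u64, so the conversion runs le64_to_cpu on the source side. A standalone sketch of the direction of conversion (the byte-assembly loop below is a portable stand-in for the kernel's le64_to_cpu):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint64_t le64_to_cpu(uint64_t v)
{
        /* identity on little-endian hosts; a byte swap on big-endian ones */
        unsigned char *b = (unsigned char *)&v;
        uint64_t r = 0;

        for (int i = 7; i >= 0; i--)
                r = (r << 8) | b[i];
        return r;
}

static void queue_read(uint64_t *dst, const uint64_t *src_le, size_t n)
{
        for (size_t i = 0; i < n; i++)
                dst[i] = le64_to_cpu(src_le[i]);
}

int main(void)
{
        uint64_t src = 0x0123456789abcdefULL, dst;

        queue_read(&dst, &src, 1);
        printf("0x%016llx\n", (unsigned long long)dst);
        return 0;
}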
@@ -992,8 +347,7 @@ static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
* Beware that Hi16xx adds an extra 32 bits of goodness to its MSI
* payload, so the write will zero the entire command on that platform.
*/
- if (smmu->features & ARM_SMMU_FEAT_MSI &&
- smmu->features & ARM_SMMU_FEAT_COHERENCY) {
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
q->ent_dwords * 8;
}
@@ -1331,8 +685,7 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
struct arm_smmu_ll_queue *llq)
{
- if (smmu->features & ARM_SMMU_FEAT_MSI &&
- smmu->features & ARM_SMMU_FEAT_COHERENCY)
+ if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
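The two hunks above replace the repeated two-feature test with a single option bit computed once at probe time (MSI supported, coherent, and not disabled via the new disable_msipolling parameter). A standalone model of that probe-time decision and its use on the CMD_SYNC path (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define OPT_MSIPOLL (1u << 0)

static unsigned int probe_options(bool feat_msi, bool coherent,
                                  bool disable_msipolling)
{
        /* decided once, instead of rechecking two feature bits per sync */
        return (feat_msi && coherent && !disable_msipolling) ? OPT_MSIPOLL : 0;
}

static const char *poll_until_sync(unsigned int options)
{
        return (options & OPT_MSIPOLL) ? "wait for MSI write"
                                       : "poll cons pointer";
}

int main(void)
{
        printf("%s\n", poll_until_sync(probe_options(true, true, false)));
        return 0;
}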
@@ -1529,6 +882,17 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
}
/* Context descriptor manipulation functions */
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
+{
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = CMDQ_OP_TLBI_NH_ASID,
+ .tlbi.asid = asid,
+ };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
+}
+
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
int ssid, bool leaf)
{
@@ -1609,8 +973,8 @@ static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
}
-static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
- int ssid, struct arm_smmu_ctx_desc *cd)
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+ struct arm_smmu_ctx_desc *cd)
{
/*
* This function handles the following cases:
@@ -1661,7 +1025,8 @@ static int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain,
#ifdef __BIG_ENDIAN
CTXDESC_CD_0_ENDI |
#endif
- CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET |
+ CTXDESC_CD_0_R | CTXDESC_CD_0_A |
+ (cd->mm ? 0 : CTXDESC_CD_0_ASET) |
CTXDESC_CD_0_AA64 |
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
CTXDESC_CD_0_V;
@@ -1765,12 +1130,20 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
cdcfg->cdtab = NULL;
}
-static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
{
+ bool free;
+ struct arm_smmu_ctx_desc *old_cd;
+
if (!cd->asid)
- return;
+ return false;
- xa_erase(&asid_xa, cd->asid);
+ free = refcount_dec_and_test(&cd->refs);
+ if (free) {
+ old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid);
+ WARN_ON(old_cd != cd);
+ }
+ return free;
}
/* Stream table manipulation functions */
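arm_smmu_free_asid now returning bool reflects the refcounting added above: a private domain and the SVA code may hold the same context descriptor, and only dropping the last reference actually erases it from the shared table. A toy model of that rule (array and types are stand-ins):

#include <stdio.h>

struct ctx { int asid; int refs; };

#define NR_ASIDS 16
static struct ctx *asid_table[NR_ASIDS];

static int free_asid(struct ctx *cd)
{
        if (!cd->asid)
                return 0;               /* never allocated */
        if (--cd->refs)
                return 0;               /* another user still holds it */
        asid_table[cd->asid] = NULL;    /* last user: really release */
        return 1;
}

int main(void)
{
        static struct ctx cd = { .asid = 5, .refs = 2 };

        asid_table[5] = &cd;
        printf("%d %d\n", free_asid(&cd), free_asid(&cd));     /* 0 1 */
        return 0;
}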
@@ -1939,7 +1312,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
-static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
+static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent)
{
unsigned int i;
@@ -2257,15 +1630,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
- cmd.opcode = CMDQ_OP_TLBI_NH_ASID;
- cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
- cmd.tlbi.vmid = 0;
- } else {
- cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
- cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
- }
-
/*
* NOTE: when io-pgtable is in non-strict mode, we may get here with
* PTEs previously cleared by unmaps on the current CPU not yet visible
@@ -2273,8 +1637,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
* insertion to guarantee those are observed before the TLBI. Do be
* careful, 007.
*/
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- arm_smmu_cmdq_issue_sync(smmu);
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+ } else {
+ cmd.opcode = CMDQ_OP_TLBI_S12_VMALL;
+ cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
+ }
arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
}
@@ -2458,9 +1828,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+ /* Prevent SVA from touching the CD while we're freeing it */
+ mutex_lock(&arm_smmu_asid_lock);
if (cfg->cdcfg.cdtab)
arm_smmu_free_cd_tables(smmu_domain);
arm_smmu_free_asid(&cfg->cd);
+ mutex_unlock(&arm_smmu_asid_lock);
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
if (cfg->vmid)
@@ -2480,10 +1853,14 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
+ refcount_set(&cfg->cd.refs, 1);
+
+ /* Prevent SVA from modifying the ASID until it is written to the CD */
+ mutex_lock(&arm_smmu_asid_lock);
+ ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
if (ret)
- return ret;
+ goto out_unlock;
cfg->s1cdmax = master->ssid_bits;
@@ -2511,12 +1888,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
if (ret)
goto out_free_cd_tables;
+ mutex_unlock(&arm_smmu_asid_lock);
return 0;
out_free_cd_tables:
arm_smmu_free_cd_tables(smmu_domain);
out_free_asid:
arm_smmu_free_asid(&cfg->cd);
+out_unlock:
+ mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
@@ -2796,6 +2176,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
master = dev_iommu_priv_get(dev);
smmu = master->smmu;
+ /*
+ * Checking that SVA is disabled ensures that this device isn't bound to
+ * any mm, and can be safely detached from its old domain. Bonds cannot
+ * be removed concurrently since we're holding the group mutex.
+ */
+ if (arm_smmu_master_sva_enabled(master)) {
+ dev_err(dev, "cannot attach - SVA enabled\n");
+ return -EBUSY;
+ }
+
arm_smmu_detach_dev(master);
mutex_lock(&smmu_domain->init_mutex);
@@ -2943,6 +2333,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->smmu = smmu;
master->sids = fwspec->ids;
master->num_sids = fwspec->num_ids;
+ INIT_LIST_HEAD(&master->bonds);
dev_iommu_priv_set(dev, master);
/* Check the SIDs are in range of the SMMU and our stream table */
@@ -2995,6 +2386,7 @@ static void arm_smmu_release_device(struct device *dev)
return;
master = dev_iommu_priv_get(dev);
+ WARN_ON(arm_smmu_master_sva_enabled(master));
arm_smmu_detach_dev(master);
arm_smmu_disable_pasid(master);
kfree(master);
@@ -3112,6 +2504,69 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
+static bool arm_smmu_dev_has_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (!master)
+ return false;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_SVA:
+ return arm_smmu_master_sva_supported(master);
+ default:
+ return false;
+ }
+}
+
+static bool arm_smmu_dev_feature_enabled(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (!master)
+ return false;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_SVA:
+ return arm_smmu_master_sva_enabled(master);
+ default:
+ return false;
+ }
+}
+
+static int arm_smmu_dev_enable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ if (!arm_smmu_dev_has_feature(dev, feat))
+ return -ENODEV;
+
+ if (arm_smmu_dev_feature_enabled(dev, feat))
+ return -EBUSY;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_SVA:
+ return arm_smmu_master_enable_sva(dev_iommu_priv_get(dev));
+ default:
+ return -EINVAL;
+ }
+}
+
+static int arm_smmu_dev_disable_feature(struct device *dev,
+ enum iommu_dev_features feat)
+{
+ if (!arm_smmu_dev_feature_enabled(dev, feat))
+ return -EINVAL;
+
+ switch (feat) {
+ case IOMMU_DEV_FEAT_SVA:
+ return arm_smmu_master_disable_sva(dev_iommu_priv_get(dev));
+ default:
+ return -EINVAL;
+ }
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -3130,6 +2585,10 @@ static struct iommu_ops arm_smmu_ops = {
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
+ .dev_has_feat = arm_smmu_dev_has_feature,
+ .dev_feat_enabled = arm_smmu_dev_feature_enabled,
+ .dev_enable_feat = arm_smmu_dev_enable_feature,
+ .dev_disable_feat = arm_smmu_dev_disable_feature,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -3280,7 +2739,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
if (!strtab) {
dev_err(smmu->dev,
"failed to allocate l1 stream table (%u bytes)\n",
- size);
+ l1size);
return -ENOMEM;
}
cfg->strtab = strtab;
@@ -3740,8 +3199,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (reg & IDR0_SEV)
smmu->features |= ARM_SMMU_FEAT_SEV;
- if (reg & IDR0_MSI)
+ if (reg & IDR0_MSI) {
smmu->features |= ARM_SMMU_FEAT_MSI;
+ if (coherent && !disable_msipolling)
+ smmu->options |= ARM_SMMU_OPT_MSIPOLL;
+ }
if (reg & IDR0_HYP)
smmu->features |= ARM_SMMU_FEAT_HYP;
@@ -3891,6 +3353,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ias = max(smmu->ias, smmu->oas);
+ if (arm_smmu_sva_supported(smmu))
+ smmu->features |= ARM_SMMU_FEAT_SVA;
+
dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
smmu->ias, smmu->oas, smmu->features);
return 0;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
new file mode 100644
index 000000000000..d4b7f40ccb02
--- /dev/null
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -0,0 +1,723 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IOMMU API for ARM architected SMMUv3 implementations.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef _ARM_SMMU_V3_H
+#define _ARM_SMMU_V3_H
+
+#include <linux/bitfield.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/sizes.h>
+
+/* MMIO registers */
+#define ARM_SMMU_IDR0 0x0
+#define IDR0_ST_LVL GENMASK(28, 27)
+#define IDR0_ST_LVL_2LVL 1
+#define IDR0_STALL_MODEL GENMASK(25, 24)
+#define IDR0_STALL_MODEL_STALL 0
+#define IDR0_STALL_MODEL_FORCE 2
+#define IDR0_TTENDIAN GENMASK(22, 21)
+#define IDR0_TTENDIAN_MIXED 0
+#define IDR0_TTENDIAN_LE 2
+#define IDR0_TTENDIAN_BE 3
+#define IDR0_CD2L (1 << 19)
+#define IDR0_VMID16 (1 << 18)
+#define IDR0_PRI (1 << 16)
+#define IDR0_SEV (1 << 14)
+#define IDR0_MSI (1 << 13)
+#define IDR0_ASID16 (1 << 12)
+#define IDR0_ATS (1 << 10)
+#define IDR0_HYP (1 << 9)
+#define IDR0_COHACC (1 << 4)
+#define IDR0_TTF GENMASK(3, 2)
+#define IDR0_TTF_AARCH64 2
+#define IDR0_TTF_AARCH32_64 3
+#define IDR0_S1P (1 << 1)
+#define IDR0_S2P (1 << 0)
+
+#define ARM_SMMU_IDR1 0x4
+#define IDR1_TABLES_PRESET (1 << 30)
+#define IDR1_QUEUES_PRESET (1 << 29)
+#define IDR1_REL (1 << 28)
+#define IDR1_CMDQS GENMASK(25, 21)
+#define IDR1_EVTQS GENMASK(20, 16)
+#define IDR1_PRIQS GENMASK(15, 11)
+#define IDR1_SSIDSIZE GENMASK(10, 6)
+#define IDR1_SIDSIZE GENMASK(5, 0)
+
+#define ARM_SMMU_IDR3 0xc
+#define IDR3_RIL (1 << 10)
+
+#define ARM_SMMU_IDR5 0x14
+#define IDR5_STALL_MAX GENMASK(31, 16)
+#define IDR5_GRAN64K (1 << 6)
+#define IDR5_GRAN16K (1 << 5)
+#define IDR5_GRAN4K (1 << 4)
+#define IDR5_OAS GENMASK(2, 0)
+#define IDR5_OAS_32_BIT 0
+#define IDR5_OAS_36_BIT 1
+#define IDR5_OAS_40_BIT 2
+#define IDR5_OAS_42_BIT 3
+#define IDR5_OAS_44_BIT 4
+#define IDR5_OAS_48_BIT 5
+#define IDR5_OAS_52_BIT 6
+#define IDR5_VAX GENMASK(11, 10)
+#define IDR5_VAX_52_BIT 1
+
+#define ARM_SMMU_CR0 0x20
+#define CR0_ATSCHK (1 << 4)
+#define CR0_CMDQEN (1 << 3)
+#define CR0_EVTQEN (1 << 2)
+#define CR0_PRIQEN (1 << 1)
+#define CR0_SMMUEN (1 << 0)
+
+#define ARM_SMMU_CR0ACK 0x24
+
+#define ARM_SMMU_CR1 0x28
+#define CR1_TABLE_SH GENMASK(11, 10)
+#define CR1_TABLE_OC GENMASK(9, 8)
+#define CR1_TABLE_IC GENMASK(7, 6)
+#define CR1_QUEUE_SH GENMASK(5, 4)
+#define CR1_QUEUE_OC GENMASK(3, 2)
+#define CR1_QUEUE_IC GENMASK(1, 0)
+/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
+#define CR1_CACHE_NC 0
+#define CR1_CACHE_WB 1
+#define CR1_CACHE_WT 2
+
+#define ARM_SMMU_CR2 0x2c
+#define CR2_PTM (1 << 2)
+#define CR2_RECINVSID (1 << 1)
+#define CR2_E2H (1 << 0)
+
+#define ARM_SMMU_GBPA 0x44
+#define GBPA_UPDATE (1 << 31)
+#define GBPA_ABORT (1 << 20)
+
+#define ARM_SMMU_IRQ_CTRL 0x50
+#define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
+#define IRQ_CTRL_GERROR_IRQEN (1 << 0)
+
+#define ARM_SMMU_IRQ_CTRLACK 0x54
+
+#define ARM_SMMU_GERROR 0x60
+#define GERROR_SFM_ERR (1 << 8)
+#define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
+#define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
+#define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
+#define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
+#define GERROR_PRIQ_ABT_ERR (1 << 3)
+#define GERROR_EVTQ_ABT_ERR (1 << 2)
+#define GERROR_CMDQ_ERR (1 << 0)
+#define GERROR_ERR_MASK 0xfd
+
+#define ARM_SMMU_GERRORN 0x64
+
+#define ARM_SMMU_GERROR_IRQ_CFG0 0x68
+#define ARM_SMMU_GERROR_IRQ_CFG1 0x70
+#define ARM_SMMU_GERROR_IRQ_CFG2 0x74
+
+#define ARM_SMMU_STRTAB_BASE 0x80
+#define STRTAB_BASE_RA (1UL << 62)
+#define STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
+
+#define ARM_SMMU_STRTAB_BASE_CFG 0x88
+#define STRTAB_BASE_CFG_FMT GENMASK(17, 16)
+#define STRTAB_BASE_CFG_FMT_LINEAR 0
+#define STRTAB_BASE_CFG_FMT_2LVL 1
+#define STRTAB_BASE_CFG_SPLIT GENMASK(10, 6)
+#define STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0)
+
+#define ARM_SMMU_CMDQ_BASE 0x90
+#define ARM_SMMU_CMDQ_PROD 0x98
+#define ARM_SMMU_CMDQ_CONS 0x9c
+
+#define ARM_SMMU_EVTQ_BASE 0xa0
+#define ARM_SMMU_EVTQ_PROD 0x100a8
+#define ARM_SMMU_EVTQ_CONS 0x100ac
+#define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
+#define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
+#define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
+
+#define ARM_SMMU_PRIQ_BASE 0xc0
+#define ARM_SMMU_PRIQ_PROD 0x100c8
+#define ARM_SMMU_PRIQ_CONS 0x100cc
+#define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
+#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
+#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
+
+#define ARM_SMMU_REG_SZ 0xe00
+
+/* Common MSI config fields */
+#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
+#define MSI_CFG2_SH GENMASK(5, 4)
+#define MSI_CFG2_MEMATTR GENMASK(3, 0)
+
+/* Common memory attribute values */
+#define ARM_SMMU_SH_NSH 0
+#define ARM_SMMU_SH_OSH 2
+#define ARM_SMMU_SH_ISH 3
+#define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1
+#define ARM_SMMU_MEMATTR_OIWB 0xf
+
+#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1))
+#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift))
+#define Q_OVERFLOW_FLAG (1U << 31)
+#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)
+#define Q_ENT(q, p) ((q)->base + \
+ Q_IDX(&((q)->llq), p) * \
+ (q)->ent_dwords)
+
+#define Q_BASE_RWA (1UL << 62)
+#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
+#define Q_BASE_LOG2SIZE GENMASK(4, 0)
+
+/* Ensure DMA allocations are naturally aligned */
+#ifdef CONFIG_CMA_ALIGNMENT
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
+#else
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
+#endif
+
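As a standalone instance of the queue-pointer macros above, take a queue with max_n_shift = 8 (256 entries): the low bits of a pointer index the queue, the next bit is the wrap flag, and bit 31 signals producer overflow. The macros below are local copies specialized to that size:

#include <stdio.h>

#define MAX_N_SHIFT 8
#define Q_IDX(p) ((p) & ((1 << MAX_N_SHIFT) - 1))
#define Q_WRP(p) ((p) & (1 << MAX_N_SHIFT))
#define Q_OVERFLOW_FLAG (1U << 31)
#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG)

int main(void)
{
        unsigned int prod = 0x105;      /* wrapped once, at slot 5 */

        printf("idx=%u wrp=%u ovf=%u\n",
               Q_IDX(prod), !!Q_WRP(prod), !!Q_OVF(prod));
        /* prints: idx=5 wrp=1 ovf=0 */
        return 0;
}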
+/*
+ * Stream table.
+ *
+ * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
+ * 2lvl: 128k L1 entries,
+ * 256 lazy entries per table (each table covers a PCI bus)
+ */
+#define STRTAB_L1_SZ_SHIFT 20
+#define STRTAB_SPLIT 8
+
+#define STRTAB_L1_DESC_DWORDS 1
+#define STRTAB_L1_DESC_SPAN GENMASK_ULL(4, 0)
+#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
+
+#define STRTAB_STE_DWORDS 8
+#define STRTAB_STE_0_V (1UL << 0)
+#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
+#define STRTAB_STE_0_CFG_ABORT 0
+#define STRTAB_STE_0_CFG_BYPASS 4
+#define STRTAB_STE_0_CFG_S1_TRANS 5
+#define STRTAB_STE_0_CFG_S2_TRANS 6
+
+#define STRTAB_STE_0_S1FMT GENMASK_ULL(5, 4)
+#define STRTAB_STE_0_S1FMT_LINEAR 0
+#define STRTAB_STE_0_S1FMT_64K_L2 2
+#define STRTAB_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
+#define STRTAB_STE_0_S1CDMAX GENMASK_ULL(63, 59)
+
+#define STRTAB_STE_1_S1DSS GENMASK_ULL(1, 0)
+#define STRTAB_STE_1_S1DSS_TERMINATE 0x0
+#define STRTAB_STE_1_S1DSS_BYPASS 0x1
+#define STRTAB_STE_1_S1DSS_SSID0 0x2
+
+#define STRTAB_STE_1_S1C_CACHE_NC 0UL
+#define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
+#define STRTAB_STE_1_S1C_CACHE_WT 2UL
+#define STRTAB_STE_1_S1C_CACHE_WB 3UL
+#define STRTAB_STE_1_S1CIR GENMASK_ULL(3, 2)
+#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
+#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
+
+#define STRTAB_STE_1_S1STALLD (1UL << 27)
+
+#define STRTAB_STE_1_EATS GENMASK_ULL(29, 28)
+#define STRTAB_STE_1_EATS_ABT 0UL
+#define STRTAB_STE_1_EATS_TRANS 1UL
+#define STRTAB_STE_1_EATS_S1CHK 2UL
+
+#define STRTAB_STE_1_STRW GENMASK_ULL(31, 30)
+#define STRTAB_STE_1_STRW_NSEL1 0UL
+#define STRTAB_STE_1_STRW_EL2 2UL
+
+#define STRTAB_STE_1_SHCFG GENMASK_ULL(45, 44)
+#define STRTAB_STE_1_SHCFG_INCOMING 1UL
+
+#define STRTAB_STE_2_S2VMID GENMASK_ULL(15, 0)
+#define STRTAB_STE_2_VTCR GENMASK_ULL(50, 32)
+#define STRTAB_STE_2_VTCR_S2T0SZ GENMASK_ULL(5, 0)
+#define STRTAB_STE_2_VTCR_S2SL0 GENMASK_ULL(7, 6)
+#define STRTAB_STE_2_VTCR_S2IR0 GENMASK_ULL(9, 8)
+#define STRTAB_STE_2_VTCR_S2OR0 GENMASK_ULL(11, 10)
+#define STRTAB_STE_2_VTCR_S2SH0 GENMASK_ULL(13, 12)
+#define STRTAB_STE_2_VTCR_S2TG GENMASK_ULL(15, 14)
+#define STRTAB_STE_2_VTCR_S2PS GENMASK_ULL(18, 16)
+#define STRTAB_STE_2_S2AA64 (1UL << 51)
+#define STRTAB_STE_2_S2ENDI (1UL << 52)
+#define STRTAB_STE_2_S2PTW (1UL << 54)
+#define STRTAB_STE_2_S2R (1UL << 58)
+
+#define STRTAB_STE_3_S2TTB_MASK GENMASK_ULL(51, 4)
+
+/*
+ * Context descriptors.
+ *
+ * Linear: when less than 1024 SSIDs are supported
+ * 2lvl: at most 1024 L1 entries,
+ * 1024 lazy entries per table.
+ */
+#define CTXDESC_SPLIT 10
+#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
+
+#define CTXDESC_L1_DESC_DWORDS 1
+#define CTXDESC_L1_DESC_V (1UL << 0)
+#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
+
+#define CTXDESC_CD_DWORDS 8
+#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
+#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
+#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define CTXDESC_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define CTXDESC_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
+#define CTXDESC_CD_0_TCR_EPD0 (1ULL << 14)
+#define CTXDESC_CD_0_TCR_EPD1 (1ULL << 30)
+
+#define CTXDESC_CD_0_ENDI (1UL << 15)
+#define CTXDESC_CD_0_V (1UL << 31)
+
+#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
+#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
+
+#define CTXDESC_CD_0_AA64 (1UL << 41)
+#define CTXDESC_CD_0_S (1UL << 44)
+#define CTXDESC_CD_0_R (1UL << 45)
+#define CTXDESC_CD_0_A (1UL << 46)
+#define CTXDESC_CD_0_ASET (1UL << 47)
+#define CTXDESC_CD_0_ASID GENMASK_ULL(63, 48)
+
+#define CTXDESC_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
+
+/*
+ * When the SMMU only supports linear context descriptor tables, pick a
+ * reasonable size limit (64kB).
+ */
+#define CTXDESC_LINEAR_CDMAX ilog2(SZ_64K / (CTXDESC_CD_DWORDS << 3))
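The arithmetic behind CTXDESC_LINEAR_CDMAX: one context descriptor is 8 dwords (64 bytes), so 64kB holds 1024 CDs, and ilog2(1024) = 10 — the same granule as CTXDESC_SPLIT. A quick userspace check of that computation (not driver code):

#include <assert.h>

int main(void)
{
	unsigned int cd_bytes = 8 << 3;			/* CTXDESC_CD_DWORDS << 3 */
	unsigned int max_cds = (64 * 1024) / cd_bytes;	/* SZ_64K / 64 */

	assert(max_cds == 1024);
	assert(__builtin_ctz(max_cds) == 10);	/* ilog2() of a power of two */
	return 0;
}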
+
+/* Command queue */
+#define CMDQ_ENT_SZ_SHIFT 4
+#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
+#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
+
+#define CMDQ_CONS_ERR GENMASK(30, 24)
+#define CMDQ_ERR_CERROR_NONE_IDX 0
+#define CMDQ_ERR_CERROR_ILL_IDX 1
+#define CMDQ_ERR_CERROR_ABT_IDX 2
+#define CMDQ_ERR_CERROR_ATC_INV_IDX 3
+
+#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG
+
+/*
+ * This is used to size the command queue and therefore must be at least
+ * BITS_PER_LONG so that the valid_map works correctly (it relies on the
+ * total number of queue entries being a multiple of BITS_PER_LONG).
+ */
+#define CMDQ_BATCH_ENTRIES BITS_PER_LONG
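The multiple-of-BITS_PER_LONG requirement follows from valid_map packing one bit per queue entry into unsigned longs: whole-word sizing means the wrap point never splits a word. A minimal illustration, assuming a 256-entry command queue:

#include <stdio.h>

#define BITS_PER_LONG	(8ul * sizeof(unsigned long))

int main(void)
{
	unsigned long nents = 1ul << 8;		/* 256, a multiple of BITS_PER_LONG */
	unsigned long nwords = nents / BITS_PER_LONG;

	/* Each atomic update in the driver flips bits within one word;
	 * whole-word sizing keeps the wrap from straddling two words. */
	printf("%lu entries -> %lu valid_map words, remainder %lu\n",
	       nents, nwords, nents % BITS_PER_LONG);
	return 0;
}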
+
+#define CMDQ_0_OP GENMASK_ULL(7, 0)
+#define CMDQ_0_SSV (1UL << 11)
+
+#define CMDQ_PREFETCH_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PREFETCH_1_SIZE GENMASK_ULL(4, 0)
+#define CMDQ_PREFETCH_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+#define CMDQ_CFGI_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_CFGI_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_CFGI_1_LEAF (1UL << 0)
+#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
+
+#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX 31
+#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
+#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
+#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
+#define CMDQ_TLBI_1_LEAF (1UL << 0)
+#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
+#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
+#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
+
+#define CMDQ_ATC_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_ATC_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_ATC_0_GLOBAL (1UL << 9)
+#define CMDQ_ATC_1_SIZE GENMASK_ULL(5, 0)
+#define CMDQ_ATC_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+#define CMDQ_PRI_0_SSID GENMASK_ULL(31, 12)
+#define CMDQ_PRI_0_SID GENMASK_ULL(63, 32)
+#define CMDQ_PRI_1_GRPID GENMASK_ULL(8, 0)
+#define CMDQ_PRI_1_RESP GENMASK_ULL(13, 12)
+
+#define CMDQ_SYNC_0_CS GENMASK_ULL(13, 12)
+#define CMDQ_SYNC_0_CS_NONE 0
+#define CMDQ_SYNC_0_CS_IRQ 1
+#define CMDQ_SYNC_0_CS_SEV 2
+#define CMDQ_SYNC_0_MSH GENMASK_ULL(23, 22)
+#define CMDQ_SYNC_0_MSIATTR GENMASK_ULL(27, 24)
+#define CMDQ_SYNC_0_MSIDATA GENMASK_ULL(63, 32)
+#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
+
+/* Event queue */
+#define EVTQ_ENT_SZ_SHIFT 5
+#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+
+#define EVTQ_0_ID GENMASK_ULL(7, 0)
+
+/* PRI queue */
+#define PRIQ_ENT_SZ_SHIFT 4
+#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+
+#define PRIQ_0_SID GENMASK_ULL(31, 0)
+#define PRIQ_0_SSID GENMASK_ULL(51, 32)
+#define PRIQ_0_PERM_PRIV (1UL << 58)
+#define PRIQ_0_PERM_EXEC (1UL << 59)
+#define PRIQ_0_PERM_READ (1UL << 60)
+#define PRIQ_0_PERM_WRITE (1UL << 61)
+#define PRIQ_0_PRG_LAST (1UL << 62)
+#define PRIQ_0_SSID_V (1UL << 63)
+
+#define PRIQ_1_PRG_IDX GENMASK_ULL(8, 0)
+#define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12)
+
+/* High-level queue structures */
+#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_POLL_SPIN_COUNT 10
+
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
+enum pri_resp {
+ PRI_RESP_DENY = 0,
+ PRI_RESP_FAIL = 1,
+ PRI_RESP_SUCC = 2,
+};
+
+struct arm_smmu_cmdq_ent {
+ /* Common fields */
+ u8 opcode;
+ bool substream_valid;
+
+ /* Command-specific fields */
+ union {
+ #define CMDQ_OP_PREFETCH_CFG 0x1
+ struct {
+ u32 sid;
+ u8 size;
+ u64 addr;
+ } prefetch;
+
+ #define CMDQ_OP_CFGI_STE 0x3
+ #define CMDQ_OP_CFGI_ALL 0x4
+ #define CMDQ_OP_CFGI_CD 0x5
+ #define CMDQ_OP_CFGI_CD_ALL 0x6
+ struct {
+ u32 sid;
+ u32 ssid;
+ union {
+ bool leaf;
+ u8 span;
+ };
+ } cfgi;
+
+ #define CMDQ_OP_TLBI_NH_ASID 0x11
+ #define CMDQ_OP_TLBI_NH_VA 0x12
+ #define CMDQ_OP_TLBI_EL2_ALL 0x20
+ #define CMDQ_OP_TLBI_S12_VMALL 0x28
+ #define CMDQ_OP_TLBI_S2_IPA 0x2a
+ #define CMDQ_OP_TLBI_NSNH_ALL 0x30
+ struct {
+ u8 num;
+ u8 scale;
+ u16 asid;
+ u16 vmid;
+ bool leaf;
+ u8 ttl;
+ u8 tg;
+ u64 addr;
+ } tlbi;
+
+ #define CMDQ_OP_ATC_INV 0x40
+ #define ATC_INV_SIZE_ALL 52
+ struct {
+ u32 sid;
+ u32 ssid;
+ u64 addr;
+ u8 size;
+ bool global;
+ } atc;
+
+ #define CMDQ_OP_PRI_RESP 0x41
+ struct {
+ u32 sid;
+ u32 ssid;
+ u16 grpid;
+ enum pri_resp resp;
+ } pri;
+
+ #define CMDQ_OP_CMD_SYNC 0x46
+ struct {
+ u64 msiaddr;
+ } sync;
+ };
+};
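As a hedged sketch of how such a descriptor becomes the two command dwords: the field_prep() helper below is a hypothetical userspace stand-in for the kernel's FIELD_PREP(), and only the CFGI_STE opcode is modelled, using the CMDQ_CFGI_* field definitions from earlier in this header:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for FIELD_PREP(): shift a value into a contiguous mask */
static inline uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

static void build_cfgi_ste(uint64_t cmd[2], uint32_t sid, bool leaf)
{
	cmd[0] = field_prep(0xffULL, 0x3);		/* CMDQ_0_OP = CMDQ_OP_CFGI_STE */
	cmd[0] |= field_prep(0xffffffffULL << 32, sid);	/* CMDQ_CFGI_0_SID */
	cmd[1] = leaf ? 1 : 0;				/* CMDQ_CFGI_1_LEAF */
}

int main(void)
{
	uint64_t cmd[2];

	build_cfgi_ste(cmd, 0x10008, true);	/* invalidate a single STE */
	return cmd[0] != 0 ? 0 : 1;
}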
+
+struct arm_smmu_ll_queue {
+ union {
+ u64 val;
+ struct {
+ u32 prod;
+ u32 cons;
+ };
+ struct {
+ atomic_t prod;
+ atomic_t cons;
+ } atomic;
+ u8 __pad[SMP_CACHE_BYTES];
+ } ____cacheline_aligned_in_smp;
+ u32 max_n_shift;
+};
+
+struct arm_smmu_queue {
+ struct arm_smmu_ll_queue llq;
+ int irq; /* Wired interrupt */
+
+ __le64 *base;
+ dma_addr_t base_dma;
+ u64 q_base;
+
+ size_t ent_dwords;
+
+ u32 __iomem *prod_reg;
+ u32 __iomem *cons_reg;
+};
+
+struct arm_smmu_queue_poll {
+ ktime_t timeout;
+ unsigned int delay;
+ unsigned int spin_cnt;
+ bool wfe;
+};
+
+struct arm_smmu_cmdq {
+ struct arm_smmu_queue q;
+ atomic_long_t *valid_map;
+ atomic_t owner_prod;
+ atomic_t lock;
+};
+
+struct arm_smmu_cmdq_batch {
+ u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+ int num;
+};
+
+struct arm_smmu_evtq {
+ struct arm_smmu_queue q;
+ u32 max_stalls;
+};
+
+struct arm_smmu_priq {
+ struct arm_smmu_queue q;
+};
+
+/* High-level stream table and context descriptor structures */
+struct arm_smmu_strtab_l1_desc {
+ u8 span;
+
+ __le64 *l2ptr;
+ dma_addr_t l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc {
+ u16 asid;
+ u64 ttbr;
+ u64 tcr;
+ u64 mair;
+
+ refcount_t refs;
+ struct mm_struct *mm;
+};
+
+struct arm_smmu_l1_ctx_desc {
+ __le64 *l2ptr;
+ dma_addr_t l2ptr_dma;
+};
+
+struct arm_smmu_ctx_desc_cfg {
+ __le64 *cdtab;
+ dma_addr_t cdtab_dma;
+ struct arm_smmu_l1_ctx_desc *l1_desc;
+ unsigned int num_l1_ents;
+};
+
+struct arm_smmu_s1_cfg {
+ struct arm_smmu_ctx_desc_cfg cdcfg;
+ struct arm_smmu_ctx_desc cd;
+ u8 s1fmt;
+ u8 s1cdmax;
+};
+
+struct arm_smmu_s2_cfg {
+ u16 vmid;
+ u64 vttbr;
+ u64 vtcr;
+};
+
+struct arm_smmu_strtab_cfg {
+ __le64 *strtab;
+ dma_addr_t strtab_dma;
+ struct arm_smmu_strtab_l1_desc *l1_desc;
+ unsigned int num_l1_ents;
+
+ u64 strtab_base;
+ u32 strtab_base_cfg;
+};
+
+/* An SMMUv3 instance */
+struct arm_smmu_device {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *page1;
+
+#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
+#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
+#define ARM_SMMU_FEAT_TT_LE (1 << 2)
+#define ARM_SMMU_FEAT_TT_BE (1 << 3)
+#define ARM_SMMU_FEAT_PRI (1 << 4)
+#define ARM_SMMU_FEAT_ATS (1 << 5)
+#define ARM_SMMU_FEAT_SEV (1 << 6)
+#define ARM_SMMU_FEAT_MSI (1 << 7)
+#define ARM_SMMU_FEAT_COHERENCY (1 << 8)
+#define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
+#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
+#define ARM_SMMU_FEAT_STALLS (1 << 11)
+#define ARM_SMMU_FEAT_HYP (1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
+#define ARM_SMMU_FEAT_VAX (1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
+#define ARM_SMMU_FEAT_BTM (1 << 16)
+#define ARM_SMMU_FEAT_SVA (1 << 17)
+ u32 features;
+
+#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
+#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
+#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
+ u32 options;
+
+ struct arm_smmu_cmdq cmdq;
+ struct arm_smmu_evtq evtq;
+ struct arm_smmu_priq priq;
+
+ int gerr_irq;
+ int combined_irq;
+
+ unsigned long ias; /* IPA */
+ unsigned long oas; /* PA */
+ unsigned long pgsize_bitmap;
+
+#define ARM_SMMU_MAX_ASIDS (1 << 16)
+ unsigned int asid_bits;
+
+#define ARM_SMMU_MAX_VMIDS (1 << 16)
+ unsigned int vmid_bits;
+ DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
+
+ unsigned int ssid_bits;
+ unsigned int sid_bits;
+
+ struct arm_smmu_strtab_cfg strtab_cfg;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+};
+
+/* SMMU private data for each master */
+struct arm_smmu_master {
+ struct arm_smmu_device *smmu;
+ struct device *dev;
+ struct arm_smmu_domain *domain;
+ struct list_head domain_head;
+ u32 *sids;
+ unsigned int num_sids;
+ bool ats_enabled;
+ bool sva_enabled;
+ struct list_head bonds;
+ unsigned int ssid_bits;
+};
+
+/* SMMU private data for an IOMMU domain */
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
+};
+
+struct arm_smmu_domain {
+ struct arm_smmu_device *smmu;
+ struct mutex init_mutex; /* Protects smmu pointer */
+
+ struct io_pgtable_ops *pgtbl_ops;
+ bool non_strict;
+ atomic_t nr_ats_masters;
+
+ enum arm_smmu_domain_stage stage;
+ union {
+ struct arm_smmu_s1_cfg s1_cfg;
+ struct arm_smmu_s2_cfg s2_cfg;
+ };
+
+ struct iommu_domain domain;
+
+ struct list_head devices;
+ spinlock_t devices_lock;
+};
+
+extern struct xarray arm_smmu_asid_xa;
+extern struct mutex arm_smmu_asid_lock;
+
+int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+ struct arm_smmu_ctx_desc *cd);
+void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
+bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
+
+#ifdef CONFIG_ARM_SMMU_V3_SVA
+bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
+bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
+bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
+int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
+int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
+#else /* CONFIG_ARM_SMMU_V3_SVA */
+static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
+{
+ return false;
+}
+
+static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
+{
+ return false;
+}
+
+static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
+{
+ return -ENODEV;
+}
+
+static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_V3_SVA */
+#endif /* _ARM_SMMU_V3_H */
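The #ifdef block above follows the usual kernel stub pattern, letting callers use the SVA API unconditionally. A hypothetical call site (not part of this patch) that compiles in both configurations:

/* Hypothetical caller; builds whether or not CONFIG_ARM_SMMU_V3_SVA is
 * set, since the static inline stubs collapse the checks to constants. */
static int try_enable_sva(struct arm_smmu_master *master)
{
	if (!arm_smmu_master_sva_supported(master))
		return -EOPNOTSUPP;

	return arm_smmu_master_enable_sva(master);
}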
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index f4ff124a1967..88f17cc33023 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -68,7 +68,8 @@ static int cavium_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
-static int cavium_init_context(struct arm_smmu_domain *smmu_domain)
+static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
struct cavium_smmu *cs = container_of(smmu_domain->smmu,
struct cavium_smmu, smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 09c42af9f31e..dad7fa86fbd4 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -65,41 +65,10 @@ module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
-struct arm_smmu_s2cr {
- struct iommu_group *group;
- int count;
- enum arm_smmu_s2cr_type type;
- enum arm_smmu_s2cr_privcfg privcfg;
- u8 cbndx;
-};
-
#define s2cr_init_val (struct arm_smmu_s2cr){ \
.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS, \
}
-struct arm_smmu_smr {
- u16 mask;
- u16 id;
- bool valid;
-};
-
-struct arm_smmu_cb {
- u64 ttbr[2];
- u32 tcr[2];
- u32 mair[2];
- struct arm_smmu_cfg *cfg;
-};
-
-struct arm_smmu_master_cfg {
- struct arm_smmu_device *smmu;
- s16 smendx[];
-};
-#define INVALID_SMENDX -1
-#define cfg_smendx(cfg, fw, i) \
- (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
-#define for_each_cfg_sme(cfg, fw, i, idx) \
- for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
-
static bool using_legacy_binding, using_generic_binding;
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
@@ -234,19 +203,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
-static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
-{
- int idx;
-
- do {
- idx = find_next_zero_bit(map, end, start);
- if (idx == end)
- return -ENOSPC;
- } while (test_and_set_bit(idx, map));
-
- return idx;
-}
-
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
clear_bit(idx, map);
@@ -552,11 +508,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
cb->ttbr[1] = 0;
} else {
- cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
- cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID,
- cfg->asid);
+ cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
+ cfg->asid);
cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
cfg->asid);
+
+ if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+ cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+ else
+ cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
}
} else {
cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
@@ -574,7 +534,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
}
-static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
+void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
u32 reg;
bool stage1;
@@ -660,8 +620,19 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}
+static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, unsigned int start)
+{
+ if (smmu->impl && smmu->impl->alloc_context_bank)
+ return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
+
+ return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
+}
+
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
- struct arm_smmu_device *smmu)
+ struct arm_smmu_device *smmu,
+ struct device *dev)
{
int irq, start, ret = 0;
unsigned long ias, oas;
@@ -776,10 +747,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = -EINVAL;
goto out_unlock;
}
- ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
- smmu->num_context_banks);
- if (ret < 0)
+
+ ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
+	if (ret < 0)
 		goto out_unlock;
+
+ smmu_domain->smmu = smmu;
cfg->cbndx = ret;
if (smmu->version < ARM_SMMU_V2) {
@@ -794,13 +768,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
cfg->asid = cfg->cbndx;
- smmu_domain->smmu = smmu;
- if (smmu->impl && smmu->impl->init_context) {
- ret = smmu->impl->init_context(smmu_domain);
- if (ret)
- goto out_unlock;
- }
-
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
@@ -810,6 +777,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.iommu_dev = smmu->dev,
};
+ if (smmu->impl && smmu->impl->init_context) {
+ ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
+ if (ret)
+ goto out_clear_smmu;
+ }
+
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
@@ -821,7 +794,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
/* Update the domain's page sizes to reflect the page table format */
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << ias) - 1;
+
+ if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ domain->geometry.aperture_start = ~0UL << ias;
+ domain->geometry.aperture_end = ~0UL;
+ } else {
+ domain->geometry.aperture_end = (1UL << ias) - 1;
+ }
+
domain->geometry.force_aperture = true;
/* Initialise the context bank with our page table cfg */
@@ -1182,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu);
+ ret = arm_smmu_init_domain_context(domain, smmu, dev);
if (ret < 0)
goto rpm_put;
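The aperture rework above deserves a worked example: with IO_PGTABLE_QUIRK_ARM_TTBR1 set and an assumed 48-bit input address size, the domain's usable IOVA range becomes the upper region ending at ~0UL instead of the usual 0..(1 << ias) - 1. A userspace model of that computation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int ias = 48;			/* assumed input address size */
	uint64_t ttbr1_start = ~UINT64_C(0) << ias;
	uint64_t ttbr0_end = (UINT64_C(1) << ias) - 1;

	printf("TTBR0 aperture: 0x0 - 0x%" PRIx64 "\n", ttbr0_end);
	printf("TTBR1 aperture: 0x%" PRIx64 " - 0x%" PRIx64 "\n",
	       ttbr1_start, ~UINT64_C(0));
	return 0;
}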
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index d890a4a968e8..1a746476927c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -169,10 +169,12 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_TCR 0x30
#define ARM_SMMU_TCR_EAE BIT(31)
#define ARM_SMMU_TCR_EPD1 BIT(23)
+#define ARM_SMMU_TCR_A1 BIT(22)
#define ARM_SMMU_TCR_TG0 GENMASK(15, 14)
#define ARM_SMMU_TCR_SH0 GENMASK(13, 12)
#define ARM_SMMU_TCR_ORGN0 GENMASK(11, 10)
#define ARM_SMMU_TCR_IRGN0 GENMASK(9, 8)
+#define ARM_SMMU_TCR_EPD0 BIT(7)
#define ARM_SMMU_TCR_T0SZ GENMASK(5, 0)
#define ARM_SMMU_VTCR_RES1 BIT(31)
@@ -254,6 +256,21 @@ enum arm_smmu_implementation {
QCOM_SMMUV2,
};
+struct arm_smmu_s2cr {
+ struct iommu_group *group;
+ int count;
+ enum arm_smmu_s2cr_type type;
+ enum arm_smmu_s2cr_privcfg privcfg;
+ u8 cbndx;
+};
+
+struct arm_smmu_smr {
+ u16 mask;
+ u16 id;
+ bool valid;
+ bool pinned;
+};
+
struct arm_smmu_device {
struct device *dev;
@@ -329,6 +346,13 @@ struct arm_smmu_cfg {
};
#define ARM_SMMU_INVALID_IRPTNDX 0xff
+struct arm_smmu_cb {
+ u64 ttbr[2];
+ u32 tcr[2];
+ u32 mair[2];
+ struct arm_smmu_cfg *cfg;
+};
+
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
@@ -348,23 +372,39 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
-static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
+struct arm_smmu_master_cfg {
+ struct arm_smmu_device *smmu;
+ s16 smendx[];
+};
+
+static inline u32 arm_smmu_lpae_tcr(const struct io_pgtable_cfg *cfg)
{
- return ARM_SMMU_TCR_EPD1 |
- FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
- FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
- FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
- FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
- FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+ u32 tcr = FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
+ FIELD_PREP(ARM_SMMU_TCR_SH0, cfg->arm_lpae_s1_cfg.tcr.sh) |
+ FIELD_PREP(ARM_SMMU_TCR_ORGN0, cfg->arm_lpae_s1_cfg.tcr.orgn) |
+ FIELD_PREP(ARM_SMMU_TCR_IRGN0, cfg->arm_lpae_s1_cfg.tcr.irgn) |
+ FIELD_PREP(ARM_SMMU_TCR_T0SZ, cfg->arm_lpae_s1_cfg.tcr.tsz);
+
+ /*
+ * When TTBR1 is selected shift the TCR fields by 16 bits and disable
+ * translation in TTBR0
+ */
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ tcr = (tcr << 16) & ~ARM_SMMU_TCR_A1;
+ tcr |= ARM_SMMU_TCR_EPD0;
+	} else {
+		tcr |= ARM_SMMU_TCR_EPD1;
+	}
+
+ return tcr;
}
-static inline u32 arm_smmu_lpae_tcr2(struct io_pgtable_cfg *cfg)
+static inline u32 arm_smmu_lpae_tcr2(const struct io_pgtable_cfg *cfg)
{
return FIELD_PREP(ARM_SMMU_TCR2_PASIZE, cfg->arm_lpae_s1_cfg.tcr.ips) |
FIELD_PREP(ARM_SMMU_TCR2_SEP, ARM_SMMU_TCR2_SEP_UPSTREAM);
}
-static inline u32 arm_smmu_lpae_vtcr(struct io_pgtable_cfg *cfg)
+static inline u32 arm_smmu_lpae_vtcr(const struct io_pgtable_cfg *cfg)
{
return ARM_SMMU_VTCR_RES1 |
FIELD_PREP(ARM_SMMU_VTCR_PS, cfg->arm_lpae_s2_cfg.vtcr.ps) |
@@ -386,14 +426,37 @@ struct arm_smmu_impl {
u64 val);
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
- int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ int (*init_context)(struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *cfg, struct device *dev);
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
int (*def_domain_type)(struct device *dev);
irqreturn_t (*global_fault)(int irq, void *dev);
irqreturn_t (*context_fault)(int irq, void *dev);
+ int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu,
+ struct device *dev, int start);
};
+#define INVALID_SMENDX -1
+#define cfg_smendx(cfg, fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+ for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
+
+static inline int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
return smmu->base + (n << smmu->pgshift);
@@ -458,6 +521,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
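The reworked arm_smmu_lpae_tcr() relies on the TCR layout: each TTBR1 field (TG1, SH1, ORGN1, IRGN1, T1SZ) sits exactly 16 bits above its TTBR0 twin, so a single shift relocates the whole group, after which EPD0 turns off TTBR0 walks. A standalone model with a made-up field encoding:

#include <assert.h>
#include <stdint.h>

#define TCR_A1		(1u << 22)	/* ARM_SMMU_TCR_A1 */
#define TCR_EPD0	(1u << 7)	/* ARM_SMMU_TCR_EPD0 */

int main(void)
{
	uint32_t tcr = 0x3510;	/* assumed TG0/SH0/ORGN0/IRGN0/T0SZ encoding */

	tcr = (tcr << 16) & ~TCR_A1;	/* relocate fields to the TTBR1 slots */
	tcr |= TCR_EPD0;		/* and turn off TTBR0 walks entirely */

	assert(tcr & TCR_EPD0);
	assert(!(tcr & TCR_A1));	/* A1 clear: the ASID comes from TTBR0 */
	return 0;
}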
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index af6bec3ace00..b30d6c966e2c 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -584,8 +584,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
* index into qcom_iommu->ctxs:
*/
if (WARN_ON(asid < 1) ||
- WARN_ON(asid > qcom_iommu->num_ctxs))
+ WARN_ON(asid > qcom_iommu->num_ctxs)) {
+ put_device(&iommu_pdev->dev);
return -EINVAL;
+ }
if (!dev_iommu_priv_get(dev)) {
dev_iommu_priv_set(dev, qcom_iommu);
@@ -594,8 +596,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
* multiple different iommu devices. Multiple context
* banks are ok, but multiple devices are not:
*/
- if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
+ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
+ put_device(&iommu_pdev->dev);
return -EINVAL;
+ }
}
return iommu_fwspec_add_ids(dev, &asid, 1);
@@ -752,7 +756,7 @@ static const struct of_device_id ctx_of_match[] = {
static struct platform_driver qcom_iommu_ctx_driver = {
.driver = {
.name = "qcom-iommu-ctx",
- .of_match_table = of_match_ptr(ctx_of_match),
+ .of_match_table = ctx_of_match,
},
.probe = qcom_iommu_ctx_probe,
.remove = qcom_iommu_ctx_remove,
@@ -915,7 +919,7 @@ static const struct of_device_id qcom_iommu_of_match[] = {
static struct platform_driver qcom_iommu_driver = {
.driver = {
.name = "qcom-iommu",
- .of_match_table = of_match_ptr(qcom_iommu_of_match),
+ .of_match_table = qcom_iommu_of_match,
.pm = &qcom_iommu_pm_ops,
},
.probe = qcom_iommu_device_probe,
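Both qcom_iommu_of_xlate() fixes above apply the same reference-counting rule: a lookup that takes a device reference must drop it on every early return, not only on success. A hypothetical reduction of the bug shape and the fix:

/* Hypothetical helper mirroring the fixed error paths; iommu_pdev is
 * assumed to hold a reference taken by an earlier lookup. */
static int check_asid(struct platform_device *iommu_pdev,
		      unsigned int asid, unsigned int num_ctxs)
{
	if (WARN_ON(asid < 1) || WARN_ON(asid > num_ctxs)) {
		put_device(&iommu_pdev->dev);	/* drop the ref before bailing */
		return -EINVAL;
	}

	return 0;	/* the success path keeps the reference */
}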
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5141d49a046b..0cbcd3fc3e7e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -10,9 +10,8 @@
#include <linux/acpi_iort.h>
#include <linux/device.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
-#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -343,8 +342,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
- cookie->fq_domain = domain;
- init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+ if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
+ NULL))
+ pr_warn("iova flush queue initialization failed\n");
+ else
+ cookie->fq_domain = domain;
}
if (!dev)
@@ -471,7 +473,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
WARN_ON(unmapped != size);
if (!cookie->fq_domain)
- iommu_tlb_sync(domain, &iotlb_gather);
+ iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
@@ -524,6 +526,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
/* IOMMU can map any pages, so himem can also be used here */
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+ /* It makes no sense to muck about with huge pages */
+ gfp &= ~__GFP_COMP;
+
while (count) {
struct page *page = NULL;
unsigned int order_size;
@@ -544,15 +549,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
page = alloc_pages_node(nid, alloc_flags, order);
if (!page)
continue;
- if (!order)
- break;
- if (!PageCompound(page)) {
+ if (order)
split_page(page, order);
- break;
- } else if (!split_huge_page(page)) {
- break;
- }
- __free_pages(page, order);
+ break;
}
if (!page) {
__iommu_dma_free_pages(pages, i);
@@ -572,6 +571,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* @size: Size of buffer in bytes
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
+ * @prot: pgprot_t to use for the remapped mapping
* @attrs: DMA attributes for this allocation
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
@@ -580,14 +580,14 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* Return: Mapped virtual address, or NULL on failure.
*/
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+ unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -1030,8 +1030,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
gfp |= __GFP_ZERO;
if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
- !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
- return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
+ return iommu_dma_alloc_remap(dev, size, handle, gfp,
+ dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
+ }
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
@@ -1052,6 +1054,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
+#ifdef CONFIG_DMA_REMAP
+static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ if (!gfpflags_allow_blocking(gfp)) {
+ struct page *page;
+
+ page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
+ if (!page)
+ return NULL;
+ return page_address(page);
+ }
+
+ return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
+ PAGE_KERNEL, 0);
+}
+
+static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
+{
+ __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_free(dev, size, cpu_addr);
+}
+#else
+#define iommu_dma_alloc_noncoherent NULL
+#define iommu_dma_free_noncoherent NULL
+#endif /* CONFIG_DMA_REMAP */
+
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -1120,6 +1150,10 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
static const struct dma_map_ops iommu_dma_ops = {
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ .alloc_noncoherent = iommu_dma_alloc_noncoherent,
+ .free_noncoherent = iommu_dma_free_noncoherent,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
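With alloc_noncoherent/free_noncoherent wired into iommu_dma_ops, drivers behind an IOMMU can use the dma_alloc_noncoherent() interface and do their own cache maintenance. A hedged usage sketch; the helper name, buffer size, and DMA direction below are assumptions, not part of this patch:

/* Hypothetical driver-side usage of the new noncoherent hooks. */
static void *alloc_rx_ring(struct device *dev, dma_addr_t *handle)
{
	void *buf = dma_alloc_noncoherent(dev, SZ_4K, handle,
					  DMA_FROM_DEVICE, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Noncoherent memory: the caller owns cache maintenance */
	dma_sync_single_for_device(dev, *handle, SZ_4K, DMA_FROM_DEVICE);
	return buf;
}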
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 099a11a35fb9..b9a974d97831 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -1174,7 +1174,7 @@ error:
if (irq != NO_IRQ)
free_irq(irq, data);
- kzfree(data);
+ kfree_sensitive(data);
if (pamu_regs)
iounmap(pamu_regs);
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 8919c1c70b68..e09e2d734c57 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -101,7 +101,7 @@ static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
* in the chip_data and hyperv_irq_remapping_activate()/hyperv_ir_set_
* affinity() set vector and dest_apicid directly into IO-APIC entry.
*/
- irq_data->chip_data = info->ioapic_entry;
+ irq_data->chip_data = info->ioapic.entry;
/*
	 * Hyper-V IO APIC irq affinity should be in the scope of
@@ -182,9 +182,9 @@ static int __init hyperv_enable_irq_remapping(void)
return IRQ_REMAP_X2APIC_MODE;
}
-static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
+static struct irq_domain *hyperv_get_irq_domain(struct irq_alloc_info *info)
{
- if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC)
+ if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT)
return ioapic_ir_domain;
else
return NULL;
@@ -193,7 +193,7 @@ static struct irq_domain *hyperv_get_ir_irq_domain(struct irq_alloc_info *info)
struct irq_remap_ops hyperv_irq_remap_ops = {
.prepare = hyperv_prepare_irq_remapping,
.enable = hyperv_enable_irq_remapping,
- .get_ir_irq_domain = hyperv_get_ir_irq_domain,
+ .get_irq_domain = hyperv_get_irq_domain,
};
#endif
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 93e6345f3414..b2e804473209 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -316,6 +316,9 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
if (ret < 0 && dmar_dev_scope_status == 0)
dmar_dev_scope_status = ret;
+ if (ret >= 0)
+ intel_irq_remap_add_device(info);
+
return ret;
}
@@ -330,6 +333,11 @@ static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
dmar_iommu_notify_scope_dev(info);
}
+static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
+{
+ dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&pdev->physfn->dev));
+}
+
static int dmar_pci_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -339,8 +347,20 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
/* Only care about add/remove events for physical functions.
* For VFs we actually do the lookup based on the corresponding
* PF in device_to_iommu() anyway. */
- if (pdev->is_virtfn)
+ if (pdev->is_virtfn) {
+ /*
+ * Ensure that the VF device inherits the irq domain of the
+ * PF device. Ideally the device would inherit the domain
+ * from the bus, but DMAR can have multiple units per bus
+ * which makes this impossible. The VF 'bus' could inherit
+ * from the PF device, but that's yet another x86'sism to
+ * inflict on everybody else.
+ */
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ vf_inherit_msi_domain(pdev);
return NOTIFY_DONE;
+ }
+
if (action != BUS_NOTIFY_ADD_DEVICE &&
action != BUS_NOTIFY_REMOVED_DEVICE)
return NOTIFY_DONE;
@@ -380,7 +400,7 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
return NULL;
}
-/**
+/*
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
@@ -473,7 +493,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
rhsa = (struct acpi_dmar_rhsa *)header;
for_each_drhd_unit(drhd) {
if (drhd->reg_base_addr == rhsa->base_address) {
- int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
+ int node = pxm_to_node(rhsa->proximity_domain);
if (!node_online(node))
node = NUMA_NO_NODE;
@@ -1024,8 +1044,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- int agaw = 0;
- int msagaw = 0;
+ int agaw = -1;
+ int msagaw = -1;
int err;
if (!drhd->reg_base_addr) {
@@ -1050,17 +1070,28 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
- agaw = iommu_calculate_agaw(iommu);
- if (agaw < 0) {
- pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
- }
- msagaw = iommu_calculate_max_sagaw(iommu);
- if (msagaw < 0) {
- pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
- goto err_unmap;
+ if (cap_sagaw(iommu->cap) == 0) {
+ pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
+ iommu->name);
+ drhd->ignored = 1;
+ }
+
+ if (!drhd->ignored) {
+ agaw = iommu_calculate_agaw(iommu);
+ if (agaw < 0) {
+ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ drhd->ignored = 1;
+ }
+ }
+ if (!drhd->ignored) {
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ drhd->ignored = 1;
+ agaw = -1;
+ }
}
iommu->agaw = agaw;
iommu->msagaw = msagaw;
@@ -1087,7 +1118,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
- if (intel_iommu_enabled) {
+ /*
+ * This is only for hotplug; at boot time intel_iommu_enabled won't
+ * be set yet. When intel_iommu_init() runs, it registers the units
+ * present at boot time, then sets intel_iommu_enabled.
+ */
+ if (intel_iommu_enabled && !drhd->ignored) {
err = iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
@@ -1117,7 +1153,7 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
- if (intel_iommu_enabled) {
+ if (intel_iommu_enabled && !iommu->drhd->ignored) {
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}
@@ -1482,7 +1518,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
}
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
- u64 granu, int pasid)
+ u64 granu, u32 pasid)
{
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
@@ -1796,7 +1832,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
}
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
- u8 fault_reason, int pasid, u16 source_id,
+ u8 fault_reason, u32 pasid, u16 source_id,
unsigned long long addr)
{
const char *reason;
@@ -1846,7 +1882,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
u8 fault_reason;
u16 source_id;
u64 guest_addr;
- int type, pasid;
+ u32 pasid;
+ int type;
u32 data;
bool pasid_present;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2239c211178b..c6622011d493 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
@@ -37,7 +37,7 @@
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
@@ -698,12 +698,47 @@ static int domain_update_iommu_superpage(struct dmar_domain *domain,
return fls(mask);
}
+static int domain_update_device_node(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ int nid = NUMA_NO_NODE;
+
+ assert_spin_locked(&device_domain_lock);
+
+ if (list_empty(&domain->devices))
+ return NUMA_NO_NODE;
+
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev)
+ continue;
+
+ /*
+		 * There may be multiple device NUMA nodes, as devices within
+		 * the same domain can sit behind different IOMMUs. There is
+		 * no perfect answer in such a situation, so we use a
+		 * first-come, first-served policy.
+ */
+ nid = dev_to_node(info->dev);
+ if (nid != NUMA_NO_NODE)
+ break;
+ }
+
+ return nid;
+}
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain->iommu_snooping = domain_update_iommu_snooping(NULL);
domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
+
+ /*
+	 * If RHSA is missing, default to the device NUMA node as a
+	 * fallback.
+ */
+ if (domain->nid == NUMA_NO_NODE)
+ domain->nid = domain_update_device_node(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -2490,6 +2525,9 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(!dev || !dev->iommu))
+ return NULL;
+
if (unlikely(attach_deferred(dev)))
return NULL;
@@ -2527,7 +2565,7 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
- int pasid)
+ u32 pasid)
{
int flags = PASID_FLAG_SUPERVISOR_MODE;
struct dma_pte *pgd = domain->pgd;
@@ -3712,6 +3750,8 @@ static const struct dma_map_ops intel_dma_ops = {
.dma_supported = dma_direct_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
.get_required_mask = intel_get_required_mask,
};
@@ -3778,9 +3818,8 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
* page aligned, we don't need to use a bounce page.
*/
if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
- tlb_addr = swiotlb_tbl_map_single(dev,
- __phys_to_dma(dev, io_tlb_start),
- paddr, size, aligned_size, dir, attrs);
+ tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
+ aligned_size, dir, attrs);
if (tlb_addr == DMA_MAPPING_ERROR) {
goto swiotlb_error;
} else {
@@ -3965,6 +4004,8 @@ static const struct dma_map_ops bounce_dma_ops = {
.sync_sg_for_device = bounce_sync_sg_for_device,
.map_resource = bounce_map_resource,
.unmap_resource = bounce_unmap_resource,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
.dma_supported = dma_direct_supported,
};
@@ -5095,8 +5136,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
if (type == IOMMU_DOMAIN_DMA)
intel_init_iova_domain(dmar_domain);
- domain_update_iommu_cap(dmar_domain);
-
domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end =
@@ -5173,7 +5212,7 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
return -ENODEV;
if (domain->default_pasid <= 0) {
- int pasid;
+ u32 pasid;
/* No private data needed for the default pasid */
pasid = ioasid_alloc(NULL, PASID_MIN,
@@ -5408,8 +5447,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
int ret = 0;
u64 size = 0;
- if (!inv_info || !dmar_domain ||
- inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+ if (!inv_info || !dmar_domain)
return -EINVAL;
if (!dev || !dev_is_pci(dev))
@@ -5434,8 +5472,8 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
/* Size is only valid in address selective invalidation */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
- size = to_vtd_size(inv_info->addr_info.granule_size,
- inv_info->addr_info.nb_granules);
+ size = to_vtd_size(inv_info->granu.addr_info.granule_size,
+ inv_info->granu.addr_info.nb_granules);
for_each_set_bit(cache_type,
(unsigned long *)&inv_info->cache,
@@ -5456,20 +5494,20 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* granularity.
*/
if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
- (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
- pasid = inv_info->pasid_info.pasid;
+ (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+ pasid = inv_info->granu.pasid_info.pasid;
else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
- (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
- pasid = inv_info->addr_info.pasid;
+ (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+ pasid = inv_info->granu.addr_info.pasid;
switch (BIT(cache_type)) {
case IOMMU_CACHE_INV_TYPE_IOTLB:
/* HW will ignore LSB bits based on address mask */
if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
size &&
- (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+ (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
- inv_info->addr_info.addr, size);
+ inv_info->granu.addr_info.addr, size);
}
/*
@@ -5477,9 +5515,9 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
* We use npages = -1 to indicate that.
*/
qi_flush_piotlb(iommu, did, pasid,
- mm_to_dma_pfn(inv_info->addr_info.addr),
+ mm_to_dma_pfn(inv_info->granu.addr_info.addr),
(granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
- inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+ inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
if (!info->ats_enabled)
break;
@@ -5502,7 +5540,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
size = 64 - VTD_PAGE_SHIFT;
addr = 0;
} else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
- addr = inv_info->addr_info.addr;
+ addr = inv_info->granu.addr_info.addr;
}
if (info->ats_enabled)
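domain_update_device_node() above deliberately takes the first device with a known node rather than arbitrating between IOMMUs. A tiny userspace model of that first-come, first-served selection, with assumed per-device nodes:

#include <stdio.h>

#define NUMA_NO_NODE	(-1)

int main(void)
{
	/* Assumed per-device nodes for a domain spanning two IOMMUs */
	int dev_node[] = { NUMA_NO_NODE, NUMA_NO_NODE, 1, 0 };
	int nid = NUMA_NO_NODE;

	for (unsigned int i = 0; i < sizeof(dev_node) / sizeof(*dev_node); i++) {
		if (dev_node[i] != NUMA_NO_NODE) {
			nid = dev_node[i];	/* first known node wins */
			break;
		}
	}

	printf("domain node: %d\n", nid);	/* prints 1, not 0 */
	return 0;
}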
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 8f4ce72570ce..0cfce1d3b7bb 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -204,35 +204,40 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
return rc;
}
-static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
+static struct irq_domain *map_hpet_to_ir(u8 hpet_id)
{
int i;
- for (i = 0; i < MAX_HPET_TBS; i++)
+ for (i = 0; i < MAX_HPET_TBS; i++) {
if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
- return ir_hpet[i].iommu;
+ return ir_hpet[i].iommu->ir_domain;
+ }
return NULL;
}
-static struct intel_iommu *map_ioapic_to_ir(int apic)
+static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
int i;
- for (i = 0; i < MAX_IO_APICS; i++)
+ for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
+ }
return NULL;
}
-static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+static struct irq_domain *map_ioapic_to_ir(int apic)
{
- struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = map_ioapic_to_iommu(apic);
- drhd = dmar_find_matched_drhd_unit(dev);
- if (!drhd)
- return NULL;
+ return iommu ? iommu->ir_domain : NULL;
+}
+
+static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
+{
+ struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);
- return drhd->iommu;
+ return drhd ? drhd->iommu->ir_msi_domain : NULL;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
@@ -1002,7 +1007,7 @@ static int __init parse_ioapics_under_ir(void)
for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
int ioapic_id = mpc_ioapic_id(ioapic_idx);
- if (!map_ioapic_to_ir(ioapic_id)) {
+ if (!map_ioapic_to_iommu(ioapic_id)) {
pr_err(FW_BUG "ioapic %d has no mapping iommu, "
"interrupt remapping will be disabled\n",
ioapic_id);
@@ -1087,6 +1092,22 @@ error:
return -1;
}
+/*
+ * Store the MSI remapping domain pointer in the device if enabled.
+ *
+ * This is called from dmar_pci_bus_add_dev() so it works even when DMA
+ * remapping is disabled. Only update the pointer if the device is not
+ * already handled by a non default PCI/MSI interrupt domain. This protects
+ * e.g. VMD devices.
+ */
+void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
+{
+ if (!irq_remapping_enabled || pci_dev_has_special_msi_domain(info->dev))
+ return;
+
+ dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
+}
+
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
memset(irte, 0, sizeof(*irte));
@@ -1107,51 +1128,20 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
irte->redir_hint = 1;
}
-static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
-{
- struct intel_iommu *iommu = NULL;
-
- if (!info)
- return NULL;
-
- switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_IOAPIC:
- iommu = map_ioapic_to_ir(info->ioapic_id);
- break;
- case X86_IRQ_ALLOC_TYPE_HPET:
- iommu = map_hpet_to_ir(info->hpet_id);
- break;
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- iommu = map_dev_to_ir(info->msi_dev);
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- return iommu ? iommu->ir_domain : NULL;
-}
-
static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
- struct intel_iommu *iommu;
-
if (!info)
return NULL;
switch (info->type) {
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
- iommu = map_dev_to_ir(info->msi_dev);
- if (iommu)
- return iommu->ir_msi_domain;
- break;
+ case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
+ return map_ioapic_to_ir(info->devid);
+ case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
+ return map_hpet_to_ir(info->devid);
default:
- break;
+ WARN_ON_ONCE(1);
+ return NULL;
}
-
- return NULL;
}
struct irq_remap_ops intel_irq_remap_ops = {
@@ -1160,7 +1150,6 @@ struct irq_remap_ops intel_irq_remap_ops = {
.disable = disable_irq_remapping,
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
- .get_ir_irq_domain = intel_get_ir_irq_domain,
.get_irq_domain = intel_get_irq_domain,
};
@@ -1284,16 +1273,16 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
switch (info->type) {
case X86_IRQ_ALLOC_TYPE_IOAPIC:
/* Set source-id of interrupt request */
- set_ioapic_sid(irte, info->ioapic_id);
+ set_ioapic_sid(irte, info->devid);
apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
- info->ioapic_id, irte->present, irte->fpd,
+ info->devid, irte->present, irte->fpd,
irte->dst_mode, irte->redir_hint,
irte->trigger_mode, irte->dlvry_mode,
irte->avail, irte->vector, irte->dest_id,
irte->sid, irte->sq, irte->svt);
- entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
- info->ioapic_entry = NULL;
+ entry = (struct IR_IO_APIC_route_entry *)info->ioapic.entry;
+ info->ioapic.entry = NULL;
memset(entry, 0, sizeof(*entry));
entry->index2 = (index >> 15) & 0x1;
entry->zero = 0;
@@ -1303,21 +1292,21 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
* IO-APIC RTE will be configured with virtual vector.
* irq handler will do the explicit EOI to the io-apic.
*/
- entry->vector = info->ioapic_pin;
+ entry->vector = info->ioapic.pin;
entry->mask = 0; /* enable IRQ */
- entry->trigger = info->ioapic_trigger;
- entry->polarity = info->ioapic_polarity;
- if (info->ioapic_trigger)
+ entry->trigger = info->ioapic.trigger;
+ entry->polarity = info->ioapic.polarity;
+ if (info->ioapic.trigger)
entry->mask = 1; /* Mask level triggered irqs. */
break;
case X86_IRQ_ALLOC_TYPE_HPET:
- case X86_IRQ_ALLOC_TYPE_MSI:
- case X86_IRQ_ALLOC_TYPE_MSIX:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSI:
+ case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
- set_hpet_sid(irte, info->hpet_id);
+ set_hpet_sid(irte, info->devid);
else
- set_msi_sid(irte, info->msi_dev);
+ set_msi_sid(irte, msi_desc_to_pci_dev(info->desc));
msg->address_hi = MSI_ADDR_BASE_HI;
msg->data = sub_handle;
@@ -1368,15 +1357,15 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
if (!info || !iommu)
return -EINVAL;
- if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
- info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+ if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
+ info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
return -EINVAL;
/*
* With IRQ remapping enabled, don't need contiguous CPU vectors
* to support multiple MSI interrupts.
*/
- if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+ if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index e6faedf42fd4..b92af83b79bd 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -27,7 +27,7 @@
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
-int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
+int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
unsigned long flags;
u8 status_code;
@@ -58,7 +58,7 @@ int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
return ret;
}
-void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid)
+void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
unsigned long flags;
u8 status_code;
@@ -146,7 +146,7 @@ int intel_pasid_alloc_table(struct device *dev)
struct pasid_table *pasid_table;
struct pasid_table_opaque data;
struct page *pages;
- int max_pasid = 0;
+ u32 max_pasid = 0;
int ret, order;
int size;
@@ -168,7 +168,7 @@ int intel_pasid_alloc_table(struct device *dev)
INIT_LIST_HEAD(&pasid_table->dev);
if (info->pasid_supported)
- max_pasid = min_t(int, pci_max_pasids(to_pci_dev(dev)),
+ max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
intel_pasid_max_id);
size = max_pasid >> (PASID_PDE_SHIFT - 3);
@@ -242,7 +242,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
return info->pasid_table->max_pasid;
}
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
@@ -251,8 +251,7 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
int dir_index, index;
pasid_table = intel_pasid_get_table(dev);
- if (WARN_ON(!pasid_table || pasid < 0 ||
- pasid >= intel_pasid_get_dev_max_id(dev)))
+ if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
return NULL;
dir = pasid_table->table;
@@ -305,7 +304,7 @@ static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
}
static void
-intel_pasid_clear_entry(struct device *dev, int pasid, bool fault_ignore)
+intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
struct pasid_entry *pe;
@@ -444,7 +443,7 @@ pasid_set_eafe(struct pasid_entry *pe)
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
- u16 did, int pasid)
+ u16 did, u32 pasid)
{
struct qi_desc desc;
@@ -473,7 +472,7 @@ iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
struct device_domain_info *info;
u16 sid, qdep, pfsid;
@@ -499,7 +498,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
}
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
- int pasid, bool fault_ignore)
+ u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
@@ -524,7 +523,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
static void pasid_flush_caches(struct intel_iommu *iommu,
struct pasid_entry *pte,
- int pasid, u16 did)
+ u32 pasid, u16 did)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -543,7 +542,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
*/
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
- int pasid, u16 did, int flags)
+ u32 pasid, u16 did, int flags)
{
struct pasid_entry *pte;
@@ -616,7 +615,7 @@ static inline int iommu_skip_agaw(struct dmar_domain *domain,
*/
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
struct pasid_entry *pte;
struct dma_pte *pgd;
@@ -674,7 +673,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
*/
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid)
+ struct device *dev, u32 pasid)
{
u16 did = FLPT_DEFAULT_DID;
struct pasid_entry *pte;
@@ -760,7 +759,7 @@ intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
* @addr_width: Address width of the first level (guest)
*/
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
- pgd_t *gpgd, int pasid,
+ pgd_t *gpgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width)
{
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index c9850766c3a9..97dfcffbf495 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -72,7 +72,7 @@ struct pasid_entry {
struct pasid_table {
void *table; /* pasid table pointer */
int order; /* page order of pasid table */
- int max_pasid; /* max pasid */
+ u32 max_pasid; /* max pasid */
struct list_head dev; /* device list */
};
@@ -98,31 +98,31 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
-extern u32 intel_pasid_max_id;
+extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
-void intel_pasid_free_id(int pasid);
-void *intel_pasid_lookup_id(int pasid);
+void intel_pasid_free_id(u32 pasid);
+void *intel_pasid_lookup_id(u32 pasid);
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
struct pasid_table *intel_pasid_get_table(struct device *dev);
int intel_pasid_get_dev_max_id(struct device *dev);
-struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid);
+struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid);
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
- int pasid, u16 did, int flags);
+ u32 pasid, u16 did, int flags);
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid);
+ struct device *dev, u32 pasid);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
- struct device *dev, int pasid);
+ struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd, int pasid,
+ struct device *dev, pgd_t *pgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid,
+ struct device *dev, u32 pasid,
bool fault_ignore);
-int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid);
-void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid);
+int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
+void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 95c3164a2302..3242ebd0bca3 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -19,11 +19,12 @@
#include <linux/mm_types.h>
#include <linux/ioasid.h>
#include <asm/page.h>
+#include <asm/fpu/api.h>
#include "pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static void intel_svm_drain_prq(struct device *dev, int pasid);
+static void intel_svm_drain_prq(struct device *dev, u32 pasid);
#define PRQ_ORDER 0
@@ -278,14 +279,22 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
+ struct device_domain_info *info;
struct intel_svm *svm = NULL;
int ret = 0;
if (WARN_ON(!iommu) || !data)
return -EINVAL;
- if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
- data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
+ if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
+ return -EINVAL;
+
+ /* IOMMU core ensures argsz is more than the start of the union */
+ if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
+ return -EINVAL;
+
+ /* Make sure no undefined flags are used in vendor data */
+ if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
return -EINVAL;
if (!dev_is_pci(dev))
@@ -302,6 +311,10 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
return -EINVAL;
+ info = get_domain_info(dev);
+ if (!info)
+ return -EINVAL;
+
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
@@ -349,6 +362,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
goto out;
}
sdev->dev = dev;
+ sdev->sid = PCI_DEVID(info->bus, info->devfn);
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -370,7 +384,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
spin_lock(&iommu->lock);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
- data->hpasid, &data->vtd, dmar_domain,
+ data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock(&iommu->lock);
if (ret) {
@@ -399,7 +413,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
return ret;
}
-int intel_svm_unbind_gpasid(struct device *dev, int pasid)
+int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
@@ -444,9 +458,28 @@ out:
return ret;
}
+static void _load_pasid(void *unused)
+{
+ update_pasid();
+}
+
+static void load_pasid(struct mm_struct *mm, u32 pasid)
+{
+ mutex_lock(&mm->context.lock);
+
+ /* Synchronize with READ_ONCE in update_pasid(). */
+ smp_store_release(&mm->pasid, pasid);
+
+ /* Update PASID MSR on all CPUs running the mm's tasks. */
+ on_each_cpu_mask(mm_cpumask(mm), _load_pasid, NULL, true);
+
+ mutex_unlock(&mm->context.lock);
+}
+
/* Caller must hold pasid_mutex, mm reference */
static int
-intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
+intel_svm_bind_mm(struct device *dev, unsigned int flags,
+ struct svm_dev_ops *ops,
struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
@@ -590,6 +623,10 @@ intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
}
list_add_tail(&svm->list, &global_svm_list);
+ if (mm) {
+ /* The newly allocated pasid is loaded into the mm. */
+ load_pasid(mm, svm->pasid);
+ }
} else {
/*
* Binding a new device with existing PASID, need to setup
@@ -620,7 +657,7 @@ out:
}
/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, int pasid)
+static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
@@ -653,8 +690,11 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid)
if (list_empty(&svm->devs)) {
ioasid_free(svm->pasid);
- if (svm->mm)
+ if (svm->mm) {
mmu_notifier_unregister(&svm->notifier, svm->mm);
+ /* Clear mm's pasid. */
+ load_pasid(svm->mm, PASID_DISABLED);
+ }
list_del(&svm->list);
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
@@ -739,7 +779,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
-static void intel_svm_drain_prq(struct device *dev, int pasid)
+static void intel_svm_drain_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;
@@ -995,7 +1035,7 @@ no_pasid:
resp.qw0 = QI_PGRP_PASID(req->pasid) |
QI_PGRP_DID(req->rid) |
QI_PGRP_PASID_P(req->pasid_present) |
- QI_PGRP_PDP(req->pasid_present) |
+ QI_PGRP_PDP(req->priv_data_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
resp.qw1 = QI_PGRP_IDX(req->prg_index) |
@@ -1033,7 +1073,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_sva *sva = ERR_PTR(-EINVAL);
struct intel_svm_dev *sdev = NULL;
- int flags = 0;
+ unsigned int flags = 0;
int ret;
/*
@@ -1042,7 +1082,7 @@ intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
* and intel_svm etc.
*/
if (drvdata)
- flags = *(int *)drvdata;
+ flags = *(unsigned int *)drvdata;
mutex_lock(&pasid_mutex);
ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
if (ret)
@@ -1067,10 +1107,10 @@ void intel_svm_unbind(struct iommu_sva *sva)
mutex_unlock(&pasid_mutex);
}
-int intel_svm_get_pasid(struct iommu_sva *sva)
+u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
struct intel_svm_dev *sdev;
- int pasid;
+ u32 pasid;
mutex_lock(&pasid_mutex);
sdev = to_intel_svm_dev(sva);
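The load_pasid() helper added above follows a publish-then-broadcast pattern: the new PASID is stored with release semantics, and then every CPU running the mm's tasks is forced to re-read it (the kernel pairs the smp_store_release() with a READ_ONCE() in update_pasid()). A minimal user-space analogue of the same ordering idea in C11 atomics; the refresh hook standing in for the PASID MSR rewrite is an assumption:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t mm_pasid;

/* Stand-in for update_pasid(): re-read the published value with
 * acquire semantics and apply it (the kernel would rewrite the
 * per-CPU PASID MSR here). */
static void refresh_pasid(void)
{
        uint32_t p = atomic_load_explicit(&mm_pasid, memory_order_acquire);
        (void)p;
}

static void load_pasid_analogue(uint32_t pasid)
{
        /* Publish first... */
        atomic_store_explicit(&mm_pasid, pasid, memory_order_release);
        /* ...then make every CPU in mm_cpumask() run the refresh hook;
         * in the kernel this is on_each_cpu_mask(..., true). */
        refresh_pasid();
}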
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dc7bcf858b6d..a7a9bc08dcd1 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -20,6 +20,8 @@
#include <asm/barrier.h>
+#include "io-pgtable-arm.h"
+
#define ARM_LPAE_MAX_ADDR_BITS 52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
#define ARM_LPAE_MAX_LEVELS 4
@@ -100,23 +102,6 @@
#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
/* Register bits */
-#define ARM_LPAE_TCR_TG0_4K 0
-#define ARM_LPAE_TCR_TG0_64K 1
-#define ARM_LPAE_TCR_TG0_16K 2
-
-#define ARM_LPAE_TCR_TG1_16K 1
-#define ARM_LPAE_TCR_TG1_4K 2
-#define ARM_LPAE_TCR_TG1_64K 3
-
-#define ARM_LPAE_TCR_SH_NS 0
-#define ARM_LPAE_TCR_SH_OS 2
-#define ARM_LPAE_TCR_SH_IS 3
-
-#define ARM_LPAE_TCR_RGN_NC 0
-#define ARM_LPAE_TCR_RGN_WBWA 1
-#define ARM_LPAE_TCR_RGN_WT 2
-#define ARM_LPAE_TCR_RGN_WB 3
-
#define ARM_LPAE_VTCR_SL0_MASK 0x3
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
@@ -124,14 +109,6 @@
#define ARM_LPAE_VTCR_PS_SHIFT 16
#define ARM_LPAE_VTCR_PS_MASK 0x7
-#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
-#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
-#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
-#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
-#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
-#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
-#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
-
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
@@ -751,11 +728,6 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
return NULL;
- if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
- dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
- return NULL;
- }
-
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
diff --git a/drivers/iommu/io-pgtable-arm.h b/drivers/iommu/io-pgtable-arm.h
new file mode 100644
index 000000000000..ba7cfdf7afa0
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef IO_PGTABLE_ARM_H_
+#define IO_PGTABLE_ARM_H_
+
+#define ARM_LPAE_TCR_TG0_4K 0
+#define ARM_LPAE_TCR_TG0_64K 1
+#define ARM_LPAE_TCR_TG0_16K 2
+
+#define ARM_LPAE_TCR_TG1_16K 1
+#define ARM_LPAE_TCR_TG1_4K 2
+#define ARM_LPAE_TCR_TG1_64K 3
+
+#define ARM_LPAE_TCR_SH_NS 0
+#define ARM_LPAE_TCR_SH_OS 2
+#define ARM_LPAE_TCR_SH_IS 3
+
+#define ARM_LPAE_TCR_RGN_NC 0
+#define ARM_LPAE_TCR_RGN_WBWA 1
+#define ARM_LPAE_TCR_RGN_WT 2
+#define ARM_LPAE_TCR_RGN_WB 3
+
+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
+#define ARM_LPAE_TCR_PS_52_BIT 0x6ULL
+
+#endif /* IO_PGTABLE_ARM_H_ */
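Moving these TCR field encodings into a standalone header lets SMMU drivers share them with the LPAE page-table code instead of duplicating the values. A small sketch of how the field values might be composed into a translation control value; the bit positions below follow the usual ARMv8 TCR_EL1 layout and are illustrative assumptions, only the field values come from the new header:

#include <stdint.h>
#include "io-pgtable-arm.h"

static uint64_t make_tcr_4k_wbwa_is(void)
{
        uint64_t tcr = 0;

        tcr |= (uint64_t)ARM_LPAE_TCR_TG0_4K   << 14; /* 4K granule (TG0)      */
        tcr |= (uint64_t)ARM_LPAE_TCR_SH_IS    << 12; /* inner shareable (SH0) */
        tcr |= (uint64_t)ARM_LPAE_TCR_RGN_WBWA << 10; /* outer WB/WA (ORGN0)   */
        tcr |= (uint64_t)ARM_LPAE_TCR_RGN_WBWA << 8;  /* inner WB/WA (IRGN0)   */

        return tcr;
}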
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 609bd25bf154..b53446bb8c6b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -762,7 +762,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
}
- iommu_flush_tlb_all(domain);
+ iommu_flush_iotlb_all(domain);
out:
iommu_put_resv_regions(dev, &mappings);
@@ -1961,25 +1961,188 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
-int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
+/*
+ * Check flags and other user provided data for valid combinations. We also
+ * make sure no reserved fields or unused flags are set, so that
+ * userspace is not broken later when these fields or flags are used.
+ */
+static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
+ u32 mask;
+ int i;
+
+ if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+ return -EINVAL;
+
+ mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
+ if (info->cache & ~mask)
+ return -EINVAL;
+
+ if (info->granularity >= IOMMU_INV_GRANU_NR)
+ return -EINVAL;
+
+ switch (info->granularity) {
+ case IOMMU_INV_GRANU_ADDR:
+ if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
+ return -EINVAL;
+
+ mask = IOMMU_INV_ADDR_FLAGS_PASID |
+ IOMMU_INV_ADDR_FLAGS_ARCHID |
+ IOMMU_INV_ADDR_FLAGS_LEAF;
+
+ if (info->granu.addr_info.flags & ~mask)
+ return -EINVAL;
+ break;
+ case IOMMU_INV_GRANU_PASID:
+ mask = IOMMU_INV_PASID_FLAGS_PASID |
+ IOMMU_INV_PASID_FLAGS_ARCHID;
+ if (info->granu.pasid_info.flags & ~mask)
+ return -EINVAL;
+
+ break;
+ case IOMMU_INV_GRANU_DOMAIN:
+ if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check reserved padding fields */
+ for (i = 0; i < sizeof(info->padding); i++) {
+ if (info->padding[i])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+ void __user *uinfo)
+{
+ struct iommu_cache_invalidate_info inv_info = { 0 };
+ u32 minsz;
+ int ret;
+
if (unlikely(!domain->ops->cache_invalidate))
return -ENODEV;
- return domain->ops->cache_invalidate(domain, dev, inv_info);
+ /*
+ * No new fields can be added before the variable-sized union; the
+ * minimum size is the offset to the union.
+ */
+ minsz = offsetof(struct iommu_cache_invalidate_info, granu);
+
+ /* Copy minsz from user to get flags and argsz */
+ if (copy_from_user(&inv_info, uinfo, minsz))
+ return -EFAULT;
+
+ /* Fields before the variable size union are mandatory */
+ if (inv_info.argsz < minsz)
+ return -EINVAL;
+
+ /* PASID and address granu require additional info beyond minsz */
+ if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
+ inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
+ return -EINVAL;
+
+ if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
+ inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
+ return -EINVAL;
+
+ /*
+ * The user might be using a newer UAPI header which has a larger data
+ * size; we shall support the existing flags within the current size.
+ * Copy the remaining user data _after_ minsz, but not more than the
+ * size the current kernel supports.
+ */
+ if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
+ min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
+ return -EFAULT;
+
+ /* Now the argsz is validated, check the content */
+ ret = iommu_check_cache_invl_data(&inv_info);
+ if (ret)
+ return ret;
+
+ return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
-EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
+EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
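The copy scheme above is the general shape of the versioned, variable-size UAPI: userspace reports the size of the structure it was built against in argsz, the kernel validates the fixed prefix, then copies at most min(argsz, sizeof(kernel struct)). A hedged user-space sketch of the caller's side; the exact cache-type flag name is an assumption, while argsz, version and granularity come from the checks above:

#include <string.h>
#include <linux/iommu.h>        /* UAPI header; install path assumed */

static void fill_domain_invalidate(struct iommu_cache_invalidate_info *info)
{
        memset(info, 0, sizeof(*info));     /* reserved padding must stay zero */
        info->argsz       = sizeof(*info);  /* size this binary was built against */
        info->version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
        info->cache       = IOMMU_CACHE_INV_TYPE_IOTLB;   /* assumed flag name */
        info->granularity = IOMMU_INV_GRANU_DOMAIN;
        /* the struct is then handed to the kernel through a VFIO-style
         * ioctl path that ends up in iommu_uapi_cache_invalidate() */
}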
-int iommu_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data)
+static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
+ u64 mask;
+ int i;
+
+ if (data->version != IOMMU_GPASID_BIND_VERSION_1)
+ return -EINVAL;
+
+ /* Check the range of supported formats */
+ if (data->format >= IOMMU_PASID_FORMAT_LAST)
+ return -EINVAL;
+
+ /* Check all flags */
+ mask = IOMMU_SVA_GPASID_VAL;
+ if (data->flags & ~mask)
+ return -EINVAL;
+
+ /* Check reserved padding fields */
+ for (i = 0; i < sizeof(data->padding); i++) {
+ if (data->padding[i])
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iommu_sva_prepare_bind_data(void __user *udata,
+ struct iommu_gpasid_bind_data *data)
+{
+ u32 minsz;
+
+ /*
+ * No new fields can be added before the variable-sized union; the
+ * minimum size is the offset to the union.
+ */
+ minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
+
+ /* Copy minsz from user to get flags and argsz */
+ if (copy_from_user(data, udata, minsz))
+ return -EFAULT;
+
+ /* Fields before the variable size union are mandatory */
+ if (data->argsz < minsz)
+ return -EINVAL;
+ /*
+ * The user might be using a newer UAPI header; we shall let the IOMMU
+ * vendor driver decide what size it needs. Since the guest PASID bind
+ * data can be vendor specific, a larger argsz could be the result of an
+ * extension for one vendor without affecting another vendor.
+ * Copy the remaining user data _after_ minsz.
+ */
+ if (copy_from_user((void *)data + minsz, udata + minsz,
+ min_t(u32, data->argsz, sizeof(*data)) - minsz))
+ return -EFAULT;
+
+ return iommu_check_bind_data(data);
+}
+
+int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ void __user *udata)
+{
+ struct iommu_gpasid_bind_data data = { 0 };
+ int ret;
+
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
- return domain->ops->sva_bind_gpasid(domain, dev, data);
+ ret = iommu_sva_prepare_bind_data(udata, &data);
+ if (ret)
+ return ret;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
-EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
+EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
@@ -1991,6 +2154,23 @@ int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
+int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ void __user *udata)
+{
+ struct iommu_gpasid_bind_data data = { 0 };
+ int ret;
+
+ if (unlikely(!domain->ops->sva_bind_gpasid))
+ return -ENODEV;
+
+ ret = iommu_sva_prepare_bind_data(udata, &data);
+ if (ret)
+ return ret;
+
+ return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
+}
+EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
+
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -2316,7 +2496,7 @@ size_t iommu_unmap(struct iommu_domain *domain,
iommu_iotlb_gather_init(&iotlb_gather);
ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
- iommu_tlb_sync(domain, &iotlb_gather);
+ iommu_iotlb_sync(domain, &iotlb_gather);
return ret;
}
@@ -2839,7 +3019,7 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-int iommu_sva_get_pasid(struct iommu_sva *handle)
+u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 45a251da5453..30d969a4c5fd 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -579,7 +579,7 @@ void queue_iova(struct iova_domain *iovad,
/* Avoid false sharing as much as possible. */
if (!atomic_read(&iovad->fq_timer_on) &&
- !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+ !atomic_xchg(&iovad->fq_timer_on, 1))
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
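The cmpxchg-to-xchg change works because the flag is only ever latched from 0 to 1 here: the caller just needs to know whether it was the one that flipped it, and a plain exchange answers that more cheaply than a compare-and-exchange on most architectures. A small self-contained sketch of the same test-then-xchg idiom in C11:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int fq_timer_on;

static bool try_arm_timer(void)
{
        /* cheap relaxed read first, to avoid bouncing the cache line */
        if (atomic_load_explicit(&fq_timer_on, memory_order_relaxed))
                return false;

        /* seeing the old value 0 means we won the right to arm the timer */
        return atomic_exchange(&fq_timer_on, 1) == 0;
}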
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 83f36f61416e..2d84b1ed205e 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -160,33 +160,12 @@ void panic_if_irq_remap(const char *msg)
}
/**
- * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
- * device serving request @info
- * @info: interrupt allocation information, used to identify the IOMMU device
- *
- * It's used to get parent irqdomain for HPET and IOAPIC irqdomains.
- * Returns pointer to IRQ domain, or NULL on failure.
- */
-struct irq_domain *
-irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
-{
- if (!remap_ops || !remap_ops->get_ir_irq_domain)
- return NULL;
-
- return remap_ops->get_ir_irq_domain(info);
-}
-
-/**
* irq_remapping_get_irq_domain - Get the irqdomain serving the request @info
* @info: interrupt allocation information, used to identify the IOMMU device
*
- * There will be one PCI MSI/MSIX irqdomain associated with each interrupt
- * remapping device, so this interface is used to retrieve the PCI MSI/MSIX
- * irqdomain serving request @info.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *
-irq_remapping_get_irq_domain(struct irq_alloc_info *info)
+struct irq_domain *irq_remapping_get_irq_domain(struct irq_alloc_info *info)
{
if (!remap_ops || !remap_ops->get_irq_domain)
return NULL;
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 6a190d504eb6..1661b3d75920 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -43,10 +43,7 @@ struct irq_remap_ops {
/* Enable fault handling */
int (*enable_faulting)(void);
- /* Get the irqdomain associated the IOMMU device */
- struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *);
-
- /* Get the MSI irqdomain associated with the IOMMU device */
+ /* Get the irqdomain associated to IOMMU device */
struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *);
};
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 785b228d39a6..c072cee532c2 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -3,7 +3,6 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
-#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -15,13 +14,16 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -116,6 +118,7 @@
#define OUT_ORDER_WR_EN BIT(4)
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)
+#define HAS_LEGACY_IVRP_PADDR BIT(7)
#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
((((pdata)->flags) & (_x)) == (_x))
@@ -582,7 +585,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
- if (data->plat_data->m4u_plat == M4U_MT8173)
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
else
regval = lower_32_bits(data->protect_base) |
@@ -640,8 +643,11 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct resource *res;
resource_size_t ioaddr;
struct component_match *match = NULL;
+ struct regmap *infracfg;
void *protect;
int i, larb_nr, ret;
+ u32 val;
+ char *p;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -655,10 +661,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
- /* Whether the current dram is over 4GB */
- data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
- if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
- data->enable_4GB = false;
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
+ switch (data->plat_data->m4u_plat) {
+ case M4U_MT2712:
+ p = "mediatek,mt2712-infracfg";
+ break;
+ case M4U_MT8173:
+ p = "mediatek,mt8173-infracfg";
+ break;
+ default:
+ p = NULL;
+ }
+
+ infracfg = syscon_regmap_lookup_by_compatible(p);
+
+ if (IS_ERR(infracfg))
+ return PTR_ERR(infracfg);
+
+ ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
+ if (ret)
+ return ret;
+ data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
@@ -816,9 +840,17 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
+static const struct mtk_iommu_plat_data mt8167_data = {
+ .m4u_plat = M4U_MT8167,
+ .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
+};
+
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
- .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+ HAS_LEGACY_IVRP_PADDR,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};
@@ -833,6 +865,7 @@ static const struct mtk_iommu_plat_data mt8183_data = {
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+ { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{}
@@ -843,7 +876,7 @@ static struct platform_driver mtk_iommu_driver = {
.remove = mtk_iommu_remove,
.driver = {
.name = "mtk-iommu",
- .of_match_table = of_match_ptr(mtk_iommu_of_ids),
+ .of_match_table = mtk_iommu_of_ids,
.pm = &mtk_iommu_pm_ops,
}
};
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 122925dbe547..df32b3e3408b 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -39,6 +39,7 @@ enum mtk_iommu_plat {
M4U_MT2701,
M4U_MT2712,
M4U_MT6779,
+ M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
};
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 3b1bf2fb94f5..ea6db1341916 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -881,7 +881,6 @@ static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
struct sun50i_iommu *iommu = dev_id;
- phys_addr_t iova;
u32 status;
spin_lock(&iommu->iommu_lock);
@@ -893,15 +892,15 @@ static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
}
if (status & IOMMU_INT_INVALID_L2PG)
- iova = sun50i_iommu_handle_pt_irq(iommu,
- IOMMU_INT_ERR_ADDR_L2_REG,
- IOMMU_L2PG_INT_REG);
+ sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L2_REG,
+ IOMMU_L2PG_INT_REG);
else if (status & IOMMU_INT_INVALID_L1PG)
- iova = sun50i_iommu_handle_pt_irq(iommu,
- IOMMU_INT_ERR_ADDR_L1_REG,
- IOMMU_L1PG_INT_REG);
+ sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L1_REG,
+ IOMMU_L1PG_INT_REG);
else
- iova = sun50i_iommu_handle_perm_irq(iommu);
+ sun50i_iommu_handle_perm_irq(iommu);
iommu_write(iommu, IOMMU_INT_CLR_REG, status);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 124c8848ab7e..0becdbfea306 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
@@ -19,8 +20,10 @@
struct tegra_smmu_group {
struct list_head list;
+ struct tegra_smmu *smmu;
const struct tegra_smmu_group_soc *soc;
struct iommu_group *group;
+ unsigned int swgroup;
};
struct tegra_smmu {
@@ -49,6 +52,7 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
+ spinlock_t lock;
u32 *count;
struct page **pts;
struct page *pd;
@@ -127,6 +131,11 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
+#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
+#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
+#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
+#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))
+
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -308,6 +317,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
return NULL;
}
+ spin_lock_init(&as->lock);
+
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -569,19 +580,14 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap)
+ dma_addr_t *dmap, struct page *page)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
if (!as->pts[pde]) {
- struct page *page;
dma_addr_t dma;
- page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
- if (!page)
- return NULL;
-
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
@@ -644,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
u32 *pte, dma_addr_t pte_dma, u32 val)
{
struct tegra_smmu *smmu = as->smmu;
- unsigned long offset = offset_in_page(pte);
+ unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
*pte = val;
@@ -655,15 +661,61 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static struct page *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
+{
+ unsigned int pde = iova_pd_index(iova);
+ struct page *page = as->pts[pde];
+
+ /* First check whether an allocation needs to be done at all */
+ if (page)
+ return page;
+
+ /*
+ * In order to prevent exhaustion of the atomic memory pool, we
+ * allocate the page in a sleeping context if the GFP flags permit.
+ * Hence the spinlock needs to be unlocked and re-locked around the
+ * allocation.
+ */
+ if (!(gfp & __GFP_ATOMIC))
+ spin_unlock_irqrestore(&as->lock, *flags);
+
+ page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+
+ if (!(gfp & __GFP_ATOMIC))
+ spin_lock_irqsave(&as->lock, *flags);
+
+ /*
+ * In the case of a blocking allocation, a concurrent mapping may win
+ * the PDE allocation. In that case the page allocated here isn't needed
+ * if the concurrent allocation succeeded, and our allocation failure
+ * isn't fatal.
+ */
+ if (as->pts[pde]) {
+ if (page)
+ __free_page(page);
+
+ page = as->pts[pde];
+ }
+
+ return page;
+}
+
+static int
+__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
+ unsigned long *flags)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ struct page *page;
u32 pte_attrs;
u32 *pte;
- pte = as_get_pte(as, iova, &pte_dma);
+ page = as_get_pde_page(as, iova, gfp, flags);
+ if (!page)
+ return -ENOMEM;
+
+ pte = as_get_pte(as, iova, &pte_dma, page);
if (!pte)
return -ENOMEM;
@@ -680,13 +732,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
pte_attrs |= SMMU_PTE_WRITABLE;
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | pte_attrs);
+ SMMU_PHYS_PFN(paddr) | pte_attrs);
return 0;
}
-static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t
+__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
@@ -702,6 +755,33 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
+static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&as->lock, flags);
+ ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return ret;
+}
+
+static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ size = __tegra_smmu_unmap(domain, iova, size, gather);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return size;
+}
+
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -716,7 +796,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & as->smmu->pfn_mask;
- return PFN_PHYS(pfn);
+ return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
@@ -813,22 +893,34 @@ tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
return NULL;
}
+static void tegra_smmu_group_release(void *iommu_data)
+{
+ struct tegra_smmu_group *group = iommu_data;
+ struct tegra_smmu *smmu = group->smmu;
+
+ mutex_lock(&smmu->lock);
+ list_del(&group->list);
+ mutex_unlock(&smmu->lock);
+}
+
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
unsigned int swgroup)
{
const struct tegra_smmu_group_soc *soc;
struct tegra_smmu_group *group;
+ struct iommu_group *grp;
+ /* Find the group_soc associated with the swgroup */
soc = tegra_smmu_find_group(smmu, swgroup);
- if (!soc)
- return NULL;
mutex_lock(&smmu->lock);
+ /* Find an existing iommu_group associated with the swgroup or group_soc */
list_for_each_entry(group, &smmu->groups, list)
- if (group->soc == soc) {
+ if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
+ grp = iommu_group_ref_get(group->group);
mutex_unlock(&smmu->lock);
- return group->group;
+ return grp;
}
group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
@@ -838,6 +930,8 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
}
INIT_LIST_HEAD(&group->list);
+ group->swgroup = swgroup;
+ group->smmu = smmu;
group->soc = soc;
group->group = iommu_group_alloc();
@@ -847,6 +941,9 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
return NULL;
}
+ iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
+ if (soc)
+ iommu_group_set_name(group->group, soc->name);
list_add_tail(&group->list, &smmu->groups);
mutex_unlock(&smmu->lock);
@@ -1019,10 +1116,11 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
- smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ smmu->pfn_mask =
+ BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
- smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
smmu->tlb_mask);
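The tlb_mask change fixes a subtle arithmetic bug: (num_tlb_lines << 1) - 1 is only a contiguous bit mask when the line count is a power of two, whereas (1 << fls(n)) - 1 always is. A worked example for a hypothetical 48-line TLB, with the kernel's fls() approximated by a compiler builtin:

#include <stdio.h>

static int fls_approx(unsigned int x)   /* like the kernel's fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int n = 48;    /* a non-power-of-two line count */

        printf("old: %#x\n", (n << 1) - 1);              /* 0x5f = 0b1011111, hole at bit 5 */
        printf("new: %#x\n", (1u << fls_approx(n)) - 1); /* 0x3f = 0b0111111, contiguous */
        return 0;
}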
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bfc9719dbcdc..2aa79c32ee22 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -148,7 +148,7 @@ config DAVINCI_CP_INTC
config DW_APB_ICTL
bool
select GENERIC_IRQ_CHIP
- select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
config FARADAY_FTINTC010
bool
@@ -180,7 +180,6 @@ config IRQ_MIPS_CPU
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
- select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config CLPS711X_IRQCHIP
@@ -232,12 +231,12 @@ config RENESAS_INTC_IRQPIN
interrupt pins, as found on SH/R-Mobile and R-Car Gen1 SoCs.
config RENESAS_IRQC
- bool "Renesas R-Mobile APE6 and R-Car IRQC support" if COMPILE_TEST
+ bool "Renesas R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} IRQC support" if COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
Enable support for the Renesas Interrupt Controller for external
- devices, as found on R-Mobile APE6, R-Car Gen2, and R-Car Gen3 SoCs.
+ devices, as found on R-Mobile APE6, R-Car Gen{2,3} and RZ/G{1,2} SoCs.
config RENESAS_RZA1_IRQC
bool "Renesas RZ/A1 IRQC support" if COMPILE_TEST
@@ -246,6 +245,14 @@ config RENESAS_RZA1_IRQC
Enable support for the Renesas RZ/A1 Interrupt Controller, to use up
to 8 external interrupts with configurable sense select.
+config SL28CPLD_INTC
+ bool "Kontron sl28cpld IRQ controller"
+ depends on MFD_SL28CPLD=y || COMPILE_TEST
+ select REGMAP_IRQ
+ help
+ Interrupt controller driver for the board management controller
+ found on the Kontron sl28 CPLD.
+
config ST_IRQCHIP
bool
select REGMAP
@@ -307,7 +314,6 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
select GENERIC_IRQ_IPI
- select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
@@ -493,6 +499,16 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
+config TI_PRUSS_INTC
+ tristate "TI PRU-ICSS Interrupt Controller"
+ depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ select IRQ_DOMAIN
+ help
+ This enables support for the PRU-ICSS Local Interrupt Controller
+	  present within a PRU-ICSS subsystem on various TI SoCs.
+ The PRUSS INTC enables various interrupts to be routed to multiple
+ different processors within the SoC.
+
config RISCV_INTC
bool "RISC-V Local Interrupt Controller"
depends on RISCV
@@ -571,4 +587,13 @@ config LOONGSON_PCH_MSI
help
Support for the Loongson PCH MSI Controller.
+config MST_IRQ
+ bool "MStar Interrupt Controller"
+ depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST
+ default ARCH_MEDIATEK
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+	  Support for the MStar Interrupt Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 133f9c45744a..94c2885882ee 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
+obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
@@ -16,7 +17,6 @@ obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
-obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
obj-$(CONFIG_OMPIC) += irq-ompic.o
@@ -106,8 +106,11 @@ obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
+obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
obj-$(CONFIG_LOONGSON_HTVEC) += irq-loongson-htvec.o
obj-$(CONFIG_LOONGSON_PCH_PIC) += irq-loongson-pch-pic.o
obj-$(CONFIG_LOONGSON_PCH_MSI) += irq-loongson-pch-msi.o
+obj-$(CONFIG_MST_IRQ) += irq-mst-intc.o
+obj-$(CONFIG_SL28CPLD_INTC) += irq-sl28cpld.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index c9bdc5221b82..d7eb2e93db8f 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -310,7 +310,134 @@ static inline int armada_370_xp_msi_init(struct device_node *node,
}
#endif
+static void armada_xp_mpic_perf_init(void)
+{
+ unsigned long cpuid = cpu_logical_map(smp_processor_id());
+
+ /* Enable Performance Counter Overflow interrupts */
+ writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
+ per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+}
+
#ifdef CONFIG_SMP
+static struct irq_domain *ipi_domain;
+
+static void armada_370_xp_ipi_mask(struct irq_data *d)
+{
+ u32 reg;
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg &= ~BIT(d->hwirq);
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_unmask(struct irq_data *d)
+{
+ u32 reg;
+ reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+ reg |= BIT(d->hwirq);
+ writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+}
+
+static void armada_370_xp_ipi_send_mask(struct irq_data *d,
+ const struct cpumask *mask)
+{
+ unsigned long map = 0;
+ int cpu;
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= 1 << cpu_logical_map(cpu);
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before issuing the IPI.
+ */
+ dsb();
+
+ /* trigger the software interrupt (IPI) in hardware */
+ writel((map << 8) | d->hwirq, main_int_base +
+ ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
+static void armada_370_xp_ipi_eoi(struct irq_data *d)
+{
+ writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+}
+
+static struct irq_chip ipi_irqchip = {
+ .name = "IPI",
+ .irq_mask = armada_370_xp_ipi_mask,
+ .irq_unmask = armada_370_xp_ipi_unmask,
+ .irq_eoi = armada_370_xp_ipi_eoi,
+ .ipi_send_mask = armada_370_xp_ipi_send_mask,
+};
+
+static int armada_370_xp_ipi_alloc(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_set_percpu_devid(virq + i);
+ irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
+ d->host_data,
+ handle_percpu_devid_fasteoi_ipi,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
+static void armada_370_xp_ipi_free(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+ .alloc = armada_370_xp_ipi_alloc,
+ .free = armada_370_xp_ipi_free,
+};
+
+static void ipi_resume(void)
+{
+ int i;
+
+ for (i = 0; i < IPI_DOORBELL_END; i++) {
+ int irq;
+
+ irq = irq_find_mapping(ipi_domain, i);
+ if (irq <= 0)
+ continue;
+ if (irq_percpu_is_enabled(irq)) {
+ struct irq_data *d;
+ d = irq_domain_get_irq_data(ipi_domain, irq);
+ armada_370_xp_ipi_unmask(d);
+ }
+ }
+}
+
+static __init void armada_xp_ipi_init(struct device_node *node)
+{
+ int base_ipi;
+
+ ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ IPI_DOORBELL_END,
+ &ipi_domain_ops, NULL);
+ if (WARN_ON(!ipi_domain))
+ return;
+
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+ base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
+ NUMA_NO_NODE, NULL, false, NULL);
+ if (WARN_ON(!base_ipi))
+ return;
+
+ set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
+}
+
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static int armada_xp_set_affinity(struct irq_data *d,
@@ -334,43 +461,6 @@ static int armada_xp_set_affinity(struct irq_data *d,
return IRQ_SET_MASK_OK;
}
-#endif
-
-static struct irq_chip armada_370_xp_irq_chip = {
- .name = "MPIC",
- .irq_mask = armada_370_xp_irq_mask,
- .irq_mask_ack = armada_370_xp_irq_mask,
- .irq_unmask = armada_370_xp_irq_unmask,
-#ifdef CONFIG_SMP
- .irq_set_affinity = armada_xp_set_affinity,
-#endif
- .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
-};
-
-static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
- unsigned int virq, irq_hw_number_t hw)
-{
- armada_370_xp_irq_mask(irq_get_irq_data(virq));
- if (!is_percpu_irq(hw))
- writel(hw, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- else
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
- irq_set_status_flags(virq, IRQ_LEVEL);
-
- if (is_percpu_irq(hw)) {
- irq_set_percpu_devid(virq);
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_percpu_devid_irq);
- } else {
- irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
- handle_level_irq);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
- }
- irq_set_probe(virq);
-
- return 0;
-}
static void armada_xp_mpic_smp_cpu_init(void)
{
@@ -383,48 +473,16 @@ static void armada_xp_mpic_smp_cpu_init(void)
for (i = 0; i < nr_irqs; i++)
writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+ /* Disable all IPIs */
+ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
/* Clear pending IPIs */
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
- /* Enable first 8 IPIs */
- writel(IPI_DOORBELL_MASK, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
-
/* Unmask IPI interrupt */
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
-static void armada_xp_mpic_perf_init(void)
-{
- unsigned long cpuid = cpu_logical_map(smp_processor_id());
-
- /* Enable Performance Counter Overflow interrupts */
- writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
- per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
-}
-
-#ifdef CONFIG_SMP
-static void armada_mpic_send_doorbell(const struct cpumask *mask,
- unsigned int irq)
-{
- int cpu;
- unsigned long map = 0;
-
- /* Convert our logical CPU mask into a physical one. */
- for_each_cpu(cpu, mask)
- map |= 1 << cpu_logical_map(cpu);
-
- /*
- * Ensure that stores to Normal memory are visible to the
- * other CPUs before issuing the IPI.
- */
- dsb();
-
- /* submit softirq */
- writel((map << 8) | irq, main_int_base +
- ARMADA_370_XP_SW_TRIG_INT_OFFS);
-}
-
static void armada_xp_mpic_reenable_percpu(void)
{
unsigned int irq;
@@ -445,6 +503,8 @@ static void armada_xp_mpic_reenable_percpu(void)
armada_370_xp_irq_unmask(data);
}
+
+ ipi_resume();
}
static int armada_xp_mpic_starting_cpu(unsigned int cpu)
@@ -462,7 +522,46 @@ static int mpic_cascaded_starting_cpu(unsigned int cpu)
enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
return 0;
}
+#else
+static void armada_xp_mpic_smp_cpu_init(void) {}
+static void ipi_resume(void) {}
+#endif
+
+static struct irq_chip armada_370_xp_irq_chip = {
+ .name = "MPIC",
+ .irq_mask = armada_370_xp_irq_mask,
+ .irq_mask_ack = armada_370_xp_irq_mask,
+ .irq_unmask = armada_370_xp_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = armada_xp_set_affinity,
#endif
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ if (!is_percpu_irq(hw))
+ writel(hw, per_cpu_int_base +
+ ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+ else
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+
+ if (is_percpu_irq(hw)) {
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_percpu_devid_irq);
+ } else {
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_level_irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ }
+ irq_set_probe(virq);
+
+ return 0;
+}
static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.map = armada_370_xp_mpic_irq_map,
@@ -562,22 +661,15 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
#ifdef CONFIG_SMP
/* IPI Handling */
if (irqnr == 0) {
- u32 ipimask, ipinr;
+ unsigned long ipimask;
+ int ipi;
ipimask = readl_relaxed(per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~ipimask, per_cpu_int_base +
- ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
- /* Handle all pending doorbells */
- for (ipinr = IPI_DOORBELL_START;
- ipinr < IPI_DOORBELL_END; ipinr++) {
- if (ipimask & (0x1 << ipinr))
- handle_IPI(ipinr, regs);
- }
- continue;
+ for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
+ handle_domain_irq(ipi_domain, ipi, regs);
}
#endif
@@ -636,6 +728,8 @@ static void armada_370_xp_mpic_resume(void)
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+ ipi_resume();
}
static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
@@ -691,7 +785,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
irq_set_default_host(armada_370_xp_mpic_domain);
set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
- set_smp_cross_call(armada_mpic_send_doorbell);
+ armada_xp_ipi_init(node);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
"irqchip/armada/ipi:starting",
armada_xp_mpic_starting_cpu, NULL);
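Both this conversion and the bcm2836 one below follow the same recipe for moving off the private set_smp_cross_call() hook: model each doorbell bit as a hwirq in a dedicated IPI domain, pre-allocate the virqs, and hand the range to the architecture code. A condensed sketch of that recipe; the my_ipi_* names are placeholders for the driver-specific callbacks, everything else mirrors calls visible in the diff:

static const struct irq_domain_ops my_ipi_domain_ops = {
        .alloc = my_ipi_alloc,  /* irq_set_percpu_devid() + irq_domain_set_info()
                                 * with handle_percpu_devid_fasteoi_ipi */
        .free  = my_ipi_free,
};

static void __init my_ipi_init(struct fwnode_handle *fwnode, int nr_ipis)
{
        struct irq_domain *d;
        int base;

        d = irq_domain_create_linear(fwnode, nr_ipis, &my_ipi_domain_ops, NULL);
        if (WARN_ON(!d))
                return;

        irq_domain_update_bus_token(d, DOMAIN_BUS_IPI);
        base = __irq_domain_alloc_irqs(d, -1, nr_ipis, NUMA_NO_NODE,
                                       NULL, false, NULL);
        if (WARN_ON(base <= 0))
                return;

        /* from here on the core sends IPIs via the chip's ipi_send_mask() */
        set_smp_ipi_range(base, nr_ipis);
}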
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 2038693f074c..cbc7c740e4dc 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -10,6 +10,7 @@
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-bcm2836.h>
#include <asm/exception.h>
@@ -89,12 +90,24 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
+static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_dummy = {
+ .name = "bcm2836-dummy",
+ .irq_eoi = bcm2836_arm_irqchip_dummy_op,
+};
+
static int bcm2836_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct irq_chip *chip;
switch (hw) {
+ case LOCAL_IRQ_MAILBOX0:
+ chip = &bcm2836_arm_irqchip_dummy;
+ break;
case LOCAL_IRQ_CNTPSIRQ:
case LOCAL_IRQ_CNTPNSIRQ:
case LOCAL_IRQ_CNTHPIRQ:
@@ -127,17 +140,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
u32 stat;
stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
- if (stat & BIT(LOCAL_IRQ_MAILBOX0)) {
-#ifdef CONFIG_SMP
- void __iomem *mailbox0 = (intc.base +
- LOCAL_MAILBOX0_CLR0 + 16 * cpu);
- u32 mbox_val = readl(mailbox0);
- u32 ipi = ffs(mbox_val) - 1;
-
- writel(1 << ipi, mailbox0);
- handle_IPI(ipi, regs);
-#endif
- } else if (stat) {
+ if (stat) {
u32 hwirq = ffs(stat) - 1;
handle_domain_irq(intc.domain, hwirq, regs);
@@ -145,8 +148,35 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
}
#ifdef CONFIG_SMP
-static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
- unsigned int ipi)
+static struct irq_domain *ipi_domain;
+
+static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cpu = smp_processor_id();
+ u32 mbox_val;
+
+ chained_irq_enter(chip, desc);
+
+ mbox_val = readl_relaxed(intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+ if (mbox_val) {
+ int hwirq = ffs(mbox_val) - 1;
+ generic_handle_irq(irq_find_mapping(ipi_domain, hwirq));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+{
+ int cpu = smp_processor_id();
+
+ writel_relaxed(BIT(d->hwirq),
+ intc.base + LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+}
+
+static void bcm2836_arm_irqchip_ipi_send_mask(struct irq_data *d,
+ const struct cpumask *mask)
{
int cpu;
void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
@@ -157,11 +187,47 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
*/
smp_wmb();
- for_each_cpu(cpu, mask) {
- writel(1 << ipi, mailbox0_base + 16 * cpu);
+ for_each_cpu(cpu, mask)
+ writel_relaxed(BIT(d->hwirq), mailbox0_base + 16 * cpu);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_ipi = {
+ .name = "IPI",
+ .irq_mask = bcm2836_arm_irqchip_dummy_op,
+ .irq_unmask = bcm2836_arm_irqchip_dummy_op,
+ .irq_eoi = bcm2836_arm_irqchip_ipi_eoi,
+ .ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask,
+};
+
+static int bcm2836_arm_irqchip_ipi_alloc(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_set_percpu_devid(virq + i);
+ irq_domain_set_info(d, virq + i, i, &bcm2836_arm_irqchip_ipi,
+ d->host_data,
+ handle_percpu_devid_fasteoi_ipi,
+ NULL, NULL);
}
+
+ return 0;
}
+static void bcm2836_arm_irqchip_ipi_free(struct irq_domain *d,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* Not freeing IPIs */
+}
+
+static const struct irq_domain_ops ipi_domain_ops = {
+ .alloc = bcm2836_arm_irqchip_ipi_alloc,
+ .free = bcm2836_arm_irqchip_ipi_free,
+};
+
static int bcm2836_cpu_starting(unsigned int cpu)
{
bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
@@ -175,25 +241,58 @@ static int bcm2836_cpu_dying(unsigned int cpu)
cpu);
return 0;
}
-#endif
-static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onetwocell,
- .map = bcm2836_map,
-};
+#define BITS_PER_MBOX 32
-static void
-bcm2836_arm_irqchip_smp_init(void)
+static void __init bcm2836_arm_irqchip_smp_init(void)
{
-#ifdef CONFIG_SMP
+ struct irq_fwspec ipi_fwspec = {
+ .fwnode = intc.domain->fwnode,
+ .param_count = 1,
+ .param = {
+ [0] = LOCAL_IRQ_MAILBOX0,
+ },
+ };
+ int base_ipi, mux_irq;
+
+ mux_irq = irq_create_fwspec_mapping(&ipi_fwspec);
+ if (WARN_ON(mux_irq <= 0))
+ return;
+
+ ipi_domain = irq_domain_create_linear(intc.domain->fwnode,
+ BITS_PER_MBOX, &ipi_domain_ops,
+ NULL);
+ if (WARN_ON(!ipi_domain))
+ return;
+
+ ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
+ irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
+
+ base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, BITS_PER_MBOX,
+ NUMA_NO_NODE, NULL,
+ false, NULL);
+
+ if (WARN_ON(!base_ipi))
+ return;
+
+ set_smp_ipi_range(base_ipi, BITS_PER_MBOX);
+
+ irq_set_chained_handler_and_data(mux_irq,
+ bcm2836_arm_irqchip_handle_ipi, NULL);
+
/* Unmask IPIs to the boot CPU. */
cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
"irqchip/bcm2836:starting", bcm2836_cpu_starting,
bcm2836_cpu_dying);
-
- set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
-#endif
}
+#else
+#define bcm2836_arm_irqchip_smp_init() do { } while (0)
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
+};
/*
* The LOCAL_IRQ_CNT* timer firings are based off of the external
@@ -232,6 +331,8 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
+ irq_domain_update_bus_token(intc.domain, DOMAIN_BUS_WIRED);
+
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
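Unlike the Armada MPIC, where doorbells are read directly in the top-level handler, bcm2836 routes the mailbox through a chained handler that demuxes individual IPI bits into the child domain. A stripped-down sketch of that mux shape; my_ipi_domain and read_mailbox() are placeholders for the driver specifics, while the chained_irq_* bracketing matches the code above:

static struct irq_domain *my_ipi_domain;

static void my_ipi_mux_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        u32 pending;

        chained_irq_enter(chip, desc);

        pending = read_mailbox();       /* placeholder hardware read */
        if (pending)
                generic_handle_irq(irq_find_mapping(my_ipi_domain,
                                                    ffs(pending) - 1));

        chained_irq_exit(chip, desc);
}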
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index e4550e9c810b..54b09d6c407c 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -17,6 +17,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/interrupt.h>
#define APB_INT_ENABLE_L 0x00
#define APB_INT_ENABLE_H 0x04
@@ -26,7 +27,28 @@
#define APB_INT_FINALSTATUS_H 0x34
#define APB_INT_BASE_OFFSET 0x04
-static void dw_apb_ictl_handler(struct irq_desc *desc)
+/* irq domain of the primary interrupt controller. */
+static struct irq_domain *dw_apb_ictl_irq_domain;
+
+static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs)
+{
+ struct irq_domain *d = dw_apb_ictl_irq_domain;
+ int n;
+
+ for (n = 0; n < d->revmap_size; n += 32) {
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+ u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
+ while (stat) {
+ u32 hwirq = ffs(stat) - 1;
+
+ handle_domain_irq(d, hwirq, regs);
+ stat &= ~BIT(hwirq);
+ }
+ }
+}
+
+static void dw_apb_ictl_handle_irq_cascaded(struct irq_desc *desc)
{
struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -43,13 +65,37 @@ static void dw_apb_ictl_handler(struct irq_desc *desc)
u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
generic_handle_irq(virq);
- stat &= ~(1 << hwirq);
+ stat &= ~BIT(hwirq);
}
}
chained_irq_exit(chip, desc);
}
+static int dw_apb_ictl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int i, ret;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = arg;
+
+ ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_map_generic_chip(domain, virq + i, hwirq + i);
+
+ return 0;
+}
+
+static const struct irq_domain_ops dw_apb_ictl_irq_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = dw_apb_ictl_irq_domain_alloc,
+ .free = irq_domain_free_irqs_top,
+};
+
#ifdef CONFIG_PM
static void dw_apb_ictl_resume(struct irq_data *d)
{
@@ -68,19 +114,27 @@ static void dw_apb_ictl_resume(struct irq_data *d)
static int __init dw_apb_ictl_init(struct device_node *np,
struct device_node *parent)
{
+ const struct irq_domain_ops *domain_ops;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct resource r;
struct irq_domain *domain;
struct irq_chip_generic *gc;
void __iomem *iobase;
- int ret, nrirqs, irq, i;
+ int ret, nrirqs, parent_irq, i;
u32 reg;
- /* Map the parent interrupt for the chained handler */
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0) {
- pr_err("%pOF: unable to parse irq\n", np);
- return -EINVAL;
+ if (!parent) {
+ /* Used as the primary interrupt controller */
+ parent_irq = 0;
+ domain_ops = &dw_apb_ictl_irq_domain_ops;
+ } else {
+ /* Map the parent interrupt for the chained handler */
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (parent_irq <= 0) {
+ pr_err("%pOF: unable to parse irq\n", np);
+ return -EINVAL;
+ }
+ domain_ops = &irq_generic_chip_ops;
}
ret = of_address_to_resource(np, 0, &r);
@@ -120,8 +174,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
else
nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
- domain = irq_domain_add_linear(np, nrirqs,
- &irq_generic_chip_ops, NULL);
+ domain = irq_domain_add_linear(np, nrirqs, domain_ops, NULL);
if (!domain) {
pr_err("%pOF: unable to add irq domain\n", np);
ret = -ENOMEM;
@@ -146,7 +199,13 @@ static int __init dw_apb_ictl_init(struct device_node *np,
gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
}
- irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);
+ if (parent_irq) {
+ irq_set_chained_handler_and_data(parent_irq,
+ dw_apb_ictl_handle_irq_cascaded, domain);
+ } else {
+ dw_apb_ictl_irq_domain = domain;
+ set_handle_irq(dw_apb_ictl_handle_irq);
+ }
return 0;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 82520006195d..f47b41dfd023 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -152,9 +152,6 @@ void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void))
writel_relaxed(GICD_INT_DEF_PRI_X4,
base + GIC_DIST_PRI + i * 4 / 4);
- /* Ensure all SGI interrupts are now enabled */
- writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
-
if (sync_access)
sync_access();
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 548de7538632..0fec31931e11 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1720,6 +1720,11 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+static int its_irq_retrigger(struct irq_data *d)
+{
+ return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
+}
+
/*
* Two favourable cases:
*
@@ -1971,6 +1976,7 @@ static struct irq_chip its_irq_chip = {
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
.irq_set_irqchip_state = its_irq_set_irqchip_state,
+ .irq_retrigger = its_irq_retrigger,
.irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
@@ -2192,7 +2198,7 @@ static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
addr_end = addr + size - 1;
- for_each_reserved_mem_region(i, &start, &end) {
+ for_each_reserved_mem_range(i, &start, &end) {
if (addr >= start && addr_end <= end)
return true;
}
@@ -5263,7 +5269,12 @@ static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
return -EINVAL;
}
- node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
+ /*
+ * Note that in theory a new proximity node could be created by this
+ * entry as it is an SRAT resource allocation structure.
+ * We do not currently support doing so.
+ */
+ node = pxm_to_node(its_affinity->proximity_domain);
if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 850842f27bee..16fecc0febe8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -36,6 +36,8 @@
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -75,16 +77,14 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*
* If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
* EL1 are subject to a similar operation thus matching the priorities presented
- * from the (re)distributor when security is enabled.
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
*
* see GICv3/GICv4 Architecture Specification (IHI0069D):
* - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
* priorities.
* - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
* interrupt.
- *
- * For now, we only support pseudo-NMIs if we have non-secure view of
- * priorities.
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
@@ -97,6 +97,9 @@ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);
+DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
+EXPORT_SYMBOL(gic_nonsecure_priorities);
+
/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;
@@ -112,6 +115,7 @@ static DEFINE_PER_CPU(bool, has_rss);
#define DEFAULT_PMR_VALUE 0xf0
enum gic_intid_range {
+ SGI_RANGE,
PPI_RANGE,
SPI_RANGE,
EPPI_RANGE,
@@ -123,6 +127,8 @@ enum gic_intid_range {
static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
switch (hwirq) {
+ case 0 ... 15:
+ return SGI_RANGE;
case 16 ... 31:
return PPI_RANGE;
case 32 ... 1019:
@@ -148,15 +154,22 @@ static inline unsigned int gic_irq(struct irq_data *d)
return d->hwirq;
}
-static inline int gic_irq_in_rdist(struct irq_data *d)
+static inline bool gic_irq_in_rdist(struct irq_data *d)
{
- enum gic_intid_range range = get_intid_range(d);
- return range == PPI_RANGE || range == EPPI_RANGE;
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ return true;
+ default:
+ return false;
+ }
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
switch (get_intid_range(d)) {
+ case SGI_RANGE:
case PPI_RANGE:
case EPPI_RANGE:
/* SGI+PPI -> SGI_base for this CPU */
@@ -253,6 +266,7 @@ static void gic_enable_redist(bool enable)
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
switch (get_intid_range(d)) {
+ case SGI_RANGE:
case PPI_RANGE:
case SPI_RANGE:
*index = d->hwirq;
@@ -372,7 +386,7 @@ static int gic_irq_set_irqchip_state(struct irq_data *d,
{
u32 reg;
- if (d->hwirq >= 8192) /* PPI/SPI only */
+ if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
return -EINVAL;
switch (which) {
@@ -539,12 +553,12 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
u32 offset, index;
int ret;
- /* Interrupt configuration for SGIs can't be changed */
- if (irq < 16)
- return -EINVAL;
-
range = get_intid_range(d);
+ /* Interrupt configuration for SGIs can't be changed */
+ if (range == SGI_RANGE)
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
/* SPIs have restrictions on the supported types */
if ((range == SPI_RANGE || range == ESPI_RANGE) &&
type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
@@ -572,6 +586,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
+ if (get_intid_range(d) == SGI_RANGE)
+ return -EINVAL;
+
if (vcpu)
irqd_set_forwarded_to_vcpu(d);
else
@@ -646,38 +663,14 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if ((irqnr >= 1020 && irqnr <= 1023))
return;
- /* Treat anything but SGIs in a uniform way */
- if (likely(irqnr > 15)) {
- int err;
-
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_eoir(irqnr);
- else
- isb();
-
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
- if (err) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- gic_deactivate_unhandled(irqnr);
- }
- return;
- }
- if (irqnr < 16) {
+ if (static_branch_likely(&supports_deactivate_key))
gic_write_eoir(irqnr);
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_dir(irqnr);
-#ifdef CONFIG_SMP
- /*
- * Unlike GICv2, we don't need an smp_rmb() here.
- * The control dependency from gic_read_iar to
- * the ISB in gic_write_eoir is enough to ensure
- * that any shared data read by handle_IPI will
- * be read after the ACK.
- */
- handle_IPI(irqnr, regs);
-#else
- WARN_ONCE(true, "Unexpected SGI received!\n");
-#endif
+ else
+ isb();
+
+ if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ gic_deactivate_unhandled(irqnr);
}
}
@@ -932,14 +925,20 @@ static void gic_cpu_sys_reg_init(void)
/* Set priority mask register */
if (!gic_prio_masking_enabled()) {
write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
- } else {
+ } else if (gic_supports_nmi()) {
/*
* Mismatch configuration with boot CPU, the system is likely
* to die as interrupt masking will not work properly on all
* CPUs
+ *
+ * The boot CPU calls this function before enabling NMI support,
+ * and as a result we'll never see this warning in the boot path
+ * for that CPU.
*/
- WARN_ON(gic_supports_nmi() && group0 &&
- !gic_dist_security_disabled());
+ if (static_branch_unlikely(&gic_nonsecure_priorities))
+ WARN_ON(!group0 || gic_dist_security_disabled());
+ else
+ WARN_ON(group0 && !gic_dist_security_disabled());
}
/*
@@ -1125,11 +1124,11 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
gic_write_sgi1r(val);
}
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
int cpu;
- if (WARN_ON(irq >= 16))
+ if (WARN_ON(d->hwirq >= 16))
return;
/*
@@ -1143,7 +1142,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
- gic_send_sgi(cluster_id, tlist, irq);
+ gic_send_sgi(cluster_id, tlist, d->hwirq);
}
/* Force the above writes to ICC_SGI1R_EL1 to be executed */
@@ -1152,10 +1151,24 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void __init gic_smp_init(void)
{
- set_smp_cross_call(gic_raise_softirq);
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
"irqchip/arm/gicv3:starting",
gic_starting_cpu, NULL);
+
+ /* Register all 8 non-secure SGIs */
+ base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
+ NUMA_NO_NODE, &sgi_fwspec,
+ false, NULL);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
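
With the SGIs allocated as ordinary Linux interrupts, set_smp_ipi_range() hands the architecture a contiguous virq range, and an IPI is then raised through the regular IRQ machinery (ultimately gic_ipi_send_mask()) rather than a dedicated cross-call hook. A hedged sketch of the consumer side; __ipi_send_mask() is the real helper from kernel/irq/ipi.c, while the wrapper below is illustrative.

/*
 * Sketch: send logical IPI 'ipinr' to 'target', assuming base_sgi was
 * obtained from the allocation above.
 */
static void example_send_ipi(unsigned int base_sgi, unsigned int ipinr,
			     const struct cpumask *target)
{
	__ipi_send_mask(irq_to_desc(base_sgi + ipinr), target);
}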
@@ -1204,9 +1217,15 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
}
#else
#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
#define gic_smp_init() do { } while(0)
#endif
+static int gic_retrigger(struct irq_data *data)
+{
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
@@ -1242,10 +1261,12 @@ static struct irq_chip gic_chip = {
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
.irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_nmi_setup = gic_irq_nmi_setup,
.irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
@@ -1258,11 +1279,13 @@ static struct irq_chip gic_eoimode1_chip = {
.irq_eoi = gic_eoimode1_eoi_irq,
.irq_set_type = gic_set_type,
.irq_set_affinity = gic_set_affinity,
+ .irq_retrigger = gic_retrigger,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
.irq_nmi_setup = gic_irq_nmi_setup,
.irq_nmi_teardown = gic_irq_nmi_teardown,
+ .ipi_send_mask = gic_ipi_send_mask,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
@@ -1272,11 +1295,19 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct irq_chip *chip = &gic_chip;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
if (static_branch_likely(&supports_deactivate_key))
chip = &gic_eoimode1_chip;
switch (__get_intid_range(hw)) {
+ case SGI_RANGE:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_fasteoi_ipi,
+ NULL, NULL);
+ break;
+
case PPI_RANGE:
case EPPI_RANGE:
irq_set_percpu_devid(irq);
@@ -1289,7 +1320,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ irqd_set_single_target(irqd);
break;
case LPI_RANGE:
@@ -1303,16 +1334,22 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
return -EPERM;
}
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
+ irqd_set_handle_enforce_irqctx(irqd);
return 0;
}
-#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
-
static int gic_irq_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
unsigned int *type)
{
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+
if (is_of_node(fwspec->fwnode)) {
if (fwspec->param_count < 3)
return -EINVAL;
@@ -1544,11 +1581,6 @@ static void gic_enable_nmi_support(void)
if (!gic_prio_masking_enabled())
return;
- if (gic_has_group0() && !gic_dist_security_disabled()) {
- pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
- return;
- }
-
ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
if (!ppi_nmi_refs)
return;
@@ -1564,8 +1596,38 @@ static void gic_enable_nmi_support(void)
if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
static_branch_enable(&gic_pmr_sync);
- pr_info("%s ICC_PMR_EL1 synchronisation\n",
- static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing");
+ pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
+ static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
+
+ /*
+ * How priority values are used by the GIC depends on two things:
+ * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
+ * and if Group 0 interrupts can be delivered to Linux in the non-secure
+ * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect
+ * the ICC_PMR_EL1 register and the priority that software assigns to
+ * interrupts:
+ *
+ * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
+ * -----------------------------------------------------------
+ * 1 | - | unchanged | unchanged
+ * -----------------------------------------------------------
+ * 0 | 1 | non-secure | non-secure
+ * -----------------------------------------------------------
+ * 0 | 0 | unchanged | non-secure
+ *
+ * where non-secure means that the value is right-shifted by one with
+ * the MSB set, to make it fit in the non-secure priority range.
+ *
+ * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
+ * are both either modified or unchanged, we can use the same set of
+ * priorities.
+ *
+ * In the last case, where only the interrupt priorities are modified to
+ * be in the non-secure range, we use a different PMR value to mask IRQs
+ * and the rest of the values that we use remain unchanged.
+ */
+ if (gic_has_group0() && !gic_dist_security_disabled())
+ static_branch_enable(&gic_nonsecure_priorities);
static_branch_enable(&supports_pseudo_nmis);
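
In the table above, a "non-secure" view means the distributor stores a non-secure write of priority P as (P >> 1) | 0x80. A worked sketch of that transformation; the helper name is illustrative, the arithmetic comes from the comment above.

/*
 * Sketch: distributor-side view of a priority written from non-secure
 * EL1 when GICD_CTLR.DS == 0, e.g. 0xa0 becomes 0xd0.
 */
static inline u8 example_ns_to_distributor_prio(u8 ns_prio)
{
	return (ns_prio >> 1) | 0x80;
}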
@@ -1644,9 +1706,9 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_update_rdist_properties();
- gic_smp_init();
gic_dist_init();
gic_cpu_init();
+ gic_smp_init();
gic_cpu_pm_init();
if (gic_dist_supports_lpis()) {
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a27ba2cc1dce..6053245a4754 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -83,9 +83,6 @@ struct gic_chip_data {
#endif
struct irq_domain *domain;
unsigned int gic_irqs;
-#ifdef CONFIG_GIC_NON_BANKED
- void __iomem *(*get_base)(union gic_base *);
-#endif
};
#ifdef CONFIG_BL_SWITCHER
@@ -124,36 +121,30 @@ static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
static struct gic_kvm_info gic_v2_kvm_info;
+static DEFINE_PER_CPU(u32, sgi_intid);
+
#ifdef CONFIG_GIC_NON_BANKED
-static void __iomem *gic_get_percpu_base(union gic_base *base)
-{
- return raw_cpu_read(*base->percpu_base);
-}
+static DEFINE_STATIC_KEY_FALSE(frankengic_key);
-static void __iomem *gic_get_common_base(union gic_base *base)
+static void enable_frankengic(void)
{
- return base->common_base;
+ static_branch_enable(&frankengic_key);
}
-static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+static inline void __iomem *__get_base(union gic_base *base)
{
- return data->get_base(&data->dist_base);
-}
+ if (static_branch_unlikely(&frankengic_key))
+ return raw_cpu_read(*base->percpu_base);
-static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
-{
- return data->get_base(&data->cpu_base);
+ return base->common_base;
}
-static inline void gic_set_base_accessor(struct gic_chip_data *data,
- void __iomem *(*f)(union gic_base *))
-{
- data->get_base = f;
-}
+#define gic_data_dist_base(d) __get_base(&(d)->dist_base)
+#define gic_data_cpu_base(d) __get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
-#define gic_set_base_accessor(d, f)
+#define enable_frankengic() do { } while(0)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
@@ -226,16 +217,26 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+ u32 hwirq = gic_irq(d);
+
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
+ u32 hwirq = gic_irq(d);
+
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (irqd_is_forwarded_to_vcpu(d))
return;
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
@@ -295,7 +296,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
- return -EINVAL;
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
/* SPIs have restrictions on the supported types */
if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
@@ -315,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
- if (cascading_gic_irq(d))
+ if (cascading_gic_irq(d) || gic_irq(d) < 16)
return -EINVAL;
if (vcpu)
@@ -325,27 +326,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
-#ifdef CONFIG_SMP
-static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
- bool force)
+static int gic_retrigger(struct irq_data *data)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
- unsigned int cpu;
-
- if (!force)
- cpu = cpumask_any_and(mask_val, cpu_online_mask);
- else
- cpu = cpumask_first(mask_val);
-
- if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
- return -EINVAL;
-
- writeb_relaxed(gic_cpu_map[cpu], reg);
- irq_data_update_effective_affinity(d, cpumask_of(cpu));
-
- return IRQ_SET_MASK_OK_DONE;
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
-#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
@@ -357,31 +341,33 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
- if (likely(irqnr > 15 && irqnr < 1020)) {
- if (static_branch_likely(&supports_deactivate_key))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
- isb();
- handle_domain_irq(gic->domain, irqnr, regs);
- continue;
- }
- if (irqnr < 16) {
+ if (unlikely(irqnr >= 1020))
+ break;
+
+ if (static_branch_likely(&supports_deactivate_key))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
- if (static_branch_likely(&supports_deactivate_key))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
-#ifdef CONFIG_SMP
+ isb();
+
+ /*
+ * Ensure any shared data written by the CPU sending the IPI
+ * is read after we've read the ACK register on the GIC.
+ *
+ * Pairs with the write barrier in gic_ipi_send_mask
+ */
+ if (irqnr <= 15) {
+ smp_rmb();
+
/*
- * Ensure any shared data written by the CPU sending
- * the IPI is read after we've read the ACK register
- * on the GIC.
- *
- * Pairs with the write barrier in gic_raise_softirq
+ * The GIC encodes the source CPU in GICC_IAR,
+ * so deactivation will fail unless the value is
+ * written back as-is to GICC_EOI. Stash the INTID
+ * away for gic_eoi_irq() to write back. This only
+ * works because we don't nest SGIs...
*/
- smp_rmb();
- handle_IPI(irqnr, regs);
-#endif
- continue;
+ this_cpu_write(sgi_intid, irqstat);
}
- break;
+
+ handle_domain_irq(gic->domain, irqnr, regs);
} while (1);
}
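
The stash is needed because, on GICv2, the SGI ack value is more than an interrupt number: the architecture defines GICC_IAR bits [9:0] as the interrupt ID and bits [12:10] as the source CPU for SGIs, and GICC_EOI expects the identical value back. A hedged decode sketch; the macro names are illustrative, the field positions are architectural, and <linux/bitfield.h> is assumed.

#define EXAMPLE_IAR_INTID	GENMASK(9, 0)	/* interrupt ID */
#define EXAMPLE_IAR_CPUID	GENMASK(12, 10)	/* source CPU, SGIs only */

/* Sketch: split an ack value the way gic_handle_irq() does above. */
static inline void example_decode_iar(u32 irqstat, u32 *intid, u32 *src_cpu)
{
	*intid = FIELD_GET(EXAMPLE_IAR_INTID, irqstat);
	*src_cpu = FIELD_GET(EXAMPLE_IAR_CPUID, irqstat);
}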
@@ -417,6 +403,7 @@ static const struct irq_chip gic_chip = {
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.flags = IRQCHIP_SET_TYPE_MASKED |
@@ -728,11 +715,6 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
int i;
for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
-#ifdef CONFIG_GIC_NON_BANKED
- /* Skip over unused GICs */
- if (!gic_data[i].get_base)
- continue;
-#endif
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(&gic_data[i]);
@@ -795,14 +777,34 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ writeb_relaxed(gic_cpu_map[cpu], reg);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
int cpu;
unsigned long flags, map = 0;
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
- writel_relaxed(2 << 24 | irq,
+ writel_relaxed(2 << 24 | d->hwirq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
@@ -820,10 +822,41 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
dmb(ishst);
/* this always happens on GIC0 */
- writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
gic_unlock_irqrestore(flags);
}
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+ gic_cpu_init(&gic_data[0]);
+ return 0;
+}
+
+static __init void gic_smp_init(void)
+{
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data[0].domain->fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gic:starting",
+ gic_starting_cpu, NULL);
+
+ base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
+ NUMA_NO_NODE, &sgi_fwspec,
+ false, NULL);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
+}
+#else
+#define gic_smp_init() do { } while(0)
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
#endif
#ifdef CONFIG_BL_SWITCHER
@@ -969,17 +1002,30 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct gic_chip_data *gic = d->host_data;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
- if (hw < 32) {
+ switch (hw) {
+ case 0 ... 15:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ handle_percpu_devid_fasteoi_ipi,
+ NULL, NULL);
+ break;
+ case 16 ... 31:
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- } else {
+ break;
+ default:
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ irqd_set_single_target(irqd);
+ break;
}
+
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
+ irqd_set_handle_enforce_irqctx(irqd);
return 0;
}
@@ -992,19 +1038,26 @@ static int gic_irq_domain_translate(struct irq_domain *d,
unsigned long *hwirq,
unsigned int *type)
{
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+
if (is_of_node(fwspec->fwnode)) {
if (fwspec->param_count < 3)
return -EINVAL;
- /* Get the interrupt number and add 16 to skip over SGIs */
- *hwirq = fwspec->param[1] + 16;
-
- /*
- * For SPIs, we need to add 16 more to get the GIC irq
- * ID number
- */
- if (!fwspec->param[0])
- *hwirq += 16;
+ switch (fwspec->param[0]) {
+ case 0: /* SPI */
+ *hwirq = fwspec->param[1] + 32;
+ break;
+ case 1: /* PPI */
+ *hwirq = fwspec->param[1] + 16;
+ break;
+ default:
+ return -EINVAL;
+ }
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
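
A worked example of the reworked translation, assuming the usual three-cell GIC binding: <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH> (i.e. <0 29 4>) yields hwirq 29 + 32 = 61 with a level-high trigger, <GIC_PPI 9 ...> yields hwirq 9 + 16 = 25, and a one-cell SGI fwspec such as <7> maps straight to hwirq 7, edge-rising.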
@@ -1027,12 +1080,6 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-static int gic_starting_cpu(unsigned int cpu)
-{
- gic_cpu_init(&gic_data[0]);
- return 0;
-}
-
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -1079,10 +1126,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
}
-#ifdef CONFIG_SMP
- if (gic == &gic_data[0])
+ if (gic == &gic_data[0]) {
gic->chip.irq_set_affinity = gic_set_affinity;
-#endif
+ gic->chip.ipi_send_mask = gic_ipi_send_mask;
+ }
}
static int gic_init_bases(struct gic_chip_data *gic,
@@ -1112,7 +1159,7 @@ static int gic_init_bases(struct gic_chip_data *gic,
gic->raw_cpu_base + offset;
}
- gic_set_base_accessor(gic, gic_get_percpu_base);
+ enable_frankengic();
} else {
/* Normal, sane GIC... */
WARN(gic->percpu_offset,
@@ -1120,7 +1167,6 @@ static int gic_init_bases(struct gic_chip_data *gic,
gic->percpu_offset);
gic->dist_base.common_base = gic->raw_dist_base;
gic->cpu_base.common_base = gic->raw_cpu_base;
- gic_set_base_accessor(gic, gic_get_common_base);
}
/*
@@ -1199,12 +1245,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
*/
for (i = 0; i < NR_GIC_CPU_IF; i++)
gic_cpu_map[i] = 0xff;
-#ifdef CONFIG_SMP
- set_smp_cross_call(gic_raise_softirq);
-#endif
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
- "irqchip/arm/gic:starting",
- gic_starting_cpu, NULL);
+
set_handle_irq(gic_handle_irq);
if (static_branch_likely(&supports_deactivate_key))
pr_info("GIC: Using split EOI/Deactivate mode\n");
@@ -1221,6 +1262,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
ret = gic_init_bases(gic, handle);
if (ret)
kfree(name);
+ else if (gic == &gic_data[0])
+ gic_smp_init();
return ret;
}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 130caa1c9d93..9b73dcfaf48d 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -171,6 +171,29 @@ static int hip04_irq_set_affinity(struct irq_data *d,
return IRQ_SET_MASK_OK;
}
+
+static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+ int cpu;
+ unsigned long flags, map = 0;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+ /* Convert our logical CPU mask into a physical one. */
+ for_each_cpu(cpu, mask)
+ map |= hip04_cpu_map[cpu];
+
+ /*
+ * Ensure that stores to Normal memory are visible to the
+ * other CPUs before they observe us issuing the IPI.
+ */
+ dmb(ishst);
+
+ /* this always happens on GIC0 */
+ writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);
+
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
#endif
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
@@ -182,19 +205,9 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
- if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
+ if (irqnr <= HIP04_MAX_IRQS)
handle_domain_irq(hip04_data.domain, irqnr, regs);
- continue;
- }
- if (irqnr < 16) {
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-#ifdef CONFIG_SMP
- handle_IPI(irqnr, regs);
-#endif
- continue;
- }
- break;
- } while (1);
+ } while (irqnr > HIP04_MAX_IRQS);
}
static struct irq_chip hip04_irq_chip = {
@@ -205,6 +218,7 @@ static struct irq_chip hip04_irq_chip = {
.irq_set_type = hip04_irq_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = hip04_irq_set_affinity,
+ .ipi_send_mask = hip04_ipi_send_mask,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
@@ -279,39 +293,17 @@ static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
writel_relaxed(1, base + GIC_CPU_CTRL);
}
-#ifdef CONFIG_SMP
-static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
-{
- int cpu;
- unsigned long flags, map = 0;
-
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
-
- /* Convert our logical CPU mask into a physical one. */
- for_each_cpu(cpu, mask)
- map |= hip04_cpu_map[cpu];
-
- /*
- * Ensure that stores to Normal memory are visible to the
- * other CPUs before they observe us issuing the IPI.
- */
- dmb(ishst);
-
- /* this always happens on GIC0 */
- writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);
-
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-#endif
-
static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- if (hw < 32) {
+ if (hw < 16) {
+ irq_set_percpu_devid(irq);
+ irq_set_chip_and_handler(irq, &hip04_irq_chip,
+ handle_percpu_devid_fasteoi_ipi);
+ } else if (hw < 32) {
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_percpu_devid_irq);
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
} else {
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_fasteoi_irq);
@@ -328,10 +320,13 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{
- unsigned long ret = 0;
-
if (irq_domain_get_of_node(d) != controller)
return -EINVAL;
+ if (intsize == 1 && intspec[0] < 16) {
+ *out_hwirq = intspec[0];
+ *out_type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
if (intsize < 3)
return -EINVAL;
@@ -344,7 +339,7 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
- return ret;
+ return 0;
}
static int hip04_irq_starting_cpu(unsigned int cpu)
@@ -361,7 +356,6 @@ static const struct irq_domain_ops hip04_irq_domain_ops = {
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
- irq_hw_number_t hwirq_base = 16;
int nr_irqs, irq_base, i;
if (WARN_ON(!node))
@@ -390,24 +384,21 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
nr_irqs = HIP04_MAX_IRQS;
hip04_data.nr_irqs = nr_irqs;
- nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
-
- irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
+ irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
if (irq_base < 0) {
pr_err("failed to allocate IRQ numbers\n");
return -EINVAL;
}
hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
- hwirq_base,
+ 0,
&hip04_irq_domain_ops,
&hip04_data);
-
if (WARN_ON(!hip04_data.domain))
return -EINVAL;
#ifdef CONFIG_SMP
- set_smp_cross_call(hip04_raise_softirq);
+ set_smp_ipi_range(irq_base, 16);
#endif
set_handle_irq(hip04_handle_irq);
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index e35b7b09c3ab..7709f9712cb3 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -226,12 +226,9 @@ static int imx_intmux_probe(struct platform_device *pdev)
}
data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(data->ipg_clk)) {
- ret = PTR_ERR(data->ipg_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(data->ipg_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
+ "failed to get ipg clk\n");
data->channum = channum;
raw_spin_lock_init(&data->lock);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 290531ec3d61..1edf7692a790 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -158,12 +158,9 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
}
data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(data->ipg_clk)) {
- ret = PTR_ERR(data->ipg_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(data->ipg_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
+ "failed to get ipg clk\n");
raw_spin_lock_init(&data->lock);
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index 13e6016fe464..6392aafb9a63 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -151,7 +151,7 @@ static void htvec_reset(struct htvec *priv)
/* Clear IRQ cause registers, mask all interrupts */
for (idx = 0; idx < priv->num_parents; idx++) {
writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
- writel_relaxed(0xFFFFFFFF, priv->base);
+ writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
}
}
@@ -172,7 +172,7 @@ static int htvec_of_init(struct device_node *node,
goto free_priv;
}
- /* Interrupt may come from any of the 4 interrupt line */
+ /* Interrupt may come from any of the 8 interrupt lines */
for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
parent_irq[i] = irq_of_parse_and_map(node, i);
if (parent_irq[i] <= 0)
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
new file mode 100644
index 000000000000..143657b0cf28
--- /dev/null
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author Mark-PK Tsai <mark-pk.tsai@mediatek.com>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define INTC_MASK 0x0
+#define INTC_EOI 0x20
+
+struct mst_intc_chip_data {
+ raw_spinlock_t lock;
+ unsigned int irq_start, nr_irqs;
+ void __iomem *base;
+ bool no_eoi;
+};
+
+static void mst_set_irq(struct irq_data *d, u32 offset)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+ u16 val, mask;
+ unsigned long flags;
+
+ mask = 1 << (hwirq % 16);
+ offset += (hwirq / 16) * 4;
+
+ raw_spin_lock_irqsave(&cd->lock, flags);
+ val = readw_relaxed(cd->base + offset) | mask;
+ writew_relaxed(val, cd->base + offset);
+ raw_spin_unlock_irqrestore(&cd->lock, flags);
+}
+
+static void mst_clear_irq(struct irq_data *d, u32 offset)
+{
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+ u16 val, mask;
+ unsigned long flags;
+
+ mask = 1 << (hwirq % 16);
+ offset += (hwirq / 16) * 4;
+
+ raw_spin_lock_irqsave(&cd->lock, flags);
+ val = readw_relaxed(cd->base + offset) & ~mask;
+ writew_relaxed(val, cd->base + offset);
+ raw_spin_unlock_irqrestore(&cd->lock, flags);
+}
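
A worked example of the register arithmetic above, assuming the INTC_MASK bank at offset 0x0: hwirq 37 gives mask = 1 << (37 % 16) = 0x0020 and offset = (37 / 16) * 4 = 0x8, so masking hwirq 37 sets bit 5 of the 16-bit register at base + 0x8. Each 16-bit register thus covers 16 hwirqs, with consecutive registers 4 bytes apart.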
+
+static void mst_intc_mask_irq(struct irq_data *d)
+{
+ mst_set_irq(d, INTC_MASK);
+ irq_chip_mask_parent(d);
+}
+
+static void mst_intc_unmask_irq(struct irq_data *d)
+{
+ mst_clear_irq(d, INTC_MASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void mst_intc_eoi_irq(struct irq_data *d)
+{
+ struct mst_intc_chip_data *cd = irq_data_get_irq_chip_data(d);
+
+ if (!cd->no_eoi)
+ mst_set_irq(d, INTC_EOI);
+
+ irq_chip_eoi_parent(d);
+}
+
+static struct irq_chip mst_intc_chip = {
+ .name = "mst-intc",
+ .irq_mask = mst_intc_mask_irq,
+ .irq_unmask = mst_intc_unmask_irq,
+ .irq_eoi = mst_intc_eoi_irq,
+ .irq_get_irqchip_state = irq_chip_get_parent_state,
+ .irq_set_irqchip_state = irq_chip_set_parent_state,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int mst_intc_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct mst_intc_chip_data *cd = d->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0] != 0)
+ return -EINVAL;
+
+ if (fwspec->param[1] >= cd->nr_irqs)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mst_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ int i;
+ irq_hw_number_t hwirq;
+ struct irq_fwspec parent_fwspec, *fwspec = data;
+ struct mst_intc_chip_data *cd = domain->host_data;
+
+ /* Not GIC compliant */
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ /* No PPI should point to this domain */
+ if (fwspec->param[0])
+ return -EINVAL;
+
+ hwirq = fwspec->param[1];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &mst_intc_chip,
+ domain->host_data);
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param[1] = cd->irq_start + hwirq;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mst_intc_domain_ops = {
+ .translate = mst_intc_domain_translate,
+ .alloc = mst_intc_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init mst_intc_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *domain_parent;
+ struct mst_intc_chip_data *cd;
+ u32 irq_start, irq_end;
+
+ domain_parent = irq_find_host(parent);
+ if (!domain_parent) {
+ pr_err("mst-intc: interrupt-parent not found\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32_index(dn, "mstar,irqs-map-range", 0, &irq_start) ||
+ of_property_read_u32_index(dn, "mstar,irqs-map-range", 1, &irq_end))
+ return -EINVAL;
+
+ cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ cd->base = of_iomap(dn, 0);
+ if (!cd->base) {
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ cd->no_eoi = of_property_read_bool(dn, "mstar,intc-no-eoi");
+ raw_spin_lock_init(&cd->lock);
+ cd->irq_start = irq_start;
+ cd->nr_irqs = irq_end - irq_start + 1;
+ domain = irq_domain_add_hierarchy(domain_parent, 0, cd->nr_irqs, dn,
+ &mst_intc_domain_ops, cd);
+ if (!domain) {
+ iounmap(cd->base);
+ kfree(cd);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(mst_intc, "mstar,mst-intc", mst_intc_of_init);
diff --git a/drivers/irqchip/irq-owl-sirq.c b/drivers/irqchip/irq-owl-sirq.c
new file mode 100644
index 000000000000..6e4127465094
--- /dev/null
+++ b/drivers/irqchip/irq-owl-sirq.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi Owl SoCs SIRQ interrupt controller driver
+ *
+ * Copyright (C) 2014 Actions Semi Inc.
+ * David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Parthiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ * Author: Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_SIRQ 3
+
+#define INTC_EXTCTL_PENDING BIT(0)
+#define INTC_EXTCTL_CLK_SEL BIT(4)
+#define INTC_EXTCTL_EN BIT(5)
+#define INTC_EXTCTL_TYPE_MASK GENMASK(7, 6)
+#define INTC_EXTCTL_TYPE_HIGH 0
+#define INTC_EXTCTL_TYPE_LOW BIT(6)
+#define INTC_EXTCTL_TYPE_RISING BIT(7)
+#define INTC_EXTCTL_TYPE_FALLING (BIT(6) | BIT(7))
+
+/* S500 & S700 SIRQ control register masks */
+#define INTC_EXTCTL_SIRQ0_MASK GENMASK(23, 16)
+#define INTC_EXTCTL_SIRQ1_MASK GENMASK(15, 8)
+#define INTC_EXTCTL_SIRQ2_MASK GENMASK(7, 0)
+
+/* S900 SIRQ control register offsets, relative to controller base address */
+#define INTC_EXTCTL0 0x0000
+#define INTC_EXTCTL1 0x0328
+#define INTC_EXTCTL2 0x032c
+
+struct owl_sirq_params {
+ /* INTC_EXTCTL reg shared for all three SIRQ lines */
+ bool reg_shared;
+ /* INTC_EXTCTL reg offsets relative to controller base address */
+ u16 reg_offset[NUM_SIRQ];
+};
+
+struct owl_sirq_chip_data {
+ const struct owl_sirq_params *params;
+ void __iomem *base;
+ raw_spinlock_t lock;
+ u32 ext_irqs[NUM_SIRQ];
+};
+
+/* S500 & S700 SoCs */
+static const struct owl_sirq_params owl_sirq_s500_params = {
+ .reg_shared = true,
+ .reg_offset = { 0, 0, 0 },
+};
+
+/* S900 SoC */
+static const struct owl_sirq_params owl_sirq_s900_params = {
+ .reg_shared = false,
+ .reg_offset = { INTC_EXTCTL0, INTC_EXTCTL1, INTC_EXTCTL2 },
+};
+
+static u32 owl_field_get(u32 val, u32 index)
+{
+ switch (index) {
+ case 0:
+ return FIELD_GET(INTC_EXTCTL_SIRQ0_MASK, val);
+ case 1:
+ return FIELD_GET(INTC_EXTCTL_SIRQ1_MASK, val);
+ case 2:
+ default:
+ return FIELD_GET(INTC_EXTCTL_SIRQ2_MASK, val);
+ }
+}
+
+static u32 owl_field_prep(u32 val, u32 index)
+{
+ switch (index) {
+ case 0:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ0_MASK, val);
+ case 1:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, val);
+ case 2:
+ default:
+ return FIELD_PREP(INTC_EXTCTL_SIRQ2_MASK, val);
+ }
+}
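
A worked example for the shared-register case (S500/S700), where all three SIRQ lines live in one EXTCTL word: owl_field_prep(0x25, 1) expands to FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, 0x25) = 0x25 << 8 = 0x2500, which the write path below merges with the untouched SIRQ0 (bits 23:16) and SIRQ2 (bits 7:0) fields.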
+
+static u32 owl_sirq_read_extctl(struct owl_sirq_chip_data *data, u32 index)
+{
+ u32 val;
+
+ val = readl_relaxed(data->base + data->params->reg_offset[index]);
+ if (data->params->reg_shared)
+ val = owl_field_get(val, index);
+
+ return val;
+}
+
+static void owl_sirq_write_extctl(struct owl_sirq_chip_data *data,
+ u32 extctl, u32 index)
+{
+ u32 val;
+
+ if (data->params->reg_shared) {
+ val = readl_relaxed(data->base + data->params->reg_offset[index]);
+ val &= ~owl_field_prep(0xff, index);
+ extctl = owl_field_prep(extctl, index) | val;
+ }
+
+ writel_relaxed(extctl, data->base + data->params->reg_offset[index]);
+}
+
+static void owl_sirq_clear_set_extctl(struct owl_sirq_chip_data *d,
+ u32 clear, u32 set, u32 index)
+{
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&d->lock, flags);
+ val = owl_sirq_read_extctl(d, index);
+ val &= ~clear;
+ val |= set;
+ owl_sirq_write_extctl(d, val, index);
+ raw_spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static void owl_sirq_eoi(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ /*
+ * Software must clear the external interrupt pending bit when the
+ * interrupt type is edge triggered, hence the per-SIRQ clearing here.
+ */
+ if (!irqd_is_level_type(data))
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_PENDING,
+ data->hwirq);
+
+ irq_chip_eoi_parent(data);
+}
+
+static void owl_sirq_mask(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_EN, 0, data->hwirq);
+ irq_chip_mask_parent(data);
+}
+
+static void owl_sirq_unmask(struct irq_data *data)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_EN, data->hwirq);
+ irq_chip_unmask_parent(data);
+}
+
+/*
+ * The GIC does not handle falling-edge or active-low interrupts, so the
+ * SIRQ controller is programmed to convert falling edge to rising edge
+ * and active low to active high.
+ */
+static int owl_sirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
+ u32 sirq_type;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ sirq_type = INTC_EXTCTL_TYPE_LOW;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ sirq_type = INTC_EXTCTL_TYPE_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ sirq_type = INTC_EXTCTL_TYPE_FALLING;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ sirq_type = INTC_EXTCTL_TYPE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_TYPE_MASK, sirq_type,
+ data->hwirq);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip owl_sirq_chip = {
+ .name = "owl-sirq",
+ .irq_mask = owl_sirq_mask,
+ .irq_unmask = owl_sirq_unmask,
+ .irq_eoi = owl_sirq_eoi,
+ .irq_set_type = owl_sirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int owl_sirq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (!is_of_node(fwspec->fwnode))
+ return -EINVAL;
+
+ if (fwspec->param_count != 2 || fwspec->param[0] >= NUM_SIRQ)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+
+ return 0;
+}
+
+static int owl_sirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct owl_sirq_chip_data *chip_data = domain->host_data;
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = owl_sirq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &owl_sirq_chip,
+ chip_data);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = chip_data->ext_irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops owl_sirq_domain_ops = {
+ .translate = owl_sirq_domain_translate,
+ .alloc = owl_sirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init owl_sirq_init(const struct owl_sirq_params *params,
+ struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct owl_sirq_chip_data *chip_data;
+ int ret, i;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: failed to find sirq parent domain\n", node);
+ return -ENXIO;
+ }
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&chip_data->lock);
+
+ chip_data->params = params;
+
+ chip_data->base = of_iomap(node, 0);
+ if (!chip_data->base) {
+ pr_err("%pOF: failed to map sirq registers\n", node);
+ ret = -ENXIO;
+ goto out_free;
+ }
+
+ for (i = 0; i < NUM_SIRQ; i++) {
+ struct of_phandle_args irq;
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret) {
+ pr_err("%pOF: failed to parse interrupt %d\n", node, i);
+ goto out_unmap;
+ }
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ chip_data->ext_irqs[i] = irq.args[1];
+
+ /* Set 24MHz external interrupt clock freq */
+ owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
+ &owl_sirq_domain_ops, chip_data);
+ if (!domain) {
+ pr_err("%pOF: failed to add domain\n", node);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ iounmap(chip_data->base);
+out_free:
+ kfree(chip_data);
+
+ return ret;
+}
+
+static int __init owl_sirq_s500_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return owl_sirq_init(&owl_sirq_s500_params, node, parent);
+}
+
+IRQCHIP_DECLARE(owl_sirq_s500, "actions,s500-sirq", owl_sirq_s500_of_init);
+IRQCHIP_DECLARE(owl_sirq_s700, "actions,s700-sirq", owl_sirq_s500_of_init);
+
+static int __init owl_sirq_s900_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return owl_sirq_init(&owl_sirq_s900_params, node, parent);
+}
+
+IRQCHIP_DECLARE(owl_sirq_s900, "actions,s900-sirq", owl_sirq_s900_of_init);
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
new file mode 100644
index 000000000000..92fb5780dc10
--- /dev/null
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS INTC IRQChip driver for various TI SoCs
+ *
+ * Copyright (C) 2016-2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author(s):
+ * Andrew F. Davis <afd@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ *
+ * Copyright (C) 2019 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/*
+ * Number of host interrupts reaching the main MPU sub-system. Note that this
+ * is not the same as the total number of host interrupts supported by the PRUSS
+ * INTC instance
+ */
+#define MAX_NUM_HOST_IRQS 8
+
+/* minimum starting host interrupt number for MPU */
+#define FIRST_PRU_HOST_INT 2
+
+/* PRU_ICSS_INTC registers */
+#define PRU_INTC_REVID 0x0000
+#define PRU_INTC_CR 0x0004
+#define PRU_INTC_GER 0x0010
+#define PRU_INTC_GNLR 0x001c
+#define PRU_INTC_SISR 0x0020
+#define PRU_INTC_SICR 0x0024
+#define PRU_INTC_EISR 0x0028
+#define PRU_INTC_EICR 0x002c
+#define PRU_INTC_HIEISR 0x0034
+#define PRU_INTC_HIDISR 0x0038
+#define PRU_INTC_GPIR 0x0080
+#define PRU_INTC_SRSR(x) (0x0200 + (x) * 4)
+#define PRU_INTC_SECR(x) (0x0280 + (x) * 4)
+#define PRU_INTC_ESR(x) (0x0300 + (x) * 4)
+#define PRU_INTC_ECR(x) (0x0380 + (x) * 4)
+#define PRU_INTC_CMR(x) (0x0400 + (x) * 4)
+#define PRU_INTC_HMR(x) (0x0800 + (x) * 4)
+#define PRU_INTC_HIPIR(x) (0x0900 + (x) * 4)
+#define PRU_INTC_SIPR(x) (0x0d00 + (x) * 4)
+#define PRU_INTC_SITR(x) (0x0d80 + (x) * 4)
+#define PRU_INTC_HINLR(x) (0x1100 + (x) * 4)
+#define PRU_INTC_HIER 0x1500
+
+/* CMR register bit-field macros */
+#define CMR_EVT_MAP_MASK 0xf
+#define CMR_EVT_MAP_BITS 8
+#define CMR_EVT_PER_REG 4
+
+/* HMR register bit-field macros */
+#define HMR_CH_MAP_MASK 0xf
+#define HMR_CH_MAP_BITS 8
+#define HMR_CH_PER_REG 4
+
+/* HIPIR register bit-fields */
+#define INTC_HIPIR_NONE_HINT 0x80000000
+
+#define MAX_PRU_SYS_EVENTS 160
+#define MAX_PRU_CHANNELS 20
+
+/**
+ * struct pruss_intc_map_record - keeps track of actual mapping state
+ * @value: The currently mapped value (channel or host)
+ * @ref_count: Keeps track of number of current users of this resource
+ */
+struct pruss_intc_map_record {
+ u8 value;
+ u8 ref_count;
+};
+
+/**
+ * struct pruss_intc_match_data - match data to handle SoC variations
+ * @num_system_events: number of input system events handled by the PRUSS INTC
+ * @num_host_events: number of host events (which is equal to number of
+ * channels) supported by the PRUSS INTC
+ */
+struct pruss_intc_match_data {
+ u8 num_system_events;
+ u8 num_host_events;
+};
+
+/**
+ * struct pruss_intc - PRUSS interrupt controller structure
+ * @event_channel: current state of system event to channel mappings
+ * @channel_host: current state of channel to host mappings
+ * @irqs: kernel irq numbers corresponding to PRUSS host interrupts
+ * @base: base virtual address of INTC register space
+ * @domain: irq domain for this interrupt controller
+ * @soc_config: cached PRUSS INTC IP configuration data
+ * @dev: PRUSS INTC device pointer
+ * @lock: mutex to serialize interrupts mapping
+ */
+struct pruss_intc {
+ struct pruss_intc_map_record event_channel[MAX_PRU_SYS_EVENTS];
+ struct pruss_intc_map_record channel_host[MAX_PRU_CHANNELS];
+ unsigned int irqs[MAX_NUM_HOST_IRQS];
+ void __iomem *base;
+ struct irq_domain *domain;
+ const struct pruss_intc_match_data *soc_config;
+ struct device *dev;
+ struct mutex lock; /* PRUSS INTC lock */
+};
+
+/**
+ * struct pruss_host_irq_data - PRUSS host irq data structure
+ * @intc: PRUSS interrupt controller pointer
+ * @host_irq: host irq number
+ */
+struct pruss_host_irq_data {
+ struct pruss_intc *intc;
+ u8 host_irq;
+};
+
+static inline u32 pruss_intc_read_reg(struct pruss_intc *intc, unsigned int reg)
+{
+ return readl_relaxed(intc->base + reg);
+}
+
+static inline void pruss_intc_write_reg(struct pruss_intc *intc,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, intc->base + reg);
+}
+
+static void pruss_intc_update_cmr(struct pruss_intc *intc, unsigned int evt,
+ u8 ch)
+{
+ u32 idx, offset, val;
+
+ idx = evt / CMR_EVT_PER_REG;
+ offset = (evt % CMR_EVT_PER_REG) * CMR_EVT_MAP_BITS;
+
+ val = pruss_intc_read_reg(intc, PRU_INTC_CMR(idx));
+ val &= ~(CMR_EVT_MAP_MASK << offset);
+ val |= ch << offset;
+ pruss_intc_write_reg(intc, PRU_INTC_CMR(idx), val);
+
+ dev_dbg(intc->dev, "SYSEV%u -> CH%d (CMR%d 0x%08x)\n", evt, ch,
+ idx, pruss_intc_read_reg(intc, PRU_INTC_CMR(idx)));
+}
+
+static void pruss_intc_update_hmr(struct pruss_intc *intc, u8 ch, u8 host)
+{
+ u32 idx, offset, val;
+
+ idx = ch / HMR_CH_PER_REG;
+ offset = (ch % HMR_CH_PER_REG) * HMR_CH_MAP_BITS;
+
+ val = pruss_intc_read_reg(intc, PRU_INTC_HMR(idx));
+ val &= ~(HMR_CH_MAP_MASK << offset);
+ val |= host << offset;
+ pruss_intc_write_reg(intc, PRU_INTC_HMR(idx), val);
+
+ dev_dbg(intc->dev, "CH%d -> HOST%d (HMR%d 0x%08x)\n", ch, host, idx,
+ pruss_intc_read_reg(intc, PRU_INTC_HMR(idx)));
+}
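
A worked example of the CMR arithmetic, using the macros defined above: routing system event 10 to channel 3 gives idx = 10 / CMR_EVT_PER_REG = 2 and offset = (10 % 4) * CMR_EVT_MAP_BITS = 16, so pruss_intc_update_cmr(intc, 10, 3) writes 0x3 into bits 19:16 of PRU_INTC_CMR(2) at offset 0x0408. The HMR path is analogous, with 4 channels per register.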
+
+/**
+ * pruss_intc_map() - configure the PRUSS INTC
+ * @intc: PRUSS interrupt controller pointer
+ * @hwirq: the system event number
+ *
+ * Configures the PRUSS INTC with the provided configuration from the one parsed
+ * in the xlate function.
+ */
+static void pruss_intc_map(struct pruss_intc *intc, unsigned long hwirq)
+{
+ struct device *dev = intc->dev;
+ u8 ch, host, reg_idx;
+ u32 val;
+
+ mutex_lock(&intc->lock);
+
+ intc->event_channel[hwirq].ref_count++;
+
+ ch = intc->event_channel[hwirq].value;
+ host = intc->channel_host[ch].value;
+
+ pruss_intc_update_cmr(intc, hwirq, ch);
+
+ reg_idx = hwirq / 32;
+ val = BIT(hwirq % 32);
+
+ /* clear and enable system event */
+ pruss_intc_write_reg(intc, PRU_INTC_ESR(reg_idx), val);
+ pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
+
+ if (++intc->channel_host[ch].ref_count == 1) {
+ pruss_intc_update_hmr(intc, ch, host);
+
+ /* enable host interrupts */
+ pruss_intc_write_reg(intc, PRU_INTC_HIEISR, host);
+ }
+
+	dev_dbg(dev, "mapped system_event = %lu channel = %d host = %d\n",
+ hwirq, ch, host);
+
+ mutex_unlock(&intc->lock);
+}
+
+/**
+ * pruss_intc_unmap() - unconfigure the PRUSS INTC
+ * @intc: PRUSS interrupt controller pointer
+ * @hwirq: the system event number
+ *
+ * Undo whatever was done in pruss_intc_map() for a PRU core.
+ * Mappings are reference counted, so resources are only disabled when there
+ * are no longer any users.
+ */
+static void pruss_intc_unmap(struct pruss_intc *intc, unsigned long hwirq)
+{
+ u8 ch, host, reg_idx;
+ u32 val;
+
+ mutex_lock(&intc->lock);
+
+ ch = intc->event_channel[hwirq].value;
+ host = intc->channel_host[ch].value;
+
+ if (--intc->channel_host[ch].ref_count == 0) {
+ /* disable host interrupts */
+ pruss_intc_write_reg(intc, PRU_INTC_HIDISR, host);
+
+ /* clear the map using reset value 0 */
+ pruss_intc_update_hmr(intc, ch, 0);
+ }
+
+ intc->event_channel[hwirq].ref_count--;
+ reg_idx = hwirq / 32;
+ val = BIT(hwirq % 32);
+
+ /* disable system events */
+ pruss_intc_write_reg(intc, PRU_INTC_ECR(reg_idx), val);
+ /* clear any pending status */
+ pruss_intc_write_reg(intc, PRU_INTC_SECR(reg_idx), val);
+
+ /* clear the map using reset value 0 */
+ pruss_intc_update_cmr(intc, hwirq, 0);
+
+ dev_dbg(intc->dev, "unmapped system_event = %lu channel = %d host = %d\n",
+ hwirq, ch, host);
+
+ mutex_unlock(&intc->lock);
+}
+
+static void pruss_intc_init(struct pruss_intc *intc)
+{
+ const struct pruss_intc_match_data *soc_config = intc->soc_config;
+ int num_chnl_map_regs, num_host_intr_regs, num_event_type_regs, i;
+
+ num_chnl_map_regs = DIV_ROUND_UP(soc_config->num_system_events,
+ CMR_EVT_PER_REG);
+ num_host_intr_regs = DIV_ROUND_UP(soc_config->num_host_events,
+ HMR_CH_PER_REG);
+ num_event_type_regs = DIV_ROUND_UP(soc_config->num_system_events, 32);
+
+ /*
+ * configure polarity (SIPR register) to active high and
+ * type (SITR register) to level interrupt for all system events
+ */
+ for (i = 0; i < num_event_type_regs; i++) {
+ pruss_intc_write_reg(intc, PRU_INTC_SIPR(i), 0xffffffff);
+ pruss_intc_write_reg(intc, PRU_INTC_SITR(i), 0);
+ }
+
+ /* clear all interrupt channel map registers, 4 events per register */
+ for (i = 0; i < num_chnl_map_regs; i++)
+ pruss_intc_write_reg(intc, PRU_INTC_CMR(i), 0);
+
+ /* clear all host interrupt map registers, 4 channels per register */
+ for (i = 0; i < num_host_intr_regs; i++)
+ pruss_intc_write_reg(intc, PRU_INTC_HMR(i), 0);
+
+ /* global interrupt enable */
+ pruss_intc_write_reg(intc, PRU_INTC_GER, 1);
+}
+
+static void pruss_intc_irq_ack(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
+}
+
+static void pruss_intc_irq_mask(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_EICR, hwirq);
+}
+
+static void pruss_intc_irq_unmask(struct irq_data *data)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ unsigned int hwirq = data->hwirq;
+
+ pruss_intc_write_reg(intc, PRU_INTC_EISR, hwirq);
+}
+
+static int pruss_intc_irq_reqres(struct irq_data *data)
+{
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void pruss_intc_irq_relres(struct irq_data *data)
+{
+ module_put(THIS_MODULE);
+}
+
+static int pruss_intc_irq_get_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+ u32 reg, mask, srsr;
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ reg = PRU_INTC_SRSR(data->hwirq / 32);
+ mask = BIT(data->hwirq % 32);
+
+ srsr = pruss_intc_read_reg(intc, reg);
+
+ *state = !!(srsr & mask);
+
+ return 0;
+}
+
+static int pruss_intc_irq_set_irqchip_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct pruss_intc *intc = irq_data_get_irq_chip_data(data);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state)
+ pruss_intc_write_reg(intc, PRU_INTC_SISR, data->hwirq);
+ else
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, data->hwirq);
+
+ return 0;
+}
+
+static struct irq_chip pruss_irqchip = {
+ .name = "pruss-intc",
+ .irq_ack = pruss_intc_irq_ack,
+ .irq_mask = pruss_intc_irq_mask,
+ .irq_unmask = pruss_intc_irq_unmask,
+ .irq_request_resources = pruss_intc_irq_reqres,
+ .irq_release_resources = pruss_intc_irq_relres,
+ .irq_get_irqchip_state = pruss_intc_irq_get_irqchip_state,
+ .irq_set_irqchip_state = pruss_intc_irq_set_irqchip_state,
+};
+
+static int pruss_intc_validate_mapping(struct pruss_intc *intc, int event,
+ int channel, int host)
+{
+ struct device *dev = intc->dev;
+ int ret = 0;
+
+ mutex_lock(&intc->lock);
+
+ /* check if sysevent already assigned */
+ if (intc->event_channel[event].ref_count > 0 &&
+ intc->event_channel[event].value != channel) {
+ dev_err(dev, "event %d (req. ch %d) already assigned to channel %d\n",
+ event, channel, intc->event_channel[event].value);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* check if channel already assigned */
+ if (intc->channel_host[channel].ref_count > 0 &&
+ intc->channel_host[channel].value != host) {
+ dev_err(dev, "channel %d (req. host %d) already assigned to host %d\n",
+ channel, host, intc->channel_host[channel].value);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ intc->event_channel[event].value = channel;
+ intc->channel_host[channel].value = host;
+
+unlock:
+ mutex_unlock(&intc->lock);
+ return ret;
+}
+
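+/*
+ * Translate a three-cell DT interrupt specifier of the form
+ * <system event, intc channel, host interrupt>, validating each cell
+ * against the SoC limits before recording the requested mapping.
+ */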
+static int
+pruss_intc_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ struct pruss_intc *intc = d->host_data;
+ struct device *dev = intc->dev;
+ int ret, sys_event, channel, host;
+
+ if (intsize < 3)
+ return -EINVAL;
+
+ sys_event = intspec[0];
+ if (sys_event < 0 || sys_event >= intc->soc_config->num_system_events) {
+ dev_err(dev, "%d is not a valid event number\n", sys_event);
+ return -EINVAL;
+ }
+
+ channel = intspec[1];
+ if (channel < 0 || channel >= intc->soc_config->num_host_events) {
+ dev_err(dev, "%d is not a valid channel number\n", channel);
+ return -EINVAL;
+ }
+
+ host = intspec[2];
+ if (host < 0 || host >= intc->soc_config->num_host_events) {
+ dev_err(dev, "%d is not a valid host irq number\n", host);
+ return -EINVAL;
+ }
+
+ /* check if requested sys_event was already mapped, if so validate it */
+ ret = pruss_intc_validate_mapping(intc, sys_event, channel, host);
+ if (ret)
+ return ret;
+
+ *out_hwirq = sys_event;
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int pruss_intc_irq_domain_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct pruss_intc *intc = d->host_data;
+
+ pruss_intc_map(intc, hw);
+
+ irq_set_chip_data(virq, intc);
+ irq_set_chip_and_handler(virq, &pruss_irqchip, handle_level_irq);
+
+ return 0;
+}
+
+static void pruss_intc_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+ struct pruss_intc *intc = d->host_data;
+ unsigned long hwirq = irqd_to_hwirq(irq_get_irq_data(virq));
+
+ irq_set_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_data(virq, NULL);
+ pruss_intc_unmap(intc, hwirq);
+}
+
+static const struct irq_domain_ops pruss_intc_irq_domain_ops = {
+ .xlate = pruss_intc_irq_domain_xlate,
+ .map = pruss_intc_irq_domain_map,
+ .unmap = pruss_intc_irq_domain_unmap,
+};
+
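+/*
+ * Chained handler for a host interrupt: keep reading the host's HIPIR
+ * register to get the highest-priority pending system event and
+ * dispatch its mapped virq, until the register signals that nothing is
+ * pending.
+ */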
+static void pruss_intc_irq_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct pruss_host_irq_data *host_irq_data = irq_get_handler_data(irq);
+ struct pruss_intc *intc = host_irq_data->intc;
+ u8 host_irq = host_irq_data->host_irq + FIRST_PRU_HOST_INT;
+
+ chained_irq_enter(chip, desc);
+
+ while (true) {
+ u32 hipir;
+ unsigned int virq;
+ int hwirq;
+
+ /* get highest priority pending PRUSS system event */
+ hipir = pruss_intc_read_reg(intc, PRU_INTC_HIPIR(host_irq));
+ if (hipir & INTC_HIPIR_NONE_HINT)
+ break;
+
+ hwirq = hipir & GENMASK(9, 0);
+ virq = irq_find_mapping(intc->domain, hwirq);
+
+ /*
+ * NOTE: manually ACK any system events that do not have a
+ * handler mapped yet
+ */
+ if (WARN_ON_ONCE(!virq))
+ pruss_intc_write_reg(intc, PRU_INTC_SICR, hwirq);
+ else
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static const char * const irq_names[MAX_NUM_HOST_IRQS] = {
+ "host_intr0", "host_intr1", "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5", "host_intr6", "host_intr7",
+};
+
+static int pruss_intc_probe(struct platform_device *pdev)
+{
+ const struct pruss_intc_match_data *data;
+ struct device *dev = &pdev->dev;
+ struct pruss_intc *intc;
+ struct pruss_host_irq_data *host_data;
+ int i, irq, ret;
+ u8 max_system_events, irqs_reserved = 0;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ max_system_events = data->num_system_events;
+
+ intc = devm_kzalloc(dev, sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ intc->soc_config = data;
+ intc->dev = dev;
+ platform_set_drvdata(pdev, intc);
+
+ intc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(intc->base))
+ return PTR_ERR(intc->base);
+
+ ret = of_property_read_u8(dev->of_node, "ti,irqs-reserved",
+ &irqs_reserved);
+
+ /*
+ * The irqs-reserved property is used only on some SoCs, so not
+ * having it is still valid.
+ */
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ pruss_intc_init(intc);
+
+ mutex_init(&intc->lock);
+
+ intc->domain = irq_domain_add_linear(dev->of_node, max_system_events,
+ &pruss_intc_irq_domain_ops, intc);
+ if (!intc->domain)
+ return -ENOMEM;
+
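+ /*
+ * Chain a handler to each host interrupt wired to this processor,
+ * skipping the ones marked as reserved for other cores via the
+ * optional "ti,irqs-reserved" bitmask.
+ */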
+ for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
+ if (irqs_reserved & BIT(i))
+ continue;
+
+ irq = platform_get_irq_byname(pdev, irq_names[i]);
+ if (irq <= 0) {
+ ret = (irq == 0) ? -EINVAL : irq;
+ goto fail_irq;
+ }
+
+ intc->irqs[i] = irq;
+
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data) {
+ ret = -ENOMEM;
+ goto fail_irq;
+ }
+
+ host_data->intc = intc;
+ host_data->host_irq = i;
+
+ irq_set_handler_data(irq, host_data);
+ irq_set_chained_handler(irq, pruss_intc_irq_handler);
+ }
+
+ return 0;
+
+fail_irq:
+ while (--i >= 0) {
+ if (intc->irqs[i])
+ irq_set_chained_handler_and_data(intc->irqs[i], NULL,
+ NULL);
+ }
+
+ irq_domain_remove(intc->domain);
+
+ return ret;
+}
+
+static int pruss_intc_remove(struct platform_device *pdev)
+{
+ struct pruss_intc *intc = platform_get_drvdata(pdev);
+ u8 max_system_events = intc->soc_config->num_system_events;
+ unsigned int hwirq;
+ int i;
+
+ for (i = 0; i < MAX_NUM_HOST_IRQS; i++) {
+ if (intc->irqs[i])
+ irq_set_chained_handler_and_data(intc->irqs[i], NULL,
+ NULL);
+ }
+
+ for (hwirq = 0; hwirq < max_system_events; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
+
+ irq_domain_remove(intc->domain);
+
+ return 0;
+}
+
+static const struct pruss_intc_match_data pruss_intc_data = {
+ .num_system_events = 64,
+ .num_host_events = 10,
+};
+
+static const struct pruss_intc_match_data icssg_intc_data = {
+ .num_system_events = 160,
+ .num_host_events = 20,
+};
+
+static const struct of_device_id pruss_intc_of_match[] = {
+ {
+ .compatible = "ti,pruss-intc",
+ .data = &pruss_intc_data,
+ },
+ {
+ .compatible = "ti,icssg-intc",
+ .data = &icssg_intc_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
+
+static struct platform_driver pruss_intc_driver = {
+ .driver = {
+ .name = "pruss-intc",
+ .of_match_table = pruss_intc_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pruss_intc_probe,
+ .remove = pruss_intc_remove,
+};
+module_platform_driver(pruss_intc_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("TI PRU-ICSS INTC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 3819185bfd02..cb7f60b3b4a9 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -71,8 +71,7 @@ struct intc_irqpin_priv {
};
struct intc_irqpin_config {
- unsigned int irlm_bit;
- unsigned needs_irlm:1;
+ int irlm_bit; /* -1 if non-existent */
};
static unsigned long intc_irqpin_read32(void __iomem *iomem)
@@ -349,11 +348,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
.irlm_bit = 23, /* ICR0.IRLM0 */
- .needs_irlm = 1,
};
static const struct intc_irqpin_config intc_irqpin_rmobile = {
- .needs_irlm = 0,
+ .irlm_bit = -1,
};
static const struct of_device_id intc_irqpin_dt_ids[] = {
@@ -470,7 +468,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
}
/* configure "individual IRQ mode" where needed */
- if (config && config->needs_irlm) {
+ if (config && config->irlm_bit >= 0) {
if (io[INTC_IRQPIN_REG_IRLM])
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
config->irlm_bit, 1, 1);
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
deleted file mode 100644
index d2031fecc386..000000000000
--- a/drivers/irqchip/irq-s3c24xx.c
+++ /dev/null
@@ -1,1330 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * S3C24XX IRQ handling
- *
- * Copyright (c) 2003-2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * Copyright (c) 2012 Heiko Stuebner <heiko@sntech.de>
-*/
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/irqdomain.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-
-#include <asm/exception.h>
-#include <asm/mach/irq.h>
-
-#include <mach/regs-irq.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/cpu.h>
-#include <plat/regs-irqtype.h>
-#include <plat/pm.h>
-
-#define S3C_IRQTYPE_NONE 0
-#define S3C_IRQTYPE_EINT 1
-#define S3C_IRQTYPE_EDGE 2
-#define S3C_IRQTYPE_LEVEL 3
-
-struct s3c_irq_data {
- unsigned int type;
- unsigned long offset;
- unsigned long parent_irq;
-
- /* data gets filled during init */
- struct s3c_irq_intc *intc;
- unsigned long sub_bits;
- struct s3c_irq_intc *sub_intc;
-};
-
-/*
- * Structure holding the controller data
- * @reg_pending register holding pending irqs
- * @reg_intpnd special register intpnd in main intc
- * @reg_mask mask register
- * @domain irq_domain of the controller
- * @parent parent controller for ext and sub irqs
- * @irqs irq-data, always s3c_irq_data[32]
- */
-struct s3c_irq_intc {
- void __iomem *reg_pending;
- void __iomem *reg_intpnd;
- void __iomem *reg_mask;
- struct irq_domain *domain;
- struct s3c_irq_intc *parent;
- struct s3c_irq_data *irqs;
-};
-
-/*
- * Array holding pointers to the global controller structs
- * [0] ... main_intc
- * [1] ... sub_intc
- * [2] ... main_intc2 on s3c2416
- */
-static struct s3c_irq_intc *s3c_intc[3];
-
-static void s3c_irq_mask(struct irq_data *data)
-{
- struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
- struct s3c_irq_intc *intc = irq_data->intc;
- struct s3c_irq_intc *parent_intc = intc->parent;
- struct s3c_irq_data *parent_data;
- unsigned long mask;
- unsigned int irqno;
-
- mask = readl_relaxed(intc->reg_mask);
- mask |= (1UL << irq_data->offset);
- writel_relaxed(mask, intc->reg_mask);
-
- if (parent_intc) {
- parent_data = &parent_intc->irqs[irq_data->parent_irq];
-
- /* check to see if we need to mask the parent IRQ
- * The parent_irq is always in main_intc, so the hwirq
- * for find_mapping does not need an offset in any case.
- */
- if ((mask & parent_data->sub_bits) == parent_data->sub_bits) {
- irqno = irq_find_mapping(parent_intc->domain,
- irq_data->parent_irq);
- s3c_irq_mask(irq_get_irq_data(irqno));
- }
- }
-}
-
-static void s3c_irq_unmask(struct irq_data *data)
-{
- struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
- struct s3c_irq_intc *intc = irq_data->intc;
- struct s3c_irq_intc *parent_intc = intc->parent;
- unsigned long mask;
- unsigned int irqno;
-
- mask = readl_relaxed(intc->reg_mask);
- mask &= ~(1UL << irq_data->offset);
- writel_relaxed(mask, intc->reg_mask);
-
- if (parent_intc) {
- irqno = irq_find_mapping(parent_intc->domain,
- irq_data->parent_irq);
- s3c_irq_unmask(irq_get_irq_data(irqno));
- }
-}
-
-static inline void s3c_irq_ack(struct irq_data *data)
-{
- struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
- struct s3c_irq_intc *intc = irq_data->intc;
- unsigned long bitval = 1UL << irq_data->offset;
-
- writel_relaxed(bitval, intc->reg_pending);
- if (intc->reg_intpnd)
- writel_relaxed(bitval, intc->reg_intpnd);
-}
-
-static int s3c_irq_type(struct irq_data *data, unsigned int type)
-{
- switch (type) {
- case IRQ_TYPE_NONE:
- break;
- case IRQ_TYPE_EDGE_RISING:
- case IRQ_TYPE_EDGE_FALLING:
- case IRQ_TYPE_EDGE_BOTH:
- irq_set_handler(data->irq, handle_edge_irq);
- break;
- case IRQ_TYPE_LEVEL_LOW:
- case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(data->irq, handle_level_irq);
- break;
- default:
- pr_err("No such irq type %d\n", type);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s3c_irqext_type_set(void __iomem *gpcon_reg,
- void __iomem *extint_reg,
- unsigned long gpcon_offset,
- unsigned long extint_offset,
- unsigned int type)
-{
- unsigned long newvalue = 0, value;
-
- /* Set the GPIO to external interrupt mode */
- value = readl_relaxed(gpcon_reg);
- value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
- writel_relaxed(value, gpcon_reg);
-
- /* Set the external interrupt to pointed trigger type */
- switch (type)
- {
- case IRQ_TYPE_NONE:
- pr_warn("No edge setting!\n");
- break;
-
- case IRQ_TYPE_EDGE_RISING:
- newvalue = S3C2410_EXTINT_RISEEDGE;
- break;
-
- case IRQ_TYPE_EDGE_FALLING:
- newvalue = S3C2410_EXTINT_FALLEDGE;
- break;
-
- case IRQ_TYPE_EDGE_BOTH:
- newvalue = S3C2410_EXTINT_BOTHEDGE;
- break;
-
- case IRQ_TYPE_LEVEL_LOW:
- newvalue = S3C2410_EXTINT_LOWLEV;
- break;
-
- case IRQ_TYPE_LEVEL_HIGH:
- newvalue = S3C2410_EXTINT_HILEV;
- break;
-
- default:
- pr_err("No such irq type %d\n", type);
- return -EINVAL;
- }
-
- value = readl_relaxed(extint_reg);
- value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
- writel_relaxed(value, extint_reg);
-
- return 0;
-}
-
-static int s3c_irqext_type(struct irq_data *data, unsigned int type)
-{
- void __iomem *extint_reg;
- void __iomem *gpcon_reg;
- unsigned long gpcon_offset, extint_offset;
-
- if ((data->hwirq >= 4) && (data->hwirq <= 7)) {
- gpcon_reg = S3C2410_GPFCON;
- extint_reg = S3C24XX_EXTINT0;
- gpcon_offset = (data->hwirq) * 2;
- extint_offset = (data->hwirq) * 4;
- } else if ((data->hwirq >= 8) && (data->hwirq <= 15)) {
- gpcon_reg = S3C2410_GPGCON;
- extint_reg = S3C24XX_EXTINT1;
- gpcon_offset = (data->hwirq - 8) * 2;
- extint_offset = (data->hwirq - 8) * 4;
- } else if ((data->hwirq >= 16) && (data->hwirq <= 23)) {
- gpcon_reg = S3C2410_GPGCON;
- extint_reg = S3C24XX_EXTINT2;
- gpcon_offset = (data->hwirq - 8) * 2;
- extint_offset = (data->hwirq - 16) * 4;
- } else {
- return -EINVAL;
- }
-
- return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
- extint_offset, type);
-}
-
-static int s3c_irqext0_type(struct irq_data *data, unsigned int type)
-{
- void __iomem *extint_reg;
- void __iomem *gpcon_reg;
- unsigned long gpcon_offset, extint_offset;
-
- if (data->hwirq <= 3) {
- gpcon_reg = S3C2410_GPFCON;
- extint_reg = S3C24XX_EXTINT0;
- gpcon_offset = (data->hwirq) * 2;
- extint_offset = (data->hwirq) * 4;
- } else {
- return -EINVAL;
- }
-
- return s3c_irqext_type_set(gpcon_reg, extint_reg, gpcon_offset,
- extint_offset, type);
-}
-
-static struct irq_chip s3c_irq_chip = {
- .name = "s3c",
- .irq_ack = s3c_irq_ack,
- .irq_mask = s3c_irq_mask,
- .irq_unmask = s3c_irq_unmask,
- .irq_set_type = s3c_irq_type,
- .irq_set_wake = s3c_irq_wake
-};
-
-static struct irq_chip s3c_irq_level_chip = {
- .name = "s3c-level",
- .irq_mask = s3c_irq_mask,
- .irq_unmask = s3c_irq_unmask,
- .irq_ack = s3c_irq_ack,
- .irq_set_type = s3c_irq_type,
-};
-
-static struct irq_chip s3c_irqext_chip = {
- .name = "s3c-ext",
- .irq_mask = s3c_irq_mask,
- .irq_unmask = s3c_irq_unmask,
- .irq_ack = s3c_irq_ack,
- .irq_set_type = s3c_irqext_type,
- .irq_set_wake = s3c_irqext_wake
-};
-
-static struct irq_chip s3c_irq_eint0t4 = {
- .name = "s3c-ext0",
- .irq_ack = s3c_irq_ack,
- .irq_mask = s3c_irq_mask,
- .irq_unmask = s3c_irq_unmask,
- .irq_set_wake = s3c_irq_wake,
- .irq_set_type = s3c_irqext0_type,
-};
-
-static void s3c_irq_demux(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
- struct s3c_irq_intc *intc = irq_data->intc;
- struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
- unsigned int n, offset, irq;
- unsigned long src, msk;
-
- /* we're using individual domains for the non-dt case
- * and one big domain for the dt case where the subintc
- * starts at hwirq number 32.
- */
- offset = irq_domain_get_of_node(intc->domain) ? 32 : 0;
-
- chained_irq_enter(chip, desc);
-
- src = readl_relaxed(sub_intc->reg_pending);
- msk = readl_relaxed(sub_intc->reg_mask);
-
- src &= ~msk;
- src &= irq_data->sub_bits;
-
- while (src) {
- n = __ffs(src);
- src &= ~(1 << n);
- irq = irq_find_mapping(sub_intc->domain, offset + n);
- generic_handle_irq(irq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
- struct pt_regs *regs, int intc_offset)
-{
- int pnd;
- int offset;
-
- pnd = readl_relaxed(intc->reg_intpnd);
- if (!pnd)
- return false;
-
- /* non-dt machines use individual domains */
- if (!irq_domain_get_of_node(intc->domain))
- intc_offset = 0;
-
- /* We have a problem that the INTOFFSET register does not always
- * show one interrupt. Occasionally we get two interrupts through
- * the prioritiser, and this causes the INTOFFSET register to show
- * what looks like the logical-or of the two interrupt numbers.
- *
- * Thanks to Klaus, Shannon, et al for helping to debug this problem
- */
- offset = readl_relaxed(intc->reg_intpnd + 4);
-
- /* Find the bit manually, when the offset is wrong.
- * The pending register only ever contains the one bit of the next
- * interrupt to handle.
- */
- if (!(pnd & (1 << offset)))
- offset = __ffs(pnd);
-
- handle_domain_irq(intc->domain, intc_offset + offset, regs);
- return true;
-}
-
-asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs)
-{
- do {
- if (likely(s3c_intc[0]))
- if (s3c24xx_handle_intc(s3c_intc[0], regs, 0))
- continue;
-
- if (s3c_intc[2])
- if (s3c24xx_handle_intc(s3c_intc[2], regs, 64))
- continue;
-
- break;
- } while (1);
-}
-
-#ifdef CONFIG_FIQ
-/**
- * s3c24xx_set_fiq - set the FIQ routing
- * @irq: IRQ number to route to FIQ on processor.
- * @on: Whether to route @irq to the FIQ, or to remove the FIQ routing.
- *
- * Change the state of the IRQ to FIQ routing depending on @irq and @on. If
- * @on is true, the @irq is checked to see if it can be routed and the
- * interrupt controller updated to route the IRQ. If @on is false, the FIQ
- * routing is cleared, regardless of which @irq is specified.
- */
-int s3c24xx_set_fiq(unsigned int irq, bool on)
-{
- u32 intmod;
- unsigned offs;
-
- if (on) {
- offs = irq - FIQ_START;
- if (offs > 31)
- return -EINVAL;
-
- intmod = 1 << offs;
- } else {
- intmod = 0;
- }
-
- writel_relaxed(intmod, S3C2410_INTMOD);
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(s3c24xx_set_fiq);
-#endif
-
-static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct s3c_irq_intc *intc = h->host_data;
- struct s3c_irq_data *irq_data = &intc->irqs[hw];
- struct s3c_irq_intc *parent_intc;
- struct s3c_irq_data *parent_irq_data;
- unsigned int irqno;
-
- /* attach controller pointer to irq_data */
- irq_data->intc = intc;
- irq_data->offset = hw;
-
- parent_intc = intc->parent;
-
- /* set handler and flags */
- switch (irq_data->type) {
- case S3C_IRQTYPE_NONE:
- return 0;
- case S3C_IRQTYPE_EINT:
- /* On the S3C2412, the EINT0to3 have a parent irq
- * but need the s3c_irq_eint0t4 chip
- */
- if (parent_intc && (!soc_is_s3c2412() || hw >= 4))
- irq_set_chip_and_handler(virq, &s3c_irqext_chip,
- handle_edge_irq);
- else
- irq_set_chip_and_handler(virq, &s3c_irq_eint0t4,
- handle_edge_irq);
- break;
- case S3C_IRQTYPE_EDGE:
- if (parent_intc || intc->reg_pending == S3C2416_SRCPND2)
- irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
- handle_edge_irq);
- else
- irq_set_chip_and_handler(virq, &s3c_irq_chip,
- handle_edge_irq);
- break;
- case S3C_IRQTYPE_LEVEL:
- if (parent_intc)
- irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
- handle_level_irq);
- else
- irq_set_chip_and_handler(virq, &s3c_irq_chip,
- handle_level_irq);
- break;
- default:
- pr_err("irq-s3c24xx: unsupported irqtype %d\n", irq_data->type);
- return -EINVAL;
- }
-
- irq_set_chip_data(virq, irq_data);
-
- if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
- if (irq_data->parent_irq > 31) {
- pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
- irq_data->parent_irq);
- return -EINVAL;
- }
-
- parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
- parent_irq_data->sub_intc = intc;
- parent_irq_data->sub_bits |= (1UL << hw);
-
- /* attach the demuxer to the parent irq */
- irqno = irq_find_mapping(parent_intc->domain,
- irq_data->parent_irq);
- if (!irqno) {
- pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
- irq_data->parent_irq);
- return -EINVAL;
- }
- irq_set_chained_handler(irqno, s3c_irq_demux);
- }
-
- return 0;
-}
-
-static const struct irq_domain_ops s3c24xx_irq_ops = {
- .map = s3c24xx_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
-{
- void __iomem *reg_source;
- unsigned long pend;
- unsigned long last;
- int i;
-
- /* if intpnd is set, read the next pending irq from there */
- reg_source = intc->reg_intpnd ? intc->reg_intpnd : intc->reg_pending;
-
- last = 0;
- for (i = 0; i < 4; i++) {
- pend = readl_relaxed(reg_source);
-
- if (pend == 0 || pend == last)
- break;
-
- writel_relaxed(pend, intc->reg_pending);
- if (intc->reg_intpnd)
- writel_relaxed(pend, intc->reg_intpnd);
-
- pr_info("irq: clearing pending status %08x\n", (int)pend);
- last = pend;
- }
-}
-
-static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np,
- struct s3c_irq_data *irq_data,
- struct s3c_irq_intc *parent,
- unsigned long address)
-{
- struct s3c_irq_intc *intc;
- void __iomem *base = (void *)0xf6000000; /* static mapping */
- int irq_num;
- int irq_start;
- int ret;
-
- intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
- if (!intc)
- return ERR_PTR(-ENOMEM);
-
- intc->irqs = irq_data;
-
- if (parent)
- intc->parent = parent;
-
- /* select the correct data for the controller.
- * Need to hard code the irq num start and offset
- * to preserve the static mapping for now
- */
- switch (address) {
- case 0x4a000000:
- pr_debug("irq: found main intc\n");
- intc->reg_pending = base;
- intc->reg_mask = base + 0x08;
- intc->reg_intpnd = base + 0x10;
- irq_num = 32;
- irq_start = S3C2410_IRQ(0);
- break;
- case 0x4a000018:
- pr_debug("irq: found subintc\n");
- intc->reg_pending = base + 0x18;
- intc->reg_mask = base + 0x1c;
- irq_num = 29;
- irq_start = S3C2410_IRQSUB(0);
- break;
- case 0x4a000040:
- pr_debug("irq: found intc2\n");
- intc->reg_pending = base + 0x40;
- intc->reg_mask = base + 0x48;
- intc->reg_intpnd = base + 0x50;
- irq_num = 8;
- irq_start = S3C2416_IRQ(0);
- break;
- case 0x560000a4:
- pr_debug("irq: found eintc\n");
- base = (void *)0xfd000000;
-
- intc->reg_mask = base + 0xa4;
- intc->reg_pending = base + 0xa8;
- irq_num = 24;
- irq_start = S3C2410_IRQ(32);
- break;
- default:
- pr_err("irq: unsupported controller address\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* now that all the data is complete, init the irq-domain */
- s3c24xx_clear_intc(intc);
- intc->domain = irq_domain_add_legacy(np, irq_num, irq_start,
- 0, &s3c24xx_irq_ops,
- intc);
- if (!intc->domain) {
- pr_err("irq: could not create irq-domain\n");
- ret = -EINVAL;
- goto err;
- }
-
- set_handle_irq(s3c24xx_handle_irq);
-
- return intc;
-
-err:
- kfree(intc);
- return ERR_PTR(ret);
-}
-
-static struct s3c_irq_data __maybe_unused init_eint[32] = {
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
-};
-
-#ifdef CONFIG_CPU_S3C2410
-static struct s3c_irq_data init_s3c2410base[32] = {
- { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-static struct s3c_irq_data init_s3c2410subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
-};
-
-void __init s3c2410_init_irq(void)
-{
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2410base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2410subint[0],
- s3c_intc[0], 0x4a000018);
- s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
-}
-#endif
-
-#ifdef CONFIG_CPU_S3C2412
-static struct s3c_irq_data init_s3c2412base[32] = {
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT1 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT2 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* SDI/CF */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-static struct s3c_irq_data init_s3c2412eint[32] = {
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 0 }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 1 }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 2 }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 3 }, /* EINT3 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT4 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT5 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT6 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 4 }, /* EINT7 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT8 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT9 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT10 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT11 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT12 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT13 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT14 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT15 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT16 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT17 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT18 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT19 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT20 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT21 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT22 */
- { .type = S3C_IRQTYPE_EINT, .parent_irq = 5 }, /* EINT23 */
-};
-
-static struct s3c_irq_data init_s3c2412subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
- { .type = S3C_IRQTYPE_NONE, },
- { .type = S3C_IRQTYPE_NONE, },
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* SDI */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 21 }, /* CF */
-};
-
-void __init s3c2412_init_irq(void)
-{
- pr_info("S3C2412: IRQ Support\n");
-
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2412base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c24xx_init_intc(NULL, &init_s3c2412eint[0], s3c_intc[0], 0x560000a4);
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2412subint[0],
- s3c_intc[0], 0x4a000018);
-}
-#endif
-
-#ifdef CONFIG_CPU_S3C2416
-static struct s3c_irq_data init_s3c2416base[32] = {
- { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
- { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
- { .type = S3C_IRQTYPE_NONE, }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* NAND */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_NONE, },
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-static struct s3c_irq_data init_s3c2416subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
-};
-
-static struct s3c_irq_data init_s3c2416_second[32] = {
- { .type = S3C_IRQTYPE_EDGE }, /* 2D */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE }, /* PCM0 */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_EDGE }, /* I2S0 */
-};
-
-void __init s3c2416_init_irq(void)
-{
- pr_info("S3C2416: IRQ Support\n");
-
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2416base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2416subint[0],
- s3c_intc[0], 0x4a000018);
-
- s3c_intc[2] = s3c24xx_init_intc(NULL, &init_s3c2416_second[0],
- NULL, 0x4a000040);
-}
-
-#endif
-
-#ifdef CONFIG_CPU_S3C2440
-static struct s3c_irq_data init_s3c2440base[32] = {
- { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-static struct s3c_irq_data init_s3c2440subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
-};
-
-void __init s3c2440_init_irq(void)
-{
- pr_info("S3C2440: IRQ Support\n");
-
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2440base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2440subint[0],
- s3c_intc[0], 0x4a000018);
-}
-#endif
-
-#ifdef CONFIG_CPU_S3C2442
-static struct s3c_irq_data init_s3c2442base[32] = {
- { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_EDGE, }, /* WDT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* LCD */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* DMA3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* NFCON */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-static struct s3c_irq_data init_s3c2442subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
-};
-
-void __init s3c2442_init_irq(void)
-{
- pr_info("S3C2442: IRQ Support\n");
-
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2442base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2442subint[0],
- s3c_intc[0], 0x4a000018);
-}
-#endif
-
-#ifdef CONFIG_CPU_S3C2443
-static struct s3c_irq_data init_s3c2443base[32] = {
- { .type = S3C_IRQTYPE_EINT, }, /* EINT0 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT1 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT2 */
- { .type = S3C_IRQTYPE_EINT, }, /* EINT3 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT4to7 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* EINT8to23 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* CAM */
- { .type = S3C_IRQTYPE_EDGE, }, /* nBATT_FLT */
- { .type = S3C_IRQTYPE_EDGE, }, /* TICK */
- { .type = S3C_IRQTYPE_LEVEL, }, /* WDT/AC97 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER2 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* TIMER4 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART2 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* LCD */
- { .type = S3C_IRQTYPE_LEVEL, }, /* DMA */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART3 */
- { .type = S3C_IRQTYPE_EDGE, }, /* CFON */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SDI0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI0 */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* NAND */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBD */
- { .type = S3C_IRQTYPE_EDGE, }, /* USBH */
- { .type = S3C_IRQTYPE_EDGE, }, /* IIC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* UART0 */
- { .type = S3C_IRQTYPE_EDGE, }, /* SPI1 */
- { .type = S3C_IRQTYPE_EDGE, }, /* RTC */
- { .type = S3C_IRQTYPE_LEVEL, }, /* ADCPARENT */
-};
-
-
-static struct s3c_irq_data init_s3c2443subint[32] = {
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 28 }, /* UART0-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 23 }, /* UART1-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 15 }, /* UART2-ERR */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* TC */
- { .type = S3C_IRQTYPE_EDGE, .parent_irq = 31 }, /* ADC */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_C */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 6 }, /* CAM_P */
- { .type = S3C_IRQTYPE_NONE }, /* reserved */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD1 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD2 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD3 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 16 }, /* LCD4 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA0 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA1 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA2 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA3 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA4 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 17 }, /* DMA5 */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-RX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-TX */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 18 }, /* UART3-ERR */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* WDT */
- { .type = S3C_IRQTYPE_LEVEL, .parent_irq = 9 }, /* AC97 */
-};
-
-void __init s3c2443_init_irq(void)
-{
- pr_info("S3C2443: IRQ Support\n");
-
-#ifdef CONFIG_FIQ
- init_FIQ(FIQ_START);
-#endif
-
- s3c_intc[0] = s3c24xx_init_intc(NULL, &init_s3c2443base[0], NULL,
- 0x4a000000);
- if (IS_ERR(s3c_intc[0])) {
- pr_err("irq: could not create main interrupt controller\n");
- return;
- }
-
- s3c24xx_init_intc(NULL, &init_eint[0], s3c_intc[0], 0x560000a4);
- s3c_intc[1] = s3c24xx_init_intc(NULL, &init_s3c2443subint[0],
- s3c_intc[0], 0x4a000018);
-}
-#endif
-
-#ifdef CONFIG_OF
-static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- unsigned int ctrl_num = hw / 32;
- unsigned int intc_hw = hw % 32;
- struct s3c_irq_intc *intc = s3c_intc[ctrl_num];
- struct s3c_irq_intc *parent_intc = intc->parent;
- struct s3c_irq_data *irq_data = &intc->irqs[intc_hw];
-
- /* attach controller pointer to irq_data */
- irq_data->intc = intc;
- irq_data->offset = intc_hw;
-
- if (!parent_intc)
- irq_set_chip_and_handler(virq, &s3c_irq_chip, handle_edge_irq);
- else
- irq_set_chip_and_handler(virq, &s3c_irq_level_chip,
- handle_edge_irq);
-
- irq_set_chip_data(virq, irq_data);
-
- return 0;
-}
-
-/* Translate our of irq notation
- * format: <ctrl_num ctrl_irq parent_irq type>
- */
-static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
-{
- struct s3c_irq_intc *intc;
- struct s3c_irq_intc *parent_intc;
- struct s3c_irq_data *irq_data;
- struct s3c_irq_data *parent_irq_data;
- int irqno;
-
- if (WARN_ON(intsize < 4))
- return -EINVAL;
-
- if (intspec[0] > 2 || !s3c_intc[intspec[0]]) {
- pr_err("controller number %d invalid\n", intspec[0]);
- return -EINVAL;
- }
- intc = s3c_intc[intspec[0]];
-
- *out_hwirq = intspec[0] * 32 + intspec[2];
- *out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
-
- parent_intc = intc->parent;
- if (parent_intc) {
- irq_data = &intc->irqs[intspec[2]];
- irq_data->parent_irq = intspec[1];
- parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
- parent_irq_data->sub_intc = intc;
- parent_irq_data->sub_bits |= (1UL << intspec[2]);
-
- /* parent_intc is always s3c_intc[0], so no offset */
- irqno = irq_create_mapping(parent_intc->domain, intspec[1]);
- if (irqno < 0) {
- pr_err("irq: could not map parent interrupt\n");
- return irqno;
- }
-
- irq_set_chained_handler(irqno, s3c_irq_demux);
- }
-
- return 0;
-}
-
-static const struct irq_domain_ops s3c24xx_irq_ops_of = {
- .map = s3c24xx_irq_map_of,
- .xlate = s3c24xx_irq_xlate_of,
-};
-
-struct s3c24xx_irq_of_ctrl {
- char *name;
- unsigned long offset;
- struct s3c_irq_intc **handle;
- struct s3c_irq_intc **parent;
- struct irq_domain_ops *ops;
-};
-
-static int __init s3c_init_intc_of(struct device_node *np,
- struct device_node *interrupt_parent,
- struct s3c24xx_irq_of_ctrl *s3c_ctrl, int num_ctrl)
-{
- struct s3c_irq_intc *intc;
- struct s3c24xx_irq_of_ctrl *ctrl;
- struct irq_domain *domain;
- void __iomem *reg_base;
- int i;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- pr_err("irq-s3c24xx: could not map irq registers\n");
- return -EINVAL;
- }
-
- domain = irq_domain_add_linear(np, num_ctrl * 32,
- &s3c24xx_irq_ops_of, NULL);
- if (!domain) {
- pr_err("irq: could not create irq-domain\n");
- return -EINVAL;
- }
-
- for (i = 0; i < num_ctrl; i++) {
- ctrl = &s3c_ctrl[i];
-
- pr_debug("irq: found controller %s\n", ctrl->name);
-
- intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
- if (!intc)
- return -ENOMEM;
-
- intc->domain = domain;
- intc->irqs = kcalloc(32, sizeof(struct s3c_irq_data),
- GFP_KERNEL);
- if (!intc->irqs) {
- kfree(intc);
- return -ENOMEM;
- }
-
- if (ctrl->parent) {
- intc->reg_pending = reg_base + ctrl->offset;
- intc->reg_mask = reg_base + ctrl->offset + 0x4;
-
- if (*(ctrl->parent)) {
- intc->parent = *(ctrl->parent);
- } else {
- pr_warn("irq: parent of %s missing\n",
- ctrl->name);
- kfree(intc->irqs);
- kfree(intc);
- continue;
- }
- } else {
- intc->reg_pending = reg_base + ctrl->offset;
- intc->reg_mask = reg_base + ctrl->offset + 0x08;
- intc->reg_intpnd = reg_base + ctrl->offset + 0x10;
- }
-
- s3c24xx_clear_intc(intc);
- s3c_intc[i] = intc;
- }
-
- set_handle_irq(s3c24xx_handle_irq);
-
- return 0;
-}
-
-static struct s3c24xx_irq_of_ctrl s3c2410_ctrl[] = {
- {
- .name = "intc",
- .offset = 0,
- }, {
- .name = "subintc",
- .offset = 0x18,
- .parent = &s3c_intc[0],
- }
-};
-
-int __init s3c2410_init_intc_of(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- return s3c_init_intc_of(np, interrupt_parent,
- s3c2410_ctrl, ARRAY_SIZE(s3c2410_ctrl));
-}
-IRQCHIP_DECLARE(s3c2410_irq, "samsung,s3c2410-irq", s3c2410_init_intc_of);
-
-static struct s3c24xx_irq_of_ctrl s3c2416_ctrl[] = {
- {
- .name = "intc",
- .offset = 0,
- }, {
- .name = "subintc",
- .offset = 0x18,
- .parent = &s3c_intc[0],
- }, {
- .name = "intc2",
- .offset = 0x40,
- }
-};
-
-int __init s3c2416_init_intc_of(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- return s3c_init_intc_of(np, interrupt_parent,
- s3c2416_ctrl, ARRAY_SIZE(s3c2416_ctrl));
-}
-IRQCHIP_DECLARE(s3c2416_irq, "samsung,s3c2416-irq", s3c2416_init_intc_of);
-#endif
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index eaa3e9fe54e9..6f432d2a5ceb 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
struct irq_data *d, int enable)
{
int cpu;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
for_each_cpu(cpu, mask) {
@@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d)
{
struct cpumask amask;
unsigned int cpu;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
cpumask_and(&amask, &priv->lmask, cpu_online_mask);
cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
@@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d)
static void plic_irq_mask(struct irq_data *d)
{
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
plic_irq_toggle(&priv->lmask, d, 0);
}
@@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d,
{
unsigned int cpu;
struct cpumask amask;
- struct plic_priv *priv = irq_get_chip_data(d->irq);
+ struct plic_priv *priv = irq_data_get_irq_chip_data(d);
cpumask_and(&amask, &priv->lmask, mask_val);
@@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d,
return -EINVAL;
plic_irq_toggle(&priv->lmask, d, 0);
- plic_irq_toggle(cpumask_of(cpu), d, 1);
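+ /* enable on the new CPU only if the interrupt is currently unmasked */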
+ plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d));
irq_data_update_effective_affinity(d, cpumask_of(cpu));
diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c
new file mode 100644
index 000000000000..0aa50d025ef6
--- /dev/null
+++ b/drivers/irqchip/irq-sl28cpld.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld interrupt controller driver
+ *
+ * Copyright 2020 Kontron Europe GmbH
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define INTC_IE 0x00
+#define INTC_IP 0x01
+
+static const struct regmap_irq sl28cpld_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 8),
+ REGMAP_IRQ_REG_LINE(1, 8),
+ REGMAP_IRQ_REG_LINE(2, 8),
+ REGMAP_IRQ_REG_LINE(3, 8),
+ REGMAP_IRQ_REG_LINE(4, 8),
+ REGMAP_IRQ_REG_LINE(5, 8),
+ REGMAP_IRQ_REG_LINE(6, 8),
+ REGMAP_IRQ_REG_LINE(7, 8),
+};
+
+struct sl28cpld_intc {
+ struct regmap *regmap;
+ struct regmap_irq_chip chip;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static int sl28cpld_intc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sl28cpld_intc *irqchip;
+ int irq;
+ u32 base;
+ int ret;
+
+ if (!dev->parent)
+ return -ENODEV;
+
+ irqchip = devm_kzalloc(dev, sizeof(*irqchip), GFP_KERNEL);
+ if (!irqchip)
+ return -ENOMEM;
+
+ irqchip->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!irqchip->regmap)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return -EINVAL;
+
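+ /*
+ * The block exposes one 8-bit enable register (INTC_IE) and one
+ * 8-bit pending register (INTC_IP) at offsets from its base; the
+ * enable bits act as an inverted mask and pending bits are acked by
+ * writing them back.
+ */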
+ irqchip->chip.name = "sl28cpld-intc";
+ irqchip->chip.irqs = sl28cpld_irqs;
+ irqchip->chip.num_irqs = ARRAY_SIZE(sl28cpld_irqs);
+ irqchip->chip.num_regs = 1;
+ irqchip->chip.status_base = base + INTC_IP;
+ irqchip->chip.mask_base = base + INTC_IE;
+ irqchip->chip.mask_invert = true;
+ irqchip->chip.ack_base = base + INTC_IP;
+
+ return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ irqchip->regmap, irq,
+ IRQF_SHARED | IRQF_ONESHOT, 0,
+ &irqchip->chip,
+ &irqchip->irq_data);
+}
+
+static const struct of_device_id sl28cpld_intc_of_match[] = {
+ { .compatible = "kontron,sl28cpld-intc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_intc_of_match);
+
+static struct platform_driver sl28cpld_intc_driver = {
+ .probe = sl28cpld_intc_probe,
+ .driver = {
+ .name = "sl28cpld-intc",
+ .of_match_table = sl28cpld_intc_of_match,
+ }
+};
+module_platform_driver(sl28cpld_intc_driver);
+
+MODULE_DESCRIPTION("sl28cpld Interrupt Controller Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 0c2c61db26b4..8662d7b7b262 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -195,6 +195,10 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
{ .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct },
{ .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct },
{ .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct },
+ { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct },
{ .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct },
{ .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct },
{ .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip },
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index d4e97605456b..b2ab8db439d9 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -85,6 +85,17 @@ struct ti_sci_inta_vint_desc {
* @base: Base address of the memory mapped IO registers
* @pdev: Pointer to platform device.
* @ti_sci_id: TI-SCI device identifier
+ * @unmapped_cnt: Number of @unmapped_dev_ids entries
+ * @unmapped_dev_ids: Pointer to an array of TI-SCI device identifiers of
+ * unmapped event sources.
+ * Unmapped Events are not part of the Global Event Map;
+ * they are converted to Global Events within the INTA
+ * itself so that the same INTA can receive them and
+ * generate an interrupt.
+ * When an interrupt request comes for a device generating
+ * Unmapped Events, we must use the INTA's TI-SCI device
+ * identifier in place of the source device identifier to
+ * let sysfw know where to program the Global Event number.
*/
struct ti_sci_inta_irq_domain {
const struct ti_sci_handle *sci;
@@ -96,11 +107,37 @@ struct ti_sci_inta_irq_domain {
void __iomem *base;
struct platform_device *pdev;
u32 ti_sci_id;
+
+ int unmapped_cnt;
+ u16 *unmapped_dev_ids;
};
#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
events[i])
+static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq)
+{
+ u16 dev_id = HWIRQ_TO_DEVID(hwirq);
+ int i;
+
+ if (inta->unmapped_cnt == 0)
+ return dev_id;
+
+ /*
+ * For devices sending Unmapped Events we must use the INTA's own
+ * TI-SCI device identifier to be able to convert the event to a
+ * Global Event and map it to an interrupt.
+ */
+ for (i = 0; i < inta->unmapped_cnt; i++) {
+ if (dev_id == inta->unmapped_dev_ids[i]) {
+ dev_id = inta->ti_sci_id;
+ break;
+ }
+ }
+
+ return dev_id;
+}
+
/**
* ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
* @desc: Pointer to irq_desc corresponding to the irq
@@ -175,8 +212,8 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
struct irq_fwspec parent_fwspec;
struct device_node *parent_node;
unsigned int parent_virq;
- u16 vint_id, p_hwirq;
- int ret;
+ int p_hwirq, ret;
+ u16 vint_id;
vint_id = ti_sci_get_free_resource(inta->vint);
if (vint_id == TI_SCI_RESOURCE_NULL)
@@ -251,7 +288,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta
u16 dev_id, dev_index;
int err;
- dev_id = HWIRQ_TO_DEVID(hwirq);
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
dev_index = HWIRQ_TO_IRQID(hwirq);
event_desc = &vint_desc->events[free_bit];
@@ -352,14 +389,15 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
{
struct ti_sci_inta_vint_desc *vint_desc;
struct ti_sci_inta_irq_domain *inta;
+ u16 dev_id;
vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
inta = vint_desc->domain->host_data;
+ dev_id = ti_sci_inta_get_dev_id(inta, hwirq);
/* free event irq */
mutex_lock(&inta->vint_mutex);
inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
- HWIRQ_TO_DEVID(hwirq),
- HWIRQ_TO_IRQID(hwirq),
+ dev_id, HWIRQ_TO_IRQID(hwirq),
inta->ti_sci_id,
vint_desc->vint_id,
event_desc->global_event,
@@ -574,6 +612,41 @@ static struct msi_domain_info ti_sci_inta_msi_domain_info = {
.chip = &ti_sci_inta_msi_irq_chip,
};
+static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta)
+{
+ struct device *dev = &inta->pdev->dev;
+ struct device_node *node = dev_of_node(dev);
+ struct of_phandle_iterator it;
+ int count, err, ret, i;
+
+ count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL);
+ if (count <= 0)
+ return 0;
+
+ inta->unmapped_dev_ids = devm_kcalloc(dev, count,
+ sizeof(*inta->unmapped_dev_ids),
+ GFP_KERNEL);
+ if (!inta->unmapped_dev_ids)
+ return -ENOMEM;
+
+ i = 0;
+ of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) {
+ u32 dev_id;
+
+ ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node);
+ of_node_put(it.node);
+ return ret;
+ }
+ inta->unmapped_dev_ids[i++] = dev_id;
+ }
+
+ inta->unmapped_cnt = count;
+
+ return 0;
+}
+
static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain, *domain, *msi_domain;
@@ -600,13 +673,9 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
inta->pdev = pdev;
inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
- if (IS_ERR(inta->sci)) {
- ret = PTR_ERR(inta->sci);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "ti,sci read fail %d\n", ret);
- inta->sci = NULL;
- return ret;
- }
+ if (IS_ERR(inta->sci))
+ return dev_err_probe(dev, PTR_ERR(inta->sci),
+ "ti,sci read fail\n");
ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
if (ret) {
@@ -633,6 +702,10 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
if (IS_ERR(inta->base))
return PTR_ERR(inta->base);
+ ret = ti_sci_inta_get_unmapped_sources(inta);
+ if (ret)
+ return ret;
+
domain = irq_domain_add_linear(dev_of_node(dev),
ti_sci_get_num_resources(inta->vint),
&ti_sci_inta_irq_domain_ops, inta);
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index cbc1758228d9..ac9d6d658e65 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -137,8 +137,8 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
struct ti_sci_intr_irq_domain *intr = domain->host_data;
struct device_node *parent_node;
struct irq_fwspec fwspec;
- u16 out_irq, p_hwirq;
- int err = 0;
+ int p_hwirq, err = 0;
+ u16 out_irq;
out_irq = ti_sci_get_free_resource(intr->out_irqs);
if (out_irq == TI_SCI_RESOURCE_NULL)
@@ -254,13 +254,9 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
}
intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
- if (IS_ERR(intr->sci)) {
- ret = PTR_ERR(intr->sci);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "ti,sci read fail %d\n", ret);
- intr->sci = NULL;
- return ret;
- }
+ if (IS_ERR(intr->sci))
+ return dev_err_probe(dev, PTR_ERR(intr->sci),
+ "ti,sci read fail\n");
ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dev-id",
&intr->ti_sci_id);
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index d2341153e181..3570f0a588c4 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -22,7 +22,7 @@
* special section.
*/
static const struct of_device_id
-irqchip_of_match_end __used __section(__irqchip_of_table_end);
+irqchip_of_match_end __used __section("__irqchip_of_table_end");
extern struct of_device_id __irqchip_of_table[];
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 6ae9e1f0819d..bd39e9de6ecf 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -205,7 +205,8 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_SKIP_SET_WAKE,
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
.irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
@@ -340,7 +341,8 @@ static const struct irq_domain_ops qcom_pdc_gpio_ops = {
static int pdc_setup_pin_mapping(struct device_node *np)
{
- int ret, n;
+ int ret, n, i;
+ u32 irq_index, reg_index, val;
n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
if (n <= 0 || n % 3)
@@ -369,6 +371,14 @@ static int pdc_setup_pin_mapping(struct device_node *np)
&pdc_region[n].cnt);
if (ret)
return ret;
+
+ for (i = 0; i < pdc_region[n].cnt; i++) {
+ reg_index = (i + pdc_region[n].pin_base) >> 5;
+ irq_index = (i + pdc_region[n].pin_base) & 0x1f;
+ val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index);
+ val &= ~BIT(irq_index);
+ pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val);
+ }
}
return 0;
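
The new loop clears the enable bit of every pin in each qcom,pdc-ranges region at setup time. Each IRQ_ENABLE_BANK register covers 32 pins, so a pin number splits into a register index (pin >> 5) and a bit position (pin & 0x1f). A small standalone check of that arithmetic, with an arbitrary example pin:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pin = 94;			/* hypothetical PDC pin */
		unsigned int reg_index = pin >> 5;	/* 94 / 32 = 2 */
		unsigned int irq_index = pin & 0x1f;	/* 94 % 32 = 30 */

		printf("pin %u -> bank %u, bit %u\n", pin, reg_index, irq_index);
		return 0;
	}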
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 1c181df24eae..849d3c5f908e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -274,7 +274,7 @@ config LEDS_MT6323
config LEDS_S3C24XX
tristate "LED Support for Samsung S3C24XX GPIO LEDs"
depends on LEDS_CLASS
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX || COMPILE_TEST
help
This option enables support for LEDs connected to GPIO lines
on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
@@ -304,13 +304,13 @@ config LEDS_WRAP
config LEDS_COBALT_QUBE
tristate "LED Support for the Cobalt Qube series front LED"
depends on LEDS_CLASS
- depends on MIPS_COBALT
+ depends on MIPS_COBALT || COMPILE_TEST
help
This option enables support for the front LED on Cobalt Qube series
config LEDS_COBALT_RAQ
bool "LED Support for the Cobalt Raq series"
- depends on LEDS_CLASS=y && MIPS_COBALT
+ depends on LEDS_CLASS=y && (MIPS_COBALT || COMPILE_TEST)
select LEDS_TRIGGERS
help
This option enables support for the Cobalt Raq series LEDs.
@@ -395,8 +395,20 @@ config LEDS_LP3952
To compile this driver as a module, choose M here: the
module will be called leds-lp3952.
+config LEDS_LP50XX
+ tristate "LED Support for TI LP5036/30/24/18/12/9 LED driver chip"
+ depends on LEDS_CLASS && REGMAP_I2C
+ depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ help
+ If you say yes here you get support for the Texas Instruments
+ LP5036, LP5030, LP5024, LP5018, LP5012 and LP5009 LED driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp50xx.
+
config LEDS_LP55XX_COMMON
tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
+ depends on LEDS_CLASS
depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
depends on OF
depends on I2C
@@ -632,7 +644,7 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_KIRKWOOD || MACH_ARMADA_370
+ depends on MACH_KIRKWOOD || MACH_ARMADA_370 || COMPILE_TEST
default y
help
This option enables support for the dual-GPIO LEDs found on the
@@ -646,7 +658,7 @@ config LEDS_NS2
config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
depends on LEDS_CLASS
- depends on MACH_KIRKWOOD
+ depends on MACH_KIRKWOOD || COMPILE_TEST
depends on OF_GPIO
default y
help
@@ -893,7 +905,7 @@ config LEDS_TPS6105X
config LEDS_IP30
tristate "LED support for SGI Octane machines"
depends on LEDS_CLASS
- depends on SGI_MFD_IOC3
+ depends on SGI_MFD_IOC3 || COMPILE_TEST
help
This option enables support for the Red and White LEDs of
SGI Octane machines.
@@ -909,6 +921,13 @@ config LEDS_SGM3140
This option enables support for the SGM3140 500mA Buck/Boost Charge
Pump LED Driver.
+config LEDS_ACER_A500
+ tristate "Power button LED support for Acer Iconia Tab A500"
+ depends on LEDS_CLASS && MFD_ACER_A500_EC
+ help
+ This option enables support for the Power Button LED of
+ Acer Iconia Tab A500.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index c2c7d7ade0d0..73e603e1727e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_ACER_A500) += leds-acer-a500.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
@@ -49,6 +50,7 @@ obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o
+obj-$(CONFIG_LEDS_LP50XX) += leds-lp50xx.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
diff --git a/drivers/leds/TODO b/drivers/leds/TODO
new file mode 100644
index 000000000000..bfa60fa1d812
--- /dev/null
+++ b/drivers/leds/TODO
@@ -0,0 +1,75 @@
+-*- org -*-
+
+* On/off LEDs should have max_brightness of 1
+* Get rid of enum led_brightness
+
+It is really an integer, as the maximum is configurable. Get rid of
+it, or make it into a typedef or something.
+
+* Review atomicity requirements in LED subsystem
+
+Calls that may block and calls that may not are mixed in the same
+structure, and the semantics are sometimes non-intuitive. (For example,
+the blink callback may not sleep.) Review the requirements for any
+bugs and document them clearly.
+
+* LED names are still a mess
+
+No two LEDs have the same name, so the names are probably unusable
+for userland. Nudge authors into creating common LED names for common
+functionality.
+
+? Perhaps check for known LED names during boot, and warn if there are
+LEDs not on the list?
+
+* Split drivers into subdirectories
+
+The number of drivers is getting big, and a driver for an on/off LED
+on an I/O port is really quite different from a camera flash LED
+driver, which is really different from a driver for an RGB color LED
+that can run its own microcode. Split the drivers somehow.
+
+* Figure out what to do with RGB leds
+
+Multicolor is a bit too abstract. Yes, we can have a
+Green-Magenta-Ultraviolet LED, but so far all the LEDs we support are
+RGB, and not even RGB-White or RGB-Yellow variants have emerged.
+
+Multicolor is not a good fit for RGB LEDs. It does not really know
+about LED color. In particular, there's no way to make an LED "white".
+
+Userspace is interested in knowing "this LED can produce an arbitrary
+color", which not all multicolor LEDs can.
+
+ Proposal: let's add "rgb" to led_colors in drivers/leds/led-core.c,
+ add corresponding device tree defines, and use that, instead of
+ multicolor for RGB LEDs.
+
+ We really need to do that now; "white" stuff can wait.
+
+RGB LEDs are quite common, and it would be good to be able to turn an
+LED white and to turn it into any arbitrary color. It is essential
+that userspace is able to set arbitrary colors, and it might be good
+to have that ability from the kernel, too... to allow full-color
+triggers.
+
+* Command line utility to manipulate the LEDs?
+
+The /sys interface is not really suitable for use by hand; should we
+have a utility to perform LED control?
+
+In particular, LED names are still a mess (see above) and a utility
+could help there by presenting both old and new names while we clean
+them up.
+
+In future, I'd like the utility to accept both old and new names while
+we clean them up.
+
+It would also be nice to have a useful listing mode -- name, type,
+current brightness/trigger...
+
+In future, it would be good to be able to set an RGB LED to a
+particular color.
+
+And a user-friendly interface to access the LEDs for a particular
+ethernet interface would probably be nice.
+
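
The first TODO item above (on/off LEDs should have max_brightness of 1) looks roughly like this in practice; a hedged sketch of a minimal on/off LED registration, with illustrative foo_* names and the GPIO acquisition omitted:

	#include <linux/leds.h>
	#include <linux/gpio/consumer.h>

	static struct gpio_desc *foo_gpiod;	/* obtained in probe, not shown */

	static void foo_led_set(struct led_classdev *cdev, enum led_brightness b)
	{
		/* any non-zero brightness means "on" for a two-state LED */
		gpiod_set_value(foo_gpiod, b != LED_OFF);
	}

	static struct led_classdev foo_led = {
		.name		= "platform:green:status",
		.max_brightness	= 1,	/* on/off only, per the TODO item */
		.brightness_set	= foo_led_set,
	};

	/* in probe: devm_led_classdev_register(dev, &foo_led); */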
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index cc3929f858b6..131ca83f5fb3 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -354,6 +354,11 @@ int led_classdev_register_ext(struct device *parent,
ret = led_compose_name(parent, init_data, composed_name);
if (ret < 0)
return ret;
+
+ if (init_data->fwnode)
+ fwnode_property_read_string(init_data->fwnode,
+ "linux,default-trigger",
+ &led_cdev->default_trigger);
} else {
proposed_name = led_cdev->name;
}
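
With this core change, any driver registering through led_classdev_register_ext() with init_data->fwnode set gets "linux,default-trigger" read on its behalf, which is why the per-driver fwnode_property_read_string() calls are deleted throughout the rest of this series. A hedged sketch of the consumer side (foo_* names are illustrative, callbacks omitted):

	#include <linux/leds.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static struct led_classdev foo_cdev;	/* callbacks omitted */

	static int foo_leds_probe(struct platform_device *pdev)
	{
		struct led_init_data init_data = {};

		init_data.fwnode = of_fwnode_handle(dev_of_node(&pdev->dev));

		/* the LED core composes the name and now also picks up
		 * "linux,default-trigger" from the fwnode, so the driver
		 * no longer reads that property itself */
		return devm_led_classdev_register_ext(&pdev->dev, &foo_cdev,
						      &init_data);
	}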
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 465c3755cf2e..508d0d859f2e 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -118,14 +118,14 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
struct device_node *nproot, *np;
int iset = 0;
- if (!pdev->dev.parent->of_node)
+ if (!dev_of_node(pdev->dev.parent))
return -ENODEV;
- nproot = of_get_child_by_name(pdev->dev.parent->of_node, "leds");
+ nproot = of_get_child_by_name(dev_of_node(pdev->dev.parent), "leds");
if (!nproot) {
dev_err(&pdev->dev, "failed to find leds node\n");
return -ENODEV;
}
- for_each_child_of_node(nproot, np) {
+ for_each_available_child_of_node(nproot, np) {
if (of_node_name_eq(np, data->name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index 5a0fe7b7b8bc..589484b22c79 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -248,7 +248,7 @@ static int aat1290_led_parse_dt(struct aat1290_led *led,
}
#endif
- child_node = of_get_next_available_child(dev->of_node, NULL);
+ child_node = of_get_next_available_child(dev_of_node(dev), NULL);
if (!child_node) {
dev_err(dev, "No DT child node found for connected LED.\n");
return -EINVAL;
diff --git a/drivers/leds/leds-acer-a500.c b/drivers/leds/leds-acer-a500.c
new file mode 100644
index 000000000000..8cf0b11f4390
--- /dev/null
+++ b/drivers/leds/leds-acer-a500.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define A500_EC_LED_DELAY_USEC (100 * 1000)
+
+enum {
+ REG_RESET_LEDS = 0x40,
+ REG_POWER_LED_ON = 0x42,
+ REG_CHARGE_LED_ON = 0x43,
+ REG_ANDROID_LEDS_OFF = 0x5a,
+};
+
+struct a500_led {
+ struct led_classdev cdev;
+ const struct reg_sequence *enable_seq;
+ struct a500_led *other;
+ struct regmap *rmap;
+};
+
+static const struct reg_sequence a500_ec_leds_reset_seq[] = {
+ REG_SEQ(REG_RESET_LEDS, 0x0, A500_EC_LED_DELAY_USEC),
+ REG_SEQ(REG_ANDROID_LEDS_OFF, 0x0, A500_EC_LED_DELAY_USEC),
+};
+
+static const struct reg_sequence a500_ec_white_led_enable_seq[] = {
+ REG_SEQ(REG_POWER_LED_ON, 0x0, A500_EC_LED_DELAY_USEC),
+};
+
+static const struct reg_sequence a500_ec_orange_led_enable_seq[] = {
+ REG_SEQ(REG_CHARGE_LED_ON, 0x0, A500_EC_LED_DELAY_USEC),
+};
+
+static int a500_ec_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct a500_led *led = container_of(led_cdev, struct a500_led, cdev);
+ struct reg_sequence control_seq[2];
+ unsigned int num_regs = 1;
+
+ if (value) {
+ control_seq[0] = led->enable_seq[0];
+ } else {
+ /*
+ * There are no separate controls that can disable the LEDs
+ * individually; there is only the RESET_LEDS command, which
+ * turns off both LEDs.
+ *
+ * Since RESET_LEDS turns off both LEDs, restore the other LED
+ * if it is currently on.
+ */
+ if (led->other->cdev.brightness)
+ num_regs = 2;
+
+ control_seq[0] = a500_ec_leds_reset_seq[0];
+ control_seq[1] = led->other->enable_seq[0];
+ }
+
+ return regmap_multi_reg_write(led->rmap, control_seq, num_regs);
+}
+
+static int a500_ec_leds_probe(struct platform_device *pdev)
+{
+ struct a500_led *white_led, *orange_led;
+ struct regmap *rmap;
+ int err;
+
+ rmap = dev_get_regmap(pdev->dev.parent, "KB930");
+ if (!rmap)
+ return -EINVAL;
+
+ /* reset and turn off LEDs */
+ regmap_multi_reg_write(rmap, a500_ec_leds_reset_seq, 2);
+
+ white_led = devm_kzalloc(&pdev->dev, sizeof(*white_led), GFP_KERNEL);
+ if (!white_led)
+ return -ENOMEM;
+
+ white_led->cdev.name = "power:white";
+ white_led->cdev.brightness_set_blocking = a500_ec_led_brightness_set;
+ white_led->cdev.flags = LED_CORE_SUSPENDRESUME;
+ white_led->cdev.max_brightness = 1;
+ white_led->enable_seq = a500_ec_white_led_enable_seq;
+ white_led->rmap = rmap;
+
+ orange_led = devm_kzalloc(&pdev->dev, sizeof(*orange_led), GFP_KERNEL);
+ if (!orange_led)
+ return -ENOMEM;
+
+ orange_led->cdev.name = "power:orange";
+ orange_led->cdev.brightness_set_blocking = a500_ec_led_brightness_set;
+ orange_led->cdev.flags = LED_CORE_SUSPENDRESUME;
+ orange_led->cdev.max_brightness = 1;
+ orange_led->enable_seq = a500_ec_orange_led_enable_seq;
+ orange_led->rmap = rmap;
+
+ white_led->other = orange_led;
+ orange_led->other = white_led;
+
+ err = devm_led_classdev_register(&pdev->dev, &white_led->cdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register white LED\n");
+ return err;
+ }
+
+ err = devm_led_classdev_register(&pdev->dev, &orange_led->cdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register orange LED\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static struct platform_driver a500_ec_leds_driver = {
+ .driver = {
+ .name = "acer-a500-iconia-leds",
+ },
+ .probe = a500_ec_leds_probe,
+};
+module_platform_driver(a500_ec_leds_driver);
+
+MODULE_DESCRIPTION("LED driver for Acer Iconia Tab A500 Power Button");
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_ALIAS("platform:acer-a500-iconia-leds");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 82350a28a564..a0df1fb28774 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -202,13 +202,13 @@ error:
static int an30259a_dt_init(struct i2c_client *client,
struct an30259a *chip)
{
- struct device_node *np = client->dev.of_node, *child;
+ struct device_node *np = dev_of_node(&client->dev), *child;
int count, ret;
int i = 0;
const char *str;
struct an30259a_led *led;
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count || count > AN30259A_MAX_LEDS)
return -EINVAL;
@@ -238,9 +238,6 @@ static int an30259a_dt_init(struct i2c_client *client,
led->default_state = STATE_OFF;
}
- of_property_read_string(child, "linux,default-trigger",
- &led->cdev.default_trigger);
-
i++;
}
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index d709cc1f949e..80d937454aee 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -261,11 +261,11 @@ out:
static int aw2013_probe_dt(struct aw2013 *chip)
{
- struct device_node *np = chip->client->dev.of_node, *child;
+ struct device_node *np = dev_of_node(&chip->client->dev), *child;
int count, ret = 0, i = 0;
struct aw2013_led *led;
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count || count > AW2013_MAX_LEDS)
return -EINVAL;
@@ -297,16 +297,15 @@ static int aw2013_probe_dt(struct aw2013 *chip)
"DT property led-max-microamp is missing\n");
}
- of_property_read_string(child, "linux,default-trigger",
- &led->cdev.default_trigger);
-
led->cdev.brightness_set_blocking = aw2013_brightness_set;
led->cdev.blink_set = aw2013_blink_set;
ret = devm_led_classdev_register_ext(&chip->client->dev,
&led->cdev, &init_data);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child);
return ret;
+ }
i++;
}
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index bad7efb75112..226d17d253ed 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -328,6 +328,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
void __iomem *mem, spinlock_t *lock,
unsigned long *blink_leds, unsigned long *blink_delay)
{
+ struct led_init_data init_data = {};
struct bcm6328_led *led;
const char *state;
int rc;
@@ -345,11 +346,6 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
if (of_property_read_bool(nc, "active-low"))
led->active_low = true;
- led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
- led->cdev.default_trigger = of_get_property(nc,
- "linux,default-trigger",
- NULL);
-
if (!of_property_read_string(nc, "default-state", &state)) {
if (!strcmp(state, "on")) {
led->cdev.brightness = LED_FULL;
@@ -382,8 +378,9 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
led->cdev.brightness_set = bcm6328_led_set;
led->cdev.blink_set = bcm6328_blink_set;
+ init_data.fwnode = of_fwnode_handle(nc);
- rc = led_classdev_register(dev, &led->cdev);
+ rc = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (rc < 0)
return rc;
@@ -395,7 +392,7 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
static int bcm6328_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev_of_node(&pdev->dev);
struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 94fefd456ba0..9d2e487fa08a 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -94,6 +94,7 @@ static void bcm6358_led_set(struct led_classdev *led_cdev,
static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
void __iomem *mem, spinlock_t *lock)
{
+ struct led_init_data init_data = {};
struct bcm6358_led *led;
const char *state;
int rc;
@@ -109,11 +110,6 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
if (of_property_read_bool(nc, "active-low"))
led->active_low = true;
- led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
- led->cdev.default_trigger = of_get_property(nc,
- "linux,default-trigger",
- NULL);
-
if (!of_property_read_string(nc, "default-state", &state)) {
if (!strcmp(state, "on")) {
led->cdev.brightness = LED_FULL;
@@ -136,8 +132,9 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
bcm6358_led_set(&led->cdev, led->cdev.brightness);
led->cdev.brightness_set = bcm6358_led_set;
+ init_data.fwnode = of_fwnode_handle(nc);
- rc = led_classdev_register(dev, &led->cdev);
+ rc = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (rc < 0)
return rc;
@@ -149,7 +146,7 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
static int bcm6358_leds_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev_of_node(&pdev->dev);
struct device_node *child;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
diff --git a/drivers/leds/leds-cpcap.c b/drivers/leds/leds-cpcap.c
index 9f3fa4737213..7d41ce8c9bb1 100644
--- a/drivers/leds/leds-cpcap.c
+++ b/drivers/leds/leds-cpcap.c
@@ -158,19 +158,14 @@ MODULE_DEVICE_TABLE(of, cpcap_led_of_match);
static int cpcap_led_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct cpcap_led *led;
int err;
- match = of_match_device(of_match_ptr(cpcap_led_of_match), &pdev->dev);
- if (!match || !match->data)
- return -EINVAL;
-
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
platform_set_drvdata(pdev, led);
- led->info = match->data;
+ led->info = device_get_match_data(&pdev->dev);
led->dev = &pdev->dev;
if (led->info->reg == 0x0000) {
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index 2da448ae718e..d03cfd3c0bfb 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -188,9 +188,6 @@ static int cr0014114_probe_dt(struct cr0014114 *priv)
device_for_each_child_node(priv->dev, child) {
led = &priv->leds[i];
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->ldev.default_trigger);
-
led->priv = priv;
led->ldev.max_brightness = CR_MAX_BRIGHTNESS;
led->ldev.brightness_set_blocking = cr0014114_set_sync;
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
index 298b13e4807a..6ca47f2a2004 100644
--- a/drivers/leds/leds-el15203000.c
+++ b/drivers/leds/leds-el15203000.c
@@ -263,9 +263,6 @@ static int el15203000_probe_dt(struct el15203000 *priv)
return -EINVAL;
}
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->ldev.default_trigger);
-
led->priv = priv;
led->ldev.max_brightness = LED_ON;
led->ldev.brightness_set_blocking = el15203000_set_blocking;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index cf84096d88ce..93f5b1b60fde 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -160,9 +160,6 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
led_dat->gpiod = led.gpiod;
- fwnode_property_read_string(child, "linux,default-trigger",
- &led.default_trigger);
-
if (!fwnode_property_read_string(child, "default-state",
&state)) {
if (!strcmp(state, "keep"))
diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c
index d4ec7361c616..1f952bad0fe8 100644
--- a/drivers/leds/leds-ip30.c
+++ b/drivers/leds/leds-ip30.c
@@ -3,6 +3,7 @@
* LED Driver for SGI Octane machines
*/
+#include <asm/io.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index ca6634b8683c..4161b9dd7e48 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -16,6 +16,8 @@
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
/* register numbers */
#define IS31FL319X_SHUTDOWN 0x00
@@ -61,6 +63,7 @@
struct is31fl319x_chip {
const struct is31fl319x_chipdef *cdef;
struct i2c_client *client;
+ struct gpio_desc *shutdown_gpio;
struct regmap *regmap;
struct mutex lock;
u32 audio_gain_db;
@@ -199,26 +202,27 @@ static int is31fl319x_parse_child_dt(const struct device *dev,
static int is31fl319x_parse_dt(struct device *dev,
struct is31fl319x_chip *is31)
{
- struct device_node *np = dev->of_node, *child;
- const struct of_device_id *of_dev_id;
+ struct device_node *np = dev_of_node(dev), *child;
int count;
int ret;
if (!np)
return -ENODEV;
- of_dev_id = of_match_device(of_is31fl319x_match, dev);
- if (!of_dev_id) {
- dev_err(dev, "Failed to match device with supported chips\n");
- return -EINVAL;
+ is31->shutdown_gpio = devm_gpiod_get_optional(dev,
+ "shutdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(is31->shutdown_gpio)) {
+ ret = PTR_ERR(is31->shutdown_gpio);
+ dev_err(dev, "Failed to get shutdown gpio: %d\n", ret);
+ return ret;
}
- is31->cdef = of_dev_id->data;
+ is31->cdef = device_get_match_data(dev);
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
- dev_dbg(dev, "probe %s with %d leds defined in DT\n",
- of_dev_id->compatible, count);
+ dev_dbg(dev, "probing with %d leds defined in DT\n", count);
if (!count || count > is31->cdef->num_leds) {
dev_err(dev, "Number of leds defined must be between 1 and %u\n",
@@ -226,7 +230,7 @@ static int is31fl319x_parse_dt(struct device *dev,
return -ENODEV;
}
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct is31fl319x_led *led;
u32 reg;
@@ -350,6 +354,12 @@ static int is31fl319x_probe(struct i2c_client *client,
if (err)
goto free_mutex;
+ if (is31->shutdown_gpio) {
+ gpiod_direction_output(is31->shutdown_gpio, 0);
+ mdelay(5);
+ gpiod_direction_output(is31->shutdown_gpio, 1);
+ }
+
is31->client = client;
is31->regmap = devm_regmap_init_i2c(client, &regmap_config);
if (IS_ERR(is31->regmap)) {
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index cd768f991da1..2180255ad339 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -332,9 +332,6 @@ static int is31fl32xx_parse_child_dt(const struct device *dev,
int ret = 0;
u32 reg;
- if (of_property_read_string(child, "label", &cdev->name))
- cdev->name = child->name;
-
ret = of_property_read_u32(child, "reg", &reg);
if (ret || reg < 1 || reg > led_data->priv->cdef->channels) {
dev_err(dev,
@@ -344,9 +341,6 @@ static int is31fl32xx_parse_child_dt(const struct device *dev,
}
led_data->channel = reg;
- of_property_read_string(child, "linux,default-trigger",
- &cdev->default_trigger);
-
cdev->brightness_set_blocking = is31fl32xx_brightness_set;
return 0;
@@ -372,7 +366,8 @@ static int is31fl32xx_parse_dt(struct device *dev,
struct device_node *child;
int ret = 0;
- for_each_child_of_node(dev->of_node, child) {
+ for_each_available_child_of_node(dev_of_node(dev), child) {
+ struct led_init_data init_data = {};
struct is31fl32xx_led_data *led_data =
&priv->leds[priv->num_leds];
const struct is31fl32xx_led_data *other_led_data;
@@ -388,17 +383,18 @@ static int is31fl32xx_parse_dt(struct device *dev,
led_data->channel);
if (other_led_data) {
dev_err(dev,
- "%s and %s both attempting to use channel %d\n",
- led_data->cdev.name,
- other_led_data->cdev.name,
- led_data->channel);
+ "Node %pOF 'reg' conflicts with another LED\n",
+ child);
goto err;
}
- ret = devm_led_classdev_register(dev, &led_data->cdev);
+ init_data.fwnode = of_fwnode_handle(child);
+
+ ret = devm_led_classdev_register_ext(dev, &led_data->cdev,
+ &init_data);
if (ret) {
- dev_err(dev, "failed to register PWM led for %s: %d\n",
- led_data->cdev.name, ret);
+ dev_err(dev, "Failed to register LED for %pOF: %d\n",
+ child, ret);
goto err;
}
@@ -428,19 +424,14 @@ static int is31fl32xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct is31fl32xx_chipdef *cdef;
- const struct of_device_id *of_dev_id;
struct device *dev = &client->dev;
struct is31fl32xx_priv *priv;
int count;
int ret = 0;
- of_dev_id = of_match_device(of_is31fl32xx_match, dev);
- if (!of_dev_id)
- return -EINVAL;
-
- cdef = of_dev_id->data;
+ cdef = device_get_match_data(dev);
- count = of_get_child_count(dev->of_node);
+ count = of_get_available_child_count(dev_of_node(dev));
if (!count)
return -EINVAL;
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index 670efee9b131..632f10db4b3f 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -259,11 +259,11 @@ static void ktd2692_setup(struct ktd2692_context *led)
static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct ktd2692_led_config_data *cfg)
{
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev_of_node(dev);
struct device_node *child_node;
int ret;
- if (!dev->of_node)
+ if (!dev_of_node(dev))
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 946ad67eaecb..0bf25bdde02f 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -96,15 +96,15 @@
/*
* struct lm3532_als_data
- * @config - value of ALS configuration register
- * @als1_imp_sel - value of ALS1 resistor select register
- * @als2_imp_sel - value of ALS2 resistor select register
- * @als_avrg_time - ALS averaging time
- * @als_input_mode - ALS input mode for brightness control
- * @als_vmin - Minimum ALS voltage
- * @als_vmax - Maximum ALS voltage
- * @zone_lo - values of ALS lo ZB(Zone Boundary) registers
- * @zone_hi - values of ALS hi ZB(Zone Boundary) registers
+ * @config: value of ALS configuration register
+ * @als1_imp_sel: value of ALS1 resistor select register
+ * @als2_imp_sel: value of ALS2 resistor select register
+ * @als_avrg_time: ALS averaging time
+ * @als_input_mode: ALS input mode for brightness control
+ * @als_vmin: Minimum ALS voltage
+ * @als_vmax: Maximum ALS voltage
+ * @zone_lo: values of ALS lo ZB(Zone Boundary) registers
+ * @zone_hi: values of ALS hi ZB(Zone Boundary) registers
*/
struct lm3532_als_data {
u8 config;
@@ -121,15 +121,14 @@ struct lm3532_als_data {
/**
* struct lm3532_led
* @led_dev: led class device
- * @priv - Pointer the device data structure
- * @control_bank - Control bank the LED is associated to
- * @mode - Mode of the LED string
- * @ctrl_brt_pointer - Zone target register that controls the sink
- * @num_leds - Number of LED strings are supported in this array
- * @full_scale_current - The full-scale current setting for the current sink.
- * @led_strings - The LED strings supported in this array
- * @enabled - Enabled status
- * @label - LED label
+ * @priv: Pointer the device data structure
+ * @control_bank: Control bank the LED is associated to
+ * @mode: Mode of the LED string
+ * @ctrl_brt_pointer: Zone target register that controls the sink
+ * @num_leds: Number of LED strings are supported in this array
+ * @full_scale_current: The full-scale current setting for the current sink.
+ * @led_strings: The LED strings supported in this array
+ * @enabled: Enabled status
*/
struct lm3532_led {
struct led_classdev led_dev;
@@ -142,21 +141,20 @@ struct lm3532_led {
int full_scale_current;
unsigned int enabled:1;
u32 led_strings[LM3532_MAX_CONTROL_BANKS];
- char label[LED_MAX_NAME_SIZE];
};
/**
* struct lm3532_data
- * @enable_gpio - Hardware enable gpio
+ * @enable_gpio: Hardware enable gpio
* @regulator: regulator
* @client: i2c client
- * @regmap - Devices register map
- * @dev - Pointer to the devices device struct
- * @lock - Lock for reading/writing the device
- * @als_data - Pointer to the als data struct
- * @runtime_ramp_up - Runtime ramp up setting
- * @runtime_ramp_down - Runtime ramp down setting
- * @leds - Array of LED strings
+ * @regmap: Devices register map
+ * @dev: Pointer to the devices device struct
+ * @lock: Lock for reading/writing the device
+ * @als_data: Pointer to the als data struct
+ * @runtime_ramp_up: Runtime ramp up setting
+ * @runtime_ramp_down: Runtime ramp down setting
+ * @leds: Array of LED strings
*/
struct lm3532_data {
struct gpio_desc *enable_gpio;
@@ -548,7 +546,6 @@ static int lm3532_parse_node(struct lm3532_data *priv)
{
struct fwnode_handle *child = NULL;
struct lm3532_led *led;
- const char *name;
int control_bank;
u32 ramp_time;
size_t i = 0;
@@ -643,19 +640,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
goto child_out;
}
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->led_dev.default_trigger);
-
- ret = fwnode_property_read_string(child, "label", &name);
- if (ret)
- snprintf(led->label, sizeof(led->label),
- "%s::", priv->client->name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s:%s", priv->client->name, name);
-
led->priv = priv;
- led->led_dev.name = led->label;
led->led_dev.brightness_set_blocking = lm3532_brightness_set;
ret = devm_led_classdev_register_ext(priv->dev, &led->led_dev, &idata);
diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
index bfeee03a0053..aadb03468a40 100644
--- a/drivers/leds/leds-lm36274.c
+++ b/drivers/leds/leds-lm36274.c
@@ -26,8 +26,8 @@
* @lmu_data: Register and setting values for common code
* @regmap: Devices register map
* @dev: Pointer to the devices device struct
- * @led_sources - The LED strings supported in this array
- * @num_leds - Number of LED strings are supported in this array
+ * @led_sources: The LED strings supported in this array
+ * @num_leds: Number of LED strings are supported in this array
*/
struct lm36274 {
struct platform_device *pdev;
@@ -41,122 +41,113 @@ struct lm36274 {
};
static int lm36274_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brt_val)
+ enum led_brightness brt_val)
{
- struct lm36274 *led = container_of(led_cdev, struct lm36274, led_dev);
+ struct lm36274 *chip = container_of(led_cdev, struct lm36274, led_dev);
- return ti_lmu_common_set_brightness(&led->lmu_data, brt_val);
+ return ti_lmu_common_set_brightness(&chip->lmu_data, brt_val);
}
-static int lm36274_init(struct lm36274 *lm36274_data)
+static int lm36274_init(struct lm36274 *chip)
{
int enable_val = 0;
int i;
- for (i = 0; i < lm36274_data->num_leds; i++)
- enable_val |= (1 << lm36274_data->led_sources[i]);
+ for (i = 0; i < chip->num_leds; i++)
+ enable_val |= (1 << chip->led_sources[i]);
if (!enable_val) {
- dev_err(lm36274_data->dev, "No LEDs were enabled\n");
+ dev_err(chip->dev, "No LEDs were enabled\n");
return -EINVAL;
}
enable_val |= LM36274_BL_EN;
- return regmap_write(lm36274_data->regmap, LM36274_REG_BL_EN,
- enable_val);
+ return regmap_write(chip->regmap, LM36274_REG_BL_EN, enable_val);
}
-static int lm36274_parse_dt(struct lm36274 *lm36274_data)
+static int lm36274_parse_dt(struct lm36274 *chip,
+ struct led_init_data *init_data)
{
- struct fwnode_handle *child = NULL;
- char label[LED_MAX_NAME_SIZE];
- struct device *dev = &lm36274_data->pdev->dev;
- const char *name;
- int child_cnt;
- int ret = -EINVAL;
+ struct device *dev = chip->dev;
+ struct fwnode_handle *child;
+ int ret;
/* There should only be 1 node */
- child_cnt = device_get_child_node_count(dev);
- if (child_cnt != 1)
+ if (device_get_child_node_count(dev) != 1)
return -EINVAL;
- device_for_each_child_node(dev, child) {
- ret = fwnode_property_read_string(child, "label", &name);
- if (ret)
- snprintf(label, sizeof(label),
- "%s::", lm36274_data->pdev->name);
- else
- snprintf(label, sizeof(label),
- "%s:%s", lm36274_data->pdev->name, name);
-
- lm36274_data->num_leds = fwnode_property_count_u32(child, "led-sources");
- if (lm36274_data->num_leds <= 0)
- return -ENODEV;
-
- ret = fwnode_property_read_u32_array(child, "led-sources",
- lm36274_data->led_sources,
- lm36274_data->num_leds);
- if (ret) {
- dev_err(dev, "led-sources property missing\n");
- return ret;
- }
-
- fwnode_property_read_string(child, "linux,default-trigger",
- &lm36274_data->led_dev.default_trigger);
+ child = device_get_next_child_node(dev, NULL);
- }
+ init_data->fwnode = child;
+ init_data->devicename = chip->pdev->name;
+ /* for backwards compatibility when `label` property is not present */
+ init_data->default_label = ":";
- lm36274_data->lmu_data.regmap = lm36274_data->regmap;
- lm36274_data->lmu_data.max_brightness = MAX_BRIGHTNESS_11BIT;
- lm36274_data->lmu_data.msb_brightness_reg = LM36274_REG_BRT_MSB;
- lm36274_data->lmu_data.lsb_brightness_reg = LM36274_REG_BRT_LSB;
+ chip->num_leds = fwnode_property_count_u32(child, "led-sources");
+ if (chip->num_leds <= 0) {
+ ret = -ENODEV;
+ goto err;
+ }
- lm36274_data->led_dev.name = label;
- lm36274_data->led_dev.max_brightness = MAX_BRIGHTNESS_11BIT;
- lm36274_data->led_dev.brightness_set_blocking = lm36274_brightness_set;
+ ret = fwnode_property_read_u32_array(child, "led-sources",
+ chip->led_sources, chip->num_leds);
+ if (ret) {
+ dev_err(dev, "led-sources property missing\n");
+ goto err;
+ }
return 0;
+err:
+ fwnode_handle_put(child);
+ return ret;
}
static int lm36274_probe(struct platform_device *pdev)
{
struct ti_lmu *lmu = dev_get_drvdata(pdev->dev.parent);
- struct lm36274 *lm36274_data;
+ struct led_init_data init_data = {};
+ struct lm36274 *chip;
int ret;
- lm36274_data = devm_kzalloc(&pdev->dev, sizeof(*lm36274_data),
- GFP_KERNEL);
- if (!lm36274_data)
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
- lm36274_data->pdev = pdev;
- lm36274_data->dev = lmu->dev;
- lm36274_data->regmap = lmu->regmap;
- platform_set_drvdata(pdev, lm36274_data);
+ chip->pdev = pdev;
+ chip->dev = &pdev->dev;
+ chip->regmap = lmu->regmap;
+ platform_set_drvdata(pdev, chip);
- ret = lm36274_parse_dt(lm36274_data);
+ ret = lm36274_parse_dt(chip, &init_data);
if (ret) {
- dev_err(lm36274_data->dev, "Failed to parse DT node\n");
+ dev_err(chip->dev, "Failed to parse DT node\n");
return ret;
}
- ret = lm36274_init(lm36274_data);
+ ret = lm36274_init(chip);
if (ret) {
- dev_err(lm36274_data->dev, "Failed to init the device\n");
+ dev_err(chip->dev, "Failed to init the device\n");
return ret;
}
- return led_classdev_register(lm36274_data->dev, &lm36274_data->led_dev);
-}
+ chip->lmu_data.regmap = chip->regmap;
+ chip->lmu_data.max_brightness = MAX_BRIGHTNESS_11BIT;
+ chip->lmu_data.msb_brightness_reg = LM36274_REG_BRT_MSB;
+ chip->lmu_data.lsb_brightness_reg = LM36274_REG_BRT_LSB;
-static int lm36274_remove(struct platform_device *pdev)
-{
- struct lm36274 *lm36274_data = platform_get_drvdata(pdev);
+ chip->led_dev.max_brightness = MAX_BRIGHTNESS_11BIT;
+ chip->led_dev.brightness_set_blocking = lm36274_brightness_set;
- led_classdev_unregister(&lm36274_data->led_dev);
+ ret = devm_led_classdev_register_ext(chip->dev, &chip->led_dev,
+ &init_data);
+ if (ret)
+ dev_err(chip->dev, "Failed to register LED for node %pfw\n",
+ init_data.fwnode);
- return 0;
+ fwnode_handle_put(init_data.fwnode);
+
+ return ret;
}
static const struct of_device_id of_lm36274_leds_match[] = {
@@ -167,9 +158,9 @@ MODULE_DEVICE_TABLE(of, of_lm36274_leds_match);
static struct platform_driver lm36274_driver = {
.probe = lm36274_probe,
- .remove = lm36274_remove,
.driver = {
.name = "lm36274-leds",
+ .of_match_table = of_lm36274_leds_match,
},
};
module_platform_driver(lm36274_driver)
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index e1e2d2b64a56..e945de45388c 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -394,13 +394,10 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
if (IS_ERR(led->regulator)) {
ret = PTR_ERR(led->regulator);
- if (ret != -ENODEV) {
- if (ret != -EPROBE_DEFER)
- dev_err(&led->client->dev,
- "Failed to get vled regulator: %d\n",
- ret);
- return ret;
- }
+ if (ret != -ENODEV)
+ return dev_err_probe(&led->client->dev, ret,
+ "Failed to get vled regulator\n");
+
led->regulator = NULL;
}
@@ -436,9 +433,6 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return -ENODEV;
}
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->led_dev.default_trigger);
-
ret = fwnode_property_read_u32(child, "reg", &led->led_enable);
if (ret) {
dev_err(&led->client->dev, "reg DT property missing\n");
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 024983088d59..7d216cdb91a8 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -78,6 +78,7 @@ struct lm3697 {
struct mutex lock;
int bank_cfg;
+ int num_banks;
struct lm3697_led leds[];
};
@@ -115,6 +116,7 @@ static int lm3697_brightness_set(struct led_classdev *led_cdev,
struct lm3697_led *led = container_of(led_cdev, struct lm3697_led,
led_dev);
int ctrl_en_val = (1 << led->control_bank);
+ struct device *dev = led->priv->dev;
int ret;
mutex_lock(&led->priv->lock);
@@ -123,7 +125,7 @@ static int lm3697_brightness_set(struct led_classdev *led_cdev,
ret = regmap_update_bits(led->priv->regmap, LM3697_CTRL_ENABLE,
ctrl_en_val, ~ctrl_en_val);
if (ret) {
- dev_err(&led->priv->client->dev, "Cannot write ctrl register\n");
+ dev_err(dev, "Cannot write ctrl register\n");
goto brightness_out;
}
@@ -131,8 +133,7 @@ static int lm3697_brightness_set(struct led_classdev *led_cdev,
} else {
ret = ti_lmu_common_set_brightness(&led->lmu_data, brt_val);
if (ret) {
- dev_err(&led->priv->client->dev,
- "Cannot write brightness\n");
+ dev_err(dev, "Cannot write brightness\n");
goto brightness_out;
}
@@ -141,8 +142,7 @@ static int lm3697_brightness_set(struct led_classdev *led_cdev,
LM3697_CTRL_ENABLE,
ctrl_en_val, ctrl_en_val);
if (ret) {
- dev_err(&led->priv->client->dev,
- "Cannot enable the device\n");
+ dev_err(dev, "Cannot enable the device\n");
goto brightness_out;
}
@@ -157,6 +157,7 @@ brightness_out:
static int lm3697_init(struct lm3697 *priv)
{
+ struct device *dev = priv->dev;
struct lm3697_led *led;
int i, ret;
@@ -165,26 +166,26 @@ static int lm3697_init(struct lm3697 *priv)
} else {
ret = regmap_write(priv->regmap, LM3697_RESET, LM3697_SW_RESET);
if (ret) {
- dev_err(&priv->client->dev, "Cannot reset the device\n");
+ dev_err(dev, "Cannot reset the device\n");
goto out;
}
}
ret = regmap_write(priv->regmap, LM3697_CTRL_ENABLE, 0x0);
if (ret) {
- dev_err(&priv->client->dev, "Cannot write ctrl enable\n");
+ dev_err(dev, "Cannot write ctrl enable\n");
goto out;
}
ret = regmap_write(priv->regmap, LM3697_OUTPUT_CONFIG, priv->bank_cfg);
if (ret)
- dev_err(&priv->client->dev, "Cannot write OUTPUT config\n");
+ dev_err(dev, "Cannot write OUTPUT config\n");
- for (i = 0; i < LM3697_MAX_CONTROL_BANKS; i++) {
+ for (i = 0; i < priv->num_banks; i++) {
led = &priv->leds[i];
ret = ti_lmu_common_set_ramp(&led->lmu_data);
if (ret)
- dev_err(&priv->client->dev, "Setting the ramp rate failed\n");
+ dev_err(dev, "Setting the ramp rate failed\n");
}
out:
return ret;
@@ -193,36 +194,37 @@ out:
static int lm3697_probe_dt(struct lm3697 *priv)
{
struct fwnode_handle *child = NULL;
+ struct device *dev = priv->dev;
struct lm3697_led *led;
- const char *name;
+ int ret = -EINVAL;
int control_bank;
size_t i = 0;
- int ret = -EINVAL;
int j;
- priv->enable_gpio = devm_gpiod_get_optional(&priv->client->dev,
- "enable", GPIOD_OUT_LOW);
+ priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
if (IS_ERR(priv->enable_gpio)) {
ret = PTR_ERR(priv->enable_gpio);
- dev_err(&priv->client->dev, "Failed to get enable gpio: %d\n",
- ret);
+ dev_err(dev, "Failed to get enable gpio: %d\n", ret);
return ret;
}
- priv->regulator = devm_regulator_get(&priv->client->dev, "vled");
+ priv->regulator = devm_regulator_get(dev, "vled");
if (IS_ERR(priv->regulator))
priv->regulator = NULL;
- device_for_each_child_node(priv->dev, child) {
+ device_for_each_child_node(dev, child) {
+ struct led_init_data init_data = {};
+
ret = fwnode_property_read_u32(child, "reg", &control_bank);
if (ret) {
- dev_err(&priv->client->dev, "reg property missing\n");
+ dev_err(dev, "reg property missing\n");
fwnode_handle_put(child);
goto child_out;
}
if (control_bank > LM3697_CONTROL_B) {
- dev_err(&priv->client->dev, "reg property is invalid\n");
+ dev_err(dev, "reg property is invalid\n");
ret = -EINVAL;
fwnode_handle_put(child);
goto child_out;
@@ -230,10 +232,10 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led = &priv->leds[i];
- ret = ti_lmu_common_get_brt_res(&priv->client->dev,
- child, &led->lmu_data);
+ ret = ti_lmu_common_get_brt_res(dev, child, &led->lmu_data);
if (ret)
- dev_warn(&priv->client->dev, "brightness resolution property missing\n");
+ dev_warn(dev,
+ "brightness resolution property missing\n");
led->control_bank = control_bank;
led->lmu_data.regmap = priv->regmap;
@@ -246,7 +248,7 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3697_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "Too many LED strings defined\n");
+ dev_err(dev, "Too many LED strings defined\n");
continue;
}
@@ -254,7 +256,7 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led->hvled_strings,
led->num_leds);
if (ret) {
- dev_err(&priv->client->dev, "led-sources property missing\n");
+ dev_err(dev, "led-sources property missing\n");
fwnode_handle_put(child);
goto child_out;
}
@@ -263,31 +265,23 @@ static int lm3697_probe_dt(struct lm3697 *priv)
priv->bank_cfg |=
(led->control_bank << led->hvled_strings[j]);
- ret = ti_lmu_common_get_ramp_params(&priv->client->dev,
- child, &led->lmu_data);
+ ret = ti_lmu_common_get_ramp_params(dev, child, &led->lmu_data);
if (ret)
- dev_warn(&priv->client->dev, "runtime-ramp properties missing\n");
+ dev_warn(dev, "runtime-ramp properties missing\n");
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->led_dev.default_trigger);
-
- ret = fwnode_property_read_string(child, "label", &name);
- if (ret)
- snprintf(led->label, sizeof(led->label),
- "%s::", priv->client->name);
- else
- snprintf(led->label, sizeof(led->label),
- "%s:%s", priv->client->name, name);
+ init_data.fwnode = child;
+ init_data.devicename = priv->client->name;
+ /* for backwards compatibility if `label` is not present */
+ init_data.default_label = ":";
led->priv = priv;
- led->led_dev.name = led->label;
led->led_dev.max_brightness = led->lmu_data.max_brightness;
led->led_dev.brightness_set_blocking = lm3697_brightness_set;
- ret = devm_led_classdev_register(priv->dev, &led->led_dev);
+ ret = devm_led_classdev_register_ext(dev, &led->led_dev,
+ &init_data);
if (ret) {
- dev_err(&priv->client->dev, "led register err: %d\n",
- ret);
+ dev_err(dev, "led register err: %d\n", ret);
fwnode_handle_put(child);
goto child_out;
}
@@ -302,18 +296,18 @@ child_out:
static int lm3697_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm3697 *led;
int count;
int ret;
- count = device_get_child_node_count(&client->dev);
- if (!count) {
- dev_err(&client->dev, "LEDs are not defined in device tree!");
+ count = device_get_child_node_count(dev);
+ if (!count || count > LM3697_MAX_CONTROL_BANKS) {
+ dev_err(dev, "Strange device tree!");
return -ENODEV;
}
- led = devm_kzalloc(&client->dev, struct_size(led, leds, count),
- GFP_KERNEL);
+ led = devm_kzalloc(dev, struct_size(led, leds, count), GFP_KERNEL);
if (!led)
return -ENOMEM;
@@ -321,12 +315,12 @@ static int lm3697_probe(struct i2c_client *client,
i2c_set_clientdata(client, led);
led->client = client;
- led->dev = &client->dev;
+ led->dev = dev;
+ led->num_banks = count;
led->regmap = devm_regmap_init_i2c(client, &lm3697_regmap_config);
if (IS_ERR(led->regmap)) {
ret = PTR_ERR(led->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- ret);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
return ret;
}
@@ -340,12 +334,13 @@ static int lm3697_probe(struct i2c_client *client,
static int lm3697_remove(struct i2c_client *client)
{
struct lm3697 *led = i2c_get_clientdata(client);
+ struct device *dev = &led->client->dev;
int ret;
ret = regmap_update_bits(led->regmap, LM3697_CTRL_ENABLE,
LM3697_CTRL_A_B_EN, 0);
if (ret) {
- dev_err(&led->client->dev, "Failed to disable the device\n");
+ dev_err(dev, "Failed to disable the device\n");
return ret;
}
@@ -355,8 +350,7 @@ static int lm3697_remove(struct i2c_client *client)
if (led->regulator) {
ret = regulator_disable(led->regulator);
if (ret)
- dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ dev_err(dev, "Failed to disable regulator\n");
}
mutex_destroy(&led->lock);
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
new file mode 100644
index 000000000000..5fb4f24aeb2e
--- /dev/null
+++ b/drivers/leds/leds-lp50xx.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0
+// TI LP50XX LED chip family driver
+// Copyright (C) 2018-20 Texas Instruments Incorporated - https://www.ti.com/
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#include <linux/led-class-multicolor.h>
+
+#include "leds.h"
+
+#define LP50XX_DEV_CFG0 0x00
+#define LP50XX_DEV_CFG1 0x01
+#define LP50XX_LED_CFG0 0x02
+
+/* LP5009 and LP5012 registers */
+#define LP5012_BNK_BRT 0x03
+#define LP5012_BNKA_CLR 0x04
+#define LP5012_BNKB_CLR 0x05
+#define LP5012_BNKC_CLR 0x06
+#define LP5012_LED0_BRT 0x07
+#define LP5012_OUT0_CLR 0x0b
+#define LP5012_RESET 0x17
+
+/* LP5018 and LP5024 registers */
+#define LP5024_BNK_BRT 0x03
+#define LP5024_BNKA_CLR 0x04
+#define LP5024_BNKB_CLR 0x05
+#define LP5024_BNKC_CLR 0x06
+#define LP5024_LED0_BRT 0x07
+#define LP5024_OUT0_CLR 0x0f
+#define LP5024_RESET 0x27
+
+/* LP5030 and LP5036 registers */
+#define LP5036_LED_CFG1 0x03
+#define LP5036_BNK_BRT 0x04
+#define LP5036_BNKA_CLR 0x05
+#define LP5036_BNKB_CLR 0x06
+#define LP5036_BNKC_CLR 0x07
+#define LP5036_LED0_BRT 0x08
+#define LP5036_OUT0_CLR 0x14
+#define LP5036_RESET 0x38
+
+#define LP50XX_SW_RESET 0xff
+#define LP50XX_CHIP_EN BIT(6)
+
+/* There are 3 LED outputs per bank */
+#define LP50XX_LEDS_PER_MODULE 3
+
+#define LP5009_MAX_LED_MODULES 2
+#define LP5012_MAX_LED_MODULES 4
+#define LP5018_MAX_LED_MODULES 6
+#define LP5024_MAX_LED_MODULES 8
+#define LP5030_MAX_LED_MODULES 10
+#define LP5036_MAX_LED_MODULES 12
+
+static const struct reg_default lp5012_reg_defs[] = {
+ {LP50XX_DEV_CFG0, 0x0},
+ {LP50XX_DEV_CFG1, 0x3c},
+ {LP50XX_LED_CFG0, 0x0},
+ {LP5012_BNK_BRT, 0xff},
+ {LP5012_BNKA_CLR, 0x0f},
+ {LP5012_BNKB_CLR, 0x0f},
+ {LP5012_BNKC_CLR, 0x0f},
+ {LP5012_LED0_BRT, 0x0f},
+ /* LEDX_BRT registers are all 0xff for defaults */
+ {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0xff},
+ {LP5012_OUT0_CLR, 0x0f},
+ /* OUTX_CLR registers are all 0x0 for defaults */
+ {0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0x00}, {0x0f, 0x00}, {0x10, 0x00},
+ {0x11, 0x00}, {0x12, 0x00}, {0x13, 0x00}, {0x14, 0x00}, {0x15, 0x00},
+ {0x16, 0x00},
+ {LP5012_RESET, 0x00}
+};
+
+static const struct reg_default lp5024_reg_defs[] = {
+ {LP50XX_DEV_CFG0, 0x0},
+ {LP50XX_DEV_CFG1, 0x3c},
+ {LP50XX_LED_CFG0, 0x0},
+ {LP5024_BNK_BRT, 0xff},
+ {LP5024_BNKA_CLR, 0x0f},
+ {LP5024_BNKB_CLR, 0x0f},
+ {LP5024_BNKC_CLR, 0x0f},
+ {LP5024_LED0_BRT, 0x0f},
+ /* LEDX_BRT registers are all 0xff for defaults */
+ {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0xff}, {0x0b, 0xff}, {0x0c, 0xff},
+ {0x0d, 0xff}, {0x0e, 0xff},
+ {LP5024_OUT0_CLR, 0x0f},
+ /* OUTX_CLR registers are all 0x0 for defaults */
+ {0x10, 0x00}, {0x11, 0x00}, {0x12, 0x00}, {0x13, 0x00}, {0x14, 0x00},
+ {0x15, 0x00}, {0x16, 0x00}, {0x17, 0x00}, {0x18, 0x00}, {0x19, 0x00},
+ {0x1a, 0x00}, {0x1b, 0x00}, {0x1c, 0x00}, {0x1d, 0x00}, {0x1e, 0x00},
+ {0x1f, 0x00}, {0x20, 0x00}, {0x21, 0x00}, {0x22, 0x00}, {0x23, 0x00},
+ {0x24, 0x00}, {0x25, 0x00}, {0x26, 0x00},
+ {LP5024_RESET, 0x00}
+};
+
+static const struct reg_default lp5036_reg_defs[] = {
+ {LP50XX_DEV_CFG0, 0x0},
+ {LP50XX_DEV_CFG1, 0x3c},
+ {LP50XX_LED_CFG0, 0x0},
+ {LP5036_LED_CFG1, 0x0},
+ {LP5036_BNK_BRT, 0xff},
+ {LP5036_BNKA_CLR, 0x0f},
+ {LP5036_BNKB_CLR, 0x0f},
+ {LP5036_BNKC_CLR, 0x0f},
+ {LP5036_LED0_BRT, 0x0f},
+ /* LEDX_BRT registers are all 0xff for defaults */
+ {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0xff}, {0x0b, 0xff}, {0x0c, 0xff},
+ {0x0d, 0xff}, {0x0e, 0xff}, {0x0f, 0xff}, {0x10, 0xff}, {0x11, 0xff},
+ {0x12, 0xff}, {0x13, 0xff},
+ {LP5036_OUT0_CLR, 0x0f},
+ /* OUTX_CLR registers are all 0x0 for defaults */
+ {0x15, 0x00}, {0x16, 0x00}, {0x17, 0x00}, {0x18, 0x00}, {0x19, 0x00},
+ {0x1a, 0x00}, {0x1b, 0x00}, {0x1c, 0x00}, {0x1d, 0x00}, {0x1e, 0x00},
+ {0x1f, 0x00}, {0x20, 0x00}, {0x21, 0x00}, {0x22, 0x00}, {0x23, 0x00},
+ {0x24, 0x00}, {0x25, 0x00}, {0x26, 0x00}, {0x27, 0x00}, {0x28, 0x00},
+ {0x29, 0x00}, {0x2a, 0x00}, {0x2b, 0x00}, {0x2c, 0x00}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x2f, 0x00}, {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00},
+ {0x33, 0x00}, {0x34, 0x00}, {0x35, 0x00}, {0x36, 0x00}, {0x37, 0x00},
+ {LP5036_RESET, 0x00}
+};
+
+static const struct regmap_config lp5012_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP5012_RESET,
+ .reg_defaults = lp5012_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp5012_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config lp5024_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP5024_RESET,
+ .reg_defaults = lp5024_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp5024_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+};
+
+static const struct regmap_config lp5036_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LP5036_RESET,
+ .reg_defaults = lp5036_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lp5036_reg_defs),
+ .cache_type = REGCACHE_FLAT,
+};
+
+enum lp50xx_model {
+ LP5009,
+ LP5012,
+ LP5018,
+ LP5024,
+ LP5030,
+ LP5036,
+};
+
+/**
+ * struct lp50xx_chip_info - chip-specific configuration and register layout
+ * @lp50xx_regmap_config: regmap register configuration
+ * @model_id: LED device model
+ * @max_modules: total number of supported LED modules
+ * @num_leds: number of LED outputs available on the device
+ * @led_brightness0_reg: first brightness register of the device
+ * @mix_out0_reg: first color mix register of the device
+ * @bank_brt_reg: bank brightness register
+ * @bank_mix_reg: first bank color mix register
+ * @reset_reg: device reset register
+ */
+struct lp50xx_chip_info {
+ const struct regmap_config *lp50xx_regmap_config;
+ int model_id;
+ u8 max_modules;
+ u8 num_leds;
+ u8 led_brightness0_reg;
+ u8 mix_out0_reg;
+ u8 bank_brt_reg;
+ u8 bank_mix_reg;
+ u8 reset_reg;
+};
+
+static const struct lp50xx_chip_info lp50xx_chip_info_tbl[] = {
+ [LP5009] = {
+ .model_id = LP5009,
+ .max_modules = LP5009_MAX_LED_MODULES,
+ .num_leds = LP5009_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5012_LED0_BRT,
+ .mix_out0_reg = LP5012_OUT0_CLR,
+ .bank_brt_reg = LP5012_BNK_BRT,
+ .bank_mix_reg = LP5012_BNKA_CLR,
+ .reset_reg = LP5012_RESET,
+ .lp50xx_regmap_config = &lp5012_regmap_config,
+ },
+ [LP5012] = {
+ .model_id = LP5012,
+ .max_modules = LP5012_MAX_LED_MODULES,
+ .num_leds = LP5012_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5012_LED0_BRT,
+ .mix_out0_reg = LP5012_OUT0_CLR,
+ .bank_brt_reg = LP5012_BNK_BRT,
+ .bank_mix_reg = LP5012_BNKA_CLR,
+ .reset_reg = LP5012_RESET,
+ .lp50xx_regmap_config = &lp5012_regmap_config,
+ },
+ [LP5018] = {
+ .model_id = LP5018,
+ .max_modules = LP5018_MAX_LED_MODULES,
+ .num_leds = LP5018_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5024_LED0_BRT,
+ .mix_out0_reg = LP5024_OUT0_CLR,
+ .bank_brt_reg = LP5024_BNK_BRT,
+ .bank_mix_reg = LP5024_BNKA_CLR,
+ .reset_reg = LP5024_RESET,
+ .lp50xx_regmap_config = &lp5024_regmap_config,
+ },
+ [LP5024] = {
+ .model_id = LP5024,
+ .max_modules = LP5024_MAX_LED_MODULES,
+ .num_leds = LP5024_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5024_LED0_BRT,
+ .mix_out0_reg = LP5024_OUT0_CLR,
+ .bank_brt_reg = LP5024_BNK_BRT,
+ .bank_mix_reg = LP5024_BNKA_CLR,
+ .reset_reg = LP5024_RESET,
+ .lp50xx_regmap_config = &lp5024_regmap_config,
+ },
+ [LP5030] = {
+ .model_id = LP5030,
+ .max_modules = LP5030_MAX_LED_MODULES,
+ .num_leds = LP5030_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5036_LED0_BRT,
+ .mix_out0_reg = LP5036_OUT0_CLR,
+ .bank_brt_reg = LP5036_BNK_BRT,
+ .bank_mix_reg = LP5036_BNKA_CLR,
+ .reset_reg = LP5036_RESET,
+ .lp50xx_regmap_config = &lp5036_regmap_config,
+ },
+ [LP5036] = {
+ .model_id = LP5036,
+ .max_modules = LP5036_MAX_LED_MODULES,
+ .num_leds = LP5036_MAX_LED_MODULES * LP50XX_LEDS_PER_MODULE,
+ .led_brightness0_reg = LP5036_LED0_BRT,
+ .mix_out0_reg = LP5036_OUT0_CLR,
+ .bank_brt_reg = LP5036_BNK_BRT,
+ .bank_mix_reg = LP5036_BNKA_CLR,
+ .reset_reg = LP5036_RESET,
+ .lp50xx_regmap_config = &lp5036_regmap_config,
+ },
+};
+
+struct lp50xx_led {
+ struct led_classdev_mc mc_cdev;
+ struct lp50xx *priv;
+ unsigned long bank_modules;
+ int led_intensity[LP50XX_LEDS_PER_MODULE];
+ u8 ctrl_bank_enabled;
+ int led_number;
+};
+
+/**
+ * struct lp50xx - LP50xx device private data
+ * @enable_gpio: hardware enable gpio
+ * @regulator: LED supply regulator pointer
+ * @client: pointer to the I2C client
+ * @regmap: device register map
+ * @dev: pointer to the device's struct device
+ * @lock: lock for reading/writing the device
+ * @chip_info: chip-specific information (e.g., num_leds)
+ * @num_of_banked_leds: holds the number of banked LEDs
+ * @leds: array of LED strings
+ */
+struct lp50xx {
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct device *dev;
+ struct mutex lock;
+ const struct lp50xx_chip_info *chip_info;
+ int num_of_banked_leds;
+
+ /* This needs to be at the end of the struct */
+ struct lp50xx_led leds[];
+};
+
+static struct lp50xx_led *mcled_cdev_to_led(struct led_classdev_mc *mc_cdev)
+{
+ return container_of(mc_cdev, struct lp50xx_led, mc_cdev);
+}
+
+static int lp50xx_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_dev = lcdev_to_mccdev(cdev);
+ struct lp50xx_led *led = mcled_cdev_to_led(mc_dev);
+ const struct lp50xx_chip_info *led_chip = led->priv->chip_info;
+ u8 led_offset, reg_val;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&led->priv->lock);
+ if (led->ctrl_bank_enabled)
+ reg_val = led_chip->bank_brt_reg;
+ else
+ reg_val = led_chip->led_brightness0_reg +
+ led->led_number;
+
+ ret = regmap_write(led->priv->regmap, reg_val, brightness);
+ if (ret) {
+ dev_err(&led->priv->client->dev,
+ "Cannot write brightness value %d\n", ret);
+ goto out;
+ }
+
+ for (i = 0; i < led->mc_cdev.num_colors; i++) {
+ if (led->ctrl_bank_enabled) {
+ reg_val = led_chip->bank_mix_reg + i;
+ } else {
+ led_offset = (led->led_number * 3) + i;
+ reg_val = led_chip->mix_out0_reg + led_offset;
+ }
+
+ ret = regmap_write(led->priv->regmap, reg_val,
+ mc_dev->subled_info[i].intensity);
+ if (ret) {
+ dev_err(&led->priv->client->dev,
+ "Cannot write intensity value %d\n", ret);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&led->priv->lock);
+ return ret;
+}
+
+static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[])
+{
+ u8 led_config_lo, led_config_hi;
+ u32 bank_enable_mask = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->chip_info->max_modules; i++) {
+ if (led_banks[i])
+ bank_enable_mask |= (1 << led_banks[i]);
+ }
+
+ led_config_lo = (u8)(bank_enable_mask & 0xff);
+ led_config_hi = (u8)(bank_enable_mask >> 8) & 0xff;
+
+ ret = regmap_write(priv->regmap, LP50XX_LED_CFG0, led_config_lo);
+ if (ret)
+ return ret;
+
+ if (priv->chip_info->model_id >= LP5030)
+ ret = regmap_write(priv->regmap, LP5036_LED_CFG1, led_config_hi);
+
+ return ret;
+}
+
+static int lp50xx_reset(struct lp50xx *priv)
+{
+ return regmap_write(priv->regmap, priv->chip_info->reset_reg, LP50XX_SW_RESET);
+}
+
+static int lp50xx_enable_disable(struct lp50xx *priv, int enable_disable)
+{
+ int ret;
+
+ if (priv->enable_gpio) {
+ ret = gpiod_direction_output(priv->enable_gpio, enable_disable);
+ if (ret)
+ return ret;
+ }
+
+ if (enable_disable)
+ return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN);
+ else
+ return regmap_write(priv->regmap, LP50XX_DEV_CFG0, 0);
+}
+
+static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
+ struct lp50xx_led *led, int num_leds)
+{
+ u32 led_banks[LP5036_MAX_LED_MODULES] = {0};
+ int led_number;
+ int ret;
+
+ if (num_leds > 1) {
+ if (num_leds > priv->chip_info->max_modules) {
+ dev_err(&priv->client->dev, "reg property is invalid\n");
+ return -EINVAL;
+ }
+
+ priv->num_of_banked_leds = num_leds;
+
+ ret = fwnode_property_read_u32_array(child, "reg", led_banks, num_leds);
+ if (ret) {
+ dev_err(&priv->client->dev, "reg property is missing\n");
+ return ret;
+ }
+
+ ret = lp50xx_set_banks(priv, led_banks);
+ if (ret) {
+ dev_err(&priv->client->dev, "Cannot setup banked LEDs\n");
+ return ret;
+ }
+
+ led->ctrl_bank_enabled = 1;
+ } else {
+ ret = fwnode_property_read_u32(child, "reg", &led_number);
+ if (ret) {
+ dev_err(&priv->client->dev, "led reg property missing\n");
+ return ret;
+ }
+
+		if (led_number >= priv->chip_info->num_leds) {
+ dev_err(&priv->client->dev, "led-sources property is invalid\n");
+ return -EINVAL;
+ }
+
+ led->led_number = led_number;
+ }
+
+ return 0;
+}
+
+static int lp50xx_probe_dt(struct lp50xx *priv)
+{
+ struct fwnode_handle *child = NULL;
+ struct fwnode_handle *led_node = NULL;
+ struct led_init_data init_data = {};
+ struct led_classdev *led_cdev;
+ struct mc_subled *mc_led_info;
+ struct lp50xx_led *led;
+ int ret = -EINVAL;
+ int num_colors;
+ u32 color_id;
+ int i = 0;
+
+ priv->enable_gpio = devm_gpiod_get_optional(priv->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->enable_gpio)) {
+ ret = PTR_ERR(priv->enable_gpio);
+ dev_err(&priv->client->dev, "Failed to get enable gpio: %d\n",
+ ret);
+ return ret;
+ }
+
+ priv->regulator = devm_regulator_get(priv->dev, "vled");
+ if (IS_ERR(priv->regulator))
+ priv->regulator = NULL;
+
+ device_for_each_child_node(priv->dev, child) {
+ led = &priv->leds[i];
+ ret = fwnode_property_count_u32(child, "reg");
+ if (ret < 0) {
+ dev_err(&priv->client->dev, "reg property is invalid\n");
+ goto child_out;
+ }
+
+ ret = lp50xx_probe_leds(child, priv, led, ret);
+ if (ret)
+ goto child_out;
+
+ init_data.fwnode = child;
+ num_colors = 0;
+
+ /*
+		 * There are only 3 LEDs per module; otherwise the LEDs are
+		 * banked, which is also presented as 3 LEDs.
+ */
+ mc_led_info = devm_kcalloc(priv->dev, LP50XX_LEDS_PER_MODULE,
+ sizeof(*mc_led_info), GFP_KERNEL);
+ if (!mc_led_info)
+ return -ENOMEM;
+
+ fwnode_for_each_child_node(child, led_node) {
+ ret = fwnode_property_read_u32(led_node, "color",
+ &color_id);
+ if (ret) {
+ dev_err(priv->dev, "Cannot read color\n");
+ goto child_out;
+ }
+
+ mc_led_info[num_colors].color_index = color_id;
+ num_colors++;
+ }
+
+ led->priv = priv;
+ led->mc_cdev.num_colors = num_colors;
+ led->mc_cdev.subled_info = mc_led_info;
+ led_cdev = &led->mc_cdev.led_cdev;
+ led_cdev->brightness_set_blocking = lp50xx_brightness_set;
+
+ ret = devm_led_classdev_multicolor_register_ext(&priv->client->dev,
+ &led->mc_cdev,
+ &init_data);
+ if (ret) {
+ dev_err(&priv->client->dev, "led register err: %d\n",
+ ret);
+ goto child_out;
+ }
+ i++;
+ fwnode_handle_put(child);
+ }
+
+ return 0;
+
+child_out:
+ fwnode_handle_put(child);
+ return ret;
+}
+
+static int lp50xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp50xx *led;
+ int count;
+ int ret;
+
+ count = device_get_child_node_count(&client->dev);
+ if (!count) {
+		dev_err(&client->dev, "LEDs are not defined in device tree!\n");
+ return -ENODEV;
+ }
+
+ led = devm_kzalloc(&client->dev, struct_size(led, leds, count),
+ GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ led->client = client;
+ led->dev = &client->dev;
+ led->chip_info = &lp50xx_chip_info_tbl[id->driver_data];
+ i2c_set_clientdata(client, led);
+ led->regmap = devm_regmap_init_i2c(client,
+ led->chip_info->lp50xx_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lp50xx_reset(led);
+ if (ret)
+ return ret;
+
+ ret = lp50xx_enable_disable(led, 1);
+ if (ret)
+ return ret;
+
+ return lp50xx_probe_dt(led);
+}
+
+static int lp50xx_remove(struct i2c_client *client)
+{
+ struct lp50xx *led = i2c_get_clientdata(client);
+ int ret;
+
+ ret = lp50xx_enable_disable(led, 0);
+ if (ret) {
+ dev_err(&led->client->dev, "Failed to disable chip\n");
+ return ret;
+ }
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp50xx_id[] = {
+ { "lp5009", LP5009 },
+ { "lp5012", LP5012 },
+ { "lp5018", LP5018 },
+ { "lp5024", LP5024 },
+ { "lp5030", LP5030 },
+ { "lp5036", LP5036 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp50xx_id);
+
+static const struct of_device_id of_lp50xx_leds_match[] = {
+ { .compatible = "ti,lp5009", .data = (void *)LP5009 },
+ { .compatible = "ti,lp5012", .data = (void *)LP5012 },
+ { .compatible = "ti,lp5018", .data = (void *)LP5018 },
+ { .compatible = "ti,lp5024", .data = (void *)LP5024 },
+ { .compatible = "ti,lp5030", .data = (void *)LP5030 },
+ { .compatible = "ti,lp5036", .data = (void *)LP5036 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lp50xx_leds_match);
+
+static struct i2c_driver lp50xx_driver = {
+ .driver = {
+ .name = "lp50xx",
+ .of_match_table = of_lp50xx_leds_match,
+ },
+ .probe = lp50xx_probe,
+ .remove = lp50xx_remove,
+ .id_table = lp50xx_id,
+};
+module_i2c_driver(lp50xx_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP50XX LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
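
The chip_info table above is what keeps the rest of the driver free of
hard-coded register addresses: the supported models differ only in where
their LED0 brightness and OUT0 color registers start. A minimal sketch
(illustrative only, not part of the patch) of how a 0-based module number
resolves to its registers, matching the arithmetic in
lp50xx_brightness_set():

/* Illustrative only: "chip" is assumed to point at one of the
 * lp50xx_chip_info_tbl[] entries above.
 */
static u8 lp50xx_brt_reg(const struct lp50xx_chip_info *chip, int led_number)
{
	/* one brightness register per RGB module, starting at LED0_BRT */
	return chip->led_brightness0_reg + led_number;
}

static u8 lp50xx_clr_reg(const struct lp50xx_chip_info *chip, int led_number,
			 int color)
{
	/* three color-mix registers per module, starting at OUT0_CLR */
	return chip->mix_out0_reg +
	       led_number * LP50XX_LEDS_PER_MODULE + color;
}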
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index ef8c3bfa8f3c..a9e7507c998c 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -523,7 +523,7 @@ static int lp5521_probe(struct i2c_client *client,
struct lp55xx_chip *chip;
struct lp55xx_led *led;
struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index f55d97258d5e..fc433e63b1dc 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -891,7 +891,7 @@ static int lp5523_probe(struct i2c_client *client,
struct lp55xx_chip *chip;
struct lp55xx_led *led;
struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 7ecdd199d7ef..31c14016d289 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -518,7 +518,7 @@ static int lp5562_probe(struct i2c_client *client,
struct lp55xx_chip *chip;
struct lp55xx_led *led;
struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 56210f4ad919..81de1346bf5d 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -611,11 +611,13 @@ static int lp55xx_parse_multi_led(struct device_node *np,
struct device_node *child;
int num_colors = 0, ret;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
ret = lp55xx_parse_multi_led_child(child, cfg, child_number,
num_colors);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
num_colors++;
}
@@ -665,7 +667,7 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
if (!pdata)
return ERR_PTR(-ENOMEM);
- num_channels = of_get_child_count(np);
+ num_channels = of_get_available_child_count(np);
if (num_channels == 0) {
dev_err(dev, "no LED channels\n");
return ERR_PTR(-EINVAL);
@@ -679,10 +681,12 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
pdata->num_channels = num_channels;
cfg->max_channel = chip->cfg->max_channel;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
ret = lp55xx_parse_logical_led(child, cfg, i);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ERR_PTR(-EINVAL);
+ }
i++;
}
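
Both hunks above apply the same rule: for_each_available_child_of_node()
takes a reference on the current child and drops it when the loop
advances, so any early exit must drop it by hand or the node is leaked.
A minimal sketch of the pattern (parse_child() is a hypothetical
stand-in):

	struct device_node *child;
	int ret;

	for_each_available_child_of_node(np, child) {
		ret = parse_child(child);	/* hypothetical per-child parser */
		if (ret) {
			/* balance the reference held by the iterator */
			of_node_put(child);
			return ret;
		}
	}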
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index ac2c31db4a65..2d2fda2ab104 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -306,7 +306,7 @@ static int lp8501_probe(struct i2c_client *client,
struct lp55xx_chip *chip;
struct lp55xx_led *led;
struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index ac2f5d6272dc..f0533a337bc1 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -380,7 +380,7 @@ static int lp8860_probe(struct i2c_client *client,
{
int ret;
struct lp8860_led *led;
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
struct device_node *child_node;
struct led_init_data init_data = {};
@@ -392,10 +392,6 @@ static int lp8860_probe(struct i2c_client *client,
if (!child_node)
return -EINVAL;
- led->led_dev.default_trigger = of_get_property(child_node,
- "linux,default-trigger",
- NULL);
-
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
"enable", GPIOD_OUT_LOW);
if (IS_ERR(led->enable_gpio)) {
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 9079850e6ea4..68e06434ac08 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -68,7 +68,7 @@ static int lt3593_led_probe(struct platform_device *pdev)
struct led_init_data init_data = {};
const char *tmp;
- if (!dev->of_node)
+ if (!dev_of_node(dev))
return -ENODEV;
led_data = devm_kzalloc(dev, sizeof(*led_data), GFP_KERNEL);
@@ -86,9 +86,6 @@ static int lt3593_led_probe(struct platform_device *pdev)
child = device_get_next_child_node(dev, NULL);
- fwnode_property_read_string(child, "linux,default-trigger",
- &led_data->cdev.default_trigger);
-
if (!fwnode_property_read_string(child, "default-state", &tmp)) {
if (!strcmp(tmp, "on"))
state = LEDS_GPIO_DEFSTATE_ON;
@@ -107,7 +104,6 @@ static int lt3593_led_probe(struct platform_device *pdev)
return ret;
}
- led_data->cdev.dev->of_node = dev->of_node;
platform_set_drvdata(pdev, led_data);
return 0;
diff --git a/drivers/leds/leds-max77650.c b/drivers/leds/leds-max77650.c
index a0d4b725c917..1eeac56b0014 100644
--- a/drivers/leds/leds-max77650.c
+++ b/drivers/leds/leds-max77650.c
@@ -66,7 +66,6 @@ static int max77650_led_probe(struct platform_device *pdev)
struct max77650_led *leds, *led;
struct device *dev;
struct regmap *map;
- const char *label;
int rv, num_leds;
u32 reg;
@@ -86,6 +85,8 @@ static int max77650_led_probe(struct platform_device *pdev)
return -ENODEV;
device_for_each_child_node(dev, child) {
+ struct led_init_data init_data = {};
+
rv = fwnode_property_read_u32(child, "reg", &reg);
if (rv || reg >= MAX77650_LED_NUM_LEDS) {
rv = -EINVAL;
@@ -99,22 +100,13 @@ static int max77650_led_probe(struct platform_device *pdev)
led->cdev.brightness_set_blocking = max77650_led_brightness_set;
led->cdev.max_brightness = MAX77650_LED_MAX_BRIGHTNESS;
- rv = fwnode_property_read_string(child, "label", &label);
- if (rv) {
- led->cdev.name = "max77650::";
- } else {
- led->cdev.name = devm_kasprintf(dev, GFP_KERNEL,
- "max77650:%s", label);
- if (!led->cdev.name) {
- rv = -ENOMEM;
- goto err_node_put;
- }
- }
-
- fwnode_property_read_string(child, "linux,default-trigger",
- &led->cdev.default_trigger);
+ init_data.fwnode = child;
+ init_data.devicename = "max77650";
+ /* for backwards compatibility if `label` is not present */
+ init_data.default_label = ":";
- rv = devm_led_classdev_register(dev, &led->cdev);
+ rv = devm_led_classdev_register_ext(dev, &led->cdev,
+ &init_data);
if (rv)
goto err_node_put;
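
The conversion above delegates naming to the LED core: with struct
led_init_data, the class device name is composed from the child node's
color and function properties as "<devicename>:<color>:<function>", and
default_label is only used when the node provides no naming properties.
A sketch of the resulting names (illustrative, assuming hypothetical
max77650 child nodes):

	/*
	 * function = "indicator";          -> "max77650::indicator"
	 * color = <LED_COLOR_ID_GREEN>,
	 * function = "status";             -> "max77650:green:status"
	 * (no naming properties at all)    -> "max77650::" (via default_label)
	 */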
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index fec56090c2ba..5c1faeb55a31 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -599,7 +599,7 @@ static int max77693_led_parse_dt(struct max77693_led_device *led,
{
struct device *dev = &led->pdev->dev;
struct max77693_sub_led *sub_leds = led->sub_leds;
- struct device_node *node = dev->of_node, *child_node;
+ struct device_node *node = dev_of_node(dev), *child_node;
struct property *prop;
u32 led_sources[2];
int i, ret, fled_id;
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 5cd810c545f3..675502c15c2b 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -121,7 +121,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
if (!pdata)
return ERR_PTR(-ENOMEM);
- parent = of_get_child_by_name(dev->parent->of_node, "leds");
+ parent = of_get_child_by_name(dev_of_node(dev->parent), "leds");
if (!parent)
goto out_node_put;
@@ -131,7 +131,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
if (ret)
goto out_node_put;
- pdata->num_leds = of_get_child_count(parent);
+ pdata->num_leds = of_get_available_child_count(parent);
pdata->led = devm_kcalloc(dev, pdata->num_leds, sizeof(*pdata->led),
GFP_KERNEL);
@@ -140,7 +140,7 @@ static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
goto out_node_put;
}
- for_each_child_of_node(parent, child) {
+ for_each_available_child_of_node(parent, child) {
const char *str;
u32 tmp;
@@ -192,7 +192,7 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
leds->master = mcdev;
platform_set_drvdata(pdev, leds);
- if (dev->parent->of_node) {
+ if (dev_of_node(dev->parent)) {
pdata = mc13xxx_led_probe_dt(pdev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
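
Switching both the count and the iterator to their "available" variants
keeps them consistent: a child with status = "disabled" is now neither
counted nor visited, so the preallocated array and the loop cannot
disagree. A sketch of the invariant (init_one_led() is a hypothetical
stand-in):

	int i = 0;

	pdata->num_leds = of_get_available_child_count(parent);
	pdata->led = devm_kcalloc(dev, pdata->num_leds, sizeof(*pdata->led),
				  GFP_KERNEL);
	if (!pdata->led)
		return -ENOMEM;

	for_each_available_child_of_node(parent, child)
		init_one_led(&pdata->led[i++], child);	/* hypothetical */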
diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c
index 2a13e3161bf4..f59e0e8bda8b 100644
--- a/drivers/leds/leds-mt6323.c
+++ b/drivers/leds/leds-mt6323.c
@@ -249,15 +249,6 @@ static int mt6323_led_set_blink(struct led_classdev *cdev,
int ret;
/*
- * Units are in ms, if over the hardware able
- * to support, fallback into software blink
- */
- period = *delay_on + *delay_off;
-
- if (period > MT6323_MAX_PERIOD)
- return -EINVAL;
-
- /*
* LED subsystem requires a default user
* friendly blink pattern for the LED so using
* 1Hz duty cycle 50% here if without specific
@@ -269,6 +260,15 @@ static int mt6323_led_set_blink(struct led_classdev *cdev,
}
/*
+	 * Units are in ms; if the period exceeds what the hardware
+	 * can support, fall back to software blink.
+ */
+ period = *delay_on + *delay_off;
+
+ if (period > MT6323_MAX_PERIOD)
+ return -EINVAL;
+
+ /*
* Calculate duty_hw based on the percentage of period during
* which the led is ON.
*/
@@ -342,11 +342,6 @@ static int mt6323_led_set_dt_default(struct led_classdev *cdev,
const char *state;
int ret = 0;
- led->cdev.name = of_get_property(np, "label", NULL) ? : np->name;
- led->cdev.default_trigger = of_get_property(np,
- "linux,default-trigger",
- NULL);
-
state = of_get_property(np, "default-state", NULL);
if (state) {
if (!strcmp(state, "keep")) {
@@ -369,9 +364,9 @@ static int mt6323_led_set_dt_default(struct led_classdev *cdev,
static int mt6323_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev_of_node(dev);
struct device_node *child;
- struct mt6397_chip *hw = dev_get_drvdata(pdev->dev.parent);
+ struct mt6397_chip *hw = dev_get_drvdata(dev->parent);
struct mt6323_leds *leds;
struct mt6323_led *led;
int ret;
@@ -402,6 +397,8 @@ static int mt6323_led_probe(struct platform_device *pdev)
}
for_each_available_child_of_node(np, child) {
+ struct led_init_data init_data = {};
+
ret = of_property_read_u32(child, "reg", &reg);
if (ret) {
dev_err(dev, "Failed to read led 'reg' property\n");
@@ -437,13 +434,14 @@ static int mt6323_led_probe(struct platform_device *pdev)
goto put_child_node;
}
- ret = devm_led_classdev_register(dev, &leds->led[reg]->cdev);
+ init_data.fwnode = of_fwnode_handle(child);
+
+ ret = devm_led_classdev_register_ext(dev, &leds->led[reg]->cdev,
+ &init_data);
if (ret) {
- dev_err(&pdev->dev, "Failed to register LED: %d\n",
- ret);
+ dev_err(dev, "Failed to register LED: %d\n", ret);
goto put_child_node;
}
- leds->led[reg]->cdev.dev->of_node = child;
}
return 0;
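
The mt6323 hunks only swap two blocks, but the order matters: the
500 ms/500 ms default must be applied before the period is computed and
range-checked, so the values actually programmed are the ones that were
validated. The resulting flow (sketch):

	/* apply the default 1 Hz, 50% duty pattern first */
	if (!*delay_on && !*delay_off) {
		*delay_on = 500;
		*delay_off = 500;
	}

	/* then validate the period actually in effect */
	period = *delay_on + *delay_off;
	if (period > MT6323_MAX_PERIOD)
		return -EINVAL;	/* too long: let the core blink in software */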
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index ceceeb6a0e96..e6fd47365b58 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -419,7 +419,7 @@ static int netxbig_gpio_ext_get(struct device *dev,
static int netxbig_leds_get_of_pdata(struct device *dev,
struct netxbig_led_platform_data *pdata)
{
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev_of_node(dev);
struct device_node *gpio_ext_np;
struct platform_device *gpio_ext_pdev;
struct device *gpio_ext_dev;
@@ -485,7 +485,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
}
/* LEDs */
- num_leds = of_get_child_count(np);
+ num_leds = of_get_available_child_count(np);
if (!num_leds) {
dev_err(dev, "No LED subnodes found in DT\n");
return -ENODEV;
@@ -496,7 +496,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
return -ENOMEM;
led = leds;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
const char *string;
int *mode_val;
int num_modes;
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index bd806e7c8017..1677d66d8b0e 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -24,25 +24,16 @@ enum ns2_led_modes {
NS_V2_LED_SATA,
};
+/*
+ * If the size of this structure or the types of its members change,
+ * the filling of the modval array in ns2_led_register() must be
+ * updated accordingly.
+ */
struct ns2_led_modval {
- enum ns2_led_modes mode;
- int cmd_level;
- int slow_level;
-};
-
-struct ns2_led {
- const char *name;
- const char *default_trigger;
- struct gpio_desc *cmd;
- struct gpio_desc *slow;
- int num_modes;
- struct ns2_led_modval *modval;
-};
-
-struct ns2_led_platform_data {
- int num_leds;
- struct ns2_led *leds;
-};
+ u32 mode;
+ u32 cmd_level;
+ u32 slow_level;
+} __packed;
/*
* The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
@@ -51,7 +42,7 @@ struct ns2_led_platform_data {
* for the command/slow GPIOs corresponds to a LED mode.
*/
-struct ns2_led_data {
+struct ns2_led {
struct led_classdev cdev;
struct gpio_desc *cmd;
struct gpio_desc *slow;
@@ -62,77 +53,67 @@ struct ns2_led_data {
struct ns2_led_modval *modval;
};
-static int ns2_led_get_mode(struct ns2_led_data *led_dat,
- enum ns2_led_modes *mode)
+static int ns2_led_get_mode(struct ns2_led *led, enum ns2_led_modes *mode)
{
int i;
- int ret = -EINVAL;
int cmd_level;
int slow_level;
- cmd_level = gpiod_get_value_cansleep(led_dat->cmd);
- slow_level = gpiod_get_value_cansleep(led_dat->slow);
+ cmd_level = gpiod_get_value_cansleep(led->cmd);
+ slow_level = gpiod_get_value_cansleep(led->slow);
- for (i = 0; i < led_dat->num_modes; i++) {
- if (cmd_level == led_dat->modval[i].cmd_level &&
- slow_level == led_dat->modval[i].slow_level) {
- *mode = led_dat->modval[i].mode;
- ret = 0;
- break;
+ for (i = 0; i < led->num_modes; i++) {
+ if (cmd_level == led->modval[i].cmd_level &&
+ slow_level == led->modval[i].slow_level) {
+ *mode = led->modval[i].mode;
+ return 0;
}
}
- return ret;
+ return -EINVAL;
}
-static void ns2_led_set_mode(struct ns2_led_data *led_dat,
- enum ns2_led_modes mode)
+static void ns2_led_set_mode(struct ns2_led *led, enum ns2_led_modes mode)
{
int i;
- bool found = false;
unsigned long flags;
- for (i = 0; i < led_dat->num_modes; i++)
- if (mode == led_dat->modval[i].mode) {
- found = true;
+ for (i = 0; i < led->num_modes; i++)
+ if (mode == led->modval[i].mode)
break;
- }
- if (!found)
+ if (i == led->num_modes)
return;
- write_lock_irqsave(&led_dat->rw_lock, flags);
+ write_lock_irqsave(&led->rw_lock, flags);
- if (!led_dat->can_sleep) {
- gpiod_set_value(led_dat->cmd,
- led_dat->modval[i].cmd_level);
- gpiod_set_value(led_dat->slow,
- led_dat->modval[i].slow_level);
+ if (!led->can_sleep) {
+ gpiod_set_value(led->cmd, led->modval[i].cmd_level);
+ gpiod_set_value(led->slow, led->modval[i].slow_level);
goto exit_unlock;
}
- gpiod_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
- gpiod_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
+ gpiod_set_value_cansleep(led->cmd, led->modval[i].cmd_level);
+ gpiod_set_value_cansleep(led->slow, led->modval[i].slow_level);
exit_unlock:
- write_unlock_irqrestore(&led_dat->rw_lock, flags);
+ write_unlock_irqrestore(&led->rw_lock, flags);
}
static void ns2_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
- struct ns2_led_data *led_dat =
- container_of(led_cdev, struct ns2_led_data, cdev);
+ struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
enum ns2_led_modes mode;
if (value == LED_OFF)
mode = NS_V2_LED_OFF;
- else if (led_dat->sata)
+ else if (led->sata)
mode = NS_V2_LED_SATA;
else
mode = NS_V2_LED_ON;
- ns2_led_set_mode(led_dat, mode);
+ ns2_led_set_mode(led, mode);
}
static int ns2_led_set_blocking(struct led_classdev *led_cdev,
@@ -147,8 +128,7 @@ static ssize_t ns2_led_sata_store(struct device *dev,
const char *buff, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct ns2_led_data *led_dat =
- container_of(led_cdev, struct ns2_led_data, cdev);
+ struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
int ret;
unsigned long enable;
@@ -158,18 +138,18 @@ static ssize_t ns2_led_sata_store(struct device *dev,
enable = !!enable;
- if (led_dat->sata == enable)
+ if (led->sata == enable)
goto exit;
- led_dat->sata = enable;
+ led->sata = enable;
if (!led_get_brightness(led_cdev))
goto exit;
if (enable)
- ns2_led_set_mode(led_dat, NS_V2_LED_SATA);
+ ns2_led_set_mode(led, NS_V2_LED_SATA);
else
- ns2_led_set_mode(led_dat, NS_V2_LED_ON);
+ ns2_led_set_mode(led, NS_V2_LED_ON);
exit:
return count;
@@ -179,10 +159,9 @@ static ssize_t ns2_led_sata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct ns2_led_data *led_dat =
- container_of(led_cdev, struct ns2_led_data, cdev);
+ struct ns2_led *led = container_of(led_cdev, struct ns2_led, cdev);
- return sprintf(buf, "%d\n", led_dat->sata);
+ return sprintf(buf, "%d\n", led->sata);
}
static DEVICE_ATTR(sata, 0644, ns2_led_sata_show, ns2_led_sata_store);
@@ -193,147 +172,94 @@ static struct attribute *ns2_led_attrs[] = {
};
ATTRIBUTE_GROUPS(ns2_led);
-static int
-create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
- const struct ns2_led *template)
+static int ns2_led_register(struct device *dev, struct fwnode_handle *node,
+ struct ns2_led *led)
{
- int ret;
+ struct led_init_data init_data = {};
+ struct ns2_led_modval *modval;
enum ns2_led_modes mode;
+ int nmodes, ret;
+
+ led->cmd = devm_fwnode_gpiod_get_index(dev, node, "cmd", 0, GPIOD_ASIS,
+ fwnode_get_name(node));
+ if (IS_ERR(led->cmd))
+ return PTR_ERR(led->cmd);
+
+ led->slow = devm_fwnode_gpiod_get_index(dev, node, "slow", 0,
+ GPIOD_ASIS,
+ fwnode_get_name(node));
+ if (IS_ERR(led->slow))
+ return PTR_ERR(led->slow);
+
+ ret = fwnode_property_count_u32(node, "modes-map");
+ if (ret < 0 || ret % 3) {
+ dev_err(dev, "Missing or malformed modes-map for %pfw\n", node);
+ return -EINVAL;
+ }
+
+ nmodes = ret / 3;
+ modval = devm_kcalloc(dev, nmodes, sizeof(*modval), GFP_KERNEL);
+ if (!modval)
+ return -ENOMEM;
+
+ fwnode_property_read_u32_array(node, "modes-map", (void *)modval,
+ nmodes * 3);
+
+ rwlock_init(&led->rw_lock);
- rwlock_init(&led_dat->rw_lock);
-
- led_dat->cdev.name = template->name;
- led_dat->cdev.default_trigger = template->default_trigger;
- led_dat->cdev.blink_set = NULL;
- led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- led_dat->cdev.groups = ns2_led_groups;
- led_dat->cmd = template->cmd;
- led_dat->slow = template->slow;
- led_dat->can_sleep = gpiod_cansleep(led_dat->cmd) |
- gpiod_cansleep(led_dat->slow);
- if (led_dat->can_sleep)
- led_dat->cdev.brightness_set_blocking = ns2_led_set_blocking;
+ led->cdev.blink_set = NULL;
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->cdev.groups = ns2_led_groups;
+ led->can_sleep = gpiod_cansleep(led->cmd) || gpiod_cansleep(led->slow);
+ if (led->can_sleep)
+ led->cdev.brightness_set_blocking = ns2_led_set_blocking;
else
- led_dat->cdev.brightness_set = ns2_led_set;
- led_dat->modval = template->modval;
- led_dat->num_modes = template->num_modes;
+ led->cdev.brightness_set = ns2_led_set;
+ led->num_modes = nmodes;
+ led->modval = modval;
- ret = ns2_led_get_mode(led_dat, &mode);
+ ret = ns2_led_get_mode(led, &mode);
if (ret < 0)
return ret;
/* Set LED initial state. */
- led_dat->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
- led_dat->cdev.brightness =
- (mode == NS_V2_LED_OFF) ? LED_OFF : LED_FULL;
+ led->sata = (mode == NS_V2_LED_SATA) ? 1 : 0;
+ led->cdev.brightness = (mode == NS_V2_LED_OFF) ? LED_OFF : LED_FULL;
- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
- if (ret < 0)
- return ret;
+ init_data.fwnode = node;
- return 0;
-}
+ ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (ret)
+ dev_err(dev, "Failed to register LED for node %pfw\n", node);
-static void delete_ns2_led(struct ns2_led_data *led_dat)
-{
- led_classdev_unregister(&led_dat->cdev);
+ return ret;
}
-#ifdef CONFIG_OF_GPIO
-/*
- * Translate OpenFirmware node properties into platform_data.
- */
-static int
-ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
+static int ns2_led_probe(struct platform_device *pdev)
{
- struct device_node *np = dev->of_node;
- struct device_node *child;
- struct ns2_led *led, *leds;
- int ret, num_leds = 0;
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct ns2_led *leds;
+ int count;
+ int ret;
- num_leds = of_get_child_count(np);
- if (!num_leds)
+ count = device_get_child_node_count(dev);
+ if (!count)
return -ENODEV;
- leds = devm_kcalloc(dev, num_leds, sizeof(struct ns2_led),
- GFP_KERNEL);
+ leds = devm_kzalloc(dev, array_size(sizeof(*leds), count), GFP_KERNEL);
if (!leds)
return -ENOMEM;
- led = leds;
- for_each_child_of_node(np, child) {
- const char *string;
- int i, num_modes;
- struct ns2_led_modval *modval;
- struct gpio_desc *gd;
-
- ret = of_property_read_string(child, "label", &string);
- led->name = (ret == 0) ? string : child->name;
-
- gd = gpiod_get_from_of_node(child, "cmd-gpio", 0,
- GPIOD_ASIS, led->name);
- if (IS_ERR(gd)) {
- ret = PTR_ERR(gd);
- goto err_node_put;
- }
- led->cmd = gd;
- gd = gpiod_get_from_of_node(child, "slow-gpio", 0,
- GPIOD_ASIS, led->name);
- if (IS_ERR(gd)) {
- ret = PTR_ERR(gd);
- goto err_node_put;
- }
- led->slow = gd;
-
- ret = of_property_read_string(child, "linux,default-trigger",
- &string);
- if (ret == 0)
- led->default_trigger = string;
-
- ret = of_property_count_u32_elems(child, "modes-map");
- if (ret < 0 || ret % 3) {
- dev_err(dev,
- "Missing or malformed modes-map property\n");
- ret = -EINVAL;
- goto err_node_put;
- }
-
- num_modes = ret / 3;
- modval = devm_kcalloc(dev,
- num_modes,
- sizeof(struct ns2_led_modval),
- GFP_KERNEL);
- if (!modval) {
- ret = -ENOMEM;
- goto err_node_put;
- }
-
- for (i = 0; i < num_modes; i++) {
- of_property_read_u32_index(child,
- "modes-map", 3 * i,
- (u32 *) &modval[i].mode);
- of_property_read_u32_index(child,
- "modes-map", 3 * i + 1,
- (u32 *) &modval[i].cmd_level);
- of_property_read_u32_index(child,
- "modes-map", 3 * i + 2,
- (u32 *) &modval[i].slow_level);
+ device_for_each_child_node(dev, child) {
+ ret = ns2_led_register(dev, child, leds++);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
}
-
- led->num_modes = num_modes;
- led->modval = modval;
-
- led++;
}
- pdata->leds = leds;
- pdata->num_leds = num_leds;
-
return 0;
-
-err_node_put:
- of_node_put(child);
- return ret;
}
static const struct of_device_id of_ns2_leds_match[] = {
@@ -341,76 +267,12 @@ static const struct of_device_id of_ns2_leds_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_ns2_leds_match);
-#endif /* CONFIG_OF_GPIO */
-
-struct ns2_led_priv {
- int num_leds;
- struct ns2_led_data leds_data[];
-};
-
-static int ns2_led_probe(struct platform_device *pdev)
-{
- struct ns2_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct ns2_led_priv *priv;
- int i;
- int ret;
-
-#ifdef CONFIG_OF_GPIO
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct ns2_led_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- ret = ns2_leds_get_of_pdata(&pdev->dev, pdata);
- if (ret)
- return ret;
- }
-#else
- if (!pdata)
- return -EINVAL;
-#endif /* CONFIG_OF_GPIO */
-
- priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds_data, pdata->num_leds), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->num_leds = pdata->num_leds;
-
- for (i = 0; i < priv->num_leds; i++) {
- ret = create_ns2_led(pdev, &priv->leds_data[i],
- &pdata->leds[i]);
- if (ret < 0) {
- for (i = i - 1; i >= 0; i--)
- delete_ns2_led(&priv->leds_data[i]);
- return ret;
- }
- }
-
- platform_set_drvdata(pdev, priv);
-
- return 0;
-}
-
-static int ns2_led_remove(struct platform_device *pdev)
-{
- int i;
- struct ns2_led_priv *priv;
-
- priv = platform_get_drvdata(pdev);
-
- for (i = 0; i < priv->num_leds; i++)
- delete_ns2_led(&priv->leds_data[i]);
-
- return 0;
-}
static struct platform_driver ns2_led_driver = {
.probe = ns2_led_probe,
- .remove = ns2_led_remove,
.driver = {
.name = "leds-ns2",
- .of_match_table = of_match_ptr(of_ns2_leds_match),
+ .of_match_table = of_ns2_leds_match,
},
};
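
The rewritten ns2_led_register() reads "modes-map" in one shot, which is
why struct ns2_led_modval was reduced to three packed u32s: the property
is a flat list of <mode cmd-level slow-level> triplets copied straight
over the array, and the cast is only valid while the struct layout
matches the cell layout exactly. A sketch of the correspondence
(illustrative only):

	/*
	 * modes-map = <NS_V2_LED_OFF  1 0>,
	 *             <NS_V2_LED_ON   0 1>,
	 *             <NS_V2_LED_SATA 0 0>;	-> 9 cells, 3 modes
	 */
	ret = fwnode_property_count_u32(node, "modes-map");	/* 9 */
	nmodes = ret / 3;					/* 3 */
	fwnode_property_read_u32_array(node, "modes-map", (u32 *)modval,
				       nmodes * 3);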
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7d515d5e57bd..27d027165472 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -27,6 +27,8 @@
#define PCA9532_REG_PWM(m, i) (PCA9532_REG_OFFSET(m) + 0x2 + (i) * 2)
#define LED_REG(m, led) (PCA9532_REG_OFFSET(m) + 0x5 + (led >> 2))
#define LED_NUM(led) (led & 0x3)
+#define LED_SHIFT(led) (LED_NUM(led) * 2)
+#define LED_MASK(led) (0x3 << LED_SHIFT(led))
#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
@@ -162,9 +164,9 @@ static void pca9532_setled(struct pca9532_led *led)
mutex_lock(&data->update_lock);
reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id));
/* zero led bits */
- reg = reg & ~(0x3<<LED_NUM(led->id)*2);
+ reg = reg & ~LED_MASK(led->id);
/* set the new value */
- reg = reg | (led->state << LED_NUM(led->id)*2);
+ reg = reg | (led->state << LED_SHIFT(led->id));
i2c_smbus_write_byte_data(client, LED_REG(maxleds, led->id), reg);
mutex_unlock(&data->update_lock);
}
@@ -260,7 +262,7 @@ static enum pca9532_state pca9532_getled(struct pca9532_led *led)
mutex_lock(&data->update_lock);
reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id));
- ret = reg >> LED_NUM(led->id)/2;
+ ret = (reg & LED_MASK(led->id)) >> LED_SHIFT(led->id);
mutex_unlock(&data->update_lock);
return ret;
}
@@ -478,7 +480,12 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
if (!pdata)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ of_property_read_u8_array(np, "nxp,pwm", &pdata->pwm[0],
+ ARRAY_SIZE(pdata->pwm));
+ of_property_read_u8_array(np, "nxp,psc", &pdata->psc[0],
+ ARRAY_SIZE(pdata->psc));
+
+ for_each_available_child_of_node(np, child) {
if (of_property_read_string(child, "label",
&pdata->leds[i].name))
pdata->leds[i].name = child->name;
@@ -507,7 +514,7 @@ static int pca9532_probe(struct i2c_client *client,
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
+ struct device_node *np = dev_of_node(&client->dev);
if (!pca9532_pdata) {
if (np) {
@@ -545,13 +552,8 @@ static int pca9532_probe(struct i2c_client *client,
static int pca9532_remove(struct i2c_client *client)
{
struct pca9532_data *data = i2c_get_clientdata(client);
- int err;
- err = pca9532_destroy_devices(data, data->chip_info->num_leds);
- if (err)
- return err;
-
- return 0;
+ return pca9532_destroy_devices(data, data->chip_info->num_leds);
}
module_i2c_driver(pca9532_driver);
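
The new LED_SHIFT()/LED_MASK() helpers make the register math explicit:
each LED selector register packs four LEDs at two bits apiece, and the
old pca9532_getled() shifted by LED_NUM(id)/2 and never masked, so it
could return bits belonging to other LEDs. With the helpers, an update
is an ordinary read-modify-write (sketch mirroring the patched code):

	u8 reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id));

	reg &= ~LED_MASK(led->id);		/* clear this LED's 2-bit field */
	reg |= led->state << LED_SHIFT(led->id);	/* program the new state */
	i2c_smbus_write_byte_data(client, LED_REG(maxleds, led->id), reg);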
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 131f8e922ade..7087ca4592fc 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -65,6 +65,7 @@ enum pca955x_type {
pca9550,
pca9551,
pca9552,
+ ibm_pca9552,
pca9553,
};
@@ -90,6 +91,11 @@ static struct pca955x_chipdef pca955x_chipdefs[] = {
.slv_addr = /* 1100xxx */ 0x60,
.slv_addr_shift = 3,
},
+ [ibm_pca9552] = {
+ .bits = 16,
+ .slv_addr = /* 0110xxx */ 0x30,
+ .slv_addr_shift = 3,
+ },
[pca9553] = {
.bits = 4,
.slv_addr = /* 110001x */ 0x62,
@@ -101,6 +107,7 @@ static const struct i2c_device_id pca955x_id[] = {
{ "pca9550", pca9550 },
{ "pca9551", pca9551 },
{ "pca9552", pca9552 },
+ { "ibm-pca9552", ibm_pca9552 },
{ "pca9553", pca9553 },
{ }
};
@@ -412,6 +419,7 @@ static const struct of_device_id of_pca955x_match[] = {
{ .compatible = "nxp,pca9550", .data = (void *)pca9550 },
{ .compatible = "nxp,pca9551", .data = (void *)pca9551 },
{ .compatible = "nxp,pca9552", .data = (void *)pca9552 },
+ { .compatible = "ibm,pca9552", .data = (void *)ibm_pca9552 },
{ .compatible = "nxp,pca9553", .data = (void *)pca9553 },
{},
};
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index d288acbc99c7..00aecd67e348 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -32,7 +32,6 @@
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/platform_data/leds-pca963x.h>
/* LED select registers determine the source that drives LED outputs */
#define PCA963X_LED_OFF 0x0 /* LED driver off */
@@ -96,142 +95,148 @@ static const struct i2c_device_id pca963x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca963x_id);
-struct pca963x_led;
-
-struct pca963x {
- struct pca963x_chipdef *chipdef;
- struct mutex mutex;
- struct i2c_client *client;
- struct pca963x_led *leds;
- unsigned long leds_on;
-};
+struct pca963x;
struct pca963x_led {
struct pca963x *chip;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
- char name[32];
u8 gdc;
u8 gfrq;
};
-static int pca963x_brightness(struct pca963x_led *pca963x,
- enum led_brightness brightness)
+struct pca963x {
+ struct pca963x_chipdef *chipdef;
+ struct mutex mutex;
+ struct i2c_client *client;
+ unsigned long leds_on;
+ struct pca963x_led leds[];
+};
+
+static int pca963x_brightness(struct pca963x_led *led,
+ enum led_brightness brightness)
{
- u8 ledout_addr = pca963x->chip->chipdef->ledout_base
- + (pca963x->led_num / 4);
- u8 ledout;
- int shift = 2 * (pca963x->led_num % 4);
- u8 mask = 0x3 << shift;
+ struct i2c_client *client = led->chip->client;
+ struct pca963x_chipdef *chipdef = led->chip->chipdef;
+ u8 ledout_addr, ledout, mask, val;
+ int shift;
int ret;
- ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
+ ledout_addr = chipdef->ledout_base + (led->led_num / 4);
+ shift = 2 * (led->led_num % 4);
+ mask = 0x3 << shift;
+ ledout = i2c_smbus_read_byte_data(client, ledout_addr);
+
switch (brightness) {
case LED_FULL:
- ret = i2c_smbus_write_byte_data(pca963x->chip->client,
- ledout_addr,
- (ledout & ~mask) | (PCA963X_LED_ON << shift));
+ val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+ ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
case LED_OFF:
- ret = i2c_smbus_write_byte_data(pca963x->chip->client,
- ledout_addr, ledout & ~mask);
+ val = ledout & ~mask;
+ ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
default:
- ret = i2c_smbus_write_byte_data(pca963x->chip->client,
- PCA963X_PWM_BASE + pca963x->led_num,
- brightness);
+ ret = i2c_smbus_write_byte_data(client,
+ PCA963X_PWM_BASE +
+ led->led_num,
+ brightness);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte_data(pca963x->chip->client,
- ledout_addr,
- (ledout & ~mask) | (PCA963X_LED_PWM << shift));
+
+ val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+ ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
}
return ret;
}
-static void pca963x_blink(struct pca963x_led *pca963x)
+static void pca963x_blink(struct pca963x_led *led)
{
- u8 ledout_addr = pca963x->chip->chipdef->ledout_base +
- (pca963x->led_num / 4);
- u8 ledout;
- u8 mode2 = i2c_smbus_read_byte_data(pca963x->chip->client,
- PCA963X_MODE2);
- int shift = 2 * (pca963x->led_num % 4);
- u8 mask = 0x3 << shift;
+ struct i2c_client *client = led->chip->client;
+ struct pca963x_chipdef *chipdef = led->chip->chipdef;
+ u8 ledout_addr, ledout, mask, val, mode2;
+ int shift;
+
+ ledout_addr = chipdef->ledout_base + (led->led_num / 4);
+ shift = 2 * (led->led_num % 4);
+ mask = 0x3 << shift;
+ mode2 = i2c_smbus_read_byte_data(client, PCA963X_MODE2);
- i2c_smbus_write_byte_data(pca963x->chip->client,
- pca963x->chip->chipdef->grppwm, pca963x->gdc);
+ i2c_smbus_write_byte_data(client, chipdef->grppwm, led->gdc);
- i2c_smbus_write_byte_data(pca963x->chip->client,
- pca963x->chip->chipdef->grpfreq, pca963x->gfrq);
+ i2c_smbus_write_byte_data(client, chipdef->grpfreq, led->gfrq);
if (!(mode2 & PCA963X_MODE2_DMBLNK))
- i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
- mode2 | PCA963X_MODE2_DMBLNK);
-
- mutex_lock(&pca963x->chip->mutex);
- ledout = i2c_smbus_read_byte_data(pca963x->chip->client, ledout_addr);
- if ((ledout & mask) != (PCA963X_LED_GRP_PWM << shift))
- i2c_smbus_write_byte_data(pca963x->chip->client, ledout_addr,
- (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift));
- mutex_unlock(&pca963x->chip->mutex);
+ i2c_smbus_write_byte_data(client, PCA963X_MODE2,
+ mode2 | PCA963X_MODE2_DMBLNK);
+
+ mutex_lock(&led->chip->mutex);
+
+ ledout = i2c_smbus_read_byte_data(client, ledout_addr);
+ if ((ledout & mask) != (PCA963X_LED_GRP_PWM << shift)) {
+ val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+ i2c_smbus_write_byte_data(client, ledout_addr, val);
+ }
+
+ mutex_unlock(&led->chip->mutex);
}
-static int pca963x_power_state(struct pca963x_led *pca963x)
+static int pca963x_power_state(struct pca963x_led *led)
{
- unsigned long *leds_on = &pca963x->chip->leds_on;
- unsigned long cached_leds = pca963x->chip->leds_on;
+ struct i2c_client *client = led->chip->client;
+ unsigned long *leds_on = &led->chip->leds_on;
+ unsigned long cached_leds = *leds_on;
- if (pca963x->led_cdev.brightness)
- set_bit(pca963x->led_num, leds_on);
+ if (led->led_cdev.brightness)
+ set_bit(led->led_num, leds_on);
else
- clear_bit(pca963x->led_num, leds_on);
+ clear_bit(led->led_num, leds_on);
if (!(*leds_on) != !cached_leds)
- return i2c_smbus_write_byte_data(pca963x->chip->client,
- PCA963X_MODE1, *leds_on ? 0 : BIT(4));
+ return i2c_smbus_write_byte_data(client, PCA963X_MODE1,
+ *leds_on ? 0 : BIT(4));
return 0;
}
static int pca963x_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+ enum led_brightness value)
{
- struct pca963x_led *pca963x;
+ struct pca963x_led *led;
int ret;
- pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
+ led = container_of(led_cdev, struct pca963x_led, led_cdev);
- mutex_lock(&pca963x->chip->mutex);
+ mutex_lock(&led->chip->mutex);
- ret = pca963x_brightness(pca963x, value);
+ ret = pca963x_brightness(led, value);
if (ret < 0)
goto unlock;
- ret = pca963x_power_state(pca963x);
+ ret = pca963x_power_state(led);
unlock:
- mutex_unlock(&pca963x->chip->mutex);
+ mutex_unlock(&led->chip->mutex);
return ret;
}
-static unsigned int pca963x_period_scale(struct pca963x_led *pca963x,
- unsigned int val)
+static unsigned int pca963x_period_scale(struct pca963x_led *led,
+ unsigned int val)
{
- unsigned int scaling = pca963x->chip->chipdef->scaling;
+ unsigned int scaling = led->chip->chipdef->scaling;
return scaling ? DIV_ROUND_CLOSEST(val * scaling, 1000) : val;
}
static int pca963x_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on, unsigned long *delay_off)
+ unsigned long *delay_on, unsigned long *delay_off)
{
- struct pca963x_led *pca963x;
unsigned long time_on, time_off, period;
+ struct pca963x_led *led;
u8 gdc, gfrq;
- pca963x = container_of(led_cdev, struct pca963x_led, led_cdev);
+ led = container_of(led_cdev, struct pca963x_led, led_cdev);
time_on = *delay_on;
time_off = *delay_off;
@@ -242,14 +247,14 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
time_off = 500;
}
- period = pca963x_period_scale(pca963x, time_on + time_off);
+ period = pca963x_period_scale(led, time_on + time_off);
	/* If period not supported by hardware, default to something sane. */
if ((period < PCA963X_BLINK_PERIOD_MIN) ||
(period > PCA963X_BLINK_PERIOD_MAX)) {
time_on = 500;
time_off = 500;
- period = pca963x_period_scale(pca963x, 1000);
+ period = pca963x_period_scale(led, 1000);
}
/*
@@ -257,7 +262,7 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
* (time_on / period) = (GDC / 256) ->
* GDC = ((time_on * 256) / period)
*/
- gdc = (pca963x_period_scale(pca963x, time_on) * 256) / period;
+ gdc = (pca963x_period_scale(led, time_on) * 256) / period;
/*
* From manual: period = ((GFRQ + 1) / 24) in seconds.
@@ -266,10 +271,10 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
*/
gfrq = (period * 24 / 1000) - 1;
- pca963x->gdc = gdc;
- pca963x->gfrq = gfrq;
+ led->gdc = gdc;
+ led->gfrq = gfrq;
- pca963x_blink(pca963x);
+ pca963x_blink(led);
*delay_on = time_on;
*delay_off = time_off;
@@ -277,72 +282,84 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
return 0;
}
-static struct pca963x_platform_data *
-pca963x_get_pdata(struct i2c_client *client, struct pca963x_chipdef *chip)
+static int pca963x_register_leds(struct i2c_client *client,
+ struct pca963x *chip)
{
- struct pca963x_platform_data *pdata;
- struct led_info *pca963x_leds;
+ struct pca963x_chipdef *chipdef = chip->chipdef;
+ struct pca963x_led *led = chip->leds;
+ struct device *dev = &client->dev;
struct fwnode_handle *child;
- int count;
-
- count = device_get_child_node_count(&client->dev);
- if (!count || count > chip->n_leds)
- return ERR_PTR(-ENODEV);
-
- pca963x_leds = devm_kcalloc(&client->dev,
- chip->n_leds, sizeof(struct led_info), GFP_KERNEL);
- if (!pca963x_leds)
- return ERR_PTR(-ENOMEM);
-
- device_for_each_child_node(&client->dev, child) {
- struct led_info led = {};
- u32 reg;
- int res;
-
- res = fwnode_property_read_u32(child, "reg", &reg);
- if ((res != 0) || (reg >= chip->n_leds))
- continue;
+ bool hw_blink;
+ s32 mode2;
+ u32 reg;
+ int ret;
- res = fwnode_property_read_string(child, "label", &led.name);
- if ((res != 0) && is_of_node(child))
- led.name = to_of_node(child)->name;
+ if (device_property_read_u32(dev, "nxp,period-scale",
+ &chipdef->scaling))
+ chipdef->scaling = 1000;
- fwnode_property_read_string(child, "linux,default-trigger",
- &led.default_trigger);
+ hw_blink = device_property_read_bool(dev, "nxp,hw-blink");
- pca963x_leds[reg] = led;
- }
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct pca963x_platform_data), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->leds.leds = pca963x_leds;
- pdata->leds.num_leds = chip->n_leds;
+ mode2 = i2c_smbus_read_byte_data(client, PCA963X_MODE2);
+ if (mode2 < 0)
+ return mode2;
/* default to open-drain unless totem pole (push-pull) is specified */
- if (device_property_read_bool(&client->dev, "nxp,totem-pole"))
- pdata->outdrv = PCA963X_TOTEM_POLE;
+ if (device_property_read_bool(dev, "nxp,totem-pole"))
+ mode2 |= PCA963X_MODE2_OUTDRV;
else
- pdata->outdrv = PCA963X_OPEN_DRAIN;
+ mode2 &= ~PCA963X_MODE2_OUTDRV;
- /* default to software blinking unless hardware blinking is specified */
- if (device_property_read_bool(&client->dev, "nxp,hw-blink"))
- pdata->blink_type = PCA963X_HW_BLINK;
+ /* default to non-inverted output, unless inverted is specified */
+ if (device_property_read_bool(dev, "nxp,inverted-out"))
+ mode2 |= PCA963X_MODE2_INVRT;
else
- pdata->blink_type = PCA963X_SW_BLINK;
+ mode2 &= ~PCA963X_MODE2_INVRT;
+
+ ret = i2c_smbus_write_byte_data(client, PCA963X_MODE2, mode2);
+ if (ret < 0)
+ return ret;
+
+ device_for_each_child_node(dev, child) {
+ struct led_init_data init_data = {};
+ char default_label[32];
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= chipdef->n_leds) {
+ dev_err(dev, "Invalid 'reg' property for node %pfw\n",
+ child);
+ ret = -EINVAL;
+ goto err;
+ }
- if (device_property_read_u32(&client->dev, "nxp,period-scale",
- &chip->scaling))
- chip->scaling = 1000;
+ led->led_num = reg;
+ led->chip = chip;
+ led->led_cdev.brightness_set_blocking = pca963x_led_set;
+ if (hw_blink)
+ led->led_cdev.blink_set = pca963x_blink_set;
+
+ init_data.fwnode = child;
+ /* for backwards compatibility */
+ init_data.devicename = "pca963x";
+ snprintf(default_label, sizeof(default_label), "%d:%.2x:%u",
+ client->adapter->nr, client->addr, reg);
+ init_data.default_label = default_label;
+
+ ret = devm_led_classdev_register_ext(dev, &led->led_cdev,
+ &init_data);
+ if (ret) {
+ dev_err(dev, "Failed to register LED for node %pfw\n",
+ child);
+ goto err;
+ }
- /* default to non-inverted output, unless inverted is specified */
- if (device_property_read_bool(&client->dev, "nxp,inverted-out"))
- pdata->dir = PCA963X_INVERTED;
- else
- pdata->dir = PCA963X_NORMAL;
+ ++led;
+ }
- return pdata;
+ return 0;
+err:
+ fwnode_handle_put(child);
+ return ret;
}
static const struct of_device_id of_pca963x_match[] = {
@@ -355,119 +372,40 @@ static const struct of_device_id of_pca963x_match[] = {
MODULE_DEVICE_TABLE(of, of_pca963x_match);
static int pca963x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
- struct pca963x *pca963x_chip;
- struct pca963x_led *pca963x;
- struct pca963x_platform_data *pdata;
- struct pca963x_chipdef *chip;
- int i, err;
-
- chip = &pca963x_chipdefs[id->driver_data];
- pdata = dev_get_platdata(&client->dev);
-
- if (!pdata) {
- pdata = pca963x_get_pdata(client, chip);
- if (IS_ERR(pdata)) {
- dev_warn(&client->dev, "could not parse configuration\n");
- pdata = NULL;
- }
- }
+ struct device *dev = &client->dev;
+ struct pca963x_chipdef *chipdef;
+ struct pca963x *chip;
+ int i, count;
- if (pdata && (pdata->leds.num_leds < 1 ||
- pdata->leds.num_leds > chip->n_leds)) {
- dev_err(&client->dev, "board info must claim 1-%d LEDs",
- chip->n_leds);
+ chipdef = &pca963x_chipdefs[id->driver_data];
+
+ count = device_get_child_node_count(dev);
+ if (!count || count > chipdef->n_leds) {
+ dev_err(dev, "Node %pfw must define between 1 and %d LEDs\n",
+ dev_fwnode(dev), chipdef->n_leds);
return -EINVAL;
}
- pca963x_chip = devm_kzalloc(&client->dev, sizeof(*pca963x_chip),
- GFP_KERNEL);
- if (!pca963x_chip)
- return -ENOMEM;
- pca963x = devm_kcalloc(&client->dev, chip->n_leds, sizeof(*pca963x),
- GFP_KERNEL);
- if (!pca963x)
+ chip = devm_kzalloc(dev, struct_size(chip, leds, count), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
- i2c_set_clientdata(client, pca963x_chip);
+ i2c_set_clientdata(client, chip);
- mutex_init(&pca963x_chip->mutex);
- pca963x_chip->chipdef = chip;
- pca963x_chip->client = client;
- pca963x_chip->leds = pca963x;
+ mutex_init(&chip->mutex);
+ chip->chipdef = chipdef;
+ chip->client = client;
	/* Turn off LEDs by default */
- for (i = 0; i < chip->n_leds / 4; i++)
- i2c_smbus_write_byte_data(client, chip->ledout_base + i, 0x00);
-
- for (i = 0; i < chip->n_leds; i++) {
- pca963x[i].led_num = i;
- pca963x[i].chip = pca963x_chip;
-
- /* Platform data can specify LED names and default triggers */
- if (pdata && i < pdata->leds.num_leds) {
- if (pdata->leds.leds[i].name)
- snprintf(pca963x[i].name,
- sizeof(pca963x[i].name), "pca963x:%s",
- pdata->leds.leds[i].name);
- if (pdata->leds.leds[i].default_trigger)
- pca963x[i].led_cdev.default_trigger =
- pdata->leds.leds[i].default_trigger;
- }
- if (!pdata || i >= pdata->leds.num_leds ||
- !pdata->leds.leds[i].name)
- snprintf(pca963x[i].name, sizeof(pca963x[i].name),
- "pca963x:%d:%.2x:%d", client->adapter->nr,
- client->addr, i);
-
- pca963x[i].led_cdev.name = pca963x[i].name;
- pca963x[i].led_cdev.brightness_set_blocking = pca963x_led_set;
-
- if (pdata && pdata->blink_type == PCA963X_HW_BLINK)
- pca963x[i].led_cdev.blink_set = pca963x_blink_set;
-
- err = led_classdev_register(&client->dev, &pca963x[i].led_cdev);
- if (err < 0)
- goto exit;
- }
+ for (i = 0; i < chipdef->n_leds / 4; i++)
+ i2c_smbus_write_byte_data(client, chipdef->ledout_base + i, 0x00);
/* Disable LED all-call address, and power down initially */
i2c_smbus_write_byte_data(client, PCA963X_MODE1, BIT(4));
- if (pdata) {
- u8 mode2 = i2c_smbus_read_byte_data(pca963x->chip->client,
- PCA963X_MODE2);
- /* Configure output: open-drain or totem pole (push-pull) */
- if (pdata->outdrv == PCA963X_OPEN_DRAIN)
- mode2 &= ~PCA963X_MODE2_OUTDRV;
- else
- mode2 |= PCA963X_MODE2_OUTDRV;
- /* Configure direction: normal or inverted */
- if (pdata->dir == PCA963X_INVERTED)
- mode2 |= PCA963X_MODE2_INVRT;
- i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
- mode2);
- }
-
- return 0;
-
-exit:
- while (i--)
- led_classdev_unregister(&pca963x[i].led_cdev);
-
- return err;
-}
-
-static int pca963x_remove(struct i2c_client *client)
-{
- struct pca963x *pca963x = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < pca963x->chipdef->n_leds; i++)
- led_classdev_unregister(&pca963x->leds[i].led_cdev);
-
- return 0;
+ return pca963x_register_leds(client, chip);
}
static struct i2c_driver pca963x_driver = {
@@ -476,7 +414,6 @@ static struct i2c_driver pca963x_driver = {
.of_match_table = of_pca963x_match,
},
.probe = pca963x_probe,
- .remove = pca963x_remove,
.id_table = pca963x_id,
};
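The pca963x hunk above folds the two allocations into one sized with struct_size() over a trailing flexible array. A minimal sketch of that pattern, with illustrative names (foo, foo_led, foo_alloc) rather than the driver's own:

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/property.h>
#include <linux/slab.h>

struct foo_led {
        int num;
};

struct foo {
        int n_leds;
        struct foo_led leds[];          /* flexible array member */
};

static int foo_alloc(struct device *dev, struct foo **out)
{
        unsigned int count = device_get_child_node_count(dev);
        struct foo *chip;

        if (!count)
                return -EINVAL;

        /* one overflow-checked allocation: header plus 'count' entries */
        chip = devm_kzalloc(dev, struct_size(chip, leds, count), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->n_leds = count;
        *out = chip;
        return 0;
}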
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index 7869ccdf70ce..fb2ab72c0c40 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -87,36 +87,36 @@ static enum led_brightness pm8058_led_get(struct led_classdev *cled)
static int pm8058_led_probe(struct platform_device *pdev)
{
+ struct led_init_data init_data = {};
+ struct device *dev = &pdev->dev;
struct pm8058_led *led;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np;
int ret;
struct regmap *map;
const char *state;
enum led_brightness maxbright;
- led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(dev);
- map = dev_get_regmap(pdev->dev.parent, NULL);
+ map = dev_get_regmap(dev->parent, NULL);
if (!map) {
- dev_err(&pdev->dev, "Parent regmap unavailable.\n");
+ dev_err(dev, "Parent regmap unavailable.\n");
return -ENXIO;
}
led->map = map;
+ np = dev_of_node(dev);
+
ret = of_property_read_u32(np, "reg", &led->reg);
if (ret) {
- dev_err(&pdev->dev, "no register offset specified\n");
+ dev_err(dev, "no register offset specified\n");
return -EINVAL;
}
- /* Use label else node name */
- led->cdev.name = of_get_property(np, "label", NULL) ? : np->name;
- led->cdev.default_trigger =
- of_get_property(np, "linux,default-trigger", NULL);
led->cdev.brightness_set = pm8058_led_set;
led->cdev.brightness_get = pm8058_led_get;
if (led->ledtype == PM8058_LED_TYPE_COMMON)
@@ -142,14 +142,13 @@ static int pm8058_led_probe(struct platform_device *pdev)
led->ledtype == PM8058_LED_TYPE_FLASH)
led->cdev.flags = LED_CORE_SUSPENDRESUME;
- ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
- if (ret) {
- dev_err(&pdev->dev, "unable to register led \"%s\"\n",
- led->cdev.name);
- return ret;
- }
+ init_data.fwnode = of_fwnode_handle(np);
+
+ ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
+ if (ret)
+ dev_err(dev, "Failed to register LED for %pOF\n", np);
- return 0;
+ return ret;
}
static const struct of_device_id pm8058_leds_id_table[] = {
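A hedged sketch of the led_init_data registration adopted above: the LED core derives the name and default trigger from the fwnode, so the driver no longer reads "label" or "linux,default-trigger" by hand (foo_register is an illustrative name):

#include <linux/leds.h>
#include <linux/of.h>

static int foo_register(struct device *dev, struct led_classdev *cdev,
                        struct device_node *np)
{
        struct led_init_data init_data = {
                .fwnode = of_fwnode_handle(np),
        };

        /* the LED core composes the name (and trigger) from the fwnode */
        return devm_led_classdev_register_ext(dev, cdev, &init_data);
}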
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index cd43d5dff7f4..743e2cdd0891 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -250,7 +250,7 @@ static int powernv_led_classdev(struct platform_device *pdev,
struct powernv_led_data *powernv_led;
struct device *dev = &pdev->dev;
- for_each_child_of_node(led_node, np) {
+ for_each_available_child_of_node(led_node, np) {
p = of_find_property(np, "led-types", NULL);
while ((cur = of_prop_next_string(p, cur)) != NULL) {
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index ef7b91bd2064..f53f9309ca6c 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -20,16 +20,10 @@
struct led_pwm {
const char *name;
- const char *default_trigger;
u8 active_low;
unsigned int max_brightness;
};
-struct led_pwm_platform_data {
- int num_leds;
- struct led_pwm *leds;
-};
-
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
@@ -61,36 +55,31 @@ static int led_pwm_set(struct led_classdev *led_cdev,
return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
+__attribute__((nonnull))
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
+ struct led_init_data init_data = { .fwnode = fwnode };
int ret;
led_data->active_low = led->active_low;
led_data->cdev.name = led->name;
- led_data->cdev.default_trigger = led->default_trigger;
led_data->cdev.brightness = LED_OFF;
led_data->cdev.max_brightness = led->max_brightness;
led_data->cdev.flags = LED_CORE_SUSPENDRESUME;
- if (fwnode)
- led_data->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
- else
- led_data->pwm = devm_pwm_get(dev, led->name);
- if (IS_ERR(led_data->pwm)) {
- ret = PTR_ERR(led_data->pwm);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "unable to request PWM for %s: %d\n",
- led->name, ret);
- return ret;
- }
+ led_data->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
+ if (IS_ERR(led_data->pwm))
+ return dev_err_probe(dev, PTR_ERR(led_data->pwm),
+ "unable to request PWM for %s\n",
+ led->name);
led_data->cdev.brightness_set_blocking = led_pwm_set;
pwm_init_state(led_data->pwm, &led_data->pwmstate);
- ret = devm_led_classdev_register(dev, &led_data->cdev);
+ ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
@@ -126,9 +115,6 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
return -EINVAL;
}
- fwnode_property_read_string(fwnode, "linux,default-trigger",
- &led.default_trigger);
-
led.active_low = fwnode_property_read_bool(fwnode,
"active-low");
fwnode_property_read_u32(fwnode, "max-brightness",
@@ -146,15 +132,11 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
static int led_pwm_probe(struct platform_device *pdev)
{
- struct led_pwm_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct led_pwm_priv *priv;
- int count, i;
int ret = 0;
+ int count;
- if (pdata)
- count = pdata->num_leds;
- else
- count = device_get_child_node_count(&pdev->dev);
+ count = device_get_child_node_count(&pdev->dev);
if (!count)
return -EINVAL;
@@ -164,16 +146,7 @@ static int led_pwm_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- if (pdata) {
- for (i = 0; i < count; i++) {
- ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i],
- NULL);
- if (ret)
- break;
- }
- } else {
- ret = led_pwm_create_fwnode(&pdev->dev, priv);
- }
+ ret = led_pwm_create_fwnode(&pdev->dev, priv);
if (ret)
return ret;
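Several hunks in this series collapse the -EPROBE_DEFER special case into dev_err_probe(), which stays silent on deferral, logs real errors, and returns the errno it was given. A minimal sketch (foo_get_pwm is an assumed name):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int foo_get_pwm(struct device *dev, struct fwnode_handle *fwnode,
                       struct pwm_device **pwm)
{
        *pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
        if (IS_ERR(*pwm))
                /* silent for -EPROBE_DEFER, dev_err() otherwise */
                return dev_err_probe(dev, PTR_ERR(*pwm),
                                     "unable to request PWM\n");

        return 0;
}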
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 9b5e67664ba3..3c0c7aa63b8c 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -16,8 +16,6 @@
#include <linux/module.h>
#include <linux/platform_data/leds-s3c24xx.h>
-#include <mach/regs-gpio.h>
-
/* our context */
struct s3c24xx_gpio_led {
diff --git a/drivers/leds/leds-sc27xx-bltc.c b/drivers/leds/leds-sc27xx-bltc.c
index 0ede87420bfc..e199ea15e406 100644
--- a/drivers/leds/leds-sc27xx-bltc.c
+++ b/drivers/leds/leds-sc27xx-bltc.c
@@ -276,12 +276,12 @@ static int sc27xx_led_register(struct device *dev, struct sc27xx_led_priv *priv)
static int sc27xx_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node, *child;
+ struct device_node *np = dev_of_node(dev), *child;
struct sc27xx_led_priv *priv;
u32 base, count, reg;
int err;
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count || count > SC27XX_LEDS_MAX)
return -EINVAL;
@@ -305,7 +305,7 @@ static int sc27xx_led_probe(struct platform_device *pdev)
return err;
}
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
err = of_property_read_u32(child, "reg", &reg);
if (err) {
of_node_put(child);
diff --git a/drivers/leds/leds-sgm3140.c b/drivers/leds/leds-sgm3140.c
index c494b934ae09..f4f831570f11 100644
--- a/drivers/leds/leds-sgm3140.c
+++ b/drivers/leds/leds-sgm3140.c
@@ -195,30 +195,21 @@ static int sgm3140_probe(struct platform_device *pdev)
priv->flash_gpio = devm_gpiod_get(&pdev->dev, "flash", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(priv->flash_gpio);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to request flash gpio: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request flash gpio\n");
priv->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(priv->enable_gpio);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to request enable gpio: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request enable gpio\n");
priv->vin_regulator = devm_regulator_get(&pdev->dev, "vin");
ret = PTR_ERR_OR_ZERO(priv->vin_regulator);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to request regulator: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request regulator\n");
child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
NULL);
@@ -316,5 +307,5 @@ static struct platform_driver sgm3140_driver = {
module_platform_driver(sgm3140_driver);
MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xyz>");
-MODULE_DESCRIPTION("SG Micro SGM3140 charge pump led driver");
+MODULE_DESCRIPTION("SG Micro SGM3140 charge pump LED driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index b231b563b7bb..f1964c96fb15 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -80,7 +80,6 @@ static int spi_byte_brightness_set_blocking(struct led_classdev *dev,
static int spi_byte_probe(struct spi_device *spi)
{
- const struct of_device_id *of_dev_id;
struct device_node *child;
struct device *dev = &spi->dev;
struct spi_byte_led *led;
@@ -88,15 +87,11 @@ static int spi_byte_probe(struct spi_device *spi)
const char *state;
int ret;
- of_dev_id = of_match_device(spi_byte_dt_ids, dev);
- if (!of_dev_id)
- return -EINVAL;
-
- if (of_get_child_count(dev->of_node) != 1) {
+ if (of_get_available_child_count(dev_of_node(dev)) != 1) {
dev_err(dev, "Device must have exactly one LED sub-node.");
return -EINVAL;
}
- child = of_get_next_child(dev->of_node, NULL);
+ child = of_get_next_available_child(dev_of_node(dev), NULL);
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -106,7 +101,7 @@ static int spi_byte_probe(struct spi_device *spi)
strlcpy(led->name, name, sizeof(led->name));
led->spi = spi;
mutex_init(&led->mutex);
- led->cdef = of_dev_id->data;
+ led->cdef = device_get_match_data(dev);
led->ldev.name = led->name;
led->ldev.brightness = LED_OFF;
led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
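device_get_match_data() replaces the explicit of_match_device() walk above and also works for ACPI-enumerated devices. A sketch under assumed names (foo_chipdef, foo_probe):

#include <linux/property.h>
#include <linux/spi/spi.h>

struct foo_chipdef {
        u8 off_value;
        u8 max_value;
};

static int foo_probe(struct spi_device *spi)
{
        const struct foo_chipdef *cdef = device_get_match_data(&spi->dev);

        if (!cdef)
                return -ENODEV;

        /* cdef->off_value / cdef->max_value are used as before */
        return 0;
}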
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index b58f3cafe16f..7eddb8ecb44e 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -55,8 +55,9 @@ static void syscon_led_set(struct led_classdev *led_cdev,
static int syscon_led_probe(struct platform_device *pdev)
{
+ struct led_init_data init_data = {};
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev_of_node(dev);
struct device *parent;
struct regmap *map;
struct syscon_led *sled;
@@ -68,7 +69,7 @@ static int syscon_led_probe(struct platform_device *pdev)
dev_err(dev, "no parent for syscon LED\n");
return -ENODEV;
}
- map = syscon_node_to_regmap(parent->of_node);
+ map = syscon_node_to_regmap(dev_of_node(parent));
if (IS_ERR(map)) {
dev_err(dev, "no regmap for syscon LED parent\n");
return PTR_ERR(map);
@@ -84,10 +85,6 @@ static int syscon_led_probe(struct platform_device *pdev)
return -EINVAL;
if (of_property_read_u32(np, "mask", &sled->mask))
return -EINVAL;
- sled->cdev.name =
- of_get_property(np, "label", NULL) ? : np->name;
- sled->cdev.default_trigger =
- of_get_property(np, "linux,default-trigger", NULL);
state = of_get_property(np, "default-state", NULL);
if (state) {
@@ -115,7 +112,9 @@ static int syscon_led_probe(struct platform_device *pdev)
}
sled->cdev.brightness_set = syscon_led_set;
- ret = devm_led_classdev_register(dev, &sled->cdev);
+ init_data.fwnode = of_fwnode_handle(np);
+
+ ret = devm_led_classdev_register_ext(dev, &sled->cdev, &init_data);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 1128ac75443c..225b765830bd 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -69,23 +69,6 @@
* defaulted. Similarly the banks know if each time was explicit or a
* default. Defaults are permitted to be changed freely - they are
* not recognised when matching.
- *
- *
- * An led-tca6507 device must be provided with platform data or
- * configured via devicetree.
- *
- * The platform-data lists for each output: the name, default trigger,
- * and whether the signal is being used as a GPIO rather than an LED.
- * 'struct led_plaform_data' is used for this. If 'name' is NULL, the
- * output isn't used. If 'flags' is TCA6507_MAKE_GPIO, the output is
- * a GPO. The "struct led_platform_data" can be embedded in a "struct
- * tca6507_platform_data" which adds a 'gpio_base' for the GPIOs, and
- * a 'setup' callback which is called once the GPIOs are available.
- *
- * When configured via devicetree there is one child for each output.
- * The "reg" determines the output number and "compatible" determines
- * whether it is an LED or a GPIO. "linux,default-trigger" can set a
- * default trigger.
*/
#include <linux/module.h>
@@ -94,9 +77,8 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/gpio/driver.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
-#include <linux/leds-tca6507.h>
-#include <linux/of.h>
/* LED select registers determine the source that drives LED outputs */
#define TCA6507_LS_LED_OFF 0x0 /* Output HI-Z (off) */
@@ -108,6 +90,15 @@
#define TCA6507_LS_BLINK0 0x6 /* Blink at Bank0 rate */
#define TCA6507_LS_BLINK1 0x7 /* Blink at Bank1 rate */
+struct tca6507_platform_data {
+ struct led_platform_data leds;
+#ifdef CONFIG_GPIOLIB
+ int gpio_base;
+#endif
+};
+
+#define TCA6507_MAKE_GPIO 1
+
enum {
BANK0,
BANK1,
@@ -189,7 +180,6 @@ struct tca6507_chip {
} leds[NUM_LEDS];
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
- const char *gpio_name[NUM_LEDS];
int gpio_map[NUM_LEDS];
#endif
};
@@ -628,7 +618,7 @@ static int tca6507_gpio_direction_output(struct gpio_chip *gc,
return 0;
}
-static int tca6507_probe_gpios(struct i2c_client *client,
+static int tca6507_probe_gpios(struct device *dev,
struct tca6507_chip *tca,
struct tca6507_platform_data *pdata)
{
@@ -639,7 +629,6 @@ static int tca6507_probe_gpios(struct i2c_client *client,
for (i = 0; i < NUM_LEDS; i++)
if (pdata->leds.leds[i].name && pdata->leds.leds[i].flags) {
/* Configure as a gpio */
- tca->gpio_name[gpios] = pdata->leds.leds[i].name;
tca->gpio_map[gpios] = i;
gpios++;
}
@@ -648,23 +637,20 @@ static int tca6507_probe_gpios(struct i2c_client *client,
return 0;
tca->gpio.label = "gpio-tca6507";
- tca->gpio.names = tca->gpio_name;
tca->gpio.ngpio = gpios;
tca->gpio.base = pdata->gpio_base;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
- tca->gpio.parent = &client->dev;
+ tca->gpio.parent = dev;
#ifdef CONFIG_OF_GPIO
- tca->gpio.of_node = of_node_get(client->dev.of_node);
+ tca->gpio.of_node = of_node_get(dev_of_node(dev));
#endif
err = gpiochip_add_data(&tca->gpio, tca);
if (err) {
tca->gpio.ngpio = 0;
return err;
}
- if (pdata->setup)
- pdata->setup(tca->gpio.base, tca->gpio.ngpio);
return 0;
}
@@ -674,7 +660,7 @@ static void tca6507_remove_gpio(struct tca6507_chip *tca)
gpiochip_remove(&tca->gpio);
}
#else /* CONFIG_GPIOLIB */
-static int tca6507_probe_gpios(struct i2c_client *client,
+static int tca6507_probe_gpios(struct device *dev,
struct tca6507_chip *tca,
struct tca6507_platform_data *pdata)
{
@@ -685,44 +671,50 @@ static void tca6507_remove_gpio(struct tca6507_chip *tca)
}
#endif /* CONFIG_GPIOLIB */
-#ifdef CONFIG_OF
static struct tca6507_platform_data *
-tca6507_led_dt_init(struct i2c_client *client)
+tca6507_led_dt_init(struct device *dev)
{
- struct device_node *np = client->dev.of_node, *child;
struct tca6507_platform_data *pdata;
+ struct fwnode_handle *child;
struct led_info *tca_leds;
int count;
- count = of_get_child_count(np);
+ count = device_get_child_node_count(dev);
if (!count || count > NUM_LEDS)
return ERR_PTR(-ENODEV);
- tca_leds = devm_kcalloc(&client->dev,
- NUM_LEDS, sizeof(struct led_info), GFP_KERNEL);
+ tca_leds = devm_kcalloc(dev, NUM_LEDS, sizeof(struct led_info),
+ GFP_KERNEL);
if (!tca_leds)
return ERR_PTR(-ENOMEM);
- for_each_child_of_node(np, child) {
+ device_for_each_child_node(dev, child) {
struct led_info led;
u32 reg;
int ret;
- led.name =
- of_get_property(child, "label", NULL) ? : child->name;
- led.default_trigger =
- of_get_property(child, "linux,default-trigger", NULL);
+ if (fwnode_property_read_string(child, "label", &led.name))
+ led.name = fwnode_get_name(child);
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led.default_trigger);
+
led.flags = 0;
- if (of_property_match_string(child, "compatible", "gpio") >= 0)
+ if (fwnode_property_match_string(child, "compatible",
+ "gpio") >= 0)
led.flags |= TCA6507_MAKE_GPIO;
- ret = of_property_read_u32(child, "reg", &reg);
- if (ret != 0 || reg >= NUM_LEDS)
- continue;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= NUM_LEDS) {
+ fwnode_handle_put(child);
+ return ERR_PTR(ret ? : -EINVAL);
+ }
tca_leds[reg] = led;
}
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct tca6507_platform_data), GFP_KERNEL);
+
+ pdata = devm_kzalloc(dev, sizeof(struct tca6507_platform_data),
+ GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
@@ -731,48 +723,37 @@ tca6507_led_dt_init(struct i2c_client *client)
#ifdef CONFIG_GPIOLIB
pdata->gpio_base = -1;
#endif
+
return pdata;
}
-static const struct of_device_id of_tca6507_leds_match[] = {
+static const struct of_device_id __maybe_unused of_tca6507_leds_match[] = {
{ .compatible = "ti,tca6507", },
{},
};
MODULE_DEVICE_TABLE(of, of_tca6507_leds_match);
-#else
-static struct tca6507_platform_data *
-tca6507_led_dt_init(struct i2c_client *client)
-{
- return ERR_PTR(-ENODEV);
-}
-
-#endif
-
static int tca6507_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct tca6507_chip *tca;
+ struct device *dev = &client->dev;
struct i2c_adapter *adapter;
+ struct tca6507_chip *tca;
struct tca6507_platform_data *pdata;
int err;
int i = 0;
adapter = client->adapter;
- pdata = dev_get_platdata(&client->dev);
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return -EIO;
- if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
- pdata = tca6507_led_dt_init(client);
- if (IS_ERR(pdata)) {
- dev_err(&client->dev, "Need %d entries in platform-data list\n",
- NUM_LEDS);
- return PTR_ERR(pdata);
- }
+ pdata = tca6507_led_dt_init(dev);
+ if (IS_ERR(pdata)) {
+ dev_err(dev, "Need %d entries in platform-data list\n", NUM_LEDS);
+ return PTR_ERR(pdata);
}
- tca = devm_kzalloc(&client->dev, sizeof(*tca), GFP_KERNEL);
+ tca = devm_kzalloc(dev, sizeof(*tca), GFP_KERNEL);
if (!tca)
return -ENOMEM;
@@ -793,13 +774,12 @@ static int tca6507_probe(struct i2c_client *client,
l->led_cdev.brightness_set = tca6507_brightness_set;
l->led_cdev.blink_set = tca6507_blink_set;
l->bank = -1;
- err = led_classdev_register(&client->dev,
- &l->led_cdev);
+ err = led_classdev_register(dev, &l->led_cdev);
if (err < 0)
goto exit;
}
}
- err = tca6507_probe_gpios(client, tca, pdata);
+ err = tca6507_probe_gpios(dev, tca, pdata);
if (err)
goto exit;
/* set all registers to known state - zero */
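A sketch of the fwnode child walk adopted in tca6507_led_dt_init() above; note the manual fwnode_handle_put() when leaving the loop early, since the iterator holds a reference on the current child (foo_parse is illustrative):

#include <linux/device.h>
#include <linux/property.h>

static int foo_parse(struct device *dev)
{
        struct fwnode_handle *child;
        u32 reg;
        int ret;

        device_for_each_child_node(dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &reg);
                if (ret) {
                        fwnode_handle_put(child);       /* drop loop ref */
                        return ret;
                }
        }

        return 0;
}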
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 0929f1275814..5b9dfdf743ec 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -148,22 +148,17 @@ static int
tlc591xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct device_node *np = client->dev.of_node, *child;
+ struct device_node *np = dev_of_node(&client->dev), *child;
struct device *dev = &client->dev;
- const struct of_device_id *match;
const struct tlc591xx *tlc591xx;
struct tlc591xx_priv *priv;
int err, count, reg;
- match = of_match_device(of_tlc591xx_leds_match, dev);
- if (!match)
- return -ENODEV;
-
- tlc591xx = match->data;
+ tlc591xx = device_get_match_data(dev);
if (!np)
return -ENODEV;
- count = of_get_child_count(np);
+ count = of_get_available_child_count(np);
if (!count || count > tlc591xx->max_leds)
return -EINVAL;
@@ -185,7 +180,7 @@ tlc591xx_probe(struct i2c_client *client,
if (err < 0)
return err;
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct tlc591xx_led *led;
struct led_init_data init_data = {};
@@ -204,9 +199,6 @@ tlc591xx_probe(struct i2c_client *client,
led = &priv->leds[reg];
led->active = true;
- led->ldev.default_trigger =
- of_get_property(child, "linux,default-trigger", NULL);
-
led->priv = priv;
led->led_no = reg;
led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
@@ -214,10 +206,10 @@ tlc591xx_probe(struct i2c_client *client,
err = devm_led_classdev_register_ext(dev, &led->ldev,
&init_data);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
- return err;
+ of_node_put(child);
+ return dev_err_probe(dev, err,
+ "couldn't register LED %s\n",
+ led->ldev.name);
}
}
return 0;
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index bb23d8e16614..8c5bdc3847ee 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -121,8 +121,6 @@ static int omnia_led_register(struct i2c_client *client, struct omnia_led *led,
cdev->max_brightness = 255;
cdev->brightness_set_blocking = omnia_led_brightness_set_blocking;
- of_property_read_string(np, "linux,default-trigger", &cdev->default_trigger);
-
/* put the LED into software mode */
ret = i2c_smbus_write_byte_data(client, CMD_LED_MODE,
CMD_LED_MODE_LED(led->reg) |
@@ -210,7 +208,7 @@ static int omnia_leds_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node, *child;
+ struct device_node *np = dev_of_node(dev), *child;
struct omnia_leds *leds;
struct omnia_led *led;
int ret, count;
@@ -236,8 +234,10 @@ static int omnia_leds_probe(struct i2c_client *client,
led = &leds->leds[0];
for_each_available_child_of_node(np, child) {
ret = omnia_led_register(client, led, child);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(child);
return ret;
+ }
led += ret;
}
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 869976d1b734..fca62d503590 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -2,14 +2,18 @@
/*
* ledtrig-cpu.c - LED trigger based on CPU activity
*
- * This LED trigger will be registered for each possible CPU and named as
- * cpu0, cpu1, cpu2, cpu3, etc.
+ * This LED trigger will be registered for the first 8 CPUs and named
+ * cpu0..cpu7. There is an additional trigger called "cpu" that
+ * is on when any CPU is active.
+ *
+ * To support an arbitrary number of CPUs, this could become a single
+ * trigger with an additional sysfs file selecting which CPU to watch.
*
* It can be bound to any LED just like other triggers using either a
* board file or via sysfs interface.
*
* An API named ledtrig_cpu is exported for any user who wants to add CPU
- * activity indication in their code
+ * activity indication in their code.
*
* Copyright 2011 Linus Walleij <linus.walleij@linaro.org>
* Copyright 2011 - 2012 Bryan Wu <bryan.wu@canonical.com>
@@ -145,6 +149,9 @@ static int __init ledtrig_cpu_init(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
+ if (cpu >= 8)
+ continue;
+
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
led_trigger_register_simple(trig->name, &trig->_trig);
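A hedged usage sketch for the exported ledtrig_cpu() hook described in the comment above, using the cpu_led_event values from <linux/leds.h> (the foo_* wrappers are illustrative):

#include <linux/leds.h>

static void foo_enter_idle(void)
{
        ledtrig_cpu(CPU_LED_IDLE_START);        /* LED off while idling */
}

static void foo_exit_idle(void)
{
        ledtrig_cpu(CPU_LED_IDLE_END);          /* LED on: CPU active again */
}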
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index fe78bf0fdce5..c1bcac71008c 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -1311,8 +1311,9 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
i++;
- if (i > 31) {
- pr_err("max 31 devices can be reported.\n");
+ if (i >= ARRAY_SIZE(devices->info)) {
+ pr_err("max %zd devices can be reported.\n",
+ ARRAY_SIZE(devices->info));
break;
}
}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 96684581a25d..94fb63a7b357 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -638,7 +638,7 @@ static void smu_expose_childs(struct work_struct *unused)
{
struct device_node *np;
- for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;)
+ for_each_child_of_node(smu->of_node, np)
if (of_device_is_compatible(np, "smu-sensors"))
of_platform_device_create(np, "smu-sensors",
&smu->of_dev->dev);
@@ -1015,7 +1015,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
/* Note: Only allowed to return error code in pointers (using ERR_PTR)
* when interruptible is 1
*/
-const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
+static const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
unsigned int *size, int interruptible)
{
char pname[32];
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 1e5fa09845e7..29f48c2028b6 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -152,8 +152,6 @@ static int wf_lm75_remove(struct i2c_client *client)
{
struct wf_lm75_sensor *lm = i2c_get_clientdata(client);
- DBG("wf_lm75: i2c detatch called for %s\n", lm->sens.name);
-
/* Mark client detached */
lm->i2c = NULL;
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index d011899c0a8a..9fab0b47cd3d 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -149,8 +149,6 @@ static int wf_lm87_remove(struct i2c_client *client)
{
struct wf_lm87_sensor *lm = i2c_get_clientdata(client);
- DBG("wf_lm87: i2c detatch called for %s\n", lm->sens.name);
-
/* Mark client detached */
lm->i2c = NULL;
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index cb75dc035616..e46e1153a0b4 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -216,8 +216,7 @@ static int wf_sat_probe(struct i2c_client *client,
vsens[0] = vsens[1] = -1;
isens[0] = isens[1] = -1;
- child = NULL;
- while ((child = of_get_next_child(dev, child)) != NULL) {
+ for_each_child_of_node(dev, child) {
reg = of_get_property(child, "reg", NULL);
loc = of_get_property(child, "location", NULL);
if (reg == NULL || loc == NULL)
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index 3e6059eaa138..c8706cfb83fd 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -421,8 +421,7 @@ static int __init smu_sensors_init(void)
return -ENODEV;
/* Look for sensors subdir */
- for (sensors = NULL;
- (sensors = of_get_next_child(smu, sensors)) != NULL;)
+ for_each_child_of_node(smu, sensors)
if (of_node_name_eq(sensors, "sensors"))
break;
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 60d224b723a1..2e06e02b2e03 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_MAILBOX) += mailbox.o
obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
-obj-$(CONFIG_ARM_MHU) += arm_mhu.o
+obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 9da236552bd7..b7fbf276eb62 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -113,6 +113,9 @@ static int mhu_probe(struct amba_device *adev, const struct amba_id *id)
struct device *dev = &adev->dev;
int mhu_reg[MHU_CHANS] = {MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET};
+ if (!of_device_is_compatible(dev->of_node, "arm,mhu"))
+ return -ENODEV;
+
/* Allocate memory for device */
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
new file mode 100644
index 000000000000..275efe4cca0c
--- /dev/null
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2015 Fujitsu Semiconductor Ltd.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Based on ARM MHU driver by Jassi Brar <jaswinder.singh@linaro.org>
+ * Copyright (C) 2020 ARM Ltd.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define INTR_STAT_OFS 0x0
+#define INTR_SET_OFS 0x8
+#define INTR_CLR_OFS 0x10
+
+#define MHU_LP_OFFSET 0x0
+#define MHU_HP_OFFSET 0x20
+#define MHU_SEC_OFFSET 0x200
+#define TX_REG_OFFSET 0x100
+
+#define MHU_CHANS 3 /* Secure, Non-Secure High and Low Priority */
+#define MHU_CHAN_MAX 20 /* Max channels to save on unused RAM */
+#define MHU_NUM_DOORBELLS 32
+
+struct mhu_db_link {
+ unsigned int irq;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+};
+
+struct arm_mhu {
+ void __iomem *base;
+ struct mhu_db_link mlink[MHU_CHANS];
+ struct mbox_controller mbox;
+ struct device *dev;
+};
+
+/**
+ * struct mhu_db_channel - ARM MHU mailbox allocated channel information
+ *
+ * @mhu: Pointer to parent mailbox device
+ * @pchan: Physical channel within which this doorbell resides
+ * @doorbell: doorbell number pertaining to this channel
+ */
+struct mhu_db_channel {
+ struct arm_mhu *mhu;
+ unsigned int pchan;
+ unsigned int doorbell;
+};
+
+static inline struct mbox_chan *
+mhu_db_mbox_to_channel(struct mbox_controller *mbox, unsigned int pchan,
+ unsigned int doorbell)
+{
+ int i;
+ struct mhu_db_channel *chan_info;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ chan_info = mbox->chans[i].con_priv;
+ if (chan_info && chan_info->pchan == pchan &&
+ chan_info->doorbell == doorbell)
+ return &mbox->chans[i];
+ }
+
+ return NULL;
+}
+
+static void mhu_db_mbox_clear_irq(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].rx_reg;
+
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_CLR_OFS);
+}
+
+static unsigned int mhu_db_mbox_irq_to_pchan_num(struct arm_mhu *mhu, int irq)
+{
+ unsigned int pchan;
+
+ for (pchan = 0; pchan < MHU_CHANS; pchan++)
+ if (mhu->mlink[pchan].irq == irq)
+ break;
+ return pchan;
+}
+
+static struct mbox_chan *
+mhu_db_mbox_irq_to_channel(struct arm_mhu *mhu, unsigned int pchan)
+{
+ unsigned long bits;
+ unsigned int doorbell;
+ struct mbox_chan *chan = NULL;
+ struct mbox_controller *mbox = &mhu->mbox;
+ void __iomem *base = mhu->mlink[pchan].rx_reg;
+
+ bits = readl_relaxed(base + INTR_STAT_OFS);
+ if (!bits)
+ /* No IRQs fired in specified physical channel */
+ return NULL;
+
+ /* An IRQ has fired, find the associated channel */
+ for (doorbell = 0; bits; doorbell++) {
+ if (!test_and_clear_bit(doorbell, &bits))
+ continue;
+
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan)
+ break;
+ dev_err(mbox->dev,
+ "Channel not registered: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ }
+
+ return chan;
+}
+
+static irqreturn_t mhu_db_mbox_rx_handler(int irq, void *data)
+{
+ struct mbox_chan *chan;
+ struct arm_mhu *mhu = data;
+ unsigned int pchan = mhu_db_mbox_irq_to_pchan_num(mhu, irq);
+
+ while ((chan = mhu_db_mbox_irq_to_channel(mhu, pchan)) != NULL) {
+ mbox_chan_received_data(chan, NULL);
+ mhu_db_mbox_clear_irq(chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool mhu_db_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ if (readl_relaxed(base + INTR_STAT_OFS) & BIT(chan_info->doorbell))
+ return false;
+
+ return true;
+}
+
+static int mhu_db_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ void __iomem *base = chan_info->mhu->mlink[chan_info->pchan].tx_reg;
+
+ /* Send event to co-processor */
+ writel_relaxed(BIT(chan_info->doorbell), base + INTR_SET_OFS);
+
+ return 0;
+}
+
+static int mhu_db_startup(struct mbox_chan *chan)
+{
+ mhu_db_mbox_clear_irq(chan);
+ return 0;
+}
+
+static void mhu_db_shutdown(struct mbox_chan *chan)
+{
+ struct mhu_db_channel *chan_info = chan->con_priv;
+ struct mbox_controller *mbox = &chan_info->mhu->mbox;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++)
+ if (chan == &mbox->chans[i])
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_warn(mbox->dev, "Request to free non-existent channel\n");
+ return;
+ }
+
+ /* Reset channel */
+ mhu_db_mbox_clear_irq(chan);
+ kfree(chan->con_priv);
+ chan->con_priv = NULL;
+}
+
+static struct mbox_chan *mhu_db_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct arm_mhu *mhu = dev_get_drvdata(mbox->dev);
+ struct mhu_db_channel *chan_info;
+ struct mbox_chan *chan;
+ unsigned int pchan = spec->args[0];
+ unsigned int doorbell = spec->args[1];
+ int i;
+
+ /* Bounds checking */
+ if (pchan >= MHU_CHANS || doorbell >= MHU_NUM_DOORBELLS) {
+ dev_err(mbox->dev,
+ "Invalid channel requested pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = mhu_db_mbox_to_channel(mbox, pchan, doorbell);
+ if (chan) {
+ dev_err(mbox->dev, "Channel in use: pchan: %d doorbell: %d\n",
+ pchan, doorbell);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* Find the first free slot */
+ for (i = 0; i < mbox->num_chans; i++)
+ if (!mbox->chans[i].con_priv)
+ break;
+
+ if (mbox->num_chans == i) {
+ dev_err(mbox->dev, "No free channels left\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan = &mbox->chans[i];
+
+ chan_info = devm_kzalloc(mbox->dev, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info)
+ return ERR_PTR(-ENOMEM);
+
+ chan_info->mhu = mhu;
+ chan_info->pchan = pchan;
+ chan_info->doorbell = doorbell;
+
+ chan->con_priv = chan_info;
+
+ dev_dbg(mbox->dev, "mbox: created channel phys: %d doorbell: %d\n",
+ pchan, doorbell);
+
+ return chan;
+}
+
+static const struct mbox_chan_ops mhu_db_ops = {
+ .send_data = mhu_db_send_data,
+ .startup = mhu_db_startup,
+ .shutdown = mhu_db_shutdown,
+ .last_tx_done = mhu_db_last_tx_done,
+};
+
+static int mhu_db_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ u32 cell_count;
+ int i, err, max_chans;
+ struct arm_mhu *mhu;
+ struct mbox_chan *chans;
+ struct device *dev = &adev->dev;
+ struct device_node *np = dev->of_node;
+ int mhu_reg[MHU_CHANS] = {
+ MHU_LP_OFFSET, MHU_HP_OFFSET, MHU_SEC_OFFSET,
+ };
+
+ if (!of_device_is_compatible(np, "arm,mhu-doorbell"))
+ return -ENODEV;
+
+ err = of_property_read_u32(np, "#mbox-cells", &cell_count);
+ if (err) {
+ dev_err(dev, "failed to read #mbox-cells in '%pOF'\n", np);
+ return err;
+ }
+
+ if (cell_count == 2) {
+ max_chans = MHU_CHAN_MAX;
+ } else {
+ dev_err(dev, "incorrect value of #mbox-cells in '%pOF'\n", np);
+ return -EINVAL;
+ }
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(mhu->base)) {
+ dev_err(dev, "ioremap failed\n");
+ return PTR_ERR(mhu->base);
+ }
+
+ chans = devm_kcalloc(dev, max_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mhu->dev = dev;
+ mhu->mbox.dev = dev;
+ mhu->mbox.chans = chans;
+ mhu->mbox.num_chans = max_chans;
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+ mhu->mbox.of_xlate = mhu_db_mbox_xlate;
+ amba_set_drvdata(adev, mhu);
+
+ mhu->mbox.ops = &mhu_db_ops;
+
+ err = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (err) {
+ dev_err(dev, "Failed to register mailboxes %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < MHU_CHANS; i++) {
+ int irq = mhu->mlink[i].irq = adev->irq[i];
+
+ if (irq <= 0) {
+ dev_dbg(dev, "No IRQ found for Channel %d\n", i);
+ continue;
+ }
+
+ mhu->mlink[i].rx_reg = mhu->base + mhu_reg[i];
+ mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
+
+ err = devm_request_threaded_irq(dev, irq, NULL,
+ mhu_db_mbox_rx_handler,
+ IRQF_ONESHOT, "mhu_db_link", mhu);
+ if (err) {
+ dev_err(dev, "Can't claim IRQ %d\n", irq);
+ mbox_controller_unregister(&mhu->mbox);
+ return err;
+ }
+ }
+
+ dev_info(dev, "ARM MHU Doorbell mailbox registered\n");
+ return 0;
+}
+
+static struct amba_id mhu_ids[] = {
+ {
+ .id = 0x1bb098,
+ .mask = 0xffffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhu_ids);
+
+static struct amba_driver arm_mhu_db_driver = {
+ .drv = {
+ .name = "mhu-doorbell",
+ },
+ .id_table = mhu_ids,
+ .probe = mhu_db_probe,
+};
+module_amba_driver(arm_mhu_db_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHU Doorbell Driver");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
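For context, a hypothetical consumer of the doorbell controller added above: a client selects a <pchan doorbell> pair through its "mboxes" DT property and sends an empty message, since a doorbell carries no payload (foo_signal and the 500 ms timeout are assumptions, not part of this driver):

#include <linux/err.h>
#include <linux/mailbox_client.h>

static int foo_signal(struct device *dev)
{
        struct mbox_client cl = {
                .dev = dev,
                .tx_block = true,
                .tx_tout = 500,                 /* ms, assumed value */
        };
        struct mbox_chan *chan;
        int ret;

        chan = mbox_request_channel(&cl, 0);    /* first "mboxes" entry */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* doorbells carry no payload, so the message is NULL */
        ret = mbox_send_message(chan, NULL);
        mbox_free_channel(chan);

        return ret < 0 ? ret : 0;
}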
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 53945ca5d785..5b375985f7b8 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -962,9 +962,9 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
* a DMA receive interrupt. Reenables the receive interrupt.
* @data: PDC state structure
*/
-static void pdc_tasklet_cb(unsigned long data)
+static void pdc_tasklet_cb(struct tasklet_struct *t)
{
- struct pdc_state *pdcs = (struct pdc_state *)data;
+ struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
pdc_receive(pdcs);
@@ -1589,7 +1589,7 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
/* Init tasklet for deferred DMA rx processing */
- tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);
+ tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
err = pdc_interrupts_init(pdcs);
if (err)
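A sketch of the tasklet_setup()/from_tasklet() conversion shown above: the callback now receives the tasklet pointer and recovers its container by member name, dropping the unsigned-long cast (foo_state is an illustrative type):

#include <linux/interrupt.h>

struct foo_state {
        struct tasklet_struct rx_tasklet;
        /* ... driver state ... */
};

static void foo_tasklet_cb(struct tasklet_struct *t)
{
        /* container_of() keyed on the tasklet member's name */
        struct foo_state *st = from_tasklet(st, t, rx_tasklet);

        (void)st;       /* deferred rx processing would go here */
}

static void foo_init(struct foo_state *st)
{
        tasklet_setup(&st->rx_tasklet, foo_tasklet_cb);
}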
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0b821a5b2db8..3e7d4b20ab34 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -82,9 +82,12 @@ static void msg_submit(struct mbox_chan *chan)
exit:
spin_unlock_irqrestore(&chan->lock, flags);
- if (!err && (chan->txdone_method & TXDONE_BY_POLL))
- /* kick start the timer immediately to avoid delays */
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ /* kick start the timer immediately to avoid delays */
+ if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
+ /* but only if not already active */
+ if (!hrtimer_active(&chan->mbox->poll_hrt))
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
+ }
}
static void tx_tick(struct mbox_chan *chan, int r)
@@ -122,11 +125,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
struct mbox_chan *chan = &mbox->chans[i];
if (chan->active_req && chan->cl) {
+ resched = true;
txdone = chan->mbox->ops->last_tx_done(chan);
if (txdone)
tx_tick(chan, 0);
- else
- resched = true;
}
}
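A condensed sketch of the guard added above: the polling hrtimer is armed immediately, but only when it is not already active, so back-to-back submissions cannot keep pushing the expiry forward:

#include <linux/hrtimer.h>

static void foo_kick_poll(struct hrtimer *poll_hrt)
{
        /* arm immediately, but never rewind an already-queued timer */
        if (!hrtimer_active(poll_hrt))
                hrtimer_start(poll_hrt, 0, HRTIMER_MODE_REL);
}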
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 484d4438cd83..5665b6ea8119 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -69,7 +69,7 @@ struct cmdq_task {
struct cmdq {
struct mbox_controller mbox;
void __iomem *base;
- u32 irq;
+ int irq;
u32 thread_nr;
u32 irq_mask;
struct cmdq_thread *thread;
@@ -525,10 +525,8 @@ static int cmdq_probe(struct platform_device *pdev)
}
cmdq->irq = platform_get_irq(pdev, 0);
- if (!cmdq->irq) {
- dev_err(dev, "failed to get irq\n");
- return -EINVAL;
- }
+ if (cmdq->irq < 0)
+ return cmdq->irq;
plat_data = (struct gce_plat *)of_device_get_match_data(dev);
if (!plat_data) {
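A sketch of the platform_get_irq() handling adopted above: the call returns a negative errno on failure (including -EPROBE_DEFER) and logs by itself, so the caller simply propagates the value instead of testing for zero:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* already logged; covers -EPROBE_DEFER */

        /* ... request 'irq' ... */
        return 0;
}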
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 834b35dc3b13..e07091d71986 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -13,6 +13,8 @@
#include <linux/pm.h>
#include <linux/slab.h>
+#include <soc/tegra/fuse.h>
+
#include <dt-bindings/mailbox/tegra186-hsp.h>
#include "mailbox.h"
@@ -322,7 +324,12 @@ static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
if (!ccplex)
return -ENODEV;
- if (!tegra_hsp_doorbell_can_ring(db))
+ /*
+ * On simulation platforms the BPMP hasn't had a chance yet to mark
+ * the doorbell as ringable by the CCPLEX, so we want to skip extra
+ * checks here.
+ */
+ if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db))
return -ENODEV;
spin_lock_irqsave(&hsp->lock, flags);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 52035a78d836..8c371d5eef8e 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -49,7 +49,7 @@
*
* bch_bucket_alloc() allocates a single bucket from a specific cache.
*
- * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * bch_bucket_alloc_set() allocates one bucket from different caches
* out of a cache set.
*
* free_some_buckets() drives all the processes described above. It's called
@@ -87,8 +87,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
- unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
- unsigned int i;
+ unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
int r;
atomic_sub(sectors, &c->rescale);
@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
c->min_prio = USHRT_MAX;
- for_each_cache(ca, c, i)
- for_each_bucket(b, ca)
- if (b->prio &&
- b->prio != BTREE_PRIO &&
- !atomic_read(&b->pin)) {
- b->prio--;
- c->min_prio = min(c->min_prio, b->prio);
- }
+ ca = c->cache;
+ for_each_bucket(b, ca)
+ if (b->prio &&
+ b->prio != BTREE_PRIO &&
+ !atomic_read(&b->pin)) {
+ b->prio--;
+ c->min_prio = min(c->min_prio, b->prio);
+ }
mutex_unlock(&c->bucket_lock);
}
@@ -362,7 +361,7 @@ retry_invalidate:
* new stuff to them:
*/
allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
- if (CACHE_SYNC(&ca->set->sb)) {
+ if (CACHE_SYNC(&ca->sb)) {
/*
* This could deadlock if an allocation with a btree
* node locked ever blocked - having the btree node
@@ -488,34 +487,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
- int i;
+ struct cache *ca;
+ long b;
/* No allocation if CACHE_SET_IO_DISABLE bit is set */
if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
return -1;
lockdep_assert_held(&c->bucket_lock);
- BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
bkey_init(k);
- /* sort by free space/prio of oldest data in caches */
-
- for (i = 0; i < n; i++) {
- struct cache *ca = c->cache_by_alloc[i];
- long b = bch_bucket_alloc(ca, reserve, wait);
+ ca = c->cache;
+ b = bch_bucket_alloc(ca, reserve, wait);
+ if (b == -1)
+ goto err;
- if (b == -1)
- goto err;
+ k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+ bucket_to_sector(c, b),
+ ca->sb.nr_this_dev);
- k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
- bucket_to_sector(c, b),
- ca->sb.nr_this_dev);
-
- SET_KEY_PTRS(k, i + 1);
- }
+ SET_KEY_PTRS(k, 1);
return 0;
err:
@@ -525,12 +519,12 @@ err:
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait)
+ struct bkey *k, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
- ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+ ret = __bch_bucket_alloc_set(c, reserve, k, wait);
mutex_unlock(&c->bucket_lock);
return ret;
}
@@ -589,7 +583,7 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket, list);
found:
if (!ret->sectors_free && KEY_PTRS(alloc)) {
- ret->sectors_free = c->sb.bucket_size;
+ ret->sectors_free = c->cache->sb.bucket_size;
bkey_copy(&ret->key, alloc);
bkey_init(alloc);
}
@@ -638,7 +632,7 @@ bool bch_alloc_sectors(struct cache_set *c,
spin_unlock(&c->data_bucket_lock);
- if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+ if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
return false;
spin_lock(&c->data_bucket_lock);
@@ -683,7 +677,7 @@ bool bch_alloc_sectors(struct cache_set *c,
&PTR_CACHE(c, &b->key, i)->sectors_written);
}
- if (b->sectors_free < c->sb.block_size)
+ if (b->sectors_free < c->cache->sb.block_size)
b->sectors_free = 0;
/*
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4fd03d2496d8..1d57f48307e6 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -517,11 +517,7 @@ struct cache_set {
atomic_t idle_counter;
atomic_t at_max_writeback_rate;
- struct cache_sb sb;
-
- struct cache *cache[MAX_CACHES_PER_SET];
- struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
- int caches_loaded;
+ struct cache *cache;
struct bcache_device **devices;
unsigned int devices_max_used;
@@ -670,6 +666,7 @@ struct cache_set {
struct mutex verify_lock;
#endif
+ uint8_t set_uuid[16];
unsigned int nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
@@ -758,9 +755,8 @@ struct bbio {
#define btree_default_blocks(c) \
((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
-#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
-#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
-#define block_bytes(c) ((c)->sb.block_size << 9)
+#define bucket_bytes(ca) ((ca)->sb.bucket_size << 9)
+#define block_bytes(ca) ((ca)->sb.block_size << 9)
static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
@@ -801,14 +797,14 @@ static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
- return s & (c->sb.bucket_size - 1);
+ return s & (c->cache->sb.bucket_size - 1);
}
static inline struct cache *PTR_CACHE(struct cache_set *c,
const struct bkey *k,
unsigned int ptr)
{
- return c->cache[PTR_DEV(k, ptr)];
+ return c->cache;
}
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
@@ -889,9 +885,6 @@ do { \
/* Looping macros */
-#define for_each_cache(ca, cs, iter) \
- for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
-
#define for_each_bucket(b, ca) \
for (b = (ca)->buckets + (ca)->sb.first_bucket; \
b < (ca)->buckets + (ca)->sb.nbuckets; b++)
@@ -933,11 +926,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
static inline void wake_up_allocators(struct cache_set *c)
{
- struct cache *ca;
- unsigned int i;
+ struct cache *ca = c->cache;
- for_each_cache(ca, c, i)
- wake_up_process(ca->alloc_thread);
+ wake_up_process(ca->alloc_thread);
}
static inline void closure_bio_submit(struct cache_set *c,
@@ -994,9 +985,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait);
+ struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
- struct bkey *k, int n, bool wait);
+ struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
unsigned int sectors, unsigned int write_point,
unsigned int write_prio, bool wait);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3d8bd0692af3..910df242c83d 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -104,7 +104,7 @@
static inline struct bset *write_block(struct btree *b)
{
- return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
+ return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}
static void bch_btree_init_next(struct btree *b)
@@ -117,7 +117,7 @@ static void bch_btree_init_next(struct btree *b)
if (b->written < btree_blocks(b))
bch_bset_init_next(&b->keys, write_block(b),
- bset_magic(&b->c->sb));
+ bset_magic(&b->c->cache->sb));
}
@@ -155,7 +155,7 @@ void bch_btree_node_read_done(struct btree *b)
* See the comment around cache_set->fill_iter.
*/
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
- iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
iter->used = 0;
#ifdef CONFIG_BCACHE_DEBUG
@@ -173,12 +173,12 @@ void bch_btree_node_read_done(struct btree *b)
goto err;
err = "bad btree header";
- if (b->written + set_blocks(i, block_bytes(b->c)) >
+ if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
btree_blocks(b))
goto err;
err = "bad magic";
- if (i->magic != bset_magic(&b->c->sb))
+ if (i->magic != bset_magic(&b->c->cache->sb))
goto err;
err = "bad checksum";
@@ -199,13 +199,13 @@ void bch_btree_node_read_done(struct btree *b)
bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
- b->written += set_blocks(i, block_bytes(b->c));
+ b->written += set_blocks(i, block_bytes(b->c->cache));
}
err = "corrupted btree";
for (i = write_block(b);
bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
- i = ((void *) i) + block_bytes(b->c))
+ i = ((void *) i) + block_bytes(b->c->cache))
if (i->seq == b->keys.set[0].data->seq)
goto err;
@@ -219,7 +219,7 @@ void bch_btree_node_read_done(struct btree *b)
if (b->written < btree_blocks(b))
bch_bset_init_next(&b->keys, write_block(b),
- bset_magic(&b->c->sb));
+ bset_magic(&b->c->cache->sb));
out:
mempool_free(iter, &b->c->fill_iter);
return;
@@ -347,7 +347,7 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
- b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
+ b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
bch_bio_map(b->bio, i);
@@ -423,10 +423,10 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
do_btree_node_write(b);
- atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
+ atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
- b->written += set_blocks(i, block_bytes(b->c));
+ b->written += set_blocks(i, block_bytes(b->c->cache));
}
void bch_btree_node_write(struct btree *b, struct closure *parent)
@@ -514,7 +514,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
* mca -> memory cache
*/
-#define mca_reserve(c) (((c->root && c->root->level) \
+#define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
? c->root->level : 1) * 8 + 16)
#define mca_can_free(c) \
max_t(int, 0, c->btree_cache_used - mca_reserve(c))
@@ -738,7 +738,7 @@ void bch_btree_cache_free(struct cache_set *c)
if (c->verify_data)
list_move(&c->verify_data->list, &c->btree_cache);
- free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->sb)));
+ free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif
list_splice(&c->btree_cache_freeable,
@@ -785,7 +785,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
mutex_init(&c->verify_lock);
c->verify_ondisk = (void *)
- __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(meta_bucket_pages(&c->sb)));
+ __get_free_pages(GFP_KERNEL|__GFP_COMP,
+ ilog2(meta_bucket_pages(&c->cache->sb)));
if (!c->verify_ondisk) {
/*
* Don't worry about the mca_rereserve buckets
@@ -1091,7 +1092,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
mutex_lock(&c->bucket_lock);
retry:
- if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
+ if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
goto err;
bkey_put(c, &k.key);
@@ -1108,7 +1109,7 @@ retry:
}
b->parent = parent;
- bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
+ bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
mutex_unlock(&c->bucket_lock);
@@ -1167,19 +1168,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
struct cache_set *c = b->c;
- struct cache *ca;
- unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
+ struct cache *ca = c->cache;
+ unsigned int reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock);
- for_each_cache(ca, c, i)
- if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
- if (op)
- prepare_to_wait(&c->btree_cache_wait, &op->wait,
- TASK_UNINTERRUPTIBLE);
- mutex_unlock(&c->bucket_lock);
- return -EINTR;
- }
+ if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&c->bucket_lock);
+ return -EINTR;
+ }
mutex_unlock(&c->bucket_lock);
@@ -1345,7 +1345,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
if (nodes < 2 ||
__set_blocks(b->keys.set[0].data, keys,
- block_bytes(b->c)) > blocks * (nodes - 1))
+ block_bytes(b->c->cache)) > blocks * (nodes - 1))
return 0;
for (i = 0; i < nodes; i++) {
@@ -1379,7 +1379,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
k = bkey_next(k)) {
if (__set_blocks(n1, n1->keys + keys +
bkey_u64s(k),
- block_bytes(b->c)) > blocks)
+ block_bytes(b->c->cache)) > blocks)
break;
last = k;
@@ -1395,7 +1395,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
* though)
*/
if (__set_blocks(n1, n1->keys + n2->keys,
- block_bytes(b->c)) >
+ block_bytes(b->c->cache)) >
btree_blocks(new_nodes[i]))
goto out_unlock_nocoalesce;
@@ -1404,7 +1404,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
last = &r->b->key;
}
- BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
+ BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
btree_blocks(new_nodes[i]));
if (last)
@@ -1695,7 +1695,6 @@ static void btree_gc_start(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
- unsigned int i;
if (!c->gc_mark_valid)
return;
@@ -1705,14 +1704,14 @@ static void btree_gc_start(struct cache_set *c)
c->gc_mark_valid = 0;
c->gc_done = ZERO_KEY;
- for_each_cache(ca, c, i)
- for_each_bucket(b, ca) {
- b->last_gc = b->gen;
- if (!atomic_read(&b->pin)) {
- SET_GC_MARK(b, 0);
- SET_GC_SECTORS_USED(b, 0);
- }
+ ca = c->cache;
+ for_each_bucket(b, ca) {
+ b->last_gc = b->gen;
+ if (!atomic_read(&b->pin)) {
+ SET_GC_MARK(b, 0);
+ SET_GC_SECTORS_USED(b, 0);
}
+ }
mutex_unlock(&c->bucket_lock);
}
@@ -1721,7 +1720,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
struct bucket *b;
struct cache *ca;
- unsigned int i;
+ unsigned int i, j;
+ uint64_t *k;
mutex_lock(&c->bucket_lock);
@@ -1739,7 +1739,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
- unsigned int j;
if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
continue;
@@ -1756,29 +1755,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
rcu_read_unlock();
c->avail_nbuckets = 0;
- for_each_cache(ca, c, i) {
- uint64_t *i;
- ca->invalidate_needs_gc = 0;
+ ca = c->cache;
+ ca->invalidate_needs_gc = 0;
- for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+ for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
- for (i = ca->prio_buckets;
- i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
- SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+ for (k = ca->prio_buckets;
+ k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
+ SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
- for_each_bucket(b, ca) {
- c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+ for_each_bucket(b, ca) {
+ c->need_gc = max(c->need_gc, bucket_gc_gen(b));
- if (atomic_read(&b->pin))
- continue;
+ if (atomic_read(&b->pin))
+ continue;
- BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+ BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
- if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
- c->avail_nbuckets++;
- }
+ if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
+ c->avail_nbuckets++;
}
mutex_unlock(&c->bucket_lock);
@@ -1830,12 +1827,10 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
- struct cache *ca;
- unsigned int i;
+ struct cache *ca = c->cache;
- for_each_cache(ca, c, i)
- if (ca->invalidate_needs_gc)
- return true;
+ if (ca->invalidate_needs_gc)
+ return true;
if (atomic_read(&c->sectors_to_gc) < 0)
return true;
@@ -2081,9 +2076,8 @@ out:
void bch_initial_gc_finish(struct cache_set *c)
{
- struct cache *ca;
+ struct cache *ca = c->cache;
struct bucket *b;
- unsigned int i;
bch_btree_gc_finish(c);
@@ -2098,20 +2092,18 @@ void bch_initial_gc_finish(struct cache_set *c)
* This is only safe for buckets that have no live data in them, which
* there should always be some of.
*/
- for_each_cache(ca, c, i) {
- for_each_bucket(b, ca) {
- if (fifo_full(&ca->free[RESERVE_PRIO]) &&
- fifo_full(&ca->free[RESERVE_BTREE]))
- break;
+ for_each_bucket(b, ca) {
+ if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+ fifo_full(&ca->free[RESERVE_BTREE]))
+ break;
- if (bch_can_invalidate_bucket(ca, b) &&
- !GC_MARK(b)) {
- __bch_invalidate_one_bucket(ca, b);
- if (!fifo_push(&ca->free[RESERVE_PRIO],
- b - ca->buckets))
- fifo_push(&ca->free[RESERVE_BTREE],
- b - ca->buckets);
- }
+ if (bch_can_invalidate_bucket(ca, b) &&
+ !GC_MARK(b)) {
+ __bch_invalidate_one_bucket(ca, b);
+ if (!fifo_push(&ca->free[RESERVE_PRIO],
+ b - ca->buckets))
+ fifo_push(&ca->free[RESERVE_BTREE],
+ b - ca->buckets);
}
}
@@ -2219,7 +2211,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
goto err;
split = set_blocks(btree_bset_first(n1),
- block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
+ block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
if (split) {
unsigned int keys = 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 257969980c49..50482107134f 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -194,7 +194,7 @@ static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
static inline void set_gc_sectors(struct cache_set *c)
{
- atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
+ atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
}
void bkey_put(struct cache_set *c, struct bkey *k);
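
Beyond routing bucket_size through c->cache, the set_gc_sectors() threshold is worth a worked example, with assumed geometry of 512-sector (256 KiB) buckets and 1,000,000 buckets:

/*
 * sectors_to_gc = 512 * 1000000 / 16 = 32,000,000 sectors (~15.3 GiB),
 * i.e. incremental GC is armed to fire after roughly 1/16 of the total
 * cache capacity has been written since the previous run.
 */
atomic_set(&c->sectors_to_gc, c->cache->sb.bucket_size * c->nbuckets / 16);
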
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 0164a1fe94a9..d8d9394a6beb 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -159,7 +159,7 @@ void closure_debug_destroy(struct closure *cl)
static struct dentry *closure_debug;
-static int debug_seq_show(struct seq_file *f, void *data)
+static int debug_show(struct seq_file *f, void *data)
{
struct closure *cl;
@@ -188,17 +188,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
return 0;
}
-static int debug_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, debug_seq_show, NULL);
-}
-
-static const struct file_operations debug_ops = {
- .owner = THIS_MODULE,
- .open = debug_seq_open,
- .read = seq_read,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(debug);
void __init closure_debug_init(void)
{
@@ -209,7 +199,7 @@ void __init closure_debug_init(void)
* about this.
*/
closure_debug = debugfs_create_file(
- "closures", 0400, bcache_debug, NULL, &debug_ops);
+ "closures", 0400, bcache_debug, NULL, &debug_fops);
}
#endif
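
For reference, DEFINE_SHOW_ATTRIBUTE(debug) generates exactly the boilerplate this hunk deletes by hand, deriving everything from the debug_show() name; that is also why the debugfs_create_file() call switches to &debug_fops. Simplified expansion (per include/linux/seq_file.h):

static int debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.owner		= THIS_MODULE,
	.open		= debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
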
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 336f43910383..b00fd08d696b 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -25,8 +25,8 @@ struct dentry *bcache_debug;
for (i = (start); \
(void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
i->seq == (start)->seq; \
- i = (void *) i + set_blocks(i, block_bytes(b->c)) * \
- block_bytes(b->c))
+ i = (void *) i + set_blocks(i, block_bytes(b->c->cache)) * \
+ block_bytes(b->c->cache))
void bch_btree_verify(struct btree *b)
{
@@ -82,14 +82,14 @@ void bch_btree_verify(struct btree *b)
for_each_written_bset(b, ondisk, i) {
unsigned int block = ((void *) i - (void *) ondisk) /
- block_bytes(b->c);
+ block_bytes(b->c->cache);
pr_err("*** on disk block %u:\n", block);
bch_dump_bset(&b->keys, i, block);
}
pr_err("*** block %zu not written\n",
- ((void *) i - (void *) ondisk) / block_bytes(b->c));
+ ((void *) i - (void *) ondisk) / block_bytes(b->c->cache));
for (j = 0; j < inmemory->keys; j++)
if (inmemory->d[j] != sorted->d[j])
@@ -238,7 +238,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
if (!IS_ERR_OR_NULL(bcache_debug)) {
char name[50];
- snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
+ snprintf(name, 50, "bcache-%pU", c->set_uuid);
c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
&cache_set_debug_ops);
}
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 9162af5bb6ec..f4658a1f37b8 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -54,7 +54,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
- if (KEY_SIZE(k) + r > c->sb.bucket_size ||
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
bucket < ca->sb.first_bucket ||
bucket >= ca->sb.nbuckets)
return true;
@@ -75,7 +75,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
- if (KEY_SIZE(k) + r > c->sb.bucket_size)
+ if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
return "bad, length too big";
if (bucket < ca->sb.first_bucket)
return "bad, short offset";
@@ -136,7 +136,7 @@ static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
size_t n = PTR_BUCKET_NR(b->c, k, j);
pr_cont(" bucket %zu", n);
- if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+ if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
pr_cont(" prio %i",
PTR_BUCKET(b->c, k, j)->prio);
}
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 4442df48d28c..6469223f0b77 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -30,7 +30,7 @@ static struct feature feature_list[] = {
for (f = &feature_list[0]; f->compat != 0; f++) { \
if (f->compat != BCH_FEATURE_ ## type) \
continue; \
- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) { \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) { \
if (first) { \
out += snprintf(out, buf + size - out, \
"["); \
@@ -44,7 +44,7 @@ static struct feature feature_list[] = {
\
out += snprintf(out, buf + size - out, "%s", f->string);\
\
- if (BCH_HAS_ ## type ## _FEATURE(&c->sb, f->mask)) \
+ if (BCH_HAS_ ## type ## _FEATURE(&c->cache->sb, f->mask)) \
out += snprintf(out, buf + size - out, "]"); \
\
first = false; \
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index a14a445618b4..dad71a6b7889 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,7 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->sb));
+ bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c1227bdb57e7..aefbdb7e003b 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -98,7 +98,7 @@ reread: left = ca->sb.bucket_size - offset;
return ret;
}
- blocks = set_blocks(j, block_bytes(ca->set));
+ blocks = set_blocks(j, block_bytes(ca));
/*
* Nodes in 'list' are in linear increasing order of
@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
ret; \
})
- struct cache *ca;
- unsigned int iter;
+ struct cache *ca = c->cache;
int ret = 0;
+ struct journal_device *ja = &ca->journal;
+ DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+ unsigned int i, l, r, m;
+ uint64_t seq;
- for_each_cache(ca, c, iter) {
- struct journal_device *ja = &ca->journal;
- DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
- unsigned int i, l, r, m;
- uint64_t seq;
-
- bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
- pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
+ bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+ pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
+ /*
+ * Read journal buckets ordered by golden ratio hash to quickly
+ * find a sequence of buckets with valid journal entries
+ */
+ for (i = 0; i < ca->sb.njournal_buckets; i++) {
/*
- * Read journal buckets ordered by golden ratio hash to quickly
- * find a sequence of buckets with valid journal entries
+		 * We must try index l with ZERO first, for
+		 * correctness: the journal bucket is a circular
+		 * buffer which might have wrapped.
*/
- for (i = 0; i < ca->sb.njournal_buckets; i++) {
- /*
- * We must try the index l with ZERO first for
- * correctness due to the scenario that the journal
- * bucket is circular buffer which might have wrapped
- */
- l = (i * 2654435769U) % ca->sb.njournal_buckets;
+ l = (i * 2654435769U) % ca->sb.njournal_buckets;
- if (test_bit(l, bitmap))
- break;
+ if (test_bit(l, bitmap))
+ break;
- if (read_bucket(l))
- goto bsearch;
- }
+ if (read_bucket(l))
+ goto bsearch;
+ }
- /*
- * If that fails, check all the buckets we haven't checked
- * already
- */
- pr_debug("falling back to linear search\n");
+ /*
+ * If that fails, check all the buckets we haven't checked
+ * already
+ */
+ pr_debug("falling back to linear search\n");
- for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
- if (read_bucket(l))
- goto bsearch;
+ for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
+ if (read_bucket(l))
+ goto bsearch;
- /* no journal entries on this device? */
- if (l == ca->sb.njournal_buckets)
- continue;
+ /* no journal entries on this device? */
+ if (l == ca->sb.njournal_buckets)
+ goto out;
bsearch:
- BUG_ON(list_empty(list));
+ BUG_ON(list_empty(list));
- /* Binary search */
- m = l;
- r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
- pr_debug("starting binary search, l %u r %u\n", l, r);
+ /* Binary search */
+ m = l;
+ r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+ pr_debug("starting binary search, l %u r %u\n", l, r);
- while (l + 1 < r) {
- seq = list_entry(list->prev, struct journal_replay,
- list)->j.seq;
+ while (l + 1 < r) {
+ seq = list_entry(list->prev, struct journal_replay,
+ list)->j.seq;
- m = (l + r) >> 1;
- read_bucket(m);
+ m = (l + r) >> 1;
+ read_bucket(m);
- if (seq != list_entry(list->prev, struct journal_replay,
- list)->j.seq)
- l = m;
- else
- r = m;
- }
+ if (seq != list_entry(list->prev, struct journal_replay,
+ list)->j.seq)
+ l = m;
+ else
+ r = m;
+ }
- /*
- * Read buckets in reverse order until we stop finding more
- * journal entries
- */
- pr_debug("finishing up: m %u njournal_buckets %u\n",
- m, ca->sb.njournal_buckets);
- l = m;
+ /*
+ * Read buckets in reverse order until we stop finding more
+ * journal entries
+ */
+ pr_debug("finishing up: m %u njournal_buckets %u\n",
+ m, ca->sb.njournal_buckets);
+ l = m;
- while (1) {
- if (!l--)
- l = ca->sb.njournal_buckets - 1;
+ while (1) {
+ if (!l--)
+ l = ca->sb.njournal_buckets - 1;
- if (l == m)
- break;
+ if (l == m)
+ break;
- if (test_bit(l, bitmap))
- continue;
+ if (test_bit(l, bitmap))
+ continue;
- if (!read_bucket(l))
- break;
- }
+ if (!read_bucket(l))
+ break;
+ }
- seq = 0;
+ seq = 0;
- for (i = 0; i < ca->sb.njournal_buckets; i++)
- if (ja->seq[i] > seq) {
- seq = ja->seq[i];
- /*
- * When journal_reclaim() goes to allocate for
- * the first time, it'll use the bucket after
- * ja->cur_idx
- */
- ja->cur_idx = i;
- ja->last_idx = ja->discard_idx = (i + 1) %
- ca->sb.njournal_buckets;
+ for (i = 0; i < ca->sb.njournal_buckets; i++)
+ if (ja->seq[i] > seq) {
+ seq = ja->seq[i];
+ /*
+ * When journal_reclaim() goes to allocate for
+ * the first time, it'll use the bucket after
+ * ja->cur_idx
+ */
+ ja->cur_idx = i;
+ ja->last_idx = ja->discard_idx = (i + 1) %
+ ca->sb.njournal_buckets;
- }
- }
+ }
+out:
if (!list_empty(list))
c->journal.seq = list_entry(list->prev,
struct journal_replay,
@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
static bool is_discard_enabled(struct cache_set *s)
{
- struct cache *ca;
- unsigned int i;
+ struct cache *ca = s->cache;
- for_each_cache(ca, s, i)
- if (ca->discard)
- return true;
+ if (ca->discard)
+ return true;
return false;
}
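
The journal-read hunks above keep the golden-ratio probe order while flattening the per-cache loop. The constant 2654435769 is floor(2^32 / phi), so successive multiples scatter probes near-uniformly across the bucket range (Fibonacci hashing), finding a live journal sequence quickly without a linear scan. A stand-alone sketch of the probe sequence, outside the kernel and with an assumed bucket count:

#include <stdio.h>

int main(void)
{
	unsigned int nbuckets = 10;	/* stands in for ca->sb.njournal_buckets */
	unsigned int i;

	for (i = 0; i < nbuckets; i++)
		printf("probe %u -> bucket %u\n", i,
		       (i * 2654435769U) % nbuckets);
	return 0;
}
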
@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
static void journal_reclaim(struct cache_set *c)
{
struct bkey *k = &c->journal.key;
- struct cache *ca;
+ struct cache *ca = c->cache;
uint64_t last_seq;
- unsigned int iter, n = 0;
+ unsigned int next;
+ struct journal_device *ja = &ca->journal;
atomic_t p __maybe_unused;
atomic_long_inc(&c->reclaim);
@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
/* Update last_idx */
- for_each_cache(ca, c, iter) {
- struct journal_device *ja = &ca->journal;
-
- while (ja->last_idx != ja->cur_idx &&
- ja->seq[ja->last_idx] < last_seq)
- ja->last_idx = (ja->last_idx + 1) %
- ca->sb.njournal_buckets;
- }
+ while (ja->last_idx != ja->cur_idx &&
+ ja->seq[ja->last_idx] < last_seq)
+ ja->last_idx = (ja->last_idx + 1) %
+ ca->sb.njournal_buckets;
- for_each_cache(ca, c, iter)
- do_journal_discard(ca);
+ do_journal_discard(ca);
if (c->journal.blocks_free)
goto out;
- /*
- * Allocate:
- * XXX: Sort by free journal space
- */
-
- for_each_cache(ca, c, iter) {
- struct journal_device *ja = &ca->journal;
- unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ /* No space available on this device */
+ if (next == ja->discard_idx)
+ goto out;
- /* No space available on this device */
- if (next == ja->discard_idx)
- continue;
+ ja->cur_idx = next;
+ k->ptr[0] = MAKE_PTR(0,
+ bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+ ca->sb.nr_this_dev);
+ atomic_long_inc(&c->reclaimed_journal_buckets);
- ja->cur_idx = next;
- k->ptr[n++] = MAKE_PTR(0,
- bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
- ca->sb.nr_this_dev);
- atomic_long_inc(&c->reclaimed_journal_buckets);
- }
+ bkey_init(k);
+ SET_KEY_PTRS(k, 1);
+ c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;
- if (n) {
- bkey_init(k);
- SET_KEY_PTRS(k, n);
- c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
- }
out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
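
journal_reclaim() now manages a single device's journal ring directly. As the code suggests, the three indices are presumed to play these roles: last_idx trails at the oldest bucket still needed for replay, discard_idx chases it issuing discards, and cur_idx is the write head, which stalls when its successor would collide with discard_idx. The wrap-around step, isolated:

/* sketch with assumed names; n is ca->sb.njournal_buckets */
static inline unsigned int ring_next(unsigned int idx, unsigned int n)
{
	return (idx + 1) % n;
}

/* in the hunk above: next = ring_next(ja->cur_idx, n);
 * next == ja->discard_idx means no space is available on this device.
 */
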
@@ -750,11 +731,11 @@ static void journal_write_unlocked(struct closure *cl)
__releases(c->journal.lock)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
- struct cache *ca;
+ struct cache *ca = c->cache;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
- unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
- c->sb.block_size;
+ unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
+ ca->sb.block_size;
struct bio *bio;
struct bio_list list;
@@ -773,17 +754,15 @@ static void journal_write_unlocked(struct closure *cl)
return;
}
- c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
+ c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
w->data->btree_level = c->root->level;
bkey_copy(&w->data->btree_root, &c->root->key);
bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
- for_each_cache(ca, c, i)
- w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
-
- w->data->magic = jset_magic(&c->sb);
+ w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+ w->data->magic = jset_magic(&ca->sb);
w->data->version = BCACHE_JSET_VERSION;
w->data->last_seq = last_seq(&c->journal);
w->data->csum = csum_set(w->data);
@@ -859,6 +838,7 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
size_t sectors;
struct closure cl;
bool wait = false;
+ struct cache *ca = c->cache;
closure_init_stack(&cl);
@@ -868,10 +848,10 @@ static struct journal_write *journal_wait_for_write(struct cache_set *c,
struct journal_write *w = c->journal.cur;
sectors = __set_blocks(w->data, w->data->keys + nkeys,
- block_bytes(c)) * c->sb.block_size;
+ block_bytes(ca)) * ca->sb.block_size;
if (sectors <= min_t(size_t,
- c->journal.blocks_free * c->sb.block_size,
+ c->journal.blocks_free * ca->sb.block_size,
PAGE_SECTORS << JSET_BITS))
return w;
@@ -936,7 +916,7 @@ atomic_t *bch_journal(struct cache_set *c,
if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
return NULL;
- if (!CACHE_SYNC(&c->sb))
+ if (!CACHE_SYNC(&c->cache->sb))
return NULL;
w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5872d6470470..b9c3d27ec093 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
void bch_moving_gc(struct cache_set *c)
{
- struct cache *ca;
+ struct cache *ca = c->cache;
struct bucket *b;
- unsigned int i;
+ unsigned long sectors_to_move, reserve_sectors;
if (!c->copy_gc_enabled)
return;
mutex_lock(&c->bucket_lock);
- for_each_cache(ca, c, i) {
- unsigned long sectors_to_move = 0;
- unsigned long reserve_sectors = ca->sb.bucket_size *
+ sectors_to_move = 0;
+ reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
- ca->heap.used = 0;
-
- for_each_bucket(b, ca) {
- if (GC_MARK(b) == GC_MARK_METADATA ||
- !GC_SECTORS_USED(b) ||
- GC_SECTORS_USED(b) == ca->sb.bucket_size ||
- atomic_read(&b->pin))
- continue;
-
- if (!heap_full(&ca->heap)) {
- sectors_to_move += GC_SECTORS_USED(b);
- heap_add(&ca->heap, b, bucket_cmp);
- } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
- sectors_to_move -= bucket_heap_top(ca);
- sectors_to_move += GC_SECTORS_USED(b);
-
- ca->heap.data[0] = b;
- heap_sift(&ca->heap, 0, bucket_cmp);
- }
- }
+ ca->heap.used = 0;
+
+ for_each_bucket(b, ca) {
+ if (GC_MARK(b) == GC_MARK_METADATA ||
+ !GC_SECTORS_USED(b) ||
+ GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+ atomic_read(&b->pin))
+ continue;
- while (sectors_to_move > reserve_sectors) {
- heap_pop(&ca->heap, b, bucket_cmp);
- sectors_to_move -= GC_SECTORS_USED(b);
+ if (!heap_full(&ca->heap)) {
+ sectors_to_move += GC_SECTORS_USED(b);
+ heap_add(&ca->heap, b, bucket_cmp);
+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+ sectors_to_move -= bucket_heap_top(ca);
+ sectors_to_move += GC_SECTORS_USED(b);
+
+ ca->heap.data[0] = b;
+ heap_sift(&ca->heap, 0, bucket_cmp);
}
+ }
- while (heap_pop(&ca->heap, b, bucket_cmp))
- SET_GC_MOVE(b, 1);
+ while (sectors_to_move > reserve_sectors) {
+ heap_pop(&ca->heap, b, bucket_cmp);
+ sectors_to_move -= GC_SECTORS_USED(b);
}
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
+
mutex_unlock(&c->bucket_lock);
c->moving_gc_keys.last_scanned = ZERO_KEY;
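
The moving-GC selection logic is unchanged, only unindented for the single cache. A budget example with assumed numbers: bucket_size = 1024 sectors and 8 buckets in ca->free[RESERVE_MOVINGGC] give reserve_sectors = 1024 * 8 = 8192. The heap is presumed to retain the buckets cheapest to evacuate (fewest live sectors, per bucket_cmp()), with the costliest retained candidate on top; the pop loop then sheds candidates until the live data to move fits the reserve, and the survivors are tagged:

/* survivors of the budget trim become copy-GC sources */
while (heap_pop(&ca->heap, b, bucket_cmp))
	SET_GC_MOVE(b, 1);
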
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index c7cadaafa947..214326383145 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
* bch_data_insert_keys() will insert the keys created so far
* and finish the rest when the keylist is empty.
*/
- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
return -ENOMEM;
return __bch_keylist_realloc(l, u64s);
@@ -394,8 +394,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip;
}
- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
pr_debug("skipping unaligned io\n");
goto skip;
}
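
The bypass test above is the standard power-of-two trick: x & (n - 1) equals x mod n when n is a power of two, as the superblock block_size is. Stand-alone illustration:

#include <assert.h>

int main(void)
{
	unsigned int block_size = 8;	/* sectors; must be a power of two */

	assert((16 & (block_size - 1)) == 0);	/* sector 16 is aligned */
	assert((18 & (block_size - 1)) != 0);	/* sector 18 is not: bypass */
	return 0;
}
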
@@ -475,6 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
+ struct hd_struct *part;
unsigned long start_time;
struct btree_op op;
@@ -669,7 +670,7 @@ static void bio_complete(struct search *s)
{
if (s->orig_bio) {
/* Count on bcache device */
- disk_end_io_acct(s->d->disk, bio_op(s->orig_bio), s->start_time);
+ part_end_io_acct(s->part, s->orig_bio, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
@@ -731,7 +732,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
/* Count on the bcache device */
- s->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
+ s->start_time = part_start_io_acct(d->disk, &s->part, bio);
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -1072,6 +1073,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
+ struct hd_struct *part;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1083,7 +1085,7 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_private = ddip->bi_private;
/* Count on the bcache device */
- disk_end_io_acct(ddip->d->disk, bio_op(bio), ddip->start_time);
+ part_end_io_acct(ddip->part, bio, ddip->start_time);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1109,7 +1111,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
ddip->d = d;
/* Count on the bcache device */
- ddip->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
+ ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
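
Both the attached and the detached path follow the same accounting pairing: part_start_io_acct() resolves the partition for the bio once at submission, accounts the start there, and returns the start time; part_end_io_acct() must later be handed that same struct hd_struct. That is why struct search and struct detached_dev_io_private each grow a part field. The pairing, isolated:

struct hd_struct *part;
unsigned long start_time;

/* at submission: resolve partition, record start */
start_time = part_start_io_acct(d->disk, &part, bio);

/* ... bio in flight ... */

/* at completion: finish accounting against the same partition */
part_end_io_acct(part, bio, start_time);
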
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1bbdc410ee3c..46a00134a36a 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -343,34 +343,25 @@ static void bcache_write_super_unlock(struct closure *cl)
void bcache_write_super(struct cache_set *c)
{
struct closure *cl = &c->sb_write;
- struct cache *ca;
- unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+ struct cache *ca = c->cache;
+ struct bio *bio = &ca->sb_bio;
+ unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);
- c->sb.seq++;
-
- if (c->sb.version > version)
- version = c->sb.version;
-
- for_each_cache(ca, c, i) {
- struct bio *bio = &ca->sb_bio;
-
- ca->sb.version = version;
- ca->sb.seq = c->sb.seq;
- ca->sb.last_mount = c->sb.last_mount;
+ ca->sb.seq++;
- SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+ if (ca->sb.version < version)
+ ca->sb.version = version;
- bio_init(bio, ca->sb_bv, 1);
- bio_set_dev(bio, ca->bdev);
- bio->bi_end_io = write_super_endio;
- bio->bi_private = ca;
+ bio_init(bio, ca->sb_bv, 1);
+ bio_set_dev(bio, ca->bdev);
+ bio->bi_end_io = write_super_endio;
+ bio->bi_private = ca;
- closure_get(cl);
- __write_super(&ca->sb, ca->sb_disk, bio);
- }
+ closure_get(cl);
+ __write_super(&ca->sb, ca->sb_disk, bio);
closure_return_with_destructor(cl, bcache_write_super_unlock);
}
@@ -480,22 +471,21 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
- struct cache *ca;
+ struct cache *ca = c->cache;
unsigned int size;
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
- if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
+ if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
return 1;
- size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+ size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
SET_KEY_SIZE(&k.key, size);
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
/* Only one bucket used for uuid write */
- ca = PTR_CACHE(c, &k.key, 0);
atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
bkey_copy(&c->uuid_bucket, &k.key);
@@ -772,26 +762,22 @@ static void bcache_device_unlink(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock);
if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
- unsigned int i;
- struct cache *ca;
+ struct cache *ca = d->c->cache;
sysfs_remove_link(&d->c->kobj, d->name);
sysfs_remove_link(&d->kobj, "cache");
- for_each_cache(ca, d->c, i)
- bd_unlink_disk_holder(ca->bdev, d->disk);
+ bd_unlink_disk_holder(ca->bdev, d->disk);
}
}
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
const char *name)
{
- unsigned int i;
- struct cache *ca;
+ struct cache *ca = c->cache;
int ret;
- for_each_cache(ca, d->c, i)
- bd_link_disk_holder(ca->bdev, d->disk);
+ bd_link_disk_holder(ca->bdev, d->disk);
snprintf(d->name, BCACHEDEVNAME_SIZE,
"%s%u", name, d->id);
@@ -1196,8 +1182,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
struct cached_dev *exist_dc, *t;
int ret = 0;
- if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
- (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
+ if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
+ (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
return -ENOENT;
if (dc->disk.c) {
@@ -1212,7 +1198,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -EINVAL;
}
- if (dc->sb.block_size < c->sb.block_size) {
+ if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
pr_err("Couldn't attach %s: block size less than set's block size\n",
dc->backing_dev_name);
@@ -1269,7 +1255,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
u->first_reg = u->last_reg = rtime;
bch_uuid_write(c);
- memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
+ memcpy(dc->sb.set_uuid, c->set_uuid, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
bch_write_bdev_super(dc, &cl);
@@ -1331,7 +1317,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
pr_info("Caching %s as %s on set %pU\n",
dc->backing_dev_name,
dc->disk.disk->disk_name,
- dc->disk.c->sb.set_uuid);
+ dc->disk.c->set_uuid);
return 0;
}
@@ -1427,9 +1413,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
if (ret)
return ret;
- dc->disk.disk->queue->backing_dev_info->ra_pages =
- max(dc->disk.disk->queue->backing_dev_info->ra_pages,
- q->backing_dev_info->ra_pages);
+ blk_queue_io_opt(dc->disk.disk->queue,
+ max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
atomic_set(&dc->io_errors, 0);
dc->io_disable = false;
@@ -1535,7 +1520,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
- if (bcache_device_init(d, block_bytes(c), u->sectors,
+ if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
NULL, &bcache_flash_ops))
goto err;
@@ -1639,7 +1624,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
vaf.va = &args;
pr_err("error on %pU: %pV, disabling caching\n",
- c->sb.set_uuid, &vaf);
+ c->set_uuid, &vaf);
va_end(args);
@@ -1663,7 +1648,6 @@ static void cache_set_free(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
struct cache *ca;
- unsigned int i;
debugfs_remove(c->debug);
@@ -1672,15 +1656,16 @@ static void cache_set_free(struct closure *cl)
bch_journal_free(c);
mutex_lock(&bch_register_lock);
- for_each_cache(ca, c, i)
- if (ca) {
- ca->set = NULL;
- c->cache[ca->sb.nr_this_dev] = NULL;
- kobject_put(&ca->kobj);
- }
-
bch_bset_sort_state_free(&c->sort);
- free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
+ free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
+
+ ca = c->cache;
+ if (ca) {
+ ca->set = NULL;
+ c->cache = NULL;
+ kobject_put(&ca->kobj);
+ }
+
if (c->moving_gc_wq)
destroy_workqueue(c->moving_gc_wq);
@@ -1693,7 +1678,7 @@ static void cache_set_free(struct closure *cl)
list_del(&c->list);
mutex_unlock(&bch_register_lock);
- pr_info("Cache set %pU unregistered\n", c->sb.set_uuid);
+ pr_info("Cache set %pU unregistered\n", c->set_uuid);
wake_up(&unregister_wait);
closure_debug_destroy(&c->cl);
@@ -1703,9 +1688,8 @@ static void cache_set_free(struct closure *cl)
static void cache_set_flush(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
- struct cache *ca;
+ struct cache *ca = c->cache;
struct btree *b;
- unsigned int i;
bch_cache_accounting_destroy(&c->accounting);
@@ -1730,9 +1714,8 @@ static void cache_set_flush(struct closure *cl)
mutex_unlock(&b->write_lock);
}
- for_each_cache(ca, c, i)
- if (ca->alloc_thread)
- kthread_stop(ca->alloc_thread);
+ if (ca->alloc_thread)
+ kthread_stop(ca->alloc_thread);
if (c->journal.cur) {
cancel_delayed_work_sync(&c->journal.work);
@@ -1765,7 +1748,7 @@ static void conditional_stop_bcache_device(struct cache_set *c,
{
if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
- d->disk->disk_name, c->sb.set_uuid);
+ d->disk->disk_name, c->set_uuid);
bcache_device_stop(d);
} else if (atomic_read(&dc->has_dirty)) {
/*
@@ -1842,15 +1825,13 @@ void bch_cache_set_unregister(struct cache_set *c)
bch_cache_set_stop(c);
}
-#define alloc_bucket_pages(gfp, c) \
- ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
-
#define alloc_meta_bucket_pages(gfp, sb) \
((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
+ struct cache *ca = container_of(sb, struct cache, sb);
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
if (!c)
@@ -1872,24 +1853,16 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bch_cache_accounting_init(&c->accounting, &c->cl);
- memcpy(c->sb.set_uuid, sb->set_uuid, 16);
- c->sb.block_size = sb->block_size;
- c->sb.bucket_size = sb->bucket_size;
- c->sb.nr_in_set = sb->nr_in_set;
- c->sb.last_mount = sb->last_mount;
- c->sb.version = sb->version;
- if (c->sb.version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
- c->sb.feature_compat = sb->feature_compat;
- c->sb.feature_ro_compat = sb->feature_ro_compat;
- c->sb.feature_incompat = sb->feature_incompat;
- }
+ memcpy(c->set_uuid, sb->set_uuid, 16);
+ c->cache = ca;
+ c->cache->set = c;
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
- c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
+ c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
c->devices_max_used = 0;
atomic_set(&c->attached_dev_nr, 0);
- c->btree_pages = meta_bucket_pages(&c->sb);
+ c->btree_pages = meta_bucket_pages(sb);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
@@ -1927,7 +1900,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
sizeof(struct bbio) +
- sizeof(struct bio_vec) * meta_bucket_pages(&c->sb)))
+ sizeof(struct bio_vec) * meta_bucket_pages(sb)))
goto err;
if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
@@ -1937,7 +1910,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err;
- c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
+ c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
if (!c->uuids)
goto err;
@@ -1973,19 +1946,17 @@ static int run_cache_set(struct cache_set *c)
{
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
- struct cache *ca;
+ struct cache *ca = c->cache;
struct closure cl;
- unsigned int i;
LIST_HEAD(journal);
struct journal_replay *l;
closure_init_stack(&cl);
- for_each_cache(ca, c, i)
- c->nbuckets += ca->sb.nbuckets;
+ c->nbuckets = ca->sb.nbuckets;
set_gc_sectors(c);
- if (CACHE_SYNC(&c->sb)) {
+ if (CACHE_SYNC(&c->cache->sb)) {
struct bkey *k;
struct jset *j;
@@ -2002,10 +1973,8 @@ static int run_cache_set(struct cache_set *c)
j = &list_entry(journal.prev, struct journal_replay, list)->j;
err = "IO error reading priorities";
- for_each_cache(ca, c, i) {
- if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
- goto err;
- }
+ if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+ goto err;
/*
* If prio_read() fails it'll call cache_set_error and we'll
@@ -2049,9 +2018,8 @@ static int run_cache_set(struct cache_set *c)
bch_journal_next(&c->journal);
err = "error starting allocator thread";
- for_each_cache(ca, c, i)
- if (bch_cache_allocator_start(ca))
- goto err;
+ if (bch_cache_allocator_start(ca))
+ goto err;
/*
* First place it's safe to allocate: btree_check() and
@@ -2070,28 +2038,23 @@ static int run_cache_set(struct cache_set *c)
if (bch_journal_replay(c, &journal))
goto err;
} else {
- pr_notice("invalidating existing data\n");
+ unsigned int j;
- for_each_cache(ca, c, i) {
- unsigned int j;
+ pr_notice("invalidating existing data\n");
+ ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+ 2, SB_JOURNAL_BUCKETS);
- ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
- 2, SB_JOURNAL_BUCKETS);
-
- for (j = 0; j < ca->sb.keys; j++)
- ca->sb.d[j] = ca->sb.first_bucket + j;
- }
+ for (j = 0; j < ca->sb.keys; j++)
+ ca->sb.d[j] = ca->sb.first_bucket + j;
bch_initial_gc_finish(c);
err = "error starting allocator thread";
- for_each_cache(ca, c, i)
- if (bch_cache_allocator_start(ca))
- goto err;
+ if (bch_cache_allocator_start(ca))
+ goto err;
mutex_lock(&c->bucket_lock);
- for_each_cache(ca, c, i)
- bch_prio_write(ca, true);
+ bch_prio_write(ca, true);
mutex_unlock(&c->bucket_lock);
err = "cannot allocate new UUID bucket";
@@ -2116,7 +2079,7 @@ static int run_cache_set(struct cache_set *c)
* everything is set up - fortunately journal entries won't be
* written until the SET_CACHE_SYNC() here:
*/
- SET_CACHE_SYNC(&c->sb, true);
+ SET_CACHE_SYNC(&c->cache->sb, true);
bch_journal_next(&c->journal);
bch_journal_meta(c, &cl);
@@ -2127,7 +2090,7 @@ static int run_cache_set(struct cache_set *c)
goto err;
closure_sync(&cl);
- c->sb.last_mount = (u32)ktime_get_real_seconds();
+ c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
list_for_each_entry_safe(dc, t, &uncached_devices, list)
@@ -2151,13 +2114,6 @@ err:
return -EIO;
}
-static bool can_attach_cache(struct cache *ca, struct cache_set *c)
-{
- return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.bucket_size &&
- ca->sb.nr_in_set == c->sb.nr_in_set;
-}
-
static const char *register_cache_set(struct cache *ca)
{
char buf[12];
@@ -2165,16 +2121,10 @@ static const char *register_cache_set(struct cache *ca)
struct cache_set *c;
list_for_each_entry(c, &bch_cache_sets, list)
- if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
- if (c->cache[ca->sb.nr_this_dev])
+ if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
+ if (c->cache)
return "duplicate cache set member";
- if (!can_attach_cache(ca, c))
- return "cache sb does not match set";
-
- if (!CACHE_SYNC(&ca->sb))
- SET_CACHE_SYNC(&c->sb, false);
-
goto found;
}
@@ -2183,7 +2133,7 @@ static const char *register_cache_set(struct cache *ca)
return err;
err = "error creating kobject";
- if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
+ if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
kobject_add(&c->internal, &c->kobj, "internal"))
goto err;
@@ -2199,31 +2149,13 @@ found:
sysfs_create_link(&c->kobj, &ca->kobj, buf))
goto err;
- /*
- * A special case is both ca->sb.seq and c->sb.seq are 0,
- * such condition happens on a new created cache device whose
- * super block is never flushed yet. In this case c->sb.version
- * and other members should be updated too, otherwise we will
- * have a mistaken super block version in cache set.
- */
- if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
- c->sb.version = ca->sb.version;
- memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
- c->sb.flags = ca->sb.flags;
- c->sb.seq = ca->sb.seq;
- pr_debug("set version = %llu\n", c->sb.version);
- }
-
kobject_get(&ca->kobj);
ca->set = c;
- ca->set->cache[ca->sb.nr_this_dev] = ca;
- c->cache_by_alloc[c->caches_loaded++] = ca;
+ ca->set->cache = ca;
- if (c->caches_loaded == c->sb.nr_in_set) {
- err = "failed to run cache set";
- if (run_cache_set(c) < 0)
- goto err;
- }
+ err = "failed to run cache set";
+ if (run_cache_set(c) < 0)
+ goto err;
return NULL;
err:
@@ -2240,8 +2172,8 @@ void bch_cache_release(struct kobject *kobj)
unsigned int i;
if (ca->set) {
- BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
- ca->set->cache[ca->sb.nr_this_dev] = NULL;
+ BUG_ON(ca->set->cache != ca);
+ ca->set->cache = NULL;
}
free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
@@ -2449,7 +2381,6 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
-kobj_attribute_write(register_async, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
static bool bch_is_open_backing(struct block_device *bdev)
@@ -2470,13 +2401,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
static bool bch_is_open_cache(struct block_device *bdev)
{
struct cache_set *c, *tc;
- struct cache *ca;
- unsigned int i;
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
- for_each_cache(ca, c, i)
- if (ca->bdev == bdev)
- return true;
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+ struct cache *ca = c->cache;
+
+ if (ca->bdev == bdev)
+ return true;
+ }
+
return false;
}
@@ -2572,6 +2504,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
struct cache_sb_disk *sb_disk;
struct block_device *bdev;
ssize_t ret;
+ bool async_registration = false;
+
+#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
+ async_registration = true;
+#endif
ret = -EBUSY;
err = "failed to reference bcache module";
@@ -2625,7 +2562,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
goto out_blkdev_put;
err = "failed to register device";
- if (attr == &ksysfs_register_async) {
+
+ if (async_registration) {
/* register in asynchronous way */
struct async_reg_args *args =
kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
@@ -2720,7 +2658,7 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
char *pdev_set_uuid = pdev->dc->sb.set_uuid;
- char *set_uuid = c->sb.uuid;
+ char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
list_del(&pdev->list);
@@ -2888,9 +2826,6 @@ static int __init bcache_init(void)
static const struct attribute *files[] = {
&ksysfs_register.attr,
&ksysfs_register_quiet.attr,
-#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
- &ksysfs_register_async.attr,
-#endif
&ksysfs_pendings_cleanup.attr,
NULL
};
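
Taken together, these hunks turn asynchronous registration from a runtime choice (a separate register_async sysfs file) into a build-time one. The added #ifdef block could equally be written with IS_ENABLED(), which folds a Kconfig symbol to a compile-time 0 or 1:

/* equivalent, more compact form of the flag setup above */
bool async_registration = IS_ENABLED(CONFIG_BCACHE_ASYNC_REGISTRATION);
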
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index ac06c0bc3c0a..554e3afc9b68 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -711,10 +711,10 @@ SHOW(__bch_cache_set)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
- sysfs_print(synchronous, CACHE_SYNC(&c->sb));
+ sysfs_print(synchronous, CACHE_SYNC(&c->cache->sb));
sysfs_print(journal_delay_ms, c->journal_delay_ms);
- sysfs_hprint(bucket_size, bucket_bytes(c));
- sysfs_hprint(block_size, block_bytes(c));
+ sysfs_hprint(bucket_size, bucket_bytes(c->cache));
+ sysfs_hprint(block_size, block_bytes(c->cache));
sysfs_print(tree_depth, c->root->level);
sysfs_print(root_usage_percent, bch_root_usage(c));
@@ -812,8 +812,8 @@ STORE(__bch_cache_set)
if (attr == &sysfs_synchronous) {
bool sync = strtoul_or_return(buf);
- if (sync != CACHE_SYNC(&c->sb)) {
- SET_CACHE_SYNC(&c->sb, sync);
+ if (sync != CACHE_SYNC(&c->cache->sb)) {
+ SET_CACHE_SYNC(&c->cache->sb, sync);
bcache_write_super(c);
}
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4f4ad6b3d43a..3c74996978da 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -35,7 +35,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* This is the size of the cache, minus the amount used for
* flash-only devices
*/
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
atomic_long_read(&c->flash_dev_dirty_sectors);
/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 96c93802ee4d..9644424591da 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -925,7 +925,7 @@ static enum cache_metadata_mode get_cache_mode(struct cache *cache)
static const char *cache_device_name(struct cache *cache)
{
- return dm_device_name(dm_table_get_md(cache->ti->table));
+ return dm_table_device_name(cache->ti->table);
}
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
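
dm_table_device_name() saves call sites the dm_table_get_md() round-trip seen on the removed line. A sketch of its assumed shape; the exported helper lives in dm-table.c, where struct dm_table's members are visible:

const char *dm_table_device_name(struct dm_table *t)
{
	return dm_device_name(t->md);
}
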
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c4ef1fceead6..d522093cb39d 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include <linux/ktime.h>
+#include <linux/genhd.h>
#include <linux/blk-mq.h>
#include <trace/events/block.h>
@@ -25,9 +26,11 @@ struct dm_kobject_holder {
};
/*
- * DM core internal structure that used directly by dm.c and dm-rq.c
- * DM targets must _not_ deference a mapped_device to directly access its members!
+ * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
+ * DM targets must _not_ dereference a mapped_device or dm_table to directly
+ * access their members!
*/
+
struct mapped_device {
struct mutex suspend_lock;
@@ -119,6 +122,55 @@ void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
+static inline sector_t dm_get_size(struct mapped_device *md)
+{
+ return get_capacity(md->disk);
+}
+
+static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+ return &md->stats;
+}
+
+#define DM_TABLE_MAX_DEPTH 16
+
+struct dm_table {
+ struct mapped_device *md;
+ enum dm_queue_mode type;
+
+ /* btree table */
+ unsigned int depth;
+ unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
+ sector_t *index[DM_TABLE_MAX_DEPTH];
+
+ unsigned int num_targets;
+ unsigned int num_allocated;
+ sector_t *highs;
+ struct dm_target *targets;
+
+ struct target_type *immutable_target_type;
+
+ bool integrity_supported:1;
+ bool singleton:1;
+ unsigned integrity_added:1;
+
+ /*
+ * Indicates the rw permissions for the new logical
+ * device. This should be a combination of FMODE_READ
+ * and FMODE_WRITE.
+ */
+ fmode_t mode;
+
+ /* a list of devices used by this table */
+ struct list_head devices;
+
+ /* events get handed up using this callback */
+ void (*event_fn)(void *);
+ void *event_context;
+
+ struct dm_md_mempools *mempools;
+};
+
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
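
Exposing struct dm_table (and the dm_get_size()/dm_get_stats() inlines) in dm-core.h is what several later hunks rely on: DM core files may now touch table members directly instead of bouncing through accessors, while targets still must not. dm_table_run_md_queue_async() at the end of the dm-table.c changes below reduces to exactly that:

if (t->md->queue)
	blk_mq_run_hw_queues(t->md->queue, true);
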
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 380386c36921..392337f16ecf 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -424,7 +424,8 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+ lmk->hash_tfm = crypto_alloc_shash("md5", 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(lmk->hash_tfm)) {
ti->error = "Error initializing LMK hash";
return PTR_ERR(lmk->hash_tfm);
@@ -586,7 +587,8 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
+ tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(tcw->crc32_tfm)) {
ti->error = "Error initializing CRC32 in TCW";
return PTR_ERR(tcw->crc32_tfm);
@@ -773,7 +775,8 @@ static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
int r;
- elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+ elephant->tfm = crypto_alloc_skcipher("ecb(aes)", 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(elephant->tfm)) {
r = PTR_ERR(elephant->tfm);
elephant->tfm = NULL;
@@ -2154,7 +2157,8 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
return -ENOMEM;
for (i = 0; i < cc->tfms_count; i++) {
- cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
+ cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(cc->cipher_tfm.tfms[i])) {
err = PTR_ERR(cc->cipher_tfm.tfms[i]);
crypt_free_tfms(cc);
@@ -2180,7 +2184,8 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
if (!cc->cipher_tfm.tfms)
return -ENOMEM;
- cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0);
+ cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
crypt_free_tfms(cc);
@@ -2667,7 +2672,7 @@ static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
return -ENOMEM;
strncpy(mac_alg, start, end - start);
- mac = crypto_alloc_ahash(mac_alg, 0, 0);
+ mac = crypto_alloc_ahash(mac_alg, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
kfree(mac_alg);
if (IS_ERR(mac))
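
A note on the repeated new third argument: crypto_alloc_shash()/crypto_alloc_skcipher()/crypto_alloc_aead()/crypto_alloc_ahash() take (name, type, mask), and setting CRYPTO_ALG_ALLOCATES_MEMORY in the mask (while leaving it clear in type) restricts selection to implementations that do not allocate memory while processing requests. For dm-crypt, which can sit on the writeback path, that avoids data-path allocations that could recurse into reclaim. The pattern, isolated:

struct crypto_skcipher *tfm;

/* only accept implementations that never allocate in the data path */
tfm = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);
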
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 28122e850ea1..cd0478d44058 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2044,7 +2044,7 @@ out:
return r;
}
-
+EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);
/**
* dm_early_create - create a mapped device in early boot.
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index e1db43446327..00774b5d7668 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -228,10 +228,11 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
- .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
+ DM_TARGET_ZONED_HM,
.report_zones = linear_report_zones,
#else
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de4da825ade6..bced42f082b0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -466,10 +466,8 @@ failed:
*/
#define dm_report_EIO(m) \
do { \
- struct mapped_device *md = dm_table_get_md((m)->ti->table); \
- \
DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
- dm_device_name(md), \
+ dm_table_device_name((m)->ti->table), \
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
dm_noflush_suspending((m)->ti)); \
@@ -736,7 +734,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
{
unsigned long flags;
bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
- const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
+ const char *dm_dev_name = dm_table_device_name(m->ti->table);
DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
@@ -781,9 +779,9 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
struct multipath *m = from_timer(m, t, nopath_timer);
- struct mapped_device *md = dm_table_get_md(m->ti->table);
- DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
+ DMWARN("queue_if_no_path timeout on %s, failing queued IO",
+ dm_table_device_name(m->ti->table));
queue_if_no_path(m, false, false, __func__);
}
@@ -1334,7 +1332,7 @@ static int fail_path(struct pgpath *pgpath)
goto out;
DMWARN("%s: Failing path %s.",
- dm_device_name(dm_table_get_md(m->ti->table)),
+ dm_table_device_name(m->ti->table),
pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
@@ -1375,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath)
goto out;
DMWARN("%s: Reinstating path %s.",
- dm_device_name(dm_table_get_md(m->ti->table)),
+ dm_table_device_name(m->ti->table),
pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
@@ -1766,7 +1764,7 @@ static void multipath_resume(struct dm_target *ti)
}
DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
- dm_device_name(dm_table_get_md(m->ti->table)), __func__,
+ dm_table_device_name(m->ti->table), __func__,
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8d2b835d7a10..9c1f7c4de65b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -701,7 +701,7 @@ static void rs_set_capacity(struct raid_set *rs)
struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
set_capacity(gendisk, rs->md.array_sectors);
- revalidate_disk(gendisk);
+ revalidate_disk_size(gendisk, true);
}
/*
@@ -3728,15 +3728,6 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_min(limits, chunk_size_bytes);
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
-
- /*
- * RAID1 and RAID10 personalities require bio splitting,
- * RAID0/4/5/6 don't and process large discard bios properly.
- */
- if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
- limits->discard_granularity = chunk_size_bytes;
- limits->max_discard_sectors = rs->md.chunk_sectors;
- }
}
static void raid_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6d743ff6a314..729a72ec30cc 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -175,7 +175,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long mse
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
- __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+ __dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 63fab7c769be..8e329c3f3a78 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,16 +284,9 @@ static void skip_metadata(struct pstore *ps)
*/
static int area_io(struct pstore *ps, int op, int op_flags)
{
- int r;
- chunk_t chunk;
-
- chunk = area_location(ps, ps->current_area);
-
- r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
- if (r)
- return r;
+ chunk_t chunk = area_location(ps, ps->current_area);
- return 0;
+ return chunk_io(ps, ps->area, chunk, op, op_flags, 0);
}
static void zero_memory_area(struct pstore *ps)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 229f461e7def..ce543b761be7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -18,54 +18,17 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/lcm.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
-#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
-struct dm_table {
- struct mapped_device *md;
- enum dm_queue_mode type;
-
- /* btree table */
- unsigned int depth;
- unsigned int counts[MAX_DEPTH]; /* in nodes */
- sector_t *index[MAX_DEPTH];
-
- unsigned int num_targets;
- unsigned int num_allocated;
- sector_t *highs;
- struct dm_target *targets;
-
- struct target_type *immutable_target_type;
-
- bool integrity_supported:1;
- bool singleton:1;
- unsigned integrity_added:1;
-
- /*
- * Indicates the rw permissions for the new logical
- * device. This should be a combination of FMODE_READ
- * and FMODE_WRITE.
- */
- fmode_t mode;
-
- /* a list of devices used by this table */
- struct list_head devices;
-
- /* events get handed up using this callback */
- void (*event_fn)(void *);
- void *event_context;
-
- struct dm_md_mempools *mempools;
-};
-
/*
* Similar to ceiling(log_size(n))
*/
@@ -841,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED ||
- table_type == DM_TYPE_NVME_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -898,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t,
return true;
}
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -907,7 +867,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
struct request_queue *q = bdev_get_queue(bdev);
/* request-based cannot stack on partitions! */
- if (bdev != bdev->bd_contains)
+ if (bdev_is_partition(bdev))
return false;
return queue_is_mq(q);
@@ -929,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t)
goto verify_bio_based;
}
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
- BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
goto verify_rq_based;
}
@@ -968,15 +927,6 @@ verify_bio_based:
if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
- } else {
- /* Check if upgrading to NVMe bio-based is valid or required */
- tgt = dm_table_get_immutable_target(t);
- if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
- } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
- t->type = DM_TYPE_NVME_BIO_BASED;
- }
}
return 0;
}
@@ -993,8 +943,7 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMERR("%s DM doesn't support multiple targets",
- t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+ DMERR("request-based DM doesn't support multiple targets");
return -EINVAL;
}
@@ -1506,6 +1455,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
zone_sectors = ti_limits.chunk_sectors;
}
+ /* Stack chunk_sectors if target-specific splitting is required */
+ if (ti->max_io_len)
+ ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len,
+ ti_limits.chunk_sectors);
/* Set I/O hints portion of queue limits */
if (ti->type->io_hints)
ti->type->io_hints(ti, &ti_limits);
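
lcm_not_zero() stacks the two split constraints: resulting chunk boundaries must satisfy both the already-accumulated chunk_sectors and the target's max_io_len, and a zero on either side simply yields the other value. A stand-alone model with a worked example:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

/* userspace model of include/linux/lcm.h's lcm_not_zero() */
static unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a / gcd(a, b) * b;
}

int main(void)
{
	/* assumed: ti->max_io_len = 24, accumulated chunk_sectors = 16 */
	printf("%lu\n", lcm_not_zero(24, 16));	/* prints 48 */
	return 0;
}
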
@@ -1684,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- char b[BDEVNAME_SIZE];
-
- /* For now, NVMe devices are the only devices of this class */
- return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
- return dm_table_all_devices_attribute(t, device_no_partial_completion);
-}
-
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1752,6 +1691,33 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
+static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && !blk_queue_nowait(q);
+}
+
+static bool dm_table_supports_nowait(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!dm_target_supports_nowait(ti->type))
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
+ return false;
+ }
+
+ return true;
+}
+
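
dm_table_supports_nowait() follows the standard DM capability idiom: every target must declare DM_TARGET_NOWAIT (as dm-linear now does, earlier in this diff) and every underlying queue must have QUEUE_FLAG_NOWAIT; the iterate_devices callback returns nonzero for a device that violates the property, so any nonzero result vetoes the table. The verdict is consumed a few hunks below in dm_table_set_restrictions():

if (dm_table_supports_nowait(t))
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
else
	blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
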
static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1819,7 +1785,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+ return q && blk_queue_stable_writes(q);
}
/*
@@ -1854,6 +1820,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
q->limits = *limits;
+ if (dm_table_supports_nowait(t))
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+
if (!dm_table_supports_discards(t)) {
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
/* Must also clear discard limits... */
@@ -1904,9 +1875,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
* because they do their own checksumming.
*/
if (dm_table_requires_stable_pages(t))
- q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
- q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
/*
* Determine whether or not this queue's I/O timings contribute
@@ -1929,8 +1900,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
#endif
- /* Allow reads to exceed readahead limits */
- q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
+ blk_queue_update_readahead(q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -2049,16 +2019,11 @@ EXPORT_SYMBOL_GPL(dm_table_device_name);
void dm_table_run_md_queue_async(struct dm_table *t)
{
- struct mapped_device *md;
- struct request_queue *queue;
-
if (!dm_table_request_based(t))
return;
- md = dm_table_get_md(t);
- queue = dm_get_md_queue(md);
- if (queue)
- blk_mq_run_hw_queues(queue, true);
+ if (t->md->queue)
+ blk_mq_run_hw_queues(t->md->queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b461836b6d26..6ebb2127f3e2 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1051,12 +1051,11 @@ static int __create_thin(struct dm_pool_metadata *pmd,
int r;
dm_block_t dev_root;
uint64_t key = dev;
- struct disk_device_details details_le;
struct dm_thin_device *td;
__le64 value;
r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
- &key, &details_le);
+ &key, NULL);
if (!r)
return -EEXIST;
@@ -1129,12 +1128,11 @@ static int __create_snap(struct dm_pool_metadata *pmd,
dm_block_t origin_root;
uint64_t key = origin, dev_key = dev;
struct dm_thin_device *td;
- struct disk_device_details details_le;
__le64 value;
/* check this device is unused */
r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
- &dev_key, &details_le);
+ &dev_key, NULL);
if (!r)
return -EEXIST;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 6271d1e741cf..9ae4ce7df95c 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -49,7 +49,7 @@ do { \
#define pmem_assign(dest, src) ((dest) = (src))
#endif
-#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
+#if IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif
@@ -992,7 +992,8 @@ static void writecache_resume(struct dm_target *ti)
}
wc->freelist_size = 0;
- r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
+ r = copy_mc_to_kernel(&sb_seq_count, &sb(wc)->seq_count,
+ sizeof(uint64_t));
if (r) {
writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
sb_seq_count = cpu_to_le64(0);
@@ -1008,7 +1009,8 @@ static void writecache_resume(struct dm_target *ti)
e->seq_count = -1;
continue;
}
- r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
+ r = copy_mc_to_kernel(&wme, memory_entry(wc, e),
+ sizeof(struct wc_memory_entry));
if (r) {
writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
(unsigned long)b, r);
@@ -1206,7 +1208,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
if (rw == READ) {
int r;
- r = memcpy_mcsafe(buf, data, size);
+ r = copy_mc_to_kernel(buf, data, size);
flush_dcache_page(bio_page(bio));
if (unlikely(r)) {
writecache_error(wc, r, "hardware memory error when reading data: %d", r);
@@ -2349,7 +2351,7 @@ invalid_optional:
}
}
- r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
+ r = copy_mc_to_kernel(&s, sb(wc), sizeof(struct wc_memory_superblock));
if (r) {
ti->error = "Hardware memory error when reading superblock";
goto bad;
@@ -2360,7 +2362,8 @@ invalid_optional:
ti->error = "Unable to initialize device";
goto bad;
}
- r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
+ r = copy_mc_to_kernel(&s, sb(wc),
+ sizeof(struct wc_memory_superblock));
if (r) {
ti->error = "Hardware memory error when reading superblock";
goto bad;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ed05ca65a0f..c18fc2548518 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove();
}
-sector_t dm_get_size(struct mapped_device *md)
-{
- return get_capacity(md->disk);
-}
-
-struct request_queue *dm_get_md_queue(struct mapped_device *md)
-{
- return md->queue;
-}
-
-struct dm_stats *dm_get_stats(struct mapped_device *md)
-{
- return &md->stats;
-}
-
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct mapped_device *md = bdev->bd_disk->private_data;
@@ -591,7 +576,44 @@ out:
return r;
}
-static void start_io_acct(struct dm_io *io);
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_io *io = tio->io;
+
+ return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
+static void start_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+ struct bio *bio = io->orig_bio;
+
+ io->start_time = bio_start_io_acct(bio);
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ false, 0, &io->stats_aux);
+}
+
+static void end_io_acct(struct dm_io *io)
+{
+ struct mapped_device *md = io->md;
+ struct bio *bio = io->orig_bio;
+ unsigned long duration = jiffies - io->start_time;
+
+ bio_end_io_acct(bio, io->start_time);
+
+ if (unlikely(dm_stats_used(&md->stats)))
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ true, duration, &io->stats_aux);
+
+ /* nudge anyone waiting on suspend queue */
+ if (unlikely(wq_has_sleeper(&md->wait)))
+ wake_up(&md->wait);
+}
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
@@ -657,45 +679,6 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone);
}
-u64 dm_start_time_ns_from_clone(struct bio *bio)
-{
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct dm_io *io = tio->io;
-
- return jiffies_to_nsecs(io->start_time);
-}
-EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-
-static void start_io_acct(struct dm_io *io)
-{
- struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
-
- io->start_time = bio_start_io_acct(bio);
- if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio_data_dir(bio),
- bio->bi_iter.bi_sector, bio_sectors(bio),
- false, 0, &io->stats_aux);
-}
-
-static void end_io_acct(struct dm_io *io)
-{
- struct mapped_device *md = io->md;
- struct bio *bio = io->orig_bio;
- unsigned long duration = jiffies - io->start_time;
-
- bio_end_io_acct(bio, io->start_time);
-
- if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio_data_dir(bio),
- bio->bi_iter.bi_sector, bio_sectors(bio),
- true, duration, &io->stats_aux);
-
- /* nudge anyone waiting on suspend queue */
- if (unlikely(wq_has_sleeper(&md->wait)))
- wake_up(&md->wait);
-}
-
/*
* Add the bio to the list of deferred io.
*/
@@ -992,7 +975,7 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
struct bio *orig_bio = io->orig_bio;
- if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+ if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_DISCARD &&
!bio->bi_disk->queue->limits.max_discard_sectors)
disable_discard(md);
@@ -1041,32 +1024,28 @@ static void clone_endio(struct bio *bio)
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
-static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
+static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
+ sector_t target_offset)
{
- sector_t target_offset = dm_target_offset(ti, sector);
-
return ti->len - target_offset;
}
-static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
- sector_t len = max_io_len_target_boundary(sector, ti);
- sector_t offset, max_len;
+ sector_t target_offset = dm_target_offset(ti, sector);
+ sector_t len = max_io_len_target_boundary(ti, target_offset);
+ sector_t max_len;
/*
* Does the target need to split even further?
+ * - q->limits.chunk_sectors reflects ti->max_io_len so
+ * blk_max_size_offset() provides required splitting.
+ * - blk_max_size_offset() also respects q->limits.max_sectors
*/
- if (ti->max_io_len) {
- offset = dm_target_offset(ti, sector);
- if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
- max_len = sector_div(offset, ti->max_io_len);
- else
- max_len = offset & (ti->max_io_len - 1);
- max_len = ti->max_io_len - max_len;
-
- if (len > max_len)
- len = max_len;
- }
+ max_len = blk_max_size_offset(ti->table->md->queue,
+ target_offset);
+ if (len > max_len)
+ len = max_len;
return len;
}
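
With ti->max_io_len folded into q->limits.chunk_sectors, max_io_len() reduces
to "sectors left until the next boundary", with a power-of-2 fast path. A
simplified user-space stand-in for the blk_max_size_offset() computation:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    static unsigned int max_size_at(unsigned int chunk_sectors,
                                    unsigned int max_sectors, sector_t offset)
    {
        unsigned int left;

        if (!chunk_sectors)
            return max_sectors;
        if ((chunk_sectors & (chunk_sectors - 1)) == 0) /* power of 2 */
            left = chunk_sectors - (offset & (chunk_sectors - 1));
        else
            left = chunk_sectors - (unsigned int)(offset % chunk_sectors);
        return left < max_sectors ? left : max_sectors;
    }

    int main(void)
    {
        /* 128-sector chunks at offset 100: only 28 sectors fit, so a
         * larger bio gets split at the boundary */
        printf("%u\n", max_size_at(128, 256, 100));  /* 28 */
        /* non-power-of-2 boundaries (e.g. max_io_len = 24) also work */
        printf("%u\n", max_size_at(24, 256, 100));   /* 20 */
        return 0;
    }
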
@@ -1119,7 +1098,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
goto out;
if (!ti->type->direct_access)
goto out;
- len = max_io_len(sector, ti) / PAGE_SECTORS;
+ len = max_io_len(ti, sector) / PAGE_SECTORS;
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
@@ -1327,14 +1306,15 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
+ int r;
__bio_clone_fast(clone, bio);
- bio_crypt_clone(clone, bio, GFP_NOIO);
+ r = bio_crypt_clone(clone, bio, GFP_NOIO);
+ if (r < 0)
+ return r;
if (bio_integrity(bio)) {
- int r;
-
if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
!dm_target_passes_integrity(tio->ti->type))) {
DMWARN("%s: the target %s doesn't support integrity data.",
@@ -1430,6 +1410,17 @@ static int __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
+ struct bio flush_bio;
+
+ /*
+ * Use an on-stack bio for this, it's safe since we don't
+ * need to reference it after submit. It's just used as
+ * the basis for the clone(s).
+ */
+ bio_init(&flush_bio, NULL, 0);
+ flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ ci->bio = &flush_bio;
+ ci->sector_count = 0;
/*
* Empty flush uses a statically initialized bio, as the base for
@@ -1443,6 +1434,8 @@ static int __send_empty_flush(struct clone_info *ci)
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+
+ bio_uninit(ci->bio);
return 0;
}
@@ -1465,28 +1458,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
return 0;
}
-typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-
-static unsigned get_num_discard_bios(struct dm_target *ti)
-{
- return ti->num_discard_bios;
-}
-
-static unsigned get_num_secure_erase_bios(struct dm_target *ti)
-{
- return ti->num_secure_erase_bios;
-}
-
-static unsigned get_num_write_same_bios(struct dm_target *ti)
-{
- return ti->num_write_same_bios;
-}
-
-static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
-{
- return ti->num_write_zeroes_bios;
-}
-
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios)
{
@@ -1501,7 +1472,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
if (!num_bios)
return -EOPNOTSUPP;
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ len = min_t(sector_t, ci->sector_count,
+ max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
__send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1511,26 +1483,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
return 0;
}
-static int __send_discard(struct clone_info *ci, struct dm_target *ti)
-{
- return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
-}
-
-static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
-{
- return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
-}
-
-static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
-{
- return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
-}
-
-static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
-{
- return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
-}
-
static bool is_abnormal_io(struct bio *bio)
{
bool r = false;
@@ -1551,18 +1503,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
int *result)
{
struct bio *bio = ci->bio;
+ unsigned num_bios = 0;
- if (bio_op(bio) == REQ_OP_DISCARD)
- *result = __send_discard(ci, ti);
- else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
- *result = __send_secure_erase(ci, ti);
- else if (bio_op(bio) == REQ_OP_WRITE_SAME)
- *result = __send_write_same(ci, ti);
- else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
- *result = __send_write_zeroes(ci, ti);
- else
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ num_bios = ti->num_discard_bios;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ num_bios = ti->num_secure_erase_bios;
+ break;
+ case REQ_OP_WRITE_SAME:
+ num_bios = ti->num_write_same_bios;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ num_bios = ti->num_write_zeroes_bios;
+ break;
+ default:
return false;
+ }
+ *result = __send_changing_extent_only(ci, ti, num_bios);
return true;
}
@@ -1582,7 +1542,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (__process_abnormal_io(ci, ti, &r))
return r;
- len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+ len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
@@ -1618,19 +1578,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- struct bio flush_bio;
-
- /*
- * Use an on-stack bio for this, it's safe since we don't
- * need to reference it after submit. It's just used as
- * the basis for the clone(s).
- */
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- ci.bio = &flush_bio;
- ci.sector_count = 0;
error = __send_empty_flush(&ci);
- bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
@@ -1679,88 +1627,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
- struct bio *bio, struct dm_target *ti)
-{
- struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
- int error = 0;
-
- init_clone_info(&ci, md, map, bio);
-
- if (bio->bi_opf & REQ_PREFLUSH) {
- struct bio flush_bio;
-
- /*
- * Use an on-stack bio for this, it's safe since we don't
- * need to reference it after submit. It's just used as
- * the basis for the clone(s).
- */
- bio_init(&flush_bio, NULL, 0);
- flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
- ci.bio = &flush_bio;
- ci.sector_count = 0;
- error = __send_empty_flush(&ci);
- bio_uninit(ci.bio);
- /* dec_pending submits any data associated with flush */
- } else {
- struct dm_target_io *tio;
-
- ci.bio = bio;
- ci.sector_count = bio_sectors(bio);
- if (__process_abnormal_io(&ci, ti, &error))
- goto out;
-
- tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
- ret = __clone_and_map_simple_bio(&ci, tio, NULL);
- }
-out:
- /* drop the extra reference count */
- dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
-}
-
-static blk_qc_t dm_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
-{
- blk_qc_t ret = BLK_QC_T_NONE;
- struct dm_target *ti = md->immutable_target;
-
- if (unlikely(!map)) {
- bio_io_error(bio);
- return ret;
- }
-
- if (!ti) {
- ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
- if (unlikely(!ti)) {
- bio_io_error(bio);
- return ret;
- }
- }
-
- /*
- * If in ->submit_bio we need to use blk_queue_split(), otherwise
- * queue_limits for abnormal requests (e.g. discard, writesame, etc)
- * won't be imposed.
- * If called from dm_wq_work() for deferred bio processing, bio
- * was already handled by following code with previous ->submit_bio.
- */
- if (current->bio_list) {
- if (is_abnormal_io(bio))
- blk_queue_split(&bio);
- /* regular IO is split by __split_and_process_bio */
- }
-
- if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
- return __process_bio(md, map, bio, ti);
- return __split_and_process_bio(md, map, bio);
-}
-
static blk_qc_t dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_disk->private_data;
@@ -1768,33 +1634,34 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
int srcu_idx;
struct dm_table *map;
- if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
- /*
- * We are called with a live reference on q_usage_counter, but
- * that one will be released as soon as we return. Grab an
- * extra one as blk_mq_submit_bio expects to be able to consume
- * a reference (which lives until the request is freed in case a
- * request is allocated).
- */
- percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
- return blk_mq_submit_bio(bio);
- }
-
map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ DMERR_LIMIT("%s: mapping table unavailable, erroring io",
+ dm_device_name(md));
+ bio_io_error(bio);
+ goto out;
+ }
- /* if we're suspended, we have to queue this io for later */
+ /* If suspended, queue this IO for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
- dm_put_live_table(md, srcu_idx);
-
- if (!(bio->bi_opf & REQ_RAHEAD))
- queue_io(md, bio);
- else
+ if (bio->bi_opf & REQ_NOWAIT)
+ bio_wouldblock_error(bio);
+ else if (bio->bi_opf & REQ_RAHEAD)
bio_io_error(bio);
- return ret;
+ else
+ queue_io(md, bio);
+ goto out;
}
- ret = dm_process_bio(md, map, bio);
+ /*
+ * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
+ * otherwise associated queue_limits won't be imposed.
+ */
+ if (is_abnormal_io(bio))
+ blk_queue_split(&bio);
+ ret = __split_and_process_bio(md, map, bio);
+out:
dm_put_live_table(md, srcu_idx);
return ret;
}
@@ -1849,6 +1716,7 @@ static int next_free_minor(int *minor)
}
static const struct block_device_operations dm_blk_dops;
+static const struct block_device_operations dm_rq_blk_dops;
static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
@@ -2082,18 +1950,6 @@ static void event_callback(void *context)
}
/*
- * Protected by md->suspend_lock obtained by dm_swap_table().
- */
-static void __set_size(struct mapped_device *md, sector_t size)
-{
- lockdep_assert_held(&md->suspend_lock);
-
- set_capacity(md->disk, size);
-
- i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-}
-
-/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2115,7 +1971,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- __set_size(md, size);
+ set_capacity(md->disk, size);
+ bd_set_nr_sectors(md->bdev, size);
dm_table_event_callback(t, event_callback, md);
@@ -2129,12 +1986,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (request_based)
dm_stop_queue(q);
- if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+ if (request_based) {
/*
- * Leverage the fact that request-based DM targets and
- * NVMe bio based targets are immutable singletons
- * - used to optimize both dm_request_fn and dm_mq_queue_rq;
- * and __process_bio.
+ * Leverage the fact that request-based DM targets are
+ * immutable singletons - used to optimize dm_mq_queue_rq.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -2248,15 +2103,15 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
+ md->disk->fops = &dm_rq_blk_dops;
r = dm_mq_init_request_queue(md, t);
if (r) {
- DMERR("Cannot initialize queue for request-based dm-mq mapped device");
+ DMERR("Cannot initialize queue for request-based dm mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2461,29 +2316,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
*/
static void dm_wq_work(struct work_struct *work)
{
- struct mapped_device *md = container_of(work, struct mapped_device,
- work);
- struct bio *c;
- int srcu_idx;
- struct dm_table *map;
-
- map = dm_get_live_table(md, &srcu_idx);
+ struct mapped_device *md = container_of(work, struct mapped_device, work);
+ struct bio *bio;
while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
spin_lock_irq(&md->deferred_lock);
- c = bio_list_pop(&md->deferred);
+ bio = bio_list_pop(&md->deferred);
spin_unlock_irq(&md->deferred_lock);
- if (!c)
+ if (!bio)
break;
- if (dm_request_based(md))
- (void) submit_bio_noacct(c);
- else
- (void) dm_process_bio(md, map, c);
+ submit_bio_noacct(bio);
}
-
- dm_put_live_table(md, srcu_idx);
}
static void dm_queue_flush(struct mapped_device *md)
@@ -2620,13 +2465,12 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
/*
* Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing
- * __split_and_process_bio. This is called from dm_request and
- * dm_wq_work.
+ * __split_and_process_bio from dm_submit_bio.
*
- * To get all processes out of __split_and_process_bio in dm_request,
+ * To get all processes out of __split_and_process_bio in dm_submit_bio,
* we take the write lock. To prevent any process from reentering
- * __split_and_process_bio from dm_request and quiesce the thread
- * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
+ * __split_and_process_bio from dm_submit_bio and quiesce the thread
+ * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq).
*/
set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
@@ -2994,19 +2838,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)
int dm_suspended(struct dm_target *ti)
{
- return dm_suspended_md(dm_table_get_md(ti->table));
+ return dm_suspended_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_post_suspending(struct dm_target *ti)
{
- return dm_post_suspending_md(dm_table_get_md(ti->table));
+ return dm_post_suspending_md(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_post_suspending);
int dm_noflush_suspending(struct dm_target *ti)
{
- return __noflush_suspending(dm_table_get_md(ti->table));
+ return __noflush_suspending(ti->table->md);
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
@@ -3025,7 +2869,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- case DM_TYPE_NVME_BIO_BASED:
pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
@@ -3243,6 +3086,15 @@ static const struct block_device_operations dm_blk_dops = {
.owner = THIS_MODULE
};
+static const struct block_device_operations dm_rq_blk_dops = {
+ .open = dm_blk_open,
+ .release = dm_blk_close,
+ .ioctl = dm_blk_ioctl,
+ .getgeo = dm_blk_getgeo,
+ .pr_ops = &dm_pr_ops,
+ .owner = THIS_MODULE
+};
+
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
.dax_supported = dm_dax_supported,
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4f5fe664d05a..fffe1e289c53 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -179,12 +179,9 @@ int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
-sector_t dm_get_size(struct mapped_device *md);
-struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
-struct dm_stats *dm_get_stats(struct mapped_device *md);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index b10c51988c8e..200c5d0f08bf 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -357,11 +357,12 @@ static int read_page(struct file *file, unsigned long index,
struct inode *inode = file_inode(file);
struct buffer_head *bh;
sector_t block, blk_cur;
+ unsigned long blocksize = i_blocksize(inode);
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
+ bh = alloc_page_buffers(page, blocksize, false);
if (!bh) {
ret = -ENOMEM;
goto out;
@@ -383,10 +384,10 @@ static int read_page(struct file *file, unsigned long index,
bh->b_blocknr = block;
bh->b_bdev = inode->i_sb->s_bdev;
- if (count < (1<<inode->i_blkbits))
+ if (count < blocksize)
count = 0;
else
- count -= (1<<inode->i_blkbits);
+ count -= blocksize;
bh->b_end_io = end_bitmap_write;
bh->b_private = bitmap;
@@ -605,8 +606,8 @@ re_read:
if (bitmap->cluster_slot >= 0) {
sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
- sector_div(bm_blocks,
- bitmap->mddev->bitmap_info.chunksize >> 9);
+ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
+ (bitmap->mddev->bitmap_info.chunksize >> 9));
/* bits to bytes */
bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
/* to 4k blocks */
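
The sizing pipeline is unchanged, only the division now rounds up: one bit per
chunk of resync space, bits rounded up to bytes, plus the superblock, rounded
to 4k blocks. A worked sketch (the 256-byte superblock size and the geometry
are assumptions):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long long resync_sectors = 1ULL << 31; /* 1 TiB array */
        unsigned long long chunk_bytes = 64ULL << 20;   /* 64 MiB bitmap chunks */
        unsigned long long sb_bytes = 256; /* assumed sizeof(bitmap_super_t) */

        unsigned long long bits = DIV_ROUND_UP(resync_sectors, chunk_bytes >> 9);
        unsigned long long bytes = ((bits + 7) >> 3) + sb_bytes;
        unsigned long long blocks = DIV_ROUND_UP(bytes, 4096);

        /* 16384 bits -> 2304 bytes -> 1 4k block */
        printf("%llu bits, %llu bytes, %llu blocks\n", bits, bytes, blocks);
        return 0;
    }
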
@@ -1367,7 +1368,7 @@ __acquires(bitmap->lock)
if (bitmap->bp[page].hijacked ||
bitmap->bp[page].map == NULL)
csize = ((sector_t)1) << (bitmap->chunkshift +
- PAGE_COUNTER_SHIFT - 1);
+ PAGE_COUNTER_SHIFT);
else
csize = ((sector_t)1) << bitmap->chunkshift;
*blocks = csize - (offset & (csize - 1));
@@ -1949,6 +1950,7 @@ out:
}
EXPORT_SYMBOL_GPL(md_bitmap_load);
+/* Caller needs to free the returned bitmap with md_bitmap_free() */
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
int rv = 0;
@@ -2012,6 +2014,7 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
md_bitmap_unplug(mddev->bitmap);
*low = lo;
*high = hi;
+ md_bitmap_free(bitmap);
return rv;
}
@@ -2615,4 +2618,3 @@ struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
-
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index d50737ec4039..4aaf4820b6f6 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -582,7 +582,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
break;
case CHANGE_CAPACITY:
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
break;
case RESYNCING:
set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
@@ -1166,6 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
* can't resize bitmap
*/
goto out;
+ md_bitmap_free(bitmap);
}
return 0;
@@ -1296,12 +1297,12 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
pr_err("%s:%d: failed to send CHANGE_CAPACITY msg\n",
__func__, __LINE__);
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
} else {
/* revert to previous sectors */
ret = mddev->pers->resize(mddev, old_dev_sectors);
if (!ret)
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
ret = __sendmsg(cinfo, &cmsg);
if (ret)
pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c2ae9125c4c3..5ab22069b5be 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -202,7 +202,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
mddev_resume(mddev);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
kfree_rcu(oldconf, rcu);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 607278207023..98bac4f304ae 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -464,6 +464,7 @@ struct md_io {
bio_end_io_t *orig_bi_end_io;
void *orig_bi_private;
unsigned long start_time;
+ struct hd_struct *part;
};
static void md_end_io(struct bio *bio)
@@ -471,7 +472,7 @@ static void md_end_io(struct bio *bio)
struct md_io *md_io = bio->bi_private;
struct mddev *mddev = md_io->mddev;
- disk_end_io_acct(mddev->gendisk, bio_op(bio), md_io->start_time);
+ part_end_io_acct(md_io->part, bio, md_io->start_time);
bio->bi_end_io = md_io->orig_bi_end_io;
bio->bi_private = md_io->orig_bi_private;
@@ -517,9 +518,8 @@ static blk_qc_t md_submit_bio(struct bio *bio)
bio->bi_end_io = md_end_io;
bio->bi_private = md_io;
- md_io->start_time = disk_start_io_acct(mddev->gendisk,
- bio_sectors(bio),
- bio_op(bio));
+ md_io->start_time = part_start_io_acct(mddev->gendisk,
+ &md_io->part, bio);
}
/* bio could be mergeable after passing to underlayer */
@@ -2322,8 +2322,7 @@ static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
test_bit(Journal, &rdev2->flags) ||
rdev2->raid_disk == -1)
continue;
- if (rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains) {
+ if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
rcu_read_unlock();
return 1;
}
@@ -5358,7 +5357,7 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
mddev->array_sectors = sectors;
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
mddev_unlock(mddev);
@@ -5944,8 +5943,8 @@ int md_run(struct mddev *mddev)
rdev_for_each(rdev, mddev)
rdev_for_each(rdev2, mddev) {
if (rdev < rdev2 &&
- rdev->bdev->bd_contains ==
- rdev2->bdev->bd_contains) {
+ rdev->bdev->bd_disk ==
+ rdev2->bdev->bd_disk) {
pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
mdname(mddev),
bdevname(rdev->bdev,b),
@@ -6109,7 +6108,7 @@ int do_md_run(struct mddev *mddev)
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
clear_bit(MD_NOT_READY, &mddev->flags);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -6427,7 +6426,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
- revalidate_disk(disk);
+ revalidate_disk_size(disk, true);
if (mddev->ro)
mddev->ro = 0;
@@ -7259,7 +7258,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
md_cluster_ops->update_size(mddev, old_dev_sectors);
else if (mddev->queue) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
return rv;
@@ -7848,7 +7847,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
out:
if (err)
mddev_put(mddev);
@@ -8445,7 +8444,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
- struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+ struct gendisk *disk = rdev->bdev->bd_disk;
curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
@@ -8583,6 +8582,26 @@ void md_write_end(struct mddev *mddev)
EXPORT_SYMBOL(md_write_end);
+/* This is used by raid0 and raid10 */
+void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
+ struct bio *bio, sector_t start, sector_t size)
+{
+ struct bio *discard_bio = NULL;
+
+ if (__blkdev_issue_discard(rdev->bdev, start, size,
+ GFP_NOIO, 0, &discard_bio) || !discard_bio)
+ return;
+
+ bio_chain(discard_bio, bio);
+ bio_clone_blkg_association(discard_bio, bio);
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(rdev->bdev),
+ discard_bio, disk_devt(mddev->gendisk),
+ bio->bi_iter.bi_sector);
+ submit_bio_noacct(discard_bio);
+}
+EXPORT_SYMBOL(md_submit_discard_bio);
+
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
@@ -9018,7 +9037,7 @@ void md_do_sync(struct md_thread *thread)
mddev_unlock(mddev);
if (!mddev_is_clustered(mddev)) {
set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ revalidate_disk_size(mddev->gendisk, true);
}
}
@@ -9545,7 +9564,7 @@ static int __init md_init(void)
goto err_misc_wq;
md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
- if (!md_misc_wq)
+ if (!md_rdev_misc_wq)
goto err_rdev_misc_wq;
if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d9c4e6b7e939..ccfb69868c2e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -397,7 +397,7 @@ struct mddev {
* These locks are separate due to conflicting interactions
* with bdev->bd_mutex.
* Lock ordering is:
- * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
+ * reconfig_mutex -> bd_mutex
* bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
*/
struct mutex open_mutex;
@@ -551,7 +551,7 @@ extern void mddev_unlock(struct mddev *mddev);
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+ atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
@@ -713,6 +713,8 @@ extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
+extern void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
+ struct bio *bio, sector_t start, sector_t size);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 8aae0624a297..ef6e78d45d5b 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -366,7 +366,8 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
} while (!(flags & LEAF_NODE));
*result_key = le64_to_cpu(ro_node(s)->keys[i]);
- memcpy(v, value_ptr(ro_node(s), i), value_size);
+ if (v)
+ memcpy(v, value_ptr(ro_node(s), i), value_size);
return 0;
}
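
Passing v == NULL turns dm_btree_lookup() into a pure existence check, which is
exactly what the __create_thin()/__create_snap() hunks above rely on to avoid
copying (and stack-allocating) a value they never read. A small sketch of the
optional-out-parameter pattern (the table and helper are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* copy the value out only when the caller asked for it */
    static int lookup(const char *keys[], int n, const char *key,
                      char *value, size_t len)
    {
        for (int i = 0; i < n; i++) {
            if (strcmp(keys[i], key) == 0) {
                if (value) /* NULL means existence check only */
                    snprintf(value, len, "%s", keys[i]);
                return 0;
            }
        }
        return -1; /* -ENODATA-style miss */
    }

    int main(void)
    {
        const char *t[] = { "thin-1", "thin-2" };

        /* __create_thin()-style check: the key must NOT exist yet */
        if (lookup(t, 2, "thin-3", NULL, 0) != 0)
            puts("device id unused, safe to create");
        return 0;
    }
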
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f54a449f97aa..6f44177593a5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -410,22 +410,6 @@ static int raid0_run(struct mddev *mddev)
mdname(mddev),
(unsigned long long)mddev->array_sectors);
- if (mddev->queue) {
- /* calculate the max read-ahead size.
- * For read-ahead of large files to be effective, we need to
- * readahead at least twice a whole stripe. i.e. number of devices
- * multiplied by chunk size times 2.
- * If an individual device has an ra_pages greater than the
- * chunk size, then we will not drive that device as hard as it
- * wants. We consider this a configuration error: a larger
- * chunksize should be used in that case.
- */
- int stripe = mddev->raid_disks *
- (mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
- mddev->queue->backing_dev_info->ra_pages = 2* stripe;
- }
-
dump_zones(mddev);
ret = md_integrity_register(mddev);
@@ -442,23 +426,6 @@ static void raid0_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-/*
- * Is io distribute over 1 or more chunks ?
-*/
-static inline int is_io_in_chunk_boundary(struct mddev *mddev,
- unsigned int chunk_sects, struct bio *bio)
-{
- if (likely(is_power_of_2(chunk_sects))) {
- return chunk_sects >=
- ((bio->bi_iter.bi_sector & (chunk_sects-1))
- + bio_sectors(bio));
- } else{
- sector_t sector = bio->bi_iter.bi_sector;
- return chunk_sects >= (sector_div(sector, chunk_sects)
- + bio_sectors(bio));
- }
-}
-
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
struct r0conf *conf = mddev->private;
@@ -510,7 +477,6 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
for (disk = 0; disk < zone->nb_dev; disk++) {
sector_t dev_start, dev_end;
- struct bio *discard_bio = NULL;
struct md_rdev *rdev;
if (disk < start_disk_index)
@@ -533,18 +499,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
rdev = conf->devlist[(zone - conf->strip_zone) *
conf->strip_zone[0].nb_dev + disk];
- if (__blkdev_issue_discard(rdev->bdev,
+ md_submit_discard_bio(mddev, rdev, bio,
dev_start + zone->dev_start + rdev->data_offset,
- dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
- !discard_bio)
- continue;
- bio_chain(discard_bio, bio);
- bio_clone_blkg_association(discard_bio, bio);
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(rdev->bdev),
- discard_bio, disk_devt(mddev->gendisk),
- bio->bi_iter.bi_sector);
- submit_bio_noacct(discard_bio);
+ dev_end - dev_start);
}
bio_endio(bio);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e8fa32733917..b7bca6703df8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -91,7 +91,7 @@ static inline struct r10bio *get_resync_r10bio(struct bio *bio)
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
- int size = offsetof(struct r10bio, devs[conf->copies]);
+ int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
/* allocate a r10bio with room for raid_disks entries in the
* bios array */
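
The pool objects are sized with the usual flexible-array idiom; the discard
path tracks every member disk, so the allocation now reserves geo.raid_disks
trailing entries instead of copies. A minimal sketch of that sizing (the
struct layout is illustrative, not the real r10bio):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev_info { void *bio; void *repl_bio; };

    struct r10bio_like {
        long sectors;
        int state;
        struct dev_info devs[]; /* flexible array member */
    };

    int main(void)
    {
        int raid_disks = 8;
        /* header plus exactly raid_disks trailing entries (GNU C allows a
         * non-constant index inside offsetof, as the kernel code does) */
        size_t size = offsetof(struct r10bio_like, devs[raid_disks]);
        struct r10bio_like *r10 = malloc(size);

        printf("allocated %zu bytes for %d devs\n", size, raid_disks);
        free(r10);
        return 0;
    }
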
@@ -238,7 +238,7 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
int i;
- for (i = 0; i < conf->copies; i++) {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
struct bio **bio = & r10_bio->devs[i].bio;
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
@@ -327,7 +327,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
int slot;
int repl = 0;
- for (slot = 0; slot < conf->copies; slot++) {
+ for (slot = 0; slot < conf->geo.raid_disks; slot++) {
if (r10_bio->devs[slot].bio == bio)
break;
if (r10_bio->devs[slot].repl_bio == bio) {
@@ -336,7 +336,6 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
}
}
- BUG_ON(slot == conf->copies);
update_head_pos(slot, r10_bio);
if (slotp)
@@ -1276,12 +1275,75 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
}
}
+static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
+{
+ int i;
+ struct r10conf *conf = mddev->private;
+ struct md_rdev *blocked_rdev;
+
+retry_wait:
+ blocked_rdev = NULL;
+ rcu_read_lock();
+ for (i = 0; i < conf->copies; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[i].replacement);
+ if (rdev == rrdev)
+ rrdev = NULL;
+ if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+ atomic_inc(&rdev->nr_pending);
+ blocked_rdev = rdev;
+ break;
+ }
+ if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
+
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ sector_t dev_sector = r10_bio->devs[i].addr;
+ int bad_sectors;
+ int is_bad;
+
+ /* Discard requests don't care about the write result,
+ * so they don't need to wait for a blocked disk here.
+ */
+ if (!r10_bio->sectors)
+ continue;
+
+ is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
+ &first_bad, &bad_sectors);
+ if (is_bad < 0) {
+ /* Mustn't write here until the bad block
+ * is acknowledged
+ */
+ atomic_inc(&rdev->nr_pending);
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ blocked_rdev = rdev;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ if (unlikely(blocked_rdev)) {
+ /* Have to wait for this device to get unblocked, then retry */
+ allow_barrier(conf);
+ raid10_log(conf->mddev, "%s wait rdev %d blocked",
+ __func__, blocked_rdev->raid_disk);
+ md_wait_for_blocked_rdev(blocked_rdev, mddev);
+ wait_barrier(conf);
+ goto retry_wait;
+ }
+}
+
static void raid10_write_request(struct mddev *mddev, struct bio *bio,
struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int i;
- struct md_rdev *blocked_rdev;
sector_t sectors;
int max_sectors;
@@ -1339,8 +1401,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
-retry_write:
- blocked_rdev = NULL;
+
+ wait_blocked_dev(mddev, r10_bio);
+
rcu_read_lock();
max_sectors = r10_bio->sectors;
@@ -1351,16 +1414,6 @@ retry_write:
conf->mirrors[d].replacement);
if (rdev == rrdev)
rrdev = NULL;
- if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- atomic_inc(&rdev->nr_pending);
- blocked_rdev = rdev;
- break;
- }
- if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
- atomic_inc(&rrdev->nr_pending);
- blocked_rdev = rrdev;
- break;
- }
if (rdev && (test_bit(Faulty, &rdev->flags)))
rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)))
@@ -1381,15 +1434,6 @@ retry_write:
is_bad = is_badblock(rdev, dev_sector, max_sectors,
&first_bad, &bad_sectors);
- if (is_bad < 0) {
- /* Mustn't write here until the bad block
- * is acknowledged
- */
- atomic_inc(&rdev->nr_pending);
- set_bit(BlockedBadBlocks, &rdev->flags);
- blocked_rdev = rdev;
- break;
- }
if (is_bad && first_bad <= dev_sector) {
/* Cannot write here at all */
bad_sectors -= (dev_sector - first_bad);
@@ -1425,35 +1469,6 @@ retry_write:
}
rcu_read_unlock();
- if (unlikely(blocked_rdev)) {
- /* Have to wait for this device to get unblocked, then retry */
- int j;
- int d;
-
- for (j = 0; j < i; j++) {
- if (r10_bio->devs[j].bio) {
- d = r10_bio->devs[j].devnum;
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
- }
- if (r10_bio->devs[j].repl_bio) {
- struct md_rdev *rdev;
- d = r10_bio->devs[j].devnum;
- rdev = conf->mirrors[d].replacement;
- if (!rdev) {
- /* Race with remove_disk */
- smp_mb();
- rdev = conf->mirrors[d].rdev;
- }
- rdev_dec_pending(rdev, mddev);
- }
- }
- allow_barrier(conf);
- raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
- md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
- goto retry_write;
- }
-
if (max_sectors < r10_bio->sectors)
r10_bio->sectors = max_sectors;
@@ -1493,7 +1508,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_iter.bi_sector;
r10_bio->state = 0;
- memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
+ memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->geo.raid_disks);
if (bio_data_dir(bio) == READ)
raid10_read_request(mddev, bio, r10_bio);
@@ -1501,6 +1516,296 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
raid10_write_request(mddev, bio, r10_bio);
}
+static struct bio *raid10_split_bio(struct r10conf *conf,
+ struct bio *bio, sector_t sectors, bool want_first)
+{
+ struct bio *split;
+
+ split = bio_split(bio, sectors, GFP_NOIO, &conf->bio_split);
+ bio_chain(split, bio);
+ allow_barrier(conf);
+ if (want_first) {
+ submit_bio_noacct(bio);
+ bio = split;
+ } else
+ submit_bio_noacct(split);
+ wait_barrier(conf);
+
+ return bio;
+}
+
+static void raid_end_discard_bio(struct r10bio *r10bio)
+{
+ struct r10conf *conf = r10bio->mddev->private;
+ struct r10bio *first_r10bio;
+
+ while (atomic_dec_and_test(&r10bio->remaining)) {
+
+ allow_barrier(conf);
+
+ if (!test_bit(R10BIO_Discard, &r10bio->state)) {
+ first_r10bio = (struct r10bio *)r10bio->master_bio;
+ free_r10bio(r10bio);
+ r10bio = first_r10bio;
+ } else {
+ md_write_end(r10bio->mddev);
+ bio_endio(r10bio->master_bio);
+ free_r10bio(r10bio);
+ break;
+ }
+ }
+}
+
+static void raid10_end_discard_request(struct bio *bio)
+{
+ struct r10bio *r10_bio = bio->bi_private;
+ struct r10conf *conf = r10_bio->mddev->private;
+ struct md_rdev *rdev = NULL;
+ int dev;
+ int slot, repl;
+
+ /*
+ * We don't care about the return value of the discard bio
+ */
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+
+ dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[dev].replacement;
+ if (!rdev) {
+ /* raid10_remove_disk uses smp_mb to make sure rdev is set to
+ * the replacement before setting replacement to NULL. So rdev
+ * can be read first without barrier protection, even when the
+ * replacement is NULL
+ */
+ smp_rmb();
+ rdev = conf->mirrors[dev].rdev;
+ }
+
+ raid_end_discard_bio(r10_bio);
+ rdev_dec_pending(rdev, conf->mddev);
+}
+
+/* There are some limitations on handling discard bios:
+ * 1st, the discard size must be bigger than stripe_size*2.
+ * 2nd, if the discard bio spans reshape progress, fall back to the old
+ * path to handle it
+ */
+static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ struct geom *geo = &conf->geo;
+ struct r10bio *r10_bio, *first_r10bio;
+ int far_copies = geo->far_copies;
+ bool first_copy = true;
+
+ int disk;
+ sector_t chunk;
+ unsigned int stripe_size;
+ sector_t split_size;
+
+ sector_t bio_start, bio_end;
+ sector_t first_stripe_index, last_stripe_index;
+ sector_t start_disk_offset;
+ unsigned int start_disk_index;
+ sector_t end_disk_offset;
+ unsigned int end_disk_index;
+ unsigned int remainder;
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ return -EAGAIN;
+
+ wait_barrier(conf);
+
+ /* Check reshape again to avoid a reshape starting after checking
+ * MD_RECOVERY_RESHAPE and before wait_barrier
+ */
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ goto out;
+
+ stripe_size = geo->raid_disks << geo->chunk_shift;
+ bio_start = bio->bi_iter.bi_sector;
+ bio_end = bio_end_sector(bio);
+
+ /* A discard bio may be smaller than the stripe size, or cross one stripe
+ * while its discard region is larger than one stripe. For the far offset
+ * layout, if the discard region is not aligned with the stripe size, there
+ * would be holes when submitting discard bios to the member disks. For
+ * simplicity, only handle discard bios whose region is bigger than
+ * stripe_size*2
+ */
+ if (bio_sectors(bio) < stripe_size*2)
+ goto out;
+
+ /* For the far and far offset layouts, if the bio is not aligned with the
+ * stripe size, split off the part that is not aligned.
+ */
+ div_u64_rem(bio_start, stripe_size, &remainder);
+ if ((far_copies > 1) && remainder) {
+ split_size = stripe_size - remainder;
+ bio = raid10_split_bio(conf, bio, split_size, false);
+ }
+ div_u64_rem(bio_end, stripe_size, &remainder);
+ if ((far_copies > 1) && remainder) {
+ split_size = bio_sectors(bio) - remainder;
+ bio = raid10_split_bio(conf, bio, split_size, true);
+ }
+
+ bio_start = bio->bi_iter.bi_sector;
+ bio_end = bio_end_sector(bio);
+
+ /* raid10 uses the chunk as the unit for storing data, similar to raid0.
+ * One stripe contains the chunks from all member disks (one chunk from
+ * each disk at the same HBA address). For layout details, see 'man 4 md'
+ */
+ chunk = bio_start >> geo->chunk_shift;
+ chunk *= geo->near_copies;
+ first_stripe_index = chunk;
+ start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
+ if (geo->far_offset)
+ first_stripe_index *= geo->far_copies;
+ start_disk_offset = (bio_start & geo->chunk_mask) +
+ (first_stripe_index << geo->chunk_shift);
+
+ chunk = bio_end >> geo->chunk_shift;
+ chunk *= geo->near_copies;
+ last_stripe_index = chunk;
+ end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
+ if (geo->far_offset)
+ last_stripe_index *= geo->far_copies;
+ end_disk_offset = (bio_end & geo->chunk_mask) +
+ (last_stripe_index << geo->chunk_shift);
+
+retry_discard:
+ r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
+ r10_bio->mddev = mddev;
+ r10_bio->state = 0;
+ r10_bio->sectors = 0;
+ memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
+ wait_blocked_dev(mddev, r10_bio);
+
+ /* For the far layout, more than one r10bio is needed to cover all regions.
+ * Inspired by raid10_sync_request, the first r10bio->master_bio records the
+ * discard bio, while each other r10bio->master_bio records the first r10bio.
+ * The first r10bio is only released after all the other r10bios finish, and
+ * the discard bio only completes once the first r10bio finishes
+ */
+ if (first_copy) {
+ r10_bio->master_bio = bio;
+ set_bit(R10BIO_Discard, &r10_bio->state);
+ first_copy = false;
+ first_r10bio = r10_bio;
+ } else
+ r10_bio->master_bio = (struct bio *)first_r10bio;
+
+ rcu_read_lock();
+ for (disk = 0; disk < geo->raid_disks; disk++) {
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[disk].replacement);
+
+ r10_bio->devs[disk].bio = NULL;
+ r10_bio->devs[disk].repl_bio = NULL;
+
+ if (rdev && (test_bit(Faulty, &rdev->flags)))
+ rdev = NULL;
+ if (rrdev && (test_bit(Faulty, &rrdev->flags)))
+ rrdev = NULL;
+ if (!rdev && !rrdev)
+ continue;
+
+ if (rdev) {
+ r10_bio->devs[disk].bio = bio;
+ atomic_inc(&rdev->nr_pending);
+ }
+ if (rrdev) {
+ r10_bio->devs[disk].repl_bio = bio;
+ atomic_inc(&rrdev->nr_pending);
+ }
+ }
+ rcu_read_unlock();
+
+ atomic_set(&r10_bio->remaining, 1);
+ for (disk = 0; disk < geo->raid_disks; disk++) {
+ sector_t dev_start, dev_end;
+ struct bio *mbio, *rbio = NULL;
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[disk].replacement);
+
+ /*
+ * Now calculate the start and end address for each disk.
+ * The space between dev_start and dev_end is the discard region.
+ *
+ * For dev_start, three cases need to be considered:
+ * 1st, the disk is before start_disk_index; picture the disk as being
+ * in the next stripe, so dev_start is the start address of the next
+ * stripe.
+ * 2nd, the disk is after start_disk_index, meaning the disk is in the
+ * same stripe as the first disk.
+ * 3rd, the first disk itself; start_disk_offset can be used directly
+ */
+ if (disk < start_disk_index)
+ dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
+ else if (disk > start_disk_index)
+ dev_start = first_stripe_index * mddev->chunk_sectors;
+ else
+ dev_start = start_disk_offset;
+
+ if (disk < end_disk_index)
+ dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
+ else if (disk > end_disk_index)
+ dev_end = last_stripe_index * mddev->chunk_sectors;
+ else
+ dev_end = end_disk_offset;
+
+ /* Only discard bios whose size is >= the stripe size are handled, so
+ * dev_end > dev_start always holds
+ */
+ if (r10_bio->devs[disk].bio) {
+ mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ mbio->bi_end_io = raid10_end_discard_request;
+ mbio->bi_private = r10_bio;
+ r10_bio->devs[disk].bio = mbio;
+ r10_bio->devs[disk].devnum = disk;
+ atomic_inc(&r10_bio->remaining);
+ md_submit_discard_bio(mddev, rdev, mbio,
+ dev_start + choose_data_offset(r10_bio, rdev),
+ dev_end - dev_start);
+ bio_endio(mbio);
+ }
+ if (r10_bio->devs[disk].repl_bio) {
+ rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
+ rbio->bi_end_io = raid10_end_discard_request;
+ rbio->bi_private = r10_bio;
+ r10_bio->devs[disk].repl_bio = rbio;
+ r10_bio->devs[disk].devnum = disk;
+ atomic_inc(&r10_bio->remaining);
+ md_submit_discard_bio(mddev, rrdev, rbio,
+ dev_start + choose_data_offset(r10_bio, rrdev),
+ dev_end - dev_start);
+ bio_endio(rbio);
+ }
+ }
+
+ if (!geo->far_offset && --far_copies) {
+ first_stripe_index += geo->stride >> geo->chunk_shift;
+ start_disk_offset += geo->stride;
+ last_stripe_index += geo->stride >> geo->chunk_shift;
+ end_disk_offset += geo->stride;
+ atomic_inc(&first_r10bio->remaining);
+ raid_end_discard_bio(r10_bio);
+ wait_barrier(conf);
+ goto retry_discard;
+ }
+
+ raid_end_discard_bio(r10_bio);
+
+ return 0;
+out:
+ allow_barrier(conf);
+ return -EAGAIN;
+}
+
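
The address arithmetic in raid10_handle_discard() maps the discard range onto
each member disk much like raid0. A worked user-space sketch for a near-copies
layout (the geometry is illustrative, and plain division/modulo stands in for
sector_div()):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        int raid_disks = 4, near_copies = 2;
        int chunk_shift = 10;               /* 1024-sector (512 KiB) chunks */
        sector_t chunk_mask = (1ULL << chunk_shift) - 1;
        sector_t bio_start = 5000;          /* array-relative sector */

        /* the same steps the kernel code performs for the start address */
        sector_t chunk = bio_start >> chunk_shift;          /* 4 */
        chunk *= near_copies;                               /* 8 */
        sector_t first_stripe_index = chunk / raid_disks;   /* 2 */
        unsigned int start_disk_index = chunk % raid_disks; /* 0 */
        sector_t start_disk_offset = (bio_start & chunk_mask) +
                                     (first_stripe_index << chunk_shift);

        /* stripe 2, disk 0, offset 2952 on that disk */
        printf("stripe %llu, disk %u, offset %llu\n",
               first_stripe_index, start_disk_index, start_disk_offset);
        return 0;
    }
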
static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
@@ -1515,6 +1820,10 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
if (!md_write_start(mddev, bio))
return false;
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ if (!raid10_handle_discard(mddev, bio))
+ return true;
+
/*
* If this request crosses a chunk boundary, we need to split
* it.
@@ -3703,10 +4012,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+ int raid_disks = conf->geo.raid_disks;
+
+ if (!(conf->geo.raid_disks % conf->geo.near_copies))
+ raid_disks /= conf->geo.near_copies;
+ blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+ raid_disks);
+}
+
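
raid10_set_io_opt() advertises one full stripe of data as the optimal I/O
size: chunk bytes times the disk count, divided by near_copies when that
divides evenly. A worked example with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sectors = 1024;  /* 512 KiB chunks */
        int raid_disks = 4, near_copies = 2;

        if (!(raid_disks % near_copies))
            raid_disks /= near_copies;      /* 2 data disks per stripe */

        /* io_opt = 512 KiB * 2 = 1 MiB */
        printf("io_opt = %u bytes\n", (chunk_sectors << 9) * raid_disks);
        return 0;
    }
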
static int raid10_run(struct mddev *mddev)
{
struct r10conf *conf;
- int i, disk_idx, chunk_size;
+ int i, disk_idx;
struct raid10_info *disk;
struct md_rdev *rdev;
sector_t size;
@@ -3742,18 +4061,13 @@ static int raid10_run(struct mddev *mddev)
mddev->thread = conf->thread;
conf->thread = NULL;
- chunk_size = mddev->chunk_sectors << 9;
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
- mddev->chunk_sectors);
+ UINT_MAX);
blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->geo.raid_disks % conf->geo.near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
- else
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->geo.raid_disks / conf->geo.near_copies));
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ raid10_set_io_opt(conf);
}
rdev_for_each(rdev, mddev) {
@@ -3868,19 +4182,6 @@ static int raid10_run(struct mddev *mddev)
mddev->resync_max_sectors = size;
set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
- if (mddev->queue) {
- int stripe = conf->geo.raid_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
- /* Calculate max read-ahead size.
- * We need to readahead at least twice a whole stripe....
- * maybe...
- */
- stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
- }
-
if (md_integrity_register(mddev))
goto out_free_conf;
@@ -4466,8 +4767,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
last = conf->reshape_progress - 1;
sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
& conf->prev.chunk_mask);
- if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
- sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
+ if (sector_nr + RESYNC_SECTORS < last)
+ sector_nr = last + 1 - RESYNC_SECTORS;
} else {
/* 'next' is after the last device address that we
* might write to for this chunk in the new layout
@@ -4489,8 +4790,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
last = sector_nr | (conf->geo.chunk_mask
& conf->prev.chunk_mask);
- if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
- last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
+ if (sector_nr + RESYNC_SECTORS <= last)
+ last = sector_nr + RESYNC_SECTORS - 1;
}
if (need_flush ||
@@ -4718,16 +5019,8 @@ static void end_reshape(struct r10conf *conf)
conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- if (conf->mddev->queue) {
- int stripe = conf->geo.raid_disks *
- ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
- stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
- }
+ if (conf->mddev->queue)
+ raid10_set_io_opt(conf);
conf->fullsync = 0;
}
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 79cd2b7d3128..1461fd55311b 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -179,5 +179,6 @@ enum r10bio_state {
R10BIO_Previous,
/* failfast devices did receive failfast requests. */
R10BIO_FailFast,
+ R10BIO_Discard,
};
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 225380efd1e2..39343479ac2a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -448,13 +448,74 @@ out:
return sh;
}
-static void shrink_buffers(struct stripe_head *sh)
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+static void free_stripe_pages(struct stripe_head *sh)
{
+ int i;
struct page *p;
+
+ /* The page pool has not been allocated */
+ if (!sh->pages)
+ return;
+
+ for (i = 0; i < sh->nr_pages; i++) {
+ p = sh->pages[i];
+ if (p)
+ put_page(p);
+ sh->pages[i] = NULL;
+ }
+}
+
+static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp)
+{
+ int i;
+ struct page *p;
+
+ for (i = 0; i < sh->nr_pages; i++) {
+ /* The page has already been allocated */
+ if (sh->pages[i])
+ continue;
+
+ p = alloc_page(gfp);
+ if (!p) {
+ free_stripe_pages(sh);
+ return -ENOMEM;
+ }
+ sh->pages[i] = p;
+ }
+ return 0;
+}
+
+static int
+init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks)
+{
+ int nr_pages, cnt;
+
+ if (sh->pages)
+ return 0;
+
+	/* Each sh->dev[i] needs one conf->stripe_size worth of page space */
+ cnt = PAGE_SIZE / conf->stripe_size;
+ nr_pages = (disks + cnt - 1) / cnt;
+
+ sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!sh->pages)
+ return -ENOMEM;
+ sh->nr_pages = nr_pages;
+ sh->stripes_per_page = cnt;
+ return 0;
+}
+#endif
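A worked sizing example for init_stripe_shared_pages() above, assuming a 64KiB PAGE_SIZE and the default 4KiB stripe_size (illustrative, standalone):

	/* Illustrative only: shared stripe page pool sizing. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 65536;		/* 64KiB PAGE_SIZE (e.g. ppc64) */
		unsigned int stripe_size = 4096;	/* DEFAULT_STRIPE_SIZE */
		int disks = 10;

		int cnt = page_size / stripe_size;	/* 16 stripes share one page */
		int nr_pages = (disks + cnt - 1) / cnt;	/* 1 page instead of 10 */

		printf("%d stripes/page, %d page(s) for %d disks\n",
		       cnt, nr_pages, disks);
		return 0;
	}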
+
+static void shrink_buffers(struct stripe_head *sh)
+{
int i;
int num = sh->raid_conf->pool_size;
+#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
for (i = 0; i < num ; i++) {
+ struct page *p;
+
WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
p = sh->dev[i].page;
if (!p)
@@ -462,6 +523,11 @@ static void shrink_buffers(struct stripe_head *sh)
sh->dev[i].page = NULL;
put_page(p);
}
+#else
+ for (i = 0; i < num; i++)
+ sh->dev[i].page = NULL;
+ free_stripe_pages(sh); /* Free pages */
+#endif
}
static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
@@ -469,6 +535,7 @@ static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
int i;
int num = sh->raid_conf->pool_size;
+#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
for (i = 0; i < num; i++) {
struct page *page;
@@ -477,8 +544,18 @@ static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
}
sh->dev[i].page = page;
sh->dev[i].orig_page = page;
+ sh->dev[i].offset = 0;
}
+#else
+ if (alloc_stripe_pages(sh, gfp))
+ return -ENOMEM;
+ for (i = 0; i < num; i++) {
+ sh->dev[i].page = raid5_get_dev_page(sh, i);
+ sh->dev[i].orig_page = sh->dev[i].page;
+ sh->dev[i].offset = raid5_get_page_offset(sh, i);
+ }
+#endif
return 0;
}
@@ -1130,7 +1207,7 @@ again:
sh->dev[i].vec.bv_page = sh->dev[i].page;
bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
- bi->bi_io_vec[0].bv_offset = 0;
+ bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
bi->bi_write_hint = sh->dev[i].write_hint;
if (!rrdev)
@@ -1184,7 +1261,7 @@ again:
sh->dev[i].rvec.bv_page = sh->dev[i].page;
rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
- rbi->bi_io_vec[0].bv_offset = 0;
+ rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
rbi->bi_write_hint = sh->dev[i].write_hint;
sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
@@ -1226,7 +1303,7 @@ again:
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page **page,
- sector_t sector, struct dma_async_tx_descriptor *tx,
+ unsigned int poff, sector_t sector, struct dma_async_tx_descriptor *tx,
struct stripe_head *sh, int no_skipcopy)
{
struct bio_vec bvl;
@@ -1272,11 +1349,11 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
!no_skipcopy)
*page = bio_page;
else
- tx = async_memcpy(*page, bio_page, page_offset,
+ tx = async_memcpy(*page, bio_page, page_offset + poff,
b_offset, clen, &submit);
} else
tx = async_memcpy(bio_page, *page, b_offset,
- page_offset, clen, &submit);
+ page_offset + poff, clen, &submit);
}
/* chain the operations */
submit.depend_tx = tx;
@@ -1349,6 +1426,7 @@ static void ops_run_biofill(struct stripe_head *sh)
while (rbi && rbi->bi_iter.bi_sector <
dev->sector + RAID5_STRIPE_SECTORS(conf)) {
tx = async_copy_data(0, rbi, &dev->page,
+ dev->offset,
dev->sector, tx, sh, 0);
rbi = r5_next_bio(conf, rbi, dev->sector);
}
@@ -1404,14 +1482,25 @@ static addr_conv_t *to_addr_conv(struct stripe_head *sh,
return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
}
+/*
+ * Return a pointer to the per-stripe array that records page offsets.
+ */
+static unsigned int *
+to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)
+{
+ return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);
+}
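to_addr_offs() depends on the per-cpu scribble layout that scribble_alloc() resizes later in this patch: for n = disks + 2 slots, each object packs the page pointers, the addr_conv area, and now the new offsets array back to back, so the offsets start exactly where the addr_conv area ends. A sketch of that layout (illustrative; addr_conv_t is the async_tx helper type):

	/*
	 * One scribble object, n = disks + 2:
	 *
	 *   [ struct page *srcs[n] | addr_conv_t conv[n] | unsigned int offs[n] ]
	 *                                                 ^-- to_addr_offs()
	 */
	static size_t scribble_obj_size(int n)
	{
		return sizeof(struct page *) * n +
		       sizeof(addr_conv_t) * n +
		       sizeof(unsigned int) * n;
	}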
+
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
struct page **xor_srcs = to_addr_page(percpu, 0);
+ unsigned int *off_srcs = to_addr_offs(sh, percpu);
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
struct page *xor_dest = tgt->page;
+ unsigned int off_dest = tgt->offset;
int count = 0;
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
@@ -1423,19 +1512,22 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
__func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
- for (i = disks; i--; )
- if (i != target)
+ for (i = disks; i--; ) {
+ if (i != target) {
+ off_srcs[count] = sh->dev[i].offset;
xor_srcs[count++] = sh->dev[i].page;
+ }
+ }
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
if (unlikely(count == 1))
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+ tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
else
- tx = async_xor(xor_dest, xor_srcs, 0, count,
+ tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
@@ -1443,6 +1535,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
/* set_syndrome_sources - populate source buffers for gen_syndrome
* @srcs - (struct page *) array of size sh->disks
+ * @offs - (unsigned int) array of offsets, one for each page
* @sh - stripe_head to parse
*
* Populates srcs in proper layout order for the stripe and returns the
@@ -1451,6 +1544,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
* is recorded in srcs[count+1]].
*/
static int set_syndrome_sources(struct page **srcs,
+ unsigned int *offs,
struct stripe_head *sh,
int srctype)
{
@@ -1481,6 +1575,12 @@ static int set_syndrome_sources(struct page **srcs,
srcs[slot] = sh->dev[i].orig_page;
else
srcs[slot] = sh->dev[i].page;
+ /*
+			 * For R5_InJournal, PAGE_SIZE must be 4KB and the
+			 * page is not shared. In that case, dev[i].offset
+			 * is 0.
+ */
+ offs[slot] = sh->dev[i].offset;
}
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1493,12 +1593,14 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
struct page **blocks = to_addr_page(percpu, 0);
+ unsigned int *offs = to_addr_offs(sh, percpu);
int target;
int qd_idx = sh->qd_idx;
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
struct r5dev *tgt;
struct page *dest;
+ unsigned int dest_off;
int i;
int count;
@@ -1517,17 +1619,18 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
tgt = &sh->dev[target];
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
dest = tgt->page;
+ dest_off = tgt->offset;
atomic_inc(&sh->count);
if (target == qd_idx) {
- count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
+ count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
blocks[count] = NULL; /* regenerating p is not necessary */
BUG_ON(blocks[count+1] != dest); /* q should already be set */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- tx = async_gen_syndrome(blocks, 0, count+2,
+ tx = async_gen_syndrome(blocks, offs, count+2,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
} else {
/* Compute any data- or p-drive using XOR */
@@ -1535,13 +1638,14 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
for (i = disks; i-- ; ) {
if (i == target || i == qd_idx)
continue;
+ offs[count] = sh->dev[i].offset;
blocks[count++] = sh->dev[i].page;
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- tx = async_xor(dest, blocks, 0, count,
+ tx = async_xor_offs(dest, dest_off, blocks, offs, count,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
}
@@ -1561,6 +1665,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
struct r5dev *tgt2 = &sh->dev[target2];
struct dma_async_tx_descriptor *tx;
struct page **blocks = to_addr_page(percpu, 0);
+ unsigned int *offs = to_addr_offs(sh, percpu);
struct async_submit_ctl submit;
BUG_ON(sh->batch_head);
@@ -1573,13 +1678,16 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
/* we need to open-code set_syndrome_sources to handle the
* slot number conversion for 'faila' and 'failb'
*/
- for (i = 0; i < disks ; i++)
+ for (i = 0; i < disks ; i++) {
+ offs[i] = 0;
blocks[i] = NULL;
+ }
count = 0;
i = d0_idx;
do {
int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+ offs[slot] = sh->dev[i].offset;
blocks[slot] = sh->dev[i].page;
if (i == target)
@@ -1604,11 +1712,12 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- return async_gen_syndrome(blocks, 0, syndrome_disks+2,
+ return async_gen_syndrome(blocks, offs, syndrome_disks+2,
RAID5_STRIPE_SIZE(sh->raid_conf),
&submit);
} else {
struct page *dest;
+ unsigned int dest_off;
int data_target;
int qd_idx = sh->qd_idx;
@@ -1622,22 +1731,24 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
for (i = disks; i-- ; ) {
if (i == data_target || i == qd_idx)
continue;
+ offs[count] = sh->dev[i].offset;
blocks[count++] = sh->dev[i].page;
}
dest = sh->dev[data_target].page;
+ dest_off = sh->dev[data_target].offset;
init_async_submit(&submit,
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, NULL, NULL,
to_addr_conv(sh, percpu, 0));
- tx = async_xor(dest, blocks, 0, count,
+ tx = async_xor_offs(dest, dest_off, blocks, offs, count,
RAID5_STRIPE_SIZE(sh->raid_conf),
&submit);
- count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
+ count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
init_async_submit(&submit, ASYNC_TX_FENCE, tx,
ops_complete_compute, sh,
to_addr_conv(sh, percpu, 0));
- return async_gen_syndrome(blocks, 0, count+2,
+ return async_gen_syndrome(blocks, offs, count+2,
RAID5_STRIPE_SIZE(sh->raid_conf),
&submit);
}
@@ -1650,13 +1761,13 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
return async_raid6_datap_recov(syndrome_disks+2,
RAID5_STRIPE_SIZE(sh->raid_conf),
faila,
- blocks, &submit);
+ blocks, offs, &submit);
} else {
/* We're missing D+D. */
return async_raid6_2data_recov(syndrome_disks+2,
RAID5_STRIPE_SIZE(sh->raid_conf),
faila, failb,
- blocks, &submit);
+ blocks, offs, &submit);
}
}
}
@@ -1682,10 +1793,12 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
{
int disks = sh->disks;
struct page **xor_srcs = to_addr_page(percpu, 0);
+ unsigned int *off_srcs = to_addr_offs(sh, percpu);
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
/* existing parity data subtracted */
+ unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
BUG_ON(sh->batch_head);
@@ -1695,15 +1808,22 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
/* Only process blocks that are known to be uptodate */
- if (test_bit(R5_InJournal, &dev->flags))
+ if (test_bit(R5_InJournal, &dev->flags)) {
+ /*
+ * For this case, PAGE_SIZE must be equal to 4KB and
+			 * the page offset is zero.
+ */
+ off_srcs[count] = dev->offset;
xor_srcs[count++] = dev->orig_page;
- else if (test_bit(R5_Wantdrain, &dev->flags))
+ } else if (test_bit(R5_Wantdrain, &dev->flags)) {
+ off_srcs[count] = dev->offset;
xor_srcs[count++] = dev->page;
+ }
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
- tx = async_xor(xor_dest, xor_srcs, 0, count,
+ tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
@@ -1714,17 +1834,18 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
struct page **blocks = to_addr_page(percpu, 0);
+ unsigned int *offs = to_addr_offs(sh, percpu);
int count;
struct async_submit_ctl submit;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
- count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
+ count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN);
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
- tx = async_gen_syndrome(blocks, 0, count+2,
+ tx = async_gen_syndrome(blocks, offs, count+2,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
return tx;
@@ -1775,6 +1896,7 @@ again:
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
+ dev->offset,
dev->sector, tx, sh,
r5c_is_writeback(conf->log));
if (dev->page != dev->orig_page &&
@@ -1854,9 +1976,11 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
{
int disks = sh->disks;
struct page **xor_srcs;
+ unsigned int *off_srcs;
struct async_submit_ctl submit;
int count, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
+ unsigned int off_dest;
int prexor = 0;
unsigned long flags;
int j = 0;
@@ -1881,24 +2005,31 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
again:
count = 0;
xor_srcs = to_addr_page(percpu, j);
+ off_srcs = to_addr_offs(sh, percpu);
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
prexor = 1;
+ off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (head_sh->dev[i].written ||
- test_bit(R5_InJournal, &head_sh->dev[i].flags))
+ test_bit(R5_InJournal, &head_sh->dev[i].flags)) {
+ off_srcs[count] = dev->offset;
xor_srcs[count++] = dev->page;
+ }
}
} else {
xor_dest = sh->dev[pd_idx].page;
+ off_dest = sh->dev[pd_idx].offset;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (i != pd_idx)
+ if (i != pd_idx) {
+ off_srcs[count] = dev->offset;
xor_srcs[count++] = dev->page;
+ }
}
}
@@ -1924,10 +2055,10 @@ again:
}
if (unlikely(count == 1))
- tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0,
+ tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
else
- tx = async_xor(xor_dest, xor_srcs, 0, count,
+ tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
if (!last_stripe) {
j++;
@@ -1943,6 +2074,7 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
{
struct async_submit_ctl submit;
struct page **blocks;
+ unsigned int *offs;
int count, i, j = 0;
struct stripe_head *head_sh = sh;
int last_stripe;
@@ -1967,6 +2099,7 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
again:
blocks = to_addr_page(percpu, j);
+ offs = to_addr_offs(sh, percpu);
if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
synflags = SYNDROME_SRC_WRITTEN;
@@ -1976,7 +2109,7 @@ again:
txflags = ASYNC_TX_ACK;
}
- count = set_syndrome_sources(blocks, sh, synflags);
+ count = set_syndrome_sources(blocks, offs, sh, synflags);
last_stripe = !head_sh->batch_head ||
list_first_entry(&sh->batch_list,
struct stripe_head, batch_list) == head_sh;
@@ -1988,7 +2121,7 @@ again:
} else
init_async_submit(&submit, 0, tx, NULL, NULL,
to_addr_conv(sh, percpu, j));
- tx = async_gen_syndrome(blocks, 0, count+2,
+ tx = async_gen_syndrome(blocks, offs, count+2,
RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
if (!last_stripe) {
j++;
@@ -2016,7 +2149,9 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
struct page *xor_dest;
+ unsigned int off_dest;
struct page **xor_srcs = to_addr_page(percpu, 0);
+ unsigned int *off_srcs = to_addr_offs(sh, percpu);
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
int count;
@@ -2028,16 +2163,19 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
BUG_ON(sh->batch_head);
count = 0;
xor_dest = sh->dev[pd_idx].page;
+ off_dest = sh->dev[pd_idx].offset;
+ off_srcs[count] = off_dest;
xor_srcs[count++] = xor_dest;
for (i = disks; i--; ) {
if (i == pd_idx || i == qd_idx)
continue;
+ off_srcs[count] = sh->dev[i].offset;
xor_srcs[count++] = sh->dev[i].page;
}
init_async_submit(&submit, 0, NULL, NULL, NULL,
to_addr_conv(sh, percpu, 0));
- tx = async_xor_val(xor_dest, xor_srcs, 0, count,
+ tx = async_xor_val_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
RAID5_STRIPE_SIZE(sh->raid_conf),
&sh->ops.zero_sum_result, &submit);
@@ -2049,6 +2187,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
struct page **srcs = to_addr_page(percpu, 0);
+ unsigned int *offs = to_addr_offs(sh, percpu);
struct async_submit_ctl submit;
int count;
@@ -2056,16 +2195,16 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu
(unsigned long long)sh->sector, checkp);
BUG_ON(sh->batch_head);
- count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
+ count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL);
if (!checkp)
srcs[count] = NULL;
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
sh, to_addr_conv(sh, percpu, 0));
- async_syndrome_val(srcs, 0, count+2,
+ async_syndrome_val(srcs, offs, count+2,
RAID5_STRIPE_SIZE(sh->raid_conf),
- &sh->ops.zero_sum_result, percpu->spare_page, &submit);
+ &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit);
}
static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -2142,6 +2281,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
{
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ kfree(sh->pages);
+#endif
if (sh->ppl_page)
__free_page(sh->ppl_page);
kmem_cache_free(sc, sh);
@@ -2175,9 +2317,15 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
sh->ppl_page = alloc_page(gfp);
if (!sh->ppl_page) {
free_stripe(sc, sh);
- sh = NULL;
+ return NULL;
}
}
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ if (init_stripe_shared_pages(sh, conf, disks)) {
+ free_stripe(sc, sh);
+ return NULL;
+ }
+#endif
}
return sh;
}
@@ -2253,8 +2401,9 @@ static int scribble_alloc(struct raid5_percpu *percpu,
int num, int cnt)
{
size_t obj_size =
- sizeof(struct page *) * (num+2) +
- sizeof(addr_conv_t) * (num+2);
+ sizeof(struct page *) * (num + 2) +
+ sizeof(addr_conv_t) * (num + 2) +
+ sizeof(unsigned int) * (num + 2);
void *scribble;
/*
@@ -2386,9 +2535,16 @@ static int resize_stripes(struct r5conf *conf, int newsize)
osh = get_free_stripe(conf, hash);
unlock_device_hash_lock(conf, hash);
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ for (i = 0; i < osh->nr_pages; i++) {
+ nsh->pages[i] = osh->pages[i];
+ osh->pages[i] = NULL;
+ }
+#endif
for(i=0; i<conf->pool_size; i++) {
nsh->dev[i].page = osh->dev[i].page;
nsh->dev[i].orig_page = osh->dev[i].page;
+ nsh->dev[i].offset = osh->dev[i].offset;
}
nsh->hash_lock_index = hash;
free_stripe(conf->slab_cache, osh);
@@ -2429,8 +2585,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
} else
err = -ENOMEM;
- mutex_unlock(&conf->cache_size_mutex);
-
conf->slab_cache = sc;
conf->active_name = 1-conf->active_name;
@@ -2439,20 +2593,41 @@ static int resize_stripes(struct r5conf *conf, int newsize)
nsh = list_entry(newstripes.next, struct stripe_head, lru);
list_del_init(&nsh->lru);
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ for (i = 0; i < nsh->nr_pages; i++) {
+ if (nsh->pages[i])
+ continue;
+ nsh->pages[i] = alloc_page(GFP_NOIO);
+ if (!nsh->pages[i])
+ err = -ENOMEM;
+ }
+
+ for (i = conf->raid_disks; i < newsize; i++) {
+ if (nsh->dev[i].page)
+ continue;
+ nsh->dev[i].page = raid5_get_dev_page(nsh, i);
+ nsh->dev[i].orig_page = nsh->dev[i].page;
+ nsh->dev[i].offset = raid5_get_page_offset(nsh, i);
+ }
+#else
for (i=conf->raid_disks; i < newsize; i++)
if (nsh->dev[i].page == NULL) {
struct page *p = alloc_page(GFP_NOIO);
nsh->dev[i].page = p;
nsh->dev[i].orig_page = p;
+ nsh->dev[i].offset = 0;
if (!p)
err = -ENOMEM;
}
+#endif
raid5_release_stripe(nsh);
}
/* critical section pass, GFP_NOIO no longer needed */
if (!err)
conf->pool_size = newsize;
+ mutex_unlock(&conf->cache_size_mutex);
+
return err;
}
@@ -4369,7 +4544,8 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
/* place all the copies on one channel */
init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
tx = async_memcpy(sh2->dev[dd_idx].page,
- sh->dev[i].page, 0, 0, RAID5_STRIPE_SIZE(conf),
+ sh->dev[i].page, sh2->dev[dd_idx].offset,
+ sh->dev[i].offset, RAID5_STRIPE_SIZE(conf),
&submit);
set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
@@ -6506,6 +6682,7 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
struct r5conf *conf;
unsigned long new;
int err;
+ int size;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -6538,10 +6715,29 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
pr_debug("md/raid: change stripe_size from %lu to %lu\n",
conf->stripe_size, new);
+ if (mddev->sync_thread ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->reshape_position != MaxSector ||
+ mddev->sysfs_active) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
mddev_suspend(mddev);
+ mutex_lock(&conf->cache_size_mutex);
+ size = conf->max_nr_stripes;
+
+ shrink_stripes(conf);
+
conf->stripe_size = new;
conf->stripe_shift = ilog2(new) - 9;
conf->stripe_sectors = new >> 9;
+ if (grow_stripes(conf, size)) {
+ pr_warn("md/raid:%s: couldn't allocate buffers\n",
+ mdname(mddev));
+ err = -ENOMEM;
+ }
+ mutex_unlock(&conf->cache_size_mutex);
mddev_resume(mddev);
out_unlock:
@@ -6638,14 +6834,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
if (!conf)
err = -ENODEV;
else if (new != conf->skip_copy) {
+ struct request_queue *q = mddev->queue;
+
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
else
- mddev->queue->backing_dev_info->capabilities &=
- ~BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
mddev_resume(mddev);
}
mddev_unlock(mddev);
@@ -7232,6 +7428,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+ blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+ (conf->raid_disks - conf->max_degraded));
+}
+
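A worked example of the helper above (illustrative): a 6-device RAID6 with 512KiB chunks has chunk_sectors = 1024 and max_degraded = 2, so:

	unsigned int chunk_sectors = 1024;	/* 512 KiB in sectors */
	unsigned int data_disks = 6 - 2;	/* raid_disks - max_degraded */
	unsigned int io_opt = (chunk_sectors << 9) * data_disks; /* 2 MiB */

i.e. the optimal I/O size is one full stripe of data.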
static int raid5_run(struct mddev *mddev)
{
struct r5conf *conf;
@@ -7516,13 +7718,10 @@ static int raid5_run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks - conf->max_degraded));
+ raid5_set_io_opt(conf);
mddev->queue->limits.raid_partial_stripes_expensive = 1;
/*
* We can only discard a whole stripe. It doesn't make sense to
@@ -8106,16 +8305,8 @@ static void end_reshape(struct r5conf *conf)
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- /* read-ahead size must cover two whole stripes, which is
- * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
- */
- if (conf->mddev->queue) {
- int data_disks = conf->raid_disks - conf->max_degraded;
- int stripe = data_disks * ((conf->chunk_sectors << 9)
- / PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
- }
+ if (conf->mddev->queue)
+ raid5_set_io_opt(conf);
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 16fc29472f5c..5c05acf20e1f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -195,6 +195,7 @@ enum reconstruct_states {
reconstruct_state_result,
};
+#define DEFAULT_STRIPE_SIZE 4096
struct stripe_head {
struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
@@ -246,6 +247,13 @@ struct stripe_head {
int target, target2;
enum sum_check_flags zero_sum_result;
} ops;
+
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+ /* These pages will be used by bios in dev[i] */
+ struct page **pages;
+ int nr_pages; /* page array size */
+ int stripes_per_page;
+#endif
struct r5dev {
/* rreq and rvec are used for the replacement device when
* writing data to both devices.
@@ -253,6 +261,7 @@ struct stripe_head {
struct bio req, rreq;
struct bio_vec vec, rvec;
struct page *page, *orig_page;
+		unsigned int offset;	/* offset within the page */
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
@@ -472,7 +481,6 @@ struct disk_info {
*/
#define NR_STRIPES 256
-#define DEFAULT_STRIPE_SIZE 4096
#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE PAGE_SIZE
@@ -771,6 +779,25 @@ static inline int algorithm_is_DDF(int layout)
return layout >= 8 && layout <= 10;
}
+#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
+/*
+ * Return the offset into the corresponding page for this r5dev.
+ */
+static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
+{
+ return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
+}
+
+/*
+ * Return the address of the page backing this r5dev.
+ */
+static inline struct page *
+raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
+{
+ return sh->pages[disk_idx / sh->stripes_per_page];
+}
+#endif
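An illustrative run of the two helpers above with 16KiB pages and 4KiB stripes (stripes_per_page = 4), as a standalone program:

	/* Illustrative only: disk_idx -> (page, offset) in the shared pool. */
	#include <stdio.h>

	int main(void)
	{
		int stripes_per_page = 4;		/* 16KiB page / 4KiB stripe */
		unsigned int stripe_size = 4096;

		for (int disk_idx = 0; disk_idx < 6; disk_idx++)
			printf("dev[%d] -> pages[%d] + 0x%x\n", disk_idx,
			       disk_idx / stripes_per_page,
			       (disk_idx % stripes_per_page) * stripe_size);
		return 0;
	}

which prints dev[0]..dev[3] sharing pages[0] at offsets 0x0, 0x1000, 0x2000, 0x3000, then dev[4] starting pages[1] at offset 0x0.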
+
extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 926d65db6d3e..d5d5d28d0b36 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -751,6 +751,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
+ if (adap->devnode.unregistered)
+ return -ENODEV;
+
msg->rx_ts = 0;
msg->tx_ts = 0;
msg->rx_status = 0;
@@ -1049,6 +1052,9 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
return;
+ if (adap->devnode.unregistered)
+ return;
+
/*
* Some CEC adapters will receive the messages that they transmitted.
* This test filters out those messages by checking if we are the
@@ -1928,7 +1934,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
*/
if (!adap->passthrough && from_unregistered)
return 0;
- /* Fall through */
+ fallthrough;
case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_GIVE_FEATURES:
case CEC_MSG_GIVE_PHYSICAL_ADDR:
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index c599cd94dd62..ece236291f35 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -309,7 +309,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
- adap->rc->timeout = MS_TO_NS(550);
+ adap->rc->timeout = MS_TO_US(550);
#endif
return adap;
}
@@ -359,27 +359,16 @@ int cec_register_adapter(struct cec_adapter *adap,
if (!top_cec_dir)
return 0;
- adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), top_cec_dir);
- if (IS_ERR_OR_NULL(adap->cec_dir)) {
- pr_warn("cec-%s: Failed to create debugfs dir\n", adap->name);
- return 0;
- }
- adap->status_file = debugfs_create_devm_seqfile(&adap->devnode.dev,
- "status", adap->cec_dir, cec_adap_status);
- if (IS_ERR_OR_NULL(adap->status_file)) {
- pr_warn("cec-%s: Failed to create status file\n", adap->name);
- debugfs_remove_recursive(adap->cec_dir);
- adap->cec_dir = NULL;
- return 0;
- }
+ adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev),
+ top_cec_dir);
+
+ debugfs_create_devm_seqfile(&adap->devnode.dev, "status", adap->cec_dir,
+ cec_adap_status);
+
if (!adap->ops->error_inj_show || !adap->ops->error_inj_parse_line)
return 0;
- adap->error_inj_file = debugfs_create_file("error-inj", 0644,
- adap->cec_dir, adap,
- &cec_error_inj_fops);
- if (IS_ERR_OR_NULL(adap->error_inj_file))
- pr_warn("cec-%s: Failed to create error-inj file\n",
- adap->name);
+ debugfs_create_file("error-inj", 0644, adap->cec_dir, adap,
+ &cec_error_inj_fops);
#endif
return 0;
}
@@ -407,9 +396,9 @@ void cec_delete_adapter(struct cec_adapter *adap)
{
if (IS_ERR_OR_NULL(adap))
return;
- kthread_stop(adap->kthread);
if (adap->kthread_config)
kthread_stop(adap->kthread_config);
+ kthread_stop(adap->kthread);
if (adap->ops->adap_free)
adap->ops->adap_free(adap);
#ifdef CONFIG_MEDIA_CEC_RC
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 660fe111f540..f006bd8eec63 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -417,7 +417,7 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
wake_up_interruptible(&pin->kthread_waitq);
break;
}
- /* fall through */
+ fallthrough;
case CEC_ST_TX_DATA_BIT_0_HIGH:
case CEC_ST_TX_DATA_BIT_0_HIGH_SHORT:
case CEC_ST_TX_DATA_BIT_0_HIGH_LONG:
@@ -445,7 +445,7 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
wake_up_interruptible(&pin->kthread_waitq);
break;
}
- /* fall through */
+ fallthrough;
case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM:
if (tx_last_bit(pin)) {
/* Error Injection: just stop sending after this bit */
@@ -459,7 +459,7 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
break;
}
pin->tx_bit++;
- /* fall through */
+ fallthrough;
case CEC_ST_TX_START_BIT_HIGH:
case CEC_ST_TX_START_BIT_HIGH_SHORT:
case CEC_ST_TX_START_BIT_HIGH_LONG:
diff --git a/drivers/media/cec/platform/seco/seco-cec.c b/drivers/media/cec/platform/seco/seco-cec.c
index 075dd79beb6f..ae138cc253fd 100644
--- a/drivers/media/cec/platform/seco/seco-cec.c
+++ b/drivers/media/cec/platform/seco/seco-cec.c
@@ -369,7 +369,7 @@ static int secocec_ir_probe(void *priv)
cec->ir->allowed_protocols = RC_PROTO_BIT_RC5;
cec->ir->priv = cec;
cec->ir->map_name = RC_MAP_HAUPPAUGE;
- cec->ir->timeout = MS_TO_NS(100);
+ cec->ir->timeout = MS_TO_US(100);
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index beae6aa12638..e4d8446b87da 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -389,7 +389,7 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
pulse8->new_rx_msg[0] = pulse8->buf[1];
break;
}
- /* fall through */
+ fallthrough;
case MSGCODE_FRAME_DATA:
if (pulse8->new_rx_msg_len < CEC_MAX_MSG_SIZE)
pulse8->new_rx_msg[pulse8->new_rx_msg_len++] =
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 6b06ea590074..21fb16cc5ca1 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -140,7 +140,7 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
struct page *pg;
int i;
- sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
+ sglist = kmalloc_array(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
sg_init_table(sglist, nr_pages);
diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
index e67ee3d55488..d4a116ab6c88 100644
--- a/drivers/media/common/siano/sms-cards.c
+++ b/drivers/media/common/siano/sms-cards.c
@@ -79,7 +79,7 @@ static struct sms_board sms_boards[] = {
.board_cfg.rf_switch_uhf = 17,
},
[SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = {
- .name = "Hauppauge WinTV MiniCard",
+ .name = "Hauppauge WinTV MiniCard Rev 2",
.type = SMS_NOVA_B0,
.fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_HCW_55XXX,
.default_mode = DEVICE_MODE_DVBT_BDA,
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 79bd627f84b8..d85c78c104b9 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -27,7 +27,7 @@ void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
for (i = 0; i < len >> 2; i++) {
struct ir_raw_event ev = {
- .duration = abs(samples[i]) * 1000, /* Convert to ns */
+ .duration = abs(samples[i]),
.pulse = (samples[i] > 0) ? false : true
};
@@ -48,7 +48,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
return -ENOMEM;
coredev->ir.controller = 0; /* Todo: vega/nova SPI number */
- coredev->ir.timeout = IR_DEFAULT_TIMEOUT;
+ coredev->ir.timeout = US_TO_NS(IR_DEFAULT_TIMEOUT);
pr_debug("IR port %d, timeout %d ms\n",
coredev->ir.controller, coredev->ir.timeout);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 7b1b86ec942d..2f3a5996d3fc 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -53,10 +53,10 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
unsigned int i;
unsigned long size = 0;
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, s, i) {
if (sg_dma_address(s) != expected)
break;
- expected = sg_dma_address(s) + sg_dma_len(s);
+ expected += sg_dma_len(s);
size += sg_dma_len(s);
}
return size;
@@ -98,8 +98,7 @@ static void vb2_dc_prepare(void *buf_priv)
if (!sgt)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
static void vb2_dc_finish(void *buf_priv)
@@ -110,7 +109,7 @@ static void vb2_dc_finish(void *buf_priv)
if (!sgt)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
/*********************************************/
@@ -270,8 +269,8 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
* memory locations do not require any explicit cache
* maintenance prior or after being used by the device.
*/
- dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
@@ -296,8 +295,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
attach->dma_dir = DMA_NONE;
}
@@ -305,9 +304,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
* mapping to the client with new direction, no cache sync
* required see comment in vb2_dc_dmabuf_ops_detach()
*/
- sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents) {
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
@@ -436,8 +434,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
* No need to sync to CPU, it's already synced to the CPU
* since the finish() memop will have been called before this.
*/
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
pages = frame_vector_pages(buf->vec);
/* sgt should exist only if vector contains pages... */
BUG_ON(IS_ERR(pages));
@@ -534,9 +532,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (sgt->nents <= 0) {
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
goto fail_sgt_init;
@@ -558,8 +555,7 @@ out:
return buf;
fail_map_sg:
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
sg_free_table(sgt);
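The conversion pattern used throughout these videobuf2 hunks: dma_map_sg_attrs() returns the number of mapped entries (0 on failure) and callers stored that count in sgt->nents themselves, while dma_map_sgtable() returns 0 or a negative errno and maintains sgt->nents internally. A minimal before/after sketch (illustrative; the -EIO choice follows the patch):

	/* Before: count-based API, caller maintains sgt->nents. */
	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
				      dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents)
		return ERR_PTR(-EIO);

	/* After: errno-based API, sgt->nents handled internally. */
	if (dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC))
		return ERR_PTR(-EIO);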
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index a86fce5d8ea8..748131151c49 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -147,9 +147,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
goto fail_map;
buf->handler.refcount = &buf->refcount;
@@ -185,8 +184,8 @@ static void vb2_dma_sg_put(void *buf_priv)
if (refcount_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -203,8 +202,7 @@ static void vb2_dma_sg_prepare(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir);
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
static void vb2_dma_sg_finish(void *buf_priv)
@@ -212,7 +210,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -255,9 +253,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (!sgt->nents)
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
goto userptr_fail_map;
return buf;
@@ -283,8 +280,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -407,8 +403,7 @@ static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
/* release the scatterlist cache */
if (attach->dma_dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
@@ -433,15 +428,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
attach->dma_dir = DMA_NONE;
}
/* mapping to the client with new direction */
- sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir);
- if (!sgt->nents) {
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index cfe197df970d..96d3b2b2aa31 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -14,21 +14,22 @@
* the Free Software Foundation.
*/
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/kthread.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/poll.h>
-#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
#include <media/videobuf2-v4l2.h>
@@ -600,7 +601,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
break;
case VB2_BUF_STATE_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
- /* fall through */
+ fallthrough;
case VB2_BUF_STATE_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
@@ -1220,6 +1221,44 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
+void vb2_video_unregister_device(struct video_device *vdev)
+{
+ /* Check if vdev was ever registered at all */
+ if (!vdev || !video_is_registered(vdev))
+ return;
+
+ /*
+ * Calling this function only makes sense if vdev->queue is set.
+ * If it is NULL, then just call video_unregister_device() instead.
+ */
+ WARN_ON(!vdev->queue);
+
+ /*
+ * Take a reference to the device since video_unregister_device()
+ * calls device_unregister(), but we don't want that to release
+ * the device since we want to clean up the queue first.
+ */
+ get_device(&vdev->dev);
+ video_unregister_device(vdev);
+ if (vdev->queue && vdev->queue->owner) {
+ struct mutex *lock = vdev->queue->lock ?
+ vdev->queue->lock : vdev->lock;
+
+ if (lock)
+ mutex_lock(lock);
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ if (lock)
+ mutex_unlock(lock);
+ }
+ /*
+ * Now we put the device, and in most cases this will release
+ * everything.
+ */
+ put_device(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vb2_video_unregister_device);
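A hypothetical call site for the new helper (driver names invented for illustration): a driver whose video_device owns a vb2 queue would unregister through this helper so the queue is released under the right lock before the device reference is dropped:

	/* Illustrative only: mydrv and its members are hypothetical. */
	static void mydrv_remove(struct mydrv *dev)
	{
		/* replaces a bare video_unregister_device(&dev->vdev) */
		vb2_video_unregister_device(&dev->vdev);
		/* ...free remaining driver state... */
	}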
+
/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index c66fda4a65e4..bf5ac63a5742 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -229,7 +229,7 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
kfree(attach);
return ret;
}
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
struct page *page = vmalloc_to_page(vaddr);
if (!page) {
@@ -259,8 +259,7 @@ static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
/* release the scatterlist cache */
if (attach->dma_dir != DMA_NONE)
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
sg_free_table(sgt);
kfree(attach);
db_attach->priv = NULL;
@@ -285,15 +284,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
- dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- attach->dma_dir);
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
attach->dma_dir = DMA_NONE;
}
/* mapping to the client with new direction */
- sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
- dma_dir);
- if (!sgt->nents) {
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 7281899bd7ae..7d7c341b2bd8 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -597,7 +597,7 @@ static int af9013_read_status(struct dvb_frontend *fe, enum fe_status *status)
state->strength_en = 2;
break;
}
- /* Fall through */
+ fallthrough;
case 1:
if (time_is_after_jiffies(state->strength_jiffies + msecs_to_jiffies(2000)))
break;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 32f9346deb3e..a57470bf71bf 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1011,8 +1011,7 @@ static int hi_command(struct drxk_state *state, u16 cmd, u16 *p_result)
retry_count += 1;
status = read16(state, SIO_HI_RA_RAM_CMD__A,
&wait_cmd);
- } while ((status < 0) && (retry_count < DRXK_MAX_RETRIES)
- && (wait_cmd != 0));
+ } while ((status < 0 || wait_cmd) && (retry_count < DRXK_MAX_RETRIES));
if (status < 0)
goto error;
status = read16(state, SIO_HI_RA_RAM_RES__A, p_result);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 10c152f461dd..f343066c297e 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1408,7 +1408,7 @@ struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
switch (config->lg_chip) {
default:
lg_warn("invalid chip requested, defaulting to LG2160");
- /* fall-thru */
+ fallthrough;
case LG2160:
memcpy(&state->frontend.ops, &lg2160_ops,
sizeof(struct dvb_frontend_ops));
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index f204e715bc59..ad6d9d564a87 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -906,7 +906,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
}
- /* fall through */
+ fallthrough;
default:
u16tmp = DIV_ROUND_UP(target_mclk, dev->cfg->ts_clk);
u8tmp1 = u16tmp / 2 - 1;
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 3843181bba16..2505f1e5794e 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1452,11 +1452,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
wait_t = (786432 + state->srate / 2) / state->srate;
else
wait_t = (1572864 + state->srate / 2) / state->srate;
- if (state->srate < 5000)
- /* FIXME ! , should be a long wait ! */
- msleep_interruptible(wait_t);
- else
- msleep_interruptible(wait_t);
+
+ msleep_interruptible(wait_t);
if (sync_chk(state, &junk) == 0) {
iq_vt_set(state, 1);
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 4404ace82981..0b00a23436ed 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 720756728f2d..ef6feb299d46 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1411,6 +1411,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
default:
v4l2_ctrl_handler_init(&dev->hdl, 0);
dev_err(&pdev->dev, "Unsupported tuner\n");
+ ret = -ENODEV;
goto err_v4l2_ctrl_handler_free;
}
if (dev->hdl.error) {
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 9fb207b41576..faa6e54b3372 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -137,26 +137,36 @@ static int tda10021_set_symbolrate (struct tda10021_state* state, u32 symbolrate
{
s32 BDR;
s32 BDRI;
- s16 SFIL=0;
+ s16 SFIL = 0;
u16 NDEC = 0;
u32 tmp, ratio;
- if (symbolrate > XIN/2)
- symbolrate = XIN/2;
- if (symbolrate < 500000)
+ if (symbolrate > XIN / 2)
+ symbolrate = XIN / 2;
+ else if (symbolrate < 500000)
symbolrate = 500000;
- if (symbolrate < XIN/16) NDEC = 1;
- if (symbolrate < XIN/32) NDEC = 2;
- if (symbolrate < XIN/64) NDEC = 3;
-
- if (symbolrate < (u32)(XIN/12.3)) SFIL = 1;
- if (symbolrate < (u32)(XIN/16)) SFIL = 0;
- if (symbolrate < (u32)(XIN/24.6)) SFIL = 1;
- if (symbolrate < (u32)(XIN/32)) SFIL = 0;
- if (symbolrate < (u32)(XIN/49.2)) SFIL = 1;
- if (symbolrate < (u32)(XIN/64)) SFIL = 0;
- if (symbolrate < (u32)(XIN/98.4)) SFIL = 1;
+ if (symbolrate < XIN / 16)
+ NDEC = 1;
+ if (symbolrate < XIN / 32)
+ NDEC = 2;
+ if (symbolrate < XIN / 64)
+ NDEC = 3;
+
+ if (symbolrate < XIN * 10 / 123)
+ SFIL = 1;
+ if (symbolrate < XIN * 10 / 160)
+ SFIL = 0;
+ if (symbolrate < XIN * 10 / 246)
+ SFIL = 1;
+ if (symbolrate < XIN * 10 / 320)
+ SFIL = 0;
+ if (symbolrate < XIN * 10 / 492)
+ SFIL = 1;
+ if (symbolrate < XIN * 10 / 640)
+ SFIL = 0;
+ if (symbolrate < XIN * 10 / 984)
+ SFIL = 1;
symbolrate <<= NDEC;
ratio = (symbolrate << 4) / FIN;
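The rewritten thresholds replace floating-point constants with exact integer equivalents, since normal kernel code must not use the FPU: 12.3 == 123/10, so XIN/12.3 becomes XIN * 10 / 123, and in tda10086 below 0.0137 == 137/10000, so SACLK * 0.0137 becomes SACLK / 10000 * 137 (dividing first keeps the product within 32 bits, at the cost of truncating SACLK to a 10 kHz grid). A quick standalone check (illustrative; XIN value is an assumed example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int xin = 57840000;	/* example crystal frequency, Hz */

		printf("XIN*10/123      = %u\n", xin * 10 / 123);	   /* was XIN/12.3 */
		printf("SACLK/10000*137 = %u\n", 96000000U / 10000 * 137); /* was SACLK*0.0137 */
		return 0;
	}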
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index be6b40138f6e..cdcf97664bba 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -17,7 +17,7 @@
#include <media/dvb_frontend.h>
#include "tda10086.h"
-#define SACLK 96000000
+#define SACLK 96000000U
struct tda10086_state {
struct i2c_adapter* i2c;
@@ -297,34 +297,34 @@ static int tda10086_set_symbol_rate(struct tda10086_state *state,
dprintk ("%s %i\n", __func__, symbol_rate);
/* setup the decimation and anti-aliasing filters.. */
- if (symbol_rate < (u32) (SACLK * 0.0137)) {
+ if (symbol_rate < SACLK / 10000 * 137) {
dfn=4;
afs=1;
- } else if (symbol_rate < (u32) (SACLK * 0.0208)) {
+ } else if (symbol_rate < SACLK / 10000 * 208) {
dfn=4;
afs=0;
- } else if (symbol_rate < (u32) (SACLK * 0.0270)) {
+ } else if (symbol_rate < SACLK / 10000 * 270) {
dfn=3;
afs=1;
- } else if (symbol_rate < (u32) (SACLK * 0.0416)) {
+ } else if (symbol_rate < SACLK / 10000 * 416) {
dfn=3;
afs=0;
- } else if (symbol_rate < (u32) (SACLK * 0.0550)) {
+ } else if (symbol_rate < SACLK / 10000 * 550) {
dfn=2;
afs=1;
- } else if (symbol_rate < (u32) (SACLK * 0.0833)) {
+ } else if (symbol_rate < SACLK / 10000 * 833) {
dfn=2;
afs=0;
- } else if (symbol_rate < (u32) (SACLK * 0.1100)) {
+ } else if (symbol_rate < SACLK / 10000 * 1100) {
dfn=1;
afs=1;
- } else if (symbol_rate < (u32) (SACLK * 0.1666)) {
+ } else if (symbol_rate < SACLK / 10000 * 1666) {
dfn=1;
afs=0;
- } else if (symbol_rate < (u32) (SACLK * 0.2200)) {
+ } else if (symbol_rate < SACLK / 10000 * 2200) {
dfn=0;
afs=1;
- } else if (symbol_rate < (u32) (SACLK * 0.3333)) {
+ } else if (symbol_rate < SACLK / 10000 * 3333) {
dfn=0;
afs=0;
} else {
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index 43312bba1aec..a34834487943 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -198,58 +198,55 @@ static void reset(struct tda_state *state)
state->m_bFMInput = (ulFMInput == 2);
}
-static bool SearchMap1(struct SMap Map[],
- u32 Frequency, u8 *pParam)
+static bool SearchMap1(const struct SMap map[], u32 frequency, u8 *param)
{
int i = 0;
- while ((Map[i].m_Frequency != 0) && (Frequency > Map[i].m_Frequency))
+ while ((map[i].m_Frequency != 0) && (frequency > map[i].m_Frequency))
i += 1;
- if (Map[i].m_Frequency == 0)
+ if (map[i].m_Frequency == 0)
return false;
- *pParam = Map[i].m_Param;
+ *param = map[i].m_Param;
return true;
}
-static bool SearchMap2(struct SMapI Map[],
- u32 Frequency, s32 *pParam)
+static bool SearchMap2(const struct SMapI map[], u32 frequency, s32 *param)
{
int i = 0;
- while ((Map[i].m_Frequency != 0) &&
- (Frequency > Map[i].m_Frequency))
+ while ((map[i].m_Frequency != 0) &&
+ (frequency > map[i].m_Frequency))
i += 1;
- if (Map[i].m_Frequency == 0)
+ if (map[i].m_Frequency == 0)
return false;
- *pParam = Map[i].m_Param;
+ *param = map[i].m_Param;
return true;
}
-static bool SearchMap3(struct SMap2 Map[], u32 Frequency,
- u8 *pParam1, u8 *pParam2)
+static bool SearchMap3(const struct SMap2 map[], u32 frequency, u8 *param1,
+ u8 *param2)
{
int i = 0;
- while ((Map[i].m_Frequency != 0) &&
- (Frequency > Map[i].m_Frequency))
+ while ((map[i].m_Frequency != 0) &&
+ (frequency > map[i].m_Frequency))
i += 1;
- if (Map[i].m_Frequency == 0)
+ if (map[i].m_Frequency == 0)
return false;
- *pParam1 = Map[i].m_Param1;
- *pParam2 = Map[i].m_Param2;
+ *param1 = map[i].m_Param1;
+ *param2 = map[i].m_Param2;
return true;
}
-static bool SearchMap4(struct SRFBandMap Map[],
- u32 Frequency, u8 *pRFBand)
+static bool SearchMap4(const struct SRFBandMap map[], u32 frequency, u8 *rfband)
{
int i = 0;
- while (i < 7 && (Frequency > Map[i].m_RF_max))
+ while (i < 7 && (frequency > map[i].m_RF_max))
i += 1;
if (i == 7)
return false;
- *pRFBand = i;
+ *rfband = i;
return true;
}
diff --git a/drivers/media/dvb-frontends/tda18271c2dd_maps.h b/drivers/media/dvb-frontends/tda18271c2dd_maps.h
index 5f75516bc0cb..82218e02d77d 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd_maps.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd_maps.h
@@ -6,7 +6,7 @@ enum HF_S {
HF_DVBC_8MHZ, HF_DVBC
};
-static struct SStandardParam m_StandardTable[] = {
+static const struct SStandardParam m_StandardTable[] = {
{ 0, 0, 0x00, 0x00 }, /* HF_None */
{ 6000000, 7000000, 0x1D, 0x2C }, /* HF_B, */
{ 6900000, 8000000, 0x1E, 0x2C }, /* HF_DK, */
@@ -28,7 +28,7 @@ static struct SStandardParam m_StandardTable[] = {
{ 0, 0, 0x00, 0x00 }, /* HF_DVBC (Unused) */
};
-static struct SMap m_BP_Filter_Map[] = {
+static const struct SMap m_BP_Filter_Map[] = {
{ 62000000, 0x00 },
{ 84000000, 0x01 },
{ 100000000, 0x02 },
@@ -39,7 +39,7 @@ static struct SMap m_BP_Filter_Map[] = {
{ 0, 0x00 }, /* Table End */
};
-static struct SMapI m_RF_Cal_Map[] = {
+static const struct SMapI m_RF_Cal_Map[] = {
{ 41000000, 0x0F },
{ 43000000, 0x1C },
{ 45000000, 0x2F },
@@ -481,7 +481,7 @@ static struct SMapI m_RF_Cal_Map[] = {
};
-static struct SMap2 m_KM_Map[] = {
+static const struct SMap2 m_KM_Map[] = {
{ 47900000, 3, 2 },
{ 61100000, 3, 1 },
{ 350000000, 3, 0 },
@@ -490,7 +490,7 @@ static struct SMap2 m_KM_Map[] = {
{ 0, 0x00 }, /* Table End */
};
-static struct SMap2 m_Main_PLL_Map[] = {
+static const struct SMap2 m_Main_PLL_Map[] = {
{ 33125000, 0x57, 0xF0 },
{ 35500000, 0x56, 0xE0 },
{ 38188000, 0x55, 0xD0 },
@@ -534,7 +534,7 @@ static struct SMap2 m_Main_PLL_Map[] = {
{ 0, 0x00, 0x00 }, /* Table End */
};
-static struct SMap2 m_Cal_PLL_Map[] = {
+static const struct SMap2 m_Cal_PLL_Map[] = {
{ 33813000, 0xDD, 0xD0 },
{ 36625000, 0xDC, 0xC0 },
{ 39938000, 0xDB, 0xB0 },
@@ -572,7 +572,7 @@ static struct SMap2 m_Cal_PLL_Map[] = {
{ 0, 0x00, 0x00 }, /* Table End */
};
-static struct SMap m_GainTaper_Map[] = {
+static const struct SMap m_GainTaper_Map[] = {
{ 45400000, 0x1F },
{ 45800000, 0x1E },
{ 46200000, 0x1D },
@@ -661,7 +661,7 @@ static struct SMap m_GainTaper_Map[] = {
{ 0, 0x00 }, /* Table End */
};
-static struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
+static const struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
{ 47900000, 0x00 },
{ 55000000, 0x00 },
{ 61100000, 0x0A },
@@ -767,14 +767,14 @@ static struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
};
-static struct SMap m_IR_Meas_Map[] = {
+static const struct SMap m_IR_Meas_Map[] = {
{ 200000000, 0x05 },
{ 400000000, 0x06 },
{ 865000000, 0x07 },
{ 0, 0x00 }, /* Table End */
};
-static struct SMap2 m_CID_Target_Map[] = {
+static const struct SMap2 m_CID_Target_Map[] = {
{ 46000000, 0x04, 18 },
{ 52200000, 0x0A, 15 },
{ 70100000, 0x01, 40 },
@@ -790,7 +790,7 @@ static struct SMap2 m_CID_Target_Map[] = {
{ 0, 0x00, 0 }, /* Table End */
};
-static struct SRFBandMap m_RF_Band_Map[7] = {
+static const struct SRFBandMap m_RF_Band_Map[7] = {
{ 47900000, 46000000, 0, 0},
{ 61100000, 52200000, 0, 0},
{ 152600000, 70100000, 136800000, 0},
diff --git a/drivers/media/dvb-frontends/zd1301_demod.h b/drivers/media/dvb-frontends/zd1301_demod.h
index d56196f5c801..01eaacf76a13 100644
--- a/drivers/media/dvb-frontends/zd1301_demod.h
+++ b/drivers/media/dvb-frontends/zd1301_demod.h
@@ -43,12 +43,6 @@ struct i2c_adapter *zd1301_demod_get_i2c_adapter(struct platform_device *pdev);
#else
-/**
- * zd1301_demod_get_dvb_frontend() - Attach a zd1301 frontend
- * @dev: Pointer to platform device
- *
- * Return: Pointer to %struct dvb_frontend or NULL if attach fails.
- */
static inline struct dvb_frontend *zd1301_demod_get_dvb_frontend(struct platform_device *dev)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
index 3f1ca40b9b98..8a8585261bb8 100644
--- a/drivers/media/firewire/firedtv-fw.c
+++ b/drivers/media/firewire/firedtv-fw.c
@@ -272,8 +272,10 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
name_len = fw_csr_string(unit->directory, CSR_MODEL,
name, sizeof(name));
- if (name_len < 0)
- return name_len;
+ if (name_len < 0) {
+ err = name_len;
+ goto fail_free;
+ }
for (i = ARRAY_SIZE(model_names); --i; )
if (strlen(model_names[i]) <= name_len &&
strncmp(name, model_names[i], name_len) == 0)
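
[Editor's sketch: the firedtv fix converts an early return into the function's existing unwind path so the partially initialised device is freed on every exit. A generic standalone rendering of that goto-unwind idiom; names and error values are illustrative, not the driver's.]

#include <stdlib.h>

struct ctx { char *buf; };

static int do_probe(struct ctx **out)
{
	struct ctx *c;
	int err;

	c = malloc(sizeof(*c));
	if (!c)
		return -1;

	c->buf = malloc(64);
	if (!c->buf) {
		err = -1;	/* record the error, like err = name_len above */
		goto fail_free;	/* then run the shared cleanup */
	}

	*out = c;
	return 0;

fail_free:
	free(c);
	return err;
}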
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index c7ba76fee599..878f66ef2719 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -1015,7 +1015,7 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
- select REGMAP_I2C
+ select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 00159daa6fcd..4498d14d3429 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -726,7 +726,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
case V4L2_FIELD_NONE:
if (state->chip_info->flags & ADV7180_FLAG_I2P)
break;
- /* fall through */
+ fallthrough;
default:
format->format.field = V4L2_FIELD_ALTERNATE;
break;
@@ -760,8 +760,9 @@ static int adv7180_init_cfg(struct v4l2_subdev *sd,
return adv7180_set_pad_format(sd, cfg, &fmt);
}
-static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int adv7180_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
struct adv7180_state *state = to_state(sd);
@@ -852,7 +853,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
- .g_mbus_config = adv7180_g_mbus_config,
.g_pixelaspect = adv7180_g_pixelaspect,
.g_tvnorms = adv7180_g_tvnorms,
.s_stream = adv7180_s_stream,
@@ -869,6 +869,7 @@ static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
.enum_mbus_code = adv7180_enum_mbus_code,
.set_fmt = adv7180_set_pad_format,
.get_fmt = adv7180_get_pad_format,
+ .get_mbus_config = adv7180_get_mbus_config,
};
static const struct v4l2_subdev_sensor_ops adv7180_sensor_ops = {
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 23e02ff27b17..1fe7f97c6d52 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -241,10 +241,10 @@ static int adv748x_power_up_tx(struct adv748x_csi2 *tx)
int ret = 0;
/* Enable n-lane MIPI */
- adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
+ adv748x_write_check(state, page, 0x00, 0x80 | tx->active_lanes, &ret);
/* Set Auto DPHY Timing */
- adv748x_write_check(state, page, 0x00, 0xa0 | tx->num_lanes, &ret);
+ adv748x_write_check(state, page, 0x00, 0xa0 | tx->active_lanes, &ret);
/* ADI Required Write */
if (tx->src == &state->hdmi.sd) {
@@ -270,7 +270,7 @@ static int adv748x_power_up_tx(struct adv748x_csi2 *tx)
usleep_range(2000, 2500);
/* Power-up CSI-TX */
- adv748x_write_check(state, page, 0x00, 0x20 | tx->num_lanes, &ret);
+ adv748x_write_check(state, page, 0x00, 0x20 | tx->active_lanes, &ret);
usleep_range(1000, 1500);
/* ADI Required Writes */
@@ -292,7 +292,7 @@ static int adv748x_power_down_tx(struct adv748x_csi2 *tx)
adv748x_write_check(state, page, 0x1e, 0x00, &ret);
/* Enable n-lane MIPI */
- adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret);
+ adv748x_write_check(state, page, 0x00, 0x80 | tx->active_lanes, &ret);
/* i2c_mipi_pll_en - 1'b1 */
adv748x_write_check(state, page, 0xda, 0x01, &ret);
@@ -357,14 +357,29 @@ static int adv748x_link_setup(struct media_entity *entity,
if (state->afe.tx) {
/* AFE Requires TXA enabled, even when output to TXB */
io10 |= ADV748X_IO_10_CSI4_EN;
- if (is_txa(tx))
+ if (is_txa(tx)) {
+ /*
+ * Output from the SD-core (480i and 576i) through the TXA
+ * interface requires reducing the number of enabled
+ * data lanes in order to guarantee a valid link
+ * frequency.
+ */
+ tx->active_lanes = min(tx->num_lanes, 2U);
io10 |= ADV748X_IO_10_CSI4_IN_SEL_AFE;
- else
+ } else {
+ /* TXB has a single data lane, no need to adjust. */
io10 |= ADV748X_IO_10_CSI1_EN;
+ }
}
- if (state->hdmi.tx)
+ if (state->hdmi.tx) {
+ /*
+ * Restore the number of active lanes, in case we have gone
+ * through an AFE->TXA streaming session.
+ */
+ tx->active_lanes = tx->num_lanes;
io10 |= ADV748X_IO_10_CSI4_EN;
+ }
return io_clrset(state, ADV748X_IO_10, io10_mask, io10);
}
@@ -596,6 +611,7 @@ static int adv748x_parse_csi2_lanes(struct adv748x_state *state,
}
state->txa.num_lanes = num_lanes;
+ state->txa.active_lanes = num_lanes;
adv_dbg(state, "TXA: using %u lanes\n", state->txa.num_lanes);
}
@@ -607,6 +623,7 @@ static int adv748x_parse_csi2_lanes(struct adv748x_state *state,
}
state->txb.num_lanes = num_lanes;
+ state->txb.active_lanes = num_lanes;
adv_dbg(state, "TXB: using %u lanes\n", state->txb.num_lanes);
}
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index 2091cda50935..99bb63d05eef 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -214,9 +214,40 @@ unlock:
return ret;
}
+static int adv748x_csi2_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+
+ if (pad != ADV748X_CSI2_SOURCE)
+ return -EINVAL;
+
+ config->type = V4L2_MBUS_CSI2_DPHY;
+ switch (tx->active_lanes) {
+ case 1:
+ config->flags = V4L2_MBUS_CSI2_1_LANE;
+ break;
+
+ case 2:
+ config->flags = V4L2_MBUS_CSI2_2_LANE;
+ break;
+
+ case 3:
+ config->flags = V4L2_MBUS_CSI2_3_LANE;
+ break;
+
+ case 4:
+ config->flags = V4L2_MBUS_CSI2_4_LANE;
+ break;
+ }
+
+ return 0;
+}
+
static const struct v4l2_subdev_pad_ops adv748x_csi2_pad_ops = {
.get_fmt = adv748x_csi2_get_format,
.set_fmt = adv748x_csi2_set_format,
+ .get_mbus_config = adv748x_csi2_get_mbus_config,
};
/* -----------------------------------------------------------------------------
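
[Editor's sketch: the new pad operation above maps the runtime lane count onto per-count flag bits. A compact standalone rendering of that mapping; the macros are mock stand-ins for the V4L2_MBUS_CSI2_*_LANE flags and their values are illustrative only.]

#include <stdint.h>

#define MBUS_CSI2_1_LANE (1U << 0)	/* mock flag values */
#define MBUS_CSI2_2_LANE (1U << 1)
#define MBUS_CSI2_3_LANE (1U << 2)
#define MBUS_CSI2_4_LANE (1U << 3)

static uint32_t lanes_to_flags(unsigned int active_lanes)
{
	switch (active_lanes) {
	case 1: return MBUS_CSI2_1_LANE;
	case 2: return MBUS_CSI2_2_LANE;
	case 3: return MBUS_CSI2_3_LANE;
	case 4: return MBUS_CSI2_4_LANE;
	default: return 0;	/* the driver never programs other counts */
	}
}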
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index fccb388ce179..1061f425ece5 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -79,6 +79,7 @@ struct adv748x_csi2 {
unsigned int page;
unsigned int port;
unsigned int num_lanes;
+ unsigned int active_lanes;
struct media_pad pads[ADV748X_CSI2_NR_PADS];
struct v4l2_ctrl_handler ctrl_hdl;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 62763ec4cd07..a3161d709015 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -470,7 +470,7 @@ static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
break;
}
- /* fall through */
+ fallthrough;
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
adv7511_inv_register(sd);
@@ -492,7 +492,7 @@ static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regi
adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
break;
}
- /* fall through */
+ fallthrough;
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
adv7511_inv_register(sd);
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 2181c8a347fc..2cf3e6a1f9e1 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -688,7 +688,7 @@ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
}
v = (unsigned) pulse_width_count_to_ns(
- (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
+ (u16)(p->hw_fifo_data & FIFO_RXTX), divider) / 1000;
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
index b38a4e6d270d..438a44b76da8 100644
--- a/drivers/media/i2c/dw9807-vcm.c
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -324,6 +324,6 @@ static struct i2c_driver dw9807_i2c_driver = {
module_i2c_driver(dw9807_i2c_driver);
-MODULE_AUTHOR("Chiang, Alan <alanx.chiang@intel.com>");
+MODULE_AUTHOR("Chiang, Alan");
MODULE_DESCRIPTION("DW9807 VCM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index f64c0ef7a897..1cee45e35355 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1188,7 +1188,7 @@ static int __maybe_unused imx219_resume(struct device *dev)
error:
imx219_stop_streaming(imx219);
- imx219->streaming = 0;
+ imx219->streaming = false;
return ret;
}
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index f86ae18bc104..ccb55fd1d506 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1304,7 +1304,7 @@ static struct i2c_driver imx258_i2c_driver = {
module_i2c_driver(imx258_i2c_driver);
MODULE_AUTHOR("Yeh, Andy <andy.yeh@intel.com>");
-MODULE_AUTHOR("Chiang, Alan <alanx.chiang@intel.com>");
+MODULE_AUTHOR("Chiang, Alan");
MODULE_AUTHOR("Chen, Jason <jasonx.z.chen@intel.com>");
MODULE_DESCRIPTION("Sony IMX258 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 6011cec5e351..e6aa9f32b6a8 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -1235,6 +1235,8 @@ static int imx274_s_frame_interval(struct v4l2_subdev *sd,
ret = imx274_set_frame_interval(imx274, fi->interval);
if (!ret) {
+ fi->interval = imx274->frame_interval;
+
/*
* exposure time range is decided by frame interval
* need to update it after frame interval changes
@@ -1730,9 +1732,9 @@ static int imx274_set_frame_interval(struct stimx274 *priv,
__func__, frame_interval.numerator,
frame_interval.denominator);
- if (frame_interval.numerator == 0) {
- err = -EINVAL;
- goto fail;
+ if (frame_interval.numerator == 0 || frame_interval.denominator == 0) {
+ frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+ frame_interval.numerator = 1;
}
req_frame_rate = (u32)(frame_interval.denominator
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index de295114ca48..21666d705e37 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -764,7 +764,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
if (ret) {
- info->set_power(&client->dev, 0);
+ if (info->set_power)
+ info->set_power(&client->dev, 0);
return ret;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 47f280518fdb..c82c1493e099 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -135,13 +135,19 @@
#define MAX9286_SRC_PAD 4
struct max9286_source {
- struct v4l2_async_subdev asd;
struct v4l2_subdev *sd;
struct fwnode_handle *fwnode;
};
-#define asd_to_max9286_source(_asd) \
- container_of(_asd, struct max9286_source, asd)
+struct max9286_asd {
+ struct v4l2_async_subdev base;
+ struct max9286_source *source;
+};
+
+static inline struct max9286_asd *to_max9286_asd(struct v4l2_async_subdev *asd)
+{
+ return container_of(asd, struct max9286_asd, base);
+}
struct max9286_priv {
struct i2c_client *client;
@@ -405,10 +411,11 @@ static int max9286_check_config_link(struct max9286_priv *priv,
* to 5 milliseconds.
*/
for (i = 0; i < 10; i++) {
- ret = max9286_read(priv, 0x49) & 0xf0;
+ ret = max9286_read(priv, 0x49);
if (ret < 0)
return -EIO;
+ ret &= 0xf0;
if (ret == conflink_mask)
break;
@@ -480,7 +487,7 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct max9286_priv *priv = sd_to_max9286(notifier->sd);
- struct max9286_source *source = asd_to_max9286_source(asd);
+ struct max9286_source *source = to_max9286_asd(asd)->source;
unsigned int index = to_index(priv, source);
unsigned int src_pad;
int ret;
@@ -544,7 +551,7 @@ static void max9286_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct max9286_priv *priv = sd_to_max9286(notifier->sd);
- struct max9286_source *source = asd_to_max9286_source(asd);
+ struct max9286_source *source = to_max9286_asd(asd)->source;
unsigned int index = to_index(priv, source);
source->sd = NULL;
@@ -569,23 +576,19 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
-
- source->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- source->asd.match.fwnode = source->fwnode;
-
- ret = v4l2_async_notifier_add_subdev(&priv->notifier,
- &source->asd);
- if (ret) {
- dev_err(dev, "Failed to add subdev for source %d", i);
+ struct v4l2_async_subdev *asd;
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+ source->fwnode,
+ sizeof(*asd));
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev for source %u: %ld",
+ i, PTR_ERR(asd));
v4l2_async_notifier_cleanup(&priv->notifier);
- return ret;
+ return PTR_ERR(asd);
}
- /*
- * Balance the reference counting handled through
- * v4l2_async_notifier_cleanup()
- */
- fwnode_handle_get(source->fwnode);
+ to_max9286_asd(asd)->source = source;
}
priv->notifier.ops = &max9286_notify_ops;
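
[Editor's sketch: the max9286 rework wraps the framework's async subdev in a driver struct and recovers the driver data with container_of(), instead of embedding the subdev in the source itself. A standalone illustration of that wrapper idiom; container_of() is simplified here (no type checking) so the example compiles outside the kernel.]

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };	/* stands in for v4l2_async_subdev */

struct wrapper {
	struct base b;		/* embedded framework object */
	void *priv;		/* driver data recovered from &b */
};

static struct wrapper *to_wrapper(struct base *b)
{
	return container_of(b, struct wrapper, b);
}

int main(void)
{
	struct wrapper w = { .b = { .id = 1 }, .priv = &w };
	struct base *b = &w.b;	/* what the framework hands back */

	return to_wrapper(b)->priv == &w ? 0 : 1;
}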
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index c444bd6a0658..ff212335326a 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -219,8 +219,9 @@ static int ml86v7667_fill_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ml86v7667_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int ml86v7667_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_DATA_ACTIVE_HIGH;
@@ -291,13 +292,13 @@ static const struct v4l2_subdev_video_ops ml86v7667_subdev_video_ops = {
.s_std = ml86v7667_s_std,
.querystd = ml86v7667_querystd,
.g_input_status = ml86v7667_g_input_status,
- .g_mbus_config = ml86v7667_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ml86v7667_subdev_pad_ops = {
.enum_mbus_code = ml86v7667_enum_mbus_code,
.get_fmt = ml86v7667_fill_fmt,
.set_fmt = ml86v7667_fill_fmt,
+ .get_mbus_config = ml86v7667_get_mbus_config,
};
static const struct v4l2_subdev_core_ops ml86v7667_subdev_core_ops = {
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index d3b0d1c18efd..52e506f86de5 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -646,7 +646,7 @@ restart:
break;
case 0: /* 4.5 */
state->detected_std = V4L2_STD_MN;
- /* fall-through */
+ fallthrough;
default:
no_second:
state->second = msp3400c_carrier_detect_main[max1].cdo;
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 210ea76adb53..3b0ba8ed5233 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -689,8 +689,9 @@ static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
-static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int mt9m001_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
/* MT9M001 has all capture_format parameters fixed */
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -703,7 +704,6 @@ static int mt9m001_g_mbus_config(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
- .g_mbus_config = mt9m001_g_mbus_config,
};
static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
@@ -717,6 +717,7 @@ static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
.set_selection = mt9m001_set_selection,
.get_fmt = mt9m001_get_fmt,
.set_fmt = mt9m001_set_fmt,
+ .get_mbus_config = mt9m001_get_mbus_config,
};
static const struct v4l2_subdev_ops mt9m001_subdev_ops = {
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 17e8253f5748..69697386ffcd 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1137,8 +1137,9 @@ static int mt9m111_init_cfg(struct v4l2_subdev *sd,
return 0;
}
-static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int mt9m111_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
@@ -1155,7 +1156,6 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
- .g_mbus_config = mt9m111_g_mbus_config,
.s_stream = mt9m111_s_stream,
.g_frame_interval = mt9m111_g_frame_interval,
.s_frame_interval = mt9m111_s_frame_interval,
@@ -1168,6 +1168,7 @@ static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
.set_selection = mt9m111_set_selection,
.get_fmt = mt9m111_get_fmt,
.set_fmt = mt9m111_set_fmt,
+ .get_mbus_config = mt9m111_get_mbus_config,
};
static const struct v4l2_subdev_ops mt9m111_subdev_ops = {
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index fd0b6a903ec1..bd0d45b0d43f 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1018,6 +1018,10 @@ static int ov2740_register_nvmem(struct i2c_client *client)
if (!nvm)
return -ENOMEM;
+ nvm->nvm_buffer = devm_kzalloc(dev, CUSTOMER_USE_OTP_SIZE, GFP_KERNEL);
+ if (!nvm->nvm_buffer)
+ return -ENOMEM;
+
regmap_config.val_bits = 8;
regmap_config.reg_bits = 16;
regmap_config.disable_locking = true;
@@ -1027,6 +1031,12 @@ static int ov2740_register_nvmem(struct i2c_client *client)
nvm->regmap = regmap;
+ ret = ov2740_load_otp_data(client, nvm);
+ if (ret) {
+ dev_err(dev, "failed to load OTP data, ret %d\n", ret);
+ return ret;
+ }
+
nvmem_config.name = dev_name(dev);
nvmem_config.dev = dev;
nvmem_config.read_only = true;
@@ -1042,18 +1052,8 @@ static int ov2740_register_nvmem(struct i2c_client *client)
nvmem_config.size = CUSTOMER_USE_OTP_SIZE;
nvm->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(nvm->nvmem))
- return PTR_ERR(nvm->nvmem);
- nvm->nvm_buffer = devm_kzalloc(dev, CUSTOMER_USE_OTP_SIZE, GFP_KERNEL);
- if (!nvm->nvm_buffer)
- return -ENOMEM;
-
- ret = ov2740_load_otp_data(client, nvm);
- if (ret)
- dev_err(dev, "failed to load OTP data, ret %d\n", ret);
-
- return ret;
+ return PTR_ERR_OR_ZERO(nvm->nvmem);
}
static int ov2740_probe(struct i2c_client *client)
@@ -1107,7 +1107,7 @@ static int ov2740_probe(struct i2c_client *client)
ret = ov2740_register_nvmem(client);
if (ret)
- dev_err(&client->dev, "register nvmem failed, ret %d\n", ret);
+ dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
/*
* Device is already turned on by i2c-core with ACPI domain PM.
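
[Editor's sketch: the ov2740 reorder allocates the buffer and loads OTP data before registering the nvmem provider, so a consumer can never observe an empty device, and the tail collapses to PTR_ERR_OR_ZERO(). A simplified standalone rendering of that helper; the pointer-encoding details mirror the kernel's ERR_PTR scheme but are illustrative.]

#include <stdint.h>

#define MAX_ERRNO 4095

static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static inline long ptr_err_or_zero(const void *p)
{
	if (is_err(p))
		return (intptr_t)p;	/* the negative errno encoded in the pointer */
	return 0;
}

With this helper the last step of a registration function becomes a single
"return ptr_err_or_zero(handle);" rather than an if/else on IS_ERR().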
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 2fe4a7ac0592..8d0254d0e5ea 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -34,6 +34,8 @@
#define OV5640_REG_SYS_RESET02 0x3002
#define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
#define OV5640_REG_SYS_CTRL0 0x3008
+#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
+#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
#define OV5640_REG_CHIP_ID 0x300a
#define OV5640_REG_IO_MIPI_CTRL00 0x300e
#define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
@@ -82,6 +84,7 @@
#define OV5640_REG_VFIFO_HSIZE 0x4602
#define OV5640_REG_VFIFO_VSIZE 0x4604
#define OV5640_REG_JPG_MODE_SELECT 0x4713
+#define OV5640_REG_CCIR656_CTRL00 0x4730
#define OV5640_REG_POLARITY_CTRL00 0x4740
#define OV5640_REG_MIPI_CTRL00 0x4800
#define OV5640_REG_DEBUG_MODE 0x4814
@@ -274,8 +277,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
/* YUV422 UYVY VGA@30fps */
static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
- {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
- {0x3630, 0x36, 0, 0},
+ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
{0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
@@ -751,7 +753,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
* +->| PLL Root Div | - reg 0x3037, bit 4
* +-+------------+
* | +---------+
- * +->| Bit Div | - reg 0x3035, bits 0-3
+ * +->| Bit Div | - reg 0x3034, bits 0-3
* +-+-------+
* | +-------------+
* +->| SCLK Div | - reg 0x3108, bits 0-1
@@ -1120,6 +1122,12 @@ static int ov5640_load_regs(struct ov5640_dev *sensor,
val = regs->val;
mask = regs->mask;
+ /* remain in power down mode for DVP */
+ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
+ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
+ continue;
+
if (mask)
ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
else
@@ -1208,98 +1216,25 @@ static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on)
BIT(1), on ? 0 : BIT(1));
}
-static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
+static int ov5640_set_stream_bt656(struct ov5640_dev *sensor, bool on)
{
int ret;
- unsigned int flags = sensor->ep.bus.parallel.flags;
- u8 pclk_pol = 0;
- u8 hsync_pol = 0;
- u8 vsync_pol = 0;
-
- /*
- * Note about parallel port configuration.
- *
- * When configured in parallel mode, the OV5640 will
- * output 10 bits data on DVP data lines [9:0].
- * If only 8 bits data are wanted, the 8 bits data lines
- * of the camera interface must be physically connected
- * on the DVP data lines [9:2].
- *
- * Control lines polarity can be configured through
- * devicetree endpoint control lines properties.
- * If no endpoint control lines properties are set,
- * polarity will be as below:
- * - VSYNC: active high
- * - HREF: active low
- * - PCLK: active low
- */
- if (on) {
- /*
- * configure parallel port control lines polarity
- *
- * POLARITY CTRL0
- * - [5]: PCLK polarity (0: active low, 1: active high)
- * - [1]: HREF polarity (0: active low, 1: active high)
- * - [0]: VSYNC polarity (mismatch here between
- * datasheet and hardware, 0 is active high
- * and 1 is active low...)
- */
- if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
- pclk_pol = 1;
- if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- hsync_pol = 1;
- if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- vsync_pol = 1;
-
- ret = ov5640_write_reg(sensor,
- OV5640_REG_POLARITY_CTRL00,
- (pclk_pol << 5) |
- (hsync_pol << 1) |
- vsync_pol);
-
- if (ret)
- return ret;
- }
-
- /*
- * powerdown MIPI TX/RX PHY & disable MIPI
- *
- * MIPI CONTROL 00
- * 4: PWDN PHY TX
- * 3: PWDN PHY RX
- * 2: MIPI enable
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_IO_MIPI_CTRL00, on ? 0x18 : 0);
+ ret = ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00,
+ on ? 0x1 : 0x00);
if (ret)
return ret;
- /*
- * enable VSYNC/HREF/PCLK DVP control lines
- * & D[9:6] DVP data lines
- *
- * PAD OUTPUT ENABLE 01
- * - 6: VSYNC output enable
- * - 5: HREF output enable
- * - 4: PCLK output enable
- * - [3:0]: D[9:6] output enable
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_PAD_OUTPUT_ENABLE01,
- on ? 0x7f : 0);
- if (ret)
- return ret;
+ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
+ OV5640_REG_SYS_CTRL0_SW_PWUP :
+ OV5640_REG_SYS_CTRL0_SW_PWDN);
+}
- /*
- * enable D[5:0] DVP data lines
- *
- * PAD OUTPUT ENABLE 02
- * - [7:2]: D[5:0] output enable
- */
- return ov5640_write_reg(sensor,
- OV5640_REG_PAD_OUTPUT_ENABLE02,
- on ? 0xfc : 0);
+static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
+{
+ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
+ OV5640_REG_SYS_CTRL0_SW_PWUP :
+ OV5640_REG_SYS_CTRL0_SW_PWDN);
}
static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
@@ -2001,79 +1936,181 @@ static void ov5640_set_power_off(struct ov5640_dev *sensor)
clk_disable_unprepare(sensor->xclk);
}
-static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
{
- int ret = 0;
+ int ret;
+
+ if (!on) {
+ /* Reset MIPI bus settings to their default values. */
+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
+ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
+ return 0;
+ }
+
+ /*
+ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
+ *
+ * 0x300e = 0x40
+ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
+ * "ov5640_set_stream_mipi()")
+ * [4] = 0 : Power up MIPI HS Tx
+ * [3] = 0 : Power up MIPI LS Rx
+ * [2] = 0 : MIPI interface disabled
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
+ if (ret)
+ return ret;
+
+ /*
+ * Gate clock and set LP11 in 'no packets mode' (idle)
+ *
+ * 0x4800 = 0x24
+ * [5] = 1 : Gate clock when 'no packets'
+ * [2] = 1 : MIPI bus in LP11 when 'no packets'
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
+ if (ret)
+ return ret;
+
+ /*
+ * Set data lanes and clock in LP11 when 'sleeping'
+ *
+ * 0x3019 = 0x70
+ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
+ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
+ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
+ if (ret)
+ return ret;
+
+ /* Give lanes some time to coax into LP11 state. */
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
+{
+ unsigned int flags = sensor->ep.bus.parallel.flags;
+ u8 pclk_pol = 0;
+ u8 hsync_pol = 0;
+ u8 vsync_pol = 0;
+ int ret;
+
+ if (!on) {
+ /* Reset settings to their default values. */
+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
+ ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, 0x20);
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
+ return 0;
+ }
+
+ /*
+ * Note about parallel port configuration.
+ *
+ * When configured in parallel mode, the OV5640 will
+ * output 10 bits data on DVP data lines [9:0].
+ * If only 8 bits data are wanted, the 8 bits data lines
+ * of the camera interface must be physically connected
+ * on the DVP data lines [9:2].
+ *
+ * Control lines polarity can be configured through
+ * devicetree endpoint control lines properties.
+ * If no endpoint control lines properties are set,
+ * polarity will be as below:
+ * - VSYNC: active high
+ * - HREF: active low
+ * - PCLK: active low
+ */
+ /*
+ * configure parallel port control lines polarity
+ *
+ * POLARITY CTRL0
+ * - [5]: PCLK polarity (0: active low, 1: active high)
+ * - [1]: HREF polarity (0: active low, 1: active high)
+ * - [0]: VSYNC polarity (mismatch here between
+ * datasheet and hardware, 0 is active high
+ * and 1 is active low...)
+ */
+ if (sensor->ep.bus_type == V4L2_MBUS_PARALLEL) {
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ pclk_pol = 1;
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ hsync_pol = 1;
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ vsync_pol = 1;
+
+ ret = ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00,
+ (pclk_pol << 5) | (hsync_pol << 1) |
+ vsync_pol);
- if (on) {
- ret = ov5640_set_power_on(sensor);
if (ret)
return ret;
+ }
- ret = ov5640_restore_mode(sensor);
- if (ret)
- goto power_off;
+ /*
+ * powerdown MIPI TX/RX PHY & disable MIPI
+ *
+ * MIPI CONTROL 00
+ * 4: PWDN PHY TX
+ * 3: PWDN PHY RX
+ * 2: MIPI enable
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x18);
+ if (ret)
+ return ret;
- /* We're done here for DVP bus, while CSI-2 needs setup. */
- if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
- return 0;
+ /*
+ * enable VSYNC/HREF/PCLK DVP control lines
+ * & D[9:6] DVP data lines
+ *
+ * PAD OUTPUT ENABLE 01
+ * - 6: VSYNC output enable
+ * - 5: HREF output enable
+ * - 4: PCLK output enable
+ * - [3:0]: D[9:6] output enable
+ */
+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01,
+ sensor->ep.bus_type == V4L2_MBUS_PARALLEL ?
+ 0x7f : 0x1f);
+ if (ret)
+ return ret;
- /*
- * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
- *
- * 0x300e = 0x40
- * [7:5] = 010 : 2 data lanes mode (see FIXME note in
- * "ov5640_set_stream_mipi()")
- * [4] = 0 : Power up MIPI HS Tx
- * [3] = 0 : Power up MIPI LS Rx
- * [2] = 0 : MIPI interface disabled
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_IO_MIPI_CTRL00, 0x40);
- if (ret)
- goto power_off;
+ /*
+ * enable D[5:0] DVP data lines
+ *
+ * PAD OUTPUT ENABLE 02
+ * - [7:2]: D[5:0] output enable
+ */
+ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
+}
- /*
- * Gate clock and set LP11 in 'no packets mode' (idle)
- *
- * 0x4800 = 0x24
- * [5] = 1 : Gate clock when 'no packets'
- * [2] = 1 : MIPI bus in LP11 when 'no packets'
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_MIPI_CTRL00, 0x24);
+static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+{
+ int ret = 0;
+
+ if (on) {
+ ret = ov5640_set_power_on(sensor);
if (ret)
- goto power_off;
+ return ret;
- /*
- * Set data lanes and clock in LP11 when 'sleeping'
- *
- * 0x3019 = 0x70
- * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
- * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
- * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
- */
- ret = ov5640_write_reg(sensor,
- OV5640_REG_PAD_OUTPUT00, 0x70);
+ ret = ov5640_restore_mode(sensor);
if (ret)
goto power_off;
+ }
- /* Give lanes some time to coax into LP11 state. */
- usleep_range(500, 1000);
-
- } else {
- if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
- /* Reset MIPI bus settings to their default values. */
- ov5640_write_reg(sensor,
- OV5640_REG_IO_MIPI_CTRL00, 0x58);
- ov5640_write_reg(sensor,
- OV5640_REG_MIPI_CTRL00, 0x04);
- ov5640_write_reg(sensor,
- OV5640_REG_PAD_OUTPUT00, 0x00);
- }
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ ret = ov5640_set_power_mipi(sensor, on);
+ else
+ ret = ov5640_set_power_dvp(sensor, on);
+ if (ret)
+ goto power_off;
+ if (!on)
ov5640_set_power_off(sensor);
- }
return 0;
@@ -2888,6 +2925,8 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
ret = ov5640_set_stream_mipi(sensor, enable);
+ else if (sensor->ep.bus_type == V4L2_MBUS_BT656)
+ ret = ov5640_set_stream_bt656(sensor, enable);
else
ret = ov5640_set_stream_dvp(sensor, enable);
@@ -3010,7 +3049,7 @@ static int ov5640_probe(struct i2c_client *client)
switch (rotation) {
case 180:
sensor->upside_down = true;
- /* fall through */
+ fallthrough;
case 0:
break;
default:
@@ -3033,6 +3072,13 @@ static int ov5640_probe(struct i2c_client *client)
return ret;
}
+ if (sensor->ep.bus_type != V4L2_MBUS_PARALLEL &&
+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY &&
+ sensor->ep.bus_type != V4L2_MBUS_BT656) {
+ dev_err(dev, "Unsupported bus type %d\n", sensor->ep.bus_type);
+ return -EINVAL;
+ }
+
/* get system clock (xclk) */
sensor->xclk = devm_clk_get(dev, "xclk");
if (IS_ERR(sensor->xclk)) {
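
[Editor's sketch: the POLARITY CTRL0 programming that moved into ov5640_set_power_dvp() composes three single-bit fields from the endpoint flags. A standalone rendering of that computation; the flag macros are local stand-ins for the V4L2 media-bus flags, with illustrative values.]

#include <stdint.h>
#include <stdio.h>

#define PCLK_SAMPLE_RISING	(1U << 0)	/* stand-in flags */
#define HSYNC_ACTIVE_HIGH	(1U << 1)
#define VSYNC_ACTIVE_LOW	(1U << 2)

static uint8_t polarity_ctrl00(uint32_t flags)
{
	uint8_t pclk_pol  = !!(flags & PCLK_SAMPLE_RISING);
	uint8_t hsync_pol = !!(flags & HSYNC_ACTIVE_HIGH);
	/* Bit 0 is inverted vs. the datasheet: 1 means VSYNC active low. */
	uint8_t vsync_pol = !!(flags & VSYNC_ACTIVE_LOW);

	return (pclk_pol << 5) | (hsync_pol << 1) | vsync_pol;
}

int main(void)
{
	printf("0x%02x\n",
	       polarity_ctrl00(PCLK_SAMPLE_RISING | VSYNC_ACTIVE_LOW));
	return 0;	/* prints 0x21 */
}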
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 8537cc4ca108..9540ce8918f0 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -666,8 +666,8 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
/* Propagate change of current control to all related controls */
if (ctrl->id == V4L2_CID_VBLANK) {
/* Update max exposure while meeting expected vblanking */
- exposure_max = (ov5675->cur_mode->height + ctrl->val -
- OV5675_EXPOSURE_MAX_MARGIN) / 2;
+ exposure_max = ov5675->cur_mode->height + ctrl->val -
+ OV5675_EXPOSURE_MAX_MARGIN;
__v4l2_ctrl_modify_range(ov5675->exposure,
ov5675->exposure->minimum,
exposure_max, ov5675->exposure->step,
@@ -689,7 +689,13 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_EXPOSURE:
- /* 3 least significant bits of expsoure are fractional part */
+ /* 4 least significant bits of expsoure are fractional part
+ * val = val << 4
+ * for ov5675, the unit of exposure is differnt from other
+ * OmniVision sensors, its exposure value is twice of the
+ * register value, the exposure should be divided by 2 before
+ * set register, e.g. val << 3.
+ */
ret = ov5675_write_reg(ov5675, OV5675_REG_EXPOSURE,
OV5675_REG_VALUE_24BIT, ctrl->val << 3);
break;
@@ -770,8 +776,7 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
OV5675_DGTL_GAIN_MIN, OV5675_DGTL_GAIN_MAX,
OV5675_DGTL_GAIN_STEP, OV5675_DGTL_GAIN_DEFAULT);
- exposure_max = (ov5675->cur_mode->vts_def -
- OV5675_EXPOSURE_MAX_MARGIN) / 2;
+ exposure_max = (ov5675->cur_mode->vts_def - OV5675_EXPOSURE_MAX_MARGIN);
ov5675->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
V4L2_CID_EXPOSURE,
OV5675_EXPOSURE_MIN, exposure_max,
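
[Editor's sketch: per the corrected comment above, four fractional exposure bits would give (val << 4), and since this sensor's exposure is twice the register value, the programmed value is half of that, i.e. (val << 3). A standalone check of that arithmetic:]

#include <assert.h>
#include <stdint.h>

/* (val << 4) / 2 == val << 3: halve the 4-fractional-bit value. */
static uint32_t ov5675_exposure_regval(uint32_t val)
{
	return val << 3;
}

int main(void)
{
	uint32_t val = 100;

	assert(ov5675_exposure_regval(val) == (val << 4) / 2);
	return 0;
}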
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 91906b94f978..d73f9f540932 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -685,7 +685,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
- /* fall through */
+ fallthrough;
case MEDIA_BUS_FMT_Y8_1X8:
case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_YUYV8_2X8:
@@ -694,7 +694,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- /* fall through */
+ fallthrough;
case MEDIA_BUS_FMT_SBGGR8_1X8:
break;
}
@@ -921,55 +921,74 @@ static const struct v4l2_subdev_core_ops ov6650_core_ops = {
};
/* Request bus settings on camera side */
-static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 comj, comf;
+ int ret;
+
+ ret = ov6650_reg_read(client, REG_COMJ, &comj);
+ if (ret)
+ return ret;
- cfg->flags = V4L2_MBUS_MASTER |
- V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
+ ret = ov6650_reg_read(client, REG_COMF, &comf);
+ if (ret)
+ return ret;
+
+ cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
+ | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
+ : V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
+ : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
+ : V4L2_MBUS_PCLK_SAMPLE_FALLING);
cfg->type = V4L2_MBUS_PARALLEL;
return 0;
}
/* Alter bus settings on camera side */
-static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
- const struct v4l2_mbus_config *cfg)
+static int ov6650_set_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
+ int ret = 0;
if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
- else
+ else if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
if (ret)
return ret;
if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
- else
+ else if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
if (ret)
return ret;
if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
- else
+ else if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
+ if (ret)
+ return ret;
- return ret;
+ /*
+ * Update the configuration to report what is actually applied to
+ * the hardware.
+ */
+ return ov6650_get_mbus_config(sd, pad, cfg);
}
static const struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
.g_frame_interval = ov6650_g_frame_interval,
.s_frame_interval = ov6650_s_frame_interval,
- .g_mbus_config = ov6650_g_mbus_config,
- .s_mbus_config = ov6650_s_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
@@ -978,6 +997,8 @@ static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
.set_selection = ov6650_set_selection,
.get_fmt = ov6650_get_fmt,
.set_fmt = ov6650_set_fmt,
+ .get_mbus_config = ov6650_get_mbus_config,
+ .set_mbus_config = ov6650_set_mbus_config,
};
static const struct v4l2_subdev_ops ov6650_subdev_ops = {
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 732655fe4ba3..5832461c032d 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1068,13 +1068,6 @@ static int ov7740_probe(struct i2c_client *client)
struct v4l2_subdev *sd;
int ret;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev,
- "OV7740: I2C-Adapter doesn't support SMBUS\n");
- return -EIO;
- }
-
ov7740 = devm_kzalloc(&client->dev, sizeof(*ov7740), GFP_KERNEL);
if (!ov7740)
return -ENOMEM;
@@ -1091,7 +1084,7 @@ static int ov7740_probe(struct i2c_client *client)
if (ret)
return ret;
- ov7740->regmap = devm_regmap_init_i2c(client, &ov7740_regmap_config);
+ ov7740->regmap = devm_regmap_init_sccb(client, &ov7740_regmap_config);
if (IS_ERR(ov7740->regmap)) {
ret = PTR_ERR(ov7740->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
@@ -1100,7 +1093,6 @@ static int ov7740_probe(struct i2c_client *client)
}
sd = &ov7740->subdev;
- client->flags |= I2C_CLIENT_SCCB;
v4l2_i2c_subdev_init(sd, client, &ov7740_subdev_ops);
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
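
[Editor's sketch: after this change the ov7740 gets SCCB semantics from the regmap core instead of manually setting I2C_CLIENT_SCCB. A hedged kernel-style fragment of the resulting setup, not standalone; the config values mirror the 8-bit register layout the driver already uses, and the function name is local to this sketch.]

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* devm_regmap_init_sccb() replaces devm_regmap_init_i2c() and makes
 * the manual I2C_CLIENT_SCCB flag twiddling unnecessary. */
static int example_init_regmap(struct i2c_client *client,
			       struct regmap **map)
{
	*map = devm_regmap_init_sccb(client, &example_regmap_config);
	return PTR_ERR_OR_ZERO(*map);
}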
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 4ca27675cc5a..2f4ceaa80593 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -338,6 +338,209 @@ static const struct ov8856_reg mode_3280x2464_regs[] = {
{0x5e00, 0x00}
};
+static const struct ov8856_reg mode_3264x2448_regs[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x3c},
+ {0x0303, 0x01},
+ {0x031e, 0x0c},
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x9a},
+ {0x3502, 0x20},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x60},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366d, 0x00},
+ {0x366e, 0x10},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x23},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa3},
+ {0x3808, 0x0c},
+ {0x3809, 0xc0},
+ {0x380a, 0x09},
+ {0x380b, 0x90},
+ {0x380c, 0x07},
+ {0x380d, 0x8c},
+ {0x380e, 0x09},
+ {0x380f, 0xb2},
+ {0x3810, 0x00},
+ {0x3811, 0x04},
+ {0x3812, 0x00},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x46},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x0b},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4502, 0x50},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x50},
+ {0x481f, 0x27},
+ {0x4823, 0x3c},
+ {0x482b, 0x00},
+ {0x4831, 0x66},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x77},
+ {0x5001, 0x0a},
+ {0x5003, 0xc8},
+ {0x5004, 0x04},
+ {0x5006, 0x00},
+ {0x5007, 0x00},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x5795, 0x02},
+ {0x5796, 0x20},
+ {0x5797, 0x20},
+ {0x5798, 0xd5},
+ {0x5799, 0xd5},
+ {0x579a, 0x00},
+ {0x579b, 0x50},
+ {0x579c, 0x00},
+ {0x579d, 0x2c},
+ {0x579e, 0x0c},
+ {0x579f, 0x40},
+ {0x57a0, 0x09},
+ {0x57a1, 0x40},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00},
+ {0x5e10, 0xfc}
+};
+
static const struct ov8856_reg mode_1640x1232_regs[] = {
{0x3000, 0x20},
{0x3003, 0x08},
@@ -528,6 +731,209 @@ static const struct ov8856_reg mode_1640x1232_regs[] = {
{0x5e00, 0x00}
};
+static const struct ov8856_reg mode_1632x1224_regs[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x3c},
+ {0x0303, 0x01},
+ {0x031e, 0x0c},
+ {0x3000, 0x20},
+ {0x3003, 0x08},
+ {0x300e, 0x20},
+ {0x3010, 0x00},
+ {0x3015, 0x84},
+ {0x3018, 0x72},
+ {0x3021, 0x23},
+ {0x3033, 0x24},
+ {0x3500, 0x00},
+ {0x3501, 0x4c},
+ {0x3502, 0xe0},
+ {0x3503, 0x08},
+ {0x3505, 0x83},
+ {0x3508, 0x01},
+ {0x3509, 0x80},
+ {0x350c, 0x00},
+ {0x350d, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x72},
+ {0x3601, 0x40},
+ {0x3602, 0x30},
+ {0x3610, 0xc5},
+ {0x3611, 0x58},
+ {0x3612, 0x5c},
+ {0x3613, 0xca},
+ {0x3614, 0x60},
+ {0x3628, 0xff},
+ {0x3629, 0xff},
+ {0x362a, 0xff},
+ {0x3633, 0x10},
+ {0x3634, 0x10},
+ {0x3635, 0x10},
+ {0x3636, 0x10},
+ {0x3663, 0x08},
+ {0x3669, 0x34},
+ {0x366d, 0x00},
+ {0x366e, 0x08},
+ {0x3706, 0x86},
+ {0x370b, 0x7e},
+ {0x3714, 0x27},
+ {0x3730, 0x12},
+ {0x3733, 0x10},
+ {0x3764, 0x00},
+ {0x3765, 0x00},
+ {0x3769, 0x62},
+ {0x376a, 0x2a},
+ {0x376b, 0x30},
+ {0x3780, 0x00},
+ {0x3781, 0x24},
+ {0x3782, 0x00},
+ {0x3783, 0x23},
+ {0x3798, 0x2f},
+ {0x37a1, 0x60},
+ {0x37a8, 0x6a},
+ {0x37ab, 0x3f},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c9, 0x80},
+ {0x37cb, 0x16},
+ {0x37cc, 0x16},
+ {0x37cd, 0x16},
+ {0x37ce, 0x16},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x0c},
+ {0x3804, 0x0c},
+ {0x3805, 0xdf},
+ {0x3806, 0x09},
+ {0x3807, 0xa3},
+ {0x3808, 0x06},
+ {0x3809, 0x60},
+ {0x380a, 0x04},
+ {0x380b, 0xc8},
+ {0x380c, 0x07},
+ {0x380d, 0x8c},
+ {0x380e, 0x09},
+ {0x380f, 0xb2},
+ {0x3810, 0x00},
+ {0x3811, 0x02},
+ {0x3812, 0x00},
+ {0x3813, 0x02},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x10},
+ {0x3820, 0x80},
+ {0x3821, 0x47},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x06},
+ {0x3836, 0x02},
+ {0x3862, 0x04},
+ {0x3863, 0x08},
+ {0x3cc0, 0x33},
+ {0x3d85, 0x17},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xde},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x400a, 0x00},
+ {0x400b, 0x84},
+ {0x400f, 0x80},
+ {0x4010, 0xf0},
+ {0x4011, 0xff},
+ {0x4012, 0x02},
+ {0x4013, 0x01},
+ {0x4014, 0x01},
+ {0x4015, 0x01},
+ {0x4042, 0x00},
+ {0x4043, 0x80},
+ {0x4044, 0x00},
+ {0x4045, 0x80},
+ {0x4046, 0x00},
+ {0x4047, 0x80},
+ {0x4048, 0x00},
+ {0x4049, 0x80},
+ {0x4041, 0x03},
+ {0x404c, 0x20},
+ {0x404d, 0x00},
+ {0x404e, 0x20},
+ {0x4203, 0x80},
+ {0x4307, 0x30},
+ {0x4317, 0x00},
+ {0x4502, 0x50},
+ {0x4503, 0x08},
+ {0x4601, 0x80},
+ {0x4800, 0x44},
+ {0x4816, 0x53},
+ {0x481b, 0x50},
+ {0x481f, 0x27},
+ {0x4823, 0x3c},
+ {0x482b, 0x00},
+ {0x4831, 0x66},
+ {0x4837, 0x16},
+ {0x483c, 0x0f},
+ {0x484b, 0x05},
+ {0x5000, 0x77},
+ {0x5001, 0x0a},
+ {0x5003, 0xc8},
+ {0x5004, 0x04},
+ {0x5006, 0x00},
+ {0x5007, 0x00},
+ {0x502e, 0x03},
+ {0x5030, 0x41},
+ {0x5795, 0x00},
+ {0x5796, 0x10},
+ {0x5797, 0x10},
+ {0x5798, 0x73},
+ {0x5799, 0x73},
+ {0x579a, 0x00},
+ {0x579b, 0x28},
+ {0x579c, 0x00},
+ {0x579d, 0x16},
+ {0x579e, 0x06},
+ {0x579f, 0x20},
+ {0x57a0, 0x04},
+ {0x57a1, 0xa0},
+ {0x5780, 0x14},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x59f8, 0x3d},
+ {0x5a08, 0x02},
+ {0x5b00, 0x02},
+ {0x5b01, 0x10},
+ {0x5b02, 0x03},
+ {0x5b03, 0xcf},
+ {0x5b05, 0x6c},
+ {0x5e00, 0x00},
+ {0x5e10, 0xfc}
+};
+
static const char * const ov8856_test_pattern_menu[] = {
"Disabled",
"Standard Color Bar",
@@ -570,6 +976,18 @@ static const struct ov8856_mode supported_modes[] = {
.link_freq_index = OV8856_LINK_FREQ_720MBPS,
},
{
+ .width = 3264,
+ .height = 2448,
+ .hts = 1932,
+ .vts_def = 2482,
+ .vts_min = 2482,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs),
+ .regs = mode_3264x2448_regs,
+ },
+ .link_freq_index = OV8856_LINK_FREQ_720MBPS,
+ },
+ {
.width = 1640,
.height = 1232,
.hts = 3820,
@@ -580,6 +998,18 @@ static const struct ov8856_mode supported_modes[] = {
.regs = mode_1640x1232_regs,
},
.link_freq_index = OV8856_LINK_FREQ_360MBPS,
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .hts = 1932,
+ .vts_def = 2482,
+ .vts_min = 2482,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_regs),
+ .regs = mode_1632x1224_regs,
+ },
+ .link_freq_index = OV8856_LINK_FREQ_360MBPS,
}
};
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 3a21f51d9325..e2a25240fc85 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -538,7 +538,7 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
break;
default:
mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- /* fall through */
+ fallthrough;
case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
@@ -648,8 +648,9 @@ static const struct v4l2_subdev_core_ops ov9640_core_ops = {
};
/* Request bus settings on camera side */
-static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int ov9640_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
@@ -661,13 +662,13 @@ static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops ov9640_video_ops = {
.s_stream = ov9640_s_stream,
- .g_mbus_config = ov9640_g_mbus_config,
};
static const struct v4l2_subdev_pad_ops ov9640_pad_ops = {
.enum_mbus_code = ov9640_enum_mbus_code,
.get_selection = ov9640_get_selection,
.set_fmt = ov9640_set_fmt,
+ .get_mbus_config = ov9640_get_mbus_config,
};
static const struct v4l2_subdev_ops ov9640_subdev_ops = {
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
index 71cf68a95bb2..141ad0ba7f5a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -46,7 +46,7 @@ static int s5c73m3_get_af_status(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
break;
default:
v4l2_info(&state->sensor_sd, "Unknown AF status %#x\n", reg);
- /* Fall through */
+ fallthrough;
case REG_CAF_STATUS_UNFOCUSED:
case REG_AF_STATUS_UNFOCUSED:
case REG_AF_STATUS_INVALID:
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 42584a088273..ec6f22efe19a 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -280,8 +280,7 @@ struct s5k5baf_fw {
struct {
u16 id;
u16 offset;
- } seq[0];
- u16 data[];
+ } seq[];
};
struct s5k5baf {
@@ -563,7 +562,7 @@ static u16 *s5k5baf_fw_get_seq(struct s5k5baf *state, u16 seq_id)
if (fw == NULL)
return NULL;
- data = fw->data + 2 * fw->count;
+ data = &fw->seq[0].id + 2 * fw->count;
for (i = 0; i < fw->count; ++i) {
if (fw->seq[i].id == seq_id)
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 8a9c7de0c056..6fc0680a93d0 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1721,7 +1721,7 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
sensor->binning_vertical = 1;
}
}
- /* Fall through */
+ fallthrough;
case V4L2_SEL_TGT_COMPOSE:
*crops[SMIAPP_PAD_SRC] = *comp;
break;
@@ -2120,7 +2120,7 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
&& SMIA_LIM(sensor, SCALING_CAPABILITY)
!= SMIAPP_SCALING_CAPABILITY_NONE)
return 0;
- /* Fall through */
+ fallthrough;
default:
return -EINVAL;
}
@@ -2795,7 +2795,7 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
case 180:
hwcfg->module_board_orient =
SMIAPP_MODULE_BOARD_ORIENT_180;
- /* Fall through */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index dbbab75f135e..831b5b54fd78 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
.adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
};
-static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
- bool *handled)
+static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
+ bool *handled)
{
struct tc358743_state *state = to_state(sd);
unsigned int cec_rxint, cec_txint;
@@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
cec_transmit_attempt_done(state->cec_adap,
CEC_TX_STATUS_ERROR);
}
- *handled = true;
+ if (handled)
+ *handled = true;
}
if ((intstatus & MASK_CEC_RINT) &&
(cec_rxint & MASK_CECRIEND)) {
@@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
msg.msg[i] = v & 0xff;
}
cec_received_msg(state->cec_adap, &msg);
- *handled = true;
+ if (handled)
+ *handled = true;
}
i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
@@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
#ifdef CONFIG_VIDEO_TC358743_CEC
if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
- tc358743_cec_isr(sd, intstatus, handled);
+ tc358743_cec_handler(sd, intstatus, handled);
i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
@@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
{
struct tc358743_state *state = dev_id;
- bool handled;
+ bool handled = false;
tc358743_isr(&state->sd, 0, &handled);
@@ -1602,8 +1604,9 @@ static int tc358743_dv_timings_cap(struct v4l2_subdev *sd,
return 0;
}
-static int tc358743_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int tc358743_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
struct tc358743_state *state = to_state(sd);
@@ -1836,7 +1839,6 @@ static const struct v4l2_subdev_video_ops tc358743_video_ops = {
.s_dv_timings = tc358743_s_dv_timings,
.g_dv_timings = tc358743_g_dv_timings,
.query_dv_timings = tc358743_query_dv_timings,
- .g_mbus_config = tc358743_g_mbus_config,
.s_stream = tc358743_s_stream,
};
@@ -1848,6 +1850,7 @@ static const struct v4l2_subdev_pad_ops tc358743_pad_ops = {
.set_edid = tc358743_s_edid,
.enum_dv_timings = tc358743_enum_dv_timings,
.dv_timings_cap = tc358743_dv_timings_cap,
+ .get_mbus_config = tc358743_get_mbus_config,
};
static const struct v4l2_subdev_ops tc358743_ops = {
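
[Editor's sketch: the tc358743 change guards every write through the handled pointer, so the routine can also be called without a flag, and the IRQ path now initialises the flag before use. A generic standalone rendering of that defensive pattern; the status bit and function names are made up.]

#include <stdbool.h>
#include <stddef.h>

static void service_events(unsigned int status, bool *handled)
{
	if (status & 0x1) {
		/* ... acknowledge the event ... */
		if (handled)	/* caller may not care about the flag */
			*handled = true;
	}
}

int main(void)
{
	bool handled = false;	/* initialised, unlike the old IRQ path */

	service_events(0x1, &handled);
	service_events(0x1, NULL);	/* also legal now */
	return handled ? 0 : 1;
}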
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 5e68182001ec..a09bf0a39d05 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -908,7 +908,7 @@ tda1997x_configure_audout(struct v4l2_subdev *sd, u8 channel_assignment)
{
struct tda1997x_state *state = to_state(sd);
struct tda1997x_platform_data *pdata = &state->pdata;
- bool sp_used_by_fifo = 1;
+ bool sp_used_by_fifo = true;
u8 reg;
if (!pdata->audout_format)
@@ -936,7 +936,7 @@ tda1997x_configure_audout(struct v4l2_subdev *sd, u8 channel_assignment)
break;
case AUDCFG_TYPE_DST:
reg |= AUDCFG_TYPE_DST << AUDCFG_TYPE_SHIFT;
- sp_used_by_fifo = 0;
+ sp_used_by_fifo = false;
break;
case AUDCFG_TYPE_HBR:
reg |= AUDCFG_TYPE_HBR << AUDCFG_TYPE_SHIFT;
@@ -944,7 +944,7 @@ tda1997x_configure_audout(struct v4l2_subdev *sd, u8 channel_assignment)
/* demuxed via AP0:AP3 */
reg |= AUDCFG_HBR_DEMUX << AUDCFG_HBR_SHIFT;
if (pdata->audout_format == AUDFMT_TYPE_SPDIF)
- sp_used_by_fifo = 0;
+ sp_used_by_fifo = false;
} else {
/* straight via AP0 */
reg |= AUDCFG_HBR_STRAIGHT << AUDCFG_HBR_SHIFT;
@@ -2588,7 +2588,7 @@ static int tda1997x_probe(struct i2c_client *client,
case 36:
mbus_codes[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
mbus_codes[i++] = MEDIA_BUS_FMT_YUV12_1X36;
- /* fall-through */
+ fallthrough;
case 24:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
break;
@@ -2617,10 +2617,10 @@ static int tda1997x_probe(struct i2c_client *client,
mbus_codes[i++] = MEDIA_BUS_FMT_RGB888_1X24;
mbus_codes[i++] = MEDIA_BUS_FMT_YUV8_1X24;
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
- /* fall through */
+ fallthrough;
case 20:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
- /* fall through */
+ fallthrough;
case 16:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
break;
@@ -2633,10 +2633,10 @@ static int tda1997x_probe(struct i2c_client *client,
case 16:
case 12:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY12_2X12;
- /* fall through */
+ fallthrough;
case 10:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY10_2X10;
- /* fall through */
+ fallthrough;
case 8:
mbus_codes[i++] = MEDIA_BUS_FMT_UYVY8_2X8;
break;
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 9df575238952..7d9401219a3a 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -293,7 +293,7 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
switch (decoder->input) {
case TVP5150_COMPOSITE1:
input |= 2;
- /* fall through */
+ fallthrough;
case TVP5150_COMPOSITE0:
break;
case TVP5150_SVIDEO:
@@ -1191,8 +1191,9 @@ static int tvp5150_get_selection(struct v4l2_subdev *sd,
}
}
-static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
+static int tvp5150_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
{
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -1721,7 +1722,6 @@ static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
.querystd = tvp5150_querystd,
.s_stream = tvp5150_s_stream,
.s_routing = tvp5150_s_routing,
- .g_mbus_config = tvp5150_g_mbus_config,
};
static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
@@ -1739,6 +1739,7 @@ static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
.get_fmt = tvp5150_fill_fmt,
.get_selection = tvp5150_get_selection,
.set_selection = tvp5150_set_selection,
+ .get_mbus_config = tvp5150_get_mbus_config,
};
static const struct v4l2_subdev_ops tvp5150_ops = {
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index de313b1306da..ada4ec5ef782 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -688,9 +688,11 @@ static int tvp7002_g_register(struct v4l2_subdev *sd,
int ret;
ret = tvp7002_read(sd, reg->reg & 0xff, &val);
+ if (ret < 0)
+ return ret;
reg->val = val;
reg->size = 1;
- return ret;
+ return 0;
}
/*
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index da8088351135..9e56d2ad6b94 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -370,10 +370,11 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
return ret;
}
-static long media_device_request_alloc(struct media_device *mdev,
- int *alloc_fd)
+static long media_device_request_alloc(struct media_device *mdev, void *arg)
{
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
+ int *alloc_fd = arg;
+
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
@@ -407,7 +408,7 @@ static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
#define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \
[_IOC_NR(MEDIA_IOC_##__cmd)] = { \
.cmd = MEDIA_IOC_##__cmd, \
- .fn = (long (*)(struct media_device *, void *))func, \
+ .fn = func, \
.flags = fl, \
.arg_from_user = from_user, \
.arg_to_user = to_user, \
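The payoff of the mc-device hunk is removing the function-pointer cast from the MEDIA_IOC_ARG() table: every handler now takes void *arg and unpacks it itself, so .fn = func type-checks directly. Calling through a pointer cast to a different function type is undefined behaviour and is rejected by control-flow-integrity builds. A reduced sketch of the idea, with hypothetical names:

	struct ioc_entry {
		long (*fn)(struct media_device *mdev, void *arg);
	};

	static long req_alloc(struct media_device *mdev, void *arg)
	{
		int *alloc_fd = arg;	/* each handler casts its own argument */

		*alloc_fd = -1;		/* hypothetical body */
		return 0;
	}

	static const struct ioc_entry handlers[] = {
		{ .fn = req_alloc },	/* no cast needed */
	};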
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9144f795fb93..8824dd0fb331 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2332,7 +2332,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
field = V4L2_FIELD_SEQ_TB;
break;
}
- /* fall through */
+ fallthrough;
default: /* FIELD_ANY case */
height2 = btv->crop[!!fh->do_crop].rect.height >> 1;
field = (f->fmt.pix.height > height2)
@@ -4013,11 +4013,13 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
btv->id = dev->device;
if (pci_enable_device(dev)) {
pr_warn("%d: Can't enable device\n", btv->c.nr);
- return -EIO;
+ result = -EIO;
+ goto free_mem;
}
if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
pr_warn("%d: No suitable DMA available\n", btv->c.nr);
- return -EIO;
+ result = -EIO;
+ goto free_mem;
}
if (!request_mem_region(pci_resource_start(dev,0),
pci_resource_len(dev,0),
@@ -4025,7 +4027,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
pr_warn("%d: can't request iomem (0x%llx)\n",
btv->c.nr,
(unsigned long long)pci_resource_start(dev, 0));
- return -EBUSY;
+ result = -EBUSY;
+ goto free_mem;
}
pci_set_master(dev);
pci_set_command(dev);
@@ -4211,6 +4214,10 @@ fail0:
release_mem_region(pci_resource_start(btv->c.pci,0),
pci_resource_len(btv->c.pci,0));
pci_disable_device(btv->c.pci);
+
+free_mem:
+ bttvs[btv->c.nr] = NULL;
+ kfree(btv);
return result;
}
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index 02ebd43e672e..4cb890b949c3 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -39,9 +39,10 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
-static void dvb_bt8xx_task(unsigned long data)
+static void dvb_bt8xx_task(struct tasklet_struct *t)
{
- struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *)data;
+ struct bt878 *bt = from_tasklet(bt, t, tasklet);
+ struct dvb_bt8xx_card *card = dev_get_drvdata(&bt->adapter->dev);
dprintk("%d\n", card->bt->finished_block);
@@ -777,7 +778,7 @@ static int dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
goto err_disconnect_frontend;
}
- tasklet_init(&card->bt->tasklet, dvb_bt8xx_task, (unsigned long) card);
+ tasklet_setup(&card->bt->tasklet, dvb_bt8xx_task);
frontend_init(card, type);
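This is the first of many conversions in this series from tasklet_init() with an unsigned long cookie to tasklet_setup(), where the callback receives the tasklet pointer and recovers its container with from_tasklet(), a container_of() wrapper. A minimal sketch, assuming a hypothetical struct foo that embeds its tasklet in a member named tl:

	#include <linux/interrupt.h>

	struct foo {
		struct tasklet_struct tl;
		int pending;
	};

	static void foo_task(struct tasklet_struct *t)
	{
		/* from_tasklet(var, t, member) == container_of(t, typeof(*var), member) */
		struct foo *f = from_tasklet(f, t, tl);

		f->pending = 0;
	}

	static void foo_init(struct foo *f)
	{
		tasklet_setup(&f->tl, foo_task);
	}

The dvb-bt8xx case is the odd one out: its tasklet is embedded in struct bt878 rather than in the card, so the callback recovers the bt878 via from_tasklet() and then fetches the card from driver data.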
diff --git a/drivers/media/pci/cobalt/cobalt-i2c.c b/drivers/media/pci/cobalt/cobalt-i2c.c
index c374dae78bf7..10c9ee33f73e 100644
--- a/drivers/media/pci/cobalt/cobalt-i2c.c
+++ b/drivers/media/pci/cobalt/cobalt-i2c.c
@@ -118,11 +118,11 @@ static int cobalt_tx_bytes(struct cobalt_i2c_regs __iomem *regs,
iowrite8(data[i], &regs->txr_rxr);
/* Setup command */
- if (i == 0 && start != 0) {
+ if (i == 0 && start) {
/* Write + Start */
cmd = M00018_CR_BITMAP_WR_MSK |
M00018_CR_BITMAP_STA_MSK;
- } else if (i == len - 1 && stop != 0) {
+ } else if (i == len - 1 && stop) {
/* Write + Stop */
cmd = M00018_CR_BITMAP_WR_MSK |
M00018_CR_BITMAP_STO_MSK;
@@ -173,11 +173,11 @@ static int cobalt_rx_bytes(struct cobalt_i2c_regs __iomem *regs,
for (i = 0; i < len; i++) {
/* Setup command */
- if (i == 0 && start != 0) {
+ if (i == 0 && start) {
/* Read + Start */
cmd = M00018_CR_BITMAP_RD_MSK |
M00018_CR_BITMAP_STA_MSK;
- } else if (i == len - 1 && stop != 0) {
+ } else if (i == len - 1 && stop) {
/* Read + Stop */
cmd = M00018_CR_BITMAP_RD_MSK |
M00018_CR_BITMAP_STO_MSK;
diff --git a/drivers/media/pci/cobalt/cobalt-omnitek.c b/drivers/media/pci/cobalt/cobalt-omnitek.c
index 4c137453e679..01b82a2e8d33 100644
--- a/drivers/media/pci/cobalt/cobalt-omnitek.c
+++ b/drivers/media/pci/cobalt/cobalt-omnitek.c
@@ -116,7 +116,7 @@ void omni_sg_dma_abort_channel(struct cobalt_stream *s)
{
struct cobalt *cobalt = s->cobalt;
- if (is_dma_done(s) == false)
+ if (!is_dma_done(s))
iowrite32(ABORT, CS_REG(s->dma_channel));
}
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index df44ed7393a0..13689c5dd47f 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -68,7 +68,8 @@ MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");
#define AUD_INT_MCHG_IRQ (1 << 21)
#define GP_COUNT_CONTROL_RESET 0x3
-static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)
+static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip,
+ unsigned long nr_pages)
{
struct cx23885_audio_buffer *buf = chip->buf;
struct page *pg;
@@ -76,11 +77,11 @@ static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == buf->vaddr) {
- dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1, "vmalloc is at addr %p, size=%d\n",
+ dprintk(1, "vmalloc is at addr %p, size=%lu\n",
buf->vaddr, nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
@@ -113,7 +114,7 @@ static int cx23885_alsa_dma_map(struct cx23885_audio_dev *dev)
struct cx23885_audio_buffer *buf = dev->buf;
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
- buf->nr_pages, PCI_DMA_FROMDEVICE);
+ buf->nr_pages, DMA_FROM_DEVICE);
if (0 == buf->sglen) {
pr_warn("%s: cx23885_alsa_map_sg failed\n", __func__);
@@ -129,7 +130,7 @@ static int cx23885_alsa_dma_unmap(struct cx23885_audio_dev *dev)
if (!buf->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->nr_pages, DMA_FROM_DEVICE);
buf->sglen = 0;
return 0;
}
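These hunks carry two independent fixes that repeat across the cx23885, cx25821, cx88 and saa7134 ALSA code below: the deprecated PCI_DMA_FROMDEVICE constant becomes the generic DMA_FROM_DEVICE, and dma_unmap_sg() is passed the original entry count (nr_pages) instead of the count dma_map_sg() returned. The DMA API requires unmap to use the same nents that was given to map; the return value only reports how many entries the IOMMU coalesced. A minimal sketch of the contract, with a hypothetical buffer struct:

	#include <linux/dma-mapping.h>

	struct foo_buf {			/* hypothetical */
		struct scatterlist *sglist;
		unsigned long nr_pages;
		int sglen;
	};

	static int foo_map(struct device *dev, struct foo_buf *buf)
	{
		buf->sglen = dma_map_sg(dev, buf->sglist, buf->nr_pages,
					DMA_FROM_DEVICE);
		if (buf->sglen == 0)
			return -EIO;
		return 0;	/* program hardware with buf->sglen entries */
	}

	static void foo_unmap(struct device *dev, struct foo_buf *buf)
	{
		/* must pass the original nr_pages, not the mapped count */
		dma_unmap_sg(dev, buf->sglist, buf->nr_pages, DMA_FROM_DEVICE);
		buf->sglen = 0;
	}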
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 440d108b7ddd..a380e0920a21 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -637,7 +637,7 @@ static int vidioc_querycap(struct file *file, void *priv,
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
cap->capabilities = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE |
- V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_DEVICE_CAPS;
switch (dev->board) { /* i2c device tuners */
case CX23885_BOARD_HAUPPAUGE_HVR1265_K4:
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index c472498e57c4..349462ee2c48 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -325,8 +325,8 @@ struct cx23885_audio_buffer {
struct cx23885_riscmem risc;
void *vaddr;
struct scatterlist *sglist;
- int sglen;
- int nr_pages;
+ int sglen;
+ unsigned long nr_pages;
};
struct cx23885_audio_dev {
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index ad7f8ccad526..ddfd2eb37484 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -663,7 +663,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
}
v = (unsigned) pulse_width_count_to_ns(
- (u16) (p->hw_fifo_data & FIFO_RXTX), divider);
+ (u16)(p->hw_fifo_data & FIFO_RXTX), divider) / 1000;
if (v > IR_MAX_DURATION)
v = IR_MAX_DURATION;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 301616426d8a..608fbaf0f659 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -53,8 +53,8 @@ struct cx25821_audio_buffer {
struct cx25821_riscmem risc;
void *vaddr;
struct scatterlist *sglist;
- int sglen;
- int nr_pages;
+ int sglen;
+ unsigned long nr_pages;
};
struct cx25821_audio_dev {
@@ -131,7 +131,8 @@ MODULE_PARM_DESC(debug, "enable debug messages");
#define PCI_MSK_AUD_EXT (1 << 4)
#define PCI_MSK_AUD_INT (1 << 3)
-static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip, int nr_pages)
+static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip,
+ unsigned long nr_pages)
{
struct cx25821_audio_buffer *buf = chip->buf;
struct page *pg;
@@ -139,11 +140,11 @@ static int cx25821_alsa_dma_init(struct cx25821_audio_dev *chip, int nr_pages)
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == buf->vaddr) {
- dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1, "vmalloc is at addr 0x%p, size=%d\n",
+ dprintk(1, "vmalloc is at addr 0x%p, size=%lu\n",
buf->vaddr,
nr_pages << PAGE_SHIFT);
@@ -177,7 +178,7 @@ static int cx25821_alsa_dma_map(struct cx25821_audio_dev *dev)
struct cx25821_audio_buffer *buf = dev->buf;
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
- buf->nr_pages, PCI_DMA_FROMDEVICE);
+ buf->nr_pages, DMA_FROM_DEVICE);
if (0 == buf->sglen) {
pr_warn("%s: cx25821_alsa_map_sg failed\n", __func__);
@@ -193,7 +194,7 @@ static int cx25821_alsa_dma_unmap(struct cx25821_audio_dev *dev)
if (!buf->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->nr_pages, DMA_FROM_DEVICE);
buf->sglen = 0;
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 7d7aceecc985..95e0cbb1277d 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -42,12 +42,12 @@
*/
struct cx88_audio_buffer {
- unsigned int bpl;
- struct cx88_riscmem risc;
+ unsigned int bpl;
+ struct cx88_riscmem risc;
void *vaddr;
struct scatterlist *sglist;
int sglen;
- int nr_pages;
+ unsigned long nr_pages;
};
struct cx88_audio_dev {
@@ -271,7 +271,8 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
+static int cx88_alsa_dma_init(struct cx88_audio_dev *chip,
+ unsigned long nr_pages)
{
struct cx88_audio_buffer *buf = chip->buf;
struct page *pg;
@@ -279,11 +280,11 @@ static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (!buf->vaddr) {
- dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
return -ENOMEM;
}
- dprintk(1, "vmalloc is at addr %p, size=%d\n",
+ dprintk(1, "vmalloc is at addr %p, size=%lu\n",
buf->vaddr, nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
@@ -316,7 +317,7 @@ static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
struct cx88_audio_buffer *buf = dev->buf;
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
- buf->nr_pages, PCI_DMA_FROMDEVICE);
+ buf->nr_pages, DMA_FROM_DEVICE);
if (buf->sglen == 0) {
pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
@@ -332,8 +333,8 @@ static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
if (!buf->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->nr_pages,
+ DMA_FROM_DEVICE);
buf->sglen = 0;
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 9fa388626bae..8e224fc0474d 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3499,7 +3499,7 @@ static void cx88_card_setup(struct cx88_core *core)
cx_clear(MO_GP0_IO, 0x00000040);
msleep(1000);
cx_set(MO_GP0_IO, 0x00004040);
- /* FALLTHROUGH */
+ fallthrough;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 7e0fed9cd200..ce0ef0b8186f 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -479,7 +479,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
dev->scancode_mask = hardware_mask;
if (ir->sampling) {
- dev->timeout = 10 * 1000 * 1000; /* 10 ms */
+ dev->timeout = MS_TO_US(10); /* 10 ms */
} else {
dev->driver_type = RC_DRIVER_SCANCODE;
dev->allowed_protocols = rc_proto;
@@ -544,7 +544,7 @@ void cx88_ir_irq(struct cx88_core *core)
for (todo = 32; todo > 0; todo -= bits) {
ev.pulse = samples & 0x80000000 ? false : true;
bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
- ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
+ ev.duration = (bits * (USEC_PER_SEC / 1000)) / ir_samplerate;
ir_raw_event_store_with_filter(ir->dev, &ev);
samples <<= bits;
}
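The cx88-input changes (and the smipcie-ir ones further down) track an rc-core switch of time base: IR durations, timeouts and resolutions are now expressed in microseconds instead of nanoseconds. In sketch form, the before/after of the two lines touched here:

	/* old: rc-core durations in nanoseconds */
	dev->timeout = 10 * 1000 * 1000;	/* 10 ms */
	ev.duration  = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;

	/* new: rc-core durations in microseconds */
	dev->timeout = MS_TO_US(10);		/* 10 ms */
	ev.duration  = (bits * (USEC_PER_SEC / 1000)) / ir_samplerate;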
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index e7fd7516787c..8cffdacf6007 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1385,7 +1385,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
request_module("rtc-isl1208");
core->i2c_rtc = i2c_new_client_device(&core->i2c_adap, &rtc_info);
}
- /* fall-through */
+ fallthrough;
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
request_module("ir-kbd-i2c");
}
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index ef8d5c9cfffe..961f844de99c 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -575,9 +575,8 @@ static void dt3155_remove(struct pci_dev *pdev)
struct dt3155_priv *pd = container_of(v4l2_dev, struct dt3155_priv,
v4l2_dev);
- video_unregister_device(&pd->vdev);
+ vb2_video_unregister_device(&pd->vdev);
free_irq(pd->pdev->irq, pd);
- vb2_queue_release(&pd->vidq);
v4l2_device_unregister(&pd->v4l2_dev);
pci_iounmap(pdev, pd->regs);
pci_release_region(pdev, 0);
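dt3155 is the first of several drivers here (ipu3-cio2, saa7134, sta2x11, tw5864 and aspeed-video follow) to adopt vb2_video_unregister_device(), which unregisters the video device and releases the vb2 queue it owns once that is safe, instead of an unconditional vb2_queue_release() that could race with a still-open file handle. The teardown shrinks to, in sketch form:

	/* before: explicit, potentially racy */
	video_unregister_device(&pd->vdev);
	vb2_queue_release(&pd->vidq);

	/* after: vdev->queue must be set; release is deferred as needed */
	vb2_video_unregister_device(&pd->vdev);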
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 92f5eadf2c99..4e598e937dfe 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2017 Intel Corporation
+ * Copyright (C) 2017,2020 Intel Corporation
*
* Based partially on Intel IPU4 driver written by
* Sakari Ailus <sakari.ailus@linux.intel.com>
@@ -9,13 +9,14 @@
* Jouni Ukkonen <jouni.ukkonen@intel.com>
* Antti Laakso <antti.laakso@intel.com>
* et al.
- *
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
@@ -96,12 +97,12 @@ static inline u32 cio2_bytesperline(const unsigned int width)
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
if (cio2->dummy_lop) {
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+ dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
cio2->dummy_lop, cio2->dummy_lop_bus_addr);
cio2->dummy_lop = NULL;
}
if (cio2->dummy_page) {
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+ dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
cio2->dummy_page, cio2->dummy_page_bus_addr);
cio2->dummy_page = NULL;
}
@@ -111,12 +112,10 @@ static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
unsigned int i;
- cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
- CIO2_PAGE_SIZE,
+ cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
- cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
- CIO2_PAGE_SIZE,
+ cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL);
if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -127,8 +126,8 @@ static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
* A List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4 KB page.
* Initialize each entry to dummy_page bus base address.
*/
- for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
- cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+ for (i = 0; i < CIO2_LOP_ENTRIES; i++)
+ cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
return 0;
}
@@ -160,12 +159,11 @@ static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
unsigned int i;
entry[0].first_entry.first_page_offset = 0;
- entry[1].second_entry.num_of_pages =
- CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
- entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
+ entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
+ entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
for (i = 0; i < CIO2_MAX_LOPS; i++)
- entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+ entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
cio2_fbpt_entry_enable(cio2, entry);
}
@@ -182,26 +180,24 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
entry[0].first_entry.first_page_offset = b->offset;
remaining = length + entry[0].first_entry.first_page_offset;
- entry[1].second_entry.num_of_pages =
- DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
+ entry[1].second_entry.num_of_pages = PFN_UP(remaining);
/*
* last_page_available_bytes has the offset of the last byte in the
* last page which is still accessible by DMA. DMA cannot access
* beyond this point. Valid range for this is from 0 to 4095.
* 0 indicates 1st byte in the page is DMA accessible.
- * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
+ * 4095 (PAGE_SIZE - 1) means every single byte in the last page
* is available for DMA transfer.
*/
entry[1].second_entry.last_page_available_bytes =
(remaining & ~PAGE_MASK) ?
- (remaining & ~PAGE_MASK) - 1 :
- CIO2_PAGE_SIZE - 1;
+ (remaining & ~PAGE_MASK) - 1 : PAGE_SIZE - 1;
/* Fill FBPT */
remaining = length;
i = 0;
while (remaining > 0) {
- entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
- remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
+ entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
+ remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
entry++;
i++;
}
@@ -209,7 +205,7 @@ static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
/*
* The first FBPT entry past the meaningful ones should point to a valid LOP
*/
- entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
+ entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
cio2_fbpt_entry_enable(cio2, entry);
}
@@ -295,7 +291,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
struct cio2_csi2_timing *timing)
{
struct device *dev = &cio2->pci_dev->dev;
- struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
+ struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
struct v4l2_ctrl *link_freq;
s64 freq;
int r;
@@ -475,8 +471,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
}
/* Enable DMA */
- writel(q->fbpt_bus_addr >> PAGE_SHIFT,
- base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
+ writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
@@ -512,8 +507,10 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
- void __iomem *base = cio2->base;
- unsigned int i, maxloops = 1000;
+ void __iomem *const base = cio2->base;
+ unsigned int i;
+ u32 value;
+ int ret;
/* Disable CSI receiver and MIPI backend devices */
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -523,13 +520,10 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
/* Halt DMA */
writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
- do {
- if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
- CIO2_CDMAC0_DMA_HALTED)
- break;
- usleep_range(1000, 2000);
- } while (--maxloops);
- if (!maxloops)
+ ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
+ value, value & CIO2_CDMAC0_DMA_HALTED,
+ 4000, 2000000);
+ if (ret)
dev_err(&cio2->pci_dev->dev,
"DMA %i can not be halted\n", CIO2_DMA_CHAN);
@@ -545,7 +539,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{
struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = cio2->cur_queue;
- int buffers_found = 0;
+ struct cio2_fbpt_entry *entry;
u64 ns = ktime_get_ns();
if (dma_chan >= CIO2_QUEUES) {
@@ -553,15 +547,18 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
return;
}
+ entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+ if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
+ dev_warn(&cio2->pci_dev->dev,
+ "no ready buffers found on DMA channel %u\n",
+ dma_chan);
+ return;
+ }
+
/* Find out which buffer(s) are ready */
do {
- struct cio2_fbpt_entry *const entry =
- &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
struct cio2_buffer *b;
- if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
- break;
-
b = q->bufs[q->bufs_first];
if (b) {
unsigned int bytes = entry[1].second_entry.num_of_bytes;
@@ -583,13 +580,8 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
atomic_inc(&q->frame_sequence);
cio2_fbpt_entry_init_dummy(cio2, entry);
q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
- buffers_found++;
- } while (1);
-
- if (buffers_found == 0)
- dev_warn(&cio2->pci_dev->dev,
- "no ready buffers found on DMA channel %u\n",
- dma_chan);
+ entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
+ } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
}
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
@@ -841,13 +833,11 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
struct device *dev = &cio2->pci_dev->dev;
struct cio2_buffer *b =
container_of(vb, struct cio2_buffer, vbb.vb2_buf);
- static const unsigned int entries_per_page =
- CIO2_PAGE_SIZE / sizeof(u32);
- unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
- unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
+ unsigned int pages = PFN_UP(vb->planes[0].length);
+ unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
struct sg_table *sg;
struct sg_dma_page_iter sg_iter;
- int i, j;
+ unsigned int i, j;
if (lops <= 0 || lops > CIO2_MAX_LOPS) {
dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
@@ -858,7 +848,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
memset(b->lop, 0, sizeof(b->lop));
/* Allocate LOP table */
for (i = 0; i < lops; i++) {
- b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
+ b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
&b->lop_bus_addr[i], GFP_KERNEL);
if (!b->lop[i])
goto fail;
@@ -873,23 +863,22 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
b->offset = sg->sgl->offset;
i = j = 0;
- for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) {
+ for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
if (!pages--)
break;
- b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
+ b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
j++;
- if (j == entries_per_page) {
+ if (j == CIO2_LOP_ENTRIES) {
i++;
j = 0;
}
}
- b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
+ b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
return 0;
fail:
- for (i--; i >= 0; i--)
- dma_free_coherent(dev, CIO2_PAGE_SIZE,
- b->lop[i], b->lop_bus_addr[i]);
+ while (i--)
+ dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
return -ENOMEM;
}
@@ -979,7 +968,7 @@ static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
/* Free LOP table */
for (i = 0; i < CIO2_MAX_LOPS; i++) {
if (b->lop[i])
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
+ dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
b->lop[i], b->lop_bus_addr[i]);
}
}
@@ -1633,7 +1622,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
if (r) {
dev_err(&cio2->pci_dev->dev,
"failed to initialize videobuf2 queue (%d)\n", r);
- goto fail_vbq;
+ goto fail_subdev;
}
/* Initialize vdev */
@@ -1664,10 +1653,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
return 0;
fail_link:
- video_unregister_device(&q->vdev);
+ vb2_video_unregister_device(&q->vdev);
fail_vdev:
- vb2_queue_release(vbq);
-fail_vbq:
v4l2_device_unregister_subdev(subdev);
fail_subdev:
media_entity_cleanup(&vdev->entity);
@@ -1683,9 +1670,8 @@ fail_fbpt:
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
- video_unregister_device(&q->vdev);
+ vb2_video_unregister_device(&q->vdev);
media_entity_cleanup(&q->vdev.entity);
- vb2_queue_release(&q->vbq);
v4l2_device_unregister_subdev(&q->subdev);
media_entity_cleanup(&q->subdev.entity);
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
@@ -1721,29 +1707,10 @@ static void cio2_queues_exit(struct cio2_device *cio2)
/**************** PCI interface ****************/
-static int cio2_pci_config_setup(struct pci_dev *dev)
-{
- u16 pci_command;
- int r = pci_enable_msi(dev);
-
- if (r) {
- dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
- return r;
- }
-
- pci_read_config_word(dev, PCI_COMMAND, &pci_command);
- pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, pci_command);
-
- return 0;
-}
-
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
struct cio2_device *cio2;
- void __iomem *const *iomap;
int r;
cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
@@ -1766,13 +1733,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return -ENODEV;
}
- iomap = pcim_iomap_table(pci_dev);
- if (!iomap) {
- dev_err(&pci_dev->dev, "failed to iomap table\n");
- return -ENODEV;
- }
-
- cio2->base = iomap[CIO2_PCI_BAR];
+ cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
pci_set_drvdata(pci_dev, cio2);
@@ -1784,9 +1745,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return -ENODEV;
}
- r = cio2_pci_config_setup(pci_dev);
- if (r)
- return -ENODEV;
+ r = pci_enable_msi(pci_dev);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+ return r;
+ }
r = cio2_fbpt_init_dummy(cio2);
if (r)
@@ -2012,8 +1975,8 @@ static int __maybe_unused cio2_suspend(struct device *dev)
static int __maybe_unused cio2_resume(struct device *dev)
{
struct cio2_device *cio2 = dev_get_drvdata(dev);
- int r = 0;
struct cio2_queue *q = cio2->cur_queue;
+ int r;
dev_dbg(dev, "cio2 resume\n");
if (!cio2->streaming)
@@ -2040,7 +2003,7 @@ static const struct dev_pm_ops cio2_pm_ops = {
static const struct pci_device_id cio2_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
- { 0 }
+ { }
};
MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 7caab9b8c2b9..549b08f88f0c 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -4,6 +4,8 @@
#ifndef __IPU3_CIO2_H
#define __IPU3_CIO2_H
+#include <linux/types.h>
+
#define CIO2_NAME "ipu3-cio2"
#define CIO2_DEVICE_NAME "Intel IPU3 CIO2"
#define CIO2_ENTITY_NAME "ipu3-csi2"
@@ -17,6 +19,7 @@
/* 32MB = 8xFBPT_entry */
#define CIO2_MAX_LOPS 8
#define CIO2_MAX_BUFFERS (PAGE_SIZE / 16 / CIO2_MAX_LOPS)
+#define CIO2_LOP_ENTRIES (PAGE_SIZE / sizeof(u32))
#define CIO2_PAD_SINK 0
#define CIO2_PAD_SOURCE 1
@@ -389,7 +392,6 @@ struct cio2_device {
sizeof(struct cio2_fbpt_entry))
#define CIO2_FBPT_SUBENTRY_UNIT 4
-#define CIO2_PAGE_SIZE 4096
/* cio2 fbpt first_entry ctrl status */
#define CIO2_FBPT_CTRL_VALID BIT(0)
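The header side of the ipu3-cio2 cleanup replaces the private CIO2_PAGE_SIZE constant with the kernel's PAGE_SIZE, adds CIO2_LOP_ENTRIES for the number of 32-bit page pointers that fit in one page, and (in the .c file) swaps the manual shifts for PFN_DOWN()/PFN_UP() from <linux/pfn.h>. The equivalences relied on, in sketch form:

	#include <linux/pfn.h>

	/* PFN_DOWN(x) == (x) >> PAGE_SHIFT                   : addr  -> page frame */
	/* PFN_UP(x)   == ((x) + PAGE_SIZE - 1) >> PAGE_SHIFT : bytes -> page count */

	entry->lop_page_addr = PFN_DOWN(bus_addr);	/* store a PFN    */
	pages = PFN_UP(vb->planes[0].length);		/* round up pages */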
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index affc5977387f..4df571ff272b 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -200,9 +200,9 @@ void mantis_dma_stop(struct mantis_pci *mantis)
}
-void mantis_dma_xfer(unsigned long data)
+void mantis_dma_xfer(struct tasklet_struct *t)
{
- struct mantis_pci *mantis = (struct mantis_pci *) data;
+ struct mantis_pci *mantis = from_tasklet(mantis, t, tasklet);
struct mantis_hwconfig *config = mantis->hwconfig;
while (mantis->last_block != mantis->busy_block) {
diff --git a/drivers/media/pci/mantis/mantis_dma.h b/drivers/media/pci/mantis/mantis_dma.h
index 421663443d62..37da982c9c29 100644
--- a/drivers/media/pci/mantis/mantis_dma.h
+++ b/drivers/media/pci/mantis/mantis_dma.h
@@ -13,6 +13,6 @@ extern int mantis_dma_init(struct mantis_pci *mantis);
extern int mantis_dma_exit(struct mantis_pci *mantis);
extern void mantis_dma_start(struct mantis_pci *mantis);
extern void mantis_dma_stop(struct mantis_pci *mantis);
-extern void mantis_dma_xfer(unsigned long data);
+extern void mantis_dma_xfer(struct tasklet_struct *t);
#endif /* __MANTIS_DMA_H */
diff --git a/drivers/media/pci/mantis/mantis_dvb.c b/drivers/media/pci/mantis/mantis_dvb.c
index 2da94be5b373..c7ba4a76e608 100644
--- a/drivers/media/pci/mantis/mantis_dvb.c
+++ b/drivers/media/pci/mantis/mantis_dvb.c
@@ -205,7 +205,7 @@ int mantis_dvb_init(struct mantis_pci *mantis)
}
dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
- tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
+ tasklet_setup(&mantis->tasklet, mantis_dma_xfer);
tasklet_disable(&mantis->tasklet);
if (mantis->hwconfig) {
result = config->frontend_init(mantis, mantis->fe);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 80a7c41baa90..6f3125c2d097 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -1016,8 +1016,6 @@ static struct pci_driver netup_unidvb_pci_driver = {
.id_table = netup_unidvb_pci_tbl,
.probe = netup_unidvb_initdev,
.remove = netup_unidvb_finidev,
- .suspend = NULL,
- .resume = NULL,
};
module_pci_driver(netup_unidvb_pci_driver);
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index af15ca1c501b..f9f94f47d76b 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -50,9 +50,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/* nGene interrupt handler **************************************************/
/****************************************************************************/
-static void event_tasklet(unsigned long data)
+static void event_tasklet(struct tasklet_struct *t)
{
- struct ngene *dev = (struct ngene *)data;
+ struct ngene *dev = from_tasklet(dev, t, event_tasklet);
while (dev->EventQueueReadIndex != dev->EventQueueWriteIndex) {
struct EVENT_BUFFER Event =
@@ -68,9 +68,9 @@ static void event_tasklet(unsigned long data)
}
}
-static void demux_tasklet(unsigned long data)
+static void demux_tasklet(struct tasklet_struct *t)
{
- struct ngene_channel *chan = (struct ngene_channel *)data;
+ struct ngene_channel *chan = from_tasklet(chan, t, demux_tasklet);
struct device *pdev = &chan->dev->pci_dev->dev;
struct SBufferHeader *Cur = chan->nextBuffer;
@@ -1181,7 +1181,7 @@ static void ngene_init(struct ngene *dev)
struct device *pdev = &dev->pci_dev->dev;
int i;
- tasklet_init(&dev->event_tasklet, event_tasklet, (unsigned long)dev);
+ tasklet_setup(&dev->event_tasklet, event_tasklet);
memset_io(dev->iomem + 0xc000, 0x00, 0x220);
memset_io(dev->iomem + 0xc400, 0x00, 0x100);
@@ -1445,7 +1445,7 @@ static int init_channel(struct ngene_channel *chan)
struct ngene_info *ni = dev->card_info;
int io = ni->io_type[nr];
- tasklet_init(&chan->demux_tasklet, demux_tasklet, (unsigned long)chan);
+ tasklet_setup(&chan->demux_tasklet, demux_tasklet);
chan->users = 0;
chan->type = io;
chan->mode = chan->type; /* for now only one mode */
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index 544ca57eee75..7a1fb067b0e0 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -252,7 +252,8 @@ static int snd_card_saa7134_capture_trigger(struct snd_pcm_substream * substream
return err;
}
-static int saa7134_alsa_dma_init(struct saa7134_dev *dev, int nr_pages)
+static int saa7134_alsa_dma_init(struct saa7134_dev *dev,
+ unsigned long nr_pages)
{
struct saa7134_dmasound *dma = &dev->dmasound;
struct page *pg;
@@ -260,11 +261,11 @@ static int saa7134_alsa_dma_init(struct saa7134_dev *dev, int nr_pages)
dma->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == dma->vaddr) {
- pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
+ pr_debug("vmalloc_32(%lu pages) failed\n", nr_pages);
return -ENOMEM;
}
- pr_debug("vmalloc is at addr %p, size=%d\n",
+ pr_debug("vmalloc is at addr %p, size=%lu\n",
dma->vaddr, nr_pages << PAGE_SHIFT);
memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
@@ -297,7 +298,7 @@ static int saa7134_alsa_dma_map(struct saa7134_dev *dev)
struct saa7134_dmasound *dma = &dev->dmasound;
dma->sglen = dma_map_sg(&dev->pci->dev, dma->sglist,
- dma->nr_pages, PCI_DMA_FROMDEVICE);
+ dma->nr_pages, DMA_FROM_DEVICE);
if (0 == dma->sglen) {
pr_warn("%s: saa7134_alsa_map_sg failed\n", __func__);
@@ -313,7 +314,7 @@ static int saa7134_alsa_dma_unmap(struct saa7134_dev *dev)
if (!dma->sglen)
return 0;
- dma_unmap_sg(&dev->pci->dev, dma->sglist, dma->sglen, PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&dev->pci->dev, dma->sglist, dma->nr_pages, DMA_FROM_DEVICE);
dma->sglen = 0;
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c1937c33c33d..ce449c941171 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7812,7 +7812,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
dev->name, saa7134_boards[dev->board].name);
break;
}
- /* fall-through */
+ fallthrough;
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
@@ -7870,7 +7870,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
hauppauge_eeprom(dev, dev->eedata+0x80);
- /* fall-through */
+ fallthrough;
case SAA7134_BOARD_PINNACLE_PCTV_310i:
case SAA7134_BOARD_KWORLD_DVBT_210:
case SAA7134_BOARD_TEVION_DVBT_220RF:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e4623ed2f831..391572a6ec76 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -359,14 +359,12 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
struct saa7134_buf *tmp;
spin_lock_irqsave(&dev->slock, flags);
- if (!list_empty(&q->queue)) {
- list_for_each_safe(pos, n, &q->queue) {
- tmp = list_entry(pos, struct saa7134_buf, entry);
- vb2_buffer_done(&tmp->vb2.vb2_buf,
- VB2_BUF_STATE_ERROR);
- list_del(pos);
- tmp = NULL;
- }
+ list_for_each_safe(pos, n, &q->queue) {
+ tmp = list_entry(pos, struct saa7134_buf, entry);
+ vb2_buffer_done(&tmp->vb2.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ list_del(pos);
+ tmp = NULL;
}
spin_unlock_irqrestore(&dev->slock, flags);
saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */
@@ -965,21 +963,21 @@ static void saa7134_unregister_video(struct saa7134_dev *dev)
if (dev->video_dev) {
if (video_is_registered(dev->video_dev))
- video_unregister_device(dev->video_dev);
+ vb2_video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
}
if (dev->vbi_dev) {
if (video_is_registered(dev->vbi_dev))
- video_unregister_device(dev->vbi_dev);
+ vb2_video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->radio_dev) {
if (video_is_registered(dev->radio_dev))
- video_unregister_device(dev->radio_dev);
+ vb2_video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
@@ -1370,11 +1368,9 @@ static void saa7134_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-#ifdef CONFIG_PM
-
/* resends a current buffer in queue after resume */
-static int saa7134_buffer_requeue(struct saa7134_dev *dev,
- struct saa7134_dmaqueue *q)
+static int __maybe_unused saa7134_buffer_requeue(struct saa7134_dev *dev,
+ struct saa7134_dmaqueue *q)
{
struct saa7134_buf *buf, *next;
@@ -1397,8 +1393,9 @@ static int saa7134_buffer_requeue(struct saa7134_dev *dev,
return 0;
}
-static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
+static int __maybe_unused saa7134_suspend(struct device *dev_d)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev_d);
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
@@ -1428,21 +1425,15 @@ static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
if (dev->remote && dev->remote->dev->users)
saa7134_ir_close(dev->remote->dev);
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
-
return 0;
}
-static int saa7134_resume(struct pci_dev *pci_dev)
+static int __maybe_unused saa7134_resume(struct device *dev_d)
{
- struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(dev_d);
struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
unsigned long flags;
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
-
/* Do things that are done in saa7134_initdev,
except for initializing memory structures. */
@@ -1490,7 +1481,6 @@ static int saa7134_resume(struct pci_dev *pci_dev)
return 0;
}
-#endif
/* ----------------------------------------------------------- */
@@ -1522,15 +1512,14 @@ EXPORT_SYMBOL(saa7134_ts_unregister);
/* ----------------------------------------------------------- */
+static SIMPLE_DEV_PM_OPS(saa7134_pm_ops, saa7134_suspend, saa7134_resume);
+
static struct pci_driver saa7134_pci_driver = {
.name = "saa7134",
.id_table = saa7134_pci_tbl,
.probe = saa7134_initdev,
.remove = saa7134_finidev,
-#ifdef CONFIG_PM
- .suspend = saa7134_suspend,
- .resume = saa7134_resume
-#endif
+ .driver.pm = &saa7134_pm_ops,
};
static int __init saa7134_init(void)
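The saa7134-core hunks convert the legacy PCI .suspend/.resume pair to generic dev_pm_ops: the PCI core now saves and restores config space and sets power states itself, so the driver drops its pci_save_state()/pci_set_power_state() calls and the callbacks take a struct device. A minimal sketch of the wiring, with hypothetical names:

	static int __maybe_unused foo_suspend(struct device *dev_d)
	{
		/* device-specific quiesce only; no PCI state handling */
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev_d)
	{
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name      = "foo",
		.driver.pm = &foo_pm_ops,
	};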
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 8ad7879bd840..39e3c7f8c5b4 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -314,8 +314,7 @@ static int empress_fini(struct saa7134_dev *dev)
if (NULL == dev->empress_dev)
return 0;
flush_work(&dev->empress_workqueue);
- video_unregister_device(dev->empress_dev);
- vb2_queue_release(&dev->empress_vbq);
+ vb2_video_unregister_device(dev->empress_dev);
v4l2_ctrl_handler_free(&dev->empress_ctrl_handler);
dev->empress_dev = NULL;
return 0;
diff --git a/drivers/media/pci/saa7134/saa7134-go7007.c b/drivers/media/pci/saa7134/saa7134-go7007.c
index e1b034663958..f319edb39c0e 100644
--- a/drivers/media/pci/saa7134/saa7134-go7007.c
+++ b/drivers/media/pci/saa7134/saa7134-go7007.c
@@ -493,7 +493,7 @@ static int saa7134_go7007_fini(struct saa7134_dev *dev)
free_page((unsigned long)saa->bottom);
v4l2_device_unregister_subdev(&saa->sd);
kfree(saa);
- video_unregister_device(&go->vdev);
+ vb2_video_unregister_device(&go->vdev);
v4l2_device_put(&go->v4l2_dev);
dev->empress_dev = NULL;
diff --git a/drivers/media/pci/saa7134/saa7134-tvaudio.c b/drivers/media/pci/saa7134/saa7134-tvaudio.c
index 79e1afb71075..5cc4ef21f9d3 100644
--- a/drivers/media/pci/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/pci/saa7134/saa7134-tvaudio.c
@@ -683,7 +683,8 @@ int saa_dsp_writel(struct saa7134_dev *dev, int reg, u32 value)
{
int err;
- audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n", reg << 2, value);
+ audio_dbg(2, "dsp write reg 0x%x = 0x%06x\n",
+ (reg << 2) & 0xffffffff, value);
err = saa_dsp_wait_bit(dev,SAA7135_DSP_RWSTATE_WRR);
if (err < 0)
return err;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index a8ac94fadc14..9a6a6b68f8e3 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2154,9 +2154,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
void saa7134_video_fini(struct saa7134_dev *dev)
{
/* free stuff */
- vb2_queue_release(&dev->video_vbq);
saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
- vb2_queue_release(&dev->vbi_vbq);
saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
if (card_has_radio(dev))
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 77c325e64a97..d29499cd7370 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -509,7 +509,7 @@ struct saa7134_dmasound {
void *vaddr;
struct scatterlist *sglist;
int sglen;
- int nr_pages;
+ unsigned long nr_pages;
unsigned int dma_blk;
unsigned int read_offset;
unsigned int read_count;
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index 289cb901985b..245d9db280aa 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -250,15 +250,14 @@ int saa7164_buffer_cfg_port(struct saa7164_port *port)
list_for_each_safe(c, n, &port->dmaqueue.list) {
buf = list_entry(c, struct saa7164_buffer, list);
- if (buf->flags != SAA7164_BUFFER_FREE)
- BUG();
+ BUG_ON(buf->flags != SAA7164_BUFFER_FREE);
/* Place the buffer in the h/w queue */
saa7164_buffer_activate(buf, i);
/* Don't exceed the device maximum # bufs */
- if (i++ > port->hwcfg.buffercount)
- BUG();
+ BUG_ON(i > port->hwcfg.buffercount);
+ i++;
}
mutex_unlock(&port->dmaqueue_lock);
@@ -302,4 +301,3 @@ void saa7164_buffer_dealloc_user(struct saa7164_user_buffer *buf)
kfree(buf);
}
-
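The saa7164 hunks here and in the next three files fold the two-line "if (cond) BUG();" pattern into BUG_ON(cond), which is idiomatic and prints the stringified condition in the oops output. Where the old condition carried a side effect, the increment is hoisted out so BUG_ON() stays side-effect free:

	/* before: side effect buried in the check */
	if (i++ > port->hwcfg.buffercount)
		BUG();

	/* after */
	BUG_ON(i > port->hwcfg.buffercount);
	i++;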
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 4b637891b79a..6c08b77bfd47 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -575,8 +575,8 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_port *port)
/* Find the current write point from the hardware */
wp = saa7164_readl(port->bufcounter);
- if (wp > (port->hwcfg.buffercount - 1))
- BUG();
+
+ BUG_ON(wp > (port->hwcfg.buffercount - 1));
/* Find the previous buffer to the current write point */
if (wp == 0)
@@ -588,8 +588,8 @@ static irqreturn_t saa7164_irq_ts(struct saa7164_port *port)
/* TODO: turn this into a worker thread */
list_for_each_safe(c, n, &port->dmaqueue.list) {
buf = list_entry(c, struct saa7164_buffer, list);
- if (i++ > port->hwcfg.buffercount)
- BUG();
+ BUG_ON(i > port->hwcfg.buffercount);
+ i++;
if (buf->idx == rp) {
/* Found the buffer, deal with it */
@@ -894,8 +894,7 @@ static int saa7164_port_init(struct saa7164_dev *dev, int portnr)
{
struct saa7164_port *port = NULL;
- if ((portnr < 0) || (portnr >= SAA7164_MAX_PORTS))
- BUG();
+ BUG_ON((portnr < 0) || (portnr >= SAA7164_MAX_PORTS));
port = &dev->ports[portnr];
@@ -1563,4 +1562,3 @@ static void __exit saa7164_fini(void)
module_init(saa7164_init);
module_exit(saa7164_fini);
-
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index bf8c2bb8852e..24421c116b0b 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -337,8 +337,7 @@ static int dvb_register(struct saa7164_port *port)
dprintk(DBGLVL_DVB, "%s(port=%d)\n", __func__, port->nr);
- if (port->type != SAA7164_MPEG_DVB)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_DVB);
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
@@ -479,8 +478,7 @@ int saa7164_dvb_unregister(struct saa7164_port *port)
dprintk(DBGLVL_DVB, "%s()\n", __func__);
- if (port->type != SAA7164_MPEG_DVB)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_DVB);
/* Remove any allocated buffers */
mutex_lock(&port->dmaqueue_lock);
@@ -740,4 +738,3 @@ frontend_detach:
printk(KERN_ERR "%s() Frontend/I2C initialization failed\n", __func__);
return -1;
}
-
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 49d61a64c8cb..cb2e09f0841d 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -703,8 +703,7 @@ int saa7164_vbi_register(struct saa7164_port *port)
dprintk(DBGLVL_VBI, "%s()\n", __func__);
- if (port->type != SAA7164_MPEG_VBI)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_VBI);
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
@@ -756,8 +755,7 @@ void saa7164_vbi_unregister(struct saa7164_port *port)
dprintk(DBGLVL_VBI, "%s(port=%d)\n", __func__, port->nr);
- if (port->type != SAA7164_MPEG_VBI)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_VBI);
if (port->v4l_device) {
if (port->v4l_device->minor != -1)
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index 9445d792bfc9..e6b74e161a05 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -87,8 +87,7 @@ static void smi_ir_decode(struct smi_rc *ir)
struct ir_raw_event rawir = {};
rawir.pulse = 0;
- rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD *
- SMI_SAMPLE_IDLEMIN);
+ rawir.duration = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
ir_raw_event_store_with_filter(rc_dev, &rawir);
smi_set(IR_Init_Reg, rbIRhighidle);
}
@@ -151,8 +150,8 @@ int smi_ir_init(struct smi_dev *dev)
rc_dev->dev.parent = &dev->pci_dev->dev;
rc_dev->map_name = dev->info->rc_map;
- rc_dev->timeout = MS_TO_NS(100);
- rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD);
+ rc_dev->timeout = MS_TO_US(100);
+ rc_dev->rx_resolution = SMI_SAMPLE_PERIOD;
ir->rc_dev = rc_dev;
ir->dev = dev;
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index 9ca0fc3e6f80..e7604b7ecc8d 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -280,9 +280,9 @@ static void smi_port_clearInterrupt(struct smi_port *port)
}
/* tasklet handler: DMA data to dmx.*/
-static void smi_dma_xfer(unsigned long data)
+static void smi_dma_xfer(struct tasklet_struct *t)
{
- struct smi_port *port = (struct smi_port *) data;
+ struct smi_port *port = from_tasklet(port, t, tasklet);
struct smi_dev *dev = port->dev;
u32 intr_status, finishedData, dmaManagement;
u8 dmaChan0State, dmaChan1State;
@@ -422,7 +422,7 @@ static int smi_port_init(struct smi_port *port, int dmaChanUsed)
}
smi_port_disableInterrupt(port);
- tasklet_init(&port->tasklet, smi_dma_xfer, (unsigned long)port);
+ tasklet_setup(&port->tasklet, smi_dma_xfer);
tasklet_disable(&port->tasklet);
port->enable = 1;
return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index c6e0090f27e8..d497afc7e7b7 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -503,7 +503,7 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
default:
dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, assuming 4 ch\n",
chip_id);
- /* fall through */
+ fallthrough;
case 5:
solo_dev->nr_chans = 4;
solo_dev->nr_ext = 1;
diff --git a/drivers/media/pci/solo6x10/solo6x10-i2c.c b/drivers/media/pci/solo6x10/solo6x10-i2c.c
index f86f12fa6350..7db785e9c997 100644
--- a/drivers/media/pci/solo6x10/solo6x10-i2c.c
+++ b/drivers/media/pci/solo6x10/solo6x10-i2c.c
@@ -183,7 +183,7 @@ int solo_i2c_isr(struct solo_dev *solo_dev)
}
solo_dev->i2c_state = IIC_STATE_WRITE;
- /* fall through */
+ fallthrough;
case IIC_STATE_WRITE:
ret = solo_i2c_handle_write(solo_dev);
break;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 0fdb0fd6e764..336df65c8af1 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -1101,12 +1101,11 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
vunreg:
video_set_drvdata(&vip->video_dev, NULL);
vrelease:
- video_unregister_device(&vip->video_dev);
+ vb2_video_unregister_device(&vip->video_dev);
free_irq(pdev->irq, vip);
release_buf:
pci_disable_msi(pdev);
unmap:
- vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
release:
pci_release_regions(pdev);
@@ -1146,10 +1145,9 @@ static void sta2x11_vip_remove_one(struct pci_dev *pdev)
sta2x11_vip_clear_register(vip);
video_set_drvdata(&vip->video_dev, NULL);
- video_unregister_device(&vip->video_dev);
+ vb2_video_unregister_device(&vip->video_dev);
free_irq(pdev->irq, vip);
pci_disable_msi(pdev);
- vb2_queue_release(&vip->vb_vidq);
pci_iounmap(pdev, vip->iomem);
pci_release_regions(pdev);
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 45228f4f6fc6..2f7069e19b78 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -357,9 +357,9 @@ static inline void start_debi_dma(struct av7110 *av7110, int dir,
irdebi(av7110, DEBISWAB, addr, 0, len);
}
-static void debiirq(unsigned long cookie)
+static void debiirq(struct tasklet_struct *t)
{
- struct av7110 *av7110 = (struct av7110 *)cookie;
+ struct av7110 *av7110 = from_tasklet(av7110, t, debi_tasklet);
int type = av7110->debitype;
int handle = (type >> 8) & 0x1f;
unsigned int xfer = 0;
@@ -458,9 +458,9 @@ debi_done:
}
/* irq from av7110 firmware writing the mailbox register in the DPRAM */
-static void gpioirq(unsigned long cookie)
+static void gpioirq(struct tasklet_struct *t)
{
- struct av7110 *av7110 = (struct av7110 *)cookie;
+ struct av7110 *av7110 = from_tasklet(av7110, t, gpio_tasklet);
u32 rxbuf, txbuf;
int len;
@@ -1230,9 +1230,9 @@ static int budget_stop_feed(struct dvb_demux_feed *feed)
return status;
}
-static void vpeirq(unsigned long cookie)
+static void vpeirq(struct tasklet_struct *t)
{
- struct av7110 *budget = (struct av7110 *)cookie;
+ struct av7110 *budget = from_tasklet(budget, t, vpe_tasklet);
u8 *mem = (u8 *) (budget->grabbing);
u32 olddma = budget->ttbp;
u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
@@ -2518,7 +2518,7 @@ static int av7110_attach(struct saa7146_dev* dev,
saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
saa7146_write(dev, MC2, MASK_04 | MASK_20);
- tasklet_init(&av7110->vpe_tasklet, vpeirq, (unsigned long) av7110);
+ tasklet_setup(&av7110->vpe_tasklet, vpeirq);
} else if (budgetpatch) {
spin_lock_init(&av7110->feedlock1);
@@ -2599,7 +2599,7 @@ static int av7110_attach(struct saa7146_dev* dev,
saa7146_write(dev, MC1, (MASK_13 | MASK_29));
/* end of budgetpatch register initialization */
- tasklet_init (&av7110->vpe_tasklet, vpeirq, (unsigned long) av7110);
+ tasklet_setup(&av7110->vpe_tasklet, vpeirq);
} else {
saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
saa7146_write(dev, BCS_CTRL, 0x80400040);
@@ -2614,8 +2614,8 @@ static int av7110_attach(struct saa7146_dev* dev,
saa7146_write(dev, GPIO_CTRL, 0x000000);
}
- tasklet_init (&av7110->debi_tasklet, debiirq, (unsigned long) av7110);
- tasklet_init (&av7110->gpio_tasklet, gpioirq, (unsigned long) av7110);
+ tasklet_setup(&av7110->debi_tasklet, debiirq);
+ tasklet_setup(&av7110->gpio_tasklet, gpioirq);
mutex_init(&av7110->pid_mutex);
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c
index cabe006658dd..c89f536f699c 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/media/pci/ttpci/av7110_v4l.c
@@ -160,9 +160,9 @@ static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq)
buf[1] = div & 0xff;
buf[2] = 0x8e;
- if (freq < (u32) (16 * 168.25))
+ if (freq < 16U * 16825 / 100)
config = 0xa0;
- else if (freq < (u32) (16 * 447.25))
+ else if (freq < 16U * 44725 / 100)
config = 0x90;
else
config = 0x30;
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 77b102b8a013..d59d18647371 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -99,9 +99,10 @@ struct budget_ci {
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
-static void msp430_ir_interrupt(unsigned long data)
+static void msp430_ir_interrupt(struct tasklet_struct *t)
{
- struct budget_ci *budget_ci = (struct budget_ci *) data;
+ struct budget_ci_ir *ir = from_tasklet(ir, t, msp430_irq_tasklet);
+ struct budget_ci *budget_ci = container_of(ir, typeof(*budget_ci), ir);
struct rc_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
@@ -229,8 +230,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
budget_ci->ir.dev = dev;
- tasklet_init(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt,
- (unsigned long) budget_ci);
+ tasklet_setup(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt);
SAA7146_IER_ENABLE(saa, MASK_06);
saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
@@ -348,9 +348,10 @@ static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
return 0;
}
-static void ciintf_interrupt(unsigned long data)
+static void ciintf_interrupt(struct tasklet_struct *t)
{
- struct budget_ci *budget_ci = (struct budget_ci *) data;
+ struct budget_ci *budget_ci = from_tasklet(budget_ci, t,
+ ciintf_irq_tasklet);
struct saa7146_dev *saa = budget_ci->budget.dev;
unsigned int flags;
@@ -491,7 +492,7 @@ static int ciintf_init(struct budget_ci *budget_ci)
// Setup CI slot IRQ
if (budget_ci->ci_irq) {
- tasklet_init(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt, (unsigned long) budget_ci);
+ tasklet_setup(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt);
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
} else {
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 293867b9e796..d405eea5c37f 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -171,9 +171,9 @@ static int budget_read_fe_status(struct dvb_frontend *fe,
return ret;
}
-static void vpeirq(unsigned long data)
+static void vpeirq(struct tasklet_struct *t)
{
- struct budget *budget = (struct budget *) data;
+ struct budget *budget = from_tasklet(budget, t, vpe_tasklet);
u8 *mem = (u8 *) (budget->grabbing);
u32 olddma = budget->ttbp;
u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
@@ -519,7 +519,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
/* upload all */
saa7146_write(dev, GPIO_CTRL, 0x000000);
- tasklet_init(&budget->vpe_tasklet, vpeirq, (unsigned long) budget);
+ tasklet_setup(&budget->vpe_tasklet, vpeirq);
/* frontend power on */
if (bi->type != BUDGET_FS_ACTIVY)
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index ec1e06da7e4f..9131265c2b87 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -175,7 +175,7 @@ static const unsigned int intra4x4_lambda3[] = {
static v4l2_std_id tw5864_get_v4l2_std(enum tw5864_vid_std std);
static enum tw5864_vid_std tw5864_from_v4l2_std(v4l2_std_id v4l2_std);
-static void tw5864_handle_frame_task(unsigned long data);
+static void tw5864_handle_frame_task(struct tasklet_struct *t);
static void tw5864_handle_frame(struct tw5864_h264_frame *frame);
static void tw5864_frame_interval_set(struct tw5864_input *input);
@@ -767,6 +767,9 @@ static int tw5864_enum_frameintervals(struct file *file, void *priv,
fintv->type = V4L2_FRMIVAL_TYPE_STEPWISE;
ret = tw5864_frameinterval_get(input, &frameinterval);
+ if (ret)
+ return ret;
+
fintv->stepwise.step = frameinterval;
fintv->stepwise.min = frameinterval;
fintv->stepwise.max = frameinterval;
@@ -785,6 +788,9 @@ static int tw5864_g_parm(struct file *file, void *priv,
cp->capability = V4L2_CAP_TIMEPERFRAME;
ret = tw5864_frameinterval_get(input, &cp->timeperframe);
+ if (ret)
+ return ret;
+
cp->timeperframe.numerator *= input->frame_interval;
cp->capturemode = 0;
cp->readbuffers = 2;
@@ -1057,8 +1063,7 @@ int tw5864_video_init(struct tw5864_dev *dev, int *video_nr)
dev->irqmask |= TW5864_INTR_VLC_DONE | TW5864_INTR_TIMER;
tw5864_irqmask_apply(dev);
- tasklet_init(&dev->tasklet, tw5864_handle_frame_task,
- (unsigned long)dev);
+ tasklet_setup(&dev->tasklet, tw5864_handle_frame_task);
for (i = 0; i < TW5864_INPUTS; i++) {
dev->inputs[i].root = dev;
@@ -1178,7 +1183,6 @@ static int tw5864_video_input_init(struct tw5864_input *input, int video_nr)
free_v4l2_hdl:
v4l2_ctrl_handler_free(hdl);
- vb2_queue_release(&input->vidq);
free_mutex:
mutex_destroy(&input->lock);
@@ -1187,9 +1191,8 @@ free_mutex:
static void tw5864_video_input_fini(struct tw5864_input *dev)
{
- video_unregister_device(&dev->vdev);
+ vb2_video_unregister_device(&dev->vdev);
v4l2_ctrl_handler_free(&dev->hdl);
- vb2_queue_release(&dev->vidq);
}
void tw5864_video_fini(struct tw5864_dev *dev)
@@ -1313,9 +1316,9 @@ static int tw5864_is_motion_triggered(struct tw5864_h264_frame *frame)
return detected;
}
-static void tw5864_handle_frame_task(unsigned long data)
+static void tw5864_handle_frame_task(struct tasklet_struct *t)
{
- struct tw5864_dev *dev = (struct tw5864_dev *)data;
+ struct tw5864_dev *dev = from_tasklet(dev, t, tasklet);
unsigned long flags;
int batch_size = H264_BUF_CNT;
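
vb2_video_unregister_device() lets drivers that set vdev->queue drop the separate vb2_queue_release() call: the helper unregisters the video node and releases its vb2 queue once no new opens can race with the teardown. A sketch of the resulting fini path (my_input and its members are illustrative):

	static void my_input_fini(struct my_input *in)
	{
		/* replaces video_unregister_device(&in->vdev) followed
		 * by vb2_queue_release(&in->vidq) */
		vb2_video_unregister_device(&in->vdev);
		v4l2_ctrl_handler_free(&in->hdl);
	}
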
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c57ee78fa99d..a3cb104956d5 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -256,13 +256,14 @@ config VIDEO_MEDIATEK_VCODEC
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
+ select MTK_SCP
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats.
This driver relies on the VPU driver to communicate with the VPU.
- To compile this driver as a module, choose M here: the
- module will be called mtk-vcodec
+ To compile this driver as modules, choose M here: the
+ modules will be called mtk-vcodec-dec and mtk-vcodec-enc.
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
@@ -426,8 +427,8 @@ config VIDEO_RENESAS_FCP
help
This is a driver for the Renesas Frame Compression Processor (FCP).
The FCP is a companion module of video processing modules in the
- Renesas R-Car Gen3 SoCs. It handles memory access for the codec,
- VSP and FDP modules.
+ Renesas R-Car Gen3 and RZ/G2 SoCs. It handles memory access for
+ the codec, VSP and FDP modules.
To compile this driver as a module, choose M here: the module
will be called rcar-fcp.
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 7d98db1d9b52..c46a79eace98 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -1597,7 +1597,6 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
video_set_drvdata(vdev, video);
rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0);
if (rc) {
- vb2_queue_release(vbq);
v4l2_ctrl_handler_free(&video->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
@@ -1737,9 +1736,7 @@ static int aspeed_video_remove(struct platform_device *pdev)
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
- video_unregister_device(&video->vdev);
-
- vb2_queue_release(&video->queue);
+ vb2_video_unregister_device(&video->vdev);
v4l2_ctrl_handler_free(&video->ctrl_handler);
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b021604eceaa..bf75927bac4e 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1101,7 +1101,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
break;
case CODA_960:
coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
- /* fallthrough */
+ fallthrough;
case CODA_HX4:
case CODA_7541:
coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
@@ -1141,7 +1141,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
break;
}
- /* fallthrough */
+ fallthrough;
case CODA_960:
value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
<< CODA7_PICWIDTH_OFFSET;
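
The /* fallthrough */ comments are replaced by the fallthrough pseudo-keyword (from linux/compiler_attributes.h, expanding to __attribute__((fallthrough)) where the compiler supports it), so -Wimplicit-fallthrough is enforced by the compiler rather than by comment matching. The shape of the conversion, with illustrative names and case values:

	static void my_stream_setup(struct my_dev *dev)	/* illustrative */
	{
		switch (dev->product) {
		case MY_PRODUCT_V2:
			my_write_extra_reg(dev);	/* v2-only step */
			fallthrough;			/* then the common path */
		case MY_PRODUCT_V1:
			my_common_setup(dev);
			break;
		}
	}
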
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 3ab3d976d8ca..87a2c706f747 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -808,7 +808,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
}
- /* else fall through */
+ fallthrough;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV422P:
@@ -1015,7 +1015,7 @@ static int coda_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
rsel = &r;
- /* fallthrough */
+ fallthrough;
case V4L2_SEL_TGT_CROP:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
ctx->inst_type == CODA_INST_DECODER)
@@ -1024,7 +1024,7 @@ static int coda_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_PADDED:
rsel = &r;
- /* fallthrough */
+ fallthrough;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -1074,7 +1074,7 @@ static int coda_s_selection(struct file *file, void *fh,
return 0;
}
- /* else fall through */
+ fallthrough;
case V4L2_SEL_TGT_NATIVE_SIZE:
case V4L2_SEL_TGT_COMPOSE:
return coda_g_selection(file, fh, s);
@@ -1937,9 +1937,6 @@ int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
buf->blob.size = size;
buf->dentry = debugfs_create_blob(name, 0644, parent,
&buf->blob);
- if (!buf->dentry)
- dev_warn(dev->dev,
- "failed to create debugfs entry %s\n", name);
}
return 0;
@@ -2628,7 +2625,7 @@ static int coda_open(struct file *file)
*/
if (enable_bwb || ctx->inst_type == CODA_INST_ENCODER)
ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB;
- /* fallthrough */
+ fallthrough;
case CODA_HX4:
case CODA_7541:
ctx->reg_idx = 0;
@@ -3211,8 +3208,6 @@ static int coda_probe(struct platform_device *pdev)
ida_init(&dev->ida);
dev->debugfs_root = debugfs_create_dir("coda", NULL);
- if (!dev->debugfs_root)
- dev_warn(&pdev->dev, "failed to create debugfs root\n");
/* allocate auxiliary per-device buffers for the BIT processor */
if (dev->devtype->product == CODA_DX6) {
@@ -3269,6 +3264,8 @@ static int coda_probe(struct platform_device *pdev)
return 0;
err_alloc_workqueue:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
destroy_workqueue(dev->workqueue);
err_v4l2_register:
v4l2_device_unregister(&dev->v4l2_dev);
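
Once probe has enabled runtime PM, every later failure path has to wind it back down, or the device is left with an elevated usage count and runtime PM permanently enabled. A balanced sketch, assuming probe takes a noresume reference up front (my_register_device is a hypothetical helper):

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_get_noresume(&pdev->dev);	/* keep device active */
		pm_runtime_enable(&pdev->dev);

		ret = my_register_device(pdev);		/* hypothetical */
		if (ret)
			goto err_pm;
		return 0;

	err_pm:
		pm_runtime_disable(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);	/* drop the noresume ref */
		return ret;
	}
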
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index e7a4b06e6dfe..6000a4e789ad 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -495,17 +495,6 @@ static int fimc_capture_open(struct file *file)
ret = fimc_pipeline_call(ve, open, &ve->vdev.entity, true);
- if (ret == 0 && vc->user_subdev_api && vc->inh_sensor_ctrls) {
- /*
- * Recreate controls of the the video node to drop
- * any controls inherited from the sensor subdev.
- */
- fimc_ctrls_delete(vc->ctx);
-
- ret = fimc_ctrls_create(vc->ctx);
- if (ret == 0)
- vc->inh_sensor_ctrls = false;
- }
if (ret == 0)
ve->vdev.entity.use_count++;
@@ -1246,8 +1235,11 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
if (ret < 0)
return ret;
- media_pipeline_stop(&vc->ve.vdev.entity);
- vc->streaming = false;
+ if (vc->streaming) {
+ media_pipeline_stop(&vc->ve.vdev.entity);
+ vc->streaming = false;
+ }
+
return 0;
}
@@ -1279,7 +1271,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
- /* fall through */
+ fallthrough;
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = 0;
@@ -1290,7 +1282,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_COMPOSE:
f = &ctx->d_frame;
- /* fall through */
+ fallthrough;
case V4L2_SEL_TGT_CROP:
s->r.left = f->offs_h;
s->r.top = f->offs_v;
@@ -1398,7 +1390,7 @@ static int fimc_link_setup(struct media_entity *entity,
vc->input = sd->grp_id;
- if (vc->user_subdev_api || vc->inh_sensor_ctrls)
+ if (vc->user_subdev_api)
return 0;
/* Inherit V4L2 controls from the image sensor subdev. */
@@ -1601,7 +1593,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
- /* fall through */
+ fallthrough;
case V4L2_SEL_TGT_CROP_BOUNDS:
r->width = f->o_width;
r->height = f->o_height;
@@ -1888,6 +1880,7 @@ int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
return ret;
sd->entity.ops = &fimc_sd_media_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
sd->internal_ops = &fimc_capture_sd_internal_ops;
v4l2_set_subdevdata(sd, fimc);
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index cde60fbb23a8..08d1f39a914c 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -954,9 +954,11 @@ static int fimc_probe(struct platform_device *pdev)
spin_lock_init(&fimc->slock);
mutex_init(&fimc->lock);
- fimc->sysreg = fimc_get_sysreg_regmap(dev->of_node);
- if (IS_ERR(fimc->sysreg))
- return PTR_ERR(fimc->sysreg);
+ if (fimc->variant->has_isp_wb) {
+ fimc->sysreg = fimc_get_sysreg_regmap(dev->of_node);
+ if (IS_ERR(fimc->sysreg))
+ return PTR_ERR(fimc->sysreg);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fimc->regs = devm_ioremap_resource(dev, res);
@@ -1110,67 +1112,8 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
-/* Image pixel limits, similar across several FIMC HW revisions. */
-static const struct fimc_pix_limit s5p_pix_limit[4] = {
- [0] = {
- .scaler_en_w = 3264,
- .scaler_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
- },
- [1] = {
- .scaler_en_w = 4224,
- .scaler_dis_w = 8192,
- .out_rot_en_w = 1920,
- .out_rot_dis_w = 4224,
- },
- [2] = {
- .scaler_en_w = 1920,
- .scaler_dis_w = 8192,
- .out_rot_en_w = 1280,
- .out_rot_dis_w = 1920,
- },
-};
-
-static const struct fimc_variant fimc0_variant_s5pv210 = {
- .has_inp_rot = 1,
- .has_out_rot = 1,
- .has_cam_if = 1,
- .min_inp_pixsize = 16,
- .min_out_pixsize = 16,
- .hor_offs_align = 8,
- .min_vsize_align = 16,
- .pix_limit = &s5p_pix_limit[1],
-};
-
-static const struct fimc_variant fimc1_variant_s5pv210 = {
- .has_inp_rot = 1,
- .has_out_rot = 1,
- .has_cam_if = 1,
- .has_mainscaler_ext = 1,
- .min_inp_pixsize = 16,
- .min_out_pixsize = 16,
- .hor_offs_align = 1,
- .min_vsize_align = 1,
- .pix_limit = &s5p_pix_limit[2],
-};
-
-static const struct fimc_variant fimc2_variant_s5pv210 = {
- .has_cam_if = 1,
- .min_inp_pixsize = 16,
- .min_out_pixsize = 16,
- .hor_offs_align = 8,
- .min_vsize_align = 16,
- .pix_limit = &s5p_pix_limit[2],
-};
-
/* S5PV210, S5PC110 */
static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
- .variant = {
- [0] = &fimc0_variant_s5pv210,
- [1] = &fimc1_variant_s5pv210,
- [2] = &fimc2_variant_s5pv210,
- },
.num_entities = 3,
.lclk_frequency = 166000000UL,
.out_buf_count = 4,
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index d130f664a60b..e4a56232907a 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -296,11 +296,8 @@ struct fimc_m2m_device {
* @buf_index: index for managing the output DMA buffers
* @frame_count: the frame counter for statistics
* @reqbufs_count: the number of buffers requested in REQBUFS ioctl
- * @input_index: input (camera sensor) index
* @input: capture input type, grp_id of the attached subdev
* @user_subdev_api: true if subdevs are not configured by the host driver
- * @inh_sensor_ctrls: a flag indicating v4l2 controls are inherited from
- * an image sensor subdev
*/
struct fimc_vid_cap {
struct fimc_ctx *ctx;
@@ -319,10 +316,8 @@ struct fimc_vid_cap {
unsigned int frame_count;
unsigned int reqbufs_count;
bool streaming;
- int input_index;
u32 input;
bool user_subdev_api;
- bool inh_sensor_ctrls;
};
/**
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index a474014f0a0f..32ab01e89196 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -12,7 +12,6 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
@@ -756,18 +755,12 @@ static void fimc_is_debugfs_remove(struct fimc_is *is)
is->debugfs_entry = NULL;
}
-static int fimc_is_debugfs_create(struct fimc_is *is)
+static void fimc_is_debugfs_create(struct fimc_is *is)
{
- struct dentry *dentry;
-
is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
- dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
- is, &fimc_is_fops);
- if (!dentry)
- fimc_is_debugfs_remove(is);
-
- return is->debugfs_entry == NULL ? -EIO : 0;
+ debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry, is,
+ &fimc_is_fops);
}
static int fimc_is_runtime_resume(struct device *dev);
@@ -853,9 +846,7 @@ static int fimc_is_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm;
- ret = fimc_is_debugfs_create(is);
- if (ret < 0)
- goto err_sd;
+ fimc_is_debugfs_create(is);
ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME);
if (ret < 0)
@@ -868,7 +859,6 @@ static int fimc_is_probe(struct platform_device *pdev)
err_dfs:
fimc_is_debugfs_remove(is);
-err_sd:
fimc_is_unregister_subdevs(is);
err_pm:
pm_runtime_put_noidle(dev);
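
The debugfs return values are deliberately left unchecked: since the debugfs rework, debugfs_create_dir() returns an ERR_PTR on failure and every debugfs function accepts such a value as parent, so callers can simply ignore the result. A sketch (my_dev, my_dbg_fops are illustrative):

	static void my_debugfs_init(struct my_dev *d)
	{
		d->dbg_root = debugfs_create_dir("my_dev", NULL);
		/* no error checks: an ERR_PTR parent is handled inside */
		debugfs_create_file("fw_log", 0444, d->dbg_root, d,
				    &my_dbg_fops);
	}
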
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index cde0d254ec1c..a77c49b18511 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -305,8 +305,10 @@ static int fimc_isp_subdev_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ret = pm_runtime_get_sync(&is->pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(&is->pdev->dev);
return ret;
+ }
set_bit(IS_ST_PWR_ON, &is->state);
ret = fimc_is_start_firmware(is);
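
pm_runtime_get_sync() increments the device usage count even when the resume fails, so the error path must drop the reference again or the count leaks. The pattern being applied, as a sketch:

	static int my_power_on(struct device *dev)	/* illustrative */
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put(dev);	/* count was bumped anyway */
			return ret;
		}
		return 0;
	}

Newer kernels also provide pm_runtime_resume_and_get(), which drops the reference on failure internally.
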
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 9c666f663ab4..fdd0d369b192 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -471,7 +471,7 @@ static int fimc_lite_open(struct file *file)
set_bit(ST_FLITE_IN_USE, &fimc->state);
ret = pm_runtime_get_sync(&fimc->pdev->dev);
if (ret < 0)
- goto unlock;
+ goto err_pm;
ret = v4l2_fh_open(file);
if (ret < 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-reg.c b/drivers/media/platform/exynos4-is/fimc-reg.c
index 5ce2bdebd424..8764999a5fd7 100644
--- a/drivers/media/platform/exynos4-is/fimc-reg.c
+++ b/drivers/media/platform/exynos4-is/fimc-reg.c
@@ -606,6 +606,11 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
switch (source->fimc_bus_type) {
case FIMC_BUS_TYPE_ITU_601:
case FIMC_BUS_TYPE_ITU_656:
+ if (fimc_fmt_is_user_defined(f->fmt->color)) {
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ break;
+ }
+
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
if (vc->ci_fmt.code == pix_desc[i].pixelcode) {
cfg = pix_desc[i].cisrcfmt;
@@ -707,10 +712,12 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
if (source->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
+ if (vid_cap->ci_fmt.code == MEDIA_BUS_FMT_JPEG_1X8)
+ cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
break;
case FIMC_BUS_TYPE_LCD_WRITEBACK_A:
cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
- /* fall through */
+ fallthrough;
case FIMC_BUS_TYPE_ISP_WRITEBACK:
if (fimc->variant->has_isp_wb)
cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 16dd660137a8..e636c33e847b 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -92,7 +93,7 @@ static void fimc_pipeline_prepare(struct fimc_pipeline *p,
switch (sd->grp_id) {
case GRP_ID_SENSOR:
sensor = sd;
- /* fall through */
+ fallthrough;
case GRP_ID_FIMC_IS_SENSOR:
p->subdevs[IDX_SENSOR] = sd;
break;
@@ -289,11 +290,26 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
{ IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
};
struct fimc_pipeline *p = to_fimc_pipeline(ep);
- struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
enum fimc_subdev_index sd_id;
int i, ret = 0;
if (p->subdevs[IDX_SENSOR] == NULL) {
+ struct fimc_md *fmd;
+ struct v4l2_subdev *sd = p->subdevs[IDX_CSIS];
+
+ if (!sd)
+ sd = p->subdevs[IDX_FIMC];
+
+ if (!sd) {
+ /*
+ * If neither CSIS nor FIMC was set up,
+ * it's impossible to have any sensors
+ */
+ return -ENODEV;
+ }
+
+ fmd = entity_to_fimc_mdev(&sd->entity);
+
if (!fmd->user_subdev_api) {
/*
* Sensor must be already discovered if we
@@ -379,21 +395,15 @@ static void fimc_md_pipelines_free(struct fimc_md *fmd)
}
}
-/* Parse port node and register as a sub-device any sensor specified there. */
-static int fimc_md_parse_port_node(struct fimc_md *fmd,
- struct device_node *port,
- unsigned int index)
+static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
+ struct device_node *ep)
{
+ int index = fmd->num_sensors;
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
- struct device_node *rem, *ep, *np;
+ struct device_node *rem, *np;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
int ret;
- /* Assume here a port node can have only one endpoint node. */
- ep = of_get_next_child(port, NULL);
- if (!ep)
- return 0;
-
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &endpoint);
if (ret) {
of_node_put(ep);
@@ -467,13 +477,28 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return 0;
}
+/* Parse port node and register as a sub-device any sensor specified there. */
+static int fimc_md_parse_port_node(struct fimc_md *fmd,
+ struct device_node *port)
+{
+ struct device_node *ep;
+ int ret;
+
+ for_each_child_of_node(port, ep) {
+ ret = fimc_md_parse_one_endpoint(fmd, ep);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
/* Register all SoC external sub-devices */
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
{
struct device_node *parent = fmd->pdev->dev.of_node;
struct device_node *ports = NULL;
struct device_node *node;
- int index = 0;
int ret;
/*
@@ -484,8 +509,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
return -ENXIO;
ret = pm_runtime_get_sync(fmd->pmf);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(fmd->pmf);
return ret;
+ }
fmd->num_sensors = 0;
@@ -500,13 +527,12 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
if (!port)
continue;
- ret = fimc_md_parse_port_node(fmd, port, index);
+ ret = fimc_md_parse_port_node(fmd, port);
of_node_put(port);
if (ret < 0) {
of_node_put(node);
goto cleanup;
}
- index++;
}
/* Attach sensors listed in the parallel-ports node */
@@ -515,12 +541,11 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
goto rpm_put;
for_each_child_of_node(ports, node) {
- ret = fimc_md_parse_port_node(fmd, node, index);
+ ret = fimc_md_parse_port_node(fmd, node);
if (ret < 0) {
of_node_put(node);
goto cleanup;
}
- index++;
}
of_node_put(ports);
@@ -1254,28 +1279,6 @@ static ssize_t fimc_md_sysfs_store(struct device *dev,
static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
fimc_md_sysfs_show, fimc_md_sysfs_store);
-static int fimc_md_get_pinctrl(struct fimc_md *fmd)
-{
- struct device *dev = &fmd->pdev->dev;
- struct fimc_pinctrl *pctl = &fmd->pinctl;
-
- pctl->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(pctl->pinctrl))
- return PTR_ERR(pctl->pinctrl);
-
- pctl->state_default = pinctrl_lookup_state(pctl->pinctrl,
- PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pctl->state_default))
- return PTR_ERR(pctl->state_default);
-
- pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
- PINCTRL_STATE_IDLE);
- if (IS_ERR(pctl->state_idle))
- return PTR_ERR(pctl->state_idle);
-
- return 0;
-}
-
static int cam_clk_prepare(struct clk_hw *hw)
{
struct cam_clk *camclk = to_cam_clk(hw);
@@ -1431,6 +1434,7 @@ static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_device *v4l2_dev;
+ struct pinctrl *pinctrl;
struct fimc_md *fmd;
int ret;
@@ -1467,8 +1471,9 @@ static int fimc_md_probe(struct platform_device *pdev)
if (ret)
goto err_v4l2dev;
- ret = fimc_md_get_pinctrl(fmd);
- if (ret < 0) {
+ pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get pinctrl: %d\n", ret);
goto err_clk;
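
The dedicated pinctrl state handling can go because the driver core already selects the "default" pinctrl state before probe runs (pinctrl_bind_pins()), so the driver only needs to confirm that pinctrl is available, chiefly to honour probe deferral. A sketch:

	static int my_probe(struct platform_device *pdev)	/* illustrative */
	{
		struct pinctrl *pinctrl = devm_pinctrl_get(&pdev->dev);

		if (IS_ERR(pinctrl))
			return PTR_ERR(pinctrl);	/* may be -EPROBE_DEFER */

		/* ... rest of probe; no manual state switching needed ... */
		return 0;
	}
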
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index 4b8f9ac52ebc..9447fafe23c6 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -27,8 +27,6 @@
#define FIMC_IS_OF_NODE_NAME "fimc-is"
#define CSIS_OF_NODE_NAME "csis"
-#define PINCTRL_STATE_IDLE "idle"
-
#define FIMC_MAX_SENSORS 4
#define FIMC_MAX_CAMCLKS 2
#define DEFAULT_SENSOR_CLK_FREQ 24000000U
@@ -109,9 +107,6 @@ struct cam_clk {
* @media_dev: top level media device
* @v4l2_dev: top level v4l2_device holding up the subdevs
* @pdev: platform device this media device is hooked up into
- * @pinctrl: camera port pinctrl handle
- * @state_default: pinctrl default state handle
- * @state_idle: pinctrl idle state handle
* @cam_clk_provider: CAMCLK clock provider structure
* @user_subdev_api: true if subdevs are not configured by the host driver
* @slock: spinlock protecting @sensor array
@@ -131,12 +126,6 @@ struct fimc_md {
struct v4l2_device v4l2_dev;
struct platform_device *pdev;
- struct fimc_pinctrl {
- struct pinctrl *pinctrl;
- struct pinctrl_state *state_default;
- struct pinctrl_state *state_idle;
- } pinctl;
-
struct cam_clk_provider {
struct clk *clks[FIMC_MAX_CAMCLKS];
struct clk_onecell_data clk_data;
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 540151bbf58f..1aac167abb17 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -510,8 +510,10 @@ static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
s5pcsis_clear_counters(state);
ret = pm_runtime_get_sync(&state->pdev->dev);
- if (ret && ret != 1)
+ if (ret && ret != 1) {
+ pm_runtime_put_noidle(&state->pdev->dev);
return ret;
+ }
}
mutex_lock(&state->lock);
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 84633a3b8475..4f2a0f992905 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -32,7 +32,7 @@
#define VIU_VERSION "0.5.1"
/* Allow building this driver with COMPILE_TEST */
-#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
+#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE) && !defined(CONFIG_M68K)
#define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
#define in_be32(a) ioread32be((void __iomem *)a)
#endif
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 58b9915ac7a4..00f623d62c96 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -497,6 +497,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
if (cam == NULL)
goto out;
+ pci_set_drvdata(pdev, cam);
cam->pdev = pdev;
mcam = &cam->mcam;
mcam->chip_id = MCAM_CAFE;
@@ -592,8 +593,7 @@ static void cafe_shutdown(struct cafe_camera *cam)
static void cafe_pci_remove(struct pci_dev *pdev)
{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
+ struct cafe_camera *cam = pci_get_drvdata(pdev);
if (cam == NULL) {
printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
@@ -609,8 +609,7 @@ static void cafe_pci_remove(struct pci_dev *pdev)
*/
static int __maybe_unused cafe_pci_suspend(struct device *dev)
{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
+ struct cafe_camera *cam = dev_get_drvdata(dev);
mccic_suspend(&cam->mcam);
return 0;
@@ -619,8 +618,7 @@ static int __maybe_unused cafe_pci_suspend(struct device *dev)
static int __maybe_unused cafe_pci_resume(struct device *dev)
{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
+ struct cafe_camera *cam = dev_get_drvdata(dev);
cafe_ctlr_init(&cam->mcam);
return mccic_resume(&cam->mcam);
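
The probe/remove simplification stores the driver structure itself as drvdata, so the PM and remove callbacks no longer have to go through the v4l2_device back-pointer. A sketch with illustrative names:

	static int my_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
	{
		struct my_cam *cam = kzalloc(sizeof(*cam), GFP_KERNEL);

		if (!cam)
			return -ENOMEM;
		pci_set_drvdata(pdev, cam);	/* the struct itself */
		return 0;
	}

	static void my_pci_remove(struct pci_dev *pdev)
	{
		struct my_cam *cam = pci_get_drvdata(pdev);

		kfree(cam);
	}
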
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 3d4242b8182b..c012fd2e1d29 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
@@ -388,7 +389,7 @@ static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
dma_free_coherent(cam->dev, cam->dma_buf_size,
cam->dma_bufs[0], cam->dma_handles[0]);
cam->nbufs = 0;
- /* fall-through */
+ fallthrough;
case 0:
cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
return -ENOMEM;
@@ -438,9 +439,9 @@ static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
/*
* Copy data out to user space in the vmalloc case
*/
-static void mcam_frame_tasklet(unsigned long data)
+static void mcam_frame_tasklet(struct tasklet_struct *t)
{
- struct mcam_camera *cam = (struct mcam_camera *) data;
+ struct mcam_camera *cam = from_tasklet(cam, t, s_tasklet);
int i;
unsigned long flags;
struct mcam_vb_buffer *buf;
@@ -895,30 +896,6 @@ static void mcam_ctlr_power_down(struct mcam_camera *cam)
/* ---------------------------------------------------------------------- */
/*
- * Controller clocks.
- */
-static void mcam_clk_enable(struct mcam_camera *mcam)
-{
- unsigned int i;
-
- for (i = 0; i < NR_MCAM_CLK; i++) {
- if (!IS_ERR(mcam->clk[i]))
- clk_prepare_enable(mcam->clk[i]);
- }
-}
-
-static void mcam_clk_disable(struct mcam_camera *mcam)
-{
- int i;
-
- for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
- if (!IS_ERR(mcam->clk[i]))
- clk_disable_unprepare(mcam->clk[i]);
- }
-}
-
-/* ---------------------------------------------------------------------- */
-/*
* Master sensor clock.
*/
static int mclk_prepare(struct clk_hw *hw)
@@ -1323,8 +1300,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
break;
case B_vmalloc:
#ifdef MCAM_MODE_VMALLOC
- tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
- (unsigned long) cam);
+ tasklet_setup(&cam->s_tasklet, mcam_frame_tasklet);
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_vmalloc_memops;
cam->dma_setup = mcam_ctlr_dma_vmalloc;
@@ -1633,7 +1609,7 @@ static int mcam_v4l_open(struct file *filp)
ret = sensor_call(cam, core, s_power, 1);
if (ret)
goto out;
- mcam_clk_enable(cam);
+ pm_runtime_get_sync(cam->dev);
__mcam_cam_reset(cam);
mcam_set_config_needed(cam, 1);
}
@@ -1656,7 +1632,7 @@ static int mcam_v4l_release(struct file *filp)
if (last_open) {
mcam_disable_mipi(cam);
sensor_call(cam, core, s_power, 0);
- mcam_clk_disable(cam);
+ pm_runtime_put(cam->dev);
if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
mcam_free_dma_bufs(cam);
}
@@ -1977,7 +1953,6 @@ void mccic_suspend(struct mcam_camera *cam)
mcam_ctlr_stop_dma(cam);
sensor_call(cam, core, s_power, 0);
- mcam_clk_disable(cam);
cam->state = cstate;
}
mutex_unlock(&cam->s_mutex);
@@ -1990,7 +1965,6 @@ int mccic_resume(struct mcam_camera *cam)
mutex_lock(&cam->s_mutex);
if (!list_empty(&cam->vdev.fh_list)) {
- mcam_clk_enable(cam);
ret = sensor_call(cam, core, s_power, 1);
if (ret) {
mutex_unlock(&cam->s_mutex);
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 92b92255dac6..cd902b180669 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/pm.h>
@@ -47,49 +48,6 @@ static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
}
/*
- * A silly little infrastructure so we can keep track of our devices.
- * Chances are that we will never have more than one of them, but
- * the Armada 610 *does* have two controllers...
- */
-
-static LIST_HEAD(mmpcam_devices);
-static struct mutex mmpcam_devices_lock;
-
-static void mmpcam_add_device(struct mmp_camera *cam)
-{
- mutex_lock(&mmpcam_devices_lock);
- list_add(&cam->devlist, &mmpcam_devices);
- mutex_unlock(&mmpcam_devices_lock);
-}
-
-static void mmpcam_remove_device(struct mmp_camera *cam)
-{
- mutex_lock(&mmpcam_devices_lock);
- list_del(&cam->devlist);
- mutex_unlock(&mmpcam_devices_lock);
-}
-
-/*
- * Platform dev remove passes us a platform_device, and there's
- * no handy unused drvdata to stash a backpointer in. So just
- * dig it out of our list.
- */
-static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
-{
- struct mmp_camera *cam;
-
- mutex_lock(&mmpcam_devices_lock);
- list_for_each_entry(cam, &mmpcam_devices, devlist) {
- if (cam->pdev == pdev) {
- mutex_unlock(&mmpcam_devices_lock);
- return cam;
- }
- }
- mutex_unlock(&mmpcam_devices_lock);
- return NULL;
-}
-
-/*
* calc the dphy register values
* There are three dphy registers being used.
* dphy[0] - CSI2_DPHY3
@@ -227,6 +185,7 @@ static int mmpcam_probe(struct platform_device *pdev)
cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
if (cam == NULL)
return -ENOMEM;
+ platform_set_drvdata(pdev, cam);
cam->pdev = pdev;
INIT_LIST_HEAD(&cam->devlist);
@@ -313,11 +272,11 @@ static int mmpcam_probe(struct platform_device *pdev)
cam->irq = res->start;
ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
"mmp-camera", mcam);
- if (ret == 0) {
- mmpcam_add_device(cam);
- return 0;
- }
+ if (ret)
+ goto out;
+ pm_runtime_enable(&pdev->dev);
+ return 0;
out:
fwnode_handle_put(mcam->asd.match.fwnode);
mccic_shutdown(mcam);
@@ -330,14 +289,14 @@ static int mmpcam_remove(struct mmp_camera *cam)
{
struct mcam_camera *mcam = &cam->mcam;
- mmpcam_remove_device(cam);
mccic_shutdown(mcam);
+ pm_runtime_force_suspend(mcam->dev);
return 0;
}
static int mmpcam_platform_remove(struct platform_device *pdev)
{
- struct mmp_camera *cam = mmpcam_find_device(pdev);
+ struct mmp_camera *cam = platform_get_drvdata(pdev);
if (cam == NULL)
return -ENODEV;
@@ -347,26 +306,57 @@ static int mmpcam_platform_remove(struct platform_device *pdev)
/*
* Suspend/resume support.
*/
-#ifdef CONFIG_PM
-static int mmpcam_suspend(struct platform_device *pdev, pm_message_t state)
+static int mmpcam_runtime_resume(struct device *dev)
+{
+ struct mmp_camera *cam = dev_get_drvdata(dev);
+ struct mcam_camera *mcam = &cam->mcam;
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_prepare_enable(mcam->clk[i]);
+ }
+
+ return 0;
+}
+
+static int mmpcam_runtime_suspend(struct device *dev)
+{
+ struct mmp_camera *cam = dev_get_drvdata(dev);
+ struct mcam_camera *mcam = &cam->mcam;
+ int i;
+
+ for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_disable_unprepare(mcam->clk[i]);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mmpcam_suspend(struct device *dev)
{
- struct mmp_camera *cam = mmpcam_find_device(pdev);
+ struct mmp_camera *cam = dev_get_drvdata(dev);
- if (state.event != PM_EVENT_SUSPEND)
- return 0;
- mccic_suspend(&cam->mcam);
+ if (!pm_runtime_suspended(dev))
+ mccic_suspend(&cam->mcam);
return 0;
}
-static int mmpcam_resume(struct platform_device *pdev)
+static int __maybe_unused mmpcam_resume(struct device *dev)
{
- struct mmp_camera *cam = mmpcam_find_device(pdev);
+ struct mmp_camera *cam = dev_get_drvdata(dev);
- return mccic_resume(&cam->mcam);
+ if (!pm_runtime_suspended(dev))
+ return mccic_resume(&cam->mcam);
+ return 0;
}
-#endif
+static const struct dev_pm_ops mmpcam_pm_ops = {
+ SET_RUNTIME_PM_OPS(mmpcam_runtime_suspend, mmpcam_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mmpcam_suspend, mmpcam_resume)
+};
static const struct of_device_id mmpcam_of_match[] = {
{ .compatible = "marvell,mmp2-ccic", },
@@ -377,32 +367,11 @@ MODULE_DEVICE_TABLE(of, mmpcam_of_match);
static struct platform_driver mmpcam_driver = {
.probe = mmpcam_probe,
.remove = mmpcam_platform_remove,
-#ifdef CONFIG_PM
- .suspend = mmpcam_suspend,
- .resume = mmpcam_resume,
-#endif
.driver = {
.name = "mmp-camera",
.of_match_table = of_match_ptr(mmpcam_of_match),
+ .pm = &mmpcam_pm_ops,
}
};
-
-static int __init mmpcam_init_module(void)
-{
- mutex_init(&mmpcam_devices_lock);
- return platform_driver_register(&mmpcam_driver);
-}
-
-static void __exit mmpcam_exit_module(void)
-{
- platform_driver_unregister(&mmpcam_driver);
- /*
- * platform_driver_unregister() should have emptied the list
- */
- if (!list_empty(&mmpcam_devices))
- printk(KERN_ERR "mmp_camera leaving devices behind\n");
-}
-
-module_init(mmpcam_init_module);
-module_exit(mmpcam_exit_module);
+module_platform_driver(mmpcam_driver);
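
The legacy suspend/resume hooks become a dev_pm_ops combining runtime PM (pure clock gating) with system sleep, and the hand-rolled module_init()/module_exit() pair collapses into module_platform_driver(). A sketch of the resulting skeleton (my_* names are illustrative):

	static const struct dev_pm_ops my_pm_ops = {
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
		SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	};

	static struct platform_driver my_driver = {
		.probe	= my_probe,
		.remove	= my_remove,
		.driver	= {
			.name	= "my-camera",
			.pm	= &my_pm_ops,
		},
	};
	module_platform_driver(my_driver);
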
diff --git a/drivers/media/platform/mtk-jpeg/Makefile b/drivers/media/platform/mtk-jpeg/Makefile
index 92a4fc046bfe..76c33aad0f3f 100644
--- a/drivers/media/platform/mtk-jpeg/Makefile
+++ b/drivers/media/platform/mtk-jpeg/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-mtk_jpeg-objs := mtk_jpeg_core.o mtk_jpeg_hw.o mtk_jpeg_parse.o
+mtk_jpeg-objs := mtk_jpeg_core.o \
+ mtk_jpeg_dec_hw.o \
+ mtk_jpeg_dec_parse.o \
+ mtk_jpeg_enc_hw.o
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk_jpeg.o
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 61fed1e35a00..227245ccaedc 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2016 MediaTek Inc.
* Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
* Rick Chang <rick.chang@mediatek.com>
+ * Xia Jiang <xia.jiang@mediatek.com>
*/
#include <linux/clk.h>
@@ -23,15 +24,64 @@
#include <media/videobuf2-dma-contig.h>
#include <soc/mediatek/smi.h>
-#include "mtk_jpeg_hw.h"
+#include "mtk_jpeg_enc_hw.h"
+#include "mtk_jpeg_dec_hw.h"
#include "mtk_jpeg_core.h"
-#include "mtk_jpeg_parse.h"
+#include "mtk_jpeg_dec_parse.h"
-static struct mtk_jpeg_fmt mtk_jpeg_formats[] = {
+static struct mtk_jpeg_fmt mtk_jpeg_enc_formats[] = {
{
.fourcc = V4L2_PIX_FMT_JPEG,
.colplanes = 1,
- .flags = MTK_JPEG_FMT_FLAG_DEC_OUTPUT,
+ .flags = MTK_JPEG_FMT_FLAG_CAPTURE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .hw_format = JPEG_ENC_YUV_FORMAT_NV12,
+ .h_sample = {4, 4},
+ .v_sample = {4, 2},
+ .colplanes = 2,
+ .h_align = 4,
+ .v_align = 4,
+ .flags = MTK_JPEG_FMT_FLAG_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .hw_format = JEPG_ENC_YUV_FORMAT_NV21,
+ .h_sample = {4, 4},
+ .v_sample = {4, 2},
+ .colplanes = 2,
+ .h_align = 4,
+ .v_align = 4,
+ .flags = MTK_JPEG_FMT_FLAG_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .hw_format = JPEG_ENC_YUV_FORMAT_YUYV,
+ .h_sample = {8},
+ .v_sample = {4},
+ .colplanes = 1,
+ .h_align = 5,
+ .v_align = 3,
+ .flags = MTK_JPEG_FMT_FLAG_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .hw_format = JPEG_ENC_YUV_FORMAT_YVYU,
+ .h_sample = {8},
+ .v_sample = {4},
+ .colplanes = 1,
+ .h_align = 5,
+ .v_align = 3,
+ .flags = MTK_JPEG_FMT_FLAG_OUTPUT,
+ },
+};
+
+static struct mtk_jpeg_fmt mtk_jpeg_dec_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .colplanes = 1,
+ .flags = MTK_JPEG_FMT_FLAG_OUTPUT,
},
{
.fourcc = V4L2_PIX_FMT_YUV420M,
@@ -40,7 +90,7 @@ static struct mtk_jpeg_fmt mtk_jpeg_formats[] = {
.colplanes = 3,
.h_align = 5,
.v_align = 4,
- .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ .flags = MTK_JPEG_FMT_FLAG_CAPTURE,
},
{
.fourcc = V4L2_PIX_FMT_YUV422M,
@@ -49,27 +99,27 @@ static struct mtk_jpeg_fmt mtk_jpeg_formats[] = {
.colplanes = 3,
.h_align = 5,
.v_align = 3,
- .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ .flags = MTK_JPEG_FMT_FLAG_CAPTURE,
},
};
-#define MTK_JPEG_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_formats)
-
-enum {
- MTK_JPEG_BUF_FLAGS_INIT = 0,
- MTK_JPEG_BUF_FLAGS_LAST_FRAME = 1,
-};
+#define MTK_JPEG_ENC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_enc_formats)
+#define MTK_JPEG_DEC_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_dec_formats)
struct mtk_jpeg_src_buf {
struct vb2_v4l2_buffer b;
struct list_head list;
- int flags;
struct mtk_jpeg_dec_param dec_param;
};
static int debug;
module_param(debug, int, 0644);
+static inline struct mtk_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_jpeg_ctx, ctrl_hdl);
+}
+
static inline struct mtk_jpeg_ctx *mtk_jpeg_fh_to_ctx(struct v4l2_fh *fh)
{
return container_of(fh, struct mtk_jpeg_ctx, fh);
@@ -86,14 +136,61 @@ static int mtk_jpeg_querycap(struct file *file, void *priv,
{
struct mtk_jpeg_dev *jpeg = video_drvdata(file);
- strscpy(cap->driver, MTK_JPEG_NAME " decoder", sizeof(cap->driver));
- strscpy(cap->card, MTK_JPEG_NAME " decoder", sizeof(cap->card));
+ strscpy(cap->driver, jpeg->variant->dev_name, sizeof(cap->driver));
+ strscpy(cap->card, jpeg->variant->dev_name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
dev_name(jpeg->dev));
return 0;
}
+static int vidioc_jpeg_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_RESTART_INTERVAL:
+ ctx->restart_interval = ctrl->val;
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->enc_quality = ctrl->val;
+ break;
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ ctx->enable_exif = ctrl->val & V4L2_JPEG_ACTIVE_MARKER_APP1;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mtk_jpeg_enc_ctrl_ops = {
+ .s_ctrl = vidioc_jpeg_enc_s_ctrl,
+};
+
+static int mtk_jpeg_enc_ctrls_setup(struct mtk_jpeg_ctx *ctx)
+{
+ const struct v4l2_ctrl_ops *ops = &mtk_jpeg_enc_ctrl_ops;
+ struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl;
+
+ v4l2_ctrl_handler_init(handler, 3);
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100,
+ 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 48,
+ 100, 1, 90);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_JPEG_ACTIVE_MARKER, 0,
+ V4L2_JPEG_ACTIVE_MARKER_APP1, 0, 0);
+
+ if (handler->error) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ return handler->error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
+
static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
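
For the new encoder controls to be reachable through the standard control ioctls, the handler set up above has to be attached to the context's file handle at open time. A sketch of that wiring (my_open_ctx is an illustrative wrapper, not from this patch):

	static int my_open_ctx(struct mtk_jpeg_ctx *ctx,
			       struct video_device *vdev)
	{
		int ret = mtk_jpeg_enc_ctrls_setup(ctx);

		if (ret)
			return ret;

		v4l2_fh_init(&ctx->fh, vdev);
		/* route VIDIOC_{G,S,TRY}_EXT_CTRLS to these controls */
		ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
		v4l2_fh_add(&ctx->fh);
		return 0;
	}
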
@@ -118,15 +215,23 @@ static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
- MTK_JPEG_FMT_FLAG_DEC_CAPTURE);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+
+ return mtk_jpeg_enum_fmt(jpeg->variant->formats,
+ jpeg->variant->num_formats, f,
+ MTK_JPEG_FMT_FLAG_CAPTURE);
}
static int mtk_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
- MTK_JPEG_FMT_FLAG_DEC_OUTPUT);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+
+ return mtk_jpeg_enum_fmt(jpeg->variant->formats,
+ jpeg->variant->num_formats, f,
+ MTK_JPEG_FMT_FLAG_OUTPUT);
}
static struct mtk_jpeg_q_data *mtk_jpeg_get_q_data(struct mtk_jpeg_ctx *ctx,
@@ -137,126 +242,63 @@ static struct mtk_jpeg_q_data *mtk_jpeg_get_q_data(struct mtk_jpeg_ctx *ctx,
return &ctx->cap_q;
}
-static struct mtk_jpeg_fmt *mtk_jpeg_find_format(struct mtk_jpeg_ctx *ctx,
- u32 pixelformat,
- unsigned int fmt_type)
+static struct mtk_jpeg_fmt *
+mtk_jpeg_find_format(struct mtk_jpeg_fmt *mtk_jpeg_formats, int num_formats,
+ u32 pixelformat, unsigned int fmt_type)
{
- unsigned int k, fmt_flag;
-
- fmt_flag = (fmt_type == MTK_JPEG_FMT_TYPE_OUTPUT) ?
- MTK_JPEG_FMT_FLAG_DEC_OUTPUT :
- MTK_JPEG_FMT_FLAG_DEC_CAPTURE;
+ unsigned int k;
+ struct mtk_jpeg_fmt *fmt;
- for (k = 0; k < MTK_JPEG_NUM_FORMATS; k++) {
- struct mtk_jpeg_fmt *fmt = &mtk_jpeg_formats[k];
+ for (k = 0; k < num_formats; k++) {
+ fmt = &mtk_jpeg_formats[k];
- if (fmt->fourcc == pixelformat && fmt->flags & fmt_flag)
+ if (fmt->fourcc == pixelformat && fmt->flags & fmt_type)
return fmt;
}
return NULL;
}
-static void mtk_jpeg_bound_align_image(u32 *w, unsigned int wmin,
- unsigned int wmax, unsigned int walign,
- u32 *h, unsigned int hmin,
- unsigned int hmax, unsigned int halign)
+static int mtk_jpeg_try_fmt_mplane(struct v4l2_pix_format_mplane *pix_mp,
+ struct mtk_jpeg_fmt *fmt)
{
- int width, height, w_step, h_step;
-
- width = *w;
- height = *h;
- w_step = 1 << walign;
- h_step = 1 << halign;
-
- v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
- if (*w < width && (*w + w_step) <= wmax)
- *w += w_step;
- if (*h < height && (*h + h_step) <= hmax)
- *h += h_step;
-}
-
-static void mtk_jpeg_adjust_fmt_mplane(struct mtk_jpeg_ctx *ctx,
- struct v4l2_format *f)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct mtk_jpeg_q_data *q_data;
- int i;
-
- q_data = mtk_jpeg_get_q_data(ctx, f->type);
-
- pix_mp->width = q_data->w;
- pix_mp->height = q_data->h;
- pix_mp->pixelformat = q_data->fmt->fourcc;
- pix_mp->num_planes = q_data->fmt->colplanes;
-
- for (i = 0; i < pix_mp->num_planes; i++) {
- pix_mp->plane_fmt[i].bytesperline = q_data->bytesperline[i];
- pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i];
- }
-}
-
-static int mtk_jpeg_try_fmt_mplane(struct v4l2_format *f,
- struct mtk_jpeg_fmt *fmt,
- struct mtk_jpeg_ctx *ctx, int q_type)
-{
- struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct mtk_jpeg_dev *jpeg = ctx->jpeg;
int i;
- memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
pix_mp->field = V4L2_FIELD_NONE;
- if (ctx->state != MTK_JPEG_INIT) {
- mtk_jpeg_adjust_fmt_mplane(ctx, f);
- goto end;
- }
-
pix_mp->num_planes = fmt->colplanes;
pix_mp->pixelformat = fmt->fourcc;
- if (q_type == MTK_JPEG_FMT_TYPE_OUTPUT) {
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[0];
- mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
- MTK_JPEG_MAX_WIDTH, 0,
- &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
- MTK_JPEG_MAX_HEIGHT, 0);
+ pix_mp->height = clamp(pix_mp->height, MTK_JPEG_MIN_HEIGHT,
+ MTK_JPEG_MAX_HEIGHT);
+ pix_mp->width = clamp(pix_mp->width, MTK_JPEG_MIN_WIDTH,
+ MTK_JPEG_MAX_WIDTH);
- memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
pfmt->bytesperline = 0;
/* Source size must be aligned to 128 */
- pfmt->sizeimage = mtk_jpeg_align(pfmt->sizeimage, 128);
+ pfmt->sizeimage = round_up(pfmt->sizeimage, 128);
if (pfmt->sizeimage == 0)
pfmt->sizeimage = MTK_JPEG_DEFAULT_SIZEIMAGE;
- goto end;
+ return 0;
}
- /* type is MTK_JPEG_FMT_TYPE_CAPTURE */
- mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
- MTK_JPEG_MAX_WIDTH, fmt->h_align,
- &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
- MTK_JPEG_MAX_HEIGHT, fmt->v_align);
+ /* other fourcc */
+ pix_mp->height = clamp(round_up(pix_mp->height, fmt->v_align),
+ MTK_JPEG_MIN_HEIGHT, MTK_JPEG_MAX_HEIGHT);
+ pix_mp->width = clamp(round_up(pix_mp->width, fmt->h_align),
+ MTK_JPEG_MIN_WIDTH, MTK_JPEG_MAX_WIDTH);
for (i = 0; i < fmt->colplanes; i++) {
struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
u32 stride = pix_mp->width * fmt->h_sample[i] / 4;
u32 h = pix_mp->height * fmt->v_sample[i] / 4;
- memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
pfmt->bytesperline = stride;
pfmt->sizeimage = stride * h;
}
-end:
- v4l2_dbg(2, debug, &jpeg->v4l2_dev, "wxh:%ux%u\n",
- pix_mp->width, pix_mp->height);
- for (i = 0; i < pix_mp->num_planes; i++) {
- v4l2_dbg(2, debug, &jpeg->v4l2_dev,
- "plane[%d] bpl=%u, size=%u\n",
- i,
- pix_mp->plane_fmt[i].bytesperline,
- pix_mp->plane_fmt[i].sizeimage);
- }
return 0;
}
@@ -276,16 +318,15 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
q_data = mtk_jpeg_get_q_data(ctx, f->type);
- memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
- pix_mp->width = q_data->w;
- pix_mp->height = q_data->h;
+ pix_mp->width = q_data->pix_mp.width;
+ pix_mp->height = q_data->pix_mp.height;
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->pixelformat = q_data->fmt->fourcc;
pix_mp->num_planes = q_data->fmt->colplanes;
- pix_mp->colorspace = ctx->colorspace;
- pix_mp->ycbcr_enc = ctx->ycbcr_enc;
- pix_mp->xfer_func = ctx->xfer_func;
- pix_mp->quantization = ctx->quantization;
+ pix_mp->colorspace = q_data->pix_mp.colorspace;
+ pix_mp->ycbcr_enc = q_data->pix_mp.ycbcr_enc;
+ pix_mp->xfer_func = q_data->pix_mp.xfer_func;
+ pix_mp->quantization = q_data->pix_mp.quantization;
v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) g_fmt:%c%c%c%c wxh:%ux%u\n",
f->type,
@@ -298,9 +339,8 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
for (i = 0; i < pix_mp->num_planes; i++) {
struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
- pfmt->bytesperline = q_data->bytesperline[i];
- pfmt->sizeimage = q_data->sizeimage[i];
- memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+ pfmt->bytesperline = q_data->pix_mp.plane_fmt[i].bytesperline;
+ pfmt->sizeimage = q_data->pix_mp.plane_fmt[i].sizeimage;
v4l2_dbg(1, debug, &jpeg->v4l2_dev,
"plane[%d] bpl=%u, size=%u\n",
@@ -315,10 +355,13 @@ static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
- fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
- MTK_JPEG_FMT_TYPE_CAPTURE);
+ fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
+ f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_FLAG_CAPTURE);
if (!fmt)
fmt = ctx->cap_q.fmt;
@@ -329,17 +372,25 @@ static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
(fmt->fourcc >> 16 & 0xff),
(fmt->fourcc >> 24 & 0xff));
- return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_CAPTURE);
+ if (ctx->state != MTK_JPEG_INIT) {
+ mtk_jpeg_g_fmt_vid_mplane(file, priv, f);
+ return 0;
+ }
+
+ return mtk_jpeg_try_fmt_mplane(&f->fmt.pix_mp, fmt);
}
static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
- fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
- MTK_JPEG_FMT_TYPE_OUTPUT);
+ fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
+ f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_FLAG_OUTPUT);
if (!fmt)
fmt = ctx->out_q.fmt;
@@ -350,17 +401,21 @@ static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
(fmt->fourcc >> 16 & 0xff),
(fmt->fourcc >> 24 & 0xff));
- return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_OUTPUT);
+ if (ctx->state != MTK_JPEG_INIT) {
+ mtk_jpeg_g_fmt_vid_mplane(file, priv, f);
+ return 0;
+ }
+
+ return mtk_jpeg_try_fmt_mplane(&f->fmt.pix_mp, fmt);
}
static int mtk_jpeg_s_fmt_mplane(struct mtk_jpeg_ctx *ctx,
- struct v4l2_format *f)
+ struct v4l2_format *f, unsigned int fmt_type)
{
struct vb2_queue *vq;
struct mtk_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- unsigned int f_type;
int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -374,16 +429,17 @@ static int mtk_jpeg_s_fmt_mplane(struct mtk_jpeg_ctx *ctx,
return -EBUSY;
}
- f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
- MTK_JPEG_FMT_TYPE_OUTPUT : MTK_JPEG_FMT_TYPE_CAPTURE;
-
- q_data->fmt = mtk_jpeg_find_format(ctx, pix_mp->pixelformat, f_type);
- q_data->w = pix_mp->width;
- q_data->h = pix_mp->height;
- ctx->colorspace = pix_mp->colorspace;
- ctx->ycbcr_enc = pix_mp->ycbcr_enc;
- ctx->xfer_func = pix_mp->xfer_func;
- ctx->quantization = pix_mp->quantization;
+ q_data->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
+ pix_mp->pixelformat, fmt_type);
+ q_data->pix_mp.width = pix_mp->width;
+ q_data->pix_mp.height = pix_mp->height;
+ q_data->enc_crop_rect.width = pix_mp->width;
+ q_data->enc_crop_rect.height = pix_mp->height;
+ q_data->pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+ q_data->pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ q_data->pix_mp.xfer_func = V4L2_XFER_FUNC_SRGB;
+ q_data->pix_mp.quantization = V4L2_QUANTIZATION_FULL_RANGE;
v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) s_fmt:%c%c%c%c wxh:%ux%u\n",
f->type,
@@ -391,15 +447,18 @@ static int mtk_jpeg_s_fmt_mplane(struct mtk_jpeg_ctx *ctx,
(q_data->fmt->fourcc >> 8 & 0xff),
(q_data->fmt->fourcc >> 16 & 0xff),
(q_data->fmt->fourcc >> 24 & 0xff),
- q_data->w, q_data->h);
+ q_data->pix_mp.width, q_data->pix_mp.height);
for (i = 0; i < q_data->fmt->colplanes; i++) {
- q_data->bytesperline[i] = pix_mp->plane_fmt[i].bytesperline;
- q_data->sizeimage[i] = pix_mp->plane_fmt[i].sizeimage;
+ q_data->pix_mp.plane_fmt[i].bytesperline =
+ pix_mp->plane_fmt[i].bytesperline;
+ q_data->pix_mp.plane_fmt[i].sizeimage =
+ pix_mp->plane_fmt[i].sizeimage;
v4l2_dbg(1, debug, &jpeg->v4l2_dev,
"plane[%d] bpl=%u, size=%u\n",
- i, q_data->bytesperline[i], q_data->sizeimage[i]);
+ i, q_data->pix_mp.plane_fmt[i].bytesperline,
+ q_data->pix_mp.plane_fmt[i].sizeimage);
}
return 0;
@@ -414,7 +473,8 @@ static int mtk_jpeg_s_fmt_vid_out_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ MTK_JPEG_FMT_FLAG_OUTPUT);
}
static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
@@ -426,7 +486,8 @@ static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ MTK_JPEG_FMT_FLAG_CAPTURE);
}
static void mtk_jpeg_queue_src_chg_event(struct mtk_jpeg_ctx *ctx)
@@ -446,13 +507,38 @@ static int mtk_jpeg_subscribe_event(struct v4l2_fh *fh,
switch (sub->type) {
case V4L2_EVENT_SOURCE_CHANGE:
return v4l2_src_change_event_subscribe(fh, sub);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static int mtk_jpeg_enc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ s->r = ctx->out_q.enc_crop_rect;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.width = ctx->out_q.pix_mp.width;
+ s->r.height = ctx->out_q.pix_mp.height;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
default:
return -EINVAL;
}
+ return 0;
}
-static int mtk_jpeg_g_selection(struct file *file, void *priv,
- struct v4l2_selection *s)
+static int mtk_jpeg_dec_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
{
struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
@@ -462,15 +548,15 @@ static int mtk_jpeg_g_selection(struct file *file, void *priv,
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- s->r.width = ctx->out_q.w;
- s->r.height = ctx->out_q.h;
+ s->r.width = ctx->out_q.pix_mp.width;
+ s->r.height = ctx->out_q.pix_mp.height;
s->r.left = 0;
s->r.top = 0;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_PADDED:
- s->r.width = ctx->cap_q.w;
- s->r.height = ctx->cap_q.h;
+ s->r.width = ctx->cap_q.pix_mp.width;
+ s->r.height = ctx->cap_q.pix_mp.height;
s->r.left = 0;
s->r.top = 0;
break;
@@ -480,53 +566,57 @@ static int mtk_jpeg_g_selection(struct file *file, void *priv,
return 0;
}
-static int mtk_jpeg_s_selection(struct file *file, void *priv,
- struct v4l2_selection *s)
+static int mtk_jpeg_enc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
{
struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (s->target) {
- case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
s->r.left = 0;
s->r.top = 0;
- s->r.width = ctx->out_q.w;
- s->r.height = ctx->out_q.h;
+ s->r.width = min(s->r.width, ctx->out_q.pix_mp.width);
+ s->r.height = min(s->r.height, ctx->out_q.pix_mp.height);
+ ctx->out_q.enc_crop_rect = s->r;
break;
default:
return -EINVAL;
}
+
return 0;
}
-static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct v4l2_fh *fh = file->private_data;
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
- struct vb2_queue *vq;
- struct vb2_buffer *vb;
- struct mtk_jpeg_src_buf *jpeg_src_buf;
-
- if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- goto end;
+static const struct v4l2_ioctl_ops mtk_jpeg_enc_ioctl_ops = {
+ .vidioc_querycap = mtk_jpeg_querycap,
+ .vidioc_enum_fmt_vid_cap = mtk_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = mtk_jpeg_enum_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_jpeg_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_jpeg_try_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_jpeg_s_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_jpeg_s_fmt_vid_out_mplane,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_subscribe_event = mtk_jpeg_subscribe_event,
+ .vidioc_g_selection = mtk_jpeg_enc_g_selection,
+ .vidioc_s_selection = mtk_jpeg_enc_s_selection,
- vq = v4l2_m2m_get_vq(fh->m2m_ctx, buf->type);
- if (buf->index >= vq->num_buffers) {
- dev_err(ctx->jpeg->dev, "buffer index out of range\n");
- return -EINVAL;
- }
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- vb = vb2_get_buffer(vq, buf->index);
- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
- jpeg_src_buf->flags = (buf->m.planes[0].bytesused == 0) ?
- MTK_JPEG_BUF_FLAGS_LAST_FRAME : MTK_JPEG_BUF_FLAGS_INIT;
-end:
- return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
-}
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
-static const struct v4l2_ioctl_ops mtk_jpeg_ioctl_ops = {
+static const struct v4l2_ioctl_ops mtk_jpeg_dec_ioctl_ops = {
.vidioc_querycap = mtk_jpeg_querycap,
.vidioc_enum_fmt_vid_cap = mtk_jpeg_enum_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = mtk_jpeg_enum_fmt_vid_out,
@@ -536,10 +626,9 @@ static const struct v4l2_ioctl_ops mtk_jpeg_ioctl_ops = {
.vidioc_g_fmt_vid_out_mplane = mtk_jpeg_g_fmt_vid_mplane,
.vidioc_s_fmt_vid_cap_mplane = mtk_jpeg_s_fmt_vid_cap_mplane,
.vidioc_s_fmt_vid_out_mplane = mtk_jpeg_s_fmt_vid_out_mplane,
- .vidioc_qbuf = mtk_jpeg_qbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_subscribe_event = mtk_jpeg_subscribe_event,
- .vidioc_g_selection = mtk_jpeg_g_selection,
- .vidioc_s_selection = mtk_jpeg_s_selection,
+ .vidioc_g_selection = mtk_jpeg_dec_g_selection,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
@@ -571,9 +660,16 @@ static int mtk_jpeg_queue_setup(struct vb2_queue *q,
if (!q_data)
return -EINVAL;
+ if (*num_planes) {
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < q_data->pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
*num_planes = q_data->fmt->colplanes;
for (i = 0; i < q_data->fmt->colplanes; i++) {
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = q_data->pix_mp.plane_fmt[i].sizeimage;
v4l2_dbg(1, debug, &jpeg->v4l2_dev, "sizeimage[%d]=%u\n",
i, sizes[i]);
}
@@ -585,14 +681,22 @@ static int mtk_jpeg_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_jpeg_q_data *q_data = NULL;
+ struct v4l2_plane_pix_format plane_fmt = {};
int i;
q_data = mtk_jpeg_get_q_data(ctx, vb->vb2_queue->type);
if (!q_data)
return -EINVAL;
- for (i = 0; i < q_data->fmt->colplanes; i++)
- vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ plane_fmt = q_data->pix_mp.plane_fmt[i];
+ if (ctx->enable_exif &&
+ q_data->fmt->fourcc == V4L2_PIX_FMT_JPEG)
+ vb2_set_plane_payload(vb, i, plane_fmt.sizeimage +
+ MTK_JPEG_MAX_EXIF_SIZE);
+ else
+ vb2_set_plane_payload(vb, i, plane_fmt.sizeimage);
+ }
return 0;
}
@@ -604,14 +708,17 @@ static bool mtk_jpeg_check_resolution_change(struct mtk_jpeg_ctx *ctx,
struct mtk_jpeg_q_data *q_data;
q_data = &ctx->out_q;
- if (q_data->w != param->pic_w || q_data->h != param->pic_h) {
+ if (q_data->pix_mp.width != param->pic_w ||
+ q_data->pix_mp.height != param->pic_h) {
v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Picture size change\n");
return true;
}
q_data = &ctx->cap_q;
- if (q_data->fmt != mtk_jpeg_find_format(ctx, param->dst_fourcc,
- MTK_JPEG_FMT_TYPE_CAPTURE)) {
+ if (q_data->fmt !=
+ mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats, param->dst_fourcc,
+ MTK_JPEG_FMT_FLAG_CAPTURE)) {
v4l2_dbg(1, debug, &jpeg->v4l2_dev, "format change\n");
return true;
}
@@ -626,19 +733,20 @@ static void mtk_jpeg_set_queue_data(struct mtk_jpeg_ctx *ctx,
int i;
q_data = &ctx->out_q;
- q_data->w = param->pic_w;
- q_data->h = param->pic_h;
+ q_data->pix_mp.width = param->pic_w;
+ q_data->pix_mp.height = param->pic_h;
q_data = &ctx->cap_q;
- q_data->w = param->dec_w;
- q_data->h = param->dec_h;
- q_data->fmt = mtk_jpeg_find_format(ctx,
+ q_data->pix_mp.width = param->dec_w;
+ q_data->pix_mp.height = param->dec_h;
+ q_data->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
param->dst_fourcc,
- MTK_JPEG_FMT_TYPE_CAPTURE);
+ MTK_JPEG_FMT_FLAG_CAPTURE);
for (i = 0; i < q_data->fmt->colplanes; i++) {
- q_data->bytesperline[i] = param->mem_stride[i];
- q_data->sizeimage[i] = param->comp_size[i];
+ q_data->pix_mp.plane_fmt[i].bytesperline = param->mem_stride[i];
+ q_data->pix_mp.plane_fmt[i].sizeimage = param->comp_size[i];
}
v4l2_dbg(1, debug, &jpeg->v4l2_dev,
@@ -651,7 +759,18 @@ static void mtk_jpeg_set_queue_data(struct mtk_jpeg_ctx *ctx,
param->dec_w, param->dec_h);
}
-static void mtk_jpeg_buf_queue(struct vb2_buffer *vb)
+static void mtk_jpeg_enc_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev, "(%d) buf_q id=%d, vb=%p\n",
+ vb->vb2_queue->type, vb->index, vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void mtk_jpeg_dec_buf_queue(struct vb2_buffer *vb)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_jpeg_dec_param *param;
@@ -669,10 +788,6 @@ static void mtk_jpeg_buf_queue(struct vb2_buffer *vb)
param = &jpeg_src_buf->dec_param;
memset(param, 0, sizeof(*param));
- if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
- v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Got eos\n");
- goto end;
- }
header_valid = mtk_jpeg_parse(param, (u8 *)vb2_plane_vaddr(vb, 0),
vb2_get_plane_payload(vb, 0));
if (!header_valid) {
@@ -703,24 +818,16 @@ static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
return v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
}
-static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *vb;
- int ret = 0;
- ret = pm_runtime_get_sync(ctx->jpeg->dev);
- if (ret < 0)
- goto err;
-
- return 0;
-err:
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
- v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
- return ret;
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
}
-static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
+static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
{
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *vb;
@@ -744,18 +851,24 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
-
- pm_runtime_put_sync(ctx->jpeg->dev);
}
-static const struct vb2_ops mtk_jpeg_qops = {
+static const struct vb2_ops mtk_jpeg_dec_qops = {
+ .queue_setup = mtk_jpeg_queue_setup,
+ .buf_prepare = mtk_jpeg_buf_prepare,
+ .buf_queue = mtk_jpeg_dec_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = mtk_jpeg_dec_stop_streaming,
+};
+
+static const struct vb2_ops mtk_jpeg_enc_qops = {
.queue_setup = mtk_jpeg_queue_setup,
.buf_prepare = mtk_jpeg_buf_prepare,
- .buf_queue = mtk_jpeg_buf_queue,
+ .buf_queue = mtk_jpeg_enc_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
- .start_streaming = mtk_jpeg_start_streaming,
- .stop_streaming = mtk_jpeg_stop_streaming,
+ .stop_streaming = mtk_jpeg_enc_stop_streaming,
};
static void mtk_jpeg_set_dec_src(struct mtk_jpeg_ctx *ctx,
@@ -764,8 +877,8 @@ static void mtk_jpeg_set_dec_src(struct mtk_jpeg_ctx *ctx,
{
bs->str_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
bs->end_addr = bs->str_addr +
- mtk_jpeg_align(vb2_get_plane_payload(src_buf, 0), 16);
- bs->size = mtk_jpeg_align(vb2_plane_size(src_buf, 0), 128);
+ round_up(vb2_get_plane_payload(src_buf, 0), 16);
+ bs->size = round_up(vb2_plane_size(src_buf, 0), 128);
}
static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx,
@@ -795,7 +908,49 @@ static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx,
return 0;
}
-static void mtk_jpeg_device_run(void *priv)
+static void mtk_jpeg_enc_device_run(void *priv)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ unsigned long flags;
+ int ret;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ ret = pm_runtime_get_sync(jpeg->dev);
+ if (ret < 0)
+ goto enc_end;
+
+ schedule_delayed_work(&jpeg->job_timeout_work,
+ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+
+	/*
+	 * Reset the hardware on every frame so that all registers start
+	 * from a clean state; the hardware requires this.
+	 */
+ mtk_jpeg_enc_reset(jpeg->reg_base);
+
+ mtk_jpeg_set_enc_src(ctx, jpeg->reg_base, &src_buf->vb2_buf);
+ mtk_jpeg_set_enc_dst(ctx, jpeg->reg_base, &dst_buf->vb2_buf);
+ mtk_jpeg_set_enc_params(ctx, jpeg->reg_base);
+ mtk_jpeg_enc_start(jpeg->reg_base);
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+ return;
+
+enc_end:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void mtk_jpeg_dec_device_run(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
@@ -805,19 +960,12 @@ static void mtk_jpeg_device_run(void *priv)
struct mtk_jpeg_src_buf *jpeg_src_buf;
struct mtk_jpeg_bs bs;
struct mtk_jpeg_fb fb;
- int i;
+ int ret;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
- if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
- for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
- vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
- buf_state = VB2_BUF_STATE_DONE;
- goto dec_end;
- }
-
if (mtk_jpeg_check_resolution_change(ctx, &jpeg_src_buf->dec_param)) {
mtk_jpeg_queue_src_chg_event(ctx);
ctx->state = MTK_JPEG_SOURCE_CHANGE;
@@ -825,16 +973,23 @@ static void mtk_jpeg_device_run(void *priv)
return;
}
+ ret = pm_runtime_get_sync(jpeg->dev);
+ if (ret < 0)
+ goto dec_end;
+
+ schedule_delayed_work(&jpeg->job_timeout_work,
+ msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+
mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
goto dec_end;
spin_lock_irqsave(&jpeg->hw_lock, flags);
- mtk_jpeg_dec_reset(jpeg->dec_reg_base);
- mtk_jpeg_dec_set_config(jpeg->dec_reg_base,
+ mtk_jpeg_dec_reset(jpeg->reg_base);
+ mtk_jpeg_dec_set_config(jpeg->reg_base,
&jpeg_src_buf->dec_param, &bs, &fb);
- mtk_jpeg_dec_start(jpeg->dec_reg_base);
+ mtk_jpeg_dec_start(jpeg->reg_base);
spin_unlock_irqrestore(&jpeg->hw_lock, flags);
return;
@@ -846,29 +1001,34 @@ dec_end:
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
}
-static int mtk_jpeg_job_ready(void *priv)
+static int mtk_jpeg_dec_job_ready(void *priv)
{
struct mtk_jpeg_ctx *ctx = priv;
return (ctx->state == MTK_JPEG_RUNNING) ? 1 : 0;
}
-static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
- .device_run = mtk_jpeg_device_run,
- .job_ready = mtk_jpeg_job_ready,
+static const struct v4l2_m2m_ops mtk_jpeg_enc_m2m_ops = {
+ .device_run = mtk_jpeg_enc_device_run,
+};
+
+static const struct v4l2_m2m_ops mtk_jpeg_dec_m2m_ops = {
+ .device_run = mtk_jpeg_dec_device_run,
+ .job_ready = mtk_jpeg_dec_job_ready,
};
static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct mtk_jpeg_ctx *ctx = priv;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct mtk_jpeg_src_buf);
- src_vq->ops = &mtk_jpeg_qops;
+ src_vq->ops = jpeg->variant->qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->jpeg->lock;
@@ -881,7 +1041,7 @@ static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
- dst_vq->ops = &mtk_jpeg_qops;
+ dst_vq->ops = jpeg->variant->qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->jpeg->lock;
@@ -898,17 +1058,68 @@ static void mtk_jpeg_clk_on(struct mtk_jpeg_dev *jpeg)
ret = mtk_smi_larb_get(jpeg->larb);
if (ret)
dev_err(jpeg->dev, "mtk_smi_larb_get larbvdec fail %d\n", ret);
- clk_prepare_enable(jpeg->clk_jdec_smi);
- clk_prepare_enable(jpeg->clk_jdec);
+
+ ret = clk_bulk_prepare_enable(jpeg->variant->num_clks,
+ jpeg->variant->clks);
+ if (ret)
+		dev_err(jpeg->dev, "Failed to enable jpeg clocks: %d\n", ret);
}
static void mtk_jpeg_clk_off(struct mtk_jpeg_dev *jpeg)
{
- clk_disable_unprepare(jpeg->clk_jdec);
- clk_disable_unprepare(jpeg->clk_jdec_smi);
+ clk_bulk_disable_unprepare(jpeg->variant->num_clks,
+ jpeg->variant->clks);
mtk_smi_larb_put(jpeg->larb);
}
+static irqreturn_t mtk_jpeg_enc_done(struct mtk_jpeg_dev *jpeg)
+{
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ u32 result_size;
+
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
+ return IRQ_HANDLED;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ result_size = mtk_jpeg_enc_get_file_size(jpeg->reg_base);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, result_size);
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_done(src_buf, buf_state);
+ v4l2_m2m_buf_done(dst_buf, buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ pm_runtime_put(ctx->jpeg->dev);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_jpeg_enc_irq(int irq, void *priv)
+{
+ struct mtk_jpeg_dev *jpeg = priv;
+ u32 irq_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ cancel_delayed_work(&jpeg->job_timeout_work);
+
+ irq_status = readl(jpeg->reg_base + JPEG_ENC_INT_STS) &
+ JPEG_ENC_INT_STATUS_MASK_ALLIRQ;
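+	/* Writing 0 acknowledges and clears the pending status bits. */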
+ if (irq_status)
+ writel(0, jpeg->reg_base + JPEG_ENC_INT_STS);
+
+ if (!(irq_status & JPEG_ENC_INT_STATUS_DONE))
+ return ret;
+
+ ret = mtk_jpeg_enc_done(jpeg);
+ return ret;
+}
+
static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
{
struct mtk_jpeg_dev *jpeg = priv;
@@ -920,7 +1131,9 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
u32 dec_ret;
int i;
- dec_ret = mtk_jpeg_dec_get_int_status(jpeg->dec_reg_base);
+ cancel_delayed_work(&jpeg->job_timeout_work);
+
+ dec_ret = mtk_jpeg_dec_get_int_status(jpeg->reg_base);
dec_irq_ret = mtk_jpeg_dec_enum_result(dec_ret);
ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
if (!ctx) {
@@ -933,7 +1146,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
- mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+ mtk_jpeg_dec_reset(jpeg->reg_base);
if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) {
dev_err(jpeg->dev, "decode failed\n");
@@ -950,39 +1163,42 @@ dec_end:
v4l2_m2m_buf_done(src_buf, buf_state);
v4l2_m2m_buf_done(dst_buf, buf_state);
v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ pm_runtime_put(ctx->jpeg->dev);
return IRQ_HANDLED;
}
static void mtk_jpeg_set_default_params(struct mtk_jpeg_ctx *ctx)
{
struct mtk_jpeg_q_data *q = &ctx->out_q;
- int i;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
- ctx->colorspace = V4L2_COLORSPACE_JPEG,
- ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
- ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ q->pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+ q->pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ q->pix_mp.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ q->pix_mp.xfer_func = V4L2_XFER_FUNC_SRGB;
- q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
- MTK_JPEG_FMT_TYPE_OUTPUT);
- q->w = MTK_JPEG_MIN_WIDTH;
- q->h = MTK_JPEG_MIN_HEIGHT;
- q->bytesperline[0] = 0;
- q->sizeimage[0] = MTK_JPEG_DEFAULT_SIZEIMAGE;
+ q->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
+ jpeg->variant->out_q_default_fourcc,
+ MTK_JPEG_FMT_FLAG_OUTPUT);
+ q->pix_mp.width = MTK_JPEG_MIN_WIDTH;
+ q->pix_mp.height = MTK_JPEG_MIN_HEIGHT;
+ mtk_jpeg_try_fmt_mplane(&q->pix_mp, q->fmt);
q = &ctx->cap_q;
- q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_YUV420M,
- MTK_JPEG_FMT_TYPE_CAPTURE);
- q->w = MTK_JPEG_MIN_WIDTH;
- q->h = MTK_JPEG_MIN_HEIGHT;
-
- for (i = 0; i < q->fmt->colplanes; i++) {
- u32 stride = q->w * q->fmt->h_sample[i] / 4;
- u32 h = q->h * q->fmt->v_sample[i] / 4;
-
- q->bytesperline[i] = stride;
- q->sizeimage[i] = stride * h;
- }
+ q->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
+ jpeg->variant->num_formats,
+ jpeg->variant->cap_q_default_fourcc,
+ MTK_JPEG_FMT_FLAG_CAPTURE);
+ q->pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
+ q->pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ q->pix_mp.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ q->pix_mp.xfer_func = V4L2_XFER_FUNC_SRGB;
+ q->pix_mp.width = MTK_JPEG_MIN_WIDTH;
+ q->pix_mp.height = MTK_JPEG_MIN_HEIGHT;
+
+ mtk_jpeg_try_fmt_mplane(&q->pix_mp, q->fmt);
}
static int mtk_jpeg_open(struct file *file)
@@ -1013,6 +1229,15 @@ static int mtk_jpeg_open(struct file *file)
goto error;
}
+ if (jpeg->variant->cap_q_default_fourcc == V4L2_PIX_FMT_JPEG) {
+ ret = mtk_jpeg_enc_ctrls_setup(ctx);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to setup jpeg enc controls\n");
+ goto error;
+ }
+ } else {
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 0);
+ }
mtk_jpeg_set_default_params(ctx);
mutex_unlock(&jpeg->lock);
return 0;
@@ -1033,6 +1258,7 @@ static int mtk_jpeg_release(struct file *file)
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -1049,10 +1275,20 @@ static const struct v4l2_file_operations mtk_jpeg_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
+static struct clk_bulk_data mt8173_jpeg_dec_clocks[] = {
+ { .id = "jpgdec-smi" },
+ { .id = "jpgdec" },
+};
+
+static struct clk_bulk_data mtk_jpeg_clocks[] = {
+ { .id = "jpgenc" },
+};
+
static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg)
{
struct device_node *node;
struct platform_device *pdev;
+ int ret;
node = of_parse_phandle(jpeg->dev->of_node, "mediatek,larb", 0);
if (!node)
@@ -1066,19 +1302,40 @@ static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg)
jpeg->larb = &pdev->dev;
- jpeg->clk_jdec = devm_clk_get(jpeg->dev, "jpgdec");
- if (IS_ERR(jpeg->clk_jdec))
- return PTR_ERR(jpeg->clk_jdec);
+ ret = devm_clk_bulk_get(jpeg->dev, jpeg->variant->num_clks,
+ jpeg->variant->clks);
+ if (ret) {
+		dev_err(jpeg->dev, "failed to get jpeg clocks: %d\n", ret);
+ return ret;
+ }
- jpeg->clk_jdec_smi = devm_clk_get(jpeg->dev, "jpgdec-smi");
- return PTR_ERR_OR_ZERO(jpeg->clk_jdec_smi);
+ return 0;
}
+static void mtk_jpeg_job_timeout_work(struct work_struct *work)
+{
+ struct mtk_jpeg_dev *jpeg = container_of(work, struct mtk_jpeg_dev,
+ job_timeout_work.work);
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ jpeg->variant->hw_reset(jpeg->reg_base);
+
+ pm_runtime_put(jpeg->dev);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
static int mtk_jpeg_probe(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg;
struct resource *res;
- int dec_irq;
+ int jpeg_irq;
int ret;
jpeg = devm_kzalloc(&pdev->dev, sizeof(*jpeg), GFP_KERNEL);
@@ -1088,28 +1345,27 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
mutex_init(&jpeg->lock);
spin_lock_init(&jpeg->hw_lock);
jpeg->dev = &pdev->dev;
+ jpeg->variant = of_device_get_match_data(jpeg->dev);
+ INIT_DELAYED_WORK(&jpeg->job_timeout_work, mtk_jpeg_job_timeout_work);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->dec_reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(jpeg->dec_reg_base)) {
- ret = PTR_ERR(jpeg->dec_reg_base);
+ jpeg->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpeg->reg_base)) {
+ ret = PTR_ERR(jpeg->reg_base);
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- dec_irq = platform_get_irq(pdev, 0);
- if (!res || dec_irq < 0) {
- dev_err(&pdev->dev, "Failed to get dec_irq %d.\n", dec_irq);
- ret = -EINVAL;
- return ret;
+ jpeg_irq = platform_get_irq(pdev, 0);
+ if (jpeg_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get jpeg_irq %d.\n", jpeg_irq);
+ return jpeg_irq;
}
- ret = devm_request_irq(&pdev->dev, dec_irq, mtk_jpeg_dec_irq, 0,
- pdev->name, jpeg);
+ ret = devm_request_irq(&pdev->dev, jpeg_irq,
+ jpeg->variant->irq_handler, 0, pdev->name, jpeg);
if (ret) {
- dev_err(&pdev->dev, "Failed to request dec_irq %d (%d)\n",
- dec_irq, ret);
- ret = -EINVAL;
+ dev_err(&pdev->dev, "Failed to request jpeg_irq %d (%d)\n",
+ jpeg_irq, ret);
goto err_req_irq;
}
@@ -1126,40 +1382,42 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
goto err_dev_register;
}
- jpeg->m2m_dev = v4l2_m2m_init(&mtk_jpeg_m2m_ops);
+ jpeg->m2m_dev = v4l2_m2m_init(jpeg->variant->m2m_ops);
+
if (IS_ERR(jpeg->m2m_dev)) {
v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(jpeg->m2m_dev);
goto err_m2m_init;
}
- jpeg->dec_vdev = video_device_alloc();
- if (!jpeg->dec_vdev) {
+ jpeg->vdev = video_device_alloc();
+ if (!jpeg->vdev) {
ret = -ENOMEM;
- goto err_dec_vdev_alloc;
+ goto err_vfd_jpeg_alloc;
}
- snprintf(jpeg->dec_vdev->name, sizeof(jpeg->dec_vdev->name),
- "%s-dec", MTK_JPEG_NAME);
- jpeg->dec_vdev->fops = &mtk_jpeg_fops;
- jpeg->dec_vdev->ioctl_ops = &mtk_jpeg_ioctl_ops;
- jpeg->dec_vdev->minor = -1;
- jpeg->dec_vdev->release = video_device_release;
- jpeg->dec_vdev->lock = &jpeg->lock;
- jpeg->dec_vdev->v4l2_dev = &jpeg->v4l2_dev;
- jpeg->dec_vdev->vfl_dir = VFL_DIR_M2M;
- jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_M2M_MPLANE;
-
- ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_VIDEO, 3);
+ snprintf(jpeg->vdev->name, sizeof(jpeg->vdev->name),
+ "%s", jpeg->variant->dev_name);
+ jpeg->vdev->fops = &mtk_jpeg_fops;
+ jpeg->vdev->ioctl_ops = jpeg->variant->ioctl_ops;
+ jpeg->vdev->minor = -1;
+ jpeg->vdev->release = video_device_release;
+ jpeg->vdev->lock = &jpeg->lock;
+ jpeg->vdev->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->vdev->vfl_dir = VFL_DIR_M2M;
+ jpeg->vdev->device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(jpeg->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
- goto err_dec_vdev_register;
+ goto err_vfd_jpeg_register;
}
- video_set_drvdata(jpeg->dec_vdev, jpeg);
+ video_set_drvdata(jpeg->vdev, jpeg);
v4l2_info(&jpeg->v4l2_dev,
- "decoder device registered as /dev/video%d (%d,%d)\n",
- jpeg->dec_vdev->num, VIDEO_MAJOR, jpeg->dec_vdev->minor);
+ "%s device registered as /dev/video%d (%d,%d)\n",
+ jpeg->variant->dev_name, jpeg->vdev->num,
+ VIDEO_MAJOR, jpeg->vdev->minor);
platform_set_drvdata(pdev, jpeg);
@@ -1167,10 +1425,10 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
return 0;
-err_dec_vdev_register:
- video_device_release(jpeg->dec_vdev);
+err_vfd_jpeg_register:
+ video_device_release(jpeg->vdev);
-err_dec_vdev_alloc:
+err_vfd_jpeg_alloc:
v4l2_m2m_release(jpeg->m2m_dev);
err_m2m_init:
@@ -1190,8 +1448,8 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- video_unregister_device(jpeg->dec_vdev);
- video_device_release(jpeg->dec_vdev);
+ video_unregister_device(jpeg->vdev);
+ video_device_release(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
@@ -1202,7 +1460,6 @@ static __maybe_unused int mtk_jpeg_pm_suspend(struct device *dev)
{
struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
- mtk_jpeg_dec_reset(jpeg->dec_reg_base);
mtk_jpeg_clk_off(jpeg);
return 0;
@@ -1213,31 +1470,28 @@ static __maybe_unused int mtk_jpeg_pm_resume(struct device *dev)
struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
mtk_jpeg_clk_on(jpeg);
- mtk_jpeg_dec_reset(jpeg->dec_reg_base);
return 0;
}
static __maybe_unused int mtk_jpeg_suspend(struct device *dev)
{
- int ret;
-
- if (pm_runtime_suspended(dev))
- return 0;
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
- ret = mtk_jpeg_pm_suspend(dev);
- return ret;
+ v4l2_m2m_suspend(jpeg->m2m_dev);
+ return pm_runtime_force_suspend(dev);
}
static __maybe_unused int mtk_jpeg_resume(struct device *dev)
{
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
int ret;
- if (pm_runtime_suspended(dev))
- return 0;
-
- ret = mtk_jpeg_pm_resume(dev);
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0)
+ return ret;
+ v4l2_m2m_resume(jpeg->m2m_dev);
return ret;
}
@@ -1246,14 +1500,48 @@ static const struct dev_pm_ops mtk_jpeg_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_jpeg_pm_suspend, mtk_jpeg_pm_resume, NULL)
};
+static const struct mtk_jpeg_variant mt8173_jpeg_drvdata = {
+ .clks = mt8173_jpeg_dec_clocks,
+ .num_clks = ARRAY_SIZE(mt8173_jpeg_dec_clocks),
+ .formats = mtk_jpeg_dec_formats,
+ .num_formats = MTK_JPEG_DEC_NUM_FORMATS,
+ .qops = &mtk_jpeg_dec_qops,
+ .irq_handler = mtk_jpeg_dec_irq,
+ .hw_reset = mtk_jpeg_dec_reset,
+ .m2m_ops = &mtk_jpeg_dec_m2m_ops,
+ .dev_name = "mtk-jpeg-dec",
+ .ioctl_ops = &mtk_jpeg_dec_ioctl_ops,
+ .out_q_default_fourcc = V4L2_PIX_FMT_JPEG,
+ .cap_q_default_fourcc = V4L2_PIX_FMT_YUV420M,
+};
+
+static const struct mtk_jpeg_variant mtk_jpeg_drvdata = {
+ .clks = mtk_jpeg_clocks,
+ .num_clks = ARRAY_SIZE(mtk_jpeg_clocks),
+ .formats = mtk_jpeg_enc_formats,
+ .num_formats = MTK_JPEG_ENC_NUM_FORMATS,
+ .qops = &mtk_jpeg_enc_qops,
+ .irq_handler = mtk_jpeg_enc_irq,
+ .hw_reset = mtk_jpeg_enc_reset,
+ .m2m_ops = &mtk_jpeg_enc_m2m_ops,
+ .dev_name = "mtk-jpeg-enc",
+ .ioctl_ops = &mtk_jpeg_enc_ioctl_ops,
+ .out_q_default_fourcc = V4L2_PIX_FMT_YUYV,
+ .cap_q_default_fourcc = V4L2_PIX_FMT_JPEG,
+};
+
static const struct of_device_id mtk_jpeg_match[] = {
{
.compatible = "mediatek,mt8173-jpgdec",
- .data = NULL,
+ .data = &mt8173_jpeg_drvdata,
},
{
.compatible = "mediatek,mt2701-jpgdec",
- .data = NULL,
+ .data = &mt8173_jpeg_drvdata,
+ },
+ {
+ .compatible = "mediatek,mtk-jpgenc",
+ .data = &mtk_jpeg_drvdata,
},
{},
};
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
index 999bd1427809..68e634f02e00 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
@@ -3,6 +3,7 @@
* Copyright (c) 2016 MediaTek Inc.
* Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
* Rick Chang <rick.chang@mediatek.com>
+ * Xia Jiang <xia.jiang@mediatek.com>
*/
#ifndef _MTK_JPEG_CORE_H
@@ -15,19 +16,28 @@
#define MTK_JPEG_NAME "mtk-jpeg"
-#define MTK_JPEG_FMT_FLAG_DEC_OUTPUT BIT(0)
-#define MTK_JPEG_FMT_FLAG_DEC_CAPTURE BIT(1)
+#define MTK_JPEG_COMP_MAX 3
-#define MTK_JPEG_FMT_TYPE_OUTPUT 1
-#define MTK_JPEG_FMT_TYPE_CAPTURE 2
+#define MTK_JPEG_FMT_FLAG_OUTPUT BIT(0)
+#define MTK_JPEG_FMT_FLAG_CAPTURE BIT(1)
-#define MTK_JPEG_MIN_WIDTH 32
-#define MTK_JPEG_MIN_HEIGHT 32
-#define MTK_JPEG_MAX_WIDTH 8192
-#define MTK_JPEG_MAX_HEIGHT 8192
+#define MTK_JPEG_MIN_WIDTH 32U
+#define MTK_JPEG_MIN_HEIGHT 32U
+#define MTK_JPEG_MAX_WIDTH 65535U
+#define MTK_JPEG_MAX_HEIGHT 65535U
#define MTK_JPEG_DEFAULT_SIZEIMAGE (1 * 1024 * 1024)
+#define MTK_JPEG_HW_TIMEOUT_MSEC 1000
+
+#define MTK_JPEG_MAX_EXIF_SIZE (64 * 1024)
+
+/**
+ * enum mtk_jpeg_ctx_state - states of the context state machine
+ * @MTK_JPEG_INIT: current state is initialized
+ * @MTK_JPEG_RUNNING: current state is running
+ * @MTK_JPEG_SOURCE_CHANGE: current state is source resolution change
+ */
enum mtk_jpeg_ctx_state {
MTK_JPEG_INIT = 0,
MTK_JPEG_RUNNING,
@@ -35,6 +45,36 @@ enum mtk_jpeg_ctx_state {
};
/**
+ * struct mtk_jpeg_variant - MTK JPEG driver variant
+ * @clks: clock names
+ * @num_clks: number of clocks
+ * @formats: supported color formats
+ * @num_formats: number of entries in @formats
+ * @qops: the vb2_ops used by the queues
+ * @irq_handler: JPEG IRQ handler callback
+ * @hw_reset: JPEG hardware reset callback
+ * @m2m_ops: the v4l2_m2m_ops used by the driver
+ * @dev_name: JPEG device name
+ * @ioctl_ops: the v4l2_ioctl_ops used by the video device
+ * @out_q_default_fourcc: output queue default fourcc
+ * @cap_q_default_fourcc: capture queue default fourcc
+ */
+struct mtk_jpeg_variant {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct mtk_jpeg_fmt *formats;
+ int num_formats;
+ const struct vb2_ops *qops;
+ irqreturn_t (*irq_handler)(int irq, void *priv);
+ void (*hw_reset)(void __iomem *base);
+ const struct v4l2_m2m_ops *m2m_ops;
+ const char *dev_name;
+ const struct v4l2_ioctl_ops *ioctl_ops;
+ u32 out_q_default_fourcc;
+ u32 cap_q_default_fourcc;
+};
+
+/**
* struct mt_jpeg - JPEG IP abstraction
* @lock: the mutex protecting this structure
* @hw_lock: spinlock protecting the hw device resource
@@ -43,11 +83,11 @@ enum mtk_jpeg_ctx_state {
* @v4l2_dev: v4l2 device for mem2mem mode
* @m2m_dev: v4l2 mem2mem device data
* @alloc_ctx: videobuf2 memory allocator's context
- * @dec_vdev: video device node for decoder mem2mem mode
- * @dec_reg_base: JPEG registers mapping
- * @clk_jdec: JPEG hw working clock
- * @clk_jdec_smi: JPEG SMI bus clock
+ * @vdev: video device node for jpeg mem2mem mode
+ * @reg_base: JPEG registers mapping
* @larb: SMI device
+ * @job_timeout_work: delayed work that handles hardware job timeouts
+ * @variant: driver variant to be used
*/
struct mtk_jpeg_dev {
struct mutex lock;
@@ -57,16 +97,17 @@ struct mtk_jpeg_dev {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
void *alloc_ctx;
- struct video_device *dec_vdev;
- void __iomem *dec_reg_base;
- struct clk *clk_jdec;
- struct clk *clk_jdec_smi;
+ struct video_device *vdev;
+ void __iomem *reg_base;
struct device *larb;
+ struct delayed_work job_timeout_work;
+ const struct mtk_jpeg_variant *variant;
};
/**
* struct jpeg_fmt - driver's internal color format data
* @fourcc: the fourcc code, 0 if not applicable
+ * @hw_format: hardware format value
* @h_sample: horizontal sample count of plane in 4 * 4 pixel image
* @v_sample: vertical sample count of plane in 4 * 4 pixel image
* @colplanes: number of color planes (1 for packed formats)
@@ -76,6 +117,7 @@ struct mtk_jpeg_dev {
*/
struct mtk_jpeg_fmt {
u32 fourcc;
+ u32 hw_format;
int h_sample[VIDEO_MAX_PLANES];
int v_sample[VIDEO_MAX_PLANES];
int colplanes;
@@ -87,18 +129,13 @@ struct mtk_jpeg_fmt {
/**
* mtk_jpeg_q_data - parameters of one queue
* @fmt: driver-specific format of this queue
- * @w: image width
- * @h: image height
- * @bytesperline: distance in bytes between the leftmost pixels in two adjacent
- * lines
- * @sizeimage: image buffer size in bytes
+ * @pix_mp: multiplanar format
+ * @enc_crop_rect: jpeg encoder crop information
*/
struct mtk_jpeg_q_data {
struct mtk_jpeg_fmt *fmt;
- u32 w;
- u32 h;
- u32 bytesperline[VIDEO_MAX_PLANES];
- u32 sizeimage[VIDEO_MAX_PLANES];
+ struct v4l2_pix_format_mplane pix_mp;
+ struct v4l2_rect enc_crop_rect;
};
/**
@@ -107,13 +144,11 @@ struct mtk_jpeg_q_data {
* @out_q: source (output) queue information
* @cap_q: destination (capture) queue queue information
* @fh: V4L2 file handle
- * @dec_param parameters for HW decoding
* @state: state of the context
- * @header_valid: set if header has been parsed and valid
- * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
- * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
- * @quantization: enum v4l2_quantization, colorspace quantization
- * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @enable_exif: enable exif mode of jpeg encoder
+ * @enc_quality: jpeg encoder quality
+ * @restart_interval: jpeg encoder restart interval
+ * @ctrl_hdl: controls handler
*/
struct mtk_jpeg_ctx {
struct mtk_jpeg_dev *jpeg;
@@ -121,11 +156,10 @@ struct mtk_jpeg_ctx {
struct mtk_jpeg_q_data cap_q;
struct v4l2_fh fh;
enum mtk_jpeg_ctx_state state;
-
- enum v4l2_colorspace colorspace;
- enum v4l2_ycbcr_encoding ycbcr_enc;
- enum v4l2_quantization quantization;
- enum v4l2_xfer_func xfer_func;
+ bool enable_exif;
+ u8 enc_quality;
+ u8 restart_interval;
+ struct v4l2_ctrl_handler ctrl_hdl;
};
#endif /* _MTK_JPEG_CORE_H */
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c
index ddf0dfa78e20..afbbfd5d02bc 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <media/videobuf2-core.h>
-#include "mtk_jpeg_hw.h"
+#include "mtk_jpeg_dec_hw.h"
#define MTK_JPEG_DUNUM_MASK(val) (((val) - 1) & 0x3)
@@ -153,10 +153,10 @@ static int mtk_jpeg_calc_dst_size(struct mtk_jpeg_dec_param *param)
param->sampling_w[i];
/* output format is 420/422 */
param->comp_w[i] = padding_w >> brz_w[i];
- param->comp_w[i] = mtk_jpeg_align(param->comp_w[i],
- MTK_JPEG_DCTSIZE);
- param->img_stride[i] = i ? mtk_jpeg_align(param->comp_w[i], 16)
- : mtk_jpeg_align(param->comp_w[i], 32);
+ param->comp_w[i] = round_up(param->comp_w[i],
+ MTK_JPEG_DCTSIZE);
+ param->img_stride[i] = i ? round_up(param->comp_w[i], 16)
+ : round_up(param->comp_w[i], 32);
ds_row_h[i] = (MTK_JPEG_DCTSIZE * param->sampling_h[i]);
}
param->dec_w = param->img_stride[0];
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h
index 9c6584eaad99..fa0d45fd7c34 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_hw.h
@@ -3,15 +3,16 @@
* Copyright (c) 2016 MediaTek Inc.
* Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
* Rick Chang <rick.chang@mediatek.com>
+ * Xia Jiang <xia.jiang@mediatek.com>
*/
-#ifndef _MTK_JPEG_HW_H
-#define _MTK_JPEG_HW_H
+#ifndef _MTK_JPEG_DEC_HW_H
+#define _MTK_JPEG_DEC_HW_H
#include <media/videobuf2-core.h>
#include "mtk_jpeg_core.h"
-#include "mtk_jpeg_reg.h"
+#include "mtk_jpeg_dec_reg.h"
enum {
MTK_JPEG_DEC_RESULT_EOF_DONE = 0,
@@ -54,11 +55,6 @@ struct mtk_jpeg_dec_param {
u8 uv_brz_w;
};
-static inline u32 mtk_jpeg_align(u32 val, u32 align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
struct mtk_jpeg_bs {
dma_addr_t str_addr;
dma_addr_t end_addr;
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c
index f862d38f3af7..b95c45791c29 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/videodev2.h>
-#include "mtk_jpeg_parse.h"
+#include "mtk_jpeg_dec_parse.h"
#define TEM 0x01
#define SOF0 0xc0
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h
index 0a48eeabaff2..2918f15811f8 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_parse.h
@@ -8,7 +8,7 @@
#ifndef _MTK_JPEG_PARSE_H
#define _MTK_JPEG_PARSE_H
-#include "mtk_jpeg_hw.h"
+#include "mtk_jpeg_dec_hw.h"
bool mtk_jpeg_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
u32 src_size);
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h
index 94db04e9cdb6..21ec8f96797f 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_dec_reg.h
@@ -8,7 +8,6 @@
#ifndef _MTK_JPEG_REG_H
#define _MTK_JPEG_REG_H
-#define MTK_JPEG_COMP_MAX 3
#define MTK_JPEG_BLOCK_MAX 10
#define MTK_JPEG_DCTSIZE 8
@@ -20,29 +19,29 @@
#define BIT_INQST_MASK_ALLIRQ 0x37
#define JPGDEC_REG_RESET 0x0090
-#define JPGDEC_REG_BRZ_FACTOR 0x00F8
-#define JPGDEC_REG_DU_NUM 0x00FC
+#define JPGDEC_REG_BRZ_FACTOR 0x00f8
+#define JPGDEC_REG_DU_NUM 0x00fc
#define JPGDEC_REG_DEST_ADDR0_Y 0x0140
#define JPGDEC_REG_DEST_ADDR0_U 0x0144
#define JPGDEC_REG_DEST_ADDR0_V 0x0148
-#define JPGDEC_REG_DEST_ADDR1_Y 0x014C
+#define JPGDEC_REG_DEST_ADDR1_Y 0x014c
#define JPGDEC_REG_DEST_ADDR1_U 0x0150
#define JPGDEC_REG_DEST_ADDR1_V 0x0154
#define JPGDEC_REG_STRIDE_Y 0x0158
-#define JPGDEC_REG_STRIDE_UV 0x015C
+#define JPGDEC_REG_STRIDE_UV 0x015c
#define JPGDEC_REG_IMG_STRIDE_Y 0x0160
#define JPGDEC_REG_IMG_STRIDE_UV 0x0164
-#define JPGDEC_REG_WDMA_CTRL 0x016C
+#define JPGDEC_REG_WDMA_CTRL 0x016c
#define JPGDEC_REG_PAUSE_MCU_NUM 0x0170
-#define JPGDEC_REG_OPERATION_MODE 0x017C
+#define JPGDEC_REG_OPERATION_MODE 0x017c
#define JPGDEC_REG_FILE_ADDR 0x0200
-#define JPGDEC_REG_COMP_ID 0x020C
+#define JPGDEC_REG_COMP_ID 0x020c
#define JPGDEC_REG_TOTAL_MCU_NUM 0x0210
#define JPGDEC_REG_COMP0_DATA_UNIT_NUM 0x0224
-#define JPGDEC_REG_DU_CTRL 0x023C
+#define JPGDEC_REG_DU_CTRL 0x023c
#define JPGDEC_REG_TRIG 0x0240
#define JPGDEC_REG_FILE_BRP 0x0248
-#define JPGDEC_REG_FILE_TOTAL_SIZE 0x024C
+#define JPGDEC_REG_FILE_TOTAL_SIZE 0x024c
#define JPGDEC_REG_QT_ID 0x0270
#define JPGDEC_REG_INTERRUPT_STATUS 0x0274
#define JPGDEC_REG_STATUS 0x0278
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c
new file mode 100644
index 000000000000..1cf037bf72dd
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Xia Jiang <xia.jiang@mediatek.com>
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_jpeg_enc_hw.h"
+
+static const struct mtk_jpeg_enc_qlt mtk_jpeg_enc_quality[] = {
+ {.quality_param = 34, .hardware_value = JPEG_ENC_QUALITY_Q34},
+ {.quality_param = 39, .hardware_value = JPEG_ENC_QUALITY_Q39},
+ {.quality_param = 48, .hardware_value = JPEG_ENC_QUALITY_Q48},
+ {.quality_param = 60, .hardware_value = JPEG_ENC_QUALITY_Q60},
+ {.quality_param = 64, .hardware_value = JPEG_ENC_QUALITY_Q64},
+ {.quality_param = 68, .hardware_value = JPEG_ENC_QUALITY_Q68},
+ {.quality_param = 74, .hardware_value = JPEG_ENC_QUALITY_Q74},
+ {.quality_param = 80, .hardware_value = JPEG_ENC_QUALITY_Q80},
+ {.quality_param = 82, .hardware_value = JPEG_ENC_QUALITY_Q82},
+ {.quality_param = 84, .hardware_value = JPEG_ENC_QUALITY_Q84},
+ {.quality_param = 87, .hardware_value = JPEG_ENC_QUALITY_Q87},
+ {.quality_param = 90, .hardware_value = JPEG_ENC_QUALITY_Q90},
+ {.quality_param = 92, .hardware_value = JPEG_ENC_QUALITY_Q92},
+ {.quality_param = 95, .hardware_value = JPEG_ENC_QUALITY_Q95},
+ {.quality_param = 97, .hardware_value = JPEG_ENC_QUALITY_Q97},
+};
+
+void mtk_jpeg_enc_reset(void __iomem *base)
+{
+ writel(0, base + JPEG_ENC_RSTB);
+ writel(JPEG_ENC_RESET_BIT, base + JPEG_ENC_RSTB);
+ writel(0, base + JPEG_ENC_CODEC_SEL);
+}
+
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base)
+{
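+	/* Bytes written so far: the current DMA address minus the dst base. */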
+ return readl(base + JPEG_ENC_DMA_ADDR0) -
+ readl(base + JPEG_ENC_DST_ADDR0);
+}
+
+void mtk_jpeg_enc_start(void __iomem *base)
+{
+ u32 value;
+
+ value = readl(base + JPEG_ENC_CTRL);
+ value |= JPEG_ENC_CTRL_INT_EN_BIT | JPEG_ENC_CTRL_ENABLE_BIT;
+ writel(value, base + JPEG_ENC_CTRL);
+}
+
+void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
+ struct vb2_buffer *src_buf)
+{
+ int i;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < src_buf->num_planes; i++) {
+ dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, i) +
+ src_buf->planes[i].data_offset;
+ if (!i)
+ writel(dma_addr, base + JPEG_ENC_SRC_LUMA_ADDR);
+ else
+ writel(dma_addr, base + JPEG_ENC_SRC_CHROMA_ADDR);
+ }
+}
+
+void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
+ struct vb2_buffer *dst_buf)
+{
+ dma_addr_t dma_addr;
+ size_t size;
+ u32 dma_addr_offset;
+ u32 dma_addr_offsetmask;
+
+ dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ dma_addr_offset = ctx->enable_exif ? MTK_JPEG_MAX_EXIF_SIZE : 0;
+ dma_addr_offsetmask = dma_addr & JPEG_ENC_DST_ADDR_OFFSET_MASK;
+ size = vb2_plane_size(dst_buf, 0);
+
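+	/*
+	 * The destination base must be 16-byte aligned: e.g. dma_addr
+	 * 0x1000000a is programmed as base 0x10000000 (DST_ADDR0) plus
+	 * byte offset mask 0xa (BYTE_OFFSET_MASK).
+	 */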
+ writel(dma_addr_offset & ~0xf, base + JPEG_ENC_OFFSET_ADDR);
+ writel(dma_addr_offsetmask & 0xf, base + JPEG_ENC_BYTE_OFFSET_MASK);
+ writel(dma_addr & ~0xf, base + JPEG_ENC_DST_ADDR0);
+ writel((dma_addr + size) & ~0xf, base + JPEG_ENC_STALL_ADDR0);
+}
+
+void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base)
+{
+ u32 value;
+ u32 width = ctx->out_q.enc_crop_rect.width;
+ u32 height = ctx->out_q.enc_crop_rect.height;
+ u32 enc_format = ctx->out_q.fmt->fourcc;
+ u32 bytesperline = ctx->out_q.pix_mp.plane_fmt[0].bytesperline;
+ u32 blk_num;
+ u32 img_stride;
+ u32 mem_stride;
+ u32 i, enc_quality;
+
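+	/* JPEG_ENC_IMG_SIZE packs width in the high and height in the low 16 bits. */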
+ value = width << 16 | height;
+ writel(value, base + JPEG_ENC_IMG_SIZE);
+
+ if (enc_format == V4L2_PIX_FMT_NV12M ||
+ enc_format == V4L2_PIX_FMT_NV21M)
+		/*
+		 * Total number of 8x8 blocks of luma and chroma; the register
+		 * value is zero-based. A 4:2:0 macroblock holds four luma and
+		 * two chroma blocks, hence the factor 6.
+		 */
+ blk_num = DIV_ROUND_UP(width, 16) *
+ DIV_ROUND_UP(height, 16) * 6 - 1;
+ else
+ blk_num = DIV_ROUND_UP(width, 16) *
+ DIV_ROUND_UP(height, 8) * 4 - 1;
+ writel(blk_num, base + JPEG_ENC_BLK_NUM);
+
+ if (enc_format == V4L2_PIX_FMT_NV12M ||
+ enc_format == V4L2_PIX_FMT_NV21M) {
+ /* 4:2:0 */
+ img_stride = round_up(width, 16);
+ mem_stride = bytesperline;
+ } else {
+ /* 4:2:2 */
+ img_stride = round_up(width * 2, 32);
+ mem_stride = img_stride;
+ }
+ writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
+ writel(mem_stride, base + JPEG_ENC_STRIDE);
+
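+	/* Round the requested quality up to the nearest supported hardware step. */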
+ enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
+ for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
+ if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
+ enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
+ break;
+ }
+ }
+ writel(enc_quality, base + JPEG_ENC_QUALITY);
+
+ value = readl(base + JPEG_ENC_CTRL);
+ value &= ~JPEG_ENC_CTRL_YUV_FORMAT_MASK;
+ value |= (ctx->out_q.fmt->hw_format & 3) << 3;
+ if (ctx->enable_exif)
+ value |= JPEG_ENC_CTRL_FILE_FORMAT_BIT;
+ else
+ value &= ~JPEG_ENC_CTRL_FILE_FORMAT_BIT;
+ if (ctx->restart_interval)
+ value |= JPEG_ENC_CTRL_RESTART_EN_BIT;
+ else
+ value &= ~JPEG_ENC_CTRL_RESTART_EN_BIT;
+ writel(value, base + JPEG_ENC_CTRL);
+
+ writel(ctx->restart_interval, base + JPEG_ENC_RST_MCU_NUM);
+}
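
The quality table above rounds the requested 1-100 JPEG quality up to
the nearest supported hardware step, and a request above 97 falls back
to the first entry (Q34), matching the loop's initial value in
mtk_jpeg_set_enc_params(). A minimal standalone sketch of the same
lookup (illustrative only; pick_quality_step() and mtk_quality_steps[]
are not part of the patch):

static const unsigned int mtk_quality_steps[] = {
	34, 39, 48, 60, 64, 68, 74, 80, 82, 84, 87, 90, 92, 95, 97,
};

/* Returns the step used for a requested quality, e.g. 85 -> 87. */
static unsigned int pick_quality_step(unsigned int requested)
{
	unsigned int i;

	for (i = 0; i < sizeof(mtk_quality_steps) /
		    sizeof(mtk_quality_steps[0]); i++)
		if (requested <= mtk_quality_steps[i])
			return mtk_quality_steps[i];

	/* Above 97 the driver keeps its initial value, the first entry. */
	return mtk_quality_steps[0];
}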
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h
new file mode 100644
index 000000000000..61c60e4e58ea
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_enc_hw.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Xia Jiang <xia.jiang@mediatek.com>
+ *
+ */
+
+#ifndef _MTK_JPEG_ENC_HW_H
+#define _MTK_JPEG_ENC_HW_H
+
+#include <media/videobuf2-core.h>
+
+#include "mtk_jpeg_core.h"
+
+#define JPEG_ENC_INT_STATUS_DONE BIT(0)
+#define JPEG_ENC_INT_STATUS_MASK_ALLIRQ 0x13
+
+#define JPEG_ENC_DST_ADDR_OFFSET_MASK GENMASK(3, 0)
+
+#define JPEG_ENC_CTRL_YUV_FORMAT_MASK 0x18
+#define JPEG_ENC_CTRL_RESTART_EN_BIT BIT(10)
+#define JPEG_ENC_CTRL_FILE_FORMAT_BIT BIT(5)
+#define JPEG_ENC_CTRL_INT_EN_BIT BIT(2)
+#define JPEG_ENC_CTRL_ENABLE_BIT BIT(0)
+#define JPEG_ENC_RESET_BIT BIT(0)
+
+#define JPEG_ENC_YUV_FORMAT_YUYV 0
+#define JPEG_ENC_YUV_FORMAT_YVYU 1
+#define JPEG_ENC_YUV_FORMAT_NV12 2
+#define JPEG_ENC_YUV_FORMAT_NV21 3
+
+#define JPEG_ENC_QUALITY_Q60 0x0
+#define JPEG_ENC_QUALITY_Q80 0x1
+#define JPEG_ENC_QUALITY_Q90 0x2
+#define JPEG_ENC_QUALITY_Q95 0x3
+#define JPEG_ENC_QUALITY_Q39 0x4
+#define JPEG_ENC_QUALITY_Q68 0x5
+#define JPEG_ENC_QUALITY_Q84 0x6
+#define JPEG_ENC_QUALITY_Q92 0x7
+#define JPEG_ENC_QUALITY_Q48 0x8
+#define JPEG_ENC_QUALITY_Q74 0xa
+#define JPEG_ENC_QUALITY_Q87 0xb
+#define JPEG_ENC_QUALITY_Q34 0xc
+#define JPEG_ENC_QUALITY_Q64 0xe
+#define JPEG_ENC_QUALITY_Q82 0xf
+#define JPEG_ENC_QUALITY_Q97 0x10
+
+#define JPEG_ENC_RSTB 0x100
+#define JPEG_ENC_CTRL 0x104
+#define JPEG_ENC_QUALITY 0x108
+#define JPEG_ENC_BLK_NUM 0x10c
+#define JPEG_ENC_BLK_CNT 0x110
+#define JPEG_ENC_INT_STS 0x11c
+#define JPEG_ENC_DST_ADDR0 0x120
+#define JPEG_ENC_DMA_ADDR0 0x124
+#define JPEG_ENC_STALL_ADDR0 0x128
+#define JPEG_ENC_OFFSET_ADDR 0x138
+#define JPEG_ENC_RST_MCU_NUM 0x150
+#define JPEG_ENC_IMG_SIZE 0x154
+#define JPEG_ENC_DEBUG_INFO0 0x160
+#define JPEG_ENC_DEBUG_INFO1 0x164
+#define JPEG_ENC_TOTAL_CYCLE 0x168
+#define JPEG_ENC_BYTE_OFFSET_MASK 0x16c
+#define JPEG_ENC_SRC_LUMA_ADDR 0x170
+#define JPEG_ENC_SRC_CHROMA_ADDR 0x174
+#define JPEG_ENC_STRIDE 0x178
+#define JPEG_ENC_IMG_STRIDE 0x17c
+#define JPEG_ENC_DCM_CTRL 0x300
+#define JPEG_ENC_CODEC_SEL 0x314
+#define JPEG_ENC_ULTRA_THRES 0x318
+
+/**
+ * struct mtk_jpeg_enc_qlt - JPEG encoder quality data
+ * @quality_param: quality value
+ * @hardware_value: hardware value of quality
+ */
+struct mtk_jpeg_enc_qlt {
+ u8 quality_param;
+ u8 hardware_value;
+};
+
+void mtk_jpeg_enc_reset(void __iomem *base);
+u32 mtk_jpeg_enc_get_file_size(void __iomem *base);
+void mtk_jpeg_enc_start(void __iomem *base);
+void mtk_jpeg_set_enc_src(struct mtk_jpeg_ctx *ctx, void __iomem *base,
+ struct vb2_buffer *src_buf);
+void mtk_jpeg_set_enc_dst(struct mtk_jpeg_ctx *ctx, void __iomem *base,
+ struct vb2_buffer *dst_buf);
+void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx, void __iomem *base);
+
+#endif /* _MTK_JPEG_ENC_HW_H */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index f96c8b3bf861..976aa1f4829b 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -94,7 +94,7 @@ static void mtk_mdp_reset_handler(void *priv)
void mtk_mdp_register_component(struct mtk_mdp_dev *mdp,
struct mtk_mdp_comp *comp)
{
- list_add(&mdp->comp_list, &comp->node);
+ list_add(&comp->node, &mdp->comp_list);
}
void mtk_mdp_unregister_component(struct mtk_mdp_dev *mdp,
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index 37b94b555fa1..f679c6e1a3e9 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -13,7 +13,6 @@ mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
mtk_vcodec_dec.o \
mtk_vcodec_dec_pm.o \
-
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
venc/venc_h264_if.o \
mtk_vcodec_enc.o \
@@ -24,6 +23,5 @@ mtk-vcodec-enc-y := venc/venc_vp8_if.o \
mtk-vcodec-common-y := mtk_vcodec_intr.o \
- mtk_vcodec_util.o\
-
-ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
+ mtk_vcodec_util.o \
+ mtk_vcodec_fw.o
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 0f3e710aed4e..c768a587a944 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -194,8 +194,7 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
vb->vb2_buf.index,
dstbuf->queued_in_vb2);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- } else if ((dstbuf->queued_in_vb2 == false) &&
- (dstbuf->queued_in_v4l2 == true)) {
+ } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
/*
* If buffer in v4l2 driver but not in vb2 queue yet,
* and we get this buffer from free_list, it means
@@ -448,7 +447,7 @@ static void mtk_vdec_worker(struct work_struct *work)
mutex_unlock(&ctx->lock);
}
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else if (res_chg == false) {
+ } else if (!res_chg) {
/*
* we only return src buffer with VB2_BUF_STATE_DONE
* when decode success without resolution change
@@ -1156,7 +1155,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
m2m_buf.vb);
mutex_lock(&ctx->lock);
- if (buf->used == false) {
+ if (!buf->used) {
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
buf->queued_in_vb2 = true;
buf->queued_in_v4l2 = true;
@@ -1525,10 +1524,8 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->dev = &ctx->dev->plat_dev->dev;
ret = vb2_queue_init(dst_vq);
- if (ret) {
- vb2_queue_release(src_vq);
+ if (ret)
mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
- }
return ret;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 97a1b6664c20..d14bc208ea5e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -20,7 +20,7 @@
#include "mtk_vcodec_dec_pm.h"
#include "mtk_vcodec_intr.h"
#include "mtk_vcodec_util.h"
-#include "mtk_vpu.h"
+#include "mtk_vcodec_fw.h"
#define VDEC_HW_ACTIVE 0x10
#define VDEC_IRQ_CFG 0x11
@@ -77,22 +77,6 @@ static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
-static void mtk_vcodec_dec_reset_handler(void *priv)
-{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
-
- mtk_v4l2_err("Watchdog timeout!!");
-
- mutex_lock(&dev->dev_mutex);
- list_for_each_entry(ctx, &dev->ctx_list, list) {
- ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ERROR",
- ctx->id);
- }
- mutex_unlock(&dev->dev_mutex);
-}
-
static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
@@ -144,21 +128,20 @@ static int fops_vcodec_open(struct file *file)
if (v4l2_fh_is_singular(&ctx->fh)) {
mtk_vcodec_dec_pw_on(&dev->pm);
/*
- * vpu_load_firmware checks if it was loaded already and
- * does nothing in that case
+ * Does nothing if firmware was already loaded.
*/
- ret = vpu_load_firmware(dev->vpu_plat_dev);
+ ret = mtk_vcodec_fw_load_firmware(dev->fw_handler);
if (ret < 0) {
/*
* Return 0 if downloading firmware successfully,
* otherwise it is failed
*/
- mtk_v4l2_err("vpu_load_firmware failed!");
+ mtk_v4l2_err("failed to load firmware!");
goto err_load_fw;
}
dev->dec_capability =
- vpu_get_vdec_hw_capa(dev->vpu_plat_dev);
+ mtk_vcodec_fw_get_vdec_capa(dev->fw_handler);
mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
}
@@ -228,6 +211,8 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
struct mtk_vcodec_dev *dev;
struct video_device *vfd_dec;
struct resource *res;
+ phandle rproc_phandle;
+ enum mtk_vcodec_fw_type fw_type;
int i, ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -237,19 +222,33 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
- dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
- if (dev->vpu_plat_dev == NULL) {
- mtk_v4l2_err("[VPU] vpu device in not ready");
- return -EPROBE_DEFER;
+ if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
+ &rproc_phandle)) {
+ fw_type = VPU;
+ } else if (!of_property_read_u32(pdev->dev.of_node, "mediatek,scp",
+ &rproc_phandle)) {
+ fw_type = SCP;
+ } else {
+ mtk_v4l2_err("Could not get vdec IPI device");
+ return -ENODEV;
+ }
+ if (!pdev->dev.dma_parms) {
+ pdev->dev.dma_parms = devm_kzalloc(&pdev->dev,
+ sizeof(*pdev->dev.dma_parms),
+ GFP_KERNEL);
+ if (!pdev->dev.dma_parms)
+ return -ENOMEM;
}
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_dec_reset_handler,
- dev, VPU_RST_DEC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_DEC);
+ if (IS_ERR(dev->fw_handler))
+ return PTR_ERR(dev->fw_handler);
ret = mtk_vcodec_init_dec_pm(dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
- return ret;
+ goto err_dec_pm;
}
for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
@@ -269,6 +268,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
dev->dec_irq = platform_get_irq(pdev, 0);
+ irq_set_status_flags(dev->dec_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->dec_irq,
mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
if (ret) {
@@ -278,7 +278,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- disable_irq(dev->dec_irq);
mutex_init(&dev->dec_mutex);
mutex_init(&dev->dev_mutex);
spin_lock_init(&dev->irqlock);
@@ -352,6 +351,8 @@ err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_res:
mtk_vcodec_release_dec_pm(dev);
+err_dec_pm:
+ mtk_vcodec_fw_release(dev->fw_handler);
return ret;
}
@@ -376,6 +377,7 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
v4l2_device_unregister(&dev->v4l2_dev);
mtk_vcodec_release_dec_pm(dev);
+ mtk_vcodec_fw_release(dev->fw_handler);
return 0;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
index 5a6ec8fb52da..36dfe3fc056a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -12,7 +12,6 @@
#include "mtk_vcodec_dec_pm.h"
#include "mtk_vcodec_util.h"
-#include "mtk_vpu.h"
int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
{
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 9fd56dee7fd1..3dd010cba23e 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -300,6 +300,40 @@ struct mtk_vcodec_ctx {
};
+enum mtk_chip {
+ MTK_MT8173,
+ MTK_MT8183,
+};
+
+/**
+ * struct mtk_vcodec_enc_pdata - compatible data for each IC
+ *
+ * @chip: chip this encoder is compatible with
+ *
+ * @uses_ext: whether the encoder uses the extended firmware messaging format
+ * @has_lt_irq: whether the encoder uses the LT irq
+ * @min_bitrate: minimum supported encoding bitrate
+ * @max_bitrate: maximum supported encoding bitrate
+ * @capture_formats: array of supported capture formats
+ * @num_capture_formats: number of entries in capture_formats
+ * @output_formats: array of supported output formats
+ * @num_output_formats: number of entries in output_formats
+ */
+struct mtk_vcodec_enc_pdata {
+ enum mtk_chip chip;
+
+ bool uses_ext;
+ bool has_lt_irq;
+ unsigned long min_bitrate;
+ unsigned long max_bitrate;
+ const struct mtk_video_fmt *capture_formats;
+ size_t num_capture_formats;
+ const struct mtk_video_fmt *output_formats;
+ size_t num_output_formats;
+};
+
+#define MTK_ENC_CTX_IS_EXT(ctx) ((ctx)->dev->venc_pdata->uses_ext)
+
/**
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
@@ -309,13 +343,13 @@ struct mtk_vcodec_ctx {
* @m2m_dev_dec: m2m device for decoder
* @m2m_dev_enc: m2m device for encoder.
* @plat_dev: platform device
- * @vpu_plat_dev: mtk vpu platform device
* @ctx_list: list of struct mtk_vcodec_ctx
* @irqlock: protect data access by irq handler and work thread
* @curr_ctx: The context that is waiting for codec hardware
*
* @reg_base: Mapped address of MTK Vcodec registers.
*
+ * @fw_handler: used to communicate with the firmware.
* @id_counter: used to identify current opened instance
*
* @encode_workqueue: encode work queue
@@ -344,11 +378,13 @@ struct mtk_vcodec_dev {
struct v4l2_m2m_dev *m2m_dev_dec;
struct v4l2_m2m_dev *m2m_dev_enc;
struct platform_device *plat_dev;
- struct platform_device *vpu_plat_dev;
struct list_head ctx_list;
spinlock_t irqlock;
struct mtk_vcodec_ctx *curr_ctx;
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_enc_pdata *venc_pdata;
+
+ struct mtk_vcodec_fw *fw_handler;
unsigned long id_counter;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index d469ff6464b2..21de1431cfcb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -23,58 +23,15 @@
#define DFT_CFG_WIDTH MTK_VENC_MIN_W
#define DFT_CFG_HEIGHT MTK_VENC_MIN_H
#define MTK_MAX_CTRLS_HINT 20
-#define OUT_FMT_IDX 0
-#define CAP_FMT_IDX 4
+#define MTK_DEFAULT_FRAMERATE_NUM 1001
+#define MTK_DEFAULT_FRAMERATE_DENOM 30000
static void mtk_venc_worker(struct work_struct *work);
-static const struct mtk_video_fmt mtk_video_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_NV12M,
- .type = MTK_FMT_FRAME,
- .num_planes = 2,
- },
- {
- .fourcc = V4L2_PIX_FMT_NV21M,
- .type = MTK_FMT_FRAME,
- .num_planes = 2,
- },
- {
- .fourcc = V4L2_PIX_FMT_YUV420M,
- .type = MTK_FMT_FRAME,
- .num_planes = 3,
- },
- {
- .fourcc = V4L2_PIX_FMT_YVU420M,
- .type = MTK_FMT_FRAME,
- .num_planes = 3,
- },
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_ENC,
- .num_planes = 1,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .type = MTK_FMT_ENC,
- .num_planes = 1,
- },
-};
-
-#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
-
-static const struct mtk_codec_framesizes mtk_venc_framesizes[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
- MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
- MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
- },
+static const struct v4l2_frmsize_stepwise mtk_venc_framesizes = {
+ MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16,
};
#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_venc_framesizes)
@@ -156,59 +113,77 @@ static const struct v4l2_ctrl_ops mtk_vcodec_enc_ctrl_ops = {
.s_ctrl = vidioc_venc_s_ctrl,
};
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f,
+ const struct mtk_video_fmt *formats,
+ size_t num_formats)
+{
+ if (f->index >= num_formats)
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ return 0;
+}
+
+static const struct mtk_video_fmt *
+mtk_venc_find_format(u32 fourcc, const struct mtk_vcodec_enc_pdata *pdata)
{
const struct mtk_video_fmt *fmt;
- int i, j = 0;
-
- for (i = 0; i < NUM_FORMATS; ++i) {
- if (output_queue && mtk_video_formats[i].type != MTK_FMT_FRAME)
- continue;
- if (!output_queue && mtk_video_formats[i].type != MTK_FMT_ENC)
- continue;
-
- if (j == f->index) {
- fmt = &mtk_video_formats[i];
- f->pixelformat = fmt->fourcc;
- memset(f->reserved, 0, sizeof(f->reserved));
- return 0;
- }
- ++j;
+ unsigned int k;
+
+ for (k = 0; k < pdata->num_capture_formats; k++) {
+ fmt = &pdata->capture_formats[k];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ for (k = 0; k < pdata->num_output_formats; k++) {
+ fmt = &pdata->output_formats[k];
+ if (fmt->fourcc == fourcc)
+ return fmt;
}
- return -EINVAL;
+ return NULL;
}
static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- int i = 0;
+ const struct mtk_video_fmt *fmt;
if (fsize->index != 0)
return -EINVAL;
- for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
- if (fsize->pixel_format != mtk_venc_framesizes[i].fourcc)
- continue;
+ fmt = mtk_venc_find_format(fsize->pixel_format,
+ fh_to_ctx(fh)->dev->venc_pdata);
+ if (!fmt)
+ return -EINVAL;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = mtk_venc_framesizes[i].stepwise;
- return 0;
- }
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_venc_framesizes;
- return -EINVAL;
+ return 0;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, false);
+ const struct mtk_vcodec_enc_pdata *pdata =
+ fh_to_ctx(priv)->dev->venc_pdata;
+
+ return vidioc_enum_fmt(f, pdata->capture_formats,
+ pdata->num_capture_formats);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, true);
+ const struct mtk_vcodec_enc_pdata *pdata =
+ fh_to_ctx(priv)->dev->venc_pdata;
+
+ return vidioc_enum_fmt(f, pdata->output_formats,
+ pdata->num_output_formats);
}
static int vidioc_venc_querycap(struct file *file, void *priv,
@@ -225,14 +200,18 @@ static int vidioc_venc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_fract *timeperframe = &a->parm.output.timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
- ctx->enc_params.framerate_num =
- a->parm.output.timeperframe.denominator;
- ctx->enc_params.framerate_denom =
- a->parm.output.timeperframe.numerator;
+ if (timeperframe->numerator == 0 || timeperframe->denominator == 0) {
+ timeperframe->numerator = MTK_DEFAULT_FRAMERATE_NUM;
+ timeperframe->denominator = MTK_DEFAULT_FRAMERATE_DENOM;
+ }
+
+ ctx->enc_params.framerate_num = timeperframe->denominator;
+ ctx->enc_params.framerate_denom = timeperframe->numerator;
ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE;
a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
@@ -266,20 +245,6 @@ static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
-static const struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
-{
- const struct mtk_video_fmt *fmt;
- unsigned int k;
-
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
- return fmt;
- }
-
- return NULL;
-}
-
/* V4L2 specification suggests the driver corrects the format struct if any of
* the dimensions is unsupported
*/
@@ -332,12 +297,14 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->num_planes = fmt->num_planes;
pix_fmt_mp->plane_fmt[0].sizeimage =
- pix_fmt_mp->width * pix_fmt_mp->height;
+ pix_fmt_mp->width * pix_fmt_mp->height +
+ ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
if (pix_fmt_mp->num_planes == 2) {
pix_fmt_mp->plane_fmt[1].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 2;
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
+ (ALIGN(pix_fmt_mp->width, 16) * 16);
pix_fmt_mp->plane_fmt[2].sizeimage = 0;
pix_fmt_mp->plane_fmt[1].bytesperline =
pix_fmt_mp->width;
@@ -345,7 +312,8 @@ static int vidioc_try_fmt(struct v4l2_format *f,
} else if (pix_fmt_mp->num_planes == 3) {
pix_fmt_mp->plane_fmt[1].sizeimage =
pix_fmt_mp->plane_fmt[2].sizeimage =
- (pix_fmt_mp->width * pix_fmt_mp->height) / 4;
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
+ ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
pix_fmt_mp->plane_fmt[1].bytesperline =
pix_fmt_mp->plane_fmt[2].bytesperline =
pix_fmt_mp->width / 2;
@@ -414,6 +382,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data;
int i, ret;
@@ -436,10 +405,10 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
return -EINVAL;
}
- fmt = mtk_venc_find_format(f);
+ fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_venc_find_format(f);
+ fmt = &ctx->dev->venc_pdata->capture_formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
}
q_data->fmt = fmt;
@@ -476,6 +445,7 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data;
int ret, i;
@@ -499,10 +469,10 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
return -EINVAL;
}
- fmt = mtk_venc_find_format(f);
+ fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_venc_find_format(f);
+ fmt = &ctx->dev->venc_pdata->output_formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
}
pix_fmt_mp->height = clamp(pix_fmt_mp->height,
@@ -580,11 +550,12 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
{
const struct mtk_video_fmt *fmt;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
- fmt = mtk_venc_find_format(f);
+ fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_venc_find_format(f);
+ fmt = &ctx->dev->venc_pdata->capture_formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
}
f->fmt.pix_mp.colorspace = ctx->colorspace;
f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc;
@@ -598,11 +569,13 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
- fmt = mtk_venc_find_format(f);
+ fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_venc_find_format(f);
+ fmt = &ctx->dev->venc_pdata->output_formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
}
if (!f->fmt.pix_mp.colorspace) {
f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
@@ -918,8 +891,17 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
ctx->state = MTK_STATE_FREE;
}
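+
+/*
+ * Force progressive field order on output buffers: the encoder does not
+ * handle interlaced content.
+ */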
+static int vb2ops_venc_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
static const struct vb2_ops mtk_venc_vb2_ops = {
.queue_setup = vb2ops_venc_queue_setup,
+ .buf_out_validate = vb2ops_venc_buf_out_validate,
.buf_prepare = vb2ops_venc_buf_prepare,
.buf_queue = vb2ops_venc_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
@@ -1187,7 +1169,7 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
q_data->coded_height = DFT_CFG_HEIGHT;
q_data->field = V4L2_FIELD_NONE;
- q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->fmt = &ctx->dev->venc_pdata->output_formats[0];
v4l_bound_align_image(&q_data->coded_width,
MTK_VENC_MIN_W,
@@ -1216,12 +1198,14 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
memset(q_data, 0, sizeof(struct mtk_q_data));
q_data->coded_width = DFT_CFG_WIDTH;
q_data->coded_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->fmt = &ctx->dev->venc_pdata->capture_formats[0];
q_data->field = V4L2_FIELD_NONE;
ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] = 0;
+ ctx->enc_params.framerate_num = MTK_DEFAULT_FRAMERATE_NUM;
+ ctx->enc_params.framerate_denom = MTK_DEFAULT_FRAMERATE_DENOM;
}
int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
@@ -1231,8 +1215,11 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
v4l2_ctrl_handler_init(handler, MTK_MAX_CTRLS_HINT);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+ 1, 1, 1, 1);
v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE,
- 1, 4000000, 1, 4000000);
+ ctx->dev->venc_pdata->min_bitrate,
+ ctx->dev->venc_pdata->max_bitrate, 1, 4000000);
v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_B_FRAMES,
0, 2, 1, 0);
v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 4d31f1ed113f..dcfa2c2d4def 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -21,11 +21,55 @@
#include "mtk_vcodec_enc_pm.h"
#include "mtk_vcodec_intr.h"
#include "mtk_vcodec_util.h"
-#include "mtk_vpu.h"
+#include "mtk_vcodec_fw.h"
module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
+static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+};
+
+static const struct mtk_video_fmt mtk_video_formats_capture_mt8173[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
+static const struct mtk_video_fmt mtk_video_formats_capture_mt8183[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
/* Wake up context wait_queue */
static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
{
@@ -101,22 +145,6 @@ static irqreturn_t mtk_vcodec_enc_lt_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
-static void mtk_vcodec_enc_reset_handler(void *priv)
-{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
-
- mtk_v4l2_debug(0, "Watchdog timeout!!");
-
- mutex_lock(&dev->dev_mutex);
- list_for_each_entry(ctx, &dev->ctx_list, list) {
- ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
- ctx->id);
- }
- mutex_unlock(&dev->dev_mutex);
-}
-
static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
@@ -159,10 +187,10 @@ static int fops_vcodec_open(struct file *file)
if (v4l2_fh_is_singular(&ctx->fh)) {
/*
- * vpu_load_firmware checks if it was loaded already and
+ * mtk_vcodec_fw_load_firmware checks if it was loaded already and
* does nothing in that case
*/
- ret = vpu_load_firmware(dev->vpu_plat_dev);
+ ret = mtk_vcodec_fw_load_firmware(dev->fw_handler);
if (ret < 0) {
/*
* Return 0 if downloading firmware successfully,
@@ -173,7 +201,7 @@ static int fops_vcodec_open(struct file *file)
}
dev->enc_capability =
- vpu_get_venc_hw_capa(dev->vpu_plat_dev);
+ mtk_vcodec_fw_get_venc_capa(dev->fw_handler);
mtk_v4l2_debug(0, "encoder capability %x", dev->enc_capability);
}
@@ -235,7 +263,9 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
struct resource *res;
- int i, j, ret;
+ phandle rproc_phandle;
+ enum mtk_vcodec_fw_type fw_type;
+ int ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -244,30 +274,43 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
- dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
- if (dev->vpu_plat_dev == NULL) {
- mtk_v4l2_err("[VPU] vpu device in not ready");
- return -EPROBE_DEFER;
+ if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
+ &rproc_phandle)) {
+ fw_type = VPU;
+ } else if (!of_property_read_u32(pdev->dev.of_node, "mediatek,scp",
+ &rproc_phandle)) {
+ fw_type = SCP;
+ } else {
+ mtk_v4l2_err("Could not get venc IPI device");
+ return -ENODEV;
}
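+
+ /* Make sure dma_parms exists so a 32-bit max DMA segment size can be set. */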
+ if (!pdev->dev.dma_parms) {
+ pdev->dev.dma_parms = devm_kzalloc(&pdev->dev,
+ sizeof(*pdev->dev.dma_parms),
+ GFP_KERNEL);
+ if (!pdev->dev.dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
- vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_enc_reset_handler,
- dev, VPU_RST_ENC);
+ dev->fw_handler = mtk_vcodec_fw_select(dev, fw_type, VPU_RST_ENC);
+ if (IS_ERR(dev->fw_handler))
+ return PTR_ERR(dev->fw_handler);
+ dev->venc_pdata = of_device_get_match_data(&pdev->dev);
ret = mtk_vcodec_init_enc_pm(dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get mt vcodec clock source!");
- return ret;
+ goto err_enc_pm;
}
- for (i = VENC_SYS, j = 0; i < NUM_MAX_VCODEC_REG_BASE; i++, j++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, j);
- dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR((__force void *)dev->reg_base[i])) {
- ret = PTR_ERR((__force void *)dev->reg_base[i]);
- goto err_res;
- }
- mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[i]);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->reg_base[VENC_SYS] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[VENC_SYS])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[VENC_SYS]);
+ goto err_res;
}
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_SYS, dev->reg_base[VENC_SYS]);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
@@ -277,6 +320,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
dev->enc_irq = platform_get_irq(pdev, 0);
+ irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
0, pdev->name, dev);
@@ -288,20 +332,30 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- dev->enc_lt_irq = platform_get_irq(pdev, 1);
- ret = devm_request_irq(&pdev->dev,
- dev->enc_lt_irq, mtk_vcodec_enc_lt_irq_handler,
- 0, pdev->name, dev);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to install dev->enc_lt_irq %d (%d)",
- dev->enc_lt_irq, ret);
- ret = -EINVAL;
- goto err_res;
+ if (dev->venc_pdata->has_lt_irq) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dev->reg_base[VENC_LT_SYS] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[VENC_LT_SYS])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[VENC_LT_SYS]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_LT_SYS, dev->reg_base[VENC_LT_SYS]);
+
+ dev->enc_lt_irq = platform_get_irq(pdev, 1);
+ irq_set_status_flags(dev->enc_lt_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev,
+ dev->enc_lt_irq,
+ mtk_vcodec_enc_lt_irq_handler,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to install dev->enc_lt_irq %d (%d)",
+ dev->enc_lt_irq, ret);
+ ret = -EINVAL;
+ goto err_res;
+ }
}
- disable_irq(dev->enc_irq);
- disable_irq(dev->enc_lt_irq); /* VENC_LT */
mutex_init(&dev->enc_mutex);
mutex_init(&dev->dev_mutex);
spin_lock_init(&dev->irqlock);
@@ -377,11 +431,38 @@ err_enc_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_res:
mtk_vcodec_release_enc_pm(dev);
+err_enc_pm:
+ mtk_vcodec_fw_release(dev->fw_handler);
return ret;
}
+static const struct mtk_vcodec_enc_pdata mt8173_pdata = {
+ .chip = MTK_MT8173,
+ .has_lt_irq = true,
+ .capture_formats = mtk_video_formats_capture_mt8173,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173),
+ .output_formats = mtk_video_formats_output_mt8173,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .min_bitrate = 1,
+ .max_bitrate = 4000000,
+};
+
+static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
+ .chip = MTK_MT8183,
+ .has_lt_irq = false,
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_mt8183,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
+ /* MT8183 supports the same output formats as MT8173 */
+ .output_formats = mtk_video_formats_output_mt8173,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .min_bitrate = 64,
+ .max_bitrate = 40000000,
+};
+
static const struct of_device_id mtk_vcodec_enc_match[] = {
- {.compatible = "mediatek,mt8173-vcodec-enc",},
+ {.compatible = "mediatek,mt8173-vcodec-enc", .data = &mt8173_pdata},
+ {.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
{},
};
MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
@@ -401,6 +482,7 @@ static int mtk_vcodec_enc_remove(struct platform_device *pdev)
v4l2_device_unregister(&dev->v4l2_dev);
mtk_vcodec_release_enc_pm(dev);
+ mtk_vcodec_fw_release(dev->fw_handler);
return 0;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 3e2bfded79a6..ee22902aaa71 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -12,8 +12,6 @@
#include "mtk_vcodec_enc_pm.h"
#include "mtk_vcodec_util.h"
-#include "mtk_vpu.h"
-
int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
{
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
new file mode 100644
index 000000000000..6c2a2568d844
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "mtk_vcodec_fw.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_drv.h"
+
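+/*
+ * Ops implemented by both firmware backends (VPU and SCP), letting the
+ * codec code stay independent of which remote processor runs the firmware.
+ */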
+struct mtk_vcodec_fw_ops {
+ int (*load_firmware)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
+ unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
+ void * (*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
+ int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler, const char *name, void *priv);
+ int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait);
+};
+
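+/* Firmware handle: @pdev is used by the VPU backend, @scp by the SCP backend. */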
+struct mtk_vcodec_fw {
+ enum mtk_vcodec_fw_type type;
+ const struct mtk_vcodec_fw_ops *ops;
+ struct platform_device *pdev;
+ struct mtk_scp *scp;
+};
+
+static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return vpu_load_firmware(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_vdec_hw_capa(fw->pdev);
+}
+
+static unsigned int mtk_vcodec_vpu_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return vpu_get_venc_hw_capa(fw->pdev);
+}
+
+static void *mtk_vcodec_vpu_map_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return vpu_mapping_dm_addr(fw->pdev, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ /*
+ * The handler we receive takes a void * as its first argument. We
+ * cannot change this because it needs to be passed down to the rproc
+ * subsystem when SCP is used. VPU takes a const argument, which is
+ * more constrained, so the conversion below is safe.
+ */
+ ipi_handler_t handler_const = (ipi_handler_t)handler;
+
+ return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
+}
+
+static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return vpu_ipi_send(fw->pdev, id, buf, len);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
+ .load_firmware = mtk_vcodec_vpu_load_firmware,
+ .get_vdec_capa = mtk_vcodec_vpu_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_vpu_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_map_dm_addr,
+ .ipi_register = mtk_vcodec_vpu_set_ipi_register,
+ .ipi_send = mtk_vcodec_vpu_ipi_send,
+};
+
+static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return rproc_boot(scp_get_rproc(fw->scp));
+}
+
+static unsigned int mtk_vcodec_scp_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_vdec_hw_capa(fw->scp);
+}
+
+static unsigned int mtk_vcodec_scp_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return scp_get_venc_hw_capa(fw->scp);
+}
+
+static void *mtk_vcodec_vpu_scp_dm_addr(struct mtk_vcodec_fw *fw,
+ u32 dtcm_dmem_addr)
+{
+ return scp_mapping_dm_addr(fw->scp, dtcm_dmem_addr);
+}
+
+static int mtk_vcodec_scp_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ return scp_ipi_register(fw->scp, id, handler, priv);
+}
+
+static int mtk_vcodec_scp_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return scp_ipi_send(fw->scp, id, buf, len, wait);
+}
+
+static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
+ .load_firmware = mtk_vcodec_scp_load_firmware,
+ .get_vdec_capa = mtk_vcodec_scp_get_vdec_capa,
+ .get_venc_capa = mtk_vcodec_scp_get_venc_capa,
+ .map_dm_addr = mtk_vcodec_vpu_scp_dm_addr,
+ .ipi_register = mtk_vcodec_scp_set_ipi_register,
+ .ipi_send = mtk_vcodec_scp_ipi_send,
+};
+
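+/*
+ * VPU watchdog handler: on a firmware timeout, every active context is
+ * moved to MTK_STATE_ABORT.
+ */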
+static void mtk_vcodec_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_err("Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_type type,
+ enum rst_id rst_id)
+{
+ const struct mtk_vcodec_fw_ops *ops;
+ struct mtk_vcodec_fw *fw;
+ struct platform_device *fw_pdev = NULL;
+ struct mtk_scp *scp = NULL;
+
+ switch (type) {
+ case VPU:
+ ops = &mtk_vcodec_vpu_msg;
+ fw_pdev = vpu_get_plat_device(dev->plat_dev);
+ if (!fw_pdev) {
+ mtk_v4l2_err("firmware device is not ready");
+ return ERR_PTR(-EINVAL);
+ }
+ vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_reset_handler,
+ dev, rst_id);
+ break;
+ case SCP:
+ ops = &mtk_vcodec_rproc_msg;
+ scp = scp_get(dev->plat_dev);
+ if (!scp) {
+ mtk_v4l2_err("could not get vdec scp handle");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+ break;
+ default:
+ mtk_v4l2_err("invalid vcodec fw type");
+ return ERR_PTR(-EINVAL);
+ }
+
+ fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
+ fw->type = type;
+ fw->ops = ops;
+ fw->pdev = fw_pdev;
+ fw->scp = scp;
+
+ return fw;
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_select);
+
+void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw)
+{
+ switch (fw->type) {
+ case VPU:
+ put_device(&fw->pdev->dev);
+ break;
+ case SCP:
+ scp_put(fw->scp);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_release);
+
+int mtk_vcodec_fw_load_firmware(struct mtk_vcodec_fw *fw)
+{
+ return fw->ops->load_firmware(fw);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_load_firmware);
+
+unsigned int mtk_vcodec_fw_get_vdec_capa(struct mtk_vcodec_fw *fw)
+{
+ return fw->ops->get_vdec_capa(fw);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_get_vdec_capa);
+
+unsigned int mtk_vcodec_fw_get_venc_capa(struct mtk_vcodec_fw *fw)
+{
+ return fw->ops->get_venc_capa(fw);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_get_venc_capa);
+
+void *mtk_vcodec_fw_map_dm_addr(struct mtk_vcodec_fw *fw, u32 mem_addr)
+{
+ return fw->ops->map_dm_addr(fw, mem_addr);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_map_dm_addr);
+
+int mtk_vcodec_fw_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv)
+{
+ return fw->ops->ipi_register(fw, id, handler, name, priv);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_ipi_register);
+
+int mtk_vcodec_fw_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ return fw->ops->ipi_send(fw, id, buf, len, wait);
+}
+EXPORT_SYMBOL_GPL(mtk_vcodec_fw_ipi_send);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
new file mode 100644
index 000000000000..fadbbe6ba6cd
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MTK_VCODEC_FW_H_
+#define _MTK_VCODEC_FW_H_
+
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "../mtk-vpu/mtk_vpu.h"
+
+struct mtk_vcodec_dev;
+
+enum mtk_vcodec_fw_type {
+ VPU,
+ SCP,
+};
+
+struct mtk_vcodec_fw;
+
+typedef void (*mtk_vcodec_ipi_handler) (void *data,
+ unsigned int len, void *priv);
+
+struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
+ enum mtk_vcodec_fw_type type,
+ enum rst_id rst_id);
+void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw);
+
+int mtk_vcodec_fw_load_firmware(struct mtk_vcodec_fw *fw);
+unsigned int mtk_vcodec_fw_get_vdec_capa(struct mtk_vcodec_fw *fw);
+unsigned int mtk_vcodec_fw_get_venc_capa(struct mtk_vcodec_fw *fw);
+void *mtk_vcodec_fw_map_dm_addr(struct mtk_vcodec_fw *fw, u32 mem_addr);
+int mtk_vcodec_fw_ipi_register(struct mtk_vcodec_fw *fw, int id,
+ mtk_vcodec_ipi_handler handler,
+ const char *name, void *priv);
+int mtk_vcodec_fw_ipi_send(struct mtk_vcodec_fw *fw, int id,
+ void *buf, unsigned int len, unsigned int wait);
+
+#endif /* _MTK_VCODEC_FW_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index d48f542db1a9..ac5973b6735f 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -9,7 +9,6 @@
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_util.h"
-#include "mtk_vpu.h"
/* For encoder, this will enable logs in venc/*/
bool mtk_vcodec_dbg;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 50048c170b99..40d6e6c5ac7a 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -281,7 +281,6 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu.id = IPI_VDEC_H264;
- inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
err = vpu_dec_init(&inst->vpu);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 6011fdd60a22..e5393f841080 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -400,7 +400,6 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu.id = IPI_VDEC_VP8;
- inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
err = vpu_dec_init(&inst->vpu);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 257a5b5ad212..5ea153a68522 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -795,7 +795,6 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu.id = IPI_VDEC_VP9;
- inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
if (vpu_dec_init(&inst->vpu)) {
@@ -960,7 +959,7 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
goto DECODE_ERROR;
}
- if (vp9_decode_end_proc(inst) != true) {
+ if (!vp9_decode_end_proc(inst)) {
mtk_vcodec_err(inst, "vp9_decode_end_proc");
ret = -EINVAL;
goto DECODE_ERROR;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
index ceb4db4cb3be..e913f963b7db 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -7,8 +7,6 @@
#ifndef _VDEC_DRV_BASE_
#define _VDEC_DRV_BASE_
-#include "mtk_vcodec_drv.h"
-
#include "vdec_drv_if.h"
struct vdec_common_if {
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
index 2e43dd4486e0..b18743b906ea 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -13,7 +13,6 @@
#include "mtk_vcodec_dec.h"
#include "vdec_drv_base.h"
#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vpu.h"
int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
{
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 948a12fd9d46..58b0e6fa8fd2 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -8,6 +8,7 @@
#include "mtk_vcodec_util.h"
#include "vdec_ipi_msg.h"
#include "vdec_vpu_if.h"
+#include "mtk_vcodec_fw.h"
static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
{
@@ -18,7 +19,8 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
/* mapping VPU address to kernel virtual address */
/* the content in vsi is initialized to 0 in VPU */
- vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+ vpu->vsi = mtk_vcodec_fw_map_dm_addr(vpu->ctx->dev->fw_handler,
+ msg->vpu_inst_addr);
vpu->inst_addr = msg->vpu_inst_addr;
mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
@@ -34,7 +36,7 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-static void vpu_dec_ipi_handler(const void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
const struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
@@ -74,7 +76,8 @@ static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
vpu->failure = 0;
vpu->signaled = 0;
- err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ err = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, vpu->id, msg,
+ len, 2000);
if (err) {
mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
vpu->id, *(uint32_t *)msg, err);
@@ -110,7 +113,8 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
init_waitqueue_head(&vpu->wq);
vpu->handler = vpu_dec_ipi_handler;
- err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
+ err = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
+ vpu->handler, "vdec", NULL);
if (err != 0) {
mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
return err;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index f779b0676fbd..85224eb7e34b 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -7,11 +7,13 @@
#ifndef _VDEC_VPU_IF_H_
#define _VDEC_VPU_IF_H_
-#include "mtk_vpu.h"
+#include "mtk_vcodec_fw.h"
+
+struct mtk_vcodec_ctx;
/**
* struct vdec_vpu_inst - VPU instance for video codec
- * @ipi_id : ipi id for each decoder
+ * @id : ipi msg id for each decoder
* @vsi : driver structure allocated by VPU side and shared to AP side
* for control and info share
* @failure : VPU execution result status, 0: success, others: fail
@@ -23,15 +25,14 @@
* @handler : ipi handler for each decoder
*/
struct vdec_vpu_inst {
- enum ipi_id id;
+ int id;
void *vsi;
int32_t failure;
uint32_t inst_addr;
unsigned int signaled;
struct mtk_vcodec_ctx *ctx;
- struct platform_device *dev;
wait_queue_head_t wq;
- ipi_handler_t handler;
+ mtk_vcodec_ipi_handler handler;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index b9624f8df0e9..d0123dfc5f93 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -18,7 +18,6 @@
#include "../venc_drv_base.h"
#include "../venc_ipi_msg.h"
#include "../venc_vpu_if.h"
-#include "mtk_vpu.h"
static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
@@ -26,6 +25,16 @@ static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
/*
+ * enum venc_h264_frame_type - h264 encoder output bitstream frame type
+ */
+enum venc_h264_frame_type {
+ VENC_H264_IDR_FRM,
+ VENC_H264_I_FRM,
+ VENC_H264_P_FRM,
+ VENC_H264_B_FRM,
+};
+
+/*
* enum venc_h264_vpu_work_buf - h264 encoder buffer index
*/
enum venc_h264_vpu_work_buf {
@@ -139,6 +148,7 @@ struct venc_h264_inst {
struct mtk_vcodec_mem pps_buf;
bool work_buf_allocated;
unsigned int frm_cnt;
+ unsigned int skip_frm_cnt;
unsigned int prepend_hdr;
struct venc_vpu_inst vpu_inst;
struct venc_h264_vsi *vsi;
@@ -257,8 +267,11 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
*/
inst->work_bufs[i].size = wb[i].size;
if (i == VENC_H264_VPU_WORK_BUF_SKIP_FRAME) {
- inst->work_bufs[i].va = vpu_mapping_dm_addr(
- inst->vpu_inst.dev, wb[i].vpua);
+ struct mtk_vcodec_fw *handler;
+
+ handler = inst->vpu_inst.ctx->dev->fw_handler;
+ inst->work_bufs[i].va =
+ mtk_vcodec_fw_map_dm_addr(handler, wb[i].vpua);
inst->work_bufs[i].dma_addr = 0;
} else {
ret = mtk_vcodec_mem_alloc(inst->ctx,
@@ -275,10 +288,12 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
* setting in VPU side.
*/
if (i == VENC_H264_VPU_WORK_BUF_RC_CODE) {
+ struct mtk_vcodec_fw *handler;
void *tmp_va;
- tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
- wb[i].vpua);
+ handler = inst->vpu_inst.ctx->dev->fw_handler;
+ tmp_va = mtk_vcodec_fw_map_dm_addr(handler,
+ wb[i].vpua);
memcpy(inst->work_bufs[i].va, tmp_va,
wb[i].size);
}
@@ -323,6 +338,22 @@ static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
return irq_status;
}
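+
+/*
+ * Derive the type of the current frame: frames at a GOP boundary are IDR,
+ * frames at an intra-period boundary are I, and all others are P (B frames
+ * are not supported).
+ */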
+static int h264_frame_type(struct venc_h264_inst *inst)
+{
+ if ((inst->vsi->config.gop_size != 0 &&
+ (inst->frm_cnt % inst->vsi->config.gop_size) == 0) ||
+ (inst->frm_cnt == 0 && inst->vsi->config.gop_size == 0)) {
+ /* IDR frame */
+ return VENC_H264_IDR_FRM;
+ } else if ((inst->vsi->config.intra_period != 0 &&
+ (inst->frm_cnt % inst->vsi->config.intra_period) == 0) ||
+ (inst->frm_cnt == 0 && inst->vsi->config.intra_period == 0)) {
+ /* I frame */
+ return VENC_H264_I_FRM;
+ } else {
+ return VENC_H264_P_FRM; /* Note: B frames are not supported */
+ }
+}
+
static int h264_encode_sps(struct venc_h264_inst *inst,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
@@ -333,7 +364,7 @@ static int h264_encode_sps(struct venc_h264_inst *inst,
mtk_vcodec_debug_enter(inst);
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL,
- bs_buf, bs_size);
+ bs_buf, bs_size, NULL);
if (ret)
return ret;
@@ -360,7 +391,7 @@ static int h264_encode_pps(struct venc_h264_inst *inst,
mtk_vcodec_debug_enter(inst);
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL,
- bs_buf, bs_size);
+ bs_buf, bs_size, NULL);
if (ret)
return ret;
@@ -406,11 +437,18 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
{
int ret = 0;
unsigned int irq_status;
+ struct venc_frame_info frame_info;
mtk_vcodec_debug_enter(inst);
-
+ mtk_vcodec_debug(inst, "frm_cnt = %d\n ", inst->frm_cnt);
+ frame_info.frm_count = inst->frm_cnt;
+ frame_info.skip_frm_count = inst->skip_frm_cnt;
+ frame_info.frm_type = h264_frame_type(inst);
+ mtk_vcodec_debug(inst, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
+ frame_info.frm_count, frame_info.skip_frm_count,
+ frame_info.frm_type);
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf,
- bs_buf, bs_size);
+ bs_buf, bs_size, &frame_info);
if (ret)
return ret;
@@ -424,6 +462,7 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
inst->work_bufs[VENC_H264_VPU_WORK_BUF_SKIP_FRAME].va,
*bs_size);
++inst->frm_cnt;
+ ++inst->skip_frm_cnt;
return ret;
}
@@ -460,6 +499,7 @@ static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
{
+ const bool is_ext = MTK_ENC_CTX_IS_EXT(ctx);
int ret = 0;
struct venc_h264_inst *inst;
@@ -469,8 +509,7 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
- inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
- inst->vpu_inst.id = IPI_VENC_H264;
+ inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
mtk_vcodec_debug_enter(inst);
@@ -626,7 +665,12 @@ static int h264_enc_set_param(void *handle,
inst->prepend_hdr = 1;
mtk_vcodec_debug(inst, "set prepend header mode");
break;
-
+ case VENC_SET_PARAM_FORCE_INTRA:
+ case VENC_SET_PARAM_GOP_SIZE:
+ case VENC_SET_PARAM_INTRA_PERIOD:
+ inst->frm_cnt = 0;
+ inst->skip_frm_cnt = 0;
+ fallthrough;
default:
ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
break;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 8d36f0362efe..11abb191ada5 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -17,7 +17,6 @@
#include "../venc_drv_base.h"
#include "../venc_ipi_msg.h"
#include "../venc_vpu_if.h"
-#include "mtk_vpu.h"
#define VENC_BITSTREAM_FRAME_SIZE 0x0098
#define VENC_BITSTREAM_HEADER_LEN 0x00e8
@@ -190,10 +189,12 @@ static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
if (i == VENC_VP8_VPU_WORK_BUF_RC_CODE ||
i == VENC_VP8_VPU_WORK_BUF_RC_CODE2 ||
i == VENC_VP8_VPU_WORK_BUF_RC_CODE3) {
+ struct mtk_vcodec_fw *handler;
void *tmp_va;
- tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
- wb[i].vpua);
+ handler = inst->vpu_inst.ctx->dev->fw_handler;
+ tmp_va = mtk_vcodec_fw_map_dm_addr(handler,
+ wb[i].vpua);
memcpy(inst->work_bufs[i].va, tmp_va, wb[i].size);
}
wb[i].iova = inst->work_bufs[i].dma_addr;
@@ -301,7 +302,8 @@ static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
mtk_vcodec_debug(inst, "->frm_cnt=%d", inst->frm_cnt);
- ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, bs_size);
+ ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, bs_size,
+ NULL);
if (ret)
return ret;
@@ -334,7 +336,6 @@ static int vp8_enc_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
- inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
inst->vpu_inst.id = IPI_VENC_VP8;
inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_LT_SYS);
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
index c6bb82ac2dcd..ce0bce811615 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -15,7 +15,6 @@
#include "mtk_vcodec_enc.h"
#include "mtk_vcodec_enc_pm.h"
-#include "mtk_vpu.h"
int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
{
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
index 52fc9cc812fc..0b04a1020873 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
@@ -92,6 +92,19 @@ struct venc_enc_param {
unsigned int gop_size;
};
+/**
+ * struct venc_frame_info - per-frame information to pass to the firmware.
+ *
+ * @frm_count: sequential number for this frame
+ * @skip_frm_count: number of frames skipped so far while encoding
+ * @frm_type: type of the frame, from enum venc_h264_frame_type
+ */
+struct venc_frame_info {
+ unsigned int frm_count; /* per frame update */
+ unsigned int skip_frm_count; /* per frame update */
+ unsigned int frm_type; /* per frame update */
+};
+
/*
* struct venc_frm_buf - frame buffer information used in venc_if_encode()
* @fb_addr: plane frame buffer addresses
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
index 28ee04ca6241..2feb0365179f 100644
--- a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
@@ -62,6 +62,11 @@ struct venc_ap_ipi_msg_set_param {
uint32_t data[8];
};
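+
+/**
+ * struct venc_ap_ipi_msg_set_param_ext - AP to SCP extended set_param structure
+ *
+ * @base:	base message structure
+ * @data_ext:	additional data array used by the SCP firmware
+ */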
+struct venc_ap_ipi_msg_set_param_ext {
+ struct venc_ap_ipi_msg_set_param base;
+ uint32_t data_ext[24];
+};
+
/**
* struct venc_ap_ipi_msg_enc - AP to VPU enc cmd structure
* @msg_id: message id (AP_IPIMSG_XXX_ENC_ENCODE)
@@ -83,6 +88,19 @@ struct venc_ap_ipi_msg_enc {
};
/**
+ * struct venc_ap_ipi_msg_enc_ext - AP to SCP extended enc cmd structure
+ *
+ * @base: base msg structure
+ * @data_item: number of items in the data array
+ * @data: data array to store the per-frame information
+ */
+struct venc_ap_ipi_msg_enc_ext {
+ struct venc_ap_ipi_msg_enc base;
+ uint32_t data_item;
+ uint32_t data[32];
+};
+
+/**
* struct venc_ap_ipi_msg_deinit - AP to VPU deinit cmd structure
* @msg_id: message id (AP_IPIMSG_XXX_ENC_DEINIT)
* @vpu_inst_addr: VPU encoder instance addr
@@ -120,16 +138,17 @@ struct venc_vpu_ipi_msg_common {
* @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
* @vpu_inst_addr: VPU encoder instance addr
* (struct venc_vp8_vsi/venc_h264_vsi *)
- * @reserved: reserved for future use. vpu is running in 32bit. Without
- * this reserved field, if kernel run in 64bit. this struct size
- * will be different between kernel and vpu
+ * @venc_abi_version: ABI version of the firmware. Kernel can use it to
+ * ensure that it is compatible with the firmware.
+ * For MT8173 the value of this field is undefined and
+ * should not be used.
*/
struct venc_vpu_ipi_msg_init {
uint32_t msg_id;
uint32_t status;
uint64_t venc_inst;
uint32_t vpu_inst_addr;
- uint32_t reserved;
+ uint32_t venc_abi_version;
};
/**
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
index 9540709c1905..be6d8790a41e 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -4,7 +4,8 @@
* Author: PoChun Lin <pochun.lin@mediatek.com>
*/
-#include "mtk_vpu.h"
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_fw.h"
#include "venc_ipi_msg.h"
#include "venc_vpu_if.h"
@@ -13,7 +14,25 @@ static void handle_enc_init_msg(struct venc_vpu_inst *vpu, const void *data)
const struct venc_vpu_ipi_msg_init *msg = data;
vpu->inst_addr = msg->vpu_inst_addr;
- vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+ vpu->vsi = mtk_vcodec_fw_map_dm_addr(vpu->ctx->dev->fw_handler,
+ msg->vpu_inst_addr);
+
+ /* Firmware version field value is unspecified on MT8173. */
+ if (vpu->ctx->dev->venc_pdata->chip == MTK_MT8173)
+ return;
+
+ /* Check firmware version. */
+ mtk_vcodec_debug(vpu, "firmware version: 0x%x\n",
+ msg->venc_abi_version);
+ switch (msg->venc_abi_version) {
+ case 1:
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
+ msg->venc_abi_version);
+ vpu->failure = 1;
+ break;
+ }
}
static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data)
@@ -25,7 +44,7 @@ static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data)
vpu->is_key_frm = msg->is_key_frm;
}
-static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv)
+static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
{
const struct venc_vpu_ipi_msg_common *msg = data;
struct venc_vpu_inst *vpu =
@@ -34,6 +53,11 @@ static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv)
mtk_vcodec_debug(vpu, "msg_id %x inst %p status %d",
msg->msg_id, vpu, msg->status);
+ vpu->signaled = 1;
+ vpu->failure = (msg->status != VENC_IPI_MSG_STATUS_OK);
+ if (vpu->failure)
+ goto failure;
+
switch (msg->msg_id) {
case VPU_IPIMSG_ENC_INIT_DONE:
handle_enc_init_msg(vpu, data);
@@ -50,9 +74,7 @@ static void vpu_enc_ipi_handler(const void *data, unsigned int len, void *priv)
break;
}
- vpu->signaled = 1;
- vpu->failure = (msg->status != VENC_IPI_MSG_STATUS_OK);
-
+failure:
mtk_vcodec_debug_leave(vpu);
}
@@ -63,12 +85,13 @@ static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
mtk_vcodec_debug_enter(vpu);
- if (!vpu->dev) {
+ if (!vpu->ctx->dev->fw_handler) {
mtk_vcodec_err(vpu, "inst dev is NULL");
return -EINVAL;
}
- status = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ status = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, vpu->id, msg,
+ len, 2000);
if (status) {
mtk_vcodec_err(vpu, "vpu_ipi_send msg_id %x len %d fail %d",
*(uint32_t *)msg, len, status);
@@ -93,8 +116,9 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
vpu->signaled = 0;
vpu->failure = 0;
- status = vpu_ipi_register(vpu->dev, vpu->id, vpu_enc_ipi_handler,
- NULL, NULL);
+ status = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
+ vpu_enc_ipi_handler, "venc", NULL);
+
if (status) {
mtk_vcodec_err(vpu, "vpu_ipi_register fail %d", status);
return -EINVAL;
@@ -113,49 +137,81 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
return 0;
}
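+
+/*
+ * Helpers computing the extra VENC_SET_PARAM_ENC parameters passed to the
+ * SCP firmware: the right/bottom cropping of the 16-pixel-aligned frame and
+ * the number of 16x16 macroblocks.
+ */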
+static unsigned int venc_enc_param_crop_right(struct venc_vpu_inst *vpu,
+ struct venc_enc_param *enc_prm)
+{
+ unsigned int img_crop_right = enc_prm->buf_width - enc_prm->width;
+
+ return img_crop_right % 16;
+}
+
+static unsigned int venc_enc_param_crop_bottom(struct venc_enc_param *enc_prm)
+{
+ return round_up(enc_prm->height, 16) - enc_prm->height;
+}
+
+static unsigned int venc_enc_param_num_mb(struct venc_enc_param *enc_prm)
+{
+ return DIV_ROUND_UP(enc_prm->width, 16) *
+ DIV_ROUND_UP(enc_prm->height, 16);
+}
+
int vpu_enc_set_param(struct venc_vpu_inst *vpu,
enum venc_set_param_type id,
struct venc_enc_param *enc_param)
{
- struct venc_ap_ipi_msg_set_param out;
+ const bool is_ext = MTK_ENC_CTX_IS_EXT(vpu->ctx);
+ size_t msg_size = is_ext ?
+ sizeof(struct venc_ap_ipi_msg_set_param_ext) :
+ sizeof(struct venc_ap_ipi_msg_set_param);
+ struct venc_ap_ipi_msg_set_param_ext out;
mtk_vcodec_debug(vpu, "id %d ->", id);
memset(&out, 0, sizeof(out));
- out.msg_id = AP_IPIMSG_ENC_SET_PARAM;
- out.vpu_inst_addr = vpu->inst_addr;
- out.param_id = id;
+ out.base.msg_id = AP_IPIMSG_ENC_SET_PARAM;
+ out.base.vpu_inst_addr = vpu->inst_addr;
+ out.base.param_id = id;
switch (id) {
case VENC_SET_PARAM_ENC:
- out.data_item = 0;
+ if (is_ext) {
+ out.base.data_item = 3;
+ out.base.data[0] =
+ venc_enc_param_crop_right(vpu, enc_param);
+ out.base.data[1] =
+ venc_enc_param_crop_bottom(enc_param);
+ out.base.data[2] = venc_enc_param_num_mb(enc_param);
+ } else {
+ out.base.data_item = 0;
+ }
break;
case VENC_SET_PARAM_FORCE_INTRA:
- out.data_item = 0;
+ out.base.data_item = 0;
break;
case VENC_SET_PARAM_ADJUST_BITRATE:
- out.data_item = 1;
- out.data[0] = enc_param->bitrate;
+ out.base.data_item = 1;
+ out.base.data[0] = enc_param->bitrate;
break;
case VENC_SET_PARAM_ADJUST_FRAMERATE:
- out.data_item = 1;
- out.data[0] = enc_param->frm_rate;
+ out.base.data_item = 1;
+ out.base.data[0] = enc_param->frm_rate;
break;
case VENC_SET_PARAM_GOP_SIZE:
- out.data_item = 1;
- out.data[0] = enc_param->gop_size;
+ out.base.data_item = 1;
+ out.base.data[0] = enc_param->gop_size;
break;
case VENC_SET_PARAM_INTRA_PERIOD:
- out.data_item = 1;
- out.data[0] = enc_param->intra_period;
+ out.base.data_item = 1;
+ out.base.data[0] = enc_param->intra_period;
break;
case VENC_SET_PARAM_SKIP_FRAME:
- out.data_item = 0;
+ out.base.data_item = 0;
break;
default:
mtk_vcodec_err(vpu, "id %d not supported", id);
return -EINVAL;
}
- if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ if (vpu_enc_send_msg(vpu, &out, msg_size)) {
mtk_vcodec_err(vpu,
"AP_IPIMSG_ENC_SET_PARAM %d fail", id);
return -EINVAL;
@@ -169,33 +225,44 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
- unsigned int *bs_size)
+ unsigned int *bs_size,
+ struct venc_frame_info *frame_info)
{
- struct venc_ap_ipi_msg_enc out;
+ const bool is_ext = MTK_ENC_CTX_IS_EXT(vpu->ctx);
+ size_t msg_size = is_ext ?
+ sizeof(struct venc_ap_ipi_msg_enc_ext) :
+ sizeof(struct venc_ap_ipi_msg_enc);
+ struct venc_ap_ipi_msg_enc_ext out;
mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
memset(&out, 0, sizeof(out));
- out.msg_id = AP_IPIMSG_ENC_ENCODE;
- out.vpu_inst_addr = vpu->inst_addr;
- out.bs_mode = bs_mode;
+ out.base.msg_id = AP_IPIMSG_ENC_ENCODE;
+ out.base.vpu_inst_addr = vpu->inst_addr;
+ out.base.bs_mode = bs_mode;
if (frm_buf) {
if ((frm_buf->fb_addr[0].dma_addr % 16 == 0) &&
(frm_buf->fb_addr[1].dma_addr % 16 == 0) &&
(frm_buf->fb_addr[2].dma_addr % 16 == 0)) {
- out.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
- out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
- out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
+ out.base.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
+ out.base.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
+ out.base.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
} else {
mtk_vcodec_err(vpu, "dma_addr not align to 16");
return -EINVAL;
}
}
if (bs_buf) {
- out.bs_addr = bs_buf->dma_addr;
- out.bs_size = bs_buf->size;
+ out.base.bs_addr = bs_buf->dma_addr;
+ out.base.bs_size = bs_buf->size;
}
- if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ if (is_ext && frame_info) {
+ out.data_item = 3;
+ out.data[0] = frame_info->frm_count;
+ out.data[1] = frame_info->skip_frm_count;
+ out.data[2] = frame_info->frm_type;
+ }
+ if (vpu_enc_send_msg(vpu, &out, msg_size)) {
mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
bs_mode);
return -EINVAL;
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
index ba301a138a5a..f9be9cab7ff7 100644
--- a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
@@ -7,7 +7,7 @@
#ifndef _VENC_VPU_IF_H_
#define _VENC_VPU_IF_H_
-#include "mtk_vpu.h"
+#include "mtk_vcodec_fw.h"
#include "venc_drv_if.h"
/*
@@ -34,9 +34,8 @@ struct venc_vpu_inst {
int is_key_frm;
unsigned int inst_addr;
void *vsi;
- enum ipi_id id;
+ int id;
struct mtk_vcodec_ctx *ctx;
- struct platform_device *dev;
};
int vpu_enc_init(struct venc_vpu_inst *vpu);
@@ -46,7 +45,8 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
- unsigned int *bs_size);
+ unsigned int *bs_size,
+ struct venc_frame_info *frame_info);
int vpu_enc_deinit(struct venc_vpu_inst *vpu);
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index d30c08983f56..36cb9b6131f7 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -849,10 +849,6 @@ static int mtk_vpu_probe(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
&vpu_debug_fops);
- if (!vpu_debugfs) {
- ret = -ENOMEM;
- goto cleanup_ipi;
- }
#endif
/* Set PTCM to 96K and DTCM to 32K */
@@ -910,7 +906,6 @@ remove_debugfs:
of_reserved_mem_device_release(dev);
#ifdef CONFIG_DEBUG_FS
debugfs_remove(vpu_debugfs);
-cleanup_ipi:
#endif
memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
vpu_mutex_destroy:
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index df78df59da45..08a5473b5610 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -852,8 +852,11 @@ static int emmaprp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev);
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto rel_vdev;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
dev_name(&pdev->dev), pcdev);
if (ret)
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index b91e472ee764..b1fc4518e275 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -142,7 +142,7 @@ static struct isp_reg isp_reg_list[] = {
* readback the same register, in this case the revision register.
*
* See this link for reference:
- * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ * https://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
*/
void omap3isp_flush(struct isp_device *isp)
{
@@ -2328,8 +2328,10 @@ static int isp_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
isp->mmio_base[map_idx] =
devm_ioremap_resource(isp->dev, mem);
- if (IS_ERR(isp->mmio_base[map_idx]))
- return PTR_ERR(isp->mmio_base[map_idx]);
+ if (IS_ERR(isp->mmio_base[map_idx])) {
+ ret = PTR_ERR(isp->mmio_base[map_idx]);
+ goto error;
+ }
}
ret = isp_get_clocks(isp);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 1ac9aef70dff..8811d6dd4ee7 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -703,7 +703,7 @@ isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
* requested.
*/
format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
- /* Fall-through */
+ fallthrough;
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
/* Interlaced orders are only supported at the CCDC output. */
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 6dce33f35041..e47520fcb93c 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -605,42 +605,6 @@ static const struct pxa_mbus_pixelfmt *pxa_mbus_get_fmtdesc(
return pxa_mbus_find_fmtdesc(code, mbus_fmt, ARRAY_SIZE(mbus_fmt));
}
-static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
- unsigned int flags)
-{
- unsigned long common_flags;
- bool hsync = true, vsync = true, pclk, data, mode;
- bool mipi_lanes, mipi_clock;
-
- common_flags = cfg->flags & flags;
-
- switch (cfg->type) {
- case V4L2_MBUS_PARALLEL:
- hsync = common_flags & (V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_HSYNC_ACTIVE_LOW);
- vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_VSYNC_ACTIVE_LOW);
- /* fall through */
- case V4L2_MBUS_BT656:
- pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_PCLK_SAMPLE_FALLING);
- data = common_flags & (V4L2_MBUS_DATA_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_LOW);
- mode = common_flags & (V4L2_MBUS_MASTER | V4L2_MBUS_SLAVE);
- return (!hsync || !vsync || !pclk || !data || !mode) ?
- 0 : common_flags;
- case V4L2_MBUS_CSI2_DPHY:
- mipi_lanes = common_flags & V4L2_MBUS_CSI2_LANES;
- mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
- V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
- return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
- return 0;
-}
-
/**
* struct pxa_camera_format_xlate - match between host and sensor formats
* @code: code of a sensor provided format
@@ -1186,9 +1150,9 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
clk_disable_unprepare(pcdev->clk);
}
-static void pxa_camera_eof(unsigned long arg)
+static void pxa_camera_eof(struct tasklet_struct *t)
{
- struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
+ struct pxa_camera_dev *pcdev = from_tasklet(pcdev, t, task_eof);
unsigned long cifr;
struct pxa_buffer *buf;
@@ -1231,31 +1195,6 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int test_platform_param(struct pxa_camera_dev *pcdev,
- unsigned char buswidth, unsigned long *flags)
-{
- /*
- * Platform specified synchronization and pixel clock polarities are
- * only a recommendation and are only used during probing. The PXA270
- * quick capture interface supports both.
- */
- *flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ?
- V4L2_MBUS_MASTER : V4L2_MBUS_SLAVE) |
- V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_HSYNC_ACTIVE_LOW |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH |
- V4L2_MBUS_VSYNC_ACTIVE_LOW |
- V4L2_MBUS_DATA_ACTIVE_HIGH |
- V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_PCLK_SAMPLE_FALLING;
-
- /* If requested data width is supported by the platform, use it */
- if ((1 << (buswidth - 1)) & pcdev->width_flags)
- return 0;
-
- return -EINVAL;
-}
-
static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev,
unsigned long flags, __u32 pixfmt)
{
@@ -1598,99 +1537,78 @@ static int pxa_camera_init_videobuf2(struct pxa_camera_dev *pcdev)
*/
static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev)
{
+ unsigned int bus_width = pcdev->current_fmt->host_fmt->bits_per_sample;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
u32 pixfmt = pcdev->current_fmt->host_fmt->fourcc;
- unsigned long bus_flags, common_flags;
+ int mbus_config;
int ret;
- ret = test_platform_param(pcdev,
- pcdev->current_fmt->host_fmt->bits_per_sample,
- &bus_flags);
- if (ret < 0)
- return ret;
-
- ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = pxa_mbus_config_compatible(&cfg,
- bus_flags);
- if (!common_flags) {
- dev_warn(pcdev_to_dev(pcdev),
- "Flags incompatible: camera 0x%x, host 0x%lx\n",
- cfg.flags, bus_flags);
- return -EINVAL;
- }
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- } else {
- common_flags = bus_flags;
+ if (!((1 << (bus_width - 1)) & pcdev->width_flags)) {
+ dev_err(pcdev_to_dev(pcdev), "Unsupported bus width %u\n",
+ bus_width);
+ return -EINVAL;
}
pcdev->channels = 1;
/* Make choices, based on platform preferences */
- if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
- if (pcdev->platform_flags & PXA_CAMERA_HSP)
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
- }
+ mbus_config = 0;
+ if (pcdev->platform_flags & PXA_CAMERA_MASTER)
+ mbus_config |= V4L2_MBUS_MASTER;
+ else
+ mbus_config |= V4L2_MBUS_SLAVE;
- if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
- if (pcdev->platform_flags & PXA_CAMERA_VSP)
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
- }
+ if (pcdev->platform_flags & PXA_CAMERA_HSP)
+ mbus_config |= V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ mbus_config |= V4L2_MBUS_HSYNC_ACTIVE_LOW;
- if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
- (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
- if (pcdev->platform_flags & PXA_CAMERA_PCP)
- common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
- else
- common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
- }
+ if (pcdev->platform_flags & PXA_CAMERA_VSP)
+ mbus_config |= V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ mbus_config |= V4L2_MBUS_VSYNC_ACTIVE_LOW;
- cfg.flags = common_flags;
- ret = sensor_call(pcdev, video, s_mbus_config, &cfg);
+ if (pcdev->platform_flags & PXA_CAMERA_PCP)
+ mbus_config |= V4L2_MBUS_PCLK_SAMPLE_RISING;
+ else
+ mbus_config |= V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ mbus_config |= V4L2_MBUS_DATA_ACTIVE_HIGH;
+
+ cfg.flags = mbus_config;
+ ret = sensor_call(pcdev, pad, set_mbus_config, 0, &cfg);
if (ret < 0 && ret != -ENOIOCTLCMD) {
- dev_dbg(pcdev_to_dev(pcdev),
- "camera s_mbus_config(0x%lx) returned %d\n",
- common_flags, ret);
+ dev_err(pcdev_to_dev(pcdev),
+ "Failed to call set_mbus_config: %d\n", ret);
return ret;
}
- pxa_camera_setup_cicr(pcdev, common_flags, pixfmt);
-
- return 0;
-}
-
-static int pxa_camera_try_bus_param(struct pxa_camera_dev *pcdev,
- unsigned char buswidth)
-{
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- unsigned long bus_flags, common_flags;
- int ret = test_platform_param(pcdev, buswidth, &bus_flags);
-
- if (ret < 0)
- return ret;
+ /*
+ * If the requested media bus configuration has not been fully applied,
+ * make sure it is supported by the platform.
+ *
+ * PXA does not support V4L2_MBUS_DATA_ACTIVE_LOW and the bus mastering
+ * roles should match.
+ */
+ if (cfg.flags != mbus_config) {
+ unsigned int pxa_mbus_role = mbus_config & (V4L2_MBUS_MASTER |
+ V4L2_MBUS_SLAVE);
+ if (pxa_mbus_role != (cfg.flags & (V4L2_MBUS_MASTER |
+ V4L2_MBUS_SLAVE))) {
+ dev_err(pcdev_to_dev(pcdev),
+ "Unsupported mbus configuration: bus mastering\n");
+ return -EINVAL;
+ }
- ret = sensor_call(pcdev, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = pxa_mbus_config_compatible(&cfg,
- bus_flags);
- if (!common_flags) {
- dev_warn(pcdev_to_dev(pcdev),
- "Flags incompatible: camera 0x%x, host 0x%lx\n",
- cfg.flags, bus_flags);
+ if (cfg.flags & V4L2_MBUS_DATA_ACTIVE_LOW) {
+ dev_err(pcdev_to_dev(pcdev),
+ "Unsupported mbus configuration: DATA_ACTIVE_LOW\n");
return -EINVAL;
}
- } else if (ret == -ENOIOCTLCMD) {
- ret = 0;
}
- return ret;
+ pxa_camera_setup_cicr(pcdev, cfg.flags, pixfmt);
+
+ return 0;
}
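
Note on the flow above: with the (then new) set_mbus_config pad operation the host proposes a configuration in cfg.flags, the subdev may adjust those flags to what it actually applied, and the host validates the result afterwards. A minimal sketch of that contract, assuming a generic subdev `sd` and a host-chosen `wanted_flags` (both hypothetical):

    struct v4l2_mbus_config cfg = { .type = V4L2_MBUS_PARALLEL };
    int ret;

    cfg.flags = wanted_flags;               /* host preference */
    ret = v4l2_subdev_call(sd, pad, set_mbus_config, 0, &cfg);
    if (ret && ret != -ENOIOCTLCMD)
        return ret;                         /* hard failure */
    /* cfg.flags now holds what the subdev actually applied; the host
     * must re-check it against its own constraints, as done above. */
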
static const struct pxa_mbus_pixelfmt pxa_camera_formats[] = {
@@ -1738,11 +1656,6 @@ static int pxa_camera_get_formats(struct v4l2_device *v4l2_dev,
return 0;
}
- /* This also checks support for the requested bits-per-sample */
- ret = pxa_camera_try_bus_param(pcdev, fmt->bits_per_sample);
- if (ret < 0)
- return 0;
-
switch (code.code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
formats++;
@@ -2478,7 +2391,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_free_dma;
}
- tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
+ tasklet_setup(&pcdev->task_eof, pxa_camera_eof);
pxa_camera_activate(pcdev);
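
The tasklet_init() to tasklet_setup() conversion above follows the tree-wide pattern: the callback now receives the tasklet pointer itself and recovers its container with from_tasklet(), a container_of() wrapper. A minimal sketch with a hypothetical struct mydev:

    struct mydev {
        struct tasklet_struct task;
    };

    static void mydev_task(struct tasklet_struct *t)
    {
        /* from_tasklet(var, t, member) expands to
         * container_of(t, typeof(*var), member) */
        struct mydev *dev = from_tasklet(dev, t, task);
        /* ... bottom-half work ... */
    }

    static void mydev_init(struct mydev *dev)
    {
        tasklet_setup(&dev->task, mydev_task);
    }
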
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 03ef9c5f4774..85b24054f35e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_sync(dev);
return ret;
+ }
ret = csiphy_set_clock_rates(csiphy);
if (ret < 0) {
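
The csiphy fix above addresses a common pm_runtime pitfall: pm_runtime_get_sync() increments the device usage count even when the resume fails, so returning without a put leaks a reference. A sketch of both forms (the second helper, pm_runtime_resume_and_get(), exists in later kernels and drops the count on failure itself):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        pm_runtime_put_sync(dev);   /* balance the count taken above */
        return ret;
    }

    /* equivalent, where available: */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
        return ret;
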
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index fc31c2c169cd..b7d2293a5004 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -2205,14 +2205,6 @@ static const struct camss_video_ops camss_vfe_video_ops = {
.flush_buffers = vfe_flush_buffers,
};
-void msm_vfe_stop_streaming(struct vfe_device *vfe)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
- msm_video_stop_streaming(&vfe->line[i].video_out);
-}
-
/*
* msm_vfe_register_entities - Register subdev node for VFE module
* @vfe: VFE device
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 0d10071ae881..a90b0d2cc6de 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -178,8 +178,6 @@ void msm_vfe_unregister_entities(struct vfe_device *vfe);
void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id);
void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id);
-void msm_vfe_stop_streaming(struct vfe_device *vfe);
-
extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index cdbd6dba1122..114c3ae4a4ab 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -18,6 +18,12 @@
#include "camss-video.h"
#include "camss.h"
+#define CAMSS_FRAME_MIN_WIDTH 1
+#define CAMSS_FRAME_MAX_WIDTH 8191
+#define CAMSS_FRAME_MIN_HEIGHT 1
+#define CAMSS_FRAME_MAX_HEIGHT_RDI 8191
+#define CAMSS_FRAME_MAX_HEIGHT_PIX 4096
+
struct fract {
u8 numerator;
u8 denominator;
@@ -529,17 +535,16 @@ static int video_querycap(struct file *file, void *fh,
return 0;
}
-static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+/*
+ * Returns the index in the video->formats[] array of the element which
+ * has the "ndx"th unique value of pixelformat field.
+ * If not found (no more unique pixelformat's) returns -EINVAL.
+ */
+static int video_get_unique_pixelformat_by_index(struct camss_video *video,
+ int ndx)
{
- struct camss_video *video = video_drvdata(file);
int i, j, k;
- if (f->type != video->type)
- return -EINVAL;
-
- if (f->index >= video->nformats)
- return -EINVAL;
-
/* find index "i" of "k"th unique pixelformat in formats array */
k = -1;
for (i = 0; i < video->nformats; i++) {
@@ -552,11 +557,53 @@ static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
if (j == i)
k++;
- if (k == f->index)
- break;
+ if (k == ndx)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Returns the index in the video->formats[] array of the element which
+ * has a code equal to mcode.
+ * If not found, returns -EINVAL.
+ */
+static int video_get_pixelformat_by_mbus_code(struct camss_video *video,
+ u32 mcode)
+{
+ int i;
+
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].code == mcode)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct camss_video *video = video_drvdata(file);
+ int i;
+
+ if (f->type != video->type)
+ return -EINVAL;
+
+ if (f->index >= video->nformats)
+ return -EINVAL;
+
+ if (f->mbus_code) {
+ /* Each entry in the formats[] table has a unique mbus_code */
+ if (f->index > 0)
+ return -EINVAL;
+
+ i = video_get_pixelformat_by_mbus_code(video, f->mbus_code);
+ } else {
+ i = video_get_unique_pixelformat_by_index(video, f->index);
}
- if (k < f->index)
+ if (i < 0)
return -EINVAL;
f->pixelformat = video->formats[i].pixelformat;
@@ -564,6 +611,36 @@ static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return 0;
}
+static int video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct camss_video *video = video_drvdata(file);
+ int i;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ /* Only accept pixel format present in the formats[] table */
+ for (i = 0; i < video->nformats; i++) {
+ if (video->formats[i].pixelformat == fsize->pixel_format)
+ break;
+ }
+
+ if (i == video->nformats)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = CAMSS_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = CAMSS_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = CAMSS_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = (video->line_based) ?
+ CAMSS_FRAME_MAX_HEIGHT_PIX : CAMSS_FRAME_MAX_HEIGHT_RDI;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
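
For reference, a userspace sketch of how the new VIDIOC_ENUM_FRAMESIZES handler is consumed; since the driver reports V4L2_FRMSIZE_TYPE_CONTINUOUS, only index 0 is valid (fd and pixfmt are assumed to come from the caller):

    #include <linux/videodev2.h>
    #include <string.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static void enum_sizes(int fd, unsigned int pixfmt)
    {
        struct v4l2_frmsizeenum fsize;

        memset(&fsize, 0, sizeof(fsize));
        fsize.index = 0;              /* continuous type: index 0 only */
        fsize.pixel_format = pixfmt;

        if (!ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) &&
            fsize.type == V4L2_FRMSIZE_TYPE_CONTINUOUS)
            printf("width %u..%u height %u..%u\n",
                   fsize.stepwise.min_width, fsize.stepwise.max_width,
                   fsize.stepwise.min_height, fsize.stepwise.max_height);
    }
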
static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct camss_video *video = video_drvdata(file);
@@ -593,7 +670,7 @@ static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f)
1, 65528);
sizeimage[i] = clamp_t(u32, p->sizeimage,
bytesperline[i],
- bytesperline[i] * 4096);
+ bytesperline[i] * CAMSS_FRAME_MAX_HEIGHT_PIX);
}
for (j = 0; j < video->nformats; j++)
@@ -610,8 +687,8 @@ static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f)
memset(pix_mp, 0, sizeof(*pix_mp));
pix_mp->pixelformat = fi->pixelformat;
- pix_mp->width = clamp_t(u32, width, 1, 8191);
- pix_mp->height = clamp_t(u32, height, 1, 8191);
+ pix_mp->width = clamp_t(u32, width, 1, CAMSS_FRAME_MAX_WIDTH);
+ pix_mp->height = clamp_t(u32, height, 1, CAMSS_FRAME_MAX_HEIGHT_RDI);
pix_mp->num_planes = fi->planes;
for (i = 0; i < pix_mp->num_planes; i++) {
bpl = pix_mp->width / fi->hsub[i].numerator *
@@ -637,7 +714,7 @@ static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f)
1, 65528);
p->sizeimage = clamp_t(u32, p->sizeimage,
p->bytesperline,
- p->bytesperline * 4096);
+ p->bytesperline * CAMSS_FRAME_MAX_HEIGHT_PIX);
lines = p->sizeimage / p->bytesperline;
if (p->bytesperline < bytesperline[i])
@@ -704,6 +781,7 @@ static int video_s_input(struct file *file, void *fh, unsigned int input)
static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
.vidioc_querycap = video_querycap,
.vidioc_enum_fmt_vid_cap = video_enum_fmt,
+ .vidioc_enum_framesizes = video_enum_framesizes,
.vidioc_g_fmt_vid_cap_mplane = video_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = video_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = video_try_fmt,
@@ -879,7 +957,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n",
ret);
- goto error_media_init;
+ goto error_vb2_init;
}
mutex_init(&video->lock);
@@ -911,8 +989,8 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
}
vdev->fops = &msm_vid_fops;
- vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING
+ | V4L2_CAP_READWRITE | V4L2_CAP_IO_MC;
vdev->ioctl_ops = &msm_vid_ioctl_ops;
vdev->release = msm_video_release;
vdev->v4l2_dev = v4l2_dev;
@@ -936,23 +1014,15 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
error_video_register:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&video->lock);
-error_media_init:
- vb2_queue_release(&video->vb2_q);
error_vb2_init:
mutex_destroy(&video->q_lock);
return ret;
}
-void msm_video_stop_streaming(struct camss_video *video)
-{
- if (vb2_is_streaming(&video->vb2_q))
- vb2_queue_release(&video->vb2_q);
-}
-
void msm_video_unregister(struct camss_video *video)
{
atomic_inc(&video->camss->ref_count);
- video_unregister_device(&video->vdev);
+ vb2_video_unregister_device(&video->vdev);
atomic_dec(&video->camss->ref_count);
}
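
The switch to vb2_video_unregister_device() is what allows msm_video_stop_streaming() and msm_vfe_stop_streaming() to be deleted: the videobuf2 helper unregisters the node and releases the queue if a filehandle still owns it, so drivers no longer tear down streaming by hand. Schematically:

    /* before: manual teardown split across two helpers */
    if (vb2_is_streaming(&video->vb2_q))
        vb2_queue_release(&video->vb2_q);
    video_unregister_device(&video->vdev);

    /* after: one call owns the ordering */
    vb2_video_unregister_device(&video->vdev);
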
diff --git a/drivers/media/platform/qcom/camss/camss-video.h b/drivers/media/platform/qcom/camss/camss-video.h
index aa35e8cc6fd5..bdbae8424140 100644
--- a/drivers/media/platform/qcom/camss/camss-video.h
+++ b/drivers/media/platform/qcom/camss/camss-video.h
@@ -52,8 +52,6 @@ struct camss_video {
unsigned int nformats;
};
-void msm_video_stop_streaming(struct camss_video *video);
-
int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
const char *name, int is_pix);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 2483641799df..9186881afc98 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -974,13 +974,8 @@ void camss_delete(struct camss *camss)
*/
static int camss_remove(struct platform_device *pdev)
{
- unsigned int i;
-
struct camss *camss = platform_get_drvdata(pdev);
- for (i = 0; i < camss->vfe_num; i++)
- msm_vfe_stop_streaming(&camss->vfe[i]);
-
v4l2_async_notifier_unregister(&camss->notifier);
v4l2_async_notifier_cleanup(&camss->notifier);
camss_unregister_entities(camss);
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index 64af0bc1edae..dfc636865709 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -3,7 +3,7 @@
venus-core-objs += core.o helpers.o firmware.o \
hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o \
- hfi_parser.o pm_helpers.o
+ hfi_parser.o pm_helpers.o dbgfs.o
venus-dec-objs += vdec.o vdec_ctrls.o
venus-enc-objs += venc.o venc_ctrls.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 203c6538044f..6103aaf43987 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
+#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -40,13 +41,7 @@ static void venus_event_notify(struct venus_core *core, u32 event)
mutex_unlock(&core->lock);
disable_irq_nosync(core->irq);
-
- /*
- * Delay recovery to ensure venus has completed any pending cache
- * operations. Without this sleep, we see device reset when firmware is
- * unloaded after a system error.
- */
- schedule_delayed_work(&core->work, msecs_to_jiffies(100));
+ schedule_delayed_work(&core->work, msecs_to_jiffies(10));
}
static const struct hfi_core_ops venus_core_ops = {
@@ -59,23 +54,29 @@ static void venus_sys_error_handler(struct work_struct *work)
container_of(work, struct venus_core, work.work);
int ret = 0;
- dev_warn(core->dev, "system error has occurred, starting recovery!\n");
-
pm_runtime_get_sync(core->dev);
hfi_core_deinit(core, true);
- hfi_destroy(core);
+
+ dev_warn(core->dev, "system error has occurred, starting recovery!\n");
+
mutex_lock(&core->lock);
+
+ while (pm_runtime_active(core->dev_dec) || pm_runtime_active(core->dev_enc))
+ msleep(10);
+
venus_shutdown(core);
pm_runtime_put_sync(core->dev);
- ret |= hfi_create(core, &venus_core_ops);
+ while (core->pmdomains[0] && pm_runtime_active(core->pmdomains[0]))
+ usleep_range(1000, 1500);
+
+ hfi_reinit(core);
pm_runtime_get_sync(core->dev);
ret |= venus_boot(core);
-
ret |= hfi_core_resume(core, true);
enable_irq(core->irq);
@@ -224,15 +225,9 @@ static int venus_probe(struct platform_device *pdev)
ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
if (ret)
- return ret;
+ goto err_core_put;
- if (!dev->dma_parms) {
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
- GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
- }
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, UINT_MAX);
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
@@ -242,11 +237,11 @@ static int venus_probe(struct platform_device *pdev)
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"venus", core);
if (ret)
- return ret;
+ goto err_core_put;
ret = hfi_create(core, &venus_core_ops);
if (ret)
- return ret;
+ goto err_core_put;
pm_runtime_enable(dev);
@@ -287,8 +282,12 @@ static int venus_probe(struct platform_device *pdev)
goto err_core_deinit;
ret = pm_runtime_put_sync(dev);
- if (ret)
+ if (ret) {
+ pm_runtime_get_noresume(dev);
goto err_dev_unregister;
+ }
+
+ venus_dbgfs_init(core);
return 0;
@@ -299,9 +298,13 @@ err_core_deinit:
err_venus_shutdown:
venus_shutdown(core);
err_runtime_disable:
+ pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
hfi_destroy(core);
+err_core_put:
+ if (core->pm_ops->core_put)
+ core->pm_ops->core_put(dev);
return ret;
}
@@ -337,6 +340,7 @@ static int venus_remove(struct platform_device *pdev)
v4l2_device_unregister(&core->v4l2_dev);
mutex_destroy(&core->pm_lock);
mutex_destroy(&core->lock);
+ venus_dbgfs_deinit(core);
return ret;
}
@@ -520,6 +524,7 @@ static const struct venus_resources sdm845_res_v2 = {
.vcodec_clks_num = 2,
.vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" },
.vcodec_pmdomains_num = 3,
+ .opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
@@ -527,6 +532,10 @@ static const struct venus_resources sdm845_res_v2 = {
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
+ .cp_start = 0,
+ .cp_size = 0x70800000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.2/venus.mdt",
};
@@ -565,6 +574,7 @@ static const struct venus_resources sc7180_res = {
.vcodec_clks_num = 2,
.vcodec_pmdomains = { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
.hfi_version = HFI_VERSION_4XX,
.vmem_id = VIDC_RESOURCE_NONE,
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 7118612673c9..7b79a33dc9d6 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -12,12 +12,20 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include "dbgfs.h"
#include "hfi.h"
+#define VDBGL "VenusLow : "
+#define VDBGM "VenusMed : "
+#define VDBGH "VenusHigh: "
+#define VDBGFW "VenusFW : "
+
#define VIDC_CLKS_NUM_MAX 4
#define VIDC_VCODEC_CLKS_NUM_MAX 2
#define VIDC_PMDOMAINS_NUM_MAX 3
+extern int venus_fw_debug;
+
struct freq_tbl {
unsigned int load;
unsigned long freq;
@@ -62,12 +70,17 @@ struct venus_resources {
unsigned int vcodec_clks_num;
const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX];
unsigned int vcodec_pmdomains_num;
+ const char **opp_pmdomain;
unsigned int vcodec_num;
enum hfi_version hfi_version;
u32 max_load;
unsigned int vmem_id;
u32 vmem_size;
u32 vmem_addr;
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
const char *fwname;
};
@@ -136,6 +149,7 @@ struct venus_caps {
* @priv: a private filed for HFI operations
* @ops: the core HFI operations
* @work: a delayed work for handling system fatal error
+ * @root: debugfs root directory
*/
struct venus_core {
void __iomem *base;
@@ -145,8 +159,12 @@ struct venus_core {
struct clk *vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX];
struct icc_path *video_path;
struct icc_path *cpucfg_path;
+ struct opp_table *opp_table;
+ bool has_opp_table;
struct device_link *pd_dl_venus;
struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX];
+ struct device_link *opp_dl_venus;
+ struct device *opp_pmdomain;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -185,6 +203,7 @@ struct venus_core {
unsigned int codecs_count;
unsigned int core0_usage_count;
unsigned int core1_usage_count;
+ struct dentry *root;
};
struct vdec_controls {
@@ -201,6 +220,8 @@ struct venc_controls {
u32 bitrate;
u32 bitrate_peak;
u32 rc_enable;
+ u32 const_quality;
+ u32 frame_skip_mode;
u32 h264_i_period;
u32 h264_entropy_mode;
@@ -222,17 +243,8 @@ struct venc_controls {
u32 header_mode;
- struct {
- u32 mpeg4;
- u32 h264;
- u32 vpx;
- u32 hevc;
- } profile;
- struct {
- u32 mpeg4;
- u32 h264;
- u32 hevc;
- } level;
+ u32 profile;
+ u32 level;
};
struct venus_buffer {
diff --git a/drivers/media/platform/qcom/venus/dbgfs.c b/drivers/media/platform/qcom/venus/dbgfs.c
new file mode 100644
index 000000000000..52de47f2ca88
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/dbgfs.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+
+#include "core.h"
+
+void venus_dbgfs_init(struct venus_core *core)
+{
+ core->root = debugfs_create_dir("venus", NULL);
+ debugfs_create_x32("fw_level", 0644, core->root, &venus_fw_debug);
+}
+
+void venus_dbgfs_deinit(struct venus_core *core)
+{
+ debugfs_remove_recursive(core->root);
+}
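
The new dbgfs.c can omit all error checking by design: debugfs_create_dir() may return an ERR_PTR, but the debugfs API accepts that (or NULL) as a parent, and debugfs_remove_recursive() is a no-op for such values. A self-contained sketch of the same pattern (names hypothetical):

    #include <linux/debugfs.h>

    static u32 example_level;
    static struct dentry *example_root;

    static void example_dbgfs_init(void)
    {
        example_root = debugfs_create_dir("example", NULL);
        /* safe even if example_root is an ERR_PTR */
        debugfs_create_x32("level", 0644, example_root, &example_level);
    }

    static void example_dbgfs_exit(void)
    {
        debugfs_remove_recursive(example_root);   /* NULL/ERR_PTR safe */
    }
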
diff --git a/drivers/media/platform/qcom/venus/dbgfs.h b/drivers/media/platform/qcom/venus/dbgfs.h
new file mode 100644
index 000000000000..b7b621a8472f
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/dbgfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Linaro Ltd. */
+
+#ifndef __VENUS_DBGFS_H__
+#define __VENUS_DBGFS_H__
+
+struct venus_core;
+
+void venus_dbgfs_init(struct venus_core *core);
+void venus_dbgfs_deinit(struct venus_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 8801a6a7543d..1db64a854b88 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -181,6 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
int venus_boot(struct venus_core *core)
{
struct device *dev = core->dev;
+ const struct venus_resources *res = core->res;
phys_addr_t mem_phys;
size_t mem_size;
int ret;
@@ -200,7 +201,23 @@ int venus_boot(struct venus_core *core)
else
ret = venus_boot_no_tz(core, mem_phys, mem_size);
- return ret;
+ if (ret)
+ return ret;
+
+ if (core->use_tz && res->cp_size) {
+ ret = qcom_scm_mem_protect_video_var(res->cp_start,
+ res->cp_size,
+ res->cp_nonpixel_start,
+ res->cp_nonpixel_size);
+ if (ret) {
+ qcom_scm_pas_shutdown(VENUS_PAS_ID);
+ dev_err(dev, "set virtual address ranges fail (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
}
int venus_shutdown(struct venus_core *core)
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 0143af7822b2..50439eb1ffea 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <media/videobuf2-dma-sg.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>
@@ -396,7 +397,7 @@ put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
}
if (slot == -1) {
- dev_dbg(inst->core->dev, "%s: no free slot\n", __func__);
+ dev_dbg(inst->core->dev, VDBGL "no free slot\n");
return;
}
@@ -582,6 +583,244 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
+struct id_mapping {
+ u32 hfi_id;
+ u32 v4l2_id;
+};
+
+static const struct id_mapping mpeg4_profiles[] = {
+ { HFI_MPEG4_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE },
+ { HFI_MPEG4_PROFILE_ADVANCEDSIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE },
+};
+
+static const struct id_mapping mpeg4_levels[] = {
+ { HFI_MPEG4_LEVEL_0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 },
+ { HFI_MPEG4_LEVEL_0b, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B },
+ { HFI_MPEG4_LEVEL_1, V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 },
+ { HFI_MPEG4_LEVEL_2, V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 },
+ { HFI_MPEG4_LEVEL_3, V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 },
+ { HFI_MPEG4_LEVEL_4, V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 },
+ { HFI_MPEG4_LEVEL_5, V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 },
+};
+
+static const struct id_mapping mpeg2_profiles[] = {
+ { HFI_MPEG2_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE },
+ { HFI_MPEG2_PROFILE_MAIN, V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN },
+ { HFI_MPEG2_PROFILE_SNR, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE },
+ { HFI_MPEG2_PROFILE_SPATIAL, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE },
+ { HFI_MPEG2_PROFILE_HIGH, V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH },
+};
+
+static const struct id_mapping mpeg2_levels[] = {
+ { HFI_MPEG2_LEVEL_LL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW },
+ { HFI_MPEG2_LEVEL_ML, V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN },
+ { HFI_MPEG2_LEVEL_H14, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 },
+ { HFI_MPEG2_LEVEL_HL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH },
+};
+
+static const struct id_mapping h264_profiles[] = {
+ { HFI_H264_PROFILE_BASELINE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE },
+ { HFI_H264_PROFILE_MAIN, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN },
+ { HFI_H264_PROFILE_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH },
+ { HFI_H264_PROFILE_STEREO_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH },
+ { HFI_H264_PROFILE_MULTIVIEW_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH },
+ { HFI_H264_PROFILE_CONSTRAINED_BASE, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE },
+ { HFI_H264_PROFILE_CONSTRAINED_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH },
+};
+
+static const struct id_mapping h264_levels[] = {
+ { HFI_H264_LEVEL_1, V4L2_MPEG_VIDEO_H264_LEVEL_1_0 },
+ { HFI_H264_LEVEL_1b, V4L2_MPEG_VIDEO_H264_LEVEL_1B },
+ { HFI_H264_LEVEL_11, V4L2_MPEG_VIDEO_H264_LEVEL_1_1 },
+ { HFI_H264_LEVEL_12, V4L2_MPEG_VIDEO_H264_LEVEL_1_2 },
+ { HFI_H264_LEVEL_13, V4L2_MPEG_VIDEO_H264_LEVEL_1_3 },
+ { HFI_H264_LEVEL_2, V4L2_MPEG_VIDEO_H264_LEVEL_2_0 },
+ { HFI_H264_LEVEL_21, V4L2_MPEG_VIDEO_H264_LEVEL_2_1 },
+ { HFI_H264_LEVEL_22, V4L2_MPEG_VIDEO_H264_LEVEL_2_2 },
+ { HFI_H264_LEVEL_3, V4L2_MPEG_VIDEO_H264_LEVEL_3_0 },
+ { HFI_H264_LEVEL_31, V4L2_MPEG_VIDEO_H264_LEVEL_3_1 },
+ { HFI_H264_LEVEL_32, V4L2_MPEG_VIDEO_H264_LEVEL_3_2 },
+ { HFI_H264_LEVEL_4, V4L2_MPEG_VIDEO_H264_LEVEL_4_0 },
+ { HFI_H264_LEVEL_41, V4L2_MPEG_VIDEO_H264_LEVEL_4_1 },
+ { HFI_H264_LEVEL_42, V4L2_MPEG_VIDEO_H264_LEVEL_4_2 },
+ { HFI_H264_LEVEL_5, V4L2_MPEG_VIDEO_H264_LEVEL_5_0 },
+ { HFI_H264_LEVEL_51, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 },
+ { HFI_H264_LEVEL_52, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 },
+};
+
+static const struct id_mapping hevc_profiles[] = {
+ { HFI_HEVC_PROFILE_MAIN, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN },
+ { HFI_HEVC_PROFILE_MAIN_STILL_PIC, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE },
+ { HFI_HEVC_PROFILE_MAIN10, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 },
+};
+
+static const struct id_mapping hevc_levels[] = {
+ { HFI_HEVC_LEVEL_1, V4L2_MPEG_VIDEO_HEVC_LEVEL_1 },
+ { HFI_HEVC_LEVEL_2, V4L2_MPEG_VIDEO_HEVC_LEVEL_2 },
+ { HFI_HEVC_LEVEL_21, V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 },
+ { HFI_HEVC_LEVEL_3, V4L2_MPEG_VIDEO_HEVC_LEVEL_3 },
+ { HFI_HEVC_LEVEL_31, V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 },
+ { HFI_HEVC_LEVEL_4, V4L2_MPEG_VIDEO_HEVC_LEVEL_4 },
+ { HFI_HEVC_LEVEL_41, V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 },
+ { HFI_HEVC_LEVEL_5, V4L2_MPEG_VIDEO_HEVC_LEVEL_5 },
+ { HFI_HEVC_LEVEL_51, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 },
+ { HFI_HEVC_LEVEL_52, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 },
+ { HFI_HEVC_LEVEL_6, V4L2_MPEG_VIDEO_HEVC_LEVEL_6 },
+ { HFI_HEVC_LEVEL_61, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 },
+ { HFI_HEVC_LEVEL_62, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 },
+};
+
+static const struct id_mapping vp8_profiles[] = {
+ { HFI_VPX_PROFILE_VERSION_0, V4L2_MPEG_VIDEO_VP8_PROFILE_0 },
+ { HFI_VPX_PROFILE_VERSION_1, V4L2_MPEG_VIDEO_VP8_PROFILE_1 },
+ { HFI_VPX_PROFILE_VERSION_2, V4L2_MPEG_VIDEO_VP8_PROFILE_2 },
+ { HFI_VPX_PROFILE_VERSION_3, V4L2_MPEG_VIDEO_VP8_PROFILE_3 },
+};
+
+static const struct id_mapping vp9_profiles[] = {
+ { HFI_VP9_PROFILE_P0, V4L2_MPEG_VIDEO_VP9_PROFILE_0 },
+ { HFI_VP9_PROFILE_P2_10B, V4L2_MPEG_VIDEO_VP9_PROFILE_2 },
+};
+
+static const struct id_mapping vp9_levels[] = {
+ { HFI_VP9_LEVEL_1, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 },
+ { HFI_VP9_LEVEL_11, V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 },
+ { HFI_VP9_LEVEL_2, V4L2_MPEG_VIDEO_VP9_LEVEL_2_0 },
+ { HFI_VP9_LEVEL_21, V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 },
+ { HFI_VP9_LEVEL_3, V4L2_MPEG_VIDEO_VP9_LEVEL_3_0 },
+ { HFI_VP9_LEVEL_31, V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 },
+ { HFI_VP9_LEVEL_4, V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 },
+ { HFI_VP9_LEVEL_41, V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 },
+ { HFI_VP9_LEVEL_5, V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 },
+ { HFI_VP9_LEVEL_51, V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 },
+ { HFI_VP9_LEVEL_6, V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 },
+ { HFI_VP9_LEVEL_61, V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 },
+};
+
+static u32 find_v4l2_id(u32 hfi_id, const struct id_mapping *array, unsigned int array_sz)
+{
+ unsigned int i;
+
+ if (!array || !array_sz)
+ return 0;
+
+ for (i = 0; i < array_sz; i++)
+ if (hfi_id == array[i].hfi_id)
+ return array[i].v4l2_id;
+
+ return 0;
+}
+
+static u32 find_hfi_id(u32 v4l2_id, const struct id_mapping *array, unsigned int array_sz)
+{
+ unsigned int i;
+
+ if (!array || !array_sz)
+ return 0;
+
+ for (i = 0; i < array_sz; i++)
+ if (v4l2_id == array[i].v4l2_id)
+ return array[i].hfi_id;
+
+ return 0;
+}
+
+static void
+v4l2_id_profile_level(u32 hfi_codec, struct hfi_profile_level *pl, u32 *profile, u32 *level)
+{
+ u32 hfi_pf = pl->profile;
+ u32 hfi_lvl = pl->level;
+
+ switch (hfi_codec) {
+ case HFI_VIDEO_CODEC_H264:
+ *profile = find_v4l2_id(hfi_pf, h264_profiles, ARRAY_SIZE(h264_profiles));
+ *level = find_v4l2_id(hfi_lvl, h264_levels, ARRAY_SIZE(h264_levels));
+ break;
+ case HFI_VIDEO_CODEC_MPEG2:
+ *profile = find_v4l2_id(hfi_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles));
+ *level = find_v4l2_id(hfi_lvl, mpeg2_levels, ARRAY_SIZE(mpeg2_levels));
+ break;
+ case HFI_VIDEO_CODEC_MPEG4:
+ *profile = find_v4l2_id(hfi_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles));
+ *level = find_v4l2_id(hfi_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels));
+ break;
+ case HFI_VIDEO_CODEC_VP8:
+ *profile = find_v4l2_id(hfi_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles));
+ *level = 0;
+ break;
+ case HFI_VIDEO_CODEC_VP9:
+ *profile = find_v4l2_id(hfi_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles));
+ *level = find_v4l2_id(hfi_lvl, vp9_levels, ARRAY_SIZE(vp9_levels));
+ break;
+ case HFI_VIDEO_CODEC_HEVC:
+ *profile = find_v4l2_id(hfi_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles));
+ *level = find_v4l2_id(hfi_lvl, hevc_levels, ARRAY_SIZE(hevc_levels));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+hfi_id_profile_level(u32 hfi_codec, u32 v4l2_pf, u32 v4l2_lvl, struct hfi_profile_level *pl)
+{
+ switch (hfi_codec) {
+ case HFI_VIDEO_CODEC_H264:
+ pl->profile = find_hfi_id(v4l2_pf, h264_profiles, ARRAY_SIZE(h264_profiles));
+ pl->level = find_hfi_id(v4l2_lvl, h264_levels, ARRAY_SIZE(h264_levels));
+ break;
+ case HFI_VIDEO_CODEC_MPEG2:
+ pl->profile = find_hfi_id(v4l2_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles));
+ pl->level = find_hfi_id(v4l2_lvl, mpeg2_levels, ARRAY_SIZE(mpeg2_levels));
+ break;
+ case HFI_VIDEO_CODEC_MPEG4:
+ pl->profile = find_hfi_id(v4l2_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles));
+ pl->level = find_hfi_id(v4l2_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels));
+ break;
+ case HFI_VIDEO_CODEC_VP8:
+ pl->profile = find_hfi_id(v4l2_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles));
+ pl->level = 0;
+ break;
+ case HFI_VIDEO_CODEC_VP9:
+ pl->profile = find_hfi_id(v4l2_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles));
+ pl->level = find_hfi_id(v4l2_lvl, vp9_levels, ARRAY_SIZE(vp9_levels));
+ break;
+ case HFI_VIDEO_CODEC_HEVC:
+ pl->profile = find_hfi_id(v4l2_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles));
+ pl->level = find_hfi_id(v4l2_lvl, hevc_levels, ARRAY_SIZE(hevc_levels));
+ break;
+ default:
+ break;
+ }
+}
+
+int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ union hfi_get_property hprop;
+ int ret;
+
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (ret)
+ return ret;
+
+ v4l2_id_profile_level(inst->hfi_codec, &hprop.profile_level, profile, level);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_profile_level);
+
+int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ struct hfi_profile_level pl;
+
+ hfi_id_profile_level(inst->hfi_codec, profile, level, &pl);
+
+ return hfi_session_set_property(inst, ptype, &pl);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_profile_level);
+
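
With the mapping tables above, per-codec profile/level plumbing collapses into two helpers. A sketch of a typical encoder caller, assuming the unified u32 profile/level fields introduced in struct venc_controls:

    static int example_apply_profile_level(struct venus_inst *inst)
    {
        struct venc_controls *ctr = &inst->controls.enc;

        /* v4l2 control values in, HFI ids out (per inst->hfi_codec) */
        return venus_helper_set_profile_level(inst, ctr->profile,
                                              ctr->level);
    }
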
static u32 get_framesize_raw_nv12(u32 width, u32 height)
{
u32 y_stride, uv_stride, y_plane;
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 8fbbda12a4fe..a4a0562bc83f 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -61,4 +61,6 @@ int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
struct vb2_v4l2_buffer *vbuf);
+int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level);
+int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index a211eb93e0f9..a59022adb14c 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -517,3 +517,8 @@ void hfi_destroy(struct venus_core *core)
{
venus_hfi_destroy(core);
}
+
+void hfi_reinit(struct venus_core *core)
+{
+ venus_hfi_queues_reinit(core);
+}
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
index 62c315291484..f25d412d6553 100644
--- a/drivers/media/platform/qcom/venus/hfi.h
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -145,6 +145,7 @@ struct hfi_ops {
int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops);
void hfi_destroy(struct venus_core *core);
+void hfi_reinit(struct venus_core *core);
int hfi_core_init(struct venus_core *core);
int hfi_core_deinit(struct venus_core *core, bool blocking);
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index c67e412f8201..7022368c1e63 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -640,6 +640,7 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
case HFI_RATE_CONTROL_CBR_VFR:
case HFI_RATE_CONTROL_VBR_CFR:
case HFI_RATE_CONTROL_VBR_VFR:
+ case HFI_RATE_CONTROL_CQ:
break;
default:
ret = -EINVAL;
@@ -1218,6 +1219,37 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
return 0;
}
+static int
+pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ switch (ptype) {
+ case HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY: {
+ struct hfi_heic_frame_quality *in = pdata, *cq = prop_data;
+
+ cq->frame_quality = in->frame_quality;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
+ break;
+ }
+ default:
+ return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
+ }
+
+ return 0;
+}
+
int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
@@ -1236,7 +1268,10 @@ int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
if (hfi_ver == HFI_VERSION_3XX)
return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
- return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
+ if (hfi_ver == HFI_VERSION_4XX)
+ return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
+
+ return pkt_session_set_property_6xx(pkt, cookie, ptype, pdata);
}
void pkt_set_version(enum hfi_version version)
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index f6613df1d16b..60ee2479f7a6 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -231,6 +231,7 @@
#define HFI_RATE_CONTROL_VBR_CFR 0x1000003
#define HFI_RATE_CONTROL_CBR_VFR 0x1000004
#define HFI_RATE_CONTROL_CBR_CFR 0x1000005
+#define HFI_RATE_CONTROL_CQ 0x1000008
#define HFI_VIDEO_CODEC_H264 0x00000002
#define HFI_VIDEO_CODEC_H263 0x00000004
@@ -363,6 +364,24 @@
#define HFI_HEVC_TIER_MAIN 0x1
#define HFI_HEVC_TIER_HIGH0 0x2
+/* VP9 Profile 0, 8-bit */
+#define HFI_VP9_PROFILE_P0 0x00000001
+/* VP9 Profile 2, 10-bit */
+#define HFI_VP9_PROFILE_P2_10B 0x00000004
+
+#define HFI_VP9_LEVEL_1 0x00000001
+#define HFI_VP9_LEVEL_11 0x00000002
+#define HFI_VP9_LEVEL_2 0x00000004
+#define HFI_VP9_LEVEL_21 0x00000008
+#define HFI_VP9_LEVEL_3 0x00000010
+#define HFI_VP9_LEVEL_31 0x00000020
+#define HFI_VP9_LEVEL_4 0x00000040
+#define HFI_VP9_LEVEL_41 0x00000080
+#define HFI_VP9_LEVEL_5 0x00000100
+#define HFI_VP9_LEVEL_51 0x00000200
+#define HFI_VP9_LEVEL_6 0x00000400
+#define HFI_VP9_LEVEL_61 0x00000800
+
#define HFI_BUFFER_INPUT 0x1
#define HFI_BUFFER_OUTPUT 0x2
#define HFI_BUFFER_OUTPUT2 0x3
@@ -504,6 +523,7 @@
#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER 0x200600b
#define HFI_PROPERTY_CONFIG_VENC_LTRPERIOD 0x200600c
#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE 0x200600e
+#define HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY 0x2006014
/*
* HFI_PROPERTY_PARAM_VPE_COMMON_START
@@ -520,7 +540,8 @@
enum hfi_version {
HFI_VERSION_1XX,
HFI_VERSION_3XX,
- HFI_VERSION_4XX
+ HFI_VERSION_4XX,
+ HFI_VERSION_6XX,
};
struct hfi_buffer_info {
@@ -725,6 +746,11 @@ struct hfi_quality_vs_speed {
u32 quality_vs_speed;
};
+struct hfi_heic_frame_quality {
+ u32 frame_quality;
+ u32 reserved[3];
+};
+
struct hfi_quantization {
u32 qp_i;
u32 qp_p;
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 279a9d6fe737..06a1908ca225 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -138,7 +138,7 @@ static void event_sys_error(struct venus_core *core, u32 event,
struct hfi_msg_event_notify_pkt *pkt)
{
if (pkt)
- dev_dbg(core->dev,
+ dev_dbg(core->dev, VDBGH
"sys error (session id:%x, data1:%x, data2:%x)\n",
pkt->shdr.session_id, pkt->event_data1,
pkt->event_data2);
@@ -152,7 +152,7 @@ event_session_error(struct venus_core *core, struct venus_inst *inst,
{
struct device *dev = core->dev;
- dev_dbg(dev, "session error: event id:%x, session id:%x\n",
+ dev_dbg(dev, VDBGH "session error: event id:%x, session id:%x\n",
pkt->event_data1, pkt->shdr.session_id);
if (!inst)
@@ -247,7 +247,7 @@ sys_get_prop_image_version(struct device *dev,
/* bad packet */
return;
- dev_dbg(dev, "F/W version: %s\n", (u8 *)&pkt->data[1]);
+ dev_dbg(dev, VDBGL "F/W version: %s\n", (u8 *)&pkt->data[1]);
}
static void hfi_sys_property_info(struct venus_core *core,
@@ -257,7 +257,7 @@ static void hfi_sys_property_info(struct venus_core *core,
struct device *dev = core->dev;
if (!pkt->num_properties) {
- dev_dbg(dev, "%s: no properties\n", __func__);
+ dev_dbg(dev, VDBGL "no properties\n");
return;
}
@@ -266,7 +266,7 @@ static void hfi_sys_property_info(struct venus_core *core,
sys_get_prop_image_version(dev, pkt);
break;
default:
- dev_dbg(dev, "%s: unknown property data\n", __func__);
+ dev_dbg(dev, VDBGL "unknown property data\n");
break;
}
}
@@ -297,7 +297,7 @@ static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst,
static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
- dev_dbg(core->dev, "sys idle\n");
+ dev_dbg(core->dev, VDBGL "sys idle\n");
}
static void hfi_sys_pc_prepare_done(struct venus_core *core,
@@ -305,7 +305,8 @@ static void hfi_sys_pc_prepare_done(struct venus_core *core,
{
struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet;
- dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
+ dev_dbg(core->dev, VDBGL "pc prepare done (error %x)\n",
+ pkt->error_type);
}
static unsigned int
@@ -387,8 +388,7 @@ static void hfi_session_prop_info(struct venus_core *core,
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
break;
default:
- dev_dbg(dev, "%s: unknown property id:%x\n", __func__,
- pkt->data[0]);
+ dev_dbg(dev, VDBGM "unknown property id:%x\n", pkt->data[0]);
return;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 7f515a4b9bd1..363ee2a65453 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -239,6 +239,9 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
parser_init(inst, &codecs, &domain);
+ core->codecs_count = 0;
+ memset(core->caps, 0, sizeof(core->caps));
+
while (words_count) {
data = word + 1;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 0d8855014ab3..4be4a75ddcb6 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -130,7 +130,7 @@ struct venus_hfi_device {
};
static bool venus_pkt_debug;
-static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
+int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_sys_idle_indicator;
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;
@@ -477,7 +477,7 @@ static u32 venus_hwversion(struct venus_hfi_device *hdev)
minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
- dev_dbg(dev, "venus hw version %x.%x.%x\n", major, minor, step);
+ dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
return major;
}
@@ -906,7 +906,7 @@ static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
struct hfi_msg_sys_debug_pkt *pkt = packet;
- dev_dbg(dev, "%s", pkt->msg_data);
+ dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
}
}
}
@@ -986,13 +986,6 @@ static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
venus_set_state(hdev, VENUS_STATE_DEINIT);
- /*
- * Once SYS_ERROR received from HW, it is safe to halt the AXI.
- * With SYS_ERROR, Venus FW may have crashed and HW might be
- * active and causing unnecessary transactions. Hence it is
- * safe to stop all AXI transactions from venus subsystem.
- */
- venus_halt_axi(hdev);
venus_sfr_print(hdev);
}
@@ -1009,10 +1002,6 @@ static irqreturn_t venus_isr_thread(struct venus_core *core)
res = hdev->core->res;
pkt = hdev->pkt_buf;
- if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) {
- venus_sfr_print(hdev);
- hfi_process_watchdog_timeout(core);
- }
while (!venus_iface_msgq_read(hdev, pkt)) {
msg_ret = hfi_process_msg_packet(core, pkt);
@@ -1133,6 +1122,10 @@ static int venus_session_init(struct venus_inst *inst, u32 session_type,
struct hfi_session_init_pkt pkt;
int ret;
+ ret = venus_sys_set_debug(hdev, venus_fw_debug);
+ if (ret)
+ goto err;
+
ret = pkt_session_init(&pkt, inst, session_type, codec);
if (ret)
goto err;
@@ -1614,3 +1607,54 @@ err_kfree:
core->ops = NULL;
return ret;
}
+
+void venus_hfi_queues_reinit(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_queue_table_header *tbl_hdr;
+ struct iface_queue *queue;
+ struct hfi_sfr *sfr;
+ unsigned int i;
+
+ mutex_lock(&hdev->lock);
+
+ for (i = 0; i < IFACEQ_NUM; i++) {
+ queue = &hdev->queues[i];
+ queue->qhdr =
+ IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
+
+ venus_set_qhdr_defaults(queue->qhdr);
+
+ queue->qhdr->start_addr = queue->qmem.da;
+
+ if (i == IFACEQ_CMD_IDX)
+ queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
+ else if (i == IFACEQ_MSG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
+ else if (i == IFACEQ_DBG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
+ }
+
+ tbl_hdr = hdev->ifaceq_table.kva;
+ tbl_hdr->version = 0;
+ tbl_hdr->size = IFACEQ_TABLE_SIZE;
+ tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
+ tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
+ tbl_hdr->num_q = IFACEQ_NUM;
+ tbl_hdr->num_active_q = IFACEQ_NUM;
+
+ /*
+ * Set the receive request to zero on the debug queue as no
+ * interrupt from the video hardware is needed for debug messages
+ */
+ queue = &hdev->queues[IFACEQ_DBG_IDX];
+ queue->qhdr->rx_req = 0;
+
+ sfr = hdev->sfr.kva;
+ sfr->buf_size = ALIGNED_SFR_SIZE;
+
+ /* ensure table and queue header structs are settled in memory */
+ wmb();
+
+ mutex_unlock(&hdev->lock);
+}
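
The closing wmb() in venus_hfi_queues_reinit() is the usual shared-memory publish barrier: every queue/table header write must be visible before the firmware, which the recovery path reboots right after via hfi_reinit() and venus_boot(), can observe the memory. The generic pattern, with a hypothetical descriptor and doorbell:

    struct my_desc { u32 addr; u32 len; };

    static void publish_desc(struct my_desc *desc, void __iomem *doorbell,
                             u32 dma_addr, u32 len)
    {
        desc->addr = dma_addr;      /* fill the shared-memory descriptor */
        desc->len = len;

        wmb();                      /* order the writes before the kick */

        writel(1, doorbell);        /* device may now read the descriptor */
    }
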
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.h b/drivers/media/platform/qcom/venus/hfi_venus.h
index 57154832090e..1b656ef2bf07 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus.h
@@ -10,5 +10,6 @@ struct venus_core;
void venus_hfi_destroy(struct venus_core *core);
int venus_hfi_create(struct venus_core *core);
+void venus_hfi_queues_reinit(struct venus_core *core);
#endif
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 531e7a41658f..57877eacecf0 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -9,6 +9,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <media/v4l2-mem2mem.h>
@@ -66,10 +67,9 @@ static void core_clks_disable(struct venus_core *core)
static int core_clks_set_rate(struct venus_core *core, unsigned long freq)
{
- struct clk *clk = core->clks[0];
int ret;
- ret = clk_set_rate(clk, freq);
+ ret = dev_pm_opp_set_rate(core->dev, freq);
if (ret)
return ret;
@@ -212,7 +212,7 @@ static int load_scale_bw(struct venus_core *core)
}
mutex_unlock(&core->lock);
- dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ dev_dbg(core->dev, VDBGL "total: avg_bw: %u, peak_bw: %u\n",
total_avg, total_peak);
return icc_set_bw(core->video_path, total_avg, total_peak);
@@ -744,13 +744,16 @@ static int venc_power_v4(struct device *dev, int on)
static int vcodec_domains_get(struct device *dev)
{
+ int ret;
+ struct opp_table *opp_table;
+ struct device **opp_virt_dev;
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_resources *res = core->res;
struct device *pd;
unsigned int i;
if (!res->vcodec_pmdomains_num)
- return -ENODEV;
+ goto skip_pmdomains;
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
pd = dev_pm_domain_attach_by_name(dev,
@@ -767,7 +770,41 @@ static int vcodec_domains_get(struct device *dev)
if (!core->pd_dl_venus)
return -ENODEV;
+skip_pmdomains:
+ if (!core->has_opp_table)
+ return 0;
+
+ /* Attach the power domain for setting performance state */
+ opp_table = dev_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto opp_attach_err;
+ }
+
+ core->opp_pmdomain = *opp_virt_dev;
+ core->opp_dl_venus = device_link_add(dev, core->opp_pmdomain,
+ DL_FLAG_RPM_ACTIVE |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!core->opp_dl_venus) {
+ ret = -ENODEV;
+ goto opp_dl_add_err;
+ }
+
return 0;
+
+opp_dl_add_err:
+ dev_pm_domain_detach(core->opp_pmdomain, true);
+opp_attach_err:
+ if (core->pd_dl_venus) {
+ device_link_del(core->pd_dl_venus);
+ for (i = 0; i < res->vcodec_pmdomains_num; i++) {
+ if (IS_ERR_OR_NULL(core->pmdomains[i]))
+ continue;
+ dev_pm_domain_detach(core->pmdomains[i], true);
+ }
+ }
+ return ret;
}
static void vcodec_domains_put(struct device *dev)
@@ -777,7 +814,7 @@ static void vcodec_domains_put(struct device *dev)
unsigned int i;
if (!res->vcodec_pmdomains_num)
- return;
+ goto skip_pmdomains;
if (core->pd_dl_venus)
device_link_del(core->pd_dl_venus);
@@ -787,6 +824,15 @@ static void vcodec_domains_put(struct device *dev)
continue;
dev_pm_domain_detach(core->pmdomains[i], true);
}
+
+skip_pmdomains:
+ if (!core->has_opp_table)
+ return;
+
+ if (core->opp_dl_venus)
+ device_link_del(core->opp_dl_venus);
+
+ dev_pm_domain_detach(core->opp_pmdomain, true);
}
static int core_get_v4(struct device *dev)
@@ -815,19 +861,46 @@ static int core_get_v4(struct device *dev)
if (legacy_binding)
return 0;
+ core->opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(core->opp_table))
+ return PTR_ERR(core->opp_table);
+
+ if (core->res->opp_pmdomain) {
+ ret = dev_pm_opp_of_add_table(dev);
+ if (!ret) {
+ core->has_opp_table = true;
+ } else if (ret != -ENODEV) {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ dev_pm_opp_put_clkname(core->opp_table);
+ return ret;
+ }
+ }
+
ret = vcodec_domains_get(dev);
- if (ret)
+ if (ret) {
+ if (core->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ dev_pm_opp_put_clkname(core->opp_table);
return ret;
+ }
return 0;
}
static void core_put_v4(struct device *dev)
{
+ struct venus_core *core = dev_get_drvdata(dev);
+
if (legacy_binding)
return;
vcodec_domains_put(dev);
+
+ if (core->has_opp_table)
+ dev_pm_opp_of_remove_table(dev);
+ if (core->opp_table)
+ dev_pm_opp_put_clkname(core->opp_table);
+
}
static int core_power_v4(struct device *dev, int on)
@@ -835,10 +908,15 @@ static int core_power_v4(struct device *dev, int on)
struct venus_core *core = dev_get_drvdata(dev);
int ret = 0;
- if (on == POWER_ON)
+ if (on == POWER_ON) {
ret = core_clks_enable(core);
- else
+ } else {
+ /* Drop the performance state vote */
+ if (core->opp_pmdomain)
+ dev_pm_opp_set_rate(dev, 0);
+
core_clks_disable(core);
+ }
return ret;
}
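
The OPP additions above follow the standard sequence for this kernel generation: bind the scaled clock with dev_pm_opp_set_clkname(), optionally add the DT table, then use dev_pm_opp_set_rate() instead of clk_set_rate() so performance-state votes (the "cx" genpd) are applied together with the frequency. A condensed sketch of the setup half, treating a missing table (-ENODEV) as optional:

    static int example_opp_init(struct device *dev, struct opp_table **tbl)
    {
        int ret;

        *tbl = dev_pm_opp_set_clkname(dev, "core");
        if (IS_ERR(*tbl))
            return PTR_ERR(*tbl);

        ret = dev_pm_opp_of_add_table(dev);
        if (ret && ret != -ENODEV) {        /* -ENODEV: no table in DT */
            dev_pm_opp_put_clkname(*tbl);
            return ret;
        }
        return 0;
    }

    /* teardown mirrors setup: dev_pm_opp_of_remove_table(dev), then
     * dev_pm_opp_put_clkname(*tbl). */
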
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 7c4c483d5438..ea13170a6a2c 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -225,7 +225,7 @@ static int vdec_check_src_change(struct venus_inst *inst)
if (!(inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) ||
!inst->reconfig)
- dev_dbg(inst->core->dev, "%s: wrong state\n", __func__);
+ dev_dbg(inst->core->dev, VDBGH "wrong state\n");
done:
return 0;
@@ -1072,7 +1072,7 @@ static int vdec_stop_capture(struct venus_inst *inst)
switch (inst->codec_state) {
case VENUS_DEC_STATE_DECODING:
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
- /* fallthrough */
+ fallthrough;
case VENUS_DEC_STATE_DRAIN:
vdec_cancel_dst_buffers(inst);
inst->codec_state = VENUS_DEC_STATE_STOPPED;
@@ -1088,8 +1088,6 @@ static int vdec_stop_capture(struct venus_inst *inst)
break;
}
- INIT_LIST_HEAD(&inst->registeredbufs);
-
return ret;
}
@@ -1189,6 +1187,14 @@ static int vdec_buf_init(struct vb2_buffer *vb)
static void vdec_buf_cleanup(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ mutex_lock(&inst->lock);
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (!list_empty(&inst->registeredbufs))
+ list_del_init(&buf->reg_list);
+ mutex_unlock(&inst->lock);
inst->buf_count--;
if (!inst->buf_count)
@@ -1310,7 +1316,7 @@ static void vdec_event_change(struct venus_inst *inst,
if (inst->bit_depth != ev_data->bit_depth)
inst->bit_depth = ev_data->bit_depth;
- dev_dbg(dev, "event %s sufficient resources (%ux%u)\n",
+ dev_dbg(dev, VDBGM "event %s sufficient resources (%ux%u)\n",
sufficient ? "" : "not", ev_data->width, ev_data->height);
if (sufficient) {
@@ -1344,7 +1350,7 @@ static void vdec_event_change(struct venus_inst *inst,
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false);
if (ret)
- dev_dbg(dev, "flush output error %d\n", ret);
+ dev_dbg(dev, VDBGH "flush output error %d\n", ret);
}
inst->reconfig = true;
@@ -1453,13 +1459,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 0;
dst_vq->dev = inst->core->dev;
- ret = vb2_queue_init(dst_vq);
- if (ret) {
- vb2_queue_release(src_vq);
- return ret;
- }
-
- return 0;
+ return vb2_queue_init(dst_vq);
}
static int vdec_open(struct file *file)
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index 3a963cbd342a..974110b75b93 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -22,10 +22,12 @@ static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
ctr->profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
ctr->level = ctrl->val;
break;
default:
@@ -40,25 +42,26 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct vdec_controls *ctr = &inst->controls.dec;
struct hfi_buffer_requirements bufreq;
- union hfi_get_property hprop;
enum hfi_version ver = inst->core->res->hfi_version;
- u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ u32 profile, level;
int ret;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- ret = hfi_session_get_property(inst, ptype, &hprop);
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ ret = venus_helper_get_profile_level(inst, &profile, &level);
if (!ret)
- ctr->profile = hprop.profile_level.profile;
+ ctr->profile = profile;
ctrl->val = ctr->profile;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- ret = hfi_session_get_property(inst, ptype, &hprop);
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ ret = venus_helper_get_profile_level(inst, &profile, &level);
if (!ret)
- ctr->level = hprop.profile_level.level;
+ ctr->level = level;
ctrl->val = ctr->level;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
@@ -86,7 +89,7 @@ int vdec_ctrl_init(struct venus_inst *inst)
struct v4l2_ctrl *ctrl;
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 7);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 9);
if (ret)
return ret;
@@ -133,6 +136,20 @@ int vdec_ctrl_init(struct venus_inst *inst)
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_3,
+ 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_LEVEL,
+ V4L2_MPEG_VIDEO_VP9_LEVEL_6_2,
+ 0, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 513bbc07f7bc..f8b1484e7dcd 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -113,80 +113,6 @@ find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
static int venc_v4l2_to_hfi(int id, int value)
{
switch (id) {
- case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- switch (value) {
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
- default:
- return HFI_MPEG4_LEVEL_0;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
- return HFI_MPEG4_LEVEL_0b;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
- return HFI_MPEG4_LEVEL_1;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
- return HFI_MPEG4_LEVEL_2;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
- return HFI_MPEG4_LEVEL_3;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
- return HFI_MPEG4_LEVEL_4;
- case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
- return HFI_MPEG4_LEVEL_5;
- }
- case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- switch (value) {
- case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
- default:
- return HFI_MPEG4_PROFILE_SIMPLE;
- case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
- return HFI_MPEG4_PROFILE_ADVANCEDSIMPLE;
- }
- case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- switch (value) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- return HFI_H264_PROFILE_BASELINE;
- case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- return HFI_H264_PROFILE_CONSTRAINED_BASE;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- return HFI_H264_PROFILE_MAIN;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- default:
- return HFI_H264_PROFILE_HIGH;
- }
- case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- switch (value) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- return HFI_H264_LEVEL_1;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- return HFI_H264_LEVEL_1b;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- return HFI_H264_LEVEL_11;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- return HFI_H264_LEVEL_12;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- return HFI_H264_LEVEL_13;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- return HFI_H264_LEVEL_2;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- return HFI_H264_LEVEL_21;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- return HFI_H264_LEVEL_22;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- return HFI_H264_LEVEL_3;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- return HFI_H264_LEVEL_31;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- return HFI_H264_LEVEL_32;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- return HFI_H264_LEVEL_4;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
- return HFI_H264_LEVEL_41;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
- return HFI_H264_LEVEL_42;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
- default:
- return HFI_H264_LEVEL_5;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
- return HFI_H264_LEVEL_51;
- }
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
switch (value) {
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
@@ -195,18 +121,6 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
return HFI_H264_ENTROPY_CABAC;
}
- case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- switch (value) {
- case 0:
- default:
- return HFI_VPX_PROFILE_VERSION_0;
- case 1:
- return HFI_VPX_PROFILE_VERSION_1;
- case 2:
- return HFI_VPX_PROFILE_VERSION_2;
- case 3:
- return HFI_VPX_PROFILE_VERSION_3;
- }
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
switch (value) {
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
@@ -217,46 +131,6 @@ static int venc_v4l2_to_hfi(int id, int value)
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
}
- case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
- switch (value) {
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
- default:
- return HFI_HEVC_PROFILE_MAIN;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
- return HFI_HEVC_PROFILE_MAIN_STILL_PIC;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
- return HFI_HEVC_PROFILE_MAIN10;
- }
- case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- switch (value) {
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
- default:
- return HFI_HEVC_LEVEL_1;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
- return HFI_HEVC_LEVEL_2;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
- return HFI_HEVC_LEVEL_21;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
- return HFI_HEVC_LEVEL_3;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
- return HFI_HEVC_LEVEL_31;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
- return HFI_HEVC_LEVEL_4;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
- return HFI_HEVC_LEVEL_41;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
- return HFI_HEVC_LEVEL_5;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
- return HFI_HEVC_LEVEL_51;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
- return HFI_HEVC_LEVEL_52;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
- return HFI_HEVC_LEVEL_6;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
- return HFI_HEVC_LEVEL_61;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
- return HFI_HEVC_LEVEL_62;
- }
}
return 0;
@@ -584,6 +458,7 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
+ unsigned int framerate_factor = 1;
fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
@@ -608,12 +483,17 @@ static int venc_enum_frameintervals(struct file *file, void *fh,
fival->height < frame_height_min(inst))
return -EINVAL;
+ if (IS_V1(inst->core)) {
+ /* framerate is reported in Q16.16 format, i.e. 1/65536 fps units */
+ framerate_factor = (1 << 16);
+ }
+
fival->stepwise.min.numerator = 1;
- fival->stepwise.min.denominator = frate_max(inst);
+ fival->stepwise.min.denominator = frate_max(inst) / framerate_factor;
fival->stepwise.max.numerator = 1;
- fival->stepwise.max.denominator = frate_min(inst);
+ fival->stepwise.max.denominator = frate_min(inst) / framerate_factor;
fival->stepwise.step.numerator = 1;
- fival->stepwise.step.denominator = frate_max(inst);
+ fival->stepwise.step.denominator = frate_max(inst) / framerate_factor;
return 0;
}
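On Venus v1 hardware the firmware reports frame rates in Q16.16 fixed point,
so the raw value has to be scaled down before being exposed as a fraction
denominator. As a worked example, if frate_max() returned 60 << 16 = 3932160
on such a core, dividing by the 65536 factor yields the expected 60 fps
bound; on later cores the factor stays 1 and the value passes through
unchanged.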
@@ -651,13 +531,12 @@ static int venc_set_properties(struct venus_inst *inst)
{
struct venc_controls *ctr = &inst->controls.enc;
struct hfi_intra_period intra_period;
- struct hfi_profile_level pl;
struct hfi_framerate frate;
struct hfi_bitrate brate;
struct hfi_idr_period idrp;
struct hfi_quantization quant;
struct hfi_quantization_range quant_range;
- u32 ptype, rate_control, bitrate, profile = 0, level = 0;
+ u32 ptype, rate_control, bitrate;
int ret;
ret = venus_helper_set_work_mode(inst, VIDC_WORK_MODE_2);
@@ -739,15 +618,29 @@ static int venc_set_properties(struct venus_inst *inst)
if (!ctr->rc_enable)
rate_control = HFI_RATE_CONTROL_OFF;
else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
- rate_control = HFI_RATE_CONTROL_VBR_CFR;
- else
- rate_control = HFI_RATE_CONTROL_CBR_CFR;
+ rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_VBR_VFR :
+ HFI_RATE_CONTROL_VBR_CFR;
+ else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_CBR_VFR :
+ HFI_RATE_CONTROL_CBR_CFR;
+ else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ rate_control = HFI_RATE_CONTROL_CQ;
ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
ret = hfi_session_set_property(inst, ptype, &rate_control);
if (ret)
return ret;
+ if (rate_control == HFI_RATE_CONTROL_CQ && ctr->const_quality) {
+ struct hfi_heic_frame_quality quality = {};
+
+ ptype = HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY;
+ quality.frame_quality = ctr->const_quality;
+ ret = hfi_session_set_property(inst, ptype, &quality);
+ if (ret)
+ return ret;
+ }
+
if (!ctr->bitrate)
bitrate = 64000;
else
@@ -791,35 +684,7 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
- if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- ctr->profile.h264);
- level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
- ctr->level.h264);
- } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
- ctr->profile.vpx);
- level = 0;
- } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
- ctr->profile.mpeg4);
- level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
- ctr->level.mpeg4);
- } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
- profile = 0;
- level = 0;
- } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
- profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
- ctr->profile.hevc);
- level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
- ctr->level.hevc);
- }
-
- ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
- pl.profile = profile;
- pl.level = level;
-
- ret = hfi_session_set_property(inst, ptype, &pl);
+ ret = venus_helper_set_profile_level(inst, ctr->profile, ctr->level);
if (ret)
return ret;
@@ -1129,13 +994,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 1;
dst_vq->dev = inst->core->dev;
- ret = vb2_queue_init(dst_vq);
- if (ret) {
- vb2_queue_release(src_vq);
- return ret;
- }
-
- return 0;
+ return vb2_queue_init(dst_vq);
}
static void venc_inst_init(struct venus_inst *inst)
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 8362dde7949e..0708b3b89d0c 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -103,25 +103,15 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
ctr->h264_entropy_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
- ctr->profile.mpeg4 = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- ctr->profile.h264 = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
- ctr->profile.hevc = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
- ctr->profile.vpx = ctrl->val;
+ ctr->profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
- ctr->level.mpeg4 = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- ctr->level.h264 = ctrl->val;
- break;
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- ctr->level.hevc = ctrl->val;
+ ctr->level = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctr->h264_i_qp = ctrl->val;
@@ -202,6 +192,12 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
ctr->rc_enable = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
+ ctr->const_quality = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ ctr->frame_skip_mode = ctrl->val;
+ break;
default:
return -EINVAL;
}
@@ -217,7 +213,7 @@ int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 31);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 33);
if (ret)
return ret;
@@ -225,7 +221,8 @@ int venc_ctrl_init(struct venus_inst *inst)
V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
- (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)),
+ (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) |
+ (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)),
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
@@ -357,6 +354,16 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY, 0, 100, 1, 0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE,
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ ~((1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ (1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT)),
+ V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED);
+
ret = inst->ctrl_handler.error;
if (ret)
goto err;
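With the CQ flavour of V4L2_CID_MPEG_VIDEO_BITRATE_MODE and the two new
controls in place, userspace can request constant-quality encoding, which the
driver maps to HFI_RATE_CONTROL_CQ plus the HEIC frame-quality property. A
hedged sketch of the control setup; the file descriptor and the quality value
of 80 are illustrative:

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int setup_cq(int fd)
    {
        struct v4l2_ext_control arr[2] = {
            { .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
              .value = V4L2_MPEG_VIDEO_BITRATE_MODE_CQ },
            { .id = V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY,
              .value = 80 },      /* range 0..100 */
        };
        struct v4l2_ext_controls ctrls = {
            .which = V4L2_CTRL_WHICH_CUR_VAL,
            .count = 2,
            .controls = arr,
        };

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }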
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 5c6b00737fe7..5c03318ae07b 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -22,7 +22,6 @@
struct rcar_fcp_device {
struct list_head list;
struct device *dev;
- struct device_dma_parameters dma_parms;
};
static LIST_HEAD(fcp_devices);
@@ -103,8 +102,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0;
ret = pm_runtime_get_sync(fcp->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(fcp->dev);
return ret;
+ }
return 0;
}
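This is the first of several identical fixes in this series:
pm_runtime_get_sync() takes a usage-count reference even when it fails, so
the error path must drop it with pm_runtime_put_noidle() to keep the counter
balanced. The pattern in isolation, as a minimal sketch (later kernels wrap
exactly this sequence as pm_runtime_resume_and_get()):

    #include <linux/pm_runtime.h>

    static int example_enable(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
            /* the counter was bumped even on failure: undo it */
            pm_runtime_put_noidle(dev);
            return ret;
        }

        return 0;
    }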
@@ -138,8 +139,7 @@ static int rcar_fcp_probe(struct platform_device *pdev)
fcp->dev = &pdev->dev;
- fcp->dev->dma_parms = &fcp->dma_parms;
- dma_set_max_seg_size(fcp->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(fcp->dev, UINT_MAX);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index ca0d906dce2f..030312d862e7 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -9,7 +9,7 @@ config VIDEO_RCAR_CSI2
select V4L2_FWNODE
help
Support for Renesas R-Car MIPI CSI-2 receiver.
- Supports R-Car Gen3 SoCs.
+ Supports R-Car Gen3 and RZ/G2 SoCs.
To compile this driver as a module, choose M here: the
module will be called rcar-csi2.
@@ -24,7 +24,7 @@ config VIDEO_RCAR_VIN
select V4L2_FWNODE
help
Support for Renesas R-Car Video Input (VIN) driver.
- Supports R-Car Gen2 and Gen3 SoCs.
+ Supports R-Car Gen{2,3} and RZ/G{1,2} SoCs.
To compile this driver as a module, choose M here: the
module will be called rcar-vin.
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 7440c8965d27..34d003e0e9b9 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -243,7 +243,6 @@ static struct rvin_group *rvin_group_data;
static void rvin_group_cleanup(struct rvin_group *group)
{
- media_device_unregister(&group->mdev);
media_device_cleanup(&group->mdev);
mutex_destroy(&group->lock);
}
@@ -253,7 +252,6 @@ static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
struct media_device *mdev = &group->mdev;
const struct of_device_id *match;
struct device_node *np;
- int ret;
mutex_init(&group->lock);
@@ -278,11 +276,7 @@ static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
media_device_init(mdev);
- ret = media_device_register(&group->mdev);
- if (ret)
- rvin_group_cleanup(group);
-
- return ret;
+ return 0;
}
static void rvin_group_release(struct kref *kref)
@@ -626,12 +620,11 @@ static int rvin_parallel_parse_v4l2(struct device *dev,
switch (vin->parallel->mbus_type) {
case V4L2_MBUS_PARALLEL:
- vin_dbg(vin, "Found PARALLEL media bus\n");
- vin->parallel->mbus_flags = vep->bus.parallel.flags;
- break;
case V4L2_MBUS_BT656:
- vin_dbg(vin, "Found BT656 media bus\n");
- vin->parallel->mbus_flags = 0;
+ vin_dbg(vin, "Found %s media bus\n",
+ vin->parallel->mbus_type == V4L2_MBUS_PARALLEL ?
+ "PARALLEL" : "BT656");
+ vin->parallel->bus = vep->bus.parallel;
break;
default:
vin_err(vin, "Unknown media bus type\n");
@@ -682,6 +675,10 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
unsigned int i;
int ret;
+ ret = media_device_register(&vin->group->mdev);
+ if (ret)
+ return ret;
+
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
if (ret) {
vin_err(vin, "Failed to register subdev nodes\n");
@@ -762,6 +759,8 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
}
mutex_unlock(&vin->group->lock);
+
+ media_device_unregister(&vin->group->mdev);
}
static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
@@ -944,6 +943,42 @@ static const struct rvin_info rcar_info_gen2 = {
.max_height = 2048,
};
+static const struct rvin_group_route rcar_info_r8a774e1_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 1, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 1, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 2, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 2, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 2, .mask = BIT(4) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 3, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 3, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 4, .mask = BIT(1) | BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 5, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 5, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 6, .mask = BIT(0) },
+ { .csi = RVIN_CSI20, .channel = 0, .vin = 6, .mask = BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 2, .vin = 6, .mask = BIT(4) },
+ { .csi = RVIN_CSI20, .channel = 1, .vin = 7, .mask = BIT(1) | BIT(2) },
+ { .csi = RVIN_CSI20, .channel = 3, .vin = 7, .mask = BIT(4) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a774e1 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a774e1_routes,
+};
+
static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
{ .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
{ .csi = RVIN_CSI20, .channel = 0, .vin = 0, .mask = BIT(1) | BIT(4) },
@@ -1221,6 +1256,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a77990,
},
{
+ .compatible = "renesas,vin-r8a774e1",
+ .data = &rcar_info_r8a774e1,
+ },
+ {
.compatible = "renesas,vin-r8a7778",
.data = &rcar_info_m1,
},
@@ -1370,12 +1409,8 @@ static int rcar_vin_remove(struct platform_device *pdev)
v4l2_async_notifier_cleanup(&vin->notifier);
if (vin->info->use_mc) {
- mutex_lock(&vin->group->lock);
- if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- }
- mutex_unlock(&vin->group->lock);
+ v4l2_async_notifier_unregister(&vin->group->notifier);
+ v4l2_async_notifier_cleanup(&vin->group->notifier);
rvin_group_put(vin);
}
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index c6cc4f473a07..79f229756805 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -320,6 +320,9 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .bpp = 16 },
{ .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .bpp = 16 },
{ .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .bpp = 20 },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .datatype = 0x2a, .bpp = 8 },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .datatype = 0x2a, .bpp = 8 },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .datatype = 0x2a, .bpp = 8 },
{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .datatype = 0x2a, .bpp = 8 },
};
@@ -362,8 +365,8 @@ struct rcar_csi2 {
struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
struct v4l2_async_notifier notifier;
- struct v4l2_async_subdev asd;
struct v4l2_subdev *remote;
+ unsigned int remote_pad;
struct v4l2_mbus_framefmt mf;
@@ -409,13 +412,14 @@ static void rcsi2_exit_standby(struct rcar_csi2 *priv)
reset_control_deassert(priv->rstc);
}
-static int rcsi2_wait_phy_start(struct rcar_csi2 *priv)
+static int rcsi2_wait_phy_start(struct rcar_csi2 *priv,
+ unsigned int lanes)
{
unsigned int timeout;
/* Wait for the clock and data lanes to enter LP-11 state. */
for (timeout = 0; timeout <= 20; timeout++) {
- const u32 lane_mask = (1 << priv->lanes) - 1;
+ const u32 lane_mask = (1 << lanes) - 1;
if ((rcsi2_read(priv, PHCLM_REG) & PHCLM_STOPSTATECKL) &&
(rcsi2_read(priv, PHDLM_REG) & lane_mask) == lane_mask)
@@ -447,7 +451,8 @@ static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
return 0;
}
-static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
+static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
+ unsigned int lanes)
{
struct v4l2_subdev *source;
struct v4l2_ctrl *ctrl;
@@ -472,15 +477,64 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
* bps = link_freq * 2
*/
mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
- do_div(mbps, priv->lanes * 1000000);
+ do_div(mbps, lanes * 1000000);
return mbps;
}
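With the extra "lanes" argument, the per-lane rate now follows the number of
active data lanes rather than the static value from the device tree:
Mbps = rate * bpp / (lanes * 1000000), with "rate" read from the remote
subdevice's control. Assuming, for illustration, a reported rate of
148500000, 16 bpp and 4 active lanes, each lane runs at
148500000 * 16 / (4 * 1000000) = 594 Mbps.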
+static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
+ unsigned int *lanes)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ unsigned int num_lanes = UINT_MAX;
+ int ret;
+
+ *lanes = priv->lanes;
+
+ ret = v4l2_subdev_call(priv->remote, pad, get_mbus_config,
+ priv->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(priv->dev, "No remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(priv->dev, "Failed to get remote mbus configuration\n");
+ return ret;
+ }
+
+ if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(priv->dev, "Unsupported media bus type %u\n",
+ mbus_config.type);
+ return -EINVAL;
+ }
+
+ if (mbus_config.flags & V4L2_MBUS_CSI2_1_LANE)
+ num_lanes = 1;
+ else if (mbus_config.flags & V4L2_MBUS_CSI2_2_LANE)
+ num_lanes = 2;
+ else if (mbus_config.flags & V4L2_MBUS_CSI2_3_LANE)
+ num_lanes = 3;
+ else if (mbus_config.flags & V4L2_MBUS_CSI2_4_LANE)
+ num_lanes = 4;
+
+ if (num_lanes > priv->lanes) {
+ dev_err(priv->dev,
+ "Unsupported mbus config: too many data lanes %u\n",
+ num_lanes);
+ return -EINVAL;
+ }
+
+ *lanes = num_lanes;
+
+ return 0;
+}
+
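The helper depends on the remote subdevice implementing the new
get_mbus_config pad operation; when it is missing (-ENOIOCTLCMD) the receiver
keeps the lane count from the device tree. As a hedged sketch of the
transmitter side, a hypothetical sensor driving two of its wired lanes could
report:

    static int sensor_get_mbus_config(struct v4l2_subdev *sd,
                                      unsigned int pad,
                                      struct v4l2_mbus_config *cfg)
    {
        cfg->type = V4L2_MBUS_CSI2_DPHY;
        /* advertise the lanes actually used by the current mode */
        cfg->flags = V4L2_MBUS_CSI2_2_LANE;

        return 0;
    }

    static const struct v4l2_subdev_pad_ops sensor_pad_ops = {
        .get_mbus_config = sensor_get_mbus_config,
    };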
static int rcsi2_start_receiver(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
+ unsigned int lanes;
unsigned int i;
int mbps, ret;
@@ -522,10 +576,18 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
fld |= FLD_FLD_NUM(1);
}
+ /*
+ * Get the number of active data lanes by inspecting the remote mbus
+ * configuration.
+ */
+ ret = rcsi2_get_active_lanes(priv, &lanes);
+ if (ret)
+ return ret;
+
phycnt = PHYCNT_ENABLECLK;
- phycnt |= (1 << priv->lanes) - 1;
+ phycnt |= (1 << lanes) - 1;
- mbps = rcsi2_calc_mbps(priv, format->bpp);
+ mbps = rcsi2_calc_mbps(priv, format->bpp, lanes);
if (mbps < 0)
return mbps;
@@ -572,7 +634,7 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
- ret = rcsi2_wait_phy_start(priv);
+ ret = rcsi2_wait_phy_start(priv, lanes);
if (ret)
return ret;
@@ -749,6 +811,7 @@ static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier,
}
priv->remote = subdev;
+ priv->remote_pad = pad;
dev_dbg(priv->dev, "Bound %s pad: %d\n", subdev->name, pad);
@@ -811,6 +874,8 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
static int rcsi2_parse_dt(struct rcar_csi2 *priv)
{
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode;
struct device_node *ep;
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
int ret;
@@ -834,24 +899,19 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
return ret;
}
- priv->asd.match.fwnode =
- fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
- priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
-
+ fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep));
of_node_put(ep);
- v4l2_async_notifier_init(&priv->notifier);
-
- ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd);
- if (ret) {
- fwnode_handle_put(priv->asd.match.fwnode);
- return ret;
- }
+ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
+ v4l2_async_notifier_init(&priv->notifier);
priv->notifier.ops = &rcar_csi2_notify_ops;
- dev_dbg(priv->dev, "Found '%pOF'\n",
- to_of_node(priv->asd.match.fwnode));
+ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
+ sizeof(*asd));
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
ret = v4l2_async_subdev_notifier_register(&priv->subdev,
&priv->notifier);
@@ -1091,6 +1151,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a77990,
},
{
+ .compatible = "renesas,r8a774e1-csi2",
+ .data = &rcar_csi2_info_r8a7795,
+ },
+ {
.compatible = "renesas,r8a7795-csi2",
.data = &rcar_csi2_info_r8a7795,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index a5dbb90c5210..692dea300b0d 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -125,6 +125,7 @@
#define VNDMR2_VPS (1 << 30)
#define VNDMR2_HPS (1 << 29)
#define VNDMR2_CES (1 << 28)
+#define VNDMR2_YDS (1 << 22)
#define VNDMR2_FTEV (1 << 17)
#define VNDMR2_VLV(n) ((n & 0xf) << 12)
@@ -598,8 +599,16 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
/* For RAW8 formats bpp is 1, but the hardware processes RAW8
* in 2-pixel units, hence configure VNIS_REG as stride / 2.
*/
- if (vin->format.pixelformat == V4L2_PIX_FMT_SRGGB8)
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
stride /= 2;
+ break;
+ default:
+ break;
+ }
rvin_write(vin, stride, VNIS_REG);
}
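The stride halving now covers all four 8-bit Bayer orders, not just SRGGB8.
As a worked example, a 1920-pixel-wide RAW8 capture has a line stride of
1920 bytes, so VNIS_REG is programmed with 960 to match the hardware's
2-pixel processing unit.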
@@ -683,6 +692,9 @@ static int rvin_setup(struct rvin_dev *vin)
input_is_yuv = true;
break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
vnmc |= VNMC_INF_RAW8;
break;
@@ -698,16 +710,26 @@ static int rvin_setup(struct rvin_dev *vin)
if (!vin->is_csi) {
/* Hsync Signal Polarity Select */
- if (!(vin->parallel->mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ if (!(vin->parallel->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_HPS;
/* Vsync Signal Polarity Select */
- if (!(vin->parallel->mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ if (!(vin->parallel->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_VPS;
/* Data Enable Polarity Select */
- if (vin->parallel->mbus_flags & V4L2_MBUS_DATA_ENABLE_LOW)
+ if (vin->parallel->bus.flags & V4L2_MBUS_DATA_ENABLE_LOW)
dmr2 |= VNDMR2_CES;
+
+ switch (vin->mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ if (vin->parallel->bus.bus_width == 8 &&
+ vin->parallel->bus.data_shift == 8)
+ dmr2 |= VNDMR2_YDS;
+ break;
+ default:
+ break;
+ }
}
/*
@@ -747,6 +769,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_ABGR32:
dmr = VNDMR_A8BIT(vin->alpha) | VNDMR_EXRGB | VNDMR_DTMD_ARGB;
break;
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
dmr = 0;
break;
@@ -1124,6 +1149,18 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_UYVY10_2X10:
case MEDIA_BUS_FMT_RGB888_1X24:
break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SBGGR8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SGBRG8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SGRBG8)
+ return -EPIPE;
+ break;
case MEDIA_BUS_FMT_SRGGB8_1X8:
if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8)
return -EPIPE;
@@ -1409,8 +1446,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
int ret;
ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(vin->dev);
return ret;
+ }
/* Make register writes take effect immediately. */
vnmc = rvin_read(vin, VNMC_REG);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0e066bba747e..3e7a3ae2a6b9 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -67,6 +67,18 @@ static const struct rvin_video_format rvin_formats[] = {
.bpp = 4,
},
{
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.bpp = 1,
},
@@ -366,6 +378,21 @@ static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
case MEDIA_BUS_FMT_UYVY10_2X10:
case MEDIA_BUS_FMT_RGB888_1X24:
break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ return 0;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SGBRG8;
+ return 0;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SGRBG8;
+ return 0;
case MEDIA_BUS_FMT_SRGGB8_1X8:
if (f->index)
return -EINVAL;
@@ -844,8 +871,10 @@ static int rvin_open(struct file *file)
int ret;
ret = pm_runtime_get_sync(vin->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(vin->dev);
return ret;
+ }
ret = mutex_lock_interruptible(&vin->lock);
if (ret)
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index c19d077ce1cb..8396e0e45478 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -19,6 +19,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/videobuf2-v4l2.h>
/* Number of HW buffers */
@@ -92,7 +93,7 @@ struct rvin_video_format {
* @asd: sub-device descriptor for async framework
* @subdev: subdevice matched using async framework
* @mbus_type: media bus type
- * @mbus_flags: media bus configuration flags
+ * @bus: media bus parallel configuration
* @source_pad: source pad of remote subdevice
* @sink_pad: sink pad of remote subdevice
*
@@ -102,7 +103,7 @@ struct rvin_parallel_entity {
struct v4l2_subdev *subdev;
enum v4l2_mbus_type mbus_type;
- unsigned int mbus_flags;
+ struct v4l2_fwnode_bus_parallel bus;
unsigned int source_pad;
unsigned int sink_pad;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 3d2451ac347d..f318cd4b8086 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -185,7 +185,6 @@ struct rcar_drif_frame_buf {
/* OF graph endpoint's V4L2 async data */
struct rcar_drif_graph_ep {
struct v4l2_subdev *subdev; /* Async matched subdev */
- struct v4l2_async_subdev asd; /* Async sub-device descriptor */
};
/* DMA buffer */
@@ -1109,12 +1108,6 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
struct rcar_drif_sdr *sdr =
container_of(notifier, struct rcar_drif_sdr, notifier);
- if (sdr->ep.asd.match.fwnode !=
- of_fwnode_handle(subdev->dev->of_node)) {
- rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
- return -EINVAL;
- }
-
v4l2_set_subdev_hostdata(subdev, sdr);
sdr->ep.subdev = subdev;
rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
@@ -1218,7 +1211,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
{
struct v4l2_async_notifier *notifier = &sdr->notifier;
struct fwnode_handle *fwnode, *ep;
- int ret;
+ struct v4l2_async_subdev *asd;
v4l2_async_notifier_init(notifier);
@@ -1227,26 +1220,21 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
if (!ep)
return 0;
+ /* Get the endpoint properties */
+ rcar_drif_get_ep_properties(sdr, ep);
+
fwnode = fwnode_graph_get_remote_port_parent(ep);
+ fwnode_handle_put(ep);
if (!fwnode) {
dev_warn(sdr->dev, "bad remote port parent\n");
- fwnode_handle_put(ep);
return -EINVAL;
}
- sdr->ep.asd.match.fwnode = fwnode;
- sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- ret = v4l2_async_notifier_add_subdev(notifier, &sdr->ep.asd);
- if (ret) {
- fwnode_handle_put(fwnode);
- return ret;
- }
-
- /* Get the endpoint properties */
- rcar_drif_get_ep_properties(sdr, ep);
-
+ asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
+ sizeof(*asd));
fwnode_handle_put(fwnode);
- fwnode_handle_put(ep);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
return 0;
}
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index f7d71a6a7970..4a633ad0e8fa 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -405,7 +405,7 @@ static int ceu_hw_config(struct ceu_device *ceudev)
/* Non-swapped planar image capture mode. */
case V4L2_PIX_FMT_NV16:
cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
- /* fall-through */
+ fallthrough;
case V4L2_PIX_FMT_NV12:
if (mbus_fmt->swapped)
camcr = mbus_fmt->fmt_order_swap;
@@ -419,7 +419,7 @@ static int ceu_hw_config(struct ceu_device *ceudev)
/* Swapped planar image capture mode. */
case V4L2_PIX_FMT_NV61:
cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
- /* fall-through */
+ fallthrough;
case V4L2_PIX_FMT_NV21:
if (mbus_fmt->swapped)
camcr = mbus_fmt->fmt_order;
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 36b821ccc1db..bf9a75b75083 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -81,6 +81,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
ret = pm_runtime_get_sync(rga->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(rga->dev);
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
return ret;
}
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index 92f43c0cbc0c..422fd549e9c8 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -464,7 +464,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
ret = camif_media_dev_init(camif);
if (ret < 0)
- goto err_alloc;
+ goto err_pm;
ret = camif_register_sensor(camif);
if (ret < 0)
@@ -498,10 +498,9 @@ err_sens:
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
-err_alloc:
+err_pm:
pm_runtime_put(dev);
pm_runtime_disable(dev);
-err_pm:
camif_clk_put(camif);
err_clk:
s3c_camif_unregister_subdev(camif);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 912fe0c5ab18..acc2217dd7e9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -262,6 +262,12 @@ static struct mfc_control controls[] = {
.default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .maximum = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .default_value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ },
+ {
.id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Fixed Target Bit Enable",
@@ -1849,6 +1855,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
p->seq_hdr_mode = ctrl->val;
break;
case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
p->frame_skip_mode = ctrl->val;
break;
case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 7d52431c2c83..62d2320a7218 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -79,8 +79,10 @@ int s5p_mfc_power_on(void)
int i, ret = 0;
ret = pm_runtime_get_sync(pm->device);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(pm->device);
return ret;
+ }
/* clock control */
for (i = 0; i < pm->num_clocks; i++) {
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 77ca7517fa3e..2b270093009c 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -637,35 +637,18 @@ DEFINE_SHOW_ATTRIBUTE(last_nodes_raw);
DEFINE_SHOW_ATTRIBUTE(last_request);
DEFINE_SHOW_ATTRIBUTE(perf);
-int bdisp_debugfs_create(struct bdisp_dev *bdisp)
+void bdisp_debugfs_create(struct bdisp_dev *bdisp)
{
char dirname[16];
snprintf(dirname, sizeof(dirname), "%s%d", BDISP_NAME, bdisp->id);
bdisp->dbg.debugfs_entry = debugfs_create_dir(dirname, NULL);
- if (!bdisp->dbg.debugfs_entry)
- goto err;
- if (!bdisp_dbg_create_entry(regs))
- goto err;
-
- if (!bdisp_dbg_create_entry(last_nodes))
- goto err;
-
- if (!bdisp_dbg_create_entry(last_nodes_raw))
- goto err;
-
- if (!bdisp_dbg_create_entry(last_request))
- goto err;
-
- if (!bdisp_dbg_create_entry(perf))
- goto err;
-
- return 0;
-
-err:
- bdisp_debugfs_remove(bdisp);
- return -ENOMEM;
+ bdisp_dbg_create_entry(regs);
+ bdisp_dbg_create_entry(last_nodes);
+ bdisp_dbg_create_entry(last_nodes_raw);
+ bdisp_dbg_create_entry(last_request);
+ bdisp_dbg_create_entry(perf);
}
void bdisp_debugfs_remove(struct bdisp_dev *bdisp)
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index af2d5eb782ce..060ca85f64d5 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1360,18 +1360,14 @@ static int bdisp_probe(struct platform_device *pdev)
}
/* Debug */
- ret = bdisp_debugfs_create(bdisp);
- if (ret) {
- dev_err(dev, "failed to create debugfs\n");
- goto err_v4l2;
- }
+ bdisp_debugfs_create(bdisp);
/* Power management */
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to set PM\n");
- goto err_dbg;
+ goto err_pm;
}
/* Filters */
@@ -1399,9 +1395,7 @@ err_filter:
bdisp_hw_free_filters(bdisp->dev);
err_pm:
pm_runtime_put(dev);
-err_dbg:
bdisp_debugfs_remove(bdisp);
-err_v4l2:
v4l2_device_unregister(&bdisp->v4l2_dev);
err_clk:
if (!IS_ERR(bdisp->clock))
diff --git a/drivers/media/platform/sti/bdisp/bdisp.h b/drivers/media/platform/sti/bdisp/bdisp.h
index e309cde379ca..3fb009d24791 100644
--- a/drivers/media/platform/sti/bdisp/bdisp.h
+++ b/drivers/media/platform/sti/bdisp/bdisp.h
@@ -209,6 +209,6 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp);
int bdisp_hw_update(struct bdisp_ctx *ctx);
void bdisp_debugfs_remove(struct bdisp_dev *bdisp);
-int bdisp_debugfs_create(struct bdisp_dev *bdisp);
+void bdisp_debugfs_create(struct bdisp_dev *bdisp);
void bdisp_dbg_perf_begin(struct bdisp_dev *bdisp);
void bdisp_dbg_perf_end(struct bdisp_dev *bdisp);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 5baada4f65e5..dbe7788083a4 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -77,9 +77,9 @@ static void c8sectpfe_timer_interrupt(struct timer_list *t)
add_timer(&fei->timer);
}
-static void channel_swdemux_tsklet(unsigned long data)
+static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
- struct channel_info *channel = (struct channel_info *)data;
+ struct channel_info *channel = from_tasklet(channel, t, tsklet);
struct c8sectpfei *fei;
unsigned long wp, rp;
int pos, num_packets, n, size;
@@ -208,8 +208,7 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
dev_dbg(fei->dev, "Starting channel=%p\n", channel);
- tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
- (unsigned long) channel);
+ tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
/* Reset the internal inputblock sram pointers */
writel(channel->fifo,
@@ -638,8 +637,7 @@ static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
/* initialize tasklet */
- tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
- (unsigned long) tsin);
+ tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);
return 0;
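Both call sites now use the tasklet_setup()/from_tasklet() pair, which passes
the tasklet pointer itself to the callback instead of an opaque unsigned long
and recovers the containing structure via container_of(). The idiom in
isolation, with hypothetical names:

    #include <linux/interrupt.h>

    struct my_channel {
        struct tasklet_struct tsklet;
        /* ... */
    };

    static void my_tsklet_fn(struct tasklet_struct *t)
    {
        /* map the tasklet member back to its enclosing channel */
        struct my_channel *ch = from_tasklet(ch, t, tsklet);

        /* process ch ... */
    }

    static void my_channel_init(struct my_channel *ch)
    {
        tasklet_setup(&ch->tsklet, my_tsklet_fn);
    }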
diff --git a/drivers/media/platform/sti/delta/delta-v4l2.c b/drivers/media/platform/sti/delta/delta-v4l2.c
index 2503224eeee5..c691b3d81549 100644
--- a/drivers/media/platform/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/sti/delta/delta-v4l2.c
@@ -954,8 +954,10 @@ static void delta_run_work(struct work_struct *work)
/* enable the hardware */
if (!dec->pm) {
ret = delta_get_sync(ctx);
- if (ret)
+ if (ret) {
+ delta_put_autosuspend(ctx);
goto err;
+ }
}
/* decode this access unit */
diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c
index 7d12a5b5d914..a86a07b6fbc7 100644
--- a/drivers/media/platform/sti/hva/hva-debugfs.c
+++ b/drivers/media/platform/sti/hva/hva-debugfs.c
@@ -337,25 +337,11 @@ DEFINE_SHOW_ATTRIBUTE(regs);
void hva_debugfs_create(struct hva_dev *hva)
{
hva->dbg.debugfs_entry = debugfs_create_dir(HVA_NAME, NULL);
- if (!hva->dbg.debugfs_entry)
- goto err;
- if (!hva_dbg_create_entry(device))
- goto err;
-
- if (!hva_dbg_create_entry(encoders))
- goto err;
-
- if (!hva_dbg_create_entry(last))
- goto err;
-
- if (!hva_dbg_create_entry(regs))
- goto err;
-
- return;
-
-err:
- hva_debugfs_remove(hva);
+ hva_dbg_create_entry(device);
+ hva_dbg_create_entry(encoders);
+ hva_dbg_create_entry(last);
+ hva_dbg_create_entry(regs);
}
void hva_debugfs_remove(struct hva_dev *hva)
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 401aaafa1710..43f279e2a6a3 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -272,6 +272,7 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
if (pm_runtime_get_sync(dev) < 0) {
dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
+ pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return -EFAULT;
}
@@ -388,7 +389,7 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
- goto err_clk;
+ goto err_pm;
}
/* check IP hardware version */
@@ -553,6 +554,7 @@ void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
if (pm_runtime_get_sync(dev) < 0) {
seq_puts(s, "Cannot wake up IP\n");
+ pm_runtime_put_noidle(dev);
mutex_unlock(&hva->protect_mutex);
return;
}
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index b8931490b83b..fd1c41cba52f 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -733,7 +733,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
__func__, ret);
- goto err_release_buffers;
+ goto err_pm_put;
}
ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline);
@@ -837,8 +837,6 @@ err_media_pipeline_stop:
err_pm_put:
pm_runtime_put(dcmi->dev);
-
-err_release_buffers:
spin_lock_irq(&dcmi->irqlock);
/*
* Return all buffers to vb2 in QUEUED state.
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 5319eb1ab309..eb15c8c725ca 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -182,8 +183,14 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (ret)
return ret;
} else {
+ /*
+ * XXX(hch): this has no business in a driver and needs to move
+ * to the device tree.
+ */
#ifdef PHYS_PFN_OFFSET
- csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ ret = dma_direct_set_offset(csi->dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ return ret;
#endif
}
@@ -287,6 +294,7 @@ static int sun4i_csi_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&csi->notifier);
v4l2_async_notifier_cleanup(&csi->notifier);
+ vb2_video_unregister_device(&csi->vdev);
media_device_unregister(&csi->mdev);
sun4i_csi_dma_unregister(csi);
media_device_cleanup(&csi->mdev);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
index 3278746246aa..2c39cd7f2862 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
@@ -431,7 +431,7 @@ int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
ret = v4l2_device_register(csi->dev, &csi->v4l);
if (ret) {
dev_err(csi->dev, "Couldn't register the v4l2 device\n");
- goto err_free_queue;
+ goto err_free_mutex;
}
ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
@@ -446,9 +446,6 @@ int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
err_unregister_device:
v4l2_device_unregister(&csi->v4l);
-err_free_queue:
- vb2_queue_release(q);
-
err_free_mutex:
mutex_destroy(&csi->lock);
return ret;
@@ -457,6 +454,5 @@ err_free_mutex:
void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
{
v4l2_device_unregister(&csi->v4l);
- vb2_queue_release(&csi->queue);
mutex_destroy(&csi->lock);
}
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 28e89340fed9..e69e14379fc6 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -899,8 +899,15 @@ static int sun6i_csi_probe(struct platform_device *pdev)
return -ENOMEM;
sdev->dev = &pdev->dev;
- /* The DMA bus has the memory mapped at 0 */
- sdev->dev->dma_pfn_offset = PHYS_OFFSET >> PAGE_SHIFT;
+ /*
+ * The DMA bus has the memory mapped at 0.
+ *
+ * XXX(hch): this has no business in a driver and needs to move
+ * to the device tree.
+ */
+ ret = dma_direct_set_offset(sdev->dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ return ret;
ret = sun6i_csi_resource_request(sdev, pdev);
if (ret)
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index d9648b2810b9..b55de9ab64d8 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -660,13 +660,11 @@ int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
if (ret < 0) {
v4l2_err(&csi->v4l2_dev,
"video_register_device failed: %d\n", ret);
- goto release_vb2;
+ goto clean_entity;
}
return 0;
-release_vb2:
- vb2_queue_release(&video->vb2_vidq);
clean_entity:
media_entity_cleanup(&video->vdev.entity);
mutex_destroy(&video->lock);
@@ -675,8 +673,7 @@ clean_entity:
void sun6i_video_cleanup(struct sun6i_video *video)
{
- video_unregister_device(&video->vdev);
+ vb2_video_unregister_device(&video->vdev);
media_entity_cleanup(&video->vdev.entity);
- vb2_queue_release(&video->vb2_vidq);
mutex_destroy(&video->lock);
}
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index 94f505d3cbad..3f81dd17755c 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -747,11 +747,8 @@ static int rotate_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev->dev, "Failed to get IRQ\n");
-
+ if (irq <= 0)
return irq;
- }
ret = devm_request_irq(dev->dev, irq, rotate_irq,
0, dev_name(dev->dev), dev);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 346f8212791c..779dd74b82d0 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -2475,6 +2475,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
r = pm_runtime_get_sync(&pdev->dev);
WARN_ON(r < 0);
+ if (r)
+ pm_runtime_put_noidle(&pdev->dev);
return r < 0 ? r : 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index a4a45d68a6ef..86d5e3f4b1ff 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -912,8 +912,8 @@ int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
* skip cache sync. This will need to be revisited when support for
* non-coherent buffers will be added to the DU driver.
*/
- return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
@@ -921,8 +921,8 @@ void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents,
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
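dma_map_sgtable() is the sg_table-aware replacement for dma_map_sg_attrs():
it stores the mapped entry count inside the sg_table and returns 0 or a
negative errno rather than the number of entries, removing a common source of
nents/orig_nents confusion in callers. A minimal sketch of the calling
convention:

    #include <linux/dma-mapping.h>

    static int example_map(struct device *dev, struct sg_table *sgt)
    {
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
            return ret;    /* -errno on failure, 0 on success */

        /* ... hand the mapped table to the hardware ... */

        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        return 0;
    }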
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index c650e45bb0ad..dc62533cf32c 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -562,7 +562,12 @@ int vsp1_device_get(struct vsp1_device *vsp1)
int ret;
ret = pm_runtime_get_sync(vsp1->dev);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ pm_runtime_put_noidle(vsp1->dev);
+ return ret;
+ }
+
+ return 0;
}
/*
@@ -845,12 +850,12 @@ static int vsp1_probe(struct platform_device *pdev)
/* Configure device parameters based on the version register. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = vsp1_device_get(vsp1);
if (ret < 0)
goto done;
vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
- pm_runtime_put_sync(&pdev->dev);
+ vsp1_device_put(vsp1);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 7e2460263882..23997425bdb5 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1345,60 +1345,24 @@ static const struct file_operations radio_rsq_primary_fops = {
};
-static int si476x_radio_init_debugfs(struct si476x_radio *radio)
+static void si476x_radio_init_debugfs(struct si476x_radio *radio)
{
- struct dentry *dentry;
- int ret;
+ radio->debugfs = debugfs_create_dir(dev_name(radio->v4l2dev.dev), NULL);
- dentry = debugfs_create_dir(dev_name(radio->v4l2dev.dev), NULL);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto exit;
- }
- radio->debugfs = dentry;
-
- dentry = debugfs_create_file("acf", S_IRUGO,
- radio->debugfs, radio, &radio_acf_fops);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto cleanup;
- }
+ debugfs_create_file("acf", S_IRUGO, radio->debugfs, radio,
+ &radio_acf_fops);
- dentry = debugfs_create_file("rds_blckcnt", S_IRUGO,
- radio->debugfs, radio,
- &radio_rds_blckcnt_fops);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto cleanup;
- }
+ debugfs_create_file("rds_blckcnt", S_IRUGO, radio->debugfs, radio,
+ &radio_rds_blckcnt_fops);
- dentry = debugfs_create_file("agc", S_IRUGO,
- radio->debugfs, radio, &radio_agc_fops);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto cleanup;
- }
+ debugfs_create_file("agc", S_IRUGO, radio->debugfs, radio,
+ &radio_agc_fops);
- dentry = debugfs_create_file("rsq", S_IRUGO,
- radio->debugfs, radio, &radio_rsq_fops);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto cleanup;
- }
+ debugfs_create_file("rsq", S_IRUGO, radio->debugfs, radio,
+ &radio_rsq_fops);
- dentry = debugfs_create_file("rsq_primary", S_IRUGO,
- radio->debugfs, radio,
- &radio_rsq_primary_fops);
- if (IS_ERR(dentry)) {
- ret = PTR_ERR(dentry);
- goto cleanup;
- }
-
- return 0;
-cleanup:
- debugfs_remove_recursive(radio->debugfs);
-exit:
- return ret;
+ debugfs_create_file("rsq_primary", S_IRUGO, radio->debugfs, radio,
+ &radio_rsq_primary_fops);
}
@@ -1535,11 +1499,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
goto exit;
}
- rval = si476x_radio_init_debugfs(radio);
- if (rval < 0) {
- dev_err(&pdev->dev, "Could not create debugfs interface\n");
- goto exit;
- }
+ si476x_radio_init_debugfs(radio);
return 0;
exit:
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 7f3aee495ed3..6afa7c3464ab 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1157,7 +1157,7 @@ static int si4713_s_ctrl(struct v4l2_ctrl *ctrl)
* V4L2_CID_TUNE_POWER_LEVEL. */
if (force)
break;
- /* fall through */
+ fallthrough;
case V4L2_CID_TUNE_POWER_LEVEL:
ret = si4713_tx_tune_power(sdev,
sdev->tune_pwr_level->val, sdev->tune_ant_cap->val);
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index cce97c9d5409..6142484d5cb4 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -19,9 +19,11 @@
* Author: Manjunatha Halli <manjunatha_halli@ti.com>
*/
-#include <linux/module.h>
-#include <linux/firmware.h>
#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/nospec.h>
+
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
@@ -244,7 +246,7 @@ void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
* FM common sub-module will schedule this tasklet whenever it receives
* FM packet from ST driver.
*/
-static void recv_tasklet(unsigned long arg)
+static void recv_tasklet(struct tasklet_struct *t)
{
struct fmdev *fmdev;
struct fm_irq *irq_info;
@@ -253,7 +255,7 @@ static void recv_tasklet(unsigned long arg)
u8 num_fm_hci_cmds;
unsigned long flags;
- fmdev = (struct fmdev *)arg;
+ fmdev = from_tasklet(fmdev, t, rx_task);
irq_info = &fmdev->irq_info;
/* Process all packets in the RX queue */
while ((skb = skb_dequeue(&fmdev->rx_q))) {
@@ -328,13 +330,13 @@ static void recv_tasklet(unsigned long arg)
}
/* FM send tasklet: is scheduled when FM packet has to be sent to chip */
-static void send_tasklet(unsigned long arg)
+static void send_tasklet(struct tasklet_struct *t)
{
struct fmdev *fmdev;
struct sk_buff *skb;
int len;
- fmdev = (struct fmdev *)arg;
+ fmdev = from_tasklet(fmdev, t, tx_task);
if (!atomic_read(&fmdev->tx_cnt))
return;
@@ -700,7 +702,7 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
struct fm_rds *rds = &fmdev->rx.rds;
unsigned long group_idx, flags;
u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
- u8 type, blk_idx;
+ u8 type, blk_idx, idx;
u16 cur_picode;
u32 rds_len;
@@ -733,9 +735,11 @@ static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
}
/* Skip checkword (control) byte and copy only data byte */
- memcpy(&rds_fmt.data.groupdatabuff.
- buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
- rds_data, (FM_RDS_BLK_SIZE - 1));
+ idx = array_index_nospec(blk_idx * (FM_RDS_BLK_SIZE - 1),
+ FM_RX_RDS_INFO_FIELD_MAX - (FM_RDS_BLK_SIZE - 1));
+
+ memcpy(&rds_fmt.data.groupdatabuff.buff[idx], rds_data,
+ FM_RDS_BLK_SIZE - 1);
rds->last_blk_idx = blk_idx;
@@ -1535,11 +1539,11 @@ int fmc_prepare(struct fmdev *fmdev)
/* Initialize TX queue and TX tasklet */
skb_queue_head_init(&fmdev->tx_q);
- tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);
+ tasklet_setup(&fmdev->tx_task, send_tasklet);
/* Initialize RX Queue and RX tasklet */
skb_queue_head_init(&fmdev->rx_q);
- tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);
+ tasklet_setup(&fmdev->rx_task, recv_tasklet);
fmdev->irq_info.stage = 0;
atomic_set(&fmdev->tx_cnt, 1);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 9cdef17b4793..c12dda73cdd5 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -835,6 +835,10 @@ static int ati_remote_probe(struct usb_interface *interface,
err("%s: endpoint_in message size==0? \n", __func__);
return -ENODEV;
}
+ if (!usb_endpoint_is_int_out(endpoint_out)) {
+ err("%s: Unexpected endpoint_out\n", __func__);
+ return -ENODEV;
+ }
ati_remote = kzalloc(sizeof (struct ati_remote), GFP_KERNEL);
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 82867a2a60b0..6049e5c95394 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -432,27 +432,27 @@ static void ene_rx_setup(struct ene_device *dev)
select_timeout:
if (dev->rx_fan_input_inuse) {
- dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
+ dev->rdev->rx_resolution = ENE_FW_SAMPLE_PERIOD_FAN;
/* Fan input doesn't support timeouts; it just ends the
input with a maximum sample */
dev->rdev->min_timeout = dev->rdev->max_timeout =
- US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
- ENE_FW_SAMPLE_PERIOD_FAN);
+ ENE_FW_SMPL_BUF_FAN_MSK *
+ ENE_FW_SAMPLE_PERIOD_FAN;
} else {
- dev->rdev->rx_resolution = US_TO_NS(sample_period);
+ dev->rdev->rx_resolution = sample_period;
/* Theoretically the timeout is unlimited, but we cap it
* because it was seen that on one device, it
* would stop sending spaces after around 250 msec.
* Besides, this is close to 2^32 anyway and timeout is u32.
*/
- dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
- dev->rdev->max_timeout = US_TO_NS(200000);
+ dev->rdev->min_timeout = 127 * sample_period;
+ dev->rdev->max_timeout = 200000;
}
if (dev->hw_learning_and_tx_capable)
- dev->rdev->tx_resolution = US_TO_NS(sample_period);
+ dev->rdev->tx_resolution = sample_period;
if (dev->rdev->timeout > dev->rdev->max_timeout)
dev->rdev->timeout = dev->rdev->max_timeout;
@@ -798,7 +798,7 @@ static irqreturn_t ene_isr(int irq, void *data)
dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
- ev.duration = US_TO_NS(hw_sample);
+ ev.duration = hw_sample;
ev.pulse = pulse;
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
@@ -818,7 +818,7 @@ static void ene_setup_default_settings(struct ene_device *dev)
dev->learning_mode_enabled = learning_mode_force;
/* Set reasonable default timeout */
- dev->rdev->timeout = US_TO_NS(150000);
+ dev->rdev->timeout = MS_TO_US(150);
}
/* Upload all hardware settings at once. Used at load and resume time */
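The ene_ir changes above and the fintek, igorplugusb and iguanair hunks below
are part of one tree-wide conversion: rc-core now keeps IR durations,
timeouts and resolutions in microseconds instead of nanoseconds, so the
US_TO_NS() wrappers are dropped and millisecond constants use MS_TO_US().
The values are equivalent, e.g. the old US_TO_NS(150000) and the new
MS_TO_US(150) both describe a 150 ms timeout, and igorplugusb's 85333 ns
sample period becomes 85 us, the nearest microsecond.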
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 8e3177c5b586..b0d580566e4e 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -299,8 +299,8 @@ static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
case PARSE_IRDATA:
fintek->rem--;
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
- rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
- * CIR_SAMPLE_PERIOD);
+ rawir.duration = (sample & BUF_SAMPLE_MASK)
+ * CIR_SAMPLE_PERIOD;
fit_dbg("Storing %s with duration %d",
rawir.pulse ? "pulse" : "space",
@@ -524,9 +524,9 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
rdev->dev.parent = &pdev->dev;
rdev->driver_name = FINTEK_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
- rdev->timeout = US_TO_NS(1000);
+ rdev->timeout = 1000;
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
- rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+ rdev->rx_resolution = CIR_SAMPLE_PERIOD;
fintek->rdev = rdev;
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index a20413008c3c..22e524b69806 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -11,6 +11,8 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include <linux/irq.h>
#include <media/rc-core.h>
@@ -20,17 +22,38 @@ struct gpio_rc_dev {
struct rc_dev *rcdev;
struct gpio_desc *gpiod;
int irq;
+ struct device *pmdev;
+ struct pm_qos_request qos;
};
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
int val;
struct gpio_rc_dev *gpio_dev = dev_id;
+ struct device *pmdev = gpio_dev->pmdev;
+
+ /*
+ * On some (but not all) cpuidle systems, responding to an interrupt
+ * takes longer while the CPU is idle. Take an asynchronous runtime PM
+ * reference from interrupt context; the resume callback, which
+ * disables cpuidle, may then run up to a millisecond later.
+ *
+ * Two issues can make the first frame fail to decode: the latency in
+ * responding to the interrupt, and the delay introduced by the
+ * asynchronous runtime PM API.
+ */
+ if (pmdev)
+ pm_runtime_get(pmdev);
val = gpiod_get_value(gpio_dev->gpiod);
if (val >= 0)
ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
+ if (pmdev) {
+ pm_runtime_mark_last_busy(pmdev);
+ pm_runtime_put_autosuspend(pmdev);
+ }
+
return IRQ_HANDLED;
}
@@ -40,6 +63,7 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
+ u32 period = 0;
int rc;
if (!np)
@@ -90,6 +114,15 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
return rc;
}
+ of_property_read_u32(np, "linux,autosuspend-period", &period);
+ if (period) {
+ gpio_dev->pmdev = dev;
+ pm_runtime_set_autosuspend_delay(dev, period);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+ }
+
platform_set_drvdata(pdev, gpio_dev);
return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
@@ -122,9 +155,29 @@ static int gpio_ir_recv_resume(struct device *dev)
return 0;
}
+static int gpio_ir_recv_runtime_suspend(struct device *dev)
+{
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
+
+ cpu_latency_qos_remove_request(&gpio_dev->qos);
+
+ return 0;
+}
+
+static int gpio_ir_recv_runtime_resume(struct device *dev)
+{
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
+
+ cpu_latency_qos_add_request(&gpio_dev->qos, 0);
+
+ return 0;
+}
+
static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
.suspend = gpio_ir_recv_suspend,
.resume = gpio_ir_recv_resume,
+ .runtime_suspend = gpio_ir_recv_runtime_suspend,
+ .runtime_resume = gpio_ir_recv_runtime_resume,
};
#endif
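The gpio-ir-recv change above pairs asynchronous runtime PM in the IRQ handler with a CPU latency QoS request in the runtime-resume callback, so cpuidle cannot add latency while frames are being decoded. A condensed sketch of that pattern (the demo_* names are hypothetical; the pm_runtime_*() and cpu_latency_qos_*() calls are the kernel APIs used in the patch):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>

struct demo_dev {
	struct device *dev;
	struct pm_qos_request qos;
};

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_dev *d = data;

	pm_runtime_get(d->dev);		/* asynchronous: safe in IRQ context */
	/* ... sample the GPIO and feed the edge to rc-core here ... */
	pm_runtime_mark_last_busy(d->dev);
	pm_runtime_put_autosuspend(d->dev);
	return IRQ_HANDLED;
}

static int demo_runtime_resume(struct device *dev)
{
	struct demo_dev *d = dev_get_drvdata(dev);

	cpu_latency_qos_add_request(&d->qos, 0);	/* forbid deep idle */
	return 0;
}

static int demo_runtime_suspend(struct device *dev)
{
	struct demo_dev *d = dev_get_drvdata(dev);

	cpu_latency_qos_remove_request(&d->qos);	/* allow deep idle */
	return 0;
}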
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index b981f7290c1b..effaa5751d6c 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -69,7 +69,7 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len)
overflow);
do {
- rawir.duration = ir->buf_in[i] * 85333;
+ rawir.duration = ir->buf_in[i] * 85;
rawir.pulse = i & 1;
ir_raw_event_store_with_filter(ir->rc, &rawir);
@@ -202,8 +202,8 @@ static int igorplugusb_probe(struct usb_interface *intf,
rc->priv = ir;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_HAUPPAUGE;
- rc->timeout = MS_TO_NS(100);
- rc->rx_resolution = 85333;
+ rc->timeout = MS_TO_US(100);
+ rc->rx_resolution = 85;
ir->rc = rc;
ret = rc_register_device(rc);
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 566c2816d5be..84949baf9f6b 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -59,7 +59,7 @@ struct iguanair {
#define MAX_IN_PACKET 8u
#define MAX_OUT_PACKET (sizeof(struct send_packet) + BUF_SIZE)
#define TIMEOUT 1000
-#define RX_RESOLUTION 21333
+#define RX_RESOLUTION 21
struct packet {
uint16_t start;
@@ -101,7 +101,7 @@ static void process_ir_data(struct iguanair *ir, unsigned len)
break;
case CMD_TX_OVERFLOW:
ir->tx_overflow = true;
- /* fall through */
+ fallthrough;
case CMD_RECEIVER_OFF:
case CMD_RECEIVER_ON:
case CMD_SEND:
@@ -124,7 +124,7 @@ static void process_ir_data(struct iguanair *ir, unsigned len)
for (i = 0; i < 7; i++) {
if (ir->buf_in[i] == 0x80) {
rawir.pulse = false;
- rawir.duration = US_TO_NS(21845);
+ rawir.duration = 21845;
} else {
rawir.pulse = (ir->buf_in[i] & 0x80) == 0;
rawir.duration = ((ir->buf_in[i] & 0x7f) + 1) *
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index aae0a3cc9479..d41580f6e4c7 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -8,7 +8,7 @@
#include <media/rc-core.h>
/* Each bit is 250us */
-#define BIT_DURATION 250000
+#define BIT_DURATION 250
struct imon {
struct device *dev;
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index d80cfa455c73..0ffc27514fab 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -214,12 +214,12 @@ static irqreturn_t hix5hd2_ir_rx_interrupt(int irq, void *data)
data_h = ((symb_val >> 16) & 0xffff) * 10;
symb_time = (data_l + data_h) / 10;
- ev.duration = US_TO_NS(data_l);
+ ev.duration = data_l;
ev.pulse = true;
ir_raw_event_store(priv->rdev, &ev);
if (symb_time < IR_CFG_SYMBOL_MAXWIDTH) {
- ev.duration = US_TO_NS(data_h);
+ ev.duration = data_h;
ev.pulse = false;
ir_raw_event_store(priv->rdev, &ev);
} else {
@@ -311,8 +311,8 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
rdev->input_id.vendor = 0x0001;
rdev->input_id.product = 0x0001;
rdev->input_id.version = 0x0100;
- rdev->rx_resolution = US_TO_NS(10);
- rdev->timeout = US_TO_NS(IR_CFG_SYMBOL_MAXWIDTH * 10);
+ rdev->rx_resolution = 10;
+ rdev->timeout = IR_CFG_SYMBOL_MAXWIDTH * 10;
ret = rc_register_device(rdev);
if (ret < 0)
diff --git a/drivers/media/rc/ir-imon-decoder.c b/drivers/media/rc/ir-imon-decoder.c
index a0efe2605393..41dbbef27fa6 100644
--- a/drivers/media/rc/ir-imon-decoder.c
+++ b/drivers/media/rc/ir-imon-decoder.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include "rc-core-priv.h"
-#define IMON_UNIT 415662 /* ns */
+#define IMON_UNIT 416 /* us */
#define IMON_BITS 30
#define IMON_CHKBITS (BIT(30) | BIT(25) | BIT(24) | BIT(22) | \
BIT(21) | BIT(20) | BIT(19) | BIT(18) | \
@@ -102,8 +102,7 @@ static int ir_imon_decode(struct rc_dev *dev, struct ir_raw_event ev)
dev_dbg(&dev->dev,
"iMON decode started at state %d bitno %d (%uus %s)\n",
- data->state, data->count, TO_US(ev.duration),
- TO_STR(ev.pulse));
+ data->state, data->count, ev.duration, TO_STR(ev.pulse));
/*
* Since iMON protocol is a series of bits, if at any point
@@ -116,7 +115,7 @@ static int ir_imon_decode(struct rc_dev *dev, struct ir_raw_event ev)
* we're at a new scancode.
*/
if (data->state == STATE_ERROR) {
- if (!ev.pulse && ev.duration > MS_TO_NS(10))
+ if (!ev.pulse && ev.duration > MS_TO_US(10))
data->state = STATE_INACTIVE;
return 0;
}
@@ -169,8 +168,7 @@ static int ir_imon_decode(struct rc_dev *dev, struct ir_raw_event ev)
err_out:
dev_dbg(&dev->dev,
"iMON decode failed at state %d bitno %d (%uus %s)\n",
- data->state, data->count, TO_US(ev.duration),
- TO_STR(ev.pulse));
+ data->state, data->count, ev.duration, TO_STR(ev.pulse));
data->state = STATE_ERROR;
diff --git a/drivers/media/rc/ir-jvc-decoder.c b/drivers/media/rc/ir-jvc-decoder.c
index 864d9e316c33..470f2e1fd507 100644
--- a/drivers/media/rc/ir-jvc-decoder.c
+++ b/drivers/media/rc/ir-jvc-decoder.c
@@ -9,7 +9,7 @@
#include "rc-core-priv.h"
#define JVC_NBITS 16 /* dev(8) + func(8) */
-#define JVC_UNIT 525000 /* ns */
+#define JVC_UNIT 525 /* us */
#define JVC_HEADER_PULSE (16 * JVC_UNIT) /* lack of header -> repeat */
#define JVC_HEADER_SPACE (8 * JVC_UNIT)
#define JVC_BIT_PULSE (1 * JVC_UNIT)
@@ -49,7 +49,7 @@ static int ir_jvc_decode(struct rc_dev *dev, struct ir_raw_event ev)
goto out;
dev_dbg(&dev->dev, "JVC decode started at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
again:
switch (data->state) {
@@ -157,7 +157,7 @@ again:
out:
dev_dbg(&dev->dev, "JVC decode failed at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
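The protocol decoders compare measured durations against multiples of a per-protocol unit using margin helpers, which is why changing the unit constants from ns to us is enough here. A standalone sketch (the helper bodies mirror drivers/media/rc/rc-core-priv.h and are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define JVC_UNIT 525 /* us */

static bool eq_margin(unsigned int d1, unsigned int d2, unsigned int margin)
{
	return d1 > (d2 - margin) && d1 < (d2 + margin);
}

static bool geq_margin(unsigned int d1, unsigned int d2, unsigned int margin)
{
	return d1 >= (d2 - margin);
}

int main(void)
{
	/* a measured 540us pulse still matches one nominal 525us JVC unit */
	printf("%d\n", eq_margin(540, JVC_UNIT, JVC_UNIT / 2));	/* 1 */
	/* a 100us glitch does not reach the unit's lower bound */
	printf("%d\n", geq_margin(100, JVC_UNIT, JVC_UNIT / 2));	/* 0 */
	return 0;
}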
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index cfe837f773c1..be8f2756a444 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -21,7 +21,7 @@
* input device for the remote, rather than the keyboard/mouse one.
*/
-#define MCIR2_UNIT 333333 /* ns */
+#define MCIR2_UNIT 333 /* us */
#define MCIR2_HEADER_NBITS 5
#define MCIR2_MOUSE_NBITS 29
#define MCIR2_KEYBOARD_NBITS 32
@@ -231,7 +231,7 @@ static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
again:
dev_dbg(&dev->dev, "started at state %i (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
return 0;
@@ -344,7 +344,7 @@ again:
}
lsc.scancode = scancode;
- ir_lirc_scancode_event(dev, &lsc);
+ lirc_scancode_event(dev, &lsc);
data->state = STATE_INACTIVE;
input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
input_sync(dev->input_dev);
@@ -353,7 +353,7 @@ again:
out:
dev_dbg(&dev->dev, "failed at state %i (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 6a8973ae3684..b4c3e4baf34d 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -8,7 +8,7 @@
#include "rc-core-priv.h"
#define NEC_NBITS 32
-#define NEC_UNIT 562500 /* ns */
+#define NEC_UNIT 563 /* us */
#define NEC_HEADER_PULSE (16 * NEC_UNIT)
#define NECX_HEADER_PULSE (8 * NEC_UNIT) /* Less common NEC variant */
#define NEC_HEADER_SPACE (8 * NEC_UNIT)
@@ -50,7 +50,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "NEC decode started at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
@@ -163,7 +163,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "NEC decode failed at count %d state %d (%uus %s)\n",
- data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
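Rounding the 562.5us NEC unit to 563us introduces a small systematic error; a quick standalone check that the derived timings stay well within the half-unit decode margin:

#include <stdio.h>

#define NEC_UNIT 563 /* us, rounded from the nominal 562.5 */

int main(void)
{
	printf("header pulse: %d us (nominal 9000)\n", 16 * NEC_UNIT);	/* 9008 */
	printf("header space: %d us (nominal 4500)\n", 8 * NEC_UNIT);	/* 4504 */
	printf("bit pulse:    %d us (nominal 562.5)\n", 1 * NEC_UNIT);
	return 0;
}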
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 63624654a71e..d58b6226afeb 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -16,7 +16,7 @@
#define RC5_SZ_NBITS 15
#define RC5X_NBITS 20
#define CHECK_RC5X_NBITS 8
-#define RC5_UNIT 888888 /* ns */
+#define RC5_UNIT 889 /* us */
#define RC5_BIT_START (1 * RC5_UNIT)
#define RC5_BIT_END (1 * RC5_UNIT)
#define RC5X_SPACE (4 * RC5_UNIT)
@@ -55,7 +55,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
again:
dev_dbg(&dev->dev, "RC5(x/sz) decode started at state %i (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2))
return 0;
@@ -164,7 +164,7 @@ again:
out:
dev_dbg(&dev->dev, "RC5(x/sz) decode failed at state %i count %d (%uus %s)\n",
- data->state, data->count, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, data->count, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index 0cda78f72fd8..0657ad5eef48 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -15,7 +15,7 @@
* RC6-6A-32 (MCE version with toggle bit in body)
*/
-#define RC6_UNIT 444444 /* nanosecs */
+#define RC6_UNIT 444 /* microseconds */
#define RC6_HEADER_NBITS 4 /* not including toggle bit */
#define RC6_0_NBITS 16
#define RC6_6A_32_NBITS 32
@@ -95,7 +95,7 @@ static int ir_rc6_decode(struct rc_dev *dev, struct ir_raw_event ev)
again:
dev_dbg(&dev->dev, "RC6 decode started at state %i (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
if (!geq_margin(ev.duration, RC6_UNIT, RC6_UNIT / 2))
return 0;
@@ -270,7 +270,7 @@ again:
out:
dev_dbg(&dev->dev, "RC6 decode failed at state %i (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index 028df5cb1828..fd9ec69a3718 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -6,12 +6,12 @@
#include "rc-core-priv.h"
#include <linux/module.h>
-#define RCMM_UNIT 166667 /* nanosecs */
-#define RCMM_PREFIX_PULSE 416666 /* 166666.666666666*2.5 */
-#define RCMM_PULSE_0 277777 /* 166666.666666666*(1+2/3) */
-#define RCMM_PULSE_1 444444 /* 166666.666666666*(2+2/3) */
-#define RCMM_PULSE_2 611111 /* 166666.666666666*(3+2/3) */
-#define RCMM_PULSE_3 777778 /* 166666.666666666*(4+2/3) */
+#define RCMM_UNIT 166 /* microseconds */
+#define RCMM_PREFIX_PULSE 417 /* 166.666666666666*2.5 */
+#define RCMM_PULSE_0 278 /* 166.666666666666*(1+2/3) */
+#define RCMM_PULSE_1 444 /* 166.666666666666*(2+2/3) */
+#define RCMM_PULSE_2 611 /* 166.666666666666*(3+2/3) */
+#define RCMM_PULSE_3 778 /* 166.666666666666*(4+2/3) */
enum rcmm_state {
STATE_INACTIVE,
@@ -64,8 +64,8 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
int value;
if (!(dev->enabled_protocols & (RC_PROTO_BIT_RCMM32 |
- RC_PROTO_BIT_RCMM24 |
- RC_PROTO_BIT_RCMM12)))
+ RC_PROTO_BIT_RCMM24 |
+ RC_PROTO_BIT_RCMM12)))
return 0;
if (!is_timing_event(ev)) {
@@ -165,7 +165,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
- data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index dd6ee1e339d6..bfc181be1044 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -17,7 +17,7 @@
#include "rc-core-priv.h"
#define SANYO_NBITS (13+13+8+8)
-#define SANYO_UNIT 562500 /* ns */
+#define SANYO_UNIT 563 /* us */
#define SANYO_HEADER_PULSE (16 * SANYO_UNIT)
#define SANYO_HEADER_SPACE (8 * SANYO_UNIT)
#define SANYO_BIT_PULSE (1 * SANYO_UNIT)
@@ -59,7 +59,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "SANYO decode started at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
@@ -158,7 +158,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "SANYO decode failed at count %d state %d (%uus %s)\n",
- data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index 37fab0919131..d09c38c07dbd 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -12,7 +12,7 @@
#include "rc-core-priv.h"
#define SHARP_NBITS 15
-#define SHARP_UNIT 40000 /* ns */
+#define SHARP_UNIT 40 /* us */
#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
@@ -47,7 +47,7 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "Sharp decode started at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
@@ -159,7 +159,7 @@ static int ir_sharp_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "Sharp decode failed at count %d state %d (%uus %s)\n",
- data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir-sony-decoder.c b/drivers/media/rc/ir-sony-decoder.c
index 7d9a7c000c75..d760d52abaa2 100644
--- a/drivers/media/rc/ir-sony-decoder.c
+++ b/drivers/media/rc/ir-sony-decoder.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include "rc-core-priv.h"
-#define SONY_UNIT 600000 /* ns */
+#define SONY_UNIT 600 /* us */
#define SONY_HEADER_PULSE (4 * SONY_UNIT)
#define SONY_HEADER_SPACE (1 * SONY_UNIT)
#define SONY_BIT_0_PULSE (1 * SONY_UNIT)
@@ -48,7 +48,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
goto out;
dev_dbg(&dev->dev, "Sony decode started at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
@@ -154,7 +154,7 @@ static int ir_sony_decode(struct rc_dev *dev, struct ir_raw_event ev)
out:
dev_dbg(&dev->dev, "Sony decode failed at state %d (%uus %s)\n",
- data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c
index 4c3d03876200..ff94f48bda32 100644
--- a/drivers/media/rc/ir-xmp-decoder.c
+++ b/drivers/media/rc/ir-xmp-decoder.c
@@ -12,11 +12,12 @@
#include <linux/module.h>
#include "rc-core-priv.h"
-#define XMP_UNIT 136000 /* ns */
-#define XMP_LEADER 210000 /* ns */
-#define XMP_NIBBLE_PREFIX 760000 /* ns */
-#define XMP_HALFFRAME_SPACE 13800000 /* ns */
-#define XMP_TRAILER_SPACE 20000000 /* should be 80ms but not all dureation supliers can go that high */
+#define XMP_UNIT 136 /* us */
+#define XMP_LEADER 210 /* us */
+#define XMP_NIBBLE_PREFIX 760 /* us */
+#define XMP_HALFFRAME_SPACE 13800 /* us */
+/* should be 80ms but not all duration suppliers can go that high */
+#define XMP_TRAILER_SPACE 20000
enum xmp_state {
STATE_INACTIVE,
@@ -42,7 +43,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "XMP decode started at state %d %d (%uus %s)\n",
- data->state, data->count, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state, data->count, ev.duration, TO_STR(ev.pulse));
switch (data->state) {
@@ -183,7 +184,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev)
}
dev_dbg(&dev->dev, "XMP decode failed at count %d state %d (%uus %s)\n",
- data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->count, data->state, ev.duration, TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 5c7a7500a925..e0242c9b6aeb 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -38,8 +38,8 @@ static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
#define LEN_SAMPLEMODEPROTO 3
#define MIN_FW_VERSION 20
-#define UNIT_NS 21333
-#define MAX_TIMEOUT_NS (UNIT_NS * U16_MAX)
+#define UNIT_US 21
+#define MAX_TIMEOUT_US (UNIT_US * U16_MAX)
#define MAX_PACKET 64
@@ -131,7 +131,7 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
if (v == 0xffff) {
rawir.pulse = false;
} else {
- rawir.duration = v * UNIT_NS;
+ rawir.duration = v * UNIT_US;
ir_raw_event_store_with_timeout(irtoy->rc,
&rawir);
}
@@ -302,7 +302,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
return -ENOMEM;
for (i = 0; i < count; i++) {
- u16 v = DIV_ROUND_CLOSEST(US_TO_NS(txbuf[i]), UNIT_NS);
+ u16 v = DIV_ROUND_CLOSEST(txbuf[i], UNIT_US);
if (!v)
v = 1;
@@ -438,7 +438,7 @@ static int irtoy_probe(struct usb_interface *intf,
rc->tx_ir = irtoy_tx;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->map_name = RC_MAP_RC6_MCE;
- rc->rx_resolution = UNIT_NS;
+ rc->rx_resolution = UNIT_US;
rc->timeout = IR_DEFAULT_TIMEOUT;
/*
@@ -450,8 +450,8 @@ static int irtoy_probe(struct usb_interface *intf,
*
* So, make timeout a largish minimum which works with most protocols.
*/
- rc->min_timeout = MS_TO_NS(40);
- rc->max_timeout = MAX_TIMEOUT_NS;
+ rc->min_timeout = MS_TO_US(40);
+ rc->max_timeout = MAX_TIMEOUT_US;
err = rc_register_device(rc);
if (err)
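With txbuf[] already in microseconds, irtoy_tx() only needs to round to the device's roughly 21.33us units. A standalone illustration of the rounding (DIV_ROUND_CLOSEST is simplified here and assumes non-negative values):

#include <stdio.h>

#define UNIT_US 21
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int txbuf[] = { 500, 21, 4 };	/* durations in us */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned short v = DIV_ROUND_CLOSEST(txbuf[i], UNIT_US);

		if (!v)		/* the driver never sends a zero-length unit */
			v = 1;
		printf("%uus -> %u unit(s)\n", txbuf[i], v);
	}
	return 0;
}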
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 07667c04c1d2..a905113fef6e 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -176,14 +176,14 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int
if (next_one > 0) {
ev.pulse = true;
ev.duration =
- ITE_BITS_TO_NS(next_one, sample_period);
+ ITE_BITS_TO_US(next_one, sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
}
while (next_one < size) {
next_zero = find_next_zero_bit_le(ldata, size, next_one + 1);
ev.pulse = false;
- ev.duration = ITE_BITS_TO_NS(next_zero - next_one, sample_period);
+ ev.duration = ITE_BITS_TO_US(next_zero - next_one, sample_period);
ir_raw_event_store_with_filter(dev->rdev, &ev);
if (next_zero < size) {
@@ -193,7 +193,7 @@ static void ite_decode_bytes(struct ite_dev *dev, const u8 * data, int
next_zero + 1);
ev.pulse = true;
ev.duration =
- ITE_BITS_TO_NS(next_one - next_zero,
+ ITE_BITS_TO_US(next_one - next_zero,
sample_period);
ir_raw_event_store_with_filter
(dev->rdev, &ev);
@@ -1555,9 +1555,9 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev->timeout = IR_DEFAULT_TIMEOUT;
rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
rdev->tx_resolution = ITE_BAUDRATE_DIVISOR *
- itdev->params.sample_period;
+ itdev->params.sample_period / 1000;
/* set up transmitter related values if needed */
if (itdev->params.hw_tx_capable) {
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index f04c4b34ff0c..4954470448a7 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -146,8 +146,8 @@ struct ite_dev {
#define ITE_DEFAULT_CARRIER_FREQ 38000
/* convert bits to us */
-#define ITE_BITS_TO_NS(bits, sample_period) \
-((u32) ((bits) * ITE_BAUDRATE_DIVISOR * sample_period))
+#define ITE_BITS_TO_US(bits, sample_period) \
+((u32)((bits) * ITE_BAUDRATE_DIVISOR * (sample_period) / 1000))
/*
* n in RDCR produces a tolerance of +/- n * 6.25% around the center
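ite-cir keeps its sample_period in nanoseconds internally, so ITE_BITS_TO_US() divides by 1000 to hand rc-core microseconds. A worked example (the divisor and period values below are assumed figures, for illustration only):

#include <stdio.h>

#define ITE_BAUDRATE_DIVISOR 16	/* assumed figure, for illustration only */
#define ITE_BITS_TO_US(bits, sample_period) \
	((unsigned int)((bits) * ITE_BAUDRATE_DIVISOR * (sample_period) / 1000))

int main(void)
{
	unsigned long sample_period = 2170;	/* ns, hypothetical */

	/* 10 bits -> 10 * 16 * 2170 ns = 347200 ns ~= 347 us */
	printf("%u us\n", ITE_BITS_TO_US(10, sample_period));
	return 0;
}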
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 583e4f32a0da..220363b9a868 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -30,12 +30,12 @@ static DEFINE_IDA(lirc_ida);
static struct class *lirc_class;
/**
- * ir_lirc_raw_event() - Send raw IR data to lirc to be relayed to userspace
+ * lirc_raw_event() - Send raw IR data to lirc to be relayed to userspace
*
* @dev: the struct rc_dev descriptor of the device
* @ev: the struct ir_raw_event descriptor of the pulse/space
*/
-void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
+void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
{
unsigned long flags;
struct lirc_fh *fh;
@@ -67,17 +67,16 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
dev->gap = true;
dev->gap_duration = ev.duration;
- sample = LIRC_TIMEOUT(ev.duration / 1000);
+ sample = LIRC_TIMEOUT(ev.duration);
dev_dbg(&dev->dev, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
if (dev->gap) {
- dev->gap_duration += ktime_to_ns(ktime_sub(ktime_get(),
+ dev->gap_duration += ktime_to_us(ktime_sub(ktime_get(),
dev->gap_start));
- /* Convert to ms and cap by LIRC_VALUE_MASK */
- do_div(dev->gap_duration, 1000);
+ /* Cap by LIRC_VALUE_MASK */
dev->gap_duration = min_t(u64, dev->gap_duration,
LIRC_VALUE_MASK);
@@ -89,10 +88,10 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
dev->gap = false;
}
- sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
- LIRC_SPACE(ev.duration / 1000);
+ sample = ev.pulse ? LIRC_PULSE(ev.duration) :
+ LIRC_SPACE(ev.duration);
dev_dbg(&dev->dev, "delivering %uus %s to lirc_dev\n",
- TO_US(ev.duration), TO_STR(ev.pulse));
+ ev.duration, TO_STR(ev.pulse));
}
/*
@@ -112,12 +111,12 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev)
}
/**
- * ir_lirc_scancode_event() - Send scancode data to lirc to be relayed to
+ * lirc_scancode_event() - Send scancode data to lirc to be relayed to
* userspace. This can be called in atomic context.
* @dev: the struct rc_dev descriptor of the device
* @lsc: the struct lirc_scancode describing the decoded scancode
*/
-void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc)
+void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc)
{
unsigned long flags;
struct lirc_fh *fh;
@@ -131,9 +130,9 @@ void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc)
}
spin_unlock_irqrestore(&dev->lirc_fh_lock, flags);
}
-EXPORT_SYMBOL_GPL(ir_lirc_scancode_event);
+EXPORT_SYMBOL_GPL(lirc_scancode_event);
-static int ir_lirc_open(struct inode *inode, struct file *file)
+static int lirc_open(struct inode *inode, struct file *file)
{
struct rc_dev *dev = container_of(inode->i_cdev, struct rc_dev,
lirc_cdev);
@@ -201,7 +200,7 @@ out_fh:
return retval;
}
-static int ir_lirc_close(struct inode *inode, struct file *file)
+static int lirc_close(struct inode *inode, struct file *file)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
@@ -223,8 +222,8 @@ static int ir_lirc_close(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
- size_t n, loff_t *ppos)
+static ssize_t lirc_transmit(struct file *file, const char __user *buf,
+ size_t n, loff_t *ppos)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
@@ -296,8 +295,7 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
}
for (i = 0; i < count; i++)
- /* Convert from NS to US */
- txbuf[i] = DIV_ROUND_UP(raw[i].duration, 1000);
+ txbuf[i] = raw[i].duration;
if (dev->s_tx_carrier) {
int carrier = ir_raw_encode_carrier(scan.rc_proto);
@@ -325,7 +323,7 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
}
for (i = 0; i < count; i++) {
- if (txbuf[i] > IR_MAX_DURATION / 1000 - duration || !txbuf[i]) {
+ if (txbuf[i] > IR_MAX_DURATION - duration || !txbuf[i]) {
ret = -EINVAL;
goto out_kfree;
}
@@ -365,8 +363,7 @@ out_unlock:
return ret;
}
-static long ir_lirc_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long lirc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *dev = fh->rc;
@@ -517,7 +514,7 @@ static long ir_lirc_ioctl(struct file *file, unsigned int cmd,
if (!dev->rx_resolution)
ret = -ENOTTY;
else
- val = dev->rx_resolution / 1000;
+ val = dev->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
@@ -539,31 +536,26 @@ static long ir_lirc_ioctl(struct file *file, unsigned int cmd,
if (!dev->max_timeout)
ret = -ENOTTY;
else
- val = DIV_ROUND_UP(dev->min_timeout, 1000);
+ val = dev->min_timeout;
break;
case LIRC_GET_MAX_TIMEOUT:
if (!dev->max_timeout)
ret = -ENOTTY;
else
- val = dev->max_timeout / 1000;
+ val = dev->max_timeout;
break;
case LIRC_SET_REC_TIMEOUT:
if (!dev->max_timeout) {
ret = -ENOTTY;
- } else if (val > U32_MAX / 1000) {
- /* Check for multiply overflow */
- ret = -EINVAL;
} else {
- u32 tmp = val * 1000;
-
- if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+ if (val < dev->min_timeout || val > dev->max_timeout)
ret = -EINVAL;
else if (dev->s_timeout)
- ret = dev->s_timeout(dev, tmp);
+ ret = dev->s_timeout(dev, val);
else
- dev->timeout = tmp;
+ dev->timeout = val;
}
break;
@@ -571,7 +563,7 @@ static long ir_lirc_ioctl(struct file *file, unsigned int cmd,
if (!dev->timeout)
ret = -ENOTTY;
else
- val = DIV_ROUND_UP(dev->timeout, 1000);
+ val = dev->timeout;
break;
case LIRC_SET_REC_TIMEOUT_REPORTS:
@@ -593,7 +585,7 @@ out:
return ret;
}
-static __poll_t ir_lirc_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t lirc_poll(struct file *file, struct poll_table_struct *wait)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
@@ -616,8 +608,8 @@ static __poll_t ir_lirc_poll(struct file *file, struct poll_table_struct *wait)
return events;
}
-static ssize_t ir_lirc_read_mode2(struct file *file, char __user *buffer,
- size_t length)
+static ssize_t lirc_read_mode2(struct file *file, char __user *buffer,
+ size_t length)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
@@ -654,8 +646,8 @@ static ssize_t ir_lirc_read_mode2(struct file *file, char __user *buffer,
return copied;
}
-static ssize_t ir_lirc_read_scancode(struct file *file, char __user *buffer,
- size_t length)
+static ssize_t lirc_read_scancode(struct file *file, char __user *buffer,
+ size_t length)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
@@ -693,8 +685,8 @@ static ssize_t ir_lirc_read_scancode(struct file *file, char __user *buffer,
return copied;
}
-static ssize_t ir_lirc_read(struct file *file, char __user *buffer,
- size_t length, loff_t *ppos)
+static ssize_t lirc_read(struct file *file, char __user *buffer, size_t length,
+ loff_t *ppos)
{
struct lirc_fh *fh = file->private_data;
struct rc_dev *rcdev = fh->rc;
@@ -706,20 +698,20 @@ static ssize_t ir_lirc_read(struct file *file, char __user *buffer,
return -ENODEV;
if (fh->rec_mode == LIRC_MODE_MODE2)
- return ir_lirc_read_mode2(file, buffer, length);
+ return lirc_read_mode2(file, buffer, length);
else /* LIRC_MODE_SCANCODE */
- return ir_lirc_read_scancode(file, buffer, length);
+ return lirc_read_scancode(file, buffer, length);
}
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
- .write = ir_lirc_transmit_ir,
- .unlocked_ioctl = ir_lirc_ioctl,
+ .write = lirc_transmit,
+ .unlocked_ioctl = lirc_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .read = ir_lirc_read,
- .poll = ir_lirc_poll,
- .open = ir_lirc_open,
- .release = ir_lirc_close,
+ .read = lirc_read,
+ .poll = lirc_poll,
+ .open = lirc_open,
+ .release = lirc_close,
.llseek = no_llseek,
};
@@ -730,7 +722,7 @@ static void lirc_release_device(struct device *ld)
put_device(&rcdev->dev);
}
-int ir_lirc_register(struct rc_dev *dev)
+int lirc_register(struct rc_dev *dev)
{
const char *rx_type, *tx_type;
int err, minor;
@@ -784,7 +776,7 @@ out_ida:
return err;
}
-void ir_lirc_unregister(struct rc_dev *dev)
+void lirc_unregister(struct rc_dev *dev)
{
unsigned long flags;
struct lirc_fh *fh;
@@ -811,8 +803,7 @@ int __init lirc_dev_init(void)
return PTR_ERR(lirc_class);
}
- retval = alloc_chrdev_region(&lirc_base_dev, 0, RC_DEV_MAX,
- "BaseRemoteCtl");
+ retval = alloc_chrdev_region(&lirc_base_dev, 0, RC_DEV_MAX, "lirc");
if (retval) {
class_destroy(lirc_class);
pr_err("alloc_chrdev_region failed\n");
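lirc mode2 samples carry a 24-bit duration plus an 8-bit type code; since that duration field is defined in microseconds, the old "/ 1000" conversions above simply disappear, and gap durations are still capped to LIRC_VALUE_MASK (about 16.7 seconds). A standalone sketch of the packing (macro bodies mirror include/uapi/linux/lirc.h):

#include <stdio.h>

#define LIRC_VALUE_MASK		0x00FFFFFF
#define LIRC_MODE2_SPACE	0x00000000
#define LIRC_MODE2_PULSE	0x01000000
#define LIRC_MODE2_TIMEOUT	0x03000000

#define LIRC_SPACE(v)	(((v) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
#define LIRC_PULSE(v)	(((v) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
#define LIRC_TIMEOUT(v)	(((v) & LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)

int main(void)
{
	/* a 563us NEC unit pulse and a 100ms timeout report */
	printf("pulse:   0x%08x\n", LIRC_PULSE(563));		/* 0x01000233 */
	printf("timeout: 0x%08x\n", LIRC_TIMEOUT(100000));	/* 0x030186a0 */
	return 0;
}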
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 98681ba10428..f1dbd059ed08 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1070,7 +1070,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
struct mceusb_dev *ir = dev->priv;
unsigned int units;
- units = DIV_ROUND_CLOSEST(timeout, US_TO_NS(MCE_TIME_UNIT));
+ units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT);
cmdbuf[2] = units >> 8;
cmdbuf[3] = units;
@@ -1196,7 +1196,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
- ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
+ ir->rc->timeout = (*hi << 8 | *lo) * MCE_TIME_UNIT;
break;
case MCE_RSP_EQIRNUMPORTS:
ir->num_txports = *hi;
@@ -1291,9 +1291,9 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->pulse_tunit += rawir.duration;
ir->pulse_count++;
}
- rawir.duration *= US_TO_NS(MCE_TIME_UNIT);
+ rawir.duration *= MCE_TIME_UNIT;
- dev_dbg(ir->dev, "Storing %s %u ns (%02x)",
+ dev_dbg(ir->dev, "Storing %s %u us (%02x)",
rawir.pulse ? "pulse" : "space",
rawir.duration, ir->buf_in[i]);
@@ -1605,8 +1605,8 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rc->min_timeout = US_TO_NS(MCE_TIME_UNIT);
- rc->timeout = MS_TO_NS(100);
+ rc->min_timeout = MCE_TIME_UNIT;
+ rc->timeout = MS_TO_US(100);
if (!mceusb_model[ir->model].broken_irtimeout) {
rc->s_timeout = mceusb_set_timeout;
rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 51c6dd3406a0..dad55950dfc6 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -86,7 +86,7 @@ static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
duration = readl_relaxed(ir->reg + IR_DEC_REG1);
duration = FIELD_GET(REG1_TIME_IV_MASK, duration);
- rawir.duration = US_TO_NS(duration * MESON_TRATE);
+ rawir.duration = duration * MESON_TRATE;
status = readl_relaxed(ir->reg + IR_DEC_STATUS);
rawir.pulse = !!(status & STATUS_IR_DEC_IN);
@@ -133,7 +133,7 @@ static int meson_ir_probe(struct platform_device *pdev)
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- ir->rc->rx_resolution = US_TO_NS(MESON_TRATE);
+ ir->rc->rx_resolution = MESON_TRATE;
ir->rc->min_timeout = 1;
ir->rc->timeout = IR_DEFAULT_TIMEOUT;
ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index a0c94ab322c7..5051a5e5244b 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -52,8 +52,8 @@
#define MTK_IR_END(v, p) ((v) == MTK_MAX_SAMPLES && (p) == 0)
/* Number of registers to record the pulse width */
#define MTK_CHKDATA_SZ 17
-/* Sample period in ns */
-#define MTK_IR_SAMPLE 46000
+/* Sample period in us */
+#define MTK_IR_SAMPLE 46
enum mtk_fields {
/* Register to setting software sampling period */
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 52d246dc5b3d..8a37f083fe3d 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -653,8 +653,7 @@ static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
/* Inspect the ir samples */
for (i = 0, count = 0; i < ret && count < WAKEUP_MAX_SIZE; ++i) {
- /* NS to US */
- val = DIV_ROUND_UP(raw[i].duration, 1000L) / SAMPLE_PERIOD;
+ val = raw[i].duration / SAMPLE_PERIOD;
/* Split too large values into several smaller ones */
while (val > 0 && count < WAKEUP_MAX_SIZE) {
@@ -721,8 +720,7 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
- rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
- * SAMPLE_PERIOD);
+ rawir.duration = (sample & BUF_LEN_MASK) * SAMPLE_PERIOD;
nvt_dbg("Storing %s with duration %d",
rawir.pulse ? "pulse" : "space", rawir.duration);
@@ -1000,9 +998,9 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
rdev->input_id.version = nvt->chip_minor;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
- rdev->timeout = MS_TO_NS(100);
+ rdev->timeout = MS_TO_US(100);
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
- rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+ rdev->rx_resolution = CIR_SAMPLE_PERIOD;
#if 0
rdev->min_timeout = XYZ;
rdev->max_timeout = XYZ;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 0cf301d1e163..ed7d93beaa28 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -94,7 +94,7 @@ struct nvt_dev {
#define CIR_IOREG_LENGTH 0x0f
/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL */
-#define CIR_RX_LIMIT_COUNT (IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD))
+#define CIR_RX_LIMIT_COUNT (IR_DEFAULT_TIMEOUT / SAMPLE_PERIOD)
/* CIR Regs */
#define CIR_IRCON 0x00
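CIR_RX_LIMIT_COUNT now divides two microsecond quantities directly. A worked example, assuming IR_DEFAULT_TIMEOUT = MS_TO_US(125) and a 50us sample period as in this tree:

#include <stdio.h>

#define MS_TO_US(msec)		((msec) * 1000)
#define IR_DEFAULT_TIMEOUT	MS_TO_US(125)	/* 125000 us */
#define SAMPLE_PERIOD		50		/* us */
#define CIR_RX_LIMIT_COUNT	(IR_DEFAULT_TIMEOUT / SAMPLE_PERIOD)

int main(void)
{
	/* 125000 / 50 = 2500 samples; the 16-bit SLCH:SLCL pair holds it */
	printf("limit = %d (0x%04x)\n", CIR_RX_LIMIT_COUNT, CIR_RX_LIMIT_COUNT);
	return 0;
}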
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 1eeab277a08e..62f032dffd33 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -193,7 +193,6 @@ static inline bool is_timing_event(struct ir_raw_event ev)
return !ev.carrier_report && !ev.reset;
}
-#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
/* functions for IR encoders */
@@ -322,20 +321,20 @@ void ir_raw_init(void);
#ifdef CONFIG_LIRC
int lirc_dev_init(void);
void lirc_dev_exit(void);
-void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
-void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
-int ir_lirc_register(struct rc_dev *dev);
-void ir_lirc_unregister(struct rc_dev *dev);
+void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
+void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
+int lirc_register(struct rc_dev *dev);
+void lirc_unregister(struct rc_dev *dev);
struct rc_dev *rc_dev_get_from_fd(int fd);
#else
static inline int lirc_dev_init(void) { return 0; }
static inline void lirc_dev_exit(void) {}
-static inline void ir_lirc_raw_event(struct rc_dev *dev,
- struct ir_raw_event ev) { }
-static inline void ir_lirc_scancode_event(struct rc_dev *dev,
- struct lirc_scancode *lsc) { }
-static inline int ir_lirc_register(struct rc_dev *dev) { return 0; }
-static inline void ir_lirc_unregister(struct rc_dev *dev) { }
+static inline void lirc_raw_event(struct rc_dev *dev,
+ struct ir_raw_event ev) { }
+static inline void lirc_scancode_event(struct rc_dev *dev,
+ struct lirc_scancode *lsc) { }
+static inline int lirc_register(struct rc_dev *dev) { return 0; }
+static inline void lirc_unregister(struct rc_dev *dev) { }
#endif
/*
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 39dd46bbd0c1..c65bba4ec473 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -42,7 +42,7 @@ static int ir_raw_event_thread(void *data)
if (dev->enabled_protocols &
handler->protocols || !handler->protocols)
handler->decode(dev, ev);
- ir_lirc_raw_event(dev, ev);
+ lirc_raw_event(dev, ev);
raw->prev_ev = ev;
}
mutex_unlock(&ir_raw_handler_lock);
@@ -77,7 +77,7 @@ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
return -EINVAL;
dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
- TO_US(ev->duration), TO_STR(ev->pulse));
+ ev->duration, TO_STR(ev->pulse));
if (!kfifo_put(&dev->raw->kfifo, *ev)) {
dev_err(&dev->dev, "IR event FIFO is full!\n");
@@ -108,7 +108,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
return -EINVAL;
now = ktime_get();
- ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+ ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event));
ev.pulse = !pulse;
return ir_raw_event_store_with_timeout(dev, &ev);
@@ -275,7 +275,7 @@ static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
if (timeout == 0)
timeout = IR_DEFAULT_TIMEOUT;
else
- timeout += MS_TO_NS(10);
+ timeout += MS_TO_US(10);
if (timeout < dev->min_timeout)
timeout = dev->min_timeout;
@@ -561,17 +561,17 @@ static void ir_raw_edge_handle(struct timer_list *t)
spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
interval = ktime_sub(ktime_get(), dev->raw->last_event);
- if (ktime_to_ns(interval) >= dev->timeout) {
+ if (ktime_to_us(interval) >= dev->timeout) {
struct ir_raw_event ev = {
.timeout = true,
- .duration = ktime_to_ns(interval)
+ .duration = ktime_to_us(interval)
};
ir_raw_event_store(dev, &ev);
} else {
mod_timer(&dev->raw->edge_handle,
- jiffies + nsecs_to_jiffies(dev->timeout -
- ktime_to_ns(interval)));
+ jiffies + usecs_to_jiffies(dev->timeout -
+ ktime_to_us(interval)));
}
spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);
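The edge handler measures elapsed time with ktime and now compares it in microseconds, re-arming the timer for the microseconds that remain. A condensed sketch of the pattern (demo_edge_handle() is a hypothetical distillation; ktime_to_us() and usecs_to_jiffies() are the real helpers):

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/timer.h>

static void demo_edge_handle(struct timer_list *t, ktime_t last_event,
			     u32 timeout_us)
{
	ktime_t interval = ktime_sub(ktime_get(), last_event);

	if (ktime_to_us(interval) >= timeout_us) {
		/* idle long enough: synthesize a timeout event upstream */
	} else {
		/* re-arm for the microseconds that are still outstanding */
		mod_timer(t, jiffies +
			  usecs_to_jiffies(timeout_us - ktime_to_us(interval)));
	}
}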
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index ef8b83b707df..1ba3f96ffa7d 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -113,7 +113,7 @@ static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
for (i = 0; i < count; i++) {
rawir.pulse = i % 2 ? false : true;
- rawir.duration = txbuf[i] * 1000;
+ rawir.duration = txbuf[i];
if (rawir.duration)
ir_raw_event_store_with_filter(dev, &rawir);
}
@@ -219,11 +219,11 @@ static int __init loop_init(void)
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->allowed_wakeup_protocols = RC_PROTO_BIT_ALL_IR_ENCODER;
rc->encode_wakeup = true;
- rc->timeout = 100 * 1000 * 1000; /* 100 ms */
+ rc->timeout = MS_TO_US(100); /* 100 ms */
rc->min_timeout = 1;
rc->max_timeout = UINT_MAX;
- rc->rx_resolution = 1000;
- rc->tx_resolution = 1000;
+ rc->rx_resolution = 1;
+ rc->tx_resolution = 1;
rc->s_tx_mask = loop_set_tx_mask;
rc->s_tx_carrier = loop_set_tx_carrier;
rc->s_tx_duty_cycle = loop_set_tx_duty_cycle;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index dee8a9f3d80a..1d811e5ffb55 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -747,7 +747,7 @@ void rc_repeat(struct rc_dev *dev)
};
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
- ir_lirc_scancode_event(dev, &sc);
+ lirc_scancode_event(dev, &sc);
spin_lock_irqsave(&dev->keylock, flags);
@@ -791,7 +791,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
};
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
- ir_lirc_scancode_event(dev, &sc);
+ lirc_scancode_event(dev, &sc);
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
@@ -1946,7 +1946,7 @@ int rc_register_device(struct rc_dev *dev)
* keycodes with rc_keydown, so lirc must be registered first.
*/
if (dev->allowed_protocols != RC_PROTO_BIT_CEC) {
- rc = ir_lirc_register(dev);
+ rc = lirc_register(dev);
if (rc < 0)
goto out_dev;
}
@@ -1972,7 +1972,7 @@ out_rx:
rc_free_rx_device(dev);
out_lirc:
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
- ir_lirc_unregister(dev);
+ lirc_unregister(dev);
out_dev:
device_del(&dev->dev);
out_rx_free:
@@ -2036,7 +2036,7 @@ void rc_unregister_device(struct rc_dev *dev)
* that userspace polling will get notified.
*/
if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
- ir_lirc_unregister(dev);
+ lirc_unregister(dev);
device_del(&dev->dev);
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index aad9526f3754..2cf3377ec63a 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -340,7 +340,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
{
struct ir_raw_event rawir = {};
struct device *dev;
- unsigned int i, sig_size, single_len, offset, val;
+ unsigned int i, sig_size, offset, val;
u32 mod_freq;
dev = rr3->dev;
@@ -361,7 +361,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
for (i = 0; i < sig_size; i++) {
offset = rr3->irdata.sigdata[i];
val = get_unaligned_be16(&rr3->irdata.lens[offset]);
- single_len = redrat3_len_to_us(val);
/* we should always get pulse/space/pulse/space samples */
if (i % 2)
@@ -369,7 +368,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
else
rawir.pulse = true;
- rawir.duration = US_TO_NS(single_len);
+ rawir.duration = redrat3_len_to_us(val);
/* cap the value to IR_MAX_DURATION */
rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
IR_MAX_DURATION : rawir.duration;
@@ -495,7 +494,7 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
return timeout;
}
-static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutns)
+static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutus)
{
struct redrat3_dev *rr3 = rc_dev->priv;
struct usb_device *udev = rr3->udev;
@@ -507,7 +506,7 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutns)
if (!timeout)
return -ENOMEM;
- *timeout = cpu_to_be32(redrat3_us_to_len(timeoutns / 1000));
+ *timeout = cpu_to_be32(redrat3_us_to_len(timeoutus));
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout),
@@ -947,15 +946,15 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->dev.parent = dev;
rc->priv = rr3;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
- rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
- rc->timeout = US_TO_NS(redrat3_get_timeout(rr3));
+ rc->min_timeout = MS_TO_US(RR3_RX_MIN_TIMEOUT);
+ rc->max_timeout = MS_TO_US(RR3_RX_MAX_TIMEOUT);
+ rc->timeout = redrat3_get_timeout(rr3);
rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->s_carrier_report = redrat3_wideband_receiver;
rc->driver_name = DRIVER_NAME;
- rc->rx_resolution = US_TO_NS(2);
+ rc->rx_resolution = 2;
rc->map_name = RC_MAP_HAUPPAUGE;
ret = rc_register_device(rc);
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index d77507ba0fb5..8cc28c92d05d 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -269,7 +269,7 @@ static void frbwrite(unsigned int l, bool is_pulse)
if (ptr > 0 && is_pulse) {
pulse += l;
- if (pulse > 250000) {
+ if (pulse > 250) {
ev.duration = space;
ev.pulse = false;
ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
@@ -283,13 +283,13 @@ static void frbwrite(unsigned int l, bool is_pulse)
}
if (!is_pulse) {
if (ptr == 0) {
- if (l > 20000000) {
+ if (l > 20000) {
space = l;
ptr++;
return;
}
} else {
- if (l > 20000000) {
+ if (l > 20000) {
space += pulse;
if (space > IR_MAX_DURATION)
space = IR_MAX_DURATION;
@@ -376,7 +376,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
sense = sense ? 0 : 1;
}
} else {
- data = ktime_to_ns(delkt);
+ data = ktime_to_us(delkt);
}
frbwrite(data, !(dcd ^ sense));
serial_ir.lastkt = kt;
@@ -528,7 +528,7 @@ static int serial_ir_probe(struct platform_device *dev)
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- rcdev->rx_resolution = 250000;
+ rcdev->rx_resolution = 250;
serial_ir.rcdev = rcdev;
@@ -547,7 +547,7 @@ static int serial_ir_probe(struct platform_device *dev)
/* Reserve io region. */
if ((iommap &&
- (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
+ (devm_request_mem_region(&dev->dev, iommap, 8UL << ioshift,
KBUILD_MODNAME) == NULL)) ||
(!iommap && (devm_request_region(&dev->dev, io, 8,
KBUILD_MODNAME) == NULL))) {
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index 80b3a6736dbd..6ec96dc34586 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -110,7 +110,7 @@ static void add_read_queue(int flag, unsigned long val)
} else {
val += TIME_CONST / 2;
}
- ev.duration = US_TO_NS(val);
+ ev.duration = val;
ir_raw_event_store_with_filter(rcdev, &ev);
}
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index 1dc4e2e33705..3237fef5d502 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -134,12 +134,12 @@ static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
mark /= dev->sample_div;
}
- ev.duration = US_TO_NS(mark);
+ ev.duration = mark;
ev.pulse = true;
ir_raw_event_store(dev->rdev, &ev);
if (!last_symbol) {
- ev.duration = US_TO_NS(symbol);
+ ev.duration = symbol;
ev.pulse = false;
ir_raw_event_store(dev->rdev, &ev);
} else {
@@ -292,7 +292,7 @@ static int st_rc_probe(struct platform_device *pdev)
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* rx sampling rate is 10Mhz */
rdev->rx_resolution = 100;
- rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
+ rdev->timeout = MAX_SYMB_TIME;
rdev->priv = rc_dev;
rdev->open = st_rc_open;
rdev->close = st_rc_close;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 79a41fc7161c..9f3cd9fb6b6e 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -137,7 +137,6 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
} else {
rawir.duration = delta;
rawir.duration -= sz->sum;
- rawir.duration = US_TO_NS(rawir.duration);
rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
IR_MAX_DURATION : rawir.duration;
}
@@ -151,7 +150,6 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration = US_TO_NS(rawir.duration);
rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
IR_MAX_DURATION : rawir.duration;
sz_push(sz, rawir);
@@ -172,7 +170,6 @@ static void sz_push_full_space(struct streamzap_ir *sz,
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration = US_TO_NS(rawir.duration);
sz_push(sz, rawir);
}
@@ -403,13 +400,12 @@ static int streamzap_probe(struct usb_interface *intf,
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->timeout_enabled = true;
- sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
- IR_MAX_DURATION) | 0x03000000);
+ sz->rdev->timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
- sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+ sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION;
+ sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION;
#endif
sz->signal_start = ktime_get_real();
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index e222b4c98be4..ddee6ee37bab 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -241,8 +241,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
ir->rc->dev.parent = dev;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
/* Frequency after IR internal divider with sample period in us */
- ir->rc->rx_resolution = (1000000000ul / (b_clk_freq / 64));
- ir->rc->timeout = MS_TO_NS(SUNXI_IR_TIMEOUT);
+ ir->rc->rx_resolution = (USEC_PER_SEC / (b_clk_freq / 64));
+ ir->rc->timeout = MS_TO_US(SUNXI_IR_TIMEOUT);
ir->rc->driver_name = SUNXI_IR_DEV;
ret = rc_register_device(ir->rc);
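The sunxi resolution computation switches from a hand-written 1000000000ul to USEC_PER_SEC, giving the sample period directly in microseconds. A worked example with an assumed 8 MHz base clock:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long b_clk_freq = 8000000;	/* Hz, assumed example */

	/* sample clock = b_clk_freq / 64 = 125 kHz -> one sample per 8 us */
	printf("rx_resolution = %lu us\n", USEC_PER_SEC / (b_clk_freq / 64));
	return 0;
}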
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 011a8b620d86..629787d53ee1 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -20,8 +20,8 @@
* messages per second (!), whether IR is idle or not.
*/
#define NUM_URBS 4
-#define NS_PER_BYTE 62500
-#define NS_PER_BIT (NS_PER_BYTE/8)
+#define US_PER_BYTE 62
+#define US_PER_BIT (US_PER_BYTE / 8)
struct ttusbir {
struct rc_dev *rc;
@@ -117,13 +117,13 @@ static void ttusbir_process_ir_data(struct ttusbir *tt, uint8_t *buf)
switch (v) {
case 0xfe:
rawir.pulse = false;
- rawir.duration = NS_PER_BYTE;
+ rawir.duration = US_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
case 0:
rawir.pulse = true;
- rawir.duration = NS_PER_BYTE;
+ rawir.duration = US_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
@@ -137,12 +137,12 @@ static void ttusbir_process_ir_data(struct ttusbir *tt, uint8_t *buf)
rawir.pulse = false;
}
- rawir.duration = NS_PER_BIT * (8 - b);
+ rawir.duration = US_PER_BIT * (8 - b);
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
rawir.pulse = !rawir.pulse;
- rawir.duration = NS_PER_BIT * b;
+ rawir.duration = US_PER_BIT * b;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
@@ -311,10 +311,10 @@ static int ttusbir_probe(struct usb_interface *intf,
rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
/*
- * The precision is NS_PER_BIT, but since every 8th bit can be
- * overwritten with garbage the accuracy is at best 2 * NS_PER_BIT.
+ * The precision is US_PER_BIT, but since every 8th bit can be
+ * overwritten with garbage the accuracy is at best 2 * US_PER_BIT.
*/
- rc->rx_resolution = NS_PER_BIT;
+ rc->rx_resolution = 2 * US_PER_BIT;
ret = rc_register_device(rc);
if (ret) {
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 630e376d3688..aed23ca0fa6c 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -354,7 +354,6 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
{
u8 irdata;
struct ir_raw_event rawir = {};
- unsigned duration;
/* Since RXHDLEV is set, at least 8 bytes are in the FIFO */
while (inb(data->sbase + WBCIR_REG_SP3_LSR) & WBCIR_RX_AVAIL) {
@@ -362,13 +361,12 @@ wbcir_irq_rx(struct wbcir_data *data, struct pnp_dev *device)
if (data->rxstate == WBCIR_RXSTATE_ERROR)
continue;
- duration = ((irdata & 0x7F) + 1) *
+ rawir.duration = ((irdata & 0x7F) + 1) *
(data->carrier_report_enabled ? 2 : 10);
rawir.pulse = irdata & 0x80 ? false : true;
- rawir.duration = US_TO_NS(duration);
if (rawir.pulse)
- data->pulse_duration += duration;
+ data->pulse_duration += rawir.duration;
ir_raw_event_store_with_filter(data->dev, &rawir);
}
@@ -519,7 +517,7 @@ wbcir_set_carrier_report(struct rc_dev *dev, int enable)
/* Set a higher sampling resolution if carrier reports are enabled */
wbcir_select_bank(data, WBCIR_BANK_2);
- data->dev->rx_resolution = US_TO_NS(enable ? 2 : 10);
+ data->dev->rx_resolution = enable ? 2 : 10;
outb(enable ? 0x03 : 0x0f, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
@@ -1076,7 +1074,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->min_timeout = 1;
data->dev->timeout = IR_DEFAULT_TIMEOUT;
data->dev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- data->dev->rx_resolution = US_TO_NS(2);
+ data->dev->rx_resolution = 2;
data->dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
data->dev->allowed_wakeup_protocols = RC_PROTO_BIT_NEC |
RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | RC_PROTO_BIT_RC5 |
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
index 4a3f2cc4ef18..98d0b43608ad 100644
--- a/drivers/media/rc/xbox_remote.c
+++ b/drivers/media/rc/xbox_remote.c
@@ -157,7 +157,7 @@ static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
rdev->device_name = xbox_remote->rc_name;
rdev->input_phys = xbox_remote->rc_phys;
- rdev->timeout = MS_TO_NS(10);
+ rdev->timeout = MS_TO_US(10);
usb_to_input_id(xbox_remote->udev, &rdev->input_id);
rdev->dev.parent = &xbox_remote->interface->dev;
diff --git a/drivers/media/test-drivers/Kconfig b/drivers/media/test-drivers/Kconfig
index 188381c85593..e27d6602545d 100644
--- a/drivers/media/test-drivers/Kconfig
+++ b/drivers/media/test-drivers/Kconfig
@@ -24,3 +24,19 @@ config VIDEO_VIM2M
source "drivers/media/test-drivers/vicodec/Kconfig"
endif #V4L_TEST_DRIVERS
+
+menuconfig DVB_TEST_DRIVERS
+ bool "DVB test drivers"
+ depends on DVB_CORE && MEDIA_SUPPORT && I2C
+ help
+ Enables DVB test drivers.
+
+ This enables the DVB test drivers. They are meant as an aid for
+ DVB device driver writers and developers working on userspace
+ media applications.
+
+if DVB_TEST_DRIVERS
+
+source "drivers/media/test-drivers/vidtv/Kconfig"
+
+endif #DVB_TEST_DRIVERS
diff --git a/drivers/media/test-drivers/Makefile b/drivers/media/test-drivers/Makefile
index 74410d3a9f2d..9f0e4ebb2efe 100644
--- a/drivers/media/test-drivers/Makefile
+++ b/drivers/media/test-drivers/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_VIDEO_VIMC) += vimc/
obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
+obj-$(CONFIG_DVB_VIDTV) += vidtv/
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 71928e30dae8..0e115683f8da 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1310,7 +1310,7 @@ static int vicodec_subscribe_event(struct v4l2_fh *fh,
case V4L2_EVENT_SOURCE_CHANGE:
if (ctx->is_enc)
return -EINVAL;
- /* fall through */
+ fallthrough;
case V4L2_EVENT_EOS:
if (ctx->is_stateless)
return -EINVAL;
@@ -1671,8 +1671,8 @@ static void vicodec_stop_streaming(struct vb2_queue *q)
ctx->comp_size = 0;
ctx->header_size = 0;
ctx->comp_magic_cnt = 0;
- ctx->comp_has_frame = 0;
- ctx->comp_has_next_frame = 0;
+ ctx->comp_has_frame = false;
+ ctx->comp_has_next_frame = false;
}
}
diff --git a/drivers/media/test-drivers/vidtv/Kconfig b/drivers/media/test-drivers/vidtv/Kconfig
new file mode 100644
index 000000000000..22c4fd39461f
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DVB_VIDTV
+ tristate "Virtual DVB Driver (vidtv)"
+ depends on DVB_CORE && MEDIA_SUPPORT && I2C
+ help
+ The virtual DVB test driver serves as a reference DVB driver and helps
+ validate the existing APIs in the media subsystem. It can also aid developers
+ working on userspace applications.
+
+
+ When in doubt, say N.
diff --git a/drivers/media/test-drivers/vidtv/Makefile b/drivers/media/test-drivers/vidtv/Makefile
new file mode 100644
index 000000000000..330089e3b70c
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+dvb-vidtv-tuner-objs := vidtv_tuner.o
+dvb-vidtv-demod-objs := vidtv_demod.o
+dvb-vidtv-bridge-objs := vidtv_bridge.o vidtv_common.o vidtv_ts.o vidtv_psi.o \
+ vidtv_pes.o vidtv_s302m.o vidtv_channel.o vidtv_mux.o
+
+obj-$(CONFIG_DVB_VIDTV) += dvb-vidtv-tuner.o dvb-vidtv-demod.o \
+ dvb-vidtv-bridge.o
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
new file mode 100644
index 000000000000..74b054947bbe
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Virtual DTV test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * When this module is loaded, it will attempt to modprobe
+ * 'dvb_vidtv_tuner' and 'dvb_vidtv_demod'.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
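+
+/*
+ * Hypothetical usage sketch: all of the parameters below are defined in
+ * this file, but the values are made up for illustration only.
+ *
+ *   modprobe dvb-vidtv-bridge mux_rate_kbytes_sec=8192 pcr_pid=0x200 \
+ *            drop_tslock_prob_on_low_snr=3 recover_tslock_prob_on_good_snr=5
+ */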
+
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/dev_printk.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "vidtv_bridge.h"
+#include "vidtv_demod.h"
+#include "vidtv_tuner.h"
+#include "vidtv_ts.h"
+#include "vidtv_mux.h"
+#include "vidtv_common.h"
+
+#define TUNER_DEFAULT_ADDR 0x68
+#define DEMOD_DEFAULT_ADDR 0x60
+
+/* LNBf fake parameters: ranges used by a Universal (extended) European LNBf */
+#define LNB_CUT_FREQUENCY 11700000
+#define LNB_LOW_FREQ 9750000
+#define LNB_HIGH_FREQ 10600000
+
+static unsigned int drop_tslock_prob_on_low_snr;
+module_param(drop_tslock_prob_on_low_snr, uint, 0);
+MODULE_PARM_DESC(drop_tslock_prob_on_low_snr,
+ "Probability of losing the TS lock if the signal quality is bad");
+
+static unsigned int recover_tslock_prob_on_good_snr;
+module_param(recover_tslock_prob_on_good_snr, uint, 0);
+MODULE_PARM_DESC(recover_tslock_prob_on_good_snr,
+ "Probability recovering the TS lock when the signal improves");
+
+static unsigned int mock_power_up_delay_msec;
+module_param(mock_power_up_delay_msec, uint, 0);
+MODULE_PARM_DESC(mock_power_up_delay_msec, "Simulate a power up delay");
+
+static unsigned int mock_tune_delay_msec;
+module_param(mock_tune_delay_msec, uint, 0);
+MODULE_PARM_DESC(mock_tune_delay_msec, "Simulate a tune delay");
+
+static unsigned int vidtv_valid_dvb_t_freqs[NUM_VALID_TUNER_FREQS] = {
+ 474000000
+};
+
+module_param_array(vidtv_valid_dvb_t_freqs, uint, NULL, 0);
+MODULE_PARM_DESC(vidtv_valid_dvb_t_freqs,
+ "Valid DVB-T frequencies to simulate, in Hz");
+
+static unsigned int vidtv_valid_dvb_c_freqs[NUM_VALID_TUNER_FREQS] = {
+ 474000000
+};
+
+module_param_array(vidtv_valid_dvb_c_freqs, uint, NULL, 0);
+MODULE_PARM_DESC(vidtv_valid_dvb_c_freqs,
+ "Valid DVB-C frequencies to simulate, in Hz");
+
+static unsigned int vidtv_valid_dvb_s_freqs[NUM_VALID_TUNER_FREQS] = {
+ 11362000
+};
+module_param_array(vidtv_valid_dvb_s_freqs, uint, NULL, 0);
+MODULE_PARM_DESC(vidtv_valid_dvb_s_freqs,
+ "Valid DVB-S/S2 frequencies to simulate at Ku-Band, in kHz");
+
+static unsigned int max_frequency_shift_hz;
+module_param(max_frequency_shift_hz, uint, 0);
+MODULE_PARM_DESC(max_frequency_shift_hz,
+ "Maximum shift in HZ allowed when tuning in a channel");
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums);
+
+/*
+ * Influences the signal acquisition time. See ISO/IEC 13818-1:2000, p. 113.
+ */
+static unsigned int si_period_msec = 40;
+module_param(si_period_msec, uint, 0);
+MODULE_PARM_DESC(si_period_msec, "How often to send SI packets. Default: 40ms");
+
+static unsigned int pcr_period_msec = 40;
+module_param(pcr_period_msec, uint, 0);
+MODULE_PARM_DESC(pcr_period_msec, "How often to send PCR packets. Default: 40ms");
+
+static unsigned int mux_rate_kbytes_sec = 4096;
+module_param(mux_rate_kbytes_sec, uint, 0);
+MODULE_PARM_DESC(mux_rate_kbytes_sec, "Mux rate: will pad stream if below");
+
+static unsigned int pcr_pid = 0x200;
+module_param(pcr_pid, uint, 0);
+MODULE_PARM_DESC(pcr_pid, "PCR PID for all channels: defaults to 0x200");
+
+static unsigned int mux_buf_sz_pkts;
+module_param(mux_buf_sz_pkts, uint, 0);
+MODULE_PARM_DESC(mux_buf_sz_pkts, "Size for the internal mux buffer in multiples of 188 bytes");
+
+#define MUX_BUF_MIN_SZ 90164
+#define MUX_BUF_MAX_SZ (MUX_BUF_MIN_SZ * 10)
+
+static u32 vidtv_bridge_mux_buf_sz_for_mux_rate(void)
+{
+ u32 max_elapsed_time_msecs = VIDTV_MAX_SLEEP_USECS / USEC_PER_MSEC;
+ u32 nbytes_expected;
+ u32 mux_buf_sz;
+
+ nbytes_expected = mux_rate_kbytes_sec;
+ nbytes_expected *= max_elapsed_time_msecs;
+
+ mux_buf_sz = roundup(nbytes_expected, TS_PACKET_LEN);
+ mux_buf_sz += mux_buf_sz / 10;
+
+ if (mux_buf_sz < MUX_BUF_MIN_SZ)
+ mux_buf_sz = MUX_BUF_MIN_SZ;
+
+ if (mux_buf_sz > MUX_BUF_MAX_SZ)
+ mux_buf_sz = MUX_BUF_MAX_SZ;
+
+ return mux_buf_sz;
+}
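+
+/*
+ * Worked example with the defaults: mux_rate_kbytes_sec = 4096 and
+ * VIDTV_MAX_SLEEP_USECS = 20000 give 4096 * 20 = 81920 bytes expected,
+ * roundup(81920, 188) = 81968, plus 10% slack = 90164 bytes, which is
+ * exactly MUX_BUF_MIN_SZ.
+ */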
+
+static bool vidtv_bridge_check_demod_lock(struct vidtv_dvb *dvb, u32 n)
+{
+ enum fe_status status;
+
+ dvb->fe[n]->ops.read_status(dvb->fe[n], &status);
+
+ return status == (FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC |
+ FE_HAS_LOCK);
+}
+
+static void
+vidtv_bridge_on_new_pkts_avail(void *priv, u8 *buf, u32 npkts)
+{
+ /*
+ * called on a separate thread by the mux when new packets become
+ * available
+ */
+ struct vidtv_dvb *dvb = (struct vidtv_dvb *)priv;
+
+ /* drop packets if we lose the lock */
+ if (vidtv_bridge_check_demod_lock(dvb, 0))
+ dvb_dmx_swfilter_packets(&dvb->demux, buf, npkts);
+}
+
+static int vidtv_start_streaming(struct vidtv_dvb *dvb)
+{
+ struct vidtv_mux_init_args mux_args = {0};
+ struct device *dev = &dvb->pdev->dev;
+ u32 mux_buf_sz;
+
+ if (dvb->streaming) {
+ dev_warn_ratelimited(dev, "Already streaming. Skipping.\n");
+ return 0;
+ }
+
+ /* the module parameter is given in multiples of 188-byte TS packets */
+ mux_buf_sz = (mux_buf_sz_pkts) ? mux_buf_sz_pkts * TS_PACKET_LEN :
+ vidtv_bridge_mux_buf_sz_for_mux_rate();
+
+ mux_args.mux_rate_kbytes_sec = mux_rate_kbytes_sec;
+ mux_args.on_new_packets_available_cb = vidtv_bridge_on_new_pkts_avail;
+ mux_args.mux_buf_sz = mux_buf_sz;
+ mux_args.pcr_period_usecs = pcr_period_msec * 1000;
+ mux_args.si_period_usecs = si_period_msec * 1000;
+ mux_args.pcr_pid = pcr_pid;
+ mux_args.transport_stream_id = VIDTV_DEFAULT_TS_ID;
+ mux_args.priv = dvb;
+
+ dvb->streaming = true;
+ dvb->mux = vidtv_mux_init(dvb->fe[0], dev, mux_args);
+ vidtv_mux_start_thread(dvb->mux);
+
+ dev_dbg_ratelimited(dev, "Started streaming\n");
+ return 0;
+}
+
+static int vidtv_stop_streaming(struct vidtv_dvb *dvb)
+{
+ struct device *dev = &dvb->pdev->dev;
+
+ dvb->streaming = false;
+ vidtv_mux_stop_thread(dvb->mux);
+ vidtv_mux_destroy(dvb->mux);
+ dvb->mux = NULL;
+
+ dev_dbg_ratelimited(dev, "Stopped streaming\n");
+ return 0;
+}
+
+static int vidtv_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vidtv_dvb *dvb = demux->priv;
+ int rc;
+ int ret;
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ mutex_lock(&dvb->feed_lock);
+
+ dvb->nfeeds++;
+ rc = dvb->nfeeds;
+
+ if (dvb->nfeeds == 1) {
+ ret = vidtv_start_streaming(dvb);
+ if (ret < 0)
+ rc = ret;
+ }
+
+ mutex_unlock(&dvb->feed_lock);
+ return rc;
+}
+
+static int vidtv_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vidtv_dvb *dvb = demux->priv;
+ int err = 0;
+
+ mutex_lock(&dvb->feed_lock);
+ dvb->nfeeds--;
+
+ if (!dvb->nfeeds)
+ err = vidtv_stop_streaming(dvb);
+
+ mutex_unlock(&dvb->feed_lock);
+ return err;
+}
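+
+/*
+ * Feed accounting sketch: the first vidtv_start_feed() call takes nfeeds
+ * from 0 to 1 and starts the mux thread; streaming only stops again once
+ * the last feed is closed and nfeeds drops back to 0.
+ */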
+
+static struct dvb_frontend *vidtv_get_frontend_ptr(struct i2c_client *c)
+{
+ /* the demod will set this when its probe function runs */
+ struct vidtv_demod_state *state = i2c_get_clientdata(c);
+
+ return &state->frontend;
+}
+
+static int vidtv_master_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ return 0;
+}
+
+static u32 vidtv_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm vidtv_i2c_algorithm = {
+ .master_xfer = vidtv_master_xfer,
+ .functionality = vidtv_i2c_func,
+};
+
+static int vidtv_bridge_i2c_register_adap(struct vidtv_dvb *dvb)
+{
+ struct i2c_adapter *i2c_adapter = &dvb->i2c_adapter;
+
+ strscpy(i2c_adapter->name, "vidtv_i2c", sizeof(i2c_adapter->name));
+ i2c_adapter->owner = THIS_MODULE;
+ i2c_adapter->algo = &vidtv_i2c_algorithm;
+ i2c_adapter->algo_data = NULL;
+ i2c_adapter->timeout = 500;
+ i2c_adapter->retries = 3;
+ i2c_adapter->dev.parent = &dvb->pdev->dev;
+
+ i2c_set_adapdata(i2c_adapter, dvb);
+ return i2c_add_adapter(&dvb->i2c_adapter);
+}
+
+static int vidtv_bridge_register_adap(struct vidtv_dvb *dvb)
+{
+ int ret = 0;
+
+ ret = dvb_register_adapter(&dvb->adapter,
+ KBUILD_MODNAME,
+ THIS_MODULE,
+ &dvb->i2c_adapter.dev,
+ adapter_nums);
+
+ return ret;
+}
+
+static int vidtv_bridge_dmx_init(struct vidtv_dvb *dvb)
+{
+ dvb->demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING;
+
+ dvb->demux.priv = dvb;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = vidtv_start_feed;
+ dvb->demux.stop_feed = vidtv_stop_feed;
+
+ return dvb_dmx_init(&dvb->demux);
+}
+
+static int vidtv_bridge_dmxdev_init(struct vidtv_dvb *dvb)
+{
+ dvb->dmx_dev.filternum = 256;
+ dvb->dmx_dev.demux = &dvb->demux.dmx;
+ dvb->dmx_dev.capabilities = 0;
+
+ return dvb_dmxdev_init(&dvb->dmx_dev, &dvb->adapter);
+}
+
+static int vidtv_bridge_probe_demod(struct vidtv_dvb *dvb, u32 n)
+{
+ struct vidtv_demod_config cfg = {};
+
+ cfg.drop_tslock_prob_on_low_snr = drop_tslock_prob_on_low_snr;
+ cfg.recover_tslock_prob_on_good_snr = recover_tslock_prob_on_good_snr;
+
+ dvb->i2c_client_demod[n] = dvb_module_probe("dvb_vidtv_demod",
+ NULL,
+ &dvb->i2c_adapter,
+ DEMOD_DEFAULT_ADDR,
+ &cfg);
+
+ /* the driver will not work anyway, so bail out */
+ if (!dvb->i2c_client_demod[n])
+ return -ENODEV;
+
+ /* retrieve a ptr to the frontend state */
+ dvb->fe[n] = vidtv_get_frontend_ptr(dvb->i2c_client_demod[n]);
+
+ return 0;
+}
+
+static int vidtv_bridge_probe_tuner(struct vidtv_dvb *dvb, u32 n)
+{
+ struct vidtv_tuner_config cfg = {};
+ u32 freq;
+ int i;
+
+ cfg.fe = dvb->fe[n];
+ cfg.mock_power_up_delay_msec = mock_power_up_delay_msec;
+ cfg.mock_tune_delay_msec = mock_tune_delay_msec;
+
+ /* TODO: check if the frequencies are in a valid range */
+
+ memcpy(cfg.vidtv_valid_dvb_t_freqs,
+ vidtv_valid_dvb_t_freqs,
+ sizeof(vidtv_valid_dvb_t_freqs));
+
+ memcpy(cfg.vidtv_valid_dvb_c_freqs,
+ vidtv_valid_dvb_c_freqs,
+ sizeof(vidtv_valid_dvb_c_freqs));
+
+ /*
+ * Convert satellite frequencies from Ku-band in kHz into S-band
+ * frequencies in Hz.
+ */
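+ /*
+ * For example, with the default table entry of 11362000 kHz: it is
+ * below LNB_CUT_FREQUENCY, so the loop below stores
+ * |11362000 - 9750000| = 1612000.
+ */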
+ for (i = 0; i < ARRAY_SIZE(vidtv_valid_dvb_s_freqs); i++) {
+ freq = vidtv_valid_dvb_s_freqs[i];
+ if (freq) {
+ if (freq < LNB_CUT_FREQUENCY)
+ freq = abs(freq - LNB_LOW_FREQ);
+ else
+ freq = abs(freq - LNB_HIGH_FREQ);
+ }
+ cfg.vidtv_valid_dvb_s_freqs[i] = freq;
+ }
+
+ cfg.max_frequency_shift_hz = max_frequency_shift_hz;
+
+ dvb->i2c_client_tuner[n] = dvb_module_probe("dvb_vidtv_tuner",
+ NULL,
+ &dvb->i2c_adapter,
+ TUNER_DEFAULT_ADDR,
+ &cfg);
+
+ return (dvb->i2c_client_tuner[n]) ? 0 : -ENODEV;
+}
+
+static int vidtv_bridge_dvb_init(struct vidtv_dvb *dvb)
+{
+ int ret;
+ int i;
+ int j;
+
+ ret = vidtv_bridge_i2c_register_adap(dvb);
+ if (ret < 0)
+ return ret;
+
+ ret = vidtv_bridge_register_adap(dvb);
+ if (ret < 0)
+ goto fail_i2c;
+
+ for (i = 0; i < NUM_FE; ++i) {
+ ret = vidtv_bridge_probe_demod(dvb, i);
+ if (ret < 0)
+ goto fail_demod_probe;
+
+ ret = vidtv_bridge_probe_tuner(dvb, i);
+ if (ret < 0)
+ goto fail_tuner_probe;
+
+ ret = dvb_register_frontend(&dvb->adapter, dvb->fe[i]);
+ if (ret < 0)
+ goto fail_fe;
+ }
+
+ ret = vidtv_bridge_dmx_init(dvb);
+ if (ret < 0)
+ goto fail_dmx;
+
+ ret = vidtv_bridge_dmxdev_init(dvb);
+ if (ret < 0)
+ goto fail_dmx_dev;
+
+ for (j = 0; j < NUM_FE; ++j) {
+ ret = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx,
+ &dvb->dmx_fe[j]);
+ if (ret < 0)
+ goto fail_dmx_conn;
+
+ /*
+ * The source of the demux is a frontend connected
+ * to the demux.
+ */
+ dvb->dmx_fe[j].source = DMX_FRONTEND_0;
+ }
+
+ return ret;
+
+fail_dmx_conn:
+ for (j = j - 1; j >= 0; --j)
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx,
+ &dvb->dmx_fe[j]);
+fail_dmx_dev:
+ dvb_dmxdev_release(&dvb->dmx_dev);
+fail_dmx:
+ dvb_dmx_release(&dvb->demux);
+fail_fe:
+ /* the frontend at index i (if any) was not registered: skip it */
+ for (j = i - 1; j >= 0; --j)
+ dvb_unregister_frontend(dvb->fe[j]);
+fail_tuner_probe:
+ /* i == NUM_FE if we got here from the demux error paths above */
+ if (i == NUM_FE)
+ i = NUM_FE - 1;
+
+ for (j = i; j >= 0; --j)
+ if (dvb->i2c_client_tuner[j])
+ dvb_module_release(dvb->i2c_client_tuner[j]);
+
+fail_demod_probe:
+ for (j = i; j >= 0; --j)
+ if (dvb->i2c_client_demod[j])
+ dvb_module_release(dvb->i2c_client_demod[j]);
+
+ dvb_unregister_adapter(&dvb->adapter);
+
+fail_i2c:
+ i2c_del_adapter(&dvb->i2c_adapter);
+
+ return ret;
+}
+
+static int vidtv_bridge_probe(struct platform_device *pdev)
+{
+ struct vidtv_dvb *dvb;
+ int ret;
+
+ dvb = kzalloc(sizeof(*dvb), GFP_KERNEL);
+ if (!dvb)
+ return -ENOMEM;
+
+ dvb->pdev = pdev;
+
+ ret = vidtv_bridge_dvb_init(dvb);
+ if (ret < 0)
+ goto err_dvb;
+
+ mutex_init(&dvb->feed_lock);
+
+ platform_set_drvdata(pdev, dvb);
+
+ dev_info(&pdev->dev, "Successfully initialized vidtv!\n");
+ return ret;
+
+err_dvb:
+ kfree(dvb);
+ return ret;
+}
+
+static int vidtv_bridge_remove(struct platform_device *pdev)
+{
+ struct vidtv_dvb *dvb;
+ u32 i;
+
+ dvb = platform_get_drvdata(pdev);
+
+ mutex_destroy(&dvb->feed_lock);
+
+ for (i = 0; i < NUM_FE; ++i) {
+ dvb_unregister_frontend(dvb->fe[i]);
+ dvb_module_release(dvb->i2c_client_tuner[i]);
+ dvb_module_release(dvb->i2c_client_demod[i]);
+ }
+
+ dvb_dmxdev_release(&dvb->dmx_dev);
+ dvb_dmx_release(&dvb->demux);
+ dvb_unregister_adapter(&dvb->adapter);
+
+ return 0;
+}
+
+static void vidtv_bridge_dev_release(struct device *dev)
+{
+}
+
+static struct platform_device vidtv_bridge_dev = {
+ .name = "vidtv_bridge",
+ .dev.release = vidtv_bridge_dev_release,
+};
+
+static struct platform_driver vidtv_bridge_driver = {
+ .driver = {
+ .name = "vidtv_bridge",
+ .suppress_bind_attrs = true,
+ },
+ .probe = vidtv_bridge_probe,
+ .remove = vidtv_bridge_remove,
+};
+
+static void __exit vidtv_bridge_exit(void)
+{
+ platform_driver_unregister(&vidtv_bridge_driver);
+ platform_device_unregister(&vidtv_bridge_dev);
+}
+
+static int __init vidtv_bridge_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vidtv_bridge_dev);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&vidtv_bridge_driver);
+ if (ret)
+ platform_device_unregister(&vidtv_bridge_dev);
+
+ return ret;
+}
+
+module_init(vidtv_bridge_init);
+module_exit(vidtv_bridge_exit);
+
+MODULE_DESCRIPTION("Virtual Digital TV Test Driver");
+MODULE_AUTHOR("Daniel W. S. Almeida");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("vidtv");
+MODULE_ALIAS("dvb_vidtv");
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.h b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
new file mode 100644
index 000000000000..78fe8472fa37
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Virtual DTV test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * When this module is loaded, it will attempt to modprobe
+ * 'dvb_vidtv_tuner' and 'dvb_vidtv_demod'.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_BRIDGE_H
+#define VIDTV_BRIDGE_H
+
+/*
+ * For now, only one frontend is supported. See vidtv_start_streaming()
+ */
+#define NUM_FE 1
+
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include "vidtv_mux.h"
+
+/**
+ * struct vidtv_dvb - Vidtv bridge state
+ * @pdev: The platform device. Obtained when the bridge is probed.
+ * @fe: The frontends. Obtained when probing the demodulator modules.
+ * @adapter: Represents a DTV adapter. See 'dvb_register_adapter'.
+ * @demux: The demux used by the dvb_dmx_swfilter_packets() call.
+ * @dmx_dev: Represents a demux device.
+ * @dmx_fe: The frontends associated with the demux.
+ * @i2c_adapter: The i2c_adapter associated with the bridge driver.
+ * @i2c_client_demod: The i2c_clients associated with the demodulator modules.
+ * @i2c_client_tuner: The i2c_clients associated with the tuner modules.
+ * @nfeeds: The number of feeds active.
+ * @feed_lock: Protects access to the start/stop stream logic/data.
+ * @streaming: Whether we are streaming now.
+ * @mux: The abstraction responsible for delivering MPEG TS packets to the bridge.
+ */
+struct vidtv_dvb {
+ struct platform_device *pdev;
+ struct dvb_frontend *fe[NUM_FE];
+ struct dvb_adapter adapter;
+ struct dvb_demux demux;
+ struct dmxdev dmx_dev;
+ struct dmx_frontend dmx_fe[NUM_FE];
+ struct i2c_adapter i2c_adapter;
+ struct i2c_client *i2c_client_demod[NUM_FE];
+ struct i2c_client *i2c_client_tuner[NUM_FE];
+
+ u32 nfeeds;
+ struct mutex feed_lock; /* Protects access to the start/stop stream logic/data. */
+
+ bool streaming;
+
+ struct vidtv_mux *mux;
+};
+
+#endif // VIDTV_BRIDGE_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c
new file mode 100644
index 000000000000..f2b97cf08e87
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the code for a 'channel' abstraction.
+ *
+ * When vidtv boots, it will create some hardcoded channels.
+ * Their services will be concatenated to populate the SDT.
+ * Their programs will be concatenated to populate the PAT.
+ * For each program in the PAT, a PMT section will be created.
+ * The PMT section for a channel will be assigned its streams.
+ * Every stream will have its corresponding encoder polled to produce TS
+ * packets. These packets may be interleaved by the mux and then delivered
+ * to the bridge.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
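+
+/*
+ * Rough sketch of the PSI linkage for the hardcoded s302m channel created
+ * below (PIDs taken from vidtv_channel_s302m_init()):
+ *
+ *   PAT --- program 0x880 ---> PMT section on PID 0x101
+ *   PMT --- stream ----------> ES on PID 0x111
+ *   SDT --- service 0x880 ---> name/provider descriptors
+ */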
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dev_printk.h>
+#include <linux/ratelimit.h>
+
+#include "vidtv_channel.h"
+#include "vidtv_psi.h"
+#include "vidtv_encoder.h"
+#include "vidtv_mux.h"
+#include "vidtv_common.h"
+#include "vidtv_s302m.h"
+
+static void vidtv_channel_encoder_destroy(struct vidtv_encoder *e)
+{
+ struct vidtv_encoder *curr = e;
+ struct vidtv_encoder *tmp = NULL;
+
+ while (curr) {
+ /* forward the call to the derived type */
+ tmp = curr;
+ curr = curr->next;
+ tmp->destroy(tmp);
+ }
+}
+
+#define ENCODING_ISO8859_15 "\x0b"
+
+struct vidtv_channel
+*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id)
+{
+ /*
+ * init an audio only channel with a s302m encoder
+ */
+ const u16 s302m_service_id = 0x880;
+ const u16 s302m_program_num = 0x880;
+ const u16 s302m_program_pid = 0x101; /* packet id for the PMT */
+ const u16 s302m_es_pid = 0x111; /* packet id for the ES */
+ const __be32 s302m_fid = cpu_to_be32(VIDTV_S302M_FORMAT_IDENTIFIER);
+
+ char *name = ENCODING_ISO8859_15 "Beethoven";
+ char *provider = ENCODING_ISO8859_15 "LinuxTV.org";
+
+ struct vidtv_channel *s302m = kzalloc(sizeof(*s302m), GFP_KERNEL);
+ struct vidtv_s302m_encoder_init_args encoder_args = {};
+
+ if (!s302m)
+ return NULL;
+
+ s302m->name = kstrdup(name, GFP_KERNEL);
+
+ s302m->service = vidtv_psi_sdt_service_init(NULL, s302m_service_id);
+
+ s302m->service->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_service_desc_init(NULL,
+ DIGITAL_TELEVISION_SERVICE,
+ name,
+ provider);
+
+ s302m->transport_stream_id = transport_stream_id;
+
+ s302m->program = vidtv_psi_pat_program_init(NULL,
+ s302m_service_id,
+ s302m_program_pid);
+
+ s302m->program_num = s302m_program_num;
+
+ s302m->streams = vidtv_psi_pmt_stream_init(NULL,
+ STREAM_PRIVATE_DATA,
+ s302m_es_pid);
+
+ s302m->streams->descriptor = (struct vidtv_psi_desc *)
+ vidtv_psi_registration_desc_init(NULL,
+ s302m_fid,
+ NULL,
+ 0);
+ encoder_args.es_pid = s302m_es_pid;
+
+ s302m->encoders = vidtv_s302m_encoder_init(encoder_args);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = s302m;
+ }
+
+ return s302m;
+}
+
+static struct vidtv_psi_table_sdt_service
+*vidtv_channel_sdt_serv_cat_into_new(struct vidtv_mux *m)
+{
+ /* Concatenate the services */
+ const struct vidtv_channel *cur_chnl = m->channels;
+
+ struct vidtv_psi_table_sdt_service *curr = NULL;
+ struct vidtv_psi_table_sdt_service *head = NULL;
+ struct vidtv_psi_table_sdt_service *tail = NULL;
+
+ struct vidtv_psi_desc *desc = NULL;
+ u16 service_id;
+
+ if (!cur_chnl)
+ return NULL;
+
+ while (cur_chnl) {
+ curr = cur_chnl->service;
+
+ if (!curr)
+ dev_warn_ratelimited(m->dev,
+ "No services found for channel %s\n", cur_chnl->name);
+
+ while (curr) {
+ service_id = be16_to_cpu(curr->service_id);
+ tail = vidtv_psi_sdt_service_init(tail, service_id);
+
+ desc = vidtv_psi_desc_clone(curr->descriptor);
+ vidtv_psi_desc_assign(&tail->descriptor, desc);
+
+ if (!head)
+ head = tail;
+
+ curr = curr->next;
+ }
+
+ cur_chnl = cur_chnl->next;
+ }
+
+ return head;
+}
+
+static struct vidtv_psi_table_pat_program*
+vidtv_channel_pat_prog_cat_into_new(struct vidtv_mux *m)
+{
+ /* Concatenate the programs */
+ const struct vidtv_channel *cur_chnl = m->channels;
+ struct vidtv_psi_table_pat_program *curr = NULL;
+ struct vidtv_psi_table_pat_program *head = NULL;
+ struct vidtv_psi_table_pat_program *tail = NULL;
+ u16 serv_id;
+ u16 pid;
+
+ if (!cur_chnl)
+ return NULL;
+
+ while (cur_chnl) {
+ curr = cur_chnl->program;
+
+ if (!curr)
+ dev_warn_ratelimited(m->dev,
+ "No programs found for channel %s\n",
+ cur_chnl->name);
+
+ while (curr) {
+ serv_id = be16_to_cpu(curr->service_id);
+ pid = vidtv_psi_get_pat_program_pid(curr);
+ tail = vidtv_psi_pat_program_init(tail,
+ serv_id,
+ pid);
+
+ if (!head)
+ head = tail;
+
+ curr = curr->next;
+ }
+
+ cur_chnl = cur_chnl->next;
+ }
+
+ return head;
+}
+
+static void
+vidtv_channel_pmt_match_sections(struct vidtv_channel *channels,
+ struct vidtv_psi_table_pmt **sections,
+ u32 nsections)
+{
+ /*
+ * Match channels to their respective PMT sections, then assign the
+ * streams
+ */
+ struct vidtv_psi_table_pmt *curr_section = NULL;
+ struct vidtv_channel *cur_chnl = channels;
+
+ struct vidtv_psi_table_pmt_stream *s = NULL;
+ struct vidtv_psi_table_pmt_stream *head = NULL;
+ struct vidtv_psi_table_pmt_stream *tail = NULL;
+
+ struct vidtv_psi_desc *desc = NULL;
+ u32 j;
+ u16 curr_id;
+ u16 e_pid; /* elementary stream pid */
+
+ while (cur_chnl) {
+ for (j = 0; j < nsections; ++j) {
+ curr_section = sections[j];
+
+ if (!curr_section)
+ continue;
+
+ curr_id = be16_to_cpu(curr_section->header.id);
+
+ /* we got a match */
+ if (curr_id == cur_chnl->program_num) {
+ s = cur_chnl->streams;
+
+ /* clone the streams for the PMT */
+ while (s) {
+ e_pid = vidtv_psi_pmt_stream_get_elem_pid(s);
+ tail = vidtv_psi_pmt_stream_init(tail,
+ s->type,
+ e_pid);
+
+ if (!head)
+ head = tail;
+
+ desc = vidtv_psi_desc_clone(s->descriptor);
+ vidtv_psi_desc_assign(&tail->descriptor, desc);
+
+ s = s->next;
+ }
+
+ vidtv_psi_pmt_stream_assign(curr_section, head);
+ break;
+ }
+ }
+
+ cur_chnl = cur_chnl->next;
+ }
+}
+
+void vidtv_channel_si_init(struct vidtv_mux *m)
+{
+ struct vidtv_psi_table_pat_program *programs = NULL;
+ struct vidtv_psi_table_sdt_service *services = NULL;
+
+ m->si.pat = vidtv_psi_pat_table_init(m->transport_stream_id);
+
+ m->si.sdt = vidtv_psi_sdt_table_init(m->transport_stream_id);
+
+ programs = vidtv_channel_pat_prog_cat_into_new(m);
+ services = vidtv_channel_sdt_serv_cat_into_new(m);
+
+ /* assemble all programs and assign to PAT */
+ vidtv_psi_pat_program_assign(m->si.pat, programs);
+
+ /* assemble all services and assign to SDT */
+ vidtv_psi_sdt_service_assign(m->si.sdt, services);
+
+ m->si.pmt_secs = vidtv_psi_pmt_create_sec_for_each_pat_entry(m->si.pat, m->pcr_pid);
+
+ vidtv_channel_pmt_match_sections(m->channels,
+ m->si.pmt_secs,
+ m->si.pat->programs);
+}
+
+void vidtv_channel_si_destroy(struct vidtv_mux *m)
+{
+ u32 i;
+ u16 num_programs = m->si.pat->programs;
+
+ vidtv_psi_pat_table_destroy(m->si.pat);
+
+ for (i = 0; i < num_programs; ++i)
+ vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]);
+
+ kfree(m->si.pmt_secs);
+ vidtv_psi_sdt_table_destroy(m->si.sdt);
+}
+
+void vidtv_channels_init(struct vidtv_mux *m)
+{
+ /* this is the place to add new 'channels' for vidtv */
+ m->channels = vidtv_channel_s302m_init(NULL, m->transport_stream_id);
+}
+
+void vidtv_channels_destroy(struct vidtv_mux *m)
+{
+ struct vidtv_channel *curr = m->channels;
+ struct vidtv_channel *tmp = NULL;
+
+ while (curr) {
+ kfree(curr->name);
+ vidtv_psi_sdt_service_destroy(curr->service);
+ vidtv_psi_pat_program_destroy(curr->program);
+ vidtv_psi_pmt_stream_destroy(curr->streams);
+ vidtv_channel_encoder_destroy(curr->encoders);
+
+ tmp = curr;
+ curr = curr->next;
+ kfree(tmp);
+ }
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.h b/drivers/media/test-drivers/vidtv/vidtv_channel.h
new file mode 100644
index 000000000000..2c3cba4313b0
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_channel.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the code for a 'channel' abstraction.
+ *
+ * When vidtv boots, it will create some hardcoded channels.
+ * Their services will be concatenated to populate the SDT.
+ * Their programs will be concatenated to populate the PAT
+ * For each program in the PAT, a PMT section will be created
+ * The PMT section for a channel will be assigned its streams.
+ * Every stream will have its corresponding encoder polled to produce TS packets
+ * These packets may be interleaved by the mux and then delivered to the bridge
+ *
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_CHANNEL_H
+#define VIDTV_CHANNEL_H
+
+#include <linux/types.h>
+#include "vidtv_psi.h"
+#include "vidtv_encoder.h"
+#include "vidtv_mux.h"
+
+/**
+ * struct vidtv_channel - A 'channel' abstraction
+ *
+ * When vidtv boots, it will create some hardcoded channels.
+ * Their services will be concatenated to populate the SDT.
+ * Their programs will be concatenated to populate the PAT.
+ * For each program in the PAT, a PMT section will be created.
+ * The PMT section for a channel will be assigned its streams.
+ * Every stream will have its corresponding encoder polled to produce TS
+ * packets. These packets may be interleaved by the mux and then delivered
+ * to the bridge.
+ *
+ * @name: The name of the channel.
+ * @transport_stream_id: a number to identify the TS, chosen at will.
+ * @service: A _single_ service. Will be concatenated into the SDT.
+ * @program_num: The link between PAT, PMT and SDT.
+ * @program: A _single_ program with one or more streams associated with it.
+ * Will be concatenated into the PAT.
+ * @streams: A stream loop used to populate the PMT section for 'program'
+ * @encoders: An encoder loop. There must be one encoder for each stream.
+ * @next: Optionally chain this channel.
+ */
+struct vidtv_channel {
+ char *name;
+ u16 transport_stream_id;
+ struct vidtv_psi_table_sdt_service *service;
+ u16 program_num;
+ struct vidtv_psi_table_pat_program *program;
+ struct vidtv_psi_table_pmt_stream *streams;
+ struct vidtv_encoder *encoders;
+ struct vidtv_channel *next;
+};
+
+/**
+ * vidtv_channel_si_init - Init the PSI tables from the channels in the mux
+ * @m: The mux containing the channels.
+ */
+void vidtv_channel_si_init(struct vidtv_mux *m);
+void vidtv_channel_si_destroy(struct vidtv_mux *m);
+
+/**
+ * vidtv_channels_init - Init hardcoded, fake 'channels'.
+ * @m: The mux to store the channels into.
+ */
+void vidtv_channels_init(struct vidtv_mux *m);
+struct vidtv_channel
+*vidtv_channel_s302m_init(struct vidtv_channel *head, u16 transport_stream_id);
+void vidtv_channels_destroy(struct vidtv_mux *m);
+
+#endif // VIDTV_CHANNEL_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_common.c b/drivers/media/test-drivers/vidtv/vidtv_common.c
new file mode 100644
index 000000000000..63b3055bd715
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_common.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "vidtv_common.h"
+
+/**
+ * vidtv_memcpy() - wrapper routine to be used by MPEG-TS
+ * generator, in order to avoid going past the
+ * output buffer.
+ * @to: Starting element to where an MPEG-TS packet will
+ * be copied.
+ * @to_offset: Starting position of the @to buffer to be filled.
+ * @to_size: Size of the @to buffer.
+ * @from: Starting element of the buffer to be copied.
+ * @len: Number of bytes to copy from the @from buffer
+ * into the @to + @to_offset buffer.
+ *
+ * Note:
+ * Real digital TV demod drivers should not have memcpy
+ * wrappers. We use it here because emulating MPEG-TS
+ * generation at kernelspace requires some extra care.
+ *
+ * Return:
+ * Returns the number of bytes written
+ */
+u32 vidtv_memcpy(void *to,
+ size_t to_offset,
+ size_t to_size,
+ const void *from,
+ size_t len)
+{
+ if (unlikely(to_offset + len > to_size)) {
+ pr_err_ratelimited("overflow detected, skipping. Try increasing the buffer size. Needed %zu, had %zu\n",
+ to_offset + len,
+ to_size);
+ return 0;
+ }
+
+ memcpy(to + to_offset, from, len);
+ return len;
+}
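+
+/*
+ * Usage sketch (the buffer and offset names here are hypothetical):
+ *
+ *   nbytes += vidtv_memcpy(dest_buf, dest_offset + nbytes, buf_sz,
+ *                          &ts_header, sizeof(ts_header));
+ *
+ * A zero return signals that the write was skipped to avoid overflowing
+ * dest_buf.
+ */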
+
+/**
+ * vidtv_memset() - wrapper routine to be used by MPEG-TS
+ * generator, in order to avoid going past the
+ * output buffer.
+ * @to: Starting element to set
+ * @to_offset: Starting position of the @to buffer to be filled.
+ * @to_size: Size of the @to buffer.
+ * @c: The value to set the memory to.
+ * @len: Number of bytes to set, starting at
+ * @to + @to_offset.
+ *
+ * Note:
+ * Real digital TV demod drivers should not have memset
+ * wrappers. We use it here because emulating MPEG-TS
+ * generation at kernelspace requires some extra care.
+ *
+ * Return:
+ * Returns the number of bytes written
+ */
+u32 vidtv_memset(void *to,
+ size_t to_offset,
+ size_t to_size,
+ const int c,
+ size_t len)
+{
+ if (unlikely(to_offset + len > to_size)) {
+ pr_err_ratelimited("overflow detected, skipping. Try increasing the buffer size. Needed %zu, had %zu\n",
+ to_offset + len,
+ to_size);
+ return 0;
+ }
+
+ memset(to + to_offset, c, len);
+ return len;
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_common.h b/drivers/media/test-drivers/vidtv/vidtv_common.h
new file mode 100644
index 000000000000..818e7f2b9ec5
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_common.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_COMMON_H
+#define VIDTV_COMMON_H
+
+#include <linux/types.h>
+
+#define CLOCK_UNIT_90KHZ 90000
+#define CLOCK_UNIT_27MHZ 27000000
+#define VIDTV_SLEEP_USECS 10000
+#define VIDTV_MAX_SLEEP_USECS (2 * VIDTV_SLEEP_USECS)
+#define VIDTV_DEFAULT_TS_ID 0x744
+
+u32 vidtv_memcpy(void *to,
+ size_t to_offset,
+ size_t to_size,
+ const void *from,
+ size_t len);
+
+u32 vidtv_memset(void *to,
+ size_t to_offset,
+ size_t to_size,
+ int c,
+ size_t len);
+
+#endif // VIDTV_COMMON_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
new file mode 100644
index 000000000000..eba7fe1a1b48
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ * Based on the example driver written by Emard <emard@softhome.net>
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <media/dvb_frontend.h>
+
+#include "vidtv_demod.h"
+
+#define POLL_THRD_TIME 2000 /* ms */
+
+static const struct vidtv_demod_cnr_to_qual_s vidtv_demod_c_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli dB */
+ { QAM_256, FEC_NONE, 34000, 38000},
+ { QAM_64, FEC_NONE, 30000, 34000},
+};
+
+static const struct vidtv_demod_cnr_to_qual_s vidtv_demod_s_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli dB */
+ { QPSK, FEC_1_2, 7000, 10000},
+ { QPSK, FEC_2_3, 9000, 12000},
+ { QPSK, FEC_3_4, 10000, 13000},
+ { QPSK, FEC_5_6, 11000, 14000},
+ { QPSK, FEC_7_8, 12000, 15000},
+};
+
+static const struct vidtv_demod_cnr_to_qual_s vidtv_demod_s2_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli dB */
+ { QPSK, FEC_1_2, 9000, 12000},
+ { QPSK, FEC_2_3, 11000, 14000},
+ { QPSK, FEC_3_4, 12000, 15000},
+ { QPSK, FEC_5_6, 12000, 15000},
+ { QPSK, FEC_8_9, 13000, 16000},
+ { QPSK, FEC_9_10, 13500, 16500},
+ { PSK_8, FEC_2_3, 14500, 17500},
+ { PSK_8, FEC_3_4, 16000, 19000},
+ { PSK_8, FEC_5_6, 17500, 20500},
+ { PSK_8, FEC_8_9, 19000, 22000},
+};
+
+static const struct vidtv_demod_cnr_to_qual_s vidtv_demod_t_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli dB */
+ { QPSK, FEC_1_2, 4100, 5900},
+ { QPSK, FEC_2_3, 6100, 9600},
+ { QPSK, FEC_3_4, 7200, 12400},
+ { QPSK, FEC_5_6, 8500, 15600},
+ { QPSK, FEC_7_8, 9200, 17500},
+ { QAM_16, FEC_1_2, 9800, 11800},
+ { QAM_16, FEC_2_3, 12100, 15300},
+ { QAM_16, FEC_3_4, 13400, 18100},
+ { QAM_16, FEC_5_6, 14800, 21300},
+ { QAM_16, FEC_7_8, 15700, 23600},
+ { QAM_64, FEC_1_2, 14000, 16000},
+ { QAM_64, FEC_2_3, 19900, 25400},
+ { QAM_64, FEC_3_4, 24900, 27900},
+ { QAM_64, FEC_5_6, 21300, 23300},
+ { QAM_64, FEC_7_8, 22000, 24000},
+};
+
+static const struct vidtv_demod_cnr_to_qual_s *vidtv_match_cnr_s(struct dvb_frontend *fe)
+{
+ const struct vidtv_demod_cnr_to_qual_s *cnr2qual = NULL;
+ struct device *dev = fe->dvb->device;
+ struct dtv_frontend_properties *c;
+ u32 array_size = 0;
+ u32 i;
+
+ c = &fe->dtv_property_cache;
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ cnr2qual = vidtv_demod_t_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_demod_t_cnr_2_qual);
+ break;
+
+ case SYS_DVBS:
+ cnr2qual = vidtv_demod_s_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_demod_s_cnr_2_qual);
+ break;
+
+ case SYS_DVBS2:
+ cnr2qual = vidtv_demod_s2_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_demod_s2_cnr_2_qual);
+ break;
+
+ case SYS_DVBC_ANNEX_A:
+ cnr2qual = vidtv_demod_c_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_demod_c_cnr_2_qual);
+ break;
+
+ default:
+ dev_warn_ratelimited(dev,
+ "%s: unsupported delivery system: %u\n",
+ __func__,
+ c->delivery_system);
+ break;
+ }
+
+ for (i = 0; i < array_size; i++)
+ if (cnr2qual[i].modulation == c->modulation &&
+ cnr2qual[i].fec == c->fec_inner)
+ return &cnr2qual[i];
+
+ return NULL; /* not found */
+}
+
+static void vidtv_clean_stats(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ /* Fill the length of each status counter */
+
+ /* Signal is always available */
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].svalue = 0;
+
+ /* Usually available only after Viterbi lock */
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.stat[0].svalue = 0;
+ c->cnr.len = 1;
+
+ /* These depend on a full lock */
+ c->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_error.stat[0].uvalue = 0;
+ c->pre_bit_error.len = 1;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->pre_bit_count.stat[0].uvalue = 0;
+ c->pre_bit_count.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_error.stat[0].uvalue = 0;
+ c->post_bit_error.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].uvalue = 0;
+ c->post_bit_count.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_error.stat[0].uvalue = 0;
+ c->block_error.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].uvalue = 0;
+ c->block_count.len = 1;
+}
+
+static void vidtv_demod_update_stats(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct vidtv_demod_state *state = fe->demodulator_priv;
+ u32 scale;
+
+ if (state->status & FE_HAS_LOCK) {
+ scale = FE_SCALE_COUNTER;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else {
+ scale = FE_SCALE_NOT_AVAILABLE;
+ c->cnr.stat[0].scale = scale;
+ }
+
+ c->pre_bit_error.stat[0].scale = scale;
+ c->pre_bit_count.stat[0].scale = scale;
+ c->post_bit_error.stat[0].scale = scale;
+ c->post_bit_count.stat[0].scale = scale;
+ c->block_error.stat[0].scale = scale;
+ c->block_count.stat[0].scale = scale;
+
+ /*
+ * Add up to 2% of randomness to the signal strength and CNR,
+ * and make them different, as we want to have something closer
+ * to a real case scenario.
+ *
+ * Also, usually, signal strength is a negative number in dBm.
+ */
+ c->strength.stat[0].svalue = state->tuner_cnr;
+ c->strength.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
+ c->strength.stat[0].svalue -= 68000; /* Adjust to a better range */
+
+ c->cnr.stat[0].svalue = state->tuner_cnr;
+ c->cnr.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
+}
+
+static int vidtv_demod_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct vidtv_demod_state *state = fe->demodulator_priv;
+ const struct vidtv_demod_cnr_to_qual_s *cnr2qual = NULL;
+ struct vidtv_demod_config *config = &state->config;
+ u16 snr = 0;
+
+ /* Simulate random loss of signal due to a badly-tuned channel */
+ cnr2qual = vidtv_match_cnr_s(&state->frontend);
+
+ if (cnr2qual && state->tuner_cnr < cnr2qual->cnr_good &&
+ state->frontend.ops.tuner_ops.get_rf_strength) {
+ state->frontend.ops.tuner_ops.get_rf_strength(&state->frontend,
+ &snr);
+
+ if (snr < cnr2qual->cnr_ok) {
+ /* eventually lose the TS lock */
+ if (prandom_u32_max(100) < config->drop_tslock_prob_on_low_snr)
+ state->status = 0;
+ } else {
+ /* recover if the signal improves */
+ if (prandom_u32_max(100) <
+ config->recover_tslock_prob_on_good_snr)
+ state->status = FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ }
+ }
+
+ vidtv_demod_update_stats(&state->frontend);
+
+ *status = state->status;
+
+ return 0;
+}
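+
+/*
+ * For instance, configuring drop_tslock_prob_on_low_snr=3 (via the bridge
+ * module parameters) makes each read_status() call above drop the TS lock
+ * with a ~3% probability whenever the reported SNR is below the 'ok'
+ * threshold for the current modulation/FEC combination.
+ */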
+
+static int vidtv_demod_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ *strength = c->strength.stat[0].uvalue;
+
+ return 0;
+}
+
+/*
+ * NOTE:
+ * This is implemented here just to be used as an example for real
+ * demod drivers.
+ *
+ * Should only be implemented if it actually reads something from the hardware.
+ * Also, it should check for the locks, in order to avoid reporting wrong
+ * data to userspace.
+ */
+static int vidtv_demod_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ return 0;
+}
+
+static int vidtv_demod_set_frontend(struct dvb_frontend *fe)
+{
+ struct vidtv_demod_state *state = fe->demodulator_priv;
+ u32 tuner_status = 0;
+ int ret;
+
+ if (!fe->ops.tuner_ops.set_params)
+ return 0;
+
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* store the CNR returned by the tuner */
+ ret = fe->ops.tuner_ops.get_rf_strength(fe, &state->tuner_cnr);
+ if (ret < 0)
+ return ret;
+
+ fe->ops.tuner_ops.get_status(fe, &tuner_status);
+ state->status = (state->tuner_cnr > 0) ? FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC |
+ FE_HAS_LOCK :
+ 0;
+
+ vidtv_demod_update_stats(fe);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return 0;
+}
+
+/*
+ * NOTE:
+ * This is implemented here just to be used as an example for real
+ * demod drivers.
+ *
+ * Should only be implemented if the demod has support for DVB-S or DVB-S2
+ */
+static int vidtv_demod_set_tone(struct dvb_frontend *fe,
+ enum fe_sec_tone_mode tone)
+{
+ return 0;
+}
+
+/*
+ * NOTE:
+ * This is implemented here just to be used as an example for real
+ * demod drivers.
+ *
+ * Should only be implemented if the demod has support for DVB-S or DVB-S2
+ */
+static int vidtv_demod_set_voltage(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage)
+{
+ return 0;
+}
+
+/*
+ * NOTE:
+ * This is implemented here just to be used as an example for real
+ * demod drivers.
+ *
+ * Should only be implemented if the demod has support for DVB-S or DVB-S2
+ */
+static int vidtv_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ return 0;
+}
+
+/*
+ * NOTE:
+ * This is implemented here just to be used as an example for real
+ * demod drivers.
+ *
+ * Should only be implemented if the demod has support for DVB-S or DVB-S2
+ */
+static int vidtv_diseqc_send_burst(struct dvb_frontend *fe,
+ enum fe_sec_mini_cmd burst)
+{
+ return 0;
+}
+
+static void vidtv_demod_release(struct dvb_frontend *fe)
+{
+ struct vidtv_demod_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static const struct dvb_frontend_ops vidtv_demod_ops = {
+ .delsys = {
+ SYS_DVBT,
+ SYS_DVBT2,
+ SYS_DVBC_ANNEX_A,
+ SYS_DVBS,
+ SYS_DVBS2,
+ },
+
+ .info = {
+ .name = "Dummy demod for DVB-T/T2/C/S/S2",
+ .frequency_min_hz = 51 * MHz,
+ .frequency_max_hz = 2150 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .frequency_tolerance_hz = 29500 * kHz,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_4_5 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_8_9 |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_INVERSION_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .release = vidtv_demod_release,
+
+ .set_frontend = vidtv_demod_set_frontend,
+ .get_frontend = vidtv_demod_get_frontend,
+
+ .read_status = vidtv_demod_read_status,
+ .read_signal_strength = vidtv_demod_read_signal_strength,
+
+ /* For DVB-S/S2 */
+ .set_voltage = vidtv_demod_set_voltage,
+ .set_tone = vidtv_demod_set_tone,
+ .diseqc_send_master_cmd = vidtv_send_diseqc_msg,
+ .diseqc_send_burst = vidtv_diseqc_send_burst,
+
+};
+
+static const struct i2c_device_id vidtv_demod_i2c_id_table[] = {
+ {"dvb_vidtv_demod", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, vidtv_demod_i2c_id_table);
+
+static int vidtv_demod_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct vidtv_demod_config *config = client->dev.platform_data;
+ struct vidtv_demod_state *state;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ /* create dvb_frontend */
+ memcpy(&state->frontend.ops,
+ &vidtv_demod_ops,
+ sizeof(struct dvb_frontend_ops));
+
+ memcpy(&state->config, config, sizeof(state->config));
+
+ state->frontend.demodulator_priv = state;
+ i2c_set_clientdata(client, state);
+
+ vidtv_clean_stats(&state->frontend);
+
+ return 0;
+}
+
+static int vidtv_demod_i2c_remove(struct i2c_client *client)
+{
+ struct vidtv_demod_state *state = i2c_get_clientdata(client);
+
+ kfree(state);
+
+ return 0;
+}
+
+static struct i2c_driver vidtv_demod_i2c_driver = {
+ .driver = {
+ .name = "dvb_vidtv_demod",
+ .suppress_bind_attrs = true,
+ },
+ .probe = vidtv_demod_i2c_probe,
+ .remove = vidtv_demod_i2c_remove,
+ .id_table = vidtv_demod_i2c_id_table,
+};
+
+module_i2c_driver(vidtv_demod_i2c_driver);
+
+MODULE_DESCRIPTION("Virtual DVB Demodulator Driver");
+MODULE_AUTHOR("Daniel W. S. Almeida");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.h b/drivers/media/test-drivers/vidtv/vidtv_demod.h
new file mode 100644
index 000000000000..87651b0193e6
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * The Virtual DTV test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ * Based on the example driver written by Emard <emard@softhome.net>
+ */
+
+#ifndef VIDTV_DEMOD_H
+#define VIDTV_DEMOD_H
+
+#include <linux/dvb/frontend.h>
+#include <media/dvb_frontend.h>
+
+/**
+ * struct vidtv_demod_cnr_to_qual_s - Map CNR values to a given combination of
+ * modulation and fec_inner
+ * @modulation: see enum fe_modulation
+ * @fec: see enum fe_fec_rate
+ * @cnr_ok: the minimum CNR, in milli dB, for an 'ok' signal quality
+ * @cnr_good: the minimum CNR, in milli dB, for a 'good' signal quality
+ *
+ * This struct matches values for 'good' and 'ok' CNRs given the combination
+ * of modulation and fec_inner in use. We might simulate some noise if the
+ * signal quality is not too good.
+ *
+ * The values were taken from libdvbv5.
+ */
+struct vidtv_demod_cnr_to_qual_s {
+ u32 modulation;
+ u32 fec;
+ u32 cnr_ok;
+ u32 cnr_good;
+};
+
+/**
+ * struct vidtv_demod_config - Configuration used to init the demod
+ * @drop_tslock_prob_on_low_snr: probability of losing the lock due to low snr
+ * @recover_tslock_prob_on_good_snr: probability of recovering when the signal
+ * improves
+ *
+ * The configuration used to init the demodulator module, usually filled
+ * by a bridge driver. For vidtv, this is filled by vidtv_bridge before the
+ * demodulator module is probed.
+ */
+struct vidtv_demod_config {
+ u8 drop_tslock_prob_on_low_snr;
+ u8 recover_tslock_prob_on_good_snr;
+};
+
+/**
+ * struct vidtv_demod_state - The demodulator state
+ * @frontend: The frontend structure allocated by the demod.
+ * @config: The config used to init the demod.
+ * @status: The demod status.
+ * @tuner_cnr: The last CNR reported by the tuner, in milli dB.
+ */
+struct vidtv_demod_state {
+ struct dvb_frontend frontend;
+ struct vidtv_demod_config config;
+ enum fe_status status;
+ u16 tuner_cnr;
+};
+#endif // VIDTV_DEMOD_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_encoder.h b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
new file mode 100644
index 000000000000..65d81daef4c3
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_encoder.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains a generic encoder type that can provide data for a stream
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_ENCODER_H
+#define VIDTV_ENCODER_H
+
+#include <linux/types.h>
+
+enum vidtv_encoder_id {
+ /* add IDs here when implementing new encoders */
+ S302M,
+};
+
+struct vidtv_access_unit {
+ u32 num_samples;
+ u64 pts;
+ u64 dts;
+ u32 nbytes;
+ u32 offset;
+ struct vidtv_access_unit *next;
+};
+
+/* Some musical notes, used by a tone generator */
+enum musical_notes {
+ NOTE_SILENT = 0,
+
+ NOTE_C_2 = 65,
+ NOTE_CS_2 = 69,
+ NOTE_D_2 = 73,
+ NOTE_DS_2 = 78,
+ NOTE_E_2 = 82,
+ NOTE_F_2 = 87,
+ NOTE_FS_2 = 93,
+ NOTE_G_2 = 98,
+ NOTE_GS_2 = 104,
+ NOTE_A_2 = 110,
+ NOTE_AS_2 = 117,
+ NOTE_B_2 = 123,
+ NOTE_C_3 = 131,
+ NOTE_CS_3 = 139,
+ NOTE_D_3 = 147,
+ NOTE_DS_3 = 156,
+ NOTE_E_3 = 165,
+ NOTE_F_3 = 175,
+ NOTE_FS_3 = 185,
+ NOTE_G_3 = 196,
+ NOTE_GS_3 = 208,
+ NOTE_A_3 = 220,
+ NOTE_AS_3 = 233,
+ NOTE_B_3 = 247,
+ NOTE_C_4 = 262,
+ NOTE_CS_4 = 277,
+ NOTE_D_4 = 294,
+ NOTE_DS_4 = 311,
+ NOTE_E_4 = 330,
+ NOTE_F_4 = 349,
+ NOTE_FS_4 = 370,
+ NOTE_G_4 = 392,
+ NOTE_GS_4 = 415,
+ NOTE_A_4 = 440,
+ NOTE_AS_4 = 466,
+ NOTE_B_4 = 494,
+ NOTE_C_5 = 523,
+ NOTE_CS_5 = 554,
+ NOTE_D_5 = 587,
+ NOTE_DS_5 = 622,
+ NOTE_E_5 = 659,
+ NOTE_F_5 = 698,
+ NOTE_FS_5 = 740,
+ NOTE_G_5 = 784,
+ NOTE_GS_5 = 831,
+ NOTE_A_5 = 880,
+ NOTE_AS_5 = 932,
+ NOTE_B_5 = 988,
+ NOTE_C_6 = 1047,
+ NOTE_CS_6 = 1109,
+ NOTE_D_6 = 1175,
+ NOTE_DS_6 = 1245,
+ NOTE_E_6 = 1319,
+ NOTE_F_6 = 1397,
+ NOTE_FS_6 = 1480,
+ NOTE_G_6 = 1568,
+ NOTE_GS_6 = 1661,
+ NOTE_A_6 = 1760,
+ NOTE_AS_6 = 1865,
+ NOTE_B_6 = 1976,
+ NOTE_C_7 = 2093
+};
+
+/**
+ * struct vidtv_encoder - A generic encoder type.
+ * @id: So we can cast to a concrete implementation when needed.
+ * @name: Usually the same as the stream name.
+ * @encoder_buf: The encoder internal buffer for the access units.
+ * @encoder_buf_sz: The encoder buffer size, in bytes
+ * @encoder_buf_offset: Our byte position in the encoder buffer.
+ * @sample_count: How many samples we have encoded in total.
+ * @last_duration: State for the tone generator: duration of the last tone.
+ * @note_offset: State for the tone generator: offset into the current tone.
+ * @last_tone: State for the tone generator: the last tone played.
+ * @access_units: A linked list of access units produced by this encoder.
+ * @src_buf: The source of raw data to be encoded, encoder might set a
+ * default if null.
+ * @src_buf_sz: The size of @src_buf, in bytes.
+ * @src_buf_offset: Our position in the source buffer.
+ * @is_video_encoder: Whether this is a video encoder (as opposed to audio).
+ * @ctx: Encoder-specific state.
+ * @stream_id: Examples: Audio streams (0xc0-0xdf), Video streams
+ * (0xe0-0xef).
+ * @es_pid: The TS PID to use for the elementary stream in this encoder.
+ * @encode: Prepare enough AUs for the given amount of time.
+ * @clear: Clear the encoder output.
+ * @sync: An encoder to attempt to synchronize with, if any.
+ * @sampling_rate_hz: The sampling rate (or fps, if video) used.
+ * @last_sample_cb: Called when the encoder runs out of data. This is
+ * so the source can read data in a
+ * piecemeal fashion instead of having to
+ * provide it all at once.
+ * @destroy: Destroy this encoder, freeing allocated resources.
+ * @next: Next in the chain
+ */
+struct vidtv_encoder {
+ enum vidtv_encoder_id id;
+ char *name;
+
+ u8 *encoder_buf;
+ u32 encoder_buf_sz;
+ u32 encoder_buf_offset;
+
+ u64 sample_count;
+ int last_duration;
+ int note_offset;
+ enum musical_notes last_tone;
+
+ struct vidtv_access_unit *access_units;
+
+ void *src_buf;
+ u32 src_buf_sz;
+ u32 src_buf_offset;
+
+ bool is_video_encoder;
+ void *ctx;
+
+ __be16 stream_id;
+
+ __be16 es_pid;
+
+ void *(*encode)(struct vidtv_encoder *e);
+
+ u32 (*clear)(struct vidtv_encoder *e);
+
+ struct vidtv_encoder *sync;
+
+ u32 sampling_rate_hz;
+
+ void (*last_sample_cb)(u32 sample_no);
+
+ void (*destroy)(struct vidtv_encoder *e);
+
+ struct vidtv_encoder *next;
+};
+
+#endif /* VIDTV_ENCODER_H */
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.c b/drivers/media/test-drivers/vidtv/vidtv_mux.c
new file mode 100644
index 000000000000..082740ae9d44
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the multiplexer logic for TS packets from different
+ * elementary streams
+ *
+ * Loosely based on libavcodec/mpegtsenc.c
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/dev_printk.h>
+#include <linux/ratelimit.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+
+#include "vidtv_mux.h"
+#include "vidtv_ts.h"
+#include "vidtv_pes.h"
+#include "vidtv_encoder.h"
+#include "vidtv_channel.h"
+#include "vidtv_common.h"
+#include "vidtv_psi.h"
+
+static struct vidtv_mux_pid_ctx
+*vidtv_mux_get_pid_ctx(struct vidtv_mux *m, u16 pid)
+{
+ struct vidtv_mux_pid_ctx *ctx;
+
+ hash_for_each_possible(m->pid_ctx, ctx, h, pid)
+ if (ctx->pid == pid)
+ return ctx;
+ return NULL;
+}
+
+static struct vidtv_mux_pid_ctx
+*vidtv_mux_create_pid_ctx_once(struct vidtv_mux *m, u16 pid)
+{
+ struct vidtv_mux_pid_ctx *ctx;
+
+ ctx = vidtv_mux_get_pid_ctx(m, pid);
+
+ if (ctx)
+ goto end;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->pid = pid;
+ ctx->cc = 0;
+ hash_add(m->pid_ctx, &ctx->h, pid);
+
+end:
+ return ctx;
+}
+
+static void vidtv_mux_pid_ctx_init(struct vidtv_mux *m)
+{
+ struct vidtv_psi_table_pat_program *p = m->si.pat->program;
+ u16 pid;
+
+ hash_init(m->pid_ctx);
+ /* push the pcr pid ctx */
+ vidtv_mux_create_pid_ctx_once(m, m->pcr_pid);
+ /* push the null packet pid ctx */
+ vidtv_mux_create_pid_ctx_once(m, TS_NULL_PACKET_PID);
+ /* push the PAT pid ctx */
+ vidtv_mux_create_pid_ctx_once(m, VIDTV_PAT_PID);
+ /* push the SDT pid ctx */
+ vidtv_mux_create_pid_ctx_once(m, VIDTV_SDT_PID);
+
+ /* add a ctx for all PMT sections */
+ while (p) {
+ pid = vidtv_psi_get_pat_program_pid(p);
+ vidtv_mux_create_pid_ctx_once(m, pid);
+ p = p->next;
+ }
+}
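+
+/*
+ * After vidtv_mux_pid_ctx_init() runs there is one continuity-counter
+ * context per PID carried by the mux: the PCR PID, the NULL packet PID,
+ * the PAT and SDT PIDs, and one per PMT section listed in the PAT.
+ */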
+
+static void vidtv_mux_pid_ctx_destroy(struct vidtv_mux *m)
+{
+ int bkt;
+ struct vidtv_mux_pid_ctx *ctx;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(m->pid_ctx, bkt, tmp, ctx, h) {
+ hash_del(&ctx->h);
+ kfree(ctx);
+ }
+}
+
+static void vidtv_mux_update_clk(struct vidtv_mux *m)
+{
+ /* call this at every thread iteration */
+ u64 elapsed_time;
+
+ m->timing.past_jiffies = m->timing.current_jiffies;
+ m->timing.current_jiffies = get_jiffies_64();
+
+ elapsed_time = jiffies_to_usecs(m->timing.current_jiffies -
+ m->timing.past_jiffies);
+
+ /* update the 27 MHz clock proportionally to the elapsed time */
+ m->timing.clk += (CLOCK_UNIT_27MHZ / USEC_PER_SEC) * elapsed_time;
+}
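+
+/*
+ * Worked example: CLOCK_UNIT_27MHZ / USEC_PER_SEC = 27 ticks per
+ * microsecond, so an iteration that took 10000 us (VIDTV_SLEEP_USECS)
+ * advances the PCR base clock by 270000 ticks.
+ */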
+
+static u32 vidtv_mux_push_si(struct vidtv_mux *m)
+{
+ u32 initial_offset = m->mux_buf_offset;
+
+ struct vidtv_mux_pid_ctx *pat_ctx;
+ struct vidtv_mux_pid_ctx *pmt_ctx;
+ struct vidtv_mux_pid_ctx *sdt_ctx;
+
+ struct vidtv_psi_pat_write_args pat_args = {};
+ struct vidtv_psi_pmt_write_args pmt_args = {};
+ struct vidtv_psi_sdt_write_args sdt_args = {};
+
+ u32 nbytes; /* the number of bytes written by this function */
+ u16 pmt_pid;
+ u32 i;
+
+ pat_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_PAT_PID);
+ sdt_ctx = vidtv_mux_get_pid_ctx(m, VIDTV_SDT_PID);
+
+ pat_args.buf = m->mux_buf;
+ pat_args.offset = m->mux_buf_offset;
+ pat_args.pat = m->si.pat;
+ pat_args.buf_sz = m->mux_buf_sz;
+ pat_args.continuity_counter = &pat_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_pat_write_into(pat_args);
+
+ for (i = 0; i < m->si.pat->programs; ++i) {
+ pmt_pid = vidtv_psi_pmt_get_pid(m->si.pmt_secs[i],
+ m->si.pat);
+
+ if (pmt_pid > TS_LAST_VALID_PID) {
+ dev_warn_ratelimited(m->dev,
+ "PID: %d not found\n", pmt_pid);
+ continue;
+ }
+
+ pmt_ctx = vidtv_mux_get_pid_ctx(m, pmt_pid);
+
+ pmt_args.buf = m->mux_buf;
+ pmt_args.offset = m->mux_buf_offset;
+ pmt_args.pmt = m->si.pmt_secs[i];
+ pmt_args.pid = pmt_pid;
+ pmt_args.buf_sz = m->mux_buf_sz;
+ pmt_args.continuity_counter = &pmt_ctx->cc;
+ pmt_args.pcr_pid = m->pcr_pid;
+
+ /* write each section into buffer */
+ m->mux_buf_offset += vidtv_psi_pmt_write_into(pmt_args);
+ }
+
+ sdt_args.buf = m->mux_buf;
+ sdt_args.offset = m->mux_buf_offset;
+ sdt_args.sdt = m->si.sdt;
+ sdt_args.buf_sz = m->mux_buf_sz;
+ sdt_args.continuity_counter = &sdt_ctx->cc;
+
+ m->mux_buf_offset += vidtv_psi_sdt_write_into(sdt_args);
+
+ nbytes = m->mux_buf_offset - initial_offset;
+
+ m->num_streamed_si++;
+
+ return nbytes;
+}
+
+static u32 vidtv_mux_push_pcr(struct vidtv_mux *m)
+{
+ struct pcr_write_args args = {};
+ struct vidtv_mux_pid_ctx *ctx;
+ u32 nbytes = 0;
+
+ ctx = vidtv_mux_get_pid_ctx(m, m->pcr_pid);
+ args.dest_buf = m->mux_buf;
+ args.pid = m->pcr_pid;
+ args.buf_sz = m->mux_buf_sz;
+ args.continuity_counter = &ctx->cc;
+
+ /* the 27 MHz clock will feed both parts of the PCR bitfield */
+ args.pcr = m->timing.clk;
+
+ nbytes += vidtv_ts_pcr_write_into(args);
+ m->mux_buf_offset += nbytes;
+
+ m->num_streamed_pcr++;
+
+ return nbytes;
+}
+
+static bool vidtv_mux_should_push_pcr(struct vidtv_mux *m)
+{
+ u64 next_pcr_at;
+
+ if (m->num_streamed_pcr == 0)
+ return true;
+
+ next_pcr_at = m->timing.start_jiffies +
+ usecs_to_jiffies(m->num_streamed_pcr *
+ m->timing.pcr_period_usecs);
+
+ return time_after64(m->timing.current_jiffies, next_pcr_at);
+}
+
+static bool vidtv_mux_should_push_si(struct vidtv_mux *m)
+{
+ u64 next_si_at;
+
+ if (m->num_streamed_si == 0)
+ return true;
+
+ next_si_at = m->timing.start_jiffies +
+ usecs_to_jiffies(m->num_streamed_si *
+ m->timing.si_period_usecs);
+
+ return time_after64(m->timing.current_jiffies, next_si_at);
+}
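
Note that both predicates derive the next deadline from the stream start time and the count of packets already sent, not from the previous send time, so scheduling jitter in one iteration does not accumulate into long-term drift. A sketch of the pattern, with hypothetical names:

	/* Sketch: drift-free periodic deadline keyed off the start time. */
	static bool deadline_passed(u64 start_jiffies, u64 now_jiffies,
				    u64 nr_sent, u64 period_usecs)
	{
		u64 next_at = start_jiffies +
			      usecs_to_jiffies(nr_sent * period_usecs);

		return time_after64(now_jiffies, next_at);
	}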
+
+static u32 vidtv_mux_packetize_access_units(struct vidtv_mux *m,
+ struct vidtv_encoder *e)
+{
+ u32 nbytes = 0;
+
+ struct pes_write_args args = {};
+ u32 initial_offset = m->mux_buf_offset;
+ struct vidtv_access_unit *au = e->access_units;
+
+ u8 *buf = NULL;
+ struct vidtv_mux_pid_ctx *pid_ctx = vidtv_mux_create_pid_ctx_once(m,
+ be16_to_cpu(e->es_pid));
+
+ args.dest_buf = m->mux_buf;
+ args.dest_buf_sz = m->mux_buf_sz;
+ args.pid = be16_to_cpu(e->es_pid);
+ args.encoder_id = e->id;
+ args.continuity_counter = &pid_ctx->cc;
+ args.stream_id = be16_to_cpu(e->stream_id);
+ args.send_pts = true;
+
+ while (au) {
+ buf = e->encoder_buf + au->offset;
+ args.from = buf;
+ args.access_unit_len = au->nbytes;
+ args.dest_offset = m->mux_buf_offset;
+ args.pts = au->pts;
+ args.pcr = m->timing.clk;
+
+ m->mux_buf_offset += vidtv_pes_write_into(args);
+
+ au = au->next;
+ }
+
+ /*
+ * clear the encoder state once the ES data has been written to the mux
+ * buffer
+ */
+ e->clear(e);
+
+ nbytes = m->mux_buf_offset - initial_offset;
+ return nbytes;
+}
+
+static u32 vidtv_mux_poll_encoders(struct vidtv_mux *m)
+{
+ u32 nbytes = 0;
+ u32 au_nbytes;
+ struct vidtv_channel *cur_chnl = m->channels;
+ struct vidtv_encoder *e = NULL;
+
+ while (cur_chnl) {
+ e = cur_chnl->encoders;
+
+ while (e) {
+ e->encode(e);
+ /* get the TS packets into the mux buffer */
+ au_nbytes = vidtv_mux_packetize_access_units(m, e);
+ /* mux_buf_offset was already advanced by the call above */
+ nbytes += au_nbytes;
+ /* grab next encoder */
+ e = e->next;
+ }
+
+ /* grab the next channel */
+ cur_chnl = cur_chnl->next;
+ }
+
+ return nbytes;
+}
+
+static u32 vidtv_mux_pad_with_nulls(struct vidtv_mux *m, u32 npkts)
+{
+ struct null_packet_write_args args = {};
+ u32 initial_offset = m->mux_buf_offset;
+ u32 nbytes; /* the number of bytes written by this function */
+ u32 i;
+ struct vidtv_mux_pid_ctx *ctx;
+
+ ctx = vidtv_mux_get_pid_ctx(m, TS_NULL_PACKET_PID);
+
+ args.dest_buf = m->mux_buf;
+ args.buf_sz = m->mux_buf_sz;
+ args.continuity_counter = &ctx->cc;
+ args.dest_offset = m->mux_buf_offset;
+
+ for (i = 0; i < npkts; ++i) {
+ m->mux_buf_offset += vidtv_ts_null_write_into(args);
+ args.dest_offset = m->mux_buf_offset;
+ }
+
+ nbytes = m->mux_buf_offset - initial_offset;
+
+ /* sanity check */
+ if (nbytes != npkts * TS_PACKET_LEN)
+ dev_err_ratelimited(m->dev, "%d != %d\n",
+ nbytes, npkts * TS_PACKET_LEN);
+
+ return nbytes;
+}
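
Every null packet is a full 188-byte TS packet, which is what the sanity check above relies on. If the padding count were derived from a byte budget instead of the fixed 256 packets used by the caller, it could be computed as in this sketch (a hypothetical helper, not part of the driver):

	/* Sketch: null packets needed to top an iteration up to a byte budget. */
	static u32 nulls_needed(u32 bytes_written, u32 bytes_budget)
	{
		if (bytes_written >= bytes_budget)
			return 0;

		return (bytes_budget - bytes_written) / TS_PACKET_LEN;
	}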
+
+static void vidtv_mux_clear(struct vidtv_mux *m)
+{
+ /* clear the packets currently in the mux */
+ memset(m->mux_buf, 0, m->mux_buf_sz * sizeof(*m->mux_buf));
+ /* point to the beginning of the buffer again */
+ m->mux_buf_offset = 0;
+}
+
+#define ERR_RATE 10000000
+static void vidtv_mux_tick(struct work_struct *work)
+{
+ struct vidtv_mux *m = container_of(work,
+ struct vidtv_mux,
+ mpeg_thread);
+ struct dtv_frontend_properties *c = &m->fe->dtv_property_cache;
+ u32 nbytes;
+ u32 npkts;
+ u32 tot_bits = 0;
+
+ while (m->streaming) {
+ nbytes = 0;
+
+ vidtv_mux_update_clk(m);
+
+ if (vidtv_mux_should_push_pcr(m))
+ nbytes += vidtv_mux_push_pcr(m);
+
+ if (vidtv_mux_should_push_si(m))
+ nbytes += vidtv_mux_push_si(m);
+
+ nbytes += vidtv_mux_poll_encoders(m);
+ nbytes += vidtv_mux_pad_with_nulls(m, 256);
+
+ npkts = nbytes / TS_PACKET_LEN;
+
+ /* if the buffer is not aligned there is a bug somewhere */
+ if (nbytes % TS_PACKET_LEN)
+ dev_err_ratelimited(m->dev, "Misaligned buffer\n");
+
+ if (m->on_new_packets_available_cb)
+ m->on_new_packets_available_cb(m->priv,
+ m->mux_buf,
+ npkts);
+
+ vidtv_mux_clear(m);
+
+ /*
+ * Update the byte and packet counts in the DVBv5 stats.
+ *
+ * For now, both pre and post bit counts are identical,
+ * but the post BER count can be lower than the pre BER count
+ * if the error correction logic discards packets.
+ */
+ c->pre_bit_count.stat[0].uvalue = nbytes * 8;
+ c->post_bit_count.stat[0].uvalue = nbytes * 8;
+ c->block_count.stat[0].uvalue += npkts;
+
+ /*
+ * Even without any visible errors for the user, the pre-BER
+ * stats usually have an error range of up to 1E-6. So,
+ * add some bit error increments to it.
+ *
+ * Please notice that this is a poor man's implementation,
+ * as it will produce one corrected bit error every time
+ * ceil(total bytes / ERR_RATE) is incremented, without
+ * any sort of (pseudo-)randomness.
+ */
+ tot_bits += nbytes * 8;
+ if (tot_bits > ERR_RATE) {
+ c->pre_bit_error.stat[0].uvalue++;
+ tot_bits -= ERR_RATE;
+ }
+
+ usleep_range(VIDTV_SLEEP_USECS, VIDTV_MAX_SLEEP_USECS);
+ }
+}
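
With ERR_RATE set to 1E7, the accumulate-and-subtract logic above injects exactly one corrected bit error per 10,000,000 streamed bits, i.e. a constant pre-BER of 1E-7. The same pattern in isolation, as a sketch:

	/* Sketch: deterministically emit one error event per 'err_rate' bits. */
	static u32 count_bit_errors(u64 *acc_bits, u64 new_bits, u64 err_rate)
	{
		u32 errors = 0;

		*acc_bits += new_bits;
		while (*acc_bits > err_rate) {
			errors++;
			*acc_bits -= err_rate;
		}

		return errors;
	}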
+
+void vidtv_mux_start_thread(struct vidtv_mux *m)
+{
+ if (m->streaming) {
+ dev_warn_ratelimited(m->dev, "Already streaming. Skipping.\n");
+ return;
+ }
+
+ m->streaming = true;
+ m->timing.start_jiffies = get_jiffies_64();
+ schedule_work(&m->mpeg_thread);
+}
+
+void vidtv_mux_stop_thread(struct vidtv_mux *m)
+{
+ if (m->streaming) {
+ m->streaming = false; /* thread will quit */
+ cancel_work_sync(&m->mpeg_thread);
+ }
+}
+
+struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ struct device *dev,
+ struct vidtv_mux_init_args args)
+{
+ struct vidtv_mux *m = kzalloc(sizeof(*m), GFP_KERNEL);
+
+ if (!m)
+ return NULL;
+
+ m->dev = dev;
+ m->fe = fe;
+ m->timing.pcr_period_usecs = args.pcr_period_usecs;
+ m->timing.si_period_usecs = args.si_period_usecs;
+
+ m->mux_rate_kbytes_sec = args.mux_rate_kbytes_sec;
+
+ m->on_new_packets_available_cb = args.on_new_packets_available_cb;
+
+ m->mux_buf = vzalloc(args.mux_buf_sz);
+ if (!m->mux_buf) {
+ kfree(m);
+ return NULL;
+ }
+ m->mux_buf_sz = args.mux_buf_sz;
+
+ m->pcr_pid = args.pcr_pid;
+ m->transport_stream_id = args.transport_stream_id;
+ m->priv = args.priv;
+ m->timing.current_jiffies = get_jiffies_64();
+
+ if (args.channels)
+ m->channels = args.channels;
+ else
+ vidtv_channels_init(m);
+
+ /* will alloc data for pmt_sections after initializing pat */
+ vidtv_channel_si_init(m);
+
+ INIT_WORK(&m->mpeg_thread, vidtv_mux_tick);
+
+ vidtv_mux_pid_ctx_init(m);
+
+ return m;
+}
+
+void vidtv_mux_destroy(struct vidtv_mux *m)
+{
+ vidtv_mux_stop_thread(m);
+ vidtv_mux_pid_ctx_destroy(m);
+ vidtv_channel_si_destroy(m);
+ vidtv_channels_destroy(m);
+ vfree(m->mux_buf);
+ kfree(m);
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_mux.h b/drivers/media/test-drivers/vidtv/vidtv_mux.h
new file mode 100644
index 000000000000..2caa60623e97
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_mux.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the muxer logic for TS packets from different
+ * elementary streams.
+ *
+ * Loosely based on libavcodec/mpegtsenc.c
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_MUX_H
+#define VIDTV_MUX_H
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/workqueue.h>
+#include <media/dvb_frontend.h>
+
+#include "vidtv_psi.h"
+
+/**
+ * struct vidtv_mux_timing - Timing related information
+ *
+ * This is used to decide when PCR or PSI packets should be sent. This will also
+ * provide storage for the clock, which is used to compute the value for the PCR.
+ *
+ * @start_jiffies: The value of 'jiffies' when we started the mux thread.
+ * @current_jiffies: The value of 'jiffies' for the current iteration.
+ * @past_jiffies: The value of 'jiffies' for the past iteration.
+ * @clk: A 27 MHz clock from which we will drive the PCR. Updated proportionally
+ * on every iteration.
+ * @pcr_period_usecs: How often we should send PCR packets.
+ * @si_period_usecs: How often we should send PSI packets.
+ */
+struct vidtv_mux_timing {
+ u64 start_jiffies;
+ u64 current_jiffies;
+ u64 past_jiffies;
+
+ u64 clk;
+
+ u64 pcr_period_usecs;
+ u64 si_period_usecs;
+};
+
+/**
+ * struct vidtv_mux_si - Store the PSI context.
+ *
+ * This is used to store the PAT, PMT sections and SDT in use by the muxer.
+ *
+ * The muxer acquires these by looking into the hardcoded channels in
+ * vidtv_channel and then periodically sends the TS packets for them.
+ *
+ * @pat: The PAT in use by the muxer.
+ * @pmt_secs: The PMT sections in use by the muxer. One for each program in the PAT.
+ * @sdt: The SDT in use by the muxer.
+ */
+struct vidtv_mux_si {
+ /* the SI tables */
+ struct vidtv_psi_table_pat *pat;
+ struct vidtv_psi_table_pmt **pmt_secs; /* the PMT sections */
+ struct vidtv_psi_table_sdt *sdt;
+};
+
+/**
+ * struct vidtv_mux_pid_ctx - Store the context for a given TS PID.
+ * @pid: The TS PID.
+ * @cc: The continuity counter for this PID. It is incremented on every TS
+ * packet and it will wrap around after 0xf. If the decoder notices a sudden jump in
+ * this counter this will trigger a discontinuity state.
+ * @h: This is embedded in a hash table, mapping pid -> vidtv_mux_pid_ctx
+ */
+struct vidtv_mux_pid_ctx {
+ u16 pid;
+ u8 cc; /* continuity counter */
+ struct hlist_node h;
+};
+
+/**
+ * struct vidtv_mux - A muxer abstraction loosely based on libavcodec/mpegtsenc.c
+ * @mux_rate_kbytes_sec: The mux rate for the TS, in kbytes per second.
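+ * @fe: The DVB frontend whose DVBv5 statistics are updated by the mux loop.
+ * @dev: The device, used for logging.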
+ * @timing: Keeps track of timing related information.
+ * @pid_ctx: A hash table to keep track of per-PID metadata.
+ * @on_new_packets_available_cb: A callback to inform of new TS packets ready.
+ * @mux_buf: A pointer to a buffer for this muxer. TS packets are stored there
+ * and then passed on to the bridge driver.
+ * @mux_buf_sz: The size for 'mux_buf'.
+ * @mux_buf_offset: The current offset into 'mux_buf'.
+ * @channels: The channels associated with this muxer.
+ * @si: Keeps track of the PSI context.
+ * @num_streamed_pcr: Number of PCR packets streamed.
+ * @num_streamed_si: The number of PSI packets streamed.
+ * @mpeg_thread: Thread responsible for the muxer loop.
+ * @streaming: Whether 'mpeg_thread' is running.
+ * @pcr_pid: The TS PID used for the PCR packets. All channels will share the
+ * same PCR.
+ * @transport_stream_id: The transport stream ID.
+ * @priv: Private data.
+ */
+struct vidtv_mux {
+ struct dvb_frontend *fe;
+ struct device *dev;
+
+ struct vidtv_mux_timing timing;
+
+ u32 mux_rate_kbytes_sec;
+
+ DECLARE_HASHTABLE(pid_ctx, 3);
+
+ void (*on_new_packets_available_cb)(void *priv, u8 *buf, u32 npackets);
+
+ u8 *mux_buf;
+ u32 mux_buf_sz;
+ u32 mux_buf_offset;
+
+ struct vidtv_channel *channels;
+
+ struct vidtv_mux_si si;
+ u64 num_streamed_pcr;
+ u64 num_streamed_si;
+
+ struct work_struct mpeg_thread;
+ bool streaming;
+
+ u16 pcr_pid;
+ u16 transport_stream_id;
+ void *priv;
+};
+
+/**
+ * struct vidtv_mux_init_args - Arguments used to initialize the muxer.
+ * @mux_rate_kbytes_sec: The mux rate for the TS, in kbytes per second.
+ * @on_new_packets_available_cb: A callback to inform of new TS packets ready.
+ * @mux_buf_sz: The size for 'mux_buf'.
+ * @pcr_period_usecs: How often we should send PCR packets.
+ * @si_period_usecs: How often we should send PSI packets.
+ * @pcr_pid: The TS PID used for the PCR packets. All channels will share the
+ * same PCR.
+ * @transport_stream_id: The transport stream ID.
+ * @channels: An optional list of channels to use.
+ * @priv: Private data.
+ */
+struct vidtv_mux_init_args {
+ u32 mux_rate_kbytes_sec;
+ void (*on_new_packets_available_cb)(void *priv, u8 *buf, u32 npackets);
+ u32 mux_buf_sz;
+ u64 pcr_period_usecs;
+ u64 si_period_usecs;
+ u16 pcr_pid;
+ u16 transport_stream_id;
+ struct vidtv_channel *channels;
+ void *priv;
+};
+
+struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
+ struct device *dev,
+ struct vidtv_mux_init_args args);
+void vidtv_mux_destroy(struct vidtv_mux *m);
+
+void vidtv_mux_start_thread(struct vidtv_mux *m);
+void vidtv_mux_stop_thread(struct vidtv_mux *m);
+
+#endif //VIDTV_MUX_H
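
For reference, a minimal sketch of how a bridge driver might drive this API end to end; 'fe', 'dev' and 'on_pkts' are assumed to exist in the caller, and all numeric values are illustrative only:

	struct vidtv_mux_init_args args = {
		.mux_rate_kbytes_sec = 4096,
		.on_new_packets_available_cb = on_pkts, /* hypothetical callback */
		.mux_buf_sz = 512 * 188, /* a multiple of TS_PACKET_LEN */
		.pcr_period_usecs = 40000,
		.si_period_usecs = 40000,
		.pcr_pid = 0x200,
		.transport_stream_id = 0x744,
		.priv = NULL, /* handed back through the callback */
	};
	struct vidtv_mux *m;

	m = vidtv_mux_init(fe, dev, args);
	if (!m)
		return; /* allocation failed */

	vidtv_mux_start_thread(m);
	/* ...stream for a while... */
	vidtv_mux_stop_thread(m);
	vidtv_mux_destroy(m);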
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.c b/drivers/media/test-drivers/vidtv/vidtv_pes.c
new file mode 100644
index 000000000000..1c75f88070e9
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the logic to translate the ES data for one access unit
+ * from an encoder into MPEG TS packets. It does so by first encapsulating it
+ * with a PES header and then splitting it into TS packets.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <asm/byteorder.h>
+
+#include "vidtv_pes.h"
+#include "vidtv_common.h"
+#include "vidtv_encoder.h"
+#include "vidtv_ts.h"
+
+#define PRIVATE_STREAM_1_ID 0xbd /* private_stream_1. See SMPTE 302M-2007 p.6 */
+#define PES_HEADER_MAX_STUFFING_BYTES 32
+#define PES_TS_HEADER_MAX_STUFFING_BYTES 182
+
+static u32 vidtv_pes_op_get_len(bool send_pts, bool send_dts)
+{
+ u32 len = 0;
+
+ /* the flags must always be sent */
+ len += sizeof(struct vidtv_pes_optional);
+
+ /* From all optionals, we might send these for now */
+ if (send_pts && send_dts)
+ len += sizeof(struct vidtv_pes_optional_pts_dts);
+ else if (send_pts)
+ len += sizeof(struct vidtv_pes_optional_pts);
+
+ return len;
+}
+
+#define SIZE_PCR (6 + sizeof(struct vidtv_mpeg_ts_adaption))
+
+static u32 vidtv_pes_h_get_len(bool send_pts, bool send_dts)
+{
+ u32 len = 0;
+
+ /* PES header length notwithstanding stuffing bytes */
+
+ len += sizeof(struct vidtv_mpeg_pes);
+ len += vidtv_pes_op_get_len(send_pts, send_dts);
+
+ return len;
+}
+
+static u32 vidtv_pes_write_header_stuffing(struct pes_header_write_args args)
+{
+ /*
+ * This is a fixed 8-bit value equal to '0xFF' that can be inserted
+ * by the encoder, for example to meet the requirements of the channel.
+ * It is discarded by the decoder. No more than 32 stuffing bytes shall
+ * be present in one PES packet header.
+ */
+ if (args.n_pes_h_s_bytes > PES_HEADER_MAX_STUFFING_BYTES) {
+ pr_warn_ratelimited("More than %d stuffing bytes in PES packet header\n",
+ PES_HEADER_MAX_STUFFING_BYTES);
+ args.n_pes_h_s_bytes = PES_HEADER_MAX_STUFFING_BYTES;
+ }
+
+ return vidtv_memset(args.dest_buf,
+ args.dest_offset,
+ args.dest_buf_sz,
+ TS_FILL_BYTE,
+ args.n_pes_h_s_bytes);
+}
+
+static u32 vidtv_pes_write_pts_dts(struct pes_header_write_args args)
+{
+ u32 nbytes = 0; /* the number of bytes written by this function */
+
+ struct vidtv_pes_optional_pts pts = {};
+ struct vidtv_pes_optional_pts_dts pts_dts = {};
+ void *op = NULL;
+ size_t op_sz = 0;
+ u64 mask1;
+ u64 mask2;
+ u64 mask3;
+
+ if (!args.send_pts && args.send_dts)
+ return 0;
+
+ mask1 = GENMASK_ULL(32, 30);
+ mask2 = GENMASK_ULL(29, 15);
+ mask3 = GENMASK_ULL(14, 0);
+
+ /* see ISO/IEC 13818-1 : 2000 p. 32 */
+ if (args.send_pts && args.send_dts) {
+ pts_dts.pts1 = (0x3 << 4) | ((args.pts & mask1) >> 29) | 0x1;
+ pts_dts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
+ pts_dts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+
+ pts_dts.dts1 = (0x1 << 4) | ((args.dts & mask1) >> 29) | 0x1;
+ pts_dts.dts2 = cpu_to_be16(((args.dts & mask2) >> 14) | 0x1);
+ pts_dts.dts3 = cpu_to_be16(((args.dts & mask3) << 1) | 0x1);
+
+ op = &pts_dts;
+ op_sz = sizeof(pts_dts);
+
+ } else if (args.send_pts) {
+ pts.pts1 = (0x1 << 5) | ((args.pts & mask1) >> 29) | 0x1;
+ pts.pts2 = cpu_to_be16(((args.pts & mask2) >> 14) | 0x1);
+ pts.pts3 = cpu_to_be16(((args.pts & mask3) << 1) | 0x1);
+
+ op = &pts;
+ op_sz = sizeof(pts);
+ }
+
+ /* copy PTS/DTS optional */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ op,
+ op_sz);
+
+ return nbytes;
+}
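
The GENMASK arithmetic above implements the standard 33-bit timestamp encoding: bit groups 32..30, 29..15 and 14..0 are each followed by a marker bit set to 1, and the first byte carries a 4-bit prefix ('0010' for PTS only; '0011' and '0001' for the PTS+DTS pair). A byte-oriented sketch of the PTS-only case:

	/* Sketch: pack a 33-bit PTS into the 5-byte '0010'-prefixed format. */
	static void pack_pts(u8 out[5], u64 pts)
	{
		out[0] = (0x2 << 4) | (((pts >> 30) & 0x7) << 1) | 0x1;
		out[1] = (pts >> 22) & 0xff;
		out[2] = (((pts >> 15) & 0x7f) << 1) | 0x1;
		out[3] = (pts >> 7) & 0xff;
		out[4] = ((pts & 0x7f) << 1) | 0x1;
	}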
+
+static u32 vidtv_pes_write_h(struct pes_header_write_args args)
+{
+ u32 nbytes = 0; /* the number of bytes written by this function */
+
+ struct vidtv_mpeg_pes pes_header = {};
+ struct vidtv_pes_optional pes_optional = {};
+ struct pes_header_write_args pts_dts_args = args;
+ u32 stream_id = (args.encoder_id == S302M) ? PRIVATE_STREAM_1_ID : args.stream_id;
+ u16 pes_opt_bitfield = 0x01 << 15;
+
+ pes_header.bitfield = cpu_to_be32((PES_START_CODE_PREFIX << 8) | stream_id);
+
+ pes_header.length = cpu_to_be16(vidtv_pes_op_get_len(args.send_pts,
+ args.send_dts) +
+ args.access_unit_len);
+
+ if (args.send_pts && args.send_dts)
+ pes_opt_bitfield |= (0x3 << 6);
+ else if (args.send_pts)
+ pes_opt_bitfield |= (0x1 << 7);
+
+ pes_optional.bitfield = cpu_to_be16(pes_opt_bitfield);
+ pes_optional.length = vidtv_pes_op_get_len(args.send_pts, args.send_dts) +
+ args.n_pes_h_s_bytes -
+ sizeof(struct vidtv_pes_optional);
+
+ /* copy header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ &pes_header,
+ sizeof(pes_header));
+
+ /* copy optional header bits */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ &pes_optional,
+ sizeof(pes_optional));
+
+ /* copy the timing information */
+ pts_dts_args.dest_offset = args.dest_offset + nbytes;
+ nbytes += vidtv_pes_write_pts_dts(pts_dts_args);
+
+ /* write any PES header stuffing */
+ nbytes += vidtv_pes_write_header_stuffing(args);
+
+ return nbytes;
+}
+
+static u32 vidtv_pes_write_pcr_bits(u8 *to, u32 to_offset, u64 pcr)
+{
+ /* Same approach as ffmpeg. PCR is a counter driven by a 27 MHz clock */
+ u64 div;
+ u64 rem;
+ u8 *buf = to + to_offset;
+ u64 pcr_low;
+ u64 pcr_high;
+
+ div = div64_u64_rem(pcr, 300, &rem);
+
+ pcr_low = rem; /* pcr_low = pcr % 300 */
+ pcr_high = div; /* pcr_high = pcr / 300 */
+
+ *buf++ = pcr_high >> 25;
+ *buf++ = pcr_high >> 17;
+ *buf++ = pcr_high >> 9;
+ *buf++ = pcr_high >> 1;
+ *buf++ = pcr_high << 7 | pcr_low >> 8 | 0x7e;
+ *buf++ = pcr_low;
+
+ return 6;
+}
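
The 27 MHz counter is transmitted as a 33-bit base (the 90 kHz part, pcr / 300) plus a 9-bit extension (pcr % 300), with six reserved bits (the 0x7e above) between them. A sketch of the inverse operation, recovering the counter from the six wire bytes:

	/* Sketch: recover the 27 MHz PCR value from its 6-byte encoding. */
	static u64 unpack_pcr(const u8 b[6])
	{
		u64 base = ((u64)b[0] << 25) | ((u64)b[1] << 17) |
			   ((u64)b[2] << 9) | ((u64)b[3] << 1) | (b[4] >> 7);
		u64 ext = ((u64)(b[4] & 0x1) << 8) | b[5];

		return base * 300 + ext;
	}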
+
+static u32 vidtv_pes_write_stuffing(struct pes_ts_header_write_args *args,
+ u32 dest_offset, bool need_pcr,
+ u64 *last_pcr)
+{
+ struct vidtv_mpeg_ts_adaption ts_adap = {};
+ int stuff_nbytes;
+ u32 nbytes = 0;
+
+ if (!args->n_stuffing_bytes)
+ return 0;
+
+ ts_adap.random_access = 1;
+
+ /* length _immediately_ following 'adaptation_field_length' */
+ if (need_pcr) {
+ ts_adap.PCR = 1;
+ ts_adap.length = SIZE_PCR;
+ } else {
+ ts_adap.length = sizeof(ts_adap);
+ }
+ stuff_nbytes = args->n_stuffing_bytes - ts_adap.length;
+
+ ts_adap.length -= sizeof(ts_adap.length);
+
+ if (unlikely(stuff_nbytes < 0))
+ stuff_nbytes = 0;
+
+ ts_adap.length += stuff_nbytes;
+
+ /* write the adap after the TS header */
+ nbytes += vidtv_memcpy(args->dest_buf,
+ dest_offset + nbytes,
+ args->dest_buf_sz,
+ &ts_adap,
+ sizeof(ts_adap));
+
+ /* write the optional PCR */
+ if (need_pcr) {
+ nbytes += vidtv_pes_write_pcr_bits(args->dest_buf,
+ dest_offset + nbytes,
+ args->pcr);
+
+ *last_pcr = args->pcr;
+ }
+
+ /* write the stuffing bytes, if any are left */
+ if (stuff_nbytes)
+ nbytes += vidtv_memset(args->dest_buf,
+ dest_offset + nbytes,
+ args->dest_buf_sz,
+ TS_FILL_BYTE,
+ stuff_nbytes);
+
+ /*
+ * n_stuffing_bytes contains a pre-calculated value for the
+ * amount of data that this function should write, derived from
+ * vidtv_pes_h_get_len(). If something went wrong, print a warning.
+ */
+ if (nbytes != args->n_stuffing_bytes)
+ pr_warn_ratelimited("write size was %d, expected %d\n",
+ nbytes, args->n_stuffing_bytes);
+
+ return nbytes;
+}
+
+static u32 vidtv_pes_write_ts_h(struct pes_ts_header_write_args args,
+ bool need_pcr, u64 *last_pcr)
+{
+ /* number of bytes written by this function */
+ u32 nbytes = 0;
+ struct vidtv_mpeg_ts ts_header = {};
+ u16 payload_start = !args.wrote_pes_header;
+
+ ts_header.sync_byte = TS_SYNC_BYTE;
+ ts_header.bitfield = cpu_to_be16((payload_start << 14) | args.pid);
+ ts_header.scrambling = 0;
+ ts_header.adaptation_field = (args.n_stuffing_bytes) > 0;
+ ts_header.payload = (args.n_stuffing_bytes) < PES_TS_HEADER_MAX_STUFFING_BYTES;
+
+ ts_header.continuity_counter = *args.continuity_counter;
+
+ vidtv_ts_inc_cc(args.continuity_counter);
+
+ /* write the TS header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ &ts_header,
+ sizeof(ts_header));
+
+ /* write stuffing, if any */
+ nbytes += vidtv_pes_write_stuffing(&args, args.dest_offset + nbytes,
+ need_pcr, last_pcr);
+
+ return nbytes;
+}
+
+u32 vidtv_pes_write_into(struct pes_write_args args)
+{
+ u32 unaligned_bytes = (args.dest_offset % TS_PACKET_LEN);
+ struct pes_ts_header_write_args ts_header_args = {};
+ struct pes_header_write_args pes_header_args = {};
+ u32 remaining_len = args.access_unit_len;
+ bool wrote_pes_header = false;
+ u64 last_pcr = args.pcr;
+ bool need_pcr = true;
+ u32 available_space;
+ u32 payload_size;
+ u32 stuff_bytes;
+ u32 nbytes = 0;
+
+ if (unaligned_bytes) {
+ pr_warn_ratelimited("buffer is misaligned, while starting PES\n");
+
+ /* forcibly align and hope for the best */
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ TS_FILL_BYTE,
+ TS_PACKET_LEN - unaligned_bytes);
+ }
+
+ if (args.send_dts && !args.send_pts) {
+ pr_warn_ratelimited("forbidden value '01' for PTS_DTS flags\n");
+ args.send_pts = true;
+ args.pts = args.dts;
+ }
+
+ /* see SMPTE 302M clause 6.4 */
+ if (args.encoder_id == S302M) {
+ args.send_dts = false;
+ args.send_pts = true;
+ }
+
+ while (remaining_len) {
+ available_space = TS_PAYLOAD_LEN;
+ /*
+ * The amount of space initially available in the TS packet.
+ * If this is the beginning of the PES packet, take into account
+ * the space needed for the TS header _and_ for the PES header.
+ */
+ if (!wrote_pes_header)
+ available_space -= vidtv_pes_h_get_len(args.send_pts,
+ args.send_dts);
+
+ /*
+ * if the encoder has inserted stuffing bytes in the PES
+ * header, account for them.
+ */
+ available_space -= args.n_pes_h_s_bytes;
+
+ /* Take the extra adaptation into account if need to send PCR */
+ if (need_pcr) {
+ available_space -= SIZE_PCR;
+ stuff_bytes = SIZE_PCR;
+ } else {
+ stuff_bytes = 0;
+ }
+
+ /*
+ * how much of the _actual_ payload should be written in this
+ * packet.
+ */
+ if (remaining_len >= available_space) {
+ payload_size = available_space;
+ } else {
+ /* The last frame should ensure 188-byte TS packet alignment */
+ payload_size = remaining_len;
+ stuff_bytes += available_space - payload_size;
+
+ /*
+ * Ensure that the stuff bytes will be within the
+ * allowed range, decrementing the number of payload
+ * bytes to write if needed.
+ */
+ if (stuff_bytes > PES_TS_HEADER_MAX_STUFFING_BYTES) {
+ u32 tmp = stuff_bytes - PES_TS_HEADER_MAX_STUFFING_BYTES;
+
+ stuff_bytes = PES_TS_HEADER_MAX_STUFFING_BYTES;
+ payload_size -= tmp;
+ }
+ }
+
+ /* write ts header */
+ ts_header_args.dest_buf = args.dest_buf;
+ ts_header_args.dest_offset = args.dest_offset + nbytes;
+ ts_header_args.dest_buf_sz = args.dest_buf_sz;
+ ts_header_args.pid = args.pid;
+ ts_header_args.pcr = args.pcr;
+ ts_header_args.continuity_counter = args.continuity_counter;
+ ts_header_args.wrote_pes_header = wrote_pes_header;
+ ts_header_args.n_stuffing_bytes = stuff_bytes;
+
+ nbytes += vidtv_pes_write_ts_h(ts_header_args, need_pcr,
+ &last_pcr);
+
+ need_pcr = false;
+
+ if (!wrote_pes_header) {
+ /* write the PES header only once */
+ pes_header_args.dest_buf = args.dest_buf;
+
+ pes_header_args.dest_offset = args.dest_offset +
+ nbytes;
+
+ pes_header_args.dest_buf_sz = args.dest_buf_sz;
+ pes_header_args.encoder_id = args.encoder_id;
+ pes_header_args.send_pts = args.send_pts;
+ pes_header_args.pts = args.pts;
+ pes_header_args.send_dts = args.send_dts;
+ pes_header_args.dts = args.dts;
+ pes_header_args.stream_id = args.stream_id;
+ pes_header_args.n_pes_h_s_bytes = args.n_pes_h_s_bytes;
+ pes_header_args.access_unit_len = args.access_unit_len;
+
+ nbytes += vidtv_pes_write_h(pes_header_args);
+ wrote_pes_header = true;
+ }
+
+ /* write as much of the payload as we possibly can */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ args.from,
+ payload_size);
+
+ args.from += payload_size;
+
+ remaining_len -= payload_size;
+ }
+
+ return nbytes;
+}
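
As a rule of thumb for the loop above: each 188-byte TS packet carries at most TS_PAYLOAD_LEN (184) bytes after its 4-byte header, and the first packet of an access unit additionally pays for the PES header (plus the PCR adaptation field when one is due). Ignoring PCR and stuffing overhead, a rough packet count looks like this sketch:

	/*
	 * Sketch: rough TS packet count for one access unit. 'pes_h_len' would
	 * come from vidtv_pes_h_get_len(); PCR and stuffing overhead is ignored.
	 */
	static u32 estimate_ts_packets(u32 au_len, u32 pes_h_len)
	{
		return DIV_ROUND_UP(au_len + pes_h_len, TS_PAYLOAD_LEN);
	}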
diff --git a/drivers/media/test-drivers/vidtv/vidtv_pes.h b/drivers/media/test-drivers/vidtv/vidtv_pes.h
new file mode 100644
index 000000000000..0ea9e863024d
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_pes.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the logic to translate the ES data for one access unit
+ * from an encoder into MPEG TS packets. It does so by first encapsulating it
+ * with a PES header and then splitting it into TS packets.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_PES_H
+#define VIDTV_PES_H
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
+#include "vidtv_common.h"
+
+#define PES_MAX_LEN 65536 /* Set 'length' to 0 if greater. Only possible for video. */
+#define PES_START_CODE_PREFIX 0x001 /* 00 00 01 */
+
+/* Used when sending PTS, but not DTS */
+struct vidtv_pes_optional_pts {
+ u8 pts1;
+ __be16 pts2;
+ __be16 pts3;
+} __packed;
+
+/* Used when sending both PTS and DTS */
+struct vidtv_pes_optional_pts_dts {
+ u8 pts1;
+ __be16 pts2;
+ __be16 pts3;
+
+ u8 dts1;
+ __be16 dts2;
+ __be16 dts3;
+} __packed;
+
+/* PES optional flags */
+struct vidtv_pes_optional {
+ /*
+ * These flags show which components are actually
+ * present in the "optional fields" in the optional PES
+ * header and which are not
+ *
+ * u16 two:2; //0x2
+ * u16 PES_scrambling_control:2;
+ * u16 PES_priority:1;
+ * u16 data_alignment_indicator:1; // unused
+ * u16 copyright:1;
+ * u16 original_or_copy:1;
+ * u16 PTS_DTS:2;
+ * u16 ESCR:1;
+ * u16 ES_rate:1;
+ * u16 DSM_trick_mode:1;
+ * u16 additional_copy_info:1;
+ * u16 PES_CRC:1;
+ * u16 PES_extension:1;
+ */
+ __be16 bitfield;
+ u8 length;
+} __packed;
+
+/* The PES header */
+struct vidtv_mpeg_pes {
+ __be32 bitfield; /* packet_start_code_prefix:24, stream_id: 8 */
+ /* after this field until the end of the PES data payload */
+ __be16 length;
+ struct vidtv_pes_optional optional[];
+} __packed;
+
+/**
+ * struct pes_header_write_args - Arguments to write a PES header.
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @encoder_id: Encoder id (see vidtv_encoder.h)
+ * @send_pts: Should we send PTS?
+ * @pts: PTS value to send.
+ * @send_dts: Should we send DTS?
+ * @dts: DTS value to send.
+ * @stream_id: The stream id to use. Ex: Audio streams (0xc0-0xdf), Video
+ * streams (0xe0-0xef).
+ * @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
+ * discarded by the decoder.
+ * @access_unit_len: The size of _one_ access unit (with any headers it might need)
+ */
+struct pes_header_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ u32 dest_buf_sz;
+ u32 encoder_id;
+
+ bool send_pts;
+ u64 pts;
+
+ bool send_dts;
+ u64 dts;
+
+ u16 stream_id;
+ /* might be used by an encoder if needed, gets discarded by decoder */
+ u32 n_pes_h_s_bytes;
+ u32 access_unit_len;
+};
+
+/**
+ * struct pes_ts_header_write_args - Arguments to write a TS header.
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @pid: The PID to use for the TS packets.
+ * @continuity_counter: Incremented on every new TS packet.
+ * @wrote_pes_header: Flag to indicate whether the PES header has already been
+ * written for the current PES packet.
+ * @n_stuffing_bytes: Padding bytes. Might be used by an encoder if needed, gets
+ * discarded by the decoder.
+ * @pcr: The PCR value to write into the adaptation field when one is due.
+ */
+struct pes_ts_header_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ u32 dest_buf_sz;
+ u16 pid;
+ u8 *continuity_counter;
+ bool wrote_pes_header;
+ u32 n_stuffing_bytes;
+ u64 pcr;
+};
+
+/**
+ * struct pes_write_args - Arguments for the packetizer.
+ * @dest_buf: The buffer to write into.
+ * @from: A pointer to the encoder buffer containing one access unit.
+ * @access_unit_len: The size of _one_ access unit (with any headers it might need)
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @pid: The PID to use for the TS packets.
+ * @encoder_id: Encoder id (see vidtv_encoder.h)
+ * @continuity_counter: Incremented on every new TS packet.
+ * @stream_id: The stream id to use. Ex: Audio streams (0xc0-0xdf), Video
+ * streams (0xe0-0xef).
+ * @send_pts: Should we send PTS?
+ * @pts: PTS value to send.
+ * @send_dts: Should we send DTS?
+ * @dts: DTS value to send.
+ * @n_pes_h_s_bytes: Padding bytes. Might be used by an encoder if needed, gets
+ * discarded by the decoder.
+ * @pcr: The PCR value to write alongside the first TS packet of the payload.
+ */
+struct pes_write_args {
+ void *dest_buf;
+ void *from;
+ u32 access_unit_len;
+
+ u32 dest_offset;
+ u32 dest_buf_sz;
+ u16 pid;
+
+ u32 encoder_id;
+
+ u8 *continuity_counter;
+
+ u16 stream_id;
+
+ bool send_pts;
+ u64 pts;
+
+ bool send_dts;
+ u64 dts;
+
+ u32 n_pes_h_s_bytes;
+ u64 pcr;
+};
+
+/**
+ * vidtv_pes_write_into - Write a PES packet as MPEG-TS packets into a buffer.
+ * @args: The args to use when writing
+ *
+ * This function translates the ES data for one access unit
+ * from an encoder into MPEG TS packets. It does so by first encapsulating it
+ * with a PES header and then splitting it into TS packets.
+ *
+ * The data is then written into the buffer pointed to by 'args.dest_buf'.
+ *
+ * Return: The number of bytes written into the buffer. This is usually NOT
+ * equal to the size of the access unit, since we need space for PES headers, TS headers
+ * and padding bytes, if any.
+ */
+u32 vidtv_pes_write_into(struct pes_write_args args);
+
+#endif // VIDTV_PES_H
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
new file mode 100644
index 000000000000..82cf67dd27c0
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
@@ -0,0 +1,1322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains the logic to work with MPEG Program-Specific Information.
+ * These are defined both in ISO/IEC 13818-1 (systems) and ETSI EN 300 468.
+ * PSI is carried in the form of table structures, and although each table might
+ * technically be broken into one or more sections, we do not do this here,
+ * hence 'table' and 'section' are interchangeable for vidtv.
+ *
+ * This code currently supports three tables: PAT, PMT and SDT. These are the
+ * bare minimum to get userspace to recognize our MPEG transport stream. It can
+ * be extended to support more PSI tables in the future.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+#include "vidtv_psi.h"
+#include "vidtv_common.h"
+#include "vidtv_ts.h"
+
+#define CRC_SIZE_IN_BYTES 4
+#define MAX_VERSION_NUM 32
+
+static const u32 CRC_LUT[256] = {
+ /* from libdvbv5 */
+ 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
+ 0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+ 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
+ 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+ 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
+ 0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+ 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
+ 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+ 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
+ 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+ 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
+ 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+ 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
+ 0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+ 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
+ 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+ 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
+ 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+ 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
+ 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+ 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
+ 0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+ 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
+ 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+ 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
+ 0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+ 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
+ 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+ 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
+ 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+ 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
+ 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+ 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
+ 0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+ 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
+ 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+ 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
+ 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+ 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
+ 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+ 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
+ 0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+ 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+};
+
+static inline u32 dvb_crc32(u32 crc, u8 *data, u32 len)
+{
+ /* from libdvbv5 */
+ while (len--)
+ crc = (crc << 8) ^ CRC_LUT[((crc >> 24) ^ *data++) & 0xff];
+ return crc;
+}
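
This is CRC-32/MPEG-2 (initial value 0xffffffff, no bit reflection, no final XOR). A property receivers rely on: running the CRC over a whole section, including the big-endian CRC appended at its end, yields zero. A verification sketch:

	/* Sketch: a section followed by its big-endian CRC32 hashes to zero. */
	static bool section_crc_ok(u8 *section, u32 len_including_crc)
	{
		return dvb_crc32(0xffffffff, section, len_including_crc) == 0;
	}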
+
+static void vidtv_psi_update_version_num(struct vidtv_psi_table_header *h)
+{
+ h->version++;
+}
+
+static inline u16 vidtv_psi_sdt_serv_get_desc_loop_len(struct vidtv_psi_table_sdt_service *s)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(11, 0);
+
+ ret = be16_to_cpu(s->bitfield) & mask;
+ return ret;
+}
+
+static inline u16 vidtv_psi_pmt_stream_get_desc_loop_len(struct vidtv_psi_table_pmt_stream *s)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(9, 0);
+
+ ret = be16_to_cpu(s->bitfield2) & mask;
+ return ret;
+}
+
+static inline u16 vidtv_psi_pmt_get_desc_loop_len(struct vidtv_psi_table_pmt *p)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(9, 0);
+
+ ret = be16_to_cpu(p->bitfield2) & mask;
+ return ret;
+}
+
+static inline u16 vidtv_psi_get_sec_len(struct vidtv_psi_table_header *h)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(11, 0);
+
+ ret = be16_to_cpu(h->bitfield) & mask;
+ return ret;
+}
+
+inline u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(12, 0);
+
+ ret = be16_to_cpu(p->bitfield) & mask;
+ return ret;
+}
+
+inline u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s)
+{
+ u16 mask;
+ u16 ret;
+
+ mask = GENMASK(12, 0);
+
+ ret = be16_to_cpu(s->bitfield) & mask;
+ return ret;
+}
+
+static inline void vidtv_psi_set_desc_loop_len(__be16 *bitfield, u16 new_len, u8 desc_len_nbits)
+{
+ u16 mask;
+ __be16 new;
+
+ mask = GENMASK(15, desc_len_nbits);
+
+ new = cpu_to_be16((be16_to_cpu(*bitfield) & mask) | new_len);
+ *bitfield = new;
+}
+
+static void vidtv_psi_set_sec_len(struct vidtv_psi_table_header *h, u16 new_len)
+{
+ u16 old_len = vidtv_psi_get_sec_len(h);
+ __be16 new;
+ u16 mask;
+
+ mask = GENMASK(15, 13);
+
+ new = cpu_to_be16((be16_to_cpu(h->bitfield) & mask) | new_len);
+
+ if (old_len > MAX_SECTION_LEN)
+ pr_warn_ratelimited("section length: %d > %d, old len was %d\n",
+ new_len,
+ MAX_SECTION_LEN,
+ old_len);
+
+ h->bitfield = new;
+}
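
In the 16-bit header bitfield, section_length occupies the low 12 bits (counting from the byte right after the length field up to and including the CRC), while the top bits carry the syntax indicator and reserved bits. The read-modify-write above can be expressed with explicit masks, as in this sketch:

	/* Sketch: pack a 12-bit section_length, preserving the high bits. */
	static __be16 pack_sec_len(__be16 bitfield, u16 new_len)
	{
		u16 host = be16_to_cpu(bitfield);

		return cpu_to_be16((host & ~GENMASK(11, 0)) |
				   (new_len & GENMASK(11, 0)));
	}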
+
+static u32 vidtv_psi_ts_psi_write_into(struct psi_write_args args)
+{
+ /*
+ * Packetize PSI sections into TS packets:
+ * push a TS header (4 bytes) every 184 bytes
+ * manage the continuity_counter
+ * add stuffing (i.e. padding bytes) after the CRC
+ */
+
+ u32 nbytes_past_boundary = (args.dest_offset % TS_PACKET_LEN);
+ bool aligned = (nbytes_past_boundary == 0);
+ struct vidtv_mpeg_ts ts_header = {};
+
+ /* number of bytes written by this function */
+ u32 nbytes = 0;
+ /* how much there is left to write */
+ u32 remaining_len = args.len;
+ /* how much can be written in this packet */
+ u32 payload_write_len = 0;
+ /* where we are in the source */
+ u32 payload_offset = 0;
+
+ const u16 PAYLOAD_START = args.new_psi_section;
+
+ if (!args.crc && !args.is_crc)
+ pr_warn_ratelimited("Missing CRC for chunk\n");
+
+ if (args.crc)
+ *args.crc = dvb_crc32(*args.crc, args.from, args.len);
+
+ if (args.new_psi_section && !aligned) {
+ pr_warn_ratelimited("Cannot write a new PSI section in a misaligned buffer\n");
+
+ /* forcibly align and hope for the best */
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ TS_FILL_BYTE,
+ TS_PACKET_LEN - nbytes_past_boundary);
+ }
+
+ while (remaining_len) {
+ nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ aligned = (nbytes_past_boundary == 0);
+
+ if (aligned) {
+ /* if at a packet boundary, write a new TS header */
+ ts_header.sync_byte = TS_SYNC_BYTE;
+ ts_header.bitfield = cpu_to_be16((PAYLOAD_START << 14) | args.pid);
+ ts_header.scrambling = 0;
+ ts_header.continuity_counter = *args.continuity_counter;
+ ts_header.payload = 1;
+ /* no adaptation field */
+ ts_header.adaptation_field = 0;
+
+ /* copy the header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ &ts_header,
+ sizeof(ts_header));
+ /*
+ * This will trigger a discontinuity if the buffer is full,
+ * effectively dropping the packet.
+ */
+ vidtv_ts_inc_cc(args.continuity_counter);
+ }
+
+ /* write the pointer_field in the first byte of the payload */
+ if (args.new_psi_section)
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ 0x0,
+ 1);
+
+ /* write as much of the payload as possible */
+ nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+ payload_write_len = min(TS_PACKET_LEN - nbytes_past_boundary, remaining_len);
+
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ args.from + payload_offset,
+ payload_write_len);
+
+ /* 'payload_write_len' written from a total of 'len' requested */
+ remaining_len -= payload_write_len;
+ payload_offset += payload_write_len;
+ }
+
+ /*
+ * fill the rest of the packet if any space remains unused
+ */
+
+ nbytes_past_boundary = (args.dest_offset + nbytes) % TS_PACKET_LEN;
+
+ if (args.is_crc)
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.dest_buf_sz,
+ TS_FILL_BYTE,
+ TS_PACKET_LEN - nbytes_past_boundary);
+
+ return nbytes;
+}
+
+static u32 table_section_crc32_write_into(struct crc32_write_args args)
+{
+ /* the CRC is the last entry in the section */
+ u32 nbytes = 0;
+ struct psi_write_args psi_args = {};
+
+ psi_args.dest_buf = args.dest_buf;
+ psi_args.from = &args.crc;
+ psi_args.len = CRC_SIZE_IN_BYTES;
+ psi_args.dest_offset = args.dest_offset;
+ psi_args.pid = args.pid;
+ psi_args.new_psi_section = false;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = true;
+ psi_args.dest_buf_sz = args.dest_buf_sz;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ return nbytes;
+}
+
+struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc *head,
+ enum service_type service_type,
+ char *service_name,
+ char *provider_name)
+{
+ struct vidtv_psi_desc_service *desc;
+ u32 service_name_len = service_name ? strlen(service_name) : 0;
+ u32 provider_name_len = provider_name ? strlen(provider_name) : 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+
+ desc->type = SERVICE_DESCRIPTOR;
+
+ desc->length = sizeof_field(struct vidtv_psi_desc_service, service_type)
+ + sizeof_field(struct vidtv_psi_desc_service, provider_name_len)
+ + provider_name_len
+ + sizeof_field(struct vidtv_psi_desc_service, service_name_len)
+ + service_name_len;
+
+ desc->service_type = service_type;
+
+ desc->service_name_len = service_name_len;
+
+ if (service_name && service_name_len)
+ desc->service_name = kstrdup(service_name, GFP_KERNEL);
+
+ desc->provider_name_len = provider_name_len;
+
+ if (provider_name && provider_name_len)
+ desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = (struct vidtv_psi_desc *)desc;
+ }
+ return desc;
+}
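
A sketch of how a caller might build a descriptor chain with this helper; the service type constant is assumed to be one of the 'enum service_type' values, and the strings are illustrative:

	struct vidtv_psi_desc_service *desc;

	/* head == NULL starts a new chain; a later call can pass
	 * (struct vidtv_psi_desc *)desc as head to append to it.
	 */
	desc = vidtv_psi_service_desc_init(NULL,
					   DIGITAL_TELEVISION_SERVICE,
					   "Test Service",
					   "Test Provider");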
+
+struct vidtv_psi_desc_registration
+*vidtv_psi_registration_desc_init(struct vidtv_psi_desc *head,
+ __be32 format_id,
+ u8 *additional_ident_info,
+ u32 additional_info_len)
+{
+ struct vidtv_psi_desc_registration *desc;
+
+ desc = kzalloc(sizeof(*desc) + sizeof(format_id) + additional_info_len, GFP_KERNEL);
+
+ desc->type = REGISTRATION_DESCRIPTOR;
+
+ desc->length = sizeof_field(struct vidtv_psi_desc_registration, format_id)
+ + additional_info_len;
+
+ desc->format_id = format_id;
+
+ if (additional_ident_info && additional_info_len)
+ memcpy(desc->additional_identification_info,
+ additional_ident_info,
+ additional_info_len);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = (struct vidtv_psi_desc *)desc;
+ }
+
+ return desc;
+}
+
+struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
+{
+ struct vidtv_psi_desc *head = NULL;
+ struct vidtv_psi_desc *prev = NULL;
+ struct vidtv_psi_desc *curr = NULL;
+
+ struct vidtv_psi_desc_service *service;
+
+ while (desc) {
+ switch (desc->type) {
+ case SERVICE_DESCRIPTOR:
+ service = (struct vidtv_psi_desc_service *)desc;
+ curr = (struct vidtv_psi_desc *)
+ vidtv_psi_service_desc_init(head,
+ service->service_type,
+ service->service_name,
+ service->provider_name);
+ break;
+
+ case REGISTRATION_DESCRIPTOR:
+ default:
+ curr = kzalloc(sizeof(*desc) + desc->length, GFP_KERNEL);
+ memcpy(curr, desc, sizeof(*desc) + desc->length);
+ break;
+ }
+
+ if (curr)
+ curr->next = NULL;
+ if (!head)
+ head = curr;
+ if (prev)
+ prev->next = curr;
+
+ prev = curr;
+ desc = desc->next;
+ }
+
+ return head;
+}
+
+void vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc)
+{
+ struct vidtv_psi_desc *curr = desc;
+ struct vidtv_psi_desc *tmp = NULL;
+
+ while (curr) {
+ tmp = curr;
+ curr = curr->next;
+
+ switch (tmp->type) {
+ case SERVICE_DESCRIPTOR:
+ kfree(((struct vidtv_psi_desc_service *)tmp)->provider_name);
+ kfree(((struct vidtv_psi_desc_service *)tmp)->service_name);
+
+ break;
+ case REGISTRATION_DESCRIPTOR:
+ /* nothing to do */
+ break;
+
+ default:
+ pr_warn_ratelimited("Possible leak: not handling descriptor type %d\n",
+ tmp->type);
+ break;
+ }
+
+ kfree(tmp);
+ }
+}
+
+static u16
+vidtv_psi_desc_comp_loop_len(struct vidtv_psi_desc *desc)
+{
+ u32 length = 0;
+
+ if (!desc)
+ return 0;
+
+ while (desc) {
+ length += sizeof_field(struct vidtv_psi_desc, type);
+ length += sizeof_field(struct vidtv_psi_desc, length);
+ length += desc->length; /* from 'length' field until the end of the descriptor */
+ desc = desc->next;
+ }
+
+ return length;
+}
+
+void vidtv_psi_desc_assign(struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc)
+{
+ if (desc == *to)
+ return;
+
+ if (*to)
+ vidtv_psi_desc_destroy(*to);
+
+ *to = desc;
+}
+
+void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt,
+ struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc)
+{
+ vidtv_psi_desc_assign(to, desc);
+ vidtv_psi_pmt_table_update_sec_len(pmt);
+
+ if (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN)
+ vidtv_psi_desc_assign(to, NULL);
+
+ vidtv_psi_update_version_num(&pmt->header);
+}
+
+void vidtv_sdt_desc_assign(struct vidtv_psi_table_sdt *sdt,
+ struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc)
+{
+ vidtv_psi_desc_assign(to, desc);
+ vidtv_psi_sdt_table_update_sec_len(sdt);
+
+ if (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN)
+ vidtv_psi_desc_assign(to, NULL);
+
+ vidtv_psi_update_version_num(&sdt->header);
+}
+
+static u32 vidtv_psi_desc_write_into(struct desc_write_args args)
+{
+ /* the number of bytes written by this function */
+ u32 nbytes = 0;
+ struct psi_write_args psi_args = {};
+
+ psi_args.dest_buf = args.dest_buf;
+ psi_args.from = &args.desc->type;
+
+ psi_args.len = sizeof_field(struct vidtv_psi_desc, type) +
+ sizeof_field(struct vidtv_psi_desc, length);
+
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.pid = args.pid;
+ psi_args.new_psi_section = false;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = false;
+ psi_args.dest_buf_sz = args.dest_buf_sz;
+ psi_args.crc = args.crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ switch (args.desc->type) {
+ case SERVICE_DESCRIPTOR:
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_type) +
+ sizeof_field(struct vidtv_psi_desc_service, provider_name_len);
+ psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_type;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->provider_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->provider_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.len = sizeof_field(struct vidtv_psi_desc_service, service_name_len);
+ psi_args.from = &((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.len = ((struct vidtv_psi_desc_service *)args.desc)->service_name_len;
+ psi_args.from = ((struct vidtv_psi_desc_service *)args.desc)->service_name;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ break;
+
+ case REGISTRATION_DESCRIPTOR:
+ default:
+ psi_args.dest_offset = args.dest_offset + nbytes;
+ psi_args.len = args.desc->length;
+ psi_args.from = &args.desc->data;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+ break;
+ }
+
+ return nbytes;
+}
+
+static u32
+vidtv_psi_table_header_write_into(struct header_write_args args)
+{
+ /* the number of bytes written by this function */
+ u32 nbytes = 0;
+ struct psi_write_args psi_args = {};
+
+ psi_args.dest_buf = args.dest_buf;
+ psi_args.from = args.h;
+ psi_args.len = sizeof(struct vidtv_psi_table_header);
+ psi_args.dest_offset = args.dest_offset;
+ psi_args.pid = args.pid;
+ psi_args.new_psi_section = true;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = false;
+ psi_args.dest_buf_sz = args.dest_buf_sz;
+ psi_args.crc = args.crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ return nbytes;
+}
+
+void
+vidtv_psi_pat_table_update_sec_len(struct vidtv_psi_table_pat *pat)
+{
+ /* see ISO/IEC 13818-1 : 2000 p.43 */
+ u16 length = 0;
+ u32 i;
+
+ /* from immediately after 'section_length' until 'last_section_number'*/
+ length += PAT_LEN_UNTIL_LAST_SECTION_NUMBER;
+
+ /* do not count the pointer */
+ for (i = 0; i < pat->programs; ++i)
+ length += sizeof(struct vidtv_psi_table_pat_program) -
+ sizeof(struct vidtv_psi_table_pat_program *);
+
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&pat->header, length);
+}
+
+void vidtv_psi_pmt_table_update_sec_len(struct vidtv_psi_table_pmt *pmt)
+{
+ /* see ISO/IEC 13818-1 : 2000 p.46 */
+ u16 length = 0;
+ struct vidtv_psi_table_pmt_stream *s = pmt->stream;
+ u16 desc_loop_len;
+
+ /* from immediately after 'section_length' until 'program_info_length' */
+ length += PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH;
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(pmt->descriptor);
+ vidtv_psi_set_desc_loop_len(&pmt->bitfield2, desc_loop_len, 10);
+
+ length += desc_loop_len;
+
+ while (s) {
+ /* skip both pointers at the end */
+ length += sizeof(struct vidtv_psi_table_pmt_stream) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_pmt_stream *);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(s->descriptor);
+ vidtv_psi_set_desc_loop_len(&s->bitfield2, desc_loop_len, 10);
+
+ length += desc_loop_len;
+
+ s = s->next;
+ }
+
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&pmt->header, length);
+}
+
+void vidtv_psi_sdt_table_update_sec_len(struct vidtv_psi_table_sdt *sdt)
+{
+ /* see ETSI EN 300 468 V 1.10.1 p.24 */
+ u16 length = 0;
+ struct vidtv_psi_table_sdt_service *s = sdt->service;
+ u16 desc_loop_len;
+
+ /*
+ * from immediately after 'section_length' until
+ * 'reserved_for_future_use'
+ */
+ length += SDT_LEN_UNTIL_RESERVED_FOR_FUTURE_USE;
+
+ while (s) {
+ /* skip both pointers at the end */
+ length += sizeof(struct vidtv_psi_table_sdt_service) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_sdt_service *);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(s->descriptor);
+ vidtv_psi_set_desc_loop_len(&s->bitfield, desc_loop_len, 12);
+
+ length += desc_loop_len;
+
+ s = s->next;
+ }
+
+ length += CRC_SIZE_IN_BYTES;
+
+ vidtv_psi_set_sec_len(&sdt->header, length);
+}
+
+struct vidtv_psi_table_pat_program*
+vidtv_psi_pat_program_init(struct vidtv_psi_table_pat_program *head,
+ u16 service_id,
+ u16 program_map_pid)
+{
+ struct vidtv_psi_table_pat_program *program;
+ const u16 RESERVED = 0x07;
+
+ program = kzalloc(sizeof(*program), GFP_KERNEL);
+
+ program->service_id = cpu_to_be16(service_id);
+
+ /* pid for the PMT section in the TS */
+ program->bitfield = cpu_to_be16((RESERVED << 13) | program_map_pid);
+ program->next = NULL;
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = program;
+ }
+
+ return program;
+}
+
+void
+vidtv_psi_pat_program_destroy(struct vidtv_psi_table_pat_program *p)
+{
+ struct vidtv_psi_table_pat_program *curr = p;
+ struct vidtv_psi_table_pat_program *tmp = NULL;
+
+ while (curr) {
+ tmp = curr;
+ curr = curr->next;
+ kfree(tmp);
+ }
+}
+
+void
+vidtv_psi_pat_program_assign(struct vidtv_psi_table_pat *pat,
+ struct vidtv_psi_table_pat_program *p)
+{
+ /* This function transfers ownership of p to the table */
+
+ u16 program_count = 0;
+ struct vidtv_psi_table_pat_program *program = p;
+
+ if (p == pat->program)
+ return;
+
+ while (program) {
+ ++program_count;
+ program = program->next;
+ }
+
+ pat->programs = program_count;
+ pat->program = p;
+
+ /* Recompute section length */
+ vidtv_psi_pat_table_update_sec_len(pat);
+
+ if (vidtv_psi_get_sec_len(&pat->header) > MAX_SECTION_LEN)
+ vidtv_psi_pat_program_assign(pat, NULL);
+
+ vidtv_psi_update_version_num(&pat->header);
+}
+
+struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id)
+{
+ struct vidtv_psi_table_pat *pat = kzalloc(sizeof(*pat), GFP_KERNEL);
+ const u16 SYNTAX = 0x1;
+ const u16 ZERO = 0x0;
+ const u16 ONES = 0x03;
+
+ pat->header.table_id = 0x0;
+
+ pat->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ZERO << 14) | (ONES << 12));
+ pat->header.id = cpu_to_be16(transport_stream_id);
+ pat->header.current_next = 0x1;
+
+ pat->header.version = 0x1f;
+
+ pat->header.one2 = 0x03;
+ pat->header.section_id = 0x0;
+ pat->header.last_section = 0x0;
+
+ pat->programs = 0;
+
+ vidtv_psi_pat_table_update_sec_len(pat);
+
+ return pat;
+}
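
Tying the PAT helpers together, a sketch (all numeric values illustrative): create the table, build a one-entry program list pointing at the PMT PID, then hand ownership of the list to the table:

	struct vidtv_psi_table_pat *pat;
	struct vidtv_psi_table_pat_program *prog;

	pat = vidtv_psi_pat_table_init(0x744);  /* transport_stream_id */
	prog = vidtv_psi_pat_program_init(NULL, /* NULL starts a new list */
					  0x880,  /* service_id */
					  0x100); /* PID of the PMT section */

	/* transfers ownership of 'prog'; recomputes counts and section length */
	vidtv_psi_pat_program_assign(pat, prog);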
+
+u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args args)
+{
+ /* the number of bytes written by this function */
+ u32 nbytes = 0;
+ const u16 pat_pid = VIDTV_PAT_PID;
+ u32 crc = 0xffffffff;
+
+ struct vidtv_psi_table_pat_program *p = args.pat->program;
+
+ struct header_write_args h_args = {};
+ struct psi_write_args psi_args = {};
+ struct crc32_write_args c_args = {};
+
+ vidtv_psi_pat_table_update_sec_len(args.pat);
+
+ h_args.dest_buf = args.buf;
+ h_args.dest_offset = args.offset;
+ h_args.h = &args.pat->header;
+ h_args.pid = pat_pid;
+ h_args.continuity_counter = args.continuity_counter;
+ h_args.dest_buf_sz = args.buf_sz;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(h_args);
+
+ /* note that the field 'u16 programs' is not really part of the PAT */
+
+ psi_args.dest_buf = args.buf;
+ psi_args.pid = pat_pid;
+ psi_args.new_psi_section = false;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = false;
+ psi_args.dest_buf_sz = args.buf_sz;
+ psi_args.crc = &crc;
+
+ while (p) {
+ /* copy the PAT programs */
+ psi_args.from = p;
+ /* skip the pointer */
+ psi_args.len = sizeof(*p) -
+ sizeof(struct vidtv_psi_table_pat_program *);
+ psi_args.dest_offset = args.offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ p = p->next;
+ }
+
+ c_args.dest_buf = args.buf;
+ c_args.dest_offset = args.offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.pid = pat_pid;
+ c_args.continuity_counter = args.continuity_counter;
+ c_args.dest_buf_sz = args.buf_sz;
+
+ /* Write the CRC32 at the end */
+ nbytes += table_section_crc32_write_into(c_args);
+
+ return nbytes;
+}
+
+void
+vidtv_psi_pat_table_destroy(struct vidtv_psi_table_pat *p)
+{
+ vidtv_psi_pat_program_destroy(p->program);
+ kfree(p);
+}
+
+struct vidtv_psi_table_pmt_stream*
+vidtv_psi_pmt_stream_init(struct vidtv_psi_table_pmt_stream *head,
+ enum vidtv_psi_stream_types stream_type,
+ u16 es_pid)
+{
+ struct vidtv_psi_table_pmt_stream *stream;
+ const u16 RESERVED1 = 0x07;
+ const u16 RESERVED2 = 0x0f;
+ const u16 ZERO = 0x0;
+ u16 desc_loop_len;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+
+ stream->type = stream_type;
+
+ stream->bitfield = cpu_to_be16((RESERVED1 << 13) | es_pid);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(stream->descriptor);
+
+ stream->bitfield2 = cpu_to_be16((RESERVED2 << 12) |
+ (ZERO << 10) |
+ desc_loop_len);
+ stream->next = NULL;
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = stream;
+ }
+
+ return stream;
+}
+
+void vidtv_psi_pmt_stream_destroy(struct vidtv_psi_table_pmt_stream *s)
+{
+ struct vidtv_psi_table_pmt_stream *curr_stream = s;
+ struct vidtv_psi_table_pmt_stream *tmp_stream = NULL;
+
+ while (curr_stream) {
+ tmp_stream = curr_stream;
+ curr_stream = curr_stream->next;
+ vidtv_psi_desc_destroy(tmp_stream->descriptor);
+ kfree(tmp_stream);
+ }
+}
+
+void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
+ struct vidtv_psi_table_pmt_stream *s)
+{
+ /* This function transfers ownership of s to the table */
+ if (s == pmt->stream)
+ return;
+
+ pmt->stream = s;
+ vidtv_psi_pmt_table_update_sec_len(pmt);
+
+ if (vidtv_psi_get_sec_len(&pmt->header) > MAX_SECTION_LEN)
+ vidtv_psi_pmt_stream_assign(pmt, NULL);
+
+ vidtv_psi_update_version_num(&pmt->header);
+}
+
+u16 vidtv_psi_pmt_get_pid(struct vidtv_psi_table_pmt *section,
+ struct vidtv_psi_table_pat *pat)
+{
+ struct vidtv_psi_table_pat_program *program = pat->program;
+
+ /*
+ * service_id is the same as program_number in the
+ * corresponding program_map_section
+ * see ETSI EN 300 468 v1.15.1 p. 24
+ */
+ while (program) {
+ if (program->service_id == section->header.id)
+ return vidtv_psi_get_pat_program_pid(program);
+
+ program = program->next;
+ }
+
+ return TS_LAST_VALID_PID + 1; /* not found */
+}
+
+struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
+ u16 pcr_pid)
+{
+ struct vidtv_psi_table_pmt *pmt = kzalloc(sizeof(*pmt), GFP_KERNEL);
+ const u16 SYNTAX = 0x1;
+ const u16 ZERO = 0x0;
+ const u16 ONES = 0x03;
+ const u16 RESERVED1 = 0x07;
+ const u16 RESERVED2 = 0x0f;
+ u16 desc_loop_len;
+
+ if (!pcr_pid)
+ pcr_pid = 0x1fff;
+
+ pmt->header.table_id = 0x2;
+
+ pmt->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ZERO << 14) | (ONES << 12));
+
+ pmt->header.id = cpu_to_be16(program_number);
+ pmt->header.current_next = 0x1;
+
+ pmt->header.version = 0x1f;
+
+ pmt->header.one2 = ONES;
+ pmt->header.section_id = 0;
+ pmt->header.last_section = 0;
+
+ pmt->bitfield = cpu_to_be16((RESERVED1 << 13) | pcr_pid);
+
+ desc_loop_len = vidtv_psi_desc_comp_loop_len(pmt->descriptor);
+
+ pmt->bitfield2 = cpu_to_be16((RESERVED2 << 12) |
+ (ZERO << 10) |
+ desc_loop_len);
+
+ vidtv_psi_pmt_table_update_sec_len(pmt);
+
+ return pmt;
+}
+
+u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args)
+{
+ /* the number of bytes written by this function */
+ u32 nbytes = 0;
+ u32 crc = 0xffffffff;
+
+ struct vidtv_psi_desc *table_descriptor = args.pmt->descriptor;
+ struct vidtv_psi_table_pmt_stream *stream = args.pmt->stream;
+ struct vidtv_psi_desc *stream_descriptor = (stream) ?
+ args.pmt->stream->descriptor :
+ NULL;
+
+ struct header_write_args h_args = {};
+ struct psi_write_args psi_args = {};
+ struct desc_write_args d_args = {};
+ struct crc32_write_args c_args = {};
+
+ vidtv_psi_pmt_table_update_sec_len(args.pmt);
+
+ h_args.dest_buf = args.buf;
+ h_args.dest_offset = args.offset;
+ h_args.h = &args.pmt->header;
+ h_args.pid = args.pid;
+ h_args.continuity_counter = args.continuity_counter;
+ h_args.dest_buf_sz = args.buf_sz;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(h_args);
+
+ /* write the two bitfields */
+ psi_args.dest_buf = args.buf;
+ psi_args.from = &args.pmt->bitfield;
+ psi_args.len = sizeof_field(struct vidtv_psi_table_pmt, bitfield) +
+ sizeof_field(struct vidtv_psi_table_pmt, bitfield2);
+
+ psi_args.dest_offset = args.offset + nbytes;
+ psi_args.pid = args.pid;
+ psi_args.new_psi_section = false;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = false;
+ psi_args.dest_buf_sz = args.buf_sz;
+ psi_args.crc = &crc;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ while (table_descriptor) {
+ /* write the descriptors, if any */
+ d_args.dest_buf = args.buf;
+ d_args.dest_offset = args.offset + nbytes;
+ d_args.desc = table_descriptor;
+ d_args.pid = args.pid;
+ d_args.continuity_counter = args.continuity_counter;
+ d_args.dest_buf_sz = args.buf_sz;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(d_args);
+
+ table_descriptor = table_descriptor->next;
+ }
+
+ while (stream) {
+ /* write the streams, if any */
+ psi_args.from = stream;
+ psi_args.len = sizeof_field(struct vidtv_psi_table_pmt_stream, type) +
+ sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield) +
+ sizeof_field(struct vidtv_psi_table_pmt_stream, bitfield2);
+ psi_args.dest_offset = args.offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ while (stream_descriptor) {
+ /* write the stream descriptors, if any */
+ d_args.dest_buf = args.buf;
+ d_args.dest_offset = args.offset + nbytes;
+ d_args.desc = stream_descriptor;
+ d_args.pid = args.pid;
+ d_args.continuity_counter = args.continuity_counter;
+ d_args.dest_buf_sz = args.buf_sz;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(d_args);
+
+ stream_descriptor = stream_descriptor->next;
+ }
+
+ stream = stream->next;
+ }
+
+ c_args.dest_buf = args.buf;
+ c_args.dest_offset = args.offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.pid = args.pid;
+ c_args.continuity_counter = args.continuity_counter;
+ c_args.dest_buf_sz = args.buf_sz;
+
+ /* Write the CRC32 at the end */
+ nbytes += table_section_crc32_write_into(c_args);
+
+ return nbytes;
+}
+
+void vidtv_psi_pmt_table_destroy(struct vidtv_psi_table_pmt *pmt)
+{
+ vidtv_psi_desc_destroy(pmt->descriptor);
+ vidtv_psi_pmt_stream_destroy(pmt->stream);
+ kfree(pmt);
+}
+
+struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id)
+{
+ struct vidtv_psi_table_sdt *sdt = kzalloc(sizeof(*sdt), GFP_KERNEL);
+ const u16 SYNTAX = 0x1;
+ const u16 ONE = 0x1;
+ const u16 ONES = 0x03;
+ const u16 RESERVED = 0xff;
+
+	if (!sdt)
+		return NULL;
+
+	sdt->header.table_id = 0x42;
+
+ sdt->header.bitfield = cpu_to_be16((SYNTAX << 15) | (ONE << 14) | (ONES << 12));
+
+ /*
+ * This is a 16-bit field which serves as a label for identification
+ * of the TS, about which the SDT informs, from any other multiplex
+ * within the delivery system.
+ */
+ sdt->header.id = cpu_to_be16(transport_stream_id);
+ sdt->header.current_next = ONE;
+
+ sdt->header.version = 0x1f;
+
+ sdt->header.one2 = ONES;
+ sdt->header.section_id = 0;
+ sdt->header.last_section = 0;
+
+ /*
+ * FIXME: The network_id range from 0xFF01 to 0xFFFF is used to
+ * indicate temporary private use. For now, let's use the first
+ * value.
+ * This can be changed to something more useful, when support for
+ * NIT gets added
+ */
+ sdt->network_id = cpu_to_be16(0xff01);
+ sdt->reserved = RESERVED;
+
+ vidtv_psi_sdt_table_update_sec_len(sdt);
+
+ return sdt;
+}
+
+u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args)
+{
+ u32 nbytes = 0;
+ u16 sdt_pid = VIDTV_SDT_PID; /* see ETSI EN 300 468 v1.15.1 p. 11 */
+
+ u32 crc = 0xffffffff;
+
+ struct vidtv_psi_table_sdt_service *service = args.sdt->service;
+ struct vidtv_psi_desc *service_desc = (args.sdt->service) ?
+ args.sdt->service->descriptor :
+ NULL;
+
+ struct header_write_args h_args = {};
+ struct psi_write_args psi_args = {};
+ struct desc_write_args d_args = {};
+ struct crc32_write_args c_args = {};
+
+ vidtv_psi_sdt_table_update_sec_len(args.sdt);
+
+ h_args.dest_buf = args.buf;
+ h_args.dest_offset = args.offset;
+ h_args.h = &args.sdt->header;
+ h_args.pid = sdt_pid;
+ h_args.continuity_counter = args.continuity_counter;
+ h_args.dest_buf_sz = args.buf_sz;
+ h_args.crc = &crc;
+
+ nbytes += vidtv_psi_table_header_write_into(h_args);
+
+ psi_args.dest_buf = args.buf;
+ psi_args.from = &args.sdt->network_id;
+
+ psi_args.len = sizeof_field(struct vidtv_psi_table_sdt, network_id) +
+ sizeof_field(struct vidtv_psi_table_sdt, reserved);
+
+ psi_args.dest_offset = args.offset + nbytes;
+ psi_args.pid = sdt_pid;
+ psi_args.new_psi_section = false;
+ psi_args.continuity_counter = args.continuity_counter;
+ psi_args.is_crc = false;
+ psi_args.dest_buf_sz = args.buf_sz;
+ psi_args.crc = &crc;
+
+	/* copy u16 network_id + u8 reserved */
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ while (service) {
+ /* copy the services, if any */
+ psi_args.from = service;
+ /* skip both pointers at the end */
+ psi_args.len = sizeof(struct vidtv_psi_table_sdt_service) -
+ sizeof(struct vidtv_psi_desc *) -
+ sizeof(struct vidtv_psi_table_sdt_service *);
+ psi_args.dest_offset = args.offset + nbytes;
+
+ nbytes += vidtv_psi_ts_psi_write_into(psi_args);
+
+ while (service_desc) {
+ /* copy the service descriptors, if any */
+ d_args.dest_buf = args.buf;
+ d_args.dest_offset = args.offset + nbytes;
+ d_args.desc = service_desc;
+ d_args.pid = sdt_pid;
+ d_args.continuity_counter = args.continuity_counter;
+ d_args.dest_buf_sz = args.buf_sz;
+ d_args.crc = &crc;
+
+ nbytes += vidtv_psi_desc_write_into(d_args);
+
+ service_desc = service_desc->next;
+ }
+
+ service = service->next;
+ }
+
+ c_args.dest_buf = args.buf;
+ c_args.dest_offset = args.offset + nbytes;
+ c_args.crc = cpu_to_be32(crc);
+ c_args.pid = sdt_pid;
+ c_args.continuity_counter = args.continuity_counter;
+ c_args.dest_buf_sz = args.buf_sz;
+
+ /* Write the CRC at the end */
+ nbytes += table_section_crc32_write_into(c_args);
+
+ return nbytes;
+}
+
+void vidtv_psi_sdt_table_destroy(struct vidtv_psi_table_sdt *sdt)
+{
+ vidtv_psi_sdt_service_destroy(sdt->service);
+ kfree(sdt);
+}
+
+struct vidtv_psi_table_sdt_service
+*vidtv_psi_sdt_service_init(struct vidtv_psi_table_sdt_service *head,
+ u16 service_id)
+{
+ struct vidtv_psi_table_sdt_service *service;
+
+	service = kzalloc(sizeof(*service), GFP_KERNEL);
+	if (!service)
+		return NULL;
+
+ /*
+	 * ETSI EN 300 468: this is a 16-bit field which serves as a label to
+ * identify this service from any other service within the TS.
+ * The service id is the same as the program number in the
+ * corresponding program_map_section
+ */
+ service->service_id = cpu_to_be16(service_id);
+ service->EIT_schedule = 0x0;
+ service->EIT_present_following = 0x0;
+ service->reserved = 0x3f;
+
+ service->bitfield = cpu_to_be16(RUNNING << 13);
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = service;
+ }
+
+ return service;
+}
+
+void
+vidtv_psi_sdt_service_destroy(struct vidtv_psi_table_sdt_service *service)
+{
+ struct vidtv_psi_table_sdt_service *curr = service;
+ struct vidtv_psi_table_sdt_service *tmp = NULL;
+
+ while (curr) {
+ tmp = curr;
+ curr = curr->next;
+ vidtv_psi_desc_destroy(tmp->descriptor);
+ kfree(tmp);
+ }
+}
+
+void
+vidtv_psi_sdt_service_assign(struct vidtv_psi_table_sdt *sdt,
+ struct vidtv_psi_table_sdt_service *service)
+{
+ if (service == sdt->service)
+ return;
+
+ sdt->service = service;
+
+ /* recompute section length */
+ vidtv_psi_sdt_table_update_sec_len(sdt);
+
+ if (vidtv_psi_get_sec_len(&sdt->header) > MAX_SECTION_LEN)
+ vidtv_psi_sdt_service_assign(sdt, NULL);
+
+ vidtv_psi_update_version_num(&sdt->header);
+}
+
+struct vidtv_psi_table_pmt**
+vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat, u16 pcr_pid)
+{
+ /*
+ * PMTs contain information about programs. For each program,
+ * there is one PMT section. This function will create a section
+ * for each program found in the PAT
+ */
+ struct vidtv_psi_table_pat_program *program = pat->program;
+ struct vidtv_psi_table_pmt **pmt_secs;
+ u32 i = 0;
+
+ /* a section for each program_id */
+	pmt_secs = kcalloc(pat->programs,
+			   sizeof(struct vidtv_psi_table_pmt *),
+			   GFP_KERNEL);
+	if (!pmt_secs)
+		return NULL;
+
+ while (program) {
+ pmt_secs[i] = vidtv_psi_pmt_table_init(be16_to_cpu(program->service_id), pcr_pid);
+ ++i;
+ program = program->next;
+ }
+
+ return pmt_secs;
+}
+
+struct vidtv_psi_table_pmt
+*vidtv_psi_find_pmt_sec(struct vidtv_psi_table_pmt **pmt_sections,
+ u16 nsections,
+ u16 program_num)
+{
+ /* find the PMT section associated with 'program_num' */
+ struct vidtv_psi_table_pmt *sec = NULL;
+ u32 i;
+
+ for (i = 0; i < nsections; ++i) {
+ sec = pmt_sections[i];
+ if (be16_to_cpu(sec->header.id) == program_num)
+ return sec;
+ }
+
+ return NULL; /* not found */
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.h b/drivers/media/test-drivers/vidtv/vidtv_psi.h
new file mode 100644
index 000000000000..3f962cc78278
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_psi.h
@@ -0,0 +1,577 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains the logic to work with MPEG Program-Specific Information.
+ * These are defined both in ISO/IEC 13818-1 (systems) and ETSI EN 300 468.
+ * PSI is carried in the form of table structures, and although each table might
+ * technically be broken into one or more sections, we do not do this here,
+ * hence 'table' and 'section' are interchangeable for vidtv.
+ *
+ * This code currently supports three tables: PAT, PMT and SDT. These are the
+ * bare minimum to get userspace to recognize our MPEG transport stream. It can
+ * be extended to support more PSI tables in the future.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_PSI_H
+#define VIDTV_PSI_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/*
+ * all section lengths start immediately after the 'section_length' field
+ * see ISO/IEC 13818-1 : 2000 and ETSI EN 300 468 V 1.10.1 for
+ * reference
+ */
+#define PAT_LEN_UNTIL_LAST_SECTION_NUMBER 5
+#define PMT_LEN_UNTIL_PROGRAM_INFO_LENGTH 9
+#define SDT_LEN_UNTIL_RESERVED_FOR_FUTURE_USE 8
+#define MAX_SECTION_LEN 1021
+#define VIDTV_PAT_PID 0 /* mandated by the specs */
+#define VIDTV_SDT_PID 0x0011 /* mandated by the specs */
+
+enum vidtv_psi_descriptors {
+ REGISTRATION_DESCRIPTOR = 0x05, /* See ISO/IEC 13818-1 section 2.6.8 */
+ SERVICE_DESCRIPTOR = 0x48, /* See ETSI EN 300 468 section 6.2.33 */
+};
+
+enum vidtv_psi_stream_types {
+ STREAM_PRIVATE_DATA = 0x06, /* see ISO/IEC 13818-1 2000 p. 48 */
+};
+
+/**
+ * struct vidtv_psi_desc - A generic PSI descriptor type.
+ * The descriptor length is an 8-bit field specifying the total number of bytes of the data portion
+ * of the descriptor following the byte defining the value of this field.
+ */
+struct vidtv_psi_desc {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+ u8 data[];
+} __packed;
+
+/**
+ * struct vidtv_psi_desc_service - Service descriptor.
+ * See ETSI EN 300 468 section 6.2.33.
+ */
+struct vidtv_psi_desc_service {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+
+ u8 service_type;
+ u8 provider_name_len;
+ char *provider_name;
+ u8 service_name_len;
+ char *service_name;
+} __packed;
+
+/**
+ * struct vidtv_psi_desc_registration - A registration descriptor.
+ * See ISO/IEC 13818-1 section 2.6.8
+ */
+struct vidtv_psi_desc_registration {
+ struct vidtv_psi_desc *next;
+ u8 type;
+ u8 length;
+
+ /*
+ * The format_identifier is a 32-bit value obtained from a Registration
+ * Authority as designated by ISO/IEC JTC 1/SC 29.
+ */
+ __be32 format_id;
+ /*
+ * The meaning of additional_identification_info bytes, if any, are
+ * defined by the assignee of that format_identifier, and once defined
+ * they shall not change.
+ */
+ u8 additional_identification_info[];
+} __packed;
+
+/**
+ * struct vidtv_psi_table_header - A header that is present for all PSI tables.
+ */
+struct vidtv_psi_table_header {
+ u8 table_id;
+
+ __be16 bitfield; /* syntax: 1, zero: 1, one: 2, section_length: 13 */
+
+ __be16 id; /* TS ID */
+ u8 current_next:1;
+ u8 version:5;
+ u8 one2:2;
+ u8 section_id; /* section_number */
+ u8 last_section; /* last_section_number */
+} __packed;
+
+/**
+ * struct vidtv_psi_table_pat_program - A single program in the PAT
+ * See ISO/IEC 13818-1 : 2000 p.43
+ */
+struct vidtv_psi_table_pat_program {
+ __be16 service_id;
+ __be16 bitfield; /* reserved: 3, program_map_pid/network_pid: 13 */
+ struct vidtv_psi_table_pat_program *next;
+} __packed;
+
+/**
+ * struct vidtv_psi_table_pat - The Program Association Table (PAT)
+ * See ISO/IEC 13818-1 : 2000 p.43
+ */
+struct vidtv_psi_table_pat {
+ struct vidtv_psi_table_header header;
+ u16 programs; /* Included by libdvbv5, not part of the table and not actually serialized */
+ struct vidtv_psi_table_pat_program *program;
+} __packed;
+
+/**
+ * struct vidtv_psi_table_sdt_service - Represents a service in the SDT.
+ * see ETSI EN 300 468 v1.15.1 section 5.2.3.
+ */
+struct vidtv_psi_table_sdt_service {
+ __be16 service_id;
+ u8 EIT_present_following:1;
+ u8 EIT_schedule:1;
+ u8 reserved:6;
+ __be16 bitfield; /* running_status: 3, free_ca:1, desc_loop_len:12 */
+ struct vidtv_psi_desc *descriptor;
+ struct vidtv_psi_table_sdt_service *next;
+} __packed;
+
+/**
+ * struct vidtv_psi_table_sdt - Represents the Service Description Table
+ * see ETSI EN 300 468 v1.15.1 section 5.2.3.
+ */
+struct vidtv_psi_table_sdt {
+ struct vidtv_psi_table_header header;
+ __be16 network_id; /* original_network_id */
+ u8 reserved;
+ struct vidtv_psi_table_sdt_service *service;
+} __packed;
+
+/**
+ * enum service_running_status - Status of a SDT service.
+ * see ETSI EN 300 468 v1.15.1 section 5.2.3 table 6.
+ */
+enum service_running_status {
+ RUNNING = 0x4,
+};
+
+/**
+ * enum service_type - The type of a SDT service.
+ * see ETSI EN 300 468 v1.15.1 section 6.2.33, table 81.
+ */
+enum service_type {
+ /* see ETSI EN 300 468 v1.15.1 p. 77 */
+ DIGITAL_TELEVISION_SERVICE = 0x1,
+};
+
+/**
+ * struct vidtv_psi_table_pmt_stream - A single stream in the PMT.
+ * See ISO/IEC 13818-1 : 2000 p.46.
+ */
+struct vidtv_psi_table_pmt_stream {
+ u8 type;
+ __be16 bitfield; /* reserved: 3, elementary_pid: 13 */
+	__be16 bitfield2; /* reserved: 4, zero: 2, desc_length: 10 */
+ struct vidtv_psi_desc *descriptor;
+ struct vidtv_psi_table_pmt_stream *next;
+} __packed;
+
+/**
+ * struct vidtv_psi_table_pmt - The Program Map Table (PMT).
+ * See ISO/IEC 13818-1 : 2000 p.46.
+ */
+struct vidtv_psi_table_pmt {
+ struct vidtv_psi_table_header header;
+ __be16 bitfield; /* reserved:3, pcr_pid: 13 */
+ __be16 bitfield2; /* reserved: 4, zero: 2, desc_len: 10 */
+ struct vidtv_psi_desc *descriptor;
+ struct vidtv_psi_table_pmt_stream *stream;
+} __packed;
+
+/**
+ * struct psi_write_args - Arguments for the PSI packetizer.
+ * @dest_buf: The buffer to write into.
+ * @from: PSI data to be copied.
+ * @len: How much to write.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @pid: TS packet ID.
+ * @new_psi_section: Set when starting a table section.
+ * @continuity_counter: Incremented on every new packet.
+ * @is_crc: Set when writing the CRC at the end.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @crc: a pointer to store the crc for this chunk
+ */
+struct psi_write_args {
+ void *dest_buf;
+ void *from;
+ size_t len;
+ u32 dest_offset;
+ u16 pid;
+ bool new_psi_section;
+ u8 *continuity_counter;
+ bool is_crc;
+ u32 dest_buf_sz;
+ u32 *crc;
+};
+
+/**
+ * struct desc_write_args - Arguments in order to write a descriptor.
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @desc: A pointer to the descriptor
+ * @pid: TS packet ID.
+ * @continuity_counter: Incremented on every new packet.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @crc: a pointer to store the crc for this chunk
+ */
+struct desc_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ struct vidtv_psi_desc *desc;
+ u16 pid;
+ u8 *continuity_counter;
+ u32 dest_buf_sz;
+ u32 *crc;
+};
+
+/**
+ * struct crc32_write_args - Arguments in order to write the CRC at the end of
+ * the PSI tables.
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @crc: the CRC value to write
+ * @pid: TS packet ID.
+ * @continuity_counter: Incremented on every new packet.
+ * @dest_buf_sz: The size of the dest_buffer
+ */
+struct crc32_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ __be32 crc;
+ u16 pid;
+ u8 *continuity_counter;
+ u32 dest_buf_sz;
+};
+
+/**
+ * struct header_write_args - Arguments in order to write the common table
+ * header
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: where to start writing in the dest_buffer.
+ * @h: a pointer to the header.
+ * @pid: TS packet ID.
+ * @continuity_counter: Incremented on every new packet.
+ * @dest_buf_sz: The size of the dest_buffer
+ * @crc: a pointer to store the crc for this chunk
+ */
+struct header_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ struct vidtv_psi_table_header *h;
+ u16 pid;
+ u8 *continuity_counter;
+ u32 dest_buf_sz;
+ u32 *crc;
+};
+
+struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc *head,
+ enum service_type service_type,
+ char *service_name,
+ char *provider_name);
+
+struct vidtv_psi_desc_registration
+*vidtv_psi_registration_desc_init(struct vidtv_psi_desc *head,
+ __be32 format_id,
+ u8 *additional_ident_info,
+ u32 additional_info_len);
+
+struct vidtv_psi_table_pat_program
+*vidtv_psi_pat_program_init(struct vidtv_psi_table_pat_program *head,
+ u16 service_id,
+ u16 program_map_pid);
+
+struct vidtv_psi_table_pmt_stream*
+vidtv_psi_pmt_stream_init(struct vidtv_psi_table_pmt_stream *head,
+ enum vidtv_psi_stream_types stream_type,
+ u16 es_pid);
+
+struct vidtv_psi_table_pat *vidtv_psi_pat_table_init(u16 transport_stream_id);
+
+struct vidtv_psi_table_pmt *vidtv_psi_pmt_table_init(u16 program_number,
+ u16 pcr_pid);
+
+struct vidtv_psi_table_sdt *vidtv_psi_sdt_table_init(u16 transport_stream_id);
+
+struct vidtv_psi_table_sdt_service*
+vidtv_psi_sdt_service_init(struct vidtv_psi_table_sdt_service *head,
+ u16 service_id);
+
+void
+vidtv_psi_desc_destroy(struct vidtv_psi_desc *desc);
+
+void
+vidtv_psi_pat_program_destroy(struct vidtv_psi_table_pat_program *p);
+
+void
+vidtv_psi_pat_table_destroy(struct vidtv_psi_table_pat *p);
+
+void
+vidtv_psi_pmt_stream_destroy(struct vidtv_psi_table_pmt_stream *s);
+
+void
+vidtv_psi_pmt_table_destroy(struct vidtv_psi_table_pmt *pmt);
+
+void
+vidtv_psi_sdt_table_destroy(struct vidtv_psi_table_sdt *sdt);
+
+void
+vidtv_psi_sdt_service_destroy(struct vidtv_psi_table_sdt_service *service);
+
+/**
+ * vidtv_psi_sdt_service_assign - Assigns the service loop to the SDT.
+ * @sdt: The SDT to assign to.
+ * @service: The service loop (one or more services)
+ *
+ * This will free the previous service loop in the table.
+ * This will assign ownership of the service loop to the table, i.e. the table
+ * will free this service loop when a call to its destroy function is made.
+ */
+void
+vidtv_psi_sdt_service_assign(struct vidtv_psi_table_sdt *sdt,
+ struct vidtv_psi_table_sdt_service *service);
+
+/**
+ * vidtv_psi_desc_assign - Assigns a descriptor loop at some point
+ * @to: Where to assign this descriptor loop to
+ * @desc: The descriptor loop that will be assigned.
+ *
+ * This will free the loop in 'to', if any.
+ */
+void vidtv_psi_desc_assign(struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc);
+
+/**
+ * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section.
+ * @pmt: The PMT section that will contain the descriptor loop
+ * @to: Where in the PMT to assign this descriptor loop to
+ * @desc: The descriptor loop that will be assigned.
+ *
+ * This will free the loop in 'to', if any.
+ * This will assign ownership of the loop to the table, i.e. the table
+ * will free this loop when a call to its destroy function is made.
+ */
+void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt,
+ struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc);
+
+/**
+ * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT.
+ * @sdt: The SDT that will contain the descriptor loop
+ * @to: Where in the SDT to assign this descriptor loop to
+ * @desc: The descriptor loop that will be assigned.
+ *
+ * This will free the loop in 'to', if any.
+ * This will assign ownership of the loop to the table, i.e. the table
+ * will free this loop when a call to its destroy function is made.
+ */
+void vidtv_sdt_desc_assign(struct vidtv_psi_table_sdt *sdt,
+ struct vidtv_psi_desc **to,
+ struct vidtv_psi_desc *desc);
+
+/**
+ * vidtv_psi_pat_program_assign - Assigns the program loop to the PAT.
+ * @pat: The PAT to assign to.
+ * @p: The program loop (one or more programs)
+ *
+ * This will free the previous program loop in the table.
+ * This will assign ownership of the program loop to the table, i.e. the table
+ * will free this program loop when a call to its destroy function is made.
+ */
+void vidtv_psi_pat_program_assign(struct vidtv_psi_table_pat *pat,
+ struct vidtv_psi_table_pat_program *p);
+
+/**
+ * vidtv_psi_pmt_stream_assign - Assigns the stream loop to the PMT.
+ * @pmt: The PMT to assign to.
+ * @s: The stream loop (one or more streams)
+ *
+ * This will free the previous stream loop in the table.
+ * This will assign ownership of the stream loop to the table, i.e. the table
+ * will free this stream loop when a call to its destroy function is made.
+ */
+void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt,
+ struct vidtv_psi_table_pmt_stream *s);
+
+struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc);
+
+/**
+ * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for
+ * each program found in the PAT
+ * @pat: The PAT to look for programs.
+ * @pcr_pid: packet ID for the PCR to be used for the program described in this
+ * PMT section
+ */
+struct vidtv_psi_table_pmt**
+vidtv_psi_pmt_create_sec_for_each_pat_entry(struct vidtv_psi_table_pat *pat, u16 pcr_pid);
+
+/**
+ * vidtv_psi_pmt_get_pid - Get the TS PID for a PMT section.
+ * @section: The PMT section whose PID we want to retrieve.
+ * @pat: The PAT table to look into.
+ *
+ * Returns: the TS PID for 'section'
+ */
+u16 vidtv_psi_pmt_get_pid(struct vidtv_psi_table_pmt *section,
+ struct vidtv_psi_table_pat *pat);
+
+/**
+ * vidtv_psi_pat_table_update_sec_len - Recompute and update the PAT section length.
+ * @pat: The PAT whose length is to be updated.
+ *
+ * This will traverse the table and accumulate the length of its components,
+ * which is then used to replace the 'section_length' field.
+ *
+ * If section_length > MAX_SECTION_LEN, the operation fails.
+ */
+void vidtv_psi_pat_table_update_sec_len(struct vidtv_psi_table_pat *pat);
+
+/**
+ * vidtv_psi_pmt_table_update_sec_len - Recompute and update the PMT section length.
+ * @pmt: The PMT whose length is to be updated.
+ *
+ * This will traverse the table and accumulate the length of its components,
+ * which is then used to replace the 'section_length' field.
+ *
+ * If section_length > MAX_SECTION_LEN, the operation fails.
+ */
+void vidtv_psi_pmt_table_update_sec_len(struct vidtv_psi_table_pmt *pmt);
+
+/**
+ * vidtv_psi_sdt_table_update_sec_len - Recompute and update the SDT section length.
+ * @sdt: The SDT whose length is to be updated.
+ *
+ * This will traverse the table and accumulate the length of its components,
+ * which is then used to replace the 'section_length' field.
+ *
+ * If section_length > MAX_SECTION_LEN, the operation fails.
+ */
+void vidtv_psi_sdt_table_update_sec_len(struct vidtv_psi_table_sdt *sdt);
+
+/**
+ * struct vidtv_psi_pat_write_args - Arguments for writing a PAT table
+ * @buf: The destination buffer.
+ * @offset: The offset into the destination buffer.
+ * @pat: A pointer to the PAT.
+ * @buf_sz: The size of the destination buffer.
+ * @continuity_counter: A pointer to the CC. Incremented on every new packet.
+ *
+ */
+struct vidtv_psi_pat_write_args {
+ char *buf;
+ u32 offset;
+ struct vidtv_psi_table_pat *pat;
+ u32 buf_sz;
+ u8 *continuity_counter;
+};
+
+/**
+ * vidtv_psi_pat_write_into - Write PAT as MPEG-TS packets into a buffer.
+ * @args: An instance of struct vidtv_psi_pat_write_args
+ *
+ * This function writes the MPEG TS packets for a PAT table into a buffer.
+ * Calling code will usually generate the PAT via a call to its init function
+ * and thus is responsible for freeing it.
+ *
+ * Return: The number of bytes written into the buffer. This is NOT
+ * equal to the size of the PAT, since more space is needed for TS headers during TS
+ * encapsulation.
+ */
+u32 vidtv_psi_pat_write_into(struct vidtv_psi_pat_write_args args);
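+
+/*
+ * A minimal usage sketch (hypothetical transport_stream_id, 'buf' and
+ * 'buf_sz'; error handling omitted):
+ *
+ *	u8 cc = 0;
+ *	struct vidtv_psi_table_pat *pat = vidtv_psi_pat_table_init(0x744);
+ *	struct vidtv_psi_pat_write_args args = {
+ *		.buf = buf,
+ *		.offset = 0,
+ *		.pat = pat,
+ *		.buf_sz = buf_sz,
+ *		.continuity_counter = &cc,
+ *	};
+ *	u32 nbytes = vidtv_psi_pat_write_into(args);
+ *
+ *	vidtv_psi_pat_table_destroy(pat);
+ */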
+
+/**
+ * struct vidtv_psi_sdt_write_args - Arguments for writing a SDT table
+ * @buf: The destination buffer.
+ * @offset: The offset into the destination buffer.
+ * @sdt: A pointer to the SDT.
+ * @buf_sz: The size of the destination buffer.
+ * @continuity_counter: A pointer to the CC. Incremented on every new packet.
+ *
+ */
+struct vidtv_psi_sdt_write_args {
+ char *buf;
+ u32 offset;
+ struct vidtv_psi_table_sdt *sdt;
+ u32 buf_sz;
+ u8 *continuity_counter;
+};
+
+/**
+ * vidtv_psi_sdt_write_into - Write SDT as MPEG-TS packets into a buffer.
+ * @args: an instance of struct vidtv_psi_sdt_write_args
+ *
+ * This function writes the MPEG TS packets for a SDT table into a buffer.
+ * Calling code will usually generate the SDT via a call to its init function
+ * and thus is responsible for freeing it.
+ *
+ * Return: The number of bytes written into the buffer. This is NOT
+ * equal to the size of the SDT, since more space is needed for TS headers during TS
+ * encapsulation.
+ */
+u32 vidtv_psi_sdt_write_into(struct vidtv_psi_sdt_write_args args);
+
+/**
+ * struct vidtv_psi_pmt_write_args - Arguments for writing a PMT section
+ * @buf: The destination buffer.
+ * @offset: The offset into the destination buffer.
+ * @pmt: A pointer to the PMT.
+ * @pid: The TS PID to use for the packets carrying this PMT section.
+ * @buf_sz: The size of the destination buffer.
+ * @continuity_counter: A pointer to the CC. Incremented on every new packet.
+ * @pcr_pid: The TS PID of the PCR packets for the program described by this
+ * PMT section.
+ *
+ */
+struct vidtv_psi_pmt_write_args {
+ char *buf;
+ u32 offset;
+ struct vidtv_psi_table_pmt *pmt;
+ u16 pid;
+ u32 buf_sz;
+ u8 *continuity_counter;
+ u16 pcr_pid;
+};
+
+/**
+ * vidtv_psi_pmt_write_into - Write PMT as MPEG-TS packets into a buffer.
+ * @args: an instance of struct vidtv_psi_pmt_write_args
+ *
+ * This function writes the MPEG TS packets for a PMT section into a buffer.
+ * Calling code will usually generate the PMT section via a call to its init function
+ * and thus is responsible for freeing it.
+ *
+ * Return: The number of bytes written into the buffer. This is NOT
+ * equal to the size of the PMT section, since more space is needed for TS headers
+ * during TS encapsulation.
+ */
+u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args args);
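+
+/*
+ * A minimal usage sketch: the TS PID for the section is looked up in the
+ * PAT via vidtv_psi_pmt_get_pid() (hypothetical 'pat', 'pmt', 'cc', buffer
+ * and PCR PID; error handling omitted):
+ *
+ *	struct vidtv_psi_pmt_write_args args = {
+ *		.buf = buf,
+ *		.offset = 0,
+ *		.pmt = pmt,
+ *		.pid = vidtv_psi_pmt_get_pid(pmt, pat),
+ *		.buf_sz = buf_sz,
+ *		.continuity_counter = &cc,
+ *		.pcr_pid = 0x200,
+ *	};
+ *	u32 nbytes = vidtv_psi_pmt_write_into(args);
+ */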
+
+/**
+ * vidtv_psi_find_pmt_sec - Finds the PMT section for 'program_num'
+ * @pmt_sections: The sections to look into.
+ * @nsections: The number of sections.
+ * @program_num: The 'program_num' from PAT pointing to a PMT section.
+ *
+ * Return: A pointer to the PMT, if found, or NULL.
+ */
+struct vidtv_psi_table_pmt *vidtv_psi_find_pmt_sec(struct vidtv_psi_table_pmt **pmt_sections,
+ u16 nsections,
+ u16 program_num);
+
+u16 vidtv_psi_get_pat_program_pid(struct vidtv_psi_table_pat_program *p);
+u16 vidtv_psi_pmt_stream_get_elem_pid(struct vidtv_psi_table_pmt_stream *s);
+
+#endif /* VIDTV_PSI_H */
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
new file mode 100644
index 000000000000..a447ccbd68d5
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the code for an AES3 (also known as AES/EBU) encoder.
+ * It is based on EBU Tech 3250 and SMPTE 302M technical documents.
+ *
+ * This encoder currently supports 16bit AES3 subframes using 16bit signed
+ * integers.
+ *
+ * Note: AU stands for Access Unit, and AAU stands for Audio Access Unit
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/fixp-arith.h>
+
+#include <linux/math64.h>
+#include <asm/byteorder.h>
+
+#include "vidtv_s302m.h"
+#include "vidtv_encoder.h"
+#include "vidtv_common.h"
+
+#define S302M_SAMPLING_RATE_HZ 48000
+#define PES_PRIVATE_STREAM_1 0xbd /* PES: private_stream_1 */
+#define S302M_BLOCK_SZ 192
+#define S302M_SIN_LUT_NUM_ELEM 1024
+
+/* these are retrieved empirically from ffmpeg/libavcodec */
+#define FF_S302M_DEFAULT_NUM_FRAMES 1115
+#define FF_S302M_DEFAULT_PTS_INCREMENT 2090
+#define FF_S302M_DEFAULT_PTS_OFFSET 100000
+
+/* Used by the tone generator: fixp_sin32() takes angles in degrees, so PI is 180 */
+#define PI 180
+
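+/*
+ * Byte bit-reversal lookup table: AES3 data is serialized LSB first, so
+ * each byte is reversed before being written out.
+ */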
+static const u8 reverse[256] = {
+ /* from ffmpeg */
+ 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0,
+ 0x30, 0xB0, 0x70, 0xF0, 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8,
+ 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, 0x04, 0x84, 0x44, 0xC4,
+ 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
+ 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC,
+ 0x3C, 0xBC, 0x7C, 0xFC, 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2,
+ 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, 0x0A, 0x8A, 0x4A, 0xCA,
+ 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
+ 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6,
+ 0x36, 0xB6, 0x76, 0xF6, 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE,
+ 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, 0x01, 0x81, 0x41, 0xC1,
+ 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
+ 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9,
+ 0x39, 0xB9, 0x79, 0xF9, 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5,
+ 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, 0x0D, 0x8D, 0x4D, 0xCD,
+ 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
+ 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3,
+ 0x33, 0xB3, 0x73, 0xF3, 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB,
+ 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, 0x07, 0x87, 0x47, 0xC7,
+ 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
+ 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF,
+ 0x3F, 0xBF, 0x7F, 0xFF,
+};
+
+struct tone_duration {
+ enum musical_notes note;
+ int duration;
+};
+
+#define COMPASS 120 /* beats per minute (Allegro) */
+static const struct tone_duration beethoven_5th_symphony[] = {
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_GS_5, 128}, { NOTE_B_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_E_5, 128},
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_C_6, 128}, { NOTE_B_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_SILENT, 128},
+
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_GS_5, 128}, { NOTE_B_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_E_5, 128},
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_B_5, 128},
+ { NOTE_D_6, 128}, { NOTE_C_6, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_C_5, 128},
+ { NOTE_E_5, 128}, { NOTE_A_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_GS_4, 128}, { NOTE_E_5, 128},
+ { NOTE_C_6, 128}, { NOTE_B_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_B_4, 128},
+ { NOTE_C_5, 128}, { NOTE_D_5, 128}, { NOTE_C_4, 128},
+ { NOTE_G_4, 128}, { NOTE_C_5, 128}, { NOTE_G_4, 128},
+ { NOTE_F_5, 128}, { NOTE_E_5, 128}, { NOTE_G_3, 128},
+ { NOTE_G_4, 128}, { NOTE_B_3, 128}, { NOTE_F_4, 128},
+ { NOTE_E_5, 128}, { NOTE_D_5, 128}, { NOTE_A_3, 128},
+ { NOTE_E_4, 128}, { NOTE_A_4, 128}, { NOTE_E_4, 128},
+ { NOTE_D_5, 128}, { NOTE_C_5, 128}, { NOTE_E_3, 128},
+ { NOTE_E_4, 128}, { NOTE_E_5, 255}, { NOTE_E_6, 128},
+ { NOTE_E_5, 128}, { NOTE_E_6, 128}, { NOTE_E_5, 255},
+ { NOTE_DS_5, 128}, { NOTE_E_5, 128}, { NOTE_DS_6, 128},
+ { NOTE_E_6, 128}, { NOTE_DS_5, 128}, { NOTE_E_5, 128},
+ { NOTE_DS_6, 128}, { NOTE_E_6, 128}, { NOTE_DS_6, 128},
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_B_5, 128}, { NOTE_D_6, 128}, { NOTE_C_6, 128},
+ { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
+ { NOTE_C_5, 128}, { NOTE_E_5, 128}, { NOTE_A_5, 128},
+ { NOTE_E_3, 128}, { NOTE_E_4, 128}, { NOTE_GS_4, 128},
+ { NOTE_E_5, 128}, { NOTE_GS_5, 128}, { NOTE_B_5, 128},
+ { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
+ { NOTE_E_5, 128}, { NOTE_E_6, 128}, { NOTE_DS_6, 128},
+ { NOTE_E_6, 128}, { NOTE_DS_6, 128}, { NOTE_E_6, 128},
+ { NOTE_B_5, 128}, { NOTE_D_6, 128}, { NOTE_C_6, 128},
+ { NOTE_A_3, 128}, { NOTE_E_4, 128}, { NOTE_A_4, 128},
+ { NOTE_C_5, 128}, { NOTE_E_5, 128}, { NOTE_A_5, 128},
+ { NOTE_E_3, 128}, { NOTE_E_4, 128}, { NOTE_GS_4, 128},
+ { NOTE_E_5, 128}, { NOTE_C_6, 128}, { NOTE_B_5, 128},
+ { NOTE_C_5, 255}, { NOTE_C_5, 255}, { NOTE_SILENT, 512},
+};
+
+static struct vidtv_access_unit *vidtv_s302m_access_unit_init(struct vidtv_access_unit *head)
+{
+	struct vidtv_access_unit *au = kzalloc(sizeof(*au), GFP_KERNEL);
+
+	if (!au)
+		return NULL;
+
+ if (head) {
+ while (head->next)
+ head = head->next;
+
+ head->next = au;
+ }
+
+ return au;
+}
+
+static void vidtv_s302m_access_unit_destroy(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *head = e->access_units;
+ struct vidtv_access_unit *tmp = NULL;
+
+ while (head) {
+ tmp = head;
+ head = head->next;
+ kfree(tmp);
+ }
+
+ e->access_units = NULL;
+}
+
+static void vidtv_s302m_alloc_au(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *sync_au = NULL;
+ struct vidtv_access_unit *temp = NULL;
+
+ if (e->sync && e->sync->is_video_encoder) {
+ sync_au = e->sync->access_units;
+
+ while (sync_au) {
+ temp = vidtv_s302m_access_unit_init(e->access_units);
+ if (!e->access_units)
+ e->access_units = temp;
+
+ sync_au = sync_au->next;
+ }
+
+ return;
+ }
+
+ e->access_units = vidtv_s302m_access_unit_init(NULL);
+}
+
+static void
+vidtv_s302m_compute_sample_count_from_video(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *au = e->access_units;
+ struct vidtv_access_unit *sync_au = e->sync->access_units;
+ u32 vau_duration_usecs;
+ u32 sample_duration_usecs;
+ u32 s;
+
+ vau_duration_usecs = USEC_PER_SEC / e->sync->sampling_rate_hz;
+ sample_duration_usecs = USEC_PER_SEC / e->sampling_rate_hz;
+
+ while (au && sync_au) {
+ s = DIV_ROUND_UP(vau_duration_usecs, sample_duration_usecs);
+ au->num_samples = s;
+ au = au->next;
+ sync_au = sync_au->next;
+ }
+}
+
+static void vidtv_s302m_compute_pts_from_video(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *au = e->access_units;
+ struct vidtv_access_unit *sync_au = e->sync->access_units;
+
+	/* use the same PTS as the corresponding video access unit */
+ while (au && sync_au) {
+ au->pts = sync_au->pts;
+ au = au->next;
+ sync_au = sync_au->next;
+ }
+}
+
+static u16 vidtv_s302m_get_sample(struct vidtv_encoder *e)
+{
+ u16 sample;
+ int pos;
+
+ if (!e->src_buf) {
+ /*
+ * Simple tone generator: play the tones at the
+ * beethoven_5th_symphony array.
+ */
+ if (e->last_duration <= 0) {
+ if (e->src_buf_offset >= ARRAY_SIZE(beethoven_5th_symphony))
+ e->src_buf_offset = 0;
+
+ e->last_tone = beethoven_5th_symphony[e->src_buf_offset].note;
+ e->last_duration = beethoven_5th_symphony[e->src_buf_offset].duration * S302M_SAMPLING_RATE_HZ / COMPASS / 5;
+ e->src_buf_offset++;
+ e->note_offset = 0;
+ } else {
+ e->last_duration--;
+ }
+
+		/* Handle a silent note */
+ if (!e->last_tone) {
+ e->src_buf_offset = 0;
+ return 0x8000;
+ }
+
+ pos = (2 * PI * e->note_offset * e->last_tone / S302M_SAMPLING_RATE_HZ);
+
+ if (pos == 360)
+ e->note_offset = 0;
+ else
+ e->note_offset++;
+
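+		/*
+		 * fixp_sin32() takes the angle in degrees and returns a
+		 * signed 32-bit fixed-point value: shift it down to 16 bits
+		 * and add a bias so the sample is centered at 0x8000.
+		 */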
+ return (fixp_sin32(pos % (2 * PI)) >> 16) + 0x8000;
+ }
+
+ /* bug somewhere */
+ if (e->src_buf_offset > e->src_buf_sz) {
+ pr_err_ratelimited("overflow detected: %d > %d, wrapping.\n",
+ e->src_buf_offset,
+ e->src_buf_sz);
+
+ e->src_buf_offset = 0;
+ }
+
+ if (e->src_buf_offset >= e->src_buf_sz) {
+ /* let the source know we are out of data */
+ if (e->last_sample_cb)
+ e->last_sample_cb(e->sample_count);
+
+ e->src_buf_offset = 0;
+ }
+
+ sample = *(u16 *)(e->src_buf + e->src_buf_offset);
+
+ return sample;
+}
+
+static u32 vidtv_s302m_write_frame(struct vidtv_encoder *e,
+ u16 sample)
+{
+ u32 nbytes = 0;
+ struct vidtv_s302m_frame_16 f = {};
+ struct vidtv_s302m_ctx *ctx = e->ctx;
+
+ /* from ffmpeg: see s302enc.c */
+
+ u8 vucf = ctx->frame_index == 0 ? 0x10 : 0;
+
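+	/*
+	 * Pack the same 16-bit sample into both subframes of the 5-byte
+	 * SMPTE 302M frame (i.e. left == right); 'vucf' carries the AES3
+	 * V/U/C/F status bits and flags the first frame of each 192-frame
+	 * block.
+	 */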
+ f.data[0] = sample & 0xFF;
+ f.data[1] = (sample & 0xFF00) >> 8;
+ f.data[2] = ((sample & 0x0F) << 4) | vucf;
+ f.data[3] = (sample & 0x0FF0) >> 4;
+ f.data[4] = (sample & 0xF000) >> 12;
+
+ f.data[0] = reverse[f.data[0]];
+ f.data[1] = reverse[f.data[1]];
+ f.data[2] = reverse[f.data[2]];
+ f.data[3] = reverse[f.data[3]];
+ f.data[4] = reverse[f.data[4]];
+
+ nbytes += vidtv_memcpy(e->encoder_buf,
+ e->encoder_buf_offset,
+ VIDTV_S302M_BUF_SZ,
+ &f,
+ sizeof(f));
+
+ e->encoder_buf_offset += nbytes;
+
+ ctx->frame_index++;
+ if (ctx->frame_index >= S302M_BLOCK_SZ)
+ ctx->frame_index = 0;
+
+ return nbytes;
+}
+
+static u32 vidtv_s302m_write_h(struct vidtv_encoder *e, u32 p_sz)
+{
+ struct vidtv_smpte_s302m_es h = {};
+ u32 nbytes = 0;
+
+ /* 2 channels, ident: 0, 16 bits per sample */
+ h.bitfield = cpu_to_be32((p_sz << 16));
+
+ nbytes += vidtv_memcpy(e->encoder_buf,
+ e->encoder_buf_offset,
+ e->encoder_buf_sz,
+ &h,
+ sizeof(h));
+
+ e->encoder_buf_offset += nbytes;
+ return nbytes;
+}
+
+static void vidtv_s302m_write_frames(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *au = e->access_units;
+ struct vidtv_s302m_ctx *ctx = e->ctx;
+ u32 nbytes_per_unit = 0;
+ u32 nbytes = 0;
+ u32 au_sz = 0;
+ u16 sample;
+ u32 j;
+
+ while (au) {
+ au_sz = au->num_samples *
+ sizeof(struct vidtv_s302m_frame_16);
+
+ nbytes_per_unit = vidtv_s302m_write_h(e, au_sz);
+
+ for (j = 0; j < au->num_samples; ++j) {
+ sample = vidtv_s302m_get_sample(e);
+ nbytes_per_unit += vidtv_s302m_write_frame(e, sample);
+
+ if (e->src_buf)
+ e->src_buf_offset += sizeof(u16);
+
+ e->sample_count++;
+ }
+
+ au->nbytes = nbytes_per_unit;
+
+ if (au_sz + sizeof(struct vidtv_smpte_s302m_es) != nbytes_per_unit) {
+ pr_warn_ratelimited("write size was %u, expected %zu\n",
+ nbytes_per_unit,
+ au_sz + sizeof(struct vidtv_smpte_s302m_es));
+ }
+
+ nbytes += nbytes_per_unit;
+ au->offset = nbytes - nbytes_per_unit;
+
+ nbytes_per_unit = 0;
+ ctx->au_count++;
+
+ au = au->next;
+ }
+}
+
+static void *vidtv_s302m_encode(struct vidtv_encoder *e)
+{
+ /*
+ * According to SMPTE 302M, an audio access unit is specified as those
+ * AES3 words that are associated with a corresponding video frame.
+ * Therefore, there is one audio access unit for every video access unit
+ * in the corresponding video encoder ('sync'), using the same values
+ * for PTS as used by the video encoder.
+ *
+ * Assuming that it is also possible to send audio without any
+ * associated video, as in a radio-like service, a single audio access unit
+ * is created with values for 'num_samples' and 'pts' taken empirically from
+ * ffmpeg
+ */
+
+ struct vidtv_s302m_ctx *ctx = e->ctx;
+
+ vidtv_s302m_access_unit_destroy(e);
+ vidtv_s302m_alloc_au(e);
+
+ if (e->sync && e->sync->is_video_encoder) {
+ vidtv_s302m_compute_sample_count_from_video(e);
+ vidtv_s302m_compute_pts_from_video(e);
+ } else {
+ e->access_units->num_samples = FF_S302M_DEFAULT_NUM_FRAMES;
+ e->access_units->pts = (ctx->au_count * FF_S302M_DEFAULT_PTS_INCREMENT) +
+ FF_S302M_DEFAULT_PTS_OFFSET;
+ }
+
+ vidtv_s302m_write_frames(e);
+
+ return e->encoder_buf;
+}
+
+static u32 vidtv_s302m_clear(struct vidtv_encoder *e)
+{
+ struct vidtv_access_unit *au = e->access_units;
+ u32 count = 0;
+
+ while (au) {
+ count++;
+ au = au->next;
+ }
+
+ vidtv_s302m_access_unit_destroy(e);
+ memset(e->encoder_buf, 0, VIDTV_S302M_BUF_SZ);
+ e->encoder_buf_offset = 0;
+
+ return count;
+}
+
+struct vidtv_encoder
+*vidtv_s302m_encoder_init(struct vidtv_s302m_encoder_init_args args)
+{
+	struct vidtv_encoder *e = kzalloc(sizeof(*e), GFP_KERNEL);
+	u32 priv_sz = sizeof(struct vidtv_s302m_ctx);
+
+	if (!e)
+		return NULL;
+
+	e->id = S302M;
+
+ if (args.name)
+ e->name = kstrdup(args.name, GFP_KERNEL);
+
+	e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ);
+	if (!e->encoder_buf) {
+		kfree(e->name);
+		kfree(e);
+		return NULL;
+	}
+ e->encoder_buf_sz = VIDTV_S302M_BUF_SZ;
+ e->encoder_buf_offset = 0;
+
+ e->sample_count = 0;
+ e->last_duration = 0;
+
+ e->src_buf = (args.src_buf) ? args.src_buf : NULL;
+ e->src_buf_sz = (args.src_buf) ? args.src_buf_sz : 0;
+ e->src_buf_offset = 0;
+
+ e->is_video_encoder = false;
+	e->ctx = kzalloc(priv_sz, GFP_KERNEL);
+	if (!e->ctx) {
+		kfree(e->name);
+		vfree(e->encoder_buf);
+		kfree(e);
+		return NULL;
+	}
+
+ e->encode = vidtv_s302m_encode;
+ e->clear = vidtv_s302m_clear;
+
+ e->es_pid = cpu_to_be16(args.es_pid);
+ e->stream_id = cpu_to_be16(PES_PRIVATE_STREAM_1);
+
+ e->sync = args.sync;
+ e->sampling_rate_hz = S302M_SAMPLING_RATE_HZ;
+
+ e->last_sample_cb = args.last_sample_cb;
+
+ e->destroy = vidtv_s302m_encoder_destroy;
+
+ if (args.head) {
+ while (args.head->next)
+ args.head = args.head->next;
+
+ args.head->next = e;
+ }
+
+ e->next = NULL;
+
+ return e;
+}
+
+void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e)
+{
+ if (e->id != S302M) {
+ pr_err_ratelimited("Encoder type mismatch, skipping.\n");
+ return;
+ }
+
+ vidtv_s302m_access_unit_destroy(e);
+ kfree(e->name);
+ vfree(e->encoder_buf);
+ kfree(e->ctx);
+ kfree(e);
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.h b/drivers/media/test-drivers/vidtv/vidtv_s302m.h
new file mode 100644
index 000000000000..eca5e3150ede
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Vidtv serves as a reference DVB driver and helps validate the existing APIs
+ * in the media subsystem. It can also aid developers working on userspace
+ * applications.
+ *
+ * This file contains the code for an AES3 (also known as AES/EBU) encoder.
+ * It is based on EBU Tech 3250 and SMPTE 302M technical documents.
+ *
+ * This encoder currently supports 16bit AES3 subframes using 16bit signed
+ * integers.
+ *
+ * Note: AU stands for Access Unit, and AAU stands for Audio Access Unit
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_S302M_H
+#define VIDTV_S302M_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include "vidtv_encoder.h"
+
+/* see SMPTE 302M 2007 clause 7.3 */
+#define VIDTV_S302M_BUF_SZ 65024
+
+/* see ETSI TS 102 154 v.1.2.1 clause 7.3.5 */
+#define VIDTV_S302M_FORMAT_IDENTIFIER 0x42535344
+
+/**
+ * struct vidtv_s302m_ctx - s302m encoder context.
+ * @enc: A pointer to the containing encoder structure.
+ * @frame_index: The current frame in a block
+ * @au_count: The total number of access units encoded up to now
+ */
+struct vidtv_s302m_ctx {
+ struct vidtv_encoder *enc;
+ u32 frame_index;
+ u32 au_count;
+};
+
+/**
+ * struct vidtv_smpte_s302m_es - s302m MPEG Elementary Stream header.
+ *
+ * See SMPTE 302M 2007 table 1.
+ */
+struct vidtv_smpte_s302m_es {
+ /*
+ *
+ * audio_packet_size:16;
+ * num_channels:2;
+ * channel_identification:8;
+ * bits_per_sample:2; // 0x0 for 16bits
+ * zero:4;
+ */
+ __be32 bitfield;
+} __packed;
+
+struct vidtv_s302m_frame_16 {
+ u8 data[5];
+} __packed;
+
+/**
+ * struct vidtv_s302m_encoder_init_args - Args for the s302m encoder.
+ *
+ * @name: A name to identify this particular instance
+ * @src_buf: The source buffer; if NULL, the encoder falls back to its internal tone generator.
+ * @src_buf_sz: The size of the source buffer.
+ * @es_pid: The MPEG Elementary Stream PID to use.
+ * @sync: Attempt to synchronize audio with this video encoder, if not NULL.
+ * @last_sample_cb: A callback called when the encoder runs out of data.
+ * @head: Add to this chain
+ */
+struct vidtv_s302m_encoder_init_args {
+ char *name;
+ void *src_buf;
+ u32 src_buf_sz;
+ u16 es_pid;
+ struct vidtv_encoder *sync;
+ void (*last_sample_cb)(u32 sample_no);
+
+ struct vidtv_encoder *head;
+};
+
+struct vidtv_encoder
+*vidtv_s302m_encoder_init(struct vidtv_s302m_encoder_init_args args);
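+
+/*
+ * A minimal usage sketch (hypothetical ES PID; with a NULL src_buf the
+ * encoder falls back to its internal tone generator):
+ *
+ *	struct vidtv_s302m_encoder_init_args args = {
+ *		.name = "audio",
+ *		.es_pid = 0x111,
+ *	};
+ *	struct vidtv_encoder *e = vidtv_s302m_encoder_init(args);
+ *
+ *	e->encode(e);
+ *	e->destroy(e);
+ */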
+
+void vidtv_s302m_encoder_destroy(struct vidtv_encoder *encoder);
+
+#endif /* VIDTV_S302M_H */
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.c b/drivers/media/test-drivers/vidtv/vidtv_ts.c
new file mode 100644
index 000000000000..190b9e4438dc
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s, %d: " fmt, __func__, __LINE__
+
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/types.h>
+#include <linux/math64.h>
+#include <asm/byteorder.h>
+
+#include "vidtv_ts.h"
+#include "vidtv_common.h"
+
+static u32 vidtv_ts_write_pcr_bits(u8 *to, u32 to_offset, u64 pcr)
+{
+	/* Same algorithm as ffmpeg's. The PCR is a counter driven by a 27MHz clock */
+ u64 div;
+ u64 rem;
+ u8 *buf = to + to_offset;
+ u64 pcr_low;
+ u64 pcr_high;
+
+ div = div64_u64_rem(pcr, 300, &rem);
+
+ pcr_low = rem; /* pcr_low = pcr % 300 */
+ pcr_high = div; /* pcr_high = pcr / 300 */
+
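+	/*
+	 * Serialize as program_clock_reference_base (33 bits, 90kHz units),
+	 * 6 reserved bits (all ones, hence the 0x7e below) and
+	 * program_clock_reference_extension (9 bits, 27MHz units).
+	 */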
+ *buf++ = pcr_high >> 25;
+ *buf++ = pcr_high >> 17;
+ *buf++ = pcr_high >> 9;
+ *buf++ = pcr_high >> 1;
+ *buf++ = pcr_high << 7 | pcr_low >> 8 | 0x7e;
+ *buf++ = pcr_low;
+
+ return 6;
+}
+
+void vidtv_ts_inc_cc(u8 *continuity_counter)
+{
+ ++*continuity_counter;
+ if (*continuity_counter > TS_CC_MAX_VAL)
+ *continuity_counter = 0;
+}
+
+u32 vidtv_ts_null_write_into(struct null_packet_write_args args)
+{
+ u32 nbytes = 0;
+ struct vidtv_mpeg_ts ts_header = {};
+
+ ts_header.sync_byte = TS_SYNC_BYTE;
+ ts_header.bitfield = cpu_to_be16(TS_NULL_PACKET_PID);
+ ts_header.payload = 1;
+ ts_header.continuity_counter = *args.continuity_counter;
+
+ /* copy TS header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.buf_sz,
+ &ts_header,
+ sizeof(ts_header));
+
+ vidtv_ts_inc_cc(args.continuity_counter);
+
+ /* fill the rest with empty data */
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.buf_sz,
+ TS_FILL_BYTE,
+ TS_PACKET_LEN - nbytes);
+
+	/* we should have written exactly _one_ 188-byte packet */
+	if (nbytes != TS_PACKET_LEN)
+		pr_warn_ratelimited("Expected exactly %d bytes, got %u\n",
+				    TS_PACKET_LEN,
+				    nbytes);
+
+ return nbytes;
+}
+
+u32 vidtv_ts_pcr_write_into(struct pcr_write_args args)
+{
+ u32 nbytes = 0;
+ struct vidtv_mpeg_ts ts_header = {};
+ struct vidtv_mpeg_ts_adaption ts_adap = {};
+
+ ts_header.sync_byte = TS_SYNC_BYTE;
+ ts_header.bitfield = cpu_to_be16(args.pid);
+ ts_header.scrambling = 0;
+ /* cc is not incremented, but it is needed. see 13818-1 clause 2.4.3.3 */
+ ts_header.continuity_counter = *args.continuity_counter;
+ ts_header.payload = 0;
+ ts_header.adaptation_field = 1;
+
+ /* 13818-1 clause 2.4.3.5 */
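+	/* the adaptation field fills the packet: 188 - 4 (TS header) - 1 (length byte) */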
+ ts_adap.length = 183;
+ ts_adap.PCR = 1;
+
+ /* copy TS header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.buf_sz,
+ &ts_header,
+ sizeof(ts_header));
+
+	/* write the adaptation field after the TS header */
+ nbytes += vidtv_memcpy(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.buf_sz,
+ &ts_adap,
+ sizeof(ts_adap));
+
+	/* write the optional PCR field */
+ nbytes += vidtv_ts_write_pcr_bits(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.pcr);
+
+ nbytes += vidtv_memset(args.dest_buf,
+ args.dest_offset + nbytes,
+ args.buf_sz,
+ TS_FILL_BYTE,
+ TS_PACKET_LEN - nbytes);
+
+	/* we should have written exactly _one_ 188-byte packet */
+	if (nbytes != TS_PACKET_LEN)
+		pr_warn_ratelimited("Expected exactly %d bytes, got %u\n",
+				    TS_PACKET_LEN,
+				    nbytes);
+
+ return nbytes;
+}
diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.h b/drivers/media/test-drivers/vidtv/vidtv_ts.h
new file mode 100644
index 000000000000..83dcc9183b45
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_ts.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_TS_H
+#define VIDTV_TS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define TS_SYNC_BYTE 0x47
+#define TS_PACKET_LEN 188
+#define TS_PAYLOAD_LEN 184
+#define TS_NULL_PACKET_PID 0x1fff
+#define TS_CC_MAX_VAL 0x0f /* 4 bits */
+#define TS_LAST_VALID_PID 8191
+#define TS_FILL_BYTE 0xff /* the byte used in packet stuffing */
+
+struct vidtv_mpeg_ts_adaption {
+ u8 length;
+ struct {
+ u8 extension:1;
+ u8 private_data:1;
+ u8 splicing_point:1;
+ u8 OPCR:1;
+ u8 PCR:1;
+ u8 priority:1;
+ u8 random_access:1;
+ u8 discontinued:1;
+ } __packed;
+ u8 data[];
+} __packed;
+
+struct vidtv_mpeg_ts {
+ u8 sync_byte;
+ __be16 bitfield; /* tei: 1, payload_start:1 priority: 1, pid:13 */
+ struct {
+ u8 continuity_counter:4;
+ u8 payload:1;
+ u8 adaptation_field:1;
+ u8 scrambling:2;
+ } __packed;
+ struct vidtv_mpeg_ts_adaption adaption[];
+} __packed;
+
+/**
+ * struct pcr_write_args - Arguments for the pcr_write_into function.
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: The byte offset into the buffer.
+ * @pid: The TS PID for the PCR packets.
+ * @buf_sz: The size of the buffer in bytes.
+ * @continuity_counter: The TS continuity_counter.
+ * @pcr: A sample from the system clock.
+ */
+struct pcr_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ u16 pid;
+ u32 buf_sz;
+ u8 *continuity_counter;
+ u64 pcr;
+};
+
+/**
+ * struct null_packet_write_args - Arguments for the null_write_into function
+ * @dest_buf: The buffer to write into.
+ * @dest_offset: The byte offset into the buffer.
+ * @buf_sz: The size of the buffer in bytes.
+ * @continuity_counter: The TS continuity_counter.
+ */
+struct null_packet_write_args {
+ void *dest_buf;
+ u32 dest_offset;
+ u32 buf_sz;
+ u8 *continuity_counter;
+};
+
+/* Increment the continuity counter */
+void vidtv_ts_inc_cc(u8 *continuity_counter);
+
+/**
+ * vidtv_ts_null_write_into - Write a TS null packet into a buffer.
+ * @args: the arguments to use when writing.
+ *
+ * This function will write a null packet into a buffer. This is usually used to
+ * pad TS streams.
+ *
+ * Return: The number of bytes written into the buffer.
+ */
+u32 vidtv_ts_null_write_into(struct null_packet_write_args args);
+
+/**
+ * vidtv_ts_pcr_write_into - Write a PCR packet into a buffer.
+ * @args: the arguments to use when writing.
+ *
+ * This function will write a PCR packet into a buffer. This is used to
+ * synchronize the clocks between encoders and decoders.
+ *
+ * Return: The number of bytes written into the buffer.
+ */
+u32 vidtv_ts_pcr_write_into(struct pcr_write_args args);
+
+#endif /* VIDTV_TS_H */
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
new file mode 100644
index 000000000000..9bc49e099f65
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * The vidtv tuner should support common TV standards such as
+ * DVB-T/T2/S/S2, ISDB-T and ATSC when completed.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/dvb_frontend.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+
+#include "vidtv_tuner.h"
+
+struct vidtv_tuner_cnr_to_qual_s {
+ /* attempt to use the same values as libdvbv5 */
+ u32 modulation;
+ u32 fec;
+ u32 cnr_ok;
+ u32 cnr_good;
+};
+
+static const struct vidtv_tuner_cnr_to_qual_s vidtv_tuner_c_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli db */
+ { QAM_256, FEC_NONE, 34000, 38000},
+ { QAM_64, FEC_NONE, 30000, 34000},
+};
+
+static const struct vidtv_tuner_cnr_to_qual_s vidtv_tuner_s_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli db */
+ { QPSK, FEC_1_2, 7000, 10000},
+ { QPSK, FEC_2_3, 9000, 12000},
+ { QPSK, FEC_3_4, 10000, 13000},
+ { QPSK, FEC_5_6, 11000, 14000},
+ { QPSK, FEC_7_8, 12000, 15000},
+};
+
+static const struct vidtv_tuner_cnr_to_qual_s vidtv_tuner_s2_cnr_2_qual[] = {
+ /* from libdvbv5 source code, in milli db */
+ { QPSK, FEC_1_2, 9000, 12000},
+ { QPSK, FEC_2_3, 11000, 14000},
+ { QPSK, FEC_3_4, 12000, 15000},
+ { QPSK, FEC_5_6, 12000, 15000},
+ { QPSK, FEC_8_9, 13000, 16000},
+ { QPSK, FEC_9_10, 13500, 16500},
+ { PSK_8, FEC_2_3, 14500, 17500},
+ { PSK_8, FEC_3_4, 16000, 19000},
+ { PSK_8, FEC_5_6, 17500, 20500},
+ { PSK_8, FEC_8_9, 19000, 22000},
+};
+
+static const struct vidtv_tuner_cnr_to_qual_s vidtv_tuner_t_cnr_2_qual[] = {
+	/* from libdvbv5 source code, in milli db */
+ { QPSK, FEC_1_2, 4100, 5900},
+ { QPSK, FEC_2_3, 6100, 9600},
+ { QPSK, FEC_3_4, 7200, 12400},
+ { QPSK, FEC_5_6, 8500, 15600},
+ { QPSK, FEC_7_8, 9200, 17500},
+ { QAM_16, FEC_1_2, 9800, 11800},
+ { QAM_16, FEC_2_3, 12100, 15300},
+ { QAM_16, FEC_3_4, 13400, 18100},
+ { QAM_16, FEC_5_6, 14800, 21300},
+ { QAM_16, FEC_7_8, 15700, 23600},
+ { QAM_64, FEC_1_2, 14000, 16000},
+ { QAM_64, FEC_2_3, 19900, 25400},
+ { QAM_64, FEC_3_4, 24900, 27900},
+ { QAM_64, FEC_5_6, 21300, 23300},
+ { QAM_64, FEC_7_8, 22000, 24000},
+};
+
+/**
+ * struct vidtv_tuner_hardware_state - Simulate the tuner hardware status
+ * @asleep: whether the tuner is asleep, i.e. whether _sleep() or _suspend() was
+ * called.
+ * @lock_status: Whether the tuner has managed to lock on the requested
+ * frequency.
+ * @if_frequency: The tuner's intermediate frequency. Hardcoded for the purposes
+ * of simulation.
+ * @tuned_frequency: The actual tuned frequency.
+ * @bandwidth: The actual bandwidth.
+ *
+ * This structure is meant to simulate the status of the tuner hardware,
+ * as if a physical tuner were present.
+ */
+struct vidtv_tuner_hardware_state {
+ bool asleep;
+ u32 lock_status;
+ u32 if_frequency;
+ u32 tuned_frequency;
+ u32 bandwidth;
+};
+
+/**
+ * struct vidtv_tuner_dev - The tuner struct
+ * @fe: A pointer to the dvb_frontend structure allocated by vidtv_demod
+ * @hw_state: A struct to simulate the tuner's hardware state, as if a
+ *            physical tuner were present.
+ * @config: The configuration used to start the tuner module, usually filled
+ * by a bridge driver. For vidtv, this is filled by vidtv_bridge before the
+ * tuner module is probed.
+ */
+struct vidtv_tuner_dev {
+ struct dvb_frontend *fe;
+ struct vidtv_tuner_hardware_state hw_state;
+ struct vidtv_tuner_config config;
+};
+
+static struct vidtv_tuner_dev *
+vidtv_tuner_get_dev(struct dvb_frontend *fe)
+{
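+	/* the i2c client was stored in fe->tuner_priv at probe time */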
+ return i2c_get_clientdata(fe->tuner_priv);
+}
+
+static int vidtv_tuner_check_frequency_shift(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct vidtv_tuner_config config = tuner_dev->config;
+ u32 *valid_freqs = NULL;
+ u32 array_sz = 0;
+ u32 i;
+ u32 shift;
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ valid_freqs = config.vidtv_valid_dvb_t_freqs;
+ array_sz = ARRAY_SIZE(config.vidtv_valid_dvb_t_freqs);
+ break;
+ case SYS_DVBS:
+ case SYS_DVBS2:
+ valid_freqs = config.vidtv_valid_dvb_s_freqs;
+ array_sz = ARRAY_SIZE(config.vidtv_valid_dvb_s_freqs);
+ break;
+ case SYS_DVBC_ANNEX_A:
+ valid_freqs = config.vidtv_valid_dvb_c_freqs;
+ array_sz = ARRAY_SIZE(config.vidtv_valid_dvb_c_freqs);
+ break;
+
+ default:
+ dev_warn(fe->dvb->device,
+ "%s: unsupported delivery system: %u\n",
+ __func__,
+ c->delivery_system);
+
+ return -EINVAL;
+ }
+
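+	/* a zero entry terminates the list of valid frequencies */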
+ for (i = 0; i < array_sz; i++) {
+ if (!valid_freqs[i])
+ break;
+ shift = abs(c->frequency - valid_freqs[i]);
+
+ if (!shift)
+ return 0;
+
+		/*
+		 * This provides a value from 0 to 100 indicating how far
+		 * the tuned frequency is from the right one.
+		 */
+ if (shift < config.max_frequency_shift_hz)
+ return shift * 100 / config.max_frequency_shift_hz;
+ }
+
+ return -EINVAL;
+}
+
+static int
+vidtv_tuner_get_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+ const struct vidtv_tuner_cnr_to_qual_s *cnr2qual = NULL;
+ struct device *dev = fe->dvb->device;
+ u32 array_size = 0;
+ s32 shift;
+ u32 i;
+
+ shift = vidtv_tuner_check_frequency_shift(fe);
+ if (shift < 0) {
+ tuner_dev->hw_state.lock_status = 0;
+ *strength = 0;
+ return 0;
+ }
+
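+	/* pick the CNR table that matches the delivery system */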
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ cnr2qual = vidtv_tuner_t_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_tuner_t_cnr_2_qual);
+ break;
+
+ case SYS_DVBS:
+ cnr2qual = vidtv_tuner_s_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_tuner_s_cnr_2_qual);
+ break;
+
+ case SYS_DVBS2:
+ cnr2qual = vidtv_tuner_s2_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_tuner_s2_cnr_2_qual);
+ break;
+
+ case SYS_DVBC_ANNEX_A:
+ cnr2qual = vidtv_tuner_c_cnr_2_qual;
+ array_size = ARRAY_SIZE(vidtv_tuner_c_cnr_2_qual);
+ break;
+
+ default:
+ dev_warn_ratelimited(dev,
+ "%s: unsupported delivery system: %u\n",
+ __func__,
+ c->delivery_system);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < array_size; i++) {
+ if (cnr2qual[i].modulation != c->modulation ||
+ cnr2qual[i].fec != c->fec_inner)
+ continue;
+
+ if (!shift) {
+ *strength = cnr2qual[i].cnr_good;
+ return 0;
+ }
+		/*
+		 * The channel is tuned at the wrong frequency. Simulate
+		 * a degraded carrier S/N ratio.
+		 */
+
+ *strength = cnr2qual[i].cnr_ok -
+ (cnr2qual[i].cnr_good - cnr2qual[i].cnr_ok);
+ return 0;
+ }
+
+	/*
+	 * Do a linear interpolation between 34dB and 10dB if we can't
+	 * match against the table.
+	 */
+ *strength = 34000 - 24000 * shift / 100;
+ return 0;
+}
+
+static int vidtv_tuner_init(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+ struct vidtv_tuner_config config = tuner_dev->config;
+
+ msleep_interruptible(config.mock_power_up_delay_msec);
+
+ tuner_dev->hw_state.asleep = false;
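+	/* arbitrary simulated IF value; no real hardware is programmed */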
+ tuner_dev->hw_state.if_frequency = 5000;
+
+ return 0;
+}
+
+static int vidtv_tuner_sleep(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ tuner_dev->hw_state.asleep = true;
+ return 0;
+}
+
+static int vidtv_tuner_suspend(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ tuner_dev->hw_state.asleep = true;
+ return 0;
+}
+
+static int vidtv_tuner_resume(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ tuner_dev->hw_state.asleep = false;
+ return 0;
+}
+
+static int vidtv_tuner_set_params(struct dvb_frontend *fe)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+ struct vidtv_tuner_config config = tuner_dev->config;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ s32 shift;
+
+ u32 min_freq = fe->ops.tuner_ops.info.frequency_min_hz;
+ u32 max_freq = fe->ops.tuner_ops.info.frequency_max_hz;
+ u32 min_bw = fe->ops.tuner_ops.info.bandwidth_min;
+ u32 max_bw = fe->ops.tuner_ops.info.bandwidth_max;
+
+ if (c->frequency < min_freq || c->frequency > max_freq ||
+ c->bandwidth_hz < min_bw || c->bandwidth_hz > max_bw) {
+ tuner_dev->hw_state.lock_status = 0;
+ return -EINVAL;
+ }
+
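+	/*
+	 * Record the tune and assume lock; the lock is dropped below if
+	 * the frequency shift check fails.
+	 */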
+ tuner_dev->hw_state.tuned_frequency = c->frequency;
+ tuner_dev->hw_state.bandwidth = c->bandwidth_hz;
+ tuner_dev->hw_state.lock_status = TUNER_STATUS_LOCKED;
+
+ msleep_interruptible(config.mock_tune_delay_msec);
+
+ shift = vidtv_tuner_check_frequency_shift(fe);
+ if (shift < 0) {
+ tuner_dev->hw_state.lock_status = 0;
+ return shift;
+ }
+
+ return 0;
+}
+
+static int vidtv_tuner_set_config(struct dvb_frontend *fe,
+ void *priv_cfg)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
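+	/* cache the configuration handed in by the bridge driver */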
+ memcpy(&tuner_dev->config, priv_cfg, sizeof(tuner_dev->config));
+
+ return 0;
+}
+
+static int vidtv_tuner_get_frequency(struct dvb_frontend *fe,
+ u32 *frequency)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ *frequency = tuner_dev->hw_state.tuned_frequency;
+
+ return 0;
+}
+
+static int vidtv_tuner_get_bandwidth(struct dvb_frontend *fe,
+ u32 *bandwidth)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ *bandwidth = tuner_dev->hw_state.bandwidth;
+
+ return 0;
+}
+
+static int vidtv_tuner_get_if_frequency(struct dvb_frontend *fe,
+ u32 *frequency)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ *frequency = tuner_dev->hw_state.if_frequency;
+
+ return 0;
+}
+
+static int vidtv_tuner_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct vidtv_tuner_dev *tuner_dev = vidtv_tuner_get_dev(fe);
+
+ *status = tuner_dev->hw_state.lock_status;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops vidtv_tuner_ops = {
+ .init = vidtv_tuner_init,
+ .sleep = vidtv_tuner_sleep,
+ .suspend = vidtv_tuner_suspend,
+ .resume = vidtv_tuner_resume,
+ .set_params = vidtv_tuner_set_params,
+ .set_config = vidtv_tuner_set_config,
+ .get_bandwidth = vidtv_tuner_get_bandwidth,
+ .get_frequency = vidtv_tuner_get_frequency,
+ .get_if_frequency = vidtv_tuner_get_if_frequency,
+ .get_status = vidtv_tuner_get_status,
+ .get_rf_strength = vidtv_tuner_get_signal_strength
+};
+
+static const struct i2c_device_id vidtv_tuner_i2c_id_table[] = {
+ {"dvb_vidtv_tuner", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, vidtv_tuner_i2c_id_table);
+
+static int vidtv_tuner_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct vidtv_tuner_config *config = client->dev.platform_data;
+ struct dvb_frontend *fe = config->fe;
+ struct vidtv_tuner_dev *tuner_dev = NULL;
+
+ tuner_dev = kzalloc(sizeof(*tuner_dev), GFP_KERNEL);
+ if (!tuner_dev)
+ return -ENOMEM;
+
+ tuner_dev->fe = config->fe;
+ i2c_set_clientdata(client, tuner_dev);
+
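+	/* install the tuner ops on the frontend allocated by vidtv_demod */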
+ memcpy(&fe->ops.tuner_ops,
+ &vidtv_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ memcpy(&tuner_dev->config, config, sizeof(tuner_dev->config));
+ fe->tuner_priv = client;
+
+ return 0;
+}
+
+static int vidtv_tuner_i2c_remove(struct i2c_client *client)
+{
+ struct vidtv_tuner_dev *tuner_dev = i2c_get_clientdata(client);
+
+ kfree(tuner_dev);
+
+ return 0;
+}
+
+static struct i2c_driver vidtv_tuner_i2c_driver = {
+ .driver = {
+ .name = "dvb_vidtv_tuner",
+ .suppress_bind_attrs = true,
+ },
+ .probe = vidtv_tuner_i2c_probe,
+ .remove = vidtv_tuner_i2c_remove,
+ .id_table = vidtv_tuner_i2c_id_table,
+};
+module_i2c_driver(vidtv_tuner_i2c_driver);
+
+MODULE_DESCRIPTION("Virtual DVB Tuner");
+MODULE_AUTHOR("Daniel W. S. Almeida");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.h b/drivers/media/test-drivers/vidtv/vidtv_tuner.h
new file mode 100644
index 000000000000..8455b2d564b3
--- /dev/null
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The Virtual DVB test driver serves as a reference DVB driver and helps
+ * validate the existing APIs in the media subsystem. It can also aid
+ * developers working on userspace applications.
+ *
+ * Copyright (C) 2020 Daniel W. S. Almeida
+ */
+
+#ifndef VIDTV_TUNER_H
+#define VIDTV_TUNER_H
+
+#include <linux/types.h>
+#include <media/dvb_frontend.h>
+
+#define NUM_VALID_TUNER_FREQS 8
+
+/**
+ * struct vidtv_tuner_config - Configuration used to init the tuner.
+ * @fe: A pointer to the dvb_frontend structure allocated by vidtv_demod.
+ * @mock_power_up_delay_msec: Simulate a power-up delay.
+ * @mock_tune_delay_msec: Simulate a tune delay.
+ * @vidtv_valid_dvb_t_freqs: The valid DVB-T frequencies to simulate.
+ * @vidtv_valid_dvb_c_freqs: The valid DVB-C frequencies to simulate.
+ * @vidtv_valid_dvb_s_freqs: The valid DVB-S frequencies to simulate.
+ * @max_frequency_shift_hz: The maximum frequency shift in Hz allowed when
+ * tuning in a channel.
+ *
+ * The configuration used to init the tuner module, usually filled
+ * by a bridge driver. For vidtv, this is filled by vidtv_bridge before the
+ * tuner module is probed.
+ */
+struct vidtv_tuner_config {
+ struct dvb_frontend *fe;
+ u32 mock_power_up_delay_msec;
+ u32 mock_tune_delay_msec;
+ u32 vidtv_valid_dvb_t_freqs[NUM_VALID_TUNER_FREQS];
+ u32 vidtv_valid_dvb_c_freqs[NUM_VALID_TUNER_FREQS];
+ u32 vidtv_valid_dvb_s_freqs[NUM_VALID_TUNER_FREQS];
+ u8 max_frequency_shift_hz;
+};
+
+#endif /* VIDTV_TUNER_H */
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index c63496b17b9a..5e9fd902cd37 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -351,8 +351,7 @@ static void vimc_cap_unregister(struct vimc_ent_device *ved)
struct vimc_cap_device *vcap =
container_of(ved, struct vimc_cap_device, ved);
- vb2_queue_release(&vcap->queue);
- video_unregister_device(&vcap->vdev);
+ vb2_video_unregister_device(&vcap->vdev);
}
static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
@@ -477,13 +476,11 @@ static struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
if (ret) {
dev_err(vimc->mdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
- goto err_release_queue;
+ goto err_clean_m_ent;
}
return &vcap->ved;
-err_release_queue:
- vb2_queue_release(q);
err_clean_m_ent:
media_entity_cleanup(&vcap->vdev.entity);
err_free_vcap:
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index f7ee37e9508d..aa8d350fd682 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -832,56 +832,16 @@ static int vivid_create_queue(struct vivid_dev *dev,
return vb2_queue_init(q);
}
-static int vivid_create_instance(struct platform_device *pdev, int inst)
+static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
+ unsigned node_type,
+ bool *has_tuner,
+ bool *has_modulator,
+ int *ccs_cap,
+ int *ccs_out,
+ unsigned in_type_counter[4],
+ unsigned out_type_counter[4])
{
- static const struct v4l2_dv_timings def_dv_timings =
- V4L2_DV_BT_CEA_1280X720P60;
- unsigned in_type_counter[4] = { 0, 0, 0, 0 };
- unsigned out_type_counter[4] = { 0, 0, 0, 0 };
- int ccs_cap = ccs_cap_mode[inst];
- int ccs_out = ccs_out_mode[inst];
- bool has_tuner;
- bool has_modulator;
- struct vivid_dev *dev;
- struct video_device *vfd;
- unsigned node_type = node_types[inst];
- v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
- int ret;
int i;
-#ifdef CONFIG_VIDEO_VIVID_CEC
- unsigned int cec_tx_bus_cnt = 0;
-#endif
-
- /* allocate main vivid state structure */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->inst = inst;
-
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->v4l2_dev.mdev = &dev->mdev;
-
- /* Initialize media device */
- strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
- snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
- "platform:%s-%03d", VIVID_MODULE_NAME, inst);
- dev->mdev.dev = &pdev->dev;
- media_device_init(&dev->mdev);
- dev->mdev.ops = &vivid_media_ops;
-#endif
-
- /* register v4l2_device */
- snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
- "%s-%03d", VIVID_MODULE_NAME, inst);
- ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
- if (ret) {
- kfree(dev);
- return ret;
- }
- dev->v4l2_dev.release = vivid_dev_release;
-
- /* start detecting feature set */
/* do we use single- or multi-planar? */
dev->multiplanar = multiplanar[inst] > 1;
@@ -947,14 +907,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
!dev->has_vid_cap && !dev->has_meta_cap) {
v4l2_warn(&dev->v4l2_dev,
"Webcam or HDMI input without video or metadata nodes\n");
- kfree(dev);
return -EINVAL;
}
if ((in_type_counter[TV] || in_type_counter[SVID]) &&
!dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
v4l2_warn(&dev->v4l2_dev,
"TV or S-Video input without video, VBI or metadata nodes\n");
- kfree(dev);
return -EINVAL;
}
@@ -976,13 +934,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
!dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
v4l2_warn(&dev->v4l2_dev,
"S-Video output without video, VBI or metadata nodes\n");
- kfree(dev);
return -EINVAL;
}
if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
v4l2_warn(&dev->v4l2_dev,
"HDMI output without video or metadata nodes\n");
- kfree(dev);
return -EINVAL;
}
@@ -999,25 +955,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_tv_tuner = in_type_counter[TV];
/* do we have a tuner? */
- has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
- dev->has_radio_rx || dev->has_sdr_cap;
+ *has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
+ dev->has_radio_rx || dev->has_sdr_cap;
/* do we have a modulator? */
- has_modulator = dev->has_radio_tx;
+ *has_modulator = dev->has_radio_tx;
if (dev->has_vid_cap)
/* do we have a framebuffer for overlay testing? */
dev->has_fb = node_type & 0x10000;
/* can we do crop/compose/scaling while capturing? */
- if (no_error_inj && ccs_cap == -1)
- ccs_cap = 7;
+ if (no_error_inj && *ccs_cap == -1)
+ *ccs_cap = 7;
/* if ccs_cap == -1, then the user can select it using controls */
- if (ccs_cap != -1) {
- dev->has_crop_cap = ccs_cap & 1;
- dev->has_compose_cap = ccs_cap & 2;
- dev->has_scaler_cap = ccs_cap & 4;
+ if (*ccs_cap != -1) {
+ dev->has_crop_cap = *ccs_cap & 1;
+ dev->has_compose_cap = *ccs_cap & 2;
+ dev->has_scaler_cap = *ccs_cap & 4;
v4l2_info(&dev->v4l2_dev, "Capture Crop: %c Compose: %c Scaler: %c\n",
dev->has_crop_cap ? 'Y' : 'N',
dev->has_compose_cap ? 'Y' : 'N',
@@ -1025,14 +981,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
/* can we do crop/compose/scaling with video output? */
- if (no_error_inj && ccs_out == -1)
- ccs_out = 7;
+ if (no_error_inj && *ccs_out == -1)
+ *ccs_out = 7;
/* if ccs_out == -1, then the user can select it using controls */
- if (ccs_out != -1) {
- dev->has_crop_out = ccs_out & 1;
- dev->has_compose_out = ccs_out & 2;
- dev->has_scaler_out = ccs_out & 4;
+ if (*ccs_out != -1) {
+ dev->has_crop_out = *ccs_out & 1;
+ dev->has_compose_out = *ccs_out & 2;
+ dev->has_scaler_out = *ccs_out & 4;
v4l2_info(&dev->v4l2_dev, "Output Crop: %c Compose: %c Scaler: %c\n",
dev->has_crop_out ? 'Y' : 'N',
dev->has_compose_out ? 'Y' : 'N',
@@ -1042,8 +998,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* do we create a touch capture device */
dev->has_touch_cap = node_type & 0x80000;
- /* end detecting feature set */
+ return 0;
+}
+static void vivid_set_capabilities(struct vivid_dev *dev)
+{
if (dev->has_vid_cap) {
/* set up the capabilities of the video capture device */
dev->vid_cap_caps = dev->multiplanar ?
@@ -1122,58 +1081,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->touch_cap_caps |= dev->multiplanar ?
V4L2_CAP_VIDEO_CAPTURE_MPLANE : V4L2_CAP_VIDEO_CAPTURE;
}
+}
- ret = -ENOMEM;
- /* initialize the test pattern generator */
- tpg_init(&dev->tpg, 640, 360);
- if (tpg_alloc(&dev->tpg, array_size(MAX_WIDTH, MAX_ZOOM)))
- goto free_dev;
- dev->scaled_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
- if (!dev->scaled_line)
- goto free_dev;
- dev->blended_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
- if (!dev->blended_line)
- goto free_dev;
-
- /* load the edid */
- dev->edid = vmalloc(array_size(256, 128));
- if (!dev->edid)
- goto free_dev;
-
- while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
- dev->query_dv_timings_size++;
-
- /*
- * Create a char pointer array that points to the names of all the
- * preset timings
- */
- dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size,
- sizeof(char *), GFP_KERNEL);
- /*
- * Create a string array containing the names of all the preset
- * timings. Each name is max 31 chars long (+ terminating 0).
- */
- dev->query_dv_timings_qmenu_strings =
- kmalloc_array(dev->query_dv_timings_size, 32, GFP_KERNEL);
-
- if (!dev->query_dv_timings_qmenu ||
- !dev->query_dv_timings_qmenu_strings)
- goto free_dev;
-
- for (i = 0; i < dev->query_dv_timings_size; i++) {
- const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
- char *p = dev->query_dv_timings_qmenu_strings + i * 32;
- u32 htot, vtot;
-
- dev->query_dv_timings_qmenu[i] = p;
-
- htot = V4L2_DV_BT_FRAME_WIDTH(bt);
- vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
- snprintf(p, 32, "%ux%u%s%u",
- bt->width, bt->height, bt->interlaced ? "i" : "p",
- (u32)bt->pixelclock / (htot * vtot));
- }
-
+static void vivid_disable_unused_ioctls(struct vivid_dev *dev,
+ bool has_tuner,
+ bool has_modulator,
+ unsigned in_type_counter[4],
+ unsigned out_type_counter[4])
+{
/* disable invalid ioctls based on the feature set */
if (!dev->has_audio_inputs) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_AUDIO);
@@ -1260,112 +1175,52 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_S_PARM);
v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(&dev->touch_cap_dev, VIDIOC_ENUM_FRAMEINTERVALS);
+}
- /* configure internal data */
- dev->fmt_cap = &vivid_formats[0];
- dev->fmt_out = &vivid_formats[0];
- if (!dev->multiplanar)
- vivid_formats[0].data_offset[0] = 0;
- dev->webcam_size_idx = 1;
- dev->webcam_ival_idx = 3;
- tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
- dev->std_out = V4L2_STD_PAL;
- if (dev->input_type[0] == TV || dev->input_type[0] == SVID)
- tvnorms_cap = V4L2_STD_ALL;
- if (dev->output_type[0] == SVID)
- tvnorms_out = V4L2_STD_ALL;
- for (i = 0; i < MAX_INPUTS; i++) {
- dev->dv_timings_cap[i] = def_dv_timings;
- dev->std_cap[i] = V4L2_STD_PAL;
- }
- dev->dv_timings_out = def_dv_timings;
- dev->tv_freq = 2804 /* 175.25 * 16 */;
- dev->tv_audmode = V4L2_TUNER_MODE_STEREO;
- dev->tv_field_cap = V4L2_FIELD_INTERLACED;
- dev->tv_field_out = V4L2_FIELD_INTERLACED;
- dev->radio_rx_freq = 95000 * 16;
- dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO;
- if (dev->has_radio_tx) {
- dev->radio_tx_freq = 95500 * 16;
- dev->radio_rds_loop = false;
- }
- dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS;
- dev->sdr_adc_freq = 300000;
- dev->sdr_fm_freq = 50000000;
- dev->sdr_pixelformat = V4L2_SDR_FMT_CU8;
- dev->sdr_buffersize = SDR_CAP_SAMPLES_PER_BUF * 2;
-
- dev->edid_max_blocks = dev->edid_blocks = 2;
- memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid));
- dev->radio_rds_init_time = ktime_get();
-
- /* create all controls */
- ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, no_error_inj,
- in_type_counter[TV] || in_type_counter[SVID] ||
- out_type_counter[SVID],
- in_type_counter[HDMI] || out_type_counter[HDMI]);
- if (ret)
- goto unreg_dev;
+static int vivid_init_dv_timings(struct vivid_dev *dev)
+{
+ int i;
- /* enable/disable interface specific controls */
- if (dev->num_outputs && dev->output_type[0] != HDMI)
- v4l2_ctrl_activate(dev->ctrl_display_present, false);
- if (dev->num_inputs && dev->input_type[0] != HDMI) {
- v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, false);
- v4l2_ctrl_activate(dev->ctrl_dv_timings, false);
- } else if (dev->num_inputs && dev->input_type[0] == HDMI) {
- v4l2_ctrl_activate(dev->ctrl_std_signal_mode, false);
- v4l2_ctrl_activate(dev->ctrl_standard, false);
- }
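+	/* the preset array ends with a zeroed sentinel entry */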
+ while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
+ dev->query_dv_timings_size++;
/*
- * update the capture and output formats to do a proper initial
- * configuration.
+ * Create a char pointer array that points to the names of all the
+ * preset timings
*/
- vivid_update_format_cap(dev, false);
- vivid_update_format_out(dev);
-
- /* initialize overlay */
- dev->fb_cap.fmt.width = dev->src_rect.width;
- dev->fb_cap.fmt.height = dev->src_rect.height;
- dev->fb_cap.fmt.pixelformat = dev->fmt_cap->fourcc;
- dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
- dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
+ dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size,
+ sizeof(char *), GFP_KERNEL);
+ /*
+ * Create a string array containing the names of all the preset
+ * timings. Each name is max 31 chars long (+ terminating 0).
+ */
+ dev->query_dv_timings_qmenu_strings =
+ kmalloc_array(dev->query_dv_timings_size, 32, GFP_KERNEL);
- /* update touch configuration */
- dev->timeperframe_tch_cap.numerator = 1;
- dev->timeperframe_tch_cap.denominator = 10;
- vivid_set_touch(dev, 0);
+ if (!dev->query_dv_timings_qmenu ||
+ !dev->query_dv_timings_qmenu_strings)
+ return -ENOMEM;
- /* initialize locks */
- spin_lock_init(&dev->slock);
- mutex_init(&dev->mutex);
+ for (i = 0; i < dev->query_dv_timings_size; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+ char *p = dev->query_dv_timings_qmenu_strings + i * 32;
+ u32 htot, vtot;
- /* init dma queues */
- INIT_LIST_HEAD(&dev->vid_cap_active);
- INIT_LIST_HEAD(&dev->vid_out_active);
- INIT_LIST_HEAD(&dev->vbi_cap_active);
- INIT_LIST_HEAD(&dev->vbi_out_active);
- INIT_LIST_HEAD(&dev->sdr_cap_active);
- INIT_LIST_HEAD(&dev->meta_cap_active);
- INIT_LIST_HEAD(&dev->meta_out_active);
- INIT_LIST_HEAD(&dev->touch_cap_active);
+ dev->query_dv_timings_qmenu[i] = p;
- INIT_LIST_HEAD(&dev->cec_work_list);
- spin_lock_init(&dev->cec_slock);
- /*
- * Same as create_singlethread_workqueue, but now I can use the
- * string formatting of alloc_ordered_workqueue.
- */
- dev->cec_workqueue =
- alloc_ordered_workqueue("vivid-%03d-cec", WQ_MEM_RECLAIM, inst);
- if (!dev->cec_workqueue) {
- ret = -ENOMEM;
- goto unreg_dev;
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ snprintf(p, 32, "%ux%u%s%u",
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (u32)bt->pixelclock / (htot * vtot));
}
- if (allocators[inst] == 1)
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static int vivid_create_queues(struct vivid_dev *dev)
+{
+ int ret;
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
@@ -1374,7 +1229,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
&vivid_vid_cap_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_vid_out) {
@@ -1383,7 +1238,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_VIDEO_OUTPUT, 2,
&vivid_vid_out_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_vbi_cap) {
@@ -1392,7 +1247,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_VBI_CAPTURE, 2,
&vivid_vbi_cap_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_vbi_out) {
@@ -1401,7 +1256,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_VBI_OUTPUT, 2,
&vivid_vbi_out_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_sdr_cap) {
@@ -1410,7 +1265,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_SDR_CAPTURE, 8,
&vivid_sdr_cap_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_meta_cap) {
@@ -1419,7 +1274,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_META_CAPTURE, 2,
&vivid_meta_cap_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_meta_out) {
@@ -1428,7 +1283,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_META_OUTPUT, 1,
&vivid_meta_out_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_touch_cap) {
@@ -1437,63 +1292,31 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
V4L2_BUF_TYPE_VIDEO_CAPTURE, 1,
&vivid_touch_cap_qops);
if (ret)
- goto unreg_dev;
+ return ret;
}
if (dev->has_fb) {
/* Create framebuffer for testing capture/output overlay */
ret = vivid_fb_init(dev);
if (ret)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
dev->fb_info.node);
}
+ return 0;
+}
-#ifdef CONFIG_VIDEO_VIVID_CEC
- if (dev->has_vid_cap && in_type_counter[HDMI]) {
- struct cec_adapter *adap;
-
- adap = vivid_cec_alloc_adap(dev, 0, false);
- ret = PTR_ERR_OR_ZERO(adap);
- if (ret < 0)
- goto unreg_dev;
- dev->cec_rx_adap = adap;
- }
-
- if (dev->has_vid_out) {
- for (i = 0; i < dev->num_outputs; i++) {
- struct cec_adapter *adap;
-
- if (dev->output_type[i] != HDMI)
- continue;
-
- dev->cec_output2bus_map[i] = cec_tx_bus_cnt;
- adap = vivid_cec_alloc_adap(dev, cec_tx_bus_cnt, true);
- ret = PTR_ERR_OR_ZERO(adap);
- if (ret < 0) {
- for (i = 0; i < dev->num_outputs; i++)
- cec_delete_adapter(dev->cec_tx_adap[i]);
- goto unreg_dev;
- }
-
- dev->cec_tx_adap[cec_tx_bus_cnt] = adap;
- cec_tx_bus_cnt++;
- }
- }
-#endif
-
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
- v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap);
+static int vivid_create_devnodes(struct platform_device *pdev,
+ struct vivid_dev *dev, int inst,
+ unsigned int cec_tx_bus_cnt,
+ v4l2_std_id tvnorms_cap,
+ v4l2_std_id tvnorms_out,
+ unsigned in_type_counter[4],
+ unsigned out_type_counter[4])
+{
+ struct video_device *vfd;
+ int ret;
- /* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
snprintf(vfd->name, sizeof(vfd->name),
@@ -1517,7 +1340,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1526,7 +1349,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret < 0) {
cec_delete_adapter(dev->cec_rx_adap);
dev->cec_rx_adap = NULL;
- goto unreg_dev;
+ return ret;
}
cec_s_phys_addr(dev->cec_rx_adap, 0, false);
v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input 0\n",
@@ -1536,12 +1359,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_cap_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
video_device_node_name(vfd));
}
if (dev->has_vid_out) {
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ int i;
+#endif
vfd = &dev->vid_out_dev;
snprintf(vfd->name, sizeof(vfd->name),
"vivid-%03d-vid-out", inst);
@@ -1565,7 +1391,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1576,7 +1402,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
cec_delete_adapter(dev->cec_tx_adap[i]);
dev->cec_tx_adap[i] = NULL;
}
- goto unreg_dev;
+ return ret;
}
v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n",
dev_name(&dev->cec_tx_adap[i]->devnode.dev), i);
@@ -1589,7 +1415,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = video_register_device(vfd, VFL_TYPE_VIDEO, vid_out_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s\n",
video_device_node_name(vfd));
}
@@ -1612,12 +1438,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s, supports %s VBI\n",
video_device_node_name(vfd),
(dev->has_raw_vbi_cap && dev->has_sliced_vbi_cap) ?
@@ -1644,12 +1470,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 output device registered as %s, supports %s VBI\n",
video_device_node_name(vfd),
(dev->has_raw_vbi_out && dev->has_sliced_vbi_out) ?
@@ -1674,12 +1500,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 capture device registered as %s\n",
video_device_node_name(vfd));
}
@@ -1698,7 +1524,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_rx_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 receiver device registered as %s\n",
video_device_node_name(vfd));
}
@@ -1718,7 +1544,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = video_register_device(vfd, VFL_TYPE_RADIO, radio_tx_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev, "V4L2 transmitter device registered as %s\n",
video_device_node_name(vfd));
}
@@ -1741,12 +1567,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = media_entity_pads_init(&vfd->entity, 1,
&dev->meta_cap_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_VIDEO,
meta_cap_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev,
"V4L2 metadata capture device registered as %s\n",
video_device_node_name(vfd));
@@ -1771,12 +1597,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = media_entity_pads_init(&vfd->entity, 1,
&dev->meta_out_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_VIDEO,
meta_out_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev,
"V4L2 metadata output device registered as %s\n",
video_device_node_name(vfd));
@@ -1800,12 +1626,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
ret = media_entity_pads_init(&vfd->entity, 1,
&dev->touch_cap_pad);
if (ret)
- goto unreg_dev;
+ return ret;
#endif
ret = video_register_device(vfd, VFL_TYPE_TOUCH,
touch_cap_nr[inst]);
if (ret < 0)
- goto unreg_dev;
+ return ret;
v4l2_info(&dev->v4l2_dev,
"V4L2 touch capture device registered as %s\n",
video_device_node_name(vfd));
@@ -1817,26 +1643,268 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret) {
dev_err(dev->mdev.dev,
"media device register failed (err=%d)\n", ret);
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+static int vivid_create_instance(struct platform_device *pdev, int inst)
+{
+ static const struct v4l2_dv_timings def_dv_timings =
+ V4L2_DV_BT_CEA_1280X720P60;
+ unsigned in_type_counter[4] = { 0, 0, 0, 0 };
+ unsigned out_type_counter[4] = { 0, 0, 0, 0 };
+ int ccs_cap = ccs_cap_mode[inst];
+ int ccs_out = ccs_out_mode[inst];
+ bool has_tuner;
+ bool has_modulator;
+ struct vivid_dev *dev;
+ unsigned node_type = node_types[inst];
+ v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
+ unsigned int cec_tx_bus_cnt = 0;
+ int ret;
+ int i;
+
+ /* allocate main vivid state structure */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->inst = inst;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ /* Initialize media device */
+ strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
+ snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
+ "platform:%s-%03d", VIVID_MODULE_NAME, inst);
+ dev->mdev.dev = &pdev->dev;
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &vivid_media_ops;
+#endif
+
+ /* register v4l2_device */
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+ "%s-%03d", VIVID_MODULE_NAME, inst);
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ kfree(dev);
+ return ret;
+ }
+ dev->v4l2_dev.release = vivid_dev_release;
+
+ ret = vivid_detect_feature_set(dev, inst, node_type,
+ &has_tuner, &has_modulator,
+ &ccs_cap, &ccs_out,
+ in_type_counter, out_type_counter);
+ if (ret)
+ goto free_dev;
+
+ vivid_set_capabilities(dev);
+
+ ret = -ENOMEM;
+ /* initialize the test pattern generator */
+ tpg_init(&dev->tpg, 640, 360);
+ if (tpg_alloc(&dev->tpg, array_size(MAX_WIDTH, MAX_ZOOM)))
+ goto free_dev;
+ dev->scaled_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
+ if (!dev->scaled_line)
+ goto free_dev;
+ dev->blended_line = vzalloc(array_size(MAX_WIDTH, MAX_ZOOM));
+ if (!dev->blended_line)
+ goto free_dev;
+
+ /* load the edid */
+ dev->edid = vmalloc(array_size(256, 128));
+ if (!dev->edid)
+ goto free_dev;
+
+ ret = vivid_init_dv_timings(dev);
+ if (ret < 0)
+ goto free_dev;
+
+ vivid_disable_unused_ioctls(dev, has_tuner, has_modulator,
+ in_type_counter, out_type_counter);
+
+ /* configure internal data */
+ dev->fmt_cap = &vivid_formats[0];
+ dev->fmt_out = &vivid_formats[0];
+ if (!dev->multiplanar)
+ vivid_formats[0].data_offset[0] = 0;
+ dev->webcam_size_idx = 1;
+ dev->webcam_ival_idx = 3;
+ tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
+ dev->std_out = V4L2_STD_PAL;
+ if (dev->input_type[0] == TV || dev->input_type[0] == SVID)
+ tvnorms_cap = V4L2_STD_ALL;
+ if (dev->output_type[0] == SVID)
+ tvnorms_out = V4L2_STD_ALL;
+ for (i = 0; i < MAX_INPUTS; i++) {
+ dev->dv_timings_cap[i] = def_dv_timings;
+ dev->std_cap[i] = V4L2_STD_PAL;
+ }
+ dev->dv_timings_out = def_dv_timings;
+ dev->tv_freq = 2804 /* 175.25 * 16 */;
+ dev->tv_audmode = V4L2_TUNER_MODE_STEREO;
+ dev->tv_field_cap = V4L2_FIELD_INTERLACED;
+ dev->tv_field_out = V4L2_FIELD_INTERLACED;
+ dev->radio_rx_freq = 95000 * 16;
+ dev->radio_rx_audmode = V4L2_TUNER_MODE_STEREO;
+ if (dev->has_radio_tx) {
+ dev->radio_tx_freq = 95500 * 16;
+ dev->radio_rds_loop = false;
+ }
+ dev->radio_tx_subchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS;
+ dev->sdr_adc_freq = 300000;
+ dev->sdr_fm_freq = 50000000;
+ dev->sdr_pixelformat = V4L2_SDR_FMT_CU8;
+ dev->sdr_buffersize = SDR_CAP_SAMPLES_PER_BUF * 2;
+
+ dev->edid_max_blocks = dev->edid_blocks = 2;
+ memcpy(dev->edid, vivid_hdmi_edid, sizeof(vivid_hdmi_edid));
+ dev->radio_rds_init_time = ktime_get();
+
+ /* create all controls */
+ ret = vivid_create_controls(dev, ccs_cap == -1, ccs_out == -1, no_error_inj,
+ in_type_counter[TV] || in_type_counter[SVID] ||
+ out_type_counter[SVID],
+ in_type_counter[HDMI] || out_type_counter[HDMI]);
+ if (ret)
goto unreg_dev;
+
+ /* enable/disable interface specific controls */
+ if (dev->num_outputs && dev->output_type[0] != HDMI)
+ v4l2_ctrl_activate(dev->ctrl_display_present, false);
+ if (dev->num_inputs && dev->input_type[0] != HDMI) {
+ v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, false);
+ v4l2_ctrl_activate(dev->ctrl_dv_timings, false);
+ } else if (dev->num_inputs && dev->input_type[0] == HDMI) {
+ v4l2_ctrl_activate(dev->ctrl_std_signal_mode, false);
+ v4l2_ctrl_activate(dev->ctrl_standard, false);
+ }
+
+ /*
+ * update the capture and output formats to do a proper initial
+ * configuration.
+ */
+ vivid_update_format_cap(dev, false);
+ vivid_update_format_out(dev);
+
+ /* initialize overlay */
+ dev->fb_cap.fmt.width = dev->src_rect.width;
+ dev->fb_cap.fmt.height = dev->src_rect.height;
+ dev->fb_cap.fmt.pixelformat = dev->fmt_cap->fourcc;
+ dev->fb_cap.fmt.bytesperline = dev->src_rect.width * tpg_g_twopixelsize(&dev->tpg, 0) / 2;
+ dev->fb_cap.fmt.sizeimage = dev->src_rect.height * dev->fb_cap.fmt.bytesperline;
+
+ /* update touch configuration */
+ dev->timeperframe_tch_cap.numerator = 1;
+ dev->timeperframe_tch_cap.denominator = 10;
+ vivid_set_touch(dev, 0);
+
+ /* initialize locks */
+ spin_lock_init(&dev->slock);
+ mutex_init(&dev->mutex);
+
+ /* init dma queues */
+ INIT_LIST_HEAD(&dev->vid_cap_active);
+ INIT_LIST_HEAD(&dev->vid_out_active);
+ INIT_LIST_HEAD(&dev->vbi_cap_active);
+ INIT_LIST_HEAD(&dev->vbi_out_active);
+ INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->meta_cap_active);
+ INIT_LIST_HEAD(&dev->meta_out_active);
+ INIT_LIST_HEAD(&dev->touch_cap_active);
+
+ INIT_LIST_HEAD(&dev->cec_work_list);
+ spin_lock_init(&dev->cec_slock);
+ /*
+ * Same as create_singlethread_workqueue, but now I can use the
+ * string formatting of alloc_ordered_workqueue.
+ */
+ dev->cec_workqueue = alloc_ordered_workqueue("vivid-%03d-cec",
+ WQ_MEM_RECLAIM, inst);
+ if (!dev->cec_workqueue) {
+ ret = -ENOMEM;
+ goto unreg_dev;
+ }
+
+ if (allocators[inst] == 1)
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ ret = vivid_create_queues(dev);
+ if (ret)
+ goto unreg_dev;
+
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ if (dev->has_vid_cap && in_type_counter[HDMI]) {
+ struct cec_adapter *adap;
+
+ adap = vivid_cec_alloc_adap(dev, 0, false);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_rx_adap = adap;
+ }
+
+ if (dev->has_vid_out) {
+ for (i = 0; i < dev->num_outputs; i++) {
+ struct cec_adapter *adap;
+
+ if (dev->output_type[i] != HDMI)
+ continue;
+
+ dev->cec_output2bus_map[i] = cec_tx_bus_cnt;
+ adap = vivid_cec_alloc_adap(dev, cec_tx_bus_cnt, true);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0) {
+ for (i = 0; i < dev->num_outputs; i++)
+ cec_delete_adapter(dev->cec_tx_adap[i]);
+ goto unreg_dev;
+ }
+
+ dev->cec_tx_adap[cec_tx_bus_cnt] = adap;
+ cec_tx_bus_cnt++;
+ }
}
#endif
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_touch_cap);
+
+ /* finally start creating the device nodes */
+ ret = vivid_create_devnodes(pdev, dev, inst, cec_tx_bus_cnt,
+ tvnorms_cap, tvnorms_out,
+ in_type_counter, out_type_counter);
+ if (ret)
+ goto unreg_dev;
+
/* Now that everything is fine, let's add it to device list */
vivid_devs[inst] = dev;
return 0;
unreg_dev:
- video_unregister_device(&dev->touch_cap_dev);
- video_unregister_device(&dev->meta_out_dev);
- video_unregister_device(&dev->meta_cap_dev);
+ vb2_video_unregister_device(&dev->touch_cap_dev);
+ vb2_video_unregister_device(&dev->meta_out_dev);
+ vb2_video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
- video_unregister_device(&dev->sdr_cap_dev);
- video_unregister_device(&dev->vbi_out_dev);
- video_unregister_device(&dev->vbi_cap_dev);
- video_unregister_device(&dev->vid_out_dev);
- video_unregister_device(&dev->vid_cap_dev);
+ vb2_video_unregister_device(&dev->sdr_cap_dev);
+ vb2_video_unregister_device(&dev->vbi_out_dev);
+ vb2_video_unregister_device(&dev->vbi_cap_dev);
+ vb2_video_unregister_device(&dev->vid_out_dev);
+ vb2_video_unregister_device(&dev->vid_cap_dev);
cec_unregister_adapter(dev->cec_rx_adap);
for (i = 0; i < MAX_OUTPUTS; i++)
cec_unregister_adapter(dev->cec_tx_adap[i]);
@@ -1907,27 +1975,27 @@ static int vivid_remove(struct platform_device *pdev)
if (dev->has_vid_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vid_cap_dev));
- video_unregister_device(&dev->vid_cap_dev);
+ vb2_video_unregister_device(&dev->vid_cap_dev);
}
if (dev->has_vid_out) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vid_out_dev));
- video_unregister_device(&dev->vid_out_dev);
+ vb2_video_unregister_device(&dev->vid_out_dev);
}
if (dev->has_vbi_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vbi_cap_dev));
- video_unregister_device(&dev->vbi_cap_dev);
+ vb2_video_unregister_device(&dev->vbi_cap_dev);
}
if (dev->has_vbi_out) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vbi_out_dev));
- video_unregister_device(&dev->vbi_out_dev);
+ vb2_video_unregister_device(&dev->vbi_out_dev);
}
if (dev->has_sdr_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->sdr_cap_dev));
- video_unregister_device(&dev->sdr_cap_dev);
+ vb2_video_unregister_device(&dev->sdr_cap_dev);
}
if (dev->has_radio_rx) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
@@ -1948,17 +2016,17 @@ static int vivid_remove(struct platform_device *pdev)
if (dev->has_meta_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->meta_cap_dev));
- video_unregister_device(&dev->meta_cap_dev);
+ vb2_video_unregister_device(&dev->meta_cap_dev);
}
if (dev->has_meta_out) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->meta_out_dev));
- video_unregister_device(&dev->meta_out_dev);
+ vb2_video_unregister_device(&dev->meta_out_dev);
}
if (dev->has_touch_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->touch_cap_dev));
- video_unregister_device(&dev->touch_cap_dev);
+ vb2_video_unregister_device(&dev->touch_cap_dev);
}
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
diff --git a/drivers/media/test-drivers/vivid/vivid-meta-out.c b/drivers/media/test-drivers/vivid/vivid-meta-out.c
index ff8a039aba72..95835b52b58f 100644
--- a/drivers/media/test-drivers/vivid/vivid-meta-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-meta-out.c
@@ -164,10 +164,11 @@ void vivid_meta_out_process(struct vivid_dev *dev,
{
struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
- tpg_s_brightness(&dev->tpg, meta->brightness);
- tpg_s_contrast(&dev->tpg, meta->contrast);
- tpg_s_saturation(&dev->tpg, meta->saturation);
- tpg_s_hue(&dev->tpg, meta->hue);
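+	/* set via the control framework so control state and TPG stay in sync */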
+ v4l2_ctrl_s_ctrl(dev->brightness, meta->brightness);
+ v4l2_ctrl_s_ctrl(dev->contrast, meta->contrast);
+ v4l2_ctrl_s_ctrl(dev->saturation, meta->saturation);
+ v4l2_ctrl_s_ctrl(dev->hue, meta->hue);
+
dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
__func__, meta->brightness, meta->contrast,
meta->saturation, meta->hue);
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-gen.c b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
index acc98445a1fa..a141369a7a63 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-gen.c
@@ -298,7 +298,7 @@ void vivid_vbi_gen_sliced(struct vivid_vbi_gen_data *vbi,
switch (frame) {
case 0:
vivid_vbi_gen_set_time_of_day(vbi->time_of_day_packet);
- /* fall through */
+ fallthrough;
case 1 ... 7:
data1->data[0] = vbi->time_of_day_packet[frame * 2];
data1->data[1] = vbi->time_of_day_packet[frame * 2 + 1];
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index e94beef008c8..eadf28ab1e39 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -560,6 +560,7 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
unsigned factor = 1;
unsigned w, h;
unsigned p;
+ bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);
fmt = vivid_get_format(dev, mp->pixelformat);
if (!fmt) {
@@ -633,13 +634,30 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
(fmt->bit_depth[0] / fmt->vdownsampling[0]);
- mp->colorspace = vivid_colorspace_cap(dev);
- if (fmt->color_enc == TGP_COLOR_ENC_HSV)
- mp->hsv_enc = vivid_hsv_enc_cap(dev);
- else
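+	/*
+	 * With V4L2_PIX_FMT_FLAG_SET_CSC, keep valid user-supplied
+	 * colorimetry values; fall back to the driver defaults otherwise.
+	 */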
+ if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
+ mp->colorspace = vivid_colorspace_cap(dev);
+
+ if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
+ mp->xfer_func = vivid_xfer_func_cap(dev);
+
+ if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
+ if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
+ mp->hsv_enc = vivid_hsv_enc_cap(dev);
+ } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
+ if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
+ mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+ } else {
mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
- mp->xfer_func = vivid_xfer_func_cap(dev);
- mp->quantization = vivid_quantization_cap(dev);
+ }
+
+ if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
+ fmt->color_enc == TGP_COLOR_ENC_RGB) {
+ if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
+ mp->quantization = vivid_quantization_cap(dev);
+ } else {
+ mp->quantization = vivid_quantization_cap(dev);
+ }
+
memset(mp->reserved, 0, sizeof(mp->reserved));
return 0;
}
@@ -769,6 +787,14 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
if (vivid_is_sdtv_cap(dev))
dev->tv_field_cap = mp->field;
tpg_update_mv_step(&dev->tpg);
+ dev->tpg.colorspace = mp->colorspace;
+ dev->tpg.xfer_func = mp->xfer_func;
+ if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
+ dev->tpg.ycbcr_enc = mp->ycbcr_enc;
+ else
+ dev->tpg.hsv_enc = mp->hsv_enc;
+ dev->tpg.quantization = mp->quantization;
+
return 0;
}
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.c b/drivers/media/test-drivers/vivid/vivid-vid-common.c
index 76b0be670ebb..19701fe72030 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.c
@@ -920,6 +920,31 @@ int vivid_enum_fmt_vid(struct file *file, void *priv,
fmt = &vivid_formats[f->index];
f->pixelformat = fmt->fourcc;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return 0;
+ /*
+ * For capture devices, we support the CSC API.
+ * We allow userspace to:
+ * 1. set the colorspace
+ * 2. set the xfer_func
+ * 3. set the ycbcr_enc on YUV formats
+ * 4. set the hsv_enc on HSV formats
+ * 5. set the quantization on YUV and RGB formats
+ */
+ f->flags |= V4L2_FMT_FLAG_CSC_COLORSPACE;
+ f->flags |= V4L2_FMT_FLAG_CSC_XFER_FUNC;
+
+ if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
+ f->flags |= V4L2_FMT_FLAG_CSC_YCBCR_ENC;
+ f->flags |= V4L2_FMT_FLAG_CSC_QUANTIZATION;
+ } else if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
+ f->flags |= V4L2_FMT_FLAG_CSC_HSV_ENC;
+ } else if (fmt->color_enc == TGP_COLOR_ENC_RGB) {
+ f->flags |= V4L2_FMT_FLAG_CSC_QUANTIZATION;
+ }
+
return 0;
}
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index b7b5b33b11f4..eaa3bbc903d7 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -250,7 +250,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. Using 6000 kHz.\n",
bandwidth);
bandwidth = 6000;
- /* fallthrough */
+ fallthrough;
case 6000:
regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW6M;
break;
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index e48faf942830..3853a3d43d4f 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -222,23 +222,24 @@ static int qt1010_init_meas1(struct qt1010_priv *priv,
{ QT1010_WR, reg, reg_init_val },
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x1e, oper },
- { QT1010_RD, reg, 0xff }
};
for (i = 0; i < ARRAY_SIZE(i2c_data); i++) {
- if (i2c_data[i].oper == QT1010_WR) {
- err = qt1010_writereg(priv, i2c_data[i].reg,
- i2c_data[i].val);
- } else {
- err = qt1010_readreg(priv, i2c_data[i].reg, &val2);
- }
- if (err) return err;
+ err = qt1010_writereg(priv, i2c_data[i].reg,
+ i2c_data[i].val);
+ if (err)
+ return err;
}
+ err = qt1010_readreg(priv, reg, &val2);
+ if (err)
+ return err;
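+
+	/* poll until two consecutive reads of the register match */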
do {
val1 = val2;
err = qt1010_readreg(priv, reg, &val2);
- if (err) return err;
+ if (err)
+ return err;
+
dev_dbg(&priv->i2c->dev, "%s: compare reg:%02x %02x %02x\n",
__func__, reg, val1, val2);
} while (val1 != val2);
@@ -250,7 +251,7 @@ static int qt1010_init_meas1(struct qt1010_priv *priv,
static int qt1010_init_meas2(struct qt1010_priv *priv,
u8 reg_init_val, u8 *retval)
{
- u8 i, val;
+ u8 i, val = 0xff;
int err;
qt1010_i2c_oper_t i2c_data[] = {
{ QT1010_WR, 0x07, reg_init_val },
@@ -261,6 +262,7 @@ static int qt1010_init_meas2(struct qt1010_priv *priv,
{ QT1010_WR, 0x1e, 0x00 },
{ QT1010_WR, 0x22, 0xff }
};
+
for (i = 0; i < ARRAY_SIZE(i2c_data); i++) {
if (i2c_data[i].oper == QT1010_WR) {
err = qt1010_writereg(priv, i2c_data[i].reg,
@@ -268,7 +270,8 @@ static int qt1010_init_meas2(struct qt1010_priv *priv,
} else {
err = qt1010_readreg(priv, i2c_data[i].reg, &val);
}
- if (err) return err;
+ if (err)
+ return err;
}
*retval = val;
return 0;
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 471aaf71fdef..f0371d004b36 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -948,7 +948,7 @@ static int tda18271_set_params(struct dvb_frontend *fe)
break;
case SYS_DVBC_ANNEX_B:
bw = 6000000;
- /* fall through */
+ fallthrough;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
if (bw <= 6000000) {
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index b6e70fada3fb..8fb186b25d6a 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -500,7 +500,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
case TUNER_TENA_9533_DI:
case TUNER_YMEC_TVF_5533MF:
tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
- return 0;
+ return -EINVAL;
case TUNER_PHILIPS_FM1216ME_MK3:
case TUNER_PHILIPS_FM1236_MK3:
case TUNER_PHILIPS_FMD1216ME_MK3:
@@ -702,7 +702,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
TUNER_RATIO_SELECT_50; /* 50 kHz step */
/* Bandswitch byte */
- simple_radio_bandswitch(fe, &buffer[0]);
+ if (simple_radio_bandswitch(fe, &buffer[0]))
+ return 0;
/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 4befa920246c..3d3368202cd0 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -104,11 +104,11 @@ static int au8522_rc_andor(struct au0828_rc *ir, u16 reg, u8 mask, u8 value)
/* Remote Controller time units */
-#define AU8522_UNIT 200000 /* ns */
-#define NEC_START_SPACE (4500000 / AU8522_UNIT)
-#define NEC_START_PULSE (562500 * 16)
+#define AU8522_UNIT 200 /* us */
+#define NEC_START_SPACE (4500 / AU8522_UNIT)
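+/* the NEC unit is 562.5 us; rounded to 563 */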
+#define NEC_START_PULSE (563 * 16)
#define RC5_START_SPACE (4 * AU8522_UNIT)
-#define RC5_START_PULSE 888888
+#define RC5_START_PULSE 889
static int au0828_get_key_au8522(struct au0828_rc *ir)
{
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 51b8d14fb4dc..aa5bc6a2ae20 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -938,8 +938,8 @@ int au0828_analog_unregister(struct au0828_dev *dev)
return 0;
mutex_lock(&au0828_sysfs_lock);
- video_unregister_device(&dev->vdev);
- video_unregister_device(&dev->vbi_dev);
+ vb2_video_unregister_device(&dev->vdev);
+ vb2_video_unregister_device(&dev->vbi_dev);
mutex_unlock(&au0828_sysfs_lock);
v4l2_device_disconnect(&dev->v4l2_dev);
@@ -2011,8 +2011,7 @@ int au0828_analog_register(struct au0828_dev *dev,
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
- ret = -ENODEV;
- goto err_reg_vdev;
+ return -ENODEV;
}
/* Register the vbi device */
@@ -2040,10 +2039,7 @@ int au0828_analog_register(struct au0828_dev *dev,
return 0;
err_reg_vbi_dev:
- video_unregister_device(&dev->vdev);
-err_reg_vdev:
- vb2_queue_release(&dev->vb_vidq);
- vb2_queue_release(&dev->vb_vbiq);
+ vb2_video_unregister_device(&dev->vdev);
return ret;
}
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index e3234d169065..e731243267e4 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -419,10 +419,9 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
usb_free_urb(fc_usb->iso_urb[i]);
}
- if (fc_usb->iso_buffer != NULL)
- usb_free_coherent(fc_usb->udev,
- fc_usb->buffer_size, fc_usb->iso_buffer,
- fc_usb->dma_addr);
+ usb_free_coherent(fc_usb->udev, fc_usb->buffer_size,
+ fc_usb->iso_buffer, fc_usb->dma_addr);
+
}
static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
@@ -513,6 +512,8 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
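+
+	/* the data pipe uses endpoint 1; it must be an isochronous IN endpoint */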
+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ return -ENODEV;
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index e86faa0e06ca..2f230bf72252 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -15,7 +15,7 @@
#define B2C2_USB_CTRL_PIPE_IN usb_rcvctrlpipe(fc_usb->udev, 0)
#define B2C2_USB_CTRL_PIPE_OUT usb_sndctrlpipe(fc_usb->udev, 0)
-#define B2C2_USB_DATA_PIPE usb_rcvisocpipe(fc_usb->udev, 0x81)
+#define B2C2_USB_DATA_PIPE usb_rcvisocpipe(fc_usb->udev, 1)
struct flexcop_usb {
struct usb_device *udev;
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 982cb56e97e9..05d91caaed0c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -115,11 +115,9 @@ void cx231xx_init_extension(struct cx231xx *dev)
struct cx231xx_ops *ops = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- if (!list_empty(&cx231xx_extension_devlist)) {
- list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
- if (ops->init)
- ops->init(dev);
- }
+ list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
+ if (ops->init)
+ ops->init(dev);
}
mutex_unlock(&cx231xx_devlist_mutex);
}
@@ -129,11 +127,9 @@ void cx231xx_close_extension(struct cx231xx *dev)
struct cx231xx_ops *ops = NULL;
mutex_lock(&cx231xx_devlist_mutex);
- if (!list_empty(&cx231xx_extension_devlist)) {
- list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
- if (ops->fini)
- ops->fini(dev);
- }
+ list_for_each_entry(ops, &cx231xx_extension_devlist, next) {
+ if (ops->fini)
+ ops->fini(dev);
}
mutex_unlock(&cx231xx_devlist_mutex);
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index c427b9031e42..c70b3cef3176 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -43,7 +43,7 @@ static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
case READ_I2C:
write = 0;
state->buf[2] |= 0x01; /* set I2C direction */
- /* fall through */
+ fallthrough;
case WRITE_I2C:
state->buf[0] = READ_WRITE_I2C;
break;
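All the /* fall through */ comment conversions in this series use the fallthrough pseudo-keyword from linux/compiler_attributes.h, which expands to __attribute__((fallthrough)) on compilers that support it, letting -Wimplicit-fallthrough distinguish intentional fall-through from a missing break. A minimal sketch:

#include <linux/compiler_attributes.h>

static int demo_quirks_for(int model)
{
	int flags = 0;

	switch (model) {
	case 1:
		flags |= 0x01;
		fallthrough;	/* model 1 also needs the model-2 fixups */
	case 2:
		flags |= 0x02;
		break;
	default:
		break;
	}
	return flags;
}
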
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index b7ca236174f3..0c434259c36f 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -41,7 +41,7 @@ static int gl861_ctrl_msg(struct dvb_usb_device *d, u8 request, u16 value,
switch (request) {
case CMD_WRITE:
memcpy(ctx->buf, data, size);
- /* Fall through */
+ fallthrough;
case CMD_WRITE_SHORT:
pipe = usb_sndctrlpipe(d->udev, 0);
requesttype = USB_TYPE_VENDOR | USB_DIR_OUT;
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 8a3c0eeed959..5a7a9522d46d 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -687,7 +687,7 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
cold = 0;
break;
}
- /* fall through */
+ fallthrough;
case TUNER_LG:
fw_lme = fw_lg;
ret = request_firmware(&fw, fw_lme, &udev->dev);
@@ -710,7 +710,7 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
cold = 0;
break;
}
- /* fall through */
+ fallthrough;
case TUNER_LG:
fw_lme = fw_c_lg;
ret = request_firmware(&fw, fw_lme, &udev->dev);
@@ -718,7 +718,7 @@ static const char *lme_firmware_switch(struct dvb_usb_device *d, int cold)
st->dvb_usb_lme2510_firmware = TUNER_LG;
break;
}
- /* fall through */
+ fallthrough;
case TUNER_S0194:
fw_lme = fw_c_s0194;
ret = request_firmware(&fw, fw_lme, &udev->dev);
@@ -1018,7 +1018,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
}
break;
}
- /* fall through */
+ fallthrough;
case 0x22f0:
st->i2c_gate = 5;
adap->fe[0] = dvb_attach(m88rs2000_attach,
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index 0b7dda99e410..ef489c566b75 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -632,7 +632,7 @@ int mxl111sf_set_gpio(struct mxl111sf_state *state, int gpio, int val)
default:
mxl_printk(KERN_ERR,
"gpio_port_expander undefined, assuming PCA9534");
- /* fall-thru */
+ fallthrough;
case mxl111sf_PCA9534:
return pca9534_set_gpio(state, gpio, val);
case mxl111sf_gpio_hw:
@@ -693,7 +693,7 @@ int mxl111sf_init_port_expander(struct mxl111sf_state *state)
default:
mxl_printk(KERN_ERR,
"gpio_port_expander undefined, assuming PCA9534");
- /* fall-thru */
+ fallthrough;
case mxl111sf_PCA9534:
return pca9534_init_port_expander(state);
case mxl111sf_gpio_hw:
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 2080f6ef4be1..91460e4d0c30 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1781,7 +1781,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
/* pass data to Kernel IR decoder */
for (i = 0; i < len; i++) {
ev.pulse = buf[i] >> 7;
- ev.duration = 50800 * (buf[i] & 0x7f);
+ ev.duration = 51 * (buf[i] & 0x7f);
ir_raw_event_store_with_filter(d->rc_dev, &ev);
}
@@ -1809,7 +1809,7 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->query = rtl2832u_rc_query;
rc->interval = 200;
/* we program idle len to 0xc0, set timeout to one less */
- rc->timeout = 0xbf * 50800;
+ rc->timeout = 0xbf * 51;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
index 25ba03edcb5c..7498110142e4 100644
--- a/drivers/media/usb/dvb-usb/Kconfig
+++ b/drivers/media/usb/dvb-usb/Kconfig
@@ -279,6 +279,7 @@ config DVB_USB_PCTV452E
tristate "Pinnacle PCTV HDTV Pro USB device/TT Connect S2-3600"
depends on DVB_USB
select TTPCI_EEPROM
+ select DVB_ISL6423 if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBP22 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB0899 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
index 001cae648797..e93183ddd797 100644
--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -1615,8 +1615,6 @@ static void cxusb_medion_videodev_release(struct video_device *vdev)
cxusb_vprintk(dvbdev, OPS, "video device release\n");
- vb2_queue_release(vdev->queue);
-
video_device_release(vdev);
}
@@ -1647,8 +1645,7 @@ static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev)
cxdev->videodev = video_device_alloc();
if (!cxdev->videodev) {
dev_err(&dvbdev->udev->dev, "video device alloc failed\n");
- ret = -ENOMEM;
- goto ret_qrelease;
+ return -ENOMEM;
}
cxdev->videodev->device_caps = videocaps;
@@ -1674,10 +1671,6 @@ static int cxusb_medion_register_analog_video(struct dvb_usb_device *dvbdev)
ret_vrelease:
video_device_release(cxdev->videodev);
-
-ret_qrelease:
- vb2_queue_release(&cxdev->videoqueue);
-
return ret;
}
@@ -1820,7 +1813,7 @@ int cxusb_medion_register_analog(struct dvb_usb_device *dvbdev)
return 0;
ret_vunreg:
- video_unregister_device(cxdev->videodev);
+ vb2_video_unregister_device(cxdev->videodev);
ret_unregister:
v4l2_device_put(&cxdev->v4l2dev);
@@ -1836,7 +1829,7 @@ void cxusb_medion_unregister_analog(struct dvb_usb_device *dvbdev)
cxusb_vprintk(dvbdev, OPS, "unregistering analog\n");
video_unregister_device(cxdev->radiodev);
- video_unregister_device(cxdev->videodev);
+ vb2_video_unregister_device(cxdev->videodev);
v4l2_device_put(&cxdev->v4l2dev);
wait_for_completion(&cxdev->v4l2_release);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 52e648e2713a..d3288c107906 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1738,14 +1738,9 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
struct dib0700_adapter_state *st = adap->priv;
struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
- if (adap->id == 0) {
- if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
- return -ENODEV;
- } else {
- /* FIXME: check if it is fe_adap[1] */
- if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
- return -ENODEV;
- }
+ /* FIXME: if adap->id != 0, check if it is fe_adap[1] */
+ if (!dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config))
+ return -ENODEV;
st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 441d878fc22c..9b78b40abc6d 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -20,6 +20,7 @@
#include "stb6100.h"
#include "stb6100_cfg.h"
/* FE Power */
+#include "isl6423.h"
#include "lnbp22.h"
#include <media/dvb_ca_en50221.h>
@@ -83,6 +84,13 @@ static struct stb0899_postproc pctv45e_postproc[] = {
{ 0, 0 }
};
+static struct isl6423_config pctv452e_isl6423_config = {
+ .current_max = SEC_CURRENT_515m,
+ .curlim = SEC_CURRENT_LIM_ON,
+ .mod_extern = 1,
+ .addr = 0x08,
+};
+
/*
* stores all private variables for communication with the PCTV452e DVB-S2
*/
@@ -909,15 +917,23 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
&a->dev->i2c_adap);
if (!a->fe_adap[0].fe)
return -ENODEV;
- if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
- &a->dev->i2c_adap)) == NULL)
- err("Cannot attach lnbp22\n");
id = a->dev->desc->warm_ids[0];
- if (USB_VID_TECHNOTREND == id->idVendor
- && USB_PID_TECHNOTREND_CONNECT_S2_3650_CI == id->idProduct)
+ if (id->idVendor == USB_VID_TECHNOTREND &&
+ id->idProduct == USB_PID_TECHNOTREND_CONNECT_S2_3650_CI) {
+ if (dvb_attach(lnbp22_attach,
+ a->fe_adap[0].fe,
+ &a->dev->i2c_adap) == NULL) {
+ err("Cannot attach lnbp22\n");
+ }
/* Error ignored. */
tt3650_ci_init(a);
+ } else if (dvb_attach(isl6423_attach,
+ a->fe_adap[0].fe,
+ &a->dev->i2c_adap,
+ &pctv452e_isl6423_config) == NULL) {
+ err("Cannot attach isl6423\n");
+ }
return 0;
}
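The new isl6423 path uses the standard dvb_attach() pattern, which wraps the symbol lookup and module refcounting for optional frontend/LNB drivers. A stripped-down sketch under the same assumption as the hunk above (the config values are illustrative; real boards take them from the schematic):

#include <media/dvbdev.h>
#include <media/dvb_frontend.h>
#include "isl6423.h"

static const struct isl6423_config demo_isl6423_config = {
	.current_max = SEC_CURRENT_515m,
	.curlim      = SEC_CURRENT_LIM_ON,
	.mod_extern  = 1,
	.addr        = 0x08,
};

static int demo_attach_lnb(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
	/* NULL means the LNB supply driver is absent or failed to bind. */
	if (!dvb_attach(isl6423_attach, fe, i2c, &demo_isl6423_config))
		return -ENODEV;
	return 0;
}
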
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index f172120db2aa..a9ed26ce1be6 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -656,14 +656,14 @@ unlock:
for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
if (buf[i] == 0xff) {
ev.pulse = 0;
- ev.duration = 888888*2;
+ ev.duration = 889 * 2;
ir_raw_event_store(d->rc_dev, &ev);
break;
}
ev.pulse = !ev.pulse;
ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
- FIRMWARE_CLOCK_TICK) / 1000;
+ FIRMWARE_CLOCK_TICK) / (1000 * 1000);
ir_raw_event_store(d->rc_dev, &ev);
}
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 6833b5bfe293..dc968fd5ace9 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -362,13 +362,13 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
return -ENODEV;
switch (cmd) {
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
- case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
atomic_set(&dev->adev.stream_started, 1);
break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
- case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
atomic_set(&dev->adev.stream_started, 0);
break;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a8c321d11827..5144888ae36f 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2519,6 +2519,26 @@ const struct em28xx_board em28xx_boards[] = {
.amux = EM28XX_AMUX_LINE_IN,
} },
},
+ /*
+ * 1f4d:1abe MyGica iGrabber
+ * (same as several other EM2860 devices)
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner
+ */
+ [EM2860_BOARD_MYGICA_IGRABBER] = {
+ .name = "MyGica iGrabber",
+ .vchannels = 2,
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = SAA7115_SVIDEO3,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2698,6 +2718,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2860_BOARD_EASYCAP },
{ USB_DEVICE(0x1b80, 0xe425),
.driver_info = EM2874_BOARD_MAXMEDIA_UB425_TC },
+ { USB_DEVICE(0x1f4d, 0x1abe),
+ .driver_info = EM2860_BOARD_MYGICA_IGRABBER },
{ USB_DEVICE(0x2304, 0x0242),
.driver_info = EM2884_BOARD_PCTV_510E },
{ USB_DEVICE(0x2013, 0x0251),
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index acbb62397314..55a46faaf7b7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -151,6 +151,7 @@
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
#define EM28178_BOARD_PCTV_461E_V2 104
+#define EM2860_BOARD_MYGICA_IGRABBER 105
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
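Wiring up a new board like the iGrabber always touches three places that must stay in sync: the board index in the header, the em28xx_boards[] entry, and the USB ID table. A stripped-down sketch of the ID-table half, with hypothetical names:

#include <linux/module.h>
#include <linux/usb.h>

#define DEMO_BOARD_MYGICA_IGRABBER	105	/* index into the board array */

static const struct usb_device_id demo_id_table[] = {
	{ USB_DEVICE(0x1f4d, 0x1abe),
	  .driver_info = DEMO_BOARD_MYGICA_IGRABBER },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_id_table);
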
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 153a0c3e3da6..f1767be9d868 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -643,7 +643,7 @@ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
case 0xD8:
if (go->format == V4L2_PIX_FMT_MJPEG)
vb = frame_boundary(go, vb);
- /* fall through */
+ fallthrough;
default:
store_byte(vb, 0xFF);
store_byte(vb, buf[i]);
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index 464aa61cd914..3553788e8542 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -510,7 +510,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 320 */
- /* fall through */
+ fallthrough;
case 320:
default:
data[3] = 0x28; /* reg 2, H size/8 */
@@ -520,7 +520,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
break;
case 176:
data[9] |= 0x04; /* reg 8, 2:1 scale down from 352 */
- /* fall through */
+ fallthrough;
case 352:
data[3] = 0x2c; /* reg 2, H size/8 */
data[4] = 0x48; /* reg 3, V size/4 */
@@ -607,10 +607,10 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
switch (gspca_dev->pixfmt.width) {
case 160:
data[9] |= 0x0c; /* reg 8, 4:1 scale down */
- /* fall through */
+ fallthrough;
case 320:
data[9] |= 0x04; /* reg 8, 2:1 scale down */
- /* fall through */
+ fallthrough;
case 640:
default:
data[3] = 0x50; /* reg 2, H size/8 */
@@ -627,7 +627,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
case 176:
data[9] |= 0x04; /* reg 8, 2:1 scale down */
- /* fall through */
+ fallthrough;
case 352:
data[3] = 0x2c; /* reg 2, H size */
data[4] = 0x48; /* reg 3, V size */
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 880f569bda30..0f5f2464ac7a 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -2019,7 +2019,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_GAIN, 0, 253, 1, 128);
- /* fall through */
+ fallthrough;
case Cvideopro:
case DvcV6:
case Kritter:
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 0afe70a3f9a2..cd6776c3163b 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2004,7 +2004,7 @@ static void reg_w(struct sd *sd, u16 index, u16 value)
break;
case BRIDGE_OVFX2:
req = 0x0a;
- /* fall through */
+ fallthrough;
case BRIDGE_W9968CF:
gspca_dbg(gspca_dev, D_USBO, "SET %02x %04x %04x\n",
req, value, index);
@@ -3528,7 +3528,7 @@ static void ov511_mode_init_regs(struct sd *sd)
case SEN_OV76BE:
if (sd->gspca_dev.pixfmt.width == 320)
interlaced = 1;
- /* Fall through */
+ fallthrough;
case SEN_OV6630:
case SEN_OV7610:
case SEN_OV7670:
@@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd)
break;
}
/* For 640x480 case */
- /* fall through */
+ fallthrough;
default:
/* case 20: */
/* case 15: */
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 2a6d0a1265a7..bfd194c61819 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -1637,7 +1637,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
case SENSOR_HV7131R:
sd->i2c_intf = 0x81; /* i2c 400 Kb/s */
- /* fall through */
+ fallthrough;
default:
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index f4a4222f0d2e..ace3da40006e 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -551,7 +551,7 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
case BRIDGE_SPCA504:
case BRIDGE_SPCA504C:
pollreg = 0;
- /* fall through */
+ fallthrough;
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA504B: */
@@ -634,7 +634,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w_riv(gspca_dev, 0x00, 0x2000, 0x00);
reg_w_riv(gspca_dev, 0x00, 0x2301, 0x13);
reg_w_riv(gspca_dev, 0x00, 0x2306, 0x00);
- /* fall through */
+ fallthrough;
case BRIDGE_SPCA533:
spca504B_PollingDataReady(gspca_dev);
spca50x_GetFirmware(gspca_dev);
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index c579b100f066..cc87c24dd24c 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -1409,7 +1409,7 @@ static int cit_restart_stream(struct gspca_dev *gspca_dev)
case CIT_MODEL0:
case CIT_MODEL1:
cit_write_reg(gspca_dev, 0x0001, 0x0114);
- /* Fall through */
+ fallthrough;
case CIT_MODEL2:
case CIT_MODEL4:
cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
@@ -2725,7 +2725,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
break;
case CIT_MODEL2:
v4l2_ctrl_grab(sd->lighting, false);
- /* Fall through! */
+ fallthrough;
case CIT_MODEL4:
cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 15a2449d536f..aa285d5d6c0d 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6766,7 +6766,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_HV7131R:
case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
- /* fall through */
+ fallthrough;
case SENSOR_PO2030:
reg_w(gspca_dev, 0x03, 0x0008);
break;
@@ -6815,7 +6815,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_TAS5130C:
reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
reg_w(gspca_dev, 0x15, 0x01ae);
- /* fall through */
+ fallthrough;
case SENSOR_PAS202B:
case SENSOR_PO2030:
/* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1cfb7cf64131..f4a727918e35 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -864,10 +864,9 @@ static int ctrl_std_sym_to_val(struct pvr2_ctrl *cptr,
const char *bufPtr,unsigned int bufSize,
int *mskp,int *valp)
{
- int ret;
v4l2_std_id id;
- ret = pvr2_std_str_to_id(&id,bufPtr,bufSize);
- if (ret < 0) return ret;
+ if (!pvr2_std_str_to_id(&id, bufPtr, bufSize))
+ return -EINVAL;
if (mskp) *mskp = id;
if (valp) *valp = id;
return 0;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 2f135d533af6..71b719d363a5 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -554,7 +554,7 @@ static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
if (!DEVICE_USE_CODEC3(pdev->type))
break;
/* For CODEC3 where autogain also controls expo */
- /* fall through */
+ fallthrough;
case V4L2_CID_EXPOSURE_AUTO:
if (pdev->exposure_valid && time_before(jiffies,
pdev->last_exposure_update + HZ / 4)) {
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 9ba3a2ae36e5..df4c5dcba39c 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -430,7 +430,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
break;
case SMS_UNKNOWN_TYPE:
pr_err("Unspecified sms device type!\n");
- /* fall-thru */
+ fallthrough;
default:
dev->buffer_size = USB2_BUFFER_SIZE;
dev->response_alignment = align;
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index c26a0ff60a64..3a2df36ef1db 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -272,13 +272,13 @@ static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
int err = 0;
switch (cmd) {
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
- case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
atomic_set(&core->stream_started, 1);
break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
- case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
atomic_set(&core->stream_started, 0);
break;
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 2c723706f8c8..5c8cbc5d6f72 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -853,11 +853,9 @@ int tm6000_call_fillbuf(struct tm6000_core *dev, enum tm6000_ops_type type,
/* FIXME: tm6000_extension_devlist_lock should be a spinlock */
- if (!list_empty(&tm6000_extension_devlist)) {
- list_for_each_entry(ops, &tm6000_extension_devlist, next) {
- if (ops->fillbuf && ops->type == type)
- ops->fillbuf(dev, buf, size);
- }
+ list_for_each_entry(ops, &tm6000_extension_devlist, next) {
+ if (ops->fillbuf && ops->type == type)
+ ops->fillbuf(dev, buf, size);
}
return 0;
@@ -898,11 +896,9 @@ void tm6000_init_extension(struct tm6000_core *dev)
struct tm6000_ops *ops = NULL;
mutex_lock(&tm6000_devlist_mutex);
- if (!list_empty(&tm6000_extension_devlist)) {
- list_for_each_entry(ops, &tm6000_extension_devlist, next) {
- if (ops->init)
- ops->init(dev);
- }
+ list_for_each_entry(ops, &tm6000_extension_devlist, next) {
+ if (ops->init)
+ ops->init(dev);
}
mutex_unlock(&tm6000_devlist_mutex);
}
@@ -912,11 +908,9 @@ void tm6000_close_extension(struct tm6000_core *dev)
struct tm6000_ops *ops = NULL;
mutex_lock(&tm6000_devlist_mutex);
- if (!list_empty(&tm6000_extension_devlist)) {
- list_for_each_entry(ops, &tm6000_extension_devlist, next) {
- if (ops->fini)
- ops->fini(dev);
- }
+ list_for_each_entry(ops, &tm6000_extension_devlist, next) {
+ if (ops->fini)
+ ops->fini(dev);
}
mutex_unlock(&tm6000_devlist_mutex);
}
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index 4e56ff83566b..9e016b71aa91 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -5,6 +5,9 @@
* Copyright (c) 2002 Holger Waechtler <holger@convergence.de>
* Copyright (c) 2003 Felix Domke <tmbinc@elitedvb.net>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
@@ -59,7 +62,12 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-#define dprintk(x...) do { if (debug) printk(KERN_DEBUG x); } while (0)
+#define dprintk(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
#define ISO_BUF_COUNT 4
#define FRAMES_PER_ISO_BUF 4
@@ -72,6 +80,9 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define TTUSB_REV_2_2 0x22
#define TTUSB_BUDGET_NAME "ttusb_stc_fw"
+#define MAX_SEND 0x28
+#define MAX_RCV 0x20
+
/*
* since we're casting (struct ttusb*) <-> (struct dvb_demux*) around
* the dvb_demux field must be the first in struct!!
@@ -119,87 +130,70 @@ struct ttusb {
int cc; /* MuxCounter - will increment on EVERY MUX PACKET */
/* (including stuffing. yes. really.) */
- u8 last_result[32];
+ u8 send_buf[MAX_SEND];
+ u8 last_result[MAX_RCV];
int revision;
struct dvb_frontend* fe;
};
-/* ugly workaround ... don't know why it's necessary to read */
-/* all result codes. */
-
-static int ttusb_cmd(struct ttusb *ttusb,
- const u8 * data, int len, int needresult)
+static int ttusb_cmd(struct ttusb *ttusb, u8 *data, int len, int len_result)
{
int actual_len;
int err;
- int i;
-
- if (debug >= 3) {
- printk(KERN_DEBUG ">");
- for (i = 0; i < len; ++i)
- printk(KERN_CONT " %02x", data[i]);
- printk(KERN_CONT "\n");
- }
if (mutex_lock_interruptible(&ttusb->semusb) < 0)
return -EAGAIN;
+ if (debug >= 3)
+ dprintk("> %*ph\n", len, data);
+
+ memcpy(ttusb->send_buf, data, len);
+
err = usb_bulk_msg(ttusb->dev, ttusb->bulk_out_pipe,
- (u8 *) data, len, &actual_len, 1000);
+ ttusb->send_buf, len, &actual_len, 1000);
if (err != 0) {
- dprintk("%s: usb_bulk_msg(send) failed, err == %i!\n",
- __func__, err);
- mutex_unlock(&ttusb->semusb);
- return err;
+ dprintk("usb_bulk_msg(send) failed, err == %i!\n", err);
+ goto err;
}
if (actual_len != len) {
- dprintk("%s: only wrote %d of %d bytes\n", __func__,
+ err = -EIO;
+ dprintk("only wrote %d of %d bytes\n",
actual_len, len);
- mutex_unlock(&ttusb->semusb);
- return -1;
+ goto err;
}
err = usb_bulk_msg(ttusb->dev, ttusb->bulk_in_pipe,
- ttusb->last_result, 32, &actual_len, 1000);
+ ttusb->last_result, MAX_RCV, &actual_len, 1000);
if (err != 0) {
- printk("%s: failed, receive error %d\n", __func__,
- err);
- mutex_unlock(&ttusb->semusb);
- return err;
+ pr_err("cmd xter failed, receive error %d\n", err);
+ goto err;
}
if (debug >= 3) {
actual_len = ttusb->last_result[3] + 4;
- printk(KERN_DEBUG "<");
- for (i = 0; i < actual_len; ++i)
- printk(KERN_CONT " %02x", ttusb->last_result[i]);
- printk(KERN_CONT "\n");
+ dprintk("< %*ph\n", actual_len, ttusb->last_result);
}
- if (!needresult)
- mutex_unlock(&ttusb->semusb);
- return 0;
-}
+ if (len_result)
+ memcpy(data, ttusb->last_result, len_result);
-static int ttusb_result(struct ttusb *ttusb, u8 * data, int len)
-{
- memcpy(data, ttusb->last_result, len);
+err:
mutex_unlock(&ttusb->semusb);
- return 0;
+ return err;
}
static int ttusb_i2c_msg(struct ttusb *ttusb,
u8 addr, u8 * snd_buf, u8 snd_len, u8 * rcv_buf,
u8 rcv_len)
{
- u8 b[0x28];
+ u8 b[MAX_SEND];
u8 id = ++ttusb->c;
int i, err;
- if (snd_len > 0x28 - 7 || rcv_len > 0x20 - 7)
+ if (snd_len > MAX_SEND - 7 || rcv_len > MAX_RCV - 7)
return -EINVAL;
b[0] = 0xaa;
@@ -213,22 +207,19 @@ static int ttusb_i2c_msg(struct ttusb *ttusb,
for (i = 0; i < snd_len; i++)
b[7 + i] = snd_buf[i];
- err = ttusb_cmd(ttusb, b, snd_len + 7, 1);
+ err = ttusb_cmd(ttusb, b, snd_len + 7, MAX_RCV);
if (err)
return -EREMOTEIO;
- err = ttusb_result(ttusb, b, 0x20);
-
/* check if the i2c transaction was successful */
if ((snd_len != b[5]) || (rcv_len != b[6])) return -EREMOTEIO;
if (rcv_len > 0) {
if (err || b[0] != 0x55 || b[1] != id) {
- dprintk
- ("%s: usb_bulk_msg(recv) failed, err == %i, id == %02x, b == ",
- __func__, err, id);
+ dprintk("usb_bulk_msg(recv) failed, err == %i, id == %02x, b == ",
+ err, id);
return -EREMOTEIO;
}
@@ -272,7 +263,7 @@ static int master_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num
snd_buf, snd_len, rcv_buf, rcv_len);
if (err < rcv_len) {
- dprintk("%s: i == %i\n", __func__, i);
+ dprintk("i == %i\n", i);
break;
}
@@ -292,7 +283,7 @@ static int ttusb_boot_dsp(struct ttusb *ttusb)
err = request_firmware(&fw, "ttusb-budget/dspbootcode.bin",
&ttusb->dev->dev);
if (err) {
- printk(KERN_ERR "ttusb-budget: failed to request firmware\n");
+ pr_err("failed to request firmware\n");
return err;
}
@@ -332,8 +323,7 @@ static int ttusb_boot_dsp(struct ttusb *ttusb)
done:
release_firmware(fw);
if (err) {
- dprintk("%s: usb_bulk_msg() failed, return value %i!\n",
- __func__, err);
+ dprintk("usb_bulk_msg() failed, return value %i!\n", err);
}
return err;
@@ -400,8 +390,6 @@ static int ttusb_init_controller(struct ttusb *ttusb)
/* i2c write read: 5 bytes, addr 0x10, 0x02 bytes write, 1 bytes read. */
u8 b3[] =
{ 0xaa, ++ttusb->c, 0x31, 5, 0x10, 0x02, 0x01, 0x00, 0x1e };
- u8 b4[] =
- { 0x55, ttusb->c, 0x31, 4, 0x10, 0x02, 0x01, 0x00, 0x1e };
u8 get_version[] = { 0xaa, ++ttusb->c, 0x17, 5, 0, 0, 0, 0, 0 };
u8 get_dsp_version[0x20] =
@@ -422,44 +410,35 @@ static int ttusb_init_controller(struct ttusb *ttusb)
if ((err = ttusb_cmd(ttusb, b2, sizeof(b2), 0)))
return err;
- if ((err = ttusb_cmd(ttusb, b3, sizeof(b3), 1)))
- return err;
-
- err = ttusb_result(ttusb, b4, sizeof(b4));
-
- if ((err = ttusb_cmd(ttusb, get_version, sizeof(get_version), 1)))
+ if ((err = ttusb_cmd(ttusb, b3, sizeof(b3), 0)))
return err;
- if ((err = ttusb_result(ttusb, get_version, sizeof(get_version))))
+ if ((err = ttusb_cmd(ttusb, get_version,
+ sizeof(get_version), sizeof(get_version))))
return err;
- dprintk("%s: stc-version: %c%c%c%c%c\n", __func__,
- get_version[4], get_version[5], get_version[6],
- get_version[7], get_version[8]);
+ dprintk("stc-version: %c%c%c%c%c\n", get_version[4], get_version[5],
+ get_version[6], get_version[7], get_version[8]);
if (memcmp(get_version + 4, "V 0.0", 5) &&
memcmp(get_version + 4, "V 1.1", 5) &&
memcmp(get_version + 4, "V 2.1", 5) &&
memcmp(get_version + 4, "V 2.2", 5)) {
- printk
- ("%s: unknown STC version %c%c%c%c%c, please report!\n",
- __func__, get_version[4], get_version[5],
- get_version[6], get_version[7], get_version[8]);
+ pr_err("unknown STC version %c%c%c%c%c, please report!\n",
+ get_version[4], get_version[5],
+ get_version[6], get_version[7], get_version[8]);
}
ttusb->revision = ((get_version[6] - '0') << 4) |
(get_version[8] - '0');
err =
- ttusb_cmd(ttusb, get_dsp_version, sizeof(get_dsp_version), 1);
+ ttusb_cmd(ttusb, get_dsp_version,
+ sizeof(get_dsp_version), sizeof(get_dsp_version));
if (err)
return err;
- err =
- ttusb_result(ttusb, get_dsp_version, sizeof(get_dsp_version));
- if (err)
- return err;
- printk("%s: dsp-version: %c%c%c\n", __func__,
+ pr_info("dsp-version: %c%c%c\n",
get_dsp_version[4], get_dsp_version[5], get_dsp_version[6]);
return 0;
}
@@ -481,8 +460,7 @@ static int ttusb_send_diseqc(struct dvb_frontend* fe,
/* Diseqc */
if ((err = ttusb_cmd(ttusb, b, 4 + b[3], 0))) {
- dprintk("%s: usb_bulk_msg() failed, return value %i!\n",
- __func__, err);
+ dprintk("usb_bulk_msg() failed, return value %i!\n", err);
}
return err;
@@ -499,8 +477,7 @@ static int ttusb_update_lnb(struct ttusb *ttusb)
/* SetLNB */
if ((err = ttusb_cmd(ttusb, b, sizeof(b), 0))) {
- dprintk("%s: usb_bulk_msg() failed, return value %i!\n",
- __func__, err);
+ dprintk("usb_bulk_msg() failed, return value %i!\n", err);
}
return err;
@@ -534,8 +511,7 @@ static void ttusb_set_led_freq(struct ttusb *ttusb, u8 freq)
err = ttusb_cmd(ttusb, b, sizeof(b), 0);
if (err) {
- dprintk("%s: usb_bulk_msg() failed, return value %i!\n",
- __func__, err);
+ dprintk("usb_bulk_msg() failed, return value %i!\n", err);
}
}
#endif
@@ -559,7 +535,7 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
int i;
if (len < 4 || len & 0x1) {
- pr_warn("%s: muxpack has invalid len %d\n", __func__, len);
+ pr_warn("muxpack has invalid len %d\n", len);
numinvalid++;
return;
}
@@ -567,8 +543,7 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
for (i = 0; i < len; i += 2)
csum ^= le16_to_cpup((__le16 *) (muxpack + i));
if (csum) {
- printk("%s: muxpack with incorrect checksum, ignoring\n",
- __func__);
+ pr_warn("muxpack with incorrect checksum, ignoring\n");
numinvalid++;
return;
}
@@ -576,8 +551,8 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
cc = (muxpack[len - 4] << 8) | muxpack[len - 3];
cc &= 0x7FFF;
if ((cc != ttusb->cc) && (ttusb->cc != -1))
- printk("%s: cc discontinuity (%d frames missing)\n",
- __func__, (cc - ttusb->cc) & 0x7FFF);
+ pr_warn("cc discontinuity (%d frames missing)\n",
+ (cc - ttusb->cc) & 0x7FFF);
ttusb->cc = (cc + 1) & 0x7FFF;
if (muxpack[0] & 0x80) {
#ifdef TTUSB_HWSECTIONS
@@ -598,7 +573,7 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
!!(ttusb->muxpack[1] & 1))
data++;
#warning TODO: pusi
- printk("cc: %04x\n", (data[0] << 8) | data[1]);
+ dprintk("cc: %04x\n", (data[0] << 8) | data[1]);
#endif
numsec++;
} else if (muxpack[0] == 0x47) {
@@ -617,7 +592,7 @@ static void ttusb_process_muxpack(struct ttusb *ttusb, const u8 * muxpack,
dvb_dmx_swfilter_packets(&ttusb->dvb_demux, muxpack, 1);
} else if (muxpack[0] != 0) {
numinvalid++;
- printk("illegal muxpack type %02x\n", muxpack[0]);
+ pr_err("illegal muxpack type %02x\n", muxpack[0]);
} else
numstuff++;
}
@@ -627,7 +602,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
int maxwork = 1024;
while (len) {
if (!(maxwork--)) {
- printk("%s: too much work\n", __func__);
+ pr_err("too much work\n");
break;
}
@@ -641,10 +616,7 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
else {
ttusb->mux_state = 0;
if (ttusb->insync) {
- dprintk("%s: %02x\n",
- __func__, data[-1]);
- printk(KERN_INFO "%s: lost sync.\n",
- __func__);
+ pr_info("lost sync.\n");
ttusb->insync = 0;
}
}
@@ -700,10 +672,8 @@ static void ttusb_process_frame(struct ttusb *ttusb, u8 * data, int len)
ttusb->muxpack[1] + 2 +
4;
else {
- dprintk
- ("%s: invalid state: first byte is %x\n",
- __func__,
- ttusb->muxpack[0]);
+ dprintk("invalid state: first byte is %x\n",
+ ttusb->muxpack[0]);
ttusb->mux_state = 0;
}
}
@@ -752,12 +722,6 @@ static void ttusb_iso_irq(struct urb *urb)
if (!ttusb->iso_streaming)
return;
-#if 0
- printk("%s: status %d, errcount == %d, length == %i\n",
- __func__,
- urb->status, urb->error_count, urb->actual_length);
-#endif
-
if (!urb->status) {
for (i = 0; i < urb->number_of_packets; ++i) {
numpkt++;
@@ -830,7 +794,7 @@ static int ttusb_start_iso_xfer(struct ttusb *ttusb)
int i, j, err, buffer_offset = 0;
if (ttusb->iso_streaming) {
- printk("%s: iso xfer already running!\n", __func__);
+ pr_err("iso xfer already running!\n");
return 0;
}
@@ -864,9 +828,8 @@ static int ttusb_start_iso_xfer(struct ttusb *ttusb)
for (i = 0; i < ISO_BUF_COUNT; i++) {
if ((err = usb_submit_urb(ttusb->iso_urb[i], GFP_ATOMIC))) {
ttusb_stop_iso_xfer(ttusb);
- printk
- ("%s: failed urb submission (%i: err = %i)!\n",
- __func__, i, err);
+ pr_err("failed urb submission (%i: err = %i)!\n",
+ i, err);
return err;
}
}
@@ -1426,7 +1389,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&ttusb->i2c_adap, &tuner_msg, 1) != 1) {
- printk("dvb-ttusb-budget: dvbc_philips_tdm1316l_pll_set Error 1\n");
+ pr_err("dvbc_philips_tdm1316l_pll_set Error 1\n");
return -EIO;
}
@@ -1435,7 +1398,7 @@ static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&ttusb->i2c_adap, &tuner_msg, 1) != 1) {
- printk("dvb-ttusb-budget: dvbc_philips_tdm1316l_pll_set Error 2\n");
+ pr_err("dvbc_philips_tdm1316l_pll_set Error 2\n");
return -EIO;
}
@@ -1612,12 +1575,12 @@ static void frontend_init(struct ttusb* ttusb)
}
if (ttusb->fe == NULL) {
- printk("dvb-ttusb-budget: A frontend driver was not found for device [%04x:%04x]\n",
+ pr_err("no frontend driver found for device [%04x:%04x]\n",
le16_to_cpu(ttusb->dev->descriptor.idVendor),
le16_to_cpu(ttusb->dev->descriptor.idProduct));
} else {
if (dvb_register_frontend(&ttusb->adapter, ttusb->fe)) {
- printk("dvb-ttusb-budget: Frontend registration failed!\n");
+ pr_err("Frontend registration failed!\n");
dvb_frontend_detach(ttusb->fe);
ttusb->fe = NULL;
}
@@ -1637,7 +1600,7 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
struct ttusb *ttusb;
int result;
- dprintk("%s: TTUSB DVB connected\n", __func__);
+ dprintk("TTUSB DVB connected\n");
udev = interface_to_usbdev(intf);
@@ -1659,14 +1622,14 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
result = ttusb_alloc_iso_urbs(ttusb);
if (result < 0) {
- dprintk("%s: ttusb_alloc_iso_urbs - failed\n", __func__);
+ dprintk("ttusb_alloc_iso_urbs - failed\n");
mutex_unlock(&ttusb->semi2c);
kfree(ttusb);
return result;
}
if (ttusb_init_controller(ttusb))
- printk("ttusb_init_controller: error\n");
+ pr_err("ttusb_init_controller: error\n");
mutex_unlock(&ttusb->semi2c);
@@ -1711,7 +1674,7 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
result = dvb_dmx_init(&ttusb->dvb_demux);
if (result < 0) {
- printk("ttusb_dvb: dvb_dmx_init failed (errno = %d)\n", result);
+ pr_err("dvb_dmx_init failed (errno = %d)\n", result);
result = -ENODEV;
goto err_i2c_del_adapter;
}
@@ -1722,14 +1685,14 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
result = dvb_dmxdev_init(&ttusb->dmxdev, &ttusb->adapter);
if (result < 0) {
- printk("ttusb_dvb: dvb_dmxdev_init failed (errno = %d)\n",
+ pr_err("dvb_dmxdev_init failed (errno = %d)\n",
result);
result = -ENODEV;
goto err_release_dmx;
}
if (dvb_net_init(&ttusb->adapter, &ttusb->dvbnet, &ttusb->dvb_demux.dmx)) {
- printk("ttusb_dvb: dvb_net_init failed!\n");
+ pr_err("dvb_net_init failed!\n");
result = -ENODEV;
goto err_release_dmxdev;
}
@@ -1778,7 +1741,7 @@ static void ttusb_disconnect(struct usb_interface *intf)
kfree(ttusb);
- dprintk("%s: TTUSB DVB disconnected\n", __func__);
+ dprintk("TTUSB DVB disconnected\n");
}
static const struct usb_device_id ttusb_table[] = {
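The hex-dump conversions in this file lean on the kernel's %*ph printk extension, which prints up to 64 bytes of a buffer as hex and replaces the old character-by-character KERN_CONT loops. A small sketch:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_dump_reply(const u8 *buf, int len)
{
	/* %*ph consumes (int length, const void *buffer); max 64 bytes. */
	pr_debug("reply: %*ph\n", len, buf);
}
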
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index b8d39b2f777f..df6c5e4a0f05 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -769,9 +769,9 @@ static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
}
}
-static void ttusb_dec_process_urb_frame_list(unsigned long data)
+static void ttusb_dec_process_urb_frame_list(struct tasklet_struct *t)
{
- struct ttusb_dec *dec = (struct ttusb_dec *)data;
+ struct ttusb_dec *dec = from_tasklet(dec, t, urb_tasklet);
struct list_head *item;
struct urb_frame *frame;
unsigned long flags;
@@ -1209,8 +1209,7 @@ static void ttusb_dec_init_tasklet(struct ttusb_dec *dec)
{
spin_lock_init(&dec->urb_frame_list_lock);
INIT_LIST_HEAD(&dec->urb_frame_list);
- tasklet_init(&dec->urb_tasklet, ttusb_dec_process_urb_frame_list,
- (unsigned long)dec);
+ tasklet_setup(&dec->urb_tasklet, ttusb_dec_process_urb_frame_list);
}
static int ttusb_init_rc( struct ttusb_dec *dec)
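The tasklet conversion above follows the tree-wide move from tasklet_init(), which passed an unsigned long cookie, to tasklet_setup(), whose callback receives the tasklet pointer and recovers the containing structure with from_tasklet(). A minimal sketch:

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct tl;
	int pending;
};

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() in disguise. */
	struct demo_dev *dev = from_tasklet(dev, t, tl);

	dev->pending = 0;
}

static void demo_init(struct demo_dev *dev)
{
	tasklet_setup(&dev->tl, demo_tasklet_fn);
}
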
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index ee9c656d121f..2308c0b4f5e7 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf,
usbtv_audio_fail:
/* we must not free at this point */
- usb_get_dev(usbtv->udev);
+ v4l2_device_get(&usbtv->v4l2_dev);
+ /* this will undo the v4l2_device_get() */
usbtv_video_free(usbtv);
usbtv_video_fail:
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index c89efcd46163..3b4a2e769230 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -872,7 +872,6 @@ static void usbtv_release(struct v4l2_device *v4l2_dev)
v4l2_device_unregister(&usbtv->v4l2_dev);
v4l2_ctrl_handler_free(&usbtv->ctrl);
- vb2_queue_release(&usbtv->vb2q);
kfree(usbtv);
}
@@ -954,7 +953,6 @@ vdev_fail:
v4l2_fail:
ctrl_fail:
v4l2_ctrl_handler_free(&usbtv->ctrl);
- vb2_queue_release(&usbtv->vb2q);
return ret;
}
@@ -965,7 +963,7 @@ void usbtv_video_free(struct usbtv *usbtv)
mutex_lock(&usbtv->v4l2_lock);
usbtv_stop(usbtv);
- video_unregister_device(&usbtv->vdev);
+ vb2_video_unregister_device(&usbtv->vdev);
v4l2_device_disconnect(&usbtv->v4l2_dev);
mutex_unlock(&usbtv->v4l2_lock);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index e399b9fad757..f479d8971dfb 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -773,12 +773,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
offset &= 7;
mask = ((1LL << bits) - 1) << offset;
- for (; bits > 0; data++) {
+ while (1) {
u8 byte = *data & mask;
value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
bits -= 8 - (offset > 0 ? offset : 0);
+ if (bits <= 0)
+ break;
+
offset -= 8;
mask = (1 << bits) - 1;
+ data++;
}
/* Sign-extend the value if needed. */
@@ -1844,30 +1848,35 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
{
struct uvc_entity *entity;
struct uvc_control *ctrl;
- unsigned int i, found = 0;
+ unsigned int i;
+ bool found;
u32 reqflags;
u16 size;
u8 *data = NULL;
int ret;
/* Find the extension unit. */
+ found = false;
list_for_each_entry(entity, &chain->entities, chain) {
if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
- entity->id == xqry->unit)
+ entity->id == xqry->unit) {
+ found = true;
break;
+ }
}
- if (entity->id != xqry->unit) {
+ if (!found) {
uvc_trace(UVC_TRACE_CONTROL, "Extension unit %u not found.\n",
xqry->unit);
return -ENOENT;
}
/* Find the control and perform delayed initialization if needed. */
+ found = false;
for (i = 0; i < entity->ncontrols; ++i) {
ctrl = &entity->controls[i];
if (ctrl->index == xqry->selector - 1) {
- found = 1;
+ found = true;
break;
}
}
@@ -2011,25 +2020,14 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
const struct uvc_control_info *info)
{
- int ret = 0;
-
ctrl->info = *info;
INIT_LIST_HEAD(&ctrl->info.mappings);
/* Allocate an array to save control values (cur, def, max, etc.) */
ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1,
GFP_KERNEL);
- if (ctrl->uvc_data == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * Retrieve control flags from the device. Ignore errors and work with
- * default flag values from the uvc_ctrl array when the device doesn't
- * properly implement GET_INFO on standard controls.
- */
- uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+ if (!ctrl->uvc_data)
+ return -ENOMEM;
ctrl->initialized = 1;
@@ -2037,10 +2035,7 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
"entity %u\n", ctrl->info.entity, ctrl->info.selector,
dev->udev->devpath, ctrl->entity->id);
-done:
- if (ret < 0)
- kfree(ctrl->uvc_data);
- return ret;
+ return 0;
}
/*
@@ -2253,6 +2248,13 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
uvc_ctrl_add_info(dev, ctrl, info);
+ /*
+ * Retrieve control flags from the device. Ignore errors
+ * and work with default flag values from the uvc_ctrl
+ * array when the device doesn't properly implement
+ * GET_INFO on standard controls.
+ */
+ uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
break;
}
}
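The found-flag rework above fixes a classic list_for_each_entry() pitfall: when the loop runs to completion, the cursor is a bogus pointer computed from the list head, so testing a field of it (as the old entity->id check did) is invalid. A sketch of the safe pattern:

#include <linux/list.h>
#include <linux/types.h>

struct demo_item {
	struct list_head node;
	int id;
};

static struct demo_item *demo_find(struct list_head *head, int id)
{
	struct demo_item *it;
	bool found = false;

	list_for_each_entry(it, head, node) {
		if (it->id == id) {
			found = true;
			break;
		}
	}
	/* After a completed traversal 'it' must not be dereferenced. */
	return found ? it : NULL;
}
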
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index 2b8af4b54117..1a1258d4ffca 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -73,7 +73,6 @@ static struct dentry *uvc_debugfs_root_dir;
void uvc_debugfs_init_stream(struct uvc_streaming *stream)
{
struct usb_device *udev = stream->dev->udev;
- struct dentry *dent;
char dir_name[33];
if (uvc_debugfs_root_dir == NULL)
@@ -82,22 +81,11 @@ void uvc_debugfs_init_stream(struct uvc_streaming *stream)
snprintf(dir_name, sizeof(dir_name), "%u-%u-%u", udev->bus->busnum,
udev->devnum, stream->intfnum);
- dent = debugfs_create_dir(dir_name, uvc_debugfs_root_dir);
- if (IS_ERR_OR_NULL(dent)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs %s "
- "directory.\n", dir_name);
- return;
- }
-
- stream->debugfs_dir = dent;
+ stream->debugfs_dir = debugfs_create_dir(dir_name,
+ uvc_debugfs_root_dir);
- dent = debugfs_create_file("stats", 0444, stream->debugfs_dir,
- stream, &uvc_debugfs_stats_fops);
- if (IS_ERR_OR_NULL(dent)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs stats file.\n");
- uvc_debugfs_cleanup_stream(stream);
- return;
- }
+ debugfs_create_file("stats", 0444, stream->debugfs_dir, stream,
+ &uvc_debugfs_stats_fops);
}
void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
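The simplification above follows the debugfs convention that callers should not check return values: a failed or ERR_PTR dentry is safe to hand to later debugfs calls, which degrade to no-ops. A sketch, with hypothetical names:

#include <linux/debugfs.h>

static struct dentry *demo_dir;

static void demo_debugfs_init(void *priv,
			      const struct file_operations *fops)
{
	/* No error handling on purpose; debugfs tolerates bad dentries. */
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("stats", 0444, demo_dir, priv, fops);
}
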
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 431d86e1c94b..ddb9eaa11be7 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -214,6 +214,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_CNF4,
.fcc = V4L2_PIX_FMT_CNF4,
},
+ {
+ .name = "HEVC",
+ .guid = UVC_GUID_FORMAT_HEVC,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
};
/* ------------------------------------------------------------------------
@@ -248,10 +253,10 @@ static struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
return NULL;
}
-static u32 uvc_colorspace(const u8 primaries)
+static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
{
- static const u8 colorprimaries[] = {
- 0,
+ static const enum v4l2_colorspace colorprimaries[] = {
+ V4L2_COLORSPACE_DEFAULT, /* Unspecified */
V4L2_COLORSPACE_SRGB,
V4L2_COLORSPACE_470_SYSTEM_M,
V4L2_COLORSPACE_470_SYSTEM_BG,
@@ -262,7 +267,61 @@ static u32 uvc_colorspace(const u8 primaries)
if (primaries < ARRAY_SIZE(colorprimaries))
return colorprimaries[primaries];
- return 0;
+ return V4L2_COLORSPACE_DEFAULT; /* Reserved */
+}
+
+static enum v4l2_xfer_func uvc_xfer_func(const u8 transfer_characteristics)
+{
+ /*
+ * V4L2 does not currently have definitions for all possible values of
+ * UVC transfer characteristics. If v4l2_xfer_func is extended with new
+ * values, the mapping below should be updated.
+ *
+ * Substitutions are taken from the mapping given for
+ * V4L2_XFER_FUNC_DEFAULT documented in videodev2.h.
+ */
+ static const enum v4l2_xfer_func xfer_funcs[] = {
+ V4L2_XFER_FUNC_DEFAULT, /* Unspecified */
+ V4L2_XFER_FUNC_709,
+ V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 M */
+ V4L2_XFER_FUNC_709, /* Substitution for BT.470-2 B, G */
+ V4L2_XFER_FUNC_709, /* Substitution for SMPTE 170M */
+ V4L2_XFER_FUNC_SMPTE240M,
+ V4L2_XFER_FUNC_NONE,
+ V4L2_XFER_FUNC_SRGB,
+ };
+
+ if (transfer_characteristics < ARRAY_SIZE(xfer_funcs))
+ return xfer_funcs[transfer_characteristics];
+
+ return V4L2_XFER_FUNC_DEFAULT; /* Reserved */
+}
+
+static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients)
+{
+ /*
+ * V4L2 does not currently have definitions for all possible values of
+ * UVC matrix coefficients. If v4l2_ycbcr_encoding is extended with new
+ * values, the mapping below should be updated.
+ *
+ * Substitutions are taken from the mapping given for
+ * V4L2_YCBCR_ENC_DEFAULT documented in videodev2.h.
+ *
+ * FCC is assumed to be close enough to 601.
+ */
+ static const enum v4l2_ycbcr_encoding ycbcr_encs[] = {
+ V4L2_YCBCR_ENC_DEFAULT, /* Unspecified */
+ V4L2_YCBCR_ENC_709,
+ V4L2_YCBCR_ENC_601, /* Substitution for FCC */
+ V4L2_YCBCR_ENC_601, /* Substitution for BT.470-2 B, G */
+ V4L2_YCBCR_ENC_601,
+ V4L2_YCBCR_ENC_SMPTE240M,
+ };
+
+ if (matrix_coefficients < ARRAY_SIZE(ycbcr_encs))
+ return ycbcr_encs[matrix_coefficients];
+
+ return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */
}
/* Simplify a fraction using a simple continued fraction decomposition. The
@@ -284,7 +343,7 @@ void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
return;
/* Convert the fraction to a simple continued fraction. See
- * http://mathforum.org/dr.math/faq/faq.fractions.html
+ * https://mathforum.org/dr.math/faq/faq.fractions.html
* Stop if the current term is bigger than or equal to the given
* threshold.
*/
@@ -704,6 +763,8 @@ static int uvc_parse_format(struct uvc_device *dev,
}
format->colorspace = uvc_colorspace(buffer[3]);
+ format->xfer_func = uvc_xfer_func(buffer[4]);
+ format->ycbcr_enc = uvc_ycbcr_enc(buffer[5]);
buflen -= buffer[0];
buffer += buffer[0];
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index b4499cddeffe..ca3a9c2eec27 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -73,10 +73,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
int ret;
if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
+ u32 function;
+
v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
strscpy(entity->subdev.name, entity->name,
sizeof(entity->subdev.name));
+ switch (UVC_ENTITY_TYPE(entity)) {
+ case UVC_VC_SELECTOR_UNIT:
+ function = MEDIA_ENT_F_VID_MUX;
+ break;
+ case UVC_VC_PROCESSING_UNIT:
+ case UVC_VC_EXTENSION_UNIT:
+ /* For lack of a better option. */
+ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ break;
+ case UVC_COMPOSITE_CONNECTOR:
+ case UVC_COMPONENT_CONNECTOR:
+ function = MEDIA_ENT_F_CONN_COMPOSITE;
+ break;
+ case UVC_SVIDEO_CONNECTOR:
+ function = MEDIA_ENT_F_CONN_SVIDEO;
+ break;
+ case UVC_ITT_CAMERA:
+ function = MEDIA_ENT_F_CAM_SENSOR;
+ break;
+ case UVC_TT_VENDOR_SPECIFIC:
+ case UVC_ITT_VENDOR_SPECIFIC:
+ case UVC_ITT_MEDIA_TRANSPORT_INPUT:
+ case UVC_OTT_VENDOR_SPECIFIC:
+ case UVC_OTT_DISPLAY:
+ case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
+ case UVC_EXTERNAL_VENDOR_SPECIFIC:
+ default:
+ function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ break;
+ }
+
+ entity->subdev.entity.function = function;
+
ret = media_entity_pads_init(&entity->subdev.entity,
entity->num_pads, entity->pads);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 0335e69b70ab..fa06bfa174ad 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -247,12 +247,44 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
if (ret < 0)
goto done;
+ /* After the probe, update fmt with the values returned from
+ * negotiation with the device.
+ */
+ for (i = 0; i < stream->nformats; ++i) {
+ if (probe->bFormatIndex == stream->format[i].index) {
+ format = &stream->format[i];
+ break;
+ }
+ }
+
+ if (i == stream->nformats) {
+ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
+ probe->bFormatIndex);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < format->nframes; ++i) {
+ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
+ frame = &format->frame[i];
+ break;
+ }
+ }
+
+ if (i == format->nframes) {
+ uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
+ probe->bFrameIndex);
+ return -EINVAL;
+ }
+
fmt->fmt.pix.width = frame->wWidth;
fmt->fmt.pix.height = frame->wHeight;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
+ fmt->fmt.pix.pixelformat = format->fcc;
fmt->fmt.pix.colorspace = format->colorspace;
+ fmt->fmt.pix.xfer_func = format->xfer_func;
+ fmt->fmt.pix.ycbcr_enc = format->ycbcr_enc;
if (uvc_format != NULL)
*uvc_format = format;
@@ -289,6 +321,8 @@ static int uvc_v4l2_get_format(struct uvc_streaming *stream,
fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = stream->ctrl.dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace = format->colorspace;
+ fmt->fmt.pix.xfer_func = format->xfer_func;
+ fmt->fmt.pix.ycbcr_enc = format->ycbcr_enc;
done:
mutex_unlock(&stream->mutex);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a65d5353a441..a6a441d92b94 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -622,7 +622,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
* to avoid losing precision in the division. Similarly, the host timestamp is
* computed with
*
- * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
+ * TS = ((TS2 - TS1) * SOF + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
*
* SOF values are coded on 11 bits by USB. We extend their precision with 16
* decimal bits, leading to a 11.16 coding.
@@ -1509,11 +1509,11 @@ static void uvc_video_complete(struct urb *urb)
default:
uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
"completion handler.\n", urb->status);
- /* fall through */
+ fallthrough;
case -ENOENT: /* usb_poison_urb() called. */
if (stream->frozen)
return;
- /* fall through */
+ fallthrough;
case -ECONNRESET: /* usb_unlink_urb() called. */
case -ESHUTDOWN: /* The endpoint is being disabled. */
uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
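The comment fix above corrects formula (2): the host timestamp is interpolated at the frame's SOF, not its PTS. Rearranged into the usual lerp form (algebraically identical, with smaller intermediate products), a sketch that ignores the driver's 11.16 fixed-point SOF representation and wrap handling:

#include <linux/math64.h>
#include <linux/types.h>

static u64 demo_interp_host_ts(u64 ts1, u64 ts2, u32 sof1, u32 sof2, u32 sof)
{
	/* TS = ((TS2 - TS1) * SOF + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1)
	 * rewritten as ts1 + (ts2 - ts1) * (sof - sof1) / (sof2 - sof1).
	 */
	return ts1 + div_u64((ts2 - ts1) * (sof - sof1), sof2 - sof1);
}
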
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6ab972c643e3..a3dfacf069c4 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -165,6 +165,10 @@
{0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_HEVC \
+ { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+
/* ------------------------------------------------------------------------
* Driver specific constants.
@@ -370,7 +374,9 @@ struct uvc_format {
u8 type;
u8 index;
u8 bpp;
- u8 colorspace;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
u32 fcc;
u32 flags;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 45a2403aa039..bd7f330c941c 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -200,6 +200,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
static const char * const mpeg_video_bitrate_mode[] = {
"Variable Bitrate",
"Constant Bitrate",
+ "Constant Quality",
NULL
};
static const char * const mpeg_stream_type[] = {
@@ -474,6 +475,23 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"3",
NULL,
};
+ static const char * const vp9_level[] = {
+ "1",
+ "1.1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
static const char * const flash_led_mode[] = {
"Off",
@@ -590,6 +608,12 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"External",
NULL,
};
+ static const char * const mpeg_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -651,6 +675,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return flash_strobe_source;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
return header_mode;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return mpeg_video_frame_skip;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
return multi_slice;
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
@@ -685,6 +711,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return vp8_profile;
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
return vp9_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ return vp9_level;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -832,6 +860,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
@@ -844,6 +873,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
@@ -897,6 +927,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS: return "H264 Decode Parameters";
case V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE: return "H264 Decode Mode";
case V4L2_CID_MPEG_VIDEO_H264_START_CODE: return "H264 Start Code";
+ case V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
@@ -938,6 +969,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER: return "VP8 Frame Header";
/* HEVC controls */
@@ -1265,6 +1297,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_FLASH_LED_MODE:
case V4L2_CID_FLASH_STROBE_SOURCE:
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
@@ -1294,6 +1327,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
case V4L2_CID_DETECT_MD_MODE:
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
@@ -1412,6 +1446,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS:
+ *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
+ break;
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
*type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
break;
@@ -1721,6 +1758,8 @@ static void std_log(const struct v4l2_ctrl *ctrl)
#define zero_padding(s) \
memset(&(s).padding, 0, sizeof((s).padding))
+#define zero_reserved(s) \
+ memset(&(s).reserved, 0, sizeof((s).reserved))
/*
* Compound controls validation requires setting unused fields/flags to zero
@@ -1731,6 +1770,8 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
{
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
@@ -1790,8 +1831,25 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
case V4L2_CTRL_TYPE_H264_SPS:
case V4L2_CTRL_TYPE_H264_PPS:
case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ break;
+
case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ p_h264_slice_params = p;
+
+ zero_reserved(*p_h264_slice_params);
+ break;
+
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ p_h264_dec_params = p;
+
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ struct v4l2_h264_dpb_entry *dpb_entry =
+ &p_h264_dec_params->dpb[i];
+
+ zero_reserved(*dpb_entry);
+ }
+ zero_reserved(*p_h264_dec_params);
break;
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
@@ -2553,6 +2611,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
+ break;
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
break;
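
The v4l2-ctrls hunks above register V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE and V4L2_CID_MPEG_VIDEO_VP9_LEVEL as standard menu controls and add the V4L2_CTRL_TYPE_H264_PRED_WEIGHTS compound type. A minimal sketch of how an encoder driver might expose the new frame-skip menu through the stock helper; the handler and ops names are hypothetical, and the bounds assume the three menu entries added above:

#include <media/v4l2-ctrls.h>

static int my_register_frame_skip(struct v4l2_ctrl_handler *my_hdl,
				  const struct v4l2_ctrl_ops *my_ops)
{
	/* max = BUF_LIMIT (2), no skipped entries, default = DISABLED */
	v4l2_ctrl_new_std_menu(my_hdl, my_ops,
			       V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE,
			       V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
			       0, V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED);
	return my_hdl->error;
}
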
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index a4c3c77c1894..d7bbe33840cb 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -547,8 +547,8 @@ int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
}
for (i = 0; i < vep->nr_of_link_frequencies; i++)
- pr_info("link-frequencies %u value %llu\n", i,
- vep->link_frequencies[i]);
+ pr_debug("link-frequencies %u value %llu\n", i,
+ vep->link_frequencies[i]);
}
pr_debug("===== end parsing endpoint %pfw\n", fwnode);
diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
index edf6225f0522..5633a242520a 100644
--- a/drivers/media/v4l2-core/v4l2-h264.c
+++ b/drivers/media/v4l2-core/v4l2-h264.c
@@ -18,14 +18,12 @@
*
* @b: the builder context to initialize
* @dec_params: decode parameters control
- * @slice_params: first slice parameters control
* @sps: SPS control
* @dpb: DPB to use when creating the reference list
*/
void
v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
const struct v4l2_ctrl_h264_decode_params *dec_params,
- const struct v4l2_ctrl_h264_slice_params *slice_params,
const struct v4l2_ctrl_h264_sps *sps,
const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
{
@@ -33,13 +31,13 @@ v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
unsigned int i;
max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
- cur_frame_num = slice_params->frame_num;
+ cur_frame_num = dec_params->frame_num;
memset(b, 0, sizeof(*b));
- if (!(slice_params->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC))
+ if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC))
b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt,
dec_params->top_field_order_cnt);
- else if (slice_params->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
b->cur_pic_order_count = dec_params->bottom_field_order_cnt;
else
b->cur_pic_order_count = dec_params->top_field_order_cnt;
@@ -66,10 +64,10 @@ v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
else
b->refs[i].frame_num = dpb[i].frame_num;
- if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD))
+ if (dpb[i].fields == V4L2_H264_FRAME_REF)
pic_order_count = min(dpb[i].top_field_order_cnt,
dpb[i].bottom_field_order_cnt);
- else if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD)
+ else if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
pic_order_count = dpb[i].bottom_field_order_cnt;
else
pic_order_count = dpb[i].top_field_order_cnt;
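
The v4l2-h264 change drops the per-slice control from the reference-list builder: frame_num and the field flags now come from the decode-params control, and DPB field information moves to the new dpb[i].fields member. A sketch of the updated call site in a stateless decoder, assuming the controls have already been fetched by driver code:

#include <media/v4l2-h264.h>

static void my_build_p_reflist(const struct v4l2_ctrl_h264_decode_params *dec_params,
			       const struct v4l2_ctrl_h264_sps *sps,
			       u8 reflist[V4L2_H264_NUM_DPB_ENTRIES])
{
	struct v4l2_h264_reflist_builder b;

	/* No slice_params argument anymore; the DPB lives in dec_params. */
	v4l2_h264_init_reflist_builder(&b, dec_params, sps, dec_params->dpb);
	v4l2_h264_build_p_ref_list(&b, reflist);
}
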
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 95a8f2dc5341..b221b4e438a1 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -43,6 +43,10 @@ module_param(debug, bool, 0644);
#define TRANS_ABORT (1 << 2)
+/* The job queue is not running new jobs */
+#define QUEUE_PAUSED (1 << 0)
+
+
/* Offset base for buffers on the destination queue - used to distinguish
* between source and destination buffers when mmapping - they receive the same
* offsets but for different queues */
@@ -84,6 +88,7 @@ static const char * const m2m_entity_name[] = {
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
* @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
@@ -101,6 +106,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
struct work_struct job_work;
+ unsigned long job_queue_flags;
const struct v4l2_m2m_ops *m2m_ops;
};
@@ -263,6 +269,12 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
return;
}
+ if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running new jobs is paused\n");
+ return;
+ }
+
m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
struct v4l2_m2m_ctx, queue);
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
@@ -528,6 +540,34 @@ unlock:
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags |= QUEUE_PAUSED;
+ curr_ctx = m2m_dev->curr_ctx;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (curr_ctx)
+ wait_event(curr_ctx->finished,
+ !(curr_ctx->job_flags & TRANS_RUNNING));
+}
+EXPORT_SYMBOL(v4l2_m2m_suspend);
+
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_resume);
+
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -841,7 +881,6 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
struct poll_table_struct *wait)
{
struct vb2_queue *src_q, *dst_q;
- struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
__poll_t rc = 0;
unsigned long flags;
@@ -862,34 +901,17 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
list_empty(&dst_q->queued_list)))
return EPOLLERR;
- spin_lock_irqsave(&dst_q->done_lock, flags);
- if (list_empty(&dst_q->done_list)) {
- /*
- * If the last buffer was dequeued from the capture queue,
- * return immediately. DQBUF will return -EPIPE.
- */
- if (dst_q->last_buffer_dequeued) {
- spin_unlock_irqrestore(&dst_q->done_lock, flags);
- return EPOLLIN | EPOLLRDNORM;
- }
- }
- spin_unlock_irqrestore(&dst_q->done_lock, flags);
-
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
- src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
- done_entry);
- if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
- || src_vb->state == VB2_BUF_STATE_ERROR))
rc |= EPOLLOUT | EPOLLWRNORM;
spin_unlock_irqrestore(&src_q->done_lock, flags);
spin_lock_irqsave(&dst_q->done_lock, flags);
- if (!list_empty(&dst_q->done_list))
- dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
- done_entry);
- if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
- || dst_vb->state == VB2_BUF_STATE_ERROR))
+ /*
+ * If the last buffer was dequeued from the capture queue, signal
+ * userspace. DQBUF(CAPTURE) will return -EPIPE.
+ */
+ if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
rc |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&dst_q->done_lock, flags);
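
v4l2_m2m_suspend() marks the job queue paused and waits until any running job signals completion; v4l2_m2m_resume() clears the flag and re-kicks the scheduler. A hedged sketch of how a codec driver could wire the pair into its system-sleep callbacks; the device structure and all "my_" names are hypothetical:

#include <linux/device.h>
#include <linux/pm.h>
#include <media/v4l2-mem2mem.h>

struct my_codec_dev {
	struct v4l2_m2m_dev *m2m_dev;
};

static int __maybe_unused my_codec_suspend(struct device *dev)
{
	struct my_codec_dev *my_dev = dev_get_drvdata(dev);

	/* Blocks until the currently running job, if any, finishes. */
	v4l2_m2m_suspend(my_dev->m2m_dev);
	return 0;
}

static int __maybe_unused my_codec_resume(struct device *dev)
{
	struct my_codec_dev *my_dev = dev_get_drvdata(dev);

	/* Clears QUEUE_PAUSED and tries to run the next queued job. */
	v4l2_m2m_resume(my_dev->m2m_dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_codec_pm_ops, my_codec_suspend, my_codec_resume);
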
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 6b989fe5a0a9..a7d508e74d6b 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -309,6 +309,20 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
sd->ops->pad->enum_dv_timings(sd, dvt);
}
+static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config)
+{
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->get_mbus_config(sd, pad, config);
+}
+
+static int call_set_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config)
+{
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->set_mbus_config(sd, pad, config);
+}
+
static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.get_fmt = call_get_fmt,
.set_fmt = call_set_fmt,
@@ -321,6 +335,8 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
.set_edid = call_set_edid,
.dv_timings_cap = call_dv_timings_cap,
.enum_dv_timings = call_enum_dv_timings,
+ .get_mbus_config = call_get_mbus_config,
+ .set_mbus_config = call_set_mbus_config,
};
static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
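
The two wrappers let v4l2_subdev_call(sd, pad, get_mbus_config, ...) go through the common pad-index validation. A sketch of a sensor subdev implementing the get side; the two-lane CSI-2 configuration is purely illustrative:

#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>

static int my_sensor_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				     struct v4l2_mbus_config *config)
{
	/* Illustrative values only; a real driver reports its hardware. */
	config->type = V4L2_MBUS_CSI2_DPHY;
	config->flags = V4L2_MBUS_CSI2_2_LANE | V4L2_MBUS_CSI2_CHANNEL_0;
	return 0;
}

static const struct v4l2_subdev_pad_ops my_sensor_pad_ops = {
	.get_mbus_config = my_sensor_get_mbus_config,
};
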
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 46ff19df9f53..8dd0562de287 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (rw == READ)
flags |= FOLL_WRITE;
- dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
+ dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n",
data, size, dma->nr_pages);
err = pin_user_pages(data & PAGE_MASK, dma->nr_pages,
@@ -188,7 +188,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "pin_user_pages: err=%d [%d]\n", err,
+ dprintk(1, "pin_user_pages: err=%d [%lu]\n", err,
dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
@@ -208,11 +208,11 @@ static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
}
static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
- int nr_pages)
+ unsigned long nr_pages)
{
int i;
- dprintk(1, "init kernel [%d pages]\n", nr_pages);
+ dprintk(1, "init kernel [%lu pages]\n", nr_pages);
dma->direction = direction;
dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
@@ -238,11 +238,11 @@ static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
PAGE_KERNEL);
if (NULL == dma->vaddr) {
- dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
+ dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
goto out_free_pages;
}
- dprintk(1, "vmalloc is at addr %p, size=%d\n",
+ dprintk(1, "vmalloc is at addr %p, size=%lu\n",
dma->vaddr, nr_pages << PAGE_SHIFT);
memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
@@ -267,9 +267,9 @@ out_free_pages:
}
static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
- dma_addr_t addr, int nr_pages)
+ dma_addr_t addr, unsigned long nr_pages)
{
- dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
+ dprintk(1, "init overlay [%lu pages @ bus 0x%lx]\n",
nr_pages, (unsigned long)addr);
dma->direction = direction;
@@ -500,9 +500,11 @@ static int __videobuf_iolock(struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
- int err, pages;
- dma_addr_t bus;
struct videobuf_dma_sg_memory *mem = vb->priv;
+ unsigned long pages;
+ dma_addr_t bus;
+ int err;
+
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 2c79e95dd486..00e013b14703 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -32,8 +32,9 @@ config ARM_PL172_MPMC
config ATMEL_SDRAMC
bool "Atmel (Multi-port DDR-)SDRAM Controller"
- default y
- depends on ARCH_AT91 && OF
+ default y if ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
This driver is for Atmel SDRAM Controller or Atmel Multi-port
DDR-SDRAM Controller available on Atmel AT91SAM9 and SAMA5 SoCs.
@@ -42,8 +43,9 @@ config ATMEL_SDRAMC
config ATMEL_EBI
bool "Atmel EBI driver"
- default y
- depends on ARCH_AT91 && OF
+ default y if ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
select MFD_SYSCON
select MFD_ATMEL_SMC
help
@@ -52,6 +54,18 @@ config ATMEL_EBI
tree is used. This bus supports NANDs, external ethernet controller,
SRAMs, ATA devices, etc.
+config BRCMSTB_DPFE
+ bool "Broadcom STB DPFE driver" if COMPILE_TEST
+ default y if ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This driver provides access to the DPFE interface of Broadcom
+ STB SoCs. The firmware running on the DCPU inside the DDR PHY can
+ provide current information about the system's RAM, for instance
+ the DRAM refresh rate. This can be used as an indirect indicator
+ for the DRAM's temperature. Slower refresh rate means cooler RAM,
+ higher refresh rate means hotter RAM.
+
config BT1_L2_CTL
bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
@@ -65,7 +79,8 @@ config BT1_L2_CTL
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
- depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST
+ depends on OF
help
This driver is for the AEMIF module available in Texas Instruments
SoCs. AEMIF stands for Asynchronous External Memory Interface and
@@ -76,7 +91,7 @@ config TI_AEMIF
config TI_EMIF
tristate "Texas Instruments EMIF driver"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select DDR
help
This driver is for the EMIF module available in Texas Instruments
@@ -88,7 +103,8 @@ config TI_EMIF
temperature changes
config OMAP_GPMC
- bool
+ bool "Texas Instruments OMAP SoC GPMC driver" if COMPILE_TEST
+ depends on OF_ADDRESS
select GPIOLIB
help
This driver is for the General Purpose Memory Controller (GPMC)
@@ -112,7 +128,8 @@ config OMAP_GPMC_DEBUG
config TI_EMIF_SRAM
tristate "Texas Instruments EMIF SRAM driver"
- depends on (SOC_AM33XX || SOC_AM43XX) && SRAM
+ depends on SOC_AM33XX || SOC_AM43XX || (ARM && COMPILE_TEST)
+ depends on SRAM
help
This driver is for the EMIF module available on Texas Instruments
AM33XX and AM43XX SoCs and is required for PM. Certain parts of
@@ -122,8 +139,9 @@ config TI_EMIF_SRAM
config MVEBU_DEVBUS
bool "Marvell EBU Device Bus Controller"
- default y
- depends on PLAT_ORION && OF
+ default y if PLAT_ORION
+ depends on PLAT_ORION || COMPILE_TEST
+ depends on OF
help
This driver is for the Device Bus controller available in some
Marvell EBU SoCs such as Discovery (mv78xx0), Orion (88f5xxx) and
@@ -132,7 +150,7 @@ config MVEBU_DEVBUS
config FSL_CORENET_CF
tristate "Freescale CoreNet Error Reporting"
- depends on FSL_SOC_BOOKE
+ depends on FSL_SOC_BOOKE || COMPILE_TEST
help
Say Y for reporting of errors from the Freescale CoreNet
Coherency Fabric. Errors reported include accesses to
@@ -141,7 +159,7 @@ config FSL_CORENET_CF
represents a coherency violation.
config FSL_IFC
- bool
+ bool "Freescale IFC driver" if COMPILE_TEST
depends on FSL_SOC || ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST
depends on HAS_IOMEM
@@ -155,7 +173,7 @@ config JZ4780_NEMC
memory devices such as NAND and SRAM.
config MTK_SMI
- bool
+ bool "Mediatek SoC Memory Controller driver" if COMPILE_TEST
depends on ARCH_MEDIATEK || COMPILE_TEST
help
This driver is for the Memory Controller module in MediaTek SoCs,
@@ -164,7 +182,7 @@ config MTK_SMI
config DA8XX_DDRCTL
bool "Texas Instruments da8xx DDR2/mDDR driver"
- depends on ARCH_DAVINCI_DA8XX
+ depends on ARCH_DAVINCI_DA8XX || COMPILE_TEST
help
This driver is for the DDR2/mDDR Memory Controller present on
Texas Instruments da8xx SoCs. It's used to tweak various memory
@@ -172,16 +190,16 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
- default y
+ default y if ARM
depends on ARM
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
help
This driver is for the ARM PL351/PL353 Static Memory
Controller(SMC) module.
config RENESAS_RPCIF
tristate "Renesas RPC-IF driver"
- depends on ARCH_RENESAS
+ depends on ARCH_RENESAS || COMPILE_TEST
select REGMAP_MMIO
help
This supports Renesas R-Car Gen3 RPC-IF which provides either SPI
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b4533ffff2bc..e71cf7b99641 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -10,7 +10,7 @@ endif
obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
-obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o
+obj-$(CONFIG_BRCMSTB_DPFE) += brcmstb_dpfe.o
obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 60e8633b1175..f43ba69fbb3e 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -188,11 +188,6 @@ struct brcmstb_dpfe_priv {
struct mutex lock;
};
-static const char * const error_text[] = {
- "Success", "Header code incorrect", "Unknown command or argument",
- "Incorrect checksum", "Malformed command", "Timed out",
-};
-
/*
* Forward declaration of our sysfs attribute functions, so we can declare the
* attribute data structures early.
@@ -307,6 +302,20 @@ static const struct dpfe_api dpfe_api_v3 = {
},
};
+static const char *get_error_text(unsigned int i)
+{
+ static const char * const error_text[] = {
+ "Success", "Header code incorrect",
+ "Unknown command or argument", "Incorrect checksum",
+ "Malformed command", "Timed out", "Unknown error",
+ };
+
+ if (unlikely(i >= ARRAY_SIZE(error_text)))
+ i = ARRAY_SIZE(error_text) - 1;
+
+ return error_text[i];
+}
+
static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
u32 val;
@@ -445,7 +454,7 @@ static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
}
if (resp != 0) {
mutex_unlock(&priv->lock);
- return -ETIMEDOUT;
+ return -ffs(DCPU_RET_ERR_TIMEDOUT);
}
/* Compute checksum over the message */
@@ -647,8 +656,10 @@ static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
return (ret == -ENOENT) ? -EPROBE_DEFER : ret;
ret = __verify_firmware(&init, fw);
- if (ret)
- return -EFAULT;
+ if (ret) {
+ ret = -EFAULT;
+ goto release_fw;
+ }
__disable_dcpu(priv);
@@ -667,18 +678,20 @@ static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
if (ret)
- return ret;
+ goto release_fw;
ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
if (ret)
- return ret;
+ goto release_fw;
ret = __verify_fw_checksum(&init, priv, header, init.chksum);
if (ret)
- return ret;
+ goto release_fw;
__enable_dcpu(priv);
- return 0;
+release_fw:
+ release_firmware(fw);
+ return ret;
}
static ssize_t generic_show(unsigned int command, u32 response[],
@@ -691,7 +704,7 @@ static ssize_t generic_show(unsigned int command, u32 response[],
ret = __send_command(priv, command, response);
if (ret < 0)
- return sprintf(buf, "ERROR: %s\n", error_text[-ret]);
+ return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
return 0;
}
@@ -888,11 +901,8 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
}
ret = brcmstb_dpfe_download_firmware(priv);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Couldn't download firmware -- %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Couldn't download firmware\n");
ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
if (!ret)
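
Before this fix, brcmstb_dpfe_download_firmware() leaked the firmware blob on every path, success included; release_firmware() now runs unconditionally. A generic sketch of the pattern the fix converges on, with a placeholder in place of the real programming sequence:

#include <linux/firmware.h>

static int my_program(struct device *dev, const u8 *data, size_t size)
{
	return 0;	/* stand-in for the real download sequence */
}

static int my_download(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "my_fw.bin", dev);
	if (ret)
		return ret;

	ret = my_program(dev, fw->data, fw->size);

	release_firmware(fw);	/* on success and on failure alike */
	return ret;
}
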
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index bb6a71d26798..ddb1879f07d3 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -131,16 +131,7 @@ static int emif_regdump_show(struct seq_file *s, void *unused)
return 0;
}
-static int emif_regdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, emif_regdump_show, inode->i_private);
-}
-
-static const struct file_operations emif_regdump_fops = {
- .open = emif_regdump_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(emif_regdump);
static int emif_mr4_show(struct seq_file *s, void *unused)
{
@@ -150,48 +141,16 @@ static int emif_mr4_show(struct seq_file *s, void *unused)
return 0;
}
-static int emif_mr4_open(struct inode *inode, struct file *file)
-{
- return single_open(file, emif_mr4_show, inode->i_private);
-}
-
-static const struct file_operations emif_mr4_fops = {
- .open = emif_mr4_open,
- .read = seq_read,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(emif_mr4);
static int __init_or_module emif_debugfs_init(struct emif_data *emif)
{
- struct dentry *dentry;
- int ret;
-
- dentry = debugfs_create_dir(dev_name(emif->dev), NULL);
- if (!dentry) {
- ret = -ENOMEM;
- goto err0;
- }
- emif->debugfs_root = dentry;
-
- dentry = debugfs_create_file("regcache_dump", S_IRUGO,
- emif->debugfs_root, emif, &emif_regdump_fops);
- if (!dentry) {
- ret = -ENOMEM;
- goto err1;
- }
-
- dentry = debugfs_create_file("mr4", S_IRUGO,
- emif->debugfs_root, emif, &emif_mr4_fops);
- if (!dentry) {
- ret = -ENOMEM;
- goto err1;
- }
-
+ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
+ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
+ &emif_regdump_fops);
+ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
+ &emif_mr4_fops);
return 0;
-err1:
- debugfs_remove_recursive(emif->debugfs_root);
-err0:
- return ret;
}
static void __exit emif_debugfs_exit(struct emif_data *emif)
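
DEFINE_SHOW_ATTRIBUTE() generates the open helper and file_operations that the hunks above delete by hand. Roughly, paraphrasing include/linux/seq_file.h, DEFINE_SHOW_ATTRIBUTE(emif_regdump) expands to:

static int emif_regdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, emif_regdump_show, inode->i_private);
}

static const struct file_operations emif_regdump_fops = {
	.owner		= THIS_MODULE,
	.open		= emif_regdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The only visible difference from the removed code is the .owner and .llseek initializers, which the hand-rolled emif versions were missing.
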
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
index 0b0ed72016da..0309bd5a1800 100644
--- a/drivers/memory/fsl-corenet-cf.c
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -211,10 +211,8 @@ static int ccf_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, ccf);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "%s: no irq\n", __func__);
- return -ENXIO;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
if (ret) {
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index c21262502581..691e4c344cf8 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -19,6 +19,9 @@
/* mt8173 */
#define SMI_LARB_MMU_EN 0xf00
+/* mt8167 */
+#define MT8167_SMI_LARB_MMU_EN 0xfc0
+
/* mt2701 */
#define REG_SMI_SECUR_CON_BASE 0x5c0
@@ -179,6 +182,13 @@ static void mtk_smi_larb_config_port_mt8173(struct device *dev)
writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
}
+static void mtk_smi_larb_config_port_mt8167(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
+}
+
static void mtk_smi_larb_config_port_gen1(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
@@ -226,6 +236,11 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
.config_port = mtk_smi_larb_config_port_mt8173,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+ /* mt8167 does not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8167,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
.port_in_larb = {
LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
@@ -255,6 +270,10 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
static const struct of_device_id mtk_smi_larb_of_ids[] = {
{
+ .compatible = "mediatek,mt8167-smi-larb",
+ .data = &mtk_smi_larb_mt8167
+ },
+ {
.compatible = "mediatek,mt8173-smi-larb",
.data = &mtk_smi_larb_mt8173
},
@@ -419,6 +438,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.data = &mtk_smi_common_gen2,
},
{
+ .compatible = "mediatek,mt8167-smi-common",
+ .data = &mtk_smi_common_gen2,
+ },
+ {
.compatible = "mediatek,mt2701-smi-common",
.data = &mtk_smi_common_gen1,
},
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index ca0097664b12..cfa730cfd145 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -33,8 +33,6 @@
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <asm/mach-types.h>
-
#define DEVICE_NAME "omap-gpmc"
/* GPMC register offsets */
@@ -245,7 +243,6 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
/* Define chip-selects as reserved by default until probe completes */
static unsigned int gpmc_cs_num = GPMC_CS_NUM;
static unsigned int gpmc_nr_waitpins;
-static resource_size_t phys_base, mem_size;
static unsigned int gpmc_capability;
static void __iomem *gpmc_base;
@@ -634,14 +631,6 @@ static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max
return 0;
}
-#define GPMC_SET_ONE_CD_MAX(reg, st, end, max, field, cd) \
- if (set_gpmc_timing_reg(cs, (reg), (st), (end), (max), \
- t->field, (cd), #field) < 0) \
- return -1
-
-#define GPMC_SET_ONE(reg, st, end, field) \
- GPMC_SET_ONE_CD_MAX(reg, st, end, 0, field, GPMC_CD_FCLK)
-
/**
* gpmc_calc_waitmonitoring_divider - calculate proper GPMCFCLKDIVIDER based on WAITMONITORINGTIME
* WAITMONITORINGTIME will be _at least_ as long as desired, i.e.
@@ -700,12 +689,12 @@ int gpmc_calc_divider(unsigned int sync_clk)
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
const struct gpmc_settings *s)
{
- int div;
+ int div, ret;
u32 l;
div = gpmc_calc_divider(t->sync_clk);
if (div < 0)
- return div;
+ return -EINVAL;
/*
* See if we need to change the divider for waitmonitoringtime.
@@ -729,57 +718,114 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
__func__,
t->wait_monitoring
);
- return -1;
+ return -ENXIO;
}
}
- GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
- GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
+ ret = 0;
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 0, 3, 0, t->cs_on,
+ GPMC_CD_FCLK, "cs_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 8, 12, 0, t->cs_rd_off,
+ GPMC_CD_FCLK, "cs_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG2, 16, 20, 0, t->cs_wr_off,
+ GPMC_CD_FCLK, "cs_wr_off");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 0, 3, 0, t->adv_on,
+ GPMC_CD_FCLK, "adv_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 8, 12, 0, t->adv_rd_off,
+ GPMC_CD_FCLK, "adv_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 16, 20, 0, t->adv_wr_off,
+ GPMC_CD_FCLK, "adv_wr_off");
+ if (ret)
+ return -ENXIO;
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
if (gpmc_capability & GPMC_HAS_MUX_AAD) {
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 4, 6, adv_aad_mux_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 24, 26, adv_aad_mux_rd_off);
- GPMC_SET_ONE(GPMC_CS_CONFIG3, 28, 30, adv_aad_mux_wr_off);
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 4, 6, 0,
+ t->adv_aad_mux_on, GPMC_CD_FCLK,
+ "adv_aad_mux_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 24, 26, 0,
+ t->adv_aad_mux_rd_off, GPMC_CD_FCLK,
+ "adv_aad_mux_rd_off");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG3, 28, 30, 0,
+ t->adv_aad_mux_wr_off, GPMC_CD_FCLK,
+ "adv_aad_mux_wr_off");
+ if (ret)
+ return -ENXIO;
}
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 0, 3, 0, t->oe_on,
+ GPMC_CD_FCLK, "oe_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 8, 12, 0, t->oe_off,
+ GPMC_CD_FCLK, "oe_off");
if (gpmc_capability & GPMC_HAS_MUX_AAD) {
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 4, 6, oe_aad_mux_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 13, 15, oe_aad_mux_off);
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 4, 6, 0,
+ t->oe_aad_mux_on, GPMC_CD_FCLK,
+ "oe_aad_mux_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 13, 15, 0,
+ t->oe_aad_mux_off, GPMC_CD_FCLK,
+ "oe_aad_mux_off");
+ }
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 16, 19, 0, t->we_on,
+ GPMC_CD_FCLK, "we_on");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG4, 24, 28, 0, t->we_off,
+ GPMC_CD_FCLK, "we_off");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 0, 4, 0, t->rd_cycle,
+ GPMC_CD_FCLK, "rd_cycle");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 8, 12, 0, t->wr_cycle,
+ GPMC_CD_FCLK, "wr_cycle");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 16, 20, 0, t->access,
+ GPMC_CD_FCLK, "access");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG5, 24, 27, 0,
+ t->page_burst_access, GPMC_CD_FCLK,
+ "page_burst_access");
+ if (ret)
+ return -ENXIO;
+
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 0, 3, 0,
+ t->bus_turnaround, GPMC_CD_FCLK,
+ "bus_turnaround");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 8, 11, 0,
+ t->cycle2cycle_delay, GPMC_CD_FCLK,
+ "cycle2cycle_delay");
+ if (ret)
+ return -ENXIO;
+
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 16, 19, 0,
+ t->wr_data_mux_bus, GPMC_CD_FCLK,
+ "wr_data_mux_bus");
+ if (ret)
+ return -ENXIO;
+ }
+ if (gpmc_capability & GPMC_HAS_WR_ACCESS) {
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG6, 24, 28, 0,
+ t->wr_access, GPMC_CD_FCLK,
+ "wr_access");
+ if (ret)
+ return -ENXIO;
}
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
- GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
-
- GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
- GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
- GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
-
- GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
-
- GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
- GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
-
- if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
- GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
- if (gpmc_capability & GPMC_HAS_WR_ACCESS)
- GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
l &= ~0x03;
l |= (div - 1);
gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
- GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
- GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
- wait_monitoring, GPMC_CD_CLK);
- GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
- GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
- clk_activation, GPMC_CD_FCLK);
+ ret = 0;
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG1, 18, 19,
+ GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
+ t->wait_monitoring, GPMC_CD_CLK,
+ "wait_monitoring");
+ ret |= set_gpmc_timing_reg(cs, GPMC_CS_CONFIG1, 25, 26,
+ GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
+ t->clk_activation, GPMC_CD_FCLK,
+ "clk_activation");
+ if (ret)
+ return -ENXIO;
#ifdef CONFIG_OMAP_GPMC_DEBUG
pr_info("GPMC CS%d CLK period is %lu ns (div %d)\n",
@@ -870,20 +916,6 @@ static bool gpmc_cs_reserved(int cs)
return gpmc->flags & GPMC_CS_RESERVED;
}
-static void gpmc_cs_set_name(int cs, const char *name)
-{
- struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
-
- gpmc->name = name;
-}
-
-static const char *gpmc_cs_get_name(int cs)
-{
- struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
-
- return gpmc->name;
-}
-
static unsigned long gpmc_mem_align(unsigned long size)
{
int order;
@@ -929,56 +961,13 @@ static int gpmc_cs_delete_mem(int cs)
return r;
}
-/**
- * gpmc_cs_remap - remaps a chip-select physical base address
- * @cs: chip-select to remap
- * @base: physical base address to re-map chip-select to
- *
- * Re-maps a chip-select to a new physical base address specified by
- * "base". Returns 0 on success and appropriate negative error code
- * on failure.
- */
-static int gpmc_cs_remap(int cs, u32 base)
-{
- int ret;
- u32 old_base, size;
-
- if (cs > gpmc_cs_num) {
- pr_err("%s: requested chip-select is disabled\n", __func__);
- return -ENODEV;
- }
-
- /*
- * Make sure we ignore any device offsets from the GPMC partition
- * allocated for the chip select and that the new base confirms
- * to the GPMC 16MB minimum granularity.
- */
- base &= ~(SZ_16M - 1);
-
- gpmc_cs_get_memconf(cs, &old_base, &size);
- if (base == old_base)
- return 0;
-
- ret = gpmc_cs_delete_mem(cs);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_insert_mem(cs, base, size);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_set_memconf(cs, base, size);
-
- return ret;
-}
-
int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
{
struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
struct resource *res = &gpmc->mem;
int r = -1;
- if (cs > gpmc_cs_num) {
+ if (cs >= gpmc_cs_num) {
pr_err("%s: requested chip-select is disabled\n", __func__);
return -ENODEV;
}
@@ -1025,8 +1014,7 @@ void gpmc_cs_free(int cs)
spin_lock(&gpmc_mem_lock);
if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
- printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
- BUG();
+ WARN(1, "Trying to free non-reserved GPMC CS%d\n", cs);
spin_unlock(&gpmc_mem_lock);
return;
}
@@ -1896,6 +1884,63 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ }
};
+static void gpmc_cs_set_name(int cs, const char *name)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ gpmc->name = name;
+}
+
+static const char *gpmc_cs_get_name(int cs)
+{
+ struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
+
+ return gpmc->name;
+}
+
+/**
+ * gpmc_cs_remap - remaps a chip-select physical base address
+ * @cs: chip-select to remap
+ * @base: physical base address to re-map chip-select to
+ *
+ * Re-maps a chip-select to a new physical base address specified by
+ * "base". Returns 0 on success and appropriate negative error code
+ * on failure.
+ */
+static int gpmc_cs_remap(int cs, u32 base)
+{
+ int ret;
+ u32 old_base, size;
+
+ if (cs >= gpmc_cs_num) {
+ pr_err("%s: requested chip-select is disabled\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * Make sure we ignore any device offsets from the GPMC partition
+ * allocated for the chip select and that the new base confirms
+ * to the GPMC 16MB minimum granularity.
+ */
+ base &= ~(SZ_16M - 1);
+
+ gpmc_cs_get_memconf(cs, &old_base, &size);
+ if (base == old_base)
+ return 0;
+
+ ret = gpmc_cs_delete_mem(cs);
+ if (ret < 0)
+ return ret;
+
+ ret = gpmc_cs_insert_mem(cs, base, size);
+ if (ret < 0)
+ return ret;
+
+ ret = gpmc_cs_set_memconf(cs, base, size);
+
+ return ret;
+}
+
/**
* gpmc_read_settings_dt - read gpmc settings from device-tree
* @np: pointer to device-tree node for a gpmc child device
@@ -2265,6 +2310,10 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
}
}
#else
+void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
+{
+ memset(p, 0, sizeof(*p));
+}
static int gpmc_probe_dt(struct platform_device *pdev)
{
return 0;
@@ -2347,12 +2396,9 @@ static int gpmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
+ if (!res)
return -ENOENT;
- phys_base = res->start;
- mem_size = resource_size(res);
-
gpmc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gpmc_base))
return PTR_ERR(gpmc_base);
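
The GPMC_SET_ONE macros hid an early return inside each expansion; the rewrite above makes every set_gpmc_timing_reg() call explicit, ORs the negative-errno results together, and tests once per register batch. A stripped-down sketch of that aggregation idiom, with a placeholder standing in for set_gpmc_timing_reg():

#include <linux/errno.h>

static int my_set_field(int field)
{
	return field < 0 ? -EINVAL : 0;	/* placeholder */
}

static int my_program_batch(void)
{
	int ret = 0;

	/* Any failure makes ret non-zero; which call failed is not
	 * preserved, only that the batch failed. */
	ret |= my_set_field(0);
	ret |= my_set_field(1);
	ret |= my_set_field(2);
	if (ret)
		return -ENXIO;

	return 0;
}
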
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 88f51ec8f1d1..f2a33a1af836 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -199,10 +199,8 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
rpc->dirmap = NULL;
rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rpc->rstc))
- return PTR_ERR(rpc->rstc);
- return 0;
+ return PTR_ERR_OR_ZERO(rpc->rstc);
}
EXPORT_SYMBOL(rpcif_sw_init);
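
PTR_ERR_OR_ZERO() folds the removed IS_ERR()/PTR_ERR() tail into a single expression. Its shape, paraphrased from include/linux/err.h:

static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}
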
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index b9c7956e5031..c5ee4121a4d2 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -98,6 +98,8 @@ MODULE_PARM_DESC(irqmode, "Enable IRQ mode (0=off [default], 1=on)");
/**
* struct dmc_opp_table - Operating level description
+ * @freq_hz: target frequency in Hz
+ * @volt_uv: target voltage in uV
*
* Covers frequency and voltage settings of the DMC operating mode.
*/
@@ -108,6 +110,41 @@ struct dmc_opp_table {
/**
* struct exynos5_dmc - main structure describing DMC device
+ * @dev: DMC device
+ * @df: devfreq device structure returned by devfreq framework
+ * @gov_data: configuration of devfreq governor
+ * @base_drexi0: DREX0 registers mapping
+ * @base_drexi1: DREX1 registers mapping
+ * @clk_regmap: regmap for clock controller registers
+ * @lock: protects curr_rate and frequency/voltage setting section
+ * @curr_rate: current frequency
+ * @curr_volt: current voltage
+ * @opp: OPP table
+ * @opp_count: number of 'opp' elements
+ * @timings_arr_size: number of 'timings' elements
+ * @timing_row: values for timing row register, for each OPP
+ * @timing_data: values for timing data register, for each OPP
+ * @timing_power: values for timing power register, for each OPP
+ * @timings: DDR memory timings, from device tree
+ * @min_tck: DDR memory minimum timing values, from device tree
+ * @bypass_timing_row: value for timing row register for bypass timings
+ * @bypass_timing_data: value for timing data register for bypass timings
+ * @bypass_timing_power: value for timing power register for bypass
+ * timings
+ * @vdd_mif: Memory interface regulator
+ * @fout_spll: clock: SPLL
+ * @fout_bpll: clock: BPLL
+ * @mout_spll: clock: mux SPLL
+ * @mout_bpll: clock: mux BPLL
+ * @mout_mclk_cdrex: clock: mux mclk_cdrex
+ * @mout_mx_mspll_ccore: clock: mux mx_mspll_ccore
+ * @counter: devfreq events
+ * @num_counters: number of 'counter' elements
+ * @last_overflow_ts: time (in ns) of last overflow of each DREX
+ * @load: utilization in percent
+ * @total: total time between devfreq events
+ * @in_irq_mode: whether running in interrupt mode (true)
+ * or polling (false)
*
* The main structure for the Dynamic Memory Controller which covers clocks,
* memory regions, HW information, parameters and current operating mode.
@@ -119,12 +156,11 @@ struct exynos5_dmc {
void __iomem *base_drexi0;
void __iomem *base_drexi1;
struct regmap *clk_regmap;
+ /* Protects curr_rate and frequency/voltage setting section */
struct mutex lock;
unsigned long curr_rate;
unsigned long curr_volt;
- unsigned long bypass_rate;
struct dmc_opp_table *opp;
- struct dmc_opp_table opp_bypass;
int opp_count;
u32 timings_arr_size;
u32 *timing_row;
@@ -142,8 +178,6 @@ struct exynos5_dmc {
struct clk *mout_bpll;
struct clk *mout_mclk_cdrex;
struct clk *mout_mx_mspll_ccore;
- struct clk *mx_mspll_ccore_phy;
- struct clk *mout_mx_mspll_ccore_phy;
struct devfreq_event_dev **counter;
int num_counters;
u64 last_overflow_ts[2];
@@ -169,7 +203,7 @@ struct timing_reg {
unsigned int val;
};
-static const struct timing_reg timing_row[] = {
+static const struct timing_reg timing_row_reg_fields[] = {
TIMING_FIELD("tRFC", 24, 31),
TIMING_FIELD("tRRD", 20, 23),
TIMING_FIELD("tRP", 16, 19),
@@ -178,7 +212,7 @@ static const struct timing_reg timing_row[] = {
TIMING_FIELD("tRAS", 0, 5),
};
-static const struct timing_reg timing_data[] = {
+static const struct timing_reg timing_data_reg_fields[] = {
TIMING_FIELD("tWTR", 28, 31),
TIMING_FIELD("tWR", 24, 27),
TIMING_FIELD("tRTP", 20, 23),
@@ -189,7 +223,7 @@ static const struct timing_reg timing_data[] = {
TIMING_FIELD("RL", 0, 3),
};
-static const struct timing_reg timing_power[] = {
+static const struct timing_reg timing_power_reg_fields[] = {
TIMING_FIELD("tFAW", 26, 31),
TIMING_FIELD("tXSR", 16, 25),
TIMING_FIELD("tXP", 8, 15),
@@ -197,8 +231,9 @@ static const struct timing_reg timing_power[] = {
TIMING_FIELD("tMRD", 0, 3),
};
-#define TIMING_COUNT (ARRAY_SIZE(timing_row) + ARRAY_SIZE(timing_data) + \
- ARRAY_SIZE(timing_power))
+#define TIMING_COUNT (ARRAY_SIZE(timing_row_reg_fields) + \
+ ARRAY_SIZE(timing_data_reg_fields) + \
+ ARRAY_SIZE(timing_power_reg_fields))
static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
{
@@ -346,7 +381,6 @@ err_opp:
/**
* exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
* @dmc: device for which the new settings is going to be applied
- * @param: DRAM parameters which passes timing data
*
* Low-level function for changing timings for DRAM memory clocking from
* 'bypass' clock source (fixed frequency @400MHz).
@@ -453,9 +487,6 @@ static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
unsigned long target_volt)
{
int ret = 0;
- unsigned long bypass_volt = dmc->opp_bypass.volt_uv;
-
- target_volt = max(bypass_volt, target_volt);
if (dmc->curr_volt >= target_volt)
return 0;
@@ -617,6 +648,7 @@ disable_clocks:
* requested
* @target_volt: returned voltage which corresponds to the returned
* frequency
+ * @flags: devfreq flags provided for this frequency change request
*
* Function gets requested frequency and checks OPP framework for needed
* frequency and voltage. It populates the values 'target_rate' and
@@ -908,7 +940,10 @@ static int exynos5_dmc_get_status(struct device *dev,
int ret;
if (dmc->in_irq_mode) {
+ mutex_lock(&dmc->lock);
stat->current_frequency = dmc->curr_rate;
+ mutex_unlock(&dmc->lock);
+
stat->busy_time = dmc->load;
stat->total_time = dmc->total;
} else {
@@ -950,7 +985,7 @@ static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
return 0;
}
-/**
+/*
* exynos5_dmc_df_profile - Devfreq governor's profile structure
*
* It provides to the devfreq framework needed functions and polling period.
@@ -993,7 +1028,9 @@ exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
/**
* create_timings_aligned() - Create register values and align with standard
* @dmc: device for which the frequency is going to be set
- * @idx: speed bin in the OPP table
+ * @reg_timing_row: array to fill with values for timing row register
+ * @reg_timing_data: array to fill with values for timing data register
+ * @reg_timing_power: array to fill with values for timing power register
* @clk_period_ps: the period of the clock, known as tCK
*
* The function calculates timings and creates a register value ready for
@@ -1018,117 +1055,117 @@ static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
val = dmc->timings->tRFC / clk_period_ps;
val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRFC);
- reg = &timing_row[0];
+ reg = &timing_row_reg_fields[0];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRRD / clk_period_ps;
val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRRD);
- reg = &timing_row[1];
+ reg = &timing_row_reg_fields[1];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRPab / clk_period_ps;
val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRPab);
- reg = &timing_row[2];
+ reg = &timing_row_reg_fields[2];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRCD / clk_period_ps;
val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRCD);
- reg = &timing_row[3];
+ reg = &timing_row_reg_fields[3];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRC / clk_period_ps;
val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRC);
- reg = &timing_row[4];
+ reg = &timing_row_reg_fields[4];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRAS / clk_period_ps;
val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRAS);
- reg = &timing_row[5];
+ reg = &timing_row_reg_fields[5];
*reg_timing_row |= TIMING_VAL2REG(reg, val);
/* data related timings */
val = dmc->timings->tWTR / clk_period_ps;
val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tWTR);
- reg = &timing_data[0];
+ reg = &timing_data_reg_fields[0];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tWR / clk_period_ps;
val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tWR);
- reg = &timing_data[1];
+ reg = &timing_data_reg_fields[1];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRTP / clk_period_ps;
val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRTP);
- reg = &timing_data[2];
+ reg = &timing_data_reg_fields[2];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tW2W_C2C / clk_period_ps;
val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tW2W_C2C);
- reg = &timing_data[3];
+ reg = &timing_data_reg_fields[3];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tR2R_C2C / clk_period_ps;
val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tR2R_C2C);
- reg = &timing_data[4];
+ reg = &timing_data_reg_fields[4];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tWL / clk_period_ps;
val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tWL);
- reg = &timing_data[5];
+ reg = &timing_data_reg_fields[5];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tDQSCK / clk_period_ps;
val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tDQSCK);
- reg = &timing_data[6];
+ reg = &timing_data_reg_fields[6];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tRL / clk_period_ps;
val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tRL);
- reg = &timing_data[7];
+ reg = &timing_data_reg_fields[7];
*reg_timing_data |= TIMING_VAL2REG(reg, val);
/* power related timings */
val = dmc->timings->tFAW / clk_period_ps;
val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tFAW);
- reg = &timing_power[0];
+ reg = &timing_power_reg_fields[0];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tXSR / clk_period_ps;
val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tXSR);
- reg = &timing_power[1];
+ reg = &timing_power_reg_fields[1];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tXP / clk_period_ps;
val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tXP);
- reg = &timing_power[2];
+ reg = &timing_power_reg_fields[2];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tCKE / clk_period_ps;
val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tCKE);
- reg = &timing_power[3];
+ reg = &timing_power_reg_fields[3];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
val = dmc->timings->tMRD / clk_period_ps;
val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
val = max(val, dmc->min_tck->tMRD);
- reg = &timing_power[4];
+ reg = &timing_power_reg_fields[4];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
return 0;
@@ -1263,8 +1300,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
- dmc->bypass_rate = clk_get_rate(dmc->mout_mx_mspll_ccore);
-
clk_prepare_enable(dmc->fout_bpll);
clk_prepare_enable(dmc->mout_bpll);
@@ -1293,7 +1328,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
int counters_size;
int ret, i;
- dmc->num_counters = devfreq_event_get_edev_count(dmc->dev);
+ dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
+ "devfreq-events");
if (dmc->num_counters < 0) {
dev_err(dmc->dev, "could not get devfreq-event counters\n");
return dmc->num_counters;
@@ -1306,7 +1342,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
for (i = 0; i < dmc->num_counters; i++) {
dmc->counter[i] =
- devfreq_event_get_edev_by_phandle(dmc->dev, i);
+ devfreq_event_get_edev_by_phandle(dmc->dev,
+ "devfreq-events", i);
if (IS_ERR_OR_NULL(dmc->counter[i]))
return -EPROBE_DEFER;
}
@@ -1330,7 +1367,6 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
/**
* exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
* @dmc: device which is used for changing this feature
- * @set: a boolean state passing enable/disable request
*
* There is a need of pausing DREX DMC when divider or MUX in clock tree
* changes its configuration. In such situation access to the memory is blocked
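
Each conversion in create_timings_aligned() above rounds a picosecond timing up to whole clock cycles and clamps it to the DDR minimum tCK; the repeated two-line divide-and-remainder idiom is a ceiling division. A possible helper expressing the same arithmetic (not what the driver uses):

#include <linux/kernel.h>

static u32 my_timing_cycles(u32 t_ps, u32 clk_period_ps, u32 min_tck)
{
	/* val = t/clk; val += t%clk ? 1 : 0; val = max(val, min_tck); */
	return max(DIV_ROUND_UP(t_ps, clk_period_ps), min_tck);
}
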
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index ba5cb1f4dfc2..76ace42a688a 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1060,19 +1060,7 @@ static int tegra_emc_debug_available_rates_show(struct seq_file *s,
return 0;
}
-static int tegra_emc_debug_available_rates_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, tegra_emc_debug_available_rates_show,
- inode->i_private);
-}
-
-static const struct file_operations tegra_emc_debug_available_rates_fops = {
- .open = tegra_emc_debug_available_rates_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
{
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 493b5dc3a4b3..0cede24479bf 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -957,7 +957,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
static const unsigned int tegra124_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
- TEGRA_SWGROUP_GPU,
TEGRA_SWGROUP_VIC,
};
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index 8478f59db432..fa8af17b0e2d 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -172,14 +172,8 @@ static int tegra186_emc_probe(struct platform_device *pdev)
return -ENOMEM;
emc->bpmp = tegra_bpmp_get(&pdev->dev);
- if (IS_ERR(emc->bpmp)) {
- err = PTR_ERR(emc->bpmp);
-
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get BPMP: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(emc->bpmp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(emc->bpmp), "failed to get BPMP\n");
emc->clk = devm_clk_get(&pdev->dev, "emc");
if (IS_ERR(emc->clk)) {
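
dev_err_probe() collapses the -EPROBE_DEFER special case that the removed lines handled by hand: it logs at error level for real failures, stays quiet on deferral (recording the reason for /sys/kernel/debug/devices_deferred instead), and returns the error code so it can sit directly in a return statement. Rough semantics, paraphrased; the real helper also takes a format string:

#include <linux/device.h>

static int my_dev_err_probe(struct device *dev, int err, const char *msg)
{
	/* Sketch keeps only the control flow of the real helper. */
	if (err != -EPROBE_DEFER)
		dev_err(dev, "error %d: %s\n", err, msg);
	return err;
}
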
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
index ff55a17896fa..0ebfa8eccf0c 100644
--- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -501,7 +501,6 @@ static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
emc_cfg_o = emc_readl(emc, EMC_CFG);
emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
EMC_CFG_DRAM_ACPD |
- EMC_CFG_DRAM_CLKSTOP_PD |
EMC_CFG_DRAM_CLKSTOP_PD);
@@ -1044,7 +1043,7 @@ static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
!opt_cc_short_zcal && opt_short_zcal) {
value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
- ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
+ ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
} else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
value = 0; /* EMC_ZCAL_INTERVAL reset value. */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index cc0482434c75..7fb8b5438bf4 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -842,7 +842,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
.la = {
.reg = 0x3dc,
- .shift = 0,
+ .shift = 16,
.mask = 0xff,
.def = 0x80,
},
@@ -1073,7 +1073,7 @@ static const struct tegra_smmu_soc tegra210_smmu_soc = {
.num_groups = ARRAY_SIZE(tegra210_groups),
.supports_round_robin_arbitration = true,
.supports_request_limit = true,
- .num_tlb_lines = 32,
+ .num_tlb_lines = 48,
.num_asids = 128,
};
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 178954228631..8004dd64d09a 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1223,7 +1223,7 @@ static int msb_read_boot_blocks(struct msb_data *msb)
}
if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
- dbg("the pba at %d doesn' contain boot block ID", pba);
+ dbg("the pba at %d doesn't contain boot block ID", pba);
continue;
}
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 1074b882c57c..24aebad60366 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2593,7 +2593,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Get the data transfer speeds
*/
data_sz = ioc->spi_data.sdp0length * 4;
- pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
if (pg0_alloc) {
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = data_sz;
@@ -2657,8 +2657,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
- pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
- ioc->pcidev, data_sz, &page_dma);
+ pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
if (pg3_alloc) {
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 4314a3352b96..f92b0433f599 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -763,7 +763,7 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
if (ppage0_alloc) {
try_again:
@@ -904,7 +904,7 @@ start_over:
if (data_sz < sizeof(FCPortPage1_t))
data_sz = sizeof(FCPortPage1_t);
- page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
+ page1_alloc = pci_alloc_consistent(ioc->pcidev,
data_sz,
&page1_dma);
if (!page1_alloc)
@@ -922,8 +922,6 @@ start_over:
}
}
- memset(page1_alloc,0,data_sz);
-
cfg.physAddr = page1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8543f0324d5a..e7f0d4ae0f96 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1176,8 +1176,10 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- if((hd = shost_priv(host)) == NULL)
- return;
+ if (host == NULL)
+ hd = NULL;
+ else
+ hd = shost_priv(host);
mptscsih_shutdown(pdev);
@@ -1193,14 +1195,15 @@ mptscsih_remove(struct pci_dev *pdev)
"Free'd ScsiLookup (%d) memory\n",
ioc->name, sz1));
- kfree(hd->info_kbuf);
+ if (hd)
+ kfree(hd->info_kbuf);
/* NULL the Scsi_Host pointer
*/
ioc->sh = NULL;
- scsi_host_put(host);
-
+ if (host)
+ scsi_host_put(host);
mpt_detach(pdev);
}
@@ -1516,7 +1519,6 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
int ii;
int retval;
MPT_ADAPTER *ioc = hd->ioc;
- unsigned long timeleft;
u8 issue_hard_reset;
u32 ioc_raw_state;
unsigned long time_count;
@@ -1614,7 +1616,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
}
}
- timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = FAILED;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 33df0837ab41..8b99a13669bf 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -290,7 +290,8 @@ config MFD_CS47L92
config MFD_ASIC3
bool "Compaq ASIC3"
- depends on GPIOLIB && ARM
+ depends on GPIOLIB
+ depends on ARM || COMPILE_TEST
select MFD_CORE
help
This driver supports the ASIC3 multifunction chip found on many
@@ -398,6 +399,17 @@ config MFD_DLN2
etc. must be enabled in order to use the functionality of
the device.
+config MFD_ENE_KB3930
+ tristate "ENE KB3930 Embedded Controller support"
+ depends on I2C
+ depends on MACH_MMP3_DT || COMPILE_TEST
+ select MFD_CORE
+ help
+	  This adds support for the power-off functionality and access to
+	  the registers that control LEDs and USB port power on the ENE
+	  KB3930 Embedded Controller. To use the LED functionality,
+	  LEDS_ARIEL must be enabled.
+
config MFD_EXYNOS_LPASS
tristate "Samsung Exynos SoC Low Power Audio Subsystem"
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -493,7 +505,7 @@ config MFD_HI6421_PMIC
Add support for HiSilicon Hi6421 PMIC. Hi6421 includes multi-
functions, such as regulators, RTC, codec, Coulomb counter, etc.
This driver includes core APIs _only_. You have to select
- individul components like voltage regulators under corresponding
+ individual components like voltage regulators under corresponding
menus in order to enable them.
We communicate with the Hi6421 via memory-mapped I/O.
@@ -1162,6 +1174,29 @@ config MFD_SI476X_CORE
To compile this driver as a module, choose M here: the
module will be called si476x-core.
+config MFD_SIMPLE_MFD_I2C
+ tristate
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This driver creates a single register map that is intended to be
+	  shared by all sub-devices.
+
+ Once the register map has been successfully initialised, any
+ sub-devices represented by child nodes in Device Tree will be
+ subsequently registered.
+
+config MFD_SL28CPLD
+ tristate "Kontron sl28cpld Board Management Controller"
+ depends on I2C
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Say yes here to enable support for the Kontron sl28cpld board
+ management controller.
+
+ It can be found on the following boards:
+ * SMARC-sAL28
+
config MFD_SM501
tristate "Silicon Motion SM501"
depends on HAS_DMA
@@ -2118,5 +2153,18 @@ config SGI_MFD_IOC3
If you have an SGI Origin, Octane, or a PCI IOC3 card,
then say Y. Otherwise say N.
+config MFD_INTEL_M10_BMC
+ tristate "Intel MAX 10 Board Management Controller"
+ depends on SPI_MASTER
+ select REGMAP_SPI_AVMM
+ select MFD_CORE
+ help
+ Support for the Intel MAX 10 board management controller using the
+ SPI interface.
+
+	  This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index a60e5f835283..1780019d2474 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
obj-$(CONFIG_MFD_CROS_EC_DEV) += cros_ec_dev.o
+obj-$(CONFIG_MFD_ENE_KB3930) += ene-kb3930.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o
@@ -264,3 +265,5 @@ obj-$(CONFIG_MFD_STMFX) += stmfx.o
obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
+obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
+obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 151c36ce7343..54fb6cbd2aa0 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/leds.h>
#include <linux/i2c.h>
#include <linux/mfd/dm355evm_msp.h>
@@ -116,6 +117,54 @@ static const u8 msp_gpios[] = {
MSP_GPIO(4, SDMMC), MSP_GPIO(3, SDMMC), /* mmc1 WP, nCD */
};
+static struct gpio_led evm_leds[] = {
+ { .name = "dm355evm::ds14",
+ .default_trigger = "heartbeat", },
+ { .name = "dm355evm::ds15",
+ .default_trigger = "mmc0", },
+ { .name = "dm355evm::ds16",
+ /* could also be a CE-ATA drive */
+ .default_trigger = "mmc1", },
+ { .name = "dm355evm::ds17",
+ .default_trigger = "nand-disk", },
+ { .name = "dm355evm::ds18", },
+ { .name = "dm355evm::ds19", },
+ { .name = "dm355evm::ds20", },
+ { .name = "dm355evm::ds21", },
+};
+
+static struct gpio_led_platform_data evm_led_data = {
+ .num_leds = ARRAY_SIZE(evm_leds),
+ .leds = evm_leds,
+};
+
+static struct gpiod_lookup_table evm_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /*
+ * These GPIOs are on the dm355evm_msp
+ * GPIO chip at index 0..7
+ */
+ GPIO_LOOKUP_IDX("dm355evm_msp", 0, NULL,
+ 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 1, NULL,
+ 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 2, NULL,
+ 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 3, NULL,
+ 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 4, NULL,
+ 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 5, NULL,
+ 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 6, NULL,
+ 6, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("dm355evm_msp", 7, NULL,
+ 7, GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
#define MSP_GPIO_REG(offset) (msp_gpios[(offset)] >> 3)
#define MSP_GPIO_MASK(offset) BIT(msp_gpios[(offset)] & 0x07)
@@ -260,32 +309,7 @@ static int add_children(struct i2c_client *client)
/* LED output */
if (msp_has_leds()) {
-#define GPIO_LED(l) .name = l, .active_low = true
- static struct gpio_led evm_leds[] = {
- { GPIO_LED("dm355evm::ds14"),
- .default_trigger = "heartbeat", },
- { GPIO_LED("dm355evm::ds15"),
- .default_trigger = "mmc0", },
- { GPIO_LED("dm355evm::ds16"),
- /* could also be a CE-ATA drive */
- .default_trigger = "mmc1", },
- { GPIO_LED("dm355evm::ds17"),
- .default_trigger = "nand-disk", },
- { GPIO_LED("dm355evm::ds18"), },
- { GPIO_LED("dm355evm::ds19"), },
- { GPIO_LED("dm355evm::ds20"), },
- { GPIO_LED("dm355evm::ds21"), },
- };
-#undef GPIO_LED
-
- struct gpio_led_platform_data evm_led_data = {
- .num_leds = ARRAY_SIZE(evm_leds),
- .leds = evm_leds,
- };
-
- for (i = 0; i < ARRAY_SIZE(evm_leds); i++)
- evm_leds[i].gpio = i + dm355evm_msp_gpio.base;
-
+ gpiod_add_lookup_table(&evm_leds_gpio_table);
/* NOTE: these are the only fully programmable LEDs
* on the board, since GPIO-61/ds22 (and many signals
* going to DC7) must be used for AEMIF address lines
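
The dm355evm_msp rework above swaps legacy global GPIO numbers (the old evm_leds[i].gpio = i + base loop) for a gpiod lookup table keyed by gpiochip label and offset. A sketch of the consumer side that such GPIO_LOOKUP_IDX() entries serve; leds-gpio requests its descriptors roughly like this, with an illustrative function name:

#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_led_gpio(struct device *dev,
					      unsigned int idx)
{
	/* con_id is NULL in the lookup table, so the consumer passes
	 * NULL too; the table supplies the chip ("dm355evm_msp"), the
	 * offset, and the GPIO_ACTIVE_LOW polarity. */
	return devm_gpiod_get_index(dev, NULL, idx, GPIOD_OUT_LOW);
}
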
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
new file mode 100644
index 000000000000..1c32ff586816
--- /dev/null
+++ b/drivers/mfd/ene-kb3930.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
+/*
+ * ENE KB3930 Embedded Controller Driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+/* I2C registers that are multiplexing access to the EC RAM. */
+enum {
+ EC_DATA_IN = 0x00,
+ EC_RAM_OUT = 0x80,
+ EC_RAM_IN = 0x81,
+};
+
+/* EC RAM registers. */
+enum {
+ EC_MODEL = 0x30,
+ EC_VERSION_MAJ = 0x31,
+ EC_VERSION_MIN = 0x32,
+};
+
+struct kb3930 {
+ struct i2c_client *client;
+ struct regmap *ram_regmap;
+ struct gpio_descs *off_gpios;
+};
+
+static struct kb3930 *kb3930_power_off;
+
+#define EC_GPIO_WAVE 0
+#define EC_GPIO_OFF_MODE 1
+
+#define EC_OFF_MODE_REBOOT 0
+#define EC_OFF_MODE_POWER 1
+
+static void kb3930_off(struct kb3930 *ddata, int off_mode)
+{
+ gpiod_direction_output(ddata->off_gpios->desc[EC_GPIO_OFF_MODE],
+ off_mode);
+
+ /*
+ * This creates a 10 Hz wave on EC_GPIO_WAVE that signals a
+ * shutdown request to the EC. Once the EC detects it, it will
+ * proceed to turn the power off or reset the board depending on
+ * the value of EC_GPIO_OFF_MODE.
+ */
+ while (1) {
+ mdelay(50);
+ gpiod_direction_output(ddata->off_gpios->desc[EC_GPIO_WAVE], 0);
+ mdelay(50);
+ gpiod_direction_output(ddata->off_gpios->desc[EC_GPIO_WAVE], 1);
+ }
+}
+
+static int kb3930_restart(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ kb3930_off(kb3930_power_off, EC_OFF_MODE_REBOOT);
+ return NOTIFY_DONE;
+}
+
+static void kb3930_pm_power_off(void)
+{
+ kb3930_off(kb3930_power_off, EC_OFF_MODE_POWER);
+}
+
+static struct notifier_block kb3930_restart_nb = {
+ .notifier_call = kb3930_restart,
+};
+
+static const struct mfd_cell ariel_ec_cells[] = {
+ { .name = "dell-wyse-ariel-led", },
+ { .name = "dell-wyse-ariel-power", },
+};
+
+static int kb3930_ec_ram_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct kb3930 *ddata = context;
+
+ return i2c_smbus_write_word_data(ddata->client, EC_RAM_OUT,
+ (val << 8) | reg);
+}
+
+static int kb3930_ec_ram_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct kb3930 *ddata = context;
+ int ret;
+
+ ret = i2c_smbus_write_word_data(ddata->client, EC_RAM_IN, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(ddata->client, EC_DATA_IN);
+ if (ret < 0)
+ return ret;
+
+ *val = ret >> 8;
+ return 0;
+}
+
+static const struct regmap_config kb3930_ram_regmap_config = {
+ .name = "ec_ram",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_stride = 1,
+ .max_register = 0xff,
+ .reg_write = kb3930_ec_ram_reg_write,
+ .reg_read = kb3930_ec_ram_reg_read,
+ .fast_io = false,
+};
+
+static int kb3930_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct kb3930 *ddata;
+ unsigned int model;
+ int ret;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ kb3930_power_off = ddata;
+ ddata->client = client;
+ i2c_set_clientdata(client, ddata);
+
+ ddata->ram_regmap = devm_regmap_init(dev, NULL, ddata,
+ &kb3930_ram_regmap_config);
+ if (IS_ERR(ddata->ram_regmap))
+ return PTR_ERR(ddata->ram_regmap);
+
+ ret = regmap_read(ddata->ram_regmap, EC_MODEL, &model);
+ if (ret < 0)
+ return ret;
+
+	/* Currently we only support the cells present on the Dell Ariel model. */
+ if (model != 'J') {
+ dev_err(dev, "unknown board model: %02x\n", model);
+ return -ENODEV;
+ }
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ ariel_ec_cells,
+ ARRAY_SIZE(ariel_ec_cells),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ if (of_property_read_bool(np, "system-power-controller")) {
+ ddata->off_gpios =
+ devm_gpiod_get_array_optional(dev, "off", GPIOD_IN);
+ if (IS_ERR(ddata->off_gpios))
+ return PTR_ERR(ddata->off_gpios);
+ if (ddata->off_gpios->ndescs < 2) {
+ dev_err(dev, "invalid off-gpios property\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ddata->off_gpios) {
+ register_restart_handler(&kb3930_restart_nb);
+ if (!pm_power_off)
+ pm_power_off = kb3930_pm_power_off;
+ }
+
+ return 0;
+}
+
+static int kb3930_remove(struct i2c_client *client)
+{
+ struct kb3930 *ddata = i2c_get_clientdata(client);
+
+ if (ddata->off_gpios) {
+ if (pm_power_off == kb3930_pm_power_off)
+ pm_power_off = NULL;
+ unregister_restart_handler(&kb3930_restart_nb);
+ }
+ kb3930_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id kb3930_dt_ids[] = {
+ { .compatible = "ene,kb3930" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, kb3930_dt_ids);
+
+static struct i2c_driver kb3930_driver = {
+ .probe_new = kb3930_probe,
+ .remove = kb3930_remove,
+ .driver = {
+ .name = "ene-kb3930",
+ .of_match_table = of_match_ptr(kb3930_dt_ids),
+ },
+};
+module_i2c_driver(kb3930_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("ENE KB3930 Embedded Controller Driver");
+MODULE_LICENSE("Dual BSD/GPL");
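
The ram_regmap in the new driver is not a plain I2C register map: accesses go through an indirection protocol where EC_RAM_IN/EC_RAM_OUT select the EC RAM address and the payload travels in the high byte of an SMBus word. A condensed sketch of the read path implemented by kb3930_ec_ram_reg_read(), with the register constants inlined for illustration:

#include <linux/i2c.h>

static int example_read_ec_model(struct i2c_client *client, u8 *model)
{
	int ret;

	/* Select EC RAM address 0x30 (EC_MODEL) via EC_RAM_IN (0x81). */
	ret = i2c_smbus_write_word_data(client, 0x81, 0x30);
	if (ret < 0)
		return ret;

	/* Fetch a word at EC_DATA_IN (0x00); data sits in the high byte. */
	ret = i2c_smbus_read_word_data(client, 0x00);
	if (ret < 0)
		return ret;

	*model = ret >> 8;
	return 0;
}
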
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9a58032f818a..2d7c588ef1ed 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -293,6 +293,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x5ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x5ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x5aee), (kernel_ulong_t)&bxt_uart_info },
+ /* LKF */
+ { PCI_VDEVICE(INTEL, 0x98a8), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x98a9), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x98c7), (kernel_ulong_t)&bxt_uart_info },
/* SPT-LP */
{ PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
new file mode 100644
index 000000000000..b84579b7b4f0
--- /dev/null
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX 10 Board Management Controller chip
+ *
+ * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+enum m10bmc_type {
+ M10_N3000,
+};
+
+static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
+ { .name = "n3000bmc-hwmon" },
+ { .name = "n3000bmc-retimer" },
+ { .name = "n3000bmc-secure" },
+};
+
+static struct regmap_config intel_m10bmc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = M10BMC_MEM_END,
+};
+
+static ssize_t bmc_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, M10BMC_BUILD_VER, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", val);
+}
+static DEVICE_ATTR_RO(bmc_version);
+
+static ssize_t bmcfw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, NIOS2_FW_VERSION, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", val);
+}
+static DEVICE_ATTR_RO(bmcfw_version);
+
+static struct attribute *m10bmc_attrs[] = {
+ &dev_attr_bmc_version.attr,
+ &dev_attr_bmcfw_version.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(m10bmc);
+
+static int check_m10bmc_version(struct intel_m10bmc *ddata)
+{
+ unsigned int v;
+ int ret;
+
+	/*
+	 * This check filters out very old legacy BMC versions.
+	 * M10BMC_LEGACY_SYS_BASE is the offset of the old block of MMIO
+	 * registers. On old BMC chips, the BMC version info is stored in
+	 * the old version register (M10BMC_LEGACY_SYS_BASE +
+	 * M10BMC_BUILD_VER), so the value read back would not be
+	 * LEGACY_INVALID (0xffffffff). On the new BMC chips that this
+	 * driver supports, the register should read as LEGACY_INVALID.
+	 */
+ ret = m10bmc_raw_read(ddata,
+ M10BMC_LEGACY_SYS_BASE + M10BMC_BUILD_VER, &v);
+ if (ret)
+ return -ENODEV;
+
+ if (v != M10BMC_VER_LEGACY_INVALID) {
+ dev_err(ddata->dev, "bad version M10BMC detected\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int intel_m10_bmc_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct mfd_cell *cells;
+ struct intel_m10bmc *ddata;
+ int ret, n_cell;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = dev;
+
+ ddata->regmap =
+ devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, ddata);
+
+ ret = check_m10bmc_version(ddata);
+ if (ret) {
+ dev_err(dev, "Failed to identify m10bmc hardware\n");
+ return ret;
+ }
+
+ switch (id->driver_data) {
+ case M10_N3000:
+ cells = m10bmc_pacn3000_subdevs;
+ n_cell = ARRAY_SIZE(m10bmc_pacn3000_subdevs);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cell,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(dev, "Failed to register sub-devices: %d\n", ret);
+
+ return ret;
+}
+
+static const struct spi_device_id m10bmc_spi_id[] = {
+ { "m10-n3000", M10_N3000 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
+
+static struct spi_driver intel_m10bmc_spi_driver = {
+ .driver = {
+ .name = "intel-m10-bmc",
+ .dev_groups = m10bmc_groups,
+ },
+ .probe = intel_m10_bmc_spi_probe,
+ .id_table = m10bmc_spi_id,
+};
+module_spi_driver(intel_m10bmc_spi_driver);
+
+MODULE_DESCRIPTION("Intel MAX 10 BMC Device Driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:intel-m10-bmc");
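
Sub-devices spawned from the cell table above are expected to reach the BMC through their parent's drvdata and the accessors from <linux/mfd/intel-m10-bmc.h>, as bmc_version_show() does. A hypothetical sub-driver probe, sketched under that assumption:

#include <linux/mfd/intel-m10-bmc.h>
#include <linux/platform_device.h>

static int n3000bmc_example_probe(struct platform_device *pdev)
{
	/* The SPI parent stored its state with spi_set_drvdata(). */
	struct intel_m10bmc *m10bmc = dev_get_drvdata(pdev->dev.parent);
	unsigned int ver;
	int ret;

	ret = m10bmc_sys_read(m10bmc, M10BMC_BUILD_VER, &ver);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "BMC build version: 0x%x\n", ver);
	return 0;
}
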
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 52bec01149e5..2c9295953c11 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/acpi.h>
#define MAX_ID_LEN 4
static char force_device_id[MAX_ID_LEN + 1] = "";
@@ -124,6 +125,7 @@ static const struct kempld_platform_data kempld_platform_data_generic = {
};
static struct platform_device *kempld_pdev;
+static bool kempld_acpi_mode;
static int kempld_create_platform_device(const struct dmi_system_id *id)
{
@@ -426,13 +428,93 @@ static int kempld_detect_device(struct kempld_device_data *pld)
return ret;
}
+#ifdef CONFIG_ACPI
+static int kempld_get_acpi_data(struct platform_device *pdev)
+{
+ struct list_head resource_list;
+ struct resource *resources;
+ struct resource_entry *rentry;
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
+ const struct kempld_platform_data *pdata;
+ int ret;
+ int count;
+
+ pdata = acpi_device_get_match_data(dev);
+ ret = platform_device_add_data(pdev, pdata,
+ sizeof(struct kempld_platform_data));
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(acpi_dev, &resource_list, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ count = ret;
+
+ if (count == 0) {
+ ret = platform_device_add_resources(pdev, pdata->ioresource, 1);
+ goto out;
+ }
+
+ resources = devm_kcalloc(&acpi_dev->dev, count, sizeof(*resources),
+ GFP_KERNEL);
+ if (!resources) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node) {
+ memcpy(&resources[count], rentry->res,
+ sizeof(*resources));
+ count++;
+ }
+ ret = platform_device_add_resources(pdev, resources, count);
+
+out:
+ acpi_dev_free_resource_list(&resource_list);
+
+ return ret;
+}
+#else
+static int kempld_get_acpi_data(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
static int kempld_probe(struct platform_device *pdev)
{
- const struct kempld_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ const struct kempld_platform_data *pdata;
struct device *dev = &pdev->dev;
struct kempld_device_data *pld;
struct resource *ioport;
+ int ret;
+
+ if (kempld_pdev == NULL) {
+ /*
+ * No kempld_pdev device has been registered in kempld_init,
+ * so we seem to be probing an ACPI platform device.
+ */
+ ret = kempld_get_acpi_data(pdev);
+ if (ret)
+ return ret;
+
+ kempld_acpi_mode = true;
+ } else if (kempld_pdev != pdev) {
+ /*
+ * The platform device we are probing is not the one we
+ * registered in kempld_init using the DMI table, so this one
+ * comes from ACPI.
+ * As we can only probe one - abort here and use the DMI
+ * based one instead.
+ */
+ dev_notice(dev, "platform device exists - not using ACPI\n");
+ return -ENODEV;
+ }
+ pdata = dev_get_platdata(dev);
pld = devm_kzalloc(dev, sizeof(*pld), GFP_KERNEL);
if (!pld)
@@ -471,9 +553,19 @@ static int kempld_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id kempld_acpi_table[] = {
+ { "KEM0001", (kernel_ulong_t)&kempld_platform_data_generic },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, kempld_acpi_table);
+#endif
+
static struct platform_driver kempld_driver = {
.driver = {
.name = "kempld",
+ .acpi_match_table = ACPI_PTR(kempld_acpi_table),
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = kempld_probe,
.remove = kempld_remove,
@@ -792,6 +884,7 @@ MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
static int __init kempld_init(void)
{
const struct dmi_system_id *id;
+ int ret;
if (force_device_id[0]) {
for (id = kempld_dmi_table;
@@ -801,12 +894,24 @@ static int __init kempld_init(void)
break;
if (id->matches[0].slot == DMI_NONE)
return -ENODEV;
- } else {
- if (!dmi_check_system(kempld_dmi_table))
- return -ENODEV;
}
- return platform_driver_register(&kempld_driver);
+ ret = platform_driver_register(&kempld_driver);
+ if (ret)
+ return ret;
+
+ /*
+ * With synchronous probing the device should already be probed now.
+	 * If no device ID is forced and no ACPI definition for the device
+	 * was found either, scan the DMI table as a fallback.
+ *
+ * If drivers_autoprobing is disabled and the device is found here,
+ * only that device can be bound manually later.
+ */
+ if (!kempld_pdev && !kempld_acpi_mode)
+ dmi_check_system(kempld_dmi_table);
+
+ return 0;
}
static void __exit kempld_exit(void)
diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c
index 44d5bb462dab..f3d418810693 100644
--- a/drivers/mfd/khadas-mcu.c
+++ b/drivers/mfd/khadas-mcu.c
@@ -122,11 +122,13 @@ static int khadas_mcu_probe(struct i2c_client *client,
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id khadas_mcu_of_match[] = {
{ .compatible = "khadas,mcu", },
{},
};
MODULE_DEVICE_TABLE(of, khadas_mcu_of_match);
+#endif
static struct i2c_driver khadas_mcu_driver = {
.driver = {
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 2268be9113f1..9c21483d9653 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -27,6 +27,10 @@ static const struct mfd_cell lp87565_cells[] = {
static const struct of_device_id of_lp87565_match_table[] = {
{ .compatible = "ti,lp87565", },
{
+ .compatible = "ti,lp87524-q1",
+ .data = (void *)LP87565_DEVICE_TYPE_LP87524_Q1,
+ },
+ {
.compatible = "ti,lp87565-q1",
.data = (void *)LP87565_DEVICE_TYPE_LP87565_Q1,
},
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 8a8d733fdce5..4ed6ad8ce002 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -369,19 +369,14 @@ EXPORT_SYMBOL_GPL(madera_of_match);
static int madera_get_reset_gpio(struct madera *madera)
{
struct gpio_desc *reset;
- int ret;
if (madera->pdata.reset)
return 0;
reset = devm_gpiod_get_optional(madera->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(reset)) {
- ret = PTR_ERR(reset);
- if (ret != -EPROBE_DEFER)
- dev_err(madera->dev, "Failed to request /RESET: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(reset))
+ return dev_err_probe(madera->dev, PTR_ERR(reset),
+ "Failed to request /RESET");
/*
* A hard reset is needed for full reset of the chip. We allow running
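
dev_err_probe(), adopted above in place of the open-coded -EPROBE_DEFER check, logs real failures at error level, records the deferral reason at debug level, and returns the error code either way. A minimal sketch of the pattern; the resource name is illustrative:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int example_request_reset(struct device *dev,
				 struct gpio_desc **reset)
{
	struct gpio_desc *desc;

	desc = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		/* One call replaces the log-unless-deferred dance. */
		return dev_err_probe(dev, PTR_ERR(desc),
				     "Failed to request /RESET\n");

	*reset = desc;
	return 0;
}
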
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
index e9cacc27d980..4661c1b29a72 100644
--- a/drivers/mfd/mt6360-core.c
+++ b/drivers/mfd/mt6360-core.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/version.h>
#include <linux/mfd/mt6360.h>
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index e25407ed3ad4..dc452df1f1bf 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -25,6 +25,7 @@ static const struct mfd_cell rn5t618_cells[] = {
static const struct mfd_cell rc5t619_cells[] = {
{ .name = "rn5t618-adc" },
+ { .name = "rn5t618-power" },
{ .name = "rn5t618-regulator" },
{ .name = "rc5t619-rtc" },
{ .name = "rn5t618-wdt" },
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
new file mode 100644
index 000000000000..87f684cff9a1
--- /dev/null
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simple MFD - I2C
+ *
+ * This driver creates a single register map that is intended to be shared
+ * by all sub-devices. Children can use their parent's device structure
+ * (dev.parent) in order to reference it.
+ *
+ * Once the register map has been successfully initialised, any sub-devices
+ * represented by child nodes in Device Tree will be subsequently registered.
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+static const struct regmap_config simple_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int simple_mfd_i2c_probe(struct i2c_client *i2c)
+{
+ const struct regmap_config *config;
+ struct regmap *regmap;
+
+ config = device_get_match_data(&i2c->dev);
+ if (!config)
+ config = &simple_regmap_config;
+
+ regmap = devm_regmap_init_i2c(i2c, config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return devm_of_platform_populate(&i2c->dev);
+}
+
+static const struct of_device_id simple_mfd_i2c_of_match[] = {
+ { .compatible = "kontron,sl28cpld" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match);
+
+static struct i2c_driver simple_mfd_i2c_driver = {
+ .probe_new = simple_mfd_i2c_probe,
+ .driver = {
+ .name = "simple-mfd-i2c",
+ .of_match_table = simple_mfd_i2c_of_match,
+ },
+};
+module_i2c_driver(simple_mfd_i2c_driver);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("Simple MFD - I2C driver");
+MODULE_LICENSE("GPL v2");
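
The driver above deliberately leaves sub-devices to find the shared register map themselves. A sketch of how a child registered by devm_of_platform_populate() could do so; the register offset is hypothetical:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_child_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	unsigned int val;
	int ret;

	/* The MFD parent owns the regmap; look it up via dev.parent. */
	regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!regmap)
		return -ENODEV;

	ret = regmap_read(regmap, 0x03 /* hypothetical register */, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "register 0x03 reads 0x%x\n", val);
	return 0;
}
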
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index ccd62b963952..6d2f4a0a901d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1415,8 +1415,14 @@ static int sm501_plat_probe(struct platform_device *dev)
goto err_claim;
}
- return sm501_init_dev(sm);
+ ret = sm501_init_dev(sm);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+ err_unmap:
+ iounmap(sm->regs);
err_claim:
release_mem_region(sm->io_res->start, 0x100);
err_res:
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index f8a8b918c60d..6b7956604a0f 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -189,7 +189,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
ddata->irqs[i].mask = BIT(i);
ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
+ IRQF_ONESHOT, 0,
&ddata->irq_chip, &ddata->irq_data);
if (ret) {
dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
@@ -202,9 +202,34 @@ static int sprd_pmic_probe(struct spi_device *spi)
return ret;
}
+ device_init_wakeup(&spi->dev, true);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sprd_pmic_suspend(struct device *dev)
+{
+ struct sprd_pmic *ddata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(ddata->irq);
+
+ return 0;
+}
+
+static int sprd_pmic_resume(struct device *dev)
+{
+ struct sprd_pmic *ddata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(ddata->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume);
+
static const struct of_device_id sprd_pmic_match[] = {
{ .compatible = "sprd,sc2731", .data = &sc2731_data },
{},
@@ -215,6 +240,7 @@ static struct spi_driver sprd_pmic_driver = {
.driver = {
.name = "sc27xx-pmic",
.of_match_table = sprd_pmic_match,
+ .pm = &sprd_pmic_pm_ops,
},
.probe = sprd_pmic_probe,
};
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 711979afd90a..5e680bfdf5c9 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -331,11 +331,9 @@ static int stmfx_chip_init(struct i2c_client *client)
ret = PTR_ERR_OR_ZERO(stmfx->vdd);
if (ret == -ENODEV) {
stmfx->vdd = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
- } else if (ret) {
- dev_err(&client->dev, "Failed to get VDD regulator: %d\n", ret);
- return ret;
+ } else {
+ return dev_err_probe(&client->dev, ret,
+ "Failed to get VDD regulator\n");
}
if (stmfx->vdd) {
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index df5cebb372a5..ca465794ea9c 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -108,7 +108,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
syscon_config.max_register = resource_size(&res) - reg_io_width;
regmap = regmap_init_mmio(NULL, base, &syscon_config);
- kfree(syscon_config.name);
if (IS_ERR(regmap)) {
pr_err("regmap init failed\n");
ret = PTR_ERR(regmap);
@@ -145,6 +144,7 @@ err_clk:
regmap_exit(regmap);
err_regmap:
iounmap(base);
+ kfree(syscon_config.name);
err_map:
kfree(syscon);
return ERR_PTR(ret);
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index da910302d51a..c274d733b656 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -219,12 +219,9 @@ static int wcd934x_slim_probe(struct slim_device *sdev)
return -ENOMEM;
ddata->irq = of_irq_get(np, 0);
- if (ddata->irq < 0) {
- if (ddata->irq != -EPROBE_DEFER)
- dev_err(ddata->dev, "Failed to get IRQ: err = %d\n",
- ddata->irq);
- return ddata->irq;
- }
+ if (ddata->irq < 0)
+ return dev_err_probe(ddata->dev, ddata->irq,
+ "Failed to get IRQ\n");
reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
if (reset_gpio < 0) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ce136d685d14..fafa8b0d8099 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -456,6 +456,16 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+config HISI_HIKEY_USB
+ tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
+ depends on (OF && GPIOLIB) || COMPILE_TEST
+ depends on USB_ROLE_SWITCH
+ help
+	  If you say yes here, this adds support for the on-board USB GPIO hub
+ found on HiKey 960/970 boards, which is necessary to support
+ switching between the dual-role USB-C port and the USB-A host ports
+ using only one USB controller.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -464,7 +474,6 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
-source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c7bd01ac6291..d23231e73330 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
-obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_CXL_BASE) += cxl/
@@ -57,3 +56,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index f5f392ddf3d6..8859011672cb 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -72,28 +72,80 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
-static void rts5227_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
{
- /* Set relink_time to 0 */
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
+ struct pci_dev *pdev = pcr->pci;
+ int l1ss;
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
- if (pm_state == HOST_ENTER_S3)
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x10);
+ if (CHK_PCI_PID(pcr, 0x522A)) {
+ if (0 == (lval & 0x0F))
+ rtsx_pci_enable_oobs_polling(pcr);
+ else
+ rtsx_pci_disable_oobs_polling(pcr);
+ }
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
- rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
{
u16 cap;
+ struct rtsx_cr_option *option = &pcr->option;
+ rts5227_init_from_cfg(pcr);
rtsx_pci_init_cmd(pcr);
/* Configure GPIO as output */
@@ -115,9 +167,17 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
rts5227_fill_driving(pcr, OUTPUT_3V3);
/* Configure force_clock_req */
if (pcr->flags & PCR_REVERSE_SOCKET)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0xB8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00);
+
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB8, 0x88);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, pcr->reg_pm_ctrl3, 0x10, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
@@ -239,7 +299,6 @@ static const struct pcr_ops rts5227_pcr_ops = {
.switch_output_voltage = rts5227_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
- .force_power_down = rts5227_force_power_down,
};
/* SD Pull Control Enable:
@@ -373,6 +432,27 @@ static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
+static void rts522a_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
/* rts522a operations mainly derived from rts5227, except phy/hw init setting.
*/
@@ -389,16 +469,29 @@ static const struct pcr_ops rts522a_pcr_ops = {
.switch_output_voltage = rts522a_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
- .force_power_down = rts5227_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts522a_set_l1off_cfg_sub_d0,
};
void rts522a_init_params(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
+
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
+ option->dev_flags = LTR_L1SS_PWR_GATE_EN;
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+
pcr->option.ocp_en = 1;
if (pcr->option.ocp_en)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
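
rts5227_init_from_cfg() above follows the same recipe as the existing rts5249 code: locate the L1 PM substates extended capability and derive the ASPM/PCI-PM enable flags from its CTL1 register. The PCI plumbing, reduced to a standalone sketch:

#include <linux/pci.h>

static void example_read_l1ss(struct pci_dev *pdev)
{
	u32 lval;
	int l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);

	if (!l1ss)
		return; /* no L1 substates capability: keep defaults */

	pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);

	if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
		pci_dbg(pdev, "ASPM L1.1 enabled\n");
	if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
		pci_dbg(pdev, "ASPM L1.2 enabled\n");
	if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
		pci_dbg(pdev, "PCI-PM L1.1 enabled\n");
	if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
		pci_dbg(pdev, "PCI-PM L1.2 enabled\n");
}
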
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 28feab1449ab..781a86def59a 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -99,9 +99,8 @@ static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
- if (pm_state == HOST_ENTER_S3)
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
- D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
rtsx_pci_write_register(pcr, FPDCTL,
SSC_POWER_DOWN, SSC_POWER_DOWN);
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 941b3d77f1e9..b85279f1fc5e 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -73,25 +73,13 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
-static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
- /* Set relink_time to 0 */
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, 0xFF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, 0xFF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, 0x01, 0);
-
- if (pm_state == HOST_ENTER_S3)
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
- D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
-
- rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
-}
-
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
@@ -105,6 +93,14 @@ static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+ if (0 == (lval & 0x0F))
+ rtsx_pci_enable_oobs_polling(pcr);
+ else
+ rtsx_pci_disable_oobs_polling(pcr);
+ }
+
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
@@ -144,6 +140,112 @@ static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
return 0;
}
+static void rts52xa_save_content_from_efuse(struct rtsx_pcr *pcr)
+{
+ u8 cnt, sv;
+ u16 j = 0;
+ u8 tmp;
+ u8 val;
+ int i;
+
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
+ REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_POR);
+ udelay(1);
+
+ pcr_dbg(pcr, "Enable efuse por!");
+ pcr_dbg(pcr, "save efuse to autoload");
+
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, REG_EFUSE_ADD_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
+ REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
+ /* Wait transfer end */
+ for (j = 0; j < 1024; j++) {
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
+ cnt = val & 0x0F;
+ sv = val & 0x10;
+
+ if (sv) {
+ for (i = 0; i < 4; i++) {
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
+ REG_EFUSE_ADD_MASK, 0x04 + i);
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
+ REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
+ /* Wait transfer end */
+ for (j = 0; j < 1024; j++) {
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
+ rtsx_pci_write_register(pcr, 0xFF04 + i, 0xFF, val);
+ }
+ } else {
+ rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8));
+ rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8));
+ }
+
+ for (i = 0; i < cnt * 4; i++) {
+ if (sv)
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
+ REG_EFUSE_ADD_MASK, 0x08 + i);
+ else
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
+ REG_EFUSE_ADD_MASK, 0x04 + i);
+ rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
+ REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
+ /* Wait transfer end */
+ for (j = 0; j < 1024; j++) {
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
+ rtsx_pci_write_register(pcr, 0xFF08 + i, 0xFF, val);
+ }
+ rtsx_pci_write_register(pcr, 0xFF00, 0xFF, (cnt & 0x7F) | 0x80);
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
+ REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_BYPASS);
+ pcr_dbg(pcr, "Disable efuse por!");
+}
+
+static void rts52xa_save_content_to_autoload_space(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, RESET_LOAD_REG, &val);
+ if (val & 0x02) {
+ rtsx_pci_read_register(pcr, RTS525A_BIOS_CFG, &val);
+ if (val & RTS525A_LOAD_BIOS_FLAG) {
+ rtsx_pci_write_register(pcr, RTS525A_BIOS_CFG,
+ RTS525A_LOAD_BIOS_FLAG, RTS525A_CLEAR_BIOS_FLAG);
+
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWERON);
+
+ pcr_dbg(pcr, "Power ON efuse!");
+ mdelay(1);
+ rts52xa_save_content_from_efuse(pcr);
+ } else {
+ rtsx_pci_read_register(pcr, RTS524A_PME_FORCE_CTL, &val);
+ if (!(val & 0x08))
+ rts52xa_save_content_from_efuse(pcr);
+ }
+ } else {
+ pcr_dbg(pcr, "Load from autoload");
+ rtsx_pci_write_register(pcr, 0xFF00, 0xFF, 0x80);
+ rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8));
+ rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr));
+ rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8));
+ }
+}
+
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &(pcr->option);
@@ -153,6 +255,9 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_init_cmd(pcr);
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
+ rts52xa_save_content_to_autoload_space(pcr);
+
/* Rest L1SUB Config */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
/* Configure GPIO as output */
@@ -171,18 +276,36 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+ rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+ rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
+ } else {
+ rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
+ rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
+ }
+
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
* to drive low, and we forcibly request clock.
*/
if (option->force_clkreq_0)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
- return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
+ rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ pcr_dbg(pcr, "Power OFF efuse!");
+ }
+
+ return 0;
}
static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
@@ -360,7 +483,6 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_on = rtsx_base_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
- .force_power_down = rtsx_base_force_power_down,
};
/* SD Pull Control Enable:
@@ -585,7 +707,6 @@ static const struct pcr_ops rts524a_pcr_ops = {
.card_power_on = rtsx_base_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
- .force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
@@ -668,6 +789,8 @@ static int rts525a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5249_extra_init_hw(pcr);
+ rtsx_pci_write_register(pcr, RTS5250_CLK_CFG3, RTS525A_CFG_MEM_PD, RTS525A_CFG_MEM_PD);
+
rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
if (is_version(pcr, 0x525A, IC_VER_A)) {
rtsx_pci_write_register(pcr, L1SUB_CONFIG2,
@@ -700,7 +823,6 @@ static const struct pcr_ops rts525a_pcr_ops = {
.card_power_on = rts525a_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
- .force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index b9f66b1384a6..080a7d67a8e1 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -26,21 +26,17 @@ static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
- u8 driving_3v3[6][3] = {
- {0x94, 0x94, 0x94},
- {0x11, 0x11, 0x18},
- {0x55, 0x55, 0x5C},
- {0x94, 0x94, 0x94},
- {0x94, 0x94, 0x94},
- {0xFF, 0xFF, 0xFF},
+ u8 driving_3v3[4][3] = {
+ {0x11, 0x11, 0x11},
+ {0x22, 0x22, 0x22},
+ {0x55, 0x55, 0x55},
+ {0x33, 0x33, 0x33},
};
- u8 driving_1v8[6][3] = {
- {0x9A, 0x89, 0x89},
- {0xC4, 0xC4, 0xC4},
- {0x3C, 0x3C, 0x3C},
+ u8 driving_1v8[4][3] = {
+ {0x35, 0x33, 0x33},
+ {0x8A, 0x88, 0x88},
+ {0xBD, 0xBB, 0xBB},
{0x9B, 0x99, 0x99},
- {0x9A, 0x89, 0x89},
- {0xFE, 0xFE, 0xFE},
};
u8 (*driving)[3], drive_sel;
@@ -58,7 +54,7 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
- rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
@@ -82,26 +78,13 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ if (rtsx_check_mmc_support(reg))
+ pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
-static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
-{
- /* Set relink_time to 0 */
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
- rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
- RELINK_TIME_MASK, 0);
-
- if (pm_state == HOST_ENTER_S3)
- rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
- D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
-
- rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
-}
-
static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
@@ -574,6 +557,8 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+
return 0;
}
@@ -620,7 +605,6 @@ static const struct pcr_ops rts5260_pcr_ops = {
.card_power_on = rts5260_card_power_on,
.card_power_off = rts5260_card_power_off,
.switch_output_voltage = rts5260_switch_output_voltage,
- .force_power_down = rtsx_base_force_power_down,
.stop_cmd = rts5260_stop_cmd,
.set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
.enable_ocp = rts5260_enable_ocp,
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 37ccc67f4914..5d15607027e9 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1096,6 +1096,20 @@ static void rtsx_pci_idle_work(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
}
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
@@ -1109,6 +1123,8 @@ static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
+ else
+ rtsx_base_force_power_down(pcr, pm_state);
}
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
@@ -1155,10 +1171,6 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
rtsx_pci_write_register(pcr, REG_OCPGLITCH,
SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
rtsx_pci_enable_ocp(pcr);
- } else {
- /* OC power down */
- rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
- OC_POWER_DOWN);
}
}
}
@@ -1562,12 +1574,14 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
if (ret < 0)
- goto disable_irq;
+ goto free_slots;
schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
return 0;
+free_slots:
+ kfree(pcr->slots);
disable_irq:
free_irq(pcr->irq, (void *)pcr);
disable_msi:
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 6b322db8738e..fe5f4ca0f937 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -18,7 +18,24 @@
#define RTS522A_PM_CTRL3 0xFF7E
#define RTS524A_PME_FORCE_CTL 0xFF78
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+#define RTS5250_CLK_CFG3 0xFF79
+#define RTS525A_CFG_MEM_PD 0xF0
#define RTS524A_PM_CTRL3 0xFF7E
+#define RTS525A_BIOS_CFG 0xFF2D
+#define RTS525A_LOAD_BIOS_FLAG 0x01
+#define RTS525A_CLEAR_BIOS_FLAG 0x00
+
+#define RTS525A_EFUSE_CTL 0xFC32
+#define REG_EFUSE_ENABLE 0x80
+#define REG_EFUSE_MODE 0x40
+#define RTS525A_EFUSE_ADD 0xFC33
+#define REG_EFUSE_ADD_MASK 0x3F
+#define RTS525A_EFUSE_DATA 0xFC35
#define LTR_ACTIVE_LATENCY_DEF 0x883C
#define LTR_IDLE_LATENCY_DEF 0x892C
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 25a9dd9c0c1b..2ba899f5659f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -393,8 +393,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
*capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
- pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
- *phb_index);
+ pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
+ *chipid, *phb_index);
return -ENODEV;
}
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 26a23abc053d..1c0a41803bb6 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -89,6 +90,7 @@ struct at24_data {
struct nvmem_device *nvmem;
struct regulator *vcc_reg;
+ void (*read_post)(unsigned int off, char *buf, size_t count);
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
@@ -121,6 +123,7 @@ MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)");
struct at24_chip_data {
u32 byte_len;
u8 flags;
+ void (*read_post)(unsigned int off, char *buf, size_t count);
};
#define AT24_CHIP_DATA(_name, _len, _flags) \
@@ -128,6 +131,32 @@ struct at24_chip_data {
.byte_len = _len, .flags = _flags, \
}
+#define AT24_CHIP_DATA_CB(_name, _len, _flags, _read_post) \
+ static const struct at24_chip_data _name = { \
+ .byte_len = _len, .flags = _flags, \
+ .read_post = _read_post, \
+ }
+
+static void at24_read_post_vaio(unsigned int off, char *buf, size_t count)
+{
+ int i;
+
+ if (capable(CAP_SYS_ADMIN))
+ return;
+
+ /*
+	 * Hide VAIO private settings from regular users:
+ * - BIOS passwords: bytes 0x00 to 0x0f
+ * - UUID: bytes 0x10 to 0x1f
+ * - Serial number: 0xc0 to 0xdf
+ */
+ for (i = 0; i < count; i++) {
+ if ((off + i <= 0x1f) ||
+ (off + i >= 0xc0 && off + i <= 0xdf))
+ buf[i] = 0;
+ }
+}
+
/* needs 8 addresses as A0-A2 are ignored */
AT24_CHIP_DATA(at24_data_24c00, 128 / 8, AT24_FLAG_TAKE8ADDR);
/* old variants can't be handled with this generic entry! */
@@ -144,6 +173,10 @@ AT24_CHIP_DATA(at24_data_24mac602, 64 / 8,
/* spd is a 24c02 in memory DIMMs */
AT24_CHIP_DATA(at24_data_spd, 2048 / 8,
AT24_FLAG_READONLY | AT24_FLAG_IRUGO);
+/* 24c02_vaio is a 24c02 on some Sony laptops */
+AT24_CHIP_DATA_CB(at24_data_24c02_vaio, 2048 / 8,
+ AT24_FLAG_READONLY | AT24_FLAG_IRUGO,
+ at24_read_post_vaio);
AT24_CHIP_DATA(at24_data_24c04, 4096 / 8, 0);
AT24_CHIP_DATA(at24_data_24cs04, 16,
AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
@@ -177,6 +210,7 @@ static const struct i2c_device_id at24_ids[] = {
{ "24mac402", (kernel_ulong_t)&at24_data_24mac402 },
{ "24mac602", (kernel_ulong_t)&at24_data_24mac602 },
{ "spd", (kernel_ulong_t)&at24_data_spd },
+ { "24c02-vaio", (kernel_ulong_t)&at24_data_24c02_vaio },
{ "24c04", (kernel_ulong_t)&at24_data_24c04 },
{ "24cs04", (kernel_ulong_t)&at24_data_24cs04 },
{ "24c08", (kernel_ulong_t)&at24_data_24c08 },
@@ -388,7 +422,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
struct at24_data *at24;
struct device *dev;
char *buf = val;
- int ret;
+ int i, ret;
at24 = priv;
dev = at24_base_client_dev(at24);
@@ -411,22 +445,22 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
*/
mutex_lock(&at24->lock);
- while (count) {
- ret = at24_regmap_read(at24, buf, off, count);
+ for (i = 0; count; i += ret, count -= ret) {
+ ret = at24_regmap_read(at24, buf + i, off + i, count);
if (ret < 0) {
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
return ret;
}
- buf += ret;
- off += ret;
- count -= ret;
}
mutex_unlock(&at24->lock);
pm_runtime_put(dev);
+ if (unlikely(at24->read_post))
+ at24->read_post(off, buf, i);
+
return 0;
}
@@ -654,6 +688,7 @@ static int at24_probe(struct i2c_client *client)
at24->byte_len = byte_len;
at24->page_size = page_size;
at24->flags = flags;
+ at24->read_post = cdata->read_post;
at24->num_addresses = num_addresses;
at24->offset_adj = at24_get_offset_adj(flags, byte_len);
at24->client[0].client = client;
@@ -678,8 +713,30 @@ static int at24_probe(struct i2c_client *client)
return err;
}
- nvmem_config.name = dev_name(dev);
+ /*
+ * If the 'label' property is not present for the AT24 EEPROM,
+ * then nvmem_config.id is initialised to NVMEM_DEVID_AUTO,
+ * and this will append the 'devid' to the name of the NVMEM
+ * device. This is purely legacy and the AT24 driver has always
+ * defaulted to this. However, if the 'label' property is
+ * present then this means that the name is specified by the
+ * firmware and this name should be used verbatim and so it is
+ * not necessary to append the 'devid'.
+ */
+ if (device_property_present(dev, "label")) {
+ nvmem_config.id = NVMEM_DEVID_NONE;
+ err = device_property_read_string(dev, "label",
+ &nvmem_config.name);
+ if (err)
+ return err;
+ } else {
+ nvmem_config.id = NVMEM_DEVID_AUTO;
+ nvmem_config.name = dev_name(dev);
+ }
+
+ nvmem_config.type = NVMEM_TYPE_EEPROM;
nvmem_config.dev = dev;
nvmem_config.read_only = !writable;
nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
nvmem_config.owner = THIS_MODULE;
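
The reworked read loop above keeps a running total i so that the optional read_post hook sees exactly the bytes that were copied out. A sketch of such a hook with a hypothetical privileged window; at24_read_post_vaio() blanks its own ranges the same way:

#include <linux/types.h>

/* Hypothetical post-read filter; the offsets are illustrative only. */
static void example_read_post(unsigned int off, char *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (off + i >= 0x20 && off + i <= 0x2f)
			buf[i] = 0; /* hide a privileged window */
}
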
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index ed8d38b09925..3b7d8b7584f4 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -261,7 +261,7 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
if (device_property_read_u32(dev, "pagesize", &val) == 0 ||
device_property_read_u32(dev, "at25,page-size", &val) == 0) {
- chip->page_size = (u16)val;
+ chip->page_size = val;
} else {
dev_err(dev, "Error: missing \"pagesize\" property\n");
return -ENODEV;
@@ -348,6 +348,7 @@ static int at25_probe(struct spi_device *spi)
spi_set_drvdata(spi, at25);
at25->addrlen = addrlen;
+ at25->nvmem_config.type = NVMEM_TYPE_EEPROM;
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
at25->nvmem_config.read_only = chip.flags & EE_READONLY;
@@ -358,7 +359,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.reg_read = at25_ee_read;
at25->nvmem_config.reg_write = at25_ee_write;
at25->nvmem_config.priv = at25;
- at25->nvmem_config.stride = 4;
+ at25->nvmem_config.stride = 1;
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = chip.byte_len;
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index b081c67416d7..252e15ba65e1 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -280,18 +280,7 @@ static struct i2c_driver ee1004_driver = {
.remove = ee1004_remove,
.id_table = ee1004_ids,
};
-
-static int __init ee1004_init(void)
-{
- return i2c_add_driver(&ee1004_driver);
-}
-module_init(ee1004_init);
-
-static void __exit ee1004_exit(void)
-{
- i2c_del_driver(&ee1004_driver);
-}
-module_exit(ee1004_exit);
+module_i2c_driver(ee1004_driver);
MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
MODULE_AUTHOR("Jean Delvare");
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 226b5efa6a77..34fa385dfd4b 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -76,7 +76,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
struct eeprom_data *data = i2c_get_clientdata(client);
u8 slice;
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 94cfb675fe4e..7c45f82b4302 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -455,6 +455,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->pdata = pd;
edev->size = 128;
+ edev->nvmem_config.type = NVMEM_TYPE_EEPROM;
edev->nvmem_config.name = dev_name(&spi->dev);
edev->nvmem_config.dev = &spi->dev;
edev->nvmem_config.read_only = pd->flags & EE_READONLY;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 7939c55daceb..994ab67bc2dc 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -73,6 +73,11 @@
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
+/* Protection Domain (PD) ids */
+#define AUDIO_PD (0) /* also GuestOS PD */
+#define USER_PD (1)
+#define SENSORS_PD (2)
+
#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
@@ -518,7 +523,7 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
table = &a->sgt;
- if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
+ if (dma_map_sgtable(attachment->dev, table, dir, 0))
return ERR_PTR(-ENOMEM);
return table;
@@ -528,7 +533,7 @@ static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *table,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
+ dma_unmap_sgtable(attach->dev, table, dir, 0);
}
static void fastrpc_release(struct dma_buf *dmabuf)
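The two mapping helpers have different calling conventions: dma_map_sg() returns the number of mapped entries and 0 on failure, while dma_map_sgtable() returns 0 on success or a negative errno and records the mapped entry count in sgt->nents itself. A sketch of the new-style call, assuming the caller owns a populated sg_table:

	#include <linux/dma-mapping.h>
	#include <linux/err.h>

	static struct sg_table *map_table_sketch(struct device *dev,
						 struct sg_table *sgt,
						 enum dma_data_direction dir)
	{
		int ret;

		ret = dma_map_sgtable(dev, sgt, dir, 0);
		if (ret)		/* 0 means success here */
			return ERR_PTR(ret);

		return sgt;	/* mapped count now lives in sgt->nents */
	}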
@@ -1037,7 +1042,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
inbuf.pageslen = 1;
inbuf.attrs = init.attrs;
inbuf.siglen = init.siglen;
- fl->pd = 1;
+ fl->pd = USER_PD;
if (init.filelen && init.filefd) {
err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
@@ -1276,7 +1281,7 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
return 0;
}
-static int fastrpc_init_attach(struct fastrpc_user *fl)
+static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
int tgid = fl->tgid;
@@ -1287,7 +1292,7 @@ static int fastrpc_init_attach(struct fastrpc_user *fl)
args[0].fd = -1;
args[0].reserved = 0;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
- fl->pd = 0;
+ fl->pd = pd;
return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, &args[0]);
@@ -1477,7 +1482,10 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
err = fastrpc_invoke(fl, argp);
break;
case FASTRPC_IOCTL_INIT_ATTACH:
- err = fastrpc_init_attach(fl);
+ err = fastrpc_init_attach(fl, AUDIO_PD);
+ break;
+ case FASTRPC_IOCTL_INIT_ATTACH_SNS:
+ err = fastrpc_init_attach(fl, SENSORS_PD);
break;
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
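From user space the new attach path is a plain ioctl on the fastrpc character device; a hedged sketch (the /dev/fastrpc-sdsp node name is an assumption — the driver registers one misc device per DSP domain listed in domains[]):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <misc/fastrpc.h>	/* installed uapi header */

	int main(void)
	{
		int fd = open("/dev/fastrpc-sdsp", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* attach this fd's context to the sensors PD (SENSORS_PD) */
		if (ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH_SNS) < 0)
			perror("FASTRPC_IOCTL_INIT_ATTACH_SNS");
		close(fd);
		return 0;
	}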
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 8eb5d38c618e..1640340d3e62 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -7,7 +7,6 @@ config HABANA_AI
tristate "HabanaAI accelerators (habanalabs)"
depends on PCI && HAS_IOMEM
select FRAME_VECTOR
- select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
select HWMON
help
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index b984bfa4face..eccd8c7dc62d 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -3,5 +3,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
- common/command_submission.o common/mmu.o common/firmware_if.o \
- common/pci.o
+ common/command_submission.o common/mmu.o common/mmu_v1.o \
+ common/firmware_if.o common/pci.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index a8004911c977..ada570f35a41 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -13,15 +13,139 @@
#include <linux/uaccess.h>
#include <linux/genalloc.h>
+static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_vm_va_block *va_block, *tmp;
+ dma_addr_t bus_addr;
+ u64 virt_addr;
+ u32 page_size = prop->pmmu.page_size;
+ s32 offset;
+ int rc;
+
+ if (!hdev->supports_cb_mapping) {
+ dev_err_ratelimited(hdev->dev,
+ "Cannot map CB because no VA range is allocated for CB mapping\n");
+ return -EINVAL;
+ }
+
+ if (!hdev->mmu_enable) {
+ dev_err_ratelimited(hdev->dev,
+ "Cannot map CB because MMU is disabled\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&cb->va_block_list);
+
+ for (bus_addr = cb->bus_address;
+ bus_addr < cb->bus_address + cb->size;
+ bus_addr += page_size) {
+
+ virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
+ if (!virt_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate device virtual address for CB\n");
+ rc = -ENOMEM;
+ goto err_va_pool_free;
+ }
+
+ va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
+ if (!va_block) {
+ rc = -ENOMEM;
+ gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
+ goto err_va_pool_free;
+ }
+
+ va_block->start = virt_addr;
+ va_block->end = virt_addr + page_size;
+ va_block->size = page_size;
+ list_add_tail(&va_block->node, &cb->va_block_list);
+ }
+
+ mutex_lock(&ctx->mmu_lock);
+
+ bus_addr = cb->bus_address;
+ offset = 0;
+ list_for_each_entry(va_block, &cb->va_block_list, node) {
+ rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
+ list_is_last(&va_block->node,
+ &cb->va_block_list));
+ if (rc) {
+ dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
+ va_block->start);
+ goto err_va_umap;
+ }
+
+ bus_addr += va_block->size;
+ offset += va_block->size;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ cb->is_mmu_mapped = true;
+
+ return 0;
+
+err_va_umap:
+ list_for_each_entry(va_block, &cb->va_block_list, node) {
+ if (offset <= 0)
+ break;
+ hl_mmu_unmap(ctx, va_block->start, va_block->size,
+ offset <= va_block->size);
+ offset -= va_block->size;
+ }
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+err_va_pool_free:
+ list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+ gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+
+ return rc;
+}
+
+static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_va_block *va_block, *tmp;
+
+ mutex_lock(&ctx->mmu_lock);
+
+ list_for_each_entry(va_block, &cb->va_block_list, node)
+ if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
+ list_is_last(&va_block->node,
+ &cb->va_block_list)))
+ dev_warn_ratelimited(hdev->dev,
+ "Failed to unmap CB's va 0x%llx\n",
+ va_block->start);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+
+ mutex_unlock(&ctx->mmu_lock);
+
+ list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
+ gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
+ list_del(&va_block->node);
+ kfree(va_block);
+ }
+}
+
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_internal)
gen_pool_free(hdev->internal_cb_pool,
- cb->kernel_address, cb->size);
+ (uintptr_t)cb->kernel_address, cb->size);
else
hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- (void *) (uintptr_t) cb->kernel_address,
- cb->bus_address);
+ cb->kernel_address, cb->bus_address);
kfree(cb);
}
@@ -47,6 +171,11 @@ static void cb_release(struct kref *ref)
hl_debugfs_remove_cb(cb);
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
+
+ hl_ctx_put(cb->ctx);
+
cb_do_release(hdev, cb);
}
@@ -100,18 +229,19 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
return NULL;
}
- cb->kernel_address = (u64) (uintptr_t) p;
+ cb->kernel_address = p;
cb->size = cb_size;
return cb;
}
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
+ struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+ bool map_cb, u64 *handle)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
- int rc;
+ int rc, ctx_id = ctx->asid;
/*
* Can't use generic function to check this because of special case
@@ -163,7 +293,21 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
}
cb->hdev = hdev;
- cb->ctx_id = ctx_id;
+ cb->ctx = ctx;
+ hl_ctx_get(hdev, cb->ctx);
+
+ if (map_cb) {
+ if (ctx_id == HL_KERNEL_ASID_ID) {
+ dev_err(hdev->dev,
+ "CB mapping is not supported for kernel context\n");
+ rc = -EINVAL;
+ goto release_cb;
+ }
+
+ rc = cb_map_mem(ctx, cb);
+ if (rc)
+ goto release_cb;
+ }
spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
@@ -171,10 +315,10 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
- goto release_cb;
+ goto unmap_mem;
}
- cb->id = rc;
+ cb->id = (u64) rc;
kref_init(&cb->refcount);
spin_lock_init(&cb->lock);
@@ -183,14 +327,18 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
* idr is 32-bit so we can safely OR it with a mask that is above
* 32 bit
*/
- *handle = cb->id | HL_MMAP_CB_MASK;
+ *handle = cb->id | HL_MMAP_TYPE_CB;
*handle <<= PAGE_SHIFT;
hl_debugfs_add_cb(cb);
return 0;
+unmap_mem:
+ if (cb->is_mmu_mapped)
+ cb_unmap_mem(cb->ctx, cb);
release_cb:
+ hl_ctx_put(cb->ctx);
cb_do_release(hdev, cb);
out_err:
*handle = 0;
@@ -250,9 +398,10 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.cb_size, HL_MAX_CB_SIZE);
rc = -EINVAL;
} else {
- rc = hl_cb_create(hdev, &hpriv->cb_mgr,
- args->in.cb_size, &handle,
- hpriv->ctx->asid, false);
+ rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+ args->in.cb_size, false,
+ !!(args->in.flags & HL_CB_FLAGS_MAP),
+ &handle);
}
memset(args, 0, sizeof(*args));
@@ -300,11 +449,14 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_cb *cb;
- phys_addr_t address;
u32 handle, user_cb_size;
int rc;
+ /* We use the page offset to hold the idr and thus we need to clear
+ * it before doing the mmap itself
+ */
handle = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
/* reference was taken here */
cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
@@ -356,12 +508,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
vma->vm_private_data = cb;
- /* Calculate address for CB */
- address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
-
rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
- address, cb->size);
-
+ cb->bus_address, cb->size);
if (rc) {
spin_lock(&cb->lock);
cb->mmap = false;
@@ -425,7 +573,7 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
if (kref_put(&cb->refcount, cb_release) != 1)
dev_err(hdev->dev,
"CB %d for CTX ID %d is still alive\n",
- id, cb->ctx_id);
+ id, cb->ctx->asid);
}
idr_destroy(&mgr->cb_handles);
@@ -438,8 +586,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
struct hl_cb *cb;
int rc;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
- HL_KERNEL_ASID_ID, internal_cb);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+ internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -495,3 +643,45 @@ int hl_cb_pool_fini(struct hl_device *hdev)
return 0;
}
+
+int hl_cb_va_pool_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!hdev->supports_cb_mapping)
+ return 0;
+
+ ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
+ if (!ctx->cb_va_pool) {
+ dev_err(hdev->dev,
+ "Failed to create VA gen pool for CB mapping\n");
+ return -ENOMEM;
+ }
+
+ rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
+ prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to VA gen pool for CB mapping\n");
+ goto err_pool_destroy;
+ }
+
+ return 0;
+
+err_pool_destroy:
+ gen_pool_destroy(ctx->cb_va_pool);
+
+ return rc;
+}
+
+void hl_cb_va_pool_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (!hdev->supports_cb_mapping)
+ return;
+
+ gen_pool_destroy(ctx->cb_va_pool);
+}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 2e3fcbc794db..b2b974ecc431 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -38,26 +38,10 @@ void hl_sob_reset_error(struct kref *ref)
hw_sob->q_idx, hw_sob->sob_id);
}
-static const char *hl_fence_get_driver_name(struct dma_fence *fence)
-{
- return "HabanaLabs";
-}
-
-static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
-{
- struct hl_cs_compl *hl_cs_compl =
- container_of(fence, struct hl_cs_compl, base_fence);
-
- return dev_name(hl_cs_compl->hdev->dev);
-}
-
-static bool hl_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
-static void hl_fence_release(struct dma_fence *fence)
+static void hl_fence_release(struct kref *kref)
{
+ struct hl_fence *fence =
+ container_of(kref, struct hl_fence, refcount);
struct hl_cs_compl *hl_cs_cmpl =
container_of(fence, struct hl_cs_compl, base_fence);
struct hl_device *hdev = hl_cs_cmpl->hdev;
@@ -99,15 +83,27 @@ static void hl_fence_release(struct dma_fence *fence)
}
free:
- kfree_rcu(hl_cs_cmpl, base_fence.rcu);
+ kfree(hl_cs_cmpl);
}
-static const struct dma_fence_ops hl_fence_ops = {
- .get_driver_name = hl_fence_get_driver_name,
- .get_timeline_name = hl_fence_get_timeline_name,
- .enable_signaling = hl_fence_enable_signaling,
- .release = hl_fence_release
-};
+void hl_fence_put(struct hl_fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, hl_fence_release);
+}
+
+void hl_fence_get(struct hl_fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+}
+
+static void hl_fence_init(struct hl_fence *fence)
+{
+ kref_init(&fence->refcount);
+ fence->error = 0;
+ init_completion(&fence->completion);
+}
static void cs_get(struct hl_cs *cs)
{
@@ -256,6 +252,8 @@ static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
ctx->cs_counters.parsing_drop_cnt;
hdev->aggregated_cs_counters.queue_full_drop_cnt +=
ctx->cs_counters.queue_full_drop_cnt;
+ hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
+ ctx->cs_counters.max_cs_in_flight_drop_cnt;
}
static void cs_do_release(struct kref *ref)
@@ -336,7 +334,7 @@ static void cs_do_release(struct kref *ref)
* In case the wait for signal CS was submitted, the put occurs
* in init_signal_wait_cs() right before hanging on the PQ.
*/
- dma_fence_put(cs->signal_fence);
+ hl_fence_put(cs->signal_fence);
}
/*
@@ -348,19 +346,18 @@ static void cs_do_release(struct kref *ref)
hl_ctx_put(cs->ctx);
/* We need to mark an error for not submitted because in that case
- * the dma fence release flow is different. Mainly, we don't need
+ * the hl fence release flow is different. Mainly, we don't need
* to handle hw_sob for signal/wait
*/
if (cs->timedout)
- dma_fence_set_error(cs->fence, -ETIMEDOUT);
+ cs->fence->error = -ETIMEDOUT;
else if (cs->aborted)
- dma_fence_set_error(cs->fence, -EIO);
+ cs->fence->error = -EIO;
else if (!cs->submitted)
- dma_fence_set_error(cs->fence, -EBUSY);
-
- dma_fence_signal(cs->fence);
- dma_fence_put(cs->fence);
+ cs->fence->error = -EBUSY;
+ complete_all(&cs->fence->completion);
+ hl_fence_put(cs->fence);
cs_counters_aggregate(hdev, cs->ctx);
kfree(cs->jobs_in_queue_cnt);
@@ -401,7 +398,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
struct hl_cs_compl *cs_cmpl;
- struct dma_fence *other = NULL;
+ struct hl_fence *other = NULL;
struct hl_cs *cs;
int rc;
@@ -434,9 +431,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl->cs_seq = ctx->cs_sequence;
other = ctx->cs_pending[cs_cmpl->cs_seq &
(hdev->asic_prop.max_pending_cs - 1)];
- if ((other) && (!dma_fence_is_signaled(other))) {
- dev_dbg(hdev->dev,
+
+ if (other && !completion_done(&other->completion)) {
+ dev_dbg_ratelimited(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
+ ctx->cs_counters.max_cs_in_flight_drop_cnt++;
rc = -EAGAIN;
goto free_fence;
}
@@ -448,8 +447,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
goto free_fence;
}
- dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
- ctx->asid, ctx->cs_sequence);
+ /* init hl_fence */
+ hl_fence_init(&cs_cmpl->base_fence);
cs->sequence = cs_cmpl->cs_seq;
@@ -458,9 +457,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
&cs_cmpl->base_fence;
ctx->cs_sequence++;
- dma_fence_get(&cs_cmpl->base_fence);
+ hl_fence_get(&cs_cmpl->base_fence);
- dma_fence_put(other);
+ hl_fence_put(other);
spin_unlock(&ctx->cs_lock);
@@ -690,8 +689,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = -ENOMEM;
if (is_kernel_allocated_cb)
goto release_cb;
- else
- goto free_cs_object;
+
+ goto free_cs_object;
}
job->id = i + 1;
@@ -773,7 +772,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_chunk *cs_chunk_array, *chunk;
struct hw_queue_properties *hw_queue_prop;
- struct dma_fence *sig_fence = NULL;
+ struct hl_fence *sig_fence = NULL;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -883,14 +882,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
- dma_fence_put(sig_fence);
+ hl_fence_put(sig_fence);
rc = -EINVAL;
goto free_signal_seq_array;
}
- if (dma_fence_is_signaled(sig_fence)) {
+ if (completion_done(&sig_fence->completion)) {
/* signal CS already finished */
- dma_fence_put(sig_fence);
+ hl_fence_put(sig_fence);
rc = 0;
goto free_signal_seq_array;
}
@@ -902,7 +901,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
rc = allocate_cs(hdev, ctx, cs_type, &cs);
if (rc) {
if (cs_type == CS_TYPE_WAIT)
- dma_fence_put(sig_fence);
+ hl_fence_put(sig_fence);
hl_ctx_put(ctx);
goto free_signal_seq_array;
}
@@ -1162,7 +1161,7 @@ out:
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
- struct dma_fence *fence;
+ struct hl_fence *fence;
unsigned long timeout;
long rc;
@@ -1181,12 +1180,18 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
"Can't wait on CS %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
} else if (fence) {
- rc = dma_fence_wait_timeout(fence, true, timeout);
+ if (!timeout_us)
+ rc = completion_done(&fence->completion);
+ else
+ rc = wait_for_completion_interruptible_timeout(
+ &fence->completion, timeout);
+
if (fence->error == -ETIMEDOUT)
rc = -ETIMEDOUT;
else if (fence->error == -EIO)
rc = -EIO;
- dma_fence_put(fence);
+
+ hl_fence_put(fence);
} else {
dev_dbg(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 3e375958e73b..7a59dd7c6450 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -12,6 +12,7 @@
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
+ u64 idle_mask = 0;
int i;
/*
@@ -23,11 +24,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
*/
for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
- dma_fence_put(ctx->cs_pending[i]);
+ hl_fence_put(ctx->cs_pending[i]);
kfree(ctx->cs_pending);
if (ctx->asid != HL_KERNEL_ASID_ID) {
+ dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);
+
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
@@ -37,9 +40,18 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
hl_device_set_debug_mode(hdev, false);
+ hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
+
+ if ((!hdev->pldm) && (hdev->pdev) &&
+ (!hdev->asic_funcs->is_device_idle(hdev,
+ &idle_mask, NULL)))
+ dev_notice(hdev->dev,
+ "device not idle after user context is closed (0x%llx)\n",
+ idle_mask);
} else {
+ dev_dbg(hdev->dev, "closing kernel context\n");
hl_mmu_ctx_fini(ctx);
}
}
@@ -128,7 +140,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
atomic_set(&ctx->thread_ctx_switch_token, 1);
ctx->thread_ctx_switch_wait_token = 0;
ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
- sizeof(struct dma_fence *),
+ sizeof(struct hl_fence *),
GFP_KERNEL);
if (!ctx->cs_pending)
return -ENOMEM;
@@ -155,15 +167,26 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
goto err_asid_free;
}
+ rc = hl_cb_va_pool_init(ctx);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to init VA pool for mapped CB\n");
+ goto err_vm_ctx_fini;
+ }
+
rc = hdev->asic_funcs->ctx_init(ctx);
if (rc) {
dev_err(hdev->dev, "ctx_init failed\n");
- goto err_vm_ctx_fini;
+ goto err_cb_va_pool_fini;
}
+
+ dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
}
return 0;
+err_cb_va_pool_fini:
+ hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
hl_vm_ctx_fini(ctx);
err_asid_free:
@@ -184,10 +207,10 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
-struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
+struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
- struct dma_fence *fence;
+ struct hl_fence *fence;
spin_lock(&ctx->cs_lock);
@@ -201,8 +224,9 @@ struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
return NULL;
}
- fence = dma_fence_get(
- ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)]);
+ fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
+ hl_fence_get(fence);
+
spin_unlock(&ctx->cs_lock);
return fence;
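cs_pending is a power-of-two ring: a sequence number maps to a slot with seq & (max_pending_cs - 1), which is why allocate_cs() returns -EAGAIN while the fence previously stored in that slot is still unsignalled. A standalone illustration of the slot math, with an assumed ring size:

	#include <stdio.h>

	#define MAX_PENDING_CS 64	/* must be a power of two */

	int main(void)
	{
		unsigned long long seq;

		/* seq 64..67 wrap onto the slots used by seq 0..3 */
		for (seq = 62; seq < 68; seq++)
			printf("seq %llu -> slot %llu\n",
			       seq, seq & (MAX_PENDING_CS - 1));
		return 0;
	}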
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index aa77771635d3..912ddfa360b1 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -21,7 +21,7 @@ static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
u8 i2c_reg, long *val)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
if (hl_device_disabled_or_in_reset(hdev))
@@ -29,8 +29,8 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_RD <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
@@ -47,7 +47,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
u8 i2c_reg, u32 val)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
if (hl_device_disabled_or_in_reset(hdev))
@@ -55,8 +55,8 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_I2C_WR <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
@@ -73,7 +73,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
if (hl_device_disabled_or_in_reset(hdev))
@@ -81,8 +81,8 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_LED_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.led_index = cpu_to_le32(led);
pkt.value = cpu_to_le64(state);
@@ -110,8 +110,8 @@ static int command_buffers_show(struct seq_file *s, void *data)
seq_puts(s, "---------------------------------------------------------------\n");
}
seq_printf(s,
- " %03d %d 0x%08x %d %d %d\n",
- cb->id, cb->ctx_id, cb->size,
+ " %03llu %d 0x%08x %d %d %d\n",
+ cb->id, cb->ctx->asid, cb->size,
kref_read(&cb->refcount),
cb->mmap, cb->cs_cnt);
}
@@ -354,6 +354,14 @@ static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
mmu_specs->hop4_shift);
}
+static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask,
+ mmu_specs->hop5_shift);
+}
+
static inline u64 get_next_hop_addr(u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
@@ -377,6 +385,7 @@ static int mmu_show(struct seq_file *s, void *data)
hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0,
hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0,
hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0,
+ hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0,
virt_addr = dev_entry->mmu_addr;
if (!hdev->mmu_enable)
@@ -428,20 +437,49 @@ static int mmu_show(struct seq_file *s, void *data)
hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
- if (!(hop3_pte & LAST_MASK)) {
+ if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
+ if (!(hop3_pte & LAST_MASK)) {
+ hop4_addr = get_next_hop_addr(hop3_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
+ hop4_addr, virt_addr);
+ hop4_pte = hdev->asic_funcs->read_pte(hdev,
+ hop4_pte_addr);
+ if (!(hop4_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ } else {
+ if (!(hop3_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ }
+ } else {
hop4_addr = get_next_hop_addr(hop3_pte);
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
- hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
- if (!(hop4_pte & PAGE_PRESENT_MASK))
- goto not_mapped;
- } else {
- if (!(hop3_pte & PAGE_PRESENT_MASK))
- goto not_mapped;
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop,
+ hop4_addr, virt_addr);
+ hop4_pte = hdev->asic_funcs->read_pte(hdev,
+ hop4_pte_addr);
+ if (!(hop4_pte & LAST_MASK)) {
+ hop5_addr = get_next_hop_addr(hop4_pte);
+
+ if (hop5_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop,
+ hop5_addr, virt_addr);
+ hop5_pte = hdev->asic_funcs->read_pte(hdev,
+ hop5_pte_addr);
+ if (!(hop5_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ } else {
+ if (!(hop4_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+ }
}
seq_printf(s, "asid: %u, virt_addr: 0x%llx\n",
@@ -463,10 +501,22 @@ static int mmu_show(struct seq_file *s, void *data)
seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr);
seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte);
- if (!(hop3_pte & LAST_MASK)) {
+ if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) {
+ if (!(hop3_pte & LAST_MASK)) {
+ seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
+ seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
+ seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
+ }
+ } else {
seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr);
seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr);
seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte);
+
+ if (!(hop4_pte & LAST_MASK)) {
+ seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr);
+ seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr);
+ seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte);
+ }
}
goto out;
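Each hop in the dump above is resolved with the same arithmetic: a hop table is an array of PTEs, and the virtual address contributes one index per hop. A sketch mirroring get_hopN_pte_addr(), under the assumption that a PTE occupies mmu_pte_size bytes:

	static u64 hop_pte_addr_sketch(u64 hop_table_addr, u64 virt_addr,
				       u64 mask, u64 shift, u32 mmu_pte_size)
	{
		/* isolate this hop's index bits, then scale to a byte offset */
		return hop_table_addr +
		       ((virt_addr & mask) >> shift) * mmu_pte_size;
	}

The hop5 additions simply supply one more mask/shift pair for MMU configurations whose translation walks past hop 4.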
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 24b01cce0a38..20572224099a 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -123,9 +123,13 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct hl_fpriv *hpriv = filp->private_data;
+ unsigned long vm_pgoff;
- if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) {
- vma->vm_pgoff ^= HL_MMAP_CB_MASK;
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
+
+ switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
+ case HL_MMAP_TYPE_CB:
return hl_cb_mmap(hpriv, vma);
}
@@ -286,7 +290,7 @@ static int device_early_init(struct hl_device *hdev)
}
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
- snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+ snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
if (hdev->cq_wq[i] == NULL) {
dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
@@ -317,6 +321,10 @@ static int device_early_init(struct hl_device *hdev)
goto free_chip_info;
}
+ rc = hl_mmu_if_set_funcs(hdev);
+ if (rc)
+ goto free_idle_busy_ts_arr;
+
hl_cb_mgr_init(&hdev->kernel_cb_mgr);
mutex_init(&hdev->send_cpu_message_lock);
@@ -330,6 +338,8 @@ static int device_early_init(struct hl_device *hdev)
return 0;
+free_idle_busy_ts_arr:
+ kfree(hdev->idle_busy_ts_arr);
free_chip_info:
kfree(hdev->hl_chip_info);
free_eq_wq:
@@ -871,7 +881,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
* so this message won't be sent
*/
if (hl_fw_send_pci_access_msg(hdev,
- ARMCP_PACKET_DISABLE_PCI_ACCESS))
+ CPUCP_PACKET_DISABLE_PCI_ACCESS))
dev_warn(hdev->dev,
"Failed to disable PCI access by F/W\n");
}
@@ -957,14 +967,13 @@ again:
flush_workqueue(hdev->eq_wq);
}
- /* Release kernel context */
- if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
- hdev->kernel_ctx = NULL;
-
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, hard_reset);
if (hard_reset) {
+ /* Release kernel context */
+ if (hl_ctx_put(hdev->kernel_ctx) == 1)
+ hdev->kernel_ctx = NULL;
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_reset(hdev, &hdev->event_queue);
@@ -1455,13 +1464,13 @@ void hl_device_fini(struct hl_device *hdev)
hl_cb_pool_fini(hdev);
+ /* Reset the H/W. It will be in idle state after this returns */
+ hdev->asic_funcs->hw_fini(hdev, true);
+
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
dev_err(hdev->dev, "kernel ctx is still alive\n");
- /* Reset the H/W. It will be in idle state after this returns */
- hdev->asic_funcs->hw_fini(hdev, true);
-
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index f52bc690dfc5..cd41c7ceb0e7 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -68,9 +68,9 @@ out:
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
- struct armcp_packet pkt = {};
+ struct cpucp_packet pkt = {};
- pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
sizeof(pkt), 0, NULL);
@@ -79,7 +79,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, long *result)
{
- struct armcp_packet *pkt;
+ struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
u32 tmp;
int rc = 0;
@@ -111,7 +111,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
}
rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
- (tmp == ARMCP_PACKET_FENCE_VAL), 1000,
+ (tmp == CPUCP_PACKET_FENCE_VAL), 1000,
timeout, true);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
@@ -124,12 +124,12 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
tmp = le32_to_cpu(pkt->ctl);
- rc = (tmp & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
+ rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
rc,
- (tmp & ARMCP_PKT_CTL_OPCODE_MASK)
- >> ARMCP_PKT_CTL_OPCODE_SHIFT);
+ (tmp & CPUCP_PKT_CTL_OPCODE_MASK)
+ >> CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = -EIO;
} else if (result) {
*result = (long) le64_to_cpu(pkt->result);
@@ -145,14 +145,14 @@ out:
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -167,15 +167,15 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
size_t irq_arr_size)
{
- struct armcp_unmask_irq_arr_packet *pkt;
+ struct cpucp_unmask_irq_arr_packet *pkt;
size_t total_pkt_size;
long result;
int rc;
- total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
irq_arr_size;
- /* data should be aligned to 8 bytes in order to ArmCP to copy it */
+ /* data should be aligned to 8 bytes in order for CPU-CP to copy it */
total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
/* total_pkt_size is casted to u16 later on */
@@ -191,8 +191,8 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
memcpy(&pkt->irqs, irq_arr, irq_arr_size);
- pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
@@ -207,19 +207,19 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
- struct armcp_packet test_pkt = {};
+ struct cpucp_packet test_pkt = {};
long result;
int rc;
- test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+ test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
sizeof(test_pkt), 0, &result);
if (!rc) {
- if (result != ARMCP_PACKET_FENCE_VAL)
+ if (result != CPUCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (0x%08lX)\n", result);
} else {
@@ -251,61 +251,61 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
- struct armcp_packet hb_pkt = {};
+ struct cpucp_packet hb_pkt = {};
long result;
int rc;
- hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
+ hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), 0, &result);
- if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
+ if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
rc = -EIO;
return rc;
}
-int hl_fw_armcp_info_get(struct hl_device *hdev)
+int hl_fw_cpucp_info_get(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct armcp_packet pkt = {};
- void *armcp_info_cpu_addr;
- dma_addr_t armcp_info_dma_addr;
+ struct cpucp_packet pkt = {};
+ void *cpucp_info_cpu_addr;
+ dma_addr_t cpucp_info_dma_addr;
long result;
int rc;
- armcp_info_cpu_addr =
+ cpucp_info_cpu_addr =
hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct armcp_info),
- &armcp_info_dma_addr);
- if (!armcp_info_cpu_addr) {
+ sizeof(struct cpucp_info),
+ &cpucp_info_dma_addr);
+ if (!cpucp_info_cpu_addr) {
dev_err(hdev->dev,
- "Failed to allocate DMA memory for ArmCP info packet\n");
+ "Failed to allocate DMA memory for CPU-CP info packet\n");
return -ENOMEM;
}
- memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
+ memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
- pkt.addr = cpu_to_le64(armcp_info_dma_addr);
- pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_ARMCP_INFO_TIMEOUT_USEC, &result);
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to handle ArmCP info pkt, error %d\n", rc);
+ "Failed to handle CPU-CP info pkt, error %d\n", rc);
goto out;
}
- memcpy(&prop->armcp_info, armcp_info_cpu_addr,
- sizeof(prop->armcp_info));
+ memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
+ sizeof(prop->cpucp_info));
- rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
+ rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
if (rc) {
dev_err(hdev->dev,
"Failed to build hwmon channel info, error %d\n", rc);
@@ -315,14 +315,14 @@ int hl_fw_armcp_info_get(struct hl_device *hdev)
out:
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct armcp_info), armcp_info_cpu_addr);
+ sizeof(struct cpucp_info), cpucp_info_cpu_addr);
return rc;
}
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
- struct armcp_packet pkt = {};
+ struct cpucp_packet pkt = {};
void *eeprom_info_cpu_addr;
dma_addr_t eeprom_info_dma_addr;
long result;
@@ -333,23 +333,24 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
max_size, &eeprom_info_dma_addr);
if (!eeprom_info_cpu_addr) {
dev_err(hdev->dev,
- "Failed to allocate DMA memory for ArmCP EEPROM packet\n");
+ "Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
return -ENOMEM;
}
memset(eeprom_info_cpu_addr, 0, max_size);
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
pkt.data_max_size = cpu_to_le32(max_size);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- HL_ARMCP_EEPROM_TIMEOUT_USEC, &result);
+ HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
- "Failed to handle ArmCP EEPROM packet, error %d\n", rc);
+ "Failed to handle CPU-CP EEPROM packet, error %d\n",
+ rc);
goto out;
}
@@ -363,6 +364,77 @@ out:
return rc;
}
+int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
+ struct hl_info_pci_counters *counters)
+{
+ struct cpucp_packet pkt = {};
+ long result;
+ int rc;
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ /* Fetch PCI rx counter */
+ pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ return rc;
+ }
+ counters->rx_throughput = result;
+
+ /* Fetch PCI tx counter */
+ pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ return rc;
+ }
+ counters->tx_throughput = result;
+
+ /* Fetch PCI replay counter */
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+ return rc;
+ }
+ counters->replay_cnt = (u32) result;
+
+ return rc;
+}
+
+int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
+{
+ struct cpucp_packet pkt = {};
+ long result;
+ int rc;
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CpuCP total energy pkt, error %d\n",
+ rc);
+ return rc;
+ }
+
+ *total_energy = result;
+
+ return rc;
+}
+
static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
{
u32 err_val;
@@ -402,8 +474,11 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
"Device boot error - NIC F/W initialization failed\n");
}
-static void hl_detect_cpu_boot_status(struct hl_device *hdev, u32 status)
+static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
+ /* Some of the status codes below are deprecated in newer f/w
+ * versions but we keep them here for backward compatibility
+ */
switch (status) {
case CPU_BOOT_STATUS_NA:
dev_err(hdev->dev,
@@ -449,6 +524,48 @@ static void hl_detect_cpu_boot_status(struct hl_device *hdev, u32 status)
}
}
+int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 boot_err0_reg, u32 timeout)
+{
+ u32 status;
+ int rc;
+
+ if (!hdev->cpu_enable)
+ return 0;
+
+ /* Need to check two possible scenarios:
+ *
+ * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
+ * the preboot is waiting for the boot fit
+ *
+ * All other status values - for older firmwares where the uboot was
+ * loaded from the FLASH
+ */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_IN_UBOOT) ||
+ (status == CPU_BOOT_STATUS_DRAM_RDY) ||
+ (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
+ (status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
+ 10000,
+ timeout);
+
+ if (rc) {
+ dev_err(hdev->dev, "Failed to read preboot version\n");
+ detect_cpu_boot_status(hdev, status);
+ fw_read_errors(hdev, boot_err0_reg);
+ return -EIO;
+ }
+
+ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
+ return 0;
+}
+
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 boot_err0_reg, bool skip_bmc,
@@ -514,15 +631,11 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
10000,
cpu_timeout);
- /* Read U-Boot, preboot versions now in case we will later fail */
+ /* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
- hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
- /* Some of the status codes below are deprecated in newer f/w
- * versions but we keep them here for backward compatibility
- */
if (rc) {
- hl_detect_cpu_boot_status(hdev, status);
+ detect_cpu_boot_status(hdev, status);
rc = -EIO;
goto out;
}
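All of these helpers share one packet convention: the opcode is shifted into the upper bits of the little-endian ctl word, the packet is handed to the ASIC's send_cpu_message() callback, and the firmware's return code comes back through the same fields. A sketch of the pattern, assuming an hdev pointer is in scope:

	struct cpucp_packet pkt = {};
	long result;
	int rc;

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
						sizeof(pkt), 0, &result);
	if (!rc && result != CPUCP_PACKET_FENCE_VAL)
		rc = -EIO;	/* firmware replied but echoed a bad fence */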
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index edbd627b29d2..6ed974d2def0 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -8,21 +8,33 @@
#ifndef HABANALABSP_H_
#define HABANALABSP_H_
-#include "../include/common/armcp_if.h"
+#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include <uapi/misc/habanalabs.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
-#include <linux/dma-fence.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
+#include <linux/bitfield.h>
#define HL_NAME "habanalabs"
-#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
+/* Use upper bits of mmap offset to store habana driver specific information.
+ * bits[63:62] - Encode mmap type
+ * bits[45:0] - mmap offset value
+ *
+ * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
+ * defines are w.r.t. PAGE_SIZE
+ */
+#define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
+#define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT)
+#define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)
+
+#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT)
+#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
#define HL_PENDING_RESET_PER_SEC 30
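A worked example of the encoding, assuming PAGE_SHIFT == 12 (so the type bits land at bits [51:50] of vm_pgoff) and mirroring the handle math in hl_cb_create():

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define HL_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)	/* 50 */
	#define HL_MMAP_TYPE_MASK	(0x3ull << HL_MMAP_TYPE_SHIFT)
	#define HL_MMAP_TYPE_CB		(0x2ull << HL_MMAP_TYPE_SHIFT)
	#define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT)
	#define HL_MMAP_OFFSET_VALUE_GET(off) \
		((off) & HL_MMAP_OFFSET_VALUE_MASK)

	int main(void)
	{
		unsigned long long id = 5;	/* CB handle from the IDR */
		/* what hl_cb_create() returns to user space */
		unsigned long long handle = (id | HL_MMAP_TYPE_CB) << PAGE_SHIFT;
		/* what the kernel sees in vma->vm_pgoff after mmap() */
		unsigned long long vm_pgoff = handle >> PAGE_SHIFT;

		printf("type: %#llx (CB)\n", vm_pgoff & HL_MMAP_TYPE_MASK);
		printf("id:   %llu\n", HL_MMAP_OFFSET_VALUE_GET(vm_pgoff));
		return 0;
	}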
@@ -34,8 +46,8 @@
#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
-#define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
-#define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
@@ -66,6 +78,8 @@
#define HL_PCI_NUM_BARS 6
+#define HL_MAX_DCORES 4
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -222,12 +236,15 @@ enum hl_device_hw_state {
* @hop2_shift: shift of hop 2 mask.
* @hop3_shift: shift of hop 3 mask.
* @hop4_shift: shift of hop 4 mask.
+ * @hop5_shift: shift of hop 5 mask.
* @hop0_mask: mask to get the PTE address in hop 0.
* @hop1_mask: mask to get the PTE address in hop 1.
* @hop2_mask: mask to get the PTE address in hop 2.
* @hop3_mask: mask to get the PTE address in hop 3.
* @hop4_mask: mask to get the PTE address in hop 4.
+ * @hop5_mask: mask to get the PTE address in hop 5.
* @page_size: default page size used to allocate memory.
+ * @num_hops: The amount of hops supported by the translation table.
*/
struct hl_mmu_properties {
u64 start_addr;
@@ -237,18 +254,21 @@ struct hl_mmu_properties {
u64 hop2_shift;
u64 hop3_shift;
u64 hop4_shift;
+ u64 hop5_shift;
u64 hop0_mask;
u64 hop1_mask;
u64 hop2_mask;
u64 hop3_mask;
u64 hop4_mask;
+ u64 hop5_mask;
u32 page_size;
+ u32 num_hops;
};
/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
- * @armcp_info: received various information from ArmCP regarding the H/W, e.g.
+ * @cpucp_info: various information received from CPU-CP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
@@ -271,6 +291,10 @@ struct hl_mmu_properties {
* @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
* @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @cb_va_start_addr: virtual start address of command buffers which are mapped
+ * to the device's MMU.
+ * @cb_va_end_addr: virtual end address of command buffers which are mapped to
+ * the device's MMU.
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -292,12 +316,16 @@ struct hl_mmu_properties {
* @max_queues: maximum amount of queues in the system
* @sync_stream_first_sob: first sync object available for sync stream use
* @sync_stream_first_mon: first monitor available for sync stream use
+ * @first_available_user_sob: first sob available for the user
+ * @first_available_user_mon: first monitor available for the user
* @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
+ * @fw_security_disabled: true if security measures are disabled in firmware,
+ * false otherwise
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
- struct armcp_info armcp_info;
+ struct cpucp_info cpucp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
struct hl_mmu_properties dmmu;
@@ -317,6 +345,8 @@ struct asic_fixed_properties {
u64 pcie_aux_dbi_reg_addr;
u64 mmu_pgt_addr;
u64 mmu_dram_default_page_addr;
+ u64 cb_va_start_addr;
+ u64 cb_va_end_addr;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -338,13 +368,29 @@ struct asic_fixed_properties {
u32 max_queues;
u16 sync_stream_first_sob;
u16 sync_stream_first_mon;
+ u16 first_available_user_sob[HL_MAX_DCORES];
+ u16 first_available_user_mon[HL_MAX_DCORES];
u8 tpc_enabled_mask;
u8 completion_queues_count;
+ u8 fw_security_disabled;
+};
+
+/**
+ * struct hl_fence - software synchronization primitive
+ * @completion: fence is implemented using completion
+ * @refcount: refcount for this fence
+ * @error: mark this fence with error
+ *
+ */
+struct hl_fence {
+ struct completion completion;
+ struct kref refcount;
+ int error;
};
/**
* struct hl_cs_compl - command submission completion object.
- * @base_fence: kernel fence object.
+ * @base_fence: hl fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
@@ -353,7 +399,7 @@ struct asic_fixed_properties {
* @sob_val: the SOB value that is used in this signal/wait CS.
*/
struct hl_cs_compl {
- struct dma_fence base_fence;
+ struct hl_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
@@ -380,36 +426,41 @@ struct hl_cb_mgr {
* struct hl_cb - describes a Command Buffer.
* @refcount: reference counter for usage of the CB.
* @hdev: pointer to device this CB belongs to.
+ * @ctx: pointer to the CB owner's context.
* @lock: spinlock to protect mmap/cs flows.
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
+ * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
+ * the device's MMU.
+ * @id: the CB's ID.
* @kernel_address: Holds the CB's kernel virtual address.
* @bus_address: Holds the CB's DMA address.
* @mmap_size: Holds the CB's size that was mmaped.
* @size: holds the CB's size.
- * @id: the CB's ID.
* @cs_cnt: holds number of CS that this CB participates in.
- * @ctx_id: holds the ID of the owner's context.
* @mmap: true if the CB is currently mmaped to user.
* @is_pool: true if CB was acquired from the pool, false otherwise.
* @is_internal: internaly allocated
+ * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
struct kref refcount;
struct hl_device *hdev;
+ struct hl_ctx *ctx;
spinlock_t lock;
struct list_head debugfs_list;
struct list_head pool_list;
- u64 kernel_address;
+ struct list_head va_block_list;
+ u64 id;
+ void *kernel_address;
dma_addr_t bus_address;
u32 mmap_size;
u32 size;
- u32 id;
u32 cs_cnt;
- u32 ctx_id;
u8 mmap;
u8 is_pool;
u8 is_internal;
+ u8 is_mmu_mapped;
};
@@ -435,7 +486,7 @@ struct hl_cs_job;
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
-/* Host <-> ArmCP shared memory size */
+/* Host <-> CPU-CP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
/**
@@ -464,7 +515,7 @@ struct hl_hw_queue {
struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
struct hl_cs_job **shadow_queue;
enum hl_queue_type queue_type;
- u64 kernel_address;
+ void *kernel_address;
dma_addr_t bus_address;
u32 pi;
atomic_t ci;
@@ -493,7 +544,7 @@ struct hl_hw_queue {
*/
struct hl_cq {
struct hl_device *hdev;
- u64 kernel_address;
+ void *kernel_address;
dma_addr_t bus_address;
u32 cq_idx;
u32 hw_queue_id;
@@ -511,7 +562,7 @@ struct hl_cq {
*/
struct hl_eq {
struct hl_device *hdev;
- u64 kernel_address;
+ void *kernel_address;
dma_addr_t bus_address;
u32 ci;
};
@@ -617,7 +668,7 @@ enum div_select_defs {
* @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
* @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
* @add_device_attr: add ASIC specific device attributes.
- * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
+ * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
* @set_pll_profile: change PLL profile (manual/automatic).
* @get_events_stat: retrieve event queue entries histogram.
* @read_pte: read MMU page table entry from DRAM.
@@ -626,7 +677,7 @@ enum div_select_defs {
* (L1 only) or hard (L0 & L1) flush.
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
- * @send_heartbeat: send is-alive packet to ArmCP and verify response.
+ * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
* @set_clock_gating: enable/disable clock gating per engine according to
* clock gating mask in hdev
* @disable_clock_gating: disable clock gating completely
@@ -644,8 +695,6 @@ enum div_select_defs {
* ASIC
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
- * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
- * old address the bar pointed to or U64_MAX for failure
* @init_iatu: Initialize the iATU unit inside the PCI controller.
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
@@ -679,7 +728,7 @@ struct hl_asic_funcs {
int (*suspend)(struct hl_device *hdev);
int (*resume)(struct hl_device *hdev);
int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
- u64 kaddress, phys_addr_t paddress, u32 size);
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
struct hl_bd *bd);
@@ -708,7 +757,7 @@ struct hl_asic_funcs {
u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
struct sg_table *sgt);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
- u64 kernel_address, u32 len,
+ void *kernel_address, u32 len,
u64 cq_addr, u32 cq_val, u32 msix_num,
bool eb);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
@@ -736,7 +785,7 @@ struct hl_asic_funcs {
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
+ bool (*is_device_idle)(struct hl_device *hdev, u64 *mask,
struct seq_file *s);
int (*soft_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
@@ -748,7 +797,6 @@ struct hl_asic_funcs {
u16 len, u32 timeout, long *result);
enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
int (*pci_bars_map)(struct hl_device *hdev);
- u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
int (*init_iatu)(struct hl_device *hdev);
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
@@ -800,7 +848,7 @@ struct hl_va_range {
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
* this hits 0l. It is incremented on CS and CS_WAIT.
- * @cs_pending: array of DMA fence objects representing pending CS.
+ * @cs_pending: array of hl fence objects representing pending CS.
* @host_va_range: holds available virtual addresses for host mappings.
* @host_huge_va_range: holds available virtual addresses for host mappings
* with huge pages.
@@ -809,6 +857,8 @@ struct hl_va_range {
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
* MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
+ * @cb_va_pool: device VA pool for command buffers which are mapped to the
+ * device's MMU.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to the user so the user can inquire about the CS. It is used as an
* index to the cs_pending array.
@@ -832,7 +882,7 @@ struct hl_ctx {
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
- struct dma_fence **cs_pending;
+ struct hl_fence **cs_pending;
struct hl_va_range *host_va_range;
struct hl_va_range *host_huge_va_range;
struct hl_va_range *dram_va_range;
@@ -840,6 +890,7 @@ struct hl_ctx {
struct mutex mmu_lock;
struct list_head debugfs_list;
struct hl_cs_counters cs_counters;
+ struct gen_pool *cb_va_pool;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -919,8 +970,8 @@ struct hl_cs {
struct list_head job_list;
spinlock_t job_lock;
struct kref refcount;
- struct dma_fence *fence;
- struct dma_fence *signal_fence;
+ struct hl_fence *fence;
+ struct hl_fence *signal_fence;
struct work_struct finish_work;
struct delayed_work work_tdr;
struct list_head mirror_node;
@@ -1331,13 +1382,13 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
for (;;) { \
/* Verify we read updates done by other cores or by device */ \
mb(); \
- (val) = *((u32 *) (uintptr_t) (addr)); \
+ (val) = *((u32 *)(addr)); \
if (mem_written_by_device) \
(val) = le32_to_cpu(*(__le32 *) &(val)); \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = *((u32 *) (uintptr_t) (addr)); \
+ (val) = *((u32 *)(addr)); \
if (mem_written_by_device) \
(val) = le32_to_cpu(*(__le32 *) &(val)); \
break; \
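
The hunk above can drop the uintptr_t casts because the polled address is now a real pointer. As a rough sketch of the pattern the macro implements — simplified, with no sleep interval, and omitting the macro's final re-read after the timeout expires:

#include <linux/ktime.h>
#include <linux/errno.h>
#include <asm/barrier.h>

static int poll_u32_sketch(u32 *addr, u32 expected, u64 timeout_us,
			   bool mem_written_by_device)
{
	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
	u32 val;

	for (;;) {
		/* see updates done by other cores or by the device */
		mb();
		val = *addr;
		if (mem_written_by_device)
			val = le32_to_cpu(*(__le32 *)&val);
		if (val == expected)
			return 0;
		/* timeout_us == 0 means poll forever, as in the macro */
		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;
	}
}
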
@@ -1395,6 +1446,44 @@ struct hl_device_idle_busy_ts {
ktime_t busy_to_idle_ts;
};
+
+/**
+ * struct hl_mmu_priv - used for holding per-device MMU internal information.
+ * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
+ * @mmu_shadow_hop0: shadow array of hop0 tables.
+ */
+struct hl_mmu_priv {
+ struct gen_pool *mmu_pgt_pool;
+ void *mmu_shadow_hop0;
+};
+
+/**
+ * struct hl_mmu_funcs - device-related MMU functions.
+ * @init: initialize the MMU module.
+ * @fini: release the MMU module.
+ * @ctx_init: initialize a context for using the MMU module.
+ * @ctx_fini: disable a ctx from using the MMU module.
+ * @map: map a virtual address to a physical address for a context.
+ * @unmap: unmap a virtual address of a context.
+ * @flush: flush all writes from all cores to reach device MMU.
+ * @swap_out: mark all mappings of the given context as swapped out.
+ * @swap_in: mark all mappings of the given context as swapped in.
+ */
+struct hl_mmu_funcs {
+ int (*init)(struct hl_device *hdev);
+ void (*fini)(struct hl_device *hdev);
+ int (*ctx_init)(struct hl_ctx *ctx);
+ void (*ctx_fini)(struct hl_ctx *ctx);
+ int (*map)(struct hl_ctx *ctx,
+ u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool is_dram_addr);
+ int (*unmap)(struct hl_ctx *ctx,
+ u64 virt_addr, bool is_dram_addr);
+ void (*flush)(struct hl_ctx *ctx);
+ void (*swap_out)(struct hl_ctx *ctx);
+ void (*swap_in)(struct hl_ctx *ctx);
+};
+
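
A hedged sketch of how an ASIC-specific implementation might populate this ops table; the function names below are illustrative, while the real v1 implementation is registered via hl_mmu_v1_set_funcs(), declared later in this header:

static int mmu_v1_init_sketch(struct hl_device *hdev)
{
	/* e.g. create the page-table gen_pool and the shadow hop0
	 * array that used to live directly in struct hl_device
	 */
	return 0;
}

void mmu_v1_set_funcs_sketch(struct hl_device *hdev)
{
	struct hl_mmu_funcs *mmu = &hdev->mmu_func;

	mmu->init = mmu_v1_init_sketch;
	/* fini, ctx_init, ctx_fini, map, unmap, flush, swap_out and
	 * swap_in are wired up the same way
	 */
}
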
/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
@@ -1407,8 +1496,8 @@ struct hl_device_idle_busy_ts {
* @dev: related kernel basic device structure.
* @dev_ctrl: related kernel device structure for the control device
* @work_freq: delayed work to lower device frequency if possible.
- * @work_heartbeat: delayed work for ArmCP is-alive check.
- * @asic_name: ASIC specific nmae.
+ * @work_heartbeat: delayed work for CPU-CP is-alive check.
+ * @asic_name: ASIC specific name.
* @asic_type: ASIC specific type.
* @completion_queue: array of hl_cq.
* @cq_wq: work queues of completion queues for executing work in process
@@ -1419,22 +1508,20 @@ struct hl_device_idle_busy_ts {
* @hw_queues_mirror_list: CS mirror list for TDR.
* @hw_queues_mirror_lock: protects hw_queues_mirror_list.
* @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
- * @event_queue: event queue for IRQ from ArmCP.
+ * @event_queue: event queue for IRQ from CPU-CP.
* @dma_pool: DMA pool for small allocations.
- * @cpu_accessible_dma_mem: Host <-> ArmCP shared memory CPU address.
- * @cpu_accessible_dma_address: Host <-> ArmCP shared memory DMA address.
- * @cpu_accessible_dma_pool: Host <-> ArmCP shared memory pool.
+ * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
+ * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
+ * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
* @asid_bitmap: holds used/available ASIDs.
* @asid_mutex: protects asid_bitmap.
- * @send_cpu_message_lock: enforces only one message in Host <-> ArmCP queue.
+ * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
* @debug_lock: protects critical section of setting debug mode for device
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
- * @mmu_pgt_pool: pool of available MMU hops.
* @vm: virtual memory manager for MMU.
* @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
- * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
* @hwmon_dev: H/W monitor device.
* @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
@@ -1452,6 +1539,8 @@ struct hl_device_idle_busy_ts {
* @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
* and vice-versa
* @aggregated_cs_counters: aggregated cs counters among all contexts
+ * @mmu_priv: device-specific MMU data.
+ * @mmu_func: device-related MMU functions.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1471,6 +1560,7 @@ struct hl_device_idle_busy_ts {
* @soft_reset_cnt: number of soft reset since the driver was loaded.
* @hard_reset_cnt: number of hard reset since the driver was loaded.
* @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
+ * @clk_throttling_reason: bitmask representing the current clk throttling reasons
* @id: device minor.
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
@@ -1479,7 +1569,7 @@ struct hl_device_idle_busy_ts {
* @late_init_done: was the late init stage done during initialization.
* @hwmon_initialized: were the H/W monitor sensors initialized.
* @hard_reset_pending: is a hard reset work item pending.
- * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
+ * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
* otherwise.
* @dram_supports_virtual_memory: is MMU enabled towards DRAM.
@@ -1501,6 +1591,7 @@ struct hl_device_idle_busy_ts {
* @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
+ * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1513,7 +1604,7 @@ struct hl_device {
struct device *dev_ctrl;
struct delayed_work work_freq;
struct delayed_work work_heartbeat;
- char asic_name[16];
+ char asic_name[32];
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct workqueue_struct **cq_wq;
@@ -1535,10 +1626,8 @@ struct hl_device {
struct asic_fixed_properties asic_prop;
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
- struct gen_pool *mmu_pgt_pool;
struct hl_vm vm;
struct mutex mmu_cache_lock;
- void *mmu_shadow_hop0;
struct device *hwmon_dev;
enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
@@ -1562,19 +1651,23 @@ struct hl_device {
struct hl_cs_counters aggregated_cs_counters;
+ struct hl_mmu_priv mmu_priv;
+ struct hl_mmu_funcs mmu_func;
+
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
- enum armcp_card_types card_type;
+ enum cpucp_card_types card_type;
int cs_active_cnt;
u32 major;
u32 high_pll;
u32 soft_reset_cnt;
u32 hard_reset_cnt;
u32 idle_busy_ts_idx;
+ u32 clk_throttling_reason;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
@@ -1598,6 +1691,7 @@ struct hl_device {
u8 sync_stream_queue_idx;
u8 supports_coresight;
u8 supports_soft_reset;
+ u8 supports_cb_mapping;
/* Parameters for bring-up */
u8 mmu_enable;
@@ -1739,7 +1833,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
-struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
+struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
@@ -1755,7 +1849,7 @@ int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
- struct armcp_sensor *sensors_arr);
+ struct cpucp_sensor *sensors_arr);
int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);
@@ -1763,8 +1857,9 @@ void hl_sysfs_fini(struct hl_device *hdev);
int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
-int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
- u64 *handle, int ctx_id, bool internal_cb);
+int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
+ struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+ bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
@@ -1776,11 +1871,15 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
+int hl_cb_va_pool_init(struct hl_ctx *ctx);
+void hl_cb_va_pool_fini(struct hl_ctx *ctx);
void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void hl_sob_reset_error(struct kref *ref);
+void hl_fence_put(struct hl_fence *fence);
+void hl_fence_get(struct hl_fence *fence);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
@@ -1810,6 +1909,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
bool flush_pte);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
+int hl_mmu_if_set_funcs(struct hl_device *hdev);
+void hl_mmu_v1_set_funcs(struct hl_device *hdev);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst);
@@ -1825,23 +1926,28 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
int hl_fw_send_heartbeat(struct hl_device *hdev);
-int hl_fw_armcp_info_get(struct hl_device *hdev);
+int hl_fw_cpucp_info_get(struct hl_device *hdev);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
+ struct hl_info_pci_counters *counters);
+int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
+ u64 *total_energy);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
u32 boot_err0_reg, bool skip_bmc,
u32 cpu_timeout, u32 boot_fit_timeout);
+int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 boot_err0_reg, u32 timeout);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
-int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
- u64 addr);
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
struct hl_inbound_pci_region *pci_region);
int hl_pci_set_outbound_region(struct hl_device *hdev,
struct hl_outbound_pci_region *pci_region);
-int hl_pci_init(struct hl_device *hdev);
+int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 boot_err0_reg, u32 preboot_ver_timeout);
void hl_pci_fini(struct hl_device *hdev);
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index c6b31e93fb5e..f9067d3ef437 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -11,6 +11,7 @@
#include "habanalabs.h"
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/module.h>
#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
@@ -408,6 +409,8 @@ static int hl_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, hdev);
+ pci_enable_pcie_error_reporting(pdev);
+
rc = hl_device_init(hdev, hl_class);
if (rc) {
dev_err(&pdev->dev, "Fatal error during habanalabs device init\n");
@@ -440,22 +443,93 @@ static void hl_pci_remove(struct pci_dev *pdev)
return;
hl_device_fini(hdev);
+ pci_disable_pcie_error_reporting(pdev);
pci_set_drvdata(pdev, NULL);
-
destroy_hdev(hdev);
}
+/**
+ * hl_pci_err_detected - a PCI bus error detected on this device
+ *
+ * @pdev: pointer to pci device
+ * @state: PCI error type
+ *
+ * Called by the PCI subsystem whenever a non-correctable
+ * PCI bus error is detected
+ */
+static pci_ers_result_t
+hl_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+ enum pci_ers_result result;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev, "frozen state error detected\n");
+ result = PCI_ERS_RESULT_NEED_RESET;
+ break;
+
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "failure state error detected\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ break;
+
+ default:
+ result = PCI_ERS_RESULT_NONE;
+ }
+
+ hdev->asic_funcs->halt_engines(hdev, true);
+
+ return result;
+}
+
+/**
+ * hl_pci_err_resume - resume after a PCI slot reset
+ *
+ * @pdev: pointer to pci device
+ *
+ */
+static void hl_pci_err_resume(struct pci_dev *pdev)
+{
+ struct hl_device *hdev = pci_get_drvdata(pdev);
+
+ dev_warn(hdev->dev, "Resuming device after PCI slot reset\n");
+ hl_device_resume(hdev);
+}
+
+/**
+ * hl_pci_err_slot_reset - a PCI slot reset has just happened
+ *
+ * @pdev: pointer to pci device
+ *
+ * Determine if the driver can recover from the PCI slot reset
+ */
+static pci_ers_result_t hl_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static const struct dev_pm_ops hl_pm_ops = {
.suspend = hl_pmops_suspend,
.resume = hl_pmops_resume,
};
+static const struct pci_error_handlers hl_pci_err_handler = {
+ .error_detected = hl_pci_err_detected,
+ .slot_reset = hl_pci_err_slot_reset,
+ .resume = hl_pci_err_resume,
+};
+
static struct pci_driver hl_pci_driver = {
.name = HL_NAME,
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
.driver.pm = &hl_pm_ops,
+ .err_handler = &hl_pci_err_handler,
};
/*
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 5af1c03da473..07317ea49129 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -8,6 +8,7 @@
#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
+#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -64,14 +65,14 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.dram_enabled = 1;
hw_ip.num_of_events = prop->num_of_events;
- memcpy(hw_ip.armcp_version, prop->armcp_info.armcp_version,
+ memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
- memcpy(hw_ip.card_name, prop->armcp_info.card_name,
+ memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
- hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
- hw_ip.module_id = le32_to_cpu(prop->armcp_info.card_location);
+ hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
+ hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
@@ -131,7 +132,7 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
return -EINVAL;
hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
- &hw_idle.busy_engines_mask, NULL);
+ &hw_idle.busy_engines_mask_ext, NULL);
return copy_to_user(out, &hw_idle,
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
@@ -276,10 +277,45 @@ static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}
+static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_pci_counters pci_counters = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &pci_counters,
+ min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
+}
+
+static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_clk_throttle clk_throttle = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
+
+ return copy_to_user(out, &clk_throttle,
+ min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
+}
+
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_info_cs_counters cs_counters = {0};
+ struct hl_info_cs_counters cs_counters = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -297,6 +333,51 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}
+static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_info_sync_manager sm_info = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ if (args->dcore_id >= HL_MAX_DCORES)
+ return -EINVAL;
+
+ sm_info.first_available_sync_object =
+ prop->first_available_user_sob[args->dcore_id];
+ sm_info.first_available_monitor =
+ prop->first_available_user_mon[args->dcore_id];
+
+ return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
+ sizeof(sm_info))) ? -EFAULT : 0;
+}
+
+static int total_energy_consumption_info(struct hl_fpriv *hpriv,
+ struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_info_energy total_energy = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_cpucp_total_energy_get(hdev,
+ &total_energy.total_energy_consumption);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &total_energy,
+ min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -360,6 +441,18 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_CS_COUNTERS:
return cs_counters_info(hpriv, args);
+ case HL_INFO_PCI_COUNTERS:
+ return pci_counters_info(hpriv, args);
+
+ case HL_INFO_CLK_THROTTLE_REASON:
+ return clk_throttle_info(hpriv, args);
+
+ case HL_INFO_SYNC_MANAGER:
+ return sync_manager_info(hpriv, args);
+
+ case HL_INFO_TOTAL_ENERGY:
+ return total_energy_consumption_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
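
A hedged userspace sketch of driving one of the new opcodes; the uapi names (struct hl_info_args, HL_IOCTL_INFO, struct hl_info_clk_throttle) follow the uapi header as extended by this series and may differ in other kernel versions:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* installed uapi header */

static int query_clk_throttle(int fd, struct hl_info_clk_throttle *out)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_CLK_THROTTLE_REASON;
	args.return_pointer = (uint64_t)(uintptr_t)out;
	args.return_size = sizeof(*out);

	/* on success, the throttling bitmask is in
	 * out->clk_throttling_reason
	 */
	return ioctl(fd, HL_IOCTL_INFO, &args);
}
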
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 287681646071..250cf9cefc06 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -75,7 +75,7 @@ static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
{
struct hl_bd *bd;
- bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
+ bd = q->kernel_address;
bd += hl_pi_2_offset(q->pi);
bd->ctl = cpu_to_le32(ctl);
bd->len = cpu_to_le32(len);
@@ -288,10 +288,10 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
ptr = cb->bus_address;
cq_pkt.data = cpu_to_le32(
- ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
- & CQ_ENTRY_SHADOW_INDEX_MASK) |
- (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
- (1 << CQ_ENTRY_READY_SHIFT));
+ ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
+ & CQ_ENTRY_SHADOW_INDEX_MASK) |
+ FIELD_PREP(CQ_ENTRY_SHADOW_INDEX_VALID_MASK, 1) |
+ FIELD_PREP(CQ_ENTRY_READY_MASK, 1));
/*
* No need to protect pi_offset because scheduling to the
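
The hunk above replaces the open-coded (1 << ..._SHIFT) expressions with FIELD_PREP() from <linux/bitfield.h>, which derives the shift from the mask itself. Roughly, and ignoring the compile-time checks the real macro performs:

#define FIELD_PREP_SKETCH(mask, val) \
	((((u32)(val)) << __builtin_ctz(mask)) & (mask))

/* FIELD_PREP_SKETCH(0x000000f0, 0x3) == 0x30 */
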
@@ -335,8 +335,7 @@ static void int_queue_schedule_job(struct hl_cs_job *job)
bd.len = cpu_to_le32(job->job_cb_size);
bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
- pi = (__le64 *) (uintptr_t) (q->kernel_address +
- ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
+ pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
@@ -474,7 +473,7 @@ static void init_signal_wait_cs(struct hl_cs *cs)
* wait CS was submitted.
*/
mb();
- dma_fence_put(cs->signal_fence);
+ hl_fence_put(cs->signal_fence);
cs->signal_fence = NULL;
}
}
@@ -630,7 +629,7 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
if (!p)
return -ENOMEM;
- q->kernel_address = (u64) (uintptr_t) p;
+ q->kernel_address = p;
q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
sizeof(*q->shadow_queue),
@@ -653,11 +652,11 @@ free_queue:
if (is_cpu_queue)
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address);
+ q->kernel_address);
else
hdev->asic_funcs->asic_dma_free_coherent(hdev,
HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address,
+ q->kernel_address,
q->bus_address);
return rc;
@@ -676,7 +675,7 @@ static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
return -EFAULT;
}
- q->kernel_address = (u64) (uintptr_t) p;
+ q->kernel_address = p;
q->pi = 0;
atomic_set(&q->ci, 0);
@@ -704,7 +703,7 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
if (!p)
return -ENOMEM;
- q->kernel_address = (u64) (uintptr_t) p;
+ q->kernel_address = p;
/* Make sure read/write pointers are initialized to start of queue */
atomic_set(&q->ci, 0);
@@ -839,11 +838,11 @@ static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
if (q->queue_type == QUEUE_TYPE_CPU)
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address);
+ q->kernel_address);
else
hdev->asic_funcs->asic_dma_free_coherent(hdev,
HL_QUEUE_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address,
+ q->kernel_address,
q->bus_address);
}
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index b997336fa75f..2ac29cb2fe61 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -13,7 +13,7 @@
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
- struct armcp_sensor *sensors_arr)
+ struct cpucp_sensor *sensors_arr)
{
u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
@@ -24,7 +24,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
enum hwmon_sensor_types type;
int rc, i, j;
- for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) {
+ for (i = 0 ; i < CPUCP_MAX_SENSORS ; i++) {
type = le32_to_cpu(sensors_arr[i].type);
if ((type == 0) && (sensors_arr[i].flags == 0))
@@ -311,13 +311,13 @@ static const struct hwmon_ops hl_hwmon_ops = {
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -337,13 +337,13 @@ int hl_get_temperature(struct hl_device *hdev,
int hl_set_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEMPERATURE_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
@@ -362,13 +362,13 @@ int hl_set_temperature(struct hl_device *hdev,
int hl_get_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -388,13 +388,13 @@ int hl_get_voltage(struct hl_device *hdev,
int hl_get_current(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -414,13 +414,13 @@ int hl_get_current(struct hl_device *hdev,
int hl_get_fan_speed(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_FAN_SPEED_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -440,13 +440,13 @@ int hl_get_fan_speed(struct hl_device *hdev,
int hl_get_pwm_info(struct hl_device *hdev,
int sensor_index, u32 attr, long *value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
@@ -466,13 +466,13 @@ int hl_get_pwm_info(struct hl_device *hdev,
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_PWM_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PWM_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = cpu_to_le64(value);
@@ -489,13 +489,13 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_VOLTAGE_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
@@ -514,13 +514,13 @@ int hl_set_voltage(struct hl_device *hdev,
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_CURRENT_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
@@ -549,7 +549,7 @@ int hl_hwmon_init(struct hl_device *hdev)
hdev->hl_chip_info->ops = &hl_hwmon_ops;
hdev->hwmon_dev = hwmon_device_register_with_info(dev,
- prop->armcp_info.card_name, hdev,
+ prop->cpucp_info.card_name, hdev,
hdev->hl_chip_info, NULL);
if (IS_ERR(hdev->hwmon_dev)) {
rc = PTR_ERR(hdev->hwmon_dev);
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index c8db717023f5..de53fb5f978a 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -11,7 +11,7 @@
/**
* struct hl_eqe_work - This structure is used to schedule work of EQ
- * entry and armcp_reset event
+ * entry and cpucp_reset event
*
* @eq_work: workqueue object to run when EQ entry is received
* @hdev: pointer to device structure
@@ -90,7 +90,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
return IRQ_HANDLED;
}
- cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
+ cq_base = cq->kernel_address;
while (1) {
bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
@@ -152,7 +152,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
struct hl_eq_entry *eq_base;
struct hl_eqe_work *handle_eqe_work;
- eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address;
+ eq_base = eq->kernel_address;
while (1) {
bool entry_ready =
@@ -221,7 +221,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
return -ENOMEM;
q->hdev = hdev;
- q->kernel_address = (u64) (uintptr_t) p;
+ q->kernel_address = p;
q->hw_queue_id = hw_queue_id;
q->ci = 0;
q->pi = 0;
@@ -242,7 +242,8 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address, q->bus_address);
+ q->kernel_address,
+ q->bus_address);
}
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
@@ -259,7 +260,7 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
* when the device is operational again
*/
- memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
+ memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
/**
@@ -282,7 +283,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
return -ENOMEM;
q->hdev = hdev;
- q->kernel_address = (u64) (uintptr_t) p;
+ q->kernel_address = p;
q->ci = 0;
return 0;
@@ -302,7 +303,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
HL_EQ_SIZE_IN_BYTES,
- (void *) (uintptr_t) q->kernel_address);
+ q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
@@ -316,5 +317,5 @@ void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
* when the device is operational again
*/
- memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
+ memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 5ff4688683fd..84227819e4d1 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -77,8 +77,8 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
if (!paddr) {
dev_err(hdev->dev,
- "failed to allocate %llu huge contiguous pages\n",
- num_pgs);
+ "failed to allocate %llu contiguous pages with total size of %llu\n",
+ num_pgs, total_size);
return -ENOMEM;
}
}
@@ -505,41 +505,32 @@ static inline int add_va_block(struct hl_device *hdev,
}
/*
- * get_va_block - get a virtual block with the requested size
- *
- * @hdev : pointer to the habanalabs device structure
- * @va_range : pointer to the virtual addresses range
- * @size : requested block size
- * @hint_addr : hint for request address by the user
- * @is_userptr : is host or DRAM memory
+ * get_va_block() - get a virtual block for the given size and alignment.
+ * @hdev: pointer to the habanalabs device structure.
+ * @va_range: pointer to the virtual addresses range.
+ * @size: requested block size.
+ * @hint_addr: hint for requested address by the user.
+ * @va_block_align: required alignment of the virtual block start address.
*
* This function does the following:
* - Iterate on the virtual block list to find a suitable virtual block for the
- * requested size
- * - Reserve the requested block and update the list
- * - Return the start address of the virtual block
+ * given size and alignment.
+ * - Reserve the requested block and update the list.
+ * - Return the start address of the virtual block.
*/
-static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 size, u64 hint_addr,
- bool is_userptr)
+static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
+ u64 size, u64 hint_addr, u32 va_block_align)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
- u64 valid_start, valid_size, prev_start, prev_end, page_mask,
+ u64 valid_start, valid_size, prev_start, prev_end, align_mask,
res_valid_start = 0, res_valid_size = 0;
- u32 page_size;
bool add_prev = false;
- if (is_userptr)
- /*
- * We cannot know if the user allocated memory with huge pages
- * or not, hence we continue with the biggest possible
- * granularity.
- */
- page_size = hdev->asic_prop.pmmu_huge.page_size;
- else
- page_size = hdev->asic_prop.dmmu.page_size;
+ align_mask = ~((u64)va_block_align - 1);
- page_mask = ~((u64)page_size - 1);
+ /* check if hint_addr is aligned */
+ if (hint_addr & (va_block_align - 1))
+ hint_addr = 0;
mutex_lock(&va_range->lock);
@@ -549,9 +540,9 @@ static u64 get_va_block(struct hl_device *hdev,
/* calc the first possible aligned addr */
valid_start = va_block->start;
- if (valid_start & (page_size - 1)) {
- valid_start &= page_mask;
- valid_start += page_size;
+ if (valid_start & (va_block_align - 1)) {
+ valid_start &= align_mask;
+ valid_start += va_block_align;
if (valid_start > va_block->end)
continue;
}
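
The align-up step above is the usual power-of-two rounding; a minimal sketch, assuming va_block_align is a power of two:

static u64 align_up_sketch(u64 start, u64 align)
{
	u64 mask = align - 1;

	/* equivalent to the branchy form above:
	 * if (start & mask) { start &= ~mask; start += align; }
	 */
	return (start + mask) & ~mask;
}
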
@@ -863,7 +854,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
struct hl_va_range *va_range;
enum vm_type_t *vm_type;
u64 ret_vaddr, hint_addr;
- u32 handle = 0;
+ u32 handle = 0, va_block_align;
int rc;
bool is_userptr = args->flags & HL_MEM_USERPTR;
@@ -873,6 +864,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (is_userptr) {
u64 addr = args->map_host.host_virt_addr,
size = args->map_host.mem_size;
+ u32 page_size = hdev->asic_prop.pmmu.page_size,
+ huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
@@ -892,6 +885,27 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
vm_type = (enum vm_type_t *) userptr;
hint_addr = args->map_host.hint_addr;
handle = phys_pg_pack->handle;
+
+ /* get required alignment */
+ if (phys_pg_pack->page_size == page_size) {
+ va_range = ctx->host_va_range;
+
+ /*
+ * huge page alignment may be needed in case of regular
+ * page mapping, depending on the host VA alignment
+ */
+ if (addr & (huge_page_size - 1))
+ va_block_align = page_size;
+ else
+ va_block_align = huge_page_size;
+ } else {
+ /*
+ * huge page alignment is needed in case of huge page
+ * mapping
+ */
+ va_range = ctx->host_huge_va_range;
+ va_block_align = huge_page_size;
+ }
} else {
handle = lower_32_bits(args->map_device.handle);
@@ -912,6 +926,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
vm_type = (enum vm_type_t *) phys_pg_pack;
hint_addr = args->map_device.hint_addr;
+
+ /* DRAM VA alignment is the same as the DRAM page size */
+ va_range = ctx->dram_va_range;
+ va_block_align = hdev->asic_prop.dmmu.page_size;
}
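
A sketch of the alignment choice introduced above, with illustrative page sizes (say 4 KiB regular and 2 MiB huge — assumptions, not values taken from this patch): a host VA of 0x10200000 would get 2 MiB alignment even for a regular-page mapping, while 0x10201000 would fall back to 4 KiB.

static u32 pick_va_block_align_sketch(u64 host_addr, u32 page_size,
				      u32 huge_page_size, bool huge_mapping)
{
	if (huge_mapping)
		return huge_page_size;

	/* regular mapping: request huge alignment only when the host
	 * VA itself happens to be huge-page aligned
	 */
	return (host_addr & (huge_page_size - 1)) ?
			page_size : huge_page_size;
}
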
/*
@@ -933,16 +951,8 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto hnode_err;
}
- if (is_userptr)
- if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size)
- va_range = ctx->host_va_range;
- else
- va_range = ctx->host_huge_va_range;
- else
- va_range = ctx->dram_va_range;
-
ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
- hint_addr, is_userptr);
+ hint_addr, va_block_align);
if (!ret_vaddr) {
dev_err(hdev->dev, "no available va block for handle %u\n",
handle);
diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c
index 3fc0f497fab3..b5058798aeb9 100644
--- a/drivers/misc/habanalabs/common/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu.c
@@ -1,258 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*/
-#include "habanalabs.h"
-#include "../include/hw_ip/mmu/mmu_general.h"
-
-#include <linux/genalloc.h>
#include <linux/slab.h>
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
-
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = NULL;
-
- hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
- (unsigned long) hop_addr)
- if (hop_addr == pgt_info->shadow_addr)
- break;
-
- return pgt_info;
-}
-
-static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
-{
- struct hl_device *hdev = ctx->hdev;
-
- gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
- hdev->asic_prop.mmu_hop_table_size);
- hash_del(&pgt_info->node);
- kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
- kfree(pgt_info);
-}
-
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
-
- _free_hop(ctx, pgt_info);
-}
-
-static u64 alloc_hop(struct hl_ctx *ctx)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct pgt_info *pgt_info;
- u64 phys_addr, shadow_addr;
-
- pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
- if (!pgt_info)
- return ULLONG_MAX;
-
- phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
- prop->mmu_hop_table_size);
- if (!phys_addr) {
- dev_err(hdev->dev, "failed to allocate page\n");
- goto pool_add_err;
- }
-
- shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
- GFP_KERNEL);
- if (!shadow_addr)
- goto shadow_err;
-
- pgt_info->phys_addr = phys_addr;
- pgt_info->shadow_addr = shadow_addr;
- pgt_info->ctx = ctx;
- pgt_info->num_of_ptes = 0;
- hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
-
- return shadow_addr;
-
-shadow_err:
- gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
-pool_add_err:
- kfree(pgt_info);
-
- return ULLONG_MAX;
-}
-
-static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
-{
- return ctx->hdev->asic_prop.mmu_pgt_addr +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
- return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
- (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline void flush(struct hl_ctx *ctx)
-{
- /* flush all writes from all cores to reach PCI */
- mb();
- ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
-}
-
-/* transform the value to physical address when writing to H/W */
-static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
-{
- /*
- * The value to write is actually the address of the next shadow hop +
- * flags at the 12 LSBs.
- * Hence in order to get the value to write to the physical PTE, we
- * clear the 12 LSBs and translate the shadow hop to its associated
- * physical hop, and add back the original 12 LSBs.
- */
- u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
- (val & FLAGS_MASK);
-
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- phys_val);
-
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* do not transform the value to physical address when writing to H/W */
-static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
- u64 val)
-{
- ctx->hdev->asic_funcs->write_pte(ctx->hdev,
- get_phys_addr(ctx, shadow_pte_addr),
- val);
- *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* clear the last and present bits */
-static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
-{
- /* no need to transform the value to physical address */
- write_final_pte(ctx, pte_addr, 0);
-}
-
-static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- get_pgt_info(ctx, hop_addr)->num_of_ptes++;
-}
-
-/*
- * put_pte - decrement the num of ptes and free the hop if possible
- *
- * @ctx: pointer to the context structure
- * @hop_addr: addr of the hop
- *
- * This function returns the number of ptes left on this hop. If the number is
- * 0, it means the pte was freed.
- */
-static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
- int num_of_ptes_left;
-
- pgt_info->num_of_ptes--;
-
- /*
- * Need to save the number of ptes left because free_hop might free
- * the pgt_info
- */
- num_of_ptes_left = pgt_info->num_of_ptes;
- if (!num_of_ptes_left)
- _free_hop(ctx, pgt_info);
-
- return num_of_ptes_left;
-}
-
-static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr, u64 mask, u64 shift)
-{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & mask) >> shift);
-}
-
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
- mmu_prop->hop0_shift);
-}
-
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
- mmu_prop->hop1_shift);
-}
-
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
- mmu_prop->hop2_shift);
-}
-
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
- mmu_prop->hop3_shift);
-}
-
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
- struct hl_mmu_properties *mmu_prop,
- u64 hop_addr, u64 vaddr)
-{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
- mmu_prop->hop4_shift);
-}
-
-static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
-{
- if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & HOP_PHYS_ADDR_MASK;
- else
- return ULLONG_MAX;
-}
-
-static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
- bool *is_new_hop)
-{
- u64 hop_addr = get_next_hop_addr(ctx, curr_pte);
-
- if (hop_addr == ULLONG_MAX) {
- hop_addr = alloc_hop(ctx);
- *is_new_hop = (hop_addr != ULLONG_MAX);
- }
-
- return hop_addr;
-}
-
-/* translates shadow address inside hop to a physical address */
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
-{
- u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
- u64 shadow_hop_addr = shadow_addr & ~page_mask;
- u64 pte_offset = shadow_addr & page_mask;
- u64 phys_hop_addr;
-
- if (shadow_hop_addr != get_hop0_addr(ctx))
- phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
- else
- phys_hop_addr = get_phys_hop0_addr(ctx);
-
- return phys_hop_addr + pte_offset;
-}
+#include "habanalabs.h"
static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
@@ -263,155 +18,6 @@ static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
prop->dmmu.end_addr);
}
-static int dram_default_mapping_init(struct hl_ctx *ctx)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
- hop2_pte_addr, hop3_pte_addr, pte_val;
- int rc, i, j, hop3_allocated = 0;
-
- if ((!hdev->dram_supports_virtual_memory) ||
- (!hdev->dram_default_page_mapping) ||
- (ctx->asid == HL_KERNEL_ASID_ID))
- return 0;
-
- num_of_hop3 = prop->dram_size_for_default_page_mapping;
- do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
-
- /* add hop1 and hop2 */
- total_hops = num_of_hop3 + 2;
-
- ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
- if (!ctx->dram_default_hops)
- return -ENOMEM;
-
- hop0_addr = get_hop0_addr(ctx);
-
- hop1_addr = alloc_hop(ctx);
- if (hop1_addr == ULLONG_MAX) {
- dev_err(hdev->dev, "failed to alloc hop 1\n");
- rc = -ENOMEM;
- goto hop1_err;
- }
-
- ctx->dram_default_hops[total_hops - 1] = hop1_addr;
-
- hop2_addr = alloc_hop(ctx);
- if (hop2_addr == ULLONG_MAX) {
- dev_err(hdev->dev, "failed to alloc hop 2\n");
- rc = -ENOMEM;
- goto hop2_err;
- }
-
- ctx->dram_default_hops[total_hops - 2] = hop2_addr;
-
- for (i = 0 ; i < num_of_hop3 ; i++) {
- ctx->dram_default_hops[i] = alloc_hop(ctx);
- if (ctx->dram_default_hops[i] == ULLONG_MAX) {
- dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
- rc = -ENOMEM;
- goto hop3_err;
- }
- hop3_allocated++;
- }
-
- /* need only pte 0 in hops 0 and 1 */
- pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_addr, pte_val);
-
- pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_addr, pte_val);
- get_pte(ctx, hop1_addr);
-
- hop2_pte_addr = hop2_addr;
- for (i = 0 ; i < num_of_hop3 ; i++) {
- pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, pte_val);
- get_pte(ctx, hop2_addr);
- hop2_pte_addr += HL_PTE_SIZE;
- }
-
- pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
- LAST_MASK | PAGE_PRESENT_MASK;
-
- for (i = 0 ; i < num_of_hop3 ; i++) {
- hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
- write_final_pte(ctx, hop3_pte_addr, pte_val);
- get_pte(ctx, ctx->dram_default_hops[i]);
- hop3_pte_addr += HL_PTE_SIZE;
- }
- }
-
- flush(ctx);
-
- return 0;
-
-hop3_err:
- for (i = 0 ; i < hop3_allocated ; i++)
- free_hop(ctx, ctx->dram_default_hops[i]);
-
- free_hop(ctx, hop2_addr);
-hop2_err:
- free_hop(ctx, hop1_addr);
-hop1_err:
- kfree(ctx->dram_default_hops);
-
- return rc;
-}
-
-static void dram_default_mapping_fini(struct hl_ctx *ctx)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
- hop2_pte_addr, hop3_pte_addr;
- int i, j;
-
- if ((!hdev->dram_supports_virtual_memory) ||
- (!hdev->dram_default_page_mapping) ||
- (ctx->asid == HL_KERNEL_ASID_ID))
- return;
-
- num_of_hop3 = prop->dram_size_for_default_page_mapping;
- do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
-
- hop0_addr = get_hop0_addr(ctx);
- /* add hop1 and hop2 */
- total_hops = num_of_hop3 + 2;
- hop1_addr = ctx->dram_default_hops[total_hops - 1];
- hop2_addr = ctx->dram_default_hops[total_hops - 2];
-
- for (i = 0 ; i < num_of_hop3 ; i++) {
- hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
- clear_pte(ctx, hop3_pte_addr);
- put_pte(ctx, ctx->dram_default_hops[i]);
- hop3_pte_addr += HL_PTE_SIZE;
- }
- }
-
- hop2_pte_addr = hop2_addr;
- hop2_pte_addr = hop2_addr;
- for (i = 0 ; i < num_of_hop3 ; i++) {
- clear_pte(ctx, hop2_pte_addr);
- put_pte(ctx, hop2_addr);
- hop2_pte_addr += HL_PTE_SIZE;
- }
-
- clear_pte(ctx, hop1_addr);
- put_pte(ctx, hop1_addr);
- clear_pte(ctx, hop0_addr);
-
- kfree(ctx->dram_default_hops);
-
- flush(ctx);
-}
-
/**
* hl_mmu_init() - initialize the MMU module.
* @hdev: habanalabs device structure.
@@ -424,45 +30,10 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
*/
int hl_mmu_init(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- int rc;
-
- if (!hdev->mmu_enable)
- return 0;
-
- hdev->mmu_pgt_pool =
- gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
- if (!hdev->mmu_pgt_pool) {
- dev_err(hdev->dev, "Failed to create page gen pool\n");
- return -ENOMEM;
- }
-
- rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
- prop->mmu_hop0_tables_total_size,
- prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
- -1);
- if (rc) {
- dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
- goto err_pool_add;
- }
-
- hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
- prop->mmu_hop_table_size,
- GFP_KERNEL | __GFP_ZERO);
- if (ZERO_OR_NULL_PTR(hdev->mmu_shadow_hop0)) {
- rc = -ENOMEM;
- goto err_pool_add;
- }
-
- /* MMU H/W init will be done in device hw_init() */
+ if (hdev->mmu_enable)
+ return hdev->mmu_func.init(hdev);
return 0;
-
-err_pool_add:
- gen_pool_destroy(hdev->mmu_pgt_pool);
-
- return rc;
}
/**
@@ -477,13 +48,8 @@ err_pool_add:
*/
void hl_mmu_fini(struct hl_device *hdev)
{
- if (!hdev->mmu_enable)
- return;
-
- /* MMU H/W fini was already done in device hw_fini() */
-
- kvfree(hdev->mmu_shadow_hop0);
- gen_pool_destroy(hdev->mmu_pgt_pool);
+ if (hdev->mmu_enable)
+ hdev->mmu_func.fini(hdev);
}
/**
@@ -498,13 +64,10 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- if (!hdev->mmu_enable)
- return 0;
+ if (hdev->mmu_enable)
+ return hdev->mmu_func.ctx_init(ctx);
- mutex_init(&ctx->mmu_lock);
- hash_init(ctx->mmu_shadow_hash);
-
- return dram_default_mapping_init(ctx);
+ return 0;
}
/*
@@ -520,160 +83,9 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
- struct pgt_info *pgt_info;
- struct hlist_node *tmp;
- int i;
-
- if (!hdev->mmu_enable)
- return;
-
- dram_default_mapping_fini(ctx);
-
- if (!hash_empty(ctx->mmu_shadow_hash))
- dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
- ctx->asid);
-
- hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
- dev_err_ratelimited(hdev->dev,
- "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
- pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- _free_hop(ctx, pgt_info);
- }
-
- mutex_destroy(&ctx->mmu_lock);
-}
-
-static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte;
- bool is_huge, clear_hop3 = true;
-
- /* shifts and masks are the same in PMMU and HPMMU, use one of them */
- mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
-
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = get_next_hop_addr(ctx, curr_pte);
-
- if (hop1_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = get_next_hop_addr(ctx, curr_pte);
-
- if (hop2_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
-
- hop3_addr = get_next_hop_addr(ctx, curr_pte);
-
- if (hop3_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
-
- is_huge = curr_pte & LAST_MASK;
-
- if (is_dram_addr && !is_huge) {
- dev_err(hdev->dev,
- "DRAM unmapping should use huge pages only\n");
- return -EFAULT;
- }
-
- if (!is_huge) {
- hop4_addr = get_next_hop_addr(ctx, curr_pte);
-
- if (hop4_addr == ULLONG_MAX)
- goto not_mapped;
-
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
-
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
-
- clear_hop3 = false;
- }
-
- if (hdev->dram_default_page_mapping && is_dram_addr) {
- u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
- PAGE_PRESENT_MASK;
- if (curr_pte == default_pte) {
- dev_err(hdev->dev,
- "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
- virt_addr);
- goto not_mapped;
- }
-
- if (!(curr_pte & PAGE_PRESENT_MASK)) {
- dev_err(hdev->dev,
- "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
- virt_addr);
- goto not_mapped;
- }
-
- write_final_pte(ctx, hop3_pte_addr, default_pte);
- put_pte(ctx, hop3_addr);
- } else {
- if (!(curr_pte & PAGE_PRESENT_MASK))
- goto not_mapped;
-
- if (hop4_addr)
- clear_pte(ctx, hop4_pte_addr);
- else
- clear_pte(ctx, hop3_pte_addr);
-
- if (hop4_addr && !put_pte(ctx, hop4_addr))
- clear_hop3 = true;
-
- if (!clear_hop3)
- goto mapped;
-
- clear_pte(ctx, hop3_pte_addr);
- if (put_pte(ctx, hop3_addr))
- goto mapped;
-
- clear_pte(ctx, hop2_pte_addr);
-
- if (put_pte(ctx, hop2_addr))
- goto mapped;
-
- clear_pte(ctx, hop1_pte_addr);
-
- if (put_pte(ctx, hop1_addr))
- goto mapped;
-
- clear_pte(ctx, hop0_pte_addr);
- }
-
-mapped:
- return 0;
-
-not_mapped:
- dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
- virt_addr);
-
- return -EINVAL;
+ if (hdev->mmu_enable)
+ hdev->mmu_func.ctx_fini(ctx);
}
/*
@@ -738,7 +150,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
+ rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
break;
@@ -746,172 +158,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
}
if (flush_pte)
- flush(ctx);
-
- return rc;
-}
-
-static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size, bool is_dram_addr)
-{
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct hl_mmu_properties *mmu_prop;
- u64 hop0_addr = 0, hop0_pte_addr = 0,
- hop1_addr = 0, hop1_pte_addr = 0,
- hop2_addr = 0, hop2_pte_addr = 0,
- hop3_addr = 0, hop3_pte_addr = 0,
- hop4_addr = 0, hop4_pte_addr = 0,
- curr_pte = 0;
- bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge;
- int rc = -ENOMEM;
-
- /*
- * This mapping function can map a page or a huge page. For huge page
- * there are only 3 hops rather than 4. Currently the DRAM allocation
- * uses huge pages only but user memory could have been allocated with
- * one of the two page sizes. Since this is a common code for all the
- * three cases, we need this huge page check.
- */
- if (is_dram_addr) {
- mmu_prop = &prop->dmmu;
- is_huge = true;
- } else if (page_size == prop->pmmu_huge.page_size) {
- mmu_prop = &prop->pmmu_huge;
- is_huge = true;
- } else {
- mmu_prop = &prop->pmmu;
- is_huge = false;
- }
-
- hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
-
- hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
- if (hop1_addr == ULLONG_MAX)
- goto err;
-
- hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
-
- hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
- if (hop2_addr == ULLONG_MAX)
- goto err;
-
- hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
-
- hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
- if (hop3_addr == ULLONG_MAX)
- goto err;
-
- hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
-
- if (!is_huge) {
- hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
- if (hop4_addr == ULLONG_MAX)
- goto err;
-
- hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
- virt_addr);
- curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
- }
-
- if (hdev->dram_default_page_mapping && is_dram_addr) {
- u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
- PAGE_PRESENT_MASK;
-
- if (curr_pte != default_pte) {
- dev_err(hdev->dev,
- "DRAM: mapping already exists for virt_addr 0x%llx\n",
- virt_addr);
- rc = -EINVAL;
- goto err;
- }
-
- if (hop1_new || hop2_new || hop3_new || hop4_new) {
- dev_err(hdev->dev,
- "DRAM mapping should not allocate more hops\n");
- rc = -EFAULT;
- goto err;
- }
- } else if (curr_pte & PAGE_PRESENT_MASK) {
- dev_err(hdev->dev,
- "mapping already exists for virt_addr 0x%llx\n",
- virt_addr);
-
- dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
- dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
- dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
- dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
-
- if (!is_huge)
- dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
- *(u64 *) (uintptr_t) hop4_pte_addr,
- hop4_pte_addr);
-
- rc = -EINVAL;
- goto err;
- }
-
- curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
- | PAGE_PRESENT_MASK;
-
- if (is_huge)
- write_final_pte(ctx, hop3_pte_addr, curr_pte);
- else
- write_final_pte(ctx, hop4_pte_addr, curr_pte);
-
- if (hop1_new) {
- curr_pte =
- (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop0_pte_addr, curr_pte);
- }
- if (hop2_new) {
- curr_pte =
- (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop1_pte_addr, curr_pte);
- get_pte(ctx, hop1_addr);
- }
- if (hop3_new) {
- curr_pte =
- (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
- write_pte(ctx, hop2_pte_addr, curr_pte);
- get_pte(ctx, hop2_addr);
- }
-
- if (!is_huge) {
- if (hop4_new) {
- curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
- PAGE_PRESENT_MASK;
- write_pte(ctx, hop3_pte_addr, curr_pte);
- get_pte(ctx, hop3_addr);
- }
-
- get_pte(ctx, hop4_addr);
- } else {
- get_pte(ctx, hop3_addr);
- }
-
- return 0;
-
-err:
- if (hop4_new)
- free_hop(ctx, hop4_addr);
- if (hop3_new)
- free_hop(ctx, hop3_addr);
- if (hop2_new)
- free_hop(ctx, hop2_addr);
- if (hop1_new)
- free_hop(ctx, hop1_addr);
+ hdev->mmu_func.flush(ctx);
return rc;
}
@@ -984,7 +231,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
real_phys_addr = phys_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
+ rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr,
real_page_size, is_dram_addr);
if (rc)
goto err;
@@ -995,21 +242,21 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
}
if (flush_pte)
- flush(ctx);
+ hdev->mmu_func.flush(ctx);
return 0;
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
+ if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
real_virt_addr += real_page_size;
}
- flush(ctx);
+ hdev->mmu_func.flush(ctx);
return rc;
}
@@ -1022,7 +269,10 @@ err:
*/
void hl_mmu_swap_out(struct hl_ctx *ctx)
{
+ struct hl_device *hdev = ctx->hdev;
+ if (hdev->mmu_enable)
+ hdev->mmu_func.swap_out(ctx);
}
/*
@@ -1033,5 +283,27 @@ void hl_mmu_swap_out(struct hl_ctx *ctx)
*/
void hl_mmu_swap_in(struct hl_ctx *ctx)
{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->mmu_enable)
+ hdev->mmu_func.swap_in(ctx);
+}
+
+int hl_mmu_if_set_funcs(struct hl_device *hdev)
+{
+ if (!hdev->mmu_enable)
+ return 0;
+
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ case ASIC_GAUDI:
+ hl_mmu_v1_set_funcs(hdev);
+ break;
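+	/*
+	 * Sketch only (ASIC_FOO and hl_mmu_v2_set_funcs are hypothetical):
+	 * a future MMU implementation would plug into this dispatch with
+	 * another case, e.g.
+	 *
+	 *	case ASIC_FOO:
+	 *		hl_mmu_v2_set_funcs(hdev);
+	 *		break;
+	 */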
+ default:
+ dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
+ hdev->asic_type);
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c
new file mode 100644
index 000000000000..8d1eb5265419
--- /dev/null
+++ b/drivers/misc/habanalabs/common/mmu_v1.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "../include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
+ hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+ (unsigned long) hop_addr)
+ if (hop_addr == pgt_info->shadow_addr)
+ break;
+
+ return pgt_info;
+}
+
+static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, pgt_info->phys_addr,
+ hdev->asic_prop.mmu_hop_table_size);
+ hash_del(&pgt_info->node);
+ kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
+ kfree(pgt_info);
+}
+
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ _free_hop(ctx, pgt_info);
+}
+
+static u64 alloc_hop(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pgt_info *pgt_info;
+ u64 phys_addr, shadow_addr;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return ULLONG_MAX;
+
+ phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.mmu_pgt_pool,
+ prop->mmu_hop_table_size);
+ if (!phys_addr) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ goto pool_add_err;
+ }
+
+ shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
+ GFP_KERNEL);
+ if (!shadow_addr)
+ goto shadow_err;
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = shadow_addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+ return shadow_addr;
+
+shadow_err:
+ gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, phys_addr,
+ prop->mmu_hop_table_size);
+pool_add_err:
+ kfree(pgt_info);
+
+ return ULLONG_MAX;
+}
+
+static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+ return ctx->hdev->asic_prop.mmu_pgt_addr +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static inline u64 get_hop0_addr(struct hl_ctx *ctx)
+{
+ return (u64) (uintptr_t) ctx->hdev->mmu_priv.mmu_shadow_hop0 +
+ (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
+}
+
+static void flush(struct hl_ctx *ctx)
+{
+ /* flush all writes from all cores to reach PCI */
+ mb();
+ ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
+}
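+
+/*
+ * Note (generic posted-write idiom, not specific to this device): the
+ * read-back acts as a flush because a PCI read completion cannot overtake
+ * writes previously posted on the same path, while mb() orders the CPU side
+ * before the read is issued.
+ */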
+
+/* transform the value to physical address when writing to H/W */
+static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+ /*
+ * The value to write is actually the address of the next shadow hop +
+ * flags at the 12 LSBs.
+ * Hence in order to get the value to write to the physical PTE, we
+ * clear the 12 LSBs and translate the shadow hop to its associated
+ * physical hop, and add back the original 12 LSBs.
+ */
+ u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
+ (val & FLAGS_MASK);
+
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ get_phys_addr(ctx, shadow_pte_addr),
+ phys_val);
+
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
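+
+/*
+ * Illustrative example (all values hypothetical): if val holds the shadow
+ * address of a child hop, say 0xffff888812340000, plus PAGE_PRESENT_MASK in
+ * its 12 LSBs, and that hop's pgt_info records phys_addr 0x20000000, then
+ * the device PTE receives 0x20000000 | PAGE_PRESENT_MASK while the shadow
+ * PTE keeps the shadow value for host-side table walks.
+ */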
+
+/* do not transform the value to physical address when writing to H/W */
+static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
+ u64 val)
+{
+ ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+ get_phys_addr(ctx, shadow_pte_addr),
+ val);
+ *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+/* clear the last and present bits */
+static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
+{
+ /* no need to transform the value to physical address */
+ write_final_pte(ctx, pte_addr, 0);
+}
+
+static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+/*
+ * put_pte - decrement the num of ptes and free the hop if possible
+ *
+ * @ctx: pointer to the context structure
+ * @hop_addr: addr of the hop
+ *
+ * This function returns the number of ptes left on this hop. If the number is
+ * 0, it means the hop itself was freed.
+ */
+static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ _free_hop(ctx, pgt_info);
+
+ return num_of_ptes_left;
+}
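+
+/*
+ * Illustrative pairing (not a real call site): installing a child PTE in a
+ * hop is followed by get_pte() on that hop, and tearing it down by
+ * put_pte(), which reclaims the hop once its last PTE is gone:
+ *
+ *	write_pte(ctx, hop_pte_addr, child_pte);
+ *	get_pte(ctx, hop_addr);
+ *	...
+ *	clear_pte(ctx, hop_pte_addr);
+ *	if (!put_pte(ctx, hop_addr))
+ *		hop_addr = 0;	(hop was freed, drop the stale address)
+ */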
+
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
+{
+ return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
+ ((virt_addr & mask) >> shift);
+}
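+
+/*
+ * Worked example (hypothetical values): with mmu_pte_size = 8,
+ * mask = 0x3fe00000 and shift = 21, virt_addr = 0x40200000 selects index
+ * (0x40200000 & 0x3fe00000) >> 21 = 1, so the PTE address is hop_addr + 8.
+ */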
+
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
+ mmu_prop->hop0_shift);
+}
+
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
+ mmu_prop->hop1_shift);
+}
+
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
+ mmu_prop->hop2_shift);
+}
+
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
+ mmu_prop->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
+ mmu_prop->hop4_shift);
+}
+
+static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
+{
+ if (curr_pte & PAGE_PRESENT_MASK)
+ return curr_pte & HOP_PHYS_ADDR_MASK;
+ else
+ return ULLONG_MAX;
+}
+
+static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
+ bool *is_new_hop)
+{
+ u64 hop_addr = get_next_hop_addr(ctx, curr_pte);
+
+ if (hop_addr == ULLONG_MAX) {
+ hop_addr = alloc_hop(ctx);
+ *is_new_hop = (hop_addr != ULLONG_MAX);
+ }
+
+ return hop_addr;
+}
+
+/* translates shadow address inside hop to a physical address */
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
+{
+ u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
+ u64 shadow_hop_addr = shadow_addr & ~page_mask;
+ u64 pte_offset = shadow_addr & page_mask;
+ u64 phys_hop_addr;
+
+ if (shadow_hop_addr != get_hop0_addr(ctx))
+ phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+ else
+ phys_hop_addr = get_phys_hop0_addr(ctx);
+
+ return phys_hop_addr + pte_offset;
+}
+
+static int dram_default_mapping_init(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+ hop2_pte_addr, hop3_pte_addr, pte_val;
+ int rc, i, j, hop3_allocated = 0;
+
+ if ((!hdev->dram_supports_virtual_memory) ||
+ (!hdev->dram_default_page_mapping) ||
+ (ctx->asid == HL_KERNEL_ASID_ID))
+ return 0;
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
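+
+	/*
+	 * Worked example (hypothetical sizes): a 32 GB default-mapped DRAM
+	 * range with 2 MB pages and 512 PTEs per hop needs
+	 * 32 GB / 2 MB / 512 = 32 hop3 tables, so total_hops = 34.
+	 */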
+
+ ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+ if (!ctx->dram_default_hops)
+ return -ENOMEM;
+
+ hop0_addr = get_hop0_addr(ctx);
+
+ hop1_addr = alloc_hop(ctx);
+ if (hop1_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 1\n");
+ rc = -ENOMEM;
+ goto hop1_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 1] = hop1_addr;
+
+ hop2_addr = alloc_hop(ctx);
+ if (hop2_addr == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 2\n");
+ rc = -ENOMEM;
+ goto hop2_err;
+ }
+
+ ctx->dram_default_hops[total_hops - 2] = hop2_addr;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ ctx->dram_default_hops[i] = alloc_hop(ctx);
+ if (ctx->dram_default_hops[i] == ULLONG_MAX) {
+ dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
+ rc = -ENOMEM;
+ goto hop3_err;
+ }
+ hop3_allocated++;
+ }
+
+ /* need only pte 0 in hops 0 and 1 */
+ pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop0_addr, pte_val);
+
+ pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop1_addr, pte_val);
+ get_pte(ctx, hop1_addr);
+
+ hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ write_pte(ctx, hop2_pte_addr, pte_val);
+ get_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
+ LAST_MASK | PAGE_PRESENT_MASK;
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ write_final_pte(ctx, hop3_pte_addr, pte_val);
+ get_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+ flush(ctx);
+
+ return 0;
+
+hop3_err:
+ for (i = 0 ; i < hop3_allocated ; i++)
+ free_hop(ctx, ctx->dram_default_hops[i]);
+
+ free_hop(ctx, hop2_addr);
+hop2_err:
+ free_hop(ctx, hop1_addr);
+hop1_err:
+ kfree(ctx->dram_default_hops);
+
+ return rc;
+}
+
+static void dram_default_mapping_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
+ hop2_pte_addr, hop3_pte_addr;
+ int i, j;
+
+ if ((!hdev->dram_supports_virtual_memory) ||
+ (!hdev->dram_default_page_mapping) ||
+ (ctx->asid == HL_KERNEL_ASID_ID))
+ return;
+
+ num_of_hop3 = prop->dram_size_for_default_page_mapping;
+ do_div(num_of_hop3, prop->dram_page_size);
+ do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+
+ hop0_addr = get_hop0_addr(ctx);
+ /* add hop1 and hop2 */
+ total_hops = num_of_hop3 + 2;
+ hop1_addr = ctx->dram_default_hops[total_hops - 1];
+ hop2_addr = ctx->dram_default_hops[total_hops - 2];
+
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ hop3_pte_addr = ctx->dram_default_hops[i];
+ for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ clear_pte(ctx, hop3_pte_addr);
+ put_pte(ctx, ctx->dram_default_hops[i]);
+ hop3_pte_addr += HL_PTE_SIZE;
+ }
+ }
+
+	hop2_pte_addr = hop2_addr;
+ for (i = 0 ; i < num_of_hop3 ; i++) {
+ clear_pte(ctx, hop2_pte_addr);
+ put_pte(ctx, hop2_addr);
+ hop2_pte_addr += HL_PTE_SIZE;
+ }
+
+ clear_pte(ctx, hop1_addr);
+ put_pte(ctx, hop1_addr);
+ clear_pte(ctx, hop0_addr);
+
+ kfree(ctx->dram_default_hops);
+
+ flush(ctx);
+}
+
+/**
+ * hl_mmu_v1_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Create a shadow table for pgt.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int hl_mmu_v1_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ hdev->mmu_priv.mmu_pgt_pool =
+ gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
+
+ if (!hdev->mmu_priv.mmu_pgt_pool) {
+ dev_err(hdev->dev, "Failed to create page gen pool\n");
+ return -ENOMEM;
+ }
+
+ rc = gen_pool_add(hdev->mmu_priv.mmu_pgt_pool, prop->mmu_pgt_addr +
+ prop->mmu_hop0_tables_total_size,
+ prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
+ -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+ goto err_pool_add;
+ }
+
+ hdev->mmu_priv.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
+ prop->mmu_hop_table_size,
+ GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR(hdev->mmu_priv.mmu_shadow_hop0)) {
+ rc = -ENOMEM;
+ goto err_pool_add;
+ }
+
+ /* MMU H/W init will be done in device hw_init() */
+
+ return 0;
+
+err_pool_add:
+ gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+
+ return rc;
+}
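+
+/*
+ * Illustrative sizing (hypothetical numbers): with mmu_hop_table_size = 4 KB
+ * and max_asid = 1024, the shadow hop0 array above is 4 MB, and the gen_pool
+ * serves the remainder of the device page-table region, i.e. mmu_pgt_size
+ * minus the per-ASID hop0 tables carved out at its start.
+ */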
+
+/**
+ * hl_mmu_v1_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Free the shadow hop0 tables.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+static void hl_mmu_v1_fini(struct hl_device *hdev)
+{
+ /* MMU H/W fini was already done in device hw_fini() */
+
+ kvfree(hdev->mmu_priv.mmu_shadow_hop0);
+ gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool);
+}
+
+/**
+ * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow and a hash to
+ * hold all the page table hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx)
+{
+ mutex_init(&ctx->mmu_lock);
+ hash_init(ctx->mmu_shadow_hash);
+
+ return dram_default_mapping_init(ctx);
+}
+
+/*
+ * hl_mmu_v1_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ int i;
+
+ dram_default_mapping_fini(ctx);
+
+ if (!hash_empty(ctx->mmu_shadow_hash))
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
+
+ hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
+ dev_err_ratelimited(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ _free_hop(ctx, pgt_info);
+ }
+
+ mutex_destroy(&ctx->mmu_lock);
+}
+
+static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
+ u64 virt_addr, bool is_dram_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte;
+ bool is_huge, clear_hop3 = true;
+
+ /* shifts and masks are the same in PMMU and HPMMU, use one of them */
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
+ hop0_addr = get_hop0_addr(ctx);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
+
+ curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
+
+ hop1_addr = get_next_hop_addr(ctx, curr_pte);
+
+ if (hop1_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
+
+ curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
+
+ hop2_addr = get_next_hop_addr(ctx, curr_pte);
+
+ if (hop2_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
+
+ curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+
+ hop3_addr = get_next_hop_addr(ctx, curr_pte);
+
+ if (hop3_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+
+ curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+
+ is_huge = curr_pte & LAST_MASK;
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev,
+ "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ if (!is_huge) {
+ hop4_addr = get_next_hop_addr(ctx, curr_pte);
+
+ if (hop4_addr == ULLONG_MAX)
+ goto not_mapped;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
+
+ curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+
+ clear_hop3 = false;
+ }
+
+ if (hdev->dram_default_page_mapping && is_dram_addr) {
+ u64 default_pte = (prop->mmu_dram_default_page_addr &
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+ if (curr_pte == default_pte) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK)) {
+ dev_err(hdev->dev,
+ "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n",
+ virt_addr);
+ goto not_mapped;
+ }
+
+ write_final_pte(ctx, hop3_pte_addr, default_pte);
+ put_pte(ctx, hop3_addr);
+ } else {
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
+ if (hop4_addr)
+ clear_pte(ctx, hop4_pte_addr);
+ else
+ clear_pte(ctx, hop3_pte_addr);
+
+ if (hop4_addr && !put_pte(ctx, hop4_addr))
+ clear_hop3 = true;
+
+ if (!clear_hop3)
+ goto mapped;
+
+ clear_pte(ctx, hop3_pte_addr);
+
+ if (put_pte(ctx, hop3_addr))
+ goto mapped;
+
+ clear_pte(ctx, hop2_pte_addr);
+
+ if (put_pte(ctx, hop2_addr))
+ goto mapped;
+
+ clear_pte(ctx, hop1_pte_addr);
+
+ if (put_pte(ctx, hop1_addr))
+ goto mapped;
+
+ clear_pte(ctx, hop0_pte_addr);
+ }
+
+mapped:
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+ virt_addr);
+
+ return -EINVAL;
+}
+
+static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool is_dram_addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ u64 hop0_addr = 0, hop0_pte_addr = 0,
+ hop1_addr = 0, hop1_pte_addr = 0,
+ hop2_addr = 0, hop2_pte_addr = 0,
+ hop3_addr = 0, hop3_pte_addr = 0,
+ hop4_addr = 0, hop4_pte_addr = 0,
+ curr_pte = 0;
+ bool hop1_new = false, hop2_new = false, hop3_new = false,
+ hop4_new = false, is_huge;
+ int rc = -ENOMEM;
+
+ /*
+	 * This mapping function can map a page or a huge page. For a huge page
+	 * there are only 3 hops rather than 4. Currently the DRAM allocation
+	 * uses huge pages only, but user memory could have been allocated with
+	 * one of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
+ */
+ if (is_dram_addr) {
+ mmu_prop = &prop->dmmu;
+ is_huge = true;
+ } else if (page_size == prop->pmmu_huge.page_size) {
+ mmu_prop = &prop->pmmu_huge;
+ is_huge = true;
+ } else {
+ mmu_prop = &prop->pmmu;
+ is_huge = false;
+ }
+
+ hop0_addr = get_hop0_addr(ctx);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
+
+ hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
+ if (hop1_addr == ULLONG_MAX)
+ goto err;
+
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
+
+ hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
+ if (hop2_addr == ULLONG_MAX)
+ goto err;
+
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
+
+ hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
+ if (hop3_addr == ULLONG_MAX)
+ goto err;
+
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
+
+ if (!is_huge) {
+ hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
+ if (hop4_addr == ULLONG_MAX)
+ goto err;
+
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
+ }
+
+ if (hdev->dram_default_page_mapping && is_dram_addr) {
+ u64 default_pte = (prop->mmu_dram_default_page_addr &
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ PAGE_PRESENT_MASK;
+
+ if (curr_pte != default_pte) {
+ dev_err(hdev->dev,
+ "DRAM: mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (hop1_new || hop2_new || hop3_new || hop4_new) {
+ dev_err(hdev->dev,
+ "DRAM mapping should not allocate more hops\n");
+ rc = -EFAULT;
+ goto err;
+ }
+ } else if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev,
+ "mapping already exists for virt_addr 0x%llx\n",
+ virt_addr);
+
+ dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
+ *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
+ dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
+ *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
+ dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
+ *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
+ dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
+ *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
+
+ if (!is_huge)
+ dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
+ *(u64 *) (uintptr_t) hop4_pte_addr,
+ hop4_pte_addr);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
+ | PAGE_PRESENT_MASK;
+
+ if (is_huge)
+ write_final_pte(ctx, hop3_pte_addr, curr_pte);
+ else
+ write_final_pte(ctx, hop4_pte_addr, curr_pte);
+
+ if (hop1_new) {
+ curr_pte =
+ (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop0_pte_addr, curr_pte);
+ }
+ if (hop2_new) {
+ curr_pte =
+ (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop1_pte_addr, curr_pte);
+ get_pte(ctx, hop1_addr);
+ }
+ if (hop3_new) {
+ curr_pte =
+ (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ write_pte(ctx, hop2_pte_addr, curr_pte);
+ get_pte(ctx, hop2_addr);
+ }
+
+ if (!is_huge) {
+ if (hop4_new) {
+ curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ write_pte(ctx, hop3_pte_addr, curr_pte);
+ get_pte(ctx, hop3_addr);
+ }
+
+ get_pte(ctx, hop4_addr);
+ } else {
+ get_pte(ctx, hop3_addr);
+ }
+
+ return 0;
+
+err:
+ if (hop4_new)
+ free_hop(ctx, hop4_addr);
+ if (hop3_new)
+ free_hop(ctx, hop3_addr);
+ if (hop2_new)
+ free_hop(ctx, hop2_addr);
+ if (hop1_new)
+ free_hop(ctx, hop1_addr);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_v1_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v1_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v1_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v1_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v1_set_funcs - set the MMU functions to the MMU v1 implementation
+ *
+ * @hdev: pointer to the device structure
+ */
+void hl_mmu_v1_set_funcs(struct hl_device *hdev)
+{
+ struct hl_mmu_funcs *mmu = &hdev->mmu_func;
+
+ mmu->init = hl_mmu_v1_init;
+ mmu->fini = hl_mmu_v1_fini;
+ mmu->ctx_init = hl_mmu_v1_ctx_init;
+ mmu->ctx_fini = hl_mmu_v1_ctx_fini;
+ mmu->map = _hl_mmu_v1_map;
+ mmu->unmap = _hl_mmu_v1_unmap;
+ mmu->flush = flush;
+ mmu->swap_out = hl_mmu_v1_swap_out;
+ mmu->swap_in = hl_mmu_v1_swap_in;
+}
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 2770f03b6cbb..4327e5704ebb 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -9,7 +9,6 @@
#include "../include/hw_ip/pci/pci_general.h"
#include <linux/pci.h>
-#include <linux/bitfield.h>
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 10)
@@ -339,12 +338,17 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev)
/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
+ * @cpu_boot_status_reg: status register of the device's CPU
+ * @boot_err0_reg: boot error register of the device's CPU
+ * @preboot_ver_timeout: how long to wait before bailing out on reading
+ * the preboot version
*
* Set DMA masks, initialize the PCI controller and map the PCI BARs.
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_pci_init(struct hl_device *hdev)
+int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 boot_err0_reg, u32 preboot_ver_timeout)
{
struct pci_dev *pdev = hdev->pdev;
int rc;
@@ -376,6 +380,15 @@ int hl_pci_init(struct hl_device *hdev)
if (rc)
goto unmap_pci_bars;
+	/* Before continuing with the initialization, we need to read the
+	 * preboot version to determine whether we run with a security-enabled
+	 * firmware. The check itself is done in each ASIC's specific code.
+ */
+ rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg,
+ preboot_ver_timeout);
+ if (rc)
+ goto unmap_pci_bars;
+
return 0;
unmap_pci_bars:
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 5ae484cc84cd..3ceae87016b1 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -11,18 +11,18 @@
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
if (curr)
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
else
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -40,13 +40,13 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.pll_index = cpu_to_le32(pll_index);
pkt.value = cpu_to_le64(freq);
@@ -61,14 +61,14 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
u64 hl_get_max_power(struct hl_device *hdev)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
@@ -83,13 +83,13 @@ u64 hl_get_max_power(struct hl_device *hdev)
void hl_set_max_power(struct hl_device *hdev)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(hdev->max_power);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -112,7 +112,7 @@ static ssize_t armcp_kernel_ver_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s", hdev->asic_prop.armcp_info.kernel_version);
+ return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version);
}
static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
@@ -120,7 +120,7 @@ static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.armcp_version);
+ return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version);
}
static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
@@ -129,7 +129,23 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n",
- hdev->asic_prop.armcp_info.cpld_version);
+ hdev->asic_prop.cpucp_info.cpld_version);
+}
+
+static ssize_t cpucp_kernel_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.kernel_version);
+}
+
+static ssize_t cpucp_ver_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.cpucp_version);
}
static ssize_t infineon_ver_show(struct device *dev,
@@ -138,7 +154,7 @@ static ssize_t infineon_ver_show(struct device *dev,
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%04x\n",
- hdev->asic_prop.armcp_info.infineon_version);
+ hdev->asic_prop.cpucp_info.infineon_version);
}
static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
@@ -146,7 +162,7 @@ static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.fuse_version);
+ return sprintf(buf, "%s\n", hdev->asic_prop.cpucp_info.fuse_version);
}
static ssize_t thermal_ver_show(struct device *dev,
@@ -154,7 +170,7 @@ static ssize_t thermal_ver_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s", hdev->asic_prop.armcp_info.thermal_version);
+ return sprintf(buf, "%s", hdev->asic_prop.cpucp_info.thermal_version);
}
static ssize_t preboot_btl_ver_show(struct device *dev,
@@ -356,6 +372,8 @@ out:
static DEVICE_ATTR_RO(armcp_kernel_ver);
static DEVICE_ATTR_RO(armcp_ver);
static DEVICE_ATTR_RO(cpld_ver);
+static DEVICE_ATTR_RO(cpucp_kernel_ver);
+static DEVICE_ATTR_RO(cpucp_ver);
static DEVICE_ATTR_RO(device_type);
static DEVICE_ATTR_RO(fuse_ver);
static DEVICE_ATTR_WO(hard_reset);
@@ -380,6 +398,8 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_armcp_kernel_ver.attr,
&dev_attr_armcp_ver.attr,
&dev_attr_cpld_ver.attr,
+ &dev_attr_cpucp_kernel_ver.attr,
+ &dev_attr_cpucp_ver.attr,
&dev_attr_device_type.attr,
&dev_attr_fuse_ver.attr,
&dev_attr_hard_reset.attr,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 4009b7df4caf..2519a34e25b7 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -21,7 +21,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
-#include <linux/bitfield.h>
/*
* Gaudi security scheme:
@@ -360,13 +359,14 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
-static int gaudi_armcp_info_get(struct hl_device *hdev);
+static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static int gaudi_get_fixed_properties(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 num_sync_stream_queues = 0;
int i;
prop->max_queues = GAUDI_QUEUE_ID_SIZE;
@@ -383,6 +383,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->hw_queues_props[i].driver_only = 0;
prop->hw_queues_props[i].requires_kernel_cb = 1;
prop->hw_queues_props[i].supports_sync_stream = 1;
+ num_sync_stream_queues++;
} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
@@ -440,6 +441,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
prop->pmmu.page_size = PAGE_SIZE_4KB;
+ prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -464,11 +466,16 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev)
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
- strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
prop->max_pending_cs = GAUDI_MAX_PENDING_CS;
+ prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
+ num_sync_stream_queues * HL_RSVD_SOBS;
+ prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
+ num_sync_stream_queues * HL_RSVD_MONS;
+
return 0;
}
@@ -592,10 +599,15 @@ static int gaudi_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
- rc = hl_pci_init(hdev);
+ rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc)
goto free_queue_props;
+ /* GAUDI Firmware does not yet support security */
+ prop->fw_security_disabled = true;
+ dev_info(hdev->dev, "firmware-level security is disabled\n");
+
return 0;
free_queue_props:
@@ -668,17 +680,16 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
if (!cb)
return -EFAULT;
- init_tpc_mem_pkt = (struct packet_lin_dma *) (uintptr_t)
- cb->kernel_address;
+ init_tpc_mem_pkt = cb->kernel_address;
cb_size = sizeof(*init_tpc_mem_pkt);
memset(init_tpc_mem_pkt, 0, cb_size);
init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);
- ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
- (1 << GAUDI_PKT_CTL_RB_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT));
+ ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
+ ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
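+	/*
+	 * FIELD_PREP() keeps the old shift semantics: for a contiguous mask,
+	 * FIELD_PREP(mask, v) equals (v << <index of mask's lowest set bit>)
+	 * & mask, so FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1) reproduces the
+	 * former (1 << GAUDI_PKT_CTL_RB_SHIFT).
+	 */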
init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
@@ -780,13 +791,13 @@ static int gaudi_late_init(struct hl_device *hdev)
struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
- rc = gaudi->armcp_info_get(hdev);
+ rc = gaudi->cpucp_info_get(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to get armcp info\n");
+ dev_err(hdev->dev, "Failed to get cpucp info\n");
return rc;
}
- rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc;
@@ -811,7 +822,7 @@ static int gaudi_late_init(struct hl_device *hdev)
return 0;
disable_pci_access:
- hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
return rc;
}
@@ -981,7 +992,7 @@ static int gaudi_sw_init(struct hl_device *hdev)
}
}
- gaudi->armcp_info_get = gaudi_armcp_info_get;
+ gaudi->cpucp_info_get = gaudi_cpucp_info_get;
gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;
@@ -1853,9 +1864,11 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
- WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
- WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
- WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, QMAN_LDMA_SIZE_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_SRC_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_DST_OFFSET);
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
@@ -1911,6 +1924,9 @@ static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);
+ /* WA for H/W bug H3-2116 */
+ WREG32(mmDMA0_CORE_LBW_MAX_OUTSTAND + dma_offset, 15);
+
/* STOP_ON bit implies no completion to operation in case of RAZWI */
if (hdev->stop_on_err)
dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;
@@ -2010,13 +2026,19 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
- WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
- WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
- WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_CPDMA_SIZE_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_SRC_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_DST_OFFSET);
} else {
- WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
- WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
- WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_LDMA_SIZE_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_SRC_OFFSET);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
@@ -2120,13 +2142,19 @@ static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);
- WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
- WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
- WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_CPDMA_SIZE_OFFSET);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_SRC_OFFSET);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_DST_OFFSET);
} else {
- WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
- WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
- WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_LDMA_SIZE_OFFSET);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_SRC_OFFSET);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
mme_id = mme_offset /
@@ -2234,13 +2262,19 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);
- WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
- WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
- WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_CPDMA_SIZE_OFFSET);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_SRC_OFFSET);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_CPDMA_DST_OFFSET);
} else {
- WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
- WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
- WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
+ QMAN_LDMA_SIZE_OFFSET);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_SRC_OFFSET);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
+ QMAN_LDMA_DST_OFFSET);
/* Configure RAZWI IRQ */
tpc_id = tpc_offset /
@@ -2321,7 +2355,8 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev)
tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
- gaudi->hw_cap_initialized |= 1 << (HW_CAP_TPC_SHIFT + tpc_id);
+ gaudi->hw_cap_initialized |=
+ FIELD_PREP(HW_CAP_TPC_MASK, 1 << tpc_id);
}
}
@@ -2847,7 +2882,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
if (err) {
dev_err(hdev->dev,
- "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ "Failed to communicate with Device CPU (CPU-CP timeout)\n");
return -EIO;
}
@@ -2860,6 +2895,18 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
/* Perform read from the device to make sure device is up */
RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ /* Set the access through PCI bars (Linux driver only) as
+ * secured
+ */
+ WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
+ (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
+ PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
+
+ /* Perform read to flush the waiting writes to ensure
+ * configuration was set in the device
+ */
+ RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
+
/*
* Let's mark in the H/W that we have reached this point. We check
* this value in the reset_before_init function to understand whether
@@ -2868,31 +2915,6 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
- /* Set the access through PCI bars (Linux driver only) as secured */
- WREG32(mmPCIE_WRAP_LBW_PROT_OVR, (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
- PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
-
- /* Perform read to flush the waiting writes to ensure configuration
- * was set in the device
- */
- RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
-
- if (hdev->axi_drain) {
- WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG,
- 1 << PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT);
- WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG,
- 1 << PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT);
-
- /* Perform read to flush the DRAIN cfg */
- RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
- } else {
- WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG, 0);
- WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG, 0);
-
- /* Perform read to flush the DRAIN cfg */
- RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
- }
-
/* Configure the reset registers. Must be done as early as possible
* in case we fail during H/W initialization
*/
@@ -2900,13 +2922,13 @@ static void gaudi_pre_hw_init(struct hl_device *hdev)
(CFG_RST_H_DMA_MASK |
CFG_RST_H_MME_MASK |
CFG_RST_H_SM_MASK |
- CFG_RST_H_TPC_MASK));
+ CFG_RST_H_TPC_7_MASK));
WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
(CFG_RST_H_HBM_MASK |
- CFG_RST_H_TPC_MASK |
+ CFG_RST_H_TPC_7_MASK |
CFG_RST_H_NIC_MASK |
CFG_RST_H_SM_MASK |
CFG_RST_H_DMA_MASK |
@@ -3071,7 +3093,7 @@ static int gaudi_suspend(struct hl_device *hdev)
{
int rc;
- rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -3084,17 +3106,16 @@ static int gaudi_resume(struct hl_device *hdev)
}
static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
- u64 kaddress, phys_addr_t paddress, u32 size)
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
- size, vma->vm_page_prot);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
- dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+ dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
return rc;
}
@@ -3441,7 +3462,8 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
&fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
- "Failed to allocate memory for queue testing\n");
+ "Failed to allocate memory for H/W queue %d testing\n",
+ hw_queue_id);
return -ENOMEM;
}
@@ -3452,14 +3474,16 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
GFP_KERNEL, &pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
- "Failed to allocate packet for queue testing\n");
+ "Failed to allocate packet for H/W queue %d testing\n",
+ hw_queue_id);
rc = -ENOMEM;
goto free_fence_ptr;
}
- tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_CTL_EB_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(fence_val);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
@@ -3469,7 +3493,8 @@ static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
pkt_dma_addr);
if (rc) {
dev_err(hdev->dev,
- "Failed to send fence packet\n");
+ "Failed to send fence packet to H/W queue %d\n",
+ hw_queue_id);
goto free_pkt;
}
@@ -3785,8 +3810,7 @@ static int gaudi_validate_cb(struct hl_device *hdev,
u16 pkt_size;
struct gaudi_packet *user_pkt;
- user_pkt = (struct gaudi_packet *) (uintptr_t)
- (parser->user_cb->kernel_address + cb_parsed_length);
+ user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
@@ -3959,8 +3983,6 @@ static int gaudi_patch_dma_packet(struct hl_device *hdev,
}
}
- new_dma_pkt->ctl = user_dma_pkt->ctl;
-
ctl = le32_to_cpu(user_dma_pkt->ctl);
if (likely(dma_desc_cnt))
ctl &= ~GAUDI_PKT_CTL_EB_MASK;
@@ -4011,11 +4033,9 @@ static int gaudi_patch_cb(struct hl_device *hdev,
u32 new_pkt_size = 0;
struct gaudi_packet *user_pkt, *kernel_pkt;
- user_pkt = (struct gaudi_packet *) (uintptr_t)
- (parser->user_cb->kernel_address + cb_parsed_length);
- kernel_pkt = (struct gaudi_packet *) (uintptr_t)
- (parser->patched_cb->kernel_address +
- cb_patched_cur_length);
+ user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
+ kernel_pkt = parser->patched_cb->kernel_address +
+ cb_patched_cur_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
@@ -4105,8 +4125,9 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID, false);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ parser->patched_cb_size, false, false,
+ &patched_cb_handle);
if (rc) {
dev_err(hdev->dev,
@@ -4130,8 +4151,8 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
* The check that parser->user_cb_size <= parser->user_cb->size was done
* in validate_queue_index().
*/
- memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
- (void *) (uintptr_t) parser->user_cb->kernel_address,
+ memcpy(parser->patched_cb->kernel_address,
+ parser->user_cb->kernel_address,
parser->user_cb_size);
patched_cb_size = parser->patched_cb_size;
@@ -4178,8 +4199,9 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID, false);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ parser->patched_cb_size, false, false,
+ &patched_cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -4264,7 +4286,7 @@ static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
}
static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
- u64 kernel_address, u32 len,
+ void *kernel_address, u32 len,
u64 cq_addr, u32 cq_val, u32 msi_vec,
bool eb)
{
@@ -4272,14 +4294,13 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
struct packet_msg_prot *cq_pkt;
u32 tmp;
- cq_pkt = (struct packet_msg_prot *) (uintptr_t)
- (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+ cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
- tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
if (eb)
- tmp |= (1 << GAUDI_PKT_CTL_EB_SHIFT);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(cq_val);
@@ -4287,8 +4308,8 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
cq_pkt++;
- tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
@@ -4316,15 +4337,16 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
if (!cb)
return -EFAULT;
- lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+ lin_dma_pkt = cb->kernel_address;
memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
cb_size = sizeof(*lin_dma_pkt);
- ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
- (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
- (1 << GAUDI_PKT_CTL_RB_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT));
+ ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
+ ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
+
lin_dma_pkt->ctl = cpu_to_le32(ctl);
lin_dma_pkt->src_addr = cpu_to_le64(val);
lin_dma_pkt->dst_addr |= cpu_to_le64(addr);
@@ -4720,7 +4742,7 @@ static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
(addr - gaudi->hbm_bar_cur_addr));
}
-static void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
+void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
/* mask to zero the MMBP and ASID bits */
WREG32_AND(reg, ~0x7FF);
@@ -4888,9 +4910,6 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
- gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
-
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@@ -4927,12 +4946,13 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
cb = job->patched_cb;
- fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
- job->job_cb_size - sizeof(struct packet_msg_prot));
+ fence_pkt = cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot);
+
+ tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
+ tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
- tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
- (1 << GAUDI_PKT_CTL_EB_SHIFT) |
- (1 << GAUDI_PKT_CTL_MB_SHIFT);
fence_pkt->ctl = cpu_to_le32(tmp);
fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
fence_pkt->addr = cpu_to_le64(fence_dma_addr);
@@ -5606,7 +5626,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
bool soft_reset_required = false;
/* Accessing the TPC_INTR_CAUSE registers requires disabling the clock
- * gating, and thus cannot be done in ArmCP and should be done instead
+ * gating, and thus cannot be done in CPU-CP and should be done instead
* by the driver.
*/
@@ -5653,21 +5673,25 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev,
{
switch (event_type) {
case GAUDI_EVENT_FIX_POWER_ENV_S:
+ hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GAUDI_EVENT_FIX_POWER_ENV_E:
+ hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_S:
+ hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -6038,7 +6062,7 @@ static int gaudi_send_heartbeat(struct hl_device *hdev)
return hl_fw_send_heartbeat(hdev);
}
-static int gaudi_armcp_info_get(struct hl_device *hdev)
+static int gaudi_cpucp_info_get(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -6047,19 +6071,19 @@ static int gaudi_armcp_info_get(struct hl_device *hdev)
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_armcp_info_get(hdev);
+ rc = hl_fw_cpucp_info_get(hdev);
if (rc)
return rc;
- if (!strlen(prop->armcp_info.card_name))
- strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ if (!strlen(prop->cpucp_info.card_name))
+ strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
- hdev->card_type = le32_to_cpu(hdev->asic_prop.armcp_info.card_type);
+ hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);
- if (hdev->card_type == armcp_card_type_pci)
+ if (hdev->card_type == cpucp_card_type_pci)
prop->max_power_default = MAX_POWER_DEFAULT_PCI;
- else if (hdev->card_type == armcp_card_type_pmc)
+ else if (hdev->card_type == cpucp_card_type_pmc)
prop->max_power_default = MAX_POWER_DEFAULT_PMC;
hdev->max_power = prop->max_power_default;
@@ -6067,7 +6091,7 @@ static int gaudi_armcp_info_get(struct hl_device *hdev)
return 0;
}
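Two details above are easy to miss: card_type arrives little-endian from the device firmware, hence the le32_to_cpu() before the comparison, and the max-power defaults are in milliwatts (the defines later in this diff read 200000 /* 200W */ and 350000 /* 350W */). A reduced sketch of the selection, reusing the enum names from the patch:

/* Sketch: pick the default power ceiling from the reported card type. */
static u32 card_max_power_mw(__le32 fw_card_type)
{
        u32 card_type = le32_to_cpu(fw_card_type);      /* LE wire format */

        if (card_type == cpucp_card_type_pci)
                return 200000;          /* MAX_POWER_DEFAULT_PCI */

        return 350000;                  /* MAX_POWER_DEFAULT_PMC */
}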
-static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask,
struct seq_file *s)
{
struct gaudi_device *gaudi = hdev->asic_specific;
@@ -6099,7 +6123,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle <<
+ *mask |= ((u64) !is_eng_idle) <<
(GAUDI_ENGINE_ID_DMA_0 + dma_id);
if (s)
seq_printf(s, fmt, dma_id,
@@ -6122,7 +6146,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
+ *mask |= ((u64) !is_eng_idle) <<
+ (GAUDI_ENGINE_ID_TPC_0 + i);
if (s)
seq_printf(s, fmt, i,
is_eng_idle ? "Y" : "N",
@@ -6150,7 +6175,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
+ *mask |= ((u64) !is_eng_idle) <<
+ (GAUDI_ENGINE_ID_MME_0 + i);
if (s) {
if (!is_slave)
seq_printf(s, fmt, i,
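The widening from u32 to u64 across these hunks goes together: once the idle mask reported to callers is 64 bits wide (presumably to leave headroom for more than 32 engines), each per-engine contribution must be widened before the shift. `!is_eng_idle` is a plain int, and shifting an int into or past bit 31 is undefined behavior in C, so the explicit (u64) cast is required rather than cosmetic. A minimal illustration:

#include <linux/types.h>

/* Sketch: set a busy bit for engine IDs that may reach or pass bit 31. */
static u64 mark_engine(u64 mask, unsigned int engine_id, bool is_eng_idle)
{
        /* widen first; an int shift to bit 31 or beyond is undefined */
        return mask | ((u64)!is_eng_idle << engine_id);
}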
@@ -6288,6 +6314,15 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d vector pipe\n",
+ tpc_id);
+ hdev->asic_funcs->set_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+ return -EIO;
+ }
+
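The added check turns a vector-pipe timeout into a hard failure instead of silently falling through to the next poll, and its error path re-enables clock gating and drops clk_gate_mutex exactly like the surrounding exits. For reference, a poll-with-timeout helper of the general kind hl_poll_timeout() provides might look like this sketch (simplified and hypothetical; the real macro reads through the ASIC's register accessors and takes a sleep interval):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Sketch only: spin until *reg == want or timeout_us elapses. */
static int poll_reg(void __iomem *reg, u32 want, unsigned long timeout_us)
{
        unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

        while (readl(reg) != want) {
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;      /* caller unwinds its state */
                usleep_range(100, 200);         /* back off between reads */
        }

        return 0;
}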
rc = hl_poll_timeout(
hdev,
mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
@@ -6343,7 +6378,7 @@ static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
struct packet_msg_short *pkt;
u32 value, ctl;
- pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
+ pkt = cb->kernel_address;
memset(pkt, 0, sizeof(*pkt));
/* Inc by 1, Mode ADD */
@@ -6435,7 +6470,7 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
u16 sob_val, u16 mon_id, u32 q_idx)
{
struct hl_cb *cb = (struct hl_cb *) data;
- void *buf = (void *) (uintptr_t) cb->kernel_address;
+ void *buf = cb->kernel_address;
u64 monitor_base, fence_addr = 0;
u32 size = 0;
u16 msg_addr_offset;
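Both casts disappear because cb->kernel_address is apparently retyped from u64 to void * in this series; a void * converts implicitly to any object pointer in C, and GNU C defines void * arithmetic as byte-granular, so the (uintptr_t) round trip becomes dead weight. A side-by-side sketch with hypothetical struct names:

#include <linux/types.h>

struct cb_old { u64 kernel_address; };          /* integer: needs casts */
struct cb_new { void *kernel_address; };        /* pointer: converts freely */

static void *at_offset_old(struct cb_old *cb, size_t off)
{
        return (void *)(uintptr_t)(cb->kernel_address + off);
}

static void *at_offset_new(struct cb_new *cb, size_t off)
{
        return cb->kernel_address + off;        /* GNU C byte arithmetic */
}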
@@ -6617,7 +6652,6 @@ static const struct hl_asic_funcs gaudi_funcs = {
.send_cpu_message = gaudi_send_cpu_message,
.get_hw_state = gaudi_get_hw_state,
.pci_bars_map = gaudi_pci_bars_map,
- .set_dram_bar_base = gaudi_set_hbm_bar_base,
.init_iatu = gaudi_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index 82137c3f3e2e..8eb598db81b2 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -35,8 +35,6 @@
#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
#endif
-#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
-
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
@@ -44,7 +42,7 @@
#define MAX_POWER_DEFAULT_PCI 200000 /* 200W */
#define MAX_POWER_DEFAULT_PMC 350000 /* 350W */
-#define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
+#define GAUDI_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define TPC_ENABLED_MASK 0xFF
@@ -86,6 +84,14 @@
#define DMA_CORE_OFFSET (mmDMA1_CORE_BASE - mmDMA0_CORE_BASE)
+#define QMAN_LDMA_SRC_OFFSET (mmDMA0_CORE_SRC_BASE_LO - mmDMA0_CORE_CFG_0)
+#define QMAN_LDMA_DST_OFFSET (mmDMA0_CORE_DST_BASE_LO - mmDMA0_CORE_CFG_0)
+#define QMAN_LDMA_SIZE_OFFSET (mmDMA0_CORE_DST_TSIZE_0 - mmDMA0_CORE_CFG_0)
+
+#define QMAN_CPDMA_SRC_OFFSET (mmDMA0_QM_CQ_PTR_LO_4 - mmDMA0_CORE_CFG_0)
+#define QMAN_CPDMA_DST_OFFSET (mmDMA0_CORE_DST_BASE_LO - mmDMA0_CORE_CFG_0)
+#define QMAN_CPDMA_SIZE_OFFSET (mmDMA0_QM_CQ_TSIZE_4 - mmDMA0_CORE_CFG_0)
+
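Each new define expresses a queue-DMA register as a compile-time offset from the DMA core's config block, so one programming routine can drive either the LDMA or the CPDMA register set by selecting an offset triple instead of hard-coding register names. A sketch of that pattern, reusing the offsets above; the struct and helper names are hypothetical:

/* Sketch: parameterize src/dst/size register offsets per DMA flavor. */
struct dma_xfer_regs {
        u32 src_lo;     /* source address, low 32 bits */
        u32 dst_lo;     /* destination address, low 32 bits */
        u32 size;       /* transfer size register */
};

static const struct dma_xfer_regs ldma_regs = {
        QMAN_LDMA_SRC_OFFSET, QMAN_LDMA_DST_OFFSET, QMAN_LDMA_SIZE_OFFSET,
};

static void program_xfer(u32 cfg_base, const struct dma_xfer_regs *r,
                         u32 src, u32 dst, u32 size)
{
        WREG32(cfg_base + r->src_lo, src);
        WREG32(cfg_base + r->dst_lo, dst);
        WREG32(cfg_base + r->size, size);
}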
#define SIF_RTR_CTRL_OFFSET (mmSIF_RTR_CTRL_1_BASE - mmSIF_RTR_CTRL_0_BASE)
#define NIF_RTR_CTRL_OFFSET (mmNIF_RTR_CTRL_1_BASE - mmNIF_RTR_CTRL_0_BASE)
@@ -142,28 +148,28 @@
#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \
VA_HOST_SPACE_START) /* 767TB */
-#define HW_CAP_PLL 0x00000001
-#define HW_CAP_HBM 0x00000002
-#define HW_CAP_MMU 0x00000004
-#define HW_CAP_MME 0x00000008
-#define HW_CAP_CPU 0x00000010
-#define HW_CAP_PCI_DMA 0x00000020
-#define HW_CAP_MSI 0x00000040
-#define HW_CAP_CPU_Q 0x00000080
-#define HW_CAP_HBM_DMA 0x00000100
-#define HW_CAP_CLK_GATE 0x00000200
-#define HW_CAP_SRAM_SCRAMBLER 0x00000400
-#define HW_CAP_HBM_SCRAMBLER 0x00000800
-
-#define HW_CAP_TPC0 0x01000000
-#define HW_CAP_TPC1 0x02000000
-#define HW_CAP_TPC2 0x04000000
-#define HW_CAP_TPC3 0x08000000
-#define HW_CAP_TPC4 0x10000000
-#define HW_CAP_TPC5 0x20000000
-#define HW_CAP_TPC6 0x40000000
-#define HW_CAP_TPC7 0x80000000
-#define HW_CAP_TPC_MASK 0xFF000000
+#define HW_CAP_PLL BIT(0)
+#define HW_CAP_HBM BIT(1)
+#define HW_CAP_MMU BIT(2)
+#define HW_CAP_MME BIT(3)
+#define HW_CAP_CPU BIT(4)
+#define HW_CAP_PCI_DMA BIT(5)
+#define HW_CAP_MSI BIT(6)
+#define HW_CAP_CPU_Q BIT(7)
+#define HW_CAP_HBM_DMA BIT(8)
+#define HW_CAP_CLK_GATE BIT(9)
+#define HW_CAP_SRAM_SCRAMBLER BIT(10)
+#define HW_CAP_HBM_SCRAMBLER BIT(11)
+
+#define HW_CAP_TPC0 BIT(24)
+#define HW_CAP_TPC1 BIT(25)
+#define HW_CAP_TPC2 BIT(26)
+#define HW_CAP_TPC3 BIT(27)
+#define HW_CAP_TPC4 BIT(28)
+#define HW_CAP_TPC5 BIT(29)
+#define HW_CAP_TPC6 BIT(30)
+#define HW_CAP_TPC7 BIT(31)
+#define HW_CAP_TPC_MASK GENMASK(31, 24)
#define HW_CAP_TPC_SHIFT 24
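BIT(n) and GENMASK(h, l) from <linux/bits.h> expand to exactly the hex constants they replace, e.g. BIT(9) == 0x00000200 (the old HW_CAP_CLK_GATE) and GENMASK(31, 24) == 0xFF000000 (the old HW_CAP_TPC_MASK); the gain is that the bit positions are explicit. The per-TPC test that HW_CAP_TPC_SHIFT supports stays a one-liner:

#include <linux/bits.h>
#include <linux/types.h>

/* HW_CAP_TPC0 is BIT(24), so TPC n maps to bit HW_CAP_TPC_SHIFT + n. */
static bool tpc_cap_set(u32 hw_cap_initialized, unsigned int tpc_id)
{
        return hw_cap_initialized & BIT(HW_CAP_TPC_SHIFT + tpc_id);
}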
#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
@@ -216,7 +222,7 @@ struct gaudi_internal_qman_info {
/**
* struct gaudi_device - ASIC-specific management structure.
- * @armcp_info_get: get information on device from ArmCP
+ * @cpucp_info_get: get information on device from CPU-CP
* @hw_queues_lock: protects the H/W queues from concurrent access.
* @clk_gate_mutex: protects code areas that require clock gating to be disabled
* temporarily
@@ -239,7 +245,7 @@ struct gaudi_internal_qman_info {
* 8-bit value so use u8.
*/
struct gaudi_device {
- int (*armcp_info_get)(struct hl_device *hdev);
+ int (*cpucp_info_get)(struct hl_device *hdev);
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
@@ -265,5 +271,6 @@ void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
int gaudi_debug_coresight(struct hl_device *hdev, void *data);
void gaudi_halt_coresight(struct hl_device *hdev);
int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid);
#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 881531d4d9da..3d2b0f0f4650 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -623,6 +623,11 @@ static int gaudi_config_etr(struct hl_device *hdev,
return -EINVAL;
}
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER,
+ hdev->compute_ctx->asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER,
+ hdev->compute_ctx->asid);
+
msb = upper_32_bits(input->buffer_address) >> 8;
msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index 8d5d6ddee6ed..2d7add0e5bcc 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -487,241 +487,241 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
pb_addr = (mmMME0_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_CTRL_RESET & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
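The 1U suffix repeated through this file is a correctness fix rather than churn: a register whose word offset lands on bit 31 would make `1 << 31` shift the signed literal into the sign bit, which is undefined behavior in C (and a standard UBSAN finding), while `1U << 31` is well defined for every offset in 0..31. The smallest possible illustration:

#include <linux/types.h>

static u32 bit31_signed(void)
{
        return 1 << 31;         /* UB: shifts into a signed int's sign bit */
}

static u32 bit31_unsigned(void)
{
        return 1U << 31;        /* well defined: 0x80000000 */
}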
pb_addr = (mmMME0_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -729,236 +729,235 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 &
PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME1_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME1_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME1_CTRL_RESET & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
- mask |= 1 << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+ mask = 1U << ((mmMME1_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1U << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME1_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME1_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+ mask = 1U << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -966,241 +965,241 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
pb_addr = (mmMME2_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_CTRL_RESET & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1208,102 +1207,102 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1311,134 +1310,133 @@ static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME3_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME3_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmMME3_CTRL_RESET & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
- mask |= 1 << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+ mask = 1U << ((mmMME3_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1U << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmMME3_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmMME3_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+ mask = 1U << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1486,199 +1484,199 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
pb_addr = (mmDMA0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1687,102 +1685,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
@@ -1790,290 +1788,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -2082,102 +2079,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
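The change in every hunk of this function is the same mechanical fix: the shift constant 1 becomes 1U. The shift count, ((reg & 0x7F) >> 2), is the register's slot within a 32-entry protection word, so it can legitimately reach 31, and 1 << 31 overflows a signed int, which is undefined behavior in C; 1U << 31 is well defined. A minimal standalone sketch of the mask computation, using hypothetical offsets in place of the mmDMA*_QM_* constants:

#include <stdio.h>

/* Hypothetical register offsets; slot = (reg & 0x7F) >> 2, range 0..31. */
#define REG_LOW		0x1008	/* slot 2  */
#define REG_HIGH	0x107C	/* slot 31 */

int main(void)
{
	unsigned int mask;

	mask = 1U << ((REG_LOW & 0x7F) >> 2);
	/* With a plain int constant this would be 1 << 31: signed overflow. */
	mask |= 1U << ((REG_HIGH & 0x7F) >> 2);

	/* The driver writes the complement of the accumulated mask (WREG32). */
	printf("mask  = 0x%08x\n", mask);
	printf("~mask = 0x%08x\n", ~mask);
	return 0;
}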
@@ -2186,290 +2183,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -2478,102 +2474,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
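Apart from the 1U conversion, the ARB hunks also drop the mmDMA*_QM_ARB_MST_QUIET_PER entry from the rebuilt mask rather than merely retyping it (see the DMA1 hunk above and the DMA2 hunk just below). The addressing scheme repeated by every block is: pb_addr locates the protection-bits table at the top of the register's 4 KB block, word_offset selects the 32-bit word covering its 128-byte group, and (reg & 0x7F) >> 2 selects the bit within that word. A standalone sketch of the mapping, assuming PROT_BITS_OFFS is 0xF80 as in the Gaudi driver headers and using a hypothetical register offset:

#include <stdio.h>

#define PROT_BITS_OFFS	0xF80	/* assumed value, per the Gaudi headers */

int main(void)
{
	unsigned int reg = 0x10A4;	/* hypothetical mm* register offset */
	unsigned int pb_addr, word_offset, bit;

	pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;	  /* table in this 4 KB block */
	word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2; /* one u32 per 128 bytes  */
	bit = (reg & 0x7F) >> 2;			  /* slot within that word   */

	printf("protection word at 0x%x + 0x%x, bit %u\n",
	       pb_addr, word_offset, bit);
	return 0;
}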
@@ -2582,290 +2578,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -2874,102 +2869,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -2978,290 +2973,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
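	/*
	 * A minimal sketch of the index math repeated in every hunk above,
	 * assuming PROT_BITS_OFFS locates the protection-bits shadow of each
	 * 4KB register page: one 32-bit protection word covers 128 bytes of
	 * registers, one bit per 4-byte register. The 1U of this patch keeps
	 * the shift unsigned, so setting bit 31 is well defined instead of
	 * shifting into the sign bit of a signed int. Hypothetical helper,
	 * not part of this patch; the real code ORs every register sharing a
	 * word into one mask before a single WREG32, rather than writing per
	 * register as shown here.
	 */
	static void gaudi_unsecure_one_reg(struct hl_device *hdev, u32 reg)
	{
		u32 pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;		/* PB base of page */
		u32 word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;	/* word in page */
		u32 mask = 1U << ((reg & 0x7F) >> 2);			/* bit in word */

		WREG32(pb_addr + word_offset, ~mask);	/* cleared bit = unsecured */
	}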
@@ -3270,102 +3264,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
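
The repeated edit in this patch is purely a type change: every protection-bit computation now shifts 1U instead of 1, so the shift operand is an unsigned int. Since (reg & 0x7F) >> 2 can evaluate to 31, shifting a plain signed int 1 by 31 lands in the sign bit, which is undefined behaviour in C; 1U << 31 is well defined. A minimal standalone sketch of the pattern follows — bit_for_reg() is a hypothetical helper for illustration, not a function in this driver:

	#include <stdint.h>

	/*
	 * Each 4-byte register maps to one bit of a 32-bit protection
	 * word: (addr & 0x7F) >> 2 yields a bit index in 0..31.
	 */
	static uint32_t bit_for_reg(uint32_t reg_addr)
	{
		unsigned int bit = (reg_addr & 0x7F) >> 2;

		/* 1 << 31 on a signed int is UB; 1U << 31 is not. */
		return 1U << bit;
	}

Note that the hunk below does slightly more than the 1U conversion: the rebuilt mask omits mmDMA4_QM_ARB_MST_QUIET_PER (and, in the later DMA5 hunk, mmDMA5_QM_ARB_MST_QUIET_PER), so that register's bit is no longer part of the value written by WREG32.
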
@@ -3374,290 +3368,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -3666,102 +3659,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -3770,290 +3763,289 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -4062,102 +4054,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -4166,290 +4158,290 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ mask = 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -4458,102 +4450,102 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
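/*
 * A sketch of the addressing scheme repeated throughout this function,
 * assuming (as elsewhere in this driver) that each 4 KB register page
 * keeps its protection bits in a table at PROT_BITS_OFFS, one bit per
 * 32-bit register: bits 11:7 of a register offset select the 32-bit
 * protection word, and bits 6:2 select the bit within that word.
 * hl_prot_word_offset() and hl_prot_bit() are hypothetical helpers for
 * illustration only; they are not part of this patch.
 */
static inline u32 hl_prot_word_offset(u32 reg)
{
	/* which 0x80-byte group of registers -> byte offset of its word */
	return ((reg & PROT_BITS_OFFS) >> 7) << 2;
}

static inline u32 hl_prot_bit(u32 reg)
{
	/* which of the 32 registers in the group -> its bit in the word */
	return 1U << ((reg & 0x7F) >> 2);
}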
@@ -4562,606 +4554,605 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
word_offset =
((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA0_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA0_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA1_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA1_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA2_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA2_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA3_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA3_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA4_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA4_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA5_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA5_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA6_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA6_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmDMA7_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmDMA7_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
- mask |= 1 << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+ mask = 1U << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1U << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
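/*
 * Why the switch from 1 to 1U (a standalone sketch, not part of the
 * patch itself): each 128-byte register block has one 32-bit
 * protection word, and ((reg & 0x7F) >> 2) selects the bit for a
 * given 4-byte register. That index can reach 31 (0x7F >> 2), and
 * left-shifting a plain signed int into bit 31 is undefined behavior
 * in C, so the mask has to be built from an unsigned constant. The
 * register offsets below are made up purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_HITS_BIT31	0x107Cu	/* (0x7C & 0x7F) >> 2 == 31 */
#define REG_HITS_BIT4	0x1010u	/* (0x10 & 0x7F) >> 2 == 4  */

static uint32_t prot_bit(uint32_t reg)
{
	/* 1U keeps the shift in unsigned arithmetic for indices 0..31 */
	return 1U << ((reg & 0x7F) >> 2);
}

int main(void)
{
	uint32_t mask = prot_bit(REG_HITS_BIT31) | prot_bit(REG_HITS_BIT4);

	/* the driver writes the complement of the accumulated mask */
	printf("mask = 0x%08x  ~mask = 0x%08x\n",
	       (unsigned)mask, (unsigned)~mask);
	return 0;
}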
@@ -5185,199 +5176,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -5387,102 +5378,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -5491,150 +5482,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC0_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -5643,199 +5633,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -5843,102 +5833,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
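
/*
 * Why "1 << ..." becomes "1U << ..." throughout this patch: the literal 1
 * has type signed int, and ((reg & 0x7F) >> 2) can evaluate to 31 (for a
 * register whose low byte is 0x7C), so the old code shifted a signed 1
 * into the sign bit -- undefined behaviour in C. With 1U the shift is
 * performed on an unsigned int and is well defined for all counts 0..31.
 * A minimal standalone sketch of the hazard; the register value below is
 * hypothetical, not taken from the driver:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x7C;                    /* hypothetical low bits */
	unsigned int shift = (reg & 0x7F) >> 2; /* == 31 */

	/* uint32_t bad = 1 << shift;  -- UB: shifts into the sign bit */
	uint32_t good = 1U << shift;            /* well defined: 0x80000000 */

	printf("shift=%u mask=0x%08x\n", shift, good);
	return 0;
}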
@@ -5947,150 +5937,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC1_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
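
/*
 * Reading the repeated pattern above: each 4 KB register page keeps a
 * block of protection words at PROT_BITS_OFFS; one bit guards one 4-byte
 * register, 32 bits per word, so ((reg & 0x7F) >> 2) selects the bit and
 * (((reg & PROT_BITS_OFFS) >> 7) << 2) the word's byte offset. The driver
 * ORs together the bits of the registers that share a word and writes
 * ~mask to that word. A hedged sketch of the arithmetic only; the helper
 * name and the PROT_BITS_OFFS value are assumptions for illustration:
 */
#include <stdint.h>

#define PROT_BITS_OFFS	0xF80	/* assumed offset of the protection block */

/* Return the address of the protection word guarding @reg and OR the
 * register's bit into @mask (mirrors the open-coded lines above). */
uint64_t prot_bit(uint64_t reg, uint32_t *mask)
{
	uint64_t pb_addr = (reg & ~0xFFFull) + PROT_BITS_OFFS;
	uint64_t word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;

	*mask |= 1U << ((reg & 0x7F) >> 2);
	return pb_addr + word_offset;
}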
@@ -6099,199 +6088,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -6299,102 +6288,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
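
The change throughout this function is mechanical: every mask bit is now built with an unsigned literal. The bit index ((reg & 0x7F) >> 2) reaches 31 whenever a register sits at offset 0x7C within its 128-byte protection block, and "1 << 31" overflows a signed int, which is undefined behavior in C; "1U << 31" is well defined. A minimal standalone sketch of the arithmetic (illustrative only; reg_off is a hypothetical offset, not one of the mmTPC* macros):

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg_off = 0x7C;              /* hypothetical offset within a 128-byte block */
		unsigned int bit = (reg_off & 0x7F) >> 2; /* == 31, the highest possible index */
		unsigned int mask = 1U << bit;            /* well defined: 0x80000000 */

		/* "1 << bit" would shift a 1 into the sign bit of a signed int: undefined behavior */
		printf("bit %u -> mask 0x%08x\n", bit, mask);
		return 0;
	}
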
@@ -6402,150 +6391,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC2_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -6554,199 +6542,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -6754,102 +6742,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -6857,150 +6845,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC3_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7009,199 +6996,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7209,102 +7196,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7312,150 +7299,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC4_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7464,199 +7450,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7664,102 +7650,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -7767,150 +7753,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC5_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
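[Editor's note: every hunk in this section makes the same mechanical change, replacing "1 << n" with "1U << n" in the protection-bit mask computations. The reason is that the low bits of each register offset select a bit position 0..31 within a 32-bit protection word, and shifting a plain (signed) int 1 into bit 31 is undefined behavior in C, while the unsigned 1U shift is well defined. A minimal standalone sketch of the pattern, for illustration only — it is not part of the patch, and the helper name prot_bit is hypothetical:

    #include <stdio.h>

    /* Mimics the mask computation above: bits [6:2] of a register
     * offset select a bit position 0..31 within a protection word. */
    static unsigned int prot_bit(unsigned int reg_offset)
    {
        /* 1 << 31 would be UB for a signed int; 1U << 31 is defined. */
        return 1U << ((reg_offset & 0x7F) >> 2);
    }

    int main(void)
    {
        /* An offset of 0x7C selects bit 31; the result is 0x80000000. */
        printf("0x%08x\n", prot_bit(0x7C));
        return 0;
    }
]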
@@ -7919,199 +7904,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
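
[Editorial note, not part of the patch: the hunks above and below make one mechanical change — every "mask = 1 << ..." / "mask |= 1 << ..." becomes "1U << ...". The likely motivation is that (reg & 0x7F) >> 2 can evaluate to 31, and left-shifting the signed literal 1 into the sign bit of a 32-bit int is undefined behavior in C, while shifting the unsigned literal 1U is well defined. A minimal standalone sketch of the difference, assuming a 32-bit int:]

	/*
	 * Editorial sketch -- not from the driver.  Each register offset
	 * selects one of 32 bit positions inside a protection word:
	 * (reg & 0x7F) >> 2 yields a value in 0..31.  When that value is
	 * 31, "1 << 31" shifts into the sign bit of a signed int
	 * (undefined behavior), while "1U << 31" is a well-defined
	 * unsigned shift.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int bit = 31;   /* worst case of (reg & 0x7F) >> 2 */
		unsigned int mask;

		mask = 1U << bit;        /* well defined: 0x80000000 */
		/* mask = 1 << bit;      -- undefined behavior when bit == 31 */

		printf("mask  = 0x%08x\n", mask);
		printf("~mask = 0x%08x\n", ~mask);  /* the value handed to WREG32 */
		return 0;
	}

[Note also that the two ARB hunks further down (the TPC6 and TPC7 mmTPC*_QM_ARB_MST_CHOISE_PUSH_OFST_23 blocks) are not purely a literal-suffix cleanup: their rewritten masks additionally drop mmTPC*_QM_ARB_MST_QUIET_PER from the set of registers whose protection bits are written.]
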
@@ -8119,102 +8104,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -8223,85 +8208,84 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -8309,65 +8293,65 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC6_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -8376,199 +8360,199 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -8578,102 +8562,102 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
word_offset = ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -8681,150 +8665,149 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
>> 7) << 2;
- mask = 1 << ((mmTPC7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
- mask = 1 << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
pb_addr = (mmTPC7_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
<< 2;
- mask = 1 << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
- mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+ mask = 1U << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1U << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
}
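
The wholesale 1 -> 1U conversion above is a correctness fix rather than style: (reg & 0x7F) >> 2 can evaluate to 31, and left-shifting a signed 1 into bit 31 is undefined behavior in C, while the unsigned literal is well defined. A minimal sketch of the pattern (the helper name is illustrative, not from the driver):

	#include <stdint.h>

	/* Each 32-bit register maps to one protection bit; the bit index
	 * (offset & 0x7F) >> 2 ranges over 0..31, so the shift base must be
	 * unsigned for 1U << 31 to stay well defined.
	 */
	static uint32_t prot_bit_for_reg(uint32_t reg_offset)
	{
		return 1U << ((reg_offset & 0x7F) >> 2);
	}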
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 33cd2ae653d2..235d47b2420f 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -426,12 +426,14 @@ int goya_get_fixed_properties(struct hl_device *hdev)
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
+ prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
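
The PMMU block is built by copying the DMMU template and overriding only the fields that differ (address range, page size, and now the hop count). A reduced sketch of this copy-then-override pattern, using a stand-in struct rather than the real asic_fixed_properties:

	struct ex_mmu_props {
		unsigned long start_addr, end_addr, page_size;
		unsigned int num_hops;
	};

	static void ex_init_pmmu(struct ex_mmu_props *pmmu,
				 const struct ex_mmu_props *dmmu)
	{
		*pmmu = *dmmu;			/* shifts/masks are identical */
		pmmu->page_size = 4096;		/* override what differs */
	}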
@@ -449,7 +451,7 @@ int goya_get_fixed_properties(struct hl_device *hdev)
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
- strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
prop->max_pending_cs = GOYA_MAX_PENDING_CS;
@@ -598,10 +600,15 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
- rc = hl_pci_init(hdev);
+ rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmCPU_BOOT_ERR0, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
if (rc)
goto free_queue_props;
+ /* Goya Firmware does not support security */
+ prop->fw_security_disabled = true;
+ dev_info(hdev->dev, "firmware-level security is disabled\n");
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -727,9 +734,9 @@ int goya_late_init(struct hl_device *hdev)
if (rc)
return rc;
- rc = goya_armcp_info_get(hdev);
+ rc = goya_cpucp_info_get(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to get armcp info %d\n", rc);
+ dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
return rc;
}
@@ -739,7 +746,7 @@ int goya_late_init(struct hl_device *hdev)
*/
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
- rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI access from CPU %d\n", rc);
@@ -2648,7 +2655,7 @@ int goya_suspend(struct hl_device *hdev)
{
int rc;
- rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2661,17 +2668,16 @@ int goya_resume(struct hl_device *hdev)
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
- u64 kaddress, phys_addr_t paddress, u32 size)
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int rc;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE;
- rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
- size, vma->vm_page_prot);
+ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
- dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+ dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
return rc;
}
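
goya_cb_mmap() now hands the mapping to the DMA API instead of computing PFNs by hand: dma_mmap_coherent() knows how the coherent buffer was allocated (CMA, IOMMU, and so on) and derives the matching PFN and page protection, which remap_pfn_range() cannot do portably. A hedged sketch of the general pattern, with a hypothetical device and buffer:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static int ex_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t dma_addr,
				    size_t size)
	{
		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

		/* The DMA layer picks the PFN and pgprot that match how
		 * cpu_addr was allocated.
		 */
		return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
	}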
@@ -2876,8 +2882,8 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
cb = job->patched_cb;
- fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
- job->job_cb_size - sizeof(struct packet_msg_prot));
+ fence_pkt = cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot);
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_EB_SHIFT) |
@@ -2946,7 +2952,8 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
&fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
- "Failed to allocate memory for queue testing\n");
+ "Failed to allocate memory for H/W queue %d testing\n",
+ hw_queue_id);
return -ENOMEM;
}
@@ -2957,7 +2964,8 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
GFP_KERNEL, &pkt_dma_addr);
if (!fence_pkt) {
dev_err(hdev->dev,
- "Failed to allocate packet for queue testing\n");
+ "Failed to allocate packet for H/W queue %d testing\n",
+ hw_queue_id);
rc = -ENOMEM;
goto free_fence_ptr;
}
@@ -2974,7 +2982,8 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
pkt_dma_addr);
if (rc) {
dev_err(hdev->dev,
- "Failed to send fence packet\n");
+ "Failed to send fence packet to H/W queue %d\n",
+ hw_queue_id);
goto free_pkt;
}
@@ -3466,8 +3475,7 @@ static int goya_validate_cb(struct hl_device *hdev,
u16 pkt_size;
struct goya_packet *user_pkt;
- user_pkt = (struct goya_packet *) (uintptr_t)
- (parser->user_cb->kernel_address + cb_parsed_length);
+ user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
@@ -3704,11 +3712,9 @@ static int goya_patch_cb(struct hl_device *hdev,
u32 new_pkt_size = 0;
struct goya_packet *user_pkt, *kernel_pkt;
- user_pkt = (struct goya_packet *) (uintptr_t)
- (parser->user_cb->kernel_address + cb_parsed_length);
- kernel_pkt = (struct goya_packet *) (uintptr_t)
- (parser->patched_cb->kernel_address +
- cb_patched_cur_length);
+ user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
+ kernel_pkt = parser->patched_cb->kernel_address +
+ cb_patched_cur_length;
pkt_id = (enum packet_id) (
(le64_to_cpu(user_pkt->header) &
@@ -3806,8 +3812,9 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
parser->patched_cb_size = parser->user_cb_size +
sizeof(struct packet_msg_prot) * 2;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID, false);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ parser->patched_cb_size, false, false,
+ &patched_cb_handle);
if (rc) {
dev_err(hdev->dev,
@@ -3831,8 +3838,8 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
* The check that parser->user_cb_size <= parser->user_cb->size was done
* in validate_queue_index().
*/
- memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
- (void *) (uintptr_t) parser->user_cb->kernel_address,
+ memcpy(parser->patched_cb->kernel_address,
+ parser->user_cb->kernel_address,
parser->user_cb_size);
patched_cb_size = parser->patched_cb_size;
@@ -3879,8 +3886,9 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
if (rc)
goto free_userptr;
- rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, parser->patched_cb_size,
- &patched_cb_handle, HL_KERNEL_ASID_ID, false);
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
+ parser->patched_cb_size, false, false,
+ &patched_cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate patched CB for DMA CS %d\n", rc);
@@ -3963,15 +3971,14 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
return goya_parse_cb_no_mmu(hdev, parser);
}
-void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
+void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
bool eb)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
- cq_pkt = (struct packet_msg_prot *) (uintptr_t)
- (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+ cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
(1 << GOYA_PKT_CTL_EB_SHIFT) |
@@ -4497,17 +4504,17 @@ static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
size_t irq_arr_size)
{
- struct armcp_unmask_irq_arr_packet *pkt;
+ struct cpucp_unmask_irq_arr_packet *pkt;
size_t total_pkt_size;
long result;
int rc;
int irq_num_entries, irq_arr_index;
__le32 *goya_irq_arr;
- total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
irq_arr_size;
- /* data should be aligned to 8 bytes in order to ArmCP to copy it */
+	/* data should be aligned to 8 bytes so that the CPU-CP can copy it */
total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
	/* total_pkt_size is cast to u16 later on */
@@ -4531,8 +4538,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
goya_irq_arr[irq_arr_index] =
cpu_to_le32(irq_arr[irq_arr_index]);
- pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
@@ -4557,14 +4564,14 @@ static int goya_soft_reset_late_init(struct hl_device *hdev)
static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
- struct armcp_packet pkt;
+ struct cpucp_packet pkt;
long result;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
- ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -4580,18 +4587,22 @@ static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
+ hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
+ hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
+ hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
+ hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
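
The new clk_throttling_reason bookkeeping treats each throttle cause as a flag: the _S (start) event ORs the bit in and the matching _E (end) event clears it, so the field always holds the set of currently active causes. A minimal sketch of that discipline (the EX_ flag values are assumptions mirroring HL_CLK_THROTTLE_POWER/THERMAL):

	#include <linux/types.h>

	#define EX_THROTTLE_POWER	0x1	/* assumed value */
	#define EX_THROTTLE_THERMAL	0x2	/* assumed value */

	static void ex_throttle_event(unsigned int *reason, unsigned int flag,
				      bool start)
	{
		if (start)
			*reason |= flag;	/* _S event: cause is active */
		else
			*reason &= ~flag;	/* _E event: cause cleared */
	}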
@@ -4638,7 +4649,8 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
- hl_device_reset(hdev, true, false);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
break;
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
@@ -4730,7 +4742,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
if (!cb)
return -ENOMEM;
- lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+ lin_dma_pkt = cb->kernel_address;
do {
memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
@@ -5096,7 +5108,7 @@ int goya_send_heartbeat(struct hl_device *hdev)
return hl_fw_send_heartbeat(hdev);
}
-int goya_armcp_info_get(struct hl_device *hdev)
+int goya_cpucp_info_get(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -5106,11 +5118,11 @@ int goya_armcp_info_get(struct hl_device *hdev)
if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
return 0;
- rc = hl_fw_armcp_info_get(hdev);
+ rc = hl_fw_cpucp_info_get(hdev);
if (rc)
return rc;
- dram_size = le64_to_cpu(prop->armcp_info.dram_size);
+ dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
if (dram_size) {
if ((!is_power_of_2(dram_size)) ||
(dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
@@ -5124,8 +5136,8 @@ int goya_armcp_info_get(struct hl_device *hdev)
prop->dram_end_address = prop->dram_base_address + dram_size;
}
- if (!strlen(prop->armcp_info.card_name))
- strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ if (!strlen(prop->cpucp_info.card_name))
+ strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
CARD_NAME_MAX_LEN);
return 0;
@@ -5141,7 +5153,7 @@ static void goya_disable_clock_gating(struct hl_device *hdev)
/* clock gating not supported in Goya */
}
-static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
+static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
struct seq_file *s)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
@@ -5166,7 +5178,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_DMA_0 + i);
+ *mask |= ((u64) !is_eng_idle) <<
+ (GOYA_ENGINE_ID_DMA_0 + i);
if (s)
seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
@@ -5189,7 +5202,8 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle << (GOYA_ENGINE_ID_TPC_0 + i);
+ *mask |= ((u64) !is_eng_idle) <<
+ (GOYA_ENGINE_ID_TPC_0 + i);
if (s)
seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
@@ -5209,7 +5223,7 @@ static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
is_idle &= is_eng_idle;
if (mask)
- *mask |= !is_eng_idle << GOYA_ENGINE_ID_MME_0;
+ *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
if (s) {
seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
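
The idle mask argument widens from u32 to u64, and each update now casts before shifting: (!is_eng_idle) << engine_id would otherwise be computed as a 32-bit int shift, truncating (or invoking undefined behavior) once engine IDs reach 31 and beyond on larger ASICs. Sketch of the widening rule:

	#include <stdint.h>

	static void ex_mark_engine(uint64_t *mask, int is_eng_idle,
				   unsigned int engine_id)
	{
		/* Cast the operand first so the whole shift is 64-bit. */
		*mask |= ((uint64_t)!is_eng_idle) << engine_id;
	}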
@@ -5369,7 +5383,6 @@ static const struct hl_asic_funcs goya_funcs = {
.send_cpu_message = goya_send_cpu_message,
.get_hw_state = goya_get_hw_state,
.pci_bars_map = goya_pci_bars_map,
- .set_dram_bar_base = goya_set_ddr_bar_base,
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index bb7474ee9784..def86c75e035 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -207,7 +207,7 @@ void goya_set_max_power(struct hl_device *hdev, u64 value);
void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-int goya_armcp_info_get(struct hl_device *hdev);
+int goya_cpucp_info_get(struct hl_device *hdev);
int goya_debug_coresight(struct hl_device *hdev, void *data);
void goya_halt_coresight(struct hl_device *hdev);
@@ -217,7 +217,7 @@ int goya_resume(struct hl_device *hdev);
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
-void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
+void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
bool eb);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
diff --git a/drivers/misc/habanalabs/include/common/armcp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 07f9972db28d..2a5c9cb3d505 100644
--- a/drivers/misc/habanalabs/include/common/armcp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2020 HabanaLabs, Ltd.
+ * Copyright 2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
-#ifndef ARMCP_IF_H
-#define ARMCP_IF_H
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
#include <linux/types.h>
@@ -50,16 +50,16 @@ enum pq_init_status {
};
/*
- * ArmCP Primary Queue Packets
+ * CpuCP Primary Queue Packets
*
* During normal operation, the host's kernel driver needs to send various
- * messages to ArmCP, usually either to SET some value into a H/W periphery or
+ * messages to CpuCP, usually either to SET some value into a H/W periphery or
* to GET the current value of some H/W periphery. For example, SET the
* frequency of MME/TPC and GET the value of the thermal sensor.
*
* These messages can be initiated either by the User application or by the
* host's driver itself, e.g. power management code. In either case, the
- * communication from the host's driver to ArmCP will *always* be in
+ * communication from the host's driver to CpuCP will *always* be in
* synchronous mode, meaning that the host will send a single message and poll
 * until the message is acknowledged and the results are ready (if results are
* needed).
@@ -73,21 +73,20 @@ enum pq_init_status {
*
* The message, inputs/outputs (if relevant) and fence object will be located
* on the device DDR at an address that will be determined by the host's driver.
- * During device initialization phase, the host will pass to ArmCP that address.
+ * During the device initialization phase, the host will pass that address to CpuCP.
* Most of the message types will contain inputs/outputs inside the message
* itself. The common part of each message will contain the opcode of the
* message (its type) and a field representing a fence object.
*
- * When the host's driver wishes to send a message to ArmCP, it will write the
- * message contents to the device DDR, clear the fence object and then write the
- * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue
- * the 484 interrupt-id to the ARM core.
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR register, issuing interrupt 121 to the ARC Management CPU.
*
- * Upon receiving the 484 interrupt-id, ArmCP will read the message from the
- * DDR. In case the message is a SET operation, ArmCP will first perform the
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
* operation and then write to the fence object on the device DDR. In case the
- * message is a GET operation, ArmCP will first fill the results section on the
- * device DDR and then write to the fence object. If an error occurred, ArmCP
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
* will fill the rc field with the right error code.
*
* In the meantime, the host's driver will poll on the fence object. Once the
@@ -96,164 +95,174 @@ enum pq_init_status {
* driver.
*
* To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
- * so the value being put by the host's driver matches the value read by ArmCP
+ * so the value being put by the host's driver matches the value read by CpuCP
*
* Non-QMAN packets should be limited to values 1 through (2^8 - 1)
*
* Detailed description:
*
- * ARMCP_PACKET_DISABLE_PCI_ACCESS -
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
* After receiving this packet the embedded CPU must NOT issue PCI
 * transactions (read/write) towards the Host CPU. This also includes
* sending MSI-X interrupts.
* This packet is usually sent before the device is moved to D3Hot state.
*
- * ARMCP_PACKET_ENABLE_PCI_ACCESS -
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
* After receiving this packet the embedded CPU is allowed to issue PCI
* transactions towards the Host CPU, including sending MSI-X interrupts.
 * This packet is usually sent after the device is moved to D0 state.
*
- * ARMCP_PACKET_TEMPERATURE_GET -
+ * CPUCP_PACKET_TEMPERATURE_GET -
* Fetch the current temperature / Max / Max Hyst / Critical /
* Critical Hyst of a specified thermal sensor. The packet's
* arguments specify the desired sensor and the field to get.
*
- * ARMCP_PACKET_VOLTAGE_GET -
+ * CPUCP_PACKET_VOLTAGE_GET -
* Fetch the voltage / Max / Min of a specified sensor. The packet's
* arguments specify the sensor and type.
*
- * ARMCP_PACKET_CURRENT_GET -
+ * CPUCP_PACKET_CURRENT_GET -
* Fetch the current / Max / Min of a specified sensor. The packet's
* arguments specify the sensor and type.
*
- * ARMCP_PACKET_FAN_SPEED_GET -
+ * CPUCP_PACKET_FAN_SPEED_GET -
* Fetch the speed / Max / Min of a specified fan. The packet's
* arguments specify the sensor and type.
*
- * ARMCP_PACKET_PWM_GET -
+ * CPUCP_PACKET_PWM_GET -
* Fetch the pwm value / mode of a specified pwm. The packet's
* arguments specify the sensor and type.
*
- * ARMCP_PACKET_PWM_SET -
+ * CPUCP_PACKET_PWM_SET -
* Set the pwm value / mode of a specified pwm. The packet's
* arguments specify the sensor, type and value.
*
- * ARMCP_PACKET_FREQUENCY_SET -
+ * CPUCP_PACKET_FREQUENCY_SET -
* Set the frequency of a specified PLL. The packet's arguments specify
* the PLL and the desired frequency. The actual frequency in the device
* might differ from the requested frequency.
*
- * ARMCP_PACKET_FREQUENCY_GET -
+ * CPUCP_PACKET_FREQUENCY_GET -
* Fetch the frequency of a specified PLL. The packet's arguments specify
* the PLL.
*
- * ARMCP_PACKET_LED_SET -
+ * CPUCP_PACKET_LED_SET -
* Set the state of a specified led. The packet's arguments
* specify the led and the desired state.
*
- * ARMCP_PACKET_I2C_WR -
+ * CPUCP_PACKET_I2C_WR -
* Write 32-bit value to I2C device. The packet's arguments specify the
* I2C bus, address and value.
*
- * ARMCP_PACKET_I2C_RD -
+ * CPUCP_PACKET_I2C_RD -
* Read 32-bit value from I2C device. The packet's arguments specify the
* I2C bus and address.
*
- * ARMCP_PACKET_INFO_GET -
+ * CPUCP_PACKET_INFO_GET -
* Fetch information from the device as specified in the packet's
- * structure. The host's driver passes the max size it allows the ArmCP to
+ * structure. The host's driver passes the max size it allows the CpuCP to
* write to the structure, to prevent data corruption in case of
* mismatched driver/FW versions.
*
- * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
*
- * ARMCP_PACKET_UNMASK_RAZWI_IRQ -
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
* Unmask the given IRQ. The IRQ number is specified in the value field.
* The packet is sent after receiving an interrupt and printing its
* relevant information.
*
- * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
 * Unmask the given IRQs. The IRQ numbers are specified in an array right
- * after the armcp_packet structure, where its first element is the array
+ * after the cpucp_packet structure, where its first element is the array
* length. The packet is sent after a soft reset was done in order to
* handle any interrupts that were sent during the reset process.
*
- * ARMCP_PACKET_TEST -
- * Test packet for ArmCP connectivity. The CPU will put the fence value
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
* in the result field.
*
- * ARMCP_PACKET_FREQUENCY_CURR_GET -
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
* Fetch the current frequency of a specified PLL. The packet's arguments
* specify the PLL.
*
- * ARMCP_PACKET_MAX_POWER_GET -
+ * CPUCP_PACKET_MAX_POWER_GET -
* Fetch the maximal power of the device.
*
- * ARMCP_PACKET_MAX_POWER_SET -
+ * CPUCP_PACKET_MAX_POWER_SET -
* Set the maximal power of the device. The packet's arguments specify
* the power.
*
- * ARMCP_PACKET_EEPROM_DATA_GET -
- * Get EEPROM data from the ArmCP kernel. The buffer is specified in the
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
* addr field. The CPU will put the returned data size in the result
* field. In addition, the host's driver passes the max size it allows the
- * ArmCP to write to the structure, to prevent data corruption in case of
+ * CpuCP to write to the structure, to prevent data corruption in case of
* mismatched driver/FW versions.
*
- * ARMCP_PACKET_TEMPERATURE_SET -
+ * CPUCP_PACKET_TEMPERATURE_SET -
* Set the value of the offset property of a specified thermal sensor.
* The packet's arguments specify the desired sensor and the field to
* set.
*
- * ARMCP_PACKET_VOLTAGE_SET -
+ * CPUCP_PACKET_VOLTAGE_SET -
* Trigger the reset_history property of a specified voltage sensor.
* The packet's arguments specify the desired sensor and the field to
* set.
*
- * ARMCP_PACKET_CURRENT_SET -
+ * CPUCP_PACKET_CURRENT_SET -
* Trigger the reset_history property of a specified current sensor.
* The packet's arguments specify the desired sensor and the field to
* set.
+ *
+ * CPUCP_PACKET_PLL_REG_GET -
+ *       Fetch a register of a PLL from the required PLL IP.
+ *       The packet's arguments specify the PLL IP and the register to get.
+ *       Each register is a 32-bit value which is returned in the result field.
+ *
*/
-enum armcp_packet_id {
- ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
- ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
- ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */
- ARMCP_PACKET_VOLTAGE_GET, /* sysfs */
- ARMCP_PACKET_CURRENT_GET, /* sysfs */
- ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */
- ARMCP_PACKET_PWM_GET, /* sysfs */
- ARMCP_PACKET_PWM_SET, /* sysfs */
- ARMCP_PACKET_FREQUENCY_SET, /* sysfs */
- ARMCP_PACKET_FREQUENCY_GET, /* sysfs */
- ARMCP_PACKET_LED_SET, /* debugfs */
- ARMCP_PACKET_I2C_WR, /* debugfs */
- ARMCP_PACKET_I2C_RD, /* debugfs */
- ARMCP_PACKET_INFO_GET, /* IOCTL */
- ARMCP_PACKET_FLASH_PROGRAM_REMOVED,
- ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
- ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
- ARMCP_PACKET_TEST, /* internal */
- ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
- ARMCP_PACKET_MAX_POWER_GET, /* sysfs */
- ARMCP_PACKET_MAX_POWER_SET, /* sysfs */
- ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
- ARMCP_RESERVED,
- ARMCP_PACKET_TEMPERATURE_SET, /* sysfs */
- ARMCP_PACKET_VOLTAGE_SET, /* sysfs */
- ARMCP_PACKET_CURRENT_SET, /* sysfs */
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_RESERVED,
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_REG_GET, /* internal */
};
-#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
-#define ARMCP_PKT_CTL_RC_SHIFT 12
-#define ARMCP_PKT_CTL_RC_MASK 0x0000F000
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
-#define ARMCP_PKT_CTL_OPCODE_SHIFT 16
-#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
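
Both the opcode and the return code live in the packet's single 32-bit ctl word, at the positions the shift/mask pairs above define. A hedged sketch of packing and unpacking, with EX_ copies of the values to keep the illustration self-contained:

	#define EX_PKT_CTL_RC_SHIFT	12
	#define EX_PKT_CTL_RC_MASK	0x0000F000
	#define EX_PKT_CTL_OPCODE_SHIFT	16
	#define EX_PKT_CTL_OPCODE_MASK	0x1FFF0000

	static unsigned int ex_pack_opcode(unsigned int opcode)
	{
		return (opcode << EX_PKT_CTL_OPCODE_SHIFT) &
			EX_PKT_CTL_OPCODE_MASK;
	}

	static unsigned int ex_unpack_rc(unsigned int ctl)
	{
		return (ctl & EX_PKT_CTL_RC_MASK) >> EX_PKT_CTL_RC_SHIFT;
	}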
-struct armcp_packet {
+struct cpucp_packet {
union {
__le64 value; /* For SET packets */
__le64 result; /* For GET packets */
@@ -277,71 +286,97 @@ struct armcp_packet {
__u8 pad; /* unused */
};
+ struct {/* For PLL register fetch */
+ __le16 pll_type;
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
/* For frequency get/set */
__le32 pll_index;
/* For led set */
__le32 led_index;
- /* For get Armcp info/EEPROM data */
+ /* For get CpuCP info/EEPROM data */
__le32 data_max_size;
};
__le32 reserved;
};
-struct armcp_unmask_irq_arr_packet {
- struct armcp_packet armcp_pkt;
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
__le32 length;
__le32 irqs[0];
};
-enum armcp_packet_rc {
- armcp_packet_success,
- armcp_packet_invalid,
- armcp_packet_fault
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault
};
/*
- * armcp_temp_type should adhere to hwmon_temp_attributes
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
* defined in Linux kernel hwmon.h file
*/
-enum armcp_temp_type {
- armcp_temp_input,
- armcp_temp_max = 6,
- armcp_temp_max_hyst,
- armcp_temp_crit,
- armcp_temp_crit_hyst,
- armcp_temp_offset = 19,
- armcp_temp_highest = 22,
- armcp_temp_reset_history = 23
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
};
-enum armcp_in_attributes {
- armcp_in_input,
- armcp_in_min,
- armcp_in_max,
- armcp_in_highest = 7,
- armcp_in_reset_history
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
};
-enum armcp_curr_attributes {
- armcp_curr_input,
- armcp_curr_min,
- armcp_curr_max,
- armcp_curr_highest = 7,
- armcp_curr_reset_history
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
};
-enum armcp_fan_attributes {
- armcp_fan_input,
- armcp_fan_min = 2,
- armcp_fan_max
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
};
-enum armcp_pwm_attributes {
- armcp_pwm_input,
- armcp_pwm_enable
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
};
/* Event Queue Packets */
@@ -351,32 +386,32 @@ struct eq_generic_event {
};
/*
- * ArmCP info
+ * CpuCP info
*/
#define CARD_NAME_MAX_LEN 16
#define VERSION_MAX_LEN 128
-#define ARMCP_MAX_SENSORS 128
+#define CPUCP_MAX_SENSORS 128
-struct armcp_sensor {
+struct cpucp_sensor {
__le32 type;
__le32 flags;
};
/**
- * struct armcp_card_types - ASIC card type.
- * @armcp_card_type_pci: PCI card.
- * @armcp_card_type_pmc: PCI Mezzanine Card.
+ * struct cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
*/
-enum armcp_card_types {
- armcp_card_type_pci,
- armcp_card_type_pmc
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
};
/**
- * struct armcp_info - Info from ArmCP that is necessary to the host's driver
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
* @sensors: available sensors description.
- * @kernel_version: ArmCP linux kernel version.
+ * @kernel_version: CpuCP linux kernel version.
* @reserved: reserved field.
* @card_type: card configuration type.
 * @card_location: in a server, each card has a different connection topology
@@ -385,12 +420,12 @@ enum armcp_card_types {
* @infineon_version: Infineon main DC-DC version.
* @fuse_version: silicon production FUSE information.
* @thermal_version: thermald S/W version.
- * @armcp_version: ArmCP S/W version.
+ * @cpucp_version: CpuCP S/W version.
* @dram_size: available DRAM size.
* @card_name: card name that will be displayed in HWMON subsystem on the host
*/
-struct armcp_info {
- struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
__le32 reserved;
__le32 card_type;
@@ -399,9 +434,10 @@ struct armcp_info {
__le32 infineon_version;
__u8 fuse_version[VERSION_MAX_LEN];
__u8 thermal_version[VERSION_MAX_LEN];
- __u8 armcp_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 reserved2;
__le64 dram_size;
char card_name[CARD_NAME_MAX_LEN];
};
-#endif /* ARMCP_IF_H */
+#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/qman_if.h b/drivers/misc/habanalabs/include/common/qman_if.h
index 0fdb49188ed7..7ed7739575ee 100644
--- a/drivers/misc/habanalabs/include/common/qman_if.h
+++ b/drivers/misc/habanalabs/include/common/qman_if.h
@@ -40,7 +40,7 @@ struct hl_bd {
*/
#define BD_CTL_COMP_OFFSET_SHIFT 16
-#define BD_CTL_COMP_OFFSET_MASK 0x00FF0000
+#define BD_CTL_COMP_OFFSET_MASK 0x0FFF0000
#define BD_CTL_COMP_DATA_SHIFT 0
#define BD_CTL_COMP_DATA_MASK 0x0000FFFF
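
The completion-offset field widens from 8 to 12 bits (mask 0x00FF0000 -> 0x0FFF0000), so it now spans bits 16..27 of the BD control word; extraction keeps the usual mask-then-shift form. Sketch, again with EX_ copies of the values:

	#define EX_BD_CTL_COMP_OFFSET_SHIFT	16
	#define EX_BD_CTL_COMP_OFFSET_MASK	0x0FFF0000

	static unsigned int ex_bd_comp_offset(unsigned int ctl)
	{
		return (ctl & EX_BD_CTL_COMP_OFFSET_MASK) >>
			EX_BD_CTL_COMP_OFFSET_SHIFT;
	}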
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/misc/habanalabs/include/gaudi/gaudi.h
index 8829891d3eef..f9ea897ae42c 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi.h
@@ -44,6 +44,8 @@
#define MME_NUMBER_OF_MASTER_ENGINES 2
+#define MME_NUMBER_OF_SLAVE_ENGINES 2
+
#define TPC_NUMBER_OF_ENGINES 8
#define DMA_NUMBER_OF_CHANNELS 8
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 3510c42d24e3..46aed13f16b1 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -12,191 +12,160 @@
/* Useful masks for bits in various registers */
#define PCI_DMA_QMAN_ENABLE (\
- (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
- (0xF << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
- (0xF << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_CQF_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_CP_EN_MASK, 0xF)))
#define QMAN_EXTERNAL_MAKE_TRUSTED (\
- (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
- (0xF << DMA0_QM_GLBL_PROT_CQF_SHIFT) | \
- (0xF << DMA0_QM_GLBL_PROT_CP_SHIFT) | \
- (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_PQF_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_CQF_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_CP_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_ERR_MASK, 0x1)))
#define QMAN_INTERNAL_MAKE_TRUSTED (\
- (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
- (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_PQF_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_PROT_ERR_MASK, 0x1)))
#define HBM_DMA_QMAN_ENABLE (\
- (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F)))
#define QMAN_MME_ENABLE (\
- (0xF << MME0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
- (0x1F << MME0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
- (0x1F << MME0_QM_GLBL_CFG0_CP_EN_SHIFT))
+ (FIELD_PREP(MME0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \
+ (FIELD_PREP(MME0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(MME0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F)))
#define QMAN_TPC_ENABLE (\
- (0xF << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
+ (FIELD_PREP(TPC0_QM_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F)))
#define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\
- (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
- (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
- (0x10 << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
- (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK, 0x10)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_EN_MASK, 0x1)))
#define QMAN_COMMON_CP_CGM_PWR_GATE_EN (\
- (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
- (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
- (0xF << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
- (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_CGM_CFG_EN_MASK, 0x1)))
#define PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
- (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
- (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
- (0xF << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0xF)))
#define PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
- (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
- (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (0xF << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF)))
#define HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
- (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F)))
#define HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
- (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
#define TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
- (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F)))
#define TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
- (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
#define MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
- (0xF << MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
- (0x1F << MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0x1F)) | \
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0x1F)))
#define MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
- (0xF << MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (0x1F << MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \
+ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F)))
-#define QMAN_CGM1_PWR_GATE_EN (0xA << DMA0_QM_CGM_CFG1_MASK_TH_SHIFT)
+#define QMAN_CGM1_PWR_GATE_EN (FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA))
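
FIELD_PREP(mask, val) from <linux/bitfield.h> shifts val into the position encoded by the mask and checks at compile time that the value fits, which is why the converted macros above can drop the separate _SHIFT defines. Sketch of the equivalence with an illustrative mask:

	#include <linux/bitfield.h>

	#define EX_PQF_EN_MASK	0x0000000FU	/* bits 3:0, illustrative */

	static unsigned int ex_enable_all_pqf(void)
	{
		/* Same result as the old (0xF << SHIFT) form, but derived
		 * from the mask and checked to fit inside it.
		 */
		return FIELD_PREP(EX_PQF_EN_MASK, 0xF);
	}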
/* RESET registers configuration */
-#define CFG_RST_L_PSOC_SHIFT 0
-#define CFG_RST_L_PCIE_SHIFT 1
-#define CFG_RST_L_PCIE_IF_SHIFT 2
-#define CFG_RST_L_HBM_S_PLL_SHIFT 3
-#define CFG_RST_L_TPC_S_PLL_SHIFT 4
-#define CFG_RST_L_MME_S_PLL_SHIFT 5
-#define CFG_RST_L_CPU_PLL_SHIFT 6
-#define CFG_RST_L_PCIE_PLL_SHIFT 7
-#define CFG_RST_L_NIC_S_PLL_SHIFT 8
-#define CFG_RST_L_HBM_N_PLL_SHIFT 9
-#define CFG_RST_L_TPC_N_PLL_SHIFT 10
-#define CFG_RST_L_MME_N_PLL_SHIFT 11
-#define CFG_RST_L_NIC_N_PLL_SHIFT 12
-#define CFG_RST_L_DMA_W_PLL_SHIFT 13
-#define CFG_RST_L_SIF_W_PLL_SHIFT 14
-#define CFG_RST_L_MESH_W_PLL_SHIFT 15
-#define CFG_RST_L_SRAM_W_PLL_SHIFT 16
-#define CFG_RST_L_DMA_E_PLL_SHIFT 17
-#define CFG_RST_L_SIF_E_PLL_SHIFT 18
-#define CFG_RST_L_MESH_E_PLL_SHIFT 19
-#define CFG_RST_L_SRAM_E_PLL_SHIFT 20
-#define CFG_RST_L_IF_1_SHIFT 21
-#define CFG_RST_L_IF_0_SHIFT 22
-#define CFG_RST_L_IF_2_SHIFT 23
-#define CFG_RST_L_IF_3_SHIFT 24
-#define CFG_RST_L_TPC_0_SHIFT 25
-#define CFG_RST_L_TPC_1_SHIFT 26
-#define CFG_RST_L_TPC_2_SHIFT 27
-#define CFG_RST_L_TPC_3_SHIFT 28
-#define CFG_RST_L_TPC_4_SHIFT 29
-#define CFG_RST_L_TPC_5_SHIFT 30
-#define CFG_RST_L_TPC_6_SHIFT 31
-#define CFG_RST_H_TPC_7_SHIFT 0
-#define CFG_RST_H_MME_0_SHIFT 1
-#define CFG_RST_H_MME_1_SHIFT 2
-#define CFG_RST_H_MME_2_SHIFT 3
-#define CFG_RST_H_MME_3_SHIFT 4
-#define CFG_RST_H_HBM_0_SHIFT 5
-#define CFG_RST_H_HBM_1_SHIFT 6
-#define CFG_RST_H_HBM_2_SHIFT 7
-#define CFG_RST_H_HBM_3_SHIFT 8
-#define CFG_RST_H_NIC_0_SHIFT 9
-#define CFG_RST_H_NIC_1_SHIFT 10
-#define CFG_RST_H_NIC_2_SHIFT 11
-#define CFG_RST_H_NIC_3_SHIFT 12
-#define CFG_RST_H_NIC_4_SHIFT 13
-#define CFG_RST_H_SM_0_SHIFT 14
-#define CFG_RST_H_SM_1_SHIFT 15
-#define CFG_RST_H_SM_2_SHIFT 16
-#define CFG_RST_H_SM_3_SHIFT 17
-#define CFG_RST_H_DMA_0_SHIFT 18
-#define CFG_RST_H_DMA_1_SHIFT 19
-#define CFG_RST_H_CPU_SHIFT 20
-#define CFG_RST_H_MMU_SHIFT 21
-
-
-#define CFG_RST_H_DMA_MASK ((1 << CFG_RST_H_DMA_0_SHIFT) | \
- (1 << CFG_RST_H_DMA_1_SHIFT))
-
-#define CFG_RST_H_CPU_MASK (1 << CFG_RST_H_CPU_SHIFT)
-#define CFG_RST_H_MMU_MASK (1 << CFG_RST_H_MMU_SHIFT)
-
-#define CFG_RST_H_HBM_MASK ((1 << CFG_RST_H_HBM_0_SHIFT) | \
- (1 << CFG_RST_H_HBM_1_SHIFT) | \
- (1 << CFG_RST_H_HBM_2_SHIFT) | \
- (1 << CFG_RST_H_HBM_3_SHIFT))
-
-#define CFG_RST_H_NIC_MASK ((1 << CFG_RST_H_NIC_0_SHIFT) | \
- (1 << CFG_RST_H_NIC_1_SHIFT) | \
- (1 << CFG_RST_H_NIC_2_SHIFT) | \
- (1 << CFG_RST_H_NIC_3_SHIFT) | \
- (1 << CFG_RST_H_NIC_4_SHIFT))
-
-#define CFG_RST_H_SM_MASK ((1 << CFG_RST_H_SM_0_SHIFT) | \
- (1 << CFG_RST_H_SM_1_SHIFT) | \
- (1 << CFG_RST_H_SM_2_SHIFT) | \
- (1 << CFG_RST_H_SM_3_SHIFT))
-
-#define CFG_RST_H_MME_MASK ((1 << CFG_RST_H_MME_0_SHIFT) | \
- (1 << CFG_RST_H_MME_1_SHIFT) | \
- (1 << CFG_RST_H_MME_2_SHIFT) | \
- (1 << CFG_RST_H_MME_3_SHIFT))
-
-#define CFG_RST_L_PSOC_MASK (1 << CFG_RST_L_PSOC_SHIFT)
-
-#define CFG_RST_L_IF_MASK ((1 << CFG_RST_L_IF_0_SHIFT) | \
- (1 << CFG_RST_L_IF_1_SHIFT) | \
- (1 << CFG_RST_L_IF_2_SHIFT) | \
- (1 << CFG_RST_L_IF_3_SHIFT))
-
-#define CFG_RST_L_TPC_MASK ((1 << CFG_RST_L_TPC_0_SHIFT) | \
- (1 << CFG_RST_L_TPC_1_SHIFT) | \
- (1 << CFG_RST_L_TPC_2_SHIFT) | \
- (1 << CFG_RST_L_TPC_3_SHIFT) | \
- (1 << CFG_RST_L_TPC_4_SHIFT) | \
- (1 << CFG_RST_L_TPC_5_SHIFT) | \
- (1 << CFG_RST_L_TPC_6_SHIFT))
-
-#define CFG_RST_H_TPC_MASK (1 << CFG_RST_H_TPC_7_SHIFT)
-
-#define CA53_RESET (1 << CFG_RST_H_CPU_SHIFT)
+#define CFG_RST_L_PSOC_MASK BIT_MASK(0)
+#define CFG_RST_L_PCIE_MASK BIT_MASK(1)
+#define CFG_RST_L_PCIE_IF_MASK BIT_MASK(2)
+#define CFG_RST_L_HBM_S_PLL_MASK BIT_MASK(3)
+#define CFG_RST_L_TPC_S_PLL_MASK BIT_MASK(4)
+#define CFG_RST_L_MME_S_PLL_MASK BIT_MASK(5)
+#define CFG_RST_L_CPU_PLL_MASK BIT_MASK(6)
+#define CFG_RST_L_PCIE_PLL_MASK BIT_MASK(7)
+#define CFG_RST_L_NIC_S_PLL_MASK BIT_MASK(8)
+#define CFG_RST_L_HBM_N_PLL_MASK BIT_MASK(9)
+#define CFG_RST_L_TPC_N_PLL_MASK BIT_MASK(10)
+#define CFG_RST_L_MME_N_PLL_MASK BIT_MASK(11)
+#define CFG_RST_L_NIC_N_PLL_MASK BIT_MASK(12)
+#define CFG_RST_L_DMA_W_PLL_MASK BIT_MASK(13)
+#define CFG_RST_L_SIF_W_PLL_MASK BIT_MASK(14)
+#define CFG_RST_L_MESH_W_PLL_MASK BIT_MASK(15)
+#define CFG_RST_L_SRAM_W_PLL_MASK BIT_MASK(16)
+#define CFG_RST_L_DMA_E_PLL_MASK BIT_MASK(17)
+#define CFG_RST_L_SIF_E_PLL_MASK BIT_MASK(18)
+#define CFG_RST_L_MESH_E_PLL_MASK BIT_MASK(19)
+#define CFG_RST_L_SRAM_E_PLL_MASK BIT_MASK(20)
+
+#define CFG_RST_L_IF_1_MASK BIT_MASK(21)
+#define CFG_RST_L_IF_0_MASK BIT_MASK(22)
+#define CFG_RST_L_IF_2_MASK BIT_MASK(23)
+#define CFG_RST_L_IF_3_MASK BIT_MASK(24)
+#define CFG_RST_L_IF_MASK GENMASK(24, 21)
+
+#define CFG_RST_L_TPC_0_MASK BIT_MASK(25)
+#define CFG_RST_L_TPC_1_MASK BIT_MASK(26)
+#define CFG_RST_L_TPC_2_MASK BIT_MASK(27)
+#define CFG_RST_L_TPC_3_MASK BIT_MASK(28)
+#define CFG_RST_L_TPC_4_MASK BIT_MASK(29)
+#define CFG_RST_L_TPC_5_MASK BIT_MASK(30)
+#define CFG_RST_L_TPC_6_MASK BIT_MASK(31)
+#define CFG_RST_L_TPC_MASK GENMASK(31, 25)
+
+#define CFG_RST_H_TPC_7_MASK BIT_MASK(0)
+
+#define CFG_RST_H_MME_0_MASK BIT_MASK(1)
+#define CFG_RST_H_MME_1_MASK BIT_MASK(2)
+#define CFG_RST_H_MME_2_MASK BIT_MASK(3)
+#define CFG_RST_H_MME_3_MASK BIT_MASK(4)
+#define CFG_RST_H_MME_MASK GENMASK(4, 1)
+
+#define CFG_RST_H_HBM_0_MASK BIT_MASK(5)
+#define CFG_RST_H_HBM_1_MASK BIT_MASK(6)
+#define CFG_RST_H_HBM_2_MASK BIT_MASK(7)
+#define CFG_RST_H_HBM_3_MASK BIT_MASK(8)
+#define CFG_RST_H_HBM_MASK GENMASK(8, 5)
+
+#define CFG_RST_H_NIC_0_MASK BIT_MASK(9)
+#define CFG_RST_H_NIC_1_MASK BIT_MASK(10)
+#define CFG_RST_H_NIC_2_MASK BIT_MASK(11)
+#define CFG_RST_H_NIC_3_MASK BIT_MASK(12)
+#define CFG_RST_H_NIC_4_MASK BIT_MASK(13)
+#define CFG_RST_H_NIC_MASK GENMASK(13, 9)
+
+#define CFG_RST_H_SM_0_MASK BIT_MASK(14)
+#define CFG_RST_H_SM_1_MASK BIT_MASK(15)
+#define CFG_RST_H_SM_2_MASK BIT_MASK(16)
+#define CFG_RST_H_SM_3_MASK BIT_MASK(17)
+#define CFG_RST_H_SM_MASK GENMASK(17, 14)
+
+#define CFG_RST_H_DMA_0_MASK BIT_MASK(18)
+#define CFG_RST_H_DMA_1_MASK BIT_MASK(19)
+#define CFG_RST_H_DMA_MASK GENMASK(19, 18)
+
+#define CFG_RST_H_CPU_MASK BIT_MASK(20)
+#define CFG_RST_H_MMU_MASK BIT_MASK(21)
#define UNIT_RST_L_PSOC_SHIFT 0
#define UNIT_RST_L_PCIE_SHIFT 1
@@ -452,7 +421,6 @@ enum axi_id {
#define QM_ARB_ERR_MSG_EN_MASK (\
QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
- QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
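The conversion in this file leans on three bit-field helpers from include/linux/bits.h and include/linux/bitfield.h. As a minimal sketch of their semantics (simplified re-implementations for illustration only; the kernel's real macros also provide 64-bit variants and compile-time mask checks):

    /* Illustrative only: simplified forms of the helpers used above. */
    #define MY_BIT_MASK(nr)   (1UL << (nr))
    #define MY_GENMASK(h, l)  (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
    /* FIELD_PREP(mask, val): shift val up to the mask's lowest set bit. */
    #define MY_FIELD_PREP(mask, val) \
            (((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))

So FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA) is equivalent to the old (0xA << DMA0_QM_CGM_CFG1_MASK_TH_SHIFT), and CFG_RST_L_IF_MASK == GENMASK(24, 21) covers bits 21..24 in one expression. The gain is that each value is paired with its mask, so a shift can no longer drift out of sync with the field definition.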
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index f25c60a2c243..977fb341a6e7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -12,6 +12,7 @@
* PSOC scratch-pad registers
*/
#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22
#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index 0195f62d7254..e56124265a05 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -22,6 +22,7 @@
#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22
#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 468bb045fbd1..dedf20e8f956 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -29,6 +29,8 @@
#define HOP3_SHIFT 21
#define HOP4_SHIFT 12
+#define MMU_ARCH_5_HOPS 5
+
#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
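MMU_ARCH_5_HOPS names the depth of the device page-table walk that the HOPx_SHIFT values index into. A hedged sketch of how such a shift is typically consumed (hypothetical helper, not from this patch; it assumes 9 index bits per hop, since consecutive shifts above differ by 9):

    /* Hypothetical: index into one hop of the 5-hop page-table walk. */
    static inline u64 hop_index(u64 dev_va, unsigned int hop_shift)
    {
            return (dev_va >> hop_shift) & GENMASK_ULL(8, 0); /* 9 bits, assumed */
    }

For example, hop_index(va, HOP4_SHIFT) with HOP4_SHIFT == 12 would select the final, 4 KiB-granular hop.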
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
new file mode 100644
index 000000000000..cc93569e601c
--- /dev/null
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for USB functionality of HiKey series boards
+ * based on the HiSilicon Kirin SoC.
+ *
+ * Copyright (C) 2017-2018 HiSilicon Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Authors: Yu Chen <chenyu56@huawei.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/role.h>
+
+#define DEVICE_DRIVER_NAME "hisi_hikey_usb"
+
+#define HUB_VBUS_POWER_ON 1
+#define HUB_VBUS_POWER_OFF 0
+#define USB_SWITCH_TO_HUB 1
+#define USB_SWITCH_TO_TYPEC 0
+#define TYPEC_VBUS_POWER_ON 1
+#define TYPEC_VBUS_POWER_OFF 0
+
+struct hisi_hikey_usb {
+ struct device *dev;
+ struct gpio_desc *otg_switch;
+ struct gpio_desc *typec_vbus;
+ struct gpio_desc *hub_vbus;
+ struct gpio_desc *reset;
+
+ struct regulator *regulator;
+
+ struct usb_role_switch *hub_role_sw;
+
+ struct usb_role_switch *dev_role_sw;
+ enum usb_role role;
+
+ struct mutex lock;
+ struct work_struct work;
+
+ struct notifier_block nb;
+};
+
+static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
+{
+ int ret, status;
+
+ if (hisi_hikey_usb->hub_vbus)
+ gpiod_set_value_cansleep(hisi_hikey_usb->hub_vbus, value);
+
+ if (!hisi_hikey_usb->regulator)
+ return;
+
+ status = regulator_is_enabled(hisi_hikey_usb->regulator);
+ if (status == !!value)
+ return;
+
+ if (value)
+ ret = regulator_enable(hisi_hikey_usb->regulator);
+ else
+ ret = regulator_disable(hisi_hikey_usb->regulator);
+
+ if (ret)
+ dev_err(hisi_hikey_usb->dev,
+ "Can't switch regulator state to %s\n",
+ value ? "enabled" : "disabled");
+}
+
+static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
+ int switch_to)
+{
+ if (!hisi_hikey_usb->otg_switch)
+ return;
+
+ gpiod_set_value_cansleep(hisi_hikey_usb->otg_switch, switch_to);
+}
+
+static void usb_typec_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
+ int value)
+{
+ if (!hisi_hikey_usb->typec_vbus)
+ return;
+
+ gpiod_set_value_cansleep(hisi_hikey_usb->typec_vbus, value);
+}
+
+static void relay_set_role_switch(struct work_struct *work)
+{
+ struct hisi_hikey_usb *hisi_hikey_usb = container_of(work,
+ struct hisi_hikey_usb,
+ work);
+ struct usb_role_switch *sw;
+ enum usb_role role;
+
+ if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw)
+ return;
+
+ mutex_lock(&hisi_hikey_usb->lock);
+ switch (hisi_hikey_usb->role) {
+ case USB_ROLE_NONE:
+ usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF);
+ usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_HUB);
+ hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_ON);
+ break;
+ case USB_ROLE_HOST:
+ hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
+ usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC);
+ usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_ON);
+ break;
+ case USB_ROLE_DEVICE:
+ hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
+ usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF);
+ usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC);
+ break;
+ default:
+ break;
+ }
+ sw = hisi_hikey_usb->dev_role_sw;
+ role = hisi_hikey_usb->role;
+ mutex_unlock(&hisi_hikey_usb->lock);
+
+ usb_role_switch_set_role(sw, role);
+}
+
+static int hub_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct hisi_hikey_usb *hisi_hikey_usb = usb_role_switch_get_drvdata(sw);
+
+ if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw)
+ return -EINVAL;
+
+ mutex_lock(&hisi_hikey_usb->lock);
+ hisi_hikey_usb->role = role;
+ mutex_unlock(&hisi_hikey_usb->lock);
+
+ schedule_work(&hisi_hikey_usb->work);
+
+ return 0;
+}
+
+static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev,
+ struct hisi_hikey_usb *hisi_hikey_usb)
+{
+ struct regulator *regulator;
+
+ regulator = devm_regulator_get(&pdev->dev, "hub-vdd");
+ if (IS_ERR(regulator)) {
+ if (PTR_ERR(regulator) == -EPROBE_DEFER) {
+ dev_info(&pdev->dev,
+ "waiting for hub-vdd-supply to be probed\n");
+ return PTR_ERR(regulator);
+ }
+ dev_err(&pdev->dev,
+ "get hub-vdd-supply failed with error %ld\n",
+ PTR_ERR(regulator));
+ return PTR_ERR(regulator);
+ }
+ hisi_hikey_usb->regulator = regulator;
+
+ hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hisi_hikey_usb->reset))
+ return PTR_ERR(hisi_hikey_usb->reset);
+
+ return 0;
+}
+
+static int hisi_hikey_usb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_hikey_usb *hisi_hikey_usb;
+ struct usb_role_switch_desc hub_role_switch = {NULL};
+ int ret;
+
+ hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
+ if (!hisi_hikey_usb)
+ return -ENOMEM;
+
+ hisi_hikey_usb->dev = &pdev->dev;
+
+ hisi_hikey_usb->otg_switch = devm_gpiod_get(dev, "otg-switch",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hisi_hikey_usb->otg_switch))
+ return PTR_ERR(hisi_hikey_usb->otg_switch);
+
+ hisi_hikey_usb->typec_vbus = devm_gpiod_get(dev, "typec-vbus",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hisi_hikey_usb->typec_vbus))
+ return PTR_ERR(hisi_hikey_usb->typec_vbus);
+
+ /* Parse Kirin 970-specific OF data */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "hisilicon,kirin970_hikey_usbhub")) {
+ ret = hisi_hikey_usb_parse_kirin970(pdev, hisi_hikey_usb);
+ if (ret)
+ return ret;
+ } else {
+ /* hub-vdd33-en is optional */
+ hisi_hikey_usb->hub_vbus = devm_gpiod_get_optional(dev, "hub-vdd33-en",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hisi_hikey_usb->hub_vbus))
+ return PTR_ERR(hisi_hikey_usb->hub_vbus);
+ }
+
+ hisi_hikey_usb->dev_role_sw = usb_role_switch_get(dev);
+ if (!hisi_hikey_usb->dev_role_sw)
+ return -EPROBE_DEFER;
+ if (IS_ERR(hisi_hikey_usb->dev_role_sw))
+ return PTR_ERR(hisi_hikey_usb->dev_role_sw);
+
+ INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch);
+ mutex_init(&hisi_hikey_usb->lock);
+
+ hub_role_switch.fwnode = dev_fwnode(dev);
+ hub_role_switch.set = hub_usb_role_switch_set;
+ hub_role_switch.driver_data = hisi_hikey_usb;
+
+ hisi_hikey_usb->hub_role_sw = usb_role_switch_register(dev,
+ &hub_role_switch);
+
+ if (IS_ERR(hisi_hikey_usb->hub_role_sw)) {
+ usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+ return PTR_ERR(hisi_hikey_usb->hub_role_sw);
+ }
+
+ platform_set_drvdata(pdev, hisi_hikey_usb);
+
+ return 0;
+}
+
+static int hisi_hikey_usb_remove(struct platform_device *pdev)
+{
+ struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
+
+ if (hisi_hikey_usb->hub_role_sw)
+ usb_role_switch_unregister(hisi_hikey_usb->hub_role_sw);
+
+ if (hisi_hikey_usb->dev_role_sw)
+ usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id id_table_hisi_hikey_usb[] = {
+ { .compatible = "hisilicon,gpio_hubv1" },
+ { .compatible = "hisilicon,kirin970_hikey_usbhub" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
+
+static struct platform_driver hisi_hikey_usb_driver = {
+ .probe = hisi_hikey_usb_probe,
+ .remove = hisi_hikey_usb_remove,
+ .driver = {
+ .name = DEVICE_DRIVER_NAME,
+ .of_match_table = id_table_hisi_hikey_usb,
+ },
+};
+
+module_platform_driver(hisi_hikey_usb_driver);
+
+MODULE_AUTHOR("Yu Chen <chenyu56@huawei.com>");
+MODULE_DESCRIPTION("Driver Support for USB functionality of Hikey");
+MODULE_LICENSE("GPL v2");
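A design note on the driver above: hub_usb_role_switch_set() only records the requested role under the mutex and schedules the work item, so the sleeping GPIO and regulator operations, and the forwarding call into the downstream dev_role_sw, all run in process context inside relay_set_role_switch(). A generic sketch of this record-and-defer pattern (all names hypothetical):

    /* Hypothetical sketch: the callback records state and kicks a worker;
     * the worker snapshots the state under the lock and then sleeps freely. */
    struct relay {
            struct mutex lock;
            struct work_struct work;
            int state;              /* protected by lock */
    };

    static void relay_worker(struct work_struct *work)
    {
            struct relay *r = container_of(work, struct relay, work);
            int state;

            mutex_lock(&r->lock);
            state = r->state;
            mutex_unlock(&r->lock);
            /* ... perform the sleeping operations for 'state' here ... */
            pr_debug("relay state %d\n", state);
    }

    static void relay_request(struct relay *r, int state)
    {
            mutex_lock(&r->lock);
            r->state = state;
            mutex_unlock(&r->lock);
            schedule_work(&r->work);
    }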
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index d5d2af4d10e6..945701bce553 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -33,16 +33,16 @@
* You can also specify optional tests:
* N## = Go to sleep with interrupts off for ## seconds
* to test the HW NMI watchdog
- * F## = Break at do_fork for ## iterations
+ * F## = Break at kernel_clone for ## iterations
* S## = Break at sys_open for ## iterations
* I## = Run the single step test ## iterations
*
- * NOTE: that the do_fork and sys_open tests are mutually exclusive.
+ * NOTE: the kernel_clone and sys_open tests are mutually exclusive.
*
* To invoke the kgdb test suite from boot you use a kernel start
* argument as follows:
* kgdbts=V1 kgdbwait
- * Or if you wanted to perform the NMI test for 6 seconds and do_fork
+ * Or if you wanted to perform the NMI test for 6 seconds and kernel_clone
* test for 100 forks, you could use:
* kgdbts=V1N6F100 kgdbwait
*
@@ -74,7 +74,7 @@
* echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts
* fg # and hit control-c
* fg # and hit control-c
- * ## This tests break points on do_fork
+ * ## This tests break points on kernel_clone
* while [ 1 ] ; do date > /dev/null ; done &
* while [ 1 ] ; do date > /dev/null ; done &
* echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts
@@ -209,8 +209,8 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)kgdbts_break_test;
else if (!strcmp(arg, "sys_open"))
addr = (unsigned long)do_sys_open;
- else if (!strcmp(arg, "do_fork"))
- addr = (unsigned long)_do_fork;
+ else if (!strcmp(arg, "kernel_clone"))
+ addr = (unsigned long)kernel_clone;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
addr = (unsigned long) dereference_function_descriptor((void *)addr);
@@ -310,7 +310,7 @@ static int check_and_rewind_pc(char *put_str, char *arg)
if (arch_needs_sstep_emulation && sstep_addr &&
ip + offset == sstep_addr &&
- ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
+ ((!strcmp(arg, "sys_open") || !strcmp(arg, "kernel_clone")))) {
/* This is special case for emulated single step */
v2printk("Emul: rewind hit single step bp\n");
restart_from_top_after_write = 1;
@@ -596,19 +596,19 @@ static struct test_struct singlestep_break_test[] = {
};
/*
- * Test for hitting a breakpoint at do_fork for what ever the number
+ * Test for hitting a breakpoint at kernel_clone for whatever number
* of iterations required by the variable repeat_test.
*/
-static struct test_struct do_fork_test[] = {
+static struct test_struct do_kernel_clone_test[] = {
{ "?", "S0*" }, /* Clear break points */
- { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+ { "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
- { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
- { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+ { "kernel_clone", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "kernel_clone", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
- { "g", "do_fork", NULL, check_single_step },
- { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+ { "g", "kernel_clone", NULL, check_single_step },
+ { "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "", get_cont_catch, put_cont_catch },
@@ -935,11 +935,11 @@ static void run_bad_read_test(void)
kgdb_breakpoint();
}
-static void run_do_fork_test(void)
+static void run_kernel_clone_test(void)
{
init_simple_test();
- ts.tst = do_fork_test;
- ts.name = "do_fork_test";
+ ts.tst = do_kernel_clone_test;
+ ts.name = "do_kernel_clone_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
}
@@ -967,7 +967,7 @@ static void run_singlestep_break_test(void)
static void kgdbts_run_tests(void)
{
char *ptr;
- int fork_test = 0;
+ int clone_test = 0;
int do_sys_open_test = 0;
int sstep_test = 1000;
int nmi_sleep = 0;
@@ -981,7 +981,7 @@ static void kgdbts_run_tests(void)
ptr = strchr(config, 'F');
if (ptr)
- fork_test = simple_strtol(ptr + 1, NULL, 10);
+ clone_test = simple_strtol(ptr + 1, NULL, 10);
ptr = strchr(config, 'S');
if (ptr)
do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1025,16 +1025,16 @@ static void kgdbts_run_tests(void)
run_nmi_sleep_test(nmi_sleep);
}
- /* If the do_fork test is run it will be the last test that is
+ /* If the kernel_clone test is run, it will be the last test that is
* executed because a kernel thread will be spawned at the very
* end to unregister the debug hooks.
*/
- if (fork_test) {
- repeat_test = fork_test;
- printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
+ if (clone_test) {
+ repeat_test = clone_test;
+ printk(KERN_INFO "kgdbts:RUN kernel_clone for %i breakpoints\n",
repeat_test);
kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
- run_do_fork_test();
+ run_kernel_clone_test();
return;
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 4dfbfd51bdf7..a0675d4154d2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -312,16 +312,6 @@ void lkdtm_CORRUPT_LIST_DEL(void)
pr_err("list_del() corruption not detected!\n");
}
-/* Test if unbalanced set_fs(KERNEL_DS)/set_fs(USER_DS) check exists. */
-void lkdtm_CORRUPT_USER_DS(void)
-{
- pr_info("setting bad task size limit\n");
- set_fs(KERNEL_DS);
-
- /* Make sure we do not keep running with a KERNEL_DS! */
- force_sig(SIGKILL);
-}
-
/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index a5e344df9166..97803f213d9d 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -112,7 +112,6 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
- CRASHTYPE(CORRUPT_USER_DS),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNSET_SMEP),
@@ -172,7 +171,6 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
- CRASHTYPE(USERCOPY_KERNEL_DS),
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
#ifdef CONFIG_X86_32
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 8878538b2c13..6dec4c9b442f 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -27,7 +27,6 @@ void lkdtm_OVERFLOW_UNSIGNED(void);
void lkdtm_ARRAY_BOUNDS(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
@@ -96,7 +95,6 @@ void lkdtm_USERCOPY_STACK_FRAME_TO(void);
void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
-void lkdtm_USERCOPY_KERNEL_DS(void);
/* lkdtm_stackleak.c */
void lkdtm_STACKLEAK_ERASING(void);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index b833367a45d0..109e8d4302c1 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -325,21 +325,6 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_USERCOPY_KERNEL_DS(void)
-{
- char __user *user_ptr =
- (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4));
- mm_segment_t old_fs = get_fs();
- char buf[10] = {0};
-
- pr_info("attempting copy_to_user() to noncanonical address: %px\n",
- user_ptr);
- set_fs(KERNEL_DS);
- if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0)
- pr_err("copy_to_user() to noncanonical address succeeded!?\n");
- set_fs(old_fs);
-}
-
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index f5fd5b786607..c06581ffa7bd 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -46,4 +46,14 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+config INTEL_MEI_VIRTIO
+ tristate "Intel MEI interface emulation with virtio framework"
+ select INTEL_MEI
+ depends on X86 && PCI && VIRTIO_PCI
+ help
+ This module implements MEI HW emulation over a virtio transport.
+ Enable this if your virtual machine supports a virtual MEI
+ device over virtio.
+ The module will be called mei_virtio.
+
source "drivers/misc/mei/hdcp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index f1c76f7ee804..52aefaab5c1b 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -22,6 +22,9 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
+obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o
+mei-virtio-objs := hw-virtio.o
+
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 07ba16d46690..4e30fa98fe7d 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -463,6 +463,17 @@ out:
dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
}
+/**
+ * vt_support - enable bus clients with vtag support
+ *
+ * @cldev: me client device
+ */
+static void vt_support(struct mei_cl_device *cldev)
+{
+ if (cldev->me_cl->props.vt_supported == 1)
+ cldev->do_match = 1;
+}
+
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
static struct mei_fixup {
@@ -476,6 +487,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
+ MEI_FIXUP(MEI_UUID_ANY, vt_support),
};
/**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index a6dfc3ce1db2..9cdaa7f3af23 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -152,7 +152,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
if (timeout) {
rets = wait_event_interruptible_timeout
(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
+ mei_cl_read_cb(cl, NULL) ||
(!mei_cl_is_connected(cl)),
msecs_to_jiffies(timeout));
if (rets == 0)
@@ -165,7 +165,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
} else {
if (wait_event_interruptible
(cl->rx_wait,
- (!list_empty(&cl->rd_completed)) ||
+ mei_cl_read_cb(cl, NULL) ||
(!mei_cl_is_connected(cl)))) {
if (signal_pending(current))
return -EINTR;
@@ -198,7 +198,7 @@ copy:
rets = r_length;
free:
- mei_io_cb_free(cb);
+ mei_cl_del_rd_completed(cl, cb);
out:
mutex_unlock(&bus->device_lock);
@@ -496,6 +496,68 @@ static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
}
/**
+ * mei_cl_bus_vtag - get bus vtag entry wrapper
+ * The vtag entry for a bus client is always first in the map.
+ *
+ * @cl: host client
+ *
+ * Return: bus vtag or NULL
+ */
+static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
+{
+ return list_first_entry_or_null(&cl->vtag_map,
+ struct mei_cl_vtag, list);
+}
+
+/**
+ * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
+ *
+ * @cldev: me client device
+ *
+ * Return:
+ * * 0 on success
+ * * -ENOMEM if memory allocation failed
+ */
+static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
+{
+ struct mei_cl *cl = cldev->cl;
+ struct mei_cl_vtag *cl_vtag;
+
+ /*
+ * Bail out if the client does not support vtags
+ * or has already allocated one
+ */
+ if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
+ return 0;
+
+ cl_vtag = mei_cl_vtag_alloc(NULL, 0);
+ if (IS_ERR(cl_vtag))
+ return -ENOMEM;
+
+ list_add_tail(&cl_vtag->list, &cl->vtag_map);
+
+ return 0;
+}
+
+/**
+ * mei_cl_bus_vtag_free - remove the bus entry from vtag map
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
+{
+ struct mei_cl *cl = cldev->cl;
+ struct mei_cl_vtag *cl_vtag;
+
+ cl_vtag = mei_cl_bus_vtag(cl);
+ if (!cl_vtag)
+ return;
+
+ list_del(&cl_vtag->list);
+ kfree(cl_vtag);
+}
+
+/**
* mei_cldev_enable - enable me client device
* create connection with me client
*
@@ -531,9 +593,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
goto out;
}
+ ret = mei_cl_bus_vtag_alloc(cldev);
+ if (ret)
+ goto out;
+
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
- if (ret < 0)
+ if (ret < 0) {
dev_err(&cldev->dev, "cannot connect\n");
+ mei_cl_bus_vtag_free(cldev);
+ }
out:
mutex_unlock(&bus->device_lock);
@@ -586,6 +654,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
mutex_lock(&bus->device_lock);
+ mei_cl_bus_vtag_free(cldev);
+
if (!mei_cl_is_connected(cl)) {
dev_dbg(bus->dev, "Already disconnected\n");
err = 0;
@@ -810,6 +880,16 @@ static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(fixed);
+static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ bool vt = mei_me_cl_vt(cldev->me_cl);
+
+ return sprintf(buf, "%d", vt);
+}
+static DEVICE_ATTR_RO(vtag);
+
static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
char *buf)
{
@@ -827,6 +907,7 @@ static struct attribute *mei_cldev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_max_conn.attr,
&dev_attr_fixed.attr,
+ &dev_attr_vtag.attr,
&dev_attr_max_len.attr,
NULL,
};
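For readers tracing the sysfs wiring: DEVICE_ATTR_RO(vtag) is what ties the vtag_show() accessor above to this attribute array, expanding to roughly the following (simplified; the real macro lives in include/linux/device.h):

    static struct device_attribute dev_attr_vtag = {
            .attr = { .name = "vtag", .mode = 0444 },
            .show = vtag_show,
    };

which is then exposed through &dev_attr_vtag.attr in mei_cldev_attrs[].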
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 2572887d99b6..d5c3f7d54634 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -355,6 +355,27 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
}
/**
+ * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @cl: mei client
+ * @fp: pointer to file structure
+ */
+static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
+ const struct file *fp)
+{
+ struct mei_cl_vtag *cl_vtag;
+
+ list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+ if (cl_vtag->fp == fp) {
+ cl_vtag->pending_read = true;
+ return;
+ }
+ }
+}
+
+/**
* mei_io_cb_init - allocate and initialize io callback
*
* @cl: mei client
@@ -378,6 +399,8 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
cb->cl = cl;
cb->buf_idx = 0;
cb->fop_type = type;
+ cb->vtag = 0;
+
return cb;
}
@@ -406,14 +429,16 @@ static void mei_io_list_flush_cl(struct list_head *head,
*
* @head: An instance of our list structure
* @cl: host client
+ * @fp: file pointer (matching cb file object), may be NULL
*/
static void mei_io_tx_list_free_cl(struct list_head *head,
- const struct mei_cl *cl)
+ const struct mei_cl *cl,
+ const struct file *fp)
{
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, head, list) {
- if (cl == cb->cl)
+ if (cl == cb->cl && (!fp || fp == cb->fp))
mei_tx_cb_dequeue(cb);
}
}
@@ -434,6 +459,19 @@ static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
}
/**
+ * mei_cl_free_pending - free pending cb
+ *
+ * @cl: host client
+ */
+static void mei_cl_free_pending(struct mei_cl *cl)
+{
+ struct mei_cl_cb *cb;
+
+ cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
+ mei_io_cb_free(cb);
+}
+
+/**
* mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
* @cl: host client
@@ -505,15 +543,19 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
*
* Return: cb on success, NULL if cb is not found
*/
-struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
+struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
struct mei_cl_cb *cb;
+ struct mei_cl_cb *ret_cb = NULL;
+ spin_lock(&cl->rd_completed_lock);
list_for_each_entry(cb, &cl->rd_completed, list)
- if (!fp || fp == cb->fp)
- return cb;
-
- return NULL;
+ if (!fp || fp == cb->fp) {
+ ret_cb = cb;
+ break;
+ }
+ spin_unlock(&cl->rd_completed_lock);
+ return ret_cb;
}
/**
@@ -534,12 +576,17 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
- mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
- mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
- mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
- mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
- mei_io_list_free_fp(&cl->rd_pending, fp);
+ mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
+ mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
+ /* free pending and control cb only in final flush */
+ if (!fp) {
+ mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
+ mei_cl_free_pending(cl);
+ }
+ spin_lock(&cl->rd_completed_lock);
mei_io_list_free_fp(&cl->rd_completed, fp);
+ spin_unlock(&cl->rd_completed_lock);
return 0;
}
@@ -557,6 +604,8 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
init_waitqueue_head(&cl->ev_wait);
+ INIT_LIST_HEAD(&cl->vtag_map);
+ spin_lock_init(&cl->rd_completed_lock);
INIT_LIST_HEAD(&cl->rd_completed);
INIT_LIST_HEAD(&cl->rd_pending);
INIT_LIST_HEAD(&cl->link);
@@ -752,8 +801,8 @@ static void mei_cl_set_disconnected(struct mei_cl *cl)
return;
cl->state = MEI_FILE_DISCONNECTED;
- mei_io_tx_list_free_cl(&dev->write_list, cl);
- mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
+ mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
+ mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
mei_cl_wake_all(cl);
@@ -1229,6 +1278,157 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
}
/**
+ * mei_cl_vtag_alloc - allocate and fill the vtag structure
+ *
+ * @fp: pointer to file structure
+ * @vtag: vm tag
+ *
+ * Return:
+ * * Pointer to allocated struct - on success
+ * * ERR_PTR(-ENOMEM) on memory allocation failure
+ */
+struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
+{
+ struct mei_cl_vtag *cl_vtag;
+
+ cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
+ if (!cl_vtag)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cl_vtag->list);
+ cl_vtag->vtag = vtag;
+ cl_vtag->fp = fp;
+
+ return cl_vtag;
+}
+
+/**
+ * mei_cl_fp_by_vtag - obtain the file pointer by vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ *
+ * Return:
+ * * A file pointer - on success
+ * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
+ */
+const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+ struct mei_cl_vtag *vtag_l;
+
+ list_for_each_entry(vtag_l, &cl->vtag_map, list)
+ if (vtag_l->vtag == vtag)
+ return vtag_l->fp;
+
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ */
+static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+ struct mei_cl_vtag *vtag_l;
+
+ list_for_each_entry(vtag_l, &cl->vtag_map, list) {
+ if (vtag_l->vtag == vtag) {
+ vtag_l->pending_read = false;
+ break;
+ }
+ }
+}
+
+/**
+ * mei_cl_read_vtag_add_fc - add flow control for next pending reader
+ * in the vtag list
+ *
+ * @cl: host client
+ */
+static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
+{
+ struct mei_cl_vtag *cl_vtag;
+
+ list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+ if (cl_vtag->pending_read) {
+ if (mei_cl_enqueue_ctrl_wr_cb(cl,
+ mei_cl_mtu(cl),
+ MEI_FOP_READ,
+ cl_vtag->fp))
+ cl->rx_flow_ctrl_creds++;
+ break;
+ }
+ }
+}
+
+/**
+ * mei_cl_vt_support_check - check if the client supports vtags
+ *
+ * @cl: host client
+ *
+ * Return:
+ * * 0 - supported, or not connected at all
+ * * -EOPNOTSUPP - vtags are not supported by client
+ */
+int mei_cl_vt_support_check(const struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ if (!dev->hbm_f_vt_supported)
+ return -EOPNOTSUPP;
+
+ if (!cl->me_cl)
+ return 0;
+
+ return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
+}
+
+/**
+ * mei_cl_add_rd_completed - add read completed callback to list with lock
+ * and vtag check
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ */
+void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ const struct file *fp;
+
+ if (!mei_cl_vt_support_check(cl)) {
+ fp = mei_cl_fp_by_vtag(cl, cb->vtag);
+ if (IS_ERR(fp)) {
+ /* client already disconnected, discarding */
+ mei_io_cb_free(cb);
+ return;
+ }
+ cb->fp = fp;
+ mei_cl_reset_read_by_vtag(cl, cb->vtag);
+ mei_cl_read_vtag_add_fc(cl);
+ }
+
+ spin_lock(&cl->rd_completed_lock);
+ list_add_tail(&cb->list, &cl->rd_completed);
+ spin_unlock(&cl->rd_completed_lock);
+}
+
+/**
+ * mei_cl_del_rd_completed - free read completed callback with lock
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ */
+void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+ spin_lock(&cl->rd_completed_lock);
+ mei_io_cb_free(cb);
+ spin_unlock(&cl->rd_completed_lock);
+}
+
+/**
* mei_cl_notify_fop2req - convert fop to proper request
*
* @fop: client notification start response command
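The helpers above establish one invariant: cl->rd_completed is only touched under rd_completed_lock, via mei_cl_add_rd_completed(), mei_cl_read_cb() and mei_cl_del_rd_completed(). A hypothetical consumer sketch showing the intended pairing (illustration only, not from the patch; it assumes cb->buf_idx holds the received length after completion, as __mei_cl_recv() does):

    /* Hypothetical: consume one completed read for a given file pointer. */
    static ssize_t consume_one(struct mei_cl *cl, const struct file *fp,
                               u8 *buf, size_t len)
    {
            struct mei_cl_cb *cb;
            size_t n;

            cb = mei_cl_read_cb(cl, fp);        /* lookup under the lock */
            if (!cb)
                    return -EAGAIN;

            n = min_t(size_t, len, cb->buf_idx);
            memcpy(buf, cb->buf.data, n);
            mei_cl_del_rd_completed(cl, cb);    /* free under the lock */
            return n;
    }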
@@ -1483,13 +1683,17 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
return 0;
/* HW currently supports only one pending read */
- if (cl->rx_flow_ctrl_creds)
+ if (cl->rx_flow_ctrl_creds) {
+ mei_cl_set_read_by_fp(cl, fp);
return -EBUSY;
+ }
cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
if (!cb)
return -ENOMEM;
+ mei_cl_set_read_by_fp(cl, fp);
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
@@ -1518,21 +1722,67 @@ nortpm:
return rets;
}
+static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
+{
+ ext->type = MEI_EXT_HDR_VTAG;
+ ext->ext_payload[0] = vtag;
+ ext->length = mei_data2slots(sizeof(*ext));
+ return ext->length;
+}
+
/**
- * mei_msg_hdr_init - initialize mei message header
+ * mei_msg_hdr_init - allocate and initialize mei message header
*
- * @mei_hdr: mei message header
* @cb: message callback structure
+ *
+ * Return: a pointer to initialized header
*/
-static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
+static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
+ size_t hdr_len;
+ struct mei_ext_meta_hdr *meta;
+ struct mei_ext_hdr *ext;
+ struct mei_msg_hdr *mei_hdr;
+ bool is_ext, is_vtag;
+
+ if (!cb)
+ return ERR_PTR(-EINVAL);
+
+ /* Extended header for vtag is attached only on the first fragment */
+ is_vtag = (cb->vtag && cb->buf_idx == 0);
+ is_ext = is_vtag;
+
+ /* Compute extended header size */
+ hdr_len = sizeof(*mei_hdr);
+
+ if (!is_ext)
+ goto setup_hdr;
+
+ hdr_len += sizeof(*meta);
+ if (is_vtag)
+ hdr_len += sizeof(*ext);
+
+setup_hdr:
+ mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
+ if (!mei_hdr)
+ return ERR_PTR(-ENOMEM);
+
mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
mei_hdr->me_addr = mei_cl_me_id(cb->cl);
- mei_hdr->length = 0;
- mei_hdr->reserved = 0;
- mei_hdr->msg_complete = 0;
- mei_hdr->dma_ring = 0;
mei_hdr->internal = cb->internal;
+ mei_hdr->extended = is_ext;
+
+ if (!is_ext)
+ goto out;
+
+ meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
+ if (is_vtag) {
+ meta->count++;
+ meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
+ }
+out:
+ mei_hdr->length = hdr_len - sizeof(*mei_hdr);
+ return mei_hdr;
}
/**
@@ -1550,10 +1800,11 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
{
struct mei_device *dev;
struct mei_msg_data *buf;
- struct mei_msg_hdr mei_hdr;
- size_t hdr_len = sizeof(mei_hdr);
- size_t len;
+ struct mei_msg_hdr *mei_hdr = NULL;
+ size_t hdr_len;
size_t hbuf_len, dr_len;
+ size_t buf_len;
+ size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
@@ -1579,7 +1830,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- len = buf->size - cb->buf_idx;
+ buf_len = buf->size - cb->buf_idx;
data = buf->data + cb->buf_idx;
hbuf_slots = mei_hbuf_empty_slots(dev);
if (hbuf_slots < 0) {
@@ -1591,42 +1842,54 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
- mei_msg_hdr_init(&mei_hdr, cb);
+ mei_hdr = mei_msg_hdr_init(cb);
+ if (IS_ERR(mei_hdr)) {
+ rets = PTR_ERR(mei_hdr);
+ mei_hdr = NULL;
+ goto err;
+ }
+
+ cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
+ mei_hdr->extended, cb->vtag);
+
+ hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
/**
* Split the message only if we can write the whole host buffer
* otherwise wait for next time the host buffer is empty.
*/
- if (len + hdr_len <= hbuf_len) {
- mei_hdr.length = len;
- mei_hdr.msg_complete = 1;
+ if (hdr_len + buf_len <= hbuf_len) {
+ data_len = buf_len;
+ mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
- mei_hdr.dma_ring = 1;
- if (len > dr_len)
- len = dr_len;
+ mei_hdr->dma_ring = 1;
+ if (buf_len > dr_len)
+ buf_len = dr_len;
else
- mei_hdr.msg_complete = 1;
+ mei_hdr->msg_complete = 1;
- mei_hdr.length = sizeof(dma_len);
- dma_len = len;
+ data_len = sizeof(dma_len);
+ dma_len = buf_len;
data = &dma_len;
} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
- len = hbuf_len - hdr_len;
- mei_hdr.length = len;
+ buf_len = hbuf_len - hdr_len;
+ data_len = buf_len;
} else {
+ kfree(mei_hdr);
return 0;
}
+ mei_hdr->length += data_len;
- if (mei_hdr.dma_ring)
- mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
+ if (mei_hdr->dma_ring)
+ mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
+ rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
- rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
if (rets)
goto err;
cl->status = 0;
cl->writing_state = MEI_WRITING;
- cb->buf_idx += len;
+ cb->buf_idx += buf_len;
if (first_chunk) {
if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1635,12 +1898,14 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
}
}
- if (mei_hdr.msg_complete)
+ if (mei_hdr->msg_complete)
list_move_tail(&cb->list, &dev->write_waiting_list);
+ kfree(mei_hdr);
return 0;
err:
+ kfree(mei_hdr);
cl->status = rets;
list_move_tail(&cb->list, cmpl_list);
return rets;
@@ -1659,9 +1924,11 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
struct mei_device *dev;
struct mei_msg_data *buf;
- struct mei_msg_hdr mei_hdr;
- size_t hdr_len = sizeof(mei_hdr);
- size_t len, hbuf_len, dr_len;
+ struct mei_msg_hdr *mei_hdr = NULL;
+ size_t hdr_len;
+ size_t hbuf_len, dr_len;
+ size_t buf_len;
+ size_t data_len;
int hbuf_slots;
u32 dr_slots;
u32 dma_len;
@@ -1678,9 +1945,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
dev = cl->dev;
buf = &cb->buf;
- len = buf->size;
+ buf_len = buf->size;
- cl_dbg(dev, cl, "len=%zd\n", len);
+ cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
blocking = cb->blocking;
data = buf->data;
@@ -1700,17 +1967,27 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
if (rets < 0)
goto err;
- mei_msg_hdr_init(&mei_hdr, cb);
+ mei_hdr = mei_msg_hdr_init(cb);
+ if (IS_ERR(mei_hdr)) {
+ rets = PTR_ERR(mei_hdr);
+ mei_hdr = NULL;
+ goto err;
+ }
+
+ cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
+ mei_hdr->extended, cb->vtag);
+
+ hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
if (rets == 0) {
cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
- rets = len;
+ rets = buf_len;
goto out;
}
if (!mei_hbuf_acquire(dev)) {
cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
- rets = len;
+ rets = buf_len;
goto out;
}
@@ -1724,29 +2001,30 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
dr_slots = mei_dma_ring_empty_slots(dev);
dr_len = mei_slots2data(dr_slots);
- if (len + hdr_len <= hbuf_len) {
- mei_hdr.length = len;
- mei_hdr.msg_complete = 1;
+ if (hdr_len + buf_len <= hbuf_len) {
+ data_len = buf_len;
+ mei_hdr->msg_complete = 1;
} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
- mei_hdr.dma_ring = 1;
- if (len > dr_len)
- len = dr_len;
+ mei_hdr->dma_ring = 1;
+ if (buf_len > dr_len)
+ buf_len = dr_len;
else
- mei_hdr.msg_complete = 1;
+ mei_hdr->msg_complete = 1;
- mei_hdr.length = sizeof(dma_len);
- dma_len = len;
+ data_len = sizeof(dma_len);
+ dma_len = buf_len;
data = &dma_len;
} else {
- len = hbuf_len - hdr_len;
- mei_hdr.length = len;
+ buf_len = hbuf_len - hdr_len;
+ data_len = buf_len;
}
- if (mei_hdr.dma_ring)
- mei_dma_ring_write(dev, buf->data, len);
+ mei_hdr->length += data_len;
+
+ if (mei_hdr->dma_ring)
+ mei_dma_ring_write(dev, buf->data, buf_len);
+ rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
- rets = mei_write_message(dev, &mei_hdr, hdr_len,
- data, mei_hdr.length);
if (rets)
goto err;
@@ -1755,12 +2033,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
goto err;
cl->writing_state = MEI_WRITING;
- cb->buf_idx = len;
+ cb->buf_idx = buf_len;
/* restore return value */
- len = buf->size;
+ buf_len = buf->size;
out:
- if (mei_hdr.msg_complete)
+ if (mei_hdr->msg_complete)
mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
else
mei_tx_cb_enqueue(cb, &dev->write_list);
@@ -1785,7 +2063,7 @@ out:
}
}
- rets = len;
+ rets = buf_len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
@@ -1793,10 +2071,11 @@ err:
free:
mei_io_cb_free(cb);
+ kfree(mei_hdr);
+
return rets;
}
-
/**
* mei_cl_complete - processes completed operation for a client
*
@@ -1820,7 +2099,7 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
break;
case MEI_FOP_READ:
- list_add_tail(&cb->list, &cl->rd_completed);
+ mei_cl_add_rd_completed(cl, cb);
if (!mei_cl_is_fixed_address(cl) &&
!WARN_ON(!cl->rx_flow_ctrl_creds))
cl->rx_flow_ctrl_creds--;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 2f8954def591..9e08a9843bba 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -94,6 +94,18 @@ static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl)
}
/**
+ * mei_me_cl_vt - return me client vtag support status
+ *
+ * @me_cl: me client
+ *
+ * Return: true if me client supports vt tagging
+ */
+static inline bool mei_me_cl_vt(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.vt_supported == 1;
+}
+
+/**
* mei_me_cl_max_len - return me client max msg length
*
* @me_cl: me client
@@ -121,8 +133,11 @@ int mei_cl_unlink(struct mei_cl *cl);
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
-struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
- const struct file *fp);
+struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp);
+
+void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb);
+void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb);
+
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
enum mei_cb_file_ops type,
const struct file *fp);
@@ -131,6 +146,9 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
const struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
+struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag);
+const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag);
+int mei_cl_vt_support_check(const struct mei_cl *cl);
/*
* MEI input output function prototype
*/
@@ -164,11 +182,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
*
* @cl: host client
*
- * Return: mtu
+ * Return: mtu or 0 if client is not connected
*/
static inline size_t mei_cl_mtu(const struct mei_cl *cl)
{
- return cl->me_cl->props.max_msg_length;
+ return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
}
/**
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a26c716c61a1..3ab1a431d810 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -27,7 +27,7 @@ static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused)
down_read(&dev->me_clients_rwsem);
- seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|\n");
+ seq_puts(m, " |id|fix| UUID |con|msg len|sb|refc|vt|\n");
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -37,14 +37,15 @@ static int mei_dbgfs_meclients_show(struct seq_file *m, void *unused)
if (!mei_me_cl_get(me_cl))
continue;
- seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|\n",
+ seq_printf(m, "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|%4d|%2d|\n",
i++, me_cl->client_id,
me_cl->props.fixed_address,
&me_cl->props.protocol_name,
me_cl->props.max_number_of_connections,
me_cl->props.max_msg_length,
me_cl->props.single_recv_buf,
- kref_read(&me_cl->refcnt));
+ kref_read(&me_cl->refcnt),
+ me_cl->props.vt_supported);
mei_me_cl_put(me_cl);
}
@@ -103,6 +104,8 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
seq_printf(m, "\tFA: %01d\n", dev->hbm_f_fa_supported);
seq_printf(m, "\tOS: %01d\n", dev->hbm_f_os_supported);
seq_printf(m, "\tDR: %01d\n", dev->hbm_f_dr_supported);
+ seq_printf(m, "\tVT: %01d\n", dev->hbm_f_vt_supported);
+ seq_printf(m, "\tCAP: %01d\n", dev->hbm_f_cap_supported);
}
seq_printf(m, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 308caee86920..a97eb5d47705 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -125,19 +125,15 @@ void mei_hbm_reset(struct mei_device *dev)
/**
* mei_hbm_hdr - construct hbm header
*
- * @hdr: hbm header
+ * @mei_hdr: hbm header
* @length: payload length
*/
-static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+static inline void mei_hbm_hdr(struct mei_msg_hdr *mei_hdr, size_t length)
{
- hdr->host_addr = 0;
- hdr->me_addr = 0;
- hdr->length = length;
- hdr->msg_complete = 1;
- hdr->dma_ring = 0;
- hdr->reserved = 0;
- hdr->internal = 0;
+ memset(mei_hdr, 0, sizeof(*mei_hdr));
+ mei_hdr->length = length;
+ mei_hdr->msg_complete = 1;
}
/**
@@ -326,6 +322,39 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
}
/**
+ * mei_hbm_capabilities_req - request capabilities
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_capabilities_req(struct mei_device *dev)
+{
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_capability_request req;
+ int ret;
+
+ mei_hbm_hdr(&mei_hdr, sizeof(req));
+
+ memset(&req, 0, sizeof(req));
+ req.hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD;
+ if (dev->hbm_f_vt_supported)
+ req.capability_requested[0] = HBM_CAP_VT;
+
+ ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+ if (ret) {
+ dev_err(dev->dev,
+ "capabilities request write failed: ret = %d.\n", ret);
+ return ret;
+ }
+
+ dev->hbm_state = MEI_HBM_CAP_SETUP;
+ dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ mei_schedule_stall_timer(dev);
+ return 0;
+}
+
+/**
* mei_hbm_enum_clients_req - sends enumeration client request message.
*
* @dev: the device structure
@@ -1042,6 +1071,20 @@ static void mei_hbm_config_features(struct mei_device *dev)
(dev->version.major_version == HBM_MAJOR_VERSION_DR &&
dev->version.minor_version >= HBM_MINOR_VERSION_DR))
dev->hbm_f_dr_supported = 1;
+
+ /* VTag Support */
+ dev->hbm_f_vt_supported = 0;
+ if (dev->version.major_version > HBM_MAJOR_VERSION_VT ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_VT &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_VT))
+ dev->hbm_f_vt_supported = 1;
+
+ /* Capability message Support */
+ dev->hbm_f_cap_supported = 0;
+ if (dev->version.major_version > HBM_MAJOR_VERSION_CAP ||
+ (dev->version.major_version == HBM_MAJOR_VERSION_CAP &&
+ dev->version.minor_version >= HBM_MINOR_VERSION_CAP))
+ dev->hbm_f_cap_supported = 1;
}
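The three feature gates above repeat one comparison shape. A hypothetical helper expressing that pattern (illustration only; the patch keeps the open-coded form):

    /* Hypothetical, not part of the patch: true if the negotiated HBM
     * protocol version is at least major.minor. */
    static bool hbm_version_at_least(const struct mei_device *dev,
                                     u8 major, u8 minor)
    {
            return dev->version.major_version > major ||
                   (dev->version.major_version == major &&
                    dev->version.minor_version >= minor);
    }

With it, the vtag gate would read: dev->hbm_f_vt_supported = hbm_version_at_least(dev, HBM_MAJOR_VERSION_VT, HBM_MINOR_VERSION_VT);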
/**
@@ -1075,6 +1118,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
struct hbm_host_enum_response *enum_res;
struct hbm_dma_setup_response *dma_setup_res;
struct hbm_add_client_request *add_cl_req;
+ struct hbm_capability_response *capability_res;
int ret;
struct mei_hbm_cl_cmd *cl_cmd;
@@ -1138,6 +1182,13 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
return -EPROTO;
}
+ if (dev->hbm_f_cap_supported) {
+ if (mei_hbm_capabilities_req(dev))
+ return -EIO;
+ wake_up(&dev->wait_hbm_start);
+ break;
+ }
+
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
dev_info(dev->dev, "running w/o dma ring\n");
@@ -1159,6 +1210,38 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
wake_up(&dev->wait_hbm_start);
break;
+ case MEI_HBM_CAPABILITIES_RES_CMD:
+ dev_dbg(dev->dev, "hbm: capabilities response: message received.\n");
+
+ dev->init_clients_timer = 0;
+
+ if (dev->hbm_state != MEI_HBM_CAP_SETUP) {
+ dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
+ dev->dev_state, dev->hbm_state);
+ return -EPROTO;
+ }
+
+ capability_res = (struct hbm_capability_response *)mei_msg;
+ if (!(capability_res->capability_granted[0] & HBM_CAP_VT))
+ dev->hbm_f_vt_supported = 0;
+
+ if (dev->hbm_f_dr_supported) {
+ if (mei_dmam_ring_alloc(dev))
+ dev_info(dev->dev, "running w/o dma ring\n");
+ if (mei_dma_ring_is_allocated(dev)) {
+ if (mei_hbm_dma_setup_req(dev))
+ return -EIO;
+ break;
+ }
+ }
+
+ dev->hbm_f_dr_supported = 0;
+ mei_dmam_ring_free(dev);
+
+ if (mei_hbm_enum_clients_req(dev))
+ return -EIO;
+ break;
+
case MEI_HBM_DMA_SETUP_RES_CMD:
dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 5aa58cffdd2e..4d95e38e4ddf 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -16,6 +16,7 @@ struct mei_cl;
*
* @MEI_HBM_IDLE : protocol not started
* @MEI_HBM_STARTING : start request message was sent
+ * @MEI_HBM_CAP_SETUP : capabilities request message was sent
* @MEI_HBM_DR_SETUP : dma ring setup request message was sent
* @MEI_HBM_ENUM_CLIENTS : enumeration request was sent
* @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties
@@ -25,6 +26,7 @@ struct mei_cl;
enum mei_hbm_state {
MEI_HBM_IDLE = 0,
MEI_HBM_STARTING,
+ MEI_HBM_CAP_SETUP,
MEI_HBM_DR_SETUP,
MEI_HBM_ENUM_CLIENTS,
MEI_HBM_CLIENT_PROPERTIES,
diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c
new file mode 100644
index 000000000000..899dc1c5e7ca
--- /dev/null
+++ b/drivers/misc/mei/hw-virtio.c
@@ -0,0 +1,874 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2018-2020, Intel Corporation.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/atomic.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+#define MEI_VIRTIO_RPM_TIMEOUT 500
+/* ACRN virtio device types */
+#ifndef VIRTIO_ID_MEI
+#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */
+#endif
+
+/**
+ * struct mei_virtio_cfg - settings passed from the virtio backend
+ * @buf_depth: read buffer depth in slots (4 bytes each)
+ * @hw_ready: hw is ready for operation
+ * @host_reset: synchronize reset with virtio backend
+ * @reserved: reserved for alignment
+ * @fw_status: FW status
+ */
+struct mei_virtio_cfg {
+ u32 buf_depth;
+ u8 hw_ready;
+ u8 host_reset;
+ u8 reserved[2];
+ u32 fw_status[MEI_FW_STATUS_MAX];
+} __packed;
+
+struct mei_virtio_hw {
+ struct mei_device mdev;
+ char name[32];
+
+ struct virtqueue *in;
+ struct virtqueue *out;
+
+ bool host_ready;
+ struct work_struct intr_handler;
+
+ u32 *recv_buf;
+ u8 recv_rdy;
+ size_t recv_sz;
+ u32 recv_idx;
+ u32 recv_len;
+
+ /* send buffer */
+ atomic_t hbuf_ready;
+ const void *send_hdr;
+ const void *send_buf;
+
+ struct mei_virtio_cfg cfg;
+};
+
+#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev)
+
+/**
+ * mei_virtio_fw_status() - read status register of mei
+ * @dev: mei device
+ * @fw_status: fw status register values
+ *
+ * Return: always 0
+ */
+static int mei_virtio_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ fw_status->count = MEI_FW_STATUS_MAX;
+ virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status),
+ fw_status->status, sizeof(fw_status->status));
+ return 0;
+}
+
+/**
+ * mei_virtio_pg_state() - translate internal pg state
+ * to the mei power gating state
+ * There is no power management in ACRN mode; always returns OFF.
+ *
+ * @dev: mei device
+ *
+ * Return:
+ * * MEI_PG_OFF - if aliveness is on (always)
+ * * MEI_PG_ON - (never)
+ */
+static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev)
+{
+ return MEI_PG_OFF;
+}
+
+/**
+ * mei_virtio_hw_config() - configure hw dependent settings
+ *
+ * @dev: mei device
+ *
+ * Return: always 0
+ */
+static int mei_virtio_hw_config(struct mei_device *dev)
+{
+ return 0;
+}
+
+/**
+ * mei_virtio_hbuf_empty_slots() - counts write empty slots.
+ * @dev: the device structure
+ *
+ * Return: the frontend buffer depth if the buffer is ready, 0 otherwise
+ */
+static int mei_virtio_hbuf_empty_slots(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0;
+}
+
+/**
+ * mei_virtio_hbuf_is_ready() - checks if write buffer is ready
+ * @dev: the device structure
+ *
+ * Return: true if hbuf is ready
+ */
+static bool mei_virtio_hbuf_is_ready(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ return atomic_read(&hw->hbuf_ready) == 1;
+}
+
+/**
+ * mei_virtio_hbuf_depth() - returns depth of FE write buffer.
+ * @dev: the device structure
+ *
+ * Return: depth of the frontend write buffer in slots
+ */
+static u32 mei_virtio_hbuf_depth(const struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ return hw->cfg.buf_depth;
+}
+
+/**
+ * mei_virtio_intr_clear() - clear and stop interrupts
+ * @dev: the device structure
+ */
+static void mei_virtio_intr_clear(struct mei_device *dev)
+{
+ /*
+ * In our virtio solution there are two types of interrupts:
+ * vq interrupts and config change interrupts.
+ * 1) start/reset relies on the virtio config changed interrupt;
+ * 2) send/recv relies on the virtio virtqueue interrupts.
+ * They are all virtual interrupts, so there is no corresponding
+ * operation to perform here.
+ */
+}
+
+/**
+ * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks
+ * @dev: the device structure
+ */
+static void mei_virtio_intr_enable(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ virtio_config_enable(vdev);
+
+ virtqueue_enable_cb(hw->in);
+ virtqueue_enable_cb(hw->out);
+}
+
+/**
+ * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks
+ *
+ * @dev: the device structure
+ */
+static void mei_virtio_intr_disable(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ virtio_config_disable(vdev);
+
+ virtqueue_disable_cb(hw->in);
+ virtqueue_disable_cb(hw->out);
+}
+
+/**
+ * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all
+ * virtqueues
+ * @dev: the device structure
+ */
+static void mei_virtio_synchronize_irq(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ /*
+ * Now, all IRQ handlers are converted to workqueue.
+ * Change synchronize irq to flush this work.
+ */
+ flush_work(&hw->intr_handler);
+}
+
+static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw)
+{
+ kfree(hw->send_hdr);
+ kfree(hw->send_buf);
+ hw->send_hdr = NULL;
+ hw->send_buf = NULL;
+}
+
+/**
+ * mei_virtio_write_message() - writes a message to mei virtio back-end service.
+ * @dev: the device structure
+ * @hdr: mei header of message
+ * @hdr_len: header length
+ * @data: message payload to be written
+ * @data_len: message payload length
+ *
+ * Return:
+ * * 0: on success
+ * * -EIO: if write has failed
+ * * -ENOMEM: on memory allocation failure
+ */
+static int mei_virtio_write_message(struct mei_device *dev,
+ const void *hdr, size_t hdr_len,
+ const void *data, size_t data_len)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct scatterlist sg[2];
+ const void *hbuf, *dbuf;
+ int ret;
+
+ if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0)))
+ return -EIO;
+
+ hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL);
+ hw->send_hdr = hbuf;
+
+ dbuf = kmemdup(data, data_len, GFP_KERNEL);
+ hw->send_buf = dbuf;
+
+ if (!hbuf || !dbuf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sg_init_table(sg, 2);
+ sg_set_buf(&sg[0], hbuf, hdr_len);
+ sg_set_buf(&sg[1], dbuf, data_len);
+
+ ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev->dev, "failed to add outbuf\n");
+ goto fail;
+ }
+
+ virtqueue_kick(hw->out);
+ return 0;
+fail:
+
+ mei_virtio_free_outbufs(hw);
+
+ return ret;
+}
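Note the ownership handoff above: virtqueue_add_outbuf() only records the
scatterlist pointers, so the header and payload are kmemdup()'d and parked
in hw->send_hdr/hw->send_buf until the backend consumes them. A condensed
sketch of the reclaim side (the same logic appears in
mei_virtio_intr_handler() further down):

	/* illustrative: completion side of the out virtqueue */
	unsigned int len;

	if (virtqueue_get_buf(hw->out, &len)) {
		/* backend is done with the buffers: free the kmemdup'd copies */
		mei_virtio_free_outbufs(hw);
		/* allow the next message; only one is in flight at a time */
		atomic_inc(&hw->hbuf_ready);
	}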
+
+/**
+ * mei_virtio_count_full_read_slots() - counts read full slots.
+ * @dev: the device structure
+ *
+ * Return: -EOVERFLOW on read index overflow, otherwise the filled slots count
+ */
+static int mei_virtio_count_full_read_slots(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ if (hw->recv_idx > hw->recv_len)
+ return -EOVERFLOW;
+
+ return hw->recv_len - hw->recv_idx;
+}
+
+/**
+ * mei_virtio_read_hdr() - Reads a 32-bit dword from the mei virtio receive buffer
+ *
+ * @dev: the device structure
+ *
+ * Return: 32-bit dword of the receive buffer (u32)
+ */
+static inline u32 mei_virtio_read_hdr(const struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1);
+
+ return hw->recv_buf[hw->recv_idx++];
+}
+
+static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer,
+ unsigned long len)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ u32 slots = mei_data2slots(len);
+
+ if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots))
+ return -EOVERFLOW;
+
+ /*
+ * Assumption: There is only one MEI message in recv_buf each time.
+ * The backend service needs to follow this rule too.
+ */
+ memcpy(buffer, hw->recv_buf + hw->recv_idx, len);
+ hw->recv_idx += slots;
+
+ return 0;
+}
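The read path above counts in 4-byte slots, not bytes. A minimal sketch of
the conversion, assuming mei_data2slots()/mei_slots2data() are the usual
MEI_SLOT_SIZE round-up/multiply helpers from mei_dev.h (their definitions
are not part of this diff):

	/* illustrative only: the assumed slot/byte conversion */
	#define EXAMPLE_SLOT_SIZE sizeof(u32)

	static inline u32 example_data2slots(size_t length)
	{
		/* bytes -> slots, rounded up */
		return DIV_ROUND_UP(length, EXAMPLE_SLOT_SIZE);
	}

	static inline size_t example_slots2data(int slots)
	{
		/* slots -> bytes */
		return slots * EXAMPLE_SLOT_SIZE;
	}

A 6-byte payload thus occupies two slots and recv_idx advances by two; it
is also why mei_virtio_probe() below allocates the receive buffer with
mei_slots2data(hw->cfg.buf_depth) bytes.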
+
+static bool mei_virtio_pg_is_enabled(struct mei_device *dev)
+{
+ return false;
+}
+
+static bool mei_virtio_pg_in_transition(struct mei_device *dev)
+{
+ return false;
+}
+
+static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw)
+{
+ struct scatterlist sg;
+
+ if (hw->recv_rdy) /* receive buffer already queued */
+ return;
+
+ /* refill the recv_buf to IN virtqueue to get next message */
+ sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth));
+ hw->recv_len = 0;
+ hw->recv_idx = 0;
+ hw->recv_rdy = 1;
+ virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL);
+ virtqueue_kick(hw->in);
+}
+
+/**
+ * mei_virtio_hw_is_ready() - check whether the BE (hw) has turned ready
+ * @dev: mei device
+ * Return: true if the backend reports hw_ready
+ */
+static bool mei_virtio_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ virtio_cread(vdev, struct mei_virtio_cfg,
+ hw_ready, &hw->cfg.hw_ready);
+
+ dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready);
+
+ return hw->cfg.hw_ready;
+}
+
+/**
+ * mei_virtio_hw_reset() - resets virtio hw.
+ *
+ * @dev: the device structure
+ * @intr_enable: if true, re-enable the virtio data/config callbacks
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ dev_dbg(dev->dev, "hw reset\n");
+
+ dev->recvd_hw_ready = false;
+ hw->host_ready = false;
+ atomic_set(&hw->hbuf_ready, 0);
+ hw->recv_len = 0;
+ hw->recv_idx = 0;
+
+ hw->cfg.host_reset = 1;
+ virtio_cwrite(vdev, struct mei_virtio_cfg,
+ host_reset, &hw->cfg.host_reset);
+
+ mei_virtio_hw_is_ready(dev);
+
+ if (intr_enable)
+ mei_virtio_intr_enable(dev);
+
+ return 0;
+}
+
+/**
+ * mei_virtio_hw_reset_release() - release device from the reset
+ * @dev: the device structure
+ */
+static void mei_virtio_hw_reset_release(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ struct virtio_device *vdev = dev_to_virtio(dev->dev);
+
+ dev_dbg(dev->dev, "hw reset release\n");
+ hw->cfg.host_reset = 0;
+ virtio_cwrite(vdev, struct mei_virtio_cfg,
+ host_reset, &hw->cfg.host_reset);
+}
+
+/**
+ * mei_virtio_hw_ready_wait() - wait until the virtio (hw) has turned ready
+ * or timeout is reached
+ * @dev: mei device
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_virtio_hw_ready_wait(struct mei_device *dev)
+{
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready,
+ mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->dev, "wait hw ready failed\n");
+ return -ETIMEDOUT;
+ }
+
+ dev->recvd_hw_ready = false;
+ return 0;
+}
+
+/**
+ * mei_virtio_hw_start() - hw start routine
+ * @dev: mei device
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_virtio_hw_start(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+ int ret;
+
+ dev_dbg(dev->dev, "hw start\n");
+ mei_virtio_hw_reset_release(dev);
+
+ ret = mei_virtio_hw_ready_wait(dev);
+ if (ret)
+ return ret;
+
+ mei_virtio_add_recv_buf(hw);
+ atomic_set(&hw->hbuf_ready, 1);
+ dev_dbg(dev->dev, "hw is ready\n");
+ hw->host_ready = true;
+
+ return 0;
+}
+
+/**
+ * mei_virtio_host_is_ready() - check whether the FE has turned ready
+ * @dev: mei device
+ *
+ * Return: true if the frontend (host) is ready
+ */
+static bool mei_virtio_host_is_ready(struct mei_device *dev)
+{
+ struct mei_virtio_hw *hw = to_virtio_hw(dev);
+
+ dev_dbg(dev->dev, "host ready %d\n", hw->host_ready);
+
+ return hw->host_ready;
+}
+
+/**
+ * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei
+ * @vq: receiving virtqueue
+ */
+static void mei_virtio_data_in(struct virtqueue *vq)
+{
+ struct mei_virtio_hw *hw = vq->vdev->priv;
+
+ /* disable interrupts (enabled again from the interrupt worker) */
+ virtqueue_disable_cb(hw->in);
+
+ schedule_work(&hw->intr_handler);
+}
+
+/**
+ * mei_virtio_data_out() - The callback of send virtqueue of virtio mei
+ * @vq: transmitting virtqueue
+ */
+static void mei_virtio_data_out(struct virtqueue *vq)
+{
+ struct mei_virtio_hw *hw = vq->vdev->priv;
+
+ schedule_work(&hw->intr_handler);
+}
+
+static void mei_virtio_intr_handler(struct work_struct *work)
+{
+ struct mei_virtio_hw *hw =
+ container_of(work, struct mei_virtio_hw, intr_handler);
+ struct mei_device *dev = &hw->mdev;
+ LIST_HEAD(complete_list);
+ s32 slots;
+ int rets = 0;
+ void *data;
+ unsigned int len;
+
+ mutex_lock(&dev->device_lock);
+
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_warn(dev->dev, "Interrupt in disabled state.\n");
+ mei_virtio_intr_disable(dev);
+ goto end;
+ }
+
+ /* check if ME wants a reset */
+ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
+ dev_warn(dev->dev, "BE service not ready: resetting.\n");
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+
+ /* check if we need to start the dev */
+ if (!mei_host_is_ready(dev)) {
+ if (mei_hw_is_ready(dev)) {
+ dev_dbg(dev->dev, "we need to start the dev.\n");
+ dev->recvd_hw_ready = true;
+ wake_up(&dev->wait_hw_ready);
+ } else {
+ dev_warn(dev->dev, "Spurious Interrupt\n");
+ }
+ goto end;
+ }
+
+ /* read */
+ if (hw->recv_rdy) {
+ data = virtqueue_get_buf(hw->in, &len);
+ if (!data || !len) {
+ dev_dbg(dev->dev, "No data %d", len);
+ } else {
+ dev_dbg(dev->dev, "data_in %d\n", len);
+ WARN_ON(data != hw->recv_buf);
+ hw->recv_len = mei_data2slots(len);
+ hw->recv_rdy = 0;
+ }
+ }
+
+ /* write */
+ if (!atomic_read(&hw->hbuf_ready)) {
+ if (!virtqueue_get_buf(hw->out, &len)) {
+ dev_warn(dev->dev, "Failed to getbuf\n");
+ } else {
+ mei_virtio_free_outbufs(hw);
+ atomic_inc(&hw->hbuf_ready);
+ }
+ }
+
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ while (slots > 0) {
+ dev_dbg(dev->dev, "slots to read = %08x\n", slots);
+ rets = mei_irq_read_handler(dev, &complete_list, &slots);
+
+ if (rets &&
+ (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)) {
+ dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
+ rets);
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+ }
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+ mei_irq_write_handler(dev, &complete_list);
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+ mei_irq_compl_handler(dev, &complete_list);
+
+ mei_virtio_add_recv_buf(hw);
+
+end:
+ if (dev->dev_state != MEI_DEV_DISABLED) {
+ if (!virtqueue_enable_cb(hw->in))
+ schedule_work(&hw->intr_handler);
+ }
+
+ mutex_unlock(&dev->device_lock);
+}
+
+static void mei_virtio_config_changed(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw = vdev->priv;
+ struct mei_device *dev = &hw->mdev;
+
+ virtio_cread(vdev, struct mei_virtio_cfg,
+ hw_ready, &hw->cfg.hw_ready);
+
+ if (dev->dev_state == MEI_DEV_DISABLED) {
+ dev_dbg(dev->dev, "disabled state don't start\n");
+ return;
+ }
+
+ /* Run intr handler once to handle reset notify */
+ schedule_work(&hw->intr_handler);
+}
+
+static void mei_virtio_remove_vqs(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw = vdev->priv;
+
+ virtqueue_detach_unused_buf(hw->in);
+ hw->recv_len = 0;
+ hw->recv_idx = 0;
+ hw->recv_rdy = 0;
+
+ virtqueue_detach_unused_buf(hw->out);
+
+ mei_virtio_free_outbufs(hw);
+
+ vdev->config->del_vqs(vdev);
+}
+
+/*
+ * There are two virtqueues: one for send and one for receive.
+ */
+static int mei_virtio_init_vqs(struct mei_virtio_hw *hw,
+ struct virtio_device *vdev)
+{
+ struct virtqueue *vqs[2];
+
+ vq_callback_t *cbs[] = {
+ mei_virtio_data_in,
+ mei_virtio_data_out,
+ };
+ static const char * const names[] = {
+ "in",
+ "out",
+ };
+ int ret;
+
+ ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
+ if (ret)
+ return ret;
+
+ hw->in = vqs[0];
+ hw->out = vqs[1];
+
+ return 0;
+}
+
+static const struct mei_hw_ops mei_virtio_ops = {
+ .fw_status = mei_virtio_fw_status,
+ .pg_state = mei_virtio_pg_state,
+
+ .host_is_ready = mei_virtio_host_is_ready,
+
+ .hw_is_ready = mei_virtio_hw_is_ready,
+ .hw_reset = mei_virtio_hw_reset,
+ .hw_config = mei_virtio_hw_config,
+ .hw_start = mei_virtio_hw_start,
+
+ .pg_in_transition = mei_virtio_pg_in_transition,
+ .pg_is_enabled = mei_virtio_pg_is_enabled,
+
+ .intr_clear = mei_virtio_intr_clear,
+ .intr_enable = mei_virtio_intr_enable,
+ .intr_disable = mei_virtio_intr_disable,
+ .synchronize_irq = mei_virtio_synchronize_irq,
+
+ .hbuf_free_slots = mei_virtio_hbuf_empty_slots,
+ .hbuf_is_ready = mei_virtio_hbuf_is_ready,
+ .hbuf_depth = mei_virtio_hbuf_depth,
+
+ .write = mei_virtio_write_message,
+
+ .rdbuf_full_slots = mei_virtio_count_full_read_slots,
+ .read_hdr = mei_virtio_read_hdr,
+ .read = mei_virtio_read,
+};
+
+static int mei_virtio_probe(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw;
+ int ret;
+
+ hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ vdev->priv = hw;
+
+ INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler);
+
+ ret = mei_virtio_init_vqs(hw, vdev);
+ if (ret)
+ goto vqs_failed;
+
+ virtio_cread(vdev, struct mei_virtio_cfg,
+ buf_depth, &hw->cfg.buf_depth);
+
+ hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL);
+ if (!hw->recv_buf) {
+ ret = -ENOMEM;
+ goto hbuf_failed;
+ }
+ atomic_set(&hw->hbuf_ready, 0);
+
+ virtio_device_ready(vdev);
+
+ mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops);
+
+ pm_runtime_get_noresume(&vdev->dev);
+ pm_runtime_set_active(&vdev->dev);
+ pm_runtime_enable(&vdev->dev);
+
+ ret = mei_start(&hw->mdev);
+ if (ret)
+ goto mei_start_failed;
+
+ pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&vdev->dev);
+
+ ret = mei_register(&hw->mdev, &vdev->dev);
+ if (ret)
+ goto mei_failed;
+
+ pm_runtime_put(&vdev->dev);
+
+ return 0;
+
+mei_failed:
+ mei_stop(&hw->mdev);
+mei_start_failed:
+ mei_cancel_work(&hw->mdev);
+ mei_disable_interrupts(&hw->mdev);
+ kfree(hw->recv_buf);
+hbuf_failed:
+ vdev->config->del_vqs(vdev);
+vqs_failed:
+ return ret;
+}
+
+static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device)
+{
+ struct virtio_device *vdev = dev_to_virtio(device);
+ struct mei_virtio_hw *hw = vdev->priv;
+
+ dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n");
+
+ if (!hw)
+ return -ENODEV;
+
+ if (mei_write_is_idle(&hw->mdev))
+ pm_runtime_autosuspend(device);
+
+ return -EBUSY;
+}
+
+static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device)
+{
+ return 0;
+}
+
+static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device)
+{
+ return 0;
+}
+
+static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw = vdev->priv;
+
+ dev_dbg(&vdev->dev, "freeze\n");
+
+ if (!hw)
+ return -ENODEV;
+
+ mei_stop(&hw->mdev);
+ mei_disable_interrupts(&hw->mdev);
+ cancel_work_sync(&hw->intr_handler);
+ vdev->config->reset(vdev);
+ mei_virtio_remove_vqs(vdev);
+
+ return 0;
+}
+
+static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw = vdev->priv;
+ int ret;
+
+ dev_dbg(&vdev->dev, "restore\n");
+
+ if (!hw)
+ return -ENODEV;
+
+ ret = mei_virtio_init_vqs(hw, vdev);
+ if (ret)
+ return ret;
+
+ virtio_device_ready(vdev);
+
+ ret = mei_restart(&hw->mdev);
+ if (ret)
+ return ret;
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&hw->mdev.timer_work, HZ);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mei_virtio_pm_ops = {
+ SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend,
+ mei_virtio_pm_runtime_resume,
+ mei_virtio_pm_runtime_idle)
+};
+
+static void mei_virtio_remove(struct virtio_device *vdev)
+{
+ struct mei_virtio_hw *hw = vdev->priv;
+
+ mei_stop(&hw->mdev);
+ mei_disable_interrupts(&hw->mdev);
+ cancel_work_sync(&hw->intr_handler);
+ mei_deregister(&hw->mdev);
+ vdev->config->reset(vdev);
+ mei_virtio_remove_vqs(vdev);
+ kfree(hw->recv_buf);
+ pm_runtime_disable(&vdev->dev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID },
+ { }
+};
+
+static struct virtio_driver mei_virtio_driver = {
+ .id_table = id_table,
+ .probe = mei_virtio_probe,
+ .remove = mei_virtio_remove,
+ .config_changed = mei_virtio_config_changed,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .pm = &mei_virtio_pm_ops,
+ },
+#ifdef CONFIG_PM_SLEEP
+ .freeze = mei_virtio_freeze,
+ .restore = mei_virtio_restore,
+#endif
+};
+
+module_virtio_driver(mei_virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio MEI frontend driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 26fa92cb7f7a..df2fb9520dd8 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -25,7 +25,7 @@
/*
* MEI Version
*/
-#define HBM_MINOR_VERSION 1
+#define HBM_MINOR_VERSION 2
#define HBM_MAJOR_VERSION 2
/*
@@ -76,6 +76,18 @@
#define HBM_MINOR_VERSION_DR 1
#define HBM_MAJOR_VERSION_DR 2
+/*
+ * MEI version with vm tag support
+ */
+#define HBM_MINOR_VERSION_VT 2
+#define HBM_MAJOR_VERSION_VT 2
+
+/*
+ * MEI version with capabilities message support
+ */
+#define HBM_MINOR_VERSION_CAP 2
+#define HBM_MAJOR_VERSION_CAP 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -121,6 +133,9 @@
#define MEI_HBM_DMA_SETUP_REQ_CMD 0x12
#define MEI_HBM_DMA_SETUP_RES_CMD 0x92
+#define MEI_HBM_CAPABILITIES_REQ_CMD 0x13
+#define MEI_HBM_CAPABILITIES_RES_CMD 0x93
+
/*
* MEI Stop Reason
* used by hbm_host_stop_request.reason
@@ -182,17 +197,103 @@ enum mei_cl_connect_status {
/*
* Client Disconnect Status
*/
-enum mei_cl_disconnect_status {
+enum mei_cl_disconnect_status {
MEI_CL_DISCONN_SUCCESS = MEI_HBMS_SUCCESS
};
/**
+ * enum mei_ext_hdr_type - extended header type used in
+ * extended header TLV
+ *
+ * @MEI_EXT_HDR_NONE: sentinel
+ * @MEI_EXT_HDR_VTAG: vtag header
+ */
+enum mei_ext_hdr_type {
+ MEI_EXT_HDR_NONE = 0,
+ MEI_EXT_HDR_VTAG = 1,
+};
+
+/**
+ * struct mei_ext_hdr - extended header descriptor (TLV)
+ * @type: enum mei_ext_hdr_type
+ * @length: length excluding descriptor
+ * @ext_payload: payload of the specific extended header
+ * @hdr: placeholder for the actual header
+ */
+struct mei_ext_hdr {
+ u8 type;
+ u8 length;
+ u8 ext_payload[2];
+ u8 hdr[];
+};
+
+/**
+ * struct mei_ext_meta_hdr - extended header meta data
+ * @count: number of headers
+ * @size: total size of the extended header list excluding meta header
+ * @reserved: reserved
+ * @hdrs: extended headers TLV list
+ */
+struct mei_ext_meta_hdr {
+ u8 count;
+ u8 size;
+ u8 reserved[2];
+ struct mei_ext_hdr hdrs[];
+};
+
+/*
+ * Extended header iterator functions
+ */
+/**
+ * mei_ext_begin - extended header iterator begin
+ *
+ * @meta: meta header of the extended header list
+ *
+ * Return:
+ * The first extended header
+ */
+static inline struct mei_ext_hdr *mei_ext_begin(struct mei_ext_meta_hdr *meta)
+{
+ return meta->hdrs;
+}
+
+/**
+ * mei_ext_last - check if the ext is the last one in the TLV list
+ *
+ * @meta: meta header of the extended header list
+ * @ext: a meta header on the list
+ *
+ * Return: true if ext is the last header on the list
+ */
+static inline bool mei_ext_last(struct mei_ext_meta_hdr *meta,
+ struct mei_ext_hdr *ext)
+{
+ return (u8 *)ext >= (u8 *)meta + sizeof(*meta) + (meta->size * 4);
+}
+
+/**
+ * mei_ext_next - get the next extended header on the TLV list
+ *
+ * @ext: current extend header
+ *
+ * Context: The function does not check for overflow;
+ * one should call mei_ext_last() first.
+ *
+ * Return: The extended header following @ext
+ */
+static inline struct mei_ext_hdr *mei_ext_next(struct mei_ext_hdr *ext)
+{
+ return (struct mei_ext_hdr *)(ext->hdr + (ext->length * 4));
+}
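Taken together, the three helpers define a dword-granular TLV walk. A
sketch of the wire layout they assume, derived only from the structure
definitions above:

	/*
	 * Illustrative layout of a meta header followed by one TLV:
	 *
	 *   bytes:    0       1        2..3
	 *   dword 0:  count   size     reserved      struct mei_ext_meta_hdr
	 *   dword 1:  type    length   ext_payload   first struct mei_ext_hdr
	 *   ...       further TLVs, meta->size dwords of list in total
	 *
	 * mei_ext_last() ends the walk at (u8 *)meta + sizeof(*meta) +
	 * meta->size * 4, and mei_ext_next() hops ext->length dwords past
	 * ext->hdr.
	 */

The receive path in interrupt.c below walks the list exactly this way:
mei_ext_begin(), then a do/while over mei_ext_next() until mei_ext_last().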
+
+/**
* struct mei_msg_hdr - MEI BUS Interface Section
*
* @me_addr: device address
* @host_addr: host address
* @length: message length
* @reserved: reserved
+ * @extended: message has extended header
* @dma_ring: message is on dma ring
* @internal: message is internal
* @msg_complete: last packet of the message
@@ -202,18 +303,17 @@ struct mei_msg_hdr {
u32 me_addr:8;
u32 host_addr:8;
u32 length:9;
- u32 reserved:4;
+ u32 reserved:3;
+ u32 extended:1;
u32 dma_ring:1;
u32 internal:1;
u32 msg_complete:1;
- u32 extension[0];
+ u32 extension[];
} __packed;
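/*
 * Illustrative arithmetic, not part of the patch: the first header dword
 * stays fully packed, since 8 (me_addr) + 8 (host_addr) + 9 (length) +
 * 3 (reserved) + 1 (extended) + 1 (dma_ring) + 1 (internal) +
 * 1 (msg_complete) = 32 bits; the new @extended flag is carved out of the
 * previously 4-bit reserved field.
 */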
/* The length is up to 9 bits */
#define MEI_MSG_MAX_LEN_MASK GENMASK(9, 0)
-#define MEI_MSG_HDR_MAX 2
-
struct mei_bus_message {
u8 hbm_cmd;
u8 data[];
@@ -299,13 +399,26 @@ struct hbm_host_enum_response {
u8 valid_addresses[32];
} __packed;
+/**
+ * struct mei_client_properties - mei client properties
+ *
+ * @protocol_name: guid of the client
+ * @protocol_version: client protocol version
+ * @max_number_of_connections: number of possible connections.
+ * @fixed_address: fixed me address (0 if the client is dynamic)
+ * @single_recv_buf: 1 if all connections share a single receive buffer.
+ * @vt_supported: the client supports vtags
+ * @reserved: reserved
+ * @max_msg_length: MTU of the client
+ */
struct mei_client_properties {
uuid_le protocol_name;
u8 protocol_version;
u8 max_number_of_connections;
u8 fixed_address;
u8 single_recv_buf:1;
- u8 reserved:7;
+ u8 vt_supported:1;
+ u8 reserved:6;
u32 max_msg_length;
} __packed;
@@ -533,4 +646,29 @@ struct hbm_dma_ring_ctrl {
u32 reserved4;
} __packed;
+/* virtual tag supported */
+#define HBM_CAP_VT BIT(0)
+
+/**
+ * struct hbm_capability_request - capability request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @capability_requested: bitmask of capabilities requested by host
+ */
+struct hbm_capability_request {
+ u8 hbm_cmd;
+ u8 capability_requested[3];
+} __packed;
+
+/**
+ * struct hbm_capability_response - capability response from fw to host
+ *
+ * @hbm_cmd: bus message command header
+ * @capability_granted: bitmask of capabilities granted by FW
+ */
+struct hbm_capability_response {
+ u8 hbm_cmd;
+ u8 capability_granted[3];
+} __packed;
+
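The capability negotiation added above is a plain bitmask exchange: the
host sets the bits it wants in @capability_requested and the firmware
answers in @capability_granted. A minimal sketch of composing the request
with the only bit defined so far (the real message is built by the HBM
state machine in hbm.c, which is outside this hunk):

	/* illustrative sketch: ask the firmware for the vtag capability */
	static void example_fill_cap_request(struct hbm_capability_request *req)
	{
		memset(req, 0, sizeof(*req));
		req->hbm_cmd = MEI_HBM_CAPABILITIES_REQ_CMD;
		req->capability_requested[0] = HBM_CAP_VT; /* bit 0: vtags */
	}

On the response side the driver would set hbm_f_vt_supported (declared in
the mei_dev.h hunk below) only when HBM_CAP_VT survives in
capability_granted[0].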
#endif
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c70a8c74cc57..326955b04fda 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -61,16 +61,21 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
*
* @dev: mei device
* @hdr: message header
+ * @discard_len: the length of the message to discard (excluding header)
*/
-static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
+static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
+ size_t discard_len)
{
- if (hdr->dma_ring)
- mei_dma_ring_read(dev, NULL, hdr->extension[0]);
+ if (hdr->dma_ring) {
+ mei_dma_ring_read(dev, NULL,
+ hdr->extension[dev->rd_msg_hdr_count - 2]);
+ discard_len = 0;
+ }
/*
* no need to check for size as it is guaranteed
* that length fits into rd_msg_buf
*/
- mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+ mei_read_slots(dev, dev->rd_msg_buf, discard_len);
dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
MEI_HDR_PRM(hdr));
}
@@ -80,18 +85,29 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
*
* @cl: reading client
* @mei_hdr: header of mei client message
+ * @meta: extend meta header
* @cmpl_list: completion list
*
* Return: always 0
*/
static int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
+ struct mei_ext_meta_hdr *meta,
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
+
size_t buf_sz;
u32 length;
+ int ext_len;
+
+ length = mei_hdr->length;
+ ext_len = 0;
+ if (mei_hdr->extended) {
+ ext_len = sizeof(*meta) + mei_slots2data(meta->size);
+ length -= ext_len;
+ }
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
if (!cb) {
@@ -105,13 +121,50 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
list_add_tail(&cb->list, &cl->rd_pending);
}
+ if (mei_hdr->extended) {
+ struct mei_ext_hdr *ext;
+ struct mei_ext_hdr *vtag = NULL;
+
+ ext = mei_ext_begin(meta);
+ do {
+ switch (ext->type) {
+ case MEI_EXT_HDR_VTAG:
+ vtag = ext;
+ break;
+ case MEI_EXT_HDR_NONE:
+ fallthrough;
+ default:
+ cb->status = -EPROTO;
+ break;
+ }
+
+ ext = mei_ext_next(ext);
+ } while (!mei_ext_last(meta, ext));
+
+ if (!vtag) {
+ cl_dbg(dev, cl, "vtag not found in extended header.\n");
+ cb->status = -EPROTO;
+ goto discard;
+ }
+
+ cl_dbg(dev, cl, "vtag: %d\n", vtag->ext_payload[0]);
+ if (cb->vtag && cb->vtag != vtag->ext_payload[0]) {
+ cl_err(dev, cl, "mismatched tag: %d != %d\n",
+ cb->vtag, vtag->ext_payload[0]);
+ cb->status = -EPROTO;
+ goto discard;
+ }
+ cb->vtag = vtag->ext_payload[0];
+ }
+
if (!mei_cl_is_connected(cl)) {
cl_dbg(dev, cl, "not connected\n");
cb->status = -ENODEV;
goto discard;
}
- length = mei_hdr->dma_ring ? mei_hdr->extension[0] : mei_hdr->length;
+ if (mei_hdr->dma_ring)
+ length = mei_hdr->extension[mei_data2slots(ext_len)];
buf_sz = length + cb->buf_idx;
/* catch for integer overflow */
@@ -129,11 +182,13 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
goto discard;
}
- if (mei_hdr->dma_ring)
+ if (mei_hdr->dma_ring) {
mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
-
- /* for DMA read 0 length to generate an interrupt to the device */
- mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);
+ /* for DMA read 0 length to generate interrupt to the device */
+ mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
+ } else {
+ mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
+ }
cb->buf_idx += length;
@@ -150,7 +205,7 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
discard:
if (cb)
list_move_tail(&cb->list, cmpl_list);
- mei_irq_discard_msg(dev, mei_hdr);
+ mei_irq_discard_msg(dev, mei_hdr, length);
return 0;
}
@@ -265,11 +320,16 @@ int mei_irq_read_handler(struct mei_device *dev,
struct list_head *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
+ struct mei_ext_meta_hdr *meta_hdr = NULL;
struct mei_cl *cl;
int ret;
+ u32 ext_meta_hdr_u32;
+ int i;
+ int ext_hdr_end;
if (!dev->rd_msg_hdr[0]) {
dev->rd_msg_hdr[0] = mei_read_hdr(dev);
+ dev->rd_msg_hdr_count = 1;
(*slots)--;
dev_dbg(dev->dev, "slots =%08x.\n", *slots);
@@ -292,10 +352,34 @@ int mei_irq_read_handler(struct mei_device *dev,
goto end;
}
+ ext_hdr_end = 1;
+
+ if (mei_hdr->extended) {
+ if (!dev->rd_msg_hdr[1]) {
+ ext_meta_hdr_u32 = mei_read_hdr(dev);
+ dev->rd_msg_hdr[1] = ext_meta_hdr_u32;
+ dev->rd_msg_hdr_count++;
+ (*slots)--;
+ dev_dbg(dev->dev, "extended header is %08x\n",
+ ext_meta_hdr_u32);
+ }
+ meta_hdr = ((struct mei_ext_meta_hdr *)
+ dev->rd_msg_hdr + 1);
+ ext_hdr_end = meta_hdr->size + 2;
+ for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
+ dev->rd_msg_hdr[i] = mei_read_hdr(dev);
+ dev_dbg(dev->dev, "extended header %d is %08x\n", i,
+ dev->rd_msg_hdr[i]);
+ dev->rd_msg_hdr_count++;
+ (*slots)--;
+ }
+ }
+
if (mei_hdr->dma_ring) {
- dev->rd_msg_hdr[1] = mei_read_hdr(dev);
+ dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
+ dev->rd_msg_hdr_count++;
(*slots)--;
- mei_hdr->length = 0;
+ mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]);
}
/* HBM message */
@@ -326,7 +410,7 @@ int mei_irq_read_handler(struct mei_device *dev,
*/
if (hdr_is_fixed(mei_hdr) ||
dev->dev_state == MEI_DEV_POWER_DOWN) {
- mei_irq_discard_msg(dev, mei_hdr);
+ mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
ret = 0;
goto reset_slots;
}
@@ -336,12 +420,13 @@ int mei_irq_read_handler(struct mei_device *dev,
goto end;
}
- ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
+ ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
reset_slots:
/* reset the number of slots and header */
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
+ dev->rd_msg_hdr_count = 0;
*slots = mei_count_full_read_slots(dev);
if (*slots == -EOVERFLOW) {
/* overflow - reset */
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 86ef5c1a7928..9f6682033ed7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -81,6 +81,27 @@ err_unlock:
}
/**
+ * mei_cl_vtag_remove_by_fp - remove the vtag corresponding to fp from the list
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ */
+static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
+ const struct file *fp)
+{
+ struct mei_cl_vtag *vtag_l, *next;
+
+ list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
+ if (vtag_l->fp == fp) {
+ list_del(&vtag_l->list);
+ kfree(vtag_l);
+ return;
+ }
+ }
+}
+
+/**
* mei_release - the release function
*
* @inode: pointer to inode structure
@@ -101,17 +122,35 @@ static int mei_release(struct inode *inode, struct file *file)
mutex_lock(&dev->device_lock);
+ mei_cl_vtag_remove_by_fp(cl, file);
+
+ if (!list_empty(&cl->vtag_map)) {
+ cl_dbg(dev, cl, "not the last vtag\n");
+ mei_cl_flush_queues(cl, file);
+ rets = 0;
+ goto out;
+ }
+
rets = mei_cl_disconnect(cl);
+ /*
+ * Check again: This is necessary since disconnect releases the lock
+ * and another client can connect in the meantime.
+ */
+ if (!list_empty(&cl->vtag_map)) {
+ cl_dbg(dev, cl, "not the last vtag after disconnect\n");
+ mei_cl_flush_queues(cl, file);
+ goto out;
+ }
- mei_cl_flush_queues(cl, file);
+ mei_cl_flush_queues(cl, NULL);
cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
+ kfree(cl);
+out:
file->private_data = NULL;
- kfree(cl);
-
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -178,7 +217,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
- !list_empty(&cl->rd_completed) ||
+ mei_cl_read_cb(cl, file) ||
!mei_cl_is_connected(cl))) {
if (signal_pending(current))
return -EINTR;
@@ -229,7 +268,7 @@ copy_buffer:
goto out;
free:
- mei_io_cb_free(cb);
+ mei_cl_del_rd_completed(cl, cb);
*offset = 0;
out:
@@ -237,6 +276,28 @@ out:
mutex_unlock(&dev->device_lock);
return rets;
}
+
+/**
+ * mei_cl_vtag_by_fp - obtain the vtag by file pointer
+ *
+ * @cl: host client
+ * @fp: pointer to file structure
+ *
+ * Return: vtag value on success, otherwise 0
+ */
+static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
+{
+ struct mei_cl_vtag *cl_vtag;
+
+ if (!fp)
+ return 0;
+
+ list_for_each_entry(cl_vtag, &cl->vtag_map, list)
+ if (cl_vtag->fp == fp)
+ return cl_vtag->vtag;
+ return 0;
+}
+
/**
* mei_write - the write function.
*
@@ -314,6 +375,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENOMEM;
goto out;
}
+ cb->vtag = mei_cl_vtag_by_fp(cl, file);
rets = copy_from_user(cb->buf.data, ubuf, length);
if (rets) {
@@ -333,17 +395,18 @@ out:
* mei_ioctl_connect_client - the connect to fw client IOCTL function
*
* @file: private data of the file object
- * @data: IOCTL connect data, input and output parameters
+ * @in_client_uuid: requested UUID for connection
+ * @client: IOCTL connect data, output parameters
*
* Locking: called under "dev->device_lock" lock
*
* Return: 0 on success, <0 on failure.
*/
static int mei_ioctl_connect_client(struct file *file,
- struct mei_connect_client_data *data)
+ const uuid_le *in_client_uuid,
+ struct mei_client *client)
{
struct mei_device *dev;
- struct mei_client *client;
struct mei_me_client *me_cl;
struct mei_cl *cl;
int rets;
@@ -351,18 +414,15 @@ static int mei_ioctl_connect_client(struct file *file,
cl = file->private_data;
dev = cl->dev;
- if (dev->dev_state != MEI_DEV_ENABLED)
- return -ENODEV;
-
if (cl->state != MEI_FILE_INITIALIZING &&
cl->state != MEI_FILE_DISCONNECTED)
return -EBUSY;
/* find ME client we're trying to connect to */
- me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+ me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
if (!me_cl) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
- &data->in_client_uuid);
+ in_client_uuid);
rets = -ENOTTY;
goto end;
}
@@ -372,7 +432,7 @@ static int mei_ioctl_connect_client(struct file *file,
!dev->allow_fixed_address : !dev->hbm_f_fa_supported;
if (forbidden) {
dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
- &data->in_client_uuid);
+ in_client_uuid);
rets = -ENOTTY;
goto end;
}
@@ -386,7 +446,6 @@ static int mei_ioctl_connect_client(struct file *file,
me_cl->props.max_msg_length);
/* prepare the output buffer */
- client = &data->out_client_properties;
client->max_msg_length = me_cl->props.max_msg_length;
client->protocol_version = me_cl->props.protocol_version;
dev_dbg(dev->dev, "Can connect?\n");
@@ -399,6 +458,135 @@ end:
}
/**
+ * mei_vt_support_check - check if client support vtags
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @dev: mei_device
+ * @uuid: client UUID
+ *
+ * Return:
+ * 0 - supported
+ * -ENOTTY - no such client
+ * -EOPNOTSUPP - vtags are not supported by client
+ */
+static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
+{
+ struct mei_me_client *me_cl;
+ int ret;
+
+ if (!dev->hbm_f_vt_supported)
+ return -EOPNOTSUPP;
+
+ me_cl = mei_me_cl_by_uuid(dev, uuid);
+ if (!me_cl) {
+ dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+ uuid);
+ return -ENOTTY;
+ }
+ ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
+ mei_me_cl_put(me_cl);
+
+ return ret;
+}
+
+/**
+ * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
+ *
+ * @file: private data of the file object
+ * @in_client_uuid: requested UUID for connection
+ * @client: IOCTL connect data, output parameters
+ * @vtag: vm tag
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_vtag(struct file *file,
+ const uuid_le *in_client_uuid,
+ struct mei_client *client,
+ u8 vtag)
+{
+ struct mei_device *dev;
+ struct mei_cl *cl;
+ struct mei_cl *pos;
+ struct mei_cl_vtag *cl_vtag;
+
+ cl = file->private_data;
+ dev = cl->dev;
+
+ dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);
+
+ switch (cl->state) {
+ case MEI_FILE_DISCONNECTED:
+ if (mei_cl_vtag_by_fp(cl, file) != vtag) {
+ dev_err(dev->dev, "reconnect with different vtag\n");
+ return -EINVAL;
+ }
+ break;
+ case MEI_FILE_INITIALIZING:
+ /* a malicious connect from another thread may have pushed a vtag */
+ if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
+ dev_err(dev->dev, "vtag already filled\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(pos, &dev->file_list, link) {
+ if (pos == cl)
+ continue;
+ if (!pos->me_cl)
+ continue;
+
+ /* only search for same UUID */
+ if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
+ continue;
+
+ /* if tag already exist try another fp */
+ if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
+ continue;
+
+ /* replace cl with acquired one */
+ dev_dbg(dev->dev, "replacing with existing cl\n");
+ mei_cl_unlink(cl);
+ kfree(cl);
+ file->private_data = pos;
+ cl = pos;
+ break;
+ }
+
+ cl_vtag = mei_cl_vtag_alloc(file, vtag);
+ if (IS_ERR(cl_vtag))
+ return -ENOMEM;
+
+ list_add_tail(&cl_vtag->list, &cl->vtag_map);
+ break;
+ default:
+ return -EBUSY;
+ }
+
+ while (cl->state != MEI_FILE_INITIALIZING &&
+ cl->state != MEI_FILE_DISCONNECTED &&
+ cl->state != MEI_FILE_CONNECTED) {
+ mutex_unlock(&dev->device_lock);
+ wait_event_timeout(cl->wait,
+ (cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED ||
+ cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
+ cl->state == MEI_FILE_DISCONNECT_REPLY),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+ }
+
+ if (!mei_cl_is_connected(cl))
+ return mei_ioctl_connect_client(file, in_client_uuid, client);
+
+ client->max_msg_length = cl->me_cl->props.max_msg_length;
+ client->protocol_version = cl->me_cl->props.protocol_version;
+
+ return 0;
+}
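From userspace the vtag path is reached through IOCTL_MEI_CONNECT_CLIENT_VTAG,
handled further below. A hedged sketch of a caller, assuming the uapi
introduced alongside this series exports that ioctl and
struct mei_connect_client_data_vtag with the connect.in_client_uuid,
connect.vtag and out_client_properties fields used by the handler
(/dev/mei0 is a placeholder node):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/mei.h>
	#include <linux/uuid.h>

	static int example_connect_vtag(const uuid_le *uuid, unsigned char vtag)
	{
		struct mei_connect_client_data_vtag data;
		int fd = open("/dev/mei0", O_RDWR);

		if (fd < 0)
			return -1;

		memset(&data, 0, sizeof(data));
		memcpy(&data.connect.in_client_uuid, uuid, sizeof(*uuid));
		data.connect.vtag = vtag; /* must be non-zero, see the handler below */

		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &data) < 0) {
			perror("IOCTL_MEI_CONNECT_CLIENT_VTAG");
			close(fd);
			return -1;
		}

		printf("connected, max_msg_length=%u\n",
		       data.out_client_properties.max_msg_length);
		return fd;
	}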
+
+/**
* mei_ioctl_client_notify_request -
* propagate event notification request to client
*
@@ -454,7 +642,11 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
struct mei_device *dev;
struct mei_cl *cl = file->private_data;
- struct mei_connect_client_data connect_data;
+ struct mei_connect_client_data conn;
+ struct mei_connect_client_data_vtag conn_vtag;
+ const uuid_le *cl_uuid;
+ struct mei_client *props;
+ u8 vtag;
u32 notify_get, notify_req;
int rets;
@@ -475,20 +667,68 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
switch (cmd) {
case IOCTL_MEI_CONNECT_CLIENT:
dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
- if (copy_from_user(&connect_data, (char __user *)data,
- sizeof(connect_data))) {
+ if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
goto out;
}
+ cl_uuid = &conn.in_client_uuid;
+ props = &conn.out_client_properties;
+ vtag = 0;
+
+ rets = mei_vt_support_check(dev, cl_uuid);
+ if (rets == -ENOTTY)
+ goto out;
+ if (!rets)
+ rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
+ vtag);
+ else
+ rets = mei_ioctl_connect_client(file, cl_uuid, props);
+ if (rets)
+ goto out;
+
+ /* if all is ok, copying the data back to user. */
+ if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
+ dev_dbg(dev->dev, "failed to copy data to userland\n");
+ rets = -EFAULT;
+ goto out;
+ }
+
+ break;
+
+ case IOCTL_MEI_CONNECT_CLIENT_VTAG:
+ dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
+ if (copy_from_user(&conn_vtag, (char __user *)data,
+ sizeof(conn_vtag))) {
+ dev_dbg(dev->dev, "failed to copy data from userland\n");
+ rets = -EFAULT;
+ goto out;
+ }
+
+ cl_uuid = &conn_vtag.connect.in_client_uuid;
+ props = &conn_vtag.out_client_properties;
+ vtag = conn_vtag.connect.vtag;
+
+ rets = mei_vt_support_check(dev, cl_uuid);
+ if (rets == -EOPNOTSUPP)
+ dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n",
+ cl_uuid);
+ if (rets)
+ goto out;
+
+ if (!vtag) {
+ dev_dbg(dev->dev, "vtag can't be zero\n");
+ rets = -EINVAL;
+ goto out;
+ }
- rets = mei_ioctl_connect_client(file, &connect_data);
+ rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
if (rets)
goto out;
/* if all is ok, copying the data back to user. */
- if (copy_to_user((char __user *)data, &connect_data,
- sizeof(connect_data))) {
+ if (copy_to_user((char __user *)data, &conn_vtag,
+ sizeof(conn_vtag))) {
dev_dbg(dev->dev, "failed to copy data to userland\n");
rets = -EFAULT;
goto out;
@@ -572,7 +812,7 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
if (req_events & (EPOLLIN | EPOLLRDNORM)) {
poll_wait(file, &cl->rx_wait, wait);
- if (!list_empty(&cl->rd_completed))
+ if (mei_cl_read_cb(cl, file))
mask |= EPOLLIN | EPOLLRDNORM;
else
mei_cl_read_start(cl, mei_cl_mtu(cl), file);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index d3a4f54c0ae7..2f4cc1a8aae8 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -174,6 +174,7 @@ struct mei_cl;
* @fop_type: file operation type
* @buf: buffer for data associated with the callback
* @buf_idx: last read index
+ * @vtag: virtual tag
* @fp: pointer to file structure
* @status: io status of the cb
* @internal: communication between driver and FW flag
@@ -185,6 +186,7 @@ struct mei_cl_cb {
enum mei_cb_file_ops fop_type;
struct mei_msg_data buf;
size_t buf_idx;
+ u8 vtag;
const struct file *fp;
int status;
u32 internal:1;
@@ -192,6 +194,21 @@ struct mei_cl_cb {
};
/**
+ * struct mei_cl_vtag - file pointer to vtag mapping structure
+ *
+ * @list: link in map queue
+ * @fp: file pointer
+ * @vtag: corresponding vtag
+ * @pending_read: the read is pending on this file
+ */
+struct mei_cl_vtag {
+ struct list_head list;
+ const struct file *fp;
+ u8 vtag;
+ u8 pending_read:1;
+};
+
+/**
* struct mei_cl - me client host representation
* carried in file->private_data
*
@@ -207,6 +224,7 @@ struct mei_cl_cb {
* @me_cl: fw client connected
* @fp: file associated with client
* @host_client_id: host id
+ * @vtag_map: vtag map
* @tx_flow_ctrl_creds: transmit flow credentials
* @rx_flow_ctrl_creds: receive flow credentials
* @timer_count: watchdog timer for operation completion
@@ -215,6 +233,7 @@ struct mei_cl_cb {
* @tx_cb_queued: number of tx callbacks in queue
* @writing_state: state of the tx
* @rd_pending: pending read credits
+ * @rd_completed_lock: protects rd_completed queue
* @rd_completed: completed read
*
* @cldev: device on the mei client bus
@@ -232,6 +251,7 @@ struct mei_cl {
struct mei_me_client *me_cl;
const struct file *fp;
u8 host_client_id;
+ struct list_head vtag_map;
u8 tx_flow_ctrl_creds;
u8 rx_flow_ctrl_creds;
u8 timer_count;
@@ -240,6 +260,7 @@ struct mei_cl {
u8 tx_cb_queued;
enum mei_file_transaction_states writing_state;
struct list_head rd_pending;
+ spinlock_t rd_completed_lock; /* protects rd_completed queue */
struct list_head rd_completed;
struct mei_cl_device *cldev;
@@ -413,6 +434,7 @@ struct mei_fw_version {
*
* @rd_msg_buf : control messages buffer
* @rd_msg_hdr : read message header storage
+ * @rd_msg_hdr_count : how many dwords were already read from the header
*
* @hbuf_is_ready : query if the host/write buffer is ready
* @dr_dscr: DMA ring descriptors: TX, RX, and CTRL
@@ -426,6 +448,8 @@ struct mei_fw_version {
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
* @hbm_f_os_supported : hbm feature support OS ver message
* @hbm_f_dr_supported : hbm feature dma ring supported
+ * @hbm_f_vt_supported : hbm feature vtag supported
+ * @hbm_f_cap_supported : hbm feature capabilities message supported
*
* @fw_ver : FW versions
*
@@ -494,7 +518,8 @@ struct mei_device {
#endif /* CONFIG_PM */
unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];
- u32 rd_msg_hdr[MEI_MSG_HDR_MAX];
+ u32 rd_msg_hdr[MEI_RD_MSG_BUF_SIZE];
+ int rd_msg_hdr_count;
/* write buffer */
bool hbuf_is_ready;
@@ -510,6 +535,8 @@ struct mei_device {
unsigned int hbm_f_ie_supported:1;
unsigned int hbm_f_os_supported:1;
unsigned int hbm_f_dr_supported:1;
+ unsigned int hbm_f_vt_supported:1;
+ unsigned int hbm_f_cap_supported:1;
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
@@ -746,10 +773,11 @@ static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
int mei_register(struct mei_device *dev, struct device *parent);
void mei_deregister(struct mei_device *dev);
-#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d internal=%1d comp=%1d"
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d ext=%1d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr) \
(hdr)->host_addr, (hdr)->me_addr, \
- (hdr)->length, (hdr)->dma_ring, (hdr)->internal, (hdr)->msg_complete
+ (hdr)->length, (hdr)->dma_ring, (hdr)->extended, \
+ (hdr)->internal, (hdr)->msg_complete
ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
/**
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
deleted file mode 100644
index b9bb086785db..000000000000
--- a/drivers/misc/mic/Kconfig
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "Intel MIC & related support"
-
-config INTEL_MIC_BUS
- tristate "Intel MIC Bus Driver"
- depends on 64BIT && PCI && X86
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
- CONFIG_INTEL_MIC_CARD, CONFIG_INTEL_MIC_X100_DMA etc.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config SCIF_BUS
- tristate "SCIF Bus Driver"
- depends on 64BIT && PCI && X86
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
- and CONFIG_INTEL_MIC_CARD.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config VOP_BUS
- tristate "VOP Bus Driver"
- select DMA_OPS
- help
- This option is selected by any driver which registers a
- device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
- and CONFIG_INTEL_MIC_CARD.
-
- If you are building a host/card kernel with an Intel MIC device
- then say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config INTEL_MIC_HOST
- tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86
- depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
- select DMA_OPS
- help
- This enables Host Driver support for the Intel Many Integrated
- Core (MIC) family of PCIe form factor coprocessor devices that
- run a 64 bit Linux OS. The driver manages card OS state and
- enables communication between host and card. Intel MIC X100
- devices are currently supported.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config INTEL_MIC_CARD
- tristate "Intel MIC Card Driver"
- depends on 64BIT && X86
- depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
- select VIRTIO
- help
- This enables card driver support for the Intel Many Integrated
- Core (MIC) device family. The card driver communicates shutdown/
- crash events to the host and allows registration/configuration of
- virtio devices. Intel MIC X100 devices are currently supported.
-
- If you are building a card kernel for an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- For more information see
- <http://software.intel.com/en-us/mic-developer>.
-
-config SCIF
- tristate "SCIF Driver"
- depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
- select IOMMU_IOVA
- help
- This enables SCIF Driver support for the Intel Many Integrated
- Core (MIC) family of PCIe form factor coprocessor devices that
- run a 64 bit Linux OS. The Symmetric Communication Interface
- (SCIF (pronounced as skiff)) is a low level communications API
- across PCIe currently implemented for MIC.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config MIC_COSM
- tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
- depends on 64BIT && PCI && X86 && SCIF
- help
- This enables COSM driver support for the Intel Many
- Integrated Core (MIC) family of PCIe form factor coprocessor
- devices. COSM drivers implement functions such as boot,
- shutdown, reset and reboot of MIC devices.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-config VOP
- tristate "VOP Driver"
- depends on VOP_BUS
- select VHOST_RING
- select VIRTIO
- help
- This enables VOP (Virtio over PCIe) Driver support for the Intel
- Many Integrated Core (MIC) family of PCIe form factor coprocessor
- devices. The VOP driver allows virtio drivers, e.g. net, console
- and block drivers, on the card connect to user space virtio
- devices on the host.
-
- If you are building a host kernel with an Intel MIC device then
- say M (recommended) or Y, else say N. If unsure say N.
-
- More information about the Intel MIC family as well as the Linux
- OS and tools for MIC to use with this driver are available from
- <http://software.intel.com/en-us/mic-developer>.
-
-endmenu
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
deleted file mode 100644
index 1a43622b183f..000000000000
--- a/drivers/misc/mic/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_HOST) += host/
-obj-$(CONFIG_INTEL_MIC_CARD) += card/
-obj-y += bus/
-obj-$(CONFIG_SCIF) += scif/
-obj-$(CONFIG_MIC_COSM) += cosm/
-obj-$(CONFIG_MIC_COSM) += cosm_client/
-obj-$(CONFIG_VOP) += vop/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
deleted file mode 100644
index 0a6aa21b2f67..000000000000
--- a/drivers/misc/mic/bus/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2014, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
-obj-$(CONFIG_SCIF_BUS) += scif_bus.o
-obj-$(CONFIG_MIC_COSM) += cosm_bus.o
-obj-$(CONFIG_VOP_BUS) += vop_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.c b/drivers/misc/mic/bus/cosm_bus.c
deleted file mode 100644
index 5f2141c71738..000000000000
--- a/drivers/misc/mic/bus/cosm_bus.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Bus Driver
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include "cosm_bus.h"
-
-/* Unique numbering for cosm devices. */
-static DEFINE_IDA(cosm_index_ida);
-
-static int cosm_dev_probe(struct device *d)
-{
- struct cosm_device *dev = dev_to_cosm(d);
- struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int cosm_dev_remove(struct device *d)
-{
- struct cosm_device *dev = dev_to_cosm(d);
- struct cosm_driver *drv = drv_to_cosm(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type cosm_bus = {
- .name = "cosm_bus",
- .probe = cosm_dev_probe,
- .remove = cosm_dev_remove,
-};
-
-int cosm_register_driver(struct cosm_driver *driver)
-{
- driver->driver.bus = &cosm_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(cosm_register_driver);
-
-void cosm_unregister_driver(struct cosm_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(cosm_unregister_driver);
-
-static inline void cosm_release_dev(struct device *d)
-{
- struct cosm_device *cdev = dev_to_cosm(d);
-
- kfree(cdev);
-}
-
-struct cosm_device *
-cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops)
-{
- struct cosm_device *cdev;
- int ret;
-
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
- return ERR_PTR(-ENOMEM);
-
- cdev->dev.parent = pdev;
- cdev->dev.release = cosm_release_dev;
- cdev->hw_ops = hw_ops;
- dev_set_drvdata(&cdev->dev, cdev);
- cdev->dev.bus = &cosm_bus;
-
- /* Assign a unique device index and hence name */
- ret = ida_simple_get(&cosm_index_ida, 0, 0, GFP_KERNEL);
- if (ret < 0)
- goto free_cdev;
-
- cdev->index = ret;
- cdev->dev.id = ret;
- dev_set_name(&cdev->dev, "cosm-dev%u", cdev->index);
-
- ret = device_register(&cdev->dev);
- if (ret)
- goto ida_remove;
- return cdev;
-ida_remove:
- ida_simple_remove(&cosm_index_ida, cdev->index);
-free_cdev:
- put_device(&cdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(cosm_register_device);
-
-void cosm_unregister_device(struct cosm_device *dev)
-{
- int index = dev->index; /* save for after device release */
-
- device_unregister(&dev->dev);
- ida_simple_remove(&cosm_index_ida, index);
-}
-EXPORT_SYMBOL_GPL(cosm_unregister_device);
-
-struct cosm_device *cosm_find_cdev_by_id(int id)
-{
- struct device *dev = subsys_find_device_by_id(&cosm_bus, id, NULL);
-
- return dev ? container_of(dev, struct cosm_device, dev) : NULL;
-}
-EXPORT_SYMBOL_GPL(cosm_find_cdev_by_id);
-
-static int __init cosm_init(void)
-{
- return bus_register(&cosm_bus);
-}
-
-static void __exit cosm_exit(void)
-{
- bus_unregister(&cosm_bus);
- ida_destroy(&cosm_index_ida);
-}
-
-core_initcall(cosm_init);
-module_exit(cosm_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC card OS state management bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
deleted file mode 100644
index d50d7aea168d..000000000000
--- a/drivers/misc/mic/bus/cosm_bus.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Bus Driver
- */
-#ifndef _COSM_BUS_H_
-#define _COSM_BUS_H_
-
-#include <linux/scif.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-/**
- * cosm_device - representation of a cosm device
- *
- * @attr_group: Pointer to list of sysfs attribute groups.
- * @sdev: Device for sysfs entries.
- * @state: MIC state.
- * @prev_state: MIC state previous to MIC_RESETTING
- * @shutdown_status: MIC status reported by card for shutdown/crashes.
- * @shutdown_status_int: Internal shutdown status maintained by the driver
- * @cosm_mutex: Mutex for synchronizing access to data structures.
- * @reset_trigger_work: Work for triggering reset requests.
- * @scif_work: Work for handling per device SCIF connections
- * @cmdline: Kernel command line.
- * @firmware: Firmware file name.
- * @ramdisk: Ramdisk file name.
- * @bootmode: Boot mode i.e. "linux" or "elf" for flash updates.
- * @log_buf_addr: Log buffer address for MIC.
- * @log_buf_len: Log buffer length address for MIC.
- * @state_sysfs: Sysfs dirent for notifying ring 3 about MIC state changes.
- * @hw_ops: the hardware bus ops for this device.
- * @dev: underlying device.
- * @index: unique position on the cosm bus
- * @dbg_dir: debug fs directory
- * @newepd: new endpoint from scif accept to be assigned to this cdev
- * @epd: SCIF endpoint for this cdev
- * @heartbeat_watchdog_enable: if heartbeat watchdog is enabled for this cdev
- * @sysfs_heartbeat_enable: sysfs setting for disabling heartbeat notification
- */
-struct cosm_device {
- const struct attribute_group **attr_group;
- struct device *sdev;
- u8 state;
- u8 prev_state;
- u8 shutdown_status;
- u8 shutdown_status_int;
- struct mutex cosm_mutex;
- struct work_struct reset_trigger_work;
- struct work_struct scif_work;
- char *cmdline;
- char *firmware;
- char *ramdisk;
- char *bootmode;
- void *log_buf_addr;
- int *log_buf_len;
- struct kernfs_node *state_sysfs;
- struct cosm_hw_ops *hw_ops;
- struct device dev;
- int index;
- struct dentry *dbg_dir;
- scif_epd_t newepd;
- scif_epd_t epd;
- bool heartbeat_watchdog_enable;
- bool sysfs_heartbeat_enable;
-};
-
-/**
- * cosm_driver - operations for a cosm driver
- *
- * @driver: underlying device driver (populate name and owner).
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct cosm_driver {
- struct device_driver driver;
- int (*probe)(struct cosm_device *dev);
- void (*remove)(struct cosm_device *dev);
-};
-
-/**
- * cosm_hw_ops - cosm bus ops
- *
- * @reset: trigger MIC reset
- * @force_reset: force MIC reset
- * @post_reset: inform MIC reset is complete
- * @ready: is MIC ready for OS download
- * @start: boot MIC
- * @stop: prepare MIC for reset
- * @family: return MIC HW family string
- * @stepping: return MIC HW stepping string
- * @aper: return MIC PCIe aperture
- */
-struct cosm_hw_ops {
- void (*reset)(struct cosm_device *cdev);
- void (*force_reset)(struct cosm_device *cdev);
- void (*post_reset)(struct cosm_device *cdev, enum mic_states state);
- bool (*ready)(struct cosm_device *cdev);
- int (*start)(struct cosm_device *cdev, int id);
- void (*stop)(struct cosm_device *cdev, bool force);
- ssize_t (*family)(struct cosm_device *cdev, char *buf);
- ssize_t (*stepping)(struct cosm_device *cdev, char *buf);
- struct mic_mw *(*aper)(struct cosm_device *cdev);
-};
-
-struct cosm_device *
-cosm_register_device(struct device *pdev, struct cosm_hw_ops *hw_ops);
-void cosm_unregister_device(struct cosm_device *dev);
-int cosm_register_driver(struct cosm_driver *drv);
-void cosm_unregister_driver(struct cosm_driver *drv);
-struct cosm_device *cosm_find_cdev_by_id(int id);
-
-static inline struct cosm_device *dev_to_cosm(struct device *dev)
-{
- return container_of(dev, struct cosm_device, dev);
-}
-
-static inline struct cosm_driver *drv_to_cosm(struct device_driver *drv)
-{
- return container_of(drv, struct cosm_driver, driver);
-}
-#endif /* _COSM_BUS_H */
diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c
deleted file mode 100644
index ed9a8351c3bf..000000000000
--- a/drivers/misc/mic/bus/mic_bus.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel MIC Bus driver.
- *
- * This implementation is very similar to the the virtio bus driver
- * implementation @ drivers/virtio/virtio.c
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/mic_bus.h>
-
-static ssize_t device_show(struct device *d,
-			   struct device_attribute *attr, char *buf)
-{
-	struct mbus_device *dev = dev_to_mbus(d);
-
-	return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
-			   struct device_attribute *attr, char *buf)
-{
-	struct mbus_device *dev = dev_to_mbus(d);
-
-	return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
-			     struct device_attribute *attr, char *buf)
-{
-	struct mbus_device *dev = dev_to_mbus(d);
-
-	return sprintf(buf, "mbus:d%08Xv%08X\n",
-		       dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *mbus_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(mbus_dev);
-
-static inline int mbus_id_match(const struct mbus_device *dev,
- const struct mbus_device_id *id)
-{
- if (id->device != dev->id.device && id->device != MBUS_DEV_ANY_ID)
- return 0;
-
- return id->vendor == MBUS_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call mbus_dev_probe().
- */
-static int mbus_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct mbus_device *dev = dev_to_mbus(dv);
- const struct mbus_device_id *ids;
-
- ids = drv_to_mbus(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (mbus_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
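-
-/*
- * The id_table walk above relies on a zero-terminated table. A driver
- * would typically declare something like this (my_mbus_ids is an
- * illustrative name, not from this tree):
- *
- *	static const struct mbus_device_id my_mbus_ids[] = {
- *		{ MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID },
- *		{ 0 },
- *	};
- */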
-
-static int mbus_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct mbus_device *dev = dev_to_mbus(dv);
-
- return add_uevent_var(env, "MODALIAS=mbus:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int mbus_dev_probe(struct device *d)
-{
- int err;
- struct mbus_device *dev = dev_to_mbus(d);
- struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
-
- err = drv->probe(dev);
-	if (!err && drv->scan)
-		drv->scan(dev);
- return err;
-}
-
-static int mbus_dev_remove(struct device *d)
-{
- struct mbus_device *dev = dev_to_mbus(d);
- struct mbus_driver *drv = drv_to_mbus(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type mic_bus = {
- .name = "mic_bus",
- .match = mbus_dev_match,
- .dev_groups = mbus_dev_groups,
- .uevent = mbus_uevent,
- .probe = mbus_dev_probe,
- .remove = mbus_dev_remove,
-};
-
-int mbus_register_driver(struct mbus_driver *driver)
-{
- driver->driver.bus = &mic_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(mbus_register_driver);
-
-void mbus_unregister_driver(struct mbus_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(mbus_unregister_driver);
-
-static void mbus_release_dev(struct device *d)
-{
- struct mbus_device *mbdev = dev_to_mbus(d);
- kfree(mbdev);
-}
-
-struct mbus_device *
-mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct mbus_hw_ops *hw_ops, int index,
- void __iomem *mmio_va)
-{
- int ret;
- struct mbus_device *mbdev;
-
- mbdev = kzalloc(sizeof(*mbdev), GFP_KERNEL);
- if (!mbdev)
- return ERR_PTR(-ENOMEM);
-
- mbdev->mmio_va = mmio_va;
- mbdev->dev.parent = pdev;
- mbdev->id.device = id;
- mbdev->id.vendor = MBUS_DEV_ANY_ID;
- mbdev->dev.dma_ops = dma_ops;
- mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
- dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
- mbdev->dev.release = mbus_release_dev;
- mbdev->hw_ops = hw_ops;
- mbdev->dev.bus = &mic_bus;
- mbdev->index = index;
- dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&mbdev->dev);
- if (ret)
- goto free_mbdev;
- return mbdev;
-free_mbdev:
- put_device(&mbdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(mbus_register_device);
-
-void mbus_unregister_device(struct mbus_device *mbdev)
-{
- device_unregister(&mbdev->dev);
-}
-EXPORT_SYMBOL_GPL(mbus_unregister_device);
-
-static int __init mbus_init(void)
-{
- return bus_register(&mic_bus);
-}
-
-static void __exit mbus_exit(void)
-{
- bus_unregister(&mic_bus);
-}
-
-core_initcall(mbus_init);
-module_exit(mbus_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c
deleted file mode 100644
index ae84109649d0..000000000000
--- a/drivers/misc/mic/bus/scif_bus.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel Symmetric Communications Interface Bus driver.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/dma-mapping.h>
-
-#include "scif_bus.h"
-
-static ssize_t device_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
-
- return sprintf(buf, "scif:d%08Xv%08X\n",
- dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *scif_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(scif_dev);
-
-static inline int scif_id_match(const struct scif_hw_dev *dev,
- const struct scif_hw_dev_id *id)
-{
- if (id->device != dev->id.device && id->device != SCIF_DEV_ANY_ID)
- return 0;
-
- return id->vendor == SCIF_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call scif_dev_probe().
- */
-static int scif_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct scif_hw_dev *dev = dev_to_scif(dv);
- const struct scif_hw_dev_id *ids;
-
- ids = drv_to_scif(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (scif_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
-
-static int scif_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct scif_hw_dev *dev = dev_to_scif(dv);
-
- return add_uevent_var(env, "MODALIAS=scif:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int scif_dev_probe(struct device *d)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
- struct scif_driver *drv = drv_to_scif(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int scif_dev_remove(struct device *d)
-{
- struct scif_hw_dev *dev = dev_to_scif(d);
- struct scif_driver *drv = drv_to_scif(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type scif_bus = {
- .name = "scif_bus",
- .match = scif_dev_match,
- .dev_groups = scif_dev_groups,
- .uevent = scif_uevent,
- .probe = scif_dev_probe,
- .remove = scif_dev_remove,
-};
-
-int scif_register_driver(struct scif_driver *driver)
-{
- driver->driver.bus = &scif_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(scif_register_driver);
-
-void scif_unregister_driver(struct scif_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(scif_unregister_driver);
-
-static void scif_release_dev(struct device *d)
-{
- struct scif_hw_dev *sdev = dev_to_scif(d);
-
- kfree(sdev);
-}
-
-struct scif_hw_dev *
-scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
- struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
- struct mic_mw *mmio, struct mic_mw *aper, void *dp,
- void __iomem *rdp, struct dma_chan **chan, int num_chan,
- bool card_rel_da)
-{
- int ret;
- struct scif_hw_dev *sdev;
-
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev)
- return ERR_PTR(-ENOMEM);
-
- sdev->dev.parent = pdev;
- sdev->id.device = id;
- sdev->id.vendor = SCIF_DEV_ANY_ID;
- sdev->dev.dma_ops = dma_ops;
- sdev->dev.release = scif_release_dev;
- sdev->hw_ops = hw_ops;
- sdev->dnode = dnode;
- sdev->snode = snode;
- dev_set_drvdata(&sdev->dev, sdev);
- sdev->dev.bus = &scif_bus;
- sdev->mmio = mmio;
- sdev->aper = aper;
- sdev->dp = dp;
- sdev->rdp = rdp;
- sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
- dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
- sdev->dma_ch = chan;
- sdev->num_dma_ch = num_chan;
- sdev->card_rel_da = card_rel_da;
- dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&sdev->dev);
- if (ret)
- goto free_sdev;
- return sdev;
-free_sdev:
- put_device(&sdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(scif_register_device);
-
-void scif_unregister_device(struct scif_hw_dev *sdev)
-{
- device_unregister(&sdev->dev);
-}
-EXPORT_SYMBOL_GPL(scif_unregister_device);
-
-static int __init scif_init(void)
-{
- return bus_register(&scif_bus);
-}
-
-static void __exit scif_exit(void)
-{
- bus_unregister(&scif_bus);
-}
-
-core_initcall(scif_init);
-module_exit(scif_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) SCIF Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h
deleted file mode 100644
index 642cd43bcabc..000000000000
--- a/drivers/misc/mic/bus/scif_bus.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel Symmetric Communications Interface Bus driver.
- */
-#ifndef _SCIF_BUS_H_
-#define _SCIF_BUS_H_
-/*
- * Everything a scif driver needs to work with any particular scif
- * hardware abstraction layer.
- */
-#include <linux/dma-mapping.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-struct scif_hw_dev_id {
- u32 device;
- u32 vendor;
-};
-
-#define MIC_SCIF_DEV 1
-#define SCIF_DEV_ANY_ID 0xffffffff
-
-/**
- * scif_hw_dev - representation of a hardware device abstracted for scif
- * @hw_ops: the hardware ops supported by this device
- * @id: the device type identification (used to match it with a driver)
- * @mmio: MMIO memory window
- * @aper: Aperture memory window
- * @dev: underlying device
- * @dnode: The destination node which this device will communicate with.
- * @snode: The source node for this device.
- * @dp: Self device page
- * @rdp: Remote device page
- * @dma_ch: Array of DMA channels
- * @num_dma_ch: Number of DMA channels available
- * @card_rel_da: Set to true if DMA addresses programmed in the DMA engine
- * are relative to the card point of view
- */
-struct scif_hw_dev {
- struct scif_hw_ops *hw_ops;
- struct scif_hw_dev_id id;
- struct mic_mw *mmio;
- struct mic_mw *aper;
- struct device dev;
- u8 dnode;
- u8 snode;
- void *dp;
- void __iomem *rdp;
- struct dma_chan **dma_ch;
- int num_dma_ch;
- bool card_rel_da;
-};
-
-/**
- * scif_driver - operations for a scif I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct scif_driver {
- struct device_driver driver;
- const struct scif_hw_dev_id *id_table;
- int (*probe)(struct scif_hw_dev *dev);
- void (*remove)(struct scif_hw_dev *dev);
-};
-
-/**
- * scif_hw_ops - Hardware operations for accessing a SCIF device on the SCIF bus.
- *
- * @next_db: Obtain the next available doorbell.
- * @request_irq: Request an interrupt on a particular doorbell.
- * @free_irq: Free an interrupt requested previously.
- * @ack_interrupt: acknowledge an interrupt in the ISR.
- * @send_intr: Send an interrupt to the remote node on a specified doorbell.
- * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell
- * which is specifically targeted for a peer to peer node.
- * @remap: Map a buffer with the specified physical address and length.
- * @unmap: Unmap a buffer previously mapped.
- */
-struct scif_hw_ops {
- int (*next_db)(struct scif_hw_dev *sdev);
- struct mic_irq * (*request_irq)(struct scif_hw_dev *sdev,
- irqreturn_t (*func)(int irq,
- void *data),
- const char *name, void *data,
- int db);
- void (*free_irq)(struct scif_hw_dev *sdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct scif_hw_dev *sdev, int num);
- void (*send_intr)(struct scif_hw_dev *sdev, int db);
- void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db,
- struct mic_mw *mw);
- void __iomem * (*remap)(struct scif_hw_dev *sdev,
- phys_addr_t pa, size_t len);
- void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va);
-};
-
-int scif_register_driver(struct scif_driver *driver);
-void scif_unregister_driver(struct scif_driver *driver);
-struct scif_hw_dev *
-scif_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct scif_hw_ops *hw_ops, u8 dnode, u8 snode,
- struct mic_mw *mmio, struct mic_mw *aper,
- void *dp, void __iomem *rdp,
- struct dma_chan **chan, int num_chan,
- bool card_rel_da);
-void scif_unregister_device(struct scif_hw_dev *sdev);
-
-static inline struct scif_hw_dev *dev_to_scif(struct device *dev)
-{
- return container_of(dev, struct scif_hw_dev, dev);
-}
-
-static inline struct scif_driver *drv_to_scif(struct device_driver *drv)
-{
- return container_of(drv, struct scif_driver, driver);
-}
-#endif /* _SCIF_BUS_H */
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
deleted file mode 100644
index 3c865534868a..000000000000
--- a/drivers/misc/mic/bus/vop_bus.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) Bus driver.
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/dma-mapping.h>
-
-#include "vop_bus.h"
-
-static ssize_t device_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.device);
-}
-static DEVICE_ATTR_RO(device);
-
-static ssize_t vendor_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "0x%04x\n", dev->id.vendor);
-}
-static DEVICE_ATTR_RO(vendor);
-
-static ssize_t modalias_show(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- return sprintf(buf, "vop:d%08Xv%08X\n",
- dev->id.device, dev->id.vendor);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *vop_dev_attrs[] = {
- &dev_attr_device.attr,
- &dev_attr_vendor.attr,
- &dev_attr_modalias.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(vop_dev);
-
-static inline int vop_id_match(const struct vop_device *dev,
- const struct vop_device_id *id)
-{
- if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
- return 0;
-
- return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
-}
-
-/*
- * This looks through all the IDs a driver claims to support. If any of them
- * match, we return 1 and the kernel will call vop_dev_probe().
- */
-static int vop_dev_match(struct device *dv, struct device_driver *dr)
-{
- unsigned int i;
- struct vop_device *dev = dev_to_vop(dv);
- const struct vop_device_id *ids;
-
- ids = drv_to_vop(dr)->id_table;
- for (i = 0; ids[i].device; i++)
- if (vop_id_match(dev, &ids[i]))
- return 1;
- return 0;
-}
-
-static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
-{
- struct vop_device *dev = dev_to_vop(dv);
-
- return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
- dev->id.device, dev->id.vendor);
-}
-
-static int vop_dev_probe(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
- struct vop_driver *drv = drv_to_vop(dev->dev.driver);
-
- return drv->probe(dev);
-}
-
-static int vop_dev_remove(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
- struct vop_driver *drv = drv_to_vop(dev->dev.driver);
-
- drv->remove(dev);
- return 0;
-}
-
-static struct bus_type vop_bus = {
- .name = "vop_bus",
- .match = vop_dev_match,
- .dev_groups = vop_dev_groups,
- .uevent = vop_uevent,
- .probe = vop_dev_probe,
- .remove = vop_dev_remove,
-};
-
-int vop_register_driver(struct vop_driver *driver)
-{
- driver->driver.bus = &vop_bus;
- return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(vop_register_driver);
-
-void vop_unregister_driver(struct vop_driver *driver)
-{
- driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(vop_unregister_driver);
-
-static void vop_release_dev(struct device *d)
-{
- struct vop_device *dev = dev_to_vop(d);
-
- kfree(dev);
-}
-
-struct vop_device *
-vop_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
- struct dma_chan *chan)
-{
- int ret;
- struct vop_device *vdev;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return ERR_PTR(-ENOMEM);
-
- vdev->dev.parent = pdev;
- vdev->id.device = id;
- vdev->id.vendor = VOP_DEV_ANY_ID;
- vdev->dev.dma_ops = dma_ops;
- vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
- dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
- vdev->dev.release = vop_release_dev;
- vdev->hw_ops = hw_ops;
- vdev->dev.bus = &vop_bus;
- vdev->dnode = dnode;
- vdev->aper = aper;
- vdev->dma_ch = chan;
- vdev->index = dnode - 1;
- dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
- /*
- * device_register() causes the bus infrastructure to look for a
- * matching driver.
- */
- ret = device_register(&vdev->dev);
- if (ret)
- goto free_vdev;
- return vdev;
-free_vdev:
- put_device(&vdev->dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(vop_register_device);
-
-void vop_unregister_device(struct vop_device *dev)
-{
- device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL_GPL(vop_unregister_device);
-
-static int __init vop_init(void)
-{
- return bus_register(&vop_bus);
-}
-
-static void __exit vop_exit(void)
-{
- bus_unregister(&vop_bus);
-}
-
-core_initcall(vop_init);
-module_exit(vop_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
deleted file mode 100644
index 4fa02808c1e2..000000000000
--- a/drivers/misc/mic/bus/vop_bus.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio over PCIe Bus driver.
- */
-#ifndef _VOP_BUS_H_
-#define _VOP_BUS_H_
-/*
- * Everything a vop driver needs to work with any particular vop
- * implementation.
- */
-#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
-
-#include "../common/mic_dev.h"
-
-struct vop_device_id {
- u32 device;
- u32 vendor;
-};
-
-#define VOP_DEV_TRNSP 1
-#define VOP_DEV_ANY_ID 0xffffffff
-/*
- * Size of the internal buffer used during DMAs as an intermediate buffer
- * for copying to/from user space. Must be an integral number of pages.
- */
-#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
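-/* With 4 KiB pages, for example, this evaluates to 64 KiB, i.e. 16 pages. */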
-
-/**
- * vop_device - representation of a device using vop
- * @hw_ops: the hardware ops supported by this device.
- * @id: the device type identification (used to match it with a driver).
- * @dev: underlying device.
- * @dnode: The destination node which this device will communicate with.
- * @aper: Aperture memory window
- * @dma_ch: DMA channel
- * @index: unique position on the vop bus
- */
-struct vop_device {
- struct vop_hw_ops *hw_ops;
- struct vop_device_id id;
- struct device dev;
- u8 dnode;
- struct mic_mw *aper;
- struct dma_chan *dma_ch;
- int index;
-};
-
-/**
- * vop_driver - operations for a vop I/O driver
- * @driver: underlying device driver (populate name and owner).
- * @id_table: the ids serviced by this driver.
- * @probe: the function to call when a device is found. Returns 0 or -errno.
- * @remove: the function to call when a device is removed.
- */
-struct vop_driver {
- struct device_driver driver;
- const struct vop_device_id *id_table;
- int (*probe)(struct vop_device *dev);
- void (*remove)(struct vop_device *dev);
-};
-
-/**
- * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
- *
- * @next_db: Obtain the next available doorbell.
- * @request_irq: Request an interrupt on a particular doorbell.
- * @free_irq: Free an interrupt requested previously.
- * @ack_interrupt: acknowledge an interrupt in the ISR.
- * @get_remote_dp: Get access to the virtio device page used by the remote
- * node to add/remove/configure virtio devices.
- * @get_dp: Get access to the virtio device page used by the self
- * node to add/remove/configure virtio devices.
- * @send_intr: Send an interrupt to the peer node on a specified doorbell.
- * @remap: Map a buffer with the specified DMA address and length.
- * @unmap: Unmap a buffer previously mapped.
- * @dma_filter: The DMA filter function to use for obtaining access to
- * a DMA channel on the peer node.
- */
-struct vop_hw_ops {
- int (*next_db)(struct vop_device *vpdev);
- struct mic_irq *(*request_irq)(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data,
- int intr_src);
- void (*free_irq)(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data);
- void (*ack_interrupt)(struct vop_device *vpdev, int num);
- void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
- void * (*get_dp)(struct vop_device *vpdev);
- void (*send_intr)(struct vop_device *vpdev, int db);
- void __iomem * (*remap)(struct vop_device *vpdev,
- dma_addr_t pa, size_t len);
- void (*unmap)(struct vop_device *vpdev, void __iomem *va);
-};
-
-struct vop_device *
-vop_register_device(struct device *pdev, int id,
- const struct dma_map_ops *dma_ops,
- struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
- struct dma_chan *chan);
-void vop_unregister_device(struct vop_device *dev);
-int vop_register_driver(struct vop_driver *drv);
-void vop_unregister_driver(struct vop_driver *drv);
-
-/*
- * module_vop_driver() - Helper macro for drivers that don't do
- * anything special in module init/exit. This eliminates a lot of
- * boilerplate. Each module may only use this macro once, and
- * calling it replaces module_init() and module_exit()
- */
-#define module_vop_driver(__vop_driver) \
- module_driver(__vop_driver, vop_register_driver, \
- vop_unregister_driver)
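-
-/*
- * Typical use (my_vop_driver is a hypothetical, fully populated
- * struct vop_driver):
- *
- *	module_vop_driver(my_vop_driver);
- */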
-
-static inline struct vop_device *dev_to_vop(struct device *dev)
-{
- return container_of(dev, struct vop_device, dev);
-}
-
-static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
-{
- return container_of(drv, struct vop_driver, driver);
-}
-#endif /* _VOP_BUS_H */
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
deleted file mode 100644
index 921a7e7e0fbd..000000000000
--- a/drivers/misc/mic/card/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-ccflags-y += -DINTEL_MIC_CARD
-
-obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
-mic_card-y += mic_x100.o
-mic_card-y += mic_device.o
-mic_card-y += mic_debugfs.o
diff --git a/drivers/misc/mic/card/mic_debugfs.c b/drivers/misc/mic/card/mic_debugfs.c
deleted file mode 100644
index 4c326e8f4d99..000000000000
--- a/drivers/misc/mic/card/mic_debugfs.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-/* Debugfs parent dir */
-static struct dentry *mic_dbg;
-
-/*
- * mic_intr_show - Send interrupts to host.
- */
-static int mic_intr_show(struct seq_file *s, void *unused)
-{
- struct mic_driver *mdrv = s->private;
- struct mic_device *mdev = &mdrv->mdev;
-
- mic_send_intr(mdev, 0);
- msleep(1000);
- mic_send_intr(mdev, 1);
- msleep(1000);
- mic_send_intr(mdev, 2);
- msleep(1000);
- mic_send_intr(mdev, 3);
- msleep(1000);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_intr);
-
-/*
- * mic_create_card_debug_dir - Initialize MIC debugfs entries.
- */
-void __init mic_create_card_debug_dir(struct mic_driver *mdrv)
-{
- if (!mic_dbg)
- return;
-
- mdrv->dbg_dir = debugfs_create_dir(mdrv->name, mic_dbg);
-
- debugfs_create_file("intr_test", 0444, mdrv->dbg_dir, mdrv,
- &mic_intr_fops);
-}
-
-/*
- * mic_delete_card_debug_dir - Uninitialize MIC debugfs entries.
- */
-void mic_delete_card_debug_dir(struct mic_driver *mdrv)
-{
- debugfs_remove_recursive(mdrv->dbg_dir);
-}
-
-/*
- * mic_init_card_debugfs - Initialize global debugfs entry.
- */
-void __init mic_init_card_debugfs(void)
-{
- mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-/*
- * mic_exit_card_debugfs - Uninitialize global debugfs entry
- */
-void mic_exit_card_debugfs(void)
-{
- debugfs_remove(mic_dbg);
-}
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
deleted file mode 100644
index a15606259bdc..000000000000
--- a/drivers/misc/mic/card/mic_device.c
+++ /dev/null
@@ -1,417 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/dmaengine.h>
-#include <linux/kmod.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-static struct mic_driver *g_drv;
-
-static int __init mic_dp_init(void)
-{
- struct mic_driver *mdrv = g_drv;
- struct mic_device *mdev = &mdrv->mdev;
- struct mic_bootparam __iomem *bootparam;
- u64 lo, hi, dp_dma_addr;
- u32 magic;
-
- lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD);
- hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD);
-
- dp_dma_addr = lo | (hi << 32);
- mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE);
- if (!mdrv->dp) {
- dev_err(mdrv->dev, "Cannot remap Aperture BAR\n");
- return -ENOMEM;
- }
- bootparam = mdrv->dp;
- magic = ioread32(&bootparam->magic);
-	if (MIC_MAGIC != magic) {
-		dev_err(mdrv->dev, "bootparam magic mismatch 0x%x\n", magic);
-		mic_card_unmap(mdev, mdrv->dp);
-		return -EIO;
-	}
- return 0;
-}
-
-/* Uninitialize the device page */
-static void mic_dp_uninit(void)
-{
- mic_card_unmap(&g_drv->mdev, g_drv->dp);
-}
-
-/**
- * mic_request_card_irq - request an irq.
- *
- * @handler: interrupt handler passed to request_threaded_irq.
- * @thread_fn: thread function passed to request_threaded_irq.
- * @name: The ASCII name of the caller requesting the irq.
- * @data: private data that is returned back when calling the
- * function handler.
- * @index: The doorbell index of the requester.
- *
- * returns: The cookie that is transparent to the caller. Passed
- * back when calling mic_free_card_irq. An appropriate error code
- * is returned on failure. Caller needs to use IS_ERR(return_val)
- * to check for failure and PTR_ERR(return_val) to obtain the
- * error code.
- *
- */
-struct mic_irq *
-mic_request_card_irq(irq_handler_t handler,
- irq_handler_t thread_fn, const char *name,
- void *data, int index)
-{
- int rc = 0;
- unsigned long cookie;
- struct mic_driver *mdrv = g_drv;
-
- rc = request_threaded_irq(mic_db_to_irq(mdrv, index), handler,
- thread_fn, 0, name, data);
- if (rc) {
- dev_err(mdrv->dev, "request_threaded_irq failed rc = %d\n", rc);
- goto err;
- }
- mdrv->irq_info.irq_usage_count[index]++;
- cookie = index;
- return (struct mic_irq *)cookie;
-err:
- return ERR_PTR(rc);
-}
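-
-/*
- * Illustrative call pattern (my_handler and my_data are hypothetical;
- * db would typically come from mic_next_card_db()):
- *
- *	struct mic_irq *cookie;
- *
- *	cookie = mic_request_card_irq(my_handler, NULL, "my-irq",
- *				      my_data, db);
- *	if (IS_ERR(cookie))
- *		return PTR_ERR(cookie);
- */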
-
-/**
- * mic_free_card_irq - free irq.
- *
- * @cookie: cookie obtained during a successful call to mic_request_card_irq
- * @data: private data specified by the calling function during the
- * mic_request_card_irq call
- *
- * returns: none.
- */
-void mic_free_card_irq(struct mic_irq *cookie, void *data)
-{
- int index;
- struct mic_driver *mdrv = g_drv;
-
- index = (unsigned long)cookie & 0xFFFFU;
- free_irq(mic_db_to_irq(mdrv, index), data);
- mdrv->irq_info.irq_usage_count[index]--;
-}
-
-/**
- * mic_next_card_db - Get the doorbell with minimum usage count.
- *
- * Returns the doorbell index.
- */
-int mic_next_card_db(void)
-{
- int i;
- int index = 0;
- struct mic_driver *mdrv = g_drv;
-
- for (i = 0; i < mdrv->intr_info.num_intr; i++) {
- if (mdrv->irq_info.irq_usage_count[i] <
- mdrv->irq_info.irq_usage_count[index])
- index = i;
- }
-
- return index;
-}
-
-/**
- * mic_init_irq - Initialize irq information.
- *
- * Returns 0 on success, an appropriate error code on failure.
- */
-static int mic_init_irq(void)
-{
- struct mic_driver *mdrv = g_drv;
-
- mdrv->irq_info.irq_usage_count = kzalloc((sizeof(u32) *
- mdrv->intr_info.num_intr),
- GFP_KERNEL);
- if (!mdrv->irq_info.irq_usage_count)
- return -ENOMEM;
- return 0;
-}
-
-/**
- * mic_uninit_irq - Uninitialize irq information.
- *
- * None.
- */
-static void mic_uninit_irq(void)
-{
- struct mic_driver *mdrv = g_drv;
-
- kfree(mdrv->irq_info.irq_usage_count);
-}
-
-static inline struct mic_driver *scdev_to_mdrv(struct scif_hw_dev *scdev)
-{
- return dev_get_drvdata(scdev->dev.parent);
-}
-
-static struct mic_irq *
-___mic_request_irq(struct scif_hw_dev *scdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data,
- int db)
-{
- return mic_request_card_irq(func, NULL, name, data, db);
-}
-
-static void
-___mic_free_irq(struct scif_hw_dev *scdev,
- struct mic_irq *cookie, void *data)
-{
- return mic_free_card_irq(cookie, data);
-}
-
-static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_ack_interrupt(&mdrv->mdev);
-}
-
-static int ___mic_next_db(struct scif_hw_dev *scdev)
-{
- return mic_next_card_db();
-}
-
-static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_send_intr(&mdrv->mdev, db);
-}
-
-static void ___mic_send_p2p_intr(struct scif_hw_dev *scdev, int db,
- struct mic_mw *mw)
-{
- mic_send_p2p_intr(db, mw);
-}
-
-static void __iomem *
-___mic_ioremap(struct scif_hw_dev *scdev,
- phys_addr_t pa, size_t len)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- return mic_card_map(&mdrv->mdev, pa, len);
-}
-
-static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
-{
- struct mic_driver *mdrv = scdev_to_mdrv(scdev);
-
- mic_card_unmap(&mdrv->mdev, va);
-}
-
-static struct scif_hw_ops scif_hw_ops = {
- .request_irq = ___mic_request_irq,
- .free_irq = ___mic_free_irq,
- .ack_interrupt = ___mic_ack_interrupt,
- .next_db = ___mic_next_db,
- .send_intr = ___mic_send_intr,
- .send_p2p_intr = ___mic_send_p2p_intr,
- .remap = ___mic_ioremap,
- .unmap = ___mic_iounmap,
-};
-
-static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
-{
- return dev_get_drvdata(vpdev->dev.parent);
-}
-
-static struct mic_irq *
-__mic_request_irq(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data, int intr_src)
-{
- return mic_request_card_irq(func, NULL, name, data, intr_src);
-}
-
-static void __mic_free_irq(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data)
-{
- return mic_free_card_irq(cookie, data);
-}
-
-static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_ack_interrupt(&mdrv->mdev);
-}
-
-static int __mic_next_db(struct vop_device *vpdev)
-{
- return mic_next_card_db();
-}
-
-static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- return mdrv->dp;
-}
-
-static void __mic_send_intr(struct vop_device *vpdev, int db)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_send_intr(&mdrv->mdev, db);
-}
-
-static void __iomem *__mic_ioremap(struct vop_device *vpdev,
- dma_addr_t pa, size_t len)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- return mic_card_map(&mdrv->mdev, pa, len);
-}
-
-static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
-{
- struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
-
- mic_card_unmap(&mdrv->mdev, va);
-}
-
-static struct vop_hw_ops vop_hw_ops = {
- .request_irq = __mic_request_irq,
- .free_irq = __mic_free_irq,
- .ack_interrupt = __mic_ack_interrupt,
- .next_db = __mic_next_db,
- .get_remote_dp = __mic_get_remote_dp,
- .send_intr = __mic_send_intr,
- .remap = __mic_ioremap,
- .unmap = __mic_iounmap,
-};
-
-static int mic_request_dma_chans(struct mic_driver *mdrv)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
-
- do {
- chan = dma_request_channel(mask, NULL, NULL);
- if (chan) {
- mdrv->dma_ch[mdrv->num_dma_ch++] = chan;
- if (mdrv->num_dma_ch >= MIC_MAX_DMA_CHAN)
- break;
- }
- } while (chan);
- dev_info(mdrv->dev, "DMA channels # %d\n", mdrv->num_dma_ch);
- return mdrv->num_dma_ch;
-}
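-
-/*
- * A return value of 0 (no DMA_MEMCPY channels found) is treated as a
- * fatal error by mic_driver_init() below.
- */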
-
-static void mic_free_dma_chans(struct mic_driver *mdrv)
-{
- int i = 0;
-
- for (i = 0; i < mdrv->num_dma_ch; i++) {
- dma_release_channel(mdrv->dma_ch[i]);
- mdrv->dma_ch[i] = NULL;
- }
- mdrv->num_dma_ch = 0;
-}
-
-/*
- * mic_driver_init - MIC driver initialization tasks.
- *
- * Returns 0 on success, an appropriate error code on failure.
- */
-int __init mic_driver_init(struct mic_driver *mdrv)
-{
- int rc;
- struct mic_bootparam __iomem *bootparam;
- u8 node_id;
-
- g_drv = mdrv;
- /* Unloading the card module is not supported. */
- if (!try_module_get(mdrv->dev->driver->owner)) {
- rc = -ENODEV;
- goto done;
- }
- rc = mic_dp_init();
- if (rc)
- goto put;
- rc = mic_init_irq();
- if (rc)
- goto dp_uninit;
- if (!mic_request_dma_chans(mdrv)) {
- rc = -ENODEV;
- goto irq_uninit;
- }
- mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
- NULL, &vop_hw_ops, 0,
- NULL, mdrv->dma_ch[0]);
- if (IS_ERR(mdrv->vpdev)) {
- rc = PTR_ERR(mdrv->vpdev);
- goto dma_free;
- }
- bootparam = mdrv->dp;
- node_id = ioread8(&bootparam->node_id);
- mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
- NULL, &scif_hw_ops,
- 0, node_id, &mdrv->mdev.mmio, NULL,
- NULL, mdrv->dp, mdrv->dma_ch,
- mdrv->num_dma_ch, true);
- if (IS_ERR(mdrv->scdev)) {
- rc = PTR_ERR(mdrv->scdev);
- goto vop_remove;
- }
- mic_create_card_debug_dir(mdrv);
-done:
- return rc;
-vop_remove:
- vop_unregister_device(mdrv->vpdev);
-dma_free:
- mic_free_dma_chans(mdrv);
-irq_uninit:
- mic_uninit_irq();
-dp_uninit:
- mic_dp_uninit();
-put:
- module_put(mdrv->dev->driver->owner);
- return rc;
-}
-
-/*
- * mic_driver_uninit - MIC driver uninitialization tasks.
- *
- * Returns: none.
- */
-void mic_driver_uninit(struct mic_driver *mdrv)
-{
- mic_delete_card_debug_dir(mdrv);
- scif_unregister_device(mdrv->scdev);
- vop_unregister_device(mdrv->vpdev);
- mic_free_dma_chans(mdrv);
- mic_uninit_irq();
- mic_dp_uninit();
- module_put(mdrv->dev->driver->owner);
-}
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
deleted file mode 100644
index d6cc69a235a3..000000000000
--- a/drivers/misc/mic/card/mic_device.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#ifndef _MIC_CARD_DEVICE_H_
-#define _MIC_CARD_DEVICE_H_
-
-#include <linux/workqueue.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-
-/**
- * struct mic_intr_info - Contains h/w specific interrupt sources info
- *
- * @num_intr: The number of irqs available
- */
-struct mic_intr_info {
- u32 num_intr;
-};
-
-/**
- * struct mic_irq_info - OS specific irq information
- *
- * @irq_usage_count: usage count array tracking the number of sources
- * assigned for each irq.
- */
-struct mic_irq_info {
- int *irq_usage_count;
-};
-
-/**
- * struct mic_device - MIC device information.
- *
- * @mmio: MMIO bar information.
- */
-struct mic_device {
- struct mic_mw mmio;
-};
-
-/**
- * struct mic_driver - MIC card driver information.
- *
- * @name: Name for MIC driver.
- * @dbg_dir: debugfs directory of this MIC device.
- * @dev: The device backing this MIC.
- * @dp: The pointer to the virtio device page.
- * @mdev: MIC device information for the host.
- * @hotplug_work: Hot plug work for adding/removing virtio devices.
- * @irq_info: The OS specific irq information
- * @intr_info: H/W specific interrupt information.
- * @dma_mbdev: dma device on the MIC virtual bus.
- * @dma_ch: Array of DMA channels
- * @num_dma_ch: Number of DMA channels available
- * @scdev: SCIF device on the SCIF virtual bus.
- * @vpdev: Virtio over PCIe device on the VOP virtual bus.
- */
-struct mic_driver {
- char name[20];
- struct dentry *dbg_dir;
- struct device *dev;
- void __iomem *dp;
- struct mic_device mdev;
- struct work_struct hotplug_work;
- struct mic_irq_info irq_info;
- struct mic_intr_info intr_info;
- struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
- int num_dma_ch;
- struct scif_hw_dev *scdev;
- struct vop_device *vpdev;
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/**
- * mic_mmio_read - read from an MMIO register.
- * @mw: MMIO register base virtual address.
- * @offset: register offset.
- *
- * RETURNS: register value.
- */
-static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
-{
- return ioread32(mw->va + offset);
-}
-
-/**
- * mic_mmio_write - write to an MMIO register.
- * @mw: MMIO register base virtual address.
- * @val: the data value to put into the register
- * @offset: register offset.
- *
- * RETURNS: none.
- */
-static inline void
-mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
-{
- iowrite32(val, mw->va + offset);
-}
-
-int mic_driver_init(struct mic_driver *mdrv);
-void mic_driver_uninit(struct mic_driver *mdrv);
-int mic_next_card_db(void);
-struct mic_irq *
-mic_request_card_irq(irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int db);
-void mic_free_card_irq(struct mic_irq *cookie, void *data);
-u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
-void mic_send_intr(struct mic_device *mdev, int doorbell);
-void mic_send_p2p_intr(int doorbell, struct mic_mw *mw);
-int mic_db_to_irq(struct mic_driver *mdrv, int db);
-u32 mic_ack_interrupt(struct mic_device *mdev);
-void mic_hw_intr_init(struct mic_driver *mdrv);
-void __iomem *
-mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size);
-void mic_card_unmap(struct mic_device *mdev, void __iomem *addr);
-void __init mic_create_card_debug_dir(struct mic_driver *mdrv);
-void mic_delete_card_debug_dir(struct mic_driver *mdrv);
-void __init mic_init_card_debugfs(void);
-void mic_exit_card_debugfs(void);
-#endif
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
deleted file mode 100644
index c8bff2916d3d..000000000000
--- a/drivers/misc/mic/card/mic_x100.c
+++ /dev/null
@@ -1,347 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-
-static const char mic_driver_name[] = "mic";
-
-static struct mic_driver g_drv;
-
-/**
- * mic_read_spad - read from the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to scratchpad register, 0 based
- *
- * This function allows reading of the 32 bit scratchpad register.
- *
- * RETURNS: the value of the scratchpad register.
- */
-u32 mic_read_spad(struct mic_device *mdev, unsigned int idx)
-{
- return mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-}
-
-/**
- * mic_send_intr - Send an interrupt to the host.
- * @mdev: pointer to mic_device instance
- * @doorbell: Doorbell number.
- */
-void mic_send_intr(struct mic_device *mdev, int doorbell)
-{
- struct mic_mw *mw = &mdev->mmio;
-
- if (doorbell > MIC_X100_MAX_DOORBELL_IDX)
- return;
- /* Ensure that the interrupt is ordered w.r.t previous stores. */
- wmb();
- mic_mmio_write(mw, MIC_X100_SBOX_SDBIC0_DBREQ_BIT,
- MIC_X100_SBOX_BASE_ADDRESS +
- (MIC_X100_SBOX_SDBIC0 + (4 * doorbell)));
-}
-
-/*
- * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
- */
-static void mic_x100_send_sbox_intr(struct mic_mw *mw, int doorbell)
-{
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
- u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
- apic_icr_offset);
-
- /* for MIC we need to make sure we "hit" the send_icr bit (13) */
- apicicr_low = (apicicr_low | (1 << 13));
- /*
- * Ensure that the interrupt is ordered w.r.t. previous stores
- * to main memory. Fence instructions are not implemented in X100
- * since execution is in order but a compiler barrier is still
- * required.
- */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-static void mic_x100_send_rdmasr_intr(struct mic_mw *mw, int doorbell)
-{
- int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
- /*
- * Ensure that the interrupt is ordered w.r.t. previous stores
- * to main memory. Fence instructions are not implemented in X100
- * since execution is in order but a compiler barrier is still
- * required.
- */
- wmb();
- mic_mmio_write(mw, 0, MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
-}
-
-/**
- * mic_ack_interrupt - Device specific interrupt handling.
- * @mdev: pointer to mic_device instance
- *
- * Returns: bitmask of doorbell events triggered.
- */
-u32 mic_ack_interrupt(struct mic_device *mdev)
-{
- return 0;
-}
-
-static inline int mic_get_sbox_irq(int db)
-{
- return MIC_X100_IRQ_BASE + db;
-}
-
-static inline int mic_get_rdmasr_irq(int index)
-{
- return MIC_X100_RDMASR_IRQ_BASE + index;
-}
-
-void mic_send_p2p_intr(int db, struct mic_mw *mw)
-{
- int rdmasr_index;
-
- if (db < MIC_X100_NUM_SBOX_IRQ) {
- mic_x100_send_sbox_intr(mw, db);
- } else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
- mic_x100_send_rdmasr_intr(mw, rdmasr_index);
- }
-}
-
-/**
- * mic_hw_intr_init - Initialize h/w specific interrupt
- * information.
- * @mdrv: pointer to mic_driver
- */
-void mic_hw_intr_init(struct mic_driver *mdrv)
-{
- mdrv->intr_info.num_intr = MIC_X100_NUM_SBOX_IRQ +
- MIC_X100_NUM_RDMASR_IRQ;
-}
-
-/**
- * mic_db_to_irq - Retrieve irq number corresponding to a doorbell.
- * @mdrv: pointer to mic_driver
- * @db: The doorbell obtained for which the irq is needed. Doorbell
- * may correspond to an sbox doorbell or an rdmasr index.
- *
- * Returns the irq corresponding to the doorbell.
- */
-int mic_db_to_irq(struct mic_driver *mdrv, int db)
-{
- int rdmasr_index;
-
- /*
-	 * The total number of doorbell interrupts on the card is 16. Indices
-	 * 0-7 fall in the SBOX category and 8-15 fall in the RDMASR category.
- */
- if (db < MIC_X100_NUM_SBOX_IRQ) {
- return mic_get_sbox_irq(db);
- } else {
- rdmasr_index = db - MIC_X100_NUM_SBOX_IRQ;
- return mic_get_rdmasr_irq(rdmasr_index);
- }
-}
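-
-/*
- * With the constants in mic_x100.h this maps, for example, db 3 to irq
- * MIC_X100_IRQ_BASE + 3 = 29, and db 10 to rdmasr index 2, i.e. irq
- * MIC_X100_RDMASR_IRQ_BASE + 2 = 19.
- */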
-
-/*
- * mic_card_map - Allocate virtual address for a remote memory region.
- * @mdev: pointer to mic_device instance.
- * @addr: Remote DMA address.
- * @size: Size of the region.
- *
- * Returns: Virtual address backing the remote memory region.
- */
-void __iomem *
-mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size)
-{
- return ioremap(addr, size);
-}
-
-/*
- * mic_card_unmap - Unmap the virtual address for a remote memory region.
- * @mdev: pointer to mic_device instance.
- * @addr: Virtual address for remote memory region.
- *
- * Returns: None.
- */
-void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
-{
- iounmap(addr);
-}
-
-static inline struct mic_driver *mbdev_to_mdrv(struct mbus_device *mbdev)
-{
- return dev_get_drvdata(mbdev->dev.parent);
-}
-
-static struct mic_irq *
-_mic_request_threaded_irq(struct mbus_device *mbdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src)
-{
- int rc = 0;
- unsigned int irq = intr_src;
- unsigned long cookie = irq;
-
- rc = request_threaded_irq(irq, handler, thread_fn, 0, name, data);
- if (rc) {
- dev_err(mbdev_to_mdrv(mbdev)->dev,
- "request_threaded_irq failed rc = %d\n", rc);
- return ERR_PTR(rc);
- }
- return (struct mic_irq *)cookie;
-}
-
-static void _mic_free_irq(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data)
-{
- unsigned long irq = (unsigned long)cookie;
- free_irq(irq, data);
-}
-
-static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
-{
- mic_ack_interrupt(&mbdev_to_mdrv(mbdev)->mdev);
-}
-
-static struct mbus_hw_ops mbus_hw_ops = {
- .request_threaded_irq = _mic_request_threaded_irq,
- .free_irq = _mic_free_irq,
- .ack_interrupt = _mic_ack_interrupt,
-};
-
-static int __init mic_probe(struct platform_device *pdev)
-{
- struct mic_driver *mdrv = &g_drv;
- struct mic_device *mdev = &mdrv->mdev;
- int rc = 0;
-
- mdrv->dev = &pdev->dev;
-	snprintf(mdrv->name, sizeof(mdrv->name), "%s", mic_driver_name);
-
- /* FIXME: use dma_set_mask_and_coherent() and check result */
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-
- mdev->mmio.pa = MIC_X100_MMIO_BASE;
- mdev->mmio.len = MIC_X100_MMIO_LEN;
- mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
- MIC_X100_MMIO_LEN);
- if (!mdev->mmio.va) {
- dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
- rc = -EIO;
- goto done;
- }
- mic_hw_intr_init(mdrv);
- platform_set_drvdata(pdev, mdrv);
- mdrv->dma_mbdev = mbus_register_device(mdrv->dev, MBUS_DEV_DMA_MIC,
- NULL, &mbus_hw_ops, 0,
- mdrv->mdev.mmio.va);
- if (IS_ERR(mdrv->dma_mbdev)) {
- rc = PTR_ERR(mdrv->dma_mbdev);
- dev_err(&pdev->dev, "mbus_add_device failed rc %d\n", rc);
- goto done;
- }
- rc = mic_driver_init(mdrv);
- if (rc) {
- dev_err(&pdev->dev, "mic_driver_init failed rc %d\n", rc);
- goto remove_dma;
- }
-done:
- return rc;
-remove_dma:
- mbus_unregister_device(mdrv->dma_mbdev);
- return rc;
-}
-
-static int mic_remove(struct platform_device *pdev)
-{
- struct mic_driver *mdrv = &g_drv;
-
- mic_driver_uninit(mdrv);
- mbus_unregister_device(mdrv->dma_mbdev);
- return 0;
-}
-
-static void mic_platform_shutdown(struct platform_device *pdev)
-{
- mic_remove(pdev);
-}
-
-static struct platform_driver __refdata mic_platform_driver = {
- .probe = mic_probe,
- .remove = mic_remove,
- .shutdown = mic_platform_shutdown,
- .driver = {
- .name = mic_driver_name,
- },
-};
-
-static struct platform_device *mic_platform_dev;
-
-static int __init mic_init(void)
-{
- int ret;
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!(c->x86 == 11 && c->x86_model == 1)) {
- ret = -ENODEV;
- pr_err("%s not running on X100 ret %d\n", __func__, ret);
- goto done;
- }
-
- request_module("mic_x100_dma");
- mic_init_card_debugfs();
-
- mic_platform_dev = platform_device_register_simple(mic_driver_name,
- 0, NULL, 0);
- ret = PTR_ERR_OR_ZERO(mic_platform_dev);
- if (ret) {
- pr_err("platform_device_register_full ret %d\n", ret);
- goto cleanup_debugfs;
- }
- ret = platform_driver_register(&mic_platform_driver);
- if (ret) {
- pr_err("platform_driver_register ret %d\n", ret);
- goto device_unregister;
- }
- return ret;
-
-device_unregister:
- platform_device_unregister(mic_platform_dev);
-cleanup_debugfs:
- mic_exit_card_debugfs();
-done:
- return ret;
-}
-
-static void __exit mic_exit(void)
-{
- platform_driver_unregister(&mic_platform_driver);
- platform_device_unregister(mic_platform_dev);
- mic_exit_card_debugfs();
-}
-
-module_init(mic_init);
-module_exit(mic_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 Card driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/card/mic_x100.h b/drivers/misc/mic/card/mic_x100.h
deleted file mode 100644
index 46644dde0c07..000000000000
--- a/drivers/misc/mic/card/mic_x100.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- */
-#ifndef _MIC_X100_CARD_H_
-#define _MIC_X100_CARD_H_
-
-#define MIC_X100_MMIO_BASE 0x08007C0000ULL
-#define MIC_X100_MMIO_LEN 0x00020000ULL
-#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000ULL
-
-#define MIC_X100_SBOX_SPAD0 0x0000AB20
-#define MIC_X100_SBOX_SDBIC0 0x0000CC90
-#define MIC_X100_SBOX_SDBIC0_DBREQ_BIT 0x80000000
-#define MIC_X100_SBOX_RDMASR0 0x0000B180
-#define MIC_X100_SBOX_APICICR0 0x0000A9D0
-
-#define MIC_X100_MAX_DOORBELL_IDX 8
-
-#define MIC_X100_NUM_SBOX_IRQ 8
-#define MIC_X100_NUM_RDMASR_IRQ 8
-#define MIC_X100_SBOX_IRQ_BASE 0
-#define MIC_X100_RDMASR_IRQ_BASE 17
-
-#define MIC_X100_IRQ_BASE 26
-
-#endif
diff --git a/drivers/misc/mic/common/mic_dev.h b/drivers/misc/mic/common/mic_dev.h
deleted file mode 100644
index f94f08df0260..000000000000
--- a/drivers/misc/mic/common/mic_dev.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC driver.
- */
-#ifndef __MIC_DEV_H__
-#define __MIC_DEV_H__
-
-/* The maximum number of MIC devices supported in a single host system. */
-#define MIC_MAX_NUM_DEVS 128
-
-/**
- * enum mic_hw_family - The hardware family to which a device belongs.
- */
-enum mic_hw_family {
- MIC_FAMILY_X100 = 0,
- MIC_FAMILY_X200,
- MIC_FAMILY_UNKNOWN,
- MIC_FAMILY_LAST
-};
-
-/**
- * struct mic_mw - MIC memory window
- *
- * @pa: Base physical address.
- * @va: Base ioremap'd virtual address.
- * @len: Size of the memory window.
- */
-struct mic_mw {
- phys_addr_t pa;
- void __iomem *va;
- resource_size_t len;
-};
-
-/*
- * Scratch pad register offsets used by the host to communicate
- * device page DMA address to the card.
- */
-#define MIC_DPLO_SPAD 14
-#define MIC_DPHI_SPAD 15
-
-/*
- * These values are supposed to be in the config_change field of the
- * device page when the host sends a config change interrupt to the card.
- */
-#define MIC_VIRTIO_PARAM_DEV_REMOVE 0x1
-#define MIC_VIRTIO_PARAM_CONFIG_CHANGED 0x2
-
-/* Maximum number of DMA channels */
-#define MIC_MAX_DMA_CHAN 4
-
-#endif
diff --git a/drivers/misc/mic/cosm/Makefile b/drivers/misc/mic/cosm/Makefile
deleted file mode 100644
index 97d74cb12030..000000000000
--- a/drivers/misc/mic/cosm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Coprocessor State Management (COSM) Driver
-# Copyright(c) 2015, Intel Corporation.
-#
-obj-$(CONFIG_MIC_COSM) += mic_cosm.o
-
-mic_cosm-objs := cosm_main.o
-mic_cosm-objs += cosm_debugfs.o
-mic_cosm-objs += cosm_sysfs.o
-mic_cosm-objs += cosm_scif_server.o
diff --git a/drivers/misc/mic/cosm/cosm_debugfs.c b/drivers/misc/mic/cosm/cosm_debugfs.c
deleted file mode 100644
index cb55653cf1f9..000000000000
--- a/drivers/misc/mic/cosm/cosm_debugfs.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include "cosm_main.h"
-
-/* Debugfs parent dir */
-static struct dentry *cosm_dbg;
-
-/*
- * log_buf_show - Display MIC kernel log buffer
- *
- * log_buf addr/len is read from System.map by user space
- * and populated in sysfs entries.
- */
-static int log_buf_show(struct seq_file *s, void *unused)
-{
- void __iomem *log_buf_va;
- int __iomem *log_buf_len_va;
- struct cosm_device *cdev = s->private;
- void *kva;
- int size;
- u64 aper_offset;
-
- if (!cdev || !cdev->log_buf_addr || !cdev->log_buf_len)
- goto done;
-
- mutex_lock(&cdev->cosm_mutex);
- switch (cdev->state) {
- case MIC_BOOTING:
- case MIC_ONLINE:
- case MIC_SHUTTING_DOWN:
- break;
- default:
- goto unlock;
- }
-
- /*
- * Card kernel will never be relocated and any kernel text/data mapping
- * can be translated to phys address by subtracting __START_KERNEL_map.
- */
- aper_offset = (u64)cdev->log_buf_len - __START_KERNEL_map;
- log_buf_len_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
- aper_offset = (u64)cdev->log_buf_addr - __START_KERNEL_map;
- log_buf_va = cdev->hw_ops->aper(cdev)->va + aper_offset;
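-	/*
-	 * For example (illustrative address): a card kernel address of
-	 * __START_KERNEL_map + 0x1000 is read through aper va + 0x1000.
-	 */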
-
- size = ioread32(log_buf_len_va);
- kva = kmalloc(size, GFP_KERNEL);
- if (!kva)
- goto unlock;
-
- memcpy_fromio(kva, log_buf_va, size);
- seq_write(s, kva, size);
- kfree(kva);
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
-done:
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(log_buf);
-
-/*
- * force_reset_show - Force MIC reset
- *
- * Invokes the force_reset COSM bus op instead of the standard reset
- * op in case a force reset of the MIC device is required
- */
-static int force_reset_show(struct seq_file *s, void *pos)
-{
- struct cosm_device *cdev = s->private;
-
- cosm_stop(cdev, true);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(force_reset);
-
-void cosm_create_debug_dir(struct cosm_device *cdev)
-{
- char name[16];
-
- if (!cosm_dbg)
- return;
-
- scnprintf(name, sizeof(name), "mic%d", cdev->index);
- cdev->dbg_dir = debugfs_create_dir(name, cosm_dbg);
-
- debugfs_create_file("log_buf", 0444, cdev->dbg_dir, cdev,
- &log_buf_fops);
- debugfs_create_file("force_reset", 0444, cdev->dbg_dir, cdev,
- &force_reset_fops);
-}
-
-void cosm_delete_debug_dir(struct cosm_device *cdev)
-{
- debugfs_remove_recursive(cdev->dbg_dir);
-}
-
-void cosm_init_debugfs(void)
-{
- cosm_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-void cosm_exit_debugfs(void)
-{
- debugfs_remove(cosm_dbg);
-}
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
deleted file mode 100644
index ebb0eac43754..000000000000
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/cred.h>
-#include "cosm_main.h"
-
-static const char cosm_driver_name[] = "mic";
-
-/* COSM ID allocator */
-static struct ida g_cosm_ida;
-/* Class of MIC devices for sysfs accessibility. */
-static struct class *g_cosm_class;
-/* Number of MIC devices */
-static atomic_t g_num_dev;
-
-/**
- * cosm_hw_reset - Issue a HW reset for the MIC device
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already reset and ready
- */
-static void cosm_hw_reset(struct cosm_device *cdev, bool force)
-{
- int i;
-
-#define MIC_RESET_TO (45)
- if (force && cdev->hw_ops->force_reset)
- cdev->hw_ops->force_reset(cdev);
- else
- cdev->hw_ops->reset(cdev);
-
- for (i = 0; i < MIC_RESET_TO; i++) {
- if (cdev->hw_ops->ready(cdev)) {
- cosm_set_state(cdev, MIC_READY);
- return;
- }
- /*
- * Resets typically take tens of seconds to complete, and an MMIO
- * read is required to check whether the firmware is ready, so
- * polling once per second works nicely and bounds the wait at
- * MIC_RESET_TO (45) seconds.
- */
- msleep(1000);
- }
- cosm_set_state(cdev, MIC_RESET_FAILED);
-}
-
-/**
- * cosm_start - Start the MIC
- * @cdev: pointer to cosm_device instance
- *
- * This function prepares a MIC for boot and initiates boot.
- * RETURNS: An appropriate -ERRNO error value on error, or 0 for success.
- */
-int cosm_start(struct cosm_device *cdev)
-{
- const struct cred *orig_cred;
- struct cred *override_cred;
- int rc;
-
- mutex_lock(&cdev->cosm_mutex);
- if (!cdev->bootmode) {
- dev_err(&cdev->dev, "%s %d bootmode not set\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto unlock_ret;
- }
-retry:
- if (cdev->state != MIC_READY) {
- dev_err(&cdev->dev, "%s %d MIC state not READY\n",
- __func__, __LINE__);
- rc = -EINVAL;
- goto unlock_ret;
- }
- if (!cdev->hw_ops->ready(cdev)) {
- cosm_hw_reset(cdev, false);
- /*
- * The state will either be MIC_READY if the reset succeeded
- * or MIC_RESET_FAILED if the firmware reset failed.
- */
- goto retry;
- }
-
- /*
- * Set credentials to root to allow a non-root user to download an
- * initramfs with 600 permissions
- */
- override_cred = prepare_creds();
- if (!override_cred) {
- dev_err(&cdev->dev, "%s %d prepare_creds failed\n",
- __func__, __LINE__);
- rc = -ENOMEM;
- goto unlock_ret;
- }
- override_cred->fsuid = GLOBAL_ROOT_UID;
- orig_cred = override_creds(override_cred);
-
- rc = cdev->hw_ops->start(cdev, cdev->index);
-
- revert_creds(orig_cred);
- put_cred(override_cred);
- if (rc)
- goto unlock_ret;
-
- /*
- * If Linux is being booted, the card is treated as 'online' only
- * once the SCIF interface on the card is up. If anything else is
- * booted, we set the card to 'online' immediately.
- */
- if (!strcmp(cdev->bootmode, "linux"))
- cosm_set_state(cdev, MIC_BOOTING);
- else
- cosm_set_state(cdev, MIC_ONLINE);
-unlock_ret:
- mutex_unlock(&cdev->cosm_mutex);
- if (rc)
- dev_err(&cdev->dev, "cosm_start failed rc %d\n", rc);
- return rc;
-}
-
-/**
- * cosm_stop - Prepare the MIC for reset and trigger reset
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already reset and ready.
- *
- * RETURNS: None
- */
-void cosm_stop(struct cosm_device *cdev, bool force)
-{
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_READY || force) {
- /*
- * Don't call hw_ops if they have been called previously.
- * stop(..) calls device_unregister and will crash the system if
- * called multiple times.
- */
- u8 state = cdev->state == MIC_RESETTING ?
- cdev->prev_state : cdev->state;
- bool call_hw_ops = state != MIC_RESET_FAILED &&
- state != MIC_READY;
-
- if (cdev->state != MIC_RESETTING)
- cosm_set_state(cdev, MIC_RESETTING);
- cdev->heartbeat_watchdog_enable = false;
- if (call_hw_ops)
- cdev->hw_ops->stop(cdev, force);
- cosm_hw_reset(cdev, force);
- cosm_set_shutdown_status(cdev, MIC_NOP);
- if (call_hw_ops && cdev->hw_ops->post_reset)
- cdev->hw_ops->post_reset(cdev, cdev->state);
- }
- mutex_unlock(&cdev->cosm_mutex);
- flush_work(&cdev->scif_work);
-}
-
-/**
- * cosm_reset_trigger_work - Trigger MIC reset
- * @work: The work structure
- *
- * This work is scheduled whenever the host wants to reset the MIC.
- */
-static void cosm_reset_trigger_work(struct work_struct *work)
-{
- struct cosm_device *cdev = container_of(work, struct cosm_device,
- reset_trigger_work);
- cosm_stop(cdev, false);
-}
-
-/**
- * cosm_reset - Schedule MIC reset
- * @cdev: pointer to cosm_device instance
- *
- * RETURNS: -EINVAL if the card is already READY, or 0 for success.
- */
-int cosm_reset(struct cosm_device *cdev)
-{
- int rc = 0;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_READY) {
- if (cdev->state != MIC_RESETTING) {
- cdev->prev_state = cdev->state;
- cosm_set_state(cdev, MIC_RESETTING);
- schedule_work(&cdev->reset_trigger_work);
- }
- } else {
- dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
- rc = -EINVAL;
- }
- mutex_unlock(&cdev->cosm_mutex);
- return rc;
-}
-
-/**
- * cosm_shutdown - Initiate MIC shutdown.
- * @cdev: pointer to cosm_device instance
- *
- * RETURNS: 0 for success, or an appropriate -ERRNO error value on error.
- */
-int cosm_shutdown(struct cosm_device *cdev)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN };
- int rc = 0;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cdev->state != MIC_ONLINE) {
- rc = -EINVAL;
- dev_err(&cdev->dev, "%s %d skipping shutdown in state: %s\n",
- __func__, __LINE__, cosm_state_string[cdev->state]);
- goto err;
- }
-
- if (!cdev->epd) {
- rc = -ENOTCONN;
- dev_err(&cdev->dev, "%s %d scif endpoint not connected rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0) {
- dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
- cdev->heartbeat_watchdog_enable = false;
- cosm_set_state(cdev, MIC_SHUTTING_DOWN);
- rc = 0;
-err:
- mutex_unlock(&cdev->cosm_mutex);
- return rc;
-}
-
-static int cosm_driver_probe(struct cosm_device *cdev)
-{
- int rc;
-
- /* Initialize SCIF server at first probe */
- if (atomic_add_return(1, &g_num_dev) == 1) {
- rc = cosm_scif_init();
- if (rc)
- goto scif_exit;
- }
- mutex_init(&cdev->cosm_mutex);
- INIT_WORK(&cdev->reset_trigger_work, cosm_reset_trigger_work);
- INIT_WORK(&cdev->scif_work, cosm_scif_work);
- cdev->sysfs_heartbeat_enable = true;
- cosm_sysfs_init(cdev);
- cdev->sdev = device_create_with_groups(g_cosm_class, cdev->dev.parent,
- MKDEV(0, cdev->index), cdev, cdev->attr_group,
- "mic%d", cdev->index);
- if (IS_ERR(cdev->sdev)) {
- rc = PTR_ERR(cdev->sdev);
- dev_err(&cdev->dev, "device_create_with_groups failed rc %d\n",
- rc);
- goto scif_exit;
- }
-
- cdev->state_sysfs = sysfs_get_dirent(cdev->sdev->kobj.sd,
- "state");
- if (!cdev->state_sysfs) {
- rc = -ENODEV;
- dev_err(&cdev->dev, "sysfs_get_dirent failed rc %d\n", rc);
- goto destroy_device;
- }
- cosm_create_debug_dir(cdev);
- return 0;
-destroy_device:
- device_destroy(g_cosm_class, MKDEV(0, cdev->index));
-scif_exit:
- if (atomic_dec_and_test(&g_num_dev))
- cosm_scif_exit();
- return rc;
-}
-
-static void cosm_driver_remove(struct cosm_device *cdev)
-{
- cosm_delete_debug_dir(cdev);
- sysfs_put(cdev->state_sysfs);
- device_destroy(g_cosm_class, MKDEV(0, cdev->index));
- flush_work(&cdev->reset_trigger_work);
- cosm_stop(cdev, false);
- if (atomic_dec_and_test(&g_num_dev))
- cosm_scif_exit();
-
- /* Free any strings these sysfs entries may have allocated */
- kfree(cdev->cmdline);
- kfree(cdev->firmware);
- kfree(cdev->ramdisk);
- kfree(cdev->bootmode);
-}
-
-static int cosm_suspend(struct device *dev)
-{
- struct cosm_device *cdev = dev_to_cosm(dev);
-
- mutex_lock(&cdev->cosm_mutex);
- switch (cdev->state) {
- /*
- * Suspend/freeze hooks in user space have already shut down the card,
- * so it should be 'ready' in most cases. It is however possible that
- * some user space application initiated a boot. In those cases, we
- * simply reset the card.
- */
- case MIC_ONLINE:
- case MIC_BOOTING:
- case MIC_SHUTTING_DOWN:
- mutex_unlock(&cdev->cosm_mutex);
- cosm_stop(cdev, false);
- break;
- default:
- mutex_unlock(&cdev->cosm_mutex);
- break;
- }
- return 0;
-}
-
-static const struct dev_pm_ops cosm_pm_ops = {
- .suspend = cosm_suspend,
- .freeze = cosm_suspend
-};
-
-static struct cosm_driver cosm_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .pm = &cosm_pm_ops,
- },
- .probe = cosm_driver_probe,
- .remove = cosm_driver_remove
-};
-
-static int __init cosm_init(void)
-{
- int ret;
-
- cosm_init_debugfs();
-
- g_cosm_class = class_create(THIS_MODULE, cosm_driver_name);
- if (IS_ERR(g_cosm_class)) {
- ret = PTR_ERR(g_cosm_class);
- pr_err("class_create failed ret %d\n", ret);
- goto cleanup_debugfs;
- }
-
- ida_init(&g_cosm_ida);
- ret = cosm_register_driver(&cosm_driver);
- if (ret) {
- pr_err("cosm_register_driver failed ret %d\n", ret);
- goto ida_destroy;
- }
- return 0;
-ida_destroy:
- ida_destroy(&g_cosm_ida);
- class_destroy(g_cosm_class);
-cleanup_debugfs:
- cosm_exit_debugfs();
- return ret;
-}
-
-static void __exit cosm_exit(void)
-{
- cosm_unregister_driver(&cosm_driver);
- ida_destroy(&g_cosm_ida);
- class_destroy(g_cosm_class);
- cosm_exit_debugfs();
-}
-
-module_init(cosm_init);
-module_exit(cosm_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC Coprocessor State Management (COSM) Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/cosm/cosm_main.h b/drivers/misc/mic/cosm/cosm_main.h
deleted file mode 100644
index 5188ad245814..000000000000
--- a/drivers/misc/mic/cosm/cosm_main.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#ifndef _COSM_COSM_H_
-#define _COSM_COSM_H_
-
-#include <linux/scif.h>
-#include "../bus/cosm_bus.h"
-
-#define COSM_HEARTBEAT_SEND_SEC 30
-#define SCIF_COSM_LISTEN_PORT 201
-
-/**
- * enum COSM msg id's
- * @COSM_MSG_SHUTDOWN: host->card trigger shutdown
- * @COSM_MSG_SYNC_TIME: host->card send host time to card to sync time
- * @COSM_MSG_HEARTBEAT: card->host heartbeat
- * @COSM_MSG_SHUTDOWN_STATUS: card->host with shutdown status as payload
- */
-enum cosm_msg_id {
- COSM_MSG_SHUTDOWN,
- COSM_MSG_SYNC_TIME,
- COSM_MSG_HEARTBEAT,
- COSM_MSG_SHUTDOWN_STATUS,
-};
-
-struct cosm_msg {
- u64 id;
- union {
- u64 shutdown_status;
- struct {
- u64 tv_sec;
- u64 tv_nsec;
- } timespec;
- };
-};
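
Size note: a u64 id followed by a union whose largest member is the two-u64
timespec makes every message a fixed 8 + 16 = 24 bytes, so both ends can
always scif_send()/scif_recv() exactly sizeof(struct cosm_msg).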
-
-extern const char * const cosm_state_string[];
-extern const char * const cosm_shutdown_status_string[];
-
-void cosm_sysfs_init(struct cosm_device *cdev);
-int cosm_start(struct cosm_device *cdev);
-void cosm_stop(struct cosm_device *cdev, bool force);
-int cosm_reset(struct cosm_device *cdev);
-int cosm_shutdown(struct cosm_device *cdev);
-void cosm_set_state(struct cosm_device *cdev, u8 state);
-void cosm_set_shutdown_status(struct cosm_device *cdev, u8 status);
-void cosm_init_debugfs(void);
-void cosm_exit_debugfs(void);
-void cosm_create_debug_dir(struct cosm_device *cdev);
-void cosm_delete_debug_dir(struct cosm_device *cdev);
-int cosm_scif_init(void);
-void cosm_scif_exit(void);
-void cosm_scif_work(struct work_struct *work);
-
-#endif
diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c
deleted file mode 100644
index 7baec9fd8756..000000000000
--- a/drivers/misc/mic/cosm/cosm_scif_server.c
+++ /dev/null
@@ -1,399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#include <linux/kthread.h>
-#include <linux/sched/signal.h>
-
-#include "cosm_main.h"
-
-/*
- * The COSM driver uses SCIF to communicate between the management node and the
- * MIC cards. SCIF is used to (a) send a shutdown command to the card, (b)
- * receive a shutdown status back from the card upon completion of shutdown,
- * and (c) receive periodic heartbeat messages from the card, used to deduce
- * whether the card has crashed.
- *
- * A COSM server consisting of a SCIF listening endpoint waits for incoming
- * connections from the card. Upon acceptance of the connection, a separate
- * work-item is scheduled to handle SCIF message processing for that card. The
- * life-time of this work-item is therefore the time from which the connection
- * from a card is accepted to the time at which the connection is closed. A new
- * work-item starts each time the card boots and is alive till the card (a)
- * shuts down (b) is reset (c) crashes (d) cosm_client driver on the card is
- * unloaded.
- *
- * From the point of view of COSM interactions with SCIF during card
- * shutdown, reset and crash are as follows:
- *
- * Card shutdown
- * -------------
- * 1. COSM client on the card invokes orderly_poweroff() in response to SHUTDOWN
- * message from the host.
- * 2. Card driver shutdown callback invokes scif_unregister_device(..) resulting
- * in scif_remove(..) getting called on the card
- * 3. scif_remove -> scif_stop -> scif_handle_remove_node ->
- * scif_peer_unregister_device -> device_unregister for the host peer device
- * 4. During device_unregister remove(..) method of cosm_client is invoked which
- * closes the COSM SCIF endpoint on the card. This results in a SCIF_DISCNCT
- * message being sent to host SCIF. SCIF_DISCNCT message processing on the
- * host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes
- * up the host COSM thread blocked in scif_poll(..) resulting in
- * scif_poll(..) returning EPOLLHUP.
- * 5. On the card, scif_peer_release_dev is next called which results in an
- * SCIF_EXIT message being sent to the host and after receiving the
- * SCIF_EXIT_ACK from the host the peer device teardown on the card is
- * complete.
- * 6. As part of the SCIF_EXIT message processing on the host, host sends a
- * SCIF_REMOVE_NODE to itself corresponding to the card being removed. This
- * starts a similar SCIF peer device teardown sequence on the host
- * corresponding to the card being shut down.
- *
- * Card reset
- * ----------
- * The case of interest here is when the card has not been previously shut down
- * since most of the steps below are skipped in that case:
- *
- * 1. cosm_stop(..) invokes hw_ops->stop(..) method of the base PCIe driver
- * which unregisters the SCIF HW device resulting in scif_remove(..) being
- * called on the host.
- * 2. scif_remove(..) calls scif_disconnect_node(..) which results in a
- * SCIF_EXIT message being sent to the card.
- * 3. The card executes scif_stop() as part of SCIF_EXIT message
- * processing. This results in the COSM endpoint on the card being closed and
- * the SCIF host peer device on the card getting unregistered similar to
- * steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the
- * host returns EPOLLHUP as a result.
- * 4. On the host, card peer device unregister and SCIF HW remove(..) also
- * subsequently complete.
- *
- * Card crash
- * ----------
- * If a reset is issued after the card has crashed, there is no SCIF_DISCNCT
- * message from the card which would result in scif_poll(..) returning
- * EPOLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE
- * message to itself resulting in the card SCIF peer device being unregistered,
- * this results in a scif_peer_release_dev -> scif_cleanup_scifdev->
- * scif_invalidate_ep call sequence which sets the endpoint state to
- * DISCONNECTED and results in scif_poll(..) returning EPOLLHUP.
- */
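
Note that in all three teardown paths described above, the event the host
ultimately observes is scif_poll() returning EPOLLHUP; cosm_scif_work()
below therefore keys the end of a work item's life cycle solely on EPOLLHUP.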
-
-#define COSM_SCIF_BACKLOG 16
-#define COSM_HEARTBEAT_CHECK_DELTA_SEC 10
-#define COSM_HEARTBEAT_TIMEOUT_SEC \
- (COSM_HEARTBEAT_SEND_SEC + COSM_HEARTBEAT_CHECK_DELTA_SEC)
-#define COSM_HEARTBEAT_TIMEOUT_MSEC (COSM_HEARTBEAT_TIMEOUT_SEC * MSEC_PER_SEC)
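
Worked out: the card sends a heartbeat every COSM_HEARTBEAT_SEND_SEC (30)
seconds and the host allows COSM_HEARTBEAT_CHECK_DELTA_SEC (10) seconds of
slack, so the scif_poll() timeout below is (30 + 10) * 1000 = 40000 ms of
silence before the card is declared crashed.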
-
-static struct task_struct *server_thread;
-static scif_epd_t listen_epd;
-
-/* Publish MIC card's shutdown status to user space MIC daemon */
-static void cosm_update_mic_status(struct cosm_device *cdev)
-{
- if (cdev->shutdown_status_int != MIC_NOP) {
- cosm_set_shutdown_status(cdev, cdev->shutdown_status_int);
- cdev->shutdown_status_int = MIC_NOP;
- }
-}
-
-/* Store MIC card's shutdown status internally when it is received */
-static void cosm_shutdown_status_int(struct cosm_device *cdev,
- enum mic_status shutdown_status)
-{
- switch (shutdown_status) {
- case MIC_HALTED:
- case MIC_POWER_OFF:
- case MIC_RESTART:
- case MIC_CRASHED:
- break;
- default:
- dev_err(&cdev->dev, "%s %d Unexpected shutdown_status %d\n",
- __func__, __LINE__, shutdown_status);
- return;
- }
- cdev->shutdown_status_int = shutdown_status;
- cdev->heartbeat_watchdog_enable = false;
-
- if (cdev->state != MIC_SHUTTING_DOWN)
- cosm_set_state(cdev, MIC_SHUTTING_DOWN);
-}
-
-/* Non-blocking recv. Read and process all available messages */
-static void cosm_scif_recv(struct cosm_device *cdev)
-{
- struct cosm_msg msg;
- int rc;
-
- while (1) {
- rc = scif_recv(cdev->epd, &msg, sizeof(msg), 0);
- if (!rc) {
- break;
- } else if (rc < 0) {
- dev_dbg(&cdev->dev, "%s: %d rc %d\n",
- __func__, __LINE__, rc);
- break;
- }
- dev_dbg(&cdev->dev, "%s: %d rc %d id 0x%llx\n",
- __func__, __LINE__, rc, msg.id);
-
- switch (msg.id) {
- case COSM_MSG_SHUTDOWN_STATUS:
- cosm_shutdown_status_int(cdev, msg.shutdown_status);
- break;
- case COSM_MSG_HEARTBEAT:
- /* Nothing to do, heartbeat only unblocks scif_poll */
- break;
- default:
- dev_err(&cdev->dev, "%s: %d unknown msg.id %lld\n",
- __func__, __LINE__, msg.id);
- break;
- }
- }
-}
-
-/* Publish crashed status for this MIC card */
-static void cosm_set_crashed(struct cosm_device *cdev)
-{
- dev_err(&cdev->dev, "node alive timeout\n");
- cosm_shutdown_status_int(cdev, MIC_CRASHED);
- cosm_update_mic_status(cdev);
-}
-
-/* Send host time to the MIC card to sync system time between host and MIC */
-static void cosm_send_time(struct cosm_device *cdev)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SYNC_TIME };
- struct timespec64 ts;
- int rc;
-
- ktime_get_real_ts64(&ts);
- msg.timespec.tv_sec = ts.tv_sec;
- msg.timespec.tv_nsec = ts.tv_nsec;
-
- rc = scif_send(cdev->epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&cdev->dev, "%s %d scif_send failed rc %d\n",
- __func__, __LINE__, rc);
-}
-
-/*
- * Close this cosm_device's endpoint after its peer endpoint on the card has
- * been closed. In all cases except MIC card crash EPOLLHUP on the host is
- * triggered by the client's endpoint being closed.
- */
-static void cosm_scif_close(struct cosm_device *cdev)
-{
- /*
- * Because SHUTDOWN_STATUS message is sent by the MIC cards in the
- * reboot notifier when shutdown is still not complete, we notify mpssd
- * to reset the card when SCIF endpoint is closed.
- */
- cosm_update_mic_status(cdev);
- scif_close(cdev->epd);
- cdev->epd = NULL;
- dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
-}
-
-/*
- * Set card state to ONLINE when a new SCIF connection from a MIC card is
- * received. Normally the state is BOOTING when the connection comes in, but can
- * be ONLINE if cosm_client driver on the card was unloaded and then reloaded.
- */
-static int cosm_set_online(struct cosm_device *cdev)
-{
- int rc = 0;
-
- if (cdev->state == MIC_BOOTING || cdev->state == MIC_ONLINE) {
- cdev->heartbeat_watchdog_enable = cdev->sysfs_heartbeat_enable;
- cdev->epd = cdev->newepd;
- if (cdev->state == MIC_BOOTING)
- cosm_set_state(cdev, MIC_ONLINE);
- cosm_send_time(cdev);
- dev_dbg(&cdev->dev, "%s %d\n", __func__, __LINE__);
- } else {
- dev_warn(&cdev->dev, "%s %d not going online in state: %s\n",
- __func__, __LINE__, cosm_state_string[cdev->state]);
- rc = -EINVAL;
- }
- /* Drop reference acquired by bus_find_device in the server thread */
- put_device(&cdev->dev);
- return rc;
-}
-
-/*
- * Work function for handling work for a SCIF connection from a particular MIC
- * card. It first sets the card state to ONLINE and then calls scif_poll to
- * block on activity such as incoming messages on the SCIF endpoint. When the
- * endpoint is closed, the work function exits, completing its life cycle, from
- * MIC card boot to card shutdown/reset/crash.
- */
-void cosm_scif_work(struct work_struct *work)
-{
- struct cosm_device *cdev = container_of(work, struct cosm_device,
- scif_work);
- struct scif_pollepd pollepd;
- int rc;
-
- mutex_lock(&cdev->cosm_mutex);
- if (cosm_set_online(cdev))
- goto exit;
-
- while (1) {
- pollepd.epd = cdev->epd;
- pollepd.events = EPOLLIN;
-
- /* Drop the mutex before blocking in scif_poll(..) */
- mutex_unlock(&cdev->cosm_mutex);
- /* poll(..) with timeout on our endpoint */
- rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_TIMEOUT_MSEC);
- mutex_lock(&cdev->cosm_mutex);
- if (rc < 0) {
- dev_err(&cdev->dev, "%s %d scif_poll rc %d\n",
- __func__, __LINE__, rc);
- continue;
- }
-
- /* There is a message from the card */
- if (pollepd.revents & EPOLLIN)
- cosm_scif_recv(cdev);
-
- /* The peer endpoint is closed or this endpoint disconnected */
- if (pollepd.revents & EPOLLHUP) {
- cosm_scif_close(cdev);
- break;
- }
-
- /* Did we timeout from poll? */
- if (!rc && cdev->heartbeat_watchdog_enable)
- cosm_set_crashed(cdev);
- }
-exit:
- dev_dbg(&cdev->dev, "%s %d exiting\n", __func__, __LINE__);
- mutex_unlock(&cdev->cosm_mutex);
-}
-
-/*
- * COSM SCIF server thread function. Accepts incoming SCIF connections from MIC
- * cards, finds the correct cosm_device to associate that connection with and
- * schedules individual work items for each MIC card.
- */
-static int cosm_scif_server(void *unused)
-{
- struct cosm_device *cdev;
- scif_epd_t newepd;
- struct scif_port_id port_id;
- int rc;
-
- allow_signal(SIGKILL);
-
- while (!kthread_should_stop()) {
- rc = scif_accept(listen_epd, &port_id, &newepd,
- SCIF_ACCEPT_SYNC);
- if (rc < 0) {
- if (rc != -ERESTARTSYS)
- pr_err("%s %d rc %d\n", __func__, __LINE__, rc);
- continue;
- }
-
- /*
- * Associate the incoming connection with a particular
- * cosm_device, COSM device ID == SCIF node ID - 1
- */
- cdev = cosm_find_cdev_by_id(port_id.node - 1);
- if (!cdev)
- continue;
- cdev->newepd = newepd;
- schedule_work(&cdev->scif_work);
- }
-
- pr_debug("%s %d Server thread stopped\n", __func__, __LINE__);
- return 0;
-}
-
-static int cosm_scif_listen(void)
-{
- int rc;
-
- listen_epd = scif_open();
- if (!listen_epd) {
- pr_err("%s %d scif_open failed\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
- rc = scif_bind(listen_epd, SCIF_COSM_LISTEN_PORT);
- if (rc < 0) {
- pr_err("%s %d scif_bind failed rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- rc = scif_listen(listen_epd, COSM_SCIF_BACKLOG);
- if (rc < 0) {
- pr_err("%s %d scif_listen rc %d\n", __func__, __LINE__, rc);
- goto err;
- }
- pr_debug("%s %d listen_epd set up\n", __func__, __LINE__);
- return 0;
-err:
- scif_close(listen_epd);
- listen_epd = NULL;
- return rc;
-}
-
-static void cosm_scif_listen_exit(void)
-{
- pr_debug("%s %d closing listen_epd\n", __func__, __LINE__);
- if (listen_epd) {
- scif_close(listen_epd);
- listen_epd = NULL;
- }
-}
-
-/*
- * Create a listening SCIF endpoint and a server kthread which accepts incoming
- * SCIF connections from MIC cards
- */
-int cosm_scif_init(void)
-{
- int rc = cosm_scif_listen();
-
- if (rc) {
- pr_err("%s %d cosm_scif_listen rc %d\n",
- __func__, __LINE__, rc);
- goto err;
- }
-
- server_thread = kthread_run(cosm_scif_server, NULL, "cosm_server");
- if (IS_ERR(server_thread)) {
- rc = PTR_ERR(server_thread);
- pr_err("%s %d kthread_run rc %d\n", __func__, __LINE__, rc);
- goto listen_exit;
- }
- return 0;
-listen_exit:
- cosm_scif_listen_exit();
-err:
- return rc;
-}
-
-/* Stop the running server thread and close the listening SCIF endpoint */
-void cosm_scif_exit(void)
-{
- int rc;
-
- if (!IS_ERR_OR_NULL(server_thread)) {
- rc = send_sig(SIGKILL, server_thread, 0);
- if (rc) {
- pr_err("%s %d send_sig rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
- kthread_stop(server_thread);
- }
-
- cosm_scif_listen_exit();
-}
diff --git a/drivers/misc/mic/cosm/cosm_sysfs.c b/drivers/misc/mic/cosm/cosm_sysfs.c
deleted file mode 100644
index e6dac967c1af..000000000000
--- a/drivers/misc/mic/cosm/cosm_sysfs.c
+++ /dev/null
@@ -1,449 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC Coprocessor State Management (COSM) Driver
- */
-#include <linux/slab.h>
-#include "cosm_main.h"
-
-/*
- * A state-to-string lookup table, for exposing a human-readable state
- * via sysfs. Always keep in sync with enum cosm_states.
- */
-const char * const cosm_state_string[] = {
- [MIC_READY] = "ready",
- [MIC_BOOTING] = "booting",
- [MIC_ONLINE] = "online",
- [MIC_SHUTTING_DOWN] = "shutting_down",
- [MIC_RESETTING] = "resetting",
- [MIC_RESET_FAILED] = "reset_failed",
-};
-
-/*
- * A shutdown-status-to-string lookup table, for exposing a human-readable
- * status via sysfs. Always keep in sync with enum cosm_shutdown_status.
- */
-const char * const cosm_shutdown_status_string[] = {
- [MIC_NOP] = "nop",
- [MIC_CRASHED] = "crashed",
- [MIC_HALTED] = "halted",
- [MIC_POWER_OFF] = "poweroff",
- [MIC_RESTART] = "restart",
-};
-
-void cosm_set_shutdown_status(struct cosm_device *cdev, u8 shutdown_status)
-{
- dev_dbg(&cdev->dev, "Shutdown Status %s -> %s\n",
- cosm_shutdown_status_string[cdev->shutdown_status],
- cosm_shutdown_status_string[shutdown_status]);
- cdev->shutdown_status = shutdown_status;
-}
-
-void cosm_set_state(struct cosm_device *cdev, u8 state)
-{
- dev_dbg(&cdev->dev, "State %s -> %s\n",
- cosm_state_string[cdev->state],
- cosm_state_string[state]);
- cdev->state = state;
- sysfs_notify_dirent(cdev->state_sysfs);
-}
-
-static ssize_t
-family_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return cdev->hw_ops->family(cdev, buf);
-}
-static DEVICE_ATTR_RO(family);
-
-static ssize_t
-stepping_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return cdev->hw_ops->stepping(cdev, buf);
-}
-static DEVICE_ATTR_RO(stepping);
-
-static ssize_t
-state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev || cdev->state >= MIC_LAST)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- cosm_state_string[cdev->state]);
-}
-
-static ssize_t
-state_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int rc;
-
- if (!cdev)
- return -EINVAL;
-
- if (sysfs_streq(buf, "boot")) {
- rc = cosm_start(cdev);
- goto done;
- }
- if (sysfs_streq(buf, "reset")) {
- rc = cosm_reset(cdev);
- goto done;
- }
-
- if (sysfs_streq(buf, "shutdown")) {
- rc = cosm_shutdown(cdev);
- goto done;
- }
- rc = -EINVAL;
-done:
- if (rc)
- count = rc;
- return count;
-}
-static DEVICE_ATTR_RW(state);
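
Usage sketch (the /sys path is an assumption based on the "mic" class and
"mic%d" device names created in cosm_main.c): writing one of the literal
strings parsed above drives the state machine, e.g.
echo boot > /sys/class/mic/mic0/state (likewise "reset" and "shutdown"),
while reading the file returns the current state string. Because
cosm_set_state() calls sysfs_notify_dirent(), a user space daemon can also
poll()/select() on an open state file to be woken on state transitions.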
-
-static ssize_t shutdown_status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev || cdev->shutdown_status >= MIC_STATUS_LAST)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- cosm_shutdown_status_string[cdev->shutdown_status]);
-}
-static DEVICE_ATTR_RO(shutdown_status);
-
-static ssize_t
-heartbeat_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", cdev->sysfs_heartbeat_enable);
-}
-
-static ssize_t
-heartbeat_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int enable;
- int ret;
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- ret = kstrtoint(buf, 10, &enable);
- if (ret)
- goto unlock;
-
- cdev->sysfs_heartbeat_enable = enable;
- /* if state is not online, cdev->heartbeat_watchdog_enable is 0 */
- if (cdev->state == MIC_ONLINE)
- cdev->heartbeat_watchdog_enable = enable;
- ret = count;
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return ret;
-}
-static DEVICE_ATTR_RW(heartbeat_enable);
-
-static ssize_t
-cmdline_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *cmdline;
-
- if (!cdev)
- return -EINVAL;
-
- cmdline = cdev->cmdline;
-
- if (cmdline)
- return scnprintf(buf, PAGE_SIZE, "%s\n", cmdline);
- return 0;
-}
-
-static ssize_t
-cmdline_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->cmdline);
-
- cdev->cmdline = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->cmdline) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->cmdline, buf, count);
-
- if (cdev->cmdline[count - 1] == '\n')
- cdev->cmdline[count - 1] = '\0';
- else
- cdev->cmdline[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(cmdline);
-
-static ssize_t
-firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *firmware;
-
- if (!cdev)
- return -EINVAL;
-
- firmware = cdev->firmware;
-
- if (firmware)
- return scnprintf(buf, PAGE_SIZE, "%s\n", firmware);
- return 0;
-}
-
-static ssize_t
-firmware_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->firmware);
-
- cdev->firmware = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->firmware) {
- count = -ENOMEM;
- goto unlock;
- }
- strncpy(cdev->firmware, buf, count);
-
- if (cdev->firmware[count - 1] == '\n')
- cdev->firmware[count - 1] = '\0';
- else
- cdev->firmware[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(firmware);
-
-static ssize_t
-ramdisk_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *ramdisk;
-
- if (!cdev)
- return -EINVAL;
-
- ramdisk = cdev->ramdisk;
-
- if (ramdisk)
- return scnprintf(buf, PAGE_SIZE, "%s\n", ramdisk);
- return 0;
-}
-
-static ssize_t
-ramdisk_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->ramdisk);
-
- cdev->ramdisk = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->ramdisk) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->ramdisk, buf, count);
-
- if (cdev->ramdisk[count - 1] == '\n')
- cdev->ramdisk[count - 1] = '\0';
- else
- cdev->ramdisk[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(ramdisk);
-
-static ssize_t
-bootmode_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- char *bootmode;
-
- if (!cdev)
- return -EINVAL;
-
- bootmode = cdev->bootmode;
-
- if (bootmode)
- return scnprintf(buf, PAGE_SIZE, "%s\n", bootmode);
- return 0;
-}
-
-static ssize_t
-bootmode_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- if (!sysfs_streq(buf, "linux") && !sysfs_streq(buf, "flash"))
- return -EINVAL;
-
- mutex_lock(&cdev->cosm_mutex);
- kfree(cdev->bootmode);
-
- cdev->bootmode = kmalloc(count + 1, GFP_KERNEL);
- if (!cdev->bootmode) {
- count = -ENOMEM;
- goto unlock;
- }
-
- strncpy(cdev->bootmode, buf, count);
-
- if (cdev->bootmode[count - 1] == '\n')
- cdev->bootmode[count - 1] = '\0';
- else
- cdev->bootmode[count] = '\0';
-unlock:
- mutex_unlock(&cdev->cosm_mutex);
- return count;
-}
-static DEVICE_ATTR_RW(bootmode);
-
-static ssize_t
-log_buf_addr_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_addr);
-}
-
-static ssize_t
-log_buf_addr_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int ret;
- unsigned long addr;
-
- if (!cdev)
- return -EINVAL;
-
- ret = kstrtoul(buf, 16, &addr);
- if (ret)
- goto exit;
-
- cdev->log_buf_addr = (void *)addr;
- ret = count;
-exit:
- return ret;
-}
-static DEVICE_ATTR_RW(log_buf_addr);
-
-static ssize_t
-log_buf_len_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
-
- if (!cdev)
- return -EINVAL;
-
- return scnprintf(buf, PAGE_SIZE, "%p\n", cdev->log_buf_len);
-}
-
-static ssize_t
-log_buf_len_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cosm_device *cdev = dev_get_drvdata(dev);
- int ret;
- unsigned long addr;
-
- if (!cdev)
- return -EINVAL;
-
- ret = kstrtoul(buf, 16, &addr);
- if (ret)
- goto exit;
-
- cdev->log_buf_len = (int *)addr;
- ret = count;
-exit:
- return ret;
-}
-static DEVICE_ATTR_RW(log_buf_len);
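
The two stores above parse hexadecimal values with kstrtoul(.., 16, ..), so
user space is expected to write the card kernel's log buffer addresses in
hex. A hypothetical user-space helper, along the lines the log_buf_show
comment describes, might look as follows (the System.map symbol names
__log_buf/log_buf_len, the function names, and the sysfs directory argument
are illustrative assumptions, not part of this driver):

    #include <stdio.h>
    #include <string.h>

    /* Write one value, in hex, into a sysfs attribute file */
    static void write_hex(const char *dir, const char *file,
                          unsigned long long val)
    {
            char path[256];
            FILE *out;

            snprintf(path, sizeof(path), "%s/%s", dir, file);
            out = fopen(path, "w");
            if (out) {
                    fprintf(out, "%llx", val);
                    fclose(out);
            }
    }

    /* Scan the card kernel's System.map for the log buffer symbols and
     * populate the log_buf_addr/log_buf_len sysfs entries. */
    int populate_log_buf(const char *system_map, const char *sysfs_dir)
    {
            char line[256], sym[128], type;
            unsigned long long addr;
            FILE *f = fopen(system_map, "r");

            if (!f)
                    return -1;
            while (fgets(line, sizeof(line), f)) {
                    if (sscanf(line, "%llx %c %127s", &addr, &type, sym) != 3)
                            continue;
                    if (!strcmp(sym, "__log_buf"))
                            write_hex(sysfs_dir, "log_buf_addr", addr);
                    else if (!strcmp(sym, "log_buf_len"))
                            write_hex(sysfs_dir, "log_buf_len", addr);
            }
            fclose(f);
            return 0;
    }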
-
-static struct attribute *cosm_default_attrs[] = {
- &dev_attr_family.attr,
- &dev_attr_stepping.attr,
- &dev_attr_state.attr,
- &dev_attr_shutdown_status.attr,
- &dev_attr_heartbeat_enable.attr,
- &dev_attr_cmdline.attr,
- &dev_attr_firmware.attr,
- &dev_attr_ramdisk.attr,
- &dev_attr_bootmode.attr,
- &dev_attr_log_buf_addr.attr,
- &dev_attr_log_buf_len.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cosm_default);
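
ATTRIBUTE_GROUPS(cosm_default) is the <linux/sysfs.h> convenience macro
wrapping the attribute array; it expands to roughly:

    static const struct attribute_group cosm_default_group = {
            .attrs = cosm_default_attrs,
    };

    static const struct attribute_group *cosm_default_groups[] = {
            &cosm_default_group,
            NULL,
    };

cosm_sysfs_init() below stores cosm_default_groups in cdev->attr_group,
which cosm_driver_probe() in cosm_main.c then passes to
device_create_with_groups().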
-
-void cosm_sysfs_init(struct cosm_device *cdev)
-{
- cdev->attr_group = cosm_default_groups;
-}
diff --git a/drivers/misc/mic/cosm_client/Makefile b/drivers/misc/mic/cosm_client/Makefile
deleted file mode 100644
index 5b62270bc2ab..000000000000
--- a/drivers/misc/mic/cosm_client/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC COSM Client Driver
-# Copyright(c) 2015, Intel Corporation.
-#
-obj-$(CONFIG_MIC_COSM) += cosm_client.o
-
-cosm_client-objs += cosm_scif_client.o
diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c
deleted file mode 100644
index a03213dd9319..000000000000
--- a/drivers/misc/mic/cosm_client/cosm_scif_client.c
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel MIC COSM Client Driver
- */
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/kthread.h>
-#include <linux/sched/signal.h>
-
-#include "../cosm/cosm_main.h"
-
-#define COSM_SCIF_MAX_RETRIES 10
-#define COSM_HEARTBEAT_SEND_MSEC (COSM_HEARTBEAT_SEND_SEC * MSEC_PER_SEC)
-
-static struct task_struct *client_thread;
-static scif_epd_t client_epd;
-static struct scif_peer_dev *client_spdev;
-
-/*
- * Reboot notifier: receives shutdown status from the OS and communicates it
- * back to the COSM process on the host
- */
-static int cosm_reboot_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- struct cosm_msg msg = { .id = COSM_MSG_SHUTDOWN_STATUS };
- int rc;
-
- event = (event == SYS_RESTART) ? SYSTEM_RESTART : event;
- dev_info(&client_spdev->dev, "%s %d received event %ld\n",
- __func__, __LINE__, event);
-
- msg.shutdown_status = event;
- rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
- __func__, __LINE__, rc);
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block cosm_reboot = {
- .notifier_call = cosm_reboot_event,
-};
-
-/* Set system time from timespec value received from the host */
-static void cosm_set_time(struct cosm_msg *msg)
-{
- struct timespec64 ts = {
- .tv_sec = msg->timespec.tv_sec,
- .tv_nsec = msg->timespec.tv_nsec,
- };
- int rc = do_settimeofday64(&ts);
-
- if (rc)
- dev_err(&client_spdev->dev, "%s: %d settimeofday rc %d\n",
- __func__, __LINE__, rc);
-}
-
-/* COSM client receive message processing */
-static void cosm_client_recv(void)
-{
- struct cosm_msg msg;
- int rc;
-
- while (1) {
- rc = scif_recv(client_epd, &msg, sizeof(msg), 0);
- if (!rc) {
- return;
- } else if (rc < 0) {
- dev_err(&client_spdev->dev, "%s: %d rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
-
- dev_dbg(&client_spdev->dev, "%s: %d rc %d id 0x%llx\n",
- __func__, __LINE__, rc, msg.id);
-
- switch (msg.id) {
- case COSM_MSG_SYNC_TIME:
- cosm_set_time(&msg);
- break;
- case COSM_MSG_SHUTDOWN:
- orderly_poweroff(true);
- break;
- default:
- dev_err(&client_spdev->dev, "%s: %d unknown id %lld\n",
- __func__, __LINE__, msg.id);
- break;
- }
- }
-}
-
-/* Initiate connection to the COSM server on the host */
-static int cosm_scif_connect(void)
-{
- struct scif_port_id port_id;
- int i, rc;
-
- client_epd = scif_open();
- if (!client_epd) {
- dev_err(&client_spdev->dev, "%s %d scif_open failed\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- port_id.node = 0;
- port_id.port = SCIF_COSM_LISTEN_PORT;
-
- for (i = 0; i < COSM_SCIF_MAX_RETRIES; i++) {
- rc = scif_connect(client_epd, &port_id);
- if (rc < 0)
- msleep(1000);
- else
- break;
- }
-
- if (rc < 0) {
- dev_err(&client_spdev->dev, "%s %d scif_connect rc %d\n",
- __func__, __LINE__, rc);
- scif_close(client_epd);
- client_epd = NULL;
- }
- return rc < 0 ? rc : 0;
-}
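
Retry budget: with COSM_SCIF_MAX_RETRIES (10) attempts spaced by
msleep(1000), the client keeps trying to reach the host's listening port for
roughly ten seconds before giving up.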
-
-/* Close host SCIF connection */
-static void cosm_scif_connect_exit(void)
-{
- if (client_epd) {
- scif_close(client_epd);
- client_epd = NULL;
- }
-}
-
-/*
- * COSM SCIF client thread function: waits for messages from the host and sends
- * a heartbeat to the host
- */
-static int cosm_scif_client(void *unused)
-{
- struct cosm_msg msg = { .id = COSM_MSG_HEARTBEAT };
- struct scif_pollepd pollepd;
- int rc;
-
- allow_signal(SIGKILL);
-
- while (!kthread_should_stop()) {
- pollepd.epd = client_epd;
- pollepd.events = EPOLLIN;
-
- rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC);
- if (rc < 0) {
- if (rc != -EINTR)
- dev_err(&client_spdev->dev,
- "%s %d scif_poll rc %d\n",
- __func__, __LINE__, rc);
- continue;
- }
-
- if (pollepd.revents & EPOLLIN)
- cosm_client_recv();
-
- msg.id = COSM_MSG_HEARTBEAT;
- rc = scif_send(client_epd, &msg, sizeof(msg), SCIF_SEND_BLOCK);
- if (rc < 0)
- dev_err(&client_spdev->dev, "%s %d scif_send rc %d\n",
- __func__, __LINE__, rc);
- }
-
- dev_dbg(&client_spdev->dev, "%s %d Client thread stopped\n",
- __func__, __LINE__);
- return 0;
-}
-
-static void cosm_scif_probe(struct scif_peer_dev *spdev)
-{
- int rc;
-
- dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
- __func__, __LINE__, spdev->dnode);
-
- /* We are only interested in the host with spdev->dnode == 0 */
- if (spdev->dnode)
- return;
-
- client_spdev = spdev;
- rc = cosm_scif_connect();
- if (rc)
- goto exit;
-
- rc = register_reboot_notifier(&cosm_reboot);
- if (rc) {
- dev_err(&spdev->dev,
- "reboot notifier registration failed rc %d\n", rc);
- goto connect_exit;
- }
-
- client_thread = kthread_run(cosm_scif_client, NULL, "cosm_client");
- if (IS_ERR(client_thread)) {
- rc = PTR_ERR(client_thread);
- dev_err(&spdev->dev, "%s %d kthread_run rc %d\n",
- __func__, __LINE__, rc);
- goto unreg_reboot;
- }
- return;
-unreg_reboot:
- unregister_reboot_notifier(&cosm_reboot);
-connect_exit:
- cosm_scif_connect_exit();
-exit:
- client_spdev = NULL;
-}
-
-static void cosm_scif_remove(struct scif_peer_dev *spdev)
-{
- int rc;
-
- dev_dbg(&spdev->dev, "%s %d: dnode %d\n",
- __func__, __LINE__, spdev->dnode);
-
- if (spdev->dnode)
- return;
-
- if (!IS_ERR_OR_NULL(client_thread)) {
- rc = send_sig(SIGKILL, client_thread, 0);
- if (rc) {
- pr_err("%s %d send_sig rc %d\n",
- __func__, __LINE__, rc);
- return;
- }
- kthread_stop(client_thread);
- }
- unregister_reboot_notifier(&cosm_reboot);
- cosm_scif_connect_exit();
- client_spdev = NULL;
-}
-
-static struct scif_client scif_client_cosm = {
- .name = KBUILD_MODNAME,
- .probe = cosm_scif_probe,
- .remove = cosm_scif_remove,
-};
-
-static int __init cosm_client_init(void)
-{
- int rc = scif_client_register(&scif_client_cosm);
-
- if (rc)
- pr_err("scif_client_register failed rc %d\n", rc);
- return rc;
-}
-
-static void __exit cosm_client_exit(void)
-{
- scif_client_unregister(&scif_client_cosm);
-}
-
-module_init(cosm_client_init);
-module_exit(cosm_client_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC card OS state management client driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
deleted file mode 100644
index 25f153367980..000000000000
--- a/drivers/misc/mic/host/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2013, Intel Corporation.
-#
-obj-$(CONFIG_INTEL_MIC_HOST) += mic_host.o
-mic_host-objs := mic_main.o
-mic_host-objs += mic_x100.o
-mic_host-objs += mic_smpt.o
-mic_host-objs += mic_intr.o
-mic_host-objs += mic_boot.o
-mic_host-objs += mic_debugfs.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
deleted file mode 100644
index fb5b3989753d..000000000000
--- a/drivers/misc/mic/host/mic_boot.c
+++ /dev/null
@@ -1,587 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/pci.h>
-#include <linux/kmod.h>
-#include <linux/mic_common.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-static inline struct mic_device *vpdev_to_mdev(struct device *dev)
-{
- return dev_get_drvdata(dev->parent);
-}
-
-static dma_addr_t
-_mic_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct mic_device *mdev = vpdev_to_mdev(dev);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct mic_device *mdev = vpdev_to_mdev(dev);
-
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static const struct dma_map_ops _mic_dma_ops = {
- .map_page = _mic_dma_map_page,
- .unmap_page = _mic_dma_unmap_page,
-};
-
-static struct mic_irq *
-__mic_request_irq(struct vop_device *vpdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name, void *data, int intr_src)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mic_request_threaded_irq(mdev, func, NULL, name, data,
- intr_src, MIC_INTR_DB);
-}
-
-static void __mic_free_irq(struct vop_device *vpdev,
- struct mic_irq *cookie, void *data)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mic_free_irq(mdev, cookie, data);
-}
-
-static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mdev->ops->intr_workarounds(mdev);
-}
-
-static int __mic_next_db(struct vop_device *vpdev)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mic_next_db(mdev);
-}
-
-static void *__mic_get_dp(struct vop_device *vpdev)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mdev->dp;
-}
-
-static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
-{
- return NULL;
-}
-
-static void __mic_send_intr(struct vop_device *vpdev, int db)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- mdev->ops->send_intr(mdev, db);
-}
-
-static void __iomem *__mic_ioremap(struct vop_device *vpdev,
- dma_addr_t pa, size_t len)
-{
- struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
-
- return mdev->aper.va + pa;
-}
-
-static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
-{
- /* nothing to do */
-}
-
-static struct vop_hw_ops vop_hw_ops = {
- .request_irq = __mic_request_irq,
- .free_irq = __mic_free_irq,
- .ack_interrupt = __mic_ack_interrupt,
- .next_db = __mic_next_db,
- .get_dp = __mic_get_dp,
- .get_remote_dp = __mic_get_remote_dp,
- .send_intr = __mic_send_intr,
- .remap = __mic_ioremap,
- .unmap = __mic_iounmap,
-};
-
-static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
-{
- return dev_get_drvdata(scdev->dev.parent);
-}
-
-static void *__mic_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- dma_addr_t tmp;
- void *va = kzalloc(size, gfp);
-
- if (va) {
- tmp = mic_map_single(mdev, va, size);
- if (dma_mapping_error(dev, tmp)) {
- kfree(va);
- va = NULL;
- } else {
- *dma_handle = tmp;
- }
- }
- return va;
-}
-
-static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_unmap_single(mdev, dma_handle, size);
- kfree(vaddr);
-}
-
-static dma_addr_t
-__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void
-__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- struct scatterlist *s;
- int i, j, ret;
- dma_addr_t da;
-
- ret = dma_map_sg(&mdev->pdev->dev, sg, nents, dir);
- if (ret <= 0)
- return 0;
-
- for_each_sg(sg, s, nents, i) {
- da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
- if (!da)
- goto err;
- sg_dma_address(s) = da;
- }
- return nents;
-err:
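- /*
- * Unwind only the i entries remapped above: restore each DMA
- * address produced by dma_map_sg() before handing the list back.
- */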
- for_each_sg(sg, s, i, j) {
- mic_unmap(mdev, sg_dma_address(s), s->length);
- sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
- }
- dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
- return 0;
-}
-
-static void __mic_dma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scif_hw_dev *scdev = dev_get_drvdata(dev);
- struct mic_device *mdev = scdev_to_mdev(scdev);
- struct scatterlist *s;
- dma_addr_t da;
- int i;
-
- for_each_sg(sg, s, nents, i) {
- da = mic_to_dma_addr(mdev, sg_dma_address(s));
- mic_unmap(mdev, sg_dma_address(s), s->length);
- sg_dma_address(s) = da;
- }
- dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
-}
-
-static const struct dma_map_ops __mic_dma_ops = {
- .alloc = __mic_dma_alloc,
- .free = __mic_dma_free,
- .map_page = __mic_dma_map_page,
- .unmap_page = __mic_dma_unmap_page,
- .map_sg = __mic_dma_map_sg,
- .unmap_sg = __mic_dma_unmap_sg,
-};
-
-static struct mic_irq *
-___mic_request_irq(struct scif_hw_dev *scdev,
- irqreturn_t (*func)(int irq, void *data),
- const char *name,
- void *data, int db)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_request_threaded_irq(mdev, func, NULL, name, data,
- db, MIC_INTR_DB);
-}
-
-static void
-___mic_free_irq(struct scif_hw_dev *scdev,
- struct mic_irq *cookie, void *data)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mic_free_irq(mdev, cookie, data);
-}
-
-static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mdev->ops->intr_workarounds(mdev);
-}
-
-static int ___mic_next_db(struct scif_hw_dev *scdev)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mic_next_db(mdev);
-}
-
-static void ___mic_send_intr(struct scif_hw_dev *scdev, int db)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- mdev->ops->send_intr(mdev, db);
-}
-
-static void __iomem *___mic_ioremap(struct scif_hw_dev *scdev,
- phys_addr_t pa, size_t len)
-{
- struct mic_device *mdev = scdev_to_mdev(scdev);
-
- return mdev->aper.va + pa;
-}
-
-static void ___mic_iounmap(struct scif_hw_dev *scdev, void __iomem *va)
-{
- /* nothing to do */
-}
-
-static struct scif_hw_ops scif_hw_ops = {
- .request_irq = ___mic_request_irq,
- .free_irq = ___mic_free_irq,
- .ack_interrupt = ___mic_ack_interrupt,
- .next_db = ___mic_next_db,
- .send_intr = ___mic_send_intr,
- .remap = ___mic_ioremap,
- .unmap = ___mic_iounmap,
-};
-
-static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
-{
- return dev_get_drvdata(mbdev->dev.parent);
-}
-
-static dma_addr_t
-mic_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- void *va = phys_to_virt(page_to_phys(page)) + offset;
- struct mic_device *mdev = dev_get_drvdata(dev->parent);
-
- return mic_map_single(mdev, va, size);
-}
-
-static void
-mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct mic_device *mdev = dev_get_drvdata(dev->parent);
- mic_unmap_single(mdev, dma_addr, size);
-}
-
-static const struct dma_map_ops mic_dma_ops = {
- .map_page = mic_dma_map_page,
- .unmap_page = mic_dma_unmap_page,
-};
-
-static struct mic_irq *
-_mic_request_threaded_irq(struct mbus_device *mbdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src)
-{
- return mic_request_threaded_irq(mbdev_to_mdev(mbdev), handler,
- thread_fn, name, data,
- intr_src, MIC_INTR_DMA);
-}
-
-static void _mic_free_irq(struct mbus_device *mbdev,
- struct mic_irq *cookie, void *data)
-{
- mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
-}
-
-static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
-{
- struct mic_device *mdev = mbdev_to_mdev(mbdev);
- mdev->ops->intr_workarounds(mdev);
-}
-
-static struct mbus_hw_ops mbus_hw_ops = {
- .request_threaded_irq = _mic_request_threaded_irq,
- .free_irq = _mic_free_irq,
- .ack_interrupt = _mic_ack_interrupt,
-};
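
A readability note: this file defines three near-identical glue layers (for
the VOP, SCIF and MIC bus DMA devices), distinguished only by the number of
leading underscores on the helper names; all of them resolve back to the
same struct mic_device via dev_get_drvdata() on a parent device.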
-
-/* Initialize the MIC bootparams */
-void mic_bootparam_init(struct mic_device *mdev)
-{
- struct mic_bootparam *bootparam = mdev->dp;
-
- bootparam->magic = cpu_to_le32(MIC_MAGIC);
- bootparam->h2c_config_db = -1;
- bootparam->node_id = mdev->id + 1;
- bootparam->scif_host_dma_addr = 0x0;
- bootparam->scif_card_dma_addr = 0x0;
- bootparam->c2h_scif_db = -1;
- bootparam->h2c_scif_db = -1;
-}
-
-static inline struct mic_device *cosmdev_to_mdev(struct cosm_device *cdev)
-{
- return dev_get_drvdata(cdev->dev.parent);
-}
-
-static void _mic_reset(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- mdev->ops->reset_fw_ready(mdev);
- mdev->ops->reset(mdev);
-}
-
-static bool _mic_ready(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- return mdev->ops->is_fw_ready(mdev);
-}
-
-/**
- * mic_request_dma_chans - Request DMA channels
- * @mdev: pointer to mic_device instance
- *
- * returns number of DMA channels acquired
- */
-static int mic_request_dma_chans(struct mic_device *mdev)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_MEMCPY, mask);
-
- do {
- chan = dma_request_channel(mask, mdev->ops->dma_filter,
- &mdev->pdev->dev);
- if (chan) {
- mdev->dma_ch[mdev->num_dma_ch++] = chan;
- if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
- break;
- }
- } while (chan);
- dev_info(&mdev->pdev->dev, "DMA channels # %d\n", mdev->num_dma_ch);
- return mdev->num_dma_ch;
-}
-
-/**
- * mic_free_dma_chans - release DMA channels
- * @mdev: pointer to mic_device instance
- *
- * returns none
- */
-static void mic_free_dma_chans(struct mic_device *mdev)
-{
- int i = 0;
-
- for (i = 0; i < mdev->num_dma_ch; i++) {
- dma_release_channel(mdev->dma_ch[i]);
- mdev->dma_ch[i] = NULL;
- }
- mdev->num_dma_ch = 0;
-}
-
-/**
- * _mic_start - Start the MIC.
- * @cdev: pointer to cosm_device instance
- * @id: MIC device id/index provided by COSM used in other drivers like SCIF
- *
- * This function prepares a MIC for boot and initiates boot.
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- *
- * For all cosm_hw_ops the caller holds a mutex to ensure serialization.
- */
-static int _mic_start(struct cosm_device *cdev, int id)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- int rc;
-
- mic_bootparam_init(mdev);
- mdev->dma_mbdev = mbus_register_device(&mdev->pdev->dev,
- MBUS_DEV_DMA_HOST, &mic_dma_ops,
- &mbus_hw_ops, id, mdev->mmio.va);
- if (IS_ERR(mdev->dma_mbdev)) {
- rc = PTR_ERR(mdev->dma_mbdev);
- goto unlock_ret;
- }
- if (!mic_request_dma_chans(mdev)) {
- rc = -ENODEV;
- goto dma_remove;
- }
- mdev->scdev = scif_register_device(&mdev->pdev->dev, MIC_SCIF_DEV,
- &__mic_dma_ops, &scif_hw_ops,
- id + 1, 0, &mdev->mmio,
- &mdev->aper, mdev->dp, NULL,
- mdev->dma_ch, mdev->num_dma_ch,
- true);
- if (IS_ERR(mdev->scdev)) {
- rc = PTR_ERR(mdev->scdev);
- goto dma_free;
- }
-
- mdev->vpdev = vop_register_device(&mdev->pdev->dev,
- VOP_DEV_TRNSP, &_mic_dma_ops,
- &vop_hw_ops, id + 1, &mdev->aper,
- mdev->dma_ch[0]);
- if (IS_ERR(mdev->vpdev)) {
- rc = PTR_ERR(mdev->vpdev);
- goto scif_remove;
- }
-
- rc = mdev->ops->load_mic_fw(mdev, NULL);
- if (rc)
- goto vop_remove;
- mic_smpt_restore(mdev);
- mic_intr_restore(mdev);
- mdev->intr_ops->enable_interrupts(mdev);
- mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
- mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
- mdev->ops->send_firmware_intr(mdev);
- goto unlock_ret;
-vop_remove:
- vop_unregister_device(mdev->vpdev);
-scif_remove:
- scif_unregister_device(mdev->scdev);
-dma_free:
- mic_free_dma_chans(mdev);
-dma_remove:
- mbus_unregister_device(mdev->dma_mbdev);
-unlock_ret:
- return rc;
-}
-
-/**
- * _mic_stop - Prepare the MIC for reset and trigger reset.
- * @cdev: pointer to cosm_device instance
- * @force: force a MIC to reset even if it is already offline.
- *
- * RETURNS: None.
- */
-static void _mic_stop(struct cosm_device *cdev, bool force)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- /*
- * Since SCIF handles card shutdown and reset (using COSM), it will
- * be the first to be registered and the last to be unregistered.
- */
- vop_unregister_device(mdev->vpdev);
- scif_unregister_device(mdev->scdev);
- mic_free_dma_chans(mdev);
- mbus_unregister_device(mdev->dma_mbdev);
- mic_bootparam_init(mdev);
-}
-
-static ssize_t _mic_family(struct cosm_device *cdev, char *buf)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- static const char *family[MIC_FAMILY_LAST] = { "x100", "Unknown" };
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", family[mdev->family]);
-}
-
-static ssize_t _mic_stepping(struct cosm_device *cdev, char *buf)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
- const char *string = "??";
-
- switch (mdev->stepping) {
- case MIC_A0_STEP:
- string = "A0";
- break;
- case MIC_B0_STEP:
- string = "B0";
- break;
- case MIC_B1_STEP:
- string = "B1";
- break;
- case MIC_C0_STEP:
- string = "C0";
- break;
- default:
- break;
- }
- return scnprintf(buf, PAGE_SIZE, "%s\n", string);
-}
-
-static struct mic_mw *_mic_aper(struct cosm_device *cdev)
-{
- struct mic_device *mdev = cosmdev_to_mdev(cdev);
-
- return &mdev->aper;
-}
-
-struct cosm_hw_ops cosm_hw_ops = {
- .reset = _mic_reset,
- .force_reset = _mic_reset,
- .post_reset = NULL,
- .ready = _mic_ready,
- .start = _mic_start,
- .stop = _mic_stop,
- .family = _mic_family,
- .stepping = _mic_stepping,
- .aper = _mic_aper,
-};
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
deleted file mode 100644
index ffda740e20d5..000000000000
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/debugfs.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-/* Debugfs parent dir */
-static struct dentry *mic_dbg;
-
-static int mic_smpt_show(struct seq_file *s, void *pos)
-{
- int i;
- struct mic_device *mdev = s->private;
- unsigned long flags;
-
- seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n",
- mdev->id, "SMPT entry", "SW DMA addr", "RefCount");
- seq_puts(s, "====================================================\n");
-
- if (mdev->smpt) {
-		struct mic_smpt_info *smpt_info = mdev->smpt;
-
-		spin_lock_irqsave(&smpt_info->smpt_lock, flags);
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n",
- " ", i, smpt_info->entry[i].dma_addr,
- smpt_info->entry[i].ref_count);
- }
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- }
- seq_puts(s, "====================================================\n");
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_smpt);
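-
-/*
- * Editorial note: DEFINE_SHOW_ATTRIBUTE(mic_smpt) (from
- * <linux/seq_file.h>) generates mic_smpt_open(), a single_open()
- * wrapper around mic_smpt_show(), together with the mic_smpt_fops
- * file_operations table passed to debugfs_create_file() in
- * mic_create_debug_dir() below.
- */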
-
-static int mic_post_code_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- u32 reg = mdev->ops->get_postcode(mdev);
-
- seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_post_code);
-
-static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- int reg;
- int i, j;
- u16 entry;
- u16 vector;
- struct pci_dev *pdev = mdev->pdev;
-
- if (pci_dev_msi_enabled(pdev)) {
- for (i = 0; i < mdev->irq_info.num_vectors; i++) {
- if (pdev->msix_enabled) {
- entry = mdev->irq_info.msix_entries[i].entry;
- vector = mdev->irq_info.msix_entries[i].vector;
- } else {
- entry = 0;
- vector = pdev->irq;
- }
-
- reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry);
-
- seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n",
- "IRQ:", vector, "Entry:", entry, i, reg);
-
- seq_printf(s, "%-10s", "offset:");
- for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
- seq_printf(s, "%4d ", j);
- seq_puts(s, "\n");
-
- seq_printf(s, "%-10s", "count:");
- for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
- seq_printf(s, "%4d ",
- (mdev->irq_info.mic_msi_map[i] &
- BIT(j)) ? 1 : 0);
- seq_puts(s, "\n\n");
- }
- } else {
- seq_puts(s, "MSI/MSIx interrupts not enabled\n");
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(mic_msi_irq_info);
-
-/*
- * mic_create_debug_dir - Initialize MIC debugfs entries.
- */
-void mic_create_debug_dir(struct mic_device *mdev)
-{
- char name[16];
-
- if (!mic_dbg)
- return;
-
- scnprintf(name, sizeof(name), "mic%d", mdev->id);
- mdev->dbg_dir = debugfs_create_dir(name, mic_dbg);
-
- debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev,
- &mic_smpt_fops);
-
- debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
- &mic_post_code_fops);
-
- debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
- &mic_msi_irq_info_fops);
-}
-
-/*
- * mic_delete_debug_dir - Uninitialize MIC debugfs entries.
- */
-void mic_delete_debug_dir(struct mic_device *mdev)
-{
- debugfs_remove_recursive(mdev->dbg_dir);
-}
-
-/*
- * mic_init_debugfs - Initialize global debugfs entry.
- */
-void __init mic_init_debugfs(void)
-{
- mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-}
-
-/*
- * mic_exit_debugfs - Uninitialize global debugfs entry.
- */
-void mic_exit_debugfs(void)
-{
- debugfs_remove(mic_dbg);
-}
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
deleted file mode 100644
index 41bcd308ae59..000000000000
--- a/drivers/misc/mic/host/mic_device.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_DEVICE_H_
-#define _MIC_DEVICE_H_
-
-#include <linux/cdev.h>
-#include <linux/idr.h>
-#include <linux/notifier.h>
-#include <linux/irqreturn.h>
-#include <linux/dmaengine.h>
-#include <linux/miscdevice.h>
-#include <linux/mic_bus.h>
-#include "../bus/scif_bus.h"
-#include "../bus/vop_bus.h"
-#include "../bus/cosm_bus.h"
-#include "mic_intr.h"
-
-/**
- * enum mic_stepping - MIC stepping ids.
- */
-enum mic_stepping {
- MIC_A0_STEP = 0x0,
- MIC_B0_STEP = 0x10,
- MIC_B1_STEP = 0x11,
- MIC_C0_STEP = 0x20,
-};
-
-extern struct cosm_hw_ops cosm_hw_ops;
-
-/**
- * struct mic_device - MIC device information for each card.
- *
- * @mmio: MMIO bar information.
- * @aper: Aperture bar information.
- * @family: The MIC family to which this device belongs.
- * @ops: MIC HW specific operations.
- * @id: The unique device id for this MIC device.
- * @stepping: Stepping ID.
- * @pdev: Underlying PCI device.
- * @mic_mutex: Mutex for synchronizing access to mic_device.
- * @intr_ops: HW specific interrupt operations.
- * @smpt_ops: Hardware specific SMPT operations.
- * @smpt: MIC SMPT information.
- * @intr_info: H/W specific interrupt information.
- * @irq_info: The OS specific irq information
- * @dbg_dir: debugfs directory of this MIC device.
- * @bootaddr: MIC boot address.
- * @dp: virtio device page
- * @dp_dma_addr: virtio device page DMA address.
- * @dma_mbdev: MIC BUS DMA device.
- * @dma_ch: Array of DMA channels.
- * @num_dma_ch: Number of DMA channels available.
- * @scdev: SCIF device on the SCIF virtual bus.
- * @vpdev: Virtio over PCIe device on the VOP virtual bus.
- * @cosm_dev: COSM device
- */
-struct mic_device {
- struct mic_mw mmio;
- struct mic_mw aper;
- enum mic_hw_family family;
- struct mic_hw_ops *ops;
- int id;
- enum mic_stepping stepping;
- struct pci_dev *pdev;
- struct mutex mic_mutex;
- struct mic_hw_intr_ops *intr_ops;
- struct mic_smpt_ops *smpt_ops;
- struct mic_smpt_info *smpt;
- struct mic_intr_info *intr_info;
- struct mic_irq_info irq_info;
- struct dentry *dbg_dir;
- u32 bootaddr;
- void *dp;
- dma_addr_t dp_dma_addr;
- struct mbus_device *dma_mbdev;
- struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
- int num_dma_ch;
- struct scif_hw_dev *scdev;
- struct vop_device *vpdev;
- struct cosm_device *cosm_dev;
-};
-
-/**
- * struct mic_hw_ops - MIC HW specific operations.
- * @aper_bar: Aperture bar resource number.
- * @mmio_bar: MMIO bar resource number.
- * @read_spad: Read from scratch pad register.
- * @write_spad: Write to scratch pad register.
- * @send_intr: Send an interrupt for a particular doorbell on the card.
- * @ack_interrupt: Hardware specific operations to ack the h/w on
- * receipt of an interrupt.
- * @intr_workarounds: Hardware specific workarounds needed after
- * handling an interrupt.
- * @reset: Reset the remote processor.
- * @reset_fw_ready: Reset firmware ready field.
- * @is_fw_ready: Check if firmware is ready for OS download.
- * @send_firmware_intr: Send an interrupt to the card firmware.
- * @load_mic_fw: Load firmware segments required to boot the card
- * into card memory. This includes the kernel, command line, ramdisk etc.
- * @get_postcode: Get post code status from firmware.
- * @dma_filter: DMA filter function to be used.
- */
-struct mic_hw_ops {
- u8 aper_bar;
- u8 mmio_bar;
- u32 (*read_spad)(struct mic_device *mdev, unsigned int idx);
- void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
- void (*send_intr)(struct mic_device *mdev, int doorbell);
- u32 (*ack_interrupt)(struct mic_device *mdev);
- void (*intr_workarounds)(struct mic_device *mdev);
- void (*reset)(struct mic_device *mdev);
- void (*reset_fw_ready)(struct mic_device *mdev);
- bool (*is_fw_ready)(struct mic_device *mdev);
- void (*send_firmware_intr)(struct mic_device *mdev);
- int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
- u32 (*get_postcode)(struct mic_device *mdev);
- bool (*dma_filter)(struct dma_chan *chan, void *param);
-};
-
-/**
- * mic_mmio_read - read from an MMIO register.
- * @mw: MMIO register base virtual address.
- * @offset: register offset.
- *
- * RETURNS: register value.
- */
-static inline u32 mic_mmio_read(struct mic_mw *mw, u32 offset)
-{
- return ioread32(mw->va + offset);
-}
-
-/**
- * mic_mmio_write - write to an MMIO register.
- * @mw: MMIO register base virtual address.
- * @val: the data value to put into the register
- * @offset: register offset.
- *
- * RETURNS: none.
- */
-static inline void
-mic_mmio_write(struct mic_mw *mw, u32 val, u32 offset)
-{
- iowrite32(val, mw->va + offset);
-}
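-
-/*
- * Illustrative sketch (not part of the original driver; the register
- * offset and bit are invented for the example): a typical
- * read-modify-write sequence built on the two helpers above.
- *
- *	u32 reg = mic_mmio_read(&mdev->mmio, 0x100);
- *
- *	mic_mmio_write(&mdev->mmio, reg | BIT(0), 0x100);
- */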
-
-void mic_bootparam_init(struct mic_device *mdev);
-void mic_create_debug_dir(struct mic_device *dev);
-void mic_delete_debug_dir(struct mic_device *dev);
-void __init mic_init_debugfs(void);
-void mic_exit_debugfs(void);
-#endif
diff --git a/drivers/misc/mic/host/mic_intr.c b/drivers/misc/mic/host/mic_intr.c
deleted file mode 100644
index 85b3221b5d40..000000000000
--- a/drivers/misc/mic/host/mic_intr.c
+++ /dev/null
@@ -1,635 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-
-static irqreturn_t mic_thread_fn(int irq, void *dev)
-{
- struct mic_device *mdev = dev;
- struct mic_intr_info *intr_info = mdev->intr_info;
- struct mic_irq_info *irq_info = &mdev->irq_info;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
- int i;
-
- spin_lock(&irq_info->mic_thread_lock);
- for (i = intr_info->intr_start_idx[MIC_INTR_DB];
- i < intr_info->intr_len[MIC_INTR_DB]; i++)
- if (test_and_clear_bit(i, &irq_info->mask)) {
- list_for_each_entry(intr_cb, &irq_info->cb_list[i],
- list)
- if (intr_cb->thread_fn)
- intr_cb->thread_fn(pdev->irq,
- intr_cb->data);
- }
- spin_unlock(&irq_info->mic_thread_lock);
- return IRQ_HANDLED;
-}
-
-/**
- * mic_interrupt - Generic interrupt handler for
- * MSI and INTx based interrupts.
- * @irq: interrupt to handle (unused)
- * @dev: pointer to the mic_device instance
- */
-static irqreturn_t mic_interrupt(int irq, void *dev)
-{
- struct mic_device *mdev = dev;
- struct mic_intr_info *intr_info = mdev->intr_info;
- struct mic_irq_info *irq_info = &mdev->irq_info;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
- u32 mask;
- int i;
-
- mask = mdev->ops->ack_interrupt(mdev);
- if (!mask)
- return IRQ_NONE;
-
- spin_lock(&irq_info->mic_intr_lock);
- for (i = intr_info->intr_start_idx[MIC_INTR_DB];
- i < intr_info->intr_len[MIC_INTR_DB]; i++)
- if (mask & BIT(i)) {
- list_for_each_entry(intr_cb, &irq_info->cb_list[i],
- list)
- if (intr_cb->handler)
- intr_cb->handler(pdev->irq,
- intr_cb->data);
- set_bit(i, &irq_info->mask);
- }
- spin_unlock(&irq_info->mic_intr_lock);
- return IRQ_WAKE_THREAD;
-}
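-
-/*
- * Editorial note on the handoff above: mic_interrupt() runs in
- * hard-irq context, acks the hardware, runs the atomic handlers and
- * records the pending sources in irq_info->mask; returning
- * IRQ_WAKE_THREAD makes the core invoke mic_thread_fn(), which
- * consumes the mask and calls the sleepable thread_fn callbacks.
- * Both are wired up together later in this file via, e.g.:
- *
- *	request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
- *			     0, "mic-msi", mdev);
- */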
-
-/* Return the interrupt offset from the index. Index is 0 based. */
-static u16 mic_map_src_to_offset(struct mic_device *mdev,
- int intr_src, enum mic_intr_type type)
-{
- if (type >= MIC_NUM_INTR_TYPES)
- return MIC_NUM_OFFSETS;
- if (intr_src >= mdev->intr_info->intr_len[type])
- return MIC_NUM_OFFSETS;
-
- return mdev->intr_info->intr_start_idx[type] + intr_src;
-}
-
-/* Return next available msix_entry. */
-static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
-{
- int i;
- struct mic_irq_info *info = &mdev->irq_info;
-
- for (i = 0; i < info->num_vectors; i++)
- if (!info->mic_msi_map[i])
- return &info->msix_entries[i];
- return NULL;
-}
-
-/**
- * mic_register_intr_callback - Register a callback handler for the
- * given source id.
- *
- * @mdev: pointer to the mic_device instance
- * @idx: The source id to be registered.
- * @handler: The function to be called when the source id receives
- * the interrupt.
- * @thread_fn: thread fn. corresponding to the handler
- * @data: Private data of the requester.
- * Return the callback structure that was registered or an
- * appropriate error on failure.
- */
-static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
- u8 idx, irq_handler_t handler, irq_handler_t thread_fn,
- void *data)
-{
- struct mic_intr_cb *intr_cb;
- unsigned long flags;
-	int rc;
-
-	intr_cb = kmalloc(sizeof(*intr_cb), GFP_KERNEL);
-	if (!intr_cb)
- return ERR_PTR(-ENOMEM);
-
- intr_cb->handler = handler;
- intr_cb->thread_fn = thread_fn;
- intr_cb->data = data;
- intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
- 0, 0, GFP_KERNEL);
- if (intr_cb->cb_id < 0) {
- rc = intr_cb->cb_id;
- goto ida_fail;
- }
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
-
- return intr_cb;
-ida_fail:
- kfree(intr_cb);
- return ERR_PTR(rc);
-}
-
-/**
- * mic_unregister_intr_callback - Unregister the callback handler
- * identified by its callback id.
- *
- * @mdev: pointer to the mic_device instance
- * @idx: The callback structure id to be unregistered.
- * Return the source id that was unregistered or MIC_NUM_OFFSETS if no
- * such callback handler was found.
- */
-static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
-{
- struct list_head *pos, *tmp;
- struct mic_intr_cb *intr_cb;
- unsigned long flags;
- int i;
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- for (i = 0; i < MIC_NUM_OFFSETS; i++) {
- list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
- intr_cb = list_entry(pos, struct mic_intr_cb, list);
- if (intr_cb->cb_id == idx) {
- list_del(pos);
- ida_simple_remove(&mdev->irq_info.cb_ida,
- intr_cb->cb_id);
- kfree(intr_cb);
- spin_unlock_irqrestore(
- &mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- return i;
- }
- }
- }
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- return MIC_NUM_OFFSETS;
-}
-
-/**
- * mic_setup_msix - Initializes MSIx interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc, i;
- int entry_size = sizeof(*mdev->irq_info.msix_entries);
-
- mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX,
- entry_size, GFP_KERNEL);
- if (!mdev->irq_info.msix_entries) {
- rc = -ENOMEM;
- goto err_nomem1;
- }
-
- for (i = 0; i < MIC_MIN_MSIX; i++)
- mdev->irq_info.msix_entries[i].entry = i;
-
- rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
- MIC_MIN_MSIX);
- if (rc) {
- dev_dbg(&pdev->dev, "Error enabling MSIx. rc = %d\n", rc);
- goto err_enable_msix;
- }
-
- mdev->irq_info.num_vectors = MIC_MIN_MSIX;
- mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
- mdev->irq_info.num_vectors), GFP_KERNEL);
-
- if (!mdev->irq_info.mic_msi_map) {
- rc = -ENOMEM;
- goto err_nomem2;
- }
-
- dev_dbg(&mdev->pdev->dev,
- "%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
- return 0;
-err_nomem2:
- pci_disable_msix(pdev);
-err_enable_msix:
- kfree(mdev->irq_info.msix_entries);
-err_nomem1:
- mdev->irq_info.num_vectors = 0;
- return rc;
-}
-
-/**
- * mic_setup_callbacks - Initialize data structures needed
- * to handle callbacks.
- *
- * @mdev: pointer to mic_device instance
- */
-static int mic_setup_callbacks(struct mic_device *mdev)
-{
- int i;
-
- mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS,
- sizeof(*mdev->irq_info.cb_list),
- GFP_KERNEL);
- if (!mdev->irq_info.cb_list)
- return -ENOMEM;
-
- for (i = 0; i < MIC_NUM_OFFSETS; i++)
- INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
- ida_init(&mdev->irq_info.cb_ida);
- spin_lock_init(&mdev->irq_info.mic_intr_lock);
- spin_lock_init(&mdev->irq_info.mic_thread_lock);
- return 0;
-}
-
-/**
- * mic_release_callbacks - Uninitialize data structures needed
- * to handle callbacks.
- *
- * @mdev: pointer to mic_device instance
- */
-static void mic_release_callbacks(struct mic_device *mdev)
-{
- unsigned long flags;
- struct list_head *pos, *tmp;
- struct mic_intr_cb *intr_cb;
- int i;
-
- spin_lock(&mdev->irq_info.mic_thread_lock);
- spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
- for (i = 0; i < MIC_NUM_OFFSETS; i++) {
- if (list_empty(&mdev->irq_info.cb_list[i]))
- break;
-
- list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
- intr_cb = list_entry(pos, struct mic_intr_cb, list);
- list_del(pos);
- ida_simple_remove(&mdev->irq_info.cb_ida,
- intr_cb->cb_id);
- kfree(intr_cb);
- }
- }
- spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
- spin_unlock(&mdev->irq_info.mic_thread_lock);
- ida_destroy(&mdev->irq_info.cb_ida);
- kfree(mdev->irq_info.cb_list);
-}
-
-/**
- * mic_setup_msi - Initializes MSI interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- rc = pci_enable_msi(pdev);
- if (rc) {
- dev_dbg(&pdev->dev, "Error enabling MSI. rc = %d\n", rc);
- return rc;
- }
-
- mdev->irq_info.num_vectors = 1;
- mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
- mdev->irq_info.num_vectors), GFP_KERNEL);
-
- if (!mdev->irq_info.mic_msi_map) {
- rc = -ENOMEM;
- goto err_nomem1;
- }
-
- rc = mic_setup_callbacks(mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error setting up callbacks\n");
- goto err_nomem2;
- }
-
- rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
- 0, "mic-msi", mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
- goto err_irq_req_fail;
- }
-
- dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
- return 0;
-err_irq_req_fail:
- mic_release_callbacks(mdev);
-err_nomem2:
- kfree(mdev->irq_info.mic_msi_map);
-err_nomem1:
- pci_disable_msi(pdev);
- mdev->irq_info.num_vectors = 0;
- return rc;
-}
-
-/**
- * mic_setup_intx - Initializes legacy interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- /* Enable intx */
- pci_intx(pdev, 1);
- rc = mic_setup_callbacks(mdev);
- if (rc) {
- dev_err(&pdev->dev, "Error setting up callbacks\n");
- goto err_nomem;
- }
-
- rc = request_threaded_irq(pdev->irq, mic_interrupt, mic_thread_fn,
- IRQF_SHARED, "mic-intx", mdev);
- if (rc)
- goto err;
-
- dev_dbg(&pdev->dev, "intx irq setup\n");
- return 0;
-err:
- mic_release_callbacks(mdev);
-err_nomem:
- return rc;
-}
-
-/**
- * mic_next_db - Retrieve the next doorbell interrupt source id.
- * The id is picked sequentially from the available pool of
- * doorbell ids.
- *
- * @mdev: pointer to the mic_device instance.
- *
- * Returns the next doorbell interrupt source.
- */
-int mic_next_db(struct mic_device *mdev)
-{
- int next_db;
-
- next_db = mdev->irq_info.next_avail_src %
- mdev->intr_info->intr_len[MIC_INTR_DB];
- mdev->irq_info.next_avail_src++;
- return next_db;
-}
-
-#define COOKIE_ID_SHIFT 16
-#define GET_ENTRY(cookie) ((cookie) & 0xFFFF)
-#define GET_OFFSET(cookie) ((cookie) >> COOKIE_ID_SHIFT)
-#define MK_COOKIE(x, y) ((x) | (y) << COOKIE_ID_SHIFT)
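-
-/*
- * Worked example (editorial, values invented): the cookie packs the
- * MSI-x table entry in the low 16 bits and the source offset (or
- * callback id) in the high 16 bits. MK_COOKIE(3, 7) gives 0x70003,
- * from which GET_ENTRY() recovers 3 and GET_OFFSET() recovers 7.
- */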
-
-/**
- * mic_request_threaded_irq - request an irq. mic_mutex needs
- * to be held before calling this function.
- *
- * @mdev: pointer to mic_device instance
- * @handler: The callback function that handles the interrupt.
- * The function needs to call ack_interrupt
- * (mdev->ops->ack_interrupt(mdev)) when handling the interrupts.
- * @thread_fn: thread fn required by request_threaded_irq.
- * @name: The ASCII name of the callee requesting the irq.
- * @data: private data that is returned back when calling the
- * function handler.
- * @intr_src: The source id of the requester. It's the doorbell id
- * for Doorbell interrupts and DMA channel id for DMA interrupts.
- * @type: The type of interrupt. Values defined in mic_intr_type
- *
- * returns: The cookie that is transparent to the caller. Passed
- * back when calling mic_free_irq. An appropriate error code
- * is returned on failure. Caller needs to use IS_ERR(return_val)
- * to check for failure and PTR_ERR(return_val) to obtain the
- * error code.
- *
- */
-struct mic_irq *
-mic_request_threaded_irq(struct mic_device *mdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src,
- enum mic_intr_type type)
-{
- u16 offset;
- int rc = 0;
- struct msix_entry *msix = NULL;
- unsigned long cookie = 0;
- u16 entry;
- struct mic_intr_cb *intr_cb;
- struct pci_dev *pdev = mdev->pdev;
-
- offset = mic_map_src_to_offset(mdev, intr_src, type);
- if (offset >= MIC_NUM_OFFSETS) {
- dev_err(&mdev->pdev->dev,
- "Error mapping index %d to a valid source id.\n",
- intr_src);
- rc = -EINVAL;
- goto err;
- }
-
- if (mdev->irq_info.num_vectors > 1) {
- msix = mic_get_available_vector(mdev);
- if (!msix) {
- dev_err(&mdev->pdev->dev,
- "No MSIx vectors available for use.\n");
- rc = -ENOSPC;
- goto err;
- }
-
- rc = request_threaded_irq(msix->vector, handler, thread_fn,
- 0, name, data);
- if (rc) {
- dev_dbg(&mdev->pdev->dev,
- "request irq failed rc = %d\n", rc);
- goto err;
- }
- entry = msix->entry;
- mdev->irq_info.mic_msi_map[entry] |= BIT(offset);
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- cookie = MK_COOKIE(entry, offset);
- dev_dbg(&mdev->pdev->dev, "irq: %d assigned for src: %d\n",
- msix->vector, intr_src);
- } else {
- intr_cb = mic_register_intr_callback(mdev, offset, handler,
- thread_fn, data);
- if (IS_ERR(intr_cb)) {
- dev_err(&mdev->pdev->dev,
- "No available callback entries for use\n");
- rc = PTR_ERR(intr_cb);
- goto err;
- }
-
- entry = 0;
- if (pci_dev_msi_enabled(pdev)) {
- mdev->irq_info.mic_msi_map[entry] |= (1 << offset);
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- }
- cookie = MK_COOKIE(entry, intr_cb->cb_id);
- dev_dbg(&mdev->pdev->dev, "callback %d registered for src: %d\n",
- intr_cb->cb_id, intr_src);
- }
- return (struct mic_irq *)cookie;
-err:
- return ERR_PTR(rc);
-}
-
-/**
- * mic_free_irq - free irq. mic_mutex
- * needs to be held before calling this function.
- *
- * @mdev: pointer to mic_device instance
- * @cookie: cookie obtained during a successful call to mic_request_threaded_irq
- * @data: private data specified by the calling function during the
- * mic_request_threaded_irq
- *
- * returns: none.
- */
-void mic_free_irq(struct mic_device *mdev,
- struct mic_irq *cookie, void *data)
-{
- u32 offset;
- u32 entry;
- u8 src_id;
- unsigned int irq;
- struct pci_dev *pdev = mdev->pdev;
-
- entry = GET_ENTRY((unsigned long)cookie);
- offset = GET_OFFSET((unsigned long)cookie);
- if (mdev->irq_info.num_vectors > 1) {
- if (entry >= mdev->irq_info.num_vectors) {
- dev_warn(&mdev->pdev->dev,
- "entry %d should be < num_irq %d\n",
- entry, mdev->irq_info.num_vectors);
- return;
- }
- irq = mdev->irq_info.msix_entries[entry].vector;
- free_irq(irq, data);
- mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset));
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, false);
-
- dev_dbg(&mdev->pdev->dev, "irq: %d freed\n", irq);
- } else {
- irq = pdev->irq;
- src_id = mic_unregister_intr_callback(mdev, offset);
- if (src_id >= MIC_NUM_OFFSETS) {
- dev_warn(&mdev->pdev->dev, "Error unregistering callback\n");
- return;
- }
- if (pci_dev_msi_enabled(pdev)) {
- mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id));
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, src_id, false);
- }
- dev_dbg(&mdev->pdev->dev, "callback %d unregistered for src: %d\n",
- offset, src_id);
- }
-}
-
-/**
- * mic_setup_interrupts - Initializes interrupts.
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int rc;
-
- rc = mic_setup_msix(mdev, pdev);
- if (!rc)
- goto done;
-
- rc = mic_setup_msi(mdev, pdev);
- if (!rc)
- goto done;
-
- rc = mic_setup_intx(mdev, pdev);
- if (rc) {
- dev_err(&mdev->pdev->dev, "no usable interrupts\n");
- return rc;
- }
-done:
- mdev->intr_ops->enable_interrupts(mdev);
- return 0;
-}
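-
-/*
- * Editorial aside: on current kernels this MSI-x -> MSI -> INTx
- * fallback is usually expressed with pci_alloc_irq_vectors(); a rough
- * (hypothetical, untested) equivalent of the setup above would be:
- *
- *	int nvec = pci_alloc_irq_vectors(pdev, 1, MIC_MIN_MSIX,
- *					 PCI_IRQ_MSIX | PCI_IRQ_MSI |
- *					 PCI_IRQ_LEGACY);
- *	if (nvec < 0)
- *		return nvec;
- */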
-
-/**
- * mic_free_interrupts - Frees interrupts setup by mic_setup_interrupts
- *
- * @mdev: pointer to mic_device instance
- * @pdev: PCI device structure
- *
- * returns none.
- */
-void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
-{
- int i;
-
- mdev->intr_ops->disable_interrupts(mdev);
- if (mdev->irq_info.num_vectors > 1) {
- for (i = 0; i < mdev->irq_info.num_vectors; i++) {
- if (mdev->irq_info.mic_msi_map[i])
- dev_warn(&pdev->dev, "irq %d may still be in use.\n",
- mdev->irq_info.msix_entries[i].vector);
- }
- kfree(mdev->irq_info.mic_msi_map);
- kfree(mdev->irq_info.msix_entries);
- pci_disable_msix(pdev);
- } else {
- if (pci_dev_msi_enabled(pdev)) {
- free_irq(pdev->irq, mdev);
- kfree(mdev->irq_info.mic_msi_map);
- pci_disable_msi(pdev);
- } else {
- free_irq(pdev->irq, mdev);
- }
- mic_release_callbacks(mdev);
- }
-}
-
-/**
- * mic_intr_restore - Restore MIC interrupt registers.
- *
- * @mdev: pointer to mic_device instance.
- *
- * Restore the interrupt registers to values previously
- * stored in the SW data structures. mic_mutex needs to
- * be held before calling this function.
- *
- * returns None.
- */
-void mic_intr_restore(struct mic_device *mdev)
-{
- int entry, offset;
- struct pci_dev *pdev = mdev->pdev;
-
- if (!pci_dev_msi_enabled(pdev))
- return;
-
- for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) {
- for (offset = 0; offset < MIC_NUM_OFFSETS; offset++) {
- if (mdev->irq_info.mic_msi_map[entry] & BIT(offset))
- mdev->intr_ops->program_msi_to_src_map(mdev,
- entry, offset, true);
- }
- }
-}
diff --git a/drivers/misc/mic/host/mic_intr.h b/drivers/misc/mic/host/mic_intr.h
deleted file mode 100644
index b14ba818006f..000000000000
--- a/drivers/misc/mic/host/mic_intr.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_INTR_H_
-#define _MIC_INTR_H_
-
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-/*
- * The minimum number of msix vectors required for normal operation.
- * 3 for virtio network, console and block devices.
- * 1 for card shutdown notifications.
- * 4 for host owned DMA channels.
- * 1 for SCIF.
- * Total: 3 + 1 + 4 + 1 = 9.
- */
-#define MIC_MIN_MSIX 9
-#define MIC_NUM_OFFSETS 32
-
-/**
- * enum mic_intr_type - The type of source that will generate
- * the interrupt. The number of types needs to be in sync with
- * MIC_NUM_INTR_TYPES.
- *
- * MIC_INTR_DB: The source is a doorbell
- * MIC_INTR_DMA: The source is a DMA channel
- * MIC_INTR_ERR: The source is an error interrupt e.g. SBOX ERR
- * MIC_NUM_INTR_TYPES: Total number of interrupt sources.
- */
-enum mic_intr_type {
- MIC_INTR_DB = 0,
- MIC_INTR_DMA,
- MIC_INTR_ERR,
- MIC_NUM_INTR_TYPES
-};
-
-/**
- * struct mic_intr_info - Contains h/w specific interrupt sources
- * information.
- *
- * @intr_start_idx: Contains the starting indexes of the
- * interrupt types.
- * @intr_len: Contains the length of the interrupt types.
- */
-struct mic_intr_info {
- u16 intr_start_idx[MIC_NUM_INTR_TYPES];
- u16 intr_len[MIC_NUM_INTR_TYPES];
-};
-
-/**
- * struct mic_irq_info - OS specific irq information
- *
- * @next_avail_src: next available doorbell that can be assigned.
- * @msix_entries: msix entries allocated while setting up MSI-x
- * @mic_msi_map: The MSI/MSI-x mapping information.
- * @num_vectors: The number of MSI/MSI-x vectors that have been allocated.
- * @cb_ida: callback ID allocator to track the callbacks registered.
- * @mic_intr_lock: spinlock to protect the interrupt callback list.
- * @mic_thread_lock: spinlock to protect the thread callback list.
- * This lock is used to protect against thread_fn while
- * mic_intr_lock is used to protect against interrupt handler.
- * @cb_list: Array of callback lists one for each source.
- * @mask: Mask used by the main thread fn to call the underlying thread fns.
- */
-struct mic_irq_info {
- int next_avail_src;
- struct msix_entry *msix_entries;
- u32 *mic_msi_map;
- u16 num_vectors;
- struct ida cb_ida;
- spinlock_t mic_intr_lock;
- spinlock_t mic_thread_lock;
- struct list_head *cb_list;
- unsigned long mask;
-};
-
-/**
- * struct mic_intr_cb - Interrupt callback structure.
- *
- * @handler: The callback function
- * @thread_fn: The thread_fn.
- * @data: Private data of the requester.
- * @cb_id: The callback id. Identifies this callback.
- * @list: list head pointing to the next callback structure.
- */
-struct mic_intr_cb {
- irq_handler_t handler;
- irq_handler_t thread_fn;
- void *data;
- int cb_id;
- struct list_head list;
-};
-
-/**
- * struct mic_irq - opaque pointer used as cookie
- */
-struct mic_irq;
-
-/* Forward declaration */
-struct mic_device;
-
-/**
- * struct mic_hw_intr_ops - MIC HW specific interrupt operations
- * @intr_init: Initialize H/W specific interrupt information.
- * @enable_interrupts: Enable interrupts from the hardware.
- * @disable_interrupts: Disable interrupts from the hardware.
- * @program_msi_to_src_map: Update MSI mapping registers with
- * irq information.
- * @read_msi_to_src_map: Read MSI mapping registers containing
- * irq information.
- */
-struct mic_hw_intr_ops {
- void (*intr_init)(struct mic_device *mdev);
- void (*enable_interrupts)(struct mic_device *mdev);
- void (*disable_interrupts)(struct mic_device *mdev);
-	void (*program_msi_to_src_map)(struct mic_device *mdev,
-				       int idx, int intr_src, bool set);
-	u32 (*read_msi_to_src_map)(struct mic_device *mdev,
-				   int idx);
-};
-
-int mic_next_db(struct mic_device *mdev);
-struct mic_irq *
-mic_request_threaded_irq(struct mic_device *mdev,
- irq_handler_t handler, irq_handler_t thread_fn,
- const char *name, void *data, int intr_src,
- enum mic_intr_type type);
-void mic_free_irq(struct mic_device *mdev,
- struct mic_irq *cookie, void *data);
-int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
-void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
-void mic_intr_restore(struct mic_device *mdev);
-#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
deleted file mode 100644
index ea4608527ea0..000000000000
--- a/drivers/misc/mic/host/mic_main.c
+++ /dev/null
@@ -1,335 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-#include "mic_smpt.h"
-
-static const char mic_driver_name[] = "mic";
-
-static const struct pci_device_id mic_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2250)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2251)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2252)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2253)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2254)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2255)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2256)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2257)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2258)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_2259)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225a)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225b)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225c)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225d)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MIC_X100_PCI_DEVICE_225e)},
-
- /* required last entry */
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
-
-/* ID allocator for MIC devices */
-static struct ida g_mic_ida;
-
-/* Initialize the device page */
-static int mic_dp_init(struct mic_device *mdev)
-{
- mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL);
- if (!mdev->dp)
- return -ENOMEM;
-
- mdev->dp_dma_addr = mic_map_single(mdev,
- mdev->dp, MIC_DP_SIZE);
- if (mic_map_error(mdev->dp_dma_addr)) {
- kfree(mdev->dp);
- dev_err(&mdev->pdev->dev, "%s %d err %d\n",
- __func__, __LINE__, -ENOMEM);
- return -ENOMEM;
- }
- mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
- mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
- return 0;
-}
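-
-/*
- * Editorial sketch: the 64-bit device page DMA address is published
- * to the card as two 32-bit scratchpad writes (low word in
- * MIC_DPLO_SPAD, high word in MIC_DPHI_SPAD), so the card side can
- * reassemble it as (assumed reconstruction, not original code):
- *
- *	u64 dp = lo | ((u64)hi << 32);
- */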
-
-/* Uninitialize the device page */
-static void mic_dp_uninit(struct mic_device *mdev)
-{
- mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE);
- kfree(mdev->dp);
-}
-
-/**
- * mic_ops_init - Initialize HW specific operation tables.
- *
- * @mdev: pointer to mic_device instance
- *
- * returns none.
- */
-static void mic_ops_init(struct mic_device *mdev)
-{
- switch (mdev->family) {
- case MIC_FAMILY_X100:
- mdev->ops = &mic_x100_ops;
- mdev->intr_ops = &mic_x100_intr_ops;
- mdev->smpt_ops = &mic_x100_smpt_ops;
- break;
- default:
- break;
- }
-}
-
-/**
- * mic_get_family - Determine hardware family to which this MIC belongs.
- *
- * @pdev: The pci device structure
- *
- * returns family.
- */
-static enum mic_hw_family mic_get_family(struct pci_dev *pdev)
-{
- enum mic_hw_family family;
-
- switch (pdev->device) {
- case MIC_X100_PCI_DEVICE_2250:
- case MIC_X100_PCI_DEVICE_2251:
- case MIC_X100_PCI_DEVICE_2252:
- case MIC_X100_PCI_DEVICE_2253:
- case MIC_X100_PCI_DEVICE_2254:
- case MIC_X100_PCI_DEVICE_2255:
- case MIC_X100_PCI_DEVICE_2256:
- case MIC_X100_PCI_DEVICE_2257:
- case MIC_X100_PCI_DEVICE_2258:
- case MIC_X100_PCI_DEVICE_2259:
- case MIC_X100_PCI_DEVICE_225a:
- case MIC_X100_PCI_DEVICE_225b:
- case MIC_X100_PCI_DEVICE_225c:
- case MIC_X100_PCI_DEVICE_225d:
- case MIC_X100_PCI_DEVICE_225e:
- family = MIC_FAMILY_X100;
- break;
- default:
- family = MIC_FAMILY_UNKNOWN;
- break;
- }
- return family;
-}
-
-/**
- * mic_device_init - Initializes the MIC device structure
- *
- * @mdev: pointer to mic_device instance
- * @pdev: The pci device structure
- *
- * returns none.
- */
-static void
-mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
-{
- mdev->pdev = pdev;
- mdev->family = mic_get_family(pdev);
- mdev->stepping = pdev->revision;
- mic_ops_init(mdev);
- mutex_init(&mdev->mic_mutex);
- mdev->irq_info.next_avail_src = 0;
-}
-
-/**
- * mic_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in mic_pci_tbl
- *
- * returns 0 on success, < 0 on failure.
- */
-static int mic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int rc;
- struct mic_device *mdev;
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev) {
- rc = -ENOMEM;
- goto mdev_alloc_fail;
- }
- mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
- if (mdev->id < 0) {
- rc = mdev->id;
- dev_err(&pdev->dev, "ida_simple_get failed rc %d\n", rc);
- goto ida_fail;
- }
-
- mic_device_init(mdev, pdev);
-
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev, "failed to enable pci device.\n");
- goto ida_remove;
- }
-
- pci_set_master(pdev);
-
- rc = pci_request_regions(pdev, mic_driver_name);
- if (rc) {
- dev_err(&pdev->dev, "failed to get pci regions.\n");
- goto disable_device;
- }
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- dev_err(&pdev->dev, "Cannot set DMA mask\n");
- goto release_regions;
- }
-
- mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar);
- mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar);
- mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar);
- if (!mdev->mmio.va) {
- dev_err(&pdev->dev, "Cannot remap MMIO BAR\n");
- rc = -EIO;
- goto release_regions;
- }
-
- mdev->aper.pa = pci_resource_start(pdev, mdev->ops->aper_bar);
- mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar);
- mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len);
- if (!mdev->aper.va) {
- dev_err(&pdev->dev, "Cannot remap Aperture BAR\n");
- rc = -EIO;
- goto unmap_mmio;
- }
-
- mdev->intr_ops->intr_init(mdev);
- rc = mic_setup_interrupts(mdev, pdev);
- if (rc) {
- dev_err(&pdev->dev, "mic_setup_interrupts failed %d\n", rc);
- goto unmap_aper;
- }
- rc = mic_smpt_init(mdev);
- if (rc) {
- dev_err(&pdev->dev, "smpt_init failed %d\n", rc);
- goto free_interrupts;
- }
-
- pci_set_drvdata(pdev, mdev);
-
- rc = mic_dp_init(mdev);
- if (rc) {
- dev_err(&pdev->dev, "mic_dp_init failed rc %d\n", rc);
- goto smpt_uninit;
- }
- mic_bootparam_init(mdev);
- mic_create_debug_dir(mdev);
-
- mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
- if (IS_ERR(mdev->cosm_dev)) {
- rc = PTR_ERR(mdev->cosm_dev);
- dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
- goto cleanup_debug_dir;
- }
- return 0;
-cleanup_debug_dir:
- mic_delete_debug_dir(mdev);
- mic_dp_uninit(mdev);
-smpt_uninit:
- mic_smpt_uninit(mdev);
-free_interrupts:
- mic_free_interrupts(mdev, pdev);
-unmap_aper:
- iounmap(mdev->aper.va);
-unmap_mmio:
- iounmap(mdev->mmio.va);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-ida_remove:
- ida_simple_remove(&g_mic_ida, mdev->id);
-ida_fail:
- kfree(mdev);
-mdev_alloc_fail:
- dev_err(&pdev->dev, "Probe failed rc %d\n", rc);
- return rc;
-}
-
-/**
- * mic_remove - Device Removal Routine
- * mic_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- *
- * @pdev: PCI device structure
- */
-static void mic_remove(struct pci_dev *pdev)
-{
- struct mic_device *mdev;
-
- mdev = pci_get_drvdata(pdev);
- if (!mdev)
- return;
-
- cosm_unregister_device(mdev->cosm_dev);
- mic_delete_debug_dir(mdev);
- mic_dp_uninit(mdev);
- mic_smpt_uninit(mdev);
- mic_free_interrupts(mdev, pdev);
- iounmap(mdev->aper.va);
- iounmap(mdev->mmio.va);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- ida_simple_remove(&g_mic_ida, mdev->id);
- kfree(mdev);
-}
-
-static struct pci_driver mic_driver = {
- .name = mic_driver_name,
- .id_table = mic_pci_tbl,
- .probe = mic_probe,
- .remove = mic_remove
-};
-
-static int __init mic_init(void)
-{
- int ret;
-
- request_module("mic_x100_dma");
- mic_init_debugfs();
- ida_init(&g_mic_ida);
- ret = pci_register_driver(&mic_driver);
- if (ret) {
- pr_err("pci_register_driver failed ret %d\n", ret);
- goto cleanup_debugfs;
- }
- return 0;
-cleanup_debugfs:
- ida_destroy(&g_mic_ida);
- mic_exit_debugfs();
- return ret;
-}
-
-static void __exit mic_exit(void)
-{
- pci_unregister_driver(&mic_driver);
- ida_destroy(&g_mic_ida);
- mic_exit_debugfs();
-}
-
-module_init(mic_init);
-module_exit(mic_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) MIC X100 Host driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/mic_smpt.c b/drivers/misc/mic/host/mic_smpt.c
deleted file mode 100644
index 50d1bebecd54..000000000000
--- a/drivers/misc/mic/host/mic_smpt.c
+++ /dev/null
@@ -1,427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/pci.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-
-static inline u64 mic_system_page_mask(struct mic_device *mdev)
-{
- return (1ULL << mdev->smpt->info.page_shift) - 1ULL;
-}
-
-static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa)
-{
- return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift;
-}
-
-static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index)
-{
- return mdev->smpt->info.base + (index * mdev->smpt->info.page_size);
-}
-
-static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa)
-{
- return pa & mic_system_page_mask(mdev);
-}
-
-static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa)
-{
- return ALIGN(pa - mic_system_page_mask(mdev),
- mdev->smpt->info.page_size);
-}
-
-static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa)
-{
- return ALIGN(pa, mdev->smpt->info.page_size);
-}
-
-/* Total cumulative system memory accessible by MIC across all SMPT entries */
-static inline u64 mic_max_system_memory(struct mic_device *mdev)
-{
- return mdev->smpt->info.num_reg * mdev->smpt->info.page_size;
-}
-
-/* Maximum system memory address accessible by MIC */
-static inline u64 mic_max_system_addr(struct mic_device *mdev)
-{
- return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL;
-}
-
-/* Check if the DMA address is a MIC system memory address */
-static inline bool
-mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa)
-{
- return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev);
-}
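-
-/*
- * Worked example (editorial; assumes the 16G X100 SMPT page size,
- * i.e. page_shift = 34, and a 16G-aligned base): for
- * mic_addr = base + 0x420001000, mic_sys_addr_to_smpt() returns
- * index 1 (0x420001000 >> 34) and mic_smpt_offset() returns
- * 0x20001000, so mic_to_dma_addr() yields
- * entry[1].dma_addr + 0x20001000.
- */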
-
-/* Populate an SMPT entry and update the reference counts. */
-static void mic_add_smpt_entry(int spt, s64 *ref, u64 addr,
- int entries, struct mic_device *mdev)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int i;
-
- for (i = spt; i < spt + entries; i++,
- addr += smpt_info->info.page_size) {
- if (!smpt_info->entry[i].ref_count &&
- (smpt_info->entry[i].dma_addr != addr)) {
- mdev->smpt_ops->set(mdev, addr, i);
- smpt_info->entry[i].dma_addr = addr;
- }
- smpt_info->entry[i].ref_count += ref[i - spt];
- }
-}
-
-/*
- * Find an available MIC address in MIC SMPT address space
- * for a given DMA address and size.
- */
-static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr,
- int entries, s64 *ref, size_t size)
-{
- int spt;
- int ae = 0;
- int i;
- unsigned long flags;
- dma_addr_t mic_addr = 0;
- dma_addr_t addr = dma_addr;
- struct mic_smpt_info *smpt_info = mdev->smpt;
-
- spin_lock_irqsave(&smpt_info->smpt_lock, flags);
-
- /* find existing entries */
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- if (smpt_info->entry[i].dma_addr == addr) {
- ae++;
- addr += smpt_info->info.page_size;
- } else if (ae) /* cannot find contiguous entries */
- goto not_found;
-
- if (ae == entries)
- goto found;
- }
-
- /* find free entry */
- for (ae = 0, i = 0; i < smpt_info->info.num_reg; i++) {
- ae = (smpt_info->entry[i].ref_count == 0) ? ae + 1 : 0;
- if (ae == entries)
- goto found;
- }
-
-not_found:
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- return mic_addr;
-
-found:
- spt = i - entries + 1;
- mic_addr = mic_smpt_to_pa(mdev, spt);
- mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
- smpt_info->map_count++;
- smpt_info->ref_count += (s64)size;
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- return mic_addr;
-}
-
-/*
- * Returns the number of SMPT entries needed to map the region from
- * dma_addr to dma_addr + size. Also fills in the reference count array
- * for each of those entries and, optionally, the starting SMPT address.
- */
-static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
- size_t size, s64 *ref, u64 *smpt_start)
-{
- u64 start = dma_addr;
- u64 end = dma_addr + size;
- int i = 0;
-
- while (start < end) {
- ref[i++] = min(mic_smpt_align_high(mdev, start + 1),
- end) - start;
- start = mic_smpt_align_high(mdev, start + 1);
- }
-
- if (smpt_start)
- *smpt_start = mic_smpt_align_low(mdev, dma_addr);
-
- return i;
-}
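-
-/*
- * Worked example (editorial, again assuming 16G pages): a 20G region
- * starting 4G into an SMPT page spans two entries; the loop above
- * produces ref[0] = 12G (bytes covered in the first page) and
- * ref[1] = 8G, and *smpt_start is the 16G-aligned base of dma_addr.
- */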
-
-/**
- * mic_to_dma_addr - Converts a MIC address to a DMA address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC address.
- *
- * returns a DMA address.
- */
-dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int spt;
- dma_addr_t dma_addr;
-
- if (!mic_is_system_addr(mdev, mic_addr)) {
- dev_err(&mdev->pdev->dev,
- "mic_addr is invalid. mic_addr = 0x%llx\n", mic_addr);
- return -EINVAL;
- }
- spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- dma_addr = smpt_info->entry[spt].dma_addr +
- mic_smpt_offset(mdev, mic_addr);
- return dma_addr;
-}
-
-/**
- * mic_map - Maps a DMA address to a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @dma_addr: DMA address.
- * @size: Size of the region to be mapped.
- *
- * This API converts the DMA address provided to a DMA address understood
- * by MIC. Caller should check for errors by calling mic_map_error(..).
- *
- * returns DMA address as required by MIC.
- */
-dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
-{
- dma_addr_t mic_addr = 0;
- int num_entries;
- s64 *ref;
- u64 smpt_start;
-
- if (!size || size > mic_max_system_memory(mdev))
- return mic_addr;
-
- ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
- if (!ref)
- return mic_addr;
-
- num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
- ref, &smpt_start);
-
- /* Set the smpt table appropriately and get 16G aligned mic address */
- mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
-
- kfree(ref);
-
- /*
-	 * If mic_addr is zero then it's an error case,
-	 * since a valid mic_addr can never be zero.
-	 * Otherwise generate mic_addr by adding the offset of dma_addr
-	 * within its 16G page.
- */
- if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
- dev_err(&mdev->pdev->dev,
- "mic_map failed dma_addr 0x%llx size 0x%lx\n",
- dma_addr, size);
- return mic_addr;
- } else {
- return mic_addr + mic_smpt_offset(mdev, dma_addr);
- }
-}
-
-/**
- * mic_unmap - Unmaps a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC physical address.
- * @size: Size of the region to be unmapped.
- *
- * This API unmaps the mappings created by mic_map(..).
- *
- * returns None.
- */
-void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- s64 *ref;
- int num_smpt;
- int spt;
- int i;
- unsigned long flags;
-
- if (!size)
- return;
-
- if (!mic_is_system_addr(mdev, mic_addr)) {
- dev_err(&mdev->pdev->dev,
- "invalid address: 0x%llx\n", mic_addr);
- return;
- }
-
- spt = mic_sys_addr_to_smpt(mdev, mic_addr);
- ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
- if (!ref)
- return;
-
- /* Get number of smpt entries to be mapped, ref count array */
- num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL);
-
- spin_lock_irqsave(&smpt_info->smpt_lock, flags);
- smpt_info->unmap_count++;
- smpt_info->ref_count -= (s64)size;
-
- for (i = spt; i < spt + num_smpt; i++) {
- smpt_info->entry[i].ref_count -= ref[i - spt];
- if (smpt_info->entry[i].ref_count < 0)
- dev_warn(&mdev->pdev->dev,
- "ref count for entry %d is negative\n", i);
- }
- spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
- kfree(ref);
-}
-
-/**
- * mic_map_single - Maps a virtual address to a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @va: Kernel direct mapped virtual address.
- * @size: Size of the region to be mapped.
- *
- * This API calls pci_map_single(..) for the direct mapped virtual address
- * and then converts the DMA address provided to a DMA address understood
- * by MIC. Caller should check for errors by calling mic_map_error(..).
- *
- * returns DMA address as required by MIC.
- */
-dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
-{
- dma_addr_t mic_addr = 0;
- struct pci_dev *pdev = mdev->pdev;
- dma_addr_t dma_addr =
- pci_map_single(pdev, va, size, PCI_DMA_BIDIRECTIONAL);
-
- if (!pci_dma_mapping_error(pdev, dma_addr)) {
- mic_addr = mic_map(mdev, dma_addr, size);
- if (!mic_addr) {
- dev_err(&mdev->pdev->dev,
- "mic_map failed dma_addr 0x%llx size 0x%lx\n",
- dma_addr, size);
- pci_unmap_single(pdev, dma_addr,
- size, PCI_DMA_BIDIRECTIONAL);
- }
- }
- return mic_addr;
-}
-
-/**
- * mic_unmap_single - Unmaps a MIC physical address.
- *
- * @mdev: pointer to mic_device instance.
- * @mic_addr: MIC physical address.
- * @size: Size of the region to be unmapped.
- *
- * This API unmaps the mappings created by mic_map_single(..).
- *
- * returns None.
- */
-void
-mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
-{
- struct pci_dev *pdev = mdev->pdev;
-	dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);
-
-	mic_unmap(mdev, mic_addr, size);
- pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
-}
-
-/**
- * mic_smpt_init - Initialize MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * returns 0 for success and -errno for error.
- */
-int mic_smpt_init(struct mic_device *mdev)
-{
- int i, err = 0;
- dma_addr_t dma_addr;
- struct mic_smpt_info *smpt_info;
-
- mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL);
- if (!mdev->smpt)
- return -ENOMEM;
-
- smpt_info = mdev->smpt;
- mdev->smpt_ops->init(mdev);
- smpt_info->entry = kmalloc_array(smpt_info->info.num_reg,
- sizeof(*smpt_info->entry), GFP_KERNEL);
- if (!smpt_info->entry) {
- err = -ENOMEM;
- goto free_smpt;
- }
- spin_lock_init(&smpt_info->smpt_lock);
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- dma_addr = i * smpt_info->info.page_size;
- smpt_info->entry[i].dma_addr = dma_addr;
- smpt_info->entry[i].ref_count = 0;
- mdev->smpt_ops->set(mdev, dma_addr, i);
- }
- smpt_info->ref_count = 0;
- smpt_info->map_count = 0;
- smpt_info->unmap_count = 0;
- return 0;
-free_smpt:
- kfree(smpt_info);
- return err;
-}
-
-/**
- * mic_smpt_uninit - Uninitialize MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * returns None.
- */
-void mic_smpt_uninit(struct mic_device *mdev)
-{
- struct mic_smpt_info *smpt_info = mdev->smpt;
- int i;
-
- dev_dbg(&mdev->pdev->dev,
- "nodeid %d SMPT ref count %lld map %lld unmap %lld\n",
- mdev->id, smpt_info->ref_count,
- smpt_info->map_count, smpt_info->unmap_count);
-
- for (i = 0; i < smpt_info->info.num_reg; i++) {
- dev_dbg(&mdev->pdev->dev,
- "SMPT entry[%d] dma_addr = 0x%llx ref_count = %lld\n",
- i, smpt_info->entry[i].dma_addr,
- smpt_info->entry[i].ref_count);
- if (smpt_info->entry[i].ref_count)
- dev_warn(&mdev->pdev->dev,
- "ref count for entry %d is not zero\n", i);
- }
- kfree(smpt_info->entry);
- kfree(smpt_info);
-}
-
-/**
- * mic_smpt_restore - Restore MIC System Memory Page Tables.
- *
- * @mdev: pointer to mic_device instance.
- *
- * Restore the SMPT registers to values previously stored in the
- * SW data structures. Some MIC steppings lose register state
- * across resets and this API should be called for performing
- * a restore operation if required.
- *
- * returns None.
- */
-void mic_smpt_restore(struct mic_device *mdev)
-{
- int i;
- dma_addr_t dma_addr;
-
- for (i = 0; i < mdev->smpt->info.num_reg; i++) {
- dma_addr = mdev->smpt->entry[i].dma_addr;
- mdev->smpt_ops->set(mdev, dma_addr, i);
- }
-}
diff --git a/drivers/misc/mic/host/mic_smpt.h b/drivers/misc/mic/host/mic_smpt.h
deleted file mode 100644
index 3b1ec14a9d81..000000000000
--- a/drivers/misc/mic/host/mic_smpt.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef MIC_SMPT_H
-#define MIC_SMPT_H
-/**
- * struct mic_smpt_ops - MIC HW specific SMPT operations.
- * @init: Initialize hardware specific SMPT information in mic_smpt_hw_info.
- * @set: Set the value for a particular SMPT entry.
- */
-struct mic_smpt_ops {
- void (*init)(struct mic_device *mdev);
- void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index);
-};
-
-/**
- * struct mic_smpt - MIC SMPT entry information.
- * @dma_addr: Base DMA address for this SMPT entry.
- * @ref_count: Number of active mappings for this SMPT entry in bytes.
- */
-struct mic_smpt {
- dma_addr_t dma_addr;
- s64 ref_count;
-};
-
-/**
- * struct mic_smpt_hw_info - MIC SMPT hardware specific information.
- * @num_reg: Number of SMPT registers.
- * @page_shift: System memory page shift.
- * @page_size: System memory page size.
- * @base: System address base.
- */
-struct mic_smpt_hw_info {
- u8 num_reg;
- u8 page_shift;
- u64 page_size;
- u64 base;
-};
-
-/**
- * struct mic_smpt_info - MIC SMPT information.
- * @entry: Array of SMPT entries.
- * @smpt_lock: Spin lock protecting access to SMPT data structures.
- * @info: Hardware specific SMPT information.
- * @ref_count: Number of active SMPT mappings (for debug).
- * @map_count: Number of SMPT mappings created (for debug).
- * @unmap_count: Number of SMPT mappings destroyed (for debug).
- */
-struct mic_smpt_info {
- struct mic_smpt *entry;
- spinlock_t smpt_lock;
- struct mic_smpt_hw_info info;
- s64 ref_count;
- s64 map_count;
- s64 unmap_count;
-};
-
-dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size);
-void mic_unmap_single(struct mic_device *mdev,
- dma_addr_t mic_addr, size_t size);
-dma_addr_t mic_map(struct mic_device *mdev,
- dma_addr_t dma_addr, size_t size);
-void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
-dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
-
-/**
- * mic_map_error - Check a MIC address for errors.
- *
- * @mdev: pointer to mic_device instance.
- *
- * returns Whether there was an error during mic_map..(..) APIs.
- */
-static inline bool mic_map_error(dma_addr_t mic_addr)
-{
- return !mic_addr;
-}
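-
-/*
- * Usage sketch (editorial, mirroring mic_dp_init() in mic_main.c):
- *
- *	dma_addr_t mic_addr = mic_map_single(mdev, va, size);
- *
- *	if (mic_map_error(mic_addr))
- *		return -ENOMEM;
- */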
-
-int mic_smpt_init(struct mic_device *mdev);
-void mic_smpt_uninit(struct mic_device *mdev);
-void mic_smpt_restore(struct mic_device *mdev);
-
-#endif
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
deleted file mode 100644
index f5536c1ad607..000000000000
--- a/drivers/misc/mic/host/mic_x100.c
+++ /dev/null
@@ -1,585 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#include <linux/fs.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/delay.h>
-
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_x100.h"
-#include "mic_smpt.h"
-
-static const u16 mic_x100_intr_init[] = {
- MIC_X100_DOORBELL_IDX_START,
- MIC_X100_DMA_IDX_START,
- MIC_X100_ERR_IDX_START,
- MIC_X100_NUM_DOORBELL,
- MIC_X100_NUM_DMA,
- MIC_X100_NUM_ERR,
-};
-
-/**
- * mic_x100_write_spad - write to the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to the scratchpad register, 0 based
- * @val: the data value to put into the register
- *
- * This function allows writing of a 32bit value to the indexed scratchpad
- * register.
- *
- * RETURNS: none.
- */
-static void
-mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val)
-{
- dev_dbg(&mdev->pdev->dev, "Writing 0x%x to scratch pad index %d\n",
- val, idx);
- mic_mmio_write(&mdev->mmio, val,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-}
-
-/**
- * mic_x100_read_spad - read from the scratchpad register
- * @mdev: pointer to mic_device instance
- * @idx: index to scratchpad register, 0 based
- *
- * This function allows reading of the 32bit scratchpad register.
- *
- * RETURNS: The value in the scratchpad register.
- */
-static u32
-mic_x100_read_spad(struct mic_device *mdev, unsigned int idx)
-{
- u32 val = mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SPAD0 + idx * 4);
-
- dev_dbg(&mdev->pdev->dev,
- "Reading 0x%x from scratch pad index %d\n", val, idx);
- return val;
-}
-
-/**
- * mic_x100_enable_interrupts - Enable interrupts.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_enable_interrupts(struct mic_device *mdev)
-{
- u32 reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
- u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
-
- reg = mic_mmio_read(mw, sice0);
- reg |= MIC_X100_SBOX_DBR_BITS(0xf) | MIC_X100_SBOX_DMA_BITS(0xff);
- mic_mmio_write(mw, reg, sice0);
-
- /*
- * Enable auto-clear when enabling interrupts. Applicable only for
- * MSI-x. Legacy and MSI mode cannot have auto-clear enabled.
- */
- if (mdev->irq_info.num_vectors > 1) {
- reg = mic_mmio_read(mw, siac0);
- reg |= MIC_X100_SBOX_DBR_BITS(0xf) |
- MIC_X100_SBOX_DMA_BITS(0xff);
- mic_mmio_write(mw, reg, siac0);
- }
-}
-
-/**
- * mic_x100_disable_interrupts - Disable interrupts.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_disable_interrupts(struct mic_device *mdev)
-{
- u32 reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 sice0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICE0;
- u32 siac0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SIAC0;
- u32 sicc0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICC0;
-
- reg = mic_mmio_read(mw, sice0);
- mic_mmio_write(mw, reg, sicc0);
-
- if (mdev->irq_info.num_vectors > 1) {
- reg = mic_mmio_read(mw, siac0);
- reg &= ~(MIC_X100_SBOX_DBR_BITS(0xf) |
- MIC_X100_SBOX_DMA_BITS(0xff));
- mic_mmio_write(mw, reg, siac0);
- }
-}
-
-/**
- * mic_x100_send_sbox_intr - Send an MIC_X100_SBOX interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number
- */
-static void mic_x100_send_sbox_intr(struct mic_device *mdev,
- int doorbell)
-{
- struct mic_mw *mw = &mdev->mmio;
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR0 + doorbell * 8;
- u32 apicicr_low = mic_mmio_read(mw, MIC_X100_SBOX_BASE_ADDRESS +
- apic_icr_offset);
-
- /* for MIC we need to make sure we "hit" the send_icr bit (13) */
- apicicr_low = (apicicr_low | (1 << 13));
-
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-/**
- * mic_x100_send_rdmasr_intr - Send an RDMASR interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number
- */
-static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
- int doorbell)
-{
- int rdmasr_offset = MIC_X100_SBOX_RDMASR0 + (doorbell << 2);
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(&mdev->mmio, 0,
- MIC_X100_SBOX_BASE_ADDRESS + rdmasr_offset);
-}
-
-/**
- * mic_x100_send_intr - Send interrupt to MIC.
- * @mdev: pointer to mic_device instance
- * @doorbell: doorbell number.
- */
-static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
-{
- int rdmasr_db;
- if (doorbell < MIC_X100_NUM_SBOX_IRQ) {
- mic_x100_send_sbox_intr(mdev, doorbell);
- } else {
- rdmasr_db = doorbell - MIC_X100_NUM_SBOX_IRQ;
- mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
- }
-}
-
-/**
- * mic_x100_ack_interrupt - Read the interrupt sources register and
- * clear it. This function will be called in the MSI/INTx case.
- * @mdev: Pointer to mic_device instance.
- *
- * Returns: bitmask of interrupt sources triggered.
- */
-static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
-{
- u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0;
- u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
- mic_mmio_write(&mdev->mmio, reg, sicr0);
- return reg;
-}
-
-/**
- * mic_x100_intr_workarounds - Hardware specific workarounds that must be
- * invoked every time an interrupt is handled.
- * @mdev: Pointer to mic_device instance.
- *
- * Returns: none
- */
-static void mic_x100_intr_workarounds(struct mic_device *mdev)
-{
- struct mic_mw *mw = &mdev->mmio;
-
- /* Clear pending bit array. */
- if (MIC_A0_STEP == mdev->stepping)
- mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MSIXPBACR);
-
- if (mdev->stepping >= MIC_B0_STEP)
- mdev->intr_ops->enable_interrupts(mdev);
-}
-
-/**
- * mic_x100_hw_intr_init - Initialize h/w specific interrupt
- * information.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_hw_intr_init(struct mic_device *mdev)
-{
- mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init;
-}
-
-/**
- * mic_x100_read_msi_to_src_map - read from the MSI mapping registers
- * @mdev: pointer to mic_device instance
- * @idx: index to the mapping register, 0 based
- *
- * This function allows reading of the 32bit MSI mapping register.
- *
- * RETURNS: The value in the register.
- */
-static u32
-mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx)
-{
- return mic_mmio_read(&mdev->mmio,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MXAR0 + idx * 4);
-}
-
-/**
- * mic_x100_program_msi_to_src_map - program the MSI mapping registers
- * @mdev: pointer to mic_device instance
- * @idx: index to the mapping register, 0 based
- * @offset: The bit offset in the register that needs to be updated.
- * @set: boolean specifying if the bit in the specified offset needs
- * to be set or cleared.
- *
- * RETURNS: None.
- */
-static void
-mic_x100_program_msi_to_src_map(struct mic_device *mdev,
- int idx, int offset, bool set)
-{
- unsigned long reg;
- struct mic_mw *mw = &mdev->mmio;
- u32 mxar = MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_MXAR0 + idx * 4;
-
- reg = mic_mmio_read(mw, mxar);
- if (set)
- __set_bit(offset, &reg);
- else
- __clear_bit(offset, &reg);
- mic_mmio_write(mw, reg, mxar);
-}
-
-/*
- * mic_x100_reset_fw_ready - Reset Firmware ready status field.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_reset_fw_ready(struct mic_device *mdev)
-{
- mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0);
-}
-
-/*
- * mic_x100_is_fw_ready - Check if firmware is ready.
- * @mdev: pointer to mic_device instance
- */
-static bool mic_x100_is_fw_ready(struct mic_device *mdev)
-{
- u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
- return MIC_X100_SPAD2_DOWNLOAD_STATUS(scratch2) ? true : false;
-}
-
-/**
- * mic_x100_get_apic_id - Get bootstrap APIC ID.
- * @mdev: pointer to mic_device instance
- */
-static u32 mic_x100_get_apic_id(struct mic_device *mdev)
-{
- u32 scratch2 = 0;
-
- scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
- return MIC_X100_SPAD2_APIC_ID(scratch2);
-}
-
-/**
- * mic_x100_send_firmware_intr - Send an interrupt to the firmware on MIC.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_send_firmware_intr(struct mic_device *mdev)
-{
- u32 apicicr_low;
- u64 apic_icr_offset = MIC_X100_SBOX_APICICR7;
- int vector = MIC_X100_BSP_INTERRUPT_VECTOR;
- struct mic_mw *mw = &mdev->mmio;
-
- /*
- * For MIC we need to make sure we "hit"
- * the send_icr bit (13).
- */
- apicicr_low = (vector | (1 << 13));
-
- mic_mmio_write(mw, mic_x100_get_apic_id(mdev),
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset + 4);
-
- /* Ensure that the interrupt is ordered w.r.t. previous stores. */
- wmb();
- mic_mmio_write(mw, apicicr_low,
- MIC_X100_SBOX_BASE_ADDRESS + apic_icr_offset);
-}
-
-/**
- * mic_x100_hw_reset - Reset the MIC device.
- * @mdev: pointer to mic_device instance
- */
-static void mic_x100_hw_reset(struct mic_device *mdev)
-{
- u32 reset_reg;
- u32 rgcr = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_RGCR;
- struct mic_mw *mw = &mdev->mmio;
-
- /* Ensure that the reset is ordered w.r.t. previous loads and stores */
- mb();
- /* Trigger reset */
- reset_reg = mic_mmio_read(mw, rgcr);
- reset_reg |= 0x1;
- mic_mmio_write(mw, reset_reg, rgcr);
- /*
-	 * Delay for at least 1 second after triggering reset;
-	 * shorter delays have been seen to cause problems.
- */
- msleep(1000);
-}
-
-/**
- * mic_x100_load_command_line - Load command line to MIC.
- * @mdev: pointer to mic_device instance
- * @fw: the firmware image
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
-{
- u32 len = 0;
- u32 boot_mem;
- char *buf;
- void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size;
-#define CMDLINE_SIZE 2048
-
- boot_mem = mdev->aper.len >> 20;
- buf = kzalloc(CMDLINE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += scnprintf(buf, CMDLINE_SIZE - len,
- " mem=%dM", boot_mem);
- if (mdev->cosm_dev->cmdline)
- scnprintf(buf + len, CMDLINE_SIZE - len, " %s",
- mdev->cosm_dev->cmdline);
- memcpy_toio(cmd_line_va, buf, strlen(buf) + 1);
- kfree(buf);
- return 0;
-}
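To make the string construction above concrete, here is a small standalone sketch of the command line produced for a hypothetical 8 GB aperture with a user-supplied "console=ttyS0" appended; the values are illustrative only, not taken from real hardware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t aper_len = 8ULL << 30;		/* hypothetical 8 GB aperture */
	uint32_t boot_mem = aper_len >> 20;	/* aperture length in MB: 8192 */
	char buf[2048];
	int len;

	len = snprintf(buf, sizeof(buf), " mem=%uM", boot_mem);
	snprintf(buf + len, sizeof(buf) - len, " %s", "console=ttyS0");
	printf("card command line:%s\n", buf);	/* " mem=8192M console=ttyS0" */
	return 0;
}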
-
-/**
- * mic_x100_load_ramdisk - Load ramdisk to MIC.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_ramdisk(struct mic_device *mdev)
-{
- const struct firmware *fw;
- int rc;
- struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr;
-
- rc = request_firmware(&fw, mdev->cosm_dev->ramdisk, &mdev->pdev->dev);
- if (rc < 0) {
- dev_err(&mdev->pdev->dev,
- "ramdisk request_firmware failed: %d %s\n",
- rc, mdev->cosm_dev->ramdisk);
- goto error;
- }
- /*
- * Typically the bootaddr for card OS is 64M
- * so copy over the ramdisk @ 128M.
- */
- memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
- iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
- iowrite32(fw->size, &bp->hdr.ramdisk_size);
- release_firmware(fw);
-error:
- return rc;
-}
-
-/**
- * mic_x100_get_boot_addr - Get MIC boot address.
- * @mdev: pointer to mic_device instance
- *
- * This function is called during firmware load to determine
- * the address at which the OS should be downloaded in card
- * memory i.e. GDDR.
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_get_boot_addr(struct mic_device *mdev)
-{
- u32 scratch2, boot_addr;
- int rc = 0;
-
- scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
- boot_addr = MIC_X100_SPAD2_DOWNLOAD_ADDR(scratch2);
- dev_dbg(&mdev->pdev->dev, "%s %d boot_addr 0x%x\n",
- __func__, __LINE__, boot_addr);
- if (boot_addr > (1 << 31)) {
- dev_err(&mdev->pdev->dev,
- "incorrect bootaddr 0x%x\n",
- boot_addr);
- rc = -EINVAL;
- goto error;
- }
- mdev->bootaddr = boot_addr;
-error:
- return rc;
-}
-
-/**
- * mic_x100_load_firmware - Load firmware to MIC.
- * @mdev: pointer to mic_device instance
- * @buf: buffer containing boot string including firmware/ramdisk path.
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-static int
-mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
-{
- int rc;
- const struct firmware *fw;
-
- rc = mic_x100_get_boot_addr(mdev);
- if (rc)
- return rc;
- /* load OS */
- rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
- if (rc < 0) {
- dev_err(&mdev->pdev->dev,
-			"firmware request_firmware failed: %d %s\n",
- rc, mdev->cosm_dev->firmware);
- return rc;
- }
- if (mdev->bootaddr > mdev->aper.len - fw->size) {
- rc = -EINVAL;
- dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
- __func__, __LINE__, rc, mdev->bootaddr);
- goto error;
- }
- memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
- mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
- if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
- rc = -EINVAL;
- dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
- __func__, __LINE__, rc);
- goto error;
- }
- /* load command line */
- rc = mic_x100_load_command_line(mdev, fw);
- if (rc) {
- dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
- __func__, __LINE__, rc);
- goto error;
- }
- release_firmware(fw);
- /* load ramdisk */
- if (mdev->cosm_dev->ramdisk)
- rc = mic_x100_load_ramdisk(mdev);
-
- return rc;
-
-error:
- release_firmware(fw);
- return rc;
-}
-
-/**
- * mic_x100_get_postcode - Get postcode status from firmware.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: postcode.
- */
-static u32 mic_x100_get_postcode(struct mic_device *mdev)
-{
- return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE);
-}
-
-/**
- * mic_x100_smpt_set - Update an SMPT entry with a DMA address.
- * @mdev: pointer to mic_device instance
- * @dma_addr: DMA address to use
- * @index: entry to write to
- *
- * RETURNS: none.
- */
-static void
-mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index)
-{
-#define SNOOP_ON (0 << 0)
-#define SNOOP_OFF (1 << 0)
-/*
- * Sbox Smpt Reg Bits:
- * Bits 31:2 Host address
- * Bit  1	RSVD
- * Bit  0	No snoop
- */
-#define BUILD_SMPT(NO_SNOOP, HOST_ADDR) \
- (u32)(((HOST_ADDR) << 2) | ((NO_SNOOP) & 0x01))
-
- uint32_t smpt_reg_val = BUILD_SMPT(SNOOP_ON,
- dma_addr >> mdev->smpt->info.page_shift);
- mic_mmio_write(&mdev->mmio, smpt_reg_val,
- MIC_X100_SBOX_BASE_ADDRESS +
- MIC_X100_SBOX_SMPT00 + (4 * index));
-}
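To illustrate the BUILD_SMPT() encoding, the sketch below computes the register value for a hypothetical 32 GB host DMA address using the X100 page shift of 34 (16 GB pages, set up in mic_x100_smpt_hw_init() below): the address falls in SMPT page 2, giving 0x8 once shifted into bits 31:2.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_addr = 0x800000000ULL;	/* hypothetical 32 GB host address */
	unsigned int page_shift = 34;		/* X100 SMPT page shift (16 GB pages) */
	uint32_t no_snoop = 0;			/* SNOOP_ON */
	uint32_t reg = (uint32_t)(((dma_addr >> page_shift) << 2) | (no_snoop & 0x01));

	printf("SMPT entry: 0x%08x\n", reg);	/* 0x00000008 */
	return 0;
}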
-
-/**
- * mic_x100_smpt_hw_init - Initialize SMPT X100 specific fields.
- * @mdev: pointer to mic_device instance
- *
- * RETURNS: none.
- */
-static void mic_x100_smpt_hw_init(struct mic_device *mdev)
-{
- struct mic_smpt_hw_info *info = &mdev->smpt->info;
-
- info->num_reg = 32;
- info->page_shift = 34;
- info->page_size = (1ULL << info->page_shift);
- info->base = 0x8000000000ULL;
-}
-
-struct mic_smpt_ops mic_x100_smpt_ops = {
- .init = mic_x100_smpt_hw_init,
- .set = mic_x100_smpt_set,
-};
-
-static bool mic_x100_dma_filter(struct dma_chan *chan, void *param)
-{
- if (chan->device->dev->parent == (struct device *)param)
- return true;
- return false;
-}
-
-struct mic_hw_ops mic_x100_ops = {
- .aper_bar = MIC_X100_APER_BAR,
- .mmio_bar = MIC_X100_MMIO_BAR,
- .read_spad = mic_x100_read_spad,
- .write_spad = mic_x100_write_spad,
- .send_intr = mic_x100_send_intr,
- .ack_interrupt = mic_x100_ack_interrupt,
- .intr_workarounds = mic_x100_intr_workarounds,
- .reset = mic_x100_hw_reset,
- .reset_fw_ready = mic_x100_reset_fw_ready,
- .is_fw_ready = mic_x100_is_fw_ready,
- .send_firmware_intr = mic_x100_send_firmware_intr,
- .load_mic_fw = mic_x100_load_firmware,
- .get_postcode = mic_x100_get_postcode,
- .dma_filter = mic_x100_dma_filter,
-};
-
-struct mic_hw_intr_ops mic_x100_intr_ops = {
- .intr_init = mic_x100_hw_intr_init,
- .enable_interrupts = mic_x100_enable_interrupts,
- .disable_interrupts = mic_x100_disable_interrupts,
- .program_msi_to_src_map = mic_x100_program_msi_to_src_map,
- .read_msi_to_src_map = mic_x100_read_msi_to_src_map,
-};
diff --git a/drivers/misc/mic/host/mic_x100.h b/drivers/misc/mic/host/mic_x100.h
deleted file mode 100644
index aebcaed6fa72..000000000000
--- a/drivers/misc/mic/host/mic_x100.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * Intel MIC Host driver.
- */
-#ifndef _MIC_X100_HW_H_
-#define _MIC_X100_HW_H_
-
-#define MIC_X100_PCI_DEVICE_2250 0x2250
-#define MIC_X100_PCI_DEVICE_2251 0x2251
-#define MIC_X100_PCI_DEVICE_2252 0x2252
-#define MIC_X100_PCI_DEVICE_2253 0x2253
-#define MIC_X100_PCI_DEVICE_2254 0x2254
-#define MIC_X100_PCI_DEVICE_2255 0x2255
-#define MIC_X100_PCI_DEVICE_2256 0x2256
-#define MIC_X100_PCI_DEVICE_2257 0x2257
-#define MIC_X100_PCI_DEVICE_2258 0x2258
-#define MIC_X100_PCI_DEVICE_2259 0x2259
-#define MIC_X100_PCI_DEVICE_225a 0x225a
-#define MIC_X100_PCI_DEVICE_225b 0x225b
-#define MIC_X100_PCI_DEVICE_225c 0x225c
-#define MIC_X100_PCI_DEVICE_225d 0x225d
-#define MIC_X100_PCI_DEVICE_225e 0x225e
-
-#define MIC_X100_APER_BAR 0
-#define MIC_X100_MMIO_BAR 4
-
-#define MIC_X100_SBOX_BASE_ADDRESS 0x00010000
-#define MIC_X100_SBOX_SPAD0 0x0000AB20
-#define MIC_X100_SBOX_SICR0_DBR(x) ((x) & 0xf)
-#define MIC_X100_SBOX_SICR0_DMA(x) (((x) >> 8) & 0xff)
-#define MIC_X100_SBOX_SICE0_DBR(x) ((x) & 0xf)
-#define MIC_X100_SBOX_DBR_BITS(x) ((x) & 0xf)
-#define MIC_X100_SBOX_SICE0_DMA(x) (((x) >> 8) & 0xff)
-#define MIC_X100_SBOX_DMA_BITS(x) (((x) & 0xff) << 8)
-
-#define MIC_X100_SBOX_APICICR0 0x0000A9D0
-#define MIC_X100_SBOX_SICR0 0x00009004
-#define MIC_X100_SBOX_SICE0 0x0000900C
-#define MIC_X100_SBOX_SICC0 0x00009010
-#define MIC_X100_SBOX_SIAC0 0x00009014
-#define MIC_X100_SBOX_MSIXPBACR 0x00009084
-#define MIC_X100_SBOX_MXAR0 0x00009044
-#define MIC_X100_SBOX_SMPT00 0x00003100
-#define MIC_X100_SBOX_RDMASR0 0x0000B180
-
-#define MIC_X100_DOORBELL_IDX_START 0
-#define MIC_X100_NUM_DOORBELL 4
-#define MIC_X100_DMA_IDX_START 8
-#define MIC_X100_NUM_DMA 8
-#define MIC_X100_ERR_IDX_START 30
-#define MIC_X100_NUM_ERR 1
-
-#define MIC_X100_NUM_SBOX_IRQ 8
-#define MIC_X100_NUM_RDMASR_IRQ 8
-#define MIC_X100_RDMASR_IRQ_BASE 17
-#define MIC_X100_SPAD2_DOWNLOAD_STATUS(x) ((x) & 0x1)
-#define MIC_X100_SPAD2_APIC_ID(x) (((x) >> 1) & 0x1ff)
-#define MIC_X100_SPAD2_DOWNLOAD_ADDR(x) ((x) & 0xfffff000)
-#define MIC_X100_SBOX_APICICR7 0x0000AA08
-#define MIC_X100_SBOX_RGCR 0x00004010
-#define MIC_X100_SBOX_SDBIC0 0x0000CC90
-#define MIC_X100_DOWNLOAD_INFO 2
-#define MIC_X100_FW_SIZE 5
-#define MIC_X100_POSTCODE 0x242c
-
-/* Host->Card(bootstrap) Interrupt Vector */
-#define MIC_X100_BSP_INTERRUPT_VECTOR 229
-
-extern struct mic_hw_ops mic_x100_ops;
-extern struct mic_smpt_ops mic_x100_smpt_ops;
-extern struct mic_hw_intr_ops mic_x100_intr_ops;
-
-#endif
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
deleted file mode 100644
index ff372555d118..000000000000
--- a/drivers/misc/mic/scif/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile - SCIF driver.
-# Copyright(c) 2014, Intel Corporation.
-#
-obj-$(CONFIG_SCIF) += scif.o
-scif-objs := scif_main.o
-scif-objs += scif_peer_bus.o
-scif-objs += scif_ports.o
-scif-objs += scif_debugfs.o
-scif-objs += scif_fd.o
-scif-objs += scif_api.o
-scif-objs += scif_epd.o
-scif-objs += scif_rb.o
-scif-objs += scif_nodeqp.o
-scif-objs += scif_nm.o
-scif-objs += scif_dma.o
-scif-objs += scif_fence.o
-scif-objs += scif_mmap.o
-scif-objs += scif_rma.o
-scif-objs += scif_rma_list.o
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
deleted file mode 100644
index 304d6c833712..000000000000
--- a/drivers/misc/mic/scif/scif_api.c
+++ /dev/null
@@ -1,1485 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/scif.h>
-#include "scif_main.h"
-#include "scif_map.h"
-
-static const char * const scif_ep_states[] = {
- "Unbound",
- "Bound",
- "Listening",
- "Connected",
- "Connecting",
- "Mapping",
- "Closing",
- "Close Listening",
- "Disconnected",
- "Zombie"};
-
-enum conn_async_state {
- ASYNC_CONN_IDLE = 1, /* ep setup for async connect */
- ASYNC_CONN_INPROGRESS, /* async connect in progress */
- ASYNC_CONN_FLUSH_WORK /* async work flush in progress */
-};
-
-/*
- * File operations for anonymous inode file associated with a SCIF endpoint,
- * used in kernel mode SCIF poll. Kernel mode SCIF poll calls portions of the
- * poll API in the kernel and these take in a struct file *. Since a struct
- * file is not available to kernel mode SCIF, it uses an anonymous file for
- * this purpose.
- */
-const struct file_operations scif_anon_fops = {
- .owner = THIS_MODULE,
-};
-
-scif_epd_t scif_open(void)
-{
- struct scif_endpt *ep;
- int err;
-
- might_sleep();
- ep = kzalloc(sizeof(*ep), GFP_KERNEL);
- if (!ep)
- goto err_ep_alloc;
-
- ep->qp_info.qp = kzalloc(sizeof(*ep->qp_info.qp), GFP_KERNEL);
- if (!ep->qp_info.qp)
- goto err_qp_alloc;
-
- err = scif_anon_inode_getfile(ep);
- if (err)
- goto err_anon_inode;
-
- spin_lock_init(&ep->lock);
- mutex_init(&ep->sendlock);
- mutex_init(&ep->recvlock);
-
- scif_rma_ep_init(ep);
- ep->state = SCIFEP_UNBOUND;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI open: ep %p success\n", ep);
- return ep;
-
-err_anon_inode:
- kfree(ep->qp_info.qp);
-err_qp_alloc:
- kfree(ep);
-err_ep_alloc:
- return NULL;
-}
-EXPORT_SYMBOL_GPL(scif_open);
-
-/*
- * scif_disconnect_ep - Disconnects the endpoint if found
- * @ep: The end point returned from scif_open()
- */
-static struct scif_endpt *scif_disconnect_ep(struct scif_endpt *ep)
-{
- struct scifmsg msg;
- struct scif_endpt *fep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
- int err;
-
- /*
- * Wake up any threads blocked in send()/recv() before closing
- * out the connection. Grabbing and releasing the send/recv lock
- * will ensure that any blocked senders/receivers have exited for
- * Ring 0 endpoints. It is a Ring 0 bug to call send/recv after
- * close. Ring 3 endpoints are not affected since close will not
- * be called while there are IOCTLs executing.
- */
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- mutex_lock(&ep->sendlock);
- mutex_unlock(&ep->sendlock);
- mutex_lock(&ep->recvlock);
- mutex_unlock(&ep->recvlock);
-
- /* Remove from the connected list */
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- fep = tmpep;
- spin_lock(&ep->lock);
- break;
- }
- }
-
- if (!fep) {
- /*
- * The other side has completed the disconnect before
- * the end point can be removed from the list. Therefore
-		 * the ep lock is not held; traverse the disconnected
-		 * list to find the endpoint, then release the conn lock.
- */
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- return NULL;
- }
-
- init_completion(&ep->discon);
- msg.uop = SCIF_DISCNCT;
- msg.src = ep->port;
- msg.dst = ep->peer;
- msg.payload[0] = (u64)ep;
- msg.payload[1] = ep->remote_ep;
-
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-
- if (!err)
- /* Wait for the remote node to respond with SCIF_DISCNT_ACK */
- wait_for_completion_timeout(&ep->discon,
- SCIF_NODE_ALIVE_TIMEOUT);
- return ep;
-}
-
-int scif_close(scif_epd_t epd)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
- enum scif_epd_state oldstate;
- bool flush_conn;
-
- dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
- ep, scif_ep_states[ep->state]);
- might_sleep();
- spin_lock(&ep->lock);
- flush_conn = (ep->conn_async_state == ASYNC_CONN_INPROGRESS);
- spin_unlock(&ep->lock);
-
- if (flush_conn)
- flush_work(&scif_info.conn_work);
-
- spin_lock(&ep->lock);
- oldstate = ep->state;
-
- ep->state = SCIFEP_CLOSING;
-
- switch (oldstate) {
- case SCIFEP_ZOMBIE:
- dev_err(scif_info.mdev.this_device,
- "SCIFAPI close: zombie state unexpected\n");
- fallthrough;
- case SCIFEP_DISCONNECTED:
- spin_unlock(&ep->lock);
- scif_unregister_all_windows(epd);
- /* Remove from the disconnected list */
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- break;
- case SCIFEP_UNBOUND:
- case SCIFEP_BOUND:
- case SCIFEP_CONNECTING:
- spin_unlock(&ep->lock);
- break;
- case SCIFEP_MAPPING:
- case SCIFEP_CONNECTED:
- case SCIFEP_CLOSING:
- {
- spin_unlock(&ep->lock);
- scif_unregister_all_windows(epd);
- scif_disconnect_ep(ep);
- break;
- }
- case SCIFEP_LISTENING:
- case SCIFEP_CLLISTEN:
- {
- struct scif_conreq *conreq;
- struct scifmsg msg;
- struct scif_endpt *aep;
-
- spin_unlock(&ep->lock);
- mutex_lock(&scif_info.eplock);
-
- /* remove from listen list */
- list_for_each_safe(pos, tmpq, &scif_info.listen) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- if (tmpep == ep)
- list_del(pos);
- }
- /* Remove any dangling accepts */
- while (ep->acceptcnt) {
- aep = list_first_entry(&ep->li_accept,
- struct scif_endpt, liacceptlist);
- list_del(&aep->liacceptlist);
- scif_put_port(aep->port.port);
- list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
- tmpep = list_entry(pos, struct scif_endpt,
- miacceptlist);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.eplock);
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos,
- struct scif_endpt, list);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- tmpep = list_entry(pos,
- struct scif_endpt, list);
- if (tmpep == aep) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&scif_info.connlock);
- scif_teardown_ep(aep);
- mutex_lock(&scif_info.eplock);
- scif_add_epd_to_zombie_list(aep, SCIF_EPLOCK_HELD);
- ep->acceptcnt--;
- }
-
- spin_lock(&ep->lock);
- mutex_unlock(&scif_info.eplock);
-
- /* Remove and reject any pending connection requests. */
- while (ep->conreqcnt) {
- conreq = list_first_entry(&ep->conlist,
- struct scif_conreq, list);
- list_del(&conreq->list);
-
- msg.uop = SCIF_CNCT_REJ;
- msg.dst.node = conreq->msg.src.node;
- msg.dst.port = conreq->msg.src.port;
- msg.payload[0] = conreq->msg.payload[0];
- msg.payload[1] = conreq->msg.payload[1];
- /*
- * No Error Handling on purpose for scif_nodeqp_send().
-			 * If the remote node is lost we still want to free
-			 * the connection requests on the self node.
- */
- scif_nodeqp_send(&scif_dev[conreq->msg.src.node],
- &msg);
- ep->conreqcnt--;
- kfree(conreq);
- }
-
- spin_unlock(&ep->lock);
- /* If a kSCIF accept is waiting wake it up */
- wake_up_interruptible(&ep->conwq);
- break;
- }
- }
- scif_put_port(ep->port.port);
- scif_anon_inode_fput(ep);
- scif_teardown_ep(ep);
- scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_close);
-
-/**
- * __scif_flush() - Wakes up any blocking accepts. The endpoint will no longer
- * accept new connections.
- * @epd: The end point returned from scif_open()
- */
-int __scif_flush(scif_epd_t epd)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- switch (ep->state) {
- case SCIFEP_LISTENING:
- {
- ep->state = SCIFEP_CLLISTEN;
-
- /* If an accept is waiting wake it up */
- wake_up_interruptible(&ep->conwq);
- break;
- }
- default:
- break;
- }
- return 0;
-}
-
-int scif_bind(scif_epd_t epd, u16 pn)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret = 0;
- int tmp;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI bind: ep %p %s requested port number %d\n",
- ep, scif_ep_states[ep->state], pn);
- if (pn) {
- /*
- * Similar to IETF RFC 1700, SCIF ports below
- * SCIF_ADMIN_PORT_END can only be bound by system (or root)
- * processes or by processes executed by privileged users.
- */
- if (pn < SCIF_ADMIN_PORT_END && !capable(CAP_SYS_ADMIN)) {
- ret = -EACCES;
- goto scif_bind_admin_exit;
- }
- }
-
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_BOUND) {
- ret = -EINVAL;
- goto scif_bind_exit;
- } else if (ep->state != SCIFEP_UNBOUND) {
- ret = -EISCONN;
- goto scif_bind_exit;
- }
-
- if (pn) {
- tmp = scif_rsrv_port(pn);
- if (tmp != pn) {
- ret = -EINVAL;
- goto scif_bind_exit;
- }
- } else {
- ret = scif_get_new_port();
- if (ret < 0)
- goto scif_bind_exit;
- pn = ret;
- }
-
- ep->state = SCIFEP_BOUND;
- ep->port.node = scif_info.nodeid;
- ep->port.port = pn;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- ret = pn;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI bind: bound to port number %d\n", pn);
-scif_bind_exit:
- spin_unlock(&ep->lock);
-scif_bind_admin_exit:
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_bind);
-
-int scif_listen(scif_epd_t epd, int backlog)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI listen: ep %p %s\n", ep, scif_ep_states[ep->state]);
- spin_lock(&ep->lock);
- switch (ep->state) {
- case SCIFEP_ZOMBIE:
- case SCIFEP_CLOSING:
- case SCIFEP_CLLISTEN:
- case SCIFEP_UNBOUND:
- case SCIFEP_DISCONNECTED:
- spin_unlock(&ep->lock);
- return -EINVAL;
- case SCIFEP_LISTENING:
- case SCIFEP_CONNECTED:
- case SCIFEP_CONNECTING:
- case SCIFEP_MAPPING:
- spin_unlock(&ep->lock);
- return -EISCONN;
- case SCIFEP_BOUND:
- break;
- }
-
- ep->state = SCIFEP_LISTENING;
- ep->backlog = backlog;
-
- ep->conreqcnt = 0;
- ep->acceptcnt = 0;
- INIT_LIST_HEAD(&ep->conlist);
- init_waitqueue_head(&ep->conwq);
- INIT_LIST_HEAD(&ep->li_accept);
- spin_unlock(&ep->lock);
-
- /*
-	 * The endpoint is now listening, so free the qp information, which is
-	 * not needed for a listening endpoint, before placing it on the list
-	 * of listening endpoints.
- */
- scif_teardown_ep(ep);
- ep->qp_info.qp = NULL;
-
- mutex_lock(&scif_info.eplock);
- list_add_tail(&ep->list, &scif_info.listen);
- mutex_unlock(&scif_info.eplock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_listen);
-
-/*
- ************************************************************************
- * SCIF connection flow:
- *
- * 1) A SCIF listening endpoint can call scif_accept(..) to wait for SCIF
- * connections via a SCIF_CNCT_REQ message
- * 2) A SCIF endpoint can initiate a SCIF connection by calling
- * scif_connect(..) which calls scif_setup_qp_connect(..) which
- * allocates the local qp for the endpoint ring buffer and then sends
- * a SCIF_CNCT_REQ to the remote node and waits for a SCIF_CNCT_GNT or
- * a SCIF_CNCT_REJ message
- * 3) The peer node handles a SCIF_CNCT_REQ via scif_cnctreq_resp(..) which
- * wakes up any threads blocked in step 1 or sends a SCIF_CNCT_REJ
- * message otherwise
- * 4) A thread blocked waiting for incoming connections allocates its local
- * endpoint QP and ring buffer following which it sends a SCIF_CNCT_GNT
- * and waits for a SCIF_CNCT_GNT(N)ACK. If the allocation fails then
- * the node sends a SCIF_CNCT_REJ message
- * 5) Upon receipt of a SCIF_CNCT_GNT or a SCIF_CNCT_REJ message the
- * connecting endpoint is woken up as part of handling
- * scif_cnctgnt_resp(..) following which it maps the remote endpoints'
- * QP, updates its outbound QP and sends a SCIF_CNCT_GNTACK message on
- * success or a SCIF_CNCT_GNTNACK message on failure and completes
- * the scif_connect(..) API
- * 6) Upon receipt of a SCIF_CNCT_GNT(N)ACK the accepting endpoint blocked
- * in step 4 is woken up and completes the scif_accept(..) API
- * 7) The SCIF connection is now established between the two SCIF endpoints.
- */
-static int scif_conn_func(struct scif_endpt *ep)
-{
- int err = 0;
- struct scifmsg msg;
- struct device *spdev;
-
- err = scif_reserve_dma_chan(ep);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- ep->state = SCIFEP_BOUND;
- goto connect_error_simple;
- }
- /* Initiate the first part of the endpoint QP setup */
- err = scif_setup_qp_connect(ep->qp_info.qp, &ep->qp_info.qp_offset,
- SCIF_ENDPT_QP_SIZE, ep->remote_dev);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s err %d qp_offset 0x%llx\n",
- __func__, err, ep->qp_info.qp_offset);
- ep->state = SCIFEP_BOUND;
- goto connect_error_simple;
- }
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto cleanup_qp;
- }
- /* Format connect message and send it */
- msg.src = ep->port;
- msg.dst = ep->conn_port;
- msg.uop = SCIF_CNCT_REQ;
- msg.payload[0] = (u64)ep;
- msg.payload[1] = ep->qp_info.qp_offset;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- if (err)
- goto connect_error_dec;
- scif_put_peer_dev(spdev);
- /*
- * Wait for the remote node to respond with SCIF_CNCT_GNT or
- * SCIF_CNCT_REJ message.
- */
- err = wait_event_timeout(ep->conwq, ep->state != SCIFEP_CONNECTING,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d timeout\n", __func__, __LINE__);
- ep->state = SCIFEP_BOUND;
- }
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto cleanup_qp;
- }
- if (ep->state == SCIFEP_MAPPING) {
- err = scif_setup_qp_connect_response(ep->remote_dev,
- ep->qp_info.qp,
- ep->qp_info.gnt_pld);
- /*
-		 * If the resources to map the queue are not available then
- * we need to tell the other side to terminate the accept
- */
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- msg.uop = SCIF_CNCT_GNTNACK;
- msg.payload[0] = ep->remote_ep;
- _scif_nodeqp_send(ep->remote_dev, &msg);
- ep->state = SCIFEP_BOUND;
- goto connect_error_dec;
- }
-
- msg.uop = SCIF_CNCT_GNTACK;
- msg.payload[0] = ep->remote_ep;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- if (err) {
- ep->state = SCIFEP_BOUND;
- goto connect_error_dec;
- }
- ep->state = SCIFEP_CONNECTED;
- mutex_lock(&scif_info.connlock);
- list_add_tail(&ep->list, &scif_info.connected);
- mutex_unlock(&scif_info.connlock);
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI connect: ep %p connected\n", ep);
- } else if (ep->state == SCIFEP_BOUND) {
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI connect: ep %p connection refused\n", ep);
- err = -ECONNREFUSED;
- goto connect_error_dec;
- }
- scif_put_peer_dev(spdev);
- return err;
-connect_error_dec:
- scif_put_peer_dev(spdev);
-cleanup_qp:
- scif_cleanup_ep_qp(ep);
-connect_error_simple:
- return err;
-}
-
-/*
- * scif_conn_handler:
- *
- * Workqueue handler for servicing non-blocking SCIF connect requests.
- */
-void scif_conn_handler(struct work_struct *work)
-{
- struct scif_endpt *ep;
-
- do {
- ep = NULL;
- spin_lock(&scif_info.nb_connect_lock);
- if (!list_empty(&scif_info.nb_connect_list)) {
- ep = list_first_entry(&scif_info.nb_connect_list,
- struct scif_endpt, conn_list);
- list_del(&ep->conn_list);
- }
- spin_unlock(&scif_info.nb_connect_lock);
- if (ep) {
- ep->conn_err = scif_conn_func(ep);
- wake_up_interruptible(&ep->conn_pend_wq);
- }
- } while (ep);
-}
-
-int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- struct scif_dev *remote_dev;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
- scif_ep_states[ep->state]);
-
- if (!scif_dev || dst->node > scif_info.maxid)
- return -ENODEV;
-
- might_sleep();
-
- remote_dev = &scif_dev[dst->node];
- spdev = scif_get_peer_dev(remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
-
- spin_lock(&ep->lock);
- switch (ep->state) {
- case SCIFEP_ZOMBIE:
- case SCIFEP_CLOSING:
- err = -EINVAL;
- break;
- case SCIFEP_DISCONNECTED:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- else
- err = -EINVAL;
- break;
- case SCIFEP_LISTENING:
- case SCIFEP_CLLISTEN:
- err = -EOPNOTSUPP;
- break;
- case SCIFEP_CONNECTING:
- case SCIFEP_MAPPING:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- err = -EINPROGRESS;
- else
- err = -EISCONN;
- break;
- case SCIFEP_CONNECTED:
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS)
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- else
- err = -EISCONN;
- break;
- case SCIFEP_UNBOUND:
- err = scif_get_new_port();
- if (err < 0)
- break;
- ep->port.port = err;
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- fallthrough;
- case SCIFEP_BOUND:
- /*
- * If a non-blocking connect has been already initiated
- * (conn_async_state is either ASYNC_CONN_INPROGRESS or
- * ASYNC_CONN_FLUSH_WORK), the end point could end up in
-		 * SCIFEP_BOUND due to an error in the connection process
-		 * (e.g., connection refused). If conn_async_state is
- * ASYNC_CONN_INPROGRESS - transition to ASYNC_CONN_FLUSH_WORK
- * so that the error status can be collected. If the state is
- * already ASYNC_CONN_FLUSH_WORK - then set the error to
- * EINPROGRESS since some other thread is waiting to collect
- * error status.
- */
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- ep->conn_async_state = ASYNC_CONN_FLUSH_WORK;
- } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
- err = -EINPROGRESS;
- } else {
- ep->conn_port = *dst;
- init_waitqueue_head(&ep->sendwq);
- init_waitqueue_head(&ep->recvwq);
- init_waitqueue_head(&ep->conwq);
- ep->conn_async_state = 0;
-
- if (unlikely(non_block))
- ep->conn_async_state = ASYNC_CONN_INPROGRESS;
- }
- break;
- }
-
- if (err || ep->conn_async_state == ASYNC_CONN_FLUSH_WORK)
- goto connect_simple_unlock1;
-
- ep->state = SCIFEP_CONNECTING;
- ep->remote_dev = &scif_dev[dst->node];
- ep->qp_info.qp->magic = SCIFEP_MAGIC;
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- init_waitqueue_head(&ep->conn_pend_wq);
- spin_lock(&scif_info.nb_connect_lock);
- list_add_tail(&ep->conn_list, &scif_info.nb_connect_list);
- spin_unlock(&scif_info.nb_connect_lock);
- err = -EINPROGRESS;
- schedule_work(&scif_info.conn_work);
- }
-connect_simple_unlock1:
- spin_unlock(&ep->lock);
- scif_put_peer_dev(spdev);
- if (err) {
- return err;
- } else if (ep->conn_async_state == ASYNC_CONN_FLUSH_WORK) {
- flush_work(&scif_info.conn_work);
- err = ep->conn_err;
- spin_lock(&ep->lock);
- ep->conn_async_state = ASYNC_CONN_IDLE;
- spin_unlock(&ep->lock);
- } else {
- err = scif_conn_func(ep);
- }
- return err;
-}
-
-int scif_connect(scif_epd_t epd, struct scif_port_id *dst)
-{
- return __scif_connect(epd, dst, false);
-}
-EXPORT_SYMBOL_GPL(scif_connect);
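Putting the connect path together, a minimal kernel-mode client might look like the sketch below. It uses only APIs exported from this file; the destination node/port values and the demo function itself are illustrative, not part of the driver.

#include <linux/scif.h>

/* Minimal kernel-mode SCIF client sketch (illustrative node/port values). */
static int scif_client_connect_demo(void)
{
	struct scif_port_id dst = { .node = 1, .port = 2000 };
	char buf[64] = "hello";
	scif_epd_t epd;
	int err;

	epd = scif_open();
	if (!epd)
		return -ENOMEM;
	err = scif_bind(epd, 0);		/* 0: let SCIF pick a local port */
	if (err < 0)
		goto out;
	err = scif_connect(epd, &dst);		/* blocks for SCIF_CNCT_GNT/REJ */
	if (err < 0)
		goto out;
	err = scif_send(epd, buf, sizeof(buf), SCIF_SEND_BLOCK);
out:
	scif_close(epd);
	return err < 0 ? err : 0;
}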
-
-/*
- * scif_accept() - Accept a connection request from the remote node
- *
- * The function accepts a connection request from the remote node. Successful
- * completion is indicated by a new end point being created and passed back
- * to the caller for future reference.
- *
- * Upon successful completion zero is returned and the peer information
- * is filled in.
- *
- * If the end point is not in the listening state -EINVAL is returned.
- *
- * If resource allocation fails during the connection sequence -ENOMEM
- * is returned.
- *
- * If the function is called without the SCIF_ACCEPT_SYNC flag and no
- * connection requests are pending it will return -EAGAIN.
- *
- * If the remote side is not sending any connection requests the caller may
- * terminate this function with a signal. If so, -EINTR is returned.
- */
-int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
- scif_epd_t *newepd, int flags)
-{
- struct scif_endpt *lep = (struct scif_endpt *)epd;
- struct scif_endpt *cep;
- struct scif_conreq *conreq;
- struct scifmsg msg;
- int err;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI accept: ep %p %s\n", lep, scif_ep_states[lep->state]);
-
- if (flags & ~SCIF_ACCEPT_SYNC)
- return -EINVAL;
-
- if (!peer || !newepd)
- return -EINVAL;
-
- might_sleep();
- spin_lock(&lep->lock);
- if (lep->state != SCIFEP_LISTENING) {
- spin_unlock(&lep->lock);
- return -EINVAL;
- }
-
- if (!lep->conreqcnt && !(flags & SCIF_ACCEPT_SYNC)) {
- /* No connection request present and we do not want to wait */
- spin_unlock(&lep->lock);
- return -EAGAIN;
- }
-
- lep->files = current->files;
-retry_connection:
- spin_unlock(&lep->lock);
- /* Wait for the remote node to send us a SCIF_CNCT_REQ */
- err = wait_event_interruptible(lep->conwq,
- (lep->conreqcnt ||
- (lep->state != SCIFEP_LISTENING)));
- if (err)
- return err;
-
- if (lep->state != SCIFEP_LISTENING)
- return -EINTR;
-
- spin_lock(&lep->lock);
-
- if (!lep->conreqcnt)
- goto retry_connection;
-
- /* Get the first connect request off the list */
- conreq = list_first_entry(&lep->conlist, struct scif_conreq, list);
- list_del(&conreq->list);
- lep->conreqcnt--;
- spin_unlock(&lep->lock);
-
- /* Fill in the peer information */
- peer->node = conreq->msg.src.node;
- peer->port = conreq->msg.src.port;
-
- cep = kzalloc(sizeof(*cep), GFP_KERNEL);
- if (!cep) {
- err = -ENOMEM;
- goto scif_accept_error_epalloc;
- }
- spin_lock_init(&cep->lock);
- mutex_init(&cep->sendlock);
- mutex_init(&cep->recvlock);
- cep->state = SCIFEP_CONNECTING;
- cep->remote_dev = &scif_dev[peer->node];
- cep->remote_ep = conreq->msg.payload[0];
-
- scif_rma_ep_init(cep);
-
- err = scif_reserve_dma_chan(cep);
- if (err) {
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto scif_accept_error_qpalloc;
- }
-
- cep->qp_info.qp = kzalloc(sizeof(*cep->qp_info.qp), GFP_KERNEL);
- if (!cep->qp_info.qp) {
- err = -ENOMEM;
- goto scif_accept_error_qpalloc;
- }
-
- err = scif_anon_inode_getfile(cep);
- if (err)
- goto scif_accept_error_anon_inode;
-
- cep->qp_info.qp->magic = SCIFEP_MAGIC;
- spdev = scif_get_peer_dev(cep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- goto scif_accept_error_map;
- }
- err = scif_setup_qp_accept(cep->qp_info.qp, &cep->qp_info.qp_offset,
- conreq->msg.payload[1], SCIF_ENDPT_QP_SIZE,
- cep->remote_dev);
- if (err) {
- dev_dbg(&cep->remote_dev->sdev->dev,
- "SCIFAPI accept: ep %p new %p scif_setup_qp_accept %d qp_offset 0x%llx\n",
- lep, cep, err, cep->qp_info.qp_offset);
- scif_put_peer_dev(spdev);
- goto scif_accept_error_map;
- }
-
- cep->port.node = lep->port.node;
- cep->port.port = lep->port.port;
- cep->peer.node = peer->node;
- cep->peer.port = peer->port;
- init_waitqueue_head(&cep->sendwq);
- init_waitqueue_head(&cep->recvwq);
- init_waitqueue_head(&cep->conwq);
-
- msg.uop = SCIF_CNCT_GNT;
- msg.src = cep->port;
- msg.payload[0] = cep->remote_ep;
- msg.payload[1] = cep->qp_info.qp_offset;
- msg.payload[2] = (u64)cep;
-
- err = _scif_nodeqp_send(cep->remote_dev, &msg);
- scif_put_peer_dev(spdev);
- if (err)
- goto scif_accept_error_map;
-retry:
- /* Wait for the remote node to respond with SCIF_CNCT_GNT(N)ACK */
- err = wait_event_timeout(cep->conwq, cep->state != SCIFEP_CONNECTING,
- SCIF_NODE_ACCEPT_TIMEOUT);
- if (!err && scifdev_alive(cep))
- goto retry;
- err = !err ? -ENODEV : 0;
- if (err)
- goto scif_accept_error_map;
- kfree(conreq);
-
- spin_lock(&cep->lock);
-
- if (cep->state == SCIFEP_CLOSING) {
- /*
- * Remote failed to allocate resources and NAKed the grant.
- * There is at this point nothing referencing the new end point.
- */
- spin_unlock(&cep->lock);
- scif_teardown_ep(cep);
- kfree(cep);
-
- /* If call with sync flag then go back and wait. */
- if (flags & SCIF_ACCEPT_SYNC) {
- spin_lock(&lep->lock);
- goto retry_connection;
- }
- return -EAGAIN;
- }
-
- scif_get_port(cep->port.port);
- *newepd = (scif_epd_t)cep;
- spin_unlock(&cep->lock);
- return 0;
-scif_accept_error_map:
- scif_anon_inode_fput(cep);
-scif_accept_error_anon_inode:
- scif_teardown_ep(cep);
-scif_accept_error_qpalloc:
- kfree(cep);
-scif_accept_error_epalloc:
- msg.uop = SCIF_CNCT_REJ;
- msg.dst.node = conreq->msg.src.node;
- msg.dst.port = conreq->msg.src.port;
- msg.payload[0] = conreq->msg.payload[0];
- msg.payload[1] = conreq->msg.payload[1];
- scif_nodeqp_send(&scif_dev[conreq->msg.src.node], &msg);
- kfree(conreq);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_accept);
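The matching listening side, again as a hedged sketch over the APIs above; the service port, backlog, and demo function are illustrative placeholders.

#include <linux/scif.h>

/* Minimal kernel-mode SCIF server sketch (illustrative port value). */
static int scif_server_accept_demo(void)
{
	struct scif_port_id peer;
	scif_epd_t lep, cep;
	int err;

	lep = scif_open();
	if (!lep)
		return -ENOMEM;
	err = scif_bind(lep, 2000);		/* well-known service port */
	if (err < 0)
		goto out;
	err = scif_listen(lep, 16);		/* backlog of 16 pending requests */
	if (err)
		goto out;
	/* Block until a SCIF_CNCT_REQ arrives, then grant it. */
	err = scif_accept(lep, &peer, &cep, SCIF_ACCEPT_SYNC);
	if (!err)
		scif_close(cep);		/* a real server would use cep */
out:
	scif_close(lep);
	return err;
}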
-
-/*
- * scif_msg_param_check:
- * @epd: The end point returned from scif_open()
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * Validate parameters for messaging APIs scif_send(..)/scif_recv(..).
- */
-static inline int scif_msg_param_check(scif_epd_t epd, int len, int flags)
-{
- int ret = -EINVAL;
-
- if (len < 0)
- goto err_ret;
- if (flags && (!(flags & SCIF_RECV_BLOCK)))
- goto err_ret;
- ret = 0;
-err_ret:
- return ret;
-}
-
-static int _scif_send(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scifmsg notif_msg;
- int curr_xfer_len = 0, sent_len = 0, write_count;
- int ret = 0;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (flags & SCIF_SEND_BLOCK)
- might_sleep();
-
- spin_lock(&ep->lock);
- while (sent_len != len && SCIFEP_CONNECTED == ep->state) {
- write_count = scif_rb_space(&qp->outbound_q);
- if (write_count) {
- /* Best effort to send as much data as possible */
- curr_xfer_len = min(len - sent_len, write_count);
- ret = scif_rb_write(&qp->outbound_q, msg,
- curr_xfer_len);
- if (ret < 0)
- break;
- /* Success. Update write pointer */
- scif_rb_commit(&qp->outbound_q);
- /*
- * Send a notification to the peer about the
- * produced data message.
- */
- notif_msg.src = ep->port;
- notif_msg.uop = SCIF_CLIENT_SENT;
- notif_msg.payload[0] = ep->remote_ep;
- ret = _scif_nodeqp_send(ep->remote_dev, &notif_msg);
- if (ret)
- break;
- sent_len += curr_xfer_len;
- msg = msg + curr_xfer_len;
- continue;
- }
- curr_xfer_len = min(len - sent_len, SCIF_ENDPT_QP_SIZE - 1);
-		/* Not enough RB space; return in the Non Blocking case */
- if (!(flags & SCIF_SEND_BLOCK))
- break;
-
- spin_unlock(&ep->lock);
- /* Wait for a SCIF_CLIENT_RCVD message in the Blocking case */
- ret =
- wait_event_interruptible(ep->sendwq,
- (SCIFEP_CONNECTED != ep->state) ||
- (scif_rb_space(&qp->outbound_q) >=
- curr_xfer_len));
- spin_lock(&ep->lock);
- if (ret)
- break;
- }
- if (sent_len)
- ret = sent_len;
- else if (!ret && SCIFEP_CONNECTED != ep->state)
- ret = SCIFEP_DISCONNECTED == ep->state ?
- -ECONNRESET : -ENOTCONN;
- spin_unlock(&ep->lock);
- return ret;
-}
-
-static int _scif_recv(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scifmsg notif_msg;
- int curr_recv_len = 0, remaining_len = len, read_count;
- int ret = 0;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (flags & SCIF_RECV_BLOCK)
- might_sleep();
- spin_lock(&ep->lock);
- while (remaining_len && (SCIFEP_CONNECTED == ep->state ||
- SCIFEP_DISCONNECTED == ep->state)) {
- read_count = scif_rb_count(&qp->inbound_q, remaining_len);
- if (read_count) {
- /*
- * Best effort to recv as much data as there
-			 * are bytes to read in the RB; particularly
- * important for the Non Blocking case.
- */
- curr_recv_len = min(remaining_len, read_count);
- scif_rb_get_next(&qp->inbound_q, msg, curr_recv_len);
- if (ep->state == SCIFEP_CONNECTED) {
- /*
- * Update the read pointer only if the endpoint
- * is still connected else the read pointer
- * might no longer exist since the peer has
- * freed resources!
- */
- scif_rb_update_read_ptr(&qp->inbound_q);
- /*
- * Send a notification to the peer about the
- * consumed data message only if the EP is in
- * SCIFEP_CONNECTED state.
- */
- notif_msg.src = ep->port;
- notif_msg.uop = SCIF_CLIENT_RCVD;
- notif_msg.payload[0] = ep->remote_ep;
- ret = _scif_nodeqp_send(ep->remote_dev,
- &notif_msg);
- if (ret)
- break;
- }
- remaining_len -= curr_recv_len;
- msg = msg + curr_recv_len;
- continue;
- }
- /*
- * Bail out now if the EP is in SCIFEP_DISCONNECTED state else
- * we will keep looping forever.
- */
- if (ep->state == SCIFEP_DISCONNECTED)
- break;
- /*
- * Return in the Non Blocking case if there is no data
- * to read in this iteration.
- */
- if (!(flags & SCIF_RECV_BLOCK))
- break;
- curr_recv_len = min(remaining_len, SCIF_ENDPT_QP_SIZE - 1);
- spin_unlock(&ep->lock);
- /*
- * Wait for a SCIF_CLIENT_SEND message in the blocking case
- * or until other side disconnects.
- */
- ret =
- wait_event_interruptible(ep->recvwq,
- SCIFEP_CONNECTED != ep->state ||
- scif_rb_count(&qp->inbound_q,
- curr_recv_len)
- >= curr_recv_len);
- spin_lock(&ep->lock);
- if (ret)
- break;
- }
- if (len - remaining_len)
- ret = len - remaining_len;
- else if (!ret && ep->state != SCIFEP_CONNECTED)
- ret = ep->state == SCIFEP_DISCONNECTED ?
- -ECONNRESET : -ENOTCONN;
- spin_unlock(&ep->lock);
- return ret;
-}
-
-/**
- * scif_user_send() - Send data to connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address of data to send
- * @len: Length to send
- * @flags: blocking or non blocking
- *
- * This function is called from the driver IOCTL entry point
- * only and is a wrapper for _scif_send().
- */
-int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- int sent_len = 0;
- char *tmp;
- int loop_len;
- int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI send (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- err = scif_msg_param_check(epd, len, flags);
- if (err)
- goto send_err;
-
- tmp = kmalloc(chunk_len, GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto send_err;
- }
- /*
- * Grabbing the lock before breaking up the transfer in
- * multiple chunks is required to ensure that messages do
- * not get fragmented and reordered.
- */
- mutex_lock(&ep->sendlock);
- while (sent_len != len) {
- loop_len = len - sent_len;
- loop_len = min(chunk_len, loop_len);
- if (copy_from_user(tmp, msg, loop_len)) {
- err = -EFAULT;
- goto send_free_err;
- }
- err = _scif_send(epd, tmp, loop_len, flags);
- if (err < 0)
- goto send_free_err;
- sent_len += err;
- msg += err;
- if (err != loop_len)
- goto send_free_err;
- }
-send_free_err:
- mutex_unlock(&ep->sendlock);
- kfree(tmp);
-send_err:
- return err < 0 ? err : sent_len;
-}
-
-/**
- * scif_user_recv() - Receive data from connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address to place data
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * This function is called from the driver IOCTL entry point
- * only and is a wrapper for _scif_recv().
- */
-int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
- int recv_len = 0;
- char *tmp;
- int loop_len;
- int chunk_len = min(len, (1 << (MAX_ORDER + PAGE_SHIFT - 1)));
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI recv (U): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- err = scif_msg_param_check(epd, len, flags);
- if (err)
- goto recv_err;
-
- tmp = kmalloc(chunk_len, GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto recv_err;
- }
- /*
- * Grabbing the lock before breaking up the transfer in
- * multiple chunks is required to ensure that messages do
- * not get fragmented and reordered.
- */
- mutex_lock(&ep->recvlock);
- while (recv_len != len) {
- loop_len = len - recv_len;
- loop_len = min(chunk_len, loop_len);
- err = _scif_recv(epd, tmp, loop_len, flags);
- if (err < 0)
- goto recv_free_err;
- if (copy_to_user(msg, tmp, err)) {
- err = -EFAULT;
- goto recv_free_err;
- }
- recv_len += err;
- msg += err;
- if (err != loop_len)
- goto recv_free_err;
- }
-recv_free_err:
- mutex_unlock(&ep->recvlock);
- kfree(tmp);
-recv_err:
- return err < 0 ? err : recv_len;
-}
-
-/**
- * scif_send() - Send data to connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address of data to send
- * @len: Length to send
- * @flags: blocking or non blocking
- *
- * This function is called from the kernel mode only and is
- * a wrapper for _scif_send().
- */
-int scif_send(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI send (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- ret = scif_msg_param_check(epd, len, flags);
- if (ret)
- return ret;
- if (!ep->remote_dev)
- return -ENOTCONN;
- /*
- * Grab the mutex lock in the blocking case only
- * to ensure messages do not get fragmented/reordered.
- * The non blocking mode is protected using spin locks
- * in _scif_send().
- */
- if (flags & SCIF_SEND_BLOCK)
- mutex_lock(&ep->sendlock);
-
- ret = _scif_send(epd, msg, len, flags);
-
- if (flags & SCIF_SEND_BLOCK)
- mutex_unlock(&ep->sendlock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_send);
-
-/**
- * scif_recv() - Receive data from connection queue
- * @epd: The end point returned from scif_open()
- * @msg: Address to place data
- * @len: Length to receive
- * @flags: blocking or non blocking
- *
- * This function is called from the kernel mode only and is
- * a wrapper for _scif_recv().
- */
-int scif_recv(scif_epd_t epd, void *msg, int len, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int ret;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI recv (K): ep %p %s\n", ep, scif_ep_states[ep->state]);
- if (!len)
- return 0;
-
- ret = scif_msg_param_check(epd, len, flags);
- if (ret)
- return ret;
- /*
- * Grab the mutex lock in the blocking case only
- * to ensure messages do not get fragmented/reordered.
- * The non blocking mode is protected using spin locks
-	 * in _scif_recv().
- */
- if (flags & SCIF_RECV_BLOCK)
- mutex_lock(&ep->recvlock);
-
- ret = _scif_recv(epd, msg, len, flags);
-
- if (flags & SCIF_RECV_BLOCK)
- mutex_unlock(&ep->recvlock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(scif_recv);
-
-static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
- poll_table *p, struct scif_endpt *ep)
-{
- /*
- * Because poll_wait makes a GFP_KERNEL allocation, give up the lock
- * and regrab it afterwards. Because the endpoint state might have
- * changed while the lock was given up, the state must be checked
- * again after re-acquiring the lock. The code in __scif_pollfd(..)
- * does this.
- */
- spin_unlock(&ep->lock);
- poll_wait(f, wq, p);
- spin_lock(&ep->lock);
-}
-
-__poll_t
-__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
-{
- __poll_t mask = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
-
- spin_lock(&ep->lock);
-
- /* Endpoint is waiting for a non-blocking connect to complete */
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- _scif_poll_wait(f, &ep->conn_pend_wq, wait, ep);
- if (ep->conn_async_state == ASYNC_CONN_INPROGRESS) {
- if (ep->state == SCIFEP_CONNECTED ||
- ep->state == SCIFEP_DISCONNECTED ||
- ep->conn_err)
- mask |= EPOLLOUT;
- goto exit;
- }
- }
-
- /* Endpoint is listening for incoming connection requests */
- if (ep->state == SCIFEP_LISTENING) {
- _scif_poll_wait(f, &ep->conwq, wait, ep);
- if (ep->state == SCIFEP_LISTENING) {
- if (ep->conreqcnt)
- mask |= EPOLLIN;
- goto exit;
- }
- }
-
- /* Endpoint is connected or disconnected */
- if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) {
- if (poll_requested_events(wait) & EPOLLIN)
- _scif_poll_wait(f, &ep->recvwq, wait, ep);
- if (poll_requested_events(wait) & EPOLLOUT)
- _scif_poll_wait(f, &ep->sendwq, wait, ep);
- if (ep->state == SCIFEP_CONNECTED ||
- ep->state == SCIFEP_DISCONNECTED) {
- /* Data can be read without blocking */
- if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1))
- mask |= EPOLLIN;
- /* Data can be written without blocking */
- if (scif_rb_space(&ep->qp_info.qp->outbound_q))
- mask |= EPOLLOUT;
- /* Return EPOLLHUP if endpoint is disconnected */
- if (ep->state == SCIFEP_DISCONNECTED)
- mask |= EPOLLHUP;
- goto exit;
- }
- }
-
- /* Return EPOLLERR if the endpoint is in none of the above states */
- mask |= EPOLLERR;
-exit:
- spin_unlock(&ep->lock);
- return mask;
-}
-
-/**
- * scif_poll() - Kernel mode SCIF poll
- * @ufds: Array of scif_pollepd structures containing the end points
- * and events to poll on
- * @nfds: Size of the ufds array
- * @timeout_msecs: Timeout in msecs; a negative value implies an infinite timeout
- *
- * The code flow in this function is based on do_poll(..) in select.c
- *
- * Returns the number of endpoints which have pending events or 0 in
- * the event of a timeout. If a signal is used for wake up, -EINTR is
- * returned.
- */
-int
-scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
-{
- struct poll_wqueues table;
- poll_table *pt;
- int i, count = 0, timed_out = timeout_msecs == 0;
- __poll_t mask;
- u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
- : msecs_to_jiffies(timeout_msecs);
-
- poll_initwait(&table);
- pt = &table.pt;
- while (1) {
- for (i = 0; i < nfds; i++) {
- pt->_key = ufds[i].events | EPOLLERR | EPOLLHUP;
- mask = __scif_pollfd(ufds[i].epd->anon,
- pt, ufds[i].epd);
- mask &= ufds[i].events | EPOLLERR | EPOLLHUP;
- if (mask) {
- count++;
- pt->_qproc = NULL;
- }
- ufds[i].revents = mask;
- }
- pt->_qproc = NULL;
- if (!count) {
- count = table.error;
- if (signal_pending(current))
- count = -EINTR;
- }
- if (count || timed_out)
- break;
-
- if (!schedule_timeout_interruptible(timeout))
- timed_out = 1;
- }
- poll_freewait(&table);
- return count;
-}
-EXPORT_SYMBOL_GPL(scif_poll);
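For completeness, a sketch of a kernel-mode caller waiting for readability on a single endpoint; the timeout is illustrative and the scif_pollepd layout is assumed to match the usage in scif_poll() above.

#include <linux/scif.h>

/* Hypothetical kernel-mode poll on one endpoint for readability. */
static int scif_poll_demo(scif_epd_t epd)
{
	struct scif_pollepd pfd = {
		.epd	= epd,
		.events	= EPOLLIN,
	};
	int n = scif_poll(&pfd, 1, 1000);	/* wait up to 1000 ms */

	if (n > 0 && (pfd.revents & EPOLLIN))
		return 0;			/* data can be read without blocking */
	return n < 0 ? n : -ETIMEDOUT;
}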
-
-int scif_get_node_ids(u16 *nodes, int len, u16 *self)
-{
- int online = 0;
- int offset = 0;
- int node;
-
- if (!scif_is_mgmt_node())
- scif_get_node_info();
-
- *self = scif_info.nodeid;
- mutex_lock(&scif_info.conflock);
- len = min_t(int, len, scif_info.total);
- for (node = 0; node <= scif_info.maxid; node++) {
- if (_scifdev_alive(&scif_dev[node])) {
- online++;
- if (offset < len)
- nodes[offset++] = node;
- }
- }
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI get_node_ids total %d online %d filled in %d nodes\n",
- scif_info.total, online, offset);
- mutex_unlock(&scif_info.conflock);
-
- return online;
-}
-EXPORT_SYMBOL_GPL(scif_get_node_ids);
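A short usage sketch: enumerate the online nodes into a fixed array. Note that the return value is the number of online nodes, which may exceed the number of entries actually filled in, so the loop clamps to the array size; the demo function is hypothetical.

#include <linux/scif.h>

/* Hypothetical enumeration of online SCIF nodes. */
static void scif_node_ids_demo(void)
{
	u16 nodes[8], self;
	int i, online = scif_get_node_ids(nodes, ARRAY_SIZE(nodes), &self);

	for (i = 0; i < min_t(int, online, ARRAY_SIZE(nodes)); i++)
		pr_info("scif: node %u online (self %u)\n", nodes[i], self);
}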
-
-static int scif_add_client_dev(struct device *dev, struct subsys_interface *si)
-{
- struct scif_client *client =
- container_of(si, struct scif_client, si);
- struct scif_peer_dev *spdev =
- container_of(dev, struct scif_peer_dev, dev);
-
- if (client->probe)
- client->probe(spdev);
- return 0;
-}
-
-static void scif_remove_client_dev(struct device *dev,
- struct subsys_interface *si)
-{
- struct scif_client *client =
- container_of(si, struct scif_client, si);
- struct scif_peer_dev *spdev =
- container_of(dev, struct scif_peer_dev, dev);
-
- if (client->remove)
- client->remove(spdev);
-}
-
-void scif_client_unregister(struct scif_client *client)
-{
- subsys_interface_unregister(&client->si);
-}
-EXPORT_SYMBOL_GPL(scif_client_unregister);
-
-int scif_client_register(struct scif_client *client)
-{
- struct subsys_interface *si = &client->si;
-
- si->name = client->name;
- si->subsys = &scif_peer_bus;
- si->add_dev = scif_add_client_dev;
- si->remove_dev = scif_remove_client_dev;
-
- return subsys_interface_register(&client->si);
-}
-EXPORT_SYMBOL_GPL(scif_client_register);
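Finally, a sketch of a SCIF bus client using the registration hooks above, assuming the hook signatures implied by the dispatch code in scif_add_client_dev()/scif_remove_client_dev(); the client name and probe/remove bodies are placeholders.

#include <linux/scif.h>

/* Hypothetical SCIF bus client; bodies are placeholders. */
static void demo_probe(struct scif_peer_dev *spdev)
{
	/* Set up per-peer state when a peer node comes online. */
}

static void demo_remove(struct scif_peer_dev *spdev)
{
	/* Tear the per-peer state down when the node goes away. */
}

static struct scif_client demo_client = {
	.name	= "scif_demo",
	.probe	= demo_probe,
	.remove	= demo_remove,
};

/* Registered from module init: scif_client_register(&demo_client); */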
diff --git a/drivers/misc/mic/scif/scif_debugfs.c b/drivers/misc/mic/scif/scif_debugfs.c
deleted file mode 100644
index 8fe38e7ca6e6..000000000000
--- a/drivers/misc/mic/scif/scif_debugfs.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "../common/mic_dev.h"
-#include "scif_main.h"
-
-/* Debugfs parent dir */
-static struct dentry *scif_dbg;
-
-static int scif_dev_show(struct seq_file *s, void *unused)
-{
- int node;
-
- seq_printf(s, "Total Nodes %d Self Node Id %d Maxid %d\n",
- scif_info.total, scif_info.nodeid,
- scif_info.maxid);
-
- if (!scif_dev)
- return 0;
-
- seq_printf(s, "%-16s\t%-16s\n", "node_id", "state");
-
- for (node = 0; node <= scif_info.maxid; node++)
- seq_printf(s, "%-16d\t%-16s\n", scif_dev[node].node,
- _scifdev_alive(&scif_dev[node]) ?
- "Running" : "Offline");
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(scif_dev);
-
-static void scif_display_window(struct scif_window *window, struct seq_file *s)
-{
- int j;
- struct scatterlist *sg;
- scif_pinned_pages_t pin = window->pinned_pages;
-
- seq_printf(s, "window %p type %d temp %d offset 0x%llx ",
- window, window->type, window->temp, window->offset);
- seq_printf(s, "nr_pages 0x%llx nr_contig_chunks 0x%x prot %d ",
- window->nr_pages, window->nr_contig_chunks, window->prot);
- seq_printf(s, "ref_count %d magic 0x%llx peer_window 0x%llx ",
- window->ref_count, window->magic, window->peer_window);
- seq_printf(s, "unreg_state 0x%x va_for_temp 0x%lx\n",
- window->unreg_state, window->va_for_temp);
-
- for (j = 0; j < window->nr_contig_chunks; j++)
- seq_printf(s, "page[%d] dma_addr 0x%llx num_pages 0x%llx\n", j,
- window->dma_addr[j], window->num_pages[j]);
-
- if (window->type == SCIF_WINDOW_SELF && pin)
- for (j = 0; j < window->nr_pages; j++)
- seq_printf(s, "page[%d] = pinned_pages %p address %p\n",
- j, pin->pages[j],
- page_address(pin->pages[j]));
-
- if (window->st)
- for_each_sg(window->st->sgl, sg, window->st->nents, j)
- seq_printf(s, "sg[%d] dma addr 0x%llx length 0x%x\n",
- j, sg_dma_address(sg), sg_dma_len(sg));
-}
-
-static void scif_display_all_windows(struct list_head *head, struct seq_file *s)
-{
- struct list_head *item;
- struct scif_window *window;
-
- list_for_each(item, head) {
- window = list_entry(item, struct scif_window, list);
- scif_display_window(window, s);
- }
-}
-
-static int scif_rma_show(struct seq_file *s, void *unused)
-{
- struct scif_endpt *ep;
- struct list_head *pos;
-
- mutex_lock(&scif_info.connlock);
- list_for_each(pos, &scif_info.connected) {
- ep = list_entry(pos, struct scif_endpt, list);
- seq_printf(s, "ep %p self windows\n", ep);
- mutex_lock(&ep->rma_info.rma_lock);
- scif_display_all_windows(&ep->rma_info.reg_list, s);
- seq_printf(s, "ep %p remote windows\n", ep);
- scif_display_all_windows(&ep->rma_info.remote_reg_list, s);
- mutex_unlock(&ep->rma_info.rma_lock);
- }
- mutex_unlock(&scif_info.connlock);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(scif_rma);
-
-void __init scif_init_debugfs(void)
-{
- scif_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- debugfs_create_file("scif_dev", 0444, scif_dbg, NULL, &scif_dev_fops);
- debugfs_create_file("scif_rma", 0444, scif_dbg, NULL, &scif_rma_fops);
- debugfs_create_u8("en_msg_log", 0666, scif_dbg, &scif_info.en_msg_log);
- debugfs_create_u8("p2p_enable", 0666, scif_dbg, &scif_info.p2p_enable);
-}
-
-void scif_exit_debugfs(void)
-{
- debugfs_remove_recursive(scif_dbg);
-}
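
Once mounted, these entries appeared under the debugfs root, typically
/sys/kernel/debug/scif/ (KBUILD_MODNAME resolves to the module name, here
"scif"): scif_dev and scif_rma were read-only dumps, while en_msg_log and
p2p_enable were writable u8 toggles.
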
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
deleted file mode 100644
index 401b98e5ad79..000000000000
--- a/drivers/misc/mic/scif/scif_dma.c
+++ /dev/null
@@ -1,1940 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "scif_map.h"
-
-/*
- * struct scif_dma_comp_cb - SCIF DMA completion callback
- *
- * @dma_completion_func: DMA completion callback
- * @cb_cookie: DMA completion callback cookie
- * @temp_buf: Temporary buffer
- * @temp_buf_to_free: Temporary buffer to be freed
- * @is_cache: Is a kmem_cache allocated buffer
- * @dst_offset: Destination registration offset
- * @dst_window: Destination registration window
- * @len: Length of the temp buffer
- * @temp_phys: DMA address of the temp buffer
- * @sdev: The SCIF device
- * @header_padding: padding for cache line alignment
- */
-struct scif_dma_comp_cb {
- void (*dma_completion_func)(void *cookie);
- void *cb_cookie;
- u8 *temp_buf;
- u8 *temp_buf_to_free;
- bool is_cache;
- s64 dst_offset;
- struct scif_window *dst_window;
- size_t len;
- dma_addr_t temp_phys;
- struct scif_dev *sdev;
- int header_padding;
-};
-
-/**
- * struct scif_copy_work - Work for DMA copy
- *
- * @src_offset: Starting source offset
- * @dst_offset: Starting destination offset
- * @src_window: Starting src registered window
- * @dst_window: Starting dst registered window
- * @loopback: true if this is a loopback DMA transfer
- * @len: Length of the transfer
- * @comp_cb: DMA copy completion callback
- * @remote_dev: The remote SCIF peer device
- * @fence_type: polling or interrupt based
- * @ordered: is this a tail byte ordered DMA transfer
- */
-struct scif_copy_work {
- s64 src_offset;
- s64 dst_offset;
- struct scif_window *src_window;
- struct scif_window *dst_window;
- int loopback;
- size_t len;
- struct scif_dma_comp_cb *comp_cb;
- struct scif_dev *remote_dev;
- int fence_type;
- bool ordered;
-};
-
-/**
- * scif_reserve_dma_chan:
- * @ep: Endpoint Descriptor.
- *
- * This routine reserves a DMA channel for a particular
- * endpoint. All DMA transfers for an endpoint are always
- * programmed on the same DMA channel.
- */
-int scif_reserve_dma_chan(struct scif_endpt *ep)
-{
- int err = 0;
- struct scif_dev *scifdev;
- struct scif_hw_dev *sdev;
- struct dma_chan *chan;
-
- /* Loopback DMAs are not supported on the management node */
- if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
- return 0;
- if (scif_info.nodeid)
- scifdev = &scif_dev[0];
- else
- scifdev = ep->remote_dev;
- sdev = scifdev->sdev;
- if (!sdev->num_dma_ch)
- return -ENODEV;
- chan = sdev->dma_ch[scifdev->dma_ch_idx];
- scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
- mutex_lock(&ep->rma_info.rma_lock);
- ep->rma_info.dma_chan = chan;
- mutex_unlock(&ep->rma_info.rma_lock);
- return err;
-}
-
-#ifdef CONFIG_MMU_NOTIFIER
-/*
- * __scif_rma_destroy_tcw:
- *
- * This routine destroys temporary cached windows in the given range
- */
-static
-void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
- u64 start, u64 len)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- u64 start_va, end_va;
- u64 end = start + len;
-
- if (end <= start)
- return;
-
- list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
- window = list_entry(item, struct scif_window, list);
- if (!len)
- break;
- start_va = window->va_for_temp;
- end_va = start_va + (window->nr_pages << PAGE_SHIFT);
- if (start < start_va && end <= start_va)
- break;
- if (start >= end_va)
- continue;
- __scif_rma_destroy_tcw_helper(window);
- }
-}
-
-static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
-{
- struct scif_endpt *ep = mmn->ep;
-
- spin_lock(&ep->rma_info.tc_lock);
- __scif_rma_destroy_tcw(mmn, start, len);
- spin_unlock(&ep->rma_info.tc_lock);
-}
-
-static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
-{
- struct list_head *item, *tmp;
- struct scif_mmu_notif *mmn;
-
- list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- }
-}
-
-static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
-{
- struct list_head *item, *tmp;
- struct scif_mmu_notif *mmn;
-
- spin_lock(&ep->rma_info.tc_lock);
- list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- __scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- }
- spin_unlock(&ep->rma_info.tc_lock);
-}
-
-static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
-{
- if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
- return false;
- if ((atomic_read(&ep->rma_info.tcw_total_pages)
- + (cur_bytes >> PAGE_SHIFT)) >
- scif_info.rma_tc_limit) {
- dev_info(scif_info.mdev.this_device,
- "%s %d total=%d, current=%zu reached max\n",
- __func__, __LINE__,
- atomic_read(&ep->rma_info.tcw_total_pages),
- (1 + (cur_bytes >> PAGE_SHIFT)));
- scif_rma_destroy_tcw_invalid();
- __scif_rma_destroy_tcw_ep(ep);
- }
- return true;
-}
-
-static void scif_mmu_notifier_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
- schedule_work(&scif_info.misc_work);
-}
-
-static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct scif_mmu_notif *mmn;
-
- mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
- scif_rma_destroy_tcw(mmn, range->start, range->end - range->start);
-
- return 0;
-}
-
-static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- /*
- * Nothing to do here, everything needed was done in
- * invalidate_range_start.
- */
-}
-
-static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
- .release = scif_mmu_notifier_release,
- .clear_flush_young = NULL,
- .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
- .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
-
-static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
-{
- struct scif_endpt_rma_info *rma = &ep->rma_info;
- struct scif_mmu_notif *mmn = NULL;
- struct list_head *item, *tmp;
-
- mutex_lock(&ep->rma_info.mmn_lock);
- list_for_each_safe(item, tmp, &rma->mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
- mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
- list_del(item);
- kfree(mmn);
- }
- mutex_unlock(&ep->rma_info.mmn_lock);
-}
-
-static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
- struct mm_struct *mm, struct scif_endpt *ep)
-{
- mmn->ep = ep;
- mmn->mm = mm;
- mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
- INIT_LIST_HEAD(&mmn->list);
- INIT_LIST_HEAD(&mmn->tc_reg_list);
-}
-
-static struct scif_mmu_notif *
-scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
-{
- struct scif_mmu_notif *mmn;
-
- list_for_each_entry(mmn, &rma->mmn_list, list)
- if (mmn->mm == mm)
- return mmn;
- return NULL;
-}
-
-static struct scif_mmu_notif *
-scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
-{
- struct scif_mmu_notif *mmn
- = kzalloc(sizeof(*mmn), GFP_KERNEL);
-
- if (!mmn)
- return ERR_PTR(-ENOMEM);
-
- scif_init_mmu_notifier(mmn, current->mm, ep);
- if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
- kfree(mmn);
- return ERR_PTR(-EBUSY);
- }
- list_add(&mmn->list, &ep->rma_info.mmn_list);
- return mmn;
-}
-
-/*
- * Called from the misc thread to destroy temporary cached windows and
- * unregister the MMU notifier for the SCIF endpoint.
- */
-void scif_mmu_notif_handler(struct work_struct *work)
-{
- struct list_head *pos, *tmpq;
- struct scif_endpt *ep;
-restart:
- scif_rma_destroy_tcw_invalid();
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
- ep = list_entry(pos, struct scif_endpt, mmu_list);
- list_del(&ep->mmu_list);
- spin_unlock(&scif_info.rmalock);
- scif_rma_destroy_tcw_ep(ep);
- scif_ep_unregister_mmu_notifier(ep);
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-static bool scif_is_set_reg_cache(int flags)
-{
- return !!(flags & SCIF_RMA_USECACHE);
-}
-#else
-static struct scif_mmu_notif *
-scif_find_mmu_notifier(struct mm_struct *mm,
- struct scif_endpt_rma_info *rma)
-{
- return NULL;
-}
-
-static struct scif_mmu_notif *
-scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
-{
- return NULL;
-}
-
-void scif_mmu_notif_handler(struct work_struct *work)
-{
-}
-
-static bool scif_is_set_reg_cache(int flags)
-{
- return false;
-}
-
-static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
-{
- return false;
-}
-#endif
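
With CONFIG_MMU_NOTIFIER disabled, these stubs quietly collapse the
registration-caching path; a condensed view of the effect, taken from
scif_rma_copy() later in this file:

	cache = scif_is_set_reg_cache(flags);	/* stub: always false */
	/*
	 * The mmu-notifier lookup is therefore skipped, every
	 * scif_vreadfrom()/scif_vwriteto() call registers a fresh temporary
	 * window, and that window is queued for destruction once the copy
	 * completes.
	 */
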
-
-/**
- * scif_register_temp:
- * @epd: End Point Descriptor.
- * @addr: virtual address to/from which to copy
- * @len: length of range to copy
- * @prot: read/write protection
- * @out_offset: computed offset returned by reference.
- * @out_window: allocated registered window returned by reference.
- *
- * Create a temporary registered window. The peer will not know about this
- * window. This API is used by the scif_vreadfrom()/scif_vwriteto() APIs.
- */
-static int
-scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
- off_t *out_offset, struct scif_window **out_window)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err;
- scif_pinned_pages_t pinned_pages;
- size_t aligned_len;
-
- aligned_len = ALIGN(len, PAGE_SIZE);
-
- err = __scif_pin_pages((void *)(addr & PAGE_MASK),
- aligned_len, &prot, 0, &pinned_pages);
- if (err)
- return err;
-
- pinned_pages->prot = prot;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, 0, 0,
- aligned_len >> PAGE_SHIFT,
- (s64 *)out_offset);
- if (err)
- goto error_unpin;
-
- /* Allocate and prepare self registration window */
- *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
- *out_offset, true);
- if (!*out_window) {
- scif_free_window_offset(ep, NULL, *out_offset);
- err = -ENOMEM;
- goto error_unpin;
- }
-
- (*out_window)->pinned_pages = pinned_pages;
- (*out_window)->nr_pages = pinned_pages->nr_pages;
- (*out_window)->prot = pinned_pages->prot;
-
- (*out_window)->va_for_temp = addr & PAGE_MASK;
- err = scif_map_window(ep->remote_dev, *out_window);
- if (err) {
- /* Something went wrong! Rollback */
- scif_destroy_window(ep, *out_window);
- *out_window = NULL;
- } else {
- *out_offset |= (addr - (*out_window)->va_for_temp);
- }
- return err;
-error_unpin:
- if (err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- scif_unpin_pages(pinned_pages);
- return err;
-}
-
-#define SCIF_DMA_TO (3 * HZ)
-
-/*
- * scif_sync_dma - Program a DMA without an interrupt descriptor
- *
- * @sdev - The SCIF hardware device used for DMA registration.
- * @chan - DMA channel to be used.
- * @sync_wait - Wait for DMA to complete?
- *
- * Return 0 on success and -errno on error.
- */
-static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
- bool sync_wait)
-{
- int err = 0;
- struct dma_async_tx_descriptor *tx = NULL;
- enum dma_ctrl_flags flags = DMA_PREP_FENCE;
- dma_cookie_t cookie;
- struct dma_device *ddev;
-
- if (!chan) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- ddev = chan->device;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- if (!sync_wait) {
- dma_async_issue_pending(chan);
- } else {
- if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
- err = 0;
- } else {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- }
- }
-release:
- return err;
-}
-
-static void scif_dma_callback(void *arg)
-{
- struct completion *done = (struct completion *)arg;
-
- complete(done);
-}
-
-#define SCIF_DMA_SYNC_WAIT true
-#define SCIF_DMA_POLL BIT(0)
-#define SCIF_DMA_INTR BIT(1)
-
-/*
- * scif_async_dma - Program a DMA with an interrupt descriptor
- *
- * @sdev - The SCIF hardware device used for DMA registration.
- * @chan - DMA channel to be used.
- * Return 0 on success and -errno on error.
- */
-static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- int err = 0;
- struct dma_device *ddev;
- struct dma_async_tx_descriptor *tx = NULL;
- enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
- DECLARE_COMPLETION_ONSTACK(done_wait);
- dma_cookie_t cookie;
- enum dma_status status;
-
- if (!chan) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- ddev = chan->device;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- reinit_completion(&done_wait);
- tx->callback = scif_dma_callback;
- tx->callback_param = &done_wait;
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- dma_async_issue_pending(chan);
-
- err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
- if (!err) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
- err = 0;
- status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (status != DMA_COMPLETE) {
- err = -EIO;
- dev_err(&sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto release;
- }
-release:
- return err;
-}
-
-/*
- * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
- * DMA channel via polling.
- *
- * @sdev - The SCIF device
- * @chan - DMA channel
- * Return 0 on success and -errno on error.
- */
-static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- if (!chan)
- return -EINVAL;
- return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
-}
-
-/*
- * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
- * DMA channel via interrupt based blocking wait.
- *
- * @sdev - The SCIF device
- * @chan - DMA channel
- * Return 0 on success and -errno on error.
- */
-int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
-{
- if (!chan)
- return -EINVAL;
- return scif_async_dma(sdev, chan);
-}
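
Both drain helpers lean on the same dmaengine idiom: a zero-length memcpy
descriptor that completes only after everything already queued on the channel.
A condensed sketch of the polled variant above (error handling elided; the
calls are the stock dmaengine API):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* zero-length fence: completes after all earlier descriptors */
	tx = chan->device->device_prep_dma_memcpy(chan, 0, 0, 0,
						  DMA_PREP_FENCE);
	cookie = tx->tx_submit(tx);
	if (dma_sync_wait(chan, cookie) == DMA_COMPLETE)
		; /* the channel is quiesced */
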
-
-/**
- * scif_rma_destroy_windows:
- *
- * This routine destroys all windows queued for cleanup
- */
-void scif_rma_destroy_windows(void)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep;
- struct dma_chan *chan;
-
- might_sleep();
-restart:
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(item, tmp, &scif_info.rma) {
- window = list_entry(item, struct scif_window,
- list);
- ep = (struct scif_endpt *)window->ep;
- chan = ep->rma_info.dma_chan;
-
- list_del_init(&window->list);
- spin_unlock(&scif_info.rmalock);
- if (!chan || !scifdev_alive(ep) ||
- !scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan))
- /* Remove window from global list */
- window->unreg_state = OP_COMPLETED;
- else
- dev_warn(&ep->remote_dev->sdev->dev,
- "DMA engine hung?\n");
- if (window->unreg_state == OP_COMPLETED) {
- if (window->type == SCIF_WINDOW_SELF)
- scif_destroy_window(ep, window);
- else
- scif_destroy_remote_window(window);
- atomic_dec(&ep->rma_info.tw_refcount);
- }
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-/**
- * scif_rma_destroy_tcw_invalid:
- *
- * This routine destroys temporary cached registered windows
- * which have been queued for cleanup.
- */
-void scif_rma_destroy_tcw_invalid(void)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep;
- struct dma_chan *chan;
-
- might_sleep();
-restart:
- spin_lock(&scif_info.rmalock);
- list_for_each_safe(item, tmp, &scif_info.rma_tc) {
- window = list_entry(item, struct scif_window, list);
- ep = (struct scif_endpt *)window->ep;
- chan = ep->rma_info.dma_chan;
- list_del_init(&window->list);
- spin_unlock(&scif_info.rmalock);
- mutex_lock(&ep->rma_info.rma_lock);
- if (!chan || !scifdev_alive(ep) ||
- !scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan)) {
- atomic_sub(window->nr_pages,
- &ep->rma_info.tcw_total_pages);
- scif_destroy_window(ep, window);
- atomic_dec(&ep->rma_info.tcw_refcount);
- } else {
- dev_warn(&ep->remote_dev->sdev->dev,
- "DMA engine hung?\n");
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- goto restart;
- }
- spin_unlock(&scif_info.rmalock);
-}
-
-static inline
-void *_get_local_va(off_t off, struct scif_window *window, size_t len)
-{
- int page_nr = (off - window->offset) >> PAGE_SHIFT;
- off_t page_off = off & ~PAGE_MASK;
- void *va = NULL;
-
- if (window->type == SCIF_WINDOW_SELF) {
- struct page **pages = window->pinned_pages->pages;
-
- va = page_address(pages[page_nr]) + page_off;
- }
- return va;
-}
-
-static inline
-void *ioremap_remote(off_t off, struct scif_window *window,
- size_t len, struct scif_dev *dev,
- struct scif_window_iter *iter)
-{
- dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
-
- /*
-	 * If the DMA address is not card relative, then we need the DMA
-	 * addresses to be an offset into the BAR. The aperture base was already
-	 * added, so subtract it here since scif_ioremap is going to add it
-	 * again.
- */
- if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
- dev->sdev->aper && !dev->sdev->card_rel_da)
- phys = phys - dev->sdev->aper->pa;
- return scif_ioremap(phys, len, dev);
-}
-
-static inline void
-iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
-{
- scif_iounmap(virt, size, work->remote_dev);
-}
-
-/*
- * Takes care of ordering issues caused by
- * 1. Hardware: only in the case of a CPU copy from the mgmt node to the
- * card, because of WC (write-combining) memory.
- * 2. Software: if memcpy reorders copy instructions for optimization.
- * This could happen on both the mgmt node and the card.
- */
-static inline void
-scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
-{
- if (!count)
- return;
-
- memcpy_toio((void __iomem __force *)dst, src, --count);
- /* Order the last byte with the previous stores */
- wmb();
- *(dst + count) = *(src + count);
-}
-
-static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
- size_t count, bool ordered)
-{
- if (ordered)
- scif_ordered_memcpy_toio(dst, src, count);
- else
- memcpy_toio((void __iomem __force *)dst, src, count);
-}
-
-static inline
-void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
-{
- if (!count)
- return;
-
- memcpy_fromio(dst, (void __iomem __force *)src, --count);
- /* Order the last byte with the previous loads */
- rmb();
- *(dst + count) = *(src + count);
-}
-
-static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
- size_t count, bool ordered)
-{
- if (ordered)
- scif_ordered_memcpy_fromio(dst, src, count);
- else
- memcpy_fromio(dst, (void __iomem __force *)src, count);
-}
-
-#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-/*
- * scif_off_to_dma_addr:
- * Obtain the dma_addr given the window and the offset.
- * @window: Registered window.
- * @off: Window offset.
- * @nr_bytes: Return the number of contiguous bytes till the next DMA address.
- * @iter: Window iterator; updated with the index and start offset of the
- * chunk found, for faster lookups on the next call.
- * The nr_bytes value gives the caller an estimate of the maximum possible
- * DMA transfer, while the updated iterator provides faster lookups
- * for the next iteration.
- */
-dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
- size_t *nr_bytes, struct scif_window_iter *iter)
-{
- int i, page_nr;
- s64 start, end;
- off_t page_off;
-
- if (window->nr_pages == window->nr_contig_chunks) {
- page_nr = (off - window->offset) >> PAGE_SHIFT;
- page_off = off & ~PAGE_MASK;
-
- if (nr_bytes)
- *nr_bytes = PAGE_SIZE - page_off;
- return window->dma_addr[page_nr] | page_off;
- }
- if (iter) {
- i = iter->index;
- start = iter->offset;
- } else {
- i = 0;
- start = window->offset;
- }
- for (; i < window->nr_contig_chunks; i++) {
- end = start + (window->num_pages[i] << PAGE_SHIFT);
- if (off >= start && off < end) {
- if (iter) {
- iter->index = i;
- iter->offset = start;
- }
- if (nr_bytes)
- *nr_bytes = end - off;
- return (window->dma_addr[i] + (off - start));
- }
- start += (window->num_pages[i] << PAGE_SHIFT);
- }
- dev_err(scif_info.mdev.this_device,
- "%s %d BUG. Addr not found? window %p off 0x%llx\n",
- __func__, __LINE__, window, off);
- return SCIF_RMA_ERROR_CODE;
-}
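
A worked example of the chunked lookup, assuming 4 KiB pages and a window at
offset 0x10000 whose two contiguous chunks span 2 and 3 pages:

	/*
	 * chunk 0 covers [0x10000, 0x12000), chunk 1 covers [0x12000, 0x15000).
	 * For off = 0x13800 the scan stops at i = 1 and returns
	 *	dma_addr[1] + (0x13800 - 0x12000)
	 * with *nr_bytes = 0x15000 - 0x13800 = 0x1800, the bytes left in that
	 * chunk. A caller-supplied iterator restarts the scan at
	 * (iter->index, iter->offset) instead of at chunk 0.
	 */
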
-
-/*
- * Copy between rma window and temporary buffer
- */
-static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
- u8 *temp, size_t rem_len, bool to_temp)
-{
- void *window_virt;
- size_t loop_len;
- int offset_in_page;
- s64 end_offset;
-
- offset_in_page = offset & ~PAGE_MASK;
- loop_len = PAGE_SIZE - offset_in_page;
-
- if (rem_len < loop_len)
- loop_len = rem_len;
-
- window_virt = _get_local_va(offset, window, loop_len);
- if (!window_virt)
- return;
- if (to_temp)
- memcpy(temp, window_virt, loop_len);
- else
- memcpy(window_virt, temp, loop_len);
-
- offset += loop_len;
- temp += loop_len;
- rem_len -= loop_len;
-
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- while (rem_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- loop_len = min(PAGE_SIZE, rem_len);
- window_virt = _get_local_va(offset, window, loop_len);
- if (!window_virt)
- return;
- if (to_temp)
- memcpy(temp, window_virt, loop_len);
- else
- memcpy(window_virt, temp, loop_len);
- offset += loop_len;
- temp += loop_len;
- rem_len -= loop_len;
- }
-}
-
-/**
- * scif_rma_completion_cb:
- * @data: RMA cookie
- *
- * RMA interrupt completion callback.
- */
-static void scif_rma_completion_cb(void *data)
-{
- struct scif_dma_comp_cb *comp_cb = data;
-
-	/* Copy out the temp buffer to the destination window, then free it */
- if (comp_cb->dst_window)
- scif_rma_local_cpu_copy(comp_cb->dst_offset,
- comp_cb->dst_window,
- comp_cb->temp_buf +
- comp_cb->header_padding,
- comp_cb->len, false);
- scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
- SCIF_KMEM_UNALIGNED_BUF_SIZE);
- if (comp_cb->is_cache)
- kmem_cache_free(unaligned_cache,
- comp_cb->temp_buf_to_free);
- else
- kfree(comp_cb->temp_buf_to_free);
-}
-
-/* Copies between temporary buffer and offsets provided in work */
-static int
-scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
- u8 *temp, struct dma_chan *chan,
- bool src_local)
-{
- struct scif_dma_comp_cb *comp_cb = work->comp_cb;
- dma_addr_t window_dma_addr, temp_dma_addr;
- dma_addr_t temp_phys = comp_cb->temp_phys;
- size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
- int offset_in_ca, ret = 0;
- s64 end_offset, offset;
- struct scif_window *window;
- void *window_virt_addr;
- size_t tail_len;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- if (src_local) {
- offset = work->dst_offset;
- window = work->dst_window;
- } else {
- offset = work->src_offset;
- window = work->src_window;
- }
-
- offset_in_ca = offset & (L1_CACHE_BYTES - 1);
- if (offset_in_ca) {
- loop_len = L1_CACHE_BYTES - offset_in_ca;
- loop_len = min(loop_len, remaining_len);
- window_virt_addr = ioremap_remote(offset, window,
- loop_len,
- work->remote_dev,
- NULL);
- if (!window_virt_addr)
- return -ENOMEM;
- if (src_local)
- scif_unaligned_cpy_toio(window_virt_addr, temp,
- loop_len,
- work->ordered &&
- !(remaining_len - loop_len));
- else
- scif_unaligned_cpy_fromio(temp, window_virt_addr,
- loop_len, work->ordered &&
- !(remaining_len - loop_len));
- iounmap_remote(window_virt_addr, loop_len, work);
-
- offset += loop_len;
- temp += loop_len;
- temp_phys += loop_len;
- remaining_len -= loop_len;
- }
-
- offset_in_ca = offset & ~PAGE_MASK;
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
-
- tail_len = remaining_len & (L1_CACHE_BYTES - 1);
- remaining_len -= tail_len;
- while (remaining_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- if (scif_is_mgmt_node())
- temp_dma_addr = temp_phys;
- else
- /* Fix if we ever enable IOMMU on the card */
- temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
- window_dma_addr = scif_off_to_dma_addr(window, offset,
- &nr_contig_bytes,
- NULL);
- loop_len = min(nr_contig_bytes, remaining_len);
- if (src_local) {
- if (work->ordered && !tail_len &&
- !(remaining_len - loop_len) &&
- loop_len != L1_CACHE_BYTES) {
- /*
-				 * Break up the last chunk of the transfer into
-				 * two steps if there is no tail, to guarantee
-				 * DMA ordering. SCIF_DMA_POLLING inserts
- * a status update descriptor in step 1 which
- * acts as a double sided synchronization fence
- * for the DMA engine to ensure that the last
- * cache line in step 2 is updated last.
- */
- /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len -
- L1_CACHE_BYTES,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- offset += (loop_len - L1_CACHE_BYTES);
- temp_dma_addr += (loop_len - L1_CACHE_BYTES);
- window_dma_addr += (loop_len - L1_CACHE_BYTES);
- remaining_len -= (loop_len - L1_CACHE_BYTES);
- loop_len = remaining_len;
-
- /* Step 2) DMA: L1_CACHE_BYTES */
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx =
- dev->device_prep_dma_memcpy(chan,
- window_dma_addr,
- temp_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- } else {
- tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
- window_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- offset += loop_len;
- temp += loop_len;
- temp_phys += loop_len;
- remaining_len -= loop_len;
- offset_in_ca = 0;
- }
- if (tail_len) {
- if (offset == end_offset) {
- window = list_next_entry(window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- }
- window_virt_addr = ioremap_remote(offset, window, tail_len,
- work->remote_dev,
- NULL);
- if (!window_virt_addr)
- return -ENOMEM;
- /*
- * The CPU copy for the tail bytes must be initiated only once
- * previous DMA transfers for this endpoint have completed
- * to guarantee ordering.
- */
- if (work->ordered) {
- struct scif_dev *rdev = work->remote_dev;
-
- ret = scif_drain_dma_intr(rdev->sdev, chan);
- if (ret)
- return ret;
- }
- if (src_local)
- scif_unaligned_cpy_toio(window_virt_addr, temp,
- tail_len, work->ordered);
- else
- scif_unaligned_cpy_fromio(temp, window_virt_addr,
- tail_len, work->ordered);
- iounmap_remote(window_virt_addr, tail_len, work);
- }
- tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
- if (!tx) {
- ret = -ENOMEM;
- return ret;
- }
- tx->callback = &scif_rma_completion_cb;
- tx->callback_param = comp_cb;
- cookie = tx->tx_submit(tx);
-
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- return ret;
- }
- dma_async_issue_pending(chan);
- return 0;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-/*
- * _scif_rma_list_dma_copy_aligned:
- *
- * Traverse all the windows and perform DMA copy.
- */
-static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
- struct dma_chan *chan)
-{
- dma_addr_t src_dma_addr, dst_dma_addr;
- size_t loop_len, remaining_len, src_contig_bytes = 0;
- size_t dst_contig_bytes = 0;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
- s64 end_src_offset, end_dst_offset;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- int ret = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- remaining_len = work->len;
-
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- while (remaining_len) {
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(src_window, &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(dst_window, &dst_win_iter);
- }
-
- /* compute dma addresses for transfer */
- src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
- &src_contig_bytes,
- &src_win_iter);
- dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
- &dst_contig_bytes,
- &dst_win_iter);
- loop_len = min(src_contig_bytes, dst_contig_bytes);
- loop_len = min(loop_len, remaining_len);
- if (work->ordered && !(remaining_len - loop_len)) {
- /*
- * Break up the last chunk of the transfer into two
- * steps to ensure that the last byte in step 2 is
- * updated last.
- */
- /* Step 1) DMA: Body Length - 1 */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len - 1,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- src_offset += (loop_len - 1);
- dst_offset += (loop_len - 1);
- src_dma_addr += (loop_len - 1);
- dst_dma_addr += (loop_len - 1);
- remaining_len -= (loop_len - 1);
- loop_len = remaining_len;
-
-			/* Step 2) DMA: 1 byte */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr, loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- }
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
- return ret;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-/*
- * scif_rma_list_dma_copy_aligned:
- *
- * Traverse all the windows and perform DMA copy, with the CPU copying the
- * cache-line-misaligned head and tail bytes.
- */
-static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
- struct dma_chan *chan)
-{
- dma_addr_t src_dma_addr, dst_dma_addr;
- size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
- size_t dst_contig_bytes = 0;
- int src_cache_off;
- s64 end_src_offset, end_dst_offset;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
- void *src_virt, *dst_virt;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- int ret = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_device *dev = chan->device;
- dma_cookie_t cookie;
-
- remaining_len = work->len;
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
-
- src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
- if (src_cache_off != 0) {
- /* Head */
- loop_len = L1_CACHE_BYTES - src_cache_off;
- loop_len = min(loop_len, remaining_len);
- src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev, NULL);
- if (!src_virt)
- return -ENOMEM;
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev, NULL);
- if (!dst_virt) {
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- return -ENOMEM;
- }
- if (src_window->type == SCIF_WINDOW_SELF)
- scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
- remaining_len == loop_len ?
- work->ordered : false);
- else
- scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
- remaining_len == loop_len ?
- work->ordered : false);
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- if (dst_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(dst_virt, loop_len, work);
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
-
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- tail_len = remaining_len & (L1_CACHE_BYTES - 1);
- remaining_len -= tail_len;
- while (remaining_len) {
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(src_window, &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- scif_init_window_iter(dst_window, &dst_win_iter);
- }
-
- /* compute dma addresses for transfer */
- src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
- &src_contig_bytes,
- &src_win_iter);
- dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
- &dst_contig_bytes,
- &dst_win_iter);
- loop_len = min(src_contig_bytes, dst_contig_bytes);
- loop_len = min(loop_len, remaining_len);
- if (work->ordered && !tail_len &&
- !(remaining_len - loop_len)) {
- /*
- * Break up the last chunk of the transfer into two
-			 * steps if there is no tail, to guarantee DMA ordering.
- * Passing SCIF_DMA_POLLING inserts a status update
- * descriptor in step 1 which acts as a double sided
- * synchronization fence for the DMA engine to ensure
- * that the last cache line in step 2 is updated last.
- */
- /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len -
- L1_CACHE_BYTES,
- DMA_PREP_FENCE);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- src_offset += (loop_len - L1_CACHE_BYTES);
- dst_offset += (loop_len - L1_CACHE_BYTES);
- src_dma_addr += (loop_len - L1_CACHE_BYTES);
- dst_dma_addr += (loop_len - L1_CACHE_BYTES);
- remaining_len -= (loop_len - L1_CACHE_BYTES);
- loop_len = remaining_len;
-
- /* Step 2) DMA: L1_CACHE_BYTES */
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- } else {
- tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
- src_dma_addr,
- loop_len, 0);
- if (!tx) {
- ret = -ENOMEM;
- goto err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- ret = -ENOMEM;
- goto err;
- }
- dma_async_issue_pending(chan);
- }
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- }
- remaining_len = tail_len;
- if (remaining_len) {
- loop_len = remaining_len;
- if (src_offset == end_src_offset)
- src_window = list_next_entry(src_window, list);
- if (dst_offset == end_dst_offset)
- dst_window = list_next_entry(dst_window, list);
-
- src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
- /*
- * The CPU copy for the tail bytes must be initiated only once
- * previous DMA transfers for this endpoint have completed to
- * guarantee ordering.
- */
- if (work->ordered) {
- struct scif_dev *rdev = work->remote_dev;
-
- ret = scif_drain_dma_poll(rdev->sdev, chan);
- if (ret)
- return ret;
- }
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev, NULL);
- if (!src_virt)
- return -ENOMEM;
-
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev, NULL);
- if (!dst_virt) {
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
- return -ENOMEM;
- }
-
- if (src_window->type == SCIF_WINDOW_SELF)
- scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
- work->ordered);
- else
- scif_unaligned_cpy_fromio(dst_virt, src_virt,
- loop_len, work->ordered);
- if (src_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(src_virt, loop_len, work);
-
- if (dst_window->type != SCIF_WINDOW_SELF)
- iounmap_remote(dst_virt, loop_len, work);
- remaining_len -= loop_len;
- }
- return ret;
-err:
- dev_err(scif_info.mdev.this_device,
- "%s %d Desc Prog Failed ret %d\n",
- __func__, __LINE__, ret);
- return ret;
-}
-
-/*
- * scif_rma_list_cpu_copy:
- *
- * Traverse all the windows and perform CPU copy.
- */
-static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
-{
- void *src_virt, *dst_virt;
- size_t loop_len, remaining_len;
- int src_page_off, dst_page_off;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- struct scif_window *src_window = work->src_window;
- struct scif_window *dst_window = work->dst_window;
- s64 end_src_offset, end_dst_offset;
- int ret = 0;
- struct scif_window_iter src_win_iter;
- struct scif_window_iter dst_win_iter;
-
- remaining_len = work->len;
-
- scif_init_window_iter(src_window, &src_win_iter);
- scif_init_window_iter(dst_window, &dst_win_iter);
- while (remaining_len) {
- src_page_off = src_offset & ~PAGE_MASK;
- dst_page_off = dst_offset & ~PAGE_MASK;
- loop_len = min(PAGE_SIZE -
- max(src_page_off, dst_page_off),
- remaining_len);
-
- if (src_window->type == SCIF_WINDOW_SELF)
- src_virt = _get_local_va(src_offset, src_window,
- loop_len);
- else
- src_virt = ioremap_remote(src_offset, src_window,
- loop_len,
- work->remote_dev,
- &src_win_iter);
- if (!src_virt) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (dst_window->type == SCIF_WINDOW_SELF)
- dst_virt = _get_local_va(dst_offset, dst_window,
- loop_len);
- else
- dst_virt = ioremap_remote(dst_offset, dst_window,
- loop_len,
- work->remote_dev,
- &dst_win_iter);
- if (!dst_virt) {
- if (src_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(src_virt, loop_len, work);
- ret = -ENOMEM;
- goto error;
- }
-
- if (work->loopback) {
- memcpy(dst_virt, src_virt, loop_len);
- } else {
- if (src_window->type == SCIF_WINDOW_SELF)
- memcpy_toio((void __iomem __force *)dst_virt,
- src_virt, loop_len);
- else
- memcpy_fromio(dst_virt,
- (void __iomem __force *)src_virt,
- loop_len);
- }
- if (src_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(src_virt, loop_len, work);
-
- if (dst_window->type == SCIF_WINDOW_PEER)
- iounmap_remote(dst_virt, loop_len, work);
-
- src_offset += loop_len;
- dst_offset += loop_len;
- remaining_len -= loop_len;
- if (remaining_len) {
- end_src_offset = src_window->offset +
- (src_window->nr_pages << PAGE_SHIFT);
- end_dst_offset = dst_window->offset +
- (dst_window->nr_pages << PAGE_SHIFT);
- if (src_offset == end_src_offset) {
- src_window = list_next_entry(src_window, list);
- scif_init_window_iter(src_window,
- &src_win_iter);
- }
- if (dst_offset == end_dst_offset) {
- dst_window = list_next_entry(dst_window, list);
- scif_init_window_iter(dst_window,
- &dst_win_iter);
- }
- }
- }
-error:
- return ret;
-}
-
-static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
- struct scif_copy_work *work,
- struct dma_chan *chan, off_t loffset)
-{
- int src_cache_off, dst_cache_off;
- s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
- u8 *temp = NULL;
- bool src_local = true;
- struct scif_dma_comp_cb *comp_cb;
- int err;
-
- if (is_dma_copy_aligned(chan->device, 1, 1, 1))
- return _scif_rma_list_dma_copy_aligned(work, chan);
-
- src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
- dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);
-
- if (dst_cache_off == src_cache_off)
- return scif_rma_list_dma_copy_aligned(work, chan);
-
- if (work->loopback)
- return scif_rma_list_cpu_copy(work);
- src_local = work->src_window->type == SCIF_WINDOW_SELF;
-
- /* Allocate dma_completion cb */
- comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
- if (!comp_cb)
- goto error;
-
- work->comp_cb = comp_cb;
- comp_cb->cb_cookie = comp_cb;
- comp_cb->dma_completion_func = &scif_rma_completion_cb;
-
- if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
- comp_cb->is_cache = false;
- /* Allocate padding bytes to align to a cache line */
- temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
- GFP_KERNEL);
- if (!temp)
- goto free_comp_cb;
- comp_cb->temp_buf_to_free = temp;
- /* kmalloc(..) does not guarantee cache line alignment */
- if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
- temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
- } else {
- comp_cb->is_cache = true;
- temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
- if (!temp)
- goto free_comp_cb;
- comp_cb->temp_buf_to_free = temp;
- }
-
- if (src_local) {
- temp += dst_cache_off;
- scif_rma_local_cpu_copy(work->src_offset, work->src_window,
- temp, work->len, true);
- } else {
- comp_cb->dst_window = work->dst_window;
- comp_cb->dst_offset = work->dst_offset;
- work->src_offset = work->src_offset - src_cache_off;
- comp_cb->len = work->len;
- work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
- comp_cb->header_padding = src_cache_off;
- }
- comp_cb->temp_buf = temp;
-
- err = scif_map_single(&comp_cb->temp_phys, temp,
- work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
- if (err)
- goto free_temp_buf;
- comp_cb->sdev = work->remote_dev;
- if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
- goto free_temp_buf;
- if (!src_local)
- work->fence_type = SCIF_DMA_INTR;
- return 0;
-free_temp_buf:
- if (comp_cb->is_cache)
- kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
- else
- kfree(comp_cb->temp_buf_to_free);
-free_comp_cb:
- kfree(comp_cb);
-error:
- return -ENOMEM;
-}
-
-/**
- * scif_rma_copy:
- * @epd: end point descriptor.
- * @loffset: offset in local registered address space to/from which to copy
- * @addr: user virtual address to/from which to copy
- * @len: length of range to copy
- * @roffset: offset in remote registered address space to/from which to copy
- * @flags: flags
- * @dir: LOCAL->REMOTE or vice versa.
- * @last_chunk: true if this is the last chunk of a larger transfer
- *
- * Validate parameters, check if src/dst registered ranges requested for copy
- * are valid and initiate either CPU or DMA copy.
- */
-static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
- size_t len, off_t roffset, int flags,
- enum scif_rma_dir dir, bool last_chunk)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_rma_req remote_req;
- struct scif_rma_req req;
- struct scif_window *local_window = NULL;
- struct scif_window *remote_window = NULL;
- struct scif_copy_work copy_work;
- bool loopback;
- int err = 0;
- struct dma_chan *chan;
- struct scif_mmu_notif *mmn = NULL;
- bool cache = false;
- struct device *spdev;
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
- SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
- return -EINVAL;
-
- loopback = scifdev_self(ep->remote_dev) ? true : false;
- copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
- SCIF_DMA_POLL : 0;
- copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
-
- /* Use CPU for Mgmt node <-> Mgmt node copies */
- if (loopback && scif_is_mgmt_node()) {
- flags |= SCIF_RMA_USECPU;
- copy_work.fence_type = 0x0;
- }
-
- cache = scif_is_set_reg_cache(flags);
-
- remote_req.out_window = &remote_window;
- remote_req.offset = roffset;
- remote_req.nr_bytes = len;
- /*
- * If transfer is from local to remote then the remote window
- * must be writeable and vice versa.
- */
- remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
- remote_req.type = SCIF_WINDOW_PARTIAL;
- remote_req.head = &ep->rma_info.remote_reg_list;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
-
- if (addr && cache) {
- mutex_lock(&ep->rma_info.mmn_lock);
- mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
- if (!mmn)
- mmn = scif_add_mmu_notifier(current->mm, ep);
- mutex_unlock(&ep->rma_info.mmn_lock);
- if (IS_ERR(mmn)) {
- scif_put_peer_dev(spdev);
- return PTR_ERR(mmn);
- }
- cache = cache && !scif_rma_tc_can_cache(ep, len);
- }
- mutex_lock(&ep->rma_info.rma_lock);
- if (addr) {
- req.out_window = &local_window;
- req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
- PAGE_SIZE);
- req.va_for_temp = addr & PAGE_MASK;
- req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
- VM_READ : VM_WRITE | VM_READ);
- /* Does a valid local window exist? */
- if (mmn) {
- spin_lock(&ep->rma_info.tc_lock);
- req.head = &mmn->tc_reg_list;
- err = scif_query_tcw(ep, &req);
- spin_unlock(&ep->rma_info.tc_lock);
- }
- if (!mmn || err) {
- err = scif_register_temp(epd, req.va_for_temp,
- req.nr_bytes, req.prot,
- &loffset, &local_window);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
- if (!cache)
- goto skip_cache;
- atomic_inc(&ep->rma_info.tcw_refcount);
- atomic_add_return(local_window->nr_pages,
- &ep->rma_info.tcw_total_pages);
- if (mmn) {
- spin_lock(&ep->rma_info.tc_lock);
- scif_insert_tcw(local_window,
- &mmn->tc_reg_list);
- spin_unlock(&ep->rma_info.tc_lock);
- }
- }
-skip_cache:
- loffset = local_window->offset +
- (addr - local_window->va_for_temp);
- } else {
- req.out_window = &local_window;
- req.offset = loffset;
- /*
- * If transfer is from local to remote then the self window
- * must be readable and vice versa.
- */
- req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.reg_list;
- /* Does a valid local window exist? */
- err = scif_query_window(&req);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
- }
-
- /* Does a valid remote window exist? */
- err = scif_query_window(&remote_req);
- if (err) {
- mutex_unlock(&ep->rma_info.rma_lock);
- goto error;
- }
-
- /*
- * Prepare copy_work for submitting work to the DMA kernel thread
- * or CPU copy routine.
- */
- copy_work.len = len;
- copy_work.loopback = loopback;
- copy_work.remote_dev = ep->remote_dev;
- if (dir == SCIF_LOCAL_TO_REMOTE) {
- copy_work.src_offset = loffset;
- copy_work.src_window = local_window;
- copy_work.dst_offset = roffset;
- copy_work.dst_window = remote_window;
- } else {
- copy_work.src_offset = roffset;
- copy_work.src_window = remote_window;
- copy_work.dst_offset = loffset;
- copy_work.dst_window = local_window;
- }
-
- if (flags & SCIF_RMA_USECPU) {
- scif_rma_list_cpu_copy(&copy_work);
- } else {
- chan = ep->rma_info.dma_chan;
- err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
- chan, loffset);
- }
- if (addr && !cache)
- atomic_inc(&ep->rma_info.tw_refcount);
-
- mutex_unlock(&ep->rma_info.rma_lock);
-
- if (last_chunk) {
- struct scif_dev *rdev = ep->remote_dev;
-
- if (copy_work.fence_type == SCIF_DMA_POLL)
- err = scif_drain_dma_poll(rdev->sdev,
- ep->rma_info.dma_chan);
- else if (copy_work.fence_type == SCIF_DMA_INTR)
- err = scif_drain_dma_intr(rdev->sdev,
- ep->rma_info.dma_chan);
- }
-
- if (addr && !cache)
- scif_queue_for_cleanup(local_window, &scif_info.rma);
- scif_put_peer_dev(spdev);
- return err;
-error:
- if (err) {
- if (addr && local_window && !cache)
- scif_destroy_window(ep, local_window);
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d len 0x%lx\n",
- __func__, __LINE__, err, len);
- }
- scif_put_peer_dev(spdev);
- return err;
-}
-
-int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
- epd, loffset, len, roffset, flags);
- if (scif_unaligned(loffset, roffset)) {
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, loffset, 0x0,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_REMOTE_TO_LOCAL, false);
- if (err)
- goto readfrom_err;
- loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, loffset, 0x0, len,
- roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
-readfrom_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_readfrom);
-
-int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, loffset, len, roffset, flags);
- if (scif_unaligned(loffset, roffset)) {
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, loffset, 0x0,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_LOCAL_TO_REMOTE, false);
- if (err)
- goto writeto_err;
- loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, loffset, 0x0, len,
- roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
-writeto_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_writeto);
-
-int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, addr, len, roffset, flags);
- if (scif_unaligned((off_t __force)addr, roffset)) {
- if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
- flags &= ~SCIF_RMA_USECACHE;
-
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, 0, (u64)addr,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_REMOTE_TO_LOCAL, false);
- if (err)
- goto vreadfrom_err;
- addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, 0, (u64)addr, len,
- roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
-vreadfrom_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_vreadfrom);
-
-int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
- off_t roffset, int flags)
-{
- int err;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
- epd, addr, len, roffset, flags);
- if (scif_unaligned((off_t __force)addr, roffset)) {
- if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
- flags &= ~SCIF_RMA_USECACHE;
-
- while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
- err = scif_rma_copy(epd, 0, (u64)addr,
- SCIF_MAX_UNALIGNED_BUF_SIZE,
- roffset, flags,
- SCIF_LOCAL_TO_REMOTE, false);
- if (err)
- goto vwriteto_err;
- addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
- roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
- len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
- }
- }
- err = scif_rma_copy(epd, 0, (u64)addr, len,
- roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
-vwriteto_err:
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_vwriteto);
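
All four copy entry points share this chunking strategy for
cache-line-misaligned transfers; a hypothetical call showing the split
(endpoint, buffer, and offset are invented):

	/* addr/roffset misaligned; len = 2 * SCIF_MAX_UNALIGNED_BUF_SIZE + 7 */
	err = scif_vwriteto(epd, addr, len, roffset, SCIF_RMA_SYNC);
	/*
	 * Internally: SCIF_RMA_USECACHE is stripped, two
	 * SCIF_MAX_UNALIGNED_BUF_SIZE chunks go out with last_chunk == false,
	 * and the final 7 bytes are sent with last_chunk == true, which also
	 * performs the SCIF_RMA_SYNC drain.
	 */
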
diff --git a/drivers/misc/mic/scif/scif_epd.c b/drivers/misc/mic/scif/scif_epd.c
deleted file mode 100644
index 426687f6696b..000000000000
--- a/drivers/misc/mic/scif/scif_epd.c
+++ /dev/null
@@ -1,357 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "scif_map.h"
-
-void scif_cleanup_ep_qp(struct scif_endpt *ep)
-{
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (qp->outbound_q.rb_base) {
- scif_iounmap((void *)qp->outbound_q.rb_base,
- qp->outbound_q.size, ep->remote_dev);
- qp->outbound_q.rb_base = NULL;
- }
- if (qp->remote_qp) {
- scif_iounmap((void *)qp->remote_qp,
- sizeof(struct scif_qp), ep->remote_dev);
- qp->remote_qp = NULL;
- }
- if (qp->local_qp) {
- scif_unmap_single(qp->local_qp, ep->remote_dev,
- sizeof(struct scif_qp));
- qp->local_qp = 0x0;
- }
- if (qp->local_buf) {
- scif_unmap_single(qp->local_buf, ep->remote_dev,
- SCIF_ENDPT_QP_SIZE);
- qp->local_buf = 0;
- }
-}
-
-void scif_teardown_ep(void *endpt)
-{
- struct scif_endpt *ep = endpt;
- struct scif_qp *qp = ep->qp_info.qp;
-
- if (qp) {
- spin_lock(&ep->lock);
- scif_cleanup_ep_qp(ep);
- spin_unlock(&ep->lock);
- kfree(qp->inbound_q.rb_base);
- kfree(qp);
- }
-}
-
-/*
- * Enqueue the endpoint to the zombie list for cleanup.
- * The endpoint should not be accessed once this API returns.
- */
-void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held)
-{
- if (!eplock_held)
- mutex_lock(&scif_info.eplock);
- spin_lock(&ep->lock);
- ep->state = SCIFEP_ZOMBIE;
- spin_unlock(&ep->lock);
- list_add_tail(&ep->list, &scif_info.zombie);
- scif_info.nr_zombies++;
- if (!eplock_held)
- mutex_unlock(&scif_info.eplock);
- schedule_work(&scif_info.misc_work);
-}
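The eplock_held flag lets callers that already hold scif_info.eplock reuse this helper without self-deadlocking. Callers spell out the intent with the SCIF_EPLOCK_HELD constant from scif_epd.h (deleted later in this patch):

	/* Caller does not hold scif_info.eplock: helper takes and drops it. */
	scif_add_epd_to_zombie_list(ep, !SCIF_EPLOCK_HELD);

	/* Caller already holds scif_info.eplock: helper skips the lock. */
	scif_add_epd_to_zombie_list(ep, SCIF_EPLOCK_HELD);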
-
-static struct scif_endpt *scif_find_listen_ep(u16 port)
-{
- struct scif_endpt *ep = NULL;
- struct list_head *pos, *tmpq;
-
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.listen) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->port.port == port) {
- mutex_unlock(&scif_info.eplock);
- return ep;
- }
- }
- mutex_unlock(&scif_info.eplock);
- return NULL;
-}
-
-void scif_cleanup_zombie_epd(void)
-{
- struct list_head *pos, *tmpq;
- struct scif_endpt *ep;
-
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.zombie) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (scif_rma_ep_can_uninit(ep)) {
- list_del(pos);
- scif_info.nr_zombies--;
- put_iova_domain(&ep->rma_info.iovad);
- kfree(ep);
- }
- }
- mutex_unlock(&scif_info.eplock);
-}
-
-/**
- * scif_cnctreq() - Respond to SCIF_CNCT_REQ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * This message is initiated by the remote node to request a connection
- * to the local node. This function looks for an end point in the
- * listen state on the requested port id.
- *
- * If it finds a listening port it places the connect request on the
- * listening end points queue and wakes up any pending accept calls.
- *
- * If it does not find a listening end point it sends a connection
- * reject message to the remote node.
- */
-void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = NULL;
- struct scif_conreq *conreq;
-
- conreq = kmalloc(sizeof(*conreq), GFP_KERNEL);
- if (!conreq)
- /* Lack of resources so reject the request. */
- goto conreq_sendrej;
-
- ep = scif_find_listen_ep(msg->dst.port);
- if (!ep)
- /* Send reject due to no listening ports */
- goto conreq_sendrej_free;
- else
- spin_lock(&ep->lock);
-
- if (ep->backlog <= ep->conreqcnt) {
- /* Send reject due to too many pending requests */
- spin_unlock(&ep->lock);
- goto conreq_sendrej_free;
- }
-
- conreq->msg = *msg;
- list_add_tail(&conreq->list, &ep->conlist);
- ep->conreqcnt++;
- wake_up_interruptible(&ep->conwq);
- spin_unlock(&ep->lock);
- return;
-
-conreq_sendrej_free:
- kfree(conreq);
-conreq_sendrej:
- msg->uop = SCIF_CNCT_REJ;
- scif_nodeqp_send(&scif_dev[msg->src.node], msg);
-}
-
-/**
- * scif_cnctgnt() - Respond to SCIF_CNCT_GNT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * An accept() on the remote node has occurred and sent this message
- * to indicate success. Place the end point in the MAPPING state and
- * save the remote nodes memory information. Then wake up the connect
- * request so it can finish.
- */
-void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTING == ep->state) {
- ep->peer.node = msg->src.node;
- ep->peer.port = msg->src.port;
- ep->qp_info.gnt_pld = msg->payload[1];
- ep->remote_ep = msg->payload[2];
- ep->state = SCIFEP_MAPPING;
-
- wake_up(&ep->conwq);
- }
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_cnctgnt_ack() - Respond to SCIF_CNCT_GNTACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote connection request has finished mapping the local memory.
- * Place the connection in the connected state and wake up the pending
- * accept() call.
- */
-void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- mutex_lock(&scif_info.connlock);
- spin_lock(&ep->lock);
- /* New ep is now connected with all resources set. */
- ep->state = SCIFEP_CONNECTED;
- list_add_tail(&ep->list, &scif_info.connected);
- wake_up(&ep->conwq);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-}
-
-/**
- * scif_cnctgnt_nack() - Respond to SCIF_CNCT_GNTNACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote connection request failed to map the local memory it was sent.
- * Place the end point in the CLOSING state to indicate the failure
- * and wake up the pending accept().
- */
-void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- ep->state = SCIFEP_CLOSING;
- wake_up(&ep->conwq);
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_cnctrej() - Respond to SCIF_CNCT_REJ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote end has rejected the connection request. Set the end
- * point back to the bound state and wake up the pending connect().
- */
-void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTING == ep->state) {
- ep->state = SCIFEP_BOUND;
- wake_up(&ep->conwq);
- }
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_discnct() - Respond to SCIF_DISCNCT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote node has indicated close() has been called on its end
- * point. Remove the local end point from the connected list, set its
- * state to disconnected and ensure accesses to the remote node are
- * shut down.
- *
- * When all accesses to the remote end have completed, send a
- * DISCNT_ACK so the remote node can remove its resources and complete
- * its close routine.
- */
-void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
-
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- tmpep = list_entry(pos, struct scif_endpt, list);
- /*
- * The local ep may have sent a disconnect and been closed
- * due to a message response timeout. It may have been
- * allocated again and formed a new connection, so we want to
- * check if the remote ep matches.
- */
- if (((u64)tmpep == msg->payload[1]) &&
- ((u64)tmpep->remote_ep == msg->payload[0])) {
- list_del(pos);
- ep = tmpep;
- spin_lock(&ep->lock);
- break;
- }
- }
-
- /*
- * If the terminated end is not found then this side started closing
- * before the other side sent the disconnect. If so the ep will no
- * longer be on the connected list. Regardless, the other side
- * needs to be acked to let it know close is complete.
- */
- if (!ep) {
- mutex_unlock(&scif_info.connlock);
- goto discnct_ack;
- }
-
- ep->state = SCIFEP_DISCONNECTED;
- list_add_tail(&ep->list, &scif_info.disconnected);
-
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
- mutex_unlock(&scif_info.connlock);
-
-discnct_ack:
- msg->uop = SCIF_DISCNT_ACK;
- scif_nodeqp_send(&scif_dev[msg->src.node], msg);
-}
-
-/**
- * scif_discnt_ack() - Respond to SCIF_DISCNT_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side has indicated it has no more references to local resources.
- */
-void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- ep->state = SCIFEP_DISCONNECTED;
- spin_unlock(&ep->lock);
- complete(&ep->discon);
-}
-
-/**
- * scif_clientsend() - Respond to SCIF_CLIENT_SEND interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote side has sent data; wake up any thread blocked in recv.
- */
-void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTED == ep->state)
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
-}
-
-/**
- * scif_clientrcvd() - Respond to SCIF_CLIENT_RCVD interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The remote side has received data; wake up any thread blocked in send.
- */
-void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
-
- spin_lock(&ep->lock);
- if (SCIFEP_CONNECTED == ep->state)
- wake_up_interruptible(&ep->sendwq);
- spin_unlock(&ep->lock);
-}
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
deleted file mode 100644
index 0b9dfe1cc06c..000000000000
--- a/drivers/misc/mic/scif/scif_epd.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_EPD_H
-#define SCIF_EPD_H
-
-#include <linux/delay.h>
-#include <linux/scif.h>
-#include <linux/scif_ioctl.h>
-
-#define SCIF_EPLOCK_HELD true
-
-enum scif_epd_state {
- SCIFEP_UNBOUND,
- SCIFEP_BOUND,
- SCIFEP_LISTENING,
- SCIFEP_CONNECTED,
- SCIFEP_CONNECTING,
- SCIFEP_MAPPING,
- SCIFEP_CLOSING,
- SCIFEP_CLLISTEN,
- SCIFEP_DISCONNECTED,
- SCIFEP_ZOMBIE
-};
-
-/*
- * struct scif_conreq - Data structure added to the connection list.
- *
- * @msg: connection request message received
- * @list: link to list of connection requests
- */
-struct scif_conreq {
- struct scifmsg msg;
- struct list_head list;
-};
-
-/* Size of the RB for the Endpoint QP */
-#define SCIF_ENDPT_QP_SIZE 0x1000
-
-/*
- * scif_endpt_qp_info - SCIF endpoint queue pair
- *
- * @qp - Qpair for this endpoint
- * @qp_offset - DMA address of the QP
- * @gnt_pld - Payload in a SCIF_CNCT_GNT message containing the
- * physical address of the remote_qp.
- */
-struct scif_endpt_qp_info {
- struct scif_qp *qp;
- dma_addr_t qp_offset;
- dma_addr_t gnt_pld;
-};
-
-/*
- * struct scif_endpt - The SCIF endpoint data structure
- *
- * @state: end point state
- * @lock: lock synchronizing access to endpoint fields like state etc
- * @port: self port information
- * @peer: peer port information
- * @backlog: maximum pending connection requests
- * @qp_info: Endpoint QP information for SCIF messaging
- * @remote_dev: scifdev used by this endpt to communicate with remote node.
- * @remote_ep: remote endpoint
- * @conreqcnt: Keep track of number of connection requests.
- * @files: Open file information used to match the id passed in with
- * the flush routine.
- * @conlist: list of connection requests
- * @conwq: waitqueue for connection processing
- * @discon: completion used during disconnection
- * @sendwq: waitqueue used during sending messages
- * @recvwq: waitqueue used during message receipt
- * @sendlock: Synchronize ordering of messages sent
- * @recvlock: Synchronize ordering of messages received
- * @list: link to list of various endpoints like connected, listening etc
- * @li_accept: pending ACCEPTREG
- * @acceptcnt: pending ACCEPTREG cnt
- * @liacceptlist: link to listen accept
- * @miacceptlist: link to uaccept
- * @listenep: associated listen ep
- * @conn_work: Non blocking connect work
- * @conn_port: Connection port
- * @conn_err: Errors during connection
- * @conn_async_state: Async connection
- * @conn_pend_wq: Used by poll while waiting for incoming connections
- * @conn_list: List of async connection requests
- * @rma_info: Information for triggering SCIF RMA and DMA operations
- * @mmu_list: link to list of MMU notifier cleanup work
- * @anon: anonymous file for use in kernel mode scif poll
- */
-struct scif_endpt {
- enum scif_epd_state state;
- spinlock_t lock;
- struct scif_port_id port;
- struct scif_port_id peer;
- int backlog;
- struct scif_endpt_qp_info qp_info;
- struct scif_dev *remote_dev;
- u64 remote_ep;
- int conreqcnt;
- struct files_struct *files;
- struct list_head conlist;
- wait_queue_head_t conwq;
- struct completion discon;
- wait_queue_head_t sendwq;
- wait_queue_head_t recvwq;
- struct mutex sendlock;
- struct mutex recvlock;
- struct list_head list;
- struct list_head li_accept;
- int acceptcnt;
- struct list_head liacceptlist;
- struct list_head miacceptlist;
- struct scif_endpt *listenep;
- struct scif_port_id conn_port;
- int conn_err;
- int conn_async_state;
- wait_queue_head_t conn_pend_wq;
- struct list_head conn_list;
- struct scif_endpt_rma_info rma_info;
- struct list_head mmu_list;
- struct file *anon;
-};
-
-static inline int scifdev_alive(struct scif_endpt *ep)
-{
- return _scifdev_alive(ep->remote_dev);
-}
-
-/*
- * scif_verify_epd:
- * ep: SCIF endpoint
- *
- * Checks several generic error conditions and returns the
- * appropriate error.
- */
-static inline int scif_verify_epd(struct scif_endpt *ep)
-{
- if (ep->state == SCIFEP_DISCONNECTED)
- return -ECONNRESET;
-
- if (ep->state != SCIFEP_CONNECTED)
- return -ENOTCONN;
-
- if (!scifdev_alive(ep))
- return -ENODEV;
-
- return 0;
-}
-
-static inline int scif_anon_inode_getfile(scif_epd_t epd)
-{
- epd->anon = anon_inode_getfile("scif", &scif_anon_fops, NULL, 0);
-
- return PTR_ERR_OR_ZERO(epd->anon);
-}
-
-static inline void scif_anon_inode_fput(scif_epd_t epd)
-{
- if (epd->anon) {
- fput(epd->anon);
- epd->anon = NULL;
- }
-}
-
-void scif_cleanup_zombie_epd(void);
-void scif_teardown_ep(void *endpt);
-void scif_cleanup_ep_qp(struct scif_endpt *ep);
-void scif_add_epd_to_zombie_list(struct scif_endpt *ep, bool eplock_held);
-void scif_get_node_info(void);
-void scif_send_acks(struct scif_dev *dev);
-void scif_conn_handler(struct work_struct *work);
-int scif_rsrv_port(u16 port);
-void scif_get_port(u16 port);
-int scif_get_new_port(void);
-void scif_put_port(u16 port);
-int scif_user_send(scif_epd_t epd, void __user *msg, int len, int flags);
-int scif_user_recv(scif_epd_t epd, void __user *msg, int len, int flags);
-void scif_cnctreq(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctgnt_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_cnctrej(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_discnct(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_discnt_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_clientsend(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
-int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
-int __scif_flush(scif_epd_t epd);
-int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-__poll_t __scif_pollfd(struct file *f, poll_table *wait,
- struct scif_endpt *ep);
-int __scif_pin_pages(void *addr, size_t len, int *out_prot,
- int map_flags, scif_pinned_pages_t *pages);
-#endif /* SCIF_EPD_H */
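The state enum above is driven by the kernel-mode API this header backs (scif_open(), scif_bind(), scif_listen(), scif_accept() from <linux/scif.h>). A minimal in-kernel listener sketch, annotated with the scif_epd_state each step lands in (error handling abbreviated; assumes scif_open() returns NULL on failure):

#include <linux/scif.h>

static int scif_listen_once(u16 port)
{
	struct scif_port_id peer;
	scif_epd_t lep, nep;
	int err;

	lep = scif_open();			/* SCIFEP_UNBOUND */
	if (!lep)
		return -ENOMEM;
	err = scif_bind(lep, port);		/* SCIFEP_BOUND */
	if (err < 0)
		goto out;
	err = scif_listen(lep, 1);		/* SCIFEP_LISTENING */
	if (err)
		goto out;
	/* Blocks until scif_cnctreq() queues a request and wakes conwq. */
	err = scif_accept(lep, &peer, &nep, SCIF_ACCEPT_SYNC);
	if (!err)
		scif_close(nep);		/* new ep was SCIFEP_CONNECTED */
out:
	scif_close(lep);
	return err;
}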
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
deleted file mode 100644
index 3f08646cd78a..000000000000
--- a/drivers/misc/mic/scif/scif_fd.c
+++ /dev/null
@@ -1,462 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-
-static int scif_fdopen(struct inode *inode, struct file *f)
-{
- struct scif_endpt *priv = scif_open();
-
- if (!priv)
- return -ENOMEM;
- f->private_data = priv;
- return 0;
-}
-
-static int scif_fdclose(struct inode *inode, struct file *f)
-{
- struct scif_endpt *priv = f->private_data;
-
- return scif_close(priv);
-}
-
-static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
-{
- struct scif_endpt *priv = f->private_data;
-
- return scif_mmap(vma, priv);
-}
-
-static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
-{
- struct scif_endpt *priv = f->private_data;
-
- return __scif_pollfd(f, wait, priv);
-}
-
-static int scif_fdflush(struct file *f, fl_owner_t id)
-{
- struct scif_endpt *ep = f->private_data;
-
- spin_lock(&ep->lock);
- /*
- * The listening endpoint stashes the open file information before
- * waiting for incoming connections. The release callback would never be
- * called if the application closed the endpoint, while waiting for
- * incoming connections from a separate thread since the file descriptor
- * reference count is bumped up in the accept IOCTL. Call the flush
- * routine if the id matches the endpoint open file information so that
- * the listening endpoint can be woken up and the fd released.
- */
- if (ep->files == id)
- __scif_flush(ep);
- spin_unlock(&ep->lock);
- return 0;
-}
-
-static __always_inline void scif_err_debug(int err, const char *str)
-{
- /*
- * ENOTCONN is a common, uninteresting error which would
- * otherwise flood the console with debug messages.
- */
- if (err < 0 && err != -ENOTCONN)
- dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
-}
-
-static long scif_fdioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct scif_endpt *priv = f->private_data;
- void __user *argp = (void __user *)arg;
- int err = 0;
- struct scifioctl_msg request;
- bool non_block = false;
-
- non_block = !!(f->f_flags & O_NONBLOCK);
-
- switch (cmd) {
- case SCIF_BIND:
- {
- int pn;
-
- if (copy_from_user(&pn, argp, sizeof(pn)))
- return -EFAULT;
-
- pn = scif_bind(priv, pn);
- if (pn < 0)
- return pn;
-
- if (copy_to_user(argp, &pn, sizeof(pn)))
- return -EFAULT;
-
- return 0;
- }
- case SCIF_LISTEN:
- return scif_listen(priv, arg);
- case SCIF_CONNECT:
- {
- struct scifioctl_connect req;
- struct scif_endpt *ep = (struct scif_endpt *)priv;
-
- if (copy_from_user(&req, argp, sizeof(req)))
- return -EFAULT;
-
- err = __scif_connect(priv, &req.peer, non_block);
- if (err < 0)
- return err;
-
- req.self.node = ep->port.node;
- req.self.port = ep->port.port;
-
- if (copy_to_user(argp, &req, sizeof(req)))
- return -EFAULT;
-
- return 0;
- }
- /*
- * Accept is done in two halves. The request ioctl does the basic
- * functionality of accepting the request and returning the information
- * about it including the internal ID of the end point. The register
- * is done with the internal ID on a new file descriptor opened by the
- * requesting process.
- */
- case SCIF_ACCEPTREQ:
- {
- struct scifioctl_accept request;
- scif_epd_t *ep = (scif_epd_t *)&request.endpt;
-
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
- err = scif_accept(priv, &request.peer, ep, request.flags);
- if (err < 0)
- return err;
-
- if (copy_to_user(argp, &request, sizeof(request))) {
- scif_close(*ep);
- return -EFAULT;
- }
- /*
- * Add to the list of user mode eps where the second half
- * of the accept is not yet completed.
- */
- mutex_lock(&scif_info.eplock);
- list_add_tail(&((*ep)->miacceptlist), &scif_info.uaccept);
- list_add_tail(&((*ep)->liacceptlist), &priv->li_accept);
- (*ep)->listenep = priv;
- priv->acceptcnt++;
- mutex_unlock(&scif_info.eplock);
-
- return 0;
- }
- case SCIF_ACCEPTREG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scif_endpt *newep;
- struct scif_endpt *lisep;
- struct scif_endpt *fep = NULL;
- struct scif_endpt *tmpep;
- struct list_head *pos, *tmpq;
-
- /* Finally replace the pointer to the accepted endpoint */
- if (copy_from_user(&newep, argp, sizeof(void *)))
- return -EFAULT;
-
- /* Remove from the user accept queue */
- mutex_lock(&scif_info.eplock);
- list_for_each_safe(pos, tmpq, &scif_info.uaccept) {
- tmpep = list_entry(pos,
- struct scif_endpt, miacceptlist);
- if (tmpep == newep) {
- list_del(pos);
- fep = tmpep;
- break;
- }
- }
-
- if (!fep) {
- mutex_unlock(&scif_info.eplock);
- return -ENOENT;
- }
-
- lisep = newep->listenep;
- list_for_each_safe(pos, tmpq, &lisep->li_accept) {
- tmpep = list_entry(pos,
- struct scif_endpt, liacceptlist);
- if (tmpep == newep) {
- list_del(pos);
- lisep->acceptcnt--;
- break;
- }
- }
-
- mutex_unlock(&scif_info.eplock);
-
- /* Free the resources automatically created from the open. */
- scif_anon_inode_fput(priv);
- scif_teardown_ep(priv);
- scif_add_epd_to_zombie_list(priv, !SCIF_EPLOCK_HELD);
- f->private_data = newep;
- return 0;
- }
- case SCIF_SEND:
- {
- struct scif_endpt *priv = f->private_data;
-
- if (copy_from_user(&request, argp,
- sizeof(struct scifioctl_msg))) {
- err = -EFAULT;
- goto send_err;
- }
- err = scif_user_send(priv, (void __user *)request.msg,
- request.len, request.flags);
- if (err < 0)
- goto send_err;
- if (copy_to_user(&
- ((struct scifioctl_msg __user *)argp)->out_len,
- &err, sizeof(err))) {
- err = -EFAULT;
- goto send_err;
- }
- err = 0;
-send_err:
- scif_err_debug(err, "scif_send");
- return err;
- }
- case SCIF_RECV:
- {
- struct scif_endpt *priv = f->private_data;
-
- if (copy_from_user(&request, argp,
- sizeof(struct scifioctl_msg))) {
- err = -EFAULT;
- goto recv_err;
- }
-
- err = scif_user_recv(priv, (void __user *)request.msg,
- request.len, request.flags);
- if (err < 0)
- goto recv_err;
-
- if (copy_to_user(&
- ((struct scifioctl_msg __user *)argp)->out_len,
- &err, sizeof(err))) {
- err = -EFAULT;
- goto recv_err;
- }
- err = 0;
-recv_err:
- scif_err_debug(err, "scif_recv");
- return err;
- }
- case SCIF_GET_NODEIDS:
- {
- struct scifioctl_node_ids node_ids;
- int entries;
- u16 *nodes;
- void __user *unodes, *uself;
- u16 self;
-
- if (copy_from_user(&node_ids, argp, sizeof(node_ids))) {
- err = -EFAULT;
- goto getnodes_err2;
- }
-
- entries = min_t(int, scif_info.maxid, node_ids.len);
- nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
- if (entries && !nodes) {
- err = -ENOMEM;
- goto getnodes_err2;
- }
- node_ids.len = scif_get_node_ids(nodes, entries, &self);
-
- unodes = (void __user *)node_ids.nodes;
- if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-
- uself = (void __user *)node_ids.self;
- if (copy_to_user(uself, &self, sizeof(u16))) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-
- if (copy_to_user(argp, &node_ids, sizeof(node_ids))) {
- err = -EFAULT;
- goto getnodes_err1;
- }
-getnodes_err1:
- kfree(nodes);
-getnodes_err2:
- return err;
- }
- case SCIF_REG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_reg reg;
- off_t ret;
-
- if (copy_from_user(&reg, argp, sizeof(reg))) {
- err = -EFAULT;
- goto reg_err;
- }
- if (reg.flags & SCIF_MAP_KERNEL) {
- err = -EINVAL;
- goto reg_err;
- }
- ret = scif_register(priv, (void *)reg.addr, reg.len,
- reg.offset, reg.prot, reg.flags);
- if (ret < 0) {
- err = (int)ret;
- goto reg_err;
- }
-
- if (copy_to_user(&((struct scifioctl_reg __user *)argp)
- ->out_offset, &ret, sizeof(reg.out_offset))) {
- err = -EFAULT;
- goto reg_err;
- }
- err = 0;
-reg_err:
- scif_err_debug(err, "scif_register");
- return err;
- }
- case SCIF_UNREG:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_unreg unreg;
-
- if (copy_from_user(&unreg, argp, sizeof(unreg))) {
- err = -EFAULT;
- goto unreg_err;
- }
- err = scif_unregister(priv, unreg.offset, unreg.len);
-unreg_err:
- scif_err_debug(err, "scif_unregister");
- return err;
- }
- case SCIF_READFROM:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto readfrom_err;
- }
- err = scif_readfrom(priv, copy.loffset, copy.len, copy.roffset,
- copy.flags);
-readfrom_err:
- scif_err_debug(err, "scif_readfrom");
- return err;
- }
- case SCIF_WRITETO:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto writeto_err;
- }
- err = scif_writeto(priv, copy.loffset, copy.len, copy.roffset,
- copy.flags);
-writeto_err:
- scif_err_debug(err, "scif_writeto");
- return err;
- }
- case SCIF_VREADFROM:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto vreadfrom_err;
- }
- err = scif_vreadfrom(priv, (void __force *)copy.addr, copy.len,
- copy.roffset, copy.flags);
-vreadfrom_err:
- scif_err_debug(err, "scif_vreadfrom");
- return err;
- }
- case SCIF_VWRITETO:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_copy copy;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- err = -EFAULT;
- goto vwriteto_err;
- }
- err = scif_vwriteto(priv, (void __force *)copy.addr, copy.len,
- copy.roffset, copy.flags);
-vwriteto_err:
- scif_err_debug(err, "scif_vwriteto");
- return err;
- }
- case SCIF_FENCE_MARK:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_fence_mark mark;
- int tmp_mark = 0;
-
- if (copy_from_user(&mark, argp, sizeof(mark))) {
- err = -EFAULT;
- goto fence_mark_err;
- }
- err = scif_fence_mark(priv, mark.flags, &tmp_mark);
- if (err)
- goto fence_mark_err;
- if (copy_to_user((void __user *)mark.mark, &tmp_mark,
- sizeof(tmp_mark))) {
- err = -EFAULT;
- goto fence_mark_err;
- }
-fence_mark_err:
- scif_err_debug(err, "scif_fence_mark");
- return err;
- }
- case SCIF_FENCE_WAIT:
- {
- struct scif_endpt *priv = f->private_data;
-
- err = scif_fence_wait(priv, arg);
- scif_err_debug(err, "scif_fence_wait");
- return err;
- }
- case SCIF_FENCE_SIGNAL:
- {
- struct scif_endpt *priv = f->private_data;
- struct scifioctl_fence_signal signal;
-
- if (copy_from_user(&signal, argp, sizeof(signal))) {
- err = -EFAULT;
- goto fence_signal_err;
- }
-
- err = scif_fence_signal(priv, signal.loff, signal.lval,
- signal.roff, signal.rval, signal.flags);
-fence_signal_err:
- scif_err_debug(err, "scif_fence_signal");
- return err;
- }
- }
- return -EINVAL;
-}
-
-const struct file_operations scif_fops = {
- .open = scif_fdopen,
- .release = scif_fdclose,
- .unlocked_ioctl = scif_fdioctl,
- .mmap = scif_fdmmap,
- .poll = scif_fdpoll,
- .flush = scif_fdflush,
- .owner = THIS_MODULE,
-};
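From user space, the two-half accept described in scif_fdioctl() looks like the hypothetical sketch below. struct scifioctl_accept and the ioctl numbers come from <linux/scif_ioctl.h>; SCIF_ACCEPT_SYNC is assumed here to carry its kernel value of 1, as user builds normally get it from the MPSS headers. The second half must run on a freshly opened /dev/scif descriptor:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/scif_ioctl.h>

#define SCIF_ACCEPT_SYNC 1	/* assumed; from the MPSS user-space headers */

static int scif_user_accept(int listen_fd)
{
	struct scifioctl_accept req = { .flags = SCIF_ACCEPT_SYNC };
	int new_fd;

	/* Half one: accept on the listening fd; req.endpt comes back
	 * holding the kernel's internal endpoint id. */
	if (ioctl(listen_fd, SCIF_ACCEPTREQ, &req) < 0)
		return -1;

	/* Half two: register that id on a new descriptor of its own. */
	new_fd = open("/dev/scif", O_RDWR);
	if (new_fd < 0)
		return -1;
	if (ioctl(new_fd, SCIF_ACCEPTREG, &req.endpt) < 0) {
		close(new_fd);
		return -1;
	}
	return new_fd;
}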
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
deleted file mode 100644
index 4fedf6183951..000000000000
--- a/drivers/misc/mic/scif/scif_fence.c
+++ /dev/null
@@ -1,783 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-
-#include "scif_main.h"
-
-/**
- * scif_recv_mark: Handle SCIF_MARK request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a mark.
- */
-void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int mark = 0;
- int err;
-
- err = _scif_fence_mark(ep, &mark);
- if (err)
- msg->uop = SCIF_MARK_NACK;
- else
- msg->uop = SCIF_MARK_ACK;
- msg->payload[0] = ep->remote_ep;
- msg->payload[2] = mark;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a SCIF_MARK message.
- */
-void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_MARK_ACK) {
- fence_req->state = OP_COMPLETED;
- fence_req->dma_mark = (int)msg->payload[2];
- } else {
- fence_req->state = OP_FAILED;
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-/**
- * scif_recv_wait: Handle SCIF_WAIT request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested waiting on a fence.
- */
-void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_remote_fence_info *fence;
-
- /*
- * Allocate structure for remote fence information and
- * send a NACK if the allocation failed. The peer will
- * return ENOMEM upon receiving a NACK.
- */
- fence = kmalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence) {
- msg->payload[0] = ep->remote_ep;
- msg->uop = SCIF_WAIT_NACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- return;
- }
-
- /* Prepare the fence request */
- memcpy(&fence->msg, msg, sizeof(struct scifmsg));
- INIT_LIST_HEAD(&fence->list);
-
- /* Insert to the global remote fence request list */
- mutex_lock(&scif_info.fencelock);
- atomic_inc(&ep->rma_info.fence_refcount);
- list_add_tail(&fence->list, &scif_info.fence);
- mutex_unlock(&scif_info.fencelock);
-
- schedule_work(&scif_info.misc_work);
-}
-
-/**
- * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a SCIF_WAIT message.
- */
-void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_WAIT_ACK)
- fence_req->state = OP_COMPLETED;
- else
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-/**
- * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a signal on a local offset.
- */
-void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int err;
-
- err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
- SCIF_WINDOW_SELF);
- if (err)
- msg->uop = SCIF_SIG_NACK;
- else
- msg->uop = SCIF_SIG_ACK;
- msg->payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has requested a signal on a remote offset.
- */
-void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int err;
-
- err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
- SCIF_WINDOW_PEER);
- if (err)
- msg->uop = SCIF_SIG_NACK;
- else
- msg->uop = SCIF_SIG_ACK;
- msg->payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, msg);
-}
-
-/**
- * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * The peer has responded to a signal request.
- */
-void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_fence_info *fence_req =
- (struct scif_fence_info *)msg->payload[3];
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (msg->uop == SCIF_SIG_ACK)
- fence_req->state = OP_COMPLETED;
- else
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- complete(&fence_req->comp);
-}
-
-static inline void *scif_get_local_va(off_t off, struct scif_window *window)
-{
- struct page **pages = window->pinned_pages->pages;
- int page_nr = (off - window->offset) >> PAGE_SHIFT;
- off_t page_off = off & ~PAGE_MASK;
-
- return page_address(pages[page_nr]) + page_off;
-}
-
-static void scif_prog_signal_cb(void *arg)
-{
- struct scif_cb_arg *cb_arg = arg;
-
- dma_pool_free(cb_arg->ep->remote_dev->signal_pool, cb_arg->status,
- cb_arg->src_dma_addr);
- kfree(cb_arg);
-}
-
-static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct dma_chan *chan = ep->rma_info.dma_chan;
- struct dma_device *ddev = chan->device;
- bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
- struct dma_async_tx_descriptor *tx;
- struct scif_status *status = NULL;
- struct scif_cb_arg *cb_arg = NULL;
- dma_addr_t src;
- dma_cookie_t cookie;
- int err;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- dma_async_issue_pending(chan);
- if (x100) {
- /*
- * For X100 use the status descriptor to write the value to
- * the destination.
- */
- tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
- } else {
- status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
- &src);
- if (!status) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto alloc_fail;
- }
- status->val = val;
- status->src_dma_addr = src;
- status->ep = ep;
- src += offsetof(struct scif_status, val);
- tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
- DMA_PREP_INTERRUPT);
- }
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto dma_fail;
- }
- if (!x100) {
- cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
- if (!cb_arg) {
- err = -ENOMEM;
- goto dma_fail;
- }
- cb_arg->src_dma_addr = src;
- cb_arg->status = status;
- cb_arg->ep = ep;
- tx->callback = scif_prog_signal_cb;
- tx->callback_param = cb_arg;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = -EIO;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- goto dma_fail;
- }
- dma_async_issue_pending(chan);
- return 0;
-dma_fail:
- if (!x100) {
- dma_pool_free(ep->remote_dev->signal_pool, status,
- src - offsetof(struct scif_status, val));
- kfree(cb_arg);
- }
-alloc_fail:
- return err;
-}
-
-/**
- * scif_prog_signal:
- * @epd: Endpoint Descriptor
- * @offset: registered address to write @val to
- * @val: Value to be written at @offset
- * @type: Type of the window.
- *
- * Arrange to write a value to the registered offset after ensuring that the
- * offset provided is indeed valid.
- */
-int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
- enum scif_window_type type)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_window *window = NULL;
- struct scif_rma_req req;
- dma_addr_t dst_dma_addr;
- int err;
-
- mutex_lock(&ep->rma_info.rma_lock);
- req.out_window = &window;
- req.offset = offset;
- req.nr_bytes = sizeof(u64);
- req.prot = SCIF_PROT_WRITE;
- req.type = SCIF_WINDOW_SINGLE;
- if (type == SCIF_WINDOW_SELF)
- req.head = &ep->rma_info.reg_list;
- else
- req.head = &ep->rma_info.remote_reg_list;
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto unlock_ret;
- }
-
- if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
- u64 *dst_virt;
-
- if (type == SCIF_WINDOW_SELF)
- dst_virt = scif_get_local_va(offset, window);
- else
- dst_virt =
- scif_get_local_va(offset, (struct scif_window *)
- window->peer_window);
- *dst_virt = val;
- } else {
- dst_dma_addr = __scif_off_to_dma_addr(window, offset);
- err = _scif_prog_signal(epd, dst_dma_addr, val);
- }
-unlock_ret:
- mutex_unlock(&ep->rma_info.rma_lock);
- return err;
-}
-
-static int _scif_fence_wait(scif_epd_t epd, int mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
- int err;
-
- /* Wait for DMA callback in scif_fence_mark_cb(..) */
- err = wait_event_interruptible_timeout(ep->rma_info.markwq,
- dma_async_is_tx_complete(
- ep->rma_info.dma_chan,
- cookie, NULL, NULL) ==
- DMA_COMPLETE,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = 0;
- return err;
-}
-
-/**
- * scif_rma_handle_remote_fences:
- *
- * This routine services remote fence requests.
- */
-void scif_rma_handle_remote_fences(void)
-{
- struct list_head *item, *tmp;
- struct scif_remote_fence_info *fence;
- struct scif_endpt *ep;
- int mark, err;
-
- might_sleep();
- mutex_lock(&scif_info.fencelock);
- list_for_each_safe(item, tmp, &scif_info.fence) {
- fence = list_entry(item, struct scif_remote_fence_info,
- list);
- /* Remove fence from global list */
- list_del(&fence->list);
-
- /* Initiate the fence operation */
- ep = (struct scif_endpt *)fence->msg.payload[0];
- mark = fence->msg.payload[2];
- err = _scif_fence_wait(ep, mark);
- if (err)
- fence->msg.uop = SCIF_WAIT_NACK;
- else
- fence->msg.uop = SCIF_WAIT_ACK;
- fence->msg.payload[0] = ep->remote_ep;
- scif_nodeqp_send(ep->remote_dev, &fence->msg);
- kfree(fence);
- if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
- schedule_work(&scif_info.misc_work);
- }
- mutex_unlock(&scif_info.fencelock);
-}
-
-static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
-{
- int err;
- struct scifmsg msg;
- struct scif_fence_info *fence_req;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
- if (!fence_req) {
- err = -ENOMEM;
- goto error;
- }
-
- fence_req->state = OP_IN_PROGRESS;
- init_completion(&fence_req->comp);
-
- msg.src = ep->port;
- msg.uop = uop;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = (u64)fence_req;
- if (uop == SCIF_WAIT)
- msg.payload[2] = mark;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
-retry:
- /* Wait for a SCIF_WAIT_(N)ACK message */
- err = wait_for_completion_timeout(&fence_req->comp,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err)
- err = -ENODEV;
- if (err > 0)
- err = 0;
- mutex_lock(&ep->rma_info.rma_lock);
- if (err < 0) {
- if (fence_req->state == OP_IN_PROGRESS)
- fence_req->state = OP_FAILED;
- }
- if (fence_req->state == OP_FAILED && !err)
- err = -ENOMEM;
- if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
- *out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
- mutex_unlock(&ep->rma_info.rma_lock);
-error_free:
- kfree(fence_req);
-error:
- return err;
-}
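The retry: loop above (repeated in _scif_send_fence_signal_wait() below) implements one idiom: keep re-arming the timeout as long as the peer node is still alive, so a slow-but-healthy node is never mistaken for a dead one. Condensed:

	do {
		/* 0 means timed out; > 0 is the remaining jiffies. */
		err = wait_for_completion_timeout(&fence_req->comp,
						  SCIF_NODE_ALIVE_TIMEOUT);
	} while (!err && scifdev_alive(ep));
	err = err ? 0 : -ENODEV;	/* give up only once the node dies */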
-
-/**
- * scif_send_fence_mark:
- * @epd: end point descriptor.
- * @out_mark: Output DMA mark reported by peer.
- *
- * Send a remote fence mark request.
- */
-static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
-{
- return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
-}
-
-/**
- * scif_send_fence_wait:
- * @epd: end point descriptor.
- * @mark: DMA mark to wait for.
- *
- * Send a remote fence wait request.
- */
-static int scif_send_fence_wait(scif_epd_t epd, int mark)
-{
- return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
-}
-
-static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
- struct scif_fence_info *fence_req)
-{
- int err;
-
-retry:
- /* Wait for a SCIF_SIG_(N)ACK message */
- err = wait_for_completion_timeout(&fence_req->comp,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err)
- err = -ENODEV;
- if (err > 0)
- err = 0;
- if (err < 0) {
- mutex_lock(&ep->rma_info.rma_lock);
- if (fence_req->state == OP_IN_PROGRESS)
- fence_req->state = OP_FAILED;
- mutex_unlock(&ep->rma_info.rma_lock);
- }
- if (fence_req->state == OP_FAILED && !err)
- err = -ENXIO;
- return err;
-}
-
-/**
- * scif_send_fence_signal:
- * @epd: endpoint descriptor
- * @roff: remote offset
- * @rval: remote value to write to @roff
- * @loff: local offset
- * @lval: local value to write to @loff
- * @flags: flags
- *
- * Sends a remote fence signal request.
- */
-static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
- off_t loff, u64 lval, int flags)
-{
- int err = 0;
- struct scifmsg msg;
- struct scif_fence_info *fence_req;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
-
- fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
- if (!fence_req) {
- err = -ENOMEM;
- goto error;
- }
-
- fence_req->state = OP_IN_PROGRESS;
- init_completion(&fence_req->comp);
- msg.src = ep->port;
- if (flags & SCIF_SIGNAL_LOCAL) {
- msg.uop = SCIF_SIG_LOCAL;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = roff;
- msg.payload[2] = rval;
- msg.payload[3] = (u64)fence_req;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
- err = _scif_send_fence_signal_wait(ep, fence_req);
- if (err)
- goto error_free;
- }
- fence_req->state = OP_IN_PROGRESS;
-
- if (flags & SCIF_SIGNAL_REMOTE) {
- msg.uop = SCIF_SIG_REMOTE;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = loff;
- msg.payload[2] = lval;
- msg.payload[3] = (u64)fence_req;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- if (err)
- goto error_free;
- err = _scif_send_fence_signal_wait(ep, fence_req);
- }
-error_free:
- kfree(fence_req);
-error:
- return err;
-}
-
-static void scif_fence_mark_cb(void *arg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)arg;
-
- wake_up_interruptible(&ep->rma_info.markwq);
- atomic_dec(&ep->rma_info.fence_refcount);
-}
-
-/**
- * _scif_fence_mark:
- * @epd: endpoint descriptor
- * @mark: DMA mark to set-up
- *
- * Set up a mark for this endpoint and return the value of the mark.
- */
-int _scif_fence_mark(scif_epd_t epd, int *mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct dma_chan *chan = ep->rma_info.dma_chan;
- struct dma_device *ddev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_cookie_t cookie;
- int err;
-
- tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- dma_async_issue_pending(chan);
- tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
- if (!tx) {
- err = -ENOMEM;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- tx->callback = scif_fence_mark_cb;
- tx->callback_param = ep;
- *mark = cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = (int)cookie;
- dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
- }
- atomic_inc(&ep->rma_info.fence_refcount);
- dma_async_issue_pending(chan);
- return 0;
-}
-
-#define SCIF_LOOPB_MAGIC_MARK 0xdead
-
-int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
- ep, flags, *mark);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Invalid flags? */
- if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /* At least one of init self or peer RMA should be set */
- if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
- return -EINVAL;
-
- /* Exactly one of init self or peer RMA should be set but not both */
- if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /*
- * Management node loopback does not need to use DMA.
- * Return a valid mark to be symmetric.
- */
- if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
- *mark = SCIF_LOOPB_MAGIC_MARK;
- return 0;
- }
-
- if (flags & SCIF_FENCE_INIT_SELF)
- err = _scif_fence_mark(epd, mark);
- else
- err = scif_send_fence_mark(ep, mark);
-
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
- ep, flags, *mark, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_mark);
-
-int scif_fence_wait(scif_epd_t epd, int mark)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_wait: ep %p mark 0x%x\n",
- ep, mark);
- err = scif_verify_epd(ep);
- if (err)
- return err;
- /*
- * Management node loopback does not need to use DMA.
- * The only valid mark is SCIF_LOOPB_MAGIC_MARK so simply
- * return success if the mark matches.
- */
- if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
- if (mark == SCIF_LOOPB_MAGIC_MARK)
- return 0;
- else
- return -EINVAL;
- }
- if (mark & SCIF_REMOTE_FENCE)
- err = scif_send_fence_wait(epd, mark);
- else
- err = _scif_fence_wait(epd, mark);
- if (err < 0)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_wait);
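Together the two exports form a split fence for kernel-mode SCIF clients: mark the current point in the DMA stream, queue more RMAs, and wait on the mark later. A minimal sketch, assuming epd is an already-connected endpoint:

	int mark, err;

	/* Snapshot the DMA stream initiated from this (self) side. */
	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
	if (err)
		return err;

	/* ... issue further scif_writeto()/scif_vwriteto() traffic ... */

	/* Block until every RMA issued before the mark has drained. */
	err = scif_fence_wait(epd, mark);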
-
-int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
- off_t roff, u64 rval, int flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- int err = 0;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
- ep, loff, lval, roff, rval, flags);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Invalid flags? */
- if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
- SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
- return -EINVAL;
-
- /* At least one of init self or peer RMA should be set */
- if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
- return -EINVAL;
-
- /* Exactly one of init self or peer RMA should be set but not both */
- if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
- return -EINVAL;
-
- /* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
- if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
- return -EINVAL;
-
- /* Only Dword offsets allowed */
- if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
- return -EINVAL;
-
- /* Only Dword aligned offsets allowed */
- if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
- return -EINVAL;
-
- if (flags & SCIF_FENCE_INIT_PEER) {
- err = scif_send_fence_signal(epd, roff, rval, loff,
- lval, flags);
- } else {
- /* Signal in the local registered address space */
- if (flags & SCIF_SIGNAL_LOCAL) {
- err = scif_prog_signal(epd, loff, lval,
- SCIF_WINDOW_SELF);
- if (err)
- goto error_ret;
- }
-
- /* Signal in the remote registered address space */
- if (flags & SCIF_SIGNAL_REMOTE)
- err = scif_prog_signal(epd, roff,
- rval, SCIF_WINDOW_PEER);
- }
-error_ret:
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_fence_signal);
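scif_fence_signal() combines the fence with completion flags visible to both sides. A sketch, assuming loff and roff are dword-aligned offsets inside valid registered windows on the connected endpoint epd:

	/* Once all RMAs initiated from this side drain, write 1 into the
	 * local window at loff and 2 into the peer's window at roff. */
	err = scif_fence_signal(epd, loff, 1, roff, 2,
				SCIF_FENCE_INIT_SELF |
				SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE);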
diff --git a/drivers/misc/mic/scif/scif_main.c b/drivers/misc/mic/scif/scif_main.c
deleted file mode 100644
index e2278bf9f11d..000000000000
--- a/drivers/misc/mic/scif/scif_main.c
+++ /dev/null
@@ -1,351 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/module.h>
-#include <linux/idr.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-#include "scif_main.h"
-#include "scif_map.h"
-
-struct scif_info scif_info = {
- .mdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "scif",
- .fops = &scif_fops,
- }
-};
-
-struct scif_dev *scif_dev;
-struct kmem_cache *unaligned_cache;
-static atomic_t g_loopb_cnt;
-
-/* Runs in the context of intr_wq */
-static void scif_intr_bh_handler(struct work_struct *work)
-{
- struct scif_dev *scifdev =
- container_of(work, struct scif_dev, intr_bh);
-
- if (scifdev_self(scifdev))
- scif_loopb_msg_handler(scifdev, scifdev->qpairs);
- else
- scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
-}
-
-int scif_setup_intr_wq(struct scif_dev *scifdev)
-{
- if (!scifdev->intr_wq) {
- snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
- "SCIF INTR %d", scifdev->node);
- scifdev->intr_wq =
- alloc_ordered_workqueue(scifdev->intr_wqname, 0);
- if (!scifdev->intr_wq)
- return -ENOMEM;
- INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
- }
- return 0;
-}
-
-void scif_destroy_intr_wq(struct scif_dev *scifdev)
-{
- if (scifdev->intr_wq) {
- destroy_workqueue(scifdev->intr_wq);
- scifdev->intr_wq = NULL;
- }
-}
-
-irqreturn_t scif_intr_handler(int irq, void *data)
-{
- struct scif_dev *scifdev = data;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
- queue_work(scifdev->intr_wq, &scifdev->intr_bh);
- return IRQ_HANDLED;
-}
-
-static void scif_qp_setup_handler(struct work_struct *work)
-{
- struct scif_dev *scifdev = container_of(work, struct scif_dev,
- qp_dwork.work);
- struct scif_hw_dev *sdev = scifdev->sdev;
- dma_addr_t da = 0;
- int err;
-
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- da = bp->scif_card_dma_addr;
- scifdev->rdb = bp->h2c_scif_db;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- da = readq(&bp->scif_host_dma_addr);
- scifdev->rdb = ioread8(&bp->c2h_scif_db);
- }
- if (da) {
- err = scif_qp_response(da, scifdev);
- if (err)
- dev_err(&scifdev->sdev->dev,
- "scif_qp_response err %d\n", err);
- } else {
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
- }
-}
-
-static int scif_setup_scifdev(void)
-{
- /* We support a maximum of 129 SCIF nodes including the mgmt node */
-#define MAX_SCIF_NODES 129
- int i;
- u8 num_nodes = MAX_SCIF_NODES;
-
- scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
- if (!scif_dev)
- return -ENOMEM;
- for (i = 0; i < num_nodes; i++) {
- struct scif_dev *scifdev = &scif_dev[i];
-
- scifdev->node = i;
- scifdev->exit = OP_IDLE;
- init_waitqueue_head(&scifdev->disconn_wq);
- mutex_init(&scifdev->lock);
- INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
- INIT_DELAYED_WORK(&scifdev->p2p_dwork,
- scif_poll_qp_state);
- INIT_DELAYED_WORK(&scifdev->qp_dwork,
- scif_qp_setup_handler);
- INIT_LIST_HEAD(&scifdev->p2p);
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- }
- return 0;
-}
-
-static void scif_destroy_scifdev(void)
-{
- kfree(scif_dev);
- scif_dev = NULL;
-}
-
-static int scif_probe(struct scif_hw_dev *sdev)
-{
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
- int rc;
-
- dev_set_drvdata(&sdev->dev, sdev);
- scifdev->sdev = sdev;
-
- if (1 == atomic_add_return(1, &g_loopb_cnt)) {
- struct scif_dev *loopb_dev = &scif_dev[sdev->snode];
-
- loopb_dev->sdev = sdev;
- rc = scif_setup_loopback_qp(loopb_dev);
- if (rc)
- goto exit;
- }
-
- rc = scif_setup_intr_wq(scifdev);
- if (rc)
- goto destroy_loopb;
- rc = scif_setup_qp(scifdev);
- if (rc)
- goto destroy_intr;
- scifdev->db = sdev->hw_ops->next_db(sdev);
- scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
- "SCIF_INTR", scifdev,
- scifdev->db);
- if (IS_ERR(scifdev->cookie)) {
- rc = PTR_ERR(scifdev->cookie);
- goto free_qp;
- }
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- bp->c2h_scif_db = scifdev->db;
- bp->scif_host_dma_addr = scifdev->qp_dma_addr;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- iowrite8(scifdev->db, &bp->h2c_scif_db);
- writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
- }
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
- return rc;
-free_qp:
- scif_free_qp(scifdev);
-destroy_intr:
- scif_destroy_intr_wq(scifdev);
-destroy_loopb:
- if (atomic_dec_and_test(&g_loopb_cnt))
- scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
-exit:
- return rc;
-}
-
-void scif_stop(struct scif_dev *scifdev)
-{
- struct scif_dev *dev;
- int i;
-
- for (i = scif_info.maxid; i >= 0; i--) {
- dev = &scif_dev[i];
- if (scifdev_self(dev))
- continue;
- scif_handle_remove_node(i);
- }
-}
-
-static void scif_remove(struct scif_hw_dev *sdev)
-{
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
-
- if (scif_is_mgmt_node()) {
- struct mic_bootparam *bp = sdev->dp;
-
- bp->c2h_scif_db = -1;
- bp->scif_host_dma_addr = 0x0;
- } else {
- struct mic_bootparam __iomem *bp = sdev->rdp;
-
- iowrite8(-1, &bp->h2c_scif_db);
- writeq(0x0, &bp->scif_card_dma_addr);
- }
- if (scif_is_mgmt_node()) {
- scif_disconnect_node(scifdev->node, true);
- } else {
- scif_info.card_initiated_exit = true;
- scif_stop(scifdev);
- }
- if (atomic_dec_and_test(&g_loopb_cnt))
- scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
- if (scifdev->cookie) {
- sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
- scifdev->cookie = NULL;
- }
- scif_destroy_intr_wq(scifdev);
- cancel_delayed_work(&scifdev->qp_dwork);
- scif_free_qp(scifdev);
- scifdev->rdb = -1;
- scifdev->sdev = NULL;
-}
-
-static struct scif_hw_dev_id id_table[] = {
- { MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
- { 0 },
-};
-
-static struct scif_driver scif_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = scif_probe,
- .remove = scif_remove,
-};
-
-static int _scif_init(void)
-{
- int rc;
-
- mutex_init(&scif_info.eplock);
- spin_lock_init(&scif_info.rmalock);
- spin_lock_init(&scif_info.nb_connect_lock);
- spin_lock_init(&scif_info.port_lock);
- mutex_init(&scif_info.conflock);
- mutex_init(&scif_info.connlock);
- mutex_init(&scif_info.fencelock);
- INIT_LIST_HEAD(&scif_info.uaccept);
- INIT_LIST_HEAD(&scif_info.listen);
- INIT_LIST_HEAD(&scif_info.zombie);
- INIT_LIST_HEAD(&scif_info.connected);
- INIT_LIST_HEAD(&scif_info.disconnected);
- INIT_LIST_HEAD(&scif_info.rma);
- INIT_LIST_HEAD(&scif_info.rma_tc);
- INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
- INIT_LIST_HEAD(&scif_info.fence);
- INIT_LIST_HEAD(&scif_info.nb_connect_list);
- init_waitqueue_head(&scif_info.exitwq);
- scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
- scif_info.en_msg_log = 0;
- scif_info.p2p_enable = 1;
- rc = scif_setup_scifdev();
- if (rc)
- goto error;
- unaligned_cache = kmem_cache_create("Unaligned_DMA",
- SCIF_KMEM_UNALIGNED_BUF_SIZE,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!unaligned_cache) {
- rc = -ENOMEM;
- goto free_sdev;
- }
- INIT_WORK(&scif_info.misc_work, scif_misc_handler);
- INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
- INIT_WORK(&scif_info.conn_work, scif_conn_handler);
- idr_init(&scif_ports);
- return 0;
-free_sdev:
- scif_destroy_scifdev();
-error:
- return rc;
-}
-
-static void _scif_exit(void)
-{
- idr_destroy(&scif_ports);
- kmem_cache_destroy(unaligned_cache);
- scif_destroy_scifdev();
-}
-
-static int __init scif_init(void)
-{
- struct miscdevice *mdev = &scif_info.mdev;
- int rc;
-
- _scif_init();
- iova_cache_get();
- rc = scif_peer_bus_init();
- if (rc)
- goto exit;
- rc = scif_register_driver(&scif_driver);
- if (rc)
- goto peer_bus_exit;
- rc = misc_register(mdev);
- if (rc)
- goto unreg_scif;
- scif_init_debugfs();
- return 0;
-unreg_scif:
- scif_unregister_driver(&scif_driver);
-peer_bus_exit:
- scif_peer_bus_exit();
-exit:
- _scif_exit();
- return rc;
-}
-
-static void __exit scif_exit(void)
-{
- scif_exit_debugfs();
- misc_deregister(&scif_info.mdev);
- scif_unregister_driver(&scif_driver);
- scif_peer_bus_exit();
- iova_cache_put();
- _scif_exit();
-}
-
-module_init(scif_init);
-module_exit(scif_exit);
-
-MODULE_DEVICE_TABLE(scif, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) SCIF driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/scif/scif_main.h b/drivers/misc/mic/scif/scif_main.h
deleted file mode 100644
index bb3ab97d5b35..000000000000
--- a/drivers/misc/mic/scif/scif_main.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_MAIN_H
-#define SCIF_MAIN_H
-
-#include <linux/sched/signal.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/dmaengine.h>
-#include <linux/iova.h>
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-#include <linux/vmalloc.h>
-#include <linux/scif.h>
-#include "../common/mic_dev.h"
-
-#define SCIF_MGMT_NODE 0
-#define SCIF_DEFAULT_WATCHDOG_TO 30
-#define SCIF_NODE_ACCEPT_TIMEOUT (3 * HZ)
-#define SCIF_NODE_ALIVE_TIMEOUT (SCIF_DEFAULT_WATCHDOG_TO * HZ)
-#define SCIF_RMA_TEMP_CACHE_LIMIT 0x20000
-
-/*
- * Generic state used for certain node QP message exchanges
- * like Unregister, Alloc etc.
- */
-enum scif_msg_state {
- OP_IDLE = 1,
- OP_IN_PROGRESS,
- OP_COMPLETED,
- OP_FAILED
-};
-
-/*
- * struct scif_info - Global SCIF information
- *
- * @nodeid: Node ID of this node as seen by other nodes
- * @maxid: Max known node ID
- * @total: Total number of SCIF nodes
- * @nr_zombies: number of zombie endpoints
- * @eplock: Lock to synchronize listening, zombie endpoint lists
- * @connlock: Lock to synchronize connected and disconnected lists
- * @nb_connect_lock: Synchronize non-blocking connect operations
- * @port_lock: Synchronize access to SCIF ports
- * @uaccept: List of user acceptreq waiting for acceptreg
- * @listen: List of listening end points
- * @zombie: List of zombie end points with pending RMA's
- * @connected: List of end points in connected state
- * @disconnected: List of end points in disconnected state
- * @nb_connect_list: List for non-blocking connections
- * @misc_work: miscellaneous SCIF tasks
- * @conflock: Lock to synchronize SCIF node configuration changes
- * @en_msg_log: Enable debug message logging
- * @p2p_enable: Enable P2P SCIF network
- * @mdev: The MISC device
- * @conn_work: Work for workqueue handling all connections
- * @exitwq: Wait queue for waiting for an EXIT node QP message response
- * @loopb_dev: Dummy SCIF device used for loopback
- * @loopb_wq: Workqueue used for handling loopback messages
- * @loopb_wqname: Name of loopback workqueue
- * @loopb_work: Used for submitting work to loopb_wq
- * @loopb_recv_q: List of messages received on the loopb_wq
- * @card_initiated_exit: set when the card has initiated the exit
- * @rmalock: Synchronize access to RMA operations
- * @fencelock: Synchronize access to list of remote fences requested.
- * @rma: List of temporary registered windows to be destroyed.
- * @rma_tc: List of temporary registered & cached windows to be destroyed
- * @fence: List of remote fence requests
- * @mmu_notif_work: Work for registration caching MMU notifier workqueue
- * @mmu_notif_cleanup: List of temporary cached windows for reg cache
- * @rma_tc_limit: RMA temporary cache limit
- */
-struct scif_info {
- u8 nodeid;
- u8 maxid;
- u8 total;
- u32 nr_zombies;
- struct mutex eplock;
- struct mutex connlock;
- spinlock_t nb_connect_lock;
- spinlock_t port_lock;
- struct list_head uaccept;
- struct list_head listen;
- struct list_head zombie;
- struct list_head connected;
- struct list_head disconnected;
- struct list_head nb_connect_list;
- struct work_struct misc_work;
- struct mutex conflock;
- u8 en_msg_log;
- u8 p2p_enable;
- struct miscdevice mdev;
- struct work_struct conn_work;
- wait_queue_head_t exitwq;
- struct scif_dev *loopb_dev;
- struct workqueue_struct *loopb_wq;
- char loopb_wqname[16];
- struct work_struct loopb_work;
- struct list_head loopb_recv_q;
- bool card_initiated_exit;
- spinlock_t rmalock;
- struct mutex fencelock;
- struct list_head rma;
- struct list_head rma_tc;
- struct list_head fence;
- struct work_struct mmu_notif_work;
- struct list_head mmu_notif_cleanup;
- unsigned long rma_tc_limit;
-};
-
-/*
- * struct scif_p2p_info - SCIF mapping information used for P2P
- *
- * @ppi_peer_id: SCIF peer node id
- * @ppi_sg: Scatter list for bar information (One for mmio and one for aper)
- * @sg_nentries: Number of entries in the scatterlist
- * @ppi_da: DMA address for MMIO and APER bars
- * @ppi_len: Length of MMIO and APER bars
- * @ppi_list: Link in list of mapping information
- */
-struct scif_p2p_info {
- u8 ppi_peer_id;
- struct scatterlist *ppi_sg[2];
- u64 sg_nentries[2];
- dma_addr_t ppi_da[2];
- u64 ppi_len[2];
-#define SCIF_PPI_MMIO 0
-#define SCIF_PPI_APER 1
- struct list_head ppi_list;
-};
-
-/*
- * struct scif_dev - SCIF remote device specific fields
- *
- * @node: Node id
- * @p2p: List of P2P mapping information
- * @qpairs: The node queue pair for exchanging control messages
- * @intr_wq: Workqueue for handling Node QP messages
- * @intr_wqname: Name of node QP workqueue for handling interrupts
- * @intr_bh: Used for submitting work to intr_wq
- * @lock: Lock used for synchronizing access to the scif device
- * @sdev: SCIF hardware device on the SCIF hardware bus
- * @db: doorbell the peer will trigger to generate an interrupt on self
- * @rdb: Doorbell to trigger on the peer to generate an interrupt on the peer
- * @cookie: Cookie received while registering the interrupt handler
- * @peer_add_work: Work for handling device_add for peer devices
- * @p2p_dwork: Delayed work to enable polling for P2P state
- * @qp_dwork: Delayed work for enabling polling for remote QP information
- * @p2p_retry: Number of times to retry polling of P2P state
- * @base_addr: P2P aperture bar base address
- * @mmio: The peer MMIO information used for P2P
- * @spdev: SCIF peer device on the SCIF peer bus
- * @node_remove_ack_pending: True if a node_remove_ack is pending
- * @exit_ack_pending: true if an exit_ack is pending
- * @disconn_wq: Used while waiting for a node remove response
- * @disconn_rescnt: Keeps track of number of node remove requests sent
- * @exit: Status of exit message
- * @qp_dma_addr: Queue pair DMA address passed to the peer
- * @dma_ch_idx: Round robin index for DMA channels
- * @signal_pool: DMA pool used for scheduling scif_fence_signal DMA's
- */
-struct scif_dev {
- u8 node;
- struct list_head p2p;
- struct scif_qp *qpairs;
- struct workqueue_struct *intr_wq;
- char intr_wqname[16];
- struct work_struct intr_bh;
- struct mutex lock;
- struct scif_hw_dev *sdev;
- int db;
- int rdb;
- struct mic_irq *cookie;
- struct work_struct peer_add_work;
- struct delayed_work p2p_dwork;
- struct delayed_work qp_dwork;
- int p2p_retry;
- dma_addr_t base_addr;
- struct mic_mw mmio;
- struct scif_peer_dev __rcu *spdev;
- bool node_remove_ack_pending;
- bool exit_ack_pending;
- wait_queue_head_t disconn_wq;
- atomic_t disconn_rescnt;
- enum scif_msg_state exit;
- dma_addr_t qp_dma_addr;
- int dma_ch_idx;
- struct dma_pool *signal_pool;
-};
-
-extern bool scif_reg_cache_enable;
-extern bool scif_ulimit_check;
-extern struct scif_info scif_info;
-extern struct idr scif_ports;
-extern struct bus_type scif_peer_bus;
-extern struct scif_dev *scif_dev;
-extern const struct file_operations scif_fops;
-extern const struct file_operations scif_anon_fops;
-
-/* Size of the RB for the Node QP */
-#define SCIF_NODE_QP_SIZE 0x10000
-
-#include "scif_nodeqp.h"
-#include "scif_rma.h"
-#include "scif_rma_list.h"
-
-/*
- * scifdev_self:
- * @dev: The remote SCIF Device
- *
- * Returns true if the SCIF device passed in is the self node, i.e. the
- * loopback SCIF device.
- */
-static inline int scifdev_self(struct scif_dev *dev)
-{
- return dev->node == scif_info.nodeid;
-}
-
-static inline bool scif_is_mgmt_node(void)
-{
- return !scif_info.nodeid;
-}
-
-/*
- * scifdev_is_p2p:
- * @dev: The remote SCIF Device
- *
- * Returns true if the SCIF Device is a MIC Peer to Peer SCIF device.
- */
-static inline bool scifdev_is_p2p(struct scif_dev *dev)
-{
- if (scif_is_mgmt_node())
- return false;
- else
- return dev != &scif_dev[SCIF_MGMT_NODE] &&
- !scifdev_self(dev);
-}
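-
-/*
- * Illustrative classification (editorial sketch; the handle_*() callbacks
- * are hypothetical, not part of the original driver): scifdev_self(),
- * scif_is_mgmt_node() and scifdev_is_p2p() together place every scif_dev
- * into exactly one bucket: the loopback device for this node, a peer MIC
- * card reached peer to peer, or the mgmt-node path:
- *
- *	if (scifdev_self(dev))
- *		handle_loopback(dev);
- *	else if (scifdev_is_p2p(dev))
- *		handle_p2p_peer(dev);
- *	else
- *		handle_mgmt_path(dev);
- */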
-
-/*
- * _scifdev_alive:
- * @scifdev: The remote SCIF Device
- *
- * Returns true if the remote SCIF Device is running or sleeping for
- * this endpoint.
- */
-static inline int _scifdev_alive(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
-
- rcu_read_lock();
- spdev = rcu_dereference(scifdev->spdev);
- rcu_read_unlock();
- return !!spdev;
-}
-
-#include "scif_epd.h"
-
-void __init scif_init_debugfs(void);
-void scif_exit_debugfs(void);
-int scif_setup_intr_wq(struct scif_dev *scifdev);
-void scif_destroy_intr_wq(struct scif_dev *scifdev);
-void scif_cleanup_scifdev(struct scif_dev *dev);
-void scif_handle_remove_node(int node);
-void scif_disconnect_node(u32 node_id, bool mgmt_initiated);
-void scif_free_qp(struct scif_dev *dev);
-void scif_misc_handler(struct work_struct *work);
-void scif_stop(struct scif_dev *scifdev);
-irqreturn_t scif_intr_handler(int irq, void *data);
-#endif /* SCIF_MAIN_H */
diff --git a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h
deleted file mode 100644
index 96b760819bfc..000000000000
--- a/drivers/misc/mic/scif/scif_map.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_MAP_H
-#define SCIF_MAP_H
-
-#include "../bus/scif_bus.h"
-
-static __always_inline void *
-scif_alloc_coherent(dma_addr_t *dma_handle,
- struct scif_dev *scifdev, size_t size,
- gfp_t gfp)
-{
- void *va;
-
- if (scifdev_self(scifdev)) {
- va = kmalloc(size, gfp);
- if (va)
- *dma_handle = virt_to_phys(va);
- } else {
- va = dma_alloc_coherent(&scifdev->sdev->dev,
- size, dma_handle, gfp);
- if (va && scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- return va;
-}
-
-static __always_inline void
-scif_free_coherent(void *va, dma_addr_t local,
- struct scif_dev *scifdev, size_t size)
-{
- if (scifdev_self(scifdev)) {
- kfree(va);
- } else {
- if (scifdev_is_p2p(scifdev) && local > scifdev->base_addr)
- local = local - scifdev->base_addr;
- dma_free_coherent(&scifdev->sdev->dev,
- size, va, local);
- }
-}
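-
-/*
- * Illustrative pairing (editorial sketch with a hypothetical caller and
- * size, not part of the original driver): the same scifdev and size must
- * be passed to scif_free_coherent() so the P2P aperture offset added in
- * scif_alloc_coherent() can be subtracted again before dma_free_coherent():
- *
- *	dma_addr_t da;
- *	void *va = scif_alloc_coherent(&da, scifdev, size, GFP_KERNEL);
- *
- *	if (!va)
- *		return -ENOMEM;
- *	...
- *	scif_free_coherent(va, da, scifdev, size);
- */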
-
-static __always_inline int
-scif_map_single(dma_addr_t *dma_handle,
- void *local, struct scif_dev *scifdev, size_t size)
-{
- int err = 0;
-
- if (scifdev_self(scifdev)) {
- *dma_handle = virt_to_phys((local));
- } else {
- *dma_handle = dma_map_single(&scifdev->sdev->dev,
- local, size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&scifdev->sdev->dev, *dma_handle))
- err = -ENOMEM;
- else if (scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- if (err)
- *dma_handle = 0;
- return err;
-}
-
-static __always_inline void
-scif_unmap_single(dma_addr_t local, struct scif_dev *scifdev,
- size_t size)
-{
- if (!scifdev_self(scifdev)) {
- if (scifdev_is_p2p(scifdev))
- local = local - scifdev->base_addr;
- dma_unmap_single(&scifdev->sdev->dev, local,
- size, DMA_BIDIRECTIONAL);
- }
-}
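-
-/*
- * Illustrative pairing (editorial sketch with a hypothetical caller and
- * buffer, not part of the original driver). On success *dma_handle already
- * includes the P2P aperture offset (and is set to 0 on failure), so the
- * same handle goes straight back into scif_unmap_single():
- *
- *	dma_addr_t da;
- *	int err = scif_map_single(&da, buf, scifdev, len);
- *
- *	if (err)
- *		return err;
- *	...
- *	scif_unmap_single(da, scifdev, len);
- */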
-
-static __always_inline void *
-scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev)
-{
- void *out_virt;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- if (scifdev_self(scifdev))
- out_virt = phys_to_virt(phys);
- else
- out_virt = (void __force *)
- sdev->hw_ops->remap(sdev, phys, size);
- return out_virt;
-}
-
-static __always_inline void
-scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev)
-{
- if (!scifdev_self(scifdev)) {
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt);
- }
-}
-
-static __always_inline int
-scif_map_page(dma_addr_t *dma_handle, struct page *page,
- struct scif_dev *scifdev)
-{
- int err = 0;
-
- if (scifdev_self(scifdev)) {
- *dma_handle = page_to_phys(page);
- } else {
- struct scif_hw_dev *sdev = scifdev->sdev;
- *dma_handle = dma_map_page(&sdev->dev,
- page, 0x0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&sdev->dev, *dma_handle))
- err = -ENOMEM;
- else if (scifdev_is_p2p(scifdev))
- *dma_handle = *dma_handle + scifdev->base_addr;
- }
- if (err)
- *dma_handle = 0;
- return err;
-}
-#endif /* SCIF_MAP_H */
diff --git a/drivers/misc/mic/scif/scif_mmap.c b/drivers/misc/mic/scif/scif_mmap.c
deleted file mode 100644
index a151d416f39c..000000000000
--- a/drivers/misc/mic/scif/scif_mmap.c
+++ /dev/null
@@ -1,690 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-
-/*
- * struct scif_vma_info - Information about a remote memory mapping
- * created via scif_mmap(..)
- * @vma: VM area struct
- * @list: link to list of active vmas
- */
-struct scif_vma_info {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_window *recv_window =
- (struct scif_window *)msg->payload[0];
- struct scif_endpt *ep;
-
- ep = (struct scif_endpt *)recv_window->ep;
- req.out_window = &window;
- req.offset = recv_window->offset;
- req.prot = recv_window->prot;
- req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.reg_list;
- msg->payload[0] = ep->remote_ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- if (scif_query_window(&req)) {
- dev_err(&scifdev->sdev->dev,
- "%s %d -ENXIO\n", __func__, __LINE__);
- msg->uop = SCIF_UNREGISTER_ACK;
- goto error;
- }
-
- scif_put_window(window, window->nr_pages);
-
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- ep->rma_info.async_list_del = 1;
- list_del_init(&window->list);
- scif_free_window_offset(ep, window, window->offset);
- }
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (window && !window->ref_count)
- scif_queue_for_cleanup(window, &scif_info.rma);
-}
-
-/*
- * Remove valid remote memory mappings created via scif_mmap(..) from the
- * process address space since the remote node is lost
- */
-static void __scif_zap_mmaps(struct scif_endpt *ep)
-{
- struct list_head *item;
- struct scif_vma_info *info;
- struct vm_area_struct *vma;
- unsigned long size;
-
- spin_lock(&ep->lock);
- list_for_each(item, &ep->rma_info.vma_list) {
- info = list_entry(item, struct scif_vma_info, list);
- vma = info->vma;
- size = vma->vm_end - vma->vm_start;
- zap_vma_ptes(vma, vma->vm_start, size);
- dev_dbg(scif_info.mdev.this_device,
- "%s ep %p zap vma %p size 0x%lx\n",
- __func__, ep, info->vma, size);
- }
- spin_unlock(&ep->lock);
-}
-
-/*
- * Traverse the list of endpoints for a particular remote node and
- * zap valid remote memory mappings since the remote node is lost
- */
-static void _scif_zap_mmaps(int node, struct list_head *head)
-{
- struct scif_endpt *ep;
- struct list_head *item;
-
- mutex_lock(&scif_info.connlock);
- list_for_each(item, head) {
- ep = list_entry(item, struct scif_endpt, list);
- if (ep->remote_dev->node == node)
- __scif_zap_mmaps(ep);
- }
- mutex_unlock(&scif_info.connlock);
-}
-
-/*
- * Wrapper for removing remote memory mappings for a particular node. This API
- * is called by peer nodes as part of handling a lost node.
- */
-void scif_zap_mmaps(int node)
-{
- _scif_zap_mmaps(node, &scif_info.connected);
- _scif_zap_mmaps(node, &scif_info.disconnected);
-}
-
-/*
- * This API is only called while handling a lost node:
- * a) Remote node is dead.
- * b) Remote memory mappings have been zapped
- * So we can traverse the remote_reg_list without any locks. Since
- * the window has not yet been unregistered we can drop the ref count
- * and queue it to the cleanup thread.
- */
-static void __scif_cleanup_rma_for_zombies(struct scif_endpt *ep)
-{
- struct list_head *pos, *tmp;
- struct scif_window *window;
-
- list_for_each_safe(pos, tmp, &ep->rma_info.remote_reg_list) {
- window = list_entry(pos, struct scif_window, list);
- if (window->ref_count)
- scif_put_window(window, window->nr_pages);
- else
- dev_err(scif_info.mdev.this_device,
- "%s %d unexpected\n",
- __func__, __LINE__);
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- list_del_init(&window->list);
- scif_queue_for_cleanup(window, &scif_info.rma);
- }
- }
-}
-
-/* Cleanup remote registration lists for zombie endpoints */
-void scif_cleanup_rma_for_zombies(int node)
-{
- struct scif_endpt *ep;
- struct list_head *item;
-
- mutex_lock(&scif_info.eplock);
- list_for_each(item, &scif_info.zombie) {
- ep = list_entry(item, struct scif_endpt, list);
- if (ep->remote_dev && ep->remote_dev->node == node)
- __scif_cleanup_rma_for_zombies(ep);
- }
- mutex_unlock(&scif_info.eplock);
- flush_work(&scif_info.misc_work);
-}
-
-/* Insert the VMA into the per endpoint VMA list */
-static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
-{
- struct scif_vma_info *info;
- int err = 0;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto done;
- }
- info->vma = vma;
- spin_lock(&ep->lock);
- list_add_tail(&info->list, &ep->rma_info.vma_list);
- spin_unlock(&ep->lock);
-done:
- return err;
-}
-
-/* Delete the VMA from the per endpoint VMA list */
-static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma)
-{
- struct list_head *item;
- struct scif_vma_info *info;
-
- spin_lock(&ep->lock);
- list_for_each(item, &ep->rma_info.vma_list) {
- info = list_entry(item, struct scif_vma_info, list);
- if (info->vma == vma) {
- list_del(&info->list);
- kfree(info);
- break;
- }
- }
- spin_unlock(&ep->lock);
-}
-
-static phys_addr_t scif_get_phys(phys_addr_t phys, struct scif_endpt *ep)
-{
- struct scif_dev *scifdev = (struct scif_dev *)ep->remote_dev;
- struct scif_hw_dev *sdev = scifdev->sdev;
- phys_addr_t out_phys, apt_base = 0;
-
- /*
- * If the DMA address is card relative then we need to add the
- * aperture base for mmap to work correctly
- */
- if (!scifdev_self(scifdev) && sdev->aper && sdev->card_rel_da)
- apt_base = sdev->aper->pa;
- out_phys = apt_base + phys;
- return out_phys;
-}
-
-int scif_get_pages(scif_epd_t epd, off_t offset, size_t len,
- struct scif_range **pages)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- int nr_pages, err, i;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI get_pinned_pages: ep %p offset 0x%lx len 0x%lx\n",
- ep, offset, len);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- if (!len || (offset < 0) ||
- (offset + len < offset) ||
- (ALIGN(offset, PAGE_SIZE) != offset) ||
- (ALIGN(len, PAGE_SIZE) != len))
- return -EINVAL;
-
- nr_pages = len >> PAGE_SHIFT;
-
- req.out_window = &window;
- req.offset = offset;
- req.prot = 0;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_SINGLE;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error;
- }
-
- /* Allocate scif_range */
- *pages = kzalloc(sizeof(**pages), GFP_KERNEL);
- if (!*pages) {
- err = -ENOMEM;
- goto error;
- }
-
- /* Allocate phys addr array */
- (*pages)->phys_addr = scif_zalloc(nr_pages * sizeof(dma_addr_t));
- if (!((*pages)->phys_addr)) {
- err = -ENOMEM;
- goto error;
- }
-
- if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev)) {
- /* Allocate virtual address array */
-		(*pages)->va = scif_zalloc(nr_pages * sizeof(void *));
- if (!(*pages)->va) {
- err = -ENOMEM;
- goto error;
- }
- }
- /* Populate the values */
- (*pages)->cookie = window;
- (*pages)->nr_pages = nr_pages;
- (*pages)->prot_flags = window->prot;
-
- for (i = 0; i < nr_pages; i++) {
- (*pages)->phys_addr[i] =
- __scif_off_to_dma_addr(window, offset +
- (i * PAGE_SIZE));
- (*pages)->phys_addr[i] = scif_get_phys((*pages)->phys_addr[i],
- ep);
- if (scif_is_mgmt_node() && !scifdev_self(ep->remote_dev))
- (*pages)->va[i] =
- ep->remote_dev->sdev->aper->va +
- (*pages)->phys_addr[i] -
- ep->remote_dev->sdev->aper->pa;
- }
-
- scif_get_window(window, nr_pages);
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (err) {
- if (*pages) {
- scif_free((*pages)->phys_addr,
- nr_pages * sizeof(dma_addr_t));
- scif_free((*pages)->va,
- nr_pages * sizeof(void *));
- kfree(*pages);
- *pages = NULL;
- }
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- }
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_get_pages);
-
-int scif_put_pages(struct scif_range *pages)
-{
- struct scif_endpt *ep;
- struct scif_window *window;
- struct scifmsg msg;
-
- if (!pages || !pages->cookie)
- return -EINVAL;
-
- window = pages->cookie;
-
- if (!window || window->magic != SCIFEP_MAGIC)
- return -EINVAL;
-
- ep = (struct scif_endpt *)window->ep;
- /*
- * If the state is SCIFEP_CONNECTED or SCIFEP_DISCONNECTED then the
- * callee should be allowed to release references to the pages,
- * else the endpoint was not connected in the first place,
- * hence the ENOTCONN.
- */
- if (ep->state != SCIFEP_CONNECTED && ep->state != SCIFEP_DISCONNECTED)
- return -ENOTCONN;
-
- mutex_lock(&ep->rma_info.rma_lock);
-
- scif_put_window(window, pages->nr_pages);
-
- /* Initiate window destruction if ref count is zero */
- if (!window->ref_count) {
- list_del(&window->list);
- mutex_unlock(&ep->rma_info.rma_lock);
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- /* Inform the peer about this window being destroyed. */
- msg.uop = SCIF_MUNMAP;
- msg.src = ep->port;
- msg.payload[0] = window->peer_window;
- /* No error handling for notification messages */
- scif_nodeqp_send(ep->remote_dev, &msg);
- /* Destroy this window from the peer's registered AS */
- scif_destroy_remote_window(window);
- } else {
- mutex_unlock(&ep->rma_info.rma_lock);
- }
-
- scif_free(pages->phys_addr, pages->nr_pages * sizeof(dma_addr_t));
- scif_free(pages->va, pages->nr_pages * sizeof(void *));
- kfree(pages);
- return 0;
-}
-EXPORT_SYMBOL_GPL(scif_put_pages);
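-
-/*
- * Illustrative usage (editorial sketch with a hypothetical kernel client,
- * not part of the original driver): every successful scif_get_pages() must
- * be balanced by scif_put_pages() once the pinned window is no longer used:
- *
- *	struct scif_range *pages;
- *	int err = scif_get_pages(epd, offset, len, &pages);
- *
- *	if (err)
- *		return err;
- *	(access pages->phys_addr[0] .. pages->phys_addr[pages->nr_pages - 1])
- *	err = scif_put_pages(pages);
- */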
-
-/*
- * scif_rma_list_mmap:
- *
- * Traverse the remote registration list starting from start_window:
- * 1) Create VtoP mappings via remap_pfn_range(..)
- * 2) Once step 1) completes successfully, traverse the range of
- *    windows again and bump the reference count.
- * RMA lock must be held.
- */
-static int scif_rma_list_mmap(struct scif_window *start_window, s64 offset,
- int nr_pages, struct vm_area_struct *vma)
-{
- s64 end_offset, loop_offset = offset;
- struct scif_window *window = start_window;
- int loop_nr_pages, nr_pages_left = nr_pages;
- struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
- struct list_head *head = &ep->rma_info.remote_reg_list;
- int i, err = 0;
- dma_addr_t phys_addr;
- struct scif_window_iter src_win_iter;
- size_t contig_bytes = 0;
-
- might_sleep();
- list_for_each_entry_from(window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_init_window_iter(window, &src_win_iter);
- for (i = 0; i < loop_nr_pages; i++) {
- phys_addr = scif_off_to_dma_addr(window, loop_offset,
- &contig_bytes,
- &src_win_iter);
- phys_addr = scif_get_phys(phys_addr, ep);
- err = remap_pfn_range(vma,
- vma->vm_start +
- loop_offset - offset,
- phys_addr >> PAGE_SHIFT,
- PAGE_SIZE,
- vma->vm_page_prot);
- if (err)
- goto error;
- loop_offset += PAGE_SIZE;
- }
- nr_pages_left -= loop_nr_pages;
- if (!nr_pages_left)
- break;
- }
- /*
- * No more failures expected. Bump up the ref count for all
- * the windows. The ref counts are bumped in this second traversal
- * from start_window so that errors encountered during
- * remap_pfn_range(..) above do not require any unwinding.
- */
- loop_offset = offset;
- nr_pages_left = nr_pages;
- window = start_window;
- head = &ep->rma_info.remote_reg_list;
- list_for_each_entry_from(window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_get_window(window, loop_nr_pages);
- nr_pages_left -= loop_nr_pages;
- loop_offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages_left)
- break;
- }
-error:
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
-/*
- * scif_rma_list_munmap:
- *
- * Traverse the remote registration list starting from window:
- * 1) Decrement ref count.
- * 2) If the ref count drops to zero then send a SCIF_MUNMAP message to peer.
- * RMA lock must be held.
- */
-static void scif_rma_list_munmap(struct scif_window *start_window,
- s64 offset, int nr_pages)
-{
- struct scifmsg msg;
- s64 loop_offset = offset, end_offset;
- int loop_nr_pages, nr_pages_left = nr_pages;
- struct scif_endpt *ep = (struct scif_endpt *)start_window->ep;
- struct list_head *head = &ep->rma_info.remote_reg_list;
- struct scif_window *window = start_window, *_window;
-
- msg.uop = SCIF_MUNMAP;
- msg.src = ep->port;
- loop_offset = offset;
- nr_pages_left = nr_pages;
- list_for_each_entry_safe_from(window, _window, head, list) {
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min_t(int,
- (end_offset - loop_offset) >> PAGE_SHIFT,
- nr_pages_left);
- scif_put_window(window, loop_nr_pages);
- if (!window->ref_count) {
- struct scif_dev *rdev = ep->remote_dev;
-
- scif_drain_dma_intr(rdev->sdev,
- ep->rma_info.dma_chan);
- /* Inform the peer about this munmap */
- msg.payload[0] = window->peer_window;
- /* No error handling for Notification messages. */
- scif_nodeqp_send(ep->remote_dev, &msg);
- list_del(&window->list);
- /* Destroy this window from the peer's registered AS */
- scif_destroy_remote_window(window);
- }
- nr_pages_left -= loop_nr_pages;
- loop_offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages_left)
- break;
- }
-}
-
-/*
- * The private data field of each VMA used to mmap a remote window
- * points to an instance of struct vma_pvt
- */
-struct vma_pvt {
- struct scif_endpt *ep; /* End point for remote window */
- s64 offset; /* offset within remote window */
- bool valid_offset; /* offset is valid only if the original
- * mmap request was for a single page
- * else the offset within the vma is
- * the correct offset
- */
- struct kref ref;
-};
-
-static void vma_pvt_release(struct kref *ref)
-{
- struct vma_pvt *vmapvt = container_of(ref, struct vma_pvt, ref);
-
- kfree(vmapvt);
-}
-
-/**
- * scif_vma_open - VMA open driver callback
- * @vma: VMM memory area.
- * The open method is called by the kernel to allow the subsystem implementing
- * the VMA to initialize the area. This method is invoked any time a new
- * reference to the VMA is made (when a process forks, for example).
- * The one exception happens when the VMA is first created by mmap;
- * in this case, the driver's mmap method is called instead.
- * This function is also invoked when an existing VMA is split by the kernel
- * due to a call to munmap on a subset of the VMA resulting in two VMAs.
- * The kernel invokes this function only on one of the two VMAs.
- */
-static void scif_vma_open(struct vm_area_struct *vma)
-{
- struct vma_pvt *vmapvt = vma->vm_private_data;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI vma open: vma_start 0x%lx vma_end 0x%lx\n",
- vma->vm_start, vma->vm_end);
- scif_insert_vma(vmapvt->ep, vma);
- kref_get(&vmapvt->ref);
-}
-
-/**
- * scif_munmap - VMA close driver callback.
- * @vma: VMM memory area.
- * When an area is destroyed, the kernel calls its close operation.
- * Note that there's no usage count associated with VMAs; the area
- * is opened and closed exactly once by each process that uses it.
- */
-static void scif_munmap(struct vm_area_struct *vma)
-{
- struct scif_endpt *ep;
- struct vma_pvt *vmapvt = vma->vm_private_data;
- int nr_pages = vma_pages(vma);
- s64 offset;
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- int err;
-
- might_sleep();
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI munmap: vma_start 0x%lx vma_end 0x%lx\n",
- vma->vm_start, vma->vm_end);
- ep = vmapvt->ep;
- offset = vmapvt->valid_offset ? vmapvt->offset :
- (vma->vm_pgoff) << PAGE_SHIFT;
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI munmap: ep %p nr_pages 0x%x offset 0x%llx\n",
- ep, nr_pages, offset);
- req.out_window = &window;
- req.offset = offset;
- req.nr_bytes = vma->vm_end - vma->vm_start;
- req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
-
- err = scif_query_window(&req);
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- else
- scif_rma_list_munmap(window, offset, nr_pages);
-
- mutex_unlock(&ep->rma_info.rma_lock);
- /*
- * The kernel probably zeroes these out but we still want
- * to clean up our own mess just in case.
- */
- vma->vm_ops = NULL;
- vma->vm_private_data = NULL;
- kref_put(&vmapvt->ref, vma_pvt_release);
- scif_delete_vma(ep, vma);
-}
-
-static const struct vm_operations_struct scif_vm_ops = {
- .open = scif_vma_open,
- .close = scif_munmap,
-};
-
-/**
- * scif_mmap - Map pages in virtual address space to a remote window.
- * @vma: VMM memory area.
- * @epd: endpoint descriptor
- *
- * Return: Upon successful completion, scif_mmap() returns zero
- * else an apt error is returned as documented in scif.h
- */
-int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 start_offset = vma->vm_pgoff << PAGE_SHIFT;
- int nr_pages = vma_pages(vma);
- int err;
- struct vma_pvt *vmapvt;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI mmap: ep %p start_offset 0x%llx nr_pages 0x%x\n",
- ep, start_offset, nr_pages);
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- might_sleep();
-
- err = scif_insert_vma(ep, vma);
- if (err)
- return err;
-
- vmapvt = kzalloc(sizeof(*vmapvt), GFP_KERNEL);
- if (!vmapvt) {
- scif_delete_vma(ep, vma);
- return -ENOMEM;
- }
-
- vmapvt->ep = ep;
- kref_init(&vmapvt->ref);
-
- req.out_window = &window;
- req.offset = start_offset;
- req.nr_bytes = vma->vm_end - vma->vm_start;
- req.prot = vma->vm_flags & (VM_READ | VM_WRITE);
- req.type = SCIF_WINDOW_PARTIAL;
- req.head = &ep->rma_info.remote_reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unlock;
- }
-
- /* Default prot for loopback */
- if (!scifdev_self(ep->remote_dev))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- /*
- * VM_DONTCOPY - Do not copy this vma on fork
- * VM_DONTEXPAND - Cannot expand with mremap()
- * VM_DONTDUMP - Omit this VMA from core dumps
- * VM_PFNMAP - Page-ranges managed without "struct page"
- * VM_IO - Memory mapped I/O or similar
- *
- * We do not want to copy this VMA automatically on a fork(),
- * expand this VMA due to mremap() or swap out these pages since
- * the VMA is actually backed by physical pages in the remote
- * node's physical memory and not via a struct page.
- */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
-
- if (!scifdev_self(ep->remote_dev))
- vma->vm_flags |= VM_IO | VM_PFNMAP;
-
- /* Map this range of windows */
- err = scif_rma_list_mmap(window, start_offset, nr_pages, vma);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unlock;
- }
- /* Set up the driver call back */
- vma->vm_ops = &scif_vm_ops;
- vma->vm_private_data = vmapvt;
-error_unlock:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (err) {
- kfree(vmapvt);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- scif_delete_vma(ep, vma);
- }
- return err;
-}
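-
-/*
- * Illustrative caller (editorial sketch; my_mmap() is hypothetical, not
- * from the original driver): scif_mmap() is meant to be invoked from a
- * file_operations mmap handler, with the endpoint recovered from the
- * struct file:
- *
- *	static int my_mmap(struct file *f, struct vm_area_struct *vma)
- *	{
- *		scif_epd_t epd = f->private_data;
- *
- *		return scif_mmap(vma, epd);
- *	}
- */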
diff --git a/drivers/misc/mic/scif/scif_nm.c b/drivers/misc/mic/scif/scif_nm.c
deleted file mode 100644
index c4d9422082b7..000000000000
--- a/drivers/misc/mic/scif/scif_nm.c
+++ /dev/null
@@ -1,229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_peer_bus.h"
-
-#include "scif_main.h"
-#include "scif_map.h"
-
-/**
- * scif_invalidate_ep() - Set state for all connected endpoints
- * to disconnected and wake up all send/recv waitqueues
- *
- * @node: Node to invalidate
- */
-static void scif_invalidate_ep(int node)
-{
- struct scif_endpt *ep;
- struct list_head *pos, *tmpq;
-
- flush_work(&scif_info.conn_work);
- mutex_lock(&scif_info.connlock);
- list_for_each_safe(pos, tmpq, &scif_info.disconnected) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->remote_dev->node == node) {
- scif_unmap_all_windows(ep);
- spin_lock(&ep->lock);
- scif_cleanup_ep_qp(ep);
- spin_unlock(&ep->lock);
- }
- }
- list_for_each_safe(pos, tmpq, &scif_info.connected) {
- ep = list_entry(pos, struct scif_endpt, list);
- if (ep->remote_dev->node == node) {
- list_del(pos);
- spin_lock(&ep->lock);
- ep->state = SCIFEP_DISCONNECTED;
- list_add_tail(&ep->list, &scif_info.disconnected);
- scif_cleanup_ep_qp(ep);
- wake_up_interruptible(&ep->sendwq);
- wake_up_interruptible(&ep->recvwq);
- spin_unlock(&ep->lock);
- scif_unmap_all_windows(ep);
- }
- }
- mutex_unlock(&scif_info.connlock);
-}
-
-void scif_free_qp(struct scif_dev *scifdev)
-{
- struct scif_qp *qp = scifdev->qpairs;
-
- if (!qp)
- return;
- scif_unmap_single(qp->local_buf, scifdev, qp->inbound_q.size);
- kfree(qp->inbound_q.rb_base);
- scif_unmap_single(qp->local_qp, scifdev, sizeof(struct scif_qp));
- kfree(scifdev->qpairs);
- scifdev->qpairs = NULL;
-}
-
-static void scif_cleanup_qp(struct scif_dev *dev)
-{
- struct scif_qp *qp = &dev->qpairs[0];
-
- if (!qp)
- return;
- scif_iounmap((void *)qp->remote_qp, sizeof(struct scif_qp), dev);
- scif_iounmap((void *)qp->outbound_q.rb_base,
- sizeof(struct scif_qp), dev);
- qp->remote_qp = NULL;
- qp->local_write = 0;
- qp->inbound_q.current_write_offset = 0;
- qp->inbound_q.current_read_offset = 0;
- if (scifdev_is_p2p(dev))
- scif_free_qp(dev);
-}
-
-void scif_send_acks(struct scif_dev *dev)
-{
- struct scifmsg msg;
-
- if (dev->node_remove_ack_pending) {
- msg.uop = SCIF_NODE_REMOVE_ACK;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = SCIF_MGMT_NODE;
- msg.payload[0] = dev->node;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg);
- dev->node_remove_ack_pending = false;
- }
- if (dev->exit_ack_pending) {
- msg.uop = SCIF_EXIT_ACK;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = dev->node;
- scif_nodeqp_send(dev, &msg);
- dev->exit_ack_pending = false;
- }
-}
-
-/**
- * scif_cleanup_scifdev - Uninitialize SCIF data structures for remote
- * SCIF device.
- * @dev: Remote SCIF device.
- */
-void scif_cleanup_scifdev(struct scif_dev *dev)
-{
- struct scif_hw_dev *sdev = dev->sdev;
-
- if (!dev->sdev)
- return;
- if (scifdev_is_p2p(dev)) {
- if (dev->cookie) {
- sdev->hw_ops->free_irq(sdev, dev->cookie, dev);
- dev->cookie = NULL;
- }
- scif_destroy_intr_wq(dev);
- }
- flush_work(&scif_info.misc_work);
- scif_destroy_p2p(dev);
- scif_invalidate_ep(dev->node);
- scif_zap_mmaps(dev->node);
- scif_cleanup_rma_for_zombies(dev->node);
- flush_work(&scif_info.misc_work);
- scif_send_acks(dev);
- if (!dev->node && scif_info.card_initiated_exit) {
- /*
- * Send a SCIF_EXIT message, which is the last message from MIC
- * to the Host and wait for a SCIF_EXIT_ACK
- */
- scif_send_exit(dev);
- scif_info.card_initiated_exit = false;
- }
- scif_cleanup_qp(dev);
-}
-
-/**
- * scif_handle_remove_node
- *
- * @node: Node to remove
- */
-void scif_handle_remove_node(int node)
-{
- struct scif_dev *scifdev = &scif_dev[node];
-
- if (scif_peer_unregister_device(scifdev))
- scif_send_acks(scifdev);
-}
-
-static int scif_send_rmnode_msg(int node, int remove_node)
-{
- struct scifmsg notif_msg;
- struct scif_dev *dev = &scif_dev[node];
-
- notif_msg.uop = SCIF_NODE_REMOVE;
- notif_msg.src.node = scif_info.nodeid;
- notif_msg.dst.node = node;
- notif_msg.payload[0] = remove_node;
- return scif_nodeqp_send(dev, &notif_msg);
-}
-
-/**
- * scif_disconnect_node
- *
- * @node_id: source node id [in]
- * @mgmt_initiated: Disconnection initiated from the mgmt node
- *
- * Disconnect a node from the SCIF network.
- */
-void scif_disconnect_node(u32 node_id, bool mgmt_initiated)
-{
- int ret;
- int msg_cnt = 0;
- u32 i = 0;
- struct scif_dev *scifdev = &scif_dev[node_id];
-
- if (!node_id)
- return;
-
- atomic_set(&scifdev->disconn_rescnt, 0);
-
- /* Destroy p2p network */
- for (i = 1; i <= scif_info.maxid; i++) {
- if (i == node_id)
- continue;
- ret = scif_send_rmnode_msg(i, node_id);
- if (!ret)
- msg_cnt++;
- }
- /* Wait for the remote nodes to respond with SCIF_NODE_REMOVE_ACK */
- ret = wait_event_timeout(scifdev->disconn_wq,
- (atomic_read(&scifdev->disconn_rescnt)
- == msg_cnt), SCIF_NODE_ALIVE_TIMEOUT);
- /* Tell the card to clean up */
- if (mgmt_initiated && _scifdev_alive(scifdev))
- /*
- * Send a SCIF_EXIT message, which is the last message from Host
- * to the MIC and wait for a SCIF_EXIT_ACK
- */
- scif_send_exit(scifdev);
- atomic_set(&scifdev->disconn_rescnt, 0);
- /* Tell the mgmt node to clean up */
- ret = scif_send_rmnode_msg(SCIF_MGMT_NODE, node_id);
- if (!ret)
- /* Wait for mgmt node to respond with SCIF_NODE_REMOVE_ACK */
- wait_event_timeout(scifdev->disconn_wq,
- (atomic_read(&scifdev->disconn_rescnt) == 1),
- SCIF_NODE_ALIVE_TIMEOUT);
-}
-
-void scif_get_node_info(void)
-{
- struct scifmsg msg;
- DECLARE_COMPLETION_ONSTACK(node_info);
-
- msg.uop = SCIF_GET_NODE_INFO;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = SCIF_MGMT_NODE;
- msg.payload[3] = (u64)&node_info;
-
-	if (scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], &msg))
- return;
-
- /* Wait for a response with SCIF_GET_NODE_INFO */
- wait_for_completion(&node_info);
-}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
deleted file mode 100644
index e0748be373f1..000000000000
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-#include "scif_main.h"
-#include "scif_nodeqp.h"
-#include "scif_map.h"
-
-/*
- ************************************************************************
- * SCIF node Queue Pair (QP) setup flow:
- *
- * 1) SCIF driver gets probed with a scif_hw_dev via the scif_hw_bus
- * 2) scif_setup_qp(..) allocates the local qp and calls
- * scif_setup_qp_connect(..) which allocates and maps the local
- * buffer for the inbound QP
- * 3) The local node updates the device page with the DMA address of the QP
- * 4) A delayed work is scheduled (qp_dwork) which periodically checks if
- * the peer node has updated its QP DMA address
- * 5) Once a valid non-zero address is found in the QP DMA address field
- * in the device page, the local node maps the remote node's QP,
- * updates its outbound QP and sends a SCIF_INIT message to the peer
- * 6) The SCIF_INIT message is received by the peer node QP interrupt bottom
- * half handler by calling scif_init(..)
- * 7) scif_init(..) registers a new SCIF peer node by calling
- * scif_peer_register_device(..) which signifies the addition of a new
- * SCIF node
- * 8) On the mgmt node, P2P network setup/teardown is initiated if all the
- * remote nodes are online via scif_p2p_setup(..)
- * 9) For P2P setup, the host maps the remote nodes' aperture and memory
- * bars and sends a SCIF_NODE_ADD message to both nodes
- * 10) As part of scif_node_add(..), both nodes set up their local inbound
- * QPs and send a SCIF_NODE_ADD_ACK to the mgmt node
- * 11) As part of scif_node_add_ack(..) the mgmt node forwards the
- * SCIF_NODE_ADD_ACK to the remote nodes
- * 12) As part of scif_node_add_ack(..) the remote nodes update their
- * outbound QPs, make sure they can access memory on the remote node
- * and then add a new SCIF peer node by calling
- * scif_peer_register_device(..) which signifies the addition of a new
- * SCIF node.
- * 13) The SCIF network is now established across all nodes.
- *
- ************************************************************************
- * SCIF node QP teardown flow (initiated by non mgmt node):
- *
- * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
- * 2) The device page QP DMA address field is updated with 0x0
- * 3) A non mgmt node now cleans up all local data structures and sends a
- * SCIF_EXIT message to the peer and waits for a SCIF_EXIT_ACK
- * 4) As part of scif_exit(..) handling scif_disconnect_node(..) is called
- * 5) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the
- * peers and waits for a SCIF_NODE_REMOVE_ACK
- * 6) As part of scif_node_remove(..) a remote node unregisters the peer
- * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
- * 7) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
- * it sends itself a node remove message whose handling cleans up local
- * data structures and unregisters the peer node from the SCIF network
- * 8) The mgmt node sends a SCIF_EXIT_ACK
- * 9) Upon receipt of the SCIF_EXIT_ACK the node initiating the teardown
- * completes the SCIF remove routine
- * 10) The SCIF network is now torn down for the node initiating the
- * teardown sequence
- *
- ************************************************************************
- * SCIF node QP teardown flow (initiated by mgmt node):
- *
- * 1) SCIF driver gets a remove callback with a scif_hw_dev via the scif_hw_bus
- * 2) The device page QP DMA address field is updated with 0x0
- * 3) The mgmt node calls scif_disconnect_node(..)
- * 4) scif_disconnect_node(..) sends a SCIF_NODE_REMOVE message to all the peers
- * and waits for a SCIF_NODE_REMOVE_ACK
- * 5) As part of scif_node_remove(..) a remote node unregisters the peer
- * node from the SCIF network and sends a SCIF_NODE_REMOVE_ACK
- * 6) When the mgmt node has received all the SCIF_NODE_REMOVE_ACKs
- * it unregisters the peer node from the SCIF network
- * 7) The mgmt node sends a SCIF_EXIT message and waits for a SCIF_EXIT_ACK.
- * 8) A non mgmt node upon receipt of a SCIF_EXIT message calls scif_stop(..)
- * which would clean up local data structures for all SCIF nodes and
- * then send a SCIF_EXIT_ACK back to the mgmt node
- * 9) Upon receipt of the SCIF_EXIT_ACK the mgmt node sends itself a node
- * remove message whose handling cleans up local data structures and
- * destroys any P2P mappings.
- * 10) The SCIF hardware device for which a remove callback was received is now
- * disconnected from the SCIF network.
- */
-/*
- * Initializes "local" data structures for the QP. Allocates the QP
- * ring buffer (rb) and initializes the "in bound" queue.
- */
-int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
- int local_size, struct scif_dev *scifdev)
-{
- void *local_q = qp->inbound_q.rb_base;
- int err = 0;
- u32 tmp_rd = 0;
-
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
-
- /* Allocate rb only if not already allocated */
- if (!local_q) {
- local_q = kzalloc(local_size, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- return err;
- }
- }
-
- err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
- if (err)
- goto kfree;
- /*
- * To set up the inbound_q, the buffer lives locally, the read pointer
- * is remote and the write pointer is local.
- */
- scif_rb_init(&qp->inbound_q,
- &tmp_rd,
- &qp->local_write,
- local_q, get_count_order(local_size));
- /*
- * The read pointer is NULL initially and it is unsafe to use the ring
- * buffer until this changes!
- */
- qp->inbound_q.read_ptr = NULL;
- err = scif_map_single(qp_offset, qp,
- scifdev, sizeof(struct scif_qp));
- if (err)
- goto unmap;
- qp->local_qp = *qp_offset;
- return err;
-unmap:
- scif_unmap_single(qp->local_buf, scifdev, local_size);
- qp->local_buf = 0;
-kfree:
- kfree(local_q);
- return err;
-}
-
-/* This is called when the other side has already done its allocation */
-int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
- dma_addr_t phys, int local_size,
- struct scif_dev *scifdev)
-{
- void *local_q;
- void *remote_q;
- struct scif_qp *remote_qp;
- int remote_size;
- int err = 0;
-
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
- /* Start by figuring out where we need to point */
- remote_qp = scif_ioremap(phys, sizeof(struct scif_qp), scifdev);
- if (!remote_qp)
- return -EIO;
- qp->remote_qp = remote_qp;
- if (qp->remote_qp->magic != SCIFEP_MAGIC) {
- err = -EIO;
- goto iounmap;
- }
- qp->remote_buf = remote_qp->local_buf;
- remote_size = qp->remote_qp->inbound_q.size;
- remote_q = scif_ioremap(qp->remote_buf, remote_size, scifdev);
- if (!remote_q) {
- err = -EIO;
- goto iounmap;
- }
- qp->remote_qp->local_write = 0;
- /*
- * To set up the outbound_q, the buffer lives in remote memory,
- * the read pointer is local, the write pointer is remote
- */
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->remote_qp->local_write,
- remote_q,
- get_count_order(remote_size));
- local_q = kzalloc(local_size, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- goto iounmap_1;
- }
- err = scif_map_single(&qp->local_buf, local_q, scifdev, local_size);
- if (err)
- goto kfree;
- qp->remote_qp->local_read = 0;
- /*
- * To set up the inbound_q, the buffer lives locally, the read pointer
- * is remote and the write pointer is local
- */
- scif_rb_init(&qp->inbound_q,
- &qp->remote_qp->local_read,
- &qp->local_write,
- local_q, get_count_order(local_size));
- err = scif_map_single(qp_offset, qp, scifdev,
- sizeof(struct scif_qp));
- if (err)
- goto unmap;
- qp->local_qp = *qp_offset;
- return err;
-unmap:
- scif_unmap_single(qp->local_buf, scifdev, local_size);
- qp->local_buf = 0;
-kfree:
- kfree(local_q);
-iounmap_1:
- scif_iounmap(remote_q, remote_size, scifdev);
- qp->outbound_q.rb_base = NULL;
-iounmap:
- scif_iounmap(qp->remote_qp, sizeof(struct scif_qp), scifdev);
- qp->remote_qp = NULL;
- return err;
-}
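-
-/*
- * Editorial summary of the ring-buffer pointer ownership established above
- * (restating the comments in this file, no new behavior):
- *
- *	queue		buffer lives	read pointer	write pointer
- *	inbound_q	locally		remote		local
- *	outbound_q	remote memory	local		remote
- */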
-
-int scif_setup_qp_connect_response(struct scif_dev *scifdev,
- struct scif_qp *qp, u64 payload)
-{
- int err = 0;
- void *r_buf;
- int remote_size;
- phys_addr_t tmp_phys;
-
- qp->remote_qp = scif_ioremap(payload, sizeof(struct scif_qp), scifdev);
-
- if (!qp->remote_qp) {
- err = -ENOMEM;
- goto error;
- }
-
- if (qp->remote_qp->magic != SCIFEP_MAGIC) {
- dev_err(&scifdev->sdev->dev,
- "SCIFEP_MAGIC mismatch between self %d remote %d\n",
- scif_dev[scif_info.nodeid].node, scifdev->node);
- err = -ENODEV;
- goto error;
- }
-
- tmp_phys = qp->remote_qp->local_buf;
- remote_size = qp->remote_qp->inbound_q.size;
- r_buf = scif_ioremap(tmp_phys, remote_size, scifdev);
-
- if (!r_buf)
- return -EIO;
-
- qp->local_read = 0;
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->remote_qp->local_write,
- r_buf,
- get_count_order(remote_size));
- /*
- * Because the node QP may already be processing an INIT message, set
- * the read pointer so the cached read offset isn't lost
- */
- qp->remote_qp->local_read = qp->inbound_q.current_read_offset;
- /*
- * Re-initialize the inbound_q now that we know where the
- * inbound read offset really is.
- */
- scif_rb_init(&qp->inbound_q,
- &qp->remote_qp->local_read,
- &qp->local_write,
- qp->inbound_q.rb_base,
- get_count_order(qp->inbound_q.size));
-error:
- return err;
-}
-
-static __always_inline void
-scif_send_msg_intr(struct scif_dev *scifdev)
-{
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- if (scifdev_is_p2p(scifdev))
- sdev->hw_ops->send_p2p_intr(sdev, scifdev->rdb, &scifdev->mmio);
- else
- sdev->hw_ops->send_intr(sdev, scifdev->rdb);
-}
-
-int scif_qp_response(phys_addr_t phys, struct scif_dev *scifdev)
-{
- int err = 0;
- struct scifmsg msg;
-
- err = scif_setup_qp_connect_response(scifdev, scifdev->qpairs, phys);
- if (!err) {
- /*
- * Now that everything is set up and mapped, we're ready
- * to tell the peer about our queue's location
- */
- msg.uop = SCIF_INIT;
- msg.dst.node = scifdev->node;
- err = scif_nodeqp_send(scifdev, &msg);
- }
- return err;
-}
-
-void scif_send_exit(struct scif_dev *scifdev)
-{
- struct scifmsg msg;
- int ret;
-
- scifdev->exit = OP_IN_PROGRESS;
- msg.uop = SCIF_EXIT;
- msg.src.node = scif_info.nodeid;
- msg.dst.node = scifdev->node;
- ret = scif_nodeqp_send(scifdev, &msg);
- if (ret)
- goto done;
- /* Wait for a SCIF_EXIT_ACK message */
- wait_event_timeout(scif_info.exitwq, scifdev->exit == OP_COMPLETED,
- SCIF_NODE_ALIVE_TIMEOUT);
-done:
- scifdev->exit = OP_IDLE;
-}
-
-int scif_setup_qp(struct scif_dev *scifdev)
-{
- int err = 0;
- int local_size;
- struct scif_qp *qp;
-
- local_size = SCIF_NODE_QP_SIZE;
-
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- err = -ENOMEM;
- return err;
- }
- qp->magic = SCIFEP_MAGIC;
- scifdev->qpairs = qp;
- err = scif_setup_qp_connect(qp, &scifdev->qp_dma_addr,
- local_size, scifdev);
- if (err)
- goto free_qp;
- /*
- * We're as set up as we can be. The inbound_q is set up, w/o a usable
- * outbound q. When we get a message, the read_ptr will be updated,
- * and we will pull the message.
- */
- return err;
-free_qp:
- kfree(scifdev->qpairs);
- scifdev->qpairs = NULL;
- return err;
-}
-
-static void scif_p2p_freesg(struct scatterlist *sg)
-{
- kfree(sg);
-}
-
-static struct scatterlist *
-scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
-{
- struct scatterlist *sg;
- struct page *page;
- int i;
-
- sg = kcalloc(page_cnt, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return NULL;
- sg_init_table(sg, page_cnt);
- for (i = 0; i < page_cnt; i++) {
- page = pfn_to_page(pa >> PAGE_SHIFT);
- sg_set_page(&sg[i], page, page_size, 0);
- pa += page_size;
- }
- return sg;
-}
-
-/* Init p2p mappings required to access peerdev from scifdev */
-static struct scif_p2p_info *
-scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
-{
- struct scif_p2p_info *p2p;
- int num_mmio_pages, num_aper_pages, sg_page_shift, err, num_aper_chunks;
- struct scif_hw_dev *psdev = peerdev->sdev;
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- num_mmio_pages = psdev->mmio->len >> PAGE_SHIFT;
- num_aper_pages = psdev->aper->len >> PAGE_SHIFT;
-
- p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
- if (!p2p)
- return NULL;
- p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
- PAGE_SIZE, num_mmio_pages);
- if (!p2p->ppi_sg[SCIF_PPI_MMIO])
- goto free_p2p;
- p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
- sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
- num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
- p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
- 1 << sg_page_shift,
- num_aper_chunks);
- p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
- err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
-			 num_mmio_pages, DMA_BIDIRECTIONAL);
- if (err != num_mmio_pages)
- goto scif_p2p_free;
- err = dma_map_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
-			 num_aper_chunks, DMA_BIDIRECTIONAL);
- if (err != num_aper_chunks)
- goto dma_unmap;
- p2p->ppi_da[SCIF_PPI_MMIO] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_MMIO]);
- p2p->ppi_da[SCIF_PPI_APER] = sg_dma_address(p2p->ppi_sg[SCIF_PPI_APER]);
- p2p->ppi_len[SCIF_PPI_MMIO] = num_mmio_pages;
- p2p->ppi_len[SCIF_PPI_APER] = num_aper_pages;
- p2p->ppi_peer_id = peerdev->node;
- return p2p;
-dma_unmap:
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
-scif_p2p_free:
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
-free_p2p:
- kfree(p2p);
- return NULL;
-}
-
-/* Uninitialize and release resources from a p2p mapping */
-static void scif_deinit_p2p_info(struct scif_dev *scifdev,
- struct scif_p2p_info *p2p)
-{
- struct scif_hw_dev *sdev = scifdev->sdev;
-
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO], DMA_BIDIRECTIONAL);
- dma_unmap_sg(&sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER], DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- kfree(p2p);
-}
-
-/**
- * scif_node_connect() - Respond to SCIF_NODE_CONNECT interrupt message
- * @scifdev: SCIF device
- * @dst: Destination node
- *
- * Connect the src and dst node by setting up the p2p connection
- * between them. The management node here acts as a proxy.
- */
-static void scif_node_connect(struct scif_dev *scifdev, int dst)
-{
- struct scif_dev *dev_j = scifdev;
- struct scif_dev *dev_i = NULL;
- struct scif_p2p_info *p2p_ij = NULL; /* bus addr for j from i */
- struct scif_p2p_info *p2p_ji = NULL; /* bus addr for i from j */
- struct scif_p2p_info *p2p;
- struct list_head *pos, *tmp;
- struct scifmsg msg;
- int err;
- u64 tmppayload;
-
- if (dst < 1 || dst > scif_info.maxid)
- return;
-
- dev_i = &scif_dev[dst];
-
- if (!_scifdev_alive(dev_i))
- return;
- /*
- * If the p2p connection is already setup or in the process of setting
- * up then just ignore this request. The requested node will get
- * informed by SCIF_NODE_ADD_ACK or SCIF_NODE_ADD_NACK
- */
- if (!list_empty(&dev_i->p2p)) {
- list_for_each_safe(pos, tmp, &dev_i->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- if (p2p->ppi_peer_id == dev_j->node)
- return;
- }
- }
- p2p_ij = scif_init_p2p_info(dev_i, dev_j);
- if (!p2p_ij)
- return;
- p2p_ji = scif_init_p2p_info(dev_j, dev_i);
- if (!p2p_ji) {
- scif_deinit_p2p_info(dev_i, p2p_ij);
- return;
- }
- list_add_tail(&p2p_ij->ppi_list, &dev_i->p2p);
- list_add_tail(&p2p_ji->ppi_list, &dev_j->p2p);
-
- /*
- * Send a SCIF_NODE_ADD to dev_i, pass it its bus address
- * as seen from dev_j
- */
- msg.uop = SCIF_NODE_ADD;
- msg.src.node = dev_j->node;
- msg.dst.node = dev_i->node;
-
- msg.payload[0] = p2p_ji->ppi_da[SCIF_PPI_APER];
- msg.payload[1] = p2p_ij->ppi_da[SCIF_PPI_MMIO];
- msg.payload[2] = p2p_ij->ppi_da[SCIF_PPI_APER];
- msg.payload[3] = p2p_ij->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
-
- err = scif_nodeqp_send(dev_i, &msg);
- if (err) {
- dev_err(&scifdev->sdev->dev,
- "%s %d error %d\n", __func__, __LINE__, err);
- return;
- }
-
- /* Same as above but to dev_j */
- msg.uop = SCIF_NODE_ADD;
- msg.src.node = dev_i->node;
- msg.dst.node = dev_j->node;
-
- tmppayload = msg.payload[0];
- msg.payload[0] = msg.payload[2];
- msg.payload[2] = tmppayload;
- msg.payload[1] = p2p_ji->ppi_da[SCIF_PPI_MMIO];
- msg.payload[3] = p2p_ji->ppi_len[SCIF_PPI_APER] << PAGE_SHIFT;
-
- scif_nodeqp_send(dev_j, &msg);
-}
-
-static void scif_p2p_setup(void)
-{
- int i, j;
-
- if (!scif_info.p2p_enable)
- return;
-
- for (i = 1; i <= scif_info.maxid; i++)
- if (!_scifdev_alive(&scif_dev[i]))
- return;
-
- for (i = 1; i <= scif_info.maxid; i++) {
- for (j = 1; j <= scif_info.maxid; j++) {
- struct scif_dev *scifdev = &scif_dev[i];
-
- if (i == j)
- continue;
- scif_node_connect(scifdev, j);
- }
- }
-}
-
-static char *message_types[] = {"BAD",
- "INIT",
- "EXIT",
- "SCIF_EXIT_ACK",
- "SCIF_NODE_ADD",
- "SCIF_NODE_ADD_ACK",
- "SCIF_NODE_ADD_NACK",
- "REMOVE_NODE",
- "REMOVE_NODE_ACK",
- "CNCT_REQ",
- "CNCT_GNT",
- "CNCT_GNTACK",
- "CNCT_GNTNACK",
- "CNCT_REJ",
- "DISCNCT",
- "DISCNT_ACK",
- "CLIENT_SENT",
- "CLIENT_RCVD",
- "SCIF_GET_NODE_INFO",
- "REGISTER",
- "REGISTER_ACK",
- "REGISTER_NACK",
- "UNREGISTER",
- "UNREGISTER_ACK",
- "UNREGISTER_NACK",
- "ALLOC_REQ",
- "ALLOC_GNT",
- "ALLOC_REJ",
- "FREE_PHYS",
- "FREE_VIRT",
- "MUNMAP",
- "MARK",
- "MARK_ACK",
- "MARK_NACK",
- "WAIT",
- "WAIT_ACK",
- "WAIT_NACK",
- "SIGNAL_LOCAL",
- "SIGNAL_REMOTE",
- "SIG_ACK",
- "SIG_NACK"};
-
-static void
-scif_display_message(struct scif_dev *scifdev, struct scifmsg *msg,
- const char *label)
-{
- if (!scif_info.en_msg_log)
- return;
- if (msg->uop > SCIF_MAX_MSG) {
- dev_err(&scifdev->sdev->dev,
- "%s: unknown msg type %d\n", label, msg->uop);
- return;
- }
- dev_info(&scifdev->sdev->dev,
- "%s: msg type %s, src %d:%d, dest %d:%d payload 0x%llx:0x%llx:0x%llx:0x%llx\n",
- label, message_types[msg->uop], msg->src.node, msg->src.port,
- msg->dst.node, msg->dst.port, msg->payload[0], msg->payload[1],
- msg->payload[2], msg->payload[3]);
-}
-
-int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_qp *qp = scifdev->qpairs;
- int err = -ENOMEM, loop_cnt = 0;
-
- scif_display_message(scifdev, msg, "Sent");
- if (!qp) {
- err = -EINVAL;
- goto error;
- }
- spin_lock(&qp->send_lock);
-
- while ((err = scif_rb_write(&qp->outbound_q,
- msg, sizeof(struct scifmsg)))) {
- mdelay(1);
-#define SCIF_NODEQP_SEND_TO_MSEC (3 * 1000)
- if (loop_cnt++ > (SCIF_NODEQP_SEND_TO_MSEC)) {
- err = -ENODEV;
- break;
- }
- }
- if (!err)
- scif_rb_commit(&qp->outbound_q);
- spin_unlock(&qp->send_lock);
- if (!err) {
- if (scifdev_self(scifdev))
- /*
- * For loopback we need to emulate an interrupt by
- * queuing work for the queue handling real node
- * Qp interrupts.
- */
- queue_work(scifdev->intr_wq, &scifdev->intr_bh);
- else
- scif_send_msg_intr(scifdev);
- }
-error:
- if (err)
- dev_dbg(&scifdev->sdev->dev,
- "%s %d error %d uop %d\n",
- __func__, __LINE__, err, msg->uop);
- return err;
-}
-
-/**
- * scif_nodeqp_send - Send a message on the node queue pair
- * @scifdev: Scif Device.
- * @msg: The message to be sent.
- */
-int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int err;
- struct device *spdev = NULL;
-
- if (msg->uop > SCIF_EXIT_ACK) {
- /* Don't send messages once the exit flow has begun */
-		if (scifdev->exit != OP_IDLE)
- return -ENODEV;
- spdev = scif_get_peer_dev(scifdev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
- }
- err = _scif_nodeqp_send(scifdev, msg);
- if (msg->uop > SCIF_EXIT_ACK)
- scif_put_peer_dev(spdev);
- return err;
-}
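-
-/*
- * Illustrative sketch (not part of the original driver): how a caller
- * typically fills in a struct scifmsg and hands it to scif_nodeqp_send().
- * The helper name is hypothetical and the opcode is picked only for
- * illustration; the payload format is per-opcode (see scif_nodeqp.h).
- */
-#if 0
-static int scif_send_example(struct scif_dev *scifdev)
-{
-	struct scifmsg msg = { 0 };
-
-	msg.uop = SCIF_CLIENT_SENT;	/* opcode picked for illustration */
-	msg.src.node = scif_info.nodeid;
-	msg.dst.node = scifdev->node;
-	msg.payload[0] = 0;		/* opcode-specific payload */
-	return scif_nodeqp_send(scifdev, &msg);
-}
-#endif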
-
-/*
- * scif_misc_handler:
- *
- * Work queue handler for servicing miscellaneous SCIF tasks.
- * Examples include:
- * 1) Remote fence requests.
- * 2) Destruction of temporary registered windows
- * created during scif_vreadfrom()/scif_vwriteto().
- * 3) Cleanup of zombie endpoints.
- */
-void scif_misc_handler(struct work_struct *work)
-{
- scif_rma_handle_remote_fences();
- scif_rma_destroy_windows();
- scif_rma_destroy_tcw_invalid();
- scif_cleanup_zombie_epd();
-}
-
-/**
- * scif_init() - Respond to SCIF_INIT interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- */
-static __always_inline void
-scif_init(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- /*
- * Allow the thread waiting for device page updates for the peer QP DMA
- * address to complete initializing the inbound_q.
- */
- flush_delayed_work(&scifdev->qp_dwork);
-
- scif_peer_register_device(scifdev);
-
- if (scif_is_mgmt_node()) {
- mutex_lock(&scif_info.conflock);
- scif_p2p_setup();
- mutex_unlock(&scif_info.conflock);
- }
-}
-
-/**
- * scif_exit() - Respond to SCIF_EXIT interrupt message
- * @scifdev: Remote SCIF device node
- * @unused: Interrupt message (unused)
- *
- * This function stops the SCIF interface for the node which sent
- * the SCIF_EXIT message and starts waiting for that node to set up
- * the queue pair again.
- */
-static __always_inline void
-scif_exit(struct scif_dev *scifdev, struct scifmsg *unused)
-{
- scifdev->exit_ack_pending = true;
- if (scif_is_mgmt_node())
- scif_disconnect_node(scifdev->node, false);
- else
- scif_stop(scifdev);
- schedule_delayed_work(&scifdev->qp_dwork,
- msecs_to_jiffies(1000));
-}
-
-/**
- * scif_exit_ack() - Respond to SCIF_EXIT_ACK interrupt message
- * @scifdev: Remote SCIF device node
- * @unused: Interrupt message (unused)
- */
-static __always_inline void
-scif_exit_ack(struct scif_dev *scifdev, struct scifmsg *unused)
-{
- scifdev->exit = OP_COMPLETED;
- wake_up(&scif_info.exitwq);
-}
-
-/**
- * scif_node_add() - Respond to SCIF_NODE_ADD interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * When the mgmt node driver has finished initializing a MIC node queue pair it
- * marks the node as online. It then looks for all currently online MIC cards
- * and sends a SCIF_NODE_ADD message to identify the ID of the new card for
- * peer-to-peer initialization.
- *
- * The local node allocates its incoming queue and sends its address in the
- * SCIF_NODE_ADD_ACK message back to the mgmt node; the mgmt node "reflects"
- * this message to the new node.
- */
-static __always_inline void
-scif_node_add(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *newdev;
- dma_addr_t qp_offset;
- int qp_connect;
- struct scif_hw_dev *sdev;
-
- dev_dbg(&scifdev->sdev->dev,
- "Scifdev %d:%d received NODE_ADD msg for node %d\n",
- scifdev->node, msg->dst.node, msg->src.node);
- dev_dbg(&scifdev->sdev->dev,
- "Remote address for this node's aperture %llx\n",
- msg->payload[0]);
- newdev = &scif_dev[msg->src.node];
- newdev->node = msg->src.node;
- newdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
- sdev = newdev->sdev;
-
- if (scif_setup_intr_wq(newdev)) {
- dev_err(&scifdev->sdev->dev,
- "failed to setup interrupts for %d\n", msg->src.node);
- goto interrupt_setup_error;
- }
- newdev->mmio.va = ioremap(msg->payload[1], sdev->mmio->len);
- if (!newdev->mmio.va) {
- dev_err(&scifdev->sdev->dev,
- "failed to map mmio for %d\n", msg->src.node);
- goto mmio_map_error;
- }
- newdev->qpairs = kzalloc(sizeof(*newdev->qpairs), GFP_KERNEL);
- if (!newdev->qpairs)
- goto qp_alloc_error;
- /*
- * Set the base address of the remote node's memory since it gets
- * added to qp_offset
- */
- newdev->base_addr = msg->payload[0];
-
- qp_connect = scif_setup_qp_connect(newdev->qpairs, &qp_offset,
- SCIF_NODE_QP_SIZE, newdev);
- if (qp_connect) {
- dev_err(&scifdev->sdev->dev,
- "failed to setup qp_connect %d\n", qp_connect);
- goto qp_connect_error;
- }
-
- newdev->db = sdev->hw_ops->next_db(sdev);
- newdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
- "SCIF_INTR", newdev,
- newdev->db);
- if (IS_ERR(newdev->cookie))
- goto qp_connect_error;
- newdev->qpairs->magic = SCIFEP_MAGIC;
- newdev->qpairs->qp_state = SCIF_QP_OFFLINE;
-
- msg->uop = SCIF_NODE_ADD_ACK;
- msg->dst.node = msg->src.node;
- msg->src.node = scif_info.nodeid;
- msg->payload[0] = qp_offset;
- msg->payload[2] = newdev->db;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
- return;
-qp_connect_error:
- kfree(newdev->qpairs);
- newdev->qpairs = NULL;
-qp_alloc_error:
- iounmap(newdev->mmio.va);
- newdev->mmio.va = NULL;
-mmio_map_error:
-interrupt_setup_error:
- dev_err(&scifdev->sdev->dev,
- "node add failed for node %d\n", msg->src.node);
- msg->uop = SCIF_NODE_ADD_NACK;
- msg->dst.node = msg->src.node;
- msg->src.node = scif_info.nodeid;
- scif_nodeqp_send(&scif_dev[SCIF_MGMT_NODE], msg);
-}
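-
-/*
- * For reference, the NODE_ADD handshake handled above, sketched as a
- * message sequence (reconstructed from the handlers in this file):
- *
- *   mgmt node              receiving node             announced peer
- *      |---- SCIF_NODE_ADD ---->|                           |
- *      |        (map mmio, allocate QP, request irq)        |
- *      |<-- SCIF_NODE_ADD_ACK --|                           |
- *      |---------- SCIF_NODE_ADD_ACK (reflected) ---------->|
- *
- * On failure the receiving node replies with SCIF_NODE_ADD_NACK, which
- * the mgmt node likewise forwards.
- */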
-
-void scif_poll_qp_state(struct work_struct *work)
-{
-#define SCIF_NODE_QP_RETRY 100
-#define SCIF_NODE_QP_TIMEOUT 100
- struct scif_dev *peerdev = container_of(work, struct scif_dev,
- p2p_dwork.work);
- struct scif_qp *qp = &peerdev->qpairs[0];
-
- if (qp->qp_state != SCIF_QP_ONLINE ||
- qp->remote_qp->qp_state != SCIF_QP_ONLINE) {
- if (peerdev->p2p_retry++ == SCIF_NODE_QP_RETRY) {
- dev_err(&peerdev->sdev->dev,
- "Warning: QP check timeout with state %d\n",
- qp->qp_state);
- goto timeout;
- }
- schedule_delayed_work(&peerdev->p2p_dwork,
- msecs_to_jiffies(SCIF_NODE_QP_TIMEOUT));
- return;
- }
- return;
-timeout:
- dev_err(&peerdev->sdev->dev,
- "%s %d remote node %d offline, state = 0x%x\n",
- __func__, __LINE__, peerdev->node, qp->qp_state);
- qp->remote_qp->qp_state = SCIF_QP_OFFLINE;
- scif_peer_unregister_device(peerdev);
- scif_cleanup_scifdev(peerdev);
-}
-
-/**
- * scif_node_add_ack() - Respond to SCIF_NODE_ADD_ACK interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * After a MIC node receives the SCIF_NODE_ADD_ACK message it sends this
- * message to the mgmt node to confirm the sequence is finished.
- *
- */
-static __always_inline void
-scif_node_add_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *peerdev;
- struct scif_qp *qp;
- struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
-
- dev_dbg(&scifdev->sdev->dev,
- "Scifdev %d received SCIF_NODE_ADD_ACK msg src %d dst %d\n",
- scifdev->node, msg->src.node, msg->dst.node);
- dev_dbg(&scifdev->sdev->dev,
- "payload %llx %llx %llx %llx\n", msg->payload[0],
- msg->payload[1], msg->payload[2], msg->payload[3]);
- if (scif_is_mgmt_node()) {
- /*
-		 * The lock serializes with scif_qp_response_ack. The mgmt node
-		 * is forwarding the NODE_ADD_ACK message from src to dst, so we
-		 * need to make sure that dst has already received a NODE_ADD
-		 * for src and set up its end of the qp.
- */
- mutex_lock(&scif_info.conflock);
- msg->payload[1] = scif_info.maxid;
- scif_nodeqp_send(dst_dev, msg);
- mutex_unlock(&scif_info.conflock);
- return;
- }
- peerdev = &scif_dev[msg->src.node];
- peerdev->sdev = scif_dev[SCIF_MGMT_NODE].sdev;
- peerdev->node = msg->src.node;
-
- qp = &peerdev->qpairs[0];
-
- if ((scif_setup_qp_connect_response(peerdev, &peerdev->qpairs[0],
- msg->payload[0])))
- goto local_error;
- peerdev->rdb = msg->payload[2];
- qp->remote_qp->qp_state = SCIF_QP_ONLINE;
-
- scif_peer_register_device(peerdev);
-
- schedule_delayed_work(&peerdev->p2p_dwork, 0);
- return;
-local_error:
- scif_cleanup_scifdev(peerdev);
-}
-
-/**
- * scif_node_add_nack() - Respond to SCIF_NODE_ADD_NACK interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * SCIF_NODE_ADD failed, so inform the waiting wq.
- */
-static __always_inline void
-scif_node_add_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- if (scif_is_mgmt_node()) {
- struct scif_dev *dst_dev = &scif_dev[msg->dst.node];
-
- dev_dbg(&scifdev->sdev->dev,
- "SCIF_NODE_ADD_NACK received from %d\n", scifdev->node);
- scif_nodeqp_send(dst_dev, msg);
- }
-}
-
-/**
- * scif_node_remove() - Handle SCIF_NODE_REMOVE message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * Handle node removal.
- */
-static __always_inline void
-scif_node_remove(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int node = msg->payload[0];
- struct scif_dev *scdev = &scif_dev[node];
-
- scdev->node_remove_ack_pending = true;
- scif_handle_remove_node(node);
-}
-
-/**
- * scif_node_remove_ack() - Handle SCIF_NODE_REMOVE_ACK message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * The peer has acked a SCIF_NODE_REMOVE message.
- */
-static __always_inline void
-scif_node_remove_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_dev *sdev = &scif_dev[msg->payload[0]];
-
- atomic_inc(&sdev->disconn_rescnt);
- wake_up(&sdev->disconn_wq);
-}
-
-/**
- * scif_get_node_info: Respond to SCIF_GET_NODE_INFO interrupt message
- * @scifdev: Remote SCIF device node
- * @msg: Interrupt message
- *
- * Retrieve node info i.e maxid and total from the mgmt node.
- */
-static __always_inline void
-scif_get_node_info_resp(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- if (scif_is_mgmt_node()) {
- swap(msg->dst.node, msg->src.node);
- mutex_lock(&scif_info.conflock);
- msg->payload[1] = scif_info.maxid;
- msg->payload[2] = scif_info.total;
- mutex_unlock(&scif_info.conflock);
- scif_nodeqp_send(scifdev, msg);
- } else {
- struct completion *node_info =
- (struct completion *)msg->payload[3];
-
- mutex_lock(&scif_info.conflock);
- scif_info.maxid = msg->payload[1];
- scif_info.total = msg->payload[2];
- complete_all(node_info);
- mutex_unlock(&scif_info.conflock);
- }
-}
-
-static void
-scif_msg_unknown(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- /* Bogus Node Qp Message? */
- dev_err(&scifdev->sdev->dev,
-		"Unknown message 0x%x scifdev->node 0x%x\n",
- msg->uop, scifdev->node);
-}
-
-static void (*scif_intr_func[SCIF_MAX_MSG + 1])
- (struct scif_dev *, struct scifmsg *msg) = {
- scif_msg_unknown, /* Error */
- scif_init, /* SCIF_INIT */
- scif_exit, /* SCIF_EXIT */
- scif_exit_ack, /* SCIF_EXIT_ACK */
- scif_node_add, /* SCIF_NODE_ADD */
- scif_node_add_ack, /* SCIF_NODE_ADD_ACK */
- scif_node_add_nack, /* SCIF_NODE_ADD_NACK */
- scif_node_remove, /* SCIF_NODE_REMOVE */
- scif_node_remove_ack, /* SCIF_NODE_REMOVE_ACK */
- scif_cnctreq, /* SCIF_CNCT_REQ */
- scif_cnctgnt, /* SCIF_CNCT_GNT */
- scif_cnctgnt_ack, /* SCIF_CNCT_GNTACK */
- scif_cnctgnt_nack, /* SCIF_CNCT_GNTNACK */
- scif_cnctrej, /* SCIF_CNCT_REJ */
- scif_discnct, /* SCIF_DISCNCT */
- scif_discnt_ack, /* SCIF_DISCNT_ACK */
- scif_clientsend, /* SCIF_CLIENT_SENT */
- scif_clientrcvd, /* SCIF_CLIENT_RCVD */
- scif_get_node_info_resp,/* SCIF_GET_NODE_INFO */
- scif_recv_reg, /* SCIF_REGISTER */
- scif_recv_reg_ack, /* SCIF_REGISTER_ACK */
- scif_recv_reg_nack, /* SCIF_REGISTER_NACK */
- scif_recv_unreg, /* SCIF_UNREGISTER */
- scif_recv_unreg_ack, /* SCIF_UNREGISTER_ACK */
- scif_recv_unreg_nack, /* SCIF_UNREGISTER_NACK */
- scif_alloc_req, /* SCIF_ALLOC_REQ */
- scif_alloc_gnt_rej, /* SCIF_ALLOC_GNT */
- scif_alloc_gnt_rej, /* SCIF_ALLOC_REJ */
- scif_free_virt, /* SCIF_FREE_VIRT */
- scif_recv_munmap, /* SCIF_MUNMAP */
- scif_recv_mark, /* SCIF_MARK */
- scif_recv_mark_resp, /* SCIF_MARK_ACK */
- scif_recv_mark_resp, /* SCIF_MARK_NACK */
- scif_recv_wait, /* SCIF_WAIT */
- scif_recv_wait_resp, /* SCIF_WAIT_ACK */
- scif_recv_wait_resp, /* SCIF_WAIT_NACK */
- scif_recv_sig_local, /* SCIF_SIG_LOCAL */
- scif_recv_sig_remote, /* SCIF_SIG_REMOTE */
- scif_recv_sig_resp, /* SCIF_SIG_ACK */
- scif_recv_sig_resp, /* SCIF_SIG_NACK */
-};
-
-static int scif_max_msg_id = SCIF_MAX_MSG;
-/**
- * scif_nodeqp_msg_handler() - Common handler for node messages
- * @scifdev: Remote device to respond to
- * @qp: Remote memory pointer
- * @msg: The message to be handled.
- *
- * This routine calls the appropriate routine to handle a Node Qp
- * message receipt
- */
-static void
-scif_nodeqp_msg_handler(struct scif_dev *scifdev,
- struct scif_qp *qp, struct scifmsg *msg)
-{
- scif_display_message(scifdev, msg, "Rcvd");
-
-	if (msg->uop > (u32)scif_max_msg_id) {
-		/* Bogus Node Qp Message? */
-		scif_msg_unknown(scifdev, msg);
-		return;
-	}
-
- scif_intr_func[msg->uop](scifdev, msg);
-}
-
-/**
- * scif_nodeqp_intrhandler() - Interrupt handler for node messages
- * @scifdev: Remote device to respond to
- * @qp: Remote memory pointer
- *
- * This routine is triggered by the interrupt mechanism. It reads
- * messages from the node queue RB and calls the Node QP Message handling
- * routine.
- */
-void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp)
-{
- struct scifmsg msg;
- int read_size;
-
- do {
- read_size = scif_rb_get_next(&qp->inbound_q, &msg, sizeof(msg));
- if (!read_size)
- break;
- scif_nodeqp_msg_handler(scifdev, qp, &msg);
- /*
- * The node queue pair is unmapped so skip the read pointer
- * update after receipt of a SCIF_EXIT_ACK
- */
- if (SCIF_EXIT_ACK == msg.uop)
- break;
- scif_rb_update_read_ptr(&qp->inbound_q);
- } while (1);
-}
-
-/**
- * scif_loopb_wq_handler - Loopback Workqueue Handler.
- * @unused: loop back work (unused)
- *
- * This routine runs on the loopback workqueue. It grabs the recv lock,
- * dequeues any available messages from the head of the loopback message
- * list, calls the node QP message handler, waits for it to return, then
- * frees the message and dequeues more elements from the list if available.
- */
-static void scif_loopb_wq_handler(struct work_struct *unused)
-{
- struct scif_dev *scifdev = scif_info.loopb_dev;
- struct scif_qp *qp = scifdev->qpairs;
- struct scif_loopb_msg *msg;
-
- do {
- msg = NULL;
- spin_lock(&qp->recv_lock);
- if (!list_empty(&scif_info.loopb_recv_q)) {
- msg = list_first_entry(&scif_info.loopb_recv_q,
- struct scif_loopb_msg,
- list);
- list_del(&msg->list);
- }
- spin_unlock(&qp->recv_lock);
-
- if (msg) {
- scif_nodeqp_msg_handler(scifdev, qp, &msg->msg);
- kfree(msg);
- }
- } while (msg);
-}
-
-/**
- * scif_loopb_msg_handler() - Workqueue handler for loopback messages.
- * @scifdev: SCIF device
- * @qp: Queue pair.
- *
- * This work queue routine is triggered when a loopback message is received.
- *
- * Receiving Node QP messages on a loopback SCIF device needs special
- * handling via two workqueues.
- *
- * The reason we need the extra workqueue, which is not required with *normal*
- * non-loopback SCIF devices, is the potential classic deadlock described
- * below:
- *
- * Thread A tries to send a message on a loopback SCIF device and blocks,
- * since there is no space in the RB, while holding the send_lock or some
- * other lock, call it lock X.
- *
- * Thread B: The loopback Node QP message receive workqueue receives the
- * message and tries to send a message (e.g. an ACK) on the loopback SCIF
- * device. It tries to grab the send_lock again, or lock X, and deadlocks
- * with Thread A. The RB cannot be drained any further due to this classic
- * deadlock.
- *
- * In order to avoid deadlocks as mentioned above we have an extra level of
- * indirection achieved by having two workqueues.
- * 1) The first workqueue whose handler is scif_loopb_msg_handler reads
- * messages from the Node QP RB, adds them to a list and queues work for the
- * second workqueue.
- *
- * 2) The second workqueue whose handler is scif_loopb_wq_handler dequeues
- * messages from the list, handles them, frees up the memory and dequeues
- * more elements from the list if possible.
- */
-int
-scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp)
-{
- int read_size;
- struct scif_loopb_msg *msg;
-
- do {
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
- read_size = scif_rb_get_next(&qp->inbound_q, &msg->msg,
- sizeof(struct scifmsg));
- if (read_size != sizeof(struct scifmsg)) {
- kfree(msg);
- scif_rb_update_read_ptr(&qp->inbound_q);
- break;
- }
- spin_lock(&qp->recv_lock);
- list_add_tail(&msg->list, &scif_info.loopb_recv_q);
- spin_unlock(&qp->recv_lock);
- queue_work(scif_info.loopb_wq, &scif_info.loopb_work);
- scif_rb_update_read_ptr(&qp->inbound_q);
- } while (read_size == sizeof(struct scifmsg));
- return read_size;
-}
-
-/**
- * scif_setup_loopback_qp - One-time setup work for the loopback Node QP.
- * @scifdev: SCIF device
- *
- * Sets up the required loopback workqueues, queue pairs and ring buffers
- */
-int scif_setup_loopback_qp(struct scif_dev *scifdev)
-{
- int err = 0;
- void *local_q;
- struct scif_qp *qp;
-
- err = scif_setup_intr_wq(scifdev);
- if (err)
- goto exit;
- INIT_LIST_HEAD(&scif_info.loopb_recv_q);
- snprintf(scif_info.loopb_wqname, sizeof(scif_info.loopb_wqname),
- "SCIF LOOPB %d", scifdev->node);
- scif_info.loopb_wq =
- alloc_ordered_workqueue(scif_info.loopb_wqname, 0);
- if (!scif_info.loopb_wq) {
- err = -ENOMEM;
- goto destroy_intr;
- }
- INIT_WORK(&scif_info.loopb_work, scif_loopb_wq_handler);
- /* Allocate Self Qpair */
- scifdev->qpairs = kzalloc(sizeof(*scifdev->qpairs), GFP_KERNEL);
- if (!scifdev->qpairs) {
- err = -ENOMEM;
- goto destroy_loopb_wq;
- }
-
- qp = scifdev->qpairs;
- qp->magic = SCIFEP_MAGIC;
- spin_lock_init(&qp->send_lock);
- spin_lock_init(&qp->recv_lock);
-
- local_q = kzalloc(SCIF_NODE_QP_SIZE, GFP_KERNEL);
- if (!local_q) {
- err = -ENOMEM;
- goto free_qpairs;
- }
- /*
- * For loopback the inbound_q and outbound_q are essentially the same
- * since the Node sends a message on the loopback interface to the
- * outbound_q which is then received on the inbound_q.
- */
- scif_rb_init(&qp->outbound_q,
- &qp->local_read,
- &qp->local_write,
- local_q, get_count_order(SCIF_NODE_QP_SIZE));
-
- scif_rb_init(&qp->inbound_q,
- &qp->local_read,
- &qp->local_write,
- local_q, get_count_order(SCIF_NODE_QP_SIZE));
- scif_info.nodeid = scifdev->node;
-
- scif_peer_register_device(scifdev);
-
- scif_info.loopb_dev = scifdev;
- return err;
-free_qpairs:
- kfree(scifdev->qpairs);
-destroy_loopb_wq:
- destroy_workqueue(scif_info.loopb_wq);
-destroy_intr:
- scif_destroy_intr_wq(scifdev);
-exit:
- return err;
-}
-
-/**
- * scif_destroy_loopback_qp - One-time teardown work for the loopback Node QP
- * @scifdev: SCIF device
- *
- * Destroys the workqueues and frees up the Ring Buffer and Queue Pair memory.
- */
-int scif_destroy_loopback_qp(struct scif_dev *scifdev)
-{
- scif_peer_unregister_device(scifdev);
- destroy_workqueue(scif_info.loopb_wq);
- scif_destroy_intr_wq(scifdev);
- kfree(scifdev->qpairs->outbound_q.rb_base);
- kfree(scifdev->qpairs);
- scifdev->sdev = NULL;
- scif_info.loopb_dev = NULL;
- return 0;
-}
-
-void scif_destroy_p2p(struct scif_dev *scifdev)
-{
- struct scif_dev *peer_dev;
- struct scif_p2p_info *p2p;
- struct list_head *pos, *tmp;
- int bd;
-
- mutex_lock(&scif_info.conflock);
- /* Free P2P mappings in the given node for all its peer nodes */
- list_for_each_safe(pos, tmp, &scifdev->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO],
- DMA_BIDIRECTIONAL);
- dma_unmap_sg(&scifdev->sdev->dev, p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER],
- DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- list_del(pos);
- kfree(p2p);
- }
-
- /* Free P2P mapping created in the peer nodes for the given node */
- for (bd = SCIF_MGMT_NODE + 1; bd <= scif_info.maxid; bd++) {
- peer_dev = &scif_dev[bd];
- list_for_each_safe(pos, tmp, &peer_dev->p2p) {
- p2p = list_entry(pos, struct scif_p2p_info, ppi_list);
- if (p2p->ppi_peer_id == scifdev->node) {
- dma_unmap_sg(&peer_dev->sdev->dev,
- p2p->ppi_sg[SCIF_PPI_MMIO],
- p2p->sg_nentries[SCIF_PPI_MMIO],
- DMA_BIDIRECTIONAL);
- dma_unmap_sg(&peer_dev->sdev->dev,
- p2p->ppi_sg[SCIF_PPI_APER],
- p2p->sg_nentries[SCIF_PPI_APER],
- DMA_BIDIRECTIONAL);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_MMIO]);
- scif_p2p_freesg(p2p->ppi_sg[SCIF_PPI_APER]);
- list_del(pos);
- kfree(p2p);
- }
- }
- }
- mutex_unlock(&scif_info.conflock);
-}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.h b/drivers/misc/mic/scif/scif_nodeqp.h
deleted file mode 100644
index 95896273138e..000000000000
--- a/drivers/misc/mic/scif/scif_nodeqp.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef SCIF_NODEQP
-#define SCIF_NODEQP
-
-#include "scif_rb.h"
-#include "scif_peer_bus.h"
-
-#define SCIF_INIT 1 /* First message sent to the peer node for discovery */
-#define SCIF_EXIT 2 /* Last message from the peer informing intent to exit */
-#define SCIF_EXIT_ACK 3 /* Response to SCIF_EXIT message */
-#define SCIF_NODE_ADD 4 /* Tell online nodes a new node exists */
-#define SCIF_NODE_ADD_ACK 5 /* Confirm to mgmt node sequence is finished */
-#define SCIF_NODE_ADD_NACK 6 /* SCIF_NODE_ADD failed */
-#define SCIF_NODE_REMOVE 7 /* Request to deactivate a SCIF node */
-#define SCIF_NODE_REMOVE_ACK 8 /* Response to a SCIF_NODE_REMOVE message */
-#define SCIF_CNCT_REQ 9 /* Request a connection, payload has QP phys addr */
-#define SCIF_CNCT_GNT 10 /* Grant a connection, payload has QP phys addr */
-#define SCIF_CNCT_GNTACK 11 /* Acknowledge a connection grant */
-#define SCIF_CNCT_GNTNACK 12 /* Reject a connection grant */
-#define SCIF_CNCT_REJ 13 /* Reject a connection request */
-#define SCIF_DISCNCT 14 /* Notify peer that connection is being terminated */
-#define SCIF_DISCNT_ACK 15 /* Response to a SCIF_DISCNCT message */
-#define SCIF_CLIENT_SENT 16 /* Notify the peer that data has been written */
-#define SCIF_CLIENT_RCVD 17 /* Notify the peer that data has been read */
-#define SCIF_GET_NODE_INFO 18 /* Get node info (maxid, total) from the mgmt node */
-#define SCIF_REGISTER 19 /* Tell peer about a new registered window */
-#define SCIF_REGISTER_ACK 20 /* Notify peer about registration success */
-#define SCIF_REGISTER_NACK 21 /* Notify peer about registration failure */
-#define SCIF_UNREGISTER 22 /* Tell peer about unregistering a window */
-#define SCIF_UNREGISTER_ACK 23 /* Notify peer about unregistration success */
-#define SCIF_UNREGISTER_NACK 24 /* Notify peer about unregistration failure */
-#define SCIF_ALLOC_REQ 25 /* Request a mapped buffer */
-#define SCIF_ALLOC_GNT 26 /* Notify peer about allocation success */
-#define SCIF_ALLOC_REJ 27 /* Notify peer about allocation failure */
-#define SCIF_FREE_VIRT 28 /* Free previously allocated virtual memory */
-#define SCIF_MUNMAP 29 /* Notify peer that a remote window was unmapped */
-#define SCIF_MARK 30 /* SCIF Remote Fence Mark Request */
-#define SCIF_MARK_ACK 31 /* SCIF Remote Fence Mark Success */
-#define SCIF_MARK_NACK 32 /* SCIF Remote Fence Mark Failure */
-#define SCIF_WAIT 33 /* SCIF Remote Fence Wait Request */
-#define SCIF_WAIT_ACK 34 /* SCIF Remote Fence Wait Success */
-#define SCIF_WAIT_NACK 35 /* SCIF Remote Fence Wait Failure */
-#define SCIF_SIG_LOCAL 36 /* SCIF Remote Fence Local Signal Request */
-#define SCIF_SIG_REMOTE 37 /* SCIF Remote Fence Remote Signal Request */
-#define SCIF_SIG_ACK 38 /* SCIF Remote Fence Remote Signal Success */
-#define SCIF_SIG_NACK 39 /* SCIF Remote Fence Remote Signal Failure */
-#define SCIF_MAX_MSG SCIF_SIG_NACK
-
-/*
- * struct scifmsg - Node QP message format
- *
- * @src: Source information
- * @dst: Destination information
- * @uop: The message opcode
- * @payload: Unique payload format for each message
- */
-struct scifmsg {
- struct scif_port_id src;
- struct scif_port_id dst;
- u32 uop;
- u64 payload[4];
-} __packed;
-
-/*
- * struct scif_allocmsg - Used with SCIF_ALLOC_REQ to request
- * the remote node to allocate memory
- *
- * @phys_addr: Physical address of the buffer
- * @vaddr: Virtual address of the buffer
- * @size: Size of the buffer
- * @state: Current state
- * @allocwq: Wait queue for status
- */
-struct scif_allocmsg {
- dma_addr_t phys_addr;
- unsigned long vaddr;
- size_t size;
- enum scif_msg_state state;
- wait_queue_head_t allocwq;
-};
-
-/*
- * struct scif_qp - Node Queue Pair
- *
- * Interesting structure -- a little difficult because we can only
- * write across PCIe, so any read/write pointer we need to read is
- * local. We only need to read the read pointer on the outbound_q
- * and the write pointer on the inbound_q.
- *
- * @magic: Magic value to ensure the peer sees the QP correctly
- * @outbound_q: The outbound ring buffer for sending messages
- * @inbound_q: The inbound ring buffer for receiving messages
- * @local_write: Local write index
- * @local_read: Local read index
- * @remote_qp: The remote queue pair
- * @local_buf: DMA address of local ring buffer
- * @local_qp: DMA address of the local queue pair data structure
- * @remote_buf: DMA address of remote ring buffer
- * @qp_state: QP state i.e. online or offline used for P2P
- * @send_lock: synchronize access to outbound queue
- * @recv_lock: Synchronize access to inbound queue
- */
-struct scif_qp {
- u64 magic;
-#define SCIFEP_MAGIC 0x5c1f000000005c1fULL
- struct scif_rb outbound_q;
- struct scif_rb inbound_q;
-
- u32 local_write __aligned(64);
- u32 local_read __aligned(64);
- struct scif_qp *remote_qp;
- dma_addr_t local_buf;
- dma_addr_t local_qp;
- dma_addr_t remote_buf;
- u32 qp_state;
-#define SCIF_QP_OFFLINE 0xdead
-#define SCIF_QP_ONLINE 0xc0de
- spinlock_t send_lock;
- spinlock_t recv_lock;
-};
-
-/*
- * struct scif_loopb_msg - An element in the loopback Node QP message list.
- *
- * @msg - The SCIF node QP message
- * @list - link in the list of messages
- */
-struct scif_loopb_msg {
- struct scifmsg msg;
- struct list_head list;
-};
-
-int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
-int _scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_nodeqp_intrhandler(struct scif_dev *scifdev, struct scif_qp *qp);
-int scif_loopb_msg_handler(struct scif_dev *scifdev, struct scif_qp *qp);
-int scif_setup_qp(struct scif_dev *scifdev);
-int scif_qp_response(phys_addr_t phys, struct scif_dev *dev);
-int scif_setup_qp_connect(struct scif_qp *qp, dma_addr_t *qp_offset,
- int local_size, struct scif_dev *scifdev);
-int scif_setup_qp_accept(struct scif_qp *qp, dma_addr_t *qp_offset,
- dma_addr_t phys, int local_size,
- struct scif_dev *scifdev);
-int scif_setup_qp_connect_response(struct scif_dev *scifdev,
- struct scif_qp *qp, u64 payload);
-int scif_setup_loopback_qp(struct scif_dev *scifdev);
-int scif_destroy_loopback_qp(struct scif_dev *scifdev);
-void scif_poll_qp_state(struct work_struct *work);
-void scif_destroy_p2p(struct scif_dev *scifdev);
-void scif_send_exit(struct scif_dev *scifdev);
-static inline struct device *scif_get_peer_dev(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
- struct device *spdev_ret;
-
- rcu_read_lock();
- spdev = rcu_dereference(scifdev->spdev);
- if (spdev)
- spdev_ret = get_device(&spdev->dev);
- else
- spdev_ret = ERR_PTR(-ENODEV);
- rcu_read_unlock();
- return spdev_ret;
-}
-
-static inline void scif_put_peer_dev(struct device *dev)
-{
- put_device(dev);
-}
-#endif /* SCIF_NODEQP */
diff --git a/drivers/misc/mic/scif/scif_peer_bus.c b/drivers/misc/mic/scif/scif_peer_bus.c
deleted file mode 100644
index 6d608308bb60..000000000000
--- a/drivers/misc/mic/scif/scif_peer_bus.c
+++ /dev/null
@@ -1,175 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include "../bus/scif_bus.h"
-#include "scif_peer_bus.h"
-
-static inline struct scif_peer_dev *
-dev_to_scif_peer(struct device *dev)
-{
- return container_of(dev, struct scif_peer_dev, dev);
-}
-
-struct bus_type scif_peer_bus = {
- .name = "scif_peer_bus",
-};
-
-static void scif_peer_release_dev(struct device *d)
-{
- struct scif_peer_dev *sdev = dev_to_scif_peer(d);
- struct scif_dev *scifdev = &scif_dev[sdev->dnode];
-
- scif_cleanup_scifdev(scifdev);
- kfree(sdev);
-}
-
-static int scif_peer_initialize_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
- int ret;
-
- spdev = kzalloc(sizeof(*spdev), GFP_KERNEL);
- if (!spdev) {
- ret = -ENOMEM;
- goto err;
- }
-
- spdev->dev.parent = scifdev->sdev->dev.parent;
- spdev->dev.release = scif_peer_release_dev;
- spdev->dnode = scifdev->node;
- spdev->dev.bus = &scif_peer_bus;
- dev_set_name(&spdev->dev, "scif_peer-dev%u", spdev->dnode);
-
- device_initialize(&spdev->dev);
- get_device(&spdev->dev);
- rcu_assign_pointer(scifdev->spdev, spdev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total++;
- scif_info.maxid = max_t(u32, spdev->dnode, scif_info.maxid);
- mutex_unlock(&scif_info.conflock);
- return 0;
-err:
- dev_err(&scifdev->sdev->dev,
- "dnode %d: initialize_device rc %d\n", scifdev->node, ret);
- return ret;
-}
-
-static int scif_peer_add_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev = rcu_dereference(scifdev->spdev);
- char pool_name[16];
- int ret;
-
- ret = device_add(&spdev->dev);
- put_device(&spdev->dev);
- if (ret) {
- dev_err(&scifdev->sdev->dev,
- "dnode %d: peer device_add failed\n", scifdev->node);
- goto put_spdev;
- }
-
- scnprintf(pool_name, sizeof(pool_name), "scif-%d", spdev->dnode);
- scifdev->signal_pool = dmam_pool_create(pool_name, &scifdev->sdev->dev,
- sizeof(struct scif_status), 1,
- 0);
- if (!scifdev->signal_pool) {
- dev_err(&scifdev->sdev->dev,
- "dnode %d: dmam_pool_create failed\n", scifdev->node);
- ret = -ENOMEM;
- goto del_spdev;
- }
- dev_dbg(&spdev->dev, "Added peer dnode %d\n", spdev->dnode);
- return 0;
-del_spdev:
- device_del(&spdev->dev);
-put_spdev:
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- synchronize_rcu();
- put_device(&spdev->dev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total--;
- mutex_unlock(&scif_info.conflock);
- return ret;
-}
-
-void scif_add_peer_device(struct work_struct *work)
-{
- struct scif_dev *scifdev = container_of(work, struct scif_dev,
- peer_add_work);
-
- scif_peer_add_device(scifdev);
-}
-
-/*
- * Peer device registration is split into a device_initialize and a device_add.
- * The reason for doing this is as follows: First, peer device registration
- * itself cannot be done in the message processing thread and must be delegated
- * to another workqueue, otherwise if SCIF client probe, called during peer
- * device registration, calls scif_connect(..), it will block the message
- * processing thread causing a deadlock. Next, device_initialize is done in the
- * "top-half" message processing thread and device_add in the "bottom-half"
- * workqueue. If this is not done, SCIF_CNCT_REQ message processing executing
- * concurrently with SCIF_INIT message processing is unable to get a reference
- * on the peer device, thereby failing the connect request.
- */
-void scif_peer_register_device(struct scif_dev *scifdev)
-{
- int ret;
-
- mutex_lock(&scifdev->lock);
- ret = scif_peer_initialize_device(scifdev);
- if (ret)
- goto exit;
- schedule_work(&scifdev->peer_add_work);
-exit:
- mutex_unlock(&scifdev->lock);
-}
-
-int scif_peer_unregister_device(struct scif_dev *scifdev)
-{
- struct scif_peer_dev *spdev;
-
- mutex_lock(&scifdev->lock);
-	/* Flush work to ensure device registration is complete */
- flush_work(&scifdev->peer_add_work);
-
- /*
- * Continue holding scifdev->lock since theoretically unregister_device
- * can be called simultaneously from multiple threads
- */
- spdev = rcu_dereference(scifdev->spdev);
- if (!spdev) {
- mutex_unlock(&scifdev->lock);
- return -ENODEV;
- }
-
- RCU_INIT_POINTER(scifdev->spdev, NULL);
- synchronize_rcu();
- mutex_unlock(&scifdev->lock);
-
- dev_dbg(&spdev->dev, "Removing peer dnode %d\n", spdev->dnode);
- device_unregister(&spdev->dev);
-
- mutex_lock(&scif_info.conflock);
- scif_info.total--;
- mutex_unlock(&scif_info.conflock);
- return 0;
-}
-
-int scif_peer_bus_init(void)
-{
- return bus_register(&scif_peer_bus);
-}
-
-void scif_peer_bus_exit(void)
-{
- bus_unregister(&scif_peer_bus);
-}
diff --git a/drivers/misc/mic/scif/scif_peer_bus.h b/drivers/misc/mic/scif/scif_peer_bus.h
deleted file mode 100644
index 2ea4c51c18c1..000000000000
--- a/drivers/misc/mic/scif/scif_peer_bus.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef _SCIF_PEER_BUS_H_
-#define _SCIF_PEER_BUS_H_
-
-#include <linux/device.h>
-#include <linux/mic_common.h>
-#include <linux/scif.h>
-
-struct scif_dev;
-
-void scif_add_peer_device(struct work_struct *work);
-void scif_peer_register_device(struct scif_dev *sdev);
-int scif_peer_unregister_device(struct scif_dev *scifdev);
-int scif_peer_bus_init(void);
-void scif_peer_bus_exit(void);
-#endif /* _SCIF_PEER_BUS_H_ */
diff --git a/drivers/misc/mic/scif/scif_ports.c b/drivers/misc/mic/scif/scif_ports.c
deleted file mode 100644
index 4bdb5ef9a139..000000000000
--- a/drivers/misc/mic/scif/scif_ports.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/idr.h>
-
-#include "scif_main.h"
-
-#define SCIF_PORT_COUNT 0x10000 /* Ports available */
-
-struct idr scif_ports;
-
-/**
- * struct scif_port - SCIF port information
- *
- * @ref_cnt: Reference count, since multiple endpoints created via
- * scif_accept(..) can be using a port simultaneously.
- */
-struct scif_port {
- int ref_cnt;
-};
-
-/**
- * __scif_get_port - Reserve a port # for SCIF from the given range and
- * add it to the global list.
- * @start: lowest port # to be reserved (inclusive).
- * @end: highest port # to be reserved (exclusive).
- *
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
- * On memory allocation failure, returns -ENOMEM.
- */
-static int __scif_get_port(int start, int end)
-{
- int id;
- struct scif_port *port = kzalloc(sizeof(*port), GFP_ATOMIC);
-
- if (!port)
- return -ENOMEM;
- spin_lock(&scif_info.port_lock);
- id = idr_alloc(&scif_ports, port, start, end, GFP_ATOMIC);
- if (id >= 0)
- port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
- return id;
-}
-
-/**
- * scif_rsrv_port - Reserve a specified port # for SCIF.
- * @port : port # to be reserved.
- *
- * @return : Allocated SCIF port #, or -ENOSPC if port unavailable.
- * On memory allocation failure, returns -ENOMEM.
- */
-int scif_rsrv_port(u16 port)
-{
- return __scif_get_port(port, port + 1);
-}
-
-/**
- * scif_get_new_port - Get and reserve any port # for SCIF in the range
- * SCIF_PORT_RSVD + 1 to SCIF_PORT_COUNT - 1.
- *
- * @return : Allocated SCIF port #, or -ENOSPC if no ports available.
- * On memory allocation failure, returns -ENOMEM.
- */
-int scif_get_new_port(void)
-{
- return __scif_get_port(SCIF_PORT_RSVD + 1, SCIF_PORT_COUNT);
-}
-
-/**
- * scif_get_port - Increment the reference count for a SCIF port
- * @id : SCIF port
- *
- * @return : None
- */
-void scif_get_port(u16 id)
-{
- struct scif_port *port;
-
- if (!id)
- return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
- if (port)
- port->ref_cnt++;
- spin_unlock(&scif_info.port_lock);
-}
-
-/**
- * scif_put_port - Release a reserved SCIF port
- * @id : SCIF port to be released.
- *
- * @return : None
- */
-void scif_put_port(u16 id)
-{
- struct scif_port *port;
-
- if (!id)
- return;
- spin_lock(&scif_info.port_lock);
- port = idr_find(&scif_ports, id);
- if (port) {
- port->ref_cnt--;
- if (!port->ref_cnt) {
- idr_remove(&scif_ports, id);
- kfree(port);
- }
- }
- spin_unlock(&scif_info.port_lock);
-}
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
deleted file mode 100644
index e425882ae06d..000000000000
--- a/drivers/misc/mic/scif/scif_rb.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/circ_buf.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-
-#include "scif_rb.h"
-
-#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
-#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
-
-/**
- * scif_rb_init - Initializes the ring buffer
- * @rb: ring buffer
- * @read_ptr: A pointer to the read offset
- * @write_ptr: A pointer to the write offset
- * @rb_base: A pointer to the base of the ring buffer
- * @size: log2 of the ring buffer size in bytes
- */
-void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
- void *rb_base, u8 size)
-{
- rb->rb_base = rb_base;
- rb->size = (1 << size);
- rb->read_ptr = read_ptr;
- rb->write_ptr = write_ptr;
- rb->current_read_offset = *read_ptr;
- rb->current_write_offset = *write_ptr;
-}
-
-/* Copies a message to the ring buffer -- handles the wrap around case */
-static void memcpy_torb(struct scif_rb *rb, void *header,
- void *msg, u32 size)
-{
- u32 size1, size2;
-
- if (header + size >= rb->rb_base + rb->size) {
- /* Need to call two copies if it wraps around */
- size1 = (u32)(rb->rb_base + rb->size - header);
- size2 = size - size1;
- memcpy_toio((void __iomem __force *)header, msg, size1);
- memcpy_toio((void __iomem __force *)rb->rb_base,
- msg + size1, size2);
- } else {
- memcpy_toio((void __iomem __force *)header, msg, size);
- }
-}
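-
-/*
- * Worked example (illustrative): with a 16-byte ring and an 8-byte
- * message staged at offset 12, header + size lands past the end of the
- * ring, so size1 = 16 - 12 = 4 bytes go to the tail of the ring and
- * size2 = 8 - 4 = 4 bytes wrap around to rb_base. memcpy_fromrb() below
- * mirrors the same split for reads.
- */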
-
-/* Copies a message from the ring buffer -- handles the wrap around case */
-static void memcpy_fromrb(struct scif_rb *rb, void *header,
- void *msg, u32 size)
-{
- u32 size1, size2;
-
- if (header + size >= rb->rb_base + rb->size) {
- /* Need to call two copies if it wraps around */
- size1 = (u32)(rb->rb_base + rb->size - header);
- size2 = size - size1;
- memcpy_fromio(msg, (void __iomem __force *)header, size1);
- memcpy_fromio(msg + size1,
- (void __iomem __force *)rb->rb_base, size2);
- } else {
- memcpy_fromio(msg, (void __iomem __force *)header, size);
- }
-}
-
-/**
- * scif_rb_space - Query space available for writing to the RB
- * @rb: ring buffer
- *
- * Return: size available for writing to RB in bytes.
- */
-u32 scif_rb_space(struct scif_rb *rb)
-{
- rb->current_read_offset = *rb->read_ptr;
- /*
- * Update from the HW read pointer only once the peer has exposed the
-	 * new empty slot. This barrier is paired with the memory barrier in
-	 * scif_rb_update_read_ptr().
- */
- mb();
- return scif_rb_ring_space(rb->current_write_offset,
- rb->current_read_offset, rb->size);
-}
-
-/**
- * scif_rb_write - Write a message to the RB
- * @rb: ring buffer
- * @msg: buffer holding the message to send; must be at least size bytes long
- * @size: the size (in bytes) to be copied to the RB
- *
- * This API does not block if there isn't enough space in the RB.
- * Returns: 0 on success or -ENOMEM on failure
- */
-int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
-{
- void *header;
-
- if (scif_rb_space(rb) < size)
- return -ENOMEM;
- header = rb->rb_base + rb->current_write_offset;
- memcpy_torb(rb, header, msg, size);
- /*
-	 * Update only the locally cached write offset here; the shared
-	 * write pointer is not updated until scif_rb_commit().
- */
- rb->current_write_offset =
- (rb->current_write_offset + size) & (rb->size - 1);
- return 0;
-}
-
-/**
- * scif_rb_commit - Submit the message so the peer can fetch it
- * @rb: ring buffer
- */
-void scif_rb_commit(struct scif_rb *rb)
-{
- /*
-	 * We must ensure ordering of all the data committed previously
-	 * before we expose the new message to the peer by updating the
-	 * write_ptr. This write barrier is paired with the read barrier
-	 * in scif_rb_count(..)
- */
- wmb();
- WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
-#ifdef CONFIG_INTEL_MIC_CARD
- /*
- * X100 Si bug: For the case where a Core is performing an EXT_WR
- * followed by a Doorbell Write, the Core must perform two EXT_WR to the
- * same address with the same data before it does the Doorbell Write.
- * This way, if ordering is violated for the Interrupt Message, it will
- * fall just behind the first Posted associated with the first EXT_WR.
- */
- WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
-#endif
-}
-
-/**
- * scif_rb_get - Get the next message from the ring buffer
- * @rb: ring buffer
- * @size: Number of bytes to be read
- *
- * Return: NULL if no bytes to be read from the ring buffer, otherwise the
- * pointer to the next byte
- */
-static void *scif_rb_get(struct scif_rb *rb, u32 size)
-{
- void *header = NULL;
-
- if (scif_rb_count(rb, size) >= size)
- header = rb->rb_base + rb->current_read_offset;
- return header;
-}
-
-/*
- * scif_rb_get_next - Read from ring buffer.
- * @rb: ring buffer
- * @msg: buffer to hold the message. Must be at least size bytes long
- * @size: Number of bytes to be read
- *
- * Return: number of bytes read if available bytes are >= size, otherwise
- * returns zero.
- */
-u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
-{
- void *header = NULL;
- int read_size = 0;
-
- header = scif_rb_get(rb, size);
- if (header) {
- u32 next_cmd_offset =
- (rb->current_read_offset + size) & (rb->size - 1);
-
- read_size = size;
- rb->current_read_offset = next_cmd_offset;
- memcpy_fromrb(rb, header, msg, size);
- }
- return read_size;
-}
-
-/**
- * scif_rb_update_read_ptr - Publish the updated read offset to the peer
- * @rb: ring buffer
- */
-void scif_rb_update_read_ptr(struct scif_rb *rb)
-{
- u32 new_offset;
-
- new_offset = rb->current_read_offset;
- /*
-	 * We must ensure ordering of all the data committed or read
- * previously before we expose the empty slot to the peer by updating
- * the read_ptr. This barrier is paired with the memory barrier in
- * scif_rb_space(..)
- */
- mb();
- WRITE_ONCE(*rb->read_ptr, new_offset);
-#ifdef CONFIG_INTEL_MIC_CARD
- /*
- * X100 Si Bug: For the case where a Core is performing an EXT_WR
- * followed by a Doorbell Write, the Core must perform two EXT_WR to the
- * same address with the same data before it does the Doorbell Write.
- * This way, if ordering is violated for the Interrupt Message, it will
- * fall just behind the first Posted associated with the first EXT_WR.
- */
- WRITE_ONCE(*rb->read_ptr, new_offset);
-#endif
-}
-
-/**
- * scif_rb_count - Query the number of bytes available for reading
- * @rb: ring buffer
- * @size: Number of bytes expected to be read
- *
- * Return: number of bytes that can be read from the RB
- */
-u32 scif_rb_count(struct scif_rb *rb, u32 size)
-{
- if (scif_rb_ring_cnt(rb->current_write_offset,
- rb->current_read_offset,
- rb->size) < size) {
- rb->current_write_offset = *rb->write_ptr;
- /*
- * Update from the HW write pointer if empty only once the peer
- * has exposed the new message. This read barrier is paired
- * with the write barrier in scif_rb_commit(..)
- */
- smp_rmb();
- }
- return scif_rb_ring_cnt(rb->current_write_offset,
- rb->current_read_offset,
- rb->size);
-}
diff --git a/drivers/misc/mic/scif/scif_rb.h b/drivers/misc/mic/scif/scif_rb.h
deleted file mode 100644
index 166dffe3093d..000000000000
--- a/drivers/misc/mic/scif/scif_rb.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_RB_H
-#define SCIF_RB_H
-/*
- * This file describes a general purpose, byte based ring buffer. Writers to the
- * ring buffer need to synchronize using a lock. The same is true for readers,
- * although in practice, the ring buffer has a single reader. It is lockless
- * between producer and consumer so it can handle being used across the PCIe
- * bus. The ring buffer ensures that there are no reads across the PCIe bus for
- * performance reasons. Two of these are used to form a single bidirectional
- * queue-pair across PCIe.
- */
-/*
- * struct scif_rb - SCIF Ring Buffer
- *
- * @rb_base: The base of the memory used for storing RB messages
- * @read_ptr: Pointer to the read offset
- * @write_ptr: Pointer to the write offset
- * @size: Size of the memory in rb_base
- * @current_read_offset: Cached read offset for performance
- * @current_write_offset: Cached write offset for performance
- */
-struct scif_rb {
- void *rb_base;
- u32 *read_ptr;
- u32 *write_ptr;
- u32 size;
- u32 current_read_offset;
- u32 current_write_offset;
-};
-
-/* methods used by both */
-void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
- void *rb_base, u8 size);
-/* writer only methods */
-/* write a new command, then scif_rb_commit() */
-int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
-/* after write(), then scif_rb_commit() */
-void scif_rb_commit(struct scif_rb *rb);
-/* query space available for writing to a RB. */
-u32 scif_rb_space(struct scif_rb *rb);
-
-/* reader only methods */
-/* read a new message from the ring buffer of size bytes */
-u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
-/* update the read pointer so that the space can be reused */
-void scif_rb_update_read_ptr(struct scif_rb *rb);
-/* count the number of bytes that can be read */
-u32 scif_rb_count(struct scif_rb *rb, u32 size);
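-
-/*
- * Illustrative usage sketch (not part of the original driver): the
- * producer/consumer call sequence for one fixed-size message. The
- * function name is hypothetical and struct scifmsg is assumed from
- * scif_nodeqp.h.
- */
-#if 0
-static void scif_rb_example(struct scif_rb *out, struct scif_rb *in)
-{
-	struct scifmsg snd = { 0 }, rcv;
-
-	/* Producer: stage the message, then publish it to the peer */
-	if (!scif_rb_write(out, &snd, sizeof(snd)))
-		scif_rb_commit(out);
-
-	/* Consumer: read one message, then expose the slot for reuse */
-	if (scif_rb_get_next(in, &rcv, sizeof(rcv)))
-		scif_rb_update_read_ptr(in);
-}
-#endif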
-#endif
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
deleted file mode 100644
index 2da3b474f486..000000000000
--- a/drivers/misc/mic/scif/scif_rma.c
+++ /dev/null
@@ -1,1760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include <linux/intel-iommu.h>
-#include <linux/pagemap.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/signal.h>
-
-#include "scif_main.h"
-#include "scif_map.h"
-
-/* Used to skip ulimit checks for registrations with SCIF_MAP_KERNEL flag */
-#define SCIF_MAP_ULIMIT 0x40
-
-bool scif_ulimit_check = true;
-
-/**
- * scif_rma_ep_init:
- * @ep: end point
- *
- * Initialize RMA per EP data structures.
- */
-void scif_rma_ep_init(struct scif_endpt *ep)
-{
- struct scif_endpt_rma_info *rma = &ep->rma_info;
-
- mutex_init(&rma->rma_lock);
- init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
- spin_lock_init(&rma->tc_lock);
- mutex_init(&rma->mmn_lock);
- INIT_LIST_HEAD(&rma->reg_list);
- INIT_LIST_HEAD(&rma->remote_reg_list);
- atomic_set(&rma->tw_refcount, 0);
- atomic_set(&rma->tcw_refcount, 0);
- atomic_set(&rma->tcw_total_pages, 0);
- atomic_set(&rma->fence_refcount, 0);
-
- rma->async_list_del = 0;
- rma->dma_chan = NULL;
- INIT_LIST_HEAD(&rma->mmn_list);
- INIT_LIST_HEAD(&rma->vma_list);
- init_waitqueue_head(&rma->markwq);
-}
-
-/**
- * scif_rma_ep_can_uninit:
- * @ep: end point
- *
- * Returns 1 if an endpoint can be uninitialized and 0 otherwise.
- */
-int scif_rma_ep_can_uninit(struct scif_endpt *ep)
-{
- int ret = 0;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Destroy RMA Info only if both lists are empty */
- if (list_empty(&ep->rma_info.reg_list) &&
- list_empty(&ep->rma_info.remote_reg_list) &&
- list_empty(&ep->rma_info.mmn_list) &&
- !atomic_read(&ep->rma_info.tw_refcount) &&
- !atomic_read(&ep->rma_info.tcw_refcount) &&
- !atomic_read(&ep->rma_info.fence_refcount))
- ret = 1;
- mutex_unlock(&ep->rma_info.rma_lock);
- return ret;
-}
-
-/**
- * scif_create_pinned_pages:
- * @nr_pages: number of pages in window
- * @prot: read/write protection
- *
- * Allocate and prepare a set of pinned pages.
- */
-static struct scif_pinned_pages *
-scif_create_pinned_pages(int nr_pages, int prot)
-{
- struct scif_pinned_pages *pin;
-
- might_sleep();
- pin = scif_zalloc(sizeof(*pin));
- if (!pin)
- goto error;
-
- pin->pages = scif_zalloc(nr_pages * sizeof(*pin->pages));
- if (!pin->pages)
- goto error_free_pinned_pages;
-
- pin->prot = prot;
- pin->magic = SCIFEP_MAGIC;
- return pin;
-
-error_free_pinned_pages:
- scif_free(pin, sizeof(*pin));
-error:
- return NULL;
-}
-
-/**
- * scif_destroy_pinned_pages:
- * @pin: A set of pinned pages.
- *
- * Deallocate resources for pinned pages.
- */
-static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
-{
-	int writeable = pin->prot & SCIF_PROT_WRITE;
-	int kernel = SCIF_MAP_KERNEL & pin->map_flags;
-
-	/*
-	 * Pages registered with SCIF_MAP_KERNEL are not acquired via
-	 * pin_user_pages() and need no release here; only user pages
-	 * are unpinned.
-	 */
-	if (!kernel)
-		unpin_user_pages_dirty_lock(pin->pages, pin->nr_pages,
-					    writeable);
- scif_free(pin->pages,
- pin->nr_pages * sizeof(*pin->pages));
- scif_free(pin, sizeof(*pin));
- return 0;
-}
-
-/*
- * scif_create_window:
- * @ep: end point
- * @nr_pages: number of pages
- * @offset: registration offset
- * @temp: true if a temporary window is being created
- *
- * Allocate and prepare a self registration window.
- */
-struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
- s64 offset, bool temp)
-{
- struct scif_window *window;
-
- might_sleep();
- window = scif_zalloc(sizeof(*window));
- if (!window)
- goto error;
-
- window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
- if (!window->dma_addr)
- goto error_free_window;
-
- window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
- if (!window->num_pages)
- goto error_free_window;
-
- window->offset = offset;
- window->ep = (u64)ep;
- window->magic = SCIFEP_MAGIC;
- window->reg_state = OP_IDLE;
- init_waitqueue_head(&window->regwq);
- window->unreg_state = OP_IDLE;
- init_waitqueue_head(&window->unregwq);
- INIT_LIST_HEAD(&window->list);
- window->type = SCIF_WINDOW_SELF;
- window->temp = temp;
- return window;
-
-error_free_window:
- scif_free(window->dma_addr,
- nr_pages * sizeof(*window->dma_addr));
- scif_free(window, sizeof(*window));
-error:
- return NULL;
-}
-
-/**
- * scif_destroy_incomplete_window:
- * @ep: end point
- * @window: registration window
- *
- * Deallocate resources for self window.
- */
-static void scif_destroy_incomplete_window(struct scif_endpt *ep,
- struct scif_window *window)
-{
- int err;
- int nr_pages = window->nr_pages;
- struct scif_allocmsg *alloc = &window->alloc_handle;
- struct scifmsg msg;
-
-retry:
- /* Wait for a SCIF_ALLOC_GNT/REJ message */
- err = wait_event_timeout(alloc->allocwq,
- alloc->state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
-
- mutex_lock(&ep->rma_info.rma_lock);
- if (alloc->state == OP_COMPLETED) {
- msg.uop = SCIF_FREE_VIRT;
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- msg.payload[3] = SCIF_REGISTER;
- _scif_nodeqp_send(ep->remote_dev, &msg);
- }
- mutex_unlock(&ep->rma_info.rma_lock);
-
- scif_free_window_offset(ep, window, window->offset);
- scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
- scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
- scif_free(window, sizeof(*window));
-}
-
-/**
- * scif_unmap_window:
- * @remote_dev: SCIF remote device
- * @window: registration window
- *
- * Delete any DMA mappings created for a registered self window
- */
-void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window)
-{
- int j;
-
- if (scif_is_iommu_enabled() && !scifdev_self(remote_dev)) {
- if (window->st) {
- dma_unmap_sg(&remote_dev->sdev->dev,
- window->st->sgl, window->st->nents,
- DMA_BIDIRECTIONAL);
- sg_free_table(window->st);
- kfree(window->st);
- window->st = NULL;
- }
- } else {
- for (j = 0; j < window->nr_contig_chunks; j++) {
- if (window->dma_addr[j]) {
- scif_unmap_single(window->dma_addr[j],
- remote_dev,
- window->num_pages[j] <<
- PAGE_SHIFT);
- window->dma_addr[j] = 0x0;
- }
- }
- }
-}
-
-static inline struct mm_struct *__scif_acquire_mm(void)
-{
- if (scif_ulimit_check)
- return get_task_mm(current);
- return NULL;
-}
-
-static inline void __scif_release_mm(struct mm_struct *mm)
-{
- if (mm)
- mmput(mm);
-}
-
-static inline int
-__scif_dec_pinned_vm_lock(struct mm_struct *mm,
- int nr_pages)
-{
- if (!mm || !nr_pages || !scif_ulimit_check)
- return 0;
-
- atomic64_sub(nr_pages, &mm->pinned_vm);
- return 0;
-}
-
-static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm,
- int nr_pages)
-{
- unsigned long locked, lock_limit;
-
- if (!mm || !nr_pages || !scif_ulimit_check)
- return 0;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- locked = atomic64_add_return(nr_pages, &mm->pinned_vm);
-
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
- atomic64_sub(nr_pages, &mm->pinned_vm);
- dev_err(scif_info.mdev.this_device,
- "locked(%lu) > lock_limit(%lu)\n",
- locked, lock_limit);
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * scif_destroy_window:
- * @ep: end point
- * @window: registration window
- *
- * Deallocate resources for self window.
- */
-int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window)
-{
- int j;
- struct scif_pinned_pages *pinned_pages = window->pinned_pages;
- int nr_pages = window->nr_pages;
-
- might_sleep();
- if (!window->temp && window->mm) {
- __scif_dec_pinned_vm_lock(window->mm, window->nr_pages);
- __scif_release_mm(window->mm);
- window->mm = NULL;
- }
-
- scif_free_window_offset(ep, window, window->offset);
- scif_unmap_window(ep->remote_dev, window);
- /*
- * Decrement references for this set of pinned pages from
- * this window.
- */
- j = atomic_sub_return(1, &pinned_pages->ref_count);
- if (j < 0)
- dev_err(scif_info.mdev.this_device,
- "%s %d incorrect ref count %d\n",
- __func__, __LINE__, j);
- /*
- * If the ref count for pinned_pages is zero then someone
- * has already called scif_unpin_pages() for it and we should
- * destroy the page cache.
- */
- if (!j)
- scif_destroy_pinned_pages(window->pinned_pages);
- scif_free(window->dma_addr, nr_pages * sizeof(*window->dma_addr));
- scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
- window->magic = 0;
- scif_free(window, sizeof(*window));
- return 0;
-}
-
-/**
- * scif_create_remote_lookup:
- * @remote_dev: SCIF remote device
- * @window: remote window
- *
- * Allocate and prepare lookup entries for the remote
- * end to copy over the physical addresses.
- * Returns 0 on success and appropriate errno on failure.
- */
-static int scif_create_remote_lookup(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- int i, j, err = 0;
- int nr_pages = window->nr_pages;
- bool vmalloc_dma_phys, vmalloc_num_pages;
-
- might_sleep();
- /* Map window */
- err = scif_map_single(&window->mapped_offset,
- window, remote_dev, sizeof(*window));
- if (err)
- goto error_window;
-
-	/* Compute the number of lookup entries. 21 == 2MB shift */
-	window->nr_lookup = ALIGN(nr_pages * PAGE_SIZE,
-				  (2 * 1024 * 1024)) >> 21;
-
- window->dma_addr_lookup.lookup =
- scif_alloc_coherent(&window->dma_addr_lookup.offset,
- remote_dev, window->nr_lookup *
- sizeof(*window->dma_addr_lookup.lookup),
- GFP_KERNEL | __GFP_ZERO);
- if (!window->dma_addr_lookup.lookup) {
- err = -ENOMEM;
- goto error_window;
- }
-
- window->num_pages_lookup.lookup =
- scif_alloc_coherent(&window->num_pages_lookup.offset,
- remote_dev, window->nr_lookup *
- sizeof(*window->num_pages_lookup.lookup),
- GFP_KERNEL | __GFP_ZERO);
- if (!window->num_pages_lookup.lookup) {
- err = -ENOMEM;
- goto error_window;
- }
-
- vmalloc_dma_phys = is_vmalloc_addr(&window->dma_addr[0]);
- vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);
-
- /* Now map each of the pages containing physical addresses */
- for (i = 0, j = 0; i < nr_pages; i += SCIF_NR_ADDR_IN_PAGE, j++) {
- err = scif_map_page(&window->dma_addr_lookup.lookup[j],
- vmalloc_dma_phys ?
- vmalloc_to_page(&window->dma_addr[i]) :
- virt_to_page(&window->dma_addr[i]),
- remote_dev);
- if (err)
- goto error_window;
- err = scif_map_page(&window->num_pages_lookup.lookup[j],
- vmalloc_num_pages ?
- vmalloc_to_page(&window->num_pages[i]) :
- virt_to_page(&window->num_pages[i]),
- remote_dev);
- if (err)
- goto error_window;
- }
- return 0;
-error_window:
- return err;
-}
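
The sizing arithmetic above: a 4 KiB lookup page holds 512 eight-byte addresses, so one lookup entry covers 2 MiB of registered space, which is why the code aligns to 2 MiB and shifts by 21. A hedged restatement (assumes 4 KiB pages; the helper name is hypothetical):

static inline int demo_nr_lookup(int nr_pages)
{
	/* one entry per 2 MiB (512 addresses * 4 KiB), rounded up;
	 * e.g. nr_pages = 1536 (6 MiB) -> 3 entries, nr_pages = 1 -> 1
	 */
	return ALIGN((u64)nr_pages * PAGE_SIZE, SZ_2M) >> 21;
}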
-
-/**
- * scif_destroy_remote_lookup:
- * @remote_dev: SCIF remote device
- * @window: remote window
- *
- * Destroy lookup entries used for the remote
- * end to copy over the physical addresses.
- */
-static void scif_destroy_remote_lookup(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- int i, j;
-
- if (window->nr_lookup) {
- struct scif_rma_lookup *lup = &window->dma_addr_lookup;
- struct scif_rma_lookup *npup = &window->num_pages_lookup;
-
- for (i = 0, j = 0; i < window->nr_pages;
- i += SCIF_NR_ADDR_IN_PAGE, j++) {
- if (lup->lookup && lup->lookup[j])
- scif_unmap_single(lup->lookup[j],
- remote_dev,
- PAGE_SIZE);
- if (npup->lookup && npup->lookup[j])
- scif_unmap_single(npup->lookup[j],
- remote_dev,
- PAGE_SIZE);
- }
- if (lup->lookup)
- scif_free_coherent(lup->lookup, lup->offset,
- remote_dev, window->nr_lookup *
- sizeof(*lup->lookup));
- if (npup->lookup)
- scif_free_coherent(npup->lookup, npup->offset,
- remote_dev, window->nr_lookup *
- sizeof(*npup->lookup));
- if (window->mapped_offset)
- scif_unmap_single(window->mapped_offset,
- remote_dev, sizeof(*window));
- window->nr_lookup = 0;
- }
-}
-
-/**
- * scif_create_remote_window:
- * @scifdev: SCIF device
- * @nr_pages: number of pages in window
- *
- * Allocate and prepare a remote registration window.
- */
-static struct scif_window *
-scif_create_remote_window(struct scif_dev *scifdev, int nr_pages)
-{
- struct scif_window *window;
-
- might_sleep();
- window = scif_zalloc(sizeof(*window));
- if (!window)
- goto error_ret;
-
- window->magic = SCIFEP_MAGIC;
- window->nr_pages = nr_pages;
-
- window->dma_addr = scif_zalloc(nr_pages * sizeof(*window->dma_addr));
- if (!window->dma_addr)
- goto error_window;
-
- window->num_pages = scif_zalloc(nr_pages *
- sizeof(*window->num_pages));
- if (!window->num_pages)
- goto error_window;
-
- if (scif_create_remote_lookup(scifdev, window))
- goto error_window;
-
- window->type = SCIF_WINDOW_PEER;
- window->unreg_state = OP_IDLE;
- INIT_LIST_HEAD(&window->list);
- return window;
-error_window:
- scif_destroy_remote_window(window);
-error_ret:
- return NULL;
-}
-
-/**
- * scif_destroy_remote_window:
- * @window: remote registration window
- *
- * Deallocate resources for remote window.
- */
-void
-scif_destroy_remote_window(struct scif_window *window)
-{
- scif_free(window->dma_addr, window->nr_pages *
- sizeof(*window->dma_addr));
- scif_free(window->num_pages, window->nr_pages *
- sizeof(*window->num_pages));
- window->magic = 0;
- scif_free(window, sizeof(*window));
-}
-
-/**
- * scif_iommu_map: create DMA mappings if the IOMMU is enabled
- * @remote_dev: SCIF remote device
- * @window: remote registration window
- *
- * Map the physical pages using dma_map_sg(..) and then detect the number
- * of contiguous DMA mappings allocated
- */
-static int scif_iommu_map(struct scif_dev *remote_dev,
- struct scif_window *window)
-{
- struct scatterlist *sg;
- int i, err;
- scif_pinned_pages_t pin = window->pinned_pages;
-
- window->st = kzalloc(sizeof(*window->st), GFP_KERNEL);
- if (!window->st)
- return -ENOMEM;
-
- err = sg_alloc_table(window->st, window->nr_pages, GFP_KERNEL);
- if (err)
- return err;
-
- for_each_sg(window->st->sgl, sg, window->st->nents, i)
- sg_set_page(sg, pin->pages[i], PAGE_SIZE, 0x0);
-
- err = dma_map_sg(&remote_dev->sdev->dev, window->st->sgl,
- window->st->nents, DMA_BIDIRECTIONAL);
- if (!err)
- return -ENOMEM;
- /* Detect contiguous ranges of DMA mappings */
- sg = window->st->sgl;
- for (i = 0; sg; i++) {
- dma_addr_t last_da;
-
- window->dma_addr[i] = sg_dma_address(sg);
- window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
- last_da = sg_dma_address(sg) + sg_dma_len(sg);
- while ((sg = sg_next(sg)) && sg_dma_address(sg) == last_da) {
- window->num_pages[i] +=
- (sg_dma_len(sg) >> PAGE_SHIFT);
-				/* advance the run end past this entry */
-				last_da = sg_dma_address(sg) +
-					  sg_dma_len(sg);
- }
- window->nr_contig_chunks++;
- }
- return 0;
-}
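
The chunk-detection loop above coalesces scatterlist entries whose DMA addresses abut into a single run. The same walk, reduced to a stand-alone counter over an already dma_map_sg()'d table (a hedged sketch, not part of the driver):

static int demo_count_dma_runs(struct sg_table *st)
{
	struct scatterlist *sg = st->sgl;
	int runs = 0;

	while (sg) {
		dma_addr_t end = sg_dma_address(sg) + sg_dma_len(sg);

		/* swallow entries that start exactly where the run ends */
		while ((sg = sg_next(sg)) && sg_dma_address(sg) == end)
			end += sg_dma_len(sg);
		runs++;
	}
	return runs;
}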
-
-/**
- * scif_map_window:
- * @remote_dev: SCIF remote device
- * @window: self registration window
- *
- * Map pages of a window into the aperture/PCI.
- * Also determine addresses required for DMA.
- */
-int
-scif_map_window(struct scif_dev *remote_dev, struct scif_window *window)
-{
- int i, j, k, err = 0, nr_contig_pages;
- scif_pinned_pages_t pin;
- phys_addr_t phys_prev, phys_curr;
-
- might_sleep();
-
- pin = window->pinned_pages;
-
- if (intel_iommu_enabled && !scifdev_self(remote_dev))
- return scif_iommu_map(remote_dev, window);
-
- for (i = 0, j = 0; i < window->nr_pages; i += nr_contig_pages, j++) {
- phys_prev = page_to_phys(pin->pages[i]);
- nr_contig_pages = 1;
-
- /* Detect physically contiguous chunks */
- for (k = i + 1; k < window->nr_pages; k++) {
- phys_curr = page_to_phys(pin->pages[k]);
- if (phys_curr != (phys_prev + PAGE_SIZE))
- break;
- phys_prev = phys_curr;
- nr_contig_pages++;
- }
- window->num_pages[j] = nr_contig_pages;
- window->nr_contig_chunks++;
- if (scif_is_mgmt_node()) {
- /*
- * Management node has to deal with SMPT on X100 and
- * hence the DMA mapping is required
- */
- err = scif_map_single(&window->dma_addr[j],
- phys_to_virt(page_to_phys(
- pin->pages[i])),
- remote_dev,
- nr_contig_pages << PAGE_SHIFT);
- if (err)
- return err;
- } else {
- window->dma_addr[j] = page_to_phys(pin->pages[i]);
- }
- }
- return err;
-}
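
The non-IOMMU path performs the analogous coalescing on raw physical addresses. Its core, as a hedged stand-alone sketch over a pinned page array (the helper name is hypothetical):

static int demo_count_phys_runs(struct page **pages, int nr_pages)
{
	int i = 0, runs;

	for (runs = 0; i < nr_pages; runs++) {
		phys_addr_t end = page_to_phys(pages[i]) + PAGE_SIZE;

		/* extend the run while the next page follows contiguously */
		while (++i < nr_pages && page_to_phys(pages[i]) == end)
			end += PAGE_SIZE;
	}
	return runs;
}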
-
-/**
- * scif_send_scif_unregister:
- * @ep: end point
- * @window: self registration window
- *
- * Send a SCIF_UNREGISTER message.
- */
-static int scif_send_scif_unregister(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
-
- msg.uop = SCIF_UNREGISTER;
- msg.src = ep->port;
- msg.payload[0] = window->alloc_handle.vaddr;
- msg.payload[1] = (u64)window;
- return scif_nodeqp_send(ep->remote_dev, &msg);
-}
-
-/**
- * scif_unregister_window:
- * @window: self registration window
- *
- * Send an unregistration request and wait for a response.
- */
-int scif_unregister_window(struct scif_window *window)
-{
- int err = 0;
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
- bool send_msg = false;
-
- might_sleep();
- switch (window->unreg_state) {
- case OP_IDLE:
- {
- window->unreg_state = OP_IN_PROGRESS;
- send_msg = true;
- }
- fallthrough;
- case OP_IN_PROGRESS:
- {
- scif_get_window(window, 1);
- mutex_unlock(&ep->rma_info.rma_lock);
- if (send_msg) {
- err = scif_send_scif_unregister(ep, window);
- if (err) {
- window->unreg_state = OP_COMPLETED;
- goto done;
- }
- } else {
- /* Return ENXIO since unregistration is in progress */
- mutex_lock(&ep->rma_info.rma_lock);
- return -ENXIO;
- }
-retry:
- /* Wait for a SCIF_UNREGISTER_(N)ACK message */
- err = wait_event_timeout(window->unregwq,
- window->unreg_state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- if (!err) {
- err = -ENODEV;
- window->unreg_state = OP_COMPLETED;
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n", __func__, __LINE__, err);
- }
- if (err > 0)
- err = 0;
-done:
- mutex_lock(&ep->rma_info.rma_lock);
- scif_put_window(window, 1);
- break;
- }
- case OP_FAILED:
- {
- if (!scifdev_alive(ep)) {
- err = -ENODEV;
- window->unreg_state = OP_COMPLETED;
- }
- break;
- }
- case OP_COMPLETED:
- break;
- default:
- err = -ENODEV;
- }
-
- if (window->unreg_state == OP_COMPLETED && window->ref_count)
- scif_put_window(window, window->nr_pages);
-
- if (!window->ref_count) {
- atomic_inc(&ep->rma_info.tw_refcount);
- list_del_init(&window->list);
- scif_free_window_offset(ep, window, window->offset);
- mutex_unlock(&ep->rma_info.rma_lock);
-		if ((window->pinned_pages->map_flags & SCIF_MAP_KERNEL) &&
- scifdev_alive(ep)) {
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- } else {
- if (!__scif_dec_pinned_vm_lock(window->mm,
- window->nr_pages)) {
- __scif_release_mm(window->mm);
- window->mm = NULL;
- }
- }
- scif_queue_for_cleanup(window, &scif_info.rma);
- mutex_lock(&ep->rma_info.rma_lock);
- }
- return err;
-}
-
-/**
- * scif_send_alloc_request:
- * @ep: end point
- * @window: self registration window
- *
- * Send a remote window allocation request
- */
-static int scif_send_alloc_request(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
- struct scif_allocmsg *alloc = &window->alloc_handle;
-
- /* Set up the Alloc Handle */
- alloc->state = OP_IN_PROGRESS;
- init_waitqueue_head(&alloc->allocwq);
-
- /* Send out an allocation request */
- msg.uop = SCIF_ALLOC_REQ;
- msg.payload[1] = window->nr_pages;
- msg.payload[2] = (u64)&window->alloc_handle;
- return _scif_nodeqp_send(ep->remote_dev, &msg);
-}
-
-/**
- * scif_prep_remote_window:
- * @ep: end point
- * @window: self registration window
- *
- * Send a remote window allocation request, wait for an allocation response,
- * and prepare the remote window by copying over the page lists.
- */
-static int scif_prep_remote_window(struct scif_endpt *ep,
- struct scif_window *window)
-{
- struct scifmsg msg;
- struct scif_window *remote_window;
- struct scif_allocmsg *alloc = &window->alloc_handle;
- dma_addr_t *dma_phys_lookup, *tmp, *num_pages_lookup, *tmp1;
- int i = 0, j = 0;
- int nr_contig_chunks, loop_nr_contig_chunks;
- int remaining_nr_contig_chunks, nr_lookup;
- int err, map_err;
-
- map_err = scif_map_window(ep->remote_dev, window);
- if (map_err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d map_err %d\n", __func__, __LINE__, map_err);
- remaining_nr_contig_chunks = window->nr_contig_chunks;
- nr_contig_chunks = window->nr_contig_chunks;
-retry:
- /* Wait for a SCIF_ALLOC_GNT/REJ message */
- err = wait_event_timeout(alloc->allocwq,
- alloc->state != OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- mutex_lock(&ep->rma_info.rma_lock);
- /* Synchronize with the thread waking up allocwq */
- mutex_unlock(&ep->rma_info.rma_lock);
- if (!err && scifdev_alive(ep))
- goto retry;
-
- if (!err)
- err = -ENODEV;
-
- if (err > 0)
- err = 0;
- else
- return err;
-
- /* Bail out. The remote end rejected this request */
- if (alloc->state == OP_FAILED)
- return -ENOMEM;
-
- if (map_err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, map_err);
- msg.uop = SCIF_FREE_VIRT;
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- msg.payload[3] = SCIF_REGISTER;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED)
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- else
- err = -ENOTCONN;
- spin_unlock(&ep->lock);
- return err;
- }
-
- remote_window = scif_ioremap(alloc->phys_addr, sizeof(*window),
- ep->remote_dev);
-
- /* Compute the number of lookup entries. 21 == 2MB Shift */
- nr_lookup = ALIGN(nr_contig_chunks, SCIF_NR_ADDR_IN_PAGE)
- >> ilog2(SCIF_NR_ADDR_IN_PAGE);
-
- dma_phys_lookup =
- scif_ioremap(remote_window->dma_addr_lookup.offset,
- nr_lookup *
- sizeof(*remote_window->dma_addr_lookup.lookup),
- ep->remote_dev);
- num_pages_lookup =
- scif_ioremap(remote_window->num_pages_lookup.offset,
- nr_lookup *
- sizeof(*remote_window->num_pages_lookup.lookup),
- ep->remote_dev);
-
- while (remaining_nr_contig_chunks) {
- loop_nr_contig_chunks = min_t(int, remaining_nr_contig_chunks,
- (int)SCIF_NR_ADDR_IN_PAGE);
-		/*
-		 * Copy the DMA addresses (the addresses that are fed into
-		 * the DMA engine) over to the remote side. Bus addresses are
-		 * transferred and converted into MIC physical addresses on
-		 * the remote side if the peer is a MIC; if the remote node
-		 * is the mgmt node, MIC physical addresses are transferred
-		 * directly.
-		 */
- tmp = scif_ioremap(dma_phys_lookup[j],
- loop_nr_contig_chunks *
- sizeof(*window->dma_addr),
- ep->remote_dev);
- tmp1 = scif_ioremap(num_pages_lookup[j],
- loop_nr_contig_chunks *
- sizeof(*window->num_pages),
- ep->remote_dev);
- if (scif_is_mgmt_node()) {
- memcpy_toio((void __force __iomem *)tmp,
- &window->dma_addr[i], loop_nr_contig_chunks
- * sizeof(*window->dma_addr));
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i], loop_nr_contig_chunks
- * sizeof(*window->num_pages));
- } else {
- if (scifdev_is_p2p(ep->remote_dev)) {
- /*
- * add remote node's base address for this node
- * to convert it into a MIC address
- */
- int m;
- dma_addr_t dma_addr;
-
- for (m = 0; m < loop_nr_contig_chunks; m++) {
- dma_addr = window->dma_addr[i + m] +
- ep->remote_dev->base_addr;
- writeq(dma_addr,
- (void __force __iomem *)&tmp[m]);
- }
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i],
- loop_nr_contig_chunks
- * sizeof(*window->num_pages));
- } else {
- /* Mgmt node or loopback - transfer DMA
- * addresses as is, this is the same as a
- * MIC physical address (we use the dma_addr
- * and not the phys_addr array since the
- * phys_addr is only setup if there is a mmap()
- * request from the mgmt node)
- */
- memcpy_toio((void __force __iomem *)tmp,
- &window->dma_addr[i],
- loop_nr_contig_chunks *
- sizeof(*window->dma_addr));
- memcpy_toio((void __force __iomem *)tmp1,
- &window->num_pages[i],
- loop_nr_contig_chunks *
- sizeof(*window->num_pages));
- }
- }
- remaining_nr_contig_chunks -= loop_nr_contig_chunks;
- i += loop_nr_contig_chunks;
- j++;
- scif_iounmap(tmp, loop_nr_contig_chunks *
- sizeof(*window->dma_addr), ep->remote_dev);
- scif_iounmap(tmp1, loop_nr_contig_chunks *
- sizeof(*window->num_pages), ep->remote_dev);
- }
-
- /* Prepare the remote window for the peer */
- remote_window->peer_window = (u64)window;
- remote_window->offset = window->offset;
- remote_window->prot = window->prot;
- remote_window->nr_contig_chunks = nr_contig_chunks;
- remote_window->ep = ep->remote_ep;
- scif_iounmap(num_pages_lookup,
- nr_lookup *
- sizeof(*remote_window->num_pages_lookup.lookup),
- ep->remote_dev);
- scif_iounmap(dma_phys_lookup,
- nr_lookup *
- sizeof(*remote_window->dma_addr_lookup.lookup),
- ep->remote_dev);
- scif_iounmap(remote_window, sizeof(*remote_window), ep->remote_dev);
- window->peer_window = alloc->vaddr;
- return err;
-}
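
The copies above target scif_ioremap()'d PCI BAR space, so they go through memcpy_toio()/writeq() rather than plain memcpy(), which is not guaranteed to be safe on __iomem mappings. A hedged sketch of the per-element variant used when each address needs a base fixup first (names and the fixup are hypothetical; writeq() assumes a 64-bit build or an io-64-nonatomic header):

static void demo_copy_fixed_up(u64 __iomem *dst, const u64 *src,
			       int n, u64 base)
{
	int i;

	/* each value gets the remote base added before hitting the BAR */
	for (i = 0; i < n; i++)
		writeq(src[i] + base, &dst[i]);
}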
-
-/**
- * scif_send_scif_register:
- * @ep: end point
- * @window: self registration window
- *
- * Send a SCIF_REGISTER message if the endpoint is connected and wait for a
- * SCIF_REGISTER_(N)ACK message; otherwise send a SCIF_FREE_VIRT message so
- * that the peer can free the remote window it allocated earlier.
- */
-static int scif_send_scif_register(struct scif_endpt *ep,
- struct scif_window *window)
-{
- int err = 0;
- struct scifmsg msg;
-
- msg.src = ep->port;
- msg.payload[0] = ep->remote_ep;
- msg.payload[1] = window->alloc_handle.vaddr;
- msg.payload[2] = (u64)window;
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED) {
- msg.uop = SCIF_REGISTER;
- window->reg_state = OP_IN_PROGRESS;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- if (!err) {
-retry:
- /* Wait for a SCIF_REGISTER_(N)ACK message */
- err = wait_event_timeout(window->regwq,
- window->reg_state !=
- OP_IN_PROGRESS,
- SCIF_NODE_ALIVE_TIMEOUT);
- if (!err && scifdev_alive(ep))
- goto retry;
- err = !err ? -ENODEV : 0;
- if (window->reg_state == OP_FAILED)
- err = -ENOTCONN;
- }
- } else {
- msg.uop = SCIF_FREE_VIRT;
- msg.payload[3] = SCIF_REGISTER;
- err = _scif_nodeqp_send(ep->remote_dev, &msg);
- spin_unlock(&ep->lock);
- if (!err)
- err = -ENOTCONN;
- }
- return err;
-}
-
-/**
- * scif_get_window_offset:
- * @ep: end point descriptor
- * @flags: flags
- * @offset: offset hint
- * @num_pages: number of pages
- * @out_offset: computed offset returned by reference.
- *
- * Compute/Claim a new offset for this EP.
- */
-int scif_get_window_offset(struct scif_endpt *ep, int flags, s64 offset,
- int num_pages, s64 *out_offset)
-{
- s64 page_index;
- struct iova *iova_ptr;
- int err = 0;
-
- if (flags & SCIF_MAP_FIXED) {
- page_index = SCIF_IOVA_PFN(offset);
- iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
- page_index + num_pages - 1);
- if (!iova_ptr)
- err = -EADDRINUSE;
- } else {
- iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
- SCIF_DMA_63BIT_PFN - 1, 0);
- if (!iova_ptr)
- err = -ENOMEM;
- }
- if (!err)
- *out_offset = (iova_ptr->pfn_lo) << PAGE_SHIFT;
- return err;
-}
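
The offset allocator rides on the kernel IOVA allocator from <linux/iova.h>: reserve_iova() claims an exact PFN range for SCIF_MAP_FIXED, while alloc_iova() returns any free range below a PFN limit. A hedged usage sketch against an already initialized domain:

static int demo_claim_range(struct iova_domain *iovad)
{
	struct iova *iv;

	/* any free 16-page range below the 63-bit PFN limit */
	iv = alloc_iova(iovad, 16, SCIF_DMA_63BIT_PFN - 1, 0);
	if (!iv)
		return -ENOMEM;

	/* release it again by its low PFN */
	free_iova(iovad, iv->pfn_lo);
	return 0;
}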
-
-/**
- * scif_free_window_offset:
- * @ep: end point descriptor
- * @window: registration window
- * @offset: Offset to be freed
- *
- * Free offset for this EP. The caller is expected to hold
- * the RMA mutex when calling this API.
- */
-void scif_free_window_offset(struct scif_endpt *ep,
- struct scif_window *window, s64 offset)
-{
-	if (!window || !window->offset_freed) {
- free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
- if (window)
- window->offset_freed = true;
- }
-}
-
-/**
- * scif_alloc_req: Respond to SCIF_ALLOC_REQ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side is requesting a memory allocation.
- */
-void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- int err;
- struct scif_window *window = NULL;
- int nr_pages = msg->payload[1];
-
- window = scif_create_remote_window(scifdev, nr_pages);
- if (!window) {
- err = -ENOMEM;
- goto error;
- }
-
- /* The peer's allocation request is granted */
- msg->uop = SCIF_ALLOC_GNT;
- msg->payload[0] = (u64)window;
- msg->payload[1] = window->mapped_offset;
- err = scif_nodeqp_send(scifdev, msg);
- if (err)
- scif_destroy_remote_window(window);
- return;
-error:
- /* The peer's allocation request is rejected */
- dev_err(&scifdev->sdev->dev,
- "%s %d error %d alloc_ptr %p nr_pages 0x%x\n",
- __func__, __LINE__, err, window, nr_pages);
- msg->uop = SCIF_ALLOC_REJ;
- scif_nodeqp_send(scifdev, msg);
-}
-
-/**
- * scif_alloc_gnt_rej: Respond to SCIF_ALLOC_GNT/REJ interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remote side responded to a memory allocation.
- */
-void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_allocmsg *handle = (struct scif_allocmsg *)msg->payload[2];
- struct scif_window *window = container_of(handle, struct scif_window,
- alloc_handle);
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- handle->vaddr = msg->payload[0];
- handle->phys_addr = msg->payload[1];
- if (msg->uop == SCIF_ALLOC_GNT)
- handle->state = OP_COMPLETED;
- else
- handle->state = OP_FAILED;
- wake_up(&handle->allocwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
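
Note how the handler recovers the enclosing window from the alloc-handle pointer that was round-tripped through the peer in payload[2]: the classic container_of() pattern. A generic illustration (types hypothetical):

struct demo_inner { int x; };

struct demo_outer {
	int other;
	struct demo_inner inner;
};

static struct demo_outer *demo_recover(struct demo_inner *p)
{
	/* map a pointer to an embedded member back to its container */
	return container_of(p, struct demo_outer, inner);
}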
-
-/**
- * scif_free_virt: Respond to SCIF_FREE_VIRT interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Free up the remote window memory allocated earlier.
- */
-void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window = (struct scif_window *)msg->payload[1];
-
- scif_destroy_remote_window(window);
-}
-
-static void
-scif_fixup_aper_base(struct scif_dev *dev, struct scif_window *window)
-{
- int j;
- struct scif_hw_dev *sdev = dev->sdev;
- phys_addr_t apt_base = 0;
-
- /*
- * Add the aperture base if the DMA address is not card relative
- * since the DMA addresses need to be an offset into the bar
- */
- if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
- sdev->aper && !sdev->card_rel_da)
- apt_base = sdev->aper->pa;
- else
- return;
-
- for (j = 0; j < window->nr_contig_chunks; j++) {
- if (window->num_pages[j])
- window->dma_addr[j] += apt_base;
- else
- break;
- }
-}
-
-/**
- * scif_recv_reg: Respond to SCIF_REGISTER interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Update remote window list with a new registered window.
- */
-void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
-
- mutex_lock(&ep->rma_info.rma_lock);
- spin_lock(&ep->lock);
- if (ep->state == SCIFEP_CONNECTED) {
- msg->uop = SCIF_REGISTER_ACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- scif_fixup_aper_base(ep->remote_dev, window);
- /* No further failures expected. Insert new window */
- scif_insert_window(window, &ep->rma_info.remote_reg_list);
- } else {
- msg->uop = SCIF_REGISTER_NACK;
- scif_nodeqp_send(ep->remote_dev, msg);
- }
- spin_unlock(&ep->lock);
- mutex_unlock(&ep->rma_info.rma_lock);
- /* free up any lookup resources now that page lists are transferred */
- scif_destroy_remote_lookup(ep->remote_dev, window);
- /*
- * We could not insert the window but we need to
- * destroy the window.
- */
- if (msg->uop == SCIF_REGISTER_NACK)
- scif_destroy_remote_window(window);
-}
-
-/**
- * scif_recv_unreg: Respond to SCIF_UNREGISTER interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Remove window from the remote registration list.
- */
-void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_rma_req req;
- struct scif_window *window = NULL;
- struct scif_window *recv_window =
- (struct scif_window *)msg->payload[0];
- struct scif_endpt *ep;
- int del_window = 0;
-
- ep = (struct scif_endpt *)recv_window->ep;
- req.out_window = &window;
- req.offset = recv_window->offset;
- req.prot = 0;
- req.nr_bytes = recv_window->nr_pages << PAGE_SHIFT;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.remote_reg_list;
- msg->payload[0] = ep->remote_ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- if (scif_query_window(&req)) {
- dev_err(&scifdev->sdev->dev,
- "%s %d -ENXIO\n", __func__, __LINE__);
- msg->uop = SCIF_UNREGISTER_ACK;
- goto error;
- }
- if (window) {
- if (window->ref_count)
- scif_put_window(window, window->nr_pages);
- else
- dev_err(&scifdev->sdev->dev,
- "%s %d ref count should be +ve\n",
- __func__, __LINE__);
- window->unreg_state = OP_COMPLETED;
- if (!window->ref_count) {
- msg->uop = SCIF_UNREGISTER_ACK;
- atomic_inc(&ep->rma_info.tw_refcount);
- ep->rma_info.async_list_del = 1;
- list_del_init(&window->list);
- del_window = 1;
- } else {
- /* NACK! There are valid references to this window */
- msg->uop = SCIF_UNREGISTER_NACK;
- }
- } else {
- /* The window did not make its way to the list at all. ACK */
- msg->uop = SCIF_UNREGISTER_ACK;
- scif_destroy_remote_window(recv_window);
- }
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- if (del_window)
- scif_drain_dma_intr(ep->remote_dev->sdev,
- ep->rma_info.dma_chan);
- scif_nodeqp_send(ep->remote_dev, msg);
- if (del_window)
- scif_queue_for_cleanup(window, &scif_info.rma);
-}
-
-/**
- * scif_recv_reg_ack: Respond to SCIF_REGISTER_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the thread waiting for window registration to complete.
- */
-void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[2];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->reg_state = OP_COMPLETED;
- wake_up(&window->regwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_reg_nack: Respond to SCIF_REGISTER_NACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the waiting thread to inform it that registration
- * cannot be completed.
- */
-void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[2];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->reg_state = OP_FAILED;
- wake_up(&window->regwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_unreg_ack: Respond to SCIF_UNREGISTER_ACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the thread waiting for window unregistration to complete.
- */
-void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->unreg_state = OP_COMPLETED;
- wake_up(&window->unregwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/**
- * scif_recv_unreg_nack: Respond to SCIF_UNREGISTER_NACK interrupt message
- * @scifdev: SCIF device
- * @msg: Interrupt message
- *
- * Wake up the waiting thread to inform it that unregistration
- * cannot be completed immediately.
- */
-void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg)
-{
- struct scif_window *window =
- (struct scif_window *)msg->payload[1];
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
-
- mutex_lock(&ep->rma_info.rma_lock);
- window->unreg_state = OP_FAILED;
- wake_up(&window->unregwq);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-int __scif_pin_pages(void *addr, size_t len, int *out_prot,
- int map_flags, scif_pinned_pages_t *pages)
-{
- struct scif_pinned_pages *pinned_pages;
- int nr_pages, err = 0, i;
- bool vmalloc_addr = false;
- bool try_upgrade = false;
- int prot = *out_prot;
- int ulimit = 0;
- struct mm_struct *mm = NULL;
-
- /* Unsupported flags */
- if (map_flags & ~(SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT))
- return -EINVAL;
- ulimit = !!(map_flags & SCIF_MAP_ULIMIT);
-
- /* Unsupported protection requested */
- if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
- return -EINVAL;
-
-	/* addr/len must be page-aligned. len must be non-zero */
- if (!len ||
- (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
- (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
- return -EINVAL;
-
- might_sleep();
-
- nr_pages = len >> PAGE_SHIFT;
-
- /* Allocate a set of pinned pages */
- pinned_pages = scif_create_pinned_pages(nr_pages, prot);
- if (!pinned_pages)
- return -ENOMEM;
-
- if (map_flags & SCIF_MAP_KERNEL) {
- if (is_vmalloc_addr(addr))
- vmalloc_addr = true;
-
- for (i = 0; i < nr_pages; i++) {
- if (vmalloc_addr)
- pinned_pages->pages[i] =
- vmalloc_to_page(addr + (i * PAGE_SIZE));
- else
- pinned_pages->pages[i] =
- virt_to_page(addr + (i * PAGE_SIZE));
- }
- pinned_pages->nr_pages = nr_pages;
- pinned_pages->map_flags = SCIF_MAP_KERNEL;
- } else {
- /*
- * SCIF supports registration caching. If a registration has
- * been requested with read only permissions, then we try
- * to pin the pages with RW permissions so that a subsequent
- * transfer with RW permission can hit the cache instead of
- * invalidating it. If the upgrade fails with RW then we
- * revert back to R permission and retry
- */
- if (prot == SCIF_PROT_READ)
- try_upgrade = true;
- prot |= SCIF_PROT_WRITE;
-retry:
- mm = current->mm;
- if (ulimit) {
- err = __scif_check_inc_pinned_vm(mm, nr_pages);
- if (err) {
- pinned_pages->nr_pages = 0;
- goto error_unmap;
- }
- }
-
- pinned_pages->nr_pages = pin_user_pages_fast(
- (u64)addr,
- nr_pages,
- (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
- pinned_pages->pages);
- if (nr_pages != pinned_pages->nr_pages) {
- if (try_upgrade) {
- if (ulimit)
- __scif_dec_pinned_vm_lock(mm, nr_pages);
- /* Roll back any pinned pages */
- unpin_user_pages(pinned_pages->pages,
- pinned_pages->nr_pages);
- prot &= ~SCIF_PROT_WRITE;
- try_upgrade = false;
- goto retry;
- }
- }
- pinned_pages->map_flags = 0;
- }
-
- if (pinned_pages->nr_pages < nr_pages) {
- err = -EFAULT;
- pinned_pages->nr_pages = nr_pages;
- goto dec_pinned;
- }
-
- *out_prot = prot;
- atomic_set(&pinned_pages->ref_count, 1);
- *pages = pinned_pages;
- return err;
-dec_pinned:
- if (ulimit)
- __scif_dec_pinned_vm_lock(mm, nr_pages);
- /* Something went wrong! Rollback */
-error_unmap:
- pinned_pages->nr_pages = nr_pages;
- scif_destroy_pinned_pages(pinned_pages);
- *pages = NULL;
- dev_dbg(scif_info.mdev.this_device,
- "%s %d err %d len 0x%lx\n", __func__, __LINE__, err, len);
- return err;
-}
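
__scif_pin_pages() is built around pin_user_pages_fast(), whose contract matters here: on partial success the pinned prefix must be released with unpin_user_pages() before retrying or failing, exactly as the permission-downgrade path above does. A minimal hedged sketch of that contract (the helper name is hypothetical):

static int demo_pin_all_or_nothing(unsigned long uaddr, int nr,
				   struct page **pages)
{
	int got = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

	if (got < 0)
		return got;			/* nothing was pinned */
	if (got < nr) {
		unpin_user_pages(pages, got);	/* roll back the prefix */
		return -EFAULT;
	}
	return 0;
}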
-
-int scif_pin_pages(void *addr, size_t len, int prot,
- int map_flags, scif_pinned_pages_t *pages)
-{
- return __scif_pin_pages(addr, len, &prot, map_flags, pages);
-}
-EXPORT_SYMBOL_GPL(scif_pin_pages);
-
-int scif_unpin_pages(scif_pinned_pages_t pinned_pages)
-{
- int err = 0, ret;
-
- if (!pinned_pages || SCIFEP_MAGIC != pinned_pages->magic)
- return -EINVAL;
-
- ret = atomic_sub_return(1, &pinned_pages->ref_count);
- if (ret < 0) {
- dev_err(scif_info.mdev.this_device,
- "%s %d scif_unpin_pages called without pinning? rc %d\n",
- __func__, __LINE__, ret);
- return -EINVAL;
- }
- /*
- * Destroy the window if the ref count for this set of pinned
- * pages has dropped to zero. If it is positive then there is
- * a valid registered window which is backed by these pages and
- * it will be destroyed once all such windows are unregistered.
- */
- if (!ret)
- err = scif_destroy_pinned_pages(pinned_pages);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_unpin_pages);
-
-static inline void
-scif_insert_local_window(struct scif_window *window, struct scif_endpt *ep)
-{
- mutex_lock(&ep->rma_info.rma_lock);
- scif_insert_window(window, &ep->rma_info.reg_list);
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-off_t scif_register_pinned_pages(scif_epd_t epd,
- scif_pinned_pages_t pinned_pages,
- off_t offset, int map_flags)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 computed_offset;
- struct scif_window *window;
- int err;
- size_t len;
- struct device *spdev;
-
- /* Unsupported flags */
- if (map_flags & ~SCIF_MAP_FIXED)
- return -EINVAL;
-
- len = pinned_pages->nr_pages << PAGE_SHIFT;
-
- /*
- * Offset is not page aligned/negative or offset+len
- * wraps around with SCIF_MAP_FIXED.
- */
- if ((map_flags & SCIF_MAP_FIXED) &&
- ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset)))
- return -EINVAL;
-
- might_sleep();
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
- /*
- * It is an error to pass pinned_pages to scif_register_pinned_pages()
- * after calling scif_unpin_pages().
- */
- if (!atomic_add_unless(&pinned_pages->ref_count, 1, 0))
- return -EINVAL;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, map_flags, offset,
- len, &computed_offset);
- if (err) {
- atomic_sub(1, &pinned_pages->ref_count);
- return err;
- }
-
- /* Allocate and prepare self registration window */
- window = scif_create_window(ep, pinned_pages->nr_pages,
- computed_offset, false);
- if (!window) {
- atomic_sub(1, &pinned_pages->ref_count);
- scif_free_window_offset(ep, NULL, computed_offset);
- return -ENOMEM;
- }
-
- window->pinned_pages = pinned_pages;
- window->nr_pages = pinned_pages->nr_pages;
- window->prot = pinned_pages->prot;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- scif_destroy_window(ep, window);
- return err;
- }
- err = scif_send_alloc_request(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Prepare the remote registration window */
- err = scif_prep_remote_window(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Tell the peer about the new window */
- err = scif_send_scif_register(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- scif_put_peer_dev(spdev);
- /* No further failures expected. Insert new window */
- scif_insert_local_window(window, ep);
- return computed_offset;
-error_unmap:
- scif_destroy_window(ep, window);
- scif_put_peer_dev(spdev);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_register_pinned_pages);
-
-off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
- int prot, int map_flags)
-{
- scif_pinned_pages_t pinned_pages;
- off_t err;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- s64 computed_offset;
- struct scif_window *window;
- struct mm_struct *mm = NULL;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI register: ep %p addr %p len 0x%lx offset 0x%lx prot 0x%x map_flags 0x%x\n",
- epd, addr, len, offset, prot, map_flags);
- /* Unsupported flags */
- if (map_flags & ~(SCIF_MAP_FIXED | SCIF_MAP_KERNEL))
- return -EINVAL;
-
- /*
- * Offset is not page aligned/negative or offset+len
- * wraps around with SCIF_MAP_FIXED.
- */
- if ((map_flags & SCIF_MAP_FIXED) &&
- ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset)))
- return -EINVAL;
-
- /* Unsupported protection requested */
- if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))
- return -EINVAL;
-
-	/* addr/len must be page-aligned. len must be non-zero */
- if (!len || (ALIGN((u64)addr, PAGE_SIZE) != (u64)addr) ||
- (ALIGN(len, PAGE_SIZE) != len))
- return -EINVAL;
-
- might_sleep();
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- /* Compute the offset for this registration */
- err = scif_get_window_offset(ep, map_flags, offset,
- len >> PAGE_SHIFT, &computed_offset);
- if (err)
- return err;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- scif_free_window_offset(ep, NULL, computed_offset);
- return err;
- }
- /* Allocate and prepare self registration window */
- window = scif_create_window(ep, len >> PAGE_SHIFT,
- computed_offset, false);
- if (!window) {
- scif_free_window_offset(ep, NULL, computed_offset);
- scif_put_peer_dev(spdev);
- return -ENOMEM;
- }
-
- window->nr_pages = len >> PAGE_SHIFT;
-
- err = scif_send_alloc_request(ep, window);
- if (err) {
- scif_destroy_incomplete_window(ep, window);
- scif_put_peer_dev(spdev);
- return err;
- }
-
- if (!(map_flags & SCIF_MAP_KERNEL)) {
- mm = __scif_acquire_mm();
- map_flags |= SCIF_MAP_ULIMIT;
- }
- /* Pin down the pages */
- err = __scif_pin_pages(addr, len, &prot,
- map_flags & (SCIF_MAP_KERNEL | SCIF_MAP_ULIMIT),
- &pinned_pages);
- if (err) {
- scif_destroy_incomplete_window(ep, window);
- __scif_release_mm(mm);
- goto error;
- }
-
- window->pinned_pages = pinned_pages;
- window->prot = pinned_pages->prot;
- window->mm = mm;
-
- /* Prepare the remote registration window */
- err = scif_prep_remote_window(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- /* Tell the peer about the new window */
- err = scif_send_scif_register(ep, window);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- goto error_unmap;
- }
-
- scif_put_peer_dev(spdev);
- /* No further failures expected. Insert new window */
- scif_insert_local_window(window, ep);
- dev_dbg(&ep->remote_dev->sdev->dev,
- "SCIFAPI register: ep %p addr %p len 0x%lx computed_offset 0x%llx\n",
- epd, addr, len, computed_offset);
- return computed_offset;
-error_unmap:
- scif_destroy_window(ep, window);
-error:
- scif_put_peer_dev(spdev);
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %ld\n", __func__, __LINE__, err);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_register);
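
For reference, a kernel client of this (now removed) API would register a page-aligned buffer roughly as follows; epd is assumed to be an endpoint already connected with scif_connect(), and the returned offset (or negative errno) names the window for subsequent RMA calls:

#include <linux/scif.h>

static off_t demo_register_buf(scif_epd_t epd, void *buf, size_t len)
{
	/* let SCIF pick the offset; pin for read/write access */
	return scif_register(epd, buf, len, 0,
			     SCIF_PROT_READ | SCIF_PROT_WRITE,
			     SCIF_MAP_KERNEL);
}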
-
-int
-scif_unregister(scif_epd_t epd, off_t offset, size_t len)
-{
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct scif_window *window = NULL;
- struct scif_rma_req req;
- int nr_pages, err;
- struct device *spdev;
-
- dev_dbg(scif_info.mdev.this_device,
- "SCIFAPI unregister: ep %p offset 0x%lx len 0x%lx\n",
- ep, offset, len);
-	/* len must be page-aligned. len must be non-zero */
- if (!len ||
- (ALIGN((u64)len, PAGE_SIZE) != (u64)len))
- return -EINVAL;
-
- /* Offset is not page aligned or offset+len wraps around */
- if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset < 0) ||
- (len > LONG_MAX - offset))
- return -EINVAL;
-
- err = scif_verify_epd(ep);
- if (err)
- return err;
-
- might_sleep();
- nr_pages = len >> PAGE_SHIFT;
-
- req.out_window = &window;
- req.offset = offset;
- req.prot = 0;
- req.nr_bytes = len;
- req.type = SCIF_WINDOW_FULL;
- req.head = &ep->rma_info.reg_list;
-
- spdev = scif_get_peer_dev(ep->remote_dev);
- if (IS_ERR(spdev)) {
- err = PTR_ERR(spdev);
- return err;
- }
- mutex_lock(&ep->rma_info.rma_lock);
- /* Does a valid window exist? */
- err = scif_query_window(&req);
- if (err) {
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
- goto error;
- }
- /* Unregister all the windows in this range */
- err = scif_rma_list_unregister(window, offset, nr_pages);
- if (err)
- dev_err(&ep->remote_dev->sdev->dev,
- "%s %d err %d\n", __func__, __LINE__, err);
-error:
- mutex_unlock(&ep->rma_info.rma_lock);
- scif_put_peer_dev(spdev);
- return err;
-}
-EXPORT_SYMBOL_GPL(scif_unregister);
diff --git a/drivers/misc/mic/scif/scif_rma.h b/drivers/misc/mic/scif/scif_rma.h
deleted file mode 100644
index 964dd0fc3657..000000000000
--- a/drivers/misc/mic/scif/scif_rma.h
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Intel SCIF driver.
- *
- */
-#ifndef SCIF_RMA_H
-#define SCIF_RMA_H
-
-#include <linux/intel-iommu.h>
-#include <linux/mmu_notifier.h>
-
-#include "../bus/scif_bus.h"
-
-/* If this bit is set then the mark is a remote fence mark */
-#define SCIF_REMOTE_FENCE_BIT 31
-/* Magic value used to indicate a remote fence request */
-#define SCIF_REMOTE_FENCE BIT_ULL(SCIF_REMOTE_FENCE_BIT)
-
-#define SCIF_MAX_UNALIGNED_BUF_SIZE (1024 * 1024ULL)
-#define SCIF_KMEM_UNALIGNED_BUF_SIZE (SCIF_MAX_UNALIGNED_BUF_SIZE + \
- (L1_CACHE_BYTES << 1))
-
-#define SCIF_IOVA_START_PFN (1)
-#define SCIF_IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define SCIF_DMA_64BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(64))
-#define SCIF_DMA_63BIT_PFN SCIF_IOVA_PFN(DMA_BIT_MASK(63))
-
-/*
- * struct scif_endpt_rma_info - Per Endpoint Remote Memory Access Information
- *
- * @reg_list: List of registration windows for self
- * @remote_reg_list: List of registration windows for peer
- * @iovad: Offset generator
- * @rma_lock: Synchronizes access to self/remote list and also protects the
- * window from being destroyed while RMAs are in progress.
- * @tc_lock: Synchronizes access to temporary cached windows list
- * for SCIF Registration Caching.
- * @mmn_lock: Synchronizes access to the list of MMU notifiers registered
- * @tw_refcount: Keeps track of number of outstanding temporary registered
- * windows created by scif_vreadfrom/scif_vwriteto which have
- * not been destroyed.
- * @tcw_refcount: Same as tw_refcount but for temporary cached windows
- * @tcw_total_pages: Same as tcw_refcount but in terms of pages pinned
- * @mmn_list: MMU notifier so that we can destroy the windows when required
- * @fence_refcount: Keeps track of number of outstanding remote fence
- * requests which have been received by the peer.
- * @dma_chan: DMA channel used for all DMA transfers for this endpoint.
- * @async_list_del: Detect asynchronous list entry deletion
- * @vma_list: List of vmas with remote memory mappings
- * @markwq: Wait queue used for scif_fence_mark/scif_fence_wait
- */
-struct scif_endpt_rma_info {
- struct list_head reg_list;
- struct list_head remote_reg_list;
- struct iova_domain iovad;
- struct mutex rma_lock;
- spinlock_t tc_lock;
- struct mutex mmn_lock;
- atomic_t tw_refcount;
- atomic_t tcw_refcount;
- atomic_t tcw_total_pages;
- struct list_head mmn_list;
- atomic_t fence_refcount;
- struct dma_chan *dma_chan;
- int async_list_del;
- struct list_head vma_list;
- wait_queue_head_t markwq;
-};
-
-/*
- * struct scif_fence_info - used for tracking fence requests
- *
- * @state: State of this transfer
- * @wq: Fences wait on this queue
- * @dma_mark: Used for storing the DMA mark
- */
-struct scif_fence_info {
- enum scif_msg_state state;
- struct completion comp;
- int dma_mark;
-};
-
-/*
- * struct scif_remote_fence_info - used for tracking remote fence requests
- *
- * @msg: List of SCIF node QP fence messages
- * @list: Link to list of remote fence requests
- */
-struct scif_remote_fence_info {
- struct scifmsg msg;
- struct list_head list;
-};
-
-/*
- * Specifies whether an RMA operation can span across partial windows, a single
- * window or multiple contiguous windows. Mmaps can span across partial windows.
- * Unregistration can span across complete windows. scif_get_pages() can span a
- * single window. A window can also be of type self or peer.
- */
-enum scif_window_type {
- SCIF_WINDOW_PARTIAL,
- SCIF_WINDOW_SINGLE,
- SCIF_WINDOW_FULL,
- SCIF_WINDOW_SELF,
- SCIF_WINDOW_PEER
-};
-
-/* The number of physical addresses that can be stored in a PAGE. */
-#define SCIF_NR_ADDR_IN_PAGE (0x1000 >> 3)
-
-/*
- * struct scif_rma_lookup - RMA lookup data structure for page list transfers
- *
- * Store an array of lookup offsets. Each offset in this array maps
- * one 4K page containing 512 physical addresses i.e. 2MB. 512 such
- * offsets in a 4K page will correspond to 1GB of registered address space.
- *
- * @lookup: Array of offsets
- * @offset: DMA offset of lookup array
- */
-struct scif_rma_lookup {
- dma_addr_t *lookup;
- dma_addr_t offset;
-};
-
-/*
- * struct scif_pinned_pages - A set of pinned pages obtained with
- * scif_pin_pages() which could be part of multiple registered
- * windows across different end points.
- *
- * @nr_pages: Number of pages which is defined as a s64 instead of an int
- * to avoid sign extension with buffers >= 2GB
- * @prot: read/write protections
- * @map_flags: Flags specified during the pin operation
- * @ref_count: Reference count bumped in terms of number of pages
- * @magic: A magic value
- * @pages: Array of pointers to struct pages populated with pin_user_pages_fast(..)
- */
-struct scif_pinned_pages {
- s64 nr_pages;
- int prot;
- int map_flags;
- atomic_t ref_count;
- u64 magic;
- struct page **pages;
-};
-
-/*
- * struct scif_status - Stores DMA status update information
- *
- * @src_dma_addr: Source buffer DMA address
- * @val: src location for value to be written to the destination
- * @ep: SCIF endpoint
- */
-struct scif_status {
- dma_addr_t src_dma_addr;
- u64 val;
- struct scif_endpt *ep;
-};
-
-/*
- * struct scif_cb_arg - Stores the argument of the callback func
- *
- * @src_dma_addr: Source buffer DMA address
- * @status: DMA status
- * @ep: SCIF endpoint
- */
-struct scif_cb_arg {
- dma_addr_t src_dma_addr;
- struct scif_status *status;
- struct scif_endpt *ep;
-};
-
-/*
- * struct scif_window - Registration Window for Self and Remote
- *
- * @nr_pages: Number of pages which is defined as a s64 instead of an int
- * to avoid sign extension with buffers >= 2GB
- * @nr_contig_chunks: Number of contiguous physical chunks
- * @prot: read/write protections
- * @ref_count: reference count in terms of number of pages
- * @magic: Cookie to detect corruption
- * @offset: registered offset
- * @va_for_temp: va address that this window represents
- * @dma_mark: Used to determine if all DMAs against the window are done
- * @ep: Pointer to EP. Useful for passing EP around with messages to
- *	avoid expensive list traversals.
- * @list: link to list of windows for the endpoint
- * @type: self or peer window
- * @peer_window: Pointer to peer window. Useful for sending messages to peer
- * without requiring an extra list traversal
- * @unreg_state: unregistration state
- * @offset_freed: True if the offset has been freed
- * @temp: True for temporary windows created via scif_vreadfrom/scif_vwriteto
- * @mm: memory descriptor for the task_struct which initiated the RMA
- * @st: scatter gather table for DMA mappings with IOMMU enabled
- * @pinned_pages: The set of pinned_pages backing this window
- * @alloc_handle: Handle for sending ALLOC_REQ
- * @regwq: Wait Queue for a registration (N)ACK
- * @reg_state: Registration state
- * @unregwq: Wait Queue for an unregistration (N)ACK
- * @dma_addr_lookup: Lookup for physical addresses used for DMA
- * @nr_lookup: Number of entries in lookup
- * @mapped_offset: Offset used to map the window by the peer
- * @dma_addr: Array of physical addresses used for Mgmt node & MIC initiated DMA
- * @num_pages: Array specifying number of pages for each physical address
- */
-struct scif_window {
- s64 nr_pages;
- int nr_contig_chunks;
- int prot;
- int ref_count;
- u64 magic;
- s64 offset;
- unsigned long va_for_temp;
- int dma_mark;
- u64 ep;
- struct list_head list;
- enum scif_window_type type;
- u64 peer_window;
- enum scif_msg_state unreg_state;
- bool offset_freed;
- bool temp;
- struct mm_struct *mm;
- struct sg_table *st;
- union {
- struct {
- struct scif_pinned_pages *pinned_pages;
- struct scif_allocmsg alloc_handle;
- wait_queue_head_t regwq;
- enum scif_msg_state reg_state;
- wait_queue_head_t unregwq;
- };
- struct {
- struct scif_rma_lookup dma_addr_lookup;
- struct scif_rma_lookup num_pages_lookup;
- int nr_lookup;
- dma_addr_t mapped_offset;
- };
- };
- dma_addr_t *dma_addr;
- u64 *num_pages;
-} __packed;
-
-/*
- * scif_mmu_notif - SCIF mmu notifier information
- *
- * @mmu_notifier ep_mmu_notifier: MMU notifier operations
- * @tc_reg_list: List of temp registration windows for self
- * @mm: memory descriptor for the task_struct which initiated the RMA
- * @ep: SCIF endpoint
- * @list: link to list of MMU notifier information
- */
-struct scif_mmu_notif {
-#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier ep_mmu_notifier;
-#endif
- struct list_head tc_reg_list;
- struct mm_struct *mm;
- struct scif_endpt *ep;
- struct list_head list;
-};
-
-enum scif_rma_dir {
- SCIF_LOCAL_TO_REMOTE,
- SCIF_REMOTE_TO_LOCAL
-};
-
-extern struct kmem_cache *unaligned_cache;
-/* Initialize RMA for this EP */
-void scif_rma_ep_init(struct scif_endpt *ep);
-/* Check if epd can be uninitialized */
-int scif_rma_ep_can_uninit(struct scif_endpt *ep);
-/* Obtain a new offset. Caller must hold the RMA lock */
-int scif_get_window_offset(struct scif_endpt *ep, int flags,
- s64 offset, int nr_pages, s64 *out_offset);
-/* Free offset. Caller must hold the RMA lock */
-void scif_free_window_offset(struct scif_endpt *ep,
- struct scif_window *window, s64 offset);
-/* Create self registration window */
-struct scif_window *scif_create_window(struct scif_endpt *ep, int nr_pages,
- s64 offset, bool temp);
-/* Destroy self registration window. */
-int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window);
-void scif_unmap_window(struct scif_dev *remote_dev, struct scif_window *window);
-/* Map pages of self window to Aperture/PCI */
-int scif_map_window(struct scif_dev *remote_dev,
- struct scif_window *window);
-/* Unregister a self window */
-int scif_unregister_window(struct scif_window *window);
-/* Destroy remote registration window */
-void
-scif_destroy_remote_window(struct scif_window *window);
-/* Remove valid remote memory mappings from process address space */
-void scif_zap_mmaps(int node);
-/* Query if any applications have remote memory mappings */
-bool scif_rma_do_apps_have_mmaps(int node);
-/* Cleanup remote registration lists for zombie endpoints */
-void scif_cleanup_rma_for_zombies(int node);
-/* Reserve a DMA channel for a particular endpoint */
-int scif_reserve_dma_chan(struct scif_endpt *ep);
-/* Setup a DMA mark for an endpoint */
-int _scif_fence_mark(scif_epd_t epd, int *mark);
-int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
- enum scif_window_type type);
-void scif_alloc_req(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_alloc_gnt_rej(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_free_virt(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_reg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg_ack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_unreg_nack(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_munmap(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg);
-void scif_mmu_notif_handler(struct work_struct *work);
-void scif_rma_handle_remote_fences(void);
-void scif_rma_destroy_windows(void);
-void scif_rma_destroy_tcw_invalid(void);
-int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan);
-
-struct scif_window_iter {
- s64 offset;
- int index;
-};
-
-static inline void
-scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter)
-{
- iter->offset = window->offset;
- iter->index = 0;
-}
-
-dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
- size_t *nr_bytes,
- struct scif_window_iter *iter);
-static inline
-dma_addr_t __scif_off_to_dma_addr(struct scif_window *window, s64 off)
-{
- return scif_off_to_dma_addr(window, off, NULL, NULL);
-}
-
-static inline bool scif_unaligned(off_t src_offset, off_t dst_offset)
-{
- src_offset = src_offset & (L1_CACHE_BYTES - 1);
- dst_offset = dst_offset & (L1_CACHE_BYTES - 1);
- return !(src_offset == dst_offset);
-}
-
-/*
- * scif_zalloc:
- * @size: Size of the allocation request.
- *
- * Helper API which attempts to allocate zeroed pages via
- * __get_free_pages(..) first and then falls back on
- * vzalloc(..) if that fails.
- */
-static inline void *scif_zalloc(size_t size)
-{
- void *ret = NULL;
- size_t align = ALIGN(size, PAGE_SIZE);
-
- if (align && get_order(align) < MAX_ORDER)
- ret = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(align));
- return ret ? ret : vzalloc(align);
-}
-
-/*
- * scif_free:
- * @addr: Address to be freed.
- * @size: Size of the allocation.
- * Helper API which frees memory allocated via scif_zalloc().
- */
-static inline void scif_free(void *addr, size_t size)
-{
- size_t align = ALIGN(size, PAGE_SIZE);
-
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- free_pages((unsigned long)addr, get_order(align));
-}
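
The pair above keys the free path off is_vmalloc_addr(), so callers never track which branch satisfied the allocation; the only contract is to hand the original size back. A hedged usage sketch:

static void demo_alloc_roundtrip(void)
{
	size_t sz = 3 * PAGE_SIZE;
	void *buf = scif_zalloc(sz);

	if (!buf)
		return;
	/* ... use buf ... */
	scif_free(buf, sz);	/* must pass the same size back */
}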
-
-static inline void scif_get_window(struct scif_window *window, int nr_pages)
-{
- window->ref_count += nr_pages;
-}
-
-static inline void scif_put_window(struct scif_window *window, int nr_pages)
-{
- window->ref_count -= nr_pages;
-}
-
-static inline void scif_set_window_ref(struct scif_window *window, int nr_pages)
-{
- window->ref_count = nr_pages;
-}
-
-static inline void
-scif_queue_for_cleanup(struct scif_window *window, struct list_head *list)
-{
- spin_lock(&scif_info.rmalock);
- list_add_tail(&window->list, list);
- spin_unlock(&scif_info.rmalock);
- schedule_work(&scif_info.misc_work);
-}
-
-static inline void __scif_rma_destroy_tcw_helper(struct scif_window *window)
-{
- list_del_init(&window->list);
- scif_queue_for_cleanup(window, &scif_info.rma_tc);
-}
-
-static inline bool scif_is_iommu_enabled(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- return intel_iommu_enabled;
-#else
- return false;
-#endif
-}
-#endif /* SCIF_RMA_H */
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
deleted file mode 100644
index ef923ba134c8..000000000000
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#include "scif_main.h"
-#include <linux/mmu_notifier.h>
-#include <linux/highmem.h>
-
-/*
- * scif_insert_tcw:
- *
- * Insert a temp window into the temp registration list, sorted by va_for_temp.
- * RMA lock must be held.
- */
-void scif_insert_tcw(struct scif_window *window, struct list_head *head)
-{
- struct scif_window *curr = NULL;
- struct scif_window *prev = list_entry(head, struct scif_window, list);
- struct list_head *item;
-
- INIT_LIST_HEAD(&window->list);
- /* Compare with the tail; if the entry is the new tail, add it at the end */
- if (!list_empty(head)) {
- curr = list_entry(head->prev, struct scif_window, list);
- if (curr->va_for_temp < window->va_for_temp) {
- list_add_tail(&window->list, head);
- return;
- }
- }
- list_for_each(item, head) {
- curr = list_entry(item, struct scif_window, list);
- if (curr->va_for_temp > window->va_for_temp)
- break;
- prev = curr;
- }
- list_add(&window->list, &prev->list);
-}
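-
-/*
- * For illustration: with windows already at va_for_temp 0x1000 and
- * 0x3000, inserting one at 0x2000 walks past 0x1000, stops at 0x3000
- * and links the new window between the two; inserting at 0x4000 hits
- * the tail fast path above and is appended directly.
- */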
-
-/*
- * scif_insert_window:
- *
- * Insert a window into the self registration list, sorted by offset.
- * RMA lock must be held.
- */
-void scif_insert_window(struct scif_window *window, struct list_head *head)
-{
- struct scif_window *curr = NULL, *prev = NULL;
- struct list_head *item;
-
- INIT_LIST_HEAD(&window->list);
- list_for_each(item, head) {
- curr = list_entry(item, struct scif_window, list);
- if (curr->offset > window->offset)
- break;
- prev = curr;
- }
- if (!prev)
- list_add(&window->list, head);
- else
- list_add(&window->list, &prev->list);
- scif_set_window_ref(window, window->nr_pages);
-}
-
-/*
- * scif_query_tcw:
- *
- * Query the temp cached registration list of the endpoint for an
- * overlapping window. In case of a permission mismatch, destroy the
- * previous window. If permissions match and the overlap is partial,
- * destroy the window but return the new range. RMA lock must be held.
- */
-int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
-{
- struct list_head *item, *temp, *head = req->head;
- struct scif_window *window;
- u64 start_va_window, start_va_req = req->va_for_temp;
- u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;
-
- if (!req->nr_bytes)
- return -EINVAL;
- /*
- * Avoid traversing the entire list to find out that there
- * is no entry that matches
- */
- if (!list_empty(head)) {
- window = list_last_entry(head, struct scif_window, list);
- end_va_window = window->va_for_temp +
- (window->nr_pages << PAGE_SHIFT);
- if (start_va_req > end_va_window)
- return -ENXIO;
- }
- list_for_each_safe(item, temp, head) {
- window = list_entry(item, struct scif_window, list);
- start_va_window = window->va_for_temp;
- end_va_window = window->va_for_temp +
- (window->nr_pages << PAGE_SHIFT);
- if (start_va_req < start_va_window &&
- end_va_req < start_va_window)
- break;
- if (start_va_req >= end_va_window)
- continue;
- if ((window->prot & req->prot) == req->prot) {
- if (start_va_req >= start_va_window &&
- end_va_req <= end_va_window) {
- *req->out_window = window;
- return 0;
- }
- /* expand window */
- if (start_va_req < start_va_window) {
- req->nr_bytes +=
- start_va_window - start_va_req;
- req->va_for_temp = start_va_window;
- }
- if (end_va_req >= end_va_window)
- req->nr_bytes += end_va_window - end_va_req;
- }
- /* Destroy the old window to create a new one */
- __scif_rma_destroy_tcw_helper(window);
- break;
- }
- return -ENXIO;
-}
-
-/*
- * scif_query_window:
- *
- * Query the registration list and check if a valid contiguous
- * range of windows exists.
- * RMA lock must be held.
- */
-int scif_query_window(struct scif_rma_req *req)
-{
- struct list_head *item;
- struct scif_window *window;
- s64 end_offset, offset = req->offset;
- u64 tmp_min, nr_bytes_left = req->nr_bytes;
-
- if (!req->nr_bytes)
- return -EINVAL;
-
- list_for_each(item, req->head) {
- window = list_entry(item, struct scif_window, list);
- end_offset = window->offset +
- (window->nr_pages << PAGE_SHIFT);
- if (offset < window->offset)
- /* Offset not found! */
- return -ENXIO;
- if (offset >= end_offset)
- continue;
- /* Check read/write protections. */
- if ((window->prot & req->prot) != req->prot)
- return -EPERM;
- if (nr_bytes_left == req->nr_bytes)
- /* Store the first window */
- *req->out_window = window;
- tmp_min = min((u64)end_offset - offset, nr_bytes_left);
- nr_bytes_left -= tmp_min;
- offset += tmp_min;
- /*
- * Range requested encompasses
- * multiple windows contiguously.
- */
- if (!nr_bytes_left) {
- /* Done for partial window */
- if (req->type == SCIF_WINDOW_PARTIAL ||
- req->type == SCIF_WINDOW_SINGLE)
- return 0;
- /* Extra logic for full windows */
- if (offset == end_offset)
- /* Spanning multiple whole windows */
- return 0;
- /* Not spanning multiple whole windows */
- return -ENXIO;
- }
- if (req->type == SCIF_WINDOW_SINGLE)
- break;
- }
- dev_err(scif_info.mdev.this_device,
- "%s %d ENXIO\n", __func__, __LINE__);
- return -ENXIO;
-}
-
-/*
- * scif_rma_list_unregister:
- *
- * Traverse the self registration list starting from window:
- * 1) Call scif_unregister_window(..)
- * RMA lock must be held.
- */
-int scif_rma_list_unregister(struct scif_window *window,
- s64 offset, int nr_pages)
-{
- struct scif_endpt *ep = (struct scif_endpt *)window->ep;
- struct list_head *head = &ep->rma_info.reg_list;
- s64 end_offset;
- int err = 0;
- int loop_nr_pages;
- struct scif_window *_window;
-
- list_for_each_entry_safe_from(window, _window, head, list) {
- end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
- loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
- nr_pages);
- err = scif_unregister_window(window);
- if (err)
- return err;
- nr_pages -= loop_nr_pages;
- offset += (loop_nr_pages << PAGE_SHIFT);
- if (!nr_pages)
- break;
- }
- return 0;
-}
-
-/*
- * scif_unmap_all_windows:
- *
- * Traverse all the windows in the self registration list and:
- * 1) Delete any DMA mappings created
- */
-void scif_unmap_all_windows(scif_epd_t epd)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct list_head *head = &ep->rma_info.reg_list;
-
- mutex_lock(&ep->rma_info.rma_lock);
- list_for_each_safe(item, tmp, head) {
- window = list_entry(item, struct scif_window, list);
- scif_unmap_window(ep->remote_dev, window);
- }
- mutex_unlock(&ep->rma_info.rma_lock);
-}
-
-/*
- * scif_unregister_all_windows:
- *
- * Traverse all the windows in the self registration list and:
- * 1) Call scif_unregister_window(..)
- * RMA lock must be held.
- */
-int scif_unregister_all_windows(scif_epd_t epd)
-{
- struct list_head *item, *tmp;
- struct scif_window *window;
- struct scif_endpt *ep = (struct scif_endpt *)epd;
- struct list_head *head = &ep->rma_info.reg_list;
- int err = 0;
-
- mutex_lock(&ep->rma_info.rma_lock);
-retry:
- item = NULL;
- tmp = NULL;
- list_for_each_safe(item, tmp, head) {
- window = list_entry(item, struct scif_window, list);
- ep->rma_info.async_list_del = 0;
- err = scif_unregister_window(window);
- if (err)
- dev_err(scif_info.mdev.this_device,
- "%s %d err %d\n",
- __func__, __LINE__, err);
- /*
- * Need to restart list traversal if there has been
- * an asynchronous list entry deletion.
- */
- if (READ_ONCE(ep->rma_info.async_list_del))
- goto retry;
- }
- mutex_unlock(&ep->rma_info.rma_lock);
- if (!list_empty(&ep->rma_info.mmn_list)) {
- spin_lock(&scif_info.rmalock);
- list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
- spin_unlock(&scif_info.rmalock);
- schedule_work(&scif_info.mmu_notif_work);
- }
- return err;
-}
diff --git a/drivers/misc/mic/scif/scif_rma_list.h b/drivers/misc/mic/scif/scif_rma_list.h
deleted file mode 100644
index 0f8e0ed65614..000000000000
--- a/drivers/misc/mic/scif/scif_rma_list.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Intel SCIF driver.
- */
-#ifndef SCIF_RMA_LIST_H
-#define SCIF_RMA_LIST_H
-
-/*
- * struct scif_rma_req - Self Registration list RMA Request query
- *
- * @out_window: Returns the window if found
- * @offset: Starting offset
- * @nr_bytes: Number of bytes
- * @prot: Protection requested, i.e. read or write or both
- * @type: Specify single, partial or multiple windows
- * @head: Head of list on which to search
- * @va_for_temp: VA for searching temporary cached windows
- */
-struct scif_rma_req {
- struct scif_window **out_window;
- union {
- s64 offset;
- unsigned long va_for_temp;
- };
- size_t nr_bytes;
- int prot;
- enum scif_window_type type;
- struct list_head *head;
-};
-
-/* Insert */
-void scif_insert_window(struct scif_window *window, struct list_head *head);
-void scif_insert_tcw(struct scif_window *window,
- struct list_head *head);
-/* Query */
-int scif_query_window(struct scif_rma_req *request);
-int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *request);
-/* Called from close to unregister all self windows */
-int scif_unregister_all_windows(scif_epd_t epd);
-void scif_unmap_all_windows(scif_epd_t epd);
-/* Traverse list and unregister */
-int scif_rma_list_unregister(struct scif_window *window, s64 offset,
- int nr_pages);
-#endif /* SCIF_RMA_LIST_H */
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
deleted file mode 100644
index 579da3868c8e..000000000000
--- a/drivers/misc/mic/vop/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile - Intel MIC Linux driver.
-# Copyright(c) 2016, Intel Corporation.
-#
-obj-m := vop.o
-
-vop-objs += vop_main.o
-vop-objs += vop_debugfs.o
-vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
deleted file mode 100644
index 9d4f175f4dd1..000000000000
--- a/drivers/misc/mic/vop/vop_debugfs.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include "vop_main.h"
-
-static int vop_dp_show(struct seq_file *s, void *pos)
-{
- struct mic_device_desc *d;
- struct mic_device_ctrl *dc;
- struct mic_vqconfig *vqconfig;
- __u32 *features;
- __u8 *config;
- struct vop_info *vi = s->private;
- struct vop_device *vpdev = vi->vpdev;
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- int j, k;
-
- seq_printf(s, "Bootparam: magic 0x%x\n",
- bootparam->magic);
- seq_printf(s, "Bootparam: h2c_config_db %d\n",
- bootparam->h2c_config_db);
- seq_printf(s, "Bootparam: node_id %d\n",
- bootparam->node_id);
- seq_printf(s, "Bootparam: c2h_scif_db %d\n",
- bootparam->c2h_scif_db);
- seq_printf(s, "Bootparam: h2c_scif_db %d\n",
- bootparam->h2c_scif_db);
- seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
- bootparam->scif_host_dma_addr);
- seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
- bootparam->scif_card_dma_addr);
-
- for (j = sizeof(*bootparam);
- j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
- d = (void *)bootparam + j;
- dc = (void *)d + mic_aligned_desc_size(d);
-
- /* end of list */
- if (d->type == 0)
- break;
-
- if (d->type == -1)
- continue;
-
- seq_printf(s, "Type %d ", d->type);
- seq_printf(s, "Num VQ %d ", d->num_vq);
- seq_printf(s, "Feature Len %d\n", d->feature_len);
- seq_printf(s, "Config Len %d ", d->config_len);
- seq_printf(s, "Shutdown Status %d\n", d->status);
-
- for (k = 0; k < d->num_vq; k++) {
- vqconfig = mic_vq_config(d) + k;
- seq_printf(s, "vqconfig[%d]: ", k);
- seq_printf(s, "address 0x%llx ",
- vqconfig->address);
- seq_printf(s, "num %d ", vqconfig->num);
- seq_printf(s, "used address 0x%llx\n",
- vqconfig->used_address);
- }
-
- features = (__u32 *)mic_vq_features(d);
- seq_printf(s, "Features: Host 0x%x ", features[0]);
- seq_printf(s, "Guest 0x%x\n", features[1]);
-
- config = mic_vq_configspace(d);
- for (k = 0; k < d->config_len; k++)
- seq_printf(s, "config[%d]=%d\n", k, config[k]);
-
- seq_puts(s, "Device control:\n");
- seq_printf(s, "Config Change %d ", dc->config_change);
- seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
- seq_printf(s, "Guest Ack %d ", dc->guest_ack);
- seq_printf(s, "Host ack %d\n", dc->host_ack);
- seq_printf(s, "Used address updated %d ",
- dc->used_address_updated);
- seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
- seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
- seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
- }
- schedule_work(&vi->hotplug_work);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(vop_dp);
-
-static int vop_vdev_info_show(struct seq_file *s, void *unused)
-{
- struct vop_info *vi = s->private;
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
- int i, j;
-
- mutex_lock(&vi->vop_mutex);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
- vdev->virtio_id,
- vop_vdevup(vdev) ? "UP" : "DOWN",
- vdev->in_bytes,
- vdev->out_bytes,
- vdev->in_bytes_dma,
- vdev->out_bytes_dma);
- for (i = 0; i < MIC_MAX_VRINGS; i++) {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
- struct vop_vringh *vvr = &vdev->vvr[i];
- struct vringh *vrh = &vvr->vrh;
- int num = vrh->vring.num;
-
- if (!num)
- continue;
- desc = vrh->vring.desc;
- seq_printf(s, "vring i %d avail_idx %d",
- i, vvr->vring.info->avail_idx & (num - 1));
- seq_printf(s, " vring i %d avail_idx %d\n",
- i, vvr->vring.info->avail_idx);
- seq_printf(s, "vrh i %d weak_barriers %d",
- i, vrh->weak_barriers);
- seq_printf(s, " last_avail_idx %d last_used_idx %d",
- vrh->last_avail_idx, vrh->last_used_idx);
- seq_printf(s, " completed %d\n", vrh->completed);
- for (j = 0; j < num; j++) {
- seq_printf(s, "desc[%d] addr 0x%llx len %d",
- j, desc->addr, desc->len);
- seq_printf(s, " flags 0x%x next %d\n",
- desc->flags, desc->next);
- desc++;
- }
- avail = vrh->vring.avail;
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh,
- avail->idx) & (num - 1));
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "avail ring[%d] %d\n",
- j, avail->ring[j]);
- used = vrh->vring.used;
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx) & (num - 1));
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "used ring[%d] id %d len %d\n",
- j, vringh32_to_cpu(vrh,
- used->ring[j].id),
- vringh32_to_cpu(vrh,
- used->ring[j].len));
- }
- }
- mutex_unlock(&vi->vop_mutex);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(vop_vdev_info);
-
-void vop_init_debugfs(struct vop_info *vi)
-{
- char name[16];
-
- snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
- vi->dbg = debugfs_create_dir(name, NULL);
- debugfs_create_file("dp", 0444, vi->dbg, vi, &vop_dp_fops);
- debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vop_vdev_info_fops);
-}
-
-void vop_exit_debugfs(struct vop_info *vi)
-{
- debugfs_remove_recursive(vi->dbg);
-}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
deleted file mode 100644
index 55e7f21e51f4..000000000000
--- a/drivers/misc/mic/vop/vop_main.c
+++ /dev/null
@@ -1,783 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/dma-mapping.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-#include "vop_main.h"
-
-#define VOP_MAX_VRINGS 4
-
-/*
- * _vop_vdev - Allocated per virtio device instance injected by the peer.
- *
- * @vdev: Virtio device
- * @desc: Virtio device page descriptor
- * @dc: Virtio device control
- * @vpdev: VOP device which is the parent for this virtio device
- * @vr: Buffer for accessing the VRING
- * @used_virt: Virtual address of used ring
- * @used: DMA address of used ring
- * @used_size: Size of the used buffer
- * @reset_done: Track whether VOP reset is complete
- * @virtio_cookie: Cookie returned upon requesting an interrupt
- * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
- * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
- * @dnode: The destination node
- */
-struct _vop_vdev {
- struct virtio_device vdev;
- struct mic_device_desc __iomem *desc;
- struct mic_device_ctrl __iomem *dc;
- struct vop_device *vpdev;
- void __iomem *vr[VOP_MAX_VRINGS];
- void *used_virt[VOP_MAX_VRINGS];
- dma_addr_t used[VOP_MAX_VRINGS];
- int used_size[VOP_MAX_VRINGS];
- struct completion reset_done;
- struct mic_irq *virtio_cookie;
- int c2h_vdev_db;
- int h2c_vdev_db;
- int dnode;
-};
-
-#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
-
-#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *_vop_dev(struct _vop_vdev *vdev)
-{
- return vdev->vdev.dev.parent;
-}
-
-static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
-{
- return sizeof(*desc)
- + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
- + ioread8(&desc->feature_len) * 2
- + ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-_vop_vq_config(struct mic_device_desc __iomem *desc)
-{
- return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline u8 __iomem *
-_vop_vq_features(struct mic_device_desc __iomem *desc)
-{
- return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline u8 __iomem *
-_vop_vq_configspace(struct mic_device_desc __iomem *desc)
-{
- return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-
-static inline unsigned
-_vop_total_desc_size(struct mic_device_desc __iomem *desc)
-{
- return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-/* This gets the device's feature bits. */
-static u64 vop_get_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- u64 features = 0;
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
- u8 __iomem *in_features = _vop_vq_features(desc);
- int feature_len = ioread8(&desc->feature_len);
-
- bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++)
- if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT_ULL(i);
-
- return features;
-}
-
-static void vop_transport_features(struct virtio_device *vdev)
-{
- /*
- * Packed ring isn't enabled on virtio_vop for now,
- * because virtio_vop uses vring_new_virtqueue() which
- * creates virtio rings on preallocated memory.
- */
- __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
-}
-
-static int vop_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
- u8 feature_len = ioread8(&desc->feature_len);
- /* Second half of bitmap is features we accept. */
- u8 __iomem *out_features =
- _vop_vq_features(desc) + feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Give virtio_vop a chance to accept features. */
- vop_transport_features(vdev);
-
- memset_io(out_features, 0, feature_len);
- bits = min_t(unsigned, feature_len,
- sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
- &out_features[i / 8]);
- }
- return 0;
-}
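-
-/*
- * Feature area layout implied by the loops above, for a hypothetical
- * feature_len of 4: bytes [0..3] carry the bits offered by the host
- * (read in vop_get_features()), and bytes [4..7] carry the bits this
- * guest accepts (written back here).
- */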
-
-/*
- * Reading and writing elements in config space
- */
-static void vop_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
-}
-
-static void vop_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to notify it of status changes.
- */
-static u8 vop_get_status(struct virtio_device *vdev)
-{
- return ioread8(&to_vopvdev(vdev)->desc->status);
-}
-
-static void vop_set_status(struct virtio_device *dev, u8 status)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
-
- if (!status)
- return;
- iowrite8(status, &vdev->desc->status);
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void vop_reset_inform_host(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct mic_device_ctrl __iomem *dc = vdev->dc;
- struct vop_device *vpdev = vdev->vpdev;
- int retry;
-
- iowrite8(0, &dc->host_ack);
- iowrite8(1, &dc->vdev_reset);
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
-
- /* Wait till host completes all card accesses and acks the reset */
- for (retry = 100; retry--;) {
- if (ioread8(&dc->host_ack))
- break;
- msleep(100);
- }
-
- dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
-
- /* Reset status to 0 in case we timed out */
- iowrite8(0, &vdev->desc->status);
-}
-
-static void vop_reset(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
-
- dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
- __func__, dev->id.device);
-
- vop_reset_inform_host(dev);
- complete_all(&vdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool vop_notify(struct virtqueue *vq)
-{
- struct _vop_vdev *vdev = vq->priv;
- struct vop_device *vpdev = vdev->vpdev;
-
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
- return true;
-}
-
-static void vop_del_vq(struct virtqueue *vq, int n)
-{
- struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
- struct vop_device *vpdev = vdev->vpdev;
-
- dma_unmap_single(&vpdev->dev, vdev->used[n],
- vdev->used_size[n], DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vdev->used_virt[n],
- get_order(vdev->used_size[n]));
- vring_del_virtqueue(vq);
- vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
- vdev->vr[n] = NULL;
-}
-
-static void vop_del_vqs(struct virtio_device *dev)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct virtqueue *vq, *n;
- int idx = 0;
-
- dev_dbg(_vop_dev(vdev), "%s\n", __func__);
-
- list_for_each_entry_safe(vq, n, &dev->vqs, list)
- vop_del_vq(vq, idx++);
-}
-
-static struct virtqueue *vop_new_virtqueue(unsigned int index,
- unsigned int num,
- struct virtio_device *vdev,
- bool context,
- void *pages,
- bool (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq),
- const char *name,
- void *used)
-{
- bool weak_barriers = false;
- struct vring vring;
-
- vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
- vring.used = used;
-
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
-}
-
-/*
- * This routine will assign vrings allocated in host/IO memory. Note that code
- * in virtio_ring.c continues to access this IO memory as if it were local
- * memory, without IO accessors.
- */
-static struct virtqueue *vop_find_vq(struct virtio_device *dev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_vqconfig __iomem *vqconfig;
- struct mic_vqconfig config;
- struct virtqueue *vq;
- void __iomem *va;
- struct _mic_vring_info __iomem *info;
- void *used;
- int vr_size, _vr_size, err, magic;
- u8 type = ioread8(&vdev->desc->type);
-
- if (index >= ioread8(&vdev->desc->num_vq))
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return ERR_PTR(-ENOENT);
-
- /* First assign the vrings allocated in host memory */
- vqconfig = _vop_vq_config(vdev->desc) + index;
- memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
- vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
- if (!va)
- return ERR_PTR(-ENOMEM);
- vdev->vr[index] = va;
- memset_io(va, 0x0, _vr_size);
-
- info = va + _vr_size;
- magic = ioread32(&info->magic);
-
- if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
- err = -EIO;
- goto unmap;
- }
-
- vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(config.num));
- used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vdev->used_size[index]));
- vdev->used_virt[index] = used;
- if (!used) {
- err = -ENOMEM;
- dev_err(_vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto unmap;
- }
-
- vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
- (void __force *)va, vop_notify, callback,
- name, used);
- if (!vq) {
- err = -ENOMEM;
- goto free_used;
- }
-
- vdev->used[index] = dma_map_single(&vpdev->dev, used,
- vdev->used_size[index],
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
- err = -ENOMEM;
- dev_err(_vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto del_vq;
- }
- writeq(vdev->used[index], &vqconfig->used_address);
-
- vq->priv = vdev;
- return vq;
-del_vq:
- vring_del_virtqueue(vq);
-free_used:
- free_pages((unsigned long)used,
- get_order(vdev->used_size[index]));
-unmap:
- vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
- return ERR_PTR(err);
-}
-
-static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
- struct irq_affinity *desc)
-{
- struct _vop_vdev *vdev = to_vopvdev(dev);
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_device_ctrl __iomem *dc = vdev->dc;
- int i, err, retry, queue_idx = 0;
-
- /* We must have this many virtqueues. */
- if (nvqs > ioread8(&vdev->desc->num_vq))
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
- vqs[i] = NULL;
- continue;
- }
-
- dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
- __func__, i, names[i]);
- vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- iowrite8(1, &dc->used_address_updated);
- /*
- * Send an interrupt to the host to inform it that used
- * rings have been re-assigned.
- */
- vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
- for (retry = 100; --retry;) {
- if (!ioread8(&dc->used_address_updated))
- break;
- msleep(100);
- }
-
- dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
- if (!retry) {
- err = -ENODEV;
- goto error;
- }
-
- return 0;
-error:
- vop_del_vqs(dev);
- return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops vop_vq_config_ops = {
- .get_features = vop_get_features,
- .finalize_features = vop_finalize_features,
- .get = vop_get,
- .set = vop_set,
- .get_status = vop_get_status,
- .set_status = vop_set_status,
- .reset = vop_reset,
- .find_vqs = vop_find_vqs,
- .del_vqs = vop_del_vqs,
-};
-
-static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
-{
- struct _vop_vdev *vdev = data;
- struct vop_device *vpdev = vdev->vpdev;
- struct virtqueue *vq;
-
- vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
- list_for_each_entry(vq, &vdev->vdev.vqs, list)
- vring_interrupt(0, vq);
-
- return IRQ_HANDLED;
-}
-
-static void vop_virtio_release_dev(struct device *_d)
-{
- struct virtio_device *vdev =
- container_of(_d, struct virtio_device, dev);
- struct _vop_vdev *vop_vdev =
- container_of(vdev, struct _vop_vdev, vdev);
-
- kfree(vop_vdev);
-}
-
-/*
- * Adds a new device and registers it with virtio;
- * appropriate drivers are loaded by the device model.
- */
-static int _vop_add_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct vop_device *vpdev,
- int dnode)
-{
- struct _vop_vdev *vdev, *reg_dev = NULL;
- int ret;
- u8 type = ioread8(&d->type);
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
- vdev->vpdev = vpdev;
- vdev->vdev.dev.parent = &vpdev->dev;
- vdev->vdev.dev.release = vop_virtio_release_dev;
- vdev->vdev.id.device = type;
- vdev->vdev.config = &vop_vq_config_ops;
- vdev->desc = d;
- vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
- vdev->dnode = dnode;
- vdev->vdev.priv = (void *)(unsigned long)dnode;
- init_completion(&vdev->reset_done);
-
- vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
- vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
- vop_virtio_intr_handler, "virtio intr",
- vdev, vdev->h2c_vdev_db);
- if (IS_ERR(vdev->virtio_cookie)) {
- ret = PTR_ERR(vdev->virtio_cookie);
- goto kfree;
- }
- iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
- vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
-
- ret = register_virtio_device(&vdev->vdev);
- reg_dev = vdev;
- if (ret) {
- dev_err(_vop_dev(vdev),
- "Failed to register vop device %u type %u\n",
- offset, type);
- goto free_irq;
- }
- writeq((unsigned long)vdev, &vdev->dc->vdev);
- dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
- __func__, offset, type, vdev);
-
- return 0;
-
-free_irq:
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
-kfree:
- if (reg_dev)
- put_device(&vdev->vdev.dev);
- else
- kfree(vdev);
- return ret;
-}
-
-/*
- * match for a vop device with a specific desc pointer
- */
-static int vop_match_desc(struct device *dev, void *data)
-{
- struct virtio_device *_dev = dev_to_virtio(dev);
- struct _vop_vdev *vdev = to_vopvdev(_dev);
-
- return vdev->desc == (void __iomem *)data;
-}
-
-static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl __iomem *dc)
-{
- return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
-}
-
-static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
- unsigned int offset,
- struct vop_device *vpdev)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
-
- if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
- return;
-
- dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
- virtio_config_changed(&vdev->vdev);
- iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * Removes a virtio device if a hot-remove event has been
- * requested by the host.
- */
-static int _vop_remove_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct vop_device *vpdev)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + _vop_aligned_desc_size(d);
- struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
- u8 status;
- int ret = -1;
-
- if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
- struct device *dev = get_device(&vdev->vdev.dev);
-
- dev_dbg(&vpdev->dev,
- "%s %d config_change %d type %d vdev %p\n",
- __func__, __LINE__,
- ioread8(&dc->config_change), ioread8(&d->type), vdev);
- status = ioread8(&d->status);
- reinit_completion(&vdev->reset_done);
- unregister_virtio_device(&vdev->vdev);
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
- iowrite8(-1, &dc->h2c_vdev_db);
- if (status & VIRTIO_CONFIG_S_DRIVER_OK)
- wait_for_completion(&vdev->reset_done);
- put_device(dev);
- iowrite8(1, &dc->guest_ack);
- dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
- __func__, __LINE__, ioread8(&dc->guest_ack));
- iowrite8(-1, &d->type);
- ret = 0;
- }
- return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
- bool remove, int dnode)
-{
- s8 type;
- unsigned int i;
- struct mic_device_desc __iomem *d;
- struct mic_device_ctrl __iomem *dc;
- struct device *dev;
-
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
- d = dp + i;
- dc = (void __iomem *)d + _vop_aligned_desc_size(d);
- /*
- * This read barrier is paired with the corresponding write
- * barrier on the host which is inserted before adding or
- * removing a virtio device descriptor, by updating the type.
- */
- rmb();
- type = ioread8(&d->type);
-
- /* end of list */
- if (type == 0)
- break;
-
- if (type == -1)
- continue;
-
- /* device already exists */
- dev = device_find_child(&vpdev->dev, (void __force *)d,
- vop_match_desc);
- if (dev) {
- if (remove)
- iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
- &dc->config_change);
- put_device(dev);
- _vop_handle_config_change(d, i, vpdev);
- _vop_remove_device(d, i, vpdev);
- if (remove) {
- iowrite8(0, &dc->config_change);
- iowrite8(0, &dc->guest_ack);
- }
- continue;
- }
-
- /* new device */
- dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
- __func__, __LINE__, d);
- if (!remove)
- _vop_add_device(d, i, vpdev, dnode);
- }
-}
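-
-/*
- * Device page layout sketch implied by the scan above (entry sizes
- * vary per device): a mic_bootparam header followed by back-to-back
- * mic_device_desc + mic_device_ctrl pairs, terminated by a descriptor
- * of type 0; type == -1 marks a slot left by a removed device.
- */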
-
-static void vop_scan_devices(struct vop_info *vi,
- struct vop_device *vpdev, bool remove)
-{
- void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
-
- if (!dp)
- return;
- mutex_lock(&vi->vop_mutex);
- _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
- mutex_unlock(&vi->vop_mutex);
-}
-
-/*
- * vop_hotplug_devices tries to find changes in the device page.
- */
-static void vop_hotplug_devices(struct work_struct *work)
-{
- struct vop_info *vi = container_of(work, struct vop_info,
- hotplug_work);
-
- vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t vop_extint_handler(int irq, void *data)
-{
- struct vop_info *vi = data;
- struct mic_bootparam __iomem *bp;
- struct vop_device *vpdev = vi->vpdev;
-
- bp = vpdev->hw_ops->get_remote_dp(vpdev);
- dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
- __func__, __LINE__);
- vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
- schedule_work(&vi->hotplug_work);
- return IRQ_HANDLED;
-}
-
-static int vop_driver_probe(struct vop_device *vpdev)
-{
- struct vop_info *vi;
- int rc;
-
- vi = kzalloc(sizeof(*vi), GFP_KERNEL);
- if (!vi) {
- rc = -ENOMEM;
- goto exit;
- }
- dev_set_drvdata(&vpdev->dev, vi);
- vi->vpdev = vpdev;
-
- mutex_init(&vi->vop_mutex);
- INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
- if (vpdev->dnode) {
- rc = vop_host_init(vi);
- if (rc < 0)
- goto free;
- } else {
- struct mic_bootparam __iomem *bootparam;
-
- vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
-
- vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
- vi->cookie = vpdev->hw_ops->request_irq(vpdev,
- vop_extint_handler,
- "virtio_config_intr",
- vi, vi->h2c_config_db);
- if (IS_ERR(vi->cookie)) {
- rc = PTR_ERR(vi->cookie);
- goto free;
- }
- bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
- iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
- }
- vop_init_debugfs(vi);
- return 0;
-free:
- kfree(vi);
-exit:
- return rc;
-}
-
-static void vop_driver_remove(struct vop_device *vpdev)
-{
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
-
- if (vpdev->dnode) {
- vop_host_uninit(vi);
- } else {
- struct mic_bootparam __iomem *bootparam =
- vpdev->hw_ops->get_remote_dp(vpdev);
- if (bootparam)
- iowrite8(-1, &bootparam->h2c_config_db);
- vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
- flush_work(&vi->hotplug_work);
- vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
- }
- vop_exit_debugfs(vi);
- kfree(vi);
-}
-
-static const struct vop_device_id id_table[] = {
- { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
- { 0 },
-};
-
-static struct vop_driver vop_driver = {
- .driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
- .id_table = id_table,
- .probe = vop_driver_probe,
- .remove = vop_driver_remove,
-};
-
-module_vop_driver(vop_driver);
-
-MODULE_DEVICE_TABLE(mbus, id_table);
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/vop/vop_main.h b/drivers/misc/mic/vop/vop_main.h
deleted file mode 100644
index 2451d9218137..000000000000
--- a/drivers/misc/mic/vop/vop_main.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#ifndef _VOP_MAIN_H_
-#define _VOP_MAIN_H_
-
-#include <linux/vringh.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio.h>
-#include <linux/miscdevice.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-#include "../bus/vop_bus.h"
-
-/*
- * Note on endianness.
- * 1. Host can be either BE or LE
- * 2. Guest/card is LE. Host uses le_to_cpu to access desc/avail
- * rings and ioreadXX/iowriteXX to access used ring.
- * 3. Device page exposed by host to guest contains LE values. Guest
- * accesses these using ioreadXX/iowriteXX etc. This way in general we
- * obey the virtio spec according to which guest works with native
- * endianness and host is aware of guest endianness and does all
- * required endianness conversion.
- * 4. Data provided from user space to guest (in ADD_DEVICE and
- * CONFIG_CHANGE ioctl's) is not interpreted by the driver and should be
- * in guest endianness.
- */
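-
-/*
- * A minimal sketch of the convention above, using accessors that
- * appear in this driver: the host reads LE ring fields with
- * le16_to_cpu(vqconfig->num), while the guest touches the device page
- * through I/O accessors, e.g. ioread8(&d->type) and
- * iowrite8(1, &dc->guest_ack).
- */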
-
-/*
- * vop_info - Allocated per invocation of VOP probe
- *
- * @vpdev: VOP device
- * @hotplug_work: Handle virtio device creation, deletion and configuration
- * @cookie: Cookie received upon requesting a virtio configuration interrupt
- * @h2c_config_db: The doorbell used by the peer to indicate a config change
- * @vdev_list: List of "active" virtio devices injected in the peer node
- * @vop_mutex: Synchronize access to the device page as well as serialize
- * creation/deletion of virtio devices on the peer node
- * @dp: Peer device page information
- * @dbg: Debugfs entry
- * @dma_ch: The DMA channel used by this transport for data transfers.
- * @name: Name for this transport used in misc device creation.
- * @miscdev: The misc device registered.
- */
-struct vop_info {
- struct vop_device *vpdev;
- struct work_struct hotplug_work;
- struct mic_irq *cookie;
- int h2c_config_db;
- struct list_head vdev_list;
- struct mutex vop_mutex;
- void __iomem *dp;
- struct dentry *dbg;
- struct dma_chan *dma_ch;
- char name[16];
- struct miscdevice miscdev;
-};
-
-/**
- * struct vop_vringh - Virtio ring host information.
- *
- * @vring: The VOP vring used for setting up user space mappings.
- * @vrh: The host VRINGH used for accessing the card vrings.
- * @riov: The VRINGH read kernel IOV.
- * @wiov: The VRINGH write kernel IOV.
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
- * @vr_mutex: Mutex for synchronizing access to the VRING.
- * @buf: Temporary kernel buffer used to copy in/out data
- * from/to the card via DMA.
- * @buf_da: dma address of buf.
- * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
- */
-struct vop_vringh {
- struct mic_vring vring;
- struct vringh vrh;
- struct vringh_kiov riov;
- struct vringh_kiov wiov;
- u16 head;
- struct mutex vr_mutex;
- void *buf;
- dma_addr_t buf_da;
- struct vop_vdev *vdev;
-};
-
-/**
- * struct vop_vdev - Host information for a card Virtio device.
- *
- * @virtio_id: Virtio device id.
- * @waitq: Waitqueue to allow ring3 apps to poll.
- * @vpdev: Pointer to VOP bus device.
- * @poll_wake: Used for waking up threads blocked in poll.
- * @out_bytes: Debug stats for number of bytes copied from host to card.
- * @in_bytes: Debug stats for number of bytes copied from card to host.
- * @out_bytes_dma: Debug stats for number of bytes copied from host to card
- * using DMA.
- * @in_bytes_dma: Debug stats for number of bytes copied from card to host
- * using DMA.
- * @tx_len_unaligned: Debug stats for number of bytes copied to the card where
- * the transfer length did not have the required DMA alignment.
- * @tx_dst_unaligned: Debug stats for number of bytes copied where the
- * destination address on the card did not have the required DMA alignment.
- * @vvr: Store per VRING data structures.
- * @virtio_bh_work: Work struct used to schedule virtio bottom half handling.
- * @dd: Virtio device descriptor.
- * @dc: Virtio device control fields.
- * @list: List of Virtio devices.
- * @virtio_db: The doorbell used by the card to interrupt the host.
- * @virtio_cookie: The cookie returned while requesting interrupts.
- * @vi: Transport information.
- * @vdev_mutex: Mutex synchronizing virtio device injection,
- * removal and data transfers.
- * @destroy: Track if a virtio device is being destroyed.
- * @deleted: The virtio device has been deleted.
- */
-struct vop_vdev {
- int virtio_id;
- wait_queue_head_t waitq;
- struct vop_device *vpdev;
- int poll_wake;
- unsigned long out_bytes;
- unsigned long in_bytes;
- unsigned long out_bytes_dma;
- unsigned long in_bytes_dma;
- unsigned long tx_len_unaligned;
- unsigned long tx_dst_unaligned;
- unsigned long rx_dst_unaligned;
- struct vop_vringh vvr[MIC_MAX_VRINGS];
- struct work_struct virtio_bh_work;
- struct mic_device_desc *dd;
- struct mic_device_ctrl *dc;
- struct list_head list;
- int virtio_db;
- struct mic_irq *virtio_cookie;
- struct vop_info *vi;
- struct mutex vdev_mutex;
- struct completion destroy;
- bool deleted;
-};
-
-/* Helper API to check if a virtio device is running */
-static inline bool vop_vdevup(struct vop_vdev *vdev)
-{
- return !!vdev->dd->status;
-}
-
-void vop_init_debugfs(struct vop_info *vi);
-void vop_exit_debugfs(struct vop_info *vi);
-int vop_host_init(struct vop_info *vi);
-void vop_host_uninit(struct vop_info *vi);
-#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
deleted file mode 100644
index 30eac172f017..000000000000
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ /dev/null
@@ -1,1158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Intel Virtio Over PCIe (VOP) driver.
- */
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-
-#include <linux/mic_ioctl.h>
-#include "vop_main.h"
-
-/* Helper API to obtain the VOP PCIe device */
-static inline struct device *vop_dev(struct vop_vdev *vdev)
-{
- return vdev->vpdev->dev.parent;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int vop_vdev_inited(struct vop_vdev *vdev)
-{
- if (!vdev)
- return -EINVAL;
- /* Device has not been created yet */
- if (!vdev->dd || !vdev->dd->type) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
- /* Device has been removed/deleted */
- if (vdev->dd->type == -1) {
- dev_dbg(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, -ENODEV);
- return -ENODEV;
- }
- return 0;
-}
-
-static void _vop_notify(struct vringh *vrh)
-{
- struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
- struct vop_vdev *vdev = vvrh->vdev;
- struct vop_device *vpdev = vdev->vpdev;
- s8 db = vdev->dc->h2c_vdev_db;
-
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
-}
-
-static void vop_virtio_init_post(struct vop_vdev *vdev)
-{
- struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
- struct vop_device *vpdev = vdev->vpdev;
- int i, used_size;
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- used_size = PAGE_ALIGN(sizeof(u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(vqconfig->num));
- if (!le64_to_cpu(vqconfig[i].used_address)) {
- dev_warn(vop_dev(vdev), "used_address zero??\n");
- continue;
- }
- vdev->vvr[i].vrh.vring.used =
- (void __force *)vpdev->hw_ops->remap(
- vpdev,
- le64_to_cpu(vqconfig[i].used_address),
- used_size);
- }
-
- vdev->dc->used_address_updated = 0;
-
- dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
- __func__, vdev->virtio_id);
-}
-
-static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
-{
- int i;
-
- dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
- __func__, vdev->dd->status, vdev->virtio_id);
-
- for (i = 0; i < vdev->dd->num_vq; i++)
- /*
- * Avoid lockdep false positive. The + 1 is for the vop
- * mutex which is held in the reset devices code path.
- */
- mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
-
- /* 0 status means "reset" */
- vdev->dd->status = 0;
- vdev->dc->vdev_reset = 0;
- vdev->dc->host_ack = 1;
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vringh *vrh = &vdev->vvr[i].vrh;
-
- vdev->vvr[i].vring.info->avail_idx = 0;
- vrh->completed = 0;
- vrh->last_avail_idx = 0;
- vrh->last_used_idx = 0;
- }
-
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_unlock(&vdev->vvr[i].vr_mutex);
-}
-
-static void vop_virtio_reset_devices(struct vop_info *vi)
-{
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
-
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- vop_virtio_device_reset(vdev);
- vdev->poll_wake = 1;
- wake_up(&vdev->waitq);
- }
-}
-
-static void vop_bh_handler(struct work_struct *work)
-{
- struct vop_vdev *vdev = container_of(work, struct vop_vdev,
- virtio_bh_work);
-
- if (vdev->dc->used_address_updated)
- vop_virtio_init_post(vdev);
-
- if (vdev->dc->vdev_reset)
- vop_virtio_device_reset(vdev);
-
- vdev->poll_wake = 1;
- wake_up(&vdev->waitq);
-}
-
-static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
-{
- struct vop_vdev *vdev = data;
- struct vop_device *vpdev = vdev->vpdev;
-
- vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
- schedule_work(&vdev->virtio_bh_work);
- return IRQ_HANDLED;
-}
-
-static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
-{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int ret = 0, retry, i;
- struct vop_device *vpdev = vdev->vpdev;
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- s8 db = bootparam->h2c_config_db;
-
- mutex_lock(&vi->vop_mutex);
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
-
- if (db == -1 || vdev->dd->type == -1) {
- ret = -EIO;
- goto exit;
- }
-
- memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
- vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
- vpdev->hw_ops->send_intr(vpdev, db);
-
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake, vdev->dc->guest_ack,
- msecs_to_jiffies(100));
- if (ret)
- break;
- }
-
- dev_dbg(vop_dev(vdev),
- "%s %d retry: %d\n", __func__, __LINE__, retry);
- vdev->dc->config_change = 0;
- vdev->dc->guest_ack = 0;
-exit:
- for (i = 0; i < vdev->dd->num_vq; i++)
- mutex_unlock(&vdev->vvr[i].vr_mutex);
- mutex_unlock(&vi->vop_mutex);
- return ret;
-}
-
-static int vop_copy_dp_entry(struct vop_vdev *vdev,
- struct mic_device_desc *argp, __u8 *type,
- struct mic_device_desc **devpage)
-{
- struct vop_device *vpdev = vdev->vpdev;
- struct mic_device_desc *devp;
- struct mic_vqconfig *vqconfig;
- int ret = 0, i;
- bool slot_found = false;
-
- vqconfig = mic_vq_config(argp);
- for (i = 0; i < argp->num_vq; i++) {
- if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- }
-
- /* Find the first free device page entry */
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE - mic_total_desc_size(argp);
- i += mic_total_desc_size(devp)) {
- devp = vpdev->hw_ops->get_dp(vpdev) + i;
- if (devp->type == 0 || devp->type == -1) {
- slot_found = true;
- break;
- }
- }
- if (!slot_found) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- /*
- * Save off the type before doing the memcpy. Type will be set at the
- * end, after completing all initialization for the new device.
- */
- *type = argp->type;
- argp->type = 0;
- memcpy(devp, argp, mic_desc_size(argp));
-
- *devpage = devp;
-exit:
- return ret;
-}
-
-static void vop_init_device_ctrl(struct vop_vdev *vdev,
- struct mic_device_desc *devpage)
-{
- struct mic_device_ctrl *dc;
-
- dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
- dc->config_change = 0;
- dc->guest_ack = 0;
- dc->vdev_reset = 0;
- dc->host_ack = 0;
- dc->used_address_updated = 0;
- dc->c2h_vdev_db = -1;
- dc->h2c_vdev_db = -1;
- vdev->dc = dc;
-}
-
-static int vop_virtio_add_device(struct vop_vdev *vdev,
- struct mic_device_desc *argp)
-{
- struct vop_info *vi = vdev->vi;
- struct vop_device *vpdev = vi->vpdev;
- struct mic_device_desc *dd = NULL;
- struct mic_vqconfig *vqconfig;
- int vr_size, i, j, ret;
- u8 type = 0;
- s8 db = -1;
- char irqname[16];
- struct mic_bootparam *bootparam;
- u16 num;
- dma_addr_t vr_addr;
-
- bootparam = vpdev->hw_ops->get_dp(vpdev);
- init_waitqueue_head(&vdev->waitq);
- INIT_LIST_HEAD(&vdev->list);
- vdev->vpdev = vpdev;
-
- ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
-
- vop_init_device_ctrl(vdev, dd);
-
- vdev->dd = dd;
- vdev->virtio_id = type;
- vqconfig = mic_vq_config(dd);
- INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
-
- for (i = 0; i < dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
- struct mic_vring *vr = &vdev->vvr[i].vring;
-
- num = le16_to_cpu(vqconfig[i].num);
- mutex_init(&vvr->vr_mutex);
- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
- sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
- if (!vr->va) {
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vr->len = vr_size;
- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
- vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
- vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&vpdev->dev, vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vqconfig[i].address = cpu_to_le64(vr_addr);
-
- vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
- ret = vringh_init_kern(&vvr->vrh,
- *(u32 *)mic_vq_features(vdev->dd),
- num, false, vr->vr.desc, vr->vr.avail,
- vr->vr.used);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vringh_kiov_init(&vvr->riov, NULL, 0);
- vringh_kiov_init(&vvr->wiov, NULL, 0);
- vvr->head = USHRT_MAX;
- vvr->vdev = vdev;
- vvr->vrh.notify = _vop_notify;
- dev_dbg(&vpdev->dev,
- "%s %d index %d va %p info %p vr_size 0x%x\n",
- __func__, __LINE__, i, vr->va, vr->info, vr_size);
- vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vvr->buf_da = dma_map_single(&vpdev->dev,
- vvr->buf, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- }
-
- snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
- vdev->virtio_id);
- vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
- vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
- _vop_virtio_intr_handler, irqname, vdev,
- vdev->virtio_db);
- if (IS_ERR(vdev->virtio_cookie)) {
- ret = PTR_ERR(vdev->virtio_cookie);
- dev_dbg(&vpdev->dev, "request irq failed\n");
- goto err;
- }
-
- vdev->dc->c2h_vdev_db = vdev->virtio_db;
-
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- dd->type = type;
- argp->type = type;
-
- if (bootparam) {
- db = bootparam->h2c_config_db;
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
- }
- dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
- return 0;
-err:
- vqconfig = mic_vq_config(dd);
- for (j = 0; j < i; j++) {
- struct vop_vringh *vvr = &vdev->vvr[j];
-
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
- }
- return ret;
-}
-
-static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
- struct vop_device *vpdev)
-{
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
- s8 db;
- int ret, retry;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
-
- devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
- db = bootparam->h2c_config_db;
- if (db != -1)
- vpdev->hw_ops->send_intr(vpdev, db);
- else
- goto done;
- for (retry = 15; retry--;) {
- ret = wait_event_timeout(wake, devp->guest_ack,
- msecs_to_jiffies(1000));
- if (ret)
- break;
- }
-done:
- devp->config_change = 0;
- devp->guest_ack = 0;
-}
-
-static void vop_virtio_del_device(struct vop_vdev *vdev)
-{
- struct vop_info *vi = vdev->vi;
- struct vop_device *vpdev = vdev->vpdev;
- int i;
- struct mic_vqconfig *vqconfig;
- struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
-
- if (!bootparam)
- goto skip_hot_remove;
- vop_dev_remove(vi, vdev->dc, vpdev);
-skip_hot_remove:
- vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
- flush_work(&vdev->virtio_bh_work);
- vqconfig = mic_vq_config(vdev->dd);
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
-
- dma_unmap_single(&vpdev->dev,
- vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->buf,
- get_order(VOP_INT_DMA_BUF_SIZE));
- vringh_kiov_cleanup(&vvr->riov);
- vringh_kiov_cleanup(&vvr->wiov);
- dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
- vvr->vring.len, DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vvr->vring.va,
- get_order(vvr->vring.len));
- }
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- vdev->dd->type = -1;
-}
-
-/*
- * vop_sync_dma - Wrapper for synchronous DMAs.
- *
- * @vdev: The VOP virtio device issuing the transfer.
- * @dst: Destination DMA address.
- * @src: Source DMA address.
- * @len: Size of the transfer.
- *
- * Returns 0 on success.
- */
-static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
- size_t len)
-{
- int err = 0;
- struct dma_device *ddev;
- struct dma_async_tx_descriptor *tx;
- struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- struct dma_chan *vop_ch = vi->dma_ch;
-
- if (!vop_ch) {
- err = -EBUSY;
- goto error;
- }
- ddev = vop_ch->device;
- tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
- DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- } else {
- dma_cookie_t cookie;
-
- cookie = tx->tx_submit(tx);
- if (dma_submit_error(cookie)) {
- err = -ENOMEM;
- goto error;
- }
- dma_async_issue_pending(vop_ch);
- err = dma_sync_wait(vop_ch, cookie);
- }
-error:
- if (err)
- dev_err(&vi->vpdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
-}
-
-#define VOP_USE_DMA true
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
- struct vop_vringh *vvr = &vdev->vvr[vr_idx];
- struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
- size_t dma_alignment;
- bool x200;
- size_t dma_offset, partlen;
- int err;
-
- if (!VOP_USE_DMA || !vi->dma_ch) {
- if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- vdev->in_bytes += len;
- err = 0;
- goto err;
- }
-
- dma_alignment = 1 << vi->dma_ch->device->copy_align;
- x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
-
- dma_offset = daddr - round_down(daddr, dma_alignment);
- daddr -= dma_offset;
- len += dma_offset;
- /*
- * X100 uses DMA addresses as seen by the card, so adding
- * the aperture base is not required for DMA. However, x200
- * requires DMA addresses to be an offset into the BAR, so
- * add the aperture base for x200.
- */
- if (x200)
- daddr += vpdev->aper->pa;
- while (len) {
- partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
- err = vop_sync_dma(vdev, vvr->buf_da, daddr,
- ALIGN(partlen, dma_alignment));
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- if (copy_to_user(ubuf, vvr->buf + dma_offset,
- partlen - dma_offset)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- vdev->in_bytes_dma += partlen;
- vdev->in_bytes += partlen;
- len -= partlen;
- dma_offset = 0;
- }
- err = 0;
-err:
- vpdev->hw_ops->unmap(vpdev, dbuf);
- dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
- __func__, ubuf, dbuf, len, vr_idx);
- return err;
-}
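
A worked example of the fix-up above, assuming a 64-byte DMA alignment (all values illustrative): for daddr = 0x1003, round_down() yields 0x1000, so dma_offset = 3. The engine transfers from the aligned 0x1000 and the CPU-side copy then skips the three extra bytes:

        dma_offset = daddr - round_down(daddr, 64);     /* 3 */
        daddr -= dma_offset;                            /* 0x1000, DMA-safe */
        len += dma_offset;                              /* DMA slightly more */
        /* ... after the DMA completes: */
        copy_to_user(ubuf, vvr->buf + dma_offset, partlen - dma_offset);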
-
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct vop_device *vpdev = vdev->vpdev;
- void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len);
- struct vop_vringh *vvr = &vdev->vvr[vr_idx];
- struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
- size_t dma_alignment;
- bool x200;
- size_t partlen;
- bool dma = VOP_USE_DMA && vi->dma_ch;
- int err = 0;
-
- if (dma) {
- dma_alignment = 1 << vi->dma_ch->device->copy_align;
- x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
-
- if (daddr & (dma_alignment - 1)) {
- vdev->tx_dst_unaligned += len;
- dma = false;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- vdev->tx_len_unaligned += len;
- dma = false;
- }
- }
-
- if (!dma)
- goto memcpy;
-
- /*
- * X100 uses DMA addresses as seen by the card, so adding
- * the aperture base is not required for DMA. However, x200
- * requires DMA addresses to be an offset into the BAR, so
- * add the aperture base for x200.
- */
- if (x200)
- daddr += vpdev->aper->pa;
- while (len) {
- partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
-
- if (copy_from_user(vvr->buf, ubuf, partlen)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = vop_sync_dma(vdev, daddr, vvr->buf_da,
- ALIGN(partlen, dma_alignment));
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- vdev->out_bytes_dma += partlen;
- vdev->out_bytes += partlen;
- len -= partlen;
- }
-memcpy:
- /*
- * We are copying to IO below and should ideally use something
- * like copy_from_user_toio(..) if it existed.
- */
- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
- err = -EFAULT;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- vdev->out_bytes += len;
- err = 0;
-err:
- vpdev->hw_ops->unmap(vpdev, dbuf);
- dev_dbg(vop_dev(vdev),
- "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
- __func__, ubuf, dbuf, len, vr_idx);
- return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
-{
- int i;
- u32 total = iov->consumed;
-
- for (i = 0; i < iov->i; i++)
- total += iov->iov[i].iov_len;
- return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is because there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
- void __user *ubuf, size_t len, bool read, int vr_idx,
- size_t *out_len)
-{
- int ret = 0;
- size_t partlen, tot_len = 0;
-
- while (len && iov->i < iov->used) {
- struct kvec *kiov = &iov->iov[iov->i];
- unsigned long daddr = (unsigned long)kiov->iov_base;
-
- partlen = min(kiov->iov_len, len);
- if (read)
- ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
- daddr,
- kiov->iov_len,
- vr_idx);
- else
- ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
- daddr,
- kiov->iov_len,
- vr_idx);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= partlen;
- ubuf += partlen;
- tot_len += partlen;
- iov->consumed += partlen;
- kiov->iov_len -= partlen;
- kiov->iov_base += partlen;
- if (!kiov->iov_len) {
- /* Fix up old iov element then increment. */
- kiov->iov_len = iov->consumed;
- kiov->iov_base -= iov->consumed;
-
- iov->consumed = 0;
- iov->i++;
- }
- }
- *out_len = tot_len;
- return ret;
-}
-
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
-{
- int ret = 0;
- u32 iovcnt = copy->iovcnt;
- struct iovec iov;
- struct iovec __user *u_iov = copy->iov;
- void __user *ubuf = NULL;
- struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
- struct vringh_kiov *riov = &vvr->riov;
- struct vringh_kiov *wiov = &vvr->wiov;
- struct vringh *vrh = &vvr->vrh;
- u16 *head = &vvr->head;
- struct mic_vring *vr = &vvr->vring;
- size_t len = 0, out_len;
-
- copy->out_len = 0;
- /* Fetch a new IOVEC if all previous elements have been processed */
- if (riov->i == riov->used && wiov->i == wiov->used) {
- ret = vringh_getdesc_kern(vrh, riov, wiov,
- head, GFP_KERNEL);
- /* Check if there are available descriptors */
- if (ret <= 0)
- return ret;
- }
- while (iovcnt) {
- if (!len) {
- /* Copy over a new iovec from user space. */
- ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
- if (ret) {
- ret = -EINVAL;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len = iov.iov_len;
- ubuf = iov.iov_base;
- }
- /* Issue all the read descriptors first */
- ret = vop_vringh_copy(vdev, riov, ubuf, len,
- MIC_VRINGH_READ, copy->vr_idx, &out_len);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- /* Issue the write descriptors next */
- ret = vop_vringh_copy(vdev, wiov, ubuf, len,
- !MIC_VRINGH_READ, copy->vr_idx, &out_len);
- if (ret) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- if (!len) {
- /* One user space iovec is now completed */
- iovcnt--;
- u_iov++;
- }
- /* Exit loop if all elements in KIOVs have been processed. */
- if (riov->i == riov->used && wiov->i == wiov->used)
- break;
- }
- /*
- * Update the used ring if a descriptor was available and some data was
- * copied in/out and the user asked for a used ring update.
- */
- if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
- u32 total = 0;
-
- /* Determine the total data consumed */
- total += vop_vringh_iov_consumed(riov);
- total += vop_vringh_iov_consumed(wiov);
- vringh_complete_kern(vrh, *head, total);
- *head = USHRT_MAX;
- if (vringh_need_notify_kern(vrh) > 0)
- vringh_notify(vrh);
- vringh_kiov_cleanup(riov);
- vringh_kiov_cleanup(wiov);
- /* Update avail idx for user space */
- vr->info->avail_idx = vrh->last_avail_idx;
- }
- return ret;
-}
-
-static inline int vop_verify_copy_args(struct vop_vdev *vdev,
- struct mic_copy_desc *copy)
-{
- if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
- return -EINVAL;
- return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-static int vop_virtio_copy_desc(struct vop_vdev *vdev,
- struct mic_copy_desc *copy)
-{
- int err;
- struct vop_vringh *vvr;
-
- err = vop_verify_copy_args(vdev, copy);
- if (err)
- return err;
-
- vvr = &vdev->vvr[copy->vr_idx];
- mutex_lock(&vvr->vr_mutex);
- if (!vop_vdevup(vdev)) {
- err = -ENODEV;
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = _vop_virtio_copy(vdev, copy);
- if (err) {
- dev_err(vop_dev(vdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- }
-err:
- mutex_unlock(&vvr->vr_mutex);
- return err;
-}
-
-static int vop_open(struct inode *inode, struct file *f)
-{
- struct vop_vdev *vdev;
- struct vop_info *vi = container_of(f->private_data,
- struct vop_info, miscdev);
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
- vdev->vi = vi;
- mutex_init(&vdev->vdev_mutex);
- f->private_data = vdev;
- init_completion(&vdev->destroy);
- complete(&vdev->destroy);
- return 0;
-}
-
-static int vop_release(struct inode *inode, struct file *f)
-{
- struct vop_vdev *vdev = f->private_data, *vdev_tmp;
- struct vop_info *vi = vdev->vi;
- struct list_head *pos, *tmp;
- bool found = false;
-
- mutex_lock(&vdev->vdev_mutex);
- if (vdev->deleted)
- goto unlock;
- mutex_lock(&vi->vop_mutex);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev_tmp = list_entry(pos, struct vop_vdev, list);
- if (vdev == vdev_tmp) {
- vop_virtio_del_device(vdev);
- list_del(pos);
- found = true;
- break;
- }
- }
- mutex_unlock(&vi->vop_mutex);
-unlock:
- mutex_unlock(&vdev->vdev_mutex);
- if (!found)
- wait_for_completion(&vdev->destroy);
- f->private_data = NULL;
- kfree(vdev);
- return 0;
-}
-
-static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct vop_vdev *vdev = f->private_data;
- struct vop_info *vi = vdev->vi;
- void __user *argp = (void __user *)arg;
- int ret;
-
- switch (cmd) {
- case MIC_VIRTIO_ADD_DEVICE:
- {
- struct mic_device_desc dd, *dd_config;
-
- if (copy_from_user(&dd, argp, sizeof(dd)))
- return -EFAULT;
-
- if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
- dd.num_vq > MIC_MAX_VRINGS)
- return -EINVAL;
-
- dd_config = memdup_user(argp, mic_desc_size(&dd));
- if (IS_ERR(dd_config))
- return PTR_ERR(dd_config);
-
- /* Ensure desc has not changed between the two reads */
- if (memcmp(&dd, dd_config, sizeof(dd))) {
- ret = -EINVAL;
- goto free_ret;
- }
- mutex_lock(&vdev->vdev_mutex);
- mutex_lock(&vi->vop_mutex);
- ret = vop_virtio_add_device(vdev, dd_config);
- if (ret)
- goto unlock_ret;
- list_add_tail(&vdev->list, &vi->vdev_list);
-unlock_ret:
- mutex_unlock(&vi->vop_mutex);
- mutex_unlock(&vdev->vdev_mutex);
-free_ret:
- kfree(dd_config);
- return ret;
- }
- case MIC_VIRTIO_COPY_DESC:
- {
- struct mic_copy_desc copy;
-
- mutex_lock(&vdev->vdev_mutex);
- ret = vop_vdev_inited(vdev);
- if (ret)
- goto _unlock_ret;
-
- if (copy_from_user(&copy, argp, sizeof(copy))) {
- ret = -EFAULT;
- goto _unlock_ret;
- }
-
- ret = vop_virtio_copy_desc(vdev, &copy);
- if (ret < 0)
- goto _unlock_ret;
- if (copy_to_user(
- &((struct mic_copy_desc __user *)argp)->out_len,
- &copy.out_len, sizeof(copy.out_len)))
- ret = -EFAULT;
-_unlock_ret:
- mutex_unlock(&vdev->vdev_mutex);
- return ret;
- }
- case MIC_VIRTIO_CONFIG_CHANGE:
- {
- void *buf;
-
- mutex_lock(&vdev->vdev_mutex);
- ret = vop_vdev_inited(vdev);
- if (ret)
- goto __unlock_ret;
- buf = memdup_user(argp, vdev->dd->config_len);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- goto __unlock_ret;
- }
- ret = vop_virtio_config_change(vdev, buf);
- kfree(buf);
-__unlock_ret:
- mutex_unlock(&vdev->vdev_mutex);
- return ret;
- }
- default:
- return -ENOIOCTLCMD;
-	}
- return 0;
-}
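
The MIC_VIRTIO_ADD_DEVICE branch above is an instance of the standard double-fetch guard: the descriptor header is read from user memory twice, so the second copy must match the first before the size it encodes is trusted. In outline (a sketch with illustrative names, not this driver's exact types):

        struct hdr h;
        void *full;

        if (copy_from_user(&h, uptr, sizeof(h)))    /* 1st fetch: size it */
                return -EFAULT;
        full = memdup_user(uptr, total_size(&h));   /* 2nd fetch: all of it */
        if (IS_ERR(full))
                return PTR_ERR(full);
        if (memcmp(&h, full, sizeof(h))) {          /* reject a racy rewrite */
                kfree(full);
                return -EINVAL;
        }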
-
-/*
- * We return EPOLLIN | EPOLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-static __poll_t vop_poll(struct file *f, poll_table *wait)
-{
- struct vop_vdev *vdev = f->private_data;
- __poll_t mask = 0;
-
- mutex_lock(&vdev->vdev_mutex);
- if (vop_vdev_inited(vdev)) {
- mask = EPOLLERR;
- goto done;
- }
- poll_wait(f, &vdev->waitq, wait);
- if (vop_vdev_inited(vdev)) {
- mask = EPOLLERR;
- } else if (vdev->poll_wake) {
- vdev->poll_wake = 0;
- mask = EPOLLIN | EPOLLOUT;
- }
-done:
- mutex_unlock(&vdev->vdev_mutex);
- return mask;
-}
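
Given the edge-style semantics documented above, user space has to drain on every wake-up. A hypothetical consumer loop (error handling elided; the drain condition is a sketch, since the exact contract depends on the descriptor layout):

        struct pollfd pfd = { .fd = vop_fd, .events = POLLIN };

        while (poll(&pfd, 1, -1) > 0) {
                /* Drain until no descriptor yields data, or the next
                 * wake-up may never come and the TX path stalls. */
                do {
                        if (ioctl(vop_fd, MIC_VIRTIO_COPY_DESC, &copy))
                                break;
                } while (copy.out_len > 0);
        }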
-
-static inline int
-vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
- unsigned long *size, unsigned long *pa)
-{
- struct vop_device *vpdev = vdev->vpdev;
- unsigned long start = MIC_DP_SIZE;
- int i;
-
- /*
- * MMAP interface is as follows:
- * offset region
- * 0x0 virtio device_page
- * 0x1000 first vring
- * 0x1000 + size of 1st vring second vring
- * ....
- */
- if (!offset) {
- *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
- *size = MIC_DP_SIZE;
- return 0;
- }
-
- for (i = 0; i < vdev->dd->num_vq; i++) {
- struct vop_vringh *vvr = &vdev->vvr[i];
-
- if (offset == start) {
- *pa = virt_to_phys(vvr->vring.va);
- *size = vvr->vring.len;
- return 0;
- }
- start += vvr->vring.len;
- }
- return -1;
-}
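
To make the offset map above concrete, assume a 4 KiB device page and two vrings of 16 KiB and 8 KiB (sizes illustrative; in practice they come from vvr->vring.len):

        offset 0x0000  ->  device page (MIC_DP_SIZE bytes)
        offset 0x1000  ->  vring 0 (16 KiB)
        offset 0x5000  ->  vring 1 (0x1000 + len of vring 0)
        other offsets  ->  vop_query_offset() returns -1, mmap fails -EINVAL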
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-static int vop_mmap(struct file *f, struct vm_area_struct *vma)
-{
- struct vop_vdev *vdev = f->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
- int i, err;
-
- err = vop_vdev_inited(vdev);
- if (err)
- goto ret;
- if (vma->vm_flags & VM_WRITE) {
- err = -EACCES;
- goto ret;
- }
- while (size_rem) {
- i = vop_query_offset(vdev, offset, &size, &pa);
- if (i < 0) {
- err = -EINVAL;
- goto ret;
- }
- err = remap_pfn_range(vma, vma->vm_start + offset,
- pa >> PAGE_SHIFT, size,
- vma->vm_page_prot);
- if (err)
- goto ret;
- size_rem -= size;
- offset += size;
- }
-ret:
- return err;
-}
-
-static const struct file_operations vop_fops = {
- .open = vop_open,
- .release = vop_release,
- .unlocked_ioctl = vop_ioctl,
- .poll = vop_poll,
- .mmap = vop_mmap,
- .owner = THIS_MODULE,
-};
-
-int vop_host_init(struct vop_info *vi)
-{
- int rc;
- struct miscdevice *mdev;
- struct vop_device *vpdev = vi->vpdev;
-
- INIT_LIST_HEAD(&vi->vdev_list);
- vi->dma_ch = vpdev->dma_ch;
- mdev = &vi->miscdev;
- mdev->minor = MISC_DYNAMIC_MINOR;
- snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
- mdev->name = vi->name;
- mdev->fops = &vop_fops;
- mdev->parent = &vpdev->dev;
-
- rc = misc_register(mdev);
- if (rc)
- dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
- return rc;
-}
-
-void vop_host_uninit(struct vop_info *vi)
-{
- struct list_head *pos, *tmp;
- struct vop_vdev *vdev;
-
- mutex_lock(&vi->vop_mutex);
- vop_virtio_reset_devices(vi);
- list_for_each_safe(pos, tmp, &vi->vdev_list) {
- vdev = list_entry(pos, struct vop_vdev, list);
- list_del(pos);
- reinit_completion(&vdev->destroy);
- mutex_unlock(&vi->vop_mutex);
- mutex_lock(&vdev->vdev_mutex);
- vop_virtio_del_device(vdev);
- vdev->deleted = true;
- mutex_unlock(&vdev->vdev_mutex);
- complete(&vdev->destroy);
- mutex_lock(&vi->vop_mutex);
- }
- mutex_unlock(&vi->vop_mutex);
- misc_deregister(&vi->miscdev);
-}
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig
index 6551007a066c..c9b0a27caf64 100644
--- a/drivers/misc/ocxl/Kconfig
+++ b/drivers/misc/ocxl/Kconfig
@@ -9,9 +9,8 @@ config OCXL_BASE
config OCXL
tristate "OpenCAPI coherent accelerator support"
- depends on PPC_POWERNV && PCI && EEH
+ depends on HOTPLUG_PCI_POWERNV
select OCXL_BASE
- select HOTPLUG_PCI_POWERNV
default m
help
Select this option to enable the ocxl driver for Open
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index 70f8f1c3929d..ecdcfae025b7 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -2,6 +2,7 @@
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <asm/pnv-ocxl.h>
+#include <asm/xive.h>
#include "ocxl_internal.h"
#include "trace.h"
@@ -10,7 +11,6 @@ struct afu_irq {
int hw_irq;
unsigned int virq;
char *name;
- u64 trigger_page;
irqreturn_t (*handler)(void *private);
void (*free_private)(void *private);
void *private;
@@ -124,8 +124,7 @@ int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id)
goto err_unlock;
}
- rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq,
- &irq->trigger_page);
+ rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq);
if (rc)
goto err_idr;
@@ -196,13 +195,16 @@ void ocxl_afu_irq_free_all(struct ocxl_context *ctx)
u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id)
{
+ struct xive_irq_data *xd;
struct afu_irq *irq;
u64 addr = 0;
mutex_lock(&ctx->irq_lock);
irq = idr_find(&ctx->irq_idr, irq_id);
- if (irq)
- addr = irq->trigger_page;
+ if (irq) {
+ xd = irq_get_handler_data(irq->virq);
+ addr = xd ? xd->trig_page : 0;
+ }
mutex_unlock(&ctx->irq_lock);
return addr;
}
diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c
index b7a09b21ab36..aebfc53a2d09 100644
--- a/drivers/misc/ocxl/core.c
+++ b/drivers/misc/ocxl/core.c
@@ -327,14 +327,9 @@ static void free_function_dev(struct device *dev)
static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev)
{
- int rc;
-
fn->dev.parent = &dev->dev;
fn->dev.release = free_function_dev;
- rc = dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
- if (rc)
- return rc;
- return 0;
+ return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev));
}
static int assign_function_actag(struct ocxl_fn *fn)
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 58d111afd9f6..fd73d3bc0eb6 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -6,6 +6,7 @@
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
+#include <asm/xive.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
@@ -682,23 +683,21 @@ unlock:
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
-int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
+int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
- int rc, irq;
- u64 addr;
+ int irq;
if (atomic_dec_if_positive(&link->irq_available) < 0)
return -ENOSPC;
- rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
- if (rc) {
+ irq = xive_native_alloc_irq();
+ if (!irq) {
atomic_inc(&link->irq_available);
- return rc;
+ return -ENXIO;
}
*hw_irq = irq;
- *trigger_addr = addr;
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
@@ -707,7 +706,7 @@ void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
- pnv_ocxl_free_xive_irq(hw_irq);
+ xive_native_free_irq(hw_irq);
atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index e060796f9caa..146ca6fb3260 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -70,11 +70,15 @@
#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_AM654 0xb00c
+#define PCI_DEVICE_ID_LS1088A 0x80c0
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
+#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
static DEFINE_IDA(pci_endpoint_test_ida);
@@ -945,13 +949,20 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
.driver_data = (kernel_ulong_t)&default_data,
},
- { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
+ .driver_data = (kernel_ulong_t)&default_data,
+ },
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
},
- { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index a6e1a8983e1f..e16a5e51006e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -143,13 +143,7 @@ static void pvpanic_unregister_acpi_driver(void) {}
static int pvpanic_mmio_probe(struct platform_device *pdev)
{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -EINVAL;
-
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 93bb49ddda1f..7ffcfc0bb587 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -7,6 +7,7 @@
* This file supports the user system call for file open, close, mmap, etc.
* This also includes the driver initialization code.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2008-2014 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -516,7 +517,7 @@ static int __init gru_init(void)
#if defined CONFIG_IA64
gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
- gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
+ gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG) &
0x7fffffffffffUL;
#endif
gru_start_vaddr = __va(gru_start_paddr);
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 06469b12aced..9f9af77f8d2e 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 2004-2008 Silicon Graphics, Inc. All rights reserved.
*/
@@ -17,11 +18,6 @@
#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
#include <asm/uv/uv.h>
-#define is_uv() is_uv_system()
-#endif
-
-#ifndef is_uv
-#define is_uv() 0
#endif
#ifdef USE_DBUG_ON
@@ -79,7 +75,7 @@
#define XPC_MSG_SIZE(_payload_size) \
ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \
- is_uv() ? 64 : 128)
+ is_uv_system() ? 64 : 128)
/*
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 61b03fcefb13..cf2965aa5c05 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -233,7 +234,7 @@ xp_init(void)
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
- if (is_uv())
+ if (is_uv_system())
ret = xp_init_uv();
else
ret = 0;
@@ -249,7 +250,7 @@ module_init(xp_init);
static void __exit
xp_exit(void)
{
- if (is_uv())
+ if (is_uv_system())
xp_exit_uv();
}
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index f15a9f2ac1dd..19fc7076af27 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -148,7 +149,9 @@ xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
enum xp_retval
xp_init_uv(void)
{
- BUG_ON(!is_uv());
+ WARN_ON(!is_uv_system());
+ if (!is_uv_system())
+ return xpUnsupported;
xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
#ifdef CONFIG_X86
@@ -168,5 +171,5 @@ xp_init_uv(void)
void
xp_exit_uv(void)
{
- BUG_ON(!is_uv());
+ WARN_ON(!is_uv_system());
}
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8a495dc82f16..e5244fc1dab3 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -1043,7 +1044,7 @@ xpc_do_exit(enum xp_retval reason)
xpc_teardown_partitions();
- if (is_uv())
+ if (is_uv_system())
xpc_exit_uv();
}
@@ -1226,7 +1227,7 @@ xpc_init(void)
dev_set_name(xpc_part, "part");
dev_set_name(xpc_chan, "chan");
- if (is_uv()) {
+ if (is_uv_system()) {
ret = xpc_init_uv();
} else {
@@ -1312,7 +1313,7 @@ out_2:
xpc_teardown_partitions();
out_1:
- if (is_uv())
+ if (is_uv_system())
xpc_exit_uv();
return ret;
}
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 099a53bdbb7d..57df06820bae 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -433,7 +434,7 @@ xpc_discovery(void)
*/
region_size = xp_region_size;
- if (is_uv())
+ if (is_uv_system())
max_regions = 256;
else {
max_regions = 64;
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 837d6c3fe69c..23837d0d6f4a 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -3,6 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
* Copyright (C) 1999-2009 Silicon Graphics, Inc. All rights reserved.
*/
@@ -515,7 +516,7 @@ xpnet_init(void)
{
int result;
- if (!is_uv())
+ if (!is_uv_system())
return -ENODEV;
dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index a5b8dab80c76..56dd98ab5a81 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -93,7 +93,7 @@ static long uacce_fops_compat_ioctl(struct file *filep,
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
- int pasid;
+ u32 pasid;
struct iommu_sva *handle;
if (!(uacce->flags & UACCE_DEV_SVA))
@@ -370,7 +370,7 @@ static struct attribute *uacce_dev_attrs[] = {
static umode_t uacce_dev_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct uacce_device *uacce = to_uacce_device(dev);
if (((attr == &dev_attr_region_mmio_size.attr) &&
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8531ae781195..c49065887e8f 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -657,8 +657,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
- qp_release_pages(produce_q->kernel_if->u.h.header_page,
- retval, false);
+ if (retval > 0)
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ retval, false);
err = VMCI_ERROR_NO_MEM;
goto out;
}
@@ -670,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
- qp_release_pages(consume_q->kernel_if->u.h.header_page,
- retval, false);
+ if (retval > 0)
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
+ retval, false);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM;
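
Both hunks above apply the same rule: get_user_pages_fast() can return a negative errno as well as a short positive count, and only pages that were actually pinned may be released. The general shape (sketch):

        retval = get_user_pages_fast(uva, num_pages, FOLL_WRITE, pages);
        if (retval < (int)num_pages) {
                if (retval > 0)                 /* short pin: undo it */
                        qp_release_pages(pages, retval, false);
                /* retval <= 0: nothing pinned, nothing to release */
                err = VMCI_ERROR_NO_MEM;
        }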
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index fa313b634135..8d3df0be0355 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -723,7 +723,7 @@ static int mmc_blk_check_blkdev(struct block_device *bdev)
* whole block device, not on a partition. This prevents overspray
* between sibling partitions.
*/
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
return -EPERM;
return 0;
}
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 70207f11a654..c2e70b757dd1 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -68,6 +68,7 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
+ unsigned int i;
int retval = 0;
switch (card->type) {
@@ -98,6 +99,17 @@ mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
card->cis.vendor, card->cis.device);
if (retval)
return retval;
+
+ retval = add_uevent_var(env, "SDIO_REVISION=%u.%u",
+ card->major_rev, card->minor_rev);
+ if (retval)
+ return retval;
+
+ for (i = 0; i < card->num_info; i++) {
+ retval = add_uevent_var(env, "SDIO_INFO%u=%s", i+1, card->info[i]);
+ if (retval)
+ return retval;
+ }
}
/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8ccae6452b9c..d42037f0f10d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2063,6 +2063,16 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
host->ops->hw_reset(host);
}
+/**
+ * mmc_hw_reset - reset the card in hardware
+ * @host: MMC host to which the card is attached
+ *
+ * Hard reset the card. This function is only for upper layers, like the
+ * block layer or card drivers. You cannot use it in host drivers (struct
+ * mmc_card might be gone then).
+ *
+ * Return: 0 on success, -errno on failure
+ */
int mmc_hw_reset(struct mmc_host *host)
{
int ret;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c8fae6611b73..96b2ca1f1b06 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -377,6 +377,20 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
EXPORT_SYMBOL(mmc_of_parse_voltage);
/**
+ * mmc_first_nonreserved_index() - get the first index that is not reserved
+ */
+static int mmc_first_nonreserved_index(void)
+{
+ int max;
+
+ max = of_alias_get_highest_id("mmc");
+ if (max < 0)
+ return 0;
+
+ return max + 1;
+}
+
+/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
* @dev: pointer to host device model structure
@@ -387,6 +401,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
int err;
struct mmc_host *host;
+ int alias_id, min_idx, max_idx;
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
if (!host)
@@ -395,7 +410,16 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
- err = ida_simple_get(&mmc_host_ida, 0, 0, GFP_KERNEL);
+ alias_id = of_alias_get_id(dev->of_node, "mmc");
+ if (alias_id >= 0) {
+ min_idx = alias_id;
+ max_idx = alias_id + 1;
+ } else {
+ min_idx = mmc_first_nonreserved_index();
+ max_idx = 0;
+ }
+
+ err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
if (err < 0) {
kfree(host);
return NULL;
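
The effect of the selection above, assuming a device tree with aliases mmc0 and mmc2 (values illustrative):

        /* aliases { mmc0 = &sdhci0; mmc2 = &sdhci2; };  (DT, assumed)
         * of_alias_get_highest_id("mmc") == 2, so:
         * - an unaliased host gets min_idx 3, max_idx 0 (open-ended)
         *   and is named mmc3 or higher;
         * - a host whose node carries alias mmc0 requests exactly
         *   [0, 1) from ida_simple_get() and is always mmc0.
         */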
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b3fa193de846..ff3063ce2acd 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1168,13 +1168,13 @@ static int mmc_select_hs400(struct mmc_card *card)
return err;
}
- /* Set host controller to HS timing */
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
-
/* Prepare host to downgrade to HS timing */
if (host->ops->hs400_downgrade)
host->ops->hs400_downgrade(host);
+ /* Set host controller to HS timing */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
/* Reduce frequency to HS frequency */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
@@ -1253,6 +1253,9 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
if (err)
goto out_err;
+ if (host->ops->hs400_downgrade)
+ host->ops->hs400_downgrade(host);
+
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
err = mmc_switch_status(card, true);
@@ -1268,9 +1271,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS);
- if (host->ops->hs400_downgrade)
- host->ops->hs400_downgrade(host);
-
err = mmc_switch_status(card, true);
if (err)
goto out_err;
@@ -1763,13 +1763,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
if (mmc_card_hs200(card)) {
+ host->doing_init_tune = 1;
+
err = mmc_hs200_tuning(card);
- if (err)
- goto free_card;
+ if (!err)
+ err = mmc_select_hs400(card);
+
+ host->doing_init_tune = 0;
- err = mmc_select_hs400(card);
if (err)
goto free_card;
+
} else if (!mmc_card_hs400es(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index c21b3cb71775..152e7525ed33 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -2669,22 +2669,22 @@ static const struct mmc_test_case mmc_test_cases[] = {
},
{
- .name = "Correct xfer_size at write (start failure)",
+ .name = "Proper xfer_size at write (start failure)",
.run = mmc_test_xfersize_write,
},
{
- .name = "Correct xfer_size at read (start failure)",
+ .name = "Proper xfer_size at read (start failure)",
.run = mmc_test_xfersize_read,
},
{
- .name = "Correct xfer_size at write (midway failure)",
+ .name = "Proper xfer_size at write (midway failure)",
.run = mmc_test_multi_xfersize_write,
},
{
- .name = "Correct xfer_size at read (midway failure)",
+ .name = "Proper xfer_size at read (midway failure)",
.run = mmc_test_multi_xfersize_read,
},
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 350d0cc4ee62..de7cb0369c30 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -472,8 +472,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
}
if (mmc_host_is_spi(host) && host->use_spi_crc)
- mq->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 5a2210c25aa7..6f054c449d46 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -709,10 +709,34 @@ static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
+
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ \
+ if (num > card->num_info) \
+ return -ENODATA; \
+ if (!card->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", card->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
static struct attribute *sd_std_attrs[] = {
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
&dev_attr_cid.attr,
&dev_attr_csd.attr,
&dev_attr_scr.attr,
@@ -735,12 +759,18 @@ static struct attribute *sd_std_attrs[] = {
static umode_t sd_std_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct mmc_card *card = mmc_dev_to_card(dev);
- /* CIS vendor and device ids are available only for Combo cards */
- if ((attr == &dev_attr_vendor.attr || attr == &dev_attr_device.attr) &&
- card->type != MMC_TYPE_SD_COMBO)
+	/* CIS vendor and device ids, revision and info strings are available only for Combo cards */
+ if ((attr == &dev_attr_vendor.attr ||
+ attr == &dev_attr_device.attr ||
+ attr == &dev_attr_revision.attr ||
+ attr == &dev_attr_info1.attr ||
+ attr == &dev_attr_info2.attr ||
+ attr == &dev_attr_info3.attr ||
+ attr == &dev_attr_info4.attr
+ ) && card->type != MMC_TYPE_SD_COMBO)
return 0;
return attr->mode;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 7b40553d3934..694a212cbe25 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -29,12 +29,36 @@
MMC_DEV_ATTR(vendor, "0x%04x\n", card->cis.vendor);
MMC_DEV_ATTR(device, "0x%04x\n", card->cis.device);
+MMC_DEV_ATTR(revision, "%u.%u\n", card->major_rev, card->minor_rev);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct mmc_card *card = mmc_dev_to_card(dev); \
+ \
+ if (num > card->num_info) \
+ return -ENODATA; \
+ if (!card->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", card->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
+
static struct attribute *sdio_std_attrs[] = {
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
NULL,
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 3cc928282af7..3d709029e07c 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -28,34 +28,50 @@
#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
/* show configuration fields */
-#define sdio_config_attr(field, format_string) \
+#define sdio_config_attr(field, format_string, args...) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct sdio_func *func; \
\
func = dev_to_sdio_func (dev); \
- return sprintf (buf, format_string, func->field); \
+ return sprintf(buf, format_string, args); \
} \
static DEVICE_ATTR_RO(field)
-sdio_config_attr(class, "0x%02x\n");
-sdio_config_attr(vendor, "0x%04x\n");
-sdio_config_attr(device, "0x%04x\n");
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct sdio_func *func = dev_to_sdio_func (dev);
-
- return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",
- func->class, func->vendor, func->device);
-}
-static DEVICE_ATTR_RO(modalias);
+sdio_config_attr(class, "0x%02x\n", func->class);
+sdio_config_attr(vendor, "0x%04x\n", func->vendor);
+sdio_config_attr(device, "0x%04x\n", func->device);
+sdio_config_attr(revision, "%u.%u\n", func->major_rev, func->minor_rev);
+sdio_config_attr(modalias, "sdio:c%02Xv%04Xd%04X\n", func->class, func->vendor, func->device);
+
+#define sdio_info_attr(num) \
+static ssize_t info##num##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct sdio_func *func = dev_to_sdio_func(dev); \
+ \
+ if (num > func->num_info) \
+ return -ENODATA; \
+ if (!func->info[num-1][0]) \
+ return 0; \
+ return sprintf(buf, "%s\n", func->info[num-1]); \
+} \
+static DEVICE_ATTR_RO(info##num)
+
+sdio_info_attr(1);
+sdio_info_attr(2);
+sdio_info_attr(3);
+sdio_info_attr(4);
static struct attribute *sdio_dev_attrs[] = {
&dev_attr_class.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_info1.attr,
+ &dev_attr_info2.attr,
+ &dev_attr_info3.attr,
+ &dev_attr_info4.attr,
&dev_attr_modalias.attr,
NULL,
};
@@ -106,6 +122,7 @@ static int
sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct sdio_func *func = dev_to_sdio_func(dev);
+ unsigned int i;
if (add_uevent_var(env,
"SDIO_CLASS=%02X", func->class))
@@ -116,6 +133,15 @@ sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return -ENOMEM;
if (add_uevent_var(env,
+ "SDIO_REVISION=%u.%u", func->major_rev, func->minor_rev))
+ return -ENOMEM;
+
+ for (i = 0; i < func->num_info; i++) {
+ if (add_uevent_var(env, "SDIO_INFO%u=%s", i+1, func->info[i]))
+ return -ENOMEM;
+ }
+
+ if (add_uevent_var(env,
"MODALIAS=sdio:c%02Xv%04Xd%04X",
func->class, func->vendor, func->device))
return -ENOMEM;
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index e0655278c5c3..44bea5e4aeda 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -23,9 +23,16 @@
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
+ u8 major_rev, minor_rev;
unsigned i, nr_strings;
char **buffer, *string;
+ if (size < 2)
+ return 0;
+
+ major_rev = buf[0];
+ minor_rev = buf[1];
+
/* Find all null-terminated (including zero length) strings in
the TPLLV1_INFO field. Trailing garbage is ignored. */
buf += 2;
@@ -57,9 +64,13 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
}
if (func) {
+ func->major_rev = major_rev;
+ func->minor_rev = minor_rev;
func->num_info = nr_strings;
func->info = (const char**)buffer;
} else {
+ card->major_rev = major_rev;
+ card->minor_rev = minor_rev;
card->num_info = nr_strings;
card->info = (const char**)buffer;
}
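
For reference, an illustrative CISTPL_VERS_1 body the parser above might see (bytes assumed, not from a real card): the first two bytes carry the revision, the rest is a NUL-separated string list terminated by 0xff; the new size check rejects a tuple shorter than the two revision bytes:

        /* buf: 01 00 'A' 'C' 'M' 'E' 00 'W' 'i' 'F' 'i' 00 ff
         * -> major_rev = 1, minor_rev = 0
         * -> num_info = 2: info[0] = "ACME", info[1] = "WiFi"
         */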
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9a34c827c96e..31481c9fcc2e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -178,7 +178,7 @@ config MMC_SDHCI_OF_AT91
config MMC_SDHCI_OF_ESDHC
tristate "SDHCI OF support for the Freescale eSDHC controller"
depends on MMC_SDHCI_PLTFM
- depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
select MMC_SDHCI_IO_ACCESSORS
select FSL_GUTS
help
@@ -213,6 +213,18 @@ config MMC_SDHCI_OF_DWCMSHC
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_OF_SPARX5
+ tristate "SDHCI OF support for the MCHP Sparx5 SoC"
+ depends on MMC_SDHCI_PLTFM
+ depends on ARCH_SPARX5 || COMPILE_TEST
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the MCHP Sparx5 SoC.
+
+ If you have a Sparx5 SoC with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_CADENCE
tristate "SDHCI support for the Cadence SD/SDIO/eMMC controller"
depends on MMC_SDHCI_PLTFM
@@ -226,7 +238,7 @@ config MMC_SDHCI_CADENCE
config MMC_SDHCI_CNS3XXX
tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
- depends on ARCH_CNS3XXX
+ depends on ARCH_CNS3XXX || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
help
This selects the SDHCI support for CNS3xxx System-on-Chip devices.
@@ -250,7 +262,7 @@ config MMC_SDHCI_ESDHC_MCF
config MMC_SDHCI_ESDHC_IMX
tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
@@ -264,7 +276,7 @@ config MMC_SDHCI_ESDHC_IMX
config MMC_SDHCI_DOVE
tristate "SDHCI support on Marvell's Dove SoC"
- depends on ARCH_DOVE || MACH_DOVE
+ depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
@@ -277,7 +289,7 @@ config MMC_SDHCI_DOVE
config MMC_SDHCI_TEGRA
tristate "SDHCI platform support for the Tegra SD/MMC Controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
@@ -289,7 +301,8 @@ config MMC_SDHCI_TEGRA
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
- depends on MMC_SDHCI && PLAT_SAMSUNG
+ depends on MMC_SDHCI
+ depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referred to as the HSMMC block in some of the Samsung S3C
@@ -301,7 +314,7 @@ config MMC_SDHCI_S3C
config MMC_SDHCI_SIRF
tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs"
- depends on ARCH_SIRF
+ depends on ARCH_SIRF || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
@@ -339,7 +352,8 @@ config MMC_SDHCI_PXAV2
config MMC_SDHCI_SPEAR
tristate "SDHCI support on ST SPEAr platform"
- depends on MMC_SDHCI && PLAT_SPEAR
+ depends on MMC_SDHCI
+ depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
This selects the Secure Digital Host Controller Interface (SDHCI)
@@ -362,7 +376,7 @@ config MMC_SDHCI_S3C_DMA
config MMC_SDHCI_BCM_KONA
tristate "SDHCI support on Broadcom KONA platform"
- depends on ARCH_BCM_MOBILE
+ depends on ARCH_BCM_MOBILE || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
help
This selects the Broadcom Kona Secure Digital Host Controller
@@ -410,7 +424,8 @@ config MMC_SDHCI_IPROC
config MMC_MESON_GX
tristate "Amlogic S905/GX*/AXG SD/MMC Host Controller support"
- depends on ARCH_MESON && MMC
+	depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
help
This selects support for the Amlogic SD/MMC Host Controller
found on the S905/GX*/AXG family of SoCs. This controller is
@@ -446,7 +461,7 @@ config MMC_MESON_MX_SDIO
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
- depends on ARCH_MOXART && MMC
+ depends on ARCH_MOXART || COMPILE_TEST
help
This selects support for the MOXART SD/MMC Host Controller.
MOXA provides one multi-functional card reader which can
@@ -455,7 +470,7 @@ config MMC_MOXART
config MMC_SDHCI_ST
tristate "SDHCI support on STMicroelectronics SoC"
- depends on ARCH_STI || FSP2
+ depends on ARCH_STI || FSP2 || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
@@ -525,7 +540,7 @@ config MMC_ATMELMCI
config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_CQHCI
@@ -575,7 +590,7 @@ config MMC_TIFM_SD
config MMC_MVSDIO
tristate "Marvell MMC/SD/SDIO host driver"
- depends on PLAT_ORION
+ depends on PLAT_ORION || (COMPILE_TEST && ARM)
depends on OF
help
This selects the Marvell SDIO host driver.
@@ -587,7 +602,7 @@ config MMC_MVSDIO
config MMC_DAVINCI
tristate "TI DAVINCI Multimedia Card Interface support"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || COMPILE_TEST
help
This selects the TI DAVINCI Multimedia card Interface.
If you have a DAVINCI board with a Multimedia Card slot,
@@ -669,7 +684,7 @@ config MMC_SDRICOH_CS
config MMC_SDHCI_SPRD
tristate "Spreadtrum SDIO host Controller"
- depends on ARCH_SPRD
+ depends on ARCH_SPRD || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_HSQ
@@ -686,7 +701,7 @@ config MMC_TMIO_CORE
config MMC_TMIO
tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO || MFD_ASIC3
+ depends on MFD_TMIO || MFD_ASIC3 || COMPILE_TEST
select MMC_TMIO_CORE
help
This provides support for the SD/MMC cell found in TC6393XB,
@@ -777,7 +792,7 @@ config MMC_CAVIUM_THUNDERX
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
- depends on ARC || ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on ARC || ARM || ARM64 || MIPS || RISCV || CSKY || COMPILE_TEST
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
@@ -959,7 +974,7 @@ config MMC_REALTEK_USB
config MMC_SUNXI
tristate "Allwinner sunxi SD/MMC Host Controller support"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
help
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 4d5bcb0144a0..451c25fc2c69 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC) += sdhci-of-dwcmshc.o
+obj-$(CONFIG_MMC_SDHCI_OF_SPARX5) += sdhci-of-sparx5.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 026ca9194ce5..bfb8efeb7eb8 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1178,6 +1178,7 @@ static struct platform_driver alcor_pci_sdmmc_driver = {
.id_table = alcor_pci_sdmmc_ids,
.driver = {
.name = DRV_NAME_ALCOR_PCI_SDMMC,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &alcor_mmc_pm_ops
},
};
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index ceb4924e02d0..e878fdf8f20a 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -537,6 +537,7 @@ static struct platform_driver goldfish_mmc_driver = {
.remove = goldfish_mmc_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 3fc3bbea8536..444bd3a0a922 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2668,6 +2668,7 @@ static struct platform_driver atmci_driver = {
.remove = atmci_remove,
.driver = {
.name = "atmel_mci",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(atmci_dt_ids),
.pm = &atmci_dev_pm_ops,
},
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 9bb1910268ca..bd00515fbaba 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1189,6 +1189,7 @@ static struct platform_driver au1xmmc_driver = {
.resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index a0767790a826..8c2361e66277 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1406,9 +1406,7 @@ static int bcm2835_probe(struct platform_device *pdev)
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "could not get clk: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
goto err;
}
@@ -1476,6 +1474,7 @@ static struct platform_driver bcm2835_driver = {
.remove = bcm2835_remove,
.driver = {
.name = "sdhost-bcm2835",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = bcm2835_match,
},
};
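
The bcm2835 conversion above, like the davinci, dw_mmc, jz4740 and meson ones below, leans on the same dev_err_probe() contract: it returns the error it is given, logs with dev_err() for real failures, and quietly records -EPROBE_DEFER for the devices_deferred debugfs file instead of printing it. Roughly (a sketch of the semantics, not the kernel's implementation):

        ret = dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
        /* stands in for the open-coded pattern it replaces:
         *      ret = PTR_ERR(clk);
         *      if (ret != -EPROBE_DEFER)
         *              dev_err(dev, "could not get clk: %d\n", ret);
         */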
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index e299cdd1e619..2c4b2df52adb 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -327,6 +327,7 @@ static struct platform_driver octeon_mmc_driver = {
.remove = octeon_mmc_remove,
.driver = {
.name = KBUILD_MODNAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = octeon_mmc_match,
},
};
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index cfa87dfa73d8..697fe40756bf 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -376,6 +376,9 @@ static void cqhci_off(struct mmc_host *mmc)
else
pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+ if (cq_host->ops->post_disable)
+ cq_host->ops->post_disable(mmc);
+
mmc->cqe_on = false;
}
@@ -580,6 +583,9 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
__cqhci_enable(cq_host);
if (!mmc->cqe_on) {
+ if (cq_host->ops->pre_enable)
+ cq_host->ops->pre_enable(mmc);
+
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
index 437700179de4..89bf6adbce8c 100644
--- a/drivers/mmc/host/cqhci.h
+++ b/drivers/mmc/host/cqhci.h
@@ -206,6 +206,8 @@ struct cqhci_host_ops {
void (*disable)(struct mmc_host *mmc, bool recovery);
void (*update_dcmd_desc)(struct mmc_host *mmc, struct mmc_request *mrq,
u64 *data);
+ void (*pre_enable)(struct mmc_host *mmc);
+ void (*post_disable)(struct mmc_host *mmc);
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
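
A host driver that must gate clocks or power around CQE on/off would wire the two new hooks like this (hypothetical driver, illustrative names):

        static void foo_cqe_pre_enable(struct mmc_host *mmc)
        {
                /* e.g. un-gate the CQE clock before CQHCI_CTL is written */
        }

        static void foo_cqe_post_disable(struct mmc_host *mmc)
        {
                /* e.g. re-gate it once the engine is off */
        }

        static const struct cqhci_host_ops foo_cqhci_ops = {
                .pre_enable   = foo_cqe_pre_enable,
                .post_disable = foo_cqe_post_disable,
        };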
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e50a08bce7ef..90cd179625fc 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -996,7 +996,7 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
if (qstatus & MMCST0_RSPDNE) {
/* End of command phase */
- end_command = (int) host->cmd;
+ end_command = host->cmd ? 1 : 0;
}
if (end_command)
@@ -1240,9 +1240,8 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret,
+ "could not parse of data\n");
goto parse_fail;
}
} else {
@@ -1396,6 +1395,7 @@ static const struct dev_pm_ops davinci_mmcsd_pm = {
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index aa38b1a8017e..10baf122bc15 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -55,6 +55,7 @@ static struct platform_driver dw_mci_bluefield_pltfm_driver = {
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_bluefield",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_bluefield_match,
.pm = &dw_mci_pltfm_pmops,
},
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 95adeee07217..0c75810812a0 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -592,6 +592,7 @@ static struct platform_driver dw_mci_exynos_pltfm_driver = {
.remove = dw_mci_exynos_remove,
.driver = {
.name = "dwmmc_exynos",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_exynos_match,
.pm = &dw_mci_exynos_pmops,
},
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index 83e1bad0a008..39794f93826f 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -200,6 +200,7 @@ static struct platform_driver dw_mci_hi3798cv200_driver = {
.remove = dw_mci_hi3798cv200_remove,
.driver = {
.name = "dwmmc_hi3798cv200",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_hi3798cv200_match,
},
};
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index db1a84b2ba61..29d2494eb27a 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -473,6 +473,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_k3",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_k3_match,
.pm = &dw_mci_k3_dev_pm_ops,
},
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 7de37f524a96..73731cd3ba23 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -98,6 +98,7 @@ static struct platform_driver dw_mci_pltfm_driver = {
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dw_mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_pltfm_match,
.pm = &dw_mci_pltfm_pmops,
},
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d4d02134848c..753502ce3c85 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -383,6 +383,7 @@ static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.remove = dw_mci_rockchip_remove,
.driver = {
.name = "dwmmc_rockchip",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_rockchip_match,
.pm = &dw_mci_rockchip_dev_pm_ops,
},
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
index eada648b27ec..51bcc6332f3a 100644
--- a/drivers/mmc/host/dw_mmc-zx.c
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -155,7 +155,6 @@ static int dw_mci_zx_parse_dt(struct dw_mci *host)
struct device_node *node;
struct dw_mci_zx_priv_data *priv;
struct regmap *sysc_base;
- int ret;
/* syscon is needed only by emmc */
node = of_parse_phandle(np, "zte,aon-syscon", 0);
@@ -163,13 +162,9 @@ static int dw_mci_zx_parse_dt(struct dw_mci *host)
sysc_base = syscon_node_to_regmap(node);
of_node_put(node);
- if (IS_ERR(sysc_base)) {
- ret = PTR_ERR(sysc_base);
- if (ret != -EPROBE_DEFER)
- dev_err(host->dev, "Can't get syscon: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(sysc_base))
+ return dev_err_probe(host->dev, PTR_ERR(sysc_base),
+ "Can't get syscon\n");
} else {
return 0;
}
@@ -227,6 +222,7 @@ static struct platform_driver dw_mci_zx_pltfm_driver = {
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_zx",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_zx_match,
.pm = &dw_mci_zx_dev_pm_ops,
},
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0fba940544ca..43c5795691fb 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3161,12 +3161,9 @@ int dw_mci_probe(struct dw_mci *host)
if (!host->pdata) {
host->pdata = dw_mci_parse_dt(host);
- if (PTR_ERR(host->pdata) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(host->pdata)) {
- dev_err(host->dev, "platform data not available\n");
- return -EINVAL;
- }
+ if (IS_ERR(host->pdata))
+ return dev_err_probe(host->dev, PTR_ERR(host->pdata),
+ "platform data not available\n");
}
host->biu_clk = devm_clk_get(host->dev, "biu");
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 81d71010b474..a1f92fed2a55 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -991,9 +991,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
ret = mmc_of_parse(mmc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "could not parse device properties: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
goto err_free_host;
}
@@ -1126,6 +1124,7 @@ static struct platform_driver jz4740_mmc_driver = {
.remove = jz4740_mmc_remove,
.driver = {
.name = "jz4740-mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(jz4740_mmc_of_match),
.pm = pm_ptr(&jz4740_mmc_pm_ops),
},
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 08a3b1c05acb..4ec41579940a 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -426,11 +426,9 @@ static int meson_mmc_clk_init(struct meson_host *host)
snprintf(name, sizeof(name), "clkin%d", i);
clk = devm_clk_get(host->dev, name);
- if (IS_ERR(clk)) {
- if (clk != ERR_PTR(-EPROBE_DEFER))
- dev_err(host->dev, "Missing clock %s\n", name);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(host->dev, PTR_ERR(clk),
+ "Missing clock %s\n", name);
mux_parent_names[i] = __clk_get_name(clk);
}
@@ -521,7 +519,7 @@ static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
val |= ADJUST_ADJ_EN;
writel(val, host->regs + host->data->adjust);
- if (mmc->doing_retune)
+ if (mmc_doing_retune(mmc))
dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
else
dly = 0;
@@ -1077,12 +1075,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
ret = device_reset_optional(&pdev->dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "device reset failed: %d\n", ret);
-
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "device reset failed\n");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -1270,6 +1264,7 @@ static struct platform_driver meson_mmc_driver = {
.remove = meson_mmc_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(meson_mmc_of_match),
},
};
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 53e3f6a4245a..7cd9c0ec2fcf 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -903,6 +903,7 @@ static struct platform_driver meson_mx_sdhc_driver = {
.remove = meson_mx_sdhc_remove,
.driver = {
.name = "meson-mx-sdhc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(meson_mx_sdhc_of_match),
},
};
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 703d5834f9a5..1c5299cd0cbe 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -755,6 +755,7 @@ static struct platform_driver meson_mx_mmc_driver = {
.remove = meson_mx_mmc_remove,
.driver = {
.name = "meson-mx-sdio",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(meson_mx_mmc_of_match),
},
};
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 18a850f37ddc..02f4fd26e76a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -882,9 +882,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
else
clock_rate = spi->max_speed_hz;
- timeout = data->timeout_ns +
+ timeout = data->timeout_ns / 1000 +
data->timeout_clks * 1000000 / clock_rate;
- timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1;
+ timeout = usecs_to_jiffies((unsigned int)timeout) + 1;
/* Handle scatterlist segments one at a time, with synch for
* each 512-byte block
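The mmc_spi change fixes a unit mismatch: data->timeout_ns is in nanoseconds while the timeout_clks term (clks * 1000000 / clock_rate) already yields microseconds, so the old code summed mixed units and then divided the whole thing by 1000. A worked example with hypothetical values, timeout_ns = 100000000 (100 ms), timeout_clks = 1000, clock_rate = 25 MHz:

	/*
	 * clock term: 1000 * 1000000 / 25000000 = 40 (microseconds)
	 *
	 * old: (100000000 ns + 40 us) / 1000 = 100000 us
	 *      -> the 40 us clock allowance shrank to 0.04 us
	 * new: 100000000 / 1000 + 40 = 100040 us
	 *      -> both terms are microseconds before usecs_to_jiffies()
	 */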
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index fc6b9cf27d0b..f25079ba3bca 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -689,19 +689,18 @@ static int moxart_remove(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, NULL);
- if (mmc) {
- if (!IS_ERR(host->dma_chan_tx))
- dma_release_channel(host->dma_chan_tx);
- if (!IS_ERR(host->dma_chan_rx))
- dma_release_channel(host->dma_chan_rx);
- mmc_remove_host(mmc);
- mmc_free_host(mmc);
+ if (!IS_ERR(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
+ mmc_remove_host(mmc);
+ mmc_free_host(mmc);
+
+ writel(0, host->base + REG_INTERRUPT_MASK);
+ writel(0, host->base + REG_POWER_CONTROL);
+ writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
+ host->base + REG_CLOCK_CONTROL);
- writel(0, host->base + REG_INTERRUPT_MASK);
- writel(0, host->base + REG_POWER_CONTROL);
- writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
- host->base + REG_CLOCK_CONTROL);
- }
return 0;
}
@@ -717,6 +716,7 @@ static struct platform_driver moxart_mmc_driver = {
.remove = moxart_remove,
.driver = {
.name = "mmc-moxart",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = moxart_mmc_match,
},
};
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b0c27944db7f..a704745e5882 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -397,7 +397,6 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
const struct mtk_mmc_compatible *dev_comp;
- struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
spinlock_t lock;
@@ -734,14 +733,15 @@ static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq)
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
u64 timeout, clk_ns;
u32 mode = 0;
- if (host->mmc->actual_clock == 0) {
+ if (mmc->actual_clock == 0) {
timeout = 0;
} else {
clk_ns = 1000000000ULL;
- do_div(clk_ns, host->mmc->actual_clock);
+ do_div(clk_ns, mmc->actual_clock);
timeout = ns + clk_ns - 1;
do_div(timeout, clk_ns);
timeout += clks;
@@ -802,6 +802,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
u32 mode;
u32 flags;
u32 div;
@@ -811,7 +812,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
host->mclk = 0;
- host->mmc->actual_clock = 0;
+ mmc->actual_clock = 0;
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
return;
}
@@ -890,7 +891,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
- host->mmc->actual_clock = sclk;
+ mmc->actual_clock = sclk;
host->mclk = hz;
host->timing = timing;
/* need because clk changed. */
@@ -901,7 +902,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
* mmc_select_hs400() will drop to 50Mhz and High speed mode,
* tune result of hs200/200Mhz is not suitable for 50Mhz
*/
- if (host->mmc->actual_clock <= 52000000) {
+ if (mmc->actual_clock <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
if (host->top_base) {
writel(host->def_tune_para.emmc_top_control,
@@ -932,7 +933,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
- dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
+ dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
timing);
}
@@ -967,6 +968,7 @@ static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
/* rawcmd :
* vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
@@ -993,7 +995,7 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
struct mmc_data *data = cmd->data;
if (mmc_op_multi(opcode)) {
- if (mmc_card_mmc(host->mmc->card) && mrq->sbc &&
+ if (mmc_card_mmc(mmc->card) && mrq->sbc &&
!(mrq->sbc->arg & 0xFFFF0000))
rawcmd |= 0x2 << 28; /* AutoCMD23 */
}
@@ -1070,9 +1072,10 @@ static int msdc_auto_cmd_done(struct msdc_host *host, int events,
*/
static void msdc_recheck_sdio_irq(struct msdc_host *host)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
u32 reg_int, reg_inten, reg_ps;
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+ if (mmc->caps & MMC_CAP_SDIO_IRQ) {
reg_inten = readl(host->base + MSDC_INTEN);
if (reg_inten & MSDC_INTEN_SDIOIRQ) {
reg_int = readl(host->base + MSDC_INT);
@@ -1080,7 +1083,7 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
if (!(reg_int & MSDC_INT_SDIOIRQ ||
reg_ps & MSDC_PS_DATA1)) {
__msdc_enable_sdio_irq(host, 0);
- sdio_signal_irq(host->mmc);
+ sdio_signal_irq(mmc);
}
}
}
@@ -1113,7 +1116,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
msdc_unprepare_data(host, mrq);
if (host->error)
msdc_reset_hw(host);
- mmc_request_done(host->mmc, mrq);
+ mmc_request_done(mmc_from_priv(host), mrq);
if (host->dev_comp->recheck_sdio_irq)
msdc_recheck_sdio_irq(host);
}
@@ -1500,6 +1503,7 @@ static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
int cmd_err = 0, dat_err = 0;
if (intsts & MSDC_INT_RSPCRCERR) {
@@ -1523,12 +1527,13 @@ static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
cmd_err, dat_err, intsts);
}
- return cqhci_irq(host->mmc, 0, cmd_err, dat_err);
+ return cqhci_irq(mmc, 0, cmd_err, dat_err);
}
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
struct msdc_host *host = (struct msdc_host *) dev_id;
+ struct mmc_host *mmc = mmc_from_priv(host);
while (true) {
unsigned long flags;
@@ -1551,18 +1556,18 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
spin_unlock_irqrestore(&host->lock, flags);
if ((events & event_mask) & MSDC_INT_SDIOIRQ)
- sdio_signal_irq(host->mmc);
+ sdio_signal_irq(mmc);
if ((events & event_mask) & MSDC_INT_CDSC) {
if (host->internal_cd)
- mmc_detect_change(host->mmc, msecs_to_jiffies(20));
+ mmc_detect_change(mmc, msecs_to_jiffies(20));
events &= ~MSDC_INT_CDSC;
}
if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
break;
- if ((host->mmc->caps2 & MMC_CAP2_CQE) &&
+ if ((mmc->caps2 & MMC_CAP2_CQE) &&
(events & MSDC_INT_CMDQ)) {
msdc_cmdq_irq(host, events);
/* clear interrupts */
@@ -2290,6 +2295,26 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
}
}
+static void msdc_cqe_pre_enable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 reg;
+
+ reg = cqhci_readl(cq_host, CQHCI_CFG);
+ reg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, reg, CQHCI_CFG);
+}
+
+static void msdc_cqe_post_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 reg;
+
+ reg = cqhci_readl(cq_host, CQHCI_CFG);
+ reg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, reg, CQHCI_CFG);
+}
+
static const struct mmc_host_ops mt_msdc_ops = {
.post_req = msdc_post_req,
.pre_req = msdc_pre_req,
@@ -2309,6 +2334,8 @@ static const struct mmc_host_ops mt_msdc_ops = {
static const struct cqhci_host_ops msdc_cmdq_ops = {
.enable = msdc_cqe_enable,
.disable = msdc_cqe_disable,
+ .pre_enable = msdc_cqe_pre_enable,
+ .post_disable = msdc_cqe_post_disable,
};
static void msdc_of_property_parse(struct platform_device *pdev,
@@ -2434,7 +2461,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dev = &pdev->dev;
host->dev_comp = of_device_get_match_data(&pdev->dev);
- host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
@@ -2475,7 +2501,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc_dev(mmc)->dma_mask = &host->dma_mask;
if (mmc->caps2 & MMC_CAP2_CQE) {
- host->cq_host = devm_kzalloc(host->mmc->parent,
+ host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
GFP_KERNEL);
if (!host->cq_host) {
@@ -2560,7 +2586,7 @@ static int msdc_drv_remove(struct platform_device *pdev)
pm_runtime_get_sync(host->dev);
platform_set_drvdata(pdev, NULL);
- mmc_remove_host(host->mmc);
+ mmc_remove_host(mmc);
msdc_deinit_hw(host);
msdc_gate_clock(host);
@@ -2572,7 +2598,7 @@ static int msdc_drv_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
host->dma.bd, host->dma.bd_addr);
- mmc_free_host(host->mmc);
+ mmc_free_host(mmc);
return 0;
}
@@ -2607,6 +2633,7 @@ static void msdc_save_reg(struct msdc_host *host)
static void msdc_restore_reg(struct msdc_host *host)
{
+ struct mmc_host *mmc = mmc_from_priv(host);
u32 tune_reg = host->dev_comp->pad_tune_reg;
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
@@ -2631,7 +2658,7 @@ static void msdc_restore_reg(struct msdc_host *host)
writel(host->save_para.pad_tune, host->base + tune_reg);
}
- if (sdio_irq_claimed(host->mmc))
+ if (sdio_irq_claimed(mmc))
__msdc_enable_sdio_irq(host, 1);
}
@@ -2667,6 +2694,7 @@ static struct platform_driver mt_msdc_driver = {
.remove = msdc_drv_remove,
.driver = {
.name = "mtk-msdc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = msdc_of_ids,
.pm = &msdc_dev_pm_ops,
},
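The mtk-sd rework can drop the host->mmc back-pointer because mmc_alloc_host() embeds the driver's private area inside struct mmc_host, so the outer structure is recoverable with container_of(). mmc_from_priv() in <linux/mmc/host.h> is believed to be exactly that one-liner:

	static inline struct mmc_host *mmc_from_priv(void *priv)
	{
		return container_of(priv, struct mmc_host, private);
	}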
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index cc0752a9df6d..629efbe639c4 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -824,6 +824,7 @@ static struct platform_driver mvsd_driver = {
.remove = mvsd_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = mvsdio_dt_ids,
},
};
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b3d654c688e5..12ee07285980 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1244,6 +1244,7 @@ static struct platform_driver mxcmci_driver = {
.id_table = mxcmci_devtype,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &mxcmci_pm_ops,
.of_match_table = mxcmci_of_match,
}
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b1820def36c0..75007f61df97 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -726,6 +726,7 @@ static struct platform_driver mxs_mmc_driver = {
.id_table = mxs_ssp_ids,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &mxs_mmc_pm_ops,
.of_match_table = mxs_mmc_dt_ids,
},
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 33d7af7c7762..6aa0537f1f84 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1504,6 +1504,7 @@ static struct platform_driver mmc_omap_driver = {
.remove = mmc_omap_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mmc_omap_match),
},
};
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 37b8740513f5..aa9cc49206d1 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1114,8 +1114,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
int ret;
/* Disable the clocks */
- if (host->dbclk)
- clk_disable_unprepare(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
/* Turn the power off */
ret = omap_hsmmc_set_power(host, 0);
@@ -1123,8 +1122,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
ret = omap_hsmmc_set_power(host, 1);
- if (host->dbclk)
- clk_prepare_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (ret != 0)
goto err;
@@ -2014,8 +2012,7 @@ err_irq:
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
- if (host->dbclk)
- clk_disable_unprepare(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
err1:
mmc_free_host(mmc);
err:
@@ -2037,8 +2034,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
- if (host->dbclk)
- clk_disable_unprepare(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
mmc_free_host(host->mmc);
@@ -2063,8 +2059,7 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- if (host->dbclk)
- clk_disable_unprepare(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
pm_runtime_put_sync(host->dev);
return 0;
@@ -2080,8 +2075,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
- if (host->dbclk)
- clk_prepare_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
@@ -2171,6 +2165,7 @@ static struct platform_driver omap_hsmmc_driver = {
.remove = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &omap_hsmmc_dev_pm_ops,
.of_match_table = of_match_ptr(omap_mmc_of_match),
},
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index df43f42855e2..ccf214a89eda 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -689,6 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
static struct platform_driver owl_mmc_driver = {
.driver = {
.name = "owl_mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = owl_mmc_of_match,
},
.probe = owl_mmc_probe,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 3a9333475a2b..29f6180a0036 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -811,6 +811,7 @@ static struct platform_driver pxamci_driver = {
.remove = pxamci_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
},
};
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 14c64caefc64..cb962c7883dc 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -33,10 +33,13 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+#define SDHI_CALIB_TABLE_MAX 32
+
struct renesas_sdhi_quirks {
bool hs400_disabled;
bool hs400_4taps;
u32 hs400_bad_taps;
+ const u8 (*hs400_calib_table)[SDHI_CALIB_TABLE_MAX];
};
struct tmio_mmc_dma {
@@ -58,7 +61,8 @@ struct renesas_sdhi {
void __iomem *scc_ctl;
u32 scc_tappos;
u32 scc_tappos_hs400;
- bool doing_tune;
+ const u8 *adjust_hs400_calib_table;
+ bool needs_adjust_hs400;
/* Tuning values: 1 for success, 0 for failure */
DECLARE_BITMAP(taps, BITS_PER_LONG);
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 904f5237d8f7..acb9c81a4e45 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_dma.h>
@@ -47,6 +48,8 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
+#define SDHI_GEN3_MMC0_ADDR 0xee140000
+
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -117,8 +120,12 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
int i;
- /* tested only on R-Car Gen2+ currently; may work for others */
- if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ /*
+ * We simply return the current rate if (a) we are not on an R-Car Gen2+
+ * SoC (may work for others, but untested) or (b) the SCC needs its
+ * clock during tuning, so we don't change the external clock setup.
+ */
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc))
return clk_get_rate(priv->clk);
/*
@@ -247,6 +254,11 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_RVSREQ 0x00A
#define SH_MOBILE_SDHI_SCC_SMPCMP 0x00C
#define SH_MOBILE_SDHI_SCC_TMPPORT2 0x00E
+#define SH_MOBILE_SDHI_SCC_TMPPORT3 0x014
+#define SH_MOBILE_SDHI_SCC_TMPPORT4 0x016
+#define SH_MOBILE_SDHI_SCC_TMPPORT5 0x018
+#define SH_MOBILE_SDHI_SCC_TMPPORT6 0x01A
+#define SH_MOBILE_SDHI_SCC_TMPPORT7 0x01C
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN BIT(0)
#define SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT 16
@@ -267,6 +279,40 @@ static int renesas_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4)
#define SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN BIT(31)
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_TMPPORT4 register */
+#define SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START BIT(0)
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC_TMPPORT5 register */
+#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R BIT(8)
+#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W (0 << 8)
+#define SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK 0x3F
+
+/* Definitions for values of the SH_MOBILE_SDHI_SCC register */
+#define SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE 0xa5000000
+#define SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK 0x1f
+#define SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE BIT(7)
+
+static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
+ 16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
+ { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
+ 12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
+};
+
+static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
+ { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
+};
+
+static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
+ 11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
+};
+
static inline u32 sd_scc_read32(struct tmio_mmc_host *host,
struct renesas_sdhi *priv, int addr)
{
@@ -373,6 +419,9 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ if (priv->adjust_hs400_calib_table)
+ priv->needs_adjust_hs400 = true;
}
static void renesas_sdhi_reset_scc(struct tmio_mmc_host *host,
@@ -403,6 +452,74 @@ static void renesas_sdhi_disable_scc(struct mmc_host *mmc)
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
}
+static u32 sd_scc_tmpport_read32(struct tmio_mmc_host *host,
+ struct renesas_sdhi *priv, u32 addr)
+{
+ /* read mode */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5,
+ SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_R |
+ (SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr));
+
+ /* access start and stop */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4,
+ SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0);
+
+ return sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT7);
+}
+
+static void sd_scc_tmpport_write32(struct tmio_mmc_host *host,
+ struct renesas_sdhi *priv, u32 addr, u32 val)
+{
+ /* write mode */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT5,
+ SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_RW_SEL_W |
+ (SH_MOBILE_SDHI_SCC_TMPPORT5_DLL_ADR_MASK & addr));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT6, val);
+
+ /* access start and stop */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4,
+ SH_MOBILE_SDHI_SCC_TMPPORT4_DLL_ACC_START);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT4, 0);
+}
+
+static void renesas_sdhi_adjust_hs400_mode_enable(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+ u32 calib_code;
+
+ /* disable write protect */
+ sd_scc_tmpport_write32(host, priv, 0x00,
+ SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE);
+ /* read calibration code and adjust */
+ calib_code = sd_scc_tmpport_read32(host, priv, 0x26);
+ calib_code &= SH_MOBILE_SDHI_SCC_TMPPORT_CALIB_CODE_MASK;
+
+ sd_scc_tmpport_write32(host, priv, 0x22,
+ SH_MOBILE_SDHI_SCC_TMPPORT_MANUAL_MODE |
+ priv->adjust_hs400_calib_table[calib_code]);
+
+ /* set offset value to TMPPORT3, hardcoded to OFFSET0 (= 0x3) for now */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0x3);
+
+ /* adjustment done, clear flag */
+ priv->needs_adjust_hs400 = false;
+}
+
+static void renesas_sdhi_adjust_hs400_mode_disable(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ /* disable write protect */
+ sd_scc_tmpport_write32(host, priv, 0x00,
+ SH_MOBILE_SDHI_SCC_TMPPORT_DISABLE_WP_CODE);
+ /* disable manual calibration */
+ sd_scc_tmpport_write32(host, priv, 0x22, 0);
+ /* clear offset value of TMPPORT3 */
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT3, 0);
+}
+
static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
struct renesas_sdhi *priv)
{
@@ -420,6 +537,9 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host,
SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
+ if (priv->adjust_hs400_calib_table)
+ renesas_sdhi_adjust_hs400_mode_disable(host);
+
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
}
@@ -432,6 +552,26 @@ static int renesas_sdhi_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_io
return 0;
}
+static void renesas_sdhi_reset(struct tmio_mmc_host *host)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ renesas_sdhi_reset_scc(host, priv);
+ renesas_sdhi_reset_hs400_mode(host, priv);
+ priv->needs_adjust_hs400 = false;
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
+ ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
+ sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
+
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
+ TMIO_MASK_INIT_RCAR2);
+}
+
#define SH_MOBILE_SDHI_MIN_TAP_ROW 3
static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
@@ -441,7 +581,6 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
unsigned int taps_size = priv->tap_num * 2, min_tap_row;
unsigned long *bitmap;
- priv->doing_tune = false;
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSREQ, 0);
/*
@@ -500,10 +639,11 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
return 0;
}
-static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
+static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
struct renesas_sdhi *priv = host_to_priv(host);
- int i;
+ int i, ret;
priv->tap_num = renesas_sdhi_init_tuning(host);
if (!priv->tap_num)
@@ -515,7 +655,6 @@ static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
return -EINVAL;
}
- priv->doing_tune = true;
bitmap_zero(priv->taps, priv->tap_num * 2);
bitmap_zero(priv->smpcmp, priv->tap_num * 2);
@@ -524,14 +663,17 @@ static int renesas_sdhi_execute_tuning(struct tmio_mmc_host *host, u32 opcode)
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
- if (mmc_send_tuning(host->mmc, opcode, NULL) == 0)
+ if (mmc_send_tuning(mmc, opcode, NULL) == 0)
set_bit(i, priv->taps);
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_SMPCMP) == 0)
set_bit(i, priv->smpcmp);
}
- return renesas_sdhi_select_tuning(host);
+ ret = renesas_sdhi_select_tuning(host);
+ if (ret < 0)
+ renesas_sdhi_reset(host);
+ return ret;
}
static bool renesas_sdhi_manual_correction(struct tmio_mmc_host *host, bool use_4tap)
@@ -621,7 +763,7 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && !use_4tap))
return false;
- if (mmc_doing_retune(host->mmc) || priv->doing_tune)
+ if (mmc_doing_tune(host->mmc))
return false;
if (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL) &
@@ -631,27 +773,6 @@ static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host)
return renesas_sdhi_manual_correction(host, use_4tap);
}
-static void renesas_sdhi_hw_reset(struct tmio_mmc_host *host)
-{
- struct renesas_sdhi *priv;
-
- priv = host_to_priv(host);
-
- renesas_sdhi_reset_scc(host, priv);
- renesas_sdhi_reset_hs400_mode(host, priv);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL,
- ~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
- sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
-
- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
- sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
- TMIO_MASK_INIT_RCAR2);
-}
-
static int renesas_sdhi_wait_idle(struct tmio_mmc_host *host, u32 bit)
{
int timeout = 1000;
@@ -711,6 +832,13 @@ static int renesas_sdhi_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
+static void renesas_sdhi_fixup_request(struct tmio_mmc_host *host, struct mmc_request *mrq)
+{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ if (priv->needs_adjust_hs400 && mrq->cmd->opcode == MMC_SEND_STATUS)
+ renesas_sdhi_adjust_hs400_mode_enable(host);
+}
static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
{
/* Iff regs are 8 byte apart, sdbuf is 64 bit. Otherwise always 32. */
@@ -742,6 +870,21 @@ static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
.hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
};
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
+ .hs400_4taps = true,
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .hs400_calib_table = r8a7796_es13_calib_table,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
+ .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
+ .hs400_calib_table = r8a77965_calib_table,
+};
+
+static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
+ .hs400_calib_table = r8a77990_calib_table,
+};
+
/*
* Note for r8a7796 / r8a774a1: we can't distinguish ES1.1 and 1.2 as of now.
* So, we want to treat them equally and only have a match for ES1.2 to enforce
@@ -753,10 +896,11 @@ static const struct soc_device_attribute sdhi_quirks_match[] = {
{ .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
{ .soc_id = "r8a7795", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps2367 },
{ .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
- { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_4tap },
+ { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
{ .soc_id = "r8a7796", .revision = "ES3.*", .data = &sdhi_quirks_bad_taps1357 },
- { .soc_id = "r8a77965", .data = &sdhi_quirks_bad_taps2367 },
+ { .soc_id = "r8a77965", .data = &sdhi_quirks_r8a77965 },
{ .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 },
+ { .soc_id = "r8a77990", .data = &sdhi_quirks_r8a77990 },
{ /* Sentinel. */ },
};
@@ -862,11 +1006,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
- /* SDR and HS200/400 registers requires HW reset */
if (of_data && of_data->scc_offset) {
priv->scc_ctl = host->ctl + of_data->scc_offset;
- host->mmc->caps |= MMC_CAP_HW_RESET;
- host->hw_reset = renesas_sdhi_hw_reset;
+ host->reset = renesas_sdhi_reset;
}
}
@@ -915,6 +1057,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ver == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
+ if (ver == SDHI_VER_GEN3_SDMMC && quirks && quirks->hs400_calib_table) {
+ host->fixup_request = renesas_sdhi_fixup_request;
+ priv->adjust_hs400_calib_table = *(
+ res->start == SDHI_GEN3_MMC0_ADDR ?
+ quirks->hs400_calib_table :
+ quirks->hs400_calib_table + 1);
+ }
+
ret = tmio_mmc_host_probe(host);
if (ret < 0)
goto edisclk;
@@ -943,8 +1093,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (!hit)
dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n");
- host->execute_tuning = renesas_sdhi_execute_tuning;
host->check_retune = renesas_sdhi_check_scc_error;
+ host->ops.execute_tuning = renesas_sdhi_execute_tuning;
host->ops.prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning;
host->ops.hs400_downgrade = renesas_sdhi_disable_scc;
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
@@ -997,6 +1147,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
+ tmio_mmc_host_free(host);
return 0;
}
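The calibration-table selection in renesas_sdhi_probe() leans on array-pointer arithmetic: hs400_calib_table has type const u8 (*)[SDHI_CALIB_TABLE_MAX], i.e. it points at the first of two 32-entry rows, so +1 advances a whole row and the leading * decays the chosen row to const u8 *. A sketch with an assumed local variable for readability:

	const u8 (*tbl)[SDHI_CALIB_TABLE_MAX] = quirks->hs400_calib_table;

	/* row 0 for the MMC0 channel at 0xee140000, row 1 for the other */
	priv->adjust_hs400_calib_table =
		*(res->start == SDHI_GEN3_MMC0_ADDR ? tbl : tbl + 1);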
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 32ab991544ef..fe13e1ea22dc 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -336,10 +336,6 @@ static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
if (soc)
global_flags |= (unsigned long)soc->data;
- dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
/* value is max of SD_SECCNT. Confirmed by HW engineers */
dma_set_max_seg_size(dev, 0xffffffff);
@@ -357,6 +353,7 @@ static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
static struct platform_driver renesas_internal_dmac_sdhi_driver = {
.driver = {
.name = "renesas_sdhi_internal_dmac",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &renesas_sdhi_internal_dmac_dev_pm_ops,
.of_match_table = renesas_sdhi_internal_dmac_of_match,
},
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 13ff023fbee9..c5f789675302 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -463,6 +463,7 @@ static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
static struct platform_driver renesas_sys_dmac_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &renesas_sdhi_sys_dmac_dev_pm_ops,
.of_match_table = renesas_sdhi_sys_dmac_of_match,
},
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 2763a376b054..eb395e144207 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1471,6 +1471,7 @@ static struct platform_driver rtsx_pci_sdmmc_driver = {
.id_table = rtsx_pci_sdmmc_ids,
.driver = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(rtsx_pci_sdmmc_driver);
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 7225d9312af8..5fe4528e296e 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -579,7 +579,6 @@ static void sd_normal_rw(struct rtsx_usb_sdmmc *host,
static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx)
{
struct rtsx_ucr *ucr = host->ucr;
- int err;
dev_dbg(sdmmc_dev(host), "%s: %s sample_point = %d\n",
__func__, tx ? "TX" : "RX", sample_point);
@@ -601,11 +600,7 @@ static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx)
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_RST, 0);
- err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
- if (err)
- return err;
-
- return 0;
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
static inline u32 get_phase_point(u32 phase_map, unsigned int idx)
@@ -1458,6 +1453,7 @@ static struct platform_driver rtsx_usb_sdmmc_driver = {
.id_table = rtsx_usb_sdmmc_ids,
.driver = {
.name = "rtsx_usb_sdmmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &rtsx_usb_sdmmc_dev_pm_ops,
},
};
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 444b2769ae2c..643d54eceef6 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -24,11 +24,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
-
-#include <plat/gpio-cfg.h>
-#include <mach/dma.h>
-#include <mach/gpio-samsung.h>
-
#include <linux/platform_data/mmc-s3cmci.h>
#include "s3cmci.h"
@@ -150,8 +145,8 @@ static void s3cmci_reset(struct s3cmci_host *host);
static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
{
- u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize;
- u32 datcon, datcnt, datsta, fsta, imask;
+ u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer;
+ u32 datcon, datcnt, datsta, fsta;
con = readl(host->base + S3C2410_SDICON);
pre = readl(host->base + S3C2410_SDIPRE);
@@ -163,12 +158,10 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
r2 = readl(host->base + S3C2410_SDIRSP2);
r3 = readl(host->base + S3C2410_SDIRSP3);
timer = readl(host->base + S3C2410_SDITIMER);
- bsize = readl(host->base + S3C2410_SDIBSIZE);
datcon = readl(host->base + S3C2410_SDIDCON);
datcnt = readl(host->base + S3C2410_SDIDCNT);
datsta = readl(host->base + S3C2410_SDIDSTA);
fsta = readl(host->base + S3C2410_SDIFSTA);
- imask = readl(host->base + host->sdiimsk);
dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
prefix, con, pre, timer);
@@ -307,7 +300,8 @@ static inline void clear_imask(struct s3cmci_host *host)
static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
{
if (host->sdio_irqen) {
- if (gpio_get_value(S3C2410_GPE(8)) == 0) {
+ if (host->pdata->bus[3] &&
+ gpiod_get_value(host->pdata->bus[3]) == 0) {
pr_debug("%s: signalling irq\n", __func__);
mmc_signal_sdio_irq(host->mmc);
}
@@ -396,9 +390,6 @@ static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
local_irq_restore(flags);
}
-/**
- *
- */
static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
{
unsigned long flags;
@@ -1206,33 +1197,20 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_ON:
case MMC_POWER_UP:
- /* Configure GPE5...GPE10 pins in SD mode */
- if (!host->pdev->dev.of_node)
- s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
- S3C_GPIO_PULL_NONE);
-
- if (host->pdata->set_power)
- host->pdata->set_power(ios->power_mode, ios->vdd);
-
if (!host->is2440)
mci_con |= S3C2410_SDICON_FIFORESET;
-
break;
case MMC_POWER_OFF:
default:
- if (!host->pdev->dev.of_node)
- gpio_direction_output(S3C2410_GPE(5), 0);
-
if (host->is2440)
mci_con |= S3C2440_SDICON_SDRESET;
-
- if (host->pdata->set_power)
- host->pdata->set_power(ios->power_mode, ios->vdd);
-
break;
}
+ if (host->pdata->set_power)
+ host->pdata->set_power(ios->power_mode, ios->vdd);
+
s3cmci_set_clk(host, ios);
/* Set CLOCK_ENABLE */
@@ -1310,13 +1288,6 @@ static const struct mmc_host_ops s3cmci_ops = {
.enable_sdio_irq = s3cmci_enable_sdio_irq,
};
-static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
- /* This is currently here to avoid a number of if (host->pdata)
- * checks. Any zero fields to ensure reasonable defaults are picked. */
- .no_wprotect = 1,
- .no_detect = 1,
-};
-
#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3cmci_cpufreq_transition(struct notifier_block *nb,
@@ -1379,7 +1350,7 @@ static int s3cmci_state_show(struct seq_file *seq, void *v)
{
struct s3cmci_host *host = seq->private;
- seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
+ seq_printf(seq, "Register base = 0x%p\n", host->base);
seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
seq_printf(seq, "Prescale = %d\n", host->prescaler);
seq_printf(seq, "is2440 = %d\n", host->is2440);
@@ -1470,24 +1441,21 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host)
int i, ret;
host->is2440 = platform_get_device_id(pdev)->driver_data;
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "need platform data");
+ return -ENXIO;
+ }
- for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
- ret = gpio_request(i, dev_name(&pdev->dev));
- if (ret) {
+ for (i = 0; i < 6; i++) {
+ pdata->bus[i] = devm_gpiod_get_index(&pdev->dev, "bus", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->bus[i])) {
dev_err(&pdev->dev, "failed to get gpio %d\n", i);
-
- for (i--; i >= S3C2410_GPE(5); i--)
- gpio_free(i);
-
- return ret;
+ return PTR_ERR(pdata->bus[i]);
}
}
- if (!pdev->dev.platform_data)
- pdev->dev.platform_data = &s3cmci_def_pdata;
-
- pdata = pdev->dev.platform_data;
-
if (pdata->no_wprotect)
mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
@@ -1522,7 +1490,7 @@ static int s3cmci_probe_dt(struct s3cmci_host *host)
struct mmc_host *mmc = host->mmc;
int ret;
- host->is2440 = (int) of_device_get_match_data(&pdev->dev);
+ host->is2440 = (long) of_device_get_match_data(&pdev->dev);
ret = mmc_of_parse(mmc);
if (ret)
@@ -1542,7 +1510,6 @@ static int s3cmci_probe(struct platform_device *pdev)
struct s3cmci_host *host;
struct mmc_host *mmc;
int ret;
- int i;
mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
if (!mmc) {
@@ -1586,7 +1553,7 @@ static int s3cmci_probe(struct platform_device *pdev)
"failed to get io memory region resource.\n");
ret = -ENOENT;
- goto probe_free_gpio;
+ goto probe_free_host;
}
host->mem = request_mem_region(host->mem->start,
@@ -1595,7 +1562,7 @@ static int s3cmci_probe(struct platform_device *pdev)
if (!host->mem) {
dev_err(&pdev->dev, "failed to request io memory region.\n");
ret = -ENOENT;
- goto probe_free_gpio;
+ goto probe_free_host;
}
host->base = ioremap(host->mem->start, resource_size(host->mem));
@@ -1719,11 +1686,6 @@ static int s3cmci_probe(struct platform_device *pdev)
probe_free_mem_region:
release_mem_region(host->mem->start, resource_size(host->mem));
- probe_free_gpio:
- if (!pdev->dev.of_node)
- for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
- gpio_free(i);
-
probe_free_host:
mmc_free_host(mmc);
@@ -1749,7 +1711,6 @@ static int s3cmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct s3cmci_host *host = mmc_priv(mmc);
- int i;
s3cmci_shutdown(pdev);
@@ -1762,10 +1723,6 @@ static int s3cmci_remove(struct platform_device *pdev)
free_irq(host->irq, host);
- if (!pdev->dev.of_node)
- for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
- gpio_free(i);
-
iounmap(host->base);
release_mem_region(host->mem->start, resource_size(host->mem));
@@ -1809,6 +1766,7 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
static struct platform_driver s3cmci_driver = {
.driver = {
.name = "s3c-sdi",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = s3cmci_dt_match,
},
.id_table = s3cmci_driver_ids,
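The s3cmci conversion replaces the hardcoded S3C2410_GPE(5..10) numbers with GPIO descriptors looked up by the connection ID "bus" at indexes 0..5. For non-DT boards this presumes a gpiod_lookup_table registered by board code elsewhere; a hypothetical sketch (chip label and device name assumed, not taken from this patch):

	static struct gpiod_lookup_table s3cmci_gpio_table = {
		.dev_id = "s3c2410-sdi",	/* assumed device name */
		.table = {
			GPIO_LOOKUP_IDX("GPIOE", 5, "bus", 0, GPIO_ACTIVE_HIGH),
			GPIO_LOOKUP_IDX("GPIOE", 6, "bus", 1, GPIO_ACTIVE_HIGH),
			/* ... GPE7..GPE10 as "bus" indexes 2..5 ... */
			{ },
		},
	};

With descriptors, devm_gpiod_get_index() also takes over the cleanup that the removed gpio_free() loops did by hand.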
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 284cba11e279..54205e3be9e8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -662,6 +662,43 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
(host->mmc->caps & MMC_CAP_1_8V_DDR))
host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+ /*
+ * There are two types of presets out in the wild:
+ * 1) Default/broken presets.
+ * These presets have two sets of problems:
+ * a) The clock divisor for SDR12, SDR25, and SDR50 is too small.
+ * This results in clock frequencies that are 2x higher than
+ * acceptable, i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 =
+ * 100 MHz.
+ * b) The HS200 and HS400 driver strengths don't match.
+ * By default, the SDR104 preset register has a driver strength of
+ * A, but the (internal) HS400 preset register has a driver
+ * strength of B. As part of initializing HS400, HS200 tuning
+ * needs to be performed. Having different driver strengths
+ * between tuning and operation is wrong. It results in different
+ * rise/fall times that lead to incorrect sampling.
+ * 2) Firmware with properly initialized presets.
+ * These presets have proper clock divisors, i.e., SDR12 => 12 MHz,
+ * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally, the HS200 and
+ * HS400 preset driver strengths match.
+ *
+ * Enabling presets for HS400 doesn't work for the following reasons:
+ * 1) sdhci_set_ios has a hard-coded list of timings that are used
+ * to determine if presets should be enabled.
+ * 2) sdhci_get_preset_value is using a non-standard register to
+ * read out HS400 presets. The AMD controller doesn't support this
+ * non-standard register. In fact, it doesn't expose the HS400
+ * preset register anywhere in the SDHCI memory map. This results
+ * in reading a garbage value and using the wrong presets.
+ *
+ * Since HS400 and HS200 presets must be identical, we could
+ * instead use the SDR104 preset register.
+ *
+ * If the above issues are resolved, we could remove this quirk for
+ * firmware that has valid presets (i.e., SDR12 <= 12 MHz).
+ */
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
@@ -1027,6 +1064,7 @@ static const struct dev_pm_ops sdhci_acpi_pm_ops = {
static struct platform_driver sdhci_acpi_driver = {
.driver = {
.name = "sdhci-acpi",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.acpi_match_table = sdhci_acpi_ids,
.pm = &sdhci_acpi_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index a6c2bd202b45..4d4aac85cc7a 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -324,6 +324,7 @@ err_pltfm_free:
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_bcm_kona_of_match,
},
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index ad01f6451a95..bbf3496f4495 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -235,13 +235,11 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Clock not found in Device Tree\n");
- clk = NULL;
- }
+ clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "Failed to get clock from Device Tree\n");
+
res = clk_prepare_enable(clk);
if (res)
return res;
@@ -328,6 +326,7 @@ MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
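devm_clk_get_optional() differs from devm_clk_get() in one way: a clock absent from the firmware description yields NULL rather than ERR_PTR(-ENOENT), and the clk API treats a NULL clk as a no-op. That removes the brcmstb fallback branch while still propagating real errors, -EPROBE_DEFER included:

	clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(clk))	/* genuine failure, not merely "no clock" */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get clock from Device Tree\n");

	res = clk_prepare_enable(clk);	/* succeeds silently when clk is NULL */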
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 4d9f7681817c..6f2de54a5987 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -463,6 +463,7 @@ MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
static struct platform_driver sdhci_cdns_driver = {
.driver = {
.name = "sdhci-cdns",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_cdns_pm_ops,
.of_match_table = sdhci_cdns_match,
},
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 811eab1b8964..2a29c7a4f308 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -98,6 +98,7 @@ static int sdhci_cns3xxx_probe(struct platform_device *pdev)
static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_cns3xxx_probe,
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index fe9da3122fe9..5e5bf82e5976 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -105,6 +105,7 @@ MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_dove_of_match_table,
},
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d738907a622f..fce8fa7e6b30 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -987,10 +987,20 @@ static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
u32 reg;
+ u8 sw_rst;
+ int ret;
/* FIXME: delay a bit for card to be ready for next tuning due to errors */
mdelay(1);
+ /* The IC vendor suggests resetting the USDHC before every tuning command */
+ esdhc_clrset_le(host, 0xff, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);
+ ret = readb_poll_timeout(host->ioaddr + SDHCI_SOFTWARE_RESET, sw_rst,
+ !(sw_rst & SDHCI_RESET_ALL), 10, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "warning! RESET_ALL never complete before sending tuning command\n");
+
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
ESDHC_MIX_CTRL_FBCLK_SEL;
@@ -1367,7 +1377,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
* response, block the tuning procedure or the first command
* after the whole tuning procedure always can't get any response.
*/
- tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
+ tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
/*
@@ -1643,10 +1653,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
goto disable_ipg_clk;
imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR(imx_data->pinctrl)) {
- err = PTR_ERR(imx_data->pinctrl);
+ if (IS_ERR(imx_data->pinctrl))
dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
- }
if (esdhc_is_usdhc(imx_data)) {
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
@@ -1917,6 +1925,7 @@ static const struct dev_pm_ops sdhci_esdhc_pmops = {
static struct platform_driver sdhci_esdhc_imx_driver = {
.driver = {
.name = "sdhci-esdhc-imx",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = imx_esdhc_dt_ids,
.pm = &sdhci_esdhc_pmops,
},
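The tuning-reset hunk above waits for the controller with readb_poll_timeout() from <linux/iopoll.h>, whose arguments are (addr, val, cond, sleep_us, timeout_us): it keeps re-reading the byte at addr into val, sleeping about 10 us between reads, until cond becomes true or 100 us elapse, and returns -ETIMEDOUT on expiry. The call restated with annotated parameters:

	/* (addr, val, cond, sleep_us, timeout_us) */
	ret = readb_poll_timeout(host->ioaddr + SDHCI_SOFTWARE_RESET, sw_rst,
				 !(sw_rst & SDHCI_RESET_ALL), 10, 100);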
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index 71bf086a9812..ca7a1690b2a8 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -509,6 +509,7 @@ static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
static struct platform_driver sdhci_esdhc_mcf_driver = {
.driver = {
.name = "sdhci-esdhc-mcf",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = sdhci_esdhc_mcf_probe,
.remove = sdhci_esdhc_mcf_remove,
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index a30796e79b1c..6de02f09c322 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -5,6 +5,7 @@
* Copyright (c) 2007 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
* Copyright (c) 2010 Pengutronix e.K.
+ * Copyright 2020 NXP
* Author: Wolfram Sang <kernel@pengutronix.de>
*/
@@ -88,6 +89,7 @@
/* DLL Config 0 Register */
#define ESDHC_DLLCFG0 0x160
#define ESDHC_DLL_ENABLE 0x80000000
+#define ESDHC_DLL_RESET 0x40000000
#define ESDHC_DLL_FREQ_SEL 0x08000000
/* DLL Config 1 Register */
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index e2d8dfe90077..c9434b461aab 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -283,6 +283,7 @@ static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
static const struct sdhci_iproc_data bcm2711_data = {
.pdata = &sdhci_bcm2711_pltfm_data,
+ .mmc_caps = MMC_CAP_3_3V_DDR,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
@@ -368,6 +369,7 @@ err:
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_iproc_of_match,
.acpi_match_table = ACPI_PTR(sdhci_iproc_acpi_ids),
.pm = &sdhci_pltfm_pmops,
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index 4e7cc0680f94..148b37ac6564 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -333,6 +333,7 @@ static int sdhci_milbeaut_remove(struct platform_device *pdev)
static struct platform_driver sdhci_milbeaut_driver = {
.driver = {
.name = "sdhci-milbeaut",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mlb_dt_ids),
},
.probe = sdhci_milbeaut_probe,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 729868abd2db..3451eb325513 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -263,7 +263,6 @@ struct sdhci_msm_host {
unsigned long clk_rate;
struct mmc_host *mmc;
struct opp_table *opp_table;
- bool has_opp_table;
bool use_14lpp_dll_reset;
bool tuning_done;
bool calibration_done;
@@ -2167,6 +2166,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
+ {.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
};
@@ -2301,11 +2301,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev);
- if (!ret) {
- msm_host->has_opp_table = true;
- } else if (ret != -ENODEV) {
+ if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
- goto opp_cleanup;
+ goto opp_put_clkname;
}
/* Vote for maximum clock rate for maximum performance */
@@ -2469,8 +2467,8 @@ clk_disable:
clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
msm_host->bulk_clks);
opp_cleanup:
- if (msm_host->has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
+opp_put_clkname:
dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
@@ -2490,8 +2488,7 @@ static int sdhci_msm_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
- if (msm_host->has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(msm_host->opp_table);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -2557,6 +2554,7 @@ static struct platform_driver sdhci_msm_driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
.pm = &sdhci_msm_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
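The sdhci-msm cleanup drops the has_opp_table flag on the assumption that dev_pm_opp_of_remove_table() is safe to call even when dev_pm_opp_of_add_table() never succeeded, since it only drops a table reference if one exists. The resulting shape, sketched:

	ret = dev_pm_opp_of_add_table(&pdev->dev);	/* table is optional */
	if (ret && ret != -ENODEV)
		goto opp_put_clkname;			/* malformed DT entry */

	/* ... later, on the error path and in remove(): */
	dev_pm_opp_of_remove_table(&pdev->dev);		/* no-op if absent */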
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index f186fbd016b1..829ccef87426 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -1543,10 +1543,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
of_node_put(node);
if (IS_ERR(sdhci_arasan->soc_ctl_base)) {
- ret = PTR_ERR(sdhci_arasan->soc_ctl_base);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get syscon: %d\n",
- ret);
+ ret = dev_err_probe(&pdev->dev,
+ PTR_ERR(sdhci_arasan->soc_ctl_base),
+ "Can't get syscon\n");
goto err_pltfm_free;
}
}
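Editor's note: dev_err_probe(), used here and again in the sdhci-tegra hunk further down, folds the -EPROBE_DEFER special case into a single call: it logs the message for real errors, records a deferral reason instead of logging for -EPROBE_DEFER, and returns the error code. A sketch of the before/after shape, assuming a hypothetical clock lookup:

	/* Before: deferral handled by hand. */
	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get clock: %d\n", ret);
		return ret;
	}

	/* After: one call logs (or records the deferral) and returns. */
	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");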
@@ -1694,6 +1693,7 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
static struct platform_driver sdhci_arasan_driver = {
.driver = {
.name = "sdhci-arasan",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_arasan_of_match,
.pm = &sdhci_arasan_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index a1bcc0f4ba9e..4f008ba3280e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -240,6 +240,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
static struct platform_driver aspeed_sdhci_driver = {
.driver = {
.name = "sdhci-aspeed",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = aspeed_sdhci_of_match,
},
.probe = aspeed_sdhci_probe,
@@ -318,6 +319,7 @@ MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match);
static struct platform_driver aspeed_sdc_driver = {
.driver = {
.name = "sd-controller-aspeed",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pltfm_pmops,
.of_match_table = aspeed_sdc_of_match,
},
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 1ece2c50042c..5564d7b23e7c 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -465,6 +465,7 @@ static int sdhci_at91_remove(struct platform_device *pdev)
static struct platform_driver sdhci_at91_driver = {
.driver = {
.name = "sdhci-at91",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_at91_dt_match,
.pm = &sdhci_at91_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 64ac0dbee95c..4b673792b5a4 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -214,6 +214,7 @@ MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
static struct platform_driver sdhci_dwcmshc_driver = {
.driver = {
.name = "sdhci-dwcmshc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_dwcmshc_dt_ids,
.pm = &dwcmshc_pmops,
},
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 45881b309956..ab5ab969f711 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
+ * Copyright 2020 NXP
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
* Anton Vorontsov <avorontsov@ru.mvista.com>
@@ -19,6 +20,7 @@
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
@@ -743,6 +745,21 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
temp |= ESDHC_DLL_FREQ_SEL;
sdhci_writel(host, temp, ESDHC_DLLCFG0);
+
+ temp |= ESDHC_DLL_RESET;
+ sdhci_writel(host, temp, ESDHC_DLLCFG0);
+ udelay(1);
+ temp &= ~ESDHC_DLL_RESET;
+ sdhci_writel(host, temp, ESDHC_DLLCFG0);
+
+ /* Wait max 20 ms */
+ if (read_poll_timeout(sdhci_readl, temp,
+ temp & ESDHC_DLL_STS_SLV_LOCK,
+ 10, 20000, false,
+ host, ESDHC_DLLSTAT0))
+ pr_err("%s: timeout for delay chain lock.\n",
+ mmc_hostname(host->mmc));
+
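+ /*
+ * Editor's note, not part of the patch: read_poll_timeout() from
+ * <linux/iopoll.h> repeatedly evaluates sdhci_readl(host,
+ * ESDHC_DLLSTAT0) into 'temp', sleeping roughly 10 us between reads,
+ * until ESDHC_DLL_STS_SLV_LOCK is set or 20000 us elapse; it returns
+ * 0 on success and -ETIMEDOUT on timeout.
+ */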
temp = sdhci_readl(host, ESDHC_TBCTL);
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
@@ -1052,6 +1069,17 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
esdhc_tuning_block_enable(host, true);
+ /*
+ * The eSDHC controller takes the data timeout value into account
+ * during tuning. If the SD card is too slow sending the response, the
+ * timer will expire and a "Buffer Read Ready" interrupt without data
+ * will be triggered, leading to tuning errors.
+ *
+ * Just set the timeout to the maximum value because the core will
+ * already take care of it in sdhci_send_tuning().
+ */
+ sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
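+ /*
+ * Editor's note (assumption from the SDHCI spec, not in the patch):
+ * the data timeout counter is TMCLK * 2^(13 + N) for N in 0..0xe,
+ * so 0xe selects the largest possible timeout, TMCLK * 2^27.
+ */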
+
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
do {
@@ -1296,6 +1324,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
{ .family = "QorIQ LX2160A", .revision = "1.0", },
+ { .family = "QorIQ LX2160A", .revision = "2.0", },
+ { .family = "QorIQ LS1028A", .revision = "1.0", },
{ },
};
@@ -1360,13 +1390,19 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
clk_put(clk);
}
- if (esdhc->peripheral_clock) {
- esdhc_clock_enable(host, false);
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ esdhc_clock_enable(host, false);
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ /*
+ * This bit cannot be reset by SDHCI_RESET_ALL, so initialize it
+ * explicitly to 1 or 0 once, to override whatever value the
+ * bootloader may have configured.
+ */
+ if (esdhc->peripheral_clock)
val |= ESDHC_PERIPHERAL_CLK_SEL;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
- esdhc_clock_enable(host, true);
- }
+ else
+ val &= ~ESDHC_PERIPHERAL_CLK_SEL;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+ esdhc_clock_enable(host, true);
}
static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
@@ -1468,6 +1504,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_esdhc_of_match,
.pm = &esdhc_of_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index da844a39af6e..12675797b296 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -80,6 +80,7 @@ MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
static struct platform_driver sdhci_hlwd_driver = {
.driver = {
.name = "sdhci-hlwd",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_hlwd_of_match,
.pm = &sdhci_pltfm_pmops,
},
diff --git a/drivers/mmc/host/sdhci-of-sparx5.c b/drivers/mmc/host/sdhci-of-sparx5.c
new file mode 100644
index 000000000000..28e4ee69e100
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-sparx5.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * drivers/mmc/host/sdhci-of-sparx5.c
+ *
+ * MCHP Sparx5 SoC Secure Digital Host Controller Interface.
+ *
+ * Copyright (c) 2019 Microchip Inc.
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ */
+
+#include <linux/sizes.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/dma-mapping.h>
+
+#include "sdhci-pltfm.h"
+
+#define CPU_REGS_GENERAL_CTRL (0x22 * 4)
+#define MSHC_DLY_CC_MASK GENMASK(16, 13)
+#define MSHC_DLY_CC_SHIFT 13
+#define MSHC_DLY_CC_MAX 15
+
+#define CPU_REGS_PROC_CTRL (0x2C * 4)
+#define ACP_CACHE_FORCE_ENA BIT(4)
+#define ACP_AWCACHE BIT(3)
+#define ACP_ARCACHE BIT(2)
+#define ACP_CACHE_MASK (ACP_CACHE_FORCE_ENA|ACP_AWCACHE|ACP_ARCACHE)
+
+#define MSHC2_VERSION 0x500 /* Off 0x140, reg 0x0 */
+#define MSHC2_TYPE 0x504 /* Off 0x140, reg 0x1 */
+#define MSHC2_EMMC_CTRL 0x52c /* Off 0x140, reg 0xB */
+#define MSHC2_EMMC_CTRL_EMMC_RST_N BIT(2)
+#define MSHC2_EMMC_CTRL_IS_EMMC BIT(0)
+
+struct sdhci_sparx5_data {
+ struct sdhci_host *host;
+ struct regmap *cpu_ctrl;
+ int delay_clock;
+};
+
+#define BOUNDARY_OK(addr, len) \
+ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
+
+/*
+ * If a DMA address range spans a 128 MB boundary, split the transfer
+ * into two so that neither part crosses it.
+ */
+static void sdhci_sparx5_adma_write_desc(struct sdhci_host *host, void **desc,
+ dma_addr_t addr, int len,
+ unsigned int cmd)
+{
+ int tmplen, offset;
+
+ if (likely(!len || BOUNDARY_OK(addr, len))) {
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ return;
+ }
+
+ pr_debug("%s: write_desc: splitting dma len %d, offset %pad\n",
+ mmc_hostname(host->mmc), len, &addr);
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
+
+ addr += tmplen;
+ len -= tmplen;
+ sdhci_adma_write_desc(host, desc, addr, len, cmd);
+}
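+/*
+ * Editor's note (illustrative values, not part of the driver): for
+ * addr = 0x7ffff00 and len = 0x1000 the range crosses the 0x8000000
+ * boundary, so offset = 0x7ffff00 and tmplen = 0x100, and the two
+ * descriptors become {0x7ffff00, 0x100} and {0x8000000, 0xf00}.
+ */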
+
+static void sparx5_set_cacheable(struct sdhci_host *host, u32 value)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
+
+ pr_debug("%s: Set Cacheable = 0x%x\n", mmc_hostname(host->mmc), value);
+
+ /* Update ACP caching attributes in HW */
+ regmap_update_bits(sdhci_sparx5->cpu_ctrl,
+ CPU_REGS_PROC_CTRL, ACP_CACHE_MASK, value);
+}
+
+static void sparx5_set_delay(struct sdhci_host *host, u8 value)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_sparx5_data *sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
+
+ pr_debug("%s: Set DLY_CC = %u\n", mmc_hostname(host->mmc), value);
+
+ /* Update DLY_CC in HW */
+ regmap_update_bits(sdhci_sparx5->cpu_ctrl,
+ CPU_REGS_GENERAL_CTRL,
+ MSHC_DLY_CC_MASK,
+ (value << MSHC_DLY_CC_SHIFT));
+}
+
+static void sdhci_sparx5_set_emmc(struct sdhci_host *host)
+{
+ if (!mmc_card_is_removable(host->mmc)) {
+ u8 value;
+
+ value = sdhci_readb(host, MSHC2_EMMC_CTRL);
+ if (!(value & MSHC2_EMMC_CTRL_IS_EMMC)) {
+ value |= MSHC2_EMMC_CTRL_IS_EMMC;
+ pr_debug("%s: Set EMMC_CTRL: 0x%08x\n",
+ mmc_hostname(host->mmc), value);
+ sdhci_writeb(host, value, MSHC2_EMMC_CTRL);
+ }
+ }
+}
+
+static void sdhci_sparx5_reset_emmc(struct sdhci_host *host)
+{
+ u8 value;
+
+ pr_debug("%s: Toggle EMMC_CTRL.EMMC_RST_N\n", mmc_hostname(host->mmc));
+ value = sdhci_readb(host, MSHC2_EMMC_CTRL) &
+ ~MSHC2_EMMC_CTRL_EMMC_RST_N;
+ sdhci_writeb(host, value, MSHC2_EMMC_CTRL);
+ /* For eMMC, minimum is 1us but give it 10us for good measure */
+ usleep_range(10, 20);
+ sdhci_writeb(host, value | MSHC2_EMMC_CTRL_EMMC_RST_N,
+ MSHC2_EMMC_CTRL);
+ /* For eMMC, minimum is 200us but give it 300us for good measure */
+ usleep_range(300, 400);
+}
+
+static void sdhci_sparx5_reset(struct sdhci_host *host, u8 mask)
+{
+ pr_debug("%s: *** RESET: mask %d\n", mmc_hostname(host->mmc), mask);
+
+ sdhci_reset(host, mask);
+
+ /* Be sure CARD_IS_EMMC stays set */
+ sdhci_sparx5_set_emmc(host);
+}
+
+static const struct sdhci_ops sdhci_sparx5_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .reset = sdhci_sparx5_reset,
+ .adma_write_desc = sdhci_sparx5_adma_write_desc,
+};
+
+static const struct sdhci_pltfm_data sdhci_sparx5_pdata = {
+ .quirks = 0,
+ .quirks2 = SDHCI_QUIRK2_HOST_NO_CMD23 | /* Controller issue */
+ SDHCI_QUIRK2_NO_1_8_V, /* No sdr104, ddr50, etc */
+ .ops = &sdhci_sparx5_ops,
+};
+
+static int sdhci_sparx5_probe(struct platform_device *pdev)
+{
+ int ret;
+ const char *syscon = "microchip,sparx5-cpu-syscon";
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_sparx5_data *sdhci_sparx5;
+ struct device_node *np = pdev->dev.of_node;
+ u32 value;
+ u32 extra;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_sparx5_pdata,
+ sizeof(*sdhci_sparx5));
+
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ /*
+ * Extra ADMA descriptor entries to handle transfers that cross
+ * 128 MB boundaries.
+ */
+ extra = DIV_ROUND_UP_ULL(dma_get_required_mask(&pdev->dev), SZ_128M);
+ if (extra > SDHCI_MAX_SEGS)
+ extra = SDHCI_MAX_SEGS;
+ host->adma_table_cnt += extra;
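+ /*
+ * Editor's note (illustrative, not in the patch): with a 32-bit DMA
+ * mask, dma_get_required_mask() returns 0xffffffff, so extra =
+ * DIV_ROUND_UP(4 GiB, 128 MiB) = 32 additional descriptors, one per
+ * boundary a worst-case transfer could cross.
+ */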
+
+ pltfm_host = sdhci_priv(host);
+ sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
+ sdhci_sparx5->host = host;
+
+ pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ dev_err(&pdev->dev, "failed to get core clk: %d\n", ret);
+ goto free_pltfm;
+ }
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret)
+ goto free_pltfm;
+
+ if (!of_property_read_u32(np, "microchip,clock-delay", &value) &&
+ (value > 0 && value <= MSHC_DLY_CC_MAX))
+ sdhci_sparx5->delay_clock = value;
+
+ sdhci_get_of_property(pdev);
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_clk;
+
+ sdhci_sparx5->cpu_ctrl = syscon_regmap_lookup_by_compatible(syscon);
+ if (IS_ERR(sdhci_sparx5->cpu_ctrl)) {
+ dev_err(&pdev->dev, "No CPU syscon regmap!\n");
+ ret = PTR_ERR(sdhci_sparx5->cpu_ctrl);
+ goto err_clk;
+ }
+
+ if (sdhci_sparx5->delay_clock >= 0)
+ sparx5_set_delay(host, sdhci_sparx5->delay_clock);
+
+ if (!mmc_card_is_removable(host->mmc)) {
+ /* Do a HW reset of eMMC card */
+ sdhci_sparx5_reset_emmc(host);
+ /* Update EMMC_CTRL */
+ sdhci_sparx5_set_emmc(host);
+ /* If eMMC, disable SD and SDIO */
+ host->mmc->caps2 |= (MMC_CAP2_NO_SDIO|MMC_CAP2_NO_SD);
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_clk;
+
+ /* Set AXI bus master to use un-cached access (for DMA) */
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA) &&
+ IS_ENABLED(CONFIG_DMA_DECLARE_COHERENT))
+ sparx5_set_cacheable(host, ACP_CACHE_FORCE_ENA);
+
+ pr_debug("%s: SDHC version: 0x%08x\n",
+ mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_VERSION));
+ pr_debug("%s: SDHC type: 0x%08x\n",
+ mmc_hostname(host->mmc), sdhci_readl(host, MSHC2_TYPE));
+
+ return ret;
+
+err_clk:
+ clk_disable_unprepare(pltfm_host->clk);
+free_pltfm:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static const struct of_device_id sdhci_sparx5_of_match[] = {
+ { .compatible = "microchip,dw-sparx5-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_sparx5_of_match);
+
+static struct platform_driver sdhci_sparx5_driver = {
+ .driver = {
+ .name = "sdhci-sparx5",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = sdhci_sparx5_of_match,
+ .pm = &sdhci_pltfm_pmops,
+ },
+ .probe = sdhci_sparx5_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_sparx5_driver);
+
+MODULE_DESCRIPTION("Sparx5 SDHCI OF driver");
+MODULE_AUTHOR("Lars Povlsen <lars.povlsen@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 1ec74c2d5c17..7893fd3599b6 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1297,6 +1297,7 @@ static struct platform_driver sdhci_omap_driver = {
.remove = sdhci_omap_remove,
.driver = {
.name = "sdhci-omap",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_omap_dev_pm_ops,
.of_match_table = omap_sdhci_match,
},
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 914f5184295f..23da7f7fe093 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -24,6 +24,8 @@
#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/debugfs.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
@@ -516,6 +518,8 @@ struct intel_host {
bool rpm_retune_ok;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
+ u32 active_ltr;
+ u32 idle_ltr;
};
static const guid_t intel_dsm_guid =
@@ -760,6 +764,108 @@ static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
return 0;
}
+#define INTEL_ACTIVELTR 0x804
+#define INTEL_IDLELTR 0x808
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US (2 << 10)
+#define INTEL_LTR_SCALE_32US (3 << 10)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+static void intel_cache_ltr(struct sdhci_pci_slot *slot)
+{
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ struct sdhci_host *host = slot->host;
+
+ intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
+ intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ struct sdhci_host *host = slot->host;
+ u32 ltr;
+
+ pm_runtime_get_sync(dev);
+
+ /*
+ * Program the latency tolerance (LTR) according to what the PM QoS
+ * layer has requested, or disable it if we were passed a negative
+ * value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
+ }
+
+ if (ltr == intel_host->active_ltr)
+ goto out;
+
+ writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
+ writel(ltr, host->ioaddr + INTEL_IDLELTR);
+
+ /* Cache the values in the intel_host structure */
+ intel_cache_ltr(slot);
+out:
+ pm_runtime_put_autosuspend(dev);
+}
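+/*
+ * Editor's note (worked example, not in the patch): a request of
+ * 3000 us does not fit the 10-bit value field (max 1023), so it is
+ * shifted right by 5 (divided by 32) to 93 and paired with
+ * INTEL_LTR_SCALE_32US, encoding 93 * 32 = 2976 us; requests of
+ * 1023 us or less are encoded directly with INTEL_LTR_SCALE_1US.
+ */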
+
+static bool intel_use_ltr(struct sdhci_pci_chip *chip)
+{
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_BYT_EMMC:
+ case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
+ case PCI_DEVICE_ID_INTEL_BYT_SDIO:
+ case PCI_DEVICE_ID_INTEL_BYT_SD:
+ case PCI_DEVICE_ID_INTEL_BSW_EMMC:
+ case PCI_DEVICE_ID_INTEL_BSW_SDIO:
+ case PCI_DEVICE_ID_INTEL_BSW_SD:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static void intel_ltr_expose(struct sdhci_pci_chip *chip)
+{
+ struct device *dev = &chip->pdev->dev;
+
+ if (!intel_use_ltr(chip))
+ return;
+
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct sdhci_pci_chip *chip)
+{
+ struct device *dev = &chip->pdev->dev;
+
+ if (!intel_use_ltr(chip))
+ return;
+
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
@@ -774,6 +880,43 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot)
ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
device_property_read_u32(dev, "max-frequency", &mmc->f_max);
+
+ if (!mmc->slotno) {
+ slot->chip->slots[mmc->slotno] = slot;
+ intel_ltr_expose(slot->chip);
+ }
+}
+
+static void byt_add_debugfs(struct sdhci_pci_slot *slot)
+{
+ struct intel_host *intel_host = sdhci_pci_priv(slot);
+ struct mmc_host *mmc = slot->host->mmc;
+ struct dentry *dir = mmc->debugfs_root;
+
+ if (!intel_use_ltr(slot->chip))
+ return;
+
+ debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
+
+ intel_cache_ltr(slot);
+}
+
+static int byt_add_host(struct sdhci_pci_slot *slot)
+{
+ int ret = sdhci_add_host(slot->host);
+
+ if (!ret)
+ byt_add_debugfs(slot);
+ return ret;
+}
+
+static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
+{
+ struct mmc_host *mmc = slot->host->mmc;
+
+ if (!mmc->slotno)
+ intel_ltr_hide(slot->chip);
}
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
@@ -855,6 +998,8 @@ static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
+ byt_add_debugfs(slot);
+
return 0;
cleanup:
@@ -1032,6 +1177,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
#endif
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
+ .add_host = byt_add_host,
+ .remove_slot = byt_remove_slot,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
SDHCI_QUIRK_NO_LED,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1045,6 +1192,7 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
.add_host = glk_emmc_add_host,
+ .remove_slot = byt_remove_slot,
#ifdef CONFIG_PM_SLEEP
.suspend = sdhci_cqhci_suspend,
.resume = sdhci_cqhci_resume,
@@ -1075,6 +1223,8 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = ni_byt_sdio_probe_slot,
+ .add_host = byt_add_host,
+ .remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -1092,6 +1242,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
+ .add_host = byt_add_host,
+ .remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -1111,6 +1263,8 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
+ .add_host = byt_add_host,
+ .remove_slot = byt_remove_slot,
.ops = &sdhci_intel_byt_ops,
.priv_size = sizeof(struct intel_host),
};
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 5da2b06d84ae..9887485a4134 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include "sdhci.h"
#include "sdhci-pci.h"
+#include "cqhci.h"
/* Genesys Logic extra registers */
#define SDHCI_GLI_9750_WT 0x800
@@ -81,9 +82,16 @@
#define GLI_9763E_VHS_REV_R 0x0
#define GLI_9763E_VHS_REV_M 0x1
#define GLI_9763E_VHS_REV_W 0x2
+#define PCIE_GLI_9763E_MB 0x888
+#define GLI_9763E_MB_CMDQ_OFF BIT(19)
#define PCIE_GLI_9763E_SCR 0x8E0
#define GLI_9763E_SCR_AXI_REQ BIT(9)
+#define SDHCI_GLI_9763E_CQE_BASE_ADDR 0x200
+#define GLI_9763E_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
+ SDHCI_TRNS_BLK_CNT_EN | \
+ SDHCI_TRNS_DMA)
+
#define PCI_GLI_9755_WT 0x800
#define PCI_GLI_9755_WT_EN BIT(0)
#define GLI_9755_WT_EN_ON 0x1
@@ -578,6 +586,30 @@ static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
return sdhci_pci_resume_host(chip);
}
+
+static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = sdhci_pci_gli_resume(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(slot->host->mmc);
+}
+
+static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = cqhci_suspend(slot->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_suspend_host(slot->host);
+}
#endif
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
@@ -614,6 +646,110 @@ static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
+static void sdhci_gl9763e_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_gl9763e_cqe_pre_enable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 value;
+
+ value = cqhci_readl(cq_host, CQHCI_CFG);
+ value |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, value, CQHCI_CFG);
+}
+
+static void sdhci_gl9763e_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_writew(host, GLI_9763E_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
+ sdhci_cqe_enable(mmc);
+}
+
+static u32 sdhci_gl9763e_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_gl9763e_cqe_post_disable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u32 value;
+
+ value = cqhci_readl(cq_host, CQHCI_CFG);
+ value &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, value, CQHCI_CFG);
+ sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
+}
+
+static const struct cqhci_host_ops sdhci_gl9763e_cqhci_ops = {
+ .enable = sdhci_gl9763e_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_gl9763e_dumpregs,
+ .pre_enable = sdhci_gl9763e_cqe_pre_enable,
+ .post_disable = sdhci_gl9763e_cqe_post_disable,
+};
+
+static int gl9763e_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_GLI_9763E_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_gl9763e_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
+static void sdhci_gl9763e_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+ sdhci_reset(host, mask);
+}
+
static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -636,7 +772,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
+ struct pci_dev *pdev = slot->chip->pdev;
struct sdhci_host *host = slot->host;
+ u32 value;
host->mmc->caps |= MMC_CAP_8_BIT_DATA |
MMC_CAP_1_8V_DDR |
@@ -646,6 +784,11 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
MMC_CAP2_HS400_ES |
MMC_CAP2_NO_SDIO |
MMC_CAP2_NO_SD;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_MB, &value);
+ if (!(value & GLI_9763E_MB_CMDQ_OFF))
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
@@ -699,9 +842,10 @@ static const struct sdhci_ops sdhci_gl9763e_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_gl9763e_reset,
.set_uhs_signaling = sdhci_set_gl9763e_signaling,
.voltage_switch = sdhci_gli_voltage_switch,
+ .irq = sdhci_gl9763e_cqhci_irq,
};
const struct sdhci_pci_fixes sdhci_gl9763e = {
@@ -709,6 +853,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_pci_gli_resume,
+ .resume = sdhci_cqhci_gli_resume,
+ .suspend = sdhci_cqhci_gli_suspend,
#endif
+ .add_host = gl9763e_add_host,
};
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index a11e6397d4ff..6ce1519ae177 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -241,6 +241,7 @@ MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
static struct platform_driver pic32_sdhci_driver = {
.driver = {
.name = "pic32-sdhci",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pic32_sdhci_id_table),
},
.probe = pic32_sdhci_probe,
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 9282bc4b8c41..f18906b5575f 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -226,6 +226,7 @@ free:
static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_pxav2_of_match),
.pm = &sdhci_pltfm_pmops,
},
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index e55037ceda73..a6d89a3f1946 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -567,6 +567,7 @@ static const struct dev_pm_ops sdhci_pxav3_pmops = {
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_pxav3_of_match),
.pm = &sdhci_pxav3_pmops,
},
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 080ced1e63f0..f48a788a9d3d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -461,7 +461,9 @@ static int sdhci_s3c_parse_dt(struct device *dev,
}
#endif
+#ifdef CONFIG_OF
static const struct of_device_id sdhci_s3c_dt_match[];
+#endif
static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data(
struct platform_device *pdev)
@@ -784,6 +786,7 @@ static struct platform_driver sdhci_s3c_driver = {
.id_table = sdhci_s3c_driver_ids,
.driver = {
.name = "s3c-sdhci",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
.pm = &sdhci_s3c_pmops,
},
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index f4b05dd6c20a..e9b347b3af7e 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -220,6 +220,7 @@ MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match);
static struct platform_driver sdhci_sirf_driver = {
.driver = {
.name = "sdhci-sirf",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_sirf_of_match,
.pm = &sdhci_pltfm_pmops,
},
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index b4b63089a4e2..d463e2fd5b1a 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -181,6 +181,7 @@ MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pm_ops,
.of_match_table = of_match_ptr(sdhci_spear_id_table),
},
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index bafa2e41c8b6..58109c5b53e2 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -387,7 +387,7 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
if (mmc_hsq_finalize_request(host->mmc, mrq))
return;
- mmc_request_done(host->mmc, mrq);
+ mmc_request_done(host->mmc, mrq);
}
static struct sdhci_ops sdhci_sprd_ops = {
@@ -433,7 +433,7 @@ static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
static int sdhci_sprd_request_atomic(struct mmc_host *mmc,
- struct mmc_request *mrq)
+ struct mmc_request *mrq)
{
sdhci_sprd_check_auto_cmd23(mmc, mrq);
@@ -787,6 +787,7 @@ static struct platform_driver sdhci_sprd_driver = {
.remove = sdhci_sprd_remove,
.driver = {
.name = "sdhci_sprd_r11",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_sprd_of_match),
.pm = &sdhci_sprd_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 1301cebfc3ea..4e9ff3e828ba 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -521,6 +521,7 @@ static struct platform_driver sdhci_st_driver = {
.remove = sdhci_st_remove,
.driver = {
.name = "sdhci-st",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_st_pmops,
.of_match_table = of_match_ptr(st_sdhci_match),
},
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 13fbf70b5fde..ed12aacb1c73 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1660,11 +1660,8 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
clk = devm_clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
- rc = PTR_ERR(clk);
-
- if (rc != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock: %d\n", rc);
-
+ rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "failed to get clock\n");
goto err_clk_get;
}
clk_prepare_enable(clk);
@@ -1785,6 +1782,7 @@ static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_tegra_dt_match,
.pm = &sdhci_tegra_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 4703cd540c7f..24c978de2a3f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -677,6 +677,7 @@ MODULE_DEVICE_TABLE(of, sdhci_xenon_dt_ids);
static struct platform_driver sdhci_xenon_driver = {
.driver = {
.name = "xenon-sdhci",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_xenon_dt_ids,
.pm = &sdhci_xenon_dev_pm_ops,
},
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 592a55a34b58..3561ae8a481a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1384,9 +1384,11 @@ static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
/*
* In case of Version 4.10 or later, use of 'Auto CMD Auto
* Select' is recommended rather than use of 'Auto CMD12
- * Enable' or 'Auto CMD23 Enable'.
+ * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
+ * here because some controllers (e.g. sdhci-of-dwcmshc) expect it.
*/
- if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
+ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
+ (use_cmd12 || use_cmd23)) {
*mode |= SDHCI_TRNS_AUTO_SEL;
ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f9d24af12396..a64ea143d185 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -2,10 +2,11 @@
/*
* sdhci_am654.c - SDHCI driver for TI's AM654 SOCs
*
- * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
*
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -18,9 +19,11 @@
/* CTL_CFG Registers */
#define CTL_CFG_2 0x14
+#define CTL_CFG_3 0x18
#define SLOTTYPE_MASK GENMASK(31, 30)
#define SLOTTYPE_EMBEDDED BIT(30)
+#define TUNINGFORSDR50_MASK BIT(13)
/* PHY Registers */
#define PHY_CTRL1 0x100
@@ -65,6 +68,14 @@
#define RETRIM_MASK BIT(RETRIM_SHIFT)
#define SELDLYTXCLK_SHIFT 17
#define SELDLYTXCLK_MASK BIT(SELDLYTXCLK_SHIFT)
+#define SELDLYRXCLK_SHIFT 16
+#define SELDLYRXCLK_MASK BIT(SELDLYRXCLK_SHIFT)
+#define ITAPDLYSEL_SHIFT 0
+#define ITAPDLYSEL_MASK GENMASK(4, 0)
+#define ITAPDLYENA_SHIFT 8
+#define ITAPDLYENA_MASK BIT(ITAPDLYENA_SHIFT)
+#define ITAPCHGWIN_SHIFT 9
+#define ITAPCHGWIN_MASK BIT(ITAPCHGWIN_SHIFT)
#define DRIVER_STRENGTH_50_OHM 0x0
#define DRIVER_STRENGTH_33_OHM 0x1
@@ -72,7 +83,7 @@
#define DRIVER_STRENGTH_100_OHM 0x3
#define DRIVER_STRENGTH_40_OHM 0x4
-#define CLOCK_TOO_SLOW_HZ 400000
+#define CLOCK_TOO_SLOW_HZ 50000000
/* Command Queue Host Controller Interface Base address */
#define SDHCI_AM654_CQE_BASE_ADDR 0x200
@@ -84,14 +95,56 @@ static struct regmap_config sdhci_am654_regmap_config = {
.fast_io = true,
};
+struct timing_data {
+ const char *otap_binding;
+ const char *itap_binding;
+ u32 capability;
+};
+
+static const struct timing_data td[] = {
+ [MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy",
+ "ti,itap-del-sel-legacy",
+ 0},
+ [MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs",
+ "ti,itap-del-sel-mmc-hs",
+ MMC_CAP_MMC_HIGHSPEED},
+ [MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs",
+ "ti,itap-del-sel-sd-hs",
+ MMC_CAP_SD_HIGHSPEED},
+ [MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12",
+ "ti,itap-del-sel-sdr12",
+ MMC_CAP_UHS_SDR12},
+ [MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25",
+ "ti,itap-del-sel-sdr25",
+ MMC_CAP_UHS_SDR25},
+ [MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50",
+ NULL,
+ MMC_CAP_UHS_SDR50},
+ [MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104",
+ NULL,
+ MMC_CAP_UHS_SDR104},
+ [MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50",
+ NULL,
+ MMC_CAP_UHS_DDR50},
+ [MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52",
+ "ti,itap-del-sel-ddr52",
+ MMC_CAP_DDR},
+ [MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200",
+ NULL,
+ MMC_CAP2_HS200},
+ [MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400",
+ NULL,
+ MMC_CAP2_HS400},
+};
+
struct sdhci_am654_data {
struct regmap *base;
bool legacy_otapdly;
- int otap_del_sel[11];
+ int otap_del_sel[ARRAY_SIZE(td)];
+ int itap_del_sel[ARRAY_SIZE(td)];
int clkbuf_sel;
int trm_icp;
int drv_strength;
- bool dll_on;
int strb_sel;
u32 flags;
};
@@ -106,26 +159,6 @@ struct sdhci_am654_driver_data {
#define DLL_CALIB (1 << 4)
};
-struct timing_data {
- const char *binding;
- u32 capability;
-};
-
-static const struct timing_data td[] = {
- [MMC_TIMING_LEGACY] = {"ti,otap-del-sel-legacy", 0},
- [MMC_TIMING_MMC_HS] = {"ti,otap-del-sel-mmc-hs", MMC_CAP_MMC_HIGHSPEED},
- [MMC_TIMING_SD_HS] = {"ti,otap-del-sel-sd-hs", MMC_CAP_SD_HIGHSPEED},
- [MMC_TIMING_UHS_SDR12] = {"ti,otap-del-sel-sdr12", MMC_CAP_UHS_SDR12},
- [MMC_TIMING_UHS_SDR25] = {"ti,otap-del-sel-sdr25", MMC_CAP_UHS_SDR25},
- [MMC_TIMING_UHS_SDR50] = {"ti,otap-del-sel-sdr50", MMC_CAP_UHS_SDR50},
- [MMC_TIMING_UHS_SDR104] = {"ti,otap-del-sel-sdr104",
- MMC_CAP_UHS_SDR104},
- [MMC_TIMING_UHS_DDR50] = {"ti,otap-del-sel-ddr50", MMC_CAP_UHS_DDR50},
- [MMC_TIMING_MMC_DDR52] = {"ti,otap-del-sel-ddr52", MMC_CAP_DDR},
- [MMC_TIMING_MMC_HS200] = {"ti,otap-del-sel-hs200", MMC_CAP2_HS200},
- [MMC_TIMING_MMC_HS400] = {"ti,otap-del-sel-hs400", MMC_CAP2_HS400},
-};
-
static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -134,6 +167,10 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
u32 mask, val;
int ret;
+ /* Disable delay chain mode */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5,
+ SELDLYTXCLK_MASK | SELDLYRXCLK_MASK, 0);
+
if (sdhci_am654->flags & FREQSEL_2_BIT) {
switch (clock) {
case 200000000:
@@ -188,8 +225,32 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
dev_err(mmc_dev(host->mmc), "DLL failed to relock\n");
return;
}
+}
+
+static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
+ u32 itapdly)
+{
+ /* Set ITAPCHGWIN before writing to ITAPDLY */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
+ itapdly << ITAPDLYSEL_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
+}
+
+static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
+ unsigned char timing)
+{
+ u32 mask, val;
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
+
+ val = 1 << SELDLYTXCLK_SHIFT | 1 << SELDLYRXCLK_SHIFT;
+ mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
- sdhci_am654->dll_on = true;
+ sdhci_am654_write_itapdly(sdhci_am654,
+ sdhci_am654->itap_del_sel[timing]);
}
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -201,11 +262,7 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
u32 otap_del_ena;
u32 mask, val;
- if (sdhci_am654->dll_on) {
- regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
-
- sdhci_am654->dll_on = false;
- }
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
sdhci_set_clock(host, clock);
@@ -233,14 +290,10 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
- if (timing > MMC_TIMING_UHS_SDR25 && clock > CLOCK_TOO_SLOW_HZ) {
- regmap_update_bits(sdhci_am654->base, PHY_CTRL5,
- SELDLYTXCLK_MASK, 0);
+ if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
sdhci_am654_setup_dll(host, clock);
- } else {
- regmap_update_bits(sdhci_am654->base, PHY_CTRL5,
- SELDLYTXCLK_MASK, 1 << SELDLYTXCLK_SHIFT);
- }
+ else
+ sdhci_am654_setup_delay_chain(sdhci_am654, timing);
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -272,9 +325,19 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
+static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
+{
+ writeb(val, host->ioaddr + reg);
+ usleep_range(1000, 10000);
+ return readb(host->ioaddr + reg);
+}
+
+#define MAX_POWER_ON_TIMEOUT 1500000 /* us */
static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
{
unsigned char timing = host->mmc->ios.timing;
+ u8 pwr;
+ int ret;
if (reg == SDHCI_HOST_CONTROL) {
switch (timing) {
@@ -291,6 +354,19 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
}
writeb(val, host->ioaddr + reg);
+ if (reg == SDHCI_POWER_CONTROL && (val & SDHCI_POWER_ON)) {
+ /*
+ * Power on will not happen until the card detect debounce
+ * timer expires. Wait at least 1.5 seconds for the power on
+ * bit to be set.
+ */
+ ret = read_poll_timeout(sdhci_am654_write_power_on, pwr,
+ pwr & SDHCI_POWER_ON, 0,
+ MAX_POWER_ON_TIMEOUT, false, host, val,
+ reg);
+ if (ret)
+ dev_warn(mmc_dev(host->mmc), "Power on failed\n");
+ }
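+ /*
+ * Editor's note: read_poll_timeout() is given
+ * sdhci_am654_write_power_on() as its "read" op here, so every poll
+ * iteration rewrites SDHCI_POWER_CONTROL and reads it back, retrying
+ * the write until the POWER_ON bit sticks or 1.5 seconds elapse.
+ */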
}
static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
@@ -322,7 +398,46 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
+#define ITAP_MAX 32
+static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
+ u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
+ u32 itap;
+
+ /* Enable ITAPDLY */
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+ 1 << ITAPDLYENA_SHIFT);
+
+ for (itap = 0; itap < ITAP_MAX; itap++) {
+ sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
+ if (cur_val && !prev_val)
+ pass_window = itap;
+
+ if (!cur_val)
+ fail_len++;
+
+ prev_val = cur_val;
+ }
+ /*
+ * Having determined the length of the failing window and the start of
+ * the passing window, calculate the length of the passing window and
+ * set the final value halfway through it, treating the range as a
+ * circular buffer.
+ */
+ pass_len = ITAP_MAX - fail_len;
+ itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
+ sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ return 0;
+}
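+/*
+ * Editor's note (made-up tuning results): if taps 8..19 fail and the
+ * rest pass, the loop leaves fail_len = 12 and pass_window = 20 (the
+ * first passing tap after a failure), so pass_len = 32 - 12 = 20 and
+ * the final tap is (20 + 10) % 32 = 30, the middle of the circular
+ * passing window.
+ */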
+
static struct sdhci_ops sdhci_am654_ops = {
+ .platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -352,6 +467,7 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
};
static struct sdhci_ops sdhci_j721e_8bit_ops = {
+ .platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -375,6 +491,7 @@ static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
};
static struct sdhci_ops sdhci_j721e_4bit_ops = {
+ .platform_execute_tuning = sdhci_am654_platform_execute_tuning,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -445,7 +562,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
int i;
int ret;
- ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].binding,
+ ret = device_property_read_u32(dev, td[MMC_TIMING_LEGACY].otap_binding,
&sdhci_am654->otap_del_sel[MMC_TIMING_LEGACY]);
if (ret) {
/*
@@ -468,11 +585,11 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
- ret = device_property_read_u32(dev, td[i].binding,
+ ret = device_property_read_u32(dev, td[i].otap_binding,
&sdhci_am654->otap_del_sel[i]);
if (ret) {
dev_dbg(dev, "Couldn't find %s\n",
- td[i].binding);
+ td[i].otap_binding);
/*
* Remove the corresponding capability
* if an otap-del-sel value is not found
@@ -482,6 +599,10 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
else
host->mmc->caps2 &= ~td[i].capability;
}
+
+ if (td[i].itap_binding)
+ device_property_read_u32(dev, td[i].itap_binding,
+ &sdhci_am654->itap_del_sel[i]);
}
return 0;
@@ -527,6 +648,10 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
ctl_cfg_2);
+ /* Enable tuning for SDR50 */
+ regmap_update_bits(sdhci_am654->base, CTL_CFG_3, TUNINGFORSDR50_MASK,
+ TUNINGFORSDR50_MASK);
+
ret = sdhci_setup_host(host);
if (ret)
return ret;
@@ -614,6 +739,7 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sdhci_am654_of_match);
static int sdhci_am654_probe(struct platform_device *pdev)
{
@@ -721,6 +847,7 @@ static int sdhci_am654_remove(struct platform_device *pdev)
static struct platform_driver sdhci_am654_driver = {
.driver = {
.name = "sdhci-am654",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_am654_of_match,
},
.probe = sdhci_am654_probe,
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 4625cc071b61..3f5977979cf2 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -219,6 +219,7 @@ MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(f_sdh30_dt_ids),
.acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9f53634aa411..e5e457037235 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1562,6 +1562,7 @@ static struct platform_driver sh_mmcif_driver = {
.remove = sh_mmcif_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sh_mmcif_dev_pm_ops,
.of_match_table = sh_mmcif_of_match,
},
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 5e95bbc51644..fc62773602ec 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1514,6 +1514,7 @@ static const struct dev_pm_ops sunxi_mmc_pm_ops = {
static struct platform_driver sunxi_mmc_driver = {
.driver = {
.name = "sunxi-mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sunxi_mmc_of_match),
.pm = &sunxi_mmc_pm_ops,
},
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 93e83ad25976..d2d3b8df1bbe 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -77,18 +77,10 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
- /* FIXME - should we set stop clock reg here */
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
usleep_range(10000, 11000);
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
usleep_range(10000, 11000);
-
- if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
- sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
- }
}
#ifdef CONFIG_PM_SLEEP
@@ -221,6 +213,7 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
static struct platform_driver tmio_mmc_driver = {
.driver = {
.name = "tmio-mmc",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &tmio_mmc_dev_pm_ops,
},
.probe = tmio_mmc_probe,
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 0a4f36500add..9546e542619c 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -178,14 +178,8 @@ struct tmio_mmc_host {
unsigned int direction, int blk_size);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*reset)(struct tmio_mmc_host *host);
- void (*hw_reset)(struct tmio_mmc_host *host);
bool (*check_retune)(struct tmio_mmc_host *host);
-
- /*
- * Mandatory callback for tuning to occur which is optional for SDR50
- * and mandatory for SDR104.
- */
- int (*execute_tuning)(struct tmio_mmc_host *host, u32 opcode);
+ void (*fixup_request)(struct tmio_mmc_host *host, struct mmc_request *mrq);
void (*prepare_hs400_tuning)(struct tmio_mmc_host *host);
void (*hs400_downgrade)(struct tmio_mmc_host *host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 946fb013c610..cb4149fd12e0 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -172,24 +172,17 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
usleep_range(10000, 11000);
+ if (host->reset)
+ host->reset(host);
+
+ tmio_mmc_abort_dma(host);
+
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
}
}
-static void tmio_mmc_hw_reset(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- host->reset(host);
-
- tmio_mmc_abort_dma(host);
-
- if (host->hw_reset)
- host->hw_reset(host);
-}
-
static void tmio_mmc_reset_work(struct work_struct *work)
{
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
@@ -228,11 +221,10 @@ static void tmio_mmc_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&host->lock, flags);
- tmio_mmc_hw_reset(host->mmc);
+ tmio_mmc_reset(host);
/* Ready for new calls */
host->mrq = NULL;
-
mmc_request_done(host->mmc, mrq);
}
@@ -720,24 +712,6 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
return 0;
}
-static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- int ret;
-
- if (!host->execute_tuning)
- return 0;
-
- ret = host->execute_tuning(host, opcode);
-
- if (ret < 0) {
- dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
- tmio_mmc_hw_reset(mmc);
- }
-
- return ret;
-}
-
static void tmio_process_mrq(struct tmio_mmc_host *host,
struct mmc_request *mrq)
{
@@ -835,6 +809,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
return;
}
+ if (host->fixup_request)
+ host->fixup_request(host, mrq);
+
mmc_request_done(host->mmc, mrq);
}
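Editor's note: the new fixup_request hook gives glue drivers a last look at a request before completion. A hypothetical consumer (names invented, not from this patch):

	static void foo_sd_fixup_request(struct tmio_mmc_host *host,
					 struct mmc_request *mrq)
	{
		/* e.g. translate a controller quirk into a clean timeout */
		if (mrq->cmd && mrq->cmd->error == -EILSEQ)
			mrq->cmd->error = -ETIMEDOUT;
	}

	...
	host->fixup_request = foo_sd_fixup_request;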
@@ -950,6 +927,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
+ /* Downgrade ensures a sane state for tuning HW (e.g. SCC) */
+ if (host->mmc->ops->hs400_downgrade)
+ host->mmc->ops->hs400_downgrade(host->mmc);
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
@@ -1011,8 +991,6 @@ static struct mmc_host_ops tmio_mmc_ops = {
.get_cd = tmio_mmc_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
.multi_io_quirk = tmio_multi_io_quirk,
- .hw_reset = tmio_mmc_hw_reset,
- .execute_tuning = tmio_mmc_execute_tuning,
};
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
@@ -1156,9 +1134,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
- if (!_host->reset)
- _host->reset = tmio_mmc_reset;
-
/*
* On Gen2+, eMMC with NONREMOVABLE currently fails because native
* hotplug gets disabled. It seems RuntimePM related yet we need further
@@ -1180,7 +1155,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
_host->set_clock(_host, 0);
- tmio_mmc_hw_reset(mmc);
+ tmio_mmc_reset(_host);
_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
@@ -1283,7 +1258,7 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_clk_enable(host);
- tmio_mmc_hw_reset(host->mmc);
+ tmio_mmc_reset(host);
if (host->clk_cache)
host->set_clock(host, host->clk_cache);
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index f82baf99fd69..3092466a99ab 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -409,8 +409,9 @@ static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
clk_disable_unprepare(priv->clk);
}
-static void uniphier_sd_hw_reset(struct tmio_mmc_host *host)
+static void uniphier_sd_hw_reset(struct mmc_host *mmc)
{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
reset_control_assert(priv->rst_hw);
@@ -597,7 +598,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->rst_hw);
goto free_host;
}
- host->hw_reset = uniphier_sd_hw_reset;
+ host->ops.hw_reset = uniphier_sd_hw_reset;
}
if (host->mmc->caps & MMC_CAP_UHS) {
@@ -684,6 +685,7 @@ static struct platform_driver uniphier_sd_driver = {
.remove = uniphier_sd_remove,
.driver = {
.name = "uniphier-sd",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = uniphier_sd_match,
},
};
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 7666c90054ae..e2d5112d809d 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1890,6 +1890,7 @@ static struct platform_driver usdhi6_driver = {
.remove = usdhi6_remove,
.driver = {
.name = "usdhi6rol0",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = usdhi6_of_match,
},
};
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 49dab9f42b6d..9b755ea0fa03 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1257,11 +1257,14 @@ static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
static int __maybe_unused via_sd_suspend(struct device *dev)
{
struct via_crdr_mmc_host *host;
+ unsigned long flags;
host = dev_get_drvdata(dev);
+ spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
+ spin_unlock_irqrestore(&host->lock, flags);
device_wakeup_enable(dev);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 67f917d6ecd3..cd63ea865b77 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1905,6 +1905,7 @@ static struct platform_driver wbsd_driver = {
.resume = wbsd_platform_resume,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 2c4ba1fa4bbf..cf10949fb0ac 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -990,6 +990,7 @@ static struct platform_driver wmt_mci_driver = {
.remove = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = wmt_mci_pm_ops,
.of_match_table = wmt_mci_dt_ids,
},
diff --git a/drivers/most/Kconfig b/drivers/most/Kconfig
index 60fc0820dad3..ebfe84e69715 100644
--- a/drivers/most/Kconfig
+++ b/drivers/most/Kconfig
@@ -23,4 +23,13 @@ config MOST_USB_HDM
To compile this driver as a module, choose M here: the
module will be called most_usb.
+
+config MOST_CDEV
+ tristate "Cdev"
+
+ help
+ Say Y here if you want to communicate via character devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called most_cdev.
endif
diff --git a/drivers/most/Makefile b/drivers/most/Makefile
index 6a3cb9056288..8b53ca46633f 100644
--- a/drivers/most/Makefile
+++ b/drivers/most/Makefile
@@ -4,3 +4,4 @@ most_core-y := core.o \
configfs.o
obj-$(CONFIG_MOST_USB_HDM) += most_usb.o
+obj-$(CONFIG_MOST_CDEV) += most_cdev.o
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/most/most_cdev.c
index 044880760b58..044880760b58 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/most/most_cdev.c
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index f96287c4b789..0f4c2d823de8 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -91,7 +91,7 @@ config MTD_MCHP23K256
config MTD_SPEAR_SMI
tristate "SPEAR MTD NOR Support through SMI controller"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
default y
help
This enables SNOR support on SPEAR platforms using the SMI controller
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 56f50d27b7fd..aecd441e4183 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -436,7 +436,10 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
{
int gap = BUSWIDTH - (from & (BUSWIDTH - 1));
- while (len && gap--) *buf++ = read8 (from++), len--;
+ while (len && gap--) {
+ *buf++ = read8 (from++);
+ len--;
+ }
}
/* now we read dwords until we reach a non-dword boundary */
@@ -518,7 +521,10 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
i = n = 0;
while (gap--) tmp[i++] = 0xFF;
- while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--;
+ while (len && i < BUSWIDTH) {
+ tmp[i++] = buf[n++];
+ len--;
+ }
while (i < BUSWIDTH) tmp[i++] = 0xFF;
if (!write_dword (aligned,*((__u32 *) tmp))) return (-EIO);
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 79dcca16481d..2e00862389dd 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -793,7 +793,7 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev,
struct device_node *np)
{
struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *pp = NULL;
+ struct device_node *pp;
const __be32 *addr;
u32 val;
int len;
@@ -812,7 +812,7 @@ static int spear_smi_probe_config_dt(struct platform_device *pdev,
return -ENOMEM;
/* Fill structs for each subnode (flash device) */
- while ((pp = of_get_next_child(np, pp))) {
+ for_each_child_of_node(np, pp) {
pdata->np[i] = pp;
/* Read base-addr and size from DT */
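The for_each_child_of_node() helper used above takes a reference on each child
and drops the previous one as it iterates, which is what makes the explicit
of_get_next_child() cursor unnecessary. A minimal sketch of the pattern, with
an illustrative "example,flash" compatible string; note that an early exit
leaves a reference held on the current child, which the caller must drop:

	#include <linux/of.h>

	static struct device_node *find_flash_child(struct device_node *parent)
	{
		struct device_node *child;

		for_each_child_of_node(parent, child) {
			if (of_device_is_compatible(child, "example,flash"))
				return child;	/* caller must of_node_put() */
		}

		return NULL;
	}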
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index a4d8968d133d..46c7e407e378 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -22,4 +22,11 @@ config HBMC_AM654
This is the driver for HyperBus controller on TI's AM65x and
other SoCs
+config RPCIF_HYPERBUS
+ tristate "Renesas RPC-IF HyperBus driver"
+ depends on RENESAS_RPCIF
+ depends on MTD_CFI_BE_BYTE_SWAP
+ help
+ This option includes Renesas RPC-IF HyperBus support.
+
endif # MTD_HYPERBUS
diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile
index 8a936e066f48..5fc2b5124542 100644
--- a/drivers/mtd/hyperbus/Makefile
+++ b/drivers/mtd/hyperbus/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_MTD_HYPERBUS) += hyperbus-core.o
obj-$(CONFIG_HBMC_AM654) += hbmc-am654.o
+obj-$(CONFIG_RPCIF_HYPERBUS) += rpc-if.o
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index e0e33f6bf513..a3439b791eeb 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -3,6 +3,10 @@
// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
// Author: Vignesh Raghavendra <vigneshr@ti.com>
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -13,11 +17,18 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/sched/task_stack.h>
#include <linux/types.h>
#define AM654_HBMC_CALIB_COUNT 25
+struct am654_hbmc_device_priv {
+ struct completion rx_dma_complete;
+ phys_addr_t device_base;
+ struct hyperbus_ctlr *ctlr;
+ struct dma_chan *rx_chan;
+};
+
struct am654_hbmc_priv {
struct hyperbus_ctlr ctlr;
struct hyperbus_device hbdev;
@@ -52,13 +63,103 @@ static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
return ret;
}
+static void am654_hbmc_dma_callback(void *param)
+{
+ struct am654_hbmc_device_priv *priv = param;
+
+ complete(&priv->rx_dma_complete);
+}
+
+static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
+ unsigned long from, ssize_t len)
+{
+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct dma_chan *rx_chan = priv->rx_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_dst, dma_src;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
+ return -EINVAL;
+
+ dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
+ dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
+ return -EIO;
+ }
+
+ dma_src = priv->device_base + from;
+ tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
+ if (!tx) {
+ dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
+ ret = -EIO;
+ goto unmap_dma;
+ }
+
+ reinit_completion(&priv->rx_dma_complete);
+ tx->callback = am654_hbmc_dma_callback;
+ tx->callback_param = priv;
+ cookie = dmaengine_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(rx_chan);
+ if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
+ dmaengine_terminate_sync(rx_chan);
+ dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+unmap_dma:
+ dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
+ return ret;
+}
+
+static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct am654_hbmc_device_priv *priv = hbdev->priv;
+
+ if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
+ memcpy_fromio(to, hbdev->map.virt + from, len);
+}
+
static const struct hyperbus_ops am654_hbmc_ops = {
.calibrate = am654_hbmc_calibrate,
+ .copy_from = am654_hbmc_read,
};
+static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
+{
+ struct dma_chan *rx_chan;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ rx_chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(rx_chan)) {
+ if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
+ return 0;
+ }
+ priv->rx_chan = rx_chan;
+ init_completion(&priv->rx_dma_complete);
+
+ return 0;
+}
+
static int am654_hbmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct am654_hbmc_device_priv *dev_priv;
struct device *dev = &pdev->dev;
struct am654_hbmc_priv *priv;
struct resource res;
@@ -70,7 +171,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- ret = of_address_to_resource(np, 0, &res);
+ priv->hbdev.np = of_get_next_child(np, NULL);
+ ret = of_address_to_resource(priv->hbdev.np, 0, &res);
if (ret)
return ret;
@@ -88,13 +190,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
priv->mux_ctrl = control;
}
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- goto disable_pm;
- }
-
priv->hbdev.map.size = resource_size(&res);
priv->hbdev.map.virt = devm_ioremap_resource(dev, &res);
if (IS_ERR(priv->hbdev.map.virt))
@@ -103,17 +198,32 @@ static int am654_hbmc_probe(struct platform_device *pdev)
priv->ctlr.dev = dev;
priv->ctlr.ops = &am654_hbmc_ops;
priv->hbdev.ctlr = &priv->ctlr;
- priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
+
+ dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
+ if (!dev_priv) {
+ ret = -ENOMEM;
+ goto disable_mux;
+ }
+
+ priv->hbdev.priv = dev_priv;
+ dev_priv->device_base = res.start;
+ dev_priv->ctlr = &priv->ctlr;
+
+ ret = am654_hbmc_request_mmap_dma(dev_priv);
+ if (ret)
+ goto disable_mux;
+
ret = hyperbus_register_device(&priv->hbdev);
if (ret) {
dev_err(dev, "failed to register controller\n");
- pm_runtime_put_sync(&pdev->dev);
- goto disable_pm;
+ goto release_dma;
}
return 0;
-disable_pm:
- pm_runtime_disable(dev);
+release_dma:
+ if (dev_priv->rx_chan)
+ dma_release_channel(dev_priv->rx_chan);
+disable_mux:
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
return ret;
@@ -122,13 +232,15 @@ disable_pm:
static int am654_hbmc_remove(struct platform_device *pdev)
{
struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+ struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
int ret;
ret = hyperbus_unregister_device(&priv->hbdev);
if (priv->mux_ctrl)
mux_control_deselect(priv->mux_ctrl);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+
+ if (dev_priv->rx_chan)
+ dma_release_channel(dev_priv->rx_chan);
return ret;
}
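The DMA read added to this driver follows the generic dmaengine memcpy flow:
prepare a descriptor, attach a completion callback, submit, issue, then bound
the wait with a timeout and terminate the channel on expiry. A condensed
sketch of that flow under assumed conditions (the channel was obtained
elsewhere, e.g. via dma_request_chan_by_mask(), and both addresses are already
DMA-mapped); memcpy_via_dma() and dma_done() are illustrative names, not part
of the driver:

	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/jiffies.h>

	static void dma_done(void *param)
	{
		complete(param);		/* runs in interrupt context */
	}

	static int memcpy_via_dma(struct dma_chan *chan, dma_addr_t dst,
				  dma_addr_t src, size_t len)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		/* Prepare a memcpy descriptor for the transfer. */
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
		if (!tx)
			return -EIO;

		/* Hook up the completion and queue the descriptor. */
		tx->callback = dma_done;
		tx->callback_param = &done;
		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		/* Kick the channel, then bound the wait. */
		dma_async_issue_pending(chan);
		if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000))) {
			dmaengine_terminate_sync(chan);
			return -ETIMEDOUT;
		}

		return 0;
	}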
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
new file mode 100644
index 000000000000..ecb050ba95cd
--- /dev/null
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux driver for RPC-IF HyperFlash
+ *
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <memory/renesas-rpc-if.h>
+
+struct rpcif_hyperbus {
+ struct rpcif rpc;
+ struct hyperbus_ctlr ctlr;
+ struct hyperbus_device hbdev;
+};
+
+static const struct rpcif_op rpcif_op_tmpl = {
+ .cmd = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .ocmd = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .addr = {
+ .nbytes = 1,
+ .buswidth = 8,
+ .ddr = true,
+ },
+ .data = {
+ .buswidth = 8,
+ .ddr = true,
+ },
+};
+
+static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct rpcif_op op = rpcif_op_tmpl;
+
+ op.cmd.opcode = HYPERBUS_RW_READ | HYPERBUS_AS_MEM;
+ op.addr.val = from >> 1;
+ op.dummy.buswidth = 1;
+ op.dummy.ncycles = 15;
+ op.data.dir = RPCIF_DATA_IN;
+ op.data.nbytes = len;
+ op.data.buf.in = to;
+
+ rpcif_prepare(rpc, &op, NULL, NULL);
+}
+
+static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to,
+ void *from, ssize_t len)
+{
+ struct rpcif_op op = rpcif_op_tmpl;
+
+ op.cmd.opcode = HYPERBUS_RW_WRITE | HYPERBUS_AS_MEM;
+ op.addr.val = to >> 1;
+ op.data.dir = RPCIF_DATA_OUT;
+ op.data.nbytes = len;
+ op.data.buf.out = from;
+
+ rpcif_prepare(rpc, &op, NULL, NULL);
+}
+
+static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+ map_word data;
+
+ rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2);
+
+ rpcif_manual_xfer(&hyperbus->rpc);
+
+ return data.x[0];
+}
+
+static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr,
+ u16 data)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+ rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2);
+
+ rpcif_manual_xfer(&hyperbus->rpc);
+}
+
+static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct rpcif_hyperbus *hyperbus =
+ container_of(hbdev, struct rpcif_hyperbus, hbdev);
+
+ rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len);
+
+ rpcif_dirmap_read(&hyperbus->rpc, from, len, to);
+}
+
+static const struct hyperbus_ops rpcif_hb_ops = {
+ .read16 = rpcif_hb_read16,
+ .write16 = rpcif_hb_write16,
+ .copy_from = rpcif_hb_copy_from,
+};
+
+static int rpcif_hb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpcif_hyperbus *hyperbus;
+ int error;
+
+ hyperbus = devm_kzalloc(dev, sizeof(*hyperbus), GFP_KERNEL);
+ if (!hyperbus)
+ return -ENOMEM;
+
+ rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent);
+
+ platform_set_drvdata(pdev, hyperbus);
+
+ rpcif_enable_rpm(&hyperbus->rpc);
+
+ rpcif_hw_init(&hyperbus->rpc, true);
+
+ hyperbus->hbdev.map.size = hyperbus->rpc.size;
+ hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
+
+ hyperbus->ctlr.dev = dev;
+ hyperbus->ctlr.ops = &rpcif_hb_ops;
+ hyperbus->hbdev.ctlr = &hyperbus->ctlr;
+ hyperbus->hbdev.np = of_get_next_child(pdev->dev.parent->of_node, NULL);
+ error = hyperbus_register_device(&hyperbus->hbdev);
+ if (error)
+ rpcif_disable_rpm(&hyperbus->rpc);
+
+ return error;
+}
+
+static int rpcif_hb_remove(struct platform_device *pdev)
+{
+ struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
+ int error = hyperbus_unregister_device(&hyperbus->hbdev);
+ struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent);
+
+ rpcif_disable_rpm(rpc);
+ return error;
+}
+
+static struct platform_driver rpcif_platform_driver = {
+ .probe = rpcif_hb_probe,
+ .remove = rpcif_hb_remove,
+ .driver = {
+ .name = "rpc-if-hyperflash",
+ },
+};
+
+module_platform_driver(rpcif_platform_driver);
+
+MODULE_DESCRIPTION("Renesas RPC-IF HyperFlash driver");
+MODULE_LICENSE("GPL v2");
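The read16/write16/copy_from hooks above all recover the driver state from the
embedded hyperbus_device via container_of(). A minimal userspace model of that
embed-and-recover pattern (the kernel macro additionally type-checks the
member; the struct names here are illustrative):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct hb_dev { int id; };

	struct drv_priv {
		int config;
		struct hb_dev hbdev;	/* embedded generic object */
	};

	static int drv_config_of(struct hb_dev *hbdev)
	{
		/* A pointer to the member yields the enclosing object. */
		struct drv_priv *priv = container_of(hbdev, struct drv_priv, hbdev);

		return priv->config;
	}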
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index 0f1547f09d08..72f5c7b30079 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -393,6 +393,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
}
+static const struct mtd_info lpddr2_nvm_mtd_info = {
+ .type = MTD_RAM,
+ .writesize = 1,
+ .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
+ ._read = lpddr2_nvm_read,
+ ._write = lpddr2_nvm_write,
+ ._erase = lpddr2_nvm_erase,
+ ._unlock = lpddr2_nvm_unlock,
+ ._lock = lpddr2_nvm_lock,
+};
+
/*
* lpddr2_nvm driver probe method
*/
@@ -433,6 +444,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
.pfow_base = OW_BASE_ADDRESS,
.fldrv_priv = pcm_data,
};
+
if (IS_ERR(map->virt))
return PTR_ERR(map->virt);
@@ -444,22 +456,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
return PTR_ERR(pcm_data->ctl_regs);
/* Populate mtd_info data structure */
- *mtd = (struct mtd_info) {
- .dev = { .parent = &pdev->dev },
- .name = pdev->dev.init_name,
- .type = MTD_RAM,
- .priv = map,
- .size = resource_size(add_range),
- .erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width,
- .writesize = 1,
- .writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
- .flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
- ._read = lpddr2_nvm_read,
- ._write = lpddr2_nvm_write,
- ._erase = lpddr2_nvm_erase,
- ._unlock = lpddr2_nvm_unlock,
- ._lock = lpddr2_nvm_lock,
- };
+ *mtd = lpddr2_nvm_mtd_info;
+ mtd->dev.parent = &pdev->dev;
+ mtd->name = pdev->dev.init_name;
+ mtd->priv = map;
+ mtd->size = resource_size(add_range);
+ mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
+ mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
/* Verify the presence of the device looking for PFOW string */
if (!lpddr2_nvm_pfow_present(map)) {
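The rewrite above trades a large per-probe compound literal for a const
template in rodata plus a handful of per-instance assignments: the constant
hooks and flags are shared, while device-specific fields are filled in
afterwards. A minimal userspace model of the pattern, with illustrative field
names:

	struct mtd_like {
		int type;		/* constant across instances */
		int writesize;		/* constant across instances */
		void *priv;		/* per-instance */
		long size;		/* per-instance */
	};

	static const struct mtd_like mtd_template = {
		.type = 1,
		.writesize = 1,
	};

	static void mtd_setup(struct mtd_like *mtd, void *priv, long size)
	{
		*mtd = mtd_template;	/* copy the shared defaults */
		mtd->priv = priv;	/* then fill per-device fields */
		mtd->size = size;
	}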
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index fb1cbc9a2870..ee063baed136 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -94,6 +94,34 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
}
EXPORT_SYMBOL(lpddr_cmdset);
+static void print_drs_error(unsigned int dsr)
+{
+ int prog_status = (dsr & DSR_RPS) >> 8;
+
+ if (!(dsr & DSR_AVAILABLE))
+ pr_notice("DSR.15: (0) Device not Available\n");
+ if ((prog_status & 0x03) == 0x03)
+ pr_notice("DSR.9,8: (11) Attempt to program invalid half with 41h command\n");
+ else if (prog_status & 0x02)
+ pr_notice("DSR.9,8: (10) Object Mode Program attempt in region with Control Mode data\n");
+ else if (prog_status & 0x01)
+ pr_notice("DSR.9,8: (01) Program attempt in region with Object Mode data\n");
+ if (!(dsr & DSR_READY_STATUS))
+ pr_notice("DSR.7: (0) Device is Busy\n");
+ if (dsr & DSR_ESS)
+ pr_notice("DSR.6: (1) Erase Suspended\n");
+ if (dsr & DSR_ERASE_STATUS)
+ pr_notice("DSR.5: (1) Erase/Blank check error\n");
+ if (dsr & DSR_PROGRAM_STATUS)
+ pr_notice("DSR.4: (1) Program Error\n");
+ if (dsr & DSR_VPPS)
+ pr_notice("DSR.3: (1) Vpp low detect, operation aborted\n");
+ if (dsr & DSR_PSS)
+ pr_notice("DSR.2: (1) Program suspended\n");
+ if (dsr & DSR_DPS)
+ pr_notice("DSR.1: (1) Aborted Erase/Program attempt on locked block\n");
+}
+
static int wait_for_ready(struct map_info *map, struct flchip *chip,
unsigned int chip_op_time)
{
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index fd37553f1b07..6650acbc961e 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -75,6 +75,17 @@ config MTD_PHYSMAP_OF
physically into the CPU's memory. The mapping description here is
taken from OF device tree.
+config MTD_PHYSMAP_BT1_ROM
+ bool "Baikal-T1 Boot ROMs OF-based physical memory map handling"
+ depends on MTD_PHYSMAP_OF
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MTD_COMPLEX_MAPPINGS
+ select MULTIPLEXER
+ select MUX_MMIO
+ help
+ This provides some extra DT physmap parsing for the Baikal-T1
+ platforms: detecting the ROMs and setting up ROM-specific accessors.
+
config MTD_PHYSMAP_VERSATILE
bool "ARM Versatile OF-based physical memory map handling"
depends on MTD_PHYSMAP_OF
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index c0da86a5d26f..79f018cf412f 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
physmap-objs-y += physmap-core.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_BT1_ROM) += physmap-bt1-rom.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
physmap-objs-$(CONFIG_MTD_PHYSMAP_IXP4XX) += physmap-ixp4xx.o
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
new file mode 100644
index 000000000000..27cfe1c63a2e
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 Physically Mapped Internal ROM driver
+ */
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "physmap-bt1-rom.h"
+
+/*
+ * Baikal-T1 SoC ROMs are only accessible by dword-aligned instructions.
+ * We have to take this into account when implementing the data read methods.
+ * Note there is no need to bother with endianness, since both the Baikal-T1
+ * CPU and MMIO are LE.
+ */
+static map_word __xipram bt1_rom_map_read(struct map_info *map,
+ unsigned long ofs)
+{
+ void __iomem *src = map->virt + ofs;
+ unsigned long shift;
+ map_word ret;
+ u32 data;
+
+ /* Read data within offset dword. */
+ shift = (unsigned long)src & 0x3;
+ data = readl_relaxed(src - shift);
+ if (!shift) {
+ ret.x[0] = data;
+ return ret;
+ }
+ ret.x[0] = data >> (shift * BITS_PER_BYTE);
+
+ /* Read data from the next dword. */
+ shift = 4 - shift;
+ if (ofs + shift >= map->size)
+ return ret;
+
+ data = readl_relaxed(src + shift);
+ ret.x[0] |= data << (shift * BITS_PER_BYTE);
+
+ return ret;
+}
+
+static void __xipram bt1_rom_map_copy_from(struct map_info *map,
+ void *to, unsigned long from,
+ ssize_t len)
+{
+ void __iomem *src = map->virt + from;
+ ssize_t shift, chunk;
+ u32 data;
+
+ if (len <= 0 || from >= map->size)
+ return;
+
+ /* Make sure we don't go over the map limit. */
+ len = min_t(ssize_t, map->size - from, len);
+
+ /*
+ * Since the requested data size can be pretty big, we have to implement
+ * the copy procedure as optimally as possible. That's why it's split
+ * up into the next three stages: unaligned head, aligned body,
+ * unaligned tail.
+ */
+ shift = (ssize_t)src & 0x3;
+ if (shift) {
+ chunk = min_t(ssize_t, 4 - shift, len);
+ data = readl_relaxed(src - shift);
+ memcpy(to, (char *)&data + shift, chunk);
+ src += chunk;
+ to += chunk;
+ len -= chunk;
+ }
+
+ while (len >= 4) {
+ data = readl_relaxed(src);
+ memcpy(to, &data, 4);
+ src += 4;
+ to += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ data = readl_relaxed(src);
+ memcpy(to, &data, len);
+ }
+}
+
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device *dev = &pdev->dev;
+
+ /* It's supposed to be a read-only MTD. */
+ if (!of_device_is_compatible(np, "mtd-rom")) {
+ dev_info(dev, "No mtd-rom compatible string\n");
+ return 0;
+ }
+
+ /* Multiplatform guard. */
+ if (!of_device_is_compatible(np, "baikal,bt1-int-rom"))
+ return 0;
+
+ /* Sanity check the device parameters retrieved from DTB. */
+ if (map->bankwidth != 4)
+ dev_warn(dev, "Bank width is supposed to be 32 bits wide\n");
+
+ map->read = bt1_rom_map_read;
+ map->copy_from = bt1_rom_map_copy_from;
+
+ return 0;
+}
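The three-stage copy above (unaligned head, aligned body, unaligned tail) can
be modeled in ordinary C, with a byte array standing in for the ROM and an
aligned 4-byte load standing in for readl_relaxed(); a sketch under the
assumption that the backing array is padded to a whole number of words,
mirroring the map->size guard in the driver:

	#include <stdint.h>
	#include <string.h>

	/* Model: the "ROM" may only be read as aligned 32-bit words. */
	static uint32_t rom_readl(const uint8_t *rom, size_t aligned_off)
	{
		uint32_t w;

		memcpy(&w, rom + aligned_off, 4);	/* aligned_off % 4 == 0 */
		return w;
	}

	static void rom_copy(const uint8_t *rom, size_t from, uint8_t *to,
			     size_t len)
	{
		size_t shift = from & 0x3;

		/* Unaligned head: load the enclosing word, keep its tail. */
		if (shift) {
			size_t chunk = (4 - shift < len) ? 4 - shift : len;
			uint32_t w = rom_readl(rom, from - shift);

			memcpy(to, (uint8_t *)&w + shift, chunk);
			from += chunk; to += chunk; len -= chunk;
		}

		/* Aligned body: whole words. */
		while (len >= 4) {
			uint32_t w = rom_readl(rom, from);

			memcpy(to, &w, 4);
			from += 4; to += 4; len -= 4;
		}

		/* Unaligned tail: load one more word, keep its head. */
		if (len) {
			uint32_t w = rom_readl(rom, from);

			memcpy(to, &w, len);
		}
	}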
diff --git a/drivers/mtd/maps/physmap-bt1-rom.h b/drivers/mtd/maps/physmap-bt1-rom.h
new file mode 100644
index 000000000000..6782899598a4
--- /dev/null
+++ b/drivers/mtd/maps/physmap-bt1-rom.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/mtd/map.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_BT1_ROM
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_bt1_rom(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 8f7f966fa9a7..001ed5deb622 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -41,6 +41,7 @@
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
+#include "physmap-bt1-rom.h"
#include "physmap-gemini.h"
#include "physmap-ixp4xx.h"
#include "physmap-versatile.h"
@@ -371,6 +372,10 @@ static int physmap_flash_of_init(struct platform_device *dev)
info->maps[i].bankwidth = bankwidth;
info->maps[i].device_node = dp;
+ err = of_flash_probe_bt1_rom(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
err = of_flash_probe_gemini(dev, dp, &info->maps[i]);
if (err)
return err;
@@ -515,7 +520,8 @@ static int physmap_flash_probe(struct platform_device *dev)
dev_notice(&dev->dev, "physmap platform flash device: %pR\n",
res);
- info->maps[i].name = dev_name(&dev->dev);
+ if (!info->maps[i].name)
+ info->maps[i].name = dev_name(&dev->dev);
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 177bf134e189..a7ec947a3ebb 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -40,7 +40,7 @@ struct memcard {
u32 blocklen;
u32 writecnt;
u32 readcnt;
- u32 removeable;
+ u32 removable;
int partition;
int read;
unsigned char *blockread;
@@ -619,7 +619,7 @@ static int vmu_connect(struct maple_device *mdev)
card->blocklen = ((basic_flash_data >> 16 & 0xFF) + 1) << 5;
card->writecnt = basic_flash_data >> 12 & 0xF;
card->readcnt = basic_flash_data >> 8 & 0xF;
- card->removeable = basic_flash_data >> 7 & 1;
+ card->removable = basic_flash_data >> 7 & 1;
card->partition = 0;
@@ -772,7 +772,6 @@ static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
static int probe_maple_vmu(struct device *dev)
{
- int error;
struct maple_device *mdev = to_maple_dev(dev);
struct maple_driver *mdrv = to_maple_driver(dev->driver);
@@ -780,11 +779,7 @@ static int probe_maple_vmu(struct device *dev)
mdev->fileerr_handler = vmu_file_error;
mdev->driver = mdrv;
- error = vmu_connect(mdev);
- if (error)
- return error;
-
- return 0;
+ return vmu_connect(mdev);
}
static int remove_maple_vmu(struct device *dev)
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1d6c9e7e7b7d..6e4d0017c0bd 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -103,6 +103,47 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
}
static int
+concat_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ err = mtd_panic_write(subdev, to, size, &retsize, buf);
+ if (err == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdconcat: Cannot write from panic without panic_write\n");
+ return err;
+ }
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
@@ -648,6 +689,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._block_isbad = concat_block_isbad;
if (subdev[0]->_block_markbad)
concat->mtd._block_markbad = concat_block_markbad;
+ if (subdev[0]->_panic_write)
+ concat->mtd._panic_write = concat_panic_write;
concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
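concat_panic_write() finds the subdevice backing a linear offset by walking
the array and subtracting each subdevice's size until the offset falls inside
one, then restarts at offset 0 for any spill-over into the next subdevice. A
runnable model of just the address translation, with made-up sizes:

	#include <stdio.h>

	/* Map a linear offset to (subdevice index, local offset). */
	static int concat_locate(const long *sizes, int n, long to, long *local)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (to < sizes[i]) {
				*local = to;
				return i;
			}
			to -= sizes[i];
		}

		return -1;	/* past the end of the concatenated device */
	}

	int main(void)
	{
		const long sizes[] = { 0x100000, 0x80000 };
		long local;
		int idx = concat_locate(sizes, 2, 0x140000, &local);

		/* Prints: subdev 1, local offset 0x40000 */
		printf("subdev %d, local offset 0x%lx\n", idx, local);
		return 0;
	}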
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 7d930569a7df..e9e163ae9d86 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -335,7 +335,7 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
-static int mtd_partid_show(struct seq_file *s, void *p)
+static int mtd_partid_debug_show(struct seq_file *s, void *p)
{
struct mtd_info *mtd = s->private;
@@ -344,19 +344,9 @@ static int mtd_partid_show(struct seq_file *s, void *p)
return 0;
}
-static int mtd_partid_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mtd_partid_show, inode->i_private);
-}
-
-static const struct file_operations mtd_partid_debug_fops = {
- .open = mtd_partid_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
-static int mtd_partname_show(struct seq_file *s, void *p)
+static int mtd_partname_debug_show(struct seq_file *s, void *p)
{
struct mtd_info *mtd = s->private;
@@ -365,17 +355,7 @@ static int mtd_partname_show(struct seq_file *s, void *p)
return 0;
}
-static int mtd_partname_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mtd_partname_show, inode->i_private);
-}
-
-static const struct file_operations mtd_partname_debug_fops = {
- .open = mtd_partname_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
static struct dentry *dfs_dir_mtd;
@@ -2196,6 +2176,8 @@ static struct backing_dev_info * __init mtd_bdi_init(char *name)
bdi = bdi_alloc(NUMA_NO_NODE);
if (!bdi)
return ERR_PTR(-ENOMEM);
+ bdi->ra_pages = 0;
+ bdi->io_pages = 0;
/*
* We put '-0' suffix to the name to get the same name format as we
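The two conversions above work because DEFINE_SHOW_ATTRIBUTE(name) generates
exactly the boilerplate being deleted, provided a name_show() function exists;
hence the _show functions are renamed first. From memory, the macro in
include/linux/seq_file.h expands approximately to:

	static int name_open(struct inode *inode, struct file *file)
	{
		return single_open(file, name_show, inode->i_private);
	}

	static const struct file_operations name_fops = {
		.owner		= THIS_MODULE,
		.open		= name_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};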
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 4ced68be7ed7..774970bfcf85 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -279,12 +279,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
record_size - MTDOOPS_HEADER_SIZE, NULL);
- /* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS)
+ if (reason != KMSG_DUMP_OOPS) {
+ /* Panics must be written immediately */
mtdoops_write(cxt, 1);
-
- /* For other cases, schedule work to write it "nicely" */
- schedule_work(&cxt->work_write);
+ } else {
+ /* For other cases, schedule work to write it "nicely" */
+ schedule_work(&cxt->work_write);
+ }
}
static void mtdoops_notify_add(struct mtd_info *mtd)
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index c1a45b071165..4a9aed4f0104 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -9,4 +9,12 @@ source "drivers/mtd/nand/onenand/Kconfig"
source "drivers/mtd/nand/raw/Kconfig"
source "drivers/mtd/nand/spi/Kconfig"
+menu "ECC engine support"
+
+config MTD_NAND_ECC
+ bool
+ depends on MTD_NAND_CORE
+
+endmenu
+
endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 7ecd80c0a66e..981372953b56 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-y += onenand/
obj-y += raw/
obj-y += spi/
+
+nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
new file mode 100644
index 000000000000..4a56e6c0da67
--- /dev/null
+++ b/drivers/mtd/nand/ecc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Generic Error-Correcting Code (ECC) engine
+ *
+ * Copyright (C) 2019 Macronix
+ * Author:
+ * Miquèl RAYNAL <miquel.raynal@bootlin.com>
+ *
+ *
+ * This file describes the abstraction of any NAND ECC engine. It has been
+ * designed to fit most cases, including parallel NANDs and SPI-NANDs.
+ *
+ * There are three main situations where instantiating this ECC engine makes
+ * sense:
+ * - external: The ECC engine is outside the NAND pipeline, typically this
+ * is a software ECC engine, or a hardware engine that is
+ * outside the NAND controller pipeline.
+ * - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
+ * controller's side. This is the case of most of the raw NAND
+ * controllers. In the pipeline case, the ECC bytes are
+ * generated/data corrected on the fly when a page is
+ * written/read.
+ * - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
+ * Some NAND chips can correct the data themselves.
+ *
+ * Besides the initial setup and final cleanups, the interfaces are rather
+ * simple:
+ * - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
+ * the I/O request type. In case of software correction or external
+ * engine, this step may involve deriving the ECC bytes and placing
+ * them in the OOB area before a write.
+ * - finish: Finish an I/O request. Correct the data in case of a read
+ * request and report the number of corrected bits/uncorrectable
+ * errors. Most likely empty for write operations, unless you have
+ * hardware specific stuff to do, like shutting down the engine to
+ * save power.
+ *
+ * The I/O request should be enclosed in a prepare()/finish() pair of calls
+ * and will behave differently depending on the requested I/O type:
+ * - raw: Correction disabled
+ * - ecc: Correction enabled
+ *
+ * The request direction impacts the logic as well:
+ * - read: Load data from the NAND chip
+ * - write: Store data in the NAND chip
+ *
+ * Mixing all these combinations together gives the following behavior.
+ * These are just examples; drivers are free to add custom steps in their
+ * prepare/finish hooks.
+ *
+ * [external ECC engine]
+ * - external + prepare + raw + read: do nothing
+ * - external + finish + raw + read: do nothing
+ * - external + prepare + raw + write: do nothing
+ * - external + finish + raw + write: do nothing
+ * - external + prepare + ecc + read: do nothing
+ * - external + finish + ecc + read: calculate expected ECC bytes, extract
+ * ECC bytes from OOB buffer, correct
+ * and report any bitflip/error
+ * - external + prepare + ecc + write: calculate ECC bytes and store them at
+ * the right place in the OOB buffer based
+ * on the OOB layout
+ * - external + finish + ecc + write: do nothing
+ *
+ * [pipelined ECC engine]
+ * - pipelined + prepare + raw + read: disable the controller's ECC engine if
+ * activated
+ * - pipelined + finish + raw + read: do nothing
+ * - pipelined + prepare + raw + write: disable the controller's ECC engine if
+ * activated
+ * - pipelined + finish + raw + write: do nothing
+ * - pipelined + prepare + ecc + read: enable the controller's ECC engine if
+ * deactivated
+ * - pipelined + finish + ecc + read: check the status, report any
+ * error/bitflip
+ * - pipelined + prepare + ecc + write: enable the controller's ECC engine if
+ * deactivated
+ * - pipelined + finish + ecc + write: do nothing
+ *
+ * [ondie ECC engine]
+ * - ondie + prepare + raw + read: send commands to disable the on-chip ECC
+ * engine if activated
+ * - ondie + finish + raw + read: do nothing
+ * - ondie + prepare + raw + write: send commands to disable the on-chip ECC
+ * engine if activated
+ * - ondie + finish + raw + write: do nothing
+ * - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
+ * engine if deactivated
+ * - ondie + finish + ecc + read: send commands to check the status, report
+ * any error/bitflip
+ * - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
+ * engine if deactivated
+ * - ondie + finish + ecc + write: do nothing
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * nand_ecc_init_ctx - Init the ECC engine context
+ * @nand: the NAND device
+ *
+ * On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
+ */
+int nand_ecc_init_ctx(struct nand_device *nand)
+{
+ if (!nand->ecc.engine->ops->init_ctx)
+ return 0;
+
+ return nand->ecc.engine->ops->init_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_init_ctx);
+
+/**
+ * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
+ * @nand: the NAND device
+ */
+void nand_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ if (nand->ecc.engine->ops->cleanup_ctx)
+ nand->ecc.engine->ops->cleanup_ctx(nand);
+}
+EXPORT_SYMBOL(nand_ecc_cleanup_ctx);
+
+/**
+ * nand_ecc_prepare_io_req - Prepare an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_prepare_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ if (!nand->ecc.engine->ops->prepare_io_req)
+ return 0;
+
+ return nand->ecc.engine->ops->prepare_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_prepare_io_req);
+
+/**
+ * nand_ecc_finish_io_req - Finish an I/O request
+ * @nand: the NAND device
+ * @req: the I/O request
+ */
+int nand_ecc_finish_io_req(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ if (!nand->ecc.engine->ops->finish_io_req)
+ return 0;
+
+ return nand->ecc.engine->ops->finish_io_req(nand, req);
+}
+EXPORT_SYMBOL(nand_ecc_finish_io_req);
+
+/* Define default OOB placement schemes for large and small page devices */
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->oobsize == 16)
+ oobregion->length = 4;
+ else
+ oobregion->length = 3;
+ } else {
+ if (mtd->oobsize == 8)
+ return -ERANGE;
+
+ oobregion->offset = 6;
+ oobregion->length = total_ecc_bytes - 4;
+ }
+
+ return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->oobsize == 16) {
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 8;
+ oobregion->offset = 8;
+ } else {
+ oobregion->length = 2;
+ if (!section)
+ oobregion->offset = 3;
+ else
+ oobregion->offset = 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+ .ecc = nand_ooblayout_ecc_sp,
+ .free = nand_ooblayout_free_sp,
+};
+
+const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
+{
+ return &nand_ooblayout_sp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section || !total_ecc_bytes)
+ return -ERANGE;
+
+ oobregion->length = total_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
+ oobregion->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+ .ecc = nand_ooblayout_ecc_lp,
+ .free = nand_ooblayout_free_lp,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
+{
+ return &nand_ooblayout_lp_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
+
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
+ * are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+
+ if (section)
+ return -ERANGE;
+
+ switch (mtd->oobsize) {
+ case 64:
+ oobregion->offset = 40;
+ break;
+ case 128:
+ oobregion->offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ oobregion->length = total_ecc_bytes;
+ if (oobregion->offset + oobregion->length > mtd->oobsize)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int ecc_offset = 0;
+
+ if (section < 0 || section > 1)
+ return -ERANGE;
+
+ switch (mtd->oobsize) {
+ case 64:
+ ecc_offset = 40;
+ break;
+ case 128:
+ ecc_offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (section == 0) {
+ oobregion->offset = 2;
+ oobregion->length = ecc_offset - 2;
+ } else {
+ oobregion->offset = ecc_offset + total_ecc_bytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+ .ecc = nand_ooblayout_ecc_lp_hamming,
+ .free = nand_ooblayout_free_lp_hamming,
+};
+
+const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
+{
+ return &nand_ooblayout_lp_hamming_ops;
+}
+EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
+
+static enum nand_ecc_engine_type
+of_get_nand_ecc_engine_type(struct device_node *np)
+{
+ struct device_node *eng_np;
+
+ if (of_property_read_bool(np, "nand-no-ecc-engine"))
+ return NAND_ECC_ENGINE_TYPE_NONE;
+
+ if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
+ return NAND_ECC_ENGINE_TYPE_SOFT;
+
+ eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
+ of_node_put(eng_np);
+
+ if (eng_np) {
+ if (eng_np == np)
+ return NAND_ECC_ENGINE_TYPE_ON_DIE;
+ else
+ return NAND_ECC_ENGINE_TYPE_ON_HOST;
+ }
+
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+}
+
+static const char * const nand_ecc_placement[] = {
+ [NAND_ECC_PLACEMENT_OOB] = "oob",
+ [NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
+};
+
+static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
+{
+ enum nand_ecc_placement placement;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-placement", &pm);
+ if (!err) {
+ for (placement = NAND_ECC_PLACEMENT_OOB;
+ placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
+ if (!strcasecmp(pm, nand_ecc_placement[placement]))
+ return placement;
+ }
+ }
+
+ return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static const char * const nand_ecc_algos[] = {
+ [NAND_ECC_ALGO_HAMMING] = "hamming",
+ [NAND_ECC_ALGO_BCH] = "bch",
+ [NAND_ECC_ALGO_RS] = "rs",
+};
+
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
+{
+ enum nand_ecc_algo ecc_algo;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-algo", &pm);
+ if (!err) {
+ for (ecc_algo = NAND_ECC_ALGO_HAMMING;
+ ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+ ecc_algo++) {
+ if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+ return ecc_algo;
+ }
+ }
+
+ return NAND_ECC_ALGO_UNKNOWN;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+ return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+ return ret ? ret : val;
+}
+
+void of_get_nand_ecc_user_config(struct nand_device *nand)
+{
+ struct device_node *dn = nanddev_get_of_node(nand);
+ int strength, size;
+
+ nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
+ nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
+ nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);
+
+ strength = of_get_nand_ecc_strength(dn);
+ if (strength >= 0)
+ nand->ecc.user_conf.strength = strength;
+
+ size = of_get_nand_ecc_step_size(dn);
+ if (size >= 0)
+ nand->ecc.user_conf.step_size = size;
+
+ if (of_property_read_bool(dn, "nand-ecc-maximize"))
+ nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
+}
+EXPORT_SYMBOL(of_get_nand_ecc_user_config);
+
+/**
+ * nand_ecc_is_strong_enough - Check if the chip configuration meets the
+ * datasheet requirements.
+ *
+ * @nand: Device to check
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
+bool nand_ecc_is_strong_enough(struct nand_device *nand)
+{
+ const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
+ const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int corr, ds_corr;
+
+ if (conf->step_size == 0 || reqs->step_size == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * conf->strength) / conf->step_size;
+ ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;
+
+ return corr >= ds_corr && conf->strength >= reqs->strength;
+}
+EXPORT_SYMBOL(nand_ecc_is_strong_enough);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Generic ECC engine");
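A worked example of the nand_ecc_is_strong_enough() arithmetic with made-up
numbers: for a 4 KiB page corrected at 8 bits per 512-byte step against a
datasheet minimum of 4 bits per 256-byte step, both sides come to 64 bits per
page, and 8 >= 4, so the configuration passes:

	#include <stdbool.h>
	#include <stdio.h>

	/* Userspace model of the per-page density and per-step strength check. */
	static bool strong_enough(int writesize, int conf_strength, int conf_step,
				  int req_strength, int req_step)
	{
		int corr = writesize * conf_strength / conf_step;	/* bits/page */
		int ds_corr = writesize * req_strength / req_step;	/* bits/page */

		return corr >= ds_corr && conf_strength >= req_strength;
	}

	int main(void)
	{
		/* 4 KiB page: 8 bits/512 B configured vs. 4 bits/256 B required. */
		printf("%d\n", strong_enough(4096, 8, 512, 4, 256));	/* 1 */
		return 0;
	}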
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index ec18ade33262..188b8061e1f7 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1052,16 +1052,11 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
int thislen)
{
struct onenand_chip *this = mtd->priv;
- int ret;
this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
mtd->oobsize);
- ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
- column, thislen);
- if (ret)
- return ret;
-
- return 0;
+ return mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
+ column, thislen);
}
/**
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index aa9368bf7a0c..d8c0bd002c2b 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -494,11 +494,8 @@ static int omap2_onenand_probe(struct platform_device *pdev)
c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
if (IS_ERR(c->int_gpiod)) {
- r = PTR_ERR(c->int_gpiod);
/* Just try again if this happens */
- if (r != -EPROBE_DEFER)
- dev_err(dev, "error getting gpio: %d\n", r);
- return r;
+ return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
}
if (c->int_gpiod) {
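dev_err_probe() folds the usual -EPROBE_DEFER special-casing into one call: it
returns the error it is given, logs it at err level, and for -EPROBE_DEFER
stays quiet and records the reason (visible in debugfs devices_deferred)
instead, which is why the multi-line pattern removed above collapses to a
single return. A hedged sketch of typical usage with an illustrative "reset"
GPIO; example_get_reset() is not part of the driver:

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	static int example_get_reset(struct device *dev)
	{
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(gpiod))
			/* Quiet for -EPROBE_DEFER, dev_err() otherwise. */
			return dev_err_probe(dev, PTR_ERR(gpiod),
					     "failed to get reset GPIO\n");

		/* A real driver would stash gpiod in its private data. */
		return 0;
	}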
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 1203775023ad..6c46f25b57e2 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -13,6 +13,7 @@ config MTD_NAND_ECC_SW_HAMMING_SMC
menuconfig MTD_RAW_NAND
tristate "Raw/Parallel NAND Device Support"
select MTD_NAND_CORE
+ select MTD_NAND_ECC
select MTD_NAND_ECC_SW_HAMMING
help
This enables support for accessing all type of raw/parallel
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index fdba155416d2..d3c5cc513c8f 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -260,8 +260,8 @@ static int gpio_nand_probe(struct platform_device *pdev)
return err;
}
- this->ecc.mode = NAND_ECC_SOFT;
- this->ecc.algo = NAND_ECC_HAMMING;
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ this->ecc.algo = NAND_ECC_ALGO_HAMMING;
platform_set_drvdata(pdev, priv);
@@ -400,12 +400,14 @@ static int gpio_nand_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_of_id_table[] = {
{
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, gpio_nand_of_id_table);
+#endif
static const struct platform_device_id gpio_nand_plat_id_table[] = {
{
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 12c643e97c85..fbb4ea751be8 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -980,10 +980,10 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
return -EINVAL;
}
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
ecc->steps = mtd->writesize / ecc->size;
- ecc->algo = NAND_ECC_BCH;
+ ecc->algo = NAND_ECC_ALGO_BCH;
anand->ecc_bits = bch_gf_mag * ecc->strength;
ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
@@ -1056,17 +1056,17 @@ static int anfc_attach_chip(struct nand_chip *chip)
chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
- switch (chip->ecc.mode) {
- case NAND_ECC_NONE:
- case NAND_ECC_SOFT:
- case NAND_ECC_ON_DIE:
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
- case NAND_ECC_HW:
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = anfc_init_hw_ecc_controller(nfc, chip);
break;
default:
dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
- chip->ecc.mode);
+ chip->ecc.engine_type);
return -EINVAL;
}
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index c9818f548d07..e6ceec8f50dc 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -202,6 +202,8 @@ struct atmel_nand_controller_ops {
int (*ecc_init)(struct nand_chip *chip);
int (*setup_interface)(struct atmel_nand *nand, int csline,
const struct nand_interface_config *conf);
+ int (*exec_op)(struct atmel_nand *nand,
+ const struct nand_operation *op, bool check_only);
};
struct atmel_nand_controller_caps {
@@ -259,6 +261,7 @@ struct atmel_hsmc_nand_controller {
struct regmap *io;
struct atmel_nfc_op op;
struct completion complete;
+ u32 cfg;
int irq;
/* Only used when instantiating from legacy DT bindings. */
@@ -414,29 +417,62 @@ err:
return -EIO;
}
-static u8 atmel_nand_read_byte(struct nand_chip *chip)
+static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
+ u8 *addrs = nc->op.addrs;
+ unsigned int op = 0;
+ u32 addr, val;
+ int i, ret;
- return ioread8(nand->activecs->io.virt);
-}
+ nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
-static void atmel_nand_write_byte(struct nand_chip *chip, u8 byte)
-{
- struct atmel_nand *nand = to_atmel_nand(chip);
+ for (i = 0; i < nc->op.ncmds; i++)
+ op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
- if (chip->options & NAND_BUSWIDTH_16)
- iowrite16(byte | (byte << 8), nand->activecs->io.virt);
- else
- iowrite8(byte, nand->activecs->io.virt);
+ if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
+
+ op |= ATMEL_NFC_CSID(nc->op.cs) |
+ ATMEL_NFC_ACYCLE(nc->op.naddrs);
+
+ if (nc->op.ncmds > 1)
+ op |= ATMEL_NFC_VCMD2;
+
+ addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
+ (addrs[3] << 24);
+
+ if (nc->op.data != ATMEL_NFC_NO_DATA) {
+ op |= ATMEL_NFC_DATAEN;
+ nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
+
+ if (nc->op.data == ATMEL_NFC_WRITE_DATA)
+ op |= ATMEL_NFC_NFCWR;
+ }
+
+ /* Clear all flags. */
+ regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
+
+ /* Send the command. */
+ regmap_write(nc->io, op, addr);
+
+ ret = atmel_nfc_wait(nc, poll, 0);
+ if (ret)
+ dev_err(nc->base.dev,
+ "Failed to send NAND command (err = %d)!",
+ ret);
+
+ /* Reset the op state. */
+ memset(&nc->op, 0, sizeof(nc->op));
+
+ return ret;
}
-static void atmel_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
+static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
+ unsigned int len, bool force_8bit)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
- nc = to_nand_controller(chip->controller);
+ nc = to_nand_controller(nand->base.controller);
/*
* If the controller supports DMA, the buffer address is DMA-able and
@@ -444,23 +480,23 @@ static void atmel_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
* a DMA transfer. If it fails, fallback to PIO mode.
*/
if (nc->dmac && virt_addr_valid(buf) &&
- len >= MIN_DMA_LEN &&
+ len >= MIN_DMA_LEN && !force_8bit &&
!atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
DMA_FROM_DEVICE))
return;
- if (chip->options & NAND_BUSWIDTH_16)
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
ioread16_rep(nand->activecs->io.virt, buf, len / 2);
else
ioread8_rep(nand->activecs->io.virt, buf, len);
}
-static void atmel_nand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
+static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
+ unsigned int len, bool force_8bit)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
- nc = to_nand_controller(chip->controller);
+ nc = to_nand_controller(nand->base.controller);
/*
* If the controller supports DMA, the buffer address is DMA-able and
@@ -468,179 +504,213 @@ static void atmel_nand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
* a DMA transfer. If it fails, fallback to PIO mode.
*/
if (nc->dmac && virt_addr_valid(buf) &&
- len >= MIN_DMA_LEN &&
+ len >= MIN_DMA_LEN && !force_8bit &&
!atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
len, DMA_TO_DEVICE))
return;
- if (chip->options & NAND_BUSWIDTH_16)
+ if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
else
iowrite8_rep(nand->activecs->io.virt, buf, len);
}
-static int atmel_nand_dev_ready(struct nand_chip *chip)
+static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
+ if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
+ return nand_soft_waitrdy(&nand->base, timeout_ms);
- return gpiod_get_value(nand->activecs->rb.gpio);
+ return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
+ timeout_ms);
}
-static void atmel_nand_select_chip(struct nand_chip *chip, int cs)
+static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
+ unsigned int timeout_ms)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
-
- if (cs < 0 || cs >= nand->numcs) {
- nand->activecs = NULL;
- chip->legacy.dev_ready = NULL;
- return;
- }
+ struct atmel_hsmc_nand_controller *nc;
+ u32 status, mask;
- nand->activecs = &nand->cs[cs];
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
+ return atmel_nand_waitrdy(nand, timeout_ms);
- if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
- chip->legacy.dev_ready = atmel_nand_dev_ready;
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
+ return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
+ status, status & mask,
+ 10, timeout_ms * 1000);
}
-static int atmel_hsmc_nand_dev_ready(struct nand_chip *chip)
+static void atmel_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
{
- struct atmel_nand *nand = to_atmel_nand(chip);
- struct atmel_hsmc_nand_controller *nc;
- u32 status;
-
- nc = to_hsmc_nand_controller(chip->controller);
-
- regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
-
- return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
+ nand->activecs = &nand->cs[cs];
}
-static void atmel_hsmc_nand_select_chip(struct nand_chip *chip, int cs)
+static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
+ unsigned int cs)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct atmel_nand *nand = to_atmel_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(&nand->base);
struct atmel_hsmc_nand_controller *nc;
+ u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
+ ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
+ ATMEL_HSMC_NFC_CFG_RSPARE;
- nc = to_hsmc_nand_controller(chip->controller);
-
- atmel_nand_select_chip(chip, cs);
-
- if (!nand->activecs) {
- regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
- ATMEL_HSMC_NFC_CTRL_DIS);
+ nand->activecs = &nand->cs[cs];
+ nc = to_hsmc_nand_controller(nand->base.controller);
+ if (nc->cfg == cfg)
return;
- }
-
- if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
- chip->legacy.dev_ready = atmel_hsmc_nand_dev_ready;
regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
ATMEL_HSMC_NFC_CFG_RSPARE |
ATMEL_HSMC_NFC_CFG_WSPARE,
- ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
- ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
- ATMEL_HSMC_NFC_CFG_RSPARE);
- regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
- ATMEL_HSMC_NFC_CTRL_EN);
+ cfg);
+ nc->cfg = cfg;
}
-static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
+static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
+ const struct nand_op_instr *instr)
{
- u8 *addrs = nc->op.addrs;
- unsigned int op = 0;
- u32 addr, val;
- int i, ret;
-
- nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
-
- for (i = 0; i < nc->op.ncmds; i++)
- op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
-
- if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
- regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
-
- op |= ATMEL_NFC_CSID(nc->op.cs) |
- ATMEL_NFC_ACYCLE(nc->op.naddrs);
-
- if (nc->op.ncmds > 1)
- op |= ATMEL_NFC_VCMD2;
-
- addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
- (addrs[3] << 24);
-
- if (nc->op.data != ATMEL_NFC_NO_DATA) {
- op |= ATMEL_NFC_DATAEN;
- nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
+ struct atmel_nand_controller *nc;
+ unsigned int i;
- if (nc->op.data == ATMEL_NFC_WRITE_DATA)
- op |= ATMEL_NFC_NFCWR;
+ nc = to_nand_controller(nand->base.controller);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ nand->activecs->io.virt + nc->caps->cle_offs);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ nand->activecs->io.virt + nc->caps->ale_offs);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ return atmel_nand_waitrdy(nand,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
}
- /* Clear all flags. */
- regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
+ return -EINVAL;
+}
- /* Send the command. */
- regmap_write(nc->io, op, addr);
+static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ unsigned int i;
+ int ret = 0;
- ret = atmel_nfc_wait(nc, poll, 0);
- if (ret)
- dev_err(nc->base.dev,
- "Failed to send NAND command (err = %d)!",
- ret);
+ if (check_only)
+ return 0;
- /* Reset the op state. */
- memset(&nc->op, 0, sizeof(nc->op));
+ atmel_nand_select_target(nand, op->cs);
+ gpiod_set_value(nand->activecs->csgpio, 0);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+ gpiod_set_value(nand->activecs->csgpio, 1);
return ret;
}
-static void atmel_hsmc_nand_cmd_ctrl(struct nand_chip *chip, int dat,
- unsigned int ctrl)
+static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
+ const struct nand_subop *subop)
{
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
+ unsigned int i, j;
nc = to_hsmc_nand_controller(chip->controller);
- if (ctrl & NAND_ALE) {
- if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
- return;
+ nc->op.cs = nand->activecs->id;
+ for (i = 0; i < subop->ninstrs; i++) {
+ const struct nand_op_instr *instr = &subop->instrs[i];
- nc->op.addrs[nc->op.naddrs++] = dat;
- } else if (ctrl & NAND_CLE) {
- if (nc->op.ncmds > 1)
- return;
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
+ continue;
+ }
- nc->op.cmds[nc->op.ncmds++] = dat;
+ for (j = nand_subop_get_addr_start_off(subop, i);
+ j < nand_subop_get_num_addr_cyc(subop, i); j++) {
+ nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
+ nc->op.naddrs++;
+ }
}
- if (dat == NAND_CMD_NONE) {
- nc->op.cs = nand->activecs->id;
- atmel_nfc_exec_op(nc, true);
- }
+ return atmel_nfc_exec_op(nc, true);
}
-static void atmel_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
+static int atmel_hsmc_exec_rw(struct nand_chip *chip,
+ const struct nand_subop *subop)
{
+ const struct nand_op_instr *instr = subop->instrs;
struct atmel_nand *nand = to_atmel_nand(chip);
- struct atmel_nand_controller *nc;
- nc = to_nand_controller(chip->controller);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ atmel_nand_data_in(nand, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ else
+ atmel_nand_data_out(nand, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
- if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
- if (ctrl & NAND_NCE)
- gpiod_set_value(nand->activecs->csgpio, 0);
- else
- gpiod_set_value(nand->activecs->csgpio, 1);
- }
+ return 0;
+}
+
+static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr = subop->instrs;
+ struct atmel_nand *nand = to_atmel_nand(chip);
- if (ctrl & NAND_ALE)
- writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
- else if (ctrl & NAND_CLE)
- writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
+ return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
+}
+
+static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+);
+
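The patterns above drive nand_op_parser_exec_op(): it splits an incoming operation into subops, each matching one pattern, and calls the associated hook. An informal trace (illustration only):

/*
 * A page read issued by the core as
 *     CMD(READ0) ADDR(5) CMD(READSTART) WAITRDY DATA_IN(len)
 * is dispatched as three subops:
 *     CMD + ADDR + CMD -> atmel_hsmc_exec_cmd_addr()
 *     WAITRDY          -> atmel_hsmc_exec_waitrdy()
 *     DATA_IN          -> atmel_hsmc_exec_rw()
 */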
+static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return nand_op_parser_exec_op(&nand->base,
+ &atmel_hsmc_op_parser, op, true);
+
+ atmel_hsmc_nand_select_target(nand, op->cs);
+ ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
+ false);
+
+ return ret;
}
static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
@@ -838,7 +908,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
if (ret)
return ret;
- atmel_nand_write_buf(chip, buf, mtd->writesize);
+ nand_write_data_op(chip, buf, mtd->writesize, false);
ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
if (ret) {
@@ -848,7 +918,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_pmecc_disable(chip, raw);
- atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
return nand_prog_page_end_op(chip);
}
@@ -878,11 +948,17 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
if (ret)
return ret;
- atmel_nand_read_buf(chip, buf, mtd->writesize);
- atmel_nand_read_buf(chip, chip->oob_poi, mtd->oobsize);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
+ if (ret)
+ goto out_disable;
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
+ if (ret)
+ goto out_disable;
ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
+out_disable:
atmel_nand_pmecc_disable(chip, raw);
return ret;
@@ -907,8 +983,9 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_hsmc_nand_controller *nc;
- int ret, status;
+ int ret;
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
nc = to_hsmc_nand_controller(chip->controller);
atmel_nfc_copy_to_sram(chip, buf, false);
@@ -939,21 +1016,9 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
if (ret)
return ret;
- atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
-
- nc->op.cmds[0] = NAND_CMD_PAGEPROG;
- nc->op.ncmds = 1;
- nc->op.cs = nand->activecs->id;
- ret = atmel_nfc_exec_op(nc, false);
- if (ret)
- dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
- ret);
-
- status = chip->legacy.waitfunc(chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
+ nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
- return ret;
+ return nand_prog_page_end_op(chip);
}
static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
@@ -981,6 +1046,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct atmel_hsmc_nand_controller *nc;
int ret;
+ atmel_hsmc_nand_select_target(nand, chip->cur_cs);
nc = to_hsmc_nand_controller(chip->controller);
/*
@@ -988,12 +1054,9 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
 * Optimized read page accessors only work when the NAND R/B pin is
 * properly connected to a native SoC R/B pin. If that's not the case,
 * fall back to the non-optimized one.
*/
- if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- nand_read_page_op(chip, page, 0, NULL, 0);
-
+ if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
- }
nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
@@ -1043,7 +1106,10 @@ static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
static int atmel_nand_pmecc_init(struct nand_chip *chip)
{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct atmel_nand *nand = to_atmel_nand(chip);
struct atmel_nand_controller *nc;
struct atmel_pmecc_user_req req;
@@ -1068,19 +1134,19 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
chip->ecc.size = val;
}
- if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
else if (chip->ecc.strength)
req.ecc.strength = chip->ecc.strength;
- else if (chip->base.eccreq.strength)
- req.ecc.strength = chip->base.eccreq.strength;
+ else if (requirements->strength)
+ req.ecc.strength = requirements->strength;
else
req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
if (chip->ecc.size)
req.ecc.sectorsize = chip->ecc.size;
- else if (chip->base.eccreq.step_size)
- req.ecc.sectorsize = chip->base.eccreq.step_size;
+ else if (requirements->step_size)
+ req.ecc.sectorsize = requirements->step_size;
else
req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
@@ -1099,14 +1165,14 @@ static int atmel_nand_pmecc_init(struct nand_chip *chip)
if (IS_ERR(nand->pmecc))
return PTR_ERR(nand->pmecc);
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = req.ecc.sectorsize;
chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
chip->ecc.strength = req.ecc.strength;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
return 0;
}
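
The eccreq conversion in this hunk repeats throughout the series; sketched as an accessor pair (the getter is shown here, the setter appears in the nand_base.c hunk near the end of this diff):

/* Old: chip->base.eccreq.strength / chip->base.eccreq.step_size */
const struct nand_ecc_props *req =
	nanddev_get_ecc_requirements(&chip->base);

/* Writers go through the setter (example values): */
struct nand_ecc_props wanted = { .strength = 4, .step_size = 512 };
nanddev_set_ecc_requirements(&chip->base, &wanted);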
@@ -1118,15 +1184,15 @@ static int atmel_nand_ecc_init(struct nand_chip *chip)
nc = to_nand_controller(chip->controller);
- switch (chip->ecc.mode) {
- case NAND_ECC_NONE:
- case NAND_ECC_SOFT:
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
/*
* Nothing to do, the core will initialize everything for us.
*/
break;
- case NAND_ECC_HW:
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = atmel_nand_pmecc_init(chip);
if (ret)
return ret;
@@ -1140,7 +1206,7 @@ static int atmel_nand_ecc_init(struct nand_chip *chip)
default:
/* Other modes are not supported. */
dev_err(nc->dev, "Unsupported ECC mode: %d\n",
- chip->ecc.mode);
+ chip->ecc.engine_type);
return -ENOTSUPP;
}
@@ -1155,7 +1221,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
if (ret)
return ret;
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
/* Adjust the ECC operations for the HSMC IP. */
@@ -1467,6 +1533,18 @@ static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
return nc->caps->ops->setup_interface(nand, csline, conf);
}
+static int atmel_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct atmel_nand *nand = to_atmel_nand(chip);
+ struct atmel_nand_controller *nc;
+
+ nc = to_nand_controller(nand->base.controller);
+
+ return nc->caps->ops->exec_op(nand, op, check_only);
+}
+
static void atmel_nand_init(struct atmel_nand_controller *nc,
struct atmel_nand *nand)
{
@@ -1476,19 +1554,9 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
mtd->dev.parent = nc->dev;
nand->base.controller = &nc->base;
- chip->legacy.cmd_ctrl = atmel_nand_cmd_ctrl;
- chip->legacy.read_byte = atmel_nand_read_byte;
- chip->legacy.write_byte = atmel_nand_write_byte;
- chip->legacy.read_buf = atmel_nand_read_buf;
- chip->legacy.write_buf = atmel_nand_write_buf;
- chip->legacy.select_chip = atmel_nand_select_chip;
-
if (!nc->mck || !nc->caps->ops->setup_interface)
chip->options |= NAND_KEEP_TIMINGS;
- /* Some NANDs require a longer delay than the default one (20us). */
- chip->legacy.chip_delay = 40;
-
/*
* Use a bounce buffer when the buffer passed by the MTD user is not
* suitable for DMA.
@@ -1498,7 +1566,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
/* Default to HW ECC if pmecc is available. */
if (nc->pmecc)
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
}
static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
@@ -1527,18 +1595,6 @@ static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
smc_nc->ebi_csa->nfd0_on_d16);
}
-static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
- struct atmel_nand *nand)
-{
- struct nand_chip *chip = &nand->base;
-
- atmel_nand_init(nc, nand);
-
- /* Overload some methods for the HSMC controller. */
- chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
- chip->legacy.select_chip = atmel_hsmc_nand_select_chip;
-}
-
static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
{
struct nand_chip *chip = &nand->base;
@@ -1957,6 +2013,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops atmel_nand_controller_ops = {
.attach_chip = atmel_nand_attach_chip,
.setup_interface = atmel_nand_setup_interface,
+ .exec_op = atmel_nand_exec_op,
};
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
@@ -1976,13 +2033,9 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
platform_set_drvdata(pdev, nc);
nc->pmecc = devm_atmel_pmecc_get(dev);
- if (IS_ERR(nc->pmecc)) {
- ret = PTR_ERR(nc->pmecc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Could not get PMECC object (err = %d)\n",
- ret);
- return ret;
- }
+ if (IS_ERR(nc->pmecc))
+ return dev_err_probe(dev, PTR_ERR(nc->pmecc),
+ "Could not get PMECC object\n");
if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
dma_cap_mask_t mask;
@@ -2248,6 +2301,9 @@ atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
return ret;
hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
+ regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_DIS);
+
if (hsmc_nc->sram.pool)
gen_pool_free(hsmc_nc->sram.pool,
(unsigned long)hsmc_nc->sram.virt,
@@ -2300,6 +2356,8 @@ static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
/* Initial NFC configuration. */
regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
ATMEL_HSMC_NFC_CFG_DTO_MAX);
+ regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
+ ATMEL_HSMC_NFC_CTRL_EN);
ret = atmel_nand_controller_add_nands(&nc->base);
if (ret)
@@ -2317,8 +2375,9 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
.probe = atmel_hsmc_nand_controller_probe,
.remove = atmel_hsmc_nand_controller_remove,
.ecc_init = atmel_hsmc_nand_ecc_init,
- .nand_init = atmel_hsmc_nand_init,
+ .nand_init = atmel_nand_init,
.setup_interface = atmel_hsmc_nand_setup_interface,
+ .exec_op = atmel_hsmc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
@@ -2385,6 +2444,7 @@ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
.remove = atmel_smc_nand_controller_remove,
.ecc_init = atmel_nand_ecc_init,
.nand_init = atmel_smc_nand_init,
+ .exec_op = atmel_smc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
@@ -2400,6 +2460,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
.ecc_init = atmel_nand_ecc_init,
.nand_init = atmel_smc_nand_init,
.setup_interface = atmel_smc_nand_setup_interface,
+ .exec_op = atmel_smc_nand_exec_op,
};
static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index d865200ccd08..79b057400fe9 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -294,8 +294,8 @@ static int au1550nd_probe(struct platform_device *pdev)
nand_controller_init(&ctx->controller);
ctx->controller.ops = &au1550nd_ops;
this->controller = &ctx->controller;
- this->ecc.mode = NAND_ECC_SOFT;
- this->ecc.algo = NAND_ECC_HAMMING;
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ this->ecc.algo = NAND_ECC_ALGO_HAMMING;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 591775173034..8bb17c5a66c3 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -391,7 +391,8 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
nand_chip->legacy.chip_delay = 50;
b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
- b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+ /* TODO: implement ECC */
+ b47n->nand_chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_NONE;
/* Enable NAND flash access */
bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index a4033d32a710..2da39ab89286 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2532,6 +2532,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
{
struct mtd_info *mtd = nand_to_mtd(&host->chip);
struct nand_chip *chip = &host->chip;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
char msg[128];
@@ -2565,34 +2567,34 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
cfg->col_adr_bytes = 2;
cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
- if (chip->ecc.mode != NAND_ECC_HW) {
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
- chip->ecc.mode);
+ chip->ecc.engine_type);
return -EINVAL;
}
- if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
if (chip->ecc.strength == 1 && chip->ecc.size == 512)
/* Default to Hamming for 1-bit ECC, if unspecified */
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
else
/* Otherwise, BCH */
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
}
- if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
- chip->ecc.size != 512)) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING &&
+ (chip->ecc.strength != 1 || chip->ecc.size != 512)) {
dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
chip->ecc.strength, chip->ecc.size);
return -EINVAL;
}
- if (chip->ecc.mode != NAND_ECC_NONE &&
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
(!chip->ecc.size || !chip->ecc.strength)) {
- if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
+ if (requirements->step_size && requirements->strength) {
/* use detected ECC parameters */
- chip->ecc.size = chip->base.eccreq.step_size;
- chip->ecc.strength = chip->base.eccreq.strength;
+ chip->ecc.size = requirements->step_size;
+ chip->ecc.strength = requirements->strength;
dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
chip->ecc.size, chip->ecc.strength);
}
@@ -2600,7 +2602,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
switch (chip->ecc.size) {
case 512:
- if (chip->ecc.algo == NAND_ECC_HAMMING)
+ if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
cfg->ecc_level = 15;
else
cfg->ecc_level = chip->ecc.strength;
@@ -2728,7 +2730,7 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
chip->legacy.read_buf = brcmnand_read_buf;
chip->legacy.write_buf = brcmnand_write_buf;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.read_page = brcmnand_read_page;
chip->ecc.write_page = brcmnand_write_page;
chip->ecc.read_page_raw = brcmnand_read_page_raw;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 71516af85f23..b46786cd53e0 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2611,7 +2611,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip)
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->options |= NAND_NO_SUBPAGE_WRITE;
@@ -2757,7 +2757,7 @@ static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
* Default to HW ECC engine mode. If the nand-ecc-mode property is given
* in the DT node, this entry will be overwritten in nand_scan_ident().
*/
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
ret = nand_scan(chip, cdns_chip->nsels);
if (ret) {
@@ -2980,18 +2980,14 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
- if (IS_ERR(cdns_ctrl->reg)) {
- dev_err(&ofdev->dev, "devm_ioremap_resource res 0 failed\n");
+ if (IS_ERR(cdns_ctrl->reg))
return PTR_ERR(cdns_ctrl->reg);
- }
res = platform_get_resource(ofdev, IORESOURCE_MEM, 1);
cdns_ctrl->io.dma = res->start;
cdns_ctrl->io.virt = devm_ioremap_resource(&ofdev->dev, res);
- if (IS_ERR(cdns_ctrl->io.virt)) {
- dev_err(cdns_ctrl->dev, "devm_ioremap_resource res 1 failed\n");
+ if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
- }
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 92173790f20b..2b94f385a1a8 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -629,7 +629,8 @@ static int cafe_nand_attach_chip(struct nand_chip *chip)
goto out_free_dma;
}
- cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ cafe->nand.ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ cafe->nand.ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
cafe->nand.ecc.size = mtd->writesize;
cafe->nand.ecc.bytes = 14;
cafe->nand.ecc.strength = 4;
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 9472bf798ed5..b7f3f6347761 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -286,7 +286,7 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
goto out_mtd;
}
- this->ecc.mode = NAND_ECC_HW;
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
this->ecc.size = 256;
this->ecc.bytes = 3;
this->ecc.hwctl = cs_enable_hwecc;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index d975a62caaa5..427f320fb79b 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -168,7 +168,7 @@ static int nand_davinci_correct_1bit(struct nand_chip *chip, u_char *dat,
/*
* 4-bit hardware ECC ... context maintained over entire AEMIF
*
- * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * This is a syndrome engine, but we avoid NAND_ECC_PLACEMENT_INTERLEAVED
* since that forces use of a problematic "infix OOB" layout.
* Among other things, it trashes manufacturer bad block markers.
* Also, and specific to this hardware, it ECC-protects the "prepad"
@@ -530,11 +530,11 @@ static struct davinci_nand_pdata
if (!of_property_read_string(pdev->dev.of_node,
"ti,davinci-ecc-mode", &mode)) {
if (!strncmp("none", mode, 4))
- pdata->ecc_mode = NAND_ECC_NONE;
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_NONE;
if (!strncmp("soft", mode, 4))
- pdata->ecc_mode = NAND_ECC_SOFT;
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
- pdata->ecc_mode = NAND_ECC_HW;
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
}
if (!of_property_read_u32(pdev->dev.of_node,
"ti,davinci-ecc-bits", &prop))
@@ -585,21 +585,21 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- switch (info->chip.ecc.mode) {
- case NAND_ECC_NONE:
+ switch (info->chip.ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
pdata->ecc_bits = 0;
break;
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
pdata->ecc_bits = 0;
/*
- * This driver expects Hamming based ECC when ecc_mode is set
- * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
- * avoid adding an extra ->ecc_algo field to
- * davinci_nand_pdata.
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo
+ * field to davinci_nand_pdata.
*/
- info->chip.ecc.algo = NAND_ECC_HAMMING;
+ info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
break;
- case NAND_ECC_HW:
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
if (pdata->ecc_bits == 4) {
int chunks = mtd->writesize / 512;
@@ -629,7 +629,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
info->chip.ecc.bytes = 10;
info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- info->chip.ecc.algo = NAND_ECC_BCH;
+ info->chip.ecc.algo = NAND_ECC_ALGO_BCH;
/*
* Update ECC layout if needed ... for 1-bit HW ECC, the
@@ -645,7 +645,8 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_ooblayout());
info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
} else {
return -EIO;
@@ -656,7 +657,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
info->chip.ecc.correct = nand_davinci_correct_1bit;
info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
info->chip.ecc.bytes = 3;
- info->chip.ecc.algo = NAND_ECC_HAMMING;
+ info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
}
info->chip.ecc.size = 512;
info->chip.ecc.strength = pdata->ecc_bits;
@@ -850,7 +851,8 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Use board-specific ECC config */
- info->chip.ecc.mode = pdata->ecc_mode;
+ info->chip.ecc.engine_type = pdata->engine_type;
+ info->chip.ecc.placement = pdata->ecc_placement;
spin_lock_irq(&davinci_nand_lock);
@@ -897,7 +899,7 @@ static int nand_davinci_remove(struct platform_device *pdev)
int ret;
spin_lock_irq(&davinci_nand_lock);
- if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 9d99dade95ce..fa2439cb4daa 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1237,7 +1237,8 @@ int denali_chip_init(struct denali_controller *denali,
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->ecc.read_page = denali_read_page;
chip->ecc.write_page = denali_write_page;
chip->ecc.read_page_raw = denali_read_page_raw;
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c
index 2f77ee55e1bf..20c085a30adc 100644
--- a/drivers/mtd/nand/raw/denali_pci.c
+++ b/drivers/mtd/nand/raw/denali_pci.c
@@ -100,7 +100,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto out_remove_denali;
}
- dchip->chip.ecc.options |= NAND_ECC_MAXIMIZE;
+ dchip->chip.base.ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
dchip->nsels = nsels;
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 43721863a0d8..94432a453e5e 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1456,7 +1456,8 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.calculate = doc200x_calculate_ecc;
nand->ecc.correct = doc200x_correct_data;
- nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ nand->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
nand->ecc.size = 512;
nand->ecc.bytes = 6;
nand->ecc.strength = 2;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index 088692b2e27a..b2af7f81fdf8 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -244,7 +244,7 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
return -EIO;
}
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
elbc_fcm_ctrl->max_bitflips = 0;
@@ -727,12 +727,12 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
- switch (chip->ecc.mode) {
+ switch (chip->ecc.engine_type) {
/*
* if ECC was not chosen in DT, decide whether to use HW or SW ECC from
* CS Base Register
*/
- case NAND_ECC_NONE:
+ case NAND_ECC_ENGINE_TYPE_NONE:
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
@@ -740,23 +740,23 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
chip->ecc.write_page = fsl_elbc_write_page;
chip->ecc.write_subpage = fsl_elbc_write_subpage;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
break;
/* if SW ECC was chosen in DT, we do not need to set anything here */
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
break;
- /* should we also implement NAND_ECC_HW to do as the code above? */
+ /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */
default:
return -EINVAL;
}
@@ -786,8 +786,8 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
chip->page_shift);
dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
chip->phys_erase_shift);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
- chip->ecc.mode);
+ dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.engine_type = %d\n",
+ chip->ecc.engine_type);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
chip->ecc.steps);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 00ae7a910b03..e345f9d9f8e8 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -309,7 +309,7 @@ static void fsl_ifc_cmdfunc(struct nand_chip *chip, unsigned int command,
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
ifc_nand_ctrl->index += column;
- if (chip->ecc.mode == NAND_ECC_HW)
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
ifc_nand_ctrl->eccread = 1;
fsl_ifc_do_read(chip, 0, mtd);
@@ -707,6 +707,30 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ u32 csor;
+
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+ } else {
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
+ } else {
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
+ }
dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
nanddev_ntargets(&chip->base));
@@ -724,8 +748,8 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip)
chip->page_shift);
dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
chip->phys_erase_shift);
- dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
- chip->ecc.mode);
+ dev_dbg(priv->dev, "%s: nand->ecc.engine_type = %d\n", __func__,
+ chip->ecc.engine_type);
dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
chip->ecc.steps);
dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
@@ -910,25 +934,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
return -ENODEV;
}
- /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
- if (csor & CSOR_NAND_ECC_DEC_EN) {
- chip->ecc.mode = NAND_ECC_HW;
- mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
-
- /* Hardware generates ECC per 512 Bytes */
- chip->ecc.size = 512;
- if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
- chip->ecc.bytes = 8;
- chip->ecc.strength = 4;
- } else {
- chip->ecc.bytes = 16;
- chip->ecc.strength = 8;
- }
- } else {
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
- }
-
ret = fsl_ifc_sram_init(priv);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 197850aeb261..d5813b9abc8e 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -47,8 +47,8 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
int ret;
struct device_node *flash_np;
- fun->chip.ecc.mode = NAND_ECC_SOFT;
- fun->chip.ecc.algo = NAND_ECC_HAMMING;
+ fun->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ fun->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
fun->chip.controller = &fun->base;
mtd->dev.parent = fun->dev;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 92ddc41d0ff0..4191831df182 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -900,8 +900,8 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
return 0;
}
- switch (nand->ecc.mode) {
- case NAND_ECC_HW:
+ switch (nand->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
nand->ecc.correct = nand_correct_data;
@@ -910,14 +910,14 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
break;
- case NAND_ECC_SOFT:
- if (nand->ecc.algo == NAND_ECC_BCH) {
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
dev_info(host->dev,
"Using 4-bit SW BCH ECC scheme\n");
break;
}
- case NAND_ECC_ON_DIE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
default:
@@ -929,7 +929,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
* Don't set layout for BCH4 SW ECC. This will be
 * generated later in nand_bch_init().
*/
- if (nand->ecc.mode == NAND_ECC_HW) {
+ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
switch (mtd->oobsize) {
case 16:
case 64:
@@ -1059,7 +1059,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
* can overwrite this value if the DT provides a different value.
*/
- nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.size = 512;
nand->badblockbits = 7;
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index 3bd847ccc3f3..4ec0a1e10867 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -342,8 +342,8 @@ static int gpio_nand_probe(struct platform_device *pdev)
gpiomtd->base.ops = &gpio_nand_ops;
nand_set_flash_node(chip, pdev->dev.of_node);
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
chip->options = gpiomtd->plat.options;
chip->controller = &gpiomtd->base;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 5d4aee46cc55..dc8104e67506 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -272,8 +272,8 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
default:
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
- chip->base.eccreq.strength,
- chip->base.eccreq.step_size);
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
return -EINVAL;
}
geo->ecc_chunk_size = ecc_step;
@@ -510,6 +510,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
static int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
if (chip->ecc.strength > 0 && chip->ecc.size > 0)
return set_geometry_by_ecc_info(this, chip->ecc.strength,
@@ -517,13 +519,12 @@ static int common_nfc_set_geometry(struct gpmi_nand_data *this)
if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
|| legacy_set_geometry(this)) {
- if (!(chip->base.eccreq.strength > 0 &&
- chip->base.eccreq.step_size > 0))
+ if (!(requirements->strength > 0 && requirements->step_size > 0))
return -EINVAL;
return set_geometry_by_ecc_info(this,
- chip->base.eccreq.strength,
- chip->base.eccreq.step_size);
+ requirements->strength,
+ requirements->step_size);
}
return 0;
@@ -1003,10 +1004,8 @@ static int acquire_dma_channels(struct gpmi_nand_data *this)
/* request dma channel */
dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
if (IS_ERR(dma_chan)) {
- ret = PTR_ERR(dma_chan);
- if (ret != -EPROBE_DEFER)
- dev_err(this->dev, "DMA channel request failed: %d\n",
- ret);
+ ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
+ "DMA channel request failed\n");
release_dma_channels(this);
} else {
this->dma_chans[0] = dma_chan;
@@ -2032,7 +2031,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
ecc->write_page_raw = gpmi_ecc_write_page_raw;
ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
- ecc->mode = NAND_ECC_HW;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
ecc->size = bch_geo->ecc_chunk_size;
ecc->strength = bch_geo->ecc_strength;
mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index b84238e2268a..8b2122ce6ec3 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -186,7 +186,7 @@ static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
- if (chip->ecc.mode == NAND_ECC_NONE) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
<< HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
@@ -468,7 +468,7 @@ static void hisi_nfc_cmdfunc(struct nand_chip *chip, unsigned command,
case NAND_CMD_STATUS:
flag = hinfc_read(host, HINFC504_CON);
- if (chip->ecc.mode == NAND_ECC_HW)
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
hinfc_write(host,
flag & ~(HINFC504_CON_ECCTYPE_MASK <<
HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
@@ -721,7 +721,7 @@ static int hisi_nfc_attach_chip(struct nand_chip *chip)
}
hinfc_write(host, flag, HINFC504_CON);
- if (chip->ecc.mode == NAND_ECC_HW)
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
hisi_nfc_ecc_probe(host);
return 0;
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 69423bb29adb..0e9d426fe4f2 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -194,8 +194,8 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
(chip->ecc.strength / 8);
}
- switch (chip->ecc.mode) {
- case NAND_ECC_HW:
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
if (!nfc->ecc) {
dev_err(nfc->dev, "HW ECC selected, but ECC controller not found\n");
return -ENODEV;
@@ -205,22 +205,22 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
chip->ecc.calculate = ingenic_nand_ecc_calculate;
chip->ecc.correct = ingenic_nand_ecc_correct;
fallthrough;
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
dev_info(nfc->dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->ecc) ? "hardware ECC" : "software ECC",
chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
break;
- case NAND_ECC_NONE:
+ case NAND_ECC_ENGINE_TYPE_NONE:
dev_info(nfc->dev, "not using ECC\n");
break;
default:
dev_err(nfc->dev, "ECC mode %d not supported\n",
- chip->ecc.mode);
+ chip->ecc.engine_type);
return -EINVAL;
}
/* The NAND core will generate the ECC layout for SW ECC */
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
@@ -243,8 +243,10 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
/* For legacy reasons we use a different layout on the qi,lb60 board. */
if (of_machine_is_compatible("qi,lb60"))
mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
- else
+ else if (nfc->soc_info->oob_layout)
mtd_set_ooblayout(mtd, nfc->soc_info->oob_layout);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
return 0;
}
@@ -404,7 +406,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
mtd->dev.parent = dev;
chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
@@ -532,7 +534,6 @@ static const struct jz_soc_info jz4740_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00008000,
.addr_offset = 0x00010000,
- .oob_layout = &nand_ooblayout_lp_ops,
};
static const struct jz_soc_info jz4725b_soc_info = {
@@ -546,7 +547,6 @@ static const struct jz_soc_info jz4780_soc_info = {
.data_offset = 0x00000000,
.cmd_offset = 0x00400000,
.addr_offset = 0x00800000,
- .oob_layout = &nand_ooblayout_lp_ops,
};
static const struct of_device_id ingenic_nand_dt_match[] = {
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 7521038af2ef..4940bb2e3c07 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -656,7 +656,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
if (!host->dummy_buf)
return -ENOMEM;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
host->mlcsubpages = mtd->writesize / 512;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index b151fd000815..6db9d2ed6881 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -881,7 +881,8 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
/* NAND callbacks for LPC32xx SLC hardware */
- chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->legacy.read_byte = lpc32xx_nand_read_byte;
chip->legacy.read_buf = lpc32xx_nand_read_buf;
chip->legacy.write_buf = lpc32xx_nand_write_buf;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 8482d3bd8b1f..f5ca2002d08e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -227,6 +227,8 @@
#define XTYPE_MASK 7
/**
+ * struct marvell_hw_ecc_layout - layout of Marvell ECC
+ *
* Marvell ECC engine works differently than the others, in order to limit the
* size of the IP, hardware engineers chose to set a fixed strength at 16 bits
 * per subpage, and depending on the desired strength needed by the NAND chip,
@@ -292,6 +294,8 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
};
/**
+ * struct marvell_nand_chip_sel - CS line description
+ *
* The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
* is made by a field in NDCB0 register, and in another field in NDCB2 register.
* The datasheet describes the logic with an error: ADDR5 field is once
@@ -312,14 +316,15 @@ struct marvell_nand_chip_sel {
};
/**
- * NAND chip structure: stores NAND chip device related information
+ * struct marvell_nand_chip - stores NAND chip device related information
*
* @chip: Base NAND chip structure
* @node: Used to store NAND chips into a list
- * @layout NAND layout when using hardware ECC
+ * @layout: NAND layout when using hardware ECC
* @ndcr: Controller register value for this NAND chip
* @ndtr0: Timing registers 0 value for this NAND chip
* @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Number of cycles needed to pass the column address
* @selected_die: Current active CS
* @nsels: Number of CS lines required by the NAND chip
* @sels: Array of CS lines descriptions
@@ -349,7 +354,8 @@ static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
}
/**
- * NAND controller capabilities for distinction between compatible strings
+ * struct marvell_nfc_caps - NAND controller capabilities for distinction
+ * between compatible strings
*
* @max_cs_nb: Number of Chip Select lines available
* @max_rb_nb: Number of Ready/Busy lines available
@@ -372,7 +378,7 @@ struct marvell_nfc_caps {
};
/**
- * NAND controller structure: stores Marvell NAND controller information
+ * struct marvell_nfc - stores Marvell NAND controller information
*
* @controller: Base controller structure
* @dev: Parent device (used to print error messages)
@@ -383,7 +389,9 @@ struct marvell_nfc_caps {
* @assigned_cs: Bitmask describing already assigned CS lines
* @chips: List containing all the NAND chips attached to
* this NAND controller
+ * @selected_chip: Currently selected target chip
* @caps: NAND controller capabilities for each compatible string
+ * @use_dma: Whether DMA is used
* @dma_chan: DMA channel (NFCv1 only)
* @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
*/
@@ -411,7 +419,8 @@ static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
}
/**
- * NAND controller timings expressed in NAND Controller clock cycles
+ * struct marvell_nfc_timings - NAND controller timings expressed in NAND
+ * Controller clock cycles
*
* @tRP: ND_nRE pulse width
* @tRH: ND_nRE high duration
@@ -455,8 +464,8 @@ struct marvell_nfc_timings {
period_ns))
/**
- * NAND driver structure filled during the parsing of the ->exec_op() subop
- * subset of instructions.
+ * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
+ * subop subset of instructions.
*
* @ndcb: Array of values written to NDCBx registers
* @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
@@ -685,9 +694,31 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
}
+static int marvell_nfc_poll_status(struct marvell_nfc *nfc, u32 mask,
+ u32 expected_val, unsigned long timeout_ms)
+{
+ unsigned long limit;
+ u32 st;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ st = readl_relaxed(nfc->regs + NDSR);
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if ((st & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+
+ return -ETIMEDOUT;
+}
+
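The open-coded loop keeps the NDSR_RDY(1)-into-NDSR_RDY(0) folding explicit; ignoring that folding, a roughly equivalent shape using the generic iopoll helper would be (an assumption for illustration, reusing the parameters of marvell_nfc_poll_status() above, not what the patch uses):

#include <linux/iopoll.h>

u32 st;
int ret = readl_relaxed_poll_timeout_atomic(nfc->regs + NDSR, st,
					    (st & mask) == expected_val,
					    0, timeout_ms * USEC_PER_MSEC);

The atomic variant is the point: the caller below selects polling exactly when mtd->oops_panic_write is set, where sleeping in wait_for_completion_timeout() is not an option.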
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
u32 pending;
int ret;
@@ -695,12 +726,18 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
if (!timeout_ms)
timeout_ms = IRQ_TIMEOUT;
- init_completion(&nfc->complete);
+ if (mtd->oops_panic_write) {
+ ret = marvell_nfc_poll_status(nfc, NDSR_RDY(0),
+ NDSR_RDY(0),
+ timeout_ms);
+ } else {
+ init_completion(&nfc->complete);
- marvell_nfc_enable_int(nfc, NDCR_RDYM);
- ret = wait_for_completion_timeout(&nfc->complete,
- msecs_to_jiffies(timeout_ms));
- marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ }
pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
/*
@@ -780,7 +817,7 @@ static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
* When enabling BCH, set threshold to 0 to always know the
* number of corrected bitflips.
*/
- if (chip->ecc.algo == NAND_ECC_BCH)
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
}
}
@@ -792,7 +829,7 @@ static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
if (ndcr & NDCR_ECC_EN) {
writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
- if (chip->ecc.algo == NAND_ECC_BCH)
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
writel_relaxed(0, nfc->regs + NDECCCTRL);
}
}
@@ -966,7 +1003,7 @@ static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
if (ndsr & NDSR_CORERR) {
writel_relaxed(ndsr, nfc->regs + NDSR);
- if (chip->ecc.algo == NAND_ECC_BCH)
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
bf = NDSR_ERRCNT(ndsr);
else
bf = 1;
@@ -2218,7 +2255,7 @@ static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
ecc->size = l->data_bytes;
if (ecc->strength == 1) {
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
@@ -2228,7 +2265,7 @@ static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
ecc->write_oob = ecc->write_oob_raw;
} else {
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
ecc->strength = 16;
ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
@@ -2247,13 +2284,16 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
int ret;
- if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
- if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
- ecc->size = chip->base.eccreq.step_size;
- ecc->strength = chip->base.eccreq.strength;
+ if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
+ (!ecc->size || !ecc->strength)) {
+ if (requirements->step_size && requirements->strength) {
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
} else {
dev_info(nfc->dev,
"No minimum ECC strength, using 1b/512B\n");
@@ -2262,15 +2302,15 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
}
}
- switch (ecc->mode) {
- case NAND_ECC_HW:
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
if (ret)
return ret;
break;
- case NAND_ECC_NONE:
- case NAND_ECC_SOFT:
- case NAND_ECC_ON_DIE:
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
mtd->writesize != SZ_2K) {
dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
@@ -2467,7 +2507,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip)
return ret;
}
- if (chip->ecc.mode == NAND_ECC_HW) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
/*
* Subpage write not available with hardware ECC, prohibit also
* subpage read as in userspace subpage access would still be
@@ -2642,7 +2682,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
* Default to HW ECC engine mode. If the nand-ecc-mode property is given
* in the DT node, this entry will be overwritten in nand_scan_ident().
*/
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
/*
* Save a reference value for timing registers before
@@ -2759,10 +2799,7 @@ static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
if (IS_ERR(nfc->dma_chan)) {
ret = PTR_ERR(nfc->dma_chan);
nfc->dma_chan = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(nfc->dev, "DMA channel request failed: %d\n",
- ret);
- return ret;
+ return dev_err_probe(nfc->dev, ret, "DMA channel request failed\n");
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 0e5829a2b54f..48e6dac96be6 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1197,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
if (ret)
return -EINVAL;
- nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand->ecc.write_page_raw = meson_nfc_write_page_raw;
nand->ecc.write_page = meson_nfc_write_page_hwecc;
nand->ecc.write_oob_raw = nand_write_oob_std;
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 18ecb096a32d..dfd0d3ed5ed0 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -688,8 +688,8 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* Support external chip-select logic on ADS5121 board */
if (of_machine_is_compatible("fsl,mpc5121ads")) {
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index ad1b55dab211..57f1f1708994 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1253,21 +1253,23 @@ static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 spare;
int free, ret;
/* support only ecc hw mode */
- if (nand->ecc.mode != NAND_ECC_HW) {
- dev_err(dev, "ecc.mode not supported\n");
+ if (nand->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(dev, "ecc.engine_type not supported\n");
return -EINVAL;
}
/* if optional dt settings not present */
if (!nand->ecc.size || !nand->ecc.strength) {
/* use datasheet requirements */
- nand->ecc.strength = nand->base.eccreq.strength;
- nand->ecc.size = nand->base.eccreq.step_size;
+ nand->ecc.strength = requirements->strength;
+ nand->ecc.size = requirements->step_size;
/*
* align eccstrength and eccsize
@@ -1416,7 +1418,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
/* set default mode in case dt entry is missing */
- nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index a043d76b48cb..684c51e5e60d 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -669,7 +669,7 @@ static void mxc_nand_enable_hwecc_v1_v2(struct nand_chip *chip, bool enable)
struct mxc_nand_host *host = nand_get_controller_data(chip);
uint16_t config1;
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return;
config1 = readw(NFC_V1_V2_CONFIG1);
@@ -687,7 +687,7 @@ static void mxc_nand_enable_hwecc_v3(struct nand_chip *chip, bool enable)
struct mxc_nand_host *host = nand_get_controller_data(chip);
uint32_t config2;
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return;
config2 = readl(NFC_V3_CONFIG2);
@@ -1117,7 +1117,8 @@ static void preset_v1(struct mtd_info *mtd)
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
uint16_t config1 = 0;
- if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ mtd->writesize)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
if (!host->devtype_data->irqpending_quirk)
@@ -1227,7 +1228,7 @@ static void preset_v2(struct mtd_info *mtd)
if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
- if (nand_chip->ecc.mode == NAND_ECC_HW)
+ if (nand_chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
host->eccsize = get_eccsize(mtd);
@@ -1303,7 +1304,7 @@ static void preset_v3(struct mtd_info *mtd)
}
if (mtd->writesize) {
- if (chip->ecc.mode == NAND_ECC_HW)
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST)
config2 |= NFC_V3_CONFIG2_ECC_EN;
config2 |= NFC_V3_CONFIG2_PPB(
@@ -1680,8 +1681,13 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
struct mxc_nand_host *host = nand_get_controller_data(chip);
struct device *dev = mtd->dev.parent;
- switch (chip->ecc.mode) {
- case NAND_ECC_HW:
+ chip->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+ chip->ecc.size = 512;
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.read_page = mxc_nand_read_page;
chip->ecc.read_page_raw = mxc_nand_read_page_raw;
chip->ecc.read_oob = mxc_nand_read_oob;
@@ -1690,7 +1696,7 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
chip->ecc.write_oob = mxc_nand_write_oob;
break;
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
break;
default:
@@ -1728,7 +1734,7 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
*/
host->used_oobsize = min(mtd->oobsize, 218U);
- if (chip->ecc.mode == NAND_ECC_HW) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
if (is_imx21_nfc(host) || is_imx27_nfc(host))
chip->ecc.strength = 1;
else
@@ -1835,19 +1841,7 @@ static int mxcnd_probe(struct platform_device *pdev)
if (host->devtype_data->axi_offset)
host->regs_axi = host->base + host->devtype_data->axi_offset;
- this->ecc.bytes = host->devtype_data->eccbytes;
- host->eccsize = host->devtype_data->eccsize;
-
this->legacy.select_chip = host->devtype_data->select_chip;
- this->ecc.size = 512;
- mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
-
- if (host->pdata.hw_ecc) {
- this->ecc.mode = NAND_ECC_HW;
- } else {
- this->ecc.mode = NAND_ECC_SOFT;
- this->ecc.algo = NAND_ECC_HAMMING;
- }
/* NAND bus width determines access functions used by upper layer */
if (host->pdata.width == 2)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 0c768cb88f96..1f0d542d5923 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -34,6 +34,7 @@
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
@@ -45,166 +46,6 @@
#include "internals.h"
-/* Define default oob placement schemes for large and small page devices */
-static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (section > 1)
- return -ERANGE;
-
- if (!section) {
- oobregion->offset = 0;
- if (mtd->oobsize == 16)
- oobregion->length = 4;
- else
- oobregion->length = 3;
- } else {
- if (mtd->oobsize == 8)
- return -ERANGE;
-
- oobregion->offset = 6;
- oobregion->length = ecc->total - 4;
- }
-
- return 0;
-}
-
-static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section > 1)
- return -ERANGE;
-
- if (mtd->oobsize == 16) {
- if (section)
- return -ERANGE;
-
- oobregion->length = 8;
- oobregion->offset = 8;
- } else {
- oobregion->length = 2;
- if (!section)
- oobregion->offset = 3;
- else
- oobregion->offset = 6;
- }
-
- return 0;
-}
-
-const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
- .ecc = nand_ooblayout_ecc_sp,
- .free = nand_ooblayout_free_sp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
-
-static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (section || !ecc->total)
- return -ERANGE;
-
- oobregion->length = ecc->total;
- oobregion->offset = mtd->oobsize - oobregion->length;
-
- return 0;
-}
-
-static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (section)
- return -ERANGE;
-
- oobregion->length = mtd->oobsize - ecc->total - 2;
- oobregion->offset = 2;
-
- return 0;
-}
-
-const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
- .ecc = nand_ooblayout_ecc_lp,
- .free = nand_ooblayout_free_lp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
-
-/*
- * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
- * are placed at a fixed offset.
- */
-static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (section)
- return -ERANGE;
-
- switch (mtd->oobsize) {
- case 64:
- oobregion->offset = 40;
- break;
- case 128:
- oobregion->offset = 80;
- break;
- default:
- return -EINVAL;
- }
-
- oobregion->length = ecc->total;
- if (oobregion->offset + oobregion->length > mtd->oobsize)
- return -ERANGE;
-
- return 0;
-}
-
-static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int ecc_offset = 0;
-
- if (section < 0 || section > 1)
- return -ERANGE;
-
- switch (mtd->oobsize) {
- case 64:
- ecc_offset = 40;
- break;
- case 128:
- ecc_offset = 80;
- break;
- default:
- return -EINVAL;
- }
-
- if (section == 0) {
- oobregion->offset = 2;
- oobregion->length = ecc_offset - 2;
- } else {
- oobregion->offset = ecc_offset + ecc->total;
- oobregion->length = mtd->oobsize - oobregion->offset;
- }
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
- .ecc = nand_ooblayout_ecc_lp_hamming,
- .free = nand_ooblayout_free_lp_hamming,
-};
-
static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
struct mtd_pairing_info *info)
{
@@ -4750,6 +4591,8 @@ static inline bool is_full_id_nand(struct nand_flash_dev *type)
static bool find_full_id_nand(struct nand_chip *chip,
struct nand_flash_dev *type)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
u8 *id_data = chip->id.data;
@@ -4771,8 +4614,9 @@ static bool find_full_id_nand(struct nand_chip *chip,
memorg->pagesize *
memorg->pages_per_eraseblock);
chip->options |= type->options;
- chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
- chip->base.eccreq.step_size = NAND_ECC_STEP(type);
+ requirements.strength = NAND_ECC_STRENGTH(type);
+ requirements.step_size = NAND_ECC_STEP(type);
+ nanddev_set_ecc_requirements(base, &requirements);
chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
if (!chip->parameters.model)
@@ -5033,91 +4877,101 @@ free_detect_allocation:
return ret;
}
-static const char * const nand_ecc_modes[] = {
- [NAND_ECC_NONE] = "none",
- [NAND_ECC_SOFT] = "soft",
- [NAND_ECC_HW] = "hw",
- [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
- [NAND_ECC_ON_DIE] = "on-die",
-};
-
-static int of_get_nand_ecc_mode(struct device_node *np)
+static enum nand_ecc_engine_type
+of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
+ enum nand_ecc_legacy_mode {
+ NAND_ECC_INVALID,
+ NAND_ECC_NONE,
+ NAND_ECC_SOFT,
+ NAND_ECC_SOFT_BCH,
+ NAND_ECC_HW,
+ NAND_ECC_HW_SYNDROME,
+ NAND_ECC_ON_DIE,
+ };
+ const char * const nand_ecc_legacy_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_SOFT_BCH] = "soft_bch",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_ON_DIE] = "on-die",
+ };
+ enum nand_ecc_legacy_mode eng_type;
const char *pm;
- int err, i;
+ int err;
err = of_property_read_string(np, "nand-ecc-mode", &pm);
- if (err < 0)
- return err;
-
- for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++)
- if (!strcasecmp(pm, nand_ecc_modes[i]))
- return i;
-
- /*
- * For backward compatibility we support few obsoleted values that don't
- * have their mappings into the nand_ecc_mode enum anymore (they were
- * merged with other enums).
- */
- if (!strcasecmp(pm, "soft_bch"))
- return NAND_ECC_SOFT;
+ if (err)
+ return NAND_ECC_ENGINE_TYPE_INVALID;
+
+ for (eng_type = NAND_ECC_NONE;
+ eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
+ if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
+ switch (eng_type) {
+ case NAND_ECC_NONE:
+ return NAND_ECC_ENGINE_TYPE_NONE;
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ return NAND_ECC_ENGINE_TYPE_SOFT;
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ return NAND_ECC_ENGINE_TYPE_ON_HOST;
+ case NAND_ECC_ON_DIE:
+ return NAND_ECC_ENGINE_TYPE_ON_DIE;
+ default:
+ break;
+ }
+ }
+ }
- return -ENODEV;
+ return NAND_ECC_ENGINE_TYPE_INVALID;
}
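In short, the legacy "nand-ecc-mode" strings now fan out over three separate properties: "hw" and "hw_syndrome" both map to the on-host engine type (with "hw_syndrome" additionally selecting interleaved placement via the helper below), "soft" and "soft_bch" both map to the soft engine type with the algorithm recovered separately, and "none" and "on-die" map one-to-one.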
-static const char * const nand_ecc_algos[] = {
- [NAND_ECC_HAMMING] = "hamming",
- [NAND_ECC_BCH] = "bch",
- [NAND_ECC_RS] = "rs",
-};
-
-static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
+static enum nand_ecc_placement
+of_get_rawnand_ecc_placement_legacy(struct device_node *np)
{
- enum nand_ecc_algo ecc_algo;
const char *pm;
int err;
- err = of_property_read_string(np, "nand-ecc-algo", &pm);
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
if (!err) {
- for (ecc_algo = NAND_ECC_HAMMING;
- ecc_algo < ARRAY_SIZE(nand_ecc_algos);
- ecc_algo++) {
- if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
- return ecc_algo;
- }
+ if (!strcasecmp(pm, "hw_syndrome"))
+ return NAND_ECC_PLACEMENT_INTERLEAVED;
}
- /*
- * For backward compatibility we also read "nand-ecc-mode" checking
- * for some obsoleted values that were specifying ECC algorithm.
- */
+ return NAND_ECC_PLACEMENT_UNKNOWN;
+}
+
+static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
+{
+ const char *pm;
+ int err;
+
err = of_property_read_string(np, "nand-ecc-mode", &pm);
if (!err) {
if (!strcasecmp(pm, "soft"))
- return NAND_ECC_HAMMING;
+ return NAND_ECC_ALGO_HAMMING;
else if (!strcasecmp(pm, "soft_bch"))
- return NAND_ECC_BCH;
+ return NAND_ECC_ALGO_BCH;
}
- return NAND_ECC_UNKNOWN;
+ return NAND_ECC_ALGO_UNKNOWN;
}
-static int of_get_nand_ecc_step_size(struct device_node *np)
+static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
- int ret;
- u32 val;
+ struct device_node *dn = nand_get_flash_node(chip);
+ struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
- ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
- return ret ? ret : val;
-}
+ if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
-static int of_get_nand_ecc_strength(struct device_node *np)
-{
- int ret;
- u32 val;
+ if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
+ user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
- ret = of_property_read_u32(np, "nand-ecc-strength", &val);
- return ret ? ret : val;
+ if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
+ user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}
static int of_get_nand_bus_width(struct device_node *np)
@@ -5141,11 +4995,10 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
return of_property_read_bool(np, "nand-on-flash-bbt");
}
-static int nand_dt_init(struct nand_chip *chip)
+static int rawnand_dt_init(struct nand_chip *chip)
{
+ struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
struct device_node *dn = nand_get_flash_node(chip);
- enum nand_ecc_algo ecc_algo;
- int ecc_mode, ecc_strength, ecc_step;
if (!dn)
return 0;
@@ -5159,25 +5012,29 @@ static int nand_dt_init(struct nand_chip *chip)
if (of_get_nand_on_flash_bbt(dn))
chip->bbt_options |= NAND_BBT_USE_FLASH;
- ecc_mode = of_get_nand_ecc_mode(dn);
- ecc_algo = of_get_nand_ecc_algo(dn);
- ecc_strength = of_get_nand_ecc_strength(dn);
- ecc_step = of_get_nand_ecc_step_size(dn);
-
- if (ecc_mode >= 0)
- chip->ecc.mode = ecc_mode;
+ of_get_nand_ecc_user_config(nand);
+ of_get_nand_ecc_legacy_user_config(chip);
- if (ecc_algo != NAND_ECC_UNKNOWN)
- chip->ecc.algo = ecc_algo;
-
- if (ecc_strength >= 0)
- chip->ecc.strength = ecc_strength;
+ /*
+ * If neither the user nor the NAND controller has requested a specific
+ * ECC engine type, default to NAND_ECC_ENGINE_TYPE_ON_HOST.
+ */
+ nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- if (ecc_step > 0)
- chip->ecc.size = ecc_step;
+ /*
+ * Use the user-requested engine type; if none was provided, default to
+ * the NAND controller's choice, and finally fall back to the raw NAND
+ * default.
+ */
+ if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
+ chip->ecc.engine_type = nand->ecc.defaults.engine_type;
- if (of_property_read_bool(dn, "nand-ecc-maximize"))
- chip->ecc.options |= NAND_ECC_MAXIMIZE;
+ chip->ecc.placement = nand->ecc.user_conf.placement;
+ chip->ecc.algo = nand->ecc.user_conf.algo;
+ chip->ecc.strength = nand->ecc.user_conf.strength;
+ chip->ecc.size = nand->ecc.user_conf.step_size;
return 0;
}
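For clarity, the precedence applied above can be read as a small helper (an illustrative sketch only, not part of the patch; it assumes the nand_ecc_props fields introduced by this series):

static enum nand_ecc_engine_type
rawnand_resolve_engine_type(struct nand_chip *chip)
{
        struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));

        /* 1. An explicit user/DT request always wins. */
        if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
                return nand->ecc.user_conf.engine_type;

        /* 2. Otherwise keep the type the controller driver already chose. */
        if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
                return chip->ecc.engine_type;

        /* 3. Finally fall back to the raw NAND default, set above. */
        return nand->ecc.defaults.engine_type;
}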
@@ -5215,7 +5072,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
/* Enforce the right timings for reset/detection */
chip->current_interface_config = nand_get_reset_interface_config();
- ret = nand_dt_init(chip);
+ ret = rawnand_dt_init(chip);
if (ret)
return ret;
@@ -5282,16 +5139,76 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip)
kfree(chip->parameters.onfi);
}
+static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ switch (ecc->placement) {
+ case NAND_ECC_PLACEMENT_UNKNOWN:
+ case NAND_ECC_PLACEMENT_OOB:
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+ ecc->write_subpage = nand_write_subpage_hwecc;
+ fallthrough;
+
+ case NAND_ECC_PLACEMENT_INTERLEAVED:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ return -EINVAL;
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
+ ecc->placement);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
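A driver that used to set NAND_ECC_HW_SYNDROME now reaches the interleaved branch above by spelling the placement out, as the r852 hunk further down in this series does:

        /* On-host ECC engine with interleaved (syndrome) placement. */
        chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
        chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;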
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+ if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
return -EINVAL;
switch (ecc->algo) {
- case NAND_ECC_HAMMING:
+ case NAND_ECC_ALGO_HAMMING:
ecc->calculate = nand_calculate_ecc;
ecc->correct = nand_correct_data;
ecc->read_page = nand_read_page_swecc;
@@ -5312,7 +5229,7 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
return 0;
- case NAND_ECC_BCH:
+ case NAND_ECC_ALGO_BCH:
if (!mtd_nand_has_bch()) {
WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
return -EINVAL;
@@ -5350,7 +5267,7 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
return -EINVAL;
}
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
}
@@ -5359,8 +5276,8 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
* used, otherwise we don't know how many bytes can really be
* used.
*/
- if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
- ecc->options & NAND_ECC_MAXIMIZE) {
+ if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
+ nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
int steps, bytes;
/* Always prefer 1k blocks over 512bytes ones */
@@ -5454,10 +5371,12 @@ static int
nand_match_ecc_req(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
struct mtd_info *mtd = nand_to_mtd(chip);
const struct nand_ecc_step_info *stepinfo;
- int req_step = chip->base.eccreq.step_size;
- int req_strength = chip->base.eccreq.strength;
+ int req_step = requirements->step_size;
+ int req_strength = requirements->strength;
int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
int best_step, best_strength, best_ecc_bytes;
int best_ecc_bytes_total = INT_MAX;
@@ -5598,11 +5517,12 @@ nand_maximize_ecc(struct nand_chip *chip,
* @caps: ECC engine caps info structure
* @oobavail: OOB size that the ECC engine can use
*
- * Choose the ECC configuration according to following logic
+ * Choose the ECC configuration according to the following logic.
*
* 1. If both ECC step size and ECC strength are already set (usually by DT)
* then check if it is supported by this controller.
- * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
+ * 2. If the user provided the nand-ecc-maximize property, then select maximum
+ * ECC strength.
* 3. Otherwise, try to match the ECC step size and ECC strength closest
* to the chip's requirement. If available OOB size can't fit the chip
* requirement then fallback to the maximum ECC step size and ECC strength.
@@ -5613,6 +5533,7 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
return -EINVAL;
@@ -5620,7 +5541,7 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
if (chip->ecc.size && chip->ecc.strength)
return nand_check_ecc_caps(chip, caps, oobavail);
- if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
return nand_maximize_ecc(chip, caps, oobavail);
if (!nand_match_ecc_req(chip, caps, oobavail))
@@ -5630,41 +5551,6 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
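As a usage reference for the three-step logic documented above, a controller attach hook typically just passes its capability table and the OOB space it can spare (a hedged sketch with hypothetical foo_* names; the stm32_fmc2 hunk below makes the same call with its own caps and bad-block-marker length):

static int foo_attach_chip(struct nand_chip *chip)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        /* Honour DT values, maximize, or match the chip's requirement. */
        return nand_ecc_choose_conf(chip, &foo_ecc_caps,
                                    mtd->oobsize - FOO_BBM_LEN);
}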
-/*
- * Check if the chip configuration meet the datasheet requirements.
-
- * If our configuration corrects A bits per B bytes and the minimum
- * required correction level is X bits per Y bytes, then we must ensure
- * both of the following are true:
- *
- * (1) A / B >= X / Y
- * (2) A >= X
- *
- * Requirement (1) ensures we can correct for the required bitflip density.
- * Requirement (2) ensures we can correct even when all bitflips are clumped
- * in the same sector.
- */
-static bool nand_ecc_strength_good(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int corr, ds_corr;
-
- if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
- /* Not enough information */
- return true;
-
- /*
- * We get the number of corrected bits per page to compare
- * the correction density.
- */
- corr = (mtd->writesize * ecc->strength) / ecc->size;
- ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
- chip->base.eccreq.step_size;
-
- return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
-}
-
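A quick worked instance of the two requirements encoded above (the check now lives in the generic nand_ecc_is_strong_enough() helper used later in this hunk): if the chip requires X = 8 bits per Y = 512 bytes, a configuration correcting A = 4 bits per B = 256 bytes satisfies (1), since 4/256 = 8/512, but fails (2), since 4 < 8, and is therefore still reported as too weak.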
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
struct nand_chip *chip = container_of(nand, struct nand_chip,
@@ -5752,15 +5638,17 @@ static int nand_scan_tail(struct nand_chip *chip)
* If no default placement scheme is given, select an appropriate one.
*/
if (!mtd->ooblayout &&
- !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
+ !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ ecc->algo == NAND_ECC_ALGO_BCH)) {
switch (mtd->oobsize) {
case 8:
case 16:
- mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
+ mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
break;
case 64:
case 128:
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
+ mtd_set_ooblayout(mtd,
+ nand_get_large_page_hamming_ooblayout());
break;
default:
/*
@@ -5770,9 +5658,9 @@ static int nand_scan_tail(struct nand_chip *chip)
* page with ECC layout when ->oobsize <= 128 for
* compatibility reasons.
*/
- if (ecc->mode == NAND_ECC_NONE) {
+ if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
mtd_set_ooblayout(mtd,
- &nand_ooblayout_lp_ops);
+ nand_get_large_page_ooblayout());
break;
}
@@ -5788,49 +5676,11 @@ static int nand_scan_tail(struct nand_chip *chip)
* selected and we have 256 byte pagesize fallback to software ECC
*/
- switch (ecc->mode) {
- case NAND_ECC_HW:
- /* Use standard hwecc read page function? */
- if (!ecc->read_page)
- ecc->read_page = nand_read_page_hwecc;
- if (!ecc->write_page)
- ecc->write_page = nand_write_page_hwecc;
- if (!ecc->read_page_raw)
- ecc->read_page_raw = nand_read_page_raw;
- if (!ecc->write_page_raw)
- ecc->write_page_raw = nand_write_page_raw;
- if (!ecc->read_oob)
- ecc->read_oob = nand_read_oob_std;
- if (!ecc->write_oob)
- ecc->write_oob = nand_write_oob_std;
- if (!ecc->read_subpage)
- ecc->read_subpage = nand_read_subpage;
- if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
- ecc->write_subpage = nand_write_subpage_hwecc;
- fallthrough;
- case NAND_ECC_HW_SYNDROME:
- if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
- (!ecc->read_page ||
- ecc->read_page == nand_read_page_hwecc ||
- !ecc->write_page ||
- ecc->write_page == nand_write_page_hwecc)) {
- WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
- ret = -EINVAL;
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ ret = nand_set_ecc_on_host_ops(chip);
+ if (ret)
goto err_nand_manuf_cleanup;
- }
- /* Use standard syndrome read/write page function? */
- if (!ecc->read_page)
- ecc->read_page = nand_read_page_syndrome;
- if (!ecc->write_page)
- ecc->write_page = nand_write_page_syndrome;
- if (!ecc->read_page_raw)
- ecc->read_page_raw = nand_read_page_raw_syndrome;
- if (!ecc->write_page_raw)
- ecc->write_page_raw = nand_write_page_raw_syndrome;
- if (!ecc->read_oob)
- ecc->read_oob = nand_read_oob_syndrome;
- if (!ecc->write_oob)
- ecc->write_oob = nand_write_oob_syndrome;
if (mtd->writesize >= ecc->size) {
if (!ecc->strength) {
@@ -5842,18 +5692,17 @@ static int nand_scan_tail(struct nand_chip *chip)
}
pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
ecc->size, mtd->writesize);
- ecc->mode = NAND_ECC_SOFT;
- ecc->algo = NAND_ECC_HAMMING;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ ecc->algo = NAND_ECC_ALGO_HAMMING;
fallthrough;
- case NAND_ECC_SOFT:
+
+ case NAND_ECC_ENGINE_TYPE_SOFT:
ret = nand_set_ecc_soft_ops(chip);
- if (ret) {
- ret = -EINVAL;
+ if (ret)
goto err_nand_manuf_cleanup;
- }
break;
- case NAND_ECC_ON_DIE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
if (!ecc->read_page || !ecc->write_page) {
WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
ret = -EINVAL;
@@ -5865,8 +5714,8 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->write_oob = nand_write_oob_std;
break;
- case NAND_ECC_NONE:
- pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
ecc->read_page = nand_read_page_raw;
ecc->write_page = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
@@ -5879,7 +5728,7 @@ static int nand_scan_tail(struct nand_chip *chip)
break;
default:
- WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+ WARN(1, "Invalid ECC engine type %d\n", ecc->engine_type);
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
@@ -5913,7 +5762,10 @@ static int nand_scan_tail(struct nand_chip *chip)
ret = -EINVAL;
goto err_nand_manuf_cleanup;
}
+
ecc->total = ecc->steps * ecc->bytes;
+ chip->base.ecc.ctx.total = ecc->total;
+
if (ecc->total > mtd->oobsize) {
WARN(1, "Total number of ECC bytes exceeded oobsize\n");
ret = -EINVAL;
@@ -5931,11 +5783,11 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->oobavail = ret;
/* ECC sanity check: warn if it's too weak */
- if (!nand_ecc_strength_good(chip))
+ if (!nand_ecc_is_strong_enough(&chip->base))
pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
mtd->name, chip->ecc.strength, chip->ecc.size,
- chip->base.eccreq.strength,
- chip->base.eccreq.step_size);
+ nanddev_get_ecc_requirements(&chip->base)->strength,
+ nanddev_get_ecc_requirements(&chip->base)->step_size);
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
@@ -5956,8 +5808,8 @@ static int nand_scan_tail(struct nand_chip *chip)
chip->pagecache.page = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
- switch (ecc->mode) {
- case NAND_ECC_SOFT:
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_SOFT:
if (chip->page_shift > 9)
chip->options |= NAND_SUBPAGE_READ;
break;
@@ -6101,8 +5953,8 @@ EXPORT_SYMBOL(nand_scan_with_ids);
*/
void nand_cleanup(struct nand_chip *chip)
{
- if (chip->ecc.mode == NAND_ECC_SOFT &&
- chip->ecc.algo == NAND_ECC_BCH)
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
+ chip->ecc.algo == NAND_ECC_ALGO_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
nanddev_cleanup(&chip->base);
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
index d5af8c5fd02f..9d19ac14c196 100644
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -165,6 +165,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
*/
nand->ecc.steps = eccsteps;
nand->ecc.total = eccsteps * eccbytes;
+ nand->base.ecc.ctx.total = nand->ecc.total;
if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
pr_warn("invalid ecc layout\n");
goto fail;
diff --git a/drivers/mtd/nand/raw/nand_esmt.c b/drivers/mtd/nand/raw/nand_esmt.c
index 3338c68aaaf1..4412c407aef3 100644
--- a/drivers/mtd/nand/raw/nand_esmt.c
+++ b/drivers/mtd/nand/raw/nand_esmt.c
@@ -10,27 +10,32 @@
static void esmt_nand_decode_id(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
+
nand_decode_ext_id(chip);
/* Extract ECC requirements from 5th id byte. */
if (chip->id.len >= 5 && nand_is_slc(chip)) {
- chip->base.eccreq.step_size = 512;
+ requirements.step_size = 512;
switch (chip->id.data[4] & 0x3) {
case 0x0:
- chip->base.eccreq.strength = 4;
+ requirements.strength = 4;
break;
case 0x1:
- chip->base.eccreq.strength = 2;
+ requirements.strength = 2;
break;
case 0x2:
- chip->base.eccreq.strength = 1;
+ requirements.strength = 1;
break;
default:
WARN(1, "Could not get ECC info");
- chip->base.eccreq.step_size = 0;
+ requirements.step_size = 0;
break;
}
}
+
+ nanddev_set_ecc_requirements(base, &requirements);
}
static int esmt_nand_init(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index 6d08eb834456..a9f50c9af109 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -495,34 +495,36 @@ static void hynix_nand_extract_oobsize(struct nand_chip *chip,
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
bool valid_jedecid)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
if (valid_jedecid) {
/* Reference: H27UCG8T2E datasheet */
- chip->base.eccreq.step_size = 1024;
+ requirements.step_size = 1024;
switch (ecc_level) {
case 0:
- chip->base.eccreq.step_size = 0;
- chip->base.eccreq.strength = 0;
+ requirements.step_size = 0;
+ requirements.strength = 0;
break;
case 1:
- chip->base.eccreq.strength = 4;
+ requirements.strength = 4;
break;
case 2:
- chip->base.eccreq.strength = 24;
+ requirements.strength = 24;
break;
case 3:
- chip->base.eccreq.strength = 32;
+ requirements.strength = 32;
break;
case 4:
- chip->base.eccreq.strength = 40;
+ requirements.strength = 40;
break;
case 5:
- chip->base.eccreq.strength = 50;
+ requirements.strength = 50;
break;
case 6:
- chip->base.eccreq.strength = 60;
+ requirements.strength = 60;
break;
default:
/*
@@ -543,14 +545,14 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
if (nand_tech < 3) {
/* > 26nm, reference: H27UBG8T2A datasheet */
if (ecc_level < 5) {
- chip->base.eccreq.step_size = 512;
- chip->base.eccreq.strength = 1 << ecc_level;
+ requirements.step_size = 512;
+ requirements.strength = 1 << ecc_level;
} else if (ecc_level < 7) {
if (ecc_level == 5)
- chip->base.eccreq.step_size = 2048;
+ requirements.step_size = 2048;
else
- chip->base.eccreq.step_size = 1024;
- chip->base.eccreq.strength = 24;
+ requirements.step_size = 1024;
+ requirements.strength = 24;
} else {
/*
* We should never reach this case, but if that
@@ -563,18 +565,20 @@ static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
} else {
/* <= 26nm, reference: H27UBG8T2B datasheet */
if (!ecc_level) {
- chip->base.eccreq.step_size = 0;
- chip->base.eccreq.strength = 0;
+ requirements.step_size = 0;
+ requirements.strength = 0;
} else if (ecc_level < 5) {
- chip->base.eccreq.step_size = 512;
- chip->base.eccreq.strength = 1 << (ecc_level - 1);
+ requirements.step_size = 512;
+ requirements.strength = 1 << (ecc_level - 1);
} else {
- chip->base.eccreq.step_size = 1024;
- chip->base.eccreq.strength = 24 +
+ requirements.step_size = 1024;
+ requirements.strength = 24 +
(8 * (ecc_level - 5));
}
}
}
+
+ nanddev_set_ecc_requirements(base, &requirements);
}
static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
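To make the sub-26nm decoding above concrete: ecc_level = 4 yields step_size = 512 and strength = 1 << (4 - 1) = 8, while ecc_level = 6 yields step_size = 1024 and strength = 24 + 8 * (6 - 5) = 32.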
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index b15c42f48755..85b6d9372d80 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -23,6 +23,7 @@
*/
int nand_jedec_detect(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
struct nand_jedec_params *p;
@@ -120,8 +121,12 @@ int nand_jedec_detect(struct nand_chip *chip)
ecc = &p->ecc_info[0];
if (ecc->codeword_size >= 9) {
- chip->base.eccreq.strength = ecc->ecc_bits;
- chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+ struct nand_ecc_props requirements = {
+ .strength = ecc->ecc_bits,
+ .step_size = 1 << ecc->codeword_size,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
} else {
pr_warn("Invalid codeword size\n");
}
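For example, a codeword_size of 9 gives 1 << 9 = 512-byte ECC steps; anything below 9 would describe a sub-512-byte codeword, which is why such values are rejected as invalid above.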
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 4385092a9325..c0192881906b 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -413,6 +413,8 @@ enum {
*/
static int micron_supports_on_die_ecc(struct nand_chip *chip)
{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
u8 id[5];
int ret;
@@ -425,7 +427,7 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+ if (requirements->strength != 4 && requirements->strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
/* 0x2 means on-die ECC is available. */
@@ -466,7 +468,7 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
/*
* We only support on-die ECC of 4/512 or 8/512
*/
- if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+ if (requirements->strength != 4 && requirements->strength != 8)
return MICRON_ON_DIE_UNSUPPORTED;
return MICRON_ON_DIE_SUPPORTED;
@@ -474,6 +476,9 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
static int micron_nand_init(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
struct mtd_info *mtd = nand_to_mtd(chip);
struct micron_nand *micron;
int ondie;
@@ -497,13 +502,13 @@ static int micron_nand_init(struct nand_chip *chip)
ondie = micron_supports_on_die_ecc(chip);
if (ondie == MICRON_ON_DIE_MANDATORY &&
- chip->ecc.mode != NAND_ECC_ON_DIE) {
+ chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_DIE) {
pr_err("On-die ECC forcefully enabled, not supported\n");
ret = -EINVAL;
goto err_free_manuf_data;
}
- if (chip->ecc.mode == NAND_ECC_ON_DIE) {
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE) {
if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
pr_err("On-die ECC selected but not supported\n");
ret = -EINVAL;
@@ -523,7 +528,7 @@ static int micron_nand_init(struct nand_chip *chip)
* That's not needed for 8-bit ECC, because the status expose
* a better approximation of the number of bitflips in a page.
*/
- if (chip->base.eccreq.strength == 4) {
+ if (requirements->strength == 4) {
micron->ecc.rawbuf = kmalloc(mtd->writesize +
mtd->oobsize,
GFP_KERNEL);
@@ -533,17 +538,17 @@ static int micron_nand_init(struct nand_chip *chip)
}
}
- if (chip->base.eccreq.strength == 4)
+ if (requirements->strength == 4)
mtd_set_ooblayout(mtd,
&micron_nand_on_die_4_ooblayout_ops);
else
mtd_set_ooblayout(mtd,
&micron_nand_on_die_8_ooblayout_ops);
- chip->ecc.bytes = chip->base.eccreq.strength * 2;
+ chip->ecc.bytes = requirements->strength * 2;
chip->ecc.size = 512;
- chip->ecc.strength = chip->base.eccreq.strength;
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.strength = requirements->strength;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
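With this, a 4/512 on-die chip ends up with ecc.bytes = 4 * 2 = 8 and the 4-bit OOB layout, while an 8/512 chip gets ecc.bytes = 16 and the 8-bit layout.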
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index be3456627288..45649e03797d 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -34,6 +34,8 @@ u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements;
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
@@ -94,8 +96,10 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
goto ext_out;
}
- chip->base.eccreq.strength = ecc->ecc_bits;
- chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+ requirements.strength = ecc->ecc_bits;
+ requirements.step_size = 1 << ecc->codeword_size;
+ nanddev_set_ecc_requirements(base, &requirements);
+
ret = 0;
ext_out:
@@ -139,6 +143,7 @@ static void nand_bit_wise_majority(const void **srcbufs,
*/
int nand_onfi_detect(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
struct nand_onfi_params *p = NULL, *pbuf;
@@ -265,8 +270,12 @@ int nand_onfi_detect(struct nand_chip *chip)
chip->options |= NAND_BUSWIDTH_16;
if (p->ecc_bits != 0xff) {
- chip->base.eccreq.strength = p->ecc_bits;
- chip->base.eccreq.step_size = 512;
+ struct nand_ecc_props requirements = {
+ .strength = p->ecc_bits,
+ .step_size = 512,
+ };
+
+ nanddev_set_ecc_requirements(base, &requirements);
} else if (onfi_version >= 21 &&
(le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
diff --git a/drivers/mtd/nand/raw/nand_samsung.c b/drivers/mtd/nand/raw/nand_samsung.c
index 3a4a19e808f6..0be6b7563805 100644
--- a/drivers/mtd/nand/raw/nand_samsung.c
+++ b/drivers/mtd/nand/raw/nand_samsung.c
@@ -10,6 +10,8 @@
static void samsung_nand_decode_id(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
@@ -71,23 +73,23 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
/* Extract ECC requirements from 5th id byte*/
extid = (chip->id.data[4] >> 4) & 0x07;
if (extid < 5) {
- chip->base.eccreq.step_size = 512;
- chip->base.eccreq.strength = 1 << extid;
+ requirements.step_size = 512;
+ requirements.strength = 1 << extid;
} else {
- chip->base.eccreq.step_size = 1024;
+ requirements.step_size = 1024;
switch (extid) {
case 5:
- chip->base.eccreq.strength = 24;
+ requirements.strength = 24;
break;
case 6:
- chip->base.eccreq.strength = 40;
+ requirements.strength = 40;
break;
case 7:
- chip->base.eccreq.strength = 60;
+ requirements.strength = 60;
break;
default:
WARN(1, "Could not decode ECC info");
- chip->base.eccreq.step_size = 0;
+ requirements.step_size = 0;
}
}
} else {
@@ -97,8 +99,8 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
switch (chip->id.data[1]) {
/* K9F4G08U0D-S[I|C]B0(T00) */
case 0xDC:
- chip->base.eccreq.step_size = 512;
- chip->base.eccreq.strength = 1;
+ requirements.step_size = 512;
+ requirements.strength = 1;
break;
/* K9F1G08U0E 21nm chips do not support subpage write */
@@ -112,6 +114,8 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
}
}
+
+ nanddev_set_ecc_requirements(base, &requirements);
}
static int samsung_nand_init(struct nand_chip *chip)
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index f746c19f3b2c..cf4f37959421 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -140,11 +140,13 @@ static void toshiba_nand_benand_init(struct nand_chip *chip)
chip->options |= NAND_SUBPAGE_READ;
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
}
static void toshiba_nand_decode_id(struct nand_chip *chip)
{
+ struct nand_device *base = &chip->base;
+ struct nand_ecc_props requirements = {};
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
@@ -175,23 +177,25 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
* - 24nm: 8 bit ECC for each 512Byte is required.
*/
if (chip->id.len >= 6 && nand_is_slc(chip)) {
- chip->base.eccreq.step_size = 512;
+ requirements.step_size = 512;
switch (chip->id.data[5] & 0x7) {
case 0x4:
- chip->base.eccreq.strength = 1;
+ requirements.strength = 1;
break;
case 0x5:
- chip->base.eccreq.strength = 4;
+ requirements.strength = 4;
break;
case 0x6:
- chip->base.eccreq.strength = 8;
+ requirements.strength = 8;
break;
default:
WARN(1, "Could not get ECC info");
- chip->base.eccreq.step_size = 0;
+ requirements.step_size = 0;
break;
}
}
+
+ nanddev_set_ecc_requirements(base, &requirements);
}
static int
@@ -273,7 +277,8 @@ static int toshiba_nand_init(struct nand_chip *chip)
chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
/* Check that chip is BENAND and ECC mode is on-die */
- if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
+ if (nand_is_slc(chip) &&
+ chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
toshiba_nand_benand_init(chip);
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index f5a53aac3c5f..a8048cb8d220 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -2234,8 +2234,8 @@ static int ns_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
@@ -2274,8 +2274,8 @@ static int __init ns_init_module(void)
nsmtd = nand_to_mtd(chip);
nand_set_controller_data(chip, (void *)ns);
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index ed38338c1383..0fb4ba93c41e 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -149,7 +149,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->ecc.correct = nand_correct_data;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 256;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index eb7fcfd9276b..512f60780a50 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -884,8 +884,8 @@ static int omap_correct_data(struct nand_chip *chip, u_char *dat,
int stat = 0;
/* Ex NAND_ECC_HW12_2048 */
- if ((info->nand.ecc.mode == NAND_ECC_HW) &&
- (info->nand.ecc.size == 2048))
+ if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
+ info->nand.ecc.size == 2048)
blockCnt = 4;
else
blockCnt = 1;
@@ -2006,12 +2006,12 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
return -EINVAL;
/*
- * Bail out earlier to let NAND_ECC_SOFT code create its own
+ * Bail out early to let the NAND_ECC_ENGINE_TYPE_SOFT code create its own
* ooblayout instead of using ours.
*/
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
return 0;
}
@@ -2019,7 +2019,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.bytes = 3;
chip->ecc.size = 512;
chip->ecc.strength = 1;
@@ -2036,7 +2036,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 7;
chip->ecc.strength = 4;
@@ -2056,7 +2056,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case OMAP_ECC_BCH4_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
chip->ecc.bytes = 7 + 1;
@@ -2078,7 +2078,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 13;
chip->ecc.strength = 8;
@@ -2098,7 +2098,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case OMAP_ECC_BCH8_CODE_HW:
pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
/* 14th bit is kept reserved for ROM-code compatibility */
chip->ecc.bytes = 13 + 1;
@@ -2121,7 +2121,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case OMAP_ECC_BCH16_CODE_HW:
pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.bytes = 26;
chip->ecc.strength = 16;
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 880b54ca1b41..df9c0f8e4b4e 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -139,8 +139,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
nc->legacy.read_buf = orion_nand_read_buf;
- nc->ecc.mode = NAND_ECC_SOFT;
- nc->ecc.algo = NAND_ECC_HAMMING;
+ nc->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ nc->ecc.algo = NAND_ECC_ALGO_HAMMING;
if (board->chip_delay)
nc->legacy.chip_delay = board->chip_delay;
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index 8d0d76ad319d..f44947043e5a 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -144,8 +144,7 @@ static int oxnas_nand_probe(struct platform_device *pdev)
if (err)
goto err_cleanup_nand;
- oxnas->chips[oxnas->nchips] = chip;
- ++oxnas->nchips;
+ oxnas->chips[oxnas->nchips++] = chip;
}
/* Exit if no chips found */
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index d8eca8c3fdcd..2b8f155cc0c5 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -68,7 +68,7 @@ static void pasemi_hwcontrol(struct nand_chip *chip, int cmd,
inl(lpcctl);
}
-int pasemi_device_ready(struct nand_chip *chip)
+static int pasemi_device_ready(struct nand_chip *chip)
{
return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
}
@@ -132,8 +132,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->legacy.read_buf = pasemi_read_buf;
chip->legacy.write_buf = pasemi_write_buf;
chip->legacy.chip_delay = 0;
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* Enable the following for a flash based bad block table */
chip->bbt_options = NAND_BBT_USE_FLASH;
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 556182f26057..b98c0d5c413f 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -66,8 +66,8 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
- data->chip.ecc.mode = NAND_ECC_SOFT;
- data->chip.ecc.algo = NAND_ECC_HAMMING;
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ data->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index bd7a7251429b..777fb0de0680 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2550,7 +2550,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
ecc->write_page_raw = qcom_nandc_write_page_raw;
ecc->write_oob = qcom_nandc_write_oob;
- ecc->mode = NAND_ECC_HW;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
@@ -2702,10 +2702,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (IS_ERR(nandc->tx_chan)) {
ret = PTR_ERR(nandc->tx_chan);
nandc->tx_chan = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(nandc->dev,
- "tx DMA channel request failed: %d\n",
- ret);
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
goto unalloc;
}
@@ -2713,10 +2711,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (IS_ERR(nandc->rx_chan)) {
ret = PTR_ERR(nandc->rx_chan);
nandc->rx_chan = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(nandc->dev,
- "rx DMA channel request failed: %d\n",
- ret);
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
goto unalloc;
}
@@ -2724,10 +2720,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (IS_ERR(nandc->cmd_chan)) {
ret = PTR_ERR(nandc->cmd_chan);
nandc->cmd_chan = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(nandc->dev,
- "cmd DMA channel request failed: %d\n",
- ret);
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
goto unalloc;
}
@@ -2750,10 +2744,8 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
if (IS_ERR(nandc->chan)) {
ret = PTR_ERR(nandc->chan);
nandc->chan = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(nandc->dev,
- "rxtx DMA channel request failed: %d\n",
- ret);
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
return ret;
}
}
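A note on the conversion above: dev_err_probe() only prints for errors other than -EPROBE_DEFER, and for deferrals it records the reason (visible in debugfs under devices_deferred), so it subsumes the hand-rolled "ret != -EPROBE_DEFER" guards that each call site carried before.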
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index f865e3a47b01..6b7addd2c420 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -859,7 +859,8 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
chip->legacy.write_buf = r852_write_buf;
/* ecc */
- chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
chip->ecc.size = R852_DMA_LEN;
chip->ecc.bytes = SM_OOB_SIZE;
chip->ecc.strength = 2;
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 105522205979..fbd0fa48e063 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -904,7 +904,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->info = info;
nmtd->set = set;
- chip->ecc.mode = info->platform->ecc_mode;
+ chip->ecc.engine_type = info->platform->engine_type;
/*
* If you use u-boot BBT creation code, specifying this flag will
@@ -929,24 +929,24 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- switch (chip->ecc.mode) {
+ switch (chip->ecc.engine_type) {
- case NAND_ECC_NONE:
+ case NAND_ECC_ENGINE_TYPE_NONE:
dev_info(info->device, "ECC disabled\n");
break;
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
/*
- * This driver expects Hamming based ECC when ecc_mode is set
- * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
- * avoid adding an extra ecc_algo field to
- * s3c2410_platform_nand.
+ * This driver expects Hamming based ECC when engine_type is set
+ * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
+ * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
+ * to s3c2410_platform_nand.
*/
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
dev_info(info->device, "soft ECC\n");
break;
- case NAND_ECC_HW:
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
chip->ecc.correct = s3c2410_nand_correct_data;
chip->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index a661b8bb2dd5..13df4bdf792a 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1039,13 +1039,13 @@ static int flctl_chip_attach_chip(struct nand_chip *chip)
chip->ecc.strength = 4;
chip->ecc.read_page = flctl_read_page_hwecc;
chip->ecc.write_page = flctl_write_page_hwecc;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
/* 4 symbols ECC enabled */
flctl->flcmncr_base |= _4ECCEN;
} else {
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
return 0;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 51286f7acf54..1327bfb3d5d3 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -157,7 +157,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* 15 us command delay time */
this->legacy.chip_delay = 15;
/* set eccmode using hardware ECC */
- this->ecc.mode = NAND_ECC_HW;
+ this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
this->ecc.size = 256;
this->ecc.bytes = 3;
this->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 243b34cfbc1b..0f63ff6f7fe7 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -153,8 +153,9 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_chip->legacy.read_buf = socrates_nand_read_buf;
nand_chip->legacy.dev_ready = socrates_nand_device_ready;
- nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
- nand_chip->ecc.algo = NAND_ECC_HAMMING;
+ /* enable ECC */
+ nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
/* TODO: I have no idea what real delay is. */
nand_chip->legacy.chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 7f4546ae9130..550bda4d1415 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1696,17 +1696,25 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
int ret;
/*
- * Only NAND_ECC_HW mode is actually supported
+ * Only NAND_ECC_ENGINE_TYPE_ON_HOST mode is actually supported
* Hamming => ecc.strength = 1
* BCH4 => ecc.strength = 4
* BCH8 => ecc.strength = 8
* ECC sector size = 512
*/
- if (chip->ecc.mode != NAND_ECC_HW) {
- dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n");
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) {
+ dev_err(nfc->dev,
+ "nand_ecc_engine_type is not well defined in the DT\n");
return -EINVAL;
}
+ /* Default ECC settings in case they are not set in the device tree */
+ if (!chip->ecc.size)
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+
+ if (!chip->ecc.strength)
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
mtd->oobsize - FMC2_BBM_LEN);
if (ret) {
@@ -1726,8 +1734,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
- if (chip->options & NAND_BUSWIDTH_16)
- stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+ stm32_fmc2_nfc_setup(chip);
return 0;
}
@@ -1762,7 +1769,7 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
return ret;
}
- if (cs > FMC2_MAX_CE) {
+ if (cs >= FMC2_MAX_CE) {
dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
@@ -1951,11 +1958,6 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
NAND_USES_DMA;
- /* Default ECC settings */
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = FMC2_ECC_STEP_SIZE;
- chip->ecc.strength = FMC2_ECC_BCH8;
-
/* Scan to find existence of the device */
ret = nand_scan(chip, nand->ncs);
if (ret)
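The default step size and strength had to move from probe (removed just above) into the attach hook (added earlier in this file's diff): rawnand_dt_init() now unconditionally copies the user configuration into chip->ecc before attach_chip runs, so defaults assigned at probe time would be overwritten by empty DT values.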
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 9c50c2b965e1..2a7ca3072f35 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1575,7 +1575,7 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
* only have 2 bytes available in the first user data
* section.
*/
- if (!section && ecc->mode == NAND_ECC_HW) {
+ if (!section && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
oobregion->offset = 2;
oobregion->length = 2;
@@ -1609,12 +1609,13 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_device *nanddev = mtd_to_nanddev(mtd);
struct sunxi_nand_hw_ecc *data;
int nsectors;
int ret;
int i;
- if (ecc->options & NAND_ECC_MAXIMIZE) {
+ if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
int bytes;
ecc->size = 1024;
@@ -1720,11 +1721,11 @@ err:
static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
{
- switch (ecc->mode) {
- case NAND_ECC_HW:
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
sunxi_nand_hw_ecc_ctrl_cleanup(ecc);
break;
- case NAND_ECC_NONE:
+ case NAND_ECC_ENGINE_TYPE_NONE:
default:
break;
}
@@ -1732,6 +1733,8 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
static int sunxi_nand_attach_chip(struct nand_chip *nand)
{
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&nand->base);
struct nand_ecc_ctrl *ecc = &nand->ecc;
struct device_node *np = nand_get_flash_node(nand);
int ret;
@@ -1745,21 +1748,21 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_SUBPAGE_READ;
if (!ecc->size) {
- ecc->size = nand->base.eccreq.step_size;
- ecc->strength = nand->base.eccreq.strength;
+ ecc->size = requirements->step_size;
+ ecc->strength = requirements->strength;
}
if (!ecc->size || !ecc->strength)
return -EINVAL;
- switch (ecc->mode) {
- case NAND_ECC_HW:
+ switch (ecc->engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np);
if (ret)
return ret;
break;
- case NAND_ECC_NONE:
- case NAND_ECC_SOFT:
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
break;
default:
return -EINVAL;
@@ -1991,7 +1994,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
* Set the ECC mode to the default value in case nothing is specified
* in the DT.
*/
- nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand_set_flash_node(nand, np);
mtd = nand_to_mtd(nand);
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index bdb965ae7a4a..359187b5a4be 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -549,8 +549,8 @@ static int tango_attach_chip(struct nand_chip *chip)
{
struct nand_ecc_ctrl *ecc = &chip->ecc;
- ecc->mode = NAND_ECC_HW;
- ecc->algo = NAND_ECC_BCH;
+ ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ ecc->algo = NAND_ECC_ALGO_BCH;
ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
ecc->read_page_raw = tango_read_page_raw;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 6b6212ffa01c..fbf67722a049 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -479,7 +479,7 @@ static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
{
struct tegra_nand_chip *nand = to_tegra_chip(chip);
- if (chip->ecc.algo == NAND_ECC_BCH && enable)
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
else
writel_relaxed(0, ctrl->regs + BCH_CONFIG);
@@ -840,7 +840,10 @@ static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
int strength_len, int bits_per_step,
int oobsize)
{
- bool maximize = chip->ecc.options & NAND_ECC_MAXIMIZE;
+ struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(base);
+ bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
int i;
/*
@@ -855,7 +858,7 @@ static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
} else {
strength_sel = strength[i];
- if (strength_sel < chip->base.eccreq.strength)
+ if (strength_sel < requirements->strength)
continue;
}
@@ -877,7 +880,7 @@ static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
int strength_len, bits_per_step;
switch (chip->ecc.algo) {
- case NAND_ECC_RS:
+ case NAND_ECC_ALGO_RS:
bits_per_step = BITS_PER_STEP_RS;
if (chip->options & NAND_IS_BOOT_MEDIUM) {
strength = rs_strength_bootable;
@@ -887,7 +890,7 @@ static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
strength_len = ARRAY_SIZE(rs_strength);
}
break;
- case NAND_ECC_BCH:
+ case NAND_ECC_ALGO_BCH:
bits_per_step = BITS_PER_STEP_BCH;
if (chip->options & NAND_IS_BOOT_MEDIUM) {
strength = bch_strength_bootable;
@@ -908,6 +911,8 @@ static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+ const struct nand_ecc_props *requirements =
+ nanddev_get_ecc_requirements(&chip->base);
struct tegra_nand_chip *nand = to_tegra_chip(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
int bits_per_step;
@@ -916,12 +921,12 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.size = 512;
chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->base.eccreq.step_size != 512) {
+ if (requirements->step_size != 512) {
dev_err(ctrl->dev, "Unsupported step size %d\n",
- chip->base.eccreq.step_size);
+ requirements->step_size);
return -EINVAL;
}
@@ -935,14 +940,14 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
if (chip->options & NAND_BUSWIDTH_16)
nand->config |= CONFIG_BUS_WIDTH_16;
- if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
if (mtd->writesize < 2048)
- chip->ecc.algo = NAND_ECC_RS;
+ chip->ecc.algo = NAND_ECC_ALGO_RS;
else
- chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.algo = NAND_ECC_ALGO_BCH;
}
- if (chip->ecc.algo == NAND_ECC_BCH && mtd->writesize < 2048) {
+ if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
return -EINVAL;
}
@@ -952,7 +957,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
if (ret < 0) {
dev_err(ctrl->dev,
"No valid strength found, minimum %d\n",
- chip->base.eccreq.strength);
+ requirements->strength);
return ret;
}
@@ -963,7 +968,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
CONFIG_SKIP_SPARE_SIZE_4;
switch (chip->ecc.algo) {
- case NAND_ECC_RS:
+ case NAND_ECC_ALGO_RS:
bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
@@ -984,7 +989,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
break;
- case NAND_ECC_BCH:
+ case NAND_ECC_ALGO_BCH:
bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
nand->bch_config = BCH_ENABLE;
@@ -1013,7 +1018,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
}
dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
- chip->ecc.algo == NAND_ECC_BCH ? "BCH" : "RS",
+ chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
chip->ecc.strength);
chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);
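The other half of the pattern visible in the tegra hunks: per-chip ECC requirements moved from the open-coded chip->base.eccreq fields to the nanddev_get_ecc_requirements() accessor, which returns a const struct nand_ecc_props. A minimal sketch of the consumer side, with illustrative example_* naming:

#include <linux/mtd/rawnand.h>

/* Sketch only: query the chip's minimum ECC requirements through the
 * accessor instead of touching chip->base.eccreq directly. */
static int example_validate_ecc(struct nand_chip *chip)
{
	const struct nand_ecc_props *reqs =
		nanddev_get_ecc_requirements(&chip->base);

	if (reqs->step_size != 512)
		return -EINVAL;	/* controller handles 512-byte steps only */

	return reqs->strength;	/* minimum correctability the chip needs */
}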
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index 843a8683b737..235a2f7b1bad 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -410,7 +410,7 @@ static int tmio_probe(struct platform_device *dev)
nand_chip->legacy.read_buf = tmio_nand_read_buf;
/* set eccmode using hardware ECC */
- nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 6;
nand_chip->ecc.strength = 2;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 47d966871445..ef81dce6b5c4 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -329,7 +329,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
chip->ecc.calculate = txx9ndfmc_calculate_ecc;
chip->ecc.correct = txx9ndfmc_correct_data;
chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
- chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
chip->ecc.strength = 1;
chip->legacy.chip_delay = 100;
chip->controller = &drvdata->controller;
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 7248c5901183..40d70f991d89 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -323,11 +323,6 @@ static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
CONFIG_ECC_MODE_SHIFT, ecc_mode);
}
-static inline void vf610_nfc_transfer_size(struct vf610_nfc *nfc, int size)
-{
- vf610_nfc_write(nfc, NFC_SECTOR_SIZE, size);
-}
-
static inline void vf610_nfc_run(struct vf610_nfc *nfc, u32 col, u32 row,
u32 cmd1, u32 cmd2, u32 trfr_sz)
{
@@ -732,7 +727,7 @@ static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
else
vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
- if (nfc->chip.ecc.mode == NAND_ECC_HW) {
+ if (nfc->chip.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
/* Set ECC status offset in SRAM */
vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
CONFIG_ECC_SRAM_ADDR_MASK,
@@ -761,7 +756,7 @@ static int vf610_nfc_attach_chip(struct nand_chip *chip)
return -ENXIO;
}
- if (chip->ecc.mode != NAND_ECC_HW)
+ if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
return 0;
if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
@@ -779,7 +774,7 @@ static int vf610_nfc_attach_chip(struct nand_chip *chip)
mtd->oobsize = 64;
/* Use default large page ECC layout defined in NAND core */
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
if (chip->ecc.strength == 32) {
nfc->ecc_mode = ECC_60_BYTE;
chip->ecc.bytes = 60;
@@ -852,8 +847,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
- if (!of_id)
- return -ENODEV;
+ if (!of_id) {
+ err = -ENODEV;
+ goto err_disable_clk;
+ }
nfc->variant = (enum vf610_nfc_variant)of_id->data;
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 29255476afdb..f2dbd63a5c1f 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -180,8 +180,8 @@ static int xway_nand_probe(struct platform_device *pdev)
data->chip.legacy.read_byte = xway_read_byte;
data->chip.legacy.chip_delay = 30;
- data->chip.ecc.mode = NAND_ECC_SOFT;
- data->chip.ecc.algo = NAND_ECC_HAMMING;
+ data->chip.ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+ data->chip.ecc.algo = NAND_ECC_ALGO_HAMMING;
platform_set_drvdata(pdev, data);
nand_set_controller_data(&data->chip, data);
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index e2c382ffc5b6..c35221794645 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -419,7 +419,7 @@ static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
* fixed, so let's return the maximum possible value so that
* wear-leveling layers move the data immediately.
*/
- return nand->eccreq.strength;
+ return nanddev_get_ecc_conf(nand)->strength;
case STATUS_ECC_UNCOR_ERROR:
return -EBADMSG;
@@ -497,7 +497,7 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_lock(&spinand->lock);
- nanddev_io_for_each_page(nand, from, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -545,7 +545,7 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
mutex_lock(&spinand->lock);
- nanddev_io_for_each_page(nand, to, ops, &iter) {
+ nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
ret = spinand_select_target(spinand, iter.req.pos.target);
if (ret)
break;
@@ -902,7 +902,7 @@ int spinand_match_and_init(struct spinand_device *spinand,
continue;
nand->memorg = table[i].memorg;
- nand->eccreq = table[i].eccreq;
+ nanddev_set_ecc_requirements(nand, &table[i].eccreq);
spinand->eccinfo = table[i].eccinfo;
spinand->flags = table[i].flags;
spinand->id.len = 1 + table[i].devid.len;
@@ -1090,8 +1090,8 @@ static int spinand_init(struct spinand_device *spinand)
mtd->oobavail = ret;
/* Propagate ECC information to mtd_info */
- mtd->ecc_strength = nand->eccreq.strength;
- mtd->ecc_step_size = nand->eccreq.step_size;
+ mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
return 0;
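On the SPI-NAND side the same split shows up as nanddev_get_ecc_conf() for the active configuration and nanddev_set_ecc_requirements() for the chip's declared needs. The propagation step boils down to the following, lifted directly from the patched spinand_init() above:

/* Sketch, mirroring the hunk above: the active ECC configuration is now
 * read through nanddev_get_ecc_conf() rather than nand->eccreq. */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;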
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index d219c970042a..33c67403c4aa 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -21,7 +21,7 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
@@ -29,7 +29,7 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
@@ -132,6 +132,35 @@ static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
.free = gd5fxgq4_variant2_ooblayout_free,
};
+static int gd5fxgq4xc_ooblayout_256_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 128;
+ oobregion->length = 128;
+
+ return 0;
+}
+
+static int gd5fxgq4xc_ooblayout_256_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 1;
+ oobregion->length = 127;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xc_oob_256_ops = {
+ .ecc = gd5fxgq4xc_ooblayout_256_ecc,
+ .free = gd5fxgq4xc_ooblayout_256_free,
+};
+
static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
u8 status)
{
@@ -202,7 +231,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F2GQ4xA",
@@ -212,7 +241,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
SPINAND_INFO("GD5F4GQ4xA",
@@ -222,9 +251,29 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4RC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xa4, 0x68),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+ gd5fxgq4ufxxg_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4UC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xb4, 0x68),
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgq4xc_oob_256_ops,
+ gd5fxgq4ufxxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -232,7 +281,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
SPINAND_INFO("GD5F1GQ4UFxxG",
@@ -242,7 +291,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
&write_cache_variants,
&update_cache_variants),
- 0,
+ SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
gd5fxgq4ufxxg_ecc_get_status)),
};
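As a cross-check on the new GD5F4GQ4xC entries: assuming the NAND_MEMORG argument order (bits per cell, page size, OOB size, pages per block, blocks per LUN, max bad blocks per LUN, planes, LUNs, targets), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1) encodes a 4 Gbit part, which matches the GD5F4G name:

#include <stdio.h>

/* Sketch: sanity-check the geometry (the field order is an assumption). */
int main(void)
{
	unsigned long long page = 4096, pages_per_block = 64, blocks = 2048;
	unsigned long long bytes = page * pages_per_block * blocks;

	printf("%llu MiB\n", bytes >> 20);	/* 512 MiB == 4 Gbit */
	return 0;
}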
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 0f900f3aa21a..8e801e4c3a00 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -84,10 +84,11 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
- return nand->eccreq.strength;
+ return nanddev_get_ecc_conf(nand)->strength;
- if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
- return nand->eccreq.strength;
+ if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength ||
+ !eccsr))
+ return nanddev_get_ecc_conf(nand)->strength;
return eccsr;
@@ -118,6 +119,26 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_INFO("MX31LF1GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0 /*SPINAND_HAS_QE_BIT*/,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX31UF1GE4BC",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9e),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0 /*SPINAND_HAS_QE_BIT*/,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
};
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index bc801d83343e..21fde2875674 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -90,12 +90,12 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
* data around if it's not necessary.
*/
if (spi_mem_exec_op(spinand->spimem, &op))
- return nand->eccreq.strength;
+ return nanddev_get_ecc_conf(nand)->strength;
mbf >>= 4;
- if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
- return nand->eccreq.strength;
+ if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
+ return nanddev_get_ecc_conf(nand)->strength;
return mbf;
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index f98363c9b363..e72354322f62 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -12,7 +12,7 @@ config MTD_BCM47XX_PARTS
boards.
config MTD_BCM63XX_PARTS
- tristate "BCM63XX CFE partitioning parser"
+ bool "BCM63XX CFE partitioning parser"
depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
select MTD_PARSER_IMAGETAG
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
index c72aa1ab71ad..555fe55d14ae 100644
--- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c
@@ -73,6 +73,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 0369d98b2d12..f0ae7a01703a 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2701,11 +2701,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
- if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ if (spi_nor_parse_sfdp(nor, nor->params)) {
+ memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
nor->addr_width = 0;
nor->flags &= ~SNOR_F_4B_OPCODES;
- } else {
- memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
}
}
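Net effect of the restructuring above: spi_nor_parse_sfdp() now writes into nor->params directly, and the pre-parse backup is copied back only on failure, together with undoing the SFDP side effects. Consolidated, the post-patch flow reads as follows (sketch assembled from the hunk):

/* Sketch of the post-patch flow: back up, parse in place, restore on error. */
memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

if (spi_nor_parse_sfdp(nor, nor->params)) {
	/* Parsing failed part-way: undo partial updates and SFDP effects. */
	memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
	nor->addr_width = 0;
	nor->flags &= ~SNOR_F_4B_OPCODES;
}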
@@ -3009,13 +3008,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor)
/* already configured from SFDP */
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
- } else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
} else {
nor->addr_width = 3;
}
+ if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ }
+
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_dbg(nor->dev, "address width is too large: %u\n",
nor->addr_width);
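The address-width change above is behavioral, not cosmetic: the >16 MiB promotion to 4-byte addressing used to apply only when no width was specified at all, whereas now any 3-byte width — including one taken from the flash_info table — is promoted when the part is larger than 16 MiB:

/* Sketch of the post-patch decision: 0x1000000 == 16 MiB. */
if (nor->addr_width == 3 && nor->mtd.size > 0x1000000)
	nor->addr_width = 4;	/* e.g. a 32 MiB part whose flash_info entry
				 * says addr_width = 3 is now promoted too */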
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index f97f3d127575..9203abaac229 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -50,7 +50,7 @@ static const struct flash_info macronix_parts[] = {
{ "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) },
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
- { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ |
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 6dcde15fb1aa..e5dfa786f190 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -63,6 +63,15 @@ static const struct flash_info winbond_parts[] = {
{ "w25q32jwm", INFO(0xef8016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
+ { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
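Reading the new Winbond entries, assuming the usual INFO() field order (JEDEC ID, extended ID, sector size, number of sectors, flags): 0xef8018 with 256 sectors of 64 KiB gives the 16 MiB (128 Mbit) density the w25q128jwm name implies, with 4 KiB erase, dual/quad read and block protection advertised through the flags:

/* Sketch: density check for the w25q128jwm line (field order assumed). */
unsigned int sector_size = 64 * 1024;
unsigned int n_sectors = 256;
unsigned int mib = (sector_size / 1024) * n_sectors / 1024;	/* = 16 MiB */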
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42cac572f82d..7847de75a74c 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1639,6 +1639,19 @@ int ubi_thread(void *u)
!ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Check kthread_should_stop() after we set the task
+ * state to guarantee that we either see the stop bit
+ * and exit, or the task state is reset to runnable so
+ * that the thread is not scheduled out indefinitely and
+ * detects the stop bit at kthread_should_stop().
+ */
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+
schedule();
continue;
}
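The hunk above applies the standard kthread sleep pattern: set the task state before re-checking the wake conditions, so that a concurrent kthread_stop() either resets the thread to runnable or is observed before schedule() is called. A minimal generic sketch of the pattern, with an illustrative thread function name:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch only: the set-state-then-recheck idiom the UBI hunk completes. */
static int example_thread(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Recheck after setting the state so a stop request cannot
		 * be lost between the check and schedule(). */
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			break;
		}
		schedule();	/* sleep until woken */
	}
	return 0;
}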
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1368d1d6a114..c3dbe64e628e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -473,6 +473,10 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/mdio/Kconfig"
+
+source "drivers/net/pcs/Kconfig"
+
source "drivers/net/plip/Kconfig"
source "drivers/net/ppp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 94b60800887a..72e18d505d1a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += mdio/
+obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index d4f22a2e5be4..43918398f0d3 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -48,7 +48,7 @@ config LTPC
If you are in doubt, this card is the one with the 65C02 chip on it.
You also need version 1.3.3 or later of the netatalk package.
This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/ltpc.rst>.
+ See the file <file:Documentation/networking/device_drivers/appletalk/ltpc.rst>.
config COPS
tristate "COPS LocalTalk PC support"
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1c6c27f35ac4..ba8e70a8e312 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -70,6 +70,8 @@ static const char *version =
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 75a5a9b87c5a..c6f73aa3700c 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -229,6 +229,8 @@ static int dma;
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <net/Space.h>
+
#include <asm/dma.h>
#include <asm/io.h>
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 841910f1db65..ff0bea1554f9 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -54,7 +54,6 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
- struct pcpu_sw_netstats *stats;
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
@@ -160,13 +159,9 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
- if (likely(err == NET_RX_SUCCESS)) {
- stats = this_cpu_ptr(bareudp->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(bareudp->dev, len);
+
return 0;
drop:
/* Consume bad packet */
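The bareudp hunk swaps the open-coded per-CPU statistics update for the dev_sw_netstats_rx_add() helper. For reference, this is the sequence the removed lines performed, and what the helper is assumed to wrap internally:

/* Sketch: the open-coded form removed above, kept here for comparison. */
struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
stats->rx_bytes += len;
u64_stats_update_end(&stats->syncp);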
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index c245edddb48b..a77124bc1f4b 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -20,25 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-config CAIF_SPI_SLAVE
- tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF && HAS_DMA
- default n
- help
- The CAIF Link layer SPI Protocol driver for Slave SPI interface.
- This driver implements a platform driver to accommodate for a
- platform specific SPI device. A sample CAIF SPI Platform device is
- provided in <file:Documentation/networking/caif/spi_porting.rst>.
-
-config CAIF_SPI_SYNC
- bool "Next command and length in start of frame"
- depends on CAIF_SPI_SLAVE
- default n
- help
- Putting the next command and length in the start of the frame can
- help to synchronize to the next transfer in case of over or under-runs.
- This option also needs to be enabled on the modem.
-
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 54ae1165d60a..b1918c8c126c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -4,10 +4,6 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
-# SPI slave physical interfaces module
-cfspi_slave-objs := caif_spi.o caif_spi_slave.o
-obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
-
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4a33ec4fc089..3d63b15bbaa1 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -458,15 +458,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb_reset_mac_header(skb);
skb->dev = cfhsi->ndev;
- /*
- * We are in a callback handler and
- * unfortunately we don't know what context we're
- * running in.
- */
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
/* Update network statistics. */
cfhsi->ndev->stats.rx_packets++;
@@ -587,14 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb_reset_mac_header(skb);
skb->dev = cfhsi->ndev;
- /*
- * We're called in callback from HSI
- * and don't know the context we're running in.
- */
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
/* Update network statistics. */
cfhsi->ndev->stats.rx_packets++;
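Both caif_hsi hunks replace the same context-probing dance with netif_rx_any_context(), which picks the right receive path itself. The open-coded equivalent, taken from the removed lines above:

/* Sketch: what netif_rx_any_context(skb) folds into one call. */
if (in_interrupt())
	netif_rx(skb);
else
	netif_rx_ni(skb);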
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
deleted file mode 100644
index 7d5899626130..000000000000
--- a/drivers/net/caif/caif_spi.c
+++ /dev/null
@@ -1,874 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <linux/if_arp.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_spi.h>
-
-#ifndef CONFIG_CAIF_SPI_SYNC
-#define FLAVOR "Flavour: Vanilla.\n"
-#else
-#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
-#endif /* CONFIG_CAIF_SPI_SYNC */
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF SPI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-
-static bool spi_loop;
-module_param(spi_loop, bool, 0444);
-MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
-
-/* SPI frame alignment. */
-module_param(spi_frm_align, int, 0444);
-MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-
-/*
- * SPI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-module_param(spi_up_head_align, int, 0444);
-MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
-
-module_param(spi_up_tail_align, int, 0444);
-MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
-
-module_param(spi_down_head_align, int, 0444);
-MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
-
-module_param(spi_down_tail_align, int, 0444);
-MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
-
-#ifdef CONFIG_ARM
-#define BYTE_HEX_FMT "%02X"
-#else
-#define BYTE_HEX_FMT "%02hhX"
-#endif
-
-#define SPI_MAX_PAYLOAD_SIZE 4096
-/*
- * Threshold values for the SPI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * deasserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK 100
-#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
-
-#ifndef CONFIG_HAS_DMA
-
-/*
- * We sometimes use UML for debugging, but it cannot handle
- * dma_alloc_coherent so we have to wrap it.
- */
-static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
-{
- return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
-}
-
-static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
- dma_addr_t handle)
-{
- kfree(cpu_addr);
-}
-
-#else
-
-static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
-{
- return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
- GFP_KERNEL);
-}
-
-static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
- dma_addr_t handle)
-{
- dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
-}
-#endif /* CONFIG_HAS_DMA */
-
-#ifdef CONFIG_DEBUG_FS
-
-#define DEBUGFS_BUF_SIZE 4096
-
-static struct dentry *dbgfs_root;
-
-static inline void driver_debugfs_create(void)
-{
- dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
-}
-
-static inline void driver_debugfs_remove(void)
-{
- debugfs_remove(dbgfs_root);
-}
-
-static inline void dev_debugfs_rem(struct cfspi *cfspi)
-{
- debugfs_remove(cfspi->dbgfs_frame);
- debugfs_remove(cfspi->dbgfs_state);
- debugfs_remove(cfspi->dbgfs_dir);
-}
-
-static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int len = 0;
- ssize_t size;
- struct cfspi *cfspi = file->private_data;
-
- buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- /* Print out debug information. */
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "CAIF SPI debug information:\n");
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "STATE: %d\n", cfspi->dbg_state);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous CMD: 0x%x\n", cfspi->pcmd);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current CMD: 0x%x\n", cfspi->cmd);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous TX len: %d\n", cfspi->tx_ppck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous RX len: %d\n", cfspi->rx_ppck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current TX len: %d\n", cfspi->tx_cpck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current RX len: %d\n", cfspi->rx_cpck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Next TX len: %d\n", cfspi->tx_npck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Next RX len: %d\n", cfspi->rx_npck_len);
-
- if (len > DEBUGFS_BUF_SIZE)
- len = DEBUGFS_BUF_SIZE;
-
- size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return size;
-}
-
-static ssize_t print_frame(char *buf, size_t size, char *frm,
- size_t count, size_t cut)
-{
- int len = 0;
- int i;
- for (i = 0; i < count; i++) {
- len += scnprintf((buf + len), (size - len),
- "[0x" BYTE_HEX_FMT "]",
- frm[i]);
- if ((i == cut) && (count > (cut * 2))) {
- /* Fast forward. */
- i = count - cut;
- len += scnprintf((buf + len), (size - len),
- "--- %zu bytes skipped ---\n",
- count - (cut * 2));
- }
-
- if ((!(i % 10)) && i) {
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "\n");
- }
- }
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
- return len;
-}
-
-static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int len = 0;
- ssize_t size;
- struct cfspi *cfspi;
-
- cfspi = file->private_data;
- buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- /* Print out debug information. */
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current frame:\n");
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
-
- len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx[0],
- (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
-
- len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_rx,
- (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
-
- size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return size;
-}
-
-static const struct file_operations dbgfs_state_fops = {
- .open = simple_open,
- .read = dbgfs_state,
- .owner = THIS_MODULE
-};
-
-static const struct file_operations dbgfs_frame_fops = {
- .open = simple_open,
- .read = dbgfs_frame,
- .owner = THIS_MODULE
-};
-
-static inline void dev_debugfs_add(struct cfspi *cfspi)
-{
- cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
- cfspi->dbgfs_state = debugfs_create_file("state", 0444,
- cfspi->dbgfs_dir, cfspi,
- &dbgfs_state_fops);
- cfspi->dbgfs_frame = debugfs_create_file("frame", 0444,
- cfspi->dbgfs_dir, cfspi,
- &dbgfs_frame_fops);
-}
-
-inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
-{
- cfspi->dbg_state = state;
-};
-#else
-
-static inline void driver_debugfs_create(void)
-{
-}
-
-static inline void driver_debugfs_remove(void)
-{
-}
-
-static inline void dev_debugfs_add(struct cfspi *cfspi)
-{
-}
-
-static inline void dev_debugfs_rem(struct cfspi *cfspi)
-{
-}
-
-inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static LIST_HEAD(cfspi_list);
-static spinlock_t cfspi_list_lock;
-
-/* SPI uplink head alignment. */
-static ssize_t up_head_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_up_head_align);
-}
-
-static DRIVER_ATTR_RO(up_head_align);
-
-/* SPI uplink tail alignment. */
-static ssize_t up_tail_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_up_tail_align);
-}
-
-static DRIVER_ATTR_RO(up_tail_align);
-
-/* SPI downlink head alignment. */
-static ssize_t down_head_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_down_head_align);
-}
-
-static DRIVER_ATTR_RO(down_head_align);
-
-/* SPI downlink tail alignment. */
-static ssize_t down_tail_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_down_tail_align);
-}
-
-static DRIVER_ATTR_RO(down_tail_align);
-
-/* SPI frame alignment. */
-static ssize_t frame_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_frm_align);
-}
-
-static DRIVER_ATTR_RO(frame_align);
-
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
-{
- u8 *dst = buf;
- caif_assert(buf);
-
- if (cfspi->slave && !cfspi->slave_talked)
- cfspi->slave_talked = true;
-
- do {
- struct sk_buff *skb;
- struct caif_payload_info *info;
- int spad = 0;
- int epad;
-
- skb = skb_dequeue(&cfspi->chead);
- if (!skb)
- break;
-
- /*
- * Calculate length of frame including SPI padding.
- * The payload position is found in the control buffer.
- */
- info = (struct caif_payload_info *)&skb->cb;
-
- /*
- * Compute head offset i.e. number of bytes to add to
- * get the start of the payload aligned.
- */
- if (spi_up_head_align > 1) {
- spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
- *dst = (u8)(spad - 1);
- dst += spad;
- }
-
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, dst, skb->len);
- dst += skb->len;
- cfspi->ndev->stats.tx_packets++;
- cfspi->ndev->stats.tx_bytes += skb->len;
-
- /*
- * Compute tail offset i.e. number of bytes to add to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
- dst += epad;
-
- dev_kfree_skb(skb);
-
- } while ((dst - buf) < len);
-
- return dst - buf;
-}
-
-int cfspi_xmitlen(struct cfspi *cfspi)
-{
- struct sk_buff *skb = NULL;
- int frm_len = 0;
- int pkts = 0;
-
- /*
- * Decommit previously committed frames.
- * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
- */
- while (skb_peek(&cfspi->chead)) {
- skb = skb_dequeue_tail(&cfspi->chead);
- skb_queue_head(&cfspi->qhead, skb);
- }
-
- do {
- struct caif_payload_info *info = NULL;
- int spad = 0;
- int epad = 0;
-
- skb = skb_dequeue(&cfspi->qhead);
- if (!skb)
- break;
-
- /*
- * Calculate length of frame including SPI padding.
- * The payload position is found in the control buffer.
- */
- info = (struct caif_payload_info *)&skb->cb;
-
- /*
- * Compute head offset i.e. number of bytes to add to
- * get the start of the payload aligned.
- */
- if (spi_up_head_align > 1)
- spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
-
- /*
- * Compute tail offset i.e. number of bytes to add to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
-
- if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
- skb_queue_tail(&cfspi->chead, skb);
- pkts++;
- frm_len += skb->len + spad + epad;
- } else {
- /* Put back packet. */
- skb_queue_head(&cfspi->qhead, skb);
- break;
- }
- } while (pkts <= CAIF_MAX_SPI_PKTS);
-
- /*
- * Send flow on if previously sent flow off
- * and now go below the low water mark
- */
- if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
- cfspi->cfdev.flowctrl) {
- cfspi->flow_off_sent = 0;
- cfspi->cfdev.flowctrl(cfspi->ndev, 1);
- }
-
- return frm_len;
-}
-
-static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
-{
- struct cfspi *cfspi = (struct cfspi *)ifc->priv;
-
- /*
- * The slave device is the master on the link. Interrupts before the
- * slave has transmitted are considered spurious.
- */
- if (cfspi->slave && !cfspi->slave_talked) {
- printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
- return;
- }
-
- if (!in_interrupt())
- spin_lock(&cfspi->lock);
- if (assert) {
- set_bit(SPI_SS_ON, &cfspi->state);
- set_bit(SPI_XFER, &cfspi->state);
- } else {
- set_bit(SPI_SS_OFF, &cfspi->state);
- }
- if (!in_interrupt())
- spin_unlock(&cfspi->lock);
-
- /* Wake up the xfer thread. */
- if (assert)
- wake_up_interruptible(&cfspi->wait);
-}
-
-static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
-{
- struct cfspi *cfspi = (struct cfspi *)ifc->priv;
-
- /* Transfer done, complete work queue */
- complete(&cfspi->comp);
-}
-
-static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct cfspi *cfspi = NULL;
- unsigned long flags;
- if (!dev)
- return -EINVAL;
-
- cfspi = netdev_priv(dev);
-
- skb_queue_tail(&cfspi->qhead, skb);
-
- spin_lock_irqsave(&cfspi->lock, flags);
- if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
- /* Wake up xfer thread. */
- wake_up_interruptible(&cfspi->wait);
- }
- spin_unlock_irqrestore(&cfspi->lock, flags);
-
- /* Send flow off if number of bytes is above high water mark */
- if (!cfspi->flow_off_sent &&
- cfspi->qhead.qlen > cfspi->qd_high_mark &&
- cfspi->cfdev.flowctrl) {
- cfspi->flow_off_sent = 1;
- cfspi->cfdev.flowctrl(cfspi->ndev, 0);
- }
-
- return NETDEV_TX_OK;
-}
-
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
-{
- u8 *src = buf;
-
- caif_assert(buf != NULL);
-
- do {
- int res;
- struct sk_buff *skb = NULL;
- int spad = 0;
- int epad = 0;
- int pkt_len = 0;
-
- /*
- * Compute head offset i.e. number of bytes added to
- * get the start of the payload aligned.
- */
- if (spi_down_head_align > 1) {
- spad = 1 + *src;
- src += spad;
- }
-
- /* Read length of CAIF frame (little endian). */
- pkt_len = *src;
- pkt_len |= ((*(src+1)) << 8) & 0xFF00;
- pkt_len += 2; /* Add FCS fields. */
-
- /* Get a suitable caif packet and copy in data. */
-
- skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
- caif_assert(skb != NULL);
-
- skb_put_data(skb, src, pkt_len);
- src += pkt_len;
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
-
- /*
- * Push received packet up the stack.
- */
- if (!spi_loop)
- res = netif_rx_ni(skb);
- else
- res = cfspi_xmit(skb, cfspi->ndev);
-
- if (!res) {
- cfspi->ndev->stats.rx_packets++;
- cfspi->ndev->stats.rx_bytes += pkt_len;
- } else
- cfspi->ndev->stats.rx_dropped++;
-
- /*
- * Compute tail offset i.e. number of bytes added to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
- src += epad;
- } while ((src - buf) < len);
-
- return src - buf;
-}
-
-static int cfspi_open(struct net_device *dev)
-{
- netif_wake_queue(dev);
- return 0;
-}
-
-static int cfspi_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int cfspi_init(struct net_device *dev)
-{
- int res = 0;
- struct cfspi *cfspi = netdev_priv(dev);
-
- /* Set flow info. */
- cfspi->flow_off_sent = 0;
- cfspi->qd_low_mark = LOW_WATER_MARK;
- cfspi->qd_high_mark = HIGH_WATER_MARK;
-
- /* Set slave info. */
- if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
- cfspi->slave = true;
- cfspi->slave_talked = false;
- } else {
- cfspi->slave = false;
- cfspi->slave_talked = false;
- }
-
- /* Allocate DMA buffers. */
- cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
- if (!cfspi->xfer.va_tx[0]) {
- res = -ENODEV;
- goto err_dma_alloc_tx_0;
- }
-
- cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx);
-
- if (!cfspi->xfer.va_rx) {
- res = -ENODEV;
- goto err_dma_alloc_rx;
- }
-
- /* Initialize the work queue. */
- INIT_WORK(&cfspi->work, cfspi_xfer);
-
- /* Initialize spin locks. */
- spin_lock_init(&cfspi->lock);
-
- /* Initialize flow control state. */
- cfspi->flow_stop = false;
-
- /* Initialize wait queue. */
- init_waitqueue_head(&cfspi->wait);
-
- /* Create work thread. */
- cfspi->wq = create_singlethread_workqueue(dev->name);
- if (!cfspi->wq) {
- printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
- res = -ENODEV;
- goto err_create_wq;
- }
-
- /* Initialize work queue. */
- init_completion(&cfspi->comp);
-
- /* Create debugfs entries. */
- dev_debugfs_add(cfspi);
-
- /* Set up the ifc. */
- cfspi->ifc.ss_cb = cfspi_ss_cb;
- cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
- cfspi->ifc.priv = cfspi;
-
- /* Add CAIF SPI device to list. */
- spin_lock(&cfspi_list_lock);
- list_add_tail(&cfspi->list, &cfspi_list);
- spin_unlock(&cfspi_list_lock);
-
- /* Schedule the work queue. */
- queue_work(cfspi->wq, &cfspi->work);
-
- return 0;
-
- err_create_wq:
- dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
- err_dma_alloc_tx_0:
- return res;
-}
-
-static void cfspi_uninit(struct net_device *dev)
-{
- struct cfspi *cfspi = netdev_priv(dev);
-
- /* Remove from list. */
- spin_lock(&cfspi_list_lock);
- list_del(&cfspi->list);
- spin_unlock(&cfspi_list_lock);
-
- cfspi->ndev = NULL;
- /* Free DMA buffers. */
- dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- return;
-}
-
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_init = cfspi_init,
- .ndo_uninit = cfspi_uninit,
- .ndo_start_xmit = cfspi_xmit
-};
-
-static void cfspi_setup(struct net_device *dev)
-{
- struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->needs_free_netdev = true;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- if (!dev)
- return -ENODEV;
-
- ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
- NET_NAME_UNKNOWN, cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
-
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
- /* Register network device. */
- res = register_netdev(ndev);
- if (res) {
- printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
- goto err_net_reg;
- }
- return res;
-
- err_net_reg:
- free_netdev(ndev);
-
- return res;
-}
-
-int cfspi_spi_remove(struct platform_device *pdev)
-{
- /* Everything is done in cfspi_uninit(). */
- return 0;
-}
-
-static void __exit cfspi_exit_module(void)
-{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
-
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- unregister_netdev(cfspi->ndev);
- }
-
- /* Destroy sysfs files. */
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
- /* Unregister platform driver. */
- platform_driver_unregister(&cfspi_spi_driver);
- /* Destroy debugfs root directory. */
- driver_debugfs_remove();
-}
-
-static int __init cfspi_init_module(void)
-{
- int result;
-
- /* Initialize spin lock. */
- spin_lock_init(&cfspi_list_lock);
-
- /* Register platform driver. */
- result = platform_driver_register(&cfspi_spi_driver);
- if (result) {
- printk(KERN_ERR "Could not register platform SPI driver.\n");
- goto err_dev_register;
- }
-
- /* Create sysfs files. */
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 1.\n");
- goto err_create_up_head_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 2.\n");
- goto err_create_up_tail_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 3.\n");
- goto err_create_down_head_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 4.\n");
- goto err_create_down_tail_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_frame_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 5.\n");
- goto err_create_frame_align;
- }
- driver_debugfs_create();
- return result;
-
- err_create_frame_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- err_create_down_tail_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- err_create_down_head_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- err_create_up_tail_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- err_create_up_head_align:
- platform_driver_unregister(&cfspi_spi_driver);
- err_dev_register:
- return result;
-}
-
-module_init(cfspi_init_module);
-module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
deleted file mode 100644
index bb776d33dd8f..000000000000
--- a/drivers/net/caif/caif_spi_slave.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- */
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <net/caif/caif_spi.h>
-
-#ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS 0
-static inline int forward_to_spi_cmd(struct cfspi *cfspi)
-{
- return cfspi->rx_cpck_len;
-}
-#else
-#define SPI_DATA_POS SPI_CMD_SZ
-static inline int forward_to_spi_cmd(struct cfspi *cfspi)
-{
- return 0;
-}
-#endif
-
-int spi_frm_align = 2;
-
-/*
- * SPI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-int spi_up_head_align = 1 << 1;
-int spi_up_tail_align = 1 << 0;
-int spi_down_head_align = 1 << 2;
-int spi_down_tail_align = 1 << 1;
-
-#ifdef CONFIG_DEBUG_FS
-static inline void debugfs_store_prev(struct cfspi *cfspi)
-{
- /* Store previous command for debugging reasons.*/
- cfspi->pcmd = cfspi->cmd;
- /* Store previous transfer. */
- cfspi->tx_ppck_len = cfspi->tx_cpck_len;
- cfspi->rx_ppck_len = cfspi->rx_cpck_len;
-}
-#else
-static inline void debugfs_store_prev(struct cfspi *cfspi)
-{
-}
-#endif
-
-void cfspi_xfer(struct work_struct *work)
-{
- struct cfspi *cfspi;
- u8 *ptr = NULL;
- unsigned long flags;
- int ret;
- cfspi = container_of(work, struct cfspi, work);
-
- /* Initialize state. */
- cfspi->cmd = SPI_CMD_EOT;
-
- for (;;) {
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
-
- /* Wait for master talk or transmit event. */
- wait_event_interruptible(cfspi->wait,
- test_bit(SPI_XFER, &cfspi->state) ||
- test_bit(SPI_TERMINATE, &cfspi->state));
-
- if (test_bit(SPI_TERMINATE, &cfspi->state))
- return;
-
-#if CFSPI_DBG_PREFILL
- /* Prefill buffers for easier debugging. */
- memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
- memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
-#endif /* CFSPI_DBG_PREFILL */
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
-
- /* Check whether we have a committed frame. */
- if (cfspi->tx_cpck_len) {
- int len;
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
-
- /* Copy committed SPI frames after the SPI indication. */
- ptr = (u8 *) cfspi->xfer.va_tx;
- ptr += SPI_IND_SZ;
- len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
- WARN_ON(len != cfspi->tx_cpck_len);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
-
- /* Get length of next frame to commit. */
- cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
-
- WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
-
- /*
- * Add indication and length at the beginning of the frame,
- * using little endian.
- */
- ptr = (u8 *) cfspi->xfer.va_tx;
- *ptr++ = SPI_CMD_IND;
- *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
- *ptr++ = cfspi->tx_npck_len & 0x00FF;
- *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
-
- /* Calculate length of DMAs. */
- cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
- cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
-
- /* Add SPI TX frame alignment padding, if necessary. */
- if (cfspi->tx_cpck_len &&
- (cfspi->xfer.tx_dma_len % spi_frm_align)) {
-
- cfspi->xfer.tx_dma_len += spi_frm_align -
- (cfspi->xfer.tx_dma_len % spi_frm_align);
- }
-
- /* Add SPI RX frame alignment padding, if necessary. */
- if (cfspi->rx_cpck_len &&
- (cfspi->xfer.rx_dma_len % spi_frm_align)) {
-
- cfspi->xfer.rx_dma_len += spi_frm_align -
- (cfspi->xfer.rx_dma_len % spi_frm_align);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
-
- /* Start transfer. */
- ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
- WARN_ON(ret);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
-
- /*
- * TODO: We might be able to make an assumption if this is the
- * first loop. Make sure that minimum toggle time is respected.
- */
- udelay(MIN_TRANSITION_TIME_USEC);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
-
- /* Signal that we are ready to receive data. */
- cfspi->dev->sig_xfer(true, cfspi->dev);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
-
- /* Wait for transfer completion. */
- wait_for_completion(&cfspi->comp);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
-
- if (cfspi->cmd == SPI_CMD_EOT) {
- /*
- * Clear the master talk bit. A xfer is always at
- * least two bursts.
- */
- clear_bit(SPI_SS_ON, &cfspi->state);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
-
- /* Make sure that the minimum toggle time is respected. */
- if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
- cfspi->dev->clk_mhz) <
- MIN_TRANSITION_TIME_USEC) {
-
- udelay(MIN_TRANSITION_TIME_USEC -
- SPI_XFER_TIME_USEC
- (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
-
- /* De-assert transfer signal. */
- cfspi->dev->sig_xfer(false, cfspi->dev);
-
- /* Check whether we received a CAIF packet. */
- if (cfspi->rx_cpck_len) {
- int len;
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
-
- /* Parse SPI frame. */
- ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
-
- len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
- WARN_ON(len != cfspi->rx_cpck_len);
- }
-
- /* Check the next SPI command and length. */
- ptr = (u8 *) cfspi->xfer.va_rx;
-
- ptr += forward_to_spi_cmd(cfspi);
-
- cfspi->cmd = *ptr++;
- cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
- cfspi->rx_npck_len = *ptr++;
- cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
-
- WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
- WARN_ON(cfspi->cmd > SPI_CMD_EOT);
-
- debugfs_store_prev(cfspi);
-
- /* Check whether the master issued an EOT command. */
- if (cfspi->cmd == SPI_CMD_EOT) {
- /* Reset state. */
- cfspi->tx_cpck_len = 0;
- cfspi->rx_cpck_len = 0;
- } else {
- /* Update state. */
- cfspi->tx_cpck_len = cfspi->tx_npck_len;
- cfspi->rx_cpck_len = cfspi->rx_npck_len;
- }
-
- /*
- * Check whether we need to clear the xfer bit.
- * Spin lock needed for packet insertion.
- * Test and clear of different bits
- * are not supported.
- */
- spin_lock_irqsave(&cfspi->lock, flags);
- if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
- && !test_bit(SPI_SS_ON, &cfspi->state))
- clear_bit(SPI_XFER, &cfspi->state);
-
- spin_unlock_irqrestore(&cfspi->lock, flags);
- }
-}
-
-struct platform_driver cfspi_spi_driver = {
- .probe = cfspi_spi_probe,
- .remove = cfspi_spi_remove,
- .driver = {
- .name = "cfspi_sspi",
- .owner = THIS_MODULE,
- },
-};
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 80ea2e913c2b..47a6d62b7511 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -652,7 +652,7 @@ static int cfv_probe(struct virtio_device *vdev)
const char *cfv_netdev_name = "cfvrt";
struct net_device *netdev;
struct cfv_info *cfv;
- int err = -EINVAL;
+ int err;
netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
NET_NAME_UNKNOWN, cfv_netdev_setup);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f07012a76c0c..424970939fd4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,8 +41,8 @@ config CAN_SLCAN
www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the SocketCAN SVN, see
- http://developer.berlios.de/projects/socketcan for details.
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
The slcan driver supports up to 10 CAN netdevices by default which
can be changed by the 'maxdev=xx' module option. This driver can
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 38e9f80ed1ef..c14de95d2ca7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -643,7 +643,7 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
- * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
* receive another message).
*
* lower mbxs upper
@@ -661,13 +661,13 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
- * mailbox but not reenable it.
+ * mailbox but not re-enable it.
*
- * With completion of the last of the lower mailboxes, we reenable the
+ * With completion of the last of the lower mailboxes, we re-enable the
* whole first group, but continue to look for filled mailboxes in the
* upper mailboxes. Imagine the second group like overflow mailboxes,
* which takes CAN messages if the lower group is full. While in the
- * upper group we reenable the mailbox right after reading it. Giving
+ * upper group we re-enable the mailbox right after reading it. Giving
* the chip more room to store messages.
*
* After finishing we look again in the lower group if we've still
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 8e9f5620c9a2..1ccdbe89585b 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -356,15 +356,6 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
}
}
-static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
- int iface)
-{
- int i;
-
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
- c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
-}
-
static int c_can_handle_lost_msg_obj(struct net_device *dev,
int iface, int objno, u32 ctrl)
{
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d5567a7c1c6d..92213d3d96eb 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -81,7 +81,7 @@ enum reg {
C_CAN_FUNCTION_REG,
};
-static const u16 reg_map_c_can[] = {
+static const u16 __maybe_unused reg_map_c_can[] = {
[C_CAN_CTRL_REG] = 0x00,
[C_CAN_STS_REG] = 0x02,
[C_CAN_ERR_CNT_REG] = 0x04,
@@ -121,7 +121,7 @@ static const u16 reg_map_c_can[] = {
[C_CAN_MSGVAL2_REG] = 0xB2,
};
-static const u16 reg_map_d_can[] = {
+static const u16 __maybe_unused reg_map_d_can[] = {
[C_CAN_CTRL_REG] = 0x00,
[C_CAN_CTRL_EX_REG] = 0x02,
[C_CAN_STS_REG] = 0x04,
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 7cdc232cbfea..07e2b8df5153 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -538,7 +538,7 @@ static int cc770_err(struct net_device *dev, u8 status)
priv->can.can_stats.error_warning++;
}
} else {
- /* Back to error avtive */
+ /* Back to error active */
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index 948541491ab5..0628fd9e1980 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -184,7 +184,7 @@ struct cc770_priv {
u8 control_normal_mode; /* Control register for normal mode */
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
- u8 bus_config; /* Bus conffiguration register */
+ u8 bus_config; /* Bus configuration register */
struct sk_buff *tx_skb;
};
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 68834a2853c9..6dee4f8f2024 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -60,7 +60,6 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-#define CAN_CALC_SYNC_SEG 1
/* Bit-timing calculation derived from:
*
@@ -86,8 +85,8 @@ can_update_sample_point(const struct can_bittiming_const *btc,
int i;
for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_CALC_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
@@ -96,8 +95,8 @@ can_update_sample_point(const struct can_bittiming_const *btc,
tseg2 = tseg - tseg1;
}
- sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
- (tseg + CAN_CALC_SYNC_SEG);
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
sample_point_error = abs(sample_point_nominal - sample_point);
if (sample_point <= sample_point_nominal &&
@@ -145,7 +144,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
+ tsegall = CAN_SYNC_SEG + tseg / 2;
/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
@@ -223,7 +222,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* real bitrate */
bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
return 0;
}
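
The rename is purely mechanical: CAN_SYNC_SEG is the same constant 1 (the fixed synchronization segment), now shared instead of locally defined. The per-mille sample-point arithmetic is easy to verify in isolation; a minimal standalone check:

    #include <stdio.h>

    #define CAN_SYNC_SEG 1    /* fixed synchronization segment: 1 tq */

    int main(void)
    {
        unsigned int tseg1 = 13, tseg2 = 2;    /* example segment lengths */
        unsigned int tseg = tseg1 + tseg2;

        /* same integer arithmetic as can_update_sample_point() */
        unsigned int sp = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
                          (tseg + CAN_SYNC_SEG);

        /* 1000 * (16 - 2) / 16 = 875, i.e. an 87.5% sample point */
        printf("sample point = %u.%u%%\n", sp / 10, sp % 10);
        return 0;
    }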
@@ -371,6 +370,28 @@ static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
}
}
+static const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+
+ return "<unknown>";
+}
+
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state)
{
@@ -382,7 +403,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
return;
}
- netdev_dbg(dev, "New error state: %d\n", new_state);
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
can_update_state_error_stats(dev, new_state);
priv->state = new_state;
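
With the richer message, a transition out of Error Active would now be logged along these lines (state numbers follow the can_state enum, where Error Active is 0 and Error Warning is 1):

    can0: Controller changed from Error Active State (0) into Error Warning State (1).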
@@ -434,8 +457,8 @@ static void can_flush_echo_skb(struct net_device *dev)
* of the device driver. The driver must protect access to
* priv->echo_skb, if necessary.
*/
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx)
{
struct can_priv *priv = netdev_priv(dev);
@@ -446,13 +469,13 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
- return;
+ return 0;
}
if (!priv->echo_skb[idx]) {
skb = can_create_echo_skb(skb);
if (!skb)
- return;
+ return -ENOMEM;
/* make settings for echo to reduce code in irq context */
skb->pkt_type = PACKET_BROADCAST;
@@ -463,9 +486,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
priv->echo_skb[idx] = skb;
} else {
/* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
kfree_skb(skb);
+ return -EBUSY;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
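
With can_put_echo_skb() returning an error code, drivers can react instead of losing the echo silently. A kernel-context sketch of a start_xmit path using the new return value; hw_queue_tx_frame() is a hypothetical hardware helper, and the recovery shown is illustrative:

    /* Sketch only: assumes the usual single-TX-mailbox driver shape. */
    static netdev_tx_t sketch_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
    {
        int err;

        if (can_dropped_invalid_skb(dev, skb))
            return NETDEV_TX_OK;

        netif_stop_queue(dev);

        err = can_put_echo_skb(skb, dev, 0);
        if (err) {
            /* -EBUSY/-ENOMEM: the helper already freed the skb */
            netif_wake_queue(dev);
            return NETDEV_TX_OK;
        }

        hw_queue_tx_frame(dev);    /* hypothetical: kick the hardware */
        return NETDEV_TX_OK;
    }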
@@ -486,9 +512,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 len = cf->len;
- *len_ptr = len;
+ /* get the real payload length for netdev statistics */
+ if (cf->can_id & CAN_RTR_FLAG)
+ *len_ptr = 0;
+ else
+ *len_ptr = cf->len;
+
priv->echo_skb[idx] = NULL;
return skb;
@@ -512,7 +542,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
if (!skb)
return 0;
- netif_rx(skb);
+ skb_get(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS)
+ dev_consume_skb_any(skb);
+ else
+ dev_kfree_skb_any(skb);
return len;
}
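
The skb_get() before netif_rx() deserves a note. One plausible reading: the extra reference keeps the skb alive across netif_rx(), so the final free always happens through the context-safe *_any() helpers, and a delivered frame is accounted as consumed rather than dropped. Annotated excerpt:

    skb_get(skb);                          /* refcount 1 -> 2 */
    if (netif_rx(skb) == NET_RX_SUCCESS)   /* stack drops one reference */
        dev_consume_skb_any(skb);          /* delivered: not a drop event */
    else
        dev_kfree_skb_any(skb);            /* rejected: recorded as a drop */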
@@ -612,7 +646,11 @@ void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- netdev_info(dev, "bus-off\n");
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
netif_carrier_off(dev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 94d10ec954a0..881799bd9c5e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,7 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
-#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -21,12 +21,14 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define DRV_NAME "flexcan"
@@ -52,6 +54,7 @@
#define FLEXCAN_MCR_IRMQ BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
+#define FLEXCAN_MCR_FDEN BIT(11)
/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0x0 << 8)
@@ -91,6 +94,7 @@
#define FLEXCAN_CTRL2_MRP BIT(18)
#define FLEXCAN_CTRL2_RRS BIT(17)
#define FLEXCAN_CTRL2_EACEN BIT(16)
+#define FLEXCAN_CTRL2_ISOCANFDEN BIT(12)
/* FLEXCAN memory error control register (MECR) bits */
#define FLEXCAN_MECR_ECRWRDIS BIT(31)
@@ -134,8 +138,35 @@
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
#define FLEXCAN_ESR_ALL_INT \
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
- FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \
- FLEXCAN_ESR_WAK_INT)
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+/* FLEXCAN Bit Timing register (CBT) bits */
+#define FLEXCAN_CBT_BTF BIT(31)
+#define FLEXCAN_CBT_EPRESDIV_MASK GENMASK(30, 21)
+#define FLEXCAN_CBT_ERJW_MASK GENMASK(20, 16)
+#define FLEXCAN_CBT_EPROPSEG_MASK GENMASK(15, 10)
+#define FLEXCAN_CBT_EPSEG1_MASK GENMASK(9, 5)
+#define FLEXCAN_CBT_EPSEG2_MASK GENMASK(4, 0)
+
+/* FLEXCAN FD control register (FDCTRL) bits */
+#define FLEXCAN_FDCTRL_FDRATE BIT(31)
+#define FLEXCAN_FDCTRL_MBDSR1 GENMASK(20, 19)
+#define FLEXCAN_FDCTRL_MBDSR0 GENMASK(17, 16)
+#define FLEXCAN_FDCTRL_MBDSR_8 0x0
+#define FLEXCAN_FDCTRL_MBDSR_12 0x1
+#define FLEXCAN_FDCTRL_MBDSR_32 0x2
+#define FLEXCAN_FDCTRL_MBDSR_64 0x3
+#define FLEXCAN_FDCTRL_TDCEN BIT(15)
+#define FLEXCAN_FDCTRL_TDCFAIL BIT(14)
+#define FLEXCAN_FDCTRL_TDCOFF GENMASK(12, 8)
+#define FLEXCAN_FDCTRL_TDCVAL GENMASK(5, 0)
+
+/* FLEXCAN FD Bit Timing register (FDCBT) bits */
+#define FLEXCAN_FDCBT_FPRESDIV_MASK GENMASK(29, 20)
+#define FLEXCAN_FDCBT_FRJW_MASK GENMASK(18, 16)
+#define FLEXCAN_FDCBT_FPROPSEG_MASK GENMASK(14, 10)
+#define FLEXCAN_FDCBT_FPSEG1_MASK GENMASK(7, 5)
+#define FLEXCAN_FDCBT_FPSEG2_MASK GENMASK(2, 0)
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
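
The GENMASK()/FIELD_PREP() pairs above replace hand-written shift-and-mask code. A standalone demo of the packing, with simplified stand-ins for the kernel macros (the real ones live in linux/bits.h and linux/bitfield.h; __builtin_ctz assumes GCC/Clang):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins, good enough for this demo only. */
    #define GENMASK(h, l)         (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

    #define CBT_BTF           (1u << 31)
    #define CBT_EPRESDIV_MASK GENMASK(30, 21)
    #define CBT_EPSEG2_MASK   GENMASK(4, 0)

    int main(void)
    {
        /* pack brp = 4 (register value 3) and phase_seg2 = 8 (value 7) */
        uint32_t cbt = CBT_BTF |
                       FIELD_PREP(CBT_EPRESDIV_MASK, 4 - 1) |
                       FIELD_PREP(CBT_EPSEG2_MASK, 8 - 1);

        printf("cbt = 0x%08x\n", cbt);    /* prints 0x80600007 */
        return 0;
    }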
@@ -161,6 +192,9 @@
#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24)
#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24)
+#define FLEXCAN_MB_CNT_EDL BIT(31)
+#define FLEXCAN_MB_CNT_BRS BIT(30)
+#define FLEXCAN_MB_CNT_ESI BIT(29)
#define FLEXCAN_MB_CNT_SRR BIT(22)
#define FLEXCAN_MB_CNT_IDE BIT(21)
#define FLEXCAN_MB_CNT_RTR BIT(20)
@@ -172,26 +206,42 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
- * Filter? connected? Passive detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no
- * MX35 FlexCAN2 03.00.00.00 no no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
- * VF610 FlexCAN3 ? no yes no yes yes?
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
+ * VF610 FlexCAN3 ? no yes no yes yes? no
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) /* Setup stop mode to support wakeup */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+/* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+/* support memory detection and correction */
+#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -203,12 +253,12 @@ struct flexcan_mb {
/* Structure of the hardware registers */
struct flexcan_regs {
u32 mcr; /* 0x00 */
- u32 ctrl; /* 0x04 */
+ u32 ctrl; /* 0x04 - Not affected by Soft Reset */
u32 timer; /* 0x08 */
- u32 _reserved1; /* 0x0c */
- u32 rxgmask; /* 0x10 */
- u32 rx14mask; /* 0x14 */
- u32 rx15mask; /* 0x18 */
+ u32 tcr; /* 0x0c */
+ u32 rxgmask; /* 0x10 - Not affected by Soft Reset */
+ u32 rx14mask; /* 0x14 - Not affected by Soft Reset */
+ u32 rx15mask; /* 0x18 - Not affected by Soft Reset */
u32 ecr; /* 0x1c */
u32 esr; /* 0x20 */
u32 imask2; /* 0x24 */
@@ -217,20 +267,24 @@ struct flexcan_regs {
u32 iflag1; /* 0x30 */
union { /* 0x34 */
u32 gfwr_mx28; /* MX28, MX53 */
- u32 ctrl2; /* MX6, VF610 */
+ u32 ctrl2; /* MX6, VF610 - Not affected by Soft Reset */
};
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
u32 crcr; /* 0x44 */
u32 rxfgmask; /* 0x48 */
- u32 rxfir; /* 0x4c */
- u32 _reserved3[12]; /* 0x50 */
- u8 mb[2][512]; /* 0x80 */
+ u32 rxfir; /* 0x4c - Not affected by Soft Reset */
+ u32 cbt; /* 0x50 - Not affected by Soft Reset */
+ u32 _reserved2; /* 0x54 */
+ u32 dbg1; /* 0x58 */
+ u32 dbg2; /* 0x5c */
+ u32 _reserved3[8]; /* 0x60 */
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
/* FIFO-mode:
* MB
* 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserverd
+ * 0x090...0x0df 1-5 reserved
* 0x0e0...0x0ff 6-7 8 entry ID table
* (mx25, mx28, mx35, mx53)
* 0x0e0...0x2df 6-7..37 8..128 entry ID table
@@ -238,10 +292,19 @@ struct flexcan_regs {
* (mx6, vf610)
*/
u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
u32 _reserved5[24]; /* 0x980 */
u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[63]; /* 0x9e4 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -250,8 +313,18 @@ struct flexcan_regs {
u32 rerrdr; /* 0xaf4 */
u32 rerrsynr; /* 0xaf8 */
u32 errsr; /* 0xafc */
+ u32 _reserved7[64]; /* 0xb00 */
+ u32 fdctrl; /* 0xc00 - Not affected by Soft Reset */
+ u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
+ u32 fdcrc; /* 0xc08 */
+ u32 _reserved9[199]; /* 0xc0c */
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
};
+static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
+
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
};
@@ -260,8 +333,6 @@ struct flexcan_stop_mode {
struct regmap *gpr;
u8 req_gpr;
u8 req_bit;
- u8 ack_gpr;
- u8 ack_bit;
};
struct flexcan_priv {
@@ -313,16 +384,35 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
FLEXCAN_QUIRK_SETUP_STOP_MODE,
};
+static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD,
+};
+
+static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+};
+
+static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -337,6 +427,30 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const flexcan_fd_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 96,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const flexcan_fd_data_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 39,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* FlexCAN module is essentially modelled as a little-endian IP in most
* SoCs, i.e the registers as well as the message buffer areas are
* implemented in a little-endian fashion.
@@ -457,7 +571,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
-
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
@@ -628,10 +741,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
u32 can_id;
u32 data;
- u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+ u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_len2dlc(cfd->len)) << 16);
int i;
if (can_dropped_invalid_skb(dev, skb))
@@ -639,18 +752,25 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
netif_stop_queue(dev);
- if (cf->can_id & CAN_EFF_FLAG) {
- can_id = cf->can_id & CAN_EFF_MASK;
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ can_id = cfd->can_id & CAN_EFF_MASK;
ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
} else {
- can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+ can_id = (cfd->can_id & CAN_SFF_MASK) << 18;
}
- if (cf->can_id & CAN_RTR_FLAG)
+ if (cfd->can_id & CAN_RTR_FLAG)
ctrl |= FLEXCAN_MB_CNT_RTR;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
- data = be32_to_cpup((__be32 *)&cf->data[i]);
+ if (can_is_canfd_skb(skb)) {
+ ctrl |= FLEXCAN_MB_CNT_EDL;
+
+ if (cfd->flags & CANFD_BRS)
+ ctrl |= FLEXCAN_MB_CNT_BRS;
+ }
+
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
+ data = be32_to_cpup((__be32 *)&cfd->data[i]);
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
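
can_len2dlc() matters here because CAN FD payload lengths above 8 bytes map non-linearly onto the 4-bit DLC field. A standalone sketch of the standard mapping (it mirrors the ISO 11898-1 table, not the kernel helpers themselves):

    #include <stdio.h>

    /* Standard CAN FD DLC -> payload length table (ISO 11898-1). */
    static const unsigned char dlc2len[16] = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
    };

    /* Smallest DLC whose payload can hold 'len' bytes. */
    static unsigned char len2dlc(unsigned char len)
    {
        for (unsigned char dlc = 0; dlc < 15; dlc++)
            if (dlc2len[dlc] >= len)
                return dlc;
        return 15;
    }

    int main(void)
    {
        printf("len 12 -> dlc %u\n", len2dlc(12));  /* 9 */
        printf("len 13 -> dlc %u\n", len2dlc(13));  /* 10: 16 bytes on the wire */
        printf("dlc 15 -> len %u\n", dlc2len[15]);  /* 64 */
        return 0;
    }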
@@ -822,7 +942,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
struct sk_buff *skb;
- struct can_frame *cf;
+ struct canfd_frame *cfd;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
@@ -859,8 +979,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
- skb = alloc_can_skb(offload->dev, &cf);
- if (!skb) {
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
+ skb = alloc_canfd_skb(offload->dev, &cfd);
+ else
+ skb = alloc_can_skb(offload->dev, (struct can_frame **)&cfd);
+ if (unlikely(!skb)) {
skb = ERR_PTR(-ENOMEM);
goto mark_as_read;
}
@@ -870,17 +993,28 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_id = priv->read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
- cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cfd->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
- cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+ cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL) {
+ cfd->len = can_dlc2len(get_canfd_dlc((reg_ctrl >> 16) & 0xf));
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+ }
- if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
- cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+ if (reg_ctrl & FLEXCAN_MB_CNT_ESI)
+ cfd->flags |= CANFD_ESI;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
__be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)]));
- *(__be32 *)(cf->data + i) = data;
+ *(__be32 *)(cfd->data + i) = data;
}
mark_as_read:
@@ -961,10 +1095,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
reg_esr = priv->read(&regs->esr);
- /* ACK all bus error and state change IRQ sources */
- if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+ /* ACK all bus error, state change and wake IRQ sources */
+ if (reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT)) {
handled = IRQ_HANDLED;
- priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ priv->write(reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT), &regs->esr);
}
/* state change interrupt or broken error state quirk fix is enabled */
@@ -1019,7 +1153,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
return handled;
}
-static void flexcan_set_bittiming(struct net_device *dev)
+static void flexcan_set_bittiming_ctrl(const struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
@@ -1031,10 +1165,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
FLEXCAN_CTRL_PSEG2(0x7) |
- FLEXCAN_CTRL_PROPSEG(0x7) |
- FLEXCAN_CTRL_LPB |
- FLEXCAN_CTRL_SMP |
- FLEXCAN_CTRL_LOM);
+ FLEXCAN_CTRL_PROPSEG(0x7));
reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
@@ -1042,6 +1173,130 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(bt->sjw - 1) |
FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+ priv->write(reg, &regs->ctrl);
+
+ /* print chip status */
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
+}
+
+static void flexcan_set_bittiming_cbt(const struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_cbt, reg_fdctrl;
+
+ /* CBT */
+ /* CBT[EPSEG1] is 5 bit long and CBT[EPROPSEG] is 6 bit
+ * long. The can_calc_bittiming() tries to divide the tseg1
+ * equally between phase_seg1 and prop_seg, which may not fit
+ * in the CBT register. Therefore, if phase_seg1 exceeds the
+ * maximum field value, increase prop_seg and decrease phase_seg1.
+ */
+ if (bt->phase_seg1 > 0x20) {
+ bt->prop_seg += (bt->phase_seg1 - 0x20);
+ bt->phase_seg1 = 0x20;
+ }
+
+ reg_cbt = FLEXCAN_CBT_BTF |
+ FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) |
+ FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing cbt=0x%08x\n", reg_cbt);
+ priv->write(reg_cbt, &regs->cbt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ u32 reg_fdcbt, reg_ctrl2;
+
+ if (bt->brp != dbt->brp)
+ netdev_warn(dev, "Data brp=%d and brp=%d don't match, this may result in a phase error. Consider using different bitrate and/or data bitrate.\n",
+ dbt->brp, bt->brp);
+
+ /* FDCBT */
+ /* FDCBT[FPSEG1] is 3 bit long and FDCBT[FPROPSEG] is
+ * 5 bit long. The can_calc_bittiming() tries to divide
+ * the tseg1 equally between phase_seg1 and prop_seg,
+ * which may not fit in the FDCBT register. Therefore, if
+ * phase_seg1 exceeds the maximum field value, increase
+ * prop_seg and decrease phase_seg1.
+ */
+ if (dbt->phase_seg1 > 0x8) {
+ dbt->prop_seg += (dbt->phase_seg1 - 0x8);
+ dbt->phase_seg1 = 0x8;
+ }
+
+ reg_fdcbt = priv->read(&regs->fdcbt);
+ reg_fdcbt &= ~(FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, 0x3ff) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, 0x1f) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, 0x7));
+
+ reg_fdcbt |= FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, dbt->brp - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, dbt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, dbt->prop_seg) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, dbt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, dbt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing fdcbt=0x%08x\n", reg_fdcbt);
+ priv->write(reg_fdcbt, &regs->fdcbt);
+
+ /* CTRL2 */
+ reg_ctrl2 = priv->read(&regs->ctrl2);
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ISOCANFDEN;
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ reg_ctrl2 |= FLEXCAN_CTRL2_ISOCANFDEN;
+
+ netdev_dbg(dev, "writing ctrl2=0x%08x\n", reg_ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
+ }
+
+ /* FDCTRL */
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FLEXCAN_FDCTRL_FDRATE |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, 0x1f));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |= FLEXCAN_FDCTRL_FDRATE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* TDC must be disabled for Loop Back mode */
+ reg_fdctrl &= ~FLEXCAN_FDCTRL_TDCEN;
+ } else {
+ reg_fdctrl |= FLEXCAN_FDCTRL_TDCEN |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF,
+ ((dbt->phase_seg1 - 1) +
+ dbt->prop_seg + 2) *
+ ((dbt->brp - 1) + 1));
+ }
+ }
+
+ netdev_dbg(dev, "writing fdctrl=0x%08x\n", reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x ctrl2=0x%08x fdctrl=0x%08x cbt=0x%08x fdcbt=0x%08x\n",
+ __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl),
+ priv->read(&regs->ctrl2), priv->read(&regs->fdctrl),
+ priv->read(&regs->cbt), priv->read(&regs->fdcbt));
+}
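
Two pieces of arithmetic in flexcan_set_bittiming_cbt() are worth checking by hand: the redistribution when phase_seg1 overflows CBT[EPSEG1], and the transceiver-delay-compensation offset written to FDCTRL[TDCOFF]. A standalone check; the limits come from the code above, while the example timing values are made up:

    #include <stdio.h>

    int main(void)
    {
        /* 1. redistribute tseg1 if phase_seg1 overflows CBT[EPSEG1] */
        unsigned int prop_seg = 10, phase_seg1 = 0x24;

        if (phase_seg1 > 0x20) {
            prop_seg += phase_seg1 - 0x20;    /* 10 + 4 = 14 */
            phase_seg1 = 0x20;
        }
        printf("prop_seg=%u phase_seg1=0x%x\n", prop_seg, phase_seg1);

        /* 2. TDC offset: ((phase_seg1 - 1) + prop_seg + 2) * brp,
         *    where ((brp - 1) + 1) in the code is simply brp
         */
        unsigned int d_ps1 = 8, d_prop = 7, d_brp = 1;
        unsigned int tdcoff = ((d_ps1 - 1) + d_prop + 2) * d_brp;

        printf("tdcoff=%u\n", tdcoff);    /* (7 + 7 + 2) * 1 = 16 */
        return 0;
    }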
+
+static void flexcan_set_bittiming(struct net_device *dev)
+{
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg;
+
+ reg = priv->read(&regs->ctrl);
+ reg &= ~(FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP |
+ FLEXCAN_CTRL_LOM);
+
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg |= FLEXCAN_CTRL_LPB;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1052,9 +1307,41 @@ static void flexcan_set_bittiming(struct net_device *dev)
netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
priv->write(reg, &regs->ctrl);
- /* print chip status */
- netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- priv->read(&regs->mcr), priv->read(&regs->ctrl));
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+ return flexcan_set_bittiming_cbt(dev);
+ else
+ return flexcan_set_bittiming_ctrl(dev);
+}
+
+static void flexcan_ram_init(struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl2;
+
+ /* 11.8.3.13 Detection and correction of memory errors:
+ * CTRL2[WRMFRZ] grants write access to all memory positions
+ * that require initialization, ranging from 0x080 to 0xADF
+ * and from 0xF28 to 0xFFF when the CAN FD feature is enabled.
+ * The RXMGMASK, RX14MASK, RX15MASK, and RXFGMASK registers
+ * need to be initialized as well. MCR[RFEN] must not be set
+ * during memory initialization.
+ */
+ reg_ctrl2 = priv->read(&regs->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
+ priv->write(reg_ctrl2, &regs->ctrl2);
+
+ memset_io(&regs->mb[0][0], 0,
+ offsetof(struct flexcan_regs, rx_smb1[3]) -
+ offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ memset_io(&regs->tx_smb_fd[0], 0,
+ offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
+ offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
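
The two memset_io() ranges can be verified against the comment's 0x080-0xADF and 0xF28-0xFFF figures using offsetof() on a trimmed-down copy of the layout; the padding sizes below are derived from the register offsets annotated in the struct above:

    #include <stdio.h>
    #include <stddef.h>

    struct regs_sketch {
        unsigned char _pad0[0x80];
        unsigned char mb[2][512];             /* 0x080 */
        unsigned char _pad1[0xad0 - 0x480];
        unsigned int  rx_smb1[4];             /* 0xad0 */
        unsigned char _pad2[0xf28 - 0xae0];
        unsigned int  tx_smb_fd[18];          /* 0xf28 */
        unsigned int  rx_smb0_fd[18];         /* 0xf70 */
        unsigned int  rx_smb1_fd[18];         /* 0xfb8 */
    };

    int main(void)
    {
        size_t classic = offsetof(struct regs_sketch, rx_smb1[3]) -
                         offsetof(struct regs_sketch, mb[0][0]) + 0x4;
        size_t fd      = offsetof(struct regs_sketch, rx_smb1_fd[17]) -
                         offsetof(struct regs_sketch, tx_smb_fd[0]) + 0x4;

        printf("classic: 0x%zx bytes\n", classic);  /* 0xa60: 0x080..0xadf */
        printf("fd:      0x%zx bytes\n", fd);       /* 0x0d8: 0xf28..0xfff */
        return 0;
    }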
/* flexcan_chip_start
@@ -1081,6 +1368,9 @@ static int flexcan_chip_start(struct net_device *dev)
if (err)
goto out_chip_disable;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
+ flexcan_ram_init(dev);
+
flexcan_set_bittiming(dev);
/* MCR
@@ -1127,6 +1417,12 @@ static int flexcan_chip_start(struct net_device *dev)
else
reg_mcr |= FLEXCAN_MCR_SRX_DIS;
+ /* MCR - CAN-FD */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ reg_mcr |= FLEXCAN_MCR_FDEN;
+ else
+ reg_mcr &= ~FLEXCAN_MCR_FDEN;
+
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
priv->write(reg_mcr, &regs->mcr);
@@ -1169,6 +1465,32 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ u32 reg_fdctrl;
+
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, 0x3) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, 0x3));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_64) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_64);
+ } else {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_8) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_8);
+ }
+
+ netdev_dbg(dev, "%s: writing fdctrl=0x%08x",
+ __func__, reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+ }
+
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
@@ -1204,28 +1526,43 @@ static int flexcan_chip_start(struct net_device *dev)
for (i = 0; i < priv->mb_count; i++)
priv->write(0, &regs->rximr[i]);
- /* On Vybrid, disable memory error detection interrupts
- * and freeze mode.
- * This also works around errata e5295 which generates
- * false positive memory errors and put the device in
- * freeze mode.
+ /* On Vybrid, disable non-correctable errors interrupt and
+ * freeze mode. It still can correct the correctable errors
+ * when HW supports ECC.
+ *
+ * This also works around errata e5295 which generates false
+ * positive memory errors and put the device in freeze mode.
*/
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
- * MECR register
+ * MECR register (step 1 - 5)
+ *
+ * 1. By default, CTRL2[ECRWRE] = 0, MECR[ECRWRDIS] = 1
+ * 2. set CTRL2[ECRWRE]
*/
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
priv->write(reg_ctrl2, &regs->ctrl2);
+ /* 3. clear MECR[ECRWRDIS] */
reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
priv->write(reg_mecr, &regs->mecr);
- reg_mecr |= FLEXCAN_MECR_ECCDIS;
+
+ /* 4. all writes to MECR must keep MECR[ECRWRDIS] cleared */
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
priv->write(reg_mecr, &regs->mecr);
+
+ /* 5. after configuration done, lock MECR by either
+ * setting MECR[ECRWRDIS] or clearing CTRL2[ECRWRE]
+ */
+ reg_mecr |= FLEXCAN_MECR_ECRWRDIS;
+ priv->write(reg_mecr, &regs->mecr);
+
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ECRWRE;
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
err = flexcan_transceiver_enable(priv);
@@ -1260,18 +1597,23 @@ static int flexcan_chip_start(struct net_device *dev)
return err;
}
-/* flexcan_chip_stop
+/* __flexcan_chip_stop
*
- * this functions is entered with clocks enabled
+ * this function is entered with clocks enabled
*/
-static void flexcan_chip_stop(struct net_device *dev)
+static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
+ int err;
/* freeze + disable module */
- flexcan_chip_freeze(priv);
- flexcan_chip_disable(priv);
+ err = flexcan_chip_freeze(priv);
+ if (err && !disable_on_error)
+ return err;
+ err = flexcan_chip_disable(priv);
+ if (err && !disable_on_error)
+ goto out_chip_unfreeze;
/* Disable all interrupts */
priv->write(0, &regs->imask2);
@@ -1281,6 +1623,23 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
+
+ return 0;
+
+ out_chip_unfreeze:
+ flexcan_chip_unfreeze(priv);
+
+ return err;
+}
+
+static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, true);
+}
+
+static inline int flexcan_chip_stop(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, false);
}
static int flexcan_open(struct net_device *dev)
@@ -1288,6 +1647,12 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ netdev_err(dev, "Three Samples mode and CAN-FD mode can't be used together\n");
+ return -EINVAL;
+ }
+
err = pm_runtime_get_sync(priv->dev);
if (err < 0)
return err;
@@ -1300,7 +1665,10 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_close;
- priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
+ else
+ priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
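
The mailbox count follows directly from the RAM geometry: two 512-byte blocks divided by the per-mailbox footprint. A quick standalone check, assuming the 8-byte flexcan_mb header (can_ctrl plus can_id) implied by the struct earlier in this file:

    #include <stdio.h>

    #define RAM_BLOCK      512   /* sizeof(priv->regs->mb[0]) */
    #define MB_HDR         8     /* assumption: can_ctrl + can_id */
    #define CAN_MAX_DLEN   8
    #define CANFD_MAX_DLEN 64

    int main(void)
    {
        unsigned int classic = MB_HDR + CAN_MAX_DLEN;     /* 16 bytes/mb */
        unsigned int fd      = MB_HDR + CANFD_MAX_DLEN;   /* 72 bytes/mb */

        printf("classic: %u mailboxes\n", 2 * (RAM_BLOCK / classic)); /* 64 */
        printf("fd:      %u mailboxes\n", 2 * (RAM_BLOCK / fd));      /* 14 */
        return 0;
    }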
@@ -1362,7 +1730,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
can_rx_offload_disable(&priv->offload);
- flexcan_chip_stop(dev);
+ flexcan_chip_stop_disable_on_error(dev);
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
@@ -1477,14 +1845,14 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
struct device_node *gpr_np;
struct flexcan_priv *priv;
phandle phandle;
- u32 out_val[5];
+ u32 out_val[3];
int ret;
if (!np)
return -EINVAL;
/* stop mode property format is:
- * <&gpr req_gpr req_bit ack_gpr ack_bit>.
+ * <&gpr req_gpr req_bit>.
*/
ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
ARRAY_SIZE(out_val));
@@ -1510,13 +1878,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv->stm.req_gpr = out_val[1];
priv->stm.req_bit = out_val[2];
- priv->stm.ack_gpr = out_val[3];
- priv->stm.ack_bit = out_val[4];
dev_dbg(&pdev->dev,
- "gpr %s req_gpr=0x02%x req_bit=%u ack_gpr=0x02%x ack_bit=%u\n",
- gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit,
- priv->stm.ack_gpr, priv->stm.ack_bit);
+ "gpr %s req_gpr=0x02%x req_bit=%u\n",
+ gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
device_set_wakeup_capable(&pdev->dev, true);
@@ -1531,6 +1896,8 @@ out_put_node:
}
static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
+ { .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -1539,6 +1906,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+ { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1562,11 +1930,13 @@ static int flexcan_probe(struct platform_device *pdev)
u8 clk_src = 1;
u32 clock_freq = 0;
- reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ reg_xceiver = devm_regulator_get_optional(&pdev->dev, "xceiver");
if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- else if (IS_ERR(reg_xceiver))
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
@@ -1608,6 +1978,12 @@ static int flexcan_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
+ !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ return -EINVAL;
+ }
+
dev = alloc_candev(sizeof(struct flexcan_priv), 1);
if (!dev)
return -ENOMEM;
@@ -1632,7 +2008,6 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->can.clock.freq = clock_freq;
- priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
priv->can.do_get_berr_counter = flexcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
@@ -1645,6 +2020,16 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->can.bittiming_const = &flexcan_fd_bittiming_const;
+ priv->can.data_bittiming_const =
+ &flexcan_fd_data_bittiming_const;
+ } else {
+ priv->can.bittiming_const = &flexcan_bittiming_const;
+ }
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1655,6 +2040,7 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ of_can_transceiver(dev);
devm_can_led_init(dev);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) {
@@ -1666,6 +2052,8 @@ static int flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
free_candev(dev);
return err;
}
@@ -1674,6 +2062,8 @@ static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ device_set_wakeup_enable(&pdev->dev, false);
+ device_set_wakeup_capable(&pdev->dev, false);
unregister_flexcandev(dev);
pm_runtime_disable(&pdev->dev);
free_candev(dev);
@@ -1685,7 +2075,7 @@ static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
if (netif_running(dev)) {
/* if wakeup is enabled, enter stop mode
@@ -1697,25 +2087,27 @@ static int __maybe_unused flexcan_suspend(struct device *device)
if (err)
return err;
} else {
- err = flexcan_chip_disable(priv);
+ err = flexcan_chip_stop(dev);
if (err)
return err;
- err = pm_runtime_force_suspend(device);
+ err = pinctrl_pm_select_sleep_state(device);
+ if (err)
+ return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
}
priv->can.state = CAN_STATE_SLEEPING;
- return err;
+ return 0;
}
static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
@@ -1727,15 +2119,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
} else {
- err = pm_runtime_force_resume(device);
+ err = pinctrl_pm_select_default_state(device);
if (err)
return err;
- err = flexcan_chip_enable(priv);
+ err = flexcan_chip_start(dev);
+ if (err)
+ return err;
}
}
- return err;
+ return 0;
}
static int __maybe_unused flexcan_runtime_suspend(struct device *device)
@@ -1761,8 +2155,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, true);
+ if (netif_running(dev)) {
+ int err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, true);
+
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -1772,8 +2174,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, false);
+ if (netif_running(dev)) {
+ int err;
+
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, false);
+ }
return 0;
}
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 378200b682fa..39802f107eb1 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -726,7 +726,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
txrx = "on rx ";
stats->rx_errors++;
}
- netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
+ netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
txrx);
spin_lock_irqsave(&priv->lock, flags);
@@ -1243,7 +1243,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
int rx_budget = budget / 2;
int tx_budget = budget - rx_budget;
- /* Half of the budget for receiveing messages */
+ /* Half of the budget for receiving messages */
rx_work_done = grcan_receive(dev, rx_budget);
/* Half of the budget for transmitting messages as that can trigger echo
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index d9216147ca93..48be627c85c2 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -20,5 +20,5 @@ config CAN_M_CAN_TCAN4X5X
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
- M_CAN controller. This device is a peripherial device that uses the
+ M_CAN controller. This device is a peripheral device that uses the
SPI bus for communication.
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 38ea5e600fb8..e6d0cb9ee02f 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_classdev *mcan_class = netdev_priv(ndev);
- m_can_class_suspend(dev);
-
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e4f4b5c9ebd6..e254e04ae257 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -5,7 +5,7 @@
* Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 99101d7027a8..640ba1b356ec 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -209,6 +209,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
* since buffers with lower ids have higher priority (hell..)
*/
netif_stop_queue(dev);
+ fallthrough;
case 2:
if (buf_id < priv->prev_buf_id) {
priv->cur_pri++;
@@ -540,16 +541,12 @@ static int mscan_open(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
- if (priv->clk_ipg) {
- ret = clk_prepare_enable(priv->clk_ipg);
- if (ret)
- goto exit_retcode;
- }
- if (priv->clk_can) {
- ret = clk_prepare_enable(priv->clk_can);
- if (ret)
- goto exit_dis_ipg_clock;
- }
+ ret = clk_prepare_enable(priv->clk_ipg);
+ if (ret)
+ goto exit_retcode;
+ ret = clk_prepare_enable(priv->clk_can);
+ if (ret)
+ goto exit_dis_ipg_clock;
/* common open */
ret = open_candev(dev);
@@ -583,11 +580,9 @@ exit_napi_disable:
napi_disable(&priv->napi);
close_candev(dev);
exit_dis_can_clock:
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
return ret;
}
@@ -606,10 +601,8 @@ static int mscan_close(struct net_device *dev)
close_candev(dev);
free_irq(dev->irq, dev);
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_ipg);
return 0;
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index db41dddd5771..5c180d2f3c3c 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -461,7 +461,7 @@ static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
PCH_ID2_DIR | (0x7ff << 2));
iowrite32(0x0, &priv->regs->ifregs[1].id1);
- /* Claring NewDat, TxRqst & IntPnd */
+ /* Clearing NewDat, TxRqst & IntPnd */
pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
PCH_IF_MCONT_TXRQXT);
@@ -834,7 +834,7 @@ static int pch_can_open(struct net_device *ndev)
struct pch_can_priv *priv = netdev_priv(ndev);
int retval;
- /* Regstering the interrupt. */
+ /* Registering the interrupt. */
retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (retval) {
@@ -957,8 +957,7 @@ static void pch_can_remove(struct pci_dev *pdev)
free_candev(priv->ndev);
}
-#ifdef CONFIG_PM
-static void pch_can_set_int_custom(struct pch_can_priv *priv)
+static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
{
/* Clearing the IE, SIE and EIE bits of Can control register. */
pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
@@ -969,14 +968,14 @@ static void pch_can_set_int_custom(struct pch_can_priv *priv)
}
/* This function retrieves interrupt enabled for the CAN device. */
-static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
+static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
{
/* Obtaining the status of IE, SIE and EIE interrupt bits. */
return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
}
-static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
- enum pch_ifreg dir)
+static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
+ u32 buff_num, enum pch_ifreg dir)
{
u32 ie, enable;
@@ -997,8 +996,8 @@ static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
return enable;
}
-static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
+static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, int set)
{
iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
@@ -1013,7 +1012,8 @@ static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}
-static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
+static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num)
{
u32 link;
@@ -1027,20 +1027,19 @@ static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
return link;
}
-static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
{
return (ioread32(&priv->regs->treq1) & 0xffff) |
(ioread32(&priv->regs->treq2) << 16);
}
-static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_can_suspend(struct device *dev_d)
{
int i;
- int retval;
u32 buf_stat; /* Variable for reading the transmit buffer status. */
int counter = PCH_COUNTER_LIMIT;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
/* Stop the CAN controller */
@@ -1058,7 +1057,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
udelay(1);
}
if (!counter)
- dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+ dev_err(dev_d, "%s -> Transmission time out.\n", __func__);
/* Save interrupt configuration and then disable them */
priv->int_enables = pch_can_get_int_enables(priv);
@@ -1081,35 +1080,16 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
/* Disable all Receive buffers */
pch_can_set_rx_all(priv, 0);
- retval = pci_save_state(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_save_state failed.\n");
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- }
- return retval;
+ return 0;
}
-static int pch_can_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_can_resume(struct device *dev_d)
{
int i;
- int retval;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_enable_device failed.\n");
- return retval;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Disabling all interrupts. */
@@ -1146,12 +1126,8 @@ static int pch_can_resume(struct pci_dev *pdev)
/* Restore Run Mode */
pch_can_set_run_mode(priv, PCH_CAN_RUN);
- return retval;
+ return 0;
}
-#else
-#define pch_can_suspend NULL
-#define pch_can_resume NULL
-#endif
static int pch_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
@@ -1252,13 +1228,16 @@ probe_exit_endev:
return rc;
}
+static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
+ pch_can_suspend,
+ pch_can_resume);
+
static struct pci_driver pch_can_pci_driver = {
.name = "pch_can",
.id_table = pch_pci_tbl,
.probe = pch_can_probe,
.remove = pch_can_remove,
- .suspend = pch_can_suspend,
- .resume = pch_can_resume,
+ .driver.pm = &pch_can_pm_ops,
};
module_pci_driver(pch_can_pci_driver);
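
The shape of this conversion generalizes to any PCI driver moving from the legacy .suspend/.resume hooks to dev_pm_ops. A kernel-context sketch, with foo_* names as placeholders:

    static int __maybe_unused foo_suspend(struct device *dev)
    {
        /* quiesce the device only; the PM core now handles
         * pci_save_state(), pci_set_power_state() and friends
         */
        return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
        /* re-initialize the device only; no pci_enable_device()
         * or pci_restore_state() boilerplate needed
         */
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct pci_driver foo_driver = {
        .name      = "foo",
        .driver.pm = &foo_pm_ops,    /* replaces .suspend/.resume */
    };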
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 10aa3e457c33..40c33b8a5fda 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
/* if this frame is an echo, */
- if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
- !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
+ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
@@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
netif_wake_queue(priv->ndev);
spin_unlock_irqrestore(&priv->echo_lock, flags);
- return 0;
+
+ /* if this frame is only an echo, stop here. Otherwise,
+ * continue to push this application self-received frame into
+ * its own rx queue.
+ */
+ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
+ return 0;
}
/* otherwise, it should be pushed into rx fifo */
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 9469d4421afe..0df1cdfa6835 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -116,8 +116,6 @@ MODULE_LICENSE("GPL v2");
#define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */
#define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */
-#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
-
/* Tx anticipation window (link logical address should be aligned on 2K
* boundary)
*/
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e8328910a234..6e95193b215b 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
@@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
{
if (skb_queue_len(&offload->skb_queue) >
offload->skb_queue_len_max) {
- kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return -ENOBUFS;
}
@@ -351,6 +351,17 @@ int can_rx_offload_add_fifo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
+{
+ if (offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
+
void can_rx_offload_enable(struct can_rx_offload *offload)
{
napi_enable(&offload->napi);
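
can_rx_offload_add_manual() is for drivers that read frames themselves and only want the sorted NAPI queueing. A kernel-context sketch of the intended wiring; the weight of 64 is illustrative:

    static int foo_offload_setup(struct net_device *dev,
                                 struct can_rx_offload *offload)
    {
        int err;

        /* mailbox_read must stay unset, or -EINVAL is returned */
        err = can_rx_offload_add_manual(dev, offload, 64);
        if (err)
            return err;

        can_rx_offload_enable(offload);
        return 0;
    }

    /* later, e.g. from the IRQ handler, frames are queued manually:
     *   can_rx_offload_queue_sorted(offload, skb, timestamp);
     */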
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 8c0244f51059..4713921bd511 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
/* GPIOICR byte access offsets */
#define PITA_GPOUT 0x18 /* GPx output value */
#define PITA_GPIN 0x19 /* GPx input value */
-#define PITA_GPOEN 0x1A /* configure GPx as ouput pin */
+#define PITA_GPOEN 0x1A /* configure GPx as output pin */
/* I2C GP bits */
#define PITA_GPIN_SCL 0x01 /* Serial Clock Line */
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 5e0d5e8101c8..cf951a783078 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -671,7 +671,7 @@ static int pcan_probe(struct pcmcia_device *pdev)
card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR);
card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR);
- /* display board name and firware version */
+ /* display board name and firmware version */
dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n",
pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card",
card->fw_major, card->fw_minor);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 16b9eec63490..8afd7d0a1000 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -5,14 +5,14 @@ config CAN_SOFTING
help
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.
- Softing Gmbh CAN cards come with 1 or 2 physical busses.
+ Softing Gmbh CAN cards come with 1 or 2 physical buses.
Those cards typically use Dual Port RAM to communicate
with the host CPU. The interface is then identical for PCI
and PCMCIA cards. This driver operates on a platform device,
which has been created by softing_cs or softing_pci driver.
Warning:
The API of the card does not allow fine control per bus, but
- controls the 2 busses on the card together.
+ controls the 2 buses on the card together.
As such, some actions (start/stop/busoff recovery) on 1 bus
must bring down the other bus too temporarily.
@@ -24,7 +24,7 @@ config CAN_SOFTING_CS
Support for PCMCIA cards from Softing Gmbh & some cards
from Vector Gmbh.
You need firmware for these, which you can get at
- http://developer.berlios.de/projects/socketcan/
+ https://github.com/linux-can/can-firmware
This version of the driver is written against
firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
In order to use the card as CAN device, you need the Softing generic
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 8f44fdd8804b..ccd649a8e37b 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -273,7 +273,7 @@ int softing_load_app_fw(const char *file, struct softing *card)
goto failed;
}
- /* regualar data */
+ /* regular data */
for (sum = 0, j = 0; j < len; ++j)
sum += dat[j];
/* work in 16bit (target) */
@@ -474,14 +474,14 @@ int softing_startstop(struct net_device *dev, int up)
if (ret)
goto failed;
if (!bus_bitmask_start)
- /* no busses to be brought up */
+ /* no buses to be brought up */
goto card_done;
if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
&& (softing_error_reporting(card->net[0])
!= softing_error_reporting(card->net[1]))) {
dev_alert(&card->pdev->dev,
- "err_reporting flag differs for busses\n");
+ "err_reporting flag differs for buses\n");
goto invalid;
}
error_reporting = 0;
@@ -635,7 +635,7 @@ int softing_startstop(struct net_device *dev, int up)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
open_candev(netdev);
if (dev != netdev) {
- /* notify other busses on the restart */
+ /* notify other buses on the restart */
softing_netdev_rx(netdev, &msg, 0);
++priv->can.can_stats.restarts;
}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d1ddf763b188..9d2faaa39ce4 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -170,8 +170,8 @@ static int softing_handle_1(struct softing *card)
msg.can_dlc = CAN_ERR_DLC;
msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
/*
- * service to all busses, we don't know which it was applicable
- * but only service busses that are online
+ * service to all buses, we don't know which it was applicable
+ * but only service buses that are online
*/
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
netdev = card->net[j];
@@ -339,7 +339,7 @@ static irqreturn_t softing_irq_thread(int irq, void *dev_id)
continue;
priv = netdev_priv(netdev);
if (!canif_is_active(netdev))
- /* it makes no sense to wake dead busses */
+ /* it makes no sense to wake dead buses */
continue;
if (priv->tx.pending >= TX_ECHO_SKB_MAX)
continue;
@@ -374,7 +374,7 @@ static irqreturn_t softing_irq_v1(int irq, void *dev_id)
}
/*
- * netdev/candev inter-operability
+ * netdev/candev interoperability
*/
static int softing_netdev_open(struct net_device *ndev)
{
@@ -447,8 +447,9 @@ static void softing_card_shutdown(struct softing *card)
{
int fw_up = 0;
- if (mutex_lock_interruptible(&card->fw.lock))
+ if (mutex_lock_interruptible(&card->fw.lock)) {
/* return -ERESTARTSYS */;
+ }
fw_up = card->fw.up;
card->fw.up = 0;
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
index 68a161547644..cd8d7904c5f0 100644
--- a/drivers/net/can/softing/softing_platform.h
+++ b/drivers/net/can/softing/softing_platform.h
@@ -19,7 +19,7 @@ struct softing_platform_data {
* 16bit, shared interrupt
*/
int generation;
- int nbus; /* # busses on device */
+ int nbus; /* # buses on device */
unsigned int freq; /* operating frequency in Hz */
unsigned int max_brp;
unsigned int max_sjw;
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index f780c79aac6f..f45449210047 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -4,15 +4,15 @@ menu "CAN SPI interfaces"
config CAN_HI311X
tristate "Holt HI311x SPI CAN controllers"
- depends on CAN_DEV && SPI && HAS_DMA
help
Driver for the Holt HI311x SPI CAN controllers.
config CAN_MCP251X
tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
- depends on HAS_DMA
help
Driver for the Microchip MCP251x and MCP25625 SPI CAN
controllers.
+source "drivers/net/can/spi/mcp251xfd/Kconfig"
+
endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index f115b2c46623..33e3f60bbc10 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_CAN_HI311X) += hi311x.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-y += mcp251xfd/
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d17608870f2d..22d814ae4edc 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -19,6 +19,7 @@
* Copyright 2007
*/
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
@@ -27,17 +28,20 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/freezer.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/property.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
-#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
@@ -52,6 +56,30 @@
#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
/* MPC251x registers */
+#define BFPCTRL 0x0c
+# define BFPCTRL_B0BFM BIT(0)
+# define BFPCTRL_B1BFM BIT(1)
+# define BFPCTRL_BFM(n) (BFPCTRL_B0BFM << (n))
+# define BFPCTRL_BFM_MASK GENMASK(1, 0)
+# define BFPCTRL_B0BFE BIT(2)
+# define BFPCTRL_B1BFE BIT(3)
+# define BFPCTRL_BFE(n) (BFPCTRL_B0BFE << (n))
+# define BFPCTRL_BFE_MASK GENMASK(3, 2)
+# define BFPCTRL_B0BFS BIT(4)
+# define BFPCTRL_B1BFS BIT(5)
+# define BFPCTRL_BFS(n) (BFPCTRL_B0BFS << (n))
+# define BFPCTRL_BFS_MASK GENMASK(5, 4)
+#define TXRTSCTRL 0x0d
+# define TXRTSCTRL_B0RTSM BIT(0)
+# define TXRTSCTRL_B1RTSM BIT(1)
+# define TXRTSCTRL_B2RTSM BIT(2)
+# define TXRTSCTRL_RTSM(n) (TXRTSCTRL_B0RTSM << (n))
+# define TXRTSCTRL_RTSM_MASK GENMASK(2, 0)
+# define TXRTSCTRL_B0RTS BIT(3)
+# define TXRTSCTRL_B1RTS BIT(4)
+# define TXRTSCTRL_B2RTS BIT(5)
+# define TXRTSCTRL_RTS(n) (TXRTSCTRL_B0RTS << (n))
+# define TXRTSCTRL_RTS_MASK GENMASK(5, 3)
#define CANSTAT 0x0e
#define CANCTRL 0x0f
# define CANCTRL_REQOP_MASK 0xe0
@@ -225,6 +253,10 @@ struct mcp251x_priv {
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ u8 reg_bfpctrl;
+#endif
};
#define MCP251X_IS(_model) \
@@ -290,8 +322,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 3);
- val = priv->spi_rx_buf[2];
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, &val, 1);
+ } else {
+ mcp251x_spi_trans(spi, 3);
+ val = priv->spi_rx_buf[2];
+ }
return val;
}
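On half-duplex SPI controllers the command bytes and the response cannot share a single full-duplex transfer, hence the spi_write_then_read() fallback above. A rough sketch of what that helper does internally (it additionally bounces the data through a DMA-safe buffer):

	struct spi_transfer xfers[] = {
		{ .tx_buf = tx_buf, .len = n_tx },	/* clock the command out */
		{ .rx_buf = rx_buf, .len = n_rx },	/* then clock the data in */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	err = spi_sync(spi, &msg);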
@@ -303,10 +339,18 @@ static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 4);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ u8 val[2] = { 0 };
- *v1 = priv->spi_rx_buf[2];
- *v2 = priv->spi_rx_buf[3];
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, val, 2);
+ *v1 = val[0];
+ *v2 = val[1];
+ } else {
+ mcp251x_spi_trans(spi, 4);
+
+ *v1 = priv->spi_rx_buf[2];
+ *v2 = priv->spi_rx_buf[3];
+ }
}
static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
@@ -345,6 +389,222 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
mcp251x_spi_trans(spi, 4);
}
+static u8 mcp251x_read_stat(struct spi_device *spi)
+{
+ return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK;
+}
+
+#define mcp251x_read_stat_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(mcp251x_read_stat, addr, val, cond, \
+ delay_us, timeout_us)
+
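This wrapper lets the open-coded jiffies polling loops below be replaced by readx_poll_timeout(). Conceptually (a sketch, not the exact macro body), a call like mcp251x_read_stat_poll_timeout(spi, value, value == 0, delay_us, timeout_us) expands to roughly:

	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);

	for (;;) {
		value = mcp251x_read_stat(spi);
		if (value == 0)
			break;
		if (ktime_after(ktime_get(), timeout)) {
			value = mcp251x_read_stat(spi);	/* one final sample */
			return value == 0 ? 0 : -ETIMEDOUT;
		}
		usleep_range(delay_us / 2, delay_us);	/* approximate */
	}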
+#ifdef CONFIG_GPIOLIB
+enum {
+ MCP251X_GPIO_TX0RTS = 0, /* inputs */
+ MCP251X_GPIO_TX1RTS,
+ MCP251X_GPIO_TX2RTS,
+ MCP251X_GPIO_RX0BF, /* outputs */
+ MCP251X_GPIO_RX1BF,
+};
+
+#define MCP251X_GPIO_INPUT_MASK \
+ GENMASK(MCP251X_GPIO_TX2RTS, MCP251X_GPIO_TX0RTS)
+#define MCP251X_GPIO_OUTPUT_MASK \
+ GENMASK(MCP251X_GPIO_RX1BF, MCP251X_GPIO_RX0BF)
+
+static const char * const mcp251x_gpio_names[] = {
+ [MCP251X_GPIO_TX0RTS] = "TX0RTS", /* inputs */
+ [MCP251X_GPIO_TX1RTS] = "TX1RTS",
+ [MCP251X_GPIO_TX2RTS] = "TX2RTS",
+ [MCP251X_GPIO_RX0BF] = "RX0BF", /* outputs */
+ [MCP251X_GPIO_RX1BF] = "RX1BF",
+};
+
+static inline bool mcp251x_gpio_is_input(unsigned int offset)
+{
+ return offset <= MCP251X_GPIO_TX2RTS;
+}
+
+static int mcp251x_gpio_request(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return 0;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl |= val;
+
+ return 0;
+}
+
+static void mcp251x_gpio_free(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, 0);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~val;
+}
+
+static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (mcp251x_gpio_is_input(offset))
+ return GPIOF_DIR_IN;
+
+ return GPIOF_DIR_OUT;
+}
+
+static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 reg, mask, val;
+
+ if (mcp251x_gpio_is_input(offset)) {
+ reg = TXRTSCTRL;
+ mask = TXRTSCTRL_RTS(offset);
+ } else {
+ reg = BFPCTRL;
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ }
+
+ mutex_lock(&priv->mcp_lock);
+ val = mcp251x_read_reg(priv->spi, reg);
+ mutex_unlock(&priv->mcp_lock);
+
+ return !!(val & mask);
+}
+
+static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ unsigned long bits = 0;
+ u8 val;
+
+ mutex_lock(&priv->mcp_lock);
+ if (maskp[0] & MCP251X_GPIO_INPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, TXRTSCTRL);
+ val = FIELD_GET(TXRTSCTRL_RTS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_INPUT_MASK, val);
+ }
+ if (maskp[0] & MCP251X_GPIO_OUTPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, BFPCTRL);
+ val = FIELD_GET(BFPCTRL_BFS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, val);
+ }
+ mutex_unlock(&priv->mcp_lock);
+
+ bitsp[0] = bits;
+ return 0;
+}
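The FIELD_GET()/FIELD_PREP() pair above remaps register bits to gpiochip offsets. A worked example:

	/* BFPCTRL_BFS_MASK         = GENMASK(5, 4)  (register bits)
	 * MCP251X_GPIO_OUTPUT_MASK = GENMASK(4, 3)  (gpiochip offsets)
	 *
	 * Say BFPCTRL reads back 0x10, i.e. B0BFS (bit 4) set:
	 *   FIELD_GET(BFPCTRL_BFS_MASK, 0x10)          -> 0b01
	 *   FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, 0b01) -> BIT(3)
	 * so gpiochip line 3 (RX0BF) reports high.
	 */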
+
+static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ val = value ? mask : 0;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void
+mcp251x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
+ mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
+
+ val = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, bitsp[0]);
+ val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
+
+ if (!mask)
+ return;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void mcp251x_gpio_restore(struct spi_device *spi)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ mcp251x_write_reg(spi, BFPCTRL, priv->reg_bfpctrl);
+}
+
+static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ struct gpio_chip *gpio = &priv->gpio;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ /* gpiochip handles TX[0..2]RTS and RX[0..1]BF */
+ gpio->label = priv->spi->modalias;
+ gpio->parent = &priv->spi->dev;
+ gpio->owner = THIS_MODULE;
+ gpio->request = mcp251x_gpio_request;
+ gpio->free = mcp251x_gpio_free;
+ gpio->get_direction = mcp251x_gpio_get_direction;
+ gpio->get = mcp251x_gpio_get;
+ gpio->get_multiple = mcp251x_gpio_get_multiple;
+ gpio->set = mcp251x_gpio_set;
+ gpio->set_multiple = mcp251x_gpio_set_multiple;
+ gpio->base = -1;
+ gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
+ gpio->names = mcp251x_gpio_names;
+ gpio->can_sleep = true;
+#ifdef CONFIG_OF_GPIO
+ gpio->of_node = priv->spi->dev.of_node;
+#endif
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
+}
+#else
+static inline void mcp251x_gpio_restore(struct spi_device *spi)
+{
+}
+
+static inline int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ return 0;
+}
+#endif
+
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
int len, int tx_buf_idx)
{
@@ -409,8 +669,16 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
- mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
- memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ spi_write_then_read(spi, priv->spi_tx_buf, 1,
+ priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN);
+ memcpy(buf + 1, priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN - 1);
+ } else {
+ mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+ memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ }
}
}
@@ -471,7 +739,8 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
/* May only be called when device is sleeping! */
static int mcp251x_hw_wake(struct spi_device *spi)
{
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
disable_irq(spi->irq);
@@ -484,14 +753,12 @@ static int mcp251x_hw_wake(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
/* Wait for the device to enter config mode */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
+ return ret;
}
/* Disable and clear pending interrupts */
@@ -546,7 +813,8 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
@@ -564,13 +832,12 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
/* Wait for the device to enter normal mode */
- timeout = jiffies + HZ;
- while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == 0,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
+ return ret;
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -614,7 +881,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -629,19 +896,12 @@ static int mcp251x_hw_reset(struct spi_device *spi)
mdelay(MCP251X_OST_DELAY_MS);
/* Wait for reset to finish */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- usleep_range(MCP251X_OST_DELAY_MS * 1000,
- MCP251X_OST_DELAY_MS * 1000 * 2);
-
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev,
- "MCP251x didn't enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
- return 0;
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret)
+ dev_err(&spi->dev, "MCP251x didn't enter in conf mode after reset\n");
+ return ret;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -761,6 +1021,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
mcp251x_hw_reset(spi);
mcp251x_setup(net, spi);
+ mcp251x_gpio_restore(spi);
} else {
mcp251x_hw_wake(spi);
}
@@ -1136,6 +1397,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
+ ret = mcp251x_gpio_setup(priv);
+ if (ret)
+ goto error_probe;
+
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
new file mode 100644
index 000000000000..f5a147a92cb2
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CAN_MCP251XFD
+ tristate "Microchip MCP251xFD SPI CAN controllers"
+ select REGMAP
+ help
+ Driver for the Microchip MCP251XFD SPI FD-CAN controller
+ family.
+
+config CAN_MCP251XFD_SANITY
+ depends on CAN_MCP251XFD
+ bool "Additional Sanity Checks"
+ help
+ This option enables additional sanity checks in the driver
+ that compare various internal counters with their in-chip
+ counterparts. This comes with a runtime overhead.
+ Disable if unsure.
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
new file mode 100644
index 000000000000..cb71244cbe89
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o
+
+mcp251xfd-objs :=
+mcp251xfd-objs += mcp251xfd-core.o
+mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-regmap.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
new file mode 100644
index 000000000000..9c215f7c5f81
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -0,0 +1,2927 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+#include "mcp251xfd.h"
+
+#define DEVICE_NAME "mcp251xfd"
+
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
+ .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
+ MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
+ MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP2517FD,
+};
+
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP2518FD,
+};
+
+/* Autodetect model, start with CRC enabled. */
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251XFD,
+};
+
+static const struct can_bittiming_const mcp251xfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
+{
+ switch (model) {
+ case MCP251XFD_MODEL_MCP2517FD:
+ return "MCP2517FD";
+ case MCP251XFD_MODEL_MCP2518FD:
+ return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251XFD:
+ return "MCP251xFD";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_get_model_str(priv->devtype_data.model);
+}
+
+static const char *mcp251xfd_get_mode_str(const u8 mode)
+{
+ switch (mode) {
+ case MCP251XFD_REG_CON_MODE_MIXED:
+ return "Mixed (CAN FD/CAN 2.0)";
+ case MCP251XFD_REG_CON_MODE_SLEEP:
+ return "Sleep";
+ case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
+ return "Internal Loopback";
+ case MCP251XFD_REG_CON_MODE_LISTENONLY:
+ return "Listen Only";
+ case MCP251XFD_REG_CON_MODE_CONFIG:
+ return "Configuration";
+ case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
+ return "External Loopback";
+ case MCP251XFD_REG_CON_MODE_CAN2_0:
+ return "CAN 2.0";
+ case MCP251XFD_REG_CON_MODE_RESTRICTED:
+ return "Restricted Operation";
+ }
+
+ return "<unknown>";
+}
+
+static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_enable(priv->reg_vdd);
+}
+
+static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_disable(priv->reg_vdd);
+}
+
+static inline int
+mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int
+mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
+static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ err = mcp251xfd_vdd_enable(priv);
+ if (err)
+ clk_disable_unprepare(priv->clk);
+
+ /* Wait for oscillator stabilisation time after power up */
+ usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
+ 2 * MCP251XFD_OSC_STAB_SLEEP_US);
+
+ return err;
+}
+
+static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = mcp251xfd_vdd_disable(priv);
+ if (err)
+ return err;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static inline u8
+mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ } else {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ }
+
+ return len;
+}
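mcp251xfd_first_byte_set() and mcp251xfd_last_byte_set() live in the driver header and are not part of this hunk; a functionally equivalent sketch, assuming the usual little-endian 32-bit register layout (the header's actual implementation may differ):

	static inline u8 first_byte_set(u32 mask)
	{
		return __ffs(mask) / BITS_PER_BYTE;	/* mask must be != 0 */
	}

	static inline u8 last_byte_set(u32 mask)
	{
		return (fls(mask) - 1) / BITS_PER_BYTE;
	}

	/* e.g. mask = 0x00ff0000: first = 2, last = 2, len = 1, so only
	 * the single byte at reg + 2 is clocked out over SPI.
	 */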
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_head)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static void
+mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_tx_ring *ring,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp251xfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
+
+static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ priv->tef.head = 0;
+ priv->tef.tail = 0;
+
+ /* TX */
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+
+ /* FIFO request to send */
+ addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+
+ /* RX */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+
+ if (!prev_rx_ring)
+ rx_ring->base =
+ mcp251xfd_get_tx_obj_addr(tx_ring,
+ tx_ring->obj_num);
+ else
+ rx_ring->base = prev_rx_ring->base +
+ prev_rx_ring->obj_size *
+ prev_rx_ring->obj_num;
+
+ prev_rx_ring = rx_ring;
+ }
+}
+
+static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring;
+ int tef_obj_size, tx_obj_size, rx_obj_size;
+ int tx_obj_num;
+ int ram_free, i;
+
+ tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
+ /* listen-only mode works like FD mode */
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ } else {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ }
+
+ tx_ring = priv->tx;
+ tx_ring->obj_num = tx_obj_num;
+ tx_ring->obj_size = tx_obj_size;
+
+ ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
+ (tef_obj_size + tx_obj_size);
+
+ for (i = 0;
+ i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
+ i++) {
+ int rx_obj_num;
+
+ rx_obj_num = ram_free / rx_obj_size;
+ rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp251xfd_ring_free(priv);
+ return -ENOMEM;
+ }
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+
+ ram_free -= rx_ring->obj_num * rx_ring->obj_size;
+ }
+ priv->rx_ring_num = i;
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
+ tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
+ tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
+ i, rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_size * rx_ring->obj_num);
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %d bytes\n",
+ ram_free);
+
+ return 0;
+}
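A worked example of the RAM budgeting above, with assumed object sizes (the real values come from the mcp251xfd_hw_*_obj structs in the header): Classic CAN mode, 8 TX objects, 12-byte TEF objects, 16-byte TX objects and 20-byte RX objects on a 2048-byte RAM:

	int ram_free = 2048 - 8 * (12 + 16);	/* 1824 bytes left for RX */
	int rx_obj_num = ram_free / 20;		/* 91 objects would fit */

	/* round down to a power of two, cap at 32 */
	rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);	/* 91 -> 64 -> 32 */
	ram_free -= rx_obj_num * 20;		/* 1184 bytes for the next ring */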
+
+static inline int
+mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
+{
+ u32 val;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
+ if (err)
+ return err;
+
+ *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
+
+ return 0;
+}
+
+static int
+__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ const u8 mode_req, bool nowait)
+{
+ u32 con, con_reqop;
+ int err;
+
+ con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
+ MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
+ if (err)
+ return err;
+
+ if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
+ return 0;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
+ con) == mode_req,
+ MCP251XFD_POLL_SLEEP_US,
+ MCP251XFD_POLL_TIMEOUT_US);
+ if (err) {
+ u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode);
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp251xfd_chip_set_mode(priv, mode_req, false);
+}
+
+static inline int
+mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp251xfd_chip_set_mode(priv, mode_req, true);
+}
+
+static inline bool mcp251xfd_osc_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
+static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 osc, osc_reference, osc_mask;
+ int err;
+
+ /* Set Power On Defaults for "Clock Output Divisor" and remove
+ * "Oscillator Disable" bit.
+ */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ /* Note:
+ *
+ * If the controller is in Sleep Mode the following write only
+ * removes the "Oscillator Disable" bit and powers it up. All
+ * other bits are unaffected.
+ */
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Wait for "Oscillator Ready" bit */
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (mcp251xfd_osc_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to detect %s (osc=0x%08x).\n",
+ mcp251xfd_get_model_str(priv), osc);
+ return -ENODEV;
+ } else if (err == -ETIMEDOUT) {
+ netdev_err(priv->ndev,
+ "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ } else if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
+{
+ const __be16 cmd = mcp251xfd_cmd_reset();
+ int err;
+
+ /* The Set Mode and SPI Reset commands only seem to work if
+ * the controller is not in Sleep Mode.
+ */
+ err = mcp251xfd_chip_clock_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
+ if (err)
+ return err;
+
+ /* spi_write_then_read() works with non DMA-safe buffers */
+ return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
+}
+
+static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
+{
+ u32 osc, osc_reference;
+ u8 mode;
+ int err;
+
+ err = mcp251xfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
+ netdev_info(priv->ndev,
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ return -ETIMEDOUT;
+ }
+
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY |
+ FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* check reset defaults of OSC reg */
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc != osc_reference) {
+ netdev_info(priv->ndev,
+ "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
+{
+ int err, i;
+
+ for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
+ if (i)
+ netdev_info(priv->ndev,
+ "Retrying to reset Controller.\n");
+
+ err = mcp251xfd_chip_softreset_do(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_softreset_check(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (err)
+ return err;
+
+ return -ETIMEDOUT;
+}
+
+static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
+{
+ u32 osc;
+ int err;
+
+ /* Activate Low Power Mode on Oscillator Disable. This only
+ * works on the MCP2518FD. The MCP2517FD will go into normal
+ * Sleep Mode instead.
+ */
+ osc = MCP251XFD_REG_OSC_LPMEN |
+ FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Set Time Base Counter Prescaler to 1.
+ *
+ * This means an overflow of the 32 bit Time Base Counter
+ * register at 40 MHz every 107 seconds.
+ */
+ return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
+ MCP251XFD_REG_TSCON_TBCEN);
+}
+
+static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+{
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u32 val = 0;
+ s8 tdco;
+ int err;
+
+ /* CAN Control Register
+ *
+ * - no transmit bandwidth sharing
+ * - config mode
+ * - disable transmit queue
+ * - store in transmit FIFO event
+ * - transition to restricted operation mode on system error
+ * - ESI is transmitted recessive when ESI of message is high or
+ * CAN controller error passive
+ * - restricted retransmission attempts,
+ * use TQXCON_TXAT and FIFOCON_TXAT
+ * - wake-up filter bits T11FILTER
+ * - use CAN bus line filter for wakeup
+ * - protocol exception is treated as a form error
+ * - Do not compare data bytes
+ */
+ val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
+ MCP251XFD_REG_CON_MODE_CONFIG) |
+ MCP251XFD_REG_CON_STEF |
+ MCP251XFD_REG_CON_ESIGM |
+ MCP251XFD_REG_CON_RTXAT |
+ FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
+ MCP251XFD_REG_CON_WFT_T11FILTER) |
+ MCP251XFD_REG_CON_WAKFIL |
+ MCP251XFD_REG_CON_PXEDIS;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ val |= MCP251XFD_REG_CON_ISOCRCEN;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
+ if (err)
+ return err;
+
+ /* Nominal Bit Time */
+ val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
+ bt->prop_seg + bt->phase_seg1 - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
+ if (err)
+ return err;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ /* Data Bit Time */
+ val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
+ dbt->prop_seg + dbt->phase_seg1 - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
+ if (err)
+ return err;
+
+ /* Transmitter Delay Compensation */
+ tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+ -64, 63);
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
+ MCP251XFD_REG_TDC_TDCMOD_AUTO) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
+}
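A worked example for the nominal bit timing write-out, with assumed numbers (in practice bt comes from the CAN core's computation): at a 40 MHz clock and 500 kbit/s, one bit is 80 time quanta with brp = 1; choosing prop_seg + phase_seg1 = 63, phase_seg2 = 16 and sjw = 16 gives an 80% sample point and:

	val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, 1 - 1) |	/* 0 */
	      FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, 63 - 1) |	/* 62 */
	      FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, 16 - 1) |	/* 15 */
	      FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, 16 - 1);	/* 15 */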
+
+static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input/RX Interrupt
+ *
+ * PIN1 must be Input, otherwise there is a glitch on the
+ * rx-INT line. It happens between setting the PIN as output
+ * (in the first byte of the SPI transfer) and configuring the
+ * PIN as interrupt (in the last byte of the SPI transfer).
+ */
+ val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
+ MCP251XFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+}
+
+static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input
+ */
+ val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
+ MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+}
+
+static int
+mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+ * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
+ * generate a RXOVIF, use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_RXTSEN |
+ MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FLTCON(ring->nr >> 2),
+ MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_TEFCON_TEFTSEN |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* FIFO 1 - TX */
+ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_TXEN |
+ MCP251XFD_REG_FIFOCON_TXATIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ void *ram;
+ u32 val = 0;
+ int err;
+
+ ecc->ecc_stat = 0;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
+ val = MCP251XFD_REG_ECCCON_ECCEN;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
+ MCP251XFD_REG_ECCCON_ECCEN, val);
+ if (err)
+ return err;
+
+ ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
+ if (!ram)
+ return -ENOMEM;
+
+ err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
+ MCP251XFD_RAM_SIZE);
+ kfree(ram);
+
+ return err;
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
+{
+ u8 mode;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ mode = MCP251XFD_REG_CON_MODE_MIXED;
+ else
+ mode = MCP251XFD_REG_CON_MODE_CAN2_0;
+
+ return mode;
+}
+
+static int
+__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
+ bool nowait)
+{
+ u8 mode;
+
+ mode = mcp251xfd_get_normal_mode(priv);
+
+ return __mcp251xfd_chip_set_mode(priv, mode, nowait);
+}
+
+static inline int
+mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_chip_set_normal_mode(priv, false);
+}
+
+static inline int
+mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_chip_set_normal_mode(priv, true);
+}
+
+static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+ int err;
+
+ val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
+ if (err)
+ return err;
+
+ val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
+ if (err)
+ return err;
+
+ val = MCP251XFD_REG_INT_CERRIE |
+ MCP251XFD_REG_INT_SERRIE |
+ MCP251XFD_REG_INT_RXOVIE |
+ MCP251XFD_REG_INT_TXATIE |
+ MCP251XFD_REG_INT_SPICRCIE |
+ MCP251XFD_REG_INT_ECCIE |
+ MCP251XFD_REG_INT_TEFIE |
+ MCP251XFD_REG_INT_MODIE |
+ MCP251XFD_REG_INT_RXIE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ val |= MCP251XFD_REG_INT_IVMIE;
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
+}
+
+static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+ u32 mask;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
+ if (err)
+ return err;
+
+ mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
+ mask, 0x0);
+ if (err)
+ return err;
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
+}
+
+static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
+{
+ priv->can.state = state;
+
+ mcp251xfd_chip_interrupts_disable(priv);
+ mcp251xfd_chip_rx_int_disable(priv);
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+}
+
+static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_set_bittiming(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_ecc_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ mcp251xfd_ring_init(priv);
+
+ err = mcp251xfd_chip_fifo_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ err = mcp251xfd_chip_set_normal_mode(priv);
+ if (err)
+ goto out_chip_stop;
+
+ return 0;
+
+ out_chip_stop:
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+
+ return err;
+}
+
+static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = mcp251xfd_chip_start(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_interrupts_enable(priv);
+ if (err) {
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ return err;
+ }
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 trec;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP251XFD_REG_TREC_TXBO)
+ bec->txerr = 256;
+ else
+ bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
+ bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
+
+ return 0;
+}
+
+static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+
+ /* Avoid waking up the controller if the interface is down */
+ if (!(ndev->flags & IFF_UP))
+ return 0;
+
+ /* The controller is powered down during Bus Off, use saved
+ * bec values.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ *bec = priv->bec;
+ return 0;
+ }
+
+ return __mcp251xfd_get_berr_counter(ndev, bec);
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
+ tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef.tail, priv->tef.head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
+{
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, seq_masked, tef_tail_masked;
+ int err;
+
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits; this should be enough to detect
+ * not-yet-completed, i.e. old TEF objects.
+ */
+ seq_masked = seq &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef.tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mcp251xfd_get_tef_tail(priv),
+ hw_tef_obj->ts);
+ stats->tx_packets++;
+
+ /* finally increment the TEF pointer */
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON,
+ GENMASK(15, 8),
+ MCP251XFD_REG_TEFCON_UINC);
+ if (err)
+ return err;
+
+ priv->tef.tail++;
+ tx_ring->tail++;
+
+ return mcp251xfd_check_tef_tail(priv);
+}
+
+static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ /* chip_tx_tail is the next TX object sent by the HW.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef.head)
+ new_head += tx_ring->obj_num;
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef.head = min(new_head, tx_ring->head);
+
+ return mcp251xfd_check_tef_tail(priv);
+}
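The round_down() arithmetic reconstructs a free-running head counter from the chip's wrapping FIFO index. A worked example with illustrative numbers:

	/* obj_num = 8, tef.head = 13 (chip-side index 13 % 8 = 5):
	 *
	 *   chip_tx_tail = 7: new_head = round_down(13, 8) + 7 = 15
	 *                     15 > 13, no wrap; objects 13..14 completed
	 *
	 *   chip_tx_tail = 2: new_head = round_down(13, 8) + 2 = 10
	 *                     10 <= 13, so the index wrapped around:
	 *                     new_head += 8 -> 18; objects 13..17 completed
	 *
	 * Either way the result is clamped to tx_ring->head.
	 */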
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / sizeof(u32) * len);
+}
+
+static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ len = mcp251xfd_get_tef_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+ * read the TEF objects too early. Leave the loop and
+ * let the interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+ }
+
+ out_netif_wake_queue:
+ mcp251xfd_ecc_tefif_successful(priv);
+
+ if (mcp251xfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ int err;
+
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
+ if (err)
+ return err;
+
+ /* chip_rx_head is the next RX object filled by the HW.
+ * The new RX head must be >= the old head.
+ */
+ new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+ if (new_head <= ring->head)
+ new_head += ring->obj_num;
+
+ ring->head = new_head;
+
+ return mcp251xfd_check_rx_tail(priv, ring);
+}
+
+static void
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
+ cfd->len = can_dlc2len(get_canfd_dlc(dlc));
+ } else {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
+ hw_rx_obj->flags));
+ }
+
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+}
+
+static int
+mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ int err;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!cfd) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ ring->tail++;
+
+ /* finally increment the RX pointer */
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr),
+ GENMASK(15, 8),
+ MCP251XFD_REG_FIFOCON_UINC);
+}
+
+static inline int
+mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ int err;
+
+ err = regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / sizeof(u32));
+
+ return err;
+}
+
+static int
+mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len;
+ int err, i;
+
+ err = mcp251xfd_rx_ring_update(priv, ring);
+ if (err)
+ return err;
+
+ while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, len);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
+ u32 *timestamp)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+}
+
+static struct sk_buff *
+mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ int err;
+
+ err = mcp251xfd_get_timestamp(priv, timestamp);
+ if (err)
+ return NULL;
+
+ return alloc_can_err_skb(priv->ndev, cf);
+}
+
+static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct mcp251xfd_rx_ring *ring;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 timestamp, rxovif;
+ int err, i;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
+ if (err)
+ return err;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, i) {
+ if (!(rxovif & BIT(ring->fifo_nr)))
+ continue;
+
+ /* If SERRIF is active, there was a RX MAB overflow. */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
+ netdev_info(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
+ } else {
+ netdev_info(priv->ndev,
+ "RX-%d: FIFO overflow.\n", ring->nr);
+ }
+
+ err = regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ MCP251XFD_REG_FIFOSTA_RXOVIF,
+ 0x0);
+ if (err)
+ return err;
+ }
+
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
+{
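+	/* TX attempt IRQ: nothing to do yet, just log its occurrence. */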
+ netdev_info(priv->ndev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 bdiag1, timestamp;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ int err;
+
+ err = mcp251xfd_get_timestamp(priv, &timestamp);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
+ if (err)
+ return err;
+
+	/* Write 0s to clear the error bits; don't write 1s to the
+	 * inactive bits, as that would set them.
+	 */
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
+ if (err)
+ return err;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ /* Controller misconfiguration */
+ if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
+		netdev_err(priv->ndev,
+			   "recv'd DLC is larger than PLSIZE of FIFO element.\n");
+
+ /* RX errors */
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
+ MCP251XFD_REG_BDIAG1_NCRCERR)) {
+ netdev_dbg(priv->ndev, "CRC error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
+ MCP251XFD_REG_BDIAG1_NSTUFERR)) {
+ netdev_dbg(priv->ndev, "Stuff error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
+ MCP251XFD_REG_BDIAG1_NFORMERR)) {
+ netdev_dbg(priv->ndev, "Format error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ /* TX errors */
+ if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
+ netdev_dbg(priv->ndev, "NACK error\n");
+
+ stats->tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
+ MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
+ netdev_dbg(priv->ndev, "Bit1 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
+ MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
+ netdev_dbg(priv->ndev, "Bit0 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
+ }
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ enum can_state new_state, rx_state, tx_state;
+ u32 trec, timestamp;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP251XFD_REG_TREC_TXBO)
+ tx_state = CAN_STATE_BUS_OFF;
+ else if (trec & MCP251XFD_REG_TREC_TXBP)
+ tx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP251XFD_REG_TREC_TXWARN)
+ tx_state = CAN_STATE_ERROR_WARNING;
+ else
+ tx_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (trec & MCP251XFD_REG_TREC_RXBP)
+ rx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP251XFD_REG_TREC_RXWARN)
+ rx_state = CAN_STATE_ERROR_WARNING;
+ else
+ rx_state = CAN_STATE_ERROR_ACTIVE;
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+		/* As we're going to switch off the chip now, save the
+		 * error counters, so that they can be returned to
+		 * userspace if do_get_berr_counter() is called while
+		 * the chip is in Bus Off.
+		 */
+ err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
+ if (err)
+ return err;
+
+ mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(priv->ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ struct can_berr_counter bec;
+
+ err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
+ if (err)
+ return err;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
+{
+ const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
+ u8 mode;
+ int err;
+
+ err = mcp251xfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode == mode_reference) {
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ return 0;
+ }
+
+	/* According to MCP2517FD errata DS80000792B 1., during a TX
+	 * MAB underflow, the controller will transition to Restricted
+	 * Operation Mode or Listen Only Mode (depending on SERR2LOM).
+	 *
+	 * However, this is not always the case. If SERR2LOM is
+	 * configured for Restricted Operation Mode (SERR2LOM not set),
+	 * the MCP2517FD will sometimes transition to Listen Only Mode
+	 * first. When polling the mode bits we see that it transitions
+	 * to Restricted Operation Mode shortly afterwards.
+	 */
+ if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
+ (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
+ mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ else
+ netdev_err(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+
+	/* After the application requests Normal mode, the controller
+	 * will automatically attempt to retransmit the message that
+	 * caused the TX MAB underflow.
+ *
+ * However, if there is an ECC error in the TX-RAM, we first
+ * have to reload the tx-object before requesting Normal
+ * mode. This is done later in mcp251xfd_handle_eccif().
+ */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
+ *set_normal_mode = true;
+ return 0;
+ }
+
+ return mcp251xfd_chip_set_normal_mode_nowait(priv);
+}
+
+static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ bool handled = false;
+
+	/* TX MAB underflow
+	 *
+	 * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+	 * underflow is indicated by SERRIF and MODIF.
+	 *
+	 * In addition to the effects mentioned in the Errata, there
+	 * are Bus Errors due to the aborted CAN frame, so an IVMIF
+	 * will be seen as well.
+	 *
+	 * Sometimes there is an ECC error in the TX-RAM, which leads
+	 * to a TX MAB underflow. In that case, probably due to a race
+	 * condition, there is no associated MODIF pending.
+	 *
+	 * Further, there are situations where the SERRIF is caused
+	 * by an ECC error in the TX-RAM, but not even the ECCIF is
+	 * set. This only seems to happen _after_ the first occurrence
+	 * of an ECCIF (which is tracked in ecc->cnt).
+	 *
+	 * Treat all of these as known system errors.
+	 */
+ if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
+ priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
+ priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
+ ecc->cnt) {
+ const char *msg;
+
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
+ ecc->cnt)
+ msg = "TX MAB underflow due to ECC error detected.";
+ else
+ msg = "TX MAB underflow detected.";
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
+ netdev_dbg(priv->ndev, "%s\n", msg);
+ else
+ netdev_info(priv->ndev, "%s\n", msg);
+
+ stats->tx_aborted_errors++;
+ stats->tx_errors++;
+ handled = true;
+ }
+
+	/* RX MAB overflow
+	 *
+	 * According to MCP2517FD Errata DS80000792B 1. an RX MAB
+	 * overflow is indicated by SERRIF.
+	 *
+	 * In addition to the effects mentioned in the Errata, an
+	 * RXOVIF is raised most of the time, provided the receiving
+	 * FIFO has RXOVIE activated (and we have enabled RXOVIE on
+	 * all FIFOs).
+	 *
+	 * Sometimes there is no RXOVIF; only an RXIF is pending.
+	 *
+	 * Treat all of these as known system errors.
+	 */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
+ priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
+ stats->rx_dropped++;
+ handled = true;
+ }
+
+ if (!handled)
+ netdev_err(priv->ndev,
+ "Unhandled System Error Interrupt (intf=0x%08x)!\n",
+ priv->regs_status.intf);
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
+{
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u8 chip_tx_tail, tx_tail, offset;
+ u16 addr;
+ int err;
+
+ addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ tx_tail = mcp251xfd_get_tx_tail(tx_ring);
+ offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);
+
+	/* Bail out if any of the following conditions is met:
+ * - tx_tail information is inconsistent
+ * - for mcp2517fd: offset not 0
+ * - for mcp2518fd: offset not 0 or 1
+ */
+ if (chip_tx_tail != tx_tail ||
+ !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ netdev_err(priv->ndev,
+ "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
+ addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
+ offset);
+ return -EINVAL;
+ }
+
+ netdev_info(priv->ndev,
+ "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
+ ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
+ "Single" : "Double",
+ addr, nr, tx_ring->tail, tx_tail, offset);
+
+ /* reload tx_obj into controller RAM ... */
+ tx_obj = &tx_ring->obj[nr];
+ err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
+ if (err)
+ return err;
+
+ /* ... and trigger retransmit */
+ return mcp251xfd_chip_set_normal_mode(priv);
+}
+
+static int
+mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ const char *msg;
+ bool in_tx_ram;
+ u32 ecc_stat;
+ u16 addr;
+ u8 nr;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
+ MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
+ if (err)
+ return err;
+
+ /* Check if ECC error occurred in TX-RAM */
+ addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
+ err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
+ if (!err)
+ in_tx_ram = true;
+ else if (err == -ENOENT)
+ in_tx_ram = false;
+ else
+ return err;
+
+ /* Errata Reference:
+ * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ *
+ * ECC single error correction does not work in all cases:
+ *
+ * Fix/Work Around:
+ * Enable single error correction and double error detection
+ * interrupts by setting SECIE and DEDIE. Handle SECIF as a
+ * detection interrupt and do not rely on the error
+ * correction. Instead, handle both interrupts as a
+ * notification that the RAM word at ERRADDR was corrupted.
+ */
+ if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
+ msg = "Single ECC Error detected at address";
+ else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
+ msg = "Double ECC Error detected at address";
+ else
+ return -EINVAL;
+
+ if (!in_tx_ram) {
+ ecc->ecc_stat = 0;
+
+ netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
+ } else {
+		/* Recurring error? */
+ if (ecc->ecc_stat == ecc_stat) {
+ ecc->cnt++;
+ } else {
+ ecc->ecc_stat = ecc_stat;
+ ecc->cnt = 1;
+ }
+
+ netdev_info(priv->ndev,
+ "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
+ msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");
+
+ if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
+ return mcp251xfd_handle_eccif_recover(priv, nr);
+ }
+
+ if (set_normal_mode)
+ return mcp251xfd_chip_set_normal_mode_nowait(priv);
+
+ return 0;
+}
+
+static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
+{
+ int err;
+ u32 crc;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
+ MCP251XFD_REG_CRC_IF_MASK,
+ ~crc);
+ if (err)
+ return err;
+
+ if (crc & MCP251XFD_REG_CRC_FERRIF)
+ netdev_notice(priv->ndev, "CRC write command format error.\n");
+ else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
+ netdev_notice(priv->ndev,
+ "CRC write error detected. CRC=0x%04lx.\n",
+ FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
+
+ return 0;
+}
+
+#define mcp251xfd_handle(priv, irq, ...) \
+({ \
+ struct mcp251xfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
+ __stringify(irq), err); \
+ err; \
+})
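+/* Example (illustrative only): a statement like
+ *
+ *	err = mcp251xfd_handle(priv, rxif);
+ *
+ * expands to a call of mcp251xfd_handle_rxif(priv), logs the handler
+ * name and error code via netdev_err() on failure, and evaluates to
+ * the handler's return value.
+ */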
+
+static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
+{
+ struct mcp251xfd_priv *priv = dev_id;
+ irqreturn_t handled = IRQ_NONE;
+ int err;
+
+ if (priv->rx_int)
+ do {
+ int rx_pending;
+
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+ if (!rx_pending)
+ break;
+
+ err = mcp251xfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ do {
+ u32 intf_pending, intf_pending_clearable;
+ bool set_normal_mode = false;
+
+ err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status,
+ sizeof(priv->regs_status) /
+ sizeof(u32));
+ if (err)
+ goto out_fail;
+
+ intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
+ priv->regs_status.intf) &
+ FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
+ priv->regs_status.intf);
+
+ if (!(intf_pending))
+ return handled;
+
+ /* Some interrupts must be ACKed in the
+ * MCP251XFD_REG_INT register.
+ * - First ACK then handle, to avoid lost-IRQ race
+ * condition on fast re-occurring interrupts.
+ * - Write "0" to clear active IRQs, "1" to all other,
+ * to avoid r/m/w race condition on the
+ * MCP251XFD_REG_INT register.
+ */
+ intf_pending_clearable = intf_pending &
+ MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
+ if (intf_pending_clearable) {
+ err = regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_INT,
+ MCP251XFD_REG_INT_IF_MASK,
+ ~intf_pending_clearable);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_MODIF) {
+ err = mcp251xfd_handle(priv, modif, &set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_RXIF) {
+ err = mcp251xfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
+ err = mcp251xfd_handle(priv, tefif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
+ err = mcp251xfd_handle(priv, rxovif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
+ err = mcp251xfd_handle(priv, txatif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
+ err = mcp251xfd_handle(priv, ivmif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
+ err = mcp251xfd_handle(priv, serrif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
+ err = mcp251xfd_handle(priv, eccif, set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
+ err = mcp251xfd_handle(priv, spicrcif);
+ if (err)
+ goto out_fail;
+ }
+
+		/* On the MCP2517FD and MCP2518FD, we don't get a
+		 * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
+		 * ERROR_ACTIVE.
+		 */
+ if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ err = mcp251xfd_handle(priv, cerrif);
+ if (err)
+ goto out_fail;
+
+ /* In Bus Off we completely shut down the
+ * controller. Every subsequent register read
+ * will read bogus data, and if
+ * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
+ * check will fail, too. So leave IRQ handler
+ * directly.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF)
+ return IRQ_HANDLED;
+ }
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ out_fail:
+ netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
+ err, priv->regs_status.intf);
+ mcp251xfd_chip_interrupts_disable(priv);
+
+ return handled;
+}
+
+static inline struct mcp251xfd_tx_obj *
+mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp251xfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int offset, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP251XFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+	/* Use the MCP2518FD mask even on the MCP2517FD. This does no
+	 * harm, as only the lower 7 bits will be transferred into the
+	 * TEF object.
+	 */
+ dlc = can_len2dlc(cfd->len);
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
+ FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP251XFD_OBJ_FLAGS_RTR;
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP251XFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP251XFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP251XFD_OBJ_FLAGS_BRS;
+ }
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Clear data at end of CAN frame */
+ offset = round_down(cfd->len, sizeof(u32));
+ len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
+ if (MCP251XFD_SANITIZE_CAN && len)
+ memset(hw_tx_obj->data + offset, 0x0, len);
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP251XFD_SANITIZE_CAN)
+ len += round_up(can_dlc2len(dlc), sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
+
+static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
+static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring)
+{
+ if (mcp251xfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (mcp251xfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
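+/* Sketch of the ring accounting assumed above (illustration only):
+ * head and tail are free-running counters and obj_num is a power of
+ * two, so mcp251xfd_get_tx_free() and mcp251xfd_get_tx_head() boil
+ * down to:
+ *
+ *	free  = obj_num - (head - tail);
+ *	index = head & (obj_num - 1);
+ */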
+
+static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u8 tx_head;
+ int err;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp251xfd_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
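+	/* The free-running ring head doubles as the sequence number;
+	 * it is echoed back in the TEF object, where the TEF handler
+	 * uses it to match completed frames to their echo skbs.
+	 */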
+ mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
+ netif_stop_queue(ndev);
+
+ can_put_echo_skb(skb, ndev, tx_head);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+ if (err)
+ goto out_err;
+
+ return NETDEV_TX_OK;
+
+ out_err:
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+
+ return NETDEV_TX_OK;
+}
+
+static int mcp251xfd_open(struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const struct spi_device *spi = priv->spi;
+ int err;
+
+ err = pm_runtime_get_sync(ndev->dev.parent);
+ if (err < 0) {
+ pm_runtime_put_noidle(ndev->dev.parent);
+ return err;
+ }
+
+ err = open_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put;
+
+ err = mcp251xfd_ring_alloc(priv);
+ if (err)
+ goto out_close_candev;
+
+ err = mcp251xfd_transceiver_enable(priv);
+ if (err)
+ goto out_mcp251xfd_ring_free;
+
+ err = mcp251xfd_chip_start(priv);
+ if (err)
+ goto out_transceiver_disable;
+
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
+ IRQF_ONESHOT, dev_name(&spi->dev),
+ priv);
+ if (err)
+ goto out_can_rx_offload_disable;
+
+ err = mcp251xfd_chip_interrupts_enable(priv);
+ if (err)
+ goto out_free_irq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+ out_free_irq:
+ free_irq(spi->irq, priv);
+ out_can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ out_transceiver_disable:
+ mcp251xfd_transceiver_disable(priv);
+ out_mcp251xfd_ring_free:
+ mcp251xfd_ring_free(priv);
+ out_close_candev:
+ close_candev(ndev);
+ out_pm_runtime_put:
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+
+ return err;
+}
+
+static int mcp251xfd_stop(struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ mcp251xfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ mcp251xfd_transceiver_disable(priv);
+ mcp251xfd_ring_free(priv);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops mcp251xfd_netdev_ops = {
+ .ndo_open = mcp251xfd_open,
+ .ndo_stop = mcp251xfd_stop,
+ .ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void
+mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
+{
+ const struct spi_device *spi = priv->spi;
+ const struct spi_controller *ctlr = spi->controller;
+
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
+}
+
+static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
+{
+ const struct net_device *ndev = priv->ndev;
+ const struct mcp251xfd_devtype_data *devtype_data;
+ u32 osc;
+ int err;
+
+ /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
+ * autodetect the model.
+ */
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
+ MCP251XFD_REG_OSC_LPMEN,
+ MCP251XFD_REG_OSC_LPMEN);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc & MCP251XFD_REG_OSC_LPMEN)
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+
+ if (!mcp251xfd_is_251X(priv) &&
+ priv->devtype_data.model != devtype_data->model) {
+ netdev_info(ndev,
+ "Detected %s, but firmware specifies a %s. Fixing up.",
+ __mcp251xfd_get_model_str(devtype_data->model),
+ mcp251xfd_get_model_str(priv));
+ }
+ priv->devtype_data = *devtype_data;
+
+ /* We need to preserve the Half Duplex Quirk. */
+ mcp251xfd_register_quirks(priv);
+
+ /* Re-init regmap with quirks of detected model. */
+ return mcp251xfd_regmap_init(priv);
+}
+
+static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
+{
+ int err, rx_pending;
+
+ if (!priv->rx_int)
+ return 0;
+
+ err = mcp251xfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ /* Check if RX_INT is properly working. The RX_INT should not
+ * be active after a softreset.
+ */
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+
+ err = mcp251xfd_chip_rx_int_disable(priv);
+ if (err)
+ return err;
+
+ if (!rx_pending)
+ return 0;
+
+ netdev_info(priv->ndev,
+ "RX_INT active after softreset, disabling RX_INT support.");
+ devm_gpiod_put(&priv->spi->dev, priv->rx_int);
+ priv->rx_int = NULL;
+
+ return 0;
+}
+
+static int
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
+ u32 *dev_id, u32 *effective_speed_hz)
+{
+ struct mcp251xfd_map_buf_nocrc *buf_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx;
+ struct spi_transfer xfer[2] = { };
+ int err;
+
+ buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
+ if (!buf_rx)
+ return -ENOMEM;
+
+ buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
+ if (!buf_tx) {
+ err = -ENOMEM;
+ goto out_kfree_buf_rx;
+ }
+
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[1].rx_buf = buf_rx->data;
+	xfer[1].len = sizeof(*dev_id);
+
+ mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+ err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
+ if (err)
+ goto out_kfree_buf_tx;
+
+ *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+ *effective_speed_hz = xfer->effective_speed_hz;
+
+ out_kfree_buf_tx:
+ kfree(buf_tx);
+ out_kfree_buf_rx:
+ kfree(buf_rx);
+
+	return err;
+}
+
+#define MCP251XFD_QUIRK_ACTIVE(quirk) \
+ (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
+
+static int
+mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
+{
+ u32 dev_id, effective_speed_hz;
+ int err;
+
+ err = mcp251xfd_register_get_dev_id(priv, &dev_id,
+ &effective_speed_hz);
+ if (err)
+ return err;
+
+ netdev_info(priv->ndev,
+ "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ mcp251xfd_get_model_str(priv),
+ FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
+ FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
+ priv->rx_int ? '+' : '-',
+ MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
+ MCP251XFD_QUIRK_ACTIVE(CRC_REG),
+ MCP251XFD_QUIRK_ACTIVE(CRC_RX),
+ MCP251XFD_QUIRK_ACTIVE(CRC_TX),
+ MCP251XFD_QUIRK_ACTIVE(ECC),
+ MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ priv->can.clock.freq / 1000000,
+ priv->can.clock.freq % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_orig / 1000000,
+ priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
+ priv->spi->max_speed_hz / 1000000,
+ priv->spi->max_speed_hz % 1000000 / 1000 / 10,
+ effective_speed_hz / 1000000,
+ effective_speed_hz % 1000000 / 1000 / 10);
+
+ return 0;
+}
+
+static int mcp251xfd_register(struct mcp251xfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ err = mcp251xfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ pm_runtime_get_noresume(ndev->dev.parent);
+ err = pm_runtime_set_active(ndev->dev.parent);
+ if (err)
+ goto out_runtime_put_noidle;
+ pm_runtime_enable(ndev->dev.parent);
+
+ mcp251xfd_register_quirks(priv);
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_chip_detect(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_check_rx_int(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_done(priv);
+ if (err)
+ goto out_unregister_candev;
+
+ /* Put controller into sleep mode and let pm_runtime_put()
+ * disable the clocks and vdd. If CONFIG_PM is not enabled,
+ * the clocks and vdd will stay powered.
+ */
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ if (err)
+ goto out_unregister_candev;
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+ out_unregister_candev:
+ unregister_candev(ndev);
+ out_chip_set_mode_sleep:
+ mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+ out_runtime_put_noidle:
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+ return err;
+}
+
+static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+
+ pm_runtime_get_sync(ndev->dev.parent);
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp251xfd_clks_and_vdd_disable(priv);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id mcp251xfd_of_match[] = {
+ {
+ .compatible = "microchip,mcp2517fd",
+ .data = &mcp251xfd_devtype_data_mcp2517fd,
+ }, {
+ .compatible = "microchip,mcp2518fd",
+ .data = &mcp251xfd_devtype_data_mcp2518fd,
+ }, {
+ .compatible = "microchip,mcp251xfd",
+ .data = &mcp251xfd_devtype_data_mcp251xfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
+
+static const struct spi_device_id mcp251xfd_id_table[] = {
+ {
+ .name = "mcp2517fd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
+ }, {
+ .name = "mcp2518fd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
+ }, {
+ .name = "mcp251xfd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
+
+static int mcp251xfd_probe(struct spi_device *spi)
+{
+ const void *match;
+ struct net_device *ndev;
+ struct mcp251xfd_priv *priv;
+ struct gpio_desc *rx_int;
+ struct regulator *reg_vdd, *reg_xceiver;
+ struct clk *clk;
+ u32 freq;
+ int err;
+
+ rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
+ GPIOD_IN);
+ if (PTR_ERR(rx_int) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(rx_int))
+ return PTR_ERR(rx_int);
+
+ reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
+ if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_vdd) == -ENODEV)
+ reg_vdd = NULL;
+ else if (IS_ERR(reg_vdd))
+ return PTR_ERR(reg_vdd);
+
+ reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
+ reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
+ return PTR_ERR(clk);
+ }
+ freq = clk_get_rate(clk);
+
+ /* Sanity check */
+ if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
+ freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low or high.\n",
+ freq);
+ return -ERANGE;
+ }
+
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
+ freq);
+ return -ERANGE;
+ }
+
+ ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
+ MCP251XFD_TX_OBJ_NUM_MAX);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ndev->netdev_ops = &mcp251xfd_netdev_ops;
+ ndev->irq = spi->irq;
+ ndev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ priv->can.clock.freq = freq;
+ priv->can.do_set_mode = mcp251xfd_set_mode;
+ priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
+ priv->can.bittiming_const = &mcp251xfd_bittiming_const;
+ priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->ndev = ndev;
+ priv->spi = spi;
+ priv->rx_int = rx_int;
+ priv->clk = clk;
+ priv->reg_vdd = reg_vdd;
+ priv->reg_xceiver = reg_xceiver;
+
+ match = device_get_match_data(&spi->dev);
+ if (match)
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
+ else
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)
+ spi_get_device_id(spi)->driver_data;
+
+ /* Errata Reference:
+ * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
+ *
+ * The SPI can write corrupted data to the RAM at fast SPI
+ * speeds:
+ *
+ * Simultaneous activity on the CAN bus while writing data to
+ * RAM via the SPI interface, with high SCK frequency, can
+ * lead to corrupted data being written to RAM.
+ *
+ * Fix/Work Around:
+ * Ensure that FSCK is less than or equal to 0.85 *
+ * (FSYSCLK/2).
+ *
+ * Known good and bad combinations are:
+ *
+ * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk Status config
+ *
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 9375000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 18750000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz good assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 9523809 Hz 95.34% 28571429 Hz bad assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ *
+ */
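+	/* Worked example (for illustration): with a 40 MHz oscillator
+	 * the SPI clock below is capped to 40000000 / 2 / 1000 * 850 =
+	 * 17 MHz, i.e. 0.85 * (FSYSCLK / 2).
+	 */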
+ priv->spi_max_speed_hz_orig = spi->max_speed_hz;
+ spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ spi->bits_per_word = 8;
+ spi->rt = true;
+ err = spi_setup(spi);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp251xfd_regmap_init(priv);
+ if (err)
+ goto out_free_candev;
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ MCP251XFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp251xfd_register(priv);
+ if (err)
+ goto out_free_candev;
+
+ return 0;
+
+ out_free_candev:
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+
+ free_candev(ndev);
+
+ return err;
+}
+
+static int mcp251xfd_remove(struct spi_device *spi)
+{
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct net_device *ndev = priv->ndev;
+
+ can_rx_offload_del(&priv->offload);
+ mcp251xfd_unregister(priv);
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ free_candev(ndev);
+
+ return 0;
+}
+
+static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
+{
+ const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp251xfd_clks_and_vdd_disable(priv);
+}
+
+static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
+{
+ const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp251xfd_clks_and_vdd_enable(priv);
+}
+
+static const struct dev_pm_ops mcp251xfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
+ mcp251xfd_runtime_resume, NULL)
+};
+
+static struct spi_driver mcp251xfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &mcp251xfd_pm_ops,
+ .of_match_table = mcp251xfd_of_match,
+ },
+ .probe = mcp251xfd_probe,
+ .remove = mcp251xfd_remove,
+ .id_table = mcp251xfd_id_table,
+};
+module_spi_driver(mcp251xfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c
new file mode 100644
index 000000000000..a02ca76ac239
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include "mcp251xfd.h"
+
+/* The standard crc16 in linux/crc16.h unfortunately doesn't compute
+ * the correct results for this device (it shifts right, while a
+ * left-shifting CRC is needed here). So here is an implementation
+ * with a table generated with the help of:
+ *
+ * http://lkml.iu.edu/hypermail/linux/kernel/0508.1/1085.html
+ */
+static const u16 mcp251xfd_crc16_table[] = {
+ 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011,
+ 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022,
+ 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072,
+ 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041,
+ 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2,
+ 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1,
+ 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1,
+ 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082,
+ 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192,
+ 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1,
+ 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1,
+ 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2,
+ 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151,
+ 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162,
+ 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132,
+ 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101,
+ 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312,
+ 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321,
+ 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371,
+ 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342,
+ 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1,
+ 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2,
+ 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2,
+ 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381,
+ 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291,
+ 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2,
+ 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2,
+ 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1,
+ 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252,
+ 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261,
+ 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231,
+ 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202
+};
+
+static inline u16 mcp251xfd_crc16_byte(u16 crc, const u8 data)
+{
+ u8 index = (crc >> 8) ^ data;
+
+ return (crc << 8) ^ mcp251xfd_crc16_table[index];
+}
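+/* For reference, a bit-wise version that is equivalent to the table
+ * lookup above (MSB-first CRC with polynomial 0x8005; illustration
+ * only, not used by the driver):
+ *
+ *	static u16 mcp251xfd_crc16_byte_bitwise(u16 crc, const u8 data)
+ *	{
+ *		int i;
+ *
+ *		crc ^= (u16)data << 8;
+ *		for (i = 0; i < 8; i++)
+ *			crc = crc & 0x8000 ? (crc << 1) ^ 0x8005 : crc << 1;
+ *
+ *		return crc;
+ *	}
+ */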
+
+static u16 mcp251xfd_crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = mcp251xfd_crc16_byte(crc, *buffer++);
+
+ return crc;
+}
+
+u16 mcp251xfd_crc16_compute(const void *data, size_t data_size)
+{
+ u16 crc = 0xffff;
+
+ return mcp251xfd_crc16(crc, data, data_size);
+}
+
+u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size)
+{
+ u16 crc;
+
+ crc = mcp251xfd_crc16_compute(cmd, cmd_size);
+ crc = mcp251xfd_crc16(crc, data, data_size);
+
+ return crc;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
new file mode 100644
index 000000000000..314f868b3465
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd.h"
+
+#include <asm/unaligned.h>
+
+static const struct regmap_config mcp251xfd_regmap_crc;
+
+static int
+mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ struct spi_device *spi = context;
+
+ return spi_write(spi, data, count);
+}
+
+static int
+mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+{
+ switch (reg) {
+ case MCP251XFD_REG_INT:
+ case MCP251XFD_REG_TEFCON:
+ case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
+ case MCP251XFD_REG_FLTCON(0):
+ case MCP251XFD_REG_ECCSTAT:
+ case MCP251XFD_REG_CRC:
+ return false;
+ case MCP251XFD_REG_CON:
+ case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
+ case MCP251XFD_REG_OSC:
+ case MCP251XFD_REG_ECCCON:
+ return true;
+ default:
+ WARN(1, "Status of reg 0x%04x unknown.\n", reg);
+ }
+
+ return true;
+}
+
+static int
+mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ __le32 orig_le32 = 0, mask_le32, val_le32, tmp_le32;
+ u8 first_byte, last_byte, len;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ mask == 0)
+ return -EINVAL;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
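+	/* Example: mask == 0x00ffff00 gives first_byte == 1,
+	 * last_byte == 2 and len == 2, so only the two affected bytes
+	 * of the register are transferred.
+	 */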
+
+ if (mcp251xfd_update_bits_read_reg(reg)) {
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
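+	/* Half-duplex controllers need two transfers: the command is
+	 * shifted out first, then the data is shifted in. Full-duplex
+	 * controllers do both in a single transfer.
+	 */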
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + len;
+
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, len);
+ }
+
+ mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, reg + first_byte);
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ memcpy(&orig_le32, buf_rx->data, len);
+ }
+
+ mask_le32 = cpu_to_le32(mask >> BITS_PER_BYTE * first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+
+ tmp_le32 = orig_le32 & ~mask_le32;
+ tmp_le32 |= val_le32 & mask_le32;
+
+ mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
+ memcpy(buf_tx->data, &tmp_le32, len);
+
+ return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+}
+
+static int
+mcp251xfd_regmap_nocrc_read(void *context,
+ const void *reg, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = reg;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static int
+mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc),
+ },
+ };
+ u16 reg = *(u16 *)reg_p;
+ u16 crc;
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ mcp251xfd_spi_cmd_write_crc(&buf_tx->cmd, reg, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ crc = mcp251xfd_crc16_compute(buf_tx, sizeof(buf_tx->cmd) + val_len);
+ put_unaligned_be16(crc, buf_tx->data + val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int
+mcp251xfd_regmap_crc_write(void *context,
+ const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE;
+
+ return mcp251xfd_regmap_crc_gather_write(context,
+ data, data_offset,
+ data + data_offset,
+ count - data_offset);
+}
+
+static int
+mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
+ struct spi_message *msg, unsigned int data_len)
+{
+ const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ u16 crc_received, crc_calculated;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ err = spi_sync(priv->spi, msg);
+ if (err)
+ return err;
+
+ crc_received = get_unaligned_be16(buf_rx->data + data_len);
+ crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd,
+ sizeof(buf_tx->cmd),
+ buf_rx->data,
+ data_len);
+ if (crc_received != crc_calculated)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int
+mcp251xfd_regmap_crc_read(void *context,
+ const void *reg_p, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ u16 reg = *(u16 *)reg_p;
+ int i, err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = val_len + sizeof(buf_tx->crc);
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc);
+
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len +
+ sizeof(buf_tx->crc));
+ }
+
+ mcp251xfd_spi_cmd_read_crc(&buf_tx->cmd, reg, val_len);
+
+ for (i = 0; i < MCP251XFD_READ_CRC_RETRIES_MAX; i++) {
+ err = mcp251xfd_regmap_crc_read_one(priv, &msg, val_len);
+ if (!err)
+ goto out;
+ if (err != -EBADMSG)
+ return err;
+
+		/* MCP251XFD_REG_OSC is the first register we read from.
+		 *
+		 * The chip may be in deep sleep and this SPI transfer
+		 * (i.e. the assertion of the CS) will wake the chip
+		 * up. This takes about 3 ms. The CRC of this transfer
+		 * is wrong.
+		 *
+		 * Or there is no chip at all; in this case the CRC
+		 * will be wrong, too.
+		 *
+		 * In both cases ignore the CRC and copy the read data
+		 * to the caller, who will take care of both cases.
+		 */
+ if (reg == MCP251XFD_REG_OSC) {
+ err = 0;
+ goto out;
+ }
+
+ netdev_info(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+ }
+
+ if (err) {
+ netdev_err(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+
+ return err;
+ }
+ out:
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range mcp251xfd_reg_table_yes_range[] = {
+ regmap_reg_range(0x000, 0x2ec), /* CAN FD Controller Module SFR */
+ regmap_reg_range(0x400, 0xbfc), /* RAM */
+ regmap_reg_range(0xe00, 0xe14), /* MCP2517/18FD SFR */
+};
+
+static const struct regmap_access_table mcp251xfd_reg_table = {
+ .yes_ranges = mcp251xfd_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(mcp251xfd_reg_table_yes_range),
+};
+
+static const struct regmap_config mcp251xfd_regmap_nocrc = {
+ .name = "nocrc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 0,
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp251xfd_reg_table,
+ .rd_table = &mcp251xfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus mcp251xfd_bus_nocrc = {
+ .write = mcp251xfd_regmap_nocrc_write,
+ .gather_write = mcp251xfd_regmap_nocrc_gather_write,
+ .reg_update_bits = mcp251xfd_regmap_nocrc_update_bits,
+ .read = mcp251xfd_regmap_nocrc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_nocrc, data),
+ .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_nocrc, data),
+};
+
+static const struct regmap_config mcp251xfd_regmap_crc = {
+ .name = "crc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 16, /* keep data bits aligned */
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp251xfd_reg_table,
+ .rd_table = &mcp251xfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_bus mcp251xfd_bus_crc = {
+ .write = mcp251xfd_regmap_crc_write,
+ .gather_write = mcp251xfd_regmap_crc_gather_write,
+ .read = mcp251xfd_regmap_crc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_crc, data),
+ .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_crc, data),
+};
+
+static inline bool
+mcp251xfd_regmap_use_nocrc(struct mcp251xfd_priv *priv)
+{
+ return (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) ||
+ (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX));
+}
+
+static inline bool
+mcp251xfd_regmap_use_crc(struct mcp251xfd_priv *priv)
+{
+ return (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) ||
+ (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX);
+}
+
+static int
+mcp251xfd_regmap_init_nocrc(struct mcp251xfd_priv *priv)
+{
+ if (!priv->map_nocrc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_nocrc,
+ priv->spi, &mcp251xfd_regmap_nocrc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_nocrc = map;
+ }
+
+ if (!priv->map_buf_nocrc_rx) {
+ priv->map_buf_nocrc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_nocrc_tx) {
+ priv->map_buf_nocrc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_tx)
+ return -ENOMEM;
+ }
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG))
+ priv->map_reg = priv->map_nocrc;
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX))
+ priv->map_rx = priv->map_nocrc;
+
+ return 0;
+}
+
+static void mcp251xfd_regmap_destroy_nocrc(struct mcp251xfd_priv *priv)
+{
+ if (priv->map_buf_nocrc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_rx);
+ priv->map_buf_nocrc_rx = NULL;
+ }
+ if (priv->map_buf_nocrc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_tx);
+ priv->map_buf_nocrc_tx = NULL;
+ }
+}
+
+static int
+mcp251xfd_regmap_init_crc(struct mcp251xfd_priv *priv)
+{
+ if (!priv->map_crc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_crc,
+ priv->spi, &mcp251xfd_regmap_crc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_crc = map;
+ }
+
+ if (!priv->map_buf_crc_rx) {
+ priv->map_buf_crc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_crc_tx) {
+ priv->map_buf_crc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_tx)
+ return -ENOMEM;
+ }
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)
+ priv->map_reg = priv->map_crc;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)
+ priv->map_rx = priv->map_crc;
+
+ return 0;
+}
+
+static void mcp251xfd_regmap_destroy_crc(struct mcp251xfd_priv *priv)
+{
+ if (priv->map_buf_crc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_rx);
+ priv->map_buf_crc_rx = NULL;
+ }
+ if (priv->map_buf_crc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_tx);
+ priv->map_buf_crc_tx = NULL;
+ }
+}
+
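+/* (Re-)initialize the regmaps according to the quirks of the
+ * (possibly re-)detected model. This is called at probe time and
+ * again after chip auto-detection, which may change the CRC quirks;
+ * buffers that are no longer needed are released.
+ */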
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ if (mcp251xfd_regmap_use_nocrc(priv)) {
+		err = mcp251xfd_regmap_init_nocrc(priv);
+		if (err)
+			return err;
+ } else {
+ mcp251xfd_regmap_destroy_nocrc(priv);
+ }
+
+ if (mcp251xfd_regmap_use_crc(priv)) {
+		err = mcp251xfd_regmap_init_crc(priv);
+		if (err)
+			return err;
+ } else {
+ mcp251xfd_regmap_destroy_crc(priv);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
new file mode 100644
index 000000000000..fa1246e39980
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2019 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+ */
+
+#ifndef _MCP251XFD_H
+#define _MCP251XFD_H
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/* MCP251xFD registers */
+
+/* CAN FD Controller Module SFR */
+#define MCP251XFD_REG_CON 0x00
+#define MCP251XFD_REG_CON_TXBWS_MASK GENMASK(31, 28)
+#define MCP251XFD_REG_CON_ABAT BIT(27)
+#define MCP251XFD_REG_CON_REQOP_MASK GENMASK(26, 24)
+#define MCP251XFD_REG_CON_MODE_MIXED 0
+#define MCP251XFD_REG_CON_MODE_SLEEP 1
+#define MCP251XFD_REG_CON_MODE_INT_LOOPBACK 2
+#define MCP251XFD_REG_CON_MODE_LISTENONLY 3
+#define MCP251XFD_REG_CON_MODE_CONFIG 4
+#define MCP251XFD_REG_CON_MODE_EXT_LOOPBACK 5
+#define MCP251XFD_REG_CON_MODE_CAN2_0 6
+#define MCP251XFD_REG_CON_MODE_RESTRICTED 7
+#define MCP251XFD_REG_CON_OPMOD_MASK GENMASK(23, 21)
+#define MCP251XFD_REG_CON_TXQEN BIT(20)
+#define MCP251XFD_REG_CON_STEF BIT(19)
+#define MCP251XFD_REG_CON_SERR2LOM BIT(18)
+#define MCP251XFD_REG_CON_ESIGM BIT(17)
+#define MCP251XFD_REG_CON_RTXAT BIT(16)
+#define MCP251XFD_REG_CON_BRSDIS BIT(12)
+#define MCP251XFD_REG_CON_BUSY BIT(11)
+#define MCP251XFD_REG_CON_WFT_MASK GENMASK(10, 9)
+#define MCP251XFD_REG_CON_WFT_T00FILTER 0x0
+#define MCP251XFD_REG_CON_WFT_T01FILTER 0x1
+#define MCP251XFD_REG_CON_WFT_T10FILTER 0x2
+#define MCP251XFD_REG_CON_WFT_T11FILTER 0x3
+#define MCP251XFD_REG_CON_WAKFIL BIT(8)
+#define MCP251XFD_REG_CON_PXEDIS BIT(6)
+#define MCP251XFD_REG_CON_ISOCRCEN BIT(5)
+#define MCP251XFD_REG_CON_DNCNT_MASK GENMASK(4, 0)
+
+#define MCP251XFD_REG_NBTCFG 0x04
+#define MCP251XFD_REG_NBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_NBTCFG_TSEG1_MASK GENMASK(23, 16)
+#define MCP251XFD_REG_NBTCFG_TSEG2_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_NBTCFG_SJW_MASK GENMASK(6, 0)
+
+#define MCP251XFD_REG_DBTCFG 0x08
+#define MCP251XFD_REG_DBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_DBTCFG_TSEG1_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_DBTCFG_TSEG2_MASK GENMASK(11, 8)
+#define MCP251XFD_REG_DBTCFG_SJW_MASK GENMASK(3, 0)
+
+#define MCP251XFD_REG_TDC 0x0c
+#define MCP251XFD_REG_TDC_EDGFLTEN BIT(25)
+#define MCP251XFD_REG_TDC_SID11EN BIT(24)
+#define MCP251XFD_REG_TDC_TDCMOD_MASK GENMASK(17, 16)
+#define MCP251XFD_REG_TDC_TDCMOD_AUTO 2
+#define MCP251XFD_REG_TDC_TDCMOD_MANUAL 1
+#define MCP251XFD_REG_TDC_TDCMOD_DISABLED 0
+#define MCP251XFD_REG_TDC_TDCO_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_TDC_TDCV_MASK GENMASK(5, 0)
+
+#define MCP251XFD_REG_TBC 0x10
+
+#define MCP251XFD_REG_TSCON 0x14
+#define MCP251XFD_REG_TSCON_TSRES BIT(18)
+#define MCP251XFD_REG_TSCON_TSEOF BIT(17)
+#define MCP251XFD_REG_TSCON_TBCEN BIT(16)
+#define MCP251XFD_REG_TSCON_TBCPRE_MASK GENMASK(9, 0)
+
+#define MCP251XFD_REG_VEC 0x18
+#define MCP251XFD_REG_VEC_RXCODE_MASK GENMASK(30, 24)
+#define MCP251XFD_REG_VEC_TXCODE_MASK GENMASK(22, 16)
+#define MCP251XFD_REG_VEC_FILHIT_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_VEC_ICODE_MASK GENMASK(6, 0)
+
+#define MCP251XFD_REG_INT 0x1c
+#define MCP251XFD_REG_INT_IF_MASK GENMASK(15, 0)
+#define MCP251XFD_REG_INT_IE_MASK GENMASK(31, 16)
+#define MCP251XFD_REG_INT_IVMIE BIT(31)
+#define MCP251XFD_REG_INT_WAKIE BIT(30)
+#define MCP251XFD_REG_INT_CERRIE BIT(29)
+#define MCP251XFD_REG_INT_SERRIE BIT(28)
+#define MCP251XFD_REG_INT_RXOVIE BIT(27)
+#define MCP251XFD_REG_INT_TXATIE BIT(26)
+#define MCP251XFD_REG_INT_SPICRCIE BIT(25)
+#define MCP251XFD_REG_INT_ECCIE BIT(24)
+#define MCP251XFD_REG_INT_TEFIE BIT(20)
+#define MCP251XFD_REG_INT_MODIE BIT(19)
+#define MCP251XFD_REG_INT_TBCIE BIT(18)
+#define MCP251XFD_REG_INT_RXIE BIT(17)
+#define MCP251XFD_REG_INT_TXIE BIT(16)
+#define MCP251XFD_REG_INT_IVMIF BIT(15)
+#define MCP251XFD_REG_INT_WAKIF BIT(14)
+#define MCP251XFD_REG_INT_CERRIF BIT(13)
+#define MCP251XFD_REG_INT_SERRIF BIT(12)
+#define MCP251XFD_REG_INT_RXOVIF BIT(11)
+#define MCP251XFD_REG_INT_TXATIF BIT(10)
+#define MCP251XFD_REG_INT_SPICRCIF BIT(9)
+#define MCP251XFD_REG_INT_ECCIF BIT(8)
+#define MCP251XFD_REG_INT_TEFIF BIT(4)
+#define MCP251XFD_REG_INT_MODIF BIT(3)
+#define MCP251XFD_REG_INT_TBCIF BIT(2)
+#define MCP251XFD_REG_INT_RXIF BIT(1)
+#define MCP251XFD_REG_INT_TXIF BIT(0)
+/* These IRQ flags must be cleared by SW in the CAN_INT register */
+#define MCP251XFD_REG_INT_IF_CLEARABLE_MASK \
+ (MCP251XFD_REG_INT_IVMIF | MCP251XFD_REG_INT_WAKIF | \
+ MCP251XFD_REG_INT_CERRIF | MCP251XFD_REG_INT_SERRIF | \
+ MCP251XFD_REG_INT_MODIF)
+
+#define MCP251XFD_REG_RXIF 0x20
+#define MCP251XFD_REG_TXIF 0x24
+#define MCP251XFD_REG_RXOVIF 0x28
+#define MCP251XFD_REG_TXATIF 0x2c
+#define MCP251XFD_REG_TXREQ 0x30
+
+#define MCP251XFD_REG_TREC 0x34
+#define MCP251XFD_REG_TREC_TXBO BIT(21)
+#define MCP251XFD_REG_TREC_TXBP BIT(20)
+#define MCP251XFD_REG_TREC_RXBP BIT(19)
+#define MCP251XFD_REG_TREC_TXWARN BIT(18)
+#define MCP251XFD_REG_TREC_RXWARN BIT(17)
+#define MCP251XFD_REG_TREC_EWARN BIT(16)
+#define MCP251XFD_REG_TREC_TEC_MASK GENMASK(15, 8)
+#define MCP251XFD_REG_TREC_REC_MASK GENMASK(7, 0)
+
+#define MCP251XFD_REG_BDIAG0 0x38
+#define MCP251XFD_REG_BDIAG0_DTERRCNT_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_BDIAG0_DRERRCNT_MASK GENMASK(23, 16)
+#define MCP251XFD_REG_BDIAG0_NTERRCNT_MASK GENMASK(15, 8)
+#define MCP251XFD_REG_BDIAG0_NRERRCNT_MASK GENMASK(7, 0)
+
+#define MCP251XFD_REG_BDIAG1 0x3c
+#define MCP251XFD_REG_BDIAG1_DLCMM BIT(31)
+#define MCP251XFD_REG_BDIAG1_ESI BIT(30)
+#define MCP251XFD_REG_BDIAG1_DCRCERR BIT(29)
+#define MCP251XFD_REG_BDIAG1_DSTUFERR BIT(28)
+#define MCP251XFD_REG_BDIAG1_DFORMERR BIT(27)
+#define MCP251XFD_REG_BDIAG1_DBIT1ERR BIT(25)
+#define MCP251XFD_REG_BDIAG1_DBIT0ERR BIT(24)
+#define MCP251XFD_REG_BDIAG1_TXBOERR BIT(23)
+#define MCP251XFD_REG_BDIAG1_NCRCERR BIT(21)
+#define MCP251XFD_REG_BDIAG1_NSTUFERR BIT(20)
+#define MCP251XFD_REG_BDIAG1_NFORMERR BIT(19)
+#define MCP251XFD_REG_BDIAG1_NACKERR BIT(18)
+#define MCP251XFD_REG_BDIAG1_NBIT1ERR BIT(17)
+#define MCP251XFD_REG_BDIAG1_NBIT0ERR BIT(16)
+#define MCP251XFD_REG_BDIAG1_BERR_MASK \
+ (MCP251XFD_REG_BDIAG1_DLCMM | MCP251XFD_REG_BDIAG1_ESI | \
+ MCP251XFD_REG_BDIAG1_DCRCERR | MCP251XFD_REG_BDIAG1_DSTUFERR | \
+ MCP251XFD_REG_BDIAG1_DFORMERR | MCP251XFD_REG_BDIAG1_DBIT1ERR | \
+ MCP251XFD_REG_BDIAG1_DBIT0ERR | MCP251XFD_REG_BDIAG1_TXBOERR | \
+ MCP251XFD_REG_BDIAG1_NCRCERR | MCP251XFD_REG_BDIAG1_NSTUFERR | \
+ MCP251XFD_REG_BDIAG1_NFORMERR | MCP251XFD_REG_BDIAG1_NACKERR | \
+ MCP251XFD_REG_BDIAG1_NBIT1ERR | MCP251XFD_REG_BDIAG1_NBIT0ERR)
+#define MCP251XFD_REG_BDIAG1_EFMSGCNT_MASK GENMASK(15, 0)
+
+#define MCP251XFD_REG_TEFCON 0x40
+#define MCP251XFD_REG_TEFCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_TEFCON_FRESET BIT(10)
+#define MCP251XFD_REG_TEFCON_UINC BIT(8)
+#define MCP251XFD_REG_TEFCON_TEFTSEN BIT(5)
+#define MCP251XFD_REG_TEFCON_TEFOVIE BIT(3)
+#define MCP251XFD_REG_TEFCON_TEFFIE BIT(2)
+#define MCP251XFD_REG_TEFCON_TEFHIE BIT(1)
+#define MCP251XFD_REG_TEFCON_TEFNEIE BIT(0)
+
+#define MCP251XFD_REG_TEFSTA 0x44
+#define MCP251XFD_REG_TEFSTA_TEFOVIF BIT(3)
+#define MCP251XFD_REG_TEFSTA_TEFFIF BIT(2)
+#define MCP251XFD_REG_TEFSTA_TEFHIF BIT(1)
+#define MCP251XFD_REG_TEFSTA_TEFNEIF BIT(0)
+
+#define MCP251XFD_REG_TEFUA 0x48
+
+#define MCP251XFD_REG_TXQCON 0x50
+#define MCP251XFD_REG_TXQCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP251XFD_REG_TXQCON_PLSIZE_8 0
+#define MCP251XFD_REG_TXQCON_PLSIZE_12 1
+#define MCP251XFD_REG_TXQCON_PLSIZE_16 2
+#define MCP251XFD_REG_TXQCON_PLSIZE_20 3
+#define MCP251XFD_REG_TXQCON_PLSIZE_24 4
+#define MCP251XFD_REG_TXQCON_PLSIZE_32 5
+#define MCP251XFD_REG_TXQCON_PLSIZE_48 6
+#define MCP251XFD_REG_TXQCON_PLSIZE_64 7
+#define MCP251XFD_REG_TXQCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_TXQCON_TXAT_UNLIMITED 3
+#define MCP251XFD_REG_TXQCON_TXAT_THREE_SHOT 1
+#define MCP251XFD_REG_TXQCON_TXAT_ONE_SHOT 0
+#define MCP251XFD_REG_TXQCON_TXAT_MASK GENMASK(22, 21)
+#define MCP251XFD_REG_TXQCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_TXQCON_FRESET BIT(10)
+#define MCP251XFD_REG_TXQCON_TXREQ BIT(9)
+#define MCP251XFD_REG_TXQCON_UINC BIT(8)
+#define MCP251XFD_REG_TXQCON_TXEN BIT(7)
+#define MCP251XFD_REG_TXQCON_TXATIE BIT(4)
+#define MCP251XFD_REG_TXQCON_TXQEIE BIT(2)
+#define MCP251XFD_REG_TXQCON_TXQNIE BIT(0)
+
+#define MCP251XFD_REG_TXQSTA 0x54
+#define MCP251XFD_REG_TXQSTA_TXQCI_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_TXQSTA_TXABT BIT(7)
+#define MCP251XFD_REG_TXQSTA_TXLARB BIT(6)
+#define MCP251XFD_REG_TXQSTA_TXERR BIT(5)
+#define MCP251XFD_REG_TXQSTA_TXATIF BIT(4)
+#define MCP251XFD_REG_TXQSTA_TXQEIF BIT(2)
+#define MCP251XFD_REG_TXQSTA_TXQNIF BIT(0)
+
+#define MCP251XFD_REG_TXQUA 0x58
+
+#define MCP251XFD_REG_FIFOCON(x) (0x50 + 0xc * (x))
+#define MCP251XFD_REG_FIFOCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP251XFD_REG_FIFOCON_PLSIZE_8 0
+#define MCP251XFD_REG_FIFOCON_PLSIZE_12 1
+#define MCP251XFD_REG_FIFOCON_PLSIZE_16 2
+#define MCP251XFD_REG_FIFOCON_PLSIZE_20 3
+#define MCP251XFD_REG_FIFOCON_PLSIZE_24 4
+#define MCP251XFD_REG_FIFOCON_PLSIZE_32 5
+#define MCP251XFD_REG_FIFOCON_PLSIZE_48 6
+#define MCP251XFD_REG_FIFOCON_PLSIZE_64 7
+#define MCP251XFD_REG_FIFOCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_FIFOCON_TXAT_MASK GENMASK(22, 21)
+#define MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT 0
+#define MCP251XFD_REG_FIFOCON_TXAT_THREE_SHOT 1
+#define MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED 3
+#define MCP251XFD_REG_FIFOCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_FIFOCON_FRESET BIT(10)
+#define MCP251XFD_REG_FIFOCON_TXREQ BIT(9)
+#define MCP251XFD_REG_FIFOCON_UINC BIT(8)
+#define MCP251XFD_REG_FIFOCON_TXEN BIT(7)
+#define MCP251XFD_REG_FIFOCON_RTREN BIT(6)
+#define MCP251XFD_REG_FIFOCON_RXTSEN BIT(5)
+#define MCP251XFD_REG_FIFOCON_TXATIE BIT(4)
+#define MCP251XFD_REG_FIFOCON_RXOVIE BIT(3)
+#define MCP251XFD_REG_FIFOCON_TFERFFIE BIT(2)
+#define MCP251XFD_REG_FIFOCON_TFHRFHIE BIT(1)
+#define MCP251XFD_REG_FIFOCON_TFNRFNIE BIT(0)
+
+#define MCP251XFD_REG_FIFOSTA(x) (0x54 + 0xc * (x))
+#define MCP251XFD_REG_FIFOSTA_FIFOCI_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_FIFOSTA_TXABT BIT(7)
+#define MCP251XFD_REG_FIFOSTA_TXLARB BIT(6)
+#define MCP251XFD_REG_FIFOSTA_TXERR BIT(5)
+#define MCP251XFD_REG_FIFOSTA_TXATIF BIT(4)
+#define MCP251XFD_REG_FIFOSTA_RXOVIF BIT(3)
+#define MCP251XFD_REG_FIFOSTA_TFERFFIF BIT(2)
+#define MCP251XFD_REG_FIFOSTA_TFHRFHIF BIT(1)
+#define MCP251XFD_REG_FIFOSTA_TFNRFNIF BIT(0)
+
+#define MCP251XFD_REG_FIFOUA(x) (0x58 + 0xc * (x))
+
+#define MCP251XFD_REG_FLTCON(x) (0x1d0 + 0x4 * (x))
+#define MCP251XFD_REG_FLTCON_FLTEN3 BIT(31)
+#define MCP251XFD_REG_FLTCON_F3BP_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_FLTCON_FLTEN2 BIT(23)
+#define MCP251XFD_REG_FLTCON_F2BP_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_FLTCON_FLTEN1 BIT(15)
+#define MCP251XFD_REG_FLTCON_F1BP_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_FLTCON_FLTEN0 BIT(7)
+#define MCP251XFD_REG_FLTCON_F0BP_MASK GENMASK(4, 0)
+#define MCP251XFD_REG_FLTCON_FLTEN(x) (BIT(7) << 8 * ((x) & 0x3))
+#define MCP251XFD_REG_FLTCON_FLT_MASK(x) (GENMASK(7, 0) << (8 * ((x) & 0x3)))
+#define MCP251XFD_REG_FLTCON_FBP(x, fifo) ((fifo) << 8 * ((x) & 0x3))
+
+#define MCP251XFD_REG_FLTOBJ(x) (0x1f0 + 0x8 * (x))
+#define MCP251XFD_REG_FLTOBJ_EXIDE BIT(30)
+#define MCP251XFD_REG_FLTOBJ_SID11 BIT(29)
+#define MCP251XFD_REG_FLTOBJ_EID_MASK GENMASK(28, 11)
+#define MCP251XFD_REG_FLTOBJ_SID_MASK GENMASK(10, 0)
+
+#define MCP251XFD_REG_FLTMASK(x) (0x1f4 + 0x8 * (x))
+#define MCP251XFD_REG_MASK_MIDE BIT(30)
+#define MCP251XFD_REG_MASK_MSID11 BIT(29)
+#define MCP251XFD_REG_MASK_MEID_MASK GENMASK(28, 11)
+#define MCP251XFD_REG_MASK_MSID_MASK GENMASK(10, 0)
+
+/* RAM */
+#define MCP251XFD_RAM_START 0x400
+#define MCP251XFD_RAM_SIZE SZ_2K
+
+/* Message Object */
+#define MCP251XFD_OBJ_ID_SID11 BIT(29)
+#define MCP251XFD_OBJ_ID_EID_MASK GENMASK(28, 11)
+#define MCP251XFD_OBJ_ID_SID_MASK GENMASK(10, 0)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK GENMASK(31, 9)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK GENMASK(15, 9)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MASK MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK
+#define MCP251XFD_OBJ_FLAGS_ESI BIT(8)
+#define MCP251XFD_OBJ_FLAGS_FDF BIT(7)
+#define MCP251XFD_OBJ_FLAGS_BRS BIT(6)
+#define MCP251XFD_OBJ_FLAGS_RTR BIT(5)
+#define MCP251XFD_OBJ_FLAGS_IDE BIT(4)
+#define MCP251XFD_OBJ_FLAGS_DLC GENMASK(3, 0)
+
+#define MCP251XFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18)
+#define MCP251XFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0)
+
+/* MCP2517/18FD SFR */
+#define MCP251XFD_REG_OSC 0xe00
+#define MCP251XFD_REG_OSC_SCLKRDY BIT(12)
+#define MCP251XFD_REG_OSC_OSCRDY BIT(10)
+#define MCP251XFD_REG_OSC_PLLRDY BIT(8)
+#define MCP251XFD_REG_OSC_CLKODIV_10 3
+#define MCP251XFD_REG_OSC_CLKODIV_4 2
+#define MCP251XFD_REG_OSC_CLKODIV_2 1
+#define MCP251XFD_REG_OSC_CLKODIV_1 0
+#define MCP251XFD_REG_OSC_CLKODIV_MASK GENMASK(6, 5)
+#define MCP251XFD_REG_OSC_SCLKDIV BIT(4)
+#define MCP251XFD_REG_OSC_LPMEN BIT(3) /* MCP2518FD only */
+#define MCP251XFD_REG_OSC_OSCDIS BIT(2)
+#define MCP251XFD_REG_OSC_PLLEN BIT(0)
+
+#define MCP251XFD_REG_IOCON 0xe04
+#define MCP251XFD_REG_IOCON_INTOD BIT(30)
+#define MCP251XFD_REG_IOCON_SOF BIT(29)
+#define MCP251XFD_REG_IOCON_TXCANOD BIT(28)
+#define MCP251XFD_REG_IOCON_PM1 BIT(25)
+#define MCP251XFD_REG_IOCON_PM0 BIT(24)
+#define MCP251XFD_REG_IOCON_GPIO1 BIT(17)
+#define MCP251XFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP251XFD_REG_IOCON_LAT1 BIT(9)
+#define MCP251XFD_REG_IOCON_LAT0 BIT(8)
+#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6)
+#define MCP251XFD_REG_IOCON_TRIS1 BIT(1)
+#define MCP251XFD_REG_IOCON_TRIS0 BIT(0)
+
+#define MCP251XFD_REG_CRC 0xe08
+#define MCP251XFD_REG_CRC_FERRIE BIT(25)
+#define MCP251XFD_REG_CRC_CRCERRIE BIT(24)
+#define MCP251XFD_REG_CRC_FERRIF BIT(17)
+#define MCP251XFD_REG_CRC_CRCERRIF BIT(16)
+#define MCP251XFD_REG_CRC_IF_MASK GENMASK(17, 16)
+#define MCP251XFD_REG_CRC_MASK GENMASK(15, 0)
+
+#define MCP251XFD_REG_ECCCON 0xe0c
+#define MCP251XFD_REG_ECCCON_PARITY_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_ECCCON_DEDIE BIT(2)
+#define MCP251XFD_REG_ECCCON_SECIE BIT(1)
+#define MCP251XFD_REG_ECCCON_ECCEN BIT(0)
+
+#define MCP251XFD_REG_ECCSTAT 0xe10
+#define MCP251XFD_REG_ECCSTAT_ERRADDR_MASK GENMASK(27, 16)
+#define MCP251XFD_REG_ECCSTAT_IF_MASK GENMASK(2, 1)
+#define MCP251XFD_REG_ECCSTAT_DEDIF BIT(2)
+#define MCP251XFD_REG_ECCSTAT_SECIF BIT(1)
+
+#define MCP251XFD_REG_DEVID 0xe14 /* MCP2518FD only */
+#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
+#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
+
+/* number of TX FIFO objects, depending on CAN mode
+ *
+ * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
+ * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
+ */
+#define MCP251XFD_TX_OBJ_NUM_CAN 8
+#define MCP251XFD_TX_OBJ_NUM_CANFD 4
+
+#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
+#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
+#else
+#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
+#endif
+
+#define MCP251XFD_NAPI_WEIGHT 32
+#define MCP251XFD_TX_FIFO 1
+#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
+
+/* SPI commands */
+#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
+#define MCP251XFD_SPI_INSTRUCTION_READ 0x3000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC 0xa000
+#define MCP251XFD_SPI_INSTRUCTION_READ_CRC 0xb000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE 0xc000
+#define MCP251XFD_SPI_ADDRESS_MASK GENMASK(11, 0)
+
+#define MCP251XFD_SYSCLOCK_HZ_MAX 40000000
+#define MCP251XFD_SYSCLOCK_HZ_MIN 1000000
+#define MCP251XFD_SPICLOCK_HZ_MAX 20000000
+#define MCP251XFD_OSC_PLL_MULTIPLIER 10
+#define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC)
+#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
+#define MCP251XFD_POLL_SLEEP_US (10)
+#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
+#define MCP251XFD_READ_CRC_RETRIES_MAX 3
+#define MCP251XFD_ECC_CNT_MAX 2
+#define MCP251XFD_SANITIZE_SPI 1
+#define MCP251XFD_SANITIZE_CAN 1
+
+/* Silence TX MAB overflow warnings */
+#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
+/* Use CRC to access registers */
+#define MCP251XFD_QUIRK_CRC_REG BIT(1)
+/* Use CRC to access RX/TEF-RAM */
+#define MCP251XFD_QUIRK_CRC_RX BIT(2)
+/* Use CRC to access TX-RAM */
+#define MCP251XFD_QUIRK_CRC_TX BIT(3)
+/* Enable ECC for RAM */
+#define MCP251XFD_QUIRK_ECC BIT(4)
+/* Use Half Duplex SPI transfers */
+#define MCP251XFD_QUIRK_HALF_DUPLEX BIT(5)
+
+struct mcp251xfd_hw_tef_obj {
+ u32 id;
+ u32 flags;
+ u32 ts;
+};
+
+/* The tx_obj_raw version is used in async SPI transfers, i.e. without
+ * regmap. We have to take care of endianness ourselves.
+ */
+struct mcp251xfd_hw_tx_obj_raw {
+ __le32 id;
+ __le32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_hw_tx_obj_can {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp251xfd_hw_tx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_hw_rx_obj_can {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp251xfd_hw_rx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_tef_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ /* u8 obj_num equals tx_ring->obj_num */
+ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+};
+
+struct __packed mcp251xfd_buf_cmd {
+ __be16 cmd;
+};
+
+struct __packed mcp251xfd_buf_cmd_crc {
+ __be16 cmd;
+ u8 len;
+};
+
+union mcp251xfd_tx_obj_load_buf {
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ struct mcp251xfd_hw_tx_obj_raw hw_tx_obj;
+ } nocrc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ struct mcp251xfd_hw_tx_obj_raw hw_tx_obj;
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
+
+union mcp251xfd_write_reg_buf {
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[4];
+ } nocrc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ u8 data[4];
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
+
+struct mcp251xfd_tx_obj {
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ union mcp251xfd_tx_obj_load_buf buf;
+};
+
+struct mcp251xfd_tx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf rts_buf;
+};
+
+struct mcp251xfd_rx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 nr;
+ u8 fifo_nr;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp251xfd_hw_rx_obj_canfd obj[];
+};
+
+struct __packed mcp251xfd_map_buf_nocrc {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[256];
+} ____cacheline_aligned;
+
+struct __packed mcp251xfd_map_buf_crc {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ u8 data[256 - 4];
+ __be16 crc;
+} ____cacheline_aligned;
+
+struct mcp251xfd_ecc {
+ u32 ecc_stat;
+ int cnt;
+};
+
+struct mcp251xfd_regs_status {
+ u32 intf;
+};
+
+enum mcp251xfd_model {
+ MCP251XFD_MODEL_MCP2517FD = 0x2517,
+ MCP251XFD_MODEL_MCP2518FD = 0x2518,
+ MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+};
+
+struct mcp251xfd_devtype_data {
+ enum mcp251xfd_model model;
+ u32 quirks;
+};
+
+struct mcp251xfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ struct regmap *map_reg; /* register access */
+ struct regmap *map_rx; /* RX/TEF RAM access */
+
+ struct regmap *map_nocrc;
+ struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_tx;
+
+ struct regmap *map_crc;
+ struct mcp251xfd_map_buf_crc *map_buf_crc_rx;
+ struct mcp251xfd_map_buf_crc *map_buf_crc_tx;
+
+ struct spi_device *spi;
+ u32 spi_max_speed_hz_orig;
+
+ struct mcp251xfd_tef_ring tef;
+ struct mcp251xfd_tx_ring tx[1];
+ struct mcp251xfd_rx_ring *rx[1];
+
+ u8 rx_ring_num;
+
+ struct mcp251xfd_ecc ecc;
+ struct mcp251xfd_regs_status regs_status;
+
+ struct gpio_desc *rx_int;
+ struct clk *clk;
+ struct regulator *reg_vdd;
+ struct regulator *reg_xceiver;
+
+ struct mcp251xfd_devtype_data devtype_data;
+ struct can_berr_counter bec;
+};
+
+#define MCP251XFD_IS(_model) \
+static inline bool \
+mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
+{ \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+}
+
+MCP251XFD_IS(2517);
+MCP251XFD_IS(2518);
+MCP251XFD_IS(251X);
+
+static inline u8 mcp251xfd_first_byte_set(u32 mask)
+{
+ return (mask & 0x0000ffff) ?
+ ((mask & 0x000000ff) ? 0 : 1) :
+ ((mask & 0x00ff0000) ? 2 : 3);
+}
+
+static inline u8 mcp251xfd_last_byte_set(u32 mask)
+{
+ return (mask & 0xffff0000) ?
+ ((mask & 0xff000000) ? 3 : 2) :
+ ((mask & 0x0000ff00) ? 1 : 0);
+}
+
+static inline __be16 mcp251xfd_cmd_reset(void)
+{
+ return cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr);
+}
+
+static inline bool mcp251xfd_reg_in_ram(unsigned int reg)
+{
+ static const struct regmap_range range =
+ regmap_reg_range(MCP251XFD_RAM_START,
+ MCP251XFD_RAM_START + MCP251XFD_RAM_SIZE - 4);
+
+ return regmap_reg_in_range(reg, &range);
+}
+
+static inline void
+__mcp251xfd_spi_cmd_crc_set_len(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 len, bool in_ram)
+{
+ /* Number of u32 for RAM access, number of u8 otherwise. */
+ if (in_ram)
+ cmd->len = len >> 2;
+ else
+ cmd->len = len;
+}
+
+static inline void
+mcp251xfd_spi_cmd_crc_set_len_in_ram(struct mcp251xfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, true);
+}
+
+static inline void
+mcp251xfd_spi_cmd_crc_set_len_in_reg(struct mcp251xfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, false);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_crc(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp251xfd_spi_cmd_read_crc_set_addr(cmd, addr);
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr));
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp251xfd_spi_cmd_write_crc_set_addr(cmd, addr);
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr));
+}
+
+static inline u8 *
+mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ u16 addr)
+{
+ u8 *data;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ } else {
+ mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
+ addr);
+ data = write_reg_buf->nocrc.data;
+ }
+
+ return data;
+}
+
+static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
+{
+ return MCP251XFD_RAM_START +
+ sizeof(struct mcp251xfd_hw_tef_obj) * n;
+}
+
+static inline u16
+mcp251xfd_get_tx_obj_addr(const struct mcp251xfd_tx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u16
+mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.head & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.tail & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.head - priv->tef.tail;
+}
+
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+{
+ u8 len;
+
+ len = mcp251xfd_get_tef_len(priv);
+
+ return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
+}
+
+static inline u8 mcp251xfd_get_tx_head(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tx_tail(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tx_free(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->obj_num - (ring->head - ring->tail);
+}
+
+static inline int
+mcp251xfd_get_tx_nr_by_addr(const struct mcp251xfd_tx_ring *tx_ring, u8 *nr,
+ u16 addr)
+{
+ if (addr < mcp251xfd_get_tx_obj_addr(tx_ring, 0) ||
+ addr >= mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num))
+ return -ENOENT;
+
+ *nr = (addr - mcp251xfd_get_tx_obj_addr(tx_ring, 0)) /
+ tx_ring->obj_size;
+
+ return 0;
+}
+
+static inline u8 mcp251xfd_get_rx_head(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->head - ring->tail;
+}
+
+static inline u8
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+{
+ u8 len;
+
+ len = mcp251xfd_get_rx_len(ring);
+
+ return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
+}
+
+#define mcp251xfd_for_each_tx_obj(ring, _obj, n) \
+ for ((n) = 0, (_obj) = &(ring)->obj[(n)]; \
+ (n) < (ring)->obj_num; \
+ (n)++, (_obj) = &(ring)->obj[(n)])
+
+#define mcp251xfd_for_each_rx_ring(priv, ring, n) \
+ for ((n) = 0, (ring) = *((priv)->rx + (n)); \
+ (n) < (priv)->rx_ring_num; \
+ (n)++, (ring) = *((priv)->rx + (n)))
+
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size);
+u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+
+#endif
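
The ring helpers above (mcp251xfd_get_tef_head(), mcp251xfd_get_tx_free() and friends) keep head and tail as free-running counters and mask them down to slot indices, which is only correct when obj_num is a power of two. A minimal standalone sketch of that arithmetic with made-up values:

#include <stdio.h>

#define OBJ_NUM 8u	/* e.g. MCP251XFD_TX_OBJ_NUM_CAN */

static unsigned int ring_index(unsigned int counter)
{
	/* power-of-two size: masking is equivalent to counter % OBJ_NUM */
	return counter & (OBJ_NUM - 1);
}

static unsigned int ring_free(unsigned int head, unsigned int tail)
{
	/* correct across counter wrap because the subtraction is modular */
	return OBJ_NUM - (head - tail);
}

int main(void)
{
	unsigned int head = 10, tail = 6;

	/* 10 & 7 == 2, 6 & 7 == 6, 8 - (10 - 6) == 4 free slots */
	printf("head slot %u, tail slot %u, free %u\n",
	       ring_index(head), ring_index(tail), ring_free(head, tail));
	return 0;
}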
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 94b1491b569f..9913f5458279 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -454,7 +454,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
/* ti_hecc_xmit: HECC Transmit
*
* The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
- * priority of the mailbox for tranmission is dependent upon priority setting
+ * priority of the mailbox for transmission is dependent upon priority setting
* field in mailbox registers. The mailbox with highest value in priority field
* is transmitted first. Only when two mailboxes have the same value in
* priority field the highest numbered mailbox is transmitted first.
@@ -857,7 +857,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *res, *irq;
+ struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -878,39 +878,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
/* handle hecc memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
return PTR_ERR(priv->base);
}
/* handle hecc-ram memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
- return -EINVAL;
- }
-
- priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
+ "hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
return PTR_ERR(priv->hecc_ram);
}
/* handle mbx memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
- return -EINVAL;
- }
-
- priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
return PTR_ERR(priv->mbx);
@@ -950,7 +933,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_release_clk;
}
priv->offload.mailbox_read = ti_hecc_mailbox_read;
@@ -959,7 +942,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
err = can_rx_offload_add_timestamp(ndev, &priv->offload);
if (err) {
dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_disable_clk;
}
err = register_candev(ndev);
@@ -977,7 +960,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
probe_exit_offload:
can_rx_offload_del(&priv->offload);
-probe_exit_clk:
+probe_exit_disable_clk:
+ clk_disable_unprepare(priv->clk);
+probe_exit_release_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
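
The relabeled error path above restores strict reverse-order unwinding: clk_disable_unprepare() is now reached only once clk_prepare_enable() has succeeded, while clk_put() runs in either case. A kernel-style sketch of the pattern; example_probe() and do_more_setup() are illustrative names, not ti_hecc functions.

#include <linux/clk.h>
#include <linux/platform_device.h>

static int do_more_setup(struct platform_device *pdev); /* hypothetical */

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int err;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	err = clk_prepare_enable(clk);
	if (err)
		goto out_release_clk;	/* enable failed: only put */

	err = do_more_setup(pdev);	/* hypothetical next step */
	if (err)
		goto out_disable_clk;	/* enabled: disable, then put */

	return 0;

out_disable_clk:
	clk_disable_unprepare(clk);
out_release_clk:
	clk_put(clk);
	return err;
}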
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 77fa830fe7dd..bcb331b0c958 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -90,7 +90,7 @@ config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
This driver supports the PEAK-System Technik USB adapters that enable
- access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD
+ access to the CAN bus, with respect to the CAN 2.0b and/or CAN-FD
standards, that is:
PCAN-USB single CAN 2.0b channel USB adapter
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a4b4b742c80c..3005157059ca 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -828,7 +828,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
- /* dev settup */
+ /* dev setup */
strcpy(dev->bt_const.name, "gs_usb");
dev->bt_const.tseg1_min = bt_const->tseg1_min;
dev->bt_const.tseg1_max = bt_const->tseg1_max;
@@ -852,7 +852,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
}
- /* can settup */
+ /* can setup */
dev->can.state = CAN_STATE_STOPPED;
dev->can.clock.freq = bt_const->fclk_can;
dev->can.bittiming_const = &dev->bt_const;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 21faa2ec4632..5857b37dcd96 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -28,7 +28,7 @@
#define MCBA_CTX_FREE MCBA_MAX_TX_URBS
/* RX buffer must be bigger than msg size since at the
- * beggining USB messages are stacked.
+ * beginning USB messages are stacked.
*/
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
@@ -793,7 +793,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
{
struct net_device *netdev;
struct mcba_priv *priv;
- int err = -ENOMEM;
+ int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 66d0198e7834..63bd2ed96697 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -34,6 +34,23 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \
PCAN_USB_CMD_ARGS_LEN)
+/* PCAN-USB commands */
+#define PCAN_USB_CMD_BITRATE 1
+#define PCAN_USB_CMD_SET_BUS 3
+#define PCAN_USB_CMD_DEVID 4
+#define PCAN_USB_CMD_SN 6
+#define PCAN_USB_CMD_REGISTER 9
+#define PCAN_USB_CMD_EXT_VCC 10
+#define PCAN_USB_CMD_ERR_FR 11
+
+/* PCAN_USB_CMD_SET_BUS number arg */
+#define PCAN_USB_BUS_XCVER 2
+#define PCAN_USB_BUS_SILENT_MODE 3
+
+/* PCAN_USB_CMD_xxx functions */
+#define PCAN_USB_GET 1
+#define PCAN_USB_SET 2
+
/* PCAN-USB command timeout (ms.) */
#define PCAN_USB_COMMAND_TIMEOUT 1000
@@ -66,6 +83,10 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_ERROR_QOVR 0x40
#define PCAN_USB_ERROR_TXQFULL 0x80
+#define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \
+ PCAN_USB_ERROR_BUS_HEAVY | \
+ PCAN_USB_ERROR_BUS_OFF)
+
/* SJA1000 modes */
#define SJA1000_MODE_NORMAL 0x00
#define SJA1000_MODE_INIT 0x01
@@ -85,11 +106,25 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_REC_TS 4
#define PCAN_USB_REC_BUSEVT 5
+/* CAN bus event notification selection mask */
+#define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */
+#define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */
+
+/* This mask generates a USB packet each time the state of the bus changes.
+ * In other words, it tells which side, rx or tx, is responsible for the
+ * change of the bus state.
+ */
+#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
+
+/* identify bus event packets with rx/tx error counters */
+#define PCAN_USB_ERR_CNT 0x80
+
/* private to PCAN-USB adapter */
struct pcan_usb {
struct peak_usb_device dev;
struct peak_time_ref time_ref;
struct timer_list restart_timer;
+ struct can_berr_counter bec;
};
/* incoming message context for decoding */
@@ -172,7 +207,8 @@ static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode)
[1] = mode,
};
- return pcan_usb_send_cmd(dev, 9, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET,
+ args);
}
static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
@@ -181,7 +217,8 @@ static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER,
+ args);
}
static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
@@ -190,7 +227,18 @@ static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 3, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS,
+ PCAN_USB_BUS_SILENT_MODE, args);
+}
+
+/* send the cmd to be notified from bus errors */
+static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = err_mask,
+ };
+
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args);
}
static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
@@ -199,7 +247,7 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 10, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args);
}
/*
@@ -223,7 +271,7 @@ static int pcan_usb_set_bittiming(struct peak_usb_device *dev,
args[0] = btr1;
args[1] = btr0;
- return pcan_usb_send_cmd(dev, 1, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args);
}
/*
@@ -307,7 +355,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 6, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args);
if (err) {
netdev_err(dev->netdev, "getting serial failure: %d\n", err);
} else if (serial_number) {
@@ -328,7 +376,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 4, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
else if (device_id)
@@ -426,7 +474,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_BUS_OFF;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -450,7 +498,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -489,29 +537,50 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
case CAN_STATE_ERROR_PASSIVE:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE |
- CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_passive++;
break;
case CAN_STATE_ERROR_WARNING:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING |
- CAN_ERR_CRTL_RX_WARNING;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_warning++;
break;
case CAN_STATE_ERROR_ACTIVE:
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+
+ /* reset local copies of rxerr/txerr counters */
+ mc->pdev->bec.txerr = 0;
+ mc->pdev->bec.rxerr = 0;
break;
default:
/* CAN_STATE_MAX (trick to handle other errors) */
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- mc->netdev->stats.rx_over_errors++;
- mc->netdev->stats.rx_errors++;
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full\n");
+
+ if (n & PCAN_USB_ERROR_RXQOVR) {
+ netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+ }
+
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
new_state = mc->pdev->dev.can.state;
break;
@@ -533,6 +602,30 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
return 0;
}
+/* decode bus event USB packet: the first byte contains rxerr, the second one
+ * contains txerr.
+ */
+static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
+{
+ struct pcan_usb *pdev = mc->pdev;
+
+ /* according to the content of the packet */
+ switch (ir) {
+ case PCAN_USB_ERR_CNT:
+
+ /* save rx/tx error counters in the device context */
+ pdev->bec.rxerr = mc->ptr[0];
+ pdev->bec.txerr = mc->ptr[1];
+ break;
+
+ default:
+ /* reserved */
+ break;
+ }
+
+ return 0;
+}
+
/*
* decode non-data usb message
*/
@@ -587,9 +680,10 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
break;
case PCAN_USB_REC_BUSEVT:
- /* error frame/bus event */
- if (n & PCAN_USB_ERROR_TXQFULL)
- netdev_dbg(mc->netdev, "device Tx queue full)\n");
+ /* bus event notifications (get rxerr/txerr) */
+ err = pcan_usb_handle_bus_evt(mc, n);
+ if (err)
+ return err;
break;
default:
netdev_err(mc->netdev, "unexpected function %u\n", f);
@@ -773,20 +867,44 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
return 0;
}
+/* socket callback used to copy berr counter values received through USB */
+static int pcan_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+
+ *bec = pdev->bec;
+
+ /* must return 0 */
+ return 0;
+}
+
/*
* start interface
*/
static int pcan_usb_start(struct peak_usb_device *dev)
{
struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+ int err;
/* number of bits used in timestamps read from adapter struct */
peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb);
+ pdev->bec.rxerr = 0;
+ pdev->bec.txerr = 0;
+
+ /* be notified on error counter changes (if requested by user) */
+ if (dev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
+ if (err)
+ netdev_warn(dev->netdev,
+ "Asking for BERR reporting error %u\n",
+ err);
+ }
+
/* if revision greater than 3, can put silent mode on/off */
if (dev->device_rev > 3) {
- int err;
-
err = pcan_usb_set_silent(dev,
dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
if (err)
@@ -873,7 +991,8 @@ const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
- .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
@@ -906,4 +1025,5 @@ const struct peak_usb_adapter pcan_usb = {
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
.dev_restart_async = pcan_usb_restart_async,
+ .do_get_berr_counter = pcan_usb_get_berr_counter,
};
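
With the rx/tx error counters now cached in struct pcan_usb, the decode path above can report which side drove a state change instead of flagging both directions. A reduced sketch of the selection rule; classify_passive() is an illustrative name, while the CAN_ERR_CRTL_* values match include/uapi/linux/can/error.h.

#include <stdint.h>

#define CAN_ERR_CRTL_RX_PASSIVE 0x10	/* reached error-passive on rx */
#define CAN_ERR_CRTL_TX_PASSIVE 0x20	/* reached error-passive on tx */

/* pick the side whose counter drove the transition to error-passive */
static uint8_t classify_passive(uint8_t txerr, uint8_t rxerr)
{
	return (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE
			       : CAN_ERR_CRTL_RX_PASSIVE;
}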
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index d91df34e7fa8..c2764799f9ef 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
/* protect from getting time before setting now */
if (ktime_to_ns(time_ref->tv_host)) {
u64 delta_us;
+ s64 delta_ts = 0;
+
+ /* General case: dev_ts_1 < dev_ts_2 < ts, with:
+ *
+ * - dev_ts_1 = previous sync timestamp
+ * - dev_ts_2 = last sync timestamp
+ * - ts = event timestamp
+ * - ts_period = known sync period (theoretical)
+ * ~ dev_ts2 - dev_ts1
+ * *but*:
+ *
+ * - time counters wrap (see adapter->ts_used_bits)
+ * - sometimes, dev_ts_1 < ts < dev_ts2
+ *
+ * "normal" case (sync time counters increase):
+ * must take into account case when ts wraps (tsw)
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+----+-------0-+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * ts tsw
+ */
+ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
+ /* case when event time (tsw) wraps */
+ if (ts < time_ref->ts_dev_1)
+ delta_ts = 1 << time_ref->adapter->ts_used_bits;
+
+ /* Otherwise, sync time counter (ts_dev_2) has wrapped:
+ * handle case when event time (tsn) hasn't.
+ *
+ * < ts_period > < >
+ * | | |
+ * ---+--------+--0-+---------+--+-->
+ * ts_dev_1 | ts_dev_2 |
+ * tsn ts
+ */
+ } else if (time_ref->ts_dev_1 < ts) {
+ delta_ts = -(1 << time_ref->adapter->ts_used_bits);
+ }
- delta_us = ts - time_ref->ts_dev_2;
- if (ts < time_ref->ts_dev_2)
- delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ /* add the time elapsed between the last sync and the event */
+ delta_ts += (signed int)(ts - time_ref->ts_dev_2);
- delta_us += time_ref->ts_total;
+ /* add time from beginning to last sync */
+ delta_ts += time_ref->ts_total;
- delta_us *= time_ref->adapter->us_per_ts_scale;
+ /* convert tick count into microseconds */
+ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
delta_us >>= time_ref->adapter->us_per_ts_shift;
*time = ktime_add_us(time_ref->tv_host_0, delta_us);
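
A worked standalone example of the wrap correction above, assuming a 16-bit device timestamp (ts_used_bits == 16); all values are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int ts_used_bits = 16;
	uint32_t ts_dev_1 = 0xff00;	/* previous sync */
	uint32_t ts_dev_2 = 0xff80;	/* last sync */
	uint32_t ts = 0x0010;		/* event, wrapped past 0xffff */
	int64_t delta_ts = 0;

	if (ts_dev_1 < ts_dev_2) {
		if (ts < ts_dev_1)	/* event time wrapped */
			delta_ts = 1 << ts_used_bits;
	} else if (ts_dev_1 < ts) {	/* sync wrapped, event did not */
		delta_ts = -(1 << ts_used_bits);
	}

	/* add the (signed) distance between event and last sync */
	delta_ts += (int32_t)(ts - ts_dev_2);

	/* 0x10010 - 0xff80 = 0x90 = 144 ticks since the last sync */
	printf("delta_ts = %lld\n", (long long)delta_ts);
	return 0;
}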
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 47cc1ff5b88e..d29d20525588 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -35,7 +35,7 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
-/* read some versions info from the hw devcie */
+/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
__le16 type; /* type of this structure */
@@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct canfd_frame *cfd;
struct sk_buff *skb;
const u16 rx_msg_flags = le16_to_cpu(rm->flags);
+ if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_msg_get_channel(rm)];
+ netdev = dev->netdev;
+
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
/* CANFD frame case */
skb = alloc_canfd_skb(netdev, &cfd);
@@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
enum can_state rx_state, tx_state;
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
+ return -ENOMEM;
+
+ dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
+ netdev = dev->netdev;
+
/* nothing should be sent while in BUS_OFF state */
if (dev->can.state == CAN_STATE_BUS_OFF)
return 0;
@@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
- struct pcan_usb_fd_device *pdev =
- container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_usb_fd_device *pdev;
+ struct peak_usb_device *dev;
+
+ if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pucan_ermsg_get_channel(er)];
+ pdev = container_of(dev, struct pcan_usb_fd_device, dev);
/* keep a trace of tx and rx error counters for later use */
pdev->bec.txerr = er->tx_err_cnt;
@@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
struct pucan_msg *rx_msg)
{
struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
- struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
- struct net_device *netdev = dev->netdev;
+ struct peak_usb_device *dev;
+ struct net_device *netdev;
struct can_frame *cf;
struct sk_buff *skb;
+ if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
+ return -EINVAL;
+
+ dev = usb_if->dev[pufd_omsg_get_channel(ov)];
+ netdev = dev->netdev;
+
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
if (!skb)
@@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
u16 tx_msg_size, tx_msg_flags;
u8 can_dlc;
+ if (cfd->len > CANFD_MAX_DLEN)
+ return -EINVAL;
+
tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
tx_msg->size = cpu_to_le16(tx_msg_size);
tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
@@ -796,7 +822,7 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
return err;
}
-/* socket callback used to copy berr counters values receieved through USB */
+/* socket callback used to copy berr counter values received through USB */
static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
struct can_berr_counter *bec)
{
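
The hunks above add the same guard before every usb_if->dev[...] lookup, so a malformed or forged message carrying an out-of-range channel number can no longer index past the array. A stripped-down sketch of the guard; the example_* names and the array size are assumptions.

#include <stddef.h>

#define EX_CHANNEL_MAX 2	/* assumed size of the dev[] array */

struct example_if {
	void *dev[EX_CHANNEL_MAX];
};

static void *example_get_dev(const struct example_if *usb_if,
			     unsigned int channel)
{
	/* reject the index before it is used to dereference dev[] */
	if (channel >= EX_CHANNEL_MAX)
		return NULL;

	return usb_if->dev[channel];
}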
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 1689ab387612..c7564773fb2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -186,7 +186,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
len = pc - pm->rec_ptr;
if (len > 0) {
- *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1);
+ le32_add_cpu(pm->u.rec_cnt, 1);
*pm->rec_ptr = id;
pm->rec_ptr = pc;
@@ -973,7 +973,7 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
/*
- * below is the list of valid ep addreses. Any other ep address
+ * below is the list of valid ep addresses. Any other ep address
* is considered as not-CAN interface address => no dev created
*/
switch (ep->bEndpointAddress) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 81e942f713e6..dc5290b36598 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1445,7 +1445,7 @@ static int ucan_probe(struct usb_interface *intf,
/* request the device information and store it in ctl_msg_buffer
*
- * note: ucan_ctrl_command_* wrappers connot be used yet
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
* because `up` is initialised in Stage 3
*/
ret = usb_control_msg(udev,
@@ -1494,7 +1494,7 @@ static int ucan_probe(struct usb_interface *intf,
up = netdev_priv(netdev);
- /* initialze data */
+ /* initialize data */
up->udev = udev;
up->intf = intf;
up->netdev = netdev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8fa224b28218..62749c67c959 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -88,7 +88,7 @@ enum usb_8dev_cmd {
/* status */
#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */
-#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occured when sending */
+#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */
#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */
#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. has reached 128 */
#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */
@@ -165,7 +165,7 @@ struct __packed usb_8dev_rx_msg {
/* command frame */
struct __packed usb_8dev_cmd_msg {
u8 begin;
- u8 channel; /* unkown - always 0 */
+ u8 channel; /* unknown - always 0 */
u8 command; /* command to execute */
u8 opt1; /* optional parameter / return value */
u8 opt2; /* optional parameter 2 */
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c1dbab8c896d..48d746e18f30 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -259,7 +259,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
@@ -272,7 +272,7 @@ static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
@@ -1308,7 +1308,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_interrupt - CAN Isr
* @irq: irq number
- * @dev_id: device id poniter
+ * @dev_id: device id pointer
*
* This is the xilinx CAN Isr. It checks for the type of interrupt
* and invokes the corresponding ISR.
@@ -1369,9 +1369,13 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
/* Disable interrupts and leave the can in configuration mode */
- set_reset_mode(ndev);
+ ret = set_reset_mode(ndev);
+ if (ret < 0)
+ netdev_dbg(ndev, "set_reset_mode() Failed\n");
+
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1391,7 +1395,7 @@ static int xcan_open(struct net_device *ndev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- return ret;
+ goto err;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
@@ -1475,6 +1479,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
+ pm_runtime_put(priv->dev);
return ret;
}
@@ -1667,7 +1672,7 @@ static int xcan_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
int rx_max, tx_max;
- int hw_tx_max, hw_rx_max;
+ u32 hw_tx_max = 0, hw_rx_max = 0;
const char *hw_tx_max_property;
/* Get the virtual base address for the device */
@@ -1720,7 +1725,7 @@ static int xcan_probe(struct platform_device *pdev)
*/
if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
(devtype->flags & XCAN_FLAG_TXFEMP))
- tx_max = min(hw_tx_max, 2);
+ tx_max = min(hw_tx_max, 2U);
else
tx_max = 1;
@@ -1789,7 +1794,7 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
__func__, ret);
- goto err_pmdisable;
+ goto err_disableclks;
}
if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
@@ -1824,7 +1829,6 @@ static int xcan_probe(struct platform_device *pdev)
err_disableclks:
pm_runtime_put(priv->dev);
-err_pmdisable:
pm_runtime_disable(&pdev->dev);
err_free:
free_candev(ndev);
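
Two of the xilinx_can hunks fix the same asymmetry: pm_runtime_get_sync() raises the device usage count even when it fails, so a failing call must still be balanced with pm_runtime_put() (directly, or by jumping to a label that drops the reference). A sketch of the pattern with an assumed example_ name:

#include <linux/pm_runtime.h>

static int example_access_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put(dev);	/* balance the failed get */
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_put(dev);
	return 0;
}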
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 468b3c4273c5..2451f61a38e4 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,12 +33,12 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "Mediatek MT7530 Ethernet switch support"
+ tristate "MediaTek MT753x and MT7621 Ethernet switch support"
depends on NET_DSA
select NET_DSA_TAG_MTK
help
- This enables support for the Mediatek MT7530 Ethernet switch
- chip.
+ This enables support for the MediaTek MT7530, MT7531, and MT7621
+ Ethernet switch chips.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e731db900ee0..288b5a5c3e0d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -17,8 +17,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
@@ -767,8 +765,11 @@ static int b53_switch_reset(struct b53_device *dev)
usleep_range(1000, 2000);
} while (timeout-- > 0);
- if (timeout == 0)
+ if (timeout == 0) {
+ dev_err(dev->dev,
+ "Timeout waiting for SW_RST to clear!\n");
return -ETIMEDOUT;
+ }
}
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -976,6 +977,54 @@ int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
EXPORT_SYMBOL(b53_get_sset_count);
+enum b53_devlink_resource_id {
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+};
+
+static u64 b53_devlink_vlan_table_get(void *priv)
+{
+ struct b53_device *dev = priv;
+ struct b53_vlan *vl;
+ unsigned int i;
+ u64 count = 0;
+
+ for (i = 0; i < dev->num_vlans; i++) {
+ vl = &dev->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+int b53_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct b53_device *dev = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, dev->num_vlans,
+ dev->num_vlans,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ b53_devlink_vlan_table_get, dev);
+
+ return 0;
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+EXPORT_SYMBOL(b53_setup_devlink_resources);
+
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -991,8 +1040,10 @@ static int b53_setup(struct dsa_switch *ds)
b53_reset_mib(dev);
ret = b53_apply_config(dev);
- if (ret)
+ if (ret) {
dev_err(ds->dev, "failed to apply configuration\n");
+ return ret;
+ }
/* Configure IMP/CPU port, disable all other ports. Enabled
* ports will be configured with .port_enable
@@ -1011,7 +1062,12 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
- return ret;
+ return b53_setup_devlink_resources(ds);
+}
+
+static void b53_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
static void b53_force_link(struct b53_device *dev, int port, int link)
@@ -1318,26 +1374,13 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct b53_device *dev = ds->priv;
- u16 pvid, new_pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- if (!vlan_filtering) {
- /* Filtering is currently enabled, use the default PVID since
- * the bridge does not expect tagging anymore
- */
- dev->ports[port].pvid = pvid;
- new_pvid = b53_default_pvid(dev);
- } else {
- /* Filtering is currently disabled, restore the previous PVID */
- new_pvid = dev->ports[port].pvid;
- }
-
- if (pvid != new_pvid)
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- new_pvid);
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
@@ -2140,6 +2183,7 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
+ .teardown = b53_teardown,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
@@ -2562,6 +2606,9 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->configure_vlan_while_not_filtering = true;
+ ds->untag_bridge_pvid = true;
+ dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
@@ -2620,8 +2667,9 @@ int b53_switch_detect(struct b53_device *dev)
dev->chip_id = id32;
break;
default:
- pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
- id8, id32);
+ dev_err(dev->dev,
+ "unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
return -ENODEV;
}
}
@@ -2651,7 +2699,8 @@ int b53_switch_register(struct b53_device *dev)
if (ret)
return ret;
- pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->name, dev->core_rev);
return dsa_register_switch(dev->ds);
}
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index e942c60e4365..7c67409bb186 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,7 +91,6 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
- u16 pvid;
};
struct b53_vlan {
@@ -328,6 +327,7 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast);
+int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
@@ -347,7 +347,8 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void b53_vlan_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 5ebff986a1ac..1e9a0adda2d6 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,6 +14,7 @@
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -31,6 +32,49 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
+/* Return the number of active ports, not counting the IMP (CPU) port */
+static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port, count = 0;
+
+ for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+ if (priv->port_sts[port].enabled)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned long new_rate;
+ unsigned int ports_active;
+ /* Frequency in Hz */
+ static const unsigned long rate_table[] = {
+ 59220000,
+ 60820000,
+ 62500000,
+ 62500000,
+ };
+
+ ports_active = bcm_sf2_num_active_ports(ds);
+ if (ports_active == 0 || !priv->clk_mdiv)
+ return;
+
+ /* If we overflow our table, just use the recommended operational
+ * frequency
+ */
+ if (ports_active > ARRAY_SIZE(rate_table))
+ new_rate = 90000000;
+ else
+ new_rate = rate_table[ports_active - 1];
+ clk_set_rate(priv->clk_mdiv, new_rate);
+}
+
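
For reference, the mapping bcm_sf2_recalc_clock() implements can be summarized as follows (rates taken from rate_table above):

	/* ports_active -> rate passed to clk_set_rate():
	 *	0	-> none, the clock is left untouched
	 *	1	-> 59.22 MHz
	 *	2	-> 60.82 MHz
	 *	3 or 4	-> 62.50 MHz
	 *	5+	-> 90.00 MHz (recommended operational frequency)
	 */
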
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -82,6 +126,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg &= ~(RX_DIS | TX_DIS);
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
+
+ priv->port_sts[port].enabled = true;
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -167,6 +213,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
+ priv->port_sts[port].enabled = true;
+
+ bcm_sf2_recalc_clock(ds);
+
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
@@ -260,6 +310,10 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg |= P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ priv->port_sts[port].enabled = false;
+
+ bcm_sf2_recalc_clock(ds);
}
@@ -403,6 +457,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
{
struct device_node *port;
unsigned int port_num;
+ struct property *prop;
phy_interface_t mode;
int err;
@@ -429,15 +484,27 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
priv->brcm_tag_mask |= 1 << port_num;
+
+ /* Ensure that port 5 is not picked up as a DSA CPU port
+ * flavour but as a regular port instead. We should be using
+ * devlink to set the port flavour.
+ */
+ if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
+ prop = of_find_property(port, "ethernet", NULL);
+ if (prop)
+ of_remove_property(port, prop);
+ }
}
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct device_node *dn;
+ struct device_node *dn, *child;
+ struct phy_device *phydev;
+ struct property *prop;
static int index;
- int err;
+ int err, reg;
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
@@ -471,7 +538,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
* driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
else
priv->indir_phy_mask = 0;
@@ -480,6 +547,31 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+ /* Make sure that of_phy_connect() cannot be used on these PHYs by
+ * removing the 'phandle' and 'linux,phandle' properties and by
+ * unregistering any PHY device that was already registered.
+ */
+ for_each_available_child_of_node(dn, child) {
+ if (of_property_read_u32(child, "reg", &reg) ||
+ reg >= PHY_MAX_ADDR)
+ continue;
+
+ if (!(priv->indir_phy_mask & BIT(reg)))
+ continue;
+
+ prop = of_find_property(child, "phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ prop = of_find_property(child, "linux,phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ phydev = of_phy_find_device(child);
+ if (phydev)
+ phy_device_remove(phydev);
+ }
+
err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@@ -750,6 +842,9 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
bcm_sf2_port_disable(ds, port);
}
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -758,6 +853,9 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret;
+ if (!priv->wol_ports_mask)
+ clk_prepare_enable(priv->clk);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("%s: failed to software reset switch\n", __func__);
@@ -849,7 +947,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
b53_configure_vlan(ds);
bcm_sf2_enable_acb(ds);
- return 0;
+ return b53_setup_devlink_resources(ds);
+}
+
+static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
@@ -986,6 +1089,7 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
+ .teardown = bcm_sf2_sw_teardown,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1189,10 +1293,24 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
base++;
}
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ clk_prepare_enable(priv->clk);
+
+ priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ if (IS_ERR(priv->clk_mdiv)) {
+ ret = PTR_ERR(priv->clk_mdiv);
+ goto out_clk;
+ }
+
+ clk_prepare_enable(priv->clk_mdiv);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("unable to software reset switch: %d\n", ret);
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
@@ -1200,7 +1318,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
@@ -1267,6 +1385,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
out_mdio:
bcm_sf2_mdio_unregister(priv);
+out_clk_mdiv:
+ clk_disable_unprepare(priv->clk_mdiv);
+out_clk:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -1280,6 +1402,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ clk_disable_unprepare(priv->clk_mdiv);
+ clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
reset_control_assert(priv->rcdev);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index de386dd96d66..1ed901a68536 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -45,6 +45,7 @@ struct bcm_sf2_hw_params {
struct bcm_sf2_port_status {
unsigned int link;
+ bool enabled;
};
struct bcm_sf2_cfp_priv {
@@ -93,6 +94,9 @@ struct bcm_sf2_priv {
/* Mask of ports enabled for Wake-on-LAN */
u32 wol_ports_mask;
+ struct clk *clk;
+ struct clk *clk_mdiv;
+
/* MoCA port location */
int moca_port;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index eb600b3dbf26..e38906ae8f23 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -28,6 +28,53 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
static struct phy_device *phydevs[PHY_MAX_ADDR];
+enum dsa_loop_devlink_resource_id {
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+};
+
+static u64 dsa_loop_devlink_vtu_get(void *priv)
+{
+ struct dsa_loop_priv *ps = priv;
+ unsigned int i, count = 0;
+ struct dsa_loop_vlan *vl;
+
+ for (i = 0; i < ARRAY_SIZE(ps->vlans); i++) {
+ vl = &ps->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+static int dsa_loop_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct dsa_loop_priv *ps = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, ARRAY_SIZE(ps->vlans),
+ ARRAY_SIZE(ps->vlans),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VTU", ARRAY_SIZE(ps->vlans),
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ dsa_loop_devlink_vtu_get, ps);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -48,7 +95,12 @@ static int dsa_loop_setup(struct dsa_switch *ds)
dev_dbg(ds->dev, "%s\n", __func__);
- return 0;
+ return dsa_loop_setup_devlink_resources(ds);
+}
+
+static void dsa_loop_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -138,7 +190,8 @@ static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
@@ -243,6 +296,7 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
+ .teardown = dsa_loop_teardown,
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
@@ -290,6 +344,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
+ ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 521ebc072903..74db81dafee3 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -736,14 +736,23 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
/* Do not allow changing the VLAN filtering options while in bridge */
- if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
- return -EIO;
+ if (switchdev_trans_ph_prepare(trans)) {
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+
+ if (!bridge)
+ return 0;
+
+ if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
+ return -EIO;
+
+ return 0;
+ }
if (vlan_filtering) {
/* Use port based VLAN tag */
@@ -781,8 +790,15 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
+ struct switchdev_trans trans;
+
+ /* Skip the prepare phase, this shouldn't return an error
+ * during setup.
+ */
+ trans.ph_prepare = false;
+
gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false);
+ gswip_port_vlan_filtering(ds, i, false, &trans);
}
/* enable Switch */
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index f5779e152377..1e101ab56cea 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -782,10 +782,14 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag)
+ bool flag,
+ struct switchdev_trans *trans)
{
struct ksz_device *dev = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 2f5506ac7d19..abfd3802bb51 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -493,10 +493,14 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag)
+ bool flag,
+ struct switchdev_trans *trans)
{
struct ksz_device *dev = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -1235,6 +1239,9 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
data8 |= PORT_RGMII_ID_EG_ENABLE;
+ /* On KSZ9893, disable RGMII in-band status support */
+ if (dev->features & IS_9893)
+ data8 &= ~PORT_MII_MAC_MODE;
p->phydev.speed = SPEED_1000;
break;
}
@@ -1265,6 +1272,8 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
phy_interface_t interface;
+ const char *prev_msg;
+ const char *prev_mode;
dev->cpu_port = i;
dev->host_mask = (1 << dev->cpu_port);
@@ -1287,11 +1296,19 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
p->interface = interface;
}
}
- if (interface && interface != p->interface)
- dev_info(dev->dev,
- "use %s instead of %s\n",
- phy_modes(p->interface),
- phy_modes(interface));
+ if (interface && interface != p->interface) {
+ prev_msg = " instead of ";
+ prev_mode = phy_modes(interface);
+ } else {
+ prev_msg = "";
+ prev_mode = "";
+ }
+ dev_info(dev->dev,
+ "Port%d: using phy mode %s%s%s\n",
+ i,
+ phy_modes(p->interface),
+ prev_msg,
+ prev_mode);
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
@@ -1435,10 +1452,12 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
/* Default capability is gigabit capable. */
dev->features = GBIT_SUPPORT;
+ dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
id_hi = (u8)(id32 >> 16);
id_lo = (u8)(id32 >> 8);
if ((id_lo & 0xf) == 3) {
/* Chip is from KSZ9893 design. */
+ dev_info(dev->dev, "Found KSZ9893\n");
dev->features |= IS_9893;
/* Chip does not support gigabit. */
@@ -1447,6 +1466,7 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
dev->mib_port_cnt = 3;
dev->phy_port_cnt = 2;
} else {
+ dev_info(dev->dev, "Found KSZ9477 or compatible\n");
/* Chip uses new XMII register definitions. */
dev->features |= NEW_XMII;
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 7951f52d860d..4e053a25d077 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -80,6 +80,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
{ .compatible = "microchip,ksz9897" },
{ .compatible = "microchip,ksz9893" },
+ { .compatible = "microchip,ksz9563" },
{ .compatible = "microchip,ksz9567" },
{},
};
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index c796d42730ba..0ef854911f21 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
-
- /* Start the timer 2 seconds later. */
- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
/* Read all MIB counters when the link is going down. */
p->read = true;
- schedule_delayed_work(&dev->mib_read, 0);
+ /* Only schedule the read if the MIB timer has been started */
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);
@@ -402,8 +398,9 @@ int ksz_switch_register(struct ksz_device *dev,
if (dev->reset_gpio) {
gpiod_set_value_cansleep(dev->reset_gpio, 1);
- mdelay(10);
+ usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
+ usleep_range(100, 1000);
}
mutex_init(&dev->dev_mutex);
@@ -450,6 +447,12 @@ int ksz_switch_register(struct ksz_device *dev,
return ret;
}
+ /* Read MIB counters every 30 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(30000);
+
+ /* Start the MIB timer. */
+ schedule_delayed_work(&dev->mib_read, 0);
+
return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
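
Moving the first schedule into ksz_switch_register() only works because the read work re-arms itself using dev->mib_read_interval, which is now guaranteed to be set beforehand. A sketch of the assumed worker shape (ksz_mib_read_work() itself is not part of this hunk):

	static void ksz_mib_read_work(struct work_struct *work)
	{
		struct ksz_device *dev = container_of(work, struct ksz_device,
						      mib_read.work);

		/* ... walk the ports and accumulate the MIB counters ... */

		/* Re-arm for the next 30-second pass; ksz_mac_link_down() can
		 * also kick an immediate run via schedule_delayed_work(.., 0).
		 */
		schedule_delayed_work(&dev->mib_read, dev->mib_read_interval);
	}
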
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1aaf47a0da2b..de7692b763d8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -234,6 +234,12 @@ mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
}
static u32
+_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
+{
+ return mt7530_mii_read(p->priv, p->reg);
+}
+
+static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
struct mii_bus *bus = p->priv->bus;
@@ -372,8 +378,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
+/* Setup TX circuit including relevant PAD and driving */
static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
u32 ncpo1, ssc_delta, trgint, i, xtal;
@@ -387,7 +394,7 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return -EINVAL;
}
- switch (mode) {
+ switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
/* PLL frequency: 125MHz */
@@ -409,7 +416,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
}
break;
default:
- dev_err(priv->dev, "xMII mode %d not supported\n", mode);
+ dev_err(priv->dev, "xMII interface %d not supported\n",
+ interface);
return -EINVAL;
}
@@ -481,6 +489,108 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return 0;
}
+static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
+{
+ u32 val;
+
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+
+ return (val & PAD_DUAL_SGMII_EN) != 0;
+}
+
+static int
+mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 top_sig;
+ u32 hwstrap;
+ u32 xtal;
+ u32 val;
+
+ if (mt7531_dual_sgmii_supported(priv))
+ return 0;
+
+ val = mt7530_read(priv, MT7531_CREV);
+ top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ if ((val & CHIP_REV_M) > 0)
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
+ HWTRAP_XTAL_FSEL_25MHZ;
+ else
+ xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+
+ /* Step 1 : Disable MT7531 COREPLL */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_CLKSW;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_PLLGP;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ switch (xtal) {
+ case HWTRAP_XTAL_FSEL_25MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ case HWTRAP_XTAL_FSEL_40MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ }
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+ usleep_range(25, 35);
+
+ return 0;
+}
+
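
Each numbered step above is a plain read-modify-write of a PLL register; the same operations could be expressed with the driver's existing mt7530_rmw() helper. A sketch of step 1 in that form (behavior assumed equivalent):

	/* Step 1: disable the MT7531 COREPLL, read-modify-write style */
	mt7530_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);
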
static void
mt7530_mib_reset(struct dsa_switch *ds)
{
@@ -505,6 +615,217 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int
+mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
+ int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 reg, val;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad);
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 val, reg;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | data;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 val;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum);
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 reg;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum) | data;
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_read(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK);
+ } else {
+ ret = mt7531_ind_c22_phy_read(priv, port, regnum);
+ }
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_write(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK,
+ data);
+ } else {
+ ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
+ }
+
+ return ret;
+}
+
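
The C22/C45 dispatch above relies on the MDIO core's encoding of Clause 45 accesses into a single regnum, using the MII_ADDR_C45 flag and field macros from include/linux/mdio.h. For reference, a read of CORE_PLL_GROUP4 through the .phy_read hook would be encoded as (illustrative):

	/* regnum layout for Clause 45: bit 30 = MII_ADDR_C45 flag,
	 * bits 20:16 = device address (devad), bits 15:0 = register.
	 */
	int regnum = MII_ADDR_C45 |
		     (MDIO_MMD_VEND2 << MII_DEVADDR_C45_SHIFT) |
		     CORE_PLL_GROUP4;
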
static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -621,9 +942,18 @@ unlock_exit:
}
static int
-mt7530_cpu_port_enable(struct mt7530_priv *priv,
- int port)
+mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+
+ /* Set up the maximum capability of the CPU port first */
+ if (priv->info->cpu_port_config) {
+ ret = priv->info->cpu_port_config(ds, port);
+ if (ret)
+ return ret;
+ }
+
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
@@ -636,7 +966,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* CPU port gets connected to all user ports of
- * the switch
+ * the switch.
*/
mt7530_write(priv, MT7530_PCR_P(port),
PCR_MATRIX(dsa_user_ports(priv->ds)));
@@ -959,8 +1289,12 @@ mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1130,27 +1464,42 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+static int mt753x_mirror_port_get(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_mirror_port_set(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress)
{
struct mt7530_priv *priv = ds->priv;
+ int monitor_port;
u32 val;
/* Check for existent entry */
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
return -EEXIST;
- val = mt7530_read(priv, MT7530_MFC);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ monitor_port = mt753x_mirror_port_get(priv->id, val);
+ if (val & MT753X_MIRROR_EN(priv->id) &&
+ monitor_port != mirror->to_local_port)
return -EEXIST;
- val |= MIRROR_EN;
- val &= ~MIRROR_MASK;
- val |= mirror->to_local_port;
- mt7530_write(priv, MT7530_MFC, val);
+ val |= MT753X_MIRROR_EN(priv->id);
+ val &= ~MT753X_MIRROR_MASK(priv->id);
+ val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
if (ingress) {
@@ -1165,7 +1514,7 @@ static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct mt7530_priv *priv = ds->priv;
@@ -1182,9 +1531,9 @@ static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PCR_P(port), val);
if (!priv->mirror_rx && !priv->mirror_tx) {
- val = mt7530_read(priv, MT7530_MFC);
- val &= ~MIRROR_EN;
- mt7530_write(priv, MT7530_MFC, val);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+ val &= ~MT753X_MIRROR_EN(priv->id);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
}
}
@@ -1290,9 +1639,11 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- if (dsa_is_cpu_port(ds, i))
- mt7530_cpu_port_enable(priv, i);
- else
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
mt7530_port_disable(ds, i);
/* Enable consistent egress tag */
@@ -1352,51 +1703,492 @@ mt7530_setup(struct dsa_switch *ds)
return 0;
}
-static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static int
+mt7531_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_dummy_poll p;
+ u32 val, id;
+ int ret, i;
+
+ /* Reset the whole chip through the GPIO pin or the memory-mapped
+ * registers, depending on the type of hardware.
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait for the switch to become stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7531_CREV);
+ id >>= CHIP_NAME_SHIFT;
+
+ if (id != MT7531_ID) {
+ dev_err(priv->dev, "chip %x can't be supported\n", id);
+ return -ENODEV;
+ }
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ if (mt7531_dual_sgmii_supported(priv)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+
+ /* Let ds->slave_mii_bus access the external PHY. */
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
+ MT7531_EXT_P_MDC_11);
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
+ MT7531_EXT_P_MDIO_12);
+ } else {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ }
+ dev_dbg(ds->dev, "P5 supports %s interface\n",
+ p5_intf_modes(priv->p5_intf_sel));
+
+ mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
+ MT7531_GPIO0_INTERRUPT);
+
+ /* Let phylink decide the interface later. */
+ priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+ /* Enable the PHY core PLL. Since no phy_device has been created yet
+ * for phy_[read,write]_mmd_indirect to operate on, use the driver's
+ * own mt7531_ind_c45_phy_[read,write] helpers to do the indirect MMD
+ * access.
+ */
+ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+ val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val &= ~MT7531_PHY_PLL_OFF;
+ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ CORE_PLL_GROUP4, val);
+
+ /* BPDU to CPU port */
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(MT7530_CPU_PORT));
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
+ mt7530_port_disable(ds, i);
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool
+mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
+ case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
if (!phy_interface_mode_is_rgmii(state->interface) &&
state->interface != PHY_INTERFACE_MODE_MII &&
state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
+ break;
+ case 6: /* 1st cpu port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_TRGMII)
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
- mt7530_setup_port5(ds, state->interface);
+ return true;
+}
+
+static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
+{
+ return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
+}
+
+static bool
+mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ return false;
+ break;
+ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
+ if (mt7531_is_rgmii_port(priv, port))
+ return phy_interface_mode_is_rgmii(state->interface);
+ fallthrough;
+ case 6: /* 1st cpu port supports sgmii/8023z only */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface))
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_mode_supported(ds, port, state);
+}
+
+static int
+mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->pad_setup(ds, state->interface);
+}
+
+static int
+mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Only need to setup port5. */
+ if (port != 5)
+ return 0;
+
+ mt7530_setup_port5(priv->ds, interface);
+
+ return 0;
+}
+
+static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ u32 val;
+
+ if (!mt7531_is_rgmii_port(priv, port)) {
+ dev_err(priv->dev, "RGMII mode is not available for port %d\n",
+ port);
+ return -EINVAL;
+ }
+
+ val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
+ val |= GP_CLK_EN;
+ val &= ~GP_MODE_MASK;
+ val |= GP_MODE(MT7531_GP_MODE_RGMII);
+ val &= ~CLK_SKEW_IN_MASK;
+ val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
+ val &= ~CLK_SKEW_OUT_MASK;
+ val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
+ val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
+
+ /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
+ if (!phydev || phy_driver_is_genphy(phydev)) {
+ val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= TXCLK_NO_REVERSE;
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= TXCLK_NO_REVERSE;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
+
+ return 0;
+}
+
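
The flag handling above determines which side of the link inserts the RGMII clock skew; summarized (restating the switch cases, with the MAC compensating only when the generic PHY driver is bound):

	/* With a vendor PHY driver: TXCLK_NO_REVERSE | RXCLK_NO_DELAY, i.e.
	 * the MAC leaves both clocks untouched and the PHY handles the skew.
	 * With the generic PHY driver, per phy-mode:
	 *	rgmii		- both flags set, MAC adds no skew
	 *	rgmii-rxid	- TXCLK_NO_REVERSE only, MAC delays the RX clock
	 *	rgmii-txid	- RXCLK_NO_DELAY only, MAC reverses the TX clock
	 *	rgmii-id	- neither flag, MAC adjusts both clocks
	 */
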
+static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
+ unsigned long *supported)
+{
+ /* Port5 supports either RGMII or SGMII.
+ * Port6 supports SGMII only.
+ */
+ switch (port) {
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ break;
+ fallthrough;
+ case 6:
+ phylink_set(supported, 1000baseX_Full);
+ phylink_set(supported, 2500baseX_Full);
+ phylink_set(supported, 2500baseT_Full);
+ }
+}
+
+static void
+mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+ unsigned int val;
+
+ /* For adjusting speed and duplex of SGMII force mode. */
+ if (interface != PHY_INTERFACE_MODE_SGMII ||
+ phylink_autoneg_inband(mode))
+ return;
+
+ /* SGMII force mode setting */
+ val = mt7530_read(priv, MT7531_SGMII_MODE(port));
+ val &= ~MT7531_SGMII_IF_MODE_MASK;
+
+ switch (speed) {
+ case SPEED_10:
+ val |= MT7531_SGMII_FORCE_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= MT7531_SGMII_FORCE_SPEED_100;
+ break;
+ case SPEED_1000:
+ val |= MT7531_SGMII_FORCE_SPEED_1000;
+ break;
+ }
+
+ /* MT7531 SGMII 1G force mode can only work in full duplex mode,
+ * no matter whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ if ((speed == SPEED_10 || speed == SPEED_100) &&
+ duplex != DUPLEX_FULL)
+ val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
+
+ mt7530_write(priv, MT7531_SGMII_MODE(port), val);
+}
+
+static bool mt753x_is_mac_port(u32 port)
+{
+ return (port == 5 || port == 6);
+}
+
+static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
+ val &= ~MT7531_RG_TPHY_SPEED_MASK;
+ /* Set up a 2.5 times faster clock for 2.5Gbps data speeds with
+ * 8b/10b encoding.
+ */
+ val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
+ MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
+ mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
+
+ mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ /* MT7531 SGMII 1G and 2.5G force mode can only work in full duplex
+ * mode, no matter whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ mt7530_rmw(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
+ MT7531_SGMII_FORCE_SPEED_1000);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
+ phy_interface_t interface)
+{
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
+ MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
+
+ mt7530_set(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_REMOTE_FAULT_DIS |
+ MT7531_SGMII_SPEED_DUPLEX_AN);
+
+ mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
+ MT7531_SGMII_TX_CONFIG_MASK, 1);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Only restart AN when AN is enabled */
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ if (val & MT7531_SGMII_AN_ENABLE) {
+ val |= MT7531_SGMII_AN_RESTART;
+ mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
+ }
+}
+
+static int
+mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct phy_device *phydev;
+ struct dsa_port *dp;
+
+ if (!mt753x_is_mac_port(port)) {
+ dev_err(priv->dev, "port %d is not a MAC port\n", port);
+ return -EINVAL;
+ }
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dp = dsa_to_port(ds, port);
+ phydev = dp->slave->phydev;
+ return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_SGMII:
+ return mt7531_sgmii_setup_mode_an(priv, port, interface);
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode))
+ return -EINVAL;
+
+ return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_config(ds, port, mode, state->interface);
+}
+
+static void
+mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr_cur, mcr_new;
+
+ if (!mt753x_phy_mode_supported(ds, port, state))
+ goto unsupported;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (priv->p5_interface == state->interface)
+ break;
+
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
+ if (priv->p5_intf_sel != P5_DISABLED)
+ priv->p5_interface = state->interface;
break;
case 6: /* 1st cpu port */
if (priv->p6_interface == state->interface)
break;
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return;
+ mt753x_pad_setup(ds, state);
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, state->interface);
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
priv->p6_interface = state->interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+unsupported:
+ dev_err(ds->dev, "%s: unsupported %s port: %i\n",
+ __func__, phy_modes(state->interface), port);
return;
}
- if (phylink_autoneg_inband(mode)) {
+ if (phylink_autoneg_inband(mode) &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
@@ -1406,7 +2198,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE;
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
@@ -1416,7 +2208,18 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void
+mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_an_restart)
+ return;
+
+ priv->info->mac_pcs_an_restart(ds, port);
+}
+
+static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
@@ -1425,7 +2228,19 @@ static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_link_up)
+ return;
+
+ priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+}
+
+static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
@@ -1435,8 +2250,19 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
+ mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ /* MT753x MAC works in 1G full duplex mode for all up-clocked
+ * variants.
+ */
+ if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ (phy_interface_mode_is_8023z(interface))) {
+ speed = SPEED_1000;
+ duplex = DUPLEX_FULL;
+ }
+
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_SPEED_1000;
@@ -1456,66 +2282,107 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static int
+mt7531_cpu_port_config(struct dsa_switch *ds, int port)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+ phy_interface_t interface;
+ int speed;
+ int ret;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ interface = PHY_INTERFACE_MODE_RGMII;
+ else
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ priv->p5_interface = interface;
break;
- case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- goto unsupported;
+ case 6:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ mt7531_pad_setup(ds, interface);
+
+ priv->p6_interface = interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
-unsupported:
+ return -EINVAL;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else
+ speed = SPEED_1000;
+
+ ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
+ if (ret)
+ return ret;
+ mt7530_write(priv, MT7530_PMCR_P(port),
+ PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
+ speed, DUPLEX_FULL, true, true);
+
+ return 0;
+}
+
+static void
+mt7530_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ if (port == 5)
+ phylink_set(supported, 1000baseX_Full);
+}
+
+static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7531_sgmii_validate(priv, port, supported);
+}
+
+static void
+mt753x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !mt753x_phy_mode_supported(ds, port, state)) {
linkmode_zero(supported);
return;
}
phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- if (state->interface == PHY_INTERFACE_MODE_TRGMII) {
- phylink_set(mask, 1000baseT_Full);
- } else {
+ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
+ !phy_interface_mode_is_8023z(state->interface)) {
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
-
- if (state->interface != PHY_INTERFACE_MODE_MII) {
- /* This switch only supports 1G full-duplex. */
- phylink_set(mask, 1000baseT_Full);
- if (port == 5)
- phylink_set(mask, 1000baseX_Full);
- }
+ phylink_set(mask, Autoneg);
}
+ /* This switch only supports 1G full-duplex. */
+ if (state->interface != PHY_INTERFACE_MODE_MII)
+ phylink_set(mask, 1000baseT_Full);
+
+ priv->info->mac_port_validate(ds, port, mask);
+
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int
@@ -1558,12 +2425,96 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
return 1;
}
+static int
+mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ u32 status, val;
+ u16 config_reg;
+
+ status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (status & MT7531_SGMII_AN_ENABLE)) {
+ val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
+ config_reg = val >> 16;
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(priv->dev, "invalid sgmii PHY speed\n");
+ state->link = false;
+ return -EINVAL;
+ }
+
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
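
The speed/duplex decode above reads back the SGMII auto-negotiation word transmitted by the link partner; its layout follows the Cisco SGMII specification and is what the LPA_SGMII_* masks in include/uapi/linux/mii.h describe:

	/* SGMII link-partner config word (upper half of PCS_SPEED_ABILITY):
	 *	bit 15		- link up
	 *	bit 12		- duplex, 1 = full (LPA_SGMII_FULL_DUPLEX)
	 *	bits 11:10	- speed: 00 = 10M, 01 = 100M, 10 = 1000M
	 *			  (LPA_SGMII_SPD_MASK, LPA_SGMII_10/_100/_1000)
	 */
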
+static int
+mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ return mt7531_sgmii_pcs_get_state_an(priv, port, state);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_get_state(ds, port, state);
+}
+
+static int
+mt753x_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->sw_setup(ds);
+}
+
+static int
+mt753x_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_read(ds, port, regnum);
+}
+
+static int
+mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_write(ds, port, regnum, val);
+}
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
- .setup = mt7530_setup,
+ .setup = mt753x_setup,
.get_strings = mt7530_get_strings,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read = mt753x_phy_read,
+ .phy_write = mt753x_phy_write,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
@@ -1578,18 +2529,59 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
- .port_mirror_add = mt7530_port_mirror_add,
- .port_mirror_del = mt7530_port_mirror_del,
- .phylink_validate = mt7530_phylink_validate,
- .phylink_mac_link_state = mt7530_phylink_mac_link_state,
- .phylink_mac_config = mt7530_phylink_mac_config,
- .phylink_mac_link_down = mt7530_phylink_mac_link_down,
- .phylink_mac_link_up = mt7530_phylink_mac_link_up,
+ .port_mirror_add = mt753x_port_mirror_add,
+ .port_mirror_del = mt753x_port_mirror_del,
+ .phylink_validate = mt753x_phylink_validate,
+ .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_mac_config = mt753x_phylink_mac_config,
+ .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
+ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
+ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
+};
+
+static const struct mt753x_info mt753x_table[] = {
+ [ID_MT7621] = {
+ .id = ID_MT7621,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7530] = {
+ .id = ID_MT7530,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7531] = {
+ .id = ID_MT7531,
+ .sw_setup = mt7531_setup,
+ .phy_read = mt7531_ind_phy_read,
+ .phy_write = mt7531_ind_phy_write,
+ .pad_setup = mt7531_pad_setup,
+ .cpu_port_config = mt7531_cpu_port_config,
+ .phy_mode_supported = mt7531_phy_mode_supported,
+ .mac_port_validate = mt7531_mac_port_validate,
+ .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_config = mt7531_mac_config,
+ .mac_pcs_an_restart = mt7531_sgmii_restart_an,
+ .mac_pcs_link_up = mt7531_sgmii_link_up_force,
+ },
};
static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
- { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
@@ -1630,8 +2622,21 @@ mt7530_probe(struct mdio_device *mdiodev)
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
- priv->id = (unsigned int)(unsigned long)
- of_device_get_match_data(&mdiodev->dev);
+ priv->info = of_device_get_match_data(&mdiodev->dev);
+ if (!priv->info)
+ return -EINVAL;
+
+	/* Sanity-check that all required device operations have been
+	 * filled in properly.
+	 */
+ if (!priv->info->sw_setup || !priv->info->pad_setup ||
+ !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->phy_mode_supported ||
+ !priv->info->mac_port_validate ||
+ !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ return -EINVAL;
+
+ priv->id = priv->info->id;
if (priv->id == ID_MT7530) {
priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 14de60d0b9ca..9278a8e3d04e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,9 +11,10 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
-enum {
+enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
+ ID_MT7531 = 2,
};
#define NUM_TRGMII_CTRL 5
@@ -41,6 +42,33 @@ enum {
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
+/* Registers for CPU forward control */
+#define MT7531_CFC 0x4
+#define MT7531_MIRROR_EN BIT(19)
+#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
+#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
+#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+
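+/* Pick the mirror control register and bits by model: MT7531 uses the CFC
+ * register, while MT7530/MT7621 keep the MFC-based layout.
+ */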
+#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
+ MT7531_CFC : MT7530_MFC)
+#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_EN : MIRROR_EN)
+#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_MASK : MIRROR_MASK)
+
+/* Registers for BPDU and PAE frame control */
+#define MT753X_BPC 0x24
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_bpdu_port_fw {
+ MT753X_BPDU_FOLLOW_MFC,
+ MT753X_BPDU_CPU_EXCLUDE = 4,
+ MT753X_BPDU_CPU_INCLUDE = 5,
+ MT753X_BPDU_CPU_ONLY = 6,
+ MT753X_BPDU_DROP = 7,
+};
+
/* Registers for address table access */
#define MT7530_ATA1 0x74
#define STATIC_EMP 0
@@ -220,10 +248,30 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_SPEED_1000)
+#define MT7531_FORCE_LNK BIT(31)
+#define MT7531_FORCE_SPD BIT(30)
+#define MT7531_FORCE_DPX BIT(29)
+#define MT7531_FORCE_RX_FC BIT(28)
+#define MT7531_FORCE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
+ MT7531_FORCE_SPD | \
+ MT7531_FORCE_DPX | \
+ MT7531_FORCE_RX_FC | \
+ MT7531_FORCE_TX_FC)
+#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
+ MT7531_FORCE_MODE : \
+ PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK)
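+/* Default CPU port configuration: force the link up at 1000M full duplex
+ * with TX/RX and both flow control directions enabled.
+ */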
+#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
+ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+ PMCR_TX_EN | PMCR_RX_EN | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
@@ -237,6 +285,10 @@ enum mt7530_vlan_port_attr {
#define PMSR_DPX BIT(1)
#define PMSR_LINK BIT(0)
+/* Register for port debug count */
+#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
+#define MT7531_DIS_CLR BIT(31)
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
@@ -254,12 +306,118 @@ enum mt7530_vlan_port_attr {
CCR_RX_OCT_CNT_BAD | \
CCR_TX_OCT_CNT_GOOD | \
CCR_TX_OCT_CNT_BAD)
+
+/* MT7531 SGMII register group */
+#define MT7531_SGMII_REG_BASE 0x5000
+#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
+ ((p) - 5) * 0x1000 + (r))
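+/* (p) above is an SGMII-capable switch port (5 or 6); each PCS instance owns
+ * its own 0x1000 register window.
+ */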
+
+/* Register for SGMII PCS_CONTROL_1 */
+#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
+#define MT7531_SGMII_LINK_STATUS BIT(18)
+#define MT7531_SGMII_AN_ENABLE BIT(12)
+#define MT7531_SGMII_AN_RESTART BIT(9)
+
+/* Register for SGMII PCS_SPEED_ABILITY */
+#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
+#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
+#define MT7531_SGMII_TX_CONFIG BIT(0)
+
+/* Register for SGMII_MODE */
+#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
+#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
+#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
+#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
+#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
+#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
+#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
+#define MT7531_SGMII_FORCE_SPEED_10 0
+#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
+
+enum mt7531_sgmii_force_duplex {
+ MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
+ MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
+};
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
+#define MT7531_SGMII_PHYA_PWD BIT(4)
+
+/* Values of SGMII SPEED */
+#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
+#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
+#define MT7531_RG_TPHY_SPEED_1_25G 0x0
+#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+
/* Register for system reset */
#define MT7530_SYS_CTRL 0x7000
#define SYS_CTRL_PHY_RST BIT(2)
#define SYS_CTRL_SW_RST BIT(1)
#define SYS_CTRL_REG_RST BIT(0)
+/* Register for PHY Indirect Access Control */
+#define MT7531_PHY_IAC 0x701C
+#define MT7531_PHY_ACS_ST BIT(31)
+#define MT7531_MDIO_REG_ADDR_MASK (0x1f << 25)
+#define MT7531_MDIO_PHY_ADDR_MASK (0x1f << 20)
+#define MT7531_MDIO_CMD_MASK (0x3 << 18)
+#define MT7531_MDIO_ST_MASK (0x3 << 16)
+#define MT7531_MDIO_RW_DATA_MASK (0xffff)
+#define MT7531_MDIO_REG_ADDR(x) (((x) & 0x1f) << 25)
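+/* For Clause 45 accesses, the device address reuses the register address
+ * field (bits 29:25).
+ */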
+#define MT7531_MDIO_DEV_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_PHY_ADDR(x) (((x) & 0x1f) << 20)
+#define MT7531_MDIO_CMD(x) (((x) & 0x3) << 18)
+#define MT7531_MDIO_ST(x) (((x) & 0x3) << 16)
+
+enum mt7531_phy_iac_cmd {
+ MT7531_MDIO_ADDR = 0,
+ MT7531_MDIO_WRITE = 1,
+ MT7531_MDIO_READ = 2,
+ MT7531_MDIO_READ_CL45 = 3,
+};
+
+/* MDIO_ST: MDIO start field */
+enum mt7531_mdio_st {
+ MT7531_MDIO_ST_CL45 = 0,
+ MT7531_MDIO_ST_CL22 = 1,
+};
+
+#define MT7531_MDIO_CL22_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL22_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+#define MT7531_MDIO_CL45_ADDR (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_ADDR))
+#define MT7531_MDIO_CL45_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL45_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+
+/* Register for RGMII clock phase */
+#define MT7531_CLKGEN_CTRL 0x7500
+#define CLK_SKEW_OUT(x) (((x) & 0x3) << 8)
+#define CLK_SKEW_OUT_MASK GENMASK(9, 8)
+#define CLK_SKEW_IN(x) (((x) & 0x3) << 6)
+#define CLK_SKEW_IN_MASK GENMASK(7, 6)
+#define RXCLK_NO_DELAY BIT(5)
+#define TXCLK_NO_REVERSE BIT(4)
+#define GP_MODE(x) (((x) & 0x3) << 1)
+#define GP_MODE_MASK GENMASK(2, 1)
+#define GP_CLK_EN BIT(0)
+
+enum mt7531_gp_mode {
+ MT7531_GP_MODE_RGMII = 0,
+ MT7531_GP_MODE_MII = 1,
+ MT7531_GP_MODE_REV_MII = 2
+};
+
+enum mt7531_clk_skew {
+ MT7531_CLK_SKEW_NO_CHG = 0,
+ MT7531_CLK_SKEW_DLY_100PPS = 1,
+ MT7531_CLK_SKEW_DLY_200PPS = 2,
+ MT7531_CLK_SKEW_REVERSE = 3,
+};
+
/* Register for hw trap status */
#define MT7530_HWTRAP 0x7800
#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
@@ -267,6 +425,16 @@ enum mt7530_vlan_port_attr {
#define HWTRAP_XTAL_40MHZ (BIT(10))
#define HWTRAP_XTAL_20MHZ (BIT(9))
+#define MT7531_HWTRAP 0x7800
+#define HWTRAP_XTAL_FSEL_MASK BIT(7)
+#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
+#define HWTRAP_XTAL_FSEL_40MHZ 0
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S 7
+#define XTAL_FSEL_M BIT(7)
+#define PHY_EN BIT(6)
+#define CHG_STRAP BIT(8)
+
/* Register for hw trap modification */
#define MT7530_MHWTRAP 0x7804
#define MHWTRAP_PHY0_SEL BIT(20)
@@ -281,14 +449,37 @@ enum mt7530_vlan_port_attr {
#define MT7530_TOP_SIG_CTRL 0x7808
#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+#define MT7531_TOP_SIG_SR 0x780c
+#define PAD_DUAL_SGMII_EN BIT(1)
+#define PAD_MCM_SMI_EN BIT(0)
+
#define MT7530_IO_DRV_CR 0x7810
#define P5_IO_CLK_DRV(x) ((x) & 0x3)
#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+#define MT7531_CHIP_REV 0x781C
+
+#define MT7531_PLLGP_EN 0x7820
+#define EN_COREPLL BIT(2)
+#define SW_CLKSW BIT(1)
+#define SW_PLLGP BIT(0)
+
#define MT7530_P6ECR 0x7830
#define P6_INTF_MODE_MASK 0x3
#define P6_INTF_MODE(x) ((x) & 0x3)
+#define MT7531_PLLGP_CR0 0x78a8
+#define RG_COREPLL_EN BIT(22)
+#define RG_COREPLL_POSDIV_S 23
+#define RG_COREPLL_POSDIV_M 0x3800000
+#define RG_COREPLL_SDM_PCW_S 1
+#define RG_COREPLL_SDM_PCW_M 0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG BIT(0)
+
+/* Registers for RGMII and SGMII PLL clock */
+#define MT7531_ANA_PLLGP_CR2 0x78b0
+#define MT7531_ANA_PLLGP_CR5 0x78bc
+
/* Registers for TRGMII on the both side */
#define MT7530_TRGMII_RCK_CTRL 0x7a00
#define RX_RST BIT(31)
@@ -327,10 +518,25 @@ enum mt7530_vlan_port_attr {
#define MT7530_P5RGMIITXCR 0x7b04
#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+/* Registers for GPIO mode */
+#define MT7531_GPIO_MODE0 0x7c0c
+#define MT7531_GPIO0_MASK GENMASK(3, 0)
+#define MT7531_GPIO0_INTERRUPT 1
+
+#define MT7531_GPIO_MODE1 0x7c10
+#define MT7531_GPIO11_RG_RXD2_MASK GENMASK(15, 12)
+#define MT7531_EXT_P_MDC_11 (2 << 12)
+#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
+#define MT7531_EXT_P_MDIO_12 (2 << 16)
+
#define MT7530_CREV 0x7ffc
#define CHIP_NAME_SHIFT 16
#define MT7530_ID 0x7530
+#define MT7531_CREV 0x781C
+#define CHIP_REV_M 0x0f
+#define MT7531_ID 0x7531
+
/* Registers for core PLL access through mmd indirect */
#define CORE_PLL_GROUP2 0x401
#define RG_SYSPLL_EN_NORMAL BIT(15)
@@ -347,6 +553,10 @@ enum mt7530_vlan_port_attr {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_PHY_PLL_OFF BIT(5)
+#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
+
+#define MT753X_CTRL_PHY_ADDR 0
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -425,6 +635,7 @@ enum p5_interface_select {
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
+ P5_INTF_SEL_GMAC5_SGMII,
};
static const char *p5_intf_modes(unsigned int p5_interface)
@@ -438,11 +649,56 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
+ case P5_INTF_SEL_GMAC5_SGMII:
+ return "GMAC5_SGMII";
default:
return "unknown";
}
}
+/* struct mt753x_info - The main data structure holding the device-specific
+ *			 parts for each supported model
+ * @sw_setup:		Handler for device initialization
+ * @phy_read:		Handler for reading a PHY port
+ * @phy_write:		Handler for writing a PHY port
+ * @pad_setup:		Handler for setting up the bus pad for a certain
+ *			MAC port
+ * @cpu_port_config:	Handler for setting up a CPU port
+ * @phy_mode_supported:	Check if the PHY type is supported on a certain
+ *			port
+ * @mac_port_validate:	Handler for setting an additional validate type for a
+ *			certain MAC port
+ * @mac_port_get_state:	Handler for getting the MAC/PCS state of a certain
+ *			MAC port
+ * @mac_port_config:	Handler for setting up the PHY attributes of a
+ *			certain MAC port
+ * @mac_pcs_an_restart:	Handler for restarting PCS autonegotiation on a
+ *			certain MAC port
+ * @mac_pcs_link_up:	Handler for setting up the PHY attributes of the PCS
+ *			of a certain MAC port
+ */
+struct mt753x_info {
+ enum mt753x_id id;
+
+ int (*sw_setup)(struct dsa_switch *ds);
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
+ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
+ int (*cpu_port_config)(struct dsa_switch *ds, int port);
+ bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state);
+ void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ unsigned long *supported);
+ int (*mac_port_get_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ int (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
+ void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex);
+};
+
/* struct mt7530_priv - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
@@ -468,6 +724,7 @@ struct mt7530_priv {
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
+ const struct mt753x_info *info;
unsigned int id;
bool mcm;
phy_interface_t p6_interface;
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index aa645ff86f64..4b080b448ce7 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += devlink.o
mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f0dbc05e30a4..bd297ae7cf9e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -32,6 +32,7 @@
#include <net/dsa.h>
#include "chip.h"
+#include "devlink.h"
#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
@@ -1465,21 +1466,21 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
-static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
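+/* Record every FID currently in use by the ports and the VTU entries in
+ * @fid_bitmap, which must be MV88E6XXX_N_FID bits long.
+ */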
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ u16 fid;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, fid);
+ err = mv88e6xxx_port_get_fid(chip, i, &fid);
if (err)
return err;
- set_bit(*fid, fid_bitmap);
+ set_bit(fid, fid_bitmap);
}
/* Set every FID bit used by the VLAN entries */
@@ -1497,6 +1498,18 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
set_bit(vlan.fid, fid_bitmap);
} while (vlan.vid < chip->info->max_vid);
+ return 0;
+}
+
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ int err;
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err)
+ return err;
+
/* The reset value 0x000 is used to indicate that multiple address
* databases are not needed. Return the next positive available.
*/
@@ -1508,22 +1521,6 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
-{
- if (chip->info->ops->atu_get_hash)
- return chip->info->ops->atu_get_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
-static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
-{
- if (chip->info->ops->atu_set_hash)
- return chip->info->ops->atu_set_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1581,15 +1578,16 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
- if (!chip->info->max_vid)
- return -EOPNOTSUPP;
+ if (switchdev_trans_ph_prepare(trans))
+ return chip->info->max_vid ? 0 : -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -2837,248 +2835,11 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
-enum mv88e6xxx_devlink_param_id {
- MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
-};
-
-static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static const struct devlink_param mv88e6xxx_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
- "ATU_hash", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
-};
-
-static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-enum mv88e6xxx_devlink_resource_id {
- MV88E6XXX_RESOURCE_ID_ATU,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
-};
-
-static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
- u16 bin)
-{
- u16 occupancy = 0;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
- bin);
- if (err) {
- dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g1_atu_get_next(chip, 0);
- if (err) {
- dev_err(chip->dev, "failed to perform ATU get next\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
- if (err) {
- dev_err(chip->dev, "failed to get ATU stats\n");
- goto unlock;
- }
-
- occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
-
-unlock:
- mv88e6xxx_reg_unlock(chip);
-
- return occupancy;
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_0);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_1);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_2);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_3);
-}
-
-static u64 mv88e6xxx_devlink_atu_get(void *priv)
-{
- return mv88e6xxx_devlink_atu_bin_0_get(priv) +
- mv88e6xxx_devlink_atu_bin_1_get(priv) +
- mv88e6xxx_devlink_atu_bin_2_get(priv) +
- mv88e6xxx_devlink_atu_bin_3_get(priv);
-}
-
-static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
-{
- struct devlink_resource_size_params size_params;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip),
- mv88e6xxx_num_macs(chip),
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU",
- mv88e6xxx_num_macs(chip),
- MV88E6XXX_RESOURCE_ID_ATU,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
- if (err)
- goto out;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip) / 4,
- mv88e6xxx_num_macs(chip) / 4,
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_0",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_1",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_2",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_3",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU,
- mv88e6xxx_devlink_atu_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- mv88e6xxx_devlink_atu_bin_0_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- mv88e6xxx_devlink_atu_bin_1_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- mv88e6xxx_devlink_atu_bin_2_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- mv88e6xxx_devlink_atu_bin_3_get,
- chip);
-
- return 0;
-
-out:
- dsa_devlink_resources_unregister(ds);
- return err;
-}
-
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
+ mv88e6xxx_teardown_devlink_regions(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3211,7 +2972,18 @@ unlock:
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
- dsa_devlink_resources_unregister(ds);
+ goto out_resources;
+
+ err = mv88e6xxx_setup_devlink_regions(ds);
+ if (err)
+ goto out_params;
+
+ return 0;
+
+out_params:
+ mv88e6xxx_teardown_devlink_params(ds);
+out_resources:
+ dsa_devlink_resources_unregister(ds);
return err;
}
@@ -3329,12 +3101,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
- { .compatible = "marvell,mv88e6xxx-mdio-external",
- .data = (void *)true },
- { },
-};
-
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
@@ -3354,7 +3120,6 @@ static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
struct device_node *np)
{
- const struct of_device_id *match;
struct device_node *child;
int err;
@@ -3372,8 +3137,8 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
* bus.
*/
for_each_available_child_of_node(np, child) {
- match = of_match_node(mv88e6xxx_mdio_external_match, child);
- if (match) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
@@ -5614,6 +5379,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_ts_info = mv88e6xxx_get_ts_info,
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
+ .devlink_info_get = mv88e6xxx_devlink_info_get,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 823ae89e5fca..81c244fc0419 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -238,6 +238,19 @@ struct mv88e6xxx_port {
bool mirror_egress;
unsigned int serdes_irq;
char serdes_irq_name[64];
+ struct devlink_region *region;
+};
+
+enum mv88e6xxx_region_id {
+ MV88E6XXX_REGION_GLOBAL1 = 0,
+ MV88E6XXX_REGION_GLOBAL2,
+ MV88E6XXX_REGION_ATU,
+
+ _MV88E6XXX_REGION_MAX,
+};
+
+struct mv88e6xxx_region_priv {
+ enum mv88e6xxx_region_id id;
};
struct mv88e6xxx_chip {
@@ -334,6 +347,9 @@ struct mv88e6xxx_chip {
/* Array of port structures. */
struct mv88e6xxx_port ports[DSA_MAX_PORTS];
+
+ /* devlink regions */
+ struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
};
struct mv88e6xxx_bus_ops {
@@ -689,4 +705,6 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
+
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
new file mode 100644
index 000000000000..ade04c036fd9
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "devlink.h"
+#include "global1.h"
+#include "global2.h"
+#include "port.h"
+
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
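+/* Snapshot all 32 Global1 or Global2 registers into a buffer that is handed
+ * over to devlink and freed through the region's kfree destructor.
+ */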
+static int mv88e6xxx_region_global_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_region_priv *region_priv = ops->priv;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ switch (region_priv->id) {
+ case MV88E6XXX_REGION_GLOBAL1:
+ err = mv88e6xxx_g1_read(chip, i, &registers[i]);
+ break;
+ case MV88E6XXX_REGION_GLOBAL2:
+ err = mv88e6xxx_g2_read(chip, i, &registers[i]);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+/* The ATU entry layout varies between mv88e6xxx chipset generations. Define
+ * a generic format that covers all current and, hopefully, future
+ * mv88e6xxx generations.
+ */
+
+struct mv88e6xxx_devlink_atu_entry {
+ /* The FID is scattered over multiple registers. */
+ u16 fid;
+ u16 atu_op;
+ u16 atu_data;
+ u16 atu_01;
+ u16 atu_23;
+ u16 atu_45;
+};
+
+static int mv88e6xxx_region_atu_snapshot_fid(struct mv88e6xxx_chip *chip,
+ int fid,
+ struct mv88e6xxx_devlink_atu_entry *table,
+ int *count)
+{
+ u16 atu_op, atu_data, atu_01, atu_23, atu_45;
+ struct mv88e6xxx_atu_entry addr;
+ int err;
+
+ addr.state = 0;
+ eth_broadcast_addr(addr.mac);
+
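+	/* Starting from the broadcast address, walk the ATU with GetNext
+	 * until the iteration wraps back around to the broadcast entry.
+	 */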
+ do {
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ if (err)
+ return err;
+
+ if (!addr.state)
+ break;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &atu_op);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &atu_data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01, &atu_01);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC23, &atu_23);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC45, &atu_45);
+ if (err)
+ return err;
+
+ table[*count].fid = fid;
+ table[*count].atu_op = atu_op;
+ table[*count].atu_data = atu_data;
+ table[*count].atu_01 = atu_01;
+ table[*count].atu_23 = atu_23;
+ table[*count].atu_45 = atu_45;
+ (*count)++;
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return 0;
+}
+
+static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_devlink_atu_entry *table;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int fid = -1, count, err;
+
+ table = kmalloc_array(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ memset(table, 0, mv88e6xxx_num_databases(chip) *
+ sizeof(struct mv88e6xxx_devlink_atu_entry));
+
+ count = 0;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+
+ while (1) {
+ fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+ if (fid == MV88E6XXX_N_FID)
+ break;
+
+ err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
+ &count);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+ }
+ *data = (u8 *)table;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
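+/* Snapshot all 32 per-port registers, mirroring the global region handler. */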
+static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_port_to_ds(devlink_port);
+ int port = dsa_devlink_port_to_port(devlink_port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ err = mv88e6xxx_port_read(chip, port, i, &registers[i]);
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL1,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+ .name = "global1",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global1_priv,
+};
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL2,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+ .name = "global2",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global2_priv,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+ .name = "atu",
+ .snapshot = mv88e6xxx_region_atu_snapshot,
+ .destructor = kfree,
+};
+
+static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
+ .name = "port",
+ .snapshot = mv88e6xxx_region_port_snapshot,
+ .destructor = kfree,
+};
+
+struct mv88e6xxx_region {
+ struct devlink_region_ops *ops;
+ u64 size;
+};
+
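+/* The two global regions are fixed-size register windows; the ATU region
+ * size depends on the number of address databases and is filled in at
+ * region-setup time.
+ */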
+static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+ [MV88E6XXX_REGION_GLOBAL1] = {
+ .ops = &mv88e6xxx_region_global1_ops,
+ .size = 32 * sizeof(u16)
+ },
+ [MV88E6XXX_REGION_GLOBAL2] = {
+ .ops = &mv88e6xxx_region_global2_ops,
+		.ops = &mv88e6xxx_region_global2_ops,
+		.size = 32 * sizeof(u16)
+	},
+ [MV88E6XXX_REGION_ATU] = {
+ .ops = &mv88e6xxx_region_atu_ops
+ /* calculated at runtime */
+ },
+};
+
+static void
+mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+ dsa_devlink_region_destroy(chip->regions[i]);
+}
+
+static void
+mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ dsa_devlink_region_destroy(chip->ports[port].region);
+}
+
+static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip,
+ int port)
+{
+ struct devlink_region *region;
+
+ region = dsa_devlink_port_region_create(ds,
+ port,
+ &mv88e6xxx_region_port_ops, 1,
+ 32 * sizeof(u16));
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ chip->ports[port].region = region;
+
+ return 0;
+}
+
+static void
+mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
+{
+ int port;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
+ mv88e6xxx_teardown_devlink_regions_port(chip, port);
+}
+
+static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (port-- > 0)
+ mv88e6xxx_teardown_devlink_regions_port(chip, port);
+
+ return err;
+}
+
+static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip)
+{
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
+ ops = mv88e6xxx_regions[i].ops;
+ size = mv88e6xxx_regions[i].size;
+
+ if (i == MV88E6XXX_REGION_ATU)
+ size = mv88e6xxx_num_databases(chip) *
+ sizeof(struct mv88e6xxx_devlink_atu_entry);
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ goto out;
+ chip->regions[i] = region;
+ }
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ dsa_devlink_region_destroy(chip->regions[j]);
+
+ return PTR_ERR(region);
+}
+
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
+ if (err)
+ mv88e6xxx_teardown_devlink_regions_global(chip);
+
+ return err;
+}
+
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mv88e6xxx_teardown_devlink_regions_ports(chip);
+ mv88e6xxx_teardown_devlink_regions_global(chip);
+}
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "mv88e6xxx");
+ if (err)
+ return err;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ chip->info->name);
+}
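+
+/* Reported through "devlink dev info"; asic.id lands in the "fixed" versions
+ * group. Sketch of the expected output (hypothetical devlink handle):
+ *
+ *   $ devlink dev info mdio_bus/<bus-id>
+ *     driver mv88e6xxx
+ *     versions:
+ *         fixed:
+ *           asic.id Marvell 88E6390
+ */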
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
new file mode 100644
index 000000000000..3d72db3dcf95
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* Marvell 88E6xxx Switch devlink support. */
+
+#ifndef _MV88E6XXX_DEVLINK_H
+#define _MV88E6XXX_DEVLINK_H
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds);
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+#endif /* _MV88E6XXX_DEVLINK_H */
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a4c488b12e8f..094d17a1d037 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -211,49 +211,20 @@ int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
-EFAULT : 0;
}
-/* Get the start of the PTP header in this skb */
-static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
-{
- u8 *data = skb_mac_header(skb);
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return NULL;
- }
-
- /* Ensure that the entire header is present in this packet. */
- if (skb->len + ETH_HLEN < offset + 34)
- return NULL;
-
- return data + offset;
-}
-
/* Returns a pointer to the PTP header if the caller should time stamp,
* or NULL if the caller should not.
*/
-static u8 *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip, int port,
- struct sk_buff *skb, unsigned int type)
+static struct ptp_header *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip,
+ int port, struct sk_buff *skb,
+ unsigned int type)
{
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- u8 *hdr;
+ struct ptp_header *hdr;
if (!chip->info->ptp_support)
return NULL;
- hdr = parse_ptp_header(skb, type);
+ hdr = ptp_parse_header(skb, type);
if (!hdr)
return NULL;
@@ -275,12 +246,11 @@ static int mv88e6xxx_ts_valid(u16 status)
static int seq_match(struct sk_buff *skb, u16 ts_seqid)
{
unsigned int type = SKB_PTP_TYPE(skb);
- u8 *hdr = parse_ptp_header(skb, type);
- __be16 *seqid;
+ struct ptp_header *hdr;
- seqid = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
+ hdr = ptp_parse_header(skb, type);
- return ts_seqid == ntohs(*seqid);
+ return ts_seqid == ntohs(hdr->sequence_id);
}
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
@@ -357,9 +327,9 @@ static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
&ps->rx_queue2);
}
-static int is_pdelay_resp(u8 *msgtype)
+static int is_pdelay_resp(const struct ptp_header *hdr)
{
- return (*msgtype & 0xf) == 3;
+ return (hdr->tsmt & 0xf) == 3;
}
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
@@ -367,7 +337,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_port_hwtstamp *ps;
struct mv88e6xxx_chip *chip;
- u8 *hdr;
+ struct ptp_header *hdr;
chip = ds->priv;
ps = &chip->port_hwtstamp[port];
@@ -503,8 +473,7 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- __be16 *seq_ptr;
- u8 *hdr;
+ struct ptp_header *hdr;
if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
return false;
@@ -513,15 +482,13 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
if (!hdr)
return false;
- seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
-
if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
&ps->state))
return false;
ps->tx_skb = clone;
ps->tx_tstamp_start = jiffies;
- ps->tx_seq_id = be16_to_cpup(seq_ptr);
+ ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
ptp_schedule_worker(chip->ptp_clock, 0);
return true;
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 2d23ccef7d0e..c110e82a7973 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -8,12 +8,19 @@ config NET_DSA_MSCC_FELIX
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
+ select PCS_LYNX
help
- This driver supports network switches from the Vitesse /
- Microsemi / Microchip Ocelot family of switching cores that are
- connected to their host CPU via Ethernet.
- The following switches are supported:
- - VSC9959 (Felix): embedded as a PCIe function of the NXP LS1028A
- ENETC integrated endpoint.
- - VSC9953 (Seville): embedded as a platform device on the
- NXP T1040 SoC.
+ This driver supports the VSC9959 (Felix) switch, which is embedded as
+ a PCIe function of the NXP LS1028A ENETC RCiEP.
+
+config NET_DSA_MSCC_SEVILLE
+ tristate "Ocelot / Seville Ethernet switch support"
+ depends on NET_DSA
+ depends on NET_VENDOR_MICROSEMI
+ depends on HAS_IOMEM
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT
+ select PCS_LYNX
+ help
+ This driver supports the VSC9953 (Seville) switch, which is embedded
+ as a platform device on the NXP T1040 SoC.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index ec57a5a12330..f6dd131e7491 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
mscc_felix-objs := \
felix.o \
- felix_vsc9959.o \
+ felix_vsc9959.o
+
+mscc_seville-objs := \
+ felix.o \
seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 01427cd08448..f791860d495f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -19,6 +19,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -118,13 +119,12 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_vlan_filtering(ocelot, port, enabled);
-
- return 0;
+ return ocelot_port_vlan_filtering(ocelot, port, enabled, trans);
}
static void felix_vlan_add(struct dsa_switch *ds, int port,
@@ -196,27 +196,16 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
-
- if (felix->info->pcs_link_state)
- felix->info->pcs_link_state(ocelot, port, state);
-
- return 0;
-}
-
static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
const struct phylink_link_state *state)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->info->pcs_config)
- felix->info->pcs_config(ocelot, port, link_an_mode, state);
+ if (felix->pcs[port])
+ phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -306,10 +295,6 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- if (felix->info->pcs_link_up)
- felix->info->pcs_link_up(ocelot, port, link_an_mode, interface,
- speed, duplex);
-
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
@@ -449,10 +434,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
- ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
- ocelot->vcap_is2_actions= felix->info->vcap_is2_actions;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
+ ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
@@ -523,7 +508,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return PTR_ERR(target);
}
- template = devm_kzalloc(ocelot->dev, OCELOT_TAG_LEN,
+ template = devm_kzalloc(ocelot->dev, OCELOT_TOTAL_TAG_LEN,
GFP_KERNEL);
if (!template) {
dev_err(ocelot->dev,
@@ -552,22 +537,27 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "felix ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = OCELOT_PTP_PINS_NUM,
- .n_pins = OCELOT_PTP_PINS_NUM,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
- .verify = ocelot_ptp_verify,
- .enable = ocelot_ptp_enable,
-};
+/* The CPU port module is connected to the Node Processor Interface (NPI).
+ * This is the mode through which an external CPU injects and extracts frames
+ * over Ethernet.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
@@ -590,7 +580,7 @@ static int felix_setup(struct dsa_switch *ds)
return err;
if (ocelot->ptp) {
- err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
if (err) {
dev_err(ocelot->dev,
"Timestamp initialization failed\n");
@@ -601,11 +591,8 @@ static int felix_setup(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
- /* Bring up the CPU port module and configure the NPI port */
if (dsa_is_cpu_port(ds, port))
- ocelot_configure_cpu(ocelot, port,
- OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_LONG);
+ felix_npi_port_init(ocelot, port);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
@@ -630,11 +617,6 @@ static int felix_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;
- /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
- * isn't instantiated for the Felix PF.
- * In-band AN may take a few ms to complete, so we need to poll.
- */
- ds->pcs_poll = true;
return 0;
}
@@ -705,8 +687,11 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone))
+ if (ocelot->ptp && (skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP) &&
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
return true;
+ }
return false;
}
@@ -793,7 +778,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
@@ -823,31 +807,27 @@ const struct dsa_switch_ops felix_switch_ops = {
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .port_setup_tc = felix_port_setup_tc,
};
-static int __init felix_init(void)
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
- int err;
-
- err = pci_register_driver(&felix_vsc9959_pci_driver);
- if (err)
- return err;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
- err = platform_driver_register(&seville_vsc9953_driver);
- if (err)
- return err;
+ if (!dsa_is_user_port(ds, port))
+ return NULL;
- return 0;
+ return dsa_to_port(ds, port)->slave;
}
-module_init(felix_init);
-static void __exit felix_exit(void)
+int felix_netdev_to_port(struct net_device *dev)
{
- pci_unregister_driver(&felix_vsc9959_pci_driver);
- platform_driver_unregister(&seville_vsc9953_driver);
-}
-module_exit(felix_exit);
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(dev);
+ if (IS_ERR(dp))
+ return -EINVAL;
-MODULE_DESCRIPTION("Felix Switch driver");
-MODULE_LICENSE("GPL v2");
+ return dp->index;
+}
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 98f14621ac23..4c717324ac2f 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -20,23 +20,13 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int num_tx_queues;
- struct vcap_field *vcap_is2_keys;
- struct vcap_field *vcap_is2_actions;
- const struct vcap_props *vcap;
+ int num_tx_queues;
+ struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
+ const struct ptp_clock_info *ptp_caps;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*pcs_config)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
- void (*pcs_link_up)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
- void (*pcs_link_state)(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
@@ -50,8 +40,6 @@ struct felix_info {
};
extern const struct dsa_switch_ops felix_switch_ops;
-extern struct pci_driver felix_vsc9959_pci_driver;
-extern struct platform_driver seville_vsc9953_driver;
/* DSA glue / front-end for struct ocelot */
struct felix {
@@ -59,20 +47,12 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct phy_device **pcs;
+ struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
};
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
-void vsc9959_mdio_bus_free(struct ocelot *ocelot);
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
+int felix_netdev_to_port(struct net_device *dev);
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3a9637496407..3e925b8d5306 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -9,15 +9,13 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
#include "felix.h"
-#define VSC9959_VCAP_IS2_CNT 1024
-#define VSC9959_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC9959_VCAP_PORT_CNT 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
static const u32 vsc9959_ana_regmap[] = {
@@ -137,14 +135,27 @@ static const u32 vsc9959_qs_regmap[] = {
REG_RESERVED(QS_INH_DBG),
};
-static const u32 vsc9959_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 vsc9959_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 vsc9959_qsys_regmap[] = {
@@ -295,15 +306,15 @@ static const u32 vsc9959_sys_regmap[] = {
};
static const u32 vsc9959_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
static const u32 vsc9959_gcb_regmap[] = {
@@ -358,7 +369,9 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
[QSYS] = vsc9959_qsys_regmap,
[REW] = vsc9959_rew_regmap,
[SYS] = vsc9959_sys_regmap,
- [S2] = vsc9959_s2_regmap,
+ [S0] = vsc9959_vcap_regmap,
+ [S1] = vsc9959_vcap_regmap,
+ [S2] = vsc9959_vcap_regmap,
[PTP] = vsc9959_ptp_regmap,
[GCB] = vsc9959_gcb_regmap,
[DEV_GMII] = vsc9959_dev_gmii_regmap,
@@ -391,6 +404,16 @@ static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
.end = 0x001ffff,
.name = "sys",
},
+ [S0] = {
+ .start = 0x0040000,
+ .end = 0x00403ff,
+ .name = "s0",
+ },
+ [S1] = {
+ .start = 0x0050000,
+ .end = 0x00503ff,
+ .name = "s1",
+ },
[S2] = {
.start = 0x0060000,
.end = 0x00603ff,
@@ -595,6 +618,113 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x111, .name = "drop_green_prio_7", },
};
+static const struct vcap_field vsc9959_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 3},
+ [VCAP_ES0_IGR_PORT] = { 3, 3},
+ [VCAP_ES0_RSV] = { 6, 2},
+ [VCAP_ES0_L2_MC] = { 8, 1},
+ [VCAP_ES0_L2_BC] = { 9, 1},
+ [VCAP_ES0_VID] = { 10, 12},
+ [VCAP_ES0_DP] = { 22, 1},
+ [VCAP_ES0_PCP] = { 23, 3},
+};
+
+static const struct vcap_field vsc9959_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 23},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 72, 1},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 7},
+ [VCAP_IS1_HK_RSV] = { 10, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 19, 1},
+ [VCAP_IS1_HK_L2_MC] = { 20, 1},
+ [VCAP_IS1_HK_L2_BC] = { 21, 1},
+ [VCAP_IS1_HK_IP_MC] = { 22, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 23, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 24, 1},
+ [VCAP_IS1_HK_TPID] = { 25, 1},
+ [VCAP_IS1_HK_VID] = { 26, 12},
+ [VCAP_IS1_HK_DEI] = { 38, 1},
+ [VCAP_IS1_HK_PCP] = { 39, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 42, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 90, 1},
+ [VCAP_IS1_HK_ETYPE] = { 91, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {107, 1},
+ [VCAP_IS1_HK_IP4] = {108, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {109, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {110, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {111, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {112, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {118, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {150, 1},
+ [VCAP_IS1_HK_TCP] = {151, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {152, 16},
+ [VCAP_IS1_HK_L4_RNG] = {168, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 42, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 43, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 55, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 56, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 59, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 60, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 61, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 62, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 63, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 69, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {101, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {133, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {141, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {142, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {143, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {151, 32},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
+};
+
static struct vcap_field vsc9959_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
@@ -693,15 +823,32 @@ static struct vcap_field vsc9959_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 44, 32},
};
-static const struct vcap_props vsc9959_vcap_props[] = {
+static struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 72, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9959_vcap_es0_keys,
+ .actions = vsc9959_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9959_vcap_is1_keys,
+ .actions = vsc9959_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC9959_VCAP_IS2_CNT,
- .entry_width = VSC9959_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC9959_VCAP_IS2_CNT +
- VSC9959_VCAP_PORT_CNT + 2,
- .action_width = 89,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -713,11 +860,29 @@ static const struct vcap_props vsc9959_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc9959_vcap_is2_keys,
+ .actions = vsc9959_vcap_is2_actions,
},
};
+static const struct ptp_clock_info vsc9959_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -726,7 +891,7 @@ static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
{
int val;
- regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+ ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
return val;
}
@@ -736,12 +901,15 @@ static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
return ocelot_read(ocelot, SYS_RAM_INIT);
}
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * RAM_INIT is in SYS:RAM_CTRL:RAM_INIT
+ */
static int vsc9959_reset(struct ocelot *ocelot)
{
int val, err;
/* soft-reset the switch core */
- regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+ ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
@@ -761,352 +929,11 @@ static int vsc9959_reset(struct ocelot *ocelot)
}
/* enable switch core */
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
}
-/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
- * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
- * into the PCS, which is retrieved out-of-band over MDIO. This also has the
- * benefit of working with SGMII fixed-links, like downstream switches, where
- * both link partners attempt to operate as AN slaves and therefore AN never
- * completes. But it also has the disadvantage that some PHY chips don't pass
- * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
- * setting MLO_AN_INBAND is actually required for those.
- */
-static void vsc9959_pcs_config_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- int bmsr, bmcr;
-
- /* Some PHYs like VSC8234 don't like it when AN restarts on
- * their system side and they restart line side AN too, going
- * into an endless link up/down loop. Don't restart PCS AN if
- * link is up already.
- * We do check that AN is enabled just in case this is the 1st
- * call, PCS detects a carrier but AN is disabled from power on
- * or by boot loader.
- */
- bmcr = phy_read(pcs, MII_BMCR);
- if (bmcr < 0)
- return;
-
- bmsr = phy_read(pcs, MII_BMSR);
- if (bmsr < 0)
- return;
-
- if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
- return;
-
- /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
- * for the MAC PCS in order to acknowledge the AN.
- */
- phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_USE_SGMII_AN);
-
- /* Adjust link timer for SGMII */
- phy_write(pcs, ENETC_PCS_LINK_TIMER1,
- ENETC_PCS_LINK_TIMER1_VAL);
- phy_write(pcs, ENETC_PCS_LINK_TIMER2,
- ENETC_PCS_LINK_TIMER2_VAL);
-
- phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- /* Configure device ability for the USXGMII Replicator */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
- MDIO_USXGMII_2500FULL |
- MDIO_USXGMII_LINK |
- ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-}
-
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- /* The PCS does not implement the BMSR register fully, so capability
- * detection via genphy_read_abilities does not work. Since we can get
- * the PHY config word from the LPA register though, there is still
- * value in using the generic phy_resolve_aneg_linkmode function. So
- * populate the supported and advertising link modes manually here.
- */
- linkmode_set_bit_array(phy_basic_ports_array,
- ARRAY_SIZE(phy_basic_ports_array),
- pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
- if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
- pcs->interface == PHY_INTERFACE_MODE_USXGMII)
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
- pcs->supported);
- if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- pcs->supported);
- phy_advertise_supported(pcs);
-
- if (!phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_config_sgmii(pcs, link_an_mode, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_config_usxgmii(pcs, link_an_mode, state);
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_up_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN;
-
- switch (speed) {
- case SPEED_1000:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_1000);
- break;
- case SPEED_100:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_100);
- break;
- case SPEED_10:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_10);
- break;
- default:
- phydev_err(pcs, "Invalid PCS speed %d\n", speed);
- return;
- }
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
- * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
- * auto-negotiation of any link parameters. Electrically it is compatible with
- * a single lane of XAUI.
- * The hardware reference manual wants to call this mode SGMII, but it isn't
- * really, since the fundamental features of SGMII:
- * - Downgrading the link speed by duplicating symbols
- * - Auto-negotiation
- * are not there.
- * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
- * because the clock frequency is actually given by a PLL configured in the
- * Reset Configuration Word (RCW).
- * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
- * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
- * lower link speed on line side, the system-side interface remains fixed at
- * 2500 Mbps and we do rate adaptation through pause frames.
- */
-static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) |
- ENETC_PCS_IF_MODE_SGMII_EN;
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- if (phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_up_sgmii(pcs, link_an_mode, speed, duplex);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_up_2500basex(pcs, link_an_mode, speed,
- duplex);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- state->an_complete = pcs->autoneg_complete;
- state->an_enabled = pcs->autoneg;
- state->link = pcs->link;
- state->duplex = pcs->duplex;
- state->speed = pcs->speed;
- /* SGMII AN does not negotiate flow control, but that's ok,
- * since phylink already knows that, and does:
- * link_state.pause |= pl->phy_state.pause;
- */
- state->pause = MLO_PAUSE_NONE;
-
- phydev_dbg(pcs,
- "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
- phy_modes(pcs->interface),
- phy_speed_to_str(pcs->speed),
- phy_duplex_to_str(pcs->duplex),
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
- pcs->link, pcs->autoneg, pcs->autoneg_complete);
-}
-
-static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- if (pcs->autoneg_complete) {
- u16 lpa = phy_read(pcs, MII_LPA);
-
- mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
-
- phy_resolve_aneg_linkmode(pcs);
- }
-}
-
-static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- pcs->speed = SPEED_2500;
- pcs->asym_pause = true;
- pcs->pause = true;
-}
-
-static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int status, lpa;
-
- status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
- if (status < 0)
- return;
-
- pcs->autoneg = true;
- pcs->autoneg_complete = !!(status & BMSR_ANEGCOMPLETE);
- pcs->link = !!(status & BMSR_LSTATUS);
-
- if (!pcs->link || !pcs->autoneg_complete)
- return;
-
- lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
- if (lpa < 0)
- return;
-
- switch (lpa & MDIO_USXGMII_SPD_MASK) {
- case MDIO_USXGMII_10:
- pcs->speed = SPEED_10;
- break;
- case MDIO_USXGMII_100:
- pcs->speed = SPEED_100;
- break;
- case MDIO_USXGMII_1000:
- pcs->speed = SPEED_1000;
- break;
- case MDIO_USXGMII_2500:
- pcs->speed = SPEED_2500;
- break;
- default:
- break;
- }
-
- if (lpa & MDIO_USXGMII_FULL_DUPLEX)
- pcs->duplex = DUPLEX_FULL;
- else
- pcs->duplex = DUPLEX_HALF;
-}
-
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- pcs->speed = SPEED_UNKNOWN;
- pcs->duplex = DUPLEX_UNKNOWN;
- pcs->pause = 0;
- pcs->asym_pause = 0;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_state_sgmii(pcs, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_state_2500basex(pcs, state);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_link_state_usxgmii(pcs, state);
- break;
- default:
- return;
- }
-
- vsc9959_pcs_link_state_resolve(pcs, state);
-}
-
static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1182,6 +1009,8 @@ static u16 vsc9959_wm_enc(u16 value)
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
};
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
@@ -1197,7 +1026,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct lynx_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1248,18 +1077,26 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
- bool is_c45 = false;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
- if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
- is_c45 = true;
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
- pcs = get_phy_device(felix->imdio, port, is_c45);
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+ continue;
+
+ pcs = mdio_device_create(felix->imdio, port);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1267,18 +1104,19 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
-void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct phy_device *pcs = felix->pcs[port];
+ struct lynx_pcs *pcs = felix->pcs[port];
if (!pcs)
continue;
- put_device(&pcs->mdio.dev);
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
}
mdiobus_unregister(felix->imdio);
}
@@ -1488,6 +1326,8 @@ static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 *template = ocelot_port->xmit_template;
u64 bypass, dest, src;
+ __be32 *prefix;
+ u8 *injection;
/* Set the source port as the CPU port module and not the
* NPI port
@@ -1496,9 +1336,14 @@ static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
dest = BIT(port);
bypass = true;
- packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ injection = template + OCELOT_SHORT_PREFIX_LEN;
+ prefix = (__be32 *)template;
+
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+
+ *prefix = cpu_to_be32(0x8880000a);
}
static const struct felix_info felix_info_vsc9959 = {
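The transmit template now starts with a 4-byte short prefix (0x8880000a) followed by the 16-byte packed injection header. A standalone sketch of the resulting memory layout, assuming OCELOT_SHORT_PREFIX_LEN is 4 and OCELOT_TAG_LEN is 16, with htonl standing in for cpu_to_be32:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	uint8_t template[4 + 16] = { 0 };	/* short prefix + injection header */
	uint32_t prefix = htonl(0x8880000a);

	/* The prefix occupies the first four bytes, in big-endian
	 * byte order; the packed injection fields start right after.
	 */
	memcpy(template, &prefix, sizeof(prefix));

	printf("%02x %02x %02x %02x\n",
	       template[0], template[1], template[2], template[3]);
	/* prints: 88 80 00 0a */
	return 0;
}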
@@ -1510,8 +1355,6 @@ static const struct felix_info felix_info_vsc9959 = {
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
- .vcap_is2_keys = vsc9959_vcap_is2_keys,
- .vcap_is2_actions = vsc9959_vcap_is2_actions,
.vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
@@ -1519,15 +1362,13 @@ static const struct felix_info felix_info_vsc9959 = {
.num_tx_queues = FELIX_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
+ .ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
.phylink_validate = vsc9959_phylink_validate,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
- .port_setup_tc = vsc9959_port_setup_tc,
- .port_sched_speed_set = vsc9959_sched_speed_set,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
.xmit_template_populate = vsc9959_xmit_template_populate,
};
@@ -1663,9 +1504,13 @@ static struct pci_device_id felix_ids[] = {
};
MODULE_DEVICE_TABLE(pci, felix_ids);
-struct pci_driver felix_vsc9959_pci_driver = {
+static struct pci_driver felix_vsc9959_pci_driver = {
.name = "mscc_felix",
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
};
+module_pci_driver(felix_vsc9959_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 9e9fd19e1d00..1d420c4a2f0f 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -7,31 +7,17 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/of_platform.h>
+#include <linux/pcs-lynx.h>
#include <linux/packing.h>
#include <linux/iopoll.h>
#include "felix.h"
-#define VSC9953_VCAP_IS2_CNT 1024
-#define VSC9953_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC9953_VCAP_PORT_CNT 10
-
-#define MSCC_MIIM_REG_STATUS 0x0
-#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3)
-#define MSCC_MIIM_REG_CMD 0x8
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
-#define MSCC_MIIM_REG_DATA 0xC
-#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
-
-#define MSCC_PHY_REG_PHY_CFG 0x0
-#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PHY_CFG_PHY_COMMON_RESET BIT(4)
-#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
-#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
+#define MSCC_MIIM_CMD_OPR_READ BIT(2)
+#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
+#define MSCC_MIIM_CMD_REGAD_SHIFT 20
+#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
+#define MSCC_MIIM_CMD_VLD BIT(31)
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -150,14 +136,27 @@ static const u32 vsc9953_qs_regmap[] = {
REG_RESERVED(QS_INH_DBG),
};
-static const u32 vsc9953_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 vsc9953_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG_RESERVED(VCAP_CONST_CORE_CNT),
+ REG_RESERVED(VCAP_CONST_IF_CNT),
};
static const u32 vsc9953_qsys_regmap[] = {
@@ -362,7 +361,9 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
[QSYS] = vsc9953_qsys_regmap,
[REW] = vsc9953_rew_regmap,
[SYS] = vsc9953_sys_regmap,
- [S2] = vsc9953_s2_regmap,
+ [S0] = vsc9953_vcap_regmap,
+ [S1] = vsc9953_vcap_regmap,
+ [S2] = vsc9953_vcap_regmap,
[GCB] = vsc9953_gcb_regmap,
[DEV_GMII] = vsc9953_dev_gmii_regmap,
};
@@ -394,6 +395,16 @@ static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
.end = 0x001ffff,
.name = "sys",
},
+ [S0] = {
+ .start = 0x0040000,
+ .end = 0x00403ff,
+ .name = "s0",
+ },
+ [S1] = {
+ .start = 0x0050000,
+ .end = 0x00503ff,
+ .name = "s1",
+ },
[S2] = {
.start = 0x0060000,
.end = 0x00603ff,
@@ -609,6 +620,112 @@ static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
{ .offset = 0x91, .name = "drop_green_prio_7", },
};
+static const struct vcap_field vsc9953_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc9953_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 11},
+ [VCAP_IS1_HK_RSV] = { 14, 10},
+ /* VCAP_IS1_HK_OAM_Y1731 not supported */
+ [VCAP_IS1_HK_L2_MC] = { 24, 1},
+ [VCAP_IS1_HK_L2_BC] = { 25, 1},
+ [VCAP_IS1_HK_IP_MC] = { 26, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_TPID] = { 29, 1},
+ [VCAP_IS1_HK_VID] = { 30, 12},
+ [VCAP_IS1_HK_DEI] = { 42, 1},
+ [VCAP_IS1_HK_PCP] = { 43, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 46, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 94, 1},
+ [VCAP_IS1_HK_ETYPE] = { 95, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {111, 1},
+ [VCAP_IS1_HK_IP4] = {112, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {113, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {114, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {115, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {116, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {122, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {154, 1},
+ [VCAP_IS1_HK_TCP] = {155, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {156, 16},
+ [VCAP_IS1_HK_L4_RNG] = {172, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 46, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 47, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 59, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 60, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 63, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 67, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 73, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {105, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {137, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {145, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {146, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {147, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {155, 32},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 11},
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 41, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 53, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 55, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 68, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 69, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 72, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 73, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 74, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 76, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 80, 1},
+};
+
static struct vcap_field vsc9953_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
@@ -694,15 +811,32 @@ static struct vcap_field vsc9953_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 50, 32},
};
-static const struct vcap_props vsc9953_vcap_props[] = {
+static struct vcap_props vsc9953_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9953_vcap_es0_keys,
+ .actions = vsc9953_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 80, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9953_vcap_is1_keys,
+ .actions = vsc9953_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC9953_VCAP_IS2_CNT,
- .entry_width = VSC9953_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC9953_VCAP_IS2_CNT +
- VSC9953_VCAP_PORT_CNT + 2,
- .action_width = 101,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -714,8 +848,9 @@ static const struct vcap_props vsc9953_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc9953_vcap_is2_keys,
+ .actions = vsc9953_vcap_is2_actions,
},
};
@@ -819,6 +954,10 @@ out:
return err;
}
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * MEM_INIT is in SYS:SYSTEM:RESET_CFG
+ * MEM_ENA is in SYS:SYSTEM:RESET_CFG
+ */
static int vsc9953_reset(struct ocelot *ocelot)
{
int val, err;
@@ -834,8 +973,8 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* initialize switch mem ~40us */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
VSC9953_SYS_RAMINIT_SLEEP,
@@ -846,7 +985,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* enable switch core */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
@@ -922,6 +1060,8 @@ static u16 vsc9953_wm_enc(u16 value)
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
};
static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
@@ -962,18 +1102,27 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
int addr = port + 4;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = get_phy_device(felix->imdio, addr, false);
+ pcs = mdio_device_create(felix->imdio, addr);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -981,11 +1130,30 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
+static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct lynx_pcs *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 *template = ocelot_port->xmit_template;
u64 bypass, dest, src;
+ __be32 *prefix;
+ u8 *injection;
/* Set the source port as the CPU port module and not the
* NPI port
@@ -994,9 +1162,14 @@ static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
dest = BIT(port);
bypass = true;
- packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ injection = template + OCELOT_SHORT_PREFIX_LEN;
+ prefix = (__be32 *)template;
+
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+
+ *prefix = cpu_to_be32(0x88800005);
}
static const struct felix_info seville_info_vsc9953 = {
@@ -1007,17 +1180,12 @@ static const struct felix_info seville_info_vsc9953 = {
.ops = &vsc9953_ops,
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
- .vcap_is2_keys = vsc9953_vcap_is2_keys,
- .vcap_is2_actions = vsc9953_vcap_is2_actions,
.vcap = vsc9953_vcap_props,
- .shared_queue_sz = 2048 * 1024,
+ .shared_queue_sz = 256 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
- .mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
+ .mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
.xmit_template_populate = vsc9953_xmit_template_populate,
@@ -1096,7 +1264,7 @@ static const struct of_device_id seville_of_match[] = {
};
MODULE_DEVICE_TABLE(of, seville_of_match);
-struct platform_driver seville_vsc9953_driver = {
+static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
.driver = {
@@ -1104,3 +1272,7 @@ struct platform_driver seville_vsc9953_driver = {
.of_match_table = of_match_ptr(seville_of_match),
},
};
+module_platform_driver(seville_vsc9953_driver);
+
+MODULE_DESCRIPTION("Seville Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index f1e484477e35..5bdac669a339 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1219,8 +1219,8 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
priv->port_mtu[port] = new_mtu;
for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[port] > mtu)
- mtu = priv->port_mtu[port];
+ if (priv->port_mtu[i] > mtu)
+ mtu = priv->port_mtu[i];
/* Include L2 header / FCS length */
qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
@@ -1294,10 +1294,14 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct qca8k_priv *priv = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (vlan_filtering) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index fae188c60191..8e49d4f85d48 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -394,9 +394,10 @@ static int realtek_smi_probe(struct platform_device *pdev)
var = of_device_get_match_data(dev);
np = dev->of_node;
- smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
if (!smi)
return -ENOMEM;
+ smi->chip_data = (void *)smi + sizeof(*smi);
smi->map = devm_regmap_init(dev, NULL, smi,
&realtek_smi_mdio_regmap_config);
if (IS_ERR(smi->map)) {
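The probe now makes one allocation that carries the variant's private data directly behind struct realtek_smi. A self-contained sketch of that trailing-allocation pattern, with generic names and plain calloc standing in for devm_kzalloc:

#include <stdlib.h>

struct core {
	int common_state;
	void *chip_data;	/* points just past this struct */
};

static struct core *core_alloc(size_t chip_data_sz)
{
	/* One allocation covers the core struct plus the variant data */
	struct core *c = calloc(1, sizeof(*c) + chip_data_sz);

	if (!c)
		return NULL;

	/* Variant data lives immediately after the core struct */
	c->chip_data = (char *)c + sizeof(*c);

	return c;
}

The kernel spells the offset as (void *)smi + sizeof(*smi), relying on the GCC extension of arithmetic on void pointers; (char *) is the portable equivalent used in the sketch.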
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 9a63b51e1d82..6b6a3dec0984 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
const char *name;
};
+/**
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
+ */
struct rtl8366_vlan_mc {
u16 vid;
u16 untag;
@@ -68,6 +71,7 @@ struct realtek_smi {
int vlan4k_enabled;
char buf[4096];
+ void *chip_data; /* Per-chip extra variant data */
};
/**
@@ -108,6 +112,7 @@ struct realtek_smi_variant {
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
+ size_t chip_data_sz;
};
/* SMI core calls */
@@ -119,7 +124,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
@@ -127,7 +131,8 @@ int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
int rtl8366_init_vlan(struct realtek_smi *smi);
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering);
+ bool vlan_filtering,
+ struct switchdev_trans *trans);
int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void rtl8366_vlan_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index a8c5a934c3d3..307466b90489 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
}
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+/**
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
+ * @smi: the Realtek SMI device instance
+ * @vid: the VLAN ID to look up or allocate
+ * @vlanmc: will be filled in with a valid member config if successful
+ * @return: index of the new or existing member config, or a negative
+ * error number
+ */
+static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing member config entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vid == vlanmc->vid)
+ return i;
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret) {
+ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(smi, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ return -ENOSPC;
+}
+
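rtl8366_obtain_mc() is the find-or-allocate primitive that both rtl8366_set_vlan() and rtl8366_set_pvid() below now build on. A minimal usage sketch mirroring what rtl8366_set_pvid() does with the returned index; example_bind_pvid is a hypothetical name:

static int example_bind_pvid(struct realtek_smi *smi, int port, int vid)
{
	struct rtl8366_vlan_mc vlanmc;
	int mc;

	/* Find an existing member config for this VID, or allocate
	 * one and seed it from the 4K table.
	 */
	mc = rtl8366_obtain_mc(smi, vid, &vlanmc);
	if (mc < 0)
		return mc;

	/* Point the port's PVID at that member config slot */
	return smi->ops->set_mc_index(smi, port, mc);
}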
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid)
{
+ struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
+
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
dev_dbg(smi->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
-
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- /* update the MC entry */
- vlanmc.member |= member;
- vlanmc.untag |= untag;
- vlanmc.fid = fid;
-
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- dev_dbg(smi->dev,
- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
- vid, vlanmc.member, vlanmc.untag);
+ /* Update the MC entry */
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
- break;
- }
- }
+ /* Commit updates to the MC entry */
+ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ if (ret)
+ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ mc, vid);
+ else
+ dev_dbg(smi->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
-{
- struct rtl8366_vlan_mc vlanmc;
- int ret;
- int index;
-
- ret = smi->ops->get_mc_index(smi, port, &index);
- if (ret)
- return ret;
-
- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
- if (ret)
- return ret;
-
- *val = vlanmc.vid;
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
-
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
- struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
-
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vlanmc.vid == 0 && vlanmc.member == 0) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
-
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- int used;
-
- ret = rtl8366_mc_is_used(smi, i, &used);
- if (ret)
- return ret;
- if (!used) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
+ ret = smi->ops->set_mc_index(smi, port, mc);
+ if (ret) {
+ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ mc, port);
+ return ret;
}
- dev_err(smi->dev,
- "all VLAN member configurations are in use\n");
+ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ port, vid, mc);
- return -ENOSPC;
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
@@ -314,15 +340,20 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct realtek_smi *smi = ds->priv;
struct rtl8366_vlan_4k vlan4k;
int ret;
/* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
+
+ return 0;
+ }
dev_info(smi->dev, "%s filtering on port %d\n",
vlan_filtering ? "enable" : "disable",
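The hunk above follows the switchdev two-phase transaction model: validate in the prepare phase, touch hardware only in commit. A generic sketch of that split, with hypothetical example_* helpers standing in for driver specifics:

static bool example_is_valid(struct dsa_switch *ds, int port)
{
	return port >= 0 && port < ds->num_ports;
}

static int example_apply(struct dsa_switch *ds, int port, bool enable)
{
	/* program the hardware here */
	return 0;
}

static int example_attr_set(struct dsa_switch *ds, int port, bool enable,
			    struct switchdev_trans *trans)
{
	/* Prepare phase: validate only, never touch hardware */
	if (switchdev_trans_ph_prepare(trans))
		return example_is_valid(ds, port) ? 0 : -EINVAL;

	/* Commit phase: the change must succeed now */
	return example_apply(ds, port, enable);
}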
@@ -389,7 +420,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (!smi->ops->is_vlan_valid(smi, vid))
return;
- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid_begin,
port,
untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
@@ -398,34 +430,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
dev_err(smi->dev, "port is DSA or CPU port\n");
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int pvid_val = 0;
-
- dev_info(smi->dev, "add VLAN %04x\n", vid);
member |= BIT(port);
if (untagged)
untag |= BIT(port);
- /* To ensure that we have a valid MC entry for this VLAN,
- * initialize the port VLAN ID here.
- */
- ret = rtl8366_get_pvid(smi, port, &pvid_val);
- if (ret < 0) {
- dev_err(smi->dev, "could not lookup PVID for port %d\n",
- port);
- return;
- }
- if (pvid_val == 0) {
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret < 0)
- return;
- }
-
ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
if (ret)
dev_err(smi->dev,
"failed to set up VLAN %04x",
vid);
+
+ if (!pvid)
+ continue;
+
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set PVID on port %d to VLAN %04x",
+ port, vid);
+
+ if (!ret)
+ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
+ vid, port);
}
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index 48f1ff746799..cfe56960f44b 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -35,7 +35,7 @@
#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
-#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
#define RTL8366RB_SGCR_EN_VLAN BIT(13)
#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
@@ -311,6 +311,14 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+};
+
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -712,6 +720,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_smi *smi = ds->priv;
const u16 *jam_table;
+ struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
@@ -719,6 +728,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
+ rb = smi->chip_data;
+
ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
dev_err(smi->dev, "unable to read chip id\n");
@@ -868,6 +879,9 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
return ret;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
+ /* layer 2 size, see rtl8366rb_change_mtu() */
+ rb->max_mtu[i] = 1532;
/* Enable learning for all ports */
ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
@@ -969,8 +983,10 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_RTL4_A;
}
-static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void
+rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct realtek_smi *smi = ds->priv;
int ret;
@@ -978,25 +994,52 @@ static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
if (port != smi->cpu_port)
return;
- dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
+ dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
return;
+ }
ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to set PAACR on CPU port\n");
return;
+ }
/* Enable the CPU port */
ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
0);
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to enable the CPU port\n");
return;
+ }
+}
+
+static void
+rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (port != smi->cpu_port)
+ return;
+
+ dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret) {
+ dev_err(smi->dev, "failed to disable the CPU port\n");
+ return;
+ }
}
static void rb8366rb_set_port_led(struct realtek_smi *smi,
@@ -1077,6 +1120,56 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ unsigned int max_mtu;
+ u32 len;
+ int i;
+
+ /* Cache the per-port MTU setting */
+ rb = smi->chip_data;
+ rb->max_mtu[port] = new_mtu;
+
+ /* Roof out the MTU for the entire switch to the largest
+ * per-port setting: the biggest MTU set for any one port
+ * becomes the frame-length limit for the whole switch.
+ *
+ * The first setting, 1522 bytes, is a max IP packet of 1500
+ * bytes, plus the Ethernet header (1518 bytes in total), plus
+ * the CPU tag (4 bytes). This function should consider the
+ * parameter an SDU, so the MTU passed for this setting is
+ * 1518 bytes. The same logic of subtracting the 4-byte DSA
+ * tag applies to the other settings.
+ */
+ max_mtu = 1518;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
+ if (rb->max_mtu[i] > max_mtu)
+ max_mtu = rb->max_mtu[i];
+ }
+ if (max_mtu <= 1518)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1522;
+ else if (max_mtu > 1518 && max_mtu <= 1532)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1536;
+ else if (max_mtu > 1532 && max_mtu <= 1548)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1552;
+ else
+ len = RTL8366RB_SGCR_MAX_LENGTH_16000;
+
+ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ len);
+}
+
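A standalone illustration of the bucketing in rtl8366rb_change_mtu() above: the largest per-port MTU is mapped to the nearest supported SGCR frame-length setting. Same thresholds as the driver, worked as a userspace example:

#include <stdio.h>

static int frame_limit_for_mtu(unsigned int max_mtu)
{
	if (max_mtu <= 1518)
		return 1522;
	if (max_mtu <= 1532)
		return 1536;
	if (max_mtu <= 1548)
		return 1552;
	return 16000;
}

int main(void)
{
	unsigned int mtus[] = { 1518, 1532, 9000 };

	for (int i = 0; i < 3; i++)
		printf("MTU %u -> on-wire limit %d bytes\n",
		       mtus[i], frame_limit_for_mtu(mtus[i]));
	return 0;
}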
+static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* The max frame length is 16000 bytes, so after subtracting
+ * the 4-byte CPU tag, the max MTU presented to the system is
+ * 15996 bytes.
+ */
+ return 15996;
+}
+
static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
@@ -1255,7 +1348,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan >= max)
+ if (vlan == 0 || vlan > max)
return false;
return true;
@@ -1405,7 +1498,8 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
- .adjust_link = rtl8366rb_adjust_link,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1415,6 +1509,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
};
static const struct realtek_smi_ops rtl8366rb_smi_ops = {
@@ -1439,5 +1535,6 @@ const struct realtek_smi_variant rtl8366rb_variant = {
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
+ .chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
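
The new chip_data_sz field is what makes the per-port MTU cache above possible: the shared realtek-smi core is expected to allocate that many bytes and hang them off smi->chip_data, which rtl8366rb_change_mtu() then treats as a struct rtl8366rb. A core-side sketch (hypothetical; the allocation is not part of this hunk):

	smi->chip_data = devm_kzalloc(smi->dev, var->chip_data_sz, GFP_KERNEL);
	if (!smi->chip_data)
		return -ENOMEM;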
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index c88e56a29db8..a860e3a910be 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -6,6 +6,7 @@ sja1105-objs := \
sja1105_main.o \
sja1105_flower.o \
sja1105_ethtool.o \
+ sja1105_devlink.o \
sja1105_clocking.o \
sja1105_static_config.o \
sja1105_dynamic_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index ba70b40a9a95..4ebc4a5a7b35 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -210,15 +210,15 @@ struct sja1105_private {
struct dsa_switch *ds;
struct list_head dsa_8021q_vlans;
struct list_head bridge_vlans;
- struct list_head crosschip_links;
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
- bool expect_dsa_8021q;
+ struct dsa_8021q_context *dsa_8021q_ctx;
enum sja1105_vlan_state vlan_state;
+ struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
@@ -245,9 +245,21 @@ enum sja1105_reset_reason {
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
-
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+/* From sja1105_devlink.c */
+int sja1105_devlink_setup(struct dsa_switch *ds);
+void sja1105_devlink_teardown(struct dsa_switch *ds);
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
/* From sja1105_spi.c */
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
@@ -258,6 +270,8 @@ int sja1105_xfer_u32(const struct sja1105_private *priv,
int sja1105_xfer_u64(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
struct ptp_system_timestamp *ptp_sts);
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len);
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
new file mode 100644
index 000000000000..4a2ec395bcb0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright 2020 NXP Semiconductors
+ */
+#include "sja1105.h"
+
+/* Since devlink regions have a fixed size and the static config has a variable
+ * size, we need to calculate the maximum possible static config size by
+ * creating a dummy config with all table entries populated to the max, and get
+ * its packed length. This is done dynamically as opposed to simply hardcoding
+ * a number: not all static config tables are implemented yet, and deriving
+ * the size this way keeps it from silently going out of sync with the code.
+ */
+static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
+{
+ struct sja1105_static_config config;
+ enum sja1105_blk_idx blk_idx;
+ int rc;
+
+ rc = sja1105_static_config_init(&config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return 0;
+
+ for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
+ struct sja1105_table *table = &config.tables[blk_idx];
+
+ table->entry_count = table->ops->max_entry_count;
+ }
+
+ return sja1105_static_config_get_length(&config);
+}
+
+static int
+sja1105_region_static_config_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct sja1105_private *priv = ds->priv;
+ size_t max_len, len;
+
+ len = sja1105_static_config_get_length(&priv->static_config);
+ max_len = sja1105_static_config_get_max_size(priv);
+
+ *data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return static_config_buf_prepare_for_upload(priv, *data, len);
+}
+
+static struct devlink_region_ops sja1105_region_static_config_ops = {
+ .name = "static-config",
+ .snapshot = sja1105_region_static_config_snapshot,
+ .destructor = kfree,
+};
+
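
The snapshot contract assumed by these ops: the driver allocates *data, fills at most the fixed size the region was created with, and the devlink core later frees the buffer through .destructor (kfree here), so it must come from a plain kmalloc-family allocation. A minimal op under that contract, with FOO_REGION_SIZE and foo_fill_state() as hypothetical stand-ins:

	static int foo_region_snapshot(struct devlink *dl,
				       const struct devlink_region_ops *ops,
				       struct netlink_ext_ack *extack, u8 **data)
	{
		*data = kzalloc(FOO_REGION_SIZE, GFP_KERNEL);
		if (!*data)
			return -ENOMEM;

		foo_fill_state(*data, FOO_REGION_SIZE);
		return 0;
	}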
+enum sja1105_region_id {
+ SJA1105_REGION_STATIC_CONFIG = 0,
+};
+
+struct sja1105_region {
+ const struct devlink_region_ops *ops;
+ size_t (*get_size)(struct sja1105_private *priv);
+};
+
+static struct sja1105_region sja1105_regions[] = {
+ [SJA1105_REGION_STATIC_CONFIG] = {
+ .ops = &sja1105_region_static_config_ops,
+ .get_size = sja1105_static_config_get_max_size,
+ },
+};
+
+static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+ const struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+
+ priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
+ GFP_KERNEL);
+ if (!priv->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regions; i++) {
+ size = sja1105_regions[i].get_size(priv);
+ ops = sja1105_regions[i].ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ while (--i >= 0)
+ dsa_devlink_region_destroy(priv->regions[i]);
+ return PTR_ERR(region);
+ }
+
+ priv->regions[i] = region;
+ }
+
+ return 0;
+}
+
+static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+
+ for (i = 0; i < num_regions; i++)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+}
+
+static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
+ bool *be_vlan)
+{
+ *be_vlan = priv->best_effort_vlan_filtering;
+
+ return 0;
+}
+
+static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
+ bool be_vlan)
+{
+ struct dsa_switch *ds = priv->ds;
+ bool vlan_filtering;
+ int port;
+ int rc = 0;
+
+ priv->best_effort_vlan_filtering = be_vlan;
+
+ rtnl_lock();
+ for (port = 0; port < ds->num_ports; port++) {
+ struct switchdev_trans trans;
+ struct dsa_port *dp;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp = dsa_to_port(ds, port);
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ trans.ph_prepare = true;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+
+ trans.ph_prepare = false;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+ }
+ rtnl_unlock();
+
+ return rc;
+}
+
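The ph_prepare dance above follows the switchdev two-phase transaction model: the prepare phase may only validate and is the last point at which the operation is allowed to fail; the commit phase then applies the change unconditionally. The accessor assumed here is believed to be the trivial one:

	/* paraphrased sketch of include/net/switchdev.h */
	static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
	{
		return trans->ph_prepare;
	}
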
+enum sja1105_devlink_param_id {
+ SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+};
+
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_get(priv,
+ &ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_set(priv,
+ ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct devlink_param sja1105_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+ "best_effort_vlan_filtering",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int sja1105_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, "sja1105");
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
+ return rc;
+}
+
+int sja1105_devlink_setup(struct dsa_switch *ds)
+{
+ int rc;
+
+ rc = sja1105_setup_devlink_params(ds);
+ if (rc)
+ return rc;
+
+ rc = sja1105_setup_devlink_regions(ds);
+ if (rc < 0) {
+ sja1105_teardown_devlink_params(ds);
+ return rc;
+ }
+
+ return 0;
+}
+
+void sja1105_devlink_teardown(struct dsa_switch *ds)
+{
+ sja1105_teardown_devlink_params(ds);
+ sja1105_teardown_devlink_regions(ds);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 75247f342124..b777d3f37573 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -97,10 +97,10 @@
#define SJA1105_SIZE_DYN_CMD 4
-#define SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
SJA1105_SIZE_DYN_CMD
-#define SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
@@ -183,7 +183,7 @@ static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_lookup_entry *entry = entry_ptr;
- const int size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD;
+ const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
@@ -644,7 +644,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
- .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x35,
},
[BLK_IDX_L2_LOOKUP] = {
@@ -728,7 +728,7 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
- .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x47,
},
[BLK_IDX_L2_LOOKUP] = {
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 5a28dfb36ec3..4ca029650993 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1880,19 +1880,17 @@ static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
if (dsa_to_port(ds, port)->bridge_dev != br)
continue;
- other_priv->expect_dsa_8021q = true;
- rc = dsa_8021q_crosschip_bridge_join(ds, port, other_ds,
- other_port,
- &priv->crosschip_links);
- other_priv->expect_dsa_8021q = false;
+ rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
+ port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
if (rc)
return rc;
- priv->expect_dsa_8021q = true;
- rc = dsa_8021q_crosschip_bridge_join(other_ds, other_port, ds,
- port,
- &other_priv->crosschip_links);
- priv->expect_dsa_8021q = false;
+ rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx,
+ port);
if (rc)
return rc;
}
@@ -1919,33 +1917,24 @@ static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
if (dsa_to_port(ds, port)->bridge_dev != br)
continue;
- other_priv->expect_dsa_8021q = true;
- dsa_8021q_crosschip_bridge_leave(ds, port, other_ds, other_port,
- &priv->crosschip_links);
- other_priv->expect_dsa_8021q = false;
+ dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
- priv->expect_dsa_8021q = true;
- dsa_8021q_crosschip_bridge_leave(other_ds, other_port, ds, port,
- &other_priv->crosschip_links);
- priv->expect_dsa_8021q = false;
+ dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx, port);
}
}
static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
struct sja1105_private *priv = ds->priv;
- int rc, i;
+ int rc;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- priv->expect_dsa_8021q = true;
- rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
- priv->expect_dsa_8021q = false;
- if (rc < 0) {
- dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
- i, rc);
- return rc;
- }
- }
+ rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
+ if (rc)
+ return rc;
dev_info(ds->dev, "%s switch tagging\n",
enabled ? "Enabled" : "Disabled");
@@ -2149,12 +2138,12 @@ struct sja1105_crosschip_vlan {
bool untagged;
int port;
int other_port;
- struct dsa_switch *other_ds;
+ struct dsa_8021q_context *other_ctx;
};
struct sja1105_crosschip_switch {
struct list_head list;
- struct dsa_switch *other_ds;
+ struct dsa_8021q_context *other_ctx;
};
static int sja1105_commit_pvid(struct sja1105_private *priv)
@@ -2330,8 +2319,8 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
INIT_LIST_HEAD(&crosschip_vlans);
- list_for_each_entry(c, &priv->crosschip_links, list) {
- struct sja1105_private *other_priv = c->other_ds->priv;
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
+ struct sja1105_private *other_priv = c->other_ctx->ds->priv;
if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
continue;
@@ -2341,7 +2330,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
*/
if (!dsa_is_user_port(priv->ds, c->port))
continue;
- if (!dsa_is_user_port(c->other_ds, c->other_port))
+ if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
continue;
/* Search for VLANs on the remote port */
@@ -2376,7 +2365,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
tmp->untagged == v->untagged &&
tmp->port == c->port &&
tmp->other_port == v->port &&
- tmp->other_ds == c->other_ds) {
+ tmp->other_ctx == c->other_ctx) {
already_added = true;
break;
}
@@ -2394,14 +2383,14 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
tmp->vid = v->vid;
tmp->port = c->port;
tmp->other_port = v->port;
- tmp->other_ds = c->other_ds;
+ tmp->other_ctx = c->other_ctx;
tmp->untagged = v->untagged;
list_add(&tmp->list, &crosschip_vlans);
}
}
list_for_each_entry(tmp, &crosschip_vlans, list) {
- struct sja1105_private *other_priv = tmp->other_ds->priv;
+ struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
int upstream = dsa_upstream_port(priv->ds, tmp->port);
int match, subvlan;
u16 rx_vid;
@@ -2418,7 +2407,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
goto out;
}
- rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ds,
+ rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
tmp->other_port,
subvlan);
@@ -2493,11 +2482,11 @@ static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
INIT_LIST_HEAD(&crosschip_switches);
- list_for_each_entry(c, &priv->crosschip_links, list) {
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
bool already_added = false;
list_for_each_entry(s, &crosschip_switches, list) {
- if (s->other_ds == c->other_ds) {
+ if (s->other_ctx == c->other_ctx) {
already_added = true;
break;
}
@@ -2512,12 +2501,12 @@ static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
rc = -ENOMEM;
goto out;
}
- s->other_ds = c->other_ds;
+ s->other_ctx = c->other_ctx;
list_add(&s->list, &crosschip_switches);
}
list_for_each_entry(s, &crosschip_switches, list) {
- struct sja1105_private *other_priv = s->other_ds->priv;
+ struct sja1105_private *other_priv = s->other_ctx->ds->priv;
rc = sja1105_build_vlan_table(other_priv, false);
if (rc)
@@ -2618,16 +2607,6 @@ out:
return rc;
}
-/* Select the list to which we should add this VLAN. */
-static struct list_head *sja1105_classify_vlan(struct sja1105_private *priv,
- u16 vid)
-{
- if (priv->expect_dsa_8021q)
- return &priv->dsa_8021q_vlans;
-
- return &priv->bridge_vlans;
-}
-
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -2642,7 +2621,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
* configuration done by dsa_8021q.
*/
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!priv->expect_dsa_8021q && vid_is_dsa_8021q(vid)) {
+ if (vid_is_dsa_8021q(vid)) {
dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
return -EBUSY;
}
@@ -2655,7 +2634,8 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
-static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
@@ -2667,12 +2647,16 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
u16 tpid, tpid2;
int rc;
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type == SJA1105_RULE_VL) {
- dev_err(ds->dev,
- "Cannot change VLAN filtering state while VL rules are active\n");
- return -EBUSY;
+ if (switchdev_trans_ph_prepare(trans)) {
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering with active VL rules\n");
+ return -EBUSY;
+ }
}
+
+ return 0;
}
if (enabled) {
@@ -2762,6 +2746,54 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
return sja1105_setup_8021q_tagging(ds, want_tagging);
}
+/* Returns number of VLANs added (0 or 1) on success,
+ * or a negative error code.
+ */
+static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags, struct list_head *vlan_list)
+{
+ bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct sja1105_bridge_vlan *v;
+
+ list_for_each_entry(v, vlan_list, list)
+ if (v->port == port && v->vid == vid &&
+ v->untagged == untagged && v->pvid == pvid)
+ /* Already added */
+ return 0;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ return -ENOMEM;
+ }
+
+ v->port = port;
+ v->vid = vid;
+ v->untagged = untagged;
+ v->pvid = pvid;
+ list_add(&v->list, vlan_list);
+
+ return 1;
+}
+
+/* Returns number of VLANs deleted (0 or 1) */
+static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
+ struct list_head *vlan_list)
+{
+ struct sja1105_bridge_vlan *v, *n;
+
+ list_for_each_entry_safe(v, n, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ list_del(&v->list);
+ kfree(v);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -2771,38 +2803,12 @@ static void sja1105_vlan_add(struct dsa_switch *ds, int port,
int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct sja1105_bridge_vlan *v;
- struct list_head *vlan_list;
- bool already_added = false;
-
- vlan_list = sja1105_classify_vlan(priv, vid);
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v) {
- dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
return;
- }
-
- v->port = port;
- v->vid = vid;
- v->untagged = untagged;
- v->pvid = pvid;
- list_add(&v->list, vlan_list);
-
- vlan_table_changed = true;
+ if (rc > 0)
+ vlan_table_changed = true;
}
if (!vlan_table_changed)
@@ -2819,21 +2825,12 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
u16 vid;
+ int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- struct sja1105_bridge_vlan *v, *n;
- struct list_head *vlan_list;
-
- vlan_list = sja1105_classify_vlan(priv, vid);
-
- list_for_each_entry_safe(v, n, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- list_del(&v->list);
- kfree(v);
- vlan_table_changed = true;
- break;
- }
- }
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
}
if (!vlan_table_changed)
@@ -2842,105 +2839,36 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
return sja1105_build_vlan_table(priv, true);
}
-static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
- bool *be_vlan)
-{
- *be_vlan = priv->best_effort_vlan_filtering;
-
- return 0;
-}
-
-static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
- bool be_vlan)
-{
- struct dsa_switch *ds = priv->ds;
- bool vlan_filtering;
- int port;
- int rc;
-
- priv->best_effort_vlan_filtering = be_vlan;
-
- rtnl_lock();
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp;
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- dp = dsa_to_port(ds, port);
- vlan_filtering = dsa_port_is_vlan_filtering(dp);
-
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
- if (rc)
- break;
- }
- rtnl_unlock();
-
- return rc;
-}
-
-enum sja1105_devlink_param_id {
- SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
-};
-
-static int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
{
struct sja1105_private *priv = ds->priv;
- int err;
+ int rc;
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_get(priv,
- &ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
+ if (rc <= 0)
+ return rc;
- return err;
+ return sja1105_build_vlan_table(priv, true);
}
-static int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- int err;
+ int rc;
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_set(priv,
- ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
+ if (!rc)
+ return 0;
- return err;
+ return sja1105_build_vlan_table(priv, true);
}
-static const struct devlink_param sja1105_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
- "best_effort_vlan_filtering",
- DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
+ .vlan_add = sja1105_dsa_8021q_vlan_add,
+ .vlan_del = sja1105_dsa_8021q_vlan_del,
};
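
These two ops are the driver half of the context-based dsa_8021q API that the rest of this patch threads through the crosschip code. The rough shape of the context, inferred from its usage in this diff (the authoritative definition lives in the dsa_8021q headers):

	struct dsa_8021q_context {
		const struct dsa_8021q_ops *ops;	/* .vlan_add / .vlan_del above */
		struct dsa_switch *ds;
		struct list_head crosschip_links;
		__be16 proto;		/* tagging EtherType, ETH_P_8021Q here */
	};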
-static int sja1105_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
-static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -3008,7 +2936,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->configure_vlan_while_not_filtering = true;
- rc = sja1105_setup_devlink_params(ds);
+ rc = sja1105_devlink_setup(ds);
if (rc < 0)
return rc;
@@ -3016,7 +2944,11 @@ static int sja1105_setup(struct dsa_switch *ds)
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
*/
- return sja1105_setup_8021q_tagging(ds, true);
+ rtnl_lock();
+ rc = sja1105_setup_8021q_tagging(ds, true);
+ rtnl_unlock();
+
+ return rc;
}
static void sja1105_teardown(struct dsa_switch *ds)
@@ -3035,7 +2967,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
- sja1105_teardown_devlink_params(ds);
+ sja1105_devlink_teardown(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
@@ -3389,6 +3321,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
.devlink_param_get = sja1105_devlink_param_get,
.devlink_param_set = sja1105_devlink_param_set,
+ .devlink_info_get = sja1105_devlink_info_get,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3504,7 +3437,16 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
- INIT_LIST_HEAD(&priv->crosschip_links);
+ priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
+ GFP_KERNEL);
+ if (!priv->dsa_8021q_ctx)
+ return -ENOMEM;
+
+ priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
+ priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
+ priv->dsa_8021q_ctx->ds = ds;
+
+ INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
INIT_LIST_HEAD(&priv->bridge_vlans);
INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 704dcf1d1c01..591c5734747d 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -302,9 +302,8 @@ static int sja1105_status_get(struct sja1105_private *priv,
* for upload requires the recalculation of table CRCs and updating the
* structures with these.
*/
-static int
-static_config_buf_prepare_for_upload(struct sja1105_private *priv,
- void *config_buf, int buf_len)
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
{
struct sja1105_static_config *config = &priv->static_config;
struct sja1105_table_header final_header;
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index d3b30bacc94e..05e15b6e5e2c 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -789,8 +789,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* it with zeros to ETH_ZLEN for us.
*/
if (skb_shinfo(skb)->nr_frags == 0) {
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(skb->len);
txd->frag.addr = cpu_to_le32(skb_dma);
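
This typhoon conversion (and the starfire one further down) is a mechanical migration off the legacy PCI DMA wrappers, which were one-line shims over the generic DMA API; roughly, per the old pci-dma-compat layer:

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		return dma_map_single(&hwdev->dev, ptr, size,
				      (enum dma_data_direction)direction);
	}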
@@ -800,8 +800,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
int i, len;
len = skb_headlen(skb);
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -818,8 +818,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
- skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, frag_addr,
+ len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -1349,12 +1349,12 @@ typhoon_download_firmware(struct typhoon *tp)
image_data = typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
- /* Cannot just map the firmware image using pci_map_single() as
+ /* Cannot just map the firmware image using dma_map_single() as
* the firmware is vmalloc()'d and may not be physically contiguous,
- * so we allocate some consistent memory to copy the sections into.
+ * so we allocate some coherent memory to copy the sections into.
*/
err = -ENOMEM;
- dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
+ dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
if (!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
@@ -1419,8 +1419,7 @@ typhoon_download_firmware(struct typhoon *tp)
* the checksum, we can do this once, at the end.
*/
csum = csum_fold(csum_partial_copy_nocheck(image_data,
- dpage, len,
- 0));
+ dpage, len));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum),
@@ -1460,7 +1459,7 @@ err_out_irq:
iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
- pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
err_out:
return err;
@@ -1527,8 +1526,8 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
dma_len = le16_to_cpu(tx->len);
- pci_unmap_single(tp->pdev, skb_dma, dma_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev, skb_dma, dma_len,
+ DMA_TO_DEVICE);
}
tx->flags = 0;
@@ -1609,8 +1608,8 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
skb_reserve(skb, 2);
#endif
- dma_addr = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&tp->pdev->dev, skb->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
/* Since no card does 64 bit DAC, the high bits will never
* change from zero.
@@ -1665,20 +1664,19 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
if (pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
- pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
- pci_dma_sync_single_for_device(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
skb_put(new_skb, pkt_len);
typhoon_recycle_rx_skb(tp, idx);
} else {
new_skb = skb;
skb_put(new_skb, pkt_len);
- pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
typhoon_alloc_rx_skb(tp, idx);
}
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
@@ -1792,8 +1790,8 @@ typhoon_free_rx_rings(struct typhoon *tp)
for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
if (rxb->skb) {
- pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, rxb->dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(rxb->skb);
rxb->skb = NULL;
}
@@ -2306,7 +2304,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_disable;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
@@ -2355,8 +2353,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocate pci dma space for rx and tx descriptor rings
*/
- shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
- &shared_dma);
+ shared = dma_alloc_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ &shared_dma, GFP_KERNEL);
if (!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
@@ -2509,8 +2507,8 @@ error_out_reset:
typhoon_reset(ioaddr, NoWait);
error_out_dma:
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- shared, shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared), shared,
+ shared_dma);
error_out_remap:
pci_iounmap(pdev, ioaddr);
error_out_regions:
@@ -2537,8 +2535,8 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_restore_state(pdev);
typhoon_reset(tp->ioaddr, NoWait);
pci_iounmap(pdev, tp->ioaddr);
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- tp->shared, tp->shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ tp->shared, tp->shared_dma);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index a00b36f91d9f..2488bfdb9133 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -657,8 +657,10 @@ static void block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+ xfer_count++;
+ }
}
@@ -1270,10 +1272,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
}
- else ei_local->lasttx = 20, ei_local->txing = 0;
}
else if (ei_local->tx2 < 0)
{
@@ -1289,9 +1293,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
}
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
}
// else
// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index babc92e2692e..e84021282edf 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -50,6 +50,7 @@
*/
+#include <linux/build_bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
@@ -112,8 +113,10 @@ static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
static unsigned version_printed;
-static u32 msg_enable;
-module_param(msg_enable, uint, 0444);
+static int msg_enable;
+static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR);
+module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
/*
@@ -597,10 +600,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
- } else
- ei_local->lasttx = 20, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
+ }
} else if (ei_local->tx2 < 0) {
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
@@ -612,8 +617,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
- } else
- ei_local->lasttx = 10, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
+ }
} /* else
netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
ei_local->lasttx);
@@ -969,14 +976,14 @@ static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
- pr_info("%s", version);
-
ether_setup(dev);
spin_lock_init(&ei_local->page_lock);
- ei_local->msg_enable = msg_enable;
+ ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);
+
+ if (netif_msg_drv(ei_local) && (version_printed++ == 0))
+ pr_info("%s", version);
}
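
Routing the module parameter through netif_msg_init() is what supplies the new defaults when msg_enable is left unset; its behavior, paraphrased from the netdevice header:

	static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
	{
		if (debug_value < 0 || debug_value >= (int)(sizeof(u32) * 8))
			return default_msg_enable_bits;	/* out of range: use defaults */
		if (debug_value == 0)
			return 0;			/* explicitly quiet */
		return (1U << debug_value) - 1;		/* set the low N bits */
	}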
/**
@@ -1014,8 +1021,7 @@ static void __NS8390_init(struct net_device *dev, int startp)
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
: 0x48;
- if (sizeof(struct e8390_pkt_hdr) != 4)
- panic("8390.c: header struct mispacked\n");
+ BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
/* Follow National Semi's recommendations for initing the DP83902. */
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
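
Replacing the runtime panic() with BUILD_BUG_ON() turns a mispacked e8390_pkt_hdr into a build failure instead of a boot-time crash. As a sketch, older kernels implemented the macro along the lines of:

	/* negative array size if cond is true; modern kernels use
	 * _Static_assert-style machinery instead */
	#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))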
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 164c3ed550bf..9d3b1e0e425c 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1178,8 +1178,10 @@ static void dma_block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
insw(nic_base + PCNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT);
+ xfer_count++;
+ }
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ba0055bb1614..555299737b51 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -886,7 +886,9 @@ static int netdev_open(struct net_device *dev)
tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
- np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+ np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
+ np->queue_mem_size,
+ &np->queue_mem_dma, GFP_ATOMIC);
if (np->queue_mem == NULL) {
free_irq(irq, dev);
return -ENOMEM;
@@ -1136,9 +1138,11 @@ static void init_ring(struct net_device *dev)
np->rx_info[i].skb = skb;
if (skb == NULL)
break;
- np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[i].mapping)) {
+ np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
dev_kfree_skb(skb);
np->rx_info[i].skb = NULL;
break;
@@ -1217,18 +1221,19 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ skb_first_frag_len(skb),
+ DMA_TO_DEVICE);
} else {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev,
+ dma_map_single(&np->pci_dev->dev,
skb_frag_address(this_frag),
skb_frag_size(this_frag),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- if (pci_dma_mapping_error(np->pci_dev,
- np->tx_info[entry].mapping)) {
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
dev->stats.tx_dropped++;
goto err_out;
}
@@ -1271,18 +1276,16 @@ err_out:
entry = prev_tx % TX_RING_SIZE;
np->tx_info[entry].skb = NULL;
if (i > 0) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
for (j = 1; j < i; j++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_frag_size(
- &skb_shinfo(skb)->frags[j-1]),
- PCI_DMA_TODEVICE);
+ skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
+ DMA_TO_DEVICE);
entry++;
}
}
@@ -1356,20 +1359,20 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
struct sk_buff *skb = np->tx_info[entry].skb;
np->tx_info[entry].skb = NULL;
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
np->dirty_tx += np->tx_info[entry].used_slots;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
{
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->dirty_tx++;
entry++;
}
@@ -1461,16 +1464,18 @@ static int __netdev_rx(struct net_device *dev, int *quota)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
- pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb = np->rx_info[entry].skb;
skb_put(skb, pkt_len);
np->rx_info[entry].skb = NULL;
@@ -1588,9 +1593,9 @@ static void refill_rx_ring(struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[entry].mapping)) {
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
dev_kfree_skb(skb);
np->rx_info[entry].skb = NULL;
break;
@@ -1963,7 +1968,9 @@ static int netdev_close(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
if (np->rx_info[i].skb != NULL) {
- pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[i].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_info[i].skb);
}
np->rx_info[i].skb = NULL;
@@ -1973,9 +1980,8 @@ static int netdev_close(struct net_device *dev)
struct sk_buff *skb = np->tx_info[i].skb;
if (skb == NULL)
continue;
- pci_unmap_single(np->pci_dev,
- np->tx_info[i].mapping,
- skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[i].mapping = 0;
dev_kfree_skb(skb);
np->tx_info[i].skb = NULL;
@@ -2018,7 +2024,8 @@ static void starfire_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (np->queue_mem)
- pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+ dma_free_coherent(&pdev->dev, np->queue_mem_size,
+ np->queue_mem, np->queue_mem_dma);
/* XXX: add wakeup code -- requires firmware for MagicPacket */
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index b3b8a8010142..862ea44beea7 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -640,13 +640,11 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct emac_board_info *db = netdev_priv(dev);
int int_status;
- unsigned long flags;
unsigned int reg_val;
/* A real interrupt coming */
- /* holders of db->lock must always block IRQs */
- spin_lock_irqsave(&db->lock, flags);
+ spin_lock(&db->lock);
/* Disable all interrupts */
writel(0, db->membase + EMAC_INT_CTL_REG);
@@ -680,7 +678,7 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
reg_val |= (0xf << 0) | (0x01 << 8);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
- spin_unlock_irqrestore(&db->lock, flags);
+ spin_unlock(&db->lock);
return IRQ_HANDLED;
}
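
Dropping irqsave is safe here because emac_interrupt() runs in hardirq context, where local interrupts are already disabled; the discipline that must hold is that every process-context taker of db->lock keeps using the irqsave variant, so the handler can never deadlock against it. A sketch of the assumed pattern (foo_* names hypothetical):

	static irqreturn_t foo_irq(int irq, void *dev_id)	/* hardirq context */
	{
		struct foo_priv *db = dev_id;

		spin_lock(&db->lock);		/* IRQs already off on this CPU */
		/* ... handle the event ... */
		spin_unlock(&db->lock);
		return IRQ_HANDLED;
	}

	static void foo_process_path(struct foo_priv *db)	/* process context */
	{
		unsigned long flags;

		spin_lock_irqsave(&db->lock, flags);	/* keep the handler out */
		/* ... */
		spin_unlock_irqrestore(&db->lock, flags);
	}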
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8470c836fa18..1a7e4df9b3e9 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -465,6 +465,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
ap = netdev_priv(dev);
+ ap->ndev = dev;
ap->pdev = pdev;
ap->name = pci_name(pdev);
@@ -1562,10 +1563,10 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(unsigned long arg)
+static void ace_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct ace_private *ap = netdev_priv(dev);
+ struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct net_device *dev = ap->ndev;
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -2269,7 +2270,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+ tasklet_setup(&ap->ace_tasklet, ace_tasklet);
return 0;
}
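
The new ap->ndev backpointer exists because tasklet_setup() callbacks no longer receive an opaque data cookie, only the tasklet_struct itself; from_tasklet() is container_of() under a friendlier name, paraphrased:

	#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
		container_of(callback_tasklet, typeof(*var), tasklet_fieldname)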
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index c670067b1541..265fa601a258 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -633,6 +633,7 @@ struct ace_skb
*/
struct ace_private
{
+ struct net_device *ndev; /* backpointer */
struct ace_info *info;
struct ace_regs __iomem *regs; /* register base */
struct ace_skb *skb;
@@ -776,7 +777,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(unsigned long dev);
+static void ace_tasklet(struct tasklet_struct *t);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index b818a169c193..4164eacc5c28 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
+#define ENA_ADMIN_RSS_KEY_PARTS 10
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
@@ -55,6 +29,7 @@ enum ena_admin_aq_completion_status {
ENA_ADMIN_RESOURCE_BUSY = 7,
};
+/* subcommands for the set/get feature admin commands */
enum ena_admin_aq_feature_id {
ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
ENA_ADMIN_MAX_QUEUES_NUM = 2,
@@ -63,7 +38,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_EXT = 7,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG = 12,
ENA_ADMIN_MTU = 14,
ENA_ADMIN_RSS_HASH_INPUT = 18,
ENA_ADMIN_INTERRUPT_MODERATION = 20,
@@ -117,6 +92,8 @@ enum ena_admin_completion_policy_type {
enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ /* extra HW stats for specific network interface */
+ ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
};
enum ena_admin_get_stats_scope {
@@ -193,7 +170,7 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
/* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
u16 sq_head_indx;
};
@@ -238,8 +215,8 @@ struct ena_admin_aq_create_sq_cmd {
*/
u8 sq_caps_3;
- /* associated completion queue id. This CQ must be created prior to
- * SQ creation
+ /* associated completion queue id. This CQ must be created prior to SQ
+ * creation
*/
u16 cq_idx;
@@ -378,7 +355,7 @@ struct ena_admin_aq_get_stats_cmd {
u16 queue_idx;
/* device id, value 0xFFFF means mine. only privileged device can get
- * stats of other device
+ * stats of other device
*/
u16 device_id;
};
@@ -410,10 +387,43 @@ struct ena_admin_basic_stats {
u32 tx_drops_high;
};
+/* ENI Statistics Command. */
+struct ena_admin_eni_stats {
+ /* The number of packets shaped due to inbound aggregate BW
+ * allowance being exceeded
+ */
+ u64 bw_in_allowance_exceeded;
+
+ /* The number of packets shaped due to outbound aggregate BW
+ * allowance being exceeded
+ */
+ u64 bw_out_allowance_exceeded;
+
+ /* The number of packets shaped due to PPS allowance being exceeded */
+ u64 pps_allowance_exceeded;
+
+ /* The number of packets shaped due to connection tracking
+ * allowance being exceeded and leading to failure in establishment
+ * of new connections
+ */
+ u64 conntrack_allowance_exceeded;
+
+ /* The number of packets shaped due to linklocal packet rate
+ * allowance being exceeded
+ */
+ u64 linklocal_allowance_exceeded;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
- struct ena_admin_basic_stats basic_stats;
+ union {
+ u64 raw[7];
+
+ struct ena_admin_basic_stats basic_stats;
+
+ struct ena_admin_eni_stats eni_stats;
+ } u;
};
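
A sizing note on the new union (hedged, from the field lists visible here): raw[7] reserves 56 bytes, enough to cover either the basic or the ENI view, and gives callers a typeless way to copy whole responses. A compile-time guard one could imagine, though it is not in the patch:

	BUILD_BUG_ON(sizeof(struct ena_admin_eni_stats) > sizeof(u64[7]));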
struct ena_admin_get_set_feature_common_desc {
@@ -440,7 +450,9 @@ struct ena_admin_device_attr_feature_desc {
u32 device_version;
- /* bitmap of ena_admin_aq_feature_id */
+ /* bitmap of ena_admin_aq_feature_id, which represents supported
+ * subcommands for the set/get feature admin commands.
+ */
u32 supported_features;
u32 reserved3;
@@ -526,32 +538,30 @@ struct ena_admin_feature_llq_desc {
u32 max_llq_depth;
- /* specify the header locations the device supports. bitfield of
- * enum ena_admin_llq_header_location.
+ /* specify the header locations the device supports. bitfield of enum
+ * ena_admin_llq_header_location.
*/
u16 header_location_ctrl_supported;
/* the header location the driver selected to use. */
u16 header_location_ctrl_enabled;
- /* if inline header is specified - this is the size of descriptor
- * list entry. If header in a separate ring is specified - this is
- * the size of header ring entry. bitfield of enum
- * ena_admin_llq_ring_entry_size. specify the entry sizes the device
- * supports
+ /* if inline header is specified - this is the size of descriptor list
+ * entry. If header in a separate ring is specified - this is the size
+ * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
+ * specify the entry sizes the device supports
*/
u16 entry_size_ctrl_supported;
/* the entry size the driver selected to use. */
u16 entry_size_ctrl_enabled;
- /* valid only if inline header is specified. First entry associated
- * with the packet includes descriptors and header. Rest of the
- * entries occupied by descriptors. This parameter defines the max
- * number of descriptors precedding the header in the first entry.
- * The field is bitfield of enum
- * ena_admin_llq_num_descs_before_header and specify the values the
- * device supports
+ /* valid only if inline header is specified. First entry associated with
+ * the packet includes descriptors and header. Rest of the entries
+ * occupied by descriptors. This parameter defines the max number of
+ * descriptors preceding the header in the first entry. The field is a
+ * bitfield of enum ena_admin_llq_num_descs_before_header and specifies
+ * the values the device supports
*/
u16 desc_num_before_header_supported;
@@ -559,7 +569,7 @@ struct ena_admin_feature_llq_desc {
u16 desc_num_before_header_enabled;
/* valid only if inline was chosen. bitfield of enum
- * ena_admin_llq_stride_ctrl
+ * ena_admin_llq_stride_ctrl
*/
u16 descriptors_stride_ctrl_supported;
@@ -594,8 +604,8 @@ struct ena_admin_queue_ext_feature_fields {
u32 max_tx_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_per_packet_tx_descs;
@@ -618,8 +628,8 @@ struct ena_admin_queue_feature_desc {
u32 max_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_packet_tx_descs;
@@ -707,11 +717,11 @@ enum ena_admin_hash_functions {
};
struct ena_admin_feature_rss_flow_hash_control {
- u32 keys_num;
+ u32 key_parts;
u32 reserved;
- u32 key[10];
+ u32 key[ENA_ADMIN_RSS_KEY_PARTS];
};
struct ena_admin_feature_rss_flow_hash_function {
@@ -1007,7 +1017,7 @@ struct ena_admin_set_feat_resp {
struct ena_admin_aenq_common_desc {
u16 group;
- u16 syndrom;
+ u16 syndrome;
/* 0 : phase
* 7:1 : reserved - MBZ
@@ -1031,7 +1041,7 @@ enum ena_admin_aenq_group {
ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum ena_admin_aenq_notification_syndrom {
+enum ena_admin_aenq_notification_syndrome {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 435bf05a853c..5f8769aa469d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_com.h"
@@ -98,7 +71,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("dma address has more bits that the device supports\n");
+ pr_err("DMA address has more bits that the device supports\n");
return -EINVAL;
}
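
The check above relies on GENMASK_ULL to build a mask of the low dma_addr_bits bits; an address is acceptable only if masking it changes nothing. A minimal userspace sketch of the same test, with GENMASK_ULL reimplemented for illustration:

#include <stdint.h>
#include <stdbool.h>

/* Bits h..l set, everything else clear - mirrors the kernel macro. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* True if addr fits in dma_addr_bits bits, i.e. no high bits are set. */
static bool dma_addr_fits(uint64_t addr, unsigned int dma_addr_bits)
{
	return (addr & GENMASK_ULL(dma_addr_bits - 1, 0)) == addr;
}

/* Example: for a 48-bit capable device
 *   dma_addr_fits(0x0000ffffffffffffULL, 48) -> true
 *   dma_addr_fits(0x0001000000000000ULL, 48) -> false
 */
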
@@ -108,16 +81,16 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_sq *sq = &queue->sq;
- u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -130,16 +103,16 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_cq *cq = &queue->cq;
- u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -149,20 +122,20 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
struct ena_aenq_handlers *aenq_handlers)
{
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u32 addr_low, addr_high, aenq_caps;
u16 size;
- dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+ &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -172,18 +145,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
- writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
- aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
<< ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+ writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- pr_err("aenq handlers pointer is NULL\n");
+ pr_err("AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -199,31 +172,31 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
atomic_dec(&queue->outstanding_cmds);
}
-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
u16 command_id, bool capture)
{
- if (unlikely(command_id >= queue->q_depth)) {
- pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, queue->q_depth);
+ if (unlikely(command_id >= admin_queue->q_depth)) {
+ pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
- if (unlikely(!queue->comp_ctx)) {
+ if (unlikely(!admin_queue->comp_ctx)) {
pr_err("Completion context is NULL\n");
return NULL;
}
- if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
pr_err("Completion context is occupied\n");
return NULL;
}
if (capture) {
- atomic_inc(&queue->outstanding_cmds);
- queue->comp_ctx[command_id].occupied = true;
+ atomic_inc(&admin_queue->outstanding_cmds);
+ admin_queue->comp_ctx[command_id].occupied = true;
}
- return &queue->comp_ctx[command_id];
+ return &admin_queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
@@ -244,7 +217,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is full.\n");
+ pr_debug("Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -284,20 +257,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
- size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
- queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
- if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed\n");
+ admin_queue->comp_ctx =
+ devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
- for (i = 0; i < queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(queue, i, false);
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
if (comp_ctx)
init_completion(&comp_ctx->wait_event);
}
@@ -363,7 +337,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -389,7 +363,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed\n");
+ pr_err("Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -449,7 +423,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -525,7 +499,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
static int ena_com_comp_status_to_errno(u8 comp_status)
{
if (unlikely(comp_status != 0))
- pr_err("admin command failed[%u]\n", comp_status);
+ pr_err("Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -539,6 +513,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_ILLEGAL_PARAMETER:
case ENA_ADMIN_UNKNOWN_ERROR:
return -EINVAL;
+ case ENA_ADMIN_RESOURCE_BUSY:
+ return -EAGAIN;
}
return -EINVAL;
@@ -603,7 +579,7 @@ err:
return ret;
}
-/**
+/*
* Set the LLQ configurations of the firmware
*
* The driver provides only the enabled feature values to the device,
@@ -717,7 +693,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be a whole multiple of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
+ pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -858,7 +834,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
@@ -925,7 +901,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("failed to destroy io sq error: %d\n", ret);
+ pr_err("Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -1034,7 +1010,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1081,11 +1057,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
(ena_dev->rss).hash_key;
netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
- /* The key is stored in the device in u32 array
- * as well as the API requires the key to be passed in this
- * format. Thus the size of our array should be divided by 4
+ /* The key buffer is stored in the device in an array of
+ * uint32 elements.
*/
- hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+ hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1149,13 +1124,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
if (unlikely(ret))
return ret;
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1 << log_size, 1 << get_resp.u.ind_table.min_size,
1 << get_resp.u.ind_table.max_size);
return -EINVAL;
@@ -1248,7 +1223,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
}
@@ -1277,7 +1252,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1390,7 +1365,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1419,7 +1394,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1612,12 +1587,12 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return -ETIME;
}
- pr_info("ena device version: %d.%d\n",
+ pr_info("ENA device version: %d.%d\n",
(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+ pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1640,6 +1615,19 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return 0;
}
+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+ struct ena_com_admin_queue *admin_queue)
+
+{
+ if (!admin_queue->comp_ctx)
+ return;
+
+ devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+
+ admin_queue->comp_ctx = NULL;
+}
+
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1648,9 +1636,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
struct ena_com_aenq *aenq = &ena_dev->aenq;
u16 size;
- if (admin_queue->comp_ctx)
- devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
- admin_queue->comp_ctx = NULL;
+ ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
dma_free_coherent(ena_dev->dmadev, size, sq->entries,
@@ -1928,6 +1915,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
sizeof(get_resp.u.dev_attr));
+
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -2006,10 +1994,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
/* ena_handle_specific_aenq_event:
* return the handler that is relevant to the specific event group
*/
-static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
u16 group)
{
- struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+ struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
return aenq_handlers->handlers[group];
@@ -2021,11 +2009,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
* handles the aenq incoming events.
* pop events from the queue and apply the specific handler
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
@@ -2045,12 +2033,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
dma_rmb();
timestamp = (u64)aenq_common->timestamp_low |
- ((u64)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom, timestamp);
+ ((u64)aenq_common->timestamp_high << 32);
+
+ pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
- handler_cb = ena_com_get_specific_aenq_cb(dev,
+ handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler*/
@@ -2075,7 +2064,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head,
+ ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
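
Inside the handler, each event's timestamp arrives as two 32-bit halves and is reassembled before logging; the rewrap above only changes indentation, not the math. The combination is the usual widen-then-shift pattern:

#include <stdint.h>

/* Combine the two 32-bit halves of an AENQ timestamp into one u64.
 * Casting before the shift matters: shifting a 32-bit value left by
 * 32 would be undefined behavior.
 */
static inline uint64_t aenq_timestamp(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}
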
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2167,6 +2157,21 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
return ret;
}
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
+
+ return ret;
+}
+
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats)
{
@@ -2176,8 +2181,8 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.basic_stats,
- sizeof(ctx.get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.u.basic_stats,
+ sizeof(ctx.get_resp.u.basic_stats));
return ret;
}
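
The new getter mirrors ena_com_get_dev_basic_stats just below it: zero a stats context, issue the admin command, and copy the union member out on success. A hedged sketch of a caller, modeled on how the ethtool path appears to consume it (the function name and error text are illustrative; the ena_dev and eni_stats fields follow this driver's ena_adapter):

/* Illustrative caller: refresh the adapter's cached ENI counters. */
static int example_update_eni_stats(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
	if (rc)
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to get ENI stats\n");

	return rc;
}
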
@@ -2273,7 +2278,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2331,7 +2336,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
}
memcpy(hash_key->key, key, key_len);
rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
break;
case ENA_ADMIN_CRC32:
@@ -2386,7 +2391,8 @@ int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
ena_dev->rss.hash_key;
if (key)
- memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+ memcpy(key, hash_key->key,
+ (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
return 0;
}
@@ -2442,7 +2448,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2503,7 +2509,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
return -EOPNOTSUPP;
@@ -2541,7 +2547,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
proto, hash_fields, supported_fields);
}
@@ -2581,9 +2587,9 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
int ret;
if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
}
@@ -2598,7 +2604,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
cmd.u.ind_table.size = rss->tbl_log_size;
cmd.u.ind_table.inline_index = 0xFFFFFFFF;
@@ -2606,7 +2612,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2636,7 +2642,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
sizeof(struct ena_admin_rss_ind_table_entry);
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
tbl_size, 0);
if (unlikely(rc))
@@ -2719,8 +2725,7 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
host_attr->debug_area_virt_addr =
dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr,
- GFP_KERNEL);
+ &host_attr->debug_area_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
@@ -2777,7 +2782,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2785,7 +2790,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2904,7 +2909,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("the size of the LLQ entry is smaller than needed\n");
+ pr_err("The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 4287d47b2b0b..55097750d062 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_COM
@@ -536,7 +509,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
* This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
@@ -616,6 +589,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats);
+/* ena_com_get_eni_stats - Get extended network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index 8a8ded0de9ac..e210c8a81fc0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ccd440589565..ad30cacc1622 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_eth_com.h"
@@ -45,8 +18,9 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ desc_phase = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
return NULL;
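
The reflowed statement extracts the phase bit from the completion status; the driver keeps an expected phase that flips on every queue wrap, so a stale descriptor left over from the previous lap is detected without any explicit valid flag. A minimal sketch of the idea, with hypothetical mask and shift values:

#include <stdint.h>

#define CDESC_PHASE_MASK	0x1u	/* hypothetical: phase in bit 0 */
#define CDESC_PHASE_SHIFT	0

struct cdesc { uint32_t status; };

/* Return the next completed descriptor, or NULL if the device has not
 * written one yet (its phase bit still belongs to the previous lap).
 */
static struct cdesc *next_cdesc(struct cdesc *ring, uint16_t head_masked,
				uint8_t expected_phase)
{
	uint8_t phase = (ring[head_masked].status & CDESC_PHASE_MASK) >>
			CDESC_PHASE_SHIFT;

	return phase == expected_phase ? &ring[head_masked] : NULL;
}

/* The consumer flips expected_phase (phase ^= 1) each time head wraps. */
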
@@ -89,7 +63,7 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
}
io_sq->entries_in_tx_burst_left--;
- pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
io_sq->qid, io_sq->entries_in_tx_burst_left);
}
@@ -128,12 +102,12 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
if (unlikely((header_offset + header_len) >
llq_info->desc_list_entry_size)) {
- pr_err("trying to write header larger than llq entry can accommodate\n");
+ pr_err("Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -151,7 +125,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return NULL;
}
@@ -262,8 +236,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
@@ -275,7 +250,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
@@ -291,6 +266,9 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
meta_desc = get_sq_desc(io_sq);
+ if (unlikely(!meta_desc))
+ return -EFAULT;
+
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
@@ -298,7 +276,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
/* bits 0-9 of the mss */
- meta_desc->word2 |= (ena_meta->mss <<
+ meta_desc->word2 |= ((u32)ena_meta->mss <<
ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
@@ -308,7 +286,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= (io_sq->phase <<
+ meta_desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
@@ -321,7 +299,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
- meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
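
The (u32) casts added throughout this hunk guard the shift-and-mask packing: fields such as mss are u16, and a value promoted only to int can lose bits or hit the sign bit once shift plus field width approaches 32. Widening explicitly keeps the arithmetic well defined, as in this standalone sketch (mask and shift values hypothetical):

#include <stdint.h>

#define MSS_HI_SHIFT	20			/* hypothetical position */
#define MSS_HI_MASK	(0xfu << MSS_HI_SHIFT)

static inline uint32_t pack_mss_hi(uint32_t word, uint16_t mss_hi)
{
	/* Without the cast mss_hi is promoted to int; casting to u32
	 * makes large shifts safe and the intent explicit.
	 */
	return word | (((uint32_t)mss_hi << MSS_HI_SHIFT) & MSS_HI_MASK);
}
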
@@ -358,7 +336,7 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+ struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@@ -379,7 +357,7 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
@@ -412,7 +390,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- pr_err("header size is too large %d max header: %d\n",
+ pr_err("Header size is too large %d max header: %d\n",
header_len, io_sq->tx_max_header_size);
return -EINVAL;
}
@@ -427,7 +405,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- pr_err("failed to create and store tx meta desc\n");
+ pr_err("Failed to create and store tx meta desc\n");
return rc;
}
@@ -447,16 +425,16 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (!have_meta)
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
- desc->buff_addr_hi_hdr_sz |= (header_len <<
+ desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
- desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
/* Bits 0-9 */
- desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
@@ -502,7 +480,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
- desc->len_ctrl |= (io_sq->phase <<
+ desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
@@ -550,7 +528,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
@@ -606,9 +584,9 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
- ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
desc->req_id = req_id;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index b6592cb93b04..2c16c218818a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_ETH_COM_H_
@@ -167,7 +140,7 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
@@ -178,13 +151,13 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- pr_debug("reset available entries in tx burst for queue %d to %d\n",
+ pr_debug("Reset available entries in tx burst for queue %d to %d\n",
io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index d105c9c56192..332ac0d28ac7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 430275bc0d04..3b2cd28f962d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/pci.h>
@@ -41,12 +14,17 @@ struct ena_stats {
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
- .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+ .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
}
#define ENA_STAT_ENTRY(stat, stat_type) { \
.name = #stat, \
- .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
+}
+
+#define ENA_STAT_HW_ENTRY(stat, stat_type) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \
}
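
Dividing offsetof() by sizeof(u64) turns a byte offset into an element index, which is what lets the lookup code below drop its uintptr_t casts and use plain u64 pointer arithmetic; every stats struct involved is effectively an array of u64 counters. A minimal sketch of the access pattern (struct and field names invented for illustration):

#include <stdint.h>
#include <stddef.h>

struct tx_stats { uint64_t cnt; uint64_t bytes; uint64_t doorbells; };

/* Element index, not byte offset: offsetof(bytes) == 8, index == 1. */
#define STAT_INDEX(type, field) (offsetof(type, field) / sizeof(uint64_t))

static uint64_t read_stat(const struct tx_stats *s, size_t index)
{
	/* u64 pointer arithmetic already scales by sizeof(u64), so the
	 * index is added directly - no casts through uintptr_t needed.
	 */
	return *((const uint64_t *)s + index);
}

/* read_stat(s, STAT_INDEX(struct tx_stats, bytes)) == s->bytes */
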
#define ENA_STAT_RX_ENTRY(stat) \
@@ -58,6 +36,9 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
+#define ENA_STAT_ENI_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, eni_stats)
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -68,6 +49,14 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};
+static const struct ena_stats ena_stats_eni_strings[] = {
+ ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -100,6 +89,11 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
ENA_STAT_RX_ENTRY(csum_unchecked),
+ ENA_STAT_RX_ENTRY(xdp_aborted),
+ ENA_STAT_RX_ENTRY(xdp_drop),
+ ENA_STAT_RX_ENTRY(xdp_pass),
+ ENA_STAT_RX_ENTRY(xdp_tx),
+ ENA_STAT_RX_ENTRY(xdp_invalid),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -110,10 +104,12 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
ENA_STAT_ENA_COM_ENTRY(no_completion),
};
-#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
-#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
-#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
-#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define ENA_STATS_ARRAY_ENI(adapter) \
+ (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -134,29 +130,30 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
u64 *ptr;
int i, j;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
/* Tx stats */
ring = &adapter->tx_ring[i];
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
- ptr = (u64 *)((uintptr_t)&ring->tx_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;
ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
}
+ /* XDP TX queues don't have an RX queue counterpart */
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* Rx stats */
+ ring = &adapter->rx_ring[i];
- /* Rx stats */
- ring = &adapter->rx_ring[i];
-
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
- ptr = (u64 *)((uintptr_t)&ring->rx_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&ring->rx_stats +
+ ena_stats->stat_offset;
- ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
}
}
}
@@ -170,18 +167,17 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
ena_stats = &ena_stats_ena_com_strings[i];
- ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
+ ena_stats->stat_offset;
*(*data)++ = *ptr;
}
}
-static void ena_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
+static void ena_get_stats(struct ena_adapter *adapter,
+ u64 *data,
+ bool eni_stats_needed)
{
- struct ena_adapter *adapter = netdev_priv(netdev);
const struct ena_stats *ena_stats;
u64 *ptr;
int i;
@@ -189,16 +185,48 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
+ if (eni_stats_needed) {
+ ena_update_hw_stats(adapter);
+ for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, data++, &adapter->syncp);
+ }
+ }
+
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
}
+static void ena_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ ena_get_stats(adapter, data, adapter->eni_stats_supported);
+}
+
+static int ena_get_sw_stats_count(struct ena_adapter *adapter)
+{
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX
+ + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static int ena_get_hw_stats_count(struct ena_adapter *adapter)
+{
+ return ENA_STATS_ARRAY_ENI(adapter);
+}
+
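
The split keeps the ethtool count and the dump path consistent: sw stats cover the per-queue TX/RX arrays plus the global and admin-queue entries, while hw stats add the five ENI entries only when the device reports support. As a worked example under assumed array sizes (TX = 12, RX = 17, global = 7, ena_com = 5) with 8 IO queues and 8 XDP queues:

    sw = 8 * (12 + 17) + 8 * 12 + 7 + 5 = 340
    hw = 5 when adapter->eni_stats_supported is set, else 0

so ena_get_sset_count() would report 345 entries when ENI stats are available.
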
int ena_get_sset_count(struct net_device *netdev, int sset)
{
struct ena_adapter *adapter = netdev_priv(netdev);
@@ -206,31 +234,38 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
- + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+ return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
}
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
+ bool is_xdp;
int i, j;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ is_xdp = ENA_IS_XDP_INDEX(adapter, i);
/* Tx stats */
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_tx_%s", i, ena_stats->name);
+ "queue_%u_%s_%s", i,
+ is_xdp ? "xdp_tx" : "tx", ena_stats->name);
(*data) += ETH_GSTRING_LEN;
}
- /* Rx stats */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
- snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_rx_%s", i, ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ if (!is_xdp) {
+ /* RX stats, in XDP there isn't a RX queue
+ * counterpart
+ */
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_rx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
}
}
}
@@ -249,25 +284,43 @@ static void ena_com_dev_strings(u8 **data)
}
}
-static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+static void ena_get_strings(struct ena_adapter *adapter,
+ u8 *data,
+ bool eni_stats_needed)
{
- struct ena_adapter *adapter = netdev_priv(netdev);
const struct ena_stats *ena_stats;
int i;
- if (sset != ETH_SS_STATS)
- return;
-
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (eni_stats_needed) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
}
+static void ena_get_ethtool_strings(struct net_device *netdev,
+ u32 sset,
+ u8 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ ena_get_strings(adapter, data, adapter->eni_stats_supported);
+}
+
static int ena_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
@@ -847,7 +900,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_ringparam = ena_get_ringparam,
.set_ringparam = ena_set_ringparam,
.get_sset_count = ena_get_sset_count,
- .get_strings = ena_get_strings,
+ .get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
.set_rxnfc = ena_set_rxnfc,
@@ -875,7 +928,7 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
int strings_num;
int i, rc;
- strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
+ strings_num = ena_get_sw_stats_count(adapter);
if (strings_num <= 0) {
netif_err(adapter, drv, netdev, "Can't get stats num\n");
return;
@@ -886,7 +939,7 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
- "failed to alloc strings_buf\n");
+ "Failed to allocate strings_buf\n");
return;
}
@@ -895,13 +948,13 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
- "failed to allocate data buf\n");
+ "Failed to allocate data buf\n");
devm_kfree(&adapter->pdev->dev, strings_buf);
return;
}
- ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
- ena_get_ethtool_stats(netdev, NULL, data_buf);
+ ena_get_strings(adapter, strings_buf, false);
+ ena_get_stats(adapter, data_buf, false);
/* If there is a buffer, dump stats, otherwise print them to dmesg */
if (buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a3a8edf9a734..e8131dadc22c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -139,7 +112,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
if (!ret) {
- netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+ netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
dev->mtu = new_mtu;
} else {
@@ -178,7 +151,7 @@ static int ena_xmit_common(struct net_device *dev,
*/
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
+ "Failed to prepare tx bufs\n");
u64_stats_update_begin(&ring->syncp);
ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&ring->syncp);
@@ -292,7 +265,7 @@ error_report_dma_error:
u64_stats_update_begin(&xdp_ring->syncp);
xdp_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&xdp_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
xdp_return_frame_rx_napi(tx_info->xdpf);
tx_info->xdpf = NULL;
@@ -365,6 +338,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
{
struct bpf_prog *xdp_prog;
u32 verdict = XDP_PASS;
+ u64 *xdp_stat;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
@@ -374,17 +348,31 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
verdict = bpf_prog_run_xdp(xdp_prog, xdp);
- if (verdict == XDP_TX)
+ if (verdict == XDP_TX) {
ena_xdp_xmit_buff(rx_ring->netdev,
xdp,
rx_ring->qid + rx_ring->adapter->num_io_queues,
rx_info);
- else if (unlikely(verdict == XDP_ABORTED))
+
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ } else if (unlikely(verdict == XDP_ABORTED)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- else if (unlikely(verdict > XDP_TX))
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ } else if (unlikely(verdict == XDP_DROP)) {
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ } else if (unlikely(verdict == XDP_PASS)) {
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ } else {
bpf_warn_invalid_xdp_action(verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ (*xdp_stat)++;
+ u64_stats_update_end(&rx_ring->syncp);
out:
rcu_read_unlock();
+
return verdict;
}
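
The rewritten ena_xdp_execute() above classifies every verdict instead of handling only XDP_TX and XDP_ABORTED: each branch selects a pointer into the ring's rx_stats, and one u64_stats-protected increment follows the whole chain. A minimal self-contained sketch of the same idiom, using hypothetical demo_* names rather than the driver's types:

	#include <linux/bpf.h>			/* XDP_* verdict values */
	#include <linux/u64_stats_sync.h>

	struct demo_xdp_stats {
		u64 xdp_tx, xdp_aborted, xdp_drop, xdp_pass, xdp_invalid;
	};

	/* Pick the counter once, then do a single protected increment. */
	static void demo_count_verdict(struct demo_xdp_stats *s,
				       struct u64_stats_sync *syncp, u32 verdict)
	{
		u64 *stat;

		switch (verdict) {
		case XDP_TX:
			stat = &s->xdp_tx;
			break;
		case XDP_ABORTED:
			stat = &s->xdp_aborted;
			break;
		case XDP_DROP:
			stat = &s->xdp_drop;
			break;
		case XDP_PASS:
			stat = &s->xdp_pass;
			break;
		default:
			stat = &s->xdp_invalid;
			break;
		}

		u64_stats_update_begin(syncp);
		(*stat)++;
		u64_stats_update_end(syncp);
	}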
@@ -549,7 +537,7 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (!old_bpf_prog)
netif_info(adapter, drv, adapter->netdev,
- "xdp program set, changing the max_mtu from %d to %d",
+ "XDP program is set, changing the max_mtu from %d to %d",
prev_mtu, netdev->max_mtu);
} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
@@ -968,7 +956,7 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
return -EIO;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "alloc page %p, rx_info %p\n", page, rx_info);
+ "Allocate page %p, rx_info %p\n", page, rx_info);
rx_info->page = page;
rx_info->page_offset = 0;
@@ -1018,7 +1006,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
- "failed to alloc buffer for rx queue %d\n",
+ "Failed to allocate buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1027,7 +1015,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
req_id);
if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "failed to add buffer for rx queue %d\n",
+ "Failed to add buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1039,9 +1027,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.refil_partial++;
u64_stats_update_end(&rx_ring->syncp);
- netdev_warn(rx_ring->netdev,
- "refilled rx qid %d with only %d buffers (from %d)\n",
- rx_ring->qid, i, num);
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Refilled rx qid %d with only %d buffers (from %d)\n",
+ rx_ring->qid, i, num);
}
/* ena_com_write_sq_doorbell issues a wmb() */
@@ -1082,7 +1070,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
if (unlikely(rc != bufs_num))
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "refilling Queue %d failed. allocated %d buffers from: %d\n",
+ "Refilling Queue %d failed. allocated %d buffers from: %d\n",
i, rc, bufs_num);
}
}
@@ -1140,14 +1128,14 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
continue;
if (print_once) {
- netdev_notice(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
print_once = false;
} else {
- netdev_dbg(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
}
ena_unmap_tx_buff(tx_ring, tx_info);
@@ -1399,7 +1387,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return NULL;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx allocated small packet. len %d. data_len %d\n",
+ "RX allocated small packet. len %d. data_len %d\n",
skb->len, skb->data_len);
/* sync this buffer for CPU use */
@@ -1436,7 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx skb updated. len %d. data_len %d\n",
+ "RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
rx_info->page = NULL;
@@ -1643,6 +1631,11 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
+ /* The page might not actually be freed here since the
+ * page reference count is incremented in
+ * ena_xdp_xmit_buff(), and it is decreased only
+ * when a send completion is received from the device
+ */
if (xdp_verdict == XDP_TX)
ena_free_rx_page(rx_ring,
&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
@@ -1770,6 +1763,7 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.unmask_interrupt++;
u64_stats_update_end(&tx_ring->syncp);
+
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
@@ -1987,7 +1981,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
/* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
- "trying to enable MSI-X, vectors %d\n", msix_vecs);
+ "Trying to enable MSI-X, vectors %d\n", msix_vecs);
irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
msix_vecs, PCI_IRQ_MSIX);
@@ -2000,7 +1994,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
if (irq_cnt != msix_vecs) {
netif_notice(adapter, probe, adapter->netdev,
- "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+ "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
@@ -2070,12 +2064,12 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
irq->data);
if (rc) {
netif_err(adapter, probe, adapter->netdev,
- "failed to request admin irq\n");
+ "Failed to request admin irq\n");
return rc;
}
netif_dbg(adapter, probe, adapter->netdev,
- "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2108,7 +2102,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
}
netif_dbg(adapter, ifup, adapter->netdev,
- "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
i, irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2548,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
@@ -2632,7 +2626,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
- dev_err(&adapter->pdev->dev, "Device reset failed\n");
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Device reset failed\n");
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(adapter->ena_dev, false);
}
@@ -2954,7 +2949,7 @@ error_report_dma_error:
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
tx_info->skb = NULL;
@@ -3092,13 +3087,14 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
+ struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
int rc;
/* Allocate only the host info */
rc = ena_com_allocate_host_info(ena_dev);
if (rc) {
- pr_err("Cannot allocate host info\n");
+ dev_err(dev, "Cannot allocate host info\n");
return;
}
@@ -3128,9 +3124,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- pr_warn("Cannot set host attributes\n");
+ dev_warn(dev, "Cannot set host attributes\n");
else
- pr_err("Cannot set host attributes\n");
+ dev_err(dev, "Cannot set host attributes\n");
goto err;
}
@@ -3158,7 +3154,8 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
if (rc) {
- pr_err("Cannot allocate debug area\n");
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot allocate debug area\n");
return;
}
@@ -3178,6 +3175,19 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
+int ena_update_hw_stats(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
+ if (rc) {
+ dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
+ return rc;
+ }
+
+ return 0;
+}
+
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3349,7 +3359,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_mmio_reg_read_request_init(ena_dev);
if (rc) {
- dev_err(dev, "failed to init mmio read less\n");
+ dev_err(dev, "Failed to init mmio read less\n");
return rc;
}
@@ -3367,7 +3377,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_validate_version(ena_dev);
if (rc) {
- dev_err(dev, "device version is too low\n");
+ dev_err(dev, "Device version is too low\n");
goto err_mmio_read_less;
}
@@ -3436,7 +3446,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(dev, "ENA device init failed\n");
goto err_admin_init;
}
@@ -3572,9 +3582,10 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
adapter->last_keep_alive_jiffies = jiffies;
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
+
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3776,7 +3787,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
u64_stats_update_end(&rx_ring->syncp);
netif_err(adapter, drv, adapter->netdev,
- "trigger refill for ring %d\n", i);
+ "Trigger refill for ring %d\n", i);
napi_schedule(rx_ring->napi);
rx_ring->empty_rx_queue = 0;
@@ -4138,14 +4149,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+ struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
u32 max_num_io_queues;
- char *queue_type_str;
bool wd_state;
int bars, rc;
@@ -4177,7 +4187,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_resource_start(pdev, ENA_REG_BAR),
pci_resource_len(pdev, ENA_REG_BAR));
if (!ena_dev->reg_bar) {
- dev_err(&pdev->dev, "failed to remap regs bar\n");
+ dev_err(&pdev->dev, "Failed to remap regs bar\n");
rc = -EFAULT;
goto err_free_region;
}
@@ -4188,7 +4198,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
goto err_free_region;
@@ -4196,7 +4206,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+ dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
goto err_free_ena_dev;
}
@@ -4296,6 +4306,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
+ if (!ena_update_hw_stats(adapter))
+ adapter->eni_stats_supported = true;
+ else
+ adapter->eni_stats_supported = false;
+
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
@@ -4318,15 +4333,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- queue_type_str = "Regular";
- else
- queue_type_str = "Low Latency";
-
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, queue_type_str);
+ netdev->dev_addr);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -4456,7 +4466,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
rtnl_lock();
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
dev_err(&pdev->dev,
- "ignoring device reset request as the device is being suspended\n");
+ "Ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
ena_destroy_device(adapter, true);
@@ -4531,7 +4541,7 @@ static void ena_update_on_link_change(void *adapter_data,
ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
if (status) {
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
netif_carrier_on(adapter->netdev);
@@ -4575,7 +4585,7 @@ static void ena_notification(void *adapter_data,
aenq_e->aenq_common_desc.group,
ENA_ADMIN_NOTIFICATION);
- switch (aenq_e->aenq_common_desc.syndrom) {
+ switch (aenq_e->aenq_common_desc.syndrome) {
case ENA_ADMIN_UPDATE_HINTS:
hints = (struct ena_admin_ena_hw_hints *)
(&aenq_e->inline_data_w4);
@@ -4584,7 +4594,7 @@ static void ena_notification(void *adapter_data,
default:
netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n",
- aenq_e->aenq_common_desc.syndrom);
+ aenq_e->aenq_common_desc.syndrome);
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c8504006247..30eb686749dc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_H
@@ -261,6 +234,11 @@ struct ena_stats_rx {
u64 bad_req_id;
u64 empty_rx_ring;
u64 csum_unchecked;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
};
struct ena_ring {
@@ -405,6 +383,8 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
+ struct ena_admin_eni_stats eni_stats;
+ bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -422,6 +402,8 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
+int ena_update_hw_stats(struct ena_adapter *adapter);
+
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
index 426e57e10a7f..3ecdf29160ca 100644
--- a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_PCI_ID_TBL_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index b514bb1b855d..1e007a41a525 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 75dbd221dc59..19e195420e24 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,10 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
+ aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0,
- DMA_ATTR_NON_CONSISTENT);
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1310,9 +1309,8 @@ err_remap3:
err_remap2:
iounmap(aup->mac);
err_remap1:
- dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1344,9 +1342,8 @@ static int au1000_remove(struct platform_device *pdev)
if (aup->tx_db_inuse[i])
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
- dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index e1fde585fd0d..00ae1081254d 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -657,16 +657,6 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id)
struct net_device *dev = dev_id;
struct lance_private *lp = netdev_priv(dev);
int csr0;
- static int in_interrupt;
-
- if (dev == NULL) {
- DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
- return IRQ_NONE;
- }
-
- if (in_interrupt)
- DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
- in_interrupt = 1;
still_more:
flush_cache_all();
@@ -774,7 +764,6 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id)
DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
dev->name, DREG ));
- in_interrupt = 0;
return IRQ_HANDLED;
}
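
The sun3lance hunk drops a hand-rolled reentrancy guard (a function-static in_interrupt flag) together with a NULL check on dev_id. Both are redundant: the genirq core does not re-invoke a handler for the same interrupt line while it is still running, and dev_id is always the pointer passed to request_irq(). This appears to be part of the broader in_interrupt() cleanup series; the matching WARN_ON(in_interrupt()) removals in the atheros atl1c/atl1e reinit paths further below come from the same effort.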
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4ba75551cb17..2709a2db5657 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(unsigned long data)
+static void xgbe_ecc_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
unsigned int ecc_isr;
bool stop = false;
@@ -468,14 +468,14 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_ecc);
else
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(unsigned long data)
+static void xgbe_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -607,7 +607,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_dev);
else
- xgbe_isr_task((unsigned long)pdata);
+ xgbe_isr_task(&pdata->tasklet_dev);
return IRQ_HANDLED;
}
@@ -991,9 +991,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
- tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
+ tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
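
The xgbe hunks above and below follow the tree-wide tasklet API conversion: callbacks now take a struct tasklet_struct * and recover their private data with from_tasklet() (a container_of() wrapper keyed on the tasklet member) instead of casting an unsigned long, and tasklet_setup() replaces tasklet_init(). A minimal sketch of the idiom, with hypothetical demo_* names:

	#include <linux/interrupt.h>

	struct demo_data {
		int value;
		struct tasklet_struct tasklet;
	};

	static void demo_task(struct tasklet_struct *t)
	{
		/* container_of() back to the structure embedding the tasklet */
		struct demo_data *d = from_tasklet(d, t, tasklet);

		pr_info("tasklet ran, value=%d\n", d->value);
	}

	static void demo_init(struct demo_data *d)
	{
		tasklet_setup(&d->tasklet, demo_task);	/* replaces tasklet_init() */
	}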
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 4d9062d35930..22d4fc547a0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(unsigned long data)
+static void xgbe_i2c_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -324,7 +324,7 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_i2c);
else
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -462,8 +462,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 8a3a60bb2688..93ef5a30cb8d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -688,9 +688,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(unsigned long data)
+static void xgbe_an_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -715,14 +715,14 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_an);
else
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
@@ -1414,8 +1414,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index d35a338120cf..643f5e646740 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/prefetch.h>
@@ -26,7 +27,6 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
-#include "../../../phy/mdio-xgene.h"
#define ETHER_MIN_PACKET 64
#define ETHER_STD_PACKET 1518
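
The xgene hunk swaps a fragile relative include ("../../../phy/mdio-xgene.h") for <linux/mdio/mdio-xgene.h>, which suggests the MDIO header was relocated to include/linux/mdio/ so the ethernet driver no longer reaches into another driver's source directory.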
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1ab5314c4c1b..de2a9348bc3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -917,6 +917,57 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
return ret;
}
+static int aq_ethtool_get_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ u16 *val = data;
+
+ *val = aq_nic->aq_nic_cfg.is_media_detect ? AQ_HW_MEDIA_DETECT_CNT : 0;
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ u8 *val = data;
+
+ *val = (u8)aq_nic->aq_nic_cfg.downshift_counter;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ int err = -EOPNOTSUPP;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ const u16 *val = data;
+
+ err = aq_nic_set_media_detect(aq_nic, *val);
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ const u8 *val = data;
+
+ err = aq_nic_set_downshift(aq_nic, *val);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -952,4 +1003,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
.get_ts_info = aq_ethtool_get_ts_info,
+ .get_phy_tunable = aq_ethtool_get_phy_tunable,
+ .set_phy_tunable = aq_ethtool_set_phy_tunable,
};
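
With these two hooks wired into aq_ethtool_ops, the tunables become reachable through the standard ethtool PHY-tunable interface. With a sufficiently recent ethtool binary, commands of the form "ethtool --get-phy-tunable <dev> downshift" and "ethtool --set-phy-tunable <dev> downshift on count <N>" (interface name and count are examples, not from this patch) should land in aq_ethtool_get_phy_tunable()/aq_ethtool_set_phy_tunable() above.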
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 7df74015fbc9..bed481816ea3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -143,6 +143,8 @@ struct aq_stats_s {
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
+#define AQ_HW_MEDIA_DETECT_CNT 6000
+
enum aq_priv_flags {
AQ_HW_LOOPBACK_DMA_SYS,
AQ_HW_LOOPBACK_PKT_SYS,
@@ -386,6 +388,10 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+ int (*set_downshift)(struct aq_hw_s *self, u32 counter);
+
+ int (*set_media_detect)(struct aq_hw_s *self, bool enable);
+
u32 (*get_link_capabilities)(struct aq_hw_s *self);
int (*send_macsec_req)(struct aq_hw_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index c6bdf1d677d1..bf5e0e9bd0e2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -405,6 +405,10 @@ int aq_nic_init(struct aq_nic_s *self)
mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
+ /* Restore default settings */
+ aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
+ aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
+ AQ_HW_MEDIA_DETECT_CNT : 0);
err = self->aq_hw_ops->hw_init(self->aq_hw,
aq_nic_get_ndev(self)->dev_addr);
@@ -1159,7 +1163,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
default:
err = -1;
goto err_exit;
- break;
}
if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
err = -1;
@@ -1398,6 +1401,52 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
}
}
+int aq_nic_set_downshift(struct aq_nic_s *self, int val)
+{
+ int err = 0;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_fw_ops->set_downshift)
+ return -EOPNOTSUPP;
+
+ if (val > 15) {
+ netdev_err(self->ndev, "downshift counter should be <= 15\n");
+ return -EINVAL;
+ }
+ cfg->downshift_counter = val;
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
+ mutex_unlock(&self->fwreq_mutex);
+
+ return err;
+}
+
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int err = 0;
+
+ if (!self->aq_fw_ops->set_media_detect)
+ return -EOPNOTSUPP;
+
+ if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
+ netdev_err(self->ndev, "EDPD on this device could have only fixed value of %d\n",
+ AQ_HW_MEDIA_DETECT_CNT);
+ return -EINVAL;
+ }
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
+ mutex_unlock(&self->fwreq_mutex);
+
+ /* The msecs value plays no role - the configuration is always fixed in the PHY */
+ if (!err)
+ cfg->is_media_detect = !!val;
+
+ return err;
+}
+
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index eb7d8430f2f5..926cca9a0c83 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -62,6 +62,8 @@ struct aq_nic_cfg_s {
bool is_lro;
bool is_qos;
bool is_ptp;
+ bool is_media_detect;
+ int downshift_counter;
enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
@@ -195,6 +197,8 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_set_loopback(struct aq_nic_s *self);
+int aq_nic_set_downshift(struct aq_nic_s *self, int val);
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 8941ac4df9e3..9f1b15077e7d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1536,7 +1536,7 @@ static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-/**
+/*
* @brief Set VLAN filter table
* @details Configure VLAN filter table to accept (and assign the queue) traffic
* for the particular vlan ids.
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 93c06dfa6c55..ee0c22d04935 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -612,6 +612,41 @@ static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
+static int aq_fw2x_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ int err = 0;
+ u32 mpi_opts;
+ u32 offset;
+
+ offset = offsetof(struct hw_atl_utils_settings, downshift_retry_count);
+ err = hw_atl_write_fwsettings_dwords(self, offset, &counter, 1);
+ if (err)
+ return err;
+
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (counter)
+ mpi_opts |= HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ else
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return err;
+}
+
+static int aq_fw2x_set_media_detect(struct aq_hw_s *self, bool on)
+{
+ u32 enable;
+ u32 offset;
+
+ if (self->fw_ver_actual < HW_ATL_FW_VER_MEDIA_CONTROL)
+ return -EOPNOTSUPP;
+
+ offset = offsetof(struct hw_atl_utils_settings, media_detect);
+ enable = on;
+
+ return hw_atl_write_fwsettings_dwords(self, offset, &enable, 1);
+}
+
static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
{
int err = 0;
@@ -692,6 +727,8 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.enable_ptp = aq_fw3x_enable_ptp,
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
+ .set_downshift = aq_fw2x_set_downshift,
+ .set_media_detect = aq_fw2x_set_media_detect,
.adjust_ptp = aq_fw3x_adjust_ptp,
.get_link_capabilities = aq_fw2x_get_link_capabilities,
.send_macsec_req = aq_fw2x_send_macsec_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 85628acbcc1d..dd259c8f2f4f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -519,6 +519,18 @@ int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
return 0;
}
+static int aq_a2_fw_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.downshift = !!counter;
+ link_options.downshift_retry = counter;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
const struct aq_fw_ops aq_a2_fw_ops = {
.init = aq_a2_fw_init,
.deinit = aq_a2_fw_deinit,
@@ -536,4 +548,5 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_flow_control = aq_a2_fw_set_flow_control,
.get_flow_control = aq_a2_fw_get_flow_control,
.set_phyloopback = aq_a2_fw_set_phyloopback,
+ .set_downshift = aq_a2_fw_set_downshift,
};
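
Note the asymmetry between the two firmware tables: aq_fw_2x_ops gains both set_downshift and set_media_detect, while aq_a2_fw_ops gains only set_downshift. Since aq_nic_set_media_detect() bails out with -EOPNOTSUPP when the hook is absent, EDPD simply reports unsupported on A2 firmware rather than failing later in the request path.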
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 1c7736b7eaf7..800620b8f10d 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/**
- * emac_arc.c - ARC EMAC specific glue layer
+ * DOC: emac_arc.c - ARC EMAC specific glue layer
*
* Copyright (C) 2014 Romain Perier
*
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 38cce66ef212..dd5c8a9038bb 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -235,6 +235,59 @@
| NETIF_MSG_RX_ERR \
| NETIF_MSG_TX_ERR)
+struct ag71xx_statistic {
+ unsigned short offset;
+ u32 mask;
+ const char name[ETH_GSTRING_LEN];
+};
+
+static const struct ag71xx_statistic ag71xx_statistics[] = {
+ { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
+ { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
+ { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
+ { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
+ { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
+ { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
+ { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
+ { 0x009C, GENMASK(23, 0), "Rx Byte", },
+ { 0x00A0, GENMASK(17, 0), "Rx Packet", },
+ { 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
+ { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
+ { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
+ { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
+ { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
+ { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
+ { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
+ { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
+ { 0x00C4, GENMASK(11, 0), "Rx Code Error", },
+ { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
+ { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
+ { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
+ { 0x00D4, GENMASK(11, 0), "Rx Fragments", },
+ { 0x00D8, GENMASK(11, 0), "Rx Jabber", },
+ { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
+ { 0x00E0, GENMASK(23, 0), "Tx Byte", },
+ { 0x00E4, GENMASK(17, 0), "Tx Packet", },
+ { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
+ { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
+ { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
+ { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
+ { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
+ { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
+ { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
+ { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
+ { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
+ { 0x010C, GENMASK(12, 0), "Tx Total Collision", },
+ { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
+ { 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
+ { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
+ { 0x011C, GENMASK(11, 0), "Tx FCS Error", },
+ { 0x0120, GENMASK(11, 0), "Tx Control Frame", },
+ { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
+ { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
+ { 0x012C, GENMASK(11, 0), "Tx Fragment", },
+};
+
#define DESC_EMPTY BIT(31)
#define DESC_MORE BIT(24)
#define DESC_PKTLEN_M 0xfff
@@ -394,6 +447,99 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static void ag71xx_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ strlcpy(info->driver, "ag71xx", sizeof(info->driver));
+ strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ sizeof(info->bus_info));
+}
+
+static int ag71xx_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(ag->phylink, kset);
+}
+
+static int ag71xx_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(ag->phylink, kset);
+}
+
+static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_nway_reset(ag->phylink);
+}
+
+static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ phylink_ethtool_get_pauseparam(ag->phylink, pause);
+}
+
+static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_set_pauseparam(ag->phylink, pause);
+}
+
+static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void ag71xx_ethtool_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
+ *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
+ & ag71xx_statistics[i].mask;
+}
+
+static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(ag71xx_statistics);
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops ag71xx_ethtool_ops = {
+ .get_drvinfo = ag71xx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = ag71xx_get_link_ksettings,
+ .set_link_ksettings = ag71xx_set_link_ksettings,
+ .nway_reset = ag71xx_ethtool_nway_reset,
+ .get_pauseparam = ag71xx_ethtool_get_pauseparam,
+ .set_pauseparam = ag71xx_ethtool_set_pauseparam,
+ .get_strings = ag71xx_ethtool_get_strings,
+ .get_ethtool_stats = ag71xx_ethtool_get_stats,
+ .get_sset_count = ag71xx_ethtool_get_sset_count,
+};
+
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
struct net_device *ndev = ag->ndev;
@@ -910,6 +1056,8 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, MII);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
phylink_set(mask, Autoneg);
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
@@ -960,7 +1108,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- u32 cfg2;
+ u32 cfg1, cfg2;
u32 ifctl;
u32 fifo5;
@@ -994,6 +1142,15 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
+ cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
+ cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
+ if (tx_pause)
+ cfg1 |= MAC_CFG1_TFC;
+
+ if (rx_pause)
+ cfg1 |= MAC_CFG1_RFC;
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
+
ag71xx_hw_start(ag);
}
@@ -1769,6 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &ag71xx_netdev_ops;
+ ndev->ethtool_ops = &ag71xx_ethtool_ops;
INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index decab9a8e4a8..0c12cf7bda50 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -204,7 +204,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
/**
* atl1c_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list containing the adapter's phy-config timer
*/
static void atl1c_phy_config(struct timer_list *t)
{
@@ -220,7 +220,6 @@ static void atl1c_phy_config(struct timer_list *t)
void atl1c_reinit_locked(struct atl1c_adapter *adapter)
{
- WARN_ON(in_interrupt());
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
@@ -346,6 +345,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of hanging tx queue
*/
static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -826,16 +826,16 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
return;
if (buffer_info->dma) {
if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
- pci_driection = PCI_DMA_FROMDEVICE;
+ pci_driection = DMA_FROM_DEVICE;
else
- pci_driection = PCI_DMA_TODEVICE;
+ pci_driection = DMA_TO_DEVICE;
if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
}
if (buffer_info->skb)
dev_consume_skb_any(buffer_info->skb);
@@ -846,6 +846,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
+ * @type: type of transmit queue
*/
static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
@@ -933,9 +934,8 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_header.size,
- adapter->ring_header.desc,
- adapter->ring_header.dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tdp_ring.buffer_info,
@@ -1717,10 +1717,9 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- mapping = pci_map_single(pdev, vir_addr,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ mapping = dma_map_single(&pdev->dev, vir_addr,
+ buffer_info->length, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->length = 0;
@@ -1831,8 +1830,8 @@ rrs_checked:
rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
RRS_RX_RFD_INDEX_MASK;
buffer_info = &rfd_ring->buffer_info[rfd_index];
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
skb = buffer_info->skb;
} else {
/* TODO */
@@ -1863,6 +1862,8 @@ rrs_checked:
/**
* atl1c_clean - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: limit of packets to clean
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2106,10 +2107,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = map_len;
- buffer_info->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
@@ -2131,10 +2132,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
buffer_info->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- buffer_info->length, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len,
+ buffer_info->length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
@@ -2542,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
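
The atl1c hunks (and the atl1e/atl1 hunks that follow) are part of the tree-wide retirement of the legacy pci_* DMA wrappers in favor of the generic DMA API: the struct pci_dev argument becomes its embedded &pdev->dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, pci_(un)map_single/page become dma_(un)map_single/page, and pci_alloc/free_consistent become dma_alloc/free_coherent with an explicit GFP flag. A condensed sketch of the mapping side, assuming hypothetical names:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Old style (removed by these hunks):
	 *	dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	 *	if (pci_dma_mapping_error(pdev, dma)) ...
	 */
	static int demo_map_tx(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *dma)
	{
		*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *dma))
			return -ENOMEM;	/* nothing was mapped */
		return 0;
	}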
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 223ef846123e..098b0328e3cb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -111,7 +111,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
/**
* atl1e_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list containing the adapter's phy-config timer
*/
static void atl1e_phy_config(struct timer_list *t)
{
@@ -127,8 +127,6 @@ static void atl1e_phy_config(struct timer_list *t)
void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
-
- WARN_ON(in_interrupt());
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
atl1e_down(adapter);
@@ -196,7 +194,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
/**
* atl1e_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
+ * @work: work struct with driver info
*/
static void atl1e_link_chg_task(struct work_struct *work)
{
@@ -246,6 +244,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: the index of the hanging queue
*/
static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -654,11 +653,13 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[index];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
}
@@ -774,8 +775,8 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
if (adapter->ring_vir_addr) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
@@ -810,11 +811,12 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* real ring DMA buffer */
size = adapter->ring_size;
- adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev,
+ adapter->ring_size,
+ &adapter->ring_dma, GFP_KERNEL);
if (adapter->ring_vir_addr == NULL) {
netdev_err(adapter->netdev,
- "pci_alloc_consistent failed, size = D%d\n", size);
+ "dma_alloc_coherent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -870,8 +872,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
return 0;
failed:
if (adapter->ring_vir_addr != NULL) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
return err;
@@ -1233,11 +1235,15 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[next_to_clean];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
@@ -1495,6 +1501,8 @@ fatal_err:
/**
* atl1e_clean - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: number of packets to clean
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1710,8 +1718,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
tx_buffer->length = map_len;
- tx_buffer->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
+ tx_buffer->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
return -ENOSPC;
@@ -1739,8 +1748,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
MAX_TX_BUF_LEN : (buf_len - mapped_len);
tx_buffer->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- map_len, PCI_DMA_TODEVICE);
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len, map_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
/* We need to unwind the mappings we've done */
@@ -1749,8 +1759,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
while (adapter->tx_ring.next_to_use != ring_end) {
tpd = atl1e_get_tpd(adapter);
tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
}
/* Reset the tx rings next pointer */
adapter->tx_ring.next_to_use = ring_start;
@@ -2300,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b35fcfcd692d..eaf96d002fa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1050,11 +1050,11 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+ sizeof(struct stats_msg_block)
+ 40;
- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
if (netif_msg_drv(adapter))
- dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+ dev_err(&pdev->dev, "dma_alloc_coherent failed\n");
goto err_nomem;
}
@@ -1136,8 +1136,8 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
for (i = 0; i < rfd_ring->count; i++) {
buffer_info = &rfd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1175,8 +1175,8 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
for (i = 0; i < tpd_ring->count; i++) {
buffer_info = &tpd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
}
@@ -1217,8 +1217,8 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
kfree(tpd_ring->buffer_info);
- pci_free_consistent(pdev, ring_header->size, ring_header->desc,
- ring_header->dma);
+ dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc,
+ ring_header->dma);
tpd_ring->buffer_info = NULL;
tpd_ring->desc = NULL;
@@ -1866,9 +1866,9 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(pdev, page, offset,
+ buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -1992,8 +1992,8 @@ rrd_ok:
}
/* Good Receive */
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb = buffer_info->skb;
length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
@@ -2062,8 +2062,8 @@ static int atl1_intr_tx(struct atl1_adapter *adapter)
while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -2210,9 +2210,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2235,9 +2235,10 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
offset = offset_in_page(skb->data +
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
- buffer_info->dma = pci_map_page(adapter->pdev,
- page, offset, buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ page, offset,
+ buffer_info->length,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2247,8 +2248,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = buf_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
- offset, buf_len, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
+ offset, buf_len,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2550,7 +2552,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
/**
* atl1_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list containing pointer to netdev cast into an unsigned long
*/
static void atl1_phy_config(struct timer_list *t)
{
@@ -2922,7 +2924,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_dma;
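
The atl1 conversion keeps the driver's unwind-on-failure discipline: every streaming mapping is validated with dma_mapping_error(), and when a later mapping fails the ring is walked back and each earlier mapping released with the same size and direction it was created with. A hedged sketch of that pairing, with illustrative names:

    /* Sketch only: map one TX buffer for device reads. */
    static int example_map_tx(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOSPC;     /* caller unwinds prior mappings */
            return 0;
    }

    /* The matching release; length and direction must match the map call. */
    static void example_unmap_tx(struct device *dev, dma_addr_t handle,
                                 size_t len)
    {
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    }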
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index c915852b8892..7b80d924632a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -281,8 +281,8 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
adapter->txs_ring_size * 4 + 7 + /* dword align */
adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
- adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, size,
+ &adapter->ring_dma, GFP_KERNEL);
if (!adapter->ring_vir_addr)
return -ENOMEM;
@@ -663,8 +663,8 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
- adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
}
/**
@@ -994,6 +994,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hanging transmit queue
*/
static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -1005,7 +1006,7 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/**
* atl2_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer list containing a pointer to netdev cast into an unsigned long
*/
static void atl2_watchdog(struct timer_list *t)
{
@@ -1030,7 +1031,7 @@ static void atl2_watchdog(struct timer_list *t)
/**
* atl2_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer list containing a pointer to netdev cast into an unsigned long
*/
static void atl2_phy_config(struct timer_list *t)
{
@@ -1085,7 +1086,6 @@ err_up:
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
msleep(1);
atl2_down(adapter);
@@ -1235,6 +1235,7 @@ static int atl2_check_link(struct atl2_adapter *adapter)
/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
+ * @work: pointer to work struct with private info
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1328,8 +1329,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* until the kernel has the proper infrastructure to support 64-bit DMA
* on these devices.
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) &&
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
err = -EIO;
goto err_dma;
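
The @data to @t kernel-doc corrections in atl1 and atl2 track the earlier timer API conversion: callbacks now receive the struct timer_list itself and recover their enclosing structure with from_timer(), which is container_of() keyed on the timer field. A minimal sketch of the pattern those comments now describe, with illustrative names:

    #include <linux/timer.h>
    #include <linux/netdevice.h>

    struct example_adapter {
            struct net_device *netdev;
            struct timer_list phy_config_timer;
    };

    static void example_phy_config(struct timer_list *t)
    {
            struct example_adapter *adapter =
                    from_timer(adapter, t, phy_config_timer);

            netdev_dbg(adapter->netdev, "phy config timer fired\n");
    }

    /* registration:
     *   timer_setup(&adapter->phy_config_timer, example_phy_config, 0);
     */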
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 6fb620e25208..74c1778d841e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2210,12 +2210,12 @@ static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
- bool status_changed = 0;
+ bool status_changed = false;
BUG_ON(!phydev);
if (bp->old_link != phydev->link) {
- status_changed = 1;
+ status_changed = true;
bp->old_link = phydev->link;
}
@@ -2223,11 +2223,11 @@ static void b44_adjust_link(struct net_device *dev)
if (phydev->link) {
if ((phydev->duplex == DUPLEX_HALF) &&
(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags &= ~B44_FLAG_FULL_DUPLEX;
} else if ((phydev->duplex == DUPLEX_FULL) &&
!(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0762d5d1a810..0fdd19d99d99 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -20,6 +20,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
+#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -186,6 +187,11 @@ static int bcm_sysport_set_features(struct net_device *dev,
netdev_features_t features)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Read CRC forward */
if (!priv->is_lite)
@@ -197,6 +203,8 @@ static int bcm_sysport_set_features(struct net_device *dev,
bcm_sysport_set_rx_csum(dev, features);
bcm_sysport_set_tx_csum(dev, features);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -1940,6 +1948,8 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
+ clk_prepare_enable(priv->clk);
+
/* Reset UniMAC */
umac_reset(priv);
@@ -1970,7 +1980,8 @@ static int bcm_sysport_open(struct net_device *dev)
0, priv->phy_interface);
if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_clk_disable;
}
/* Reset house keeping link status */
@@ -2048,6 +2059,8 @@ out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
phy_disconnect(phydev);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2106,6 +2119,8 @@ static int bcm_sysport_stop(struct net_device *dev)
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -2487,6 +2502,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
@@ -2566,6 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!ret)
device_set_wakeup_capable(&pdev->dev, 1);
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+ if (IS_ERR(priv->wol_clk))
+ return PTR_ERR(priv->wol_clk);
+
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
@@ -2590,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
+ clk_prepare_enable(priv->clk);
+
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
"Broadcom SYSTEMPORT%s " REV_FMT
@@ -2598,6 +2623,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->irq0, priv->irq1, txq, rxq);
+ clk_disable_unprepare(priv->clk);
+
return 0;
err_deregister_notifier:
@@ -2751,8 +2778,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
bcm_sysport_fini_rx_ring(priv);
/* Get prepared for Wake-on-LAN */
- if (device_may_wakeup(d) && priv->wolopts)
+ if (device_may_wakeup(d) && priv->wolopts) {
+ clk_prepare_enable(priv->wol_clk);
ret = bcm_sysport_suspend_to_wol(priv);
+ }
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2767,6 +2798,10 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
+ clk_prepare_enable(priv->clk);
+ if (priv->wolopts)
+ clk_disable_unprepare(priv->wol_clk);
+
umac_reset(priv);
/* Disable the UniMAC RX/TX */
@@ -2846,6 +2881,7 @@ out_free_rx_ring:
out_free_tx_rings:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
+ clk_disable_unprepare(priv->clk);
return ret;
}
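
The SYSTEMPORT changes bracket every register-touching path with clk_prepare_enable()/clk_disable_unprepare(). What makes this safe on all platforms is that devm_clk_get_optional() returns NULL rather than an error when no such clock is described for the device, and the clk_* calls treat a NULL clk as a no-op, so the same code runs with or without a gated clock. Sketch of the pattern, with hypothetical helper names:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int example_get_clk(struct platform_device *pdev, struct clk **clk)
    {
            *clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
            if (IS_ERR(*clk))
                    return PTR_ERR(*clk);   /* real failure, e.g. -EPROBE_DEFER */
            return 0;                       /* *clk may be NULL: clk_* are no-ops */
    }

    static int example_touch_registers(struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);

            if (ret)
                    return ret;
            /* ... register access is safe while the clock is running ... */
            clk_disable_unprepare(clk);
            return 0;
    }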
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 6d80735fbc7f..3a5cb6f128f5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -770,6 +770,8 @@ struct bcm_sysport_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
+ struct clk *clk;
+ struct clk *wol_clk;
/* MIB related fields */
struct bcm_sysport_mib mib;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e3d92e4f2193..1a6ec1a12d53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -504,6 +504,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* @len_on_bd: total length of the first packet for the
* aggregation.
* @pkt_len: length of all segments
+ * @num_of_coalesced_segs: count of segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
@@ -1958,6 +1959,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
* bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
*
* @bp: Driver handle
+ * @include_cnic: handle cnic case
*
* We currently support for at most 16 Tx queues for each CoS thus we will
* allocate a multiple of 16 for ETH L2 rings according to the value of the
@@ -4229,8 +4231,8 @@ void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
/**
* bnx2x_setup_tc - routine to configure net_device for multi tc
*
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
+ * @dev: net device to configure
+ * @num_tc: number of traffic classes to enable
*
* callback connected to the ndo_setup_tc function pointer
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7e4c93be4451..d8b1824c334d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -825,9 +825,9 @@ static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
int i;
for_each_rx_queue_cnic(bp, i) {
- napi_hash_del(&bnx2x_fp(bp, i, napi));
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+ synchronize_net();
}
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
@@ -835,9 +835,9 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
int i;
for_each_eth_queue(bp, i) {
- napi_hash_del(&bnx2x_fp(bp, i, napi));
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+ synchronize_net();
}
int bnx2x_set_int_mode(struct bnx2x *bp);
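
__netif_napi_del() unregisters the NAPI instance, including the busy-poll hash entry that the separate napi_hash_del() call used to remove, but leaves the RCU grace period to the caller. Batching the deletions and issuing a single synchronize_net() afterwards, as the bnx2x hunks above do, replaces one grace-period wait per queue with one per device, which should matter on adapters with many rings. A minimal sketch under that reading:

    /* Delete every NAPI context, then wait once for RCU readers. */
    static void example_del_all_napi(struct napi_struct *napis, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    __netif_napi_del(&napis[i]);    /* no per-call wait */
            synchronize_net();                      /* one wait covers all */
    }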
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 7cea33803f7f..32245bbe88a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -839,8 +839,9 @@ static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
/**
* bnx2x_read_pages_regs - read "paged" registers
*
- * @bp device handle
- * @p output buffer
+ * @bp: device handle
+ * @p: output buffer
+ * @preset: the preset value
*
* Reads "paged" memories: memories that may only be read by first writing to a
* specific address ("write address") and then reading from a specific address
@@ -3561,6 +3562,7 @@ static void bnx2x_get_channels(struct net_device *dev,
* bnx2x_change_num_queues - change the number of RSS queues.
*
* @bp: bnx2x private structure
+ * @num_rss: rss count
*
* Re-configure interrupt mode to get the new number of MSI-X
* vectors and re-add NAPI objects.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3c543dd7a8f3..28069b290862 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3086,9 +3086,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
/**
* bnx2x_get_common_flags - Return common flags
*
- * @bp device handle
- * @fp queue handle
- * @zero_stats TRUE if statistics zeroing is needed
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
*
* Return the flags that are common for the Tx-only and not normal connections.
*/
@@ -6313,11 +6313,11 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
case FW_MSG_CODE_DRV_LOAD_COMMON:
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
bnx2x_init_internal_common(bp);
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
/* nothing to do */
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
/* internal memory per function is
@@ -12390,7 +12390,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
}
if (CHIP_IS_E1(bp))
- bp->dropless_fc = 0;
+ bp->dropless_fc = false;
else
bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
@@ -13591,8 +13591,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
/**
* bnx2x_get_num_none_def_sbs - return the number of none default SBs
- *
- * @dev: pci device
+ * @pdev: pci device
+ * @cnic_cnt: count
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
@@ -14451,9 +14451,7 @@ module_exit(bnx2x_cleanup);
/**
* bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
- *
* @bp: driver handle
- * @set: set or clear the CAM entry
*
* This function will wait until the ramrod completion returns.
* Return 0 if success, -ENODEV if ramrod doesn't return.
@@ -15412,7 +15410,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
return -EINVAL;
}
- bp->hwtstamp_ioctl_called = 1;
+ bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
@@ -15494,7 +15492,7 @@ void bnx2x_init_ptp(struct bnx2x *bp)
bnx2x_init_cyclecounter(bp);
timecounter_init(&bp->timecounter, &bp->cyclecounter,
ktime_to_ns(ktime_get_real()));
- bp->timecounter_init_done = 1;
+ bp->timecounter_init_done = true;
}
DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
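
Replacing the '/* no break */' comments with the fallthrough pseudo-keyword (from linux/compiler_attributes.h) turns documented intent into something the compiler verifies: with -Wimplicit-fallthrough enabled tree-wide, any case that drops through without the annotation now warns. Minimal illustration; the two helpers are placeholders:

    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
            example_init_common();          /* placeholder */
            fallthrough;                    /* silences -Wimplicit-fallthrough */
    case FW_MSG_CODE_DRV_LOAD_PORT:
            example_init_port();            /* placeholder */
            break;
    default:
            break;
    }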
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index e26f4da5a6d7..6cd1523ad9e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -37,10 +37,12 @@
/**
* bnx2x_exe_queue_init - init the Exe Queue object
*
+ * @bp: driver handle
* @o: pointer to the object
* @exe_len: length
* @owner: pointer to the owner
* @validate: validate function pointer
+ * @remove: remove function pointer
* @optimize: optimize function pointer
* @exec: execute function pointer
* @get: get function pointer
@@ -103,7 +105,7 @@ static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
*
* @bp: driver handle
* @o: queue
- * @cmd: new command to add
+ * @elem: new command to add
* @restore: true - do not optimize the command
*
* If the element is optimized or is illegal, frees it.
@@ -277,7 +279,7 @@ static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
*
* @bp: device handle
* @state: state which is to be cleared
- * @state_p: state buffer
+ * @pstate: state buffer
*
*/
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
@@ -424,8 +426,8 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
* @bp: device handle
* @o: vlan_mac object
*
- * @details: Non-blocking implementation; should be called under execution
- * queue lock.
+ * Context: Non-blocking implementation; should be called under execution
+ * queue lock.
*/
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
@@ -445,7 +447,7 @@ static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock; notice it might release
+ * Context: Should be called under execution queue lock; notice it might release
* and reclaim it during its run.
*/
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
@@ -475,7 +477,7 @@ static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
* @o: vlan_mac object
* @ramrod_flags: ramrod flags of missed execution
*
- * @details Should be called under execution queue lock.
+ * Context: Should be called under execution queue lock.
*/
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
@@ -493,7 +495,7 @@ static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock. Notice if a pending
+ * Context: Should be called under execution queue lock. Notice if a pending
* execution exists, it would perform it - possibly releasing and
* reclaiming the execution queue lock.
*/
@@ -516,7 +518,7 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under the execution queue lock. May sleep. May
+ * Context: Should be called under the execution queue lock. May sleep. May
* release and reclaim execution queue lock during its run.
*/
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
@@ -536,7 +538,7 @@ static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details May sleep. Claims and releases execution queue lock during its run.
+ * Context: May sleep. Claims and releases execution queue lock during its run.
*/
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
@@ -556,7 +558,7 @@ int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock. Notice if a pending
+ * Context: Should be called under execution queue lock. Notice if a pending
* execution exists, it would be performed if this was the last
* reader. possibly releasing and reclaiming the execution queue lock.
*/
@@ -591,7 +593,7 @@ static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Notice if a pending execution exists, it would be performed if this
+ * Context: Notice if a pending execution exists, it would be performed if this
* was the last reader. Claims and releases the execution queue lock
* during its run.
*/
@@ -968,7 +970,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
*
* @bp: device handle
* @o: queue
- * @type:
+ * @type: the type of echo
* @cam_offset: offset in cam memory
* @hdr: pointer to a header to setup
*
@@ -1608,8 +1610,8 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
*
* @bp: device handle
* @o: bnx2x_vlan_mac_obj
- * @cqe:
- * @cont: if true schedule next execution chunk
+ * @cqe: completion element
+ * @ramrod_flags: if set schedule next execution chunk
*
*/
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
@@ -1656,7 +1658,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
* bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
*
* @bp: device handle
- * @o: bnx2x_qable_obj
+ * @qo: bnx2x_qable_obj
* @elem: bnx2x_exeq_elem
*/
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
@@ -1714,10 +1716,10 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
* bnx2x_vlan_mac_get_registry_elem - prepare a registry element
*
* @bp: device handle
- * @o:
- * @elem:
- * @restore:
- * @re:
+ * @o: vlan object
+ * @elem: element
+ * @restore: to restore or not
+ * @re: registry
*
* prepare a registry element according to the current command request.
*/
@@ -1768,9 +1770,9 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
* bnx2x_execute_vlan_mac - execute vlan mac command
*
* @bp: device handle
- * @qo:
- * @exe_chunk:
- * @ramrod_flags:
+ * @qo: bnx2x_qable_obj pointer
+ * @exe_chunk: chunk
+ * @ramrod_flags: flags
*
* go and send a ramrod!
*/
@@ -2006,8 +2008,8 @@ int bnx2x_config_vlan_mac(struct bnx2x *bp,
* bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
*
* @bp: device handle
- * @o:
- * @vlan_mac_flags:
+ * @o: vlan object info
+ * @vlan_mac_flags: vlan flags
* @ramrod_flags: execution flags to be used for this deletion
*
* if the last operation has completed successfully and there are no
@@ -2767,7 +2769,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
/**
* bnx2x_mcast_get_next_bin - get the next set bin (index)
*
- * @o:
+ * @o: multicast object info
* @last: index to start looking from (including)
*
* returns the next found (set) bin or a negative value if none is found.
@@ -2892,7 +2894,7 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
* bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
*
* @bp: device handle
- * @o:
+ * @o: multicast object info
* @start_bin: index in the registry to start from (including)
* @rdata_idx: index in the ramrod data to start from
*
@@ -3202,11 +3204,11 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
}
/**
- * bnx2x_mcast_handle_current_cmd -
+ * bnx2x_mcast_handle_current_cmd - send command if room
*
* @bp: device handle
- * @p:
- * @cmd:
+ * @p: ramrod mcast info
+ * @cmd: command
* @start_cnt: first line in the ramrod data that may be used
*
* This function is called iff there is enough place for the current command in
@@ -3323,7 +3325,7 @@ static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
* bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
*
* @bp: device handle
- * @p:
+ * @p: ramrod parameters
* @len: number of rules to handle
*/
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
@@ -3684,7 +3686,7 @@ static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
* bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
*
* @bp: device handle
- * @p:
+ * @p: ramrod parameters
* @len: number of rules to handle
*/
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
@@ -3711,7 +3713,7 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
* bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
*
* @bp: device handle
- * @o:
+ * @o: multicast info
* @start_idx: index in the registry to start from
* @rdata_idx: index in the ramrod data to start from
*
@@ -3798,10 +3800,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
/**
* bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
*
- * @fw_hi:
- * @fw_mid:
- * @fw_lo:
- * @mac:
+ * @fw_hi: address
+ * @fw_mid: address
+ * @fw_lo: address
+ * @mac: mac address
*/
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
__le16 *fw_lo, u8 *mac)
@@ -3818,7 +3820,7 @@ static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
* bnx2x_mcast_refresh_registry_e1 -
*
* @bp: device handle
- * @cnt:
+ * @o: multicast info
*
* Check the ramrod data first entry flag to see if it's a DELETE or ADD command
* and update the registry correspondingly: if ADD - allocate a memory and add
@@ -4311,7 +4313,7 @@ static bool bnx2x_credit_pool_get_entry_always_true(
/**
* bnx2x_init_credit_pool - initialize credit pool internals.
*
- * @p:
+ * @p: credit pool
* @base: Base entry in the CAM to use.
* @credit: pool size.
*
@@ -4725,8 +4727,8 @@ static int bnx2x_queue_wait_comp(struct bnx2x *bp,
* bnx2x_queue_comp_cmd - complete the state change command.
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: queue info
+ * @cmd: command to exec
*
* Checks that the arrived completion is expected.
*/
@@ -5477,8 +5479,8 @@ static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
* bnx2x_queue_chk_transition - check state machine of a regular Queue
*
* @bp: device handle
- * @o:
- * @params:
+ * @o: queue info
+ * @params: queue state
*
* (not Forwarding)
* It both checks if the requested command is legal in a current
@@ -5735,8 +5737,8 @@ static int bnx2x_func_wait_comp(struct bnx2x *bp,
* bnx2x_func_state_change_comp - complete the state machine transition
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: function info
+ * @cmd: more info
*
* Called on state change transition. Completes the state
* machine transition only - no HW interaction.
@@ -5776,8 +5778,8 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
* bnx2x_func_comp_cmd - complete the state change command
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: function info
+ * @cmd: more info
*
* Checks that the arrived completion is expected.
*/
@@ -5796,8 +5798,8 @@ static int bnx2x_func_comp_cmd(struct bnx2x *bp,
* bnx2x_func_chk_transition - perform function state machine transition
*
* @bp: device handle
- * @o:
- * @params:
+ * @o: function info
+ * @params: state parameters
*
* It both checks if the requested command is legal in a current
* state and, if it's legal, sets a `next_state' in the object
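
The bnx2x_sp.c hunks are pure kernel-doc repairs: scripts/kernel-doc requires each parameter as '@name: description' (a bare '@name:' yields a 'no description found' warning, and '@details' is parsed as a parameter that does not exist), while locking notes belong under the free-form 'Context:' section. The shape these comment blocks converge on, shown generically rather than for any one function:

    /**
     * example_func - one-line summary of what the function does
     * @bp: device handle
     * @o:  object the operation acts on
     *
     * Context: Should be called under the execution queue lock. May sleep.
     *
     * Return: 0 on success, negative errno otherwise.
     */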
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b7e8b7883c8..7975f59735d6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -254,6 +255,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -1158,21 +1160,14 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
schedule_work(&bp->sp_task);
}
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- flush_workqueue(bnxt_pf_wq);
- } else {
- cancel_work_sync(&bp->sp_task);
- cancel_delayed_work_sync(&bp->fw_reset_task);
- }
-}
-
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
@@ -1738,8 +1733,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
- netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ /* 0xffff is forced error, don't print it */
+ if (rxr->rx_next_cons != 0xffff)
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
@@ -1772,9 +1769,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- netdev_warn(bp->dev, "RX buffer error %x\n",
- rx_err);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
+ netdev_warn_once(bp->dev, "RX buffer error %x\n",
+ rx_err);
bnxt_sched_reset(bp, rxr);
}
}
@@ -1941,19 +1939,43 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
return val;
}
+static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 grp_idx = bp->rx_ring[i].bnapi->index;
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[grp_idx];
+ if (grp_info->agg_fw_ring_id == ring_id)
+ return grp_idx;
+ }
+ return INVALID_HW_RING_ID;
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+#define BNXT_EVENT_RING_TYPE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
+
+#define BNXT_EVENT_RING_TYPE_RX(data2) \
+ (BNXT_EVENT_RING_TYPE(data2) == \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u32 data2 = le32_to_cpu(cmpl->event_data2);
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
if (BNXT_VF(bp))
@@ -1983,7 +2005,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
if (BNXT_VF(bp))
@@ -2000,9 +2021,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
-
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+ if (netif_msg_hw(bp))
+ netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2022,10 +2044,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
- }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
- u32 data1 = le32_to_cpu(cmpl->event_data1);
if (!fw_health)
goto async_event_process_exit;
@@ -2052,6 +2072,28 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+ struct bnxt_rx_ring_info *rxr;
+ u16 grp_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto async_event_process_exit;
+
+ netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
+ BNXT_EVENT_RING_TYPE(data2), data1);
+ if (!BNXT_EVENT_RING_TYPE_RX(data2))
+ goto async_event_process_exit;
+
+ grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
+ if (grp_idx == INVALID_HW_RING_ID) {
+ netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
+ data1);
+ goto async_event_process_exit;
+ }
+ rxr = bp->bnapi[grp_idx]->rx_ring;
+ bnxt_sched_reset(bp, rxr);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2250,7 +2292,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
bnapi->tx_pkts = 0;
}
- if (bnapi->events & BNXT_RX_EVENT) {
+ if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
if (bnapi->events & BNXT_AGG_EVENT)
@@ -2540,93 +2582,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
- int i, max_idx, max_agg_idx;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
-
- if (!bp->rx_ring)
- return;
+ struct bnxt_tpa_idx_map *map;
+ int i, max_idx, max_agg_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_tpa_idx_map *map;
- int j;
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- struct bnxt_tpa_info *tpa_info =
- &rxr->rx_tpa[j];
- u8 *data = tpa_info->data;
-
- if (!data)
- continue;
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
- dma_unmap_single_attrs(&pdev->dev,
- tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ if (!data)
+ continue;
- tpa_info->data = NULL;
+ dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
- }
+ tpa_info->data = NULL;
- for (j = 0; j < max_idx; j++) {
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- dma_addr_t mapping = rx_buf->mapping;
- void *data = rx_buf->data;
+ kfree(data);
+ }
- if (!data)
- continue;
+skip_rx_tpa_free:
+ for (i = 0; i < max_idx; i++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ dma_addr_t mapping = rx_buf->mapping;
+ void *data = rx_buf->data;
- rx_buf->data = NULL;
+ if (!data)
+ continue;
- if (BNXT_RX_PAGE_MODE(bp)) {
- mapping -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
+ rx_buf->data = NULL;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ mapping -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ page_pool_recycle_direct(rxr->page_pool, data);
+ } else {
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ kfree(data);
}
+ }
+ for (i = 0; i < max_agg_idx; i++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+ struct page *page = rx_agg_buf->page;
- for (j = 0; j < max_agg_idx; j++) {
- struct bnxt_sw_rx_agg_bd *rx_agg_buf =
- &rxr->rx_agg_ring[j];
- struct page *page = rx_agg_buf->page;
-
- if (!page)
- continue;
+ if (!page)
+ continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
- rx_agg_buf->page = NULL;
- __clear_bit(j, rxr->rx_agg_bmap);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
- }
- if (rxr->rx_page) {
- __free_page(rxr->rx_page);
- rxr->rx_page = NULL;
- }
- map = rxr->rx_tpa_idx_map;
- if (map)
- memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+ __free_page(page);
}
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
+ }
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->rx_ring)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ bnxt_free_one_rx_ring_skbs(bp, i);
}
static void bnxt_free_skbs(struct bnxt *bp)
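
Pulling the per-ring body out of bnxt_free_rx_skbs() into bnxt_free_one_rx_ring_skbs() is not just cleanup: the RX ring reset path added later in this patch must free and refill exactly one ring's buffers while every other ring stays live. Sketch of how the single-ring path reuses it (simplified; see bnxt_alloc_one_rx_ring() and bnxt_rx_ring_reset() further down):

    /* Recover one RX ring without touching the others (sketch only). */
    static void example_reset_one_ring(struct bnxt *bp, int ring_nr)
    {
            struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];

            bnxt_free_one_rx_ring_skbs(bp, ring_nr);   /* drop stale buffers */
            rxr->rx_prod = 0;                          /* rewind the producer */
            bnxt_alloc_one_rx_ring(bp, ring_nr);       /* refill the ring */
    }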
@@ -3165,31 +3205,16 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct net_device *dev = bp->dev;
- struct bnxt_rx_ring_info *rxr;
- struct bnxt_ring_struct *ring;
- u32 prod, type;
+ u32 prod;
int i;
- type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
-
- if (NET_IP_ALIGN == 2)
- type |= RX_BD_FLAGS_SOP;
-
- rxr = &bp->rx_ring[ring_nr];
- ring = &rxr->rx_ring_struct;
- bnxt_init_rxbd_pages(ring, type);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3197,22 +3222,13 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
- ring->fw_ring_id = INVALID_HW_RING_ID;
-
- ring = &rxr->rx_agg_ring_struct;
- ring->fw_ring_id = INVALID_HW_RING_ID;
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
-
- bnxt_init_rxbd_pages(ring, type);
-
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3221,30 +3237,58 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
}
rxr->rx_agg_prod = prod;
- if (bp->flags & BNXT_FLAG_TPA) {
- if (rxr->rx_tpa) {
- u8 *data;
- dma_addr_t mapping;
+ if (rxr->rx_tpa) {
+ dma_addr_t mapping;
+ u8 *data;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
- } else {
- netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
- return -ENOMEM;
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
}
-
return 0;
}
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 type;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bp->rx_ring[ring_nr];
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+ }
+
+ return bnxt_alloc_one_rx_ring(bp, ring_nr);
+}
+
static void bnxt_init_cp_rings(struct bnxt *bp)
{
int i, j;
@@ -4269,6 +4313,8 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
switch (hwrm_err) {
case HWRM_ERR_CODE_SUCCESS:
return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
return -EACCES;
case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
@@ -4306,7 +4352,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (BNXT_NO_FW_ACCESS(bp))
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -5343,13 +5390,16 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (BNXT_CHIP_P5_THOR(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
- if (bp->max_tpa_v2)
- bp->hw_ring_stats_size =
- sizeof(struct ctx_hw_stats_ext);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
+ else
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ }
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6639,6 +6689,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+ if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
+ bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -7333,6 +7385,77 @@ hwrm_cfa_adv_qcaps_exit:
return rc;
}
+static int __bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ if (bp->fw_health)
+ return 0;
+
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+ if (!bp->fw_health)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ rc = __bnxt_alloc_fw_health(bp);
+ if (rc) {
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
+{
+ writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+}
+
+static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
+{
+ void __iomem *hs;
+ u32 status_loc;
+ u32 reg_type;
+ u32 sig;
+
+ __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
+ hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
+
+ sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
+ if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+
+ if (__bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware status checks\n");
+ return;
+ }
+
+ status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
+ bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
+ __bnxt_map_fw_health_reg(bp, status_loc);
+ bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
+ BNXT_FW_HEALTH_WIN_OFF(status_loc);
+ }
+
+ bp->fw_health->status_reliable = true;
+}
+
static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -7349,14 +7472,12 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
reg_base = reg & BNXT_GRC_BASE_MASK;
if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
return -ERANGE;
- fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
- (reg & BNXT_GRC_OFFSET_MASK);
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
if (reg_base == 0xffffffff)
return 0;
- writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
- BNXT_FW_HEALTH_WIN_MAP_OFF);
+ __bnxt_map_fw_health_reg(bp, reg_base);
return 0;
}
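
__bnxt_map_fw_health_reg() programs a GRC window so that a 32-bit firmware register at an arbitrary GRC address becomes readable through a fixed BAR0 aperture; BNXT_FW_HEALTH_WIN_OFF(reg) then turns the register's low bits into an offset inside that window. A hedged sketch of the read side, assuming the window macros behave as they are used in the hunks above:

    /* Sketch: read a firmware register through the mapped GRC window. */
    static u32 example_read_fw_reg(struct bnxt *bp, u32 reg)
    {
            /* point the window at the aligned GRC base of 'reg' */
            writel(reg & BNXT_GRC_BASE_MASK,
                   bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
                   BNXT_FW_HEALTH_WIN_MAP_OFF);
            /* then read it back at the in-window offset */
            return readl(bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(reg));
    }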
@@ -7432,6 +7553,16 @@ static int bnxt_hwrm_func_reset(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}
+static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
+ snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
int rc = 0;
@@ -8635,10 +8766,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- napi_hash_del(&bnapi->napi);
- netif_napi_del(&bnapi->napi);
+ __netif_napi_del(&bnapi->napi);
}
- /* We called napi_hash_del() before netif_napi_del(), we need
+ /* We called __netif_napi_del(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -8694,14 +8824,19 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
- bp->bnapi[i]->in_reset = false;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = &bnapi->cp_ring;
+ if (bnapi->in_reset)
+ cpr->sw_stats.rx.rx_resets++;
+ bnapi->in_reset = false;
- if (bp->bnapi[i]->rx_ring) {
+ if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bp->bnapi[i]->napi);
+ napi_enable(&bnapi->napi);
}
}
@@ -8735,6 +8870,30 @@ void bnxt_tx_enable(struct bnxt *bp)
netif_carrier_on(bp->dev);
}
+static char *bnxt_report_fec(struct bnxt_link_info *link_info)
+{
+ u8 active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+
+ switch (active_fec) {
+ default:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ return "None";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ return "Clause 74 BaseR";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ return "Clause 91 RS(528,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ return "Clause 91 RS544_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ return "Clause 91 RS(544,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ return "Clause 91 RS272_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ return "Clause 91 RS(272,257)";
+ }
+}
+
static void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
@@ -8744,6 +8903,11 @@ static void bnxt_report_link(struct bnxt *bp)
u16 fec;
netif_carrier_on(bp->dev);
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (speed == SPEED_UNKNOWN) {
+ netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
+ return;
+ }
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
duplex = "full";
else
@@ -8756,7 +8920,6 @@ static void bnxt_report_link(struct bnxt *bp)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
- speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
@@ -8765,16 +8928,25 @@ static void bnxt_report_link(struct bnxt *bp)
"not active");
fec = bp->link_info.fec_cfg;
if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
- netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
- (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
- (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
+ bnxt_report_fec(&bp->link_info));
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
+{
+ if (!resp->supported_speeds_auto_mode &&
+ !resp->supported_speeds_force_mode &&
+ !resp->supported_pam4_speeds_auto_mode &&
+ !resp->supported_pam4_speeds_force_mode)
+ return true;
+ return false;
+}
+
static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -8822,9 +8994,24 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
+ if (bp->hwrm_spec_code >= 0x10a01) {
+ if (bnxt_phy_qcaps_no_speed(resp)) {
+ link_info->phy_state = BNXT_PHY_STATE_DISABLED;
+ netdev_warn(bp->dev, "Ethernet link disabled\n");
+ } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
+ link_info->phy_state = BNXT_PHY_STATE_ENABLED;
+ netdev_info(bp->dev, "Ethernet link enabled\n");
+ /* Phy re-enabled, reprobe the speeds */
+ link_info->support_auto_speeds = 0;
+ link_info->support_pam4_auto_speeds = 0;
+ }
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_pam4_speeds_auto_mode)
+ link_info->support_pam4_auto_speeds =
+ le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
bp->port_count = resp->port_cnt;
@@ -8833,14 +9020,21 @@ hwrm_phy_qcaps_exit:
return rc;
}
-static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+static bool bnxt_support_dropped(u16 advertising, u16 supported)
+{
+ u16 diff = advertising ^ supported;
+
+ return ((supported | diff) != supported);
+}
+
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
- u16 diff;
+ bool support_changed = false;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
@@ -8867,10 +9061,17 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->force_pam4_link_speed =
+ le16_to_cpu(resp->force_pam4_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->auto_pam4_link_speeds =
+ le16_to_cpu(resp->auto_pam4_link_speed_mask);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
+ link_info->lp_auto_pam4_link_speeds =
+ resp->link_partner_pam4_adv_speeds;
link_info->preemphasis = le32_to_cpu(resp->preemphasis);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
@@ -8919,9 +9120,10 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
- if (bp->hwrm_spec_code >= 0x10504)
+ if (bp->hwrm_spec_code >= 0x10504) {
link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
-
+ link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -8939,17 +9141,21 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
- diff = link_info->support_auto_speeds ^ link_info->advertising;
- if ((link_info->support_auto_speeds | diff) !=
- link_info->support_auto_speeds) {
- /* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. Caller holds RTNL
- * so we can modify link settings.
- */
+ /* Check if any advertised speeds are no longer supported. The caller
+ * holds the link_lock mutex, so we can modify link_info settings.
+ */
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
- if (link_info->autoneg & BNXT_AUTONEG_SPEED)
- bnxt_hwrm_set_link_setting(bp, true, false);
+ support_changed = true;
}
+ if (bnxt_support_dropped(link_info->advertising_pam4,
+ link_info->support_pam4_auto_speeds)) {
+ link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+ support_changed = true;
+ }
+ if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ bnxt_hwrm_set_link_setting(bp, true, false);
return 0;
}
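
bnxt_support_dropped() is a bit-set containment test: the XOR picks out bits where the two masks differ, and OR-ing those back into 'supported' changes it only when 'advertising' carries a bit that 'supported' lacks, i.e. it is equivalent to (advertising & ~supported) != 0. A worked example with small masks:

    /* advertising = 0b0110, supported = 0b0100
     * diff = advertising ^ supported          -> 0b0010
     * supported | diff = 0b0110 != supported (0b0100)
     * => true: an advertised bit was dropped from the supported mask,
     *    matching (advertising & ~supported) = 0b0010 != 0
     */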
@@ -9008,27 +9214,30 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
}
}
-static void bnxt_hwrm_set_link_common(struct bnxt *bp,
- struct hwrm_port_phy_cfg_input *req)
+static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
- u8 autoneg = bp->link_info.autoneg;
- u16 fw_link_speed = bp->link_info.req_link_speed;
- u16 advertising = bp->link_info.advertising;
-
- if (autoneg & BNXT_AUTONEG_SPEED) {
- req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
-
- req->enables |= cpu_to_le32(
- PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
- req->auto_link_speed_mask = cpu_to_le16(advertising);
-
+ if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
+ if (bp->link_info.advertising) {
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
+ }
+ if (bp->link_info.advertising_pam4) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
+ req->auto_link_pam4_speed_mask =
+ cpu_to_le16(bp->link_info.advertising_pam4);
+ }
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
- req->flags |=
- cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
- req->force_link_speed = cpu_to_le16(fw_link_speed);
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
+ } else {
+ req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ }
}
/* tell chimp that the setting takes effect immediately */
@@ -9424,14 +9633,19 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
- if (link_info->req_link_speed != link_info->force_link_speed)
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+ link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+ link_info->req_link_speed != link_info->force_pam4_link_speed)
update_link = true;
if (link_info->req_duplex != link_info->duplex_setting)
update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
- if (link_info->advertising != link_info->auto_link_speeds)
+ if (link_info->advertising != link_info->auto_link_speeds ||
+ link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
update_link = true;
}
@@ -9566,7 +9780,10 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
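+	/* Do not reopen the NIC if the driver has aborted on a fatal error */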
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+ rc = -EIO;
+ if (!rc)
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
dev_close(bp->dev);
@@ -10362,6 +10579,23 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
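+/* Reset one RX ring in firmware. The ring is addressed by its ring
+ * group ID, and the request is issued on the ring's completion ring.
+ */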
+static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ struct hwrm_ring_reset_input req = {0};
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+ u16 cp_ring_id;
+
+ cpr = &bnapi->cp_ring;
+ cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
+ req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
+ req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
if (!silent)
@@ -10497,6 +10731,55 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_rx_ring_reset(struct bnxt *bp)
+{
+ int i;
+
+ bnxt_rtnl_lock_sp(bp);
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bnxt_rtnl_unlock_sp(bp);
+ return;
+ }
+ /* Disable and flush TPA before resetting the RX ring */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ if (!rxr->bnapi->in_reset)
+ continue;
+
+ rc = bnxt_hwrm_rx_ring_reset(bp, i);
+ if (rc) {
+ if (rc == -EINVAL || rc == -EOPNOTSUPP)
+ netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
+ else
+ netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
+ rc);
+ bnxt_reset_task(bp, true);
+ break;
+ }
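+		/* Ring reset succeeded: free all posted buffers, rewind the
+		 * producer indices, and repopulate the ring.
+		 */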
+ bnxt_free_one_rx_ring_skbs(bp, i);
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ rxr->rx_next_cons = 0;
+ rxr->bnapi->in_reset = false;
+ bnxt_alloc_one_rx_ring(bp, i);
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats.rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ }
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -10691,8 +10974,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
}
link_info->advertising = link_info->auto_link_speeds;
+ link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
} else {
link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ if (link_info->force_pam4_link_speed) {
+ link_info->req_link_speed =
+ link_info->force_pam4_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ }
link_info->req_duplex = link_info->duplex_setting;
}
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -10778,6 +11068,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
+ if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
+ bnxt_rx_ring_reset(bp);
+
if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
@@ -10882,21 +11175,19 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static void bnxt_alloc_fw_health(struct bnxt *bp)
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
- if (bp->fw_health)
- return;
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
- !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- return;
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
- bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
- if (!bp->fw_health) {
- netdev_warn(bp->dev, "Failed to allocate fw_health\n");
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- }
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
}
static int bnxt_fw_init_one_p1(struct bnxt *bp)
@@ -10905,8 +11196,24 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
- if (rc)
- return rc;
+ bnxt_try_map_fw_health_reg(bp);
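+	/* If the firmware is not responding, consult the health status
+	 * register; for a crashed firmware with no master function,
+	 * attempt recovery via OP-TEE and retry the version query.
+	 */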
+ if (rc) {
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ rc = bnxt_fw_reset_via_optee(bp);
+ if (!rc)
+ rc = bnxt_hwrm_ver_get(bp);
+ }
+ }
+ if (rc)
+ return rc;
+ }
if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
rc = bnxt_alloc_kong_hwrm_resources(bp);
@@ -10920,6 +11227,8 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
if (rc)
return rc;
}
+ bnxt_nvm_cfg_ver_get(bp);
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
return -ENODEV;
@@ -10945,11 +11254,14 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
- bnxt_alloc_fw_health(bp);
- rc = bnxt_hwrm_error_recovery_qcfg(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
- rc);
+ if (bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware error recovery\n");
+ } else {
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+ }
rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
if (rc)
@@ -11075,12 +11387,8 @@ static void bnxt_reset_all(struct bnxt *bp)
int i, rc;
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
-#ifdef CONFIG_TEE_BNXT_FW
- rc = tee_bnxt_fw_load();
- if (rc)
- netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bnxt_fw_reset_via_optee(bp);
bp->fw_reset_timestamp = jiffies;
-#endif
return;
}
@@ -11199,7 +11507,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (time_after(jiffies, bp->fw_reset_timestamp +
(bp->fw_reset_max_dsecs * HZ / 10))) {
netdev_err(bp->dev, "Firmware reset aborted\n");
- goto fw_reset_abort;
+ goto fw_reset_abort_status;
}
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
@@ -11233,6 +11541,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
return;
+fw_reset_abort_status:
+ if (bp->fw_health->status_reliable ||
+ (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
+ }
fw_reset_abort:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
@@ -11787,15 +12102,17 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
-
- bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ /* Flush any pending tasks */
+ cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ bp->sp_event = 0;
+
+ bnxt_dl_fw_reporters_destroy(bp, true);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -12203,6 +12520,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
@@ -12234,8 +12552,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (BNXT_CHIP_P5(bp))
+ if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
+ if (BNXT_CHIP_SR2(bp))
+ bp->flags |= BNXT_FLAG_CHIP_SR2;
+ }
rc = bnxt_alloc_rss_indir_tbl(bp);
if (rc)
@@ -12535,6 +12856,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
}
+ if (state == pci_channel_io_frozen)
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
if (netif_running(netdev))
bnxt_close(netdev);
@@ -12561,7 +12885,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0;
+ int err = 0, off;
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12573,6 +12897,20 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ /* Upon fatal error, the device's internal logic that latches to
+ * the BAR values gets reset, and is restored only when the BARs
+ * are rewritten.
+ *
+ * pci_restore_state() does not rewrite a BAR whose current value
+ * matches the saved value, so after a fatal error the driver must
+ * write the BARs to 0 to force the restore.
+ */
+ if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+ &bp->state)) {
+ for (off = PCI_BASE_ADDRESS_0;
+ off <= PCI_BASE_ADDRESS_5; off += 4)
+ pci_write_config_dword(bp->pdev, off, 0);
+ }
pci_restore_state(pdev);
pci_save_state(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 0ef89dabfd61..47b3c3127879 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -907,6 +907,7 @@ struct bnxt_rx_ring_info {
struct bnxt_rx_sw_stats {
u64 rx_l4_csum_errors;
+ u64 rx_resets;
u64 rx_buf_errors;
};
@@ -1142,50 +1143,6 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
-struct hwrm_port_phy_qcfg_output_compat {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- u8 link_signal_mode;
- __le16 link_speed;
- u8 duplex_cfg;
- u8 pause;
- __le16 support_speeds;
- __le16 force_link_speed;
- u8 auto_mode;
- u8 auto_pause;
- __le16 auto_link_speed;
- __le16 auto_link_speed_mask;
- u8 wirespeed;
- u8 lpbk;
- u8 force_pause;
- u8 module_status;
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- u8 media_type;
- u8 xcvr_pkg_type;
- u8 eee_config_phy_addr;
- u8 parallel_detect;
- __le16 link_partner_adv_speeds;
- u8 link_partner_adv_auto_mode;
- u8 link_partner_adv_pause;
- __le16 adv_eee_link_speed_mask;
- __le16 link_partner_adv_eee_link_speed_mask;
- __le32 xcvr_identifier_type_tx_lpi_timer;
- __le16 fec_cfg;
- u8 duplex_state;
- u8 option_flags;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- u8 unused_0[7];
- u8 valid;
-};
-
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1196,7 +1153,10 @@ struct bnxt_link_info {
#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
u8 wire_speed;
- u8 loop_back;
+ u8 phy_state;
+#define BNXT_PHY_STATE_ENABLED 0
+#define BNXT_PHY_STATE_DISABLED 1
+
u8 link_up;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
@@ -1232,6 +1192,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
+ u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1243,24 +1204,51 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
+ u16 auto_pam4_link_speeds;
+#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
+#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
+#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
u16 support_auto_speeds;
+ u16 support_pam4_auto_speeds;
u16 lp_auto_link_speeds;
+ u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
+ u16 force_pam4_link_speed;
u32 preemphasis;
u8 module_status;
+ u8 active_fec_sig_mode;
u16 fec_cfg;
+#define BNXT_FEC_NONE PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED
+#define BNXT_FEC_AUTONEG_CAP PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED
#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED
#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
-#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
+#define BNXT_FEC_ENC_RS_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED
+#define BNXT_FEC_ENC_LLRS_CAP \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED)
+#define BNXT_FEC_ENC_RS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED)
+#define BNXT_FEC_ENC_LLRS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED)
/* copy of requested setting from ethtool cmd */
u8 autoneg;
#define BNXT_AUTONEG_SPEED 1
#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_signal_mode;
+#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
+#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
u16 advertising; /* user adv setting */
+ u16 advertising_pam4;
bool force_link_chng;
bool phy_retry;
@@ -1272,6 +1260,49 @@ struct bnxt_link_info {
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
};
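+/* Groups of firmware FEC mode flags used when configuring forced FEC.
+ * The RS544 and RS272 variants apply only to PAM4-capable links.
+ */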
+#define BNXT_FEC_RS544_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE)
+
+#define BNXT_FEC_RS544_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE)
+
+#define BNXT_FEC_RS272_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE)
+
+#define BNXT_FEC_RS272_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE)
+
+#define BNXT_PAM4_SUPPORTED(link_info) \
+ ((link_info)->support_pam4_speeds)
+
+#define BNXT_FEC_RS_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_ON | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_LLRS_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS272_ON | BNXT_FEC_RS544_OFF)
+
+#define BNXT_FEC_RS_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_OFF | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_BASE_R_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
+#define BNXT_FEC_ALL_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
#define BNXT_MAX_QUEUE 8
struct bnxt_queue_info {
@@ -1464,6 +1495,7 @@ struct bnxt_fw_health {
u8 enabled:1;
u8 master:1;
u8 fatal:1;
+ u8 status_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1491,6 +1523,9 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_BASE 0x3000
#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
+ ((reg) & BNXT_GRC_OFFSET_MASK))
+
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
@@ -1535,6 +1570,8 @@ struct bnxt {
u8 chip_rev;
+#define CHIP_NUM_58818 0xd818
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1613,6 +1650,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_CHIP_SR2 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
@@ -1630,20 +1668,27 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
-#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \
- ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
+#define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \
+ ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \
+ (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
-/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
+#define BNXT_CHIP_SR2(bp) \
+ ((bp)->chip_num == CHIP_NUM_58818)
+
+#define BNXT_CHIP_P5_THOR(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
@@ -1736,6 +1781,7 @@ struct bnxt {
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1777,6 +1823,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
#define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
+ #define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1810,6 +1857,7 @@ struct bnxt {
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
u64 fw_ver_code;
#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
@@ -1935,6 +1983,20 @@ struct bnxt {
struct device *hwmon_dev;
};
+#define BNXT_NUM_RX_RING_STATS 8
+#define BNXT_NUM_TX_RING_STATS 8
+#define BNXT_NUM_TPA_RING_STATS 4
+#define BNXT_NUM_TPA_RING_STATS_P5 5
+#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+
+#define BNXT_RING_STATS_SIZE_P5 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5) * 8)
+
+#define BNXT_RING_STATS_SIZE_P5_SR2 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
@@ -2114,6 +2176,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3a854195d5b0..184b6d0513b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -17,15 +17,13 @@
#include "bnxt_ethtool.h"
static int
-bnxt_dl_flash_update(struct devlink *dl, const char *filename,
- const char *region, struct netlink_ext_ack *extack)
+bnxt_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (region)
- return -EOPNOTSUPP;
-
if (!BNXT_PF(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"flash update not supported from a VF");
@@ -33,15 +31,12 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
}
devlink_flash_update_begin_notify(dl);
- devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
- 0);
- rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ rc = bnxt_flash_package_from_file(bp->dev, params->file_name, 0);
if (!rc)
- devlink_flash_update_status_notify(dl, "Flashing done", region,
- 0, 0);
+ devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
- devlink_flash_update_status_notify(dl, "Flashing failed",
- region, 0, 0);
+ devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
devlink_flash_update_end_notify(dl);
return rc;
}
@@ -387,15 +382,41 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
return rc;
}
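+/* Helper to report one devlink version entry; empty strings are
+ * skipped, as are the NCSI and RoCE firmware versions on P5 chips.
+ */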
+static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
+ enum bnxt_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
+ return 0;
+
+ switch (type) {
+ case BNXT_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNXT_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNXT_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+ return 0;
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
union devlink_param_value nvm_cfg_ver;
struct hwrm_ver_get_output *ver_resp;
char mgmt_ver[FW_VER_STR_LEN];
char roce_ver[FW_VER_STR_LEN];
- char fw_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
char buf[32];
int rc;
@@ -403,10 +424,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (strlen(bp->board_partno)) {
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
- bp->board_partno);
+ if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
+ bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
if (rc)
return rc;
}
@@ -417,54 +439,56 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
}
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_partno);
+ if (rc)
+ return rc;
+
sprintf(buf, "%X", bp->chip_num);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
if (rc)
return rc;
ver_resp = &bp->ver_resp;
sprintf(buf, "%X", ver_resp->chip_rev);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
if (rc)
return rc;
- if (BNXT_PF(bp)) {
- sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
- bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
- bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
- rc = devlink_info_serial_number_put(req, buf);
- if (rc)
- return rc;
- }
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bp->nvm_cfg_ver);
+ if (rc)
+ return rc;
- if (strlen(ver_resp->active_pkg_name)) {
- rc =
- devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- ver_resp->active_pkg_name);
- if (rc)
- return rc;
- }
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;
sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
ver & 0xF);
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ buf);
if (rc)
return rc;
}
if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
@@ -472,11 +496,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
} else {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
@@ -484,29 +508,60 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
}
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, fw_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
if (rc)
return rc;
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
- bp->hwrm_ver_supp);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bp->hwrm_ver_supp);
if (rc)
return rc;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, mgmt_ver);
- if (rc)
- return rc;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
- if (rc)
- return rc;
- }
- return 0;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
+ if (rc ||
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d5c8bd49383a..d22cab5d6856 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -60,6 +60,12 @@ struct bnxt_dl_nvm_param {
u8 dl_num_bytes;
};
+enum bnxt_dl_version_type {
+ BNXT_VERSION_FIXED,
+ BNXT_VERSION_RUNNING,
+ BNXT_VERSION_STORED,
+};
+
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_recovery_done(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fecdfd875af1..53687bc7fcf5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -172,10 +173,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_pkt",
"rx_tpa_bytes",
"rx_tpa_errors",
+ "rx_tpa_events",
};
static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_resets",
"rx_buf_errors",
};
@@ -462,9 +465,12 @@ static const struct {
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
- if (bp->max_tpa_v2)
- return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
- return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ return BNXT_NUM_TPA_RING_STATS_P5;
+ return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ }
+ return BNXT_NUM_TPA_RING_STATS;
}
return 0;
}
@@ -796,7 +802,7 @@ static void bnxt_get_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_rx_rings, max_tx_rings, tcs;
- int max_tx_sch_inputs;
+ int max_tx_sch_inputs, tx_grps;
/* Get the most up-to-date max_tx_sch_inputs. */
if (netif_running(dev) && BNXT_NEW_RM(bp))
@@ -806,6 +812,12 @@ static void bnxt_get_channels(struct net_device *dev,
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
+
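+	/* TX rings are divided among traffic classes, plus one group for XDP */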
+ tcs = netdev_get_num_tc(dev);
+ tx_grps = max(tcs, 1);
+ if (bp->tx_nr_rings_xdp)
+ tx_grps++;
+ max_tx_rings /= tx_grps;
channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
@@ -815,7 +827,6 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
@@ -1503,6 +1514,53 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
+#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 200000baseCR4_Full);\
+}
+
+#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 200000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
+}
+
+static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.advertising);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.advertising);
+}
+
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
@@ -1513,6 +1571,9 @@ static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
fw_pause = link_info->auto_pause_setting;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
+ fw_speeds = link_info->advertising_pam4;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
+ bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
}
static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
@@ -1526,6 +1587,29 @@ static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
lp_advertising);
+ fw_speeds = link_info->lp_auto_pam4_link_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
+}
+
+static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if (fec_cfg & BNXT_FEC_NONE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.supported);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.supported);
}
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
@@ -1534,14 +1618,18 @@ static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+ fw_speeds = link_info->support_pam4_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
Asym_Pause);
- if (link_info->support_auto_speeds)
+ if (link_info->support_auto_speeds ||
+ link_info->support_pam4_auto_speeds)
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
Autoneg);
+ bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -1632,55 +1720,86 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
+static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_pam4_spds = link_info->support_pam4_speeds;
u16 support_spds = link_info->support_speeds;
- u32 fw_speed = 0;
+ u8 sig_mode = BNXT_SIG_MODE_NRZ;
+ u16 fw_speed = 0;
switch (ethtool_speed) {
case SPEED_100:
if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
break;
case SPEED_25000:
if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
break;
case SPEED_50000:
- if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
case SPEED_100000:
- if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
- default:
- netdev_err(dev, "unsupported speed!\n");
+ case SPEED_200000:
+ if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
}
- return fw_speed;
+
+ if (!fw_speed) {
+ netdev_err(dev, "unsupported speed!\n");
+ return -EINVAL;
+ }
+
+ if (link_info->req_link_speed == fw_speed &&
+ link_info->req_signal_mode == sig_mode &&
+ link_info->autoneg == 0)
+ return -EALREADY;
+
+ link_info->req_link_speed = fw_speed;
+ link_info->req_signal_mode = sig_mode;
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg = 0;
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+
+ return 0;
}
u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
@@ -1712,7 +1831,6 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
- u16 fw_advertising = 0;
u32 speed;
int rc = 0;
@@ -1721,19 +1839,23 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
- BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+ BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
advertising);
+ BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
+ lk_ksettings, advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
- if (!fw_advertising)
+ if (!link_info->advertising && !link_info->advertising_pam4) {
link_info->advertising = link_info->support_auto_speeds;
- else
- link_info->advertising = fw_advertising;
+ link_info->advertising_pam4 =
+ link_info->support_pam4_auto_speeds;
+ }
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
set_pause = true;
} else {
- u16 fw_speed;
u8 phy_type = link_info->phy_type;
if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
@@ -1749,15 +1871,12 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
goto set_setting_exit;
}
speed = base->speed;
- fw_speed = bnxt_get_fw_speed(dev, speed);
- if (!fw_speed) {
- rc = -EINVAL;
+ rc = bnxt_force_link_speed(dev, speed);
+ if (rc) {
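+			/* -EALREADY means the requested speed is already in force */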
+ if (rc == -EALREADY)
+ rc = 0;
goto set_setting_exit;
}
- link_info->req_link_speed = fw_speed;
- link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
- link_info->autoneg = 0;
- link_info->advertising = 0;
}
if (netif_running(dev))
@@ -1768,6 +1887,110 @@ set_setting_exit:
return rc;
}
+static int bnxt_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fec)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u8 active_fec;
+ u16 fec_cfg;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+ if (fec_cfg & BNXT_FEC_NONE) {
+ fec->fec = ETHTOOL_FEC_NONE;
+ fec->active_fec = ETHTOOL_FEC_NONE;
+ return 0;
+ }
+ if (fec_cfg & BNXT_FEC_AUTONEG)
+ fec->fec |= ETHTOOL_FEC_AUTO;
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ fec->fec |= ETHTOOL_FEC_BASER;
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ fec->fec |= ETHTOOL_FEC_RS;
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ fec->fec |= ETHTOOL_FEC_LLRS;
+
+ switch (active_fec) {
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_BASER;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_RS;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_LLRS;
+ break;
+ }
+ return 0;
+}
+
+static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
+ u32 fec)
+{
+ u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
+
+ if (fec & ETHTOOL_FEC_BASER)
+ fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
+ else if (fec & ETHTOOL_FEC_RS)
+ fw_fec |= BNXT_FEC_RS_ON(link_info);
+ else if (fec & ETHTOOL_FEC_LLRS)
+ fw_fec |= BNXT_FEC_LLRS_ON;
+ return fw_fec;
+}
+
+static int bnxt_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u32 new_cfg, fec = fecparam->fec;
+ u16 fec_cfg;
+ int rc;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ if (fec_cfg & BNXT_FEC_NONE)
+ return -EOPNOTSUPP;
+
+ if (fec & ETHTOOL_FEC_OFF) {
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
+ BNXT_FEC_ALL_OFF(link_info);
+ goto apply_fec;
+ }
+ if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
+ ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
+ ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
+ ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
+ return -EINVAL;
+
+ if (fec & ETHTOOL_FEC_AUTO) {
+ if (!link_info->autoneg)
+ return -EINVAL;
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
+ } else {
+ new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
+ }
+
+apply_fec:
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ /* update current settings */
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ bnxt_update_link(bp, false);
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
+}
+
static void bnxt_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1781,6 +2004,22 @@ static void bnxt_get_pauseparam(struct net_device *dev,
epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
+static void bnxt_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *epstat)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
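+	/* Pause counters live in the software copy of the port stats:
+	 * RX counters first, TX counters at BNXT_TX_PORT_STATS_BYTE_OFFSET.
+	 */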
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
+ epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
+}
+
static int bnxt_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1833,6 +2072,22 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_get_dev_info_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
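+	/* Hold hwrm_cmd_lock while copying the response, since the
+	 * response buffer is shared across HWRM commands.
+	 */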
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ memcpy(nvm_dev_info, resp, sizeof(*resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_print_admin_err(struct bnxt *bp)
{
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
@@ -3059,7 +3314,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u8 test_mask = 0;
int rc = 0, i;
- if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ if (!bp->num_tests || !BNXT_PF(bp))
return;
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
@@ -3072,9 +3327,9 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
do_ext_lpbk = true;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
- if (bp->pf.active_vfs) {
+ if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
etest->flags |= ETH_TEST_FL_FAILED;
- netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
return;
}
offline = true;
@@ -3590,7 +3845,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
bnxt_get_pkgver(dev);
bp->num_tests = 0;
- if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
@@ -3657,6 +3912,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
+ .get_fecparam = bnxt_get_fecparam,
+ .set_fecparam = bnxt_set_fecparam,
+ .get_pause_stats = bnxt_get_pause_stats,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 34f44ddfad79..fa6fbde52bea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -92,6 +92,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
u32 install_type);
void bnxt_ethtool_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c4af6bf15e36..2d3e962bdac3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -213,7 +213,10 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
#define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_PORT_ECN_QSTATS 0xbaUL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -370,6 +373,9 @@ struct cmd_nums {
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
+ #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
#define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
#define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
#define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
@@ -384,6 +390,8 @@ struct cmd_nums {
#define HWRM_TF_TCAM_FREE 0x2fbUL
#define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -447,6 +455,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -478,8 +487,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 54
-#define HWRM_VERSION_STR "1.10.1.54"
+#define HWRM_VERSION_RSVD 68
+#define HWRM_VERSION_STR "1.10.1.68"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -675,6 +684,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -851,6 +861,32 @@ struct hwrm_async_event_cmpl_error_recovery {
#define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
};
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -975,6 +1011,28 @@ struct hwrm_async_event_cmpl_eem_cache_flush_done {
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
};
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1214,7 +1272,13 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
u8 max_schqs;
- u8 unused_1[2];
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ u8 unused_1;
u8 valid;
};
@@ -1250,6 +1314,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1341,7 +1406,13 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 unused_2[7];
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 unused_2[6];
u8 valid;
};
@@ -1405,6 +1476,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
#define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
#define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1479,7 +1551,18 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
__le16 schq_id;
- u8 unused_0[6];
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ u8 unused_0[4];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1559,7 +1642,7 @@ struct hwrm_func_qstats_ext_input {
u8 unused_1[4];
};
-/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
struct hwrm_func_qstats_ext_output {
__le16 error_code;
__le16 req_type;
@@ -1586,6 +1669,7 @@ struct hwrm_func_qstats_ext_output {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
u8 unused_0[7];
u8 valid;
};
@@ -2412,25 +2496,29 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
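/* Editorial sketch, not part of the patch: the FEC flags come in
 * enable/disable pairs, and leaving both bits of a pair clear keeps that FEC
 * mode unchanged. A hypothetical helper forcing RS272_1XN on and RS544_1XN
 * off might look like this.
 */
static void bnxt_phy_cfg_prefer_rs272(struct hwrm_port_phy_cfg_input *req)
{
	u32 flags = le32_to_cpu(req->flags);

	flags |= PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE |
		 PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE;
	req->flags = cpu_to_le32(flags);
}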
@@ -2573,7 +2661,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2584,10 +2672,22 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 link_signal_mode;
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2809,21 +2909,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2845,11 +2945,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- __le16 link_partner_pam4_adv_speeds;
+ u8 link_partner_pam4_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 unused_0[7];
u8 valid;
};
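/* Editorial sketch, not part of the patch: the former link_signal_mode byte
 * now packs the signalling mode into bits [3:0] and the active FEC into bits
 * [7:4]; the *_ACTIVE values are pre-shifted, so compare after masking only.
 * Both helpers are hypothetical.
 */
static bool bnxt_link_is_pam4(u8 active_fec_signal_mode)
{
	return (active_fec_signal_mode &
		PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK) ==
	       PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4;
}

static bool bnxt_fec_rs544_active(u8 active_fec_signal_mode)
{
	return (active_fec_signal_mode &
		PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK) ==
	       PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE;
}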
@@ -3293,6 +3392,47 @@ struct hwrm_port_lpbk_qstats_output {
u8 valid;
};
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
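/* Editorial sketch, not part of the patch: HWRM_PORT_ECN_QSTATS DMAs a
 * struct port_stats_ecn to ecn_stat_host_addr, one mark counter per CoS
 * queue; ecn_stat_buf_size in the response says how much firmware filled in.
 * Summing the eight counters is a typical consumer (hypothetical helper).
 */
static u64 bnxt_total_ecn_marks(const struct port_stats_ecn *stats)
{
	return le64_to_cpu(stats->mark_cnt_cos0) +
	       le64_to_cpu(stats->mark_cnt_cos1) +
	       le64_to_cpu(stats->mark_cnt_cos2) +
	       le64_to_cpu(stats->mark_cnt_cos3) +
	       le64_to_cpu(stats->mark_cnt_cos4) +
	       le64_to_cpu(stats->mark_cnt_cos5) +
	       le64_to_cpu(stats->mark_cnt_cos6) +
	       le64_to_cpu(stats->mark_cnt_cos7);
}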
/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
__le16 req_type;
@@ -3387,8 +3527,9 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -5365,6 +5506,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
#define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5424,7 +5566,14 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 unused_4[3];
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 unused_4[2];
__le64 cq_handle;
};
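/* Editorial sketch, not part of the patch: pointing a ring at a mid-path
 * channel takes both the new enables bit and the channel type byte. Assumes
 * the usual __le32 enables field of hwrm_ring_alloc_input; the helper is
 * hypothetical.
 */
static void bnxt_ring_alloc_set_mpc(struct hwrm_ring_alloc_input *req,
				    u8 chnl_type)
{
	req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE);
	req->mpc_chnls_type = chnl_type; /* e.g. RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE */
}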
@@ -6661,7 +6810,7 @@ struct hwrm_cfa_vfr_alloc_output {
u8 valid;
};
-/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
struct hwrm_cfa_vfr_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6669,6 +6818,9 @@ struct hwrm_cfa_vfr_free_input {
__le16 target_id;
__le64 resp_addr;
char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
};
/* hwrm_cfa_vfr_free_output (size:128b/16B) */
@@ -6970,7 +7122,7 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
-/* ctx_hw_stats_ext (size:1344b/168B) */
+/* ctx_hw_stats_ext (size:1408b/176B) */
struct ctx_hw_stats_ext {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
@@ -6993,6 +7145,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
};
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
@@ -7065,16 +7218,16 @@ struct hwrm_stat_ctx_query_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
@@ -7099,7 +7252,7 @@ struct hwrm_stat_ext_ctx_query_input {
u8 unused_0[3];
};
-/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
struct hwrm_stat_ext_ctx_query_output {
__le16 error_code;
__le16 req_type;
@@ -7126,6 +7279,7 @@ struct hwrm_stat_ext_ctx_query_output {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
u8 unused_0[7];
u8 valid;
};
@@ -7702,6 +7856,77 @@ struct hwrm_dbg_read_direct_output {
u8 valid;
};
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
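/* Editorial sketch, not part of the patch: the 2-bit dest field in the qcfg
 * request selects which crashdump destination the returned crashdump_size
 * refers to (NVM, host DDR or SoC DDR). Hypothetical helper.
 */
static void bnxt_dbg_qcfg_set_dest(struct hwrm_dbg_qcfg_input *req, u16 dest)
{
	u16 flags = le16_to_cpu(req->flags);

	flags &= ~DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK;
	flags |= dest << DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT;
	req->flags = cpu_to_le16(flags);
}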
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -8048,7 +8273,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -8063,6 +8288,22 @@ struct hwrm_nvm_get_dev_info_output {
u8 nvm_cfg_ver_maj;
u8 nvm_cfg_ver_min;
u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ u8 unused_0[7];
u8 valid;
};
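/* Editorial sketch, not part of the patch: the extended response now carries
 * separate HWRM/management/RoCE firmware versions, which are only meaningful
 * when FW_VER_VALID is set. Hypothetical helper.
 */
static void bnxt_hwrm_fw_ver_str(const struct hwrm_nvm_get_dev_info_output *resp,
				 char *buf, size_t len)
{
	if (!(resp->flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
		return;

	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(resp->hwrm_fw_major),
		 le16_to_cpu(resp->hwrm_fw_minor),
		 le16_to_cpu(resp->hwrm_fw_build),
		 le16_to_cpu(resp->hwrm_fw_patch));
}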
@@ -8381,6 +8622,16 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
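/* Editorial sketch, not part of the patch: push size and push index share one
 * word -- size in bits [28:24], index in bits [23:0]. Hypothetical accessors.
 */
static u32 db_push_index(const struct db_push_info *info)
{
	return (info->push_size_push_index & DB_PUSH_INFO_PUSH_INDEX_MASK) >>
	       DB_PUSH_INFO_PUSH_INDEX_SFT;
}

static u32 db_push_size(const struct db_push_info *info)
{
	return (info->push_size_push_index & DB_PUSH_INFO_PUSH_SIZE_MASK) >>
	       DB_PUSH_INFO_PUSH_SIZE_SFT;
}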
/* fw_status_reg (size:32b/4B) */
struct fw_status_reg {
u32 fw_status;
@@ -8393,6 +8644,32 @@ struct fw_status_reg {
#define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
#define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
#define FW_STATUS_REG_SHUTDOWN 0x100000UL
-};
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
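/* Editorial sketch, not part of the patch: hcomm_status lives at the fixed
 * GRC location HCOMM_STATUS_STRUCT_LOC. A consumer checks the signature
 * (0x484353 is ASCII "HCS"), then splits fw_status_loc into an address-space
 * selector (bits [1:0]) and a 4-byte-aligned offset. Hypothetical helper.
 */
static bool hcomm_status_decode(const struct hcomm_status *hs,
				u32 *addr_space, u32 *offset)
{
	if ((hs->sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
	    HCOMM_STATUS_SIGNATURE_VAL)
		return false;

	*addr_space = hs->fw_status_loc & HCOMM_STATUS_TRUE_ADDR_SPACE_MASK;
	*offset = hs->fw_status_loc & HCOMM_STATUS_TRUE_OFFSET_MASK;
	return true;
}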
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index cc2ee4d0bd18..23b80aa171dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 84536292b031..f7f10cfb3476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3009,10 +3009,10 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(unsigned long data)
+static void cnic_service_bnx2_msix(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3134,10 +3134,10 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(unsigned long data)
+static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4458,8 +4458,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4868,8 +4867,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
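/* Editorial sketch, not part of the patch: the tasklet_setup()/from_tasklet()
 * pattern applied above. from_tasklet() is container_of() in disguise: it
 * recovers the structure embedding the tasklet, so callbacks no longer carry
 * an unsigned long cookie. Minimal standalone shape (hypothetical names,
 * requires <linux/interrupt.h>):
 */
struct demo_ctx {
	struct tasklet_struct task;
	int pending;
};

static void demo_task_fn(struct tasklet_struct *t)
{
	struct demo_ctx *ctx = from_tasklet(ctx, t, task);

	ctx->pending = 0;
}

static void demo_init(struct demo_ctx *ctx)
{
	tasklet_setup(&ctx->task, demo_task_fn);
}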
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 09fb9315d1ae..06f221c44802 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -102,14 +102,10 @@ bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
+ * bfa_cee_reset_stats_isr - CEE ISR for reset-stats responses from f/w
*
- * @brief CEE ISR for reset-stats responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -148,9 +144,12 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
+ * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
* @cee: Pointer to the CEE module data structure.
+ * @attr: attribute requested
+ * @cbfn: function pointer
+ * @cbarg: function pointer arguments
*
* Return: status
*/
@@ -181,7 +180,9 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
+ * bfa_cee_isr - Handles Mail-box interrupts for CEE module.
+ * @cbarg: argument passed containing pointer to the CEE module data structure.
+ * @m: message pointer
*/
static void
@@ -210,6 +211,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
/**
* bfa_cee_notify - CEE module heart-beat failure handler.
*
+ * @arg: argument passed containing pointer to the CEE module data structure.
* @event: IOC event type
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b9dd06b12945..cd933817a0b8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -269,7 +269,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_PFFAILED:
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
@@ -365,7 +365,8 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_hb_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
+
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
@@ -1763,7 +1764,7 @@ bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
return BFI_IOC_IMG_VER_INCOMP;
}
-/**
+/*
* Returns TRUE if driver is willing to work with current smem f/w version.
*/
bool
@@ -2469,6 +2470,7 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
*
* @ioc: memory for IOC
* @bfa: driver instance structure
+ * @cbfn: callback function
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2500,7 +2502,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
/**
* bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
+ * @ioc: memory for IOC
* @pcidev: PCI device information for this IOC
+ * @clscode: class code
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2569,6 +2573,7 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
/**
* bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
+ * @ioc: memory for IOC
* @dm_kva: kernel virtual address of IOC dma memory
* @dm_pa: physical address of IOC dma memory
*/
@@ -2636,6 +2641,8 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
*
* @ioc: IOC instance
* @cmd: Mailbox command
+ * @cbfn: callback function
+ * @cbarg: arguments to callback
*
* Waits if mailbox is busy. Responsibility of caller to serialize
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cc80bbbefe87..7e4e831d720f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3277,7 +3277,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
- u32 rx_count = 0, frame, new_frame;
+ u32 frame, new_frame;
mutex_lock(&bnad->conf_mutex);
@@ -3293,12 +3293,9 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
/* only when transition is over 4K */
if ((frame <= 4096 && new_frame > 4096) ||
(frame > 4096 && new_frame <= 4096))
- rx_count = bnad_reinit_rx(bnad);
+ bnad_reinit_rx(bnad);
}
- /* rx_count > 0 - new rx created
- * - Linux set err = 0 and return
- */
err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 4f1b41569260..5de47f6fde5a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -7,6 +7,7 @@
#ifndef _MACB_H
#define _MACB_H
+#include <linux/clk.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
@@ -365,6 +366,8 @@
#define MACB_ISR_RLE_SIZE 1
#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */
#define MACB_TXERR_SIZE 1
+#define MACB_RM9200_TBRE_OFFSET 6 /* EN may send new frame interrupt (RM9200) */
+#define MACB_RM9200_TBRE_SIZE 1
#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */
#define MACB_TCOMP_SIZE 1
#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */
@@ -1204,10 +1207,10 @@ struct macb {
phy_interface_t phy_interface;
- /* AT91RM9200 transmit */
- struct sk_buff *skb; /* holds skb until xmit interrupt completes */
- dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
- int skb_length; /* saved skb length for pci_unmap_single */
+ /* AT91RM9200 transmit queue (1 on wire + 1 queued) */
+ struct macb_tx_skb rm9200_txq[2];
+ unsigned int rm9200_tx_tail;
+ unsigned int rm9200_tx_len;
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
@@ -1298,4 +1301,14 @@ static inline bool gem_has_ptp(struct macb *bp)
return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
+ * @pclk: platform clock
+ * @hclk: AHB clock
+ */
+struct macb_platform_data {
+ struct clk *pclk;
+ struct clk *hclk;
+};
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 9179f7b0b900..286f0341bdf8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -23,7 +23,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
@@ -458,9 +457,9 @@ static void macb_init_buffers(struct macb *bp)
/**
* macb_set_tx_clk() - Set a clock to a new frequency
- * @clk Pointer to the clock to change
- * @rate New frequency in Hz
- * @dev Pointer to the struct net_device
+ * @clk: Pointer to the clock to change
+ * @speed: New frequency in Hz
+ * @dev: Pointer to the struct net_device
*/
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
@@ -1465,9 +1464,9 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(unsigned long data)
+static void macb_hresp_error_task(struct tasklet_struct *t)
{
- struct macb *bp = (struct macb *)data;
+ struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -1930,7 +1929,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
@@ -3909,6 +3909,7 @@ static int at91ether_start(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
+ MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -3925,6 +3926,7 @@ static void at91ether_stop(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
+ MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -3994,24 +3996,34 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ unsigned long flags;
- if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
- netif_stop_queue(dev);
+ if (lp->rm9200_tx_len < 2) {
+ int desc = lp->rm9200_tx_tail;
/* Store packet information (to free when Tx completed) */
- lp->skb = skb;
- lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
+ lp->rm9200_txq[desc].skb = skb;
+ lp->rm9200_txq[desc].size = skb->len;
+ lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
netdev_err(dev, "%s: DMA mapping error\n", __func__);
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ lp->rm9200_tx_tail = (desc + 1) & 1;
+ lp->rm9200_tx_len++;
+ if (lp->rm9200_tx_len > 1)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
/* Set address of the data in the Transmit Address register */
- macb_writel(lp, TAR, lp->skb_physaddr);
+ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
/* Set length of the packet in the Transmit Control register */
macb_writel(lp, TCR, skb->len);
@@ -4074,6 +4086,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct macb *lp = netdev_priv(dev);
u32 intstatus, ctl;
+ unsigned int desc;
+ unsigned int qlen;
+ u32 tsr;
/* MAC Interrupt Status register indicates what interrupts are pending.
* It is automatically cleared once read.
@@ -4085,20 +4100,39 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
at91ether_rx(dev);
/* Transmit complete */
- if (intstatus & MACB_BIT(TCOMP)) {
+ if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
dev->stats.tx_errors++;
- if (lp->skb) {
- dev_consume_skb_irq(lp->skb);
- lp->skb = NULL;
- dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
- lp->skb_length, DMA_TO_DEVICE);
+ spin_lock(&lp->lock);
+
+ tsr = macb_readl(lp, TSR);
+
+ /* we have three possibilities here:
+ * - all pending packets transmitted (TGO, implies BNQ)
+ * - only first packet transmitted (!TGO && BNQ)
+ * - two frames pending (!TGO && !BNQ)
+ * Note that TGO ("transmit go") is called "IDLE" on RM9200.
+ */
+ qlen = (tsr & MACB_BIT(TGO)) ? 0 :
+ (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2;
+
+ while (lp->rm9200_tx_len > qlen) {
+ desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1;
+ dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
+ lp->rm9200_txq[desc].skb = NULL;
+ dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
+ lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->skb_length;
+ dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
+ lp->rm9200_tx_len--;
}
- netif_wake_queue(dev);
+
+ if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
}
/* Work-around for EMAC Errata section 41.3.1 */
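/* Editorial sketch, not part of the patch: the "& 1" above is the modulo-2
 * wrap of the 2-entry ring, so the oldest in-flight slot is tail minus the
 * queue length. E.g. after both slots are queued, tail = 0 and len = 2, and
 * (0 - 2) & 1 = 0 picks the slot that was queued first. Hypothetical helper.
 */
static unsigned int rm9200_oldest_slot(unsigned int tail, unsigned int len)
{
	return (tail - len) & 1;
}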
@@ -4559,8 +4593,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
- (unsigned long)bp);
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index cd7d0332cba3..353393dea639 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
- * Cadence GEM PCI wrapper.
+ * DOC: Cadence GEM PCI wrapper.
*
* Copyright (C) 2016 Cadence Design Systems - https://www.cadence.com
*
@@ -13,7 +13,6 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include "macb.h"
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 05a3d067c3fc..bbb453c6a5f7 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1246,6 +1246,8 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
/**
* xgmac_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: index of the hung transmit queue
+ *
* Description: this function is called when a packet transmission fails to
* complete within a reasonable tmrate. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 81ff9ac73f9a..9fd717b9cf69 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(cavium_ptp_put);
/**
* cavium_ptp_adjfine() - Adjust ptp frequency
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @scaled_ppm: how much to adjust by, in parts per million, but with a
* 16 bit binary fractional field
*/
@@ -134,7 +134,7 @@ static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
/**
* cavium_ptp_adjtime() - Adjust ptp time
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @delta: how much to adjust by, in nanosecs
*/
static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
@@ -155,7 +155,7 @@ static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
/**
* cavium_ptp_gettime() - Get hardware clock time with adjustment
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @ts: timespec
*/
static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
@@ -177,7 +177,7 @@ static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
/**
* cavium_ptp_settime() - Set hardware clock time. Reset adjustment
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @ts: timespec
*/
static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
@@ -199,7 +199,7 @@ static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
/**
* cavium_ptp_enable() - Request to enable or disable an ancillary feature.
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @rq: request
* @on: is it on
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 50b533ff58e6..2a6d1cadac9e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -25,7 +25,8 @@
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn68xx_device.h"
#include "cn68xx_regs.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index e40c64b79f66..9ef172976b35 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -32,8 +32,8 @@
#define OCTNIC_MAX_SG MAX_SKB_FRAGS
/**
- * \brief Delete gather lists
- * @param lio per-network private data
+ * lio_delete_glists - Delete gather lists
+ * @lio: per-network private data
*/
void lio_delete_glists(struct lio *lio)
{
@@ -73,8 +73,10 @@ void lio_delete_glists(struct lio *lio)
}
/**
- * \brief Setup gather lists
- * @param lio per-network private data
+ * lio_setup_glists - Setup gather lists
+ * @oct: octeon_device
+ * @lio: per-network private data
+ * @num_iqs: count of iqs to allocate
*/
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
@@ -521,12 +523,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
}
/**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
+ * octeon_setup_droq - Setup output queue
+ * @oct: octeon device
+ * @q_no: which queue
+ * @num_descs: how many descriptors
+ * @desc_size: size of each descriptor
+ * @app_ctx: application context
*/
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
int desc_size, void *app_ctx)
@@ -555,16 +557,17 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
return ret_val;
}
-/** Routine to push packets arriving on Octeon interface upto network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
+/**
+ * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
+ * @octeon_id: octeon device id.
+ * @skbuff: skbuff struct to be passed to network layer.
+ * @len: size of total data received.
+ * @rh: Control header associated with the packet
+ * @param: additional control data with the packet
+ * @arg: farg registered in droq_ops
*/
static void
-liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+liquidio_push_packet(u32 __maybe_unused octeon_id,
void *skbuff,
u32 len,
union octeon_rh *rh,
@@ -698,8 +701,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
}
/**
- * \brief wrapper for calling napi_schedule
- * @param param parameters to pass to napi_schedule
+ * napi_schedule_wrapper - wrapper for calling napi_schedule
+ * @param: parameters to pass to napi_schedule
*
* Used when scheduling on different CPUs
*/
@@ -711,8 +714,8 @@ static void napi_schedule_wrapper(void *param)
}
/**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
+ * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
+ * @arg: pointer to octeon output queue
*/
static void liquidio_napi_drv_callback(void *arg)
{
@@ -737,9 +740,9 @@ static void liquidio_napi_drv_callback(void *arg)
}
/**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
+ * liquidio_napi_poll - Entry point for NAPI polling
+ * @napi: NAPI structure
+ * @budget: maximum number of items to process
*/
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
@@ -792,9 +795,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface index
+ * liquidio_setup_io_queues - Setup input and output queues
+ * @octeon_dev: octeon device
+ * @ifidx: Interface index
+ * @num_iqs: input io queue count
+ * @num_oqs: output io queue count
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
@@ -927,7 +932,7 @@ int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
}
irqreturn_t
-liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
struct octeon_device *oct = ioq_vector->oct_dev;
@@ -943,8 +948,8 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
}
/**
- * \brief Droq packet processor sceduler
- * @param oct octeon device
+ * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
+ * @oct: octeon device
*/
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
@@ -972,13 +977,12 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
}
/**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
+ * liquidio_legacy_intr_handler - Interrupt handler for octeon
+ * @irq: unused
+ * @dev: octeon device
*/
static
-irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
- void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
@@ -999,8 +1003,9 @@ irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
}
/**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
+ * octeon_setup_interrupt - Setup interrupt for octeon device
+ * @oct: octeon device
+ * @num_ioqs: number of queues
*
* Enable interrupt in Octeon device as given in the PCI interrupt mask.
*/
@@ -1083,7 +1088,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
- /** For PF, there is one non-ioq interrupt handler */
+ /* For PF, there is one non-ioq interrupt handler */
if (OCTEON_CN23XX_PF(oct)) {
num_ioq_vectors -= 1;
@@ -1126,13 +1131,13 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
dev_err(&oct->pci_dev->dev,
"Request_irq failed for MSIX interrupt Error: %d\n",
irqret);
- /** Freeing the non-ioq irq vector here . */
+ /* Freeing the non-ioq irq vector here. */
free_irq(msix_entries[num_ioq_vectors].vector,
oct);
while (i) {
i--;
- /** clearing affinity mask. */
+ /* clearing affinity mask. */
irq_set_affinity_hint(
msix_entries[i].vector,
NULL);
@@ -1197,8 +1202,9 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
}
/**
- * \brief Net device change_mtu
- * @param netdev network device
+ * liquidio_change_mtu - Net device change_mtu
+ * @netdev: network device
+ * @new_mtu: the new max transmit unit size
*/
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8e0ed01e7f03..7d00d3a8ded4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(console_bitmask,
"Bitmask indicating which consoles have debug output redirected to syslog.");
/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
+ * octeon_console_debug_enabled - determines if a given console has debug enabled.
+ * @console: console to check
+ * Return: 1 = enabled. 0 otherwise
*/
static int octeon_console_debug_enabled(u32 console)
{
@@ -126,7 +126,7 @@ union tx_info {
} s;
};
-/** Octeon device properties to be used by the NIC module.
+/* Octeon device properties to be used by the NIC module.
* Each octeon device in the system will be represented
* by this structure in the NIC module.
*/
@@ -161,13 +161,13 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
-static void octeon_droq_bh(unsigned long pdev)
+static void octeon_droq_bh(struct tasklet_struct *t)
{
int q_no;
int reschedule = 0;
- struct octeon_device *oct = (struct octeon_device *)pdev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
+ droq_tasklet);
+ struct octeon_device *oct = oct_priv->dev;
for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
@@ -222,8 +222,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
/**
- * \brief Forces all IO queues off on a given device
- * @param oct Pointer to Octeon device
+ * force_io_queues_off - Forces all IO queues off on a given device
+ * @oct: Pointer to Octeon device
*/
static void force_io_queues_off(struct octeon_device *oct)
{
@@ -238,8 +238,8 @@ static void force_io_queues_off(struct octeon_device *oct)
}
/**
- * \brief Cause device to go quiet so it can be safely removed/reset/etc
- * @param oct Pointer to Octeon device
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
*/
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
@@ -283,8 +283,8 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
}
/**
- * \brief Cleanup PCI AER uncorrectable error status
- * @param dev Pointer to PCI device
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
*/
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -303,8 +303,8 @@ static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
/**
- * \brief Stop all PCI IO to a given device
- * @param dev Pointer to Octeon device
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
*/
static void stop_pci_io(struct octeon_device *oct)
{
@@ -332,9 +332,9 @@ static void stop_pci_io(struct octeon_device *oct)
}
/**
- * \brief called when PCI error is detected
- * @param pdev Pointer to PCI device
- * @param state The current pci connection state
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
@@ -362,11 +362,10 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
}
/**
- * \brief mmio handler
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_mmio_enabled - mmio handler
+ * @pdev: Pointer to PCI device
*/
-static pci_ers_result_t liquidio_pcie_mmio_enabled(
- struct pci_dev *pdev __attribute__((unused)))
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -376,14 +375,13 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(
}
/**
- * \brief called after the pci bus has been reset.
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
-static pci_ers_result_t liquidio_pcie_slot_reset(
- struct pci_dev *pdev __attribute__((unused)))
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -393,14 +391,14 @@ static pci_ers_result_t liquidio_pcie_slot_reset(
}
/**
- * \brief called when traffic can start flowing again.
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
-static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
+static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
/* Nothing to be done here. */
}
@@ -447,7 +445,7 @@ static struct pci_driver liquidio_pci_driver = {
};
/**
- * \brief register PCI driver
+ * liquidio_init_pci - register PCI driver
*/
static int liquidio_init_pci(void)
{
@@ -455,7 +453,7 @@ static int liquidio_init_pci(void)
}
/**
- * \brief unregister PCI driver
+ * liquidio_deinit_pci - unregister PCI driver
*/
static void liquidio_deinit_pci(void)
{
@@ -463,9 +461,9 @@ static void liquidio_deinit_pci(void)
}
/**
- * \brief Check Tx queue status, and take appropriate action
- * @param lio per-network private data
- * @returns 0 if full, number of queues woken up otherwise
+ * check_txq_status - Check Tx queue status, and take appropriate action
+ * @lio: per-network private data
+ * Return: 0 if full, number of queues woken up otherwise
*/
static inline int check_txq_status(struct lio *lio)
{
@@ -491,8 +489,8 @@ static inline int check_txq_status(struct lio *lio)
}
/**
- * \brief Print link information
- * @param netdev network device
+ * print_link_info - Print link information
+ * @netdev: network device
*/
static void print_link_info(struct net_device *netdev)
{
@@ -513,8 +511,8 @@ static void print_link_info(struct net_device *netdev)
}
/**
- * \brief Routine to notify MTU change
- * @param work work_struct data structure
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
*/
static void octnet_link_status_change(struct work_struct *work)
{
@@ -531,8 +529,8 @@ static void octnet_link_status_change(struct work_struct *work)
}
/**
- * \brief Sets up the mtu status change work
- * @param netdev network device
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
*/
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
@@ -563,9 +561,9 @@ static inline void cleanup_link_status_change_wq(struct net_device *netdev)
}
/**
- * \brief Update link status
- * @param netdev network device
- * @param ls link status structure
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
*
* Called on receipt of a link status response from the core application to
* update each interface's link status.
@@ -663,10 +661,9 @@ static void lio_sync_octeon_time(struct work_struct *work)
}
/**
- * setup_sync_octeon_time_wq - Sets up the work to periodically update
- * local time to octeon firmware
+ * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
*
- * @netdev - network device which should send time update to firmware
+ * @netdev: network device which should send time update to firmware
**/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
@@ -690,10 +687,12 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
}
/**
- * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
- * to periodically update local time to octeon firmware
+ * cleanup_sync_octeon_time_wq - destroy wq
*
- * @netdev - network device which should send time update to firmware
+ * @netdev: network device which should send time update to firmware
+ *
+ * Stop scheduling and destroy the work created to periodically update local
+ * time to octeon firmware.
**/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
@@ -828,13 +827,12 @@ static int liquidio_watchdog(void *param)
}
/**
- * \brief PCI probe handler
- * @param pdev PCI device structure
- * @param ent unused
+ * liquidio_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
*/
static int
-liquidio_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent __attribute__((unused)))
+liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
@@ -924,8 +922,8 @@ static bool fw_type_is_auto(void)
}
/**
- * \brief PCI FLR for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_flr - PCI FLR for each Octeon device.
+ * @oct: octeon device
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
@@ -951,9 +949,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
}
/**
- *\brief Destroy resources associated with octeon device
- * @param pdev PCI device structure
- * @param ent unused
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
@@ -1152,9 +1149,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
- * \brief Send Rx control command
- * @param lio per-network private data
- * @param start_stop whether to start or stop
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
@@ -1210,9 +1207,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
}
/**
- * \brief Destroy NIC device interface
- * @param oct octeon device
- * @param ifidx which interface to destroy
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
*
* Cleanup associated with each interface for an Octeon device when NIC
* module is being unloaded or if initialization fails during load.
@@ -1272,8 +1269,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
}
/**
- * \brief Stop complete NIC functionality
- * @param oct octeon device
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
*/
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
@@ -1313,8 +1310,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
}
/**
- * \brief Cleans up resources at unload time
- * @param pdev PCI device structure
+ * liquidio_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
*/
static void liquidio_remove(struct pci_dev *pdev)
{
@@ -1346,8 +1343,8 @@ static void liquidio_remove(struct pci_dev *pdev)
}
/**
- * \brief Identify the Octeon device and to map the BAR address space
- * @param oct octeon device
+ * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
+ * @oct: octeon device
*/
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
@@ -1390,8 +1387,8 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
}
/**
- * \brief PCI initialization for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_os_setup - PCI initialization for each Octeon device.
+ * @oct: octeon device
*/
static int octeon_pci_os_setup(struct octeon_device *oct)
{
@@ -1414,8 +1411,8 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Unmap and free network buffer
- * @param buf buffer
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
*/
static void free_netbuf(void *buf)
{
@@ -1434,8 +1431,8 @@ static void free_netbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer
- * @param buf buffer
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
*/
static void free_netsgbuf(void *buf)
{
@@ -1474,8 +1471,8 @@ static void free_netsgbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer with response
- * @param buf buffer
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
*/
static void free_netsgbuf_with_resp(void *buf)
{
@@ -1518,9 +1515,9 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Adjust ptp frequency
- * @param ptp PTP clock info
- * @param ppb how much to adjust by, in parts-per-billion
+ * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * @ptp: PTP clock info
+ * @ppb: how much to adjust by, in parts-per-billion
*/
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
@@ -1555,9 +1552,9 @@ static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
/**
- * \brief Adjust ptp time
- * @param ptp PTP clock info
- * @param delta how much to adjust by, in nanosecs
+ * liquidio_ptp_adjtime - Adjust ptp time
+ * @ptp: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
*/
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
@@ -1572,9 +1569,9 @@ static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * \brief Get hardware clock time, including any adjustment
- * @param ptp PTP clock info
- * @param ts timespec
+ * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
*/
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
@@ -1595,9 +1592,9 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
}
/**
- * \brief Set hardware clock time. Reset adjustment
- * @param ptp PTP clock info
- * @param ts timespec
+ * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
*/
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
@@ -1618,22 +1615,22 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * \brief Check if PTP is enabled
- * @param ptp PTP clock info
- * @param rq request
- * @param on is it on
+ * liquidio_ptp_enable - Check if PTP is enabled
+ * @ptp: PTP clock info
+ * @rq: request
+ * @on: is it on
*/
static int
-liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
- struct ptp_clock_request *rq __attribute__((unused)),
- int on __attribute__((unused)))
+liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
+ struct ptp_clock_request __maybe_unused *rq,
+ int __maybe_unused on)
{
return -EOPNOTSUPP;
}
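
The hunk above also drops the GNU __attribute__((unused)) spelling in favor of the kernel's __maybe_unused macro from <linux/compiler_attributes.h>, placed before the parameter name. A minimal sketch of the resulting style (hypothetical stub, not this driver's code):

static int example_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
			      struct ptp_clock_request __maybe_unused *rq,
			      int __maybe_unused on)
{
	/* Callback is required by the ops table but unsupported here. */
	return -EOPNOTSUPP;
}
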
/**
- * \brief Open PTP clock source
- * @param netdev network device
+ * oct_ptp_open - Open PTP clock source
+ * @netdev: network device
*/
static void oct_ptp_open(struct net_device *netdev)
{
@@ -1665,8 +1662,8 @@ static void oct_ptp_open(struct net_device *netdev)
}
/**
- * \brief Init PTP clock
- * @param oct octeon device
+ * liquidio_ptp_init - Init PTP clock
+ * @oct: octeon device
*/
static void liquidio_ptp_init(struct octeon_device *oct)
{
@@ -1682,8 +1679,8 @@ static void liquidio_ptp_init(struct octeon_device *oct)
}
/**
- * \brief Load firmware to device
- * @param oct octeon device
+ * load_firmware - Load firmware to device
+ * @oct: octeon device
*
* Maps device to firmware filename, requests firmware, and downloads it
*/
@@ -1721,8 +1718,8 @@ static int load_firmware(struct octeon_device *oct)
}
/**
- * \brief Poll routine for checking transmit queue status
- * @param work work_struct data structure
+ * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
+ * @work: work_struct data structure
*/
static void octnet_poll_check_txq_status(struct work_struct *work)
{
@@ -1738,8 +1735,8 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
}
/**
- * \brief Sets up the txq poll check
- * @param netdev network device
+ * setup_tx_poll_fn - Sets up the txq poll check
+ * @netdev: network device
*/
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
@@ -1771,8 +1768,8 @@ static inline void cleanup_tx_poll_fn(struct net_device *netdev)
}
/**
- * \brief Net device open for LiquidIO
- * @param netdev network device
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
*/
static int liquidio_open(struct net_device *netdev)
{
@@ -1831,8 +1828,8 @@ static int liquidio_open(struct net_device *netdev)
}
/**
- * \brief Net device stop for LiquidIO
- * @param netdev network device
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
*/
static int liquidio_stop(struct net_device *netdev)
{
@@ -1896,8 +1893,8 @@ static int liquidio_stop(struct net_device *netdev)
}
/**
- * \brief Converts a mask based on net device flags
- * @param netdev network device
+ * get_new_flags - Converts a mask based on net device flags
+ * @netdev: network device
*
* This routine generates a octnet_ifflags mask from the net device flags
* received from the OS.
@@ -1929,8 +1926,8 @@ static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
}
/**
- * \brief Net device set_multicast_list
- * @param netdev network device
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
*/
static void liquidio_set_mcast_list(struct net_device *netdev)
{
@@ -1977,8 +1974,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
}
/**
- * \brief Net device set_mac_address
- * @param netdev network device
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: pointer to sockaddr
*/
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
@@ -2096,10 +2094,9 @@ liquidio_get_stats64(struct net_device *netdev,
}
/**
- * \brief Handler for SIOCSHWTSTAMP ioctl
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
@@ -2154,10 +2151,10 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
}
/**
- * \brief ioctl handler
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -2174,9 +2171,10 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
- * \brief handle a Tx timestamp response
- * @param status response status
- * @param buf pointer to skb
+ * handle_timestamp - handle a Tx timestamp response
+ * @oct: octeon device
+ * @status: response status
+ * @buf: pointer to skb
*/
static void handle_timestamp(struct octeon_device *oct,
u32 status,
@@ -2217,10 +2215,12 @@ static void handle_timestamp(struct octeon_device *oct,
tx_buffer_free(skb);
}
-/* \brief Send a data packet that will be timestamped
- * @param oct octeon device
- * @param ndata pointer to network data
- * @param finfo pointer to private network data
+/**
+ * send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
+ * @xmit_more: whether more packets are queued behind this one
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
@@ -2276,10 +2276,12 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-/** \brief Transmit networks packets to the Octeon interface
- * @param skbuff skbuff struct to be passed to network layer.
- * @param netdev pointer to network device
- * @returns whether the packet was transmitted to the device okay or not
+/**
+ * liquidio_xmit - Transmit network packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
+ *
+ * Return: whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -2524,8 +2526,10 @@ lio_xmit_failed:
return NETDEV_TX_OK;
}
-/** \brief Network device Tx timeout
- * @param netdev pointer to network device
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
*/
static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -2597,12 +2601,12 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
return ret;
}
-/** Sending command to enable/disable RX checksum offload
- * @param netdev pointer to network device
- * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
- * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
- * OCTNET_CMD_RXCSUM_DISABLE
- * @returns SUCCESS or FAILURE
+/**
+ * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
+ * Return: SUCCESS or FAILURE
*/
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
u8 rx_cmd)
@@ -2632,13 +2636,14 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
return ret;
}
-/** Sending command to add/delete VxLAN UDP port to firmware
- * @param netdev pointer to network device
- * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
- * @param vxlan_port VxLAN port to be added or deleted
- * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+/**
+ * liquidio_vxlan_port_command - Send command to add/delete VxLAN UDP port to firmware
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @vxlan_port: VxLAN port to be added or deleted
+ * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
* OCTNET_CMD_VXLAN_PORT_DEL
- * @returns SUCCESS or FAILURE
+ * Return: SUCCESS or FAILURE
*/
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
u16 vxlan_port, u8 vxlan_cmd_bit)
@@ -2698,10 +2703,11 @@ static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
},
};
-/** \brief Net device fix features
- * @param netdev pointer to network device
- * @param request features requested
- * @returns updated features list
+/**
+ * liquidio_fix_features - Net device fix features
+ * @netdev: pointer to network device
+ * @request: features requested
+ * Return: updated features list
*/
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
netdev_features_t request)
@@ -2737,9 +2743,10 @@ static netdev_features_t liquidio_fix_features(struct net_device *netdev,
return request;
}
-/** \brief Net device set features
- * @param netdev pointer to network device
- * @param features features to enable/disable
+/**
+ * liquidio_set_features - Net device set features
+ * @netdev: pointer to network device
+ * @features: features to enable/disable
*/
static int liquidio_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -3224,7 +3231,8 @@ static const struct net_device_ops lionetdevops = {
.ndo_get_port_parent_id = liquidio_get_port_parent_id,
};
-/** \brief Entry point for the liquidio module
+/**
+ * liquidio_init - Entry point for the liquidio module
*/
static int __init liquidio_init(void)
{
@@ -3307,8 +3315,8 @@ nic_info_err:
}
/**
- * \brief Setup network interfaces
- * @param octeon_dev octeon device
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
*
* Called during init time for each device. It assumes the NIC
* is already up and running. The link information for each
@@ -3872,8 +3880,8 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
#endif
/**
- * \brief initialize the NIC
- * @param oct octeon device
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
*
* This initialization routine is called once the Octeon device application is
* up and running
@@ -3928,9 +3936,10 @@ octnet_init_failure:
}
/**
- * \brief starter callback that invokes the remaining initialization work after
- * the NIC is up and running.
- * @param octptr work struct work_struct
+ * nic_starter - finish initializing the NIC
+ * @work: pointer to the work_struct
+ *
+ * Starter callback that invokes the remaining initialization work after the NIC is up and running.
*/
static void nic_starter(struct work_struct *work)
{
@@ -4023,8 +4032,8 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
}
/**
- * \brief Device initialization for each Octeon device that is probed
- * @param octeon_dev octeon device
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @octeon_dev: octeon device
*/
static int octeon_device_init(struct octeon_device *octeon_dev)
{
@@ -4193,8 +4202,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Initialize the tasklet that handles output queue packet processing.*/
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
- tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
- (unsigned long)octeon_dev);
+ tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
/* Setup the interrupt handler and record the INT SUM register address
*/
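
The tasklet_init() to tasklet_setup() switch here is part of the tree-wide tasklet API conversion: the callback now receives the tasklet_struct pointer and recovers its enclosing object with from_tasklet(), a container_of() wrapper, instead of casting an unsigned long cookie. That is also why this patch adds a dev back-pointer to struct octeon_device_priv: any context beyond the embedding structure must now be stored explicitly. A self-contained sketch with hypothetical names:

#include <linux/interrupt.h>

struct example_priv {
	struct tasklet_struct droq_tasklet;
	struct octeon_device *dev;	/* extra context for the callback */
};

static void example_droq_bh(struct tasklet_struct *t)
{
	/* from_tasklet(var, tasklet, member) is container_of() in disguise */
	struct example_priv *priv = from_tasklet(priv, t, droq_tasklet);

	/* ... process output queues on priv->dev ... */
}

static void example_init(struct example_priv *priv)
{
	tasklet_setup(&priv->droq_tasklet, example_droq_bh);
}
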
@@ -4298,16 +4306,17 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
complete(&handshake[octeon_dev->octeon_id].init);
atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+ oct_priv->dev = octeon_dev;
return 0;
}
/**
- * \brief Debug console print function
- * @param octeon_dev octeon device
- * @param console_num console number
- * @param prefix first portion of line to display
- * @param suffix second portion of line to display
+ * octeon_dbg_console_print - Debug console print function
+ * @oct: octeon device
+ * @console_num: console number
+ * @prefix: first portion of line to display
+ * @suffix: second portion of line to display
*
* The OCTEON debug console outputs entire lines (excluding '\n').
* Normally, the line will be passed in the 'prefix' parameter.
@@ -4330,7 +4339,7 @@ static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
}
/**
- * \brief Exits the module
+ * liquidio_exit - Exits the module
*/
static void __exit liquidio_exit(void)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 8c5879e31240..103440f97bc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -99,8 +99,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
/**
- * \brief Cause device to go quiet so it can be safely removed/reset/etc
- * @param oct Pointer to Octeon device
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
*/
static void pcierror_quiesce_device(struct octeon_device *oct)
{
@@ -143,8 +143,8 @@ static void pcierror_quiesce_device(struct octeon_device *oct)
}
/**
- * \brief Cleanup PCI AER uncorrectable error status
- * @param dev Pointer to PCI device
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
*/
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -163,8 +163,8 @@ static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
/**
- * \brief Stop all PCI IO to a given device
- * @param dev Pointer to Octeon device
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
*/
static void stop_pci_io(struct octeon_device *oct)
{
@@ -205,9 +205,9 @@ static void stop_pci_io(struct octeon_device *oct)
}
/**
- * \brief called when PCI error is detected
- * @param pdev Pointer to PCI device
- * @param state The current pci connection state
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
@@ -256,8 +256,8 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * \brief Print link information
- * @param netdev network device
+ * print_link_info - Print link information
+ * @netdev: network device
*/
static void print_link_info(struct net_device *netdev)
{
@@ -278,8 +278,8 @@ static void print_link_info(struct net_device *netdev)
}
/**
- * \brief Routine to notify MTU change
- * @param work work_struct data structure
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
*/
static void octnet_link_status_change(struct work_struct *work)
{
@@ -296,8 +296,8 @@ static void octnet_link_status_change(struct work_struct *work)
}
/**
- * \brief Sets up the mtu status change work
- * @param netdev network device
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
*/
static int setup_link_status_change_wq(struct net_device *netdev)
{
@@ -328,9 +328,9 @@ static void cleanup_link_status_change_wq(struct net_device *netdev)
}
/**
- * \brief Update link status
- * @param netdev network device
- * @param ls link status structure
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
*
* Called on receipt of a link status response from the core application to
* update each interface's link status.
@@ -374,13 +374,13 @@ static void update_link_status(struct net_device *netdev,
}
/**
- * \brief PCI probe handler
- * @param pdev PCI device structure
- * @param ent unused
+ * liquidio_vf_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
*/
static int
liquidio_vf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent __attribute__((unused)))
+ const struct pci_device_id __maybe_unused *ent)
{
struct octeon_device *oct_dev = NULL;
@@ -416,8 +416,8 @@ liquidio_vf_probe(struct pci_dev *pdev,
}
/**
- * \brief PCI FLR for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_flr - PCI FLR for each Octeon device.
+ * @oct: octeon device
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
@@ -437,9 +437,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
}
/**
- *\brief Destroy resources associated with octeon device
- * @param pdev PCI device structure
- * @param ent unused
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
@@ -592,9 +591,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
- * \brief Send Rx control command
- * @param lio per-network private data
- * @param start_stop whether to start or stop
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
@@ -644,9 +643,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
}
/**
- * \brief Destroy NIC device interface
- * @param oct octeon device
- * @param ifidx which interface to destroy
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
*
* Cleanup associated with each interface for an Octeon device when NIC
* module is being unloaded or if initialization fails during load.
@@ -704,8 +703,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
}
/**
- * \brief Stop complete NIC functionality
- * @param oct octeon device
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
*/
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
@@ -737,8 +736,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
}
/**
- * \brief Cleans up resources at unload time
- * @param pdev PCI device structure
+ * liquidio_vf_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
*/
static void liquidio_vf_remove(struct pci_dev *pdev)
{
@@ -763,8 +762,8 @@ static void liquidio_vf_remove(struct pci_dev *pdev)
}
/**
- * \brief PCI initialization for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_os_setup - PCI initialization for each Octeon device.
+ * @oct: octeon device
*/
static int octeon_pci_os_setup(struct octeon_device *oct)
{
@@ -792,8 +791,8 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Unmap and free network buffer
- * @param buf buffer
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
*/
static void free_netbuf(void *buf)
{
@@ -812,8 +811,8 @@ static void free_netbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer
- * @param buf buffer
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
*/
static void free_netsgbuf(void *buf)
{
@@ -853,8 +852,8 @@ static void free_netsgbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer with response
- * @param buf buffer
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
*/
static void free_netsgbuf_with_resp(void *buf)
{
@@ -897,8 +896,8 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Net device open for LiquidIO
- * @param netdev network device
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
*/
static int liquidio_open(struct net_device *netdev)
{
@@ -941,8 +940,8 @@ static int liquidio_open(struct net_device *netdev)
}
/**
- * \brief Net device stop for LiquidIO
- * @param netdev network device
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
*/
static int liquidio_stop(struct net_device *netdev)
{
@@ -991,8 +990,8 @@ static int liquidio_stop(struct net_device *netdev)
}
/**
- * \brief Converts a mask based on net device flags
- * @param netdev network device
+ * get_new_flags - Converts a mask based on net device flags
+ * @netdev: network device
*
* This routine generates a octnet_ifflags mask from the net device flags
* received from the OS.
@@ -1060,8 +1059,8 @@ static void liquidio_set_uc_list(struct net_device *netdev)
}
/**
- * \brief Net device set_multicast_list
- * @param netdev network device
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
*/
static void liquidio_set_mcast_list(struct net_device *netdev)
{
@@ -1110,8 +1109,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
}
/**
- * \brief Net device set_mac_address
- * @param netdev network device
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: opaque pointer to sockaddr
*/
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
@@ -1229,10 +1229,9 @@ liquidio_get_stats64(struct net_device *netdev,
}
/**
- * \brief Handler for SIOCSHWTSTAMP ioctl
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
@@ -1287,10 +1286,10 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
}
/**
- * \brief ioctl handler
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -1339,10 +1338,10 @@ static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
tx_buffer_free(skb);
}
-/* \brief Send a data packet that will be timestamped
- * @param oct octeon device
- * @param ndata pointer to network data
- * @param finfo pointer to private network data
+/* send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
*/
static int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
@@ -1393,9 +1392,10 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-/** \brief Transmit networks packets to the Octeon interface
- * @param skbuff skbuff struct to be passed to network layer.
- * @param netdev pointer to network device
+/**
+ * liquidio_xmit - Transmit network packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
* @returns whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
@@ -1623,8 +1623,10 @@ lio_xmit_failed:
return NETDEV_TX_OK;
}
-/** \brief Network device Tx timeout
- * @param netdev pointer to network device
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
*/
static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -1917,8 +1919,8 @@ nic_info_err:
}
/**
- * \brief Setup network interfaces
- * @param octeon_dev octeon device
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
*
* Called during init time for each device. It assumes the NIC
* is already up and running. The link information for each
@@ -2229,8 +2231,8 @@ setup_nic_dev_done:
}
/**
- * \brief initialize the NIC
- * @param oct octeon device
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
*
* This initialization routine is called once the Octeon device application is
* up and running
@@ -2270,8 +2272,8 @@ octnet_init_failure:
}
/**
- * \brief Device initialization for each Octeon device that is probed
- * @param octeon_dev octeon device
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @oct: octeon device
*/
static int octeon_device_init(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 0d2831d10f65..28feabec8fbb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -15,7 +15,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
-/**
+/*
* @file octeon_console.c
*/
#include <linux/moduleparam.h>
@@ -131,7 +131,7 @@ struct octeon_pci_console_desc {
/* Implicit storage for console_addr_array */
};
-/**
+/*
* This function is the implementation of the get macros defined
* for individual structure members. The argument are generated
* by the macros inorder to read only the needed memory.
@@ -160,7 +160,7 @@ static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
}
}
-/**
+/*
* This function retrieves the string name of a named block. It is
* more complicated than a simple memcpy() since the named block
* descriptor may not be directly accessible.
@@ -182,7 +182,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
/* See header file for descriptions of functions */
-/**
+/*
* Check the version information on the bootmem descriptor
*
* @param exact_match
@@ -323,7 +323,7 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
return result;
}
-/**
+/*
* Find a named block on the remote Octeon
*
* @param name Name of block to find
@@ -707,7 +707,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num,
return ret;
}
-/**
+/*
* Removes all consoles
*
* @param oct octeon device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ac32facaa427..387a57cbfb73 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1307,7 +1307,7 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1 0x3C0
-/** Get the octeon device pointer.
+/* Get the octeon device pointer.
* @param octeon_id - The id for which the octeon device pointer is required.
* @return Success: Octeon device pointer.
* @return Failure: NULL.
@@ -1324,7 +1324,7 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
u64 val64;
unsigned long flags;
- u32 val32, addrhi;
+ u32 addrhi;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1339,10 +1339,10 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
/* Read back to preserve ordering of writes */
- val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+ readl(oct->reg_list.pci_win_rd_addr_hi);
writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
- val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+ readl(oct->reg_list.pci_win_rd_addr_lo);
val64 = readq(oct->reg_list.pci_win_rd_data);
@@ -1355,7 +1355,6 @@ void lio_pci_writeq(struct octeon_device *oct,
u64 val,
u64 addr)
{
- u32 val32;
unsigned long flags;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1365,7 +1364,7 @@ void lio_pci_writeq(struct octeon_device *oct,
/* The write happens when the LSB is written. So write MSB first. */
writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
/* Read the MSB to ensure ordering of writes. */
- val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+ readl(oct->reg_list.pci_win_wr_data_hi);
writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
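
The val32 locals deleted in these hunks existed only to receive the result of the flushing reads; the readl() calls themselves must stay, because reading back from the device forces the preceding posted writel() to complete before the next write is issued. The idiom in isolation, with hypothetical register pointers:

static void example_write64(void __iomem *hi, void __iomem *lo, u64 val)
{
	writel(upper_32_bits(val), hi);
	/* Read back (result deliberately discarded) to flush the posted
	 * write, so the high word lands before the low word triggers it. */
	readl(hi);
	writel(lower_32_bits(val), lo);
}
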
@@ -1411,7 +1410,7 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
return ret;
}
-/** Get the octeon id assigned to the octeon device passed as argument.
+/* Get the octeon id assigned to the octeon device passed as argument.
* This function is exported to other modules.
* @param dev - octeon device pointer passed as a void *.
* @return octeon device id
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 017169023cca..d4080bddcb6b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -280,13 +280,10 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
- numa_node);
+ droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
+ numa_node);
if (!droq->recv_buf_list)
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc(array_size(droq->max_count,
- OCT_DROQ_RECVBUF_SIZE));
+ droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
goto init_droq_fail;
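
Two cleanups share this hunk: the cast of vzalloc_node()'s void * return value is dropped (unnecessary in C), and the NUMA-local allocation with a fallback to any node is kept. Roughly, as a standalone sketch:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

static struct octeon_recv_buffer *example_alloc_recv_list(u32 count, int node)
{
	struct octeon_recv_buffer *list;

	/* array_size() saturates on overflow so the allocation fails safely */
	list = vzalloc_node(array_size(count, OCT_DROQ_RECVBUF_SIZE), node);
	if (!list)	/* node-local memory exhausted: take any node */
		list = vzalloc(array_size(count, OCT_DROQ_RECVBUF_SIZE));
	return list;
}
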
@@ -777,7 +774,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
return 0;
}
-/**
+/*
* Utility function to poll for packets. check_hw_for_packets must be
* called before calling this routine.
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 614d07be7181..ad685f5d0a13 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -28,7 +28,7 @@
/**
* octeon_mbox_read:
- * @oct: Pointer mailbox
+ * @mbox: Pointer mailbox
*
* Reads the 8-bytes of data from the mbox register
* Writes back the acknowldgement inidcating completion of read
@@ -285,7 +285,8 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
}
/**
- *octeon_mbox_process_message:
+ * octeon_mbox_process_message - Process the received mbox message
+ * @mbox: mailbox
*
* Process the received mbox message.
*/
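
The tag-name fixes in this file are not cosmetic: scripts/kernel-doc, run during a "make W=1" build, cross-checks every @tag against the function's real parameter list, so a stale tag such as @oct on a function whose parameter is mbox triggers a "Function parameter or member 'mbox' not described" warning. A hypothetical conforming header:

/**
 * example_mbox_read - read one 64-bit word from the mailbox register
 * @mbox: mailbox to read from; the tag must match the parameter name
 *
 * Return: number of bytes read.
 */
static int example_mbox_read(struct octeon_mbox *mbox);
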
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 073d0647b439..5b4cb725f60f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -39,6 +39,7 @@ struct octeon_device_priv {
/** Tasklet structures for this device. */
struct tasklet_struct droq_tasklet;
unsigned long napi_mask;
+ struct octeon_device *dev;
};
/** This structure is used by NIC driver to store information required
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 4c85ae643b7b..7ccab36143c1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -22,6 +22,7 @@
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
+#include "octeon_mem_ops.h"
#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 6cb2162a75d4..5e50bb19bf26 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -315,9 +315,9 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
netif_wake_queue(p->netdev);
}
-static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
{
- struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
octeon_mgmt_clean_tx_buffers(p);
octeon_mgmt_enable_tx_irq(p);
}
@@ -1491,8 +1491,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
skb_queue_head_init(&p->tx_list);
skb_queue_head_init(&p->rx_list);
- tasklet_init(&p->tx_clean_tasklet,
- octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+ tasklet_setup(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet);
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 063e560d9c1b..f3b7b443f964 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -985,9 +985,9 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
*
* As of now only CQ errors are handled
*/
-static void nicvf_handle_qs_err(unsigned long data)
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
struct queue_set *qs = nic->qs;
int qidx;
u64 status;
@@ -1493,12 +1493,10 @@ int nicvf_open(struct net_device *netdev)
}
/* Init tasklet for handling Qset err interrupt */
- tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
- (unsigned long)nic);
+ tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
/* Init RBDR tasklet which will refill RBDR */
- tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
- (unsigned long)nic);
+ tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
/* Configure CPI alorithm */
@@ -2067,8 +2065,8 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
mode |= BGX_XCAST_MCAST_FILTER;
/* here we need to copy mc addrs */
if (netdev_mc_count(netdev)) {
- mc_list = kmalloc(offsetof(typeof(*mc_list),
- mc[netdev_mc_count(netdev)]),
+ mc_list = kmalloc(struct_size(mc_list, mc,
+ netdev_mc_count(netdev)),
GFP_ATOMIC);
if (unlikely(!mc_list))
return;
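
struct_size() from <linux/overflow.h> replaces the open-coded offsetof() idiom for sizing a structure that ends in a flexible array member, adding overflow checking. A minimal sketch with hypothetical types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_mc_list {
	int count;
	u64 mc[];	/* flexible array of multicast MAC addresses */
};

static struct example_mc_list *example_alloc_mc_list(int n)
{
	struct example_mc_list *list;

	/* struct_size() == sizeof(*list) + n * sizeof(list->mc[0]),
	 * saturating to SIZE_MAX on overflow so kmalloc() fails cleanly */
	list = kmalloc(struct_size(list, mc, n), GFP_ATOMIC);
	if (list)
		list->count = n;
	return list;
}
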
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a45223f0cca5..7a141ce32e86 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -460,9 +460,9 @@ void nicvf_rbdr_work(struct work_struct *work)
}
/* In Softirq context, alloc rcv buffers in atomic mode */
-void nicvf_rbdr_task(unsigned long data)
+void nicvf_rbdr_task(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
nicvf_refill_rbdr(nic, GFP_ATOMIC);
if (nic->rb_alloc_fail) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 2460451fc48f..8453defc296c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -348,7 +348,7 @@ void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
struct cqe_rx_t *cqe_rx, bool xdp);
-void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index f6f3ef9a93cf..87cc0ef68b31 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -134,4 +134,6 @@ config CHELSIO_LIB
help
Common library for Chelsio drivers.
+source "drivers/net/ethernet/chelsio/inline_crypto/Kconfig"
+
endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index c0f978d2e8a7..1a6fd8b2bb7d 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
obj-$(CONFIG_CHELSIO_LIB) += libcxgb/
+obj-$(CONFIG_CHELSIO_INLINE_CRYPTO) += inline_crypto/
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 99736796e1a0..0e4a0f413960 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -997,17 +997,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for "
- "consistent allocations\n", pci_name(pdev));
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
+ pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
}
- } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
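
This hunk migrates the probe path from the legacy pci_*_dma wrappers to the generic DMA API on &pdev->dev. A condensed sketch of the same negotiation using the combined dma_set_mask_and_coherent() helper (hypothetical device setup, not this driver's exact flow):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_dma_setup(struct pci_dev *pdev)
{
	void *ring;
	dma_addr_t ring_dma;

	/* Prefer 64-bit addressing for both streaming and coherent
	 * mappings, falling back to 32-bit. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	/* Replaces pci_alloc_consistent()/pci_free_consistent(). */
	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
				  GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
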
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47b5c8e2104b..2d9c2b5a690a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -239,8 +239,10 @@ struct sched {
unsigned int num; /* num skbs in per port queues */
struct sched_port p[MAX_NPORTS];
struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+ struct sge *sge;
};
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
/*
@@ -378,7 +380,8 @@ static int tx_sched_init(struct sge *sge)
return -ENOMEM;
pr_debug("tx_sched_init\n");
- tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ tasklet_setup(&s->sched_tsk, restart_sched);
+ s->sge = sge;
sge->tx_sched = s;
for (i = 0; i < MAX_NPORTS; i++) {
@@ -509,9 +512,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
if (++cidx == q->size)
@@ -529,8 +531,8 @@ static void free_rx_resources(struct sge *sge)
if (sge->respQ.entries) {
size = sizeof(struct respQ_e) * sge->respQ.size;
- pci_free_consistent(pdev, size, sge->respQ.entries,
- sge->respQ.dma_addr);
+ dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+ sge->respQ.dma_addr);
}
for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +544,8 @@ static void free_rx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct freelQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -564,7 +566,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
q->size = p->freelQ_size[i];
q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
size = sizeof(struct freelQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -601,7 +604,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->respQ.credits = 0;
size = sizeof(struct respQ_e) * sge->respQ.size;
sge->respQ.entries =
- pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+ dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+ GFP_KERNEL);
if (!sge->respQ.entries)
goto err_no_mem;
return 0;
@@ -624,9 +628,10 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
ce = &q->centries[cidx];
while (n--) {
if (likely(dma_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (q->sop)
q->sop = 0;
}
@@ -663,8 +668,8 @@ static void free_tx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct cmdQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -689,7 +694,8 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
q->stop_thres = 0;
spin_lock_init(&q->lock);
size = sizeof(struct cmdQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -837,8 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
break;
skb_reserve(skb, q->dma_offset);
- mapping = pci_map_single(pdev, skb->data, dma_len,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+ DMA_FROM_DEVICE);
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
@@ -1049,15 +1055,15 @@ static inline struct sk_buff *get_packet(struct adapter *adapter,
goto use_orig_buf;
skb_put(skb, len);
- pci_dma_sync_single_for_cpu(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
- pci_dma_sync_single_for_device(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
}
@@ -1068,8 +1074,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1091,8 +1097,9 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1209,8 +1216,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e = e1 = &q->entries[pidx];
ce = &q->centries[pidx];
- mapping = pci_map_single(adapter->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
desc_mapping = mapping;
desc_len = skb_headlen(skb);
@@ -1301,9 +1308,10 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
* Called from tasklet. Checks the scheduler for any
* pending skbs that can be sent.
*/
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
{
- struct sge *sge = (struct sge *) arg;
+ struct sched *s = from_tasklet(s, t, sched_tsk);
+ struct sge *sge = s->sge;
struct adapter *adapter = sge->adapter;
struct cmdQ *q = &sge->cmdQ[0];
struct sk_buff *skb;
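
Note the design consequence in this file: from_tasklet() can only recover the structure that embeds the tasklet — struct sched here — so the struct sge that the old unsigned long argument used to smuggle in must now be stored explicitly, which is what the new s->sge assignment in tx_sched_init() provides. Sketched in isolation (types abbreviated):

static void example_restart_sched(struct tasklet_struct *t)
{
	struct sched *s = from_tasklet(s, t, sched_tsk);
	struct sge *sge = s->sge;	/* context via the new back-pointer */

	/* ... drain the per-port queues into sge->cmdQ[0] ... */
}
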
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 087ff0ffb597..f80fbd81b609 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -313,6 +313,7 @@ void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
void t3_sge_start(struct adapter *adap);
+void t3_sge_stop_dma(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
index dadf11e3dddb..9d591f0ddfc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -815,17 +815,12 @@ static const struct cphy_ops ael2020_ops = {
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
- int err;
-
cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
- err = set_phy_regs(phy, ael2020_reset_regs);
- if (err)
- return err;
- return 0;
+ return set_phy_regs(phy, ael2020_reset_regs);
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 387c357e1b8e..84ad7261e243 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -148,7 +148,7 @@ struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
- * @p: the port whose settings are to be reported
+ * @dev: the port whose settings are to be reported
*
* Shows the link status, speed, and duplex of a port.
*/
@@ -304,8 +304,8 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
/**
* t3_os_phymod_changed - handle PHY module changes
- * @phy: the PHY reporting the module change
- * @mod_type: new module type
+ * @adap: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
*
* This is the OS-dependent handler for PHY module changes. It is
* invoked when a PHY module is removed or inserted for any OS-specific
@@ -1200,7 +1200,7 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
/**
* cxgb_up - enable the adapter
- * @adapter: adapter being enabled
+ * @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
@@ -2996,7 +2996,7 @@ void t3_fatal_err(struct adapter *adapter)
unsigned int fw_status[4];
if (adapter->flags & FULL_INIT_DONE) {
- t3_sge_stop(adapter);
+ t3_sge_stop_dma(adapter);
t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6dabbf1502c7..e18e9ce27f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -372,7 +372,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @pdev: the PCI device associated with the adapter
- * @rxq: the SGE free list to clean up
+ * @q: the SGE free list to clean up
*
* Release the buffers on an SGE free-buffer Rx queue. HW fetching from
* this queue should be stopped before calling this function.
@@ -493,7 +493,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
/**
* refill_fl - refill an SGE free-buffer list
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the free-list to refill
* @n: the number of new buffers to allocate
* @gfp: the gfp flags for allocating new buffers
@@ -568,7 +568,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
/**
* recycle_rx_buf - recycle a receive buffer
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the SGE free list
* @idx: index of buffer to recycle
*
@@ -825,6 +825,7 @@ use_orig_buf:
* get_packet_pg - return the next ingress packet buffer from a free list
* @adap: the adapter that received the packet
* @fl: the SGE free list holding the packet
+ * @q: the queue
* @len: the packet length including any SGE padding
* @drop_thres: # of remaining buffers before we start dropping packets
*
@@ -1173,6 +1174,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
* @q: the Tx queue
* @ndesc: number of descriptors the packet will occupy
* @compl: the value of the COMPL bit to use
+ * @addr: array of DMA-mapped addresses for the packet buffers
*
* Generate a TX_PKT work request to send the supplied packet.
*/
@@ -1516,14 +1518,14 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
/**
* restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_CTRL];
spin_lock(&q->lock);
@@ -1622,6 +1624,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
* @pidx: index of the first Tx descriptor to write
* @gen: the generation value to use
* @ndesc: number of descriptors the packet will occupy
+ * @addr: array of DMA-mapped addresses for the packet buffers
*
* Write an offload work request to send the supplied packet. The packet
* data already carry the work request with most fields populated.
@@ -1733,14 +1736,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
/**
* restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_offloadq(unsigned long data)
+static void restart_offloadq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
@@ -1883,7 +1886,7 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
/**
* ofld_poll - NAPI handler for offload packets in interrupt mode
- * @dev: the network device doing the polling
+ * @napi: the network device doing the polling
* @budget: polling budget
*
* The NAPI handler for offload packets when a response queue is serviced
@@ -2007,7 +2010,7 @@ static void restart_tx(struct sge_qset *qs)
/**
* cxgb3_arp_process - process an ARP request probing a private IP address
- * @adapter: the adapter
+ * @pi: the port info
* @skb: the skbuff containing the ARP request
*
* Check if the ARP request is probing the private IP address
@@ -2069,7 +2072,8 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @adap: the adapter
* @rq: the response queue that received the packet
* @skb: the packet
- * @pad: amount of padding at the start of the buffer
+ * @pad: amount of padding at the start of the buffer
+ * @lro: whether large receive offload should be used
*
* Process an ingress ethernet pakcet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
@@ -2239,7 +2243,7 @@ static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
/**
* check_ring_db - check if we need to ring any doorbells
- * @adapter: the adapter
+ * @adap: the adapter
* @qs: the queue set whose Tx queues are to be examined
* @sleeping: indicates which Tx queue sent GTS
*
@@ -2372,10 +2376,7 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
- prefetch(addr);
-#if L1_CACHE_BYTES < 128
- prefetch(addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(addr);
__refill_fl(adap, fl);
if (lro > 0) {
lro_add_page(adap, qs, fl,
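
The removed open-coded pair of prefetches is now provided by net_prefetch() from <linux/netdevice.h>, which expands to roughly the following, so individual drivers no longer carry the cache-line-size guard themselves:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}
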
@@ -2902,7 +2903,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
/**
* sge_timer_tx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
+ * @t: a timer list containing the SGE queue set to maintain
*
* Runs periodically from a timer to perform maintenance of an SGE queue
* set. It performs two tasks:
@@ -2946,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
+ * @t: the timer list containing the SGE queue set to maintain
*
* a) Replenishes Rx queues that have run out due to memory shortage.
* Normally new Rx buffers are added when existing ones are consumed but
@@ -3024,7 +3025,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
* @irq_vec_idx: the IRQ vector index for response queue interrupts
* @p: configuration parameters for this queue set
* @ntxq: number of Tx queues for the queue set
- * @netdev: net device associated with this queue set
+ * @dev: net device associated with this queue set
* @netdevq: net device TX queue associated with this queue set
*
* Allocate resources and initialize an SGE queue set. A queue set
@@ -3084,10 +3085,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
skb_queue_head_init(&q->txq[i].sendq);
}
- tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
- (unsigned long)q);
- tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
- (unsigned long)q);
+ tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
+ tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
@@ -3271,30 +3270,40 @@ void t3_sge_start(struct adapter *adap)
}
/**
- * t3_sge_stop - disable SGE operation
+ * t3_sge_stop_dma - Disable SGE DMA engine operation
* @adap: the adapter
*
- * Disables the DMA engine. This can be called in emeregencies (e.g.,
- * from error interrupts) or from normal process context. In the latter
- * case it also disables any pending queue restart tasklets. Note that
- * if it is called in interrupt context it cannot disable the restart
- * tasklets as it cannot wait, however the tasklets will have no effect
- * since the doorbells are disabled and the driver will call this again
- * later from process context, at which time the tasklets will be stopped
- * if they are still running.
+ * Can be invoked from interrupt context, e.g. from an error handler.
+ *
+ * Note that this function cannot disable the queue restart tasklets,
+ * as it cannot wait if called from interrupt context; however, the
+ * tasklets will have no effect since the doorbells are disabled. The
+ * driver will call t3_sge_stop() later from process context, at
+ * which time the tasklets will be stopped if they are still running.
*/
-void t3_sge_stop(struct adapter *adap)
+void t3_sge_stop_dma(struct adapter *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
- if (!in_interrupt()) {
- int i;
+}
- for (i = 0; i < SGE_QSETS; ++i) {
- struct sge_qset *qs = &adap->sge.qs[i];
+/**
+ * t3_sge_stop - disable SGE operation completely
+ * @adap: the adapter
+ *
+ * Called from process context. Disables the DMA engine and any
+ * pending queue restart tasklets.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ int i;
- tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
- tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
- }
+ t3_sge_stop_dma(adap);
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+ tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
}
}
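
Splitting the function is part of removing in_interrupt() checks from drivers: rather than one function probing its own execution context, there are now two entry points, and the caller — which knows whether it may sleep — picks the right one (tasklet_kill() can wait, so it is off-limits in atomic context). Hypothetical callers illustrating the contract:

static irqreturn_t example_fatal_err_intr(int irq, void *dev_id)
{
	struct adapter *adap = dev_id;

	t3_sge_stop_dma(adap);	/* atomic-safe: a register write only */
	return IRQ_HANDLED;
}

static void example_teardown(struct adapter *adap)
{
	t3_sge_stop(adap);	/* process context: also kills the tasklets */
}
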
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 0a9f2c596624..7ff31d1026fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -2195,7 +2195,7 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/**
* clear_sge_ctxt - completely clear an SGE context
- * @adapter: the adapter
+ * @adap: the adapter
* @id: the context id
* @type: the context type
*
@@ -2484,6 +2484,7 @@ int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
* @adapter: the adapter
* @id: the context id
* @op: the operation to perform
+ * @credits: credit value to write
*
* Perform the selected operation on an SGE completion queue context.
* The caller is responsible for ensuring only one context operation
@@ -2885,7 +2886,7 @@ static void init_cong_ctrl(unsigned short *a, unsigned short *b)
* t3_load_mtus - write the MTU and congestion control HW tables
* @adap: the adapter
* @mtus: the unrestricted values for the MTU table
- * @alphs: the values for the congestion control alpha parameter
+ * @alpha: the values for the congestion control alpha parameter
* @beta: the values for the congestion control beta parameter
* @mtu_cap: the maximum permitted effective MTU
*
@@ -2966,7 +2967,7 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
/**
* t3_set_proto_sram - set the contents of the protocol sram
- * @adapter: the adapter
+ * @adap: the adapter
* @data: the protocol image
*
* Write the contents of the protocol SRAM.
@@ -3483,7 +3484,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
- * @ai: information about the current card
+ * @caps: information about the current card
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/duplex/flow-control/autonegotiation
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9cb8b229c1b3..27308600da15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -146,6 +146,11 @@ enum {
CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
};
+enum cxgb4_netdev_tls_ops {
+ CXGB4_TLSDEV_OPS = 1,
+ CXGB4_XFRMDEV_OPS
+};
+
struct cxgb4_bootcfg_data {
__le16 signature;
__u8 reserved[2];
@@ -1196,6 +1201,12 @@ struct adapter {
struct cxgb4_tc_u32_table *tc_u32;
struct chcr_ktls chcr_ktls;
struct chcr_stats_debug chcr_stats;
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ struct ch_ktls_stats_debug ch_ktls_stats;
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ struct ch_ipsec_stats_debug ch_ipsec_stats;
+#endif
/* TC flower offload */
bool tc_flower_initialized;
@@ -2100,7 +2111,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
-void cxgb4_ethofld_restart(unsigned long data);
+void cxgb4_ethofld_restart(struct tasklet_struct *t);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
@@ -2113,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 send_len);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
@@ -2169,7 +2183,7 @@ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
void cxgb4_port_mirror_free(struct net_device *dev);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
#endif
#endif /* __CXGB4_H__ */
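
The #ifdef → IS_ENABLED() conversions throughout this patch are needed because the inline-crypto drivers become tristate options (see the new Kconfig further down): #ifdef CONFIG_FOO is true only for built-in (=y) code, whereas IS_ENABLED(CONFIG_FOO) is also true when the option is built as a module. In preprocessor terms:

    #ifdef CONFIG_CHELSIO_TLS_DEVICE           /* true only for =y      */
    #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)  /* true for =y and =m    */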
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 05f33b7e3677..17410fe86626 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo);
static int chcr_stats_show(struct seq_file *seq, void *v)
{
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ struct ch_ktls_port_stats_debug *ktls_port;
+ int i = 0;
+#endif
struct adapter *adap = seq->private;
seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
@@ -3542,52 +3546,47 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
- seq_printf(seq, "IPSec PDU: %10u\n",
- atomic_read(&adap->chcr_stats.ipsec_cnt));
seq_printf(seq, "TLS PDU Tx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_tx));
seq_printf(seq, "TLS PDU Rx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ seq_puts(seq, "\nChelsio Inline IPsec Crypto Accelerator Stats\n");
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->ch_ipsec_stats.ipsec_cnt));
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
seq_printf(seq, "Tx TLS offload refcount: %20u\n",
refcount_read(&adap->chcr_ktls.ktls_refcount));
- seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
- seq_printf(seq, "Tx connection created: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
- seq_printf(seq, "Tx connection failed: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
- seq_printf(seq, "Tx connection closed: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
- seq_printf(seq, "Packets passed for encryption : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
- seq_printf(seq, "Bytes passed for encryption : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
seq_printf(seq, "Tx records send: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
seq_printf(seq, "Tx partial start of records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_start_pkts));
seq_printf(seq, "Tx partial middle of records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_middle_pkts));
seq_printf(seq, "Tx partial end of record: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_end_pkts));
seq_printf(seq, "Tx complete records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
- seq_printf(seq, "Tx out of order packets: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
- seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
- seq_printf(seq, "Tx drop not synced packets: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
- seq_printf(seq, "Tx drop bypass req: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ seq_printf(seq, "TX sw fallback : %20llu\n",
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
+ while (i < MAX_NPORTS) {
+ ktls_port = &adap->ch_ktls_stats.ktls_port[i];
+ seq_printf(seq, "Port %d\n", i);
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_close));
+ i++;
+ }
#endif
-
return 0;
}
DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 9f3173f86eed..61ea3ec5c3fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -117,15 +117,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"vlan_insertions ",
"gro_packets ",
"gro_merged ",
-};
-
-static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
- "db_drop ",
- "db_full ",
- "db_empty ",
- "write_coal_success ",
- "write_coal_fail ",
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
"tx_tls_encrypted_packets",
"tx_tls_encrypted_bytes ",
"tx_tls_ctx ",
@@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
#endif
};
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -257,15 +257,7 @@ struct queue_port_stats {
u64 vlan_ins;
u64 gro_pkts;
u64 gro_merged;
-};
-
-struct adapter_stats {
- u64 db_drop;
- u64 db_full;
- u64 db_empty;
- u64 wc_success;
- u64 wc_fail;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
u64 tx_tls_ctx;
@@ -276,12 +268,23 @@ struct adapter_stats {
#endif
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ const struct ch_ktls_port_stats_debug *ktls_stats;
+#endif
struct sge_eohw_txq *eohw_tx;
unsigned int i;
@@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->vlan_ins += eohw_tx->vlan_ins;
}
}
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
+ s->tx_tls_encrypted_packets =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
+ s->tx_tls_encrypted_bytes =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
+ s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
+ s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
+ s->tx_tls_skip_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
+ s->tx_tls_drop_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
+ s->tx_tls_drop_bypass_req =
+ atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
+#endif
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 481498585ead..4e55f7081644 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -145,13 +145,13 @@ static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
- if (err)
- goto smac_err;
-
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (!err)
return 0;
@@ -604,17 +604,14 @@ int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
/* If the new rule wants to get inserted into
* HPFILTER region, but its prio is greater
* than the rule with the highest prio in HASH
- * region, then reject the rule.
+ * region, or if there are not enough slots
+ * available in the HPFILTER region, then skip
+ * trying to insert this rule into the HPFILTER
+ * region and move directly to the next region.
*/
- if (t->tc_hash_tids_max_prio &&
- tc_prio > t->tc_hash_tids_max_prio)
- break;
-
- /* If there's not enough slots available
- * in HPFILTER region, then move on to
- * normal FILTER region immediately.
- */
- if (ftid + n > t->nhpftids) {
+ if ((t->tc_hash_tids_max_prio &&
+ tc_prio > t->tc_hash_tids_max_prio) ||
+ (ftid + n) > t->nhpftids) {
ftid = t->nhpftids;
continue;
}
@@ -865,6 +862,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -882,7 +880,7 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
@@ -1326,11 +1324,8 @@ static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
@@ -1366,11 +1361,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
@@ -2042,6 +2034,20 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
}
return;
}
+ switch (f->fs.action) {
+ case FILTER_PASS:
+ if (f->fs.dirsteer)
+ set_tcb_tflag(adap, f, tid,
+ TF_DIRECT_STEER_S, 1, 1);
+ break;
+ case FILTER_DROP:
+ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+ break;
+ case FILTER_SWITCH:
+ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+ break;
+ }
+
break;
default:
@@ -2109,22 +2115,11 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- int err = 0;
-
- if (f->fs.newsmac)
- err = configure_filter_smac(adap, f);
-
- if (!err) {
- f->pending = 0; /* async setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
- }
- } else {
- clear_filter(adap, f);
- if (ctx)
- ctx->result = err;
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
}
} else {
/* Something went wrong. Issue a warning about the
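
Two related changes in this file: the source-MAC rewrite now travels in the filter work request itself (FW_FILTER_WR_SMAC_V() and fwr->smac_sel above), so the separate configure_filter_smac() step on FW_FILTER_WR_FLT_ADDED is dropped; and for hash filters the drop/steer/switch action is applied via TCB flags once the open reply delivers the tid, instead of via CONG_CNTRL/CCTRL_ECN bits in the open request. set_tcb_tflag() sets one bit of the 64-bit TCB T_FLAGS word, e.g. for a drop rule (bit position TF_DROP_S = 22, added to t4_tcb.h below):

    set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);  /* T_FLAGS bit 22 = drop */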
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index de078a5bf23e..7fd264a6d085 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,7 +66,7 @@
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
txq = netdev_pick_tx(dev, skb, sb_dev);
if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
skb->encapsulation ||
+ cxgb4_is_ktls_skb(skb) ||
(proto != IPPROTO_TCP && proto != IPPROTO_UDP))
txq = txq % pi->nqsets;
@@ -6396,7 +6397,50 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
}
#endif /* CONFIG_PCI_IOV */
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int chcr_offload_state(struct adapter *adap,
+ enum cxgb4_netdev_tls_ops op_val)
+{
+ switch (op_val) {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ case CXGB4_TLSDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_KTLS].handle) {
+ dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "ch_ktls driver has no registered tlsdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ case CXGB4_XFRMDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
+ dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "chipsec driver has no registered xfrmdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+ default:
+ dev_dbg(adap->pdev_dev,
+ "driver has no support for offload %d\n", op_val);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
@@ -6404,30 +6448,21 @@ static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
u32 tcp_sn)
{
struct adapter *adap = netdev2adap(netdev);
- int ret = 0;
+ int ret;
mutex_lock(&uld_mutex);
- if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
- dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
- ret = -EOPNOTSUPP;
- goto out_unlock;
- }
-
- if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
- dev_err(adap->pdev_dev,
- "chcr driver has no registered tlsdev_ops()\n");
- ret = -EOPNOTSUPP;
+ ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
+ if (ret)
goto out_unlock;
- }
ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
- direction,
- crypto_info,
- tcp_sn);
+ ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
+ direction,
+ crypto_info,
+ tcp_sn);
/* if there is a failure, clear the refcount */
if (ret)
cxgb4_set_ktls_feature(adap,
@@ -6444,19 +6479,11 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
struct adapter *adap = netdev2adap(netdev);
mutex_lock(&uld_mutex);
- if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
- dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
goto out_unlock;
- }
- if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
- dev_err(adap->pdev_dev,
- "chcr driver has no registered tlsdev_ops\n");
- goto out_unlock;
- }
-
- adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
- direction);
+ adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ direction);
cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
@@ -6469,6 +6496,114 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
};
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ int ret;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return -EBUSY;
+ }
+ ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+
+ return ret;
+}
+
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ bool ret = false;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return ret;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
+ .xdo_dev_state_add = cxgb4_xfrm_add_state,
+ .xdo_dev_state_delete = cxgb4_xfrm_del_state,
+ .xdo_dev_state_free = cxgb4_xfrm_free_state,
+ .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
+};
+
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -6721,14 +6856,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->tlsdev_ops = &cxgb4_ktls_ops;
/* initialize the refcount */
refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
}
-#endif
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
+ }
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 81 - 9600 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f642c1b475c4..1b88bd1c2dbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
+ /* Default supported NAT modes */
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_NONE,
+ .natmode = NAT_MODE_NONE,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP,
+ .natmode = NAT_MODE_DIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
+ .natmode = NAT_MODE_DIP_DP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_DIP_DP_SIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_ALL,
+ },
+ /* T6+ can ignore L4 ports when they're disabled. */
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_ALL,
+ },
+};
+
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
+ u8 natmode_flags)
+{
+ u8 i = 0;
+
+ /* Translate the enabled NAT 4-tuple fields to one of the
+ * hardware supported NAT mode configurations. This ensures
+ * that we pick a valid combination, where the disabled fields
+ * do not get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
+ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
+ return;
+ }
+ }
+}
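
As an example of the translation (a sketch, not code from the patch): a pedit action that rewrites the destination IP and destination port accumulates the DIP and DPORT flags, which the table above maps to NAT_MODE_DIP_DP:

    u8 flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT;

    cxgb4_action_natmode_tweak(fs, flags);
    /* fs->nat_mode == NAT_MODE_DIP_DP: only DIP/DPORT are rewritten;
     * the untouched source IP/port fields are not NATed to 0 */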
+
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
}
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
- u32 mask, u32 offset, u8 htype)
+ u32 mask, u32 offset, u8 htype,
+ u8 *natmode_flags)
{
switch (htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP4_DST:
offload_pedit(fs, val, mask, IP4_DST);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_63_32:
offload_pedit(fs, val, mask, IP6_SRC_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_95_64:
offload_pedit(fs, val, mask, IP6_SRC_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_127_96:
offload_pedit(fs, val, mask, IP6_SRC_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_DST_31_0:
offload_pedit(fs, val, mask, IP6_DST_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_63_32:
offload_pedit(fs, val, mask, IP6_DST_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_95_64:
offload_pedit(fs, val, mask, IP6_DST_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_127_96:
offload_pedit(fs, val, mask, IP6_DST_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ }
+}
+
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
+ struct netlink_ext_ack *extack)
+{
+ u8 i = 0;
+
+ /* Extract the NAT mode to enable based on what 4-tuple fields
+ * are enabled to be overwritten. This ensures that the
+ * disabled fields don't get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ const struct cxgb4_natmode_config *c;
+
+ c = &cxgb4_natmode_config_array[i];
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
+ natmode_flags == c->flags)
+ return 0;
}
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
+ return -EOPNOTSUPP;
}
void cxgb4_process_flow_actions(struct net_device *in,
@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs)
{
struct flow_action_entry *act;
+ u8 natmode_flags = 0;
int i;
flow_action_for_each(i, act, actions) {
@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
val = act->mangle.val;
offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset, htype);
+ process_pedit_field(fs, val, mask, offset, htype,
+ &natmode_flags);
}
break;
case FLOW_ACTION_QUEUE:
@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
break;
}
}
+ if (natmode_flags)
+ cxgb4_action_natmode_tweak(fs, natmode_flags);
+
}
static bool valid_l4_mask(u32 mask)
@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ u8 *natmode_flags)
{
u32 mask, offset;
u8 htype;
@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP4_DST:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_IP6_SRC_63_32:
case PEDIT_IP6_SRC_95_64:
case PEDIT_IP6_SRC_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP6_DST_31_0:
case PEDIT_IP6_DST_63_32:
case PEDIT_IP6_DST_95_64:
case PEDIT_IP6_DST_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
struct netlink_ext_ack *extack,
u8 matchall_filter)
{
+ struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
+ u8 natmode_flags = 0;
int i;
if (!flow_action_basic_hw_stats_check(actions, extack))
@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
- struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
bool found = false;
unsigned int i;
@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act);
+ bool pedit_valid = valid_pedit_action(dev, act,
+ &natmode_flags);
if (!pedit_valid)
return -EOPNOTSUPP;
@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
+ if (act_pedit) {
+ int ret;
+
+ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
+ extack);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
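
cxgb4_action_natmode_validate() is the mirror image: a flag combination with no table entry for the running chip is rejected before any filter is written. For instance, rewriting only the L4 destination port matches no entry on any chip (sketch):

    ret = cxgb4_action_natmode_validate(adap, CXGB4_ACTION_NATMODE_DPORT,
                                        extack);
    /* ret == -EOPNOTSUPP; extack carries
     * "Unsupported NAT mode 4-tuple combination" */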
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index 6296e1d5a12b..3a2fa00c8cde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+enum cxgb4_action_natmode_flags {
+ CXGB4_ACTION_NATMODE_NONE = 0,
+ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
+ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
+ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
+ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
+};
+
+/* TC PEDIT action to NATMODE translation entry */
+struct cxgb4_natmode_config {
+ enum chip_type chip;
+ u8 flags;
+ u8 natmode;
+};
+
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ae7123a9de8e..6c259de96f96 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -114,8 +114,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
eosw_txq->cred = adap->params.ofldq_wr_cred;
eosw_txq->hwqid = hwqid;
eosw_txq->netdev = dev;
- tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
- (unsigned long)eosw_txq);
+ tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
return 0;
}
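
The tasklet_init() → tasklet_setup() changes in this patch follow the tree-wide migration to the new tasklet API: the callback receives the tasklet pointer and recovers its container via from_tasklet() (a container_of() wrapper) instead of casting an unsigned long cookie. The two halves of the pattern, with an illustrative callback name:

    /* old style */
    tasklet_init(&q->qresume_tsk, cb, (unsigned long)q);
    static void cb(unsigned long data)
    {
        struct sge_eosw_txq *q = (struct sge_eosw_txq *)data;
        ...
    }

    /* new style */
    tasklet_setup(&q->qresume_tsk, cb);
    static void cb(struct tasklet_struct *t)
    {
        struct sge_eosw_txq *q = from_tasklet(q, t, qresume_tsk);
        ...
    }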
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 08439e215efe..743af9e654aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -663,7 +663,7 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
const struct tid_info *t = &adap->tids;
@@ -690,8 +690,8 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
* ULD is/are already active, return failure.
*/
if (cxgb4_uld_in_use(adap)) {
- dev_warn(adap->pdev_dev,
- "ULD connections (tid/stid) active. Can't enable kTLS\n");
+ dev_dbg(adap->pdev_dev,
+ "ULD connections (tid/stid) active. Can't enable kTLS\n");
return -EINVAL;
}
ret = t4_set_params(adap, adap->mbox, adap->pf,
@@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
if (ret)
return ret;
refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
- pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+ pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
} else {
/* ktls settings already up, just increment refcount. */
refcount_inc(&adap->chcr_ktls.ktls_refcount);
@@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
0, 1, &params, &params);
if (ret)
return ret;
- pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+ pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a963fd0b4540..1b49f2fa9b18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -40,9 +40,11 @@
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/atomic.h>
+#include <net/tls.h>
#include "cxgb4.h"
#define MAX_ULD_QSETS 16
+#define MAX_ULD_NPORTS 4
/* CPL message priority levels */
enum {
@@ -302,7 +304,9 @@ enum cxgb4_uld {
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
CXGB4_ULD_CRYPTO,
+ CXGB4_ULD_IPSEC,
CXGB4_ULD_TLS,
+ CXGB4_ULD_KTLS,
CXGB4_ULD_MAX
};
@@ -361,28 +365,11 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range ppod_edram;
};
-struct chcr_stats_debug {
- atomic_t cipher_rqst;
- atomic_t digest_rqst;
- atomic_t aead_rqst;
- atomic_t complete;
- atomic_t error;
- atomic_t fallback;
- atomic_t ipsec_cnt;
- atomic_t tls_pdu_tx;
- atomic_t tls_pdu_rx;
- atomic_t tls_key;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+struct ch_ktls_port_stats_debug {
atomic64_t ktls_tx_connection_open;
atomic64_t ktls_tx_connection_fail;
atomic64_t ktls_tx_connection_close;
- atomic64_t ktls_tx_send_records;
- atomic64_t ktls_tx_end_pkts;
- atomic64_t ktls_tx_start_pkts;
- atomic64_t ktls_tx_middle_pkts;
- atomic64_t ktls_tx_retransmit_pkts;
- atomic64_t ktls_tx_complete_pkts;
- atomic64_t ktls_tx_trimmed_pkts;
atomic64_t ktls_tx_encrypted_packets;
atomic64_t ktls_tx_encrypted_bytes;
atomic64_t ktls_tx_ctx;
@@ -390,10 +377,39 @@ struct chcr_stats_debug {
atomic64_t ktls_tx_skip_no_sync_data;
atomic64_t ktls_tx_drop_no_sync_data;
atomic64_t ktls_tx_drop_bypass_req;
+};
+struct ch_ktls_stats_debug {
+ struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+ atomic64_t ktls_tx_fallback;
+};
#endif
+
+struct chcr_stats_debug {
+ atomic_t cipher_rqst;
+ atomic_t digest_rqst;
+ atomic_t aead_rqst;
+ atomic_t complete;
+ atomic_t error;
+ atomic_t fallback;
+ atomic_t tls_pdu_tx;
+ atomic_t tls_pdu_rx;
+ atomic_t tls_key;
};
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+struct ch_ipsec_stats_debug {
+ atomic_t ipsec_cnt;
+};
+#endif
+
#define OCQ_WIN_OFFSET(pdev, vres) \
(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
@@ -470,11 +486,19 @@ struct cxgb4_uld_info {
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
const struct tlsdev_ops *tlsdev_ops;
#endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+ const struct xfrmdev_ops *xfrmdev_ops;
+#endif
};
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+ return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
void cxgb4_uld_enable(struct adapter *adap);
void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 869431a1eedd..196652a114c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
}
EXPORT_SYMBOL(cxgb4_write_sgl);
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This API handles sending out partial data of an skb if required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len determines how much data after the @start offset to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end,
+ const dma_addr_t *addr, u32 start, u32 len)
+{
+ struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u8 i = 0, frag_idx = 0, nfrags = 0;
+ skb_frag_t *frag;
+
+ /* Fill the first SGL either from linear data or from partial
+ * frag based on @start.
+ */
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ sgl->len0 = htonl(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[0] + start);
+ len -= frag_size;
+ nfrags++;
+ } else {
+ start -= skb_linear_data_len;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ /* find the first frag */
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+
+ frag_size = min(len, skb_frag_size(frag) - start);
+ sgl->len0 = cpu_to_be32(frag_size);
+ sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+
+ /* If the entire partial data fit in one SGL, then send it out
+ * now.
+ */
+ if (!len)
+ goto done;
+
+ /* Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ /* If the skb couldn't fit in first SGL completely, fill the
+ * rest of the frags in subsequent SGLs. Note that each SGL
+ * pair can store 2 frags.
+ */
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ to->len[i & 1] = cpu_to_be32(frag_size);
+ to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+ if (i && (i & 1))
+ to++;
+ nfrags++;
+ frag_idx++;
+ i++;
+ len -= frag_size;
+ }
+
+ /* If we ended in an odd boundary, then set the second SGL's
+ * length in the pair to 0.
+ */
+ if (i & 1)
+ to->len[1] = cpu_to_be32(0);
+
+ /* Copy from temporary buffer to Tx ring, in case we hit the
+ * end of the queue in the middle of writing the SGL.
+ */
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = (u8 *)end - (u8 *)q->stat;
+ memcpy(q->desc, (u8 *)buf + part0, part1);
+ end = (void *)q->desc + part1;
+ }
+
+ /* 0-pad to multiple of 16 */
+ if ((uintptr_t)end & 8)
+ *end = 0;
+done:
+ sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
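
A hypothetical call site (the real consumer is the ch_ktls Tx path, which is not part of this diff): transmit only the window [@start, @start + @len) of an skb whose linear data and frags are already DMA-mapped into addr[]:

    /* wr, sgl, end and addr are prepared exactly as for cxgb4_write_sgl() */
    cxgb4_write_partial_sgl(skb, &txq->q, sgl, end, addr, start, len);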
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
* data from the FIFO instead of from Host.
@@ -1416,14 +1524,15 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
pi = netdev_priv(dev);
adap = pi->adapter;
ssi = skb_shinfo(skb);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
if (xfrm_offload(skb) && !ssi->gso_size)
- return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+ return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (skb->decrypted)
- return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ if (cxgb4_is_ktls_skb(skb) &&
+ (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
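
The Tx dispatch no longer keys off skb->decrypted: cxgb4_is_ktls_skb() (added to cxgb4_uld.h above) asks the TLS core whether the socket's Tx is device-offloaded, and the extra length test skips skbs that carry no TCP payload, so pure ACKs stay on the regular path. The condition, spelled out:

    /* hand off to ch_ktls only for kTLS sockets whose skb actually
     * carries TCP payload */
    if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk) &&
        skb->len > skb_transport_offset(skb) + tcp_hdrlen(skb))
        return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);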
qidx = skb_get_queue_mapping(skb);
@@ -2660,15 +2769,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
/**
* restart_ctrlq - restart a suspended control queue
- * @data: the control queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
unsigned int written = 0;
- struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+ struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
@@ -2961,13 +3070,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
/**
* restart_ofldq - restart a suspended offload queue
- * @data: the offload queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_ofldq(unsigned long data)
+static void restart_ofldq(struct tasklet_struct *t)
{
- struct sge_uld_txq *q = (struct sge_uld_txq *)data;
+ struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -3887,9 +3996,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
-void cxgb4_ethofld_restart(unsigned long data)
+void cxgb4_ethofld_restart(struct tasklet_struct *t)
{
- struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
+ qresume_tsk);
int pktcount;
spin_lock(&eosw_txq->lock);
@@ -4580,7 +4690,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
txq->full = 0;
return 0;
}
@@ -4670,7 +4780,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
txq->q.q_type = CXGB4_TXQ_ULD;
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ofldq);
txq->full = 0;
txq->mapping_err = 0;
return 0;
@@ -4872,9 +4982,6 @@ void t4_sge_stop(struct adapter *adap)
int i;
struct sge *s = &adap->sge;
- if (in_interrupt()) /* actions below require waiting */
- return;
-
if (s->rx_timer.function)
del_timer_sync(&s->rx_timer);
if (s->tx_timer.function)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa3367966f4b..98d01a7497ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4745,9 +4745,11 @@ static void le_intr_handler(struct adapter *adap)
static struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
{ 0 }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 065c01c654ff..b11a172b5174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -3017,6 +3017,14 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define HASHTBLMEMCRCERR_S 27
+#define HASHTBLMEMCRCERR_V(x) ((x) << HASHTBLMEMCRCERR_S)
+#define HASHTBLMEMCRCERR_F HASHTBLMEMCRCERR_V(1U)
+
+#define CMDTIDERR_S 22
+#define CMDTIDERR_V(x) ((x) << CMDTIDERR_S)
+#define CMDTIDERR_F CMDTIDERR_V(1U)
+
#define T6_UNKNOWNCMD_S 3
#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
index 50232e063f49..92473dda55d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -50,6 +50,10 @@
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+#define TF_DROP_S 22
+#define TF_DIRECT_STEER_S 23
+#define TF_LPBK_S 59
+
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e2fe78e2e242..2820a0bb971b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2017,33 +2017,14 @@ static void mboxlog_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations mboxlog_seq_ops = {
+static const struct seq_operations mboxlog_sops = {
.start = mboxlog_start,
.next = mboxlog_next,
.stop = mboxlog_stop,
.show = mboxlog_show
};
-static int mboxlog_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &mboxlog_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
-
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations mboxlog_fops = {
- .owner = THIS_MODULE,
- .open = mboxlog_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
+DEFINE_SEQ_ATTRIBUTE(mboxlog);
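
DEFINE_SEQ_ATTRIBUTE(name), from <linux/seq_file.h> (added in v5.9), generates the boilerplate being deleted in these hunks: a name##_open() that calls seq_open() with name##_sops and copies inode->i_private into seq->private, plus a name##_fops wired to the standard seq_read/seq_lseek/seq_release hooks. Approximately:

    static int mboxlog_open(struct inode *inode, struct file *file)
    {
        int ret = seq_open(file, &mboxlog_sops);

        if (!ret && inode->i_private) {
            struct seq_file *seq_f = file->private_data;

            seq_f->private = inode->i_private;
        }
        return ret;
    }

    static const struct file_operations mboxlog_fops = {
        .owner   = THIS_MODULE,
        .open    = mboxlog_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
    };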
/*
* Show SGE Queue Set information. We display QPL Queues Sets per line.
*/
@@ -2171,31 +2152,14 @@ static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qinfo_seq_ops = {
+static const struct seq_operations sge_qinfo_sops = {
.start = sge_queue_start,
.next = sge_queue_next,
.stop = sge_queue_stop,
.show = sge_qinfo_show
};
-static int sge_qinfo_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qinfo_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qinfo_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sge_qinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
/*
* Show SGE Queue Set statistics. We display QPL Queues Sets per line.
@@ -2317,31 +2281,14 @@ static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qstats_seq_ops = {
+static const struct seq_operations sge_qstats_sops = {
.start = sge_qstats_start,
.next = sge_qstats_next,
.stop = sge_qstats_stop,
.show = sge_qstats_show
};
-static int sge_qstats_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qstats_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qstats_proc_fops = {
- .owner = THIS_MODULE,
- .open = sge_qstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qstats);
/*
* Show PCI-E SR-IOV Virtual Function Resource Limits.
@@ -2415,31 +2362,14 @@ static void interfaces_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations interfaces_seq_ops = {
+static const struct seq_operations interfaces_sops = {
.start = interfaces_start,
.next = interfaces_next,
.stop = interfaces_stop,
.show = interfaces_show
};
-static int interfaces_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &interfaces_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations interfaces_proc_fops = {
- .owner = THIS_MODULE,
- .open = interfaces_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(interfaces);
/*
* /sys/kernel/debugfs/cxgb4vf/ files list.
@@ -2452,10 +2382,10 @@ struct cxgb4vf_debugfs_entry {
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
- { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
- { "sge_qstats", 0444, &sge_qstats_proc_fops },
+ { "sge_qinfo", 0444, &sge_qinfo_fops },
+ { "sge_qstats", 0444, &sge_qstats_fops },
{ "resources", 0444, &resources_fops },
- { "interfaces", 0444, &interfaces_proc_fops },
+ { "interfaces", 0444, &interfaces_fops },
};
/*
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
new file mode 100644
index 000000000000..bc06e83fd3c6
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Chelsio inline crypto configuration
+#
+
+config CHELSIO_INLINE_CRYPTO
+ bool "Chelsio Inline Crypto support"
+ depends on CHELSIO_T4
+ default y
+ help
+ Enable support for inline crypto.
+ Allows enabling or disabling individual inline crypto drivers.
+
+if CHELSIO_INLINE_CRYPTO
+
+config CRYPTO_DEV_CHELSIO_TLS
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS
+ depends on TLS_TOE
+ help
+ Support Chelsio Inline TLS with Chelsio crypto accelerator.
+ Enable inline TLS support for Tx and Rx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
+
+config CHELSIO_IPSEC_INLINE
+ tristate "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ help
+ Support Chelsio Inline IPsec with Chelsio crypto accelerator.
+ Enable inline IPsec support for Tx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ch_ipsec.
+
+config CHELSIO_TLS_DEVICE
+ tristate "Chelsio Inline KTLS Offload"
+ depends on CHELSIO_T4
+ depends on TLS
+ depends on TLS_DEVICE
+ help
+ This flag enables support for kernel TLS offload over the Chelsio T6
+ crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE can be enabled
+ only if CONFIG_TLS and CONFIG_TLS_DEVICE are enabled.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ch_ktls.
+
+endif # CHELSIO_INLINE_CRYPTO
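
With this file the three inline-crypto drivers become independently selectable tristate options under the CHELSIO_INLINE_CRYPTO menu, so e.g. CONFIG_CHELSIO_TLS_DEVICE=m builds a standalone ch_ktls.ko rather than compiling the kTLS code into the chcr crypto driver as before (the old code paths went through CXGB4_ULD_CRYPTO). This tristate-ness is also why the #ifdef tests elsewhere in the patch had to become IS_ENABLED() checks.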
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
new file mode 100644
index 000000000000..27e6d7e2f1eb
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec/
+obj-$(CONFIG_CHELSIO_TLS_DEVICE) += ch_ktls/
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
new file mode 100644
index 000000000000..efdcaaebc455
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
+
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec.o
+ch_ipsec-objs := chcr_ipsec.o
+
+
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 967babd67a51..072299b14b8d 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -35,7 +35,7 @@
* Atul Gupta (atul.gupta@chelsio.com)
*/
-#define pr_fmt(fmt) "chcr:" fmt
+#define pr_fmt(fmt) "ch_ipsec: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -60,9 +60,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
-#include "chcr_core.h"
-#include "chcr_algo.h"
-#include "chcr_crypto.h"
+#include "chcr_ipsec.h"
/*
* Max Tx descriptor space we allow for an Ethernet packet to be inlined
@@ -71,39 +69,80 @@
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8
-static int chcr_xfrm_add_state(struct xfrm_state *x);
-static void chcr_xfrm_del_state(struct xfrm_state *x);
-static void chcr_xfrm_free_state(struct xfrm_state *x);
-static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
-static void chcr_advance_esn_state(struct xfrm_state *x);
-
-static const struct xfrmdev_ops chcr_xfrmdev_ops = {
- .xdo_dev_state_add = chcr_xfrm_add_state,
- .xdo_dev_state_delete = chcr_xfrm_del_state,
- .xdo_dev_state_free = chcr_xfrm_free_state,
- .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
- .xdo_dev_state_advance_esn = chcr_advance_esn_state,
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
+static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
+static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
+static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+
+static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = ch_ipsec_xfrm_add_state,
+ .xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
+ .xdo_dev_state_free = ch_ipsec_xfrm_free_state,
+ .xdo_dev_offload_ok = ch_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
-/* Add offload xfrms to Chelsio Interface */
-void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+static struct cxgb4_uld_info ch_ipsec_uld_info = {
+ .name = CHIPSEC_DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from fw config file */
+ .rxq_size = 1024,
+ .add = ch_ipsec_uld_add,
+ .state_change = ch_ipsec_uld_state_change,
+ .tx_handler = ch_ipsec_xmit,
+ .xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
+};
+
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct ipsec_uld_ctx *u_ctx;
+
+ pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
+ CHIPSEC_DRV_VERSION);
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *infop;
+out:
+ return u_ctx;
+}
+
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
- struct net_device *netdev = NULL;
- int i;
-
- for (i = 0; i < lld->nports; i++) {
- netdev = lld->ports[i];
- if (!netdev)
- continue;
- netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
- netdev->hw_enc_features |= NETIF_F_HW_ESP;
- netdev->features |= NETIF_F_HW_ESP;
- netdev_change_features(netdev);
+ struct ipsec_uld_ctx *u_ctx = handle;
+
+ pr_debug("new_state %u\n", new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ case CXGB4_STATE_DETACH:
+ pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+		mutex_lock(&dev_mutex);
+		list_del(&u_ctx->entry);
+		mutex_unlock(&dev_mutex);
+ break;
+ default:
+ break;
}
+
+ return 0;
}
-static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
- struct ipsec_sa_entry *sa_entry)
+static int ch_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
{
int hmac_ctrl;
int authsize = x->aead->alg_icv_len / 8;
@@ -126,8 +165,8 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
return hmac_ctrl;
}
-static inline int chcr_ipsec_setkey(struct xfrm_state *x,
- struct ipsec_sa_entry *sa_entry)
+static int ch_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
{
int keylen = (x->aead->alg_key_len + 7) / 8;
unsigned char *key = x->aead->alg_key;
@@ -185,65 +224,65 @@ out:
}
/*
- * chcr_xfrm_add_state
+ * ch_ipsec_xfrm_add_state
 * returns 0 on success, a negative error if sending the message to the FPGA
 * fails, or a positive error if the FPGA returned a bad response
*/
-static int chcr_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ pr_debug("Cannot offload authenticated xfrm states\n");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ pr_debug("Cannot offload compressed xfrm states\n");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ pr_debug("Only IPv4/6 xfrm state offloaded\n");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ pr_debug("Only transport and tunnel xfrm offload\n");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ pr_debug("Only ESP xfrm state offloaded\n");
return -EINVAL;
}
if (x->encap) {
- pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ pr_debug("Encapsulated xfrm state not offloaded\n");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ pr_debug("Cannot offload xfrm states without aead\n");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ pr_debug("Cannot offload xfrm states with tfc padding\n");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ pr_debug("Cannot offload xfrm states without geniv\n");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
@@ -253,24 +292,24 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
goto out;
}
- sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
if (x->props.flags & XFRM_STATE_ESN)
sa_entry->esn = 1;
- chcr_ipsec_setkey(x, sa_entry);
+ ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
try_module_get(THIS_MODULE);
out:
return res;
}
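As an aside, the 128 + 32 / 256 + 32 bit check above reflects how rfc4106-style GCM keys carry a 4-byte nonce salt after the AES key, which the setkey path splits off. A minimal user-space sketch of that split (the buffer and values are stand-ins, not the driver's API):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* rfc4106-style blob: AES-256 key followed by a 4-byte nonce salt */
		unsigned char key[(256 + 32) / 8] = { 0 };
		unsigned int alg_key_len = 256 + 32;		/* bits, as in x->aead->alg_key_len */
		unsigned int keylen = (alg_key_len + 7) / 8;	/* bytes */
		unsigned char salt[4];

		keylen -= 4;				/* strip the salt from the key length */
		memcpy(salt, key + keylen, sizeof(salt));
		printf("AES key: %u bytes, salt: %zu bytes\n", keylen, sizeof(salt));
		return 0;
	}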
-static void chcr_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void chcr_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -282,7 +321,7 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
if (x->props.family == AF_INET) {
/* Offload with IP options is not supported yet */
@@ -296,15 +335,15 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
-static void chcr_advance_esn_state(struct xfrm_state *x)
+static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static inline int is_eth_imm(const struct sk_buff *skb,
- struct ipsec_sa_entry *sa_entry)
+static int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
{
unsigned int kctx_len;
int hdrlen;
@@ -322,9 +361,9 @@ static inline int is_eth_imm(const struct sk_buff *skb,
return 0;
}
-static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- struct ipsec_sa_entry *sa_entry,
- bool *immediate)
+static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry,
+ bool *immediate)
{
unsigned int kctx_len;
unsigned int flits;
@@ -365,7 +404,7 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
return flits;
}
-inline void *copy_esn_pktxt(struct sk_buff *skb,
+static void *copy_esn_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
@@ -419,7 +458,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+static void *copy_cpltx_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
@@ -463,10 +502,10 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
- struct net_device *dev,
- void *pos,
- struct ipsec_sa_entry *sa_entry)
+static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
{
struct _key_ctx *key_ctx;
int left, eoq, key_len;
@@ -511,11 +550,11 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *chcr_crypto_wreq(struct sk_buff *skb,
- struct net_device *dev,
- void *pos,
- int credits,
- struct ipsec_sa_entry *sa_entry)
+static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -538,7 +577,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
unsigned int kctx_len = sa_entry->kctx_len;
int qid = q->q.cntxt_id;
- atomic_inc(&adap->chcr_stats.ipsec_cnt);
+ atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);
flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = DIV_ROUND_UP(flits, 2);
@@ -636,13 +675,13 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
* Returns the number of Tx descriptors needed for the supplied number
* of flits.
*/
-static inline unsigned int flits_to_desc(unsigned int n)
+static unsigned int flits_to_desc(unsigned int n)
{
WARN_ON(n > SGE_MAX_WR_LEN / 8);
return DIV_ROUND_UP(n, 8);
}
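A quick worked example of the flit/descriptor arithmetic above (a flit is 8 bytes and a descriptor holds 8 flits; DIV_ROUND_UP below is a local stand-in for the kernel macro):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* 17 flits = 136 bytes -> 3 descriptors of 8 flits (64 bytes) each */
		unsigned int flits = 17;

		printf("%u flits -> %u descriptors\n", flits, DIV_ROUND_UP(flits, 8u));
		return 0;
	}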
-static inline unsigned int txq_avail(const struct sge_txq *q)
+static unsigned int txq_avail(const struct sge_txq *q)
{
return q->size - 1 - q->in_use;
}
@@ -653,7 +692,7 @@ static void eth_txq_stop(struct sge_eth_txq *q)
q->q.stops++;
}
-static inline void txq_advance(struct sge_txq *q, unsigned int n)
+static void txq_advance(struct sge_txq *q, unsigned int n)
{
q->in_use += n;
q->pidx += n;
@@ -662,9 +701,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
}
/*
- * chcr_ipsec_xmit called from ULD Tx handler
+ * ch_ipsec_xmit called from ULD Tx handler
*/
-int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
unsigned int last_desc, ndesc, flits = 0;
@@ -725,8 +764,8 @@ out_free: dev_kfree_skb_any(skb);
before = (u64 *)pos;
end = (u64 *)pos + flits;
/* Setup IPSec CPL */
- pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
- credits, sa_entry);
+ pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
if (before > (u64 *)pos) {
left = (u8 *)end - (u8 *)q->q.stat;
end = (void *)q->q.desc + left;
@@ -752,3 +791,35 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
return NETDEV_TX_OK;
}
+
+static int __init ch_ipsec_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);
+
+ return 0;
+}
+
+static void __exit ch_ipsec_exit(void)
+{
+ struct ipsec_uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
+}
+
+module_init(ch_ipsec_init);
+module_exit(ch_ipsec_exit);
+
+MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHIPSEC_DRV_VERSION);
+
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
new file mode 100644
index 000000000000..1d110d2edd64
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018 Chelsio Communications, Inc. */
+
+#ifndef __CHCR_IPSEC_H__
+#define __CHCR_IPSEC_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "cxgb4_uld.h"
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+#define CHIPSEC_DRV_MODULE_NAME "ch_ipsec"
+#define CHIPSEC_DRV_VERSION "1.0.0.0-ko"
+#define CHIPSEC_DRV_DESC "Chelsio T6 Crypto IPsec Offload Driver"
+
+struct ipsec_uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+};
+
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ u16 esn;
+ u16 resv;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+#endif /* __CHCR_IPSEC_H__ */
+
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile
new file mode 100644
index 000000000000..5e7d161c3199
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_CHELSIO_TLS_DEVICE) += ch_ktls.o
+ch_ktls-objs := chcr_ktls.o
diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
index 33f589cbfba1..38319f4c3121 100644
--- a/drivers/crypto/chelsio/chcr_common.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
@@ -18,28 +18,6 @@
#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define AES_BLOCK_LEN 16
-enum chcr_state {
- CHCR_INIT = 0,
- CHCR_ATTACH,
- CHCR_DETACH,
-};
-
-struct chcr_dev {
- spinlock_t lock_chcr_dev; /* chcr dev structure lock */
- enum chcr_state state;
- atomic_t inflight;
- int wqretry;
- struct delayed_work detach_work;
- struct completion detach_comp;
- unsigned char tx_channel_id;
-};
-
-struct uld_ctx {
- struct list_head entry;
- struct cxgb4_lld_info lldi;
- struct chcr_dev dev;
-};
-
struct ktls_key_ctx {
__be32 ctx_hdr;
u8 salt[CHCR_MAX_SALT];
@@ -77,8 +55,6 @@ struct ktls_key_ctx {
KEY_CONTEXT_SALT_PRESENT_F | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
-struct uld_ctx *assign_chcr_device(void);
-
static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
void *pos, int length)
{
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index c5cce024886a..c24485c0d512 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1,10 +1,62 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <linux/netdevice.h>
#include "chcr_ktls.h"
-#include "clip_tbl.h"
+
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+
+/* chcr_get_nfrags_to_send: number of frags needed to send @len bytes
+ * @skb: skb
+ * @start: start offset into the skb's payload.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+ struct skb_shared_info *si = skb_shinfo(skb);
+ u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+ u8 nfrags = 0, frag_idx = 0;
+ skb_frag_t *frag;
+
+	/* if it's a linear skb then return 1 */
+ if (!skb_is_nonlinear(skb))
+ return 1;
+
+ if (unlikely(start < skb_linear_data_len)) {
+ frag_size = min(len, skb_linear_data_len - start);
+ start = 0;
+ } else {
+ start -= skb_linear_data_len;
+
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ while (start >= frag_size) {
+ start -= frag_size;
+ frag_idx++;
+ frag = &si->frags[frag_idx];
+ frag_size = skb_frag_size(frag);
+ }
+ frag_size = min(len, skb_frag_size(frag) - start);
+ }
+ len -= frag_size;
+ nfrags++;
+
+ while (len) {
+ frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+ len -= frag_size;
+ nfrags++;
+ frag_idx++;
+ }
+ return nfrags;
+}
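A stand-alone model of the intended counting over a plain array of fragment sizes (all values made up) may make the three phases easier to follow: linear head, first partial frag, then remaining whole frags.

	#include <stdio.h>

	/* Simplified model: how many fragments carry @len bytes starting
	 * @start bytes into (linear head + frags)?
	 */
	static unsigned int nfrags_to_send(const unsigned int *frag_size,
					   unsigned int headlen,
					   unsigned int start, unsigned int len)
	{
		unsigned int nfrags = 0, idx = 0, chunk;

		if (start < headlen) {			/* chunk 1 comes from linear data */
			chunk = len < headlen - start ? len : headlen - start;
		} else {				/* skip whole frags before start */
			start -= headlen;
			while (start >= frag_size[idx])
				start -= frag_size[idx++];
			chunk = frag_size[idx] - start;	/* partial first frag */
			if (chunk > len)
				chunk = len;
			idx++;
		}
		len -= chunk;
		nfrags++;

		while (len) {				/* whole frags after the first chunk */
			chunk = len < frag_size[idx] ? len : frag_size[idx];
			len -= chunk;
			nfrags++;
			idx++;
		}
		return nfrags;
	}

	int main(void)
	{
		unsigned int frags[] = { 4096, 4096, 4096 };

		/* 6000 bytes starting 100 bytes into the first frag, no linear head */
		printf("nfrags = %u\n", nfrags_to_send(frags, 0, 100, 6000));
		return 0;
	}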
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
@@ -117,60 +169,6 @@ out:
return ret;
}
-static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
- int new_state)
-{
- /* This function can be called from both rx (interrupt context) and tx
- * queue contexts.
- */
- spin_lock_bh(&tx_info->lock);
- switch (tx_info->connection_state) {
- case KTLS_CONN_CLOSED:
- tx_info->connection_state = new_state;
- break;
-
- case KTLS_CONN_ACT_OPEN_REQ:
- /* only go forward if state is greater than current state. */
- if (new_state <= tx_info->connection_state)
- break;
- /* update to the next state and also initialize TCB */
- tx_info->connection_state = new_state;
- fallthrough;
- case KTLS_CONN_ACT_OPEN_RPL:
- /* if we are stuck in this state, means tcb init might not
- * received by HW, try sending it again.
- */
- if (!chcr_init_tcb_fields(tx_info))
- tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
- break;
-
- case KTLS_CONN_SET_TCB_REQ:
- /* only go forward if state is greater than current state. */
- if (new_state <= tx_info->connection_state)
- break;
- /* update to the next state and check if l2t_state is valid */
- tx_info->connection_state = new_state;
- fallthrough;
- case KTLS_CONN_SET_TCB_RPL:
- /* Check if l2t state is valid, then move to ready state. */
- if (cxgb4_check_l2t_valid(tx_info->l2te)) {
- tx_info->connection_state = KTLS_CONN_TX_READY;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
- }
- break;
-
- case KTLS_CONN_TX_READY:
- /* nothing to be done here */
- break;
-
- default:
- pr_err("unknown KTLS connection state\n");
- break;
- }
- spin_unlock_bh(&tx_info->lock);
-
- return tx_info->connection_state;
-}
/*
* chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
* @sk - tcp socket.
@@ -290,27 +288,17 @@ static int chcr_setup_connection(struct sock *sk,
return -EINVAL;
tx_info->atid = atid;
- tx_info->ip_family = sk->sk_family;
- if (sk->sk_family == AF_INET) {
- tx_info->ip_family = AF_INET;
+ if (tx_info->ip_family == AF_INET) {
ret = chcr_ktls_act_open_req(sk, tx_info, atid);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- tx_info->ip_family = AF_INET;
- ret = chcr_ktls_act_open_req(sk, tx_info, atid);
- } else {
- tx_info->ip_family = AF_INET6;
- ret = cxgb4_clip_get(tx_info->netdev,
- (const u32 *)
- &sk->sk_v6_rcv_saddr.s6_addr,
- 1);
- if (ret)
- goto out;
- ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
- }
+ ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+ if (ret)
+ return ret;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
#endif
}
@@ -318,16 +306,21 @@ static int chcr_setup_connection(struct sock *sk,
* success, if any other return type clear atid and return that failure.
*/
if (ret) {
- if (ret == NET_XMIT_CN)
+ if (ret == NET_XMIT_CN) {
ret = 0;
- else
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev,
+ (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+#endif
cxgb4_free_atid(t, atid);
- goto out;
+ }
}
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
-out:
return ret;
}
@@ -381,22 +374,17 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
* @tls_cts - tls context.
* @direction - TX/RX crypto direction
*/
-void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
+static void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
{
struct chcr_ktls_ofld_ctx_tx *tx_ctx =
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
- struct sock *sk;
+ struct ch_ktls_port_stats_debug *port_stats;
if (!tx_info)
return;
- sk = tx_info->sk;
-
- spin_lock(&tx_info->lock);
- tx_info->connection_state = KTLS_CONN_CLOSED;
- spin_unlock(&tx_info->lock);
/* clear l2t entry */
if (tx_info->l2te)
@@ -405,8 +393,8 @@ void chcr_ktls_dev_del(struct net_device *netdev,
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
- cxgb4_clip_release(netdev,
- (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+ cxgb4_clip_release(netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
1);
#endif
@@ -418,7 +406,8 @@ void chcr_ktls_dev_del(struct net_device *netdev,
tx_info->tid, tx_info->ip_family);
}
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
tx_ctx->chcr_info = NULL;
/* release module refcount */
@@ -434,12 +423,13 @@ void chcr_ktls_dev_del(struct net_device *netdev,
* @direction - TX/RX crypto direction
* return: SUCCESS/FAILURE.
*/
-int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
+static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
@@ -453,30 +443,23 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
pi = netdev_priv(netdev);
adap = pi->adapter;
+ port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_open);
+
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
- ret = -EINVAL;
goto out;
}
- if (tx_ctx->chcr_info) {
- ret = -EINVAL;
+
+ if (tx_ctx->chcr_info)
goto out;
- }
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
- if (!tx_info) {
- ret = -ENOMEM;
+ if (!tx_info)
goto out;
- }
-
- spin_lock_init(&tx_info->lock);
-
- /* clear connection state */
- spin_lock(&tx_info->lock);
- tx_info->connection_state = KTLS_CONN_CLOSED;
- spin_unlock(&tx_info->lock);
tx_info->sk = sk;
+ spin_lock_init(&tx_info->lock);
/* initialize tid and atid to -1, 0 is a also a valid id. */
tx_info->tid = -1;
tx_info->atid = -1;
@@ -487,10 +470,12 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->tx_chan = pi->tx_chan;
tx_info->smt_idx = pi->smt_idx;
tx_info->port_id = pi->port_id;
+ tx_info->prev_ack = 0;
+ tx_info->prev_win = 0;
tx_info->rx_qid = chcr_get_first_rx_qid(adap);
if (unlikely(tx_info->rx_qid < 0))
- goto out2;
+ goto free_tx_info;
tx_info->prev_seq = start_offload_tcp_sn;
tx_info->tcp_start_seq_number = start_offload_tcp_sn;
@@ -498,18 +483,22 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
/* save crypto keys */
ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
if (ret < 0)
- goto out2;
+ goto free_tx_info;
/* get peer ip */
if (sk->sk_family == AF_INET) {
memcpy(daaddr, &sk->sk_daddr, 4);
+ tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
- else
+ tx_info->ip_family = AF_INET;
+ } else {
memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ tx_info->ip_family = AF_INET6;
+ }
#endif
}
@@ -517,13 +506,13 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
dst = sk_dst_get(sk);
if (!dst) {
pr_err("DST entry not found\n");
- goto out2;
+ goto free_tx_info;
}
n = dst_neigh_lookup(dst, daaddr);
if (!n || !n->dev) {
pr_err("neighbour not found\n");
dst_release(dst);
- goto out2;
+ goto free_tx_info;
}
tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
@@ -532,31 +521,86 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (!tx_info->l2te) {
pr_err("l2t entry not found\n");
- goto out2;
+ goto free_tx_info;
}
- tx_ctx->chcr_info = tx_info;
+	/* Driver shouldn't be removed while any connection exists */
+ if (!try_module_get(THIS_MODULE))
+ goto free_l2t;
+ init_completion(&tx_info->completion);
/* create a filter and call cxgb4_l2t_send to send the packet out, which
* will take care of updating l2t entry in hw if not already done.
*/
- ret = chcr_setup_connection(sk, tx_info);
- if (ret)
- goto out2;
+ tx_info->open_state = CH_KTLS_OPEN_PENDING;
- /* Driver shouldn't be removed until any single connection exists */
- if (!try_module_get(THIS_MODULE)) {
- ret = -EINVAL;
- goto out2;
+ if (chcr_setup_connection(sk, tx_info))
+ goto put_module;
+
+ /* Wait for reply */
+ wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+ spin_lock_bh(&tx_info->lock);
+ if (tx_info->open_state) {
+ /* need to wait for hw response, can't free tx_info yet. */
+ if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
+ tx_info->pending_close = true;
+		/* the lock is released in the cleanup path */
+ goto put_module;
+ }
+ spin_unlock_bh(&tx_info->lock);
+
+ /* initialize tcb */
+ reinit_completion(&tx_info->completion);
+ /* mark it pending for hw response */
+ tx_info->open_state = CH_KTLS_OPEN_PENDING;
+
+ if (chcr_init_tcb_fields(tx_info))
+ goto free_tid;
+
+ /* Wait for reply */
+ wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+ spin_lock_bh(&tx_info->lock);
+ if (tx_info->open_state) {
+ /* need to wait for hw response, can't free tx_info yet. */
+ tx_info->pending_close = true;
+		/* the lock is released in the cleanup path */
+ goto free_tid;
}
+ spin_unlock_bh(&tx_info->lock);
+
+ if (!cxgb4_check_l2t_valid(tx_info->l2te))
+ goto free_tid;
+
+ atomic64_inc(&port_stats->ktls_tx_ctx);
+ tx_ctx->chcr_info = tx_info;
- atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
return 0;
-out2:
- kvfree(tx_info);
+
+free_tid:
+ chcr_ktls_mark_tcb_close(tx_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(netdev, (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+
+put_module:
+ /* release module refcount */
+ module_put(THIS_MODULE);
+free_l2t:
+ cxgb4_l2t_release(tx_info->l2te);
+free_tx_info:
+ if (tx_info->pending_close)
+ spin_unlock_bh(&tx_info->lock);
+ else
+ kvfree(tx_info);
out:
- atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
- return ret;
+ atomic64_inc(&port_stats->ktls_tx_connection_fail);
+ return -1;
}
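The open sequence above hands ownership of tx_info between this function and the CPL reply handlers: if the 30 s wait times out, pending_close is set and the late reply handler, not dev_add, frees the context. A tiny stand-alone model of that ownership decision (names are stand-ins and no locking is shown):

	#include <stdbool.h>
	#include <stdio.h>

	enum open_state { OPEN_SUCCESS, OPEN_FAILURE, OPEN_PENDING };

	/* Who frees the context after the timed wait? */
	static const char *cleanup_owner(enum open_state st, bool *pending_close)
	{
		if (st == OPEN_SUCCESS)
			return "nobody, connection is live";
		if (st == OPEN_PENDING) {
			*pending_close = true;	/* late CPL reply handler frees it */
			return "reply handler";
		}
		return "caller";		/* reply seen and it was a failure */
	}

	int main(void)
	{
		bool pending = false;

		printf("timeout case -> freed by: %s (pending_close=%d)\n",
		       cleanup_owner(OPEN_PENDING, &pending), pending);
		return 0;
	}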
/*
@@ -603,7 +647,8 @@ static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
/*
* chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
*/
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ unsigned char *input)
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
@@ -618,27 +663,46 @@ int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
tx_info = lookup_atid(t, atid);
if (!tx_info || tx_info->atid != atid) {
- pr_err("tx_info or atid is not correct\n");
+ pr_err("%s: incorrect tx_info or atid\n", __func__);
return -1;
}
+ cxgb4_free_atid(t, atid);
+ tx_info->atid = -1;
+
+ spin_lock(&tx_info->lock);
+	/* the HW response arrived after the waiter gave up, finish the pending cleanup */
+ if (tx_info->pending_close) {
+ spin_unlock(&tx_info->lock);
+ if (!status) {
+		/* it's a late success, the TCB is established in HW,
+		 * so mark it closed.
+ */
+ chcr_ktls_mark_tcb_close(tx_info);
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tid, tx_info->ip_family);
+ }
+ kvfree(tx_info);
+ return 0;
+ }
+
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
-
- cxgb4_free_atid(t, atid);
- tx_info->atid = -1;
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info,
- KTLS_CONN_ACT_OPEN_RPL);
+ tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ } else {
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+ spin_unlock(&tx_info->lock);
+
+ complete(&tx_info->completion);
return 0;
}
/*
* chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
*/
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
{
const struct cpl_set_tcb_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
@@ -649,17 +713,28 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
t = &adap->tids;
tx_info = lookup_tid(t, tid);
+
if (!tx_info || tx_info->tid != tid) {
- pr_err("tx_info or atid is not correct\n");
+ pr_err("%s: incorrect tx_info or tid\n", __func__);
return -1;
}
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+
+ spin_lock(&tx_info->lock);
+ if (tx_info->pending_close) {
+ spin_unlock(&tx_info->lock);
+ kvfree(tx_info);
+ return 0;
+ }
+	tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ spin_unlock(&tx_info->lock);
+
+ complete(&tx_info->completion);
return 0;
}
static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
- u32 tid, void *pos, u16 word, u64 mask,
+ u32 tid, void *pos, u16 word,
+ struct sge_eth_txq *q, u64 mask,
u64 val, u32 reply)
{
struct cpl_set_tcb_field_core *cpl;
@@ -668,7 +743,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
/* ULP_TXPKT */
txpkt = pos;
- txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+ ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+ ULP_TXPKT_FID_V(q->q.cntxt_id) |
+ ULP_TXPKT_RO_F);
txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
/* ULPTX_IDATA sub-command */
@@ -723,7 +801,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
} else {
u8 buf[48] = {0};
- __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+ __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
mask, val, reply);
return chcr_copy_to_txd(buf, &q->q, pos,
@@ -731,7 +809,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
}
}
- pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+ pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
mask, val, reply);
/* check again if we are at the end of the queue */
@@ -753,10 +831,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
*/
static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u64 tcp_seq,
- u64 tcp_ack, u64 tcp_win)
+ u64 tcp_ack, u64 tcp_win, bool offset)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
- u32 len, cpl = 0, ndesc, wr_len;
+ struct ch_ktls_port_stats_debug *port_stats;
+ u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
struct fw_ulptx_wr *wr;
int credits;
void *pos;
@@ -772,6 +851,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
/* make space for WR, we'll fill it later when we know all the cpls
* being sent out and have complete length.
@@ -787,14 +871,17 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
cpl++;
}
	/* reset snd una if it's a re-transmit pkt or a mid-record start */
- if (tcp_seq != tx_info->prev_seq) {
+ if (tcp_seq != tx_info->prev_seq || offset) {
/* reset snd_una */
+ port_stats =
+ &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
TCB_SND_UNA_RAW_W,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
+ if (tcp_seq != tx_info->prev_seq)
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -823,7 +910,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr->cookie = 0;
/* fill len in wr field */
- wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+ wr->flowid_len16 = htonl(wr_mid |
+ FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
ndesc = DIV_ROUND_UP(len, 64);
chcr_txq_advance(&q->q, ndesc);
@@ -833,34 +921,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
}
/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
- }
-
- skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
- nskb->len += skb->data_len;
- nskb->data_len = skb->data_len;
- nskb->truesize += skb->data_len;
-}
-
-/*
* chcr_ktls_get_tx_flits
* returns number of flits to be sent out, it includes key context length, WR
* size and skb fragments.
*/
static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
{
- return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+ return chcr_sgl_len(nr_frags) +
DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
}
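For a feel of the numbers, a sketch assuming chcr_sgl_len() follows cxgb4's sgl_len() flit formula, with stand-in sizes for the key context and CHCR_KTLS_WR_SIZE:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* assumption: chcr_sgl_len() matches cxgb4's sgl_len() formula */
	static unsigned int sgl_len(unsigned int n)
	{
		n--;
		return (3 * n) / 2 + (n & 1) + 2;
	}

	int main(void)
	{
		unsigned int nfrags = 3;
		unsigned int key_ctx_len = 64, wr_size = 80;	/* stand-in sizes */

		printf("tx flits = %u\n",
		       sgl_len(nfrags) + DIV_ROUND_UP(key_ctx_len + wr_size, 8));
		return 0;
	}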
@@ -924,8 +992,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
struct tcphdr *tcp;
int len16, pktlen;
struct iphdr *ip;
+ u32 wr_mid = 0;
int credits;
u8 buf[150];
+ u64 cntrl1;
void *pos;
iplen = skb_network_header_len(skb);
@@ -934,7 +1004,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb->len - skb->data_len;
+ pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -947,6 +1017,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
pos = &q->q.desc[q->q.pidx];
wr = pos;
@@ -954,7 +1029,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(ctrl));
- wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
wr->r3 = 0;
cpl = (void *)(wr + 1);
@@ -964,22 +1039,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
TXPKT_PF_V(tx_info->adap->pf));
cpl->pack = 0;
cpl->len = htons(pktlen);
- /* checksum offload */
- cpl->ctrl1 = 0;
-
- pos = cpl + 1;
memcpy(buf, skb->data, pktlen);
if (tx_info->ip_family == AF_INET) {
/* we need to correct ip header len */
ip = (struct iphdr *)(buf + maclen);
ip->tot_len = htons(pktlen - maclen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
#if IS_ENABLED(CONFIG_IPV6)
} else {
ip6 = (struct ipv6hdr *)(buf + maclen);
ip6->payload_len = htons(pktlen - maclen - iplen);
+ cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
#endif
}
+
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+
+ pos = cpl + 1;
+
/* now take care of the tcp header, if fin is not set then clear push
* bit as well, and if fin is set, it will be sent at the last so we
* need to update the tcp sequence number as per the last packet.
@@ -998,71 +1079,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
return 0;
}
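The pktlen change above swaps `skb->len - skb->data_len` (whatever happens to sit in the linear area, possibly including payload) for the exact header length. A trivial sketch of that header arithmetic (sizes are examples):

	#include <stdio.h>

	int main(void)
	{
		/* Ethernet + IPv4 without options + TCP with 12 bytes of options */
		unsigned int maclen = 14, iplen = 20, tcp_hdrlen = 20 + 12;

		/* skb_transport_offset(skb) + tcp_hdrlen(skb) */
		unsigned int pktlen = (maclen + iplen) + tcp_hdrlen;

		printf("headers-only pktlen = %u bytes\n", pktlen);
		return 0;
	}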
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen)
-{
- skb_frag_t *fragfrom, *fragto;
- int from, to, todo;
-
- WARN_ON(shiftlen > skb->data_len);
-
- todo = shiftlen;
- from = 0;
- to = 0;
- fragfrom = &skb_shinfo(skb)->frags[from];
-
- while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
- fragfrom = &skb_shinfo(skb)->frags[from];
- fragto = &skb_shinfo(tgt)->frags[to];
-
- if (todo >= skb_frag_size(fragfrom)) {
- *fragto = *fragfrom;
- todo -= skb_frag_size(fragfrom);
- from++;
- to++;
-
- } else {
- __skb_frag_ref(fragfrom);
- skb_frag_page_copy(fragto, fragfrom);
- skb_frag_off_copy(fragto, fragfrom);
- skb_frag_size_set(fragto, todo);
-
- skb_frag_off_add(fragfrom, todo);
- skb_frag_size_sub(fragfrom, todo);
- todo = 0;
-
- to++;
- break;
- }
- }
-
- /* Ready to "commit" this state change to tgt */
- skb_shinfo(tgt)->nr_frags = to;
-
- /* Reposition in the original skb */
- to = 0;
- while (from < skb_shinfo(skb)->nr_frags)
- skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
- skb_shinfo(skb)->nr_frags = to;
-
- WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
- skb->len -= shiftlen;
- skb->data_len -= shiftlen;
- skb->truesize -= shiftlen;
- tgt->len += shiftlen;
- tgt->data_len += shiftlen;
- tgt->truesize += shiftlen;
-
- return shiftlen;
-}
-
/*
* chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
* received has partial end part of the record, send out the complete record, so
@@ -1078,6 +1094,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
struct chcr_ktls_info *tx_info,
struct sge_eth_txq *q, u32 tcp_seq,
+ bool is_last_wr, u32 data_len,
+ u32 skb_offset, u32 nfrags,
bool tcp_push, u32 mss)
{
u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1093,7 +1111,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
u64 *end;
/* get the number of flits required */
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
/* number of descriptors */
ndesc = chcr_flits_to_desc(flits);
/* check if enough credits available */
@@ -1122,6 +1140,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ if (!is_last_wr)
+ skb_get(skb);
+
pos = &q->q.desc[q->q.pidx];
end = (u64 *)pos + flits;
/* FW_ULPTX_WR */
@@ -1154,7 +1175,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
- cpl->pldlen = htonl(skb->data_len);
+ cpl->pldlen = htonl(data_len);
/* encryption should start after tls header size + iv size */
cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
@@ -1196,7 +1217,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
/* CPL_TX_DATA */
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
- tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len));
+ tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq);
@@ -1216,13 +1237,13 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
}
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
+ atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
return 0;
}
@@ -1249,10 +1270,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
struct sge_eth_txq *q,
u32 tcp_seq, bool tcp_push, u32 mss,
u32 tls_rec_offset, u8 *prior_data,
- u32 prior_data_len)
+ u32 prior_data_len, u32 data_len,
+ u32 skb_offset)
{
+ u32 len16, wr_mid = 0, cipher_start, nfrags;
struct adapter *adap = tx_info->adap;
- u32 len16, wr_mid = 0, cipher_start;
unsigned int flits = 0, ndesc;
int credits, left, last_desc;
struct tx_sw_desc *sgl_sdesc;
@@ -1265,10 +1287,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
void *pos;
u64 *end;
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
/* get the number of flits required, it's a partial record so 2 flits
* (AES_BLOCK_SIZE) will be added.
*/
- flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+ flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
/* get the correct 8 byte IV of this record */
iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need
@@ -1340,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
- cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len);
+ cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0;
@@ -1371,7 +1394,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
if (tcp_push)
@@ -1404,8 +1427,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
if (prior_data_len)
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
/* send the complete packet except the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1433,6 +1456,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct sk_buff *skb, u32 tcp_seq, u32 mss,
bool tcp_push, struct sge_eth_txq *q,
u32 port_id, u8 *prior_data,
+ u32 data_len, u32 skb_offset,
u32 prior_data_len)
{
int credits, left, len16, last_desc;
@@ -1442,14 +1466,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
struct ulptx_idata *idata;
struct ulp_txpkt *ulptx;
struct fw_ulptx_wr *wr;
- u32 wr_mid = 0;
+ u32 wr_mid = 0, nfrags;
void *pos;
u64 *end;
flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
- flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+ nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+ flits += chcr_sgl_len(nfrags);
if (prior_data_len)
flits += 2;
+
/* WR will need len16 */
len16 = DIV_ROUND_UP(flits, 2);
/* check how many descriptors needed */
@@ -1502,7 +1528,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
tx_data = (struct cpl_tx_data *)(idata + 1);
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) |
- TX_LENGTH_V(skb->data_len + prior_data_len));
+ TX_LENGTH_V(data_len + prior_data_len));
/* set tcp seq number */
tx_data->rsvd = htonl(tcp_seq);
tx_data->flags = htonl(TX_BYPASS_F);
@@ -1526,8 +1552,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
end = pos + left;
}
/* send the complete packet including the header */
- cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
- sgl_sdesc->addr);
+ cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+ skb_offset, data_len);
sgl_sdesc->skb = skb;
chcr_txq_advance(&q->q, ndesc);
@@ -1535,12 +1561,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
return 0;
}
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+ struct sk_buff *skb,
+ struct sge_eth_txq *q)
+{
+ u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ unsigned int flits, ndesc;
+ int credits, last_desc;
+ u64 cntrl1, *end;
+ void *pos;
+
+ ctrl = sizeof(*cpl);
+ flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+ flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ len16 = DIV_ROUND_UP(flits, 2);
+ /* check how many descriptors needed */
+ ndesc = DIV_ROUND_UP(flits, 8);
+
+ credits = chcr_txq_avail(&q->q) - ndesc;
+ if (unlikely(credits < 0)) {
+ chcr_eth_txq_stop(q);
+ return -ENOMEM;
+ }
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ chcr_eth_txq_stop(q);
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
+ if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+ q->mapping_err++;
+ return -ENOMEM;
+ }
+
+ iplen = skb_network_header_len(skb);
+ maclen = skb_mac_header_len(skb);
+
+ pos = &q->q.desc[q->q.pidx];
+ end = (u64 *)pos + flits;
+ wr = pos;
+
+ /* Firmware work request header */
+ wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+ FW_WR_IMMDLEN_V(ctrl));
+
+ wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+ TXPKT_INTF_V(tx_info->tx_chan) |
+ TXPKT_PF_V(tx_info->adap->pf));
+ cpl->pack = 0;
+ cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+ TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+ cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+ TXPKT_IPHDR_LEN_V(iplen);
+ /* checksum offload */
+ cpl->ctrl1 = cpu_to_be64(cntrl1);
+ cpl->len = htons(skb->len);
+
+ pos = cpl + 1;
+
+ cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+ sgl_sdesc->skb = skb;
+ chcr_txq_advance(&q->q, ndesc);
+ cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+ return 0;
+}
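chcr_ktls_tunnel_pkt() repeats the credit pattern used throughout this file: reserve descriptors, and stop the queue early (setting the EQUEQ/EQUIQ flags so firmware signals a restart) once credits fall under the stop threshold. A stand-alone model of that decision (the threshold value is a stand-in for ETHTXQ_STOP_THRES):

	#include <stdbool.h>
	#include <stdio.h>

	#define STOP_THRES 32	/* stand-in for ETHTXQ_STOP_THRES */

	/* Reserve ndesc credits; *stop tells the caller to set the
	 * EQUEQ/EQUIQ flags and stop the queue before it can run dry.
	 */
	static bool reserve_credits(unsigned int *avail, unsigned int ndesc, bool *stop)
	{
		if (*avail < ndesc)
			return false;		/* out of credits: TX busy */
		*avail -= ndesc;
		*stop = *avail < STOP_THRES;
		return true;
	}

	int main(void)
	{
		unsigned int avail = 40;
		bool stop = false;

		if (reserve_credits(&avail, 16, &stop))
			printf("sent, %u credits left, stop=%d\n", avail, stop);
		return 0;
	}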
+
/*
* chcr_ktls_copy_record_in_skb
* @nskb - new skb where the frags to be added.
+ * @skb - old skb, to copy socket and destructor details.
* @record - specific record which has complete 16k record in frags.
*/
static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+ struct sk_buff *skb,
struct tls_record_info *record)
{
int i = 0;
@@ -1555,6 +1665,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
nskb->data_len = record->len;
nskb->len += record->len;
nskb->truesize += record->len;
+ nskb->sk = skb->sk;
+ nskb->destructor = skb->destructor;
+ refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
}
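The three added lines above re-attach the cloned record to the sending socket so that TX completion uncharges the memory through the destructor. A plain-counter model of that charge/uncharge pairing (not the sock API):

	#include <stdio.h>

	static unsigned int sk_wmem_alloc;	/* stand-in for sk->sk_wmem_alloc */

	static void charge(unsigned int truesize)	{ sk_wmem_alloc += truesize; }
	static void destructor(unsigned int truesize)	{ sk_wmem_alloc -= truesize; }

	int main(void)
	{
		charge(16384);		/* refcount_add(nskb->truesize, ...) at clone time */
		destructor(16384);	/* nskb->destructor runs at TX completion */
		printf("outstanding wmem = %u\n", sk_wmem_alloc);
		return 0;
	}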
/*
@@ -1626,39 +1739,46 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
- struct sge_eth_txq *q,
+ struct sge_eth_txq *q, u32 skb_offset,
u32 tls_end_offset, bool last_wr)
{
struct sk_buff *nskb = NULL;
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
- dev_kfree_skb_any(skb);
-
- nskb = alloc_skb(0, GFP_KERNEL);
- if (!nskb)
+ nskb = alloc_skb(0, GFP_ATOMIC);
+ if (!nskb) {
+ dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
+ }
+
/* copy complete record in skb */
- chcr_ktls_copy_record_in_skb(nskb, record);
+ chcr_ktls_copy_record_in_skb(nskb, skb, record);
/* packet is being sent from the beginning, update the tcp_seq
* accordingly.
*/
tcp_seq = tls_record_start_seq(record);
- /* reset snd una, so the middle record won't send the already
- * sent part.
- */
- if (chcr_ktls_update_snd_una(tx_info, q))
- goto out;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
+ /* reset skb offset */
+ skb_offset = 0;
+
+ if (last_wr)
+ dev_kfree_skb_any(skb);
+
+ last_wr = true;
+
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+ last_wr, record->len, skb_offset,
+ record->num_frags,
(last_wr && tcp_push_no_fin),
mss)) {
goto out;
}
+ tx_info->prev_seq = record->end_seq;
return 0;
out:
dev_kfree_skb_any(nskb);
@@ -1690,41 +1810,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
struct sk_buff *skb,
struct tls_record_info *record,
u32 tcp_seq, int mss, bool tcp_push_no_fin,
+ u32 data_len, u32 skb_offset,
struct sge_eth_txq *q, u32 tls_end_offset)
{
u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
u8 prior_data[16] = {0};
u32 prior_data_len = 0;
- u32 data_len;
	/* check if the skb ends in the middle of the tag/HASH; that's a
	 * problem, so send the packet before the HASH.
*/
- int remaining_record = tls_end_offset - skb->data_len;
+ int remaining_record = tls_end_offset - data_len;
if (remaining_record > 0 &&
remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
- int trimmed_len = skb->data_len -
- (TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
- struct sk_buff *tmp_skb = NULL;
- /* don't process the pkt if it is only a partial tag */
- if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
- goto out;
+ int trimmed_len = 0;
+
+ if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ trimmed_len = data_len -
+ (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+ remaining_record);
+ if (!trimmed_len)
+ return FALLBACK;
- WARN_ON(trimmed_len > skb->data_len);
+ WARN_ON(trimmed_len > data_len);
- /* shift to those many bytes */
- tmp_skb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!tmp_skb))
+ data_len = trimmed_len;
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
+ }
+
+ /* check if it is only the header part. */
+ if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+ if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ tcp_push_no_fin, q,
+ tx_info->port_id, prior_data,
+ data_len, skb_offset, prior_data_len))
goto out;
- chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
- /* free the last trimmed portion */
- dev_kfree_skb_any(skb);
- skb = tmp_skb;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
+ tx_info->prev_seq = tcp_seq + data_len;
+ return 0;
}
- data_len = skb->data_len;
+
/* check if the middle record's start point is 16 byte aligned. CTR
* needs 16 byte aligned start point to start encryption.
*/
@@ -1785,69 +1911,80 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
- /* include prio_data_len for further calculation.
- */
- data_len += prior_data_len;
}
/* reset snd una, so the middle record won't send the already
* sent part.
*/
if (chcr_ktls_update_snd_una(tx_info, q))
goto out;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
- /* Else means, its a partial first part of the record. Check if
- * its only the header, don't need to send for encryption then.
- */
- if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
- if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
- tcp_push_no_fin, q,
- tx_info->port_id,
- prior_data,
- prior_data_len)) {
- goto out;
- }
- return 0;
- }
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
mss, tls_rec_offset, prior_data,
- prior_data_len)) {
+ prior_data_len, data_len, skb_offset)) {
goto out;
}
+ tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
return 0;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
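The prior_data machinery above exists because AES-CTR must resume on a 16-byte block boundary within the record's ciphertext; the driver resends the few preceding bytes and rewinds the TCP sequence number by the same amount. The bare alignment arithmetic (the driver's header/IV offset bookkeeping is omitted here):

	#include <stdio.h>

	#define AES_BLOCK_LEN 16

	int main(void)
	{
		/* resuming 70 bytes into the ciphertext: back up 6 bytes so
		 * the AES-CTR engine starts on a 16-byte block boundary
		 */
		unsigned int cipher_offset = 70;
		unsigned int prior_data_len = cipher_offset % AES_BLOCK_LEN;

		printf("prior_data_len = %u, aligned offset = %u, tcp_seq -= %u\n",
		       prior_data_len, cipher_offset - prior_data_len,
		       prior_data_len);
		return 0;
	}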
+static int chcr_ktls_sw_fallback(struct sk_buff *skb,
+ struct chcr_ktls_info *tx_info,
+ struct sge_eth_txq *q)
+{
+ u32 data_len, skb_offset;
+ struct sk_buff *nskb;
+ struct tcphdr *th;
+
+ nskb = tls_encrypt_skb(skb);
+
+ if (!nskb)
+ return 0;
+
+ th = tcp_hdr(nskb);
+ skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ data_len = nskb->len - skb_offset;
+ skb_tx_timestamp(nskb);
+
+ if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
+ goto out;
+
+ tx_info->prev_seq = ntohl(th->seq) + data_len;
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
+ return 0;
+out:
+ dev_kfree_skb_any(nskb);
+ return 0;
+}
/* nic tls TX handler */
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb);
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
- struct chcr_stats_debug *stats;
struct chcr_ktls_info *tx_info;
- u32 tls_end_offset, tcp_seq;
struct tls_context *tls_ctx;
- struct sk_buff *local_skb;
- int new_connection_state;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
+ skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_data_len = skb->len - skb_offset;
+ data_len = skb_data_len;
- mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
-
- /* check if we haven't set it for ktls offload */
- if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
- goto out;
+ mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
if (unlikely(tls_ctx->netdev != dev))
@@ -1859,25 +1996,9 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!tx_info))
goto out;
- /* check the connection state, we don't need to pass new connection
- * state, state machine will check and update the new state if it is
- * stuck due to responses not received from HW.
- * Start the tx handling only if state is KTLS_CONN_TX_READY.
- */
- new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
- if (new_connection_state != KTLS_CONN_TX_READY)
- goto out;
-
- /* don't touch the original skb, make a new skb to extract each records
- * and send them separately.
- */
- local_skb = alloc_skb(0, GFP_KERNEL);
-
- if (unlikely(!local_skb))
- return NETDEV_TX_BUSY;
-
adap = tx_info->adap;
- stats = &adap->chcr_stats;
+ stats = &adap->ch_ktls_stats;
+ port_stats = &stats->ktls_port[tx_info->port_id];
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
@@ -1889,20 +2010,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (ret)
return NETDEV_TX_BUSY;
}
- /* update tcb */
- ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
- ntohl(th->ack_seq),
- ntohs(th->window));
- if (ret) {
- dev_kfree_skb_any(local_skb);
- return NETDEV_TX_BUSY;
- }
-
- /* copy skb contents into local skb */
- chcr_ktls_skb_copy(skb, local_skb);
- /* go through the skb and send only one record at a time. */
- data_len = skb->data_len;
	/* TCP segments can be received either complete or partial.
	 * chcr_end_part_handler will handle cases if a complete record or the
	 * end part of a record is received. In case of a partial end part of a record,
@@ -1923,14 +2031,68 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(!record)) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out;
}
+ tls_end_offset = record->end_seq - tcp_seq;
+
+ pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+ tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+ /* update tcb for the skb */
+ if (skb_data_len == data_len) {
+ u32 tx_max = tcp_seq;
+
+ if (!tls_record_is_start_marker(record) &&
+ tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+ tx_max = record->end_seq -
+ TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+ ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
+ ntohl(th->ack_seq),
+ ntohs(th->window),
+ tls_end_offset !=
+ record->len);
+ if (ret) {
+ spin_unlock_irqrestore(&tx_ctx->base.lock,
+ flags);
+ goto out;
+ }
+
+ if (th->fin)
+ skb_get(skb);
+ }
+
if (unlikely(tls_record_is_start_marker(record))) {
+ atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
+			/* If tls_end_offset < data_len, there is some data
+			 * after the start marker which needs encryption: send
+			 * the plaintext first and take an skb refcount. Else
+			 * send out the complete pkt as plaintext.
+ */
+ if (tls_end_offset < data_len)
+ skb_get(skb);
+ else
+ tls_end_offset = data_len;
+
+ ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+ (!th->fin && th->psh), q,
+ tx_info->port_id, NULL,
+ tls_end_offset, skb_offset,
+ 0);
+
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
- goto out;
+ if (ret) {
+ /* free the refcount taken earlier */
+ if (tls_end_offset < data_len)
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ data_len -= tls_end_offset;
+ tcp_seq = record->end_seq;
+ skb_offset += tls_end_offset;
+ continue;
}
/* increase page reference count of the record, so that there
@@ -1942,76 +2104,179 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* lock cleared */
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- tls_end_offset = record->end_seq - tcp_seq;
- pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
- tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
/* if a tls record is finishing in this SKB */
if (tls_end_offset <= data_len) {
- struct sk_buff *nskb = NULL;
-
- if (tls_end_offset < data_len) {
- nskb = alloc_skb(0, GFP_KERNEL);
- if (unlikely(!nskb)) {
- ret = -ENOMEM;
- goto clear_ref;
- }
-
- chcr_ktls_skb_shift(nskb, local_skb,
- tls_end_offset);
- } else {
- /* its the only record in this skb, directly
- * point it.
- */
- nskb = local_skb;
- }
- ret = chcr_end_part_handler(tx_info, nskb, record,
+ ret = chcr_end_part_handler(tx_info, skb, record,
tcp_seq, mss,
(!th->fin && th->psh), q,
+ skb_offset,
tls_end_offset,
- (nskb == local_skb));
-
- if (ret && nskb != local_skb)
- dev_kfree_skb_any(local_skb);
+ skb_offset +
+ tls_end_offset == skb->len);
data_len -= tls_end_offset;
/* tcp_seq increment is required to handle next record.
*/
tcp_seq += tls_end_offset;
+ skb_offset += tls_end_offset;
} else {
- ret = chcr_short_record_handler(tx_info, local_skb,
+ ret = chcr_short_record_handler(tx_info, skb,
record, tcp_seq, mss,
(!th->fin && th->psh),
+ data_len, skb_offset,
q, tls_end_offset);
data_len = 0;
}
-clear_ref:
+
/* clear the frag ref count which was increased locally before */
for (i = 0; i < record->num_frags; i++) {
/* clear the frag ref count */
__skb_frag_unref(&record->frags[i]);
}
/* if any failure, come out from the loop. */
- if (ret)
- goto out;
+ if (ret) {
+ if (th->fin)
+ dev_kfree_skb_any(skb);
+
+ if (ret == FALLBACK)
+ return chcr_ktls_sw_fallback(skb, tx_info, q);
+
+ return NETDEV_TX_OK;
+ }
+
/* length should never be less than 0 */
WARN_ON(data_len < 0);
} while (data_len > 0);
- tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
-
- atomic64_inc(&stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+ atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
*/
- if (th->fin)
+ if (th->fin) {
chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+ dev_kfree_skb_any(skb);
+ }
+ return NETDEV_TX_OK;
out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+
+static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct chcr_ktls_uld_ctx *u_ctx;
+
+ pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
+ CHCR_KTLS_DRV_VERSION);
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *lldi;
+out:
+ return u_ctx;
+}
+
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+
+static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+ [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+};
+
+static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl)
+{
+ const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
+ struct chcr_ktls_uld_ctx *u_ctx = handle;
+ u8 opcode = rpl->ot.opcode;
+ struct adapter *adap;
+
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+
+ if (!work_handlers[opcode]) {
+ pr_err("Unsupported opcode %d received\n", opcode);
+ return 0;
+ }
+
+ work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
+ return 0;
+}
+
+static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct chcr_ktls_uld_ctx *u_ctx = handle;
+
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ case CXGB4_STATE_DETACH:
+ pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_del(&u_ctx->entry);
+ mutex_unlock(&dev_mutex);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct cxgb4_uld_info chcr_ktls_uld_info = {
+ .name = CHCR_KTLS_DRV_MODULE_NAME,
+ .nrxq = 1,
+ .rxq_size = 1024,
+ .add = chcr_ktls_uld_add,
+ .tx_handler = chcr_ktls_xmit,
+ .rx_handler = chcr_ktls_uld_rx_handler,
+ .state_change = chcr_ktls_uld_state_change,
+ .tlsdev_ops = &chcr_ktls_ops,
+};
+
+static int __init chcr_ktls_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
+ return 0;
+}
+
+static void __exit chcr_ktls_exit(void)
+{
+ struct chcr_ktls_uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ pr_info("driver unloaded\n");
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_KTLS);
+}
+
+module_init(chcr_ktls_init);
+module_exit(chcr_ktls_exit);
+
+MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHCR_KTLS_DRV_VERSION);
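The rx path registered above is a plain opcode-indexed dispatch table: work_handlers[] is sized for every possible CPL opcode and unknown opcodes are rejected with a NULL check before the indirect call. A minimal user-space sketch of the same pattern (all names hypothetical, not part of the driver):

#include <stdio.h>

#define NUM_OPCODES 256

typedef int (*handler_func)(const unsigned char *payload);

static int handle_open_reply(const unsigned char *payload)
{
	printf("open reply: status %u\n", payload[0]);
	return 0;
}

/* sparse table: only the opcodes we understand get an entry */
static handler_func handlers[NUM_OPCODES] = {
	[0x25] = handle_open_reply,
};

static int dispatch(unsigned char opcode, const unsigned char *payload)
{
	if (!handlers[opcode]) {	/* same NULL check as the driver */
		fprintf(stderr, "unsupported opcode %u\n", opcode);
		return 0;
	}
	return handlers[opcode](payload);
}

int main(void)
{
	unsigned char msg[1] = { 0 };

	return dispatch(0x25, msg);
}

Because the table covers the full range of a u8 opcode, indexing with an untrusted opcode byte needs no separate bounds check.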
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 5cbd84b1da05..18b3b1f02415 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -4,14 +4,17 @@
#ifndef __CHCR_KTLS_H__
#define __CHCR_KTLS_H__
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-#include <net/tls.h>
#include "cxgb4.h"
#include "t4_msg.h"
#include "t4_tcb.h"
#include "l2t.h"
#include "chcr_common.h"
#include "cxgb4_uld.h"
+#include "clip_tbl.h"
+
+#define CHCR_KTLS_DRV_MODULE_NAME "ch_ktls"
+#define CHCR_KTLS_DRV_VERSION "1.0.0.0-ko"
+#define CHCR_KTLS_DRV_DESC "Chelsio NIC TLS ULD Driver"
#define CHCR_TCB_STATE_CLOSED 0
#define CHCR_KTLS_KEY_CTX_LEN 16
@@ -23,23 +26,22 @@
#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
sizeof(struct cpl_tx_sec_pdu))
+#define FALLBACK 35
-enum chcr_ktls_conn_state {
- KTLS_CONN_CLOSED,
- KTLS_CONN_ACT_OPEN_REQ,
- KTLS_CONN_ACT_OPEN_RPL,
- KTLS_CONN_SET_TCB_REQ,
- KTLS_CONN_SET_TCB_RPL,
- KTLS_CONN_TX_READY,
+enum ch_ktls_open_state {
+ CH_KTLS_OPEN_SUCCESS = 0,
+ CH_KTLS_OPEN_PENDING = 1,
+ CH_KTLS_OPEN_FAILURE = 2,
};
struct chcr_ktls_info {
struct sock *sk;
- spinlock_t lock; /* state machine lock */
+ spinlock_t lock; /* lock for pending_close */
struct ktls_key_ctx key_ctx;
struct adapter *adap;
struct l2t_entry *l2te;
struct net_device *netdev;
+ struct completion completion;
u64 iv;
u64 record_no;
int tid;
@@ -55,13 +57,14 @@ struct chcr_ktls_info {
u32 tcp_start_seq_number;
u32 scmd0_short_seqno_numivs;
u32 scmd0_short_ivgen_hdrlen;
- enum chcr_ktls_conn_state connection_state;
u16 prev_win;
u8 tx_chan;
u8 smt_idx;
u8 port_id;
u8 ip_family;
u8 first_qset;
+ enum ch_ktls_open_state open_state;
+ bool pending_close;
};
struct chcr_ktls_ofld_ctx_tx {
@@ -69,6 +72,11 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_info *chcr_info;
};
+struct chcr_ktls_uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+};
+
static inline struct chcr_ktls_ofld_ctx_tx *
chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
{
@@ -82,22 +90,12 @@ chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
static inline int chcr_get_first_rx_qid(struct adapter *adap)
{
/* u_ctx is saved in adap, fetch it */
- struct uld_ctx *u_ctx = adap->uld[CXGB4_ULD_CRYPTO].handle;
+ struct chcr_ktls_uld_ctx *u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (!u_ctx)
return -1;
return u_ctx->lldi.rxq_ids[0];
}
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
-int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
-void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction);
-#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
#endif /* __CHCR_KTLS_H__ */
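The completion and open_state fields added to chcr_ktls_info replace the old multi-step connection_state machine: the open path can now block until the hardware answers, and the CPL handlers report the outcome through open_state. A hedged kernel-style fragment of how that handshake is expected to flow (the exact call sites are an assumption, not quoted from this patch):

	init_completion(&tx_info->completion);
	/* ... post CPL_ACT_OPEN_REQ to the adapter ... */
	if (!wait_for_completion_timeout(&tx_info->completion, 30 * HZ))
		goto free_tid;				/* no reply from firmware */
	if (tx_info->open_state != CH_KTLS_OPEN_SUCCESS)
		goto free_tid;				/* open rejected */

	/* and in chcr_ktls_cpl_act_open_rpl(), on the reply path: */
	tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
	complete(&tx_info->completion);

pending_close, guarded by the repurposed spinlock, covers the race where the socket is torn down while such a reply is still outstanding.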
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
index bc11495acdb3..bc11495acdb3 100644
--- a/drivers/crypto/chelsio/chtls/Makefile
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 459442704eb1..2d3dfdd2a716 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -32,6 +32,94 @@
#include "chcr_core.h"
#include "chcr_crypto.h"
+#define CHTLS_DRV_VERSION "1.0.0.0-ko"
+
+#define TLS_KEYCTX_RXFLIT_CNT_S 24
+#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
+
+#define TLS_KEYCTX_RXPROT_VER_S 20
+#define TLS_KEYCTX_RXPROT_VER_M 0xf
+#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
+
+#define TLS_KEYCTX_RXCIPH_MODE_S 16
+#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
+#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
+
+#define TLS_KEYCTX_RXAUTH_MODE_S 12
+#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
+#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
+
+#define TLS_KEYCTX_RXCIAU_CTRL_S 11
+#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
+
+#define TLS_KEYCTX_RX_SEQCTR_S 9
+#define TLS_KEYCTX_RX_SEQCTR_M 0x3
+#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
+
+#define TLS_KEYCTX_RX_VALID_S 8
+#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
+
+#define TLS_KEYCTX_RXCK_SIZE_S 3
+#define TLS_KEYCTX_RXCK_SIZE_M 0x7
+#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
+
+#define TLS_KEYCTX_RXMK_SIZE_S 0
+#define TLS_KEYCTX_RXMK_SIZE_M 0x7
+#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
+
+#define KEYCTX_TX_WR_IV_S 55
+#define KEYCTX_TX_WR_IV_M 0x1ffULL
+#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
+#define KEYCTX_TX_WR_IV_G(x) \
+ (((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
+
+#define KEYCTX_TX_WR_AAD_S 47
+#define KEYCTX_TX_WR_AAD_M 0xffULL
+#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
+#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
+ KEYCTX_TX_WR_AAD_M)
+
+#define KEYCTX_TX_WR_AADST_S 39
+#define KEYCTX_TX_WR_AADST_M 0xffULL
+#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
+#define KEYCTX_TX_WR_AADST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
+
+#define KEYCTX_TX_WR_CIPHER_S 30
+#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
+#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
+#define KEYCTX_TX_WR_CIPHER_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
+
+#define KEYCTX_TX_WR_CIPHERST_S 23
+#define KEYCTX_TX_WR_CIPHERST_M 0x7f
+#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
+#define KEYCTX_TX_WR_CIPHERST_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
+
+#define KEYCTX_TX_WR_AUTH_S 14
+#define KEYCTX_TX_WR_AUTH_M 0x1ff
+#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
+#define KEYCTX_TX_WR_AUTH_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
+
+#define KEYCTX_TX_WR_AUTHST_S 7
+#define KEYCTX_TX_WR_AUTHST_M 0x7f
+#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
+#define KEYCTX_TX_WR_AUTHST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
+
+#define KEYCTX_TX_WR_AUTHIN_S 0
+#define KEYCTX_TX_WR_AUTHIN_M 0x7f
+#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
+#define KEYCTX_TX_WR_AUTHIN_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
#define MAX_IVS_PAGE 256
#define TLS_KEY_CONTEXT_SZ 64
#define CIPHER_BLOCK_SIZE 16
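All of these _S/_M/_V/_G macros follow the cxgb4 register-field convention: _S is the field's bit shift, _M its mask after shifting down, _V(x) places a value into the field, and _G(x) extracts it. A self-contained sketch of one such pair (the field name is invented for illustration):

#include <assert.h>
#include <stdint.h>

/* hypothetical 9-bit field at bit 30, mirroring KEYCTX_TX_WR_CIPHER_* */
#define DEMO_FIELD_S	30
#define DEMO_FIELD_M	0x1ffULL
#define DEMO_FIELD_V(x)	((uint64_t)(x) << DEMO_FIELD_S)
#define DEMO_FIELD_G(x)	(((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)

int main(void)
{
	uint64_t word = 0;

	word |= DEMO_FIELD_V(0x123);		/* pack the field */
	assert(DEMO_FIELD_G(word) == 0x123);	/* and read it back */
	return 0;
}

Note that the 64-bit masks matter (the ULL constants here, and KEYCTX_TX_WR_IV_M and friends in the header): without them a shift by more than 31 bits would overflow a plain int.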
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 05520dccd906..96d561653496 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -92,11 +92,13 @@ static void chtls_sock_release(struct kref *ref)
static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
struct net_device *temp;
int addr_type;
#endif
+ int i;
switch (sk->sk_family) {
case PF_INET:
@@ -127,8 +129,12 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
return NULL;
if (is_vlan_dev(ndev))
- return vlan_dev_real_dev(ndev);
- return ndev;
+ ndev = vlan_dev_real_dev(ndev);
+
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ return ndev;
+ return NULL;
}
static void assign_rxopt(struct sock *sk, unsigned int opt)
@@ -206,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
__skb_trim(skb, 0);
- refcount_add(2, &skb->users);
+ refcount_inc(&skb->users);
} else {
skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
}
@@ -477,7 +483,6 @@ void chtls_destroy_sock(struct sock *sk)
chtls_purge_write_queue(sk);
free_tls_keyid(sk);
kref_put(&csk->kref, chtls_sock_release);
- csk->cdev = NULL;
if (sk->sk_family == AF_INET)
sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
@@ -736,14 +741,13 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
- struct chtls_sock *csk;
+ struct net_device *ndev = chtls_find_netdev(cdev, sk);
int addr_type = 0;
- csk = rcu_dereference_sk_user_data(sk);
addr_type = ipv6_addr_type((const struct in6_addr *)
&sk->sk_v6_rcv_saddr);
if (addr_type != IPV6_ADDR_ANY)
- cxgb4_clip_release(csk->egress_dev, (const u32 *)
+ cxgb4_clip_release(ndev, (const u32 *)
&sk->sk_v6_rcv_saddr, 1);
}
#endif
@@ -768,14 +772,13 @@ static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
@@ -792,15 +795,13 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static void chtls_purge_wr_queue(struct sock *sk)
@@ -1157,6 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ndev = n->dev;
if (!ndev)
goto free_dst;
+ if (is_vlan_dev(ndev))
+ ndev = vlan_dev_real_dev(ndev);
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1507,7 +1511,6 @@ static void add_to_reap_list(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
@@ -1516,7 +1519,6 @@ static void add_to_reap_list(struct sock *sk)
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index 47ba81e42f5d..47ba81e42f5d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index f1820aca0d33..62c829023da5 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
@@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
if (ret)
goto out_notcb;
+ if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
+ goto out_notcb;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
csk->wr_credits -= DIV_ROUND_UP(len, 16);
csk->wr_unacked += DIV_ROUND_UP(len, 16);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 2e9acae1cba3..188d871f6b8c 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -902,9 +902,9 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
return 0;
}
-static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
- return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
+ return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
}
static int csk_wait_memory(struct chtls_dev *cdev,
@@ -1240,6 +1240,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
copied = 0;
csk = rcu_dereference_sk_user_data(sk);
cdev = csk->cdev;
+ lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo);
@@ -1584,6 +1585,7 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
@@ -1594,8 +1596,10 @@ skip_copy:
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
break;
}
} while (len > 0);
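The csk_mem_free() change above is a behavior fix, not just a type cleanup: the old version returned the raw difference as an int, and a negative difference (socket already over budget) is still nonzero, i.e. truthy, so callers treated an exhausted send buffer as free memory. A tiny self-contained demonstration of the pitfall:

#include <stdbool.h>
#include <stdio.h>

static int mem_free_buggy(int budget, int queued)
{
	return budget - queued;		/* nonzero even when negative! */
}

static bool mem_free_fixed(int budget, int queued)
{
	return budget - queued > 0;
}

int main(void)
{
	/* socket has queued more than its budget */
	printf("buggy says free: %d, fixed says free: %d\n",
	       mem_free_buggy(100, 150) ? 1 : 0,	/* 1: wrong */
	       mem_free_fixed(100, 150) ? 1 : 0);	/* 0: correct */
	return 0;
}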
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 66d247efd561..9098b3eed4da 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -638,4 +638,4 @@ module_exit(chtls_unregister);
MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
+MODULE_VERSION(CHTLS_DRV_VERSION);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h
index 91423b70bb45..210f9ec9af4b 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.h
+++ b/drivers/net/ethernet/cirrus/cs89x0.h
@@ -459,7 +459,3 @@
#define PNP_CNF_INT 0x70
#define PNP_CNF_DMA 0x74
#define PNP_CNF_MEM 0x48
-
-#define BIT0 1
-#define BIT15 0x8000
-
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 18f3aeb88f22..c67a16a48d62 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -169,6 +169,7 @@ struct enic {
u16 num_vfs;
#endif
spinlock_t enic_api_lock;
+ bool enic_api_busy;
struct enic_port_profile *pp;
/* work queue cache line section */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index b161f24522b8..3bdc74fba1e3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright 2013 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
struct vnic_dev *vdev = enic->vdev;
spin_lock(&enic->enic_api_lock);
+ while (enic->enic_api_busy) {
+ spin_unlock(&enic->enic_api_lock);
+ cpu_relax();
+ spin_lock(&enic->enic_api_lock);
+ }
+
spin_lock_bh(&enic->devcmd_lock);
vnic_dev_cmd_proxy_by_index_start(vdev, vf);
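The new enic_api_busy flag lets the reset worker fence off proxied devcmds without keeping enic_api_lock held across the whole reset. Waiters poll the flag but drop the lock between checks so the flag owner can make progress. A user-space model of the same handoff (pthread names are illustrative only; the driver uses a spinlock and cpu_relax()):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t api_lock = PTHREAD_MUTEX_INITIALIZER;
static bool api_busy;

static void api_call(void)
{
	pthread_mutex_lock(&api_lock);
	while (api_busy) {			/* reset in progress: back off */
		pthread_mutex_unlock(&api_lock);
		sched_yield();			/* stand-in for cpu_relax() */
		pthread_mutex_lock(&api_lock);
	}
	/* ... issue the proxied command under the lock ... */
	pthread_mutex_unlock(&api_lock);
}

static void set_api_busy(bool busy)
{
	pthread_mutex_lock(&api_lock);
	api_busy = busy;			/* always flipped under the lock */
	pthread_mutex_unlock(&api_lock);
}

int main(void)
{
	set_api_busy(true);	/* reset begins */
	set_api_busy(false);	/* reset done */
	api_call();		/* now proceeds immediately */
	return 0;
}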
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 4d8e0aa447fb..1a9803f2073e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright 2013 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -434,7 +434,6 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
default:
return -EINVAL;
- break;
}
fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 552d89fdf54a..fb269d587b74 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -326,11 +326,11 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct enic *enic = vnic_dev_priv(wq->vdev);
if (buf->sop)
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
if (buf->os_buf)
dev_kfree_skb_any(buf->os_buf);
@@ -574,8 +574,8 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -605,8 +605,8 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -693,8 +693,9 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
*/
while (frag_len_left) {
len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev,
+ skb->data + offset, len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
@@ -752,8 +753,8 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -1222,8 +1223,8 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
if (!buf->os_buf)
return;
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(buf->os_buf);
buf->os_buf = NULL;
}
@@ -1248,8 +1249,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
if (!skb)
return -ENOMEM;
- dma_addr = pci_map_single(enic->pdev, skb->data, len,
- PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+ DMA_FROM_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr))) {
dev_kfree_skb(skb);
return -ENOMEM;
@@ -1281,8 +1282,8 @@ static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
new_skb = netdev_alloc_skb_ip_align(netdev, len);
if (!new_skb)
return false;
- pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+ DMA_FROM_DEVICE);
memcpy(new_skb->data, (*skb)->data, len);
*skb = new_skb;
@@ -1331,8 +1332,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
@@ -1346,8 +1347,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
buf->os_buf = NULL;
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+ buf->len, DMA_FROM_DEVICE);
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -1420,8 +1421,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
}
@@ -2106,8 +2107,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
int done;
int err;
- BUG_ON(in_interrupt());
-
err = start(vdev, arg);
if (err)
return err;
@@ -2178,9 +2177,9 @@ int __enic_set_rsskey(struct enic *enic)
dma_addr_t rss_key_buf_pa;
int i, kidx, bidx, err;
- rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
- sizeof(union vnic_rss_key),
- &rss_key_buf_pa);
+ rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_key),
+ &rss_key_buf_pa, GFP_ATOMIC);
if (!rss_key_buf_va)
return -ENOMEM;
@@ -2195,8 +2194,8 @@ int __enic_set_rsskey(struct enic *enic)
sizeof(union vnic_rss_key));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
- rss_key_buf_va, rss_key_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
return err;
}
@@ -2215,8 +2214,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
unsigned int i;
int err;
- rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
- sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+ rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_cpu),
+ &rss_cpu_buf_pa, GFP_ATOMIC);
if (!rss_cpu_buf_va)
return -ENOMEM;
@@ -2229,8 +2229,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
sizeof(union vnic_rss_cpu));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
- rss_cpu_buf_va, rss_cpu_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
return err;
}
@@ -2295,6 +2295,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
+static void enic_set_api_busy(struct enic *enic, bool busy)
+{
+ spin_lock(&enic->enic_api_lock);
+ enic->enic_api_busy = busy;
+ spin_unlock(&enic->enic_api_lock);
+}
+
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2304,7 +2311,9 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_stop(enic->netdev);
enic_dev_soft_reset(enic);
enic_reset_addr_lists(enic);
@@ -2312,7 +2321,10 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2324,7 +2336,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2333,7 +2347,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2527,13 +2544,15 @@ static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
- for (i = 0; i < enic->rq_count; i++) {
- napi_hash_del(&enic->napi[i]);
- netif_napi_del(&enic->napi[i]);
- }
+ for (i = 0; i < enic->rq_count; i++)
+ __netif_napi_del(&enic->napi[i]);
+
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
- netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+ __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+
+ /* observe RCU grace period after __netif_napi_del() calls */
+ synchronize_net();
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
@@ -2699,21 +2718,21 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 32);
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 47);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 901e44b0b795..45015931b335 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -193,9 +193,10 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
- ring->size_unaligned,
- &ring->base_addr_unaligned);
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned,
+ GFP_KERNEL);
if (!ring->descs_unaligned) {
vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
@@ -218,10 +219,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
- ring->size_unaligned,
- ring->descs_unaligned,
- ring->base_addr_unaligned);
+ dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
ring->descs = NULL;
}
}
@@ -551,9 +551,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa, GFP_ATOMIC);
if (!vdev->fw_info)
return -ENOMEM;
@@ -603,8 +603,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = 1000;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, GFP_ATOMIC);
if (!vdev->stats)
return -ENOMEM;
}
@@ -852,9 +853,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return -EINVAL;
}
- notify_addr = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &notify_pa);
+ notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa, GFP_ATOMIC);
if (!notify_addr)
return -ENOMEM;
@@ -882,10 +883,9 @@ static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
if (vdev->notify) {
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify, vdev->notify_pa);
}
return vnic_dev_notify_unsetcmd(vdev);
@@ -1046,18 +1046,17 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify, vdev->notify_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_stats),
- vdev->stats, vdev->stats_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_fw_info),
- vdev->fw_info, vdev->fw_info_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
vnic_dev_deinit_devcmd2(vdev);
@@ -1127,7 +1126,7 @@ int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
void *prov_buf;
int ret;
- prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
if (!prov_buf)
return -ENOMEM;
@@ -1137,7 +1136,7 @@ int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
- pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+ dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);
return ret;
}
@@ -1217,7 +1216,8 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
tlv_size = sizeof(struct filter) +
sizeof(struct filter_action) +
2 * sizeof(struct filter_tlv);
- tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
+ tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
+ &tlv_pa, GFP_ATOMIC);
if (!tlv_va)
return -ENOMEM;
tlv = tlv_va;
@@ -1240,7 +1240,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
*entry = (u16)a0;
- pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+ dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
a0 = *entry;
ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
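One subtlety in this vnic_dev conversion: pci_alloc_consistent() always allocated with GFP_ATOMIC, so call sites that may run in atomic context keep GFP_ATOMIC after the switch to preserve behavior, while paths known to run in process context (the ring setup above) can relax to GFP_KERNEL and use a sleeping allocation. As a hedged fragment:

	/* process context, may sleep */
	descs = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);

	/* possibly-atomic caller: keep the old pci_alloc_consistent() semantics */
	stats = dma_alloc_coherent(&pdev->dev, sizeof(*stats), &stats_pa, GFP_ATOMIC);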
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ffec0f3dd957..8df6f081f244 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -85,6 +85,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
* struct gmac_queue_page - page buffer per-page info
+ * @page: the page struct
+ * @mapping: the dma address handle
*/
struct gmac_queue_page {
struct page *page;
@@ -509,7 +511,6 @@ static int gmac_init(struct net_device *netdev)
.rel_threshold = 0,
} };
union gmac_config0 tmp;
- u32 val;
config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
@@ -519,7 +520,7 @@ static int gmac_init(struct net_device *netdev)
writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
- val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(hw_weigh.bits32,
@@ -539,12 +540,6 @@ static int gmac_init(struct net_device *netdev)
return 0;
}
-static void gmac_uninit(struct net_device *netdev)
-{
- if (netdev->phydev)
- phy_disconnect(netdev->phydev);
-}
-
static int gmac_setup_txqs(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -1768,15 +1763,6 @@ static int gmac_open(struct net_device *netdev)
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err;
- if (!netdev->phydev) {
- err = gmac_setup_phy(netdev);
- if (err) {
- netif_err(port, ifup, netdev,
- "PHY init failed: %d\n", err);
- return err;
- }
- }
-
err = request_irq(netdev->irq, gmac_irq,
IRQF_SHARED, netdev->name, netdev);
if (err) {
@@ -2122,9 +2108,8 @@ static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
- union gmac_config0 config0;
- config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ readl(port->gmac_base + GMAC_CONFIG0);
rp->rx_max_pending = 1 << 15;
rp->rx_mini_max_pending = 0;
@@ -2209,7 +2194,6 @@ static void gmac_get_drvinfo(struct net_device *netdev,
static const struct net_device_ops gmac_351x_ops = {
.ndo_init = gmac_init,
- .ndo_uninit = gmac_uninit,
.ndo_open = gmac_open,
.ndo_stop = gmac_stop,
.ndo_start_xmit = gmac_start_xmit,
@@ -2295,8 +2279,10 @@ static irqreturn_t gemini_port_irq(int irq, void *data)
static void gemini_port_remove(struct gemini_ethernet_port *port)
{
- if (port->netdev)
+ if (port->netdev) {
+ phy_disconnect(port->netdev->phydev);
unregister_netdev(port->netdev);
+ }
clk_disable_unprepare(port->pclk);
geth_cleanup_freeq(port->geth);
}
@@ -2505,6 +2491,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
if (ret)
goto unprepare;
+ ret = gmac_setup_phy(netdev);
+ if (ret) {
+ netdev_err(netdev,
+ "PHY init failed\n");
+ goto unprepare;
+ }
+
ret = register_netdev(netdev);
if (ret)
goto unprepare;
@@ -2513,10 +2506,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
"irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
port->irq, &dmares->start,
&gmacres->start);
- ret = gmac_setup_phy(netdev);
- if (ret)
- netdev_info(netdev,
- "PHY init failed, deferring to ifup time\n");
return 0;
unprepare:
@@ -2529,6 +2518,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
+
return 0;
}
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 2610efe4f873..d9f6c19940ef 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -443,21 +443,23 @@ static void de_rx (struct de_private *de)
}
if (!copying_skb) {
- pci_unmap_single(de->pdev, mapping,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev, mapping, buflen,
+ DMA_FROM_DEVICE);
skb_put(skb, len);
mapping =
de->rx_skb[rx_tail].mapping =
- pci_map_single(de->pdev, copy_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_map_single(&de->pdev->dev, copy_skb->data,
+ buflen, DMA_FROM_DEVICE);
de->rx_skb[rx_tail].skb = copy_skb;
} else {
- pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
+ DMA_FROM_DEVICE);
skb_reserve(copy_skb, RX_OFFSET);
skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
len);
- pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&de->pdev->dev, mapping,
+ len, DMA_FROM_DEVICE);
/* We'll reuse the original ring buffer. */
skb = copy_skb;
@@ -554,13 +556,15 @@ static void de_tx (struct de_private *de)
goto next;
if (unlikely(skb == DE_SETUP_SKB)) {
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- sizeof(de->setup_frame), PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[tx_tail].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
goto next;
}
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
+ skb->len, DMA_TO_DEVICE);
if (status & LastFrag) {
if (status & TxError) {
@@ -620,7 +624,8 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
txd = &de->tx_ring[entry];
len = skb->len;
- mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&de->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
if (entry == (DE_TX_RING_SIZE - 1))
flags |= RingEnd;
if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
@@ -763,8 +768,8 @@ static void __de_set_rx_mode (struct net_device *dev)
de->tx_skb[entry].skb = DE_SETUP_SKB;
de->tx_skb[entry].mapping = mapping =
- pci_map_single (de->pdev, de->setup_frame,
- sizeof (de->setup_frame), PCI_DMA_TODEVICE);
+ dma_map_single(&de->pdev->dev, de->setup_frame,
+ sizeof(de->setup_frame), DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
txd = &de->tx_ring[entry];
@@ -1279,8 +1284,10 @@ static int de_refill_rx (struct de_private *de)
if (!skb)
goto err_out;
- de->rx_skb[i].mapping = pci_map_single(de->pdev,
- skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
+ skb->data,
+ de->rx_buf_sz,
+ DMA_FROM_DEVICE);
de->rx_skb[i].skb = skb;
de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
@@ -1313,7 +1320,8 @@ static int de_init_rings (struct de_private *de)
static int de_alloc_rings (struct de_private *de)
{
- de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ de->rx_ring = dma_alloc_coherent(&de->pdev->dev, DE_RING_BYTES,
+ &de->ring_dma, GFP_KERNEL);
if (!de->rx_ring)
return -ENOMEM;
de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
@@ -1333,8 +1341,9 @@ static void de_clean_rings (struct de_private *de)
for (i = 0; i < DE_RX_RING_SIZE; i++) {
if (de->rx_skb[i].skb) {
- pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
- de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->rx_skb[i].mapping, de->rx_buf_sz,
+ DMA_FROM_DEVICE);
dev_kfree_skb(de->rx_skb[i].skb);
}
}
@@ -1344,15 +1353,15 @@ static void de_clean_rings (struct de_private *de)
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
de->dev->stats.tx_dropped++;
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
} else {
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- sizeof(de->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
}
}
}
@@ -1364,7 +1373,8 @@ static void de_clean_rings (struct de_private *de)
static void de_free_rings (struct de_private *de)
{
de_clean_rings(de);
- pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ dma_free_coherent(&de->pdev->dev, DE_RING_BYTES, de->rx_ring,
+ de->ring_dma);
de->rx_ring = NULL;
de->tx_ring = NULL;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f9dd1aa9f2da..683e328b5461 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4925,11 +4925,11 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
u_char breg[2];
} a;
int i, r2, r3, ret=0;*/
- int r2, r3;
+ int r2;
/* Read r2 and r3 */
r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ mii_rd(MII_ID1, phyaddr, ioaddr);
/* SEEQ and Cypress way * /
/ * Shuffle r2 and r3 * /
a.reg=0;
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c3b4abff48b5..87a27fe2992d 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -380,7 +380,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -422,15 +422,17 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
db = netdev_priv(dev);
/* Allocate Tx/Rx descriptor memory */
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr) {
err = -ENOMEM;
goto err_out_res;
}
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
- TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr) {
err = -ENOMEM;
goto err_out_free_desc;
@@ -492,11 +494,12 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
pci_release_regions(pdev);
err_out_disable:
@@ -519,11 +522,12 @@ static void dmfe_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(db->pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
}
@@ -955,8 +959,8 @@ static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
@@ -1329,8 +1333,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
- skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1544,8 +1548,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index c1ca0765d56d..54560f9a1651 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -74,8 +74,8 @@ int tulip_refill_rx(struct net_device *dev)
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&tp->pdev->dev, mapping)) {
dev_kfree_skb(skb);
tp->rx_buffers[entry].skb = NULL;
@@ -210,9 +210,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -222,9 +223,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -240,8 +242,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -436,9 +440,10 @@ static int tulip_rx(struct net_device *dev)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -448,9 +453,10 @@ static int tulip_rx(struct net_device *dev)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -466,8 +472,9 @@ static int tulip_rx(struct net_device *dev)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -597,10 +604,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
@@ -629,9 +636,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
dev->stats.tx_packets++;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index dcf21a36a9cf..011604787b8e 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -319,13 +319,8 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
case 5: case 6: {
- u16 setup[5];
-
new_csr6 = 0; /* FIXME */
- for (i = 0; i < 5; i++)
- setup[i] = get_u16(&p[i*2 + 1]);
-
if (startup && mtable->has_reset) {
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3a8659c5da06..e7b0d7de40fd 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -350,9 +350,9 @@ static void tulip_up(struct net_device *dev)
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tp->tx_buffers[tp->cur_tx].skb = NULL;
tp->tx_buffers[tp->cur_tx].mapping = mapping;
@@ -630,8 +630,8 @@ static void tulip_init_ring(struct net_device *dev)
tp->rx_buffers[i].skb = skb;
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[i].mapping = mapping;
tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
@@ -664,8 +664,8 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tp->cur_tx % TX_RING_SIZE;
tp->tx_buffers[entry].skb = skb;
- mapping = pci_map_single(tp->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tp->tx_buffers[entry].mapping = mapping;
tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
@@ -716,16 +716,17 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
- tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
@@ -795,8 +796,8 @@ static void tulip_free_ring (struct net_device *dev)
/* An invalid address. */
tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
if (skb) {
- pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
dev_kfree_skb (skb);
}
}
@@ -805,8 +806,9 @@ static void tulip_free_ring (struct net_device *dev)
struct sk_buff *skb = tp->tx_buffers[i].skb;
if (skb != NULL) {
- pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb (skb);
}
tp->tx_buffers[i].skb = NULL;
@@ -1149,9 +1151,10 @@ static void set_rx_mode(struct net_device *dev)
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping =
- pci_map_single(tp->pdev, tp->setup_frame,
+ dma_map_single(&tp->pdev->dev,
+ tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE-1)
tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
@@ -1422,10 +1425,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma);
+ tp->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
goto err_out_mtable;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
@@ -1757,10 +1760,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_free_ring:
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
err_out_mtable:
kfree (tp->mtable);
@@ -1878,10 +1881,10 @@ static void tulip_remove_one(struct pci_dev *pdev)
tp = netdev_priv(dev);
unregister_netdev(dev);
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
kfree (tp->mtable);
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index f942399f0f32..13e73ed15ef0 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -282,7 +282,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -317,11 +317,15 @@ static int uli526x_init_one(struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
err = -ENOMEM;
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr)
goto err_out_release;
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr)
goto err_out_free_tx_desc;
@@ -401,11 +405,12 @@ static int uli526x_init_one(struct pci_dev *pdev,
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_tx_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_tx_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_release:
pci_release_regions(pdev);
err_out_disable:
@@ -424,11 +429,11 @@ static void uli526x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
@@ -810,7 +815,8 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
/* reuse this SKB */
@@ -1234,10 +1240,8 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1409,10 +1413,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5a43be327f58..89cbdc1f4857 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -364,7 +364,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("Device %s disabled due to DMA limitations\n",
pci_name(pdev));
return -EIO;
@@ -630,9 +630,10 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
+ netdev_dbg(dev, "%s() irq %d\n", __func__, irq);
- if((i=alloc_ringdesc(dev)))
+ i = alloc_ringdesc(dev);
+ if (i)
goto out_err;
spin_lock_irq(&np->lock);
@@ -642,7 +643,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- netdev_dbg(dev, "Done netdev_open()\n");
+ netdev_dbg(dev, "Done %s()\n", __func__);
/* Set the timer to check for link beat. */
timer_setup(&np->timer, netdev_timer, 0);
@@ -802,8 +803,9 @@ static void init_rxtx_rings(struct net_device *dev)
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
- np->rx_buf_sz,PCI_DMA_FROMDEVICE);
+ np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
np->rx_ring[i].status = DescOwned;
@@ -833,20 +835,17 @@ static void free_rxtx_rings(struct netdev_private* np)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_addr[i],
- np->rx_skbuff[i]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
+ np->rx_skbuff[i]->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_addr[i],
- np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
}
np->tx_skbuff[i] = NULL;
@@ -964,10 +963,10 @@ static int alloc_ringdesc(struct net_device *dev)
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- &np->ring_dma_addr);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ &np->ring_dma_addr, GFP_KERNEL);
if(!np->rx_ring)
return -ENOMEM;
init_rxtx_rings(dev);
@@ -976,10 +975,10 @@ static int alloc_ringdesc(struct net_device *dev)
static void free_ringdesc(struct netdev_private *np)
{
- pci_free_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- np->rx_ring, np->ring_dma_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ np->rx_ring, np->ring_dma_addr);
}
@@ -994,8 +993,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
- np->tx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
+ np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
np->tx_skbuff[entry] = skb;
np->tx_ring[entry].buffer1 = np->tx_addr[entry];
@@ -1078,9 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
np->stats.tx_packets++;
}
/* Free the original skb. */
- pci_unmap_single(np->pci_dev,np->tx_addr[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
np->tx_q_bytes -= np->tx_skbuff[entry]->len;
dev_kfree_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -1217,18 +1215,21 @@ static int netdev_rx(struct net_device *dev)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
@@ -1258,9 +1259,10 @@ static int netdev_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,
- np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
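
The winbond-840 rx path above also shows the copy-break idiom shared by these drivers: frames shorter than rx_copybreak are copied into a fresh skb so the original DMA buffer can stay in the ring, which is why that buffer is only synced for the CPU and back, never unmapped. A minimal sketch of the idiom, with hypothetical names and the usual rx-loop variables assumed:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_copybreak_sketch(struct device *dev,
					   struct net_device *ndev,
					   struct sk_buff *ring_skb,
					   dma_addr_t addr,
					   unsigned int buf_len,
					   unsigned int pkt_len,
					   unsigned int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb_ip_align(ndev, pkt_len))) {
		/* Buffer stays mapped and owned by the NIC: sync, copy, sync */
		dma_sync_single_for_cpu(dev, addr, buf_len, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		dma_sync_single_for_device(dev, addr, buf_len, DMA_FROM_DEVICE);
		return skb;	/* ring_skb is reused for the next frame */
	}

	/* Buffer leaves the ring: unmap it before handing it up the stack */
	dma_unmap_single(dev, addr, buf_len, DMA_FROM_DEVICE);
	skb_put(ring_skb, pkt_len);
	return ring_skb;
}
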
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index be6d8a9ada27..734acb834c98 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -7,7 +7,6 @@
*/
-#define DRV_NAME "DL2000/TC902x-based linux driver"
#include "dl2k.h"
#include <linux/dma-mapping.h>
@@ -223,13 +222,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata (pdev, dev);
- ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = ring_space;
@@ -280,9 +281,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_unmap_rx:
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
@@ -436,8 +439,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
- skb->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->rx_ring[i]),
+ skb->len, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
@@ -447,8 +451,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[i]),
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
}
@@ -505,9 +510,8 @@ static int alloc_list(struct net_device *dev)
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
- cpu_to_le64(pci_map_single(
- np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
@@ -673,9 +677,8 @@ rio_timer (struct timer_list *t)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -729,9 +732,8 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
((u64)np->vlan << 32) |
((u64)skb->priority << 45);
}
- txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
- skb->len,
- PCI_DMA_TODEVICE));
+ txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
@@ -828,9 +830,9 @@ rio_free_tx (struct net_device *dev, int irq)
if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
break;
skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev,
- desc_to_dma(&np->tx_ring[entry]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[entry]), skb->len,
+ DMA_TO_DEVICE);
if (irq)
dev_consume_skb_irq(skb);
else
@@ -950,25 +952,25 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single (np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
- pci_dma_sync_single_for_cpu(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
skb_put (skb, pkt_len);
- pci_dma_sync_single_for_device(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
#if 0
@@ -1001,9 +1003,8 @@ receive_packet (struct net_device *dev)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -1797,10 +1798,10 @@ rio_remove1 (struct pci_dev *pdev)
struct netdev_private *np = netdev_priv(dev);
unregister_netdev (dev);
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
#endif
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b3f8597e77aa..e3a8858915b3 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -367,6 +367,7 @@ struct netdev_private {
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct timer_list timer; /* Media monitoring timer. */
+ struct net_device *ndev; /* backpointer */
/* ethtool extra stats */
struct {
u64 tx_multiple_collisions;
@@ -429,8 +430,8 @@ static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
-static void rx_poll(unsigned long data);
-static void tx_poll(unsigned long data);
+static void rx_poll(struct tasklet_struct *t);
+static void tx_poll(struct tasklet_struct *t);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
@@ -531,14 +532,15 @@ static int sundance_probe1(struct pci_dev *pdev,
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
np = netdev_priv(dev);
+ np->ndev = dev;
np->base = ioaddr;
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->msg_enable = (1 << debug) - 1;
spin_lock_init(&np->lock);
spin_lock_init(&np->statlock);
- tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
- tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
+ tasklet_setup(&np->rx_tasklet, rx_poll);
+ tasklet_setup(&np->tx_tasklet, tx_poll);
ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
&ring_dma, GFP_KERNEL);
@@ -1054,10 +1056,9 @@ static void init_ring(struct net_device *dev)
}
}
-static void tx_poll (unsigned long data)
+static void tx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
unsigned head = np->cur_task % TX_RING_SIZE;
struct netdev_desc *txdesc =
&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
@@ -1312,10 +1313,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-static void rx_poll(unsigned long data)
+static void rx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+ struct net_device *dev = np->ndev;
int entry = np->cur_rx % RX_RING_SIZE;
int boguscnt = np->budget;
void __iomem *ioaddr = np->base;
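
The sundance changes above follow the kernel-wide move from tasklet_init() to tasklet_setup(): the callback now receives the tasklet pointer itself, and the owning structure is recovered with from_tasklet() (a container_of() wrapper), which is exactly why the patch adds the np->ndev backpointer. A minimal sketch of the pattern, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct net_device *ndev;	/* backpointer, set at probe time */
	struct tasklet_struct rx_tasklet;
};

static void my_rx_poll(struct tasklet_struct *t)
{
	/* from_tasklet() recovers the structure embedding the tasklet */
	struct my_priv *np = from_tasklet(np, t, rx_tasklet);
	struct net_device *dev = np->ndev;

	netdev_dbg(dev, "rx tasklet fired\n");
}

static void my_setup(struct my_priv *np, struct net_device *dev)
{
	np->ndev = dev;
	tasklet_setup(&np->rx_tasklet, my_rx_poll);
}
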
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index db98274501a0..48c6eb142dcc 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -507,23 +507,20 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- u32 tx_status, irq_enable;
- unsigned int len, i, tx_cmd, wrsz;
+ unsigned int i, tx_cmd, wrsz;
unsigned long flags;
unsigned int *bufp;
+ u32 irq_enable;
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
pr_debug("start_xmit: len %u head %p data %p\n",
skb->len, skb->head, skb->data);
dnet_print_skb(skb);
- /* frame size (words) */
- len = (skb->len + 3) >> 2;
-
spin_lock_irqsave(&bp->lock, flags);
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
wrsz = (u32) skb->len + 3;
@@ -545,7 +542,7 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
netif_stop_queue(dev);
- tx_status = dnet_readl(bp, INTR_SRC);
+ dnet_readl(bp, INTR_SRC);
irq_enable = dnet_readl(bp, INTR_ENB);
irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
dnet_writel(bp, irq_enable, INTR_ENB);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a817ca661c1f..0981fe9652e5 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -177,6 +177,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* struct ethoc - driver-private device structure
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
+ * @big_endian: true if the device is big-endian
* @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
@@ -189,7 +190,10 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @msg_enable: device state flags
* @lock: device lock
* @mdio: MDIO bus for PHY access
+ * @clk: clock
* @phy_id: address of attached PHY
+ * @old_link: previous link info
+ * @old_duplex: previous duplex info
*/
struct ethoc {
void __iomem *iobase;
@@ -1015,7 +1019,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
/**
* ethoc_probe - initialize OpenCores ethernet MAC
- * pdev: platform device
+ * @pdev: platform device
*/
static int ethoc_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 87236206366f..00024dd41147 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1817,6 +1817,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
+ /* Disable the problematic HW arbitration on the ast2600 */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
+ }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index e5876a3fda91..63b3e02fab16 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -170,6 +170,14 @@
#define FTGMAC100_MACCR_SW_RST (1 << 31)
/*
+ * test mode control register
+ */
+#define FTGMAC100_TM_RQ_TX_VALID_DIS (1 << 28)
+#define FTGMAC100_TM_RQ_RR_IDLE_PREV (1 << 27)
+#define FTGMAC100_TM_DEFAULT \
+ (FTGMAC100_TM_RQ_TX_VALID_DIS | FTGMAC100_TM_RQ_RR_IDLE_PREV)
+
+/*
* PHY control register
*/
#define FTGMAC100_PHYCR_MDC_CYCTHR_MASK 0x3f
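
For reference, the new default works out to FTGMAC100_TM_DEFAULT = (1 << 28) | (1 << 27) = 0x10000000 | 0x08000000 = 0x18000000; this is the value the ftgmac100 probe path above writes to the test-mode register on ast2600 to disable the problematic arbitration.
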
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index fdff3b4723ba..d9c285948fc2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFUP | \
- NETIF_MSG_IFDOWN)
+ NETIF_MSG_IFDOWN | NETIF_MSG_HW)
#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
@@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
+#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ + DPAA_HASH_RESULTS_SIZE)
+#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
+ dpaa_rx_extra_headroom)
#ifdef CONFIG_DPAA_ERRATUM_A050385
-#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
- + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
+#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
+ DPAA_RX_PRIV_DATA_A050385_SIZE : \
+ DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
#else
-#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
- dpaa_rx_extra_headroom)
+#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
#endif
#define DPAA_ETH_PCD_RXQ_NUM 128
@@ -2840,7 +2845,8 @@ out_error:
return err;
}
-static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
+static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
+ enum port_type port)
{
u16 headroom;
@@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
*
* Also make sure the headroom is a multiple of data_align bytes
*/
- headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
- DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
+ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ if (port == RX)
+ return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
+ else
+ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
}
static int dpaa_eth_probe(struct platform_device *pdev)
@@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
goto free_dpaa_fqs;
}
- priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
- priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
+ priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
+ priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
/* All real interfaces need their ports initialized */
err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index feea797cde02..cfd369cf4c8c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -3,6 +3,7 @@ config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
select PHYLINK
+ select PCS_LYNX
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 6e7f33c956bf..146cb3540e61 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
index 83dee575c2fa..84de0644168d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -17,12 +17,12 @@ static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
return 0;
}
-static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
+static inline bool dpaa2_eth_is_prio_enabled(u8 pfc_en, u8 tc)
{
return !!(pfc_en & (1 << tc));
}
-static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+static int dpaa2_eth_set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
struct dpni_congestion_notification_cfg cfg = {0};
int i, err;
@@ -33,7 +33,7 @@ static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
cfg.message_ctx = 0ULL;
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
- if (is_prio_enabled(pfc_en, i)) {
+ if (dpaa2_eth_is_prio_enabled(pfc_en, i)) {
cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
} else {
@@ -93,7 +93,7 @@ static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
}
/* Configure congestion notifications for the enabled priorities */
- err = set_pfc_cn(priv, pfc->pfc_en);
+ err = dpaa2_eth_set_pfc_cn(priv, pfc->pfc_en);
if (err)
return err;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 56d9927fbfda..b87db0846e10 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -42,24 +42,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_cpu_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_cpu_ops = {
- .open = dpaa2_dbg_cpu_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu);
static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
{
@@ -106,24 +89,7 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_fqs_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_fq_ops = {
- .open = dpaa2_dbg_fqs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_fqs);
static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
{
@@ -151,24 +117,7 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_ch_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_ch_ops = {
- .open = dpaa2_dbg_ch_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
@@ -179,13 +128,13 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
priv->dbg.dir = dir;
/* per-cpu stats file */
- debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_fops);
/* per-fq stats file */
- debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fqs_fops);
/* per-channel stats file */
- debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
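
The debugfs cleanup above leans on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the open handler and file_operations that were previously written out by hand; this also explains the rename from dpaa2_dbg_fq_ops to dpaa2_dbg_fqs_fops, since the generated symbol is always <name>_fops, derived from the <name>_show function. Roughly (a simplified sketch of the upstream macro):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}
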
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
new file mode 100644
index 000000000000..833696245565
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP */
+
+#include "dpaa2-eth.h"
+
+#define DPAA2_ETH_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, 0)
+
+static const struct devlink_trap_group dpaa2_eth_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(PARSER_ERROR_DROPS, 0),
+};
+
+static const struct devlink_trap dpaa2_eth_traps_arr[] = {
+ DPAA2_ETH_TRAP_DROP(VXLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(LLC_SNAP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(VLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(PPPOE_PPP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(MPLS_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ARP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_1_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_N_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GRE_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(UDP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(TCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IPSEC_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(SCTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(DCCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ESP_PARSING, PARSER_ERROR_DROPS),
+};
+
+static int dpaa2_eth_dl_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ char buf[10];
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
+ err = devlink_info_version_running_put(req, "dpni", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct dpaa2_eth_trap_item *
+dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data = priv->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_traps_arr); i++) {
+ if (dpaa2_eth_traps_arr[i].id == trap_id)
+ return &dpaa2_eth_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr)
+{
+ struct dpaa2_faf_error_bit {
+ int position;
+ enum devlink_trap_generic_id trap_id;
+ } faf_bits[] = {
+ { .position = 5, .trap_id = DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING },
+ { .position = 20, .trap_id = DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING },
+ { .position = 24, .trap_id = DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING },
+ { .position = 26, .trap_id = DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING },
+ { .position = 29, .trap_id = DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING },
+ { .position = 31, .trap_id = DEVLINK_TRAP_GENERIC_ID_ARP_PARSING },
+ { .position = 52, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING },
+ { .position = 61, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING },
+ { .position = 67, .trap_id = DEVLINK_TRAP_GENERIC_ID_GRE_PARSING },
+ { .position = 71, .trap_id = DEVLINK_TRAP_GENERIC_ID_UDP_PARSING },
+ { .position = 76, .trap_id = DEVLINK_TRAP_GENERIC_ID_TCP_PARSING },
+ { .position = 80, .trap_id = DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING },
+ { .position = 82, .trap_id = DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING },
+ { .position = 84, .trap_id = DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING },
+ { .position = 88, .trap_id = DEVLINK_TRAP_GENERIC_ID_GTP_PARSING },
+ { .position = 90, .trap_id = DEVLINK_TRAP_GENERIC_ID_ESP_PARSING },
+ };
+ u64 faf_word;
+ u64 mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(faf_bits); i++) {
+ if (faf_bits[i].position < 32) {
+ /* Low part of FAF: bit positions 0-31 map to
+ * mask bits 31-0.
+ */
+ mask = 1ull << (31 - faf_bits[i].position);
+ faf_word = __le32_to_cpu(fapr->faf_lo);
+ } else {
+ /* High part of FAF: bit positions 32-95 map to
+ * mask bits 63-0.
+ */
+ mask = 1ull << (63 - (faf_bits[i].position - 32));
+ faf_word = __le64_to_cpu(fapr->faf_hi);
+ }
+ if (faf_word & mask)
+ return dpaa2_eth_dl_trap_item_lookup(priv, faf_bits[i].trap_id);
+ }
+ return NULL;
+}
+
+static int dpaa2_eth_dl_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct dpaa2_eth_trap_item *dpaa2_eth_trap_item;
+
+ dpaa2_eth_trap_item = dpaa2_eth_dl_trap_item_lookup(priv, trap->id);
+ if (WARN_ON(!dpaa2_eth_trap_item))
+ return -ENOENT;
+
+ dpaa2_eth_trap_item->trap_ctx = trap_ctx;
+
+ return 0;
+}
+
+static int dpaa2_eth_dl_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* Changing the action of an individual packet trap is not
+ * supported; only the whole trap group (parser error drops)
+ * can be reconfigured.
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change trap action independently of group");
+ return -EOPNOTSUPP;
+}
+
+static int dpaa2_eth_dl_trap_group_action_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_error_cfg err_cfg = {0};
+ int err;
+
+ if (group->id != DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS)
+ return -EOPNOTSUPP;
+
+ /* Configure handling of frames marked as errors from the parser */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops dpaa2_eth_devlink_ops = {
+ .info_get = dpaa2_eth_dl_info_get,
+ .trap_init = dpaa2_eth_dl_trap_init,
+ .trap_action_set = dpaa2_eth_dl_trap_action_set,
+ .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
+};
+
+int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_devlink_priv *dl_priv;
+ int err;
+
+ priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv));
+ if (!priv->devlink) {
+ dev_err(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->dpaa2_priv = priv;
+
+ err = devlink_register(priv->devlink, dev);
+ if (err) {
+ dev_err(dev, "devlink_register() = %d\n", err);
+ goto devlink_free;
+ }
+
+ return 0;
+
+devlink_free:
+ devlink_free(priv->devlink);
+
+ return err;
+}
+
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_unregister(priv->devlink);
+ devlink_free(priv->devlink);
+}
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devlink_port_register(priv->devlink, devlink_port, 0);
+ if (err)
+ return err;
+
+ devlink_port_type_eth_set(devlink_port, priv->net_dev);
+
+ return 0;
+}
+
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
+}
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ dpaa2_eth_trap_data = kzalloc(sizeof(*dpaa2_eth_trap_data), GFP_KERNEL);
+ if (!dpaa2_eth_trap_data)
+ return -ENOMEM;
+ priv->trap_data = dpaa2_eth_trap_data;
+
+ dpaa2_eth_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(dpaa2_eth_traps_arr),
+ sizeof(struct dpaa2_eth_trap_item),
+ GFP_KERNEL);
+ if (!dpaa2_eth_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto trap_data_free;
+ }
+
+ err = devlink_trap_groups_register(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ if (err) {
+ dev_err(dev, "devlink_trap_groups_register() = %d\n", err);
+ goto trap_items_arr_free;
+ }
+
+ err = devlink_traps_register(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr), priv);
+ if (err) {
+ dev_err(dev, "devlink_traps_register() = %d\n", err);
+ goto trap_groups_unregiser;
+ }
+
+ return 0;
+
+trap_groups_unregister:
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+trap_items_arr_free:
+ kfree(dpaa2_eth_trap_data->trap_items_arr);
+trap_data_free:
+ kfree(dpaa2_eth_trap_data);
+ priv->trap_data = NULL;
+
+ return err;
+}
+
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_traps_unregister(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr));
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ kfree(priv->trap_data->trap_items_arr);
+ kfree(priv->trap_data);
+}
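
Once the devlink support above is registered, the parser-error traps can be inspected and toggled from userspace with the stock devlink tool. A hypothetical session (the device handle depends on the bus; pick up the real one from devlink dev show):

	# devlink dev show
	# devlink trap show
	# devlink trap group set <DEV> group parser_error_drops action trap
	# devlink -s trap show <DEV> trap vxlan_parsing

Setting the group action to trap makes dpaa2_eth_dl_trap_group_action_set() switch the DPNI error behaviour to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE, after which matching frames land on the error FQ and are reported through devlink_trap_report(); per-trap action changes are rejected with -EOPNOTSUPP, as the handler above enforces.
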
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cf5383bb8331..cf9400a9886d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -11,10 +11,11 @@
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
-#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/fsl/ptp_qoriq.h>
+#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
@@ -30,6 +31,9 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+struct ptp_qoriq *dpaa2_ptp;
+EXPORT_SYMBOL(dpaa2_ptp);
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -40,9 +44,9 @@ static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
return phys_to_virt(phys_addr);
}
-static void validate_rx_csum(struct dpaa2_eth_priv *priv,
- u32 fd_status,
- struct sk_buff *skb)
+static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -62,9 +66,9 @@ static void validate_rx_csum(struct dpaa2_eth_priv *priv,
/* Free a received FD.
* Not to be used for Tx conf FDs or on any other paths.
*/
-static void free_rx_fd(struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd,
- void *vaddr)
+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t addr = dpaa2_fd_get_addr(fd);
@@ -100,9 +104,9 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
{
struct sk_buff *skb = NULL;
u16 fd_offset = dpaa2_fd_get_offset(fd);
@@ -121,9 +125,9 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
}
/* Build a non linear (fragmented) skb based on a S/G table */
-static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_sg_entry *sgt)
+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
{
struct sk_buff *skb = NULL;
struct device *dev = priv->net_dev->dev.parent;
@@ -204,7 +208,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Free buffers acquired from the buffer pool or which were meant to
* be released in the pool
*/
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+ int count)
{
struct device *dev = priv->net_dev->dev.parent;
void *vaddr;
@@ -218,9 +223,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
}
}
-static void xdp_release_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -238,7 +243,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
ch->buf_count -= ch->xdp.drop_cnt;
}
@@ -274,9 +279,9 @@ static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
return total_enqueued;
}
-static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *fq)
+static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_fd *fds;
@@ -295,17 +300,17 @@ static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
ch->stats.xdp_tx++;
}
for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
- xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
percpu_stats->tx_errors++;
ch->stats.xdp_tx_err++;
}
fq->xdp_tx_fds.num = 0;
}
-static void xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -333,13 +338,13 @@ static void xdp_enqueue(struct dpaa2_eth_priv *priv,
if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
return;
- xdp_tx_flush(priv, ch, fq);
+ dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}
-static u32 run_xdp(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *rx_fq,
- struct dpaa2_fd *fd, void *vaddr)
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
struct bpf_prog *xdp_prog;
@@ -372,7 +377,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
case XDP_PASS:
break;
case XDP_TX:
- xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
@@ -381,7 +386,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
case XDP_REDIRECT:
@@ -441,7 +446,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
if (xdp_act != XDP_PASS) {
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
@@ -450,13 +455,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_linear_skb(ch, fd, vaddr);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_frag_skb(priv, ch, buf_data);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
@@ -485,7 +490,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
/* Check if we need to validate the L4 csum */
if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
- validate_rx_csum(priv, status, skb);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
@@ -499,19 +504,71 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
err_build_skb:
- free_rx_fd(priv, fd, vaddr);
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
percpu_stats->rx_dropped++;
}
+/* Processing of Rx frames received on the error FQ.
+ * We check the error bits, report the frame through a matching
+ * devlink trap when one exists, and then free it.
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq __always_unused)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_trap_item *trap_item;
+ struct dpaa2_fapr *fapr;
+ struct sk_buff *skb;
+ void *buf_data;
+ void *vaddr;
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+
+ if (fd_format == dpaa2_fd_single) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ /* We don't support any other format */
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+ goto err_frame_format;
+ }
+
+ fapr = dpaa2_get_fapr(vaddr, false);
+ trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
+ if (trap_item)
+ devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
+ &priv->devlink_port, NULL);
+ consume_skb(skb);
+
+err_frame_format:
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
+ ch->buf_count--;
+}
+
/* Consume all frames pull-dequeued into the store. This is the simplest way to
* make sure we don't accidentally issue another volatile dequeue which would
* overwrite (leak) frames already in the store.
*
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq **src)
+static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -559,11 +616,57 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return cleaned;
}
+static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
+ u8 *msgtype, u8 *twostep, u8 *udp,
+ u16 *correction_offset,
+ u16 *origintimestamp_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 ||
+ type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
/* Configure the egress frame annotation for timestamp update */
-static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fd *fd,
+ void *buf_start,
+ struct sk_buff *skb)
{
+ struct ptp_tstamp origin_timestamp;
+ struct dpni_single_step_cfg cfg;
+ u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
+ struct dpaa2_fas *fas;
+ struct timespec64 ts;
+ u16 offset1, offset2;
u32 ctrl, frc;
+ __le64 *ns;
+ u8 *data;
/* Mark the egress frame annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
@@ -579,12 +682,52 @@ static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
faead = dpaa2_get_faead(buf_start, true);
faead->ctrl = cpu_to_le32(ctrl);
+
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2) ||
+ msgtype != 0 || twostep) {
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ return;
+ }
+
+ /* Mark the frame annotation status as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
+
+ /* Mark the PTP flag for one step timestamping */
+ fas = dpaa2_get_fas(buf_start, true);
+ fas->status = cpu_to_le32(DPAA2_FAS_PTP);
+
+ dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
+ ns = dpaa2_get_ts(buf_start, true);
+ *ns = cpu_to_le64(timespec64_to_ns(&ts) /
+ DPAA2_PTP_CLK_PERIOD_NS);
+
+ /* Update current time to PTP message originTimestamp field */
+ ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(origin_timestamp.sec_lsb);
+ *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset1;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
+ &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+ }
}
/* Create a frame descriptor based on a fragmented skb */
-static int build_sg_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
void *sgt_buf = NULL;
@@ -606,7 +749,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
return -EINVAL;
- scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
if (unlikely(!scl))
return -ENOMEM;
@@ -653,6 +796,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
* skb backpointer in the software annotation area. We'll need
* all of them on Tx Conf.
*/
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SG;
swa->sg.skb = skb;
@@ -672,9 +816,6 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
-
return 0;
dma_map_single_failed:
@@ -692,9 +833,10 @@ dma_map_sg_failed:
* enough for the HW requirements, thus instead of realloc-ing the skb we
* create a SG frame descriptor with only one entry.
*/
-static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_sgt_cache *sgt_cache;
@@ -732,6 +874,7 @@ static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_sg_set_final(sgt, true);
/* Store the skb backpointer in the SGT buffer */
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -750,9 +893,6 @@ static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
-
return 0;
sgt_map_failed:
@@ -767,16 +907,17 @@ data_map_failed:
}
/* Create a frame descriptor based on a linear skb */
-static int build_single_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
u8 *buffer_start, *aligned_start;
struct dpaa2_eth_swa *swa;
dma_addr_t addr;
- buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
/* If there's enough room to align the FD address, do it.
* It will help hardware optimize accesses.
@@ -790,6 +931,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* (in the private data area) such that we can release it
* on Tx confirm
*/
+ *swa_addr = (void *)buffer_start;
swa = (struct dpaa2_eth_swa *)buffer_start;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -806,9 +948,6 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, buffer_start);
-
return 0;
}
@@ -819,9 +958,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -892,7 +1031,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (skb->cb[0] == TX_TSTAMP) {
struct skb_shared_hwtstamps shhwtstamps;
__le64 *ts = dpaa2_get_ts(buffer_start, true);
u64 ns;
@@ -902,6 +1041,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
}
/* Free SGT buffer allocated on tx */
@@ -921,7 +1062,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
napi_consume_skb(skb, in_napi);
}
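
Consolidated from the hunks above, a sketch of the two-step timestamp delivery path, using the driver's own dpaa2_get_ts() accessor and DPAA2_PTP_CLK_PERIOD_NS scale factor: the raw 1588 timer value is read from the hardware annotation area, scaled to nanoseconds and handed to the stack:

	static void deliver_tx_tstamp(struct sk_buff *skb, void *buffer_start)
	{
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(ns);

		/* Clones the timestamp onto the socket's error queue */
		skb_tstamp_tx(skb, &shhwtstamps);
	}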
-static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_fd fd;
@@ -934,11 +1076,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
u32 fd_len;
u8 prio = 0;
int err, i;
+ void *swa;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ needed_headroom = dpaa2_eth_needed_headroom(skb);
/* We'll be holding a back-reference to the skb until Tx Confirmation;
* we don't want that overwritten by a concurrent Tx with a cloned skb.
@@ -954,17 +1097,17 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) {
- err = build_sg_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
} else if (skb_headroom(skb) < needed_headroom) {
- err = build_sg_fd_single_buf(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
} else {
- err = build_single_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
}
if (unlikely(err)) {
@@ -972,6 +1115,9 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
goto err_build_fd;
}
+ if (skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+
/* Tracing point */
trace_dpaa2_tx_fd(net_dev, &fd);
@@ -1010,7 +1156,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
percpu_stats->tx_packets++;
@@ -1025,6 +1171,63 @@ err_build_fd:
return NETDEV_TX_OK;
}
+static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
+ tx_onestep_tstamp);
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (!skb)
+ return;
+
+ /* Lock just before transmitting a one-step timestamping
+ * packet, and release the lock in dpaa2_eth_free_tx_fd
+ * once the packet has been confirmed sent on hardware,
+ * or when cleaning up after a transmit failure.
+ */
+ mutex_lock(&priv->onestep_tstamp_lock);
+ __dpaa2_eth_tx(skb, priv->net_dev);
+ }
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 msgtype, twostep, udp;
+ u16 offset1, offset2;
+
+ /* Use skb->cb[0] to record the timestamping request for this skb */
+ skb->cb[0] = 0;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
+ skb->cb[0] = TX_TSTAMP;
+ else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
+ }
+
+ /* TX for one-step timestamping PTP Sync packet */
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2))
+ if (msgtype == 0 && twostep == 0) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ queue_work(priv->dpaa2_ptp_wq,
+ &priv->tx_onestep_tstamp);
+ return NETDEV_TX_OK;
+ }
+ /* Fall back to two-step timestamping for packets that
+ * are not one-step PTP Sync packets
+ */
+ skb->cb[0] = TX_TSTAMP;
+ }
+
+ /* TX for other packets */
+ return __dpaa2_eth_tx(skb, net_dev);
+}
+
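
dpaa2_eth_ptp_parse() is defined earlier in this patch and also handles UDP-encapsulated PTP (hence the udp/offset outputs). For reference, a hedged sketch of the equivalent classification for an L2 PTP frame, per the IEEE 1588-2008 header layout (message type in the low nibble of octet 0, twoStepFlag as bit 1 of the flagField at octet 6):

	static int ptp_l2_parse(struct sk_buff *skb, u8 *msgtype, u8 *twostep)
	{
		u8 *ptp_hdr;

		if (eth_hdr(skb)->h_proto != htons(ETH_P_1588))
			return -EINVAL;

		ptp_hdr = skb_mac_header(skb) + ETH_HLEN;
		*msgtype = ptp_hdr[0] & 0x0f;		/* 0 == Sync */
		*twostep = (ptp_hdr[6] & 0x02) >> 1;	/* twoStepFlag */
		return 0;
	}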
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
@@ -1045,7 +1248,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1059,7 +1262,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
-static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1082,7 +1285,7 @@ static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1106,8 +1309,8 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
-static int add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -1155,7 +1358,7 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i);
return 0;
}
@@ -1173,7 +1376,7 @@ err_alloc:
return 0;
}
-static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
int i, j;
int new_count;
@@ -1181,7 +1384,7 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = add_bufs(priv, priv->channel[j], bpid);
+ new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -1193,11 +1396,11 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
return 0;
}
-/**
+/*
* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
int retries = 0;
@@ -1213,17 +1416,17 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret);
retries = 0;
} while (ret);
}
-static void drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
int i;
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- drain_bufs(priv, 1);
+ dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, 1);
for (i = 0; i < priv->num_channels; i++)
priv->channel[i]->buf_count = 0;
@@ -1232,9 +1435,9 @@ static void drain_pool(struct dpaa2_eth_priv *priv)
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
-static int refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
{
int new_count;
@@ -1242,7 +1445,7 @@ static int refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1272,7 +1475,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
}
}
-static int pull_channel(struct dpaa2_eth_channel *ch)
+static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
int err;
int dequeues = -1;
@@ -1319,14 +1522,14 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
ch->rx_list = &rx_list;
do {
- err = pull_channel(ch);
+ err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &fq);
+ store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
@@ -1375,12 +1578,12 @@ out:
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush_map();
else if (rx_cleaned && ch->xdp.res & XDP_TX)
- xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+ dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
-static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1391,7 +1594,7 @@ static void enable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1465,7 +1668,7 @@ set_cgtd:
priv->rx_cgtd_enabled = td.enable;
}
-static int link_state_update(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
bool tx_pause;
@@ -1517,7 +1720,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = seed_pool(priv, priv->bpid);
+ err = dpaa2_eth_seed_pool(priv, priv->bpid);
if (err) {
/* Not much to do; the buffer pool, though not filled up,
* may still contain some buffers which would enable us
@@ -1541,7 +1744,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
*/
netif_carrier_off(net_dev);
}
- enable_ch_napi(priv);
+ dpaa2_eth_enable_ch_napi(priv);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1549,30 +1752,19 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (!priv->mac) {
- /* If the DPMAC object has already processed the link up
- * interrupt, we have to learn the link state ourselves.
- */
- err = link_state_update(priv);
- if (err < 0) {
- netdev_err(net_dev, "Can't update link state\n");
- goto link_state_err;
- }
- } else {
+ if (priv->mac)
phylink_start(priv->mac->phylink);
- }
return 0;
-link_state_err:
enable_err:
- disable_ch_napi(priv);
- drain_pool(priv);
+ dpaa2_eth_disable_ch_napi(priv);
+ dpaa2_eth_drain_pool(priv);
return err;
}
/* Total number of in-flight frames on ingress queues */
-static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
+static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_fq *fq;
u32 fcnt = 0, bcnt = 0, total = 0;
@@ -1591,13 +1783,13 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
return total;
}
-static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
int retries = 10;
u32 pending;
do {
- pending = ingress_fq_count(priv);
+ pending = dpaa2_eth_ingress_fq_count(priv);
if (pending)
msleep(100);
} while (pending && --retries);
@@ -1605,7 +1797,7 @@ static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
#define DPNI_TX_PENDING_VER_MAJOR 7
#define DPNI_TX_PENDING_VER_MINOR 13
-static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
union dpni_statistics stats;
int retries = 10;
@@ -1651,7 +1843,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
* on WRIOP. After it finishes, wait until all remaining frames on Rx
* and Tx conf queues are consumed on NAPI poll.
*/
- wait_for_egress_fq_empty(priv);
+ dpaa2_eth_wait_for_egress_fq_empty(priv);
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
@@ -1667,11 +1859,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- wait_for_ingress_fq_empty(priv);
- disable_ch_napi(priv);
+ dpaa2_eth_wait_for_ingress_fq_empty(priv);
+ dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -1725,8 +1917,8 @@ static void dpaa2_eth_get_stats(struct net_device *net_dev,
/* Copy mac unicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_uc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1744,8 +1936,8 @@ static void add_uc_hw_addr(const struct net_device *net_dev,
/* Copy mac multicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_mc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1810,7 +2002,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
if (err)
netdev_warn(net_dev, "Can't clear uc filters\n");
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Finally, clear uc promisc and set mc promisc as requested. */
err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
@@ -1833,8 +2025,8 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
if (err)
netdev_warn(net_dev, "Can't clear mac filters\n");
- add_mc_hw_addr(net_dev, priv);
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_mc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Now we can clear both ucast and mcast promisc, without risking
* to drop legitimate frames anymore.
@@ -1868,14 +2060,14 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
- err = set_rx_csum(priv, enable);
+ err = dpaa2_eth_set_rx_csum(priv, enable);
if (err)
return err;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
- err = set_tx_csum(priv, enable);
+ err = dpaa2_eth_set_tx_csum(priv, enable);
if (err)
return err;
}
@@ -1888,15 +2080,17 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->tx_tstamp = false;
- break;
case HWTSTAMP_TX_ON:
- priv->tx_tstamp = true;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->tx_tstamp_type = config.tx_type;
break;
default:
return -ERANGE;
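
A userspace sketch of driving this handler through the standard SIOCSHWTSTAMP ioctl; the interface name is illustrative:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int request_onestep_tstamp(int sock_fd)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ONESTEP_SYNC,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
	}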
@@ -1944,7 +2138,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
return true;
}
-static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
int mfl, err;
@@ -1978,7 +2172,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
if (!xdp_mtu_valid(priv, new_mtu))
return -EINVAL;
- err = set_rx_mfl(priv, new_mtu, true);
+ err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
if (err)
return err;
@@ -1987,7 +2181,7 @@ out:
return 0;
}
-static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
struct dpni_buffer_layout buf_layout = {0};
int err;
@@ -2013,7 +2207,7 @@ static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
return 0;
}
-static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct dpaa2_eth_channel *ch;
@@ -2039,10 +2233,10 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
* so we are sure no old format buffers will be used from now on.
*/
if (need_update) {
- err = set_rx_mfl(priv, dev->mtu, !!prog);
+ err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
if (err)
goto out_err;
- err = update_rx_buffer_headroom(priv, !!prog);
+ err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
if (err)
goto out_err;
}
@@ -2079,7 +2273,7 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return setup_xdp(dev, xdp->prog);
+ return dpaa2_eth_setup_xdp(dev, xdp->prog);
default:
return -EINVAL;
}
@@ -2091,7 +2285,6 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
struct xdp_frame *xdpf,
struct dpaa2_fd *fd)
{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
@@ -2101,7 +2294,7 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
*/
- needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+ needed_headroom = dpaa2_eth_needed_headroom(NULL);
if (xdpf->headroom < needed_headroom)
return -EINVAL;
@@ -2316,7 +2509,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_setup_tc = dpaa2_eth_setup_tc,
};
-static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
struct dpaa2_eth_channel *ch;
@@ -2329,7 +2522,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
}
/* Allocate and configure a DPCON object */
-static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
@@ -2373,16 +2566,15 @@ free:
return ERR_PTR(err);
}
-static void free_dpcon(struct dpaa2_eth_priv *priv,
- struct fsl_mc_device *dpcon)
+static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
{
dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
fsl_mc_object_free(dpcon);
}
-static struct dpaa2_eth_channel *
-alloc_channel(struct dpaa2_eth_priv *priv)
+static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *channel;
struct dpcon_attr attr;
@@ -2393,7 +2585,7 @@ alloc_channel(struct dpaa2_eth_priv *priv)
if (!channel)
return NULL;
- channel->dpcon = setup_dpcon(priv);
+ channel->dpcon = dpaa2_eth_setup_dpcon(priv);
if (IS_ERR(channel->dpcon)) {
err = PTR_ERR(channel->dpcon);
goto err_setup;
@@ -2413,23 +2605,23 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return channel;
err_get_attr:
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
return ERR_PTR(err);
}
-static void free_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *channel)
+static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
{
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
kfree(channel);
}
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
* and register data availability notifications
*/
-static int setup_dpio(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
struct dpaa2_io_notification_ctx *nctx;
struct dpaa2_eth_channel *channel;
@@ -2449,7 +2641,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
cpumask_clear(&priv->dpio_cpumask);
for_each_online_cpu(i) {
/* Try to allocate a channel */
- channel = alloc_channel(priv);
+ channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
if (err != -EPROBE_DEFER)
@@ -2462,7 +2654,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
nctx = &channel->nctx;
nctx->is_cdan = 1;
- nctx->cb = cdan_cb;
+ nctx->cb = dpaa2_eth_cdan_cb;
nctx->id = channel->ch_id;
nctx->desired_cpu = i;
@@ -2510,14 +2702,14 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
err_set_cdan:
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
if (err == -EPROBE_DEFER) {
for (i = 0; i < priv->num_channels; i++) {
channel = priv->channel[i];
nctx = &channel->nctx;
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
}
priv->num_channels = 0;
return err;
@@ -2534,7 +2726,7 @@ err_alloc_ch:
return 0;
}
-static void free_dpio(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_channel *ch;
@@ -2544,12 +2736,12 @@ static void free_dpio(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
- free_channel(priv, ch);
+ dpaa2_eth_free_channel(priv, ch);
}
}
-static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
- int cpu)
+static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
{
struct device *dev = priv->net_dev->dev.parent;
int i;
@@ -2566,7 +2758,7 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
return priv->channel[0];
}
-static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_fq *fq;
@@ -2583,6 +2775,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
fq = &priv->fq[i];
switch (fq->type) {
case DPAA2_RX_FQ:
+ case DPAA2_RX_ERR_FQ:
fq->target_cpu = rx_cpu;
rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
if (rx_cpu >= nr_cpu_ids)
@@ -2597,13 +2790,13 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
default:
dev_err(dev, "Unknown FQ type: %d\n", fq->type);
}
- fq->channel = get_affine_channel(priv, fq->target_cpu);
+ fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
}
update_xps(priv);
}
-static void setup_fqs(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{
int i, j;
@@ -2626,12 +2819,16 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
}
}
+ /* We have exactly one Rx error queue per DPNI */
+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+
/* For each FQ, decide on which core to process incoming frames */
- set_fq_affinity(priv);
+ dpaa2_eth_set_fq_affinity(priv);
}
/* Allocate and configure one buffer pool for each interface */
-static int setup_dpbp(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
{
int err;
struct fsl_mc_device *dpbp_dev;
@@ -2690,15 +2887,15 @@ err_open:
return err;
}
-static void free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
{
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
fsl_mc_object_free(priv->dpbp_dev);
}
-static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
@@ -2723,8 +2920,10 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
- DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
@@ -2733,7 +2932,8 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
}
/* tx-confirm buffer */
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
@@ -2815,7 +3015,7 @@ static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
return 0;
}
-static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
@@ -2824,7 +3024,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
-static int set_pause(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_link_cfg link_cfg = {0};
@@ -2851,7 +3051,7 @@ static int set_pause(struct dpaa2_eth_priv *priv)
return 0;
}
-static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{
struct dpni_queue_id qid = {0};
struct dpaa2_eth_fq *fq;
@@ -2893,7 +3093,7 @@ out_err:
}
/* Configure ingress classification based on VLAN PCP */
-static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpkg_profile_cfg kg_cfg = {0};
@@ -3005,7 +3205,7 @@ out_free_tbl:
}
/* Configure the DPNI object this interface is associated with */
-static int setup_dpni(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_eth_priv *priv;
@@ -3053,20 +3253,20 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
- err = set_buffer_layout(priv);
+ err = dpaa2_eth_set_buffer_layout(priv);
if (err)
goto close;
- set_enqueue_mode(priv);
+ dpaa2_eth_set_enqueue_mode(priv);
/* Enable pause frame support */
if (dpaa2_eth_has_pause_support(priv)) {
- err = set_pause(priv);
+ err = dpaa2_eth_set_pause(priv);
if (err)
goto close;
}
- err = set_vlan_qos(priv);
+ err = dpaa2_eth_set_vlan_qos(priv);
if (err && err != -EOPNOTSUPP)
goto close;
@@ -3086,7 +3286,7 @@ close:
return err;
}
-static void free_dpni(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{
int err;
@@ -3098,8 +3298,8 @@ static void free_dpni(struct dpaa2_eth_priv *priv)
dpni_close(priv->mc_io, 0, priv->mc_token);
}
-static int setup_rx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3150,8 +3350,8 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
-static int setup_tx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3198,6 +3398,38 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue q = { { 0 } };
+ struct dpni_queue_id qid;
+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ q.destination.id = fq->channel->dpcon_id;
+ q.destination.type = DPNI_DEST_DPCON;
+ q.destination.priority = 1;
+ q.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+ if (err) {
+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
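
The user_context programmed above is how a frame finds its way back to this queue's handler: on dequeue, the FQD context carries the fq pointer. A minimal sketch using the dpaa2-io accessor:

	static struct dpaa2_eth_fq *fq_from_dequeue_entry(const struct dpaa2_dq *dq)
	{
		/* Recover the pointer stored in q.user_context */
		return (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
	}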
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
@@ -3266,7 +3498,7 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
};
/* Configure the Rx hash key using the legacy API */
-static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
@@ -3291,7 +3523,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Configure the Rx hash key using the new API */
-static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3311,13 +3543,19 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dev_err(dev, "dpni_set_rx_hash_dist failed\n");
break;
}
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
}
return err;
}
/* Configure the Rx flow classification key */
-static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3337,6 +3575,12 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dev_err(dev, "dpni_set_rx_fs_dist failed\n");
break;
}
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
}
return err;
@@ -3452,11 +3696,11 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (dpaa2_eth_has_legacy_dist(priv))
- err = config_legacy_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
else
- err = config_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_hash_key(priv, key_iova);
} else {
- err = config_cls_key(priv, key_iova);
+ err = dpaa2_eth_config_cls_key(priv, key_iova);
}
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
@@ -3531,7 +3775,7 @@ out:
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
-static int bind_dpni(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3579,10 +3823,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_fqs; i++) {
switch (priv->fq[i].type) {
case DPAA2_RX_FQ:
- err = setup_rx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
break;
case DPAA2_TX_CONF_FQ:
- err = setup_tx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_RX_ERR_FQ:
+ err = setup_rx_err_flow(priv, &priv->fq[i]);
break;
default:
dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
@@ -3603,7 +3850,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
}
/* Allocate rings for storing incoming frame descriptors */
-static int alloc_rings(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3630,7 +3877,7 @@ err_ring:
return -ENOMEM;
}
-static void free_rings(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
int i;
@@ -3638,7 +3885,7 @@ static void free_rings(struct dpaa2_eth_priv *priv)
dpaa2_io_store_destroy(priv->channel[i]->store);
}
-static int set_mac_addr(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3703,7 +3950,7 @@ static int set_mac_addr(struct dpaa2_eth_priv *priv)
return 0;
}
-static int netdev_init(struct net_device *net_dev)
+static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -3716,7 +3963,7 @@ static int netdev_init(struct net_device *net_dev)
net_dev->netdev_ops = &dpaa2_eth_ops;
net_dev->ethtool_ops = &dpaa2_ethtool_ops;
- err = set_mac_addr(priv);
+ err = dpaa2_eth_set_mac_addr(priv);
if (err)
return err;
@@ -3771,13 +4018,13 @@ static int netdev_init(struct net_device *net_dev)
return 0;
}
-static int poll_link_state(void *arg)
+static int dpaa2_eth_poll_link_state(void *arg)
{
struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
int err;
while (!kthread_should_stop()) {
- err = link_state_update(priv);
+ err = dpaa2_eth_link_state_update(priv);
if (unlikely(err))
return err;
@@ -3847,11 +4094,11 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
- link_state_update(netdev_priv(net_dev));
+ dpaa2_eth_link_state_update(netdev_priv(net_dev));
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
- set_mac_addr(netdev_priv(net_dev));
- update_tx_fqids(priv);
+ dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
+ dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
if (priv->mac)
@@ -3864,7 +4111,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
}
-static int setup_irqs(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
int err = 0;
struct fsl_mc_device_irq *irq;
@@ -3910,7 +4157,7 @@ free_mc_irq:
return err;
}
-static void add_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3923,7 +4170,7 @@ static void add_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void del_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3958,6 +4205,19 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
+ priv->rx_tstamp = false;
+
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ if (!priv->dpaa2_ptp_wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
+
+ skb_queue_head_init(&priv->tx_skbs);
+
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
@@ -3970,26 +4230,26 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
/* MC objects initialization and configuration */
- err = setup_dpni(dpni_dev);
+ err = dpaa2_eth_setup_dpni(dpni_dev);
if (err)
goto err_dpni_setup;
- err = setup_dpio(priv);
+ err = dpaa2_eth_setup_dpio(priv);
if (err)
goto err_dpio_setup;
- setup_fqs(priv);
+ dpaa2_eth_setup_fqs(priv);
- err = setup_dpbp(priv);
+ err = dpaa2_eth_setup_dpbp(priv);
if (err)
goto err_dpbp_setup;
- err = bind_dpni(priv);
+ err = dpaa2_eth_bind_dpni(priv);
if (err)
goto err_bind;
/* Add a NAPI context for each channel */
- add_ch_napi(priv);
+ dpaa2_eth_add_ch_napi(priv);
/* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
@@ -4012,21 +4272,21 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
- err = netdev_init(net_dev);
+ err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
/* Configure checksum offload based on current interface flags */
- err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
if (err)
goto err_csum;
- err = set_tx_csum(priv, !!(net_dev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ err = dpaa2_eth_set_tx_csum(priv,
+ !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
if (err)
goto err_csum;
- err = alloc_rings(priv);
+ err = dpaa2_eth_alloc_rings(priv);
if (err)
goto err_alloc_rings;
@@ -4039,10 +4299,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
- err = setup_irqs(dpni_dev);
+ err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
- priv->poll_thread = kthread_run(poll_link_state, priv,
+ priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
"%s_poll_link", net_dev->name);
if (IS_ERR(priv->poll_thread)) {
dev_err(dev, "Error starting polling thread\n");
@@ -4055,6 +4315,18 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
+ err = dpaa2_eth_dl_register(priv);
+ if (err)
+ goto err_dl_register;
+
+ err = dpaa2_eth_dl_traps_register(priv);
+ if (err)
+ goto err_dl_trap_register;
+
+ err = dpaa2_eth_dl_port_add(priv);
+ if (err)
+ goto err_dl_port_add;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -4069,6 +4341,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
return 0;
err_netdev_reg:
+ dpaa2_eth_dl_port_del(priv);
+err_dl_port_add:
+ dpaa2_eth_dl_traps_unregister(priv);
+err_dl_trap_register:
+ dpaa2_eth_dl_unregister(priv);
+err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
if (priv->do_link_poll)
@@ -4076,7 +4354,7 @@ err_connect_mac:
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
@@ -4086,16 +4364,18 @@ err_alloc_sgt_cache:
err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
- del_ch_napi(priv);
+ dpaa2_eth_del_ch_napi(priv);
err_bind:
- free_dpbp(priv);
+ dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
- free_dpio(priv);
+ dpaa2_eth_free_dpio(priv);
err_dpio_setup:
- free_dpni(priv);
+ dpaa2_eth_free_dpni(priv);
err_dpni_setup:
fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+err_wq_alloc:
dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
@@ -4121,20 +4401,24 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
unregister_netdev(net_dev);
+ dpaa2_eth_dl_port_del(priv);
+ dpaa2_eth_dl_traps_unregister(priv);
+ dpaa2_eth_dl_unregister(priv);
+
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
fsl_mc_free_irqs(ls_dev);
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
- del_ch_napi(priv);
- free_dpbp(priv);
- free_dpio(priv);
- free_dpni(priv);
+ dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpio(priv);
+ dpaa2_eth_free_dpni(priv);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7f3c41dc98f2..d236b8695c39 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -10,6 +10,8 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
+#include <linux/net_tstamp.h>
+#include <net/devlink.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -180,6 +182,49 @@ struct dpaa2_fas {
*/
#define DPAA2_TS_OFFSET 0x8
+/* Frame annotation parse results */
+struct dpaa2_fapr {
+ /* 64-bit word 1 */
+ __le32 faf_lo;
+ __le16 faf_ext;
+ __le16 nxt_hdr;
+ /* 64-bit word 2 */
+ __le64 faf_hi;
+ /* 64-bit word 3 */
+ u8 last_ethertype_offset;
+ u8 vlan_tci_offset_n;
+ u8 vlan_tci_offset_1;
+ u8 llc_snap_offset;
+ u8 eth_offset;
+ u8 ip1_pid_offset;
+ u8 shim_offset_2;
+ u8 shim_offset_1;
+ /* 64-bit word 4 */
+ u8 l5_offset;
+ u8 l4_offset;
+ u8 gre_offset;
+ u8 l3_offset_n;
+ u8 l3_offset_1;
+ u8 mpls_offset_n;
+ u8 mpls_offset_1;
+ u8 pppoe_offset;
+ /* 64-bit word 5 */
+ __le16 running_sum;
+ __le16 gross_running_sum;
+ u8 ipv6_frag_offset;
+ u8 nxt_hdr_offset;
+ u8 routing_hdr_offset_2;
+ u8 routing_hdr_offset_1;
+ /* 64-bit word 6 */
+ u8 reserved[5]; /* Soft-parsing context */
+ u8 ip_proto_offset_n;
+ u8 nxt_hdr_frag_offset;
+ u8 parse_error_code;
+};
+
+#define DPAA2_FAPR_OFFSET 0x10
+#define DPAA2_FAPR_SIZE sizeof(struct dpaa2_fapr)
+
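
A hedged sketch of how these parse results are read back on the Rx error path, assuming the dpaa2_get_fapr() accessor added further below; the consumer (the devlink trap lookup) lives elsewhere in this patch:

	static u8 rx_parse_error_code(void *rx_buf_vaddr)
	{
		/* FAPR sits in the hardware annotation area of the Rx buffer */
		struct dpaa2_fapr *fapr = dpaa2_get_fapr(rx_buf_vaddr, false);

		return fapr->parse_error_code;
	}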
/* Frame annotation egress action descriptor */
#define DPAA2_FAEAD_OFFSET 0x58
@@ -194,6 +239,24 @@ struct dpaa2_faead {
#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
+struct ptp_tstamp {
+ u16 sec_msb;
+ u32 sec_lsb;
+ u32 nsec;
+};
+
+static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
+{
+ u64 sec, nsec;
+
+ sec = ns;
+ nsec = do_div(sec, 1000000000);
+
+ tstamp->sec_lsb = sec & 0xFFFFFFFF;
+ tstamp->sec_msb = (sec >> 32) & 0xFFFF;
+ tstamp->nsec = nsec;
+}
+
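
A worked example of the conversion above (input value illustrative): do_div() divides sec in place and returns the remainder, so the 48-bit seconds field is split across sec_msb/sec_lsb:

	struct ptp_tstamp t;

	ns_to_ptp_tstamp(&t, 1603000000123456789ULL);
	/* t.sec_msb == 0, t.sec_lsb == 1603000000, t.nsec == 123456789 */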
/* Accessors for the hardware annotation fields that we use */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{
@@ -210,6 +273,11 @@ static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
}
+static inline struct dpaa2_fapr *dpaa2_get_fapr(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAPR_OFFSET;
+}
+
static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
{
return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
@@ -324,8 +392,10 @@ struct dpaa2_eth_ch_stats {
#define DPAA2_ETH_MAX_RX_QUEUES \
(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
- DPAA2_ETH_MAX_TX_QUEUES)
+ DPAA2_ETH_MAX_TX_QUEUES + \
+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
#define DPAA2_ETH_MAX_NETDEV_QUEUES \
(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
@@ -334,6 +404,7 @@ struct dpaa2_eth_ch_stats {
enum dpaa2_eth_fq_type {
DPAA2_RX_FQ = 0,
DPAA2_TX_CONF_FQ,
+ DPAA2_RX_ERR_FQ
};
struct dpaa2_eth_priv;
@@ -407,6 +478,15 @@ struct dpaa2_eth_sgt_cache {
u16 count;
};
+struct dpaa2_eth_trap_item {
+ void *trap_ctx;
+};
+
+struct dpaa2_eth_trap_data {
+ struct dpaa2_eth_trap_item *trap_items_arr;
+ struct dpaa2_eth_priv *priv;
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -433,8 +513,8 @@ struct dpaa2_eth_priv {
u16 bpid;
struct iommu_domain *iommu_domain;
- bool tx_tstamp; /* Tx timestamping enabled */
- bool rx_tstamp; /* Rx timestamping enabled */
+ enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
+ bool rx_tstamp; /* Rx timestamping enabled */
u16 tx_qdid;
struct fsl_mc_io *mc_io;
@@ -473,8 +553,29 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ struct workqueue_struct *dpaa2_ptp_wq;
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+ /* The one-step timestamping configuration in hardware
+ * registers can only be changed while no one-step
+ * timestamping frames are in flight, so use a mutex to
+ * make sure the previous one-step timestamping packet
+ * has released the lock through the Tx confirmation
+ * queue before transmitting the current packet.
+ */
+ struct mutex onestep_tstamp_lock;
+ struct devlink *devlink;
+ struct dpaa2_eth_trap_data *trap_data;
+ struct devlink_port devlink_port;
};
+struct dpaa2_eth_devlink_priv {
+ struct dpaa2_eth_priv *dpaa2_priv;
+};
+
+#define TX_TSTAMP 0x1
+#define TX_TSTAMP_ONESTEP_SYNC 0x2
+
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
| RXH_L4_B_2_3)
@@ -491,6 +592,7 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
u16 ver_major, u16 ver_minor)
@@ -560,9 +662,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
return !!(link_options & DPNI_LINK_OPT_PAUSE);
}
-static inline
-unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb)
+static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
@@ -579,7 +679,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
return 0;
/* If we have Tx timestamping, need 128B hardware annotation */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ if (skb->cb[0])
headroom += DPAA2_ETH_TX_HWA_SIZE;
return headroom;
@@ -604,4 +704,15 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr);
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 8356f1fbbee1..f981a523e13a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/net_tstamp.h>
@@ -316,8 +317,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
-static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -345,9 +346,9 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
return 0;
}
-static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
- struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -400,9 +401,9 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
return 0;
}
-static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
- struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto, u64 *fields)
+static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -451,9 +452,9 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
return 0;
}
-static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -470,9 +471,9 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -486,32 +487,32 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
- u64 *fields)
+static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
+ void *mask, u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
- err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask, fields);
+ err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask, fields);
break;
case IP_USER_FLOW:
- err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask, fields);
+ err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
- &fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -521,14 +522,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
- fields);
+ err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
+ mask, fields);
if (err)
return err;
}
@@ -536,9 +537,9 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return 0;
}
-static int do_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *fs,
- bool add)
+static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
@@ -561,7 +562,7 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
@@ -617,7 +618,7 @@ static int do_cls_rule(struct net_device *net_dev,
err = dpni_remove_fs_entry(priv->mc_io, 0,
priv->mc_token, i,
&rule_cfg);
- if (err)
+ if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
break;
}
@@ -629,7 +630,7 @@ free_mem:
return err;
}
-static int num_rules(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
int i, rules = 0;
@@ -640,9 +641,9 @@ static int num_rules(struct dpaa2_eth_priv *priv)
return rules;
}
-static int update_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *new_fs,
- unsigned int location)
+static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
@@ -658,13 +659,14 @@ static int update_cls_rule(struct net_device *net_dev,
/* If a rule is present at the specified location, delete it. */
if (rule->in_use) {
- err = do_cls_rule(net_dev, &rule->fs, false);
+ err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
if (err)
return err;
rule->in_use = 0;
- if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ if (!dpaa2_eth_fs_mask_enabled(priv) &&
+ !dpaa2_eth_num_cls_rules(priv))
priv->rx_cls_fields = 0;
}
@@ -672,7 +674,7 @@ static int update_cls_rule(struct net_device *net_dev,
if (!new_fs)
return err;
- err = do_cls_rule(net_dev, new_fs, true);
+ err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
if (err)
return err;
@@ -702,7 +704,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- rxnfc->rule_cnt = num_rules(priv);
+ rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
@@ -744,10 +746,10 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
case ETHTOOL_SRXCLSRLINS:
- err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
case ETHTOOL_SRXCLSRLDEL:
- err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
break;
default:
err = -EOPNOTSUPP;
@@ -762,6 +764,9 @@ EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
+ if (!dpaa2_ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -769,7 +774,8 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
info->phc_index = dpaa2_phc_index;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
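
A userspace sketch of reading these capabilities back through the ethtool ioctl; with the PTP clock present, the returned tx_types bitmask should now include HWTSTAMP_TX_ONESTEP_SYNC:

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int query_tx_types(int sock_fd, const char *ifname)
	{
		struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&info;

		if (ioctl(sock_fd, SIOCETHTOOL, &ifr))
			return -1;

		return info.tx_types;	/* bitmask of (1 << HWTSTAMP_TX_*) */
	}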
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 3ee236c5fc37..90cd243070d7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -15,6 +15,18 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
case DPMAC_ETH_IF_RGMII:
*if_mode = PHY_INTERFACE_MODE_RGMII;
break;
+ case DPMAC_ETH_IF_USXGMII:
+ *if_mode = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case DPMAC_ETH_IF_QSGMII:
+ *if_mode = PHY_INTERFACE_MODE_QSGMII;
+ break;
+ case DPMAC_ETH_IF_SGMII:
+ *if_mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case DPMAC_ETH_IF_XFI:
+ *if_mode = PHY_INTERFACE_MODE_10GBASER;
+ break;
default:
return -EINVAL;
}
@@ -67,6 +79,10 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -95,6 +111,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ phylink_set(mask, 10000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER)
+ break;
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 2500baseT_Full);
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -227,6 +254,51 @@ out:
return fixed;
}
+static int dpaa2_pcs_create(struct dpaa2_mac *mac,
+ struct device_node *dpmac_node, int id)
+{
+ struct mdio_device *mdiodev;
+ struct device_node *node;
+
+ node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
+ if (!node) {
+ /* do not error out on old DTS files */
+ netdev_warn(mac->net_dev, "pcs-handle node not found\n");
+ return 0;
+ }
+
+ if (!of_device_is_available(node)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ return -ENODEV;
+ }
+
+ mdiodev = of_mdio_find_device(node);
+ of_node_put(node);
+ if (!mdiodev)
+ return -EPROBE_DEFER;
+
+ mac->pcs = lynx_pcs_create(mdiodev);
+ if (!mac->pcs) {
+ netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
+ put_device(&mdiodev->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
+{
+ struct lynx_pcs *pcs = mac->pcs;
+
+ if (pcs) {
+ struct device *dev = &pcs->mdio->dev;
+ lynx_pcs_destroy(pcs);
+ put_device(dev);
+ mac->pcs = NULL;
+ }
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
@@ -278,6 +350,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
+ if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ attr.eth_if != DPMAC_ETH_IF_RGMII) {
+ err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if (err)
+ goto err_put_node;
+ }
+
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
@@ -286,10 +365,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
&dpaa2_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_put_node;
+ goto err_pcs_destroy;
}
mac->phylink = phylink;
+ if (mac->pcs)
+ phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
+
err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
@@ -302,6 +384,8 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
err_phylink_destroy:
phylink_destroy(mac->phylink);
+err_pcs_destroy:
+ dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
err_close_dpmac:
@@ -316,6 +400,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
+ dpaa2_pcs_destroy(mac);
+
dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 2130d9c7d40e..955a52856210 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,6 +7,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -21,6 +22,7 @@ struct dpaa2_mac {
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
+ struct lynx_pcs *pcs;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index cc1b7f85e433..32b5faa87bb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -2,6 +2,7 @@
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
+ * Copyright 2020 NXP
*/
#include <linux/module.h>
@@ -9,7 +10,6 @@
#include <linux/of_address.h>
#include <linux/msi.h>
#include <linux/fsl/mc.h>
-#include <linux/fsl/ptp_qoriq.h>
#include "dpaa2-ptp.h"
@@ -201,6 +201,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_free_threaded_irq;
dpaa2_phc_index = ptp_qoriq->phc_index;
+ dpaa2_ptp = ptp_qoriq;
dev_set_drvdata(dev, ptp_qoriq);
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index df2458a5e9ef..e1023538b4c3 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -1,14 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 NXP
+ * Copyright 2020 NXP
*/
#ifndef __RTC_H
#define __RTC_H
+#include <linux/fsl/ptp_qoriq.h>
+
#include "dprtc.h"
#include "dprtc-cmd.h"
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 3c06f5fb5759..90453dc7baef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef _FSL_DPNI_CMD_H
#define _FSL_DPNI_CMD_H
@@ -92,6 +93,9 @@
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+
/* Macros for accessing command fields smaller than 1 byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -641,4 +645,21 @@ struct dpni_cmd_set_tx_shaping {
u8 coupled;
};
+#define DPNI_PTP_ENABLE_SHIFT 0
+#define DPNI_PTP_ENABLE_SIZE 1
+#define DPNI_PTP_CH_UPDATE_SHIFT 1
+#define DPNI_PTP_CH_UPDATE_SIZE 1
+
+struct dpni_cmd_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
+struct dpni_rsp_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
#endif /* _FSL_DPNI_CMD_H */
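
The SHIFT/SIZE pairs above are consumed by the driver's dpni_set_field()/dpni_get_field() helpers, which derive a GENMASK from the field name. A stand-alone sketch of the packing scheme (the macros are reproduced here for illustration and assume a 64-bit long; the in-tree definitions live in dpni-cmd.h):

#include <stdio.h>

#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define DPNI_PTP_ENABLE_SHIFT		0
#define DPNI_PTP_ENABLE_SIZE		1
#define DPNI_PTP_CH_UPDATE_SHIFT	1
#define DPNI_PTP_CH_UPDATE_SIZE		1
#define DPNI_MASK(field) \
	GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
		DPNI_##field##_SHIFT)
#define dpni_set_field(var, field, val) \
	((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
#define dpni_get_field(var, field) \
	(((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)

int main(void)
{
	unsigned long flags = 0;

	dpni_set_field(flags, PTP_ENABLE, 1);
	dpni_set_field(flags, PTP_CH_UPDATE, 1);
	/* prints: flags=0x3 en=1 ch=1 */
	printf("flags=%#lx en=%lu ch=%lu\n", flags,
	       dpni_get_field(flags, PTP_ENABLE),
	       dpni_get_field(flags, PTP_CH_UPDATE));
	return 0;
}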
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 68ed4c41b282..6ea7db66a632 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1999,3 +2000,81 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
/* send command to mc */
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpni_get_single_step_cfg() - return current configuration for
+ * single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ */
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_rsp_single_step_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params;
+ ptp_cfg->offset = le16_to_cpu(rsp_params->offset);
+ ptp_cfg->en = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_ENABLE) ? 1 : 0;
+ ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_CH_UPDATE) ? 1 : 0;
+ ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+
+ return err;
+}
+
+/**
+ * dpni_set_single_step_cfg() - enable/disable and configure single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * The function takes effect only when the dpni object is connected to a
+ * dpmac object. If the dpni is not connected to a dpmac, the configuration
+ * is stored internally and applied once the connection is made.
+ */
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_cmd_single_step_cfg *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ u16 flags;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params;
+ cmd_params->offset = cpu_to_le16(ptp_cfg->offset);
+ cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay);
+
+ flags = le16_to_cpu(cmd_params->flags);
+ dpni_set_field(flags, PTP_ENABLE, !!ptp_cfg->en);
+ dpni_set_field(flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update);
+ cmd_params->flags = cpu_to_le16(flags);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 39387991a1f9..e7b9e195b534 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef __FSL_DPNI_H
#define __FSL_DPNI_H
@@ -74,6 +75,10 @@ struct fsl_mc_io;
* Disables the flow steering table.
*/
#define DPNI_OPT_NO_FS 0x000020
+/**
+ * Flow steering table is shared between all traffic classes
+ */
+#define DPNI_OPT_SHARED_FS 0x001000
int dpni_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -1079,4 +1084,34 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
int coupled);
+/**
+ * struct dpni_single_step_cfg - configure single step PTP (IEEE 1588)
+ * @en: enable single step PTP. When enabled, PTPv1 functionality
+ * will not work. If the field is zero, the offset and ch_update
+ * parameters are ignored
+ * @offset: start offset from the beginning of the frame where the
+ * timestamp field is found. The offset must account for all MAC
+ * headers, VLAN tags and other protocol headers
+ * @ch_update: when set, the UDP checksum will be updated inside the packet
+ * @peer_delay: for peer-to-peer transparent clocks, add this value to the
+ * correction field in addition to the transient time update.
+ * The value is expressed in nanoseconds.
+ */
+struct dpni_single_step_cfg {
+ u8 en;
+ u8 ch_update;
+ u16 offset;
+ u32 peer_delay;
+};
+
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
#endif /* __FSL_DPNI_H */
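
A hypothetical caller of the new API, enabling one-step sync with UDP checksum rewriting (everything except the dpni_* names is illustrative):

static int example_enable_onestep(struct fsl_mc_io *mc_io, u16 token,
				  u16 ts_offset)
{
	struct dpni_single_step_cfg cfg = {
		.en = 1,
		.ch_update = 1,		/* rewrite the UDP checksum */
		.offset = ts_offset,	/* originTimestamp offset in the frame */
		.peer_delay = 0,	/* end-to-end profile: no P2P correction */
	};

	return dpni_set_single_step_cfg(mc_io, 0, token, &cfg);
}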
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 37b804f8bd76..0fa18b00c49b 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -3,7 +3,8 @@ config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI
select FSL_ENETC_MDIO
- select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
select DIMLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
@@ -15,7 +16,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
- select PHYLIB
+ select PHYLINK
select DIMLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index f78ca7b343d2..52be6e315752 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -4,7 +4,6 @@
#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/of_mdio.h>
#include <linux/vmalloc.h>
/* ENETC overhead: optional extension BD + 1 BD gap */
@@ -1392,38 +1391,24 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}
-static void adjust_link(struct net_device *ndev)
+static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (priv->active_offloads & ENETC_F_QBV)
- enetc_sched_speed_set(ndev);
-
- phy_print_status(phydev);
-}
-
-static int enetc_phy_connect(struct net_device *ndev)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev;
struct ethtool_eee edata;
+ int err;
- if (!priv->phy_node)
+ if (!priv->phylink)
return 0; /* phy-less mode */
- phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
- 0, priv->if_mode);
- if (!phydev) {
+ err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
+ if (err) {
dev_err(&ndev->dev, "could not attach to PHY\n");
- return -ENODEV;
+ return err;
}
- phy_attached_info(phydev);
-
/* disable EEE autoneg, until ENETC driver supports it */
memset(&edata, 0, sizeof(struct ethtool_eee));
- phy_ethtool_set_eee(phydev, &edata);
+ phylink_ethtool_set_eee(priv->phylink, &edata);
return 0;
}
@@ -1443,8 +1428,8 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (ndev->phydev)
- phy_start(ndev->phydev);
+ if (priv->phylink)
+ phylink_start(priv->phylink);
else
netif_carrier_on(ndev);
@@ -1460,7 +1445,7 @@ int enetc_open(struct net_device *ndev)
if (err)
return err;
- err = enetc_phy_connect(ndev);
+ err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
@@ -1490,8 +1475,8 @@ err_set_queues:
err_alloc_rx:
enetc_free_tx_resources(priv);
err_alloc_tx:
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
@@ -1514,8 +1499,8 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (ndev->phydev)
- phy_stop(ndev->phydev);
+ if (priv->phylink)
+ phylink_stop(priv->phylink);
else
netif_carrier_off(ndev);
@@ -1529,8 +1514,8 @@ int enetc_close(struct net_device *ndev)
enetc_stop(ndev);
enetc_clear_bdrs(priv);
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
enetc_free_rxtx_rings(priv);
enetc_free_rx_resources(priv);
enetc_free_tx_resources(priv);
@@ -1780,6 +1765,7 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (cmd == SIOCSHWTSTAMP)
return enetc_hwtstamp_set(ndev, rq);
@@ -1787,9 +1773,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return enetc_hwtstamp_get(ndev, rq);
#endif
- if (!ndev->phydev)
+ if (!priv->phylink)
return -EOPNOTSUPP;
- return phy_mii_ioctl(ndev->phydev, rq, cmd);
+
+ return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
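
Collected from the hunks above, the open/close lifecycle after the phylink conversion reduces to the following sketch (error handling trimmed; assumes priv->phylink was created at probe time):

static int sketch_open(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
	if (err)
		return err;
	phylink_start(priv->phylink);		/* replaces phy_start() */
	return 0;
}

static int sketch_close(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	phylink_stop(priv->phylink);		/* replaces phy_stop() */
	phylink_disconnect_phy(priv->phylink);	/* replaces phy_disconnect() */
	return 0;
}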
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index d309803cfeb6..dd0fb0c066d7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -9,7 +9,7 @@
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/dim.h>
#include "enetc_hw.h"
@@ -264,8 +264,7 @@ struct enetc_ndev_priv {
struct psfp_cap psfp_cap;
- struct device_node *phy_node;
- phy_interface_t if_mode;
+ struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
};
@@ -323,7 +322,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
-void enetc_sched_speed_set(struct net_device *ndev);
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
@@ -388,7 +387,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
-#define enetc_sched_speed_set(ndev) (void)0
+#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 1dab83fbca77..8ed1ebd5a183 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -686,6 +686,28 @@ static int enetc_set_wol(struct net_device *dev,
return ret;
}
+static int enetc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static int enetc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -704,8 +726,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_ts_info = enetc_get_ts_info,
.get_wol = enetc_get_wol,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 177334f0adb1..419306342ac5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -482,8 +482,7 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw,
- phy_interface_t phy_mode)
+static void enetc_configure_port_mac(struct enetc_hw *hw)
{
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
@@ -492,12 +491,14 @@ static void enetc_configure_port_mac(struct enetc_hw *hw,
enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
- ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
- ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+}
+
+static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+{
/* set auto-speed for RGMII */
if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
phy_interface_mode_is_rgmii(phy_mode))
@@ -507,6 +508,17 @@ static void enetc_configure_port_mac(struct enetc_hw *hw,
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
}
+static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
+}
+
static void enetc_configure_port_pmac(struct enetc_hw *hw)
{
u32 temp;
@@ -527,7 +539,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_configure_port_pmac(hw);
- enetc_configure_port_mac(hw, pf->if_mode);
+ enetc_configure_port_mac(hw);
enetc_port_si_configure(pf->si);
@@ -733,11 +745,10 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
-static int enetc_mdio_probe(struct enetc_pf *pf)
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct device_node *np;
struct mii_bus *bus;
int err;
@@ -754,20 +765,12 @@ static int enetc_mdio_probe(struct enetc_pf *pf)
mdio_priv->mdio_base = ENETC_EMDIO_BASE;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- np = of_get_child_by_name(dev->of_node, "mdio");
- if (!np) {
- dev_err(dev, "MDIO node missing\n");
- return -EINVAL;
- }
-
err = of_mdiobus_register(bus, np);
if (err) {
- of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
return err;
}
- of_node_put(np);
pf->mdio = bus;
return 0;
@@ -779,69 +782,12 @@ static void enetc_mdio_remove(struct enetc_pf *pf)
mdiobus_unregister(pf->mdio);
}
-static int enetc_of_get_phy(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *mdio_np;
- int err;
-
- pf->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!pf->phy_node) {
- if (!of_phy_is_fixed_link(np)) {
- dev_err(dev, "PHY not specified\n");
- return -ENODEV;
- }
-
- err = of_phy_register_fixed_link(np);
- if (err < 0) {
- dev_err(dev, "fixed link registration failed\n");
- return err;
- }
-
- pf->phy_node = of_node_get(np);
- }
-
- mdio_np = of_get_child_by_name(np, "mdio");
- if (mdio_np) {
- of_node_put(mdio_np);
- err = enetc_mdio_probe(pf);
- if (err) {
- of_node_put(pf->phy_node);
- return err;
- }
- }
-
- err = of_get_phy_mode(np, &pf->if_mode);
- if (err) {
- dev_err(dev, "missing phy type\n");
- of_node_put(pf->phy_node);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- else
- enetc_mdio_remove(pf);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void enetc_of_put_phy(struct enetc_pf *pf)
-{
- struct device_node *np = pf->si->pdev->dev.of_node;
-
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- if (pf->phy_node)
- of_node_put(pf->phy_node);
-}
-
-static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45)
+static int enetc_imdio_create(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct phy_device *pcs;
+ struct lynx_pcs *pcs_lynx;
+ struct mdio_device *pcs;
struct mii_bus *bus;
int err;
@@ -865,15 +811,23 @@ static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45)
goto free_mdio_bus;
}
- pcs = get_phy_device(bus, 0, is_c45);
+ pcs = mdio_device_create(bus, 0);
if (IS_ERR(pcs)) {
err = PTR_ERR(pcs);
- dev_err(dev, "cannot get internal PCS PHY (%d)\n", err);
+ dev_err(dev, "cannot create pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pcs_lynx = lynx_pcs_create(pcs);
+ if (!pcs_lynx) {
+ mdio_device_free(pcs);
+ err = -ENOMEM;
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
pf->imdio = bus;
- pf->pcs = pcs;
+ pf->pcs = pcs_lynx;
return 0;
@@ -886,91 +840,168 @@ free_mdio_bus:
static void enetc_imdio_remove(struct enetc_pf *pf)
{
- if (pf->pcs)
- put_device(&pf->pcs->mdio.dev);
+ if (pf->pcs) {
+ mdio_device_free(pf->pcs->mdio);
+ lynx_pcs_destroy(pf->pcs);
+ }
if (pf->imdio) {
mdiobus_unregister(pf->imdio);
mdiobus_free(pf->imdio);
}
}
-static void enetc_configure_sgmii(struct phy_device *pcs)
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+static int enetc_mdiobus_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(dev->of_node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void enetc_mdiobus_destroy(struct enetc_pf *pf)
{
- /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
- * for the MAC PCS in order to acknowledge the AN.
- */
- phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | ADVERTISE_LPACK);
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_USE_SGMII_AN);
+static void enetc_pl_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ state->interface != PHY_INTERFACE_MODE_USXGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
- /* Adjust link timer for SGMII */
- phy_write(pcs, ENETC_PCS_LINK_TIMER1, ENETC_PCS_LINK_TIMER1_VAL);
- phy_write(pcs, ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
- phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void enetc_configure_2500basex(struct phy_device *pcs)
+static void enetc_pl_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_ndev_priv *priv;
+
+ enetc_mac_config(&pf->si->hw, state->interface);
- phy_write(pcs, MII_BMCR, BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_RESET);
+ priv = netdev_priv(pf->si->ndev);
+ if (pf->pcs)
+ phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
}
-static void enetc_configure_usxgmii(struct phy_device *pcs)
+static void enetc_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
{
- /* Configure device ability for the USXGMII Replicator */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
- ADVERTISE_SGMII | ADVERTISE_LPACK |
- MDIO_USXGMII_FULL_DUPLEX);
-
- /* Restart PCS AN */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
- BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(pf->si->ndev);
+ if (priv->active_offloads & ENETC_F_QBV)
+ enetc_sched_speed_set(priv, speed);
+
+ enetc_mac_enable(&pf->si->hw, true);
}
-static int enetc_configure_serdes(struct enetc_ndev_priv *priv)
+static void enetc_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc_mac_enable(&pf->si->hw, false);
+}
+
+static const struct phylink_mac_ops enetc_mac_phylink_ops = {
+ .validate = enetc_pl_mac_validate,
+ .mac_config = enetc_pl_mac_config,
+ .mac_link_up = enetc_pl_mac_link_up,
+ .mac_link_down = enetc_pl_mac_link_down,
+};
+
+static int enetc_phylink_create(struct enetc_ndev_priv *priv)
{
- bool is_c45 = priv->if_mode == PHY_INTERFACE_MODE_USXGMII;
struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = &pf->si->pdev->dev;
+ struct phylink *phylink;
int err;
- if (priv->if_mode != PHY_INTERFACE_MODE_SGMII &&
- priv->if_mode != PHY_INTERFACE_MODE_2500BASEX &&
- priv->if_mode != PHY_INTERFACE_MODE_USXGMII)
- return 0;
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
- err = enetc_imdio_init(pf, is_c45);
- if (err)
+ phylink = phylink_create(&pf->phylink_config,
+ of_fwnode_handle(dev->of_node),
+ pf->if_mode, &enetc_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
return err;
-
- switch (priv->if_mode) {
- case PHY_INTERFACE_MODE_SGMII:
- enetc_configure_sgmii(pf->pcs);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- enetc_configure_2500basex(pf->pcs);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- enetc_configure_usxgmii(pf->pcs);
- break;
- default:
- dev_err(&pf->si->pdev->dev, "Unsupported link mode %s\n",
- phy_modes(priv->if_mode));
}
+ priv->phylink = phylink;
+
return 0;
}
-static void enetc_teardown_serdes(struct enetc_ndev_priv *priv)
+static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- enetc_imdio_remove(pf);
+ if (priv->phylink)
+ phylink_destroy(priv->phylink);
}
static int enetc_pf_probe(struct pci_dev *pdev,
@@ -1004,10 +1035,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
- err = enetc_of_get_phy(pf);
- if (err)
- dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
-
enetc_configure_port(pf);
enetc_get_si_caps(si);
@@ -1022,8 +1049,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
priv = netdev_priv(ndev);
- priv->phy_node = pf->phy_node;
- priv->if_mode = pf->if_mode;
enetc_init_si_rings_params(priv);
@@ -1039,20 +1064,27 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- err = enetc_configure_serdes(priv);
- if (err)
- dev_warn(&pdev->dev, "Attempted SerDes config but failed\n");
+ if (!of_get_phy_mode(pdev->dev.of_node, &pf->if_mode)) {
+ err = enetc_mdiobus_create(pf);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv);
+ if (err)
+ goto err_phylink_create;
+ }
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
- netif_carrier_off(ndev);
-
return 0;
err_reg_netdev:
- enetc_teardown_serdes(priv);
+ enetc_phylink_destroy(priv);
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+err_mdiobus_create:
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
@@ -1060,8 +1092,6 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
- enetc_mdio_remove(pf);
- enetc_of_put_phy(pf);
err_map_pf_space:
enetc_pci_remove(pdev);
@@ -1074,16 +1104,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_ndev_priv *priv;
+ priv = netdev_priv(si->ndev);
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
- priv = netdev_priv(si->ndev);
unregister_netdev(si->ndev);
- enetc_teardown_serdes(priv);
- enetc_mdio_remove(pf);
- enetc_of_put_phy(pf);
-
enetc_free_msix(priv);
enetc_free_si_resources(priv);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 0d0ee91282a5..263946c51e37 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,6 +2,7 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
+#include <linux/pcs-lynx.h>
#define ENETC_PF_NUM_RINGS 8
@@ -45,12 +46,15 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct phy_device *pcs;
+ struct lynx_pcs *pcs;
- struct device_node *phy_node;
phy_interface_t if_mode;
+ struct phylink_config phylink_config;
};
+#define phylink_to_enetc_pf(config) \
+ container_of((config), struct enetc_pf, phylink_config)
+
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 1c4a535890da..827f74e86d34 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -15,17 +15,14 @@ static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
& ENETC_QBV_MAX_GCL_LEN_MASK;
}
-void enetc_sched_speed_set(struct net_device *ndev)
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
u32 old_speed = priv->speed;
- u32 speed, pspeed;
+ u32 pspeed;
- if (phydev->speed == old_speed)
+ if (speed == old_speed)
return;
- speed = phydev->speed;
switch (speed) {
case SPEED_1000:
pspeed = ENETC_PMR_PSPEED_1000M;
@@ -405,7 +402,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[0];
+ struct action_gate_entry entries[];
};
/* Only enable the green color frame now
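
The entries[0] change swaps the old GNU zero-length-array idiom for a C99 flexible array member; allocation sizing is unchanged and typically uses struct_size() from <linux/overflow.h>. A sketch (n is an assumed local count):

struct enetc_psfp_gate *g;

/* one allocation covering the header plus n trailing entries */
g = kzalloc(struct_size(g, entries, n), GFP_KERNEL);
if (!g)
	return -ENOMEM;
g->num_entries = n;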
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index f14576212a0e..7b5c82c7e4e5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,16 +78,11 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
- int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
- if (err)
- return err;
-
- return 0;
+ return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
}
static int enetc_vf_set_features(struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 832a2175636d..c527f4ee1d3a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -456,6 +456,12 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_FRREG (1 << 16)
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * the generation of an MII event. This clearing must itself be avoided
+ * on older FEC blocks, where it would stop MII events being generated.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fb37816a74db..d7919555250d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = {
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+ FEC_QUIRK_CLEAR_SETUP_MII,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
};
static struct platform_device_id fec_devtype[] = {
@@ -1912,6 +1913,27 @@ out:
return ret;
}
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+ /*
+ * If the PHY is not yet bound to the MAC, but there is an
+ * OF PHY node and a matching PHY device instance already,
+ * use the OF PHY node to obtain the PHY device instance,
+ * and then use that PHY device instance when triggering
+ * the PHY reset.
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1938,7 +1960,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1960,8 +1982,7 @@ failed_clk_ref:
mutex_unlock(&fep->ptp_clk_mutex);
}
failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
+ clk_disable_unprepare(fep->clk_enet_out);
return ret;
}
@@ -2114,15 +2135,17 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (suppress_preamble)
fep->phy_speed |= BIT(7);
- /* Clear MMFR to avoid to generate MII event by writing MSCR.
- * MII event generation condition:
- * - writing MSCR:
- * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
- * mscr_reg_data_in[7:0] != 0
- * - writing MMFR:
- * - mscr[7:0]_not_zero
- */
- writel(0, fep->hwp + FEC_MII_DATA);
+ if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+ /* Clear MMFR to avoid to generate MII event by writing MSCR.
+ * MII event generation condition:
+ * - writing MSCR:
+ * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+ * mscr_reg_data_in[7:0] != 0
+ * - writing MMFR:
+ * - mscr[7:0]_not_zero
+ */
+ writel(0, fep->hwp + FEC_MII_DATA);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2984,16 +3007,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */
fec_restart(ndev);
- /* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(ndev);
- if (ret)
- goto err_enet_mii_probe;
-
/* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/
if (reset_again)
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7a3f066e611d..b3bad429e03b 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -74,7 +74,7 @@ struct mpc52xx_fec_priv {
static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
-static void mpc52xx_fec_stop(struct net_device *dev);
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
@@ -283,7 +283,7 @@ static int mpc52xx_fec_close(struct net_device *dev)
netif_stop_queue(dev);
- mpc52xx_fec_stop(dev);
+ mpc52xx_fec_stop(dev, true);
mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
@@ -693,7 +693,7 @@ static void mpc52xx_fec_start(struct net_device *dev)
*
* stop all activity on fec and empty dma buffers
*/
-static void mpc52xx_fec_stop(struct net_device *dev)
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
@@ -706,7 +706,7 @@ static void mpc52xx_fec_stop(struct net_device *dev)
bcom_disable(priv->rx_dmatsk);
/* Wait for tx queue to drain, but only if the caller may sleep */
- if (!in_interrupt()) {
+ if (may_sleep) {
timeout = jiffies + msecs_to_jiffies(2000);
while (time_before(jiffies, timeout) &&
!bcom_queue_empty(priv->tx_dmatsk))
@@ -738,7 +738,7 @@ static void mpc52xx_fec_reset(struct net_device *dev)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- mpc52xx_fec_stop(dev);
+ mpc52xx_fec_stop(dev, false);
out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
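
The may_sleep parameter is part of the tree-wide in_interrupt() removal: a callee cannot reliably infer whether it may sleep, so each call site now states it explicitly. Reduced to its shape (the helper names are assumptions, not driver API):

static void sketch_stop(struct net_device *dev, bool may_sleep)
{
	sketch_disable_dma(dev);		/* assumed helper */
	if (may_sleep)				/* true on the ndo_stop path */
		sketch_wait_tx_drain(dev);	/* assumed helper; may sleep */
	/* the reset path passes false and skips the drain */
}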
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a0c1f4410306..2e344aada4c6 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -512,7 +512,7 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
-EFAULT : 0;
}
-/**
+/*
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET only supports a 32-bit counter, which will overflow in 4s
*/
@@ -520,13 +520,12 @@ static void fec_time_keep(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
- u64 ns;
unsigned long flags;
mutex_lock(&fep->ptp_clk_mutex);
if (fep->ptp_clk_on) {
spin_lock_irqsave(&fep->tmreg_lock, flags);
- ns = timecounter_read(&fep->tc);
+ timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
mutex_unlock(&fep->ptp_clk_mutex);
@@ -567,7 +566,8 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
/**
* fec_ptp_init
- * @ndev: The FEC network adapter
+ * @pdev: The FEC network adapter
+ * @irq_idx: the interrupt index
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ef67e8599b39..ce0a121580f6 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2063,11 +2063,11 @@ static int fman_set_exception(struct fman *fman,
/**
* fman_register_intr
* @fman: A Pointer to FMan device
- * @mod: Calling module
+ * @module: Calling module
* @mod_id: Module id (if more than 1 exists, '0' if not)
* @intr_type: Interrupt type (error/normal) selection.
- * @f_isr: The interrupt service routine.
- * @h_src_arg: Argument to be passed to f_isr.
+ * @isr_cb: The interrupt service routine.
+ * @src_arg: Argument to be passed to isr_cb.
*
* Used to register an event handler to be processed by FMan
*
@@ -2091,7 +2091,7 @@ EXPORT_SYMBOL(fman_register_intr);
/**
* fman_unregister_intr
* @fman: A Pointer to FMan device
- * @mod: Calling module
+ * @module: Calling module
* @mod_id: Module id (if more than 1 exists, '0' if not)
* @intr_type: Interrupt type (error/normal) selection.
*
@@ -2342,8 +2342,8 @@ EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
/**
* fman_get_revision
- * @fman - Pointer to the FMan module
- * @rev_info - A structure of revision information parameters.
+ * @fman: Pointer to the FMan module
+ * @rev_info: A structure of revision information parameters.
*
* Returns the FM revision
*
@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL(fman_get_rx_extra_headroom);
/**
* fman_bind
- * @dev: FMan OF device pointer
+ * @fm_dev: FMan OF device pointer
*
* Bind to a specific FMan device.
*
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 5ec94d243da0..7ad317e622bc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -144,9 +144,9 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
/**
* fman_muram_free_mem
- * muram: FM-MURAM module pointer.
- * offset: offset of the memory region to be freed.
- * size: size of the memory to be freed.
+ * @muram: FM-MURAM module pointer.
+ * @offset: offset of the memory region to be freed.
+ * @size: size of the memory to be freed.
*
* Free an allocated memory from FM-MURAM partition.
*/
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 624b2eb6f01d..d9baac0dbc7d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1410,9 +1410,11 @@ err_port_cfg:
}
EXPORT_SYMBOL(fman_port_config);
-/**
+/*
* fman_port_use_kg_hash
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
+ * @enable: enable or disable
+ *
* Sets the HW KeyGen or the BMI as HW Parser next engine, enabling
* or bypassing the KeyGen hashing of Rx traffic
*/
@@ -1430,7 +1432,8 @@ EXPORT_SYMBOL(fman_port_use_kg_hash);
/**
* fman_port_init
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
+ *
* Initializes the FM PORT module by defining the software structure and
* configuring the hardware registers.
*
@@ -1524,8 +1527,8 @@ EXPORT_SYMBOL(fman_port_init);
/**
* fman_port_cfg_buf_prefix_content
- * @port A pointer to a FM Port module.
- * @buffer_prefix_content A structure of parameters describing
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
* the structure of the buffer.
* Out parameter:
* Start margin - offset of data from
@@ -1570,7 +1573,7 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
/**
* fman_port_disable
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
*
* Gracefully disable an FM port. The port will not start new tasks after all
* tasks associated with the port are terminated.
@@ -1651,7 +1654,7 @@ EXPORT_SYMBOL(fman_port_disable);
/**
* fman_port_enable
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
*
* A runtime routine provided to allow disable/enable of port.
*
@@ -1697,7 +1700,7 @@ EXPORT_SYMBOL(fman_port_enable);
/**
* fman_port_bind
- * dev: FMan Port OF device pointer
+ * @dev: FMan Port OF device pointer
*
* Bind to a specific FMan Port.
*
@@ -1713,7 +1716,7 @@ EXPORT_SYMBOL(fman_port_bind);
/**
* fman_port_get_qman_channel_id
- * port: Pointer to the FMan port devuce
+ * @port: Pointer to the FMan port device
*
* Get the QMan channel ID for the specific port
*
@@ -1727,7 +1730,7 @@ EXPORT_SYMBOL(fman_port_get_qman_channel_id);
/**
* fman_port_get_device
- * port: Pointer to the FMan port device
+ * @port: Pointer to the FMan port device
*
* Get the 'struct device' associated to the specified FMan port device
*
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 43427c5b9396..901749a7a318 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -359,8 +359,8 @@ EXPORT_SYMBOL(fman_set_mac_active_pause);
/**
* fman_get_pause_cfg
* @mac_dev: A pointer to the MAC device
- * @rx: Return value for RX setting
- * @tx: Return value for TX setting
+ * @rx_pause: Return value for RX setting
+ * @tx_pause: Return value for TX setting
*
* Determine the MAC RX/TX PAUSE frames settings based on PHY
* autonegotiation or values set by eththool.
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index bf846b42bc74..78e008b81374 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -562,10 +562,13 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
- if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
- bdp++, curidx++;
- else
- bdp = fep->tx_bd_base, curidx = 0;
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
+ bdp++;
+ curidx++;
+ } else {
+ bdp = fep->tx_bd_base;
+ curidx = 0;
+ }
len = skb_frag_size(frag);
CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 41dd3d0f3452..d391a45cebb6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
- struct sk_buff *skb_new;
-
- skb_new = skb_realloc_headroom(skb, fcb_len);
- if (!skb_new) {
+ if (fcb_len) {
+ if (unlikely(skb_cow_head(skb, fcb_len))) {
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-
- if (skb->sk)
- skb_set_owner_w(skb_new, skb->sk);
- dev_consume_skb_any(skb);
- skb = skb_new;
}
/* total number of fragments in the SKB */
@@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* Initializing some of the rx/tx queue level parameters */
for (i = 0; i < priv->num_tx_queues; i++) {
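
The gianfar change is an instance of a generic transmit-path pattern: skb_cow_head() both guarantees headroom and un-shares the header in a single call, replacing the manual skb_realloc_headroom()/skb_set_owner_w()/free sequence. A minimal sketch (the headroom value is a placeholder for GMAC_FCB_LEN + GMAC_TXPAL_LEN):

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int needed = 16;	/* placeholder headroom requirement */

	if (skb_cow_head(skb, needed)) {
		dev->stats.tx_errors++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* headroom is now private and writable; push the control block here */
	return NETDEV_TX_OK;
}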
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 714b501be7d0..ba8869c3d891 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1358,7 +1358,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index ebc37e256922..f5c80229ea96 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -27,6 +27,17 @@
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3
+/* Number of gve tx/rx stats in the stats report. */
+#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_RX_STATS_REPORT_NUM 2
+
+/* Interval to schedule a stats report update, 20000ms. */
+#define GVE_STATS_REPORT_TIMER_PERIOD 20000
+
+/* Number of NIC tx/rx stats in the stats report. */
+#define NIC_TX_STATS_REPORT_NUM 0
+#define NIC_RX_STATS_REPORT_NUM 4
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -71,6 +82,11 @@ struct gve_rx_ring {
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
+ u64 rx_copied_pkt; /* free-running total number of copied packets */
+ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
+ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
+ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -202,24 +218,63 @@ struct gve_priv {
dma_addr_t adminq_bus_addr;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
-
+ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
+ u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
+ /* free-running count of per AQ cmd executed */
+ u32 adminq_describe_device_cnt;
+ u32 adminq_cfg_device_resources_cnt;
+ u32 adminq_register_page_list_cnt;
+ u32 adminq_unregister_page_list_cnt;
+ u32 adminq_create_tx_queue_cnt;
+ u32 adminq_create_rx_queue_cnt;
+ u32 adminq_destroy_tx_queue_cnt;
+ u32 adminq_destroy_rx_queue_cnt;
+ u32 adminq_dcfg_device_resources_cnt;
+ u32 adminq_set_driver_parameter_cnt;
+ u32 adminq_report_stats_cnt;
+ u32 adminq_report_link_speed_cnt;
+
+ /* Global stats */
+ u32 interface_up_cnt; /* count of times interface turned up since last reset */
+ u32 interface_down_cnt; /* count of times interface turned down since last reset */
+ u32 reset_cnt; /* count of resets */
+ u32 page_alloc_fail; /* count of page alloc fails */
+ u32 dma_mapping_error; /* count of dma mapping errors */
+ u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
+ struct work_struct stats_report_task;
unsigned long service_task_flags;
unsigned long state_flags;
+
+ struct gve_stats_report *stats_report;
+ u64 stats_report_len;
+ dma_addr_t stats_report_bus; /* dma address for the stats report */
+ unsigned long ethtool_flags;
+
+ unsigned long stats_report_timer_period;
+ struct timer_list stats_report_timer;
+
+ /* Gvnic device link speed from hypervisor. */
+ u64 link_speed;
};
-enum gve_service_task_flags {
- GVE_PRIV_FLAGS_DO_RESET = BIT(1),
- GVE_PRIV_FLAGS_RESET_IN_PROGRESS = BIT(2),
- GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = BIT(3),
+enum gve_service_task_flags_bit {
+ GVE_PRIV_FLAGS_DO_RESET = 1,
+ GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
+ GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
+ GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};
-enum gve_state_flags {
- GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = BIT(1),
- GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = BIT(2),
- GVE_PRIV_FLAGS_DEVICE_RINGS_OK = BIT(3),
- GVE_PRIV_FLAGS_NAPI_ENABLED = BIT(4),
+enum gve_state_flags_bit {
+ GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
+ GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
+ GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
+ GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
+};
+
+enum gve_ethtool_flags_bit {
+ GVE_PRIV_FLAGS_REPORT_STATS = 0,
};
static inline bool gve_get_do_reset(struct gve_priv *priv)
@@ -269,6 +324,22 @@ static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}
+static inline bool gve_get_do_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
+ &priv->service_task_flags);
+}
+
+static inline void gve_set_do_report_stats(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
+static inline void gve_clear_do_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
@@ -329,6 +400,16 @@ static inline void gve_clear_napi_enabled(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}
+static inline bool gve_get_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
+static inline void gve_clear_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
/* Returns the address of the ntfy_blocks irq doorbell
*/
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
@@ -426,7 +507,8 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
}
/* buffers */
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
@@ -450,6 +532,8 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
+/* report stats handling */
+void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
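
The flag enums above moved from BIT() masks to plain bit numbers because they are passed to set_bit()/test_bit()/clear_bit() (see the helpers in this header), which take a bit index; passing BIT(n) silently addresses bit 1<<n. A stand-alone demonstration:

#include <stdio.h>

#define BIT(n)	(1UL << (n))

int main(void)
{
	unsigned long flags = 0;

	/* what set_bit(BIT(3), &flags) effectively did with mask-valued enums */
	flags |= 1UL << BIT(3);
	printf("%#lx\n", flags);	/* 0x100: bit 8, not bit 3 */

	flags = 0;
	flags |= 1UL << 3;		/* with index-valued enums */
	printf("%#lx\n", flags);	/* 0x8 */
	return 0;
}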
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c3ba7baf0107..24ae6a28a806 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -23,6 +23,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
+ priv->adminq_cmd_fail = 0;
+ priv->adminq_timeouts = 0;
+ priv->adminq_describe_device_cnt = 0;
+ priv->adminq_cfg_device_resources_cnt = 0;
+ priv->adminq_register_page_list_cnt = 0;
+ priv->adminq_unregister_page_list_cnt = 0;
+ priv->adminq_create_tx_queue_cnt = 0;
+ priv->adminq_create_rx_queue_cnt = 0;
+ priv->adminq_destroy_tx_queue_cnt = 0;
+ priv->adminq_destroy_rx_queue_cnt = 0;
+ priv->adminq_dcfg_device_resources_cnt = 0;
+ priv->adminq_set_driver_parameter_cnt = 0;
+ priv->adminq_report_stats_cnt = 0;
+ priv->adminq_report_link_speed_cnt = 0;
/* Setup Admin queue with the device */
iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -81,17 +95,18 @@ static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
return false;
}
-static int gve_adminq_parse_err(struct device *dev, u32 status)
+static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
if (status != GVE_ADMINQ_COMMAND_PASSED &&
- status != GVE_ADMINQ_COMMAND_UNSET)
- dev_err(dev, "AQ command failed with status %d\n", status);
-
+ status != GVE_ADMINQ_COMMAND_UNSET) {
+ dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
+ priv->adminq_cmd_fail++;
+ }
switch (status) {
case GVE_ADMINQ_COMMAND_PASSED:
return 0;
case GVE_ADMINQ_COMMAND_UNSET:
- dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
+ dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
return -EINVAL;
case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
@@ -116,36 +131,145 @@ static int gve_adminq_parse_err(struct device *dev, u32 status)
case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
return -ENOTSUPP;
default:
- dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
+ dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
return -EINVAL;
}
}
+/* Flushes all AQ commands currently queued and waits for them to complete.
+ * If there are failures, it will return the first error.
+ */
+static int gve_adminq_kick_and_wait(struct gve_priv *priv)
+{
+ u32 tail, head;
+ int i;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+
+ gve_adminq_kick_cmd(priv, head);
+ if (!gve_adminq_wait_for_cmd(priv, head)) {
+ dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
+ priv->adminq_timeouts++;
+ return -ENOTRECOVERABLE;
+ }
+
+ for (i = tail; i < head; i++) {
+ union gve_adminq_command *cmd;
+ u32 status, err;
+
+ cmd = &priv->adminq[i & priv->adminq_mask];
+ status = be32_to_cpu(READ_ONCE(cmd->status));
+ err = gve_adminq_parse_err(priv, status);
+ if (err)
+ // Return the first error if we failed.
+ return err;
+ }
+
+ return 0;
+}
+
/* This function is not threadsafe - the caller is responsible for any
* necessary locks.
*/
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig)
+static int gve_adminq_issue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
{
union gve_adminq_command *cmd;
- u32 status = 0;
- u32 prod_cnt;
+ u32 opcode;
+ u32 tail;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+
+ // Check if next command will overflow the buffer.
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == (tail & priv->adminq_mask)) {
+ int err;
+
+ // Flush existing commands to make room.
+ err = gve_adminq_kick_and_wait(priv);
+ if (err)
+ return err;
+
+ // Retry.
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == (tail & priv->adminq_mask)) {
+ // This should never happen. We just flushed the
+ // command queue so there should be enough space.
+ return -ENOMEM;
+ }
+ }
cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
priv->adminq_prod_cnt++;
- prod_cnt = priv->adminq_prod_cnt;
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
-
- gve_adminq_kick_cmd(priv, prod_cnt);
- if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
- dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
- return -ENOTRECOVERABLE;
+ opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+
+ switch (opcode) {
+ case GVE_ADMINQ_DESCRIBE_DEVICE:
+ priv->adminq_describe_device_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_cfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_REGISTER_PAGE_LIST:
+ priv->adminq_register_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
+ priv->adminq_unregister_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_TX_QUEUE:
+ priv->adminq_create_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_RX_QUEUE:
+ priv->adminq_create_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_TX_QUEUE:
+ priv->adminq_destroy_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_RX_QUEUE:
+ priv->adminq_destroy_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_dcfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_SET_DRIVER_PARAMETER:
+ priv->adminq_set_driver_parameter_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_STATS:
+ priv->adminq_report_stats_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_LINK_SPEED:
+ priv->adminq_report_link_speed_cnt++;
+ break;
+ default:
+ dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
- memcpy(cmd_orig, cmd, sizeof(*cmd));
- status = be32_to_cpu(READ_ONCE(cmd->status));
- return gve_adminq_parse_err(&priv->pdev->dev, status);
+ return 0;
+}
+
+/* This function is not threadsafe - the caller is responsible for any
+ * necessary locks.
+ * The caller is also responsible for making sure there are no commands
+ * waiting to be executed.
+ */
+static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+{
+ u32 tail, head;
+ int err;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+ if (tail != head)
+ // This is not a valid path
+ return -EINVAL;
+
+ err = gve_adminq_issue_cmd(priv, cmd_orig);
+ if (err)
+ return err;
+
+ return gve_adminq_kick_and_wait(priv);
}
/* The device specifies that the management vector can either be the first irq
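A note on the overflow check above: the admin queue is a power-of-two ring,
so the producer counter is never wrapped explicitly; masking it selects the
slot, and the ring is treated as full when the next slot would collide with
the device's completion counter. A minimal, standalone C sketch of that test
(slot count and names are illustrative, not the driver's types):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SLOTS 64u               /* must be a power of two */
    #define RING_MASK  (RING_SLOTS - 1u)

    /* prod and tail are free-running counters; only masking wraps them. */
    static bool ring_full(uint32_t prod, uint32_t tail)
    {
            return ((prod + 1) & RING_MASK) == (tail & RING_MASK);
    }

    int main(void)
    {
            uint32_t prod = 0, tail = 0;

            while (!ring_full(prod, tail))
                    prod++;                 /* queue until full */
            assert(prod == RING_SLOTS - 1); /* one slot stays free */
            return 0;
    }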
@@ -190,29 +314,50 @@ int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
.queue_id = cpu_to_be32(queue_index),
.reserved = 0,
- .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
+ .queue_resources_addr =
+ cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
@@ -227,12 +372,31 @@ int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
}
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
@@ -240,12 +404,31 @@ int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
@@ -253,7 +436,25 @@ int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
int gve_adminq_describe_device(struct gve_priv *priv)
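The queue create/destroy helpers above all follow the same batching shape:
gve_adminq_issue_cmd() only writes a command into the ring, and a single
gve_adminq_kick_and_wait() at the end rings the doorbell and waits for the
whole batch, so bringing up N queues costs one doorbell write and one wait
instead of N. A hypothetical caller-side sketch of that shape (the command
array and function name are illustrative; only the issue-then-kick structure
mirrors the driver):

    /* Issue one pre-built command per object, then flush once. */
    static int batch_submit(struct gve_priv *priv,
                            union gve_adminq_command *cmds, u32 num)
    {
            u32 i;
            int err;

            for (i = 0; i < num; i++) {
                    err = gve_adminq_issue_cmd(priv, &cmds[i]); /* no kick */
                    if (err)
                            return err;
            }
            return gve_adminq_kick_and_wait(priv); /* one doorbell, one wait */
    }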
@@ -283,8 +484,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -293,8 +493,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
< PAGE_SIZE ||
priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
< PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -302,8 +501,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
be64_to_cpu(descriptor->max_registered_pages);
mtu = be16_to_cpu(descriptor->mtu);
if (mtu < ETH_MIN_MTU) {
- netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
- mtu);
+ dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -311,12 +509,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->num_event_counters = be16_to_cpu(descriptor->counters);
ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
mac = descriptor->mac;
- netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
+ dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
- netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_pages_per_qpl);
+ dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_pages_per_qpl);
priv->rx_desc_cnt = priv->rx_pages_per_qpl;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
@@ -385,3 +583,46 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
return gve_adminq_execute_cmd(priv, &cmd);
}
+
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
+ cmd.report_stats = (struct gve_adminq_report_stats) {
+ .stats_report_len = cpu_to_be64(stats_report_len),
+ .stats_report_addr = cpu_to_be64(stats_report_addr),
+ .interval = cpu_to_be64(interval),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_report_link_speed(struct gve_priv *priv)
+{
+ union gve_adminq_command gvnic_cmd;
+ dma_addr_t link_speed_region_bus;
+ __be64 *link_speed_region;
+ int err;
+
+ link_speed_region =
+ dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
+ &link_speed_region_bus, GFP_KERNEL);
+
+ if (!link_speed_region)
+ return -ENOMEM;
+
+ memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
+ gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
+ gvnic_cmd.report_link_speed.link_speed_address =
+ cpu_to_be64(link_speed_region_bus);
+
+ err = gve_adminq_execute_cmd(priv, &gvnic_cmd);
+
+ priv->link_speed = be64_to_cpu(*link_speed_region);
+ dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
+ link_speed_region_bus);
+ return err;
+}
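gve_adminq_report_link_speed() above shows the usual coherent-DMA round
trip: allocate a small buffer visible to both CPU and device, pass its bus
address in the command, convert the big-endian result after completion, and
free the buffer. A hedged sketch of just that pattern (the device hand-off
in the middle is elided; the helper name is illustrative):

    #include <linux/dma-mapping.h>

    static int read_device_be64(struct device *dev, u64 *out)
    {
            dma_addr_t bus;
            __be64 *buf;

            buf = dma_alloc_coherent(dev, sizeof(*buf), &bus, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... ask the device to DMA its value to 'bus' and wait ... */

            *out = be64_to_cpu(*buf);
            dma_free_coherent(dev, sizeof(*buf), buf, bus);
            return 0;
    }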
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 4dfa06edc0f8..015796a20118 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -21,6 +21,8 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
+ GVE_ADMINQ_REPORT_STATS = 0xC,
+ GVE_ADMINQ_REPORT_LINK_SPEED = 0xD
};
/* Admin queue status codes */
@@ -172,6 +174,51 @@ struct gve_adminq_set_driver_parameter {
static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
+struct gve_adminq_report_stats {
+ __be64 stats_report_len;
+ __be64 stats_report_addr;
+ __be64 interval;
+};
+
+static_assert(sizeof(struct gve_adminq_report_stats) == 24);
+
+struct gve_adminq_report_link_speed {
+ __be64 link_speed_address;
+};
+
+static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+
+struct stats {
+ __be32 stat_name;
+ __be32 queue_id;
+ __be64 value;
+};
+
+static_assert(sizeof(struct stats) == 16);
+
+struct gve_stats_report {
+ __be64 written_count;
+ struct stats stats[];
+};
+
+static_assert(sizeof(struct gve_stats_report) == 8);
+
+enum gve_stat_names {
+ // stats from gve
+ TX_WAKE_CNT = 1,
+ TX_STOP_CNT = 2,
+ TX_FRAMES_SENT = 3,
+ TX_BYTES_SENT = 4,
+ TX_LAST_COMPLETION_PROCESSED = 5,
+ RX_NEXT_EXPECTED_SEQUENCE = 6,
+ RX_BUFFERS_POSTED = 7,
+ // stats from NIC
+ RX_QUEUE_DROP_CNT = 65,
+ RX_NO_BUFFERS_POSTED = 66,
+ RX_DROPS_PACKET_OVER_MRU = 67,
+ RX_DROPS_INVALID_CHECKSUM = 68,
+};
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -187,6 +234,8 @@ union gve_adminq_command {
struct gve_adminq_register_page_list reg_page_list;
struct gve_adminq_unregister_page_list unreg_page_list;
struct gve_adminq_set_driver_parameter set_driver_param;
+ struct gve_adminq_report_stats report_stats;
+ struct gve_adminq_report_link_speed report_link_speed;
};
};
u8 reserved[64];
@@ -197,8 +246,6 @@ static_assert(sizeof(union gve_adminq_command) == 64);
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig);
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t counter_array_bus_addr,
@@ -206,12 +253,15 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval);
+int gve_adminq_report_link_speed(struct gve_priv *priv);
#endif /* _GVE_ADMINQ_H */
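The static_asserts in this header pin the wire format: every admin-queue
struct is a fixed-layout, big-endian message, so accidental padding or a
field change breaks the build rather than the device ABI. A standalone C11
sketch of the same trick (the struct and size are illustrative):

    #include <assert.h>   /* C11 static_assert */
    #include <stdint.h>

    struct wire_stat {
            uint32_t stat_name;   /* big-endian on the wire */
            uint32_t queue_id;
            uint64_t value;
    };

    /* Fails to compile if padding or a field edit changes the layout. */
    static_assert(sizeof(struct wire_stat) == 16, "wire_stat must be 16B");

    int main(void) { return 0; }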
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index d8fa816f4473..7b44769bd87c 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
+#include "gve_adminq.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -34,41 +35,84 @@ static u32 gve_get_msglevel(struct net_device *netdev)
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "interface_up_cnt", "interface_down_cnt", "reset_cnt",
+ "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
+};
+
+static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
+ "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
+ "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
+};
+
+static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
+};
+
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+ "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
+ "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
+ "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
+ "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
+ "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
+ "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+};
+
+static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
+ "report-stats",
};
#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats)
-#define NUM_GVE_TX_CNTS 5
-#define NUM_GVE_RX_CNTS 2
+#define GVE_ADMINQ_STATS_LEN ARRAY_SIZE(gve_gstrings_adminq_stats)
+#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
+#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
+#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data;
- int i;
+ int i, j;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(s, *gve_gstrings_main_stats,
+ sizeof(gve_gstrings_main_stats));
+ s += sizeof(gve_gstrings_main_stats);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_rx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_fill_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "tx_req[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_done[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_wake[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_stop[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_event_counter[%u]", i);
- s += ETH_GSTRING_LEN;
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_tx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
+
+ memcpy(s, *gve_gstrings_adminq_stats,
+ sizeof(gve_gstrings_adminq_stats));
+ s += sizeof(gve_gstrings_adminq_stats);
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(s, *gve_gstrings_priv_flags,
+ sizeof(gve_gstrings_priv_flags));
+ s += sizeof(gve_gstrings_priv_flags);
+ break;
+
+ default:
+ break;
}
}
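gve_get_strings() now expands template arrays instead of hard-coding each
name: every per-queue entry carries a "[%u]" placeholder that snprintf()
fills with the queue id, so adding a stat means adding one template string.
A standalone sketch of that expansion (queue counts are illustrative;
ETH_GSTRING_LEN matches the kernel's 32):

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    static const char rx_tmpl[][ETH_GSTRING_LEN] = {
            "rx_posted_desc[%u]", "rx_completed_desc[%u]",
    };

    int main(void)
    {
            char s[ETH_GSTRING_LEN];
            unsigned int q, j;

            for (q = 0; q < 2; q++)
                    for (j = 0; j < 2; j++) {
                            snprintf(s, sizeof(s), rx_tmpl[j], q);
                            puts(s);   /* e.g. "rx_posted_desc[0]" */
                    }
            return 0;
    }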
@@ -78,9 +122,11 @@ static int gve_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return GVE_MAIN_STATS_LEN +
+ return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
(priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ case ETH_SS_PRIV_FLAGS:
+ return GVE_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -90,24 +136,56 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct gve_priv *priv = netdev_priv(netdev);
- u64 rx_pkts, rx_bytes, tx_pkts, tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ int stats_idx, base_stats_idx, max_stats_idx;
+ struct stats *report_stats;
+ int *rx_qid_to_stats_idx;
+ int *tx_qid_to_stats_idx;
+ struct gve_priv *priv;
+ bool skip_nic_stats;
unsigned int start;
int ring;
- int i;
+ int i, j;
ASSERT_RTNL();
- for (rx_pkts = 0, rx_bytes = 0, ring = 0;
+ priv = netdev_priv(netdev);
+ report_stats = priv->stats_report->stats;
+ rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!rx_qid_to_stats_idx)
+ return;
+ tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_qid_to_stats_idx) {
+ kfree(rx_qid_to_stats_idx);
+ return;
+ }
+ for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
+ rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
+ struct gve_rx_ring *rx = &priv->rx[ring];
+
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
- rx_pkts += priv->rx[ring].rpackets;
- rx_bytes += priv->rx[ring].rbytes;
+ tmp_rx_pkts = rx->rpackets;
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
+ rx_pkts += tmp_rx_pkts;
+ rx_bytes += tmp_rx_bytes;
+ rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
+ rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
+ rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, ring = 0;
@@ -116,10 +194,12 @@ gve_get_ethtool_stats(struct net_device *netdev,
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
- tx_pkts += priv->tx[ring].pkt_done;
- tx_bytes += priv->tx[ring].bytes_done;
+ tmp_tx_pkts = priv->tx[ring].pkt_done;
+ tmp_tx_bytes = priv->tx[ring].bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
+ tx_pkts += tmp_tx_pkts;
+ tx_bytes += tmp_tx_bytes;
}
}
@@ -128,22 +208,102 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
- /* Skip rx_dropped and tx_dropped */
- i += 2;
+ /* total rx dropped packets */
+ data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
+ rx_desc_err_dropped_pkt;
+ /* Skip tx_dropped */
+ i++;
+
data[i++] = priv->tx_timeo_cnt;
+ data[i++] = rx_skb_alloc_fail;
+ data[i++] = rx_buf_alloc_fail;
+ data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = priv->interface_up_cnt;
+ data[i++] = priv->interface_down_cnt;
+ data[i++] = priv->reset_cnt;
+ data[i++] = priv->page_alloc_fail;
+ data[i++] = priv->dma_mapping_error;
+ data[i++] = priv->stats_report_trigger_cnt;
i = GVE_MAIN_STATS_LEN;
+ /* For rx cross-reporting stats, start from nic rx stats in report */
+ base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ base_stats_idx;
+ /* Preprocess the stats report for rx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_RX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) {
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ rx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk RX rings */
if (priv->rx) {
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
struct gve_rx_ring *rx = &priv->rx[ring];
- data[i++] = rx->cnt;
data[i++] = rx->fill_cnt;
+ data[i++] = rx->cnt;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ data[i++] = tmp_rx_bytes;
+ /* rx dropped packets */
+ data[i++] = tmp_rx_skb_alloc_fail +
+ tmp_rx_buf_alloc_fail +
+ tmp_rx_desc_err_dropped_pkt;
+ data[i++] = rx->rx_copybreak_pkt;
+ data[i++] = rx->rx_copied_pkt;
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC rx stats */
+ i += NIC_RX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);
+
+ data[i++] = value;
+ }
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
}
+
+ /* For tx cross-reporting stats, start from nic tx stats in report */
+ base_stats_idx = max_stats_idx;
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ max_stats_idx;
+ /* Preprocess the stats report for tx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_TX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) {
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ tx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk TX rings */
if (priv->tx) {
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
@@ -151,14 +311,51 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx->req;
data[i++] = tx->done;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
+ tmp_tx_bytes = tx->bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
tx));
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC tx stats */
+ i += NIC_TX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
+ data[i++] = value;
+ }
}
} else {
i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
}
+
+ kfree(rx_qid_to_stats_idx);
+ kfree(tx_qid_to_stats_idx);
+ /* AQ Stats */
+ data[i++] = priv->adminq_prod_cnt;
+ data[i++] = priv->adminq_cmd_fail;
+ data[i++] = priv->adminq_timeouts;
+ data[i++] = priv->adminq_describe_device_cnt;
+ data[i++] = priv->adminq_cfg_device_resources_cnt;
+ data[i++] = priv->adminq_register_page_list_cnt;
+ data[i++] = priv->adminq_unregister_page_list_cnt;
+ data[i++] = priv->adminq_create_tx_queue_cnt;
+ data[i++] = priv->adminq_create_rx_queue_cnt;
+ data[i++] = priv->adminq_destroy_tx_queue_cnt;
+ data[i++] = priv->adminq_destroy_rx_queue_cnt;
+ data[i++] = priv->adminq_dcfg_device_resources_cnt;
+ data[i++] = priv->adminq_set_driver_parameter_cnt;
+ data[i++] = priv->adminq_report_stats_cnt;
+ data[i++] = priv->adminq_report_link_speed_cnt;
}
static void gve_get_channels(struct net_device *netdev,
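Throughout gve_get_ethtool_stats() the counters are now read with the
u64_stats seqcount pattern: snapshot into temporaries inside the
fetch_begin/fetch_retry loop, and only accumulate once a consistent snapshot
is obtained (on 64-bit this compiles away; on 32-bit it prevents torn
reads). A minimal kernel-side sketch of one such reader (the ring fields
mirror the driver; the helper itself is illustrative):

    #include <linux/u64_stats_sync.h>

    static u64 read_rx_bytes(struct gve_rx_ring *rx)
    {
            unsigned int start;
            u64 bytes;

            do {
                    start = u64_stats_fetch_begin(&rx->statss);
                    bytes = rx->rbytes;   /* may be torn on 32-bit */
            } while (u64_stats_fetch_retry(&rx->statss, start));

            return bytes;
    }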
@@ -230,6 +427,95 @@ static int gve_user_reset(struct net_device *netdev, u32 *flags)
return -EOPNOTSUPP;
}
+static int gve_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna, void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)value = priv->rx_copybreak;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gve_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna,
+ const void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 len;
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ len = *(u32 *)value;
+ if (len > PAGE_SIZE / 2)
+ return -EINVAL;
+ priv->rx_copybreak = len;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 gve_get_priv_flags(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 ret_flags = 0;
+
+ /* Only one flag exists currently: report-stats (BIT(0)), so set that flag. */
+ if (priv->ethtool_flags & BIT(0))
+ ret_flags |= BIT(0);
+ return ret_flags;
+}
+
+static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u64 ori_flags, new_flags;
+
+ ori_flags = READ_ONCE(priv->ethtool_flags);
+ new_flags = ori_flags;
+
+ /* Only one priv flag exists: report-stats (BIT(0)) */
+ if (flags & BIT(0))
+ new_flags |= BIT(0);
+ else
+ new_flags &= ~(BIT(0));
+ priv->ethtool_flags = new_flags;
+ /* start report-stats timer when user turns report stats on. */
+ if (flags & BIT(0)) {
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ }
+ /* Zero out gve stats when report-stats is turned off and
+ * delete the report-stats timer.
+ */
+ if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
+ int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
+ priv->tx_cfg.num_queues;
+ int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
+ priv->rx_cfg.num_queues;
+
+ memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
+ sizeof(struct stats));
+ del_timer_sync(&priv->stats_report_timer);
+ }
+ return 0;
+}
+
+static int gve_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = gve_adminq_report_link_speed(priv);
+
+ cmd->base.speed = priv->link_speed;
+ return err;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
@@ -242,4 +528,9 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
+ .get_tunable = gve_get_tunable,
+ .set_tunable = gve_set_tunable,
+ .get_priv_flags = gve_get_priv_flags,
+ .set_priv_flags = gve_set_priv_flags,
+ .get_link_ksettings = gve_get_link_ksettings
};
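With these hooks in place, the new knobs are reachable through the standard
ethtool commands (the interface name here is illustrative):

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 report-stats on
    ethtool --get-tunable eth0 rx-copybreak
    ethtool --set-tunable eth0 rx-copybreak 256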
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e032563ceefd..02e7d74779f4 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -78,6 +78,65 @@ static void gve_free_counter_array(struct gve_priv *priv)
priv->counter_array = NULL;
}
+/* NIC requests to report stats */
+static void gve_stats_report_task(struct work_struct *work)
+{
+ struct gve_priv *priv = container_of(work, struct gve_priv,
+ stats_report_task);
+ if (gve_get_do_report_stats(priv)) {
+ gve_handle_report_stats(priv);
+ gve_clear_do_report_stats(priv);
+ }
+}
+
+static void gve_stats_report_schedule(struct gve_priv *priv)
+{
+ if (!gve_get_probe_in_progress(priv) &&
+ !gve_get_reset_in_progress(priv)) {
+ gve_set_do_report_stats(priv);
+ queue_work(priv->gve_wq, &priv->stats_report_task);
+ }
+}
+
+static void gve_stats_report_timer(struct timer_list *t)
+{
+ struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ gve_stats_report_schedule(priv);
+}
+
+static int gve_alloc_stats_report(struct gve_priv *priv)
+{
+ int tx_stats_num, rx_stats_num;
+
+ tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
+ priv->tx_cfg.num_queues;
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+ tx_stats_num + rx_stats_num);
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+ if (!priv->stats_report)
+ return -ENOMEM;
+ /* Set up timer for the report-stats task */
+ timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
+ priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
+ return 0;
+}
+
+static void gve_free_stats_report(struct gve_priv *priv)
+{
+ del_timer_sync(&priv->stats_report_timer);
+ dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
+ priv->stats_report, priv->stats_report_bus);
+ priv->stats_report = NULL;
+}
+
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
struct gve_priv *priv = arg;
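The report-stats machinery above uses the common timer-plus-workqueue
split: the timer callback runs in softirq context, so it only re-arms
itself and queues a work item, and the actual stats update runs later in
process context. A hedged sketch of that shape (the struct and period are
illustrative; the APIs are the ones used above):

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct demo {
            struct timer_list timer;
            struct work_struct work;
            struct workqueue_struct *wq;
            unsigned long period_ms;
    };

    static void demo_timer_fn(struct timer_list *t)
    {
            struct demo *d = from_timer(d, t, timer);

            mod_timer(&d->timer,
                      round_jiffies(jiffies + msecs_to_jiffies(d->period_ms)));
            queue_work(d->wq, &d->work);   /* defer the heavy lifting */
    }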
@@ -270,6 +329,9 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_stats_report(priv);
+ if (err)
+ goto abort_with_ntfy_blocks;
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
@@ -279,10 +341,18 @@ static int gve_setup_device_resources(struct gve_priv *priv)
dev_err(&priv->pdev->dev,
"could not setup device_resources: err=%d\n", err);
err = -ENXIO;
- goto abort_with_ntfy_blocks;
+ goto abort_with_stats_report;
}
+ err = gve_adminq_report_stats(priv, priv->stats_report_len,
+ priv->stats_report_bus,
+ GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err)
+ dev_err(&priv->pdev->dev,
+ "Failed to report stats: err=%d\n", err);
gve_set_device_resources_ok(priv);
return 0;
+abort_with_stats_report:
+ gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
@@ -298,6 +368,13 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ /* detach the stats report */
+ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to detach stats report: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
err = gve_adminq_deconfigure_device_resources(priv);
if (err) {
dev_err(&priv->pdev->dev,
@@ -308,6 +385,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
}
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
+ gve_free_stats_report(priv);
gve_clear_device_resources_ok(priv);
}
@@ -371,36 +449,37 @@ static int gve_create_rings(struct gve_priv *priv)
int err;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_create_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
+ err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
+ priv->tx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_create_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- /* Rx data ring has been prefilled with packet buffers at
- * queue allocation time.
- * Write the doorbell to provide descriptor slots and packet
- * buffers to the NIC.
+ netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
+ priv->tx_cfg.num_queues);
+
+ err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
+ priv->rx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
*/
- gve_rx_write_doorbell(priv, &priv->rx[i]);
- netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
+ priv->rx_cfg.num_queues);
+
+ /* Rx data ring has been prefilled with packet buffers at queue
+ * allocation time.
+ * Write the doorbell to provide descriptor slots and packet buffers
+ * to the NIC.
+ */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_write_doorbell(priv, &priv->rx[i]);
return 0;
}
@@ -458,34 +537,23 @@ free_tx:
static int gve_destroy_rings(struct gve_priv *priv)
{
int err;
- int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
+ err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy tx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
+ netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
+ err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy rx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
return 0;
}
@@ -514,14 +582,18 @@ static void gve_free_rings(struct gve_priv *priv)
}
}
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction dir)
{
*page = alloc_page(GFP_KERNEL);
- if (!*page)
+ if (!*page) {
+ priv->page_alloc_fail++;
return -ENOMEM;
+ }
*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(dev, *dma)) {
+ priv->dma_mapping_error++;
put_page(*page);
return -ENOMEM;
}
@@ -556,7 +628,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
for (i = 0; i < pages; i++) {
- err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
+ err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id));
/* caller handles clean up */
@@ -695,8 +767,14 @@ static int gve_open(struct net_device *dev)
goto reset;
gve_set_device_rings_ok(priv);
+ if (gve_get_report_stats(priv))
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+
gve_turnup(priv);
- netif_carrier_on(dev);
+ queue_work(priv->gve_wq, &priv->service_task);
+ priv->interface_up_cnt++;
return 0;
free_rings:
@@ -735,9 +813,11 @@ static int gve_close(struct net_device *dev)
goto err;
gve_clear_device_rings_ok(priv);
}
+ del_timer_sync(&priv->stats_report_timer);
gve_free_rings(priv);
gve_free_qpls(priv);
+ priv->interface_down_cnt++;
return 0;
err:
@@ -817,6 +897,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_tx_disable(priv->dev);
gve_clear_napi_enabled(priv);
+ gve_clear_report_stats(priv);
}
static void gve_turnup(struct gve_priv *priv)
@@ -867,6 +948,10 @@ static void gve_handle_status(struct gve_priv *priv, u32 status)
dev_info(&priv->pdev->dev, "Device requested reset.\n");
gve_set_do_reset(priv);
}
+ if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
+ priv->stats_report_trigger_cnt++;
+ gve_set_do_report_stats(priv);
+ }
}
static void gve_handle_reset(struct gve_priv *priv)
@@ -885,16 +970,95 @@ static void gve_handle_reset(struct gve_priv *priv)
}
}
-/* Handle NIC status register changes and reset requests */
+void gve_handle_report_stats(struct gve_priv *priv)
+{
+ int idx, stats_idx = 0;
+ u64 tx_bytes;
+ unsigned int start = 0;
+ struct stats *stats = priv->stats_report->stats;
+
+ if (!gve_get_report_stats(priv))
+ return;
+
+ be64_add_cpu(&priv->stats_report->written_count, 1);
+ /* tx stats */
+ if (priv->tx) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ do {
+ start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ tx_bytes = priv->tx[idx].bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_WAKE_CNT),
+ .value = cpu_to_be64(priv->tx[idx].wake_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_STOP_CNT),
+ .value = cpu_to_be64(priv->tx[idx].stop_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_FRAMES_SENT),
+ .value = cpu_to_be64(priv->tx[idx].req),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_BYTES_SENT),
+ .value = cpu_to_be64(tx_bytes),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
+ .value = cpu_to_be64(priv->tx[idx].done),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+ /* rx stats */
+ if (priv->rx) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
+ .value = cpu_to_be64(priv->rx[idx].desc.seqno),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+}
+
+static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
+{
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ if (link_status == netif_carrier_ok(priv->dev))
+ return;
+
+ if (link_status) {
+ netdev_info(priv->dev, "Device link is up.\n");
+ netif_carrier_on(priv->dev);
+ } else {
+ netdev_info(priv->dev, "Device link is down.\n");
+ netif_carrier_off(priv->dev);
+ }
+}
+
+/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
struct gve_priv *priv = container_of(work, struct gve_priv,
service_task);
+ u32 status = ioread32be(&priv->reg_bar0->device_status);
- gve_handle_status(priv,
- ioread32be(&priv->reg_bar0->device_status));
+ gve_handle_status(priv, status);
gve_handle_reset(priv);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -924,7 +1088,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
- netif_err(priv, drv, priv->dev, "Could not set mtu");
+ dev_err(&priv->pdev->dev, "Could not set mtu");
goto err;
}
}
@@ -964,10 +1128,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues);
}
- netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
- priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
- netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
- priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
+ priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
+ dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
+ priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
setup_device:
err = gve_setup_device_resources(priv);
@@ -1047,6 +1211,10 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown)
/* Set it all back up */
err = gve_reset_recovery(priv, was_up);
gve_clear_reset_in_progress(priv);
+ priv->reset_cnt++;
+ priv->interface_up_cnt = 0;
+ priv->interface_down_cnt = 0;
+ priv->stats_report_trigger_cnt = 0;
return err;
}
@@ -1149,6 +1317,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->db_bar2 = db_bar;
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
+ priv->ethtool_flags = 0x0;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
@@ -1158,6 +1327,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_netdev;
}
INIT_WORK(&priv->service_task, gve_service_task);
+ INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
priv->tx_cfg.max_queues = max_tx_queues;
priv->rx_cfg.max_queues = max_rx_queues;
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fb655463c357 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -23,5 +23,6 @@ struct gve_registers {
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
+ GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
};
#endif /* _GVE_REGISTER_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9f52e72ff641..008fa897a3e6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -225,7 +225,8 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
+ struct net_device *dev,
struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
@@ -242,6 +243,11 @@ static struct sk_buff *gve_rx_copy(struct net_device *dev,
skb_copy_to_linear_data(skb, va, len);
skb->protocol = eth_type_trans(skb, dev);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+
return skb;
}
@@ -284,8 +290,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
u16 len;
/* drop this packet */
- if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
+ if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
@@ -300,11 +310,14 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
if (PAGE_SIZE == 4096) {
if (len <= priv->rx_copybreak) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
goto have_skb;
}
if (unlikely(!gve_can_recycle_pages(dev))) {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
goto have_skb;
}
pagecount = page_count(page_info->page);
@@ -314,8 +327,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
* stack.
*/
skb = gve_rx_add_frags(dev, napi, page_info, len);
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
/* Make sure the kernel stack can't release the page */
get_page(page_info->page);
/* "flip" to other packet buffer on this page */
@@ -324,21 +341,25 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
/* We have previously passed the other half of this
* page up the stack, but it has not yet been freed.
*/
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
} else {
WARN(pagecount < 1, "Pagecount should never be < 1");
return false;
}
} else {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
}
have_skb:
/* We didn't manage to allocate an skb but we haven't had any
* reset worthy failures.
*/
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
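The rx_copybreak changes above encode a standard trade-off: frames at or
below the threshold are memcpy'd into a fresh skb (cheap for tiny packets,
and the receive page stays put), while larger frames are attached as page
frags to avoid the copy, falling back to a copy whenever the page cannot be
recycled. A small standalone sketch of just that decision (the helper and
thresholds are illustrative):

    #include <stdbool.h>

    static bool should_copy(unsigned int len, unsigned int rx_copybreak,
                            bool can_recycle_pages)
    {
            if (len <= rx_copybreak)
                    return true;    /* tiny frame: copying is cheaper */
            if (!can_recycle_pages)
                    return true;    /* page can't be flipped: must copy */
            return false;           /* zero-copy via page frag */
    }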
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 08339278c722..00fafc0f8512 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -270,7 +270,7 @@ static void hnae_fini_queue(struct hnae_queue *q)
hnae_fini_ring(&q->rx_ring);
}
-/**
+/*
* ae_chain - define ae chain head
*/
static RAW_NOTIFIER_HEAD(ae_chain);
@@ -438,7 +438,7 @@ EXPORT_SYMBOL(hnae_ae_register);
/**
* hnae_ae_unregister - unregisters a HNAE AE engine
- * @cdev: the device to unregister
+ * @hdev: the device to unregister
*/
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b43dec0560a8..b98244f75ab9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -13,8 +13,6 @@
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"
-#define AE_NAME_PORT_ID_IDX 6
-
static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
{
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 9a907947ba19..4a448138b4ec 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -374,11 +374,12 @@ static void hns_mac_param_get(struct mac_params *param,
}
/**
- *hns_mac_queue_config_bc_en - set broadcast rx&tx enable
- *@mac_cb: mac device
- *@queue: queue number
- *@en:enable
- *retuen 0 - success , negative --fail
+ * hns_mac_queue_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: queue number
+ * @vlan_id: vlan id
+ * @enable: enable
+ * return 0 - success, negative - fail
*/
static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
u32 port_num, u16 vlan_id, bool enable)
@@ -408,11 +409,11 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_vm_config_bc_en - set broadcast rx&tx enable
- *@mac_cb: mac device
- *@vmid: vm id
- *@en:enable
- *retuen 0 - success , negative --fail
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @enable: enable
+ * return 0 - success, negative - fail
*/
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
{
@@ -542,8 +543,8 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
/**
* hns_mac_get_autoneg - get auto autonegotiation
* @mac_cb: mac control block
- * @enable: enable or not
- * retuen 0 - success , negative --fail
+ * @auto_neg: output pointer to autoneg result
+ * return 0 - success, negative - fail
*/
void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
{
@@ -560,7 +561,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
* @mac_cb: mac control block
* @rx_en: rx enable status
* @tx_en: tx enable status
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
{
@@ -578,7 +579,7 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
* hns_mac_set_autoneg - set auto autonegotiation
* @mac_cb: mac control block
* @enable: enable or not
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
{
@@ -623,7 +624,7 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
/**
* hns_mac_init_ex - mac init
* @mac_cb: mac control block
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
{
@@ -800,7 +801,6 @@ static const struct {
/**
*hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
- *@np:device node
* return: 0 --success, negative --fail
*/
static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
@@ -951,7 +951,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
/**
* hns_mac_get_mode - get mac mode
* @phy_if: phy interface
- * retuen 0 - gmac, 1 - xgmac , negative --fail
+ * return 0 - gmac, 1 - xgmac, negative - fail
*/
static int hns_mac_get_mode(phy_interface_t phy_if)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index acfa86e5296f..87d3db4666df 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -207,7 +207,7 @@ static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
{
@@ -216,8 +216,8 @@ static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
- * @dsaf_id: dsa fabric id
- * @hns_dsaf_reg_cnt_clr_ce: config value
+ * @dsaf_dev: dsa fabric id
+ * @reg_cnt_clr_ce: config value
*/
static void
hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
@@ -228,8 +228,8 @@ hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
/**
* hns_ppe_qid_cfg - config ppe qid
- * @dsaf_id: dsa fabric id
- * @pppe_qid_cfg: value array
+ * @dsaf_dev: dsa fabric id
+ * @qid_cfg: value array
*/
static void
hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
@@ -285,8 +285,8 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sw_port_type_cfg - cfg sw type
- * @dsaf_id: dsa fabric id
- * @psw_port_type: array
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
*/
static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_sw_port_type port_type)
@@ -303,8 +303,8 @@ static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_stp_port_type_cfg - cfg stp type
- * @dsaf_id: dsa fabric id
- * @pstp_port_type: array
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
*/
static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_stp_port_type port_type)
@@ -323,7 +323,7 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
(AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM)
/**
* hns_dsaf_sbm_cfg - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
{
@@ -342,7 +342,7 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_cfg_mib_en - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
{
@@ -387,7 +387,7 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_bp_wl_cfg - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
{
@@ -556,7 +556,7 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_voq_bp_all_thrd_cfg - voq
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
{
@@ -599,7 +599,7 @@ static void hns_dsaf_tbl_tcam_match_cfg(
/**
* hns_dsaf_tbl_tcam_data_cfg - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
* @ptbl_tcam_data: addr
*/
static void hns_dsaf_tbl_tcam_data_cfg(
@@ -614,8 +614,8 @@ static void hns_dsaf_tbl_tcam_data_cfg(
/**
* dsaf_tbl_tcam_mcast_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_tcam_mcast: addr
+ * @dsaf_dev: dsa fabric id
+ * @mcast: addr
*/
static void hns_dsaf_tbl_tcam_mcast_cfg(
struct dsaf_device *dsaf_dev,
@@ -648,8 +648,8 @@ static void hns_dsaf_tbl_tcam_mcast_cfg(
/**
* hns_dsaf_tbl_tcam_ucast_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_tcam_ucast: addr
+ * @dsaf_dev: dsa fabric id
+ * @tbl_tcam_ucast: addr
*/
static void hns_dsaf_tbl_tcam_ucast_cfg(
struct dsaf_device *dsaf_dev,
@@ -674,8 +674,8 @@ static void hns_dsaf_tbl_tcam_ucast_cfg(
/**
* hns_dsaf_tbl_line_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_lin: addr
+ * @dsaf_dev: dsa fabric id
+ * @tbl_lin: addr
*/
static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_line_cfg *tbl_lin)
@@ -695,7 +695,7 @@ static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tbl_tcam_mcast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
{
@@ -710,7 +710,7 @@ static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_line_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
{
@@ -725,7 +725,7 @@ static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_tcam_data_mcast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_data_mcast_pul(
struct dsaf_device *dsaf_dev)
@@ -743,7 +743,7 @@ static void hns_dsaf_tbl_tcam_data_mcast_pul(
/**
* hns_dsaf_tbl_tcam_data_ucast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_data_ucast_pul(
struct dsaf_device *dsaf_dev)
@@ -768,8 +768,7 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
/**
* hns_dsaf_tbl_stat_en - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_stat_en: addr
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
{
@@ -785,7 +784,7 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_rocee_bp_en - rocee back press enable
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
{
@@ -852,9 +851,9 @@ static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_single_line_tbl_cfg - INT
- * @dsaf_id: dsa fabric id
- * @address:
- * @ptbl_line:
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_line: the line
*/
static void hns_dsaf_single_line_tbl_cfg(
struct dsaf_device *dsaf_dev,
@@ -876,9 +875,10 @@ static void hns_dsaf_single_line_tbl_cfg(
/**
* hns_dsaf_tcam_uc_cfg - INT
- * @dsaf_id: dsa fabric id
- * @address,
- * @ptbl_tcam_data,
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
*/
static void hns_dsaf_tcam_uc_cfg(
struct dsaf_device *dsaf_dev, u32 address,
@@ -904,7 +904,8 @@ static void hns_dsaf_tcam_uc_cfg(
* @dsaf_dev: dsa fabric device struct pointer
* @address: tcam index
* @ptbl_tcam_data: tcam data struct pointer
- * @ptbl_tcam_mcast: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mcast: tcam data struct pointer
*/
static void hns_dsaf_tcam_mc_cfg(
struct dsaf_device *dsaf_dev, u32 address,
@@ -933,8 +934,10 @@ static void hns_dsaf_tcam_mc_cfg(
/**
* hns_dsaf_tcam_uc_cfg_vague - INT
* @dsaf_dev: dsa fabric device struct pointer
- * @address,
- * @ptbl_tcam_data,
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_uc: the unicast data
*/
static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
u32 address,
@@ -960,10 +963,10 @@ static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tcam_mc_cfg_vague - INT
* @dsaf_dev: dsa fabric device struct pointer
- * @address,
- * @ptbl_tcam_data,
- * @ptbl_tcam_mask
- * @ptbl_tcam_mcast
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_mc: the multicast data
*/
static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
u32 address,
@@ -988,8 +991,8 @@ static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tcam_mc_invld - INT
- * @dsaf_id: dsa fabric id
- * @address
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
*/
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
@@ -1024,10 +1027,10 @@ hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
/**
* hns_dsaf_tcam_uc_get - INT
- * @dsaf_id: dsa fabric id
- * @address
- * @ptbl_tcam_data
- * @ptbl_tcam_ucast
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
*/
static void hns_dsaf_tcam_uc_get(
struct dsaf_device *dsaf_dev, u32 address,
@@ -1077,10 +1080,10 @@ static void hns_dsaf_tcam_uc_get(
/**
* hns_dsaf_tcam_mc_get - INT
- * @dsaf_id: dsa fabric id
- * @address
- * @ptbl_tcam_data
- * @ptbl_tcam_ucast
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_mcast: tcam multicast data
*/
static void hns_dsaf_tcam_mc_get(
struct dsaf_device *dsaf_dev, u32 address,
@@ -1127,7 +1130,7 @@ static void hns_dsaf_tcam_mc_get(
/**
* hns_dsaf_tbl_line_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
{
@@ -1141,7 +1144,7 @@ static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_tcam_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
{
@@ -1156,7 +1159,9 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
- * @mac_cb: mac contrl block
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac id
+ * @tc_en: traffic class
*/
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
int mac_id, int tc_en)
@@ -1209,8 +1214,7 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
/**
* hns_dsaf_tbl_tcam_init - INT
- * @dsaf_id: dsa fabric id
- * @dsaf_mode
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
@@ -1263,7 +1267,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_inode_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
{
@@ -1315,7 +1319,7 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
{
@@ -1369,7 +1373,7 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
{
@@ -1381,7 +1385,7 @@ static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_voq_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
{
@@ -1435,7 +1439,7 @@ static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_init - init dsa fabric
* @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
{
@@ -2099,7 +2103,7 @@ static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
/**
* hns_dsaf_free_dev - free dev mem
- * @dev: struct device pointer
+ * @dsaf_dev: struct device pointer
*/
static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
{
@@ -2108,9 +2112,9 @@ static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
/**
* dsaf_pfc_unit_cnt - set pfc unit count
- * @dsaf_id: dsa fabric id
- * @pport_rate: value array
- * @pdsaf_pfc_unit_cnt: value array
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: id in use
+ * @rate: value array
*/
static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
enum dsaf_port_rate_mode rate)
@@ -2139,8 +2143,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
/**
* dsaf_port_work_rate_cfg - fifo
- * @dsaf_id: dsa fabric id
- * @xge_ge_work_mode
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac id
+ * @rate_mode: port rate mode
*/
static void
hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
@@ -2253,7 +2258,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
/**
*hns_dsaf_get_regs - dump dsaf regs
- *@dsaf_dev: dsaf device
+ *@ddev: dsaf device
+ *@port: port index
*@data:data for value of regs
*/
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
@@ -2690,6 +2696,7 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
/**
*hns_dsaf_get_sset_count - get dsaf string set count
+ *@dsaf_dev: dsaf device
*@stringset: type of values in data
*return dsaf string name count
*/
@@ -2711,6 +2718,7 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
*@stringset: string set index
*@data:strings name value
*@port:port index
+ *@dsaf_dev: dsaf device
*/
void hns_dsaf_get_strings(int stringset, u8 *data, int port,
struct dsaf_device *dsaf_dev)
@@ -2943,7 +2951,7 @@ int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
/**
* dsaf_probe - probe dsaf dev
* @pdev: dsaf platform device
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
static int hns_dsaf_probe(struct platform_device *pdev)
{
@@ -3038,8 +3046,8 @@ module_platform_driver(g_dsaf_driver);
/**
* hns_dsaf_roce_reset - reset dsaf and roce
* @dsaf_fwnode: Pointer to framework node for the dsaf
- * @enable: false - request reset , true - drop reset
- * retuen 0 - success , negative -fail
+ * @dereset: false - request reset, true - drop reset
+ * return 0 - success, negative - fail
*/
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a769273b36f7..a9aca8c24e90 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -330,11 +330,12 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* hns_dsaf_srst_chns - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
* @msk: xbar channels mask value:
+ * @dereset: false - request reset, true - drop reset
+ *
* bit0-5 for xge0-5
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -353,11 +354,12 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* hns_dsaf_srst_chns - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
* @msk: xbar channels mask value:
+ * @dereset: false - request reset, true - drop reset
+ *
* bit0-5 for xge0-5
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -612,7 +614,8 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
- * retuen 0 == success
+ * @en: enable or disable
+ * return 0 on success
*/
static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 2b34b553acf3..d0f8b1fff333 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -66,8 +66,8 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
/**
* hns_ppe_common_get_cfg - get ppe common config
* @dsaf_dev: dsaf device
- * comm_index: common index
- * retuen 0 - success , negative --fail
+ * @comm_index: common index
+ * return 0 - success, negative - fail
*/
static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
@@ -143,7 +143,7 @@ static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en)
/**
* hns_ppe_checksum_hw - set ppe checksum calculate
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @value: value
*/
static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
@@ -179,7 +179,7 @@ static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
/**
* hns_ppe_set_port_mode - set port mode
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @mode: port mode
*/
static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
@@ -344,7 +344,7 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/**
* ppe_uninit_hw - uninit ppe
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
*/
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
{
@@ -384,7 +384,8 @@ void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
/**
* hns_ppe_reset - reinit ppe/rcb hw
* @dsaf_dev: dsaf device
- * retuen void
+ * @ppe_common_index: the index
+ * return void
*/
void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
{
@@ -455,7 +456,7 @@ int hns_ppe_get_regs_count(void)
/**
* ppe_get_strings - get ppe string
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @stringset: string set type
* @data: output string
*/
@@ -513,7 +514,7 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
/**
* hns_ppe_init - init ppe device
* @dsaf_dev: dsaf device
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
int hns_ppe_init(struct dsaf_device *dsaf_dev)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 5453597ec629..b6c8910cf7ba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -34,7 +34,7 @@
/**
*hns_rcb_wait_fbd_clean - clean fbd
*@qs: ring struct pointer array
- *@qnum: num of array
+ *@q_num: number of queues in the array
*@flag: tx or rx flag
*/
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
@@ -191,7 +191,8 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
/**
*hns_rcb_ring_enable_hw - enable ring
- *@ring: rcb ring
+ *@q: rcb ring
+ *@val: value to write
*/
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
@@ -844,7 +845,7 @@ void hns_rcb_update_stats(struct hnae_queue *queue)
/**
*hns_rcb_get_stats - get rcb statistic
- *@ring: rcb ring
+ *@queue: rcb ring
*@data:statistic value
*/
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 0a3dbab2dfc9..7e3609ce112a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -130,7 +130,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
/**
*hns_xgmac_enable - enable xgmac port
- *@drv: mac driver
+ *@mac_drv: mac driver
*@mode: mode of mac port
*/
static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
@@ -242,7 +242,8 @@ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
/**
*hns_xgmac_pausefrm_cfg - set pause param about xgmac
*@mac_drv: mac driver
- *@newval:enable of pad and crc
+ *@rx_en: enable receive
+ *@tx_en: enable transmit
*/
static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
{
@@ -490,7 +491,6 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
/**
*hns_xgmac_get_regs - dump xgmac regs
*@mac_drv: mac driver
- *@cmd:ethtool cmd
*@data:data for value of regs
*/
static void hns_xgmac_get_regs(void *mac_drv, void *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22522f8a5299..858cb293152a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -557,10 +558,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
@@ -754,6 +752,8 @@ static void hns_update_rx_rate(struct hnae_ring *ring)
/**
* smooth_alg - smoothing algorithm for adjusting coalesce parameter
+ * @new_param: new value
+ * @old_param: old value
**/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
@@ -1293,6 +1293,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+ irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
ret = request_irq(rd->ring->irq,
hns_irq_handle, 0, rd->ring->ring_name, rd);
if (ret) {
@@ -1300,7 +1301,6 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
rd->ring->irq);
goto out_free_irq;
}
- disable_irq(rd->ring->irq);
cpu = hns_nic_init_affinity_mask(h->q_num, i,
rd->ring, &rd->mask);
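A note on the IRQ change in this hunk: request_irq() arms the line immediately, so the old request-then-disable_irq() sequence left a window in which the handler could fire before the ring was ready. Marking the descriptor IRQ_NOAUTOEN first closes that window. A minimal sketch of the pattern (kernel context; the helper and its name are hypothetical):

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Hypothetical helper showing the IRQ_NOAUTOEN request pattern. */
static int ring_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int ret;

	/* Keep the line masked across request_irq(); without this flag
	 * the IRQ is live as soon as request_irq() succeeds and the
	 * handler could run against a half-initialized ring.
	 */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_irq(irq, handler, 0, "hypothetical-ring", data);
	if (ret)
		return ret;

	/* ... finish ring/vector initialization here ... */

	enable_irq(irq);	/* first unmask once everything is ready */
	return 0;
}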
@@ -1831,9 +1831,8 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
}
/**
- * nic_set_multicast_list - set mutl mac address
- * @netdev: net device
- * @p: mac address
+ * hns_set_multicast_list - set multicast mac address
+ * @ndev: net device
*
* return void
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 14e60c9e491d..7165da0ee9aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -462,7 +462,7 @@ static int __lb_clean_rings(struct hns_nic_priv *priv,
}
/**
- * nic_run_loopback_test - run loopback test
+ * __lb_run_test - run loopback test
* @ndev: net device
* @loop_mode: loopback mode
*/
@@ -971,7 +971,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
/**
- * nic_get_sset_count - get string set count witch returned by nic_get_strings.
+ * hns_get_sset_count - get string set count returned by hns_get_strings
* @netdev: net device
* @stringset: string set index, 0: self test string; 1: statistics string.
*
@@ -1027,7 +1027,7 @@ static int hns_phy_led_set(struct net_device *netdev, int value)
}
/**
- * nic_set_phys_id - set phy identify LED.
+ * hns_set_phys_id - set phy identify LED.
* @netdev: net device
* @state: LED state.
*
@@ -1125,7 +1125,7 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
}
/**
- * nic_get_regs_len - get total register len.
+ * hns_get_regs_len - get total register len.
* @net_dev: net device
*
* Return total register len.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 088550db2de7..912c51e327d6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -34,6 +34,13 @@
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+/* Device version */
+#define HNAE3_DEVICE_VERSION_V1 0x00020
+#define HNAE3_DEVICE_VERSION_V2 0x00021
+#define HNAE3_DEVICE_VERSION_V3 0x00030
+
+#define HNAE3_PCI_REVISION_BIT_SIZE 8
+
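The new version macros fold the old PCI-revision checks into the low byte (0x20/0x21 match the revisions the driver used to compare against), leaving everything above the low 8 bits for a firmware-reported generation, which is what HNAE3_PCI_REVISION_BIT_SIZE hints at. A hedged, compilable sketch of that assumed composition (the helper name is illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define HNAE3_PCI_REVISION_BIT_SIZE 8
#define HNAE3_DEVICE_VERSION_V2 0x00021

/* Assumed composition: a hardware/firmware generation word shifted
 * above the 8-bit PCI revision.
 */
static uint32_t make_dev_version(uint32_t hw_gen, uint8_t pci_revision)
{
	return (hw_gen << HNAE3_PCI_REVISION_BIT_SIZE) | pci_revision;
}

int main(void)
{
	/* generation 0, revision 0x21 -> 0x00021, i.e. DEVICE_VERSION_V2 */
	uint32_t v = make_dev_version(0, 0x21);

	printf("dev_version=0x%05x V2=%s\n", v,
	       v >= HNAE3_DEVICE_VERSION_V2 ? "yes" : "no");
	return 0;
}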
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -42,8 +49,9 @@
#define HNAE3_DEV_ID_50GE_RDMA 0xA224
#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
-#define HNAE3_DEV_ID_100G_VF 0xA22E
-#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F
+#define HNAE3_DEV_ID_200G_RDMA 0xA228
+#define HNAE3_DEV_ID_VF 0xA22E
+#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F
#define HNAE3_CLASS_NAME_SIZE 16
@@ -53,8 +61,6 @@
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
-#define HNAE3_DEV_SUPPORT_FD_B 0x6
-#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,11 +71,67 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+enum HNAE3_DEV_CAP_BITS {
+ HNAE3_DEV_SUPPORT_FD_B,
+ HNAE3_DEV_SUPPORT_GRO_B,
+ HNAE3_DEV_SUPPORT_FEC_B,
+ HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ HNAE3_DEV_SUPPORT_QB_B,
+ HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
+ HNAE3_DEV_SUPPORT_PTP_B,
+ HNAE3_DEV_SUPPORT_INT_QL_B,
+ HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
+ HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
+ HNAE3_DEV_SUPPORT_HW_PAD_B,
+ HNAE3_DEV_SUPPORT_STASH_B,
+};
+
#define hnae3_dev_fd_supported(hdev) \
- hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_gro_supported(hdev) \
- hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fec_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_udp_gso_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_qb_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fd_forward_tc_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_ptp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_int_ql_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_simple_bd_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tx_push_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_phy_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_hw_pad_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_stash_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
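The capability flags move from bits in a 32-bit flag word to test_bit() on an unsigned long array sized by BITS_TO_LONGS(), which lifts the 32-capability ceiling and lets firmware report new bits without structural churn. A self-contained sketch of the same bitmap idiom in plain C (the helpers re-derive what the kernel macros provide):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define CAPS_MAX 96	/* mirrors HNAE3_DEV_CAPS_MAX_NUM */

enum { CAP_FD, CAP_GRO, CAP_FEC };	/* illustrative subset */

static void cap_set(unsigned long *caps, unsigned int bit)
{
	caps[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int cap_test(const unsigned long *caps, unsigned int bit)
{
	return (caps[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long caps[BITS_TO_LONGS(CAPS_MAX)] = { 0 };

	cap_set(caps, CAP_GRO);	/* e.g. parsed from a firmware response */
	printf("GRO: %s FEC: %s\n",
	       cap_test(caps, CAP_GRO) ? "yes" : "no",
	       cap_test(caps, CAP_FEC) ? "yes" : "no");
	return 0;
}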
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -152,6 +214,7 @@ enum hnae3_hw_error_type {
HNAE3_PPU_POISON_ERROR,
HNAE3_CMDQ_ECC_ERROR,
HNAE3_IMP_RD_POISON_ERROR,
+ HNAE3_ROCEE_AXI_RESP_ERROR,
};
enum hnae3_reset_type {
@@ -207,6 +270,17 @@ struct hnae3_ring_chain_node {
#define HNAE3_IS_TX_RING(node) \
(((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+/* device specification info from firmware */
+struct hnae3_dev_specs {
+ u32 mac_entry_num; /* number of mac-vlan table entries */
+ u32 mng_entry_num; /* number of manager table entries */
+ u32 max_tm_rate;
+ u16 rss_ind_tbl_size;
+ u16 rss_key_size;
+ u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+};
+
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
@@ -227,12 +301,16 @@ struct hnae3_client {
struct list_head node;
};
+#define HNAE3_DEV_CAPS_MAX_NUM 96
struct hnae3_ae_dev {
struct pci_dev *pdev;
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
+ struct hnae3_dev_specs dev_specs;
+ u32 dev_version;
+ unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
void *priv;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fe7fb565da19..dc9a85745e62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -15,11 +15,12 @@ static struct dentry *hns3_dbgfs_root;
static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
- u32 value, i = 0;
+ u32 value, i;
int cnt;
if (!priv->ring) {
@@ -118,8 +119,25 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
- value);
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG);
+ dev_info(&h->pdev->dev, "TX/RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) {
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+ }
+
+ dev_info(&h->pdev->dev, "\n");
}
return 0;
@@ -244,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
+ dev_info(&h->pdev->dev, "dev capability\n");
+ dev_info(&h->pdev->dev, "dev spec\n");
if (!hns3_is_phys_func(h->pdev))
return;
@@ -264,6 +284,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump intr\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
@@ -284,6 +305,52 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "%s", printf_buf);
}
+static void hns3_dbg_dev_caps(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ unsigned long *caps;
+
+ caps = ae_dev->caps;
+
+ dev_info(&h->pdev->dev, "support FD: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support GRO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support FEC: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support PTP: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support INT QL: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
+}
+
+static void hns3_dbg_dev_specs(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct hns3_nic_priv *priv = h->priv;
+
+ dev_info(priv->dev, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ dev_info(priv->dev, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ dev_info(priv->dev, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ dev_info(priv->dev, "RSS ind tbl size: %u\n",
+ dev_specs->rss_ind_tbl_size);
+ dev_info(priv->dev, "RSS key size: %u\n", dev_specs->rss_key_size);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+}
+
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -359,6 +426,10 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_queue_map(handle);
else if (strncmp(cmd_buf, "bd info", 7) == 0)
ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
else
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a4f1d515e5e0..a362516a3185 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -81,8 +81,10 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
@@ -623,27 +625,15 @@ void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
ops->request_update_promisc_mode(handle);
}
-int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
-
- if (h->ae_algo->ops->set_promisc_mode) {
- return h->ae_algo->ops->set_promisc_mode(h,
- promisc_flags & HNAE3_UPE,
- promisc_flags & HNAE3_MPE);
- }
-
- return 0;
-}
-
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool last_state;
- if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
+ h->ae_algo->ops->enable_vlan_filter) {
last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
if (enable != last_state) {
netdev_info(netdev,
@@ -706,12 +696,19 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
- hdr_len = (l4.tcp->doff << 2) + l4_offset;
/* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check,
- (__force __wsum)htonl(l4_paylen));
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(l4_paylen));
+ } else {
+ hdr_len = (l4.tcp->doff << 2) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(l4_paylen));
+ }
/* find the txbd field values */
*paylen = skb->len - hdr_len;
@@ -1194,21 +1191,23 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
return bd_num;
}
-static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ u8 max_non_tso_bd_num)
{
struct sk_buff *frag_skb;
unsigned int bd_num = 0;
/* If the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
- skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+ skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
return skb_shinfo(skb)->nr_frags + 1U;
/* The below case will always be linearized, return
* HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
*/
if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+ (!skb_is_gso(skb) && skb->len >
+ HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
return HNS3_MAX_TSO_BD_NUM + 1U;
bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
@@ -1233,31 +1232,34 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-/* HW need every continuous 8 buffer data to be larger than MSS,
- * we simplify it by ensuring skb_headlen + the first continuous
- * 7 frags to to be larger than gso header len + mss, and the remaining
- * continuous 7 frags to be larger than MSS except the last 7 frags.
+/* HW needs every continuous run of max_non_tso_bd_num buffers to hold
+ * more data than MSS; we simplify it by ensuring skb_headlen + the first
+ * continuous max_non_tso_bd_num - 1 frags are larger than gso header len
+ * + mss, and that the remaining continuous max_non_tso_bd_num - 1 frags
+ * are larger than MSS, except for the last max_non_tso_bd_num - 1 frags.
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
- unsigned int bd_num)
+ unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
int i;
- for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+ for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
- /* ensure the first 8 frags is greater than mss + header */
- if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+ /* ensure the first max_non_tso_bd_num frags is greater than
+ * mss + header
+ */
+ if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
return true;
- /* ensure every continuous 7 buffer is greater than mss
- * except the last one.
+ /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
+ * than mss except the last one.
*/
- for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+ for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
tot_len -= bd_size[i];
- tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
+ tot_len += bd_size[i + max_non_tso_bd_num - 1U];
if (tot_len < skb_shinfo(skb)->gso_size)
return true;
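The rewritten comment above describes a sliding-window constraint: every run of max_non_tso_bd_num consecutive buffers must carry at least MSS bytes, with the first window also covering the GSO header. A standalone sketch of that window check, ignoring the header special case (helper name hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Returns true if some window of 'win' consecutive buffers sums to less
 * than 'mss', i.e. the skb would need linearizing. Hypothetical helper
 * mirroring the loop structure of hns3_skb_need_linearized(), without
 * the gso-header handling on the first window.
 */
static bool any_window_below_mss(const unsigned int *bd_size,
				 unsigned int bd_num, unsigned int win,
				 unsigned int mss)
{
	unsigned int tot = 0, i;

	if (bd_num < win)
		return false;

	for (i = 0; i < win; i++)	/* first window */
		tot += bd_size[i];
	if (tot < mss)
		return true;

	for (i = 0; i + win < bd_num; i++) {	/* slide one BD at a time */
		tot -= bd_size[i];
		tot += bd_size[i + win];
		if (tot < mss)
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int bd[9] = { 64, 64, 64, 64, 64, 64, 64, 64, 64 };

	/* nine 64-byte frags, window of 8, MSS 1460 -> must linearize */
	printf("%s\n", any_window_below_mss(bd, 9, 8, 1460) ?
	       "linearize" : "ok");
	return 0;
}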
@@ -1268,7 +1270,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i = 0;
+ int i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -1279,14 +1281,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
unsigned int bd_num;
- bd_num = hns3_tx_bd_num(skb, bd_size);
- if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
+ bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
+ if (unlikely(bd_num > max_non_tso_bd_num)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
- !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
- trace_hns3_over_8bd(skb);
+ !hns3_skb_need_linearized(skb, bd_size, bd_num,
+ max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
goto out;
}
@@ -1296,8 +1300,8 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
- bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
- trace_hns3_over_8bd(skb);
+ bd_num > max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
return -ENOMEM;
}
@@ -1397,6 +1401,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ bool doorbell)
+{
+ ring->pending_buf += num;
+
+ if (!doorbell) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_more++;
+ u64_stats_update_end(&ring->syncp);
+ return;
+ }
+
+ if (!ring->pending_buf)
+ return;
+
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ ring->pending_buf = 0;
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1405,11 +1430,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int pre_ntu, next_to_use_head;
struct sk_buff *frag_skb;
int bd_num = 0;
+ bool doorbell;
int ret;
/* Hardware can only handle short frames above 32 bytes */
- if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
+ }
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1420,6 +1448,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
+ hns3_tx_doorbell(ring, 0, true);
return NETDEV_TX_BUSY;
} else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
@@ -1460,11 +1489,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
- netdev_tx_sent_queue(dev_queue, skb->len);
-
- wmb(); /* Commit all data before submit */
-
- hnae3_queue_xmit(ring->tqp, bd_num);
+ doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ netdev_xmit_more());
+ hns3_tx_doorbell(ring, bd_num, doorbell);
return NETDEV_TX_OK;
@@ -1473,6 +1500,7 @@ fill_err:
out_err_tx_ok:
dev_kfree_skb_any(skb);
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
}
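The xmit path above now defers the tail-register write whenever the stack signals that more packets are queued (the doorbell flag returned by __netdev_tx_sent_queue()), counting each skipped kick in the new tx_more statistic. A reduced model of the batching logic, with the MMIO write stubbed out (struct and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct tx_ring {
	int pending_buf;	/* BDs queued but not yet doorbelled */
	int next_to_use;
	int last_to_use;	/* last index the HW was told about */
	unsigned long long tx_more;
};

static void kick_hw(struct tx_ring *ring)
{
	/* stands in for writel(pending_buf, TX_RING_TAIL_REG) */
	printf("doorbell: %d BDs\n", ring->pending_buf);
}

/* Mirrors the shape of hns3_tx_doorbell(): accumulate BDs and only
 * ring the doorbell when the stack has no further packets pending.
 */
static void tx_doorbell(struct tx_ring *ring, int num, bool doorbell)
{
	ring->pending_buf += num;

	if (!doorbell) {		/* xmit_more: batch this one */
		ring->tx_more++;
		return;
	}

	if (!ring->pending_buf)		/* nothing outstanding */
		return;

	kick_hw(ring);
	ring->pending_buf = 0;
	ring->last_to_use = ring->next_to_use;
}

int main(void)
{
	struct tx_ring ring = { 0 };

	ring.next_to_use = 3;
	tx_doorbell(&ring, 2, false);	/* more packets coming: no kick */
	tx_doorbell(&ring, 1, true);	/* flush: one MMIO write, 3 BDs */
	return 0;
}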
@@ -1853,13 +1881,13 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
tx_ring->next_to_clean, napi->state);
netdev_info(ndev,
- "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
- tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+ tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
netdev_info(ndev,
- "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
- tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
/* When mac received many pause frames continuous, it's unable to send
@@ -2034,9 +2062,10 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_50GE_RDMA:
case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ case HNAE3_DEV_ID_200G_RDMA:
return true;
- case HNAE3_DEV_ID_100G_VF:
- case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ case HNAE3_DEV_ID_VF:
+ case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
return false;
default:
dev_warn(&pdev->dev, "un-recognized pci device-id %u",
@@ -2061,15 +2090,6 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
}
-static void hns3_get_dev_capability(struct pci_dev *pdev,
- struct hnae3_ae_dev *ae_dev)
-{
- if (pdev->revision >= 0x21) {
- hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
- hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
- }
-}
-
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -2091,7 +2111,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
ret = hnae3_register_ae_dev(ae_dev);
@@ -2252,6 +2271,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct pci_dev *pdev = h->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2289,7 +2309,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
- if (pdev->revision >= 0x21) {
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
netdev->hw_features |= NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
@@ -2298,6 +2318,13 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_NTUPLE;
}
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2316,17 +2343,19 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->buf = page_address(p);
cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
+ page_ref_add(p, USHRT_MAX - 1);
+ cb->pagecnt_bias = USHRT_MAX;
return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
- struct hns3_desc_cb *cb)
+ struct hns3_desc_cb *cb, int budget)
{
if (cb->type == DESC_TYPE_SKB)
- dev_kfree_skb_any((struct sk_buff *)cb->priv);
- else if (!HNAE3_IS_TX_RING(ring))
- put_page((struct page *)cb->priv);
+ napi_consume_skb(cb->priv, budget);
+ else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
memset(cb, 0, sizeof(*cb));
}
@@ -2358,7 +2387,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = 0;
}
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+ int budget)
{
struct hns3_desc_cb *cb = &ring->desc_cb[i];
@@ -2366,7 +2396,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
return;
hns3_buffer_detach(ring, i);
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, budget);
}
static void hns3_free_buffers(struct hns3_enet_ring *ring)
@@ -2374,7 +2404,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
int i;
for (i = 0; i < ring->desc_num; i++)
- hns3_free_buffer_detach(ring, i);
+ hns3_free_buffer_detach(ring, i, 0);
}
/* free desc along with its attached buffer */
@@ -2419,7 +2449,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
return 0;
out_with_buf:
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, 0);
out:
return ret;
}
@@ -2451,7 +2481,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
out_buffer_fail:
for (j = i - 1; j >= 0; j--)
- hns3_free_buffer_detach(ring, j);
+ hns3_free_buffer_detach(ring, j, 0);
return ret;
}
@@ -2478,71 +2508,62 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
DMA_FROM_DEVICE);
}
-static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
- int *bytes, int *pkts)
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ int *bytes, int *pkts, int budget)
{
+ /* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
+ * smp_store_release() is not used there because the doorbell
+ * operation already has the needed barrier.
+ */
+ int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
struct hns3_desc_cb *desc_cb;
+ bool reclaimed = false;
+ struct hns3_desc *desc;
+
+ while (ltu != ntc) {
+ desc = &ring->desc[ntc];
+
+ if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+ BIT(HNS3_TXD_VLD_B))
+ break;
- while (head != ntc) {
desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
- hns3_free_buffer_detach(ring, ntc);
+ hns3_free_buffer_detach(ring, ntc, budget);
if (++ntc == ring->desc_num)
ntc = 0;
/* Issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ntc]);
+ reclaimed = true;
}
+ if (unlikely(!reclaimed))
+ return false;
+
/* This smp_store_release() pairs with smp_load_acquire() in
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
+ return true;
}
-static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
-{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
-
- if (unlikely(h > ring->desc_num))
- return 0;
-
- return u > c ? (h > c && h <= u) : (h > c || h <= u);
-}
-
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
- int head;
-
- head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
-
- if (is_ring_empty(ring) || head == ring->next_to_clean)
- return; /* no data to poll */
-
- rmb(); /* Make sure head is ready before touch any data */
-
- if (unlikely(!is_valid_clean_head(ring, head))) {
- hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.io_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- return;
- }
bytes = 0;
pkts = 0;
- hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
+
+ if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+ return;
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
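Reclaim now trusts last_to_use, published by the xmit path, plus the per-descriptor VLD bit, instead of reading the head register: smp_load_acquire() on the consumer side pairs with the release ordering the doorbell write provides on the producer side. The same publish/consume shape in portable C11 atomics, offered as an analogy rather than the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int last_to_use;	/* producer-published index */
static int ring_data[8];

/* Producer: fill the slot, then publish the index with release
 * ordering, so a consumer that acquires the index also sees the
 * slot contents.
 */
static void produce(int idx, int value)
{
	ring_data[idx] = value;
	atomic_store_explicit(&last_to_use, idx + 1, memory_order_release);
}

/* Consumer: acquire-load the index; everything written before the
 * matching release store is now visible.
 */
static void consume(int *ntc)
{
	int ltu = atomic_load_explicit(&last_to_use, memory_order_acquire);

	while (*ntc != ltu) {
		printf("reclaim slot %d -> %d\n", *ntc, ring_data[*ntc]);
		*ntc = (*ntc + 1) % 8;
	}
}

int main(void)
{
	int ntc = 0;

	produce(0, 42);
	consume(&ntc);
	return 0;
}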
@@ -2614,8 +2635,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring_ptr_move_fw(ring, next_to_use);
}
- wmb(); /* Make all data has been write before submit */
- writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
static bool hns3_page_is_reusable(struct page *page)
@@ -2624,6 +2644,11 @@ static bool hns3_page_is_reusable(struct page *page)
!page_is_pfmemalloc(page);
}
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+ return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2632,6 +2657,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
+ desc_cb->pagecnt_bias--;
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2639,20 +2665,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
* when page_offset rollback to zero, flag default unreuse
*/
if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
- (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+ (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
+ }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given */
- get_page(desc_cb->priv);
- } else if (page_count(desc_cb->priv) == 1) {
+ } else if (hns3_can_reuse_page(desc_cb)) {
desc_cb->reuse_flag = 1;
desc_cb->page_offset = 0;
- get_page(desc_cb->priv);
+ } else if (desc_cb->pagecnt_bias) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ return;
+ }
+
+ if (unlikely(!desc_cb->pagecnt_bias)) {
+ page_ref_add(desc_cb->priv, USHRT_MAX);
+ desc_cb->pagecnt_bias = USHRT_MAX;
}
}
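The pagecnt_bias scheme introduced here prepays USHRT_MAX page references at allocation and spends one per RX frag handed to the stack, so the hot path avoids a get_page() per buffer; a page is reusable exactly when page_count() minus the remaining bias is 1, i.e. the only outside reference is the frag just handed out. A toy model of that accounting (types and names illustrative):

#include <limits.h>
#include <stdio.h>

struct page_model {
	int refcount;			/* stands in for page_count() */
};

struct desc_cb {
	struct page_model *page;
	unsigned short pagecnt_bias;	/* refs still owned by the driver */
};

/* Allocation: prepay a large block of references once
 * (page_ref_add(p, USHRT_MAX - 1) in the driver; one ref comes from
 * the allocation itself).
 */
static void attach(struct desc_cb *cb, struct page_model *p)
{
	p->refcount += USHRT_MAX - 1;
	cb->page = p;
	cb->pagecnt_bias = USHRT_MAX;
}

/* Handing a frag to the stack costs one prepaid ref - no atomic op. */
static void give_frag_to_stack(struct desc_cb *cb)
{
	cb->pagecnt_bias--;
}

/* Reusable iff the frag just handed out is the only outside reference. */
static int can_reuse(const struct desc_cb *cb)
{
	return cb->page->refcount - cb->pagecnt_bias == 1;
}

int main(void)
{
	struct page_model page = { .refcount = 1 };
	struct desc_cb cb;

	attach(&cb, &page);
	give_frag_to_stack(&cb);	/* frag A in flight */
	give_frag_to_stack(&cb);	/* frag B in flight */
	printf("two frags outstanding: %d\n", can_reuse(&cb));	/* 0 */
	page.refcount--;		/* stack freed frag A */
	printf("only current frag left: %d\n", can_reuse(&cb));	/* 1 */
	return 0;
}

When the bias runs out, the diff tops it back up in bulk (page_ref_add(priv, USHRT_MAX) and bias = USHRT_MAX), keeping the per-frag path free of atomics.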
@@ -2782,8 +2815,9 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
{
struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- if (pdev->revision == 0x20) {
+ if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(*vlan_tag & VLAN_VID_MASK))
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
@@ -2828,6 +2862,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+ ring->desc[ring->next_to_clean].rx.bd_base_info &=
+ cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->next_to_clean += 1;
+
+ if (unlikely(ring->next_to_clean == ring->desc_num))
+ ring->next_to_clean = 0;
+}
+
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
@@ -2860,9 +2904,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ __page_frag_cache_drain(desc_cb->priv,
+ desc_cb->pagecnt_bias);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
u64_stats_update_begin(&ring->syncp);
@@ -2873,7 +2918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
@@ -2928,7 +2973,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
trace_hns3_rx_desc(ring);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
ring->pending_buf++;
} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
@@ -3070,35 +3115,32 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
prefetch(desc);
- length = le16_to_cpu(desc->rx.size);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!skb) {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- /* Check valid BD */
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
- return -ENXIO;
+ /* Check valid BD */
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+ return -ENXIO;
+
+ dma_rmb();
+ length = le16_to_cpu(desc->rx.size);
- if (!skb) {
ring->va = desc_cb->buf + desc_cb->page_offset;
dma_sync_single_for_cpu(ring_to_dev(ring),
desc_cb->dma + desc_cb->page_offset,
hns3_buf_size(ring),
DMA_FROM_DEVICE);
- }
- /* Prefetch first cache line of first page
- * Idea is to cache few bytes of the header of the packet. Our L1 Cache
- * line size is 64B so need to prefetch twice to make it 128B. But in
- * actual we can have greater size of caches with 128B Level 1 cache
- * lines. In such a case, single fetch would suffice to cache in the
- * relevant part of the header.
- */
- prefetch(ring->va);
-#if L1_CACHE_BYTES < 128
- prefetch(ring->va + L1_CACHE_BYTES);
-#endif
+ /* Prefetch the first cache line of the first page.
+ * The idea is to cache a few bytes of the packet header. Our L1
+ * cache line size is 64B, so we need to prefetch twice to cover
+ * 128B; but larger caches with 128B Level 1 cache lines do exist,
+ * and on those a single fetch suffices to cache the relevant part
+ * of the header.
+ */
+ net_prefetch(ring->va);
- if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
skb = ring->skb;
@@ -3138,19 +3180,11 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
int recv_pkts = 0;
- int recv_bds = 0;
- int err, num;
+ int err;
- num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- num -= unused_count;
unused_count -= ring->pending_buf;
- if (num <= 0)
- goto out;
-
- rmb(); /* Make sure num taken effect before the other data is touched */
-
- while (recv_pkts < budget && recv_bds < num) {
+ while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns3_nic_alloc_rx_buffers(ring, unused_count);
@@ -3168,7 +3202,6 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
recv_pkts++;
}
- recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -3337,7 +3370,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
hns3_for_each_ring(ring, tqp_vector->tx_group)
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, budget);
/* make sure rx ring budget not smaller than 1 */
if (tqp_vector->num_tqps > 1)
@@ -3496,7 +3529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
- int ret = 0;
+ int ret;
int i;
hns3_nic_set_cpumask(priv);
@@ -3673,12 +3706,10 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
ring->queue_index = q->tqp_index;
- ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
ring->queue_index = q->tqp_index;
- ring->io_base = q->io_base;
}
hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
@@ -3692,6 +3723,7 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
+ ring->last_to_use = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -3771,6 +3803,7 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->last_to_use = 0;
ring->pending_buf = 0;
if (ring->skb) {
dev_kfree_skb_any(ring->skb);
@@ -3979,6 +4012,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
u16 alloc_tqps, max_rss_size;
struct hns3_nic_priv *priv;
struct net_device *netdev;
@@ -3995,6 +4029,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->netdev = netdev;
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
+ priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -4181,9 +4216,11 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
ring_ptr_move_fw(ring, next_to_clean);
}
+
+ ring->pending_buf = 0;
}
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
@@ -4286,6 +4323,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
hns3_clear_tx_ring(&priv->ring[i]);
priv->ring[i].next_to_clean = 0;
priv->ring[i].next_to_use = 0;
+ priv->ring[i].last_to_use = 0;
rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
@@ -4582,6 +4620,8 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
.msg = "IMP CMDQ error" },
{ .type = HNAE3_IMP_RD_POISON_ERROR,
.msg = "IMP RD poison" },
+ { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
+ .msg = "ROCEE AXI RESP error" },
};
static void hns3_process_hw_error(struct hnae3_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 9922c5fd7f94..1c81dea0da1e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -42,12 +42,9 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
-#define HNS3_RING_PREFETCH_EN_REG 0x0007C
-#define HNS3_RING_CFG_VF_NUM_REG 0x00080
-#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-
-#define HNS3_TX_REG_OFFSET 0x40
+#define HNS3_RING_RX_EN_REG 0x00098
+#define HNS3_RING_TX_EN_REG 0x000D4
#define HNS3_RX_HEAD_SIZE 256
@@ -172,13 +169,12 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_NON_TSO_BD_NUM 8U
#define HNS3_MAX_TSO_BD_NUM 63U
#define HNS3_MAX_TSO_SIZE \
(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-#define HNS3_MAX_NON_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
+#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
+ (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -292,6 +288,7 @@ struct hns3_desc_cb {
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
+ u16 pagecnt_bias;
};
enum hns3_pkt_l3type {
@@ -348,14 +345,13 @@ enum hns3_pkt_ol4type {
};
struct ring_stats {
- u64 io_err_cnt;
u64 sw_err_cnt;
u64 seg_pkt_cnt;
union {
struct {
u64 tx_pkts;
u64 tx_bytes;
- u64 tx_err_cnt;
+ u64 tx_more;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
@@ -380,7 +376,6 @@ struct ring_stats {
};
struct hns3_enet_ring {
- u8 __iomem *io_base; /* base io address for the ring */
struct hns3_desc *desc; /* dma map address space */
struct hns3_desc_cb *desc_cb;
struct hns3_enet_ring *next;
@@ -402,8 +397,10 @@ struct hns3_enet_ring {
* next_to_use
*/
int next_to_clean;
-
- u32 pull_len; /* head length for current packet */
+ union {
+ int last_to_use; /* last idx used by xmit */
+ u32 pull_len; /* memcpy len for current rx packet */
+ };
u32 frag_num;
void *va; /* first buffer address for current packet */
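A short note on why the tx/rx union above is safe, based only on the members' own comments:

/* last_to_use is meaningful only on tx rings and pull_len only on rx
 * rings; since a ring is permanently one or the other, the two
 * members never carry live data at the same time.
 */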
@@ -479,6 +476,7 @@ struct hns3_nic_priv {
struct hns3_enet_ring *ring;
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
+ u8 max_non_tso_bd_num;
u64 tx_timeout_count;
@@ -518,11 +516,6 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
-static inline int is_ring_empty(struct hns3_enet_ring *ring)
-{
- return ring->next_to_use == ring->next_to_clean;
-}
-
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
@@ -548,9 +541,6 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
- (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-
#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
@@ -588,7 +578,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
@@ -607,7 +597,6 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
-int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 2622e04e8eed..6b07b2771172 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -27,12 +27,11 @@ struct hns3_sfp_type {
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
- HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("more", tx_more),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
@@ -46,7 +45,6 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", rx_pkts),
@@ -79,6 +77,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool vlan_filter_enable;
int ret;
@@ -98,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break;
}
- if (ret || h->pdev->revision >= 0x21)
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
return ret;
if (en) {
@@ -149,6 +148,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
struct net_device *ndev = skb->dev;
struct hnae3_handle *handle;
+ struct hnae3_ae_dev *ae_dev;
unsigned char *packet;
struct ethhdr *ethh;
unsigned int i;
@@ -165,7 +165,8 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- if (handle->pdev->revision == 0x20)
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
@@ -232,7 +233,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
for (i = start_ringid; i <= end_ringid; i++) {
struct hns3_enet_ring *ring = &priv->ring[i];
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, 0);
}
}
@@ -310,9 +311,6 @@ static void hns3_self_test(struct net_device *ndev,
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
bool if_running = netif_running(ndev);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- bool dis_vlan_filter;
-#endif
int test_index = 0;
u32 i;
@@ -349,9 +347,7 @@ static void hns3_self_test(struct net_device *ndev,
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter because selftest does not support it */
- dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- h->ae_algo->ops->enable_vlan_filter;
- if (dis_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
@@ -388,7 +384,7 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->halt_autoneg(h, false);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (dis_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
#endif
@@ -763,6 +759,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int ret;
@@ -784,7 +781,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
}
- if (handle->pdev->revision == 0x20)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
ret = hns3_check_ksettings_param(netdev, cmd);
@@ -848,11 +845,12 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
- if ((h->pdev->revision == 0x20 &&
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
@@ -1073,9 +1071,6 @@ static int hns3_nway_reset(struct net_device *netdev)
if (phy)
return genphy_restart_aneg(phy);
- if (handle->pdev->revision == 0x20)
- return -EOPNOTSUPP;
-
return ops->restart_autoneg(handle);
}
@@ -1363,11 +1358,12 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 fec_ability;
u8 fec_mode;
- if (handle->pdev->revision == 0x20)
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP;
if (!ops->get_fec)
@@ -1385,10 +1381,11 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u32 fec_mode;
- if (handle->pdev->revision == 0x20)
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP;
if (!ops->set_fec)
@@ -1406,11 +1403,13 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct hns3_sfp_type sfp_type;
int ret;
- if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
return -EOPNOTSUPP;
memset(&sfp_type, 0, sizeof(sfp_type));
@@ -1454,9 +1453,11 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
return -EOPNOTSUPP;
if (!ee->len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 7bddcca148a5..5153e5d41bbd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -53,7 +53,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
)
);
-DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
+DEFINE_EVENT(hns3_skb_template, hns3_over_max_bd,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 1d6c328bd9fb..e6321dda0f3f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -261,7 +261,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
bool complete = false;
u32 timeout = 0;
int handle = 0;
- int retval = 0;
+ int retval;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -330,9 +330,37 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return retval;
}
-static enum hclge_cmd_status hclge_cmd_query_firmware_version(
- struct hclge_hw *hw, u32 *version)
+static void hclge_set_default_capability(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclge_parse_capability(struct hclge_dev *hdev,
+ struct hclge_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
+ set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static enum hclge_cmd_status
+hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_query_version_cmd *resp;
struct hclge_desc desc;
int ret;
@@ -340,9 +368,20 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version(
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
- ret = hclge_cmd_send(hw, &desc, 1);
- if (!ret)
- *version = le32_to_cpu(resp->firmware);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclge_set_default_capability(hdev);
+
+ hclge_parse_capability(hdev, resp);
return ret;
}
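A worked example of the version packing above, assuming HNAE3_PCI_REVISION_BIT_SIZE is 8 (the values themselves are hypothetical):

/* resp->hardware = 0x30 and pdev->revision = 0x30 give
 * dev_version = (0x30 << 8) | 0x30 = 0x3030, so version checks key on
 * the combined 16-bit value instead of pdev->revision alone.
 */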
@@ -402,7 +441,6 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
int hclge_cmd_init(struct hclge_dev *hdev)
{
- u32 version;
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -431,22 +469,23 @@ int hclge_cmd_init(struct hclge_dev *hdev)
goto err_cmd_init;
}
- ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
+ /* get version and device capabilities */
+ ret = hclge_cmd_query_version_and_capability(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "firmware version query failed %d\n", ret);
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
goto err_cmd_init;
}
- hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
/* ask the firmware to enable some features, driver can work without
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 463f29151ef0..096e26a2e16b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -115,7 +115,8 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
- HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
@@ -362,9 +363,26 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
+enum HCLGE_CAP_BITS {
+ HCLGE_CAP_UDP_GSO_B,
+ HCLGE_CAP_QB_B,
+ HCLGE_CAP_FD_FORWARD_TC_B,
+ HCLGE_CAP_PTP_B,
+ HCLGE_CAP_INT_QL_B,
+ HCLGE_CAP_SIMPLE_BD_B,
+ HCLGE_CAP_TX_PUSH_B,
+ HCLGE_CAP_PHY_IMP_B,
+ HCLGE_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_CAP_HW_PAD_B,
+ HCLGE_CAP_STASH_B,
+};
+
+#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
- __le32 firmware_rsv[5];
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
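To make the caps layout concrete, a hypothetical first capability word decoded against the enum above (assuming hnae3_get_bit(v, b) reads bit b of v):

/* caps[0] == 0x111 sets bits 0, 4 and 8: HCLGE_CAP_UDP_GSO_B,
 * HCLGE_CAP_INT_QL_B and HCLGE_CAP_TQP_TXRX_INDEP_B, each of which
 * hclge_parse_capability() mirrors into ae_dev->caps.
 */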
#define HCLGE_RX_PRIV_EN_B 15
@@ -491,6 +509,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
@@ -1069,6 +1089,20 @@ struct hclge_sfp_info_bd0_cmd {
u8 data[HCLGE_SFP_INFO_BD0_LEN];
};
+#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclge_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1;
+ __le32 max_tm_rate;
+};
+
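The new layout sums to 24 bytes, which matches one command BD's data area if hclge_desc carries __le32 data[6] — an assumption about a definition outside this hunk:

/* 4 + 4 + 4 + 2 + 2 + 2 + 1 + 1 + 4 = 24 bytes */
static_assert(sizeof(struct hclge_dev_specs_0_cmd) == 24);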
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index d6c3952aba04..3606240025a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -2,7 +2,8 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
+#include "hclge_dcb.h"
#include "hclge_tm.h"
+#include "hclge_dcb.h"
#include "hnae3.h"
#define BW_PERCENT 100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 26f6f068b01d..16df050e72cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -8,7 +8,7 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .reg_type = "bios common",
.dfx_msg = &hclge_dbg_bios_common_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
@@ -115,14 +115,14 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_reg_type_info *reg_info,
+ const struct hclge_dbg_reg_type_info *reg_info,
const char *cmd_buf)
{
#define IDX_OFFSET 1
const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
- struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
- struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int entries_per_desc;
@@ -399,7 +399,7 @@ err_dcb_cmd_send:
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- struct hclge_dbg_reg_type_info *reg_info;
+ const struct hclge_dbg_reg_type_info *reg_info;
bool has_dump = false;
int i;
@@ -428,17 +428,13 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
-static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
- char *title_buf, char *true_buf,
- char *false_buf)
+static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
- title_buf, index, true_buf,
- hdev->tm_info.pg_info[0].tc_dwrr[index]);
+ dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
+ index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- false_buf);
+ dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
}
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
@@ -469,8 +465,7 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight->weight_offset);
for (i = 0; i < HNAE3_MAX_TC; i++)
- hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
- "tc", "no sp mode", "sp mode");
+ hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
}
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
@@ -1170,6 +1165,14 @@ static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
hdev->serv_processed_cnt);
}
+static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
+ dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1494,6 +1497,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
#define DUMP_LOOPBACK "dump loopback"
+#define DUMP_INTERRUPT "dump intr"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1541,6 +1545,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_mac_list(hdev,
&cmd_buf[sizeof("dump mc mac list")],
false);
+ } else if (strncmp(cmd_buf, DUMP_INTERRUPT,
+ strlen(DUMP_INTERRUPT)) == 0) {
+ hclge_dbg_dump_interrupt(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 38b79321c4c4..a9066e6ff697 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -81,13 +81,13 @@ struct hclge_dbg_dfx_message {
#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
struct hclge_dbg_reg_type_info {
const char *reg_type;
- struct hclge_dbg_dfx_message *dfx_msg;
+ const struct hclge_dbg_dfx_message *dfx_msg;
struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
-static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
{true, "DFX_MSIX_INFO_NIC_0"},
@@ -103,7 +103,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
{true, "SSU_ETS_PORT_STATUS"},
{true, "SSU_ETS_TCG_STATUS"},
@@ -175,7 +175,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{true, "prt_id"},
{true, "PACKET_TC_CURR_BUFFER_CNT_0"},
{true, "PACKET_TC_CURR_BUFFER_CNT_1"},
@@ -282,7 +282,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{true, "OQ_INDEX"},
{true, "QUEUE_CNT"},
{false, "Reserved"},
@@ -291,7 +291,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "prt_id"},
{true, "IGU_RX_ERR_PKT"},
{true, "IGU_RX_NO_SOF_PKT"},
@@ -356,7 +356,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "tc_queue_num"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -365,7 +365,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "BUF_WAIT_TIMEOUT_QID"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
{true, "FIFO_DFX_ST0"},
{true, "FIFO_DFX_ST1"},
@@ -381,7 +381,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{false, "Reserved"},
{true, "NCSI_EGU_TX_FIFO_STS"},
{true, "NCSI_PAUSE_STATUS"},
@@ -453,7 +453,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{true, "NCSI_MAC_RX_PAUSE_FRAMES"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
{true, "LGE_IGU_AFIFO_DFX_0"},
{true, "LGE_IGU_AFIFO_DFX_1"},
@@ -483,7 +483,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
{true, "DROP_FROM_PRT_PKT_CNT"},
{true, "DROP_FROM_HOST_PKT_CNT"},
@@ -639,7 +639,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -711,7 +711,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "q_num"},
{true, "RCB_CFG_RX_RING_TAIL"},
{true, "RCB_CFG_RX_RING_HEAD"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 50d5ef71756b..9ee55ee0487d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -729,7 +729,7 @@ static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* configure NCSI error interrupts */
@@ -808,7 +808,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[1].data[2] =
cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
@@ -1041,7 +1041,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[0].data[0] =
cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
else
@@ -1507,6 +1507,8 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
reset_type = HNAE3_FUNC_RESET;
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
@@ -1548,7 +1550,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !hnae3_dev_roce_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
@@ -1574,8 +1577,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
enum hnae3_reset_type reset_type;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21)
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
@@ -1661,7 +1663,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
/* Handling Non-fatal Rocee RAS errors */
- if (hdev->pdev->revision >= 0x21 &&
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d553ed7ee64c..1f026408ad38 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -84,6 +84,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
@@ -622,7 +623,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
@@ -739,7 +740,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
- if (hdev->pdev->revision >= 0x21 ||
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
@@ -965,6 +966,9 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
case 5:
*speed = HCLGE_MAC_SPEED_100G;
break;
+ case 8:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
default:
return -EINVAL;
}
@@ -1004,6 +1008,9 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
case HCLGE_MAC_SPEED_100G:
speed_bit = HCLGE_SUPPORT_100G_BIT;
break;
+ case HCLGE_MAC_SPEED_200G:
+ speed_bit = HCLGE_SUPPORT_200G_BIT;
+ break;
default:
return -EINVAL;
}
@@ -1014,7 +1021,7 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1031,9 +1038,12 @@ static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
@@ -1050,9 +1060,13 @@ static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
@@ -1069,9 +1083,12 @@ static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
@@ -1091,6 +1108,9 @@ static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1115,6 +1135,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
BIT(HNAE3_FEC_AUTO);
break;
case HCLGE_MAC_SPEED_100G:
+ case HCLGE_MAC_SPEED_200G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
break;
@@ -1125,7 +1146,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -1136,7 +1157,7 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
hclge_convert_setting_sr(mac, speed_ability);
hclge_convert_setting_lr(mac, speed_ability);
hclge_convert_setting_cr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
@@ -1145,12 +1166,12 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
hclge_convert_setting_kr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
@@ -1158,7 +1179,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
@@ -1188,7 +1209,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
@@ -1200,8 +1221,11 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-static u32 hclge_get_max_speed(u8 speed_ability)
+static u32 hclge_get_max_speed(u16 speed_ability)
{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
return HCLGE_MAC_SPEED_100G;
@@ -1231,8 +1255,11 @@ static u32 hclge_get_max_speed(u8 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define SPEED_ABILITY_EXT_SHIFT 8
+
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
u64 mac_addr_tmp;
unsigned int i;
@@ -1281,6 +1308,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
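A worked example of the two-field merge (the parameter value is hypothetical):

/* req->param[1] == 0x04ff: base field (bits 7:0) = 0xff, ext field
 * (bits 15:10) = 0x1, so speed_ability = 0xff | (0x1 << 8) = 0x1ff,
 * i.e. the legacy abilities plus HCLGE_SUPPORT_200G_BIT (BIT(8)).
 */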
@@ -1324,6 +1356,78 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
return 0;
}
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ if (!dev_specs->max_tm_rate)
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclge_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_dev_specs(hdev, desc);
+ hclge_check_dev_specs(hdev);
+
+ return 0;
+}
+
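Two conventions the query leans on, neither fully visible in this hunk: multi-BD commands chain by setting HCLGE_CMD_FLAG_NEXT on every descriptor except the last, and hclge_check_dev_specs() backfills fields that older firmware leaves at zero:

/* e.g. firmware predating rss_ind_tbl_size reports 0 there; the check
 * above restores HCLGE_RSS_IND_TBL_SIZE, so callers may read
 * ae_dev->dev_specs unconditionally.
 */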
static int hclge_get_cap(struct hclge_dev *hdev)
{
int ret;
@@ -2422,6 +2526,10 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, 5);
break;
+ case HCLGE_MAC_SPEED_200G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 8);
+ break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
@@ -2856,7 +2964,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
ret = hclge_get_sfp_info(hdev, mac);
else
ret = hclge_get_sfp_speed(hdev, &speed);
@@ -2868,7 +2976,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return ret;
}
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(mac);
return 0;
@@ -3211,7 +3319,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->roce_client;
- int ret = 0;
+ int ret;
u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
@@ -3533,7 +3641,7 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
/* For revision 0x20, the reset interrupt source
* can only be cleared after the hardware reset is done
*/
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
clearval);
@@ -3944,6 +4052,9 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
unsigned long delta = round_jiffies_relative(HZ);
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ return;
+
/* Always handle the link updating to make sure link state is
* updated when it is triggered by mbx.
*/
@@ -4537,7 +4648,7 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -4737,13 +4848,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
bool en_bc_pmc = true;
- /* For revision 0x20, if broadcast promisc enabled, vlan filter is
- * always bypassed. So broadcast promisc should be disabled until
- * user enable promisc mode
+ /* For devices whose version is below V2, if broadcast promisc is
+ * enabled, the vlan filter is always bypassed. So broadcast promisc
+ * should be disabled until the user enables promisc mode
*/
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
@@ -6758,7 +6870,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
* the same, the packets are looped back in the SSU. If SSU loopback
* is disabled, packets can reach MAC even if SMAC is the same as DMAC.
*/
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
@@ -8260,7 +8372,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS, enable, 0);
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
@@ -8620,7 +8732,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int ret;
int i;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* for revision 0x21, vf vlan filter is per function */
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
@@ -8975,7 +9087,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 state;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vfid);
@@ -9950,6 +10062,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_cmd_uninit;
+ ret = hclge_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
+ ret);
+ goto err_cmd_uninit;
+ }
+
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
@@ -10147,7 +10266,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
u32 new_spoofchk = enable ? 1 : 0;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vf);
@@ -10180,7 +10299,7 @@ static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
int ret;
int i;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* resume the vf spoof check state after reset */
@@ -10200,6 +10319,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
u32 new_trusted = enable ? 1 : 0;
bool en_bc_pmc;
int ret;
@@ -10213,7 +10333,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
/* Disable promisc mode for VF if it is not trusted any more. */
if (!enable && vport->vf_info.promisc_enable) {
- en_bc_pmc = hdev->pdev->revision != 0x20;
+ en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
ret = hclge_set_vport_promisc_mode(vport, false, false,
en_bc_pmc);
if (ret)
@@ -11090,7 +11210,7 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
- u8 tmp_flags = 0;
+ u8 tmp_flags;
int ret;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 9bbdd4557c27..64e6afdb61b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -199,6 +199,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -238,7 +239,8 @@ enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
- HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
};
enum HCLGE_MAC_DUPLEX {
@@ -266,7 +268,7 @@ struct hclge_mac {
u32 fec_mode; /* active fec mode */
u32 user_fec_mode;
u32 fec_ability;
- int link; /* store the link status of mac & phy (if phy exit) */
+ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -349,7 +351,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
- u8 speed_ability;
+ u16 speed_ability;
u16 umv_space;
};
@@ -749,7 +751,6 @@ struct hclge_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
- u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
u8 support_sfp_query;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 28db13253a5e..e8495f58a1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,14 +23,11 @@ enum hclge_shaper_level {
#define HCLGE_SHAPER_BS_U_DEF 5
#define HCLGE_SHAPER_BS_S_DEF 20
-#define HCLGE_ETHER_MAX_RATE 100000
-
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
* @ir: Rate to be config, its unit is Mbps
* @shaper_level: the shaper level. eg: port, pg, priority, queueset
- * @ir_b: IR_B parameter of IR shaper
- * @ir_u: IR_U parameter of IR shaper
- * @ir_s: IR_S parameter of IR shaper
+ * @ir_para: parameters of IR shaper
+ * @max_tm_rate: max tm rate available to config
*
* the formula:
*
@@ -41,7 +38,8 @@ enum hclge_shaper_level {
* @return: 0: calculation successful, negative: failure
*/
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
- u8 *ir_b, u8 *ir_u, u8 *ir_s)
+ struct hclge_shaper_ir_para *ir_para,
+ u32 max_tm_rate)
{
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
@@ -59,7 +57,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Calc tick */
if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
- ir > HCLGE_ETHER_MAX_RATE)
+ ir > max_tm_rate)
return -EINVAL;
tick = tick_array[shaper_level];
@@ -74,9 +72,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- *ir_b = 126;
- *ir_u = 0;
- *ir_s = 0;
+ ir_para->ir_b = 126;
+ ir_para->ir_u = 0;
+ ir_para->ir_s = 0;
return 0;
} else if (ir_calc > ir) {
@@ -86,8 +84,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
}
- *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
- DIVISOR_CLK;
+ ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
@@ -99,15 +97,16 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
}
if (ir_calc == ir) {
- *ir_b = 126;
+ ir_para->ir_b = 126;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
- *ir_b = (ir * tick + (denominator >> 1)) / denominator;
+ ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+ denominator;
}
}
- *ir_u = ir_u_calc;
- *ir_s = ir_s_calc;
+ ir_para->ir_u = ir_u_calc;
+ ir_para->ir_s = ir_s_calc;
return 0;
}
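A worked example of the ideal-rate branch, using a hypothetical tick since tick_array[] sits outside this hunk:

/* With DIVISOR_CLK = 1000 * 8 and tick = 8, the ideal rate is
 * ir_calc = 126 * DIVISOR_CLK / tick = 126 * 8000 / 8 = 126000 Mbps;
 * requesting exactly that rate encodes as ir_b = 126, ir_u = 0,
 * ir_s = 0, while lower rates take the ir_calc > ir branch.
 */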
@@ -400,21 +399,22 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_desc desc;
- u8 ir_u, ir_b, ir_s;
u32 shapping_para;
int ret;
- ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
- HCLGE_SHAPER_LVL_PORT,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
@@ -515,21 +515,23 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
- u8 ir_b, ir_u, ir_s;
u32 shaper_para;
int ret, i;
if (!max_tx_rate)
- max_tx_rate = HCLGE_ETHER_MAX_RATE;
+ max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
@@ -668,7 +670,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
- hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
+ hdev->tm_info.pg_info[i].bw_limit =
+ hdev->ae_dev->dev_specs.max_tm_rate;
if (i != 0)
continue;
@@ -729,7 +732,8 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
u32 i;
@@ -741,10 +745,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
/* Pg to pri */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Calc shaper para */
- ret = hclge_shaper_para_calc(
- hdev->tm_info.pg_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PG,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PG,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
@@ -757,7 +760,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
@@ -861,16 +866,16 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
u32 i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PRI,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
@@ -882,7 +887,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
@@ -897,12 +904,13 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
@@ -914,7 +922,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
@@ -929,15 +938,15 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 i;
int ret;
for (i = 0; i < kinfo->num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_QSET,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
}
@@ -1355,7 +1364,7 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
- int ret = 0;
+ int ret;
int i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
@@ -1364,7 +1373,7 @@ static int hclge_tm_bp_setup(struct hclge_dev *hdev)
return ret;
}
- return ret;
+ return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 45bcb67f90fd..bb2a2d8e9259 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -19,6 +19,8 @@
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_ETHER_MAX_RATE 100000
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -139,6 +141,12 @@ struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
+struct hclge_shaper_ir_para {
+ u8 ir_b; /* IR_B parameter of IR shaper */
+ u8 ir_u; /* IR_U parameter of IR shaper */
+ u8 ir_s; /* IR_S parameter of IR shaper */
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index fec65239a3c8..66866c1cfb12 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -313,9 +313,34 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
return status;
}
-static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
- u32 *version)
+static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
+ struct hclgevf_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_query_version_cmd *resp;
struct hclgevf_desc desc;
int status;
@@ -323,9 +348,20 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
- status = hclgevf_cmd_send(hw, &desc, 1);
- if (!status)
- *version = le32_to_cpu(resp->firmware);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ return status;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclgevf_set_default_capability(hdev);
+
+ hclgevf_parse_capability(hdev, resp);
return status;
}
@@ -364,7 +400,6 @@ err_csq:
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -395,23 +430,22 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
goto err_cmd_init;
}
- /* get firmware version */
- ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ /* get version and device capabilities */
+ ret = hclgevf_cmd_query_version_and_capability(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to query firmware version\n", ret);
+ "failed to query version and capabilities, ret = %d\n", ret);
goto err_cmd_init;
}
- hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
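
For reference, the dev_version assembled above keeps the firmware-reported hardware version in the high bits and the PCI revision id in the low byte, so the old raw revision checks map directly onto version constants. A one-line sketch, assuming the 8-bit HNAE3_PCI_REVISION_BIT_SIZE from hnae3.h:

	/* hardware == 0 with pdev->revision == 0x21 compares equal to
	 * HNAE3_DEVICE_VERSION_V2 under this layout
	 */
	u32 dev_version = (le32_to_cpu(resp->hardware) <<
			   HNAE3_PCI_REVISION_BIT_SIZE) | hdev->pdev->revision;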
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 40d6e602ab51..9460c128c095 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -91,6 +91,8 @@ enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
+
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -141,9 +143,26 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
+enum HCLGEVF_CAP_BITS {
+ HCLGEVF_CAP_UDP_GSO_B,
+ HCLGEVF_CAP_QB_B,
+ HCLGEVF_CAP_FD_FORWARD_TC_B,
+ HCLGEVF_CAP_PTP_B,
+ HCLGEVF_CAP_INT_QL_B,
+ HCLGEVF_CAP_SIMPLE_BD_B,
+ HCLGEVF_CAP_TX_PUSH_B,
+ HCLGEVF_CAP_PHY_IMP_B,
+ HCLGEVF_CAP_TQP_TXRX_INDEP_B,
+ HCLGEVF_CAP_HW_PAD_B,
+ HCLGEVF_CAP_STASH_B,
+};
+
+#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
- __le32 firmware_rsv[5];
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
@@ -253,6 +272,19 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclgevf_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1[5];
+};
+
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
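
The repurposed reply layout still fits a single command descriptor: firmware (4 bytes) + hardware (4) + rsv (4) + caps[3] (12) is exactly the 24-byte data area of a hclgevf_desc. A compile-time check one could add (illustrative, not part of the patch):

	/* six __le32 data words per descriptor */
	BUILD_BUG_ON(sizeof(struct hclgevf_query_version_cmd) !=
		     6 * sizeof(__le32));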
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e972138a14ad..c8e3fdd5999c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -19,8 +19,9 @@ static struct hnae3_ae_algo ae_algovf;
static struct workqueue_struct *hclgevf_wq;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -171,7 +172,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
@@ -745,7 +746,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int i, ret;
- if (handle->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Get hash algorithm */
if (hfunc) {
switch (rss_cfg->hash_algo) {
@@ -791,7 +792,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- if (handle->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Set the RSS Hash Key if specified by the user */
if (key) {
switch (hfunc) {
@@ -863,7 +864,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
u8 tuple_sets;
int ret;
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
if (nfc->data &
@@ -941,7 +942,7 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 tuple_sets;
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
nfc->data = 0;
@@ -1154,10 +1155,9 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct pci_dev *pdev = hdev->pdev;
bool en_bc_pmc;
- en_bc_pmc = pdev->revision != 0x20;
+ en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
en_bc_pmc);
@@ -1702,6 +1702,26 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
return ret;
}
+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ struct hnae3_handle *handle = &hdev->roce;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
+ type, ret);
+ return ret;
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1788,10 +1808,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100
- struct hclge_vf_to_pf_msg send_msg;
- int ret = 0;
-
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (ret) {
@@ -1806,10 +1826,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
hclgevf_reset_handshake(hdev, true);
- dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
- hdev->reset_type, ret);
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
+ hdev->reset_type);
- return ret;
+ return 0;
}
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
@@ -1865,6 +1885,11 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_cnt++;
+ /* perform reset of the stack & ae device for a client */
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
@@ -1880,6 +1905,9 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
int ret;
hdev->rst_stats.hw_rst_done_cnt++;
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
rtnl_lock();
/* now, re-initialize the nic client and ae device */
@@ -1890,6 +1918,18 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ /* propagate an INIT failure (forcing a reset retry) only while the
+ * reset has failed fewer than HCLGEVF_RESET_MAX_FAIL_CNT - 1 times;
+ * after that the RoCE notify error is ignored so it cannot block the
+ * final retry
+ */
+ if (ret &&
+ hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
hdev->last_reset_time = jiffies;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
@@ -2186,6 +2226,9 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
unsigned long delta = round_jiffies_relative(HZ);
struct hnae3_handle *handle = &hdev->nic;
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ return;
+
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -2284,7 +2327,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
* register, so we should just write 0 to the bit we are
* handling, and keep other bits as cmdq_stat_reg.
*/
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
else
*clearval = cmdq_stat_reg &
@@ -2427,7 +2470,7 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
@@ -2452,7 +2495,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
if (ret)
@@ -2551,13 +2594,7 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- int ret;
-
- ret = hclgevf_set_alive(handle, true);
- if (ret)
- return ret;
-
- return 0;
+ return hclgevf_set_alive(handle, true);
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2760,6 +2797,7 @@ static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
if (ret)
return ret;
+ set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
@@ -2820,6 +2858,7 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init roce, if it exists */
if (hdev->roce_client) {
+ clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
hdev->roce_client = NULL;
hdev->roce.client = NULL;
@@ -2942,6 +2981,76 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
+static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num =
+ HCLGEVF_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
+ struct hclgevf_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclgevf_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+}
+
+static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclgevf_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc[i],
+ HCLGEVF_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ }
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
+ true);
+
+ ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclgevf_parse_dev_specs(hdev, desc);
+ hclgevf_check_dev_specs(hdev);
+
+ return 0;
+}
+
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -3050,6 +3159,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_cmd_init;
+ ret = hclgevf_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to query dev specifications, ret = %d\n", ret);
+ goto err_cmd_init;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
@@ -3146,8 +3262,8 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
@@ -3345,6 +3461,13 @@ static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}
+static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3530,6 +3653,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode,
.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
+ .get_cmdq_stat = hclgevf_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algovf = {
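
For orientation, the reset hunks above interleave the new RoCE notifications with the existing NIC client flow; summarizing only what the hunks show:

	/* hclgevf_reset_prepare():  RoCE DOWN_CLIENT, then NIC DOWN_CLIENT
	 * hclgevf_reset_rebuild():  RoCE UNINIT_CLIENT, NIC rebuild,
	 *                           then RoCE INIT_CLIENT and UP_CLIENT
	 *
	 * An INIT_CLIENT failure forces a retry only while rst_fail_cnt is
	 * below HCLGEVF_RESET_MAX_FAIL_CNT - 1; afterwards it is ignored so
	 * a broken RoCE engine cannot block the final retry.
	 */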
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c1fac8920ae3..c5bcc3894fd5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -139,6 +139,7 @@ enum hclgevf_states {
HCLGEVF_STATE_IRQ_INITED,
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
+ HCLGEVF_STATE_ROCE_REGISTERED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 7df5d7d211d4..883d0d7c6858 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -210,7 +210,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
- * @value: register value
+ * @data: register value
*
* Return 0 on success, negative on failure
*/
@@ -273,7 +273,6 @@ static int hns_mdio_write(struct mii_bus *bus,
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
- * @value: register value
*
* Return phy register value
*/
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 67b59d0ba769..2f89119c9b69 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o hinic_sriov.o
+ hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o \
+ hinic_sriov.o hinic_debugfs.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
new file mode 100644
index 000000000000..19eb839177ec
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hinic_debugfs.h"
+
+static struct dentry *hinic_dbgfs_root;
+
+enum sq_dbg_info {
+ GLB_SQ_ID,
+ SQ_PI,
+ SQ_CI,
+ SQ_FI,
+ SQ_MSIX_ENTRY,
+};
+
+static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"};
+
+static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ switch (idx) {
+ case GLB_SQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
+ case SQ_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case SQ_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case SQ_FI:
+ return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
+ case SQ_MSIX_ENTRY:
+ return sq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum rq_dbg_info {
+ GLB_RQ_ID,
+ RQ_HW_PI,
+ RQ_SW_CI,
+ RQ_SW_PI,
+ RQ_MSIX_ENTRY,
+};
+
+static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"};
+
+static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
+{
+ struct hinic_wq *wq = rq->wq;
+
+ switch (idx) {
+ case GLB_RQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
+ case RQ_HW_PI:
+ return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
+ case RQ_SW_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case RQ_SW_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case RQ_MSIX_ENTRY:
+ return rq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum func_tbl_info {
+ VALID,
+ RX_MODE,
+ MTU,
+ RQ_DEPTH,
+ QUEUE_NUM,
+};
+
+static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"};
+
+static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+{
+ struct tag_sml_funcfg_tbl *funcfg_table_elem;
+ struct hinic_cmd_lt_rd *read_data;
+ u16 out_size = sizeof(*read_data);
+ int ret = ~0;
+ int err;
+
+ read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
+ if (!read_data)
+ return ~0;
+
+ read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
+ read_data->inst = TBL_ID_FUNC_CFG_SM_INST;
+ read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+ read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+
+ err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data,
+ sizeof(*read_data), read_data, &out_size);
+ if (err || out_size != sizeof(*read_data) || read_data->status) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, read_data->status, out_size);
+ kfree(read_data);
+ return ~0;
+ }
+
+ funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data;
+
+ switch (idx) {
+ case VALID:
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
+ case RX_MODE:
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
+ case MTU:
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
+ case RQ_DEPTH:
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
+ case QUEUE_NUM:
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
+ }
+
+ kfree(read_data); /* was leaked when the switch returned directly */
+
+ return ret;
+}
+
+static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct hinic_debug_priv *dbg;
+ char ret_buf[20];
+ int *desc;
+ u64 out;
+ int ret;
+
+ desc = filp->private_data;
+ dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]);
+
+ switch (dbg->type) {
+ case HINIC_DBG_SQ_INFO:
+ out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_RQ_INFO:
+ out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_FUNC_TABLE:
+ out = hinic_dbg_get_func_table(dbg->dev, *desc);
+ break;
+
+ default:
+ netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n",
+ dbg->type);
+ return -EINVAL;
+ }
+
+ ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out);
+
+ return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret);
+}
+
+static const struct file_operations hinic_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hinic_dbg_cmd_read,
+};
+
+static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data,
+ struct dentry *root, struct hinic_debug_priv **dbg, char **field,
+ int nfile)
+{
+ struct hinic_debug_priv *tmp;
+ int i;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->dev = dev;
+ tmp->object = data;
+ tmp->type = type;
+ tmp->root = root;
+
+ for (i = 0; i < nfile; i++) {
+ tmp->field_id[i] = i;
+ debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops);
+ }
+
+ *dbg = tmp;
+
+ return 0;
+}
+
+static void rem_dbg_files(struct hinic_debug_priv *dbg)
+{
+ if (dbg->type != HINIC_DBG_FUNC_TABLE)
+ debugfs_remove_recursive(dbg->root);
+
+ kfree(dbg);
+}
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id)
+{
+ struct hinic_sq *sq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ sq = dev->txqs[sq_id].sq;
+
+ sprintf(sub_dir, "0x%x", sq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->sq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields,
+ ARRAY_SIZE(sq_fields));
+}
+
+void hinic_sq_debug_rem(struct hinic_sq *sq)
+{
+ if (sq->dbg)
+ rem_dbg_files(sq->dbg);
+}
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id)
+{
+ struct hinic_rq *rq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ rq = dev->rxqs[rq_id].rq;
+
+ sprintf(sub_dir, "0x%x", rq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->rq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
+ ARRAY_SIZE(rq_fields));
+}
+
+void hinic_rq_debug_rem(struct hinic_rq *rq)
+{
+ if (rq->dbg)
+ rem_dbg_files(rq->dbg);
+}
+
+int hinic_func_table_debug_add(struct hinic_dev *dev)
+{
+ if (HINIC_IS_VF(dev->hwdev->hwif))
+ return 0;
+
+ return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg,
+ func_table_fields, ARRAY_SIZE(func_table_fields));
+}
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev)
+{
+ if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg)
+ rem_dbg_files(dev->dbg);
+}
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root);
+}
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->sq_dbgfs);
+}
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root);
+}
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->rq_dbgfs);
+}
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root);
+}
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ debugfs_remove_recursive(nic_dev->func_tbl_dbgfs);
+}
+
+void hinic_dbg_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev),
+ hinic_dbgfs_root);
+}
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->dbgfs_root);
+ nic_dev->dbgfs_root = NULL;
+}
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hinic_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hinic_dbgfs_root);
+ hinic_dbgfs_root = NULL;
+}
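
The hooks above build one directory per device under the driver-wide root, with a 0400 file per field whose read returns the value formatted as "0x%llx\n". The resulting layout looks like this (PCI address and queue ids are examples):

	/sys/kernel/debug/hinic/0000:05:00.0/SQs/0x0/sq_pi
	/sys/kernel/debug/hinic/0000:05:00.0/RQs/0x0/rq_hw_pi
	/sys/kernel/debug/hinic/0000:05:00.0/func_table/mtu   (PF only)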
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
new file mode 100644
index 000000000000..e9e00cfa1329
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_DEBUGFS_H
+#define HINIC_DEBUGFS_H
+
+#include "hinic_dev.h"
+
+#define TBL_ID_FUNC_CFG_SM_NODE 11
+#define TBL_ID_FUNC_CFG_SM_INST 1
+
+#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
+#define HINIC_FUNCTION_CONFIGURE_TABLE 1
+
+struct hinic_cmd_lt_rd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ unsigned char node;
+ unsigned char inst;
+ unsigned char entry_size;
+ unsigned char rsvd;
+ unsigned int lt_index;
+ unsigned int offset;
+ unsigned int len;
+ unsigned char data[100];
+};
+
+struct tag_sml_funcfg_tbl {
+ union {
+ struct {
+ u32 rsvd0 :8;
+ u32 nic_rx_mode :5;
+ u32 rsvd1 :18;
+ u32 valid :1;
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+ u32 vlan_id :12;
+ u32 vlan_mode :3;
+ u32 fast_recycled_mode :1;
+ u32 mtu :16;
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 dw2;
+ u32 dw3;
+ u32 dw4;
+ u32 dw5;
+ u32 dw6;
+ u32 dw7;
+ u32 dw8;
+ u32 dw9;
+ u32 dw10;
+ u32 dw11;
+ u32 dw12;
+
+ union {
+ struct {
+ u32 rsvd2 :15;
+ u32 cfg_q_num :9;
+ u32 cfg_rq_depth :6;
+ u32 vhd_type :2;
+ } bs;
+
+ u32 value;
+ } dw13;
+
+ u32 dw14;
+ u32 dw15;
+};
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id);
+
+void hinic_sq_debug_rem(struct hinic_sq *sq);
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id);
+
+void hinic_rq_debug_rem(struct hinic_rq *rq);
+
+int hinic_func_table_debug_add(struct hinic_dev *dev);
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev);
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_init(struct hinic_dev *nic_dev);
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name);
+
+void hinic_dbg_unregister_debugfs(void);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index 0a1e20edf7cf..fb3e89141a0d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -58,6 +58,20 @@ struct hinic_intr_coal_info {
u8 resend_timer_cfg;
};
+enum hinic_dbg_type {
+ HINIC_DBG_SQ_INFO,
+ HINIC_DBG_RQ_INFO,
+ HINIC_DBG_FUNC_TABLE,
+};
+
+struct hinic_debug_priv {
+ struct hinic_dev *dev;
+ void *object;
+ enum hinic_dbg_type type;
+ struct dentry *root;
+ int field_id[64];
+};
+
struct hinic_dev {
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -97,6 +111,12 @@ struct hinic_dev {
int lb_test_rx_idx;
int lb_pkt_len;
u8 *lb_test_rx_buf;
+
+ struct dentry *dbgfs_root;
+ struct dentry *sq_dbgfs;
+ struct dentry *rq_dbgfs;
+ struct dentry *func_tbl_dbgfs;
+ struct hinic_debug_priv *dbg;
struct devlink *devlink;
bool cable_unplugged;
bool module_unrecognized;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 16bda7381ba0..2630d667f393 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -281,18 +281,14 @@ static int hinic_firmware_update(struct hinic_devlink_priv *priv,
}
static int hinic_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct hinic_devlink_priv *priv = devlink_priv(devlink);
const struct firmware *fw;
int err;
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, file_name,
+ err = request_firmware_direct(&fw, params->file_name,
&priv->hwdev->hwif->pdev->dev);
if (err)
return err;
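
This tracks the devlink core change that packs file_name and component into struct devlink_flash_update_params; the core now rejects a component unless the driver declares support for it, which is why the -EOPNOTSUPP check can go. A sketch of the ops wiring, with the field the core consults shown for context (hinic leaves it unset):

	static const struct devlink_ops hinic_devlink_ops = {
		/* .supported_flash_update_params left 0: no component
		 * support, so devlink core rejects one before calling us
		 */
		.flash_update = hinic_devlink_flash_update,
	};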
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 29e88e25a4a4..4e4029d5c8e1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -373,7 +373,7 @@ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
* @chain: chain for the command
* @dest: destination node on the card that will receive the command
* @cmd: command data
- * @size: the command size
+ * @cmd_size: the command size
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index e0eb294779ec..5a6bbee819cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -784,7 +784,7 @@ static void free_cmdq(struct hinic_cmdq *cmdq)
* init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
* @hwdev: the NIC HW device
* @cmdqs: cmdqs to write the ctxts for
- * &db_area: db_area for all the cmdqs
+ * @db_area: db_area for all the cmdqs
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0c737765d113..0c74f6674634 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -437,6 +437,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
/**
* hinic_hwdev_ifup - Preparing the HW for passing IO
* @hwdev: the NIC HW device
+ * @sq_depth: the send queue depth
+ * @rq_depth: the receive queue depth
*
* Return 0 - Success, negative - Failure
**/
@@ -465,6 +467,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
func_to_io->hwdev = hwdev;
func_to_io->sq_depth = sq_depth;
func_to_io->rq_depth = rq_depth;
+ func_to_io->global_qpn = base_qpn;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
@@ -581,6 +584,7 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
/**
* nic_mgmt_msg_handler - nic mgmt event handler
* @handle: private data for the handler
+ * @cmd: message command
* @buf_in: input buffer
* @in_size: input size
* @buf_out: output buffer
@@ -908,6 +912,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
+ * @devlink: pointer to the hinic devlink
*
* Return initialized NIC HW device
*
@@ -1120,7 +1125,7 @@ int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
* @msix_index: msix_index
* @pending_limit: the maximum pending interrupt events (unit 8)
* @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
* @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
* @resend_timer: maximum wait for resending msix (unit coalesc period)
*
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 701eb81e09a7..416492e48274 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -96,6 +96,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+ HINIC_PORT_CMD_RD_LINE_TBL = 57,
+
HINIC_PORT_CMD_RSS_CFG = 66,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index ca8cb68a8d20..19942fef99d9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -106,7 +106,7 @@ enum eq_arm_state {
* @aeqs: pointer to Async eqs of the chip
* @event: aeq event to register callback for it
* @handle: private data will be used by the callback
- * @hw_handler: callback function
+ * @hwe_handler: callback function
**/
void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
enum hinic_aeq_type event, void *handle,
@@ -188,6 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
/**
* eq_update_ci - update the HW cons idx of event queue
* @eq: the event queue to update the cons idx for
+ * @arm_state: the arm bit value of eq's interrupt
**/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
@@ -368,11 +369,11 @@ static void eq_irq_work(struct work_struct *work)
/**
* ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
+ * @t: the tasklet struct pointer
**/
-static void ceq_tasklet(unsigned long ceq_data)
+static void ceq_tasklet(struct tasklet_struct *t)
{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
+ struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
eq_irq_handler(ceq);
}
@@ -782,8 +783,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
INIT_WORK(&aeq_work->work, eq_irq_work);
} else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
+ tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
}
/* set the attributes of the msix entry */
@@ -794,12 +794,15 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
+ if (type == HINIC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
+ } else if (type == HINIC_CEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
+ }
if (err) {
dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
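
The conversion above follows the tree-wide tasklet_setup()/from_tasklet() pattern: the callback now receives the tasklet_struct itself and recovers its container with from_tasklet(), which is container_of() specialized for tasklets. The generic shape:

	struct foo {
		struct tasklet_struct tasklet;
		/* ... */
	};

	static void foo_tasklet_fn(struct tasklet_struct *t)
	{
		struct foo *f = from_tasklet(f, t, tasklet);
		/* deferred work on f */
	}

	/* at init time: */
	tasklet_setup(&f->tasklet, foo_tasklet_fn);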
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index 43065fc70869..2f3222174fc7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -186,6 +186,7 @@ struct hinic_eq {
int num_elem_in_pg;
struct msix_entry msix_entry;
+ char irq_name[64];
dma_addr_t *dma_addr;
void **virt_addr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index bc8925c0c982..efbaed389440 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -230,6 +230,7 @@ static int wait_hwif_ready(struct hinic_hwif *hwif)
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
**/
static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
u32 attr2)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 3e3fa742e476..4ef4008e65bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -305,6 +305,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
func_to_io->sq_db[q_id] = db_base;
+ qp->sq.qid = q_id;
err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
sq_msix_entry,
CI_ADDR(func_to_io->ci_addr_base, q_id),
@@ -314,6 +315,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
goto err_sq_init;
}
+ qp->rq.qid = q_id;
err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
rq_msix_entry);
if (err) {
@@ -361,8 +363,8 @@ static void destroy_qp(struct hinic_func_to_io *func_to_io,
* @func_to_io: func to io channel that holds the IO components
* @base_qpn: base qp number
* @num_qps: number of queue pairs to create
- * @sq_msix_entry: msix entries for sq
- * @rq_msix_entry: msix entries for rq
+ * @sq_msix_entries: msix entries for sq
+ * @rq_msix_entries: msix entries for rq
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index ee6d60762d84..52159a90278a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -59,6 +59,7 @@ struct hinic_nic_cfg {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
struct hinic_hwdev *hwdev;
+ u16 global_qpn;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 2ebae6cb5db5..819fa13034c0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -238,6 +238,7 @@ static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
* @out_size: response length
* @direction: the direction of the original message
* @resp_msg_id: msg id to response for
+ * @timeout: time-out period of waiting for response
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index ca3e2d060284..0dfa51ad5855 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -81,6 +81,8 @@ struct hinic_sq {
struct hinic_wq *wq;
+ u16 qid;
+
u32 irq;
u16 msix_entry;
@@ -90,6 +92,7 @@ struct hinic_sq {
void __iomem *db_base;
struct sk_buff **saved_skb;
+ struct hinic_debug_priv *dbg;
};
struct hinic_rq {
@@ -97,6 +100,8 @@ struct hinic_rq {
struct hinic_wq *wq;
+ u16 qid;
+
struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;
@@ -110,6 +115,7 @@ struct hinic_rq {
u16 *pi_virt_addr;
dma_addr_t pi_dma_addr;
+ struct hinic_debug_priv *dbg;
};
struct hinic_qp {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 28581bd8ce07..350225bbe0be 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/err.h>
+#include "hinic_debugfs.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_devlink.h"
@@ -153,6 +154,8 @@ static int create_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return -ENOMEM;
+ hinic_sq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_txqs; i++) {
struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
@@ -162,13 +165,27 @@ static int create_txqs(struct hinic_dev *nic_dev)
"Failed to init Txq\n");
goto err_init_txq;
}
+
+ err = hinic_sq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add SQ%d debug\n", i);
+ goto err_add_sq_dbg;
+ }
+
}
return 0;
+err_add_sq_dbg:
+ hinic_clean_txq(&nic_dev->txqs[i]);
err_init_txq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_sq_debug_rem(nic_dev->txqs[j].sq);
hinic_clean_txq(&nic_dev->txqs[j]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
return err;
@@ -204,8 +221,12 @@ static void free_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return;
- for (i = 0; i < num_txqs; i++)
+ for (i = 0; i < num_txqs; i++) {
+ hinic_sq_debug_rem(nic_dev->txqs[i].sq);
hinic_clean_txq(&nic_dev->txqs[i]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
nic_dev->txqs = NULL;
@@ -231,6 +252,8 @@ static int create_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return -ENOMEM;
+ hinic_rq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_rxqs; i++) {
struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
@@ -240,13 +263,26 @@ static int create_rxqs(struct hinic_dev *nic_dev)
"Failed to init rxq\n");
goto err_init_rxq;
}
+
+ err = hinic_rq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add RQ%d debug\n", i);
+ goto err_add_rq_dbg;
+ }
}
return 0;
+err_add_rq_dbg:
+ hinic_clean_rxq(&nic_dev->rxqs[i]);
err_init_rxq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
hinic_clean_rxq(&nic_dev->rxqs[j]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
return err;
@@ -264,8 +300,12 @@ static void free_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return;
- for (i = 0; i < num_rxqs; i++)
+ for (i = 0; i < num_rxqs; i++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
hinic_clean_rxq(&nic_dev->rxqs[i]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
nic_dev->rxqs = NULL;
@@ -913,11 +953,16 @@ static void netdev_features_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features = netdev->hw_features;
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
}
static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
@@ -945,7 +990,7 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @handle: nic device for the handler
* @buf_in: input buffer
* @in_size: input size
- * @buf_in: output buffer
+ * @buf_out: output buffer
* @out_size: returned output size
*
* Return 0 - Success, negative - Failure
@@ -1284,6 +1329,16 @@ static int nic_dev_init(struct pci_dev *pdev)
goto err_init_intr;
}
+ hinic_dbg_init(nic_dev);
+
+ hinic_func_tbl_dbgfs_init(nic_dev);
+
+ err = hinic_func_table_debug_add(nic_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add func_table debug\n");
+ goto err_add_func_table_dbg;
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1293,6 +1348,10 @@ static int nic_dev_init(struct pci_dev *pdev)
return 0;
err_reg_netdev:
+ hinic_func_table_debug_rem(nic_dev);
+err_add_func_table_dbg:
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+ hinic_dbg_uninit(nic_dev);
hinic_free_intr_coalesce(nic_dev);
err_init_intr:
err_set_pfc:
@@ -1415,6 +1474,12 @@ static void hinic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ hinic_func_table_debug_rem(nic_dev);
+
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+
+ hinic_dbg_uninit(nic_dev);
+
hinic_free_intr_coalesce(nic_dev);
hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
@@ -1469,4 +1534,17 @@ static struct pci_driver hinic_driver = {
.sriov_configure = hinic_pci_sriov_configure,
};
-module_pci_driver(hinic_driver);
+static int __init hinic_module_init(void)
+{
+ hinic_dbg_register_debugfs(HINIC_DRV_NAME);
+ return pci_register_driver(&hinic_driver);
+}
+
+static void __exit hinic_module_exit(void)
+{
+ pci_unregister_driver(&hinic_driver);
+ hinic_dbg_unregister_debugfs();
+}
+
+module_init(hinic_module_init);
+module_exit(hinic_module_exit);
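
module_pci_driver() is dropped because the driver-wide debugfs root has to exist before the first probe runs and be removed after the last remove. For comparison, the macro it replaces expands to roughly this:

	static int __init hinic_driver_init(void)
	{
		return pci_register_driver(&hinic_driver);
	}
	module_init(hinic_driver_init);

	static void __exit hinic_driver_exit(void)
	{
		pci_unregister_driver(&hinic_driver);
	}
	module_exit(hinic_driver_exit);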
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index d0072f5e7efc..070a7cc6392e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -595,7 +595,7 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq_stats_init(rxq);
rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
- "hinic_rxq%d", qp->q_id);
+ "%s_rxq%d", netdev->name, qp->q_id);
if (!rxq->irq_name)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c1f81e9144a1..8da7d46363b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -357,6 +357,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
enum hinic_l4_offload_type l4_offload;
u32 offset, l4_len, network_hdr_len;
enum hinic_l3_offload_type l3_type;
+ u32 tunnel_type = NOT_TUNNEL;
union hinic_l3 ip;
union hinic_l4 l4;
u8 l4_proto;
@@ -367,27 +368,55 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 l4_tunnel_len;
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
- if (ip.v4->version == 4)
+ if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ip.v4->version == 6)
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
l3_type = IPV6_PKT;
- else
+ tunnel_type = TUNNEL_UDP_CSUM;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
l3_type = L3TYPE_UNKNOWN;
+ l4_proto = IPPROTO_RAW;
+ }
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
-
- hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
- l4_tunnel_len);
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ break;
+ default:
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
@@ -853,14 +882,14 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
+ irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
+ sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
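
With IPv6 outer headers, nexthdr may name an extension header rather than the transport protocol, hence the ipv6_skip_exthdr() walk above. A standalone sketch of that idiom (needs <net/ipv6.h>; the helper name is illustrative):

	static u8 outer_l4_proto(struct sk_buff *skb)
	{
		struct ipv6hdr *ip6 = ipv6_hdr(skb);
		unsigned char *exthdr = (unsigned char *)(ip6 + 1);
		u8 l4_proto = ip6->nexthdr;
		__be16 frag_off;

		if (skb_transport_header(skb) != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
		return l4_proto;
	}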
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index aec7e98bcc85..96c6f4f36904 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,23 +96,14 @@
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
-#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
-
-#define DMA_WBACK(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
-
-#define DMA_INV(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
-
-#define DMA_WBACK_INV(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-
#define SYSBUS 0x0000006c
/* big endian CPU, 82596 "big" endian mode */
#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define SWAP16(x) (x)
+#define NONCOHERENT_DMA 1
+
#include "lib82596.c"
MODULE_AUTHOR("Richard Hirst");
@@ -155,7 +146,7 @@ lan_init_chip(struct parisc_device *dev)
{
struct net_device *netdevice;
struct i596_private *lp;
- int retval;
+ int retval = -ENOMEM;
int i;
if (!dev->irq) {
@@ -186,12 +177,22 @@ lan_init_chip(struct parisc_device *dev)
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
+ lp->dma = dma_alloc_noncoherent(&dev->dev,
+ sizeof(struct i596_dma), &lp->dma_addr,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!lp->dma)
+ goto out_free_netdev;
retval = i82596_probe(netdevice);
- if (retval) {
- free_netdev(netdevice);
- return -ENODEV;
- }
+ if (retval)
+ goto out_free_dma;
+ return 0;
+
+out_free_dma:
+ dma_free_noncoherent(&dev->dev, sizeof(struct i596_dma),
+ lp->dma, lp->dma_addr, DMA_BIDIRECTIONAL);
+out_free_netdev:
+ free_netdev(netdevice);
return retval;
}
@@ -201,8 +202,8 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
struct i596_private *lp = netdev_priv(dev);
unregister_netdev (dev);
- dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, LIB82596_DMA_ATTR);
+ dma_free_noncoherent(&pdev->dev, sizeof(struct i596_private), lp->dma,
+ lp->dma_addr, DMA_BIDIRECTIONAL);
free_netdev (dev);
return 0;
}
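
This is part of the 5.10 removal of dma_cache_sync() and DMA_ATTR_NON_CONSISTENT: noncoherent buffers are now allocated with an explicit direction, and ownership moves between CPU and device with the regular sync calls. The general pattern, as an illustrative fragment:

	buf = dma_alloc_noncoherent(dev, size, &handle, DMA_BIDIRECTIONAL,
				    GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* CPU writes descriptors, then hands the memory to the device */
	dma_sync_single_for_device(dev, handle, size, DMA_BIDIRECTIONAL);
	/* ... device DMA ... then, before the CPU reads results back */
	dma_sync_single_for_cpu(dev, handle, size, DMA_BIDIRECTIONAL);

	dma_free_noncoherent(dev, size, buf, handle, DMA_BIDIRECTIONAL);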
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index b03757e169e4..ca2fb303fcc6 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -365,13 +365,44 @@ static int max_cmd_backlog = TX_RING_SIZE-1;
static void i596_poll_controller(struct net_device *dev);
#endif
+static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
+{
+ return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
+}
+
+#ifdef NONCOHERENT_DMA
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_device(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+#else
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+#endif /* NONCOHERENT_DMA */
static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
- DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
while (--delcnt && dma->iscp.stat) {
udelay(10);
- DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
}
if (!delcnt) {
printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
@@ -384,10 +415,10 @@ static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int d
static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
- DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
while (--delcnt && dma->scb.command) {
udelay(10);
- DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
}
if (!delcnt) {
printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -451,12 +482,9 @@ static void i596_display_data(struct net_device *dev)
SWAP32(rbd->b_data), SWAP16(rbd->size));
rbd = rbd->v_next;
} while (rbd != lp->rbd_head);
- DMA_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
}
-
-#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
-
static inline int init_rx_bufs(struct net_device *dev)
{
struct i596_private *lp = netdev_priv(dev);
@@ -508,7 +536,7 @@ static inline int init_rx_bufs(struct net_device *dev)
rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
return 0;
}
@@ -547,7 +575,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
lp->rbd_head = dma->rbds;
dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
}
@@ -575,9 +603,9 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
- DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
- DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
+ dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
ca(dev);
@@ -596,24 +624,24 @@ static int init_i596_mem(struct net_device *dev)
rebuild_rx_bufs(dev);
dma->scb.command = 0;
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
DEB(DEB_INIT, printk(KERN_DEBUG
"%s: queuing CmdConfigure\n", dev->name));
memcpy(dma->cf_cmd.i596_config, init_setup, 14);
dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
- DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
+ dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
i596_add_cmd(dev, &dma->cf_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
- DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
+ dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &dma->sa_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
- DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
+ dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
i596_add_cmd(dev, &dma->tdr_cmd.cmd);
spin_lock_irqsave (&lp->lock, flags);
@@ -625,7 +653,7 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
dma->scb.command = SWAP16(RX_START);
dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
@@ -659,13 +687,13 @@ static inline int i596_rx(struct net_device *dev)
rfd = lp->rfd_head; /* Ref next frame to check */
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
if (rfd->rbd == I596_NULL)
rbd = NULL;
else if (rfd->rbd == lp->rbd_head->b_addr) {
rbd = lp->rbd_head;
- DMA_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
} else {
printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
/* XXX Now what? */
@@ -713,7 +741,7 @@ static inline int i596_rx(struct net_device *dev)
DMA_FROM_DEVICE);
rbd->v_data = newskb->data;
rbd->b_data = SWAP32(dma_addr);
- DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
} else {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
}
@@ -765,7 +793,7 @@ memory_squeeze:
if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
rbd->count = 0;
lp->rbd_head = rbd->v_next;
- DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
}
/* Tidy the frame descriptor, marking it as end of list */
@@ -779,14 +807,14 @@ memory_squeeze:
lp->dma->scb.rfd = rfd->b_next;
lp->rfd_head = rfd->v_next;
- DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
/* Remove end-of-list from old end descriptor */
rfd->v_prev->cmd = SWAP16(CMD_FLEX);
- DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
+ dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
rfd = lp->rfd_head;
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
}
DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
@@ -827,12 +855,12 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
ptr->v_next = NULL;
ptr->b_next = I596_NULL;
}
- DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
}
wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
lp->dma->scb.cmd = I596_NULL;
- DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}
@@ -850,7 +878,7 @@ static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
/* FIXME: this command might cause an lpmc */
lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
- DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
ca(dev);
/* wait for shutdown */
@@ -878,20 +906,20 @@ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
cmd->v_next = NULL;
cmd->b_next = I596_NULL;
- DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
spin_lock_irqsave (&lp->lock, flags);
if (lp->cmd_head != NULL) {
lp->cmd_tail->v_next = cmd;
lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
- DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
} else {
lp->cmd_head = cmd;
wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
dma->scb.command = SWAP16(CUC_START);
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
}
lp->cmd_tail = cmd;
@@ -956,7 +984,7 @@ static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
/* Issue a channel attention signal */
DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
lp->dma->scb.command = SWAP16(CUC_START | RX_START);
- DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
ca (dev);
lp->last_restart = dev->stats.tx_packets;
}
@@ -1014,8 +1042,8 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
tbd->data = SWAP32(tx_cmd->dma_addr);
DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
- DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
- DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
+ dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
+ dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
i596_add_cmd(dev, &tx_cmd->cmd);
dev->stats.tx_packets++;
@@ -1047,9 +1075,8 @@ static const struct net_device_ops i596_netdev_ops = {
static int i82596_probe(struct net_device *dev)
{
- int i;
struct i596_private *lp = netdev_priv(dev);
- struct i596_dma *dma;
+ int ret;
/* This lot is to ensure things have been cache line aligned. */
BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
@@ -1063,41 +1090,28 @@ static int i82596_probe(struct net_device *dev)
if (!dev->base_addr || !dev->irq)
return -ENODEV;
- dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
- &lp->dma_addr, GFP_KERNEL,
- LIB82596_DMA_ATTR);
- if (!dma) {
- printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
- return -ENOMEM;
- }
-
dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- memset(dma, 0, sizeof(struct i596_dma));
- lp->dma = dma;
-
- dma->scb.command = 0;
- dma->scb.cmd = I596_NULL;
- dma->scb.rfd = I596_NULL;
+ memset(lp->dma, 0, sizeof(struct i596_dma));
+ lp->dma->scb.command = 0;
+ lp->dma->scb.cmd = I596_NULL;
+ lp->dma->scb.rfd = I596_NULL;
spin_lock_init(&lp->lock);
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
- i = register_netdev(dev);
- if (i) {
- dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
- dma, lp->dma_addr, LIB82596_DMA_ATTR);
- return i;
- }
+ ret = register_netdev(dev);
+ if (ret)
+ return ret;
DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
dev->name, dev->base_addr, dev->dev_addr,
dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
- dev->name, dma, (int)sizeof(struct i596_dma),
- &dma->scb));
+ dev->name, lp->dma, (int)sizeof(struct i596_dma),
+ &lp->dma->scb));
return 0;
}
@@ -1155,7 +1169,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
dev->name, status & 0x0700));
while (lp->cmd_head != NULL) {
- DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
+ dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
if (!(lp->cmd_head->status & SWAP16(STAT_C)))
break;
@@ -1237,7 +1251,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
}
ptr->v_next = NULL;
ptr->b_next = I596_NULL;
- DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
lp->last_cmd = jiffies;
}
@@ -1251,13 +1265,13 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
ptr->command &= SWAP16(0x1fff);
ptr = ptr->v_next;
- DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
}
if (lp->cmd_head != NULL)
ack_cmd |= CUC_START;
dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
- DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
}
if ((status & 0x1000) || (status & 0x4000)) {
if ((status & 0x4000))
@@ -1282,7 +1296,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
}
wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
dma->scb.command = SWAP16(ack_cmd);
- DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
/* DANGER: I suspect that some kind of interrupt
acknowledgement aside from acking the 82596 might be needed
@@ -1313,7 +1327,7 @@ static int i596_close(struct net_device *dev)
wait_cmd(dev, lp->dma, 100, "close1 timed out");
lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
- DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
ca(dev);
@@ -1372,7 +1386,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name);
else {
dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
- DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
+ dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
i596_add_cmd(dev, &dma->cf_cmd.cmd);
}
}
@@ -1404,7 +1418,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cp));
cp += ETH_ALEN;
}
- DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
+ dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
}
}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 22f5887578b2..27937c5d7956 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -24,12 +24,6 @@
static const char sni_82596_string[] = "snirm_82596";
-#define LIB82596_DMA_ATTR 0
-
-#define DMA_WBACK(priv, addr, len) do { } while (0)
-#define DMA_INV(priv, addr, len) do { } while (0)
-#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
-
#define SYSBUS 0x00004400
/* big endian CPU, 82596 little endian */
@@ -134,10 +128,19 @@ static int sni_82596_probe(struct platform_device *dev)
lp->ca = ca_addr;
lp->mpu_port = mpu_addr;
+ lp->dma = dma_alloc_coherent(&dev->dev, sizeof(struct i596_dma),
+ &lp->dma_addr, GFP_KERNEL);
+ if (!lp->dma)
+ goto probe_failed;
+
retval = i82596_probe(netdevice);
- if (retval == 0)
- return 0;
+ if (retval)
+ goto probe_failed_free_dma;
+ return 0;
+probe_failed_free_dma:
+ dma_free_coherent(&dev->dev, sizeof(struct i596_dma), lp->dma,
+ lp->dma_addr);
probe_failed:
free_netdev(netdevice);
probe_failed_free_ca:
@@ -153,8 +156,8 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
struct i596_private *lp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, LIB82596_DMA_ATTR);
+ dma_free_coherent(&pdev->dev, sizeof(struct i596_dma), lp->dma,
+ lp->dma_addr);
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
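
A hedged aside on the conversion above: dma_alloc_coherent() hands back a
CPU virtual address plus a bus address for the device, and the matching
dma_free_coherent() must be passed the same device, size, and both
addresses. A minimal sketch of the pairing (the descriptor struct and
"pdev" are illustrative, not from this patch):

	/* Sketch only: allocate a coherent descriptor block at probe
	 * time; the CPU uses "blk", the hardware is handed "blk_dma".
	 */
	struct my_desc_block *blk;
	dma_addr_t blk_dma;

	blk = dma_alloc_coherent(&pdev->dev, sizeof(*blk), &blk_dma,
				 GFP_KERNEL);
	if (!blk)
		return -ENOMEM;
	/* ... program blk_dma into the device, use blk from the CPU ... */
	dma_free_coherent(&pdev->dev, sizeof(*blk), blk, blk_dma);

Note that the size passed to dma_free_coherent() must match the one used
at allocation time.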
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3153d62cc73e..c2e740475786 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1212,9 +1212,9 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
}
}
-static void ehea_neq_tasklet(unsigned long data)
+static void ehea_neq_tasklet(struct tasklet_struct *t)
{
- struct ehea_adapter *adapter = (struct ehea_adapter *)data;
+ struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
struct ehea_eqe *eqe;
u64 event_mask;
@@ -3417,8 +3417,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
goto out_free_ad;
}
- tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
ret = ehea_create_device_sysfs(dev);
if (ret)
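
Both tasklet conversions in this series follow the same shape:
tasklet_setup() registers a callback taking a struct tasklet_struct *,
and from_tasklet() (a container_of() wrapper) recovers the enclosing
object, replacing the old unsigned-long data argument. A minimal sketch
with a hypothetical driver struct, not taken from these drivers:

	#include <linux/interrupt.h>

	struct my_dev {
		struct tasklet_struct tl;
		int pending_events;
	};

	static void my_dev_tasklet(struct tasklet_struct *t)
	{
		/* recover the my_dev that embeds this tasklet_struct */
		struct my_dev *d = from_tasklet(d, t, tl);

		d->pending_events = 0;	/* deferred work goes here */
	}

	static void my_dev_init(struct my_dev *d)
	{
		/* replaces tasklet_init(&d->tl, fn, (unsigned long)d) */
		tasklet_setup(&d->tl, my_dev_tasklet);
	}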
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5c732601e35..c3ec9ceed833 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1031,12 +1031,6 @@ static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
ret = -EOPNOTSUPP;
}
- if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
- netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
- netdev->stats.tx_dropped++;
- ret = -EOPNOTSUPP;
- }
-
return ret;
}
@@ -1349,6 +1343,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+ __sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
@@ -1385,16 +1380,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rx_csum_helper(skb, adapter);
+ /* PHYP without PLSO support places a -1 in the ip
+ * checksum for large send frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph_check = iph->check;
}
- if (length > netdev->mtu + ETH_HLEN) {
+ if ((length > netdev->mtu + ETH_HLEN) ||
+ lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
+ if (csum_good) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ibmveth_rx_csum_helper(skb, adapter);
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b702a43a5d0..da15913879f8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -97,15 +97,14 @@ static int pending_scrq(struct ibmvnic_adapter *,
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
-static void send_map_query(struct ibmvnic_adapter *adapter);
+static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
-static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
-static int ibmvnic_init(struct ibmvnic_adapter *);
-static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@ -297,8 +296,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
adapter->rx_pool[i].active = 0;
}
@@ -306,6 +304,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
int count = pool->size - atomic_read(&pool->available);
+ u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -314,7 +313,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
unsigned int offset;
dma_addr_t dma_addr;
unsigned char *dst;
- u64 *handle_array;
int shift = 0;
int index;
int i;
@@ -322,10 +320,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_subcrqs));
-
for (i = 0; i < count; ++i) {
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
if (!skb) {
@@ -369,8 +363,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#endif
sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index],
- &sub_crq);
+ lpar_rc = send_subcrq(adapter, handle, &sub_crq);
if (lpar_rc != H_SUCCESS)
goto failure;
@@ -407,8 +400,7 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
int i;
adapter->replenish_task_cycles++;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
@@ -475,25 +467,23 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ u64 buff_size;
int rx_scrqs;
int i, j, rc;
- u64 *size_array;
if (!adapter->rx_pool)
return -1;
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
-
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ buff_size = adapter->cur_rx_buf_sz;
+ rx_scrqs = adapter->num_active_rx_pools;
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
@@ -561,13 +551,11 @@ static int init_rx_pools(struct net_device *netdev)
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
- u64 *size_array;
+ u64 buff_size;
int i, j;
- rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs,
sizeof(struct ibmvnic_rx_pool),
@@ -585,11 +573,11 @@ static int init_rx_pools(struct net_device *netdev)
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
i, adapter->req_rx_add_entries_per_subcrq,
- be64_to_cpu(size_array[i]));
+ buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@ -655,7 +643,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool)
return -1;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
@@ -744,7 +732,7 @@ static int init_tx_pools(struct net_device *netdev)
int tx_subcrqs;
int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_subcrqs = adapter->num_active_tx_scrqs;
adapter->tx_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
@@ -894,7 +882,7 @@ static int ibmvnic_login(struct net_device *netdev)
"Received partial success, retrying...\n");
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_warn(netdev,
@@ -980,7 +968,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
return -1;
}
- if (adapter->init_done_rc == 1) {
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
/* Partial success, delay and re-send */
mdelay(1000);
resend = true;
@@ -1125,7 +1113,7 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
- send_map_query(adapter);
+ send_query_map(adapter);
rc = init_rx_pools(netdev);
if (rc)
@@ -1197,18 +1185,27 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
- return rc;
+ goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- return rc;
+ goto out;
}
}
rc = __ibmvnic_open(netdev);
+out:
+ /*
+ * If open fails due to a pending failover, set device state and
+ * return. Device operation will be handled by the reset routine.
+ */
+ if (rc && adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ rc = 0;
+ }
return rc;
}
@@ -1530,9 +1527,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- u64 *handle_array;
int index = 0;
u8 proto = 0;
+ u64 handle;
netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) {
@@ -1559,8 +1556,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@ -1672,14 +1668,14 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
goto tx_err_out;
}
- lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq_indirect(adapter, handle,
(u64)tx_buff->indir_dma,
(u64)num_entries);
dma_unmap_single(dev, tx_buff->indir_dma,
sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq(adapter, handle,
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
@@ -1828,9 +1824,13 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
int rc;
rc = 0;
- ether_addr_copy(adapter->mac_addr, addr->sa_data);
- if (adapter->state != VNIC_PROBED)
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (adapter->state != VNIC_PROBED) {
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+ }
return rc;
}
@@ -1874,7 +1874,7 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1931,6 +1931,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rwi->reset_reason);
rtnl_lock();
+ /*
+ * Now that we have the rtnl lock, clear any pending failover.
+ * This will ensure ibmvnic_open() has either completed or will
+ * block until failover is complete.
+ */
+ if (rwi->reset_reason == VNIC_RESET_FAILOVER)
+ adapter->failover_pending = false;
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
@@ -1992,7 +1999,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc) {
rc = IBMVNIC_INIT_FAILED;
goto out;
@@ -2108,7 +2115,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc)
return rc;
@@ -2211,6 +2218,13 @@ static void __ibmvnic_reset(struct work_struct *work)
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset(adapter, rwi, reset_state);
} else if (adapter->force_reset_recovery) {
+ /*
+ * Since we are doing a hard reset now, clear the
+ * failover_pending flag so we don't ignore any
+ * future MOBILITY or other resets.
+ */
+ adapter->failover_pending = false;
+
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@@ -2275,9 +2289,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
+ /*
+ * If failover is pending, don't schedule any other reset.
+ * Instead let the failover complete. If there is already a
+ * failover reset scheduled, we will detect and drop the
+ * duplicate reset when walking the ->rwi_list below.
+ */
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
- adapter->failover_pending) {
+ (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@@ -3312,7 +3332,7 @@ tx_failed:
return -1;
}
-static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
@@ -3583,8 +3603,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (test_bit(0, &adapter->resetting))
- ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ /* do not reset; report the failure and wait for passive init from the server */
}
dev_warn(dev, "Send error (rc=%d)\n", rc);
@@ -3595,14 +3614,31 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
+ int retries = 100;
+ int rc;
memset(&crq, 0, sizeof(crq));
crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
crq.generic.cmd = IBMVNIC_CRQ_INIT;
netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq);
+ do {
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc != H_CLOSED)
+ break;
+ retries--;
+ msleep(50);
+
+ } while (retries > 0);
+
+ if (rc) {
+ dev_err(dev, "Failed to send init request, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
@@ -3822,7 +3858,7 @@ static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
return ibmvnic_send_crq(adapter, &crq);
}
-static void send_map_query(struct ibmvnic_adapter *adapter)
+static void send_query_map(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3833,7 +3869,7 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
-static void send_cap_queries(struct ibmvnic_adapter *adapter)
+static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3950,6 +3986,113 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
+static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_tok =
+ dma_map_single(dev,
+ &adapter->ip_offload_buf,
+ buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map offload buffer\n");
+ return;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
+ crq.query_ip_offload.len = cpu_to_be32(buf_sz);
+ crq.query_ip_offload.ioba =
+ cpu_to_be32(adapter->ip_offload_tok);
+
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
+ struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ struct device *dev = &adapter->vdev->dev;
+ netdev_features_t old_hw_features = 0;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_ctrl_tok =
+ dma_map_single(dev,
+ ctrl_buf,
+ sizeof(adapter->ip_offload_ctrl),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
+ dev_err(dev, "Couldn't map ip offload control buffer\n");
+ return;
+ }
+
+ ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
+ ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
+ ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
+ ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
+ ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
+ ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
+ ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
+ ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
+ ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
+
+ /* large_rx disabled for now, additional features needed */
+ ctrl_buf->large_rx_ipv4 = 0;
+ ctrl_buf->large_rx_ipv6 = 0;
+
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
+
+ if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (buf->large_tx_ipv4)
+ adapter->netdev->hw_features |= NETIF_F_TSO;
+ if (buf->large_tx_ipv6)
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
+
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
+ crq.control_ip_offload.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4019,8 +4162,6 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- netdev_features_t old_hw_features = 0;
- union ibmvnic_crq crq;
int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
@@ -4070,74 +4211,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
buf->off_ipv6_ext_headers);
- adapter->ip_offload_ctrl_tok =
- dma_map_single(dev, &adapter->ip_offload_ctrl,
- sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
- dev_err(dev, "Couldn't map ip offload control buffer\n");
- return;
- }
-
- adapter->ip_offload_ctrl.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
- adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
- adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
- adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
- adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
- adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
- adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
- adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
- adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
-
- /* large_rx disabled for now, additional features needed */
- adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
- adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
-
- if (adapter->state != VNIC_PROBING) {
- old_hw_features = adapter->netdev->hw_features;
- adapter->netdev->hw_features = 0;
- }
-
- adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
-
- if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
-
- if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
-
- if ((adapter->netdev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->hw_features |= NETIF_F_RXCSUM;
-
- if (buf->large_tx_ipv4)
- adapter->netdev->hw_features |= NETIF_F_TSO;
- if (buf->large_tx_ipv6)
- adapter->netdev->hw_features |= NETIF_F_TSO6;
-
- if (adapter->state == VNIC_PROBING) {
- adapter->netdev->features |= adapter->netdev->hw_features;
- } else if (old_hw_features != adapter->netdev->hw_features) {
- netdev_features_t tmp = 0;
-
- /* disable features no longer supported */
- adapter->netdev->features &= adapter->netdev->hw_features;
- /* turn on features now supported if previously enabled */
- tmp = (old_hw_features ^ adapter->netdev->hw_features) &
- adapter->netdev->hw_features;
- adapter->netdev->features |=
- tmp & adapter->netdev->wanted_features;
- }
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
- crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
- crq.control_ip_offload.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
- ibmvnic_send_crq(adapter, &crq);
+ send_control_ip_offload(adapter);
}
static const char *ibmvnic_fw_err_cause(u16 cause)
@@ -4194,8 +4268,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
+ /* crq->change_mac_addr.mac_addr is the requested one;
+ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
+ */
ether_addr_copy(netdev->dev_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
+ ether_addr_copy(adapter->mac_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4263,7 +4342,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
be64_to_cpu(crq->request_capability_rsp.number);
}
- ibmvnic_send_req_caps(adapter, 1);
+ send_request_cap(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -4273,30 +4352,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
/* Done receiving requested capabilities, query IP offload support */
if (atomic_read(&adapter->running_cap_crqs) == 0) {
- union ibmvnic_crq newcrq;
- int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
- struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
- &adapter->ip_offload_buf;
-
adapter->wait_capability = false;
- adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
- buf_sz,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map offload buffer\n");
- return;
- }
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
- newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
- newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
- newcrq.query_ip_offload.ioba =
- cpu_to_be32(adapter->ip_offload_tok);
-
- ibmvnic_send_crq(adapter, &newcrq);
+ send_query_ip_offload(adapter);
}
}
@@ -4307,6 +4364,11 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
+ u64 *tx_handle_array;
+ u64 *rx_handle_array;
+ int num_tx_pools;
+ int num_rx_pools;
+ u64 *size_array;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -4341,6 +4403,30 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ /* variable buffer sizes are not supported, so just read the
+ * first entry.
+ */
+ adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
+
+ num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
+
+ for (i = 0; i < num_tx_pools; i++)
+ adapter->tx_scrq[i]->handle = tx_handle_array[i];
+
+ for (i = 0; i < num_rx_pools; i++)
+ adapter->rx_scrq[i]->handle = rx_handle_array[i];
+
+ adapter->num_active_tx_scrqs = num_tx_pools;
+ adapter->num_active_rx_scrqs = num_rx_pools;
+ release_login_rsp_buffer(adapter);
release_login_buffer(adapter);
complete(&adapter->init_done);
@@ -4550,7 +4636,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_crqs) == 0) {
adapter->wait_capability = false;
- ibmvnic_send_req_caps(adapter, 0);
+ send_request_cap(adapter, 0);
}
}
@@ -4605,7 +4691,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_1GBPS:
adapter->speed = SPEED_1000;
break;
- case IBMVNIC_10GBP:
+ case IBMVNIC_10GBPS:
adapter->speed = SPEED_10000;
break;
case IBMVNIC_25GBPS:
@@ -4620,6 +4706,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_100GBPS:
adapter->speed = SPEED_100000;
break;
+ case IBMVNIC_200GBPS:
+ adapter->speed = SPEED_200000;
+ break;
default:
if (netif_carrier_ok(netdev))
netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
@@ -4653,7 +4742,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
- adapter->failover_pending = false;
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
@@ -4715,7 +4803,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
be16_to_cpu(crq->version_exchange_rsp.version);
dev_info(dev, "Partner protocol version is %d\n",
ibmvnic_version);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
break;
case QUERY_CAPABILITY_RSP:
handle_query_cap_rsp(crq, adapter);
@@ -4812,9 +4900,9 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
return IRQ_HANDLED;
}
-static void ibmvnic_tasklet(void *data)
+static void ibmvnic_tasklet(struct tasklet_struct *t)
{
- struct ibmvnic_adapter *adapter = data;
+ struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
@@ -4949,8 +5037,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
- tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
@@ -4986,7 +5073,7 @@ map_failed:
return retrc;
}
-static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
@@ -4995,12 +5082,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues;
- old_num_tx_queues = adapter->req_tx_queues;
+ if (reset) {
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+ reinit_completion(&adapter->init_done);
+ }
- reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc) {
+ dev_err(dev, "Send crq init failed with error %d\n", rc);
+ return rc;
+ }
+
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
return -1;
@@ -5017,7 +5111,8 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
+ if (reset &&
+ test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
@@ -5045,48 +5140,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return rc;
}
-static int ibmvnic_init(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
- int rc;
-
- adapter->from_passive_init = false;
-
- adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Initialization sequence timed out\n");
- return -1;
- }
-
- if (adapter->init_done_rc) {
- release_crq_queue(adapter);
- return adapter->init_done_rc;
- }
-
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- rc = init_sub_crqs(adapter);
- if (rc) {
- dev_err(dev, "Initialization of sub crqs failed\n");
- release_crq_queue(adapter);
- return rc;
- }
-
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- dev_err(dev, "Failed to initialize sub crq irqs\n");
- release_crq_queue(adapter);
- }
-
- return rc;
-}
-
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -5149,7 +5202,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
@@ -5299,8 +5352,7 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f8416e1d4cf0..217dcc7ded70 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
#define IBMVNIC_10MBPS 0x40000000
#define IBMVNIC_100MBPS 0x20000000
#define IBMVNIC_1GBPS 0x10000000
-#define IBMVNIC_10GBP 0x08000000
+#define IBMVNIC_10GBPS 0x08000000
#define IBMVNIC_40GBPS 0x04000000
#define IBMVNIC_100GBPS 0x02000000
#define IBMVNIC_25GBPS 0x01000000
@@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
struct ibmvnic_adapter *adapter;
atomic_t used;
char name[32];
+ u64 handle;
};
struct ibmvnic_long_term_buff {
@@ -1075,6 +1076,7 @@ struct ibmvnic_adapter {
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+ u32 cur_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36da059388dc..8cc651d37a7f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -384,7 +384,7 @@ enum cb_status {
cb_ok = 0x2000,
};
-/**
+/*
* cb_command - Command Block flags
* @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
*/
@@ -1531,7 +1531,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
- if (!in_interrupt() && (err = e100_self_test(nic)))
+ if ((err = e100_self_test(nic)))
return err;
if ((err = e100_phy_init(nic)))
@@ -2155,7 +2155,7 @@ static int e100_rx_alloc_list(struct nic *nic)
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
- if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
+ if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
return -ENOMEM;
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
@@ -2593,7 +2593,7 @@ static void e100_diag_test(struct net_device *netdev,
{
struct ethtool_cmd cmd;
struct nic *nic = netdev_priv(netdev);
- int i, err;
+ int i;
memset(data, 0, E100_TEST_LEN * sizeof(u64));
data[0] = !mii_link_ok(&nic->mii);
@@ -2601,7 +2601,7 @@ static void e100_diag_test(struct net_device *netdev,
if (test->flags & ETH_TEST_FL_OFFLINE) {
/* save speed, duplex & autoneg settings */
- err = mii_ethtool_gset(&nic->mii, &cmd);
+ mii_ethtool_gset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_down(nic);
@@ -2610,7 +2610,7 @@ static void e100_diag_test(struct net_device *netdev,
data[4] = e100_loopback_test(nic, lb_phy);
/* restore speed, duplex & autoneg settings */
- err = mii_ethtool_sset(&nic->mii, &cmd);
+ mii_ethtool_sset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_up(nic);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4e7a0810eaeb..4c0c9433bd60 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -129,7 +129,6 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
*/
static void e1000_phy_init_script(struct e1000_hw *hw)
{
- u32 ret_val;
u16 phy_saved_data;
if (hw->phy_init_script) {
@@ -138,7 +137,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
/* Save off the current value of register 0x2F5B to be restored
* at the end of this routine.
*/
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
@@ -377,7 +376,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
{
u32 ctrl;
u32 ctrl_ext;
- u32 icr;
u32 manc;
u32 led_ctrl;
s32 ret_val;
@@ -502,7 +500,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr = er32(ICR);
+ er32(ICR);
/* If MWI was previously enabled, reenable it. */
if (hw->mac_type == e1000_82542_rev2_0) {
@@ -1897,7 +1895,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
/**
* e1000_config_mac_to_phy - sync phy and mac settings
* @hw: Struct containing variables accessed by shared code
- * @mii_reg: data to write to the MII control register
*
* Sets MAC speed and duplex settings to reflect those in the PHY
* The contents of the PHY register containing the needed information need to
@@ -2370,16 +2367,13 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
*/
s32 e1000_check_for_link(struct e1000_hw *hw)
{
- u32 rxcw = 0;
- u32 ctrl;
u32 status;
u32 rctl;
u32 icr;
- u32 signal = 0;
s32 ret_val;
u16 phy_data;
- ctrl = er32(CTRL);
+ er32(CTRL);
status = er32(STATUS);
/* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
@@ -2388,12 +2382,9 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
if ((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) {
- rxcw = er32(RXCW);
+ er32(RXCW);
if (hw->media_type == e1000_media_type_fiber) {
- signal =
- (hw->mac_type >
- e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
if (status & E1000_STATUS_LU)
hw->get_link_status = false;
}
@@ -2922,7 +2913,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
- * @data: data to write to the PHY
+ * @phy_data: data to write to the PHY
*
* Writes a value to a PHY register
*/
@@ -4410,17 +4401,9 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
static void e1000_clear_vfta(struct e1000_hw *hw)
{
u32 offset;
- u32 vfta_value = 0;
- u32 vfta_offset = 0;
- u32 vfta_bit_in_reg = 0;
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /* If the offset we want to clear is the same offset of the
- * manageability VLAN ID, then clear all bits except that of the
- * manageability unit
- */
- vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
E1000_WRITE_FLUSH();
}
}
@@ -4675,78 +4658,76 @@ s32 e1000_led_off(struct e1000_hw *hw)
*/
static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
- volatile u32 temp;
-
- temp = er32(CRCERRS);
- temp = er32(SYMERRS);
- temp = er32(MPC);
- temp = er32(SCC);
- temp = er32(ECOL);
- temp = er32(MCC);
- temp = er32(LATECOL);
- temp = er32(COLC);
- temp = er32(DC);
- temp = er32(SEC);
- temp = er32(RLEC);
- temp = er32(XONRXC);
- temp = er32(XONTXC);
- temp = er32(XOFFRXC);
- temp = er32(XOFFTXC);
- temp = er32(FCRUC);
-
- temp = er32(PRC64);
- temp = er32(PRC127);
- temp = er32(PRC255);
- temp = er32(PRC511);
- temp = er32(PRC1023);
- temp = er32(PRC1522);
-
- temp = er32(GPRC);
- temp = er32(BPRC);
- temp = er32(MPRC);
- temp = er32(GPTC);
- temp = er32(GORCL);
- temp = er32(GORCH);
- temp = er32(GOTCL);
- temp = er32(GOTCH);
- temp = er32(RNBC);
- temp = er32(RUC);
- temp = er32(RFC);
- temp = er32(ROC);
- temp = er32(RJC);
- temp = er32(TORL);
- temp = er32(TORH);
- temp = er32(TOTL);
- temp = er32(TOTH);
- temp = er32(TPR);
- temp = er32(TPT);
-
- temp = er32(PTC64);
- temp = er32(PTC127);
- temp = er32(PTC255);
- temp = er32(PTC511);
- temp = er32(PTC1023);
- temp = er32(PTC1522);
-
- temp = er32(MPTC);
- temp = er32(BPTC);
+ er32(CRCERRS);
+ er32(SYMERRS);
+ er32(MPC);
+ er32(SCC);
+ er32(ECOL);
+ er32(MCC);
+ er32(LATECOL);
+ er32(COLC);
+ er32(DC);
+ er32(SEC);
+ er32(RLEC);
+ er32(XONRXC);
+ er32(XONTXC);
+ er32(XOFFRXC);
+ er32(XOFFTXC);
+ er32(FCRUC);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+
+ er32(GPRC);
+ er32(BPRC);
+ er32(MPRC);
+ er32(GPTC);
+ er32(GORCL);
+ er32(GORCH);
+ er32(GOTCL);
+ er32(GOTCH);
+ er32(RNBC);
+ er32(RUC);
+ er32(RFC);
+ er32(ROC);
+ er32(RJC);
+ er32(TORL);
+ er32(TORH);
+ er32(TOTL);
+ er32(TOTH);
+ er32(TPR);
+ er32(TPT);
+
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(MPTC);
+ er32(BPTC);
if (hw->mac_type < e1000_82543)
return;
- temp = er32(ALGNERRC);
- temp = er32(RXERRC);
- temp = er32(TNCRS);
- temp = er32(CEXTERR);
- temp = er32(TSCTC);
- temp = er32(TSCTFC);
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
if (hw->mac_type <= e1000_82544)
return;
- temp = er32(MGTPRC);
- temp = er32(MGTPDC);
- temp = er32(MGTPTC);
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
}
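
The block above can drop the "temp =" assignments because er32() expands
to a readl() of a memory-mapped register, and for these statistics
registers the MMIO read itself is the clear-on-read side effect; the
returned value was never used. A hedged one-line illustration (register
offset macro assumed from the same family of headers):

	(void)readl(hw->hw_addr + E1000_CRCERRS);	/* the read clears the counter */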
/**
@@ -4778,8 +4759,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
/**
* e1000_update_adaptive - update adaptive IFS
* @hw: Struct containing variables accessed by shared code
- * @tx_packets: Number of transmits since last callback
- * @total_collisions: Number of collisions since last callback
*
* Called during the callback/watchdog routine to update IFS value based on
* the ratio of transmits to collisions.
@@ -5064,8 +5043,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
/**
* e1000_check_downshift - Check if Downshift occurred
* @hw: Struct containing variables accessed by shared code
- * @downshift: output parameter : 0 - No Downshift occurred.
- * 1 - Downshift occurred.
*
* returns: - E1000_ERR_XXX
* E1000_SUCCESS
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1e6ec081fd9d..5e28cf4fa2cd 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -199,8 +199,10 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
- * e1000_get_hw_dev - return device
- * used by hardware layer to print debugging information
+ * e1000_get_hw_dev - helper function for getting netdev
+ * @hw: pointer to HW struct
+ *
+ * return device used by hardware layer to print debugging information
*
**/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
@@ -354,7 +356,7 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
/**
* e1000_configure - configure the hardware for RX and TX
- * @adapter = private board structure
+ * @adapter: private board structure
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
@@ -534,7 +536,6 @@ void e1000_down(struct e1000_adapter *adapter)
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -3489,8 +3490,9 @@ exit:
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3787,7 +3789,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
/**
* e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct containing references to driver info
+ * @budget: budget given to driver for receive packets
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
@@ -3818,6 +3821,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ * @tx_ring: ring to clean
**/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
@@ -3933,7 +3937,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
@@ -3970,6 +3974,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
/**
* e1000_consume_page - helper function for jumbo Rx path
+ * @bi: software descriptor shadow data
+ * @skb: skb being modified
+ * @length: length of data being added
**/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
@@ -4003,6 +4010,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
/**
* e1000_tbi_adjust_stats
* @hw: Struct containing variables accessed by shared code
+ * @stats: pointer to stats struct
* @frame_len: The length of the frame in question
* @mac_addr: The Ethernet destination address of the frame in question
*
@@ -4548,6 +4556,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
+ * @rx_ring: pointer to ring struct
+ * @cleaned_count: number of new Rx buffers to try to allocate
**/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@@ -4662,7 +4672,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
- * @adapter:
+ * @adapter: address of board private structure
**/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
@@ -4718,10 +4728,10 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
}
/**
- * e1000_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * e1000_ioctl - handle ioctl calls
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -4737,9 +4747,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* e1000_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4b103cca8a39..be9c695dde12 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1072,7 +1072,6 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
/**
* e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
* @hw: pointer to the HW structure
- * @duplex: current duplex setting
*
* Configure the KMRN interface by applying last minute quirks for
* 10/100 operation.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8fc9208382c..03215b0aee4b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -895,6 +895,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
mask |= BIT(18);
break;
default:
@@ -1560,6 +1561,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b1447221669e..69a2329ea463 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -102,6 +102,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
+#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
+#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
+#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
#define E1000_REVISION_4 4
@@ -127,6 +131,7 @@ enum e1000_mac_type {
e1000_pch_cnp,
e1000_pch_tgp,
e1000_pch_adp,
+ e1000_pch_mtp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b2f2fcfdf732..9aa6fad8ed47 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -320,6 +320,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -464,6 +465,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -708,6 +710,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -743,7 +746,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/**
* __e1000_access_emi_reg_locked - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
*
@@ -1648,6 +1651,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2102,6 +2106,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -2266,7 +2271,7 @@ release:
/**
* e1000_configure_k1_ich8lan - Configure K1 power state
* @hw: pointer to the HW structure
- * @enable: K1 state to configure
+ * @k1_enable: K1 state to configure
*
* Configure the K1 power state based on the provided parameter.
* Assumes semaphore already acquired.
@@ -2405,8 +2410,10 @@ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
}
/**
- * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -2694,8 +2701,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
}
/**
- * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_lv_phy_workarounds_ich8lan - apply ich8-specific workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -3141,6 +3150,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4086,6 +4096,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
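
The ich8lan.c hunks above are the boilerplate half of enabling a new PCH generation in e1000e: the fresh e1000_pch_mtp enumerator must be added to every switch that dispatches on hw->mac.type, or Meteor Lake parts would fall through to the default arms and be rejected. A minimal user-space sketch of the pattern (enum values and helper name are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* illustrative subset of enum e1000_mac_type */
enum mac_type { PCH_CNP, PCH_TGP, PCH_ADP, PCH_MTP, MAC_OTHER };

static bool is_pch_gen(enum mac_type t)
{
        switch (t) {
        case PCH_CNP:
        case PCH_TGP:
        case PCH_ADP:
        case PCH_MTP:   /* every new generation must be listed here too */
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("MTP handled: %d\n", is_pch_gen(PCH_MTP));
        return 0;
}
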
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 664e8ccc88d2..b30f00891c03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -501,6 +501,7 @@ rx_ring_summary:
/**
* e1000_desc_unused - calculate the number of unused descriptors
+ * @ring: pointer to ring struct to perform calculation on
**/
static int e1000_desc_unused(struct e1000_ring *ring)
{
@@ -577,6 +578,7 @@ static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
* @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
@@ -601,8 +603,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
- * @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct sk_buff *skb)
@@ -673,6 +674,8 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
/**
* e1000_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -741,6 +744,8 @@ map_skb:
/**
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -844,6 +849,7 @@ no_buffers:
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
* @rx_ring: Rx descriptor ring
* @cleaned_count: number of buffers to allocate this pass
+ * @gfp: flags for allocation
**/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
@@ -933,6 +939,8 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
/**
* e1000_clean_rx_irq - Send received data up the network stack
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* The return value indicates whether actual cleaning was done; there
* is no guarantee that everything was cleaned.
@@ -1327,6 +1335,8 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* The return value indicates whether actual cleaning was done; there
* is no guarantee that everything was cleaned.
@@ -1517,9 +1527,6 @@ next_desc:
return cleaned;
}
-/**
- * e1000_consume_page - helper function
- **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
u16 length)
{
@@ -1531,7 +1538,9 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* The return value indicates whether actual cleaning was done; there
* is no guarantee that everything was cleaned.
@@ -1994,6 +2003,7 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
/**
* e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
*
* e1000_configure_msix sets up the hardware to properly
* generate MSI-X interrupts.
@@ -2072,6 +2082,7 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
*
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2127,6 +2138,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
*
* e1000_request_msix allocates MSI-X vectors and requests interrupts from the
* kernel.
@@ -2180,6 +2192,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
/**
* e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
*
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2240,6 +2253,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
@@ -2262,6 +2276,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
/**
* e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
@@ -2332,6 +2347,8 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
/**
* e1000_alloc_ring_dma - allocate memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring struct for which to allocate dma
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2507,7 +2524,6 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
/**
* e1000_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
* @itr_setting: current adapter->itr
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
@@ -3049,12 +3065,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
}
}
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
/**
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -3570,6 +3587,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3605,6 +3623,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
/**
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
+ * @config: timestamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3808,6 +3827,7 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
/**
* e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
*
* Power down the PHY so no link is implied when interface is down.
* The PHY cannot be powered down if management or WoL is active.
@@ -3820,6 +3840,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
/**
* e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ * @adapter: board private structure
*
* We want to clear all pending descriptors from the TX ring.
* zeroing happens when the HW reads the regs. We assign the ring itself as
@@ -3854,6 +3875,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ * @adapter: board private structure
*
* Mark all descriptors in the RX ring as consumed and disable the rx ring
*/
@@ -3886,6 +3908,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ * @adapter: board private structure
*
* In i219, the descriptor rings must be emptied before resetting the HW
* or before changing the device state to D3 during runtime (runtime PM).
@@ -3968,6 +3991,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
/**
* e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
*
* This function boots the hardware and enables some settings that
* require a configuration cycle of the hardware - those cannot be
@@ -4081,6 +4105,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4847,7 +4872,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
/**
* e1000_update_phy_info - timer call-back to update PHY info
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to the timer_list containing our private info (adapter)
*
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
@@ -5187,7 +5212,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
/**
* e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to the timer_list containing our private info (adapter)
**/
static void e1000_watchdog(struct timer_list *t)
{
@@ -5972,8 +5997,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hung queue (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6174,7 +6200,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_ioctl - control hardware time stamping
* @netdev: network interface device structure
- * @ifreq: interface request
+ * @ifr: interface request
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -7853,6 +7879,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
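
The four Meteor Lake I219 IDs are bound to board_pch_cnp, that is, they reuse the Cannon Point board configuration exactly as the Tiger Lake and Alder Lake entries above them do. A self-contained sketch of how such a sentinel-terminated ID table is scanned (struct name and lookup helper are illustrative; the real matching against struct pci_device_id is done by the PCI core):

#include <stdint.h>
#include <stdio.h>

struct id_entry { uint16_t vendor, device; int board; };

#define VID_INTEL     0x8086
#define BOARD_PCH_CNP 1

static const struct id_entry tbl[] = {
        { VID_INTEL, 0x550A, BOARD_PCH_CNP }, /* MTP I219-LM18 */
        { VID_INTEL, 0x550B, BOARD_PCH_CNP }, /* MTP I219-V18  */
        { VID_INTEL, 0x550C, BOARD_PCH_CNP }, /* MTP I219-LM19 */
        { VID_INTEL, 0x550D, BOARD_PCH_CNP }, /* MTP I219-V19  */
        { 0 }                                 /* terminator    */
};

static int lookup_board(uint16_t vendor, uint16_t device)
{
        for (const struct id_entry *e = tbl; e->vendor; e++)
                if (e->vendor == vendor && e->device == device)
                        return e->board;
        return -1;
}

int main(void)
{
        printf("0x550A -> board %d\n", lookup_board(VID_INTEL, 0x550A));
        return 0;
}
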
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index e11c877595fb..bdd9dc163f15 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2311,6 +2311,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
/**
* e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
* @page: page to access
+ * @reg: register to check
*
* Returns the phy address for the page requested.
**/
@@ -2728,6 +2729,7 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
* @offset: register offset to be read
* @data: pointer to the read data
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
* and stores the retrieved information in data. Release any acquired
@@ -2836,6 +2838,7 @@ s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
* @offset: register offset to write to
* @data: data to write at register offset
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 34b988d70488..f3f671311855 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -144,7 +144,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
/**
* e1000e_phc_getsynctime - Reads the current system/device cross timestamp
* @ptp: ptp clock structure
- * @cts: structure containing timestamp
+ * @xtstamp: structure containing timestamp
*
* Read device and system (ART) clock simultaneously and return the scaled
* clock values in ns.
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index d88dd41a9442..99b8252eb969 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi,
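
Here the open-coded prefetch pair collapses into net_prefetch(). The helper captures the same idea: the network headers of interest often straddle 128 bytes, so on CPUs whose cache line is shorter a second prefetch is issued for the following line. A sketch of its shape, assuming a 64-byte cache line and using the compiler builtin in place of the kernel's prefetch():

#define L1_CACHE_BYTES 64       /* example: a 64-byte-cache-line CPU */

static inline void net_prefetch_sketch(void *p)
{
        __builtin_prefetch(p);
#if L1_CACHE_BYTES < 128
        /* headers often straddle 128 B: pull in the next line too */
        __builtin_prefetch((char *)p + L1_CACHE_BYTES);
#endif
}

int main(void)
{
        char pkt[256] = { 0 };

        net_prefetch_sketch(pkt);       /* a hint only; no functional effect */
        return pkt[0];
}
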
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 140212bfe08b..9e3103fae723 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -221,8 +221,6 @@ static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
- WARN_ON(in_interrupt());
-
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a7e212d1caa2..537300e762f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -35,6 +35,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
@@ -90,7 +91,7 @@
#define I40E_OEM_RELEASE_MASK 0x0000ffff
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -133,7 +134,6 @@ enum i40e_state_t {
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
__I40E_MACVLAN_SYNC_PENDING,
- __I40E_UDP_FILTER_SYNC_PENDING,
__I40E_TEMP_LINK_POLLING,
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
@@ -478,8 +478,8 @@ struct i40e_pf {
struct list_head l3_flex_pit_list;
struct list_head l4_flex_pit_list;
- struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
- u16 pending_udp_bitmap;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
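
Redefining I40E_RX_DESC in terms of union i40e_rx_desc pairs with the switch to 16-byte Rx descriptors made elsewhere in this patch (rx_ctx.dsize = 0 and the i40e_16byte_rx_desc trace types in later hunks): the legacy 32-byte layout carries two reserved qwords the driver never consumed. A standalone sketch with deliberately simplified layouts makes the ring-memory saving concrete:

#include <stdint.h>
#include <stdio.h>

union rx_desc_16b { struct { uint64_t pkt_addr, hdr_addr; } read; };
union rx_desc_32b { struct { uint64_t pkt_addr, hdr_addr, rsvd1, rsvd2; } read; };

int main(void)
{
        /* a 512-descriptor ring shrinks from 16 KiB to 8 KiB of DMA memory */
        printf("16B: %zu bytes, 32B: %zu bytes\n",
               sizeof(union rx_desc_16b), sizeof(union rx_desc_32b));
        return 0;
}
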
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c897a2863e4f..593912b17609 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -541,6 +541,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+
fallthrough;
default:
break;
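
The new capability bit follows the usual AdminQ gating pattern: request-FEC support on X722 is advertised only when the firmware API version is at least 1.10 (I40E_MINOR_VER_FW_REQUEST_FEC_X722, defined as 0x000A a few hunks below). The comparison in isolation:

#include <stdbool.h>
#include <stdio.h>

#define MINOR_VER_FW_REQUEST_FEC 0x000A /* API 1.10 */

static bool fw_can_request_fec(unsigned int api_maj, unsigned int api_min)
{
        return api_maj > 1 ||
               (api_maj == 1 && api_min >= MINOR_VER_FW_REQUEST_FEC);
}

int main(void)
{
        printf("1.9: %d, 1.10: %d, 2.0: %d\n",
               fw_can_request_fec(1, 9), fw_can_request_fec(1, 10),
               fw_can_request_fec(2, 0));
        return 0;
}
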
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index edec3df78971..ee394aacef4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -85,8 +85,8 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c0c8efe42fce..1e960c3c7ef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -24,6 +24,8 @@
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+/* API version 1.10 for X722 devices adds ability to request FEC encoding */
+#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index befd3018183f..a2dba32383f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -278,8 +278,6 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
- * @client: pointer to a client struct in the client list.
- * @existing: if there was already an existing instance
*
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6ab52cbd697a..adc9e4fa4789 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3766,9 +3766,7 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
- * @buff: buffer for result
* @persist: True if start of LLDP should be persistent across power cycles
- * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -5395,6 +5393,7 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
@@ -5439,6 +5438,7 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d3ad2e3aa838..d7c13ca9be7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -604,10 +604,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
- rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.hdr_addr);
}
}
} else if (cnt == 3) {
@@ -625,10 +624,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 825c104ecba1..26ba1f3eb2d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -891,6 +891,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
break;
case I40E_PHY_TYPE_SGMII:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -1481,12 +1482,16 @@ static int i40e_set_fec_param(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 fec_cfg = 0;
- int err = 0;
if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
- hw->device_id != I40E_DEV_ID_25G_B) {
- err = -EPERM;
- goto done;
+ hw->device_id != I40E_DEV_ID_25G_B &&
+ hw->device_id != I40E_DEV_ID_KX_X722)
+ return -EPERM;
+
+ if (hw->mac.type == I40E_MAC_X722 &&
+ !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
+ return -EOPNOTSUPP;
}
switch (fecparam->fec) {
@@ -1508,14 +1513,10 @@ static int i40e_set_fec_param(struct net_device *netdev,
default:
dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
fecparam->fec);
- err = -EINVAL;
- goto done;
+ return -EINVAL;
}
- err = i40e_set_fec_cfg(netdev, fec_cfg);
-
-done:
- return err;
+ return i40e_set_fec_cfg(netdev, fec_cfg);
}
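
Besides the X722 additions, i40e_set_fec_param() is converted from the single-exit goto done style to direct returns, the idiomatic choice once no unwinding remains on the error paths. Both shapes side by side, as a neutral sketch:

#include <stdio.h>

/* before: single exit via goto; pays off only when cleanup is needed */
static int check_goto(int dev_ok, int fec_ok)
{
        int err = 0;

        if (!dev_ok) {
                err = -1;       /* stands in for -EPERM */
                goto done;
        }
        if (!fec_ok) {
                err = -22;      /* stands in for -EINVAL */
                goto done;
        }
done:
        return err;
}

/* after: early returns, since there is nothing to unwind */
static int check_early(int dev_ok, int fec_ok)
{
        if (!dev_ok)
                return -1;
        if (!fec_ok)
                return -22;
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_goto(1, 0), check_early(1, 0));
        return 0;       /* both print -22 */
}
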
static int i40e_nway_reset(struct net_device *netdev)
@@ -1967,7 +1968,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- /* If there is a AF_XDP UMEM attached to any of Rx rings,
+ /* If there is an AF_XDP page pool attached to any of the Rx rings,
* disallow changing the number of descriptors -- regardless
* if the netdev is running or not.
*/
@@ -4951,8 +4952,7 @@ flags_complete:
}
}
- if (((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ if (changed_flags & I40E_FLAG_RS_FEC &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -4960,6 +4960,15 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B &&
+ pf->hw.device_id != I40E_DEV_ID_KX_X722) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
* changed in the code above.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e433fdbf2c3..4f8a2154b93f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -287,6 +287,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
/**
* i40e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number timing out
*
* If any port has noticed a Tx timeout, it is likely that the whole
* device is munged, not just the one netdev port, so go for the full
@@ -1609,6 +1610,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
* i40e_config_rss_aq - Prepare for RSS using AQ commands
* @vsi: vsi structure
* @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
**/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
@@ -3122,12 +3125,12 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
* @ring: The Tx or Rx ring
*
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
**/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
int qid = ring->queue_index;
@@ -3138,7 +3141,7 @@ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
@@ -3157,7 +3160,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
if (ring_is_xdp(ring))
- ring->xsk_umem = i40e_xsk_umem(ring);
+ ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3280,12 +3283,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
kfree(ring->rx_bi);
- ring->xsk_umem = i40e_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
ret = i40e_alloc_rx_bi_zc(ring);
if (ret)
return ret;
- ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3320,8 +3324,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- /* use 32 byte descriptors */
- rx_ctx.dsize = 1;
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
/* descriptor type is always zero
* rx_ctx.dtype = 0;
@@ -3368,8 +3372,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
@@ -3380,7 +3384,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
*/
dev_info(&vsi->back->pdev->dev,
"Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
ring->queue_index, pf_q);
}
@@ -5814,7 +5818,6 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
/**
* i40e_channel_setup_queue_map - Setup a channel queue map
* @pf: ptr to PF device
- * @vsi: the VSI being setup
* @ctxt: VSI context structure
* @ch: ptr to channel structure
*
@@ -6057,8 +6060,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
/**
* i40e_setup_channel - setup new channel using uplink element
* @pf: ptr to PF device
- * @type: type of channel to be created (VMDq2/VF)
- * @uplink_seid: underlying HW switching element (VEB) ID
+ * @vsi: pointer to the VSI to set up the channel within
* @ch: ptr to channel structure
*
* Setup new channel (VSI) based on specified type (VMDq2/VF)
@@ -6623,6 +6625,25 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
+ } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
+ req_fec = "None";
+ fec = "None";
+ an = "False";
+
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = "True";
+
+ if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = "CL74 FC-FEC/BASE-R";
+
+ if (pf->hw.phy.link_info.req_fec_info &
+ I40E_AQ_REQUEST_FEC_KR)
+ req_fec = "CL74 FC-FEC/BASE-R";
+
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
} else {
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
@@ -6689,7 +6710,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- WARN_ON(in_interrupt());
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
@@ -7779,7 +7799,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct flow_cls_offload
+ * @f: Pointer to struct flow_cls_offload
* @filter: Pointer to cloud filter structure
*
**/
@@ -8160,8 +8180,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
/**
* i40e_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @np: net device to configure
+ * @cls_flower: offload data
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
struct flow_cls_offload *cls_flower)
@@ -8462,9 +8482,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
- WARN_ON(in_interrupt());
-
-
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9585,6 +9602,7 @@ end_reconstitute:
/**
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
+ * @list_type: AQ capability to be queried
**/
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
@@ -10383,106 +10401,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(u8 type)
-{
- switch (type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- return "vxlan";
- case UDP_TUNNEL_TYPE_GENEVE:
- return "geneve";
- default:
- return "unknown";
- }
-}
-
-/**
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
-{
- int i;
-
- /* loop through and set pending bit for all active UDP filters */
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].port)
- pf->pending_udp_bitmap |= BIT_ULL(i);
- }
-
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-}
-
-/**
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 filter_index, type;
- u16 port;
- int i;
-
- if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
- return;
-
- /* acquire RTNL to maintain state of flags and port requests */
- rtnl_lock();
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
- struct i40e_udp_port_config *udp_port;
- i40e_status ret = 0;
-
- udp_port = &pf->udp_ports[i];
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
-
- port = READ_ONCE(udp_port->port);
- type = READ_ONCE(udp_port->type);
- filter_index = READ_ONCE(udp_port->filter_index);
-
- /* release RTNL while we wait on AQ command */
- rtnl_unlock();
-
- if (port)
- ret = i40e_aq_add_udp_tunnel(hw, port,
- type,
- &filter_index,
- NULL);
- else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
- ret = i40e_aq_del_udp_tunnel(hw, filter_index,
- NULL);
-
- /* reacquire RTNL so we can update filter_index */
- rtnl_lock();
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(type),
- port ? "add" : "delete",
- port,
- filter_index,
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- if (port) {
- /* failed to add, just reset port,
- * drop pending bit for any deletion
- */
- udp_port->port = 0;
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
- }
- } else if (port) {
- /* record filter index on success */
- udp_port->filter_index = filter_index;
- }
- }
- }
-
- rtnl_unlock();
-}
-
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -10522,7 +10440,6 @@ static void i40e_service_task(struct work_struct *work)
pf->vsi[pf->lan_vsi]);
}
i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
} else {
i40e_reset_subtask(pf);
}
@@ -10546,7 +10463,7 @@ static void i40e_service_task(struct work_struct *work)
/**
* i40e_service_timer - timer callback
- * @data: pointer to PF struct
+ * @t: timer list pointer
**/
static void i40e_service_timer(struct timer_list *t)
{
@@ -11185,11 +11102,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -11222,7 +11138,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int err, v_idx, num_q_vectors, current_cpu;
+ int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -11232,15 +11148,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- current_cpu = cpumask_first(cpu_online_mask);
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
- if (unlikely(current_cpu >= nr_cpu_ids))
- current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -12228,131 +12139,48 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-/**
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
- * @pf: board private structure
- * @port: The UDP port to look up
- *
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
- **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
-{
- u8 i;
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- /* Do not report ports with pending deletions as
- * being available.
- */
- if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
- continue;
- if (pf->udp_ports[i].port == port)
- return i;
- }
-
- return i;
-}
-
-/**
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 next_idx;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n", port);
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u8 type, filter_index;
+ i40e_status ret;
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- port);
- return;
- }
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ I40E_AQC_TUNNEL_TYPE_NGE;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
- return;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
- break;
- default:
- return;
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].port = port;
- pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
+ return 0;
}
-/**
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
- goto not_found;
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ i40e_status ret;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
- goto not_found;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
- goto not_found;
- break;
- default:
- goto not_found;
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+ netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].port = 0;
-
- /* Toggle pending bit instead of setting it. This way if we are
- * deleting a port that has yet to be added we just clear the pending
- * bit and don't have to worry about it.
- */
- pf->pending_udp_bitmap ^= BIT_ULL(idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-
- return;
-not_found:
- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- port);
+ return 0;
}
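
These two callbacks replace the driver's private port bookkeeping: the udp_tunnel_nic core now tracks which table slot holds which port, and the driver only talks to the firmware. The filter index returned by i40e_aq_add_udp_tunnel() is stashed with udp_tunnel_nic_set_port_priv() and handed back on removal as ti->hw_priv, so no driver-side array survives. A toy sketch of that round-trip, with the core's storage reduced to a plain array:

#include <stdio.h>

/* toy stand-in for the per-entry private word the core keeps */
static unsigned long hw_priv[16];

static int set_port(unsigned int idx, unsigned short port)
{
        unsigned char fw_index = 7;     /* pretend the firmware returned 7 */

        hw_priv[idx] = fw_index;        /* udp_tunnel_nic_set_port_priv()  */
        printf("port %u -> fw filter %u\n", (unsigned int)port, fw_index);
        return 0;
}

static int unset_port(unsigned int idx)
{
        /* the core hands the stored value back as ti->hw_priv */
        printf("deleting fw filter %lu\n", hw_priv[idx]);
        return 0;
}

int main(void)
{
        set_port(3, 4789);      /* the IANA VXLAN port, as an example */
        unset_port(3);
        return 0;
}
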
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -12379,6 +12207,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -12644,7 +12473,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
*/
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->xdp_rings[i]->xsk_umem)
+ if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
@@ -12923,8 +12752,8 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -12957,8 +12786,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
- .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -13022,6 +12851,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
+
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= hw_enc_features;
@@ -14422,7 +14253,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
/* repopulate tunnel port filters */
- i40e_sync_udp_filters(pf);
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
return ret;
}
@@ -15151,6 +14982,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_switch_setup;
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ UDP_TUNNEL_TYPE_GENEVE;
+
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
* the other PFs. We allocate space for more than the guarantee with
@@ -15160,6 +14999,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
else
pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
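
Registration is a one-time affair at probe: fill struct udp_tunnel_nic_info with the callbacks, a shared-state pointer so all netdevs of the PF use one port table, and a single table accepting both VXLAN and GENEVE; i40e_config_netdev() then points netdev->udp_tunnel_nic_info at it, as the hunk further up shows. A condensed, self-contained mirror of that wiring (types simplified; 16 entries assumed for I40E_MAX_PF_UDP_OFFLOAD_PORTS):

#include <stdio.h>

struct tunnel_table { unsigned int n_entries, tunnel_types; };
struct tunnel_info {
        int (*set_port)(int table, int idx, unsigned short port);
        int (*unset_port)(int table, int idx);
        struct tunnel_table tables[1];
};

#define TYPE_VXLAN  (1u << 0)
#define TYPE_GENEVE (1u << 1)
#define MAX_PORTS   16  /* assumed value of I40E_MAX_PF_UDP_OFFLOAD_PORTS */

static int demo_set(int t, int i, unsigned short p)
{
        printf("program port %u in slot [%d][%d]\n", (unsigned int)p, t, i);
        return 0;
}

static int demo_unset(int t, int i)
{
        printf("clear slot [%d][%d]\n", t, i);
        return 0;
}

int main(void)
{
        struct tunnel_info info = {
                .set_port   = demo_set,
                .unset_port = demo_unset,
                .tables[0]  = { .n_entries = MAX_PORTS,
                                .tunnel_types = TYPE_VXLAN | TYPE_GENEVE },
        };

        info.set_port(0, 0, 4789);
        info.unset_port(0, 0);
        return 0;
}
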
/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ff7b19c6bc73..7a879614ca55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -259,7 +259,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @pf: The PF private data structure
- * @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
* dropped an Rx packet that was timestamped when the ring is full. The
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 424f02077e2e..b5b12299931f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* i40e_trace() macro enables shared code to refer to trace points
* like:
*
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e5c566ceb01..d43ce13a93c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -533,11 +533,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
- struct i40e_32b_rx_wb_qw0 *qw0;
+ struct i40e_16b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
@@ -636,7 +636,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
i40e_xsk_clean_tx_ring(tx_ring);
} else {
/* ring already cleared, nothing to do */
@@ -1335,7 +1335,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -1369,7 +1369,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
i40e_clear_rx_bi_zc(rx_ring);
else
i40e_clear_rx_bi(rx_ring);
@@ -1418,7 +1418,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1755,7 +1755,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -1953,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- prefetchw(rx_buffer->page);
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1992,10 +1991,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2075,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
+
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -2300,6 +2295,19 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -2579,7 +2587,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
@@ -2607,7 +2615,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
i40e_clean_rx_irq(ring, budget_per_ring);
@@ -3503,7 +3511,7 @@ dma_error:
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
- * @xdp: data to transmit
+ * @xdpf: data to transmit
* @xdp_ring: XDP Tx ring
**/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
@@ -3698,7 +3706,9 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* i40e_xdp_xmit - Implements ndo_xdp_xmit
* @dev: netdev
- * @xdp: XDP buffer
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
*
* Returns number of frames successfully sent. Frames that fail are
* freed via the XDP return API.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4036893d6825..2feed920ef8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -110,7 +110,7 @@ enum i40e_dyn_idx_t {
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define i40e_rx_desc i40e_16byte_rx_desc
#define I40E_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -388,7 +388,7 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -482,7 +482,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there are not enough descriptors available in this ring, since we need at least
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 667c4dc4b39f..19da3b22160f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -21,9 +21,9 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
-/**
+/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
- **/
+ */
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
@@ -37,7 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
/**
* i40e_update_tx_stats - Update the egress statistics for the Tx ring
* @tx_ring: Tx ring to update
- * @total_packet: total packets sent
+ * @total_packets: total packets sent
* @total_bytes: total bytes sent
**/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
@@ -99,19 +99,6 @@ static inline bool i40e_rx_is_programming_status(u64 qword1)
return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
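
i40e_inc_ntc() leaves this shared header because, after the AF_XDP buffer-pool split, only i40e_txrx.c still uses it; it reappears there, no longer inline, in the hunk further up. Stripped of the descriptor prefetch, the helper is just a wrapping ring-index increment:

#include <stdio.h>

/* wrap-around increment for a ring index, as in i40e_inc_ntc() */
static unsigned int ring_advance(unsigned int idx, unsigned int count)
{
        unsigned int next = idx + 1;

        return next < count ? next : 0; /* wrap to the start of the ring */
}

int main(void)
{
        printf("%u %u\n", ring_advance(510, 512), ring_advance(511, 512));
        return 0;       /* prints "511 0" */
}
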
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 52410d609ba1..c0bdc666f557 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -595,6 +595,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
+#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
u64 flags;
/* Used in set switch config AQ command */
@@ -628,7 +629,7 @@ union i40e_16byte_rx_desc {
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
- struct {
+ struct i40e_16b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -647,6 +648,9 @@ union i40e_16byte_rx_desc {
__le64 status_error_len;
} qword1;
} wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
};
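
The new raw member lets completion handling read a writeback descriptor as two plain 64-bit words; i40e_fd_handle_status() above now takes qword0_raw and decodes it through struct i40e_16b_rx_wb_qw0. A simplified illustration of the idiom (field names shortened):

#include <stdint.h>
#include <stdio.h>

/* simplified: a descriptor readable as fields or as raw qwords */
union rx_desc {
        struct { uint64_t status_lo, status_error_len; } wb;
        struct { uint64_t qword[2]; } raw;
};

int main(void)
{
        union rx_desc d = { .raw = { { 0x1234, 0x5678 } } };

        /* fetch once as raw qwords, decode via the structured view */
        printf("qw0=%llx len=%llx\n",
               (unsigned long long)d.raw.qword[0],
               (unsigned long long)d.wb.status_error_len);
        return 0;
}
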
union i40e_32byte_rx_desc {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 47bfb2e95e2d..4919d22d7b6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2244,7 +2244,8 @@ error_param:
}
/**
- * i40e_validate_queue_map
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
* @vsi_id: vsi id
* @queuemap: Tx or Rx queue map
*
@@ -2712,6 +2713,10 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ if (is_valid_ether_addr(al->list[i].addr) &&
+ is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr,
+ al->list[i].addr);
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2739,6 +2744,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
+ bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
i40e_status ret = 0;
@@ -2758,6 +2764,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+ if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
+ was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -2778,10 +2786,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
+ if (vf->trusted && was_unimac_deleted) {
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ u8 *macaddr = NULL;
+ int bkt;
+
+ /* set last unicast mac address as default */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ if (is_valid_ether_addr(f->macaddr))
+ macaddr = f->macaddr;
+ }
+ if (macaddr)
+ ether_addr_copy(vf->default_lan_addr.addr, macaddr);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ }
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}
/**
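
The two virtchnl hunks above keep vf->default_lan_addr coherent from both directions: the add path latches the first valid unicast address when none is set yet, and the delete path re-elects the last remaining valid filter once a trusted VF removes its primary address. A self-contained sketch of that selection policy, with plain arrays standing in for the driver's filter hash and the validity check simplified to non-zero:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Validity reduced to "non-zero" here; the driver additionally
 * rejects multicast/broadcast via is_valid_ether_addr(). */
static bool addr_is_zero(const uint8_t *a)
{
    static const uint8_t zero[ETH_ALEN];
    return memcmp(a, zero, ETH_ALEN) == 0;
}

/* Add path: latch the first usable address as the default. */
static void maybe_set_default(uint8_t def[ETH_ALEN], const uint8_t *addr)
{
    if (!addr_is_zero(addr) && addr_is_zero(def))
        memcpy(def, addr, ETH_ALEN);
}

/* Delete path: the default was removed, so keep the last remaining
 * usable filter as the new default. */
static void reelect_default(uint8_t def[ETH_ALEN],
                            const uint8_t filters[][ETH_ALEN], size_t n)
{
    const uint8_t *last = NULL;
    size_t i;

    for (i = 0; i < n; i++)
        if (!addr_is_zero(filters[i]))
            last = filters[i];
    if (last)
        memcpy(def, last, ETH_ALEN);
}

int main(void)
{
    uint8_t def[ETH_ALEN] = { 0 };
    uint8_t filters[2][ETH_ALEN] = {
        { 0x02, 0, 0, 0, 0, 0x01 },
        { 0x02, 0, 0, 0, 0, 0x02 },
    };

    maybe_set_default(def, filters[0]);   /* default -> ...:01 */
    reelect_default(def, filters, 2);     /* default -> ...:02 */
    printf("default ends in %02x\n", def[5]);
    return 0;
}
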
@@ -3160,8 +3183,8 @@ err:
/**
* i40e_validate_cloud_filter
- * @mask: mask for TC filter
- * @data: data for TC filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
*
* This function validates cloud filter programmed as TC filter for ADq
**/
@@ -3294,7 +3317,7 @@ err:
/**
* i40e_find_vsi_from_seid - searches for the vsi with the given seid
* @vf: pointer to the VF info
- * @seid - seid of the vsi it is searching for
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 8ce57b507a21..567fd67e900e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -29,14 +29,16 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
* @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
@@ -53,7 +55,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -80,21 +82,22 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
* @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to disassociate buffer pool from
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
int err;
- umem = xdp_get_umem_from_qid(netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -106,7 +109,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -118,20 +121,21 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate buffer pool to/from
*
- * This function enables or disables a UMEM to a certain ring.
+ * This function enables or disables a buffer pool on a certain ring.
*
* Returns 0 on success, <0 on failure
**/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
- i40e_xsk_umem_disable(vsi, qid);
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
}
/**
@@ -191,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -254,6 +258,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+}
+
+/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -274,13 +290,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- !i40e_alloc_rx_buffers_zc(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
-
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -310,7 +319,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi);
+ xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
@@ -355,14 +364,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
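
Two structural changes land in the zero-copy Rx path here: i40e_inc_ntc moves into i40e_xsk.c as a branch-based index wrap (the descriptor prefetch is dropped), and the buffer refill moves out of the per-descriptor loop so it runs at most once per poll. A rough userspace model of both patterns (threshold and names are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_COUNT       512
#define REFILL_THRESHOLD 32    /* stand-in for I40E_RX_BUFFER_WRITE */

/* Branch-based wrap: the index only ever advances by one, so a
 * compare is enough and no modulo is needed. */
static uint32_t inc_wrap(uint32_t idx)
{
    uint32_t next = idx + 1;

    return next < RING_COUNT ? next : 0;
}

static bool refill(unsigned int n)
{
    (void)n;    /* allocation stub */
    return true;
}

int main(void)
{
    uint32_t ntc = RING_COUNT - 2;
    unsigned int cleaned = 0;
    bool failure = false;
    int budget;

    for (budget = 0; budget < 64; budget++) {
        /* ... process one descriptor ... */
        ntc = inc_wrap(ntc);    /* wraps 511 -> 0 */
        cleaned++;
    }

    /* Refill once after the loop rather than inside it, mirroring
     * the reworked i40e_clean_rx_irq_zc. */
    if (cleaned >= REFILL_THRESHOLD)
        failure = !refill(cleaned);

    printf("ntc=%u cleaned=%u failure=%d\n", ntc, cleaned, failure);
    return 0;
}
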
@@ -385,11 +397,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -416,7 +428,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
@@ -448,7 +460,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
struct i40e_tx_buffer *tx_bi;
@@ -488,13 +500,13 @@ skip:
tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -526,7 +538,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
if (queue_id >= vsi->num_queue_pairs)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -565,7 +577,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
struct i40e_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -585,14 +597,15 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}
/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
* @vsi: vsi
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
@@ -600,7 +613,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_umem_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}
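
Most of the i40e_xsk.c diff is the mechanical UMEM-to-buffer-pool rename (xsk_buff_dma_map becomes xsk_pool_dma_map, xdp_get_umem_from_qid becomes xsk_get_pool_from_qid, and so on); the enable path keeps the same shape throughout: validate the qid, DMA-map the pool, quiesce the queue pair if the interface is running, then re-enable. A stubbed-out skeleton of that ordering (helpers here are placeholders, not kernel API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pool { int id; };

static int  pool_dma_map(struct pool *p)   { (void)p; return 0; }
static void pool_dma_unmap(struct pool *p) { (void)p; }
static int  queue_pair_disable(int qid)    { (void)qid; return 0; }
static int  queue_pair_enable(int qid)     { (void)qid; return 0; }

#define NUM_QUEUES 8

/* Order matters: map DMA before touching the queue, restart the
 * queue pair last, and unwind the mapping on any failure. */
static int pool_enable(struct pool *p, int qid, bool if_running)
{
    int err;

    if (qid >= NUM_QUEUES)
        return -EINVAL;

    err = pool_dma_map(p);
    if (err)
        return err;

    if (if_running) {
        err = queue_pair_disable(qid);
        if (err)
            goto err_unmap;
        /* ... point the ring at the pool's buffers here ... */
        err = queue_pair_enable(qid);
        if (err)
            goto err_unmap;
    }
    return 0;

err_unmap:
    pool_dma_unmap(p);
    return err;
}

int main(void)
{
    struct pool p = { 0 };

    printf("enable -> %d\n", pool_enable(&p, 3, true));
    return 0;
}
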
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index c524c142127f..7adfd8539247 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -5,12 +5,12 @@
#define _I40E_XSK_H_
struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index baf2fe26f302..1f60518eb0e5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -85,8 +85,8 @@ struct iavf_adminq_info {
/**
* iavf_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cf539db79af9..95543dfd4fe7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -147,6 +147,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
/**
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that is timing out
**/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -2572,8 +2573,8 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
}
/**
- * iavf_del_all_cloud_filters - delete all cloud filters
- * on the traffic classes
+ * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
**/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
@@ -2592,7 +2593,7 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function processes the config information provided by the
* user to configure traffic classes/queue channels and packages the
@@ -2690,7 +2691,7 @@ exit:
/**
* iavf_parse_cls_flower - Parse tc flower filters provided by kernel
* @adapter: board private structure
- * @cls_flower: pointer to struct flow_cls_offload
+ * @f: pointer to struct flow_cls_offload
* @filter: pointer to cloud filter structure
*/
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
@@ -3064,8 +3065,8 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @adapter: board private structure
+ * @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
@@ -3112,7 +3113,7 @@ static LIST_HEAD(iavf_block_cb_list);
* iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type: type of offload
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function is the callback to ndo_setup_tc in the
* netdev_ops.
@@ -3768,8 +3769,7 @@ err_dma:
/**
* iavf_suspend - Power management suspend routine
- * @pdev: PCI device information struct
- * @state: unused
+ * @dev_d: device info pointer
*
* Called when the system (VM) is entering sleep/suspend.
**/
@@ -3799,7 +3799,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
/**
* iavf_resume - Power management resume routine
- * @pdev: PCI device information struct
+ * @dev_d: device info pointer
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 1058e68a02b4..82fda6f5abf0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* iavf_trace() macro enables shared code to refer to trace points
* like:
*
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ca041b39ffda..256fa07d54d5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1309,10 +1309,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -1376,10 +1373,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
+
/* build an skb around the page buffer */
skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
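
net_prefetch() folds the open-coded pair of prefetches into one helper, so every Rx path stops repeating the L1_CACHE_BYTES conditional. A simplified model of what the helper encapsulates (the real one lives in include/linux/netdevice.h; the cache-line value below is illustrative):

#include <stdio.h>

#define L1_CACHE_BYTES 64    /* illustrative; the kernel supplies this */

static inline void prefetch(const void *p)
{
#ifdef __GNUC__
    __builtin_prefetch(p);
#else
    (void)p;
#endif
}

/* One helper instead of an #if block at every Rx call site. */
static inline void net_prefetch_model(void *p)
{
    prefetch(p);
#if L1_CACHE_BYTES < 128
    /* Network headers often cross 64 bytes, so pull a second
     * cache line on small-line machines. */
    prefetch((char *)p + L1_CACHE_BYTES);
#endif
}

int main(void)
{
    char pkt[256] = { 0 };

    net_prefetch_model(pkt);
    printf("%d\n", pkt[0]);
    return 0;
}
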
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index dd3348f9da9d..e5b9ba42dd00 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -454,7 +454,6 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
/**
* iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
+ * there are not enough descriptors available in this ring since we need at least
@@ -514,6 +513,7 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
return count != IAVF_MAX_BUFFER_TXD;
}
/**
+ * txring_txq - helper to convert from a ring to a queue
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fe140ff38f74..a0723831c4e4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -284,6 +284,10 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+ bool devlink_port_registered;
+
u16 max_frame;
u16 rx_buf_len;
@@ -321,9 +325,9 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ struct xsk_buff_pool **xsk_pools;
+ u16 num_xsk_pools_used;
+ u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -375,9 +379,6 @@ enum ice_pf_flags {
struct ice_pf {
struct pci_dev *pdev;
- /* devlink port data */
- struct devlink_port devlink_port;
-
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
@@ -507,25 +508,25 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
}
/**
- * ice_xsk_umem - get XDP UMEM bound to a ring
- * @ring - ring to use
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: ring to use
*
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xdp_umem **umems = ring->vsi->xsk_umems;
+ struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return umems[qid];
+ return pools[qid];
}
/**
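
ice_xsk_pool() has to normalize the queue index before the table lookup: XDP Tx rings are numbered after the regular rings, so they subtract num_xdp_txq to land back on the qid their pool was registered under. A minimal model of that index fold (names hypothetical):

#include <stdio.h>

struct pool;

struct vsi_model {
    struct pool **pools;          /* one slot per data queue pair */
    unsigned short num_pools;
    unsigned short num_xdp_txq;
};

static struct pool *xsk_pool_lookup(const struct vsi_model *vsi,
                                    unsigned short q_index, int is_xdp_ring)
{
    unsigned short qid = q_index;

    /* XDP Tx rings are numbered after the regular rings; fold the
     * index back onto the pool table they share with Rx. */
    if (is_xdp_ring)
        qid -= vsi->num_xdp_txq;

    if (!vsi->pools || qid >= vsi->num_pools)
        return NULL;
    return vsi->pools[qid];
}

int main(void)
{
    struct vsi_model vsi = { 0 };

    /* No pools registered: the lookup degrades to NULL, just as
     * ice_xsk_pool() does for a non-AF_XDP queue. */
    printf("%p\n", (void *)xsk_pool_lookup(&vsi, 3, 1));
    return 0;
}
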
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ba9375218fef..b06fbe99d8e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1422,7 +1422,7 @@ struct ice_aqc_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/**
+/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
@@ -1826,8 +1826,8 @@ struct ice_aqc_event_lan_overflow {
* @opcode: AQ command opcode
* @datalen: length in bytes of indirect/external data buffer
* @retval: return value from firmware
- * @cookie_h: opaque data high-half
- * @cookie_l: opaque data low-half
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
- ring->xsk_umem = ice_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
- dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ if (ring->xsk_pool) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
err = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err)
- dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 111d6bfe4222..511da59bd6f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,18 +6,14 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
- dsn[0], dsn[1], dsn[2], dsn[3],
- dsn[4], dsn[5], dsn[6], dsn[7]);
-
- return 0;
+ snprintf(buf, len, "%8phD", dsn);
}
static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
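
ice_info_get_dsn() can no longer fail, so it returns void, and the eight-argument snprintf collapses into the kernel's %phD specifier, which prints a byte array as dash-separated hex. Plain C has no %phD; an equivalent for comparison:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Format 8 bytes as "aa-bb-cc-dd-ee-ff-00-11" -- what a single
 * "%8phD" produces in the kernel's printf. */
static void format_dsn(char *buf, size_t len, const uint8_t dsn[8])
{
    snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
             dsn[0], dsn[1], dsn[2], dsn[3],
             dsn[4], dsn[5], dsn[6], dsn[7]);
}

int main(void)
{
    uint8_t dsn[8] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11 };
    char buf[32];

    format_dsn(buf, sizeof(buf), dsn);
    puts(buf);
    return 0;
}
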
@@ -106,6 +102,13 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+{
+ snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+
+ return 0;
+}
+
static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
@@ -150,6 +153,7 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
running("fw.netlist", ice_info_netlist_ver),
running("fw.netlist.build", ice_info_netlist_build),
};
@@ -180,11 +184,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
return err;
}
- err = ice_info_get_dsn(pf, buf, sizeof(buf));
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
- return err;
- }
+ ice_info_get_dsn(pf, buf, sizeof(buf));
err = devlink_info_serial_number_put(req, buf);
if (err) {
@@ -233,8 +233,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
/**
* ice_devlink_flash_update - Update firmware stored in flash on the device
* @devlink: pointer to devlink associated with device to update
- * @path: the path of the firmware file to use via request_firmware
- * @component: name of the component to update, or NULL
+ * @params: flash update parameters
* @extack: netlink extended ACK structure
*
* Perform a device flash update. The bulk of the update logic is contained
@@ -243,38 +242,52 @@ static int ice_devlink_info_get(struct devlink *devlink,
* Returns: zero on success, or an error code on failure.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
const struct firmware *fw;
+ u8 preservation;
int err;
- /* individual component update is not yet supported */
- if (component)
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
return -EOPNOTSUPP;
+ }
if (!hw->dev_caps.common_cap.nvm_unified_update) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
- err = ice_check_for_pending_update(pf, component, extack);
+ err = ice_check_for_pending_update(pf, NULL, extack);
if (err)
return err;
- err = request_firmware(&fw, path, dev);
+ err = request_firmware(&fw, params->file_name, dev);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
return err;
}
+ dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);
+
devlink_flash_update_begin_notify(devlink);
- devlink_flash_update_status_notify(devlink, "Preparing to flash",
- component, 0, 0);
- err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, preservation, extack);
devlink_flash_update_end_notify(devlink);
release_firmware(fw);
@@ -283,6 +296,7 @@ ice_devlink_flash_update(struct devlink *devlink, const char *path,
}
static const struct devlink_ops ice_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
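
The flash handler now decodes devlink's overwrite mask into an NVM preservation level: an empty mask preserves everything, SETTINGS alone preserves the vital identifiers, SETTINGS plus IDENTIFIERS preserves nothing, and any other combination is rejected. The decode in isolation (bit values below are illustrative, not the devlink constants):

#include <errno.h>
#include <stdio.h>

#define OVERWRITE_SETTINGS    (1u << 0)   /* illustrative bit values */
#define OVERWRITE_IDENTIFIERS (1u << 1)

enum preservation { PRESERVE_ALL, PRESERVE_SELECTED, PRESERVE_NONE };

static int decode_overwrite(unsigned int mask, enum preservation *out)
{
    if (!mask)
        *out = PRESERVE_ALL;      /* keep settings and identifiers */
    else if (mask == OVERWRITE_SETTINGS)
        *out = PRESERVE_SELECTED; /* keep only vital identifiers */
    else if (mask == (OVERWRITE_SETTINGS | OVERWRITE_IDENTIFIERS))
        *out = PRESERVE_NONE;     /* keep nothing */
    else
        return -EOPNOTSUPP;       /* e.g. IDENTIFIERS alone */
    return 0;
}

int main(void)
{
    enum preservation p;

    /* Overwriting identifiers while keeping settings makes no
     * sense, so it fails like the driver's fallback branch. */
    printf("decode=%d\n", decode_overwrite(OVERWRITE_IDENTIFIERS, &p));
    return 0;
}
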
@@ -352,55 +366,66 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
- * ice_devlink_create_port - Create a devlink port for this PF
- * @pf: the PF to create a port for
+ * ice_devlink_create_port - Create a devlink port for this VSI
+ * @vsi: the VSI to create a port for
*
- * Create and register a devlink_port for this PF. Note that although each
- * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function ID.
+ * Create and register a devlink_port for this VSI.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_pf *pf)
+int ice_devlink_create_port(struct ice_vsi *vsi)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct device *dev = ice_pf_to_dev(pf);
struct devlink_port_attrs attrs = {};
+ struct ice_port_info *pi;
+ struct devlink *devlink;
+ struct device *dev;
+ struct ice_pf *pf;
int err;
- if (!vsi) {
- dev_err(dev, "%s: unable to find main VSI\n", __func__);
- return -EIO;
- }
+ /* Currently we only create devlink_port instances for PF VSIs */
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ devlink = priv_to_devlink(pf);
+ dev = ice_pf_to_dev(pf);
+ pi = pf->hw.port_info;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.pf_id;
- devlink_port_attrs_set(&pf->devlink_port, &attrs);
- err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ attrs.phys.port_number = pi->lport;
+ devlink_port_attrs_set(&vsi->devlink_port, &attrs);
+ err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
return err;
}
+ vsi->devlink_port_registered = true;
+
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
+ * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
+ * @vsi: the VSI to cleanup
*
- * Unregisters the devlink_port structure associated with this PF.
+ * Unregisters the devlink_port structure associated with this VSI.
*/
-void ice_devlink_destroy_port(struct ice_pf *pf)
+void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
- devlink_port_type_clear(&pf->devlink_port);
- devlink_port_unregister(&pf->devlink_port);
+ if (!vsi->devlink_port_registered)
+ return;
+
+ devlink_port_type_clear(&vsi->devlink_port);
+ devlink_port_unregister(&vsi->devlink_port);
+
+ vsi->devlink_port_registered = false;
}
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -413,6 +438,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
@@ -456,6 +482,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -468,6 +495,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6e806a08dc23..e07e74426bde 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,8 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_pf *pf);
-void ice_devlink_destroy_port(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_vsi *vsi);
+void ice_devlink_destroy_port(struct ice_vsi *vsi);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d7430ce6af26..2d27f66ac853 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,8 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
- &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1647,8 +1646,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
- 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 6834df14332f..59c0c6a0f8c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -556,7 +556,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index b17ae3e20157..9095b4d274ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -489,8 +489,6 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].in_use = false;
- hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
@@ -505,8 +503,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
+ if (hw->tnl.tbl[i].boost_entry) {
hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
}
}
@@ -1626,104 +1627,59 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
}
/**
- * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
+ * @port: returns open port
*/
-static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
+ bool res = false;
u16 i;
+ mutex_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
- if (index)
- *index = i;
- return true;
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
}
- return false;
-}
-
-/**
- * ice_tunnel_port_in_use
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
- */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
-{
- bool res;
-
- mutex_lock(&hw->tnl_lock);
- res = ice_tunnel_port_in_use_hlpr(hw, port, index);
mutex_unlock(&hw->tnl_lock);
return res;
}
/**
- * ice_find_free_tunnel_entry
+ * ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
- * @type: tunnel type
- * @index: optionally returns index
+ * @type: type of tunnel
+ * @idx: linear index
*
- * Returns whether there is a free tunnel entry, and optionally its index
+ * Stack assumes we have 2 linear tables with indexes [0, valid_count),
+ * but really the port table may be sparse, and types are mixed, so convert
+ * the stack index into the device index.
*/
-static bool
-ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *index)
+static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 idx)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
- hw->tnl.tbl[i].type == type) {
- if (index)
- *index = i;
- return true;
- }
+ if (hw->tnl.tbl[i].valid &&
+ hw->tnl.tbl[i].type == type &&
+		    idx-- == 0)
+ return i;
- return false;
-}
-
-/**
- * ice_get_open_tunnel_port - retrieve an open tunnel port
- * @hw: pointer to the HW structure
- * @type: tunnel type (TNL_ALL will return any open port)
- * @port: returns open port
- */
-bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port)
-{
- bool res = false;
- u16 i;
-
- mutex_lock(&hw->tnl_lock);
-
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
- *port = hw->tnl.tbl[i].port;
- res = true;
- break;
- }
-
- mutex_unlock(&hw->tnl_lock);
-
- return res;
+ WARN_ON_ONCE(1);
+ return 0;
}
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
* @type: type of tunnel
* @port: port of tunnel to create
*
@@ -1731,27 +1687,16 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+static enum ice_status
+ice_create_tunnel(struct ice_hw *hw, u16 index,
+ enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 index;
mutex_lock(&hw->tnl_lock);
- if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
- hw->tnl.tbl[index].ref++;
- status = 0;
- goto ice_create_tunnel_end;
- }
-
- if (!ice_find_free_tunnel_entry(hw, type, &index)) {
- status = ICE_ERR_OUT_OF_RANGE;
- goto ice_create_tunnel_end;
- }
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1790,11 +1735,8 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
- if (!status) {
+ if (!status)
hw->tnl.tbl[index].port = port;
- hw->tnl.tbl[index].in_use = true;
- hw->tnl.tbl[index].ref = 1;
- }
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1808,46 +1750,31 @@ ice_create_tunnel_end:
/**
* ice_destroy_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
+ * @type: type of tunnel
- * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @port: port of tunnel to destroy
- * @all: flag that states to destroy all tunnels
*
- * Destroys a tunnel or all tunnels by creating an update package buffer
- * targeting the specific updates requested and then performing an update
- * package.
+ * Destroys a tunnel by creating an update package buffer targeting the
+ * specific update requested and then performing an update package.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+static enum ice_status
+ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
+ u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 count = 0;
- u16 index;
- u16 size;
- u16 i;
mutex_lock(&hw->tnl_lock);
- if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
- if (hw->tnl.tbl[index].ref > 1) {
- hw->tnl.tbl[index].ref--;
- status = 0;
- goto ice_destroy_tunnel_end;
- }
-
- /* determine count */
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port))
- count++;
-
- if (!count) {
- status = ICE_ERR_PARAM;
+ if (WARN_ON(!hw->tnl.tbl[index].valid ||
+ hw->tnl.tbl[index].type != type ||
+ hw->tnl.tbl[index].port != port)) {
+ status = ICE_ERR_OUT_OF_RANGE;
goto ice_destroy_tunnel_end;
}
- /* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count);
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1859,13 +1786,13 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
goto ice_destroy_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1873,26 +1800,14 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port)) {
- memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_rx->tcam));
- memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_tx->tcam));
- hw->tnl.tbl[i].marked = true;
- }
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status)
- for (i = 0; i < hw->tnl.count &&
- i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].marked) {
- hw->tnl.tbl[i].ref = 0;
- hw->tnl.tbl[i].port = 0;
- hw->tnl.tbl[i].in_use = false;
- hw->tnl.tbl[i].marked = false;
- }
+ hw->tnl.tbl[index].port = 0;
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1903,6 +1818,52 @@ ice_destroy_tunnel_end:
return status;
}
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+ u16 index;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
+
+ status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
+ return 0;
+}
+
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+
+ status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
+ ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error removing UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
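
udp_tunnel_nic hands the driver a dense per-type index while the boost TCAM table is sparse and mixes tunnel types, so ice_tunnel_idx_to_entry walks the table counting only valid entries of the requested type. Note the loop condition: a bare idx-- is false for idx == 0 and then underflows, so the decrement has to be compared against zero, as in the corrected hunk above. A standalone model of the corrected walk:

#include <stdio.h>

enum tnl_type { TNL_VXLAN, TNL_GENEVE };

struct entry {
    int valid;
    enum tnl_type type;
};

/* Return the table slot of the idx-th valid entry of @type.
 * 'idx-- == 0' fires on the idx-th hit; a bare 'idx--' would skip
 * the first hit and underflow when idx starts at 0. */
static int idx_to_entry(const struct entry *tbl, int count,
                        enum tnl_type type, unsigned int idx)
{
    int i;

    for (i = 0; i < count; i++)
        if (tbl[i].valid && tbl[i].type == type && idx-- == 0)
            return i;
    return -1;
}

int main(void)
{
    struct entry tbl[] = {
        { 1, TNL_GENEVE }, { 0, TNL_VXLAN },
        { 1, TNL_VXLAN },  { 1, TNL_VXLAN },
    };

    /* Dense VXLAN index 1 maps to sparse slot 3. */
    printf("slot=%d\n", idx_to_entry(tbl, 4, TNL_VXLAN, 1));
    return 0;
}
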
/* PTG Management */
/**
@@ -4915,7 +4876,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 568ea519af51..20deddb807c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -19,12 +19,11 @@
#define ICE_PKG_CNT 4
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port);
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index c1c99a267a98..24063c1351b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -298,6 +298,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ __TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -311,11 +312,8 @@ struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
- u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
- u8 in_use;
- u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
@@ -323,6 +321,7 @@ struct ice_tunnel_entry {
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
+ u16 valid_count[__TNL_TYPE_CNT];
};
struct ice_pkg_es {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fe677621dd51..eadc85aee389 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -99,6 +99,54 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* UDP Packet types for non-tunneled packets or tunneled
* packets with inner UDP.
*/
@@ -250,11 +298,23 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
+ (const unsigned long *)ice_ipv4_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
(const unsigned long *)ice_ptypes_ipv4_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
+ (const unsigned long *)ice_ipv6_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
(const unsigned long *)ice_ptypes_ipv6_il;
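
The profiler narrows the accumulated packet-type set per segment by ANDing in a table chosen from the header flags; the new branches make "L3 present, no L4" a distinct, more specific case checked before the generic IPv4/IPv6 ones. The select-then-intersect shape, shrunk to one 64-bit word in place of the 1024-bit ptype arrays (table contents invented):

#include <stdint.h>
#include <stdio.h>

#define HDR_IPV4     (1u << 0)
#define HDR_IPV6     (1u << 1)
#define HDRS_L4_MASK (1u << 2)   /* "any L4 header" flag, simplified */

/* Invented stand-ins for ice_ptypes_ipv4_* / ice_ipv4_*_no_l4 */
static const uint64_t ptypes_ipv4_l4    = 0x00f0;
static const uint64_t ptypes_ipv4_no_l4 = 0x000f;

static uint64_t narrow_ptypes(uint64_t acc, unsigned int hdrs)
{
    const uint64_t *src = NULL;

    /* Most specific case first: L3 present, explicitly no L4. */
    if ((hdrs & HDR_IPV4) && !(hdrs & HDRS_L4_MASK))
        src = &ptypes_ipv4_no_l4;
    else if (hdrs & HDR_IPV4)
        src = &ptypes_ipv4_l4;
    /* ... the IPv6 pair follows the same pattern ... */

    return src ? (acc & *src) : acc;   /* models bitmap_and() */
}

int main(void)
{
    printf("%#llx\n",
           (unsigned long long)narrow_ptypes(~0ull, HDR_IPV4));
    return 0;
}
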
@@ -385,7 +445,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
@@ -999,7 +1059,7 @@ enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 3913da2116d2..829f90b1e998 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -194,8 +194,8 @@ struct ice_flow_entry {
u16 entry_sz;
};
-#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
struct ice_flow_prof {
struct list_head l_entry;
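
Casting a pointer directly to u64 (or back) is only clean when pointers are 64-bit; going through uintptr_t keeps the round-trip well-defined and warning-free on 32-bit builds, which is all the ICE_FLOW_ENTRY_HNDL/PTR change does. In isolation:

#include <stdint.h>
#include <stdio.h>

struct flow_entry { int id; };

/* Round-trip a pointer through a u64 handle. The intermediate
 * uintptr_t cast is exactly what the updated macros add, keeping
 * the conversion warning-free on 32-bit targets. */
#define ENTRY_HNDL(e) ((uint64_t)(uintptr_t)(e))
#define ENTRY_PTR(h)  ((struct flow_entry *)(uintptr_t)(h))

int main(void)
{
    struct flow_entry e = { 42 };
    uint64_t h = ENTRY_HNDL(&e);

    printf("id=%d\n", ENTRY_PTR(h)->id);
    return 0;
}
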
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8968fdd4816b..8f81b95e679c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -43,6 +43,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
enum ice_status status;
u8 *package_data;
+ dev_dbg(dev, "Sending PLDM record package data to firmware\n");
+
package_data = kmemdup(data, length, GFP_KERNEL);
if (!package_data)
return -ENOMEM;
@@ -229,6 +231,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
comp_tbl->cvs_len = component->version_len;
memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+ dev_dbg(dev, "Sending component table to firmware:\n");
+
status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
transfer_flag, &comp_response,
&comp_response_code, NULL);
@@ -279,11 +283,14 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
memset(&event, 0, sizeof(event));
+ dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
+ block_size, module, offset);
+
status = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (status) {
- dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
- module, offset, ice_stat_str(status),
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
+ module, block_size, offset, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -297,8 +304,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
if (err) {
- dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
- module, err);
+ dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
+ module, block_size, offset, err);
NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
return -EIO;
}
@@ -324,8 +331,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
- module, offset,
+ dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
+ module, block_size, offset,
ice_aq_str((enum ice_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
@@ -356,12 +363,15 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
const u8 *image, u32 length,
struct netlink_ext_ack *extack)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct devlink *devlink;
u32 offset = 0;
bool last_cmd;
u8 *block;
int err;
+ dev_dbg(dev, "Beginning write of flash component '%s', module 0x%02x\n", component, module);
+
devlink = priv_to_devlink(pf);
devlink_flash_update_status_notify(devlink, "Flashing",
@@ -394,6 +404,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
component, offset, length);
} while (!last_cmd);
+ dev_dbg(dev, "Completed write of flash component '%s', module 0x%02x\n", component, module);
+
if (err)
devlink_flash_update_status_notify(devlink, "Flashing failed",
component, length, length);
@@ -431,6 +443,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
enum ice_status status;
int err;
+ dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
+
memset(&event, 0, sizeof(event));
devlink = priv_to_devlink(pf);
@@ -476,6 +490,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
+ dev_dbg(dev, "Completed erase of flash component '%s', module 0x%02x\n", component, module);
+
out_notify_devlink:
if (err)
devlink_flash_update_status_notify(devlink, "Erasing failed",
@@ -614,14 +630,9 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
- int err;
/* Finally, notify firmware to activate the written NVM banks */
- err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
- if (err)
- return err;
-
- return 0;
+ return ice_switch_flash_banks(pf, priv->activate_flags, extack);
}
static const struct pldmfw_ops ice_fwu_ops = {
@@ -636,6 +647,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
* @pf: private device driver structure
* @fw: firmware object pointing to the relevant firmware file
+ * @preservation: preservation level to request from firmware
* @extack: netlink extended ACK structure
*
* Parse the data for a given firmware file, verifying that it is a valid PLDM
@@ -649,7 +661,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* Returns: zero on success or a negative error code on failure.
*/
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack)
+ u8 preservation, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -657,13 +669,24 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
enum ice_status status;
int err;
+ switch (preservation) {
+ case ICE_AQC_NVM_PRESERVE_ALL:
+ case ICE_AQC_NVM_PRESERVE_SELECTED:
+ case ICE_AQC_NVM_NO_PRESERVATION:
+ case ICE_AQC_NVM_FACTORY_DEFAULT:
+ break;
+ default:
+ WARN(1, "Unexpected preservation level request %u", preservation);
+ return -EINVAL;
+ }
+
memset(&priv, 0, sizeof(priv));
priv.context.ops = &ice_fwu_ops;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
- priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+ priv.activate_flags = preservation;
status = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 79472cc618b4..c6390f6851ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -5,7 +5,7 @@
#define _ICE_FW_UPDATE_H_
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack);
+ u8 preservation, struct netlink_ext_ack *extack);
int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ebbb8f54871c..3df67486d42d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_devlink.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -1755,7 +1756,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
return ret;
for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+ vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -2616,8 +2617,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
unregister_netdev(vsi->netdev);
+ ice_devlink_destroy_port(vsi);
+ }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 54a7f55eb8c1..2dea4d0e9415 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -486,7 +486,6 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- WARN_ON(in_interrupt());
ice_prepare_for_reset(pf);
@@ -1057,7 +1056,9 @@ struct ice_aq_task {
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task *task;
+ unsigned long start;
long ret;
int err;
@@ -1074,6 +1075,8 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
hlist_add_head(&task->entry, &pf->aq_wait_list);
spin_unlock_bh(&pf->aq_wait_lock);
+ start = jiffies;
+
ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
timeout);
switch (task->state) {
@@ -1092,6 +1095,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
break;
}
+ dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
+ jiffies_to_msecs(jiffies - start),
+ jiffies_to_msecs(timeout),
+ opcode);
+
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
@@ -2273,7 +2281,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
return 0;
@@ -2417,7 +2425,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset reset bits
+ * rings; in case of rebuild being triggered not from reset, bits
* in pf->state won't be set, so additionally check first q_vector
* against NULL
*/
@@ -2517,13 +2525,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_umems) {
+ if (!ret && prog && vsi->xsk_pools) {
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2549,8 +2557,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_SETUP_XSK_UMEM:
- return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -2873,6 +2881,7 @@ static void ice_set_ops(struct net_device *netdev)
}
netdev->netdev_ops = &ice_netdev_ops;
+ netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
}
@@ -2953,7 +2962,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
- err = ice_devlink_create_port(pf);
+ err = ice_devlink_create_port(vsi);
if (err)
return err;
@@ -2994,7 +3003,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (err)
goto err_free_netdev;
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -3007,7 +3016,7 @@ err_free_netdev:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
err_destroy_devlink_port:
- ice_devlink_destroy_port(pf);
+ ice_devlink_destroy_port(vsi);
return err;
}
@@ -3971,7 +3980,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
- int err;
+ int i, err;
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
@@ -4066,11 +4075,37 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_devlink_init_regions(pf);
+ pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
+ pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
+ pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
+ i = 0;
+ if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_VXLAN];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_VXLAN;
+ i++;
+ }
+ if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_GENEVE];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_GENEVE;
+ i++;
+ }
+
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
GFP_KERNEL);
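The set_port/unset_port hooks wired up above follow the udp_tunnel_nic callback shape; a sketch of the expected signature (per include/net/udp_tunnel.h of this era, with a placeholder body):

	static int ice_udp_tunnel_set_port(struct net_device *netdev,
					   unsigned int table, unsigned int idx,
					   struct udp_tunnel_info *ti)
	{
		/* program ti->port/ti->type into the hardware tunnel
		 * table entry selected by (table, idx)
		 */
		return 0;
	}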
@@ -4216,7 +4251,6 @@ probe_done:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
- ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -4331,7 +4365,6 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
- ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
@@ -6569,70 +6602,6 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- enum ice_tunnel_type tnl_type;
- u16 port = ntohs(ti->port);
- enum ice_status status;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- tnl_type = TNL_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- tnl_type = TNL_GENEVE;
- break;
- default:
- netdev_err(netdev, "Unknown tunnel type\n");
- return;
- }
-
- status = ice_create_tunnel(&pf->hw, tnl_type, port);
- if (status == ICE_ERR_OUT_OF_RANGE)
- netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
- port);
- else if (status)
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
-}
-
-/**
- * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- enum ice_status status;
- bool retval;
-
- retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
- if (!retval) {
- netdev_info(netdev, "port %d not found in UDP tunnels list\n",
- port);
- return;
- }
-
- status = ice_destroy_tunnel(&pf->hw, port, false);
- if (status)
- netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
- port);
-}
-
-/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -6824,6 +6793,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = ice_udp_tunnel_add,
- .ndo_udp_tunnel_del = ice_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9d0d6b0025cf..eae75260fe20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -145,7 +145,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -375,7 +375,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
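net_prefetch() folds the removed open-coded pair of prefetches into a single helper; roughly, per include/linux/netdevice.h:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}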
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
@@ -1616,7 +1610,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ice_clean_tx_irq_zc(ring, budget) :
ice_clean_tx_irq(ring, budget);
@@ -1646,7 +1640,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
+ cleaned = ring->xsk_pool ?
ice_clean_rx_irq_zc(ring, budget_per_ring) :
ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 51b4df7a59d2..ff1a1cbd078e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -43,7 +43,7 @@
/**
* ice_compute_pad - compute the padding
- * rx_buf_len: buffer length
+ * @rx_buf_len: buffer length
*
* Figure out the size of half page based on given buffer length and
* then subtract the skb_shared_info followed by subtraction of the
@@ -295,7 +295,7 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4cdccfadf274..2226a291a394 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -676,6 +676,9 @@ struct ice_hw {
struct mutex tnl_lock;
struct ice_tunnel_table tnl;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 71497776ac62..ec7f6c64132e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -871,7 +871,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* If there are not enough resources available, return an error. This should
* always be caught by ice_set_per_vf_res().
*
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 20ac5fca68c6..797886524054 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
err = ice_setup_rx_ctx(rx_ring);
@@ -260,21 +260,21 @@ free_buf:
}
/**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
*
* Returns 0 on success, negative on error
*/
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
- if (vsi->xsk_umems)
+ if (vsi->xsk_pools)
return 0;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
+ if (!vsi->xsk_pools) {
+ vsi->num_xsk_pools = 0;
return -ENOMEM;
}
@@ -282,73 +282,73 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
*/
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
+ vsi->xsk_pools[qid] = NULL;
+ vsi->num_xsk_pools_used--;
- if (vsi->num_xsk_umems_used == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
+ if (vsi->num_xsk_pools_used == 0) {
+ kfree(vsi->xsk_pools);
+ vsi->xsk_pools = NULL;
+ vsi->num_xsk_pools = 0;
}
}
/**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+ !vsi->xsk_pools[qid])
return -EINVAL;
- xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_umem(vsi, qid);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
+ ice_xsk_remove_pool(vsi, qid);
return 0;
}
/**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool region
* @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to a requested buffer pool region
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_umems)
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_umems)
+ if (!vsi->num_xsk_pools)
+ vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_pools)
return -EINVAL;
- err = ice_xsk_alloc_umems(vsi);
+ err = ice_xsk_alloc_pools(vsi);
if (err)
return err;
- if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ if (vsi->xsk_pools && vsi->xsk_pools[qid])
return -EBUSY;
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
+ vsi->xsk_pools[qid] = pool;
+ vsi->num_xsk_pools_used++;
- err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -357,17 +357,17 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
}
/**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate to a ring, NULL to disable
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
- bool if_running, umem_present = !!umem;
- int ret = 0, umem_failure = 0;
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
@@ -375,26 +375,26 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
- goto xsk_umem_if_up;
+ goto xsk_pool_if_up;
}
}
- umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
- ice_xsk_umem_disable(vsi, qid);
+ pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+ ice_xsk_pool_disable(vsi, qid);
-xsk_umem_if_up:
+xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
- if (!ret && umem_present)
+ if (!ret && pool_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
- if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
- umem_present ? "en" : "dis", umem_failure);
- return umem_failure;
+ if (pool_failure) {
+ netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+ pool_present ? "en" : "dis", pool_failure);
+ return pool_failure;
}
return ret;
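Condensed, the function brackets the pool swap with a queue-pair quiesce/restart; in outline (error handling elided):

	if (if_running)
		ice_qp_dis(vsi, qid);			/* quiesce Tx/Rx pair */

	err = pool ? ice_xsk_pool_enable(vsi, pool, qid)
		   : ice_xsk_pool_disable(vsi, qid);	/* swap the pool */

	if (if_running)
		ice_qp_ena(vsi, qid);			/* restart the pair */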
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];
do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -595,7 +595,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}
return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -814,7 +814,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (queue_id >= vsi->num_txq)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -833,20 +833,20 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
}
/**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have an AF_XDP buffer pool attached
* @vsi: VSI to be checked
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
*/
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_umems)
+ if (!vsi->xsk_pools)
return false;
- for (i = 0; i < vsi->num_xsk_umems; i++) {
- if (vsi->xsk_umems[i])
+ for (i = 0; i < vsi->num_xsk_pools; i++) {
+ if (vsi->xsk_pools[i])
return true;
}
@@ -854,7 +854,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
}
/**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
@@ -872,7 +872,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fc1a06b4df36..fad783690134 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -9,7 +9,8 @@
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
@@ -19,8 +20,8 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
- struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+ struct xsk_buff_pool __always_unused *pool,
u16 __always_unused qid)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a32391e82762..50863fd87d53 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2554,7 +2554,7 @@ out:
/**
* __igb_access_emi_reg - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
**/
@@ -2590,7 +2590,7 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
@@ -2646,7 +2646,7 @@ out:
* igb_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c393cb2c0f16..9265901455cd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -357,13 +357,14 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
/**
* igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
- * @words: number of words to read
+ * @offset: offset to read from
+ * @words: number of words to read (unused)
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
- u16 words __always_unused, u16 *data)
+ u16 __always_unused words, u16 *data)
{
s32 ret_val = 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 3254737c07a3..fd8eb2f9ab9d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -166,6 +166,7 @@ static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
* @vlan: VLAN id to add or remove
* @vind: VMDq output index that maps queue to VLAN id
* @vlan_on: if true add filter, if false remove
+ * @vlvf_bypass: skip VLVF if no match is found
*
* Sets or clears a bit in the VLAN filter table array based on VLAN id
* and if we are adding or removing the filter
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 46debd991bfe..33cceb77e960 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -9,6 +9,7 @@
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
+ * @unlock: skip locking or not
*
* returns SUCCESS if it successfully read message from buffer
**/
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2f015b60a995..0286d2fceee4 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -19,6 +19,8 @@
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <net/xdp.h>
+
struct igb_adapter;
#define E1000_PCS_CFG_IGN_SD 1
@@ -79,6 +81,12 @@ struct igb_adapter;
#define IGB_I210_RX_LATENCY_100 2213
#define IGB_I210_RX_LATENCY_1000 448
+/* XDP */
+#define IGB_XDP_PASS 0
+#define IGB_XDP_CONSUMED BIT(0)
+#define IGB_XDP_TX BIT(1)
+#define IGB_XDP_REDIR BIT(2)
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -132,17 +140,62 @@ struct vf_mac_filter {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_1536 1536
#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN 16
-#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IGB_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IGB_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD igb_skb_pad()
#else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
/* How many Rx Buffers do we bundle into one write to the hardware ? */
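A worked example of the padding math above, assuming 4K pages and a cache-line-aligned struct skb_shared_info of 320 bytes (typical for x86_64; the exact values are config dependent):

	rx_buf_len = IGB_RXBUFFER_1536 - NET_IP_ALIGN       = 1534
	page_size  = ALIGN(1534, PAGE_SIZE / 2)             = 2048
	pad_size   = SKB_WITH_OVERHEAD(2048) - 1534
	           = (2048 - 320) - 1534                    = 194

so IGB_SKB_PAD resolves to roughly 194 bytes of headroom per 2K buffer.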
@@ -194,13 +247,22 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+enum igb_tx_buf_type {
+ IGB_TYPE_SKB = 0,
+ IGB_TYPE_XDP,
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ enum igb_tx_buf_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
@@ -248,6 +310,7 @@ struct igb_ring_container {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
+ struct bpf_prog *xdp_prog;
struct device *dev; /* device pointer for dma mapping */
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
@@ -288,6 +351,7 @@ struct igb_ring {
struct u64_stats_sync rx_syncp;
};
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -339,7 +403,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
return IGB_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
- return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+ return IGB_MAX_FRAME_BUILD_SKB;
#endif
return IGB_RXBUFFER_2048;
}
@@ -467,6 +531,7 @@ struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
unsigned long state;
unsigned int flags;
@@ -643,6 +708,9 @@ enum igb_boards {
extern char igb_driver_name[];
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring,
+ struct xdp_frame *xdpf);
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 6e8231c1ddf0..28baf203459a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -961,6 +961,10 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d9c3a6b169f9..5fc2c381da55 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -30,6 +30,8 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
@@ -549,8 +551,7 @@ exit:
/**
* igb_get_i2c_data - Reads the I2C SDA data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
+ * @data: opaque pointer to adapter struct
*
* Returns the I2C data bit value
**/
@@ -2220,7 +2221,6 @@ void igb_down(struct igb_adapter *adapter)
void igb_reinit_locked(struct igb_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igb_down(adapter);
@@ -2824,6 +2824,147 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct igb_adapter *adapter = netdev_priv(dev);
+ bool running = netif_running(dev);
+ struct bpf_prog *old_prog;
+ bool need_reset;
+
+ /* verify igb ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (frame_size > igb_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
+
+ /* device is up and bpf is added/removed, must setup the RX queues */
+ if (need_reset && running) {
+ igb_close(dev);
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* bpf is just replaced, RXQ and MTU are already setup */
+ if (!need_reset)
+ return 0;
+
+ if (running)
+ igb_open(dev);
+
+ return 0;
+}
+
+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return igb_xdp_setup(dev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ u32 ret;
+
+ if (unlikely(!xdpf))
+ return IGB_XDP_CONSUMED;
+
+ /* During program transitions it's possible adapter->xdp_prog is assigned
+ * but ring has not been configured yet. In this case simply abort xmit.
+ */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int igb_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ int drops = 0;
+ int i;
+
+ if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* During program transitions it's possible adapter->xdp_prog is assigned
+ * but ring has not been configured yet. In this case simply abort xmit.
+ */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ if (err != IGB_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ __netif_tx_unlock(nq);
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return n - drops;
+}
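The return convention is the .ndo_xdp_xmit contract of this era: the driver frees any frames it drops and reports how many it queued. The XDP core's redirect flush path invokes the hook roughly as follows (simplified sketch of the devmap logic):

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, count, frames,
					     XDP_XMIT_FLUSH);
	if (sent < 0)
		sent = 0;	/* whole batch rejected, nothing consumed */
	/* partially sent batches need no cleanup here: the driver already
	 * freed its drops via xdp_return_frame_rx_napi(), as above
	 */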
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2848,6 +2989,8 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = igb_features_check,
.ndo_setup_tc = igb_setup_tc,
+ .ndo_bpf = igb_xdp,
+ .ndo_xdp_xmit = igb_xdp_xmit,
};
/**
@@ -3388,7 +3531,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- if ((hw->mac.type >= e1000_i210 ||
+ if ((hw->mac.type == e1000_82576 &&
+ rd32(E1000_EECD) & E1000_EECD_PRES) ||
+ (hw->mac.type >= e1000_i210 ||
igb_get_flash_presence_i210(hw))) {
ret_val = igb_read_part_string(hw, part_str,
E1000_PBANUM_LENGTH);
@@ -3868,6 +4013,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/**
* igb_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: indicates whether we are in a resume call
*
* Returns 0 on success, negative value on failure
*
@@ -3985,6 +4131,7 @@ int igb_open(struct net_device *netdev)
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: indicates we are in a suspend call
*
* Returns 0, this is not allowed to fail
*
@@ -4178,6 +4325,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
+ struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
int size;
@@ -4200,6 +4348,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
return 0;
err:
@@ -4504,6 +4659,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 rxdctl = 0;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4708,6 +4867,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
{
igb_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -5219,7 +5380,7 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
/**
* igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void igb_watchdog(struct timer_list *t)
{
@@ -6077,6 +6238,80 @@ dma_error:
return -1;
}
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ union e1000_adv_tx_desc *tx_desc;
+ u32 len, cmd_type, olinfo_status;
+ struct igb_tx_buffer *tx_buffer;
+ dma_addr_t dma;
+ u16 i;
+
+ len = xdpf->len;
+
+ if (unlikely(!igb_desc_unused(tx_ring)))
+ return IGB_XDP_CONSUMED;
+
+ dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return IGB_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = tx_ring->next_to_use;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->type = IGB_TYPE_XDP;
+ tx_buffer->xdpf = xdpf;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+
+ tx_desc->read.olinfo_status = olinfo_status;
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+
+ /* set the timestamp */
+ tx_buffer->time_stamp = jiffies;
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ tx_ring->next_to_use = i;
+
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return IGB_XDP_TX;
+}
+
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
@@ -6105,6 +6340,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGB_TYPE_SKB;
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
@@ -6192,8 +6428,9 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
/**
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6256,6 +6493,19 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (adapter->xdp_prog) {
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (max_frame > igb_rx_bufsz(ring)) {
+ netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
+ }
+ }
+ }
+
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -7809,7 +8059,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7993,8 +8246,8 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
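In brief, the page-reuse bias scheme this check maintains (sketch): take a large reference bias once, spend it through a local counter, and only touch the atomic page refcount again when the budget is nearly drained:

	page_ref_add(page, USHRT_MAX - 1);	/* one atomic bump at alloc */
	rx_buffer->pagecnt_bias = USHRT_MAX;	/* local, lock-free budget */
	/* each reuse decrements pagecnt_bias; refreshing at 1 rather than
	 * 0 keeps one reference in hand at all times
	 */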
@@ -8033,23 +8286,21 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@ -8057,24 +8308,24 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
+ xdp->data += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
}
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGB_RX_HDR_LEN)
- headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
- (va + headlen) - page_address(rx_buffer->page),
+ (xdp->data + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
@@ -8090,32 +8341,29 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGB_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGB_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
@@ -8133,6 +8381,79 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IGB_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ result = IGB_XDP_REDIR;
+ else
+ result = IGB_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
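The XDP verdict is smuggled back through the skb pointer: encoded here as ERR_PTR(-result) and decoded by the caller as -PTR_ERR(skb). Note IGB_XDP_PASS (0) encodes to NULL, so the normal skb build path runs. Sketch of the round-trip:

	skb = igb_run_xdp(adapter, rx_ring, &xdp);	/* may be ERR_PTR(-verdict) */
	if (IS_ERR(skb)) {
		unsigned int xdp_res = -PTR_ERR(skb);	/* e.g. 2 == IGB_XDP_TX */
		/* frame was consumed by XDP: no skb to build */
	}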
+
+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
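A worked trace of the flip with PAGE_SIZE == 4096, where truesize is the 2K half-page:

	page_offset == 0    -> 0    ^ 2048 == 2048	/* use upper half next */
	page_offset == 2048 -> 2048 ^ 2048 == 0		/* back to lower half */

On larger pages the offset simply advances by truesize instead of ping-ponging between halves.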
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -8187,7 +8508,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
* igb_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -8229,6 +8549,10 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8287,6 +8611,11 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
const unsigned int size)
{
@@ -8330,10 +8659,20 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+ /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8360,13 +8699,38 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_buffer = igb_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data -
+ igb_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+ /* At larger PAGE_SIZE, frame_sz depends on the received frame size */
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+ skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ igb_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_packets++;
+ total_bytes += size;
+ } else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ &xdp, rx_desc);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -8406,6 +8770,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
+ if (xdp_xmit & IGB_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ igb_xdp_ring_update_tail(tx_ring);
+ }
+
u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
@@ -8419,11 +8792,6 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
-}
-
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -8460,14 +8828,16 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = igb_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
/**
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * igb_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: rx descriptor ring to allocate new receive buffers
+ * @cleaned_count: count of buffers to allocate
**/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
@@ -8536,9 +8906,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/**
* igb_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -8566,9 +8936,9 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* igb_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 490368d3d03c..7cc5428c3b3d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,8 +957,8 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
/**
* igb_ptp_get_ts_config - get hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -1141,8 +1141,8 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
/**
* igb_ptp_set_ts_config - set hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
**/
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 19269f5d52bc..ee9f8c1dca83 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -61,7 +61,7 @@ static const struct igbvf_info *igbvf_info_tbl[] = {
/**
* igbvf_desc_unused - calculate if we have unused descriptors
- * @rx_ring: address of receive ring structure
+ * @ring: address of receive ring structure
**/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
@@ -74,6 +74,8 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
/**
* igbvf_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
+ * @skb: skb to indicate to stack
* @status: descriptor status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
@@ -233,6 +235,8 @@ no_buffers:
/**
* igbvf_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
+ * @work_done: output parameter used to indicate completed work
+ * @work_to_do: input parameter setting limit of work
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -406,6 +410,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
/**
* igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
+ * @tx_ring: ring being initialized
*
* Return 0 on success, negative on failure
**/
@@ -444,6 +449,7 @@ err:
/**
* igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: board private structure
+ * @rx_ring: ring being initialized
*
* Returns 0 on success, negative on failure
**/
@@ -540,7 +546,7 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
/**
* igbvf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
+ * @rx_ring: ring structure pointer to free buffers from
**/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
@@ -760,7 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @tx_ring: ring structure to clean descriptors from
*
* returns true if ring is completely cleaned
**/
@@ -1891,7 +1897,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
/**
* igbvf_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer list pointer containing private struct
**/
static void igbvf_watchdog(struct timer_list *t)
{
@@ -2372,8 +2378,9 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
/**
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue timing out (unused)
**/
-static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2d566f3c827b..35baae900c1f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -215,6 +215,8 @@ struct igc_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
+ ktime_t ptp_reset_start; /* Reset time in clock mono */
};
void igc_up(struct igc_adapter *adapter);
@@ -548,6 +550,7 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index cc5a6cf531c7..fd37d2c203af 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -215,6 +215,11 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I225_LMVP:
case IGC_DEV_ID_I225_IT:
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_IT:
+ case IGC_DEV_ID_I221_V:
+ case IGC_DEV_ID_I226_BLANK_NVM:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f1f464967f87..32f5fd684139 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -324,22 +324,10 @@
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
-#define IGC_RXDEXT_STATERR_CE 0x01000000
-#define IGC_RXDEXT_STATERR_SE 0x02000000
-#define IGC_RXDEXT_STATERR_SEQ 0x04000000
-#define IGC_RXDEXT_STATERR_CXE 0x10000000
-#define IGC_RXDEXT_STATERR_TCPE 0x20000000
+#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
-/* Same mask, but for extended and packet split descriptors */
-#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
- IGC_RXDEXT_STATERR_CE | \
- IGC_RXDEXT_STATERR_SE | \
- IGC_RXDEXT_STATERR_SEQ | \
- IGC_RXDEXT_STATERR_CXE | \
- IGC_RXDEXT_STATERR_RXE)
-
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
@@ -409,7 +397,7 @@
#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
/* Time Sync Transmit Control bit definitions */
-#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 44410c2265d6..61d331ce38cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -321,6 +321,9 @@ static void igc_ethtool_get_regs(struct net_device *netdev,
for (i = 0; i < 8; i++)
regs_buff[205 + i] = rd32(IGC_ETQF(i));
+
+ regs_buff[213] = adapter->stats.tlpic;
+ regs_buff[214] = adapter->stats.rlpic;
}
static void igc_ethtool_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b9fe51b91c47..55dae7c4703f 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -24,6 +24,11 @@
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
@@ -125,9 +130,6 @@ struct igc_nvm_info {
struct igc_nvm_operations ops;
enum igc_nvm_type type;
- u32 flash_bank_size;
- u32 flash_base_addr;
-
u16 word_size;
u16 delay_usec;
u16 address_bits;
@@ -153,7 +155,6 @@ struct igc_phy_info {
u8 mdix;
bool is_mdix;
- bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
@@ -239,6 +240,8 @@ struct igc_hw_stats {
u64 prc511;
u64 prc1023;
u64 prc1522;
+ u64 tlpic;
+ u64 rlpic;
u64 gprc;
u64 bprc;
u64 mprc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9593aa4eea36..b673ac1199bb 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -47,6 +47,11 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -1428,7 +1433,7 @@ static void igc_rx_checksum(struct igc_ring *ring,
/* TCP/UDP checksum error bit is set */
if (igc_test_staterr(rx_desc,
- IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_L4E |
IGC_RXDEXT_STATERR_IPE)) {
/* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
@@ -1550,10 +1555,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGC_SKB_PAD, truesize);
@@ -1589,10 +1591,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
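
Review note: these hunks (and the matching ones in ixgbe/ixgbevf below) replace the open-coded two-line prefetch with the net_prefetch() helper, which centralizes the "pull in a second cache line on CPUs with lines shorter than 128 bytes" logic. A minimal approximation, assuming the helper behaves like the code it replaces:

    #ifndef L1_CACHE_BYTES
    #define L1_CACHE_BYTES 64            /* assumption for this sketch */
    #endif

    /* prefetch the first cache line of a packet header, and a second
     * line where a single line cannot cover a typical header */
    static inline void net_prefetch_sketch(void *p)
    {
        __builtin_prefetch(p);
    #if L1_CACHE_BYTES < 128
        __builtin_prefetch((char *)p + L1_CACHE_BYTES);
    #endif
    }
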
@@ -1743,8 +1742,7 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (unlikely((igc_test_staterr(rx_desc,
- IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
if (!(netdev->features & NETIF_F_RXALL)) {
@@ -3685,6 +3683,8 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc511 += rd32(IGC_PRC511);
adapter->stats.prc1023 += rd32(IGC_PRC1023);
adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.tlpic += rd32(IGC_TLPIC);
+ adapter->stats.rlpic += rd32(IGC_RLPIC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
@@ -3778,6 +3778,8 @@ void igc_down(struct igc_adapter *adapter)
set_bit(__IGC_DOWN, &adapter->state);
+ igc_ptp_suspend(adapter);
+
/* disable receives in the hardware */
rctl = rd32(IGC_RCTL);
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
@@ -3831,7 +3833,6 @@ void igc_down(struct igc_adapter *adapter)
void igc_reinit_locked(struct igc_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igc_down(adapter);
@@ -3890,21 +3891,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igc_get_stats - Get System Network Statistics
+ * igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
* The statistics are updated here and also from the timer callback.
*/
-static struct net_device_stats *igc_get_stats(struct net_device *netdev)
+static void igc_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ spin_lock(&adapter->stats64_lock);
if (!test_bit(__IGC_RESETTING, &adapter->state))
igc_update_stats(adapter);
-
- /* only return the current stats */
- return &netdev->stats;
+ memcpy(stats, &adapter->stats64, sizeof(*stats));
+ spin_unlock(&adapter->stats64_lock);
}
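
Review note: the conversion from ndo_get_stats to ndo_get_stats64 changes the contract -- instead of returning a pointer to device-global state, the hook copies a consistent snapshot into a caller-provided buffer under a lock. A self-contained sketch of that pattern (pthread spinlock standing in for the adapter's stats64_lock):

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    struct stats64 { unsigned long long rx_packets, tx_packets; };

    struct adapter {
        pthread_spinlock_t stats_lock;
        struct stats64 stats;            /* updated from a timer path */
    };

    /* snapshot shared counters under the lock into the caller's buffer */
    static void get_stats64(struct adapter *ad, struct stats64 *out)
    {
        pthread_spin_lock(&ad->stats_lock);
        memcpy(out, &ad->stats, sizeof(*out));
        pthread_spin_unlock(&ad->stats_lock);
    }

    int main(void)
    {
        struct adapter ad = { .stats = { 42, 7 } };
        struct stats64 snap;

        pthread_spin_init(&ad.stats_lock, PTHREAD_PROCESS_PRIVATE);
        get_stats64(&ad, &snap);
        printf("rx=%llu tx=%llu\n", snap.rx_packets, snap.tx_packets);
        return 0;
    }
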
static netdev_features_t igc_fix_features(struct net_device *netdev,
@@ -4659,7 +4662,7 @@ int igc_close(struct net_device *netdev)
/**
* igc_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
* @cmd: ioctl command
**/
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -4700,14 +4703,35 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
return 0;
}
-static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
+{
+ struct timespec64 b;
+
+ b = ktime_to_timespec64(base_time);
+
+ return timespec64_compare(now, &b) > 0;
+}
+
+static bool validate_schedule(struct igc_adapter *adapter,
+ const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct timespec64 now;
size_t n;
if (qopt->cycle_time_extension)
return false;
+ igc_ptp_read(adapter, &now);
+
+ /* If we program the controller's BASET registers with a time
+ * in the future, it will hold all the packets until that
+ * time, causing a lot of TX Hangs, so to avoid that, we
+ * reject schedules that would start in the future.
+ */
+ if (!is_base_time_past(qopt->base_time, &now))
+ return false;
+
for (n = 0; n < qopt->num_entries; n++) {
const struct tc_taprio_sched_entry *e;
int i;
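
Review note: validate_schedule() now reads the PHC and rejects taprio schedules whose base_time lies in the future, since programming a future BASET would make the controller hold packets and trip Tx hang detection. The comparison itself is the usual lexicographic timespec ordering; a small illustrative version:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* true when base is already in the past relative to now; the same
     * ordering test timespec64_compare() performs in the hunk above */
    static bool base_time_past(const struct timespec *base,
                               const struct timespec *now)
    {
        if (now->tv_sec != base->tv_sec)
            return now->tv_sec > base->tv_sec;
        return now->tv_nsec > base->tv_nsec;
    }

    int main(void)
    {
        struct timespec now, base;

        clock_gettime(CLOCK_REALTIME, &now);
        base = now;
        base.tv_sec += 10;               /* schedule 10 s in the future */
        printf("reject=%d\n", !base_time_past(&base, &now));
        return 0;
    }
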
@@ -4762,7 +4786,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (adapter->base_time)
return -EALREADY;
- if (!validate_schedule(qopt))
+ if (!validate_schedule(adapter, qopt))
return -EINVAL;
adapter->cycle_time = qopt->cycle_time;
@@ -4833,7 +4857,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
- .ndo_get_stats = igc_get_stats,
+ .ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
.ndo_features_check = igc_features_check,
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 6a9b5102aa55..ac0b9c85da7c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
+#include <linux/ktime.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -16,17 +17,12 @@
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
/* SYSTIM read access for I225 */
-static void igc_ptp_read_i225(struct igc_adapter *adapter,
- struct timespec64 *ts)
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
struct igc_hw *hw = &adapter->hw;
u32 sec, nsec;
- /* The timestamp latches on lowest register read. For I210/I211, the
- * lowest register is SYSTIMR. Since we only need to provide nanosecond
- * resolution, we can ignore it.
- */
- rd32(IGC_SYSTIMR);
+ /* The timestamp is latched when SYSTIML is read. */
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
@@ -39,9 +35,6 @@ static void igc_ptp_write_i225(struct igc_adapter *adapter,
{
struct igc_hw *hw = &adapter->hw;
- /* Writing the SYSTIMR register is not necessary as it only
- * provides sub-nanosecond resolution.
- */
wr32(IGC_SYSTIML, ts->tv_nsec);
wr32(IGC_SYSTIMH, ts->tv_sec);
}
@@ -81,7 +74,7 @@ static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
spin_lock_irqsave(&igc->tmreg_lock, flags);
- igc_ptp_read_i225(igc, &now);
+ igc_ptp_read(igc, &now);
now = timespec64_add(now, then);
igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
@@ -102,10 +95,9 @@ static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
spin_lock_irqsave(&igc->tmreg_lock, flags);
ptp_read_system_prets(sts);
- rd32(IGC_SYSTIMR);
- ptp_read_system_postts(sts);
ts->tv_nsec = rd32(IGC_SYSTIML);
ts->tv_sec = rd32(IGC_SYSTIMH);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
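
Review note: moving ptp_read_system_postts() after the register reads means the post-sample is taken once the device time has actually been latched and read, so both SYSTIML/SYSTIMH accesses fall inside the reported uncertainty window. A sketch of the gettimex-style bracketing (read_device_counter() is a placeholder for the MMIO reads):

    #include <stdint.h>
    #include <time.h>

    struct ts_window { struct timespec pre, post; };

    static uint64_t read_device_counter(void)
    {
        return 0;                        /* stands in for the MMIO reads */
    }

    /* sample the system clock immediately before and after the device
     * access, so callers get a window that bounds the device read */
    static uint64_t gettimex(struct ts_window *sts)
    {
        uint64_t dev;

        clock_gettime(CLOCK_REALTIME, &sts->pre);
        dev = read_device_counter();
        clock_gettime(CLOCK_REALTIME, &sts->post);
        return dev;
    }
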
@@ -422,24 +414,17 @@ static void igc_ptp_tx_work(struct work_struct *work)
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
- if (time_is_before_jiffies(adapter->ptp_tx_start +
- IGC_PTP_TX_TIMEOUT)) {
- igc_ptp_tx_timeout(adapter);
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
return;
- }
- tsynctxctl = rd32(IGC_TSYNCTXCTL);
- if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
- igc_ptp_tx_hwtstamp(adapter);
- else
- /* reschedule to check later */
- schedule_work(&adapter->ptp_tx_work);
+ igc_ptp_tx_hwtstamp(adapter);
}
/**
* igc_ptp_set_ts_config - set hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
**/
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
@@ -466,7 +451,7 @@ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
/**
* igc_ptp_get_ts_config - get hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -515,6 +500,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real());
+ adapter->ptp_reset_start = ktime_get();
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -526,6 +514,24 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
}
+static void igc_ptp_time_save(struct igc_adapter *adapter)
+{
+ igc_ptp_read(adapter, &adapter->prev_ptp_time);
+ adapter->ptp_reset_start = ktime_get();
+}
+
+static void igc_ptp_time_restore(struct igc_adapter *adapter)
+{
+ struct timespec64 ts = adapter->prev_ptp_time;
+ ktime_t delta;
+
+ delta = ktime_sub(ktime_get(), adapter->ptp_reset_start);
+
+ timespec64_add_ns(&ts, ktime_to_ns(delta));
+
+ igc_ptp_write_i225(adapter, &ts);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -542,6 +548,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ igc_ptp_time_save(adapter);
}
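
Review note: the save/restore pair above keeps the PHC monotonic across a reset -- suspend records the PHC value plus a CLOCK_MONOTONIC anchor, and reset re-applies the saved value advanced by the elapsed monotonic delta, instead of reloading wall-clock time and discarding whatever offset a PTP daemon had steered in. A userspace sketch of the arithmetic:

    #include <time.h>

    static struct timespec prev_ptp_time;  /* PHC value captured at suspend */
    static struct timespec reset_start;    /* monotonic anchor at suspend */

    static void ptp_time_save(const struct timespec *phc_now)
    {
        prev_ptp_time = *phc_now;
        clock_gettime(CLOCK_MONOTONIC, &reset_start);
    }

    /* advance the saved PHC value by the monotonic time that elapsed
     * while the hardware was down */
    static void ptp_time_restore(struct timespec *phc_out)
    {
        struct timespec now;
        long long delta_ns, nsec;

        clock_gettime(CLOCK_MONOTONIC, &now);
        delta_ns = (now.tv_sec - reset_start.tv_sec) * 1000000000LL +
                   (now.tv_nsec - reset_start.tv_nsec);

        nsec = prev_ptp_time.tv_nsec + delta_ns % 1000000000LL;
        phc_out->tv_sec = prev_ptp_time.tv_sec + delta_ns / 1000000000LL;
        if (nsec >= 1000000000LL) {
            nsec -= 1000000000LL;
            phc_out->tv_sec++;
        }
        phc_out->tv_nsec = (long)nsec;
    }
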
/**
@@ -591,9 +599,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* Re-initialize the timer. */
if (hw->mac.type == igc_i225) {
- struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
-
- igc_ptp_write_i225(adapter, &ts64);
+ igc_ptp_time_restore(adapter);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index cbaa933ef30d..a430871d1c27 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -98,7 +98,6 @@ bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
u32 ctrl_reg;
- u32 icr_reg;
ENTER();
@@ -142,7 +141,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr_reg = IXGB_READ_REG(hw, ICR);
+ IXGB_READ_REG(hw, ICR);
return ctrl_reg & IXGB_CTRL0_RST;
}
@@ -274,7 +273,6 @@ bool
ixgb_init_hw(struct ixgb_hw *hw)
{
u32 i;
- u32 ctrl_reg;
bool status;
ENTER();
@@ -286,7 +284,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
*/
pr_debug("Issuing a global reset to MAC\n");
- ctrl_reg = ixgb_mac_reset(hw);
+ ixgb_mac_reset(hw);
pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
@@ -949,8 +947,6 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
- volatile u32 temp_reg;
-
ENTER();
/* if we are stopped or resetting exit gracefully */
@@ -959,66 +955,66 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
return;
}
- temp_reg = IXGB_READ_REG(hw, TPRL);
- temp_reg = IXGB_READ_REG(hw, TPRH);
- temp_reg = IXGB_READ_REG(hw, GPRCL);
- temp_reg = IXGB_READ_REG(hw, GPRCH);
- temp_reg = IXGB_READ_REG(hw, BPRCL);
- temp_reg = IXGB_READ_REG(hw, BPRCH);
- temp_reg = IXGB_READ_REG(hw, MPRCL);
- temp_reg = IXGB_READ_REG(hw, MPRCH);
- temp_reg = IXGB_READ_REG(hw, UPRCL);
- temp_reg = IXGB_READ_REG(hw, UPRCH);
- temp_reg = IXGB_READ_REG(hw, VPRCL);
- temp_reg = IXGB_READ_REG(hw, VPRCH);
- temp_reg = IXGB_READ_REG(hw, JPRCL);
- temp_reg = IXGB_READ_REG(hw, JPRCH);
- temp_reg = IXGB_READ_REG(hw, GORCL);
- temp_reg = IXGB_READ_REG(hw, GORCH);
- temp_reg = IXGB_READ_REG(hw, TORL);
- temp_reg = IXGB_READ_REG(hw, TORH);
- temp_reg = IXGB_READ_REG(hw, RNBC);
- temp_reg = IXGB_READ_REG(hw, RUC);
- temp_reg = IXGB_READ_REG(hw, ROC);
- temp_reg = IXGB_READ_REG(hw, RLEC);
- temp_reg = IXGB_READ_REG(hw, CRCERRS);
- temp_reg = IXGB_READ_REG(hw, ICBC);
- temp_reg = IXGB_READ_REG(hw, ECBC);
- temp_reg = IXGB_READ_REG(hw, MPC);
- temp_reg = IXGB_READ_REG(hw, TPTL);
- temp_reg = IXGB_READ_REG(hw, TPTH);
- temp_reg = IXGB_READ_REG(hw, GPTCL);
- temp_reg = IXGB_READ_REG(hw, GPTCH);
- temp_reg = IXGB_READ_REG(hw, BPTCL);
- temp_reg = IXGB_READ_REG(hw, BPTCH);
- temp_reg = IXGB_READ_REG(hw, MPTCL);
- temp_reg = IXGB_READ_REG(hw, MPTCH);
- temp_reg = IXGB_READ_REG(hw, UPTCL);
- temp_reg = IXGB_READ_REG(hw, UPTCH);
- temp_reg = IXGB_READ_REG(hw, VPTCL);
- temp_reg = IXGB_READ_REG(hw, VPTCH);
- temp_reg = IXGB_READ_REG(hw, JPTCL);
- temp_reg = IXGB_READ_REG(hw, JPTCH);
- temp_reg = IXGB_READ_REG(hw, GOTCL);
- temp_reg = IXGB_READ_REG(hw, GOTCH);
- temp_reg = IXGB_READ_REG(hw, TOTL);
- temp_reg = IXGB_READ_REG(hw, TOTH);
- temp_reg = IXGB_READ_REG(hw, DC);
- temp_reg = IXGB_READ_REG(hw, PLT64C);
- temp_reg = IXGB_READ_REG(hw, TSCTC);
- temp_reg = IXGB_READ_REG(hw, TSCTFC);
- temp_reg = IXGB_READ_REG(hw, IBIC);
- temp_reg = IXGB_READ_REG(hw, RFC);
- temp_reg = IXGB_READ_REG(hw, LFC);
- temp_reg = IXGB_READ_REG(hw, PFRC);
- temp_reg = IXGB_READ_REG(hw, PFTC);
- temp_reg = IXGB_READ_REG(hw, MCFRC);
- temp_reg = IXGB_READ_REG(hw, MCFTC);
- temp_reg = IXGB_READ_REG(hw, XONRXC);
- temp_reg = IXGB_READ_REG(hw, XONTXC);
- temp_reg = IXGB_READ_REG(hw, XOFFRXC);
- temp_reg = IXGB_READ_REG(hw, XOFFTXC);
- temp_reg = IXGB_READ_REG(hw, RJC);
+ IXGB_READ_REG(hw, TPRL);
+ IXGB_READ_REG(hw, TPRH);
+ IXGB_READ_REG(hw, GPRCL);
+ IXGB_READ_REG(hw, GPRCH);
+ IXGB_READ_REG(hw, BPRCL);
+ IXGB_READ_REG(hw, BPRCH);
+ IXGB_READ_REG(hw, MPRCL);
+ IXGB_READ_REG(hw, MPRCH);
+ IXGB_READ_REG(hw, UPRCL);
+ IXGB_READ_REG(hw, UPRCH);
+ IXGB_READ_REG(hw, VPRCL);
+ IXGB_READ_REG(hw, VPRCH);
+ IXGB_READ_REG(hw, JPRCL);
+ IXGB_READ_REG(hw, JPRCH);
+ IXGB_READ_REG(hw, GORCL);
+ IXGB_READ_REG(hw, GORCH);
+ IXGB_READ_REG(hw, TORL);
+ IXGB_READ_REG(hw, TORH);
+ IXGB_READ_REG(hw, RNBC);
+ IXGB_READ_REG(hw, RUC);
+ IXGB_READ_REG(hw, ROC);
+ IXGB_READ_REG(hw, RLEC);
+ IXGB_READ_REG(hw, CRCERRS);
+ IXGB_READ_REG(hw, ICBC);
+ IXGB_READ_REG(hw, ECBC);
+ IXGB_READ_REG(hw, MPC);
+ IXGB_READ_REG(hw, TPTL);
+ IXGB_READ_REG(hw, TPTH);
+ IXGB_READ_REG(hw, GPTCL);
+ IXGB_READ_REG(hw, GPTCH);
+ IXGB_READ_REG(hw, BPTCL);
+ IXGB_READ_REG(hw, BPTCH);
+ IXGB_READ_REG(hw, MPTCL);
+ IXGB_READ_REG(hw, MPTCH);
+ IXGB_READ_REG(hw, UPTCL);
+ IXGB_READ_REG(hw, UPTCH);
+ IXGB_READ_REG(hw, VPTCL);
+ IXGB_READ_REG(hw, VPTCH);
+ IXGB_READ_REG(hw, JPTCL);
+ IXGB_READ_REG(hw, JPTCH);
+ IXGB_READ_REG(hw, GOTCL);
+ IXGB_READ_REG(hw, GOTCH);
+ IXGB_READ_REG(hw, TOTL);
+ IXGB_READ_REG(hw, TOTH);
+ IXGB_READ_REG(hw, DC);
+ IXGB_READ_REG(hw, PLT64C);
+ IXGB_READ_REG(hw, TSCTC);
+ IXGB_READ_REG(hw, TSCTFC);
+ IXGB_READ_REG(hw, IBIC);
+ IXGB_READ_REG(hw, RFC);
+ IXGB_READ_REG(hw, LFC);
+ IXGB_READ_REG(hw, PFRC);
+ IXGB_READ_REG(hw, PFTC);
+ IXGB_READ_REG(hw, MCFRC);
+ IXGB_READ_REG(hw, MCFTC);
+ IXGB_READ_REG(hw, XONRXC);
+ IXGB_READ_REG(hw, XONTXC);
+ IXGB_READ_REG(hw, XOFFRXC);
+ IXGB_READ_REG(hw, XOFFTXC);
+ IXGB_READ_REG(hw, RJC);
}
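
Review note: these stats registers are clear-on-read, so the load itself is the side effect and the value can be discarded -- the hunk only drops the write to the never-used temp_reg. The reads survive because MMIO accessors go through volatile pointers; a compact illustration of why no dummy variable is needed:

    #include <stddef.h>
    #include <stdint.h>

    /* a volatile access cannot be elided by the compiler, so reading
     * and discarding is enough to clear a clear-on-read counter */
    static void clear_hw_counters(volatile const uint32_t *stats, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            (void)stats[i];              /* read and discard */
    }
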
/******************************************************************************
@@ -1161,18 +1157,13 @@ static void
ixgb_optics_reset(struct ixgb_hw *hw)
{
if (hw->phy_type == ixgb_phy_type_txn17401) {
- u16 mdio_reg;
-
ixgb_write_phy_reg(hw,
MDIO_CTRL1,
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD,
MDIO_CTRL1_RESET);
- mdio_reg = ixgb_read_phy_reg(hw,
- MDIO_CTRL1,
- IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD);
+ ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
}
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 048351cf0e4a..1588376d4c67 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1109,7 +1109,7 @@ alloc_failed:
/**
* ixgb_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void
@@ -1531,10 +1531,11 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* ixgb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue hanging (unused)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -1746,7 +1747,8 @@ ixgb_intr(int irq, void *data)
/**
* ixgb_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct pointer
+ * @budget: max number of receives to clean
**/
static int
@@ -1865,7 +1867,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
* ixgb_rx_checksum - Receive Checksum Offload for 82597.
* @adapter: board private structure
* @rx_desc: receive descriptor
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void
@@ -1923,6 +1925,8 @@ static void ixgb_check_copybreak(struct napi_struct *napi,
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
+ * @work_done: output pointer to amount of packets cleaned
+ * @work_to_do: how much work we can complete
**/
static bool
@@ -2042,6 +2046,7 @@ rxdesc_done:
/**
* ixgb_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
+ * @cleaned_count: how many buffers to allocate
**/
static void
@@ -2211,7 +2216,7 @@ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
/**
* ixgb_io_slot_reset - called after the pci bus has been reset.
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* This callback is called after the PCI bus has been reset.
* Basically, this tries to restart the card from scratch.
@@ -2259,7 +2264,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/**
* ixgb_io_resume - called when its OK to resume normal operations
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* The error recovery driver tells us that its OK to resume
* normal operation. Implementation resembles the second-half
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1e8a809233a0..de0fc6ecf491 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -350,7 +350,7 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 71ec908266a6..a280aa34ca1d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -531,6 +531,16 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
return err;
}
+static void ixgbe_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+
+ stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
+ stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
+}
+
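
Review note: the new ethtool hook reports pause frames as the sum of the XON and XOFF counters in each direction. The computation in isolation:

    #include <stdint.h>

    struct pause_stats { uint64_t tx_pause_frames, rx_pause_frames; };
    struct mac_stats   { uint64_t lxontxc, lxofftxc, lxonrxc, lxoffrxc; };

    /* pause frames per direction = XON + XOFF, as the hook above does */
    static void get_pause_stats(const struct mac_stats *hw,
                                struct pause_stats *out)
    {
        out->tx_pause_frames = hw->lxontxc + hw->lxofftxc;
        out->rx_pause_frames = hw->lxonrxc + hw->lxoffrxc;
    }
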
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -3546,6 +3556,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
.get_msglevel = ixgbe_get_msglevel,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2e35c5706cf1..df389a11d3af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1029,10 +1029,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
- napi_hash_del(&q_vector->napi);
- netif_napi_del(&q_vector->napi);
+ __netif_napi_del(&q_vector->napi);
/*
+ * after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 86ca8b9ea1b8..45ae33e15303 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2095,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2161,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -3156,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3176,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
@@ -3471,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
- ring->xsk_umem = NULL;
+ ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3713,8 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
+ if (rx_ring->xsk_pool) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4059,12 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4119,8 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
- if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
@@ -4142,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- if (ring->xsk_umem)
+ if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -5292,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -5682,7 +5677,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
- WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
netif_trans_update(adapter->netdev);
@@ -5989,7 +5983,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (tx_ring->xsk_umem) {
+ if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
@@ -6185,8 +6179,9 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that timed out
**/
-static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -10161,7 +10156,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->xdp_ring[i]->xsk_umem)
+ if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
@@ -10175,8 +10170,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 7980d7265e10..fc389eecdd2b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -771,7 +771,7 @@ mii_bus_write_done:
/**
* ixgbe_mii_bus_read - Read a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -786,7 +786,7 @@ static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
/**
* ixgbe_mii_bus_write - Write a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
@@ -803,7 +803,7 @@ static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
/**
* ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -820,7 +820,7 @@ static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
/**
* ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
@@ -901,15 +901,13 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
**/
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
+ s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ s32 (*read)(struct mii_bus *bus, int addr, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
- bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -922,16 +920,23 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!ixgbe_x550em_a_has_mii(hw))
- return -ENODEV;
- bus->read = &ixgbe_x550em_a_mii_bus_read;
- bus->write = &ixgbe_x550em_a_mii_bus_write;
+ return 0;
+ read = &ixgbe_x550em_a_mii_bus_read;
+ write = &ixgbe_x550em_a_mii_bus_write;
break;
default:
- bus->read = &ixgbe_mii_bus_read;
- bus->write = &ixgbe_mii_bus_write;
+ read = &ixgbe_mii_bus_read;
+ write = &ixgbe_mii_bus_write;
break;
}
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->read = read;
+ bus->write = write;
+
/* Use the position of the device in the PCI hierarchy as the id */
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
pci_name(pdev));
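
Review note: the restructuring above selects the read/write ops (and decides whether a bus is needed at all) before calling devm_mdiobus_alloc(), so the early "no MII" exit can no longer leave a managed allocation behind. A generic sketch of the allocate-late pattern (illustrative names, malloc standing in for the devm allocator):

    #include <stdlib.h>

    typedef int (*mii_read_fn)(int addr, int regnum);

    static int generic_read(int addr, int regnum) { (void)addr; (void)regnum; return 0; }
    static int soc_read(int addr, int regnum)     { (void)addr; (void)regnum; return 0; }

    /* decide first, allocate second: nothing to free on the early exit */
    static mii_read_fn *mii_bus_init(int is_soc, int has_mii)
    {
        mii_read_fn read;
        mii_read_fn *bus;

        if (is_soc) {
            if (!has_mii)
                return NULL;             /* nothing allocated yet */
            read = soc_read;
        } else {
            read = generic_read;
        }

        bus = malloc(sizeof(*bus));
        if (bus)
            *bus = read;
        return bus;
    }
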
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 7887ae4aaf4f..2aeec78029bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -28,9 +28,10 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid);
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de563cfd294d..4b93ba149ec5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -350,7 +350,6 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
hw_dbg(hw, "EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
- break;
}
/* Skip pointer section if length is invalid. */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ec7121f352e2..3771857cf887 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -8,8 +8,8 @@
#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
@@ -17,11 +17,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(adapter->netdev, qid);
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
@@ -35,7 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -59,13 +59,13 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
- umem = xdp_get_umem_from_qid(adapter->netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
@@ -75,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -83,11 +83,12 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
return 0;
}
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
- ixgbe_xsk_umem_disable(adapter, qid);
+ return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+ ixgbe_xsk_pool_disable(adapter, qid);
}
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
@@ -149,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count;
do {
- bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) {
ok = false;
break;
@@ -286,7 +287,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
@@ -344,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -373,6 +374,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
+ struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_bi;
bool work_done = true;
@@ -387,12 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
- desc.len);
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -418,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(pool);
}
return !!budget && work_done;
@@ -439,7 +440,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -484,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
@@ -511,7 +512,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
return -ENETDOWN;
- if (!ring->xsk_umem)
+ if (!ring->xsk_pool)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
@@ -526,7 +527,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -546,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a428113e6d54..82fce27f682b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -246,8 +246,9 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: transmit queue hanging (unused)
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -866,10 +867,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -947,10 +946,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
* have a consumer accessing first few bytes of meta data,
* and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -2526,8 +2522,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
- WARN_ON(in_interrupt());
-
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index ddc757680089..e9efe074edc1 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1187,9 +1187,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
}
static void
-jme_pcc_tasklet(unsigned long arg)
+jme_pcc_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
struct net_device *netdev = jme->dev;
if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1265,10 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
jwrite32f(jme, JME_APMC, apmc);
}
-static void
-jme_link_change_tasklet(unsigned long arg)
+static void jme_link_change_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
struct net_device *netdev = jme->dev;
int rc;
@@ -1345,9 +1344,9 @@ out:
}
static void
-jme_rx_clean_tasklet(unsigned long arg)
+jme_rx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
struct dynpcc_info *dpi = &(jme->dpi);
jme_process_receive(jme, jme->rx_ring_size);
@@ -1380,9 +1379,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
}
static void
-jme_rx_empty_tasklet(unsigned long arg)
+jme_rx_empty_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
if (unlikely(atomic_read(&jme->link_changing) != 1))
return;
@@ -1392,7 +1391,7 @@ jme_rx_empty_tasklet(unsigned long arg)
netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
- jme_rx_clean_tasklet(arg);
+ jme_rx_clean_tasklet(&jme->rxclean_task);
while (atomic_read(&jme->rx_empty) > 0) {
atomic_dec(&jme->rx_empty);
@@ -1416,10 +1415,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
}
-static void
-jme_tx_clean_tasklet(unsigned long arg)
+static void jme_tx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1834,14 +1832,10 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
+ tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
+ tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
+ tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
rc = jme_request_irq(jme);
if (rc)
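
Review note: the tasklet_setup()/from_tasklet() conversion replaces the old "private pointer cast to unsigned long" calling convention -- callbacks now receive the tasklet pointer and recover the containing structure via container_of(). A self-contained userspace sketch of that recovery (toy stand-ins for tasklet_struct and from_tasklet()):

    #include <stddef.h>
    #include <stdio.h>

    struct tasklet { void (*func)(struct tasklet *t); };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct jme_like_adapter {
        int id;
        struct tasklet rxclean_task;
    };

    /* new-style callback: derive the private struct from the tasklet
     * pointer instead of smuggling it through an unsigned long */
    static void rx_clean_cb(struct tasklet *t)
    {
        struct jme_like_adapter *jme =
            container_of(t, struct jme_like_adapter, rxclean_task);
        printf("cleaning rx for adapter %d\n", jme->id);
    }

    int main(void)
    {
        struct jme_like_adapter jme = { .id = 7 };

        jme.rxclean_task.func = rx_clean_cb;       /* tasklet_setup() analogue */
        jme.rxclean_task.func(&jme.rxclean_task);  /* scheduler firing it */
        return 0;
    }
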
@@ -3040,9 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->tx_cleaning, 1);
atomic_set(&jme->rx_empty, 1);
- tasklet_init(&jme->pcc_task,
- jme_pcc_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 03e034918d14..bf48f0ded9c7 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1113,7 +1113,7 @@ out:
return rc;
probe_err_register:
- kfree(lp->td_ring);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
probe_err_td_ring:
iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs);
+ kfree((struct dma_desc *)KSEG0ADDR(lp->td_ring));
unregister_netdev(bif->dev);
free_netdev(bif->dev);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ef4f35ba077d..41815b609569 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -92,6 +92,12 @@ config MVPP2
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
+config MVPP2_PTP
+ bool "Marvell Armada 8K Enable PTP support"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
+ (PTP_1588_CLOCK && MVPP2 = m)
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on HAS_IOMEM
@@ -172,5 +178,6 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 89dea7284d5b..9f88fe822555 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeontx2/
+obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bf0409f5d42..54b0bf574c05 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -330,7 +330,6 @@
#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
-#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
#define IS_TSO_HEADER(txq, addr) \
@@ -752,13 +751,12 @@ static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
int i;
- u32 dummy;
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
- dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
- dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
- dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -1833,7 +1831,7 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct mvneta_tx_queue *txq, int num,
- struct netdev_queue *nq)
+ struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
@@ -1856,7 +1854,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dev_kfree_skb_any(buf->skb);
} else if (buf->type == MVNETA_TYPE_XDP_TX ||
buf->type == MVNETA_TYPE_XDP_NDO) {
- xdp_return_frame(buf->xdpf);
+ if (napi && buf->type == MVNETA_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(buf->xdpf);
+ else
+ xdp_return_frame(buf->xdpf);
}
}
@@ -1874,7 +1875,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
txq->count -= tx_done;
@@ -2227,8 +2228,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct page *page,
- struct mvneta_stats *stats)
+ struct page *page)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
@@ -2236,19 +2236,22 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
enum dma_data_direction dma_dir;
struct skb_shared_info *sinfo;
- if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len += len;
} else {
- len = rx_desc->data_size;
+ len = *size;
data_len += len - ETH_FCS_LEN;
}
+ *size = *size - len;
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
+
/* Prefetch header */
prefetch(data);
@@ -2259,9 +2262,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
-
- *size = rx_desc->data_size - len;
- rx_desc->buf_phys_addr = 0;
}
static void
@@ -2307,11 +2307,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, num_frags = sinfo->nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
- memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
-
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2323,12 +2320,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_rx_csum(pp, desc_status, skb);
for (i = 0; i < num_frags; i++) {
- struct page *page = skb_frag_page(&frags[i]);
+ skb_frag_t *frag = &sinfo->frags[i];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, skb_frag_off(&frags[i]),
- skb_frag_size(&frags[i]), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, page);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), PAGE_SIZE);
+ page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
}
return skb;
@@ -2378,10 +2375,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
size = rx_desc->data_size;
frame_sz = size - ETH_FCS_LEN;
- desc_status = rx_desc->status;
+ desc_status = rx_status;
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- &size, page, &ps);
+ &size, page);
} else {
if (unlikely(!xdp_buf.data_hard_start)) {
rx_desc->buf_phys_addr = 0;
@@ -2865,7 +2862,7 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
/* reset txq */
txq->count = 0;
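
Review note: the new napi argument threads context information down to the free helper -- only the NAPI poll path (mvneta_txq_done) may use the cheaper xdp_return_frame_rx_napi() bulk return, while the forced cleanup path keeps the general variant. The dispatch in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    static void return_frame(void)         { puts("general return"); }
    static void return_frame_rx_napi(void) { puts("cheaper NAPI-only return"); }

    /* caller states whether it runs in NAPI context; the helper picks
     * the return variant accordingly, as in the hunks above */
    static void txq_bufs_free(bool napi)
    {
        if (napi)
            return_frame_rx_napi();
        else
            return_frame();
    }
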
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 51f65a202c6e..9bd8e7964b40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-y := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-$(CONFIG_MVPP2_PTP) += mvpp2_tai.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 32753cc771bf..834775843067 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/flow_offload.h>
@@ -461,8 +462,12 @@
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define MVPP22_GMAC_INT_SUM_STAT 0xa0
+#define MVPP22_GMAC_INT_SUM_STAT_INTERNAL BIT(1)
+#define MVPP22_GMAC_INT_SUM_STAT_PTP BIT(2)
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
* relative to port->base.
@@ -488,9 +493,13 @@
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_STAT 0x158
+#define MVPP22_XLG_EXT_INT_STAT_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_STAT_PTP BIT(7)
#define MVPP22_XLG_EXT_INT_MASK 0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
+#define MVPP22_XLG_EXT_INT_MASK_PTP BIT(7)
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
@@ -501,6 +510,70 @@
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
+/* TAI registers, PPv2.2 only, relative to priv->iface_base */
+#define MVPP22_TAI_INT_CAUSE 0x1400
+#define MVPP22_TAI_INT_MASK 0x1404
+#define MVPP22_TAI_CR0 0x1408
+#define MVPP22_TAI_CR1 0x140c
+#define MVPP22_TAI_TCFCR0 0x1410
+#define MVPP22_TAI_TCFCR1 0x1414
+#define MVPP22_TAI_TCFCR2 0x1418
+#define MVPP22_TAI_FATWR 0x141c
+#define MVPP22_TAI_TOD_STEP_NANO_CR 0x1420
+#define MVPP22_TAI_TOD_STEP_FRAC_HIGH 0x1424
+#define MVPP22_TAI_TOD_STEP_FRAC_LOW 0x1428
+#define MVPP22_TAI_TAPDC_HIGH 0x142c
+#define MVPP22_TAI_TAPDC_LOW 0x1430
+#define MVPP22_TAI_TGTOD_SEC_HIGH 0x1434
+#define MVPP22_TAI_TGTOD_SEC_MED 0x1438
+#define MVPP22_TAI_TGTOD_SEC_LOW 0x143c
+#define MVPP22_TAI_TGTOD_NANO_HIGH 0x1440
+#define MVPP22_TAI_TGTOD_NANO_LOW 0x1444
+#define MVPP22_TAI_TGTOD_FRAC_HIGH 0x1448
+#define MVPP22_TAI_TGTOD_FRAC_LOW 0x144c
+#define MVPP22_TAI_TLV_SEC_HIGH 0x1450
+#define MVPP22_TAI_TLV_SEC_MED 0x1454
+#define MVPP22_TAI_TLV_SEC_LOW 0x1458
+#define MVPP22_TAI_TLV_NANO_HIGH 0x145c
+#define MVPP22_TAI_TLV_NANO_LOW 0x1460
+#define MVPP22_TAI_TLV_FRAC_HIGH 0x1464
+#define MVPP22_TAI_TLV_FRAC_LOW 0x1468
+#define MVPP22_TAI_TCV0_SEC_HIGH 0x146c
+#define MVPP22_TAI_TCV0_SEC_MED 0x1470
+#define MVPP22_TAI_TCV0_SEC_LOW 0x1474
+#define MVPP22_TAI_TCV0_NANO_HIGH 0x1478
+#define MVPP22_TAI_TCV0_NANO_LOW 0x147c
+#define MVPP22_TAI_TCV0_FRAC_HIGH 0x1480
+#define MVPP22_TAI_TCV0_FRAC_LOW 0x1484
+#define MVPP22_TAI_TCV1_SEC_HIGH 0x1488
+#define MVPP22_TAI_TCV1_SEC_MED 0x148c
+#define MVPP22_TAI_TCV1_SEC_LOW 0x1490
+#define MVPP22_TAI_TCV1_NANO_HIGH 0x1494
+#define MVPP22_TAI_TCV1_NANO_LOW 0x1498
+#define MVPP22_TAI_TCV1_FRAC_HIGH 0x149c
+#define MVPP22_TAI_TCV1_FRAC_LOW 0x14a0
+#define MVPP22_TAI_TCSR 0x14a4
+#define MVPP22_TAI_TUC_LSB 0x14a8
+#define MVPP22_TAI_GFM_SEC_HIGH 0x14ac
+#define MVPP22_TAI_GFM_SEC_MED 0x14b0
+#define MVPP22_TAI_GFM_SEC_LOW 0x14b4
+#define MVPP22_TAI_GFM_NANO_HIGH 0x14b8
+#define MVPP22_TAI_GFM_NANO_LOW 0x14bc
+#define MVPP22_TAI_GFM_FRAC_HIGH 0x14c0
+#define MVPP22_TAI_GFM_FRAC_LOW 0x14c4
+#define MVPP22_TAI_PCLK_DA_HIGH 0x14c8
+#define MVPP22_TAI_PCLK_DA_LOW 0x14cc
+#define MVPP22_TAI_CTCR 0x14d0
+#define MVPP22_TAI_PCLK_CCC_HIGH 0x14d4
+#define MVPP22_TAI_PCLK_CCC_LOW 0x14d8
+#define MVPP22_TAI_DTC_HIGH 0x14dc
+#define MVPP22_TAI_DTC_LOW 0x14e0
+#define MVPP22_TAI_CCC_HIGH 0x14e4
+#define MVPP22_TAI_CCC_LOW 0x14e8
+#define MVPP22_TAI_ICICE 0x14f4
+#define MVPP22_TAI_ICICC_LOW 0x14f8
+#define MVPP22_TAI_TUC_MSB 0x14fc
+
#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -527,6 +600,46 @@
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
+/* PTP registers. PPv2.2 only */
+#define MVPP22_PTP_BASE(port) (0x7800 + (port * 0x1000))
+#define MVPP22_PTP_INT_CAUSE 0x00
+#define MVPP22_PTP_INT_CAUSE_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_CAUSE_QUEUE0 BIT(5)
+#define MVPP22_PTP_INT_MASK 0x04
+#define MVPP22_PTP_INT_MASK_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_MASK_QUEUE0 BIT(5)
+#define MVPP22_PTP_GCR 0x08
+#define MVPP22_PTP_GCR_RX_RESET BIT(13)
+#define MVPP22_PTP_GCR_TX_RESET BIT(1)
+#define MVPP22_PTP_GCR_TSU_ENABLE BIT(0)
+#define MVPP22_PTP_TX_Q0_R0 0x0c
+#define MVPP22_PTP_TX_Q0_R1 0x10
+#define MVPP22_PTP_TX_Q0_R2 0x14
+#define MVPP22_PTP_TX_Q1_R0 0x18
+#define MVPP22_PTP_TX_Q1_R1 0x1c
+#define MVPP22_PTP_TX_Q1_R2 0x20
+#define MVPP22_PTP_TPCR 0x24
+#define MVPP22_PTP_V1PCR 0x28
+#define MVPP22_PTP_V2PCR 0x2c
+#define MVPP22_PTP_Y1731PCR 0x30
+#define MVPP22_PTP_NTPTSPCR 0x34
+#define MVPP22_PTP_NTPRXPCR 0x38
+#define MVPP22_PTP_NTPTXPCR 0x3c
+#define MVPP22_PTP_WAMPPCR 0x40
+#define MVPP22_PTP_NAPCR 0x44
+#define MVPP22_PTP_FAPCR 0x48
+#define MVPP22_PTP_CAPCR 0x50
+#define MVPP22_PTP_ATAPCR 0x54
+#define MVPP22_PTP_ACTAPCR 0x58
+#define MVPP22_PTP_CATAPCR 0x5c
+#define MVPP22_PTP_CACTAPCR 0x60
+#define MVPP22_PTP_AITAPCR 0x64
+#define MVPP22_PTP_CAITAPCR 0x68
+#define MVPP22_PTP_CITAPCR 0x6c
+#define MVPP22_PTP_NTP_OFF_HIGH 0x70
+#define MVPP22_PTP_NTP_OFF_LOW 0x74
+#define MVPP22_PTP_TX_PIPE_STATUS_DELAY 0x78
+
/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
@@ -692,6 +805,43 @@ enum mvpp2_prs_l3_cast {
MVPP2_PRS_L3_BROAD_CAST
};
+/* PTP descriptor constants. The low bits of the descriptor are stored
+ * separately from the high bits.
+ */
+#define MVPP22_PTP_DESC_MASK_LOW 0xfff
+
+/* PTPAction */
+enum mvpp22_ptp_action {
+ MVPP22_PTP_ACTION_NONE = 0,
+ MVPP22_PTP_ACTION_FORWARD = 1,
+ MVPP22_PTP_ACTION_CAPTURE = 3,
+ /* The following have not been verified */
+ MVPP22_PTP_ACTION_ADDTIME = 4,
+ MVPP22_PTP_ACTION_ADDCORRECTEDTIME = 5,
+ MVPP22_PTP_ACTION_CAPTUREADDTIME = 6,
+ MVPP22_PTP_ACTION_CAPTUREADDCORRECTEDTIME = 7,
+ MVPP22_PTP_ACTION_ADDINGRESSTIME = 8,
+ MVPP22_PTP_ACTION_CAPTUREADDINGRESSTIME = 9,
+ MVPP22_PTP_ACTION_CAPTUREINGRESSTIME = 10,
+};
+
+/* PTPPacketFormat */
+enum mvpp22_ptp_packet_format {
+ MVPP22_PTP_PKT_FMT_PTPV2 = 0,
+ MVPP22_PTP_PKT_FMT_PTPV1 = 1,
+ MVPP22_PTP_PKT_FMT_Y1731 = 2,
+ MVPP22_PTP_PKT_FMT_NTPTS = 3,
+ MVPP22_PTP_PKT_FMT_NTPRX = 4,
+ MVPP22_PTP_PKT_FMT_NTPTX = 5,
+ MVPP22_PTP_PKT_FMT_TWAMP = 6,
+};
+
+#define MVPP22_PTP_ACTION(x) (((x) & 15) << 0)
+#define MVPP22_PTP_PACKETFORMAT(x) (((x) & 7) << 4)
+#define MVPP22_PTP_MACTIMESTAMPINGEN BIT(11)
+#define MVPP22_PTP_TIMESTAMPENTRYID(x) (((x) & 31) << 12)
+#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
+
/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
@@ -759,6 +909,8 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+struct mvpp2_tai;
+
/* Definitions */
struct mvpp2_dbgfs_entries;
@@ -794,6 +946,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ struct mvpp2_tai *tai;
/* Number of Tx threads used */
unsigned int nthreads;
@@ -907,6 +1060,11 @@ struct mvpp2_ethtool_fs {
struct ethtool_rxnfc rxnfc;
};
+struct mvpp2_hwtstamp_queue {
+ struct sk_buff *skb[32];
+ u8 next;
+};
+
struct mvpp2_port {
u8 id;
@@ -915,7 +1073,7 @@ struct mvpp2_port {
*/
int gop_id;
- int link_irq;
+ int port_irq;
struct mvpp2 *priv;
@@ -967,6 +1125,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -989,6 +1148,11 @@ struct mvpp2_port {
* them from 0
*/
int rss_ctx[MVPP22_N_RSS_TABLES];
+
+ bool hwtstamp;
+ bool rx_hwtstamp;
+ enum hwtstamp_tx_types tx_hwtstamp_type;
+ struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1057,7 +1221,8 @@ struct mvpp22_tx_desc {
u8 packet_offset;
u8 phys_txq;
__le16 data_size;
- __le64 reserved1;
+ __le32 ptp_descriptor;
+ __le32 reserved2;
__le64 buf_dma_addr_ptp;
__le64 buf_cookie_misc;
};
@@ -1068,7 +1233,7 @@ struct mvpp22_rx_desc {
__le16 reserved1;
__le16 data_size;
__le32 reserved2;
- __le32 reserved3;
+ __le32 timestamp;
__le64 buf_dma_addr_key_hash;
__le64 buf_cookie_misc;
};
@@ -1248,4 +1413,36 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+#ifdef CONFIG_MVPP2_PTP
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp);
+void mvpp22_tai_start(struct mvpp2_tai *tai);
+void mvpp22_tai_stop(struct mvpp2_tai *tai);
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai);
+#else
+static inline int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ return 0;
+}
+static inline void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+}
+static inline void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+}
+static inline void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+}
+static inline int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return -1;
+}
+#endif
+
+static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
+{
+ return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
+}
#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6e140d1b8967..f6616c8933ca 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
+#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
@@ -57,13 +58,7 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
+static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -1385,6 +1380,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
u32 val;
+ mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
+ MVPP22_GMAC_INT_SUM_MASK_PTP,
+ MVPP22_GMAC_INT_SUM_MASK_PTP);
+
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
@@ -1398,6 +1397,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
+
+ mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
+ MVPP22_XLG_EXT_INT_MASK_PTP,
+ MVPP22_XLG_EXT_INT_MASK_PTP);
}
mvpp22_gop_unmask_irq(port);
@@ -1485,8 +1488,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -2980,44 +2983,67 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Per-port interrupt for link status changes */
-static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
+static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
{
- struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
- struct net_device *dev = port->dev;
- bool event = false, link = false;
- u32 val;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct mvpp2_hwtstamp_queue *queue;
+ struct sk_buff *skb;
+ void __iomem *ptp_q;
+ unsigned int id;
+ u32 r0, r1, r2;
- mvpp22_gop_mask_irq(port);
+ ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ if (nq)
+ ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
- val = readl(port->base + MVPP22_XLG_INT_STAT);
- if (val & MVPP22_XLG_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP22_XLG_STATUS);
- if (val & MVPP22_XLG_STATUS_LINK_UP)
- link = true;
- }
- } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_STAT);
- if (val & MVPP22_GMAC_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP2_GMAC_STATUS0);
- if (val & MVPP2_GMAC_STATUS0_LINK_UP)
- link = true;
+ queue = &port->tx_hwtstamp_queue[nq];
+
+ while (1) {
+ r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
+ if (!r0)
+ break;
+
+ r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
+ r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
+
+ id = (r0 >> 1) & 31;
+
+ skb = queue->skb[id];
+ queue->skb[id] = NULL;
+ if (skb) {
+ u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
+
+ mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
}
+}
+
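The ts = r2 << 19 | r1 << 3 | r0 >> 13 assembly above implies that each TX timestamp queue entry spreads a 32-bit timestamp (2 bits of seconds plus 30 bits of nanoseconds, the format later consumed by mvpp22_tai_tstamp()) and a 5-bit entry id across three 16-bit registers. A runnable userspace sketch of that layout; the encode side is inferred from the driver's shifts, not from a datasheet:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tstamp = (2u << 30) | 123456789u;	/* 2s + 123456789ns */
	uint32_t id = 21;				/* 5-bit entry id */

	/* Hypothetical hardware encoding: the exact inverse of the
	 * ISR's decode. Bit 0 of r0 is assumed to be a "valid" flag,
	 * since the ISR loops until r0 reads as zero.
	 */
	uint32_t r0 = ((tstamp & 7) << 13) | (id << 1) | 1;
	uint32_t r1 = (tstamp >> 3) & 0xffff;
	uint32_t r2 = (tstamp >> 19) & 0x1fff;

	/* Decode exactly as mvpp2_isr_handle_ptp_queue() does */
	uint32_t ts = r2 << 19 | r1 << 3 | r0 >> 13;

	assert(ts == tstamp);
	assert(((r0 >> 1) & 31) == id);
	printf("sec[1:0]=%u nsec=%u id=%u\n",
	       ts >> 30, ts & 0x3fffffff, (r0 >> 1) & 31);
	return 0;
}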
+static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
+{
+ void __iomem *ptp;
+ u32 val;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ val = readl(ptp + MVPP22_PTP_INT_CAUSE);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
+ mvpp2_isr_handle_ptp_queue(port, 0);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
+ mvpp2_isr_handle_ptp_queue(port, 1);
+}
+
+static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+{
+ struct net_device *dev = port->dev;
if (port->phylink) {
phylink_mac_change(port->phylink, link);
- goto handled;
+ return;
}
- if (!netif_running(dev) || !event)
- goto handled;
+ if (!netif_running(dev))
+ return;
if (link) {
mvpp2_interrupts_enable(port);
@@ -3034,8 +3060,65 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
mvpp2_interrupts_disable(port);
}
+}
+
+static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ link = (val & MVPP22_XLG_STATUS_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+}
+
+static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+ link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+ }
+}
+
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
+{
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+ u32 val;
+
+ mvpp22_gop_mask_irq(port);
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ /* Check the external status register */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
+ if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
+ mvpp2_isr_handle_xlg(port);
+ if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ } else {
+ /* If it's not the XLG, we must be using the GMAC.
+ * Check the summary status.
+ */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
+ mvpp2_isr_handle_gmac_internal(port);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ }
-handled:
mvpp22_gop_unmask_irq(port);
return IRQ_HANDLED;
}
@@ -3427,7 +3510,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status;
+ u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
void *data;
@@ -3505,6 +3588,15 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
goto err_drop_frame;
}
+ /* If we have RX hardware timestamping enabled, grab the
+ * timestamp from the queue and convert.
+ */
+ if (mvpp22_rx_hwtstamping(port)) {
+ timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
+ mvpp22_tai_tstamp(port->priv->tai, timestamp,
+ skb_hwtstamps(skb));
+ }
+
err = mvpp2_rx_refill(port, bm_pool, pp, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
@@ -3579,6 +3671,94 @@ tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
mvpp2_txq_desc_put(txq);
}
+static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *desc)
+{
+ /* We only need to clear the low bits */
+ if (port->priv->hw_version != MVPP21)
+ desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+}
+
+static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ struct sk_buff *skb)
+{
+ struct mvpp2_hwtstamp_queue *queue;
+ unsigned int mtype, type, i;
+ struct ptp_header *hdr;
+ u64 ptpdesc;
+
+ if (port->priv->hw_version == MVPP21 ||
+ port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ type = ptp_classify_raw(skb);
+ if (!type)
+ return false;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
+ MVPP22_PTP_ACTION_CAPTURE;
+ queue = &port->tx_hwtstamp_queue[0];
+
+ switch (type & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
+ break;
+
+ case PTP_CLASS_V2:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
+ mtype = hdr->tsmt & 15;
+ /* Direct PTP Sync messages to queue 1 */
+ if (mtype == 0) {
+ ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
+ queue = &port->tx_hwtstamp_queue[1];
+ }
+ break;
+ }
+
+ /* Take a reference on the skb and insert into our queue */
+ i = queue->next;
+ queue->next = (i + 1) & 31;
+ if (queue->skb[i])
+ dev_kfree_skb_any(queue->skb[i]);
+ queue->skb[i] = skb_get(skb);
+
+ ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
+
+ /*
+ * 3:0 - PTPAction
+ * 6:4 - PTPPacketFormat
+ * 7 - PTP_CF_WraparoundCheckEn
+ * 9:8 - IngressTimestampSeconds[1:0]
+ * 10 - Reserved
+ * 11 - MACTimestampingEn
+ * 17:12 - PTP_TimestampQueueEntryID[5:0]
+ * 18 - PTPTimestampQueueSelect
+ * 19 - UDPChecksumUpdateEn
+ * 27:20 - TimestampOffset
+ * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
+ * NTPTs, Y.1731 - L3 to timestamp entry
+ * 35:28 - UDP Checksum Offset
+ *
+ * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
+ */
+ tx_desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.ptp_descriptor |=
+ cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
+ tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
+
+ return true;
+}
+
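The bit-layout comment above can be checked mechanically: the 36-bit PTP control word is split so that bits 11:0 land in ptp_descriptor (descriptor bits 75:64) and bits 35:12 in the top 24 bits of buf_dma_addr_ptp (descriptor bits 191:168), leaving the low 40 DMA address bits untouched. A runnable userspace sketch of the split, with arbitrary example values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t ptpdesc = 0xabcdef123ULL;		/* any 36-bit control word */
	uint64_t buf_dma_addr_ptp = 0x1234567890ULL;	/* low 40 bits: DMA address */
	uint32_t ptp_descriptor = 0;

	/* Split exactly as mvpp2_tx_hw_tstamp() does */
	ptp_descriptor = (ptp_descriptor & ~0xfffu) | (uint32_t)(ptpdesc & 0xfff);
	buf_dma_addr_ptp = (buf_dma_addr_ptp & ~0xffffff0000000000ULL) |
			   ((ptpdesc >> 12) << 40);

	/* Reassemble: nothing lost, DMA address bits untouched */
	assert((((buf_dma_addr_ptp >> 40) << 12) | (ptp_descriptor & 0xfff)) == ptpdesc);
	assert((buf_dma_addr_ptp & 0xffffffffffULL) == 0x1234567890ULL);
	return 0;
}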
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_tx_queue *aggr_txq,
@@ -3595,6 +3775,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
@@ -3644,6 +3825,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
@@ -3668,6 +3850,7 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t buf_dma_addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, sz);
@@ -3784,6 +3967,9 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
@@ -4007,17 +4193,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
if (port->phylink) {
phylink_start(port->phylink);
} else {
- /* Phylink isn't used as of now for ACPI, so the MAC has to be
- * configured manually when the interface is started. This will
- * be removed as soon as the phylink ACPI support lands in.
- */
- struct phylink_link_state state = {
- .interface = port->phy_interface,
- };
- mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, NULL,
- MLO_AN_INBAND, port->phy_interface,
- SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+ mvpp2_acpi_start(port);
}
netif_tx_start_all_queues(port->dev);
@@ -4227,12 +4403,13 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq) {
- err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
+ if (priv->hw_version == MVPP22 && port->port_irq) {
+ err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
- netdev_err(port->dev, "cannot request link IRQ %d\n",
- port->link_irq);
+ netdev_err(port->dev,
+ "cannot request port link/ptp IRQ %d\n",
+ port->port_irq);
goto err_free_irq;
}
@@ -4243,7 +4420,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
} else {
- port->link_irq = 0;
+ port->port_irq = 0;
}
if (!valid) {
@@ -4287,8 +4464,8 @@ static int mvpp2_stop(struct net_device *dev)
if (port->phylink)
phylink_disconnect_phy(port->phylink);
- if (port->link_irq)
- free_irq(port->link_irq, port);
+ if (port->port_irq)
+ free_irq(port->port_irq, port);
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
@@ -4548,10 +4725,124 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ void __iomem *ptp;
+ u32 gcr, int_mask;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+
+ int_mask = gcr = 0;
+ if (config.tx_type != HWTSTAMP_TX_OFF) {
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
+ int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0;
+ }
+
+ /* It seems we must also release the TX reset when enabling the TSU */
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET;
+
+ if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
+ mvpp22_tai_start(port->priv->tai);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ port->rx_hwtstamp = true;
+ } else {
+ port->rx_hwtstamp = false;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ }
+
+ mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
+ MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
+
+ if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
+ mvpp22_tai_stop(port->priv->tai);
+
+ port->tx_hwtstamp_type = config.tx_type;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
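For reference, the configuration path above is exercised from userspace through the standard SIOCSHWTSTAMP ioctl. A minimal sketch, with the interface name and error handling purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

static int enable_hwtstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,	/* mvpp2 upgrades any filter to ALL */
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}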
+static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.tx_type = port->tx_hwtstamp_type;
+ config.rx_filter = port->rx_hwtstamp ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
+
+ info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_set_ts_config(port, ifr);
+ break;
+
+ case SIOCGHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_get_ts_config(port, ifr);
+ break;
+ }
+
if (!port->phylink)
return -ENOTSUPP;
@@ -5021,6 +5312,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mvpp2_ethtool_get_ts_info,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
@@ -5392,6 +5684,155 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, phylink_pcs);
+}
+
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
+ .pcs_config = mvpp2_xlg_pcs_config,
+};
+
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause
+ * modes are controlled automatically by the hardware or
+ * manually via the bits in MVPP22_GMAC_CTRL_4_REG.
+ */
+ if (permit_pause_to_mac)
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+
+ /* Configure advertisement bits */
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
+ if (phylink_test(advertising, Pause))
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(advertising, Asym_Pause))
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
+ }
+ } else {
+ val = 0;
+ }
+
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = an ^ old_an;
+ if (changed)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* We are only interested in the advertisement bits changing */
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
+}
+
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
+ .pcs_config = mvpp2_gmac_pcs_config,
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
+};
+
static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
@@ -5480,89 +5921,6 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- state->speed = SPEED_10000;
- state->duplex = 1;
- state->an_complete = 1;
-
- val = readl(port->base + MVPP22_XLG_STATUS);
- state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
-
- state->pause = 0;
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_TX;
- if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_RX;
-}
-
-static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_STATUS0);
-
- state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
- state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
- state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- state->speed = SPEED_1000;
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- state->speed = SPEED_2500;
- break;
- default:
- if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
- state->speed = SPEED_1000;
- else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
- }
-
- state->pause = 0;
- if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
- state->pause |= MLO_PAUSE_RX;
- if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
- state->pause |= MLO_PAUSE_TX;
-}
-
-static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
-
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
- u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
- mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_pcs_get_state(port, state);
- return;
- }
- }
-
- mvpp2_gmac_pcs_get_state(port, state);
-}
-
-static void mvpp2_mac_an_restart(struct phylink_config *config)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -5586,23 +5944,16 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 old_an, an;
u32 old_ctrl0, ctrl0;
u32 old_ctrl2, ctrl2;
u32 old_ctrl4, ctrl4;
- old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
- MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
- MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5624,12 +5975,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
- /* Configure advertisement bits */
- if (phylink_test(state->advertising, Pause))
- an |= MVPP2_GMAC_FC_ADV_EN;
- if (phylink_test(state->advertising, Asym_Pause))
- an |= MVPP2_GMAC_FC_ADV_ASM_EN;
-
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -5638,14 +5983,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5653,42 +5990,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- }
-
-/* Some fields of the auto-negotiation register require the port to be down when
- * their value is updated.
- */
-#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
- (MVPP2_GMAC_IN_BAND_AUTONEG | \
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
- MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
- MVPP2_GMAC_AN_DUPLEX_EN)
-
- if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
- (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
- (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
- /* Force link down */
- old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state - do this in a way that
- * ensures we clear it below.
- */
- old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
}
if (old_ctrl0 != ctrl0)
@@ -5697,41 +5998,85 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
if (old_ctrl4 != ctrl4)
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
- if (old_an != an)
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
- }
}
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return;
+ return -EINVAL;
+ }
+
+ if (port->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode to ensure we do not change the configuration
+ * while the hardware is indicating link is up. We force both
+ * XLG and GMAC down to ensure that they're both in a known
+ * state.
+ */
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN,
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+
+ if (mvpp2_port_supports_xlg(port))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
}
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 && change_interface) {
- mvpp22_gop_mask_irq(port);
+ if (port->phy_interface != interface) {
+ /* Place GMAC into reset */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK,
+ MVPP2_GMAC_PORT_RESET_MASK);
- port->phy_interface = state->interface;
+ if (port->priv->hw_version == MVPP22) {
+ mvpp22_gop_mask_irq(port);
- /* Reconfigure the serdes lanes */
- phy_power_off(port->comphy);
- mvpp22_mode_reconfigure(port);
+ phy_power_off(port->comphy);
+ }
}
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
+ else
+ port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
+
+ return 0;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ int ret;
+
+ ret = mvpp2__mac_prepare(config, mode, interface);
+ if (ret == 0)
+ phylink_set_pcs(port->phylink, &port->phylink_pcs);
+
+ return ret;
+}
+
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
/* mac (re)configuration */
if (mvpp2_is_xlg(state->interface))
mvpp2_xlg_config(port, mode, state);
@@ -5742,11 +6087,51 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+}
+
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != interface) {
+ port->phy_interface = interface;
- if (port->priv->hw_version == MVPP22 && change_interface)
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port);
+
+ /* Unmask interrupts */
mvpp22_gop_unmask_irq(port);
+ }
+
+ if (!mvpp2_is_xlg(interface)) {
+ /* Release GMAC reset and wait */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
mvpp2_port_enable(port);
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
+ else
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
+ }
+
+ return 0;
}
static void mvpp2_mac_link_up(struct phylink_config *config,
@@ -5843,13 +6228,36 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
- .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
+ .mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
};
+/* Work-around for ACPI */
+static void mvpp2_acpi_start(struct mvpp2_port *port)
+{
+ /* Phylink isn't used as of now for ACPI, so the MAC has to be
+ * configured manually when the interface is started. This will
+ * be removed as soon as the phylink ACPI support lands.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ };
+ mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
+ port->phy_interface,
+ state.advertising, false);
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+}
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -5937,16 +6345,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
if (port_node)
- port->link_irq = of_irq_get_byname(port_node, "link");
+ port->port_irq = of_irq_get_byname(port_node, "link");
else
- port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
- if (port->link_irq == -EPROBE_DEFER) {
+ port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
+ if (port->port_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
}
- if (port->link_irq <= 0)
+ if (port->port_irq <= 0)
/* the link irq is optional */
- port->link_irq = 0;
+ port->port_irq = 0;
if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
@@ -5983,6 +6391,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->stats_base = port->priv->iface_base +
MVPP22_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
+
+ /* We may want a property to describe whether we should use
+ * MAC hardware timestamping.
+ */
+ if (priv->tai)
+ port->hwtstamp = true;
}
/* Alloc per-cpu and ethtool stats */
@@ -6110,8 +6524,8 @@ err_free_txq_pcpu:
err_free_stats:
free_percpu(port->stats);
err_free_irq:
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
mvpp2_queue_vectors_deinit(port);
err_free_netdev:
@@ -6132,8 +6546,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
for (i = 0; i < port->ntxqs; i++)
free_percpu(port->txqs[i]->pcpu);
mvpp2_queue_vectors_deinit(port);
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
free_netdev(port->dev);
}
@@ -6545,6 +6959,10 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
}
+ err = mvpp22_tai_probe(&pdev->dev, priv);
+ if (err < 0)
+ goto err_axi_clk;
+
/* Initialize ports */
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
@@ -6663,11 +7081,13 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
{ "MRVL0110", MVPP22 },
{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+#endif
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
new file mode 100644
index 000000000000..95862aff49f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PP2.2 TAI support
+ *
+ * Note:
+ * Do NOT use the event capture support.
+ * Do not even set the MPP muxes to allow PTP_EVENT_REQ to be used.
+ * It will disrupt the operation of this driver, and there is nothing
+ * that this driver can do to prevent that. Even using PTP_EVENT_REQ
+ * as an output will be seen as a trigger input, which can't be masked.
+ * Whenever a trigger input is seen, the action in the TCFCR0_TCF
+ * field will be performed - whether it is a set, increment, decrement,
+ * read, or frequency update.
+ *
+ * Other notes (useful, not specified in the documentation):
+ * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP)
+ * It looks like the hardware can't generate a pulse at nsec=0. (The
+ * output doesn't trigger if the nsec field is zero.)
+ * Note: when configured as an output via the register at 0xfX441120,
+ * the input is still very much alive, and will trigger the current TCF
+ * function.
+ * - PTP_CLK_OUT (PTP_TRIG_GEN MPP)
+ * This generates a "PPS" signal determined by the CCC registers. It
+ * seems this is not aligned to the TOD counter in any way (it may be
+ * initially, but if you specify a non-round second interval, it won't,
+ * and you can't easily get it back.)
+ * - PTP_PCLK_OUT
+ * This generates a 50% duty cycle clock based on the TOD counter, and
+ * seems it can be set to any period of 1ns resolution. It is probably
+ * limited by the TOD step size. Its period is defined by the PCLK_CCC
+ * registers. Again, its alignment to the second is questionable.
+ *
+ * Consequently, we support none of these.
+ */
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+
+#include "mvpp2.h"
+
+#define CR0_SW_NRESET BIT(0)
+
+#define TCFCR0_PHASE_UPDATE_ENABLE BIT(8)
+#define TCFCR0_TCF_MASK (7 << 2)
+#define TCFCR0_TCF_UPDATE (0 << 2)
+#define TCFCR0_TCF_FREQUPDATE (1 << 2)
+#define TCFCR0_TCF_INCREMENT (2 << 2)
+#define TCFCR0_TCF_DECREMENT (3 << 2)
+#define TCFCR0_TCF_CAPTURE (4 << 2)
+#define TCFCR0_TCF_NOP (7 << 2)
+#define TCFCR0_TCF_TRIGGER BIT(0)
+
+#define TCSR_CAPTURE_1_VALID BIT(1)
+#define TCSR_CAPTURE_0_VALID BIT(0)
+
+struct mvpp2_tai {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ void __iomem *base;
+ spinlock_t lock;
+ u64 period; // nanosecond period in 32.32 fixed point
+ /* This timestamp is updated every two seconds */
+ struct timespec64 stamp;
+};
+
+static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = readl_relaxed(reg) & ~mask;
+ val |= set & mask;
+ writel(val, reg);
+}
+
+static void mvpp2_tai_write(u32 val, void __iomem *reg)
+{
+ writel_relaxed(val & 0xffff, reg);
+}
+
+static u32 mvpp2_tai_read(void __iomem *reg)
+{
+ return readl_relaxed(reg) & 0xffff;
+}
+
+static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mvpp2_tai, caps);
+}
+
+static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base)
+{
+ ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 |
+ mvpp2_tai_read(base + 4) << 16 |
+ mvpp2_tai_read(base + 8);
+
+ ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 |
+ mvpp2_tai_read(base + 16);
+
+ /* Read and discard fractional part */
+ readl_relaxed(base + 20);
+ readl_relaxed(base + 24);
+}
+
+static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac,
+ void __iomem *base)
+{
+ mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH);
+ mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED);
+ mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW);
+ mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH);
+ mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+}
+
+static void mvpp2_tai_op(u32 op, void __iomem *base)
+{
+ /* Trigger the operation. Note that an external unmaskable
+ * event on PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ op | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+}
+
+/* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so has units
+ * of 2^-32 ns.
+ *
+ * units(s) = 1 / (2^32 * 10^9)
+ * fractional = abs_scaled_ppm / (2^16 * 10^6)
+ *
+ * What we want to achieve:
+ * freq_adjusted = freq_nominal * (1 + fractional)
+ * freq_delta = freq_adjusted - freq_nominal => positive = faster
+ * freq_delta = freq_nominal * (1 + fractional) - freq_nominal
+ * So: freq_delta = freq_nominal * fractional
+ *
+ * However, we are dealing with periods, so:
+ * period_adjusted = period_nominal / (1 + fractional)
+ * period_delta = period_nominal - period_adjusted => positive = faster
+ * period_delta = period_nominal * fractional / (1 + fractional)
+ *
+ * Hence:
+ * period_delta = period_nominal * abs_scaled_ppm /
+ * (2^16 * 10^6 + abs_scaled_ppm)
+ *
+ * To avoid overflow, we reduce both sides of the divide operation by a factor
+ * of 16.
+ */
+static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm)
+{
+ u64 val = tai->period * abs_scaled_ppm >> 4;
+
+ return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4));
+}
+
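A worked example of the formula above, runnable in userspace: with the driver's fixed 3ns period (3ULL << 32) and scaled_ppm = 65536 (i.e. +1 ppm), the expected delta is about 3ns * 1e-6 expressed in 2^-32 ns units, roughly 12884.9; the integer arithmetic below yields 12884.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period = 3ULL << 32;	/* 3ns in 32.32 fixed point */
	uint64_t scaled_ppm = 65536;	/* +1 ppm in 16.16 ppm units */

	/* Same arithmetic as mvpp22_calc_frac_ppm(): both sides of the
	 * divide are reduced by a factor of 16 to avoid 64-bit overflow.
	 */
	uint64_t val = period * scaled_ppm >> 4;
	uint64_t delta = val / ((1000000ULL << 12) + (scaled_ppm >> 4));

	printf("delta = %" PRIu64 " * 2^-32 ns\n", delta);	/* 12884 */
	return 0;
}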
+static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai)
+{
+ return 1000000;
+}
+
+static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ bool neg_adj;
+ s32 frac;
+ u64 val;
+
+ neg_adj = scaled_ppm < 0;
+ if (neg_adj)
+ scaled_ppm = -scaled_ppm;
+
+ val = mvpp22_calc_frac_ppm(tai, scaled_ppm);
+
+ /* Convert to a signed 32-bit adjustment */
+ if (neg_adj) {
+ /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy
+ * solution.
+ */
+ if (val > 0x80000000)
+ return -ERANGE;
+
+ frac = -val;
+ } else {
+ if (val > S32_MAX)
+ return -ERANGE;
+
+ frac = val;
+ }
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+ mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ struct timespec64 ts;
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcf;
+
+ /* We can't deal with S64_MIN */
+ if (delta == S64_MIN)
+ return -ERANGE;
+
+ if (delta < 0) {
+ delta = -delta;
+ tcf = TCFCR0_TCF_DECREMENT;
+ } else {
+ tcf = TCFCR0_TCF_INCREMENT;
+ }
+
+ ts = ns_to_timespec64(delta);
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(&ts, 0, base);
+ mvpp2_tai_op(tcf, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcsr;
+ int ret;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ /* XXX: the only way to read the PTP time is for the CPU to trigger
+ * an event. However, there is no way to distinguish between the CPU
+ * triggered event, and an external event on PTP_EVENT_REQ. So this
+ * is incompatible with external use of PTP_EVENT_REQ.
+ */
+ ptp_read_system_prets(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER);
+ ptp_read_system_postts(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+
+ tcsr = readl(base + MVPP22_TAI_TCSR);
+ if (tcsr & TCSR_CAPTURE_1_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH);
+ ret = 0;
+ } else if (tcsr & TCSR_CAPTURE_0_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH);
+ ret = 0;
+ } else {
+ /* We don't seem to have a reading... */
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return ret;
+}
+
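Once ptp_clock_register() has run (see mvpp22_tai_probe() below), the clock read by mvpp22_tai_gettimex64() is visible to userspace as a dynamic posix clock. A hedged sketch of reading it; the /dev/ptp0 index must be discovered via ethtool -T (backed by get_ts_info), and FD_TO_CLOCKID is the standard dynamic-clock encoding from the kernel's PTP documentation:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* index is device-specific */
	struct timespec ts;

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("PHC time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}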
+static int mvpp22_tai_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(ts, 0, base);
+
+ /* Trigger an update to load the value from the TLV registers
+ * into the TOD counter. Note that an external unmaskable event on
+ * PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_PHASE_UPDATE_ENABLE |
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+
+ mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL);
+
+ return msecs_to_jiffies(2000);
+}
+
+static void mvpp22_tai_set_step(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+ u32 nano, frac;
+
+ nano = upper_32_bits(tai->period);
+ frac = lower_32_bits(tai->period);
+
+ /* As the fractional nanosecond is a signed offset, if the MSB (sign)
+ * bit is set, we have to increment the whole-nanoseconds value.
+ */
+ if (frac >= 0x80000000)
+ nano += 1;
+
+ mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW);
+}
+
+static void mvpp22_tai_init(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+
+ mvpp22_tai_set_step(tai);
+
+ /* Release the TAI reset */
+ mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET);
+}
+
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return ptp_clock_index(tai->ptp_clock);
+}
+
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+ struct timespec64 ts;
+ int delta;
+
+ /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds.
+ * We use our stored timestamp (tai->stamp) to form a full timestamp,
+ * and we must read the seconds exactly once.
+ */
+ ts.tv_sec = READ_ONCE(tai->stamp.tv_sec);
+ ts.tv_nsec = tstamp & 0x3fffffff;
+
+ /* Calculate the delta in seconds between our stored timestamp and
+ * the value read from the queue. Allow timestamps one second in the
+ * past, otherwise consider them to be in the future.
+ */
+ delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3;
+ if (delta == 3)
+ delta -= 4;
+ ts.tv_sec += delta;
+
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = timespec64_to_ktime(ts);
+}
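
For reference, the delta logic in mvpp22_tai_tstamp() above can be exercised standalone. The sketch below (illustrative only, not part of this patch; the sample values are made up) reproduces the 2-bit seconds reconstruction:

#include <stdio.h>

/* Rebuild a full seconds value from the 2-bit hardware seconds field:
 * values one second in the past map to -1, everything else to 0..2
 * seconds in the future, exactly as in mvpp22_tai_tstamp().
 */
static long long reconstruct_sec(long long full_sec, unsigned int hw_sec2)
{
	int delta = (int)((hw_sec2 - (full_sec & 3)) & 3);

	if (delta == 3)
		delta -= 4;

	return full_sec + delta;
}

int main(void)
{
	printf("%lld\n", reconstruct_sec(1000, 3));	/* 999: 1s in the past */
	printf("%lld\n", reconstruct_sec(1000, 1));	/* 1001: 1s ahead */
	return 0;
}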
+
+void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+ long delay;
+
+ delay = mvpp22_tai_aux_work(&tai->caps);
+
+ ptp_schedule_worker(tai->ptp_clock, delay);
+}
+
+void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+ ptp_cancel_worker_sync(tai->ptp_clock);
+}
+
+static void mvpp22_tai_remove(void *priv)
+{
+ struct mvpp2_tai *tai = priv;
+
+ if (!IS_ERR(tai->ptp_clock))
+ ptp_clock_unregister(tai->ptp_clock);
+}
+
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ struct mvpp2_tai *tai;
+ int ret;
+
+ tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL);
+ if (!tai)
+ return -ENOMEM;
+
+ spin_lock_init(&tai->lock);
+
+ tai->base = priv->iface_base;
+
+ /* The step size consists of three registers - a 16-bit nanosecond step
+ * size, and a 32-bit fractional nanosecond step size split over two
+ * registers. The fractional nanosecond step size has units of 2^-32ns.
+ *
+ * To calculate this, we calculate:
+ * (10^9 + freq / 2) / (freq * 2^-32)
+ * which gives us the nanosecond step to the nearest integer in 16.32
+ * fixed point format, and the fractional part of the step size with
+ * the MSB inverted. With rounding of the fractional nanosecond, and
+ * simplification, this becomes:
+ * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ *
+ * So:
+ * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ * nano = upper_32_bits(div);
+ * frac = lower_32_bits(div) ^ 0x80000000;
+ * These give the values for the registers.
+ *
+ * This all seems perfect, but alas it is not when considering the
+ * whole story. The system is clocked from 25MHz, which is multiplied
+ * by a PLL to 1GHz, and then divided by three, giving 333333333Hz
+ * (recurring). This gives exactly 3ns, but using 333333333Hz with
+ * the above gives an error of 13*2^-32ns.
+ *
+ * Consequently, we use the period rather than calculating from the
+ * frequency.
+ */
+ tai->period = 3ULL << 32;
+
+ mvpp22_tai_init(tai);
+
+ tai->caps.owner = THIS_MODULE;
+ strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name));
+ tai->caps.max_adj = mvpp22_calc_max_adj(tai);
+ tai->caps.adjfine = mvpp22_tai_adjfine;
+ tai->caps.adjtime = mvpp22_tai_adjtime;
+ tai->caps.gettimex64 = mvpp22_tai_gettimex64;
+ tai->caps.settime64 = mvpp22_tai_settime64;
+ tai->caps.do_aux_work = mvpp22_tai_aux_work;
+
+ ret = devm_add_action(dev, mvpp22_tai_remove, tai);
+ if (ret)
+ return ret;
+
+ tai->ptp_clock = ptp_clock_register(&tai->caps, dev);
+ if (IS_ERR(tai->ptp_clock))
+ return PTR_ERR(tai->ptp_clock);
+
+ priv->tai = tai;
+
+ return 0;
+}
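
As a cross-check on the step-size comment in mvpp22_tai_probe() above, the standalone sketch below (illustrative only, not part of this patch) evaluates the frequency-based formula for 333333333 Hz and reproduces the 13 * 2^-32 ns error, while the exact period the driver programs has a zero fractional part:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t freq = 333333333;	/* 1 GHz / 3, truncated */

	/* div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq */
	uint64_t div = ((1000000000ULL << 32) + (freq << 31) +
			((freq + 1) >> 1)) / freq;
	uint32_t nano = (uint32_t)(div >> 32);
	uint32_t frac = (uint32_t)div ^ 0x80000000;

	printf("freq:   nano=%u frac=%u\n", nano, frac);  /* nano=3 frac=13 */

	/* What the driver programs instead: 3ns exactly, in 32.32 format */
	uint64_t period = 3ULL << 32;
	printf("period: nano=%u frac=%u\n",
	       (uint32_t)(period >> 32), (uint32_t)period);  /* nano=3 frac=0 */
	return 0;
}
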
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1b25948c662b..2f7a861d0c7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -3,9 +3,10 @@
# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
#
+ccflags-y += -I$(src)
obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
-octeontx2_mbox-y := mbox.o
+octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index a4e65da8d95b..8f17e26dca53 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -468,6 +468,35 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
}
}
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP stamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 394f96591feb..27ca3291682b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -58,8 +58,10 @@
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
@@ -139,4 +141,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 2718fe201c14..bbabb8e64201 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -14,6 +14,7 @@
#include "rvu_reg.h"
#include "mbox.h"
+#include "rvu_trace.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -207,6 +208,9 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
*/
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
spin_unlock(&mdev->mbox_lock);
/* The interrupt should be fired after num_msgs is written
@@ -303,10 +307,15 @@ int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
struct mbox_msghdr *preq = mdev->mbase + ireq;
struct mbox_msghdr *prsp = mdev->mbase + irsp;
- if (preq->id != prsp->id)
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
+ }
if (prsp->rc) {
rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ab433789d2c3..263a21129416 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -128,6 +128,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -144,6 +145,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -214,6 +217,8 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
@@ -621,6 +626,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -859,4 +865,20 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3803af9231c6..91a9d00e4fb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -49,6 +49,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EDSA_VLAN,
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_FDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -77,21 +78,21 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
NPC_LT_LD_IGMP = 8,
- NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
NPC_LT_LD_NVGRE,
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
- NPC_LT_LD_CUSTOM0 = 0xE,
- NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
NPC_LT_LE_VXLAN = 1,
NPC_LT_LE_GENEVE,
+ NPC_LT_LE_ESP,
NPC_LT_LE_GTPU = 4,
NPC_LT_LE_VXLANGPE,
NPC_LT_LE_GTPC,
@@ -173,8 +174,8 @@ struct npc_kpu_profile_action {
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- struct npc_kpu_profile_cam *cam;
- struct npc_kpu_profile_action *action;
+ const struct npc_kpu_profile_cam *cam;
+ const struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -296,6 +297,9 @@ struct nix_rx_action {
#endif
};
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
@@ -320,4 +324,37 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+};
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+};
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+};
+
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index aa2727e6211a..77bb4ed32600 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -63,6 +63,7 @@
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
#define NPC_UDP_PORT_MPLS 6635
+#define NPC_UDP_PORT_ESP 4500
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -139,6 +140,13 @@
#define NPC_DSA_EXTEND 0x1000
#define NPC_DSA_EDSA 0x8000
+#define NPC_DSA_FDSA 0xc000
+
+#define NPC_KEXOF_DMAC 8
+#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
enum npc_kpu_parser_state {
NPC_S_NA = 0,
@@ -166,6 +174,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
+ NPC_S_KPU4_FDSA,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -189,7 +198,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_IGMP,
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
- NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
@@ -201,6 +209,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GENEVE,
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
+ NPC_S_KPU9_ESP,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -271,6 +280,7 @@ enum npc_kpu_lb_lflag {
NPC_F_LB_L_EDSA_VLAN,
NPC_F_LB_L_EXDSA,
NPC_F_LB_L_EXDSA_VLAN,
+ NPC_F_LB_L_FDSA,
};
enum npc_kpu_lc_uflag {
@@ -418,7 +428,7 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static struct npc_kpu_profile_action ikpu_action_entries[] = {
+static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -979,7 +989,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
+ 12, 14, 20, 0, 0,
NPC_S_KPU1_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -997,7 +1007,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1351,10 +1361,19 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
NPC_DSA_EXTEND,
NPC_DSA_EXTEND,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_FDSA,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -1666,7 +1685,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2794,7 +2813,7 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3913,7 +3932,7 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -3996,6 +4015,69 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ 0x0000,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4006,7 +4088,7 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4576,7 +4658,7 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -4921,7 +5003,7 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5140,7 +5222,7 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5341,15 +5423,24 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
},
{
- NPC_S_KPU8_SCTP, 0xff,
+ NPC_S_KPU8_UDP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5358,7 +5449,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP, 0xff,
+ NPC_S_KPU8_SCTP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5367,7 +5458,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_IGMP, 0xff,
+ NPC_S_KPU8_ICMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5376,7 +5467,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP6, 0xff,
+ NPC_S_KPU8_IGMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5385,7 +5476,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ESP, 0xff,
+ NPC_S_KPU8_ICMP6, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5872,7 +5963,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6324,6 +6415,15 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_MPLS_S,
},
{
+ NPC_S_KPU9_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -6334,7 +6434,7 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6499,7 +6599,7 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6808,7 +6908,7 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7063,7 +7163,7 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7075,7 +7175,7 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7087,7 +7187,7 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7288,7 +7388,7 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7345,7 +7445,7 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
+static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7673,6 +7773,14 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 2, 0,
+ NPC_S_KPU4_FDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -7962,7 +8070,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
+static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8965,7 +9073,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -9960,7 +10068,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
+static const struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10034,6 +10142,62 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10043,7 +10207,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
+static const struct npc_kpu_profile_action kpu5_action_entries[] = {
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10102,8 +10266,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 20, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
0, 0, 0, 0,
@@ -10206,8 +10370,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 0, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
0, 0xf, 0, 2,
@@ -10414,8 +10578,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 40, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
0,
0, 0, 0, 0,
@@ -10550,7 +10714,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
+static const struct npc_kpu_profile_action kpu6_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10561,80 +10725,80 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 1, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 1, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 5, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -10689,8 +10853,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10793,8 +10957,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10857,7 +11021,7 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
+static const struct npc_kpu_profile_action kpu7_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10908,8 +11072,8 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10956,80 +11120,80 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 0, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 0, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 4, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -11052,7 +11216,7 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
+static const struct npc_kpu_profile_action kpu8_action_entries[] = {
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11231,6 +11395,22 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 7, 0,
NPC_S_KPU16_UDP_DATA, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -11273,14 +11453,6 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_AH,
0,
0, 0, 0, 0,
@@ -11703,7 +11875,7 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
+static const struct npc_kpu_profile_action kpu9_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12105,6 +12277,14 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -12114,7 +12294,7 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
+static const struct npc_kpu_profile_action kpu10_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12261,7 +12441,7 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
+static const struct npc_kpu_profile_action kpu11_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12536,7 +12716,7 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
+static const struct npc_kpu_profile_action kpu12_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12763,7 +12943,7 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
+static const struct npc_kpu_profile_action kpu13_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12774,7 +12954,7 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
+static const struct npc_kpu_profile_action kpu14_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12785,7 +12965,7 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
+static const struct npc_kpu_profile_action kpu15_action_entries[] = {
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -12964,7 +13144,7 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu16_action_entries[] = {
+static const struct npc_kpu_profile_action kpu16_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13015,7 +13195,7 @@ static struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static struct npc_kpu_profile npc_kpu_profiles[] = {
+static const struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13114,4 +13294,163 @@ static struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
+static const struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LE,
+ .ltype_match = NPC_LT_LE_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+};
+
+static const struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[47:0] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ [NPC_LT_LB_CTAG] = {
+ KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
+ },
+ [NPC_LT_LB_FDSA] = {
+ /* SWITCH PORT: 1 byte, KW0[63:48] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D:UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ /* Layer D:TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ },
+ },
+ },
+};
+
#endif /* NPC_PROFILE_H */
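
To make the extractor encodings above easier to read, here is a small standalone decoder sketch (illustrative only; the field names are descriptive guesses taken from the macro arguments, not hardware register names). It unpacks the DMAC entry from npc_mkex_default:

#include <stdio.h>
#include <stdint.h>

#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
	(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
	 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

int main(void)
{
	/* DMAC: extract 6 bytes at header offset 0 into key byte offset 8 */
	uint32_t cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, 8);

	printf("bytes=%u hdr_ofs=%u ena=%u flags_ena=%u key_ofs=%u\n",
	       ((cfg >> 16) & 0xFF) + 1,	/* bytesm1 is "bytes minus one" */
	       (cfg >> 8) & 0xFF,
	       (cfg >> 7) & 1,
	       (cfg >> 6) & 1,
	       cfg & 0x3F);
	return 0;
}
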
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000000..f69f4f35ae48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define RST_MUL_BITS GENMASK_ULL(38, 33)
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 cfg, ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ /* To get the input clock frequency with which the PTP co-processor
+ * block is running, the base frequency (50 MHz) needs to be
+ * multiplied by the multiplier bits present in the RST_BOOT register
+ * of the RESET block. Hence the code below reads the multiplier bits
+ * from the RESET PCI device present in the system.
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ cfg = readq(base + RST_BOOT);
+ ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
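
The FIELD_GET()/GENMASK_ULL() pair above reduces to a shift and a 6-bit mask. A minimal standalone illustration (the cfg value is made up, not a real RST_BOOT dump):

#include <stdio.h>
#include <stdint.h>

#define CLOCK_BASE_RATE 50000000ULL

int main(void)
{
	uint64_t cfg = (uint64_t)20 << 33;	/* pretend RST_MUL_BITS = 20 */

	/* FIELD_GET(GENMASK_ULL(38, 33), cfg): shift down, mask 6 bits */
	uint64_t mult = (cfg >> 33) & 0x3F;

	printf("%llu Hz\n",
	       (unsigned long long)(CLOCK_BASE_RATE * mult));	/* 1000000000 */
	return 0;
}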
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+ /* If the PTP PCI device is found on the system and the ptp driver
+ * is bound to it, then the PTP PCI device is returned to the caller
+ * (the rvu driver).
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that
+ * it represents the number of nanoseconds between each cycle. In
+ * this convention the compensation value is a 64-bit fixed-point
+ * number where the upper 32 bits are whole nanoseconds and the
+ * lower 32 bits are fractions of a nanosecond.
+ * scaled_ppm represents the ratio, in "parts per million" with a
+ * 16-bit fractional part, by which the compensation value should be
+ * corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula:
+ *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated initially
+ * in the probe function.
+ */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
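
Worked through for a 1 GHz coprocessor clock and a +100 ppm correction, the arithmetic above looks like this standalone sketch (illustrative values, not part of this patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clock_rate = 1000000000ULL;
	int64_t scaled_ppm = 100LL << 16;	/* +100 ppm in 16.16 fixed point */

	/* tbase: nanoseconds per cycle in 32.32 fixed point (here 1 << 32) */
	uint64_t comp = (1000000000ULL << 32) / clock_rate;

	/* scaled_ppm -> ppb: multiply by 1000/65536, i.e. by 125 then >> 13 */
	int64_t ppb = ((1 + scaled_ppm) * 125) >> 13;	/* 100000 ppb */

	uint64_t adj = comp * (uint64_t)ppb / 1000000000ULL;

	printf("comp=%llu adj=%llu new comp=%llu\n",
	       (unsigned long long)comp, (unsigned long long)adj,
	       (unsigned long long)(comp + adj));
	return 0;
}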
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler invoked by AF
+ * consumers/netdev drivers via the mailbox mechanism. It is used by
+ * netdev drivers to get the PTP clock and to set frequency
+ * adjustments. Since the mailbox can be called without knowing
+ * whether the driver is bound to the ptp device, the validation
+ * below is needed as a first step.
+ */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000000..878bc395d28f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u32 clock_rate;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 557e4292c846..e1f918960730 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,9 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
+#include "ptp.h"
+
+#include "rvu_trace.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -1548,6 +1551,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -1880,6 +1884,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
/* Sync with mbox memory region */
rmb();
@@ -1897,6 +1903,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
@@ -2565,13 +2573,21 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err == -EPROBE_DEFER)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_put_ptp;
}
/* Store module params in rvu structure */
@@ -2586,7 +2602,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
- goto err_release_regions;
+ goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@@ -2626,6 +2642,8 @@ err_hwsetup:
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2651,6 +2669,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2676,9 +2695,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
- pci_unregister_driver(&cgx_driver);
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
return err;
}
@@ -2686,6 +2715,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b89dde2c8b08..90eed3160915 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -289,6 +289,22 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
+struct ptp;
+
+/* KPU profile adapter structure gathering all KPU configuration data
+ * and abstracting away the source it came from.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ const struct npc_lt_def_cfg *lt_def;
+ const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ const struct npc_kpu_profile *kpu; /* array[kpus] */
+ const struct npc_mcam_kex *mkex;
+ size_t pkinds;
+ size_t kpus;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -337,6 +353,11 @@ struct rvu {
/* Firmware data */
struct rvu_fwdata *fwdata;
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -470,6 +491,7 @@ int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f3c82e489897..fa9152ff5e2a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -15,6 +15,7 @@
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
+#include "rvu_trace.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -34,6 +35,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
return req; \
}
@@ -509,6 +511,45 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if it is received from any other PF/VF, simply ACK, as there is
+ * nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled then inform NPC that packets to be parsed by
+ * this PF will have their data shifted by 8 bytes; if PTP is
+ * disabled then no shift is required.
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0fc70824fd6b..21a89dd76d3c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2508,6 +2508,14 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2-bytes) */
+ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
+ break;
}
field->ena = 1;
@@ -3103,6 +3111,7 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_nix_init(struct rvu *rvu)
{
+ const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr, err;
@@ -3133,6 +3142,7 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
+ ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -3180,28 +3190,38 @@ int rvu_nix_init(struct rvu *rvu)
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
+ (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
+ (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
+ (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
- 0x0F);
+ (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
@@ -3318,6 +3338,49 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
}
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int nixlf;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index fbaf9bcd83f2..511b01dd03ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -27,6 +27,9 @@
#define NIXLF_PROMISC_ENTRY 2
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8
+
+static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
@@ -61,6 +64,36 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamping is enabled, configure NPC to advance past the 8-byte timestamp */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
+
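
The hunk above programs an 8-bit pointer-advance field inside a 64-bit CSR with a clear-then-set sequence. Below is a standalone userspace sketch of that read-modify-write pattern; the GENMASK_ULL/FIELD_PREP macros are simplified stand-ins for the kernel's <linux/bits.h> and <linux/bitfield.h> versions, and the register value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros, illustration only */
#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
        (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define NPC_AF_ACTION0_PTR_ADVANCE      GENMASK_ULL(27, 20)
#define NPC_HW_TSTAMP_OFFSET            8

int main(void)
{
        uint64_t val = 0x0ff00000ULL;   /* pretend CSR contents */

        val &= ~NPC_AF_ACTION0_PTR_ADVANCE;     /* clear old advance value */
        val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE, NPC_HW_TSTAMP_OFFSET);
        printf("ACTION0 = 0x%llx\n", (unsigned long long)val);
        return 0;
}
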
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -417,7 +450,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[0] = 0xFFFULL;
if (allmulti) {
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ kwi = NPC_KEXOF_DMAC / sizeof(u64);
entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
entry.kw_mask[kwi] = BIT_ULL(40);
}
@@ -699,88 +732,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
- (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
- ((flags_ena) << 6) | ((key_ofs) & 0x3F))
-
-static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int lid, ltype;
- int lid_count;
- u64 cfg;
-
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- lid_count = (cfg >> 4) & 0xF;
-
- /* First clear any existing config i.e
- * disable LDATA and FLAGS extraction.
- */
- for (lid = 0; lid < lid_count; lid++) {
- for (ltype = 0; ltype < 16; ltype++) {
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
- }
- }
-
- if (mcam->keysize != NPC_MCAM_KEY_X2)
- return;
-
- /* Default MCAM KEX profile */
- /* Layer A: Ethernet: */
-
- /* DMAC: 6 bytes, KW1[47:0] */
- cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
-
- /* Ethertype: 2 bytes, KW0[47:32] */
- cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
-
- /* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
-
- /* Layer B: Stacked VLAN (STAG|QinQ) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
-
- /* Layer C: IPv4 */
- /* SIP+DIP: 8 bytes, KW2[63:0] */
- cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
- /* TOS: 1 byte, KW1[63:56] */
- cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
-
- /* Layer D:UDP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
-
- /* Layer D:TCP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
-}
-
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- struct npc_mcam_kex *mkex)
+ const struct npc_mcam_kex *mkex)
{
int lid, lt, ld, fl;
@@ -820,34 +773,31 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
-/* strtoull of "mkexprof" with base:36 */
-#define MKEX_SIGN 0x19bbfdbd15f
#define MKEX_END_SIGN 0xdeadbeef
-static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
{
- const char *mkex_profile = rvu->mkex_pfl_name;
struct device *dev = &rvu->pdev->dev;
- void __iomem *mkex_prfl_addr = NULL;
struct npc_mcam_kex *mcam_kex;
- u64 prfl_addr;
- u64 prfl_sz;
+ void *mkex_prfl_addr = NULL;
+ u64 prfl_addr, prfl_sz;
/* If the user has not selected an mkex profile */
- if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
- goto load_default;
+ if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
if (!rvu->fwdata)
- goto load_default;
+ goto program_mkex;
prfl_addr = rvu->fwdata->mcam_addr;
prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
- goto load_default;
+ goto program_mkex;
- mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+ mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
if (!mkex_prfl_addr)
- goto load_default;
+ goto program_mkex;
mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
@@ -859,35 +809,27 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_96xx_B0(rvu) &&
- mcam_kex->keyx_cfg[NIX_INTF_RX] !=
- mcam_kex->keyx_cfg[NIX_INTF_TX])
- goto load_default;
-
- /* Program selected mkex profile */
- npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
-
- goto unmap;
+ if (!is_rvu_96xx_B0(rvu) ||
+ mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
}
mcam_kex++;
prfl_sz -= sizeof(struct npc_mcam_kex);
}
- dev_warn(dev, "Failed to load requested profile: %s\n",
- rvu->mkex_pfl_name);
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
-load_default:
- dev_info(rvu->dev, "Using default mkex profile\n");
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
-
-unmap:
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- iounmap(mkex_prfl_addr);
+ memunmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_action *kpuaction,
+ const struct npc_kpu_profile_action *kpuaction,
int kpu, int entry, bool pkind)
{
struct npc_kpu_action0 action0 = {0};
@@ -929,7 +871,7 @@ static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
}
static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_cam *kpucam,
+ const struct npc_kpu_profile_cam *kpucam,
int kpu, int entry)
{
struct npc_kpu_cam cam0 = {0};
@@ -957,7 +899,7 @@ static inline u64 enable_mask(int count)
}
static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
- struct npc_kpu_profile *profile)
+ const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
@@ -995,6 +937,27 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+
+ return 0;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+
+ npc_prepare_default_kpu(profile);
+}
+
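
npc_prepare_default_kpu() wires the adapter to the compiled-in tables so that callers only ever dereference rvu->kpu, leaving room to repoint it at a firmware-supplied profile later. A minimal userspace sketch of that indirection, with invented names:

#include <stdio.h>

struct table { int nentries; };

static const struct table builtin_tbl = { .nentries = 16 };

struct adapter {
        const char *name;
        const struct table *tbl;        /* active profile, default or override */
};

static void adapter_prepare_default(struct adapter *a)
{
        a->name = "default";
        a->tbl = &builtin_tbl;          /* start from the built-in tables */
}

int main(void)
{
        static const struct table fw_tbl = { .nentries = 32 };
        struct adapter a;

        adapter_prepare_default(&a);
        /* a loader could later swap in a firmware profile: */
        a.name = "firmware";
        a.tbl = &fw_tbl;
        printf("%s profile, %d entries\n", a.name, a.tbl->nentries);
        return 0;
}
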
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1013,25 +976,26 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
}
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
/* First program the IKPU profile, i.e. the PKIND configs.
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
pkind = &hw->pkind;
- num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = rvu->kpu.pkinds;
num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
- npc_config_kpuaction(rvu, blkaddr,
- &ikpu_action_entries[idx], 0, idx, true);
+ npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
/* Program KPU CAM and Action profiles */
- num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = rvu->kpu.kpus;
num_kpus = min_t(int, hw->npc_kpus, num_kpus);
for (idx = 0; idx < num_kpus; idx++)
- npc_program_kpu_profile(rvu, blkaddr,
- idx, &npc_kpu_profiles[idx]);
+ npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
@@ -1156,11 +1120,11 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 keyz = NPC_MCAM_KEY_X2;
+ u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
- u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -1194,13 +1158,16 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Outer L2, IPv4's NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1216,23 +1183,25 @@ int rvu_npc_init(struct rvu *rvu)
/* Set RX and TX side MCAM search key size.
* LA..LD (ltype only) + Channel
*/
- nibble_ena = 0x49247;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_96xx_B0(rvu))
- nibble_ena = (1ULL << 19) - 1;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ if (is_rvu_96xx_B0(rvu)) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ }
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
/* Configure MKEX profile */
- npc_load_mkex_profile(rvu, blkaddr);
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000000..56f90cf9c4c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000000..e6609068e81b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
+ __entry->id, __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __assign_str(str, msg)
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
+ __entry->id, __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 778df331c8ac..b2c6385707c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 93c4cf7fedbf..d2581090f9a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -355,7 +355,7 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -365,6 +365,95 @@ int otx2_rss_init(struct otx2_nic *pfvf)
return 0;
}
+/* Setup UDP segmentation algorithm in HW */
+static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
+{
+ struct nix_lso_format *field;
+
+ field = (struct nix_lso_format *)&lso->fields[0];
+ lso->field_mask = GENMASK(18, 0);
+
+ /* IP's Length field */
+ field->layer = NIX_TXLAYER_OL3;
+ /* In IPv4 the length field is at byte offset 2; in IPv6 it's at 4 */
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* i.e. 2 bytes */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* No ID field in IPv6 header */
+ if (v4) {
+ /* Increment IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1; /* i.e. 2 bytes */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Update length in UDP header */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+}
+
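
The format programmed above tells the HW which header fields to patch in every segment: ADD_PAYLEN adds the per-segment payload to the IP and UDP length fields, and ADD_SEGNUM adds the segment number to the IPv4 ID. A userspace sketch of the resulting per-segment values (IPv4 case, invented numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t ip_tot_len = 20 + 8;   /* header-only lengths written by */
        uint16_t udp_len = 8;           /* the driver before handoff to HW */
        uint16_t ipid = 0x1234;
        uint16_t payload = 1400;        /* gso_size for each full segment */

        for (int seg = 0; seg < 3; seg++)
                /* ADD_PAYLEN on OL3/OL4, ADD_SEGNUM on the IPv4 ID */
                printf("seg %d: tot_len=%d udp_len=%d ipid=0x%x\n", seg,
                       ip_tot_len + payload, udp_len + payload, ipid + seg);
        return 0;
}
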
+/* Setup segmentation algorithms in HW and retrieve algorithm index */
+void otx2_setup_segmentation(struct otx2_nic *pfvf)
+{
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *lso;
+ struct otx2_hw *hw = &pfvf->hw;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* UDPv4 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, true);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv4_idx = rsp->lso_format_idx;
+
+ /* UDPv6 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, false);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv6_idx = rsp->lso_format_idx;
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_info(pfvf->netdev,
+ "Failed to get LSO index for UDP GSO offload, disabling\n");
+ pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
+}
+
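
otx2_setup_segmentation() repeats the driver's mailbox idiom twice: allocate a typed request, synchronously ring the AF, then look up the typed response by request header and error-check it. The userspace mimic below sketches only that call shape; the mailbox, names, and return values are all invented:

#include <stdio.h>

struct req { int id; };
struct rsp { int lso_format_idx; };
struct mbox { struct req req; struct rsp rsp; int busy; };

static struct req *mbox_alloc_req(struct mbox *mb, int id)
{
        if (mb->busy)
                return NULL;    /* mirrors otx2_mbox_alloc_msg_* failing */
        mb->busy = 1;
        mb->req.id = id;
        return &mb->req;
}

static int mbox_sync(struct mbox *mb)
{
        /* stand-in for otx2_sync_mbox_msg(): "AF" fills in the response */
        mb->rsp.lso_format_idx = 5;
        mb->busy = 0;
        return 0;
}

int main(void)
{
        struct mbox mb = { 0 };
        struct req *req = mbox_alloc_req(&mb, 42);

        if (!req || mbox_sync(&mb))
                return 1;       /* on failure the driver disables the offload */
        printf("lso format idx %d\n", mb.rsp.lso_format_idx);
        return 0;
}
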
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
/* Configure CQE interrupt coalescing parameters
@@ -671,6 +760,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
+ if (pfvf->ptp) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 2fa29889522e..d6253f2a414d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,10 +13,14 @@
#include <linux/pci.h>
#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
@@ -174,9 +178,11 @@ struct otx2_hw {
u16 rq_skid;
u8 cq_time_wait;
- /* For TSO segmentation */
+ /* Segmentation */
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
+ u8 lso_udpv4_idx;
+ u8 lso_udpv6_idx;
u8 hw_tso;
/* MSI-X */
@@ -209,6 +215,17 @@ struct refill_work {
struct otx2_nic *pf;
};
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+};
+
+#define OTX2_HW_TIMESTAMP_LEN 8
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -216,6 +233,8 @@ struct otx2_nic {
u16 max_frs;
u16 rbsize; /* Receive buffer size */
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
@@ -251,6 +270,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+
+ struct otx2_ptp *ptp;
+ struct hwtstamp_config tstamp;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -502,6 +524,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
return req; \
}
@@ -561,6 +584,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
+void otx2_setup_segmentation(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d59f5a9c7273..662fb80dbb9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -13,8 +13,10 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
@@ -426,6 +428,8 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
/* Minimum is IPv4 and IPv6, SIP/DIP */
nfc->data = RXH_IP_SRC | RXH_IP_DST;
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
+ nfc->data |= RXH_VLAN;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
@@ -475,6 +479,11 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
return -EINVAL;
+ if (nfc->data & RXH_VLAN)
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
+ else
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
+
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -663,6 +672,31 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
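
otx2_get_ts_info() is what a userspace ETHTOOL_GET_TS_INFO query lands on. A hedged sketch of such a query; the interface name is illustrative and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&info;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_GET_TS_INFO");
        else
                printf("phc_index=%d so_timestamping=0x%x\n",
                       info.phc_index, info.so_timestamping);
        if (fd >= 0)
                close(fd);
        return 0;
}
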
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -687,6 +721,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2fb45670aca4..66f1a212f1f4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -21,6 +21,8 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
+#include "otx2_ptp.h"
+#include <rvu_trace.h>
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -41,6 +43,9 @@ enum {
TYPE_PFVF,
};
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -554,6 +559,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
return IRQ_HANDLED;
}
@@ -937,6 +944,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
mbox = &pf->mbox;
+
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+
otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
return IRQ_HANDLED;
@@ -1282,7 +1292,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+ pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
+ OTX2_ETH_HLEN);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1497,6 +1508,9 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_disable_napi;
+ /* Set up segmentation algorithms; on failure, clear offload capability */
+ otx2_setup_segmentation(pf);
+
/* Initialize RSS */
err = otx2_rss_init(pf);
if (err)
@@ -1548,6 +1562,16 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
+ /* When reinitializing, re-enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1742,6 +1766,143 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
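
The handler above is reached through the standard SIOCSHWTSTAMP path. A minimal userspace sketch that would exercise it; the interface name is illustrative and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else    /* the driver may rewrite rx_filter to what it actually enabled */
                printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
        if (fd >= 0)
                close(fd);
        return 0;
}
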
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1752,6 +1913,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1924,6 +2086,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
+ /* Don't check for errors; proceed without PTP if init fails */
+ otx2_ptp_init(pf);
+
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@@ -1939,7 +2104,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
@@ -1956,7 +2122,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
@@ -1976,6 +2142,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -2117,6 +2285,11 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@@ -2126,6 +2299,7 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
+ otx2_ptp_destroy(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000000..7bcf5246350f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ mutex_lock(&pfvf->mbox.lock);
+ nsec = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = otx2_ptp_adjfine,
+ .adjtime = otx2_ptp_adjtime,
+ .gettime64 = otx2_ptp_gettime,
+ .settime64 = otx2_ptp_settime,
+ .enable = otx2_ptp_enable,
+ };
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
+ err = ptp_ptr->ptp_clock ?
+ PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
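
With cc->mult = 1 and cc->shift = 0, the cyclecounter registered above treats raw PTP counter reads as nanoseconds, so otx2_ptp_tstamp2time() reduces to an offset against the base captured at init. A userspace sketch of that arithmetic, with invented values:

#include <stdint.h>
#include <stdio.h>

struct timecounter {
        uint64_t cycle_last;    /* counter value at last read/init */
        uint64_t nsec;          /* nanoseconds at cycle_last */
};

static uint64_t cyc2time(const struct timecounter *tc, uint64_t cycles)
{
        /* delta since last sync, added to the tracked ns base */
        return tc->nsec + (cycles - tc->cycle_last);
}

int main(void)
{
        struct timecounter tc = {
                .cycle_last = 1000000,                  /* HW count at init */
                .nsec = 1600000000000000000ULL,         /* wall time at init, ns */
        };

        printf("ts = %llu ns\n",
               (unsigned long long)cyc2time(&tc, 1000000 + 12345));
        return 0;
}
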
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000000..706d63a43ae1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 PTP support for ethernet driver */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e46834e043be..d5d7a2f37493 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
+#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -81,8 +82,11 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
int budget, int *tx_pkts, int *tx_bytes)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
+ u64 timestamp, tsns;
struct sg_list *sg;
+ int err;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -94,6 +98,18 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
+ if (timestamp != 1) {
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@@ -101,16 +117,47 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+ /* The first 8 bytes are the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len)
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
+ int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes in the case
+ * where the packet is prepended with a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ }
+
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, pfvf->rbsize);
+ va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
@@ -239,7 +286,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@@ -477,15 +524,55 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
*/
ip_hdr(skb)->tot_len =
htons(ext->lso_sb - skb_network_offset(skb));
- } else {
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
+
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ __be16 l3_proto = vlan_get_protocol(skb);
+ struct udphdr *udph = udp_hdr(skb);
+ u16 iplen;
+
+ ext->lso_sb = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+
+ /* HW adds payload size to length fields in IP and
+ * UDP headers during segmentation, hence adjust the
+ * lengths to just header sizes.
+ */
+ iplen = htons(ext->lso_sb - skb_network_offset(skb));
+ if (l3_proto == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv4_idx;
+ } else {
+ ipv6_hdr(skb)->payload_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv6_idx;
+ }
+
+ udph->len = htons(sizeof(struct udphdr));
}
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
}
+
*offset += sizeof(*ext);
}
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ *offset += sizeof(*mem);
+}
+
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@@ -737,6 +824,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ u64 iova;
+
+ if (!skb_shinfo(skb)->gso_size &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@@ -790,6 +892,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index da97f2d4416f..73af15685657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
+ struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 2f90f1721441..67fabf265fe6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -187,6 +187,8 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
+ trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
@@ -553,7 +555,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
new file mode 100644
index 000000000000..b6f20e2034c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell Prestera drivers configuration
+#
+
+config PRESTERA
+ tristate "Marvell Prestera Switch ASICs support"
+ depends on NET_SWITCHDEV && VLAN_8021Q
+ depends on BRIDGE || BRIDGE=n
+ select NET_DEVLINK
+ help
+ This driver supports the Marvell Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera.
+
+config PRESTERA_PCI
+ tristate "PCI interface driver for Marvell Prestera Switch ASICs family"
+ depends on PCI && HAS_IOMEM && PRESTERA
+ default PRESTERA
+ help
+ This driver implements PCI interface support for the Marvell
+ Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera_pci.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
new file mode 100644
index 000000000000..93129e32ebc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PRESTERA) += prestera.o
+prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
+ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
+ prestera_switchdev.o
+
+obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
new file mode 100644
index 000000000000..55aa4bf8a27c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_H_
+#define _PRESTERA_H_
+
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
+#include <uapi/linux/if_ether.h>
+
+#define PRESTERA_DRV_NAME "prestera"
+
+#define PRESTERA_DEFAULT_VID 1
+
+struct prestera_fw_rev {
+ u16 maj;
+ u16 min;
+ u16 sub;
+};
+
+struct prestera_port_stats {
+ u64 good_octets_received;
+ u64 bad_octets_received;
+ u64 mac_trans_error;
+ u64 broadcast_frames_received;
+ u64 multicast_frames_received;
+ u64 frames_64_octets;
+ u64 frames_65_to_127_octets;
+ u64 frames_128_to_255_octets;
+ u64 frames_256_to_511_octets;
+ u64 frames_512_to_1023_octets;
+ u64 frames_1024_to_max_octets;
+ u64 excessive_collision;
+ u64 multicast_frames_sent;
+ u64 broadcast_frames_sent;
+ u64 fc_sent;
+ u64 fc_received;
+ u64 buffer_overrun;
+ u64 undersize;
+ u64 fragments;
+ u64 oversize;
+ u64 jabber;
+ u64 rx_error_frame_received;
+ u64 bad_crc;
+ u64 collisions;
+ u64 late_collision;
+ u64 unicast_frames_received;
+ u64 unicast_frames_sent;
+ u64 sent_multiple;
+ u64 sent_deferred;
+ u64 good_octets_sent;
+};
+
+struct prestera_port_caps {
+ u64 supp_link_modes;
+ u8 supp_fec;
+ u8 type;
+ u8 transceiver;
+};
+
+struct prestera_port {
+ struct net_device *dev;
+ struct prestera_switch *sw;
+ struct devlink_port dl_port;
+ u32 id;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+ u16 pvid;
+ bool autoneg;
+ u64 adver_link_modes;
+ u8 adver_fec;
+ struct prestera_port_caps caps;
+ struct list_head list;
+ struct list_head vlans_list;
+ struct {
+ struct prestera_port_stats stats;
+ struct delayed_work caching_dw;
+ } cached_hw_stats;
+};
+
+struct prestera_device {
+ struct device *dev;
+ u8 __iomem *ctl_regs;
+ u8 __iomem *pp_regs;
+ struct prestera_fw_rev fw_rev;
+ void *priv;
+
+ /* called by device driver to handle received packets */
+ void (*recv_pkt)(struct prestera_device *dev);
+
+ /* called by device driver to pass event up to the higher layer */
+ int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
+
+ /* called by higher layer to send request to the firmware */
+ int (*send_req)(struct prestera_device *dev, void *in_msg,
+ size_t in_size, void *out_msg, size_t out_size,
+ unsigned int wait);
+};
+
+enum prestera_event_type {
+ PRESTERA_EVENT_TYPE_UNSPEC,
+
+ PRESTERA_EVENT_TYPE_PORT,
+ PRESTERA_EVENT_TYPE_FDB,
+ PRESTERA_EVENT_TYPE_RXTX,
+
+ PRESTERA_EVENT_TYPE_MAX
+};
+
+enum prestera_rxtx_event_id {
+ PRESTERA_RXTX_EVENT_UNSPEC,
+ PRESTERA_RXTX_EVENT_RCV_PKT,
+};
+
+enum prestera_port_event_id {
+ PRESTERA_PORT_EVENT_UNSPEC,
+ PRESTERA_PORT_EVENT_STATE_CHANGED,
+};
+
+struct prestera_port_event {
+ u32 port_id;
+ union {
+ u32 oper_state;
+ } data;
+};
+
+enum prestera_fdb_event_id {
+ PRESTERA_FDB_EVENT_UNSPEC,
+ PRESTERA_FDB_EVENT_LEARNED,
+ PRESTERA_FDB_EVENT_AGED,
+};
+
+struct prestera_fdb_event {
+ u32 port_id;
+ u32 vid;
+ union {
+ u8 mac[ETH_ALEN];
+ } data;
+};
+
+struct prestera_event {
+ u16 id;
+ union {
+ struct prestera_port_event port_evt;
+ struct prestera_fdb_event fdb_evt;
+ };
+};
+
+struct prestera_switchdev;
+struct prestera_rxtx;
+
+struct prestera_switch {
+ struct prestera_device *dev;
+ struct prestera_switchdev *swdev;
+ struct prestera_rxtx *rxtx;
+ struct list_head event_handlers;
+ struct notifier_block netdev_nb;
+ char base_mac[ETH_ALEN];
+ struct list_head port_list;
+ rwlock_t port_list_lock;
+ u32 port_count;
+ u32 mtu_min;
+ u32 mtu_max;
+ u8 id;
+};
+
+struct prestera_rxtx_params {
+ bool use_sdma;
+ u32 map_addr;
+};
+
+#define prestera_dev(sw) ((sw)->dev->dev)
+
+static inline void prestera_write(const struct prestera_switch *sw,
+ unsigned int reg, u32 val)
+{
+ writel(val, sw->dev->pp_regs + reg);
+}
+
+static inline u32 prestera_read(const struct prestera_switch *sw,
+ unsigned int reg)
+{
+ return readl(sw->dev->pp_regs + reg);
+}
+
+int prestera_device_register(struct prestera_device *dev);
+void prestera_device_unregister(struct prestera_device *dev);
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id);
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec);
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
+
+bool prestera_netdev_check(const struct net_device *dev);
+
+#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
new file mode 100644
index 000000000000..94c185a0e2b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <net/devlink.h>
+
+#include "prestera_devlink.h"
+
+static int prestera_dl_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switch *sw = devlink_priv(dl);
+ char buf[16];
+ int err;
+
+ err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static const struct devlink_ops prestera_dl_ops = {
+ .info_get = prestera_dl_info_get,
+};
+
+struct prestera_switch *prestera_devlink_alloc(void)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch));
+
+ return devlink_priv(dl);
+}
+
+void prestera_devlink_free(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_free(dl);
+}
+
+int prestera_devlink_register(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ int err;
+
+ err = devlink_register(dl, sw->dev->dev);
+ if (err)
+ dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+
+ return err;
+}
+
+void prestera_devlink_unregister(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_unregister(dl);
+}
+
+int prestera_devlink_port_register(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct devlink *dl = priv_to_devlink(sw);
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->fp_id;
+ attrs.switch_id.id_len = sizeof(sw->id);
+ memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len);
+
+ devlink_port_attrs_set(&port->dl_port, &attrs);
+
+ err = devlink_port_register(dl, &port->dl_port, port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void prestera_devlink_port_unregister(struct prestera_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
+
+void prestera_devlink_port_set(struct prestera_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->dev);
+}
+
+void prestera_devlink_port_clear(struct prestera_port *port)
+{
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->dl_port;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
new file mode 100644
index 000000000000..51bee9f75415
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_DEVLINK_H_
+#define _PRESTERA_DEVLINK_H_
+
+#include "prestera.h"
+
+struct prestera_switch *prestera_devlink_alloc(void);
+void prestera_devlink_free(struct prestera_switch *sw);
+
+int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_unregister(struct prestera_switch *sw);
+
+int prestera_devlink_port_register(struct prestera_port *port);
+void prestera_devlink_port_unregister(struct prestera_port *port);
+
+void prestera_devlink_port_set(struct prestera_port *port);
+void prestera_devlink_port_clear(struct prestera_port *port);
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+
+#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
new file mode 100644
index 000000000000..a5e01c7a307b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "prestera_dsa.h"
+
+#define PRESTERA_DSA_W0_CMD GENMASK(31, 30)
+#define PRESTERA_DSA_W0_IS_TAGGED BIT(29)
+#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24)
+#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19)
+#define PRESTERA_DSA_W0_VPT GENMASK(15, 13)
+#define PRESTERA_DSA_W0_EXT_BIT BIT(12)
+#define PRESTERA_DSA_W0_VID GENMASK(11, 0)
+
+#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
+#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+
+#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
+
+#define PRESTERA_DSA_W3_VID GENMASK(30, 27)
+#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7)
+#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0)
+
+#define PRESTERA_DSA_VID GENMASK(15, 12)
+#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5)
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ enum prestera_dsa_cmd cmd;
+ u32 words[4];
+ u32 field;
+
+ words[0] = ntohl(dsa_words[0]);
+ words[1] = ntohl(dsa_words[1]);
+ words[2] = ntohl(dsa_words[2]);
+ words[3] = ntohl(dsa_words[3]);
+
+ /* set the common parameters */
+ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]);
+
+ /* only to CPU is supported */
+ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU))
+ return -EINVAL;
+
+ if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0)
+ return -EINVAL;
+
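+ /* the VID: bits 0..11 come from word 0, bits 12..15 from word 3 */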
+ field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]);
+
+ dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]);
+ dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]);
+ dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]);
+ dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]);
+ dsa->vlan.vid &= ~PRESTERA_DSA_VID;
+ dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field);
+
+ field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]);
+
+ dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]);
+ dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field);
+
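+ /* the port number: bits 0..4 from word 0, 5..6 from word 1, bit 7 from word 2 */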
+ dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) |
+ (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
+ (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+
+ return 0;
+}
+
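+/* Build a four-word "From CPU" extended DSA tag: all three EXT bits are
+ * set and the device number is split between words 0 and 3.
+ */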
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ u32 dev_num = dsa->hw_dev_num;
+ u32 words[4] = { 0 };
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num);
+ dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num);
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num);
+
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1);
+ words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1);
+ words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1);
+
+ dsa_words[0] = htonl(words[0]);
+ dsa_words[1] = htonl(words[1]);
+ dsa_words[2] = htonl(words[2]);
+ dsa_words[3] = htonl(words[3]);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
new file mode 100644
index 000000000000..67018629bdd2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_DSA_H_
+#define __PRESTERA_DSA_H_
+
+#include <linux/types.h>
+
+#define PRESTERA_DSA_HLEN 16
+
+enum prestera_dsa_cmd {
+ /* DSA command is "To CPU" */
+ PRESTERA_DSA_CMD_TO_CPU = 0,
+
+ /* DSA command is "From CPU" */
+ PRESTERA_DSA_CMD_FROM_CPU,
+};
+
+struct prestera_dsa_vlan {
+ u16 vid;
+ u8 vpt;
+ u8 cfi_bit;
+ bool is_tagged;
+};
+
+struct prestera_dsa {
+ struct prestera_dsa_vlan vlan;
+ u32 hw_dev_num;
+ u32 port_num;
+};
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf);
+
+#endif /* __PRESTERA_DSA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
new file mode 100644
index 000000000000..93a5e2baf808
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "prestera_ethtool.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_STATS_CNT \
+ (sizeof(struct prestera_port_stats) / sizeof(u64))
+#define PRESTERA_STATS_IDX(name) \
+ (offsetof(struct prestera_port_stats, name) / sizeof(u64))
+#define PRESTERA_STATS_FIELD(name) \
+ [PRESTERA_STATS_IDX(name)] = __stringify(name)
+
+static const char driver_kind[] = "prestera";
+
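+/* Map every prestera link mode onto its ethtool mode bit, speed, duplex
+ * and port type; pr_mask is the driver's own bitmask for that mode.
+ */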
+static const struct prestera_link_mode {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u32 speed;
+ u64 pr_mask;
+ u8 duplex;
+ u8 port_type;
+} port_link_modes[PRESTERA_LINK_MODE_MAX] = {
+ [PRESTERA_LINK_MODE_10baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_1000baseKX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_2500baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .speed = 2500,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ },
+ [PRESTERA_LINK_MODE_10GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_10GbaseLR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_20GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = 20000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseCR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_25GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_40GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_40GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_40GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_50GbaseCR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_50GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_50GbaseSR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ }
+};
+
+static const struct prestera_fec {
+ u32 eth_fec;
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 pr_fec;
+} port_fec_caps[PRESTERA_PORT_FEC_MAX] = {
+ [PRESTERA_PORT_FEC_OFF] = {
+ .eth_fec = ETHTOOL_FEC_OFF,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_OFF,
+ },
+ [PRESTERA_PORT_FEC_BASER] = {
+ .eth_fec = ETHTOOL_FEC_BASER,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_BASER,
+ },
+ [PRESTERA_PORT_FEC_RS] = {
+ .eth_fec = ETHTOOL_FEC_RS,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_RS,
+ }
+};
+
+static const struct prestera_port_type {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 eth_type;
+} port_types[PRESTERA_PORT_TYPE_MAX] = {
+ [PRESTERA_PORT_TYPE_NONE] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_NONE,
+ },
+ [PRESTERA_PORT_TYPE_TP] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_AUI] = {
+ .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT,
+ .eth_type = PORT_AUI,
+ },
+ [PRESTERA_PORT_TYPE_MII] = {
+ .eth_mode = ETHTOOL_LINK_MODE_MII_BIT,
+ .eth_type = PORT_MII,
+ },
+ [PRESTERA_PORT_TYPE_FIBRE] = {
+ .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT,
+ .eth_type = PORT_FIBRE,
+ },
+ [PRESTERA_PORT_TYPE_BNC] = {
+ .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT,
+ .eth_type = PORT_BNC,
+ },
+ [PRESTERA_PORT_TYPE_DA] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_OTHER] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_OTHER,
+ }
+};
+
+static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = {
+ PRESTERA_STATS_FIELD(good_octets_received),
+ PRESTERA_STATS_FIELD(bad_octets_received),
+ PRESTERA_STATS_FIELD(mac_trans_error),
+ PRESTERA_STATS_FIELD(broadcast_frames_received),
+ PRESTERA_STATS_FIELD(multicast_frames_received),
+ PRESTERA_STATS_FIELD(frames_64_octets),
+ PRESTERA_STATS_FIELD(frames_65_to_127_octets),
+ PRESTERA_STATS_FIELD(frames_128_to_255_octets),
+ PRESTERA_STATS_FIELD(frames_256_to_511_octets),
+ PRESTERA_STATS_FIELD(frames_512_to_1023_octets),
+ PRESTERA_STATS_FIELD(frames_1024_to_max_octets),
+ PRESTERA_STATS_FIELD(excessive_collision),
+ PRESTERA_STATS_FIELD(multicast_frames_sent),
+ PRESTERA_STATS_FIELD(broadcast_frames_sent),
+ PRESTERA_STATS_FIELD(fc_sent),
+ PRESTERA_STATS_FIELD(fc_received),
+ PRESTERA_STATS_FIELD(buffer_overrun),
+ PRESTERA_STATS_FIELD(undersize),
+ PRESTERA_STATS_FIELD(fragments),
+ PRESTERA_STATS_FIELD(oversize),
+ PRESTERA_STATS_FIELD(jabber),
+ PRESTERA_STATS_FIELD(rx_error_frame_received),
+ PRESTERA_STATS_FIELD(bad_crc),
+ PRESTERA_STATS_FIELD(collisions),
+ PRESTERA_STATS_FIELD(late_collision),
+ PRESTERA_STATS_FIELD(unicast_frames_received),
+ PRESTERA_STATS_FIELD(unicast_frames_sent),
+ PRESTERA_STATS_FIELD(sent_multiple),
+ PRESTERA_STATS_FIELD(sent_deferred),
+ PRESTERA_STATS_FIELD(good_octets_sent),
+};
+
+static void prestera_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_switch *sw = port->sw;
+
+ strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+}
+
+static u8 prestera_port_type_get(struct prestera_port *port)
+{
+ if (port->caps.type < PRESTERA_PORT_TYPE_MAX)
+ return port_types[port->caps.type].eth_type;
+
+ return PORT_OTHER;
+}
+
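+/* Change the physical port type. A change is rejected while autoneg is
+ * enabled; otherwise the highest-indexed link mode supported for the
+ * new type is programmed and autoneg is turned off.
+ */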
+static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 type, mode;
+ int err;
+
+ for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
+ if (port_types[type].eth_type == ecmd->base.port &&
+ test_bit(port_types[type].eth_mode,
+ ecmd->link_modes.supported)) {
+ break;
+ }
+ }
+
+ if (type == port->caps.type)
+ return 0;
+ if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+ if (type == PRESTERA_PORT_TYPE_MAX)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) &&
+ type == port_link_modes[mode].port_type) {
+ new_mode = mode;
+ }
+ }
+
+ if (new_mode < PRESTERA_LINK_MODE_MAX)
+ err = prestera_hw_port_link_mode_set(port, new_mode);
+ else
+ err = -EINVAL;
+
+ if (err)
+ return err;
+
+ port->caps.type = type;
+ port->autoneg = false;
+
+ return 0;
+}
+
+static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes,
+ u8 fec, u8 type)
+{
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask & link_modes) == 0)
+ continue;
+
+ if (type != PRESTERA_PORT_TYPE_NONE &&
+ port_link_modes[mode].port_type != type)
+ continue;
+
+ __set_bit(port_link_modes[mode].eth_mode, eth_modes);
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & fec) == 0)
+ continue;
+
+ __set_bit(port_fec_caps[mode].eth_mode, eth_modes);
+ }
+}
+
+static void prestera_modes_from_eth(const unsigned long *eth_modes,
+ u64 *link_modes, u8 *fec, u8 type)
+{
+ u64 adver_modes = 0;
+ u32 fec_modes = 0;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (!test_bit(port_link_modes[mode].eth_mode, eth_modes))
+ continue;
+
+ if (port_link_modes[mode].port_type != type)
+ continue;
+
+ adver_modes |= port_link_modes[mode].pr_mask;
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes))
+ continue;
+
+ fec_modes |= port_fec_caps[mode].pr_fec;
+ }
+
+ *link_modes = adver_modes;
+ *fec = fec_modes;
+}
+
+static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 mode;
+ u8 ptype;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) == 0)
+ continue;
+
+ ptype = port_link_modes[mode].port_type;
+ __set_bit(port_types[ptype].eth_mode,
+ ecmd->link_modes.supported);
+ }
+}
+
+static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ bool asym_pause;
+ bool pause;
+ u64 bitmap;
+ int err;
+
+ err = prestera_hw_port_remote_cap_get(port, &bitmap);
+ if (!err) {
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
+ }
+ }
+
+ err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
+ if (err)
+ return;
+
+ if (pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Pause);
+ if (asym_pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Asym_Pause);
+}
+
+static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_speed_get(port, &speed);
+ ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+}
+
+static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u8 duplex;
+ int err;
+
+ err = prestera_hw_port_duplex_get(port, &duplex);
+ if (err) {
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ return;
+ }
+
+ ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
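+/* Supported and advertised modes are derived from the cached port
+ * capabilities; speed and duplex are queried from hardware only while
+ * the carrier is up.
+ */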
+static int
+prestera_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+
+ ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg);
+
+ if (netif_running(dev) &&
+ (port->autoneg ||
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER))
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ Autoneg);
+ }
+
+ prestera_modes_to_eth(ecmd->link_modes.supported,
+ port->caps.supp_link_modes,
+ port->caps.supp_fec,
+ port->caps.type);
+
+ prestera_port_supp_types_get(ecmd, port);
+
+ if (netif_carrier_ok(dev)) {
+ prestera_port_speed_get(ecmd, port);
+ prestera_port_duplex_get(ecmd, port);
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ecmd->base.port = prestera_port_type_get(port);
+
+ if (port->autoneg) {
+ if (netif_running(dev))
+ prestera_modes_to_eth(ecmd->link_modes.advertising,
+ port->adver_link_modes,
+ port->adver_fec,
+ port->caps.type);
+
+ if (netif_carrier_ok(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_remote_cap_get(ecmd, port);
+ }
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
+ &ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_mdix_set(port,
+ ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_link_mode_set(struct prestera_port *port,
+ u32 speed, u8 duplex, u8 type)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (speed != port_link_modes[mode].speed)
+ continue;
+
+ if (duplex != port_link_modes[mode].duplex)
+ continue;
+
+ if (!(port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes))
+ continue;
+
+ if (type != port_link_modes[mode].port_type)
+ continue;
+
+ new_mode = mode;
+ break;
+ }
+
+ if (new_mode == PRESTERA_LINK_MODE_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_link_mode_set(port, new_mode);
+}
+
+static int
+prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 curr_mode;
+ u8 duplex;
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_link_mode_get(port, &curr_mode);
+ if (err)
+ return err;
+ if (curr_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
+
+ if (ecmd->base.duplex != DUPLEX_UNKNOWN)
+ duplex = ecmd->base.duplex == DUPLEX_FULL ?
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
+ else
+ duplex = port_link_modes[curr_mode].duplex;
+
+ if (ecmd->base.speed != SPEED_UNKNOWN)
+ speed = ecmd->base.speed;
+ else
+ speed = port_link_modes[curr_mode].speed;
+
+ return prestera_port_link_mode_set(port, speed, duplex,
+ port->caps.type);
+}
+
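+/* Apply the settings in order: port type, MDI-X (copper only), autoneg
+ * with the advertised mode/FEC masks, and finally a forced speed/duplex
+ * when autoneg is disabled.
+ */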
+static int
+prestera_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u64 adver_modes;
+ u8 adver_fec;
+ int err;
+
+ err = prestera_port_type_set(ecmd, port);
+ if (err)
+ return err;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) {
+ err = prestera_port_mdix_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
+ &adver_fec, port->caps.type);
+
+ err = prestera_port_autoneg_set(port,
+ ecmd->base.autoneg == AUTONEG_ENABLE,
+ adver_modes, adver_fec);
+ if (err)
+ return err;
+
+ if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ err = prestera_port_speed_duplex_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_ethtool_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 active;
+ u32 mode;
+ int err;
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fecparam->fec = 0;
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0)
+ continue;
+
+ fecparam->fec |= port_fec_caps[mode].eth_fec;
+ }
+
+ if (active < PRESTERA_PORT_FEC_MAX)
+ fecparam->active_fec = port_fec_caps[active].eth_fec;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static int prestera_ethtool_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 fec, active;
+ u32 mode;
+ int err;
+
+ if (port->autoneg) {
+ netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
+ return -EINVAL;
+ }
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fec = PRESTERA_PORT_FEC_MAX;
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
+ (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) {
+ fec = mode;
+ break;
+ }
+ }
+
+ if (fec == active)
+ return 0;
+
+ if (fec == PRESTERA_PORT_FEC_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_fec_set(port, fec);
+}
+
+static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PRESTERA_STATS_CNT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name));
+}
+
+static void prestera_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats;
+
+ port_stats = &port->cached_hw_stats.stats;
+
+ memcpy(data, port_stats, sizeof(*port_stats));
+}
+
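+/* Autoneg restart is only performed for a running copper TP port. */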
+static int prestera_ethtool_nway_reset(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ if (netif_running(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_autoneg_restart(port);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops prestera_ethtool_ops = {
+ .get_drvinfo = prestera_ethtool_get_drvinfo,
+ .get_link_ksettings = prestera_ethtool_get_link_ksettings,
+ .set_link_ksettings = prestera_ethtool_set_link_ksettings,
+ .get_fecparam = prestera_ethtool_get_fecparam,
+ .set_fecparam = prestera_ethtool_set_fecparam,
+ .get_sset_count = prestera_ethtool_get_sset_count,
+ .get_strings = prestera_ethtool_get_strings,
+ .get_ethtool_stats = prestera_ethtool_get_stats,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = prestera_ethtool_nway_reset
+};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
new file mode 100644
index 000000000000..523ef1f592ce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_ETHTOOL_H_
+#define __PRESTERA_ETHTOOL_H_
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops prestera_ethtool_ops;
+
+#endif /* __PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
new file mode 100644
index 000000000000..0424718d5998
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
+
+#define PRESTERA_MIN_MTU 64
+
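+/* Every request below is a typed message sent to the firmware, which
+ * replies with an ACK message carrying a status code; the reply is
+ * validated in __prestera_cmd_ret().
+ */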
+enum prestera_cmd_type_t {
+ PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
+ PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
+
+ PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
+ PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
+ PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
+
+ PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
+ PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
+ PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
+ PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
+
+ PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
+ PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
+
+ PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
+ PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+
+ PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
+ PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+
+ PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+
+ PRESTERA_CMD_TYPE_ACK = 0x10000,
+ PRESTERA_CMD_TYPE_MAX
+};
+
+enum {
+ PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
+ PRESTERA_CMD_PORT_ATTR_MTU = 3,
+ PRESTERA_CMD_PORT_ATTR_MAC = 4,
+ PRESTERA_CMD_PORT_ATTR_SPEED = 5,
+ PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
+ PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
+ PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
+ PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
+ PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_TYPE = 13,
+ PRESTERA_CMD_PORT_ATTR_FEC = 14,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
+ PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
+ PRESTERA_CMD_PORT_ATTR_STATS = 17,
+ PRESTERA_CMD_PORT_ATTR_MDIX = 18,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+};
+
+enum {
+ PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
+ PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
+};
+
+enum {
+ PRESTERA_CMD_ACK_OK,
+ PRESTERA_CMD_ACK_FAILED,
+
+ PRESTERA_CMD_ACK_MAX
+};
+
+enum {
+ PRESTERA_PORT_TP_NA,
+ PRESTERA_PORT_TP_MDI,
+ PRESTERA_PORT_TP_MDIX,
+ PRESTERA_PORT_TP_AUTO,
+};
+
+enum {
+ PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
+ PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
+ PRESTERA_PORT_MC_PKTS_RCV_CNT,
+ PRESTERA_PORT_PKTS_64L_CNT,
+ PRESTERA_PORT_PKTS_65TO127L_CNT,
+ PRESTERA_PORT_PKTS_128TO255L_CNT,
+ PRESTERA_PORT_PKTS_256TO511L_CNT,
+ PRESTERA_PORT_PKTS_512TO1023L_CNT,
+ PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
+ PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
+ PRESTERA_PORT_MC_PKTS_SENT_CNT,
+ PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
+ PRESTERA_PORT_FC_SENT_CNT,
+ PRESTERA_PORT_GOOD_FC_RCV_CNT,
+ PRESTERA_PORT_DROP_EVENTS_CNT,
+ PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
+ PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
+ PRESTERA_PORT_OVERSIZE_PKTS_CNT,
+ PRESTERA_PORT_JABBER_PKTS_CNT,
+ PRESTERA_PORT_MAC_RCV_ERROR_CNT,
+ PRESTERA_PORT_BAD_CRC_CNT,
+ PRESTERA_PORT_COLLISIONS_CNT,
+ PRESTERA_PORT_LATE_COLLISIONS_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
+ PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
+ PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
+ PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
+
+ PRESTERA_PORT_CNT_MAX
+};
+
+enum {
+ PRESTERA_FC_NONE,
+ PRESTERA_FC_SYMMETRIC,
+ PRESTERA_FC_ASYMMETRIC,
+ PRESTERA_FC_SYMM_ASYMM,
+};
+
+struct prestera_fw_event_handler {
+ struct list_head list;
+ struct rcu_head rcu;
+ enum prestera_event_type type;
+ prestera_event_cb_t func;
+ void *arg;
+};
+
+struct prestera_msg_cmd {
+ u32 type;
+};
+
+struct prestera_msg_ret {
+ struct prestera_msg_cmd cmd;
+ u32 status;
+};
+
+struct prestera_msg_common_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_common_resp {
+ struct prestera_msg_ret ret;
+};
+
+union prestera_msg_switch_param {
+ u8 mac[ETH_ALEN];
+ u32 ageing_timeout_ms;
+};
+
+struct prestera_msg_switch_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ union prestera_msg_switch_param param;
+};
+
+struct prestera_msg_switch_init_resp {
+ struct prestera_msg_ret ret;
+ u32 port_count;
+ u32 mtu_max;
+ u8 switch_id;
+};
+
+struct prestera_msg_port_autoneg_param {
+ u64 link_mode;
+ u8 enable;
+ u8 fec;
+};
+
+struct prestera_msg_port_cap_param {
+ u64 link_mode;
+ u8 type;
+ u8 fec;
+ u8 transceiver;
+};
+
+struct prestera_msg_port_mdix_param {
+ u8 status;
+ u8 admin_mode;
+};
+
+union prestera_msg_port_param {
+ u8 admin_state;
+ u8 oper_state;
+ u32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ u32 speed;
+ u8 learning;
+ u8 flood;
+ u32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+ struct prestera_msg_port_mdix_param mdix;
+ struct prestera_msg_port_autoneg_param autoneg;
+ struct prestera_msg_port_cap_param cap;
+};
+
+struct prestera_msg_port_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ u32 port;
+ u32 dev;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_attr_resp {
+ struct prestera_msg_ret ret;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 stats[PRESTERA_PORT_CNT_MAX];
+};
+
+struct prestera_msg_port_info_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+};
+
+struct prestera_msg_port_info_resp {
+ struct prestera_msg_ret ret;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+};
+
+struct prestera_msg_vlan_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 is_member;
+ u8 is_tagged;
+};
+
+struct prestera_msg_fdb_req {
+ struct prestera_msg_cmd cmd;
+ u8 dest_type;
+ u32 port;
+ u32 dev;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 dynamic;
+ u32 flush_mode;
+};
+
+struct prestera_msg_bridge_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 bridge;
+};
+
+struct prestera_msg_bridge_resp {
+ struct prestera_msg_ret ret;
+ u16 bridge;
+};
+
+struct prestera_msg_stp_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 state;
+};
+
+struct prestera_msg_rxtx_req {
+ struct prestera_msg_cmd cmd;
+ u8 use_sdma;
+};
+
+struct prestera_msg_rxtx_resp {
+ struct prestera_msg_ret ret;
+ u32 map_addr;
+};
+
+struct prestera_msg_rxtx_port_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+};
+
+struct prestera_msg_event {
+ u16 type;
+ u16 id;
+};
+
+union prestera_msg_event_port_param {
+ u32 oper_state;
+};
+
+struct prestera_msg_event_port {
+ struct prestera_msg_event id;
+ u32 port_id;
+ union prestera_msg_event_port_param param;
+};
+
+union prestera_msg_event_fdb_param {
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_event_fdb {
+ struct prestera_msg_event id;
+ u8 dest_type;
+ u32 port_id;
+ u32 vid;
+ union prestera_msg_event_fdb_param param;
+};
+
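+/* Send a typed command to the firmware and validate the reply: it must
+ * be an ACK message with an OK status. waitms is forwarded to the
+ * transport's send_req() callback.
+ */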
+static int __prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ struct prestera_device *dev = sw->dev;
+ int err;
+
+ cmd->type = type;
+
+ err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ if (err)
+ return err;
+
+ if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ return -EBADE;
+ if (ret->status != PRESTERA_CMD_ACK_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
+}
+
+static int prestera_cmd_ret_wait(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
+}
+
+static int prestera_cmd(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen)
+{
+ struct prestera_msg_common_resp resp;
+
+ return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
+}
+
+static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_port *hw_evt = msg;
+
+ if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
+ return -EINVAL;
+
+ evt->port_evt.data.oper_state = hw_evt->param.oper_state;
+ evt->port_evt.port_id = hw_evt->port_id;
+
+ return 0;
+}
+
+static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_fdb *hw_evt = msg;
+
+ evt->fdb_evt.port_id = hw_evt->port_id;
+ evt->fdb_evt.vid = hw_evt->vid;
+
+ ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
+
+ return 0;
+}
+
+static struct prestera_fw_evt_parser {
+ int (*func)(void *msg, struct prestera_event *evt);
+} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
+ [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
+ [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
+};
+
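+/* Event handlers sit on an RCU-protected list so that the event path
+ * can look them up locklessly; prestera_find_event_handler() copies the
+ * matching entry under rcu_read_lock() before the callback is invoked.
+ */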
+static struct prestera_fw_event_handler *
+__find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type)
+{
+ struct prestera_fw_event_handler *eh;
+
+ list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
+ if (eh->type == type)
+ return eh;
+ }
+
+ return NULL;
+}
+
+static int prestera_find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type,
+ struct prestera_fw_event_handler *eh)
+{
+ struct prestera_fw_event_handler *tmp;
+ int err = 0;
+
+ rcu_read_lock();
+ tmp = __find_event_handler(sw, type);
+ if (tmp)
+ *eh = *tmp;
+ else
+ err = -ENOENT;
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_msg_event *msg = buf;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event evt;
+ int err;
+
+ if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ return -EINVAL;
+ if (!fw_event_parsers[msg->type].func)
+ return -ENOENT;
+
+ err = prestera_find_event_handler(sw, msg->type, &eh);
+ if (err)
+ return err;
+
+ evt.id = msg->id;
+
+ err = fw_event_parsers[msg->type].func(buf, &evt);
+ if (err)
+ return err;
+
+ eh.func(sw, &evt, eh.arg);
+
+ return 0;
+}
+
+static void prestera_pkt_recv(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event ev;
+ int err;
+
+ ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
+
+ err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
+ if (err)
+ return;
+
+ eh.func(sw, &ev, eh.arg);
+}
+
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id)
+{
+ struct prestera_msg_port_info_req req = {
+ .port = port->id,
+ };
+ struct prestera_msg_port_info_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *dev_id = resp.dev_id;
+ *hw_id = resp.hw_id;
+ *fp_id = resp.fp_id;
+
+ return 0;
+}
+
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
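+/* Waits up to PRESTERA_SWITCH_INIT_TIMEOUT_MS for the firmware to
+ * finish switch initialization, then hooks up the event/packet receive
+ * callbacks and caches the port count, MTU limits and switch id.
+ */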
+int prestera_hw_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_msg_switch_init_resp resp;
+ struct prestera_msg_common_req req;
+ int err;
+
+ INIT_LIST_HEAD(&sw->event_handlers);
+
+ err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp),
+ PRESTERA_SWITCH_INIT_TIMEOUT_MS);
+ if (err)
+ return err;
+
+ sw->dev->recv_msg = prestera_evt_recv;
+ sw->dev->recv_pkt = prestera_pkt_recv;
+ sw->port_count = resp.port_count;
+ sw->mtu_min = PRESTERA_MIN_MTU;
+ sw->mtu_max = resp.mtu_max;
+ sw->id = resp.switch_id;
+
+ return 0;
+}
+
+void prestera_hw_switch_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->event_handlers));
+}
+
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .param = {
+ .ageing_timeout_ms = ageing_ms,
+ },
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .admin_state = admin_state,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MTU,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .mtu = mtu,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MAC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .accept_frm_type = type,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->transceiver = resp.param.cap.transceiver;
+ caps->supp_fec = resp.param.cap.fec;
+ caps->type = resp.param.cap.type;
+
+ return err;
+}
+
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *link_mode_bitmap = resp.param.cap.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ switch (resp.param.fc) {
+ case PRESTERA_FC_SYMMETRIC:
+ *pause = true;
+ *asym_pause = false;
+ break;
+ case PRESTERA_FC_ASYMMETRIC:
+ *pause = false;
+ *asym_pause = true;
+ break;
+ case PRESTERA_FC_SYMM_ASYMM:
+ *pause = true;
+ *asym_pause = true;
+ break;
+ default:
+ *pause = false;
+ *asym_pause = false;
+ }
+
+ return 0;
+}
+
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *type = resp.param.type;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *fec = resp.param.fec;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .fec = fec,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
+ *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
+
+ return 0;
+}
+
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .link_mode = mode,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *mode = resp.param.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *speed = resp.param.speed;
+
+ return 0;
+}
+
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .autoneg = {
+ .link_mode = link_modes,
+ .enable = autoneg,
+ .fec = fec,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_autoneg_restart(struct prestera_port *port)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *duplex = resp.param.duplex;
+
+ return 0;
+}
+
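+/* Fetch the raw firmware counter array and translate it into the
+ * driver's prestera_port_stats layout.
+ */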
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *st)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_STATS,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_stats_resp resp;
+ u64 *hw = resp.stats;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
+ st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
+ st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
+ st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
+ st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
+ st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
+ st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
+ st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
+ st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
+ st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
+ st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
+ st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
+ st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
+ st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
+ st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
+ st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
+ st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
+ st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
+ st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
+ st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
+ st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
+ st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
+ st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
+ st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
+ st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
+ st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
+ st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
+ st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
+ st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
+ st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+
+ return 0;
+}
+
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .learning = enable,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood = flood,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .is_member = is_member,
+ .is_tagged = !untagged,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
+{
+ struct prestera_msg_stp_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .state = state,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
+{
+ struct prestera_msg_bridge_resp resp;
+ struct prestera_msg_bridge_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *bridge_id = resp.bridge;
+
+ return 0;
+}
+
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params)
+{
+ struct prestera_msg_rxtx_resp resp;
+ struct prestera_msg_rxtx_req req;
+ int err;
+
+ req.use_sdma = params->use_sdma;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ params->map_addr = resp.map_addr;
+
+ return 0;
+}
+
+int prestera_hw_rxtx_port_init(struct prestera_port *port)
+{
+ struct prestera_msg_rxtx_port_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (eh)
+ return -EEXIST;
+
+ eh = kmalloc(sizeof(*eh), GFP_KERNEL);
+ if (!eh)
+ return -ENOMEM;
+
+ eh->type = type;
+ eh->func = fn;
+ eh->arg = arg;
+
+ INIT_LIST_HEAD(&eh->list);
+
+ list_add_rcu(&eh->list, &sw->event_handlers);
+
+ return 0;
+}
+
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (!eh)
+ return;
+
+ list_del_rcu(&eh->list);
+ kfree_rcu(eh, rcu);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
new file mode 100644
index 000000000000..b2b5ac95b4e3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_HW_H_
+#define _PRESTERA_HW_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+enum prestera_accept_frm_type {
+ PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_ALL,
+};
+
+enum prestera_fdb_flush_mode {
+ PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0),
+ PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1),
+ PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC
+ | PRESTERA_FDB_FLUSH_MODE_STATIC,
+};
+
+enum {
+ PRESTERA_LINK_MODE_10baseT_Half,
+ PRESTERA_LINK_MODE_10baseT_Full,
+ PRESTERA_LINK_MODE_100baseT_Half,
+ PRESTERA_LINK_MODE_100baseT_Full,
+ PRESTERA_LINK_MODE_1000baseT_Half,
+ PRESTERA_LINK_MODE_1000baseT_Full,
+ PRESTERA_LINK_MODE_1000baseX_Full,
+ PRESTERA_LINK_MODE_1000baseKX_Full,
+ PRESTERA_LINK_MODE_2500baseX_Full,
+ PRESTERA_LINK_MODE_10GbaseKR_Full,
+ PRESTERA_LINK_MODE_10GbaseSR_Full,
+ PRESTERA_LINK_MODE_10GbaseLR_Full,
+ PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ PRESTERA_LINK_MODE_25GbaseCR_Full,
+ PRESTERA_LINK_MODE_25GbaseKR_Full,
+ PRESTERA_LINK_MODE_25GbaseSR_Full,
+ PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ PRESTERA_LINK_MODE_100GbaseCR4_Full,
+
+ PRESTERA_LINK_MODE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TYPE_NONE,
+ PRESTERA_PORT_TYPE_TP,
+ PRESTERA_PORT_TYPE_AUI,
+ PRESTERA_PORT_TYPE_MII,
+ PRESTERA_PORT_TYPE_FIBRE,
+ PRESTERA_PORT_TYPE_BNC,
+ PRESTERA_PORT_TYPE_DA,
+ PRESTERA_PORT_TYPE_OTHER,
+
+ PRESTERA_PORT_TYPE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TCVR_COPPER,
+ PRESTERA_PORT_TCVR_SFP,
+
+ PRESTERA_PORT_TCVR_MAX
+};
+
+enum {
+ PRESTERA_PORT_FEC_OFF,
+ PRESTERA_PORT_FEC_BASER,
+ PRESTERA_PORT_FEC_RS,
+
+ PRESTERA_PORT_FEC_MAX
+};
+
+enum {
+ PRESTERA_PORT_DUPLEX_HALF,
+ PRESTERA_PORT_DUPLEX_FULL,
+};
+
+enum {
+ PRESTERA_STP_DISABLED,
+ PRESTERA_STP_BLOCK_LISTEN,
+ PRESTERA_STP_LEARN,
+ PRESTERA_STP_FORWARD,
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_port_stats;
+struct prestera_port_caps;
+enum prestera_event_type;
+struct prestera_event;
+
+typedef void (*prestera_event_cb_t)
+ (struct prestera_switch *sw, struct prestera_event *evt, void *arg);
+
+struct prestera_rxtx_params;
+
+/* Switch API */
+int prestera_hw_switch_init(struct prestera_switch *sw);
+void prestera_hw_switch_fini(struct prestera_switch *sw);
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms);
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
+
+/* Port API */
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id);
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state);
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
+int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
+int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps);
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap);
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause);
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec);
+int prestera_hw_port_autoneg_restart(struct prestera_port *port);
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *stats);
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode);
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type);
+/* Vlan API */
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged);
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid);
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state);
+
+/* FDB API */
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic);
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid);
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode);
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode);
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode);
+
+/* Bridge API */
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id);
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+
+/* Event handlers */
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg);
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn);
+
+/* RX/TX */
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params);
+int prestera_hw_rxtx_port_init(struct prestera_port *port);
+
+#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
new file mode 100644
index 000000000000..0f20e0788cce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+#include "prestera_ethtool.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_MTU_DEFAULT 1536
+
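+/* period (in ms) of the delayed work that refreshes cached HW port stats */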
+#define PRESTERA_STATS_DELAY_MS 1000
+
+#define PRESTERA_MAC_ADDR_NUM_MAX 255
+
+static struct workqueue_struct *prestera_wq;
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
+{
+ enum prestera_accept_frm_type frm_type;
+ int err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;
+
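+	/* vid == 0 clears the PVID, so only tagged frames are accepted */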
+ if (vid) {
+ err = prestera_hw_vlan_port_vid_set(port, vid);
+ if (err)
+ return err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
+ }
+
+	err = prestera_hw_port_accept_frm_type(port, frm_type);
+	if (err) {
+		/* roll back the PVID change before reporting the error */
+		if (frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
+			prestera_hw_vlan_port_vid_set(port, port->pvid);
+		return err;
+	}
+
+	port->pvid = vid;
+	return 0;
+}
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id)
+{
+ struct prestera_port *port = NULL;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(port, &sw->port_list, list) {
+ if (port->dev_id == dev_id && port->hw_id == hw_id)
+ break;
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port = NULL;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(port, &sw->port_list, list) {
+ if (port->id == id)
+ break;
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_state_set(port, true);
+ if (err)
+ return err;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int prestera_port_close(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ netif_stop_queue(dev);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return prestera_rxtx_xmit(netdev_priv(dev), skb);
+}
+
+static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+{
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+	/* the firmware requires that the port's MAC address start with the
+	 * first 5 bytes of the base MAC address
+	 */
+ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, addr->sa_data);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_mac_set(port, addr->sa_data);
+ if (err)
+ return err;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static int prestera_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_mtu_set(port, mtu);
+ if (err)
+ return err;
+
+ dev->mtu = mtu;
+
+ return 0;
+}
+
+static void prestera_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;
+
+ stats->rx_packets = port_stats->broadcast_frames_received +
+ port_stats->multicast_frames_received +
+ port_stats->unicast_frames_received;
+
+ stats->tx_packets = port_stats->broadcast_frames_sent +
+ port_stats->multicast_frames_sent +
+ port_stats->unicast_frames_sent;
+
+ stats->rx_bytes = port_stats->good_octets_received;
+
+ stats->tx_bytes = port_stats->good_octets_sent;
+
+ stats->rx_errors = port_stats->rx_error_frame_received;
+ stats->tx_errors = port_stats->mac_trans_error;
+
+ stats->rx_dropped = port_stats->buffer_overrun;
+ stats->tx_dropped = 0;
+
+ stats->multicast = port_stats->multicast_frames_received;
+ stats->collisions = port_stats->excessive_collision;
+
+ stats->rx_crc_errors = port_stats->bad_crc;
+}
+
+static void prestera_port_get_hw_stats(struct prestera_port *port)
+{
+ prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
+}
+
+static void prestera_port_stats_update(struct work_struct *work)
+{
+ struct prestera_port *port =
+ container_of(work, struct prestera_port,
+ cached_hw_stats.caching_dw.work);
+
+ prestera_port_get_hw_stats(port);
+
+ queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
+ msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
+}
+
+static const struct net_device_ops prestera_netdev_ops = {
+ .ndo_open = prestera_port_open,
+ .ndo_stop = prestera_port_close,
+ .ndo_start_xmit = prestera_port_xmit,
+ .ndo_change_mtu = prestera_port_change_mtu,
+ .ndo_get_stats64 = prestera_port_get_stats64,
+ .ndo_set_mac_address = prestera_port_set_mac_address,
+ .ndo_get_devlink_port = prestera_devlink_get_port,
+};
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec)
+{
+ bool refresh = false;
+ u64 link_modes;
+ int err;
+ u8 fec;
+
+ if (port->caps.type != PRESTERA_PORT_TYPE_TP)
+ return enable ? -EINVAL : 0;
+
+ if (!enable)
+ goto set_autoneg;
+
+ link_modes = port->caps.supp_link_modes & adver_link_modes;
+ fec = port->caps.supp_fec & adver_fec;
+
+ if (!link_modes && !fec)
+ return -EOPNOTSUPP;
+
+ if (link_modes && port->adver_link_modes != link_modes) {
+ port->adver_link_modes = link_modes;
+ refresh = true;
+ }
+
+ if (fec && port->adver_fec != fec) {
+ port->adver_fec = fec;
+ refresh = true;
+ }
+
+set_autoneg:
+ if (port->autoneg == enable && !refresh)
+ return 0;
+
+ err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
+ port->adver_fec);
+ if (err)
+ return err;
+
+ port->autoneg = enable;
+
+ return 0;
+}
+
+static void prestera_port_list_add(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_add(&port->list, &port->sw->port_list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static void prestera_port_list_del(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_del(&port->list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static int prestera_port_create(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*port));
+ if (!dev)
+ return -ENOMEM;
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->vlans_list);
+ port->pvid = PRESTERA_DEFAULT_VID;
+ port->dev = dev;
+ port->id = id;
+ port->sw = sw;
+
+ err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
+ &port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
+ goto err_port_info_get;
+ }
+
+ err = prestera_devlink_port_register(port);
+ if (err)
+ goto err_dl_port_register;
+
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netdev_ops = &prestera_netdev_ops;
+ dev->ethtool_ops = &prestera_ethtool_ops;
+
+ netif_carrier_off(dev);
+
+ dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
+ dev->min_mtu = sw->mtu_min;
+ dev->max_mtu = sw->mtu_max;
+
+ err = prestera_hw_port_mtu_set(port, dev->mtu);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
+ id, dev->mtu);
+ goto err_port_init;
+ }
+
+	if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+		err = -EINVAL;
+		goto err_port_init;
+	}
+
+	/* the firmware requires that the port's MAC address start with the
+	 * first 5 bytes of the base MAC address
+	 */
+ memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
+ dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+
+ err = prestera_hw_port_mac_set(port, dev->dev_addr);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_hw_port_cap_get(port, &port->caps);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
+ goto err_port_init;
+ }
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
+ port->caps.supp_fec);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_rxtx_port_init(port);
+ if (err)
+ goto err_port_init;
+
+ INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
+ &prestera_port_stats_update);
+
+ prestera_port_list_add(port);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_register_netdev;
+
+ prestera_devlink_port_set(port);
+
+ return 0;
+
+err_register_netdev:
+ prestera_port_list_del(port);
+err_port_init:
+ prestera_devlink_port_unregister(port);
+err_dl_port_register:
+err_port_info_get:
+ free_netdev(dev);
+ return err;
+}
+
+static void prestera_port_destroy(struct prestera_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
+ prestera_devlink_port_clear(port);
+ unregister_netdev(dev);
+ prestera_port_list_del(port);
+ prestera_devlink_port_unregister(port);
+ free_netdev(dev);
+}
+
+static void prestera_destroy_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+}
+
+static int prestera_create_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+ u32 port_idx;
+ int err;
+
+ for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
+ err = prestera_port_create(sw, port_idx);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+
+ return err;
+}
+
+static void prestera_port_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct delayed_work *caching_dw;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->port_evt.port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
+ if (evt->port_evt.data.oper_state) {
+ netif_carrier_on(port->dev);
+ if (!delayed_work_pending(caching_dw))
+ queue_delayed_work(prestera_wq, caching_dw, 0);
+ } else {
+ netif_carrier_off(port->dev);
+ if (delayed_work_pending(caching_dw))
+ cancel_delayed_work(caching_dw);
+ }
+ }
+}
+
+static int prestera_event_handlers_register(struct prestera_switch *sw)
+{
+ return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event,
+ NULL);
+}
+
+static void prestera_event_handlers_unregister(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event);
+}
+
+static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
+{
+ struct device_node *base_mac_np;
+ struct device_node *np;
+ const char *base_mac;
+
+	np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+	base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+	of_node_put(np);
+
+	base_mac = of_get_mac_address(base_mac_np);
+	of_node_put(base_mac_np);
+ if (!IS_ERR(base_mac))
+ ether_addr_copy(sw->base_mac, base_mac);
+
+ if (!is_valid_ether_addr(sw->base_mac)) {
+ eth_random_addr(sw->base_mac);
+ dev_info(prestera_dev(sw), "using random base mac address\n");
+ }
+
+ return prestera_hw_switch_mac_set(sw, sw->base_mac);
+}
+
+bool prestera_netdev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &prestera_netdev_ops;
+}
+
+static int prestera_lower_dev_walk(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct prestera_port **pport = (struct prestera_port **)priv->data;
+
+ if (prestera_netdev_check(dev)) {
+ *pport = netdev_priv(dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
+{
+ struct prestera_port *port = NULL;
+ struct netdev_nested_priv priv = {
+ .data = (void *)&port,
+ };
+
+ if (prestera_netdev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv);
+
+ return port;
+}
+
+static int prestera_netdev_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGEUPPER:
+ return prestera_bridge_port_event(dev, event, ptr);
+ default:
+ return 0;
+ }
+}
+
+static int prestera_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ if (prestera_netdev_check(dev))
+ err = prestera_netdev_port_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
+{
+ sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
+
+ return register_netdevice_notifier(&sw->netdev_nb);
+}
+
+static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
+{
+ unregister_netdevice_notifier(&sw->netdev_nb);
+}
+
+static int prestera_switch_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_switch_init(sw);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to init Switch device\n");
+ return err;
+ }
+
+ rwlock_init(&sw->port_list_lock);
+ INIT_LIST_HEAD(&sw->port_list);
+
+ err = prestera_switch_set_base_mac_addr(sw);
+ if (err)
+ return err;
+
+ err = prestera_netdev_event_handler_register(sw);
+ if (err)
+ return err;
+
+ err = prestera_switchdev_init(sw);
+ if (err)
+ goto err_swdev_register;
+
+ err = prestera_rxtx_switch_init(sw);
+ if (err)
+ goto err_rxtx_register;
+
+ err = prestera_event_handlers_register(sw);
+ if (err)
+ goto err_handlers_register;
+
+ err = prestera_devlink_register(sw);
+ if (err)
+ goto err_dl_register;
+
+ err = prestera_create_ports(sw);
+ if (err)
+ goto err_ports_create;
+
+ return 0;
+
+err_ports_create:
+ prestera_devlink_unregister(sw);
+err_dl_register:
+ prestera_event_handlers_unregister(sw);
+err_handlers_register:
+ prestera_rxtx_switch_fini(sw);
+err_rxtx_register:
+ prestera_switchdev_fini(sw);
+err_swdev_register:
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+
+ return err;
+}
+
+static void prestera_switch_fini(struct prestera_switch *sw)
+{
+ prestera_destroy_ports(sw);
+ prestera_devlink_unregister(sw);
+ prestera_event_handlers_unregister(sw);
+ prestera_rxtx_switch_fini(sw);
+ prestera_switchdev_fini(sw);
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+}
+
+int prestera_device_register(struct prestera_device *dev)
+{
+ struct prestera_switch *sw;
+ int err;
+
+ sw = prestera_devlink_alloc();
+ if (!sw)
+ return -ENOMEM;
+
+ dev->priv = sw;
+ sw->dev = dev;
+
+ err = prestera_switch_init(sw);
+ if (err) {
+ prestera_devlink_free(sw);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(prestera_device_register);
+
+void prestera_device_unregister(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+
+ prestera_switch_fini(sw);
+ prestera_devlink_free(sw);
+}
+EXPORT_SYMBOL(prestera_device_unregister);
+
+static int __init prestera_module_init(void)
+{
+ prestera_wq = alloc_workqueue("prestera", 0, 0);
+ if (!prestera_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit prestera_module_exit(void)
+{
+ destroy_workqueue(prestera_wq);
+}
+
+module_init(prestera_module_init);
+module_exit(prestera_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch driver");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
new file mode 100644
index 000000000000..1b97adae542e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "prestera.h"
+
+#define PRESTERA_MSG_MAX_SIZE 1500
+
+#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MIN_VER 0
+
+#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
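+/* with the supported version above, this expands to
+ * "mrvl/prestera/mvsw_prestera_fw-v2.0.img"
+ */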
+
+#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
+#define PRESTERA_FW_DL_TIMEOUT_MS 50000
+#define PRESTERA_FW_BLK_SZ 1024
+
+#define PRESTERA_FW_VER_MAJ_MUL 1000000
+#define PRESTERA_FW_VER_MIN_MUL 1000
+
+#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL)
+
+#define PRESTERA_FW_VER_MIN(v) \
+ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \
+ PRESTERA_FW_VER_MIN_MUL)
+
+#define PRESTERA_FW_VER_PATCH(v) \
+ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \
+ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL))
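+
+/* e.g. a packed version_value of 2001003 decodes to 2.1.3:
+ * maj = 2001003 / 1000000 = 2, min = 1003 / 1000 = 1, patch = 3
+ */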
+
+enum prestera_pci_bar_t {
+ PRESTERA_PCI_BAR_FW = 2,
+ PRESTERA_PCI_BAR_PP = 4,
+};
+
+struct prestera_fw_header {
+ __be32 magic_number;
+ __be32 version_value;
+ u8 reserved[8];
+};
+
+struct prestera_ldr_regs {
+ u32 ldr_ready;
+ u32 pad1;
+
+ u32 ldr_img_size;
+ u32 ldr_ctl_flags;
+
+ u32 ldr_buf_offs;
+ u32 ldr_buf_size;
+
+ u32 ldr_buf_rd;
+ u32 pad2;
+ u32 ldr_buf_wr;
+
+ u32 ldr_status;
+};
+
+#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f)
+
+#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed
+
+#define PRESTERA_LDR_STATUS_IMG_DL BIT(0)
+#define PRESTERA_LDR_STATUS_START_FW BIT(1)
+#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2)
+#define PRESTERA_LDR_STATUS_NOMEM BIT(3)
+
+#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs)
+#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg))
+
+/* fw loader registers */
+#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready)
+#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size)
+#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags)
+#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size)
+#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs)
+#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd)
+#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr)
+#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status)
+
+#define PRESTERA_LDR_CTL_DL_START BIT(0)
+
+#define PRESTERA_EVT_QNUM_MAX 4
+
+struct prestera_fw_evtq_regs {
+ u32 rd_idx;
+ u32 pad1;
+ u32 wr_idx;
+ u32 pad2;
+ u32 offs;
+ u32 len;
+};
+
+struct prestera_fw_regs {
+ u32 fw_ready;
+ u32 pad;
+ u32 cmd_offs;
+ u32 cmd_len;
+ u32 evt_offs;
+ u32 evt_qnum;
+
+ u32 cmd_req_ctl;
+ u32 cmd_req_len;
+ u32 cmd_rcv_ctl;
+ u32 cmd_rcv_len;
+
+ u32 fw_status;
+ u32 rx_status;
+
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+};
+
+#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
+
+#define PRESTERA_FW_READY_MAGIC 0xcafebabe
+
+/* fw registers */
+#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready)
+
+#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
+#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
+#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
+
+#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
+#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+
+#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
+#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
+#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
+#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
+
+/* PRESTERA_CMD_REQ_CTL_REG flags */
+#define PRESTERA_CMD_F_REQ_SENT BIT(0)
+#define PRESTERA_CMD_F_REPL_RCVD BIT(1)
+
+/* PRESTERA_CMD_RCV_CTL_REG flags */
+#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+
+#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(evtq_list) + \
+ (q) * sizeof(struct prestera_fw_evtq_regs) + \
+ offsetof(struct prestera_fw_evtq_regs, f))
+
+#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
+#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
+#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
+#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs)
+#define PRESTERA_FW_REG_ADDR(fw, reg)	(PRESTERA_FW_REG_BASE(fw) + (reg))
+
+#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
+#define PRESTERA_FW_READY_WAIT_MS 20000
+
+struct prestera_fw_evtq {
+ u8 __iomem *addr;
+ size_t len;
+};
+
+struct prestera_fw {
+ struct workqueue_struct *wq;
+ struct prestera_device dev;
+ u8 __iomem *ldr_regs;
+ u8 __iomem *ldr_ring_buf;
+ u32 ldr_buf_len;
+ u32 ldr_wr_idx;
+ struct mutex cmd_mtx; /* serialize access to dev->send_req */
+ size_t cmd_mbox_len;
+ u8 __iomem *cmd_mbox;
+ struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
+ u8 evt_qnum;
+ struct work_struct evt_work;
+ u8 __iomem *evt_buf;
+ u8 *evt_msg;
+};
+
+static int prestera_fw_load(struct prestera_fw *fw);
+
+static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].len;
+}
+
+static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
+{
+ u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
+}
+
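+/* event queue lengths are assumed to be powers of two, so read indices
+ * can wrap with a simple mask
+ */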
+static void prestera_fw_evtq_rd_set(struct prestera_fw *fw,
+ u8 qid, u32 idx)
+{
+ u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
+
+ prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
+}
+
+static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].addr;
+}
+
+static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
+{
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u32 val;
+
+ val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
+ prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
+ return val;
+}
+
+static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw,
+ u8 qid, void *buf, size_t len)
+{
+ u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
+ u32 *buf32 = buf;
+ int i;
+
+ for (i = 0; i < len / 4; buf32++, i++) {
+ *buf32 = readl_relaxed(evtq_addr + idx);
+ idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
+ }
+
+ prestera_fw_evtq_rd_set(fw, qid, idx);
+
+ return i;
+}
+
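+/* returns the first queue with at least one 32-bit word pending, or
+ * PRESTERA_EVT_QNUM_MAX when every queue is empty
+ */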
+static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
+{
+ int qid;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ if (prestera_fw_evtq_avail(fw, qid) >= 4)
+ return qid;
+ }
+
+ return PRESTERA_EVT_QNUM_MAX;
+}
+
+static void prestera_fw_evt_work_fn(struct work_struct *work)
+{
+ struct prestera_fw *fw;
+ void *msg;
+ u8 qid;
+
+ fw = container_of(work, struct prestera_fw, evt_work);
+ msg = fw->evt_msg;
+
+ while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
+ u32 idx;
+ u32 len;
+
+ len = prestera_fw_evtq_read32(fw, qid);
+ idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
+
+ if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) {
+ prestera_fw_evtq_rd_set(fw, qid, idx + len);
+ continue;
+ }
+
+ prestera_fw_evtq_read_buf(fw, qid, msg, len);
+
+ if (fw->dev.recv_msg)
+ fw->dev.recv_msg(&fw->dev, msg, len);
+ }
+}
+
+static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
+ unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
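+/* Command mailbox handshake, as implemented below: the driver writes the
+ * request length and payload, sets PRESTERA_CMD_F_REQ_SENT, then polls
+ * PRESTERA_CMD_RCV_CTL_REG for PRESTERA_CMD_F_REPL_SENT. The reply payload
+ * is read from the mailbox just past the request, and the exchange is
+ * completed by writing PRESTERA_CMD_F_REPL_RCVD back to the FW.
+ */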
+static int prestera_fw_cmd_send(struct prestera_fw *fw,
+ void *in_msg, size_t in_size,
+ void *out_msg, size_t out_size,
+ unsigned int waitms)
+{
+ u32 ret_size;
+ int err;
+
+ if (!waitms)
+ waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
+
+ if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ return -EMSGSIZE;
+
+	/* wait for the FW to retire the previous reply */
+	err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+	if (err) {
+		dev_err(fw->dev.dev, "timed out waiting for FW to finish previous reply\n");
+		return err;
+	}
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
+ memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+
+ /* wait for reply from FW */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ PRESTERA_CMD_F_REPL_SENT, waitms);
+ if (err) {
+ dev_err(fw->dev.dev, "reply from FW is timed out\n");
+ goto cmd_exit;
+ }
+
+ ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ if (ret_size > out_size) {
+ dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
+ ret_size, out_size);
+ err = -EMSGSIZE;
+ goto cmd_exit;
+ }
+
+ memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+
+cmd_exit:
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ return err;
+}
+
+static int prestera_fw_send_req(struct prestera_device *dev,
+ void *in_msg, size_t in_size, void *out_msg,
+ size_t out_size, unsigned int waitms)
+{
+ struct prestera_fw *fw;
+ ssize_t ret;
+
+ fw = container_of(dev, struct prestera_fw, dev);
+
+ mutex_lock(&fw->cmd_mtx);
+ ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
+ mutex_unlock(&fw->cmd_mtx);
+
+ return ret;
+}
+
+static int prestera_fw_init(struct prestera_fw *fw)
+{
+ u8 __iomem *base;
+ int err;
+ u8 qid;
+
+ fw->dev.send_req = prestera_fw_send_req;
+ fw->ldr_regs = fw->dev.ctl_regs;
+
+ err = prestera_fw_load(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG,
+ PRESTERA_FW_READY_MAGIC,
+ PRESTERA_FW_READY_WAIT_MS);
+ if (err) {
+ dev_err(fw->dev.dev, "FW failed to start\n");
+ return err;
+ }
+
+ base = fw->dev.ctl_regs;
+
+ fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
+ fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
+ mutex_init(&fw->cmd_mtx);
+
+ fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
+ fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
+ fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!fw->evt_msg)
+ return -ENOMEM;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
+ struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
+
+ evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
+ evtq->addr = fw->evt_buf + offs;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_uninit(struct prestera_fw *fw)
+{
+ kfree(fw->evt_msg);
+}
+
+static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id)
+{
+ struct prestera_fw *fw = dev_id;
+
+ if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) {
+ prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0);
+
+ if (fw->dev.recv_pkt)
+ fw->dev.recv_pkt(&fw->dev);
+ }
+
+ queue_work(fw->wq, &fw->evt_work);
+
+ return IRQ_HANDLED;
+}
+
+static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
+ u32 reg, u32 cmp, unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
+ u32 buf_len = fw->ldr_buf_len;
+ u32 wr_idx = fw->ldr_wr_idx;
+ u32 rd_idx;
+
+ return readl_poll_timeout(addr, rd_idx,
+ CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
+ 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+}
+
+static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG);
+ unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL);
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC,
+ PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]",
+ prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG));
+ return err;
+ }
+
+ return 0;
+}
+
+static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n)
+{
+ fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1);
+}
+
+static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw)
+{
+ prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx);
+}
+
+static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw)
+{
+ return fw->ldr_ring_buf + fw->ldr_wr_idx;
+}
+
+static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len)
+{
+ int err;
+ int i;
+
+ err = prestera_ldr_wait_buf(fw, len);
+ if (err) {
+ dev_err(fw->dev.dev, "failed wait for sending firmware\n");
+ return err;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw));
+ prestera_ldr_wr_idx_move(fw, 4);
+ }
+
+ prestera_ldr_wr_idx_commit(fw);
+ return 0;
+}
+
+static int prestera_ldr_fw_send(struct prestera_fw *fw,
+ const char *img, u32 fw_size)
+{
+ u32 status;
+ u32 pos;
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG,
+ PRESTERA_LDR_STATUS_IMG_DL,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Loader is not ready to load image\n");
+ return err;
+ }
+
+ for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) {
+ if (pos + PRESTERA_FW_BLK_SZ > fw_size)
+ break;
+
+ err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ);
+ if (err)
+ return err;
+ }
+
+ if (pos < fw_size) {
+ err = prestera_ldr_send(fw, img + pos, fw_size - pos);
+ if (err)
+ return err;
+ }
+
+ err = prestera_ldr_wait_dl_finish(fw);
+ if (err)
+ return err;
+
+ status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG);
+
+ switch (status) {
+ case PRESTERA_LDR_STATUS_INVALID_IMG:
+ dev_err(fw->dev.dev, "FW img has bad CRC\n");
+ return -EINVAL;
+ case PRESTERA_LDR_STATUS_NOMEM:
+ dev_err(fw->dev.dev, "Loader has no enough mem\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
+ struct prestera_fw_rev *rev)
+{
+ u32 version = be32_to_cpu(hdr->version_value);
+
+ rev->maj = PRESTERA_FW_VER_MAJ(version);
+ rev->min = PRESTERA_FW_VER_MIN(version);
+ rev->sub = PRESTERA_FW_VER_PATCH(version);
+}
+
+static int prestera_fw_rev_check(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
+ u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
+
+ if (rev->maj == maj_supp && rev->min >= min_supp)
+ return 0;
+
+ dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ return -EINVAL;
+}
+
+static int prestera_fw_hdr_parse(struct prestera_fw *fw,
+ const struct firmware *img)
+{
+ struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u32 magic;
+
+ magic = be32_to_cpu(hdr->magic_number);
+ if (magic != PRESTERA_FW_HDR_MAGIC) {
+ dev_err(fw->dev.dev, "FW img hdr magic is invalid");
+ return -EINVAL;
+ }
+
+ prestera_fw_rev_parse(hdr, rev);
+
+ dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n",
+ rev->maj, rev->min, rev->sub);
+
+ return prestera_fw_rev_check(fw);
+}
+
+static int prestera_fw_load(struct prestera_fw *fw)
+{
+ size_t hlen = sizeof(struct prestera_fw_header);
+ const struct firmware *f;
+ char fw_path[128];
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
+ PRESTERA_LDR_READY_MAGIC,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "waiting for FW loader is timed out");
+ return err;
+ }
+
+ fw->ldr_ring_buf = fw->ldr_regs +
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG);
+
+ fw->ldr_buf_len =
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG);
+
+ fw->ldr_wr_idx = 0;
+
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ err = request_firmware_direct(&f, fw_path, fw->dev.dev);
+ if (err) {
+ dev_err(fw->dev.dev, "failed to request firmware file\n");
+ return err;
+ }
+
+ err = prestera_fw_hdr_parse(fw, f);
+ if (err) {
+ dev_err(fw->dev.dev, "FW image header is invalid\n");
+ goto out_release;
+ }
+
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+
+out_release:
+ release_firmware(f);
+ return err;
+}
+
+static int prestera_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const char *driver_name = pdev->driver->name;
+ struct prestera_fw *fw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
+ BIT(PRESTERA_PCI_BAR_PP),
+ pci_name(pdev));
+ if (err)
+ return err;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+	if (err) {
+		dev_err(&pdev->dev, "failed to set DMA mask\n");
+		goto err_dma_mask;
+	}
+
+ pci_set_master(pdev);
+
+ fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ err = -ENOMEM;
+ goto err_pci_dev_alloc;
+ }
+
+ fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
+ fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->dev.dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, fw);
+
+ err = prestera_fw_init(fw);
+ if (err)
+ goto err_prestera_fw_init;
+
+ dev_info(fw->dev.dev, "Prestera FW is ready\n");
+
+	fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+	if (!fw->wq) {
+		err = -ENOMEM;
+		goto err_wq_alloc;
+	}
+
+ INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI IRQ init failed\n");
+ goto err_irq_alloc;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler,
+ 0, driver_name, fw);
+ if (err) {
+ dev_err(&pdev->dev, "fail to request IRQ\n");
+ goto err_request_irq;
+ }
+
+ err = prestera_device_register(&fw->dev);
+ if (err)
+ goto err_prestera_dev_register;
+
+ return 0;
+
+err_prestera_dev_register:
+ free_irq(pci_irq_vector(pdev, 0), fw);
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_alloc:
+ destroy_workqueue(fw->wq);
+err_wq_alloc:
+ prestera_fw_uninit(fw);
+err_prestera_fw_init:
+err_pci_dev_alloc:
+err_dma_mask:
+ return err;
+}
+
+static void prestera_pci_remove(struct pci_dev *pdev)
+{
+ struct prestera_fw *fw = pci_get_drvdata(pdev);
+
+ prestera_device_unregister(&fw->dev);
+ free_irq(pci_irq_vector(pdev, 0), fw);
+ pci_free_irq_vectors(pdev);
+ destroy_workqueue(fw->wq);
+ prestera_fw_uninit(fw);
+}
+
+static const struct pci_device_id prestera_pci_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
+
+static struct pci_driver prestera_pci_driver = {
+ .name = "Prestera DX",
+ .id_table = prestera_pci_devices,
+ .probe = prestera_pci_probe,
+ .remove = prestera_pci_remove,
+};
+module_pci_driver(prestera_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
new file mode 100644
index 000000000000..2a13c318048c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "prestera_dsa.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+
+#define PRESTERA_SDMA_WAIT_MUL 10
+
+struct prestera_sdma_desc {
+ __le32 word1;
+ __le32 word2;
+ __le32 buff;
+ __le32 next;
+} __packed __aligned(16);
+
+#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544
+
+#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
+ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))
+
+#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
+ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1
+
+#define PRESTERA_SDMA_RX_QUEUE_NUM 8
+
+#define PRESTERA_SDMA_RX_DESC_PER_Q 1000
+
+#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
+#define PRESTERA_SDMA_TX_MAX_BURST 64
+
+#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U
+
+#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
+ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
+#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
+#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)
+
+#define PRESTERA_SDMA_TX_DESC_SINGLE \
+ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)
+
+#define PRESTERA_SDMA_TX_DESC_INIT \
+ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)
+
+#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
+#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
+#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)
+
+#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
+#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868
+
+struct prestera_sdma_buf {
+ struct prestera_sdma_desc *desc;
+ dma_addr_t desc_dma;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ bool is_used;
+};
+
+struct prestera_rx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_rx;
+};
+
+struct prestera_tx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_tx;
+ int max_burst;
+ int burst;
+};
+
+struct prestera_sdma {
+ struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
+ struct prestera_tx_ring tx_ring;
+ struct prestera_switch *sw;
+ struct dma_pool *desc_pool;
+ struct work_struct tx_work;
+ struct napi_struct rx_napi;
+ struct net_device napi_dev;
+ u32 map_addr;
+ u64 dma_mask;
+	/* protect the SDMA engine from concurrent access by multiple CPUs */
+ spinlock_t tx_lock;
+};
+
+struct prestera_rxtx {
+ struct prestera_sdma sdma;
+};
+
+static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct prestera_sdma_desc *desc;
+ dma_addr_t dma;
+
+ desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
+ if (!desc)
+ return -ENOMEM;
+
+ buf->buf_dma = DMA_MAPPING_ERROR;
+ buf->desc_dma = dma;
+ buf->desc = desc;
+ buf->skb = NULL;
+
+ return 0;
+}
+
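+/* translate a host DMA address into the window the SDMA engine uses;
+ * map_addr is the base offset reported by the FW at rxtx init time
+ */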
+static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
+{
+ return sdma->map_addr + pa;
+}
+
+static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
+ desc->word2 = cpu_to_le32(word);
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+
+	/* make sure the buffer address is written before resetting the
+	 * descriptor ownership
+	 */
+ wmb();
+
+ desc->word1 = cpu_to_le32(0xA0000000);
+}
+
+static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dev = sdma->sw->dev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto err_dma_map;
+
+ if (buf->skb)
+ dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+
+err_dma_map:
+ kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ dma_addr_t buf_dma = buf->buf_dma;
+ struct sk_buff *skb = buf->skb;
+ u32 len = skb->len;
+ int err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, buf);
+ if (err) {
+ buf->buf_dma = buf_dma;
+ buf->skb = skb;
+
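+		/* refill failed: keep the mapped buffer on the ring and
+		 * hand a copy of the frame to the stack instead
+		 */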
+ skb = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ skb_copy_from_linear_data(buf->skb, skb->data, len);
+ }
+ }
+
+ prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);
+
+ return skb;
+}
+
+static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ const struct prestera_port *port;
+ struct prestera_dsa dsa;
+ u32 hw_port, dev_id;
+ int err;
+
+ skb_pull(skb, ETH_HLEN);
+
+	/* the EtherType field is part of the DSA header */
+ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
+ if (err)
+ return err;
+
+ dev_id = dsa.hw_dev_num;
+ hw_port = dsa.port_num;
+
+ port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
+ if (unlikely(!port)) {
+ dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
+ dev_id, hw_port);
+ return -ENOENT;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
+ return -EINVAL;
+
+ /* remove DSA tag and update checksum */
+ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);
+
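+	/* shift the original MAC addresses forward over the stripped tag */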
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
+ ETH_ALEN * 2);
+
+ skb_push(skb, ETH_HLEN);
+
+ skb->protocol = eth_type_trans(skb, port->dev);
+
+ if (dsa.vlan.is_tagged) {
+ u16 tci = dsa.vlan.vid & VLAN_VID_MASK;
+
+ tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
+ if (dsa.vlan.cfi_bit)
+ tci |= VLAN_CFI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+ }
+
+ return 0;
+}
+
+static int prestera_sdma_next_rx_buf_idx(int buf_idx)
+{
+ return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
+}
+
+static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ unsigned int rxq_done_map = 0;
+ struct prestera_sdma *sdma;
+ struct list_head rx_list;
+ unsigned int qmask;
+ int pkts_done = 0;
+ int q;
+
+	qmask = GENMASK(qnum - 1, 0);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ sdma = container_of(napi, struct prestera_sdma, rx_napi);
+
+ while (pkts_done < budget && rxq_done_map != qmask) {
+ for (q = 0; q < qnum && pkts_done < budget; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+ struct prestera_sdma_desc *desc;
+ struct prestera_sdma_buf *buf;
+ int buf_idx = ring->next_rx;
+ struct sk_buff *skb;
+
+ buf = &ring->bufs[buf_idx];
+ desc = buf->desc;
+
+ if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
+ rxq_done_map &= ~BIT(q);
+ } else {
+ rxq_done_map |= BIT(q);
+ continue;
+ }
+
+ pkts_done++;
+
+ __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));
+
+ skb = prestera_sdma_rx_skb_get(sdma, buf);
+ if (!skb)
+ goto rx_next_buf;
+
+ if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
+ goto rx_next_buf;
+
+ list_add_tail(&skb->list, &rx_list);
+rx_next_buf:
+ ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
+ }
+ }
+
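+	/* budget not exhausted: complete NAPI and re-arm the RX interrupts
+	 * that were masked before napi_schedule()
+	 */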
+ if (pkts_done < budget && napi_complete_done(napi, pkts_done))
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
+ GENMASK(9, 2));
+
+ netif_receive_skb_list(&rx_list);
+
+ return pkts_done;
+}
+
+static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int q, b;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ if (!ring->bufs)
+ break;
+
+ for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc_dma)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ if (buf->buf_dma != DMA_MAPPING_ERROR)
+ dma_unmap_single(sdma->sw->dev->dev,
+ buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(buf->skb);
+ }
+ }
+}
+
+static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
+{
+ int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int err;
+ int q;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!ring->bufs)
+ return -ENOMEM;
+
+ ring->next_rx = 0;
+
+ tail = &ring->bufs[bnum - 1];
+ head = &ring->bufs[0];
+ next = head;
+ prev = next;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, next);
+ if (err)
+ return err;
+
+ prestera_sdma_rx_desc_init(sdma, next->desc,
+ next->buf_dma);
+
+ prestera_sdma_rx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
+ prestera_sdma_map(sdma, head->desc_dma));
+ }
+
+ /* make sure all rx descs are filled before enabling all rx queues */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(7, 0));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc)
+{
+ desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
+ desc->word2 = 0;
+}
+
+static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf, size_t len)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+ desc->word2 = cpu_to_le32(word);
+}
+
+static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
+{
+ u32 word = le32_to_cpu(desc->word1);
+
+ word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;
+
+	/* make sure everything is written before enabling xmit */
+ wmb();
+
+ desc->word1 = cpu_to_le32(word);
+}
+
+static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -ENOMEM;
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+}
+
+static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+
+ dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
+}
+
+static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
+{
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma *sdma;
+ int b;
+
+ sdma = container_of(work, struct prestera_sdma, tx_work);
+
+ tx_ring = &sdma->tx_ring;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
+
+ if (!buf->is_used)
+ continue;
+
+ if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
+ continue;
+
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+ dev_consume_skb_any(buf->skb);
+ buf->skb = NULL;
+
+ /* make sure everything is cleaned up */
+ wmb();
+
+ buf->is_used = false;
+ }
+}
+
+static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
+{
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int err;
+
+ INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
+ spin_lock_init(&sdma->tx_lock);
+
+ tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!tx_ring->bufs)
+ return -ENOMEM;
+
+ tail = &tx_ring->bufs[bnum - 1];
+ head = &tx_ring->bufs[0];
+ next = head;
+ prev = next;
+
+ tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
+ tx_ring->burst = tx_ring->max_burst;
+ tx_ring->next_tx = 0;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ next->is_used = false;
+
+ prestera_sdma_tx_desc_init(sdma, next->desc);
+
+ prestera_sdma_tx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ /* make sure descriptors are written */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
+ prestera_sdma_map(sdma, head->desc_dma));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
+{
+ struct prestera_tx_ring *ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int b;
+
+ cancel_work_sync(&sdma->tx_work);
+
+ if (!ring->bufs)
+ return;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
+ buf->skb->len, DMA_TO_DEVICE);
+
+ dev_consume_skb_any(buf->skb);
+ }
+}
+
+static void prestera_rxtx_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt,
+ void *arg)
+{
+ struct prestera_sdma *sdma = arg;
+
+ if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
+ return;
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
+ napi_schedule(&sdma->rx_napi);
+}
+
+static int prestera_sdma_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+ struct device *dev = sw->dev->dev;
+ struct prestera_rxtx_params p;
+ int err;
+
+ p.use_sdma = true;
+
+ err = prestera_hw_rxtx_init(sw, &p);
+ if (err) {
+ dev_err(dev, "failed to init rxtx by hw\n");
+ return err;
+ }
+
+ sdma->dma_mask = dma_get_mask(dev);
+ sdma->map_addr = p.map_addr;
+ sdma->sw = sw;
+
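+ /* All RX/TX descriptors are carved from a single coherent DMA
+ * pool with 16-byte alignment.
+ */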
+ sdma->desc_pool = dma_pool_create("desc_pool", dev,
+ sizeof(struct prestera_sdma_desc),
+ 16, 0);
+ if (!sdma->desc_pool)
+ return -ENOMEM;
+
+ err = prestera_sdma_rx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init rx ring\n");
+ goto err_rx_init;
+ }
+
+ err = prestera_sdma_tx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init tx ring\n");
+ goto err_tx_init;
+ }
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event,
+ sdma);
+ if (err)
+ goto err_evt_register;
+
+ init_dummy_netdev(&sdma->napi_dev);
+
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ napi_enable(&sdma->rx_napi);
+
+ return 0;
+
+err_evt_register:
+err_tx_init:
+ prestera_sdma_tx_fini(sdma);
+err_rx_init:
+ prestera_sdma_rx_fini(sdma);
+
+ dma_pool_destroy(sdma->desc_pool);
+ return err;
+}
+
+static void prestera_sdma_switch_fini(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+
+ napi_disable(&sdma->rx_napi);
+ netif_napi_del(&sdma->rx_napi);
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
+ prestera_sdma_tx_fini(sdma);
+ prestera_sdma_rx_fini(sdma);
+ dma_pool_destroy(sdma->desc_pool);
+}
+
+static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
+{
+ return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
+}
+
+static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
+ struct prestera_tx_ring *tx_ring)
+{
+ int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;
+
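+ /* Busy-poll for the engine to go idle, bounded to roughly
+ * PRESTERA_SDMA_WAIT_MUL microseconds per queued burst entry.
+ */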
+ do {
+ if (prestera_sdma_is_ready(sdma))
+ return 0;
+
+ udelay(1);
+ } while (--tx_wait_num);
+
+ return -EBUSY;
+}
+
+static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
+{
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
+ schedule_work(&sdma->tx_work);
+}
+
+static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ struct net_device *dev = skb->dev;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma_buf *buf;
+ int err;
+
+ spin_lock(&sdma->tx_lock);
+
+ tx_ring = &sdma->tx_ring;
+
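+ /* If the next slot is still in flight the frame is dropped rather
+ * than the queue stopped; kick the recycle worker so completed
+ * slots are reclaimed.
+ */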
+ buf = &tx_ring->bufs[tx_ring->next_tx];
+ if (buf->is_used) {
+ schedule_work(&sdma->tx_work);
+ goto drop_skb;
+ }
+
+ if (unlikely(eth_skb_pad(skb)))
+ goto drop_skb_nofree;
+
+ err = prestera_sdma_tx_buf_map(sdma, buf, skb);
+ if (err)
+ goto drop_skb;
+
+ prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);
+
+ dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
+ DMA_TO_DEVICE);
+
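+ /* Up to max_burst frames are queued back to back without checking
+ * the engine; once that budget is used up, wait for the queue to
+ * drain before reusing it.
+ */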
+ if (tx_ring->burst) {
+ tx_ring->burst--;
+ } else {
+ tx_ring->burst = tx_ring->max_burst;
+
+ err = prestera_sdma_tx_wait(sdma, tx_ring);
+ if (err)
+ goto drop_skb_unmap;
+ }
+
+ tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
+ prestera_sdma_tx_desc_xmit(buf->desc);
+ buf->is_used = true;
+
+ prestera_sdma_tx_start(sdma);
+
+ goto tx_done;
+
+drop_skb_unmap:
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+drop_skb:
+ dev_kfree_skb_any(skb);
+drop_skb_nofree:
+ dev->stats.tx_dropped++;
+tx_done:
+ spin_unlock(&sdma->tx_lock);
+ return NETDEV_TX_OK;
+}
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_rxtx *rxtx;
+
+ rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
+ if (!rxtx)
+ return -ENOMEM;
+
+ sw->rxtx = rxtx;
+
+ return prestera_sdma_switch_init(sw);
+}
+
+void prestera_rxtx_switch_fini(struct prestera_switch *sw)
+{
+ prestera_sdma_switch_fini(sw);
+ kfree(sw->rxtx);
+}
+
+int prestera_rxtx_port_init(struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_hw_rxtx_port_init(port);
+ if (err)
+ return err;
+
+ port->dev->needed_headroom = PRESTERA_DSA_HLEN;
+
+ return 0;
+}
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
+{
+ struct prestera_dsa dsa;
+
+ dsa.hw_dev_num = port->dev_id;
+ dsa.port_num = port->hw_id;
+
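+ /* Open a PRESTERA_DSA_HLEN gap behind the Ethernet addresses and
+ * build the DSA tag there, in front of the original EtherType.
+ */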
+ if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
+ goto err_drop;
+
+ skb_push(skb, PRESTERA_DSA_HLEN);
+ memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);
+
+ if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
+ goto err_drop;
+
+ return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
+
+err_drop:
+ dev_kfree_skb_any(skb);
+ return NET_XMIT_DROP;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
new file mode 100644
index 000000000000..882a1225c323
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_RXTX_H_
+#define _PRESTERA_RXTX_H_
+
+#include <linux/netdevice.h>
+
+struct prestera_switch;
+struct prestera_port;
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw);
+void prestera_rxtx_switch_fini(struct prestera_switch *sw);
+
+int prestera_rxtx_port_init(struct prestera_port *port);
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb);
+
+#endif /* _PRESTERA_RXTX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
new file mode 100644
index 000000000000..7d83e1f91ef1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_VID_ALL (0xffff)
+
+#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
+#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
+#define PRESTERA_MIN_AGEING_TIME_MS 32000
+
+struct prestera_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct prestera_switchdev {
+ struct prestera_switch *sw;
+ struct list_head bridge_list;
+ bool bridge_8021q_exists;
+ struct notifier_block swdev_nb_blk;
+ struct notifier_block swdev_nb;
+};
+
+struct prestera_bridge {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_switchdev *swdev;
+ struct list_head port_list;
+ bool vlan_enabled;
+ u16 bridge_id;
+};
+
+struct prestera_bridge_port {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_bridge *bridge;
+ struct list_head vlan_list;
+ refcount_t ref_count;
+ unsigned long flags;
+ u8 stp_state;
+};
+
+struct prestera_bridge_vlan {
+ struct list_head head;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct prestera_port_vlan {
+ struct list_head br_vlan_head;
+ struct list_head port_head;
+ struct prestera_port *port;
+ struct prestera_bridge_port *br_port;
+ u16 vid;
+};
+
+static struct workqueue_struct *swdev_wq;
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state);
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
+ if (!br_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&br_vlan->port_vlan_list);
+ br_vlan->vid = vid;
+ list_add(&br_vlan->head, &br_port->vlan_list);
+
+ return br_vlan;
+}
+
+static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
+{
+ list_del(&br_vlan->head);
+ WARN_ON(!list_empty(&br_vlan->port_vlan_list));
+ kfree(br_vlan);
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid)
+ return br_vlan;
+ }
+
+ return NULL;
+}
+
+static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
+ u16 vid)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int count = 0;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid) {
+ count += 1;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
+{
+ if (list_empty(&br_vlan->port_vlan_list))
+ prestera_bridge_vlan_destroy(br_vlan);
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
+ if (port_vlan->vid == vid)
+ return port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
+{
+ struct prestera_port_vlan *port_vlan;
+ int err;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = prestera_hw_vlan_port_set(port, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
+ if (!port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ port_vlan->port = port;
+ port_vlan->vid = vid;
+
+ list_add(&port_vlan->port_head, &port->vlans_list);
+
+ return port_vlan;
+
+err_port_vlan_alloc:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ return ERR_PTR(err);
+}
+
+static void
+prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
+{
+ u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ struct prestera_bridge_port *br_port;
+ bool last_port, last_vlan;
+ u16 vid = port_vlan->vid;
+ int port_count;
+
+ br_port = port_vlan->br_port;
+ port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+
+ last_vlan = list_is_singular(&br_port->vlan_list);
+ last_port = port_count == 1;
+
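+ /* Flush the narrowest FDB scope still covering this entry: the
+ * whole port if this was its last VLAN, the whole VLAN if this was
+ * its last member port, otherwise just the (port, VLAN) pair.
+ */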
+ if (last_vlan)
+ prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ else if (last_port)
+ prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
+ else
+ prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+
+ list_del(&port_vlan->br_vlan_head);
+ prestera_bridge_vlan_put(br_vlan);
+ prestera_bridge_port_put(br_port);
+ port_vlan->br_port = NULL;
+}
+
+static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
+{
+ struct prestera_port *port = port_vlan->port;
+ u16 vid = port_vlan->vid;
+
+ if (port_vlan->br_port)
+ prestera_port_vlan_bridge_leave(port_vlan);
+
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ list_del(&port_vlan->port_head);
+ kfree(port_vlan);
+}
+
+static struct prestera_bridge *
+prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
+{
+ bool vlan_enabled = br_vlan_enabled(dev);
+ struct prestera_bridge *bridge;
+ u16 bridge_id;
+ int err;
+
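+ /* Only a single VLAN-aware (802.1Q) bridge can be offloaded;
+ * VLAN-unaware bridges each get a dedicated hardware bridge ID
+ * below instead.
+ */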
+ if (vlan_enabled && swdev->bridge_8021q_exists) {
+ netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ if (vlan_enabled) {
+ swdev->bridge_8021q_exists = true;
+ } else {
+ err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ bridge->bridge_id = bridge_id;
+ }
+
+ bridge->vlan_enabled = vlan_enabled;
+ bridge->swdev = swdev;
+ bridge->dev = dev;
+
+ INIT_LIST_HEAD(&bridge->port_list);
+
+ list_add(&bridge->head, &swdev->bridge_list);
+
+ return bridge;
+}
+
+static void prestera_bridge_destroy(struct prestera_bridge *bridge)
+{
+ struct prestera_switchdev *swdev = bridge->swdev;
+
+ list_del(&bridge->head);
+
+ if (bridge->vlan_enabled)
+ swdev->bridge_8021q_exists = false;
+ else
+ prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+
+ WARN_ON(!list_empty(&bridge->port_list));
+ kfree(bridge);
+}
+
+static void prestera_bridge_put(struct prestera_bridge *bridge)
+{
+ if (list_empty(&bridge->port_list))
+ prestera_bridge_destroy(bridge);
+}
+
+static
+struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
+ const struct net_device *dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &swdev->bridge_list, head)
+ if (bridge->dev == dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ if (br_port->dev == dev)
+ return br_port;
+ }
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
+ struct net_device *dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_by_dev(bridge, dev);
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_create(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return NULL;
+
+ br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ br_port->stp_state = BR_STATE_DISABLED;
+ refcount_set(&br_port->ref_count, 1);
+ br_port->bridge = bridge;
+ br_port->dev = dev;
+
+ INIT_LIST_HEAD(&br_port->vlan_list);
+ list_add(&br_port->head, &bridge->port_list);
+
+ return br_port;
+}
+
+static void
+prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
+{
+ list_del(&br_port->head);
+ WARN_ON(!list_empty(&br_port->vlan_list));
+ kfree(br_port);
+}
+
+static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
+{
+ refcount_inc(&br_port->ref_count);
+}
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
+{
+ struct prestera_bridge *bridge = br_port->bridge;
+
+ if (refcount_dec_and_test(&br_port->ref_count)) {
+ prestera_bridge_port_destroy(br_port);
+ prestera_bridge_put(bridge);
+ }
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, dev);
+ if (br_port) {
+ prestera_bridge_port_get(br_port);
+ return br_port;
+ }
+
+ br_port = prestera_bridge_port_create(bridge, dev);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ return br_port;
+}
+
+static int
+prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+ struct prestera_bridge *bridge = br_port->bridge;
+ int err;
+
+ err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_port_flood_set;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ return 0;
+
+err_port_learning_set:
+ prestera_hw_port_flood_set(port, false);
+err_port_flood_set:
+ prestera_hw_bridge_port_delete(port, bridge->bridge_id);
+
+ return err;
+}
+
+static int prestera_port_bridge_join(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge) {
+ bridge = prestera_bridge_create(swdev, upper);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ br_port = prestera_bridge_port_add(bridge, port->dev);
+ if (IS_ERR(br_port)) {
+ err = PTR_ERR(br_port);
+ goto err_brport_create;
+ }
+
+ if (bridge->vlan_enabled)
+ return 0;
+
+ err = prestera_bridge_1d_port_join(br_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ prestera_bridge_port_put(br_port);
+err_brport_create:
+ prestera_bridge_put(bridge);
+ return err;
+}
+
+static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
+}
+
+static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
+}
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state)
+{
+ u8 hw_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PRESTERA_STP_DISABLED;
+ break;
+
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ hw_state = PRESTERA_STP_BLOCK_LISTEN;
+ break;
+
+ case BR_STATE_LEARNING:
+ hw_state = PRESTERA_STP_LEARN;
+ break;
+
+ case BR_STATE_FORWARDING:
+ hw_state = PRESTERA_STP_FORWARD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
+}
+
+static void prestera_port_bridge_leave(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge)
+ return;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
+ if (!br_port)
+ return;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ prestera_bridge_1q_port_leave(br_port);
+ else
+ prestera_bridge_1d_port_leave(br_port);
+
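+ /* The port is standalone again: disable offloaded learning and
+ * flooding and return every VLAN to the forwarding state.
+ */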
+ prestera_hw_port_learning_set(port, false);
+ prestera_hw_port_flood_set(port, false);
+ prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
+ prestera_bridge_port_put(br_port);
+}
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct prestera_port *port;
+ struct net_device *upper;
+ int err;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ port = netdev_priv(dev);
+ upper = info->upper_dev;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ if (!netif_is_bridge_master(upper))
+ break;
+
+ if (info->linking) {
+ err = prestera_port_bridge_join(port, upper);
+ if (err)
+ return err;
+ } else {
+ prestera_port_bridge_leave(port, upper);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_flags_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ unsigned long flags)
+{
+ struct prestera_bridge_port *br_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
+ if (err)
+ return err;
+
+ br_port->flags = flags;
+
+ return 0;
+}
+
+static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
+ struct prestera_switch *sw = port->sw;
+
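+ /* Validate the range in the prepare phase so that the commit
+ * phase cannot fail, per the switchdev transaction model.
+ */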
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
+ else
+ return 0;
+ }
+
+ return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
+}
+
+static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ bool vlan_enabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge = prestera_bridge_by_dev(sw->swdev, dev);
+ if (WARN_ON(!bridge))
+ return -EINVAL;
+
+ if (bridge->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");
+
+ return -EINVAL;
+}
+
+static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
+ struct prestera_bridge_vlan *br_vlan,
+ u8 state)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
+ if (port_vlan->port != port)
+ continue;
+
+ return prestera_port_vid_stp_set(port, br_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ u8 state)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int err;
+ u16 vid;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ if (!br_port->bridge->vlan_enabled) {
+ vid = br_port->bridge->bridge_id;
+ err = prestera_port_vid_stp_set(port, vid, state);
+ if (err)
+ goto err_port_stp_set;
+ } else {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
+ state);
+ if (err)
+ goto err_port_vlan_stp_set;
+ }
+ }
+
+ br_port->stp_state = state;
+
+ return 0;
+
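+ /* Roll the VLANs that were already updated back to the previous
+ * STP state.
+ */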
+err_port_vlan_stp_set:
+ list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
+ prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
+ return err;
+
+err_port_stp_set:
+ prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+
+ return err;
+}
+
+static int prestera_port_obj_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = prestera_port_attr_stp_state_set(port, trans,
+ attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags &
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ err = -EINVAL;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = prestera_port_attr_br_flags_set(port, trans,
+ attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = prestera_port_attr_br_ageing_set(port, trans,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = prestera_port_attr_br_vlan_set(port, trans,
+ attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static void
+prestera_fdb_offload_notify(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *info)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = info->addr;
+ send_info.vid = info->vid;
+ send_info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
+ &send_info.info, NULL);
+}
+
+static int prestera_port_fdb_set(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *fdb_info,
+ bool adding)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (!br_port)
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ vid = fdb_info->vid;
+ else
+ vid = bridge->bridge_id;
+
+ if (adding)
+ err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ else
+ err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+
+ return err;
+}
+
+static void prestera_fdb_event_work(struct work_struct *work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct prestera_fdb_event_work *swdev_work;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ swdev_work = container_of(work, struct prestera_fdb_event_work, work);
+ dev = swdev_work->dev;
+
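+ /* Hold rtnl so the lower-device lookup and the FDB programming
+ * below run against a stable netdev topology.
+ */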
+ rtnl_lock();
+
+ port = prestera_port_dev_lower_find(dev);
+ if (!port)
+ goto out_unlock;
+
+ switch (swdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = prestera_port_fdb_set(port, fdb_info, true);
+ if (err)
+ break;
+
+ prestera_fdb_offload_notify(port, fdb_info);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ prestera_port_fdb_set(port, fdb_info, false);
+ break;
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+ kfree(swdev_work->fdb_info.addr);
+ kfree(swdev_work);
+ dev_put(dev);
+}
+
+static int prestera_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct prestera_fdb_event_work *swdev_work;
+ struct net_device *upper;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prestera_netdev_check(dev))
+ return NOTIFY_DONE;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
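+ /* FDB notifications arrive in atomic context; the hardware
+ * programming is deferred to the ordered switchdev workqueue.
+ */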
+ swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
+ if (!swdev_work)
+ return NOTIFY_BAD;
+
+ swdev_work->event = event;
+ swdev_work->dev = dev;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
+ memcpy(&swdev_work->fdb_info, ptr,
+ sizeof(swdev_work->fdb_info));
+
+ swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!swdev_work->fdb_info.addr)
+ goto out_bad;
+
+ ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+ break;
+
+ default:
+ kfree(swdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(swdev_wq, &swdev_work->work);
+ return NOTIFY_DONE;
+
+out_bad:
+ kfree(swdev_work);
+ return NOTIFY_BAD;
+}
+
+static int
+prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ u16 vid = port_vlan->vid;
+ int err;
+
+ if (port_vlan->br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+ if (!br_vlan) {
+ br_vlan = prestera_bridge_vlan_create(br_port, vid);
+ if (!br_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+ }
+
+ list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);
+
+ prestera_bridge_port_get(br_port);
+ port_vlan->br_port = br_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
+err_port_vid_stp_set:
+ prestera_hw_port_learning_set(port, false);
+err_port_learning_set:
+ return err;
+}
+
+static int
+prestera_bridge_port_vlan_add(struct prestera_port *port,
+ struct prestera_bridge_port *br_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port_vlan *port_vlan;
+ u16 old_pvid = port->pvid;
+ u16 pvid;
+ int err;
+
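+ /* Keep the current PVID unless this VLAN becomes the new PVID or
+ * it was the PVID and now loses that role (then clear it).
+ */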
+ if (is_pvid)
+ pvid = vid;
+ else
+ pvid = port->pvid == vid ? 0 : port->pvid;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan && port_vlan->br_port != br_port)
+ return -EEXIST;
+
+ if (!port_vlan) {
+ port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
+ if (IS_ERR(port_vlan))
+ return PTR_ERR(port_vlan);
+ } else {
+ err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+ }
+
+ err = prestera_port_pvid_set(port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = prestera_port_vlan_bridge_join(port_vlan, br_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ prestera_port_pvid_set(port, old_pvid);
+err_port_pvid_set:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+err_port_vlan_set:
+ prestera_port_vlan_destroy(port_vlan);
+
+ return err;
+}
+
+static void
+prestera_bridge_port_vlan_del(struct prestera_port *port,
+ struct prestera_bridge_port *br_port, u16 vid)
+{
+ u16 pvid = port->pvid == vid ? 0 : port->pvid;
+ struct prestera_port_vlan *port_vlan;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (WARN_ON(!port_vlan))
+ return;
+
+ prestera_port_vlan_bridge_leave(port_vlan);
+ prestera_port_pvid_set(port, pvid);
+ prestera_port_vlan_destroy(port_vlan);
+}
+
+static int prestera_port_vlans_add(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return 0;
+
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+ if (!bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = prestera_bridge_port_vlan_add(port, br_port,
+ vid, flag_untagged,
+ flag_pvid, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return prestera_port_vlans_add(port, vlan, trans, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_port_vlans_del(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return -EOPNOTSUPP;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ if (!br_port->bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ prestera_bridge_port_vlan_del(port, br_port, vid);
+
+ return 0;
+}
+
+static int prestera_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_switchdev_blk_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void prestera_fdb_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->fdb_evt.port_id);
+ if (!port)
+ return;
+
+ info.addr = evt->fdb_evt.data.mac;
+ info.vid = evt->fdb_evt.vid;
+ info.offloaded = true;
+
+ rtnl_lock();
+
+ switch (evt->id) {
+ case PRESTERA_FDB_EVENT_LEARNED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ case PRESTERA_FDB_EVENT_AGED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ }
+
+ rtnl_unlock();
+}
+
+static int prestera_fdb_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event, NULL);
+ if (err)
+ return err;
+
+ err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
+ if (err)
+ goto err_ageing_set;
+
+ return 0;
+
+err_ageing_set:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+ return err;
+}
+
+static void prestera_fdb_fini(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+}
+
+static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
+{
+ int err;
+
+ swdev->swdev_nb.notifier_call = prestera_switchdev_event;
+ err = register_switchdev_notifier(&swdev->swdev_nb);
+ if (err)
+ goto err_register_swdev_notifier;
+
+ swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
+ err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ if (err)
+ goto err_register_blk_swdev_notifier;
+
+ return 0;
+
+err_register_blk_swdev_notifier:
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+err_register_swdev_notifier:
+ return err;
+}
+
+static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
+{
+ unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+}
+
+int prestera_switchdev_init(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev;
+ int err;
+
+ swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return -ENOMEM;
+
+ sw->swdev = swdev;
+ swdev->sw = sw;
+
+ INIT_LIST_HEAD(&swdev->bridge_list);
+
+ swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
+ if (!swdev_wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ err = prestera_switchdev_handler_init(swdev);
+ if (err)
+ goto err_swdev_init;
+
+ err = prestera_fdb_init(sw);
+ if (err)
+ goto err_fdb_init;
+
+ return 0;
+
+err_fdb_init:
+ prestera_switchdev_handler_fini(swdev);
+err_swdev_init:
+ destroy_workqueue(swdev_wq);
+err_alloc_wq:
+ kfree(swdev);
+
+ return err;
+}
+
+void prestera_switchdev_fini(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev = sw->swdev;
+
+ prestera_fdb_fini(sw);
+ prestera_switchdev_handler_fini(swdev);
+ destroy_workqueue(swdev_wq);
+ kfree(swdev);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
new file mode 100644
index 000000000000..606e21d2355b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SWITCHDEV_H_
+#define _PRESTERA_SWITCHDEV_H_
+
+int prestera_switchdev_init(struct prestera_switch *sw);
+void prestera_switchdev_fini(struct prestera_switch *sw);
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr);
+
+#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index eb8cf60ecf12..d1e4d42e497d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1187,11 +1187,10 @@ static int pxa168_eth_stop(struct net_device *dev)
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
- int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
dev->mtu = mtu;
- retval = set_port_config_ext(pep);
+ set_port_config_ext(pep);
if (!netif_running(dev))
return 0;
@@ -1541,10 +1540,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
}
if (dev->phydev)
phy_disconnect(dev->phydev);
- if (pep->clk) {
- clk_disable_unprepare(pep->clk);
- }
+ clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6a930351cb23..8a9c0f490bfb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3338,9 +3338,9 @@ static void skge_error_irq(struct skge_hw *hw)
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long arg)
+static void skge_extirq(struct tasklet_struct *t)
{
- struct skge_hw *hw = (struct skge_hw *) arg;
+ struct skge_hw *hw = from_tasklet(hw, t, phy_task);
int port;
for (port = 0; port < hw->ports; port++) {
@@ -3927,7 +3927,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+ tasklet_setup(&hw->phy_task, skge_extirq);
hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 344864275ed5..25981a7a43b5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5105,7 +5105,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 300;
+ pdev->d3hot_delay = 300;
return 0;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 62a820b1eb16..3362b148de23 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -17,6 +17,7 @@ config NET_MEDIATEK_SOC
config NET_MEDIATEK_STAR_EMAC
tristate "MediaTek STAR Ethernet MAC support"
select PHYLIB
+ select REGMAP_MMIO
help
This driver supports the ethernet MAC IP first used on
MediaTek MT85** SoCs.
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 65f8a4b6ed0c..3b8576b9c2f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -55,11 +55,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx4_cq_tasklet_cb(unsigned long data)
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+ struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx4_cq *mcq, *temp;
spin_lock_irqsave(&ctx->lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index b816154bc79a..23849f2b9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1106,6 +1106,24 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
return err;
}
+static void mlx4_en_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
+
+ spin_lock_bh(&priv->stats_lock);
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_TX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->tx_pause_frames = priv->tx_flowstats.tx_pause;
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_RX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->rx_pause_frames = priv->rx_flowstats.rx_pause;
+ spin_unlock_bh(&priv->stats_lock);
+}
+
static void mlx4_en_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
@@ -2138,6 +2156,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
.set_coalesce = mlx4_en_set_coalesce,
+ .get_pause_stats = mlx4_en_get_pause_stats,
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b50c567ef508..502d1b97855c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -705,7 +705,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
- prefetchw(va);
+ net_prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true;
int done;
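+ /* A zero budget means the caller (e.g. netpoll) only wants TX
+ * completion work done, so skip RX processing entirely.
+ */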
+ if (!budget)
+ return 0;
+
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9dff7b086c9f..3ddb7268e415 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma,
};
- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
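+ /* Only recycle pages when called from NAPI context; otherwise
+ * unmap and release the page directly.
+ */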
+ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
@@ -842,6 +842,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
+ u32 __maybe_unused ring_cons;
int tx_ind;
int nr_txbb;
int desc_size;
@@ -855,7 +856,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- u32 ring_cons;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -1075,7 +1075,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index ae305c2e9225..9e48509ed3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1057,8 +1057,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 258c7a96f269..c326b434734e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3031,6 +3031,17 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
if (err)
return err;
+ /* Ethernet and IB drivers will normally set the port type,
+ * but if they are not built set the type now to prevent
+ * devlink_port_type_warn() from firing.
+ */
+ if (!IS_ENABLED(CONFIG_MLX4_EN) &&
+ dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ devlink_port_type_eth_set(&info->devlink_port, NULL);
+ else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
+ dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(&info->devlink_port, NULL);
+
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
@@ -3935,6 +3946,8 @@ static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
@@ -3951,7 +3964,8 @@ static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
-static int mlx4_devlink_reload_up(struct devlink *devlink,
+static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
@@ -3959,6 +3973,7 @@ static int mlx4_devlink_reload_up(struct devlink *devlink,
struct mlx4_dev_persistent *persist = dev->persist;
int err;
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
err = mlx4_restart_one_up(persist->pdev, true, devlink);
if (err)
mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
@@ -3969,6 +3984,7 @@ static int mlx4_devlink_reload_up(struct devlink *devlink,
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = mlx4_devlink_reload_down,
.reload_up = mlx4_devlink_reload_up,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 527b52e48276..64bed7ac3836 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1217,7 +1217,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
u16 op, unsigned long timeout);
-void mlx4_cq_tasklet_cb(unsigned long data);
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 86b6051da8ec..51d4eaab6a2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -84,6 +84,11 @@ struct mlx4_en_flow_stats_rx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_RX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX)
+
struct mlx4_en_flow_stats_tx {
u64 tx_pause;
u64 tx_pause_duration;
@@ -93,6 +98,13 @@ struct mlx4_en_flow_stats_tx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_TX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX + \
+ NUM_FLOW_STATS_RX + \
+ NUM_FLOW_PRIORITY_STATS_TX)
+
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_RX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 10e6886c96ba..2d477f9a8cb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
#
# Netdev basic
@@ -24,7 +24,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
- en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
- en/mapping.o esw/chains.o en/tc_tun.o \
+ en/mapping.o lib/fs_chains.o en/tc_tun.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
@@ -49,7 +49,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
- esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
+ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
+ esw/devlink_port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
index 2f13a250aab3..d6667d38e1de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIBt
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 8db4b5f0f963..291e427e9e4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -56,8 +56,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
size_t size, dma_addr_t *dma_handle,
int node)
{
+ struct device *device = mlx5_core_dma_dev(dev);
struct mlx5_priv *priv = &dev->priv;
- struct device *device = dev->device;
int original_node;
void *cpu_handle;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(dev->device, buf->size, buf->frags->buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@@ -140,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(dev->device, frag_sz,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@@ -153,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
- dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@@ -169,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@@ -275,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(dev->device, PAGE_SIZE,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 2d1f4b3be9bf..e49387dbef98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1989,9 +1989,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
-
- cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
@@ -2004,9 +2002,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
return 0;
}
- dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
- cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+ cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2 * MLX5_ADAPTER_PAGE_SIZE - 1,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
@@ -2020,9 +2018,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
-
- dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
}
@@ -2054,7 +2050,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (!cmd->stats)
return -ENOMEM;
- cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+ cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool) {
err = -ENOMEM;
goto dma_pool_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 8379b24cb838..df3e4938ecdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -42,11 +42,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx5_cq_tasklet_cb(unsigned long data)
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx5_core_cq *mcq;
struct mlx5_core_cq *temp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index c709e9a385f6..a28f95df2901 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -4,22 +4,19 @@
#include <devlink.h>
#include "mlx5_core.h"
+#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
const struct firmware *fw;
int err;
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, file_name, &dev->pdev->dev);
+ err = request_firmware_direct(&fw, params->file_name, &dev->pdev->dev);
if (err)
return err;
@@ -88,21 +85,96 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
+static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 reset_level, reset_type, net_port_alive;
+ int err;
+
+ err = mlx5_fw_reset_query(dev, &reset_level, &reset_type);
+ if (err)
+ return err;
+ if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL3)) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate requires reboot");
+ return -EINVAL;
+ }
+
+ net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ if (err)
+ goto out;
+
+ err = mlx5_fw_reset_wait_reset_done(dev);
+out:
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
+ return err;
+}
+
+static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 reset_level;
+ int err;
+
+ err = mlx5_fw_reset_query(dev, &reset_level, NULL);
+ if (err)
+ return err;
+ if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL0)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW upgrade to the stored FW can't be done by FW live patching");
+ return -EINVAL;
+ }
+
+ return mlx5_fw_reset_set_live_patch(dev);
+}
+
static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- mlx5_unload_one(dev, false);
- return 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ mlx5_unload_one(dev, false);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ return mlx5_devlink_reload_fw_activate(devlink, extack);
+ default:
+ /* Unsupported action should not get to this function */
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
}
-static int mlx5_devlink_reload_up(struct devlink *devlink,
+static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- return mlx5_load_one(dev, false);
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ return mlx5_load_one(dev, false);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ /* On the fw_activate action, the driver is also reloaded and re-init performed */
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return mlx5_load_one(dev, false);
+ default:
+ /* Unsupported action should not get to this function */
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static const struct devlink_ops mlx5_devlink_ops = {
@@ -118,6 +190,9 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
};
@@ -228,6 +303,24 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
}
#endif
+static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
+ return 0;
+}
+
+static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
+ return 0;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -243,6 +336,9 @@ static const struct devlink_param mlx5_devlink_params[] = {
NULL, NULL,
mlx5_devlink_large_group_num_validate),
#endif
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_enable_remote_dev_reset_get,
+ mlx5_devlink_enable_remote_dev_reset_set, NULL),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
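
The devlink hunks above implement the reload-actions contract: .reload_actions and .reload_limits advertise what the driver supports, reload_down() tears down according to the requested action, and reload_up() must report every action it actually performed through *actions_performed, which is why fw_activate also sets the DRIVER_REINIT bit when it reloads the driver. A minimal sketch of the wiring, with all sketch_* names illustrative:

	#include <net/devlink.h>

	static int sketch_reload_down(struct devlink *devlink, bool netns_change,
				      enum devlink_reload_action action,
				      enum devlink_reload_limit limit,
				      struct netlink_ext_ack *extack);
	static int sketch_reload_up(struct devlink *devlink,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    u32 *actions_performed,
				    struct netlink_ext_ack *extack);

	static const struct devlink_ops sketch_devlink_ops = {
		.reload_actions	= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
		.reload_limits	= BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
		.reload_down	= sketch_reload_down,
		.reload_up	= sketch_reload_up,
	};

From userspace this is driven by iproute2 along the lines of "devlink dev reload pci/0000:82:00.0 action fw_activate limit no_reset" (device name illustrative).
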
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index ad3594c4afcb..2eb022ad7fd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -124,7 +124,7 @@ static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
dma_addr_t dma;
void *buff;
gfp_t gfp;
@@ -142,6 +142,7 @@ static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
}
tracer->buff.log_buf = buff;
+ ddev = mlx5_core_dma_dev(dev);
dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma)) {
mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
@@ -162,11 +163,12 @@ free_pages:
static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
if (!tracer->buff.log_buf)
return;
+ ddev = mlx5_core_dma_dev(dev);
dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
}
@@ -1064,6 +1066,58 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
kvfree(tracer);
}
+static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ mlx5_fw_tracer_clean_ready_list(tracer);
+ mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_clean_saved_traces_array(tracer);
+ mlx5_fw_tracer_free_strings_db(tracer);
+
+ dev = tracer->dev;
+ err = mlx5_query_mtrc_caps(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
+ return err;
+ }
+
+ err = mlx5_fw_tracer_allocate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
+ return err;
+ }
+ mlx5_fw_tracer_init_saved_traces_array(tracer);
+
+ return 0;
+}
+
+int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ if (IS_ERR_OR_NULL(tracer))
+ return -EINVAL;
+
+ dev = tracer->dev;
+ mlx5_fw_tracer_cleanup(tracer);
+ err = mlx5_fw_tracer_recreate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to recreate FW tracer strings DB\n");
+ return err;
+ }
+ err = mlx5_fw_tracer_init(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to re-initialize FW tracer\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
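
mlx5_fw_tracer_reload() above packages a quiesce/drop/re-query/rebuild cycle for use after a firmware reset, since new firmware may expose a different strings database. The generic shape of that pattern, as a sketch in which every resource_* name is hypothetical:

	struct resource_ctx;
	void resource_stop(struct resource_ctx *res);
	void resource_drop_fw_state(struct resource_ctx *res);
	int resource_query_caps(struct resource_ctx *res);
	int resource_start(struct resource_ctx *res);

	/* Re-init on reset: everything derived from the old firmware must be
	 * rebuilt once the device comes back. */
	static int resource_reload(struct resource_ctx *res)
	{
		int err;

		resource_stop(res);		/* quiesce users of the old state */
		resource_drop_fw_state(res);	/* free caps/DB tied to the old FW */

		err = resource_query_caps(res);	/* re-read from the new FW */
		if (err)
			return err;

		return resource_start(res);	/* reallocate and re-arm */
	}
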
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 40601fba80ba..97252a85d65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -191,5 +191,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
struct devlink_fmsg *fmsg);
+int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index 4924a5658853..ed4fb79b4db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -78,7 +78,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
struct page *page)
{
struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = mlx5_core_dma_dev(dev);
u32 out_seq_num;
u32 in_seq_num;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index a894ea98c95a..3dc9dd3f24dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -43,19 +43,13 @@ static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
int mlx5_ec_init(struct mlx5_core_dev *dev)
{
- int err = 0;
-
if (!mlx5_core_is_ecpf(dev))
return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF
* does this for its VFs.
*/
- err = mlx5_peer_pf_init(dev);
- if (err)
- return err;
-
- return 0;
+ return mlx5_peer_pf_init(dev);
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 356f5852955f..2f05b0f9de01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -226,6 +226,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_STRIDING_RQ,
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5E_PFLAG_SKB_TX_MPWQE,
MLX5E_NUM_PFLAGS, /* Keep last */
};
@@ -270,6 +271,7 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};
struct mlx5e_cq {
@@ -309,6 +311,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_MPWQE,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
@@ -317,26 +320,40 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
+struct mlx5e_tx_mpwqe {
+ /* Current MPWQE session */
+ struct mlx5e_tx_wqe *wqe;
+ u32 bytes_count;
+ u8 ds_count;
+ u8 pkt_count;
+ u8 inline_on;
+};
+
struct mlx5e_txqsq {
/* data path */
/* dirtied @completion */
u16 cc;
+ u16 skb_fifo_cc;
u32 dma_fifo_cc;
struct dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
+ u16 skb_fifo_pc;
u32 dma_fifo_pc;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
+ u16 skb_fifo_mask;
struct mlx5e_sq_stats *stats;
struct {
struct mlx5e_sq_dma *dma_fifo;
+ struct sk_buff **skb_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} db;
void __iomem *uar_map;
@@ -403,7 +420,7 @@ struct mlx5e_xdp_info {
};
};
-struct mlx5e_xdp_xmit_data {
+struct mlx5e_xmit_data {
dma_addr_t dma_addr;
void *data;
u32 len;
@@ -416,18 +433,10 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_mpwqe {
- /* Current MPWQE session */
- struct mlx5e_tx_wqe *wqe;
- u8 ds_count;
- u8 pkt_count;
- u8 inline_on;
-};
-
struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
- struct mlx5e_xdp_xmit_data *,
+ struct mlx5e_xmit_data *,
struct mlx5e_xdp_info *,
int);
@@ -442,12 +451,12 @@ struct mlx5e_xdpsq {
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
u16 pc;
struct mlx5_wqe_ctrl_seg *doorbell_cseg;
- struct mlx5e_xdp_mpwqe mpwqe;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
@@ -611,7 +620,7 @@ struct mlx5e_rq {
struct page_pool *page_pool;
/* AF_XDP zero-copy */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
@@ -735,12 +744,13 @@ struct mlx5e_hv_vhca_stats_agent {
#endif
struct mlx5e_xsk {
- /* UMEMs are stored separately from channels, because we don't want to
- * lose them when channels are recreated. The kernel also stores UMEMs,
- * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
- * so rely on our mechanism.
+ /* XSK buffer pools are stored separately from channels,
+ * because we don't want to lose them when channels are
+ * recreated. The kernel also stores buffer pools, but it doesn't
+ * distinguish between zero-copy and non-zero-copy ones, so
+ * we rely on our own mechanism.
*/
- struct xdp_umem **umems;
+ struct xsk_buff_pool **pools;
u16 refcnt;
bool ever_used;
};
@@ -899,7 +909,7 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq);
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -909,7 +919,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
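
The new skb_fifo_cc/skb_fifo_pc/skb_fifo_mask trio in mlx5e_txqsq follows the driver's usual power-of-two ring convention: free-running 16-bit producer/consumer counters, with mask = size - 1 folding them into the array. A hedged sketch of the push/pop helpers this implies (names illustrative, not necessarily the driver's own):

	#include <linux/skbuff.h>

	static void skb_fifo_push(struct sk_buff **fifo, u16 *pc, u16 mask,
				  struct sk_buff *skb)
	{
		/* counters wrap naturally at 64K; the mask selects the slot */
		fifo[(*pc)++ & mask] = skb;
	}

	static struct sk_buff *skb_fifo_pop(struct sk_buff **fifo, u16 *cc, u16 mask)
	{
		return fifo[(*cc)++ & mask];
	}

The same convention already governs dma_fifo_cc/dma_fifo_pc, which is why only a mask and two counters are added rather than head/tail pointers.
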
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6fdcd5e69476..dc744702aee4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -12,9 +12,12 @@ enum {
};
struct mlx5e_tc_table {
- /* protects flow table */
+ /* Protects the dynamic assignment of the t parameter,
+ * which is the NIC tc root table.
+ */
struct mutex t_lock;
struct mlx5_flow_table *t;
+ struct mlx5_fs_chains *chains;
struct rhashtable ht;
@@ -24,6 +27,8 @@ struct mlx5e_tc_table {
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
};
struct mlx5e_flow_table {
@@ -231,6 +236,7 @@ struct mlx5e_accel_fs_tcp;
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3dc200bcfabd..69a05da0e3e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
{
u32 data_size;
+ int err = 0;
u32 offset;
- int err;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 79cc42d88eec..d29af7b9c695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -12,7 +12,7 @@
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ struct net_device *route_dev;
+
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
/* Update the encap source mac, in case we delete
* the flows when the encap source mac changes.
*/
- ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+ route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+ if (route_dev)
+ ether_addr_copy(eth->h_source, route_dev->dev_addr);
mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
@@ -191,7 +195,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f));
- if (!mlx5_esw_chains_prios_supported(esw))
+ if (!mlx5_chains_prios_supported(esw_chains(esw)))
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
@@ -203,12 +207,12 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
*
* We only support chain 0 of FT offload.
*/
- if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
return -EOPNOTSUPP;
if (tmp.common.chain_index != 0)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -378,12 +382,12 @@ static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
*
* We only support chain 0 of FT offload.
*/
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
+ tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
tmp.common.chain_index)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -612,7 +616,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct tc_skb_ext *tc_skb_ext;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- int tunnel_moffset;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -626,7 +629,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n",
@@ -647,13 +650,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
zone_restore_id))
return false;
}
- tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
- tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */
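
The tunnel_id extraction above replaces open-coded byte math with REG_MAPPING_SHIFT(), defined later in this series as REG_MAPPING_MOFFSET(reg) * 8, where moffset counts bytes into the metadata register. Spelled out with a hypothetical one-byte offset:

	/* moffset = 1 byte into reg_c1  =>  shift of 8 bits */
	u32 tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);

The macro change is purely a readability cleanup; the arithmetic is identical to the removed 8 * tunnel_moffset expression.
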
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a8be40cbe325..e521254d886e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/xarray.h>
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
@@ -39,8 +39,9 @@
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
struct mlx5_tc_ct_priv {
- struct mlx5_eswitch *esw;
+ struct mlx5_core_dev *dev;
const struct net_device *netdev;
+ struct mod_hdr_tbl *mod_hdr_tbl;
struct idr fte_ids;
struct xarray tuple_ids;
struct rhashtable zone_ht;
@@ -50,13 +51,16 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *ct_nat;
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
+ struct mutex shared_counter_lock;
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_fs_chains *chains;
};
struct mlx5_ct_flow {
- struct mlx5_esw_flow_attr pre_ct_attr;
- struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *post_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_flow_handle *post_ct_rule;
struct mlx5_ct_ft *ft;
@@ -67,12 +71,12 @@ struct mlx5_ct_flow {
struct mlx5_ct_zone_rule {
struct mlx5_flow_handle *rule;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_esw_flow_attr attr;
+ struct mlx5_flow_attr *attr;
bool nat;
};
struct mlx5_tc_ct_pre {
- struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_group *flow_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *flow_rule;
@@ -114,11 +118,16 @@ struct mlx5_ct_tuple {
u16 zone;
};
+struct mlx5_ct_shared_counter {
+ struct mlx5_fc *counter;
+ refcount_t refcount;
+};
+
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_fc *counter;
+ struct mlx5_ct_shared_counter *shared_counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -157,18 +166,6 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
-static struct mlx5_tc_ct_priv *
-mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- return uplink_priv->ct_priv;
-}
-
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -397,20 +394,30 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
+mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+{
+ if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ return;
+
+ mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
+ kfree(entry->shared_counter);
+}
+
+static void
mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
bool nat)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *attr = zone_rule->attr;
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ kfree(attr);
}
static void
@@ -419,8 +426,6 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
-
- mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
}
static struct flow_action_entry *
@@ -446,29 +451,40 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
u32 labels_id,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
+ enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
+ struct mlx5_core_dev *dev = ct_priv->dev;
int err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
CTSTATE_TO_REG, ct_state);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
MARK_TO_REG, mark);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
LABELS_TO_REG, labels_id);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
ZONE_RESTORE_TO_REG, zone_restore_id);
if (err)
return err;
+ /* Make another copy of zone id in reg_b for
+ * NIC rx flows since we don't copy reg_c1 to
+ * reg_b upon miss.
+ */
+ if (ns != MLX5_FLOW_NAMESPACE_FDB) {
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
+ NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -549,7 +565,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct flow_action *flow_action = &flow_rule->action;
- struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
size_t action_size;
char *modact;
@@ -560,8 +576,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev,
- MLX5_FLOW_NAMESPACE_FDB,
+ err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
mod_acts);
if (err)
return err;
@@ -590,7 +605,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
@@ -626,9 +641,9 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
- &ct_priv->esw->offloads.mod_hdr,
- MLX5_FLOW_NAMESPACE_FDB,
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
&mod_acts);
if (IS_ERR(*mh)) {
err = PTR_ERR(*mh);
@@ -652,9 +667,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5_flow_spec *spec = NULL;
+ struct mlx5_flow_attr *attr;
int err;
zone_rule->nat = nat;
@@ -663,6 +678,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (!spec)
return -ENOMEM;
+ attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
zone_restore_id, nat);
@@ -676,9 +697,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = 0;
attr->dest_ft = ct_priv->post_ct;
- attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->counter;
+ attr->counter = entry->shared_counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -686,39 +707,100 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
entry->tuple.zone & MLX5_CT_ZONE_MASK,
MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
goto err_rule;
}
+ zone_rule->attr = attr;
+
kfree(spec);
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
return 0;
err_rule:
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ kfree(attr);
+err_attr:
kfree(spec);
return err;
}
+static struct mlx5_ct_shared_counter *
+mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_ct_tuple rev_tuple = entry->tuple;
+ struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_entry *rev_entry;
+ __be16 tmp_port;
+ int ret;
+
+ /* get the reversed tuple */
+ tmp_port = rev_tuple.port.src;
+ rev_tuple.port.src = rev_tuple.port.dst;
+ rev_tuple.port.dst = tmp_port;
+
+ if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ __be32 tmp_addr = rev_tuple.ip.src_v4;
+
+ rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
+ rev_tuple.ip.dst_v4 = tmp_addr;
+ } else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
+
+ rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
+ rev_tuple.ip.dst_v6 = tmp_addr;
+ } else {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* Use the same counter as the reverse direction */
+ mutex_lock(&ct_priv->shared_counter_lock);
+ rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+ tuples_ht_params);
+ if (rev_entry) {
+ if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ return rev_entry->shared_counter;
+ }
+ }
+ mutex_unlock(&ct_priv->shared_counter_lock);
+
+ shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
+ if (!shared_counter)
+ return ERR_PTR(-ENOMEM);
+
+ shared_counter->counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(shared_counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(shared_counter->counter);
+ kfree(shared_counter);
+ return ERR_PTR(ret);
+ }
+
+ refcount_set(&shared_counter->refcount, 1);
+ return shared_counter;
+}
+
static int
mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
int err;
- entry->counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(entry->counter)) {
- err = PTR_ERR(entry->counter);
+ entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+ if (IS_ERR(entry->shared_counter)) {
+ err = PTR_ERR(entry->shared_counter);
ct_dbg("Failed to create counter for ct entry");
return err;
}
@@ -738,7 +820,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_fc_destroy(esw->dev, entry->counter);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
return err;
}
@@ -828,12 +910,16 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ mutex_lock(&ct_priv->shared_counter_lock);
if (entry->tuple_node.next)
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+
}
static int
@@ -870,7 +956,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -943,9 +1029,7 @@ out:
return false;
}
-int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
u32 ctstate = 0, ctstate_mask = 0;
@@ -961,24 +1045,21 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
return 0;
}
-void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr)
+void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
- if (!ct_priv || !ct_attr->ct_labels_id)
+ if (!priv || !ct_attr->ct_labels_id)
return;
- mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id);
+ mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
}
int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
bool trk, est, untrk, unest, new;
@@ -991,7 +1072,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct matching isn't available");
return -EOPNOTSUPP;
@@ -1047,7 +1128,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
- if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+ if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
@@ -1057,14 +1138,12 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
}
int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
return -EOPNOTSUPP;
@@ -1083,8 +1162,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
- struct mlx5_flow_table *fdb = pre_ct->fdb;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_flow_table *ft = pre_ct->ft;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *mod_hdr;
@@ -1099,14 +1178,14 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
- err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
+ ZONE_TO_REG, zone);
if (err) {
ct_dbg("Failed to set zone register mapping");
goto err_mapping;
}
- mod_hdr = mlx5_modify_header_alloc(dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
@@ -1132,7 +1211,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
dest.ft = ct_priv->post_ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct flow rule zone %d", zone);
@@ -1143,7 +1222,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
/* add miss rule */
memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1170,7 +1249,7 @@ tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
struct mlx5_tc_ct_pre *pre_ct)
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
mlx5_del_flow_rules(pre_ct->flow_rule);
mlx5_del_flow_rules(pre_ct->miss_rule);
@@ -1184,7 +1263,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -1194,10 +1273,10 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
void *misc;
int err;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
if (!ns) {
err = -EOPNOTSUPP;
- ct_dbg("Failed to get FDB flow namespace");
+ ct_dbg("Failed to get flow namespace");
return err;
}
@@ -1206,7 +1285,8 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.prio = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB ?
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
ft_attr.max_fte = 2;
ft_attr.level = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
@@ -1215,7 +1295,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
ct_dbg("Failed to create pre ct table");
goto out_free;
}
- pre_ct->fdb = ft;
+ pre_ct->ft = ft;
/* create flow group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1279,7 +1359,7 @@ mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
mlx5_destroy_flow_group(pre_ct->miss_grp);
mlx5_destroy_flow_group(pre_ct->flow_grp);
- mlx5_destroy_flow_table(pre_ct->fdb);
+ mlx5_destroy_flow_table(pre_ct->ft);
}
static int
@@ -1398,7 +1478,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + fdb prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1428,17 +1508,17 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* +--------------+
*/
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
struct mlx5_flow_spec *post_ct_spec = NULL;
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1473,10 +1553,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
ct_flow->fte_id = fte_id;
- /* Base esw attributes of both rules on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
- memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+ /* Base flow attributes of both rules on original rule attribute */
+ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_pre;
+ }
+
+ ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->post_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_post;
+ }
+
+ pre_ct_attr = ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, attr_sz);
+ memcpy(ct_flow->post_ct_attr, attr, attr_sz);
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1487,22 +1579,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
* don't go through all prios of this chain as normal tc rules
* miss.
*/
- err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
- &chain_mapping);
+ err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
+ &chain_mapping);
if (err) {
ct_dbg("Failed to get chain register mapping for chain");
goto err_get_chain;
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
FTEID_TO_REG, fte_id);
if (err) {
ct_dbg("Failed to set fte_id register mapping");
@@ -1516,7 +1608,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
attr->chain == 0) {
u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ ct_priv->ns_type,
TUNNEL_TO_REG,
tun_id);
if (err) {
@@ -1525,8 +1618,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
if (IS_ERR(mod_hdr)) {
@@ -1542,16 +1634,16 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
fte_id, MLX5_FTE_ID_MASK);
- /* Put post_ct rule on post_ct fdb */
- ct_flow->post_ct_attr.chain = 0;
- ct_flow->post_ct_attr.prio = 0;
- ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+ /* Put post_ct rule on post_ct flow table */
+ ct_flow->post_ct_attr->chain = 0;
+ ct_flow->post_ct_attr->prio = 0;
+ ct_flow->post_ct_attr->ft = ct_priv->post_ct;
- ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
- rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
- &ct_flow->post_ct_attr);
+ ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_tc_rule_insert(priv, post_ct_spec,
+ ct_flow->post_ct_attr);
ct_flow->post_ct_rule = rule;
if (IS_ERR(ct_flow->post_ct_rule)) {
err = PTR_ERR(ct_flow->post_ct_rule);
@@ -1561,10 +1653,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
- ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
- orig_spec,
- pre_ct_attr);
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+ ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
+ pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
err = PTR_ERR(ct_flow->pre_ct_rule);
ct_dbg("Failed to add pre ct rule");
@@ -1578,14 +1669,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
return rule;
err_insert_orig:
- mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
err_insert_post_ct:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
dealloc_mod_hdr_actions(&pre_mod_acts);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
+ kfree(ct_flow->post_ct_attr);
+err_alloc_post:
+ kfree(ct_flow->pre_ct_attr);
+err_alloc_pre:
idr_remove(&ct_priv->fte_ids, fte_id);
err_idr:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1597,14 +1692,14 @@ err_ft:
}
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1615,8 +1710,13 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
return ERR_PTR(-ENOMEM);
/* Base esw attributes on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
+ pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
+ memcpy(pre_ct_attr, attr, attr_sz);
err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
if (err) {
@@ -1624,8 +1724,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
goto err_set_registers;
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(mod_hdr)) {
@@ -1638,7 +1737,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add ct clear rule");
@@ -1646,6 +1745,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
}
attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_attr = pre_ct_attr;
ct_flow->pre_ct_rule = rule;
return rule;
@@ -1654,61 +1754,67 @@ err_insert:
err_set_registers:
netdev_warn(priv->netdev,
"Failed to offload ct clear flow, err %d\n", err);
+ kfree(pre_ct_attr);
+err_attr:
+ kfree(ct_flow);
+
return ERR_PTR(err);
}
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_flow_handle *rule;
- if (!ct_priv)
+ if (!priv)
return ERR_PTR(-EOPNOTSUPP);
- mutex_lock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
if (clear_action)
rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
else
rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_unlock(&priv->control_lock);
return rule;
}
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_ct_flow *ct_flow)
{
- struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
- pre_ct_attr);
- mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
if (ct_flow->post_ct_rule) {
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
}
+ kfree(ct_flow->pre_ct_attr);
+ kfree(ct_flow->post_ct_attr);
kfree(ct_flow);
}
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
/* We are called on error to clean up stuff from parsing
@@ -1717,22 +1823,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
if (!ct_flow)
return;
- mutex_lock(&ct_priv->control_lock);
- __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
+ __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ mutex_unlock(&priv->control_lock);
}
static int
-mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
- const char **err_msg)
+mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
{
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- *err_msg = "tc skb extension missing";
- return -EOPNOTSUPP;
-#endif
-
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
*err_msg = "firmware level support is missing";
return -EOPNOTSUPP;
@@ -1766,44 +1865,61 @@ mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
return 0;
}
-static void
-mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+static int
+mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv,
+ const char **err_msg)
+{
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ const char **err_msg)
{
- if (msg)
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, %s, err: %d\n",
- msg, err);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
else
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, err: %d\n",
- err);
+ return mlx5_tc_ct_init_check_nic_support(priv, err_msg);
}
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+#define INIT_ERR_PREFIX "tc ct offload init failed"
+
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_tc_ct_priv *ct_priv;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5e_priv *priv;
+ struct mlx5_core_dev *dev;
const char *msg;
int err;
- rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
- priv = netdev_priv(rpriv->netdev);
- esw = priv->mdev->priv.eswitch;
-
- err = mlx5_tc_ct_init_check_support(esw, &msg);
+ dev = priv->mdev;
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg);
if (err) {
- mlx5_tc_ct_init_err(rpriv, msg, err);
+ mlx5_core_warn(dev,
+ "tc ct offload not supported, %s\n",
+ msg);
goto err_support;
}
ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
- if (!ct_priv) {
- mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ if (!ct_priv)
goto err_alloc;
- }
ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
@@ -1817,46 +1933,51 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_mapping_labels;
}
- ct_priv->esw = esw;
- ct_priv->netdev = rpriv->netdev;
- ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ns_type = ns_type;
+ ct_priv->chains = chains;
+ ct_priv->netdev = priv->netdev;
+ ct_priv->dev = priv->mdev;
+ ct_priv->mod_hdr_tbl = mod_hdr;
+ ct_priv->ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct)) {
err = PTR_ERR(ct_priv->ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_tbl;
}
- ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct_nat)) {
err = PTR_ERR(ct_priv->ct_nat);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct nat table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_nat_tbl;
}
- ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->post_ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->post_ct)) {
err = PTR_ERR(ct_priv->post_ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create post ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_post_ct_tbl;
}
idr_init(&ct_priv->fte_ids);
mutex_init(&ct_priv->control_lock);
+ mutex_init(&ct_priv->shared_counter_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
- /* Done, set ct_priv to know it initializted */
- uplink_priv->ct_priv = ct_priv;
-
- return 0;
+ return ct_priv;
err_post_ct_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
mapping_destroy(ct_priv->labels_mapping);
err_mapping_labels:
@@ -1866,20 +1987,22 @@ err_mapping_zone:
err_alloc:
err_support:
- return 0;
+ return NULL;
}
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_fs_chains *chains;
if (!ct_priv)
return;
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+ chains = ct_priv->chains;
+
+ mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
mapping_destroy(ct_priv->labels_mapping);
@@ -1887,17 +2010,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
+ mutex_destroy(&ct_priv->shared_counter_lock);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
-
- uplink_priv->ct_priv = NULL;
}
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
struct mlx5_ct_tuple tuple = {};
struct mlx5_ct_entry *entry;
u16 zone;
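
The shared-counter machinery added above (mlx5_tc_ct_shared_counter_get/put) is a lookup-or-allocate pattern: under shared_counter_lock, try to pin the reverse-direction tuple's counter with refcount_inc_not_zero(); if no reverse entry exists or it can no longer be pinned, allocate a fresh refcounted counter. A stripped-down sketch with hypothetical shared_obj names (the lookup is passed in to keep the sketch self-contained):

	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct shared_obj {
		refcount_t refcount;
		/* shared payload, e.g. a flow counter */
	};

	static struct shared_obj *
	shared_obj_get(struct mutex *lock,
		       struct shared_obj *(*lookup)(void *key), void *rev_key)
	{
		struct shared_obj *obj;

		mutex_lock(lock);
		obj = lookup(rev_key);			/* reverse-direction entry? */
		if (obj && refcount_inc_not_zero(&obj->refcount)) {
			mutex_unlock(lock);
			return obj;			/* pinned the existing object */
		}
		mutex_unlock(lock);

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return ERR_PTR(-ENOMEM);
		refcount_set(&obj->refcount, 1);	/* owner's reference */
		return obj;
	}

The matching put, as in mlx5_tc_ct_shared_counter_put() above, frees the payload only when refcount_dec_and_test() drops the last reference, and the delete path takes shared_counter_lock around the rhashtable removal so a concurrent get cannot pin a counter that is about to be destroyed.
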
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 708c216325d3..6503b614337c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -10,12 +10,14 @@
#include "en.h"
-struct mlx5_esw_flow_attr;
+struct mlx5_flow_attr;
struct mlx5e_tc_mod_hdr_acts;
struct mlx5_rep_uplink_priv;
struct mlx5e_tc_flow;
struct mlx5e_priv;
+struct mlx5_fs_chains;
+struct mlx5_tc_ct_priv;
struct mlx5_ct_flow;
struct nf_flowtable;
@@ -76,68 +78,78 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_1) + 3,\
}
+#define nic_zone_restore_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
+ .moffset = 2,\
+ .mlen = 1,\
+}
+
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type);
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv);
void
-mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr);
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr);
int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack);
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec);
-int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
#else /* CONFIG_MLX5_TC_CT */
-static inline int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+static inline struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
- return 0;
+ return NULL;
}
static inline void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
}
static inline void
-mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) {}
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr) {}
static inline int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
@@ -149,47 +161,44 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
return 0;
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
return 0;
}
static inline int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
}
static inline bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
if (!zone_restore_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 7cce85faa16f..90930e54b6f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return 0;
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct neighbour *n;
struct rtable *rt;
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
ip_rt_put(rt);
return ret;
}
+ dev_hold(*route_dev);
if (!(*out_ttl))
*out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
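
The _get/_put rename encodes a reference-counting contract: the lookup now takes a hold on route_dev before returning it, and one put releases both the neighbour and the device, so the neigh error path cannot leak the device reference (the encap entry correspondingly stores route_dev->ifindex instead of the pointer, so nothing dangles once the reference drops). A toy model of the pairing, with hypothetical types rather than the driver's:

struct dev;
struct neigh;

void dev_hold(struct dev *d);
void dev_put(struct dev *d);
void neigh_release(struct neigh *n);
struct neigh *neigh_lookup_example(struct dev *d);

static int route_lookup_get(struct dev *route_dev, struct neigh **out_n)
{
        dev_hold(route_dev);             /* reference travels to the caller */
        *out_n = neigh_lookup_example(route_dev);
        if (!*out_n) {
                dev_put(route_dev);      /* roll back on failure */
                return -1;
        }
        return 0;
}

static void route_lookup_put(struct dev *route_dev, struct neigh *n)
{
        neigh_release(n);                /* release both in one call */
        dev_put(route_dev);
}
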
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
if (dev->rtnl_link_ops)
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
fl4.saddr = tun_key->u.ipv4.src;
ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
if (err)
return err;
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -286,18 +296,18 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv4_put(route_dev, n);
return err;
}
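
Note that both exit paths of mlx5e_tc_tun_create_header_ipv4, success and error alike, now end in mlx5e_route_lookup_ipv4_put, which is what keeps the hold taken in _get balanced.
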
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
{
struct dst_entry *dst;
struct neighbour *n;
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
return ret;
}
+ dev_hold(*route_dev);
n = dst_neigh_lookup(dst, &fl6->daddr);
dst_release(dst);
- if (!n)
+ if (!n) {
+ dev_put(*route_dev);
return -ENOMEM;
+ }
*out_n = n;
return 0;
}
+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
+ struct neighbour *n)
+{
+ neigh_release(n);
+ dev_put(route_dev);
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
fl6.daddr = tun_key->u.ipv6.dst;
fl6.saddr = tun_key->u.ipv6.src;
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
if (err)
return err;
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
- e->route_dev = route_dev;
+ e->route_dev_ifindex = route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
destroy_neigh_entry:
@@ -441,7 +461,7 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- neigh_release(n);
+ mlx5e_route_lookup_ipv6_put(route_dev, n);
return err;
}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 24336c60123a..07ee1d236ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -7,6 +7,21 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
+
+/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
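
The comment above compresses a small calculation worth spelling out: a WQEBB is 64 bytes and carries four 16-byte data segments, so a full 16-WQEBB multi-packet WQE would need ds_cnt = 64, one more than the 6-bit field can hold. Shaving one WQEBB (64-byte cachelines) or two (128-byte cachelines) also keeps a full session a whole number of cachelines: 15 * 64 = 960, and 14 * 64 = 896 = 7 * 128. A standalone restatement with stand-in constants rather than the driver's headers:

#define SEND_WQE_MAX_WQEBBS 16
#define SEND_WQEBB_NUM_DS    4                          /* 64 B / 16 B per DS */
#define TX_MPW_MAX_WQEBBS   (SEND_WQE_MAX_WQEBBS - 1)   /* 64-B cacheline case */
#define TX_MPW_MAX_NUM_DS   (TX_MPW_MAX_WQEBBS * SEND_WQEBB_NUM_DS)

_Static_assert(TX_MPW_MAX_NUM_DS <= 63, "ds_cnt is a 6-bit field");
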
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
enum mlx5e_icosq_wqe_type {
@@ -46,8 +61,6 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
@@ -110,6 +123,7 @@ struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
u8 num_dma;
+ u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
struct page *resync_dump_frag_page;
#endif
@@ -194,23 +208,6 @@ static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
}
static inline void
-mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
- sq->stats->nop += nnops;
-}
-
-static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
@@ -228,29 +225,6 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
-{
- return cseg && !!cseg->tis_tir_num;
-}
-
-static inline u8
-mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct sk_buff *skb)
-{
- u8 mode;
-
- if (mlx5e_transport_inline_tx_wqe(cseg))
- return MLX5_INLINE_MODE_TCP_UDP;
-
- mode = sq->min_inline_mode;
-
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
- mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
-
- return mode;
-}
-
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
struct mlx5_core_cq *mcq;
@@ -276,6 +250,23 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
dma->type = map_type;
}
+static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
+{
+ return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
+}
+
+static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+{
+ struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);
+
+ *skb_item = skb;
+}
+
+static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
+{
+ return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
+}
+
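
The skb fifo relies on a power-of-two ring with free-running 16-bit counters: indices are reduced with a mask, wraparound costs nothing, and occupancy is simply pc - cc. A minimal standalone model mirroring the helpers above (hypothetical, single producer/consumer as in the SQ):

#include <stdint.h>

struct fifo {
        void    **buf;                   /* length is a power of two */
        uint16_t  mask;                  /* length - 1 */
        uint16_t  pc, cc;                /* free-running producer/consumer */
};

static void fifo_push(struct fifo *f, void *item)
{
        f->buf[f->pc++ & f->mask] = item;
}

static void *fifo_pop(struct fifo *f)
{
        return f->buf[f->cc++ & f->mask];
}
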
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
@@ -291,6 +282,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
+
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+{
+ return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+}
+
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b28df21981a1..ae90d533a350 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -59,7 +59,7 @@ static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
@@ -194,18 +194,22 @@ static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
- session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ net_prefetchw(wqe->data);
- prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
-
- mlx5e_xdp_update_inline_state(sq);
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
+ };
stats->mpwqe++;
}
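
Replacing the field-by-field reset with a compound-literal assignment zeroes every member that is not named, so a session restart cannot miss a field added later; note the previous inline_on is read on the right-hand side before the assignment overwrites it. The shape in isolation, on a hypothetical struct:

#include <stdbool.h>
#include <stdint.h>

struct session {
        void    *wqe;
        uint32_t bytes_count;
        uint16_t ds_count;
        bool     inline_on;
};

static void session_restart(struct session *s, void *wqe, bool prev_inline)
{
        *s = (struct session) {
                .wqe       = wqe,
                .inline_on = prev_inline, /* unnamed members become zero */
        };
}
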
@@ -213,7 +217,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
u16 ds_count = session->ds_count;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -258,10 +262,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
if (unlikely(xdptxd->len > sq->hw_mtu)) {
@@ -284,8 +288,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *x
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
- session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -306,7 +309,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -322,7 +325,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats = sq->stats;
- prefetchw(wqe);
+ net_prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
@@ -445,7 +448,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
sq->stats->cqes += i;
@@ -475,7 +478,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
}
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -503,7 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
bool ret;
@@ -563,4 +566,3 @@ void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
sq->xmit_xdp_frame = is_mpw ?
mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e806c13d491f..d487e5e37162 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,27 +38,12 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
- (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
- DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
-
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
-#define MLX5E_XDP_MPW_MAX_NUM_DS \
- (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
+ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
+ sizeof(struct mlx5_wqe_inline_seg))
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
@@ -73,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
@@ -122,30 +107,28 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
/* Enable inline WQEs to shift some load from a congested HCA (HW) to
* a less congested cpu (SW).
*/
-static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
#define MLX5E_XDP_INLINE_WATERMARK_LOW 10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
- if (session->inline_on) {
- if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
- session->inline_on = 0;
- return;
- }
+ if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+ return false;
+
+ if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+ return true;
- /* inline is false */
- if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
- session->inline_on = 1;
+ return cur;
}
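
Turning the stateful update into a pure cur -> next function makes the hysteresis explicit: inlining switches off only when outstanding work drains to the low watermark, on only when it climbs to the high one, and holds its state in between, so the decision cannot flap around a single threshold. The same logic, standalone (watermarks copied from the defines above):

#include <stdbool.h>
#include <stdint.h>

#define WATERMARK_LOW  10
#define WATERMARK_HIGH 128

static bool next_inline_state(bool cur, uint16_t outstanding)
{
        if (cur && outstanding <= WATERMARK_LOW)
                return false;            /* HW has caught up: stop inlining */
        if (!cur && outstanding >= WATERMARK_HIGH)
                return true;             /* HW is congested: start inlining */
        return cur;                      /* dead band: keep previous state */
}
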
-static inline bool
-mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->inline_on &&
- session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+ if (session->inline_on)
+ return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
+ MLX5E_TX_MPW_MAX_NUM_DS;
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
@@ -155,15 +138,16 @@ struct mlx5e_xdp_wqe_info {
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_data_seg *dseg =
(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
u32 dma_len = xdptxd->len;
session->pkt_count++;
+ session->bytes_count += dma_len;
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 331ca2b0f8a4..71e8d66fa150 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -1,31 +1,31 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
#include <net/xdp_sock_drv.h>
-#include "umem.h"
+#include "pool.h"
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
- struct device *dev = priv->mdev->device;
+ struct device *dev = mlx5_core_dma_dev(priv->mdev);
- return xsk_buff_dma_map(umem, dev, 0);
+ return xsk_pool_dma_map(pool, dev, 0);
}
-static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
- return xsk_buff_dma_unmap(umem, 0);
+ return xsk_pool_dma_unmap(pool, 0);
}
-static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
+static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
- if (!xsk->umems) {
- xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
- sizeof(*xsk->umems), GFP_KERNEL);
- if (unlikely(!xsk->umems))
+ if (!xsk->pools) {
+ xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
+ sizeof(*xsk->pools), GFP_KERNEL);
+ if (unlikely(!xsk->pools))
return -ENOMEM;
}
@@ -35,68 +35,68 @@ static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
return 0;
}
-static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
+static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
if (!--xsk->refcnt) {
- kfree(xsk->umems);
- xsk->umems = NULL;
+ kfree(xsk->pools);
+ xsk->pools = NULL;
}
}
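
The pool directory follows a get/put discipline: the per-channel array is allocated on first use and freed when the last user calls put, with the plain refcnt standing in for ownership. A reduced model (hypothetical, no locking shown; the driver serializes these paths under its own lock):

#include <stdlib.h>

struct pool_dir {
        void        **pools;
        unsigned int  refcnt;
};

static int pool_dir_get(struct pool_dir *d, size_t nchannels)
{
        if (!d->pools) {
                d->pools = calloc(nchannels, sizeof(*d->pools));
                if (!d->pools)
                        return -1;
        }
        d->refcnt++;
        return 0;
}

static void pool_dir_put(struct pool_dir *d)
{
        if (!--d->refcnt) {              /* last user frees the directory */
                free(d->pools);
                d->pools = NULL;
        }
}
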
-static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
+static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
{
int err;
- err = mlx5e_xsk_get_umems(xsk);
+ err = mlx5e_xsk_get_pools(xsk);
if (unlikely(err))
return err;
- xsk->umems[ix] = umem;
+ xsk->pools[ix] = pool;
return 0;
}
-static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
+static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
{
- xsk->umems[ix] = NULL;
+ xsk->pools[ix] = NULL;
- mlx5e_xsk_put_umems(xsk);
+ mlx5e_xsk_put_pools(xsk);
}
-static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
+static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_headroom(umem) <= 0xffff &&
- xsk_umem_get_chunk_size(umem) <= 0xffff;
+ return xsk_pool_get_headroom(pool) <= 0xffff &&
+ xsk_pool_get_chunk_size(pool) <= 0xffff;
}
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{
- xsk->headroom = xsk_umem_get_headroom(umem);
- xsk->chunk_size = xsk_umem_get_chunk_size(umem);
+ xsk->headroom = xsk_pool_get_headroom(pool);
+ xsk->chunk_size = xsk_pool_get_chunk_size(pool);
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
- struct xdp_umem *umem, u16 ix)
+ struct xsk_buff_pool *pool, u16 ix)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
int err;
- if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
+ if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
return -EBUSY;
- if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
+ if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_umem(priv, umem);
+ err = mlx5e_xsk_map_pool(priv, pool);
if (unlikely(err))
return err;
- err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
+ err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
if (unlikely(err))
- goto err_unmap_umem;
+ goto err_unmap_pool;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(pool, &xsk);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
@@ -112,9 +112,9 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
c = priv->channels.c[ix];
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
if (unlikely(err))
- goto err_remove_umem;
+ goto err_remove_pool;
mlx5e_activate_xsk(c);
@@ -132,11 +132,11 @@ err_deactivate:
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-err_remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
+err_remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
-err_unmap_umem:
- mlx5e_xsk_unmap_umem(priv, umem);
+err_unmap_pool:
+ mlx5e_xsk_unmap_pool(priv, pool);
return err;
@@ -146,7 +146,7 @@ validate_closed:
*/
if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
err = -EINVAL;
- goto err_remove_umem;
+ goto err_remove_pool;
}
return 0;
@@ -154,45 +154,45 @@ validate_closed:
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
+ struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
&priv->xsk, ix);
struct mlx5e_channel *c;
- if (unlikely(!umem))
+ if (unlikely(!pool))
return -EINVAL;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto remove_umem;
+ goto remove_pool;
/* XSK RQ and SQ are only created if XDP program is set. */
if (!priv->channels.params.xdp_prog)
- goto remove_umem;
+ goto remove_pool;
c = priv->channels.c[ix];
mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
- mlx5e_xsk_unmap_umem(priv, umem);
+remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
+ mlx5e_xsk_unmap_pool(priv, pool);
return 0;
}
-static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
+static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
u16 ix)
{
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_xsk_enable_locked(priv, umem, ix);
+ err = mlx5e_xsk_enable_locked(priv, pool, ix);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
+static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
{
int err;
@@ -203,7 +203,7 @@ static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
return err;
}
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
@@ -212,6 +212,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
return -EINVAL;
- return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
- mlx5e_xsk_disable_umem(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
+ mlx5e_xsk_disable_pool(priv, ix);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
new file mode 100644
index 000000000000..dca0010a0866
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_XSK_POOL_H__
+#define __MLX5_EN_XSK_POOL_H__
+
+#include "en.h"
+
+static inline struct xsk_buff_pool *mlx5e_xsk_get_pool(struct mlx5e_params *params,
+ struct mlx5e_xsk *xsk, u16 ix)
+{
+ if (!xsk || !xsk->pools)
+ return NULL;
+
+ if (unlikely(ix >= params->num_channels))
+ return NULL;
+
+ return xsk->pools[ix];
+}
+
+struct mlx5e_xsk_param;
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk);
+
+/* .ndo_bpf callback. */
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+#endif /* __MLX5_EN_XSK_POOL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 40db27bf790b..8e7b877d8a12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -47,8 +47,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt32;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -93,8 +93,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index d147b2f13b54..7f88ccf67fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -19,10 +19,10 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- dma_info->xsk = xsk_buff_alloc(rq->umem);
+ dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -38,13 +38,13 @@ static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
- if (!xsk_umem_uses_need_wakeup(rq->umem))
+ if (!xsk_uses_need_wakeup(rq->xsk_pool))
return alloc_err;
if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->umem);
+ xsk_set_rx_need_wakeup(rq->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rq->umem);
+ xsk_clear_rx_need_wakeup(rq->xsk_pool);
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 55e65a438de7..be3465ba38ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -45,7 +45,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c)
{
struct mlx5e_channel_param *cparam;
@@ -64,7 +64,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
@@ -72,13 +72,13 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rq;
- /* Create a separate SQ, so that when the UMEM is disabled, we could
+ /* Create a separate SQ, so that when the buff pool is disabled, we can
* close this SQ safely and stop receiving CQEs. Otherwise, e.g., if
- * the XDPSQ was used instead, we might run into trouble when the UMEM
+ * the XDPSQ was used instead, we might run into trouble when the buff pool
* is disabled and then reenabled, but the SQ continues receiving CQEs
- * from the old UMEM.
+ * from the old buff pool.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
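
The switch to the _bh lock variants reflects that async_icosq_lock is taken from both process context and softirq context in this patch (the ktls resync paths later in the series post to the same icosq from NAPI). Process-context users must disable bottom halves while holding the lock, or a softirq interrupting the holder on the same CPU would self-deadlock. The general pattern, illustrative rather than driver code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);

static void process_context_side(void)
{
        spin_lock_bh(&shared_lock);      /* block softirqs on this CPU */
        /* ... post work that the softirq side will consume ... */
        spin_unlock_bh(&shared_lock);
}

static void softirq_side(void)           /* e.g. NAPI poll */
{
        spin_lock(&shared_lock);          /* softirqs don't nest on a CPU */
        /* ... */
        spin_unlock(&shared_lock);
}
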
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index 0dd11b81c046..ca20f1ff5e39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -12,7 +12,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev);
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c);
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 4d892f6cecb3..8e96260fce1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "tx.h"
-#include "umem.h"
+#include "pool.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
}
return 0;
@@ -66,9 +66,9 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
- struct xdp_umem *umem = sq->umem;
+ struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
- struct mlx5e_xdp_xmit_data xdptxd;
bool work_done = true;
bool flush = false;
@@ -87,7 +87,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(umem, &desc)) {
+ if (!xsk_tx_peek_desc(pool, &desc)) {
/* TX will get stuck until something wakes it up by
* triggering NAPI. Currently it's expected that the
* application calls sendto() if there are consumed, but
@@ -96,11 +96,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
- xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
+ xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
- xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
+ xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
@@ -119,7 +119,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq);
- xsk_umem_consume_tx_done(umem);
+ xsk_tx_release(pool);
}
return !(budget && work_done);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 39fa0a705856..a05085035f23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -15,13 +15,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
{
- if (!xsk_umem_uses_need_wakeup(sq->umem))
+ if (!xsk_uses_need_wakeup(sq->xsk_pool))
return;
if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->umem);
+ xsk_clear_tx_need_wakeup(sq->xsk_pool);
else
- xsk_set_tx_need_wakeup(sq->umem);
+ xsk_set_tx_need_wakeup(sq->xsk_pool);
}
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
deleted file mode 100644
index bada94973586..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5_EN_XSK_UMEM_H__
-#define __MLX5_EN_XSK_UMEM_H__
-
-#include "en.h"
-
-static inline struct xdp_umem *mlx5e_xsk_get_umem(struct mlx5e_params *params,
- struct mlx5e_xsk *xsk, u16 ix)
-{
- if (!xsk || !xsk->umems)
- return NULL;
-
- if (unlikely(ix >= params->num_channels))
- return NULL;
-
- return xsk->umems[ix];
-}
-
-struct mlx5e_xsk_param;
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
-
-/* .ndo_bpf callback. */
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
-
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
-
-#endif /* __MLX5_EN_XSK_UMEM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 110476bdeffb..899b98aca0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -107,6 +107,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_tx_tls_state tls;
#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5e_accel_tx_ipsec_state ipsec;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -125,27 +128,70 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
+ return false;
+ }
+#endif
+
return true;
}
-static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
+static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ return mlx5e_ipsec_is_tx_flow(&state->ipsec);
+#endif
+
+ return false;
+}
+
+static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
+ return mlx5e_ipsec_tx_ids_len(&state->ipsec);
+#endif
+
+ return 0;
+}
+
+/* Part of the eseg touched by TX offloads */
+#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
+
+static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (xfrm_offload(skb))
+ mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
+#endif
+
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
+
+ return true;
+}
+
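
MLX5E_ACCEL_ESEG_LEN bounds the part of the Ethernet segment that TX offloads may write: everything from mss onward is left untouched, so callers only need to treat the leading prefix specially, for example when copying or staging eseg contents. In outline, on a hypothetical layout rather than the real mlx5_wqe_eth_seg:

#include <stddef.h>
#include <stdint.h>

struct eth_seg_example {
        uint32_t swp_flags;              /* written by offloads */
        uint32_t flow_metadata;          /* written by offloads */
        uint16_t mss;                    /* first member offloads never touch */
        uint8_t  inline_hdr[16];
};

#define ACCEL_ESEG_LEN offsetof(struct eth_seg_example, mss)
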
+static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe *wqe,
- struct mlx5e_accel_tx_state *state)
+ struct mlx5e_accel_tx_state *state,
+ struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
- return false;
- }
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
+ state->ipsec.xo && state->ipsec.tailen)
+ mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
-
- return true;
}
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4cdd9eac647d..97f1594cee11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -191,7 +191,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
- kvfree(ft->g);
+ kfree(ft->g);
kvfree(in);
return -ENOMEM;
}
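
ft->g comes from kcalloc(), so kfree() is its natural pair, while in comes from kvzalloc() and keeps kvfree(). kvfree() does tolerate kmalloc'd pointers, so the old line was an inconsistency rather than a crash, but matching allocator and release makes the allocation strategy obvious at the free site. The pairing in isolation, with hypothetical sizes:

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int alloc_pair_example(size_t ngroups, size_t inlen)
{
        void **g = kcalloc(ngroups, sizeof(*g), GFP_KERNEL); /* kmalloc family */
        void *in = kvzalloc(inlen, GFP_KERNEL);              /* kmalloc or vmalloc */

        if (!g || !in) {
                kfree(g);        /* pairs with kcalloc(); NULL-safe */
                kvfree(in);      /* pairs with kvzalloc(); NULL-safe */
                return -ENOMEM;
        }
        /* ... use g and in ... */
        kfree(g);
        kvfree(in);
        return 0;
}
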
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index d39989cddd90..3d45341e2216 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -560,6 +560,9 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
return;
}
+ if (mlx5_is_ipsec_device(mdev))
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
+
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 0fc8b4d4f4a3..6164c7f59efb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,6 +76,7 @@ struct mlx5e_ipsec_stats {
};
struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
@@ -87,6 +88,7 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
+ struct mlx5e_ipsec_tx *tx_fs;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 429428bbc903..0e45590662a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -34,6 +34,12 @@ struct mlx5e_accel_fs_esp {
struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};
+struct mlx5e_ipsec_tx {
+ struct mlx5_flow_table *ft;
+ struct mutex mutex; /* Protect IPsec TX steering */
+ u32 refcnt;
+};
+
/* IPsec RX flow steering */
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
@@ -228,8 +234,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
fs_prot->miss_rule = miss_rule;
out:
- kfree(flow_group_in);
- kfree(spec);
+ kvfree(flow_group_in);
+ kvfree(spec);
return err;
}
@@ -323,6 +329,77 @@ out:
mutex_unlock(&fs_prot->prot_mutex);
}
+/* IPsec TX flow steering */
+static int tx_create(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ priv->fs.egress_ns =
+ mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ if (!priv->fs.egress_ns)
+ return -EOPNOTSUPP;
+
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
+ return err;
+ }
+ ipsec->tx_fs->ft = ft;
+ return 0;
+}
+
+static void tx_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+ if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
+ return;
+
+ mlx5_destroy_flow_table(ipsec->tx_fs->ft);
+ ipsec->tx_fs->ft = NULL;
+}
+
+static int tx_ft_get(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ int err = 0;
+
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = tx_create(priv);
+ if (err) {
+ tx_fs->refcnt--;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void tx_ft_put(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ tx_destroy(priv);
+
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
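
The TX flow table is created lazily with the first rule and torn down with the last; note the rollback: the refcount is undone if creation fails, so a later get can retry cleanly. Reduced to its shape, on a hypothetical object:

#include <linux/mutex.h>

struct lazy_obj {
        struct mutex  mutex;
        unsigned int  refcnt;
};

int lazy_obj_create(struct lazy_obj *o);     /* expensive setup */
void lazy_obj_destroy(struct lazy_obj *o);

static int lazy_obj_get(struct lazy_obj *o)
{
        int err = 0;

        mutex_lock(&o->mutex);
        if (o->refcnt++ == 0) {
                err = lazy_obj_create(o);    /* 0 -> 1: build it */
                if (err)
                        o->refcnt--;         /* roll back so get can retry */
        }
        mutex_unlock(&o->mutex);
        return err;
}

static void lazy_obj_put(struct lazy_obj *o)
{
        mutex_lock(&o->mutex);
        if (--o->refcnt == 0)
                lazy_obj_destroy(o);         /* 1 -> 0: tear it down */
        mutex_unlock(&o->mutex);
}
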
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5_flow_spec *spec,
@@ -457,6 +534,54 @@ out:
return err;
}
+static int tx_add_rule(struct mlx5e_priv *priv,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 ipsec_obj_id,
+ struct mlx5e_ipsec_rule *ipsec_rule)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ err = tx_ft_get(priv);
+ if (err)
+ return err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+
+ /* Add IPsec indicator in metadata_reg_a */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
+ attrs->action, err);
+ goto out;
+ }
+
+ ipsec_rule->rule = rule;
+
+out:
+ kvfree(spec);
+ if (err)
+ tx_ft_put(priv);
+ return err;
+}
+
static void rx_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
@@ -470,15 +595,27 @@ static void rx_del_rule(struct mlx5e_priv *priv,
rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
+static void tx_del_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_rule *ipsec_rule)
+{
+ mlx5_del_flow_rules(ipsec_rule->rule);
+ ipsec_rule->rule = NULL;
+
+ tx_ft_put(priv);
+}
+
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
{
- if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ if (!priv->ipsec->rx_fs)
return -EOPNOTSUPP;
- return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ else
+ return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
@@ -488,7 +625,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
if (!priv->ipsec->rx_fs)
return;
- rx_del_rule(priv, attrs, ipsec_rule);
+ if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ rx_del_rule(priv, attrs, ipsec_rule);
+ else
+ tx_del_rule(priv, ipsec_rule);
+}
+
+static void fs_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mutex_destroy(&priv->ipsec->tx_fs->mutex);
+ WARN_ON(priv->ipsec->tx_fs->refcnt);
+ kfree(priv->ipsec->tx_fs);
+ priv->ipsec->tx_fs = NULL;
}
static void fs_cleanup_rx(struct mlx5e_priv *priv)
@@ -507,6 +655,17 @@ static void fs_cleanup_rx(struct mlx5e_priv *priv)
priv->ipsec->rx_fs = NULL;
}
+static int fs_init_tx(struct mlx5e_priv *priv)
+{
+ priv->ipsec->tx_fs =
+ kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
+ if (!priv->ipsec->tx_fs)
+ return -ENOMEM;
+
+ mutex_init(&priv->ipsec->tx_fs->mutex);
+ return 0;
+}
+
static int fs_init_rx(struct mlx5e_priv *priv)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
@@ -532,13 +691,24 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
if (!priv->ipsec->rx_fs)
return;
+ fs_cleanup_tx(priv);
fs_cleanup_rx(priv);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
+ int err;
+
if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
return -EOPNOTSUPP;
- return fs_init_rx(priv);
+ err = fs_init_tx(priv);
+ if (err)
+ return err;
+
+ err = fs_init_rx(priv);
+ if (err)
+ fs_cleanup_tx(priv);
+
+ return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 93a8d68815ad..11e31a3db2be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,7 +34,7 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-
+#include "accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
@@ -233,18 +233,94 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
- struct mlx5_wqe_eth_seg *eseg,
- struct sk_buff *skb)
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
+ esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
+}
+
+static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct xfrm_state *x,
+ struct xfrm_offload *xo,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ unsigned int blksize, clen, alen, plen;
+ struct crypto_aead *aead;
+ unsigned int tailen;
+
+ ipsec_st->x = x;
+ ipsec_st->xo = xo;
+ if (mlx5_is_ipsec_device(priv->mdev)) {
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
+ }
+
+ return 0;
+}
+
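
mlx5e_ipsec_set_state sizes the ESP trailer that the WQE's inline segment will later carry: the payload plus the two trailer bytes (pad length, next header) is rounded up to the cipher block size, the difference is the pad-inclusive plen (never below 4), and the ICV length is added on top. A standalone restatement with AES-GCM-style numbers (blocksize 1 aligned up to 4, 16-byte ICV; skb_len = 100 gives clen = 104, plen = 4, tailen = 20):

#include <stdint.h>

static uint32_t align_up(uint32_t x, uint32_t a)
{
        return (x + a - 1) & ~(a - 1);
}

static uint32_t esp_tailen(uint32_t skb_len, uint32_t blksize, uint32_t alen)
{
        uint32_t clen = align_up(skb_len + 2, blksize); /* +2: padlen, proto */
        uint32_t plen = clen - skb_len;                 /* includes those 2 B */

        if (plen < 4)
                plen = 4;
        return plen + alen;                             /* padding + ICV */
}
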
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- struct mlx5e_ipsec_metadata *mdata;
- struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct xfrm_encap_tmpl *encap;
struct xfrm_state *x;
struct sec_path *sp;
+ u8 l3_proto;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(sp->len != 1))
+ return;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ return;
+
+ if (unlikely(!x->xso.offload_handle ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))))
+ return;
+
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
- if (!xo)
- return true;
+ l3_proto = (x->props.family == AF_INET) ?
+ ((struct iphdr *)skb_network_header(skb))->protocol :
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
+
+ if (mlx5_is_ipsec_device(priv->mdev)) {
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
+ }
+ }
+}
+
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5e_ipsec_metadata *mdata;
+ struct xfrm_state *x;
+ struct sec_path *sp;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -270,15 +346,21 @@ bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
goto drop;
}
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
+
+ if (MLX5_CAP_GEN(priv->mdev, fpga)) {
+ mdata = mlx5e_ipsec_add_metadata(skb);
+ if (IS_ERR(mdata)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+ goto drop;
+ }
}
- mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
+
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
+ if (MLX5_CAP_GEN(priv->mdev, fpga))
+ mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+ mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index f96e786db158..056dacb612b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,6 +43,13 @@
#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+struct mlx5e_accel_tx_ipsec_state {
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ u32 tailen;
+ u32 plen;
+};
+
#ifdef CONFIG_MLX5_EN_IPSEC
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -55,16 +62,32 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
- struct mlx5_wqe_eth_seg *eseg,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st);
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+ struct mlx5_wqe_inline_seg *inlseg);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
+static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ return ipsec_st->tailen;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
}
+
+static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ return ipsec_st->x;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 6bbfcf18107d..7f6221b8b1f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
@@ -253,7 +253,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
- pdev = sq->channel->priv->mdev->device;
+ pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
- spin_lock(&sq->channel->async_icosq_lock);
+ spin_lock_bh(&sq->channel->async_icosq_lock);
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
err = -ENOSPC;
goto err_dma_unmap;
}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
- spin_unlock(&sq->channel->async_icosq_lock);
+ spin_unlock_bh(&sq->channel->async_icosq_lock);
return 0;
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
err = 0;
sq = &c->async_icosq;
- spin_lock(&c->async_icosq_lock);
+ spin_lock_bh(&c->async_icosq_lock);
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
priv_rx->stats->tls_resync_res_ok++;
unlock:
- spin_unlock(&c->async_icosq_lock);
+ spin_unlock_bh(&c->async_icosq_lock);
return err;
}
@@ -390,7 +390,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
- dev = resync->priv->mdev->device;
+ dev = mlx5_core_dma_dev(resync->priv->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
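As I read the conversions above, the async ICOSQ lock is now taken from process context (the resync and param-post paths) as well as from NAPI, which runs in softirq context; the process-context side must therefore use the _bh variants, or a softirq firing on the same CPU while the lock is held would spin forever. A minimal sketch of the rule:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);

/* Process-context path: disable bottom halves while holding the lock so
 * the softirq user below cannot preempt us on this CPU. */
static void process_side(void)
{
	spin_lock_bh(&shared_lock);
	/* ... post WQEs ... */
	spin_unlock_bh(&shared_lock);
}

/* Softirq (NAPI) path: already in BH context, a plain lock is enough. */
static void napi_side(void)
{
	spin_lock(&shared_lock);
	/* ... post WQEs ... */
	spin_unlock(&shared_lock);
}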
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f4861545b236..b140e13fdcc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -345,9 +345,6 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_sq_stats *stats;
struct mlx5e_sq_dma *dma;
- if (!wi->resync_dump_frag_page)
- return;
-
dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
stats = sq->stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ff4c740af10b..7521c9be735b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,12 +29,24 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ if (unlikely(wi->resync_dump_frag_page)) {
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma_fifo_cc);
+ return true;
+ }
+ return false;
+}
#else
-static inline void
-mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
- struct mlx5e_tx_wqe_info *wi,
- u32 *dma_fifo_cc)
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
{
+ return false;
}
#endif /* CONFIG_MLX5_EN_TLS */
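Folding the resync_dump_frag_page test into an inline try_ helper keeps the cheap check inlined at the call site and lets the !CONFIG_MLX5_EN_TLS stub constant-fold to false. A hypothetical call site (not the exact driver code) then reduces to:

/* In the TX completion loop -- illustrative only: */
if (mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc))
	continue; /* this completion belonged to a kTLS resync dump */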
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index b0c31d49ff8d..6982b193ee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -189,12 +189,10 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
- u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -246,9 +244,7 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+ mlx5e_sq_xmit_simple(sq, nskb, true);
return true;
@@ -274,6 +270,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (!datalen)
return true;
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
tls_ctx = tls_get_ctx(skb->sk);
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
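Two things happen to the TLS TX path here, as I read it: the resync skb now goes through mlx5e_sq_xmit_simple() instead of open-coding the producer-index and WQE fetch, and mlx5e_tx_mpwqe_ensure_complete() closes any open multi-packet WQE session before TLS touches the queue, so an injected packet starts a WQE of its own. A sketch of that guard, with assumed type and helper names:

struct txq { int mpwqe_session_open; /* ... */ };
struct pkt;

static void close_mpwqe_session(struct txq *sq);            /* assumed */
static void post_single_wqe(struct txq *sq, struct pkt *p); /* assumed */

/* Illustrative only: flush an open multi-packet session first, so the
 * out-of-band packet cannot be appended to it. */
static void xmit_out_of_band(struct txq *sq, struct pkt *resync_pkt)
{
	if (sq->mpwqe_session_open)
		close_mpwqe_session(sq);
	post_single_wqe(sq, resync_pkt);
}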
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 08270987c506..d25a56ec6876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,7 +32,7 @@
#include "en.h"
#include "en/port.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -243,7 +243,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
- fallthrough;
default:
return -EOPNOTSUPP;
}
@@ -1341,6 +1340,14 @@ static int mlx5e_set_tunable(struct net_device *dev,
return err;
}
+static void mlx5e_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, pause_stats);
+}
+
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam)
{
@@ -1901,7 +1908,7 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
-static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1913,7 +1920,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+ MLX5E_SET_PFLAG(&new_channels.params, flag, enable);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1924,6 +1931,16 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return err;
}
+static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+}
+
+static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
+}
+
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
@@ -1931,6 +1948,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_striding_rq", set_pflag_rx_striding_rq },
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
+ { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
};
static int mlx5e_handle_pflag(struct net_device *netdev,
@@ -2033,6 +2051,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
+ .get_pause_stats = mlx5e_get_pause_stats,
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
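With .get_pause_stats wired up here (the uplink representor gains the same op further down), the MAC pause counters become queryable over the ethtool netlink interface. Assuming a netlink-capable ethtool binary, the invocation should be:

ethtool --include-statistics -a eth0

The interface name is a placeholder and the flag spelling is from memory; older ioctl-only ethtool builds will simply not display the statistics.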
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 83c9b2bbc4af..b416a8ee2eed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -33,7 +33,7 @@
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
struct mlx5e_ethtool_rule {
struct list_head list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 42ec28e29834..ebce97921e03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -57,7 +57,7 @@
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
@@ -393,7 +393,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq)
{
@@ -419,9 +419,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->umem = umem;
+ rq->xsk_pool = xsk_pool;
- if (rq->umem)
+ if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
@@ -511,7 +511,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
- xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
+ xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
@@ -861,11 +861,11 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq)
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
{
int err;
- err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
+ err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
if (err)
return err;
@@ -893,6 +893,13 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ /* For CQE compression on striding RQ, use the stride index provided
+ * by HW if the capability is supported.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+ MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+
return 0;
err_destroy_rq:
@@ -970,7 +977,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_sq_param *param,
struct mlx5e_xdpsq *sq,
bool is_redirect)
@@ -986,9 +993,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->umem = umem;
+ sq->xsk_pool = xsk_pool;
- sq->stats = sq->umem ?
+ sq->stats = sq->xsk_pool ?
&c->priv->channel_stats[c->ix].xsksq :
is_redirect ?
&c->priv->channel_stats[c->ix].xdpsq :
@@ -1085,6 +1092,7 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
kvfree(sq->db.wqe_info);
+ kvfree(sq->db.skb_fifo);
kvfree(sq->db.dma_fifo);
}
@@ -1096,15 +1104,19 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa);
+ sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.skb_fifo)),
+ GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa);
- if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+ if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
+ sq->skb_fifo_mask = df_sz - 1;
return 0;
}
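Both the DMA FIFO and the new skb FIFO use df_sz entries with a mask of df_sz - 1, which presumes a power-of-two size: free-running producer/consumer counters map to slots with a single AND and never need an explicit wrap. A self-contained sketch of the idiom:

#include <stdint.h>

struct fifo {
	void **slots;   /* array of (mask + 1) entries, a power of two */
	uint32_t mask;  /* size - 1 */
	uint32_t pc;    /* producer counter, free-running */
	uint32_t cc;    /* consumer counter, free-running */
};

static void fifo_push(struct fifo *f, void *e)
{
	f->slots[f->pc++ & f->mask] = e; /* wraps implicitly via the mask */
}

static void *fifo_pop(struct fifo *f)
{
	return f->slots[f->cc++ & f->mask];
}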
@@ -1115,6 +1127,12 @@ static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
sq->stop_room = mlx5e_tls_get_stop_room(sq);
sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
+ /* An MPWQE can take up to the maximum-sized WQE, and on top of that
+ * all the normal stop room can be consumed if a new packet breaks the
+ * active MPWQE session and allocates its WQEs right away.
+ */
+ sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
if (WARN_ON(sq->stop_room >= sq_size)) {
netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
@@ -1156,6 +1174,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+ if (param->is_mpw)
+ set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
if (err)
return err;
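Stop room is the tail headroom at which the driver stops the queue so the worst-case WQE still fits. With SKB MPWQE enabled the reservation above is doubled: the open session can grow to a maximum-sized WQE, and the packet that breaks the session may immediately allocate another maximum-sized WQE of its own. A sketch of the arithmetic (the 16-BB maximum is an assumption, not read from the device):

#include <stdint.h>

#define MAX_WQE_BBS 16u /* assumed max WQE size in basic blocks (BBs) */

/* Stop the queue while fewer than stop_room BBs remain free, so the
 * largest WQE -- plus one more with MPWQE -- always fits. */
static int must_stop_queue(uint32_t free_bbs, uint32_t tls_room, int mpwqe)
{
	uint32_t stop_room = tls_room + MAX_WQE_BBS + (mpwqe ? MAX_WQE_BBS : 0);

	return free_bbs < stop_room;
}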
@@ -1449,13 +1469,13 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
}
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
+ err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
if (err)
return err;
@@ -1948,7 +1968,7 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
@@ -1972,7 +1992,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
- c->pdev = priv->mdev->device;
+ c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@@ -1987,9 +2007,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (unlikely(err))
goto err_napi_del;
- if (umem) {
- mlx5e_build_xsk_param(umem, &xsk);
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ if (xsk_pool) {
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
+ err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
if (unlikely(err))
goto err_close_queues;
}
@@ -2160,7 +2180,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
}
@@ -2176,7 +2196,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2188,7 +2208,7 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
- param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -2204,6 +2224,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
@@ -2223,6 +2244,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ bool hw_stridx = false;
void *cqc = param->cqc;
u8 log_cq_size;
@@ -2230,6 +2252,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -2237,7 +2260,8 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
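For context on the CSUM_STRIDX format selected above: with CQE compression a session delivers one full title CQE plus mini CQEs that carry only the varying fields. The driver used to reconstruct wqe_counter in software by accumulating consumed strides; with the stride-index format the mini CQE carries the index itself (see the mlx5e_decompress_cqe hunk in en_rx.c below). A simplified sketch, with an illustrative layout rather than the hardware's exact format:

#include <stdint.h>

struct mini_cqe { uint32_t byte_cnt; uint16_t checksum; uint16_t stridx; };

/* Pick the wqe counter while expanding a mini CQE: straight from the
 * mini CQE if the HW provides it, else from the SW running counter. */
static uint16_t wqe_counter_of(const struct mini_cqe *m,
			       uint16_t sw_running_ctr, int hw_stridx)
{
	return hw_stridx ? m->stridx : sw_running_ctr;
}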
@@ -2350,12 +2374,12 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
mlx5e_build_channel_param(priv, &chs->params, cparam);
for (i = 0; i < chs->num; i++) {
- struct xdp_umem *umem = NULL;
+ struct xsk_buff_pool *xsk_pool = NULL;
if (chs->params.xdp_prog)
- umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
+ xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -3222,8 +3246,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
- param->wq.buf_numa_node = dev_to_node(mdev->device);
- param->wq.db_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -3927,13 +3951,14 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
u16 ix;
for (ix = 0; ix < chs->params.num_channels; ix++) {
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
+ struct xsk_buff_pool *xsk_pool =
+ mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
- if (!umem)
+ if (!xsk_pool)
continue;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
@@ -4466,8 +4491,8 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -4758,6 +4783,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
/* XDP SQ */
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
@@ -5226,6 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
+ mlx5_vxlan_reset_to_default(mdev->vxlan);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e979bff64c49..67247c33b9fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -39,7 +39,6 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
-#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en/txrx.h"
@@ -288,6 +287,14 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
+static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, stats);
+}
+
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pauseparam)
{
@@ -362,23 +369,11 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_pause_stats = mlx5e_uplink_rep_get_pause_stats,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct mlx5e_priv *priv;
- u64 parent_id;
-
- priv = netdev_priv(dev);
-
- parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
-}
-
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
@@ -603,12 +598,13 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_core_dev *dev = priv->mdev;
- return &rpriv->dl_port;
+ return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
@@ -1198,63 +1194,13 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
-static bool
-is_devlink_port_supported(const struct mlx5_core_dev *dev,
- const struct mlx5e_rep_priv *rpriv)
-{
- return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
- rpriv->rep->vport == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
-}
-
-static int register_devlink_port(struct mlx5_core_dev *dev,
- struct mlx5e_rep_priv *rpriv)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct devlink_port_attrs attrs = {};
- struct netdev_phys_item_id ppid = {};
- unsigned int dl_port_index = 0;
- u16 pfnum;
-
- if (!is_devlink_port_supported(dev, rpriv))
- return 0;
-
- mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
- if (rep->vport == MLX5_VPORT_UPLINK) {
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pfnum;
- memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_set(&rpriv->dl_port, &attrs);
- } else if (rep->vport == MLX5_VPORT_PF) {
- memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum);
- } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
- memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
- pfnum, rep->vport - 1);
- }
- return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
-}
-
-static void unregister_devlink_port(struct mlx5_core_dev *dev,
- struct mlx5e_rep_priv *rpriv)
-{
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_unregister(&rpriv->dl_port);
-}
-
/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
const struct mlx5e_profile *profile;
struct mlx5e_rep_priv *rpriv;
+ struct devlink_port *dl_port;
struct net_device *netdev;
int nch, err;
@@ -1304,28 +1250,19 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- err = register_devlink_port(dev, rpriv);
- if (err) {
- netdev_warn(netdev, "Failed to register devlink port %d\n",
- rep->vport);
- goto err_neigh_cleanup;
- }
-
err = register_netdev(netdev);
if (err) {
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_devlink_cleanup;
+ goto err_neigh_cleanup;
}
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_type_eth_set(&rpriv->dl_port, netdev);
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_eth_set(dl_port, netdev);
return 0;
-err_devlink_cleanup:
- unregister_devlink_port(dev, rpriv);
-
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1349,12 +1286,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
+ struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_type_clear(&rpriv->dl_port);
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
- unregister_devlink_port(dev, rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == MLX5_VPORT_UPLINK)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 0d1562e20118..8932c387d46a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -101,7 +101,6 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
- struct devlink_port dl_port;
};
static inline
@@ -187,7 +186,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
- struct net_device *route_dev;
+ int route_dev_ifindex;
struct mlx5e_tc_tunnel *tunnel;
int reformat_type;
u8 flags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 64c8ac5eabf6..6628a0197b4e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
@@ -139,8 +138,17 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
title->check_sum = mini_cqe->checksum;
title->op_own &= 0xf0;
title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
- title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
+ /* The state bit being set implies the linked-list striding RQ wq
+ * type and that the HW stride index capability is supported.
+ */
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
+ title->wqe_counter = mini_cqe->stridx;
+ return;
+ }
+
+ /* HW stride index capability not supported */
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
@@ -282,8 +290,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- if (rq->umem)
- return mlx5e_xsk_page_alloc_umem(rq, dma_info);
+ if (rq->xsk_pool)
+ return mlx5e_xsk_page_alloc_pool(rq, dma_info);
else
return mlx5e_page_alloc_pool(rq, dma_info);
}
@@ -314,7 +322,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle)
{
- if (rq->umem)
+ if (rq->xsk_pool)
/* The `recycle` parameter is ignored, and the page is always
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
@@ -401,14 +409,14 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
int err;
int i;
- if (rq->umem) {
+ if (rq->xsk_pool) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
/* Check in advance that we have enough frames, instead of
* allocating one-by-one, failing and moving frames to the
* Reuse Ring.
*/
- if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
return -ENOMEM;
}
@@ -506,8 +514,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
/* Check in advance that we have enough frames, instead of allocating
* one-by-one, failing and moving frames to the Reuse Ring.
*/
- if (rq->umem &&
- unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ if (rq->xsk_pool &&
+ unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
@@ -755,7 +763,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
* the driver when it refills the Fill Ring.
* 2. Otherwise, busy poll by rescheduling the NAPI poll.
*/
- if (unlikely(alloc_err == -ENOMEM && rq->umem))
+ if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
return true;
return false;
@@ -1144,8 +1152,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
@@ -1184,7 +1192,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
while (byte_cnt) {
u16 frag_consumed_bytes =
@@ -1252,6 +1260,11 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
@@ -1399,7 +1412,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
if (unlikely(frag_offset >= PAGE_SIZE)) {
di++;
@@ -1451,8 +1464,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
@@ -1513,6 +1526,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
goto mpwrq_cqe_out;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto mpwrq_cqe_out;
+
napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
@@ -1566,7 +1584,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
- if (rq->xdp_prog)
+ if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(cqwq);
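The rcu_access_pointer() change at the end of the poll loop is the canonical way to test an RCU-managed pointer without dereferencing it, which is legal outside a read-side critical section; actually using the program would need rcu_dereference() under rcu_read_lock(). Minimal illustration:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct bpf_prog;
static struct bpf_prog __rcu *prog;

static bool prog_attached(void)
{
	return rcu_access_pointer(prog) != NULL; /* test only, no deref */
}

static void use_prog(void)
{
	struct bpf_prog *p;

	rcu_read_lock();
	p = rcu_dereference(prog); /* a dereference needs the read lock */
	if (p)
		; /* ... run p ... */
	rcu_read_unlock();
}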
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 46790216ce86..ce8ab1f01876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/udp.h>
@@ -115,7 +114,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f6383bc2bc3f..78f6a6f0a7e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -110,6 +110,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
@@ -365,6 +367,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_nop += sq_stats->nop;
+ s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
+ s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -689,6 +693,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+#define MLX5E_READ_CTR64_BE_F(ptr, c) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ pause_stats->tx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_transmitted);
+ pause_stats->rx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_received);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -1539,6 +1572,8 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
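MLX5E_READ_CTR64_BE_F leans on the PPCNT layout in which each 64-bit counter is exposed as _high/_low 32-bit halves; token-pasting c##_high points a single be64_to_cpu() load at the first half so the whole counter is read at once. A self-contained userspace analogue of the access:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Read a 64-bit big-endian counter at a byte offset into a register
 * dump (be64toh() standing in for the kernel's be64_to_cpu()). */
static uint64_t read_ctr64_be(const void *dump, size_t off)
{
	uint64_t be;

	memcpy(&be, (const char *)dump + off, sizeof(be));
	return be64toh(be);
}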
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 562263d62141..162daaadb0d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -105,6 +105,9 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats);
+
/* Concrete NIC Stats */
struct mlx5e_sw_stats {
@@ -118,6 +121,8 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
u64 tx_nop;
+ u64 tx_mpwqe_blks;
+ u64 tx_mpwqe_pkts;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_mcast_packets;
@@ -348,6 +353,8 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 mpwqe_blks;
+ u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1c93f92d9210..2e2fa0440032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -57,7 +57,6 @@
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -66,20 +65,11 @@
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-
-struct mlx5_nic_flow_attr {
- u32 action;
- u32 flow_tag;
- struct mlx5_modify_hdr *modify_hdr;
- u32 hairpin_tirn;
- u8 match_level;
- struct mlx5_flow_table *hairpin_ft;
- struct mlx5_fc *counter;
-};
-
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
@@ -153,11 +143,7 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
int tunnel_id; /* the mapped tunnel id of this flow */
-
- union {
- struct mlx5_esw_flow_attr esw_attr[0];
- struct mlx5_nic_flow_attr nic_attr[0];
- };
+ struct mlx5_flow_attr *attr;
};
struct mlx5e_tc_flow_parse_attr {
@@ -170,7 +156,7 @@ struct mlx5e_tc_flow_parse_attr {
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
@@ -191,6 +177,16 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
+ /* For NIC rules we store the restore metadata directly
+ * into reg_b that is passed to SW since we don't
+ * jump between steering domains.
+ */
+ [NIC_CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
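Each mapping entry describes where a software value lives in a metadata register: mfield selects the register a modify-header action writes, moffset the byte offset, and mlen the width -- here 2 bytes at offset 0 of reg_b for the NIC chain id, which the RX side reads back from the CQE (see the mlx5e_tc_update_skb calls added in en_rx.c above). A rough sketch of the extraction, with the caveat that the driver's real byte ordering may differ:

#include <stdint.h>

struct reg_mapping { uint8_t moffset; uint8_t mlen; };

/* Illustrative only: pull an mlen-byte value at byte moffset out of a
 * 32-bit metadata register, LSB-first for simplicity. */
static uint32_t reg_extract(uint32_t reg, const struct reg_mapping *m)
{
	uint32_t mask = m->mlen >= 4 ? 0xffffffffu
				     : ((1u << (8 * m->mlen)) - 1);

	return (reg >> (8 * m->moffset)) & mask;
}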
@@ -244,6 +240,7 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
@@ -253,8 +250,7 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_hdr_acts);
+ err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
if (err)
return err;
@@ -275,6 +271,54 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
return 0;
}
+#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+
+static struct mlx5_tc_ct_priv *
+get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (esw_offloads_mode(esw)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->ct_priv;
+ }
+
+ return priv->fs.tc.ct;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw))
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+}
+
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw)) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ return;
+ }
+
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+}
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -370,7 +414,7 @@ static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
-static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
}
@@ -415,10 +459,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
return PTR_ERR(mh);
modify_hdr = mlx5e_mod_hdr_get(mh);
- if (mlx5e_is_eswitch_flow(flow))
- flow->esw_attr->modify_hdr = modify_hdr;
- else
- flow->nic_attr->modify_hdr = modify_hdr;
+ flow->attr->modify_hdr = modify_hdr;
flow->mh = mh;
return 0;
@@ -858,9 +899,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
- flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
- flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
flow->hpe = hpe;
@@ -890,129 +931,212 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
flow->hpe = NULL;
}
-static int
-mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
};
- struct mlx5_fc *counter = NULL;
- int err, dest_ix = 0;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
- flow_context->flow_tag = attr->flow_tag;
+ flow_context->flow_tag = nic_attr->flow_tag;
- if (flow_flag_test(flow, HAIRPIN)) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err)
- return err;
-
- if (flow_flag_test(flow, HAIRPIN_RSS)) {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = attr->hairpin_ft;
- } else {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest[dest_ix].tir_num = attr->hairpin_tirn;
- }
+ if (attr->dest_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->dest_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = nic_attr->hairpin_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_tirn) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
dest_ix++;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ if (attr->dest_chain) {
+ dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+ if (IS_ERR(dest[dest_ix].ft))
+ return ERR_CAST(dest[dest_ix].ft);
+ } else {
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ }
dest_ix++;
}
+ if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
+ dest_ix++;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ flow_act.modify_hdr = attr->modify_hdr;
+
+ mutex_lock(&tc->t_lock);
+ if (IS_ERR_OR_NULL(tc->t)) {
+ /* Create the root table here if it doesn't exist yet */
+ tc->t =
+ mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
+
+ if (IS_ERR(tc->t)) {
+ mutex_unlock(&tc->t_lock);
+ netdev_err(priv->netdev,
+ "Failed to create tc offload table\n");
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_ft_get;
+ }
+ }
+ mutex_unlock(&tc->t_lock);
+
+ if (attr->chain || attr->prio)
+ ft = mlx5_chains_get_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+ else
+ ft = attr->ft;
+
+ if (IS_ERR(ft)) {
+ rule = ERR_CAST(ft);
+ goto err_ft_get;
+ }
+
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dest, dest_ix);
+ if (IS_ERR(rule))
+ goto err_rule;
+
+ return rule;
+
+err_rule:
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+err_ft_get:
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+
+ return ERR_CAST(rule);
+}
+
+static int
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_fc *counter = NULL;
+ int err;
+
+ if (flow_flag_test(flow, HAIRPIN)) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
+ if (err)
+ return err;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(counter);
- dest_ix++;
attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- flow_act.modify_hdr = attr->modify_hdr;
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
- mutex_lock(&priv->fs.tc.t_lock);
- if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- struct mlx5_flow_table_attr ft_attr = {};
- int tc_grp_size, tc_tbl_size, tc_num_grps;
- u32 max_flow_counter;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-
- tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
-
- tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
- BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
- tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
-
- ft_attr.prio = MLX5E_TC_PRIO;
- ft_attr.max_fte = tc_tbl_size;
- ft_attr.level = MLX5E_TC_FT_LEVEL;
- ft_attr.autogroup.max_num_groups = tc_num_grps;
- priv->fs.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- &ft_attr);
- if (IS_ERR(priv->fs.tc.t)) {
- mutex_unlock(&priv->fs.tc.t_lock);
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table");
- netdev_err(priv->netdev,
- "Failed to create tc offload table\n");
- return PTR_ERR(priv->fs.tc.t);
- }
- }
+ if (flow_flag_test(flow, CT))
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ attr, &parse_attr->mod_hdr_acts);
+ else
+ flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
+ attr);
- if (attr->match_level != MLX5_MATCH_NONE)
- parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ return PTR_ERR_OR_ZERO(flow->rule[0]);
+}
- flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, dest, dest_ix);
- mutex_unlock(&priv->fs.tc.t_lock);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
- return PTR_ERR_OR_ZERO(flow->rule[0]);
+ mlx5_del_flow_rules(rule);
+
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
- counter = attr->counter;
- if (!IS_ERR_OR_NULL(flow->rule[0]))
- mlx5_del_flow_rules(flow->rule[0]);
- mlx5_fc_destroy(priv->mdev, counter);
+ flow_flag_clear(flow, OFFLOADED);
+
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
+ /* Remove the root table if no rules are left, to avoid
+ * extra steering hops.
+ */
mutex_lock(&priv->fs.tc.t_lock);
- if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
+ !IS_ERR_OR_NULL(tc->t)) {
+ mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
+ kvfree(attr->parse_attr);
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+ mlx5_fc_destroy(priv->mdev, attr->counter);
+
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+
+ kfree(flow->attr);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1035,7 +1159,7 @@ static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
@@ -1043,7 +1167,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ flow, spec, attr,
mod_hdr_acts);
}
@@ -1051,7 +1176,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
return rule;
- if (attr->split_count) {
+ if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -1065,16 +1190,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
}
- if (attr->split_count)
+ if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -1085,18 +1210,24 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr)
+ return ERR_PTR(-ENOMEM);
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
+ kfree(slow_attr);
+
return rule;
}
@@ -1104,14 +1235,21 @@ static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr) {
+ mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
+ return;
+ }
+
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
+ kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
@@ -1169,9 +1307,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
@@ -1180,7 +1319,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
- if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
@@ -1191,14 +1330,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
- max_chain = mlx5_esw_chains_get_chain_range(esw);
+ max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
return -EOPNOTSUPP;
}
- max_prio = mlx5_esw_chains_get_prio_range(esw);
+ max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
@@ -1211,10 +1350,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return err;
}
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
- if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
@@ -1227,8 +1369,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->dests[out_index].rep = rpriv->rep;
- attr->dests[out_index].mdev = out_priv->mdev;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -1244,7 +1386,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(attr->counter_dev, true);
+ counter = mlx5_fc_create(esw_attr->counter_dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
@@ -1270,7 +1412,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
- struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+ struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -1285,7 +1427,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
int out_index;
mlx5e_put_flow_tunnel_id(flow);
@@ -1306,22 +1448,24 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
kvfree(attr->parse_attr);
- mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr);
+ mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(attr->counter_dev, attr->counter);
+ mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+
+ kfree(flow->attr);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1331,6 +1475,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1353,8 +1498,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!mlx5e_is_offloaded_flow(flow))
continue;
- esw_attr = flow->esw_attr;
- spec = &esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
@@ -1374,7 +1520,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!all_flow_encaps_valid)
continue;
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -1394,7 +1540,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1402,12 +1550,14 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow))
continue;
- spec = &flow->esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1416,7 +1566,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1429,10 +1579,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (mlx5e_is_eswitch_flow(flow))
- return flow->esw_attr->counter;
- else
- return flow->nic_attr->counter;
+ return flow->attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
@@ -1798,11 +1945,11 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
bool enc_opts_is_dont_care = true;
@@ -1866,7 +2013,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
} else {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
- mod_hdr_acts,
+ mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
TUNNEL_TO_REG, value);
if (err)
goto err_set;
@@ -1952,8 +2099,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if (!mlx5e_is_eswitch_flow(flow))
return -EOPNOTSUPP;
- needs_mapping = !!flow->esw_attr->chain;
- sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ needs_mapping = !!flow->attr->chain;
+ sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
*match_inner = !needs_mapping;
if ((needs_mapping || sets_mapping) &&
@@ -1965,7 +2112,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow->esw_attr->chain) {
+ if (!flow->attr->chain) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
@@ -1980,7 +2127,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
* object
*/
if (!netif_is_bareudp(filter_dev))
- flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
@@ -2483,12 +2630,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (is_eswitch_flow) {
- flow->esw_attr->inner_match_level = inner_match_level;
- flow->esw_attr->outer_match_level = outer_match_level;
- } else {
- flow->nic_attr->match_level = non_tunnel_match_level;
- }
+ flow->attr->inner_match_level = inner_match_level;
+ flow->attr->outer_match_level = outer_match_level;
+
return err;
}
@@ -2614,6 +2758,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3090,7 +3235,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
* we can't restore ct state
*/
if (!ct_clear && modify_tuple &&
- mlx5_tc_ct_add_no_trk_match(priv, spec)) {
+ mlx5_tc_ct_add_no_trk_match(spec)) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload tuple modify header with ct matches");
netdev_info(priv->netdev,
@@ -3121,12 +3266,13 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
bool ct_flow = false, ct_clear = false;
u32 actions;
+ ct_clear = flow->attr->ct_attr.ct_action &
+ TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+ actions = flow->attr->action;
+
if (mlx5e_is_eswitch_flow(flow)) {
- actions = flow->esw_attr->action;
- ct_clear = flow->esw_attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
- ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- if (flow->esw_attr->split_count && ct_flow) {
+ if (flow->attr->esw_attr->split_count && ct_flow) {
/* All registers used by ct are cleared when using
* split rules.
*/
@@ -3134,8 +3280,6 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
"Can't offload mirroring with action ct");
return false;
}
- } else {
- actions = flow->nic_attr->action;
}
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -3233,15 +3377,67 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
extack);
}
+static int validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
const struct flow_action_entry *act;
+ struct mlx5_nic_flow_attr *nic_attr;
u32 action = 0;
int err, i;
@@ -3252,7 +3448,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
- attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ nic_attr = attr->nic_attr;
+
+ nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -3273,8 +3471,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -3319,10 +3516,26 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
- attr->flow_tag = mark;
+ nic_attr->flow_tag = mark;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
break;
+ case FLOW_ACTION_GOTO:
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
+ if (err)
+ return err;
+
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3345,6 +3558,18 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
attr->action = action;
+
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3476,8 +3701,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
bool *encap_valid)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct encap_key key;
struct mlx5e_encap_entry *e;
@@ -3563,8 +3788,8 @@ attach_flow:
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->dests[out_index].pkt_reformat = e->pkt_reformat;
- attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
+ attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
*encap_valid = false;
@@ -3591,14 +3816,14 @@ static int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
@@ -3740,7 +3965,7 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
}
static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct net_device **out_dev,
u32 *action)
{
@@ -3753,7 +3978,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
@@ -3766,7 +3991,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
u32 *action)
{
struct flow_action_entry vlan_act = {
@@ -3777,7 +4002,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
}
@@ -3838,59 +4063,20 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
-static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
-{
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!mlx5_esw_chains_backwards_supported(esw) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
-
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non-encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
- * Input vport was stored esw_attr->in_rep.
+ * Input vport was stored in attr->in_rep.
* In LAG case, *priv* is the private data of
* the uplink, which may not be the input vport.
*/
@@ -3925,13 +4111,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ struct mlx5_esw_flow_attr *esw_attr;
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
@@ -3944,12 +4131,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_TRAP:
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "action trap is supported as a sole action only");
+ return -EOPNOTSUPP;
+ }
+ action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ break;
case FLOW_ACTION_MPLS_PUSH:
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
reformat_l2_to_l3_tunnel) ||
@@ -3990,7 +4190,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
}
break;
case FLOW_ACTION_CSUM:
@@ -4027,27 +4227,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ esw_attr->out_count);
return -EOPNOTSUPP;
}
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
+ parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
+ parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
- attr->dests[attr->out_count].flags |=
+ esw_attr->dests[esw_attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
+ esw_attr->out_count++;
/* attr->dests[].rep is resolved when we
* handle encap
*/
@@ -4096,9 +4296,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->dests[attr->out_count].rep = rpriv->rep;
- attr->dests[attr->out_count].mdev = out_priv->mdev;
- attr->out_count++;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -4136,12 +4336,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action);
}
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -4151,14 +4351,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
decap = true;
break;
case FLOW_ACTION_GOTO:
- err = mlx5_validate_goto_chain(esw, flow, act, action,
- extack);
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
if (err)
return err;
@@ -4166,7 +4366,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
if (err)
return err;
@@ -4205,7 +4405,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- attr->split_count = 0;
+ esw_attr->split_count = 0;
}
}
@@ -4245,7 +4445,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -4296,25 +4496,37 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
- bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
+ bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
if (!esw_paired)
return false;
- if ((mlx5_lag_is_sriov(attr->in_mdev) ||
- mlx5_lag_is_multipath(attr->in_mdev)) &&
+ if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
+ mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
(is_rep_ingress || act_is_encap))
return true;
return false;
}
+struct mlx5_flow_attr *
+mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
+{
+ u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
+ sizeof(struct mlx5_esw_flow_attr) :
+ sizeof(struct mlx5_nic_flow_attr);
+ struct mlx5_flow_attr *attr;
+
+ return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+}
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
@@ -4322,19 +4534,26 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr;
struct mlx5e_tc_flow *flow;
- int out_index, err;
+ int err = -ENOMEM;
+ int out_index;
- flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
- if (!parse_attr || !flow) {
- err = -ENOMEM;
+ if (!parse_attr || !flow)
goto err_free;
- }
- flow->cookie = f->cookie;
flow->flags = flow_flags;
+ flow->cookie = f->cookie;
flow->priv = priv;
+
+ attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ if (!attr)
+ goto err_free;
+
+ flow->attr = attr;
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
@@ -4354,7 +4573,17 @@ err_free:
}
static void
-mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
+mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct flow_cls_offload *f)
+{
+ attr->parse_attr = parse_attr;
+ attr->chain = f->common.chain_index;
+ attr->prio = f->common.prio;
+}
+
+static void
+mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct flow_cls_offload *f,
@@ -4362,10 +4591,9 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
struct mlx5_core_dev *in_mdev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- esw_attr->parse_attr = parse_attr;
- esw_attr->chain = f->common.chain_index;
- esw_attr->prio = f->common.prio;
+ mlx5e_flow_attr_init(attr, parse_attr, f);
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
@@ -4399,7 +4627,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
- mlx5e_flow_esw_attr_init(flow->esw_attr,
+ mlx5e_flow_esw_attr_init(flow->attr,
priv, parse_attr,
f, in_rep, in_mdev);
@@ -4409,8 +4637,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
- err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f,
- &flow->esw_attr->ct_attr, extack);
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
if (err)
goto err_free;
@@ -4430,6 +4658,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4441,6 +4670,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *peer_urpriv;
@@ -4460,15 +4690,15 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
- if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
- parse_attr = flow->esw_attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
parse_attr->filter_dev,
- flow->esw_attr->in_rep, in_mdev);
+ attr->in_rep, in_mdev);
if (IS_ERR(peer_flow)) {
err = PTR_ERR(peer_flow);
goto out;
@@ -4532,9 +4762,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- /* multi-chain not supported for NIC rules */
- if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ return -EOPNOTSUPP;
+ } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
return -EOPNOTSUPP;
+ }
flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
@@ -4544,11 +4777,18 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
+ mlx5e_flow_attr_init(flow->attr, parse_attr, f);
+
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
+ if (err)
+ goto err_free;
+
err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -4558,14 +4798,13 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto err_free;
flow_flag_set(flow, OFFLOADED);
- kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
- kvfree(parse_attr);
out:
return err;
}
@@ -4940,9 +5179,27 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
+{
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
+ return tc_tbl_size;
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_chains_attr attr = {};
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -4954,6 +5211,27 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err)
return err;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ }
+ attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
+ attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
+ attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
+ attr.default_ft = priv->fs.vlan.ft.t;
+
+ tc->chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(tc->chains)) {
+ err = PTR_ERR(tc->chains);
+ goto err_chains;
+ }
+
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+ if (IS_ERR(tc->ct))
+ goto err_ct;
+
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -4961,8 +5239,17 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ goto err_reg;
}
+ return 0;
+
+err_reg:
+ mlx5_tc_ct_clean(tc->ct);
+err_ct:
+ mlx5_chains_destroy(tc->chains);
+err_chains:
+ rhashtable_destroy(&tc->ht);
return err;
}
@@ -4987,28 +5274,38 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
mutex_destroy(&tc->hairpin_tbl_lock);
- rhashtable_destroy(&tc->ht);
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
- mlx5_destroy_flow_table(tc->t);
+ mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
tc->t = NULL;
}
mutex_destroy(&tc->t_lock);
+
+ mlx5_tc_ct_clean(tc->ct);
+ mlx5_chains_destroy(tc->chains);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *priv;
+ struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
- int err;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int err = 0;
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
- priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
- err = mlx5_tc_ct_init(uplink_priv);
- if (err)
+ uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
+ esw_chains(esw),
+ &esw->offloads.mod_hdr,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(uplink_priv->ct_priv))
goto err_ct;
mapping = mapping_create(sizeof(struct tunnel_match_key),
@@ -5037,7 +5334,7 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
err_ct:
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5051,10 +5348,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -5119,3 +5417,44 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EOPNOTSUPP;
}
}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, chain_tag, reg_b, zone_restore_id;
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct tc_skb_ext *tc_skb_ext;
+ int err;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+
+ err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ chain_tag, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (WARN_ON(!tc_skb_ext))
+ return false;
+
+ tc_skb_ext->chain = chain;
+
+ zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ ZONE_RESTORE_MAX;
+
+ if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
+ zone_restore_id))
+ return false;
+ }
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
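
(A minimal sketch of how the restore helper added above might be consumed from
the RX datapath; rx_restore_tc_state() is a hypothetical wrapper, not part of
this patch, and mlx5e_cqe_regb_chain() is the inline gate added in the en_tc.h
hunk below:)

	/* Check the CQE's reg_b metadata for a chain tag and, if present,
	 * restore the tc chain and CT zone onto the skb. A false return
	 * means restore failed and the caller should drop the packet.
	 */
	static inline bool rx_restore_tc_state(struct mlx5_cqe64 *cqe,
					       struct sk_buff *skb)
	{
		if (!mlx5e_cqe_regb_chain(cqe))
			return true;	/* no chain tag encoded in reg_b */

		return mlx5e_tc_update_skb(cqe, skb);
	}
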
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 437f680728fd..3b979008143d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -35,17 +35,57 @@
#include <net/pkt_cls.h>
#include "en.h"
+#include "eswitch.h"
+#include "en/tc_ct.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
+#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_nic_flow_attr))
+#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_esw_flow_attr))
+#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
+ ESW_FLOW_ATTR_SZ :\
+ NIC_FLOW_ATTR_SZ)
+
+
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *tun_dev;
};
+struct mlx5_nic_flow_attr {
+ u32 flow_tag;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
+};
+
+struct mlx5_flow_attr {
+ u32 action;
+ struct mlx5_fc *counter;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_ct_attr ct_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 chain;
+ u16 prio;
+ u32 dest_chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *dest_ft;
+ u8 inner_match_level;
+ u8 outer_match_level;
+ u32 flags;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
+#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
+
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct tunnel_match_key {
@@ -90,6 +130,7 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -133,6 +174,8 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
+ NIC_CHAIN_TO_REG,
+ NIC_ZONE_RESTORE_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -150,6 +193,7 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data);
@@ -181,14 +225,42 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
+
#endif /* CONFIG_MLX5_CLS_ACT */
+struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -203,4 +275,29 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain, reg_b;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ if (chain)
+ return true;
+#endif
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{ return false; }
+static inline bool
+mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{ return true; }
+#endif
+
#endif /* __MLX5_EN_TC_H__ */
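
(A minimal usage sketch for the unified attribute layout declared above,
assuming the mlx5_alloc_flow_attr() helper from en_tc.c; error handling is
elided and the field values are placeholders:)

	struct mlx5_flow_attr *attr;

	/* One kzalloc of ns_to_attr_sz(ns) bytes covers the common struct
	 * plus the namespace-specific tail selected by the zero-length
	 * union: esw_attr[0] for FDB rules, nic_attr[0] for NIC rules.
	 */
	attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!attr)
		return -ENOMEM;

	attr->chain = 0;			/* common fields */
	attr->esw_attr->split_count = 0;	/* FDB-specific tail */

	kfree(attr);
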
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index da596de3abba..82b4419af9d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -144,9 +144,29 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
+/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+static void
+ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (skb->encapsulation) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ } else {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial++;
+ }
+}
+
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+ return;
+ }
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -232,131 +252,188 @@ dma_unmap_wqe_err:
return -ENOMEM;
}
+struct mlx5e_tx_attr {
+ u32 num_bytes;
+ u16 headlen;
+ u16 ihs;
+ __be16 mss;
+ u16 insz;
+ u8 opcode;
+};
+
+struct mlx5e_tx_wqe_attr {
+ u16 ds_cnt;
+ u16 ds_cnt_inl;
+ u16 ds_cnt_ids;
+ u8 num_wqebbs;
+};
+
+static u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel)
+{
+ u8 mode;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ if (accel && accel->tls.tls_tisn)
+ return MLX5_INLINE_MODE_TCP_UDP;
+#endif
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
+}
+
+static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
+ struct mlx5e_tx_attr *attr)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+
+ if (skb_is_gso(skb)) {
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_LSO,
+ .mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
+ .ihs = ihs,
+ .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets += skb_shinfo(skb)->gso_segs;
+ } else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
+ u16 ihs = mlx5e_calc_min_inline(mode, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_SEND,
+ .mss = cpu_to_be16(0),
+ .ihs = ihs,
+ .num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets++;
+ }
+
+ attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ stats->bytes += attr->num_bytes;
+}
+
+static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
+ u16 ds_cnt_inl = 0;
+ u16 ds_cnt_ids = 0;
+
+ if (attr->insz)
+ ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
+ MLX5_SEND_WQE_DS);
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ if (skb_vlan_tag_present(skb))
+ inl += VLAN_HLEN;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .ds_cnt_ids = ds_cnt_ids,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
+
+static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
+{
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats->stopped++;
+ }
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
+ const struct mlx5e_tx_attr *attr,
+ const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
bool send_doorbell;
- wi->num_bytes = num_bytes;
- wi->num_dma = num_dma;
- wi->num_wqebbs = num_wqebbs;
- wi->skb = skb;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = skb,
+ .num_bytes = attr->num_bytes,
+ .num_dma = num_dma,
+ .num_wqebbs = wqe_attr->num_wqebbs,
+ .num_fifo_pkts = 0,
+ };
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ mlx5e_tx_skb_update_hwts_flags(skb);
sq->pc += wi->num_wqebbs;
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
- netif_tx_stop_queue(sq->txq);
- sq->stats->stopped++;
- }
- send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
- xmit_more);
+ mlx5e_tx_check_stop(sq);
+
+ send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+static void
+mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, contig_wqebbs_room;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u32 num_bytes;
int num_dma;
- __be16 mss;
-
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
-
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
-
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
-#ifdef CONFIG_MLX5_EN_IPSEC
- struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
-#endif
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-#ifdef CONFIG_MLX5_EN_IPSEC
- wqe->eth = cur_eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- wqe->ctrl = cur_ctrl;
-#endif
- }
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
eseg = &wqe->eth;
dseg = wqe->data;
-#if IS_ENABLED(CONFIG_GENEVE)
- if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
-#endif
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ eseg->mss = attr->mss;
- eseg->mss = mss;
-
- if (ihs) {
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ if (attr->ihs) {
if (skb_vlan_tag_present(skb)) {
- ihs -= VLAN_HLEN;
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
+ eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
stats->added_vlan_packets++;
} else {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
+ eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
}
- dseg += ds_cnt_inl;
+ dseg += wqe_attr->ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
@@ -365,12 +442,13 @@ void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->added_vlan_packets++;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ dseg += wqe_attr->ds_cnt_ids;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+ attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
return;
@@ -379,10 +457,173 @@ err_drop:
dev_kfree_skb_any(skb);
}
+static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
+{
+ return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
+ !attr->insz;
+}
+
+static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+
+ /* Assumes the session is already running and has at least one packet. */
+ return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+}
+
+static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ prefetchw(wqe->data);
+
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = 0,
+ };
+
+ memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+
+ sq->stats->mpwqe_blks++;
+}
+
+static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
+{
+ return sq->mpwqe.wqe;
+}
+
+static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+
+ session->pkt_count++;
+ session->bytes_count += txd->len;
+
+ dseg->addr = cpu_to_be64(txd->dma_addr);
+ dseg->byte_count = cpu_to_be32(txd->len);
+ dseg->lkey = sq->mkey_be;
+ session->ds_count++;
+
+ sq->stats->mpwqe_pkts++;
+}
+
+static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ u8 ds_count = session->ds_count;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_tx_wqe_info *wi;
+ u16 pi;
+
+ cseg = &session->wqe->ctrl;
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = NULL,
+ .num_bytes = session->bytes_count,
+ .num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
+ .num_dma = session->pkt_count,
+ .num_fifo_pkts = session->pkt_count,
+ };
+
+ sq->pc += wi->num_wqebbs;
+
+ session->wqe = NULL;
+
+ mlx5e_tx_check_stop(sq);
+
+ return cseg;
+}
+
+static void
+mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
+{
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_xmit_data txd;
+
+ if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
+ mlx5e_tx_mpwqe_session_complete(sq);
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ }
+
+ sq->stats->xmit_more += xmit_more;
+
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+ mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+
+ mlx5e_skb_fifo_push(sq, skb);
+
+ mlx5e_tx_mpwqe_add_dseg(sq, &txd);
+
+ mlx5e_tx_skb_update_hwts_flags(skb);
+
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
+ /* Might stop the queue, but we were asked to ring the doorbell anyway. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ }
+
+ return;
+
+err_unmap:
+ mlx5e_dma_unmap_wqe_err(sq, 1);
+ sq->stats->dropped++;
+ dev_kfree_skb_any(skb);
+}
+
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
+{
+ /* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
+ if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
+ mlx5e_tx_mpwqe_session_complete(sq);
+}
+
+static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ return false;
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ return true;
+}
+
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_accel_tx_state accel = {};
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
@@ -391,21 +632,92 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
- goto out;
+ return NETDEV_TX_OK;
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
+
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
+ if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
+ struct mlx5_wqe_eth_seg eseg = {};
+
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+ return NETDEV_TX_OK;
+
+ mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
+ return NETDEV_TX_OK;
+ }
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+ }
+
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
/* May update the WQE, but may not post other WQEs. */
- if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
- goto out;
+ mlx5e_accel_tx_finish(sq, wqe, &accel,
+ (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+ return NETDEV_TX_OK;
- mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
-out:
return NETDEV_TX_OK;
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
+{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
+}
+
+static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ int i;
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+}
+
+static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps hwts = {};
+ u64 ts = get_cqe_ts(cqe);
+
+ hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
+ skb_tstamp_tx(skb, &hwts);
+ }
+
+ napi_consume_skb(skb, napi_budget);
+}
+
+static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++) {
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);
+
+ mlx5e_consume_skb(sq, skb, cqe, napi_budget);
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq_stats *stats;
@@ -451,42 +763,33 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct sk_buff *skb;
- int j;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (unlikely(!skb)) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
- continue;
- }
+ sqcc += wi->num_wqebbs;
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP)) {
- struct skb_shared_hwtstamps hwts = {};
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);
- hwts.hwtstamp =
- mlx5_timecounter_cyc2time(sq->clock,
- get_cqe_ts(cqe));
- skb_tstamp_tx(skb, &hwts);
+ npkts++;
+ nbytes += wi->num_bytes;
+ continue;
}
- for (j = 0; j < wi->num_dma; j++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
+ &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
- napi_consume_skb(skb, napi_budget);
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
@@ -525,13 +828,19 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++)
+ dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
+}
+
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
u32 dma_fifo_cc, nbytes = 0;
u16 ci, sqcc, npkts = 0;
- struct sk_buff *skb;
- int i;
sqcc = sq->cc;
dma_fifo_cc = sq->dma_fifo_cc;
@@ -539,25 +848,28 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
while (sqcc != sq->pc) {
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (!skb) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
+ sqcc += wi->num_wqebbs;
+
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ dev_kfree_skb_any(wi->skb);
+
+ npkts++;
+ nbytes += wi->num_bytes;
continue;
}
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);
- dev_kfree_skb_any(skb);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
}
sq->dma_fifo_cc = dma_fifo_cc;
@@ -576,9 +888,34 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
+static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
+ u16 ds_cnt_inl = 0;
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;
+
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
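For intuition, a worked instance of the arithmetic above, using the usual mlx5 constants (MLX5_SEND_WQE_DS = 16 bytes, MLX5_SEND_WQEBB_NUM_DS = 4, INL_HDR_START_SZ = 2); the base segment count is illustrative, not taken from the struct definition:

	/* Illustrative only: an skb with ihs = 18 inlined bytes, a non-empty
	 * linear headlen and nr_frags = 2, on a WQE whose fixed ctrl/datagram/
	 * eth segments take 5 data segments (DS) of 16 bytes each:
	 *
	 *   ds_cnt     = 5 + 1 (headlen) + 2 (frags)  = 8
	 *   inl        = 18 - INL_HDR_START_SZ        = 16
	 *   ds_cnt_inl = DIV_ROUND_UP(16, 16)         = 1  ->  ds_cnt = 9
	 *   num_wqebbs = DIV_ROUND_UP(9, 4)           = 3 WQEBBs
	 */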
+
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -588,47 +925,17 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u16 headlen, ihs, pi;
- u32 num_bytes;
int num_dma;
- __be16 mss;
+ u16 pi;
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
- wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
@@ -640,20 +947,20 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
- eseg->mss = mss;
+ eseg->mss = attr.mss;
- if (ihs) {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- dseg += ds_cnt_inl;
+ if (attr.ihs) {
+ memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
+ dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+ attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 22a19d391e17..8ebfe782f95e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -828,8 +828,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
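The tasklet_init() -> tasklet_setup() change above follows the kernel-wide callback-signature migration: the callback now receives the tasklet itself rather than an opaque unsigned long, and recovers its context via from_tasklet(), a container_of() wrapper. A generic sketch of the pattern (my_ctx/my_cb/my_init are illustrative names, not driver code):

	#include <linux/interrupt.h>

	struct my_ctx {
		struct tasklet_struct task;
		/* driver state ... */
	};

	static void my_cb(struct tasklet_struct *t)
	{
		/* from_tasklet(ptr, tasklet, member) is container_of() */
		struct my_ctx *ctx = from_tasklet(ctx, t, task);

		/* process ctx ... */
	}

	static void my_init(struct my_ctx *ctx)
	{
		/* old: tasklet_init(&ctx->task, my_cb, (unsigned long)ctx) */
		tasklet_setup(&ctx->task, my_cb);
	}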
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 07b2acd7e6b3..c3faae67e4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -148,6 +148,11 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
+static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int table_size = 0;
@@ -157,6 +162,9 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
!MLX5_CAP_GEN(esw->dev, prio_tag_required))
return 0;
+ if (!esw_acl_egress_needed(esw, vport->vport))
+ return 0;
+
esw_acl_egress_ofld_rules_destroy(vport);
if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
deleted file mode 100644
index d5bf908dfecd..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ /dev/null
@@ -1,944 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2020 Mellanox Technologies.
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#include <linux/mlx5/fs.h>
-
-#include "esw/chains.h"
-#include "en/mapping.h"
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "eswitch.h"
-#include "en.h"
-#include "en_tc.h"
-
-#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
-#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
-#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
-#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
-#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
-#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
-#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
-#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
-#define fdb_ignore_flow_level_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-#define fdb_modify_header_fwd_to_table_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
-
-/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE); this region is duplicated
- * for each flow table pool. We can allocate up to 16M from each pool,
- * and we keep track of how much we have used via get_next_avail_sz_from_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOLS is expected to be sorted from large to small and match firmware
- * pools.
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
-#define ESW_FT_TBL_SZ (64 * 1024)
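Concretely, the bookkeeping the comment above describes gives each size class ESW_SIZE / ESW_POOLS[i] tables, zeroed for classes above the device's reported max FT size. A worked illustration (assuming log_max_ft_size reports at least 4M):

	/* fdb_pool_left[] after mlx5_esw_chains_init_sz_pool():
	 *   4M pool:  16M / 4M  = 4 tables
	 *   1M pool:  16M / 1M  = 16 tables
	 *   64K pool: 16M / 64K = 256 tables
	 *   128 pool: 16M / 128 = 131072 tables
	 */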
-
-struct mlx5_esw_chains_priv {
- struct rhashtable chains_ht;
- struct rhashtable prios_ht;
- /* Protects above chains_ht and prios_ht */
- struct mutex lock;
-
- struct mlx5_flow_table *tc_end_fdb;
- struct mapping_ctx *chains_mapping;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
-};
-
-struct fdb_chain {
- struct rhash_head node;
-
- u32 chain;
-
- int ref;
- int id;
-
- struct mlx5_eswitch *esw;
- struct list_head prios_list;
- struct mlx5_flow_handle *restore_rule;
- struct mlx5_modify_hdr *miss_modify_hdr;
-};
-
-struct fdb_prio_key {
- u32 chain;
- u32 prio;
- u32 level;
-};
-
-struct fdb_prio {
- struct rhash_head node;
- struct list_head list;
-
- struct fdb_prio_key key;
-
- int ref;
-
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *next_fdb;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
-};
-
-static const struct rhashtable_params chain_params = {
- .head_offset = offsetof(struct fdb_chain, node),
- .key_offset = offsetof(struct fdb_chain, chain),
- .key_len = sizeof_field(struct fdb_chain, chain),
- .automatic_shrinking = true,
-};
-
-static const struct rhashtable_params prio_params = {
- .head_offset = offsetof(struct fdb_prio, node),
- .key_offset = offsetof(struct fdb_prio, key),
- .key_len = sizeof_field(struct fdb_prio, key),
- .automatic_shrinking = true,
-};
-
-bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
-{
- return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
-}
-
-bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_prios_supported(esw) &&
- fdb_ignore_flow_level_supported(esw);
-}
-
-u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX - 1;
-
- return FDB_TC_MAX_CHAIN;
-}
-
-u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_get_chain_range(esw) + 1;
-}
-
-u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_MAX_PRIO;
-}
-
-static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
-{
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_LEVELS_PER_PRIO;
-}
-
-#define POOL_NEXT_SIZE 0
-static int
-mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
- int desired_size)
-{
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --fdb_pool_left(esw)[found_i];
- return ESW_POOLS[found_i];
- }
-
- return 0;
-}
-
-static void
-mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (sz == ESW_POOLS[i]) {
- ++fdb_pool_left(esw)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
-}
-
-static void
-mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
-{
- u32 fdb_max;
- int i;
-
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
- fdb_pool_left(esw)[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-}
-
-static struct mlx5_flow_table *
-mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *fdb;
- int sz;
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
- mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
- ft_attr.max_fte = sz;
-
- /* We use tc_slow_fdb(esw) as the table's next_ft until
- * ignore_flow_level is allowed on FT creation and not just for FTEs.
- * Instead, the caller should add an explicit miss rule if needed.
- */
- ft_attr.next_ft = tc_slow_fdb(esw);
-
- /* The root table (chain 0, prio 1, level 0) is required to be
- * connected to the previous prio (FDB_BYPASS_PATH if exists).
- * We always create it, as a managed table, in order to align with
- * fs_core logic.
- */
- if (!fdb_ignore_flow_level_supported(esw) ||
- (chain == 0 && prio == 1 && level == 0)) {
- ft_attr.level = level;
- ft_attr.prio = prio - 1;
- ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
- } else {
- ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
- /* Firmware doesn't allow us to create another level 0 table,
- * so we create all unmanaged tables as level 1.
- *
- * To connect them, we use explicit miss rules with
- * ignore_flow_level. The caller is responsible for creating
- * these rules (if needed).
- */
- ft_attr.level = 1;
- ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
- }
-
- ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev,
- "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), chain, prio, level, sz);
- mlx5_esw_chains_put_sz_to_pool(esw, sz);
- return fdb;
- }
-
- return fdb;
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *fdb)
-{
- mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
- mlx5_destroy_flow_table(fdb);
-}
-
-static int
-create_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_modify_hdr *mod_hdr;
- u32 index;
- int err;
-
- if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
- !mlx5_esw_chains_prios_supported(esw))
- return 0;
-
- err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
- if (err)
- return err;
- if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
- /* we got the special default flow tag id, so we won't know
- * if we actually marked the packet with the restore rule
- * we create.
- *
- * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
- */
- err = mapping_add(esw_chains_mapping(esw),
- &fdb_chain->chain, &index);
- mapping_remove(esw_chains_mapping(esw),
- MLX5_FS_DEFAULT_FLOW_TAG);
- if (err)
- return err;
- }
-
- fdb_chain->id = index;
-
- MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
- MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
- MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
- MLX5_SET(set_action_in, modact, data, fdb_chain->id);
- mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
- 1, modact);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- goto err_mod_hdr;
- }
- fdb_chain->miss_modify_hdr = mod_hdr;
-
- fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
- if (IS_ERR(fdb_chain->restore_rule)) {
- err = PTR_ERR(fdb_chain->restore_rule);
- goto err_rule;
- }
-
- return 0;
-
-err_rule:
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
-err_mod_hdr:
- /* Datapath can't find this mapping, so we can safely remove it */
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
- return err;
-}
-
-static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- if (!fdb_chain->miss_modify_hdr)
- return;
-
- mlx5_del_flow_rules(fdb_chain->restore_rule);
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain = NULL;
- int err;
-
- fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
- if (!fdb_chain)
- return ERR_PTR(-ENOMEM);
-
- fdb_chain->esw = esw;
- fdb_chain->chain = chain;
- INIT_LIST_HEAD(&fdb_chain->prios_list);
-
- err = create_fdb_chain_restore(fdb_chain);
- if (err)
- goto err_restore;
-
- err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
- if (err)
- goto err_insert;
-
- return fdb_chain;
-
-err_insert:
- destroy_fdb_chain_restore(fdb_chain);
-err_restore:
- kvfree(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
-
- destroy_fdb_chain_restore(fdb_chain);
- kvfree(fdb_chain);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain;
-
- fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
- chain_params);
- if (!fdb_chain) {
- fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return fdb_chain;
- }
-
- fdb_chain->ref++;
-
- return fdb_chain;
-}
-
-static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
- struct mlx5_flow_table *fdb,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act act = {};
-
- act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
- act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = next_fdb;
-
- if (next_fdb == tc_end_fdb(esw) &&
- mlx5_esw_chains_prios_supported(esw)) {
- act.modify_hdr = fdb_chain->miss_modify_hdr;
- act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- }
-
- return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
-}
-
-static int
-mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
- struct fdb_prio *pos;
- int n = 0, err;
-
- if (fdb_prio->key.level)
- return 0;
-
- /* Iterate in reverse order until reaching the level 0 rule of
- * the previous priority, adding all the miss rules first, so we can
- * revert them if any of them fails.
- */
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
- pos->fdb,
- next_fdb);
- if (IS_ERR(miss_rules[n])) {
- err = PTR_ERR(miss_rules[n]);
- goto err_prev_rule;
- }
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- /* Success, delete old miss rules, and update the pointers. */
- n = 0;
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- mlx5_del_flow_rules(pos->miss_rule);
-
- pos->miss_rule = miss_rules[n];
- pos->next_fdb = next_fdb;
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- return 0;
-
-err_prev_rule:
- while (--n >= 0)
- mlx5_del_flow_rules(miss_rules[n]);
-
- return err;
-}
-
-static void
-mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
-{
- if (--fdb_chain->ref == 0)
- mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
-}
-
-static struct fdb_prio *
-mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
- struct mlx5_flow_group *miss_group;
- struct fdb_prio *fdb_prio = NULL;
- struct mlx5_flow_table *next_fdb;
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct list_head *pos;
- u32 *flow_group_in;
- int err;
-
- fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return ERR_CAST(fdb_chain);
-
- fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!fdb_prio || !flow_group_in) {
- err = -ENOMEM;
- goto err_alloc;
- }
-
- /* Chain's prio list is sorted by prio and level.
- * And all levels of some prio point to the next prio's level 0.
- * Example list (prio, level):
- * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
- * In hardware, we will have the following pointers:
- * (3,0) -> (5,0) -> (7,0) -> Slow path
- * (3,1) -> (5,0)
- * (5,1) -> (7,0)
- * (6,1) -> (7,0)
- */
-
- /* Default miss for each chain: */
- next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- tc_slow_fdb(esw) :
- tc_end_fdb(esw);
- list_for_each(pos, &fdb_chain->prios_list) {
- struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
-
- /* exit on first pos that is larger */
- if (prio < p->key.prio || (prio == p->key.prio &&
- level < p->key.level)) {
- /* Get next level 0 table */
- next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
- break;
- }
- }
-
- fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- goto err_create;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
- fdb->max_fte - 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- fdb->max_fte - 1);
- miss_group = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(miss_group)) {
- err = PTR_ERR(miss_group);
- goto err_group;
- }
-
- /* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
- if (IS_ERR(miss_rule)) {
- err = PTR_ERR(miss_rule);
- goto err_miss_rule;
- }
-
- fdb_prio->miss_group = miss_group;
- fdb_prio->miss_rule = miss_rule;
- fdb_prio->next_fdb = next_fdb;
- fdb_prio->fdb_chain = fdb_chain;
- fdb_prio->key.chain = chain;
- fdb_prio->key.prio = prio;
- fdb_prio->key.level = level;
- fdb_prio->fdb = fdb;
-
- err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- if (err)
- goto err_insert;
-
- list_add(&fdb_prio->list, pos->prev);
-
- /* Table is ready, connect it */
- err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
- if (err)
- goto err_update;
-
- kvfree(flow_group_in);
- return fdb_prio;
-
-err_update:
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
-err_insert:
- mlx5_del_flow_rules(miss_rule);
-err_miss_rule:
- mlx5_destroy_flow_group(miss_group);
-err_group:
- mlx5_esw_chains_destroy_fdb_table(esw, fdb);
-err_create:
-err_alloc:
- kvfree(fdb_prio);
- kvfree(flow_group_in);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
- struct fdb_prio *fdb_prio)
-{
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
-
- WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
- fdb_prio->next_fdb));
-
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- mlx5_del_flow_rules(fdb_prio->miss_rule);
- mlx5_destroy_flow_group(fdb_prio->miss_group);
- mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- kvfree(fdb_prio);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct mlx5_flow_table *prev_fts;
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
- int l = 0;
-
- if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
- chain != mlx5_esw_chains_get_ft_chain(esw)) ||
- prio > mlx5_esw_chains_get_prio_range(esw) ||
- level > mlx5_esw_chains_get_level_range(esw))
- return ERR_PTR(-EOPNOTSUPP);
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables.
- */
- for (l = 0; l < level; l++) {
- prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
- if (IS_ERR(prev_fts)) {
- fdb_prio = ERR_CAST(prev_fts);
- goto err_get_prevs;
- }
- }
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio) {
- fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
- prio, level);
- if (IS_ERR(fdb_prio))
- goto err_create_prio;
- }
-
- ++fdb_prio->ref;
- mutex_unlock(&esw_chains_lock(esw));
-
- return fdb_prio->fdb;
-
-err_create_prio:
- mutex_unlock(&esw_chains_lock(esw));
-err_get_prevs:
- while (--l >= 0)
- mlx5_esw_chains_put_table(esw, chain, prio, l);
- return ERR_CAST(fdb_prio);
-}
-
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio)
- goto err_get_prio;
-
- if (--fdb_prio->ref == 0)
- mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
- mutex_unlock(&esw_chains_lock(esw));
-
- while (level-- > 0)
- mlx5_esw_chains_put_table(esw, chain, prio, level);
-
- return;
-
-err_get_prio:
- mutex_unlock(&esw_chains_lock(esw));
- WARN_ONCE(1,
- "Couldn't find table: (chain: %d prio: %d level: %d)",
- chain, prio, level);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
-{
- return tc_end_fdb(esw);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
-{
- u32 chain, prio, level;
- int err;
-
- if (!fdb_ignore_flow_level_supported(esw)) {
- err = -EOPNOTSUPP;
-
- esw_warn(esw->dev,
- "Couldn't create global flow table, ignore_flow_level not supported.");
- goto err_ignore;
- }
-
- chain = mlx5_esw_chains_get_chain_range(esw);
- prio = mlx5_esw_chains_get_prio_range(esw);
- level = mlx5_esw_chains_get_level_range(esw);
-
- return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
-
-err_ignore:
- return ERR_PTR(err);
-}
-
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft)
-{
- mlx5_esw_chains_destroy_fdb_table(esw, ft);
-}
-
-static int
-mlx5_esw_chains_init(struct mlx5_eswitch *esw)
-{
- struct mlx5_esw_chains_priv *chains_priv;
- struct mlx5_core_dev *dev = esw->dev;
- u32 max_flow_counter, fdb_max;
- struct mapping_ctx *mapping;
- int err;
-
- chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
- if (!chains_priv)
- return -ENOMEM;
- esw_chains_priv(esw) = chains_priv;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev,
- "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, esw->params.large_group_num, fdb_max);
-
- mlx5_esw_chains_init_sz_pool(esw);
-
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
- } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
- } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
- /* Disabled when the TTL workaround is needed, e.g.
- * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
- */
- esw_warn(dev,
- "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- } else {
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
- mlx5_esw_chains_get_chain_range(esw),
- mlx5_esw_chains_get_prio_range(esw));
- }
-
- err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
- if (err)
- goto init_chains_ht_err;
-
- err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
- if (err)
- goto init_prios_ht_err;
-
- mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
- true);
- if (IS_ERR(mapping)) {
- err = PTR_ERR(mapping);
- goto mapping_err;
- }
- esw_chains_mapping(esw) = mapping;
-
- mutex_init(&esw_chains_lock(esw));
-
- return 0;
-
-mapping_err:
- rhashtable_destroy(&esw_prios_ht(esw));
-init_prios_ht_err:
- rhashtable_destroy(&esw_chains_ht(esw));
-init_chains_ht_err:
- kfree(chains_priv);
- return err;
-}
-
-static void
-mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
-{
- mutex_destroy(&esw_chains_lock(esw));
- mapping_destroy(esw_chains_mapping(esw));
- rhashtable_destroy(&esw_prios_ht(esw));
- rhashtable_destroy(&esw_chains_ht(esw));
-
- kfree(esw_chains_priv(esw));
-}
-
-static int
-mlx5_esw_chains_open(struct mlx5_eswitch *esw)
-{
- struct mlx5_flow_table *ft;
- int err;
-
- /* Create tc_end_fdb(esw), the ft chain that is always created */
- ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
- 1, 0);
- if (IS_ERR(ft))
- return PTR_ERR(ft);
-
- tc_end_fdb(esw) = ft;
-
- /* Always open the root for fast path */
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto level_0_err;
- }
-
- /* Open level 1 for split rules now if prios aren't supported */
- if (!mlx5_esw_chains_prios_supported(esw)) {
- err = mlx5_esw_vport_tbl_get(esw);
- if (err)
- goto level_1_err;
- }
-
- return 0;
-
-level_1_err:
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
-level_0_err:
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
- return err;
-}
-
-static void
-mlx5_esw_chains_close(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_vport_tbl_put(esw);
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
-}
-
-int
-mlx5_esw_chains_create(struct mlx5_eswitch *esw)
-{
- int err;
-
- err = mlx5_esw_chains_init(esw);
- if (err)
- return err;
-
- err = mlx5_esw_chains_open(esw);
- if (err)
- goto err_open;
-
- return 0;
-
-err_open:
- mlx5_esw_chains_cleanup(esw);
- return err;
-}
-
-void
-mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
-{
- mlx5_esw_chains_close(esw);
- mlx5_esw_chains_cleanup(esw);
-}
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping)
-{
- return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
-}
-
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
-{
- return mapping_remove(esw_chains_mapping(esw), chain_mapping);
-}
-
-int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
- u32 *chain)
-{
- int err;
-
- err = mapping_find(esw_chains_mapping(esw), tag, chain);
- if (err) {
- esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
- return -ENOENT;
- }
-
- return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
deleted file mode 100644
index 7679ac359e31..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020 Mellanox Technologies. */
-
-#ifndef __MLX5_ESW_CHAINS_H__
-#define __MLX5_ESW_CHAINS_H__
-
-#include "eswitch.h"
-
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-
-bool
-mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
-bool
-mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft);
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping);
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
- u32 chain_mapping);
-
-int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
-void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-
-int
-mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
-
-#else /* CONFIG_MLX5_CLS_ACT */
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) {}
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
-
-static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
-static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
-
-#endif /* CONFIG_MLX5_CLS_ACT */
-
-#endif /* __MLX5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
new file mode 100644
index 000000000000..ffff11baa3d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/mlx5/driver.h>
+#include "eswitch.h"
+
+static void
+mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
+{
+ u64 parent_id;
+
+ parent_id = mlx5_query_nic_system_image_guid(dev);
+ ppid->id_len = sizeof(parent_id);
+ memcpy(ppid->id, &parent_id, sizeof(parent_id));
+}
+
+static bool
+mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_UPLINK ||
+ (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
+static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct devlink_port_attrs attrs = {};
+ struct netdev_phys_item_id ppid = {};
+ struct devlink_port *dl_port;
+ u32 controller_num = 0;
+ bool external;
+ u16 pfnum;
+
+ dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
+ if (!dl_port)
+ return NULL;
+
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
+ external = mlx5_core_is_ecpf_esw_manager(dev);
+ if (external)
+ controller_num = dev->priv.eswitch->offloads.host_number + 1;
+
+ if (vport_num == MLX5_VPORT_UPLINK) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pfnum;
+ memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
+ attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_set(dl_port, &attrs);
+ } else if (vport_num == MLX5_VPORT_PF) {
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
+ } else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, external);
+ }
+ return dl_port;
+}
+
+static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
+{
+ kfree(dl_port);
+}
+
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct devlink_port *dl_port;
+ unsigned int dl_port_index;
+ struct mlx5_vport *vport;
+ struct devlink *devlink;
+ int err;
+
+ if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ return 0;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
+ if (!dl_port)
+ return -ENOMEM;
+
+ devlink = priv_to_devlink(dev);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ err = devlink_port_register(devlink, dl_port, dl_port_index);
+ if (err)
+ goto reg_err;
+
+ vport->dl_port = dl_port;
+ return 0;
+
+reg_err:
+ mlx5_esw_dl_port_free(dl_port);
+ return err;
+}
+
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ return;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+ devlink_port_unregister(vport->dl_port);
+ mlx5_esw_dl_port_free(vport->dl_port);
+ vport->dl_port = NULL;
+}
+
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ return vport->dl_port;
+}
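A hypothetical caller, sketching how the register/unregister pair above is expected to bracket a vport's lifetime (the real call sites live in the eswitch load/unload paths, which this hunk does not show):

	static int esw_load_vport_example(struct mlx5_eswitch *esw, u16 vport_num)
	{
		int err;

		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;

		/* ... create the representor netdev, then attach it to
		 * mlx5_esw_offloads_devlink_port(esw, vport_num) ...
		 */
		return 0;
	}

	static void esw_unload_vport_example(struct mlx5_eswitch *esw, u16 vport_num)
	{
		/* detach the netdev first, then drop the devlink port */
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	}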
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6e6a9a563992..e8e6294c7cca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
err = 0;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
}
mutex_unlock(&esw->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 867d8120b8a5..cf87de94418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -62,6 +63,9 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+#define esw_chains(esw) \
+ ((esw)->fdb_table.offloads.esw_chains_priv)
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule;
@@ -152,14 +156,9 @@ struct mlx5_vport {
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
+ struct devlink_port *dl_port;
};
-enum offloads_fdb_flags {
- ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
-};
-
-struct mlx5_esw_chains_priv;
-
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -183,7 +182,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct mlx5_fs_chains *esw_chains_priv;
struct {
DECLARE_HASHTABLE(table, 8);
/* Protects vports.table */
@@ -217,6 +216,7 @@ struct mlx5_esw_offload {
atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
struct ida vport_metadata_ida;
+ unsigned int host_number; /* ECPF supports one external host */
};
/* E-Switch MC FDB table hash node */
@@ -329,7 +329,7 @@ struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec);
@@ -349,19 +349,19 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
@@ -401,7 +401,6 @@ struct mlx5_esw_flow_attr {
int split_count;
int out_count;
- int action;
__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
@@ -413,19 +412,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_modify_hdr *modify_hdr;
- u8 inner_match_level;
- u8 outer_match_level;
- struct mlx5_fc *counter;
- u32 chain;
- u16 prio;
- u32 dest_chain;
- u32 flags;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *dest_ft;
- struct mlx5_ct_attr ct_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
@@ -451,9 +438,9 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
@@ -677,6 +664,9 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1bcf2609dca8..c9c2962ad49f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -39,12 +39,13 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
-#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
+#include "lib/fs_chains.h"
+#include "en_tc.h"
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -66,6 +67,12 @@ struct mlx5_vport_key {
u16 vhca_id;
} __packed;
+struct mlx5_vport_tbl_attr {
+ u16 chain;
+ u16 prio;
+ u16 vport;
+};
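The new key is deliberately small: (chain, prio, vport), plus the vhca_id that flow_attr_to_vport_key() folds in, replacing the old practice of passing a whole mlx5_esw_flow_attr with a stub rep. As the later hunks show, callers now fill it directly:

	struct mlx5_vport_tbl_attr attr = {
		.chain = 0,
		.prio  = 1,
		.vport = vport->vport,
	};

	fdb = esw_vport_tbl_get(esw, &attr);	/* takes a reference */
	/* ... use fdb ... */
	esw_vport_tbl_put(esw, &attr);		/* drops it */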
+
struct mlx5_vport_table {
struct hlist_node hlist;
struct mlx5_flow_table *fdb;
@@ -94,10 +101,10 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_tbl_attr *attr,
struct mlx5_vport_key *key)
{
- key->vport = attr->in_rep->vport;
+ key->vport = attr->vport;
key->chain = attr->chain;
key->prio = attr->prio;
key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
@@ -118,7 +125,7 @@ esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32
}
static void
-esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_vport_table *e;
struct mlx5_vport_key key;
@@ -138,7 +145,7 @@ out:
}
static struct mlx5_flow_table *
-esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *ns;
@@ -189,16 +196,15 @@ err_alloc:
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_flow_table *fdb;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
fdb = esw_vport_tbl_get(esw, &attr);
if (IS_ERR(fdb))
goto out;
@@ -212,15 +218,14 @@ out:
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
esw_vport_tbl_put(esw, &attr);
}
}
@@ -242,8 +247,11 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ attr && attr->in_rep)
+ spec->flow_context.flow_source =
+ attr->in_rep->vport == MLX5_VPORT_UPLINK ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
static void
@@ -290,11 +298,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool split = !!(attr->split_count);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = !!(esw_attr->split_count);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -308,13 +319,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
- flow_act.vlan[0].vid = attr->vlan_vid[0];
- flow_act.vlan[0].prio = attr->vlan_prio[0];
+ flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
+ flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
- flow_act.vlan[1].vid = attr->vlan_vid[1];
- flow_act.vlan[1].prio = attr->vlan_prio[1];
+ flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
+ flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
}
}
@@ -329,12 +340,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
i++;
} else if (attr->dest_chain) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
- 1, 0);
+ ft = mlx5_chains_get_table(chains, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -344,28 +355,29 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->split_count; j < attr->out_count; j++) {
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[j].rep->vport;
+ dest[i].vport.num = esw_attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |=
MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
+ flow_act.pkt_reformat =
+ esw_attr->dests[j].pkt_reformat;
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
dest[i].vport.pkt_reformat =
- attr->dests[j].pkt_reformat;
+ esw_attr->dests[j].pkt_reformat;
}
i++;
}
}
}
- if (attr->decap_pkt_reformat)
- flow_act.pkt_reformat = attr->decap_pkt_reformat;
+ if (esw_attr->decap_pkt_reformat)
+ flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -382,26 +394,30 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.modify_hdr = attr->modify_hdr;
if (split) {
- fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+
+ fdb = esw_vport_tbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
- fdb = mlx5_esw_chains_get_table(esw, attr->chain,
- attr->prio, 0);
+ fdb = mlx5_chains_get_table(chains, attr->chain,
+ attr->prio, 0);
else
- fdb = attr->fdb;
+ fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
- rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
+ rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
@@ -414,12 +430,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_add_rule:
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
@@ -427,46 +443,51 @@ err_create_goto_table:
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->split_count; i++) {
+ for (i = 0; i < esw_attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[i].rep->vport;
+ dest[i].vport.num = esw_attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
+ dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -481,9 +502,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -491,10 +512,13 @@ err_get_fast:
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
bool fwd_rule)
{
- bool split = (attr->split_count > 0);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = (esw_attr->split_count > 0);
+ struct mlx5_vport_tbl_attr fwd_attr;
int i;
mlx5_del_flow_rules(rule);
@@ -502,31 +526,36 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (attr->dests[i].termtbl)
- mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ if (esw_attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
}
}
atomic64_dec(&esw->offloads.num_flows);
+ if (fwd_rule || split) {
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ }
+
if (fwd_rule) {
- esw_vport_tbl_put(esw, attr);
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
} else {
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
if (attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, false);
}
@@ -534,7 +563,7 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
@@ -611,9 +640,10 @@ out_notsupp:
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -629,17 +659,17 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
if (err)
goto unlock;
attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
@@ -662,11 +692,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
- SET_VLAN_INSERT | SET_VLAN_STRIP);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
+ 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid[0];
+ vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -679,9 +709,10 @@ unlock:
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -699,11 +730,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
goto out;
@@ -1137,6 +1168,126 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
}
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
+static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
+ *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
+ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+ /* Disabled when the ttl workaround is needed, e.g.
+ * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+ */
+ esw_warn(dev,
+ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ } else {
+ *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Supported tc chains and prios offload\n");
+ }
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
+}
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table *nf_ft, *ft;
+ struct mlx5_chains_attr attr = {};
+ struct mlx5_fs_chains *chains;
+ u32 fdb_max;
+ int err;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_init_chains_offload_flags(esw, &attr.flags);
+ attr.ns = MLX5_FLOW_NAMESPACE_FDB;
+ attr.max_ft_sz = fdb_max;
+ attr.max_grp_num = esw->params.large_group_num;
+ attr.default_ft = miss_fdb;
+ attr.max_restore_tag = esw_get_max_restore_tag(esw);
+
+ chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(chains)) {
+ err = PTR_ERR(chains);
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ return err;
+ }
+
+ esw->fdb_table.offloads.esw_chains_priv = chains;
+
+ /* Create tc_end_ft, which is the always-created ft chain */
+ nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
+ 1, 0);
+ if (IS_ERR(nf_ft)) {
+ err = PTR_ERR(nf_ft);
+ goto nf_ft_err;
+ }
+
+ /* Always open the root for fast path */
+ ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+ /* Open level 1 for split fdb rules now if prios aren't supported */
+ if (!mlx5_chains_prios_supported(chains)) {
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
+ goto level_1_err;
+ }
+
+ mlx5_chains_set_end_ft(chains, nf_ft);
+
+ return 0;
+
+level_1_err:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+level_0_err:
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+nf_ft_err:
+ mlx5_chains_destroy(chains);
+ esw->fdb_table.offloads.esw_chains_priv = NULL;
+
+ return err;
+}
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ mlx5_esw_vport_tbl_put(esw);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+ mlx5_chains_destroy(chains);
+}
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{ return 0; }
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{}
+
+#endif
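The stubs above let the eswitch build without CONFIG_MLX5_CLS_ACT, while the real implementations pin chain tables through the refcounted mlx5_chains_get_table()/mlx5_chains_put_table() API. A minimal sketch of the calling convention under that assumption (the wrapper function below is hypothetical, not part of the patch):

/* Sketch only: pair each get with a put using the same chain/prio/level
 * triple so the internal reference counts stay balanced. Chain 0, prio 1,
 * level 0 is the fast-path root that esw_chains_create() pins above.
 */
static int example_use_chain_table(struct mlx5_fs_chains *chains)
{
	struct mlx5_flow_table *ft;

	ft = mlx5_chains_get_table(chains, 0, 1, 0); /* takes a reference */
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	/* ... insert flow rules into ft here ... */

	mlx5_chains_put_table(chains, 0, 1, 0); /* drops the reference */
	return 0;
}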
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1192,9 +1343,9 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- err = mlx5_esw_chains_create(esw);
+ err = esw_chains_create(esw, fdb);
if (err) {
- esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
}
@@ -1288,7 +1439,7 @@ miss_err:
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
@@ -1312,7 +1463,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
+
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
@@ -1671,15 +1823,12 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
- if (esw->mode != MLX5_ESWITCH_OFFLOADS)
- return 0;
-
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
@@ -1698,19 +1847,46 @@ err_reps:
return err;
}
-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
- if (esw->mode != MLX5_ESWITCH_OFFLOADS)
- return;
-
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ int err;
+
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
+
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+ if (err)
+ return err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto load_err;
+ return err;
+
+load_err:
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ return err;
+}
+
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+}
+
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
@@ -1868,53 +2044,38 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static bool
-esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
-{
- return mlx5_core_mp_enabled(esw->dev);
-}
-
-static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
-{
- return esw_check_vport_match_metadata_mandatory(esw) &&
- esw_check_vport_match_metadata_supported(esw);
-}
-
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
- u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
- u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
- u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- u32 start;
- u32 end;
+ u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
+ u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
+ u32 pf_num;
int id;
- /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
- WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
-
- /* Trim vhca_id to ESW_VHCA_ID_BITS */
- vhca_id &= vhca_id_mask;
-
- start = (vhca_id << ESW_VPORT_BITS);
- end = start + num_vports;
- if (!vhca_id)
- start += 1; /* zero is reserved/invalid metadata */
- id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);
+ /* Only 4 bits of pf_num */
+ pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ if (pf_num > max_pf_num)
+ return 0;
- return (id < 0) ? 0 : id;
+ /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
+ /* Use only non-zero vport_id (1-4095) for all PFs */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+ if (id < 0)
+ return 0;
+ id = (pf_num << ESW_VPORT_BITS) | id;
+ return id;
}
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
- ida_free(&esw->offloads.vport_metadata_ida, metadata);
+ u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
+
+ /* Metadata contains only 12 bits of the actual IDA id */
+ ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
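A worked illustration of the 16-bit metadata layout the alloc/free pair above assumes (ESW_PFNUM_BITS = 4 and ESW_VPORT_BITS = 12, as used in the alloc path; the helper names below are invented for the example):

/* Illustrative encode/decode of the metadata word; not driver code.
 *
 *   15          12 11                     0
 *  +--------------+-----------------------+
 *  |    pf_num    |   IDA-allocated id    |
 *  +--------------+-----------------------+
 */
#define EXAMPLE_VPORT_BITS 12

static inline u32 example_metadata_encode(u32 pf_num, u32 ida_id)
{
	return (pf_num << EXAMPLE_VPORT_BITS) | ida_id;
}

static inline u32 example_metadata_ida_id(u32 metadata)
{
	/* mlx5_esw_match_metadata_free() masks the same low 12 bits */
	return metadata & ((1 << EXAMPLE_VPORT_BITS) - 1);
}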
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK)
- return 0;
-
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
@@ -1923,40 +2084,65 @@ static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ if (!vport->default_metadata)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
+static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+}
+
+static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
+
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ err = esw_offloads_vport_metadata_setup(esw, vport);
+ if (err)
+ goto metadata_err;
+ }
+
+ return 0;
+
+metadata_err:
+ esw_offloads_metadata_uninit(esw);
+ return err;
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_offloads_vport_metadata_setup(esw, vport);
- if (err)
- goto metadata_err;
-
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
- goto ingress_err;
+ return err;
- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_acl_egress_ofld_setup(esw, vport);
- if (err)
- goto egress_err;
- }
+ err = esw_acl_egress_ofld_setup(esw, vport);
+ if (err)
+ goto egress_err;
return 0;
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
-ingress_err:
- esw_offloads_vport_metadata_cleanup(esw, vport);
-metadata_err:
return err;
}
@@ -1966,22 +2152,14 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
{
esw_acl_egress_ofld_cleanup(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
- esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int err;
-
- if (esw_use_vport_metadata(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- err = esw_vport_create_offloads_acl_tables(esw, vport);
- if (err)
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
- return err;
+ return esw_vport_create_offloads_acl_tables(esw, vport);
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -1990,7 +2168,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
@@ -2114,6 +2291,24 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
+static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ /* Mark a non-local controller with a non-zero controller number. */
+ esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_number);
+ kvfree(query_host_out);
+ return 0;
+}
+
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
@@ -2128,6 +2323,17 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
+ err = mlx5_esw_host_number_init(esw);
+ if (err)
+ goto err_metadata;
+
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+ err = esw_offloads_metadata_init(esw);
+ if (err)
+ goto err_metadata;
+
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
@@ -2160,6 +2366,9 @@ err_uplink:
err_steering_init:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
+ esw_offloads_metadata_uninit(esw);
+err_metadata:
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2193,6 +2402,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ esw_offloads_metadata_uninit(esw);
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 17a0d2bc102b..ec679560a95d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/fs.h>
#include "eswitch.h"
+#include "en_tc.h"
#include "fs_core.h"
struct mlx5_termtbl_handle {
@@ -228,10 +229,11 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
@@ -244,8 +246,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
return true;
/* hairpin */
- for (i = attr->split_count; i < attr->out_count; i++)
- if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 831d2c39e153..80da50e12915 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -54,7 +54,7 @@ static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
if (unlikely(!buf->sg[0].data))
goto out;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
buf->sg[0].size, buf->dma_dir);
err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
@@ -86,7 +86,7 @@ static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
{
struct device *dma_device;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
if (buf->sg[1].data)
dma_unmap_single(dma_device, buf->sg[1].dma_addr,
buf->sg[1].size, buf->dma_dir);
@@ -388,9 +388,9 @@ static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
mlx5_fpga_conn_arm_cq(conn);
}
-static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
+static void mlx5_fpga_conn_cq_tasklet(struct tasklet_struct *t)
{
- struct mlx5_fpga_conn *conn = (void *)data;
+ struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
if (unlikely(!conn->qp.active))
return;
@@ -478,8 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
- tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
- (unsigned long)conn);
+ tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
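The two hunks above follow the kernel-wide tasklet API migration: tasklet_init() with a cast unsigned long context becomes tasklet_setup(), and the callback recovers its container via from_tasklet(), a container_of() wrapper. A generic before/after sketch (struct and function names are invented):

/* Illustration of the tasklet_setup()/from_tasklet() pattern; not driver
 * code. from_tasklet(var, t, member) expands to container_of() keyed on
 * the embedded tasklet member.
 */
struct example_ctx {
	struct tasklet_struct tasklet;
	int pending;
};

static void example_tasklet_fn(struct tasklet_struct *t)
{
	struct example_ctx *ctx = from_tasklet(ctx, t, tasklet);

	ctx->pending = 0;
}

static void example_init(struct example_ctx *ctx)
{
	/* replaces: tasklet_init(&ctx->tasklet, fn, (unsigned long)ctx); */
	tasklet_setup(&ctx->tasklet, example_tasklet_fn);
}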
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index fee169732de7..babe3405132a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -776,6 +776,9 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
+#ifdef CONFIG_MLX5_IPSEC
+ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75fa44eee434..325a5b0d6829 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -126,6 +126,10 @@
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+#define KERNEL_TX_IPSEC_NUM_PRIOS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -180,13 +184,24 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
+#ifdef CONFIG_MLX5_IPSEC
+ .ar_size = 2,
+#else
.ar_size = 1,
+#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+#ifdef CONFIG_MLX5_IPSEC
+ ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
+ KERNEL_TX_IPSEC_NUM_LEVELS))),
+#endif
}
};
@@ -1595,11 +1610,12 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
if (ignore_level) {
- if (ft->type != FS_FT_FDB)
+ if (ft->type != FS_FT_FDB &&
+ ft->type != FS_FT_NIC_RX)
return false;
if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
- dest->ft->type != FS_FT_FDB)
+ ft->type != dest->ft->type)
return false;
}
@@ -1994,10 +2010,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
down_write_ref_node(&fte->node, false);
for (i = handle->num_rules - 1; i >= 0; i--)
tree_remove_node(&handle->rule[i]->node, true);
- if (fte->modify_mask && fte->dests_size) {
- modify_fte(fte);
+ if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
up_write_ref_node(&fte->node, false);
- } else {
+ } else if (list_empty(&fte->node.children)) {
del_hw_fte(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
@@ -2164,8 +2181,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
}
- if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
root_ns = steering->egress_root_ns;
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_BYPASS_PRIO;
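The new prio computation above relies on MLX5_FLOW_NAMESPACE_EGRESS_KERNEL being defined immediately after MLX5_FLOW_NAMESPACE_EGRESS, so the subtraction yields prio 0 for the bypass entry and prio 1 for the kernel TX IPsec entry of egress_root_fs. A minimal sketch of that assumption (enum names invented):

/* Illustrative only: the enum adjacency the prio arithmetic depends on.
 * If the kernel-egress type ever stops being EGRESS + 1, the mapping breaks.
 */
enum example_egress_ns {
	EXAMPLE_NS_EGRESS,        /* prio 0 -> bypass egress prio */
	EXAMPLE_NS_EGRESS_KERNEL, /* prio 1 -> kernel TX IPsec prio */
};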
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
new file mode 100644
index 000000000000..f9042e147c7f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "fw_reset.h"
+#include "diag/fw_tracer.h"
+
+enum {
+ MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
+ MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ MLX5_FW_RESET_FLAGS_PENDING_COMP
+};
+
+struct mlx5_fw_reset {
+ struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
+ struct workqueue_struct *wq;
+ struct work_struct fw_live_patch_work;
+ struct work_struct reset_request_work;
+ struct work_struct reset_reload_work;
+ struct work_struct reset_now_work;
+ struct work_struct reset_abort_work;
+ unsigned long reset_flags;
+ struct timer_list timer;
+ struct completion done;
+ int ret;
+};
+
+void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (enable)
+ clear_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ else
+ set_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+}
+
+bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ return !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+}
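These two accessors are the backing store for a devlink boolean parameter controlling remote (host-initiated) firmware resets. A hedged sketch of how devlink get/set callbacks could wire into them, assuming the standard devlink_param_gset_ctx interface (the callback names below are hypothetical):

/* Sketch only: plausible devlink param callbacks over the accessors above. */
static int example_remote_reset_get(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
	return 0;
}

static int example_remote_reset_set(struct devlink *devlink, u32 id,
				    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
	return 0;
}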
+
+static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
+ u8 reset_type_sel, u8 sync_resp, bool sync_start)
+{
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+
+ MLX5_SET(mfrl_reg, in, reset_level, reset_level);
+ MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_resp, sync_resp);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, sync_start);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
+}
+
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+{
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 0);
+ if (err)
+ return err;
+
+ if (reset_level)
+ *reset_level = MLX5_GET(mfrl_reg, out, reset_level);
+ if (reset_type)
+ *reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+
+ return 0;
+}
+
+int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+{
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ int err;
+
+ set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
+ if (err)
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ return err;
+}
+
+int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
+}
+
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ /* if this is the driver that initiated the fw reset, devlink completed the reload */
+ if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+ complete(&fw_reset->done);
+ } else {
+ mlx5_load_one(dev, false);
+ devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ }
+}
+
+static void mlx5_sync_reset_reload_work(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_reload_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ mlx5_enter_error_state(dev, true);
+ mlx5_unload_one(dev, false);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ fw_reset->ret = err;
+ mlx5_fw_reset_complete_reload(dev);
+}
+
+static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ del_timer(&fw_reset->timer);
+}
+
+static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ mlx5_stop_sync_reset_poll(dev);
+ clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
+ if (poll_health)
+ mlx5_start_health_poll(dev);
+}
+
+#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
+static void poll_sync_reset(struct timer_list *t)
+{
+ struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ u32 fatal_error;
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
+
+ if (fatal_error) {
+ mlx5_core_warn(dev, "Got Device Reset\n");
+ mlx5_sync_reset_clear_reset_requested(dev, false);
+ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ return;
+ }
+
+ mod_timer(&fw_reset->timer, round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL));
+}
+
+static void mlx5_start_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ timer_setup(&fw_reset->timer, poll_sync_reset, 0);
+ fw_reset->timer.expires = round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL);
+ add_timer(&fw_reset->timer);
+}
+
+static int mlx5_fw_reset_set_reset_sync_ack(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 1, false);
+}
+
+static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
+}
+
+static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ mlx5_stop_health_poll(dev, true);
+ set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
+ mlx5_start_sync_reset_poll(dev);
+}
+
+static void mlx5_fw_live_patch_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ fw_live_patch_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ struct mlx5_fw_tracer *tracer;
+
+ mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+ fw_rev_min(dev), fw_rev_sub(dev));
+
+ tracer = dev->tracer;
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ if (mlx5_fw_tracer_reload(tracer))
+ mlx5_core_err(dev, "Failed to reload FW tracer\n");
+}
+
+static void mlx5_sync_reset_request_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_request_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags)) {
+ err = mlx5_fw_reset_set_reset_sync_nack(dev);
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
+ err ? "Failed" : "Sent");
+ return;
+ }
+ mlx5_sync_reset_set_reset_requested(dev);
+ err = mlx5_fw_reset_set_reset_sync_ack(dev);
+ if (err)
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
+ else
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
+}
+
+#define MLX5_PCI_LINK_UP_TIMEOUT 2000
+
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+{
+ struct pci_bus *bridge_bus = dev->pdev->bus;
+ struct pci_dev *bridge = bridge_bus->self;
+ u16 reg16, dev_id, sdev_id;
+ unsigned long timeout;
+ struct pci_dev *sdev;
+ int cap, err;
+ u32 reg32;
+
+ /* Check that all functions under the PCI bridge are PFs of
+ * this device; otherwise fail this function.
+ */
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return err;
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
+ if (err)
+ return err;
+ if (sdev_id != dev_id)
+ return -EPERM;
+ }
+
+ cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ if (!cap)
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ pci_save_state(sdev);
+ pci_cfg_access_lock(sdev);
+ }
+ /* PCI link toggle */
+ err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16);
+ if (err)
+ goto restore;
+ reg16 |= PCI_EXP_LNKCTL_LD;
+ err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
+ if (err)
+ goto restore;
+ msleep(500);
+ reg16 &= ~PCI_EXP_LNKCTL_LD;
+ err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
+ if (err)
+ goto restore;
+
+ /* Check link */
+ err = pci_read_config_dword(bridge, cap + PCI_EXP_LNKCAP, &reg32);
+ if (err)
+ goto restore;
+ if (!(reg32 & PCI_EXP_LNKCAP_DLLLARC)) {
+ mlx5_core_warn(dev, "No PCI link reporting capability (0x%08x)\n", reg32);
+ msleep(1000);
+ goto restore;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ do {
+ err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
+ if (err)
+ goto restore;
+ if (reg16 & PCI_EXP_LNKSTA_DLLLA)
+ break;
+ msleep(20);
+ } while (!time_after(jiffies, timeout));
+
+ if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
+ mlx5_core_info(dev, "PCI Link up\n");
+ } else {
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
+ reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ err = -ETIMEDOUT;
+ }
+
+restore:
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ pci_cfg_access_unlock(sdev);
+ pci_restore_state(sdev);
+ }
+
+ return err;
+}
+
+static void mlx5_sync_reset_now_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_now_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ mlx5_sync_reset_clear_reset_requested(dev, false);
+
+ mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
+
+ err = mlx5_cmd_fast_teardown_hca(dev);
+ if (err) {
+ mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err);
+ goto done;
+ }
+
+ err = mlx5_pci_link_toggle(dev);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
+ goto done;
+ }
+
+ mlx5_enter_error_state(dev, true);
+ mlx5_unload_one(dev, false);
+done:
+ fw_reset->ret = err;
+ mlx5_fw_reset_complete_reload(dev);
+}
+
+static void mlx5_sync_reset_abort_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_abort_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ mlx5_sync_reset_clear_reset_requested(dev, true);
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
+}
+
+static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct mlx5_eqe *eqe)
+{
+ struct mlx5_eqe_sync_fw_update *sync_fw_update_eqe;
+ u8 sync_event_rst_type;
+
+ sync_fw_update_eqe = &eqe->data.sync_fw_update;
+ sync_event_rst_type = sync_fw_update_eqe->sync_rst_state & SYNC_RST_STATE_MASK;
+ switch (sync_event_rst_type) {
+ case MLX5_SYNC_RST_STATE_RESET_REQUEST:
+ queue_work(fw_reset->wq, &fw_reset->reset_request_work);
+ break;
+ case MLX5_SYNC_RST_STATE_RESET_NOW:
+ queue_work(fw_reset->wq, &fw_reset->reset_now_work);
+ break;
+ case MLX5_SYNC_RST_STATE_RESET_ABORT:
+ queue_work(fw_reset->wq, &fw_reset->reset_abort_work);
+ break;
+ }
+}
+
+static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
+ struct mlx5_eqe *eqe = data;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
+ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ break;
+ case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
+ mlx5_sync_reset_events_handle(fw_reset, eqe);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
+int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
+{
+ unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ int err;
+
+ if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
+ mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
+ MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ err = fw_reset->ret;
+out:
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ return err;
+}
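With the completion armed by mlx5_fw_reset_set_reset_sync() and consumed here, the initiating driver's overall flow is: request the synchronized reset, unload, wait for the reset work to finish, then load again. A hedged sketch of that sequence (the wrapper name is hypothetical; the unload/load bracketing mirrors the reload work above):

/* Sketch of an fw-activate style caller; illustrative only. */
static int example_fw_activate(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_fw_reset_set_reset_sync(dev, 0); /* sets PENDING_COMP */
	if (err)
		return err;

	mlx5_unload_one(dev, false);
	err = mlx5_fw_reset_wait_reset_done(dev); /* blocks on fw_reset->done */
	if (err)
		return err;

	return mlx5_load_one(dev, false);
}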
+
+void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
+ mlx5_eq_notifier_register(dev, &fw_reset->nb);
+}
+
+void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
+{
+ mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+}
+
+int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+
+ if (!fw_reset)
+ return -ENOMEM;
+ fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
+ if (!fw_reset->wq) {
+ kfree(fw_reset);
+ return -ENOMEM;
+ }
+
+ fw_reset->dev = dev;
+ dev->priv.fw_reset = fw_reset;
+
+ INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
+ INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
+ INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
+ INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
+ INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+
+ init_completion(&fw_reset->done);
+ return 0;
+}
+
+void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ destroy_workqueue(fw_reset->wq);
+ kfree(dev->priv.fw_reset);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
new file mode 100644
index 000000000000..7761ee5fc7d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_FW_RESET_H
+#define __MLX5_FW_RESET_H
+
+#include "mlx5_core.h"
+
+void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
+bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+
+int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index b31f769d2df9..54523bed16cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -110,7 +110,7 @@ static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
return rfr && synd;
}
-static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
+u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
{
if (sensor_pci_not_working(dev))
return MLX5_SENSOR_PCI_COMM_ERR;
@@ -173,7 +173,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
* Check again to avoid a redundant 2nd reset. If the fatal error was
* PCI related, a reset won't help.
*/
- fatal_error = check_fatal_sensors(dev);
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
fatal_error == MLX5_SENSOR_NIC_DISABLED ||
fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
@@ -195,7 +195,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
bool err_detected = false;
/* Mark the device as fatal in order to abort FW commands */
- if ((check_fatal_sensors(dev) || force) &&
+ if ((mlx5_health_check_fatal_sensors(dev) || force) &&
dev->state == MLX5_DEVICE_STATE_UP) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
err_detected = true;
@@ -208,7 +208,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
goto unlock;
}
- if (check_fatal_sensors(dev) || force) { /* protected state setting */
+ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_cmd_flush(dev);
}
@@ -231,7 +231,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "start\n");
- if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
+ if (mlx5_health_check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
/* Get cr-dump and reset FW semaphore */
lock = lock_sem_sw_reset(dev, true);
@@ -308,26 +308,31 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
/* How much time to wait until health resetting the driver (in msecs) */
#define MLX5_RECOVERY_WAIT_MSECS 60000
-static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
+int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- mlx5_core_warn(dev, "handling bad device here\n");
- mlx5_handle_bad_state(dev);
end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
while (sensor_pci_not_working(dev)) {
- if (time_after(jiffies, end)) {
- mlx5_core_err(dev,
- "health recovery flow aborted, PCI reads still not working\n");
- return -EIO;
- }
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
msleep(100);
}
+ return 0;
+}
+static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
+{
+ mlx5_core_warn(dev, "handling bad device here\n");
+ mlx5_handle_bad_state(dev);
+ if (mlx5_health_wait_pci_up(dev)) {
+ mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n");
+ return -EIO;
+ }
mlx5_core_err(dev, "starting health recovery flow\n");
mlx5_recover_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
- check_fatal_sensors(dev)) {
+ mlx5_health_check_fatal_sensors(dev)) {
mlx5_core_err(dev, "health recovery failed\n");
return -EIO;
}
@@ -696,7 +701,7 @@ static void poll_health(struct timer_list *t)
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
- fatal_error = check_fatal_sensors(dev);
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
if (fatal_error && !health->fatal_error) {
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 1eef66ee849e..cac8f085b16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -130,14 +130,6 @@ static int mlx5i_flash_device(struct net_device *netdev,
return mlx5e_ethtool_flash_device(priv, flash);
}
-enum mlx5_ptys_width {
- MLX5_PTYS_WIDTH_1X = 1 << 0,
- MLX5_PTYS_WIDTH_2X = 1 << 1,
- MLX5_PTYS_WIDTH_4X = 1 << 2,
- MLX5_PTYS_WIDTH_8X = 1 << 3,
- MLX5_PTYS_WIDTH_12X = 1 << 4,
-};
-
static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width)
{
switch (width) {
@@ -174,24 +166,6 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
-static int mlx5i_get_port_settings(struct net_device *netdev,
- u16 *ib_link_width_oper, u16 *ib_proto_oper)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- int ret;
-
- ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1);
- if (ret)
- return ret;
-
- *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
- *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
-
- return 0;
-}
-
static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
@@ -209,11 +183,14 @@ static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
static int mlx5i_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
u16 ib_link_width_oper;
u16 ib_proto_oper;
int speed, ret;
- ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper);
+ ret = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, &ib_proto_oper,
+ 1);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 874c70e8cc54..33081b24f10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -102,7 +102,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
if (ldev->pf[i].netdev == ndev)
return i;
- return -1;
+ return -ENOENT;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
bool do_bond, roce_lag;
int err;
- if (!dev0 || !dev1)
+ if (!mlx5_lag_is_ready(ldev))
return;
spin_lock(&lag_lock);
@@ -355,7 +355,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
- bool is_bonded;
+ bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
int idx;
@@ -374,7 +374,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx > -1)
+ if (idx >= 0)
bond_status |= (1 << idx);
num_slaves++;
@@ -391,13 +391,24 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and no other slaves are present.
- * Lag mode must be activebackup or hash.
*/
- is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
- (bond_status == 0x3) &&
- ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
- (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
+ if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, PF is configured with more than 64 VFs");
+ return 0;
+ }
+
+ /* Lag mode must be activebackup or hash. */
+ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
+
+ if (is_in_lag && !mode_supported)
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, TX type isn't supported");
+
+ is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
return 1;
@@ -418,7 +429,7 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 0;
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
- if (idx == -1)
+ if (idx < 0)
return 0;
/* This information is used to determine virtual to physical
@@ -445,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
+
+ if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+ return NOTIFY_DONE;
+
tracker = ldev->tracker;
switch (event) {
@@ -493,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
kfree(ldev);
}
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return;
+ return -EPERM;
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
@@ -511,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
spin_unlock(&lag_lock);
+
+ return fn;
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -537,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int err;
+ int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -556,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
}
- mlx5_lag_dev_add_pf(ldev, dev, netdev);
+ if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+ return;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ tmp_dev = ldev->pf[i].dev;
+ if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+ MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ break;
+ }
+
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -587,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
mlx5_lag_dev_remove_pf(ldev, dev);
+ ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..8d8cf2d0bc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -16,6 +16,7 @@ enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+ MLX5_LAG_FLAG_READY = 1 << 3,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+ return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 9e68f5926ab6..88e58ac902de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,7 +11,7 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+ if (!mlx5_lag_is_ready(ldev))
return false;
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
@@ -131,7 +131,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
- mlx5_lag_set_port_affinity(ldev, ++i);
+ if (i < 0)
+ i = MLX5_LAG_NORMAL_AFFINITY;
+ else
+ ++i;
+
+ mlx5_lag_set_port_affinity(ldev, i);
}
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 2d55b7c22c03..c70c1f0ca0c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -150,28 +150,30 @@ static void mlx5_pps_out(struct work_struct *work)
static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
- overflow_work);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_clock *clock;
unsigned long flags;
+ clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
u64 ns = timespec64_to_ns(ts);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
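The recurring change in this file drops the stored clock->mdev back-pointer in favor of container_of(), which is valid because struct mlx5_clock is embedded by value inside struct mlx5_core_dev. A generic sketch of the idiom (struct names invented):

/* Illustration of recovering the outer device from an embedded member;
 * not driver code.
 */
struct example_clock {
	int ticks;
};

struct example_dev {
	int id;
	struct example_clock clock;
};

static struct example_dev *example_clock_to_dev(struct example_clock *clk)
{
	/* no back-pointer needed: clk lives inside example_dev */
	return container_of(clk, struct example_dev, clock);
}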
@@ -180,13 +182,12 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_internal_timer(mdev, sts);
ns = timecounter_cyc2time(&clock->tc, cycles);
@@ -199,13 +200,14 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -213,12 +215,13 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
- u64 adj;
- u32 diff;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ u32 diff;
+ u64 adj;
+
if (delta < 0) {
neg_adj = 1;
@@ -229,11 +232,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -431,13 +435,11 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
default:
return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
- .name = "mlx5_p2p",
+ .name = "mlx5_ptp",
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
@@ -465,7 +467,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
- struct mlx5_core_dev *mdev = clock->mdev;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -538,20 +541,23 @@ static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
- struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
+ struct mlx5_core_dev *mdev;
struct timespec64 ts;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
@@ -574,8 +580,8 @@ static int mlx5_pps_event(struct notifier_block *nb,
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
- schedule_work(&clock->pps_info.out_work);
write_sequnlock_irqrestore(&clock->lock, flags);
+ schedule_work(&clock->pps_info.out_work);
break;
default:
mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
@@ -605,7 +611,6 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
- clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 5c681e31983b..81f2cc4ca1da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -78,7 +78,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
-void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
new file mode 100644
index 000000000000..947f346bdc2d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "lib/fs_chains.h"
+#include "en/mapping.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+#define chains_lock(chains) ((chains)->lock)
+#define chains_ht(chains) ((chains)->chains_ht)
+#define chains_mapping(chains) ((chains)->chains_mapping)
+#define prios_ht(chains) ((chains)->prios_ht)
+#define ft_pool_left(chains) ((chains)->ft_left)
+#define tc_default_ft(chains) ((chains)->tc_default_ft)
+#define tc_end_ft(chains) ((chains)->tc_end_ft)
+#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
+
+/* Firmware currently supports 4 pools of 4 sizes (FT_POOLS), and a virtual
+ * memory region of 16M (FT_SIZE) that is duplicated for each flow table
+ * pool. We can allocate up to 16M from each pool, and we keep track of how
+ * much we have used via mlx5_chains_get_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and match firmware
+ * pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128 };
+#define FT_TBL_SZ (64 * 1024)
+
+struct mlx5_fs_chains {
+ struct mlx5_core_dev *dev;
+
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_default_ft;
+ struct mlx5_flow_table *tc_end_ft;
+ struct mapping_ctx *chains_mapping;
+
+ enum mlx5_flow_namespace_type ns;
+ u32 group_num;
+ u32 flags;
+
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+struct fs_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+ int id;
+
+ struct mlx5_fs_chains *chains;
+ struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
+};
+
+struct prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct prio_key key;
+
+ int ref;
+
+ struct fs_chain *chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fs_chain, node),
+ .key_offset = offsetof(struct fs_chain, chain),
+ .key_len = sizeof_field(struct fs_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct prio, node),
+ .key_offset = offsetof(struct prio, key),
+ .key_len = sizeof_field(struct prio, key),
+ .automatic_shrinking = true,
+};
+
+bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+}
+
+bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_prios_supported(chains) &&
+ mlx5_chains_ignore_flow_level_supported(chains);
+}
+
+u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX - 1;
+
+ /* We should get here only for eswitch case */
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_get_chain_range(chains) + 1;
+}
+
+u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* We should get here only for eswitch case */
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
+{
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* Same value for FDB and NIC RX tables */
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ tc_end_ft(chains) = ft;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --ft_pool_left(chains)[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+static void
+mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++ft_pool_left(chains)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
+
+static void
+mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool_left(chains)[i] =
+ FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
+}
+
+static struct mlx5_flow_table *
+mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int sz;
+
+ if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
+ mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+ /* We use tc_default_ft(chains) as the table's next_ft until
+ * ignore_flow_level is allowed on FT creation, not just on FTEs.
+ * Until then, the caller should add an explicit miss rule if needed.
+ */
+ ft_attr.next_ft = tc_default_ft(chains);
+
+ /* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous fs_core managed prio.
+ * We always create it, as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!mlx5_chains_ignore_flow_level_supported(chains) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
+ mlx5_get_fdb_sub_ns(chains->dev, chain) :
+ mlx5_get_flow_namespace(chains->dev, chains->ns);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. The caller is responsible for
+ * creating these rules (if needed).
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = chains->group_num;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(ft), chain, prio, level, sz);
+ mlx5_chains_put_sz_to_pool(chains, sz);
+ return ft;
+ }
+
+ return ft;
+}
+
+static void
+mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
+ mlx5_destroy_flow_table(ft);
+}
+
+static int
+create_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ struct mlx5_fs_chains *chains = chain->chains;
+ enum mlx5e_tc_attr_to_reg chain_to_reg;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
+ !mlx5_chains_prios_supported(chains))
+ return 0;
+
+ err = mapping_add(chains_mapping(chains), &chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+ /* We got the special default flow tag ID, so we won't know
+ * whether we actually marked the packet with the restore rule
+ * we create.
+ *
+ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
+ */
+ err = mapping_add(chains_mapping(chains),
+ &chain->chain, &index);
+ mapping_remove(chains_mapping(chains),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ chain->id = index;
+
+ if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
+ chain_to_reg = CHAIN_TO_REG;
+ chain->restore_rule = esw_add_restore_rule(esw, chain->id);
+ if (IS_ERR(chain->restore_rule)) {
+ err = PTR_ERR(chain->restore_rule);
+ goto err_rule;
+ }
+ } else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
+ /* For NIC RX we don't need a restore rule
+ * since we write the metadata to reg_b,
+ * which is passed to SW directly.
+ */
+ chain_to_reg = NIC_CHAIN_TO_REG;
+ } else {
+ err = -EINVAL;
+ goto err_rule;
+ }
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, chain->id);
+ mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ chain->miss_modify_hdr = mod_hdr;
+
+ return 0;
+
+err_mod_hdr:
+ if (!IS_ERR_OR_NULL(chain->restore_rule))
+ mlx5_del_flow_rules(chain->restore_rule);
+err_rule:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(chains_mapping(chains), chain->id);
+ return err;
+}
+
+static void destroy_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ if (!chain->miss_modify_hdr)
+ return;
+
+ if (chain->restore_rule)
+ mlx5_del_flow_rules(chain->restore_rule);
+
+ mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
+ mapping_remove(chains_mapping(chains), chain->id);
+}
+
+static struct fs_chain *
+mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s = NULL;
+ int err;
+
+ chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
+ if (!chain_s)
+ return ERR_PTR(-ENOMEM);
+
+ chain_s->chains = chains;
+ chain_s->chain = chain;
+ INIT_LIST_HEAD(&chain_s->prios_list);
+
+ err = create_chain_restore(chain_s);
+ if (err)
+ goto err_restore;
+
+ err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return chain_s;
+
+err_insert:
+ destroy_chain_restore(chain_s);
+err_restore:
+ kvfree(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_chain(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ rhashtable_remove_fast(&chains_ht(chains), &chain->node,
+ chain_params);
+
+ destroy_chain_restore(chain);
+ kvfree(chain);
+}
+
+static struct fs_chain *
+mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s;
+
+ chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
+ chain_params);
+ if (!chain_s) {
+ chain_s = mlx5_chains_create_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return chain_s;
+ }
+
+ chain_s->ref++;
+
+ return chain_s;
+}
+
+static struct mlx5_flow_handle *
+mlx5_chains_add_miss_rule(struct fs_chain *chain,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_NO_APPEND;
+ if (mlx5_chains_ignore_flow_level_supported(chain->chains))
+ act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ if (next_ft == tc_end_ft(chains) &&
+ chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
+ mlx5_chains_prios_supported(chains)) {
+ act.modify_hdr = chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+mlx5_chains_update_prio_prevs(struct prio *prio,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fs_chain *chain = prio->chain;
+ struct prio *pos;
+ int n = 0, err;
+
+ if (prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_chains_add_miss_rule(chain,
+ pos->ft,
+ next_ft);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_ft = next_ft;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
+
+static void
+mlx5_chains_put_chain(struct fs_chain *chain)
+{
+ if (--chain->ref == 0)
+ mlx5_chains_destroy_chain(chain);
+}
+
+static struct prio *
+mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *ft;
+ struct prio *prio_s = NULL;
+ struct fs_chain *chain_s;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ chain_s = mlx5_chains_get_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return ERR_CAST(chain_s);
+
+ prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!prio_s || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Chain's prio list is sorted by prio and level.
+ * All levels of a given prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ tc_default_ft(chains) :
+ tc_end_ft(chains);
+ list_for_each(pos, &chain_s->prios_list) {
+ struct prio *p = list_entry(pos, struct prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_ft = p->key.level == 0 ? p->ft : p->next_ft;
+ break;
+ }
+ }
+
+ ft = mlx5_chains_create_table(chains, chain, prio, level);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_create;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_ft */
+ miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ prio_s->miss_group = miss_group;
+ prio_s->miss_rule = miss_rule;
+ prio_s->next_ft = next_ft;
+ prio_s->chain = chain_s;
+ prio_s->key.chain = chain;
+ prio_s->key.prio = prio;
+ prio_s->key.level = level;
+ prio_s->ft = ft;
+
+ err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&prio_s->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_chains_update_prio_prevs(prio_s, ft);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return prio_s;
+
+err_update:
+ list_del(&prio_s->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_chains_destroy_table(chains, ft);
+err_create:
+err_alloc:
+ kvfree(prio_s);
+ kvfree(flow_group_in);
+ mlx5_chains_put_chain(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
+ struct prio *prio)
+{
+ struct fs_chain *chain = prio->chain;
+
+ WARN_ON(mlx5_chains_update_prio_prevs(prio,
+ prio->next_ft));
+
+ list_del(&prio->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio->node,
+ prio_params);
+ mlx5_del_flow_rules(prio->miss_rule);
+ mlx5_destroy_flow_group(prio->miss_group);
+ mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_chains_put_chain(chain);
+ kvfree(prio);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct prio *prio_s;
+ struct prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_chains_get_chain_range(chains) &&
+ chain != mlx5_chains_get_nf_ft_chain(chains)) ||
+ prio > mlx5_chains_get_prio_range(chains) ||
+ level > mlx5_chains_get_level_range(chains))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ prio_s = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s) {
+ prio_s = mlx5_chains_create_prio(chains, chain,
+ prio, level);
+ if (IS_ERR(prio_s))
+ goto err_create_prio;
+ }
+
+ ++prio_s->ref;
+ mutex_unlock(&chains_lock(chains));
+
+ return prio_s->ft;
+
+err_create_prio:
+ mutex_unlock(&chains_lock(chains));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_chains_put_table(chains, chain, prio, l);
+ return ERR_CAST(prio_s);
+}
+
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct prio *prio_s;
+ struct prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s)
+ goto err_get_prio;
+
+ if (--prio_s->ref == 0)
+ mlx5_chains_destroy_prio(chains, prio_s);
+ mutex_unlock(&chains_lock(chains));
+
+ while (level-- > 0)
+ mlx5_chains_put_table(chains, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&chains_lock(chains));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
+{
+ return tc_end_ft(chains);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!mlx5_chains_ignore_flow_level_supported(chains)) {
+ err = -EOPNOTSUPP;
+
+ mlx5_core_warn(chains->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+ chain = mlx5_chains_get_chain_range(chains);
+ prio = mlx5_chains_get_prio_range(chains);
+ level = mlx5_chains_get_level_range(chains);
+
+ return mlx5_chains_create_table(chains, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_destroy_table(chains, ft);
+}
+
+static struct mlx5_fs_chains *
+mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains_priv;
+ struct mapping_ctx *mapping;
+ u32 max_flow_counter;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return ERR_PTR(-ENOMEM);
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ mlx5_core_dbg(dev,
+ "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
+
+ chains_priv->dev = dev;
+ chains_priv->flags = attr->flags;
+ chains_priv->ns = attr->ns;
+ chains_priv->group_num = attr->max_grp_num;
+ tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
+
+ mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_chains_get_chain_range(chains_priv),
+ mlx5_chains_get_prio_range(chains_priv));
+
+ mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
+
+ err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ chains_mapping(chains_priv) = mapping;
+
+ mutex_init(&chains_lock(chains_priv));
+
+ return chains_priv;
+
+mapping_err:
+ rhashtable_destroy(&prios_ht(chains_priv));
+init_prios_ht_err:
+ rhashtable_destroy(&chains_ht(chains_priv));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
+{
+ mutex_destroy(&chains_lock(chains));
+ mapping_destroy(chains_mapping(chains));
+ rhashtable_destroy(&prios_ht(chains));
+ rhashtable_destroy(&chains_ht(chains));
+
+ kfree(chains);
+}
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains;
+
+ chains = mlx5_chains_init(dev, attr);
+
+ return chains;
+}
+
+void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_cleanup(chains);
+}
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(chains_mapping(chains), &chain, chain_mapping);
+}
+
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
+{
+ return mapping_remove(chains_mapping(chains), chain_mapping);
+}
+
+int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(chains_mapping(chains), tag, chain);
+ if (err) {
+ mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
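The new fs_chains.c keeps chains and prios as refcounted objects in rhashtables: mlx5_chains_get_table() creates the chain, prio, and flow table on first use (including any lower levels) and takes a reference, and the last matching mlx5_chains_put_table() tears everything down again. A hedged caller sketch under that reading; the chain/prio/level values are placeholders and the include assumes the mlx5 core tree:

#include "lib/fs_chains.h"

static int demo_use_chain_table(struct mlx5_fs_chains *chains)
{
	struct mlx5_flow_table *ft;

	/* Takes a reference; creates chain/prio/table on first use. */
	ft = mlx5_chains_get_table(chains, 0 /* chain */, 1 /* prio */,
				   0 /* level */);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	/* ... add flow rules to ft ... */

	/* Drops the reference; the last put destroys the table. */
	mlx5_chains_put_table(chains, 0, 1, 0);
	return 0;
}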
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
new file mode 100644
index 000000000000..6d5be31b05dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __ML5_ESW_CHAINS_H__
+#define __ML5_ESW_CHAINS_H__
+
+#include <linux/mlx5/fs.h>
+
+struct mlx5_fs_chains;
+
+enum mlx5_chains_flags {
+ MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
+ MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
+};
+
+struct mlx5_chains_attr {
+ enum mlx5_flow_namespace_type ns;
+ u32 flags;
+ u32 max_ft_sz;
+ u32 max_grp_num;
+ struct mlx5_flow_table *default_ft;
+ u32 max_restore_tag;
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+bool
+mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool
+mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
+ u32 chain_mapping);
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
+void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
+
+int
+mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{ return NULL; }
+static inline void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index d046db7bb047..3a9fa629503f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
- return devlink_net(priv_to_devlink(dev));
-}
-
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 3315afe2f8dc..38084400ee8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -168,6 +168,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
{
+ if (!mlx5_vxlan_allowed(vxlan))
+ return;
+
+ mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
+ WARN_ON(!hash_empty(vxlan->htable));
+
+ kfree(vxlan);
+}
+
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
+{
struct mlx5_vxlan_port *vxlanp;
struct hlist_node *tmp;
int bkt;
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
if (!mlx5_vxlan_allowed(vxlan))
return;
- /* Lockless since we are the only hash table consumers*/
hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
- hash_del(&vxlanp->hlist);
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
- kfree(vxlanp);
+ /* Don't delete the default UDP port added by the HW.
+ * Remove only user-configured ports.
+ */
+ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
+ continue;
+ mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
}
-
- kfree(vxlan);
}
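The vxlan.c change splits teardown in two: mlx5_vxlan_destroy() now also removes the HW-added default IANA port (4789) and frees the context, while the new mlx5_vxlan_reset_to_default() strips only user-configured ports and keeps the context usable. A sketch of how a caller might use the split; the wrapper names are hypothetical:

#include "lib/vxlan.h"

/* Undo runtime configuration but keep the vxlan context (and the
 * HW-default IANA port) for reuse.
 */
static void demo_cfg_teardown(struct mlx5_vxlan *vxlan)
{
	mlx5_vxlan_reset_to_default(vxlan);
}

/* Full removal: deletes the default port and frees the context. */
static void demo_device_remove(struct mlx5_vxlan *vxlan)
{
	mlx5_vxlan_destroy(vxlan);
}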
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
index ec766529f49b..34ef662da35e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
#else
static inline struct mlx5_vxlan*
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
#endif
#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ce43e3feccd9..8ff207aa1479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -57,6 +57,7 @@
#include "lib/mpfs.h"
#include "eswitch.h"
#include "devlink.h"
+#include "fw_reset.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
@@ -548,6 +549,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, dct))
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
+
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
set_hca_cap,
@@ -739,7 +743,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
- priv->numa_node = dev_to_node(&dev->pdev->dev);
+ priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -832,6 +836,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eq_cleanup;
}
+ err = mlx5_fw_reset_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize fw reset events\n");
+ goto err_events_cleanup;
+ }
+
mlx5_cq_debugfs_init(dev);
mlx5_init_reserved_gids(dev);
@@ -893,6 +903,8 @@ err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cq_debugfs_cleanup(dev);
+ mlx5_fw_reset_cleanup(dev);
+err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
@@ -920,6 +932,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
+ mlx5_fw_reset_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
@@ -1078,6 +1091,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fw_tracer;
}
+ mlx5_fw_reset_events_start(dev);
mlx5_hv_vhca_init(dev->hv_vhca);
err = mlx5_rsc_dump_init(dev);
@@ -1139,6 +1153,7 @@ err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
+ mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_eq_table_destroy(dev);
@@ -1161,6 +1176,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
+ mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
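The main.c hunks slot mlx5_fw_reset_init() into mlx5_init_once() right after the events stage, with the usual goto unwinding on error, and mirror it with mlx5_fw_reset_cleanup() in reverse order on both the error path and in mlx5_cleanup_once(). A condensed sketch of that symmetry, assuming the mlx5_events_init()/mlx5_events_cleanup() pair visible in the surrounding context; only the two stages touched by the patch are shown:

static int demo_init_once(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_events_init(dev);
	if (err)
		return err;

	err = mlx5_fw_reset_init(dev);
	if (err)
		goto err_events_cleanup;

	return 0;

err_events_cleanup:
	mlx5_events_cleanup(dev);
	return err;
}

static void demo_cleanup_once(struct mlx5_core_dev *dev)
{
	/* Reverse order of init. */
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
}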
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc1649dac11b..8cec85ab419d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -100,6 +100,11 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
+{
+ return &dev->pdev->dev;
+}
+
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
@@ -123,6 +128,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
+u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
+int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
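mlx5_core_dma_dev() gives the "which struct device do we DMA against" decision a single home; the pagealloc.c and steering hunks below convert open-coded dev->device and &mdev->pdev->dev uses to it. A sketch of mapping through the accessor; the helper around it is illustrative:

#include <linux/dma-mapping.h>

static int demo_map(struct mlx5_core_dev *dev, void *buf, size_t size,
		    dma_addr_t *addr)
{
	struct device *dma_dev = mlx5_core_dma_dev(dev);

	*addr = dma_map_single(dma_dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dma_dev, *addr))
		return -ENOMEM;

	return 0;
}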
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c0e18f2ade99..150638814517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -238,7 +238,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
- dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@@ -265,7 +265,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
- struct device *device = dev->device;
+ struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e4186e84b3ff..4bb219565c58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -154,24 +154,8 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
- if (err)
- return err;
-
- *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port)
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
@@ -181,11 +165,12 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
if (err)
return err;
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
return 0;
}
-EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
+EXPORT_SYMBOL(mlx5_query_ib_port_oper);
/* This function should be used after setting a port register only */
void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
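mlx5_query_ib_port_oper() folds the two removed helpers into a single PTYS register read that returns both operational fields, widened to u16 to match the register layout. A hedged caller sketch; the translation step is only indicated by a comment:

static int demo_query_ib_oper(struct mlx5_core_dev *dev, u8 port)
{
	u16 width_oper, proto_oper;
	int err;

	/* One PTYS read now yields both fields. */
	err = mlx5_query_ib_port_oper(dev, &width_oper, &proto_oper, port);
	if (err)
		return err;

	/* ... translate width_oper/proto_oper to IB verbs values ... */
	return 0;
}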
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c63f727273d8..7df883686d46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -203,7 +203,6 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
- struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
bool inner, rx;
int idx = 0;
@@ -252,18 +251,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
+ dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
- inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
@@ -313,8 +308,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
inner, rx);
- misc3 = &mask.misc3;
- if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) &&
+ if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
(dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
@@ -340,10 +334,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
- &mask, inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
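dr_matcher.c can drop the error handling around these builders because mask validation moved out of the builders and into mlx5dr_ste_build_pre_check(), as the dr_ste.c hunk further below shows. A sketch of the all-or-nothing mask check now done up front; the helper name is illustrative:

static int demo_pre_check_partial_mask(struct mlx5dr_domain *dmn,
				       struct mlx5dr_match_misc *misc)
{
	/* A source_port mask must be all-or-nothing. */
	if (misc->source_port && misc->source_port != 0xffff) {
		mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
		return -EINVAL;
	}

	return 0;
}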
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6ec5106bc472..b3c9dc032026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -242,7 +242,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
- if (mlx5dr_ste_not_used_ste(new_ste)) {
+ if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
list_add_tail(&new_ste->miss_list_node,
mlx5dr_ste_get_miss_list(new_ste));
@@ -335,7 +335,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
for (i = 0; i < cur_entries; i++) {
cur_ste = &cur_htbl->ste_arr[i];
- if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
+ if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
err = dr_rule_rehash_copy_miss_list(matcher,
@@ -791,7 +791,7 @@ again:
miss_list = &cur_htbl->chunk->miss_list[index];
ste = &cur_htbl->ste_arr[index];
- if (mlx5dr_ste_not_used_ste(ste)) {
+ if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
ste, ste_location,
hw_ste, miss_list,
@@ -985,31 +985,28 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
enum mlx5dr_ste_entry_type ste_type,
struct mlx5dr_match_param *mask,
- struct mlx5dr_match_param *value)
+ struct mlx5dr_match_param *value,
+ u32 flow_source)
{
+ bool rx = ste_type == MLX5DR_STE_TYPE_RX;
+
if (domain != MLX5DR_DOMAIN_TYPE_FDB)
return false;
if (mask->misc.source_port) {
- if (ste_type == MLX5DR_STE_TYPE_RX)
- if (value->misc.source_port != WIRE_PORT)
- return true;
+ if (rx && value->misc.source_port != WIRE_PORT)
+ return true;
- if (ste_type == MLX5DR_STE_TYPE_TX)
- if (value->misc.source_port == WIRE_PORT)
- return true;
+ if (!rx && value->misc.source_port == WIRE_PORT)
+ return true;
}
- /* Metadata C can be used to describe the source vport */
- if (mask->misc2.metadata_reg_c_0) {
- if (ste_type == MLX5DR_STE_TYPE_RX)
- if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
- return true;
+ if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
+ return true;
+
+ if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
+ return true;
- if (ste_type == MLX5DR_STE_TYPE_TX)
- if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
- return true;
- }
return false;
}
@@ -1038,7 +1035,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
INIT_LIST_HEAD(&nic_rule->rule_members_list);
- if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
+ if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
+ rule->flow_source))
return 0;
hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
@@ -1173,7 +1171,8 @@ static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[])
+ struct mlx5dr_action *actions[],
+ u32 flow_source)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param param = {};
@@ -1188,6 +1187,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
return NULL;
rule->matcher = matcher;
+ rule->flow_source = flow_source;
INIT_LIST_HEAD(&rule->rule_actions_list);
ret = dr_rule_add_action_members(rule, num_actions, actions);
@@ -1232,13 +1232,14 @@ free_rule:
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[])
+ struct mlx5dr_action *actions[],
+ u32 flow_source)
{
struct mlx5dr_rule *rule;
refcount_inc(&matcher->refcount);
- rule = dr_rule_create_rule(matcher, value, num_actions, actions);
+ rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
if (!rule)
refcount_dec(&matcher->refcount);
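mlx5dr_rule_create() gains a flow_source argument so dr_rule_skip() can prune the RX or TX half of an FDB rule when the caller already knows where the traffic originates, instead of inferring it from metadata register C0. A caller sketch under the assumption that the rule only ever sees local-vport traffic; everything but the flow_source constant is a placeholder:

static struct mlx5dr_rule *
demo_create_rule(struct mlx5dr_matcher *matcher,
		 struct mlx5dr_match_parameters *value,
		 struct mlx5dr_action *action)
{
	struct mlx5dr_action *actions[] = { action };

	/* The flow_source hint lets SW steering skip building the
	 * RX (or TX) half of the rule when it can never match.
	 */
	return mlx5dr_rule_create(matcher, value, 1, actions,
				  MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT);
}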
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 2ca79b9bde1f..24dede1b0a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -466,10 +466,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
- u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
+ struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
- if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
+ if (mlx5dr_ste_is_not_used(ste)) {
memcpy(data + ste_off,
formatted_ste, DR_STE_SIZE);
} else {
@@ -831,7 +831,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
if (!mr)
return NULL;
- dma_device = &mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, buf, size,
DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
@@ -860,7 +860,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
mlx5_core_destroy_mkey(mdev, &mr->mkey);
- dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+ dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 00c2f598f034..b01aaec75622 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -155,6 +155,13 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
+static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+ return hw_ste->tag;
+}
+
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -549,25 +556,6 @@ void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
-/* The assumption here is that we don't update the ste->hw_ste if it is not
- * used ste, so it will be all zero, checking the next_lu_type.
- */
-bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
-{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
-
- if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
- MLX5DR_STE_LU_TYPE_NOP)
- return true;
-
- return false;
-}
-
-bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
-{
- return !ste->refcount;
-}
-
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -728,7 +716,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
- mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
+ mlx5dr_err(dmn,
+ "Partial mask source_port is not supported\n");
+ return -EINVAL;
+ }
+ if (mask->misc.source_eswitch_owner_vhca_id &&
+ mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
+ mlx5dr_err(dmn,
+ "Partial mask source_eswitch_owner_vhca_id is not supported\n");
return -EINVAL;
}
}
@@ -760,7 +755,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, ste_arr);
+ ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -778,8 +773,8 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
return 0;
}
-static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
+static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
@@ -807,13 +802,6 @@ static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
-
- if (mask->cvlan_tag || mask->svlan_tag) {
- pr_info("Invalid c/svlan mask configuration\n");
- return -EINVAL;
- }
-
- return 0;
}
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
@@ -1059,11 +1047,9 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
@@ -1104,23 +1090,17 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-
- return 0;
}
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
@@ -1136,11 +1116,9 @@ static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
@@ -1176,11 +1154,9 @@ static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
@@ -1238,11 +1214,9 @@ static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
@@ -1328,12 +1302,10 @@ dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
}
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *hw_ste_p)
+ bool inner, u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
@@ -1403,16 +1375,14 @@ static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
@@ -1440,16 +1410,14 @@ static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
@@ -1495,12 +1463,10 @@ static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
@@ -1561,11 +1527,9 @@ static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *va
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
@@ -1608,11 +1572,9 @@ static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
@@ -1647,7 +1609,7 @@ void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
return 0;
}
@@ -1673,11 +1635,9 @@ static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
if (sb->inner)
DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
@@ -1716,11 +1676,9 @@ static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
@@ -1781,11 +1739,9 @@ static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
@@ -1903,11 +1859,9 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u8 *tag = hw_ste->tag;
u32 icmp_header_data;
int dw0_location;
int dw1_location;
@@ -2007,11 +1961,9 @@ static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc_2_mask, metadata_reg_a);
@@ -2052,11 +2004,9 @@ static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
- u8 *tag = hw_ste->tag;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
@@ -2102,11 +2052,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value
static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
@@ -2158,11 +2106,9 @@ dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
@@ -2205,11 +2151,9 @@ static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
@@ -2249,11 +2193,9 @@ static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
@@ -2276,38 +2218,25 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
}
-static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
+static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
- /* Partial misc source_port is not supported */
- if (misc_mask->source_port && misc_mask->source_port != 0xffff)
- return -EINVAL;
-
- /* Partial misc source_eswitch_owner_vhca_id is not supported */
- if (misc_mask->source_eswitch_owner_vhca_id &&
- misc_mask->source_eswitch_owner_vhca_id != 0xffff)
- return -EINVAL;
-
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
misc_mask->source_eswitch_owner_vhca_id = 0;
-
- return 0;
}
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
- u8 *tag = hw_ste->tag;
bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
@@ -2339,19 +2268,15 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn,
- bool inner, bool rx)
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn,
+ bool inner, bool rx)
{
- int ret;
-
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->dmn = dmn;
@@ -2359,6 +2284,4 @@ int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
-
- return 0;
}
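The pattern repeated throughout the dr_ste.c hunks above: each tag builder used to cast hw_ste_p to struct dr_hw_ste_format and fetch hw_ste->tag itself, and each bit-mask builder returned int only to report validation failures. Now a single caller resolves the tag pointer once for every builder, the validation happens earlier, and both families of helpers shrink accordingly. A minimal user-space sketch of the resulting shape (the struct layout and all names here are illustrative, not the driver's):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-in for dr_hw_ste_format in dr_ste.c. */
	struct hw_ste {
		uint8_t ctrl[16];
		uint8_t tag[16];
		uint8_t mask[16];
	};

	/* Builders now receive the tag area directly... */
	static int build_example_tag(const uint32_t *match, uint8_t *tag)
	{
		memcpy(tag, match, sizeof(*match)); /* stand-in for DR_STE_SET_TAG() */
		return 0;
	}

	/* ...and one caller resolves hw_ste->tag for every builder. */
	static int ste_build(struct hw_ste *ste, const uint32_t *match,
			     int (*build_tag)(const uint32_t *, uint8_t *))
	{
		return build_tag(match, ste->tag);
	}

	int main(void)
	{
		struct hw_ste ste = { { 0 } };
		uint32_t match = 0xabcd1234;

		ste_build(&ste, &match, build_example_tag);
		printf("tag starts with %02x\n", ste.tag[0]); /* 34 on little-endian */
		return 0;
	}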
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 0883956c58c0..f50f3b107aa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -194,7 +194,7 @@ struct mlx5dr_ste_build {
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p);
+ u8 *tag);
};
struct mlx5dr_ste_htbl *
@@ -227,7 +227,6 @@ void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
-bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
@@ -266,6 +265,11 @@ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
ste->refcount++;
}
+static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
+{
+ return !ste->refcount;
+}
+
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
@@ -284,9 +288,9 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -342,10 +346,10 @@ void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn,
- bool inner, bool rx);
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn,
+ bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */
@@ -793,6 +797,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
+ u32 flow_source;
};
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
@@ -991,7 +996,6 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
-bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 9b08eb557a31..96c39a17d026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -487,7 +487,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
num_actions,
- actions);
+ actions,
+ fte->flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7deaca9ade3b..7914fe3fc68d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -67,7 +67,8 @@ struct mlx5dr_rule *
mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[]);
+ struct mlx5dr_action *actions[],
+ u32 flow_source);
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ec45a03140d7..937b8e46f8c7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -20,11 +20,13 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>
#include "core.h"
+#include "core_env.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
@@ -32,6 +34,7 @@
#include "emad.h"
#include "reg.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -82,6 +85,11 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
+ struct {
+ struct devlink_health_reporter *fw_fatal;
+ } health;
+ struct mlxsw_env *env;
+ bool is_initialized; /* Denotes if core was already initialized. */
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -128,6 +136,11 @@ bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->temp_warn_enabled;
+}
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev)
@@ -607,6 +620,9 @@ static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_transmit(trans->core, trans);
if (err == 0)
return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
} else {
err = -EIO;
}
@@ -864,6 +880,294 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+struct mlxsw_core_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+};
+
+static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
+ .component_query = mlxsw_core_fw_component_query,
+ .fsm_lock = mlxsw_core_fw_fsm_lock,
+ .fsm_component_update = mlxsw_core_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_core_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_core_fw_fsm_activate,
+ .fsm_query_state = mlxsw_core_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_core_fw_fsm_cancel,
+ .fsm_release = mlxsw_core_fw_fsm_release,
+};
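Read together, these callbacks encode the flash state machine that the shared mlxfw engine drives on the driver's behalf; roughly, as a reading of the ops above (error paths and the fsm_query_state polling between steps omitted):

	1. fsm_lock obtains an update handle, failing with -EBUSY unless the
	   FSM is idle;
	2. for each component in the firmware file: component_query reports
	   size/alignment limits (downloads are capped at
	   MLXSW_REG_MCDA_MAX_DATA_LEN per write), then fsm_component_update,
	   repeated fsm_block_download chunks, and fsm_component_verify;
	3. fsm_activate switches to the newly written image;
	4. fsm_release drops the handle, while fsm_cancel instead backs out a
	   failed sequence.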
+
+static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_fw_info mlxsw_core_fw_info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_core_fw_mlxsw_dev_ops,
+ .psid = mlxsw_core->bus_info->psid,
+ .psid_size = strlen(mlxsw_core->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_core),
+ },
+ .mlxsw_core = mlxsw_core
+ };
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
+static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_fw_rev *req_rev,
+ const char *filename)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
+ union devlink_param_value value;
+ const struct firmware *firmware;
+ int err;
+
+ /* Don't check if driver does not require it */
+ if (!req_rev || !filename)
+ return 0;
+
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
+ /* Validate driver & FW are compatible */
+ if (rev->major != req_rev->major) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, req_rev->major);
+ return -EINVAL;
+ }
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ return 0;
+
+ dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
+ dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
+
+ err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
+ return err;
+ }
+
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ release_firmware(firmware);
+ if (err)
+ dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
+}
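As written, the gate resolves to four outcomes: skip the check entirely when the driver supplies no required revision/filename or when fw_load_policy is 'flash'; WARN and fail with -EINVAL on a major version mismatch (a major is never cross-flashed); do nothing when the minor/subminor already satisfy the requirement; otherwise flash the bundled file and, per the comment above, return -EAGAIN on success when the running firmware is recent enough (minor >= can_reset_minor) to honor a reset request.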
+
+static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ const struct firmware *firmware;
+ int err;
+
+ err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev);
+ if (err)
+ return err;
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
+ release_firmware(firmware);
+
+ return err;
+}
+
+static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
+ val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlxsw_core_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ return 0;
+}
+
+static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+}
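Since the parameter is registered in driverinit cmode with 'driver' as the default, a changed policy takes effect on the next reload; for example, with the iproute2 devlink tool (the PCI address is hypothetical):

	devlink dev param set pci/0000:03:00.0 name fw_load_policy \
		value flash cmode driverinit
	devlink dev reload pci/0000:03:00.0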
+
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1113,7 +1417,8 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
- bool netns_change,
+ bool netns_change, enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -1126,11 +1431,14 @@ mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
}
static int
-mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
+mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
@@ -1138,17 +1446,12 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- if (!mlxsw_driver->flash_update)
- return -EOPNOTSUPP;
- return mlxsw_driver->flash_update(mlxsw_core, file_name,
- component, extack);
+ return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
}
static int mlxsw_devlink_trap_init(struct devlink *devlink,
@@ -1268,6 +1571,8 @@ mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
}
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1296,6 +1601,263 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
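Advertising reload_actions lets userspace request a specific action, and reload_up reports what was actually performed through actions_performed; e.g. (hypothetical address):

	devlink dev reload pci/0000:03:00.0 action driver_reinit
	devlink dev reload pci/0000:03:00.0 action fw_activate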
+static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_core_fw_params_register(mlxsw_core);
+ if (err)
+ return err;
+
+ if (mlxsw_core->driver->params_register) {
+ err = mlxsw_core->driver->params_register(mlxsw_core);
+ if (err)
+ goto err_params_register;
+ }
+ return 0;
+
+err_params_register:
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ return err;
+}
+
+static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ if (mlxsw_core->driver->params_register)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
+}
+
+struct mlxsw_core_health_event {
+ struct mlxsw_core *mlxsw_core;
+ char mfde_pl[MLXSW_REG_MFDE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_core_health_event_work(struct work_struct *work)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_core_health_event, work);
+ mlxsw_core = event->mlxsw_core;
+ devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
+ event->mfde_pl);
+ kfree(event);
+}
+
+static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
+ char *mfde_pl, void *priv)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
+ INIT_WORK(&event->work, mlxsw_core_health_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_core_health_listener =
+ MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+
+static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ char *mfde_pl = priv_ctx;
+ char *val_str;
+ u8 event_id;
+ u32 val;
+ int err;
+
+ if (!priv_ctx)
+ /* User-triggered dumps are not possible */
+ return -EOPNOTSUPP;
+
+ val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
+ if (err)
+ return err;
+
+ event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
+ if (err)
+ return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ val_str = "CR space timeout";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ val_str = "KVD insertion machine stopped";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_method_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_METHOD_QUERY:
+ val_str = "query";
+ break;
+ case MLXSW_REG_MFDE_METHOD_WRITE:
+ val_str = "write";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_long_process_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_command_type_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
+ val_str = "mad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
+ val_str = "emad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
+ val_str = "cmdif";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
+ if (err)
+ return err;
+
+ if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
+ val = mlxsw_reg_mfde_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
+ val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static const struct devlink_health_reporter_ops
+mlxsw_core_health_fw_fatal_ops = {
+ .name = "fw_fatal",
+ .dump = mlxsw_core_health_fw_fatal_dump,
+ .test = mlxsw_core_health_fw_fatal_test,
+};
+
+static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
+ bool enable)
+{
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_health_reporter *fw_fatal;
+ int err;
+
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return 0;
+
+ fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
+ if (IS_ERR(fw_fatal)) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
+ return PTR_ERR(fw_fatal);
+ }
+ mlxsw_core->health.fw_fatal = fw_fatal;
+
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ if (err)
+ goto err_trap_register;
+
+ err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
+ if (err)
+ goto err_fw_fatal_config;
+
+ return 0;
+
+err_fw_fatal_config:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+err_trap_register:
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ return err;
+}
+
+static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
+{
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return;
+
+ mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ /* Make sure there is no more event work scheduled */
+ mlxsw_core_flush_owq();
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+}
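Once created, the reporter is reachable through the generic devlink health interface; note that, per the dump handler above, a stored dump exists only after an event has fired, since user-triggered dumps return -EOPNOTSUPP. For example (address hypothetical; the last command fires the MFGD-triggered test event wired up in mlxsw_core_health_fw_fatal_test()):

	devlink health show pci/0000:03:00.0 reporter fw_fatal
	devlink health dump show pci/0000:03:00.0 reporter fw_fatal
	devlink health test pci/0000:03:00.0 reporter fw_fatal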
+
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -1368,12 +1930,21 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_register;
}
- if (mlxsw_driver->params_register && !reload) {
- err = mlxsw_driver->params_register(mlxsw_core);
+ if (!reload) {
+ err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
}
+ err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
+ mlxsw_driver->fw_filename);
+ if (err)
+ goto err_fw_rev_validate;
+
+ err = mlxsw_core_health_init(mlxsw_core);
+ if (err)
+ goto err_health_init;
+
if (mlxsw_driver->init) {
err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
@@ -1389,22 +1960,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->params_register)
- devlink_params_publish(devlink);
+ err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ if (err)
+ goto err_env_init;
+
+ mlxsw_core->is_initialized = true;
+ devlink_params_publish(devlink);
if (!reload)
devlink_reload_enable(devlink);
return 0;
+err_env_init:
+ mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
- if (mlxsw_driver->params_unregister && !reload)
- mlxsw_driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+err_health_init:
+err_fw_rev_validate:
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
if (!reload)
devlink_unregister(devlink);
@@ -1469,14 +2049,16 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- if (mlxsw_core->driver->params_unregister)
- devlink_params_unpublish(devlink);
+ devlink_params_unpublish(devlink);
+ mlxsw_core->is_initialized = false;
+ mlxsw_env_fini(mlxsw_core->env);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
- if (mlxsw_core->driver->params_unregister && !reload)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1485,12 +2067,13 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (!reload)
+ devlink_free(devlink);
return;
reload_fail_deinit:
- if (mlxsw_core->driver->params_unregister)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_params_unregister(mlxsw_core);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
@@ -2274,6 +2857,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->env;
+}
+
+bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->is_initialized;
+}
+
int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
{
enum mlxsw_reg_pmtm_module_type module_type;
@@ -2410,18 +3003,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = true;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
-
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = false;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
-
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
struct mlxsw_res *res)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 11af3308f8cc..92f7398287be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -32,6 +32,8 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core);
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -221,6 +223,8 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
+bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -280,6 +284,8 @@ struct mlxsw_driver {
struct list_head list;
const char *kind;
size_t priv_size;
+ const struct mlxsw_fw_rev *fw_req_rev;
+ const char *fw_filename;
int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
@@ -324,9 +330,6 @@ struct mlxsw_driver {
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
- int (*flash_update)(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack);
int (*trap_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx);
void (*trap_fini)(struct mlxsw_core *mlxsw_core,
@@ -371,6 +374,8 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
+ bool fw_fatal_enabled;
+ bool temp_warn_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -378,9 +383,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
-
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 056eeb85be60..dd26865bd587 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -10,6 +10,18 @@
#include "item.h"
#include "reg.h"
+struct mlxsw_env_module_info {
+ u64 module_overheat_counter;
+ bool is_overheat;
+};
+
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ u8 module_count;
+ spinlock_t module_info_lock; /* Protects 'module_info'. */
+ struct mlxsw_env_module_info module_info[];
+};
+
static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
bool *qsfp, bool *cmis)
{
@@ -293,3 +305,359 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
return 0;
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
+
+static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
+ u8 module,
+ bool *p_has_temp_sensor)
+{
+ char mtbr_pl[MLXSW_REG_MTBR_LEN];
+ u16 temp;
+ int err;
+
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
+ 1);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+
+ switch (temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
+ case MLXSW_REG_MTBR_INDEX_NA:
+ *p_has_temp_sensor = false;
+ break;
+ default:
+ *p_has_temp_sensor = temp ? true : false;
+ }
+ return 0;
+}
+
+static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
+ u16 sensor_index, bool enable)
+{
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ enum mlxsw_reg_mtmp_tee tee;
+ int err, threshold_hi;
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ sensor_index -
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN,
+ SFP_TEMP_HIGH_WARN,
+ &threshold_hi);
+ /* In case it is not possible to query the module's threshold,
+ * use the default value.
+ */
+ if (err)
+ threshold_hi = MLXSW_REG_MTMP_THRESH_HI;
+ else
+ /* mlxsw_env_module_temp_thresholds_get() multiplies
+ * Celsius degrees by 1000 whereas MTMP expects
+ * temperature in 0.125 Celsius degrees units.
+ * Convert threshold_hi to correct units.
+ */
+ threshold_hi = threshold_hi / 1000 * 8;
+
+ mlxsw_reg_mtmp_temperature_threshold_hi_set(mtmp_pl, threshold_hi);
+ mlxsw_reg_mtmp_temperature_threshold_lo_set(mtmp_pl, threshold_hi -
+ MLXSW_REG_MTMP_HYSTERESIS_TEMP);
+ }
+ tee = enable ? MLXSW_REG_MTMP_TEE_GENERATE_EVENT : MLXSW_REG_MTMP_TEE_NO_EVENT;
+ mlxsw_reg_mtmp_tee_set(mtmp_pl, tee);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+}
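The unit conversion in the enable path is easy to get backwards, so a worked example (user-space sketch; the 70-degree threshold is hypothetical):

	#include <stdio.h>

	int main(void)
	{
		/* SFP_TEMP_HIGH_WARN of 70 C arrives as 70000 millidegrees;
		 * MTMP wants eighths of a degree (0.125 C units).
		 */
		int threshold_hi = 70000;

		threshold_hi = threshold_hi / 1000 * 8; /* -> 560 */
		printf("%d eighths = %d.%03d C\n", threshold_hi,
		       threshold_hi / 8, threshold_hi % 8 * 125); /* 70.000 C */
		return 0;
	}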
+
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 module_count)
+{
+ int i, err, sensor_index;
+ bool has_temp_sensor;
+
+ for (i = 0; i < module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
+ &has_temp_sensor);
+ if (err)
+ return err;
+
+ if (!has_temp_sensor)
+ continue;
+
+ sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
+ char *mtwe_pl, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ int i, sensor_warning;
+ bool is_overheat;
+
+ for (i = 0; i < mlxsw_env->module_count; i++) {
+ /* 64-127 of sensor_index are mapped to the port modules
+ * sequentially (module 0 is mapped to sensor_index 64,
+ * module 1 to sensor_index 65 and so on)
+ */
+ sensor_warning =
+ mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+ i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
+ spin_lock(&mlxsw_env->module_info_lock);
+ is_overheat =
+ mlxsw_env->module_info[i].is_overheat;
+
+ if ((is_overheat && sensor_warning) ||
+ (!is_overheat && !sensor_warning)) {
+ /* Current state is "warning" and MTWE still reports
+ * warning OR current state is "no warning" and MTWE
+ * does not report warning.
+ */
+ spin_unlock(&mlxsw_env->module_info_lock);
+ continue;
+ } else if (is_overheat && !sensor_warning) {
+ /* MTWE reports "no warning", turn is_overheat off.
+ */
+ mlxsw_env->module_info[i].is_overheat = false;
+ spin_unlock(&mlxsw_env->module_info_lock);
+ } else {
+ /* Current state is "no warning" and MTWE reports
+ * "warning", increase the counter and turn is_overheat
+ * on.
+ */
+ mlxsw_env->module_info[i].is_overheat = true;
+ mlxsw_env->module_info[i].module_overheat_counter++;
+ spin_unlock(&mlxsw_env->module_info_lock);
+ }
+ }
+}
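The branch ladder above is a two-state machine per module whose counter advances only on a clean no-warning to warning edge; a condensed user-space model (names hypothetical):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct module_state {
		bool is_overheat;
		uint64_t counter;
	};

	static void handle_warning_bit(struct module_state *m, bool warning)
	{
		if (m->is_overheat == warning)
			return; /* state unchanged: nothing to do */
		m->is_overheat = warning;
		if (warning)
			m->counter++; /* count rising edges only */
	}

	int main(void)
	{
		struct module_state m = { 0 };
		bool samples[] = { false, true, true, false, true };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			handle_warning_bit(&m, samples[i]);
		printf("overheat events: %llu\n",
		       (unsigned long long)m.counter); /* 2 */
		return 0;
	}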
+
+static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
+ MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+
+static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
+ return 0;
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_temp_warn_listener,
+ mlxsw_env);
+}
+
+static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
+ return;
+
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_temp_warn_listener, mlxsw_env);
+}
+
+struct mlxsw_env_module_plug_unplug_event {
+ struct mlxsw_env *mlxsw_env;
+ u8 module;
+ struct work_struct work;
+};
+
+static void mlxsw_env_pmpe_event_work(struct work_struct *work)
+{
+ struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env *mlxsw_env;
+ bool has_temp_sensor;
+ u16 sensor_index;
+ int err;
+
+ event = container_of(work, struct mlxsw_env_module_plug_unplug_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
+ spin_lock_bh(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[event->module].is_overheat = false;
+ spin_unlock_bh(&mlxsw_env->module_info_lock);
+
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ &has_temp_sensor);
+ /* Do not disable events on modules without sensors or with faulty sensors
+ * because FW returns errors.
+ */
+ if (err)
+ goto out;
+
+ if (!has_temp_sensor)
+ goto out;
+
+ sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+
+out:
+ kfree(event);
+}
+
+static void
+mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_plug_unplug_event *event;
+ enum mlxsw_reg_pmpe_module_status module_status;
+ u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
+ struct mlxsw_env *mlxsw_env = priv;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
+ if (module_status != MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED)
+ return;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ event->module = module;
+ INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_env_module_plug_listener =
+ MLXSW_EVENTL(mlxsw_env_pmpe_listener_func, PMPE, PMPE);
+
+static int
+mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
+ return 0;
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static void
+mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
+ return;
+
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static int
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 module_count)
+{
+ int i, err;
+
+ for (i = 0; i < module_count; i++) {
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, i,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
+ u64 *p_counter)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ /* Prevent switch driver from accessing uninitialized data. */
+ if (!mlxsw_core_is_initialized(mlxsw_core)) {
+ *p_counter = 0;
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ spin_lock_bh(&mlxsw_env->module_info_lock);
+ *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
+ spin_unlock_bh(&mlxsw_env->module_info_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_env *env;
+ u8 module_count;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+
+ env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ spin_lock_init(&env->module_info_lock);
+ env->core = mlxsw_core;
+ env->module_count = module_count;
+ *p_env = env;
+
+ err = mlxsw_env_temp_warn_event_register(mlxsw_core);
+ if (err)
+ goto err_temp_warn_event_register;
+
+ err = mlxsw_env_module_plug_event_register(mlxsw_core);
+ if (err)
+ goto err_module_plug_event_register;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_core,
+ env->module_count);
+ if (err)
+ goto err_oper_state_event_enable;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_core, env->module_count);
+ if (err)
+ goto err_temp_event_enable;
+
+ return 0;
+
+err_temp_event_enable:
+err_oper_state_event_enable:
+ mlxsw_env_module_plug_event_unregister(env);
+err_module_plug_event_register:
+ mlxsw_env_temp_warn_event_unregister(env);
+err_temp_warn_event_register:
+ kfree(env);
+ return err;
+}
+
+void mlxsw_env_fini(struct mlxsw_env *env)
+{
+ mlxsw_env_module_plug_event_unregister(env);
+ /* Make sure there is no more event work scheduled. */
+ mlxsw_core_flush_owq();
+ mlxsw_env_temp_warn_event_unregister(env);
+ kfree(env);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 064d0e770c01..8e36a2634ef5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -14,4 +14,10 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, int module,
struct ethtool_eeprom *ee, u8 *data);
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
+ u64 *p_counter);
+int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+void mlxsw_env_fini(struct mlxsw_env *env);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 61719ec89808..2196c946698a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -12,8 +12,17 @@
#include "core.h"
#include "core_env.h"
-#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127
-#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \
+#define MLXSW_HWMON_SENSORS_MAX_COUNT 64
+#define MLXSW_HWMON_MODULES_MAX_COUNT 64
+#define MLXSW_HWMON_GEARBOXES_MAX_COUNT 32
+
+#define MLXSW_HWMON_ATTR_PER_SENSOR 3
+#define MLXSW_HWMON_ATTR_PER_MODULE 7
+#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+
+#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
+ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
+ MLXSW_HWMON_GEARBOXES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_GEARBOX + \
MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX)
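The new budget works out to 64 x 3 + 64 x 7 + 32 x 4 = 768 temperature attributes plus one per tachometer and PWM, replacing the old flat 127 x 4 estimate; the seven per-module slots cover temp, fault, crit, emergency and label plus the two alarm attributes added later in this patch.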
struct mlxsw_hwmon_attr {
@@ -97,7 +106,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
unsigned long val;
int index;
int err;
@@ -110,7 +119,13 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
@@ -205,25 +220,39 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
return len;
}
-static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int mlxsw_hwmon_module_temp_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL);
+
+ return 0;
+}
+
+static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
return sprintf(buf, "%d\n", temp);
}
@@ -270,48 +299,72 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
return sprintf(buf, "%u\n", fault);
}
-static ssize_t
-mlxsw_hwmon_module_temp_critical_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int temp;
u8 module;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, &temp);
+ SFP_TEMP_HIGH_WARN, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
- return sprintf(buf, "%u\n", temp);
+ return 0;
}
static ssize_t
-mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+mlxsw_hwmon_module_temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, &temp);
+ SFP_TEMP_HIGH_ALARM, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
+ return 0;
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &temp);
+ if (err)
+ return err;
+
return sprintf(buf, "%u\n", temp);
}
@@ -341,6 +394,53 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
return sprintf(buf, "gearbox %03u\n", index);
}
+static ssize_t mlxsw_hwmon_temp_critical_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp, critic_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ if (temp >= emergency_temp)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &critic_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= critic_temp);
+}
+
+static ssize_t mlxsw_hwmon_temp_emergency_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= emergency_temp);
+}
+
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
@@ -354,6 +454,8 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -444,6 +546,20 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_label", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_critical_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit_alarm", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_emergency_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency_alarm", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -460,7 +576,6 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
- char mtmp_pl[MLXSW_REG_MTMP_LEN];
int i;
int err;
@@ -471,7 +586,15 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
}
mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
- mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true);
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
+ mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -566,6 +689,12 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
+ i, i);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1c64b03ff48e..641cdd81882b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -620,9 +620,9 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(unsigned long data)
+static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
char *cqe;
int items = 0;
@@ -733,9 +733,9 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_eq_tasklet(unsigned long data)
+static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
@@ -792,7 +792,7 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(unsigned long data);
+ void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -855,7 +855,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->pci = mlxsw_pci;
if (q_ops->tasklet)
- tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+ tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
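The pci.c hunks are the tree-wide tasklet API conversion; in miniature, the callback stops taking a laundered unsigned long and instead recovers its queue with from_tasklet(), which is container_of() keyed on the tasklet member:

	/* old */
	tasklet_init(&q->tasklet, mlxsw_pci_cq_tasklet, (unsigned long)q);
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *)data;

	/* new */
	tasklet_setup(&q->tasklet, mlxsw_pci_cq_tasklet);
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);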
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 079b080de7f7..39eff6a57ba2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4174,7 +4174,6 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1)
-#define MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII BIT(2)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5)
@@ -4197,7 +4196,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
-#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
@@ -4210,10 +4208,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
-#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
-#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
@@ -5411,6 +5405,64 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
mlxsw_reg_pspa_sub_port_set(payload, 0);
}
+/* PMAOS - Ports Module Administrative and Operational Status
+ * ----------------------------------------------------------
+ * This register configures and retrieves the per-module status.
+ */
+#define MLXSW_REG_PMAOS_ID 0x5012
+#define MLXSW_REG_PMAOS_LEN 0x10
+
+MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+
+/* reg_pmaos_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
+
+/* reg_pmaos_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+
+/* reg_pmaos_ase
+ * Admin state update enable.
+ * If this bit is set, admin state will be updated based on admin_state field.
+ * Only relevant on Set() operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ase, 0x04, 31, 1);
+
+/* reg_pmaos_ee
+ * Event update enable.
+ * If this bit is set, event generation will be updated based on the e field.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ee, 0x04, 30, 1);
+
+enum mlxsw_reg_pmaos_e {
+ MLXSW_REG_PMAOS_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmaos_e
+ * Event Generation on operational state change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
+ enum mlxsw_reg_pmaos_e e)
+{
+ MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_module_set(payload, module);
+ mlxsw_reg_pmaos_e_set(payload, e);
+ mlxsw_reg_pmaos_ee_set(payload, true);
+}
+
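A minimal sketch of how a caller might arm plug/unplug events with the pack helper above. mlxsw_reg_write() and MLXSW_REG() are existing driver primitives; the wrapper function itself is hypothetical:

static int example_pmaos_event_enable(struct mlxsw_core *mlxsw_core, u8 module)
{
	char pmaos_pl[MLXSW_REG_PMAOS_LEN];

	/* ee is set by the pack helper, so the e field takes effect. */
	mlxsw_reg_pmaos_pack(pmaos_pl, module, MLXSW_REG_PMAOS_E_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}
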
/* PPLR - Port Physical Loopback Register
* --------------------------------------
* This register allows configuration of the port's loopback mode.
@@ -5447,6 +5499,50 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMPE - Port Module Plug/Unplug Event Register
+ * ---------------------------------------------
+ * This register reports any operational status change of a module.
+ * A change in the module's state will generate an event only if the change
+ * happens after arming the event mechanism. Any changes to the module state
+ * while the event mechanism is not armed will not be reported. Software can
+ * query the PMPE register for module status.
+ */
+#define MLXSW_REG_PMPE_ID 0x5024
+#define MLXSW_REG_PMPE_LEN 0x10
+
+MLXSW_REG_DEFINE(pmpe, MLXSW_REG_PMPE_ID, MLXSW_REG_PMPE_LEN);
+
+/* reg_pmpe_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, slot_index, 0x00, 24, 4);
+
+/* reg_pmpe_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmpe_module_status {
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED = 1,
+ MLXSW_REG_PMPE_MODULE_STATUS_UNPLUGGED,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ERROR,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_DISABLED,
+};
+
+/* reg_pmpe_module_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, module_status, 0x00, 0, 4);
+
+/* reg_pmpe_error_type
+ * Module error details.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, error_type, 0x04, 8, 4);
+
/* PDDR - Port Diagnostics Database Register
* -----------------------------------------
* The PDDR register enables reading the PHY debug database
@@ -5585,6 +5681,9 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
+ MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
+ MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -8418,6 +8517,13 @@ MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
* 2 - Generate single event
* Access: RW
*/
+
+enum mlxsw_reg_mtmp_tee {
+ MLXSW_REG_MTMP_TEE_NO_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_SINGLE_EVENT,
+};
+
MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
@@ -8428,6 +8534,7 @@ MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
*/
MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
+#define MLXSW_REG_MTMP_HYSTERESIS_TEMP 0x28 /* 5 Celsius */
/* reg_mtmp_temperature_threshold_lo
* Low threshold for Temperature Warning Event. In 0.125 Celsius.
* Access: RW
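A hedged sketch of arming a temperature warning on a sensor using the tee field and the thresholds defined here. The setters are generated from the items above; the surrounding query/write flow is elided:

static void example_mtmp_arm(char *mtmp_pl)
{
	mlxsw_reg_mtmp_tee_set(mtmp_pl, MLXSW_REG_MTMP_TEE_GENERATE_EVENT);
	mlxsw_reg_mtmp_temperature_threshold_hi_set(mtmp_pl,
						    MLXSW_REG_MTMP_THRESH_HI);
	/* Low threshold trails the high one by the 5 Celsius hysteresis. */
	mlxsw_reg_mtmp_temperature_threshold_lo_set(mtmp_pl,
						    MLXSW_REG_MTMP_THRESH_HI -
						    MLXSW_REG_MTMP_HYSTERESIS_TEMP);
}
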
@@ -8471,6 +8578,23 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MTWE - Management Temperature Warning Event
+ * -------------------------------------------
+ * This register is used for over temperature warning.
+ */
+#define MLXSW_REG_MTWE_ID 0x900B
+#define MLXSW_REG_MTWE_LEN 0x10
+
+MLXSW_REG_DEFINE(mtwe, MLXSW_REG_MTWE_ID, MLXSW_REG_MTWE_LEN);
+
+/* reg_mtwe_sensor_warning
+ * Bit vector indicating which of the sensor readings are above threshold.
+ * Address 00h bit31 is sensor_warning[127].
+ * Address 0Ch bit0 is sensor_warning[0].
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
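
A minimal sketch of scanning the warning vector. The indexed getter is generated by MLXSW_ITEM_BIT_ARRAY(), and the 128-sensor bound mirrors the layout described above:

static void example_mtwe_scan(const char *mtwe_pl)
{
	int i;

	for (i = 0; i < 128; i++)
		if (mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl, i))
			pr_warn("sensor %d above temperature threshold\n", i);
}
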
+
/* MTBR - Management Temperature Bulk Register
* -------------------------------------------
* This register is used for bulk temperature reading.
@@ -9827,6 +9951,26 @@ static inline void mlxsw_reg_mtptptp_pack(char *payload,
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MFGD - Monitoring FW General Debug Register
+ * -------------------------------------------
+ */
+#define MLXSW_REG_MFGD_ID 0x90F0
+#define MLXSW_REG_MFGD_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfgd, MLXSW_REG_MFGD_ID, MLXSW_REG_MFGD_LEN);
+
+/* reg_mfgd_fw_fatal_event_mode
+ * 0 - don't check FW fatal (default)
+ * 1 - check FW fatal - enable MFDE trap
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfgd, fatal_event_mode, 0x00, 9, 2);
+
+/* reg_mfgd_trigger_test
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mfgd, trigger_test, 0x00, 11, 1);
+
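A hedged sketch of turning on FW fatal event reporting via MFGD. The read-modify-write shape preserves other register fields; the wrapper name is hypothetical:

static int example_mfgd_fatal_enable(struct mlxsw_core *mlxsw_core)
{
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	/* 1 - check FW fatal, i.e. enable the MFDE trap. */
	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, 1);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}
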
/* MGPIR - Management General Peripheral Information Register
* ----------------------------------------------------------
* MGPIR register allows software to query the hardware and
@@ -9886,6 +10030,84 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
+/* MFDE - Monitoring FW Debug Register
+ * -----------------------------------
+ */
+#define MLXSW_REG_MFDE_ID 0x9200
+#define MLXSW_REG_MFDE_LEN 0x18
+
+MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
+
+/* reg_mfde_irisc_id
+ * Which irisc triggered the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4);
+
+enum mlxsw_reg_mfde_event_id {
+ MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
+ /* KVD insertion machine stopped */
+ MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+};
+
+/* reg_mfde_event_id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8);
+
+enum mlxsw_reg_mfde_method {
+ MLXSW_REG_MFDE_METHOD_QUERY,
+ MLXSW_REG_MFDE_METHOD_WRITE,
+};
+
+/* reg_mfde_method
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, method, 0x04, 29, 1);
+
+/* reg_mfde_long_process
+ * Indicates if the command is in long_process mode.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, long_process, 0x04, 28, 1);
+
+enum mlxsw_reg_mfde_command_type {
+ MLXSW_REG_MFDE_COMMAND_TYPE_MAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_EMAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF,
+};
+
+/* reg_mfde_command_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
+
+/* reg_mfde_reg_attr_id
+ * EMAD - register id, MAD - attribute id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
+
+/* reg_mfde_log_address
+ * crspace address accessed, which resulted in the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+
+/* reg_mfde_log_id
+ * Which irisc triggered the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+
+/* reg_mfde_pipes_mask
+ * Bit per kvh pipe.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+
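A hedged sketch of decoding an MFDE payload in a fatal-event handler. Note that log_address/log_id and pipes_mask overlay the same offsets and are interpreted per event_id, so the switch below is illustrative:

static void example_mfde_report(const char *mfde_pl)
{
	switch (mlxsw_reg_mfde_event_id_get(mfde_pl)) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		pr_err("FW fatal: crspace timeout at 0x%x, irisc %u\n",
		       mlxsw_reg_mfde_log_address_get(mfde_pl),
		       mlxsw_reg_mfde_log_id_get(mfde_pl));
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		pr_err("FW fatal: KVD insertion machine stopped, pipes 0x%x\n",
		       mlxsw_reg_mfde_pipes_mask_get(mfde_pl));
		break;
	}
}
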
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
* The TNGCR register is used for setting up the NVE Tunneling configuration.
@@ -10948,7 +11170,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
+ MLXSW_REG(pmaos),
MLXSW_REG(pplr),
+ MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmtm),
MLXSW_REG(htgt),
@@ -10978,6 +11202,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(fore),
MLXSW_REG(mtcap),
MLXSW_REG(mtmp),
+ MLXSW_REG(mtwe),
MLXSW_REG(mtbr),
MLXSW_REG(mcia),
MLXSW_REG(mpat),
@@ -10999,7 +11224,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f3c0e241e1b4..b08853f71b2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -42,11 +42,10 @@
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
-#include "../mlxfw/mlxfw.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2007
-#define MLXSW_SP1_FWREV_SUBMINOR 1168
+#define MLXSW_SP1_FWREV_MINOR 2008
+#define MLXSW_SP1_FWREV_SUBMINOR 1310
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,8 +61,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2007
-#define MLXSW_SP2_FWREV_SUBMINOR 1168
+#define MLXSW_SP2_FWREV_MINOR 2008
+#define MLXSW_SP2_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,8 +76,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2007
-#define MLXSW_SP3_FWREV_SUBMINOR 1168
+#define MLXSW_SP3_FWREV_MINOR 2008
+#define MLXSW_SP3_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -170,274 +169,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-struct mlxsw_sp_mlxfw_dev {
- struct mlxfw_dev mlxfw_dev;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
- u16 component_index, u32 *p_max_size,
- u8 *p_align_bits, u16 *p_max_write_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcqi_pl[MLXSW_REG_MCQI_LEN];
- int err;
-
- mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
- if (err)
- return err;
- mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
- p_max_write_size);
-
- *p_align_bits = max_t(u8, *p_align_bits, 2);
- *p_max_write_size = min_t(u16, *p_max_write_size,
- MLXSW_REG_MCDA_MAX_DATA_LEN);
- return 0;
-}
-
-static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
- if (control_state != MLXFW_FSM_STATE_IDLE)
- return -EBUSY;
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
- 0, *fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index,
- u32 component_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
- component_index, fwhandle, component_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u8 *data, u16 size,
- u32 offset)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcda_pl[MLXSW_REG_MCDA_LEN];
-
- mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
-}
-
-static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
- component_index, fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
- fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- enum mlxfw_fsm_state *fsm_state,
- enum mlxfw_fsm_state_err *fsm_state_err)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- u8 error_code;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
- *fsm_state = control_state;
- *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
- MLXFW_FSM_STATE_ERR_MAX);
- return 0;
-}
-
-static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
- .component_query = mlxsw_sp_component_query,
- .fsm_lock = mlxsw_sp_fsm_lock,
- .fsm_component_update = mlxsw_sp_fsm_component_update,
- .fsm_block_download = mlxsw_sp_fsm_block_download,
- .fsm_component_verify = mlxsw_sp_fsm_component_verify,
- .fsm_activate = mlxsw_sp_fsm_activate,
- .fsm_query_state = mlxsw_sp_fsm_query_state,
- .fsm_cancel = mlxsw_sp_fsm_cancel,
- .fsm_release = mlxsw_sp_fsm_release,
-};
-
-static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
- const struct firmware *firmware,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
- .mlxfw_dev = {
- .ops = &mlxsw_sp_mlxfw_dev_ops,
- .psid = mlxsw_sp->bus_info->psid,
- .psid_size = strlen(mlxsw_sp->bus_info->psid),
- .devlink = priv_to_devlink(mlxsw_sp->core),
- },
- .mlxsw_sp = mlxsw_sp
- };
- int err;
-
- mlxsw_core_fw_flash_start(mlxsw_sp->core);
- err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
- firmware, extack);
- mlxsw_core_fw_flash_end(mlxsw_sp->core);
-
- return err;
-}
-
-static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
-{
- const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
- const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
- const char *fw_filename = mlxsw_sp->fw_filename;
- union devlink_param_value value;
- const struct firmware *firmware;
- int err;
-
- /* Don't check if driver does not require it */
- if (!req_rev || !fw_filename)
- return 0;
-
- /* Don't check if devlink 'fw_load_policy' param is 'flash' */
- err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- &value);
- if (err)
- return err;
- if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
- return 0;
-
- /* Validate driver & FW are compatible */
- if (rev->major != req_rev->major) {
- WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, req_rev->major);
- return -EINVAL;
- }
- if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
- return 0;
-
- dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
- rev->major, rev->minor, rev->subminor, req_rev->major,
- req_rev->minor, req_rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- fw_filename);
-
- err = request_firmware_direct(&firmware, fw_filename,
- mlxsw_sp->bus_info->dev);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- fw_filename);
- return err;
- }
-
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
- release_firmware(firmware);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
-
- /* On FW flash success, tell the caller FW reset is needed
- * if current FW supports it.
- */
- if (rev->minor >= req_rev->can_reset_minor)
- return err ? err : -EAGAIN;
- else
- return 0;
-}
-
-static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- const struct firmware *firmware;
- int err;
-
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&firmware, file_name,
- mlxsw_sp->bus_info->dev);
- if (err)
- return err;
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
- release_firmware(firmware);
-
- return err;
-}
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -590,21 +321,28 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int max_mtu;
int err;
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
if (err)
return err;
- max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- if (mtu > max_mtu)
+ *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ if (mtu > mlxsw_sp_port->max_mtu)
return -EINVAL;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
@@ -872,133 +610,25 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
-}
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay)
-{
- delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
- BITS_PER_BYTE));
- return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
- mtu);
-}
-
-/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 58752
-
-static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay, bool pfc, bool pause)
-{
- if (pfc)
- return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
- else if (pause)
- return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
- else
- return 0;
-}
-
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
- bool lossy)
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
- if (lossy)
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
- else
- mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
- thres);
-}
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int err;
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
- u16 delay = !!my_pfc ? my_pfc->delay : 0;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- u32 taken_headroom_cells = 0;
- u32 max_headroom_cells;
- int i, j, err;
+ orig_hdroom = *mlxsw_sp_port->hdroom;
- max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
+ hdroom = orig_hdroom;
+ hdroom.mtu = mtu;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- bool configure = false;
- bool pfc = false;
- u16 thres_cells;
- u16 delay_cells;
- u16 total_cells;
- bool lossy;
-
- for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
- if (prio_tc[j] == i) {
- pfc = pfc_en & BIT(j);
- configure = true;
- break;
- }
- }
-
- if (!configure)
- continue;
-
- lossy = !(pfc || pause_en);
- thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
- delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
- pfc, pause_en);
- delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
- total_cells = thres_cells + delay_cells;
-
- taken_headroom_cells += total_cells;
- if (taken_headroom_cells > max_headroom_cells)
- return -ENOBUFS;
-
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
- thres_cells, lossy);
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en)
-{
- u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
- bool dcb_en = !!mlxsw_sp_port->dcb.ets;
- struct ieee_pfc *my_pfc;
- u8 *prio_tc;
-
- prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
- my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
-
- return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
- pause_en, my_pfc);
-}
-
-static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- int err;
-
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
- if (err)
- return err;
- err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
- if (err)
- goto err_span_port_mtu_update;
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
@@ -1006,9 +636,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
return 0;
err_port_mtu_set:
- mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
-err_span_port_mtu_update:
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -1546,11 +1174,14 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap_masked;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- /* Set advertised speeds to supported speeds. */
+ /* Set advertised speeds to speeds supported by both the driver
+ * and the device.
+ */
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
@@ -1559,8 +1190,10 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
+ eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_cap, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap_masked,
+ mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -1737,6 +1370,22 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
+static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u64 overheat_counter;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
+ &overheat_counter);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
+ return 0;
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 split_base_local_port,
struct mlxsw_sp_port_mapping *port_mapping)
@@ -1842,6 +1491,21 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_speed_by_width_set;
}
+ err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
+ &mlxsw_sp_port->max_speed);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
+ mlxsw_sp_port->local_port);
+ goto err_max_speed_get;
+ }
+
+ err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_max_mtu_get;
+ }
+
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1930,10 +1594,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
- INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
- mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+
+ err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_overheat_init_val_set;
+ }
+
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -1947,6 +1617,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
err_register_netdev:
+err_port_overheat_init_val_set:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
@@ -1963,9 +1634,12 @@ err_port_dcb_init:
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
+err_port_max_mtu_get:
+err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
@@ -1986,7 +1660,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
- cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -1998,6 +1671,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2390,7 +2064,6 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
@@ -2783,11 +2456,36 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_REG_HTGT_INVALID_POLICER,
MLXSW_REG_HTGT_DEFAULT_PRIORITY,
MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
@@ -2836,10 +2534,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err)
- return err;
-
mlxsw_core_emad_string_tlv_enable(mlxsw_core);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -3039,8 +2733,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
@@ -3051,6 +2743,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
@@ -3069,8 +2762,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3081,6 +2772,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
@@ -3097,8 +2789,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3109,6 +2799,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
@@ -3451,52 +3142,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
static int
-mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
- (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
- NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct devlink_param mlxsw_sp_devlink_params[] = {
- DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
- BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL,
- mlxsw_sp_devlink_param_fw_load_policy_validate),
-};
-
-static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
- if (err)
- return err;
-
- value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- value);
- return 0;
-}
-
-static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devlink_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
-}
-
-static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -3533,24 +3178,16 @@ static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
union devlink_param_value value;
int err;
- err = mlxsw_sp_params_register(mlxsw_core);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
if (err)
- goto err_devlink_params_register;
+ return err;
value.vu32 = 0;
devlink_param_driverinit_value_set(devlink,
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
value);
return 0;
-
-err_devlink_params_register:
- mlxsw_sp_params_unregister(mlxsw_core);
- return err;
}
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
@@ -3558,7 +3195,6 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
devlink_params_unregister(priv_to_devlink(mlxsw_core),
mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
- mlxsw_sp_params_unregister(mlxsw_core);
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
@@ -3573,6 +3209,8 @@ static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp1_fw_rev,
+ .fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3588,7 +3226,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3601,17 +3238,19 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
- .params_register = mlxsw_sp_params_register,
- .params_unregister = mlxsw_sp_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
.kind = mlxsw_sp2_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp2_fw_rev,
+ .fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3627,7 +3266,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3645,11 +3283,15 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp3_fw_rev,
+ .fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3665,7 +3307,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3683,6 +3324,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 5240bf11b6c4..74b3959b36d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -125,6 +125,7 @@ struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_rulei_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_ops;
struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
@@ -162,8 +163,6 @@ struct mlxsw_sp {
struct mlxsw_sp_counter_pool *counter_pool;
struct mlxsw_sp_span *span;
struct mlxsw_sp_trap *trap;
- const struct mlxsw_fw_rev *req_rev;
- const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
@@ -173,6 +172,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_nve_ops **nve_ops_arr;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
+ const struct mlxsw_sp_sb_ops *sb_ops;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
const struct mlxsw_sp_span_ops *span_ops;
@@ -316,9 +316,10 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
- struct {
- struct delayed_work speed_update_dw;
- } span;
+ int max_mtu;
+ u32 max_speed;
+ struct mlxsw_sp_hdroom *hdroom;
+ u64 module_overheat_initial_val;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -331,6 +332,7 @@ struct mlxsw_sp_port_type_speed_ops {
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
+ int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
@@ -340,6 +342,7 @@ struct mlxsw_sp_port_type_speed_ops {
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
u32 *p_eth_proto_oper);
+ u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
static inline struct net_device *
@@ -414,34 +417,73 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
-static inline u32
-mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
- u32 size_cells)
-{
- /* Ports with eight lanes use two headroom buffers between which the
- * configured headroom size is split. Therefore, multiply the calculated
- * headroom size by two.
- */
- return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
-}
-
enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
MLXSW_SP_FLOOD_TYPE_MC,
};
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en);
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up);
/* spectrum_buffers.c */
+struct mlxsw_sp_hdroom_prio {
+	/* Index of the port buffer associated with this priority. This is the
+ * actually configured value.
+ */
+ u8 buf_idx;
+ /* Value of buf_idx deduced from the DCB ETS configuration. */
+ u8 ets_buf_idx;
+ /* Value of buf_idx taken from the dcbnl_setbuffer configuration. */
+ u8 set_buf_idx;
+ bool lossy;
+};
+
+struct mlxsw_sp_hdroom_buf {
+ u32 thres_cells;
+ u32 size_cells;
+	/* Size requirement from dcbnl_setbuffer. */
+ u32 set_size_cells;
+ bool lossy;
+};
+
+enum mlxsw_sp_hdroom_mode {
+ MLXSW_SP_HDROOM_MODE_DCB,
+ MLXSW_SP_HDROOM_MODE_TC,
+};
+
+#define MLXSW_SP_PB_COUNT 10
+
+struct mlxsw_sp_hdroom {
+ enum mlxsw_sp_hdroom_mode mode;
+
+ struct {
+ struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
+ } prios;
+ struct {
+ struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
+ } bufs;
+ struct {
+ /* Size actually configured for the internal buffer. Equal to
+ * reserve when internal buffer is enabled.
+ */
+ u32 size_cells;
+ /* Space reserved in the headroom for the internal buffer. Port
+ * buffers are not allowed to grow into this space.
+ */
+ u32 reserve_cells;
+ bool enable;
+ } int_buf;
+ int delay_bytes;
+ int mtu;
+};
+
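A minimal sketch of the copy-modify-configure flow this struct is built for; mlxsw_sp_port_change_mtu() in spectrum.c above follows the same shape, while the wrapper here is hypothetical:

static int example_hdroom_set_mtu(struct mlxsw_sp_port *mlxsw_sp_port, int mtu)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;	/* snapshot current state */
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
	/* Only the parts that changed are written to the device. */
	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
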
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -477,11 +519,20 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom);
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;
+
/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -519,9 +570,6 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
u8 switch_prio, u8 tclass);
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate, u8 burst_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 6f84557a5a6f..37ff29a1686e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -121,6 +121,10 @@ struct mlxsw_sp_sb_vals {
unsigned int cms_cpu_count;
};
+struct mlxsw_sp_sb_ops {
+ u32 (*int_buf_size_get)(int mtu, u32 speed);
+};
+
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
return mlxsw_sp->sb->cell_size * cells;
@@ -131,9 +135,14 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 size_cells)
{
- return mlxsw_sp->sb->max_headroom_cells;
+ /* Ports with eight lanes use two headroom buffers between which the
+ * configured headroom size is split. Therefore, multiply the calculated
+ * headroom size by two.
+ */
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
@@ -291,55 +300,308 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
(unsigned long) pm);
}
-/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
-#define MLXSW_SP_PB_HEADROOM 25632
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ switch (hdroom->mode) {
+ case MLXSW_SP_HDROOM_MODE_DCB:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
+ break;
+ case MLXSW_SP_HDROOM_MODE_TC:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
+ break;
+ }
+ }
+}
+
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+ int i;
+
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom->bufs.buf[i].lossy = true;
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
+ if (!hdroom->prios.prio[prio].lossy)
+ hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
+ }
+}
+
+static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
+{
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
+
+static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+ else
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
+}
+
+static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ u16 delay_cells;
+
+ delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
+
+ /* In the worst case scenario the delay will be made up of packets that
+ * are all of size CELL_SIZE + 1, which means each packet will require
+ * almost twice its true size when buffered in the switch. We therefore
+ * multiply this value by the "cell factor", which is close to 2.
+ *
+ * Another MTU is added in case the transmitting host already started
+ * transmitting a maximum length frame when the PFC packet was received.
+ */
+ return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
+}
+
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, u32 speed, int mtu)
+{
+	u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(mtu, speed);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
+static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (hdroom->prios.prio[prio].buf_idx == buf)
+ return true;
+ }
+ return false;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 reserve_cells;
+ int i;
+
+ /* Internal buffer. */
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
+ mlxsw_sp_port->max_mtu);
+ reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+ hdroom->int_buf.reserve_cells = reserve_cells;
+
+ if (hdroom->int_buf.enable)
+ hdroom->int_buf.size_cells = reserve_cells;
+ else
+ hdroom->int_buf.size_cells = 0;
+
+ /* PG buffers. */
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
+ struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+ u16 thres_cells;
+ u16 delay_cells;
+
+ if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
+ thres_cells = 0;
+ delay_cells = 0;
+ } else if (buf->lossy) {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = 0;
+ } else {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
+ }
+
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
+
+ buf->thres_cells = thres_cells;
+ if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
+ buf->size_cells = thres_cells + delay_cells;
+ } else {
+ /* Do not allow going below the minimum size, even if
+ * the user requested it.
+ */
+ buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
+ }
+ }
+}
+
#define MLXSW_SP_PB_UNUSED 8
-static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
- const u32 pbs[] = {
- [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
- [9] = MLXSW_PORT_MAX_MTU,
- };
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ bool dirty;
+ int err;
int i;
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
- 0xffff, 0xffff / 2);
- for (i = 0; i < ARRAY_SIZE(pbs); i++) {
- u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
+ dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
if (i == MLXSW_SP_PB_UNUSED)
continue;
- size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
+
+ mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
}
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
- MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
+ return 0;
}
-static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
+ bool dirty;
+ int prio;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+ if (!dirty && !force)
+ return 0;
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->prios = hdroom->prios;
+ return 0;
}
-static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ bool dirty;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+ return 0;
+}
+
+static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
{
+ u32 taken_headroom_cells = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
+ taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+
+ taken_headroom_cells += hdroom->int_buf.reserve_cells;
+ return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
+}
+
+static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom tmp_hdroom;
int err;
+ int i;
+
+ /* Port buffers need to be configured in three steps. First, all buffers
+ * with non-zero size are configured. Then, prio-to-buffer map is
+ * updated, allowing traffic to flow to the now non-zero buffers.
+ * Finally, zero-sized buffers are configured, because now no traffic
+ * should be directed to them anymore. This way, in a non-congested
+ * system, no packet drops are introduced by the reconfiguration.
+ */
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ tmp_hdroom = orig_hdroom;
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ if (hdroom->bufs.buf[i].size_cells)
+ tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+ }
+
+ if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+ !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+ return -ENOBUFS;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
if (err)
return err;
- return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+
+ err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+ if (err)
+ goto err_configure_priomap;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_buffers;
+
+ err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_int_buf;
+
+ *mlxsw_sp_port->hdroom = *hdroom;
+ return 0;
+
+err_configure_int_buf:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_buffers:
+ mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+ return err;
+}
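A minimal standalone sketch of the make-before-break scheme that __mlxsw_sp_hdroom_configure() implements: grow or create buffers first, only then repoint traffic, and only then shrink. The bufs/apply_bufs()/apply_priomap() names below are illustrative stand-ins for the PBMC/PPTB register writes, not driver API.

#define NBUFS 10

struct bufs { unsigned int size[NBUFS]; };

/* Stubs standing in for the actual register writes. */
static int apply_bufs(const struct bufs *b) { (void)b; return 0; }
static int apply_priomap(void) { return 0; }

static int reconfigure(struct bufs *cur, const struct bufs *want)
{
	struct bufs tmp = *cur;
	int i, err;

	/* Step 1: bring every buffer the new map needs up to its new size. */
	for (i = 0; i < NBUFS; i++)
		if (want->size[i])
			tmp.size[i] = want->size[i];
	err = apply_bufs(&tmp);
	if (err)
		return err;

	/* Step 2: repoint traffic at the now-valid buffers. */
	err = apply_priomap();
	if (err)
		goto rollback;

	/* Step 3: only now shrink or zero the buffers nothing points at. */
	err = apply_bufs(want);
	if (err)
		goto rollback;

	*cur = *want;
	return 0;

rollback:
	apply_bufs(cur);
	return err;
}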
+
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom = {};
+ u32 size9;
+ int prio;
+
+ hdroom.mtu = mlxsw_sp_port->dev->mtu;
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = true;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ /* Buffer 9 is used for control traffic. */
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
+
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}
static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
@@ -916,6 +1178,46 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+ .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+ .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+ .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
+};
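To get a sense of the magnitudes the three callbacks above produce, the standalone program below simply evaluates the formulas as shown, for an assumed MTU of 9216 bytes on a 100 Gb/s port (ethtool speeds such as SPEED_100000 are in Mb/s, hence the /1000):

#include <stdio.h>

static unsigned int sp1_size(int mtu, unsigned int speed)
{
	(void)speed;			/* Spectrum-1 ignores the speed */
	return mtu * 5 / 2;
}

static unsigned int sp23_size(int mtu, unsigned int speed, unsigned int factor)
{
	return 3 * mtu + factor * speed / 1000;
}

int main(void)
{
	printf("SP1: %u\n", sp1_size(9216, 100000));		/* 23040 bytes */
	printf("SP2: %u\n", sp23_size(9216, 100000, 38));	/* 27648 + 3800 = 31448 */
	printf("SP3: %u\n", sp23_size(9216, 100000, 50));	/* 27648 + 5000 = 32648 */
	return 0;
}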
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
u32 max_headroom_size;
@@ -995,17 +1297,34 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
+ mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
+ if (!mlxsw_sp_port->hdroom)
+ return -ENOMEM;
+ mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
+
err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_headroom_init;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_port_sb_cms_init;
err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_sb_pms_init;
+ return 0;
+err_port_sb_pms_init:
+err_port_sb_cms_init:
+err_headroom_init:
+ kfree(mlxsw_sp_port->hdroom);
return err;
}
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->hdroom);
+}
+
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0d3fb2e51ea5..5f92b1691360 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -64,87 +64,28 @@ static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *prio_tc)
-{
- char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
-
- mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
-
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
-}
-
-static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
-{
- int i;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (prio_tc[i] == pg)
- return true;
- return false;
-}
-
-static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *old_prio_tc, u8 *new_prio_tc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- int err, i;
-
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
- return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- u8 pg = old_prio_tc[i];
-
- if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct ieee_ets *ets)
{
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
- /* Create the required PGs, but don't destroy existing ones, as
- * traffic is still directed to them.
- */
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ hdroom = *mlxsw_sp_port->hdroom;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
}
- err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
- if (err) {
- netdev_err(dev, "Failed to set PG-priority mapping\n");
- goto err_port_prio_pg_map;
- }
-
- err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
- ets->prio_tc);
- if (err)
- netdev_warn(dev, "Failed to remove unused PGs\n");
-
return 0;
-
-err_port_prio_pg_map:
- mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
- return err;
}
static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -605,6 +546,9 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (pause_en && pfc->pfc_en) {
@@ -612,9 +556,21 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return -EINVAL;
}
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc,
- pause_en, pfc);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pfc->pfc_en)
+ hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@@ -632,12 +588,66 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return 0;
err_port_pfc_set:
- __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
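The DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE) above converts the dcbnl PFC delay allowance, expressed in bit times, into the bytes that hdroom.delay_bytes expects. A quick standalone check of the rounding behavior (BITS_PER_BYTE is 8 in the kernel; the macro below mirrors the kernel's definition):

#include <assert.h>

#define BITS_PER_BYTE	8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	assert(DIV_ROUND_UP(32768, BITS_PER_BYTE) == 4096);	/* exact multiple */
	assert(DIV_ROUND_UP(100, BITS_PER_BYTE) == 13);		/* 12.5 rounds up */
	return 0;
}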
+static int mlxsw_sp_dcbnl_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom *hdroom = mlxsw_sp_port->hdroom;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int prio;
+ int i;
+
+ buf->total_size = 0;
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->bufs.buf[i].size_cells);
+
+ if (i < DCBX_MAX_BUFFERS)
+ buf->buffer_size[i] = bytes;
+ buf->total_size += bytes;
+ }
+
+ buf->total_size += mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->int_buf.size_cells);
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ buf->prio2buffer[prio] = hdroom->prios.prio[prio].buf_idx;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_setbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int i;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+
+ if (hdroom.mode != MLXSW_SP_HDROOM_MODE_TC) {
+ netdev_err(dev, "The use of dcbnl_setbuffer is only allowed if egress is configured using TC\n");
+ return -EINVAL;
+ }
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ hdroom.prios.prio[prio].set_buf_idx = buf->prio2buffer[prio];
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom.bufs.buf[i].set_size_cells = mlxsw_sp_bytes_cells(mlxsw_sp,
+ buf->buffer_size[i]);
+
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+}
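Both dcbnl callbacks above translate between bytes (the dcbnl ABI) and the ASIC's cell granularity. A standalone sketch of the two conversions; the 96-byte cell size is only illustrative (it matches Spectrum-1, while later ASICs use larger cells), and the round-up is why getbuffer can report slightly more than what setbuffer requested:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static const unsigned int cell_size = 96;	/* illustrative value */

static unsigned int bytes_to_cells(unsigned int bytes)
{
	return DIV_ROUND_UP(bytes, cell_size);	/* round up, never under-allocate */
}

static unsigned int cells_to_bytes(unsigned int cells)
{
	return cells * cell_size;
}

int main(void)
{
	unsigned int cells = bytes_to_cells(10000);

	/* 10000 bytes -> 105 cells -> 10080 bytes reported back. */
	printf("%u cells, %u bytes\n", cells, cells_to_bytes(cells));
	return 0;
}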
+
static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
.ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
@@ -650,6 +660,9 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
+
+ .dcbnl_getbuffer = mlxsw_sp_dcbnl_getbuffer,
+ .dcbnl_setbuffer = mlxsw_sp_dcbnl_setbuffer,
};
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 14c78f73bb65..540616469e28 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
#include "reg.h"
+#include "core.h"
#include "spectrum.h"
#include "core_env.h"
@@ -192,11 +193,19 @@ static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
pfcc_pl);
}
+/* Maximum delay buffer needed in case of PAUSE frames. Similar to the PFC delay, but
+ * measured in bytes. Assumes a 100m cable and does not take the MTU into account.
+ */
+#define MLXSW_SP_PAUSE_DELAY_BYTES 19476
+
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = pause->tx_pause || pause->rx_pause;
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
@@ -209,7 +218,21 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return -EINVAL;
}
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pause_en)
+ hdroom.delay_bytes = MLXSW_SP_PAUSE_DELAY_BYTES;
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !pause_en;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
@@ -227,8 +250,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return 0;
err_port_pause_configure:
- pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -531,6 +553,37 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+struct mlxsw_sp_port_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(struct mlxsw_sp_port *mlxsw_sp_port);
+};
+
+static u64
+mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
+ struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u64 stats;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
+ port_mapping.module,
+ &stats);
+ if (err)
+ return mlxsw_sp_port->module_overheat_initial_val;
+
+ return stats - mlxsw_sp_port->module_overheat_initial_val;
+}
+
+static struct mlxsw_sp_port_stats mlxsw_sp_port_transceiver_stats[] = {
+ {
+ .str = "transceiver_overheat",
+ .getter = mlxsw_sp_port_get_transceiver_overheat_stats,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_transceiver_stats)
+
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
@@ -540,7 +593,8 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
- TC_MAX_QUEUE))
+ TC_MAX_QUEUE) + \
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN)
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
@@ -616,6 +670,12 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
mlxsw_sp_port_get_tc_strings(&p, i);
mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_transceiver_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -711,6 +771,17 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
}
}
+static void __mlxsw_sp_port_get_env_stats(struct net_device *dev, u64 *data, int data_index,
+ struct mlxsw_sp_port_stats *port_stats,
+ int len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < len; i++)
+ data[data_index + i] = port_stats[i].getter(mlxsw_sp_port);
+}
+
static void mlxsw_sp_port_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -765,6 +836,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
data, data_index);
data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+
+ /* Transceiver counters */
+ __mlxsw_sp_port_get_env_stats(dev, data, data_index, mlxsw_sp_port_transceiver_stats,
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN);
+ data_index += MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN;
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
@@ -842,6 +918,29 @@ mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
}
}
+static int mlxsw_sp_port_ptys_query(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper, u8 *p_connector_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+ if (p_connector_type)
+ *p_connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+ return 0;
+}
+
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -849,21 +948,17 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
u8 connector_type;
bool autoneg;
int err;
- ops = mlxsw_sp->port_type_speed_ops;
-
- autoneg = mlxsw_sp_port->link.autoneg;
- ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- 0, false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper, &connector_type);
if (err)
return err;
- ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
+
+ ops = mlxsw_sp->port_type_speed_ops;
+ autoneg = mlxsw_sp_port->link.autoneg;
mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
mlxsw_sp_port->mapping.width, cmd);
@@ -872,7 +967,6 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
mlxsw_sp_port->mapping.width, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -993,22 +1087,12 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- .speed = SPEED_100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- .speed = SPEED_10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
@@ -1023,11 +1107,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.speed = SPEED_10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
- .speed = SPEED_20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = SPEED_40000,
@@ -1092,11 +1171,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = SPEED_100000,
},
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- .speed = SPEED_100000,
- },
};
#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
@@ -1164,6 +1238,27 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp1_port_link_mode[i].mask) &&
+ mlxsw_sp1_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
@@ -1208,15 +1303,31 @@ mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_oper);
}
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp1_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp1_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
};
static const enum ethtool_link_mode_bit_indices
@@ -1237,14 +1348,6 @@ mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
- ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
- ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
-
-static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -1408,16 +1511,6 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
- .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
- .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
- MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X |
- MLXSW_SP_PORT_MASK_WIDTH_8X,
- .speed = SPEED_2500,
- },
- {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
@@ -1568,6 +1661,27 @@ mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp2_port_link_mode[i].mask) &&
+ mlxsw_sp2_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
const unsigned long *mode)
@@ -1632,13 +1746,29 @@ mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
p_eth_proto_admin, p_eth_proto_oper);
}
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp2_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp2_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 9650562fc0ef..ca8090a28dec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -314,11 +314,9 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
u8 *p_message_type,
u16 *p_sequence_id)
{
- unsigned int offset = 0;
unsigned int ptp_class;
- u8 *data;
+ struct ptp_header *hdr;
- data = skb_mac_header(skb);
ptp_class = ptp_classify_raw(skb);
switch (ptp_class & PTP_CLASS_VMASK) {
@@ -329,30 +327,14 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
return -ERANGE;
}
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return -ERANGE;
- }
-
- /* PTP header is 34 bytes. */
- if (skb->len < offset + 34)
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return -EINVAL;
- *p_message_type = data[offset] & 0x0f;
- *p_domain_number = data[offset + 4];
- *p_sequence_id = (u16)(data[offset + 30]) << 8 | data[offset + 31];
+ *p_message_type = ptp_get_msgtype(hdr, ptp_class);
+ *p_domain_number = hdr->domain_number;
+ *p_sequence_id = be16_to_cpu(hdr->sequence_id);
+
return 0;
}
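ptp_parse_header() and ptp_get_msgtype() from <linux/ptp_classify.h> replace the hand-rolled offset arithmetic: they handle the VLAN, IPv4/UDP, IPv6/UDP and L2 encapsulations internally, bounds-check the skb, and hide the v1/v2 message-type encoding difference. A kernel-side sketch of the calling pattern, mirroring the shape of the hunk above (not driver API):

/* Sketch only; assumes an skb whose MAC header has been set up. */
static int example_ptp_parse(struct sk_buff *skb, u8 *msgtype, u16 *seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);	/* valid for v1 and v2 */
	*seqid = be16_to_cpu(hdr->sequence_id);		/* fields are big-endian */
	return 0;
}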
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 964fd444bb10..fd672c6c9133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -140,18 +140,31 @@ static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ int err_hdroom = 0;
int err = 0;
if (!mlxsw_sp_qdisc)
return 0;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ }
+
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
mlxsw_sp_qdisc);
mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
mlxsw_sp_qdisc->ops = NULL;
- return err;
+
+ return err_hdroom ?: err;
}
static int
@@ -159,6 +172,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct mlxsw_sp_qdisc_ops *ops, void *params)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ struct mlxsw_sp_hdroom orig_hdroom;
int err;
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
@@ -168,6 +183,21 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
* new one.
*/
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = orig_hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err)
+ goto err_hdroom_configure;
+ }
+
err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
if (err)
goto err_bad_param;
@@ -191,6 +221,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
err_bad_param:
err_config:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 460cb523312f..4381f8c6c3fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8038,7 +8038,6 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
- int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
@@ -8047,10 +8046,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- return err;
- return 0;
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 1d18e41ab255..c6c5826aba41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -968,42 +968,26 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
- u32 speed)
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
- u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+ struct mlxsw_sp_hdroom hdroom;
- return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+ hdroom = *mlxsw_sp_port->hdroom;
+ hdroom.int_buf.enable = enable;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
static int
-mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- u32 buffsize;
- u32 speed;
- int err;
-
- err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
- if (err)
- return err;
- if (speed == SPEED_UNKNOWN)
- speed = 0;
-
- buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
- buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
- mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}
-static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
- u8 local_port)
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}
static struct mlxsw_sp_span_analyzed_port *
@@ -1021,48 +1005,6 @@ mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
return NULL;
}
-int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- int err = 0;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
- false))
- err = mlxsw_sp_span_port_buffer_update(port, mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-
- return err;
-}
-
-void mlxsw_sp_span_speed_update_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
-
- mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
- span.speed_update_dw);
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the speed value.
- */
- mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
- mlxsw_sp_port->local_port, false))
- mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-}
-
static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev)
@@ -1180,9 +1122,7 @@ mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
* does the mirroring.
*/
if (!ingress) {
- u16 mtu = mlxsw_sp_port->dev->mtu;
-
- err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
if (err)
goto err_buffer_update;
}
@@ -1196,18 +1136,15 @@ err_buffer_update:
}
static void
-mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_span_analyzed_port *
analyzed_port)
{
- struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
-
/* Remove egress mirror buffer now that port is no longer analyzed
* at egress.
*/
if (!analyzed_port->ingress)
- mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
- analyzed_port->local_port);
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
list_del(&analyzed_port->list);
kfree(analyzed_port);
@@ -1258,7 +1195,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
if (!refcount_dec_and_test(&analyzed_port->ref_count))
goto out_unlock;
- mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
out_unlock:
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1712,11 +1649,6 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
- return mtu * 5 / 2;
-}
-
static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1725,7 +1657,6 @@ static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
.init = mlxsw_sp1_span_init,
- .buffsize_get = mlxsw_sp1_span_buffsize_get,
.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};
@@ -1750,18 +1681,6 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
- return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1778,19 +1697,10 @@ static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp2_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp3_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 1c746dd3b1bd..d907718bc8c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -47,7 +47,6 @@ struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
- u32 (*buffsize_get)(int mtu, u32 speed);
int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 2e41c5519c1b..433f14ade464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -291,7 +291,7 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
static const struct mlxsw_sp_trap_policer_item
mlxsw_sp_trap_policer_items_arr[] = {
{
- .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 4096),
},
{
.policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
@@ -303,25 +303,25 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 1024),
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(11, 360, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(11, 256, 128),
},
{
.policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
@@ -330,19 +330,19 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 16384),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 8192),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 512),
},
{
.policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
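Note that every adjusted burst size above (256, 512, 4096, 8192, 16384) is a power of two, while the old 360-packet burst was not; to my understanding the device can only represent power-of-two burst sizes. A standalone version of the usual check (the kernel provides is_power_of_2() in <linux/log2.h>):

#include <assert.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(!is_power_of_2(360));	/* the old, unrepresentable burst */
	assert(is_power_of_2(256));
	assert(is_power_of_2(16384));
	return 0;
}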
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 6f9a725662fb..5023d91269f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -551,16 +551,6 @@ struct mlxsw_sx_port_link_mode {
static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.supported = SUPPORTED_1000baseKX_Full,
@@ -568,12 +558,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.supported = SUPPORTED_10000baseKX4_Full,
@@ -590,12 +574,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.supported = SUPPORTED_40000baseCR4_Full,
.advertised = ADVERTISED_40000baseCR4_Full,
@@ -634,8 +612,7 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
.speed = 100000,
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 33909887d0ac..57f9e24602d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -120,8 +120,14 @@ enum {
};
enum mlxsw_event_trap_id {
+ /* Fatal Event generated by FW */
+ MLXSW_TRAP_ID_MFDE = 0x3,
/* Port Up/Down event generated by hardware */
MLXSW_TRAP_ID_PUDE = 0x8,
+ /* Port Module Plug/Unplug Event generated by hardware */
+ MLXSW_TRAP_ID_PMPE = 0x9,
+ /* Temperature Warning event generated by hardware */
+ MLXSW_TRAP_ID_MTWE = 0xC,
/* PTP Ingress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f3f6dfe3eddc..caa251d0e381 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -587,10 +587,10 @@ out:
return err;
}
-static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+static void ks8842_rx_frame_dma_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, dma_rx.tasklet);
+ struct net_device *netdev = adapter->netdev;
struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
struct sk_buff *skb = ctl->skb;
dma_addr_t addr = sg_dma_address(&ctl->sg);
@@ -720,10 +720,10 @@ static void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-static void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, tasklet);
+ struct net_device *netdev = adapter->netdev;
u16 isr;
unsigned long flags;
u16 entry_bank;
@@ -953,8 +953,7 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
goto err;
}
- tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
- (unsigned long)netdev);
+ tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet);
return 0;
err:
@@ -1173,7 +1172,7 @@ static int ks8842_probe(struct platform_device *pdev)
adapter->dma_tx.channel = -1;
}
- tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ tasklet_setup(&adapter->tasklet, ks8842_tasklet);
spin_lock_init(&adapter->lock);
netdev->netdev_ops = &ks8842_netdev_ops;
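The two ks8842 hunks follow the tree-wide tasklet API conversion: tasklet_setup() registers a callback that receives the struct tasklet_struct itself, and from_tasklet() (a container_of() wrapper from <linux/interrupt.h>) recovers the enclosing object, eliminating the unsigned long cast. The shape of the conversion, shown on a made-up foo_adapter:

struct foo_adapter {
	struct tasklet_struct tasklet;
	/* ... */
};

/* New-style callback: gets the tasklet, not an opaque unsigned long. */
static void foo_tasklet(struct tasklet_struct *t)
{
	struct foo_adapter *adapter = from_tasklet(adapter, t, tasklet);

	/* ... use adapter ... */
}

static void foo_init(struct foo_adapter *adapter)
{
	/* Replaces tasklet_init(&adapter->tasklet, fn, (unsigned long)adapter). */
	tasklet_setup(&adapter->tasklet, foo_tasklet);
}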
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index bb646b65cc95..9ed264ed7070 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
*
* Copyright (c) 2009-2010 Micrel, Inc.
@@ -959,7 +959,7 @@ struct ksz_sw_desc {
* struct ksz_dma_buf - OS dependent DMA buffer data structure
* @skb: Associated socket buffer.
* @dma: Associated physical DMA address.
- * len: Actual len used.
+ * @len: Actual len used.
*/
struct ksz_dma_buf {
struct sk_buff *skb;
@@ -1254,6 +1254,7 @@ struct ksz_port_info {
* @multi_list_size: Multicast address list size.
* @enabled: Indication of hardware enabled.
* @rx_stop: Indication of receive process stop.
+ * @reserved2: Reserved, not used.
* @features: Hardware features to enable.
* @overrides: Hardware features to override.
* @parent: Pointer to parent, network device private structure.
@@ -1447,7 +1448,7 @@ struct dev_info {
* struct dev_priv - Network device private data structure
* @adapter: Adapter device information.
* @port: Port information.
- * @monitor_time_info: Timer to monitor ports.
+ * @monitor_timer_info: Timer to monitor ports.
* @proc_sem: Semaphore for proc accessing.
* @id: Device ID.
* @mii_if: MII interface information.
@@ -1566,6 +1567,7 @@ static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
/**
* hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
*
* This function blocks all interrupts of the hardware and returns the current
* interrupt enable mask so that interrupts can be restored later.
@@ -1649,8 +1651,7 @@ static inline void set_tx_len(struct ksz_desc *desc, u32 len)
#define HW_DELAY(hw, reg) \
do { \
- u16 dummy; \
- dummy = readw(hw->io + reg); \
+ readw(hw->io + reg); \
} while (0)
/**
@@ -1819,6 +1820,7 @@ static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
* port_r_mib_pkt - read dropped packet counts
* @hw: The hardware instance.
* @port: The port index.
+ * @last: Previously read counter values, used to detect changes.
* @cnt: Buffer to store the receive and transmit dropped packet counts.
*
* This routine reads the dropped packet counts of the port.
@@ -1972,7 +1974,7 @@ static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
* port_chk_shift - check port bit
* @hw: The hardware instance.
* @port: The port index.
- * @offset: The offset of the register.
+ * @addr: The offset of the register.
* @shift: Number of bits to shift.
*
* This function checks whether the specified port is set in the register or
@@ -1994,7 +1996,7 @@ static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
* port_cfg_shift - set port bit
* @hw: The hardware instance.
* @port: The port index.
- * @offset: The offset of the register.
+ * @addr: The offset of the register.
* @shift: Number of bits to shift.
* @set: The flag indicating whether the port is to be set or not.
*
@@ -4425,6 +4427,8 @@ static int ksz_alloc_desc(struct dev_info *adapter)
/**
* free_dma_buf - release DMA buffer resources
* @adapter: Adapter information structure.
+ * @dma_buf: Pointer to the DMA buffer to release.
+ * @direction: DMA transfer direction (to or from the device).
*
* This routine is just a helper function to release the DMA buffer resources.
*/
@@ -4562,6 +4566,7 @@ static void ksz_free_desc(struct dev_info *adapter)
* ksz_free_buffers - free buffers used in the descriptors
* @adapter: Adapter information structure.
* @desc_info: Descriptor information structure.
+ * @direction: DMA transfer direction (to or from the device).
*
* This local routine frees buffers used in the DMA buffers.
*/
@@ -4721,7 +4726,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
/**
* transmit_cleanup - clean up transmit descriptors
- * @dev: Network device.
+ * @hw_priv: Adapter information structure.
+ * @normal: Stop at the first descriptor still owned by the hardware.
*
* This routine is called to clean up the transmitted buffers.
*/
@@ -4777,7 +4783,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
/**
* transmit_done - transmit done processing
- * @dev: Network device.
+ * @hw_priv: Adapter information structure.
*
* This routine is called when the transmit interrupt is triggered, indicating
* either a packet is sent successfully or there are transmit errors.
@@ -4883,6 +4889,7 @@ unlock:
/**
* netdev_tx_timeout - transmit timeout processing
* @dev: Network device.
+ * @txqueue: Index of the hanging transmit queue.
*
* This routine is called when the transmit timer expires. That indicates the
* hardware is not running correctly because transmit interrupts are not
@@ -4978,7 +4985,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
struct dev_info *hw_priv = priv->adapter;
struct ksz_dma_buf *dma_buf;
struct sk_buff *skb;
- int rx_status;
/* Received length includes 4-byte CRC. */
packet_len = status.rx.frame_len - 4;
@@ -5014,7 +5020,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dev->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- rx_status = netif_rx(skb);
+ netif_rx(skb);
return 0;
}
@@ -5159,9 +5165,9 @@ release_packet:
return received;
}
-static void rx_proc_task(unsigned long data)
+static void rx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
if (!hw->enabled)
@@ -5181,9 +5187,9 @@ static void rx_proc_task(unsigned long data)
}
}
-static void tx_proc_task(unsigned long data)
+static void tx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
hw_ack_intr(hw, KS884X_INT_TX_MASK);
@@ -5436,10 +5442,8 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
+ tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
+ tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
hw->promiscuous = 0;
hw->all_multi = 0;
@@ -5829,8 +5833,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Get address of MII PHY in use. */
case SIOCGMIIPHY:
data->phy_id = priv->id;
-
- /* Fallthrough... */
+ fallthrough;
/* Read MII PHY register. */
case SIOCGMIIREG:
@@ -6078,14 +6081,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-/**
- * netdev_get_regs_len - get length of register dump
- * @dev: Network device.
- *
- * This function returns the length of the register dump.
- *
- * Return length of the register dump.
- */
static struct hw_regs {
int start;
int end;
@@ -6099,6 +6094,14 @@ static struct hw_regs {
{ 0, 0 }
};
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
static int netdev_get_regs_len(struct net_device *dev)
{
struct hw_regs *range = hw_regs_range;
@@ -6240,6 +6243,8 @@ static int netdev_get_eeprom_len(struct net_device *dev)
return EEPROM_SIZE * 2;
}
+#define EEPROM_MAGIC 0x10A18842
+
/**
* netdev_get_eeprom - get EEPROM data
* @dev: Network device.
@@ -6250,8 +6255,6 @@ static int netdev_get_eeprom_len(struct net_device *dev)
*
* Return 0 if successful; otherwise an error code.
*/
-#define EEPROM_MAGIC 0x10A18842
-
static int netdev_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
@@ -6388,7 +6391,7 @@ static int netdev_set_pauseparam(struct net_device *dev,
/**
* netdev_get_ringparam - get tx/rx ring parameters
* @dev: Network device.
- * @pause: Ethtool RING settings data structure.
+ * @ring: Ethtool RING settings data structure.
*
* This procedure returns the TX/RX ring settings.
*/
@@ -6509,7 +6512,6 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
int i;
int n;
int p;
- int rc;
u64 counter[TOTAL_PORT_COUNTER_NUM];
mutex_lock(&hw_priv->lock);
@@ -6530,19 +6532,19 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
p = n;
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
} else
for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
if (0 == i) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 2);
} else if (hw->port_mib[p].cnt_ptr) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
@@ -6693,7 +6695,7 @@ static void mib_monitor(struct timer_list *t)
/**
* dev_monitor - periodic monitoring
- * @ptr: Network device pointer.
+ * @t: timer list containing a network device pointer.
*
* This routine is run in a kernel timer to monitor the network device.
*/
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5bd7fb917b7a..796e46a53926 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Register map access API - ENCX24J600 support
*
* Copyright 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index de93cc6ebc1a..e2c99d909247 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -158,9 +158,8 @@ static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
struct lan743x_tx *tx = context;
struct lan743x_adapter *adapter = tx->adapter;
bool enable_flag = true;
- u32 int_en = 0;
- int_en = lan743x_csr_read(adapter, INT_EN_SET);
+ lan743x_csr_read(adapter, INT_EN_SET);
if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
lan743x_csr_write(adapter, INT_EN_CLR,
INT_BIT_DMA_TX_(tx->channel_number));
@@ -675,14 +674,12 @@ clean_up:
static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 select, u32 addr, u32 length, u32 *buf)
{
- int ret = -EIO;
u32 dp_sel;
int i;
- mutex_lock(&adapter->dp_lock);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
dp_sel |= select;
@@ -694,13 +691,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
- goto unlock;
+ return -EIO;
}
- ret = 0;
-unlock:
- mutex_unlock(&adapter->dp_lock);
- return ret;
+ return 0;
}
static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
@@ -1020,16 +1014,16 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct lan743x_phy *phy = &adapter->phy;
+ struct phy_device *phydev = NULL;
struct device_node *phynode;
- struct phy_device *phydev;
struct net_device *netdev;
int ret = -EIO;
netdev = adapter->netdev;
phynode = of_node_get(adapter->pdev->dev.of_node);
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
if (phynode) {
+ /* try devicetree phy, or fixed link */
of_get_phy_mode(phynode, &adapter->phy_mode);
if (of_phy_is_fixed_link(phynode)) {
@@ -1045,13 +1039,15 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
lan743x_phy_link_status_change, 0,
adapter->phy_mode);
of_node_put(phynode);
- if (!phydev)
- goto return_error;
- } else {
+ }
+
+ if (!phydev) {
+ /* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
if (!phydev)
goto return_error;
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
adapter->phy_mode);
@@ -1699,10 +1695,9 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
bool start_transmitter = false;
unsigned long irq_flags = 0;
u32 ioc_bit = 0;
- u32 int_sts = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
- int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
+ lan743x_csr_read(adapter, DMAC_INT_STS);
if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
spin_lock_irqsave(&tx->ring_lock, irq_flags);
@@ -2735,7 +2730,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
- mutex_init(&adapter->dp_lock);
ret = lan743x_gpio_init(adapter);
if (ret)
@@ -3038,7 +3032,6 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
- int ret;
lan743x_pcidev_shutdown(pdev);
@@ -3051,9 +3044,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_pm_set_wol(adapter);
/* Host sets PME_En, put D3hot */
- ret = pci_prepare_to_sleep(pdev);
-
- return 0;
+ return pci_prepare_to_sleep(pdev);
}
static int lan743x_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index c61a40411317..a536f4a4994d 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -712,9 +712,6 @@ struct lan743x_adapter {
struct lan743x_csr csr;
struct lan743x_intr intr;
- /* lock, used to prevent concurrent access to data port */
- struct mutex dp_lock;
-
struct lan743x_gpio gpio;
struct lan743x_ptp ptp;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index aa002db04250..70bf8c67d7ef 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -107,6 +108,13 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
ANA_PORT_VCAP_S2_CFG, port);
+
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
+ ANA_PORT_VCAP_CFG, port);
+
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG, port);
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
@@ -191,12 +199,28 @@ static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
return 0;
}
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware)
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware, struct switchdev_trans *trans)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val;
+ if (switchdev_trans_ph_prepare(trans)) {
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+ }
+
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
@@ -210,6 +234,8 @@ void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
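
ocelot_port_vlan_filtering() now follows the switchdev prepare/commit transaction model: the prepare phase only validates (and here refuses the change while vlan-modify rules are installed in VCAP IS1), while the commit phase touches the hardware and is expected not to fail. A caller-side sketch of that contract, mirroring what ocelot_port_bridge_leave() does further down (switchdev_trans is the real kernel type; the surrounding code is illustrative):

	struct switchdev_trans trans;
	int err;

	trans.ph_prepare = true;	/* phase 1: validate only */
	err = ocelot_port_vlan_filtering(ocelot, port, true, &trans);
	if (err)
		return err;		/* nothing was committed */

	trans.ph_prepare = false;	/* phase 2: apply to hardware */
	return ocelot_port_vlan_filtering(ocelot, port, true, &trans);
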
@@ -413,26 +439,20 @@ void ocelot_port_disable(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_port_disable);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb)
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- spin_lock(&ocelot_port->ts_id_lock);
+ spin_lock(&ocelot_port->ts_id_lock);
- shinfo->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in cb[0] of sk_buff */
- skb->cb[0] = ocelot_port->ts_id;
- ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, skb);
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in cb[0] of sk_buff */
+ clone->cb[0] = ocelot_port->ts_id;
+ ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
- spin_unlock(&ocelot_port->ts_id_lock);
- return 0;
- }
- return -ENODATA;
+ spin_unlock(&ocelot_port->ts_id_lock);
}
EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
@@ -511,9 +531,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb_match, &shhwtstamps);
-
- dev_kfree_skb_any(skb_match);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
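
The switch to skb_complete_tx_timestamp() pairs with the clone-based flow introduced above: the skb queued on tx_skbs is now a socket-referencing clone created by skb_clone_sk(), and skb_complete_tx_timestamp() both delivers the timestamp to the socket's error queue and drops the clone's reference, replacing the skb_tstamp_tx() plus dev_kfree_skb_any() pair. Condensed from the two hunks (a sketch, not verbatim driver code):

	/* Transmit path: queue a clone that holds a socket reference. */
	clone = skb_clone_sk(skb);
	if (clone)
		ocelot_port_add_txtstamp_skb(ocelot, port, clone);

	/* Timestamp FIFO path: attach the HW time, release the clone. */
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_complete_tx_timestamp(skb_match, &shhwtstamps);
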
@@ -1102,12 +1120,24 @@ EXPORT_SYMBOL(ocelot_port_bridge_join);
int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
+ struct switchdev_trans trans;
+ int ret;
+
ocelot->bridge_mask &= ~BIT(port);
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- ocelot_port_vlan_filtering(ocelot, port, 0);
+ trans.ph_prepare = true;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
+ trans.ph_prepare = false;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
ocelot_port_set_pvid(ocelot, port, 0);
return ocelot_port_set_native_vlan(ocelot, port, 0);
}
@@ -1354,22 +1384,14 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_init_port);
-/* Configure and enable the CPU port module, which is a set of queues.
- * If @npi contains a valid port index, the CPU port module is connected
- * to the Node Processor Interface (NPI). This is the mode through which
- * frames can be injected from and extracted to an external CPU,
- * over Ethernet.
+/* Configure and enable the CPU port module, which is a set of queues
+ * accessible through register MMIO, frame DMA or Ethernet (in case
+ * NPI mode is used).
*/
-void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction)
+static void ocelot_cpu_port_init(struct ocelot *ocelot)
{
int cpu = ocelot->num_phys_ports;
- ocelot->npi = npi;
- ocelot->inj_prefix = injection;
- ocelot->xtr_prefix = extraction;
-
/* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
/* Instead set up a multicast destination PGID for traffic copied to
@@ -1381,31 +1403,13 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
- if (npi >= 0 && npi < ocelot->num_phys_ports) {
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
- QSYS_EXT_CPU_CFG);
-
- /* Enable NPI port */
- ocelot_fields_write(ocelot, npi,
- QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- /* NPI port Injection/Extraction configuration */
- ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_XTR_HDR,
- extraction);
- ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR,
- injection);
-
- /* Disable transmission of pause frames */
- ocelot_fields_write(ocelot, npi, SYS_PAUSE_CFG_PAUSE_ENA, 0);
- }
-
/* Enable CPU port module */
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
/* CPU port Injection/Extraction configuration */
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
- extraction);
+ ocelot->xtr_prefix);
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
- injection);
+ ocelot->inj_prefix);
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
@@ -1413,7 +1417,6 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
-EXPORT_SYMBOL(ocelot_configure_cpu);
int ocelot_init(struct ocelot *ocelot)
{
@@ -1453,6 +1456,7 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
+ ocelot_cpu_port_init(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index dc29e05103a1..abb407dff93c 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -98,6 +98,8 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond);
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
+int ocelot_netdev_to_port(struct net_device *dev);
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index ec1b6e2572ba..729495a1a77e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -5,56 +5,433 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_vcap.h"
-static int ocelot_flower_parse_action(struct flow_cls_offload *f,
+/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
+ * into the chain number. This is UAPI.
+ */
+#define VCAP_BLOCK 10000
+#define VCAP_LOOKUP 1000
+#define VCAP_IS1_NUM_LOOKUPS 3
+#define VCAP_IS2_NUM_LOOKUPS 2
+#define VCAP_IS2_NUM_PAG 256
+#define VCAP_IS1_CHAIN(lookup) \
+ (1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
+#define VCAP_IS2_CHAIN(lookup, pag) \
+ (2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+
+static int ocelot_chain_to_block(int chain, bool ingress)
+{
+ int lookup, pag;
+
+ if (!ingress) {
+ if (chain == 0)
+ return VCAP_ES0;
+ return -EOPNOTSUPP;
+ }
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return VCAP_IS2;
+
+ for (lookup = 0; lookup < VCAP_IS1_NUM_LOOKUPS; lookup++)
+ if (chain == VCAP_IS1_CHAIN(lookup))
+ return VCAP_IS1;
+
+ for (lookup = 0; lookup < VCAP_IS2_NUM_LOOKUPS; lookup++)
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(lookup, pag))
+ return VCAP_IS2;
+
+ return -EOPNOTSUPP;
+}
+
+/* Caller must ensure this is a valid IS1 or IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_lookup(int chain)
+{
+ return (chain / VCAP_LOOKUP) % 10;
+}
+
+/* Caller must ensure this is a valid IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_pag(int chain)
+{
+ int lookup = ocelot_chain_to_lookup(chain);
+
+ /* calculate PAG value as chain index relative to the first PAG */
+ return chain - VCAP_IS2_CHAIN(lookup, 0);
+}
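
A worked example of the encoding, following directly from the macros and helpers above:

	/*
	 * VCAP_IS1_CHAIN(0)    = 1 * 10000 + 0 * 1000     = 10000
	 * VCAP_IS1_CHAIN(2)    = 1 * 10000 + 2 * 1000     = 12000
	 * VCAP_IS2_CHAIN(0, 0) = 2 * 10000 + 0 * 1000 + 0 = 20000
	 * VCAP_IS2_CHAIN(1, 5) = 2 * 10000 + 1 * 1000 + 5 = 21005
	 *
	 * Decoding chain 21005:
	 *   ocelot_chain_to_lookup(21005) = (21005 / 1000) % 10 = 1
	 *   ocelot_chain_to_pag(21005)    = 21005 - 21000       = 5
	 */
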
+
+static bool ocelot_is_goto_target_valid(int goto_target, int chain,
+ bool ingress)
+{
+ int pag;
+
+ /* Can't offload GOTO in VCAP ES0 */
+ if (!ingress)
+ return (goto_target < 0);
+
+ /* Non-optional GOTOs */
+ if (chain == 0)
+ /* VCAP IS1 can be skipped, either partially or completely */
+ return (goto_target == VCAP_IS1_CHAIN(0) ||
+ goto_target == VCAP_IS1_CHAIN(1) ||
+ goto_target == VCAP_IS1_CHAIN(2) ||
+ goto_target == VCAP_IS2_CHAIN(0, 0) ||
+ goto_target == VCAP_IS2_CHAIN(1, 0));
+
+ if (chain == VCAP_IS1_CHAIN(0))
+ return (goto_target == VCAP_IS1_CHAIN(1));
+
+ if (chain == VCAP_IS1_CHAIN(1))
+ return (goto_target == VCAP_IS1_CHAIN(2));
+
+ /* Lookup 2 of VCAP IS1 can really support non-optional GOTOs,
+ * using a Policy Association Group (PAG) value, which is an 8-bit
+ * value encoding a VCAP IS2 target chain.
+ */
+ if (chain == VCAP_IS1_CHAIN(2)) {
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (goto_target == VCAP_IS2_CHAIN(0, pag))
+ return true;
+
+ return false;
+ }
+
+ /* Non-optional GOTO from VCAP IS2 lookup 0 to lookup 1.
+ * We cannot change the PAG at this point.
+ */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(0, pag))
+ return (goto_target == VCAP_IS2_CHAIN(1, pag));
+
+ /* VCAP IS2 lookup 1 cannot jump anywhere */
+ return false;
+}
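
Putting these rules together, one complete and valid chain skeleton looks like this (PAG 42 chosen arbitrarily for the example):

	/*
	 * chain 0     --goto--> 10000   enter VCAP IS1, lookup 0
	 * chain 10000 --goto--> 11000   IS1 lookup 0 -> lookup 1
	 * chain 11000 --goto--> 12000   IS1 lookup 1 -> lookup 2
	 * chain 12000 --goto--> 20042   IS1 lookup 2 sets PAG 42 and
	 *                               enters VCAP IS2 lookup 0
	 * chain 20042 --goto--> 21042   IS2 lookup 0 -> lookup 1 (same PAG)
	 * chain 21042 (no goto)         IS2 lookup 1 is terminal
	 */
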
+
+static struct ocelot_vcap_filter *
+ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
+{
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(chain, true);
+ if (block_id < 0)
+ return NULL;
+
+ if (block_id == VCAP_IS2) {
+ block = &ocelot->block[VCAP_IS1];
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->type == OCELOT_VCAP_FILTER_PAG &&
+ filter->goto_target == chain)
+ return filter;
+ }
+
+ list_for_each_entry(filter, &ocelot->dummy_rules, list)
+ if (filter->goto_target == chain)
+ return filter;
+
+ return NULL;
+}
+
+static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
+ bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
+ enum ocelot_tag_tpid_sel tpid;
+ int i, chain, egress_port;
u64 rate;
- int i;
-
- if (!flow_offload_has_one_action(&f->rule->action))
- return -EOPNOTSUPP;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
return -EOPNOTSUPP;
+ chain = f->common.chain_index;
+ filter->block_id = ocelot_chain_to_block(chain, ingress);
+ if (filter->block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+ if (filter->block_id == VCAP_IS1 || filter->block_id == VCAP_IS2)
+ filter->lookup = ocelot_chain_to_lookup(chain);
+ if (filter->block_id == VCAP_IS2)
+ filter->pag = ocelot_chain_to_pag(chain);
+
+ filter->goto_target = -1;
+ filter->type = OCELOT_VCAP_FILTER_DUMMY;
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_DROP:
- filter->action = OCELOT_VCAP_ACTION_DROP;
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Drop action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.police_ena = true;
+ filter->action.pol_ix = OCELOT_POLICER_DISCARD;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
- filter->action = OCELOT_VCAP_ACTION_TRAP;
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Trap action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = 0;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_POLICE:
- filter->action = OCELOT_VCAP_ACTION_POLICE;
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Police action can only be offloaded to VCAP IS2 lookup 0");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.police_ena = true;
rate = a->police.rate_bytes_ps;
- filter->pol.rate = div_u64(rate, 1000) * 8;
- filter->pol.burst = a->police.burst;
+ filter->action.pol.rate = div_u64(rate, 1000) * 8;
+ filter->action.pol.burst = a->police.burst;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Redirect action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ filter->action.port_mask = BIT(egress_port);
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN pop action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vlan_pop_cnt_ena = true;
+ filter->action.vlan_pop_cnt++;
+ if (filter->action.vlan_pop_cnt > 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot pop more than 2 VLAN headers");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN modify action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_PRIORITY:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Priority action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.qos_ena = true;
+ filter->action.qos_val = a->priority;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_GOTO:
+ filter->goto_target = a->chain_index;
+
+ if (filter->block_id == VCAP_IS1 && filter->lookup == 2) {
+ int pag = ocelot_chain_to_pag(filter->goto_target);
+
+ filter->action.pag_override_mask = 0xff;
+ filter->action.pag_val = pag;
+ filter->type = OCELOT_VCAP_FILTER_PAG;
+ }
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (filter->block_id != VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN push action can only be offloaded to VCAP ES0");
+ return -EOPNOTSUPP;
+ }
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot push custom TPID");
+ return -EOPNOTSUPP;
+ }
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = 1;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
}
}
+ if (filter->goto_target == -1) {
+ if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
+ chain == 0) {
+ allow_missing_goto_target = true;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!ocelot_is_goto_target_valid(filter->goto_target, chain, ingress) &&
+ !allow_missing_goto_target) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload this GOTO target");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
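
For reference, the per-action checks above distill to this placement of tc actions into VCAP blocks:

	/*
	 * VCAP IS1 (ingress, first):  vlan pop, vlan mangle, priority,
	 *                             goto (lookup 2 encodes the PAG)
	 * VCAP IS2 (ingress, second): drop, trap, redirect,
	 *                             police (lookup 0 only)
	 * VCAP ES0 (egress):          vlan push (802.1Q or 802.1AD
	 *                             TPIDs only)
	 */
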
-static int ocelot_flower_parse(struct flow_cls_offload *f,
- struct ocelot_vcap_filter *filter)
+static int ocelot_flower_parse_indev(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_IGR_PORT].length;
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *dev, *indev;
+ struct flow_match_meta match;
+ int ingress_port;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!match.mask->ingress_ifindex)
+ return 0;
+
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EOPNOTSUPP;
+ }
+
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ if (!dev)
+ return -EINVAL;
+
+ indev = __dev_get_by_index(dev_net(dev), match.key->ingress_ifindex);
+ if (!indev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -ENOENT;
+ }
+
+ ingress_port = ocelot->ops->netdev_to_port(indev);
+ if (ingress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload an ocelot ingress port");
+ return -EOPNOTSUPP;
+ }
+ if (ingress_port == port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress port is equal to the egress port");
+ return -EINVAL;
+ }
+
+ filter->ingress_port.value = ingress_port;
+ filter->ingress_port.mask = GENMASK(key_length - 1, 0);
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = f->common.extack;
u16 proto = ntohs(f->common.protocol);
bool match_protocol = true;
+ int ret;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_META) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -63,6 +440,13 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
return -EOPNOTSUPP;
}
+ /* For VCAP ES0 (egress rewriter) we can match on the ingress port */
+ if (!ingress) {
+ ret = ocelot_flower_parse_indev(ocelot, port, f, filter);
+ if (ret)
+ return ret;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
@@ -72,6 +456,19 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on MAC address");
+ return -EOPNOTSUPP;
+ }
+
+ /* Populate the match before inspecting its mask */
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
/* The hw supports MAC matches only for the MAC_ETYPE key;
* therefore, if other matches (port, TCP flags, etc.) are added,
* just bail out
@@ -103,6 +500,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_IPV4;
filter->key.ipv4.proto.value[0] =
match.key->ip_proto;
@@ -111,6 +514,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_IPV6;
filter->key.ipv6.proto.value[0] =
match.key->ip_proto;
@@ -125,6 +534,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
struct flow_match_ipv4_addrs match;
u8 *tmp;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP address");
+ return -EOPNOTSUPP;
+ }
+
flow_rule_match_ipv4_addrs(rule, &match);
+
+ /* Only inspect the mask once the match is populated */
+ if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination IP");
+ return -EOPNOTSUPP;
+ }
tmp = &filter->key.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
@@ -148,6 +569,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L4 ports");
+ return -EOPNOTSUPP;
+ }
+
flow_rule_match_ports(rule, &match);
filter->key.ipv4.sport.value = ntohs(match.key->src);
filter->key.ipv4.sport.mask = ntohs(match.mask->src);
@@ -170,6 +597,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L2 proto");
+ return -EOPNOTSUPP;
+ }
+
/* TODO: support SNAP, LLC etc */
if (proto < ETH_P_802_3_MIN)
return -EOPNOTSUPP;
@@ -179,14 +612,28 @@ finished_key_parsing:
}
/* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */
+ return 0;
+}
+
+static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ int ret;
+
filter->prio = f->common.prio;
filter->id = f->cookie;
- return ocelot_flower_parse_action(f, filter);
+
+ ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
+ if (ret)
+ return ret;
+
+ return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
static struct ocelot_vcap_filter
-*ocelot_vcap_filter_create(struct ocelot *ocelot, int port,
- struct flow_cls_offload *f)
+*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f)
{
struct ocelot_vcap_filter *filter;
@@ -194,26 +641,65 @@ static struct ocelot_vcap_filter
if (!filter)
return NULL;
- filter->ingress_port_mask = BIT(port);
+ if (ingress) {
+ filter->ingress_port_mask = BIT(port);
+ } else {
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_EGR_PORT].length;
+
+ filter->egress_port.value = port;
+ filter->egress_port.mask = GENMASK(key_length - 1, 0);
+ }
+
return filter;
}
+static int ocelot_vcap_dummy_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_add(&filter->list, &ocelot->dummy_rules);
+
+ return 0;
+}
+
+static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_del(&filter->list);
+ kfree(filter);
+
+ return 0;
+}
+
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
+ int chain = f->common.chain_index;
int ret;
- filter = ocelot_vcap_filter_create(ocelot, port, f);
+ if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
+ NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
- ret = ocelot_flower_parse(f, filter);
+ ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
kfree(filter);
return ret;
}
+ /* The non-optional GOTOs for the TCAM skeleton don't need
+ * to be actually offloaded.
+ */
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_add(ocelot, filter);
+
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
@@ -221,28 +707,49 @@ EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_vcap_filter filter;
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
- filter.prio = f->common.prio;
- filter.id = f->cookie;
+ block = &ocelot->block[block_id];
- return ocelot_vcap_filter_del(ocelot, &filter);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter)
+ return 0;
+
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_del(ocelot, filter);
+
+ return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_vcap_filter filter;
- int ret;
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id, ret;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
+
+ block = &ocelot->block[block_id];
+
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return 0;
- filter.prio = f->common.prio;
- filter.id = f->cookie;
- ret = ocelot_vcap_filter_stats_update(ocelot, &filter);
+ ret = ocelot_vcap_filter_stats_update(ocelot, filter);
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0,
+ flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index d22711282183..0acb45948418 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -71,6 +71,23 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset)
+{
+ u32 val;
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, &val);
+ return val;
+}
+
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset)
+{
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, val);
+}
+
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 8490e42e9e2d..b34da11acf65 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -330,7 +330,6 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
int port = priv->chip_port;
- bool do_tstamp;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -345,7 +344,23 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
- do_tstamp = (ocelot_port_add_txtstamp_skb(ocelot_port, skb) == 0);
+ if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
+ info.rew_op = ocelot_port->ptp_cmd;
+
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct sk_buff *clone;
+
+ clone = skb_clone_sk(skb);
+ if (!clone) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
+
+ info.rew_op |= clone->cb[0] << 3;
+ }
+ }
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = ocelot_port->ptp_cmd;
@@ -383,8 +398,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (!do_tstamp)
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -642,6 +656,37 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_do_ioctl = ocelot_ioctl,
};
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+
+ if (!ocelot_port)
+ return NULL;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
+ return priv->dev;
+}
+
+/* Checks if the net_device instance given to us originates from our driver */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+int ocelot_netdev_to_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv;
+
+ if (!dev || !ocelot_netdevice_dev_check(dev))
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+
+ return priv->chip_port;
+}
+
static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
@@ -746,7 +791,7 @@ static int ocelot_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering);
+ attr->u.vlan_filtering, trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -863,12 +908,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-/* Checks if the net_device instance given to us originate from our driver. */
-static bool ocelot_netdevice_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &ocelot_port_netdev_ops;
-}
-
static int ocelot_netdevice_port_event(struct net_device *dev,
unsigned long event,
struct netdev_notifier_changeupper_info *info)
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1e08fe4daaef..a33ab315cc6b 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -300,7 +300,8 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info)
{
struct ptp_clock *ptp_clock;
int i;
diff --git a/drivers/net/ethernet/mscc/ocelot_s2.h b/drivers/net/ethernet/mscc/ocelot_s2.h
deleted file mode 100644
index 80107bec2e45..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_s2.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/* Microsemi Ocelot Switch driver
- * Copyright (c) 2018 Microsemi Corporation
- */
-
-#ifndef _OCELOT_S2_CORE_H_
-#define _OCELOT_S2_CORE_H_
-
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
-#define S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
-#define S2_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
-#define S2_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
-#define S2_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
-
-#define S2_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
-#define S2_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
-#define S2_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define S2_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
-#define S2_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
-
-#define S2_CACHE_ENTRY_DAT_RSZ 0x4
-
-#define S2_CACHE_MASK_DAT_RSZ 0x4
-
-#define S2_CACHE_ACTION_DAT_RSZ 0x4
-
-#define S2_CACHE_CNT_DAT_RSZ 0x4
-
-#define S2_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
-
-#define S2_BIST_CTRL_TCAM_BIST BIT(1)
-#define S2_BIST_CTRL_TCAM_INIT BIT(0)
-
-#define S2_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
-#define S2_BIST_CFG_TCAM_HCG_DIS BIT(7)
-#define S2_BIST_CFG_TCAM_CG_DIS BIT(6)
-#define S2_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
-#define S2_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
-
-#define S2_BIST_STAT_BIST_RT_ERR BIT(15)
-#define S2_BIST_STAT_BIST_PENC_ERR BIT(14)
-#define S2_BIST_STAT_BIST_COMP_ERR BIT(13)
-#define S2_BIST_STAT_BIST_ADDR_ERR BIT(12)
-#define S2_BIST_STAT_BIST_BL1E_ERR BIT(11)
-#define S2_BIST_STAT_BIST_BL1_ERR BIT(10)
-#define S2_BIST_STAT_BIST_BL0E_ERR BIT(9)
-#define S2_BIST_STAT_BIST_BL0_ERR BIT(8)
-#define S2_BIST_STAT_BIST_PH1_ERR BIT(7)
-#define S2_BIST_STAT_BIST_PH0_ERR BIT(6)
-#define S2_BIST_STAT_BIST_PV1_ERR BIT(5)
-#define S2_BIST_STAT_BIST_PV0_ERR BIT(4)
-#define S2_BIST_STAT_BIST_RUN BIT(3)
-#define S2_BIST_STAT_BIST_ERR BIT(2)
-#define S2_BIST_STAT_BIST_BUSY BIT(1)
-#define S2_BIST_STAT_TCAM_RDY BIT(0)
-
-#endif /* _OCELOT_S2_CORE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 3ef620faf995..d8c778ee6f1b 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -9,9 +9,7 @@
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_police.h"
#include "ocelot_vcap.h"
-#include "ocelot_s2.h"
-#define OCELOT_POLICER_DISCARD 0x17f
#define ENTRY_WIDTH 32
enum vcap_sel {
@@ -48,145 +46,174 @@ struct vcap_data {
u32 tg_mask; /* Current type-group mask */
};
-static u32 vcap_s2_read_update_ctrl(struct ocelot *ocelot)
+static u32 vcap_read_update_ctrl(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
{
- return ocelot_read(ocelot, S2_CORE_UPDATE_CTRL);
+ return ocelot_target_read(ocelot, vcap->target, VCAP_CORE_UPDATE_CTRL);
}
-static void vcap_cmd(struct ocelot *ocelot, u16 ix, int cmd, int sel)
+static void vcap_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u16 ix, int cmd, int sel)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 value = (VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT);
- u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
- S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
- S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
-
- if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2->entry_count)
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
if (!(sel & VCAP_SEL_ACTION))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
if (!(sel & VCAP_SEL_COUNTER))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+
+ ocelot_target_write(ocelot, vcap->target, value, VCAP_CORE_UPDATE_CTRL);
- ocelot_write(ocelot, value, S2_CORE_UPDATE_CTRL);
- readx_poll_timeout(vcap_s2_read_update_ctrl, ocelot, value,
- (value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
- 10, 100000);
+ read_poll_timeout(vcap_read_update_ctrl, value,
+ (value & VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
+ 10, 100000, false, ocelot, vcap);
}
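
The move from readx_poll_timeout() to read_poll_timeout() is what allows the polled accessor to take more than one argument; both ocelot and vcap are forwarded through the trailing args. The helper from <linux/iopoll.h> behaves roughly as sketched below (simplified; the real macro also honors the sleep_before_read flag and uses ktime for the deadline):

	/*
	 * read_poll_timeout(op, val, cond, sleep_us, timeout_us,
	 *                   sleep_before_read, args...)
	 * expands to approximately:
	 *
	 *	for (;;) {
	 *		val = op(args...);
	 *		if (cond)
	 *			break;
	 *		if (deadline passed)
	 *			return -ETIMEDOUT;
	 *		usleep_range(sleep_us / 4 + 1, sleep_us);
	 *	}
	 *
	 * so the call above polls vcap_read_update_ctrl(ocelot, vcap)
	 * roughly every 10 us until UPDATE_SHOT clears, for at most
	 * 100 ms; vcap_cmd() discards the timeout result.
	 */
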
/* Convert from 0-based row to VCAP entry row and run command */
-static void vcap_row_cmd(struct ocelot *ocelot, u32 row, int cmd, int sel)
+static void vcap_row_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u32 row, int cmd, int sel)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-
- vcap_cmd(ocelot, vcap_is2->entry_count - row - 1, cmd, sel);
+ vcap_cmd(ocelot, vcap, vcap->entry_count - row - 1, cmd, sel);
}
-static void vcap_entry2cache(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_entry2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 entry_words, i;
- entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
- ocelot_write_rix(ocelot, data->entry[i], S2_CACHE_ENTRY_DAT, i);
- ocelot_write_rix(ocelot, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ ocelot_target_write_rix(ocelot, vcap->target, data->entry[i],
+ VCAP_CACHE_ENTRY_DAT, i);
+ ocelot_target_write_rix(ocelot, vcap->target, ~data->mask[i],
+ VCAP_CACHE_MASK_DAT, i);
}
- ocelot_write(ocelot, data->tg, S2_CACHE_TG_DAT);
+ ocelot_target_write(ocelot, vcap->target, data->tg, VCAP_CACHE_TG_DAT);
}
-static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_cache2entry(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 entry_words, i;
- entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
- data->entry[i] = ocelot_read_rix(ocelot, S2_CACHE_ENTRY_DAT, i);
+ data->entry[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ENTRY_DAT, i);
// Invert mask
- data->mask[i] = ~ocelot_read_rix(ocelot, S2_CACHE_MASK_DAT, i);
+ data->mask[i] = ~ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_MASK_DAT, i);
}
- data->tg = ocelot_read(ocelot, S2_CACHE_TG_DAT);
+ data->tg = ocelot_target_read(ocelot, vcap->target, VCAP_CACHE_TG_DAT);
}
-static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_action2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 action_words, mask;
int i, width;
/* Encode action type */
- width = vcap_is2->action_type_width;
+ width = vcap->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
- action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
- ocelot_write_rix(ocelot, data->action[i], S2_CACHE_ACTION_DAT,
- i);
+ ocelot_target_write_rix(ocelot, vcap->target, data->action[i],
+ VCAP_CACHE_ACTION_DAT, i);
- for (i = 0; i < vcap_is2->counter_words; i++)
- ocelot_write_rix(ocelot, data->counter[i], S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap->counter_words; i++)
+ ocelot_target_write_rix(ocelot, vcap->target, data->counter[i],
+ VCAP_CACHE_CNT_DAT, i);
}
-static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_cache2action(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 action_words;
int i, width;
- action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
- data->action[i] = ocelot_read_rix(ocelot, S2_CACHE_ACTION_DAT,
- i);
+ data->action[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ACTION_DAT,
+ i);
- for (i = 0; i < vcap_is2->counter_words; i++)
- data->counter[i] = ocelot_read_rix(ocelot, S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap->counter_words; i++)
+ data->counter[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_CNT_DAT,
+ i);
/* Extract action type */
- width = vcap_is2->action_type_width;
+ width = vcap->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
-static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
+static void vcap_data_offset_get(const struct vcap_props *vcap,
+ struct vcap_data *data, int ix)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
- int i, col, offset, count, cnt, base;
- int width = vcap_is2->tg_width;
+ int num_subwords_per_entry, num_subwords_per_action;
+ int i, col, offset, num_entries_per_row, base;
+ u32 width = vcap->tg_width;
- count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
- col = (ix % 2);
- cnt = (vcap_is2->sw_count / count);
- base = (vcap_is2->sw_count - col * cnt - cnt);
+ switch (data->tg_sw) {
+ case VCAP_TG_FULL:
+ num_entries_per_row = 1;
+ break;
+ case VCAP_TG_HALF:
+ num_entries_per_row = 2;
+ break;
+ case VCAP_TG_QUARTER:
+ num_entries_per_row = 4;
+ break;
+ default:
+ return;
+ }
+
+ col = (ix % num_entries_per_row);
+ num_subwords_per_entry = (vcap->sw_count / num_entries_per_row);
+ base = (vcap->sw_count - col * num_subwords_per_entry -
+ num_subwords_per_entry);
data->tg_value = 0;
data->tg_mask = 0;
- for (i = 0; i < cnt; i++) {
+ for (i = 0; i < num_subwords_per_entry; i++) {
offset = ((base + i) * width);
data->tg_value |= (data->tg_sw << offset);
data->tg_mask |= GENMASK(offset + width - 1, offset);
}
/* Calculate key/action/counter offsets */
- col = (count - col - 1);
- data->key_offset = (base * vcap_is2->entry_width) / vcap_is2->sw_count;
- data->counter_offset = (cnt * col * vcap_is2->counter_width);
+ col = (num_entries_per_row - col - 1);
+ data->key_offset = (base * vcap->entry_width) / vcap->sw_count;
+ data->counter_offset = (num_subwords_per_entry * col *
+ vcap->counter_width);
i = data->type;
- width = vcap_is2->action_table[i].width;
- cnt = vcap_is2->action_table[i].count;
- data->action_offset =
- (((cnt * col * width) / count) + vcap_is2->action_type_width);
+ width = vcap->action_table[i].width;
+ num_subwords_per_action = vcap->action_table[i].count;
+ data->action_offset = ((num_subwords_per_action * col * width) /
+ num_entries_per_row);
+ data->action_offset += vcap->action_type_width;
}
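
A worked example of the row geometry, assuming sw_count = 4 subwords per row (the real value comes from the per-block struct vcap_props):

	/*
	 * Half keys (VCAP_TG_HALF): num_entries_per_row = 2, so
	 * num_subwords_per_entry = 4 / 2 = 2.
	 *
	 *   ix even -> col 0: base = 4 - 0 * 2 - 2 = 2  (subwords 2..3)
	 *   ix odd  -> col 1: base = 4 - 1 * 2 - 2 = 0  (subwords 0..1)
	 *
	 * key_offset = base * entry_width / sw_count then places this
	 * entry's key bits within the shared row, and tg_value/tg_mask
	 * set the type-group bits for exactly those subwords.
	 */
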
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
@@ -224,22 +251,21 @@ static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
-static void vcap_key_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
- u32 value, u32 mask)
+static void vcap_key_set(const struct vcap_props *vcap, struct vcap_data *data,
+ int field, u32 value, u32 mask)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
vcap_key_field_set(data, offset, length, value, mask);
}
-static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_bytes_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
u8 *val, u8 *msk)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 count = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 count = vcap->keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
WARN_ON(count % 8);
@@ -265,37 +291,37 @@ static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
}
}
-static void vcap_key_l4_port_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_l4_port_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
struct ocelot_vcap_udp_tcp *port)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
WARN_ON(length != 16);
vcap_key_field_set(data, offset, length, port->value, port->mask);
}
-static void vcap_key_bit_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_bit_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
enum ocelot_vcap_bit val)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
WARN_ON(length != 1);
vcap_key_field_set(data, offset, length, value, msk);
}
-static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_action_field field, u32 value)
+static void vcap_action_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field, u32 value)
{
- int offset = ocelot->vcap_is2_actions[field].offset;
- int length = ocelot->vcap_is2_actions[field].length;
+ int offset = vcap->actions[field].offset;
+ int length = vcap->actions[field].length;
vcap_data_set(data->action, offset + data->action_offset, length,
value);
@@ -304,40 +330,21 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
struct ocelot_vcap_filter *filter)
{
- switch (filter->action) {
- case OCELOT_VCAP_ACTION_DROP:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
- OCELOT_POLICER_DISCARD);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
- break;
- case OCELOT_VCAP_ACTION_TRAP:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
- break;
- case OCELOT_VCAP_ACTION_POLICE:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
- filter->pol_ix);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
- break;
- }
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_COPY_ENA, a->cpu_copy_ena);
}
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_vcap_u64 payload;
@@ -348,52 +355,55 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
memset(&data, 0, sizeof(data));
/* Read row */
- vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_ALL);
- vcap_cache2entry(ocelot, &data);
- vcap_cache2action(ocelot, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(ocelot, &data, ix);
+ vcap_data_offset_get(vcap, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (filter->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PAG, filter->pag, 0xff);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST,
+ (filter->lookup == 0) ? OCELOT_VCAP_BIT_1 :
+ OCELOT_VCAP_BIT_0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_VID,
tag->vid.value, tag->vid.mask);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_PCP,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PCP,
tag->pcp.value[0], tag->pcp.mask[0]);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (filter->key_type) {
case OCELOT_VCAP_KEY_ETYPE: {
struct ocelot_vcap_key_etype *etype = &filter->key.etype;
type = IS2_TYPE_ETYPE;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
etype->dmac.value, etype->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
etype->smac.value, etype->smac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
etype->etype.value, etype->etype.mask);
/* Clear unused bits */
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
0, 0);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
etype->data.value, etype->data.mask);
break;
@@ -402,15 +412,15 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_llc *llc = &filter->key.llc;
type = IS2_TYPE_LLC;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
llc->dmac.value, llc->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
payload.value, payload.mask);
break;
}
@@ -418,11 +428,11 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_snap *snap = &filter->key.snap;
type = IS2_TYPE_SNAP;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
snap->dmac.value, snap->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
snap->smac.value, snap->smac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
filter->key.snap.snap.value,
filter->key.snap.snap.mask);
break;
@@ -431,24 +441,24 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_arp *arp = &filter->key.arp;
type = IS2_TYPE_ARP;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
arp->smac.value, arp->smac.mask);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
arp->ethernet);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
arp->ip);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_LEN_OK,
arp->length);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
arp->dmac_match);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
arp->smac_match);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
arp->unknown);
@@ -457,15 +467,15 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
val, msk);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
arp->dip.value.addr, arp->dip.mask.addr);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
arp->sip.value.addr, arp->sip.mask.addr);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
0, 0);
break;
}
@@ -534,22 +544,22 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
seq_zero = ipv6->seq_zero;
}
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_FRAGMENT,
fragment);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_OPTIONS,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_OPTIONS,
options);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
ttl);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_TOS,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_TOS,
ds.value, ds.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_DIP,
dip.value.addr, dip.mask.addr);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_SIP,
sip.value.addr, sip.mask.addr);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DIP_EQ_SIP,
sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
@@ -558,33 +568,33 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
/* UDP/TCP protocol match */
tcp = (val == 6 ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_TCP, tcp);
- vcap_key_l4_port_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_DPORT, dport);
- vcap_key_l4_port_set(ocelot, &data,
+ vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT, sport);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SEQUENCE_EQ0,
seq_zero);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_FIN,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_FIN,
tcp_fin);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_SYN,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_SYN,
tcp_syn);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_RST,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_RST,
tcp_rst);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_PSH,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_PSH,
tcp_psh);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_ACK,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_ACK,
tcp_ack);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_URG,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_URG,
tcp_urg);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_DOM,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_DOM,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_VER,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_VER,
0, 0);
} else {
if (msk == 0) {
@@ -598,10 +608,10 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
payload.mask[i] = ip_data->mask[i];
}
}
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_IP4_L3_PROTO,
proto.value, proto.mask);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_L3_PAYLOAD,
payload.value, payload.mask);
}
@@ -611,46 +621,271 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
default:
type = 0;
type_mask = 0;
- count = vcap_is2->entry_width / 2;
+ count = vcap->entry_width / 2;
/* Iterate over the non-common part of the key and
* clear entry data
*/
- for (i = ocelot->vcap_is2_keys[VCAP_IS2_HK_L2_DMAC].offset;
+ for (i = vcap->keys[VCAP_IS2_HK_L2_DMAC].offset;
i < count; i += ENTRY_WIDTH) {
vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
- vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
+ vcap_key_set(vcap, &data, VCAP_IS2_TYPE, type, type_mask);
is2_action_set(ocelot, &data, filter);
vcap_data_set(data.counter, data.counter_offset,
- vcap_is2->counter_width, filter->stats.pkts);
+ vcap->counter_width, filter->stats.pkts);
/* Write row */
- vcap_entry2cache(ocelot, &data);
- vcap_action2cache(ocelot, &data);
- vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void is1_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_REPLACE_ENA,
+ a->vid_replace_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_ADD_VAL, a->vid);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ a->vlan_pop_cnt_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT,
+ a->vlan_pop_cnt);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_DEI_ENA, a->pcp_dei_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_VAL, a->pcp);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_DEI_VAL, a->dei);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_ENA, a->qos_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_VAL, a->qos_val);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ a->pag_override_mask);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_VAL, a->pag_val);
}
-static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter,
- int ix)
+static void is1_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
struct vcap_data data;
- int row = (ix / 2);
- u32 cnt;
+ int row = ix / 2;
+ u32 type;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
- vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
- vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(ocelot, &data, ix);
+ data.type = IS1_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_LOOKUP, filter->lookup, 0x3);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_IGR_PORT_MASK, 0,
+ ~filter->ingress_port_mask);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ type = IS1_TYPE_S1_NORMAL;
+
+ switch (filter->key_type) {
+ case OCELOT_VCAP_KEY_ETYPE: {
+ struct ocelot_vcap_key_etype *etype = &filter->key.etype;
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_IPV4: {
+ struct ocelot_vcap_key_ipv4 *ipv4 = &filter->key.ipv4;
+ struct ocelot_vcap_udp_tcp *sport = &ipv4->sport;
+ struct ocelot_vcap_udp_tcp *dport = &ipv4->dport;
+ enum ocelot_vcap_bit tcp_udp = OCELOT_VCAP_BIT_0;
+ struct ocelot_vcap_u8 proto = ipv4->proto;
+ struct ocelot_vcap_ipv4 sip = ipv4->sip;
+ u32 val, msk;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP_SNAP,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP4,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_ETYPE_LEN,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+
+ val = proto.value[0];
+ msk = proto.mask[0];
+
+ if ((val == NEXTHDR_TCP || val == NEXTHDR_UDP) && msk == 0xff)
+ tcp_udp = OCELOT_VCAP_BIT_1;
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP_UDP, tcp_udp);
+
+		if (tcp_udp == OCELOT_VCAP_BIT_1) {
+ enum ocelot_vcap_bit tcp = OCELOT_VCAP_BIT_0;
+
+ if (val == NEXTHDR_TCP)
+ tcp = OCELOT_VCAP_BIT_1;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_L4_SPORT,
+ sport);
+ /* Overloaded field */
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ dport);
+ } else {
+ /* IPv4 "other" frame */
+ struct ocelot_vcap_u16 etype = {0};
+
+ /* Overloaded field */
+ etype.value[0] = proto.value[0];
+ etype.mask[0] = proto.mask[0];
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype.value, etype.mask);
+		}
+		break;
+	}
+ default:
+ break;
+ }
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TYPE,
+ type ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+
+ is1_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void es0_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ a->push_outer_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_INNER_TAG,
+ a->push_inner_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ a->tag_a_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_VID_SEL,
+ a->tag_a_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ a->tag_a_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_A_VAL, a->vid_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_A_VAL, a->pcp_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ a->tag_b_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_VID_SEL,
+ a->tag_b_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ a->tag_b_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_B_VAL, a->vid_b_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_B_VAL, a->pcp_b_val);
+}
+
+static void es0_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = ix;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
+
+ data.tg_sw = VCAP_TG_FULL;
+ data.type = ES0_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_ES0_IGR_PORT, filter->ingress_port.value,
+ filter->ingress_port.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_EGR_PORT, filter->egress_port.value,
+ filter->egress_port.mask);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_BC, filter->dmac_bc);
+ vcap_key_set(vcap, &data, VCAP_ES0_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+
+ es0_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
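Worth noting: unlike the half-key IS1/IS2 paths above, ES0 uses full keys
(VCAP_TG_FULL), so no subword packing applies and the TCAM row is simply
the filter index (row = ix).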
+
+static void vcap_entry_get(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[filter->block_id];
+ struct vcap_data data;
+ int row, count;
+ u32 cnt;
+
+ if (filter->block_id == VCAP_ES0)
+ data.tg_sw = VCAP_TG_FULL;
+ else
+ data.tg_sw = VCAP_TG_HALF;
+
+ count = (1 << (data.tg_sw - 1));
+ row = (ix / count);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, vcap, &data);
+ vcap_data_offset_get(vcap, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
- vcap_is2->counter_width);
+ vcap->counter_width);
filter->stats.pkts = cnt;
}
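As a sanity check on the addressing math above, assuming the usual
type-group encoding (VCAP_TG_FULL = 1, VCAP_TG_HALF = 2), a hypothetical
half-key filter at index 5 works out as:

	count = 1 << (VCAP_TG_HALF - 1);	/* 2 half keys per TCAM row */
	row = 5 / count;			/* -> TCAM row 2 */

i.e. full-key blocks map one filter per row while half-key blocks pack
two filters into each row.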
+static void vcap_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS1)
+ return is1_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_IS2)
+ return is2_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_ES0)
+ return es0_entry_set(ocelot, ix, filter);
+}
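A minimal sketch of how a caller reaches the dispatcher (field values are
illustrative; in practice this happens through ocelot_vcap_filter_add()
further below):

	struct ocelot_vcap_filter f = {
		.block_id = VCAP_IS1,	/* routes to is1_entry_set() */
		.prio = 1,
	};

	vcap_entry_set(ocelot, ix, &f);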
+
static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
struct ocelot_policer *pol)
{
@@ -679,11 +914,12 @@ static void ocelot_vcap_policer_del(struct ocelot *ocelot,
list_for_each_entry(filter, &block->rules, list) {
index++;
- if (filter->action == OCELOT_VCAP_ACTION_POLICE &&
- filter->pol_ix < pol_ix) {
- filter->pol_ix += 1;
- ocelot_vcap_policer_add(ocelot, filter->pol_ix,
- &filter->pol);
+ if (filter->block_id == VCAP_IS2 &&
+ filter->action.police_ena &&
+ filter->action.pol_ix < pol_ix) {
+ filter->action.pol_ix += 1;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
is2_entry_set(ocelot, index, filter);
}
}
@@ -701,10 +937,11 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
struct list_head *pos, *n;
- if (filter->action == OCELOT_VCAP_ACTION_POLICE) {
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
block->pol_lpr--;
- filter->pol_ix = block->pol_lpr;
- ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol);
+ filter->action.pol_ix = block->pol_lpr;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
}
block->count++;
@@ -726,19 +963,20 @@ static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
- int index = -1;
+ int index = 0;
list_for_each_entry(tmp, &block->rules, list) {
- ++index;
if (filter->id == tmp->id)
- break;
+ return index;
+ index++;
}
- return index;
+
+ return -ENOENT;
}
static struct ocelot_vcap_filter*
-ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
- int index)
+ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
+ int index)
{
struct ocelot_vcap_filter *tmp;
int i = 0;
@@ -752,6 +990,18 @@ ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
return NULL;
}
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id)
+{
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->id == id)
+ return filter;
+
+ return NULL;
+}
+
/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
* on destination and source MAC addresses, but only on higher-level protocol
* information. The only frame types to match on keys containing MAC addresses
@@ -763,23 +1013,23 @@ ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
* on any _other_ keys than MAC_ETYPE ones.
*/
static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
- bool on)
+ int lookup, bool on)
{
u32 val = 0;
if (on)
- val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(3);
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup));
ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)),
ANA_PORT_VCAP_S2_CFG, port);
}
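Each of these *_DIS fields carries one bit per IS2 lookup, so the old
hardcoded 3 (binary 11) disabled MAC-based matching in both lookups at
once; BIT(lookup) narrows the write to the lookup the filter actually
sits in, e.g. BIT(0) = 01 for lookup 0 and BIT(1) = 10 for lookup 1.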
@@ -825,35 +1075,43 @@ static bool
ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter *tmp;
unsigned long port;
int i;
+ /* We only have the S2_IP_TCPUDP_DIS set of knobs for VCAP IS2 */
+ if (filter->block_id != VCAP_IS2)
+ return true;
+
if (ocelot_vcap_is_problematic_mac_etype(filter)) {
/* Search for any non-MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
- tmp = ocelot_vcap_block_find_filter(block, i);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_non_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
- ocelot_match_all_as_mac_etype(ocelot, port, true);
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, true);
} else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) {
/* Search for any MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
- tmp = ocelot_vcap_block_find_filter(block, i);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
- ocelot_match_all_as_mac_etype(ocelot, port, false);
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, false);
}
return true;
@@ -863,12 +1121,12 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
int i, index;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
- "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules, use the other IS2 lookup");
return -EBUSY;
}
@@ -877,17 +1135,19 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
/* Move down the rules to make place for the new filter */
for (i = block->count - 1; i > index; i--) {
struct ocelot_vcap_filter *tmp;
- tmp = ocelot_vcap_block_find_filter(block, i);
- is2_entry_set(ocelot, i, tmp);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ vcap_entry_set(ocelot, i, tmp);
}
/* Now insert the new filter */
- is2_entry_set(ocelot, index, filter);
+ vcap_entry_set(ocelot, index, filter);
return 0;
}
@@ -901,9 +1161,10 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_vcap_filter, list);
if (tmp->id == filter->id) {
- if (tmp->action == OCELOT_VCAP_ACTION_POLICE)
+ if (tmp->block_id == VCAP_IS2 &&
+ tmp->action.police_ena)
ocelot_vcap_policer_del(ocelot, block,
- tmp->pol_ix);
+ tmp->action.pol_ix);
list_del(pos);
kfree(tmp);
@@ -916,7 +1177,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter del_filter;
int i, index;
@@ -924,6 +1185,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
/* Delete filter */
ocelot_vcap_block_remove_filter(ocelot, block, filter);
@@ -932,12 +1195,12 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
for (i = index; i < block->count; i++) {
struct ocelot_vcap_filter *tmp;
- tmp = ocelot_vcap_block_find_filter(block, i);
- is2_entry_set(ocelot, i, tmp);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ vcap_entry_set(ocelot, i, tmp);
}
/* Now delete the last filter, because it is duplicated */
- is2_entry_set(ocelot, block->count, &del_filter);
+ vcap_entry_set(ocelot, block->count, &del_filter);
return 0;
}
@@ -945,37 +1208,115 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
- struct ocelot_vcap_filter *tmp;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter tmp;
int index;
index = ocelot_vcap_block_get_filter_index(block, filter);
- is2_entry_get(ocelot, filter, index);
+ if (index < 0)
+ return index;
+
+ vcap_entry_get(ocelot, index, filter);
/* After we get the result we need to clear the counters */
- tmp = ocelot_vcap_block_find_filter(block, index);
- tmp->stats.pkts = 0;
- is2_entry_set(ocelot, index, tmp);
+ tmp = *filter;
+ tmp.stats.pkts = 0;
+ vcap_entry_set(ocelot, index, &tmp);
return 0;
}
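Note the read-and-clear protocol here: vcap_entry_get() pulls the
hardware hit counter into filter->stats, then the entry is rewritten
through a local copy whose stats.pkts is forced to 0, so each call
reports only the packets counted since the previous call.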
-int ocelot_vcap_init(struct ocelot *ocelot)
+static void ocelot_vcap_init_one(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
- struct ocelot_vcap_block *block = &ocelot->block;
struct vcap_data data;
memset(&data, 0, sizeof(data));
- vcap_entry2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2->entry_count, S2_CORE_MV_CFG);
- vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+ vcap_entry2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->entry_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
- vcap_action2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2->action_count, S2_CORE_MV_CFG);
- vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
+ vcap_action2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->action_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+}
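Each pass loads VCAP_CORE_MV_CFG with the number of rows the subsequent
INITIALIZE command should sweep: the first pass wipes the entry
(key/mask) RAM, the second the action and counter RAMs.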
+
+static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
+ struct vcap_props *vcap)
+{
+ int counter_memory_width;
+ int num_default_actions;
+ int version;
+
+ version = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_VCAP_VER);
+ /* Only version 0 VCAP supported for now */
+ if (WARN_ON(version != 0))
+ return;
+
+ /* Width in bits of type-group field */
+ vcap->tg_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_TG_WIDTH);
+ /* Number of subwords per TCAM row */
+ vcap->sw_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_SWCNT);
+	/* Number of rows in TCAM. There can be this many full keys, or double
+	 * this number of half keys, or 4 times this number of quarter keys.
+	 */
+ vcap->entry_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_CNT);
+ /* Assuming there are 4 subwords per TCAM row, their layout in the
+ * actual TCAM (not in the cache) would be:
+ *
+ * | SW 3 | TG 3 | SW 2 | TG 2 | SW 1 | TG 1 | SW 0 | TG 0 |
+ *
+ * (where SW=subword and TG=Type-Group).
+ *
+	 * What VCAP_CONST_ENTRY_WIDTH is giving us is the width of one full TCAM
+ * row. But when software accesses the TCAM through the cache
+ * registers, the Type-Group values are written through another set of
+ * registers VCAP_TG_DAT, and therefore, it appears as though the 4
+ * subwords are contiguous in the cache memory.
+ * Important mention: regardless of the number of key entries per row
+ * (and therefore of key size: 1 full key or 2 half keys or 4 quarter
+ * keys), software always has to configure 4 Type-Group values. For
+ * example, in the case of 1 full key, the driver needs to set all 4
+ * Type-Group to be full key.
+ *
+ * For this reason, we need to fix up the value that the hardware is
+ * giving us. We don't actually care about the width of the entry in
+ * the TCAM. What we care about is the width of the entry in the cache
+ * registers, which is how we get to interact with it. And since the
+ * VCAP_ENTRY_DAT cache registers access only the subwords and not the
+ * Type-Groups, this means we need to subtract the width of the
+ * Type-Groups when packing and unpacking key entry data in a TCAM row.
+ */
+ vcap->entry_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_WIDTH);
+ vcap->entry_width -= vcap->tg_width * vcap->sw_count;
+ num_default_actions = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_DEF_CNT);
+ vcap->action_count = vcap->entry_count + num_default_actions;
+ vcap->action_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_WIDTH);
+	/* The width of the counter memory; this is the complete width of all
+ * counter-fields associated with one full-word entry. There is one
+ * counter per entry sub-word (see CAP_CORE::ENTRY_SWCNT for number of
+ * subwords.)
+ */
+ vcap->counter_words = vcap->sw_count;
+ counter_memory_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_CNT_WIDTH);
+ vcap->counter_width = counter_memory_width / vcap->counter_words;
+}
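As a worked example, plugging in the VSC7514 IS2 numbers that this patch
removes from ocelot_vsc7514.c (tg_width = 2, sw_count = 4, cache entry
width 376): the raw VCAP_CONST_ENTRY_WIDTH read would presumably be
376 + 2 * 4 = 384 bits, and the subtraction above recovers the 376-bit
cache view; likewise counter_width = 128 / 4 = 32 bits per subword
counter, matching the counter_words = 4, counter_width = 32 values
previously hardcoded.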
+
+int ocelot_vcap_init(struct ocelot *ocelot)
+{
+ int i;
/* Create a policer that will drop the frames for the cpu.
* This policer will be used as action in the acl rules to drop
@@ -992,9 +1333,18 @@ int ocelot_vcap_init(struct ocelot *ocelot)
ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
OCELOT_POLICER_DISCARD);
- block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
+ for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
+ struct ocelot_vcap_block *block = &ocelot->block[i];
+ struct vcap_props *vcap = &ocelot->vcap[i];
+
+ INIT_LIST_HEAD(&block->rules);
+ block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
+
+ ocelot_vcap_detect_constants(ocelot, vcap);
+ ocelot_vcap_init_one(ocelot, vcap);
+ }
- INIT_LIST_HEAD(&ocelot->block.rules);
+ INIT_LIST_HEAD(&ocelot->dummy_rules);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
index 0dfbfc011b2e..82fd10581a14 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -11,6 +11,8 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#define OCELOT_POLICER_DISCARD 0x17f
+
struct ocelot_ipv4 {
u8 addr[4];
};
@@ -76,6 +78,11 @@ struct ocelot_vcap_udp_tcp {
u16 mask;
};
+struct ocelot_vcap_port {
+ u8 value;
+ u8 mask;
+};
+
enum ocelot_vcap_key_type {
OCELOT_VCAP_KEY_ANY,
OCELOT_VCAP_KEY_ETYPE,
@@ -158,6 +165,7 @@ struct ocelot_vcap_key_ipv4 {
struct ocelot_vcap_key_ipv6 {
struct ocelot_vcap_u8 proto; /* IPv6 protocol */
struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
enum ocelot_vcap_bit ttl; /* TTL zero */
struct ocelot_vcap_u8 ds;
struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
@@ -174,10 +182,71 @@ struct ocelot_vcap_key_ipv6 {
enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
};
-enum ocelot_vcap_action {
- OCELOT_VCAP_ACTION_DROP,
- OCELOT_VCAP_ACTION_TRAP,
- OCELOT_VCAP_ACTION_POLICE,
+enum ocelot_mask_mode {
+ OCELOT_MASK_MODE_NONE,
+ OCELOT_MASK_MODE_PERMIT_DENY,
+ OCELOT_MASK_MODE_POLICY,
+ OCELOT_MASK_MODE_REDIRECT,
+};
+
+enum ocelot_es0_tag {
+ OCELOT_NO_ES0_TAG,
+ OCELOT_ES0_TAG,
+ OCELOT_FORCE_PORT_TAG,
+ OCELOT_FORCE_UNTAG,
+};
+
+enum ocelot_tag_tpid_sel {
+ OCELOT_TAG_TPID_SEL_8021Q,
+ OCELOT_TAG_TPID_SEL_8021AD,
+};
+
+struct ocelot_vcap_action {
+ union {
+ /* VCAP ES0 */
+ struct {
+ enum ocelot_es0_tag push_outer_tag;
+ enum ocelot_es0_tag push_inner_tag;
+ enum ocelot_tag_tpid_sel tag_a_tpid_sel;
+ int tag_a_vid_sel;
+ int tag_a_pcp_sel;
+ u16 vid_a_val;
+ u8 pcp_a_val;
+ u8 dei_a_val;
+ enum ocelot_tag_tpid_sel tag_b_tpid_sel;
+ int tag_b_vid_sel;
+ int tag_b_pcp_sel;
+ u16 vid_b_val;
+ u8 pcp_b_val;
+ u8 dei_b_val;
+ };
+
+ /* VCAP IS1 */
+ struct {
+ bool vid_replace_ena;
+ u16 vid;
+ bool vlan_pop_cnt_ena;
+ int vlan_pop_cnt;
+ bool pcp_dei_ena;
+ u8 pcp;
+ u8 dei;
+ bool qos_ena;
+ u8 qos_val;
+ u8 pag_override_mask;
+ u8 pag_val;
+ };
+
+ /* VCAP IS2 */
+ struct {
+ bool cpu_copy_ena;
+ u8 cpu_qu_num;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool police_ena;
+ struct ocelot_policer pol;
+ u32 pol_ix;
+ };
+ };
};
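Because the three layouts live in an anonymous union, a filter
initializes only the members belonging to its own block. A minimal
sketch of an IS2 policing action (field values are illustrative, and
ocelot_policer is assumed to carry rate/burst members):

	struct ocelot_vcap_action a = {
		.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY,
		.police_ena = true,
		.pol = {
			.rate = 1000,	/* assumed unit: kbps */
			.burst = 10000,	/* assumed unit: bytes */
		},
	};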
struct ocelot_vcap_stats {
@@ -186,15 +255,30 @@ struct ocelot_vcap_stats {
u64 used;
};
+enum ocelot_vcap_filter_type {
+ OCELOT_VCAP_FILTER_DUMMY,
+ OCELOT_VCAP_FILTER_PAG,
+ OCELOT_VCAP_FILTER_OFFLOAD,
+};
+
struct ocelot_vcap_filter {
struct list_head list;
+ enum ocelot_vcap_filter_type type;
+ int block_id;
+ int goto_target;
+ int lookup;
+ u8 pag;
u16 prio;
u32 id;
- enum ocelot_vcap_action action;
+ struct ocelot_vcap_action action;
struct ocelot_vcap_stats stats;
+ /* For VCAP IS1 and IS2 */
unsigned long ingress_port_mask;
+ /* For VCAP ES0 */
+ struct ocelot_vcap_port ingress_port;
+ struct ocelot_vcap_port egress_port;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -210,8 +294,6 @@ struct ocelot_vcap_filter {
struct ocelot_vcap_key_ipv4 ipv4;
struct ocelot_vcap_key_ipv6 ipv6;
} key;
- struct ocelot_policer pol;
- u32 pol_ix;
};
int ocelot_vcap_filter_add(struct ocelot *ocelot,
@@ -221,7 +303,10 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id);
+void ocelot_detect_vcap_constants(struct ocelot *ocelot);
int ocelot_vcap_init(struct ocelot *ocelot);
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 8a6917691ba6..dc00772950e5 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -19,10 +19,6 @@
#include "ocelot.h"
#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
-#define VSC7514_VCAP_IS2_CNT 64
-#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
-#define VSC7514_VCAP_PORT_CNT 11
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
@@ -241,14 +237,27 @@ static const u32 ocelot_sys_regmap[] = {
REG(SYS_PTP_CFG, 0x0006c4),
};
-static const u32 ocelot_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 ocelot_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 ocelot_ptp_regmap[] = {
@@ -311,7 +320,9 @@ static const u32 *ocelot_regmap[TARGET_MAX] = {
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
- [S2] = ocelot_s2_regmap,
+ [S0] = ocelot_vcap_regmap,
+ [S1] = ocelot_vcap_regmap,
+ [S2] = ocelot_vcap_regmap,
[PTP] = ocelot_ptp_regmap,
[DEV_GMII] = ocelot_dev_gmii_regmap,
};
@@ -756,6 +767,115 @@ static u16 ocelot_wm_enc(u16 value)
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .port_to_netdev = ocelot_port_to_netdev,
+ .netdev_to_port = ocelot_netdev_to_port,
+};
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
+ [VCAP_IS1_HK_RSV] = { 15, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
+ [VCAP_IS1_HK_L2_MC] = { 25, 1},
+ [VCAP_IS1_HK_L2_BC] = { 26, 1},
+ [VCAP_IS1_HK_IP_MC] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
+ [VCAP_IS1_HK_TPID] = { 30, 1},
+ [VCAP_IS1_HK_VID] = { 31, 12},
+ [VCAP_IS1_HK_DEI] = { 43, 1},
+ [VCAP_IS1_HK_PCP] = { 44, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
+ [VCAP_IS1_HK_ETYPE] = { 96, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {112, 1},
+ [VCAP_IS1_HK_IP4] = {113, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {117, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {155, 1},
+ [VCAP_IS1_HK_TCP] = {156, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {157, 16},
+ [VCAP_IS1_HK_L4_RNG] = {173, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {147, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
};
static const struct vcap_field vsc7514_vcap_is2_keys[] = {
@@ -856,15 +976,32 @@ static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
};
-static const struct vcap_props vsc7514_vcap_props[] = {
+static struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC7514_VCAP_IS2_CNT,
- .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC7514_VCAP_IS2_CNT +
- VSC7514_VCAP_PORT_CNT + 2,
- .action_width = 99,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -876,8 +1013,9 @@ static const struct vcap_props vsc7514_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
},
};
@@ -932,10 +1070,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!ocelot->ports)
return -ENOMEM;
- /* No NPI port */
- ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_NONE);
-
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
@@ -1043,6 +1177,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
+ { S0, "s0" },
+ { S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
};
@@ -1119,9 +1255,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->num_phys_ports = of_get_child_count(ports);
- ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
- ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
ocelot->vcap = vsc7514_vcap_props;
+ ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->npi = -1;
err = ocelot_init(ocelot);
if (err)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4a5beafa0493..1634ca6d4a8f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3543,11 +3543,10 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
- napi_hash_del(&ss->napi);
- netif_napi_del(&ss->napi);
+ __netif_napi_del(&ss->napi);
}
/* Wait till napi structs are no longer used, and then free ss. */
- synchronize_rcu();
+ synchronize_net();
kfree(mgp->ss);
mgp->ss = NULL;
}
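The split matters here: unlike netif_napi_del(), __netif_napi_del() does
not itself wait for an RCU grace period, so the single synchronize_net()
that follows is what makes freeing mgp->ss safe against concurrent RCU
readers, where the old sequence could end up paying for more than one
grace period.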
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3de8430ee8c5..b81e1487945c 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1916,9 +1916,9 @@ static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
static int alloc_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- &np->ring_dma);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &np->ring_dma, GFP_KERNEL);
if (!np->rx_ring)
return -ENOMEM;
np->tx_ring = &np->rx_ring[RX_RING_SIZE];
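The conversion follows the standard mapping from the legacy PCI DMA
wrappers to the generic DMA API on &pci_dev->dev, roughly:

	pci_alloc_consistent(pdev, sz, &h)    ->  dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
	pci_map_single(pdev, p, sz, dir)      ->  dma_map_single(&pdev->dev, p, sz, dir)
	PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE ->  DMA_TO_DEVICE / DMA_FROM_DEVICE

with the same substitution for the unmap, sync and free counterparts in
the hunks that follow.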
@@ -1939,10 +1939,10 @@ static void refill_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data, buflen, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_dma[entry])) {
+ np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data, buflen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
dev_kfree_skb_any(skb);
np->rx_skbuff[entry] = NULL;
break; /* Better luck next round. */
@@ -2013,9 +2013,8 @@ static void drain_tx(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_dma[i], np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
dev->stats.tx_dropped++;
}
@@ -2034,9 +2033,9 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
@@ -2052,9 +2051,9 @@ static void drain_ring(struct net_device *dev)
static void free_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- pci_free_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- np->rx_ring, np->ring_dma);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
}
static void reinit_rx(struct net_device *dev)
@@ -2101,9 +2100,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
- np->tx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+ np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
np->tx_skbuff[entry] = NULL;
dev_kfree_skb_irq(skb);
dev->stats.tx_dropped++;
@@ -2169,9 +2168,8 @@ static void netdev_tx_done(struct net_device *dev)
dev->stats.tx_window_errors++;
dev->stats.tx_errors++;
}
- pci_unmap_single(np->pci_dev,np->tx_dma[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
/* Free the original skb. */
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -2359,21 +2357,22 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
(skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
/* 16 byte align the IP header */
skb_reserve(skb, RX_OFFSET);
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_dma[entry],
buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 8e24c7acf79b..72794d158871 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -526,8 +526,8 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
- buf = pci_map_single(dev->pci_dev, skb->data,
- REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, REAL_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
/* update link of previous rx */
if (likely(next_empty != dev->rx_info.next_rx))
@@ -600,12 +600,14 @@ static void phy_intr(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
u32 cfg, new_cfg;
- u32 tbisr, tanar, tanlpar;
+ u32 tanar, tanlpar;
int speed, fullduplex, newlinkstate;
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
if (dev->CFG_cache & CFG_TBI_EN) {
+ u32 __maybe_unused tbisr;
+
/* we have an optical transceiver */
tbisr = readl(dev->base + TBISR);
tanar = readl(dev->base + TANAR);
@@ -858,8 +860,8 @@ static void rx_irq(struct net_device *ndev)
mb();
clear_rx_desc(dev, next_rx);
- pci_unmap_single(dev->pci_dev, bufptr,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, bufptr, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
len = cmdsts & CMDSTS_LEN_MASK;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* NH: As was mentioned below, this chip is kinda
@@ -923,10 +925,10 @@ out:
spin_unlock_irqrestore(&info->lock, flags);
}
-static void rx_action(unsigned long _dev)
+static void rx_action(struct tasklet_struct *t)
{
- struct net_device *ndev = (void *)_dev;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = from_tasklet(dev, t, rx_tasklet);
+ struct net_device *ndev = dev->ndev;
rx_irq(ndev);
writel(ihr, dev->base + IHR);
@@ -985,17 +987,13 @@ static void do_tx_done(struct net_device *ndev)
len = cmdsts & CMDSTS_LEN_MASK;
addr = desc_addr_get(desc + DESC_BUFPTR);
if (skb) {
- pci_unmap_single(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
- pci_unmap_page(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
dev->tx_done_idx = tx_done_idx;
@@ -1023,10 +1021,10 @@ static void ns83820_cleanup_tx(struct ns83820 *dev)
dev->tx_skbs[i] = NULL;
if (skb) {
__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
- pci_unmap_single(dev->pci_dev,
- desc_addr_get(desc + DESC_BUFPTR),
- le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev,
+ desc_addr_get(desc + DESC_BUFPTR),
+ le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
}
@@ -1121,7 +1119,8 @@ again:
len = skb->len;
if (nr_frags)
len -= skb->data_len;
- buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
@@ -1207,7 +1206,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct ns83820 *dev = PRIV(ndev);
- u32 cfg, tanar, tbicr;
+ u32 cfg, tbicr;
int fullduplex = 0;
u32 supported;
@@ -1226,7 +1225,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
/* read current configuration */
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
- tanar = readl(dev->base + TANAR);
+ readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
@@ -1902,12 +1901,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
- !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
using_dac = 1;
- } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
using_dac = 0;
} else {
- dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
+ dev_warn(&pci_dev->dev, "dma_set_mask failed!\n");
return -ENODEV;
}
@@ -1927,7 +1926,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
SET_NETDEV_DEV(ndev, &pci_dev->dev);
INIT_WORK(&dev->tq_refill, queue_refill);
- tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
+ tasklet_setup(&dev->rx_tasklet, rx_action);
err = pci_enable_device(pci_dev);
if (err) {
@@ -1938,10 +1937,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
dev->base = ioremap(addr, PAGE_SIZE);
- dev->tx_descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
- dev->rx_info.descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
+ dev->tx_descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_TX_DESC,
+ &dev->tx_phy_descs, GFP_KERNEL);
+ dev->rx_info.descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_RX_DESC,
+ &dev->rx_info.phy_descs, GFP_KERNEL);
err = -ENOMEM;
if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
goto out_disable;
@@ -2183,8 +2184,10 @@ out_free_irq:
out_disable:
if (dev->base)
iounmap(dev->base);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
@@ -2205,10 +2208,10 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
iounmap(dev->base);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
- dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
- dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index dd3605aa5f23..d17d1b4f2585 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -143,7 +143,7 @@ static int sonic_open(struct net_device *dev)
/*
* Initialize the SONIC
*/
- sonic_init(dev);
+ sonic_init(dev, true);
netif_start_queue(dev);
@@ -153,7 +153,7 @@ static int sonic_open(struct net_device *dev)
}
/* Wait for the SONIC to become idle. */
-static void sonic_quiesce(struct net_device *dev, u16 mask)
+static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
{
struct sonic_local * __maybe_unused lp = netdev_priv(dev);
int i;
@@ -163,7 +163,7 @@ static void sonic_quiesce(struct net_device *dev, u16 mask)
bits = SONIC_READ(SONIC_CMD) & mask;
if (!bits)
return;
- if (irqs_disabled() || in_interrupt())
+ if (!may_sleep)
udelay(20);
else
usleep_range(100, 200);
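Threading may_sleep through as an argument moves the context decision up
to the callers, which know statically whether they can sleep (open/close
paths) or not (timer, tx-timeout and multicast paths under a spinlock);
this replaces the irqs_disabled() || in_interrupt() heuristic, which
cannot reliably detect every atomic context.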
@@ -187,7 +187,7 @@ static int sonic_close(struct net_device *dev)
* stop the SONIC, disable interrupts
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, true);
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
@@ -229,7 +229,7 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
* disable all interrupts before releasing DMA buffers
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, false);
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
@@ -246,7 +246,7 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
}
/* Try to restart the adaptor. */
- sonic_init(dev);
+ sonic_init(dev, false);
lp->stats.tx_errors++;
netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
@@ -692,9 +692,9 @@ static void sonic_multicast_list(struct net_device *dev)
/* LCAM and TXP commands can't be used simultaneously */
spin_lock_irqsave(&lp->lock, flags);
- sonic_quiesce(dev, SONIC_CR_TXP);
+ sonic_quiesce(dev, SONIC_CR_TXP, false);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
- sonic_quiesce(dev, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM, false);
spin_unlock_irqrestore(&lp->lock, flags);
}
}
@@ -708,7 +708,7 @@ static void sonic_multicast_list(struct net_device *dev)
/*
* Initialize the SONIC ethernet controller.
*/
-static int sonic_init(struct net_device *dev)
+static int sonic_init(struct net_device *dev, bool may_sleep)
{
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -730,7 +730,7 @@ static int sonic_init(struct net_device *dev)
*/
SONIC_WRITE(SONIC_CMD, 0);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);
/*
* initialize the receive resource area
@@ -759,7 +759,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- sonic_quiesce(dev, SONIC_CR_RRRA);
+ sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);
/*
* Initialize the receive descriptors so that they
@@ -834,7 +834,7 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
- sonic_quiesce(dev, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);
/*
* enable receiver, disable loopback
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 3cbb62c860c8..a5b803eb8c8a 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -338,7 +338,7 @@ static void sonic_rx(struct net_device *dev);
static int sonic_close(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
-static int sonic_init(struct net_device *dev);
+static int sonic_init(struct net_device *dev, bool may_sleep);
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
static int sonic_alloc_descriptors(struct net_device *dev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bc94970bea45..d13d92bf7447 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1000,7 +1000,7 @@ static void free_shared_mem(struct s2io_nic *nic)
}
}
-/**
+/*
* s2io_verify_pci_mode -
*/
@@ -1035,7 +1035,7 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
}
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
-/**
+/*
* s2io_print_pci_mode -
*/
static int s2io_print_pci_mode(struct s2io_nic *nic)
@@ -2064,6 +2064,9 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
/**
* verify_pcc_quiescent- Checks for PCC quiescent state
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @flag: boolean controlling function path
* Return: 1 If PCC is quiescent
* 0 If PCC is not quiescent
*/
@@ -2099,6 +2102,8 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
}
/**
* verify_xena_quiescence - Checks whether the H/W is ready
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* Description: Returns whether the H/W is ready to go or not. Depending
* on whether adapter enable bit was written or not the comparison
* differs and the calling function passes the input argument flag to
@@ -2305,6 +2310,9 @@ static int start_nic(struct s2io_nic *nic)
}
/**
* s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
+ * @fifo_data: fifo data pointer
+ * @txdlp: descriptor
+ * @get_off: unused
*/
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
struct TxD *txdlp, int get_off)
@@ -2391,7 +2399,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
/**
* stop_nic - To stop the nic
- * @nic ; device private variable.
+ * @nic : device private variable.
* Description:
* This function does exactly the opposite of what the start_nic()
* function does. This function is called to stop the device.
@@ -2419,7 +2427,8 @@ static void stop_nic(struct s2io_nic *nic)
/**
* fill_rx_buffers - Allocates the Rx side skbs
- * @ring_info: per ring structure
+ * @nic : device private variable.
+ * @ring: per ring structure
* @from_card_up: If this is true, we will map the buffer to get
* the dma address for buf0 and buf1 to give it to the card.
* Else we will sync the already mapped buffer to give it to the card.
@@ -2864,7 +2873,7 @@ static void s2io_netpoll(struct net_device *dev)
/**
* rx_intr_handler - Rx interrupt handler
- * @ring_info: per ring structure.
+ * @ring_data: per ring structure.
* @budget: budget for napi processing.
* Description:
* If the interrupt is because of a received frame or if the
@@ -2972,7 +2981,7 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
/**
* tx_intr_handler - Transmit interrupt handler
- * @nic : device private variable
+ * @fifo_data : fifo data pointer
* Description:
* If an interrupt was raised to indicate DMA complete of the
* Tx packet, this function is called. It identifies the last TxD
@@ -3153,6 +3162,8 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
/**
* s2io_chk_xpak_counter - Function to check the status of the xpak counters
* @counter : counter value to be updated
+ * @regs_stat : registers status
+ * @index : index
* @flag : flag to indicate the status
* @type : counter type
* Description:
@@ -3309,8 +3320,9 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
/**
* wait_for_cmd_complete - waits for a command to complete.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @addr: address
+ * @busy_bit: bit to check for busy
+ * @bit_state: state to check
 * Description: Function that waits for a command to write into RMAC
* ADDR DATA registers to be completed and returns either success or
* error depending on whether the command was complete or not.
@@ -4335,7 +4347,7 @@ static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
/**
* s2io_handle_errors - Xframe error indication handler
- * @nic: device private variable
+ * @dev_id: opaque handle to dev
* Description: Handle alarms such as loss of link, single or
* double ECC errors, critical and serious errors.
* Return Value:
@@ -4739,7 +4751,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/**
+/*
* s2io_updt_stats -
*/
static void s2io_updt_stats(struct s2io_nic *sp)
@@ -5168,7 +5180,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
return tmp64 >> 16;
}
-/**
+/*
* s2io_set_mac_addr - driver entry point
*/
@@ -5243,8 +5255,7 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_set_link_ksettings - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev : pointer to netdev
* @cmd: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5273,8 +5284,7 @@ s2io_ethtool_set_link_ksettings(struct net_device *dev,
/**
 * s2io_ethtool_get_link_ksettings - Return link specific information.
- * @sp : private member of the device structure, pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @cmd : pointer to the structure with parameters given by ethtool
* to return link information.
* Description:
@@ -5313,8 +5323,7 @@ s2io_ethtool_get_link_ksettings(struct net_device *dev,
/**
* s2io_ethtool_gdrvinfo - Returns driver specific information.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @info : pointer to the structure with parameters given by ethtool to
* return driver information.
* Description:
@@ -5335,11 +5344,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
/**
 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @regs : pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @reg_space: The input argument into which all the registers are dumped.
+ * dumping the registers.
+ * @space: The input argument into which all the registers are dumped.
* Description:
* Dumps the entire register space of xFrame NIC into the user given
* buffer area.
@@ -5471,8 +5479,7 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* Returns the Pause frame generation and reception capability of the NIC.
@@ -5496,8 +5503,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
/**
* s2io_ethtool_setpause_data - set/reset pause frame generation.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* It can be used to set or reset Pause frame generation or reception
@@ -5526,6 +5532,7 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
return 0;
}
+#define S2IO_DEV_ID 5
/**
* read_eeprom - reads 4 bytes of data from user given offset.
* @sp : private member of the device structure, which is a pointer to the
@@ -5541,8 +5548,6 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
* Return value:
* -1 on failure and 0 on success.
*/
-
-#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
int ret = -1;
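
For context, a minimal sketch of the kernel-doc rule that motivates moving the #define above: scripts/kernel-doc binds a /** comment to the very next declaration, so anything sitting between the comment and its function breaks the association (names below are invented for illustration).

/* Wrong: kernel-doc tries to document the macro, not the function. */
/**
 * foo_read - read 4 bytes at a user-given offset
 * @off: byte offset to read from
 */
#define FOO_DEV_ID 5
static int foo_read(int off);

/* Right: the #define sits above, the comment touches its function. */
#define FOO_DEV_ID 5
/**
 * foo_read - read 4 bytes at a user-given offset
 * @off: byte offset to read from
 */
static int foo_read(int off);
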
@@ -5734,8 +5739,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
@@ -5771,11 +5775,10 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
/**
* s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
- * @data_buf ; user defined value to be written into Eeprom.
+ * @data_buf : user defined value to be written into Eeprom.
* Description:
* Tries to write the user provided value in the Eeprom, at the offset
* given by the user.
@@ -6027,7 +6030,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_link_test - verifies the link state of the nic
- * @sp ; private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
* the driver.
@@ -6150,8 +6153,7 @@ static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ethtest : pointer to a ethtool command specific structure that will be
* returned to the user.
* @data : variable that returns the result of each of the test
@@ -6597,7 +6599,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
/**
* s2io_ioctl - Entry point for the Ioctl
* @dev : Device pointer.
- * @ifr : An IOCTL specefic structure, that can contain a pointer to
+ * @rq : An IOCTL specific structure that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd : This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
@@ -6650,7 +6652,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
/**
 * s2io_set_link - Set the Link status
- * @data: long pointer to device private structue
+ * @work: work struct containing a pointer to device private structure
* Description: Sets the link status for the adapter
*/
@@ -7187,7 +7189,7 @@ static int s2io_card_up(struct s2io_nic *sp)
/**
* s2io_restart_nic - Resets the NIC.
- * @data : long pointer to the device private structure
+ * @work : work struct containing a pointer to the device private structure
* Description:
* This function is scheduled to be run by the s2io_tx_watchdog
* function after 0.5 secs to reset the NIC. The idea is to reduce
@@ -7218,6 +7220,7 @@ out_unlock:
/**
* s2io_tx_watchdog - Watchdog for transmit side.
* @dev : Pointer to net device structure
+ * @txqueue: index of the hanging queue
* Description:
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
@@ -7242,11 +7245,8 @@ static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
/**
* rx_osm_handler - To perform some OS related operations on SKB.
- * @sp: private member of the device structure,pointer to s2io_nic structure.
- * @skb : the socket buffer pointer.
- * @len : length of the packet
- * @cksum : FCS checksum of the frame.
- * @ring_no : the ring from which this RxD was extracted.
+ * @ring_data : the ring from which this RxD was extracted.
+ * @rxdp: descriptor
* Description:
 * This function is called by the Rx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
@@ -7576,9 +7576,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
}
/**
- * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
- * or Traffic class respectively.
+ * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
* @nic: device private variable
+ * @ds_codepoint: data
+ * @ring: ring index
* Description: The function configures the receive steering to
* desired receive ring.
* Return Value: SUCCESS on success and
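
Most of the s2io.c hunks above are kernel-doc repairs rather than functional changes: every real parameter must be listed, the name must match the actual argument (e.g. @ring_data, not @ring_info), and the tag must end in a colon ('@nic :' parses, '@nic ;' does not). A hedged template of the expected shape, with invented names:

/**
 * example_handler - one-line summary of the function
 * @nic: device private data
 * @budget: NAPI budget for this pass
 * Description:
 * Free-form text follows the parameter block.
 * Return Value: 0 on success, negative errno on failure.
 */
static int example_handler(struct s2io_nic *nic, int budget);
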
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 78eba10300ae..f5d48d7c4ce2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -988,6 +988,9 @@ exit:
/**
* vxge_hw_device_hw_info_get - Get the hw information
+ * @bar0: the bar
+ * @hw_info: the hw_info struct
+ *
* Returns the vpath mask that has the bits set for each vpath allocated
* for the driver, FW version information, and the first mac address for
* each vpath
@@ -2303,16 +2306,9 @@ exit:
static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
- gfp_t flags;
void *vaddr;
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
+ vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
@@ -3926,7 +3922,7 @@ exit:
/**
* vxge_hw_vpath_check_leak - Check for memory leak
- * @ringh: Handle to the ring object used for receive
+ * @ring: Handle to the ring object used for receive
*
* If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
* PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 373165119850..0cd0750484ae 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1899,18 +1899,13 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
struct pci_dev **p_dmah,
struct pci_dev **p_dma_acch)
{
- gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
realloc:
- vaddr = kmalloc((size), flags);
+ vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
if (vaddr == NULL)
return vaddr;
misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
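
Both vxge allocators above lose their in_interrupt() test as part of the tree-wide effort to remove it from drivers: in_interrupt() cannot reliably tell whether sleeping is allowed (it misses, for instance, regions under a spinlock), so the allocation context has to be decided by the caller. These paths are only reached from process context, hence the unconditional GFP_KERNEL. A minimal before/after sketch:

/* Before: guess the context at the allocation site. */
gfp_t flags = in_interrupt() ? GFP_ATOMIC | GFP_DMA
			     : GFP_KERNEL | GFP_DMA;
vaddr = kmalloc(size, flags);

/* After: the caller guarantees process context, so state it directly. */
vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
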
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 03c3d1230c17..4d91026485ae 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -119,7 +119,7 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
* @dev: device pointer.
* @regs: pointer to the structure with parameters given by ethtool for
* dumping the registers.
- * @reg_space: The input argument into which all the registers are dumped.
+ * @space: The input argument into which all the registers are dumped.
*
* Dumps the vpath register space of Titan NIC into the user given
* buffer area.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded4e275086..87892bd992b1 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1275,6 +1275,7 @@ _set_all_mcast:
/**
* vxge_set_mac_addr
* @dev: pointer to the device structure
+ * @p: socket info
*
* Update entry "0" (default MAC addr)
*/
@@ -1799,7 +1800,7 @@ static void vxge_reset(struct work_struct *work)
/**
* vxge_poll - Receive handler when Receive Polling is used.
- * @dev: pointer to the device structure.
+ * @napi: pointer to the napi structure.
* @budget: Number of packets budgeted to be processed in this iteration.
*
 * This function comes into the picture only if the Receive side is being handled
@@ -3096,7 +3097,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
/**
* vxge_get_stats64
* @dev: pointer to the device structure
- * @stats: pointer to struct rtnl_link_stats64
+ * @net_stats: pointer to struct rtnl_link_stats64
*
*/
static void
@@ -3245,7 +3246,7 @@ static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
/**
* vxge_ioctl
* @dev: Device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * @rq: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd: This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
@@ -3269,6 +3270,7 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/**
* vxge_tx_watchdog
* @dev: pointer to net device structure
+ * @txqueue: index of the hanging queue
*
* Watchdog for transmit side.
* This function is triggered if the Tx Queue is stopped
@@ -4002,6 +4004,7 @@ static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
/**
* vxge_pm_suspend - vxge power management suspend entry point
+ * @dev_d: device pointer
*
*/
static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
@@ -4010,6 +4013,7 @@ static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
}
/**
* vxge_pm_resume - vxge power management resume entry point
+ * @dev_d: device pointer
*
*/
static int __maybe_unused vxge_pm_resume(struct device *dev_d)
@@ -4539,7 +4543,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
* due to the fact that HWTS is using the FCS as the location of the
* timestamp. The HW FCS checking will still correctly determine if
* there is a valid checksum, and the FCS is being removed by the driver
- * anyway. So no fucntionality is being lost. Since it is always
+ * anyway. So no functionality is being lost. Since it is always
* enabled, we now simply use the ioctl call to set whether or not the
* driver should be paying attention to the HWTS.
*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 709d20d9938f..ee164970b267 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -30,8 +30,6 @@
*/
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -84,7 +82,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_status);
- val64 = readq(&vp_reg->vpath_general_int_status);
+ readq(&vp_reg->vpath_general_int_status);
/* Mask unwanted interrupts */
@@ -157,8 +155,6 @@ exit:
enum vxge_hw_status vxge_hw_vpath_intr_disable(
struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -179,8 +175,6 @@ enum vxge_hw_status vxge_hw_vpath_intr_disable(
(u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_general_int_mask);
- val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
-
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
@@ -284,7 +278,7 @@ void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channeh: Channel for rx or tx handle
+ * @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function masks the msix interrupt for the given msix_id
@@ -301,7 +295,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
/**
* vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channeh: Channel for rx or tx handle
+ * @channel: Channel for rx or tx handle
* @msix_id: MSI ID
*
* The function unmasks the msix interrupt for the given msix_id
@@ -356,8 +350,6 @@ u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
/**
* vxge_hw_device_intr_enable - Enable interrupts.
* @hldev: HW device handle.
- * @op: One of the enum vxge_hw_device_intr enumerated values specifying
- * the type(s) of interrupts to enable.
*
* Enable Titan interrupts. The function is to be executed the last in
* Titan initialization sequence.
@@ -411,8 +403,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
/**
* vxge_hw_device_intr_disable - Disable Titan interrupts.
* @hldev: HW device handle.
- * @op: One of the enum vxge_hw_device_intr enumerated values specifying
- * the type(s) of interrupts to disable.
*
* Disable Titan interrupts.
*
@@ -487,9 +477,7 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
*/
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
- u32 val32;
-
- val32 = readl(&hldev->common_reg->titan_general_int_status);
+ readl(&hldev->common_reg->titan_general_int_status);
}
/**
@@ -1414,7 +1402,7 @@ u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
/**
* vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifoh: Handle to the fifo object used for non offload send
+ * @fifo: Handle to the fifo object used for non offload send
* @txdlh: Reserved descriptor. On success HW fills this "out" parameter
* with a valid handle.
* @txdl_priv: Buffer to return the pointer to per txdl space
@@ -1525,8 +1513,6 @@ void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
* vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- * @frags: Number of contiguous buffers that are part of a single
- * transmit operation.
*
* Post descriptor on the 'fifo' type channel for transmission.
* Prior to posting the descriptor should be filled in accordance with
@@ -1699,8 +1685,7 @@ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
}
/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
- * to MAC address table.
+ * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
@@ -1716,8 +1701,8 @@ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN],
+ u8 *macaddr,
+ u8 *macaddr_mask,
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
u32 i;
@@ -1765,13 +1750,13 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
- * from MAC address table.
+ * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
* @vp: Vpath handle.
* @macaddr: First MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
- * Returns the first mac address and mac address mask in the list for this
+ * Get the first mac address entry for this vpath from MAC address table.
+ * Return: the first mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get_next
*
@@ -1779,8 +1764,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1816,14 +1801,13 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
- * vpath
- * from MAC address table.
+ * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
* @vp: Vpath handle.
* @macaddr: Next MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
- * Returns the next mac address and mac address mask in the list for this
+ * Get the next mac address entry for this vpath from MAC address table.
+ * Return: the next mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get
*
@@ -1831,8 +1815,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1869,8 +1853,7 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
- * to MAC address table.
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
@@ -1884,8 +1867,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
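
The signature change from u8 (macaddr)[ETH_ALEN] to u8 *macaddr is behavior-neutral: in C an array parameter adjusts ("decays") to a pointer, so both spellings declare the same function; the pointer form simply stops implying a bound the compiler never checked. A self-contained illustration:

#include <stdio.h>

#define ETH_ALEN 6

/* Identical to "void show_mac(const unsigned char mac[ETH_ALEN])":
 * inside the function, mac is just a pointer either way. */
static void show_mac(const unsigned char *mac)
{
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	show_mac(mac);	/* the array name decays to &mac[0] */
	return 0;
}
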
@@ -1916,8 +1899,7 @@ exit:
}
/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
- * to vlan id table.
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
@@ -2375,7 +2357,6 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
u8 t_code;
enum vxge_hw_status status = VXGE_HW_OK;
void *first_rxdh;
- u64 val64 = 0;
int new_count = 0;
ring->cmpl_cnt = 0;
@@ -2403,8 +2384,7 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
}
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
&ring->vp_reg->prc_rxd_doorbell);
- val64 =
- readl(&ring->common_reg->titan_general_int_status);
+ readl(&ring->common_reg->titan_general_int_status);
ring->doorbell_cnt = 0;
}
}
@@ -2413,9 +2393,11 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
}
/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
- * the same.
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
* @fifo: Handle to the fifo object used for non offload send
+ * @skb_ptr: pointer to skb
+ * @nr_skb: number of skbs
+ * @more: more is coming
*
* The function polls the Tx for the completed descriptors and calls
* the driver via supplied completion callback.
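
Several vxge-traffic.c hunks above delete a val64/val32 local while keeping the readq()/readl(): those reads exist to flush posted writes back to the adapter, not for their value, so dropping the assignment silences "set but not used" warnings without changing behavior. The idiom, using registers from this file:

/* A read from the device cannot complete until earlier posted
 * writes have reached it; the returned value is irrelevant. */
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
readq(&vp_reg->vpath_general_int_status);	/* flush, value discarded */
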
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ac02369174a9..53851853562c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -111,7 +111,9 @@ static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
- int i, cnt, err;
+ int i, cnt, err = 0;
+
+ mutex_lock(&prog->aux->used_maps_mutex);
/* Quickly count the maps we will have to remember */
cnt = 0;
@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
cnt++;
if (!cnt)
- return 0;
+ goto out;
nfp_prog->map_records = kmalloc_array(cnt,
sizeof(nfp_prog->map_records[0]),
GFP_KERNEL);
- if (!nfp_prog->map_records)
- return -ENOMEM;
+ if (!nfp_prog->map_records) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < prog->aux->used_map_cnt; i++)
if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
prog->aux->used_maps[i]);
if (err) {
nfp_map_ptrs_forget(bpf, nfp_prog);
- return err;
+ goto out;
}
}
WARN_ON(cnt != nfp_prog->map_records_cnt);
- return 0;
+out:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+ return err;
}
static int
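
The offload.c change above serializes the whole record pass under prog->aux->used_maps_mutex, which is why every early return becomes a goto: a single unlock site guarantees the mutex is dropped on all paths. A reduced sketch of the shape (types and helpers invented):

static int record_maps(struct bpf_prog_ctx *ctx)
{
	int err = 0;

	mutex_lock(&ctx->maps_mutex);

	if (!count_offload_maps(ctx))
		goto out;		/* nothing to record, still unlocks */

	err = remember_maps(ctx);	/* may fail with -ENOMEM etc. */
out:
	mutex_unlock(&ctx->maps_mutex);
	return err;
}
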
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index bf516285510f..a2926b1b3cff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -24,6 +24,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GRE BIT(0)
+#define NFP_FLOWER_LAYER2_QINQ BIT(4)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
@@ -319,6 +320,22 @@ struct nfp_flower_mac_mpls {
__be32 mpls_lse;
};
+/* VLAN details (2W/8B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | outer_tpid | outer_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | inner_tpid | inner_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vlan {
+ __be16 outer_tpid;
+ __be16 outer_tci;
+ __be16 inner_tpid;
+ __be16 inner_tci;
+};
+
/* L4 ports (for UDP, TCP, SCTP) (1W/4B)
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 3bf9c1afa45e..caf12eec9945 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -30,6 +30,8 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
+#define NFP_FLOWER_KEY_MAX_LW 32
+
#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
@@ -44,6 +46,7 @@ struct nfp_app;
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
+#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -57,7 +60,8 @@ struct nfp_app;
NFP_FL_FEATS_VF_RLIM | \
NFP_FL_FEATS_FLOW_MOD | \
NFP_FL_FEATS_PRE_TUN_RULES | \
- NFP_FL_FEATS_IPV6_TUN)
+ NFP_FL_FEATS_IPV6_TUN | \
+ NFP_FL_FEATS_VLAN_QINQ)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index f7f01e2e3dce..255a4dff6288 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,7 +10,7 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_rule *rule, u8 key_type)
+ struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
u16 tmp_tci;
@@ -24,7 +24,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
@@ -231,6 +231,50 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
+nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
+ struct nfp_flower_vlan *frame,
+ bool outer_vlan)
+{
+ u16 tci;
+
+ tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ key->vlan_id);
+
+ if (outer_vlan) {
+ frame->outer_tci = cpu_to_be16(tci);
+ frame->outer_tpid = key->vlan_tpid;
+ } else {
+ frame->inner_tci = cpu_to_be16(tci);
+ frame->inner_tpid = key->vlan_tpid;
+ }
+}
+
+static void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk,
+ struct flow_rule *rule)
+{
+ struct flow_match_vlan match;
+
+ memset(ext, 0, sizeof(struct nfp_flower_vlan));
+ memset(msk, 0, sizeof(struct nfp_flower_vlan));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_rule_match_vlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, true);
+ nfp_flower_fill_vlan(match.mask, msk, true);
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ flow_rule_match_cvlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, false);
+ nfp_flower_fill_vlan(match.mask, msk, false);
+ }
+}
+
+static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
@@ -433,7 +477,10 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_flower_priv *priv = app->priv;
+ bool qinq_sup;
u32 port_id;
+ int ext_len;
int err;
u8 *ext;
u8 *msk;
@@ -446,9 +493,11 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
+ qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- rule, key_ls->key_layer);
+ rule, key_ls->key_layer, qinq_sup);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -547,6 +596,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
@@ -589,5 +646,15 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ /* Check that the flow key does not exceed the maximum limit.
+ * All structures in the key are multiples of 4 bytes, so use u32.
+ */
+ ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
+ if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: flow key too long");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
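
With QinQ the compiled key can now outgrow what the firmware accepts, hence the new length check: the key buffer is built from structures that are all multiples of 4 bytes, so subtracting the two u32 pointers yields the length directly in longwords, compared against NFP_FLOWER_KEY_MAX_LW (32 LW, i.e. 128 bytes). A worked example with illustrative sizes:

/* ext was advanced past each compiled key structure, so: */
ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
/* If the key happens to occupy 92 bytes, ext_len == 23 longwords,
 * under the 32-LW (128-byte) cap; a 132-byte key would be rejected
 * with -EOPNOTSUPP. */
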
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 36356f96661d..1c59aff2163c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -31,6 +31,7 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
@@ -66,7 +67,8 @@
NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
- (NFP_FLOWER_LAYER_PORT | \
+ (NFP_FLOWER_LAYER_EXT_META | \
+ NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
@@ -285,6 +287,30 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
}
+ if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
+ !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan cvlan;
+
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_vlan(rule, &cvlan);
+ if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -1066,6 +1092,7 @@ err_destroy_merge_flow:
* nfp_flower_validate_pre_tun_rule()
* @app: Pointer to the APP handle
* @flow: Pointer to NFP flow representation of rule
+ * @key_ls: Pointer to NFP key layers structure
* @extack: Netlink extended ACK report
*
* Verifies the flow as a pre-tunnel rule.
@@ -1075,10 +1102,13 @@ err_destroy_merge_flow:
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
struct nfp_fl_payload *flow,
+ struct nfp_fl_key_ls *key_ls,
struct netlink_ext_ack *extack)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_meta_tci *meta_tci;
struct nfp_flower_mac_mpls *mac;
+ u8 *ext = flow->unmasked_data;
struct nfp_fl_act_head *act;
u8 *mask = flow->mask_data;
bool vlan = false;
@@ -1086,20 +1116,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
- if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
- u16 vlan_tci = be16_to_cpu(meta_tci->tci);
-
- vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
- vlan = true;
- } else {
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ key_layer = key_ls->key_layer;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
- key_layer = meta_tci->nfp_flow_key_layer;
if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
return -EOPNOTSUPP;
+ } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
+ return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
@@ -1109,7 +1144,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
/* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ if (key_ls->key_layer_two) {
+ mask += sizeof(struct nfp_flower_ext_meta);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ }
mask += sizeof(struct nfp_flower_in_port);
+ ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask;
@@ -1118,6 +1159,8 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
+ mask += sizeof(struct nfp_flower_mac_mpls);
+ ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
key_layer & NFP_FLOWER_LAYER_IPV6) {
/* Flags and proto fields have same offset in IPv4 and IPv6. */
@@ -1130,7 +1173,6 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
sizeof(struct nfp_flower_ipv4) :
sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
for (i = 0; i < size; i++)
@@ -1138,6 +1180,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
}
+ ext += size;
+ mask += size;
+ }
+
+ if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ struct nfp_flower_vlan *vlan_tags;
+ u16 vlan_tci;
+
+ vlan_tags = (struct nfp_flower_vlan *)ext;
+
+ vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
/* Action must be a single egress or pop_vlan and egress. */
@@ -1220,7 +1281,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) {
- err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
if (err)
goto err_destroy_flow;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index be52510d446b..97d2b03208de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -329,12 +329,11 @@ err_close_nsp:
}
static int
-nfp_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+nfp_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
- if (component)
- return -EOPNOTSUPP;
- return nfp_flash_update_common(devlink_priv(devlink), path, extack);
+ return nfp_flash_update_common(devlink_priv(devlink), params->file_name, extack);
}
const struct devlink_ops nfp_devlink_ops = {
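
The devlink core now hands drivers a parameter struct (and rejects unsupported components itself), so the driver callback shrinks to consuming params->file_name. A sketch of the new callback shape, assuming only the fields visible in this diff:

static int my_flash_update(struct devlink *devlink,
			   struct devlink_flash_update_params *params,
			   struct netlink_ext_ack *extack)
{
	/* component filtering now happens in the devlink core */
	return my_flash_common(devlink_priv(devlink),
			       params->file_name, extack);
}
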
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 21ea22694e47..b150da43adb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2287,9 +2287,9 @@ static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
return budget;
}
-static void nfp_ctrl_poll(unsigned long arg)
+static void nfp_ctrl_poll(struct tasklet_struct *t)
{
- struct nfp_net_r_vector *r_vec = (void *)arg;
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
spin_lock(&r_vec->lock);
nfp_net_tx_complete(r_vec->tx_ring, 0);
@@ -2337,8 +2337,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
- (unsigned long)r_vec);
+ tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
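
This hunk and the nixge one below are the same mechanical tasklet conversion: tasklet_setup() drops tasklet_init()'s unsigned-long data argument, and the callback recovers its container with from_tasklet(), which is container_of() in disguise:

/* from include/linux/interrupt.h: */
#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

/* so in the callback above, t points into r_vec and: */
struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
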
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 4075f5e59955..a6861df9904f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -787,9 +787,9 @@ out:
return IRQ_HANDLED;
}
-static void nixge_dma_err_handler(unsigned long data)
+static void nixge_dma_err_handler(struct tasklet_struct *t)
{
- struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
u32 cr, i;
@@ -879,8 +879,7 @@ static int nixge_open(struct net_device *ndev)
phy_start(phy);
/* Enable tasklets for Axi DMA error handling */
- tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
- (unsigned long)priv);
+ tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index b36aa5bf3c5f..a58f14aca10c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -8,7 +8,7 @@
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
-/**
+/*
* pch_gbe_stats - Stats item information
*/
struct pch_gbe_stats {
@@ -24,7 +24,7 @@ struct pch_gbe_stats {
.offset = offsetof(struct pch_gbe_hw_stats, m), \
}
-/**
+/*
* pch_gbe_gstrings_stats - ethtool information status name list
*/
static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 23f7c76737c9..ade8c44c01cd 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -295,7 +295,7 @@ static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg: Pointer to the register
- * @busy: Busy bit
+ * @bit: Busy bit
*/
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
@@ -1034,7 +1034,7 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
/**
* pch_gbe_watchdog - Watchdog process
- * @data: Board private structure
+ * @t: timer list containing a board private structure
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
@@ -2270,6 +2270,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* pch_gbe_tx_timeout - Respond to a Tx Hang
* @netdev: Network interface device structure
+ * @txqueue: index of hanging queue
*/
static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
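
The @t fix above reflects the timer_list calling convention this driver already uses: the callback receives the timer itself and maps back to its container with from_timer() (another container_of() wrapper). A hedged sketch, assuming the adapter embeds the timer as watchdog_timer:

/* setup, typically done once in probe/open: */
timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);

static void pch_gbe_watchdog(struct timer_list *t)
{
	/* recover the board private structure from the timer_list */
	struct pch_gbe_adapter *adapter =
		from_timer(adapter, t, watchdog_timer);
	/* ... watchdog work would go here ... */
}
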
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index dceec80fd642..81fc5a6e3221 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -13,7 +13,7 @@
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
-/**
+/*
* TxDescriptors - Transmit Descriptor Count
* @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
* @Default Value: PCH_GBE_DEFAULT_TXD
@@ -22,7 +22,7 @@ static int TxDescriptors = OPTION_UNSET;
module_param(TxDescriptors, int, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
-/**
+/*
 * RxDescriptors - Receive Descriptor Count
* @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
* @Default Value: PCH_GBE_DEFAULT_RXD
@@ -31,7 +31,7 @@ static int RxDescriptors = OPTION_UNSET;
module_param(RxDescriptors, int, 0);
MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
-/**
+/*
* Speed - User Specified Speed Override
* @Valid Range: 0, 10, 100, 1000
* - 0: auto-negotiate at all supported speeds
@@ -44,7 +44,7 @@ static int Speed = OPTION_UNSET;
module_param(Speed, int, 0);
MODULE_PARM_DESC(Speed, "Speed setting");
-/**
+/*
* Duplex - User Specified Duplex Override
* @Valid Range: 0-2
* - 0: auto-negotiate for duplex
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(Duplex, "Duplex setting");
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-/**
+/*
* AutoNeg - Auto-negotiation Advertisement Override
* @Valid Range: 0x01-0x0F, 0x20-0x2F
*
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");
#define PHY_ADVERTISE_1000_FULL 0x0020
#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F
-/**
+/*
* FlowControl - User Specified Flow Control Override
* @Valid Range: 0-3
* - 0: No Flow Control
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
#define PCH_GBE_DEFAULT_TX_CSUM true /* trueorfalse */
-/**
+/*
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
* Returns:
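
Every pch_gbe_param.c hunk above makes the opposite fix to the s2io ones: these blocks describe module parameters, not functions, so no documentable declaration follows them and scripts/kernel-doc would warn. Demoting /** to /* marks them as plain comments. In miniature:

/* Plain comment: free-form, invisible to scripts/kernel-doc. */

/**
 * real_kernel_doc - this opener promises a parsable block
 * @arg: and a declaration must follow immediately
 */
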
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 3da075307178..d1dd9bc1bc7f 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1060,7 +1060,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size, yf_size;
+ int data_size, __maybe_unused yf_size;
u8 *buf_addr;
if(!desc->result_status)
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 76f8cc502bf9..5f8b0bb3af6e 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -21,6 +21,7 @@ config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
select NET_DEVLINK
+ select DIMLIB
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 29f304d75261..8d3c2d3cb10d 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index f5a910c458ba..084a924431d5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -42,13 +42,11 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
- struct list_head lifs;
- struct ionic_lif *master_lif;
+ struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
- DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
@@ -66,9 +64,6 @@ struct ionic_admin_ctx {
union ionic_adminq_comp comp;
};
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg);
-
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 85c686c16741..b0d8499d373b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -266,6 +266,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot identify device: %d, aborting\n", err);
goto err_out_teardown;
}
+ ionic_debugfs_add_ident(ionic);
err = ionic_init(ionic);
if (err) {
@@ -286,29 +287,22 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_reset;
}
- /* Configure LIFs */
- err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
- &ionic->ident.lif);
+ /* Allocate and init the LIF */
+ err = ionic_lif_size(ionic);
if (err) {
- dev_err(dev, "Cannot identify LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
goto err_out_port_reset;
}
- err = ionic_lifs_size(ionic);
+ err = ionic_lif_alloc(ionic);
if (err) {
- dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
- goto err_out_port_reset;
- }
-
- err = ionic_lifs_alloc(ionic);
- if (err) {
- dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
goto err_out_free_irqs;
}
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(ionic->lif);
if (err) {
- dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
goto err_out_free_lifs;
}
@@ -321,9 +315,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lifs_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
goto err_out_deinit_lifs;
}
@@ -336,12 +330,13 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_deregister_lifs:
- ionic_lifs_unregister(ionic);
+ ionic_lif_unregister(ionic->lif);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
- ionic_lifs_free(ionic);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
@@ -349,7 +344,7 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
- ionic_dev_teardown(ionic);
+ del_timer_sync(&ionic->watchdog_timer);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -377,17 +372,19 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- if (ionic->master_lif) {
+ del_timer_sync(&ionic->watchdog_timer);
+
+ if (ionic->lif) {
ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
+ ionic_lif_unregister(ionic->lif);
+ ionic_lif_deinit(ionic->lif);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
ionic_bus_free_irq_vectors(ionic);
}
ionic_port_reset(ionic);
ionic_reset(ionic);
- ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 11621ccc1faf..39f59849720d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -76,7 +76,7 @@ static int q_tail_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->tail->index);
+ seq_printf(seq, "%d\n", q->tail_idx);
return 0;
}
@@ -86,7 +86,7 @@ static int q_head_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->head->index);
+ seq_printf(seq, "%d\n", q->head_idx);
return 0;
}
@@ -96,7 +96,7 @@ static int cq_tail_show(struct seq_file *seq, void *v)
{
struct ionic_cq *cq = seq->private;
- seq_printf(seq, "%d\n", cq->tail->index);
+ seq_printf(seq, "%d\n", cq->tail_idx);
return 0;
}
@@ -112,7 +112,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
- struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
+ struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
@@ -121,21 +122,21 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_queue *q = &qcq->q;
struct ionic_cq *cq = &qcq->cq;
- qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
+ qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
+ if (IS_ERR_OR_NULL(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
- debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
- debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
+ debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
q_dentry = debugfs_create_dir("q", qcq->dentry);
debugfs_create_u32("index", 0400, q_dentry, &q->index);
- debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
- if (qcq->flags & IONIC_QCQ_F_SG) {
- debugfs_create_x64("sg_base_pa", 0400, q_dentry,
- &q->sg_base_pa);
- debugfs_create_u32("sg_desc_size", 0400, q_dentry,
- &q->sg_desc_size);
- }
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
@@ -188,6 +189,8 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
&intr->index);
debugfs_create_u32("vector", 0400, intr_dentry,
&intr->vector);
+ debugfs_create_u32("dim_coal_hw", 0400, intr_dentry,
+ &intr->dim_coal_hw);
intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d83eff0ae0ac..dc5fbc2704f3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -19,10 +19,13 @@ static void ionic_watchdog_cb(struct timer_list *t)
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
+ if (!ionic->lif)
+ return;
+
hb = ionic_heartbeat_check(ionic);
- if (hb >= 0 && ionic->master_lif)
- ionic_link_status_check_request(ionic->master_lif);
+ if (hb >= 0)
+ ionic_link_status_check_request(ionic->lif, false);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -98,11 +101,6 @@ int ionic_dev_setup(struct ionic *ionic)
return 0;
}
-void ionic_dev_teardown(struct ionic *ionic)
-{
- del_timer_sync(&ionic->watchdog_timer);
-}
-
/* Devcmd Interface */
int ionic_heartbeat_check(struct ionic *ionic)
{
@@ -126,7 +124,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
/* is this a transition? */
if (fw_status != idev->last_fw_status &&
idev->last_fw_status != 0xff) {
- struct ionic_lif *lif = ionic->master_lif;
+ struct ionic_lif *lif = ionic->lif;
bool trigger = false;
if (!fw_status || fw_status == 0xff) {
@@ -335,7 +333,7 @@ int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
.vf_setattr.attr = attr,
- .vf_setattr.vf_index = vf,
+ .vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
@@ -393,7 +391,7 @@ void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
{
union ionic_dev_cmd cmd = {
.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
- .q_identify.lif_type = lif_type,
+ .q_identify.lif_type = cpu_to_le16(lif_type),
.q_identify.type = qtype,
.q_identify.ver = qver,
};
@@ -467,9 +465,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
{
- struct ionic_cq_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -482,22 +478,9 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->bound_intr = intr;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
- cq->tail = cq->info;
+ cq->tail_idx = 0;
cq->done_color = 1;
- cur = cq->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs) {
- cur->next = cq->info;
- cur->last = true;
- } else {
- cur->next = cur + 1;
- }
- cur->index = i;
- cur++;
- }
-
return 0;
}
@@ -522,15 +505,18 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
+ struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- while (cb(cq, cq->tail)) {
- if (cq->tail->last)
+ cq_info = &cq->info[cq->tail_idx];
+ while (cb(cq, cq_info)) {
+ if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
- cq->tail = cq->tail->next;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
@@ -548,9 +534,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid)
{
- struct ionic_desc_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -565,24 +549,12 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
q->num_descs = num_descs;
q->desc_size = desc_size;
q->sg_desc_size = sg_desc_size;
- q->tail = q->info;
- q->head = q->tail;
+ q->tail_idx = 0;
+ q->head_idx = 0;
q->pid = pid;
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
- cur = q->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs)
- cur->next = q->info;
- else
- cur->next = cur + 1;
- cur->index = i;
- cur->left = num_descs - i;
- cur++;
- }
-
return 0;
}
@@ -614,19 +586,22 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
- q->head->cb = cb;
- q->head->cb_arg = cb_arg;
- q->head = q->head->next;
+ desc_info = &q->info[q->head_idx];
+ desc_info->cb = cb;
+ desc_info->cb_arg = cb_arg;
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
q->lif->index, q->name, q->hw_type, q->hw_index,
- q->head->index, ring_doorbell);
+ q->head_idx, ring_doorbell);
if (ring_doorbell)
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
@@ -634,8 +609,8 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
unsigned int mask, tail, head;
mask = q->num_descs - 1;
- tail = q->tail->index;
- head = q->head->index;
+ tail = q->tail_idx;
+ head = q->head_idx;
return ((pos - tail) & mask) < ((head - tail) & mask);
}
@@ -646,20 +621,22 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
struct ionic_desc_info *desc_info;
ionic_desc_cb cb;
void *cb_arg;
+ u16 index;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->lif->ionic->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail->index, q->head->index);
+ q->name, stop_index, q->tail_idx, q->head_idx);
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
@@ -669,5 +646,5 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
if (cb)
cb(q, desc_info, cq_info, cb_arg);
- } while (desc_info->index != stop_index);
+ } while (index != stop_index);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index d5cba502abca..6c243b17312c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -29,6 +29,7 @@ struct ionic_dev_bar {
int res_index;
};
+#ifndef __CHECKER__
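+/* sparse defines __CHECKER__ and doesn't cope with static_assert() */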
/* Registers */
static_assert(sizeof(struct ionic_intr) == 32);
@@ -119,6 +120,7 @@ static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+#endif /* __CHECKER__ */
struct ionic_devinfo {
u8 asic_type;
@@ -149,10 +151,13 @@ struct ionic_dev {
};
struct ionic_cq_info {
- void *cq_desc;
- struct ionic_cq_info *next;
- unsigned int index;
- bool last;
+ union {
+ void *cq_desc;
+ struct ionic_txq_comp *txcq;
+ struct ionic_rxq_comp *rxcq;
+ struct ionic_admin_comp *admincq;
+ struct ionic_notifyq_event *notifyq;
+ };
};
struct ionic_queue;
@@ -169,11 +174,17 @@ struct ionic_page_info {
};
struct ionic_desc_info {
- void *desc;
- void *sg_desc;
- struct ionic_desc_info *next;
- unsigned int index;
- unsigned int left;
+ union {
+ void *desc;
+ struct ionic_txq_desc *txq_desc;
+ struct ionic_rxq_desc *rxq_desc;
+ struct ionic_admin_cmd *adminq_desc;
+ };
+ union {
+ void *sg_desc;
+ struct ionic_txq_sg_desc *txq_sg_desc;
+ struct ionic_rxq_sg_desc *rxq_sgl_desc;
+ };
unsigned int npages;
struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
@@ -183,25 +194,35 @@ struct ionic_desc_info {
#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
+ struct device *dev;
+ struct ionic_lif *lif;
+ struct ionic_desc_info *info;
+ u16 head_idx;
+ u16 tail_idx;
+ unsigned int index;
+ unsigned int num_descs;
u64 dbell_count;
- u64 drop;
u64 stop;
u64 wake;
- struct ionic_lif *lif;
- struct ionic_desc_info *info;
- struct ionic_desc_info *tail;
- struct ionic_desc_info *head;
+ u64 drop;
struct ionic_dev *idev;
- unsigned int index;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
u64 dbval;
- void *base;
- void *sg_base;
+ union {
+ void *base;
+ struct ionic_txq_desc *txq;
+ struct ionic_rxq_desc *rxq;
+ struct ionic_admin_cmd *adminq;
+ };
+ union {
+ void *sg_base;
+ struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_rxq_sg_desc *rxq_sgl;
+ };
dma_addr_t base_pa;
dma_addr_t sg_base_pa;
- unsigned int num_descs;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -218,20 +239,21 @@ struct ionic_intr_info {
u64 rearm_count;
unsigned int cpu;
cpumask_t affinity_mask;
+ u32 dim_coal_hw;
};
struct ionic_cq {
- void *base;
- dma_addr_t base_pa;
struct ionic_lif *lif;
struct ionic_cq_info *info;
- struct ionic_cq_info *tail;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
+ u16 tail_idx;
bool done_color;
unsigned int num_descs;
- u64 compl_count;
unsigned int desc_size;
+ u64 compl_count;
+ void *base;
+ dma_addr_t base_pa;
};
struct ionic;
@@ -246,12 +268,12 @@ static inline void ionic_intr_init(struct ionic_dev *idev,
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
- unsigned int avail = q->tail->index;
+ unsigned int avail = q->tail_idx;
- if (q->head->index >= avail)
- avail += q->head->left - 1;
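+ /* one slot is left unused to distinguish a full ring from an empty one */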
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
else
- avail -= q->head->index + 1;
+ avail -= q->head_idx + 1;
return avail;
}
@@ -263,7 +285,6 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
-void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c4f4fd469fe3..51d64718ed9f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -9,6 +9,15 @@
#include "ionic_lif.h"
#include "ionic_devlink.h"
+static int ionic_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic *ionic = devlink_priv(dl);
+
+ return ionic_firmware_update(ionic->lif, params->file_name, extack);
+}
+
static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -48,6 +57,7 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
static const struct devlink_ops ionic_dl_ops = {
.info_get = ionic_dl_info_get,
+ .flash_update = ionic_dl_flash_update,
};
struct ionic *ionic_devlink_alloc(struct device *dev)
@@ -85,7 +95,7 @@ int ionic_devlink_register(struct ionic *ionic)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
else
devlink_port_type_eth_set(&ionic->dl_port,
- ionic->master_lif->netdev);
+ ionic->lif->netdev);
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
index 0690172fc57a..5c01a9e306d8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
@@ -6,6 +6,9 @@
#include <net/devlink.h>
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack);
+
struct ionic *ionic_devlink_alloc(struct device *dev);
void ionic_devlink_free(struct ionic *ionic);
int ionic_devlink_register(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3c57c331729f..35c72d4a78b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(ks, supported);
+ if (!idev->port_info) {
+ netdev_err(netdev, "port_info not initialized\n");
+ return -EOPNOTSUPP;
+ }
+
/* The port_info data is found in a DMA space that the NIC keeps
* up-to-date, so there's no need to request the data from the
* NIC; we already have it in our memory space.
@@ -298,8 +303,8 @@ static void ionic_get_pauseparam(struct net_device *netdev,
pause_type = lif->ionic->idev.port_info->config.pause_type;
if (pause_type) {
- pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0;
- pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0;
+ pause->rx_pause = (pause_type & IONIC_PAUSE_F_RX) ? 1 : 0;
+ pause->tx_pause = (pause_type & IONIC_PAUSE_F_TX) ? 1 : 0;
}
}
@@ -406,6 +411,13 @@ static int ionic_get_coalesce(struct net_device *netdev,
coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ coalesce->use_adaptive_tx_coalesce = test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ else
+ coalesce->use_adaptive_tx_coalesce = 0;
+
+ coalesce->use_adaptive_rx_coalesce = test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+
return 0;
}
@@ -414,10 +426,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_identity *ident;
- struct ionic_qcq *qcq;
+ u32 rx_coal, rx_dim;
+ u32 tx_coal, tx_dim;
unsigned int i;
- u32 rx_coal;
- u32 tx_coal;
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
@@ -426,10 +437,11 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EIO;
}
- /* Tx normally shares Rx interrupt, so only change Rx */
+ /* Tx normally shares Rx interrupt, so only change Rx if not split */
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
- coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
- netdev_warn(netdev, "only the rx-usecs can be changed\n");
+ (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs ||
+ coalesce->use_adaptive_tx_coalesce)) {
+ netdev_warn(netdev, "only rx parameters can be changed\n");
return -EINVAL;
}
@@ -449,32 +461,44 @@ static int ionic_set_coalesce(struct net_device *netdev,
/* Save the new values */
lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (rx_coal != lif->rx_coalesce_hw) {
- lif->rx_coalesce_hw = rx_coal;
-
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->rxqcqs[i].qcq;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
- lif->rx_coalesce_hw);
- }
- }
- }
+ lif->rx_coalesce_hw = rx_coal;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
else
lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (tx_coal != lif->tx_coalesce_hw) {
- lif->tx_coalesce_hw = tx_coal;
+ lif->tx_coalesce_hw = tx_coal;
+
+ if (coalesce->use_adaptive_rx_coalesce) {
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = rx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = 0;
+ }
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->txqcqs[i].qcq;
+ if (coalesce->use_adaptive_tx_coalesce) {
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = tx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = 0;
+ }
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ if (lif->rxqcqs[i]->flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+ lif->rxqcqs[i]->intr.dim_coal_hw = rx_dim;
+ }
+
+ if (lif->txqcqs[i]->flags & IONIC_QCQ_F_INTR) {
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ lif->txqcqs[i]->intr.dim_coal_hw = tx_dim;
}
}
}
@@ -493,18 +517,14 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
-static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_ringparam *ring = arg;
-
- lif->ntxq_descs = ring->tx_pending;
- lif->nrxq_descs = ring->rx_pending;
-}
-
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue_params qparam;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -522,7 +542,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- return ionic_reset_queues(lif, ionic_set_ringsize, ring);
+ if (ring->tx_pending != lif->ntxq_descs)
+ netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
+ lif->ntxq_descs, ring->tx_pending);
+
+ if (ring->rx_pending != lif->nrxq_descs)
+ netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
+ lif->nrxq_descs, ring->rx_pending);
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+ return 0;
+ }
+
+ qparam.ntxq_descs = ring->tx_pending;
+ qparam.nrxq_descs = ring->rx_pending;
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
static void ionic_get_channels(struct net_device *netdev,
@@ -544,32 +585,15 @@ static void ionic_get_channels(struct net_device *netdev,
}
}
-static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_channels *ch = arg;
-
- if (ch->combined_count) {
- lif->nxqs = ch->combined_count;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
- lif->tx_coalesce_hw = lif->rx_coalesce_hw;
- netdev_info(lif->netdev, "Sharing queue interrupts\n");
- }
- } else {
- lif->nxqs = ch->rx_count;
- if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- netdev_info(lif->netdev, "Splitting queue interrupts\n");
- }
- }
-}
-
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int new_cnt;
+ struct ionic_queue_params qparam;
+ int max_cnt;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
@@ -577,20 +601,63 @@ static int ionic_set_channels(struct net_device *netdev,
}
if (ch->combined_count && ch->rx_count) {
- netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
+ netdev_info(netdev, "Use either combined or rx and tx, not both\n");
return -EINVAL;
}
- if (ch->combined_count)
- new_cnt = ch->combined_count;
- else
- new_cnt = ch->rx_count;
+ max_cnt = lif->ionic->ntxqs_per_lif;
+ if (ch->combined_count) {
+ if (ch->combined_count > max_cnt)
+ return -EINVAL;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Sharing queue interrupts\n");
+ else if (ch->combined_count == lif->nxqs)
+ return 0;
- if (lif->nxqs != new_cnt)
- netdev_info(netdev, "Changing queue count from %d to %d\n",
- lif->nxqs, new_cnt);
+ if (lif->nxqs != ch->combined_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->combined_count);
- return ionic_reset_queues(lif, ionic_set_queuecount, ch);
+ qparam.nxqs = ch->combined_count;
+ qparam.intr_split = 0;
+ } else {
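+ /* split interrupts use a vector per Tx and per Rx queue,
+ * so only half as many queue pairs are available
+ */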
+ max_cnt /= 2;
+ if (ch->rx_count > max_cnt)
+ return -EINVAL;
+
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Splitting queue interrupts\n");
+ else if (ch->rx_count == lif->nxqs)
+ return 0;
+
+ if (lif->nxqs != ch->rx_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->rx_count);
+
+ qparam.nxqs = ch->rx_count;
+ qparam.intr_split = 1;
+ }
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->nxqs = qparam.nxqs;
+
+ if (qparam.intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+ return 0;
+ }
+
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
@@ -807,7 +874,9 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
new file mode 100644
index 000000000000..d7bbf336c6f6
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Pensando Systems, Inc */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+
+#include "ionic.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+#include "ionic_devlink.h"
+
+/* The worst-case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which happens very seldom. The normal case takes
+ * about 30-35 seconds. Since the driver can't tell whether a CPLD update
+ * will happen, we set the timeout for the worst case.
+ */
+#define IONIC_FW_INSTALL_TIMEOUT (25 * 60)
+#define IONIC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic log updates during fw file download */
+#define IONIC_FW_INTERVAL_FRACTION 32
+
+static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
+ u32 offset, u32 length)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_install(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_INSTALL_ASYNC
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_activate(struct ionic_dev *idev, u8 slot)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static int ionic_fw_status_long_wait(struct ionic *ionic,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
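+ /* re-issue the status opcode until it stops returning
+ * -EAGAIN/-ETIMEDOUT or the overall timeout expires
+ */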
+ do {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ msleep(20);
+ } while (time_before(jiffies, end_time) && (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(ionic->dev, "DEV_CMD firmware wait %s timed out\n", label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
+
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct net_device *netdev = lif->netdev;
+ struct ionic *ionic = lif->ionic;
+ union ionic_dev_cmd_comp comp;
+ u32 buf_sz, copy_sz, offset;
+ const struct firmware *fw;
+ struct devlink *dl;
+ int next_interval;
+ int err = 0;
+ u8 fw_slot;
+
+ netdev_info(netdev, "Installing firmware %s\n", fw_name);
+
+ dl = priv_to_devlink(ionic);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+
+ err = request_firmware(&fw, fw_name, ionic->dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to find firmware file");
+ goto err_out;
+ }
+
+ buf_sz = sizeof(idev->dev_cmd_regs->data);
+
+ netdev_dbg(netdev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
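+ /* push the file through the dev_cmd data window one buf_sz
+ * chunk at a time, posting periodic progress notifications
+ */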
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ offset, fw->size);
+ next_interval = offset + (fw->size / IONIC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&ionic->dev_cmd_lock);
+ memcpy_toio(&idev->dev_cmd_regs->data, fw->data + offset, copy_sz);
+ ionic_dev_cmd_firmware_download(idev,
+ offsetof(union ionic_dev_cmd_regs, data),
+ offset, copy_sz);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ netdev_err(netdev,
+ "download failed offset 0x%x addr 0x%lx len 0x%x\n",
+ offset, offsetof(union ionic_dev_cmd_regs, data),
+ copy_sz);
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ IONIC_FW_INSTALL_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_install(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
+ fw_slot = comp.fw_control.slot;
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Installing",
+ IONIC_FW_INSTALL_TIMEOUT,
+ IONIC_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ IONIC_FW_SELECT_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_activate(idev, fw_slot);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Selecting",
+ IONIC_FW_SELECT_TIMEOUT,
+ IONIC_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ netdev_info(netdev, "Firmware update completed\n");
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done", NULL, 0, 0);
+ release_firmware(fw);
+ devlink_flash_update_end_notify(dl);
+ return err;
+}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index acc94b244cf3..31ccfcdc2b0a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -63,8 +63,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_RESET = 245,
/* Firmware commands */
- IONIC_CMD_FW_DOWNLOAD = 254,
- IONIC_CMD_FW_CONTROL = 255,
+ IONIC_CMD_FW_DOWNLOAD = 252,
+ IONIC_CMD_FW_CONTROL = 253,
+ IONIC_CMD_FW_DOWNLOAD_V1 = 254,
+ IONIC_CMD_FW_CONTROL_V1 = 255,
};
/**
@@ -94,6 +96,7 @@ enum ionic_status_code {
IONIC_RC_ERROR = 29, /* Generic error */
IONIC_RC_ERDMA = 30, /* Generic RDMA error */
IONIC_RC_EVFID = 31, /* VF ID does not exist */
+ IONIC_RC_EBAD_FW = 32, /* FW file is invalid or corrupted */
};
enum ionic_notifyq_opcode {
@@ -2069,14 +2072,23 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
/**
* enum ionic_fw_control_oper - FW control operations
- * @IONIC_FW_RESET: Reset firmware
- * @IONIC_FW_INSTALL: Install firmware
- * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @IONIC_FW_INSTALL_STATUS: Firmware installation status
+ * @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
*/
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0,
- IONIC_FW_INSTALL = 1,
- IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_INSTALL_ASYNC = 3,
+ IONIC_FW_INSTALL_STATUS = 4,
+ IONIC_FW_ACTIVATE_ASYNC = 5,
+ IONIC_FW_ACTIVATE_STATUS = 6,
+ IONIC_FW_UPDATE_CLEANUP = 7,
};
/**
@@ -2689,6 +2701,9 @@ union ionic_dev_cmd {
struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
+
+ struct ionic_fw_download_cmd fw_download;
+ struct ionic_fw_control_cmd fw_control;
};
union ionic_dev_cmd_comp {
@@ -2722,6 +2737,9 @@ union ionic_dev_cmd_comp {
struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
+
+ ionic_fw_download_comp fw_download;
+ struct ionic_fw_control_comp fw_control;
};
/**
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 26988ad7ec97..a12df3946a07 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -36,25 +36,44 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
+static void ionic_txrx_deinit(struct ionic_lif *lif);
+static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
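+
+/* dim worker: translate the moderation profile chosen by the net_dim
+ * library into this device's coalesce units; store at least 1, since a
+ * dim_coal_hw of 0 is used to mean adaptive coalescing is disabled
+ */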
+static void ionic_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder cur_moder;
+ struct ionic_qcq *qcq;
+ u32 new_coal;
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ qcq = container_of(dim, struct ionic_qcq, dim);
+ new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
+ qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ dim->state = DIM_START_MEASURE;
+}
+
static void ionic_lif_deferred_work(struct work_struct *work)
{
struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
struct ionic_deferred *def = &lif->deferred;
struct ionic_deferred_work *w = NULL;
- spin_lock_bh(&def->lock);
- if (!list_empty(&def->list)) {
- w = list_first_entry(&def->list,
- struct ionic_deferred_work, list);
- list_del(&w->list);
- }
- spin_unlock_bh(&def->lock);
+ do {
+ spin_lock_bh(&def->lock);
+ if (!list_empty(&def->list)) {
+ w = list_first_entry(&def->list,
+ struct ionic_deferred_work, list);
+ list_del(&w->list);
+ }
+ spin_unlock_bh(&def->lock);
+
+ if (!w)
+ break;
- if (w) {
switch (w->type) {
case IONIC_DW_TYPE_RX_MODE:
ionic_lif_rx_mode(lif, w->rx_mode);
@@ -78,8 +97,8 @@ static void ionic_lif_deferred_work(struct work_struct *work)
break;
}
kfree(w);
- schedule_work(&def->work);
- }
+ w = NULL;
+ } while (true);
}
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
@@ -135,7 +154,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
-void ionic_link_status_check_request(struct ionic_lif *lif)
+void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
struct ionic_deferred_work *work;
@@ -143,10 +162,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif)
if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
- if (in_interrupt()) {
+ if (!can_sleep) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
+ if (!work) {
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
return;
+ }
work->type = IONIC_DW_TYPE_LINK_STATUS;
ionic_lif_deferred_enqueue(&lif->deferred, work);
@@ -243,31 +264,30 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq)
+static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
- struct ionic_queue *q = &qcq->q;
- struct ionic_lif *lif = q->lif;
- struct ionic_dev *idev;
- struct device *dev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
.cmd.q_control = {
.opcode = IONIC_CMD_Q_CONTROL,
- .lif_index = cpu_to_le16(lif->index),
- .type = q->type,
- .index = cpu_to_le32(q->index),
.oper = IONIC_Q_DISABLE,
},
};
- idev = &lif->ionic->idev;
- dev = lif->ionic->dev;
+ if (!qcq)
+ return -ENXIO;
- dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ q = &qcq->q;
+ lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ struct ionic_dev *idev = &lif->ionic->idev;
+
+ cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
@@ -275,7 +295,17 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
napi_disable(&qcq->napi);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ if (send_to_hw) {
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ }
+
+ return err;
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -297,6 +327,18 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags &= ~IONIC_QCQ_F_INITED;
}
+static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
+ return;
+
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+}
+
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -306,51 +348,62 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_debugfs_del_qcq(qcq);
- dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
- qcq->base = NULL;
- qcq->base_pa = 0;
+ if (qcq->q_base) {
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+ qcq->q_base = NULL;
+ qcq->q_base_pa = 0;
+ }
- if (qcq->flags & IONIC_QCQ_F_INTR) {
- irq_set_affinity_hint(qcq->intr.vector, NULL);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
- qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ if (qcq->cq_base) {
+ dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
+ qcq->cq_base = NULL;
+ qcq->cq_base_pa = 0;
+ }
+
+ if (qcq->sg_base) {
+ dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
+ qcq->sg_base = NULL;
+ qcq->sg_base_pa = 0;
}
- devm_kfree(dev, qcq->cq.info);
- qcq->cq.info = NULL;
- devm_kfree(dev, qcq->q.info);
- qcq->q.info = NULL;
- devm_kfree(dev, qcq);
+ ionic_qcq_intr_free(lif, qcq);
+
+ if (qcq->cq.info) {
+ devm_kfree(dev, qcq->cq.info);
+ qcq->cq.info = NULL;
+ }
+ if (qcq->q.info) {
+ devm_kfree(dev, qcq->q.info);
+ qcq->q.info = NULL;
+ }
}
static void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int i;
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
+ devm_kfree(dev, lif->notifyqcq);
lif->notifyqcq = NULL;
}
if (lif->adminqcq) {
ionic_qcq_free(lif, lif->adminqcq);
+ devm_kfree(dev, lif->adminqcq);
lif->adminqcq = NULL;
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqstats);
+ lif->rxqstats = NULL;
devm_kfree(dev, lif->rxqcqs);
lif->rxqcqs = NULL;
}
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqstats);
+ lif->txqstats = NULL;
devm_kfree(dev, lif->txqcqs);
lif->txqcqs = NULL;
}
@@ -368,6 +421,53 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.index = src_qcq->intr.index;
}
+static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ int err;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ err = ionic_intr_alloc(lif, &qcq->intr);
+ if (err) {
+ netdev_warn(lif->netdev, "no intr for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out;
+ }
+
+ err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
+ if (err < 0) {
+ netdev_warn(lif->netdev, "no vector for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out_free_intr;
+ }
+ qcq->intr.vector = err;
+ ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_SET);
+
+ err = ionic_request_irq(lif, qcq);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+ /* try to get the irq on the local numa node first */
+ qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (qcq->intr.cpu != -1)
+ cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+
+ netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
+ return 0;
+
+err_out_free_intr:
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+err_out:
+ return err;
+}
+
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int index,
const char *name, unsigned int flags,
@@ -377,7 +477,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- u32 q_size, cq_size, sg_size, total_size;
struct device *dev = lif->ionic->dev;
void *q_base, *cq_base, *sg_base;
dma_addr_t cq_base_pa = 0;
@@ -388,21 +487,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
*qcq = NULL;
- q_size = num_descs * desc_size;
- cq_size = num_descs * cq_desc_size;
- sg_size = num_descs * sg_desc_size;
-
- total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
- /* Note: aligning q_size/cq_size is not enough due to cq_base
- * address aligning as q_base could be not aligned to the page.
- * Adding PAGE_SIZE.
- */
- total_size += PAGE_SIZE;
- if (flags & IONIC_QCQ_F_SG) {
- total_size += ALIGN(sg_size, PAGE_SIZE);
- total_size += PAGE_SIZE;
- }
-
new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
if (!new) {
netdev_err(lif->netdev, "Cannot allocate queue structure\n");
@@ -417,7 +501,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out_free_qcq;
}
new->q.type = type;
@@ -426,41 +510,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out;
+ goto err_out_free_q_info;
}
- if (flags & IONIC_QCQ_F_INTR) {
- err = ionic_intr_alloc(lif, &new->intr);
- if (err) {
- netdev_warn(lif->netdev, "no intr for %s: %d\n",
- name, err);
- goto err_out;
- }
-
- err = ionic_bus_get_irq(lif->ionic, new->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- name, err);
- goto err_out_free_intr;
- }
- new->intr.vector = err;
- ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
- IONIC_INTR_MASK_SET);
-
- err = ionic_request_irq(lif, new);
- if (err) {
- netdev_warn(lif->netdev, "irq request failed %d\n", err);
- goto err_out_free_intr;
- }
-
- new->intr.cpu = cpumask_local_spread(new->intr.index,
- dev_to_node(dev));
- if (new->intr.cpu != -1)
- cpumask_set_cpu(new->intr.cpu,
- &new->intr.affinity_mask);
- } else {
- new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
- }
+ err = ionic_alloc_qcq_interrupt(lif, new);
+ if (err)
+ goto err_out;
new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
GFP_KERNEL);
@@ -473,46 +528,95 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_irq;
+ goto err_out_free_cq_info;
}
- new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
- GFP_KERNEL);
- if (!new->base) {
- netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
- err = -ENOMEM;
- goto err_out_free_irq;
- }
-
- new->total_size = total_size;
+ if (flags & IONIC_QCQ_F_NOTIFYQ) {
+ int q_size, cq_size;
- q_base = new->base;
- q_base_pa = new->base_pa;
+ /* the q and cq descriptor rings must be contiguous for the notifyq */
+ q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
+ cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
- cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
+ new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_base = dma_alloc_coherent(dev, new->q_size,
+ &new->q_base_pa, GFP_KERNEL);
+ if (!new->q_base) {
+ netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+ ionic_q_map(&new->q, q_base, q_base_pa);
+
+ cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
+ cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
+ } else {
+ new->q_size = PAGE_SIZE + (num_descs * desc_size);
+ new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
+ GFP_KERNEL);
+ if (!new->q_base) {
+ netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+ ionic_q_map(&new->q, q_base, q_base_pa);
+
+ new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
+ new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
+ GFP_KERNEL);
+ if (!new->cq_base) {
+ netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
+ }
if (flags & IONIC_QCQ_F_SG) {
- sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
- PAGE_SIZE);
- sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+ new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
+ new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
+ GFP_KERNEL);
+ if (!new->sg_base) {
+ netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq;
+ }
+ sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
- ionic_q_map(&new->q, q_base, q_base_pa);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ INIT_WORK(&new->dim.work, ionic_dim_work);
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
*qcq = new;
return 0;
+err_out_free_cq:
+ dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
+err_out_free_q:
+ dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
+err_out_free_cq_info:
+ devm_kfree(dev, new->cq.info);
err_out_free_irq:
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
-err_out_free_intr:
- if (flags & IONIC_QCQ_F_INTR)
ionic_intr_free(lif->ionic, new->intr.index);
+ }
+err_out_free_q_info:
+ devm_kfree(dev, new->q.info);
+err_out_free_qcq:
+ devm_kfree(dev, new);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -521,10 +625,8 @@ err_out:
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int q_list_size;
unsigned int flags;
int err;
- int i;
flags = IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
@@ -544,63 +646,50 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0, lif->kern_pid, &lif->notifyqcq);
if (err)
- goto err_out_free_adminqcq;
+ goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
}
- q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
err = -ENOMEM;
- lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->txqcqs)
- goto err_out_free_notifyqcq;
- for (i = 0; i < lif->nxqs; i++) {
- lif->txqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->txqcqs[i].stats)
- goto err_out_free_tx_stats;
- }
-
- lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ goto err_out;
+ lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->rxqcqs)
- goto err_out_free_tx_stats;
- for (i = 0; i < lif->nxqs; i++) {
- lif->rxqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->rxqcqs[i].stats)
- goto err_out_free_rx_stats;
- }
+ goto err_out;
- return 0;
+ lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_tx_stats), GFP_KERNEL);
+ if (!lif->txqstats)
+ goto err_out;
+ lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_rx_stats), GFP_KERNEL);
+ if (!lif->rxqstats)
+ goto err_out;
-err_out_free_rx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
- devm_kfree(dev, lif->rxqcqs);
- lif->rxqcqs = NULL;
-err_out_free_tx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
- devm_kfree(dev, lif->txqcqs);
- lif->txqcqs = NULL;
-err_out_free_notifyqcq:
- if (lif->notifyqcq) {
- ionic_qcq_free(lif, lif->notifyqcq);
- lif->notifyqcq = NULL;
- }
-err_out_free_adminqcq:
- ionic_qcq_free(lif, lif->adminqcq);
- lif->adminqcq = NULL;
+ return 0;
+err_out:
+ ionic_qcqs_free(lif);
return err;
}
+static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
+{
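+ /* reset the indices and wipe stale descriptors so a reused qcq starts clean */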
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+ memset(qcq->q_base, 0, qcq->q_size);
+ memset(qcq->cq_base, 0, qcq->cq_size);
+ memset(qcq->sg_base, 0, qcq->sg_size);
+}
+
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -626,10 +715,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
unsigned int intr_index;
int err;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ if (qcq->flags & IONIC_QCQ_F_INTR)
intr_index = qcq->intr.index;
else
- intr_index = lif->rxqcqs[q->index].qcq->intr.index;
+ intr_index = lif->rxqcqs[q->index]->intr.index;
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
@@ -640,9 +729,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -697,9 +784,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -751,7 +836,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
switch (le16_to_cpu(comp->event.ecode)) {
case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif);
+ ionic_link_status_check_request(lif, false);
break;
case IONIC_EVENT_RESET:
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -771,21 +856,6 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
return true;
}
-static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
-{
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_cq *cq = &lif->notifyqcq->cq;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
- NULL, NULL);
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-
- return work_done;
-}
-
static bool ionic_adminq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
@@ -801,15 +871,36 @@ static bool ionic_adminq_service(struct ionic_cq *cq,
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
+ struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
struct ionic_lif *lif = napi_to_cq(napi)->lif;
+ struct ionic_dev *idev = &lif->ionic->idev;
+ unsigned int flags = 0;
int n_work = 0;
int a_work = 0;
+ int work_done;
- if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
- n_work = ionic_notifyq_clean(lif, budget);
- a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
+ if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
+ n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
+ ionic_notifyq_service, NULL, NULL);
+
+ if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
+ a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
+ ionic_adminq_service, NULL, NULL);
+
+ work_done = max(n_work, a_work);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ lif->adminqcq->cq.bound_intr->rearm_count++;
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ intr->index,
+ n_work + a_work, flags);
+ }
- return max(n_work, a_work);
+ return work_done;
}
void ionic_get_stats64(struct net_device *netdev,
@@ -928,9 +1019,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return 0;
}
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
+ bool can_sleep)
{
- struct ionic *ionic = lif->ionic;
struct ionic_deferred_work *work;
unsigned int nmfilters;
unsigned int nufilters;
@@ -940,8 +1031,8 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
* here before checking the need for deferral so that we
* can return an overflow error to the stack.
*/
- nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
- nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
+ nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
+ nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
lif->nmcast++;
@@ -957,7 +1048,7 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
lif->nucast--;
}
- if (in_interrupt()) {
+ if (!can_sleep) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "%s OOM\n", __func__);
@@ -983,12 +1074,22 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
+}
+
+static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
+}
+
+static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
}
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
@@ -1028,11 +1129,12 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
lif->rx_mode = rx_mode;
}
-static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
+static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
+ bool from_ndo)
{
struct ionic_deferred_work *work;
- if (in_interrupt()) {
+ if (from_ndo) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "%s OOM\n", __func__);
@@ -1047,15 +1149,21 @@ static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
}
}
-static void ionic_set_rx_mode(struct net_device *netdev)
+static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
+{
+ if (from_ndo)
+ __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ else
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
+}
+
+static void ionic_dev_mc_sync(struct net_device *netdev, bool from_ndo)
+{
+ if (from_ndo)
+ __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ else
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_identity *ident;
unsigned int nfilters;
unsigned int rx_mode;
- ident = &lif->ionic->ident;
-
rx_mode = IONIC_RX_MODE_F_UNICAST;
rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
@@ -1069,8 +1177,8 @@ static void ionic_set_rx_mode(struct net_device *netdev)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
- nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
+ ionic_dev_uc_sync(netdev, from_ndo);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
lif->uc_overflow = true;
@@ -1081,8 +1189,8 @@ static void ionic_set_rx_mode(struct net_device *netdev)
}
/* same for multicast */
- __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
- nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
+ ionic_dev_mc_sync(netdev, from_ndo);
+ nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
lif->mc_overflow = true;
@@ -1093,7 +1201,12 @@ static void ionic_set_rx_mode(struct net_device *netdev)
}
if (lif->rx_mode != rx_mode)
- _ionic_lif_rx_mode(lif, rx_mode);
+ _ionic_lif_rx_mode(lif, rx_mode, from_ndo);
+}
+
+static void ionic_ndo_set_rx_mode(struct net_device *netdev)
+{
+ ionic_set_rx_mode(netdev, true);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1315,6 +1428,35 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_addr_add(netdev, mac);
}
+static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
+{
+ /* Stop and clean the queues before reconfiguration */
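+ /* note: queue_lock is taken here and released in ionic_start_queues_reconfig() */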
+ mutex_lock(&lif->queue_lock);
+ netif_device_detach(lif->netdev);
+ ionic_stop_queues(lif);
+ ionic_txrx_deinit(lif);
+}
+
+static int ionic_start_queues_reconfig(struct ionic_lif *lif)
+{
+ int err;
+
+ /* Re-init the queues after reconfiguration */
+
+ /* The only way txrx_init can fail here is if communication
+ * with FW is suddenly broken. There's not much we can do at
+ * this point: error messages have already been printed, so we
+ * carry on and the user can eventually do a DOWN and UP to try
+ * to reset and clear the issue.
+ */
+ err = ionic_txrx_init(lif);
+ mutex_unlock(&lif->queue_lock);
+ ionic_link_status_check_request(lif, true);
+ netif_device_attach(lif->netdev);
+
+ return err;
+}
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1334,9 +1476,12 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
netdev->mtu = new_mtu;
- err = ionic_reset_queues(lif, NULL, NULL);
+ /* if we're not running, nothing more to do */
+ if (!netif_running(netdev))
+ return 0;
- return err;
+ ionic_stop_queues_reconfig(lif);
+ return ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout_work(struct work_struct *ws)
@@ -1345,9 +1490,14 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
netdev_info(lif->netdev, "Tx Timeout recovery\n");
- rtnl_lock();
- ionic_reset_queues(lif, NULL, NULL);
- rtnl_unlock();
+ /* if we were stopped before this scheduled job was launched,
+ * don't bother the queues as they are already stopped.
+ */
+ if (!netif_running(lif->netdev))
+ return;
+
+ ionic_stop_queues_reconfig(lif);
+ ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
@@ -1478,22 +1628,16 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
- int err;
+ int err = 0;
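+
+ /* after the first FW timeout, stop sending device commands and
+ * just clean up the local driver state
+ */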
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
}
@@ -1502,18 +1646,17 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
- ionic_tx_empty(&lif->txqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_tx_flush(&lif->txqcqs[i]->cq);
+ ionic_tx_empty(&lif->txqcqs[i]->q);
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
- ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
+ ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
lif->rx_mode = 0;
@@ -1524,16 +1667,18 @@ static void ionic_txrx_free(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->txqcqs[i].qcq);
- lif->txqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
+ lif->txqcqs[i] = NULL;
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
- lif->rxqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
+ lif->rxqcqs[i] = NULL;
}
}
}
@@ -1561,17 +1706,19 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
sg_desc_sz,
- lif->kern_pid, &lif->txqcqs[i].qcq);
+ lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->txqcqs[i].qcq->intr.index,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
+ }
- lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
@@ -1581,20 +1728,21 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
sizeof(struct ionic_rxq_sg_desc),
- lif->kern_pid, &lif->rxqcqs[i].qcq);
+ lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index,
+ lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
+ lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
- lif->txqcqs[i].qcq);
+ ionic_link_qcq_interrupts(lif->rxqcqs[i],
+ lif->txqcqs[i]);
- lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
}
return 0;
@@ -1611,13 +1759,13 @@ static int ionic_txrx_init(struct ionic_lif *lif)
int err;
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
+ err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
if (err)
goto err_out;
- err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
+ err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
if (err) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
goto err_out;
}
}
@@ -1625,14 +1773,14 @@ static int ionic_txrx_init(struct ionic_lif *lif)
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_init(lif);
- ionic_set_rx_mode(lif->netdev);
+ ionic_set_rx_mode(lif->netdev, false);
return 0;
err_out:
while (i--) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
}
return err;
@@ -1640,18 +1788,24 @@ err_out:
static int ionic_txrx_enable(struct ionic_lif *lif)
{
+ int derr = 0;
int i, err;
for (i = 0; i < lif->nxqs; i++) {
- ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
- err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
+ dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
+ err = -ENXIO;
+ goto err_out;
+ }
+
+ ionic_rx_fill(&lif->rxqcqs[i]->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
- err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- if (err != -ETIMEDOUT)
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
goto err_out;
}
}
@@ -1660,12 +1814,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
+ derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
}
return err;
@@ -1688,7 +1838,7 @@ static int ionic_start_queues(struct ionic_lif *lif)
return 0;
}
-int ionic_open(struct net_device *netdev)
+static int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
@@ -1734,7 +1884,7 @@ static void ionic_stop_queues(struct ionic_lif *lif)
ionic_txrx_disable(lif);
}
-int ionic_stop(struct net_device *netdev)
+static int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1764,11 +1914,11 @@ static int ionic_get_vf_config(struct net_device *netdev,
ret = -EINVAL;
} else {
ivf->vf = vf;
- ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
ivf->qos = 0;
ivf->spoofchk = ionic->vfs[vf].spoofchk;
ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
ivf->trusted = ionic->vfs[vf].trusted;
ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
}
@@ -1868,7 +2018,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
if (!ret)
- ionic->vfs[vf].vlanid = vlan;
+ ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
up_write(&ionic->vf_op_lock);
@@ -1897,7 +2047,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
if (!ret)
- lif->ionic->vfs[vf].maxrate = tx_max;
+ lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
@@ -1998,7 +2148,7 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_stop = ionic_stop,
.ndo_start_xmit = ionic_start_xmit,
.ndo_get_stats64 = ionic_get_stats64,
- .ndo_set_rx_mode = ionic_set_rx_mode,
+ .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
.ndo_set_mac_address = ionic_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -2016,35 +2166,227 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
-{
- bool running;
- int err = 0;
+static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
+{
+ /* only swapping the queues, not the napi, flags, or other state */
+ swap(a->q.num_descs, b->q.num_descs);
+ swap(a->q.base, b->q.base);
+ swap(a->q.base_pa, b->q.base_pa);
+ swap(a->q.info, b->q.info);
+ swap(a->q_base, b->q_base);
+ swap(a->q_base_pa, b->q_base_pa);
+ swap(a->q_size, b->q_size);
+
+ swap(a->q.sg_base, b->q.sg_base);
+ swap(a->q.sg_base_pa, b->q.sg_base_pa);
+ swap(a->sg_base, b->sg_base);
+ swap(a->sg_base_pa, b->sg_base_pa);
+ swap(a->sg_size, b->sg_size);
+
+ swap(a->cq.num_descs, b->cq.num_descs);
+ swap(a->cq.base, b->cq.base);
+ swap(a->cq.base_pa, b->cq.base_pa);
+ swap(a->cq.info, b->cq.info);
+ swap(a->cq_base, b->cq_base);
+ swap(a->cq_base_pa, b->cq_base_pa);
+ swap(a->cq_size, b->cq_size);
+}
+
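The swap helper above underpins the reconfigure path that follows: replacement rings are built while the old ones are still serving traffic, and only the data-path fields are exchanged, so the interrupt and NAPI wiring is preserved. A minimal sketch of that shape, with invented demo_* types and helpers standing in for the ionic ones (they are assumptions, not driver APIs):

	struct demo_ring {
		void *base;
		dma_addr_t base_pa;
		unsigned int num_descs;
	};

	int demo_ring_alloc(struct demo_ring *r, unsigned int n);
	void demo_ring_stop(struct demo_ring *r);
	int demo_ring_start(struct demo_ring *r);
	void demo_ring_free(struct demo_ring *r);

	static int demo_reconfigure(struct demo_ring *live, unsigned int new_descs)
	{
		struct demo_ring shadow = {};
		int err;

		err = demo_ring_alloc(&shadow, new_descs);
		if (err)
			return err;		/* live ring left untouched */

		demo_ring_stop(live);
		swap(live->base, shadow.base);	/* data-path fields only */
		swap(live->base_pa, shadow.base_pa);
		swap(live->num_descs, shadow.num_descs);
		err = demo_ring_start(live);

		demo_ring_free(&shadow);	/* now holds the old rings */
		return err;
	}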
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ struct ionic_qcq **tx_qcqs = NULL;
+ struct ionic_qcq **rx_qcqs = NULL;
+ unsigned int sg_desc_sz;
+ unsigned int flags;
+ int err = -ENOMEM;
+ unsigned int i;
- mutex_lock(&lif->queue_lock);
- running = netif_running(lif->netdev);
- if (running) {
- netif_device_detach(lif->netdev);
- err = ionic_stop(lif->netdev);
+ /* allocate temporary qcq arrays to hold new queue structs */
+ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
+ tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!tx_qcqs)
+ goto err_out;
+ }
+ if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
+ rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!rx_qcqs)
+ goto err_out;
+ }
+
+ /* allocate new desc_info and rings, but leave the interrupt setup
+ * until later so as to not mess with the still-running queues
+ */
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ qparam->ntxq_descs,
+ sizeof(struct ionic_txq_desc),
+ sizeof(struct ionic_txq_comp),
+ sg_desc_sz,
+ lif->kern_pid, &tx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ qparam->nrxq_descs,
+ sizeof(struct ionic_rxq_desc),
+ sizeof(struct ionic_rxq_comp),
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &rx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ /* stop and clean the queues */
+ ionic_stop_queues_reconfig(lif);
+
+ if (qparam->nxqs != lif->nxqs) {
+ err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
if (err)
- goto reset_out;
+ goto err_out_reinit_unlock;
+ err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
+ if (err) {
+ netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
+ goto err_out_reinit_unlock;
+ }
}
- if (cb)
- cb(lif, arg);
+ /* swap new desc_info and rings, keeping existing interrupt config */
+ if (tx_qcqs) {
+ lif->ntxq_descs = qparam->ntxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
+ }
- if (running) {
- err = ionic_open(lif->netdev);
- netif_device_attach(lif->netdev);
+ if (rx_qcqs) {
+ lif->nrxq_descs = qparam->nrxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
}
-reset_out:
- mutex_unlock(&lif->queue_lock);
+ /* if we need to change the interrupt layout, this is the time */
+ if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
+ qparam->nxqs != lif->nxqs) {
+ if (qparam->intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+
+ /* clear existing interrupt assignments */
+ for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
+ ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+ ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+ }
+
+ /* re-assign the interrupts */
+ for (i = 0; i < qparam->nxqs; i++) {
+ lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+
+ if (qparam->intr_split) {
+ lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i]->intr.index,
+ lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
+ } else {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
+ }
+ }
+ }
+
+ /* now we can rework the debugfs mappings */
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->txqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->rxqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
+ }
+ }
+
+ swap(lif->nxqs, qparam->nxqs);
+
+err_out_reinit_unlock:
+ /* re-init the queues, but don't lose an error code */
+ if (err)
+ ionic_start_queues_reconfig(lif);
+ else
+ err = ionic_start_queues_reconfig(lif);
+
+err_out:
+ /* free old allocs without cleaning intr */
+ for (i = 0; i < qparam->nxqs; i++) {
+ if (tx_qcqs && tx_qcqs[i]) {
+ tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, tx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, tx_qcqs[i]);
+ tx_qcqs[i] = NULL;
+ }
+ if (rx_qcqs && rx_qcqs[i]) {
+ rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, rx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, rx_qcqs[i]);
+ rx_qcqs[i] = NULL;
+ }
+ }
+
+ /* free q array */
+ if (rx_qcqs) {
+ devm_kfree(lif->ionic->dev, rx_qcqs);
+ rx_qcqs = NULL;
+ }
+ if (tx_qcqs) {
+ devm_kfree(lif->ionic->dev, tx_qcqs);
+ tx_qcqs = NULL;
+ }
+
+ /* clean the unused DMA and info allocations when the new set is
+ * smaller than the full array, but leave the qcq shells in place
+ */
+ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
return err;
}
-static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
+int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
union ionic_lif_identity *lid;
@@ -2055,7 +2397,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lid = kzalloc(sizeof(*lid), GFP_KERNEL);
if (!lid)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
netdev = alloc_etherdev_mqs(sizeof(*lif),
ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
@@ -2069,7 +2411,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif = netdev_priv(netdev);
lif->netdev = netdev;
- ionic->master_lif = lif;
+ ionic->lif = lif;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
@@ -2078,8 +2420,14 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->identity = lid;
lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
- ionic_lif_identify(ionic, lif->lif_type, lif->identity);
- lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
+ err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify type %d: %d\n",
+ lif->lif_type, err);
+ goto err_out_free_netdev;
+ }
+ lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
+ le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
@@ -2087,7 +2435,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nxqs = ionic->ntxqs_per_lif;
lif->ionic = ionic;
- lif->index = index;
+ lif->index = 0;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
@@ -2098,8 +2446,10 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->rx_coalesce_usecs);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
- snprintf(lif->name, sizeof(lif->name), "lif%u", index);
+ snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
spin_lock_init(&lif->adminq_lock);
@@ -2119,7 +2469,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_debugfs_add_lif(lif);
- /* allocate queues */
+ /* allocate control queues and txrx queue arrays */
+ ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out_free_lif_info;
@@ -2138,9 +2489,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
}
netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
- list_add_tail(&lif->list, &ionic->lifs);
-
- return lif;
+ return 0;
err_out_free_qcqs:
ionic_qcqs_free(lif);
@@ -2154,27 +2503,7 @@ err_out_free_netdev:
err_out_free_lid:
kfree(lid);
- return ERR_PTR(err);
-}
-
-int ionic_lifs_alloc(struct ionic *ionic)
-{
- struct ionic_lif *lif;
-
- INIT_LIST_HEAD(&ionic->lifs);
-
- /* only build the first lif, others are for later features */
- set_bit(0, ionic->lifbits);
-
- lif = ionic_lif_alloc(ionic, 0);
- if (IS_ERR_OR_NULL(lif)) {
- clear_bit(0, ionic->lifbits);
- return -ENOMEM;
- }
-
- ionic_lif_queue_identify(lif);
-
- return 0;
+ return err;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2209,7 +2538,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
}
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
ionic_reset(ionic);
ionic_qcqs_free(lif);
@@ -2227,12 +2556,20 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Up: restarting LIFs\n");
ionic_init_devinfo(ionic);
- ionic_port_init(ionic);
+ err = ionic_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_init(ionic);
+ if (err)
+ goto err_out;
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out;
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(lif);
if (err)
goto err_qcqs_free;
@@ -2252,7 +2589,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
}
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
- ionic_link_status_check_request(lif);
+ ionic_link_status_check_request(lif, true);
netif_device_attach(lif->netdev);
dev_info(ionic->dev, "FW Up: LIFs restarted\n");
@@ -2261,14 +2598,14 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err_txrx_free:
ionic_txrx_free(lif);
err_lifs_deinit:
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
-static void ionic_lif_free(struct ionic_lif *lif)
+void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2297,23 +2634,10 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
- list_del(&lif->list);
free_netdev(lif->netdev);
}
-void ionic_lifs_free(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
-
- ionic_lif_free(lif);
- }
-}
-
-static void ionic_lif_deinit(struct ionic_lif *lif)
+void ionic_lif_deinit(struct ionic_lif *lif)
{
if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
@@ -2334,17 +2658,6 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_reset(lif);
}
-void ionic_lifs_deinit(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- ionic_lif_deinit(lif);
- }
-}
-
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2468,7 +2781,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, true);
+ ionic_lif_addr(lif, netdev->dev_addr, true, true);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -2485,12 +2798,12 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, true);
+ ionic_lif_addr(lif, netdev->dev_addr, true, true);
return 0;
}
-static int ionic_lif_init(struct ionic_lif *lif)
+int ionic_lif_init(struct ionic_lif *lif)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -2580,22 +2893,6 @@ err_out_free_dbid:
return err;
}
-int ionic_lifs_init(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
- int err;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- err = ionic_lif_init(lif);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
@@ -2644,57 +2941,53 @@ static int ionic_lif_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
-int ionic_lifs_register(struct ionic *ionic)
+int ionic_lif_register(struct ionic_lif *lif)
{
int err;
- INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
+ INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
- ionic->nb.notifier_call = ionic_lif_notify;
+ lif->ionic->nb.notifier_call = ionic_lif_notify;
- err = register_netdevice_notifier(&ionic->nb);
+ err = register_netdevice_notifier(&lif->ionic->nb);
if (err)
- ionic->nb.notifier_call = NULL;
+ lif->ionic->nb.notifier_call = NULL;
/* only register LIF0 for now */
- err = register_netdev(ionic->master_lif->netdev);
+ err = register_netdev(lif->netdev);
if (err) {
- dev_err(ionic->dev, "Cannot register net device, aborting\n");
+ dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
- ionic->master_lif->registered = true;
- ionic_lif_set_netdev_info(ionic->master_lif);
+ lif->registered = true;
+ ionic_lif_set_netdev_info(lif);
return 0;
}
-void ionic_lifs_unregister(struct ionic *ionic)
+void ionic_lif_unregister(struct ionic_lif *lif)
{
- if (ionic->nb.notifier_call) {
- unregister_netdevice_notifier(&ionic->nb);
- cancel_work_sync(&ionic->nb_work);
- ionic->nb.notifier_call = NULL;
+ if (lif->ionic->nb.notifier_call) {
+ unregister_netdevice_notifier(&lif->ionic->nb);
+ cancel_work_sync(&lif->ionic->nb_work);
+ lif->ionic->nb.notifier_call = NULL;
}
- /* There is only one lif ever registered in the
- * current model, so don't bother searching the
- * ionic->lif for candidates to unregister
- */
- if (ionic->master_lif &&
- ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(ionic->master_lif->netdev);
+ if (lif->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(lif->netdev);
+ lif->registered = false;
}
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
+ union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
- union ionic_q_identity *q_ident;
struct ionic_dev *idev;
int qtype;
int err;
idev = &lif->ionic->idev;
- q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+ q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
@@ -2717,14 +3010,14 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
ionic_qtype_versions[qtype]);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
if (!err) {
- qti->version = q_ident->version;
- qti->supported = q_ident->supported;
- qti->features = le64_to_cpu(q_ident->features);
- qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
- qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
- qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
- qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
- qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ qti->version = readb(&q_ident->version);
+ qti->supported = readb(&q_ident->supported);
+ qti->features = readq(&q_ident->features);
+ qti->desc_sz = readw(&q_ident->desc_sz);
+ qti->comp_sz = readw(&q_ident->comp_sz);
+ qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
+ qti->max_sg_elems = readw(&q_ident->max_sg_elems);
+ qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
}
mutex_unlock(&ionic->dev_cmd_lock);
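The readb()/readw()/readq() conversion above goes with the new __iomem annotation in the same hunk: q_ident points into dev_cmd_regs->data, which is device memory, so its fields must be fetched through the MMIO accessors rather than plain loads (sparse flags direct dereferences of __iomem pointers). A self-contained sketch with an invented register layout:

	#include <linux/io.h>

	struct demo_regs {
		u8  version;
		u16 desc_sz;
		u64 features;	/* readq assumes 64-bit MMIO is available */
	};

	static u64 demo_read_ident(struct demo_regs __iomem *regs)
	{
		u8 ver = readb(&regs->version);
		u16 sz = readw(&regs->desc_sz);

		return (ver && sz) ? readq(&regs->features) : 0;
	}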
@@ -2801,7 +3094,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
return 0;
}
-int ionic_lifs_size(struct ionic *ionic)
+int ionic_lif_size(struct ionic *ionic)
{
struct ionic_identity *ident = &ionic->ident;
unsigned int nintrs, dev_nintrs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 1ee3b14c8d50..0224dfd24b8a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -4,6 +4,7 @@
#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_
+#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
@@ -16,32 +17,32 @@
#define IONIC_TX_BUDGET_DEFAULT 256
struct ionic_tx_stats {
- u64 dma_map_err;
u64 pkts;
u64 bytes;
- u64 clean;
- u64 linearize;
u64 csum_none;
u64 csum;
- u64 crc32_csum;
u64 tso;
u64 tso_bytes;
u64 frags;
u64 vlan_inserted;
+ u64 clean;
+ u64 linearize;
+ u64 crc32_csum;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
+ u64 dma_map_err;
};
struct ionic_rx_stats {
- u64 dma_map_err;
- u64 alloc_err;
u64 pkts;
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 csum_error;
u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
+ u64 csum_error;
+ u64 dma_map_err;
+ u64 alloc_err;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -56,35 +57,29 @@ struct ionic_napi_stats {
u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};
-struct ionic_q_stats {
- union {
- struct ionic_tx_stats tx;
- struct ionic_rx_stats rx;
- };
-};
-
struct ionic_qcq {
- void *base;
- dma_addr_t base_pa;
- unsigned int total_size;
+ void *q_base;
+ dma_addr_t q_base_pa;
+ u32 q_size;
+ void *cq_base;
+ dma_addr_t cq_base_pa;
+ u32 cq_size;
+ void *sg_base;
+ dma_addr_t sg_base_pa;
+ u32 sg_size;
+ struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
struct ionic_napi_stats napi_stats;
- struct ionic_q_stats *stats;
unsigned int flags;
struct dentry *dentry;
};
-struct ionic_qcqst {
- struct ionic_qcq *qcq;
- struct ionic_q_stats *stats;
-};
-
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
-#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
-#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
+#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
+#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
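The rewritten q_to_tx_stats()/q_to_rx_stats() macros reflect the broader ownership change in this patch: statistics move out of each qcq into lif-owned txqstats/rxqstats arrays indexed by queue number, so the counters persist when queues are torn down and rebuilt during a reconfigure. A toy sketch of the pattern, with illustrative types only:

	struct demo_stats { u64 pkts; u64 bytes; };
	struct demo_queue;			/* freed/reallocated on reconfigure */

	struct demo_parent {
		struct demo_queue **qs;		/* short-lived */
		struct demo_stats *qstats;	/* long-lived, same indexing */
	};

	static void demo_count_pkt(struct demo_parent *p, unsigned int qi,
				   unsigned int len)
	{
		p->qstats[qi].pkts++;		/* survives queue realloc */
		p->qstats[qi].bytes += len;
	}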
@@ -138,6 +133,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
IONIC_LIF_F_SPLIT_INTR,
+ IONIC_LIF_F_TX_DIM_INTR,
+ IONIC_LIF_F_RX_DIM_INTR,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -170,8 +167,10 @@ struct ionic_lif {
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
- struct ionic_qcqst *txqcqs;
- struct ionic_qcqst *rxqcqs;
+ struct ionic_qcq **txqcqs;
+ struct ionic_tx_stats *txqstats;
+ struct ionic_qcq **rxqcqs;
+ struct ionic_rx_stats *rxqstats;
u64 last_eid;
unsigned int neqs;
unsigned int nxqs;
@@ -212,12 +211,21 @@ struct ionic_lif {
struct work_struct tx_timeout_work;
};
-#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
-#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
-#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
-#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
-#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
-#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+struct ionic_queue_params {
+ unsigned int nxqs;
+ unsigned int ntxq_descs;
+ unsigned int nrxq_descs;
+ unsigned int intr_split;
+};
+
+static inline void ionic_init_queue_params(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ qparam->nxqs = lif->nxqs;
+ qparam->ntxq_descs = lif->ntxq_descs;
+ qparam->nrxq_descs = lif->nrxq_descs;
+ qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+}
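Taken together, ionic_init_queue_params() and ionic_reconfigure_queues() give callers a copy-modify-commit interface: snapshot the current settings, change the fields of interest, and submit the whole set. A minimal sketch of a caller, assuming only what this header declares (the wrapper name is hypothetical):

	static int demo_set_tx_ring_size(struct ionic_lif *lif,
					 unsigned int ntxq_descs)
	{
		struct ionic_queue_params qparam;

		ionic_init_queue_params(lif, &qparam);	/* snapshot current */
		qparam.ntxq_descs = ntxq_descs;		/* change one field */

		return ionic_reconfigure_queues(lif, &qparam);
	}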
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
@@ -237,39 +245,38 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
-void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
-int ionic_lifs_alloc(struct ionic *ionic);
-void ionic_lifs_free(struct ionic *ionic);
-void ionic_lifs_deinit(struct ionic *ionic);
-int ionic_lifs_init(struct ionic *ionic);
-int ionic_lifs_register(struct ionic *ionic);
-void ionic_lifs_unregister(struct ionic *ionic);
+int ionic_lif_alloc(struct ionic *ionic);
+int ionic_lif_init(struct ionic_lif *lif);
+void ionic_lif_free(struct ionic_lif *lif);
+void ionic_lif_deinit(struct ionic_lif *lif);
+int ionic_lif_register(struct ionic_lif *lif);
+void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
-int ionic_lifs_size(struct ionic *ionic);
+int ionic_lif_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam);
-int ionic_open(struct net_device *netdev);
-int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
-
-static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
- struct ionic_txq_desc *desc, bool dbell)
+static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
- u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
+ u8 num_sg_elems;
- qcq->q.dbell_count += dbell;
+ q->dbell_count += dbell;
+ num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
+ & IONIC_TXQ_DESC_NSGE_MASK);
if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
- qcq->stats->tx.sg_cntr[num_sg_elems]++;
+ q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
}
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
@@ -284,10 +291,8 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
-#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
-#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
- debug_stats_txq_post(qcq, txdesc, dbell)
+#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
+#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index df5b9bcc3aba..d355676f6c16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -64,6 +64,8 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
return "IONIC_RC_ERROR";
case IONIC_RC_ERDMA:
return "IONIC_RC_ERDMA";
+ case IONIC_RC_EBAD_FW:
+ return "IONIC_RC_EBAD_FW";
default:
return "IONIC_RC_UNKNOWN";
}
@@ -170,6 +172,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_FW_DOWNLOAD_V1:
+ return "IONIC_CMD_FW_DOWNLOAD_V1";
+ case IONIC_CMD_FW_CONTROL_V1:
+ return "IONIC_CMD_FW_CONTROL_V1";
case IONIC_CMD_VF_GETATTR:
return "IONIC_CMD_VF_GETATTR";
case IONIC_CMD_VF_SETATTR:
@@ -181,15 +187,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_queue *adminq = &lif->adminqcq->q;
+ struct ionic_queue *q = &lif->adminqcq->q;
+ struct ionic_desc_info *desc_info;
spin_lock(&lif->adminq_lock);
- while (adminq->tail != adminq->head) {
- memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
- adminq->tail->cb = NULL;
- adminq->tail->cb_arg = NULL;
- adminq->tail = adminq->tail->next;
+ while (q->tail_idx != q->head_idx) {
+ desc_info = &q->info[q->tail_idx];
+ memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock(&lif->adminq_lock);
}
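The tail advance above, (tail_idx + 1) & (num_descs - 1), is the index scheme this patch converts the whole driver to: ring sizes are powers of two, so the wraparound is a single mask instead of a compare or modulo. In isolation:

	/* assumes num_descs is a power of two */
	static inline u16 demo_ring_next(u16 idx, u16 num_descs)
	{
		return (idx + 1) & (num_descs - 1);
	}

	/* e.g. num_descs == 8: 5 -> 6 -> 7 -> 0 -> 1 ... */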
@@ -245,18 +253,17 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_queue *adminq;
+ struct ionic_desc_info *desc_info;
+ struct ionic_queue *q;
int err = 0;
- WARN_ON(in_interrupt());
-
if (!lif->adminqcq)
return -EIO;
- adminq = &lif->adminqcq->q;
+ q = &lif->adminqcq->q;
spin_lock(&lif->adminq_lock);
- if (!ionic_q_has_space(adminq, 1)) {
+ if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
}
@@ -265,13 +272,14 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->info[q->head_idx];
+ memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
spin_unlock(&lif->adminq_lock);
@@ -301,35 +309,9 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
return ionic_adminq_check_err(lif, ctx, (remaining == 0));
}
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg)
-{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
- struct ionic_cq *cq = &qcq->cq;
- u32 work_done, flags = 0;
-
- work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg);
-
- if (work_done < budget && napi_complete_done(napi, work_done)) {
- flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
- }
-
- if (work_done || flags) {
- flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl,
- cq->bound_intr->index,
- work_done, flags);
- }
-
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
- return work_done;
-}
-
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ union ionic_dev_cmd_regs __iomem *regs = ionic->idev.dev_cmd_regs;
iowrite32(0, &regs->doorbell);
memset_io(&regs->cmd, 0, sizeof(regs->cmd));
@@ -346,24 +328,27 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
int done;
int err;
- WARN_ON(in_interrupt());
-
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
* but don't wait any longer than max_seconds.
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(5);
- hb = ionic_heartbeat_check(ionic);
+ usleep_range(100, 200);
+
+ /* Don't check the heartbeat on FW_CONTROL commands as they are
+ * notorious for interrupting the firmware's heartbeat update.
+ */
+ if (opcode != IONIC_CMD_FW_CONTROL)
+ hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
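The rewritten wait loop above follows the usual bounded-poll shape: short usleep_range() naps between checks, a jiffies deadline tested with time_before(), and a side condition (the heartbeat) that can end the wait early. A reduced sketch, with demo_* helpers standing in for the driver's own:

	static int demo_poll(unsigned long max_seconds)
	{
		unsigned long max_wait = jiffies + max_seconds * HZ;
		int hb = 0;

		do {
			if (demo_cmd_done())		/* hypothetical check */
				return 0;
			usleep_range(100, 200);
			hb = demo_heartbeat_check();	/* hypothetical check */
		} while (!hb && time_before(jiffies, max_wait));

		return hb ? hb : -ETIMEDOUT;
	}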
@@ -387,8 +372,9 @@ try_again:
err = ionic_dev_cmd_status(&ionic->idev);
if (err) {
- if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) {
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n",
+ if (err == IONIC_RC_EAGAIN &&
+ time_before(jiffies, (max_wait - HZ))) {
+ dev_dbg(ionic->dev, "DEV_CMD %s (%d), %s (%d) retrying...\n",
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
@@ -398,9 +384,10 @@ try_again:
goto try_again;
}
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
return ionic_error_to_errno(err);
}
@@ -444,17 +431,23 @@ int ionic_identify(struct ionic *ionic)
sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data));
memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz);
}
-
mutex_unlock(&ionic->dev_cmd_lock);
- if (err)
- goto err_out_unmap;
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
+ goto err_out;
+ }
- ionic_debugfs_add_ident(ionic);
+ err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
+ &ionic->ident.lif);
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify LIFs: %d\n", err);
+ goto err_out;
+ }
return 0;
-err_out_unmap:
+err_out:
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 2a1885da58a6..ff20a2ac4c2f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -179,36 +179,28 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
- struct ionic_tx_stats *tstats;
- struct ionic_rx_stats *rstats;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
- struct ionic_qcq *txqcq;
- struct ionic_qcq *rxqcq;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
- if (txqcq && txqcq->stats) {
- tstats = &txqcq->stats->tx;
- stats->tx_packets += tstats->pkts;
- stats->tx_bytes += tstats->bytes;
- stats->tx_tso += tstats->tso;
- stats->tx_tso_bytes += tstats->tso_bytes;
- stats->tx_csum_none += tstats->csum_none;
- stats->tx_csum += tstats->csum;
- }
-
- rxqcq = lif_to_rxqcq(lif, q_num);
- if (rxqcq && rxqcq->stats) {
- rstats = &rxqcq->stats->rx;
- stats->rx_packets += rstats->pkts;
- stats->rx_bytes += rstats->bytes;
- stats->rx_csum_none += rstats->csum_none;
- stats->rx_csum_complete += rstats->csum_complete;
- stats->rx_csum_error += rstats->csum_error;
- }
+ txstats = &lif->txqstats[q_num];
+ stats->tx_packets += txstats->pkts;
+ stats->tx_bytes += txstats->bytes;
+ stats->tx_tso += txstats->tso;
+ stats->tx_tso_bytes += txstats->tso_bytes;
+ stats->tx_csum_none += txstats->csum_none;
+ stats->tx_csum += txstats->csum;
+
+ rxstats = &lif->rxqstats[q_num];
+ stats->rx_packets += rxstats->pkts;
+ stats->rx_bytes += rxstats->bytes;
+ stats->rx_csum_none += rxstats->csum_none;
+ stats->rx_csum_complete += rxstats->csum_complete;
+ stats->rx_csum_error += rxstats->csum_error;
}
ionic_get_stats64(lif->netdev, &ns);
@@ -371,7 +363,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txstats = &lif_to_txstats(lif, q_num);
+ txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats,
@@ -381,7 +373,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
@@ -405,7 +397,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxstats = &lif_to_rxstats(lif, q_num);
+ rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats,
@@ -415,7 +407,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
index 3f543512616e..2a725834f792 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h
@@ -49,7 +49,7 @@ extern const int ionic_num_stats_grps;
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
- __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+ __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index def65fee27b5..b3d2250c77d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,7 +22,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
+ DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -32,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
- DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
+ DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -49,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
struct sk_buff *skb;
netdev = lif->netdev;
- stats = q_to_rx_stats(q);
+ stats = &q->lif->rxqstats[q->index];
if (frags)
skb = napi_get_frags(&q_to_qcq(q)->napi);
@@ -200,7 +200,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__wsum)le16_to_cpu(comp->csum);
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
stats->csum_complete++;
}
} else {
@@ -235,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return false;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return false;
- desc_info = q->tail;
- if (desc_info->index != le16_to_cpu(comp->comp_index))
+ if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
@@ -253,53 +253,49 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-void ionic_rx_flush(struct ionic_cq *cq)
-{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_rx_service, NULL, NULL);
-
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-}
-
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
- dma_addr_t *dma_addr)
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
struct device *dev;
- struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Page alloc failed on %s!\n",
+
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in alloc\n",
+ netdev->name, q->name);
+ return -EINVAL;
+ }
+
+ page_info->page = dev_alloc_page();
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
- return NULL;
+ return -ENOMEM;
}
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, *dma_addr))) {
- __free_page(page);
- net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
+ net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
- return NULL;
+ return -EIO;
}
- return page;
+ return 0;
}
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
- dma_addr_t dma_addr)
+static void ionic_rx_page_free(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct net_device *netdev;
@@ -308,15 +304,23 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
netdev = lif->netdev;
dev = lif->ionic->dev;
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in free\n",
netdev->name, q->name);
return;
}
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s invalid page in free\n",
+ netdev->name, q->name);
+ return;
+ }
- __free_page(page);
+ dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
}
void ionic_rx_fill(struct ionic_queue *q)
@@ -338,7 +342,7 @@ void ionic_rx_fill(struct ionic_queue *q)
for (i = ionic_q_space_avail(q); i; i--) {
remain_len = len;
- desc_info = q->head;
+ desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
page_info = &desc_info->pages[0];
@@ -352,8 +356,7 @@ void ionic_rx_fill(struct ionic_queue *q)
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->npages = nfrags;
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
desc->addr = 0;
desc->len = 0;
return;
@@ -370,8 +373,7 @@ void ionic_rx_fill(struct ionic_queue *q)
continue;
sg_elem = &sg_desc->elems[j];
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
sg_elem->addr = 0;
sg_elem->len = 0;
return;
@@ -387,7 +389,7 @@ void ionic_rx_fill(struct ionic_queue *q)
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static void ionic_rx_fill_cb(void *arg)
@@ -397,28 +399,48 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *cur;
- struct ionic_rxq_desc *desc;
- unsigned int i;
+ struct ionic_desc_info *desc_info;
+ struct ionic_page_info *page_info;
+ unsigned int i, j;
- for (cur = q->tail; cur != q->head; cur = cur->next) {
- desc = cur->desc;
- desc->addr = 0;
- desc->len = 0;
-
- for (i = 0; i < cur->npages; i++) {
- if (likely(cur->pages[i].page)) {
- ionic_rx_page_free(q, cur->pages[i].page,
- cur->pages[i].dma_addr);
- cur->pages[i].page = NULL;
- cur->pages[i].dma_addr = 0;
- }
+ for (i = 0; i < q->num_descs; i++) {
+ desc_info = &q->info[i];
+ for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ page_info = &desc_info->pages[j];
+ if (page_info->page)
+ ionic_rx_page_free(q, page_info);
}
- cur->cb_arg = NULL;
+ desc_info->npages = 0;
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
}
}
+static void ionic_dim_update(struct ionic_qcq *qcq)
+{
+ struct dim_sample dim_sample;
+ struct ionic_lif *lif;
+ unsigned int qi;
+
+ if (!qcq->intr.dim_coal_hw)
+ return;
+
+ lif = qcq->q.lif;
+ qi = qcq->cq.bound_q->index;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+
+ dim_update_sample(qcq->cq.bound_intr->rearm_count,
+ lif->txqstats[qi].pkts,
+ lif->txqstats[qi].bytes,
+ &dim_sample);
+
+ net_dim(&qcq->dim, dim_sample);
+}
+
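ionic_dim_update() is the sampling half of the kernel's dynamic interrupt moderation (DIM) library: each NAPI completion feeds packet and byte counts into net_dim(), which schedules dim->work when the sample history suggests a better coalescing profile. The other half, not visible in this hunk, is that work handler; a sketch of its usual shape under the <linux/dim.h> contract, with the device-programming step only indicated by a comment:

	#include <linux/dim.h>

	static void demo_dim_work(struct work_struct *work)
	{
		struct dim *dim = container_of(work, struct dim, work);
		struct dim_cq_moder cur;

		cur = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
		/* program cur.usec / cur.pkts into the NIC here */

		dim->state = DIM_START_MEASURE;	/* re-arm the estimator */
	}

	/* paired at queue init time with something like:
	 *	INIT_WORK(&qcq->dim.work, demo_dim_work);
	 */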
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -435,8 +457,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -470,8 +493,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -500,7 +524,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
- txcq = &lif->txqcqs[qi].qcq->cq;
+ txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
ionic_tx_service, NULL, NULL);
@@ -511,8 +535,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill_cb(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ rxcq->bound_intr->rearm_count++;
}
if (rx_work_done || flags) {
@@ -615,6 +640,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
+ u16 index;
if (!color_match(comp->color, cq->done_color))
return false;
@@ -623,12 +649,13 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
- ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- } while (desc_info->index != le16_to_cpu(comp->comp_index));
+ } while (index != le16_to_cpu(comp->comp_index));
return true;
}
@@ -648,16 +675,14 @@ void ionic_tx_flush(struct ionic_cq *cq)
void ionic_tx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- int done = 0;
/* walk the not completed tx entries, if any */
- while (q->head != q->tail) {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ while (q->head_idx != q->tail_idx) {
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- done++;
}
}
@@ -741,8 +766,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
struct ionic_txq_sg_elem **elem)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
- struct ionic_txq_desc *desc = q->head->desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems;
return desc;
@@ -751,13 +776,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *abort = q->head;
+ struct ionic_desc_info *rewind_desc_info;
struct device *dev = q->lif->ionic->dev;
- struct ionic_desc_info *rewind = abort;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
+ u16 abort = q->head_idx;
unsigned int len_left;
dma_addr_t desc_addr;
unsigned int hdrlen;
@@ -765,12 +790,14 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
unsigned int seglen;
u64 total_bytes = 0;
u64 total_pkts = 0;
+ u16 rewind = abort;
unsigned int left;
unsigned int len;
unsigned int mss;
skb_frag_t *frag;
bool start, done;
bool outer_csum;
+ dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
@@ -852,11 +879,10 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
if (frag_left > 0) {
len = min(frag_left, left);
frag_left -= len;
- elem->addr =
- cpu_to_le64(ionic_tx_map_frag(q, frag,
- offset, len));
- if (dma_mapping_error(dev, elem->addr))
+ addr = ionic_tx_map_frag(q, frag, offset, len);
+ if (dma_mapping_error(dev, addr))
goto err_out_abort;
+ elem->addr = cpu_to_le64(addr);
elem->len = cpu_to_le16(len);
elem++;
desc_nsge++;
@@ -909,19 +935,20 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
err_out_abort:
- while (rewind->desc != q->head->desc) {
- ionic_tx_clean(q, rewind, NULL, NULL);
- rewind = rewind->next;
+ while (rewind != q->head_idx) {
+ rewind_desc_info = &q->info[rewind];
+ ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
+ rewind = (rewind + 1) & (q->num_descs - 1);
}
- q->head = abort;
+ q->head_idx = abort;
return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -960,8 +987,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -995,7 +1022,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -1104,9 +1131,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (unlikely(!lif_to_txqcq(lif, queue_index)))
+ if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
- q = lif_to_txq(lif, queue_index);
+ q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (ndescs < 0)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index a5883be0413f..7667b72232b8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -4,7 +4,6 @@
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
-void ionic_rx_flush(struct ionic_cq *cq);
void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 8f743d80760b..4366c7a8de95 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -80,7 +80,7 @@ config QED
select CRC8
select NET_DEVLINK
help
- This enables the support for ...
+ This enables support for the Marvell FastLinQ adapter family.
config QED_LL2
bool
@@ -100,7 +100,8 @@ config QEDE
depends on QED
imply PTP_1588_CLOCK
help
- This enables the support for ...
+ This enables support for the Ethernet driver of the Marvell
+ FastLinQ adapter family.
config QED_RDMA
bool
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 86153660d245..e5c51256243a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1189,9 +1189,6 @@ typedef struct {
#define NX_FORCE_FW_RESET 0xdeaddead
-/* Fw dump levels */
-static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
/* Flash read/write address */
#define NX_FW_DUMP_REG1 0x00130060
#define NX_FW_DUMP_REG2 0x001e0000
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index c3f50ddbe824..dd22cb056d03 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -814,6 +814,9 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
return 0;
}
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
static int
netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index f947b105cf14..8251755ec18c 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -9,6 +9,7 @@ qed-y := \
qed_dcbx.o \
qed_debug.o \
qed_dev.o \
+ qed_devlink.o \
qed_hw.o \
qed_init_fw_funcs.o \
qed_init_ops.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b2a7b53ee760..a20cb8a0c377 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -572,7 +572,7 @@ struct qed_hwfn {
struct qed_consq *p_consq;
/* Slow-Path definitions */
- struct tasklet_struct *sp_dpc;
+ struct tasklet_struct sp_dpc;
bool b_sp_dpc_enabled;
struct qed_ptt *p_main_ptt;
@@ -807,6 +807,7 @@ struct qed_dev {
struct qed_llh_info *p_llh_info;
/* Linux specific here */
+ struct qed_dev_info common_dev_info;
struct qede_dev *edev;
struct pci_dev *pdev;
u32 flags;
@@ -849,7 +850,6 @@ struct qed_dev {
u32 rdma_max_srq_sge;
u16 tunn_feature_mask;
- struct devlink *dl;
bool iwarp_cmt;
};
@@ -981,6 +981,7 @@ void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
enum qed_hw_err_type err_type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 3db181f3617a..d2f5855b2ea7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3973,6 +3973,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
struct qed_mcp_link_speed_params *ext_speed;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
+ int i;
/* Read global nvm_cfg address */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -4299,6 +4300,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
__set_bit(QED_DEV_CAP_ROCE,
&p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, serial_number);
+
+ for (i = 0; i < 4; i++)
+ p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
new file mode 100644
index 000000000000..cf7f4da68e69
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/qed/qed_if.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_devlink.h"
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+struct qed_fw_fatal_ctx {
+ enum qed_hw_err_type err_type;
+};
+
+int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type)
+{
+ struct qed_devlink *qdl = devlink_priv(devlink);
+ struct qed_fw_fatal_ctx fw_fatal_ctx = {
+ .err_type = err_type,
+ };
+
+ if (qdl->fw_reporter)
+ devlink_health_report(qdl->fw_reporter,
+ "Fatal error occurred", &fw_fatal_ctx);
+
+ return 0;
+}
+
+static int
+qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx;
+ struct qed_dev *cdev = qdl->cdev;
+ u32 dbg_data_buf_size;
+ u8 *p_dbg_data_buf;
+ int err;
+
+ /* Having a context means this was a dump request after a fatal
+ * error, so we enable extra debugging while gathering the dump,
+ * just in case.
+ */
+ cdev->print_dbg_data = fw_fatal_ctx ? true : false;
+
+ dbg_data_buf_size = qed_dbg_all_data_size(cdev);
+ p_dbg_data_buf = vzalloc(dbg_data_buf_size);
+ if (!p_dbg_data_buf) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for a debug data buffer\n");
+ return -ENOMEM;
+ }
+
+ err = qed_dbg_all_data(cdev, p_dbg_data_buf);
+ if (err) {
+ DP_NOTICE(cdev, "Failed to obtain debug data\n");
+ vfree(p_dbg_data_buf);
+ return err;
+ }
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "dump_data",
+ p_dbg_data_buf, dbg_data_buf_size);
+
+ vfree(p_dbg_data_buf);
+
+ return err;
+}
+
+static int
+qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_dev *cdev = qdl->cdev;
+
+ qed_recovery_process(cdev);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = qed_fw_fatal_reporter_recover,
+ .dump = qed_fw_fatal_reporter_dump,
+};
+
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 1200000
+
+void qed_fw_reporters_create(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+
+ dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
+ QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ if (IS_ERR(dl->fw_reporter)) {
+ DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
+ PTR_ERR(dl->fw_reporter));
+ dl->fw_reporter = NULL;
+ }
+}
+
+void qed_fw_reporters_destroy(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+ struct devlink_health_reporter *rep;
+
+ rep = dl->fw_reporter;
+
+ if (!IS_ERR_OR_NULL(rep))
+ devlink_health_reporter_destroy(rep);
+}
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
+
+static int qed_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qed_dl = devlink_priv(devlink);
+ struct qed_dev *cdev = qed_dl->cdev;
+ struct qed_dev_info *dev_info;
+ char buf[100];
+ int err;
+
+ dev_info = &cdev->common_dev_info;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num));
+ buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0;
+
+ if (buf[0]) {
+ err = devlink_info_board_serial_number_put(req, buf);
+ if (err)
+ return err;
+ }
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0));
+
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ dev_info->fw_major,
+ dev_info->fw_minor,
+ dev_info->fw_rev,
+ dev_info->fw_eng);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf);
+}
+
+static const struct devlink_ops qed_dl_ops = {
+ .info_get = qed_devlink_info_get,
+};
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qdevlink;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink));
+ if (!dl)
+ return ERR_PTR(-ENOMEM);
+
+ qdevlink = devlink_priv(dl);
+ qdevlink->cdev = cdev;
+
+ rc = devlink_register(dl, &cdev->pdev->dev);
+ if (rc)
+ goto err_free;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ devlink_params_publish(dl);
+ cdev->iwarp_cmt = false;
+
+ qed_fw_reporters_create(dl);
+
+ return dl;
+
+err_unregister:
+ devlink_unregister(dl);
+
+err_free:
+ devlink_free(dl);
+
+ return ERR_PTR(rc);
+}
+
+void qed_devlink_unregister(struct devlink *devlink)
+{
+ if (!devlink)
+ return;
+
+ qed_fw_reporters_destroy(devlink);
+
+ devlink_params_unregister(devlink, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.h b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
new file mode 100644
index 000000000000..ccc7d1d1bfd4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _QED_DEVLINK_H
+#define _QED_DEVLINK_H
+
+#include <linux/qed/qed_if.h>
+#include <net/devlink.h>
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev);
+void qed_devlink_unregister(struct devlink *devlink);
+
+void qed_fw_reporters_create(struct devlink *devlink);
+void qed_fw_reporters_destroy(struct devlink *devlink);
+
+int qed_report_fatal_error(struct devlink *dl, enum qed_hw_err_type err_type);
+
+#endif
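
The two new files above move devlink ownership from qede into qed, so one instance can be shared per device, and register a "fw_fatal" health reporter whose .dump and .recover callbacks wrap the existing debug-dump and recovery paths. The allocation relies on devlink's private-area convention; a minimal hedged sketch of that pairing, with a hypothetical struct my_devlink standing in for qed_devlink:

#include <net/devlink.h>

struct my_dev;                          /* opaque driver device struct */

struct my_devlink {                     /* driver-private area inside devlink */
        struct my_dev *dev;
};

static struct devlink *my_devlink_alloc(struct my_dev *mdev,
                                        const struct devlink_ops *ops)
{
        struct my_devlink *priv;
        struct devlink *dl;

        /* devlink_alloc() reserves sizeof(*priv) bytes past the core
         * state; devlink_priv() later returns a pointer to that area.
         */
        dl = devlink_alloc(ops, sizeof(*priv));
        if (!dl)
                return NULL;

        priv = devlink_priv(dl);
        priv->dev = mdev;
        return dl;
}

Once registered, the reporter can be exercised from userspace with the devlink tool, e.g. "devlink health dump show <dev> reporter fw_fatal" or "devlink health recover <dev> reporter fw_fatal".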
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f8c5a864812d..578935f643b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1216,9 +1216,9 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
barrier();
}
-void qed_int_sp_dpc(unsigned long hwfn_cookie)
+void qed_int_sp_dpc(struct tasklet_struct *t)
{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
struct qed_pi_info *pi_info = NULL;
struct qed_sb_attn_info *sb_attn;
struct qed_sb_info *sb_info;
@@ -2285,34 +2285,14 @@ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
- tasklet_init(p_hwfn->sp_dpc,
- qed_int_sp_dpc, (unsigned long)p_hwfn);
+ tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
p_hwfn->b_sp_dpc_enabled = true;
}
-static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
-{
- p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
- if (!p_hwfn->sp_dpc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
-{
- kfree(p_hwfn->sp_dpc);
- p_hwfn->sp_dpc = NULL;
-}
-
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
- rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc)
- return rc;
-
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
if (rc)
return rc;
@@ -2326,7 +2306,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
{
qed_int_sp_sb_free(p_hwfn);
qed_int_sb_attn_free(p_hwfn);
- qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 86809d7bc2de..c5550e96bbe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -140,7 +140,7 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
* @param p_hwfn - pointer to hwfn
*
*/
-void qed_int_sp_dpc(unsigned long hwfn_cookie);
+void qed_int_sp_dpc(struct tasklet_struct *t);
/**
* @brief qed_int_get_num_sbs - get the number of status
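
The qed_int changes are part of the tree-wide tasklet API conversion: the callback now receives the tasklet itself rather than an unsigned long cookie, and turning sp_dpc from a pointer into an embedded member lets the separate alloc/free helpers go away. A minimal sketch of the pattern, using a hypothetical struct foo:

#include <linux/interrupt.h>

struct foo {
        struct tasklet_struct task;     /* embedded, so no kmalloc/kfree */
        int pending;
};

/* The callback gets the tasklet pointer; from_tasklet() is a
 * container_of() wrapper that recovers the enclosing struct.
 */
static void foo_tasklet_fn(struct tasklet_struct *t)
{
        struct foo *f = from_tasklet(f, t, task);

        f->pending = 0;
}

static void foo_init(struct foo *f)
{
        tasklet_setup(&f->task, foo_tasklet_fn);        /* no cookie */
}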
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0452b728c527..49783f365079 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1185,7 +1185,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
.elem_size = sizeof(struct core_tx_bd),
};
struct qed_ll2_tx_packet *p_descq;
- u32 desc_size;
+ size_t desc_size;
u32 capacity;
int rc = 0;
@@ -1198,10 +1198,9 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
goto out;
capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
- /* First element is part of the packet, rest are flexibly added */
- desc_size = (sizeof(*p_descq) +
- (p_ll2_info->input.tx_max_bds_per_packet - 1) *
- sizeof(p_descq->bds_set));
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_descq, bds_set,
+ p_ll2_info->input.tx_max_bds_per_packet);
p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
if (!p_descq) {
@@ -1524,7 +1523,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
- u32 desc_size;
+ size_t desc_size;
u8 qid;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1558,10 +1557,9 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
INIT_LIST_HEAD(&p_tx->sending_descq);
spin_lock_init(&p_tx->lock);
capacity = qed_chain_get_capacity(&p_tx->txq_chain);
- /* First element is part of the packet, rest are flexibly added */
- desc_size = (sizeof(*p_pkt) +
- (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
- sizeof(p_pkt->bds_set));
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_pkt, bds_set,
+ p_ll2_conn->input.tx_max_bds_per_packet);
for (i = 0; i < capacity; i++) {
p_pkt = p_tx->descq_mem + desc_size * i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 500d0c4f8077..df88d00053a2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -56,7 +56,7 @@ struct qed_ll2_tx_packet {
struct core_tx_bd *txq_bd;
dma_addr_t tx_frag;
u16 frag_len;
- } bds_set[1];
+ } bds_set[];
};
struct qed_ll2_rx_queue {
@@ -86,9 +86,6 @@ struct qed_ll2_tx_queue {
struct list_head active_descq;
struct list_head free_descq;
struct list_head sending_descq;
- void *descq_mem; /* memory for variable sized qed_ll2_tx_packet*/
- struct qed_ll2_tx_packet *cur_send_packet;
- struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void __iomem *doorbell_addr;
struct core_db_data db_msg;
@@ -96,6 +93,9 @@ struct qed_ll2_tx_queue {
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
bool b_completing_packet;
+ void *descq_mem; /* memory for variable-sized qed_ll2_tx_packet */
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
};
struct qed_ll2_info {
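
The bds_set[1] array above becomes a proper C99 flexible array member, and the two qed_ll2.c hunks replace the matching open-coded sizeof() + (n - 1) * sizeof() arithmetic with struct_size(), which also saturates rather than overflowing. A minimal sketch of the idiom with a hypothetical struct pkt:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct frag {
        void *addr;
        u16 len;
};

struct pkt {
        u16 nfrags;
        struct frag frags[];            /* flexible array member */
};

static struct pkt *pkt_alloc(u16 nfrags)
{
        /* struct_size(p, frags, nfrags) computes
         * sizeof(*p) + nfrags * sizeof(p->frags[0]), saturating at
         * SIZE_MAX on overflow so the allocation fails cleanly.
         */
        struct pkt *p = kzalloc(struct_size(p, frags, nfrags), GFP_KERNEL);

        if (p)
                p->nfrags = nfrags;
        return p;
}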
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 50e5eb22e60a..5bd58c65e163 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -39,6 +39,7 @@
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
+#include "qed_devlink.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@ -480,6 +481,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
}
dev_info->mtu = hw_info->mtu;
+ cdev->common_dev_info = *dev_info;
return 0;
}
@@ -512,107 +514,6 @@ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
return 0;
}
-struct qed_devlink {
- struct qed_dev *cdev;
-};
-
-enum qed_devlink_param_id {
- QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
-};
-
-static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- ctx->val.vbool = cdev->iwarp_cmt;
-
- return 0;
-}
-
-static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- cdev->iwarp_cmt = ctx->val.vbool;
-
- return 0;
-}
-
-static const struct devlink_param qed_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
- "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- qed_dl_param_get, qed_dl_param_set, NULL),
-};
-
-static const struct devlink_ops qed_dl_ops;
-
-static int qed_devlink_register(struct qed_dev *cdev)
-{
- union devlink_param_value value;
- struct qed_devlink *qed_dl;
- struct devlink *dl;
- int rc;
-
- dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
- if (!dl)
- return -ENOMEM;
-
- qed_dl = devlink_priv(dl);
-
- cdev->dl = dl;
- qed_dl->cdev = cdev;
-
- rc = devlink_register(dl, &cdev->pdev->dev);
- if (rc)
- goto err_free;
-
- rc = devlink_params_register(dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
- if (rc)
- goto err_unregister;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
- devlink_params_publish(dl);
- cdev->iwarp_cmt = false;
-
- return 0;
-
-err_unregister:
- devlink_unregister(dl);
-
-err_free:
- cdev->dl = NULL;
- devlink_free(dl);
-
- return rc;
-}
-
-static void qed_devlink_unregister(struct qed_dev *cdev)
-{
- if (!cdev->dl)
- return;
-
- devlink_params_unregister(cdev->dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
-
- devlink_unregister(cdev->dl);
- devlink_free(cdev->dl);
-}
-
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@ -641,12 +542,6 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
}
DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev);
- if (rc) {
- DP_INFO(cdev, "Failed to register devlink.\n");
- goto err2;
- }
-
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@ -676,8 +571,6 @@ static void qed_remove(struct qed_dev *cdev)
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev);
-
qed_free_cdev(cdev);
}
@@ -843,7 +736,7 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Slowpath interrupt */
if (unlikely(status & 0x1)) {
- tasklet_schedule(hwfn->sp_dpc);
+ tasklet_schedule(&hwfn->sp_dpc);
status &= ~0x1;
rc = IRQ_HANDLED;
}
@@ -889,7 +782,7 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
- qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
} else {
unsigned long flags = 0;
@@ -921,8 +814,8 @@ static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
* enable function makes this sequence a flush-like operation.
*/
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
- tasklet_enable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
+ tasklet_enable(&p_hwfn->sp_dpc);
}
}
@@ -951,7 +844,7 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
break;
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
- cdev->hwfns[i].sp_dpc);
+ &cdev->hwfns[i].sp_dpc);
}
} else {
if (QED_LEADING_HWFN(cdev)->b_int_requested)
@@ -970,11 +863,11 @@ static int qed_nic_stop(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
p_hwfn->b_sp_dpc_enabled = false;
DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
"Disabled sp tasklet [hwfn %d] at %p\n",
- i, p_hwfn->sp_dpc);
+ i, &p_hwfn->sp_dpc);
}
}
@@ -2926,7 +2819,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-static int qed_recovery_process(struct qed_dev *cdev)
+int qed_recovery_process(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt;
@@ -3114,6 +3007,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .devlink_register = qed_devlink_register,
+ .devlink_unregister = qed_devlink_unregister,
+ .report_fatal_error = qed_report_fatal_error,
.dbg_all_data = &qed_dbg_all_data,
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a4bcde522cdf..da864d12916b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -504,7 +504,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->max_mw = 0;
dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
- dev->max_pkey = QED_RDMA_MAX_P_KEY;
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
@@ -1151,7 +1152,6 @@ qed_rdma_destroy_cq(void *rdma_cxt,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
p_ramrod_res =
- (struct rdma_destroy_cq_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_destroy_cq_output_params),
&ramrod_res_phys, GFP_KERNEL);
@@ -1463,14 +1463,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
switch (qp->qp_type) {
case QED_RDMA_QP_TYPE_XRC_INI:
- qp->has_req = 1;
+ qp->has_req = true;
break;
case QED_RDMA_QP_TYPE_XRC_TGT:
- qp->has_resp = 1;
+ qp->has_resp = true;
break;
default:
- qp->has_req = 1;
- qp->has_resp = 1;
+ qp->has_req = true;
+ qp->has_resp = true;
}
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
@@ -1520,7 +1520,7 @@ qed_rdma_register_tid(void *rdma_cxt,
params->pbl_two_level);
SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
- params->zbva);
+ false);
SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
@@ -1582,15 +1582,7 @@ qed_rdma_register_tid(void *rdma_cxt,
p_ramrod->pd = cpu_to_le16(params->pd);
p_ramrod->length_hi = (u8)(params->length >> 32);
p_ramrod->length_lo = DMA_LO_LE(params->length);
- if (params->zbva) {
- /* Lower 32 bits of the registered MR address.
- * In case of zero based MR, will hold FBO
- */
- p_ramrod->va.hi = 0;
- p_ramrod->va.lo = cpu_to_le32(params->fbo);
- } else {
- DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
- }
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
/* DIF */
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 803c1fcca8ad..3efc5899f656 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -172,6 +172,7 @@ struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
struct pci_dev *pdev;
+ struct devlink *devlink;
u32 dp_module;
u8 dp_level;
@@ -263,6 +264,7 @@ struct qede_dev {
struct bpf_prog *xdp_prog;
+ enum qed_hw_err_type last_err_type;
unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED 31
#define QEDE_ERR_ATTN_CLR_EN 0
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b9aa6384563b..bedbb85a179a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1026,7 +1026,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
args.u.mtu = new_mtu;
args.func = &qede_update_mtu;
qede_reload(edev, &args, false);
-
+#if IS_ENABLED(CONFIG_QED_RDMA)
+ qede_rdma_event_change_mtu(edev);
+#endif
edev->ops->common->update_mtu(edev->cdev, new_mtu);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9e1f41ba766c..05e3a3b60269 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1170,10 +1170,23 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
rc = -ENOMEM;
goto err2;
}
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ edev->devlink = NULL;
+ /* Go on, we can live without devlink */
+ }
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
edev = netdev_priv(ndev);
+
+ if (edev->devlink) {
+ struct qed_devlink *qdl = devlink_priv(edev->devlink);
+
+ qdl->cdev = cdev;
+ }
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1225,7 +1238,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
err4:
qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
- free_netdev(edev->ndev);
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
err2:
qed_ops->common->slowpath_stop(cdev);
err1:
@@ -1296,6 +1312,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
qed_ops->common->remove(cdev);
edev->cdev = NULL;
@@ -2454,7 +2475,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
- edev->ops->common->update_drv_state(edev->cdev, false);
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -2576,19 +2598,12 @@ static void qede_atomic_hw_err_handler(struct qede_dev *edev)
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
- struct qed_dev *cdev = edev->cdev;
-
DP_NOTICE(edev,
"Generic sleepable HW error handling started - err_flags 0x%lx\n",
edev->err_flags);
- /* Trigger a recovery process.
- * This is placed in the sleep requiring section just to make
- * sure it is the last one, and that all the other operations
- * were completed.
- */
- if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
- edev->ops->common->recovery_process(cdev);
+ if (edev->devlink)
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
@@ -2642,6 +2657,7 @@ static void qede_schedule_hw_err_handler(void *dev,
return;
}
+ edev->last_err_type = err_type;
qede_set_hw_err_flags(edev, err_type);
qede_atomic_hw_err_handler(edev);
set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 769ec2f4d0b7..2f6598086d9b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -234,6 +234,15 @@ static void qede_rdma_changeaddr(struct qede_dev *edev)
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
+static void qede_rdma_change_mtu(struct qede_dev *edev)
+{
+ if (qede_rdma_supported(edev)) {
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev,
+ QEDE_CHANGE_MTU);
+ }
+}
+
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
@@ -287,6 +296,9 @@ static void qede_rdma_handle_event(struct work_struct *work)
case QEDE_CHANGE_ADDR:
qede_rdma_changeaddr(edev);
break;
+ case QEDE_CHANGE_MTU:
+ qede_rdma_change_mtu(edev);
+ break;
default:
DP_NOTICE(edev, "Invalid rdma event %d", event);
}
@@ -338,3 +350,8 @@ void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}
+
+void qede_rdma_event_change_mtu(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
+}
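
qede_rdma delivers the new QEDE_CHANGE_MTU notification the same way as the existing events: the caller enqueues a work item and a workqueue handler invokes the qedr callback later, so MTU changes never call into the RDMA driver from an inconvenient context. A minimal hedged sketch of that deferred-notification shape (simplified: qede actually draws its work items from a pre-allocated free list rather than allocating per event):

#include <linux/workqueue.h>
#include <linux/slab.h>

enum my_event { MY_CHANGE_ADDR, MY_CHANGE_MTU };

struct my_event_work {
        struct work_struct work;
        enum my_event event;
};

static void my_handle_event(struct work_struct *work)
{
        struct my_event_work *ew =
                container_of(work, struct my_event_work, work);

        switch (ew->event) {
        case MY_CHANGE_MTU:
                /* safe to call the slow notifyee from here */
                break;
        default:
                break;
        }
        kfree(ew);
}

/* Callable from contexts that must not block on the notifyee. */
static void my_add_event(enum my_event event)
{
        struct my_event_work *ew = kzalloc(sizeof(*ew), GFP_ATOMIC);

        if (!ew)
                return;
        ew->event = event;
        INIT_WORK(&ew->work, my_handle_event);
        schedule_work(&ew->work);
}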
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 569e2a7a64e5..27740c027681 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* QLogic QLA3xxx NIC HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qla3xxx for copyright and licensing details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h
index 73e234366a82..fb4398303ae1 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.h
+++ b/drivers/net/ethernet/qlogic/qla3xxx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* QLogic QLA3xxx NIC HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qla3xxx for copyright and licensing details.
*/
#ifndef _QLA3XXX_H_
#define _QLA3XXX_H_
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index d67f8265724a..be7abee160e7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef _QLCNIC_H_
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 29b9c728a65e..d8882d0b6b49 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/if_vlan.h>
@@ -658,11 +657,10 @@ int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
{
void __iomem *addr;
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
- val = readl(addr);
+ readl(addr);
}
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
@@ -3813,7 +3811,6 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
qlcnic_cancel_idc_work(adapter);
@@ -3824,11 +3821,7 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
qlcnic_83xx_disable_mbx_intr(adapter);
cancel_delayed_work_sync(&adapter->idc_aen_work);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- return 0;
+ return pci_save_state(pdev);
}
static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 73fe2f64491d..6f1d9c1fd1b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_83XX_HW_H
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 0e2f2fb6c3a9..b8af59fc1aa4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic_sriov.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 34906750b7e7..c4297aea7d15 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index af38d3d73291..87f76bac2e46 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 834208e55f7b..4d638f60f237 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index f4aa6331b367..5d79ee4370bc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_DCBX_H
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b9894d54469c..d8a3ecaed3fc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 34e467b239a1..83a586d6fe43 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_HDR_H_
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 35d891f4655a..e1b8490bed0a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 56a3bd9e37dc..601d22495a88 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef __QLCNIC_HW_H
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index c48a0e2d4d7e..e6784023bce4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index ac61f614de37..bdf15d2a6431 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 173c7300cdf7..5a7e240fd469 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index f34ae8c75bc5..7760a3394e93 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <net/ip.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 5f327659efa7..7160b42f51dd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#ifndef _QLCNIC_83XX_SRIOV_H_
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7adbb03cb931..30e52f969759 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 5632da05145a..447720b93e5a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10037639ac2c..5c2edb715d3e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
- *
- * See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 1166b98d8bb2..8543bf3c3484 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -292,6 +292,7 @@ static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/**
* emac_update_hw_stats - read the EMAC stat registers
+ * @adpt: pointer to adapter struct
*
* Reads the stats registers and writes the values to adpt->stats.
*
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 375a844cd27c..362b4f5c162c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -167,7 +167,7 @@ static void qca_tty_wakeup(struct serdev_device *serdev)
schedule_work(&qca->tx_work);
}
-static struct serdev_device_ops qca_serdev_ops = {
+static const struct serdev_device_ops qca_serdev_ops = {
.receive_buf = qca_tty_receive,
.write_wakeup = qca_tty_wakeup,
};
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e291e6ac40cb..4e44313b7651 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1239,7 +1239,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- int rc, i;
+ int i;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
@@ -1260,7 +1260,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
cp_stop_hw(cp);
cp_clean_rings(cp);
- rc = cp_init_rings(cp);
+ cp_init_rings(cp);
cp_start_hw(cp);
__cp_set_rx_mode(dev);
cpw16_f(IntrMask, cp_norx_intr_mask);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 227139d42227..1e5a453dea14 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -978,7 +978,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
pr_info("OQO Model 2 detected. Forcing PIO\n");
- use_io = 1;
+ use_io = true;
}
dev = rtl8139_init_board (pdev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 11e6962a18e4..85d9c3e30c69 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -617,7 +617,6 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- unsigned irq_enabled:1;
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
@@ -701,6 +700,27 @@ static bool rtl_supports_eee(struct rtl8169_private *tp)
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
+static void rtl_get_priv_stats(struct rtl8169_stats *stats,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ *pkts = stats->packets;
+ *bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+}
+
+static void rtl_inc_priv_stats(struct rtl8169_stats *stats,
+ u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&stats->syncp);
+ stats->packets += pkts;
+ stats->bytes += bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+
static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
{
int i;
@@ -1280,12 +1300,10 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
RTL_W32(tp, IntrMask_8125, 0);
else
RTL_W16(tp, IntrMask, 0);
- tp->irq_enabled = 0;
}
static void rtl_irq_enable(struct rtl8169_private *tp)
{
- tp->irq_enabled = 1;
if (rtl_is_8125(tp))
RTL_W32(tp, IntrMask_8125, tp->irq_mask);
else
@@ -4062,9 +4080,17 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
+static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
{
- return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ return true;
+ default:
+ return false;
+ }
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4136,8 +4162,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !eth_skb_pad(skb);
+ if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
+ /* eth_skb_pad would free the skb on error */
+ return !__skb_put_padto(skb, ETH_ZLEN, false);
}
return true;
@@ -4316,18 +4343,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_ALL_TSO;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->len < ETH_ZLEN) {
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_34:
- features &= ~NETIF_F_CSUM_MASK;
- break;
- default:
- break;
- }
- }
+ /* work around hw bug on some chip versions */
+ if (skb->len < ETH_ZLEN)
+ features &= ~NETIF_F_CSUM_MASK;
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
@@ -4399,10 +4417,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
netdev_completed_queue(dev, pkts_compl, bytes_compl);
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
+ rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl);
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
@@ -4524,11 +4539,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
napi_gro_receive(&tp->napi, skb);
- u64_stats_update_begin(&tp->rx_stats.syncp);
- tp->rx_stats.packets++;
- tp->rx_stats.bytes += pkt_size;
- u64_stats_update_end(&tp->rx_stats.syncp);
-
+ rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size);
release_descriptor:
rtl8169_mark_to_asic(desc);
}
@@ -4544,8 +4555,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
struct rtl8169_private *tp = dev_instance;
u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
- !(status & tp->irq_mask))
+ if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -4563,7 +4573,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -4599,10 +4609,8 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
rtl_tx(dev, tp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
- }
return work_done;
}
@@ -4686,7 +4694,7 @@ static int rtl8169_close(struct net_device *dev)
phy_disconnect(tp->phydev);
- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -4737,8 +4745,8 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
- retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
- dev->name);
+ retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
+ IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -4755,7 +4763,7 @@ out:
return retval;
err_free_irq:
- pci_free_irq(pdev, 0, tp);
+ free_irq(pci_irq_vector(pdev, 0), tp);
err_release_fw_2:
rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
@@ -4778,23 +4786,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
struct rtl8169_counters *counters = tp->counters;
- unsigned int start;
pm_runtime_get_noresume(&pdev->dev);
netdev_stats_to_stats64(stats, &dev->stats);
- do {
- start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
- stats->rx_packets = tp->rx_stats.packets;
- stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
-
- do {
- start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
- stats->tx_packets = tp->tx_stats.packets;
- stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
+ rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes);
+ rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes);
/*
* Fetch additional counter values missing in stats collected by driver
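
The new rtl_get_priv_stats()/rtl_inc_priv_stats() helpers factor out the u64_stats_sync protocol that the removed open-coded loops implemented twice. The pattern in isolation, as a minimal sketch (the *_irq fetch variants match this kernel generation; the struct is hypothetical):

#include <linux/u64_stats_sync.h>

struct ring_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;    /* seqcount on 32-bit, no-op on 64-bit */
};

/* Writer: runs in one context at a time (e.g. NAPI poll). */
static void ring_stats_add(struct ring_stats *s, u64 pkts, u64 bytes)
{
        u64_stats_update_begin(&s->syncp);
        s->packets += pkts;
        s->bytes += bytes;
        u64_stats_update_end(&s->syncp);
}

/* Reader: retries until it observes a consistent snapshot. */
static void ring_stats_read(struct ring_stats *s, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *pkts = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}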
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9f88b5db4f89..7453b17a37a2 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1036,7 +1036,10 @@ struct ravb_private {
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
unsigned wol_enabled:1;
- int num_tx_desc; /* TX descriptors per packet */
+ unsigned rxcidm:1; /* RX Clock Internal Delay Mode */
+ unsigned txcidm:1; /* TX Clock Internal Delay Mode */
+ unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
+ int num_tx_desc; /* TX descriptors per packet */
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 99f7aae102ce..bd30505fbc57 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -162,7 +162,7 @@ static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
}
/* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = ravb_set_mdc,
.set_mdio_dir = ravb_set_mdio_dir,
@@ -1034,11 +1034,8 @@ static int ravb_phy_init(struct net_device *ndev)
pn = of_node_get(np);
}
- iface = priv->phy_interface;
- if (priv->chip_id != RCAR_GEN2 && phy_interface_mode_is_rgmii(iface)) {
- /* ravb_set_delay_mode() takes care of internal delay mode */
- iface = PHY_INTERFACE_MODE_RGMII;
- }
+ iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+ : priv->phy_interface;
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
@@ -1747,12 +1744,16 @@ static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1989,23 +1990,53 @@ static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
};
/* Set tx and rx clock internal delay modes */
-static void ravb_set_delay_mode(struct net_device *ndev)
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int set = 0;
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+ /* Fall back to legacy rgmii-*id behavior */
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
- set |= APSR_DM_RDM;
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
"phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
- phy_modes(priv->phy_interface)))
- set |= APSR_DM_TDM;
+ phy_modes(priv->phy_interface))) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
}
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+ if (priv->rxcidm)
+ set |= APSR_DM_RDM;
+ if (priv->txcidm)
+ set |= APSR_DM_TDM;
ravb_modify(ndev, APSR, APSR_DM, set);
}
@@ -2138,8 +2169,10 @@ static int ravb_probe(struct platform_device *pdev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- if (priv->chip_id != RCAR_GEN2)
+ if (priv->chip_id != RCAR_GEN2) {
+ ravb_parse_delay_mode(np, ndev);
ravb_set_delay_mode(ndev);
+ }
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
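
ravb_parse_delay_mode() prefers the explicit rx-internal-delay-ps/tx-internal-delay-ps properties and only falls back to deriving the delays from the rgmii-*id phy-mode when neither is present. The probe-time idiom it builds on, as a small hedged sketch (helper name hypothetical; property names as in the hunk above):

#include <linux/of.h>

/* of_property_read_u32() returns 0 only when the property exists and
 * parses, so leaving *enable untouched doubles as "property absent"
 * and lets the caller fall back to the legacy phy-mode heuristics.
 */
static bool parse_delay_ps(struct device_node *np, const char *prop,
                           bool *enable)
{
        u32 ps;

        if (of_property_read_u32(np, prop, &ps))
                return false;   /* absent: caller keeps legacy behavior */

        *enable = !!ps;         /* bindings allow 0 or one fixed delay */
        return true;
}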
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f45331ed90b0..c63304632935 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -45,6 +45,15 @@
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+/* use some intentionally tricky logic here to initialize the whole struct to
+ * 0xffff, but then override certain fields, requiring us to indicate that we
+ * "know" that there are overrides in this structure, and we'll need to disable
+ * that warning from W=1 builds. GCC has supported this option since 4.2.X, but
+ * the macros available to do this are only defined for GCC 8 and later.
+ */
+__diag_push();
+__diag_ignore(GCC, 8, "-Woverride-init",
+ "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -332,6 +341,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRH0] = 0x0100,
};
+__diag_pop();
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
@@ -1202,7 +1212,7 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
}
/* mdio bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = sh_mdc_ctrl,
.set_mdio_dir = sh_mmd_ctrl,
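
The rocker hunks that follow are a mechanical conversion from the deprecated pci_* DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), PCI_DMA_{TO,FROM}DEVICE and PCI_DMA_BIDIRECTIONAL become DMA_{TO,FROM}_DEVICE and DMA_BIDIRECTIONAL, and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP flag. The converted streaming-mapping pattern in isolation, as a hedged sketch over a hypothetical TX buffer:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int map_tx_buf(struct pci_dev *pdev, void *buf, size_t len,
                      dma_addr_t *handle)
{
        /* The generic API takes a struct device, not a pci_dev. */
        *handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, *handle))
                return -EIO;    /* never hand an unchecked handle to hw */
        return 0;
}

static void unmap_tx_buf(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
        dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}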
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9cc31f7e0df1..dd0bc7f0aaee 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -200,9 +200,9 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
buf = alloc + offset;
expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
- dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
err = -EIO;
goto free_alloc;
}
@@ -234,8 +234,8 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
goto unmap;
unmap:
- pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
free_alloc:
kfree(alloc);
@@ -441,9 +441,9 @@ static int rocker_dma_ring_create(const struct rocker *rocker,
if (!info->desc_info)
return -ENOMEM;
- info->desc = pci_alloc_consistent(rocker->pdev,
- info->size * sizeof(*info->desc),
- &info->mapaddr);
+ info->desc = dma_alloc_coherent(&rocker->pdev->dev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr, GFP_KERNEL);
if (!info->desc) {
kfree(info->desc_info);
return -ENOMEM;
@@ -465,9 +465,9 @@ static void rocker_dma_ring_destroy(const struct rocker *rocker,
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
- pci_free_consistent(rocker->pdev,
- info->size * sizeof(struct rocker_desc),
- info->desc, info->mapaddr);
+ dma_free_coherent(&rocker->pdev->dev,
+ info->size * sizeof(struct rocker_desc), info->desc,
+ info->mapaddr);
kfree(info->desc_info);
}
@@ -506,8 +506,9 @@ static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
goto rollback;
}
- dma_handle = pci_map_single(pdev, buf, buf_size, direction);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
+ direction);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
kfree(buf);
err = -EIO;
goto rollback;
@@ -526,7 +527,8 @@ rollback:
for (i--; i >= 0; i--) {
const struct rocker_desc_info *desc_info = &info->desc_info[i];
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -546,7 +548,8 @@ static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
desc->buf_addr = 0;
desc->buf_size = 0;
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -615,7 +618,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
spin_lock_init(&rocker->cmd_ring_lock);
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ DMA_BIDIRECTIONAL, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
goto err_dma_cmd_ring_bufs_alloc;
@@ -636,7 +639,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
- PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ DMA_FROM_DEVICE, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
goto err_dma_event_ring_bufs_alloc;
@@ -650,7 +653,7 @@ err_dma_event_ring_create:
rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
@@ -659,11 +662,11 @@ err_dma_cmd_ring_bufs_alloc:
static void rocker_dma_rings_fini(struct rocker *rocker)
{
rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
rocker_dma_cmd_ring_waits_free(rocker);
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
@@ -675,9 +678,9 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
- dma_handle = pci_map_single(pdev, skb->data, buf_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma_handle))
+ dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_handle))
return -EIO;
if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
goto tlv_put_failure;
@@ -686,7 +689,7 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
return 0;
tlv_put_failure:
- pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
desc_info->tlv_size = 0;
return -EMSGSIZE;
}
@@ -734,7 +737,7 @@ static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
return;
dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
- pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
@@ -796,7 +799,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE,
+ DMA_TO_DEVICE,
ROCKER_DMA_TX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
@@ -813,7 +816,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
ROCKER_DMA_RX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
@@ -831,12 +834,12 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
err_dma_rx_ring_skbs_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
return err;
@@ -848,10 +851,10 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
@@ -1858,7 +1861,7 @@ static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
continue;
dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
- pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
}
}
@@ -1871,8 +1874,8 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
dma_addr_t dma_handle;
struct rocker_tlv *frag;
- dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
if (net_ratelimit())
netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
return -EIO;
@@ -1892,7 +1895,7 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
nest_cancel:
rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
- pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
return -EMSGSIZE;
}
@@ -2905,17 +2908,17 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
goto err_pci_set_dma_mask;
}
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
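
The rocker hunks above are a mechanical conversion from the deprecated pci_* DMA wrappers to the generic DMA API: every call gains an explicit &pdev->dev, coherent allocations gain a gfp_t argument, mapping-error checks move to dma_mapping_error(), and the PCI_DMA_* direction constants become DMA_*. A minimal sketch of the pattern, using a hypothetical demo_ring type rather than rocker's real structures:

/* Sketch only: illustrates the pci_* -> dma_* conversion pattern.
 * "struct demo_ring" and the demo_* functions are hypothetical.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct demo_ring {
	void *desc;
	dma_addr_t mapaddr;
	size_t size;
};

static int demo_ring_create(struct pci_dev *pdev, struct demo_ring *ring)
{
	/* Old: pci_alloc_consistent(pdev, ring->size, &ring->mapaddr);
	 * New: same allocation through the generic DMA API, with an
	 * explicit struct device and explicit gfp flags.
	 */
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size,
					&ring->mapaddr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}

static void demo_ring_destroy(struct pci_dev *pdev, struct demo_ring *ring)
{
	/* Old: pci_free_consistent(pdev, ...) */
	dma_free_coherent(&pdev->dev, ring->size, ring->desc, ring->mapaddr);
}

static int demo_map_buf(struct pci_dev *pdev, void *buf, size_t len,
			dma_addr_t *handle)
{
	/* Old: pci_map_single() + pci_dma_mapping_error(), with
	 * PCI_DMA_TODEVICE; directions become the DMA_* enum values.
	 */
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))
		return -EIO;
	return 0;
}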
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 2cc8184b7e6b..971f1e54b652 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -97,7 +97,7 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
/**
* sxgbe_eee_ctrl_timer
- * @arg : data hook
+ * @t: timer_list containing the EEE control timer
* Description:
* If there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
@@ -255,7 +255,7 @@ static void sxgbe_adjust_link(struct net_device *dev)
/**
* sxgbe_init_phy - PHY initialization
- * @dev: net device structure
+ * @ndev: net device structure
* Description: it initializes the driver's PHY state, and attaches the PHY
* to the mac driver.
* Return value:
@@ -364,8 +364,11 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
/**
* sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
* @dev: net device structure
+ * @p: descriptor pointer
+ * @i: buffer index within the ring
+ * @dma_buf_sz: DMA buffer size
* @rx_ring: ring to be freed
- * @rx_rsize: ring size
+ *
* Description: this function frees the DMA RX buffers that sxgbe_init_rx_buffers() allocated
*/
static void sxgbe_free_rx_buffers(struct net_device *dev,
@@ -383,6 +386,7 @@ static void sxgbe_free_rx_buffers(struct net_device *dev,
/**
* init_tx_ring - init the TX descriptor ring
* @dev: net device structure
+ * @queue_no: TX queue number
* @tx_ring: ring to be initialised
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptor
@@ -449,6 +453,7 @@ static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
/**
* init_rx_ring - init the RX descriptor ring
* @dev: net device structure
+ * @queue_no: RX queue number
* @rx_ring: ring to be initialised
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptor
@@ -548,7 +553,7 @@ static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
- * @dev: net device structure
+ * @netd: net device structure
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
@@ -724,7 +729,7 @@ static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
/**
* sxgbe_tx_queue_clean:
- * @priv: driver private structure
+ * @tqueue: TX queue pointer
* Description: it reclaims resources after transmission completes.
*/
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
@@ -807,6 +812,7 @@ static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
/**
* sxgbe_restart_tx_queue: irq tx error mng function
* @priv: driver private structure
+ * @queue_num: queue number
* Description: it cleans the descriptors and restarts the transmission
* in case of errors.
*/
@@ -1567,6 +1573,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
/**
* sxgbe_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: index of the hanging queue
* Description: this function is called when a packet transmission fails to
* complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
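
Every sxgbe hunk above is a kernel-doc repair: each parameter in the comment must be documented as @name: with a name that matches the function signature exactly, or scripts/kernel-doc emits warnings. A minimal sketch of the expected shape, on a hypothetical function:

/**
 * demo_rx_refill - refill an RX ring with fresh buffers
 * @ndev: net device structure
 * @queue_no: RX queue number
 * @budget: maximum number of buffers to refill
 *
 * Description: refills up to @budget descriptors on queue @queue_no.
 * Return: number of buffers actually refilled.
 */
static int demo_rx_refill(struct net_device *ndev, u8 queue_no, int budget);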
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 8507ff242014..37ff25a84030 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_TO_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
@@ -403,6 +407,8 @@ memory_squeeze:
rd = &sp->rx_desc[sp->rx_new];
dma_sync_desc_cpu(dev, rd);
}
+ dma_sync_desc_dev(dev, rd);
+
dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
@@ -443,6 +449,7 @@ static inline void kick_tx(struct net_device *dev,
dma_sync_desc_cpu(dev, td);
}
if (td->tdma.cntinfo & HPCDMA_XIU) {
+ dma_sync_desc_dev(dev, td);
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
}
@@ -476,6 +483,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
break;
if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
+ dma_sync_desc_dev(dev, td);
if (!(status & HPC3_ETXCTRL_ACTIVE)) {
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
@@ -740,8 +748,8 @@ static int sgiseeq_probe(struct platform_device *pdev)
sp = netdev_priv(dev);
/* Make private data page aligned */
- sr = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+ sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
+ &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
if (!sr) {
printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
err = -ENOMEM;
@@ -802,8 +810,8 @@ static int sgiseeq_probe(struct platform_device *pdev)
return 0;
err_out_free_attrs:
- dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
- sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
free_netdev(dev);
@@ -817,8 +825,8 @@ static int sgiseeq_remove(struct platform_device *pdev)
struct sgiseeq_private *sp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
- sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTIONAL);
free_netdev(dev);
return 0;
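
sgiseeq keeps its descriptor rings in noncoherent memory, so the driver owns cache maintenance: dma_alloc_noncoherent() replaces the old DMA_ATTR_NON_CONSISTENT attrs allocation, and every CPU/device handoff is bracketed with dma_sync_single_for_cpu()/dma_sync_single_for_device(). A sketch of the ownership protocol, with a hypothetical descriptor type:

/* Sketch: explicit ownership handoff for a noncoherent descriptor.
 * "struct demo_desc", DEMO_DONE and demo_poll_desc() are illustrative.
 */
#include <linux/dma-mapping.h>

struct demo_desc {
	u32 status;		/* written by both CPU and device */
};
#define DEMO_DONE 0x1

static void demo_poll_desc(struct device *dev, struct demo_desc *d,
			   dma_addr_t addr)
{
	/* Take ownership: invalidate stale cache lines before reading
	 * what the device may have written.
	 */
	dma_sync_single_for_cpu(dev, addr, sizeof(*d), DMA_BIDIRECTIONAL);

	if (d->status & DEMO_DONE)
		d->status = 0;	/* CPU updates the descriptor */

	/* Hand ownership back: flush dirty lines before the device
	 * looks at the descriptor again.
	 */
	dma_sync_single_for_device(dev, addr, sizeof(*d), DMA_BIDIRECTIONAL);
}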
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4b0b2cf026a5..da6886dcac37 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -601,10 +601,14 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx_ef10_read_licensed_features(efx);
/* We can have one VI for each vi_stride-byte region.
- * However, until we use TX option descriptors we need two TX queues
- * per channel.
+ * However, until we use TX option descriptors we need up to four
+ * TX queues per channel for different checksumming combinations.
*/
- efx->tx_queues_per_channel = 2;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx->tx_queues_per_channel = 4;
+ else
+ efx->tx_queues_per_channel = 2;
efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
if (!efx->max_vis) {
netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
@@ -1300,6 +1304,7 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
static int efx_ef10_init_nic(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ netdev_features_t hw_enc_features = 0;
int rc;
if (nic_data->must_check_datapath_caps) {
@@ -1344,6 +1349,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
+ /* add encapsulated checksum offload features */
+ if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
+ hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* add encapsulated TSO features */
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+ netdev_features_t encap_tso_features;
+
+ encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+ hw_enc_features |= encap_tso_features | NETIF_F_TSO;
+ efx->net_dev->features |= encap_tso_features;
+ }
+ efx->net_dev->hw_enc_features = hw_enc_features;
+
/* don't fail init if RSS setup doesn't work */
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
@@ -1851,18 +1871,9 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
- if (in_interrupt()) {
- /* If in atomic context, cannot update stats. Just update the
- * software stats and return so the caller can continue.
- */
- spin_lock_bh(&efx->stats_lock);
- efx_update_sw_stats(efx, stats);
- return 0;
- }
-
efx_ef10_get_stat_mask(efx, mask);
- rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL);
if (rc) {
spin_lock_bh(&efx->stats_lock);
return rc;
@@ -1918,6 +1929,18 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
+static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* In atomic context, cannot update HW stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ efx_update_sw_stats(efx, nic_data->stats);
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
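
The in_interrupt() check disappears because the caller, not the callee, knows its execution context: the VF path now exposes a separate update_stats_atomic() method that only folds in cached software stats, while the sleeping update_stats() path is free to allocate its DMA buffer with GFP_KERNEL. A sketch of the split, with hypothetical types rather than sfc's real ones:

/* Sketch: context-specific callbacks instead of in_interrupt() probing. */
#include <linux/types.h>

struct demo_nic;

struct demo_nic_type {
	/* May sleep: can allocate buffers and talk to firmware. */
	size_t (*update_stats)(struct demo_nic *nic, u64 *stats);
	/* Must not sleep: only reads back already-cached values. */
	size_t (*update_stats_atomic)(struct demo_nic *nic, u64 *stats);
};

static size_t demo_get_stats(struct demo_nic *nic,
			     const struct demo_nic_type *type,
			     u64 *stats, bool atomic_ctx)
{
	if (atomic_ctx && type->update_stats_atomic)
		return type->update_stats_atomic(nic, stats);
	return type->update_stats(nic, stats);
}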
@@ -2146,6 +2169,9 @@ static int efx_ef10_irq_test_generate(struct efx_nic *efx)
static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
+ /* low two bits of label are what we want for type */
+ BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
+ tx_queue->type = tx_queue->label & 3;
return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
(tx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
@@ -2168,15 +2194,15 @@ static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
*/
-static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
- struct sk_buff *skb,
- bool *data_mapped)
+int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped)
{
struct efx_tx_buffer *buffer;
+ u16 inner_ipv4_id = 0;
+ u16 outer_ipv4_id = 0;
struct tcphdr *tcp;
struct iphdr *ip;
-
- u16 ipv4_id;
+ u16 ip_tot_len;
u32 seqnum;
u32 mss;
@@ -2189,21 +2215,43 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
return -EINVAL;
}
- ip = ip_hdr(skb);
+ if (skb->encapsulation) {
+ if (!tx_queue->tso_encap)
+ return -EINVAL;
+ ip = ip_hdr(skb);
+ if (ip->version == 4)
+ outer_ipv4_id = ntohs(ip->id);
+
+ ip = inner_ip_hdr(skb);
+ tcp = inner_tcp_hdr(skb);
+ } else {
+ ip = ip_hdr(skb);
+ tcp = tcp_hdr(skb);
+ }
+
+ /* 8000-series EF10 hardware requires that IP Total Length be
+ * greater than or equal to the value it will have in each segment
+ * (which is at most mss + 208 + TCP header length), but also less
+ * than (0x10000 - inner_network_header). Otherwise the TCP
+ * checksum calculation will be broken for encapsulated packets.
+ * We fill in ip->tot_len with 0xff30, which should satisfy the
+ * first requirement unless the MSS is ridiculously large (which
+ * should be impossible as the driver max MTU is 9216); it is
+ * guaranteed to satisfy the second as we only attempt TSO if
+ * inner_network_header <= 208.
+ */
+ ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
+ (tcp->doff << 2u) > ip_tot_len);
+
if (ip->version == 4) {
- /* Modify IPv4 header if needed. */
- ip->tot_len = 0;
+ ip->tot_len = htons(ip_tot_len);
ip->check = 0;
- ipv4_id = ntohs(ip->id);
+ inner_ipv4_id = ntohs(ip->id);
} else {
- /* Modify IPv6 header if needed. */
- struct ipv6hdr *ipv6 = ipv6_hdr(skb);
-
- ipv6->payload_len = 0;
- ipv4_id = 0;
+ ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len);
}
- tcp = tcp_hdr(skb);
seqnum = ntohl(tcp->seq);
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
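
The 0xff30 figure quoted in the comment above is simply -EFX_TSO2_MAX_HDRLEN truncated to 16 bits: with a 208-byte header limit (the value the surrounding comments assume), (u16)-208 = 65536 - 208 = 65328 = 0xff30, the largest tot_len that still leaves 208 bytes of headroom below the 16-bit limit. A userspace-compilable check of that arithmetic:

/* Sketch: verifies the 0xff30 constant quoted in the TSO comment.
 * EFX_TSO2_MAX_HDRLEN is assumed to be 208, per the comments above.
 */
#include <stdint.h>
#include <stdio.h>

#define EFX_TSO2_MAX_HDRLEN 208

int main(void)
{
	uint16_t ip_tot_len = (uint16_t)-EFX_TSO2_MAX_HDRLEN;

	/* Prints: ip_tot_len = 65328 (0xff30) */
	printf("ip_tot_len = %u (0x%04x)\n",
	       (unsigned int)ip_tot_len, (unsigned int)ip_tot_len);
	return 0;
}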
@@ -2216,7 +2264,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
- ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
);
++tx_queue->insert_count;
@@ -2226,11 +2274,12 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
buffer->flags = EFX_TX_BUF_OPTION;
buffer->len = 0;
buffer->unmap_len = 0;
- EFX_POPULATE_QWORD_4(buffer->option,
+ EFX_POPULATE_QWORD_5(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
ESF_DZ_TX_TSO_TCP_MSS, mss
);
++tx_queue->insert_count;
@@ -2254,11 +2303,11 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
- bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
+ bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data;
- bool tso_v2 = false;
efx_qword_t *txd;
int rc;
@@ -2281,15 +2330,18 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
* TSOv2 cannot be used with Hardware timestamping, and is never needed
* for XDP tx.
*/
- if (csum_offload && (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
- !tx_queue->timestamping && !tx_queue->xdp_tx) {
- tso_v2 = true;
- netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
- channel->channel);
+ if (efx_has_cap(efx, TX_TSO_V2)) {
+ if ((csum_offload || inner_csum) &&
+ !tx_queue->timestamping && !tx_queue->xdp_tx) {
+ tx_queue->tso_version = 2;
+ netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
+ channel->channel);
+ }
+ } else if (efx_has_cap(efx, TX_TSO)) {
+ tx_queue->tso_version = 1;
}
- rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+ rc = efx_mcdi_tx_init(tx_queue);
if (rc)
goto fail;
@@ -2302,22 +2354,19 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
tx_queue->insert_count = 1;
txd = efx_tx_desc(tx_queue, 0);
- EFX_POPULATE_QWORD_5(*txd,
+ EFX_POPULATE_QWORD_7(*txd,
ESF_DZ_TX_DESC_IS_OPT, true,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
- ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2,
ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
tx_queue->write_count = 1;
- if (tso_v2) {
- tx_queue->handle_tso = efx_ef10_tx_tso_desc;
- tx_queue->tso_version = 2;
- } else if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
- tx_queue->tso_version = 1;
- }
+ if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ tx_queue->tso_encap = true;
wmb();
efx_ef10_push_tx_desc(tx_queue, txd);
@@ -2367,7 +2416,7 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
unsigned int write_ptr;
efx_qword_t *txd;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
if (unlikely(tx_queue->write_count == tx_queue->insert_count))
return;
@@ -2880,7 +2929,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
tx_queue = efx_channel_get_tx_queue(channel,
- tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
if (!tx_queue->timestamping) {
/* Transmit completion */
@@ -3952,10 +4001,10 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats_vf,
+ .update_stats_atomic = efx_ef10_update_stats_atomic_vf,
.start_stats = efx_port_dummy_op_void,
.pull_stats = efx_port_dummy_op_void,
.stop_stats = efx_port_dummy_op_void,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
@@ -4066,7 +4115,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
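
The EF10 queue-type rework encodes the checksum variants in the low bits of the queue label: bit 0 selects outer checksum offload and bit 1 inner checksum, which is exactly why the probe path now sizes for four TX queues per channel on VXLAN/NVGRE-capable firmware. A sketch of the mapping, with constants assumed to be 1 and 2 to match the BUILD_BUG_ON in efx_ef10_tx_probe() above:

/* Sketch: deriving a TX queue's type from the label's low two bits. */
#define DEMO_TXQ_TYPE_OUTER_CSUM 1
#define DEMO_TXQ_TYPE_INNER_CSUM 2

static unsigned int demo_txq_type(unsigned int label)
{
	return label & (DEMO_TXQ_TYPE_OUTER_CSUM | DEMO_TXQ_TYPE_INNER_CSUM);
}

/* label 0 -> no csum offload, 1 -> outer only,
 * 2 -> inner only, 3 -> outer + inner.
 */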
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 729c425d0f78..835c838b7dfa 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -17,8 +17,49 @@
#include "ef100_ethtool.h"
#include "mcdi_functions.h"
+/* This is the maximum number of descriptors per ring supported by the QDMA */
+#define EFX_EF100_MAX_DMAQ_SIZE 16384UL
+
+static void ef100_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
.get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
+ .get_pauseparam = efx_ethtool_get_pauseparam,
+ .set_pauseparam = efx_ethtool_set_pauseparam,
+ .get_sset_count = efx_ethtool_get_sset_count,
+ .self_test = efx_ethtool_self_test,
+ .get_strings = efx_ethtool_get_strings,
+ .get_link_ksettings = efx_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ef100_ethtool_get_ringparam,
+ .get_fecparam = efx_ethtool_get_fecparam,
+ .set_fecparam = efx_ethtool_set_fecparam,
+ .get_ethtool_stats = efx_ethtool_get_stats,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .reset = efx_ethtool_reset,
+
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
+ .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_context = efx_ethtool_get_rxfh_context,
+ .set_rxfh_context = efx_ethtool_set_rxfh_context,
+
+ .get_module_info = efx_ethtool_get_module_info,
+ .get_module_eeprom = efx_ethtool_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 63c311ba28b9..67fe44db6b61 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -217,9 +217,13 @@ static const struct net_device_ops ef100_netdev_ops = {
.ndo_open = ef100_net_open,
.ndo_stop = ef100_net_stop,
.ndo_start_xmit = ef100_hard_start_xmit,
+ .ndo_tx_timeout = efx_watchdog,
.ndo_get_stats64 = efx_net_stats,
+ .ndo_change_mtu = efx_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */
+ .ndo_set_features = efx_set_features,
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 19fe86b3b316..3148fe770356 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -428,24 +428,12 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
__clear_bit(reset_type, &efx->reset_pending);
rc = dev_open(efx->net_dev, NULL);
} else if (reset_type == RESET_TYPE_ALL) {
- /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
- * and reprobe after reset to avoid removing filters twice
- */
- down_write(&efx->filter_sem);
- ef100_filter_table_down(efx);
- up_write(&efx->filter_sem);
rc = efx_mcdi_reset(efx, reset_type);
if (rc)
return rc;
netif_device_attach(efx->net_dev);
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_up(efx);
- up_write(&efx->filter_sem);
- if (rc)
- return rc;
-
rc = dev_open(efx->net_dev, NULL);
} else {
rc = 1; /* Leave the device closed */
@@ -696,7 +684,7 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
/* NIC level access functions
*/
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
@@ -769,6 +757,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
.reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.test_nvram = efx_new_mcdi_nvram_test_all,
.describe_stats = ef100_describe_stats,
.start_stats = efx_mcdi_mac_start_stats,
@@ -1172,6 +1161,10 @@ static int ef100_probe_main(struct efx_nic *efx)
rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc)
goto fail;
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail;
rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
@@ -1207,10 +1200,6 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_init_channels(efx);
- if (rc)
- goto fail;
-
down_write(&efx->filter_sem);
rc = ef100_filter_table_probe(efx);
up_write(&efx->filter_sem);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index a09546e43408..a90e5a9d2a37 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -27,7 +27,6 @@ int ef100_tx_probe(struct efx_tx_queue *tx_queue)
(tx_queue->ptr_mask + 2) *
sizeof(efx_oword_t),
GFP_KERNEL);
- return 0;
}
void ef100_tx_init(struct efx_tx_queue *tx_queue)
@@ -38,7 +37,14 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->channel->channel -
tx_queue->efx->tx_channel_offset);
- if (efx_mcdi_tx_init(tx_queue, false))
+ /* This value is purely informational; as EF100 never passes through
+ * the switch statement in tx.c:__efx_enqueue_skb(), that switch does
+ * not handle case 3. EF100's TSOv3 descriptors are generated by
+ * ef100_make_tso_desc().
+ * Meanwhile, all efx_mcdi_tx_init() cares about is that it's not 2.
+ */
+ tx_queue->tso_version = 3;
+ if (efx_mcdi_tx_init(tx_queue))
netdev_WARN(tx_queue->efx->net_dev,
"failed to initialise TXQ %d\n", tx_queue->queue);
}
@@ -117,11 +123,13 @@ static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int in
return NULL;
}
-void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
+static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
unsigned int write_ptr;
efx_dword_t reg;
+ tx_queue->xmit_pending = false;
+
if (unlikely(tx_queue->notify_count == tx_queue->write_count))
return;
@@ -131,7 +139,6 @@ void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
efx_writed_page(tx_queue->efx, &reg,
ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
tx_queue->notify_count = tx_queue->write_count;
- tx_queue->xmit_more_available = false;
}
static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
@@ -359,28 +366,31 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
goto err;
ef100_tx_make_descriptors(tx_queue, skb, segments);
- fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
* restart the queue if all completions have just happened.
*/
smp_mb();
- fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level < efx->txq_stop_thresh)
netif_tx_start_queue(tx_queue->core_txq);
}
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more))
- tx_queue->xmit_more_available = false; /* push doorbell */
- else if (tx_queue->write_count - tx_queue->notify_count > 255)
- /* Ensure we never push more than 256 packets at once */
- tx_queue->xmit_more_available = false; /* push */
- else
- tx_queue->xmit_more_available = true; /* don't push yet */
+ tx_queue->xmit_pending = true;
- if (!tx_queue->xmit_more_available)
+ /* If xmit_more then we don't need to push the doorbell, unless there
+ * are 256 descriptors already queued in which case we have to push to
+ * ensure we never push more than 256 at once.
+ */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
if (segments) {
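
xmit_pending replaces xmit_more_available and inverts the bookkeeping: instead of remembering that a push may be deferred, the queue remembers that descriptors are waiting, and pushes whenever the stack stops batching (xmit_more clear, as reported via BQL) or 256 descriptors have accumulated since the last notification. A simplified sketch of the decision, with hypothetical names:

/* Sketch: doorbell coalescing under xmit_more. "struct demo_txq" and
 * demo_tx_maybe_push() are illustrative; the real driver also consults
 * __netdev_tx_sent_queue() for the BQL side of the decision.
 */
#include <linux/types.h>

struct demo_txq {
	unsigned int write_count;
	unsigned int notify_count;
	bool xmit_pending;
};

static void demo_tx_maybe_push(struct demo_txq *txq, bool xmit_more)
{
	txq->xmit_pending = true;

	/* Push if the stack has nothing queued behind this packet, or if
	 * deferring further would exceed the 256-descriptor limit of a
	 * single notification.
	 */
	if (!xmit_more || txq->write_count - txq->notify_count > 255) {
		/* ring the hardware doorbell here */
		txq->notify_count = txq->write_count;
		txq->xmit_pending = false;
	}
}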
@@ -399,10 +409,10 @@ err:
/* If we're not expecting another transmit and we had something to push
* on this queue then we need to push here to get the previous packets
- * out. We only enter this branch from before the 'Update BQL' section
- * above, so xmit_more_available still refers to the old state.
+ * out. We only enter this branch from before the xmit_more handling
+ * above, so xmit_pending still refers to the old state.
*/
- if (tx_queue->xmit_more_available && !xmit_more)
+ if (tx_queue->xmit_pending && !xmit_more)
ef100_tx_push_buffers(tx_queue);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index fa23e430bdd7..ddc4b98fa6db 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -17,7 +17,6 @@
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
void ef100_tx_write(struct efx_tx_queue *tx_queue);
-void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue);
unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e06fa89f2d72..718308076341 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -33,7 +33,7 @@
#include "selftest.h"
#include "sriov.h"
-#include "mcdi.h"
+#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
@@ -149,23 +149,17 @@ static int efx_init_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail1;
-
efx->port_initialized = true;
/* Ensure the PHY advertises the correct flow control settings */
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc && rc != -EPERM)
- goto fail2;
+ goto fail;
mutex_unlock(&efx->mac_lock);
return 0;
-fail2:
- efx->phy_op->fini(efx);
-fail1:
+fail:
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -177,7 +171,6 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
- efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
@@ -603,6 +596,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_features_check = efx_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
@@ -1229,7 +1223,7 @@ static int efx_pm_thaw(struct device *dev)
goto fail;
mutex_lock(&efx->mac_lock);
- efx->phy_op->reconfigure(efx);
+ efx_mcdi_port_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -1336,7 +1330,7 @@ static int __init efx_init_module(void)
{
int rc;
- printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+ printk(KERN_INFO "Solarflare NET driver\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
@@ -1398,4 +1392,3 @@ MODULE_AUTHOR("Solarflare Communications and "
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
-MODULE_VERSION(EFX_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index dd4f30ea48a8..a4a626e9cd9a 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -151,7 +151,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
*/
n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
vec_count = pci_msix_vec_count(efx->pci_dev);
if (vec_count < 0)
@@ -179,7 +179,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -505,8 +505,7 @@ static void efx_filter_rfs_expire(struct work_struct *data)
#endif
/* Allocate and initialise a channel structure. */
-struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
@@ -521,7 +520,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
channel->channel = i;
channel->type = &efx_default_channel_type;
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx;
tx_queue->queue = -1;
@@ -545,7 +544,7 @@ int efx_init_channels(struct efx_nic *efx)
unsigned int i;
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ efx->channel[i] = efx_alloc_channel(efx, i);
if (!efx->channel[i])
return -ENOMEM;
efx->msi_context[i].efx = efx;
@@ -595,7 +594,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
channel->napi_str.state = 0;
memset(&channel->eventq, 0, sizeof(channel->eventq));
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
tx_queue = &channel->tx_queue[j];
if (tx_queue->channel)
tx_queue->channel = channel;
@@ -895,7 +894,7 @@ int efx_set_channels(struct efx_nic *efx)
xdp_queue_number, tx_queue->queue);
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
- * not dividing evenly by EFX_TXQ_TYPES.
+ * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
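
The rename from EFX_TXQ_TYPES to EFX_MAX_TXQ_PER_CHANNEL matters in this file because XDP TX queues are packed EFX_MAX_TXQ_PER_CHANNEL to an event queue, so the number of extra channels is the ceiling division shown above. A quick worked example, assuming 4 TX queues per channel:

/* Sketch: XDP TX channel sizing, assuming 4 TX queues per channel. */
#include <stdio.h>

#define DEMO_MAX_TXQ_PER_CHANNEL 4
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* One XDP TX queue per possible CPU, e.g. 10 CPUs... */
	unsigned int n_xdp_tx = 10;
	/* ...needs ceil(10 / 4) = 3 event channels; the two left-over
	 * queue slots are still probed but never used.
	 */
	unsigned int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx,
					     DEMO_MAX_TXQ_PER_CHANNEL);

	printf("%u XDP TXQs -> %u channels\n", n_xdp_tx, n_xdp_ev);
	return 0;
}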
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index 2d71dc9a33dd..d77ec1f77fb1 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -31,8 +31,6 @@ void efx_stop_eventq(struct efx_channel *channel);
void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
-struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index dfc6032e75f4..de797e1ac5a9 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <net/gre.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "efx.h"
@@ -19,6 +20,7 @@
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
+#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
@@ -544,7 +546,7 @@ void efx_start_all(struct efx_nic *efx)
* to poll now because we could have missed a change
*/
mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
+ if (efx_mcdi_phy_poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
@@ -600,7 +602,7 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
struct efx_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, stats);
+ efx_nic_update_stats_atomic(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
}
@@ -714,9 +716,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -759,10 +758,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
@@ -959,7 +955,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/**************************************************************************
*
- * Dummy PHY/MAC operations
+ * Dummy NIC operations
*
* Can be used for some unimplemented operations
* Needed so all function pointers are valid and do not have to be tested
@@ -972,18 +968,6 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
/**************************************************************************
*
* Data housekeeping
@@ -1030,6 +1014,7 @@ int efx_init_struct(struct efx_nic *efx,
efx->num_mac_stats = MC_CMD_MAC_NSTATS;
BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
mutex_init(&efx->mac_lock);
+ init_rwsem(&efx->filter_sem);
#ifdef CONFIG_RFS_ACCEL
mutex_init(&efx->rps_mutex);
spin_lock_init(&efx->rps_hash_lock);
@@ -1037,7 +1022,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1104,17 +1088,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
pci_set_master(pci_dev);
- /* Set the PCI DMA mask. Try all possibilities from our
- * genuine mask down to 32 bits, because some architectures
- * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
- * masks event though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"could not find a suitable DMA mask\n");
@@ -1315,6 +1289,89 @@ const struct pci_error_handlers efx_err_handlers = {
.resume = efx_io_resume,
};
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct gre_base_hdr *greh;
+ __be16 dst_port;
+ u8 ipproto;
+
+ /* Does the NIC support encap offloads?
+ * If not, we should never get here, because we shouldn't have
+ * advertised encap offload feature flags in the first place.
+ */
+ if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+ return false;
+
+ /* Determine encapsulation protocol in use */
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ipproto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ /* If there are extension headers, this will cause us to
+ * think we can't offload something that we maybe could have.
+ */
+ ipproto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ /* Not IP, so can't offload it */
+ return false;
+ }
+ switch (ipproto) {
+ case IPPROTO_GRE:
+ /* We support NVGRE but not IP over GRE or random gretaps.
+ * Specifically, the NIC will accept GRE as encapsulated if
+ * the inner protocol is Ethernet, but only handle it
+ * correctly if the GRE header is 8 bytes long. Moreover,
+ * it will not update the Checksum or Sequence Number fields
+ * if they are present. (The Routing Present flag,
+ * GRE_ROUTING, cannot be set else the header would be more
+ * than 8 bytes long; so we don't have to worry about it.)
+ */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return false;
+ if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+ return false;
+ if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+ return false;
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+ case IPPROTO_UDP:
+ /* If the port is registered for a UDP tunnel, we assume the
+ * packet is for that tunnel, and the NIC will handle it as
+ * such. If not, the NIC won't know what to do with it.
+ */
+ dst_port = udp_hdr(skb)->dest;
+ return efx->type->udp_tnl_has_port(efx, dst_port);
+ default:
+ return false;
+ }
+}
+
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (skb->encapsulation) {
+ if (features & NETIF_F_GSO_MASK)
+ /* Hardware can only do TSO with at most 208 bytes
+ * of headers.
+ */
+ if (skb_inner_transport_offset(skb) >
+ EFX_TSO2_MAX_HDRLEN)
+ features &= ~(NETIF_F_GSO_MASK);
+ if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
+ if (!efx_can_encap_offloads(efx, skb))
+ features &= ~(NETIF_F_GSO_MASK |
+ NETIF_F_CSUM_MASK);
+ }
+ return features;
+}
+
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
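
efx_can_encap_offloads() accepts GRE only in its NVGRE-shaped form: inner protocol Ethernet (ETH_P_TEB), exactly an 8-byte GRE header (4-byte base plus 4-byte key), and no checksum or sequence-number fields the NIC would fail to update. A standalone sketch of just the header test; the flag values follow RFC 2784/2890 bit positions, and the kernel's own definitions live in <net/gre.h> and the if_tunnel UAPI header:

/* Sketch: minimal GRE base-header test mirroring the logic above.
 * demo_* names and definitions are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohs */

#define DEMO_GRE_CSUM 0x8000	/* C bit: checksum present */
#define DEMO_GRE_SEQ  0x1000	/* S bit: sequence number present */

struct demo_gre_base_hdr {
	uint16_t flags;		/* network byte order */
	uint16_t protocol;
};

static bool demo_gre_offloadable(const struct demo_gre_base_hdr *greh,
				 unsigned int gre_hdr_len)
{
	/* NVGRE: 4-byte base header + 4-byte key field = 8 bytes. */
	if (gre_hdr_len != 8)
		return false;
	/* The NIC will not update checksum/sequence fields, so refuse
	 * any packet that carries them.
	 */
	return !(ntohs(greh->flags) & (DEMO_GRE_CSUM | DEMO_GRE_SEQ));
}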
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 4056f68f04e5..65513fd0cf6c 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -105,6 +105,9 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu);
extern const struct pci_error_handlers efx_err_handlers;
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4ffda7782f68..12a91c559aa2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -50,8 +50,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
return 1; /* cycle on/off once per second */
}
- efx->type->set_id_led(efx, mode);
- return 0;
+ return efx_mcdi_set_id_led(efx, mode);
}
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 05ac87807929..bf1443539a1a 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -15,6 +15,7 @@
#include "selftest.h"
#include "rx_common.h"
#include "ethtool_common.h"
+#include "mcdi_port_common.h"
struct efx_sw_stat_desc {
const char *name;
@@ -105,7 +106,6 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
@@ -221,7 +221,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev,
efx_link_set_wanted_fc(efx, wanted_fc);
if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"Unable to advertise requested flow "
@@ -372,20 +372,15 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
- if (efx->phy_op->run_tests != NULL) {
- EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
+ for (i = 0; true; ++i) {
+ const char *name;
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx_mcdi_phy_test_name(efx, i);
+ if (name == NULL)
+ break;
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL);
}
/* Loopback tests */
@@ -412,7 +407,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
snprintf(strings, ETH_GSTRING_LEN,
"tx-%u.tx_packets",
channel->tx_queue[0].queue /
- EFX_TXQ_TYPES);
+ EFX_MAX_TXQ_PER_CHANNEL);
strings += ETH_GSTRING_LEN;
}
@@ -571,7 +566,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
u32 supported;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_link_ksettings(efx, cmd);
+ efx_mcdi_phy_get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
@@ -607,7 +602,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_link_ksettings(efx, cmd);
+ rc = efx_mcdi_phy_set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -618,10 +613,8 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (!efx->phy_op || !efx->phy_op->get_fecparam)
- return -EOPNOTSUPP;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->get_fecparam(efx, fecparam);
+ rc = efx_mcdi_phy_get_fecparam(efx, fecparam);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -633,10 +626,8 @@ int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (!efx->phy_op || !efx->phy_op->get_fecparam)
- return -EOPNOTSUPP;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_fecparam(efx, fecparam);
+ rc = efx_mcdi_phy_set_fecparam(efx, fecparam);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -1332,11 +1323,8 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int ret;
- if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
- return -EOPNOTSUPP;
-
mutex_lock(&efx->mac_lock);
- ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+ ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data);
mutex_unlock(&efx->mac_lock);
return ret;
@@ -1348,11 +1336,8 @@ int efx_ethtool_get_module_info(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int ret;
- if (!efx->phy_op || !efx->phy_op->get_module_info)
- return -EOPNOTSUPP;
-
mutex_lock(&efx->mac_lock);
- ret = efx->phy_op->get_module_info(efx, modinfo);
+ ret = efx_mcdi_phy_get_module_info(efx, modinfo);
mutex_unlock(&efx->mac_lock);
return ret;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index fa1ade856b10..2c91792cec01 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -870,17 +870,12 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
{
struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct ef4_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
+ bool rx_ev_pause_frm;
- rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
@@ -893,10 +888,6 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
@@ -916,6 +907,13 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
* to a FIFO overflow.
*/
#ifdef DEBUG
+ {
+ /* Every error apart from tobe_disc and pause_frm */
+
+ bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
@@ -932,6 +930,7 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+ }
#endif
/* The frame must be discarded if any of these are true. */
@@ -1643,15 +1642,11 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
*/
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
- buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
- efx->n_channels * EF4_MAX_EVQ_SIZE)
- * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
@@ -2532,7 +2527,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
enum ef4_farch_filter_table_id table_id;
struct ef4_farch_filter_table *table;
unsigned int filter_idx;
- struct ef4_farch_filter_spec *spec;
int rc;
table_id = ef4_farch_filter_id_table_id(filter_id);
@@ -2543,7 +2537,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
filter_idx = ef4_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
- spec = &table->spec[filter_idx];
spin_lock_bh(&efx->filter_lock);
rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
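
The two farch variants remove dead reads of event fields, and between them they show the idioms available for a value that is only consumed under #ifdef DEBUG: annotate the declaration __maybe_unused, scope the computation inside the DEBUG block itself (as ef4_farch_handle_rx_not_ok now does), or keep the computation and explicitly consume it in the non-DEBUG case (as sfc's farch.c does further down with a (void) cast). A compact sketch of the three options, with hypothetical demo_* functions:

/* Sketch: three idioms for a value only used when DEBUG is defined. */
#include <linux/types.h>
#include <linux/printk.h>

/* 1. Annotate so -Wunused-but-set-variable stays quiet. */
static void demo_a(int ev)
{
	bool __maybe_unused other_err = ev & 0x3f;
#ifdef DEBUG
	if (other_err)
		pr_debug("unexpected event %#x\n", ev);
#endif
}

/* 2. Compute only inside the DEBUG-scoped block. */
static void demo_b(int ev)
{
#ifdef DEBUG
	{
		bool other_err = ev & 0x3f;

		if (other_err)
			pr_debug("unexpected event %#x\n", ev);
	}
#endif
}

/* 3. Explicitly consume the value when DEBUG is off. */
static void demo_c(int ev)
{
	bool other_err = ev & 0x3f;
#ifdef DEBUG
	if (other_err)
		pr_debug("unexpected event %#x\n", ev);
#else
	(void)other_err;
#endif
}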
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 05ea3523890a..966f13e7475d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -140,6 +140,7 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
* ef4_init_rx_buffers - create EF4_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
+ * @atomic: controls memory allocation flags
*
* This allocates a batch of pages, maps them for DMA, and populates
* struct ef4_rx_buffers for each one. Return a negative error code or
@@ -316,6 +317,7 @@ static void ef4_discard_rx_packet(struct ef4_channel *channel,
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
+ * @atomic: controls memory allocation flags
*
* The caller must provide serialisation (none is used here). In practice,
* this means this function must run from the NAPI handler, or be called
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c
index 147677c7c72f..6a454ac6f876 100644
--- a/drivers/net/ethernet/sfc/falcon/selftest.c
+++ b/drivers/net/ethernet/sfc/falcon/selftest.c
@@ -65,7 +65,7 @@ static const char *const ef4_interrupt_mode_names[] = {
STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
/**
- * ef4_loopback_state - persistent state during a loopback selftest
+ * struct ef4_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in ef4_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4002f9a3ae90..d75cf5ff5686 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -320,7 +320,7 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
if (unlikely(tx_queue->write_count == tx_queue->insert_count))
return;
@@ -372,6 +372,8 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
unsigned entries;
+ tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
+ ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
entries * sizeof(efx_qword_t));
@@ -379,7 +381,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
- int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
@@ -409,10 +411,12 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
- (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
+ (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
+
+ tx_queue->tso_version = 1;
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -832,13 +836,13 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
netif_tx_lock(efx->net_dev);
efx_farch_notify_tx_desc(tx_queue);
@@ -863,13 +867,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
@@ -918,6 +917,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+#else
+ (void) rx_ev_other_err;
#endif
if (efx->net_dev->features & NETIF_F_RXALL)
@@ -1083,9 +1084,9 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
+ if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+ qid % EFX_MAX_TXQ_PER_CHANNEL);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
efx_farch_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
@@ -1678,10 +1679,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
@@ -2592,7 +2593,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
- struct efx_farch_filter_spec *spec;
int rc;
table_id = efx_farch_filter_id_table_id(filter_id);
@@ -2604,7 +2604,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
if (filter_idx >= table->size)
return -ENOENT;
down_write(&state->lock);
- spec = &table->spec[filter_idx];
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
up_write(&state->lock);
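[Note: the farch changes above derive a per-queue @type bitmask from the low bits of @label rather than treating the label itself as the type. A minimal sketch of that mapping, using the flag names from net_driver.h (the helper name is hypothetical):

static unsigned int example_label_to_type(unsigned int label)
{
	unsigned int type = 0;

	if (label & 1)		/* odd labels carry outer checksum offload */
		type |= EFX_TXQ_TYPE_OUTER_CSUM;
	if (label & 2)		/* the upper pair is high-priority (TC) */
		type |= EFX_TXQ_TYPE_HIGHPRI;
	return type;
}
]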
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5467819aef6e..be6bfd6b7ec7 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1868,10 +1868,9 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
return efx_mcdi_exit_assertion(efx);
}
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
- int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
@@ -1881,8 +1880,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
}
static int efx_mcdi_reset_func(struct efx_nic *efx)
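[Note: with efx_mcdi_set_id_led() now returning int, callers can propagate MCDI failures instead of silently dropping them. A hypothetical caller sketch; the surrounding hook is illustrative and not part of this patch:

static int example_set_phys_id(struct efx_nic *efx, enum efx_led_mode mode)
{
	int rc = efx_mcdi_set_id_led(efx, mode);

	if (rc)
		netif_err(efx, hw, efx->net_dev,
			  "failed to set ID LED, rc=%d\n", rc);
	return rc;
}
]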
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 658cf345420d..69c2924a147c 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -190,6 +190,7 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
+#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
#define MCDI_DECLARE_BUF(_name, _len) \
@@ -348,14 +349,13 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-int efx_mcdi_port_reconfigure(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
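[Note: as the comment in the hunk above says, MCDI buffers are declared in 32-bit units, so DIV_ROUND_UP pads odd lengths up to the next dword. For example (sizes illustrative):

MCDI_DECLARE_BUF(buf10, 10);	/* array of DIV_ROUND_UP(10, 4) == 3 efx_dword_t */
MCDI_DECLARE_BUF(buf12, 12);	/* array of DIV_ROUND_UP(12, 4) == 3 efx_dword_t */

The full MCDI_DECLARE_BUF wrapper also zero-initialises the array (an assumption based on the truncated definition above).]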
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index d8a3af86ef78..d3e6d8239f5c 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -160,11 +160,12 @@ fail:
outbuf, outlen, rc);
}
-int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
+ bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
@@ -194,22 +195,31 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
- MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
+ bool tso_v2 = tx_queue->tso_version == 2;
+
+ /* TSOv2 implies IP header checksum offload for TSO frames,
+ * so we can safely disable IP header checksum offload for
+ * everything else. If we don't have TSOv2, then we have to
+ * enable IP header checksum offload, which is strictly
+ * incorrect but better than breaking TSO.
+ */
+ MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
- INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
- tx_queue->timestamping);
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
+ INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
+ INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc == -ENOSPC && tso_v2) {
/* Retry without TSOv2 if we're short on contexts. */
- tso_v2 = false;
+ tx_queue->tso_version = 0;
netif_warn(efx, probe, efx->net_dev,
"TSOv2 context not available to segment in "
"hardware. TCP performance may be reduced.\n"
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
index 687be8b00cd8..b0e2f53a0d9b 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.h
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -19,7 +19,7 @@ int efx_mcdi_ev_probe(struct efx_channel *channel);
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
void efx_mcdi_ev_remove(struct efx_channel *channel);
void efx_mcdi_ev_fini(struct efx_channel *channel);
-int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 98eeb404f68d..94c6a345c0b1 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -70,592 +70,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static int efx_mcdi_phy_probe(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- u32 caps;
- int rc;
-
- /* Initialise and populate phy_data */
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- /* Read initial link advertisement */
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- /* Fill out nic state */
- efx->phy_data = phy_data;
- efx->phy_type = phy_data->type;
-
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- mcdi_to_ethtool_linkset(phy_data->media, caps,
- efx->link_advertising);
- else
- phy_data->forced_cap = caps;
-
- /* Assert that we can map efx -> mcdi loopback modes */
- BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
- BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
- BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
- BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
- BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
- BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
- BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
- BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
- BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
- BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
- BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
- BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
- BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
- BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
- BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
- BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
- BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
- BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
- BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
- BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
- BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
- BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
- BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
- BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
-
- rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
- if (rc != 0)
- goto fail;
- /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
- * but by convention we don't */
- efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
-
- /* Set the initial link mode */
- efx_mcdi_phy_decode_link(
- efx, &efx->link_state,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-
- /* Record the initial FEC configuration (or nearest approximation
- * representable in the ethtool configuration space)
- */
- efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
- efx->link_state.speed == 25000 ||
- efx->link_state.speed == 50000);
-
- /* Default to Autonegotiated flow control if the PHY supports it */
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
- if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->wanted_fc |= EFX_FC_AUTO;
- efx_link_set_wanted_fc(efx, efx->wanted_fc);
-
- return 0;
-
-fail:
- kfree(phy_data);
- return rc;
-}
-
-static void efx_mcdi_phy_remove(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data = efx->phy_data;
-
- efx->phy_data = NULL;
- kfree(phy_data);
-}
-
-static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
- struct ethtool_link_ksettings *cmd)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- int rc;
-
- cmd->base.speed = efx->link_state.speed;
- cmd->base.duplex = efx->link_state.fd;
- cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
- cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
-
- mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
- cmd->link_modes.supported);
- memcpy(cmd->link_modes.advertising, efx->link_advertising,
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- return;
- mcdi_to_ethtool_linkset(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
- cmd->link_modes.lp_advertising);
-}
-
-static int
-efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
- const struct ethtool_link_ksettings *cmd)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps;
- int rc;
-
- if (cmd->base.autoneg) {
- caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
- } else if (cmd->base.duplex) {
- switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
- case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
- case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
- default: return -EINVAL;
- }
- } else {
- switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
- }
- }
-
- caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
-
- rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
- efx->loopback_mode, 0);
- if (rc)
- return rc;
-
- if (cmd->base.autoneg) {
- efx_link_set_advertising(efx, cmd->link_modes.advertising);
- phy_cfg->forced_cap = 0;
- } else {
- efx_link_clear_advertising(efx);
- phy_cfg->forced_cap = caps;
- }
- return 0;
-}
-
-static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
- const struct ethtool_fecparam *fec)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps;
- int rc;
-
- /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from
- * saved advertising bits
- */
- if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
- caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
- else
- caps = phy_cfg->forced_cap;
-
- caps |= ethtool_fec_caps_to_mcdi(fec->fec);
- rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
- efx->loopback_mode, 0);
- if (rc)
- return rc;
-
- /* Record the new FEC setting for subsequent set_link calls */
- efx->fec_config = fec->fec;
- return 0;
-}
-
-static const char *const mcdi_sft9001_cable_diag_names[] = {
- "cable.pairA.length",
- "cable.pairB.length",
- "cable.pairC.length",
- "cable.pairD.length",
- "cable.pairA.status",
- "cable.pairB.status",
- "cable.pairC.status",
- "cable.pairD.status",
-};
-
-static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
- int *results)
-{
- unsigned int retry, i, count = 0;
- size_t outlen;
- u32 status;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
- u8 *ptr;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
- inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
- if (rc)
- goto out;
-
- /* Wait up to 10s for BIST to finish */
- for (retry = 0; retry < 100; ++retry) {
- BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto out;
-
- status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
- if (status != MC_CMD_POLL_BIST_RUNNING)
- goto finished;
-
- msleep(100);
- }
-
- rc = -ETIMEDOUT;
- goto out;
-
-finished:
- results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
-
- /* SFT9001 specific cable diagnostics output */
- if (efx->phy_type == PHY_TYPE_SFT9001B &&
- (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
- bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
- if (status == MC_CMD_POLL_BIST_PASSED &&
- outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
- for (i = 0; i < 8; i++) {
- results[count + i] =
- EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
- EFX_DWORD_0);
- }
- }
- count += 8;
- }
- rc = count;
-
-out:
- return rc;
-}
-
-static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
- unsigned flags)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 mode;
- int rc;
-
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
- rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
- if (rc < 0)
- return rc;
-
- results += rc;
- }
-
- /* If we support both LONG and SHORT, then run each in response to
- * break or not. Otherwise, run the one we support */
- mode = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
- if ((flags & ETH_TEST_FL_OFFLINE) &&
- (phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
- mode = MC_CMD_PHY_BIST_CABLE_LONG;
- else
- mode = MC_CMD_PHY_BIST_CABLE_SHORT;
- } else if (phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
- mode = MC_CMD_PHY_BIST_CABLE_LONG;
-
- if (mode != 0) {
- rc = efx_mcdi_bist(efx, mode, results);
- if (rc < 0)
- return rc;
- results += rc;
- }
-
- return 0;
-}
-
-static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
- unsigned int index)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
- if (index == 0)
- return "bist";
- --index;
- }
-
- if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
- if (index == 0)
- return "cable";
- --index;
-
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
- return mcdi_sft9001_cable_diag_names[index];
- index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
- }
- }
-
- return NULL;
-}
-
-#define SFP_PAGE_SIZE 128
-#define SFF_DIAG_TYPE_OFFSET 92
-#define SFF_DIAG_ADDR_CHANGE BIT(2)
-#define SFF_8079_NUM_PAGES 2
-#define SFF_8472_NUM_PAGES 4
-#define SFF_8436_NUM_PAGES 5
-#define SFF_DMT_LEVEL_OFFSET 94
-
-/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
- * @efx: NIC context
- * @page: EEPROM page number
- * @data: Destination data pointer
- * @offset: Offset in page to copy from in to data
- * @space: Space available in data
- *
- * Return:
- * >=0 - amount of data copied
- * <0 - error
- */
-static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
- unsigned int page,
- u8 *data, ssize_t offset,
- ssize_t space)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
- size_t outlen;
- unsigned int payload_len;
- unsigned int to_copy;
- int rc;
-
- if (offset > SFP_PAGE_SIZE)
- return -EINVAL;
-
- to_copy = min(space, SFP_PAGE_SIZE - offset);
-
- MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
- inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf),
- &outlen);
-
- if (rc)
- return rc;
-
- if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
- SFP_PAGE_SIZE))
- return -EIO;
-
- payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
- if (payload_len != SFP_PAGE_SIZE)
- return -EIO;
-
- memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
- to_copy);
-
- return to_copy;
-}
-
-static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
- unsigned int page,
- u8 byte)
-{
- int rc;
- u8 data;
-
- rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
- if (rc == 1)
- return data;
-
- return rc;
-}
-
-static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
-{
- /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
- return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
- SFF_DIAG_TYPE_OFFSET);
-}
-
-static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
-{
- /* Page zero of the EEPROM includes the DMT level at byte 94. */
- return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
- SFF_DMT_LEVEL_OFFSET);
-}
-
-static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data = efx->phy_data;
-
- if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
- return phy_data->media;
-
- /* A QSFP+ NIC may actually have an SFP+ module attached.
- * The ID is page 0, byte 0.
- */
- switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
- case 0x3:
- return MC_CMD_MEDIA_SFP_PLUS;
- case 0xc:
- case 0xd:
- return MC_CMD_MEDIA_QSFP_PLUS;
- default:
- return 0;
- }
-}
-
-static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
- struct ethtool_eeprom *ee, u8 *data)
-{
- int rc;
- ssize_t space_remaining = ee->len;
- unsigned int page_off;
- bool ignore_missing;
- int num_pages;
- int page;
-
- switch (efx_mcdi_phy_module_type(efx)) {
- case MC_CMD_MEDIA_SFP_PLUS:
- num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
- SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
- page = 0;
- ignore_missing = false;
- break;
- case MC_CMD_MEDIA_QSFP_PLUS:
- num_pages = SFF_8436_NUM_PAGES;
- page = -1; /* We obtain the lower page by asking for -1. */
- ignore_missing = true; /* Ignore missing pages after page 0. */
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- page_off = ee->offset % SFP_PAGE_SIZE;
- page += ee->offset / SFP_PAGE_SIZE;
-
- while (space_remaining && (page < num_pages)) {
- rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
- data, page_off,
- space_remaining);
-
- if (rc > 0) {
- space_remaining -= rc;
- data += rc;
- page_off = 0;
- page++;
- } else if (rc == 0) {
- space_remaining = 0;
- } else if (ignore_missing && (page > 0)) {
- int intended_size = SFP_PAGE_SIZE - page_off;
-
- space_remaining -= intended_size;
- if (space_remaining < 0) {
- space_remaining = 0;
- } else {
- memset(data, 0, intended_size);
- data += intended_size;
- page_off = 0;
- page++;
- rc = 0;
- }
- } else {
- return rc;
- }
- }
-
- return 0;
-}
-
-static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
- struct ethtool_modinfo *modinfo)
-{
- int sff_8472_level;
- int diag_type;
-
- switch (efx_mcdi_phy_module_type(efx)) {
- case MC_CMD_MEDIA_SFP_PLUS:
- sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
-
- /* If we can't read the diagnostics level we have none. */
- if (sff_8472_level < 0)
- return -EOPNOTSUPP;
-
- /* Check if this module requires the (unsupported) address
- * change operation.
- */
- diag_type = efx_mcdi_phy_diag_type(efx);
-
- if ((sff_8472_level == 0) ||
- (diag_type & SFF_DIAG_ADDR_CHANGE)) {
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- } else {
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
- }
- break;
-
- case MC_CMD_MEDIA_QSFP_PLUS:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static const struct efx_phy_operations efx_mcdi_phy_ops = {
- .probe = efx_mcdi_phy_probe,
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_port_reconfigure,
- .poll = efx_mcdi_phy_poll,
- .fini = efx_port_dummy_op_void,
- .remove = efx_mcdi_phy_remove,
- .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
- .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
- .get_fecparam = efx_mcdi_phy_get_fecparam,
- .set_fecparam = efx_mcdi_phy_set_fecparam,
- .test_alive = efx_mcdi_phy_test_alive,
- .run_tests = efx_mcdi_phy_run_tests,
- .test_name = efx_mcdi_phy_test_name,
- .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
- .get_module_info = efx_mcdi_phy_get_module_info,
-};
-
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -683,16 +97,13 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
/* Set up MDIO structure for PHY */
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->mdio.mdio_read = efx_mcdi_mdio_read;
efx->mdio.mdio_write = efx_mcdi_mdio_write;
/* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
+ rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
@@ -701,6 +112,6 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
void efx_mcdi_port_remove(struct efx_nic *efx)
{
- efx->phy_op->remove(efx);
+ efx_mcdi_phy_remove(efx);
efx_mcdi_mac_fini_stats(efx);
}
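[Note: the net effect of the mcdi_port.c changes is that, with only one PHY implementation remaining, the efx_phy_operations indirection collapses into direct calls:

	rc = efx->phy_op->probe(efx);	/* before: indirect via ops table */
	rc = efx_mcdi_phy_probe(efx);	/* after: direct call, ops table removed */
]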
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 714d7f937212..4bd3ef8f3384 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -308,7 +308,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
* Both RS and BASER (whether AUTO or not) means use FEC if cable and link
* partner support it, preferring RS to BASER.
*/
-u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap)
{
u32 ret = 0;
@@ -316,17 +316,21 @@ u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
return 0;
if (ethtool_cap & ETHTOOL_FEC_AUTO)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
- if (ethtool_cap & ETHTOOL_FEC_RS)
+ ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap;
+ if (ethtool_cap & ETHTOOL_FEC_RS &&
+ supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
- if (ethtool_cap & ETHTOOL_FEC_BASER)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER) {
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ }
return ret;
}
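[Note: ethtool_fec_caps_to_mcdi() now masks the requested FEC modes against the PHY's supported capabilities. A worked example, assuming a PHY that advertises only RS-FEC:

	u32 supported = 1 << MC_CMD_PHY_CAP_RS_FEC_LBN;
	u32 caps;

	caps = ethtool_fec_caps_to_mcdi(supported, ETHTOOL_FEC_AUTO);
	/* caps == 1 << MC_CMD_PHY_CAP_RS_FEC_LBN: the BASER bits are
	 * dropped because they are not in supported_cap.
	 */
]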
@@ -404,6 +408,196 @@ bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
+int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ u32 caps;
+ int rc;
+
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+ if (rc != 0)
+ goto fail;
+
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio.mode_support = 0;
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
+ else
+ phy_data->forced_cap = caps;
+
+ /* Assert that we can map efx -> mcdi loopback modes */
+ BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+ BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+ BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+ BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+ BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+ BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+ BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+ BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+ BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+ BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+ BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+ BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+ BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+ BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+ BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+ BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+ rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+ if (rc != 0)
+ goto fail;
+ /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+ * but by convention we don't
+ */
+ efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Record the initial FEC configuration (or nearest approximation
+ * representable in the ethtool configuration space)
+ */
+ efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
+ efx->link_state.speed == 25000 ||
+ efx->link_state.speed == 50000);
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ return 0;
+
+fail:
+ kfree(phy_data);
+ return rc;
+}
+
+void efx_mcdi_phy_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ efx->phy_data = NULL;
+ kfree(phy_data);
+}
+
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
+ (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return;
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
+}
+
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ if (cmd->base.autoneg) {
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
+ }
+ } else {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
+ }
+ }
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ if (cmd->base.autoneg) {
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
+ phy_cfg->forced_cap = 0;
+ } else {
+ efx_link_clear_advertising(efx);
+ phy_cfg->forced_cap = caps;
+ }
+ return 0;
+}
+
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
@@ -455,6 +649,50 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
return 0;
}
+/* Basic validation to ensure that the caps we are going to attempt to set are
+ * in fact supported by the adapter. Note that 'no FEC' is always supported.
+ */
+static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap)
+{
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap &&
+ !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap))
+ return -EINVAL;
+ return 0;
+}
+
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec);
+ if (rc)
+ return rc;
+
+ /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from
+ * saved advertising bits
+ */
+ if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
+ caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ else
+ caps = phy_cfg->forced_cap;
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec);
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ /* Record the new FEC setting for subsequent set_link calls */
+ efx->fec_config = fec->fec;
+ return 0;
+}
+
int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
@@ -483,12 +721,357 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
- caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
efx->loopback_mode, 0);
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ return rc;
+}
+
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, then run each in response to
+ * break or not. Otherwise, run the one we support
+ */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
+#define SFP_PAGE_SIZE 128
+#define SFF_DIAG_TYPE_OFFSET 92
+#define SFF_DIAG_ADDR_CHANGE BIT(2)
+#define SFF_8079_NUM_PAGES 2
+#define SFF_8472_NUM_PAGES 4
+#define SFF_8436_NUM_PAGES 5
+#define SFF_DMT_LEVEL_OFFSET 94
+
+/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
+ * @efx: NIC context
+ * @page: EEPROM page number
+ * @data: Destination data pointer
+ * @offset: Offset in page to copy from in to data
+ * @space: Space available in data
+ *
+ * Return:
+ * >=0 - amount of data copied
+ * <0 - error
+ */
+static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
+ unsigned int page,
+ u8 *data, ssize_t offset,
+ ssize_t space)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ unsigned int payload_len;
+ unsigned int to_copy;
+ size_t outlen;
+ int rc;
+
+ if (offset > SFP_PAGE_SIZE)
+ return -EINVAL;
+
+ to_copy = min(space, SFP_PAGE_SIZE - offset);
+
+ MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+
+ if (rc)
+ return rc;
+
+ if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+ SFP_PAGE_SIZE))
+ return -EIO;
+
+ payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
+ if (payload_len != SFP_PAGE_SIZE)
+ return -EIO;
+
+ memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ to_copy);
+
+ return to_copy;
+}
+
+static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
+ unsigned int page,
+ u8 byte)
+{
+ u8 data;
+ int rc;
+
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
+ if (rc == 1)
+ return data;
+
+ return rc;
+}
+
+static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DIAG_TYPE_OFFSET);
+}
+
+static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the DMT level at byte 94. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DMT_LEVEL_OFFSET);
+}
+
+static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
+ return phy_data->media;
+
+ /* A QSFP+ NIC may actually have an SFP+ module attached.
+ * The ID is page 0, byte 0.
+ */
+ switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+ case 0x3:
+ return MC_CMD_MEDIA_SFP_PLUS;
+ case 0xc:
+ case 0xd:
+ return MC_CMD_MEDIA_QSFP_PLUS;
+ default:
+ return 0;
+ }
+}
+
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data)
+{
+ int rc;
+ ssize_t space_remaining = ee->len;
+ unsigned int page_off;
+ bool ignore_missing;
+ int num_pages;
+ int page;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
+ SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
+ page = 0;
+ ignore_missing = false;
+ break;
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ num_pages = SFF_8436_NUM_PAGES;
+ page = -1; /* We obtain the lower page by asking for -1. */
+ ignore_missing = true; /* Ignore missing pages after page 0. */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ page_off = ee->offset % SFP_PAGE_SIZE;
+ page += ee->offset / SFP_PAGE_SIZE;
+
+ while (space_remaining && (page < num_pages)) {
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
+ data, page_off,
+ space_remaining);
+
+ if (rc > 0) {
+ space_remaining -= rc;
+ data += rc;
+ page_off = 0;
+ page++;
+ } else if (rc == 0) {
+ space_remaining = 0;
+ } else if (ignore_missing && (page > 0)) {
+ int intended_size = SFP_PAGE_SIZE - page_off;
+
+ space_remaining -= intended_size;
+ if (space_remaining < 0) {
+ space_remaining = 0;
+ } else {
+ memset(data, 0, intended_size);
+ data += intended_size;
+ page_off = 0;
+ page++;
+ rc = 0;
+ }
+ } else {
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo)
+{
+ int sff_8472_level;
+ int diag_type;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
+
+ /* If we can't read the diagnostics level we have none. */
+ if (sff_8472_level < 0)
+ return -EOPNOTSUPP;
+
+ /* Check if this module requires the (unsupported) address
+ * change operation.
+ */
+ diag_type = efx_mcdi_phy_diag_type(efx);
+
+ if (sff_8472_level == 0 ||
+ (diag_type & SFF_DIAG_ADDR_CHANGE)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
{
return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
index 9dbeee83266f..ed31690e591c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.h
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -41,13 +41,22 @@ u8 mcdi_to_ethtool_media(u32 media);
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl);
-u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap);
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
bool efx_mcdi_phy_poll(struct efx_nic *efx);
-int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec);
+int efx_mcdi_phy_probe(struct efx_nic *efx);
+void efx_mcdi_phy_remove(struct efx_nic *efx);
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec);
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec);
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags);
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index);
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data);
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo);
int efx_mcdi_set_mac(struct efx_nic *efx);
int efx_mcdi_set_mtu(struct efx_nic *efx);
int efx_mcdi_mac_init_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 062462a13847..9f7dfdf708cf 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -38,8 +38,6 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "4.1"
-
#ifdef DEBUG
#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
@@ -65,10 +63,13 @@
* queues. */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
-#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
-#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
-#define EFX_TXQ_TYPES 4
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */
+#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */
+#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */
+#define EFX_TXQ_TYPES 8
+/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_MAX_TXQ_PER_CHANNEL 4
+#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
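[Note: the new flag layout above allows eight type values but at most four per channel, since HIGHPRI (Siena) and INNER_CSUM (EF10) never coexist. An illustrative enumeration:

/* Siena channels: 0, OUTER_CSUM, HIGHPRI, OUTER_CSUM | HIGHPRI
 * EF10 channels:  0, OUTER_CSUM, INNER_CSUM, OUTER_CSUM | INNER_CSUM
 * Either family fits in EFX_MAX_TXQ_PER_CHANNEL == 4 of the
 * EFX_TXQ_TYPES == 8 possible values.
 */
]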
@@ -76,6 +77,9 @@
/* Minimum MTU, from RFC791 (IP) */
#define EFX_MIN_MTU 68
+/* Maximum total header length for TSOv2 */
+#define EFX_TSO2_MAX_HDRLEN 208
+
/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
* and should be a multiple of the cache line size.
*/
@@ -192,7 +196,9 @@ struct efx_tx_buffer {
* @queue: DMA queue number
* @label: Label for TX completion events.
* Is our index within @channel->tx_queue array.
+ * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags.
* @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
@@ -206,8 +212,6 @@ struct efx_tx_buffer {
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
* @xdp_tx: Is this an XDP tx queue?
- * @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
- * may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -244,7 +248,7 @@ struct efx_tx_buffer {
* @tso_fallbacks: Number of times TSO fallback used
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
- * @xmit_more_available: Are any packets waiting to be pushed to the NIC
+ * @xmit_pending: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
* @notify_count: Count of notified descriptors to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
@@ -256,7 +260,9 @@ struct efx_tx_queue {
struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned int queue;
unsigned int label;
+ unsigned int type;
unsigned int tso_version;
+ bool tso_encap;
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
@@ -269,9 +275,6 @@ struct efx_tx_queue {
bool timestamping;
bool xdp_tx;
- /* Function pointers used in the fast path. */
- int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
-
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
@@ -292,7 +295,7 @@ struct efx_tx_queue {
unsigned int tso_fallbacks;
unsigned int pushes;
unsigned int pio_packets;
- bool xmit_more_available;
+ bool xmit_pending;
unsigned int cb_packets;
unsigned int notify_count;
/* Statistics to supplement MAC stats */
@@ -455,7 +458,7 @@ enum efx_sync_events_state {
* were checked for expiry
* @rfs_expire_index: next accelerated RFS filter ID to check for expiry
* @n_rfs_succeeded: number of successful accelerated RFS filter insertions
- * @n_rfs_failed; number of failed accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -481,6 +484,7 @@ enum efx_sync_events_state {
* @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type
* @sync_events_state: Current state of sync events on this channel
* @sync_timestamp_major: Major part of the last ptp sync event
* @sync_timestamp_minor: Minor part of the last ptp sync event
@@ -542,7 +546,8 @@ struct efx_channel {
struct list_head *rx_list;
struct efx_rx_queue rx_queue;
- struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+ struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
+ struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES];
enum efx_sync_events_state sync_events_state;
u32 sync_timestamp_major;
@@ -658,51 +663,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
}
/**
- * struct efx_phy_operations - Efx PHY operations table
- * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
- * efx->loopback_modes.
- * @init: Initialise PHY
- * @fini: Shut down PHY
- * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
- * @poll: Update @link_state and report whether it changed.
- * Serialised by the mac_lock.
- * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
- * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
- * @get_fecparam: Get Forward Error Correction settings. Serialised by mac_lock.
- * @set_fecparam: Set Forward Error Correction settings. Serialised by mac_lock.
- * @set_npage_adv: Set abilities advertised in (Extended) Next Page
- * (only needed where AN bit is set in mmds)
- * @test_alive: Test that PHY is 'alive' (online)
- * @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate (offline).
- * Flags are the ethtool tests flags.
- */
-struct efx_phy_operations {
- int (*probe) (struct efx_nic *efx);
- int (*init) (struct efx_nic *efx);
- void (*fini) (struct efx_nic *efx);
- void (*remove) (struct efx_nic *efx);
- int (*reconfigure) (struct efx_nic *efx);
- bool (*poll) (struct efx_nic *efx);
- void (*get_link_ksettings)(struct efx_nic *efx,
- struct ethtool_link_ksettings *cmd);
- int (*set_link_ksettings)(struct efx_nic *efx,
- const struct ethtool_link_ksettings *cmd);
- int (*get_fecparam)(struct efx_nic *efx, struct ethtool_fecparam *fec);
- int (*set_fecparam)(struct efx_nic *efx,
- const struct ethtool_fecparam *fec);
- void (*set_npage_adv) (struct efx_nic *efx, u32);
- int (*test_alive) (struct efx_nic *efx);
- const char *(*test_name) (struct efx_nic *efx, unsigned int index);
- int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
- int (*get_module_eeprom) (struct efx_nic *efx,
- struct ethtool_eeprom *ee,
- u8 *data);
- int (*get_module_info) (struct efx_nic *efx,
- struct ethtool_modinfo *modinfo);
-};
-
-/**
* enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
@@ -920,7 +880,6 @@ struct efx_async_filter_insertion {
* field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
- * @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mdio: PHY MDIO interface
* @mdio_bus: PHY MDIO bus ID (only used by Siena)
@@ -1094,7 +1053,6 @@ struct efx_nic {
bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
- const struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
@@ -1214,10 +1172,12 @@ struct efx_udp_tunnel {
* @describe_stats: Describe statistics for ethtool
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
+ * @update_stats_atomic: Update statistics while in atomic context, if that
+ * is more limiting than @update_stats. Otherwise, leave %NULL and
+ * driver core will call @update_stats.
* @start_stats: Start the regular fetching of statistics
* @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
- * @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
* @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
@@ -1250,7 +1210,7 @@ struct efx_udp_tunnel {
* a pointer to the &struct efx_msi_context for the channel.
* @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
* is a pointer to the &struct efx_nic.
- * @tx_probe: Allocate resources for TX queue
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type)
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
@@ -1359,10 +1319,11 @@ struct efx_nic_type {
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
+ size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
- void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
void (*prepare_enable_fc_tx)(struct efx_nic *efx);
@@ -1546,14 +1507,6 @@ efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
return efx->channel[efx->tx_channel_offset + index];
}
-static inline struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
- EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels ||
- type >= efx->tx_queues_per_channel);
- return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
-}
-
static inline struct efx_channel *
efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
{
@@ -1580,10 +1533,18 @@ static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel
}
static inline struct efx_tx_queue *
-efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type)
+{
+ EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES);
+ return channel->tx_queue_by_type[type];
+}
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type)
{
- EFX_WARN_ON_ONCE_PARANOID(type >= efx_channel_num_tx_queues(channel));
- return &channel->tx_queue[type];
+ struct efx_channel *channel = efx_get_tx_channel(efx, index);
+
+ return efx_channel_get_tx_queue(channel, type);
}
/* Iterate over all TX queues belonging to a channel */
@@ -1683,10 +1644,6 @@ efx_channel_tx_fill_level(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
unsigned int fill_level = 0;
- /* This function is currently only used by EF100, which maybe
- * could do something simpler and just compute the fill level
- * of the single TXQ that's really in use.
- */
efx_for_each_channel_tx_queue(tx_queue, channel)
fill_level = max(fill_level,
tx_queue->insert_count - tx_queue->read_count);
@@ -1694,6 +1651,20 @@ efx_channel_tx_fill_level(struct efx_channel *channel)
return fill_level;
}
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->old_read_count);
+
+ return fill_level;
+}
+
/* Get all supported features.
* If a feature is not fixed, it is present in hw_features.
* If a feature is fixed, it does not present in hw_features, but
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 724e2776b585..5c2fe3ce3f4d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -297,6 +297,10 @@ struct efx_ef10_nic_data {
u64 licensed_features;
};
+/* TSOv2 */
+int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
int efx_init_sriov(void);
void efx_fini_sriov(void);
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 974107354087..b9cafe9cd568 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -65,8 +65,7 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
/* Report whether this TX queue would be empty for the given write_count.
* May return false negative.
*/
-static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
- unsigned int write_count)
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
@@ -76,41 +75,6 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
-/* Report whether the NIC considers this TX queue empty, using
- * packet_write_count (the write count recorded for the last completable
- * doorbell push). May return false negative. EF10 only, which is OK
- * because only EF10 supports PIO.
- */
-static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
-{
- EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
- return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
-}
-
-/* Get partner of a TX queue, seen as part of the same net core queue */
-/* XXX is this a thing on EF100? */
-static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
-{
- if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
- return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
- else
- return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
-}
-
-/* Decide whether we can use TX PIO, ie. write packet data directly into
- * a buffer on the device. This can reduce latency at the expense of
- * throughput, so we only do this if both hardware and software TX rings
- * are empty. This also ensures that only one packet at a time can be
- * using the PIO buffer.
- */
-static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
-
- return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
- efx_nic_tx_is_empty(partner);
-}
-
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
@@ -125,7 +89,7 @@ int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
+ bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
return was_empty && tx_queue->write_count - write_count == 1;
@@ -280,6 +244,13 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx->type->update_stats_atomic)
+ return efx->type->update_stats_atomic(efx, full_stats, core_stats);
+ return efx->type->update_stats(efx, full_stats, core_stats);
+}
#define EFX_MAX_FLUSH_TIME 5000
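The efx_nic_update_stats_atomic() wrapper above is an optional-method-with-fallback pattern: a NIC type only supplies @update_stats_atomic when its regular @update_stats cannot run in atomic context. A minimal sketch of a caller, assuming it runs in atomic context such as ndo_get_stats64 (illustrative only, not part of the patch):

	/* Either stats argument may be NULL, so a caller that only wants
	 * the core stats can skip full_stats entirely.
	 */
	static void example_get_stats64(struct efx_nic *efx,
					struct rtnl_link_stats64 *stats)
	{
		efx_nic_update_stats_atomic(efx, NULL, stats);
	}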
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index bea4725a4499..a39c5143b386 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -43,6 +43,7 @@
#include "mcdi_pcol.h"
#include "io.h"
#include "farch_regs.h"
+#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
/* Maximum number of events expected to make up a PTP event */
@@ -172,9 +173,11 @@ struct efx_ptp_match {
/**
* struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: Entry in the list of received PTP events
* @seq0: First part of (PTP) UUID
* @seq1: Second part of (PTP) UUID and sequence number
* @hwtimestamp: Event timestamp
+ * @expiry: Time at which the packet arrived
*/
struct efx_ptp_event_rx {
struct list_head link;
@@ -222,11 +225,13 @@ struct efx_ptp_timeset {
* reset (disable, enable).
* @rxfilter_event: Receive filter when operating
* @rxfilter_general: Receive filter when operating
+ * @rxfilter_installed: Indicates if the receive filters are currently installed
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
* @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
* @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time: NIC time handling details
* @nic_time.minor_max: Wrap point for NIC minor times
* @nic_time.sync_event_diff_min: Minimum acceptable difference between time
* in packet prefix and last MCDI time sync event i.e. how much earlier than
@@ -238,6 +243,7 @@ struct efx_ptp_timeset {
* field in MCDI time sync event.
* @min_synchronisation_ns: Minimum acceptable corrected sync window
* @capabilities: Capabilities flags from the NIC
+ * @ts_corrections: Timestamp correction details
* @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
* timestamps
* @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
@@ -325,7 +331,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)];
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -1082,10 +1088,10 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
{
struct efx_ptp_data *ptp_data = efx->ptp_data;
+ u8 type = efx_tx_csum_type_skb(skb);
struct efx_tx_queue *tx_queue;
- u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
- tx_queue = &ptp_data->channel->tx_queue[type];
+ tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
efx_enqueue_skb(tx_queue, skb);
} else {
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 5e29284c89c9..19cf7cac1e6e 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -797,7 +797,6 @@ int efx_probe_filters(struct efx_nic *efx)
{
int rc;
- init_rwsem(&efx->filter_sem);
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index e71d6d37a317..3c5227afd497 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -21,6 +21,7 @@
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
+#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"
@@ -67,7 +68,7 @@ static const char *const efx_interrupt_mode_names[] = {
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
/**
- * efx_loopback_state - persistent state during a loopback selftest
+ * struct efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
@@ -99,10 +100,8 @@ static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- if (efx->phy_op->test_alive) {
- rc = efx->phy_op->test_alive(efx);
- tests->phy_alive = rc ? -1 : 1;
- }
+ rc = efx_mcdi_phy_test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
return rc;
}
@@ -257,11 +256,8 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
{
int rc;
- if (!efx->phy_op->run_tests)
- return 0;
-
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
+ rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
if (rc == -EPERM)
rc = 0;
@@ -660,8 +656,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
/* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
- state->offload_csum = (tx_queue->label &
- EFX_TXQ_TYPE_OFFLOAD);
+ state->offload_csum = (tx_queue->type &
+ EFX_TXQ_TYPE_OUTER_CSUM);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index ca88ebb4f6b1..a23f085bf298 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -15,8 +15,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TXQ_TYPES];
- int tx_done[EFX_TXQ_TYPES];
+ int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
+ int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a7ea630bb5e6..16347a6d0c47 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -994,7 +994,6 @@ const struct efx_nic_type siena_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
.reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 727201d5eb24..1665529a7271 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -59,13 +59,12 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
- /* We need to consider both queues that the net core sees as one */
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+ /* We need to consider all queues that the net core sees as one */
struct efx_nic *efx = txq1->efx;
+ struct efx_tx_queue *txq2;
unsigned int fill_level;
- fill_level = max(txq1->insert_count - txq1->old_read_count,
- txq2->insert_count - txq2->old_read_count);
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
if (likely(fill_level < efx->txq_stop_thresh))
return;
@@ -85,11 +84,10 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = READ_ONCE(txq1->read_count);
- txq2->old_read_count = READ_ONCE(txq2->read_count);
+ efx_for_each_channel_tx_queue(txq2, txq1->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
- fill_level = max(txq1->insert_count - txq1->old_read_count,
- txq2->insert_count - txq2->old_read_count);
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
@@ -266,8 +264,45 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
++tx_queue->insert_count;
return 0;
}
+
+/* Decide whether we can use TX PIO, i.e. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty, including all queues for the channel. This also ensures that
+ * only one packet at a time can be using the PIO buffer. If the xmit_more
+ * flag is set then we don't use this - there'll be another packet along
+ * shortly and we want to hold off the doorbell.
+ */
+static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
+{
+ struct efx_channel *channel = tx_queue->channel;
+
+ if (!tx_queue->piobuf)
+ return false;
+
+ EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
+ return false;
+
+ return true;
+}
#endif /* EFX_USE_PIO */
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+ struct efx_tx_queue *q;
+
+ efx_for_each_channel_tx_queue(q, channel) {
+ if (q->xmit_pending)
+ efx_nic_push_buffers(q);
+ }
+}
+
/*
* Add a socket buffer to a TX queue
*
@@ -303,8 +338,18 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
* size limit.
*/
if (segments) {
- EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
- rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
+ switch (tx_queue->tso_version) {
+ case 1:
+ rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
+ break;
+ case 2:
+ rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
+ break;
+ case 0: /* No TSO on this queue, SW fallback needed */
+ default:
+ rc = -EINVAL;
+ break;
+ }
if (rc == -EINVAL) {
rc = efx_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
@@ -315,7 +360,7 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
goto err;
#ifdef EFX_USE_PIO
} else if (skb_len <= efx_piobuf_size && !xmit_more &&
- efx_nic_may_tx_pio(tx_queue)) {
+ efx_tx_may_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
goto err;
@@ -336,21 +381,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
efx_tx_maybe_stop_queue(tx_queue);
- /* Pass off to hardware */
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+ tx_queue->xmit_pending = true;
- /* There could be packets left on the partner queue if
- * xmit_more was set. If we do not push those they
- * could be left for a long time and cause a netdev watchdog.
- */
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- } else {
- tx_queue->xmit_more_available = xmit_more;
- }
+ /* Pass off to hardware */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+ efx_tx_send_pending(tx_queue->channel);
if (segments) {
tx_queue->tso_bursts++;
@@ -371,14 +406,8 @@ err:
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
- if (!xmit_more) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- }
+ if (!xmit_more)
+ efx_tx_send_pending(tx_queue->channel);
return NETDEV_TX_OK;
}
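The tso_version switch in __efx_enqueue_skb() above replaces the old per-queue handle_tso() function pointer; version 0 (and anything unknown) yields -EINVAL so the GSO software fallback path runs. A reduced model of the dispatch, using the two enqueue functions declared elsewhere in this patch (wrapper name hypothetical):

	static int example_handle_tso(struct efx_tx_queue *tx_queue,
				      struct sk_buff *skb, bool *data_mapped)
	{
		switch (tx_queue->tso_version) {
		case 1: /* driver-built TSO descriptors */
			return efx_enqueue_skb_tso(tx_queue, skb, data_mapped);
		case 2: /* EF10 TSOv2 option descriptors */
			return efx_ef10_tx_tso_desc(tx_queue, skb, data_mapped);
		default: /* no TSO here; -EINVAL selects the GSO fallback */
			return -EINVAL;
		}
	}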
@@ -472,13 +501,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
}
/* Initiate a packet transmission. We use one channel per CPU
- * (sharing when we have more CPUs than channels). On Falcon, the TX
- * completion events will be directed back to the CPU that transmitted
- * the packet, which should be cache-efficient.
+ * (sharing when we have more CPUs than channels).
*
* Context: non-blocking.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
+ * Should always return NETDEV_TX_OK and consume the skb.
*/
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
@@ -489,19 +515,39 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+ index = skb_get_queue_mapping(skb);
+ type = efx_tx_csum_type_skb(skb);
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ /* There may be existing transmits on the channel that are
+ * waiting for this packet to trigger the doorbell write.
+ * We need to send the packets at this point.
+ */
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return efx_ptp_tx(efx, skb);
}
- index = skb_get_queue_mapping(skb);
- type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
- if (index >= efx->n_tx_channels) {
- index -= efx->n_tx_channels;
- type |= EFX_TXQ_TYPE_HIGHPRI;
- }
tx_queue = efx_get_tx_queue(efx, index, type);
+ if (WARN_ON_ONCE(!tx_queue)) {
+ /* We don't have a TXQ of the right type.
+ * This should never happen, as we don't advertise offload
+ * features unless we can support them.
+ */
+ dev_kfree_skb_any(skb);
+ /* If we're not expecting another transmit and we had something to push
+ * then we need to push it here to get the previous packets out. As
+ * tx_queue is NULL here, look the channel up by index instead.
+ */
+ if (!netdev_xmit_more())
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
return __efx_enqueue_skb(tx_queue, skb);
}
@@ -552,7 +598,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->channel->channel +
- ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
+ ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0));
}
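efx_hard_start_xmit() above decodes the core queue mapping into a channel index plus a high-priority flag: the net core sees 2 * n_tx_channels queues, the upper half of which are high priority, so with n_tx_channels = 4 a mapping of 5 becomes index 1 with EFX_TXQ_TYPE_HIGHPRI set. A sketch of just that decode step (helper name hypothetical):

	static unsigned int example_decode_mapping(unsigned int mapping,
						   unsigned int n_tx_channels,
						   unsigned int *type)
	{
		/* upper half of the core queue range is the high-priority set */
		if (mapping >= n_tx_channels) {
			mapping -= n_tx_channels;
			*type |= EFX_TXQ_TYPE_HIGHPRI;
		}
		return mapping; /* channel index */
	}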
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index a3cf06c5570d..f2c4d2f89919 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -18,4 +18,30 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, size_t len);
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0; /* no checksum offload */
+
+ if (skb->encapsulation &&
+ skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+ /* we only advertise features for IPv4 and IPv6 checksums on
+ * encapsulated packets, so if the checksum is for the inner
+ * packet, it must be one of them; no further checking required.
+ */
+
+ /* Do we also need to offload the outer header checksum? */
+ if (skb_shinfo(skb)->gso_segs > 1 &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+ return EFX_TXQ_TYPE_INNER_CSUM;
+ }
+
+ /* similarly, we only advertise features for IPv4 and IPv6 checksums,
+ * so it must be one of them. No need for further checks.
+ */
+ return EFX_TXQ_TYPE_OUTER_CSUM;
+}
#endif /* EFX_TX_H */
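The decision tree in efx_tx_csum_type_skb() produces one of four TXQ types. Modelled with plain booleans so the outcomes are easy to read off (a sketch with illustrative names, not driver API):

	static unsigned int example_csum_type(bool csum_partial, bool inner_csum,
					      bool tunnel_gso_needs_outer)
	{
		if (!csum_partial)
			return 0; /* no checksum offload */
		if (inner_csum) /* checksum is for the inner packet */
			return tunnel_gso_needs_outer ?
				(EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) :
				EFX_TXQ_TYPE_INNER_CSUM;
		return EFX_TXQ_TYPE_OUTER_CSUM;
	}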
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 793e234819a8..d530cde2b864 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -47,11 +47,12 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
goto fail1;
}
- /* Allocate hardware ring */
+ /* Allocate hardware ring, determine TXQ type */
rc = efx_nic_probe_tx(tx_queue);
if (rc)
goto fail2;
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
return 0;
fail2:
@@ -78,18 +79,14 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_ptp_channel(efx));
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
+ tx_queue->tso_version = 0;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -116,7 +113,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++tx_queue->read_count;
}
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
@@ -141,6 +138,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
@@ -242,7 +240,6 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx;
- struct efx_tx_queue *txq2;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
@@ -261,9 +258,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
- txq2 = efx_tx_queue_partner(tx_queue);
- fill_level = max(tx_queue->insert_count - tx_queue->read_count,
- txq2->insert_count - txq2->read_count);
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index f94078f8ebe5..1fd08a04bd4e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -301,6 +301,7 @@ struct sc92031_priv {
/* for dev->get_stats */
long rx_value;
+ struct net_device *ndev;
};
/* I don't know which registers can be safely read; however, I can guess
@@ -829,10 +830,10 @@ static void _sc92031_link_tasklet(struct net_device *dev)
}
}
-static void sc92031_tasklet(unsigned long data)
+static void sc92031_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct sc92031_priv *priv = netdev_priv(dev);
+ struct sc92031_priv *priv = from_tasklet(priv, t, tasklet);
+ struct net_device *dev = priv->ndev;
void __iomem *port_base = priv->port_base;
u32 intr_status, intr_mask;
@@ -993,15 +994,15 @@ static int sc92031_open(struct net_device *dev)
struct sc92031_priv *priv = netdev_priv(dev);
struct pci_dev *pdev = priv->pdev;
- priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
- &priv->rx_ring_dma_addr);
+ priv->rx_ring = dma_alloc_coherent(&pdev->dev, RX_BUF_LEN,
+ &priv->rx_ring_dma_addr, GFP_KERNEL);
if (unlikely(!priv->rx_ring)) {
err = -ENOMEM;
goto out_alloc_rx_ring;
}
- priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
- &priv->tx_bufs_dma_addr);
+ priv->tx_bufs = dma_alloc_coherent(&pdev->dev, TX_BUF_TOT_LEN,
+ &priv->tx_bufs_dma_addr, GFP_KERNEL);
if (unlikely(!priv->tx_bufs)) {
err = -ENOMEM;
goto out_alloc_tx_bufs;
@@ -1031,11 +1032,11 @@ static int sc92031_open(struct net_device *dev)
return 0;
out_request_irq:
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
out_alloc_rx_ring:
return err;
}
@@ -1058,10 +1059,10 @@ static int sc92031_stop(struct net_device *dev)
spin_unlock_bh(&priv->lock);
free_irq(pdev->irq, dev);
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
return 0;
}
@@ -1108,7 +1109,7 @@ static void sc92031_poll_controller(struct net_device *dev)
disable_irq(irq);
if (sc92031_interrupt(irq, dev) != IRQ_NONE)
- sc92031_tasklet((unsigned long)dev);
+ sc92031_tasklet(&priv->tasklet);
enable_irq(irq);
}
#endif
@@ -1407,11 +1408,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
@@ -1443,10 +1444,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->ethtool_ops = &sc92031_ethtool_ops;
priv = netdev_priv(dev);
+ priv->ndev = dev;
spin_lock_init(&priv->lock);
priv->port_base = port_base;
priv->pdev = pdev;
- tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
+ tasklet_setup(&priv->tasklet, sc92031_tasklet);
/* Fudge tasklet count so the call to sc92031_enable_interrupts at
* sc92031_open will work correctly */
tasklet_disable_nosync(&priv->tasklet);
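The sc92031 changes above follow the standard tasklet_setup()/from_tasklet() conversion: the callback now receives the tasklet pointer and recovers its containing structure via container_of(), so a back-pointer (priv->ndev here) replaces the old unsigned long data argument. A generic sketch of the pattern, with hypothetical foo_* names:

	struct foo_priv {
		struct tasklet_struct tasklet;
		struct net_device *ndev; /* back-pointer, set at probe time */
	};

	static void foo_tasklet(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() keyed on the member name */
		struct foo_priv *priv = from_tasklet(priv, t, tasklet);

		netdev_dbg(priv->ndev, "tasklet ran\n");
	}

	/* probe: priv->ndev = dev; tasklet_setup(&priv->tasklet, foo_tasklet); */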
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cfa460c7db23..620c26f71be8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -789,10 +789,9 @@ static u16 sis900_default_phy(struct net_device * net_dev)
static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
{
u16 cap;
- u16 status;
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
cap = MII_NWAY_CSMA_CD |
((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
@@ -1302,7 +1301,7 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
/**
* sis900_timer - sis900 timer routine
- * @data: pointer to sis900 net device
+ * @t: timer list containing a pointer to sis900 net device
*
* On each timer tick we check two things,
* link status (ON/OFF) and link mode (10/100/Full/Half)
@@ -1536,6 +1535,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
/**
* sis900_tx_timeout - sis900 transmit timeout routine
* @net_dev: the net device to transmit
+ * @txqueue: index of hanging queue
*
* print transmit timeout status
* disable interrupts and do some tasks
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index d950b312c418..51cd7dca91cd 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -374,13 +374,15 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
- ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
ep->tx_ring = ring_space;
ep->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
ep->rx_ring = ring_space;
@@ -493,9 +495,11 @@ out:
return ret;
err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
err_out_iounmap:
pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
@@ -918,8 +922,10 @@ static void epic_init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
ep->rx_ring[i].rxstatus = DescOwn;
}
ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -955,8 +961,9 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
- ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = 0x100000; /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
@@ -1036,8 +1043,9 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->tx_ring[entry].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
@@ -1178,20 +1186,21 @@ static int epic_rx(struct net_device *dev, int budget)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
ep->rx_skbuff[entry] = NULL;
}
@@ -1213,8 +1222,10 @@ static int epic_rx(struct net_device *dev, int budget)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
work_done++;
}
/* AV: shouldn't we add a barrier here? */
@@ -1294,8 +1305,8 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
- pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
+ ep->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1305,8 +1316,8 @@ static int epic_close(struct net_device *dev)
ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
- pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
@@ -1502,8 +1513,10 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
pci_release_regions(pdev);
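The epic100 conversion above (like the sc92031 and smsc9420 ones around it) is the mechanical move from the legacy PCI DMA wrappers to the generic DMA API: the pci_dev argument becomes &pdev->dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, and the coherent allocator gains an explicit GFP flag. A minimal sketch of the correspondence (ring_alloc/buf_map_tx are hypothetical helpers):

	static inline void *ring_alloc(struct pci_dev *pdev, size_t len,
				       dma_addr_t *handle)
	{
		/* was: pci_alloc_consistent(pdev, len, handle) */
		return dma_alloc_coherent(&pdev->dev, len, handle, GFP_KERNEL);
	}

	static inline dma_addr_t buf_map_tx(struct pci_dev *pdev, void *buf,
					    size_t len)
	{
		/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
		return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	}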
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 1c4fea9c3ec4..f6b73afd1879 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -535,10 +535,10 @@ static inline void smc_rcv(struct net_device *dev)
/*
* This is called to actually send a packet to the chip.
*/
-static void smc_hardware_send_pkt(unsigned long data)
+static void smc_hardware_send_pkt(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct smc_local *lp = netdev_priv(dev);
+ struct smc_local *lp = from_tasklet(lp, t, tx_task);
+ struct net_device *dev = lp->dev;
void __iomem *ioaddr = lp->base;
struct sk_buff *skb;
unsigned int packet_no, len;
@@ -688,7 +688,7 @@ smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Allocation succeeded: push packet to the chip's own memory
* immediately.
*/
- smc_hardware_send_pkt((unsigned long)dev);
+ smc_hardware_send_pkt(&lp->tx_task);
}
return NETDEV_TX_OK;
@@ -1036,7 +1036,6 @@ static void smc_phy_configure(struct work_struct *work)
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
- int status;
DBG(3, dev, "smc_program_phy()\n");
@@ -1110,7 +1109,7 @@ static void smc_phy_configure(struct work_struct *work)
* auto-negotiation is restarted, sometimes it isn't ready and
* the link does not come up.
*/
- status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
+ smc_phy_read(dev, phyaddr, MII_ADVERTISE);
DBG(2, dev, "phy caps=%x\n", my_phy_caps);
DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
@@ -1965,7 +1964,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
dev->netdev_ops = &smc_netdev_ops;
dev->ethtool_ops = &smc_ethtool_ops;
- tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ tasklet_setup(&lp->tx_task, smc_hardware_send_pkt);
INIT_WORK(&lp->phy_configure, smc_phy_configure);
lp->dev = dev;
lp->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index fc168f85e7af..823d9a7184fe 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1196,9 +1196,8 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
SMSC_WARN(pdata, hw, "Timed out waiting for "
"RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
} else {
- unsigned int temp;
while (pktwords--)
- temp = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ smsc911x_reg_read(pdata, RX_DATA_FIFO);
}
}
@@ -2055,7 +2054,6 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
u8 address, u8 data)
{
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
- u32 temp;
int ret;
SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
@@ -2066,7 +2064,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
/* Workaround for hardware read-after-write restriction */
- temp = smsc911x_reg_read(pdata, BYTE_TEST);
+ smsc911x_reg_read(pdata, BYTE_TEST);
ret = smsc911x_eeprom_send_cmd(pdata, op);
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 42bef04d65ba..c1dab009415d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -497,8 +497,9 @@ static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
if (skb) {
BUG_ON(!pd->tx_buffers[i].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
@@ -530,8 +531,9 @@ static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
dev_kfree_skb_any(pd->rx_buffers[i].skb);
if (pd->rx_buffers[i].mapping)
- pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->rx_buffers[i].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_ring[i].status = 0;
pd->rx_ring[i].length = 0;
@@ -749,8 +751,8 @@ static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
dev->stats.rx_packets++;
dev->stats.rx_bytes += packet_length;
- pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev, pd->rx_buffers[index].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_buffers[index].mapping = 0;
skb = pd->rx_buffers[index].skb;
@@ -782,9 +784,9 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
if (unlikely(!skb))
return -ENOMEM;
- mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb_tail_pointer(skb),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
return -ENOMEM;
@@ -901,8 +903,10 @@ static void smsc9420_complete_tx(struct net_device *dev)
BUG_ON(!pd->tx_buffers[index].skb);
BUG_ON(!pd->tx_buffers[index].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
- pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[index].mapping,
+ pd->tx_buffers[index].skb->len,
+ DMA_TO_DEVICE);
pd->tx_buffers[index].mapping = 0;
dev_kfree_skb_any(pd->tx_buffers[index].skb);
@@ -932,9 +936,9 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
BUG_ON(pd->tx_buffers[index].skb);
BUG_ON(pd->tx_buffers[index].mapping);
- mapping = pci_map_single(pd->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
"pci_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
@@ -1522,7 +1526,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_netdev_2;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
netdev_err(dev, "No usable DMA configuration, aborting\n");
goto out_free_regions_3;
}
@@ -1540,10 +1544,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd = netdev_priv(dev);
/* pci descriptors are created in the PCI consistent area */
- pd->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
- sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
- &pd->rx_dma_addr);
+ pd->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &pd->rx_dma_addr, GFP_KERNEL);
if (!pd->rx_ring)
goto out_free_io_4;
@@ -1599,8 +1602,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_dmadesc_5:
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
out_free_io_4:
iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
out_free_regions_3:
@@ -1632,8 +1636,9 @@ static void smsc9420_remove(struct pci_dev *pdev)
BUG_ON(!pd->tx_ring);
BUG_ON(!pd->rx_ring);
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 806eb651cea3..1503cc9ec6e2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -6,6 +6,7 @@
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -1833,6 +1834,14 @@ static const struct net_device_ops netsec_netdev_ops = {
static int netsec_of_probe(struct platform_device *pdev,
struct netsec_priv *priv, u32 *phy_addr)
{
+ int err;
+
+ err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface);
+ if (err) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ return err;
+ }
+
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!priv->phy_np) {
dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
@@ -1859,6 +1868,14 @@ static int netsec_acpi_probe(struct platform_device *pdev,
if (!IS_ENABLED(CONFIG_ACPI))
return -ENODEV;
+ /* ACPI systems are assumed to configure the PHY in firmware, so
+ * there is really no need to discover the PHY mode from the DSDT.
+ * Since firmware that configures the PHY correctly but passes the
+ * wrong mode string in the phy-mode device property is known to
+ * exist in the field, we have no choice but to ignore it.
+ */
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
if (ret) {
dev_err(&pdev->dev,
@@ -1995,13 +2012,6 @@ static int netsec_probe(struct platform_device *pdev)
priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
NETIF_MSG_LINK | NETIF_MSG_PROBE;
- priv->phy_interface = device_get_phy_mode(&pdev->dev);
- if ((int)priv->phy_interface < 0) {
- dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
- ret = -ENODEV;
- goto free_ndev;
- }
-
priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
resource_size(mmio_res));
if (!priv->ioaddr) {
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 81b554dd7221..501b9c7aba56 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1585,7 +1585,7 @@ static int ave_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- ndev = alloc_etherdev(sizeof(struct ave_private));
+ ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
if (!ndev) {
dev_err(dev, "can't allocate ethernet device\n");
return -ENOMEM;
@@ -1632,7 +1632,7 @@ static int ave_probe(struct platform_device *pdev)
}
ret = dma_set_mask(dev, dma_mask);
if (ret)
- goto out_free_netdev;
+ return ret;
priv->tx.ndesc = AVE_NR_TXDESC;
priv->rx.ndesc = AVE_NR_RXDESC;
@@ -1645,10 +1645,8 @@ static int ave_probe(struct platform_device *pdev)
if (!name)
break;
priv->clk[i] = devm_clk_get(dev, name);
- if (IS_ERR(priv->clk[i])) {
- ret = PTR_ERR(priv->clk[i]);
- goto out_free_netdev;
- }
+ if (IS_ERR(priv->clk[i]))
+ return PTR_ERR(priv->clk[i]);
priv->nclks++;
}
@@ -1657,10 +1655,8 @@ static int ave_probe(struct platform_device *pdev)
if (!name)
break;
priv->rst[i] = devm_reset_control_get_shared(dev, name);
- if (IS_ERR(priv->rst[i])) {
- ret = PTR_ERR(priv->rst[i]);
- goto out_free_netdev;
- }
+ if (IS_ERR(priv->rst[i]))
+ return PTR_ERR(priv->rst[i]);
priv->nrsts++;
}
@@ -1669,26 +1665,23 @@ static int ave_probe(struct platform_device *pdev)
1, 0, &args);
if (ret) {
dev_err(dev, "can't get syscon-phy-mode property\n");
- goto out_free_netdev;
+ return ret;
}
priv->regmap = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(priv->regmap)) {
dev_err(dev, "can't map syscon-phy-mode\n");
- ret = PTR_ERR(priv->regmap);
- goto out_free_netdev;
+ return PTR_ERR(priv->regmap);
}
ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
if (ret) {
dev_err(dev, "invalid phy-mode setting\n");
- goto out_free_netdev;
+ return ret;
}
priv->mdio = devm_mdiobus_alloc(dev);
- if (!priv->mdio) {
- ret = -ENOMEM;
- goto out_free_netdev;
- }
+ if (!priv->mdio)
+ return -ENOMEM;
priv->mdio->priv = ndev;
priv->mdio->parent = dev;
priv->mdio->read = ave_mdiobus_read;
@@ -1725,8 +1718,6 @@ static int ave_probe(struct platform_device *pdev)
out_del_napi:
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
-out_free_netdev:
- free_netdev(ndev);
return ret;
}
@@ -1739,7 +1730,6 @@ static int ave_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
- free_netdev(ndev);
return 0;
}
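The sni_ave changes work because devm_alloc_etherdev() ties the netdev's lifetime to the struct device: every probe error path can simply return, and remove() must no longer call free_netdev(). A sketch of the resulting shape, with hypothetical names:

	struct foo_priv { struct napi_struct napi; }; /* placeholder */

	static int foo_probe(struct platform_device *pdev)
	{
		struct net_device *ndev;

		/* freed automatically when the device is unbound */
		ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct foo_priv));
		if (!ndev)
			return -ENOMEM;

		/* later failures may just 'return err;' with no unwinding
		 * needed for the netdev allocation itself
		 */
		return 0;
	}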
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9a47c5aec91a..53f14c5a9e02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,7 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
- select MDIO_XPCS
+ select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
@@ -209,6 +209,16 @@ config DWMAC_IMX8
device driver. This driver is used for i.MX8 series like
iMX8MP/iMX8DXL GMAC ethernet controller.
+config DWMAC_INTEL_PLAT
+ tristate "Intel dwmac support"
+ depends on OF && COMMON_CLK
+ depends on STMMAC_ETH
+ help
+ Support for Ethernet controllers on Intel SoCs.
+
+ This selects the Intel platform specific glue layer support for
+ the stmmac device driver. This driver is used for the Intel Keem Bay
+ SoC.
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 295615ab36a7..24e6145d4eae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
+obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 52971f5293aa..d2cdc02d9f94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- DMA_RX_SIZE) *
+ priv->dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -154,7 +154,8 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
* to keep explicit chaining in the descriptor.
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
- ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
+ ((tx_q->dirty_tx + 1) %
+ priv->dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 127f75862962..df7de50497a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,7 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
-#include <linux/mdio-xpcs.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
@@ -42,9 +42,16 @@
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
-/* These need to be power of two, and >= 4 */
-#define DMA_TX_SIZE 512
-#define DMA_RX_SIZE 512
+/* TX and RX descriptor ring lengths; these need to be a power of two.
+ * A TX descriptor length below 64 may cause transmit queue timeout errors.
+ * An RX descriptor length below 64 may cause inconsistent Rx chain errors.
+ */
+#define DMA_MIN_TX_SIZE 64
+#define DMA_MAX_TX_SIZE 1024
+#define DMA_DEFAULT_TX_SIZE 512
+#define DMA_MIN_RX_SIZE 64
+#define DMA_MAX_RX_SIZE 1024
+#define DMA_DEFAULT_RX_SIZE 512
#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
#undef FRAME_FILTER_DEBUG
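The new DMA_{MIN,MAX,DEFAULT}_{TX,RX}_SIZE bounds keep the ring lengths a power of two because STMMAC_GET_ENTRY() implements the ring wrap with a mask rather than a modulo; for example, with size = 512, (511 + 1) & 511 = 0, so the index wraps cleanly to the ring start. A one-line illustration (EXAMPLE_* name hypothetical):

	/* mask-based wrap: only correct when size is a power of two */
	#define EXAMPLE_GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))
	/* EXAMPLE_GET_ENTRY(511, 512) == 0, EXAMPLE_GET_ENTRY(5, 512) == 6 */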
@@ -474,6 +481,8 @@ struct mac_device_info {
unsigned int num_vlan;
u32 vlan_filter[32];
unsigned int promisc;
+ bool vlan_fail_q_en;
+ u8 vlan_fail_q;
};
struct stmmac_rx_routing {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 3c5df5eeed6c..efef5476a577 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -129,8 +129,7 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct imx_priv_data *dwmac = priv;
- if (dwmac->clk_tx)
- clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->clk_mem);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
new file mode 100644
index 000000000000..f61cb997a8f6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Intel DWMAC platform driver
+ *
+ * Copyright(C) 2020 Intel Corporation
+ */
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+struct intel_dwmac {
+ struct device *dev;
+ struct clk *tx_clk;
+ const struct intel_dwmac_data *data;
+};
+
+struct intel_dwmac_data {
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ unsigned long ptp_ref_clk_rate;
+ unsigned long tx_clk_rate;
+ bool tx_clk_en;
+};
+
+static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct intel_dwmac *dwmac = priv;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(dwmac->tx_clk);
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "Invalid speed\n");
+ return;
+ }
+
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret)
+ dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
+}
+
+static const struct intel_dwmac_data kmb_data = {
+ .fix_mac_speed = kmb_eth_fix_mac_speed,
+ .ptp_ref_clk_rate = 200000000,
+ .tx_clk_rate = 125000000,
+ .tx_clk_en = true,
+};
+
+static const struct of_device_id intel_eth_plat_match[] = {
+ { .compatible = "intel,keembay-dwmac", .data = &kmb_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
+
+static int intel_eth_plat_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct of_device_id *match;
+ struct intel_dwmac *dwmac;
+ unsigned long rate;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ dwmac->tx_clk = NULL;
+
+ match = of_match_device(intel_eth_plat_match, &pdev->dev);
+ if (match && match->data) {
+ dwmac->data = (const struct intel_dwmac_data *)match->data;
+
+ if (dwmac->data->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ clk_prepare_enable(dwmac->tx_clk);
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ /* Check and configure PTP ref clock rate */
+ rate = clk_get_rate(plat_dat->clk_ptp_ref);
+ if (dwmac->data->ptp_ref_clk_rate &&
+ rate != dwmac->data->ptp_ref_clk_rate) {
+ rate = dwmac->data->ptp_ref_clk_rate;
+ ret = clk_set_rate(plat_dat->clk_ptp_ref, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set clk_ptp_ref\n");
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
+
+ if (plat_dat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int intel_eth_plat_remove(struct platform_device *pdev)
+{
+ struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ clk_disable_unprepare(dwmac->tx_clk);
+
+ return ret;
+}
+
+static struct platform_driver intel_eth_plat_driver = {
+ .probe = intel_eth_plat_probe,
+ .remove = intel_eth_plat_remove,
+ .driver = {
+ .name = "intel-eth-plat",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = intel_eth_plat_match,
+ },
+};
+module_platform_driver(intel_eth_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DWMAC platform driver");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 9e6d60e75f85..81ee0a071b4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
+#include "dwmac4.h"
#include "stmmac.h"
struct intel_priv_data {
@@ -295,6 +296,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_blen[2] = 16;
plat->ptp_max_adj = plat->clk_ptp_rate;
+ plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
@@ -321,6 +323,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
+ plat->vlan_fail_q_en = true;
+
+ /* Use the last Rx queue */
+ plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+
return 0;
}
@@ -627,6 +634,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
res.wol_irq = pci_irq_vector(pdev, 0);
res.irq = pci_irq_vector(pdev, 0);
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
pci_free_irq_vectors(pdev);
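
Note on GMAC_1US_TIC_COUNTER: the EEE LPI timers need a 1 microsecond reference derived from the clock rate, so the register is programmed with (rate in MHz) minus one. A minimal sketch of the arithmetic used in both the PCI and platform probe paths above:

/* Example: a 200 MHz reference gives (200000000 / 1000000) - 1 = 199,
 * which the MAC counts down to mark one microsecond for the EEE timers.
 */
static u32 example_1us_tic(unsigned long clk_rate_hz)
{
	return (clk_rate_hz / 1000000) - 1;
}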
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 2d5573b3dee1..6ef30252bfe0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/**
- * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ * DOC: dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
*
* Copyright (C) 2014 Chen-Zhi (Roger Chen)
*
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 61f3249bd724..592b043f9676 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -76,6 +76,7 @@
#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_PACKET_FILTER_VTFE BIT(16)
#define GMAC_PACKET_FILTER_IPFE BIT(20)
+#define GMAC_PACKET_FILTER_RA BIT(31)
#define GMAC_MAX_PERFECT_ADDRESSES 128
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index ecd834e0e121..002791b77356 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -618,7 +618,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value &= ~GMAC_PACKET_FILTER_PM;
value &= ~GMAC_PACKET_FILTER_PR;
if (dev->flags & IFF_PROMISC) {
- value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
+ /* VLAN Tag Filter Fail Packets Queuing */
+ if (hw->vlan_fail_q_en) {
+ value = readl(ioaddr + GMAC_RXQ_CTRL4);
+ value &= ~GMAC_RXQCTRL_VFFQ_MASK;
+ value |= GMAC_RXQCTRL_VFFQE |
+ (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
+ writel(value, ioaddr + GMAC_RXQ_CTRL4);
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
+ } else {
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
+ }
+
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > hw->multicast_filter_bins)) {
/* Pass all multi */
@@ -680,7 +691,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_PACKET_FILTER);
- if (dev->flags & IFF_PROMISC) {
+ if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
if (!hw->promisc) {
hw->promisc = 1;
dwmac4_vlan_promisc_enable(dev, hw);
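
What the dwmac4_set_filter() change does: with vlan_fail_q_en set, promiscuous mode keeps hardware VLAN filtering active and steers packets that fail the VLAN filter to a dedicated RX queue (PR | RA) instead of disabling VLAN promiscuity handling (PR | PCF). A condensed sketch of the register sequence, using the GMAC_RXQ_CTRL4 definitions added to dwmac5.h below:

static void example_enable_vffq(void __iomem *ioaddr, u32 fail_q)
{
	u32 val = readl(ioaddr + GMAC_RXQ_CTRL4);

	/* Route VLAN-tag-filter failures to @fail_q and enable the feature */
	val &= ~GMAC_RXQCTRL_VFFQ_MASK;
	val |= GMAC_RXQCTRL_VFFQE | (fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
	writel(val, ioaddr + GMAC_RXQ_CTRL4);
}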
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index eff82065a501..c6540b003b43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -494,10 +494,9 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
-static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
*len = le32_to_cpu(p->des2) & RDES2_HL;
- return 0;
}
static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 3e8faa96b4d4..56b0762c1276 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -92,6 +92,12 @@
#define TCEIE BIT(0)
#define DMA_ECC_INT_STATUS 0x00001088
+/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */
+#define GMAC_RXQ_CTRL4 0x00000094
+#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17)
+#define GMAC_RXQCTRL_VFFQ_SHIFT 17
+#define GMAC_RXQCTRL_VFFQE BIT(16)
+
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c3d654cfa9ef..0aaf19ab5672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -286,11 +286,10 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
return -EINVAL;
}
-static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
- return 0;
}
static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ffe2d63389b8..e2dca9b6e992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -90,7 +90,7 @@ struct stmmac_desc_ops {
/* RSS */
int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
enum pkt_hash_types *type);
- int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
@@ -150,7 +150,7 @@ struct stmmac_desc_ops {
#define stmmac_get_rx_hash(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_rx_hash, __args)
#define stmmac_get_rx_header_len(__priv, __args...) \
- stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+ stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args)
#define stmmac_set_desc_sec_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
#define stmmac_set_desc_sarc(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 14bd5e7b9875..8ad900949dc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 545696971f65..727e68dfaf1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -171,9 +171,11 @@ struct stmmac_priv {
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
@@ -266,6 +268,8 @@ int stmmac_dvr_probe(struct device *device,
struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 814879f91f76..9e54f953634b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -440,6 +440,33 @@ static int stmmac_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(priv->phylink);
}
+static void stmmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = DMA_MAX_RX_SIZE;
+ ring->tx_max_pending = DMA_MAX_TX_SIZE;
+ ring->rx_pending = priv->dma_rx_size;
+ ring->tx_pending = priv->dma_tx_size;
+}
+
+static int stmmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending < DMA_MIN_RX_SIZE ||
+ ring->rx_pending > DMA_MAX_RX_SIZE ||
+ !is_power_of_2(ring->rx_pending) ||
+ ring->tx_pending < DMA_MIN_TX_SIZE ||
+ ring->tx_pending > DMA_MAX_TX_SIZE ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ return stmmac_reinit_ringparam(netdev, ring->rx_pending,
+ ring->tx_pending);
+}
+
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -843,6 +870,30 @@ static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
priv->plat->rx_queues_to_use);
}
+static void stmmac_get_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ chan->rx_count = priv->plat->rx_queues_to_use;
+ chan->tx_count = priv->plat->tx_queues_to_use;
+ chan->max_rx = priv->dma_cap.number_rx_queues;
+ chan->max_tx = priv->dma_cap.number_tx_queues;
+}
+
+static int stmmac_set_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (chan->rx_count > priv->dma_cap.number_rx_queues ||
+ chan->tx_count > priv->dma_cap.number_tx_queues ||
+ !chan->rx_count || !chan->tx_count)
+ return -EINVAL;
+
+ return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
+}
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -926,6 +977,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
.nway_reset = stmmac_nway_reset,
+ .get_ringparam = stmmac_get_ringparam,
+ .set_ringparam = stmmac_set_ringparam,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.self_test = stmmac_selftest_run,
@@ -944,6 +997,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_channels = stmmac_get_channels,
+ .set_channels = stmmac_set_channels,
.get_tunable = stmmac_get_tunable,
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
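
stmmac_set_ringparam() rejects non-power-of-two sizes because the ring cursors wrap with a bit mask rather than a modulo; from userspace, `ethtool -G eth0 rx 1024 tx 1024` succeeds while a size of 1000 returns -EINVAL. A sketch of the wrap, mirroring the driver's STMMAC_GET_ENTRY():

/* Valid only when size is a power of two, hence the is_power_of_2()
 * guards in stmmac_set_ringparam() above.
 */
#define EXAMPLE_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))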
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b56b13d64ab4..d833908b660a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -63,8 +63,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
}
}
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -297,7 +271,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -315,7 +289,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -360,7 +334,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
/**
* stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t: timer_list struct containing private info
* Description:
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
@@ -736,7 +710,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
* a proprietary structure used to pass information to the driver.
* Description:
* This function obtain the current hardware timestamping settings
- as requested.
+ * as requested.
*/
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
@@ -789,14 +763,14 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- if (priv->plat->clk_ptp_ref)
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
+ * @duplex: duplex passed to the next function
* Description: It is used for configuring the flow control in all queues
*/
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
@@ -1150,7 +1124,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
head_rx = (void *)rx_q->dma_rx;
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
}
}
@@ -1173,7 +1147,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
else
head_tx = (void *)tx_q->dma_tx;
- stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
}
}
@@ -1217,16 +1191,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the RX descriptors */
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
}
@@ -1243,8 +1217,8 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++) {
- int last = (i == (DMA_TX_SIZE - 1));
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ int last = (i == (priv->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1398,7 +1372,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
stmmac_clear_rx_descriptors(priv, queue);
- for (i = 0; i < DMA_RX_SIZE; i++) {
+ for (i = 0; i < priv->dma_rx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1413,16 +1387,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}
rx_q->cur_rx = 0;
- rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+ rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
}
}
@@ -1436,7 +1412,7 @@ err_init_rx_buffers:
if (queue == 0)
break;
- i = DMA_RX_SIZE;
+ i = priv->dma_rx_size;
queue--;
}
@@ -1468,13 +1444,15 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
}
- for (i = 0; i < DMA_TX_SIZE; i++) {
+ for (i = 0; i < priv->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
@@ -1538,7 +1516,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
@@ -1551,7 +1529,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < priv->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i);
}
@@ -1573,11 +1551,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, DMA_RX_SIZE *
+ dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1616,7 +1594,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
addr = tx_q->dma_tx;
}
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1649,7 +1627,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DMA_RX_SIZE;
+ pp_params.pool_size = priv->dma_rx_size;
num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
@@ -1663,14 +1641,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
}
- rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
goto err_dma;
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_erx)
@@ -1678,7 +1658,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_rx)
@@ -1717,13 +1698,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
goto err_dma;
- tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -1736,7 +1717,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
else
size = sizeof(struct dma_desc);
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -1965,6 +1946,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
* @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
@@ -2046,7 +2028,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2055,7 +2037,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
queue))) &&
- stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
netif_dbg(priv, tx_done, priv->dev,
"%s: restart transmit\n", __func__);
@@ -2328,7 +2310,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
rx_q->dma_rx_phy, chan);
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size *
+ sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr, chan);
}
@@ -2357,7 +2340,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_tx_timer - mitigation sw timer for tx.
- * @data: data pointer
+ * @t: data pointer
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
@@ -2412,12 +2395,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (DMA_TX_SIZE - 1), chan);
+ (priv->dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (DMA_RX_SIZE - 1), chan);
+ (priv->dma_rx_size - 1), chan);
}
/**
@@ -2620,6 +2603,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -2740,6 +2724,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
}
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2797,6 +2785,11 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ if (!priv->dma_tx_size)
+ priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_rx_size)
+ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2868,7 +2861,7 @@ static int stmmac_open(struct net_device *dev)
}
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
return 0;
@@ -2911,8 +2904,6 @@ static int stmmac_release(struct net_device *dev)
phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
- stmmac_stop_all_queues(priv);
-
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -2968,7 +2959,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
return true;
}
@@ -2977,7 +2968,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
* @priv: driver private structure
* @des: buffer start address
* @total_len: total length to fill in descriptors
- * @last_segmant: condition for the last descriptor
+ * @last_segment: condition for the last descriptor
* @queue: TX queue index
* Description:
* This function fills descriptor and request new descriptors according to
@@ -2996,7 +2987,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
while (tmp_len > 0) {
dma_addr_t curr_addr;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3103,7 +3095,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -3210,7 +3203,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -3373,7 +3366,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -3441,7 +3434,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -3626,7 +3619,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -3638,15 +3631,15 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
struct dma_desc *p,
int status, unsigned int len)
{
- int ret, coe = priv->hw->rx_csum;
unsigned int plen = 0, hlen = 0;
+ int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
if (priv->sph && len)
return 0;
/* First descriptor, get split header length */
- ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ stmmac_get_rx_header_len(priv, p, &hlen);
if (priv->sph && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
@@ -3709,7 +3702,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
rx_head = (void *)rx_q->dma_rx;
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
}
while (count < limit) {
unsigned int buf1_len = 0, buf2_len = 0;
@@ -3751,7 +3744,8 @@ read_again:
if (unlikely(status & dma_own))
break;
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -3926,7 +3920,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
- work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -3943,6 +3937,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
/**
* stmmac_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: the index of the hanging transmit queue
* Description: this function is called when a packet transmission fails to
* complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
@@ -4319,11 +4314,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- DMA_RX_SIZE, 1, seq);
+ priv->dma_rx_size, 1, seq);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- DMA_RX_SIZE, 0, seq);
+ priv->dma_rx_size, 0, seq);
}
}
@@ -4335,11 +4330,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- DMA_TX_SIZE, 1, seq);
+ priv->dma_tx_size, 1, seq);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- DMA_TX_SIZE, 0, seq);
+ priv->dma_tx_size, 0, seq);
}
}
@@ -4725,6 +4720,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
/* Run HW quirks, if any */
if (priv->hwif_quirks) {
ret = priv->hwif_quirks(priv);
@@ -4747,6 +4745,87 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_tx_napi_add(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_rx_size = rx_size;
+ priv->dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -4763,7 +4842,7 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, rxq, maxq;
+ u32 rxq;
int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
@@ -4827,10 +4906,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_ether_addr(priv);
- /* Configure real RX and TX queues */
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4928,25 +5003,7 @@ int stmmac_dvr_probe(struct device *device,
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
/* Setup channels NAPI */
- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- spin_lock_init(&ch->lock);
- ch->priv_data = priv;
- ch->index = queue;
-
- if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
- }
- if (queue < priv->plat->tx_queues_to_use) {
- netif_tx_napi_add(ndev, &ch->tx_napi,
- stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
- }
- }
+ stmmac_napi_add(ndev);
mutex_init(&priv->lock);
@@ -5011,14 +5068,7 @@ error_phy_setup:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- if (queue < priv->plat->rx_queues_to_use)
- netif_napi_del(&ch->rx_napi);
- if (queue < priv->plat->tx_queues_to_use)
- netif_napi_del(&ch->tx_napi);
- }
+ stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
@@ -5086,7 +5136,6 @@ int stmmac_suspend(struct device *dev)
mutex_lock(&priv->lock);
netif_device_detach(ndev);
- stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv);
@@ -5115,8 +5164,7 @@ int stmmac_suspend(struct device *dev)
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- if (priv->plat->clk_ptp_ref)
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
}
@@ -5129,7 +5177,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_reset_queues_param - reset queue parameters
- * @dev: device pointer
+ * @priv: device pointer
*/
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
@@ -5213,8 +5261,6 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
-
mutex_unlock(&priv->lock);
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
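
Both reinit helpers added in this file share one shape: quiesce the interface, swap the parameter while nothing is in flight, then reopen so the rings are reallocated at the new size. A hedged generic sketch (apply() is a hypothetical stand-in for the parameter update, and the stmmac_release()/stmmac_open() calls assume this file's context):

static int example_reinit(struct net_device *dev,
			  void (*apply)(struct net_device *dev))
{
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);	/* tears down rings, DMA, NAPI */

	apply(dev);			/* safe: no traffic in flight */

	if (netif_running(dev))
		ret = stmmac_open(dev);	/* reallocates with new values */

	return ret;
}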
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f32317fa75c8..af34a4cadbb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -125,6 +125,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
/**
* stmmac_mtl_setup - parse DT parameters for multiple queues configuration
* @pdev: platform device
+ * @plat: enet data
*/
static int stmmac_mtl_setup(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
@@ -360,7 +361,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
/**
* stmmac_of_get_mac_mode - retrieves the interface of the MAC
- * @np - device-tree node
+ * @np: device-tree node
* Description:
* Similar to `of_get_phy_mode()`, this function will retrieve (from
* the device-tree) the interface mode on the MAC side. This assumes
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index bf195adee393..0462dcc93e53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -796,7 +796,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
u32 tail;
tail = priv->rx_queue[i].dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b624e177ec71..9ff894ba8d3e 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -454,8 +454,8 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
-#define RX_USED_ADD(x, y)
-#define RX_USED_SET(x, y)
+#define RX_USED_ADD(x, y) do { } while (0)
+#define RX_USED_SET(x, y) do { } while (0)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
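
Why the cassini macros gain a do { } while (0) body: when the stats variants compile out, `else RX_USED_ADD(x, y);` would otherwise expand to a bare `;`, which gcc's -Wempty-body flags and which reads ambiguously. With the loop body the macro stays a single real statement. A small illustration (example() is hypothetical):

#define RX_USED_ADD_OFF(x, y)	do { } while (0)	/* stats disabled */

static void example(int cond, int *used)
{
	if (cond)
		*used = 0;
	else
		RX_USED_ADD_OFF(used, 1);	/* still one statement */
}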
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 34fdbc6d6031..c646575e79d5 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -209,13 +209,13 @@ static void bigmac_clean_rings(struct bigmac *bp)
}
}
-static void bigmac_init_rings(struct bigmac *bp, int from_irq)
+static void bigmac_init_rings(struct bigmac *bp, bool non_blocking)
{
struct bmac_init_block *bb = bp->bmac_block;
int i;
gfp_t gfp_flags = GFP_KERNEL;
- if (from_irq || in_interrupt())
+ if (non_blocking)
gfp_flags = GFP_ATOMIC;
bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
@@ -489,7 +489,7 @@ static void bigmac_tcvr_init(struct bigmac *bp)
}
}
-static int bigmac_init_hw(struct bigmac *, int);
+static int bigmac_init_hw(struct bigmac *, bool);
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
@@ -549,7 +549,7 @@ static void bigmac_timer(struct timer_list *t)
if (ret == -1) {
printk(KERN_ERR "%s: Link down, cable problem?\n",
bp->dev->name);
- ret = bigmac_init_hw(bp, 0);
+ ret = bigmac_init_hw(bp, true);
if (ret) {
printk(KERN_ERR "%s: Error, cannot re-init the "
"BigMAC.\n", bp->dev->name);
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
add_timer(&bp->bigmac_timer);
}
-static int bigmac_init_hw(struct bigmac *bp, int from_irq)
+static int bigmac_init_hw(struct bigmac *bp, bool non_blocking)
{
void __iomem *gregs = bp->gregs;
void __iomem *cregs = bp->creg;
@@ -635,7 +635,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
qec_init(bp);
/* Alloc and reset the tx/rx descriptor chains. */
- bigmac_init_rings(bp, from_irq);
+ bigmac_init_rings(bp, non_blocking);
/* Initialize the PHY. */
bigmac_tcvr_init(bp);
@@ -749,7 +749,7 @@ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_st
}
printk(" RESET\n");
- bigmac_init_hw(bp, 1);
+ bigmac_init_hw(bp, true);
}
/* BigMAC transmit complete service routines. */
@@ -921,7 +921,7 @@ static int bigmac_open(struct net_device *dev)
return ret;
}
timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
- ret = bigmac_init_hw(bp, 0);
+ ret = bigmac_init_hw(bp, false);
if (ret)
free_irq(dev->irq, bp);
return ret;
@@ -945,7 +945,7 @@ static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bigmac *bp = netdev_priv(dev);
- bigmac_init_hw(bp, 0);
+ bigmac_init_hw(bp, true);
netif_wake_queue(dev);
}
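
Context for the sunbmac signature change: in_interrupt() is being removed from drivers because a callee should not guess its execution context; instead each caller states it explicitly, and the allocation site picks the GFP mask from that. A minimal sketch of the resulting pattern:

#include <linux/slab.h>

static void *example_alloc(size_t len, bool non_blocking)
{
	gfp_t gfp = non_blocking ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(len, gfp);
}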
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8deb943ca5de..58f142ee78a3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2965,9 +2965,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* It is guaranteed that the returned buffer will be at least
* PAGE_SIZE aligned.
*/
- gp->init_block = (struct gem_init_block *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
- &gp->gblock_dvma, GFP_KERNEL);
+ gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+ &gp->gblock_dvma, GFP_KERNEL);
if (!gp->init_block) {
pr_err("Cannot allocate init block, aborting\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8dc6c9ff22e1..80fde5f06fce 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1168,7 +1168,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
*(__sum16 *)(skb->data + offset) = 0;
csum = skb_copy_and_csum_bits(skb, start,
nskb->data + start,
- skb->len - start, 0);
+ skb->len - start);
/* add in the header checksums */
if (skb->protocol == htons(ETH_P_IP)) {
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index eb1c6b03c329..df26cea45904 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -513,7 +513,7 @@ void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
{
- char *str = NULL;
+ char __maybe_unused *str = NULL;
XLGMAC_PR("\n");
XLGMAC_PR("=====================================================\n");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e28727297563..b8f4f419173f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -138,7 +138,10 @@ static void print_eth_id(struct net_device *ndev)
* @priv: NIC private structure
* @f: fifo to initialize
* @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX: offsets of registers relative to base address
+ * @reg_CFG0: offsets of registers relative to base address
+ * @reg_CFG1: offsets of registers relative to base address
+ * @reg_RPTR: offsets of registers relative to base address
+ * @reg_WPTR: offsets of registers relative to base address
*
 * 1K extra space is allocated at the end of the fifo to simplify
 * processing of descriptors that wrap around the fifo's end
@@ -153,11 +156,11 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
u16 memsz = FIFO_SIZE * (1 << fsz_type);
memset(f, 0, sizeof(struct fifo));
- /* pci_alloc_consistent gives us 4k-aligned memory */
- f->va = pci_alloc_consistent(priv->pdev,
- memsz + FIFO_EXTRA_SPACE, &f->da);
+ /* dma_alloc_coherent gives us 4k-aligned memory */
+ f->va = dma_alloc_coherent(&priv->pdev->dev, memsz + FIFO_EXTRA_SPACE,
+ &f->da, GFP_ATOMIC);
if (!f->va) {
- pr_err("pci_alloc_consistent failed\n");
+ pr_err("dma_alloc_coherent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -183,8 +186,8 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
ENTER;
if (f->va) {
- pci_free_consistent(priv->pdev,
- f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
f->va = NULL;
}
RET();
@@ -558,7 +561,7 @@ static int bdx_reset(struct bdx_priv *priv)
/**
* bdx_close - Disables a network interface
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
@@ -585,7 +588,7 @@ static int bdx_close(struct net_device *ndev)
/**
* bdx_open - Called when a network interface is made active
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
@@ -698,7 +701,7 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
* @ndev: network device
* @vid: VLAN vid
- * @op: add or kill operation
+ * @enable: enable or disable vlan
*
* Passes VLAN filter table to hardware
*/
@@ -729,6 +732,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
* @ndev: network device
+ * @proto: unused
* @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -740,6 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
* @ndev: network device
+ * @proto: unused
* @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -750,7 +755,7 @@ static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
/**
* bdx_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
@@ -1033,9 +1038,8 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
for (i = 0; i < db->nelem; i++) {
dm = bdx_rxdb_addr_elem(db, i);
if (dm->dma) {
- pci_unmap_single(priv->pdev,
- dm->dma, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ f->m.pktsz, DMA_FROM_DEVICE);
dev_kfree_skb(dm->skb);
}
}
@@ -1097,9 +1101,8 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
idx = bdx_rxdb_alloc_elem(db);
dm = bdx_rxdb_addr_elem(db, idx);
- dm->dma = pci_map_single(priv->pdev,
- skb->data, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dm->dma = dma_map_single(&priv->pdev->dev, skb->data,
+ f->m.pktsz, DMA_FROM_DEVICE);
dm->skb = skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
@@ -1259,16 +1262,15 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
(skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
skb_reserve(skb2, NET_IP_ALIGN);
/*skb_put(skb2, len); */
- pci_dma_sync_single_for_cpu(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz,
+ DMA_FROM_DEVICE);
memcpy(skb2->data, skb->data, len);
bdx_recycle_skb(priv, rxdd);
skb = skb2;
} else {
- pci_unmap_single(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz, DMA_FROM_DEVICE);
bdx_rxdb_free_elem(db, rxdd->va_lo);
}
@@ -1478,8 +1480,8 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int i;
db->wptr->len = skb_headlen(skb);
- db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
- db->wptr->len, PCI_DMA_TODEVICE);
+ db->wptr->addr.dma = dma_map_single(&priv->pdev->dev, skb->data,
+ db->wptr->len, DMA_TO_DEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
@@ -1716,8 +1718,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
BDX_ASSERT(db->rptr->len == 0);
do {
BDX_ASSERT(db->rptr->addr.dma == 0);
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
bdx_tx_db_inc_rptr(db);
} while (db->rptr->len > 0);
tx_level -= db->rptr->len; /* '-' koz len is negative */
@@ -1756,6 +1758,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
/**
* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+ * @priv: NIC private structure
+ *
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1765,8 +1769,8 @@ static void bdx_tx_free_skbs(struct bdx_priv *priv)
ENTER;
while (db->rptr != db->wptr) {
if (likely(db->rptr->len))
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
else
dev_kfree_skb(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
@@ -1902,12 +1906,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) /* it triggers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
+ !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
+ (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
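
The tehuti conversion above is the mechanical pci_* to dma_* DMA API migration: pci_map_single() becomes dma_map_single() on &pdev->dev, and PCI_DMA_FROMDEVICE/TODEVICE become DMA_FROM_DEVICE/DMA_TO_DEVICE. The generic API also expects mappings to be checked with dma_mapping_error(); the sketch below adds that check, which the patch itself does not:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_map_rx(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;

	return 0;
}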
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 496dafb25128..6e4d4f9e32e0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -572,13 +572,14 @@ static int am65_cpsw_nway_reset(struct net_device *ndev)
static int am65_cpsw_get_regs_len(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- u32 i, regdump_len = 0;
+ u32 ale_entries, i, regdump_len = 0;
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
if (am65_cpsw_regdump[i].hdr.module_id ==
AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
regdump_len += sizeof(struct am65_cpsw_regdump_hdr);
- regdump_len += common->ale->params.ale_entries *
+ regdump_len += ale_entries *
ALE_ENTRY_WORDS * sizeof(u32);
continue;
}
@@ -592,10 +593,11 @@ static void am65_cpsw_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- u32 i, j, pos, *reg = p;
+ u32 ale_entries, i, j, pos, *reg = p;
/* update CPSW IP version */
regs->version = AM65_CPSW_REGDUMP_VER;
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
pos = 0;
for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
@@ -603,7 +605,7 @@ static void am65_cpsw_get_regs(struct net_device *ndev,
if (am65_cpsw_regdump[i].hdr.module_id ==
AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
- u32 ale_tbl_len = common->ale->params.ale_entries *
+ u32 ale_tbl_len = ale_entries *
ALE_ENTRY_WORDS * sizeof(u32) +
sizeof(struct am65_cpsw_regdump_hdr);
reg[pos++] = ale_tbl_len;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 9baf3f3da91e..501d676fd88b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
@@ -2038,6 +2039,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct am65_cpsw_common *common;
struct device_node *node;
struct resource *res;
+ struct clk *clk;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
@@ -2086,6 +2088,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!common->ports)
return -ENOMEM;
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "error getting fck clock %d\n", ret);
+ return ret;
+ }
+ common->bus_freq = clk_get_rate(clk);
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -2131,10 +2143,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
- ale_params.ale_entries = 0;
ale_params.ale_ports = common->port_num + 1;
ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
- ale_params.nu_switch_ale = true;
+ ale_params.dev_id = "am65x-cpsw2g";
+ ale_params.bus_freq = common->bus_freq;
common->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(common->ale)) {
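
Note on the "fck" lookup added above: demoting the error print on -EPROBE_DEFER is exactly the pattern dev_err_probe() (merged in v5.9) was introduced to replace. A hedged equivalent, assuming dev_err_probe() is available in the target tree:

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "error getting fck clock\n");
	common->bus_freq = clk_get_rate(clk);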
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 94f666ea0e53..993e1d4d3222 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -106,6 +106,7 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
+ unsigned long bus_freq;
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c59a289e428c..75056c14b161 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -83,6 +83,8 @@ struct am65_cpts_regs {
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
+
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
@@ -748,42 +750,23 @@ EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
- *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+ *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
return 1;
}
@@ -1005,7 +988,9 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
am65_cpts_set_add_val(cpts);
- am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
+ AM65_CPTS_CONTROL_64MODE |
+ AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
control);
am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
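
The am65-cpts hunk replaces the open-coded VLAN/IPv4/IPv6/L2 offset walk with the common PTP header helpers from <linux/ptp_classify.h>. A minimal sketch of the consumer side (example_ptp_ids() is illustrative):

#include <linux/ptp_classify.h>

/* Returns 0 with the PTP message type and sequence id filled in, or
 * -EINVAL when the skb carries no parseable PTP header.
 */
static int example_ptp_ids(struct sk_buff *skb, u8 *msgtype, u16 *seqid)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, type);
	*seqid = ntohs(hdr->sequence_id);
	return 0;
}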
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4a65edc5a375..9fd1f77190ad 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1278,12 +1278,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->channels = prop;
- if (of_property_read_u32(node, "ale_entries", &prop)) {
- dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
- return -EINVAL;
- }
- data->ale_entries = prop;
-
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
return -EINVAL;
@@ -1297,7 +1291,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
data->mac_control = prop;
if (of_property_read_bool(node, "dual_emac"))
- data->dual_emac = 1;
+ data->dual_emac = true;
/*
* Populate all the child nodes here...
@@ -1596,7 +1590,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
data = &cpsw->data;
cpsw->slaves = devm_kcalloc(dev,
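
With the ale_entries DT property dropped above, the ALE is expected to size and configure itself from per-SoC data keyed by the dev_id string (see struct cpsw_ale_dev_id in the next file). A hedged sketch of such a lookup; example_ale_match_id() is hypothetical and the NULL-dev_id sentinel terminating the table is an assumption:

static const struct cpsw_ale_dev_id *
example_ale_match_id(const struct cpsw_ale_dev_id *id, const char *dev_id)
{
	if (!dev_id)
		return NULL;

	for (; id->dev_id; id++) {
		if (!strcmp(id->dev_id, dev_id))
			return id;
	}

	return NULL;
}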
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 9ad872bfae3a..a6a455c32628 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -32,6 +32,7 @@
#define ALE_STATUS 0x04
#define ALE_CONTROL 0x08
#define ALE_PRESCALE 0x10
+#define ALE_AGING_TIMER 0x14
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
@@ -46,6 +47,46 @@
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
+/* ALE_AGING_TIMER */
+#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+
+/**
+ * struct ale_entry_fld - The ALE tbl entry field description
+ * @start_bit: field start bit
+ * @num_bits: field bit length
+ * @flags: field flags
+ */
+struct ale_entry_fld {
+ u8 start_bit;
+ u8 num_bits;
+ u8 flags;
+};
+
+enum {
+ CPSW_ALE_F_STATUS_REG = BIT(0), /* Status register present */
+ CPSW_ALE_F_HW_AUTOAGING = BIT(1), /* HW auto aging */
+
+ CPSW_ALE_F_COUNT
+};
+
+/**
+ * struct ale_dev_id - The ALE version/SoC specific configuration
+ * @dev_id: ALE version/SoC id
+ * @features: features supported by ALE
+ * @tbl_entries: number of ALE entries
+ * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @nu_switch_ale: NU Switch ALE
+ * @vlan_entry_tbl: ALE vlan entry fields description tbl
+ */
+struct cpsw_ale_dev_id {
+ const char *dev_id;
+ u32 features;
+ u32 tbl_entries;
+ u32 major_ver_mask;
+ bool nu_switch_ale;
+ const struct ale_entry_fld *vlan_entry_tbl;
+};
+
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@@ -60,7 +101,6 @@
#define ALE_TABLE_SIZE_MULTIPLIER 1024
#define ALE_STATUS_SIZE_MASK 0x1f
-#define ALE_TABLE_SIZE_DEFAULT 64
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
@@ -106,6 +146,59 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
+enum {
+ ALE_ENT_VID_MEMBER_LIST = 0,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ ALE_ENT_VID_REG_MCAST_MSK,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ ALE_ENT_VID_REG_MCAST_IDX,
+ ALE_ENT_VID_LAST,
+};
+
+#define ALE_FLD_ALLOWED BIT(0)
+#define ALE_FLD_SIZE_PORT_MASK_BITS BIT(1)
+#define ALE_FLD_SIZE_PORT_NUM_BITS BIT(2)
+
+#define ALE_ENTRY_FLD(id, start, bits) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = bits, \
+ .flags = ALE_FLD_ALLOWED, \
+}
+
+#define ALE_ENTRY_FLD_DYN_MSK_SIZE(id, start) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = 0, \
+ .flags = ALE_FLD_ALLOWED | \
+ ALE_FLD_SIZE_PORT_MASK_BITS, \
+}
+
+/* dm814x, am3/am4/am5, k2hk */
+static const struct ale_entry_fld vlan_entry_cpsw[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD(ALE_ENT_VID_MEMBER_LIST, 0, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_MSK, 8, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_MSK, 16, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24, 3),
+};
+
+/* k2e/k2l, k3 am65/j721e cpsw2g */
+static const struct ale_entry_fld vlan_entry_nu[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_IDX, 20, 3),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_IDX, 44, 3),
+};
+
+/* K3 j721e/j7200 cpsw9g/5g, am64x cpsw3g */
+static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_UNREG_MCAST_MSK, 12),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_REG_MCAST_MSK, 36),
+};
+
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
@@ -115,17 +208,76 @@ DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD1(port_num, 66)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
-DEFINE_ALE_FIELD1(vlan_untag_force, 24)
-DEFINE_ALE_FIELD1(vlan_reg_mcast, 16)
-DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8)
-DEFINE_ALE_FIELD1(vlan_member_list, 0)
DEFINE_ALE_FIELD(mcast, 40, 1)
-/* ALE NetCP nu switch specific */
-DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
-DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
#define NU_VLAN_UNREG_MCAST_IDX 1
+static int cpsw_ale_entry_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return -EINVAL;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "get: wrong ale fld id %d\n", fld_id);
+ return -ENOENT;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ return cpsw_ale_get_field(ale_entry, entry_fld->start_bit, bits);
+}
+
+static void cpsw_ale_entry_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id,
+ u32 value)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "set: wrong ale fld id %d\n", fld_id);
+ return;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ cpsw_ale_set_field(ale_entry, entry_fld->start_bit, bits, value);
+}
+
+static int cpsw_ale_vlan_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id)
+{
+ return cpsw_ale_entry_get_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id);
+}
+
+static void cpsw_ale_vlan_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id,
+ u32 value)
+{
+ cpsw_ale_entry_set_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id, value);
+}
+
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
@@ -420,19 +572,22 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
int idx;
/* Set VLAN registered multicast flood mask */
- idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_IDX);
writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
/* Set VLAN unregistered multicast flood mask */
- idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
u16 vid, int untag_mask)
{
- cpsw_ale_set_vlan_untag_force(ale_entry,
- untag_mask, ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ untag_mask);
if (untag_mask & ALE_PORT_HOST)
bitmap_set(ale->p0_untag_vid_mask, vid, 1);
else
@@ -454,17 +609,19 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
if (!ale->params.nu_switch_ale) {
- cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
- ale->vlan_field_bits);
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
} else {
- cpsw_ale_set_vlan_unreg_mcast_idx(ale_entry,
- NU_VLAN_UNREG_MCAST_IDX);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ NU_VLAN_UNREG_MCAST_IDX);
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
- ale->vlan_field_bits);
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, port_mask);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -483,20 +640,20 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
int reg_mcast, unreg_mcast;
int members, untag;
- members = cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
members &= ~port_mask;
if (!members) {
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
return;
}
- untag = cpsw_ale_get_vlan_untag_force(ale_entry,
- ale->vlan_field_bits);
- reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry,
- ale->vlan_field_bits);
- unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ untag = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
+ reg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
untag &= members;
reg_mcast &= members;
unreg_mcast &= members;
@@ -504,16 +661,16 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
if (!ale->params.nu_switch_ale) {
- cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
- ale->vlan_field_bits);
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, members,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, members);
}
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
@@ -551,15 +708,15 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- vlan_members = cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
- reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry,
- ale->vlan_field_bits);
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
+ reg_mcast_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
unreg_mcast_members =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
- untag_members = cpsw_ale_get_vlan_untag_force(ale_entry,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ untag_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
vlan_members |= port_mask;
untag_members = (untag_members & ~port_mask) | untag_mask;
@@ -592,14 +749,15 @@ void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
continue;
unreg_members =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
if (add)
unreg_members |= unreg_mcast_mask;
else
unreg_members &= ~unreg_mcast_mask;
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ unreg_members);
cpsw_ale_write(ale, idx, ale_entry);
}
}
@@ -609,15 +767,15 @@ static void cpsw_ale_vlan_set_unreg_mcast(struct cpsw_ale *ale, u32 *ale_entry,
{
int unreg_mcast;
- unreg_mcast =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
if (allmulti)
unreg_mcast |= ALE_PORT_HOST;
else
unreg_mcast &= ~ALE_PORT_HOST;
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
}
static void
@@ -627,7 +785,8 @@ cpsw_ale_vlan_set_unreg_mcast_idx(struct cpsw_ale *ale, u32 *ale_entry,
int unreg_mcast;
int idx;
- idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
unreg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
@@ -651,9 +810,9 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_VLAN)
continue;
- vlan_members =
- cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
+
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
if (port != -1 && !(vlan_members & BIT(port)))
continue;
@@ -960,30 +1119,146 @@ static void cpsw_ale_timer(struct timer_list *t)
}
}
+static void cpsw_ale_hw_aging_timer_start(struct cpsw_ale *ale)
+{
+ u32 aging_timer;
+
+ aging_timer = ale->params.bus_freq / 1000000;
+ aging_timer *= ale->params.ale_ageout;
+
+ if (aging_timer & ~ALE_AGING_TIMER_MASK) {
+ aging_timer = ALE_AGING_TIMER_MASK;
+ dev_warn(ale->params.dev,
+ "ALE aging timer overflow, set to max\n");
+ }
+
+ writel(aging_timer, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_hw_aging_timer_stop(struct cpsw_ale *ale)
+{
+ writel(0, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_aging_start(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_start(ale);
+ return;
+ }
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+}
+
+static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_stop(ale);
+ return;
+ }
+
+ del_timer_sync(&ale->timer);
+}
+
void cpsw_ale_start(struct cpsw_ale *ale)
{
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- timer_setup(&ale->timer, cpsw_ale_timer, 0);
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
+ cpsw_ale_aging_start(ale);
}
void cpsw_ale_stop(struct cpsw_ale *ale)
{
- del_timer_sync(&ale->timer);
+ cpsw_ale_aging_stop(ale);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
+ {
/* am3/4/5, dra7, dm814x, 66ak2hk-gbe */
+ .dev_id = "cpsw",
+ .tbl_entries = 1024,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
/* 66ak2h-xgbe */
+ .dev_id = "66ak2h-xgbe",
+ .tbl_entries = 2048,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
+ .dev_id = "66ak2el",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "66ak2g",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "am65x-cpsw2g",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "j721e-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ },
+ { },
+};
+
+static const struct cpsw_ale_dev_id *
+cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
+ const char *dev_id)
+{
+ if (!dev_id)
+ return NULL;
+
+ while (id->dev_id) {
+ if (strcmp(dev_id, id->dev_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
u32 rev, ale_entries;
+ ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
+ if (!ale_dev_id)
+ return ERR_PTR(-EINVAL);
+
+ params->ale_entries = ale_dev_id->tbl_entries;
+ params->major_ver_mask = ale_dev_id->major_ver_mask;
+ params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
@@ -997,10 +1272,10 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
+ ale->features = ale_dev_id->features;
+ ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- if (!ale->params.major_ver_mask)
- ale->params.major_ver_mask = 0xff;
ale->version =
(ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
ALE_VERSION_MINOR(rev);
@@ -1008,7 +1283,8 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
ALE_VERSION_MINOR(rev));
- if (!ale->params.ale_entries) {
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.ale_entries) {
ale_entries =
readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
ALE_STATUS_SIZE_MASK;
@@ -1017,16 +1293,12 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
* table which shows the size as a multiple of 1024 entries.
* For these, params.ale_entries will be set to zero. So
* read the register and update the value of ale_entries.
- * ALE table on NetCP lite, is much smaller and is indicated
- * by a value of zero in ALE_STATUS. So use a default value
- * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected
- * to set the value of ale_entries for all other versions
- * of ALE.
+ * Return an error if ale_entries reads back as zero from ALE_STATUS.
*/
if (!ale_entries)
- ale_entries = ALE_TABLE_SIZE_DEFAULT;
- else
- ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+ return ERR_PTR(-EINVAL);
+
+ ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
dev_info(ale->params.dev,
@@ -1079,3 +1351,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
data += ALE_ENTRY_WORDS;
}
}
+
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
+{
+ return ale ? ale->params.ale_entries : 0;
+}
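The table-driven accessors above replace the per-variant DEFINE_ALE_FIELD
macros: each ALE flavour supplies one struct ale_entry_fld table, and a
single get/set pair interprets it. A self-contained userspace sketch of
the same idea, with invented field ids and bit offsets (the driver's real
tables are vlan_entry_cpsw, vlan_entry_nu and vlan_entry_k3_cpswxg):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: field ids and offsets are invented for illustration, and
 * fields here are assumed to start within the first two words of the
 * 3-word entry, as cpsw_ale_entry_get_fld() does with the per-SoC
 * vlan_entry_* tables.
 */
struct fld_desc {
	uint8_t start;	/* first bit, counting from bit 0 of word 0 */
	uint8_t bits;	/* field width in bits (< 32) */
};

enum { FLD_MEMBER_LIST, FLD_UNREG_MCAST, FLD_LAST };

static const struct fld_desc vlan_tbl[FLD_LAST] = {
	[FLD_MEMBER_LIST] = { .start = 0, .bits = 3 },
	[FLD_UNREG_MCAST] = { .start = 8, .bits = 3 },
};

static uint32_t entry_get(const uint32_t entry[3], int id)
{
	const struct fld_desc *f = &vlan_tbl[id];
	/* read two adjacent words so a field may cross a word boundary */
	uint64_t w = entry[f->start / 32] |
		     ((uint64_t)entry[f->start / 32 + 1] << 32);

	return (w >> (f->start % 32)) & ((1ull << f->bits) - 1);
}

int main(void)
{
	uint32_t entry[3] = { 0x00000305, 0, 0 }; /* bits[2:0]=5, bits[10:8]=3 */

	printf("members=%u unreg=%u\n",
	       (unsigned)entry_get(entry, FLD_MEMBER_LIST),
	       (unsigned)entry_get(entry, FLD_UNREG_MCAST));
	return 0;
}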
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6a3cb6898728..5e4a69662c5f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -24,18 +24,24 @@ struct cpsw_ale_params {
* pass it from caller.
*/
u32 major_ver_mask;
+ const char *dev_id;
+ unsigned long bus_freq;
};
+struct ale_entry_fld;
+
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
unsigned long ageout;
u32 version;
+ u32 features;
/* These bits are different on NetCP NU Switch ALE */
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
unsigned long *p0_untag_vid_mask;
+ const struct ale_entry_fld *vlan_entry_tbl;
};
enum cpsw_ale_control {
@@ -119,6 +125,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
{
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index fa54efe3be63..4619c3a950b0 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -339,7 +339,8 @@ int cpsw_get_regs_len(struct net_device *ndev)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+ return cpsw_ale_get_num_entries(cpsw->ale) *
+ ALE_ENTRY_WORDS * sizeof(u32);
}
void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
@@ -727,7 +728,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
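With the cpsw_get_regs_len() change above taking the length from the ALE
instance, the dump size for the classic 1024-entry "cpsw" variant works
out to 1024 entries x 3 words x 4 bytes = 12288 bytes, assuming
ALE_ENTRY_WORDS == 3 (68-bit entries stored in three u32s, per
cpsw_ale.h). A minimal check of that arithmetic:

#include <stdio.h>

/* Sketch: assumes ALE_ENTRY_WORDS == 3, its value in cpsw_ale.h. */
int main(void)
{
	unsigned int ale_entries = 1024;	/* "cpsw" dev_id table size */
	unsigned int ale_entry_words = 3;

	printf("%u\n", ale_entries * ale_entry_words * 4 /* sizeof(u32) */);
	return 0;
}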
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 15672d0a4de6..f779d2e1b5c5 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1244,7 +1244,6 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
- data->ale_entries = CPSW_ALE_NUM_ENTRIES;
data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1661,12 +1660,10 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
for (i = 0; i < cpsw->data.slaves; i++) {
struct cpsw_slave *slave = &cpsw->slaves[i];
struct net_device *sl_ndev = slave->ndev;
- struct cpsw_priv *priv;
if (!sl_ndev)
continue;
- priv = netdev_priv(sl_ndev);
if (switch_en)
vlan = cpsw->data.default_vlan;
else
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 482a1a451e43..31c5e36ff706 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -500,8 +500,8 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.dev = dev;
ale_params.ale_ageout = ale_ageout;
- ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
+ ale_params.dev_id = "cpsw";
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
@@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
- return -ERANGE;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
- break;
+ return -ERANGE;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index bf4e179b4ca4..7b7f3596b20d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -117,7 +117,6 @@ do { \
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
-#define CPSW_ALE_NUM_ENTRIES 1024
#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
#define CPSW_FIFO_SHAPE_EN_SHIFT 16
#define CPSW_FIFO_RATE_EN_SHIFT 20
@@ -294,7 +293,6 @@ struct cpsw_platform_data {
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 7c55d395de2c..d1fc7955d422 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -446,41 +446,22 @@ static const struct ptp_clock_info cpts_info = {
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- u16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
- *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
+ *mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
return 1;
}
@@ -528,6 +509,11 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
int ret;
u64 ns;
+ /* cpts_rx_timestamp() is called before eth_type_trans(), so
+ * the skb MAC header properties are not configured yet; reset the
+ * skb MAC header here.
+ */
+ skb_reset_mac_header(skb);
ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return;
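The rewrite above drops the hand-rolled VLAN/IPv4/IPv6/L2 offset
arithmetic in favour of the common PTP helpers from
<linux/ptp_classify.h>. A sketch of the same flow as another driver
might use it (ptp_classify_raw(), ptp_parse_header() and
ptp_get_msgtype() are the real helpers; the surrounding function is
illustrative):

#include <linux/errno.h>
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* Illustrative only: pull the PTP message type and sequence id out of
 * an skb with the generic parser instead of manual offset math.
 */
static int example_get_ptp_fields(struct sk_buff *skb, u8 *msgtype, u16 *seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*seqid = ntohs(hdr->sequence_id);
	return 0;
}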
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 6614fa3089b2..d2eab5cd1e0c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -718,7 +718,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
most_chan->desc_num += desc_cnt;
}
-/**
+/*
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index de282531f68b..c7031e1960d4 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -671,7 +671,7 @@ static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
* emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
- * mac_addr: mac address to set
+ * @mac_addr: mac address to set
*
* Set multicast addresses in EMAC adapter - internal function
*
@@ -977,6 +977,7 @@ fail_tx:
/**
* emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
+ * @txqueue: the index of the hung transmit queue
*
* Called when system detects that a skb timeout period has expired
* potentially due to a fault in the adapter in not being able to send
@@ -1209,7 +1210,7 @@ static int emac_hw_enable(struct emac_priv *priv)
/**
* emac_poll - EMAC NAPI Poll function
- * @ndev: The DaVinci EMAC network adapter
+ * @napi: pointer to the napi_struct containing the DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
* NAPI Poll function implemented to process packets as per budget. We check
@@ -1227,7 +1228,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
struct device *emac_dev = &ndev->dev;
u32 status = 0;
- u32 num_tx_pkts = 0, num_rx_pkts = 0;
+ u32 num_rx_pkts = 0;
/* Check interrupt vectors and call packet processing */
status = emac_read(EMAC_MACINVECTOR);
@@ -1238,8 +1239,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = cpdma_chan_process(priv->txchan,
- EMAC_DEF_TX_MAX_SERVICE);
+ cpdma_chan_process(priv->txchan, EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 28093923a7fb..33c1592d5381 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -51,7 +51,6 @@
#define GBE13_CPTS_OFFSET 0x500
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
-#define GBE13_NUM_ALE_ENTRIES 1024
/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME "netcp-gbenu"
@@ -101,7 +100,6 @@
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
-#define XGBE10_NUM_ALE_ENTRIES 2048
#define GBE_TIMER_INTERVAL (HZ / 2)
@@ -711,7 +709,6 @@ struct gbe_priv {
struct netcp_device *netcp_device;
struct timer_list timer;
u32 num_slaves;
- u32 ale_entries;
u32 ale_ports;
bool enable_ale;
u8 max_num_slaves;
@@ -3309,7 +3306,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
- gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -3433,7 +3429,6 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
- gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
@@ -3697,12 +3692,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.dev = gbe_dev->dev;
ale_params.ale_regs = gbe_dev->ale_reg;
ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
- ale_params.ale_entries = gbe_dev->ale_entries;
ale_params.ale_ports = gbe_dev->ale_ports;
- if (IS_SS_ID_MU(gbe_dev)) {
- ale_params.major_ver_mask = 0x7;
- ale_params.nu_switch_ale = true;
- }
+ ale_params.dev_id = "cpsw";
+ if (IS_SS_ID_NU(gbe_dev))
+ ale_params.dev_id = "66ak2el";
+ else if (IS_SS_ID_2U(gbe_dev))
+ ale_params.dev_id = "66ak2g";
+ else if (IS_SS_ID_XGBE(gbe_dev))
+ ale_params.dev_id = "66ak2h-xgbe";
+
gbe_dev->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(gbe_dev->ale)) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
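With the per-SoC table in cpsw_ale.c, a caller now only selects the
dev_id string and checks the ERR_PTR result; entry counts and version
masks come from the matched table row. A minimal sketch of the calling
convention (example_ale_setup() is illustrative; the ale_params fields
mirror the gbe_probe() setup above):

/* Sketch of the new calling convention: pick a dev_id, let
 * cpsw_ale_create() fill in tbl_entries/major_ver_mask/nu_switch_ale
 * from its id table, and handle the ERR_PTR return.
 */
static int example_ale_setup(struct gbe_priv *gbe_dev)
{
	struct cpsw_ale_params ale_params = {
		.dev		= gbe_dev->dev,
		.ale_regs	= gbe_dev->ale_reg,
		.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT,
		.ale_ports	= gbe_dev->ale_ports,
		.dev_id		= "66ak2el",	/* NU switch variant */
	};

	gbe_dev->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(gbe_dev->ale))
		return PTR_ERR(gbe_dev->ale);
	return 0;
}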
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 76a342ea3797..267c080ee084 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -305,9 +305,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev,
- priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
@@ -482,7 +481,7 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
priv->adapter = &board_info[ent->driver_data];
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
pr_err("No suitable PCI mapping available\n");
goto err_out_free_dev;
@@ -584,8 +583,8 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
return 0;
err_out_uninit:
- pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -609,9 +608,9 @@ static void tlan_eisa_cleanup(void)
dev = tlan_eisa_devices;
priv = netdev_priv(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev, priv->dma_size,
- priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
release_region(dev->base_addr, 0x10);
unregister_netdev(dev);
@@ -654,7 +653,6 @@ module_exit(tlan_exit);
static void __init tlan_eisa_probe(void)
{
long ioaddr;
- int rc = -ENODEV;
int irq;
u16 device_id;
@@ -719,8 +717,7 @@ static void __init tlan_eisa_probe(void)
/* Setup the newly found eisa adapter */
- rc = tlan_probe1(NULL, ioaddr, irq,
- 12, NULL);
+ tlan_probe1(NULL, ioaddr, irq, 12, NULL);
continue;
out:
@@ -826,9 +823,8 @@ static int tlan_init(struct net_device *dev)
dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
* (sizeof(struct tlan_list));
- priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
- dma_size,
- &priv->dma_storage_dma);
+ priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
+ &priv->dma_storage_dma, GFP_KERNEL);
priv->dma_size = dma_size;
if (priv->dma_storage == NULL) {
@@ -1069,9 +1065,9 @@ static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+ tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data, txlen,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tlan_store_skb(tail_list, skb);
tail_list->frame_size = (u16) txlen;
@@ -1365,10 +1361,10 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ head_list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -1511,8 +1507,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
goto drop_and_reuse;
skb = tlan_get_skb(head_list);
- pci_unmap_single(priv->pci_dev, frame_dma,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, frame_size);
dev->stats.rx_bytes += frame_size;
@@ -1521,8 +1517,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
netif_rx(skb);
head_list->buffer[0].address =
- pci_map_single(priv->pci_dev, new_skb->data,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&priv->pci_dev->dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
tlan_store_skb(head_list, new_skb);
drop_and_reuse:
@@ -1923,10 +1919,10 @@ static void tlan_reset_lists(struct net_device *dev)
if (!skb)
break;
- list->buffer[0].address = pci_map_single(priv->pci_dev,
+ list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
@@ -1954,12 +1950,10 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->tx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(
- priv->pci_dev,
- list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
@@ -1970,10 +1964,9 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->rx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(priv->pci_dev,
+ dma_unmap_single(&priv->pci_dev->dev,
list->buffer[0].address,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
@@ -2511,7 +2504,7 @@ static void tlan_phy_power_down(struct net_device *dev)
}
/* Wait for 50 ms and powerup
- * This is abitrary. It is intended to make sure the
+ * This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
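The tlan changes are a mechanical conversion from the deprecated pci_*
DMA wrappers to the generic DMA API; the only substantive addition is
the explicit GFP flag on coherent allocation. The correspondence, plus
an illustrative streaming-map helper with the mapping-error check the
original tlan paths do not perform:

/* pci_set_dma_mask(pdev, m)            -> dma_set_mask(&pdev->dev, m)
 * pci_alloc_consistent(pdev, sz, &h)   -> dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
 * pci_free_consistent(pdev, sz, p, h)  -> dma_free_coherent(&pdev->dev, sz, p, h)
 * pci_map_single(..., PCI_DMA_TODEVICE)     -> dma_map_single(..., DMA_TO_DEVICE)
 * pci_unmap_single(..., PCI_DMA_FROMDEVICE) -> dma_unmap_single(..., DMA_FROM_DEVICE)
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative helper: map a TX buffer and check the mapping result. */
static int example_map_tx(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))
		return -ENOMEM;
	return 0;
}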
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6bcda20ed7e7..7a6e5ff8e5d4 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -454,9 +454,9 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
if (!skb)
return NULL;
- *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(hwdev, *dma_handle)) {
+ *dma_handle = dma_map_single(&hwdev->dev, skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hwdev->dev, *dma_handle)) {
dev_kfree_skb_any(skb);
return NULL;
}
@@ -466,8 +466,8 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
- pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&hwdev->dev, dma_handle, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
@@ -876,9 +876,9 @@ tc35815_init_queues(struct net_device *dev)
sizeof(struct TxFD) * TX_FD_NUM >
PAGE_SIZE * FD_PAGE_NUM);
- lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
- PAGE_SIZE * FD_PAGE_NUM,
- &lp->fd_buf_dma);
+ lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ &lp->fd_buf_dma, GFP_ATOMIC);
if (!lp->fd_buf)
return -ENOMEM;
for (i = 0; i < RX_BUF_NUM; i++) {
@@ -892,10 +892,9 @@ tc35815_init_queues(struct net_device *dev)
lp->rx_skbs[i].skb_dma);
lp->rx_skbs[i].skb = NULL;
}
- pci_free_consistent(lp->pci_dev,
- PAGE_SIZE * FD_PAGE_NUM,
- lp->fd_buf,
- lp->fd_buf_dma);
+ dma_free_coherent(&lp->pci_dev->dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf, lp->fd_buf_dma);
lp->fd_buf = NULL;
return -ENOMEM;
}
@@ -990,7 +989,9 @@ tc35815_clear_queues(struct net_device *dev)
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[i].skb_dma, skb->len,
+ DMA_TO_DEVICE);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
dev_kfree_skb_any(skb);
@@ -1022,7 +1023,9 @@ tc35815_free_queues(struct net_device *dev)
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[i].skb_dma,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
@@ -1044,8 +1047,8 @@ tc35815_free_queues(struct net_device *dev)
}
}
if (lp->fd_buf) {
- pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
- lp->fd_buf, lp->fd_buf_dma);
+ dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf, lp->fd_buf_dma);
lp->fd_buf = NULL;
}
}
@@ -1292,7 +1295,10 @@ tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
lp->tx_skbs[lp->tfd_start].skb = skb;
- lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
/*add to ring */
txfd = &lp->tfd_base[lp->tfd_start];
@@ -1500,9 +1506,9 @@ tc35815_rx(struct net_device *dev, int limit)
skb = lp->rx_skbs[cur_bd].skb;
prefetch(skb->data);
lp->rx_skbs[cur_bd].skb = NULL;
- pci_unmap_single(lp->pci_dev,
+ dma_unmap_single(&lp->pci_dev->dev,
lp->rx_skbs[cur_bd].skb_dma,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
@@ -1756,7 +1762,9 @@ tc35815_txdone(struct net_device *dev)
#endif
if (skb) {
dev->stats.tx_bytes += skb->len;
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[lp->tfd_end].skb_dma,
+ skb->len, DMA_TO_DEVICE);
lp->tx_skbs[lp->tfd_end].skb = NULL;
lp->tx_skbs[lp->tfd_end].skb_dma = 0;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 55b0ddab1776..73ca597ebd1b 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1504,7 +1504,7 @@ static void rhine_init_cam_filter(struct net_device *dev)
/**
* rhine_update_vcam - update VLAN CAM filters
- * @rp: rhine_private data of this Rhine
+ * @dev: network device of this Rhine
*
* Update VLAN CAM filters to match configuration change.
*/
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 6d2a31488a74..b65767f9e499 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -372,7 +372,7 @@ static const struct pci_device_id velocity_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
-/**
+/*
* Describe the OF device identifiers that we support in this
* device driver. Used for devicetree nodes.
*/
@@ -384,7 +384,7 @@ MODULE_DEVICE_TABLE(of, velocity_of_ids);
/**
* get_chip_name - identifier to name
- * @id: chip identifier
+ * @chip_id: chip identifier
*
* Given a chip identifier return a suitable description. Returns
* a pointer to a static string valid while the driver is loaded.
@@ -748,7 +748,7 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
/**
* velocity_mii_write - write MII data
* @regs: velocity registers
- * @index: MII register index
+ * @mii_addr: MII register index
* @data: 16bit data for the MII register
*
* Perform a single write to an MII 16bit register. Returns zero
@@ -869,6 +869,7 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
/**
* velocity_set_media_mode - set media mode
+ * @vptr: velocity adapter
* @mii_status: old MII link state
*
* Check the media link state and configure the flow control
@@ -877,26 +878,13 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
*/
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
- u32 curr_status;
struct mac_regs __iomem *regs = vptr->mac_regs;
vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
- curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
/* Set mii link status */
set_mii_flow_control(vptr);
- /*
- Check if new status is consistent with current status
- if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
- (mii_status==curr_status)) {
- vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
- vptr->mii_status=check_connection_type(vptr->mac_regs);
- netdev_info(vptr->netdev, "Velocity link no change\n");
- return 0;
- }
- */
-
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -1269,6 +1257,7 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/**
* setup_queue_timers - Setup interrupt timers
+ * @vptr: velocity adapter
*
* Setup interrupt frequency during suppression (timeout if the frame
* count isn't filled).
@@ -1293,8 +1282,7 @@ static void setup_queue_timers(struct velocity_info *vptr)
/**
* setup_adaptive_interrupts - Setup interrupt suppression
- *
- * @vptr velocity adapter
+ * @vptr: velocity adapter
*
* The velocity is able to suppress interrupt during high interrupt load.
* This function turns on that feature.
@@ -1735,6 +1723,7 @@ err_free_dma_rings_0:
* velocity_free_tx_buf - free transmit buffer
* @vptr: velocity
* @tdinfo: buffer
+ * @td: transmit descriptor to free
*
* Release a transmit buffer. If the buffer was preallocated then
* recycle it, if not then unmap the buffer.
@@ -1909,7 +1898,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
- * @vptr; Velocity
+ * @vptr: Velocity
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
@@ -2003,8 +1992,7 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
* velocity_rx_copy - in place Rx copy for small packets
* @rx_skb: network layer packet buffer candidate
* @pkt_size: received data size
- * @rd: receive packet descriptor
- * @dev: network device
+ * @vptr: velocity adapter
*
* Replace the current skb that is scheduled for Rx processing by a
* shorter, immediately allocated skb, if the received packet is small
@@ -2110,6 +2098,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
+ * @budget_left: remaining budget
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
@@ -2658,7 +2647,6 @@ static const struct net_device_ops velocity_netdev_ops = {
/**
* velocity_init_info - init private data
- * @pdev: PCI device
* @vptr: Velocity info
* @info: Board type
*
@@ -2677,7 +2665,6 @@ static void velocity_init_info(struct velocity_info *vptr,
/**
* velocity_get_pci_info - retrieve PCI info for device
* @vptr: velocity device
- * @pdev: PCI device it matches
*
* Retrieve the PCI configuration space data that interests us from
* the kernel PCI layer
@@ -2714,7 +2701,6 @@ static int velocity_get_pci_info(struct velocity_info *vptr)
/**
* velocity_get_platform_info - retrieve platform info for device
* @vptr: velocity device
- * @pdev: platform device it matches
*
* Retrieve the Platform configuration data that interests us
*/
@@ -2764,8 +2750,9 @@ static u32 velocity_get_link(struct net_device *dev)
/**
* velocity_probe - set up discovered velocity device
- * @pdev: PCI device
- * @ent: PCI device table entry that matched
+ * @dev: PCI or platform device
+ * @info: board info table entry that matched
+ * @irq: interrupt line for the device
* @bustype: bus that device is connected to
*
* Configure a discovered adapter from scratch. Return a negative
@@ -2982,6 +2969,7 @@ static int velocity_platform_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
/**
* wol_calc_crc - WOL CRC
+ * @size: size of the wake mask
* @pattern: data pattern
* @mask_pattern: byte mask selecting which pattern bytes feed the CRC
*
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9a15f14daa47..60c199fcb91e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -106,7 +106,7 @@ static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
*/
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
-/**
+/*
* temac_indirect_busywait - Wait for current indirect register access
* to complete.
*/
@@ -121,7 +121,7 @@ int temac_indirect_busywait(struct temac_local *lp)
return 0;
}
-/**
+/*
* temac_indirect_in32 - Indirect register read access. This function
* must be called without lp->indirect_lock being held.
*/
@@ -136,7 +136,7 @@ u32 temac_indirect_in32(struct temac_local *lp, int reg)
return val;
}
-/**
+/*
* temac_indirect_in32_locked - Indirect register read access. This
* function must be called with lp->indirect_lock being held. Use
* this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
@@ -164,7 +164,7 @@ u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
return temac_ior(lp, XTE_LSW0_OFFSET);
}
-/**
+/*
* temac_indirect_out32 - Indirect register write access. This function
* must be called without lp->indirect_lock being held.
*/
@@ -177,7 +177,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
spin_unlock_irqrestore(lp->indirect_lock, flags);
}
-/**
+/*
* temac_indirect_out32_locked - Indirect register write access. This
* function must be called with lp->indirect_lock being held. Use
* this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
@@ -202,7 +202,7 @@ void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
WARN_ON(temac_indirect_busywait(lp));
}
-/**
+/*
* temac_dma_in32_* - Memory mapped DMA read; these functions expect a
* register input that is based on DCR word addresses which are then
* converted to memory mapped byte addresses. To be assigned to
@@ -218,7 +218,7 @@ static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
return ioread32(lp->sdma_regs + (reg << 2));
}
-/**
+/*
* temac_dma_out32_* - Memory mapped DMA write; these functions expect
* a register input that is based on DCR word addresses which are then
* converted to memory mapped byte addresses. To be assigned to
@@ -240,7 +240,7 @@ static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
*/
#ifdef CONFIG_PPC_DCR
-/**
+/*
* temac_dma_dcr_in - DCR based DMA read
*/
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
@@ -248,7 +248,7 @@ static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
return dcr_read(lp->sdma_dcrs, reg);
}
-/**
+/*
* temac_dma_dcr_out - DCR based DMA write
*/
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
@@ -256,7 +256,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
dcr_write(lp->sdma_dcrs, reg, value);
}
-/**
+/*
* temac_dcr_setup - If the DMA is DCR based, then setup the address and
* I/O functions
*/
@@ -293,7 +293,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
-/**
+/*
* temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
@@ -323,7 +323,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
lp->tx_bd_v, lp->tx_bd_p);
}
-/**
+/*
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -593,7 +593,7 @@ static struct temac_option {
{}
};
-/**
+/*
* temac_setoptions
*/
static u32 temac_setoptions(struct net_device *ndev, u32 options)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fa5dc2993520..9aafd3ecdaa4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2038,8 +2038,7 @@ static int axienet_remove(struct platform_device *pdev)
axienet_mdio_teardown(lp);
- if (lp->clk)
- clk_disable_unprepare(lp->clk);
+ clk_disable_unprepare(lp->clk);
of_node_put(lp->phy_node);
lp->phy_node = NULL;
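The axienet cleanup leans on the clk framework treating a NULL clock as
a no-op: clk_disable_unprepare(NULL) returns immediately, so the
if (lp->clk) guard was redundant. The same pattern in isolation:

#include <linux/clk.h>

/* clk_prepare_enable()/clk_disable_unprepare() accept a NULL clk and do
 * nothing, so optional clocks need no explicit guard on teardown.
 */
static void example_clk_teardown(struct clk *clk)	/* clk may be NULL */
{
	clk_disable_unprepare(clk);
}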
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 991857f6a83c..706fa619b703 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -122,7 +122,7 @@ struct s_rmt {
u_char timer1_exp ; /* flag : timer 1 expired */
u_char timer2_exp ; /* flag : timer 2 expired */
- u_char rm_pad1[1] ;
+ u_char rm_pad1;
} ;
/*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 974a244f45ba..d07008a818df 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -217,7 +217,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
{
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
- struct pcpu_sw_netstats *stats;
unsigned int len;
int err = 0;
void *oiph;
@@ -296,13 +295,9 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
- if (likely(err == NET_RX_SUCCESS)) {
- stats = this_cpu_ptr(geneve->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(geneve->dev, len);
+
return;
drop:
/* Consume bad packet */
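dev_sw_netstats_rx_add() folds the removed open-coded tstats update into
one helper. Its effect is equivalent to the sketch below (per-CPU
counters bumped under the u64_stats seqcount; shown as an equivalent,
not the verbatim <linux/netdevice.h> source):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Equivalent of dev_sw_netstats_rx_add(dev, len) for drivers using
 * per-CPU struct pcpu_sw_netstats at dev->tstats.
 */
static void example_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_bytes += len;
	tstats->rx_packets++;
	u64_stats_update_end(&tstats->syncp);
}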
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8e47d0112e5d..dc668ed280b9 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -182,8 +182,6 @@ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
unsigned int hdrlen, unsigned int role)
{
- struct pcpu_sw_netstats *stats;
-
if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
return 1;
@@ -204,11 +202,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
skb->dev = pctx->dev;
- stats = this_cpu_ptr(pctx->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(pctx->dev, skb->len);
netif_rx(skb);
return 0;
@@ -663,10 +657,6 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gtp = netdev_priv(dev);
- err = gtp_encap_enable(gtp, data);
- if (err < 0)
- return err;
-
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
@@ -677,12 +667,16 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
- goto out_encap;
+ return err;
+
+ err = gtp_encap_enable(gtp, data);
+ if (err < 0)
+ goto out_hashtable;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
- goto out_hashtable;
+ goto out_encap;
}
gn = net_generic(dev_net(dev), gtp_net_id);
@@ -693,11 +687,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
return 0;
+out_encap:
+ gtp_encap_disable(gtp);
out_hashtable:
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
-out_encap:
- gtp_encap_disable(gtp);
return err;
}
@@ -928,8 +922,8 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
@@ -956,12 +950,12 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (!pctx)
pctx = pctx_tid;
@@ -974,13 +968,13 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- return 0;
+ return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
sock_hold(sk);
pctx->sk = sk;
@@ -1018,7 +1012,7 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
break;
}
- return 0;
+ return pctx;
}
static void pdp_context_free(struct rcu_head *head)
@@ -1036,9 +1030,12 @@ static void pdp_context_delete(struct pdp_ctx *pctx)
call_rcu(&pctx->rcu_head, pdp_context_free);
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
+
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
unsigned int version;
+ struct pdp_ctx *pctx;
struct gtp_dev *gtp;
struct sock *sk;
int err;
@@ -1068,7 +1065,6 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
}
rtnl_lock();
- rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
if (!gtp) {
@@ -1088,10 +1084,15 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
- err = gtp_pdp_add(gtp, sk, info);
+ pctx = gtp_pdp_add(gtp, sk, info);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
+ } else {
+ gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
+ err = 0;
+ }
out_unlock:
- rcu_read_unlock();
rtnl_unlock();
return err;
}
@@ -1159,6 +1160,7 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+ gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
pdp_context_delete(pctx);
out_unlock:
@@ -1168,6 +1170,14 @@ out_unlock:
static struct genl_family gtp_genl_family;
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1205,6 +1215,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
+ 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+ return ret;
+}
+
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
@@ -1303,7 +1333,7 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_O_TEI] = { .type = NLA_U32, },
};
-static const struct genl_ops gtp_genl_ops[] = {
+static const struct genl_small_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1333,8 +1363,10 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = gtp_genl_ops,
- .n_ops = ARRAY_SIZE(gtp_genl_ops),
+ .small_ops = gtp_genl_ops,
+ .n_small_ops = ARRAY_SIZE(gtp_genl_ops),
+ .mcgrps = gtp_genl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
static int __net_init gtp_net_init(struct net *net)
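The gtp_newlink() error-path reshuffle restores the usual rule that
unwinding runs in exact reverse order of setup: the hash table is now
allocated before encap is enabled, so the out_encap and out_hashtable
labels swap. The pattern in miniature (step names are illustrative):

/* Illustrative goto-unwind skeleton: each failure jumps to the label
 * that tears down everything set up so far, in reverse order.
 */
static int example_setup(void)
{
	int err;

	err = setup_a();	/* e.g. hash table allocation */
	if (err)
		return err;

	err = setup_b();	/* e.g. encap enable */
	if (err)
		goto out_a;

	err = setup_c();	/* e.g. register_netdevice() */
	if (err)
		goto out_b;

	return 0;

out_b:
	teardown_b();
out_a:
	teardown_a();
	return err;
}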
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 1e915871baa7..36eeb80406f2 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -7,7 +7,7 @@
* ------------------
*
* You can find a subset of the documentation in
- * Documentation/networking/device_drivers/wan/z8530drv.rst.
+ * Documentation/networking/device_drivers/hamradio/z8530drv.rst.
*/
/*
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index a4b3fce69ecd..22010384c4a3 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -151,7 +151,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->tx_ring = tmpptr;
rrpriv->tx_ring_dma = ring_dma;
@@ -160,7 +161,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->rx_ring = tmpptr;
rrpriv->rx_ring_dma = ring_dma;
@@ -169,7 +171,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->evt_ring = tmpptr;
rrpriv->evt_ring_dma = ring_dma;
@@ -198,14 +201,14 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
if (rrpriv->evt_ring)
- pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
- rrpriv->evt_ring_dma);
+ dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
+ rrpriv->evt_ring_dma);
if (rrpriv->rx_ring)
- pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
- rrpriv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
+ rrpriv->rx_ring_dma);
if (rrpriv->tx_ring)
- pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
- rrpriv->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
+ rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
@@ -228,12 +231,12 @@ static void rr_remove_one(struct pci_dev *pdev)
}
unregister_netdev(dev);
- pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
- rr->evt_ring_dma);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
- rr->rx_ring_dma);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
- rr->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
+ rr->evt_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
+ rr->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
+ rr->tx_ring_dma);
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -648,8 +651,8 @@ static int rr_init1(struct net_device *dev)
goto error;
}
rrpriv->rx_skbuff[i] = skb;
- addr = pci_map_single(rrpriv->pci_dev, skb->data,
- dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
+ dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
/*
* Sanity test to see if we conflict with the DMA
* limitations of the Roadrunner.
@@ -699,10 +702,10 @@ static int rr_init1(struct net_device *dev)
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
- pci_unmap_single(rrpriv->pci_dev,
+ dma_unmap_single(&rrpriv->pci_dev->dev,
rrpriv->rx_ring[i].addr.addrlo,
dev->mtu + HIPPI_HLEN,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rrpriv->rx_ring[i].size = 0;
set_rraddr(&rrpriv->rx_ring[i].addr, 0);
dev_kfree_skb(skb);
@@ -953,18 +956,18 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
dev->stats.rx_dropped++;
goto defer;
} else {
- pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
- desc->addr.addrlo,
- pkt_len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ pkt_len,
+ DMA_FROM_DEVICE);
skb_put_data(skb, rx_skb->data,
pkt_len);
- pci_dma_sync_single_for_device(rrpriv->pci_dev,
- desc->addr.addrlo,
- pkt_len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ pkt_len,
+ DMA_FROM_DEVICE);
}
}else{
struct sk_buff *newskb;
@@ -974,16 +977,17 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
if (newskb){
dma_addr_t addr;
- pci_unmap_single(rrpriv->pci_dev,
- desc->addr.addrlo, dev->mtu +
- HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
skb = rx_skb;
skb_put(skb, pkt_len);
rrpriv->rx_skbuff[index] = newskb;
- addr = pci_map_single(rrpriv->pci_dev,
- newskb->data,
- dev->mtu + HIPPI_HLEN,
- PCI_DMA_FROMDEVICE);
+ addr = dma_map_single(&rrpriv->pci_dev->dev,
+ newskb->data,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
set_rraddr(&desc->addr, addr);
} else {
printk("%s: Out of memory, deferring "
@@ -1068,9 +1072,9 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- pci_unmap_single(rrpriv->pci_dev,
+ dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo, skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
rrpriv->tx_skbuff[txcon] = NULL;
@@ -1110,8 +1114,9 @@ static inline void rr_raz_tx(struct rr_private *rrpriv,
if (skb) {
struct tx_desc *desc = &(rrpriv->tx_ring[i]);
- pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo, skb->len,
+ DMA_TO_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
@@ -1132,8 +1137,10 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
if (skb) {
struct rx_desc *desc = &(rrpriv->rx_ring[i]);
- pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
- dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
@@ -1188,17 +1195,17 @@ static int rr_open(struct net_device *dev)
goto error;
}
- rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
- 256 * sizeof(struct ring_ctrl),
- &dma_addr);
+ rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
+ 256 * sizeof(struct ring_ctrl),
+ &dma_addr, GFP_KERNEL);
if (!rrpriv->rx_ctrl) {
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
- rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
- &dma_addr);
+ rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
+ &dma_addr, GFP_KERNEL);
if (!rrpriv->info) {
ecode = -ENOMEM;
goto error;
@@ -1237,13 +1244,13 @@ static int rr_open(struct net_device *dev)
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (rrpriv->info) {
- pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
- rrpriv->info_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
+ rrpriv->info, rrpriv->info_dma);
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
- pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
@@ -1365,12 +1372,12 @@ static int rr_close(struct net_device *dev)
rr_raz_tx(rrpriv, dev);
rr_raz_rx(rrpriv, dev);
- pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
- pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
- rrpriv->info_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
+ rrpriv->info_dma);
rrpriv->info = NULL;
spin_unlock_irqrestore(&rrpriv->lock, flags);
@@ -1430,8 +1437,8 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
index = txctrl->pi;
rrpriv->tx_skbuff[index] = skb;
- set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
- rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
+ set_rraddr(&rrpriv->tx_ring[index].addr,
+ dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
txctrl->pi = (index + 1) % TX_RING_ENTRIES;
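
The rrunner changes above are a mechanical conversion from the deprecated PCI DMA wrappers to the generic DMA API. As a summary of the mapping the hunks apply (arguments shown as placeholders):

	/*
	 *   pci_alloc_consistent(pdev, size, &dma)
	 *     -> dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL)
	 *   pci_free_consistent(pdev, size, cpu_addr, dma)
	 *     -> dma_free_coherent(&pdev->dev, size, cpu_addr, dma)
	 *   pci_map_single(pdev, ptr, size, PCI_DMA_FROMDEVICE)
	 *     -> dma_map_single(&pdev->dev, ptr, size, DMA_FROM_DEVICE)
	 *   pci_unmap_single(...)              -> dma_unmap_single(...)
	 *   pci_dma_sync_single_for_cpu(...)   -> dma_sync_single_for_cpu(...)
	 *   pci_dma_sync_single_for_device(...)-> dma_sync_single_for_device(...)
	 *
	 * The only semantic additions are the explicit &pdev->dev device
	 * pointer and the GFP_KERNEL flag on coherent allocations.
	 */
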
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a57d1985bae..0c3de94b5178 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -846,7 +846,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}
for (i = 0; i < page_count; i++) {
- char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+ char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
u32 offset = pb[i].offset;
u32 len = pb[i].len;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9869e390875e..261e6e55a907 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -373,32 +373,29 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
struct hv_page_buffer *pb)
{
int j = 0;
- /* Deal with compound pages by ignoring unused part
- * of the page.
- */
- page += (offset >> PAGE_SHIFT);
- offset &= ~PAGE_MASK;
+ hvpfn += offset >> HV_HYP_PAGE_SHIFT;
+ offset = offset & ~HV_HYP_PAGE_MASK;
while (len > 0) {
unsigned long bytes;
- bytes = PAGE_SIZE - offset;
+ bytes = HV_HYP_PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
- pb[j].pfn = page_to_pfn(page);
+ pb[j].pfn = hvpfn;
pb[j].offset = offset;
pb[j].len = bytes;
offset += bytes;
len -= bytes;
- if (offset == PAGE_SIZE && len) {
- page++;
+ if (offset == HV_HYP_PAGE_SIZE && len) {
+ hvpfn++;
offset = 0;
j++;
}
@@ -421,23 +418,26 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_page(hdr),
- offset_in_page(hdr),
- len, &pb[slots_used]);
+ slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
+ offset_in_hvpage(hdr),
+ len,
+ &pb[slots_used]);
packet->rmsg_size = len;
packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_page(data),
- offset_in_page(data),
- skb_headlen(skb), &pb[slots_used]);
+ slots_used += fill_pg_buf(virt_to_hvpfn(data),
+ offset_in_hvpage(data),
+ skb_headlen(skb),
+ &pb[slots_used]);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
- slots_used += fill_pg_buf(skb_frag_page(frag),
- skb_frag_off(frag),
- skb_frag_size(frag), &pb[slots_used]);
+ slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ &pb[slots_used]);
}
return slots_used;
}
@@ -453,8 +453,8 @@ static int count_skb_frag_slots(struct sk_buff *skb)
unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
- offset &= ~PAGE_MASK;
- pages += PFN_UP(offset + size);
+ offset &= ~HV_HYP_PAGE_MASK;
+ pages += HVPFN_UP(offset + size);
}
return pages;
}
@@ -462,12 +462,12 @@ static int count_skb_frag_slots(struct sk_buff *skb)
static int netvsc_get_slots(struct sk_buff *skb)
{
char *data = skb->data;
- unsigned int offset = offset_in_page(data);
+ unsigned int offset = offset_in_hvpage(data);
unsigned int len = skb_headlen(skb);
int slots;
int frag_slots;
- slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
frag_slots = count_skb_frag_slots(skb);
return slots + frag_slots;
}
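
The netvsc conversion above matters because vmbus page buffers are expressed in the hypervisor's fixed 4 KiB page size, which need not equal the guest's PAGE_SIZE (e.g. 16K/64K pages on arm64). For reference, the helpers the new code relies on look approximately like this in the Hyper-V headers (a sketch, not copied from this patch):

	#define HV_HYP_PAGE_SHIFT	12	/* hypervisor pages are always 4 KiB */
	#define HV_HYP_PAGE_SIZE	BIT(HV_HYP_PAGE_SHIFT)
	#define HV_HYP_PAGE_MASK	(~(HV_HYP_PAGE_SIZE - 1))

	#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT)
	#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
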
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 12ad471ac5e1..b22e47bcfeca 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -25,7 +25,7 @@
static void rndis_set_multicast(struct work_struct *w);
-#define RNDIS_EXT_LEN PAGE_SIZE
+#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -215,18 +215,17 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf_cnt = 1;
pb[0].pfn = virt_to_phys(&req->request_msg) >>
- PAGE_SHIFT;
+ HV_HYP_PAGE_SHIFT;
pb[0].len = req->request_msg.msg_len;
- pb[0].offset =
- (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
+ pb[0].offset = offset_in_hvpage(&req->request_msg);
/* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > PAGE_SIZE) {
+ if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
packet->page_buf_cnt++;
- pb[0].len = PAGE_SIZE -
+ pb[0].len = HV_HYP_PAGE_SIZE -
pb[0].offset;
pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> PAGE_SHIFT;
+ + pb[0].len) >> HV_HYP_PAGE_SHIFT;
pb[1].offset = 0;
pb[1].len = req->request_msg.msg_len -
pb[0].len;
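
To make the boundary arithmetic concrete, a worked example with hypothetical numbers:

	/* Suppose request_msg sits at hvpage offset 0xf00 with msg_len
	 * 0x300.  Then pb[0].offset = 0xf00 and pb[0].len is clamped to
	 * HV_HYP_PAGE_SIZE - 0xf00 = 0x100, while pb[1] starts at offset
	 * 0 of the following hypervisor page and carries the remaining
	 * 0x200 bytes.
	 */
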
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index c20e7ef18bc9..c0bf7d78276e 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -583,7 +583,7 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] =
};
/* Generic Netlink operations array */
-static const struct genl_ops hwsim_nl_ops[] = {
+static const struct genl_small_ops hwsim_nl_ops[] = {
{
.cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -628,8 +628,8 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.maxattr = MAC802154_HWSIM_ATTR_MAX,
.policy = hwsim_genl_policy,
.module = THIS_MODULE,
- .ops = hwsim_nl_ops,
- .n_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .small_ops = hwsim_nl_ops,
+ .n_small_ops = ARRAY_SIZE(hwsim_nl_ops),
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 0e63d35320aa..6bfac1efe037 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -254,8 +254,8 @@ static void gsi_irq_enable(struct gsi *gsi)
/* We don't use inter-EE channel or event interrupts */
val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
- val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
- val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
+ val &= ~INTER_EE_CH_CTRL_FMASK;
+ val &= ~INTER_EE_EV_CTRL_FMASK;
iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
val = GENMASK(gsi->channel_count - 1, 0);
@@ -271,7 +271,7 @@ static void gsi_irq_enable(struct gsi *gsi)
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
/* Never enable GSI_BREAK_POINT */
- val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
+ val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
@@ -1074,8 +1074,8 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val &= ~ERROR_INT_FMASK;
- if (val & EN_GP_INT1_FMASK) {
- val ^= EN_GP_INT1_FMASK;
+ if (val & GP_INT1_FMASK) {
+ val ^= GP_INT1_FMASK;
gsi_isr_gp_int1(gsi);
}
@@ -1600,7 +1600,7 @@ err_unwind_modem:
/* Compute which modem channels need to be deallocated */
mask ^= gsi->modem_channel_bitmap;
while (mask) {
- u32 channel_id = __fls(mask);
+ channel_id = __fls(mask);
mask ^= BIT(channel_id);
@@ -1628,7 +1628,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
mutex_lock(&gsi->mutex);
while (mask) {
- u32 channel_id = __fls(mask);
+ channel_id = __fls(mask);
mask ^= BIT(channel_id);
@@ -1972,7 +1972,6 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
*/
init_dummy_netdev(&gsi->dummy_dev);
- /* Get the GSI IRQ and request for it to wake the system */
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0) {
dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
@@ -1987,31 +1986,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
}
gsi->irq = irq;
- ret = enable_irq_wake(gsi->irq);
- if (ret)
- dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
- gsi->irq_wake_enabled = !ret;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
@@ -2025,9 +2019,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
err_iounmap:
iounmap(gsi->virt);
-err_disable_irq_wake:
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
+err_free_irq:
free_irq(gsi->irq, gsi);
return ret;
@@ -2038,8 +2030,6 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
free_irq(gsi->irq, gsi);
iounmap(gsi->virt);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 061312773df0..3f9f29d531c4 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -150,7 +150,6 @@ struct gsi {
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
- bool irq_wake_enabled;
u32 channel_count;
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index acc9e744c67d..8e0e9350c383 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -258,6 +258,11 @@
GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
(0x0001f080 + 0x4000 * (ee))
+#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
+ (0x0001f088 + 0x4000 * (ee))
+/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
#define CH_CTRL_FMASK GENMASK(0, 0)
#define EV_CTRL_FMASK GENMASK(1, 1)
#define GLOB_EE_FMASK GENMASK(2, 2)
@@ -265,18 +270,6 @@
#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
#define GENERAL_FMASK GENMASK(6, 6)
-
-#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
-#define MSK_CH_CTRL_FMASK GENMASK(0, 0)
-#define MSK_EV_CTRL_FMASK GENMASK(1, 1)
-#define MSK_GLOB_EE_FMASK GENMASK(2, 2)
-#define MSK_IEOB_FMASK GENMASK(3, 3)
-#define MSK_INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define MSK_INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define MSK_GENERAL_FMASK GENMASK(6, 6)
#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
@@ -328,57 +321,39 @@
GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
(0x0001f100 + 0x4000 * (ee))
-#define ERROR_INT_FMASK GENMASK(0, 0)
-#define GP_INT1_FMASK GENMASK(1, 1)
-#define GP_INT2_FMASK GENMASK(2, 2)
-#define GP_INT3_FMASK GENMASK(3, 3)
-
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
(0x0001f108 + 0x4000 * (ee))
-#define EN_ERROR_INT_FMASK GENMASK(0, 0)
-#define EN_GP_INT1_FMASK GENMASK(1, 1)
-#define EN_GP_INT2_FMASK GENMASK(2, 2)
-#define EN_GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
-
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
(0x0001f110 + 0x4000 * (ee))
-#define CLR_ERROR_INT_FMASK GENMASK(0, 0)
-#define CLR_GP_INT1_FMASK GENMASK(1, 1)
-#define CLR_GP_INT2_FMASK GENMASK(2, 2)
-#define CLR_GP_INT3_FMASK GENMASK(3, 3)
+/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
+#define ERROR_INT_FMASK GENMASK(0, 0)
+#define GP_INT1_FMASK GENMASK(1, 1)
+#define GP_INT2_FMASK GENMASK(2, 2)
+#define GP_INT3_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
(0x0001f118 + 0x4000 * (ee))
-#define BREAK_POINT_FMASK GENMASK(0, 0)
-#define BUS_ERROR_FMASK GENMASK(1, 1)
-#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
(0x0001f120 + 0x4000 * (ee))
-#define EN_BREAK_POINT_FMASK GENMASK(0, 0)
-#define EN_BUS_ERROR_FMASK GENMASK(1, 1)
-#define EN_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define EN_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
-
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
(0x0001f128 + 0x4000 * (ee))
-#define CLR_BREAK_POINT_FMASK GENMASK(0, 0)
-#define CLR_BUS_ERROR_FMASK GENMASK(1, 1)
-#define CLR_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define CLR_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
+#define BREAK_POINT_FMASK GENMASK(0, 0)
+#define BUS_ERROR_FMASK GENMASK(1, 1)
+#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
#define GSI_CNTXT_INTSET_OFFSET \
GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index bdbfeed359db..92642030e735 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -81,7 +81,6 @@ struct gsi_tre {
/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
-#define TRE_FLAGS_IEOB_FMASK GENMASK(8, 8)
#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
@@ -398,15 +397,24 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
/* assert(which < trans->tre_count); */
- /* Set the page information for the buffer. We also need to fill in
- * the DMA address and length for the buffer (something dma_map_sg()
- * normally does).
+ /* Commands are quite different from data transfer requests.
+ * Their payloads come from a pool whose memory is allocated
+ * using dma_alloc_coherent(). We therefore do *not* map them
+ * for DMA (unlike what we do for pages and skbs).
+ *
+ * When a transaction completes, the SGL is normally unmapped.
+ * A command transaction has direction DMA_NONE, which tells
+ * gsi_trans_complete() to skip the unmapping step.
+ *
+ * The only things we use directly in a command scatter/gather
+ * entry are the DMA address and length. We still need the SG
+ * table flags to be maintained though, so assign a NULL page
+ * pointer for that purpose.
*/
sg = &trans->sgl[which];
-
- sg_set_buf(sg, buf, size);
+ sg_assign_page(sg, NULL);
sg_dma_address(sg) = addr;
- sg_dma_len(sg) = sg->length;
+ sg_dma_len(sg) = size;
info = &trans->info[which];
info->opcode = opcode;
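
For contrast with the ordinary data path, a short note on what this hunk changes (a gloss on the code above, not additional driver code):

	/* sg_set_buf() would have filled in the page/offset/length view of
	 * the entry, and it requires a real, addressable page.  Command
	 * buffers come from a dma_alloc_coherent() pool, so only the DMA
	 * view of the entry is populated:
	 *
	 *	sg_assign_page(sg, NULL);	keeps SG table bookkeeping valid
	 *	sg_dma_address(sg) = addr;	coherent DMA address from the pool
	 *	sg_dma_len(sg)     = size;	what dma_map_sg() would have set
	 */
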
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 55115cfb2972..6c2371084c55 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -10,7 +10,6 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>
-#include <linux/notifier.h>
#include "ipa_version.h"
#include "gsi.h"
@@ -29,14 +28,24 @@ struct ipa_smp2p;
struct ipa_interrupt;
/**
+ * enum ipa_flag - IPA state flags
+ * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_FLAG_COUNT: Number of defined IPA flags
+ */
+enum ipa_flag {
+ IPA_FLAG_RESUMED,
+ IPA_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
* struct ipa - IPA information
* @gsi: Embedded GSI structure
+ * @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
* @modem_rproc: Remoteproc handle for modem subsystem
* @smp2p: SMP2P information
* @clock: IPA clocking information
- * @suspend_ref: Whether clock reference preventing suspend taken
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information
@@ -71,6 +80,7 @@ struct ipa_interrupt;
*/
struct ipa {
struct gsi gsi;
+ DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
struct rproc *modem_rproc;
@@ -78,7 +88,6 @@ struct ipa {
void *notifier;
struct ipa_smp2p *smp2p;
struct ipa_clock *clock;
- atomic_t suspend_ref;
dma_addr_t table_addr;
__le64 *table_virt;
@@ -105,8 +114,6 @@ struct ipa {
void *zero_virt;
size_t zero_size;
- struct wakeup_source *wakeup_source;
-
/* Bit masks indicating endpoint state */
u32 available; /* supported by hardware */
u32 filter_map;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 398f2e47043d..a2c0fde05819 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018-2020 Linaro Ltd.
*/
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -51,7 +51,7 @@
* @config_path: Configuration space interconnect
*/
struct ipa_clock {
- atomic_t count;
+ refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
struct icc_path *memory_path;
@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
*/
bool ipa_clock_get_additional(struct ipa *ipa)
{
- return !!atomic_inc_not_zero(&ipa->clock->count);
+ return refcount_inc_not_zero(&ipa->clock->count);
}
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the clock (and
- * interconnects) are enabled suspended endpoints (if any) are resumed
- * before returning.
+ * under protection of the mutex, and if appropriate the IPA clock
+ * is enabled.
*
* Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed.
@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
goto out_mutex_unlock;
}
- ipa_endpoint_resume(ipa);
-
- atomic_inc(&clock->count);
+ refcount_set(&clock->count, 1);
out_mutex_unlock:
mutex_unlock(&clock->mutex);
}
-/* Attempt to remove an IPA clock reference. If this represents the last
- * reference, suspend endpoints and disable the clock (and interconnects)
- * under protection of a mutex.
+/* Attempt to remove an IPA clock reference. If this represents the
+ * last reference, disable the IPA clock under protection of the mutex.
*/
void ipa_clock_put(struct ipa *ipa)
{
struct ipa_clock *clock = ipa->clock;
/* If this is not the last reference there's nothing more to do */
- if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
return;
- ipa_endpoint_suspend(ipa);
-
ipa_clock_disable(ipa);
mutex_unlock(&clock->mutex);
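
The refcount_t conversion above leans on a standard idiom: the fast path takes a reference only if the count is already non-zero, and the 0-to-1 and 1-to-0 transitions are serialized by the mutex. A minimal self-contained sketch of the pattern (the demo_* names are hypothetical, not the driver's):

	#include <linux/refcount.h>
	#include <linux/mutex.h>

	struct demo {
		refcount_t count;
		struct mutex mutex;	/* protects enable/disable */
	};

	static void demo_get(struct demo *d)
	{
		/* Fast path: succeeds only if the count is already non-zero */
		if (refcount_inc_not_zero(&d->count))
			return;

		mutex_lock(&d->mutex);
		if (refcount_inc_not_zero(&d->count))
			goto out;	/* lost a race with another demo_get() */

		/* hypothetical hardware-enable step would go here */
		refcount_set(&d->count, 1);	/* publish the first reference */
	out:
		mutex_unlock(&d->mutex);
	}

	static void demo_put(struct demo *d)
	{
		/* Returns true, with the mutex held, only on the 1 -> 0 drop */
		if (!refcount_dec_and_mutex_lock(&d->count, &d->mutex))
			return;

		/* hypothetical hardware-disable step would go here */
		mutex_unlock(&d->mutex);
	}
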
@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_kfree;
mutex_init(&clock->mutex);
- atomic_set(&clock->count, 0);
+ refcount_set(&clock->count, 0);
return clock;
@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
- WARN_ON(atomic_read(&clock->count) != 0);
+ WARN_ON(refcount_read(&clock->count) != 0);
mutex_destroy(&clock->mutex);
ipa_interconnect_exit(clock);
kfree(clock);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b7efd7c95e9c..b40b711cf4bd 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -42,11 +42,8 @@
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
IPA_STATUS_OPCODE_PACKET = 0x01,
- IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
- IPA_STATUS_OPCODE_LOG = 0x10,
- IPA_STATUS_OPCODE_DCMP = 0x20,
IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
};
@@ -54,13 +51,6 @@ enum ipa_status_opcode {
enum ipa_status_exception {
/* 0 means no exception */
IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
- IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
- IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
- IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
- IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
- /* The meaning of the next value depends on whether the IP version */
- IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
- IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
};
/* Status element provided by hardware */
@@ -79,36 +69,9 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
-
-#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
-
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
-
-#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
-#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
-#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
-#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
-#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
-#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
-#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
-#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
-#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
-#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
-#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
-#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
-#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
-
-#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
-#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
-
-#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
-#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
-#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
-#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
-#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
-
#ifdef IPA_VALIDATE
static void ipa_endpoint_validate_build(void)
@@ -1048,8 +1011,7 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
}
/* The format of a packet status element is the same for several status
- * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
- * aren't currently supported
+ * types (opcodes). Other types aren't currently supported.
*/
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
@@ -1086,7 +1048,7 @@ static bool ipa_status_drop_packet(const struct ipa_status *status)
{
u32 val;
- /* Deaggregation exceptions we drop; others we consume */
+ /* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1432,11 +1394,10 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
return;
- if (!endpoint->toward_ipa)
+ if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
-
- if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, true);
+ }
/* IPA v3.5.1 doesn't use channel stop for suspend */
stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
@@ -1471,6 +1432,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_suspend(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
@@ -1482,6 +1446,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
void ipa_endpoint_resume(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 90353987c45f..cc1ea28f7bc2 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
goto err_kfree;
}
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ goto err_free_irq;
+ }
+
return interrupt;
+err_free_irq:
+ free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
@@ -248,6 +256,12 @@ err_kfree:
/* Tear down the IPA interrupt framework */
void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
{
+ struct device *dev = &interrupt->ipa->pdev->dev;
+ int ret;
+
+ ret = disable_irq_wake(interrupt->irq);
+ if (ret)
+ dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 1fdfec41e442..cd4d993b0bbb 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -75,17 +75,19 @@
* @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused)
*
- * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
- * IPA interrupt.
+ * If an RX endpoint is in suspend state, and the IPA has a packet
+ * destined for that endpoint, the IPA generates a SUSPEND interrupt
+ * to inform the AP that it should resume the endpoint. If we get
+ * one of these interrupts we just resume everything.
*/
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
- /* Take a a single clock reference to prevent suspend. All
- * endpoints will be resumed as a result. This reference will
- * be dropped when we get a power management suspend request.
+ /* Just report the event, and let system resume handle the rest.
+ * More than one endpoint could signal this; if so, ignore
+ * all but the first.
*/
- if (!atomic_xchg(&ipa->suspend_ref, 1))
- ipa_clock_get(ipa);
+ if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
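
For readers unfamiliar with the wakeup API used here: this relies on the device_init_wakeup(dev, true) call added in ipa_setup(), and the test_and_set_bit() gate ensures only the first SUSPEND interrupt after a suspend reports the event.

	/* From linux/pm_wakeup.h (for reference):
	 *	void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
	 *				 bool hard);
	 * msec == 0 means no timeout; hard == true forces a system wakeup
	 * (aborting an in-flight suspend) even if the device has not been
	 * enabled as a wakeup source by user space.
	 */
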
@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
+ struct device *dev = &ipa->pdev->dev;
int ret;
/* Setup for IPA v3.5.1 has some slight differences */
@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
ipa_uc_setup(ipa);
+ ret = device_init_wakeup(dev, true);
+ if (ret)
+ goto err_uc_teardown;
+
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
ipa->setup_complete = true;
- dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+ dev_info(dev, "IPA driver setup completed successfully\n");
return 0;
@@ -173,6 +180,8 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(dev, false);
+err_uc_teardown:
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
* is held after initialization completes, and won't get dropped
* unless/until a system suspend request arrives.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
ipa_hardware_config(ipa);
@@ -544,7 +553,6 @@ err_endpoint_deconfig:
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return ret;
}
@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
}
static int ipa_firmware_load(struct device *dev)
@@ -709,7 +716,6 @@ static void ipa_validate_build(void)
*/
static int ipa_probe(struct platform_device *pdev)
{
- struct wakeup_source *wakeup_source;
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
@@ -717,8 +723,8 @@ static int ipa_probe(struct platform_device *pdev)
bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- phandle phandle;
bool prefetch;
+ phandle ph;
int ret;
ipa_validate_build();
@@ -730,13 +736,13 @@ static int ipa_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
/* We rely on remoteproc to tell us about modem state changes */
- phandle = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!phandle) {
+ ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ if (!ph) {
dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
return -EINVAL;
}
- rproc = rproc_get_by_phandle(phandle);
+ rproc = rproc_get_by_phandle(ph);
if (!rproc)
return -EPROBE_DEFER;
@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
goto err_clock_exit;
}
- /* Create a wakeup source. */
- wakeup_source = wakeup_source_register(dev, "ipa");
- if (!wakeup_source) {
- /* The most likely reason for failure is memory exhaustion */
- ret = -ENOMEM;
- goto err_clock_exit;
- }
-
/* Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
- goto err_wakeup_source_unregister;
+ goto err_clock_exit;
}
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
ipa->modem_rproc = rproc;
ipa->clock = clock;
- atomic_set(&ipa->suspend_ref, 0);
- ipa->wakeup_source = wakeup_source;
ipa->version = data->version;
ret = ipa_reg_init(ipa);
@@ -857,8 +853,6 @@ err_reg_exit:
ipa_reg_exit(ipa);
err_kfree_ipa:
kfree(ipa);
-err_wakeup_source_unregister:
- wakeup_source_unregister(wakeup_source);
err_clock_exit:
ipa_clock_exit(clock);
err_rproc_put:
@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
- struct wakeup_source *wakeup_source;
int ret;
- wakeup_source = ipa->wakeup_source;
-
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
if (ret)
@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_mem_exit(ipa);
ipa_reg_exit(ipa);
kfree(ipa);
- wakeup_source_unregister(wakeup_source);
ipa_clock_exit(clock);
rproc_put(rproc);
@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
* Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
+ * Suspends endpoints and releases the clock reference held to keep
+ * the IPA clock running until this point.
*/
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ /* When a suspended RX endpoint has a packet ready to receive, we
+ * get an IPA SUSPEND interrupt. We trigger a system resume in
+ * that case, but only on the first such interrupt since suspend.
+ */
+ __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
+
+ ipa_endpoint_suspend(ipa);
+
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return 0;
}
@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
* Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
+ * Takes an IPA clock reference to keep the clock running until suspend,
+ * and resumes endpoints.
*/
static int ipa_resume(struct device *dev)
{
@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
/* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
+ ipa_endpoint_resume(ipa);
+
return 0;
}
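
Callbacks like ipa_suspend()/ipa_resume() are typically exposed through a dev_pm_ops table referenced by the platform driver. A sketch of the wiring, which this hunk does not show (the struct name is assumed):

	static const struct dev_pm_ops ipa_pm_ops = {
		.suspend = ipa_suspend,
		.resume  = ipa_resume,
	};

	/* ...and in the platform_driver: .driver = { .pm = &ipa_pm_ops, ... } */
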
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index eb4e39fa7d4b..e542598fd775 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -426,7 +426,7 @@ enum ipa_cs_offload_en {
IPA_CS_RSVD
};
-/** enum ipa_aggr_en - aggregation type field in ENDP_INIT_AGGR_N */
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
enum ipa_aggr_en {
IPA_BYPASS_AGGR = 0,
IPA_ENABLE_AGGR = 1,
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 1a0b04e0ab74..b382d47bc70d 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -144,7 +144,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
* should only receive responses from the microcontroller when it has
* sent it a request message.
*
- * We can drop the clock reference taken in ipa_uc_init() once we
+ * We can drop the clock reference taken in ipa_uc_setup() once we
* know the microcontroller has finished its initialization.
*/
switch (shared->response) {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5bca94c99006..60b7d93bb834 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -684,6 +684,13 @@ static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
[IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
+static struct net *ipvlan_get_link_net(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return dev_net(ipvlan->phy_dev);
+}
+
static struct rtnl_link_ops ipvlan_link_ops = {
.kind = "ipvlan",
.priv_size = sizeof(struct ipvl_dev),
@@ -691,6 +698,7 @@ static struct rtnl_link_ops ipvlan_link_ops = {
.setup = ipvlan_link_setup,
.newlink = ipvlan_link_new,
.dellink = ipvlan_link_delete,
+ .get_link_net = ipvlan_get_link_net,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 787ac2c8e74e..11ca5fa902a1 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1613,7 +1613,7 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
@@ -3287,7 +3287,7 @@ done:
return skb->len;
}
-static const struct genl_ops macsec_genl_ops[] = {
+static const struct genl_small_ops macsec_genl_ops[] = {
{
.cmd = MACSEC_CMD_GET_TXSC,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -3363,8 +3363,8 @@ static struct genl_family macsec_fam __ro_after_init = {
.policy = macsec_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = macsec_genl_ops,
- .n_ops = ARRAY_SIZE(macsec_genl_ops),
+ .small_ops = macsec_genl_ops,
+ .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
@@ -3647,30 +3647,10 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
static void macsec_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
- int cpu;
-
if (!dev->tstats)
return;
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats;
- struct pcpu_sw_netstats tmp;
- int start;
-
- stats = per_cpu_ptr(dev->tstats, cpu);
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- tmp.rx_packets = stats->rx_packets;
- tmp.rx_bytes = stats->rx_bytes;
- tmp.tx_packets = stats->tx_packets;
- tmp.tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-
- s->rx_packets += tmp.rx_packets;
- s->rx_bytes += tmp.rx_bytes;
- s->tx_packets += tmp.tx_packets;
- s->tx_bytes += tmp.tx_bytes;
- }
+ dev_fetch_sw_netstats(s, dev->tstats);
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
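
The open-coded per-CPU loop removed above is what the new core helper implements: it iterates the possible CPUs and folds the rx/tx packet and byte counters under the u64_stats seqcount, matching the deleted loop. For reference, its declaration (from include/linux/netdevice.h as of this series):

	void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
				   const struct pcpu_sw_netstats __percpu *netstats);
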
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
new file mode 100644
index 000000000000..a10cc460d7cf
--- /dev/null
+++ b/drivers/net/mdio/Kconfig
@@ -0,0 +1,251 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MDIO Layer Configuration
+#
+
+menuconfig MDIO_DEVICE
+ tristate "MDIO bus device drivers"
+ help
+ MDIO devices and driver infrastructure code.
+
+if MDIO_DEVICE
+
+config MDIO_BUS
+ tristate
+ default m if PHYLIB=m
+ default MDIO_DEVICE
+ help
+ This internal symbol is used for link time dependencies and it
+ reflects whether the mdio_bus/mdio_device code is built as a
+ loadable module or built-in.
+
+config OF_MDIO
+ def_tristate PHYLIB
+ depends on OF
+ depends on PHYLIB
+ select FIXED_PHY
+ help
+ OpenFirmware MDIO bus (Ethernet PHY) accessors.
+
+if MDIO_BUS
+
+config MDIO_DEVRES
+ tristate
+
+config MDIO_SUN4I
+ tristate "Allwinner sun4i MDIO interface support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of Allwinner SoCs that have an EMAC (A10,
+ A12, A10s, etc.).
+
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO busses found in the
+ APM X-Gene SoCs.
+
+config MDIO_ASPEED
+ tristate "ASPEED MDIO bus controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM
+ help
+ This module provides a driver for the independent MDIO bus
+ controllers found in the ASPEED AST2600 SoC. This is a driver for the
+ third revision of the ASPEED MDIO register interface - the first two
+ revisions are the "old" and "new" interfaces found in the AST2400 and
+ AST2500, embedded in the MAC. For legacy reasons, the FTGMAC100
+ driver continues to drive the embedded MDIO controller for the
+ AST2400 and AST2500 SoCs, so say N if AST2600 support is not required.
+
+config MDIO_BITBANG
+ tristate "Bitbanged MDIO buses"
+ help
+ This module implements the MDIO bus protocol in software,
+ for use by low level drivers that export the ability to
+ drive the relevant pins.
+
+ If in doubt, say N.
+
+config MDIO_BCM_IPROC
+ tristate "Broadcom iProc MDIO bus controller"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for the MDIO busses found in the
+ Broadcom iProc SoCs.
+
+config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+ depends on HAS_IOMEM
+ help
+ This module provides a driver for the Broadcom UniMAC MDIO busses.
+ This hardware can be found in the Broadcom GENET Ethernet MAC
+ controllers as well as some Broadcom Ethernet switches, such as
+ the Starfighter 2.
+
+config MDIO_CAVIUM
+ tristate
+
+config MDIO_GPIO
+ tristate "GPIO lib-based bitbanged MDIO buses"
+ depends on MDIO_BITBANG
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Supports GPIO lib-based MDIO busses.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mdio-gpio.
+
+config MDIO_HISI_FEMAC
+ tristate "Hisilicon FEMAC MDIO bus controller"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_I2C
+ tristate
+ depends on I2C
+ help
+ Support I2C based PHYs. This provides an MDIO bus bridged
+ to I2C to allow PHYs connected in I2C mode to be accessed
+ using the existing infrastructure.
+
+ This is a library module.
+
+config MDIO_MVUSB
+ tristate "Marvell USB to MDIO Adapter"
+ depends on USB
+ select MDIO_DEVRES
+ help
+ A USB to MDIO converter present on development boards for
+ Marvell's Link Street family of Ethernet switches.
+
+config MDIO_MSCC_MIIM
+ tristate "Microsemi MIIM interface support"
+ depends on HAS_IOMEM
+ select MDIO_DEVRES
+ help
+ This driver supports the MIIM (MDIO) interface found in the network
+ switches of the Microsemi SoCs; enabling CONFIG_HIGH_RES_TIMERS
+ is recommended.
+
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the MOXA ART SoC.
+
+config MDIO_OCTEON
+ tristate "Octeon and some ThunderX SOCs MDIO buses"
+ depends on (64BIT && OF_MDIO) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MDIO_CAVIUM
+ select MDIO_DEVRES
+ help
+ This module provides a driver for the Octeon and ThunderX MDIO
+ buses. It is required by the Octeon and ThunderX Ethernet device
+ drivers on some systems.
+
+config MDIO_IPQ4019
+ tristate "Qualcomm IPQ4019 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This driver supports the MDIO interface found in Qualcomm
+ IPQ40xx series SoCs.
+
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC.
+
+config MDIO_THUNDER
+ tristate "ThunderX SOCs MDIO buses"
+ depends on 64BIT
+ depends on PCI
+ select MDIO_CAVIUM
+ select MDIO_DEVRES
+ help
+ This driver supports the MDIO interfaces found on Cavium
+ ThunderX SoCs when the MDIO bus device appears as a PCI
+ device.
+
+comment "MDIO Multiplexers"
+
+config MDIO_BUS_MUX
+ tristate
+ depends on OF_MDIO
+ help
+ This module provides a driver framework for MDIO bus
+ multiplexers which connect one of several child MDIO busses
+ to a parent bus. Switching between child busses is done by
+ device-specific drivers.
+
+config MDIO_BUS_MUX_MESON_G12A
+ tristate "Amlogic G12a based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the Amlogic G12a SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Broadcom iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+ iProc based Broadcom SoCs. This multiplexer connects one of several
+ child MDIO busses to a parent bus. Busses can be internal as well as
+ external, and the selection logic lies inside the multiplexer itself.
+
+config MDIO_BUS_MUX_GPIO
+ tristate "GPIO controlled MDIO bus multiplexers"
+ depends on OF_GPIO && OF_MDIO
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via GPIO lines. The multiplexer connects one of
+ several child MDIO busses to a parent bus. Child bus
+ selection is under the control of GPIO lines.
+
+config MDIO_BUS_MUX_MULTIPLEXER
+ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
+ depends on OF_MDIO
+ select MULTIPLEXER
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for an MDIO bus multiplexer
+ that is controlled via the kernel multiplexer subsystem. The
+ bus multiplexer connects one of several child MDIO busses to
+ a parent bus. Child bus selection is under the control of
+ the kernel multiplexer subsystem.
+
+config MDIO_BUS_MUX_MMIOREG
+ tristate "MMIO device-controlled MDIO bus multiplexers"
+ depends on OF_MDIO && HAS_IOMEM
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via a simple memory-mapped device, like an FPGA.
+ The multiplexer connects one of several child MDIO busses to a
+ parent bus. Child bus selection is under the control of one of
+ the FPGA's registers.
+
+ Currently, only 8-, 16- and 32-bit registers are supported.
+
+endif
+endif
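
As the MDIO_BUS_MUX help text above says, child-bus switching is delegated to device-specific drivers. A minimal sketch of how such a driver hooks into the framework (the demo_* names and priv layout are hypothetical; see include/linux/mdio-mux.h):

	#include <linux/mdio-mux.h>

	/* Called by the framework whenever a different child bus is needed */
	static int demo_mux_switch_fn(int current_child, int desired_child,
				      void *data)
	{
		/* program the multiplexer hardware here */
		return 0;
	}

	/* ...in the driver's probe():
	 *	err = mdio_mux_init(dev, dev->of_node, demo_mux_switch_fn,
	 *			    &priv->mux_handle, priv, NULL);
	 * and mdio_mux_uninit(priv->mux_handle) on remove.
	 */
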
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
new file mode 100644
index 000000000000..5c498dde463f
--- /dev/null
+++ b/drivers/net/mdio/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux MDIO bus drivers
+
+obj-$(CONFIG_OF_MDIO) += of_mdio.o
+
+obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
+obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
+obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
+obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+
+obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
+obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index cad820568f75..cad820568f75 100644
--- a/drivers/net/phy/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c
index 77fc970cdfde..77fc970cdfde 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index fbd36891ee64..fbd36891ee64 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..5136275c8e73 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 1afd6fc1a351..1afd6fc1a351 100644
--- a/drivers/net/phy/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..a2245d436f5d 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 1b00235d7dc5..1b00235d7dc5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c
index f231c2fbb1de..f231c2fbb1de 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/mdio/mdio-hisi-femac.c
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 0746e2cc39ae..09200a70b315 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -10,10 +10,9 @@
* of their settings.
*/
#include <linux/i2c.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
-#include "mdio-i2c.h"
-
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
* specified to be present in SFP modules. These correspond with PHY
diff --git a/drivers/net/phy/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 1ce81ff2f41d..25c25ea6da66 100644
--- a/drivers/net/phy/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -12,6 +12,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
+#define MDIO_MODE_REG 0x40
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -20,9 +21,15 @@
#define MDIO_CMD_ACCESS_START BIT(8)
#define MDIO_CMD_ACCESS_CODE_READ 0
#define MDIO_CMD_ACCESS_CODE_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_ADDR 0
+#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-#define ipq4019_MDIO_TIMEOUT 10000
-#define ipq4019_MDIO_SLEEP 10
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+
+#define IPQ4019_MDIO_TIMEOUT 10000
+#define IPQ4019_MDIO_SLEEP 10
struct ipq4019_mdio_data {
void __iomem *membase;
@@ -35,25 +42,50 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy,
(busy & MDIO_CMD_ACCESS_BUSY) == 0,
- ipq4019_MDIO_SLEEP, ipq4019_MDIO_TIMEOUT);
+ IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
}
static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ }
/* issue read command */
writel(cmd, priv->membase + MDIO_CMD_REG);
@@ -62,6 +94,15 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
+ if (regnum & MII_ADDR_C45) {
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ }
+
/* Read and return data */
return readl(priv->membase + MDIO_DATA_READ_REG);
}
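
The MMD/register unpacking in the handler above follows the MDIO core's Clause 45 encoding convention; for reference (see include/linux/mdio.h):

	/* MII_ADDR_C45 is bit 30 of regnum; when set, regnum encodes:
	 *
	 *	regnum = MII_ADDR_C45 | (devad << 16) | (reg & 0xffff);
	 *
	 * so the code above recovers the MMD with (regnum >> 16) & 0x1F and
	 * the register with regnum & 0xFFFF, then issues two bus cycles: an
	 * ADDR cycle carrying the register number, followed by a separate
	 * READ (or WRITE) cycle carrying the data.
	 */
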
@@ -70,23 +111,57 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ }
/* issue write data */
writel(value, priv->membase + MDIO_DATA_WRITE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
/* issue write command */
+ if (regnum & MII_ADDR_C45)
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
+ else
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+
writel(cmd, priv->membase + MDIO_CMD_REG);
/* Wait write complete */
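
For reference, a Clause 45 regnum as consumed above packs the MMD (device
address) and register number into one value; a minimal sketch of the encoding
a caller would use (the helper name is illustrative):

	/* MII_ADDR_C45 flags the access; devad and reg land in the fields
	 * that ipq4019_mdio_read()/write() split back out above.
	 */
	static int example_c45_read(struct mii_bus *bus, int phy_id,
				    int devad, u16 reg)
	{
		return mdiobus_read(bus, phy_id,
				    MII_ADDR_C45 | (devad << 16) | reg);
	}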
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 1bd18857e1c5..1bd18857e1c5 100644
--- a/drivers/net/phy/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c
index b72c6d185175..b72c6d185175 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/mdio/mdio-moxart.c
diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 11f583fd4611..11f583fd4611 100644
--- a/drivers/net/phy/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 42fb5f166136..42fb5f166136 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 10a758fdc9e6..10a758fdc9e6 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index bf86c9c7a288..bf86c9c7a288 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index d1a8780e24d8..d1a8780e24d8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
diff --git a/drivers/net/phy/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index d6564381aa3e..d6564381aa3e 100644
--- a/drivers/net/phy/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 6a1d3540210b..6a1d3540210b 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
diff --git a/drivers/net/phy/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51..d5eabddfdf51 100644
--- a/drivers/net/phy/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index d1e1009d51af..d1e1009d51af 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c
index f798de3276dc..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/mdio/mdio-sun4i.c
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 3d7eda99d34e..3d7eda99d34e 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 34990eaa3298..461207cdf5d6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
@@ -18,7 +19,6 @@
#include <linux/prefetch.h>
#include <linux/phy.h>
#include <net/ip.h>
-#include "mdio-xgene.h"
static bool xgene_mdio_status;
diff --git a/drivers/of/of_mdio.c b/drivers/net/mdio/of_mdio.c
index cb32d7ef4938..4daf94bb56a5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -338,6 +338,29 @@ unregister:
EXPORT_SYMBOL(of_mdiobus_register);
/**
+ * of_mdio_find_device - Given a device tree node, find the mdio_device
+ * @np: pointer to the mdio_device's device tree node
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device after its use
+ */
+struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ struct device *d;
+
+ if (!np)
+ return NULL;
+
+ d = bus_find_device_by_of_node(&mdio_bus_type, np);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(of_mdio_find_device);
+
+/**
* of_phy_find_device - Give a PHY node, find the phy_device
* @phy_np: Pointer to the phy's device tree node
*
@@ -346,19 +369,16 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
- struct device *d;
struct mdio_device *mdiodev;
- if (!phy_np)
+ mdiodev = of_mdio_find_device(phy_np);
+ if (!mdiodev)
return NULL;
- d = bus_find_device_by_of_node(&mdio_bus_type, phy_np);
- if (d) {
- mdiodev = to_mdio_device(d);
- if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
- return to_phy_device(d);
- put_device(d);
- }
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
return NULL;
}
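
A minimal usage sketch for the new helper, assuming np refers to a device on
an MDIO bus; the embedded device reference must be dropped with put_device()
once the caller is done:

	struct mdio_device *mdiodev;

	mdiodev = of_mdio_find_device(np);
	if (!mdiodev)
		return -EPROBE_DEFER;
	/* ... use mdiodev ... */
	put_device(&mdiodev->dev);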
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 4dfb389dbfd8..ade086eed955 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 32f339fedb21..d07061417675 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -40,7 +40,9 @@ static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
static int
-nsim_dev_take_snapshot(struct devlink *devlink, struct netlink_ext_ack *extack,
+nsim_dev_take_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data)
{
void *dummy_data;
@@ -68,7 +70,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
devlink = priv_to_devlink(nsim_dev);
- err = nsim_dev_take_snapshot(devlink, NULL, &dummy_data);
+ err = nsim_dev_take_snapshot(devlink, NULL, NULL, &dummy_data);
if (err)
return err;
@@ -201,6 +203,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
return PTR_ERR(nsim_dev->ports_ddir);
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
+ debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_overwrite_mask);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
@@ -697,6 +701,7 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action, enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
@@ -713,7 +718,8 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
-static int nsim_dev_reload_up(struct devlink *devlink,
+static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
@@ -726,6 +732,7 @@ static int nsim_dev_reload_up(struct devlink *devlink,
return -EINVAL;
}
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return nsim_dev_reload_create(nsim_dev, extack);
}
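
With the extended reload API, reload_up reports what actually happened via the
actions_performed bitmask, as above. For example, a driver that also activated
new firmware during the reload would (assuming it declared that action in its
reload_actions) report both bits:

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			     BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);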
@@ -740,24 +747,27 @@ static int nsim_dev_info_get(struct devlink *devlink,
#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
-static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
- const char *component,
+static int nsim_dev_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
int i;
+ if ((params->overwrite_mask & ~nsim_dev->fw_update_overwrite_mask) != 0)
+ return -EOPNOTSUPP;
+
if (nsim_dev->fw_update_status) {
devlink_flash_update_begin_notify(devlink);
devlink_flash_update_status_notify(devlink,
"Preparing to flash",
- component, 0, 0);
+ params->component, 0, 0);
}
for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
if (nsim_dev->fw_update_status)
devlink_flash_update_status_notify(devlink, "Flashing",
- component,
+ params->component,
i * NSIM_DEV_FLASH_CHUNK_SIZE,
NSIM_DEV_FLASH_SIZE);
msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
@@ -765,11 +775,13 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
if (nsim_dev->fw_update_status) {
devlink_flash_update_status_notify(devlink, "Flashing",
- component,
+ params->component,
NSIM_DEV_FLASH_SIZE,
NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_timeout_notify(devlink, "Flash select",
+ params->component, 81);
devlink_flash_update_status_notify(devlink, "Flashing done",
- component, 0, 0);
+ params->component, 0, 0);
devlink_flash_update_end_notify(devlink);
}
@@ -875,6 +887,9 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
}
static const struct devlink_ops nsim_dev_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
.info_get = nsim_dev_info_get,
@@ -989,6 +1004,7 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->fib_data = nsim_fib_create(devlink, extack);
if (IS_ERR(nsim_dev->fib_data))
@@ -1047,6 +1063,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
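
The overwrite-mask handshake above generalizes to other devlink drivers: a
driver advertises the DEVLINK_FLASH_OVERWRITE_* bits it honors and rejects
anything outside that set. A condensed sketch, with the foo_* names purely
hypothetical:

	static int foo_flash_update(struct devlink *devlink,
				    struct devlink_flash_update_params *params,
				    struct netlink_ext_ack *extack)
	{
		/* refuse overwrite sections the device cannot honor */
		if (params->overwrite_mask & ~FOO_SUPPORTED_OVERWRITE_MASK)
			return -EOPNOTSUPP;
		/* ... write the image ... */
		return 0;
	}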
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
new file mode 100644
index 000000000000..f1884d90a876
--- /dev/null
+++ b/drivers/net/netdevsim/ethtool.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/random.h>
+
+#include "netdevsim.h"
+
+static void
+nsim_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->ethtool.report_stats_rx)
+ pause_stats->rx_pause_frames = 1;
+ if (ns->ethtool.report_stats_tx)
+ pause_stats->tx_pause_frames = 2;
+}
+
+static void
+nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
+ pause->rx_pause = ns->ethtool.rx;
+ pause->tx_pause = ns->ethtool.tx;
+}
+
+static int
+nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ ns->ethtool.rx = pause->rx_pause;
+ ns->ethtool.tx = pause->tx_pause;
+ return 0;
+}
+
+static const struct ethtool_ops nsim_ethtool_ops = {
+ .get_pause_stats = nsim_get_pause_stats,
+ .get_pauseparam = nsim_get_pauseparam,
+ .set_pauseparam = nsim_set_pauseparam,
+};
+
+void nsim_ethtool_init(struct netdevsim *ns)
+{
+ struct dentry *ethtool, *dir;
+
+ ns->netdev->ethtool_ops = &nsim_ethtool_ops;
+
+ ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
+
+ dir = debugfs_create_dir("pause", ethtool);
+ debugfs_create_bool("report_stats_rx", 0600, dir,
+ &ns->ethtool.report_stats_rx);
+ debugfs_create_bool("report_stats_tx", 0600, dir,
+ &ns->ethtool.report_stats_tx);
+}
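
netdevsim fabricates the two counters above for testing; a hardware driver
would typically fill them from MIB registers instead. A sketch under that
assumption (all foo_* names hypothetical):

	static void foo_get_pause_stats(struct net_device *dev,
					struct ethtool_pause_stats *stats)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* fields left untouched are reported as unsupported */
		stats->rx_pause_frames = readq(priv->base + FOO_MIB_RX_PAUSE);
		stats->tx_pause_frames = readq(priv->base + FOO_MIB_TX_PAUSE);
	}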
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 97cfb015a50b..7178468302c8 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -301,6 +301,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
dev->netdev_ops = &nsim_netdev_ops;
+ nsim_ethtool_init(ns);
err = nsim_udp_tunnels_info_create(nsim_dev, dev);
if (err)
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 284f7092241d..827fc80f50a0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -20,6 +20,7 @@
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <net/devlink.h>
+#include <net/udp_tunnel.h>
#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -50,6 +51,13 @@ struct nsim_ipsec {
u32 ok;
};
+struct nsim_ethtool {
+ bool rx;
+ bool tx;
+ bool report_stats_rx;
+ bool report_stats_tx;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
@@ -77,15 +85,20 @@ struct netdevsim {
struct {
u32 inject_error;
u32 sleep;
- u32 ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
+
+ struct nsim_ethtool ethtool;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+void nsim_ethtool_init(struct netdevsim *ns);
+
void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev);
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct net_device *dev);
@@ -185,6 +198,7 @@ struct nsim_dev {
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
+ u32 fw_update_overwrite_mask;
u32 max_macs;
bool test1;
bool dont_allow_reload;
@@ -197,9 +211,13 @@ struct nsim_dev {
bool fail_trap_policer_set;
bool fail_trap_policer_counter_get;
struct {
+ struct udp_tunnel_nic_shared utn_shared;
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
bool sync_all;
bool open_only;
bool ipv4_only;
+ bool shared;
+ bool static_iana_vxlan;
u32 sleep;
} udp_ports;
};
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 22c06a76033c..6ab023acefd6 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -22,11 +22,13 @@ nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
msleep(ns->udp_ports.sleep);
if (!ret) {
- if (ns->udp_ports.ports[table][entry])
+ if (ns->udp_ports.ports[table][entry]) {
+ WARN(1, "entry already in use\n");
ret = -EBUSY;
- else
+ } else {
ns->udp_ports.ports[table][entry] =
be16_to_cpu(ti->port) << 16 | ti->type;
+ }
}
netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n",
@@ -50,10 +52,13 @@ nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
if (!ret) {
u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
- if (val == ns->udp_ports.ports[table][entry])
+ if (val == ns->udp_ports.ports[table][entry]) {
ns->udp_ports.ports[table][entry] = 0;
- else
+ } else {
+ WARN(1, "entry not installed %x vs %x\n",
+ val, ns->udp_ports.ports[table][entry]);
ret = -ENOENT;
+ }
}
netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n",
@@ -107,7 +112,7 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- memset(&ns->udp_ports.ports, 0, sizeof(ns->udp_ports.ports));
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
rtnl_lock();
udp_tunnel_nic_reset_ntf(dev);
rtnl_unlock();
@@ -127,6 +132,17 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct netdevsim *ns = netdev_priv(dev);
struct udp_tunnel_nic_info *info;
+ if (nsim_dev->udp_ports.shared && nsim_dev->udp_ports.open_only) {
+ dev_err(&nsim_dev->nsim_bus_dev->dev,
+ "shared can't be used in conjunction with open_only\n");
+ return -EINVAL;
+ }
+
+ if (!nsim_dev->udp_ports.shared)
+ ns->udp_ports.ports = ns->udp_ports.__ports;
+ else
+ ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
+
debugfs_create_u32("udp_ports_inject_error", 0600,
ns->nsim_dev_port->ddir,
&ns->udp_ports.inject_error);
@@ -168,6 +184,10 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
if (nsim_dev->udp_ports.ipv4_only)
info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY;
+ if (nsim_dev->udp_ports.shared)
+ info->shared = &nsim_dev->udp_ports.utn_shared;
+ if (nsim_dev->udp_ports.static_iana_vxlan)
+ info->flags |= UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
dev->udp_tunnel_nic_info = info;
return 0;
@@ -187,6 +207,10 @@ void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
&nsim_dev->udp_ports.open_only);
debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.ipv4_only);
+ debugfs_create_bool("udp_ports_shared", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.shared);
+ debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.static_iana_vxlan);
debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.sleep);
}
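
A device exposing several netdevs that share one tunnel-port table would
declare the shared state roughly as below (foo_* names hypothetical; the
per-netdev callbacks follow the same signatures as the netdevsim ones above):

	static struct udp_tunnel_nic_shared foo_utn_shared;

	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
		.set_port	= foo_udp_tunnel_set_port,
		.unset_port	= foo_udp_tunnel_unset_port,
		.shared		= &foo_utn_shared,
		.tables		= {
			{
				.n_entries	= 4,
				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
			},
		},
	};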
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
new file mode 100644
index 000000000000..22ba7b0b476d
--- /dev/null
+++ b/drivers/net/pcs/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PCS Layer Configuration
+#
+
+menu "PCS device drivers"
+
+config PCS_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ depends on MDIO_DEVICE && MDIO_BUS
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
+config PCS_LYNX
+ tristate
+ help
+	  This module provides helpers to phylink for managing the Lynx PCS,
+	  which is part of the Layerscape and QorIQ Ethernet SERDES.

+
+endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
new file mode 100644
index 000000000000..c23146755972
--- /dev/null
+++ b/drivers/net/pcs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux PCS drivers
+
+obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o
+obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
new file mode 100644
index 000000000000..62bb9272dcb2
--- /dev/null
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP
+ * Lynx PCS MDIO helpers
+ */
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
+
+#define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */
+#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
+
+#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
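+
+/* For example, LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS) = 1600000 / 8
+ * = 200000 = 0x30d40, so LINK_TIMER_LO takes 0x0d40 and LINK_TIMER_HI
+ * takes 0x3.
+ */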
+
+#define LINK_TIMER_LO 0x12
+#define LINK_TIMER_HI 0x13
+#define IF_MODE 0x14
+#define IF_MODE_SGMII_EN BIT(0)
+#define IF_MODE_USE_SGMII_AN BIT(1)
+#define IF_MODE_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define IF_MODE_SPEED_MSK GENMASK(3, 2)
+#define IF_MODE_HALF_DUPLEX BIT(4)
+
+enum sgmii_speed {
+ SGMII_SPEED_10 = 0,
+ SGMII_SPEED_100 = 1,
+ SGMII_SPEED_1000 = 2,
+ SGMII_SPEED_2500 = 2,
+};
+
+#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
+
+static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int status, lpa;
+
+ status = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ state->an_complete = !!(status & MDIO_AN_STAT1_COMPLETE);
+ if (!state->link || !state->an_complete)
+ return;
+
+ lpa = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ phylink_decode_usxgmii_word(state, lpa);
+}
+
+static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int bmsr, lpa;
+
+ bmsr = mdiobus_read(bus, addr, MII_BMSR);
+ lpa = mdiobus_read(bus, addr, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(bmsr & BMSR_LSTATUS);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ if (!state->link)
+ return;
+
+ state->speed = SPEED_2500;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+}
+
+static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_get_state_2500basex(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ lynx_pcs_get_state_usxgmii(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ phylink_mii_c45_pcs_get_state(lynx->mdio, state);
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&lynx->mdio->dev,
+ "mode=%s/%s/%s link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ state->link, state->an_enabled, state->an_complete);
+}
+
+static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode;
+ int err;
+
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ u32 link_timer;
+
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ }
+ err = mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
+ if_mode);
+ if (err)
+ return err;
+
+ return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
+ advertising);
+}
+
+static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+
+ if (!phylink_autoneg_inband(mode)) {
+ dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ return mdiobus_c45_write(bus, addr, MDIO_MMD_VEND2, MII_ADVERTISE,
+ MDIO_USXGMII_10G | MDIO_USXGMII_LINK |
+ MDIO_USXGMII_FULL_DUPLEX |
+ ADVERTISE_SGMII | ADVERTISE_LPACK);
+}
+
+static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t ifmode,
+ const unsigned long *advertising,
+ bool permit)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (ifmode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(&lynx->mdio->dev,
+ "AN not supported on 3.125GHz SerDes lane\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_10GBASER:
+ /* Nothing to do here for 10GBASER */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ u16 if_mode = 0, sgmii_speed;
+ int addr = pcs->addr;
+
+ /* The PCS needs to be configured manually only
+	 * when not operating in in-band mode
+ */
+ if (mode == MLO_AN_INBAND)
+ return;
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+
+ switch (speed) {
+ case SPEED_1000:
+ sgmii_speed = SGMII_SPEED_1000;
+ break;
+ case SPEED_100:
+ sgmii_speed = SGMII_SPEED_100;
+ break;
+ case SPEED_10:
+ sgmii_speed = SGMII_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ dev_err(&pcs->dev, "Invalid PCS speed %d\n", speed);
+ return;
+ }
+ if_mode |= IF_MODE_SPEED(sgmii_speed);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE because the clock frequency
+ * is actually given by a PLL configured in the Reset Configuration Word (RCW).
+ * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
+ * lower link speed on line side, the system-side interface remains fixed at
+ * 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode = 0;
+
+ if (mode == MLO_AN_INBAND) {
+ dev_err(&pcs->dev, "AN not supported for 2500BaseX\n");
+ return;
+ }
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+ if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ lynx_pcs_link_up_sgmii(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_link_up_2500basex(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ /* At the moment, only in-band AN is supported for USXGMII
+ * so nothing to do in link_up
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_get_state = lynx_pcs_get_state,
+ .pcs_config = lynx_pcs_config,
+ .pcs_link_up = lynx_pcs_link_up,
+};
+
+struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio)
+{
+ struct lynx_pcs *lynx_pcs;
+
+ lynx_pcs = kzalloc(sizeof(*lynx_pcs), GFP_KERNEL);
+ if (!lynx_pcs)
+ return NULL;
+
+ lynx_pcs->mdio = mdio;
+ lynx_pcs->pcs.ops = &lynx_pcs_phylink_ops;
+ lynx_pcs->pcs.poll = true;
+
+ return lynx_pcs;
+}
+EXPORT_SYMBOL(lynx_pcs_create);
+
+void lynx_pcs_destroy(struct lynx_pcs *pcs)
+{
+ kfree(pcs);
+}
+EXPORT_SYMBOL(lynx_pcs_destroy);
+
+MODULE_LICENSE("Dual BSD/GPL");
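
A MAC driver consumes this PCS by creating it from the PCS mdio_device and
handing the embedded phylink_pcs to phylink. A minimal sketch, assuming
priv->phylink was set up elsewhere and error unwinding is elided:

	struct lynx_pcs *lp;

	lp = lynx_pcs_create(mdiodev);
	if (!lp)
		return -ENOMEM;
	phylink_set_pcs(priv->phylink, &lp->pcs);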
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 0d66a8ba7eb6..1aa9903d602e 100644
--- a/drivers/net/phy/mdio-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -7,8 +7,8 @@
*/
#include <linux/delay.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
-#include <linux/mdio-xpcs.h>
#include <linux/phylink.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1c5a10b672fc..698bea312adc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,247 +3,6 @@
# PHY Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
-config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
- help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
-
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
-
-config MDIO_ASPEED
- tristate "ASPEED MDIO bus controller"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM
- help
- This module provides a driver for the independent MDIO bus
- controllers found in the ASPEED AST2600 SoC. This is a driver for the
- third revision of the ASPEED MDIO register interface - the first two
- revisions are the "old" and "new" interfaces found in the AST2400 and
- AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver
- continues to drive the embedded MDIO controller for the AST2400 and
- AST2500 SoCs, so say N if AST2600 support is not required.
-
-config MDIO_BCM_IPROC
- tristate "Broadcom iProc MDIO bus controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_IOMEM && OF_MDIO
- default ARCH_BCM_IPROC
- help
- This module provides a driver for the MDIO busses found in the
- Broadcom iProc SoC's.
-
-config MDIO_BCM_UNIMAC
- tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM
- help
- This module provides a driver for the Broadcom UniMAC MDIO busses.
- This hardware can be found in the Broadcom GENET Ethernet MAC
- controllers as well as some Broadcom Ethernet switches such as the
- Starfighter 2 switches.
-
-config MDIO_BITBANG
- tristate "Bitbanged MDIO buses"
- help
- This module implements the MDIO bus protocol in software,
- for use by low level drivers that export the ability to
- drive the relevant pins.
-
- If in doubt, say N.
-
-config MDIO_BUS_MUX
- tristate
- depends on OF_MDIO
- help
- This module provides a driver framework for MDIO bus
- multiplexers which connect one of several child MDIO busses
- to a parent bus. Switching between child busses is done by
- device specific drivers.
-
-config MDIO_BUS_MUX_BCM_IPROC
- tristate "Broadcom iProc based MDIO bus multiplexers"
- depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
- select MDIO_BUS_MUX
- default ARCH_BCM_IPROC
- help
- This module provides a driver for MDIO bus multiplexers found in
- iProc based Broadcom SoCs. This multiplexer connects one of several
- child MDIO bus to a parent bus. Buses could be internal as well as
- external and selection logic lies inside the same multiplexer.
-
-config MDIO_BUS_MUX_GPIO
- tristate "GPIO controlled MDIO bus multiplexers"
- depends on OF_GPIO && OF_MDIO
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via GPIO lines. The multiplexer connects one of
- several child MDIO busses to a parent bus. Child bus
- selection is under the control of GPIO lines.
-
-config MDIO_BUS_MUX_MESON_G12A
- tristate "Amlogic G12a based MDIO bus multiplexer"
- depends on ARCH_MESON || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
- select MDIO_BUS_MUX
- default m if ARCH_MESON
- help
- This module provides a driver for the MDIO multiplexer/glue of
- the amlogic g12a SoC. The multiplexers connects either the external
- or the internal MDIO bus to the parent bus.
-
-config MDIO_BUS_MUX_MMIOREG
- tristate "MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO && HAS_IOMEM
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via a simple memory-mapped device, like an FPGA.
- The multiplexer connects one of several child MDIO busses to a
- parent bus. Child bus selection is under the control of one of
- the FPGA's registers.
-
- Currently, only 8/16/32 bits registers are supported.
-
-config MDIO_BUS_MUX_MULTIPLEXER
- tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
- depends on OF_MDIO
- select MULTIPLEXER
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexer
- that is controlled via the kernel multiplexer subsystem. The
- bus multiplexer connects one of several child MDIO busses to
- a parent bus. Child bus selection is under the control of
- the kernel multiplexer subsystem.
-
-config MDIO_CAVIUM
- tristate
-
-config MDIO_GPIO
- tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG
- depends on GPIOLIB || COMPILE_TEST
- help
- Supports GPIO lib-based MDIO busses.
-
- To compile this driver as a module, choose M here: the module
- will be called mdio-gpio.
-
-config MDIO_HISI_FEMAC
- tristate "Hisilicon FEMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
- help
- This module provides a driver for the MDIO busses found in the
- Hisilicon SoC that have an Fast Ethernet MAC.
-
-config MDIO_I2C
- tristate
- depends on I2C
- help
- Support I2C based PHYs. This provides a MDIO bus bridged
- to I2C to allow PHYs connected in I2C mode to be accessed
- using the existing infrastructure.
-
- This is library mode.
-
-config MDIO_IPQ4019
- tristate "Qualcomm IPQ4019 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- help
- This driver supports the MDIO interface found in Qualcomm
- IPQ40xx series Soc-s.
-
-config MDIO_IPQ8064
- tristate "Qualcomm IPQ8064 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- depends on MFD_SYSCON
- help
- This driver supports the MDIO interface found in the network
- interface units of the IPQ8064 SoC
-
-config MDIO_MOXART
- tristate "MOXA ART MDIO interface support"
- depends on ARCH_MOXART || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the MOXA ART SoC
-
-config MDIO_MSCC_MIIM
- tristate "Microsemi MIIM interface support"
- depends on HAS_IOMEM
- select MDIO_DEVRES
- help
- This driver supports the MIIM (MDIO) interface found in the network
- switches of the Microsemi SoCs; it is recommended to switch on
- CONFIG_HIGH_RES_TIMERS
-
-config MDIO_MVUSB
- tristate "Marvell USB to MDIO Adapter"
- depends on USB
- select MDIO_DEVRES
- help
- A USB to MDIO converter present on development boards for
- Marvell's Link Street family of Ethernet switches.
-
-config MDIO_OCTEON
- tristate "Octeon and some ThunderX SOCs MDIO buses"
- depends on (64BIT && OF_MDIO) || COMPILE_TEST
- depends on HAS_IOMEM
- select MDIO_CAVIUM
- help
- This module provides a driver for the Octeon and ThunderX MDIO
- buses. It is required by the Octeon and ThunderX ethernet device
- drivers on some systems.
-
-config MDIO_SUN4I
- tristate "Allwinner sun4i MDIO interface support"
- depends on ARCH_SUNXI || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the Allwinner SoC that have an EMAC (A10,
- A12, A10s, etc.)
-
-config MDIO_THUNDER
- tristate "ThunderX SOCs MDIO buses"
- depends on 64BIT
- depends on PCI
- select MDIO_CAVIUM
- select MDIO_DEVRES
- help
- This driver supports the MDIO interfaces found on Cavium
- ThunderX SoCs when the MDIO bus device appears as a PCI
- device.
-
-config MDIO_XGENE
- tristate "APM X-Gene SoC MDIO bus controller"
- depends on ARCH_XGENE || COMPILE_TEST
- help
- This module provides a driver for the MDIO busses found in the
- APM X-Gene SoC's.
-
-config MDIO_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
-
-endif
-endif
-
config PHYLINK
tristate
depends on NETDEVICES
@@ -286,7 +45,15 @@ config LED_TRIGGER_PHY
for any speed known to the PHY.
-comment "MII PHY device drivers"
+config FIXED_PHY
+ tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB
+ select SWPHY
+ help
+ Adds the platform "fixed" MDIO Bus to cover the boards that use
+ PHYs that are not connected to the real MDIO bus.
+
+ Currently tested with mpc866ads and mpc8349e-mitx.
config SFP
tristate "SFP cage support"
@@ -294,6 +61,19 @@ config SFP
depends on HWMON || HWMON=n
select MDIO_I2C
+comment "MII PHY device drivers"
+
+config AMD_PHY
+ tristate "AMD PHYs"
+ help
+ Currently supports the am79c874
+
+config MESON_GXL_PHY
+ tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ Currently has a driver for the Amlogic Meson GXL Internal PHY
+
config ADIN_PHY
tristate "Analog Devices Industrial Ethernet PHYs"
help
@@ -303,11 +83,6 @@ config ADIN_PHY
- ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit
Ethernet PHY
-config AMD_PHY
- tristate "AMD PHYs"
- help
- Currently supports the am79c874
-
config AQUANTIA_PHY
tristate "Aquantia PHYs"
help
@@ -319,6 +94,24 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
+config BROADCOM_PHY
+ tristate "Broadcom 54XX PHYs"
+ select BCM_NET_PHYLIB
+ help
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481, BCM54810 and BCM5482 PHYs.
+
+config BCM54140_PHY
+ tristate "Broadcom BCM54140 PHY"
+ depends on PHYLIB
+ depends on HWMON || HWMON=n
+ select BCM_NET_PHYLIB
+ help
+ Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
+
+ This driver also supports the hardware monitoring of this PHY and
+ exposes voltage and temperature sensors.
+
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -333,6 +126,12 @@ config BCM7XXX_PHY
Currently supports the BCM7366, BCM7439, BCM7445, and
40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
+config BCM84881_PHY
+ tristate "Broadcom BCM84881 PHY"
+ depends on PHYLIB
+ help
+ Support the Broadcom BCM84881 PHY.
+
config BCM87XX_PHY
tristate "Broadcom BCM8706 and BCM8727 PHYs"
help
@@ -354,30 +153,6 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
-config BROADCOM_PHY
- tristate "Broadcom PHYs"
- select BCM_NET_PHYLIB
- help
- Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
- BCM5481, BCM54810 and BCM5482 PHYs.
-
-config BCM54140_PHY
- tristate "Broadcom BCM54140 PHY"
- depends on PHYLIB
- depends on HWMON || HWMON=n
- select BCM_NET_PHYLIB
- help
- Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
-
- This driver also supports the hardware monitoring of this PHY and
- exposes voltage and temperature sensors.
-
-config BCM84881_PHY
- tristate "Broadcom BCM84881 PHY"
- depends on PHYLIB
- help
- Support the Broadcom BCM84881 PHY.
-
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -393,48 +168,16 @@ config DAVICOM_PHY
help
Currently supports dm9161e and dm9131
-config DP83822_PHY
- tristate "Texas Instruments DP83822/825/826 PHYs"
- help
- Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
- DP83826C and DP83826NC PHYs.
-
-config DP83TC811_PHY
- tristate "Texas Instruments DP83TC811 PHY"
- help
- Supports the DP83TC811 PHY.
-
-config DP83848_PHY
- tristate "Texas Instruments DP83848 PHY"
- help
- Supports the DP83848 PHY.
-
-config DP83867_PHY
- tristate "Texas Instruments DP83867 Gigabit PHY"
- help
- Currently supports the DP83867 PHY.
-
-config DP83869_PHY
- tristate "Texas Instruments DP83869 Gigabit PHY"
- help
- Currently supports the DP83869 PHY. This PHY supports copper and
- fiber connections.
-
-config FIXED_PHY
- tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
- depends on PHYLIB
- select SWPHY
- help
- Adds the platform "fixed" MDIO Bus to cover the boards that use
- PHYs that are not connected to the real MDIO bus.
-
- Currently tested with mpc866ads and mpc8349e-mitx.
-
config ICPLUS_PHY
tristate "ICPlus PHYs"
help
Currently supports the IP175C and IP1001 PHYs.
+config LXT_PHY
+ tristate "Intel LXT PHYs"
+ help
+ Currently supports the lxt970, lxt971
+
config INTEL_XWAY_PHY
tristate "Intel XWAY PHYs"
help
@@ -448,27 +191,16 @@ config LSI_ET1011C_PHY
help
Supports the LSI ET1011C PHY.
-config LXT_PHY
- tristate "Intel LXT PHYs"
- help
- Currently supports the lxt970, lxt971
-
config MARVELL_PHY
- tristate "Marvell PHYs"
+ tristate "Marvell Alaska PHYs"
help
- Currently has a driver for the 88E1011S
+ Currently has a driver for the 88E1XXX
config MARVELL_10G_PHY
tristate "Marvell Alaska 10Gbit PHYs"
help
Support for the Marvell Alaska MV88X3310 and compatible PHYs.
-config MESON_GXL_PHY
- tristate "Amlogic Meson GXL Internal PHY"
- depends on ARCH_MESON || COMPILE_TEST
- help
- Currently has a driver for the Amlogic Meson GXL Internal PHY
-
config MICREL_PHY
tristate "Micrel PHYs"
help
@@ -519,12 +251,12 @@ config REALTEK_PHY
Supports the Realtek 821x PHY.
config RENESAS_PHY
- tristate "Driver for Renesas PHYs"
+ tristate "Renesas PHYs"
help
Supports the Renesas PHYs uPD60620 and uPD60620A.
config ROCKCHIP_PHY
- tristate "Driver for Rockchip Ethernet PHYs"
+ tristate "Rockchip Ethernet PHYs"
help
Currently supports the integrated Ethernet PHY.
@@ -543,6 +275,33 @@ config TERANETICS_PHY
help
Currently supports the Teranetics TN2020
+config DP83822_PHY
+ tristate "Texas Instruments DP83822/825/826 PHYs"
+ help
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
+
+config DP83TC811_PHY
+ tristate "Texas Instruments DP83TC811 PHY"
+ help
+ Supports the DP83TC811 PHY.
+
+config DP83848_PHY
+ tristate "Texas Instruments DP83848 PHY"
+ help
+ Supports the DP83848 PHY.
+
+config DP83867_PHY
+ tristate "Texas Instruments DP83867 Gigabit PHY"
+ help
+ Currently supports the DP83867 PHY.
+
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ help
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config VITESSE_PHY
tristate "Vitesse PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d84bab489a53..a13e402074cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for Linux PHY drivers and MDIO bus drivers
+# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o
@@ -24,31 +24,6 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
-obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
-obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
-obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
-obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
-obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
-obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
-obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
-obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
-obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
-obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
-obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
-obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
-obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
-obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
-obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
-obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
-obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
-obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
-obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
-obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
-obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
-obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
-obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
-
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
obj-$(CONFIG_SFP) += sfp.o
@@ -62,32 +37,32 @@ ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
-obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
-obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
-obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83822_PHY) += dp83822.o
-obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_DP83869_PHY) += dp83869.o
+obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
-obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
+obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 101651b2de54..ed601a7e46a0 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -343,7 +343,7 @@ static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
}
-static struct regulator_ops vddio_regulator_ops = {
+static const struct regulator_ops vddio_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
.get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
@@ -364,7 +364,7 @@ static const struct regulator_desc vddio_desc = {
.owner = THIS_MODULE,
};
-static struct regulator_ops vddh_regulator_ops = {
+static const struct regulator_ops vddh_regulator_ops = {
};
static const struct regulator_desc vddh_desc = {
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 692048d86ab1..15812001b3ff 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -11,6 +11,7 @@
#include "bcm-phy-lib.h"
#include <linux/bitops.h>
#include <linux/brcmphy.h>
+#include <linux/clk.h>
#include <linux/mdio.h>
/* Broadcom BCM7xxx internal PHY registers */
@@ -39,6 +40,7 @@
struct bcm7xxx_phy_priv {
u64 *stats;
+ struct clk *clk;
};
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
@@ -521,6 +523,7 @@ static void bcm7xxx_28nm_get_phy_stats(struct phy_device *phydev,
static int bcm7xxx_28nm_probe(struct phy_device *phydev)
{
struct bcm7xxx_phy_priv *priv;
+ int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -534,7 +537,30 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
- return 0;
+ priv->clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+	/* Dummy read to a register to work around an issue upon reset where
+	 * the internal inverter may not allow the first MDIO transaction to
+	 * pass the MDIO management controller, making such reads return
+	 * 0xffff. This is needed to ensure that any subsequent reads to the
+	 * PHY will succeed.
+	 */
+ phy_read(phydev, MII_BMSR);
+
+ return ret;
+}
+
+static void bcm7xxx_28nm_remove(struct phy_device *phydev)
+{
+ struct bcm7xxx_phy_priv *priv = phydev->priv;
+
+ clk_disable_unprepare(priv->clk);
}
#define BCM7XXX_28NM_GPHY(_oui, _name) \
@@ -552,6 +578,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_28NM_EPHY(_oui, _name) \
@@ -567,6 +594,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
@@ -583,6 +611,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
}
static struct phy_driver bcm7xxx_driver[] = {
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -603,6 +632,7 @@ static struct phy_driver bcm7xxx_driver[] = {
};
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 79e67f2fe00a..f2caccaf4408 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -798,51 +798,32 @@ static int decode_evnt(struct dp83640_private *dp83640,
return parsed;
}
-#define DP83640_PACKET_HASH_OFFSET 20
#define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
- unsigned int offset = 0;
- u8 *msgtype, *data = skb_mac_header(skb);
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
- return 0;
+ msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
- if (rxts->msgtype != (*msgtype & 0xf))
+ if (rxts->msgtype != (msgtype & 0xf))
return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- if (rxts->seqid != ntohs(*seqid))
+ seqid = be16_to_cpu(hdr->sequence_id);
+ if (rxts->seqid != seqid)
return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN,
- data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
+ (unsigned char *)&hdr->source_port_identity) >> 20;
if (rxts->hash != hash)
return 0;
@@ -982,35 +963,16 @@ static void decode_status_frame(struct dp83640_private *dp83640,
static int is_sync(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
+ struct ptp_header *hdr;
+ u8 msgtype;
- if (skb->len < offset + 1)
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0;
+ return (msgtype & 0xf) == 0;
}
static void dp83640_free_clocks(void)
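
The parsing helpers used above come from <linux/ptp_classify.h>; standalone
they compose as in this sketch, assuming skb is the received frame:

	struct ptp_header *hdr;
	unsigned int type;
	u16 seqid;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	seqid = be16_to_cpu(hdr->sequence_id);
	pr_debug("PTP msgtype %u seq %u\n",
		 ptp_get_msgtype(hdr, type) & 0xf, seqid);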
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 37643c468e19..c162c9551bd1 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -23,16 +23,31 @@
#define DP83822_DEVADDR 0x1f
+#define MII_DP83822_CTRL_2 0x0a
+#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
#define MII_DP83822_MISR1 0x12
#define MII_DP83822_MISR2 0x13
+#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_GENCFG 0x465
+#define MII_DP83822_SOR1 0x467
+
+/* GENCFG */
+#define DP83822_SIG_DET_LOW BIT(0)
+
+/* Control Register 2 bits */
+#define DP83822_FX_ENABLE BIT(14)
#define DP83822_HW_RESET BIT(15)
#define DP83822_SW_RESET BIT(14)
+/* PHY STS bits */
+#define DP83822_PHYSTS_DUPLEX BIT(2)
+#define DP83822_PHYSTS_10 BIT(1)
+#define DP83822_PHYSTS_LINK BIT(0)
+
/* PHYSCR Register Fields */
#define DP83822_PHYSCR_INT_OE BIT(0) /* Interrupt Output Enable */
#define DP83822_PHYSCR_INTEN BIT(1) /* Interrupt Enable */
@@ -83,6 +98,27 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* SOR1 mode */
+#define DP83822_STRAP_MODE1 0
+#define DP83822_STRAP_MODE2 BIT(0)
+#define DP83822_STRAP_MODE3 BIT(1)
+#define DP83822_STRAP_MODE4 GENMASK(1, 0)
+
+#define DP83822_COL_STRAP_MASK GENMASK(11, 10)
+#define DP83822_COL_SHIFT 10
+#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
+#define DP83822_RX_ER_SHIFT 8
+
+#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
+ ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+
+struct dp83822_private {
+ bool fx_signal_det_low;
+ int fx_enabled;
+ u16 fx_sd_enable;
+};
+
static int dp83822_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -197,6 +233,7 @@ static void dp83822_get_wol(struct phy_device *phydev,
static int dp83822_config_intr(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
int misr_status;
int physcr_status;
int err;
@@ -208,13 +245,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
misr_status |= (DP83822_RX_ERR_HF_INT_EN |
DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_ANEG_COMPLETE_INT_EN |
- DP83822_DUP_MODE_CHANGE_INT_EN |
- DP83822_SPEED_CHANGED_INT_EN |
DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
+ DP83822_DUP_MODE_CHANGE_INT_EN |
+ DP83822_SPEED_CHANGED_INT_EN;
+
err = phy_write(phydev, MII_DP83822_MISR1, misr_status);
if (err < 0)
return err;
@@ -224,14 +264,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
return misr_status;
misr_status |= (DP83822_JABBER_DET_INT_EN |
- DP83822_WOL_PKT_INT_EN |
DP83822_SLEEP_MODE_INT_EN |
- DP83822_MDI_XOVER_INT_EN |
DP83822_LB_FIFO_INT_EN |
DP83822_PAGE_RX_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_MDI_XOVER_INT_EN |
+ DP83822_ANEG_ERR_INT_EN |
+ DP83822_WOL_PKT_INT_EN;
+
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
if (err < 0)
return err;
@@ -270,13 +312,60 @@ static int dp8382x_disable_wol(struct phy_device *phydev)
MII_DP83822_WOL_CFG, value);
}
+static int dp83822_read_status(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+	int status = phy_read(phydev, MII_DP83822_PHYSTS);
+	int ctrl2;
+	int ret;
+
+	if (status < 0)
+		return status;
+
+ if (dp83822->fx_enabled) {
+ if (status & DP83822_PHYSTS_LINK) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ ctrl2 = phy_read(phydev, MII_DP83822_CTRL_2);
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ if (!(ctrl2 & DP83822_FX_ENABLE)) {
+ ret = phy_write(phydev, MII_DP83822_CTRL_2,
+ DP83822_FX_ENABLE | ctrl2);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (status & DP83822_PHYSTS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (status & DP83822_PHYSTS_10)
+ phydev->speed = SPEED_10;
+ else
+ phydev->speed = SPEED_100;
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
int rgmii_delay;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
+ int bmcr;
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
@@ -302,6 +391,61 @@ static int dp83822_config_init(struct phy_device *phydev)
}
}
+ if (dp83822->fx_enabled) {
+ err = phy_modify(phydev, MII_DP83822_CTRL_2,
+				 DP83822_FX_ENABLE, DP83822_FX_ENABLE);
+ if (err < 0)
+ return err;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->advertising);
+
+ /* Auto neg is not supported in fiber mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ err = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (err < 0)
+ return err;
+ }
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
+
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE);
+
+ if (err < 0)
+ return err;
+
+ if (dp83822->fx_signal_det_low) {
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_GENCFG,
+ DP83822_SIG_DET_LOW);
+ if (err)
+ return err;
+ }
+ }
return dp8382x_disable_wol(phydev);
}
@@ -314,13 +458,85 @@ static int dp83822_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_HW_RESET);
+ err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_SW_RESET);
if (err < 0)
return err;
return phydev->drv->config_init(phydev);
}
+#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+
+ /* Signal detection for the PHY is only enabled when both the FX_EN
+ * and the SD_EN pins are strapped; without the FX_EN strap, signal
+ * detection is disabled for the PHY.
+ */
+ if (dp83822->fx_enabled && dp83822->fx_sd_enable)
+ dp83822->fx_signal_det_low = device_property_present(dev,
+ "ti,link-loss-low");
+ if (!dp83822->fx_enabled)
+ dp83822->fx_enabled = device_property_present(dev,
+ "ti,fiber-mode");
+
+ return 0;
+}
+#else
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83822_read_straps(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int fx_enabled, fx_sd_enable;
+ int val;
+
+ val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ if (val < 0)
+ return val;
+
+ fx_enabled = (val & DP83822_COL_STRAP_MASK) >> DP83822_COL_SHIFT;
+ if (fx_enabled == DP83822_STRAP_MODE2 ||
+ fx_enabled == DP83822_STRAP_MODE3)
+ dp83822->fx_enabled = 1;
+
+ if (dp83822->fx_enabled) {
+ fx_sd_enable = (val & DP83822_RX_ER_STR_MASK) >> DP83822_RX_ER_SHIFT;
+ if (fx_sd_enable == DP83822_STRAP_MODE3 ||
+ fx_sd_enable == DP83822_STRAP_MODE4)
+ dp83822->fx_sd_enable = 1;
+ }
+
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ ret = dp83822_read_straps(phydev);
+ if (ret)
+ return ret;
+
+ dp83822_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -352,8 +568,10 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp83822_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
+ .read_status = dp83822_read_status, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
.ack_interrupt = dp83822_ack_interrupt, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index cd7032628a28..69d3eacc2b96 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83867 PHY
+/* Driver for the Texas Instruments DP83867 PHY
*
* Copyright (C) 2015 Texas Instruments Inc.
*/
@@ -113,7 +112,6 @@
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
-
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
@@ -384,22 +382,22 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
DP83867_DOWNSHIFT_EN);
switch (cnt) {
- case DP83867_DOWNSHIFT_1_COUNT:
- count = DP83867_DOWNSHIFT_1_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_2_COUNT:
- count = DP83867_DOWNSHIFT_2_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_4_COUNT:
- count = DP83867_DOWNSHIFT_4_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_8_COUNT:
- count = DP83867_DOWNSHIFT_8_COUNT_VAL;
- break;
- default:
- phydev_err(phydev,
- "Downshift count must be 1, 2, 4 or 8\n");
- return -EINVAL;
+ case DP83867_DOWNSHIFT_1_COUNT:
+ count = DP83867_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT:
+ count = DP83867_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT:
+ count = DP83867_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT:
+ count = DP83867_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
}
val = DP83867_DOWNSHIFT_EN;
@@ -411,7 +409,7 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
}
static int dp83867_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
+ struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -422,7 +420,7 @@ static int dp83867_get_tunable(struct phy_device *phydev,
}
static int dp83867_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
+ struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -524,11 +522,10 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
- "ti,dp83867-rxctrl-strap-quirk");
+ "ti,dp83867-rxctrl-strap-quirk");
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
- "ti,sgmii-ref-clock-output-enable");
-
+ "ti,sgmii-ref-clock-output-enable");
dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 6b98d74b5102..cf6dec7b7d8e 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -4,12 +4,14 @@
*/
#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/delay.h>
+#include <linux/bitfield.h>
#include <dt-bindings/net/ti-dp83869.h>
@@ -19,6 +21,7 @@
#define MII_DP83869_PHYCTRL 0x10
#define MII_DP83869_MICR 0x12
#define MII_DP83869_ISR 0x13
+#define DP83869_CFG2 0x14
#define DP83869_CTRL 0x1f
#define DP83869_CFG4 0x1e
@@ -27,6 +30,13 @@
#define DP83869_RGMIICTL 0x0032
#define DP83869_STRAP_STS1 0x006e
#define DP83869_RGMIIDCTL 0x0086
+#define DP83869_RXFCFG 0x0134
+#define DP83869_RXFPMD1 0x0136
+#define DP83869_RXFPMD2 0x0137
+#define DP83869_RXFPMD3 0x0138
+#define DP83869_RXFSOP1 0x0139
+#define DP83869_RXFSOP2 0x013A
+#define DP83869_RXFSOP3 0x013B
#define DP83869_IO_MUX_CFG 0x0170
#define DP83869_OP_MODE 0x01df
#define DP83869_FX_CTRL 0x0c00
@@ -52,6 +62,10 @@
BMCR_FULLDPLX | \
BMCR_SPEED1000)
+#define MII_DP83869_FIBER_ADVERTISE (ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | \
+ ADVERTISED_Asym_Pause)
+
/* This is the same bit mask as the BMCR so re-use the BMCR default */
#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
@@ -100,6 +114,26 @@
#define DP83869_OP_MODE_MII BIT(5)
#define DP83869_SGMII_RGMII_BRIDGE BIT(6)
+/* RXFCFG bits */
+#define DP83869_WOL_MAGIC_EN BIT(0)
+#define DP83869_WOL_PATTERN_EN BIT(1)
+#define DP83869_WOL_BCAST_EN BIT(2)
+#define DP83869_WOL_UCAST_EN BIT(4)
+#define DP83869_WOL_SEC_EN BIT(5)
+#define DP83869_WOL_ENH_MAC BIT(7)
+
+/* CFG2 bits */
+#define DP83869_DOWNSHIFT_EN (BIT(8) | BIT(9))
+#define DP83869_DOWNSHIFT_ATTEMPT_MASK (BIT(10) | BIT(11))
+#define DP83869_DOWNSHIFT_1_COUNT_VAL 0
+#define DP83869_DOWNSHIFT_2_COUNT_VAL 1
+#define DP83869_DOWNSHIFT_4_COUNT_VAL 2
+#define DP83869_DOWNSHIFT_8_COUNT_VAL 3
+#define DP83869_DOWNSHIFT_1_COUNT 1
+#define DP83869_DOWNSHIFT_2_COUNT 2
+#define DP83869_DOWNSHIFT_4_COUNT 4
+#define DP83869_DOWNSHIFT_8_COUNT 8
+
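/* Sketch (not part of this patch): the CFG2 downshift fields above are
 * driven through the linux/bitfield.h helpers, e.g.:
 *
 *	u16 cfg2 = DP83869_DOWNSHIFT_EN |
 *		   FIELD_PREP(DP83869_DOWNSHIFT_ATTEMPT_MASK,
 *			      DP83869_DOWNSHIFT_4_COUNT_VAL);
 *	unsigned int val = FIELD_GET(DP83869_DOWNSHIFT_ATTEMPT_MASK, cfg2);
 *
 * which is the round-trip dp83869_get/set_downshift() below perform
 * against the DP83869_CFG2 register.
 */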
enum {
DP83869_PORT_MIRRORING_KEEP,
DP83869_PORT_MIRRORING_EN,
@@ -118,6 +152,28 @@ struct dp83869_private {
int mode;
};
+static int dp83869_read_status(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ if (phydev->link) {
+ if (dp83869->mode == DP83869_RGMII_100_BASE)
+ phydev->speed = SPEED_100;
+ } else {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ }
+
+ return 0;
+}
+
static int dp83869_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83869_ISR);
@@ -151,6 +207,256 @@ static int dp83869_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83869_MICR, micr_status);
}
+static int dp83869_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ int val_rxcfg, val_micr;
+ u8 *mac;
+ int ret;
+
+ val_rxcfg = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG);
+ if (val_rxcfg < 0)
+ return val_rxcfg;
+
+ val_micr = phy_read(phydev, MII_DP83869_MICR);
+ if (val_micr < 0)
+ return val_micr;
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+ WAKE_BCAST)) {
+ val_rxcfg |= DP83869_WOL_ENH_MAC;
+ val_micr |= MII_DP83869_MICR_WOL_INT_EN;
+
+ if (wol->wolopts & WAKE_MAGIC ||
+ wol->wolopts & WAKE_MAGICSECURE) {
+ mac = (u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD1,
+ mac[1] << 8 | mac[0]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD2,
+ mac[3] << 8 | mac[2]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD3,
+ mac[5] << 8 | mac[4]);
+ if (ret)
+ return ret;
+
+ val_rxcfg |= DP83869_WOL_MAGIC_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_MAGIC_EN;
+ }
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP2,
+ (wol->sopass[3] << 8) | wol->sopass[2]);
+ if (ret)
+ return ret;
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP3,
+ (wol->sopass[5] << 8) | wol->sopass[4]);
+ if (ret)
+ return ret;
+
+ val_rxcfg |= DP83869_WOL_SEC_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_SEC_EN;
+ }
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_rxcfg |= DP83869_WOL_UCAST_EN;
+ else
+ val_rxcfg &= ~DP83869_WOL_UCAST_EN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_rxcfg |= DP83869_WOL_BCAST_EN;
+ else
+ val_rxcfg &= ~DP83869_WOL_BCAST_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_ENH_MAC;
+ val_micr &= ~MII_DP83869_MICR_WOL_INT_EN;
+ }
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG, val_rxcfg);
+ if (ret)
+ return ret;
+
+ return phy_write(phydev, MII_DP83869_MICR, val_micr);
+}
+
+static void dp83869_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value, sopass_val;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG);
+ if (value < 0) {
+ phydev_err(phydev, "Failed to read RX CFG\n");
+ return;
+ }
+
+ if (value & DP83869_WOL_UCAST_EN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (value & DP83869_WOL_BCAST_EN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (value & DP83869_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83869_WOL_SEC_EN) {
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP1);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 1\n");
+ return;
+ }
+
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP2);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 2\n");
+ return;
+ }
+
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP3);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 3\n");
+ return;
+ }
+
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ if (!(value & DP83869_WOL_ENH_MAC))
+ wol->wolopts = 0;
+}
+
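/* Worked example (not part of this patch): the RXFPMD writes above pack
 * the 6-byte MAC little-endian into three 16-bit registers, so for a
 * MAC address of 00:11:22:33:44:55:
 *
 *	RXFPMD1 = mac[1] << 8 | mac[0] = 0x1100
 *	RXFPMD2 = mac[3] << 8 | mac[2] = 0x3322
 *	RXFPMD3 = mac[5] << 8 | mac[4] = 0x5544
 */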
+static int dp83869_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable, count;
+
+ val = phy_read(phydev, DP83869_CFG2);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(DP83869_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(DP83869_DOWNSHIFT_ATTEMPT_MASK, val);
+
+ switch (cnt) {
+ case DP83869_DOWNSHIFT_1_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_1_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_2_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_2_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_4_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_4_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_8_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_8_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *data = enable ? count : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int dp83869_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val, count;
+
+ if (cnt > DP83869_DOWNSHIFT_8_COUNT)
+ return -EINVAL;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, DP83869_CFG2,
+ DP83869_DOWNSHIFT_EN);
+
+ switch (cnt) {
+ case DP83869_DOWNSHIFT_1_COUNT:
+ count = DP83869_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_2_COUNT:
+ count = DP83869_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_4_COUNT:
+ count = DP83869_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_8_COUNT:
+ count = DP83869_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
+ }
+
+ val = DP83869_DOWNSHIFT_EN;
+ val |= FIELD_PREP(DP83869_DOWNSHIFT_ATTEMPT_MASK, count);
+
+ return phy_modify(phydev, DP83869_CFG2,
+ DP83869_DOWNSHIFT_EN | DP83869_DOWNSHIFT_ATTEMPT_MASK,
+ val);
+}
+
+static int dp83869_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83869_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83869_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83869_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
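/* Usage sketch (not from this patch): these callbacks are normally
 * reached via the ETHTOOL_PHY_STUNABLE/GTUNABLE ioctls from the ethtool
 * utility; the kernel-side equivalent of "downshift count 4" is:
 *
 *	u8 count = DP83869_DOWNSHIFT_4_COUNT;
 *	struct ethtool_tunable tuna = { .id = ETHTOOL_PHY_DOWNSHIFT };
 *	int err = dp83869_set_tunable(phydev, &tuna, &count);
 */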
static int dp83869_config_port_mirroring(struct phy_device *phydev)
{
struct dp83869_private *dp83869 = phydev->priv;
@@ -295,6 +601,51 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
return ret;
}
+static int dp83869_configure_fiber(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int bmcr;
+ int ret;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+
+ if (dp83869->mode == DP83869_RGMII_1000_BASE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+
+ /* Auto-negotiation is not supported in 100BASE-FX mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising);
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Update advertising from supported */
+ linkmode_or(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ return 0;
+}
+
static int dp83869_configure_mode(struct phy_device *phydev,
struct dp83869_private *dp83869)
{
@@ -384,6 +735,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
case DP83869_RGMII_1000_BASE:
case DP83869_RGMII_100_BASE:
+ ret = dp83869_configure_fiber(phydev, dp83869);
break;
default:
return -EINVAL;
@@ -397,6 +749,12 @@ static int dp83869_config_init(struct phy_device *phydev)
struct dp83869_private *dp83869 = phydev->priv;
int ret, val;
+ /* Force speed optimization for the PHY even if it is strapped */
+ ret = phy_modify(phydev, DP83869_CFG2, DP83869_DOWNSHIFT_EN,
+ DP83869_DOWNSHIFT_EN);
+ if (ret)
+ return ret;
+
ret = dp83869_configure_mode(phydev, dp83869);
if (ret)
return ret;
@@ -494,6 +852,13 @@ static struct phy_driver dp83869_driver[] = {
/* IRQ related */
.ack_interrupt = dp83869_ack_interrupt,
.config_intr = dp83869_config_intr,
+ .read_status = dp83869_read_status,
+
+ .get_tunable = dp83869_get_tunable,
+ .set_tunable = dp83869_set_tunable,
+
+ .get_wol = dp83869_get_wol,
+ .set_wol = dp83869_set_wol,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index bb86ac0bd092..5aec673a0120 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1598,21 +1598,15 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int oldpage, ret = 0;
+ int ret;
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
- if (oldpage < 0)
- goto error;
-
- ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ ret = phy_read_paged(phydev, MII_MARVELL_WOL_PAGE,
+ MII_88E1318S_PHY_WOL_CTRL);
+ if (ret >= 0 && ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
-
-error:
- phy_restore_page(phydev, oldpage, ret);
}
static int m88e1318_set_wol(struct phy_device *phydev,
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
deleted file mode 100644
index b1d27f7cd23f..000000000000
--- a/drivers/net/phy/mdio-i2c.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * MDIO I2C bridge
- *
- * Copyright (C) 2015 Russell King
- */
-#ifndef MDIO_I2C_H
-#define MDIO_I2C_H
-
-struct device;
-struct i2c_adapter;
-struct mii_bus;
-
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
-
-#endif
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
deleted file mode 100644
index 8af93ada8b64..000000000000
--- a/drivers/net/phy/mdio-xgene.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Applied Micro X-Gene SoC MDIO Driver
- *
- * Copyright (c) 2016, Applied Micro Circuits Corporation
- * Author: Iyappan Subramanian <isubramanian@apm.com>
- */
-
-#ifndef __MDIO_XGENE_H__
-#define __MDIO_XGENE_H__
-
-#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
-#define BLOCK_DIAG_CSR_OFFSET 0xd000
-#define XGENET_CONFIG_REG_ADDR 0x20
-
-#define MAC_ADDR_REG_OFFSET 0x00
-#define MAC_COMMAND_REG_OFFSET 0x04
-#define MAC_WRITE_REG_OFFSET 0x08
-#define MAC_READ_REG_OFFSET 0x0c
-#define MAC_COMMAND_DONE_REG_OFFSET 0x10
-
-#define CLKEN_OFFSET 0x08
-#define SRST_OFFSET 0x00
-
-#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
-#define MENET_BLOCK_MEM_RDY_ADDR 0x74
-
-#define MAC_CONFIG_1_ADDR 0x00
-#define MII_MGMT_COMMAND_ADDR 0x24
-#define MII_MGMT_ADDRESS_ADDR 0x28
-#define MII_MGMT_CONTROL_ADDR 0x2c
-#define MII_MGMT_STATUS_ADDR 0x30
-#define MII_MGMT_INDICATORS_ADDR 0x34
-#define SOFT_RESET BIT(31)
-
-#define MII_MGMT_CONFIG_ADDR 0x20
-#define MII_MGMT_COMMAND_ADDR 0x24
-#define MII_MGMT_ADDRESS_ADDR 0x28
-#define MII_MGMT_CONTROL_ADDR 0x2c
-#define MII_MGMT_STATUS_ADDR 0x30
-#define MII_MGMT_INDICATORS_ADDR 0x34
-
-#define MIIM_COMMAND_ADDR 0x20
-#define MIIM_FIELD_ADDR 0x24
-#define MIIM_CONFIGURATION_ADDR 0x28
-#define MIIM_LINKFAILVECTOR_ADDR 0x2c
-#define MIIM_INDICATOR_ADDR 0x30
-#define MIIMRD_FIELD_ADDR 0x34
-
-#define MDIO_CSR_OFFSET 0x5000
-
-#define REG_ADDR_POS 0
-#define REG_ADDR_LEN 5
-#define PHY_ADDR_POS 8
-#define PHY_ADDR_LEN 5
-
-#define HSTMIIMWRDAT_POS 0
-#define HSTMIIMWRDAT_LEN 16
-#define HSTPHYADX_POS 23
-#define HSTPHYADX_LEN 5
-#define HSTREGADX_POS 18
-#define HSTREGADX_LEN 5
-#define HSTLDCMD BIT(3)
-#define HSTMIIMCMD_POS 0
-#define HSTMIIMCMD_LEN 3
-
-#define BUSY_MASK BIT(0)
-#define READ_CYCLE_MASK BIT(0)
-
-enum xgene_enet_cmd {
- XGENE_ENET_WR_CMD = BIT(31),
- XGENE_ENET_RD_CMD = BIT(30)
-};
-
-enum {
- MIIM_CMD_IDLE,
- MIIM_CMD_LEGACY_WRITE,
- MIIM_CMD_LEGACY_READ,
-};
-
-enum xgene_mdio_id {
- XGENE_MDIO_RGMII = 1,
- XGENE_MDIO_XFI
-};
-
-struct xgene_mdio_pdata {
- struct clk *clk;
- struct device *dev;
- void __iomem *mac_csr_addr;
- void __iomem *diag_csr_addr;
- void __iomem *mdio_csr_addr;
- struct mii_bus *mdio_bus;
- int mdio_id;
- spinlock_t mac_lock; /* mac lock */
-};
-
-/* Set the specified value into a bit-field defined by its starting position
- * and length within a single u64.
- */
-static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
-{
- return (val & ((1ULL << len) - 1)) << pos;
-}
-
-#define SET_VAL(field, val) \
- xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
-
-#define SET_BIT(field) \
- xgene_enet_set_field_value(field ## _POS, 1, 1)
-
-/* Get the value from a bit-field defined by its starting position
- * and length within the specified u64.
- */
-static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
-{
- return (src >> pos) & ((1ULL << len) - 1);
-}
-
-#define GET_VAL(field, src) \
- xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
-
-#define GET_BIT(field, src) \
- xgene_enet_get_field_value(field ## _POS, 1, src)
-
-u32 xgene_mdio_rd_mac(struct xgene_mdio_pdata *pdata, u32 rd_addr);
-void xgene_mdio_wr_mac(struct xgene_mdio_pdata *pdata, u32 wr_addr, u32 data);
-int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
-int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
-struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
-
-#endif /* __MDIO_XGENE_H__ */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 0af20faad69d..757e950fb745 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -825,9 +825,6 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -850,9 +847,6 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -879,9 +873,6 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
@@ -905,9 +896,6 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
@@ -929,9 +917,6 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
mutex_unlock(&bus->mdio_lock);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3fe552675dd2..a7f74b3b97af 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1315,6 +1315,19 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8814,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip INDY Gigabit Quad PHY",
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1387,6 +1400,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 1d4c012194e9..6cf9b798b710 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -958,7 +958,7 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
return 0;
}
-static struct macsec_ops vsc8584_macsec_ops = {
+static const struct macsec_ops vsc8584_macsec_ops = {
.mdo_dev_open = vsc8584_macsec_dev_open,
.mdo_dev_stop = vsc8584_macsec_dev_stop,
.mdo_add_secy = vsc8584_macsec_add_secy,
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index ff8e14b01eeb..8d333d3084ed 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,9 +6,14 @@
#include <linux/phy.h>
#include <linux/of.h>
+/**
+ * phy_speed_to_str - Return a string representing the PHY link speed
+ *
+ * @speed: Speed of the link
+ */
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -52,6 +57,11 @@ const char *phy_speed_to_str(int speed)
}
EXPORT_SYMBOL_GPL(phy_speed_to_str);
+/**
+ * phy_duplex_to_str - Return string describing the duplex
+ *
+ * @duplex: Duplex setting to describe
+ */
const char *phy_duplex_to_str(unsigned int duplex)
{
if (duplex == DUPLEX_HALF)
@@ -160,6 +170,8 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 100, FULL, 100baseT_Full ),
PHY_SETTING( 100, FULL, 100baseT1_Full ),
PHY_SETTING( 100, HALF, 100baseT_Half ),
+ PHY_SETTING( 100, HALF, 100baseFX_Half ),
+ PHY_SETTING( 100, FULL, 100baseFX_Full ),
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
@@ -250,6 +262,16 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
return __set_linkmode_max_speed(max_speed, phydev->supported);
}
+/**
+ * phy_set_max_speed - Set the maximum speed the PHY should support
+ *
+ * @phydev: The phy_device struct
+ * @max_speed: Maximum speed
+ *
+ * The PHY might be more capable than the MAC. For example, a Fast Ethernet
+ * MAC may be connected to a 1G PHY. This function allows the MAC to indicate
+ * its maximum speed, and so limit what the PHY will advertise.
+ */
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
int err;
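/* Usage sketch (not from this patch): a Fast Ethernet MAC driver would
 * typically cap a gigabit PHY right after connecting it:
 *
 *	err = phy_set_max_speed(phydev, SPEED_100);
 *	if (err)
 *		phydev_warn(phydev, "failed to cap PHY speed: %d\n", err);
 */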
@@ -306,6 +328,16 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+/**
+ * phy_resolve_aneg_pause - Determine pause autoneg results
+ *
+ * @phydev: The phy_device struct
+ *
+ * Once autoneg has completed, the local pause settings can be
+ * resolved. Determine if pause and asymmetric pause should be used
+ * by the MAC.
+ */
void phy_resolve_aneg_pause(struct phy_device *phydev)
{
if (phydev->duplex == DUPLEX_FULL) {
@@ -319,7 +351,7 @@ void phy_resolve_aneg_pause(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
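/* Usage sketch (not from this patch): a MAC driver resolves pause after
 * link-up and programs its flow control accordingly; the helper name
 * below is hypothetical:
 *
 *	phy_resolve_aneg_pause(phydev);
 *	foo_mac_set_flow_ctrl(priv, phydev->pause, phydev->asym_pause);
 */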
/**
- * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * phy_resolve_aneg_linkmode - resolve the advertisements into PHY settings
* @phydev: The phy_device struct
*
* Resolve our and the link partner advertisements into their corresponding
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8947d58f2a25..35525a671400 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -456,7 +456,16 @@ int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl);
-/* same as phy_do_ioctl, but ensures that net_device is running */
+/**
+ * phy_do_ioctl_running - generic ndo_do_ioctl implementation that first
+ * checks that the device is running
+ *
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctl's
+ * @cmd: ioctl cmd to execute
+ *
+ * Same as phy_do_ioctl, but ensures that net_device is running before
+ * handling the ioctl.
+ */
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
if (!netif_running(dev))
@@ -466,6 +475,12 @@ int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl_running);
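/* Usage sketch (not from this patch): drivers usually wire this helper
 * straight into their netdev ops; "foo" is a placeholder name:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_do_ioctl	= phy_do_ioctl_running,
 *		...
 *	};
 */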
+/**
+ * phy_queue_state_machine - Trigger the state machine to run soon
+ *
+ * @phydev: the phy_device struct
+ * @jiffies: Run the state machine after these jiffies
+ */
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
@@ -473,6 +488,11 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
}
EXPORT_SYMBOL(phy_queue_state_machine);
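/* Usage sketch (not from this patch): the delay is in jiffies, so a
 * driver that wants the state machine re-run one second from now does:
 *
 *	phy_queue_state_machine(phydev, HZ);
 */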
+/**
+ * phy_trigger_machine - Trigger the state machine to run now
+ *
+ * @phydev: the phy_device struct
+ */
static void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
@@ -489,6 +509,12 @@ static void phy_abort_cable_test(struct phy_device *phydev)
phydev_err(phydev, "Error while aborting cable test");
}
+/**
+ * phy_ethtool_get_strings - Get the statistic counter names
+ *
+ * @phydev: the phy_device struct
+ * @data: Where to put the strings
+ */
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
{
if (!phydev->drv)
@@ -502,6 +528,11 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
}
EXPORT_SYMBOL(phy_ethtool_get_strings);
+/**
+ * phy_ethtool_get_sset_count - Get the number of statistic counters
+ *
+ * @phydev: the phy_device struct
+ */
int phy_ethtool_get_sset_count(struct phy_device *phydev)
{
int ret;
@@ -523,6 +554,13 @@ int phy_ethtool_get_sset_count(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_ethtool_get_sset_count);
+/**
+ * phy_ethtool_get_stats - Get the statistic counters
+ *
+ * @phydev: the phy_device struct
+ * @stats: What counters to get
+ * @data: Where to store the counters
+ */
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
@@ -537,6 +575,12 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_ethtool_get_stats);
+/**
+ * phy_start_cable_test - Start a cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ */
int phy_start_cable_test(struct phy_device *phydev,
struct netlink_ext_ack *extack)
{
@@ -600,6 +644,13 @@ out:
}
EXPORT_SYMBOL(phy_start_cable_test);
+/**
+ * phy_start_cable_test_tdr - Start a raw TDR cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ * @config: Configuration of the test to run
+ */
int phy_start_cable_test_tdr(struct phy_device *phydev,
struct netlink_ext_ack *extack,
const struct phy_tdr_config *config)
@@ -1363,6 +1414,12 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
+/**
+ * phy_ethtool_set_wol - Configure Wake On LAN
+ *
+ * @phydev: target phy_device struct
+ * @wol: Configuration requested
+ */
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->set_wol)
@@ -1372,6 +1429,12 @@ int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
}
EXPORT_SYMBOL(phy_ethtool_set_wol);
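/* Usage sketch (not from this patch): enabling magic-packet wake from
 * inside the kernel looks like:
 *
 *	struct ethtool_wolinfo wol = {
 *		.cmd = ETHTOOL_SWOL,
 *		.wolopts = WAKE_MAGIC,
 *	};
 *	int err = phy_ethtool_set_wol(phydev, &wol);
 */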
+/**
+ * phy_ethtool_get_wol - Get the current Wake On LAN configuration
+ *
+ * @phydev: target phy_device struct
+ * @wol: Store the current configuration here
+ */
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->get_wol)
@@ -1405,6 +1468,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
+/**
+ * phy_ethtool_nway_reset - Restart auto negotiation
+ * @ndev: Network device to restart autoneg for
+ */
int phy_ethtool_nway_reset(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 32f4e8ec96cf..fe2296fdda19 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -535,8 +535,10 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
if (pl->pcs_ops)
pl->pcs_ops->pcs_get_state(pl->pcs, state);
- else
+ else if (pl->mac_ops->mac_pcs_get_state)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
+ else
+ state->link = 0;
}
/* The fixed state is... fixed except for the link state,
@@ -2319,6 +2321,49 @@ static void phylink_decode_sgmii_word(struct phylink_link_state *state,
}
/**
+ * phylink_decode_usxgmii_word() - decode the USXGMII word from a MAC PCS
+ * @state: a pointer to a struct phylink_link_state.
+ * @lpa: a 16 bit value which stores the USXGMII auto-negotiation word
+ *
+ * Helper for MAC PCS supporting the USXGMII protocol and the auto-negotiation
+ * code word. Decode the USXGMII code word and populate the corresponding fields
+ * (speed, duplex) into the phylink_link_state structure.
+ */
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa)
+{
+ switch (lpa & MDIO_USXGMII_SPD_MASK) {
+ case MDIO_USXGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case MDIO_USXGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case MDIO_USXGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MDIO_USXGMII_2500:
+ state->speed = SPEED_2500;
+ break;
+ case MDIO_USXGMII_5000:
+ state->speed = SPEED_5000;
+ break;
+ case MDIO_USXGMII_10G:
+ state->speed = SPEED_10000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+
+ if (lpa & MDIO_USXGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
+EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
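/* Usage sketch (not from this patch): a PCS driver's pcs_get_state()
 * feeds the 16-bit code word it read from its link-partner register
 * (accessor name hypothetical) through the helper:
 *
 *	u16 lpa = foo_pcs_read_usxgmii_lpa(pcs);
 *
 *	state->link = true;
 *	phylink_decode_usxgmii_word(state, lpa);
 */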
+
+/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
* @state: a pointer to a &struct phylink_link_state.
@@ -2361,6 +2406,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
break;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 0f0960971800..575580d3ffe0 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -26,11 +26,16 @@
#define RTL821x_EXT_PAGE_SELECT 0x1e
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL8211F_PHYCR1 0x18
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
#define RTL8211F_RX_DELAY BIT(3)
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211E_CTRL_DELAY BIT(13)
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
@@ -177,8 +182,12 @@ static int rtl8211f_config_init(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
+ u16 val;
int ret;
+ val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF;
+ phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val);
+
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
val_txdly = 0;
@@ -401,7 +410,7 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
-static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret = rtlgen_read_mmd(phydev, devnum, regnum);
@@ -425,7 +434,7 @@ static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
return ret;
}
-static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
u16 val)
{
int ret = rtlgen_write_mmd(phydev, devnum, regnum, val);
@@ -442,7 +451,7 @@ static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
-static int rtl8125_get_features(struct phy_device *phydev)
+static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
@@ -460,7 +469,7 @@ static int rtl8125_get_features(struct phy_device *phydev)
return genphy_read_abilities(phydev);
}
-static int rtl8125_config_aneg(struct phy_device *phydev)
+static int rtl822x_config_aneg(struct phy_device *phydev)
{
int ret = 0;
@@ -480,7 +489,7 @@ static int rtl8125_config_aneg(struct phy_device *phydev)
return __genphy_config_aneg(phydev, ret);
}
-static int rtl8125_read_status(struct phy_device *phydev)
+static int rtl822x_read_status(struct phy_device *phydev)
{
int ret;
@@ -522,7 +531,7 @@ static int rtlgen_match_phy_device(struct phy_device *phydev)
!rtlgen_supports_2_5gbps(phydev);
}
-static int rtl8125_match_phy_device(struct phy_device *phydev)
+static int rtl8226_match_phy_device(struct phy_device *phydev)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
rtlgen_supports_2_5gbps(phydev);
@@ -542,6 +551,8 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
@@ -627,29 +638,29 @@ static struct phy_driver realtek_drvs[] = {
.read_mmd = rtlgen_read_mmd,
.write_mmd = rtlgen_write_mmd,
}, {
- .name = "RTL8125 2.5Gbps internal",
- .match_phy_device = rtl8125_match_phy_device,
- .get_features = rtl8125_get_features,
- .config_aneg = rtl8125_config_aneg,
- .read_status = rtl8125_read_status,
+ .name = "RTL8226 2.5Gbps PHY",
+ .match_phy_device = rtl8226_match_phy_device,
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
- .read_mmd = rtl8125_read_mmd,
- .write_mmd = rtl8125_write_mmd,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc840),
- .name = "RTL8125B 2.5Gbps internal",
- .get_features = rtl8125_get_features,
- .config_aneg = rtl8125_config_aneg,
- .read_status = rtl8125_read_status,
+ .name = "RTL8226B_RTL8221B 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
- .read_mmd = rtl8125_read_mmd,
- .write_mmd = rtl8125_write_mmd,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index cf83314c8591..34aa196b7465 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "mdio-i2c.h"
#include "sfp.h"
#include "swphy.h"
@@ -2389,7 +2389,8 @@ static int sfp_probe(struct platform_device *pdev)
continue;
sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]);
- if (!sfp->gpio_irq[i]) {
+ if (sfp->gpio_irq[i] < 0) {
+ sfp->gpio_irq[i] = 0;
sfp->need_poll = true;
continue;
}
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 74568ae16125..ec97669be5c2 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -21,6 +22,17 @@
#include <linux/netdevice.h>
#include <linux/smscphy.h>
+/* Vendor-specific PHY Definitions */
+/* EDPD NLP / crossover time configuration */
+#define PHY_EDPD_CONFIG 16
+#define PHY_EDPD_CONFIG_EXT_CROSSOVER_ 0x0001
+
+/* Control/Status Indication Register */
+#define SPECIAL_CTRL_STS 27
+#define SPECIAL_CTRL_STS_OVRRD_AMDIX_ 0x8000
+#define SPECIAL_CTRL_STS_AMDIX_ENABLE_ 0x4000
+#define SPECIAL_CTRL_STS_AMDIX_STATE_ 0x2000
+
struct smsc_hw_stat {
const char *string;
u8 reg;
@@ -33,14 +45,22 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
struct smsc_phy_priv {
bool energy_enable;
+ struct clk *refclk;
};
static int smsc_phy_config_intr(struct phy_device *phydev)
{
- int rc = phy_write (phydev, MII_LAN83C185_IM,
- ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
- ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
- : 0));
+ struct smsc_phy_priv *priv = phydev->priv;
+ u16 intmask = 0;
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
+ if (priv->energy_enable)
+ intmask |= MII_LAN83C185_ISF_INT7;
+ }
+
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
return rc < 0 ? rc : 0;
}
@@ -55,19 +75,21 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
+ int rc;
- int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (!priv->energy_enable)
+ return 0;
+
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
- if (priv->energy_enable) {
- /* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
- rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
- }
+ /* Enable energy detect mode for this SMSC Transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
return smsc_phy_ack_interrupt(phydev);
}
@@ -96,6 +118,54 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ int rc;
+ int val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ break;
+ case ETH_TP_MDI_X:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ break;
+ default:
+ return genphy_config_aneg(phydev);
+ }
+
+ rc = phy_read(phydev, SPECIAL_CTRL_STS);
+ if (rc < 0)
+ return rc;
+
+ rc &= ~(SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ rc |= val;
+ phy_write(phydev, SPECIAL_CTRL_STS, rc);
+
+ phydev->mdix = phydev->mdix_ctrl;
+ return genphy_config_aneg(phydev);
+}
+
+static int lan87xx_config_aneg_ext(struct phy_device *phydev)
+{
+ int rc;
+
+ /* Extend Manual AutoMDIX timer */
+ rc = phy_read(phydev, PHY_EDPD_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
+ phy_write(phydev, PHY_EDPD_CONFIG, rc);
+ return lan87xx_config_aneg(phydev);
+}
+
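/* Usage sketch (not from this patch): phydev->mdix_ctrl is normally set
 * via ethtool; forcing crossover from kernel code would look like:
 *
 *	phydev->mdix_ctrl = ETH_TP_MDI_X;
 *	err = phy_start_aneg(phydev);	-- re-runs config_aneg
 */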
/*
* The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
* plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
@@ -185,11 +255,20 @@ static void smsc_get_stats(struct phy_device *phydev,
data[i] = smsc_get_stat(phydev, i);
}
+static void smsc_phy_remove(struct phy_device *phydev)
+{
+ struct smsc_phy_priv *priv = phydev->priv;
+
+ clk_disable_unprepare(priv->refclk);
+ clk_put(priv->refclk);
+}
+
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -202,6 +281,19 @@ static int smsc_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
+ /* Make clk optional to keep DTB backward compatibility. */
+ priv->refclk = clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->refclk))
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
+
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
+ if (ret) {
+ clk_disable_unprepare(priv->refclk);
+ return ret;
+ }
+
return 0;
}
@@ -250,6 +342,9 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0C3) for
+ * LAN9500 (PID: 0x9500), LAN9514 (PID: 0xec00), LAN9505 (PID: 0x9505)
+ */
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -262,6 +357,7 @@ static struct phy_driver smsc_phy_driver[] = {
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -293,19 +389,23 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0F0) for
+ * LAN9500A (PID: 0x9E00), LAN9505A (PID: 0x9E01)
+ */
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
/* PHY_BASIC_FEATURES */
- .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
+ .remove = smsc_phy_remove,
/* basic functions */
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg_ext,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 7475cef17cf7..ca49c1ad3efc 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -300,7 +300,7 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
struct device *dev;
struct ks8995_switch *ks8995;
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
ks8995 = dev_get_drvdata(dev);
return ks8995_read(ks8995, buf, off, count);
@@ -312,7 +312,7 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
struct device *dev;
struct ks8995_switch *ks8995;
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
ks8995 = dev_get_drvdata(dev);
return ks8995_write(ks8995, buf, off, count);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bcc4a4c011f1..07f1f3933927 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2796,7 +2796,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static const struct genl_ops team_nl_ops[] = {
+static const struct genl_small_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2833,8 +2833,8 @@ static struct genl_family team_nl_family __ro_after_init = {
.policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = team_nl_ops,
- .n_ops = ARRAY_SIZE(team_nl_ops),
+ .small_ops = team_nl_ops,
+ .n_small_ops = ARRAY_SIZE(team_nl_ops),
.mcgrps = team_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7959b5c2d11f..be69d272052f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -219,24 +219,6 @@ struct veth {
__be16 h_vlan_TCI;
};
-bool tun_is_xdp_frame(void *ptr)
-{
- return (unsigned long)ptr & TUN_XDP_FLAG;
-}
-EXPORT_SYMBOL(tun_is_xdp_frame);
-
-void *tun_xdp_to_ptr(void *ptr)
-{
- return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_xdp_to_ptr);
-
-void *tun_ptr_to_xdp(void *ptr)
-{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_ptr_to_xdp);
-
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c7bcfca7d70b..b46993d5f997 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -346,6 +346,8 @@ config USB_NET_SMSC75XX
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
+ select PHYLIB
+ select SMSC_PHY
select BITREVERSE
select CRC16
select CRC32
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 32b08b18e120..ca89d8258dd3 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -40,6 +40,11 @@ enum cx82310_status {
#define CX82310_MTU 1514
#define CMD_EP 0x01
+struct cx82310_priv {
+ struct work_struct reenable_work;
+ struct usbnet *dev;
+};
+
/*
* execute control command
* - optionally send some data (command parameters)
@@ -66,8 +71,8 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
if (cmd != CMD_GET_LINK_STATUS)
- dev_err(&dev->udev->dev, "send command %#x: error %d\n",
- cmd, ret);
+ netdev_err(dev->net, "send command %#x: error %d\n",
+ cmd, ret);
goto end;
}
@@ -79,30 +84,27 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
CMD_TIMEOUT);
if (ret < 0) {
if (cmd != CMD_GET_LINK_STATUS)
- dev_err(&dev->udev->dev,
- "reply receive error %d\n",
- ret);
+ netdev_err(dev->net, "reply receive error %d\n",
+ ret);
goto end;
}
if (actual_len > 0)
break;
}
if (actual_len == 0) {
- dev_err(&dev->udev->dev, "no reply to command %#x\n",
- cmd);
+ netdev_err(dev->net, "no reply to command %#x\n", cmd);
ret = -EIO;
goto end;
}
if (buf[0] != cmd) {
- dev_err(&dev->udev->dev,
- "got reply to command %#x, expected: %#x\n",
- buf[0], cmd);
+ netdev_err(dev->net, "got reply to command %#x, expected: %#x\n",
+ buf[0], cmd);
ret = -EIO;
goto end;
}
if (buf[1] != STATUS_SUCCESS) {
- dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
- cmd, buf[1]);
+ netdev_err(dev->net, "command %#x failed: %#x\n", cmd,
+ buf[1]);
ret = -EIO;
goto end;
}
@@ -115,6 +117,23 @@ end:
return ret;
}
+static int cx82310_enable_ethernet(struct usbnet *dev)
+{
+ int ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+
+ if (ret)
+ netdev_err(dev->net, "unable to enable ethernet mode: %d\n",
+ ret);
+ return ret;
+}
+
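/* Note (not part of this patch): cx82310_cmd() issues blocking USB
 * control transfers and may sleep, so the atomic rx path below only
 * schedules this worker instead of re-enabling ethernet mode directly.
 */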
+static void cx82310_reenable_work(struct work_struct *work)
+{
+ struct cx82310_priv *priv = container_of(work, struct cx82310_priv,
+ reenable_work);
+ cx82310_enable_ethernet(priv->dev);
+}
+
#define partial_len data[0] /* length of partial packet data */
#define partial_rem data[1] /* remaining (missing) data length */
#define partial_data data[2] /* partial packet data */
@@ -126,6 +145,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_device *udev = dev->udev;
u8 link[3];
int timeout = 50;
+ struct cx82310_priv *priv;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -152,6 +172,15 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data)
return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_partial;
+ }
+ dev->driver_priv = priv;
+ INIT_WORK(&priv->reenable_work, cx82310_reenable_work);
+ priv->dev = dev;
+
/* wait for firmware to become ready (indicated by the link being up) */
while (--timeout) {
ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
@@ -162,24 +191,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
msleep(500);
}
if (!timeout) {
- dev_err(&udev->dev, "firmware not ready in time\n");
+ netdev_err(dev->net, "firmware not ready in time\n");
ret = -ETIMEDOUT;
goto err;
}
/* enable ethernet mode (?) */
- ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
- if (ret) {
- dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
- ret);
+ if (cx82310_enable_ethernet(dev))
goto err;
- }
/* get the MAC address */
ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
dev->net->dev_addr, ETH_ALEN);
if (ret) {
- dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
+ netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
goto err;
}
@@ -190,13 +215,19 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
return 0;
err:
+ kfree(dev->driver_priv);
+err_partial:
kfree((void *)dev->partial_data);
return ret;
}
static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct cx82310_priv *priv = dev->driver_priv;
+
kfree((void *)dev->partial_data);
+ cancel_work_sync(&priv->reenable_work);
+ kfree(dev->driver_priv);
}
/*
@@ -211,6 +242,7 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
int len;
struct sk_buff *skb2;
+ struct cx82310_priv *priv = dev->driver_priv;
/*
* If the last skb ended with an incomplete packet, this skb contains
@@ -245,9 +277,11 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
}
- if (len > CX82310_MTU) {
- dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
- len);
+ if (len == 0xffff) {
+ netdev_info(dev->net, "router was rebooted, re-enabling ethernet mode\n");
+ schedule_work(&priv->reenable_work);
+ } else if (len > CX82310_MTU) {
+ netdev_err(dev->net, "RX packet too long: %d B\n", len);
return 0;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index ed01dc964c99..144c686b4333 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -103,10 +103,6 @@ static int kaweth_probe(
const struct usb_device_id *id /* from id_table */
);
static void kaweth_disconnect(struct usb_interface *intf);
-static int kaweth_internal_control_msg(struct usb_device *usb_dev,
- unsigned int pipe,
- struct usb_ctrlrequest *cmd, void *data,
- int len, int timeout);
static int kaweth_suspend(struct usb_interface *intf, pm_message_t message);
static int kaweth_resume(struct usb_interface *intf);
@@ -236,65 +232,17 @@ struct kaweth_device
};
/****************************************************************
- * kaweth_control
- ****************************************************************/
-static int kaweth_control(struct kaweth_device *kaweth,
- unsigned int pipe,
- __u8 request,
- __u8 requesttype,
- __u16 value,
- __u16 index,
- void *data,
- __u16 size,
- int timeout)
-{
- struct usb_ctrlrequest *dr;
- int retval;
-
- if(in_interrupt()) {
- netdev_dbg(kaweth->net, "in_interrupt()\n");
- return -EBUSY;
- }
-
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(size);
-
- retval = kaweth_internal_control_msg(kaweth->dev,
- pipe,
- dr,
- data,
- size,
- timeout);
-
- kfree(dr);
- return retval;
-}
-
-/****************************************************************
* kaweth_read_configuration
****************************************************************/
static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
- int retval;
-
- retval = kaweth_control(kaweth,
- usb_rcvctrlpipe(kaweth->dev, 0),
+ return usb_control_msg(kaweth->dev, usb_rcvctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_GET_ETHERNET_DESC,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)&kaweth->configuration,
+ 0, 0,
+ &kaweth->configuration,
sizeof(kaweth->configuration),
KAWETH_CONTROL_TIMEOUT);
-
- return retval;
}
/****************************************************************
@@ -302,21 +250,14 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
****************************************************************/
static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
{
- int retval;
-
netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_URB_SIZE,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- urb_size,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_URB_SIZE,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ urb_size, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -324,21 +265,14 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
****************************************************************/
static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
{
- int retval;
-
netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_SOFS_WAIT,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- sofs_wait,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_SOFS_WAIT,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ sofs_wait, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -347,22 +281,15 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
__u16 receive_filter)
{
- int retval;
-
netdev_dbg(kaweth->net, "Set receive filter to %d\n",
(unsigned)receive_filter);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_PACKET_FILTER,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- receive_filter,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_PACKET_FILTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ receive_filter, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -407,14 +334,11 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf, kaweth);
netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
- return kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)kaweth->firmware_buf,
- data_len,
+ 0, 0,
+ kaweth->firmware_buf, data_len,
KAWETH_CONTROL_TIMEOUT);
}
@@ -433,15 +357,12 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
- return kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SCAN,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)kaweth->firmware_buf,
- 8,
- KAWETH_CONTROL_TIMEOUT);
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SCAN,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ 0, 0,
+ (void *)kaweth->firmware_buf, 8,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -564,7 +485,8 @@ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
return result;
}
-static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
+static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth,
+ bool may_sleep);
/****************************************************************
* kaweth_usb_receive
@@ -694,7 +616,7 @@ static int kaweth_open(struct net_device *net)
netif_start_queue(net);
- kaweth_async_set_rx_mode(kaweth);
+ kaweth_async_set_rx_mode(kaweth, true);
return 0;
err_out:
@@ -782,7 +704,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
spin_lock_irq(&kaweth->device_lock);
- kaweth_async_set_rx_mode(kaweth);
+ kaweth_async_set_rx_mode(kaweth, false);
netif_stop_queue(net);
if (IS_BLOCKED(kaweth->status)) {
goto skip;
@@ -859,36 +781,31 @@ static void kaweth_set_rx_mode(struct net_device *net)
/****************************************************************
* kaweth_async_set_rx_mode
****************************************************************/
-static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
+static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth,
+ bool may_sleep)
{
- int result;
+ int ret;
__u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
kaweth->packet_filter_bitmap = 0;
if (packet_filter_bitmap == 0)
return;
- if (in_interrupt())
+ if (!may_sleep)
return;
- result = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_PACKET_FILTER,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- packet_filter_bitmap,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- if(result < 0) {
+ ret = usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_PACKET_FILTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ packet_filter_bitmap, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
+ if (ret < 0)
dev_err(&kaweth->intf->dev, "Failed to set Rx mode: %d\n",
- result);
- }
- else {
+ ret);
+ else
netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
packet_filter_bitmap);
- }
}
/****************************************************************
@@ -1196,88 +1113,4 @@ static void kaweth_disconnect(struct usb_interface *intf)
}
-// FIXME this completion stuff is a modified clone of
-// an OLD version of some stuff in usb.c ...
-struct usb_api_data {
- wait_queue_head_t wqh;
- int done;
-};
-
-/*-------------------------------------------------------------------*
- * completion handler for compatibility wrappers (sync control/bulk) *
- *-------------------------------------------------------------------*/
-static void usb_api_blocking_completion(struct urb *urb)
-{
- struct usb_api_data *awd = (struct usb_api_data *)urb->context;
-
- awd->done=1;
- wake_up(&awd->wqh);
-}
-
-/*-------------------------------------------------------------------*
- * COMPATIBILITY STUFF *
- *-------------------------------------------------------------------*/
-
-// Starts urb and waits for completion or timeout
-static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
-{
- struct usb_api_data awd;
- int status;
-
- init_waitqueue_head(&awd.wqh);
- awd.done = 0;
-
- urb->context = &awd;
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- // something went wrong
- usb_free_urb(urb);
- return status;
- }
-
- if (!wait_event_timeout(awd.wqh, awd.done, timeout)) {
- // timeout
- dev_warn(&urb->dev->dev, "usb_control/bulk_msg: timeout\n");
- usb_kill_urb(urb); // remove urb safely
- status = -ETIMEDOUT;
- }
- else {
- status = urb->status;
- }
-
- if (actual_length) {
- *actual_length = urb->actual_length;
- }
-
- usb_free_urb(urb);
- return status;
-}
-
-/*-------------------------------------------------------------------*/
-// returns status (negative) or length (positive)
-static int kaweth_internal_control_msg(struct usb_device *usb_dev,
- unsigned int pipe,
- struct usb_ctrlrequest *cmd, void *data,
- int len, int timeout)
-{
- struct urb *urb;
- int retv;
- int length = 0; /* shut up GCC */
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char*)cmd, data,
- len, usb_api_blocking_completion, NULL);
-
- retv = usb_start_wait_urb(urb, timeout, &length);
- if (retv < 0) {
- return retv;
- }
- else {
- return length;
- }
-}
-
module_usb_driver(kaweth_driver);
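
With kaweth's hand-rolled URB completion wrapper gone, every vendor command is a plain synchronous usb_control_msg() call, which sleeps until completion and returns the transferred byte count or a negative errno. A minimal sketch of such a helper (a hypothetical write-side wrapper, not driver code):

static int kaweth_vendor_write(struct kaweth_device *kaweth, u8 request,
			       u16 value, void *data, u16 size)
{
	/* must not be called from atomic context: it sleeps */
	return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
			       request,
			       USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
			       value, 0, data, size, KAWETH_CONTROL_TIMEOUT);
}
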
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 1f04f1720426..b0c0c9dd6a02 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -113,7 +113,6 @@ nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
return nc_vendor_read(dev, REQUEST_REGISTER, regnum, retval_ptr);
}
-// no retval ... can become async, usable in_interrupt()
static void
nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
{
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 060a8a03e6c4..32e1335c94ad 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -124,62 +124,31 @@ static void async_ctrl_callback(struct urb *urb)
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
- u8 *buf;
- int ret;
-
- buf = kmalloc(size, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
- PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, buf, size, 1000);
- if (ret < 0)
- netif_dbg(pegasus, drv, pegasus->net,
- "%s returned %d\n", __func__, ret);
- else if (ret <= size)
- memcpy(data, buf, ret);
- kfree(buf);
- return ret;
+ return usb_control_msg_recv(pegasus->usb, 0, PEGASUS_REQ_GET_REGS,
+ PEGASUS_REQT_READ, 0, indx, data, size,
+ 1000, GFP_NOIO);
}
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
const void *data)
{
- u8 *buf;
- int ret;
-
- buf = kmemdup(data, size, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
- PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, buf, size, 100);
- if (ret < 0)
- netif_dbg(pegasus, drv, pegasus->net,
- "%s returned %d\n", __func__, ret);
- kfree(buf);
- return ret;
+ return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
+ PEGASUS_REQT_WRITE, 0, indx, data, size,
+ 1000, GFP_NOIO);
}
+/*
+ * There is only one way to write to a single ADM8511 register, and that is
+ * via a specific control request. 'data' is ignored by the device, but it is
+ * kept here so as not to break the API.
+ */
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
- u8 *buf;
- int ret;
-
- buf = kmemdup(&data, 1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
+ void *buf = &data;
- ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
- PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, buf, 1, 1000);
- if (ret < 0)
- netif_dbg(pegasus, drv, pegasus->net,
- "%s returned %d\n", __func__, ret);
- kfree(buf);
- return ret;
+ return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
+ PEGASUS_REQT_WRITE, data, indx, buf, 1,
+ 1000, GFP_NOIO);
}
static int update_eth_regs_async(pegasus_t *pegasus)
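
usb_control_msg_send()/usb_control_msg_recv() bounce the caller's data through an internally allocated DMA-safe buffer, which is why the kmalloc()/kmemdup() boilerplate above disappears; they also return 0 on a complete transfer (a short read surfaces as an error) rather than a byte count. A hedged sketch of a register read built on the recv variant:

static int example_read_regs(struct usb_device *udev, u16 indx,
			     void *data, u16 size)
{
	/* endpoint 0; 'data' may even live on the stack because the
	 * helper copies through its own buffer; 0 means all 'size'
	 * bytes arrived, anything less becomes a negative errno
	 */
	return usb_control_msg_recv(udev, 0, PEGASUS_REQ_GET_REGS,
				    PEGASUS_REQT_READ, 0, indx,
				    data, size, 1000, GFP_NOIO);
}
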
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5ca1356b8656..581ed51abb53 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -126,31 +126,9 @@ static void qmimux_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *stats)
{
struct qmimux_priv *priv = netdev_priv(net);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &net->stats);
-
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(priv->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, priv->stats64);
}
static const struct net_device_ops qmimux_netdev_ops = {
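
dev_fetch_sw_netstats() is exactly the loop deleted above, hoisted into the core: it walks every possible CPU, takes a consistent u64_stats snapshot of each pcpu_sw_netstats instance, and accumulates the totals. With it, a driver's ndo_get_stats64 shrinks to two calls, as sketched here:

static void example_get_stats64(struct net_device *net,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(net);	/* hypothetical priv */

	/* non-per-cpu counters first... */
	netdev_stats_to_stats64(stats, &net->stats);
	/* ...then sum the per-cpu rx/tx packet and byte counters */
	dev_fetch_sw_netstats(stats, priv->stats64);
}
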
@@ -1331,6 +1309,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 9d079dc2a535..bf8a60533f3e 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -152,36 +152,16 @@ static const char driver_name [] = "rtl8150";
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- void *buf;
- int ret;
-
- buf = kmalloc(size, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, buf, size, 500);
- if (ret > 0 && ret <= size)
- memcpy(data, buf, ret);
- kfree(buf);
- return ret;
+ return usb_control_msg_recv(dev->udev, 0, RTL8150_REQ_GET_REGS,
+ RTL8150_REQT_READ, indx, 0, data, size,
+ 1000, GFP_NOIO);
}
static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- void *buf;
- int ret;
-
- buf = kmemdup(data, size, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, buf, size, 500);
- kfree(buf);
- return ret;
+ return usb_control_msg_send(dev->udev, 0, RTL8150_REQ_SET_REGS,
+ RTL8150_REQT_WRITE, indx, 0, data, size,
+ 1000, GFP_NOIO);
}
static void async_set_reg_cb(struct urb *urb)
@@ -281,7 +261,7 @@ static void set_ethernet_addr(rtl8150_t *dev)
ret = get_registers(dev, IDR, sizeof(node_id), node_id);
- if (ret == sizeof(node_id)) {
+ if (!ret) {
ether_addr_copy(dev->netdev->dev_addr, node_id);
} else {
eth_hw_addr_random(dev->netdev);
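
The changed test above follows from the new helper's return convention: usb_control_msg() returned a byte count, so success was ret == sizeof(node_id), while usb_control_msg_recv() returns 0 for a complete transfer and a negative errno otherwise. In sketch form:

int ret = get_registers(dev, IDR, sizeof(node_id), node_id);

if (!ret)	/* 0 == all sizeof(node_id) bytes were read */
	ether_addr_copy(dev->netdev->dev_addr, node_id);
else		/* negative errno: fall back to a random MAC */
	eth_hw_addr_random(dev->netdev);
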
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 9556d431885f..8689835a5214 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -757,13 +757,14 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
- const u8 *mac_addr;
-
/* maybe the boot loader passed the MAC address in devicetree */
- mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->net->dev_addr, mac_addr);
- return;
+ if (!eth_platform_get_mac_address(&dev->udev->dev,
+ dev->net->dev_addr)) {
+ if (is_valid_ether_addr(dev->net->dev_addr)) {
+ /* device tree values are valid so use them */
+ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
+ return;
+ }
}
/* try reading mac address from EEPROM */
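
eth_platform_get_mac_address() consults the platform's firmware sources (device tree and friends) and copies a found address into the buffer, returning 0 on success; the extra is_valid_ether_addr() check above rejects an all-zero or multicast address that firmware might still hand back. A condensed sketch of the fallback chain (the identical hunk appears again for smsc95xx below):

static void example_init_mac(struct usbnet *dev)
{
	if (!eth_platform_get_mac_address(&dev->udev->dev,
					  dev->net->dev_addr) &&
	    is_valid_ether_addr(dev->net->dev_addr))
		return;		/* firmware-provided address is usable */

	/* ...EEPROM attempts would go here... */
	eth_hw_addr_random(dev->net);	/* last resort */
}
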
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bb4ccbda031a..ea0d5f04dc3a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -18,10 +18,12 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/of_net.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "1.0.6"
+#define SMSC_DRIVER_VERSION "2.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -49,10 +51,7 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
struct smsc95xx_priv {
- u32 chip_id;
u32 mac_cr;
u32 hash_hi;
u32 hash_lo;
@@ -60,10 +59,8 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
- u8 mdix_ctrl;
- bool link_ok;
- struct delayed_work carrier_check;
- struct usbnet *dev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
};
static bool turbo_mode = true;
@@ -173,10 +170,14 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
return -EIO;
}
-static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+static u32 mii_address_cmd(int phy_id, int idx, u16 op)
+{
+ return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
+}
+
+static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -185,14 +186,12 @@ static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
/* set the address, index & direction (read from PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -218,10 +217,9 @@ done:
return ret;
}
-static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
+static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
int idx, int regval, int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -230,7 +228,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
@@ -242,9 +240,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
}
/* set the address, index & direction (write to PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -261,27 +257,34 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
- int idx)
+static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
}
-static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
- int idx, int regval)
+static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
}
-static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
+ struct usbnet *dev = bus->priv;
+
+ return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
}
-static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
- int regval)
+static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
+ u16 regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
+ struct usbnet *dev = bus->priv;
+
+ __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ return 0;
}
static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
@@ -455,7 +458,7 @@ static unsigned int smsc95xx_hash(char addr[ETH_ALEN])
static void smsc95xx_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -511,22 +514,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n");
}
-static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
{
u32 flow = 0, afc_cfg;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ bool tx_pause, rx_pause;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0)
return ret;
- if (duplex == DUPLEX_FULL) {
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ if (pdata->phydev->duplex == DUPLEX_FULL) {
+ phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
- if (cap & FLOW_CTRL_RX)
+ if (rx_pause)
flow = 0xFFFF0002;
- if (cap & FLOW_CTRL_TX) {
+ if (tx_pause) {
afc_cfg |= 0xF;
flow |= 0xFFFF0000;
} else {
@@ -534,8 +538,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
- cap & FLOW_CTRL_RX ? "enabled" : "disabled",
- cap & FLOW_CTRL_TX ? "enabled" : "disabled");
+ rx_pause ? "enabled" : "disabled",
+ tx_pause ? "enabled" : "disabled");
} else {
netif_dbg(dev, link, dev->net, "half duplex\n");
afc_cfg |= 0xF;
@@ -550,33 +554,16 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
static int smsc95xx_link_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
- u16 lcladv, rmtadv;
int ret;
- /* clear interrupt status */
- ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0)
- return ret;
-
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
if (ret < 0)
return ret;
- mii_check_media(mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
- lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
- rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
-
- netif_dbg(dev, link, dev->net,
- "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
-
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
- if (ecmd.duplex != DUPLEX_FULL) {
+ if (pdata->phydev->duplex != DUPLEX_FULL) {
pdata->mac_cr &= ~MAC_CR_FDPX_;
pdata->mac_cr |= MAC_CR_RCVOWN_;
} else {
@@ -589,7 +576,7 @@ static int smsc95xx_link_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+ ret = smsc95xx_phy_update_flowcontrol(dev);
if (ret < 0)
netdev_warn(dev->net, "Error updating PHY flow control\n");
@@ -616,44 +603,6 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
-static void set_carrier(struct usbnet *dev, bool link)
-{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata->link_ok == link)
- return;
-
- pdata->link_ok = link;
-
- if (link)
- usbnet_link_change(dev, 1, 0);
- else
- usbnet_link_change(dev, 0, 0);
-}
-
-static void check_carrier(struct work_struct *work)
-{
- struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
- carrier_check.work);
- struct usbnet *dev = pdata->dev;
- int ret;
-
- if (pdata->suspend_flags != 0)
- return;
-
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MII_BMSR\n");
- return;
- }
- if (ret & BMSR_LSTATUS)
- set_carrier(dev, 1);
- else
- set_carrier(dev, 0);
-
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
-}
-
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -747,7 +696,7 @@ static void smsc95xx_ethtool_get_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
wolinfo->supported = SUPPORTED_WAKE;
wolinfo->wolopts = pdata->wolopts;
@@ -757,7 +706,7 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (wolinfo->wolopts & ~SUPPORTED_WAKE)
@@ -772,108 +721,15 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
return ret;
}
-static int get_mdix_status(struct net_device *net)
+static u32 smsc95xx_get_link(struct net_device *net)
{
- struct usbnet *dev = netdev_priv(net);
- u32 val;
- int buf;
-
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, SPECIAL_CTRL_STS);
- if (buf & SPECIAL_CTRL_STS_OVRRD_AMDIX_) {
- if (buf & SPECIAL_CTRL_STS_AMDIX_ENABLE_)
- return ETH_TP_MDI_AUTO;
- else if (buf & SPECIAL_CTRL_STS_AMDIX_STATE_)
- return ETH_TP_MDI_X;
- } else {
- buf = smsc95xx_read_reg(dev, STRAP_STATUS, &val);
- if (val & STRAP_STATUS_AMDIX_EN_)
- return ETH_TP_MDI_AUTO;
- }
-
- return ETH_TP_MDI;
-}
-
-static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int buf;
-
- if ((pdata->chip_id == ID_REV_CHIP_ID_9500A_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_89530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9730_)) {
- /* Extend Manual AutoMDIX timer for 9500A/9500Ai */
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG);
- buf |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG, buf);
- }
-
- if (mdix_ctrl == ETH_TP_MDI) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_X) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_STATE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf &= ~SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_ENABLE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- }
- pdata->mdix_ctrl = mdix_ctrl;
-}
-
-static int smsc95xx_get_link_ksettings(struct net_device *net,
- struct ethtool_link_ksettings *cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- retval = usbnet_get_link_ksettings(net, cmd);
-
- cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
- cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
-
- return retval;
-}
-
-static int smsc95xx_set_link_ksettings(struct net_device *net,
- const struct ethtool_link_ksettings *cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
- set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
-
- retval = usbnet_set_link_ksettings(net, cmd);
-
- return retval;
+ phy_read_status(net->phydev);
+ return net->phydev->link;
}
static const struct ethtool_ops smsc95xx_ethtool_ops = {
- .get_link = usbnet_get_link,
- .nway_reset = usbnet_nway_reset,
+ .get_link = smsc95xx_get_link,
+ .nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -884,30 +740,29 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_regs = smsc95xx_ethtool_getregs,
.get_wol = smsc95xx_ethtool_get_wol,
.set_wol = smsc95xx_ethtool_set_wol,
- .get_link_ksettings = smsc95xx_get_link_ksettings,
- .set_link_ksettings = smsc95xx_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
- struct usbnet *dev = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
- const u8 *mac_addr;
-
/* maybe the boot loader passed the MAC address in devicetree */
- mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->net->dev_addr, mac_addr);
- return;
+ if (!eth_platform_get_mac_address(&dev->udev->dev,
+ dev->net->dev_addr)) {
+ if (is_valid_ether_addr(dev->net->dev_addr)) {
+ /* device tree values are valid so use them */
+ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
+ return;
+ }
}
/* try reading mac address from EEPROM */
@@ -942,7 +797,7 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
/* starts the TX path */
static int smsc95xx_start_tx_path(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -962,7 +817,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
/* Starts the Receive path */
static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
@@ -972,54 +827,9 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
}
-static int smsc95xx_phy_initialize(struct usbnet *dev)
-{
- int bmcr, ret, timeout = 0;
-
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = smsc95xx_mdio_read;
- dev->mii.mdio_write = smsc95xx_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID;
-
- /* reset phy and wait for reset to complete */
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
-
- do {
- msleep(10);
- bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
- timeout++;
- } while ((bmcr & BMCR_RESET) && (timeout < 100));
-
- if (timeout >= 100) {
- netdev_warn(dev->net, "timeout on PHY Reset");
- return -EIO;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
-
- /* read to clear */
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n");
- return ret;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
- PHY_INT_MASK_DEFAULT_);
- mii_nway_restart(&dev->mii);
-
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
- return 0;
-}
-
static int smsc95xx_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
@@ -1198,12 +1008,6 @@ static int smsc95xx_reset(struct usbnet *dev)
smsc95xx_set_multicast(dev->net);
- ret = smsc95xx_phy_initialize(dev);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to init PHY\n");
- return ret;
- }
-
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
if (ret < 0)
return ret;
@@ -1247,7 +1051,8 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = NULL;
+ struct smsc95xx_priv *pdata;
+ bool is_internal_phy;
u32 val;
int ret;
@@ -1259,13 +1064,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
- GFP_KERNEL);
-
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
+ dev->driver_priv = pdata;
+
spin_lock_init(&pdata->mac_cr_lock);
/* LAN95xx devices do not alter the computed checksum of 0 to 0xffff.
@@ -1290,15 +1094,50 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto free_pdata;
+ pdata->mdiobus = mdiobus_alloc();
+ if (!pdata->mdiobus) {
+ ret = -ENOMEM;
+ goto free_pdata;
+ }
+
+ ret = smsc95xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ goto free_mdio;
+
+ is_internal_phy = !(val & HW_CFG_PSEL_);
+ if (is_internal_phy)
+ pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID);
+
+ pdata->mdiobus->priv = dev;
+ pdata->mdiobus->read = smsc95xx_mdiobus_read;
+ pdata->mdiobus->write = smsc95xx_mdiobus_write;
+ pdata->mdiobus->name = "smsc95xx-mdiobus";
+ pdata->mdiobus->parent = &dev->udev->dev;
+
+ snprintf(pdata->mdiobus->id, ARRAY_SIZE(pdata->mdiobus->id),
+ "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum);
+
+ ret = mdiobus_register(pdata->mdiobus);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus\n");
+ goto free_mdio;
+ }
+
+ pdata->phydev = phy_find_first(pdata->mdiobus);
+ if (!pdata->phydev) {
+ netdev_err(dev->net, "no PHY found\n");
+ ret = -ENODEV;
+ goto unregister_mdio;
+ }
+
+ pdata->phydev->is_internal = is_internal_phy;
+
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- goto free_pdata;
+ goto unregister_mdio;
val >>= 16;
- pdata->chip_id = val;
- pdata->mdix_ctrl = get_mdix_status(dev->net);
-
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
pdata->features = (FEATURE_8_WAKEUP_FILTERS |
@@ -1314,12 +1153,13 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ return 0;
- pdata->dev = dev;
- INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+unregister_mdio:
+ mdiobus_unregister(pdata->mdiobus);
- return 0;
+free_mdio:
+ mdiobus_free(pdata->mdiobus);
free_pdata:
kfree(pdata);
@@ -1328,15 +1168,47 @@ free_pdata:
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata) {
- cancel_delayed_work_sync(&pdata->carrier_check);
- netif_dbg(dev, ifdown, dev->net, "free pdata\n");
- kfree(pdata);
- pdata = NULL;
- dev->data[0] = 0;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ mdiobus_unregister(pdata->mdiobus);
+ mdiobus_free(pdata->mdiobus);
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ kfree(pdata);
+}
+
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+ phy_print_status(net->phydev);
+}
+
+static int smsc95xx_start_phy(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ struct net_device *net = dev->net;
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_connect_direct(net, pdata->phydev,
+ &smsc95xx_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+ return ret;
}
+
+ phy_attached_info(net->phydev);
+ phy_start(net->phydev);
+ return 0;
+}
+
+static int smsc95xx_disconnect_phy(struct usbnet *dev)
+{
+ phy_stop(dev->net->phydev);
+ phy_disconnect(dev->net->phydev);
+ return 0;
}
static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
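
The smsc95xx rework above is the standard phylib conversion recipe: publish the MAC's MDIO accessors as a struct mii_bus, locate the PHY on it, then hand link management to phylib via phy_connect_direct() and phy_start(). Condensed, with error handling and some fields trimmed, it looks like this:

/* bind: expose MDIO and find the PHY */
pdata->mdiobus = mdiobus_alloc();
pdata->mdiobus->priv = dev;
pdata->mdiobus->read = smsc95xx_mdiobus_read;
pdata->mdiobus->write = smsc95xx_mdiobus_write;
snprintf(pdata->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
	 dev->udev->bus->busnum, dev->udev->devnum);
mdiobus_register(pdata->mdiobus);
pdata->phydev = phy_find_first(pdata->mdiobus);

/* start: attach and let phylib manage the link */
phy_connect_direct(dev->net, pdata->phydev,
		   smsc95xx_handle_link_change, PHY_INTERFACE_MODE_MII);
phy_start(dev->net->phydev);

/* stop: reverse order */
phy_stop(dev->net->phydev);
phy_disconnect(dev->net->phydev);

With that in place, the old mii_if_info plumbing, the hand-rolled PHY reset and the 2-second carrier poll all become redundant, which is most of what this diff deletes.
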
@@ -1347,39 +1219,37 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
/* read to clear */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC);
if (ret < 0)
return ret;
/* enable interrupt source */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK);
if (ret < 0)
return ret;
ret |= mask;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret);
return 0;
}
static int smsc95xx_link_ok_nopm(struct usbnet *dev)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
@@ -1388,7 +1258,7 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev)
static int smsc95xx_enter_suspend0(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1427,8 +1297,7 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1436,17 +1305,17 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
@@ -1475,7 +1344,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
static int smsc95xx_enter_suspend2(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1497,7 +1366,7 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
static int smsc95xx_enter_suspend3(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1536,7 +1405,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (!netif_running(dev->net)) {
@@ -1584,7 +1453,7 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val, link_up;
int ret;
@@ -1594,8 +1463,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
- cancel_delayed_work_sync(&pdata->carrier_check);
-
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1839,10 +1706,6 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
- if (ret)
- schedule_delayed_work(&pdata->carrier_check,
- CARRIER_CHECK_DELAY);
-
return ret;
}
@@ -1855,14 +1718,13 @@ static int smsc95xx_resume(struct usb_interface *intf)
u32 val;
BUG_ON(!dev);
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = dev->driver_priv;
suspend_flags = pdata->suspend_flags;
netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
@@ -1893,6 +1755,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
+ phy_init_hw(pdata->phydev);
return ret;
}
@@ -2075,7 +1938,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
static int smsc95xx_manage_power(struct usbnet *dev, int on)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
dev->intf->needs_remote_wakeup = on;
@@ -2098,7 +1961,8 @@ static const struct driver_info smsc95xx_info = {
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_reset,
+ .reset = smsc95xx_start_phy,
+ .stop = smsc95xx_disconnect_phy,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2b2a841cd938..6062dc27870e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -34,9 +34,6 @@
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
-#define DRIVER_VERSION "22-Aug-2005"
-
-
/*-------------------------------------------------------------------------*/
/*
@@ -597,7 +594,7 @@ static void rx_complete (struct urb *urb)
case -EPIPE:
dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
- // FALLTHROUGH
+ fallthrough;
/* software-driven interface shutdown */
case -ECONNRESET: /* async unlink */
@@ -986,31 +983,9 @@ EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
{
struct usbnet *dev = netdev_priv(net);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &net->stats);
-
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(dev->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, dev->stats64);
}
EXPORT_SYMBOL_GPL(usbnet_get_stats64);
@@ -1047,7 +1022,6 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
struct usbnet *dev = netdev_priv(net);
strlcpy (info->driver, dev->driver_name, sizeof info->driver);
- strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
strlcpy (info->fw_version, dev->driver_info->description,
sizeof info->fw_version);
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a475f48d43c4..8c737668008a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -234,14 +234,14 @@ static bool veth_is_xdp_frame(void *ptr)
return (unsigned long)ptr & VETH_XDP_FLAG;
}
-static void *veth_ptr_to_xdp(void *ptr)
+static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}
-static void *veth_xdp_to_ptr(void *ptr)
+static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+ return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}
static void veth_ptr_free(void *ptr)
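
veth pushes two kinds of pointers through one ptr_ring and tells them apart by tagging xdp_frame pointers in their low bit, which is always zero for aligned allocations; the hunk above merely gives the encode/decode helpers honest prototypes so the compiler checks callers. The tagging scheme itself, sketched with a hypothetical TAG constant:

#define TAG 1UL	/* low bit is free: allocations are pointer-aligned */

static void *encode(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | TAG);	/* mark as XDP */
}

static bool is_tagged(void *ptr)
{
	return (unsigned long)ptr & TAG;
}

static struct xdp_frame *decode(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TAG);	/* strip the mark */
}
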
@@ -420,6 +420,14 @@ static int veth_select_rxq(struct net_device *dev)
return smp_processor_id() % dev->real_num_rx_queues;
}
+static struct net_device *veth_peer_dev(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ /* Callers must be under RCU read side. */
+ return rcu_dereference(priv->peer);
+}
+
static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames,
u32 flags, bool ndo_xmit)
@@ -897,14 +905,13 @@ static void veth_napi_del(struct net_device *dev)
struct veth_rq *rq = &priv->rq[i];
napi_disable(&rq->xdp_napi);
- napi_hash_del(&rq->xdp_napi);
+ __netif_napi_del(&rq->xdp_napi);
}
synchronize_net();
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_del(&rq->xdp_napi);
rq->rx_notify_masked = false;
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
}
@@ -1225,6 +1232,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
.ndo_xdp_xmit = veth_ndo_xdp_xmit,
+ .ndo_get_peer_dev = veth_peer_dev,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 668685c09e65..21b71148c532 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2610,12 +2610,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_hash_del(&vi->rq[i].napi);
- netif_napi_del(&vi->rq[i].napi);
- netif_napi_del(&vi->sq[i].napi);
+ __netif_napi_del(&vi->rq[i].napi);
+ __netif_napi_del(&vi->sq[i].napi);
}
- /* We called napi_hash_del() before netif_napi_del(),
+	/* We called __netif_napi_del(), so
* we need to respect an RCU grace period before freeing vi->rq
*/
synchronize_net();
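
__netif_napi_del() unhashes and unlinks the NAPI context but, unlike netif_napi_del(), does not wait out the RCU grace period itself. That lets a driver batch all its deletions behind a single synchronize_net(), which is exactly what the veth and virtio_net hunks above do. Pattern sketch, assuming a hypothetical rq array:

for (i = 0; i < n; i++)
	__netif_napi_del(&rq[i].napi);	/* no per-call RCU wait */

synchronize_net();			/* one grace period for all */

kfree(rq);	/* safe: no RCU reader can still reach the napi structs */
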
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 60c1aadece89..f2793ffde191 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -608,8 +608,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
-static int vrf_finish_direct(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+static void vrf_finish_direct(struct sk_buff *skb)
{
struct net_device *vrf_dev = skb->dev;
@@ -628,7 +627,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
skb_pull(skb, ETH_HLEN);
}
- return 1;
+	/* discard conntrack state */
+ nf_reset_ct(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -707,15 +707,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip6_local_out(net, sk, skb);
+}
+
static int vrf_output6_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IPV6);
- return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output6_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output6_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip6_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
@@ -728,18 +754,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output6_direct);
+ skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
if (likely(err == 1))
err = vrf_output6_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
@@ -919,15 +942,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
return skb;
}
+static int vrf_output_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ vrf_finish_direct(skb);
+
+ return vrf_ip_local_out(net, sk, skb);
+}
+
static int vrf_output_direct(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
+ int err = 1;
+
skb->protocol = htons(ETH_P_IP);
- return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
- net, sk, skb, NULL, skb->dev,
- vrf_finish_direct,
- !(IPCB(skb)->flags & IPSKB_REROUTED));
+ if (!(IPCB(skb)->flags & IPSKB_REROUTED))
+ err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
+ NULL, skb->dev, vrf_output_direct_finish);
+
+ if (likely(err == 1))
+ vrf_finish_direct(skb);
+
+ return err;
+}
+
+static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = vrf_output_direct(net, sk, skb);
+ if (likely(err == 1))
+ err = vrf_ip_local_out(net, sk, skb);
+
+ return err;
}
static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
@@ -940,18 +989,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
- skb, NULL, vrf_dev, vrf_output_direct);
+ skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
if (likely(err == 1))
err = vrf_output_direct(net, sk, skb);
- /* reset skb device */
if (likely(err == 1))
- nf_reset_ct(skb);
- else
- skb = NULL;
+ return skb;
- return skb;
+ return NULL;
}
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
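
The vrf rework leans on nf_hook()'s return convention: 1 means the chain accepted the packet synchronously and the caller should continue processing inline; anything else means the skb was queued (e.g. NFQUEUE), stolen or dropped, in which case the okfn passed to nf_hook() is what eventually resumes the packet. That is why each stage above gained an explicit *_finish counterpart. Calling pattern:

err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb,
	      NULL, vrf_dev, vrf_ip_out_direct_finish);
if (likely(err == 1))	/* verdict ACCEPT: continue on this path */
	err = vrf_output_direct(net, sk, skb);
/* err != 1: the okfn (or nobody) owns skb now; don't touch it again */
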
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b9fefe27e3e8..1a557aeba32b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -190,8 +190,9 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
-/* Find VXLAN socket based on network namespace, address family and UDP port
- * and enabled unshareable flags.
+/* Find VXLAN socket based on network namespace, address family, UDP port,
+ * enabled unshareable flags and socket device binding (see l3mdev with
+ * non-default VRF).
*/
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
__be16 port, u32 flags, int ifindex)
@@ -1825,7 +1826,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1875,6 +1875,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
!net_eq(vxlan->net, dev_net(vxlan->dev))))
goto drop;
+ if (vs->flags & VXLAN_F_REMCSUM_RX)
+ if (unlikely(!vxlan_remcsum(&unparsed, skb, vs->flags)))
+ goto drop;
+
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
@@ -1891,9 +1895,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_REMCSUM_RX)
- if (!vxlan_remcsum(&unparsed, skb, vs->flags))
- goto drop;
if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
/* Note that GBP and GPE can never be active together. This is
@@ -1938,12 +1939,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- stats = this_cpu_ptr(vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
+ dev_sw_netstats_rx_add(vxlan->dev, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
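
dev_sw_netstats_rx_add() is the RX-side companion to dev_fetch_sw_netstats() seen earlier: one call that grabs this CPU's tstats, enters the u64_stats section and bumps the counters. Receive-path sketch:

/* before handing the skb up the stack */
dev_sw_netstats_rx_add(vxlan->dev, skb->len);	/* rx_packets++, rx_bytes += len */
gro_cells_receive(&vxlan->gro_cells, skb);
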
@@ -3890,7 +3886,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
}
err = rtnl_configure_link(dev, NULL);
- if (err)
+ if (err < 0)
goto unlink;
if (f) {
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index f8aed0696d77..2369ca250cd6 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
chan->tx_status = 1;
spin_unlock_irqrestore(&cosa->lock, flags);
up(&chan->wsem);
+ kfree(kbuf);
return -ERESTARTSYS;
}
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9edd94679283..dca97cd7c4e7 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1295,3 +1295,4 @@ static struct platform_driver ucc_hdlc_driver = {
module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 9b00708676cf..1bdd3df0867a 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -46,7 +46,15 @@ static struct hdlc_proto *first_proto;
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
- struct hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct hdlc_device *hdlc;
+
+ /* First make sure "dev" is an HDLC device */
+ if (!(dev->priv_flags & IFF_WAN_HDLC)) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ hdlc = dev_to_hdlc(dev);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
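
A packet_type handler registered for ETH_P_HDLC receives every frame carrying that protocol value, no matter which device it came in on, so hdlc_rcv() must confirm the device really is HDLC before dev_to_hdlc() dereferences its private area. Registration-side sketch (hdlc.c's actual names may differ slightly):

static struct packet_type hdlc_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_HDLC),
	.func = hdlc_rcv,	/* can be fed skbs from any netdev */
};

/* module init */
dev_add_pack(&hdlc_packet_type);
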
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index d6cfd51613ed..409e5a7ad8e2 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -271,65 +271,62 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
}
-static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
- u16 head_len;
- struct sk_buff *skb = *skb_p;
-
- switch (skb->protocol) {
- case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CCITT_ANSI_LMI;
- break;
-
- case cpu_to_be16(NLPID_CISCO_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CISCO_LMI;
- break;
-
- case cpu_to_be16(ETH_P_IP):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IP;
- break;
-
- case cpu_to_be16(ETH_P_IPV6):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IPV6;
- break;
-
- case cpu_to_be16(ETH_P_802_3):
- head_len = 10;
- if (skb_headroom(skb) < head_len) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb,
- head_len);
- if (!skb2)
- return -ENOBUFS;
- dev_kfree_skb(skb);
- skb = *skb_p = skb2;
+ if (!skb->dev) { /* Control packets */
+ switch (dlci) {
+ case LMI_CCITT_ANSI_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CCITT_ANSI_LMI;
+ break;
+
+ case LMI_CISCO_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CISCO_LMI;
+ break;
+
+ default:
+ return -EINVAL;
}
- skb_push(skb, head_len);
+
+ } else if (skb->dev->type == ARPHRD_DLCI) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IP;
+ break;
+
+ case htons(ETH_P_IPV6):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IPV6;
+ break;
+
+ default:
+ skb_push(skb, 10);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ skb->data[5] = 0x00;
+ skb->data[6] = 0x00;
+ skb->data[7] = 0x00;
+ /* This should be an Ethertype: */
+ *(__be16 *)(skb->data + 8) = skb->protocol;
+ }
+
+ } else if (skb->dev->type == ARPHRD_ETHER) {
+ skb_push(skb, 10);
skb->data[3] = FR_PAD;
skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ skb->data[5] = 0x00;
skb->data[6] = 0x80;
skb->data[7] = 0xC2;
+ /* PID 00-07 stands for Ethernet frames without FCS */
skb->data[8] = 0x00;
- skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
- break;
+ skb->data[9] = 0x07;
- default:
- head_len = 10;
- skb_push(skb, head_len);
- skb->data[3] = FR_PAD;
- skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
- skb->data[6] = FR_PAD;
- skb->data[7] = FR_PAD;
- *(__be16*)(skb->data + 8) = skb->protocol;
+ } else {
+ return -EINVAL;
}
dlci_to_q922(skb->data, dlci);
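
For the bridged-Ethernet case, the 10 bytes pushed above follow the RFC 2427 SNAP encapsulation; dlci_to_q922() fills the leading address bytes, and the control byte is set in the (trimmed) tail of this function. Layout for reference:

/* data[0..1]  Q.922 address carrying the DLCI (dlci_to_q922())
 * data[2]     UI control field
 * data[3]     pad (FR_PAD)
 * data[4]     NLPID_SNAP (0x80)
 * data[5..7]  OUI 00-80-C2 (IEEE 802.1 organization)
 * data[8..9]  PID 00-07 (bridged Ethernet frame without FCS)
 */
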
@@ -410,38 +407,49 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
- if (pvc->state.active) {
- if (dev->type == ARPHRD_ETHER) {
- int pad = ETH_ZLEN - skb->len;
- if (pad > 0) { /* Pad the frame with zeros */
- int len = skb->len;
- if (skb_tailroom(skb) < pad)
- if (pskb_expand_head(skb, 0, pad,
- GFP_ATOMIC)) {
- dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- skb_put(skb, pad);
- memset(skb->data + len, 0, pad);
- }
- skb->protocol = cpu_to_be16(ETH_P_802_3);
- }
- if (!fr_hard_header(&skb, pvc->dlci)) {
- dev->stats.tx_bytes += skb->len;
- dev->stats.tx_packets++;
- if (pvc->state.fecn) /* TX Congestion counter */
- dev->stats.tx_compressed++;
- skb->dev = pvc->frad;
- skb->protocol = htons(ETH_P_HDLC);
- skb_reset_network_header(skb);
- dev_queue_xmit(skb);
- return NETDEV_TX_OK;
+ if (!pvc->state.active)
+ goto drop;
+
+ if (dev->type == ARPHRD_ETHER) {
+ int pad = ETH_ZLEN - skb->len;
+
+ if (pad > 0) { /* Pad the frame with zeros */
+ if (__skb_pad(skb, pad, false))
+ goto drop;
+ skb_put(skb, pad);
}
}
+ /* We already requested the header space with dev->needed_headroom.
+ * So this is just a protection in case the upper layer didn't take
+ * dev->needed_headroom into consideration.
+ */
+ if (skb_headroom(skb) < 10) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
+
+ if (!skb2)
+ goto drop;
+ dev_kfree_skb(skb);
+ skb = skb2;
+ }
+
+ skb->dev = dev;
+ if (fr_hard_header(skb, pvc->dlci))
+ goto drop;
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ if (pvc->state.fecn) /* TX Congestion counter */
+ dev->stats.tx_compressed++;
+ skb->dev = pvc->frad;
+ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
+ dev_queue_xmit(skb);
+ return NETDEV_TX_OK;
+
+drop:
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
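
The headroom check above is only a fallback: the intended contract is that the PVC device advertises its worst-case header in dev->needed_headroom so upper layers reserve the space up front. A hedged sketch of that setup (pvc_setup_sketch is a hypothetical helper; 10 bytes is the largest header fr_hard_header() builds):

	static void pvc_setup_sketch(struct net_device *dev)
	{
		/* worst case from fr_hard_header(): 2-byte Q.922 address, UI
		 * control byte, pad, NLPID_SNAP, 3-byte OUI, 2-byte PID/Ethertype
		 */
		dev->needed_headroom = 10;
	}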
@@ -494,11 +502,9 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
memset(skb->data, 0, len);
skb_reserve(skb, 4);
if (lmi == LMI_CISCO) {
- skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
- fr_hard_header(&skb, LMI_CISCO_DLCI);
+ fr_hard_header(skb, LMI_CISCO_DLCI);
} else {
- skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
- fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+ fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
}
data = skb_tail_pointer(skb);
data[i++] = LMI_CALLREF;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 08e0a46501de..c70a518b8b47 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -99,6 +99,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
old_qlen = dev->tx_queue_len;
ether_setup(dev);
dev->tx_queue_len = old_qlen;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
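
ether_setup() re-enables IFF_TX_SKB_SHARING, which promises the stack that the driver will not modify skbs it transmits; a WAN path that pushes its own headers cannot keep that promise, so the flag must be cleared again right after the re-setup. A sketch of the general pattern (example_reconfigure_as_ether is hypothetical):

	static void example_reconfigure_as_ether(struct net_device *dev)
	{
		ether_setup(dev);			/* resets priv_flags defaults */
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;	/* our xmit path writes to the skb */
	}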
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index f999db788506..2b6051bda3fb 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -62,22 +62,4 @@ void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
}
#endif /* DEBUG */
-void lmc_trace(struct net_device *dev, char *msg){
-#ifdef LMC_TRACE
- unsigned long j = jiffies + 3; /* Wait for 50 ms */
-
- if(in_interrupt()){
- printk("%s: * %s\n", dev->name, msg);
-// while(time_before(jiffies, j+10))
-// ;
- }
- else {
- printk("%s: %s\n", dev->name, msg);
- while(time_before(jiffies, j))
- schedule();
- }
-#endif
-}
-
-
/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index 820adcae5d67..cfae9eddf003 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -48,6 +48,5 @@ extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-void lmc_trace(struct net_device *dev, char *msg);
#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 842def21e089..36600b0a0ab0 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -113,8 +113,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
u16 regVal;
unsigned long flags;
- lmc_trace(dev, "lmc_ioctl in");
-
/*
* Most functions mess with the structure
* Disable interrupts while we do the polling
@@ -619,8 +617,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- lmc_trace(dev, "lmc_ioctl out");
-
return ret;
}
@@ -634,8 +630,6 @@ static void lmc_watchdog(struct timer_list *t) /*fold00*/
u32 ticks;
unsigned long flags;
- lmc_trace(dev, "lmc_watchdog in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
if(sc->check != 0xBEAFCAFE){
@@ -782,9 +776,6 @@ kick_timer:
add_timer (&sc->timer);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- lmc_trace(dev, "lmc_watchdog out");
-
}
static int lmc_attach(struct net_device *dev, unsigned short encoding,
@@ -813,8 +804,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
static int cards_found;
- /* lmc_trace(dev, "lmc_init_one in"); */
-
err = pcim_enable_device(pdev);
if (err) {
printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
@@ -955,7 +944,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sc->lmc_ok = 0;
sc->last_link_status = 0;
- lmc_trace(dev, "lmc_init_one out");
return 0;
}
@@ -981,8 +969,6 @@ static int lmc_open(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
int err;
- lmc_trace(dev, "lmc_open in");
-
lmc_led_on(sc, LMC_DS3_LED0);
lmc_dec_reset(sc);
@@ -992,17 +978,14 @@ static int lmc_open(struct net_device *dev)
LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
lmc_mii_readreg(sc, 0, 17));
- if (sc->lmc_ok){
- lmc_trace(dev, "lmc_open lmc_ok out");
+ if (sc->lmc_ok)
return 0;
- }
lmc_softreset (sc);
/* Since we have to use PCI bus, this should work on x86,alpha,ppc */
if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
- lmc_trace(dev, "lmc_open irq failed out");
return -EAGAIN;
}
sc->got_irq = 1;
@@ -1078,8 +1061,6 @@ static int lmc_open(struct net_device *dev)
sc->timer.expires = jiffies + HZ;
add_timer (&sc->timer);
- lmc_trace(dev, "lmc_open out");
-
return 0;
}
@@ -1091,8 +1072,6 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
lmc_softc_t *sc = dev_to_sc(dev);
- lmc_trace(dev, "lmc_running_reset in");
-
/* stop interrupts */
/* Clear the interrupt mask */
LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
@@ -1114,8 +1093,6 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- lmc_trace(dev, "lmc_running_reset_out");
}
@@ -1128,16 +1105,12 @@ static int lmc_close(struct net_device *dev)
/* not calling release_region() as we should */
lmc_softc_t *sc = dev_to_sc(dev);
- lmc_trace(dev, "lmc_close in");
-
sc->lmc_ok = 0;
sc->lmc_media->set_link_status (sc, 0);
del_timer (&sc->timer);
lmc_proto_close(sc);
lmc_ifdown (dev);
- lmc_trace(dev, "lmc_close out");
-
return 0;
}
@@ -1149,8 +1122,6 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
u32 csr6;
int i;
- lmc_trace(dev, "lmc_ifdown in");
-
/* Don't let anything else go on right now */
// dev->start = 0;
netif_stop_queue(dev);
@@ -1200,8 +1171,6 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
netif_wake_queue(dev);
sc->extra_stats.tx_tbusy0++;
- lmc_trace(dev, "lmc_ifdown out");
-
return 0;
}
@@ -1220,8 +1189,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
int max_work = LMC_RXDESCS;
int handled = 0;
- lmc_trace(dev, "lmc_interrupt in");
-
spin_lock(&sc->lmc_lock);
/*
@@ -1264,12 +1231,10 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
lmc_running_reset (dev);
break;
}
-
- if (csr & TULIP_STS_RXINTR){
- lmc_trace(dev, "rx interrupt");
+
+ if (csr & TULIP_STS_RXINTR)
lmc_rx (dev);
-
- }
+
if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
int n_compl = 0 ;
@@ -1389,7 +1354,6 @@ lmc_int_fail_out:
spin_unlock(&sc->lmc_lock);
- lmc_trace(dev, "lmc_interrupt out");
return IRQ_RETVAL(handled);
}
@@ -1401,8 +1365,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
int entry;
unsigned long flags;
- lmc_trace(dev, "lmc_start_xmit in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
/* normal path, tbusy known to be zero */
@@ -1477,7 +1439,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- lmc_trace(dev, "lmc_start_xmit_out");
return NETDEV_TX_OK;
}
@@ -1493,8 +1454,6 @@ static int lmc_rx(struct net_device *dev)
struct sk_buff *skb, *nsb;
u16 len;
- lmc_trace(dev, "lmc_rx in");
-
lmc_led_on(sc, LMC_DS3_LED3);
rxIntLoopCnt = 0; /* debug -baz */
@@ -1673,9 +1632,6 @@ static int lmc_rx(struct net_device *dev)
lmc_led_off(sc, LMC_DS3_LED3);
skip_out_of_mem:
-
- lmc_trace(dev, "lmc_rx out");
-
return 0;
}
@@ -1684,16 +1640,12 @@ static struct net_device_stats *lmc_get_stats(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
unsigned long flags;
- lmc_trace(dev, "lmc_get_stats in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- lmc_trace(dev, "lmc_get_stats out");
-
return &sc->lmc_device->stats;
}
@@ -1712,12 +1664,8 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
int command = (0xf6 << 10) | (devaddr << 5) | regno;
int retval = 0;
- lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
-
LMC_MII_SYNC (sc);
- lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
-
for (i = 15; i >= 0; i--)
{
int dataval = (command & (1 << i)) ? 0x20000 : 0;
@@ -1730,8 +1678,6 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
/* __SLOW_DOWN_IO; */
}
- lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
-
for (i = 19; i > 0; i--)
{
LMC_CSR_WRITE (sc, csr_9, 0x40000);
@@ -1743,8 +1689,6 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
/* __SLOW_DOWN_IO; */
}
- lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
-
return (retval >> 1) & 0xffff;
}
@@ -1753,8 +1697,6 @@ void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno,
int i = 32;
int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
- lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
-
LMC_MII_SYNC (sc);
i = 31;
@@ -1787,16 +1729,12 @@ void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno,
/* __SLOW_DOWN_IO; */
i--;
}
-
- lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
int i;
- lmc_trace(sc->lmc_device, "lmc_softreset in");
-
/* Initialize the receive rings and buffers. */
sc->lmc_txfull = 0;
sc->lmc_next_rx = 0;
@@ -1871,55 +1809,40 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
}
sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-
- lmc_trace(sc->lmc_device, "lmc_softreset out");
}
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
sc->lmc_gpio_io &= ~bits;
LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
- lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
sc->lmc_gpio_io |= bits;
LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
- lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_led_on in");
- if((~sc->lmc_miireg16) & led){ /* Already on! */
- lmc_trace(sc->lmc_device, "lmc_led_on aon out");
+ if ((~sc->lmc_miireg16) & led) /* Already on! */
return;
- }
-
+
sc->lmc_miireg16 &= ~led;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- lmc_trace(sc->lmc_device, "lmc_led_on out");
}
void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_led_off in");
- if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
- lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
+	if (sc->lmc_miireg16 & led) /* Already set, don't do anything */
return;
- }
-
+
sc->lmc_miireg16 |= led;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- lmc_trace(sc->lmc_device, "lmc_led_off out");
}
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_reset in");
sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
@@ -1955,13 +1878,11 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
sc->lmc_media->init(sc);
sc->extra_stats.resetCount++;
- lmc_trace(sc->lmc_device, "lmc_reset out");
}
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
u32 val;
- lmc_trace(sc->lmc_device, "lmc_dec_reset in");
/*
* disable all interrupts
@@ -2017,14 +1938,11 @@ static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
val = LMC_CSR_READ(sc, csr_sia_general);
val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
LMC_CSR_WRITE(sc, csr_sia_general, val);
-
- lmc_trace(sc->lmc_device, "lmc_dec_reset out");
}
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
size_t csr_size)
{
- lmc_trace(sc->lmc_device, "lmc_initcsrs in");
sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
@@ -2041,7 +1959,6 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
- lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
@@ -2050,8 +1967,6 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
u32 csr6;
unsigned long flags;
- lmc_trace(dev, "lmc_driver_timeout in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
printk("%s: Xmitter busy|\n", dev->name);
@@ -2094,8 +2009,4 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
bug_out:
spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- lmc_trace(dev, "lmc_driver_timeout out");
-
-
}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 23ca4a62d6f5..ec1ac7b1f3fd 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -1026,7 +1026,6 @@ lmc_t1_get_link_status (lmc_softc_t * const sc)
* led3 red = Loss of Signal (LOS) or out of frame (OOF)
* conditions detected on T3 receive signal
*/
- lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in");
lmc_led_on(sc, LMC_DS3_LED2);
lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
@@ -1120,9 +1119,6 @@ lmc_t1_get_link_status (lmc_softc_t * const sc)
lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
- lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out");
-
return ret;
}
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index a58301dd0c1f..4e9cc83b615a 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -47,7 +47,6 @@
// attach
void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_attach in");
if (sc->if_type == LMC_NET) {
struct net_device *dev = sc->lmc_device;
/*
@@ -57,12 +56,10 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
dev->hard_header_len = 0;
dev->addr_len = 0;
}
- lmc_trace(sc->lmc_device, "lmc_proto_attach out");
}
int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
{
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
if (sc->if_type == LMC_PPP)
return hdlc_ioctl(sc->lmc_device, ifr, cmd);
return -EOPNOTSUPP;
@@ -72,54 +69,38 @@ int lmc_proto_open(lmc_softc_t *sc)
{
int ret = 0;
- lmc_trace(sc->lmc_device, "lmc_proto_open in");
-
if (sc->if_type == LMC_PPP) {
ret = hdlc_open(sc->lmc_device);
if (ret < 0)
printk(KERN_WARNING "%s: HDLC open failed: %d\n",
sc->name, ret);
}
-
- lmc_trace(sc->lmc_device, "lmc_proto_open out");
return ret;
}
void lmc_proto_close(lmc_softc_t *sc)
{
- lmc_trace(sc->lmc_device, "lmc_proto_close in");
-
if (sc->if_type == LMC_PPP)
hdlc_close(sc->lmc_device);
-
- lmc_trace(sc->lmc_device, "lmc_proto_close out");
}
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_type in");
switch(sc->if_type){
case LMC_PPP:
return hdlc_type_trans(skb, sc->lmc_device);
- break;
case LMC_NET:
return htons(ETH_P_802_2);
- break;
case LMC_RAW: /* Packet type for skbuff kind of useless */
return htons(ETH_P_802_2);
- break;
default:
printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
return htons(ETH_P_802_2);
- break;
}
- lmc_trace(sc->lmc_device, "lmc_proto_tye out");
-
}
void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_netif in");
switch(sc->if_type){
case LMC_PPP:
case LMC_NET:
@@ -129,5 +110,4 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
case LMC_RAW:
break;
}
- lmc_trace(sc->lmc_device, "lmc_proto_netif out");
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 40c04ea1200a..2fde439543fb 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -260,11 +260,12 @@ static int __init sbni_init(struct net_device *dev)
return sbni_isa_probe( dev );
/* otherwise we have to search for our adapter */
- if( io[ num ] != -1 )
- dev->base_addr = io[ num ],
+ if( io[ num ] != -1 ) {
+ dev->base_addr = io[ num ];
dev->irq = irq[ num ];
- else if( scandone || io[ 0 ] != -1 )
+ } else if( scandone || io[ 0 ] != -1 ) {
return -ENODEV;
+ }
/* if io[ num ] contains a non-zero address, then it is on the ISA bus */
if( dev->base_addr )
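
This and the following sbni.c hunks all fix the same hazard: multi-statement branches chained with the comma operator instead of braces. The comma form compiles and behaves identically today, but the block structure is invisible and breaks silently as soon as another statement is added. An illustrative contrast (comma_vs_braces and its parameters are placeholders):

	static void comma_vs_braces(int cond, int *a, int *b)
	{
		if (cond)
			*a = 1,		/* comma operator: both stores are guarded... */
			*b = 2;		/* ...but only because of easily missed punctuation */

		if (cond) {		/* equivalent, and safe to extend later */
			*a = 1;
			*b = 2;
		}
	}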
@@ -399,12 +400,13 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
nl->maxframe = DEFAULT_FRAME_LEN;
nl->csr1.rate = baud[ num ];
- if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
+ if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) {
/* autotune rxl */
- nl->cur_rxl_index = DEF_RXL,
+ nl->cur_rxl_index = DEF_RXL;
nl->delta_rxl = DEF_RXL_DELTA;
- else
+ } else {
nl->delta_rxl = 0;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
if( inb( ioaddr + CSR0 ) & 0x01 )
nl->state |= FL_SLOW_MODE;
@@ -512,13 +514,15 @@ sbni_interrupt( int irq, void *dev_id )
do {
repeat = 0;
- if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
- handle_channel( dev ),
+ if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) ) {
+ handle_channel( dev );
repeat = 1;
+ }
if( nl->second && /* second channel present */
- (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
- handle_channel( nl->second ),
+ (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) ) {
+ handle_channel( nl->second );
repeat = 1;
+ }
} while( repeat );
if( nl->second )
@@ -610,11 +614,12 @@ recv_frame( struct net_device *dev )
nl->state |= FL_PREV_OK;
if( framelen > 4 )
nl->in_stats.all_rx_number++;
- } else
- nl->state &= ~FL_PREV_OK,
- change_level( dev ),
- nl->in_stats.all_rx_number++,
+ } else {
+ nl->state &= ~FL_PREV_OK;
+ change_level( dev );
+ nl->in_stats.all_rx_number++;
nl->in_stats.bad_rx_number++;
+ }
return !frame_ok || framelen > 4;
}
@@ -689,9 +694,10 @@ download_data( struct net_device *dev, u32 *crc_p )
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if the packet is too short, we should write some more bytes to pad */
- for( len = nl->framelen - len; len--; )
- outb( 0, dev->base_addr + DAT ),
+ for( len = nl->framelen - len; len--; ) {
+ outb( 0, dev->base_addr + DAT );
*crc_p = CRC32( 0, *crc_p );
+ }
}
@@ -703,9 +709,10 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
int frame_ok;
- if( is_first )
- nl->wait_frameno = frameno,
+ if( is_first ) {
+ nl->wait_frameno = frameno;
nl->inppos = 0;
+ }
if( nl->wait_frameno == frameno ) {
@@ -717,33 +724,35 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
* an error occurred... drop the entire packet
*/
else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
- != 0 )
- nl->wait_frameno = 0,
- nl->inppos = 0,
+ != 0 ) {
+ nl->wait_frameno = 0;
+ nl->inppos = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_missed_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
#endif
+ }
/* now skip all frames until is_first != 0 */
} else
frame_ok = skip_tail( dev->base_addr, framelen, crc );
- if( is_first && !frame_ok )
+ if( is_first && !frame_ok ) {
/*
* The frame was broken, but we had already stored
* is_first... Drop the entire packet.
*/
- nl->wait_frameno = 0,
+ nl->wait_frameno = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_crc_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_crc_errors++;
#endif
+ }
return frame_ok;
}
@@ -782,17 +791,18 @@ interpret_ack( struct net_device *dev, unsigned ack )
if( nl->state & FL_WAIT_ACK ) {
nl->outpos += nl->framelen;
- if( --nl->tx_frameno )
+ if( --nl->tx_frameno ) {
nl->framelen = min_t(unsigned int,
nl->maxframe,
nl->tx_buf_p->len - nl->outpos);
- else
- send_complete( dev ),
+ } else {
+ send_complete( dev );
#ifdef CONFIG_SBNI_MULTILINE
netif_wake_queue( nl->master );
#else
netif_wake_queue( dev );
#endif
+ }
}
}
@@ -872,16 +882,17 @@ drop_xmit_queue( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
- if( nl->tx_buf_p )
- dev_kfree_skb_any( nl->tx_buf_p ),
- nl->tx_buf_p = NULL,
+ if( nl->tx_buf_p ) {
+ dev_kfree_skb_any( nl->tx_buf_p );
+ nl->tx_buf_p = NULL;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.tx_errors++,
+ nl->master->stats.tx_errors++;
nl->master->stats.tx_carrier_errors++;
#else
- dev->stats.tx_errors++,
+ dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
#endif
+ }
nl->tx_frameno = 0;
nl->framelen = 0;
@@ -1327,12 +1338,13 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
spin_lock( &nl->lock );
flags = *(struct sbni_flags*) &ifr->ifr_ifru;
- if( flags.fixed_rxl )
- nl->delta_rxl = 0,
+ if( flags.fixed_rxl ) {
+ nl->delta_rxl = 0;
nl->cur_rxl_index = flags.rxl;
- else
- nl->delta_rxl = DEF_RXL_DELTA,
+ } else {
+ nl->delta_rxl = DEF_RXL_DELTA;
nl->cur_rxl_index = DEF_RXL;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl->csr1.rate = flags.rate;
@@ -1526,13 +1538,16 @@ sbni_setup( char *p )
(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
- if( *p == ';' )
- ++p, ++n, parm = 0;
- else if( *p++ != ',' )
+ if( *p == ';' ) {
+ ++p;
+ ++n;
+ parm = 0;
+ } else if( *p++ != ',' ) {
break;
- else
+ } else {
if( ++parm >= 5 )
break;
+ }
}
bad_param:
pr_err("Error in sbni kernel parameter!\n");
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 29053bec694e..8e3b1c717c10 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -22,8 +22,6 @@
#include <linux/io.h>
#include "slic_ds26522.h"
-#define DRV_NAME "ds26522"
-
#define SLIC_TRANS_LEN 1
#define SLIC_TWO_LEN 2
#define SLIC_THREE_LEN 3
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index c418767a890a..54b1a5aee82d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -202,8 +202,7 @@ static void x25_asy_bump(struct x25_asy *sl)
return;
}
skb_put_data(skb, sl->rbuff, count);
- skb->protocol = x25_type_trans(skb, sl->dev);
- err = lapb_data_received(skb->dev, skb);
+ err = lapb_data_received(sl->dev, skb);
if (err != LAPB_OK) {
kfree_skb(skb);
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
@@ -243,8 +242,6 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
- /* VSV */
- clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
}
/*
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index eb4a4216ee94..87798287c9ca 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -35,7 +35,6 @@ struct x25_asy {
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ESCAPE 1 /* ESC received */
#define SLF_ERROR 2 /* Parity, etc. error */
-#define SLF_OUTWAIT 4 /* Waiting for output */
};
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 9afed3b133d3..8df98757d901 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -656,8 +656,6 @@ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
*
* @i2400m: device descriptor
*
- * @msg_skb: an skb *
- *
* @buf: pointer to the buffer containing the message to be sent; it
* has to start with a &struct i2400M_l3l4_hdr and then
* followed by the payload. Once this function returns, the
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 20a4f3c0a0a1..d0f3b6d7f408 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -22,8 +22,8 @@ static struct genl_family genl_family;
static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
@@ -31,12 +31,12 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
};
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
[WGPEER_A_FLAGS] = { .type = NLA_U32 },
- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
@@ -45,7 +45,7 @@ static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
};
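
NLA_POLICY_EXACT_LEN() and NLA_POLICY_MIN_LEN() from <net/netlink.h> express the same length validation the open-coded initializers performed, just behind one macro. A sketch with a hypothetical attribute set (the EXAMPLE_A_* names are illustrative, not part of the wireguard uAPI):

	enum {
		EXAMPLE_A_UNSPEC,
		EXAMPLE_A_KEY,
		EXAMPLE_A_ADDR,
		__EXAMPLE_A_LAST
	};
	#define EXAMPLE_A_MAX (__EXAMPLE_A_LAST - 1)

	static const struct nla_policy example_policy[EXAMPLE_A_MAX + 1] = {
		[EXAMPLE_A_KEY]  = NLA_POLICY_EXACT_LEN(32),	/* any other length is rejected */
		[EXAMPLE_A_ADDR] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
	};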
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 22f9f2f8af10..5cf2045fadef 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -324,8 +324,8 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
- pci_unmap_single(priv->pdev, info->mapping,
- info->skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, info->mapping,
+ info->skb->len, DMA_TO_DEVICE);
ieee80211_tx_info_clear_status(txi);
@@ -382,35 +382,34 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
} else if (pktlen < RX_COPY_BREAK) {
skb = dev_alloc_skb(pktlen);
if (skb) {
- pci_dma_sync_single_for_cpu(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- pktlen, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ pktlen,
+ DMA_FROM_DEVICE);
skb_put_data(skb,
skb_tail_pointer(priv->rx_buffers[entry].skb),
pktlen);
- pci_dma_sync_single_for_device(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
}
} else {
newskb = dev_alloc_skb(RX_PKT_SIZE);
if (newskb) {
skb = priv->rx_buffers[entry].skb;
skb_put(skb, pktlen);
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE, DMA_FROM_DEVICE);
priv->rx_buffers[entry].skb = newskb;
priv->rx_buffers[entry].mapping =
- pci_map_single(priv->pdev,
+ dma_map_single(&priv->pdev->dev,
skb_tail_pointer(newskb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pdev,
- priv->rx_buffers[entry].mapping)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping)) {
priv->rx_buffers[entry].skb = NULL;
dev_kfree_skb(newskb);
skb = NULL;
@@ -1449,11 +1448,11 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
if (rx_info->skb == NULL)
break;
- rx_info->mapping = pci_map_single(priv->pdev,
+ rx_info->mapping = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(rx_info->skb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
dev_kfree_skb(rx_info->skb);
rx_info->skb = NULL;
break;
@@ -1490,10 +1489,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->rx_buffers[i].skb)
continue;
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[i].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[i].mapping, RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
@@ -1502,10 +1500,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->tx_buffers[i].skb)
continue;
- pci_unmap_single(priv->pdev,
+ dma_unmap_single(&priv->pdev->dev,
priv->tx_buffers[i].mapping,
- priv->tx_buffers[i].skb->len,
- PCI_DMA_TODEVICE);
+ priv->tx_buffers[i].skb->len, DMA_TO_DEVICE);
dev_kfree_skb(priv->tx_buffers[i].skb);
}
@@ -1632,9 +1629,9 @@ static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
unsigned int entry;
u32 flag;
- mapping = pci_map_single(priv->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping))
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, mapping))
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
@@ -1745,8 +1742,8 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
/* Allocate TX/RX descriptors */
ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size;
- priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
- &priv->rx_ring_dma);
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev, ring_size,
+ &priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring) {
kfree(priv->rx_buffers);
@@ -1818,8 +1815,8 @@ static int adm8211_probe(struct pci_dev *pdev,
return err; /* someone else grabbed it? don't disable it */
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -1929,10 +1926,10 @@ static int adm8211_probe(struct pci_dev *pdev,
kfree(priv->eeprom);
err_free_desc:
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
err_iounmap:
@@ -1962,10 +1959,10 @@ static void adm8211_remove(struct pci_dev *pdev)
priv = dev->priv;
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
kfree(priv->eeprom);
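
The adm8211 hunks above are a mechanical conversion from the legacy pci_* DMA wrappers to the generic DMA API: &pdev->dev replaces the pci_dev argument, DMA_{TO,FROM}_DEVICE replaces PCI_DMA_{TO,FROM}DEVICE, and dma_alloc_coherent() gains an explicit GFP flag. A hedged sketch of the streaming-TX half of the pattern (example_map_tx is hypothetical; it assumes <linux/dma-mapping.h> and <linux/skbuff.h>):

	static int example_map_tx(struct device *dev, struct sk_buff *skb,
				  dma_addr_t *mapping)
	{
		*mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *mapping))	/* must be checked before use */
			return -ENOMEM;
		/* hand *mapping to the hardware; when the descriptor is reclaimed:
		 * dma_unmap_single(dev, *mapping, skb->len, DMA_TO_DEVICE);
		 */
		return 0;
	}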
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 5b6db6e66f65..4481ed375f55 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -12,18 +12,11 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- int ret;
-
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
-
- /* Enable hardware clock to speed up firmware download */
- if (ar->hw_params.hw_ops->enable_pll_clk) {
- ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
- }
}
+EXPORT_SYMBOL(ath10k_bmi_start);
int ath10k_bmi_done(struct ath10k *ar)
{
@@ -197,6 +190,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
return 0;
}
+EXPORT_SYMBOL(ath10k_bmi_read_memory);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 294fbc1e89ab..c45c814fd122 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1299,29 +1299,24 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
- spin_lock_bh(&ce->ce_lock);
-
- /* Clear the copy-complete interrupts that will be handled here. */
+ /*
+ * Clear before handling
+ *
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ *
+	 * NOTE: When the last copy engine interrupt is cleared, the
+	 * hardware will go to sleep. Once this happens, any access to
+ * the CE registers can cause a hardware fault.
+ */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
- wm_regs->cc_mask);
-
- spin_unlock_bh(&ce->ce_lock);
+ wm_regs->cc_mask | wm_regs->wm_mask);
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
-
- spin_lock_bh(&ce->ce_lock);
-
- /*
- * Misc CE interrupts are not being handled, but still need
- * to be cleared.
- */
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
-
- spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
@@ -1372,45 +1367,55 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
-int ath10k_ce_disable_interrupts(struct ath10k *ar)
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr;
- int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
- ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
- ath10k_ce_error_intr_disable(ar, ctrl_addr);
- ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
- }
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
- return 0;
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
-void ath10k_ce_enable_interrupts(struct ath10k *ar)
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- int ce_id;
struct ath10k_ce_pipe *ce_state;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
/* Enable interrupts for copy engines that
* are not using polling mode.
*/
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
-
- ath10k_ce_per_engine_handler_adjust(ce_state);
- }
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
@@ -1555,7 +1560,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc_64) +
+ (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 75df79d43120..666ce384a1d8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -255,10 +255,13 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data);
+
void ath10k_ce_alloc_rri(struct ath10k *ar);
void ath10k_ce_free_rri(struct ath10k *ar);
@@ -369,18 +372,14 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
-#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0))
static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- if (!ar->hw_params.per_ce_irq)
- return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
- ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
- CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
- else
- return CE_INTERRUPT_SUMMARY;
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
}
/* Host software's Copy Engine configuration. */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 340ce327ac14..d73ad60b571c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -119,7 +119,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -155,7 +154,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -220,7 +218,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -255,7 +252,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -290,7 +286,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -328,12 +323,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
+ .supports_peer_stats_info = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -369,7 +364,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -417,7 +411,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -462,7 +455,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -497,7 +489,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -534,7 +525,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -603,7 +593,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -631,7 +620,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
- .per_ce_irq = true,
.shadow_reg_support = true,
.rri_on_ddr = true,
.hw_filter_reset_required = false,
@@ -740,6 +728,16 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
+ ret = ath10k_bmi_read32(ar, hi_option_flag2, &param);
+ if (ret)
+ return ret;
+
+ param |= HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST;
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag2, param);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1024,7 +1022,7 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
return 0;
}
-static int ath10k_core_check_dt(struct ath10k *ar)
+int ath10k_core_check_dt(struct ath10k *ar)
{
struct device_node *node;
const char *variant = NULL;
@@ -1045,6 +1043,7 @@ static int ath10k_core_check_dt(struct ath10k *ar)
return 0;
}
+EXPORT_SYMBOL(ath10k_core_check_dt);
static int ath10k_download_fw(struct ath10k *ar)
{
@@ -1439,10 +1438,17 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
}
if (ar->id.qmi_ids_valid) {
- scnprintf(name, name_len,
- "bus=%s,qmi-board-id=%x",
- ath10k_bus_str(ar->hif.bus),
- ar->id.qmi_board_id);
+ if (with_variant && ar->id.bdf_ext[0] != '\0')
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id, ar->id.qmi_chip_id,
+ variant);
+ else
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id);
goto out;
}
@@ -2320,7 +2326,7 @@ static void ath10k_core_restart(struct work_struct *work)
break;
case ATH10K_STATE_RESTARTED:
ar->state = ATH10K_STATE_WEDGED;
- /* fall through */
+ fallthrough;
case ATH10K_STATE_WEDGED:
ath10k_warn(ar, "device is wedged, will not restart\n");
break;
@@ -2614,6 +2620,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw->fw_file.fw_features)) {
ath10k_bmi_start(ar);
+ /* Enable hardware clock to speed up firmware download */
+ if (ar->hw_params.hw_ops->enable_pll_clk) {
+ status = ar->hw_params.hw_ops->enable_pll_clk(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot enable pll ret %d\n",
+ status);
+ }
+
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
goto err;
@@ -2797,6 +2810,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
val |= WMI_10_4_REPORT_AIRTIME;
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 5c18f6c20462..b50ab9e229dc 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -82,6 +82,8 @@
/* Default Airtime weight multiplier (tuned for multi-client performance) */
#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
+#define ATH10K_MAX_RETRY_COUNT 30
+
struct ath10k;
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -109,6 +111,7 @@ enum ath10k_skb_flags {
ATH10K_SKB_F_MGMT = BIT(3),
ATH10K_SKB_F_QOS = BIT(4),
ATH10K_SKB_F_RAW_TX = BIT(5),
+ ATH10K_SKB_F_NOACK_TID = BIT(6),
};
struct ath10k_skb_cb {
@@ -509,6 +512,8 @@ struct ath10k_htt_tx_stats {
u64 ack_fails;
};
+#define ATH10K_TID_MAX 8
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -542,6 +547,13 @@ struct ath10k_sta {
#endif
/* Protected with ar->data_lock */
u32 peer_ps_state;
+ struct work_struct tid_config_wk;
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@@ -614,6 +626,14 @@ struct ath10k_vif {
/* For setting VHT peer fixed rate, protected by conf_mutex */
int vht_num_rates;
u8 vht_pfr;
+ u32 tid_conf_changed[ATH10K_TID_MAX];
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
+ u32 tids_rst;
};
struct ath10k_vif_iter {
@@ -1056,6 +1076,7 @@ struct ath10k {
bool bmi_ids_valid;
bool qmi_ids_valid;
u32 qmi_board_id;
+ u32 qmi_chip_id;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
@@ -1295,6 +1316,7 @@ int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+int ath10k_core_check_dt(struct ath10k *ar);
void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 2a4498067024..7eb72290a925 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -270,6 +270,277 @@ static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
{0x80010, 0x80020},
};
+static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8010, 0x8060},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80ec},
+ {0x8110, 0x8128},
+ {0x9000, 0x9004},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+
+	/* EFUSE0,1,2 are disabled here
+	 * because their state may be reset
+ *
+ * {0x24800, 0x24804},
+ * {0x25000, 0x25004},
+ * {0x25800, 0x25804},
+ */
+
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+
+	/* The DBI window is skipped here; it can only be accessed when PCIe
+	 * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN == 0 &&
+	 * PCIE_CTRL_APP_LTSSM_ENALBE == 0.
+	 * {0x3C000, 0x3C004},
+ */
+
+ {0x40000, 0x400A4},
+
+	/* The SI register is skipped here
+	 * because accessing it would cause a bus hang.
+ *
+ * {0x50000, 0x50018},
+ */
+
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
{0x800, 0x810},
{0x820, 0x82C},
@@ -602,6 +873,59 @@ static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
},
};
+static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_sdio_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections),
+ },
+ },
+};
+
static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
@@ -968,6 +1292,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -976,6 +1301,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -984,6 +1310,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_3_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -992,6 +1319,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_2_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw21_mem_regions,
.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
@@ -1000,6 +1328,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_3_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -1008,14 +1337,25 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_3_2_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
},
},
{
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_SDIO,
+ .region_table = {
+ .regions = qca6174_hw30_sdio_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions),
+ },
+ },
+ {
.hw_id = QCA9377_HW_1_1_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9377,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -1024,6 +1364,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA988X_HW_2_0_VERSION,
.hw_rev = ATH10K_HW_QCA988X,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca988x_hw20_mem_regions,
.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
@@ -1032,6 +1373,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA9984_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9984,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -1040,6 +1382,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA9888_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9888,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -1048,6 +1391,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA99X0_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA99X0,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca99x0_hw20_mem_regions,
.size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
@@ -1056,6 +1400,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA4019_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA4019,
+ .bus = ATH10K_BUS_AHB,
.region_table = {
.regions = qca4019_hw10_mem_regions,
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
@@ -1064,6 +1409,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = WCN3990_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_WCN3990,
+ .bus = ATH10K_BUS_SNOC,
.region_table = {
.regions = wcn399x_hw10_mem_regions,
.size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
@@ -1111,7 +1457,8 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
if (ar->target_version == hw_mem_layouts[i].hw_id &&
- ar->hw_rev == hw_mem_layouts[i].hw_rev)
+ ar->hw_rev == hw_mem_layouts[i].hw_rev &&
+ hw_mem_layouts[i].bus == ar->hif.bus)
return &hw_mem_layouts[i];
}
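
The lookup above now keys on a (hw_id, hw_rev, bus) triple rather than just the hardware id and revision, which is what lets QCA6174 hw3.2 carry separate region tables for PCI and SDIO. A minimal standalone sketch of the same selection logic, with simplified types standing in for the driver structs (the enum values here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdint.h>

enum bus_type { BUS_PCI, BUS_SDIO };	/* illustrative stand-ins */

struct mem_layout {
	uint32_t hw_id;
	uint32_t hw_rev;
	enum bus_type bus;
};

/* Return the first layout matching all three keys, or NULL. */
const struct mem_layout *
get_mem_layout(const struct mem_layout *tbl, size_t n,
	       uint32_t hw_id, uint32_t hw_rev, enum bus_type bus)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].hw_id == hw_id && tbl[i].hw_rev == hw_rev &&
		    tbl[i].bus == bus)
			return &tbl[i];
	return NULL;
}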
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e760ce1a5f1e..42404e246e0e 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -156,6 +156,7 @@ struct ath10k_mem_region {
struct ath10k_hw_mem_layout {
u32 hw_id;
u32 hw_rev;
+ enum ath10k_bus bus;
struct {
const struct ath10k_mem_region *regions;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d787cbead56a..5c1af2021883 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ if (idx < 0 || idx >= htt->rx_ring.size) {
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
+ idx &= htt->rx_ring.size_mask;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
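
The guard above treats an out-of-range fill index as firmware misbehaviour: it masks the index back into the ring before bailing out, so any later accesses stay bounded. The masking works because the ring size is a power of two, which makes size - 1 a valid mask. A standalone sketch of the idiom (power-of-two size assumed, as the driver's size_mask implies):

#include <stdint.h>

/* For a power-of-two ring, size_mask == size - 1 and the AND below
 * maps any value into [0, size).
 */
uint32_t ring_clamp(uint32_t idx, uint32_t size)
{
	uint32_t size_mask = size - 1;

	return idx & size_mask;
}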
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
if (!skb) {
@@ -941,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
+ u32 stbc, nsts_su;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
@@ -985,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
*/
bw = info2 & 3;
sgi = info3 & 1;
+ stbc = (info2 >> 3) & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
- nss = ((info2 >> 10) & 0x07) + 1;
+ nsts_su = ((info2 >> 10) & 0x07);
+ if (stbc)
+ nss = (nsts_su >> 2) + 1;
+ else
+ nss = (nsts_su + 1);
} else {
/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
* so it's impossible to decode MCS. Also since
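
The SU-MIMO branch above derives NSS from the NSTS field of VHT-SIG-A and treats the STBC case specially, since with STBC the space-time stream count no longer equals the spatial stream count. A standalone sketch mirroring exactly the arithmetic in the hunk above (field positions taken from the patch; no claim is made beyond what the patch computes):

#include <stdint.h>

/* info2 layout per the hunk above: STBC at bit 3, NSTS at bits 10..12. */
uint8_t vht_su_nss(uint32_t info2)
{
	uint32_t stbc = (info2 >> 3) & 1;
	uint32_t nsts = (info2 >> 10) & 0x07;

	/* Same mapping as the patch: NSTS is interpreted differently
	 * when STBC is in use.
	 */
	return stbc ? (nsts >> 2) + 1 : nsts + 1;
}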
@@ -3017,7 +3031,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
- /* fall through */
+ fallthrough;
default:
/* Should not happen. */
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
@@ -3575,12 +3589,14 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
}
if (ar->htt.disable_tx_comp) {
- arsta->tx_retries += peer_stats->retry_pkts;
arsta->tx_failed += peer_stats->failed_pkts;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
- arsta->tx_retries, arsta->tx_failed);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
+ arsta->tx_failed);
}
+ arsta->tx_retries += peer_stats->retry_pkts;
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d\n", arsta->tx_retries);
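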
+
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index bbe869575855..1fc0a312ab58 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1314,7 +1314,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
@@ -1460,7 +1460,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_32;
@@ -1662,7 +1662,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
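
These hunks replace /* fall through */ comments with the fallthrough; pseudo-keyword, which the kernel maps to the compiler's fallthrough attribute where available so that implicit-fallthrough warnings can tell intentional fall-through from accidental. A compiler-agnostic standalone sketch of the idiom (the macro here is a simplified stand-in for the kernel's definition, not a copy of it):

#if defined(__has_attribute)
#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#endif
#endif
#ifndef fallthrough
#define fallthrough do {} while (0)	/* fallback: plain null statement */
#endif

int classify(int mode)
{
	int flags = 0;

	switch (mode) {
	case 0:
	case 1:
		flags |= 1;	/* header present */
		fallthrough;	/* intentionally also set the packet type */
	case 2:
		flags |= 2;
		break;
	default:
		return -1;
	}
	return flags;
}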
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f16edcb9f326..c6ded21f5ed6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -593,9 +593,6 @@ struct ath10k_hw_params {
/* Target rx ring fill level */
u32 rx_ring_fill_level;
- /* target supporting per ce IRQ */
- bool per_ce_irq;
-
/* target supporting shadow register for ce write */
bool shadow_reg_support;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3c0c33a9f30c..2e3eb5bbe49c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2019,8 +2019,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_csa_is_complete(vif)) {
- ieee80211_csa_update_counter(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_update_cntdwn(vif);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2468,17 +2468,17 @@ ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
idx_limit = -1;
switch (idx_limit) {
- case 0: /* fall through */
- case 1: /* fall through */
- case 2: /* fall through */
- case 3: /* fall through */
- case 4: /* fall through */
- case 5: /* fall through */
- case 6: /* fall through */
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
default:
/* see ath10k_mac_can_set_bitrate_mask() */
WARN_ON(1);
- /* fall through */
+ fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
@@ -3013,6 +3013,69 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
+static int ath10k_new_peer_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ bool config_apply;
+ int ret, i;
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ if (arvif->retry_long[i] || arvif->ampdu[i] ||
+ arvif->rate_ctrl[i] || arvif->rtscts[i]) {
+ config_apply = true;
+ arg.tid = i;
+ arg.vdev_id = arvif->vdev_id;
+ arg.retry_count = arvif->retry_long[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+
+ if (arvif->rtscts[i])
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ else
+ arg.ext_tid_cfg_bitmap = 0;
+
+ arg.rtscts_ctrl = arvif->rtscts[i];
+ }
+
+ if (arvif->noack[i]) {
+ arg.ack_policy = arvif->noack[i];
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ config_apply = true;
+ }
+
+		/* Assign the default value (-1) to a newly connected station.
+		 * This marks TIDs for which no station-specific configuration
+		 * has been applied yet.
+		 */
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+
+ if (!config_apply)
+ continue;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n",
+ sta->addr, ret);
+ return ret;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ }
+
+ return 0;
+}
+
static int ath10k_station_assoc(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3078,7 +3141,10 @@ static int ath10k_station_assoc(struct ath10k *ar,
}
}
- return ret;
+ if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map))
+ return ret;
+
+ return ath10k_new_peer_tid_config(ar, sta, arvif);
}
static int ath10k_station_disassoc(struct ath10k *ar,
@@ -3626,7 +3692,10 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_data = ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_data_qos(hdr->frame_control);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_sta *arsta;
+ u8 tid, *qos_ctl;
+ bool noack = false;
cb->flags = 0;
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
@@ -3635,8 +3704,27 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
if (ieee80211_is_mgmt(hdr->frame_control))
cb->flags |= ATH10K_SKB_F_MGMT;
- if (ieee80211_is_data_qos(hdr->frame_control))
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
cb->flags |= ATH10K_SKB_F_QOS;
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK)
+ noack = false;
+ }
+
+ if (noack)
+ cb->flags |= ATH10K_SKB_F_NOACK_TID;
+ }
/* Data frames encrypted in software will be posted to firmware
* with tx encap mode set to RAW. Ex: Multicast traffic generated
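
The QoS branch added above resolves the effective ack policy per TID with a fixed precedence: the vif-level setting provides the default, and a station-level setting, when present, overrides it in either direction. A standalone sketch of that precedence rule (the sentinel values are illustrative, not the WMI constants):

#include <stdbool.h>

enum tid_ack { ACK_UNSET = -1, ACK_NORMAL = 0, ACK_NOACK = 1 }; /* illustrative */

bool tid_is_noack(enum tid_ack vif_cfg, enum tid_ack sta_cfg)
{
	bool noack = (vif_cfg == ACK_NOACK);	/* vif-level default */

	if (sta_cfg == ACK_NOACK)		/* station override: on */
		noack = true;
	if (sta_cfg == ACK_NORMAL)		/* station override: off */
		noack = false;
	return noack;
}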
@@ -4238,7 +4326,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
} else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
}
- /* fall through */
+ fallthrough;
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
ar->scan_channel = NULL;
@@ -6597,6 +6685,581 @@ out:
return ret;
}
+struct ath10k_mac_iter_tid_conf_data {
+ struct ieee80211_vif *curr_vif;
+ struct ath10k *ar;
+ bool reset_config;
+};
+
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *vht_num_rates)
+{
+ int num_rates = 0;
+ int i, tmp;
+
+ num_rates += hweight32(mask->control[band].legacy);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ *vht_num_rates = 0;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ tmp = hweight16(mask->control[band].vht_mcs[i]);
+ num_rates += tmp;
+ *vht_num_rates += tmp;
+ }
+
+ return num_rates == 1;
+}
+
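
ath10k_mac_bitrate_mask_has_single_rate() above simply counts set bits across the legacy, HT and VHT portions of a cfg80211 bitrate mask; exactly one set bit overall means a single fixed rate was requested. A standalone worked example of the same counting, using builtin popcounts in place of the kernel's hweight helpers (array sizes here are illustrative, and on 2.4 GHz the first legacy index is typically the 1 Mbps CCK rate):

#include <stdint.h>
#include <stdio.h>

static int count_rates(uint32_t legacy, const uint8_t ht[2],
		       const uint16_t vht[2])
{
	int n = __builtin_popcount(legacy);

	for (int i = 0; i < 2; i++)
		n += __builtin_popcount(ht[i]);
	for (int i = 0; i < 2; i++)
		n += __builtin_popcount(vht[i]);
	return n;
}

int main(void)
{
	uint8_t ht[2] = { 0, 0 };
	uint16_t vht[2] = { 0, 0 };

	/* Only legacy bit 0 set: a single fixed rate. */
	printf("%s\n", count_rates(1u << 0, ht, vht) == 1 ?
	       "single fixed rate" : "not a single rate");
	return 0;
}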
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss, bool vht_only)
+{
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (vht_only)
+ goto next;
+
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
+
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+next:
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
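
The helper above packs a WMI rate code as preamble in bits 7..6, (NSS - 1) in bits 5..4 and the hardware rate or MCS in bits 3..0. A standalone worked example of the packing and unpacking (the preamble values are illustrative placeholders for the WMI_RATE_PREAMBLE_* constants):

#include <stdint.h>
#include <stdio.h>

enum { PREAM_OFDM = 0, PREAM_CCK = 1, PREAM_HT = 2, PREAM_VHT = 3 }; /* illustrative */

static uint8_t rate_code(uint8_t pream, uint8_t nss, uint8_t rate)
{
	return (uint8_t)(pream << 6 | (nss - 1) << 4 | rate);
}

int main(void)
{
	/* VHT MCS 7, two spatial streams */
	uint8_t rc = rate_code(PREAM_VHT, 2, 7);

	printf("code 0x%02x pream %u nss %u mcs %u\n",
	       rc, rc >> 6, ((rc >> 4) & 0x3) + 1, rc & 0xF);
	return 0;
}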
+static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ u32 rate_ctrl_flag, u8 nss)
+{
+ if (nss > sta->rx_nss) {
+ ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
+ nss, sta->rx_nss);
+ return -EINVAL;
+ }
+
+ if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
+ if (!sta->vht_cap.vht_supported) {
+ ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
+ if (!sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_mac_tid_bitrate_config(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 *rate_ctrl_flag, u8 *rate_ctrl,
+ enum nl80211_tx_rate_setting txrate_type,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 nss, rate;
+ int vht_num_rates, ret;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_AUTOMATIC) {
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ *rate_ctrl_flag = 0;
+ return 0;
+ }
+
+ band = def.chan->band;
+
+ if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
+ &vht_num_rates)) {
+ return -EINVAL;
+ }
+
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate: %d\n",
+ ret);
+ return ret;
+ }
+
+ *rate_ctrl_flag = rate;
+
+ if (sta && ath10k_mac_validate_rate_mask(ar, sta, *rate_ctrl_flag, nss))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_FIXED)
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE;
+ else if (txrate_type == NL80211_TX_RATE_LIMITED &&
+ (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)))
+ *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, u32 changed,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
+ int ret;
+
+ if (sta) {
+ if (!sta->wme)
+ return -ENOTSUPP;
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arsta->retry_long[arg->tid] > 0 ||
+ arsta->rate_code[arg->tid] > 0 ||
+ arsta->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->aggr_control = 0;
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ ether_addr_copy(arg->peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg);
+ if (ret)
+ return ret;
+
+ /* Store the configured parameters in success case */
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ arsta->noack[arg->tid] = arg->ack_policy;
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ arsta->retry_long[arg->tid] = arg->retry_count;
+ arg->retry_count = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ arsta->ampdu[arg->tid] = arg->aggr_control;
+ arg->aggr_control = 0;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ arsta->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arsta->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+ } else {
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arvif->retry_long[arg->tid] ||
+ arvif->rate_code[arg->tid] ||
+ arvif->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ } else {
+ arvif->noack[arg->tid] = arg->ack_policy;
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ else
+ arvif->retry_long[arg->tid] = arg->retry_count;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ else
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE));
+ } else {
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arvif->rate_code[arg->tid] = arg->rcode_flags;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arvif->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+
+ if (changed)
+ arvif->tid_conf_changed[arg->tid] |= changed;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_mac_parse_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif,
+ struct cfg80211_tid_cfg *tid_conf,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ u32 changed = tid_conf->mask;
+ int ret = 0, i = 0;
+
+ if (!changed)
+ return -EINVAL;
+
+ while (i < ATH10K_TID_MAX) {
+ if (!(tid_conf->tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg->tid = i;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) {
+ arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ } else {
+ arg->ack_policy =
+ WMI_PEER_TID_CONFIG_ACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG))
+ arg->retry_count = tid_conf->retry_long;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE)
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ else
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ ret = ath10k_mac_tid_bitrate_config(ar, vif, sta,
+ &arg->rcode_flags,
+ &arg->rate_ctrl,
+ tid_conf->txrate_type,
+ &tid_conf->txrate_mask);
+ if (ret) {
+ ath10k_warn(ar, "failed to configure bitrate mask %d\n",
+ ret);
+ arg->rcode_flags = 0;
+ arg->rate_ctrl = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (tid_conf->rtscts)
+ arg->rtscts_ctrl = tid_conf->rtscts;
+
+ arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+ }
+
+ ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg);
+ if (ret)
+ return ret;
+ i++;
+ }
+
+ return ret;
+}
+
+static int ath10k_mac_reset_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif,
+ u8 tids)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct wmi_per_peer_per_tid_cfg_arg arg;
+ int ret = 0, i = 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ while (i < ATH10K_TID_MAX) {
+ if (!(tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg.tid = i;
+ arg.ack_policy = WMI_PEER_TID_CONFIG_ACK;
+ arg.retry_count = ATH10K_MAX_RETRY_COUNT;
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE;
+ arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!arvif->tids_rst) {
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+ arsta->rate_code[i] = -1;
+ arsta->rate_ctrl[i] = 0;
+ arsta->rtscts[i] = -1;
+ } else {
+ arvif->retry_long[i] = 0;
+ arvif->noack[i] = 0;
+ arvif->ampdu[i] = 0;
+ arvif->rate_code[i] = 0;
+ arvif->rate_ctrl[i] = 0;
+ arvif->rtscts[i] = 0;
+ }
+
+ i++;
+ }
+
+ return ret;
+}
+
+static void ath10k_sta_tid_cfg_wk(struct work_struct *wk)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif;
+ struct ath10k *ar;
+ bool config_apply;
+ int ret, i;
+ u32 changed;
+ u8 nss;
+
+ arsta = container_of(wk, struct ath10k_sta, tid_config_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->tids_rst) {
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif,
+ arvif->tids_rst);
+ goto exit;
+ }
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ changed = arvif->tid_conf_changed[i];
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (arsta->noack[i] != -1) {
+ arg.ack_policy = 0;
+ } else {
+ config_apply = true;
+ arg.ack_policy = arvif->noack[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arsta->retry_long[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.retry_count = 0;
+ } else {
+ arg.retry_count = arvif->retry_long[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->ampdu[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.aggr_control = 0;
+ } else {
+ arg.aggr_control = arvif->ampdu[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ nss = ATH10K_HW_NSS(arvif->rate_code[i]);
+ ret = ath10k_mac_validate_rate_mask(ar, sta,
+ arvif->rate_code[i],
+ nss);
+ if (ret &&
+ arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+ if (arsta->rate_ctrl[i] >
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ } else {
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (arsta->rtscts[i]) {
+ arg.rtscts_ctrl = 0;
+ arg.ext_tid_cfg_bitmap = 0;
+ } else {
+ arg.rtscts_ctrl = arvif->rtscts[i] - 1;
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ config_apply = true;
+ }
+ }
+
+ arg.tid = i;
+
+ if (config_apply) {
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ ath10k_warn(ar, "failed to set per tid config for sta %pM: %d\n",
+ sta->addr, ret);
+ }
+
+ arg.ack_policy = 0;
+ arg.retry_count = 0;
+ arg.aggr_control = 0;
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_vif_stations_tid_conf(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data *iter_data = data;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta_vif != iter_data->curr_vif || !sta->wme)
+ return;
+
+ ieee80211_queue_work(iter_data->ar->hw, &arsta->tid_config_wk);
+}
+
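
ath10k_mac_vif_stations_tid_conf() runs under ieee80211_iterate_stations_atomic(), so it must not sleep or issue WMI commands directly; it only queues each station's tid_config_wk, and the work function later takes conf_mutex and does the sleeping part. A standalone sketch of that defer-from-atomic pattern, with a pthread standing in for the kernel's workqueue (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pending;			/* stands in for the queued work item */

static void iterate_cb(int sta_id)	/* "atomic" context: no blocking */
{
	pending = sta_id;		/* just mark work; don't lock here */
}

static void *worker(void *arg)		/* process context: may sleep */
{
	(void)arg;
	pthread_mutex_lock(&conf_mutex);
	printf("apply tid config for sta %d\n", pending);
	pthread_mutex_unlock(&conf_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	iterate_cb(42);
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return 0;
}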
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -6616,6 +7279,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arsta->arvif = arvif;
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
ath10k_mac_txq_init(sta->txq[i]);
@@ -6623,8 +7287,10 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
+ new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->tid_config_wk);
+ }
mutex_lock(&ar->conf_mutex);
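
The comment in this hunk is the key constraint: cancel_work_sync() waits for the work item to finish, and the work item itself takes conf_mutex, so cancelling while holding the mutex would deadlock. The new tid_config_wk is cancelled in the same pre-lock spot as update_wk for that reason. A standalone sketch of the ordering, with pthreads standing in for the kernel primitives:

#include <pthread.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

void queued_work(void)			/* the work item takes conf_mutex */
{
	pthread_mutex_lock(&conf_mutex);
	/* ... apply station configuration ... */
	pthread_mutex_unlock(&conf_mutex);
}

void remove_station(void)
{
	/* Waiting for queued_work() to finish (cancel_work_sync) while
	 * holding conf_mutex would deadlock, so cancel before locking.
	 */
	/* cancel_work_sync(&wk) equivalent goes here */
	pthread_mutex_lock(&conf_mutex);
	/* ... tear the station down under the lock ... */
	pthread_mutex_unlock(&conf_mutex);
}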
@@ -7033,8 +7699,6 @@ exit:
return ret;
}
-#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
-
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
@@ -7278,7 +7942,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
struct ieee80211_channel *channel)
{
int ret;
- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex);
@@ -7347,30 +8011,6 @@ exit:
}
static bool
-ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
- enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask,
- int *vht_num_rates)
-{
- int num_rates = 0;
- int i, tmp;
-
- num_rates += hweight32(mask->control[band].legacy);
-
- for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
- num_rates += hweight8(mask->control[band].ht_mcs[i]);
-
- *vht_num_rates = 0;
- for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
- tmp = hweight16(mask->control[band].vht_mcs[i]);
- num_rates += tmp;
- *vht_num_rates += tmp;
- }
-
- return num_rates == 1;
-}
-
-static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
@@ -7419,69 +8059,6 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
return true;
}
-static int
-ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
- enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask,
- u8 *rate, u8 *nss, bool vht_only)
-{
- int rate_idx;
- int i;
- u16 bitrate;
- u8 preamble;
- u8 hw_rate;
-
- if (vht_only)
- goto next;
-
- if (hweight32(mask->control[band].legacy) == 1) {
- rate_idx = ffs(mask->control[band].legacy) - 1;
-
- if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
- rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
-
- hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
- bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
-
- if (ath10k_mac_bitrate_is_cck(bitrate))
- preamble = WMI_RATE_PREAMBLE_CCK;
- else
- preamble = WMI_RATE_PREAMBLE_OFDM;
-
- *nss = 1;
- *rate = preamble << 6 |
- (*nss - 1) << 4 |
- hw_rate << 0;
-
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
- if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
- *nss = i + 1;
- *rate = WMI_RATE_PREAMBLE_HT << 6 |
- (*nss - 1) << 4 |
- (ffs(mask->control[band].ht_mcs[i]) - 1);
-
- return 0;
- }
- }
-
-next:
- for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
- if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
- *nss = i + 1;
- *rate = WMI_RATE_PREAMBLE_VHT << 6 |
- (*nss - 1) << 4 |
- (ffs(mask->control[band].vht_mcs[i]) - 1);
-
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
u8 rate, u8 nss, u8 sgi, u8 ldpc)
{
@@ -8363,19 +8940,32 @@ static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8
u8 *flags, u8 *bw)
{
struct ath10k_index_ht_data_rate_type *mcs_rate;
+ u8 index;
+ size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1);
+ size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2);
+
+ if (mcs >= (len_nss1 + len_nss2)) {
+		ath10k_warn(ar, "unsupported mcs %d in current rate table\n", mcs);
+ return;
+ }
mcs_rate = (struct ath10k_index_ht_data_rate_type *)
((nss == 1) ? &supported_ht_mcs_rate_nss1 :
&supported_ht_mcs_rate_nss2);
- if (rate == mcs_rate[mcs].supported_rate[0]) {
+ if (mcs >= len_nss1)
+ index = mcs - len_nss1;
+ else
+ index = mcs;
+
+ if (rate == mcs_rate[index].supported_rate[0]) {
*bw = RATE_INFO_BW_20;
- } else if (rate == mcs_rate[mcs].supported_rate[1]) {
+ } else if (rate == mcs_rate[index].supported_rate[1]) {
*bw |= RATE_INFO_BW_40;
- } else if (rate == mcs_rate[mcs].supported_rate[2]) {
+ } else if (rate == mcs_rate[index].supported_rate[2]) {
*bw |= RATE_INFO_BW_20;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (rate == mcs_rate[mcs].supported_rate[3]) {
+ } else if (rate == mcs_rate[index].supported_rate[3]) {
*bw |= RATE_INFO_BW_40;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
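
The bounds check added above guards the two HT rate tables: MCS values 0..7 index the NSS1 table while 8..15 fold back to indexes 0..7 of the NSS2 table. A standalone worked example of the folding (table lengths of 8 are an assumption from 802.11n per-stream MCS numbering; the driver itself selects the table from the reported NSS and only folds the index):

#include <stdio.h>

#define LEN_NSS1 8
#define LEN_NSS2 8

/* Map a 0..15 HT MCS to (table, index); returns -1 when out of range. */
static int ht_mcs_fold(int mcs, int *table, int *index)
{
	if (mcs < 0 || mcs >= LEN_NSS1 + LEN_NSS2)
		return -1;
	*table = (mcs >= LEN_NSS1) ? 2 : 1;
	*index = (mcs >= LEN_NSS1) ? mcs - LEN_NSS1 : mcs;
	return 0;
}

int main(void)
{
	int t, i;

	if (!ht_mcs_fold(12, &t, &i))
		printf("mcs 12 -> nss%d table, index %d\n", t, i); /* nss2, 4 */
	return 0;
}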
@@ -8436,6 +9026,9 @@ static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
u8 flags = 0, bw = 0;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n",
+ rate_code, bitrate_kbps);
+
if (preamble == WMI_RATE_PREAMBLE_HT)
mode = ATH10K_PHY_MODE_HT;
else if (preamble == WMI_RATE_PREAMBLE_VHT)
@@ -8528,29 +9121,99 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (ar->htt.disable_tx_comp) {
- sinfo->tx_retries = arsta->tx_retries;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->tx_failed = arsta->tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
+ sinfo->tx_retries = arsta->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+
ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
}
+static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ int ret, i;
+
+ mutex_lock(&ar->conf_mutex);
+ arg.vdev_id = arvif->vdev_id;
+
+ arvif->tids_rst = 0;
+ memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed));
+
+ for (i = 0; i < tid_config->n_tid_conf; i++) {
+ ret = ath10k_mac_parse_tid_config(ar, sta, vif,
+ &tid_config->tid_conf[i],
+ &arg);
+ if (ret)
+ goto exit;
+ }
+
+ if (sta)
+ goto exit;
+
+ ret = 0;
+ arvif->tids_rst = 0;
+ data.curr_vif = vif;
+ data.ar = ar;
+
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u8 tids)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct ath10k *ar = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta) {
+ arvif->tids_rst = 0;
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids);
+ goto exit;
+ }
+
+ arvif->tids_rst = tids;
+ data.curr_vif = vif;
+ data.ar = ar;
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -8594,6 +9257,8 @@ static const struct ieee80211_ops ath10k_ops = {
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
.sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
.sta_statistics = ath10k_sta_statistics,
+ .set_tid_config = ath10k_mac_op_set_tid_config,
+ .reset_tid_config = ath10k_mac_op_reset_tid_config,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -9264,6 +9929,28 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_NOACK) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) |
+ BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE);
+
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
+ }
+
+ ar->hw->wiphy->tid_config_support.peer =
+ ar->hw->wiphy->tid_config_support.vif;
+ ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT;
+ } else {
+ ar->ops->set_tid_config = NULL;
+ }
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index cfde7791291a..36426efdb2ea 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2184,7 +2184,7 @@ err_req:
if (ret == 0 && resp_len) {
*resp_len = min(*resp_len, xfer.resp_len);
- memcpy(resp, tresp, xfer.resp_len);
+ memcpy(resp, tresp, *resp_len);
}
err_dma:
kfree(treq);
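
The one-line pci.c fix above matters because *resp_len has already been clamped with min(); copying xfer.resp_len bytes afterwards could overrun the caller's buffer whenever the transfer returned more data than the caller asked for. A standalone sketch of the clamped-copy idiom:

#include <string.h>

size_t copy_response(void *dst, size_t dst_len,
		     const void *src, size_t src_len)
{
	size_t n = dst_len < src_len ? dst_len : src_len;	/* min() */

	memcpy(dst, src, n);	/* copy the clamped length, not src_len */
	return n;
}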
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 5468a41e928e..ae6b1f402adf 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -576,6 +576,8 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
if (resp->chip_info_valid) {
qmi->chip_info.chip_id = resp->chip_info.chip_id;
qmi->chip_info.chip_family = resp->chip_info.chip_family;
+ } else {
+ qmi->chip_info.chip_id = 0xFF;
}
if (resp->board_info_valid)
@@ -817,12 +819,18 @@ err_setup_msa:
static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
struct ath10k *ar = qmi->ar;
+ int ret;
ar->hif.bus = ATH10K_BUS_SNOC;
ar->id.qmi_ids_valid = true;
ar->id.qmi_board_id = qmi->board_info.board_id;
+ ar->id.qmi_chip_id = qmi->chip_info.chip_id;
ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");
+
return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 63f882c690bf..81ddaafb6721 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -23,6 +23,9 @@
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
+#include "coredump.h"
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
@@ -557,6 +560,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
+
+ queue_work(ar->workqueue, &ar->restart_work);
+			ath10k_warn(ar, "payload length exceeds max htc length, starting recovery\n");
+
goto err;
}
@@ -912,10 +919,9 @@ static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
out:
mutex_unlock(&irq_data->mtx);
- if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
- ath10k_err(ar, "firmware crashed!\n");
- queue_work(ar->workqueue, &ar->restart_work);
- }
+ if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
+ ath10k_sdio_fw_crashed_dump(ar);
+
return ret;
}
@@ -2181,6 +2187,323 @@ static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
return done;
}
+static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
+ u32 item_offset,
+ u32 *val)
+{
+ u32 addr;
+ int ret;
+
+ addr = host_interest_item_address(item_offset);
+
+ ret = ath10k_sdio_diag_read32(ar, addr, val);
+ if (ret)
+ ath10k_warn(ar, "unable to read host interest offset %d value\n",
+ item_offset);
+
+ return ret;
+}
+
+static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
+ u32 buf_len)
+{
+ u32 val;
+ int i, ret;
+
+ for (i = 0; i < buf_len; i += 4) {
+ ret = ath10k_sdio_diag_read32(ar, address + i, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read mem %d value\n", address + i);
+ break;
+ }
+ memcpy(buf + i, &val, 4);
+ }
+
+ return ret;
+}
+
+static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
+{
+ u32 param;
+
+ ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
+
+ return param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW;
+}
+
+static void ath10k_sdio_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+ u32 reg_dump_area;
+
+ ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
+ &reg_dump_area);
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+ else
+ ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
+ return;
+ }
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ reg_dump_values[i],
+ reg_dump_values[i + 1],
+ reg_dump_values[i + 2],
+ reg_dump_values[i + 3]);
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
+ crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
+}
+
+static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+	/* fill the gap between the region start address and the
+	 * first section
+	 */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_sdio_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf,
+ bool fast_dump)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_sdio_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+	/* No individual memory sections defined, so copy the
+	 * entire memory region in one go.
+	 */
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ else
+ ret = ath10k_sdio_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_sdio_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
+ fast_dump);
+ if (ret >= 0)
+ count = ret;
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+ bool fast_dump;
+
+ fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
+
+ if (fast_dump)
+ ath10k_bmi_start(ar);
+
+ ar->stats.fw_crash_counter++;
+
+ ath10k_sdio_disable_intrs(ar);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
+ ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
+
+ ath10k_sdio_enable_intrs(ar);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
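
ath10k_sdio_fw_crashed_dump() above follows a fixed sequence: probe whether firmware advertises the fast (BMI) dump path, quiesce SDIO interrupts, collect registers and memory into the coredump buffer, then re-enable interrupts and schedule the restart work so recovery happens only after the dump is complete. A condensed standalone outline of that ordering (the helpers are stubs standing in for the driver functions in this hunk):

#include <stdbool.h>
#include <stdio.h>

static bool fast_dump_supported(void) { return true; }
static void disable_intrs(void) { puts("intrs off"); }
static void dump_registers(bool fast) { printf("regs (fast=%d)\n", fast); }
static void dump_memory(bool fast) { printf("mem (fast=%d)\n", fast); }
static void enable_intrs(void) { puts("intrs on"); }
static void schedule_restart(void) { puts("restart queued"); }

int main(void)
{
	bool fast = fast_dump_supported();

	disable_intrs();	/* quiesce the bus while dumping */
	dump_registers(fast);
	dump_memory(fast);
	enable_intrs();
	schedule_restart();	/* recovery only after the dump */
	return 0;
}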
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 354d49b1cd45..fd41f25456dc 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -45,6 +46,7 @@ static const char * const ath10k_regulators[] = {
"vdd-1.8-xo",
"vdd-1.3-rfa",
"vdd-3.3-ch0",
+ "vdd-3.3-ch1",
};
static const char * const ath10k_clocks[] = {
@@ -923,6 +925,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1158,7 +1161,9 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
return IRQ_HANDLED;
}
- ath10k_snoc_irq_disable(ar);
+ ath10k_ce_disable_interrupt(ar, ce_id);
+ set_bit(ce_id, ar_snoc->pending_ce_irqs);
+
napi_schedule(&ar->napi);
return IRQ_HANDLED;
@@ -1167,20 +1172,25 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int done = 0;
+ int ce_id;
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
napi_complete(ctx);
return done;
}
- ath10k_ce_per_engine_service_any(ar);
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
+ ath10k_ce_per_engine_service(ar, ce_id);
+ ath10k_ce_enable_interrupt(ar, ce_id);
+ }
+
done = ath10k_htt_txrx_compl_task(ar, budget);
- if (done < budget) {
+ if (done < budget)
napi_complete(ctx);
- ath10k_snoc_irq_enable(ar);
- }
return done;
}
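
The snoc changes replace the blunt disable-all/enable-all interrupt handling with a per-copy-engine bitmap: the hard IRQ handler masks only its own CE and sets the matching bit, and the NAPI poll services and re-arms exactly the engines whose bits it atomically claims. A standalone sketch of that claim-and-service pattern, using GCC/Clang atomic builtins in place of the kernel's set_bit()/test_and_clear_bit():

#include <stdio.h>

#define CE_COUNT 12
static unsigned long pending;	/* one bit per copy engine */

static void irq_handler(int ce_id)
{
	/* mask just this CE (elided), then mark it pending */
	__atomic_fetch_or(&pending, 1UL << ce_id, __ATOMIC_RELAXED);
	/* napi_schedule() would go here */
}

static void napi_poll(void)
{
	for (int ce = 0; ce < CE_COUNT; ce++) {
		unsigned long bit = 1UL << ce;

		/* atomically claim the bit, as test_and_clear_bit() does */
		if (__atomic_fetch_and(&pending, ~bit, __ATOMIC_RELAXED) & bit)
			printf("service + re-enable CE %d\n", ce);
	}
}

int main(void)
{
	irq_handler(3);
	irq_handler(7);
	napi_poll();
	return 0;
}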
@@ -1772,9 +1782,18 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
return 0;
}
+static void ath10k_snoc_shutdown(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
+ ath10k_snoc_remove(pdev);
+}
+
static struct platform_driver ath10k_snoc_driver = {
.probe = ath10k_snoc_probe,
.remove = ath10k_snoc_remove,
+ .shutdown = ath10k_snoc_shutdown,
.driver = {
.name = "ath10k_snoc",
.of_match_table = ath10k_snoc_dt_match,
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index a3dd06f6ac62..5095d1893681 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -78,6 +78,7 @@ struct ath10k_snoc {
unsigned long flags;
bool xo_cal_supported;
u32 xo_cal_data;
+ DECLARE_BITMAP(pending_ce_irqs, CE_COUNT_MAX);
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index dff6c8ac9dba..ec556bb88d65 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -334,6 +334,17 @@ struct host_interest {
#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17)
/*
+ * If both SDIO_CRASH_DUMP_ENHANCEMENT_HOST and SDIO_CRASH_DUMP_ENHANCEMENT_FW
+ * flags are set, then crashdump upload will be done using the BMI host/target
+ * communication channel.
+ */
+/* Host supports using BMI to dump firmware memory on assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST 0x400
+
+/* Firmware supports using BMI to dump firmware memory on assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW 0x800
+
+/*
* CONSOLE FLAGS
*
* Bit Range Meaning
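
The two new host-interest bits form a small handshake: the host advertises BMI dump capability with the _HOST bit, and only takes the fast path when firmware reports the _FW bit in hi_option_flag2, which is exactly what ath10k_sdio_is_fast_dump_supported() checks earlier in this patch. A standalone sketch of the flag test (bit values copied from the hunk above):

#include <stdbool.h>
#include <stdint.h>

#define CRASH_DUMP_ENH_HOST 0x400	/* from the hunk above */
#define CRASH_DUMP_ENH_FW   0x800

/* Fast BMI dump only when both sides advertise support. */
bool bmi_dump_usable(uint32_t hi_option_flag2, bool host_supports)
{
	return host_supports && (hi_option_flag2 & CRASH_DUMP_ENH_FW);
}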
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index f46b9083bbf1..aefe1f7f906c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -50,6 +50,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ath10k_skb_cb *skb_cb;
struct ath10k_txq *artxq;
struct sk_buff *msdu;
+ u8 flags;
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u status %d\n",
@@ -78,6 +79,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
artxq->num_fw_queued--;
}
+ flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
@@ -101,18 +103,21 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(flags & ATH10K_SKB_F_NOACK_TID))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID)))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID))
info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
info->flags &= ~IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 0dd484f85082..aa57d807491c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -224,6 +224,8 @@ struct wmi_ops {
struct sk_buff *(*gen_bb_timing)
(struct ath10k *ar,
const struct wmi_bb_timing_cfg_arg *arg);
+ struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg);
};
@@ -1656,4 +1658,21 @@ ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->set_bb_timing_cmdid);
}
+
+static inline int
+ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->per_peer_per_tid_config_cmdid);
+}
#endif
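
The new wmi-ops plumbing follows this file's usual pattern: the inline wrapper returns -EOPNOTSUPP when a firmware branch has no gen_* op, otherwise builds the command skb and sends it with the per-branch command id. A standalone sketch of that op-table dispatch (the structs and names here are simplified stand-ins, not the driver's):

#include <errno.h>
#include <stdio.h>

struct cmd { int tid; };

struct ops {
	/* NULL when this firmware branch has no such command */
	int (*gen_cfg)(const struct cmd *c);
};

static int send_cfg(const struct ops *ops, const struct cmd *c)
{
	if (!ops->gen_cfg)
		return -EOPNOTSUPP;	/* branch doesn't support it */
	return ops->gen_cfg(c);		/* build + send */
}

static int gen_10_4(const struct cmd *c)
{
	printf("send per-peer per-tid cfg, tid %d\n", c->tid);
	return 0;
}

int main(void)
{
	struct ops wmi_10_4 = { .gen_cfg = gen_10_4 };
	struct ops wmi_tlv = { 0 };
	struct cmd c = { .tid = 3 };

	send_cfg(&wmi_10_4, &c);		/* supported */
	printf("%d\n", send_cfg(&wmi_tlv, &c));	/* -EOPNOTSUPP */
	return 0;
}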
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index e77b97ca5c7f..b39c9b78b32b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1614,6 +1614,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_MESH_11S, len);
SVCMAP(WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_SYNC_DELETE_CMDS, len);
+ SVCMAP(WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_SERVICE_PEER_STATS, len);
}
static inline void
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a81a1ab2de19..1fa7107a5051 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -740,6 +740,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
+ .per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};
static struct wmi_peer_param_map wmi_peer_param_map = {
@@ -3878,7 +3879,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->csa_active &&
- ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
continue;
}
@@ -6551,7 +6552,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
- u32 len, val;
+ u32 val;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
@@ -6603,10 +6604,8 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -6624,7 +6623,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10x *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
- u32 len, val;
+ u32 val;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -6668,10 +6667,8 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -6689,7 +6686,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10_2 *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
- u32 len, val, features;
+ u32 val, features;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
@@ -6741,10 +6738,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -6776,7 +6771,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10_4 *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10_4 config = {};
- u32 len;
config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
config.num_peers = __cpu_to_le32(ar->max_num_peers);
@@ -6838,10 +6832,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -7549,12 +7541,9 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
struct sk_buff *skb;
struct wmi_channel_arg *ch;
struct wmi_channel *ci;
- int len;
int i;
- len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
-
- skb = ath10k_wmi_alloc_skb(ar, len);
+ skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
if (!skb)
return ERR_PTR(-EINVAL);
@@ -9005,6 +8994,39 @@ ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
}
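+/* Build the 10.4 per-peer, per-TID configuration command: ack policy,
+ * aggregation, rate-control and RTS/CTS overrides for a single peer/TID,
+ * dispatched through the new gen_per_peer_per_tid_cfg op and the
+ * per_peer_per_tid_config_cmdid slot added below.
+ */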
static struct sk_buff *
+ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct wmi_peer_per_tid_cfg_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ memset(skb->data, 0, sizeof(*cmd));
+
+ cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
+ cmd->tid = cpu_to_le32(arg->tid);
+ cmd->ack_policy = cpu_to_le32(arg->ack_policy);
+ cmd->aggr_control = cpu_to_le32(arg->aggr_control);
+ cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
+ cmd->retry_count = cpu_to_le32(arg->retry_count);
+ cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
+ cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
+ cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
+ arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
+ arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
+ arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
@@ -9413,6 +9435,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_get_tpc_table_cmdid =
ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
+ .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
/* shared with 10.2 */
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 511144b36231..4898e19b0af6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -203,6 +203,8 @@ enum wmi_service {
WMI_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_TX_PWR_PER_PEER,
WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
/* Remember to add the new value to wmi_service_name()! */
@@ -503,6 +505,8 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
+ SVCSTR(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT);
+ SVCSTR(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT);
case WMI_SERVICE_MAX:
return NULL;
@@ -834,6 +838,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_PWR_PER_PEER, len);
SVCMAP(WMI_10_4_SERVICE_RESET_CHIP,
WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+	       WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, len);
}
#undef SVCMAP
@@ -1036,6 +1044,7 @@ struct wmi_cmd_map {
u32 tdls_set_offchan_mode_cmdid;
u32 radar_found_cmdid;
u32 set_bb_timing_cmdid;
+ u32 per_peer_per_tid_config_cmdid;
};
/*
@@ -1877,6 +1886,8 @@ enum wmi_10_4_cmd_id {
WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID,
WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID,
WMI_10_4_RADAR_FOUND_CMDID,
+ WMI_10_4_PEER_CFR_CAPTURE_CMDID,
+ WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
};
@@ -7220,6 +7231,71 @@ struct wmi_tdls_peer_event {
__le32 vdev_id;
} __packed;
+enum wmi_tid_aggr_control_conf {
+ WMI_TID_CONFIG_AGGR_CONTROL_IGNORE,
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE,
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE,
+};
+
+enum wmi_noack_tid_conf {
+ WMI_NOACK_TID_CONFIG_IGNORE_ACK_POLICY,
+ WMI_PEER_TID_CONFIG_ACK,
+ WMI_PEER_TID_CONFIG_NOACK,
+};
+
+enum wmi_tid_rate_ctrl_conf {
+ WMI_TID_CONFIG_RATE_CONTROL_IGNORE,
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO,
+ WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE,
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE,
+ WMI_PEER_TID_CONFIG_RATE_UPPER_CAP,
+};
+
+enum wmi_tid_rtscts_control_conf {
+ WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE,
+ WMI_TID_CONFIG_RTSCTS_CONTROL_DISABLE,
+};
+
+enum wmi_ext_tid_config_map {
+ WMI_EXT_TID_RTS_CTS_CONFIG = BIT(0),
+};
+
+struct wmi_per_peer_per_tid_cfg_arg {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ enum wmi_noack_tid_conf ack_policy;
+ enum wmi_tid_aggr_control_conf aggr_control;
+ u8 rate_ctrl;
+ u32 retry_count;
+ u32 rcode_flags;
+ u32 ext_tid_cfg_bitmap;
+ u32 rtscts_ctrl;
+};
+
+struct wmi_peer_per_tid_cfg_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tid;
+
+ /* see enum wmi_noack_tid_conf */
+ __le32 ack_policy;
+
+ /* see enum wmi_tid_aggr_control_conf */
+ __le32 aggr_control;
+
+ /* see enum wmi_tid_rate_ctrl_conf */
+ __le32 rate_control;
+ __le32 rcode_flags;
+ __le32 retry_count;
+
+ /* See enum wmi_ext_tid_config_map */
+ __le32 ext_tid_cfg_bitmap;
+
+ /* see enum wmi_tid_rtscts_control_conf */
+ __le32 rtscts_ctrl;
+} __packed;
+
enum wmi_txbf_conf {
WMI_TXBF_CONF_UNSUPPORTED,
WMI_TXBF_CONF_BEFORE_ASSOC,
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8c26adddd034..7d65c115669f 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -275,7 +275,7 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
- /* fall through */
+ fallthrough;
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 88a97356f0a1..ad5cc6cac05b 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -2,9 +2,7 @@
config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
- depends on REMOTEPROC
depends on CRYPTO_MICHAEL_MIC
- depends on ARCH_QCOM || COMPILE_TEST
select ATH_COMMON
select QCOM_QMI_HELPERS
help
@@ -13,6 +11,22 @@ config ATH11K
If you choose to build a module, it'll be called ath11k.
+config ATH11K_AHB
+ tristate "Atheros ath11k AHB support"
+ depends on ATH11K
+ depends on REMOTEPROC
+ help
+	  This module adds support for the AHB bus.
+
+config ATH11K_PCI
+ tristate "Atheros ath11k PCI support"
+ depends on ATH11K && PCI
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ help
+	  This module adds support for the PCIe bus.
+
config ATH11K_DEBUG
bool "QCA ath11k debugging"
depends on ATH11K
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 104186373c9e..c41d87bd025a 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -4,7 +4,6 @@ ath11k-y += core.o \
hal.o \
hal_tx.o \
hal_rx.o \
- ahb.o \
wmi.o \
mac.o \
reg.o \
@@ -16,13 +15,20 @@ ath11k-y += core.o \
debug.o \
ce.o \
peer.o \
- dbring.o
+ dbring.o \
+ hw.o
-ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
+ath11k_ahb-y += ahb.o
+
+obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o
+ath11k_pci-y += mhi.o pci.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 30092841ac46..430723c64adc 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -20,248 +20,19 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,ipq8074-wifi",
.data = (void *)ATH11K_HW_IPQ8074,
},
+ { .compatible = "qcom,ipq6018-wifi",
+ .data = (void *)ATH11K_HW_IPQ6018_HW10,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
-/* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .pipenum = __cpu_to_le32(1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE2: target->host WMI */
- {
- .pipenum = __cpu_to_le32(2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE3: host->target WMI */
- {
- .pipenum = __cpu_to_le32(3),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE4: host->target HTT */
- {
- .pipenum = __cpu_to_le32(4),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(256),
- .nbytes_max = __cpu_to_le32(256),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE5: target->host Pktlog */
- {
- .pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(0),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE6: Reserved for target autonomous hif_memcpy */
- {
- .pipenum = __cpu_to_le32(6),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(65535),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE7 used only by Host */
- {
- .pipenum = __cpu_to_le32(7),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE8 target->host used only by IPA */
- {
- .pipenum = __cpu_to_le32(8),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(65535),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE9 host->target HTT */
- {
- .pipenum = __cpu_to_le32(9),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE10 target->host HTT */
- {
- .pipenum = __cpu_to_le32(10),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE11 Not used */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(0),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-};
-
-/* Map from service/endpoint to Copy Engine.
- * This table is derived from the CE_PCI TABLE, above.
- * It is passed to the Target at startup for use by firmware.
- */
-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(7),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(9),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(0),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- { /* not used */
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(0),
- },
- { /* not used */
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(4),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(5),
- },
-
- /* (Additions here) */
-
- { /* terminator entry */ }
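+/* AHB devices boot firmware on the SoC's Q6 remoteproc, so there is no
+ * MHI transport or M3 firmware, and the board file and target memory
+ * live at fixed device addresses.
+ */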
+static const struct ath11k_bus_params ath11k_ahb_bus_params = {
+ .mhi_support = false,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
};
#define ATH11K_IRQ_CE0_OFFSET 4
@@ -321,78 +92,6 @@ static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"tcl2host-status-ring",
};
-#define ATH11K_TX_RING_MASK_0 0x1
-#define ATH11K_TX_RING_MASK_1 0x2
-#define ATH11K_TX_RING_MASK_2 0x4
-
-#define ATH11K_RX_RING_MASK_0 0x1
-#define ATH11K_RX_RING_MASK_1 0x2
-#define ATH11K_RX_RING_MASK_2 0x4
-#define ATH11K_RX_RING_MASK_3 0x8
-
-#define ATH11K_RX_ERR_RING_MASK_0 0x1
-
-#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
-
-#define ATH11K_REO_STATUS_RING_MASK_0 0x1
-
-#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
-#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
-#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
-
-#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
-#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
-#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
-
-#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
-#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
-#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
-
-const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_TX_RING_MASK_0,
- ATH11K_TX_RING_MASK_1,
- ATH11K_TX_RING_MASK_2,
-};
-
-const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- 0, 0, 0, 0,
- ATH11K_RX_MON_STATUS_RING_MASK_0,
- ATH11K_RX_MON_STATUS_RING_MASK_1,
- ATH11K_RX_MON_STATUS_RING_MASK_2,
-};
-
-const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- 0, 0, 0, 0, 0, 0, 0,
- ATH11K_RX_RING_MASK_0,
- ATH11K_RX_RING_MASK_1,
- ATH11K_RX_RING_MASK_2,
- ATH11K_RX_RING_MASK_3,
-};
-
-const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RX_ERR_RING_MASK_0,
-};
-
-const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RX_WBM_REL_RING_MASK_0,
-};
-
-const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_REO_STATUS_RING_MASK_0,
-};
-
-const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RXDMA2HOST_RING_MASK_0,
- ATH11K_RXDMA2HOST_RING_MASK_1,
- ATH11K_RXDMA2HOST_RING_MASK_2,
-};
-
-const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_HOST2RXDMA_RING_MASK_0,
- ATH11K_HOST2RXDMA_RING_MASK_1,
- ATH11K_HOST2RXDMA_RING_MASK_2,
-};
-
/* enum ext_irq_num - irq numbers that can be used by external modules
* like datapath
*/
@@ -449,10 +148,10 @@ static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
@@ -509,7 +208,7 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
- ce_config = &target_ce_config_wlan[ce_id];
+ ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
@@ -524,7 +223,7 @@ static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
- ce_config = &target_ce_config_wlan[ce_id];
+ ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
@@ -540,8 +239,8 @@ static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
int i;
int irq_idx;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
@@ -568,8 +267,8 @@ static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_enable(ab, i);
}
@@ -579,8 +278,8 @@ static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_disable(ab, i);
}
@@ -624,9 +323,10 @@ static void ath11k_ahb_stop(struct ath11k_base *ab)
static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
int ret;
- ret = rproc_boot(ab->tgt_rproc);
+ ret = rproc_boot(ab_ahb->tgt_rproc);
if (ret)
ath11k_err(ab, "failed to boot the remote processor Q6\n");
@@ -635,17 +335,20 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
- rproc_shutdown(ab->tgt_rproc);
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ rproc_shutdown(ab_ahb->tgt_rproc);
}
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
- cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan) - 1;
- cfg->tgt_ce = target_ce_config_wlan;
- cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan);
- cfg->svc_to_ce_map = target_service_to_ce_map_wlan;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
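+	/* AHB targets (IPQ8074/IPQ6018) look up the IPQ8074 QMI service
+	 * instance.
+	 */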
+ ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074;
}
static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
@@ -665,8 +368,8 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
int irq_idx;
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
@@ -675,9 +378,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
ath11k_ahb_free_ext_irq(ab);
}
-static void ath11k_ahb_ce_tasklet(unsigned long data)
+static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
- struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
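+	/* from_tasklet() is container_of(): recover the pipe embedding intr_tq */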
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
@@ -734,6 +437,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
{
+ struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
int irq;
int ret;
@@ -749,45 +453,45 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
- if (ath11k_tx_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
wbm2host_tx_completions_ring1 - j;
}
- if (ath11k_rx_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
reo2host_destination_ring1 - j;
}
- if (ath11k_rx_err_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_exception;
- if (ath11k_rx_wbm_rel_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
irq_grp->irqs[num_irq++] = wbm2host_rx_release;
- if (ath11k_reo_status_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_status;
- if (j < MAX_RADIOS) {
- if (ath11k_rxdma2host_ring_mask[i] & BIT(j)) {
+ if (j < ab->hw_params.max_radios) {
+ if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
- rxdma2host_destination_ring_mac1
- - ath11k_core_get_hw_mac_id(ab, j);
+ rxdma2host_destination_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
- if (ath11k_host2rxdma_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
- host2rxdma_host_buf_ring_mac1
- - ath11k_core_get_hw_mac_id(ab, j);
+ host2rxdma_host_buf_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
- if (rx_mon_status_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
ppdu_end_interrupts_mac1 -
- ath11k_core_get_hw_mac_id(ab, j);
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
irq_grp->irqs[num_irq++] =
rxdma2host_monitor_status_ring_mac1 -
- ath11k_core_get_hw_mac_id(ab, j);
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
}
}
@@ -819,16 +523,15 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
int ret;
/* Configure CE irqs */
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
- tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet,
- (unsigned long)ce_pipe);
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
IRQF_TRIGGER_RISING, irq_name[irq_idx],
@@ -852,8 +555,8 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
bool ul_set = false, dl_set = false;
int i;
- for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
- entry = &target_service_to_ce_map_wlan[i];
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
@@ -900,6 +603,28 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
.power_up = ath11k_ahb_power_up,
};
+static int ath11k_core_get_rproc(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_err(ab, "failed to get rproc\n");
+ return -EINVAL;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
@@ -926,7 +651,9 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return ret;
}
- ab = ath11k_core_alloc(&pdev->dev, 0, ATH11K_BUS_AHB);
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
+ ATH11K_BUS_AHB,
+ &ath11k_ahb_bus_params);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
@@ -939,6 +666,10 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
ab->mem_len = resource_size(mem_res);
platform_set_drvdata(pdev, ab);
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
ret = ath11k_hal_srng_init(ab);
if (ret)
goto err_core_free;
@@ -951,9 +682,9 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
ath11k_ahb_init_qmi_ce_config(ab);
- ret = ath11k_ahb_config_irq(ab);
+ ret = ath11k_core_get_rproc(ab);
if (ret) {
- ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ ath11k_err(ab, "failed to get rproc: %d\n", ret);
goto err_ce_free;
}
@@ -963,6 +694,12 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
goto err_ce_free;
}
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
return 0;
err_ce_free:
@@ -981,12 +718,16 @@ err_core_free:
static int ath11k_ahb_remove(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
+ unsigned long left;
reinit_completion(&ab->driver_recovery);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
- wait_for_completion_timeout(&ab->driver_recovery,
- ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath11k_warn(ab, "failed to receive recovery response completion\n");
+ }
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
@@ -1023,5 +764,5 @@ static void ath11k_ahb_exit(void)
}
module_exit(ath11k_ahb_exit);
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 6c7b26ac6545..51e6e4a5f686 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -10,4 +10,12 @@
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
struct ath11k_base;
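+/* Bus-private data, stored in ath11k_base::drv_priv and sized via the
+ * priv_size argument of ath11k_core_alloc().
+ */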
+struct ath11k_ahb {
+ struct rproc *tgt_rproc;
+};
+
+static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_ahb *)ab->drv_priv;
+}
#endif
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index cdd40c8fc867..9d730f8ac816 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -5,8 +5,9 @@
#include "dp_rx.h"
#include "debug.h"
+#include "hif.h"
-static const struct ce_attr host_ce_config_wlan[] = {
+const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@@ -108,6 +109,104 @@ static const struct ce_attr host_ce_config_wlan[] = {
},
};
+const struct ce_attr ath11k_host_ce_config_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static bool ath11k_ce_need_shadow_fix(int ce_id)
+{
+	/* only CE4 needs the shadow workaround */
+ if (ce_id == 4)
+ return true;
+ return false;
+}
+
+static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+}
+
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -352,6 +451,31 @@ static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
}
}
+static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
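+	/* Merge the two 32-bit MSI address halves into the 64-bit srng
+	 * parameter and spread CE ids across the vector range reserved
+	 * for "CE" by the bus layer.
+	 */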
+ ring_params->msi_addr = addr_lo;
+	ring_params->msi_addr |= (dma_addr_t)(((u64)addr_hi) << 32);
+ ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
@@ -363,21 +487,24 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
params.ring_base_vaddr = ce_ring->base_addr_owner_space;
params.num_entries = ce_ring->nentries;
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
switch (type) {
case HAL_CE_SRC:
- if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
params.intr_batch_cntr_thres_entries = 1;
break;
case HAL_CE_DST:
- params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
- if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_timer_thres_us = 1024;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.low_threshold = ce_ring->nentries - 3;
}
break;
case HAL_CE_DST_STATUS:
- if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = 0x1000;
}
@@ -395,8 +522,15 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
ret, ce_id);
return ret;
}
+
ce_ring->hal_ring_id = ret;
+ if (ab->hw_params.supports_shadow_regs &&
+ ath11k_ce_need_shadow_fix(ce_id))
+ ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ ce_ring->hal_ring_id);
+
return 0;
}
@@ -440,7 +574,7 @@ ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
- const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
struct ath11k_ce_ring *ring;
int nentries;
int desc_sz;
@@ -494,6 +628,7 @@ void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
pipe->send_cb(pipe);
}
+EXPORT_SYMBOL(ath11k_ce_per_engine_service);
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id)
@@ -568,6 +703,9 @@ int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
ath11k_hal_srng_access_end(ab, srng);
+ if (ath11k_ce_need_shadow_fix(pipe_id))
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
+
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
@@ -604,12 +742,57 @@ static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
}
}
+static void ath11k_ce_shadow_config(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ab->hw_params.host_ce_config[i].src_nentries)
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_SRC, i);
+
+ if (ab->hw_params.host_ce_config[i].dest_nentries) {
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST, i);
+
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+	/* shadow isn't configured yet; configure now:
+	 * non-CE srngs are configured first, then
+ * all CE srngs.
+ */
+ ath11k_hal_srng_shadow_config(ab);
+ ath11k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
+
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int pipe_num;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ ath11k_ce_stop_shadow_timers(ab);
+
+ for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath11k_ce_rx_pipe_cleanup(pipe);
@@ -619,6 +802,7 @@ void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
/* NOTE: Should we also clean up tx buffer in all pipes? */
}
}
+EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
@@ -626,7 +810,7 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
int i;
int ret;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret) {
@@ -642,6 +826,7 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
}
}
}
+EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
@@ -656,7 +841,10 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;
- for (i = 0; i < CE_COUNT; i++) {
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
@@ -714,9 +902,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
int desc_sz;
int i;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
@@ -752,6 +943,7 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
}
}
}
+EXPORT_SYMBOL(ath11k_ce_free_pipes);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
@@ -762,8 +954,8 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
spin_lock_init(&ab->ce.ce_lock);
- for (i = 0; i < CE_COUNT; i++) {
- attr = &host_ce_config_wlan[i];
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ attr = &ab->hw_params.host_ce_config[i];
pipe = &ab->ce.ce_pipe[i];
pipe->pipe_num = i;
pipe->ab = ab;
@@ -779,6 +971,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
return 0;
}
+EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
/* For Big Endian Host, Copy Engine byte_swap is enabled
* When Copy Engine does byte_swap, need to byte swap again for the
@@ -799,10 +992,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len)
}
}
-int ath11k_ce_get_attr_flags(int ce_id)
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
- if (ce_id >= CE_COUNT)
+ if (ce_id >= ab->hw_params.ce_count)
return -EINVAL;
- return host_ce_config_wlan[ce_id].flags;
+ return ab->hw_params.host_ce_config[ce_id].flags;
}
+EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 688f357e6eaf..269b599ac0b0 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -6,7 +6,7 @@
#ifndef ATH11K_CE_H
#define ATH11K_CE_H
-#define CE_COUNT 12
+#define CE_COUNT_MAX 12
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
@@ -165,11 +165,15 @@ struct ath11k_ce_pipe {
};
struct ath11k_ce {
- struct ath11k_ce_pipe ce_pipe[CE_COUNT];
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX];
/* Protects rings of all ce pipes */
spinlock_t ce_lock;
+ struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX];
};
+extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
+extern const struct ce_attr ath11k_host_ce_config_qca6390[];
+
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
void ath11k_ce_rx_replenish_retry(struct timer_list *t);
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
@@ -179,6 +183,11 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
int ath11k_ce_init_pipes(struct ath11k_base *ab);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
void ath11k_ce_free_pipes(struct ath11k_base *ab);
-int ath11k_ce_get_attr_flags(int ce_id);
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id);
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+int ath11k_ce_attr_attach(struct ath11k_base *ab);
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 905cd8beaf28..ebd6886a8c18 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -14,43 +14,139 @@
#include "hif.h"
unsigned int ath11k_debug_mask;
+EXPORT_SYMBOL(ath11k_debug_mask);
module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
-static const struct ath11k_hw_params ath11k_hw_params = {
- .name = "ipq8074",
- .fw = {
- .dir = IPQ8074_FW_DIR,
- .board_size = IPQ8074_MAX_BOARD_DATA_SZ,
- .cal_size = IPQ8074_MAX_CAL_DATA_SZ,
+static unsigned int ath11k_crypto_mode;
+module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644);
+MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
+
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
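+/* Per-target capabilities and register maps; ab->hw_rev, set by the bus
+ * driver at probe time, selects the matching entry in
+ * ath11k_init_hw_params().
+ */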
+static const struct ath11k_hw_params ath11k_hw_params[] = {
+ {
+ .hw_rev = ATH11K_HW_IPQ8074,
+ .name = "ipq8074 hw2.0",
+ .fw = {
+ .dir = "IPQ8074/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &ipq8074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
+ .svc_to_ce_map_len = 21,
+ .single_pdev_only = false,
+ .needs_band_to_mac = true,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .tcl_0_only = false,
+ .spectral_fft_sz = 2,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ6018_HW10,
+ .name = "ipq6018 hw1.0",
+ .fw = {
+ .dir = "IPQ6018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 2,
+ .bdf_addr = 0x4ABC0000,
+ .hw_ops = &ipq6018_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
+ .svc_to_ce_map_len = 19,
+ .single_pdev_only = false,
+ .needs_band_to_mac = true,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .tcl_0_only = false,
+ .spectral_fft_sz = 4,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ },
+ {
+ .name = "qca6390 hw2.0",
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ .fw = {
+ .dir = "QCA6390/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &qca6390_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &qca6390_regs,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .needs_band_to_mac = false,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+ .tcl_0_only = true,
+ .spectral_fft_sz = 0,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
},
};
-/* Map from pdev index to hw mac index */
-u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
-{
- switch (pdev_idx) {
- case 0:
- return 0;
- case 1:
- return 2;
- case 2:
- return 1;
- default:
- ath11k_warn(ab, "Invalid pdev idx %d\n", pdev_idx);
- return ATH11K_INVALID_HW_MAC_ID;
- }
-}
-EXPORT_SYMBOL(ath11k_core_get_hw_mac_id);
-
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
- /* Note: bus is fixed to ahb. When other bus type supported,
- * make it to dynamic.
- */
scnprintf(name, name_len,
- "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d",
+ ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id);
@@ -59,29 +155,24 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
return 0;
}
-static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
- const char *dir,
- const char *file)
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *file)
{
- char filename[100];
const struct firmware *fw;
+ char path[100];
int ret;
if (file == NULL)
return ERR_PTR(-ENOENT);
- if (dir == NULL)
- dir = ".";
-
- snprintf(filename, sizeof(filename), "%s/%s", dir, file);
- ret = firmware_request_nowarn(&fw, filename, ab->dev);
- ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
- filename, ret);
+ ath11k_core_create_firmware_path(ab, file, path, sizeof(path));
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
if (ret)
return ERR_PTR(ret);
- ath11k_warn(ab, "Downloading BDF: %s, size: %zu\n",
- filename, fw->size);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot firmware request %s size %zu\n",
+ path, fw->size);
return fw;
}
@@ -181,26 +272,30 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
{
size_t len, magic_len;
const u8 *data;
- char *filename = ATH11K_BOARD_API2_FILE;
+ char *filename, filepath[100];
size_t ie_len;
struct ath11k_fw_ie *hdr;
int ret, ie_id;
+ filename = ATH11K_BOARD_API2_FILE;
+
if (!bd->fw)
- bd->fw = ath11k_fetch_fw_file(ab,
- ab->hw_params.fw.dir,
- filename);
+ bd->fw = ath11k_core_firmware_request(ab, filename);
+
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
data = bd->fw->data;
len = bd->fw->size;
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
/* magic has extra null byte padded */
magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
if (len < magic_len) {
- ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
- ab->hw_params.fw.dir, filename, len);
+ ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
ret = -EINVAL;
goto err;
}
@@ -214,8 +309,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4);
if (len < magic_len) {
- ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
- ab->hw_params.fw.dir, filename, len);
+ ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
ret = -EINVAL;
goto err;
}
@@ -263,8 +358,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
out:
if (!bd->data || !bd->len) {
ath11k_err(ab,
- "failed to fetch board data for %s from %s/%s\n",
- boardname, ab->hw_params.fw.dir, filename);
+ "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
ret = -ENODATA;
goto err;
}
@@ -279,9 +374,7 @@ err:
static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
struct ath11k_board_data *bd)
{
- bd->fw = ath11k_fetch_fw_file(ab,
- ab->hw_params.fw.dir,
- ATH11K_DEFAULT_BOARD_FILE);
+ bd->fw = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_BOARD_FILE);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
@@ -325,6 +418,7 @@ static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);
+
ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
@@ -342,7 +436,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_debug_soc_create(ab);
+ ret = ath11k_debugfs_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create ath11k debugfs\n");
goto err_qmi_deinit;
@@ -357,7 +451,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab)
return 0;
err_debugfs_reg:
- ath11k_debug_soc_destroy(ab);
+ ath11k_debugfs_soc_destroy(ab);
err_qmi_deinit:
ath11k_qmi_deinit_service(ab);
return ret;
@@ -365,7 +459,7 @@ err_qmi_deinit:
static void ath11k_core_soc_destroy(struct ath11k_base *ab)
{
- ath11k_debug_soc_destroy(ab);
+ ath11k_debugfs_soc_destroy(ab);
ath11k_dp_free(ab);
ath11k_reg_free(ab);
ath11k_qmi_deinit_service(ab);
@@ -375,7 +469,7 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
{
int ret;
- ret = ath11k_debug_pdev_create(ab);
+ ret = ath11k_debugfs_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
return ret;
@@ -415,7 +509,7 @@ err_dp_pdev_free:
err_mac_unregister:
ath11k_mac_unregister(ab);
err_pdev_debug:
- ath11k_debug_pdev_destroy(ab);
+ ath11k_debugfs_pdev_destroy(ab);
return ret;
}
@@ -427,7 +521,7 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
ath11k_mac_unregister(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
- ath11k_debug_pdev_destroy(ab);
+ ath11k_debugfs_pdev_destroy(ab);
}
static int ath11k_core_start(struct ath11k_base *ab,
@@ -557,6 +651,23 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
return ret;
}
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+
mutex_lock(&ab->core_lock);
ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -706,7 +817,7 @@ static void ath11k_core_restart(struct work_struct *work)
break;
case ATH11K_STATE_RESTARTED:
ar->state = ATH11K_STATE_WEDGED;
- /* fall through */
+ fallthrough;
case ATH11K_STATE_WEDGED:
ath11k_warn(ab,
"device is wedged, will not restart radio %d\n", i);
@@ -717,25 +828,47 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ab->driver_recovery);
}
-int ath11k_core_init(struct ath11k_base *ab)
+static int ath11k_init_hw_params(struct ath11k_base *ab)
{
- struct device *dev = ab->dev;
- struct rproc *prproc;
- phandle rproc_phandle;
- int ret;
+ const struct ath11k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) {
+ hw_params = &ath11k_hw_params[i];
- if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
- ath11k_err(ab, "failed to get q6_rproc handle\n");
- return -ENOENT;
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
}
- prproc = rproc_get_by_phandle(rproc_phandle);
- if (!prproc) {
- ath11k_err(ab, "failed to get rproc\n");
+ if (i == ARRAY_SIZE(ath11k_hw_params)) {
+ ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
return -EINVAL;
}
- ab->tgt_rproc = prproc;
- ab->hw_params = ath11k_hw_params;
+
+ ab->hw_params = *hw_params;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "Hardware name %s\n", ab->hw_params.name);
+
+ return 0;
+}
+
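+/* Called by the bus driver before SRNG/CE setup so that ab->hw_params
+ * is valid for the rest of probe.
+ */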
+int ath11k_core_pre_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_init_hw_params(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get hw params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_pre_init);
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ int ret;
ret = ath11k_core_soc_create(ab);
if (ret) {
@@ -745,6 +878,7 @@ int ath11k_core_init(struct ath11k_base *ab)
return 0;
}
+EXPORT_SYMBOL(ath11k_core_init);
void ath11k_core_deinit(struct ath11k_base *ab)
{
@@ -759,14 +893,17 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
}
+EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
kfree(ab);
}
+EXPORT_SYMBOL(ath11k_core_free);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus)
+ enum ath11k_bus bus,
+ const struct ath11k_bus_params *bus_params)
{
struct ath11k_base *ab;
@@ -789,6 +926,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
ab->dev = dev;
+ ab->bus_params = *bus_params;
+ ab->hif.bus = bus;
return ab;
@@ -796,3 +935,7 @@ err_sc_free:
kfree(ab);
return NULL;
}
+EXPORT_SYMBOL(ath11k_core_alloc);
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index e5c4e19020ee..18b97420f0d8 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -35,6 +35,10 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF
+extern unsigned int ath11k_frame_mode;
+
+#define ATH11K_MON_TIMER_INTERVAL 10
+
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
ATH11K_BW_40 = 1,
@@ -54,6 +58,13 @@ enum wme_ac {
#define ATH11K_VHT_MCS_MAX 9
#define ATH11K_HE_MCS_MAX 11
+enum ath11k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH11K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH11K_CRYPT_MODE_SW,
+};
+
static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
{
return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
@@ -90,6 +101,8 @@ struct ath11k_skb_rxcb {
enum ath11k_hw_rev {
ATH11K_HW_IPQ8074,
+ ATH11K_HW_QCA6390_HW20,
+ ATH11K_HW_IPQ6018_HW10,
};
enum ath11k_firmware_mode {
@@ -101,18 +114,8 @@ enum ath11k_firmware_mode {
};
#define ATH11K_IRQ_NUM_MAX 52
-#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
#define ATH11K_EXT_IRQ_NUM_MAX 16
-extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-
struct ath11k_ext_irq_grp {
struct ath11k_base *ab;
u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
@@ -226,6 +229,7 @@ struct ath11k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
@@ -554,6 +558,7 @@ struct ath11k {
};
struct ath11k_band_cap {
+ u32 phy_id;
u32 max_bw_supported;
u32 ht_cap_info;
u32 he_cap_info[2];
@@ -589,6 +594,13 @@ struct ath11k_board_data {
size_t len;
};
+struct ath11k_bus_params {
+ bool mhi_support;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+};
+
/* IPQ8074 HW channel counters frequency value in hertz */
#define IPQ8074_CC_FREQ_HERTZ 320000
@@ -638,7 +650,6 @@ struct ath11k_base {
struct ath11k_qmi qmi;
struct ath11k_wmi_base wmi_ab;
struct completion fw_ready;
- struct rproc *tgt_rproc;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
@@ -651,6 +662,7 @@ struct ath11k_base {
unsigned long mem_len;
struct {
+ enum ath11k_bus bus;
const struct ath11k_hif_ops *ops;
} hif;
@@ -677,7 +689,10 @@ struct ath11k_base {
u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
bool pdevs_macaddr_valid;
int bd_api;
+
struct ath11k_hw_params hw_params;
+ struct ath11k_bus_params bus_params;
+
const struct firmware *cal_file;
/* Below regd's are protected by ab->data_lock */
@@ -714,6 +729,7 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
+ struct timer_list mon_reap_timer;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -841,6 +857,13 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
@@ -850,17 +873,21 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus);
+ enum ath11k_bus bus,
+ const struct ath11k_bus_params *bus_params);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
void ath11k_core_halt(struct ath11k *ar);
-u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *filename);
static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
{
@@ -894,4 +921,30 @@ static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
return (struct ath11k_vif *)vif->drv_priv;
}
+static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+}
+
+static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
+}
+
+static inline const char *ath11k_bus_str(enum ath11k_bus bus)
+{
+ switch (bus) {
+ case ATH11K_BUS_PCI:
+ return "pci";
+ case ATH11K_BUS_AHB:
+ return "ahb";
+ }
+
+ return "unknown";
+}
+
#endif /* _CORE_H_ */
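Of the new core.h helpers, ath11k_core_create_firmware_path() is the simplest: it concatenates the driver's firmware base directory, the per-hardware directory from hw_params, and the requested file name. A standalone userspace illustration of the same composition (the directory and file names below are assumptions for the example, not values from this patch):

    #include <stdio.h>

    #define FW_DIR "ath11k"  /* stands in for ATH11K_FW_DIR */

    static void create_firmware_path(const char *hw_dir, const char *filename,
                                     char *buf, size_t buf_len)
    {
            /* same "%s/%s/%s" composition as the inline helper above */
            snprintf(buf, buf_len, "%s/%s/%s", FW_DIR, hw_dir, filename);
    }

    int main(void)
    {
            char path[128];

            create_firmware_path("QCA6390/hw2.0", "amss.bin", path, sizeof(path));
            printf("%s\n", path);  /* prints: ath11k/QCA6390/hw2.0/amss.bin */
            return 0;
    }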
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index cf20db370123..5e1f5437b418 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -168,7 +168,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
ring->bufs_max = ring->refill_srng.size /
- ath11k_hal_srng_get_entrysize(HAL_RXDMA_DIR_BUF);
+ ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
ring->buf_sz = db_cap->min_buf_sz;
ring->buf_align = db_cap->min_buf_align;
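The one-line dbring.c change reflects a pattern repeated throughout this series: HAL helpers that used to consult one global SRNG configuration table now take the ath11k_base, so the value can come from a per-chip table chosen at probe time. A compilable sketch of that shape, with invented types and numbers:

    #include <stdio.h>

    struct hw_srng_config {
            unsigned int entry_size;  /* in 32-bit words */
    };

    /* invented per-chip table, selected when the device is probed */
    static const struct hw_srng_config example_srng_config[] = {
            [0] = { .entry_size = 2 },
            [1] = { .entry_size = 4 },
    };

    struct base {
            const struct hw_srng_config *srng_config;
    };

    /* the added 'ab' argument is what makes the lookup per-hardware */
    static unsigned int srng_get_entrysize(const struct base *ab,
                                           unsigned int ring_type)
    {
            return ab->srng_config[ring_type].entry_size << 2;  /* words -> bytes */
    }

    int main(void)
    {
            struct base ab = { .srng_config = example_srng_config };

            printf("%u bytes\n", srng_get_entrysize(&ab, 1));  /* 16 bytes */
            return 0;
    }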
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 62a1aa0565a9..c86de95fbdc5 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -6,48 +6,6 @@
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
-#include "wmi.h"
-#include "hal_rx.h"
-#include "dp_tx.h"
-#include "debug_htt_stats.h"
-#include "peer.h"
-
-static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
- "REO2SW1_RING",
- "REO2SW2_RING",
- "REO2SW3_RING",
- "REO2SW4_RING",
- "WBM2REO_LINK_RING",
- "REO2TCL_RING",
- "REO2FW_RING",
- "RELEASE_RING",
- "PPE_RELEASE_RING",
- "TCL2TQM_RING",
- "TQM_RELEASE_RING",
- "REO_RELEASE_RING",
- "WBM2SW0_RELEASE_RING",
- "WBM2SW1_RELEASE_RING",
- "WBM2SW2_RELEASE_RING",
- "WBM2SW3_RELEASE_RING",
- "REO_CMD_RING",
- "REO_STATUS_RING",
-};
-
-static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
- "FW2RXDMA_BUF_RING",
- "FW2RXDMA_STATUS_RING",
- "FW2RXDMA_LINK_RING",
- "SW2RXDMA_BUF_RING",
- "WBM2RXDMA_LINK_RING",
- "RXDMA2FW_RING",
- "RXDMA2SW_RING",
- "RXDMA2RELEASE_RING",
- "RXDMA2REO_RING",
- "MONITOR_STATUS_RING",
- "MONITOR_BUF_RING",
- "MONITOR_DESC_RING",
- "MONITOR_DEST_RING",
-};
void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -62,6 +20,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_info);
void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -76,6 +35,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_err);
void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -90,8 +50,10 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_warn);
#ifdef CONFIG_ATH11K_DEBUG
+
void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
const char *fmt, ...)
{
@@ -110,6 +72,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
va_end(args);
}
+EXPORT_SYMBOL(__ath11k_dbg);
void ath11k_dbg_dump(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
@@ -138,1059 +101,6 @@ void ath11k_dbg_dump(struct ath11k_base *ab,
}
}
}
+EXPORT_SYMBOL(ath11k_dbg_dump);
-#endif
-
-#ifdef CONFIG_ATH11K_DEBUGFS
-static void ath11k_fw_stats_pdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_pdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_vdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_vdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_bcn_free(struct list_head *head)
-{
- struct ath11k_fw_stats_bcn *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_debug_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->debug.fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
-void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
-{
- struct ath11k_fw_stats stats = {};
- struct ath11k *ar;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i, ret;
-
- INIT_LIST_HEAD(&stats.pdevs);
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
-
- ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
- if (ret) {
- ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
- goto free;
- }
-
- rcu_read_lock();
- ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
- if (!ar) {
- rcu_read_unlock();
- ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
- stats.pdev_id, ret);
- goto free;
- }
-
- spin_lock_bh(&ar->data_lock);
-
- if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
- ar->debug.fw_stats_done = true;
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats.vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- goto complete;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats.vdevs,
- &ar->debug.fw_stats.vdevs);
-
- if (is_end) {
- ar->debug.fw_stats_done = true;
- num_vdev = 0;
- }
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats.bcn)) {
- ath11k_warn(ab, "empty bcn stats");
- goto complete;
- }
- /* Mark end until we reached the count of all started VDEVs
- * within the PDEV
- */
- is_end = ((++num_bcn) == ar->num_started_vdevs);
-
- list_splice_tail_init(&stats.bcn,
- &ar->debug.fw_stats.bcn);
-
- if (is_end) {
- ar->debug.fw_stats_done = true;
- num_bcn = 0;
- }
- }
-complete:
- complete(&ar->debug.fw_stats_complete);
- rcu_read_unlock();
- spin_unlock_bh(&ar->data_lock);
-
-free:
- ath11k_fw_stats_pdevs_free(&stats.pdevs);
- ath11k_fw_stats_vdevs_free(&stats.vdevs);
- ath11k_fw_stats_bcn_free(&stats.bcn);
-}
-
-static int ath11k_debug_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * HZ);
-
- ath11k_debug_fw_stats_reset(ar);
-
- reinit_completion(&ar->debug.fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left =
- wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1 * HZ);
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->debug.fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.vdev_id = 0;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
- goto err_free;
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_pdev_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_pdev_stats = {
- .open = ath11k_open_pdev_stats,
- .release = ath11k_release_pdev_stats,
- .read = ath11k_read_pdev_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.pdev_id = ar->pdev->pdev_id;
- /* VDEV stats is always sent for all active VDEVs from FW */
- req_param.vdev_id = 0;
- req_param.stats_id = WMI_REQUEST_VDEV_STAT;
-
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
- goto err_free;
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_vdev_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_vdev_stats = {
- .open = ath11k_open_vdev_stats,
- .release = ath11k_release_vdev_stats,
- .read = ath11k_read_vdev_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct ath11k_vif *arvif;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.stats_id = WMI_REQUEST_BCN_STAT;
- req_param.pdev_id = ar->pdev->pdev_id;
-
- /* loop all active VDEVs for bcn stats */
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->is_up)
- continue;
-
- req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
- goto err_free;
- }
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- /* since beacon stats request is looped for all active VDEVs, saved fw
- * stats is not freed for each request until done for all active VDEVs
- */
- spin_lock_bh(&ar->data_lock);
- ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
- spin_unlock_bh(&ar->data_lock);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_bcn_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_bcn_stats = {
- .open = ath11k_open_bcn_stats,
- .release = ath11k_release_bcn_stats,
- .read = ath11k_read_bcn_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char buf[] =
- "To simulate firmware crash write one of the keywords to this file:\n"
- "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
- "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
-/* Simulate firmware crash:
- * 'soft': Call wmi command causing firmware hang. This firmware hang is
- * recoverable by warm firmware reset.
- * 'hard': Force firmware crash by setting any vdev parameter for not allowed
- * vdev id. This is hard firmware crash because it is recoverable only by cold
- * firmware reset.
- */
-static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k_base *ab = file->private_data;
- struct ath11k_pdev *pdev;
- struct ath11k *ar = ab->pdevs[0].ar;
- char buf[32] = {0};
- ssize_t rc;
- int i, ret, radioup = 0;
-
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ar && ar->state == ATH11K_STATE_ON) {
- radioup = 1;
- break;
- }
- }
- /* filter partial writes and invalid commands */
- if (*ppos != 0 || count >= sizeof(buf) || count == 0)
- return -EINVAL;
-
- rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
- if (rc < 0)
- return rc;
-
- /* drop the possible '\n' from the end */
- if (buf[*ppos - 1] == '\n')
- buf[*ppos - 1] = '\0';
-
- if (radioup == 0) {
- ret = -ENETDOWN;
- goto exit;
- }
-
- if (!strcmp(buf, "assert")) {
- ath11k_info(ab, "simulating firmware assert crash\n");
- ret = ath11k_wmi_force_fw_hang_cmd(ar,
- ATH11K_WMI_FW_HANG_ASSERT_TYPE,
- ATH11K_WMI_FW_HANG_DELAY);
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- if (ret) {
- ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
- goto exit;
- }
-
- ret = count;
-
-exit:
- return ret;
-}
-
-static const struct file_operations fops_simulate_fw_crash = {
- .read = ath11k_read_simulate_fw_crash,
- .write = ath11k_write_simulate_fw_crash,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- u32 filter;
- int ret;
-
- if (kstrtouint_from_user(ubuf, count, 0, &filter))
- return -EINVAL;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- if (filter == ar->debug.extd_tx_stats) {
- ret = count;
- goto out;
- }
-
- ar->debug.extd_tx_stats = filter;
- ret = count;
-
-out:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-
-{
- char buf[32] = {0};
- struct ath11k *ar = file->private_data;
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
- ar->debug.extd_tx_stats);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_extd_tx_stats = {
- .read = ath11k_read_enable_extd_tx_stats,
- .write = ath11k_write_enable_extd_tx_stats,
- .open = simple_open
-};
-
-static ssize_t ath11k_write_extd_rx_stats(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
- u32 enable, rx_filter = 0, ring_id;
- int ret;
-
- if (kstrtouint_from_user(ubuf, count, 0, &enable))
- return -EINVAL;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto exit;
- }
-
- if (enable > 1) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (enable == ar->debug.extd_rx_stats) {
- ret = count;
- goto exit;
- }
-
- if (enable) {
- rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
-
- tlv_filter.rx_filter = rx_filter;
- tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
- tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
- HTT_RX_FP_DATA_FILTER_FLASG3;
- } else {
- tlv_filter = ath11k_mac_mon_status_filter_default;
- }
-
- ar->debug.rx_filter = tlv_filter.rx_filter;
-
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
-
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
- goto exit;
- }
-
- ar->debug.extd_rx_stats = enable;
- ret = count;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_extd_rx_stats(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- char buf[32];
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%d\n",
- ar->debug.extd_rx_stats);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_extd_rx_stats = {
- .read = ath11k_read_extd_rx_stats,
- .write = ath11k_write_extd_rx_stats,
- .open = simple_open,
-};
-
-static int ath11k_fill_bp_stats(struct ath11k_base *ab,
- struct ath11k_bp_stats *bp_stats,
- char *buf, int len, int size)
-{
- lockdep_assert_held(&ab->base_lock);
-
- len += scnprintf(buf + len, size - len, "count: %u\n",
- bp_stats->count);
- len += scnprintf(buf + len, size - len, "hp: %u\n",
- bp_stats->hp);
- len += scnprintf(buf + len, size - len, "tp: %u\n",
- bp_stats->tp);
- len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
- jiffies_to_msecs(jiffies - bp_stats->jiffies));
- return len;
-}
-
-static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab,
- char *buf, int size)
-{
- struct ath11k_bp_stats *bp_stats;
- bool stats_rxd = false;
- u8 i, pdev_idx;
- int len = 0;
-
- len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
- len += scnprintf(buf + len, size - len, "==================\n");
-
- spin_lock_bh(&ab->base_lock);
- for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
- bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
-
- if (!bp_stats->count)
- continue;
-
- len += scnprintf(buf + len, size - len, "Ring: %s\n",
- htt_bp_umac_ring[i]);
- len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
- stats_rxd = true;
- }
-
- for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
- for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
- bp_stats =
- &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
-
- if (!bp_stats->count)
- continue;
-
- len += scnprintf(buf + len, size - len, "Ring: %s\n",
- htt_bp_lmac_ring[i]);
- len += scnprintf(buf + len, size - len, "pdev: %d\n",
- pdev_idx);
- len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
- stats_rxd = true;
- }
- }
- spin_unlock_bh(&ab->base_lock);
-
- if (!stats_rxd)
- len += scnprintf(buf + len, size - len,
- "No Ring Backpressure stats received\n\n");
-
- return len;
-}
-
-static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k_base *ab = file->private_data;
- struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
- int len = 0, i, retval;
- const int size = 4096;
- static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
- "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
- "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
- "AMSDU parse", "SA timeout", "DA timeout",
- "Flow timeout", "Flush req"};
- static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
- "Desc addr zero", "Desc inval", "AMPDU in non BA",
- "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
- "Frame OOR", "BAR OOR", "No BA session",
- "Frame SN equal SSN", "PN check fail", "2k err",
- "PN err", "Desc blocked"};
-
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
- len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
- soc_stats->err_ring_pkts);
- len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
- soc_stats->invalid_rbm);
- len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
- for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
- len += scnprintf(buf + len, size - len, "%s: %u\n",
- rxdma_err[i], soc_stats->rxdma_error[i]);
-
- len += scnprintf(buf + len, size - len, "\nREO errors:\n");
- for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
- len += scnprintf(buf + len, size - len, "%s: %u\n",
- reo_err[i], soc_stats->reo_error[i]);
-
- len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
- len += scnprintf(buf + len, size - len,
- "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
- soc_stats->hal_reo_error[0],
- soc_stats->hal_reo_error[1],
- soc_stats->hal_reo_error[2],
- soc_stats->hal_reo_error[3]);
-
- len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
- len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
-
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
- len += scnprintf(buf + len, size - len, "ring%d: %u\n",
- i, soc_stats->tx_err.desc_na[i]);
-
- len += scnprintf(buf + len, size - len,
- "\nMisc Transmit Failures: %d\n",
- atomic_read(&soc_stats->tx_err.misc_fail));
-
- len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len);
-
- if (len > size)
- len = size;
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_soc_dp_stats = {
- .read = ath11k_debug_dump_soc_dp_stats,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-int ath11k_debug_pdev_create(struct ath11k_base *ab)
-{
- if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
-
- ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
-
- if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
- if (IS_ERR(ab->debugfs_soc))
- return PTR_ERR(ab->debugfs_soc);
- return -ENOMEM;
- }
-
- debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
- &fops_simulate_fw_crash);
-
- debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
- &fops_soc_dp_stats);
-
- return 0;
-}
-
-void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
-{
- debugfs_remove_recursive(ab->debugfs_ath11k);
- ab->debugfs_ath11k = NULL;
-}
-
-int ath11k_debug_soc_create(struct ath11k_base *ab)
-{
- ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
-
- if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
- if (IS_ERR(ab->debugfs_ath11k))
- return PTR_ERR(ab->debugfs_ath11k);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ath11k_debug_soc_destroy(struct ath11k_base *ab)
-{
- debugfs_remove_recursive(ab->debugfs_soc);
- ab->debugfs_soc = NULL;
-}
-
-void ath11k_debug_fw_stats_init(struct ath11k *ar)
-{
- struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
- ar->debug.debugfs_pdev);
-
- ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
-
- /* all stats debugfs files created are under "fw_stats" directory
- * created per PDEV
- */
- debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
- &fops_pdev_stats);
- debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
- &fops_vdev_stats);
- debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
- &fops_bcn_stats);
-
- INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
-
- init_completion(&ar->debug.fw_stats_complete);
-}
-
-static ssize_t ath11k_write_pktlog_filter(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
- u32 rx_filter = 0, ring_id, filter, mode;
- u8 buf[128] = {0};
- int ret;
- ssize_t rc;
-
- mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
- buf[rc] = '\0';
-
- ret = sscanf(buf, "0x%x %u", &filter, &mode);
- if (ret != 2) {
- ret = -EINVAL;
- goto out;
- }
-
- if (filter) {
- ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
- goto out;
- }
- } else {
- ret = ath11k_wmi_pdev_pktlog_disable(ar);
- if (ret) {
- ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
- goto out;
- }
- }
-
-#define HTT_RX_FILTER_TLV_LITE_MODE \
- (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
- HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
-
- if (mode == ATH11K_PKTLOG_MODE_FULL) {
- rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
- HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
- HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
- HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
- HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
- } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
- ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
- HTT_PPDU_STATS_TAG_PKTLOG);
- if (ret) {
- ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
- goto out;
- }
-
- rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
- } else {
- ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
- HTT_PPDU_STATS_TAG_DEFAULT);
- if (ret) {
- ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
- ret);
- goto out;
- }
- }
-
- tlv_filter.rx_filter = rx_filter;
- if (rx_filter) {
- tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
- tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
- HTT_RX_FP_DATA_FILTER_FLASG3;
- }
-
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
- goto out;
- }
-
- ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
- filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
-
- ar->debug.pktlog_filter = filter;
- ar->debug.pktlog_mode = mode;
- ret = count;
-
-out:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_pktlog_filter(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-
-{
- char buf[32] = {0};
- struct ath11k *ar = file->private_data;
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
- ar->debug.pktlog_filter,
- ar->debug.pktlog_mode);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_pktlog_filter = {
- .read = ath11k_read_pktlog_filter,
- .write = ath11k_write_pktlog_filter,
- .open = simple_open
-};
-
-static ssize_t ath11k_write_simulate_radar(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- int ret;
-
- ret = ath11k_wmi_simulate_radar(ar);
- if (ret)
- return ret;
-
- return count;
-}
-
-static const struct file_operations fops_simulate_radar = {
- .write = ath11k_write_simulate_radar,
- .open = simple_open
-};
-
-int ath11k_debug_register(struct ath11k *ar)
-{
- struct ath11k_base *ab = ar->ab;
- char pdev_name[5];
- char buf[100] = {0};
-
- snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
-
- ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
-
- if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
- if (IS_ERR(ar->debug.debugfs_pdev))
- return PTR_ERR(ar->debug.debugfs_pdev);
-
- return -ENOMEM;
- }
-
- /* Create a symlink under ieee80211/phy* */
- snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev);
- debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
-
- ath11k_debug_htt_stats_init(ar);
-
- ath11k_debug_fw_stats_init(ar);
-
- debugfs_create_file("ext_tx_stats", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_extd_tx_stats);
- debugfs_create_file("ext_rx_stats", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_extd_rx_stats);
- debugfs_create_file("pktlog_filter", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_pktlog_filter);
-
- if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
- debugfs_create_file("dfs_simulate_radar", 0200,
- ar->debug.debugfs_pdev, ar,
- &fops_simulate_radar);
- debugfs_create_bool("dfs_block_radar_events", 0200,
- ar->debug.debugfs_pdev,
- &ar->dfs_block_radar_events);
- }
-
- return 0;
-}
-
-void ath11k_debug_unregister(struct ath11k *ar)
-{
-}
-#endif /* CONFIG_ATH11K_DEBUGFS */
+#endif /* CONFIG_ATH11K_DEBUG */
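After this patch debug.c shrinks to just the ath11k_info/err/warn and __ath11k_dbg helpers, now exported so the separate bus modules can log through them, while all debugfs code moves to the new debugfs.c. The helpers share one varargs-forwarding shape; a userspace sketch of that pattern (the kernel versions route through dev_printk and %pV rather than stderr):

    #include <stdarg.h>
    #include <stdio.h>

    /* format(printf, ...) lets the compiler type-check the format string,
     * matching the __printf(2, 3) annotations in debug.h
     */
    __attribute__((format(printf, 1, 2)))
    static void drv_warn(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            fprintf(stderr, "ath11k warn: ");
            vfprintf(stderr, fmt, args);
            va_end(args);
    }

    int main(void)
    {
            drv_warn("failed to pull fw stats: %d\n", -22);
            return 0;
    }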
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index c30085406bfb..659a275e2eb3 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -6,11 +6,8 @@
#ifndef _ATH11K_DEBUG_H_
#define _ATH11K_DEBUG_H_
-#include "hal_tx.h"
#include "trace.h"
-
-#define ATH11K_TX_POWER_MAX_VAL 70
-#define ATH11K_TX_POWER_MIN_VAL 0
+#include "debugfs.h"
enum ath11k_debug_mask {
ATH11K_DBG_AHB = 0x00000001,
@@ -25,101 +22,12 @@ enum ath11k_debug_mask {
ATH11K_DBG_REG = 0x00000200,
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
+ ATH11K_DBG_PCI = 0x00001000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_ANY = 0xffffffff,
};
-/* htt_dbg_ext_stats_type */
-enum ath11k_dbg_htt_ext_stats_type {
- ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
- ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
- ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
- ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
- ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
- ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
- ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
- ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
- ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
- ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
- ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
- ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
- ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
- ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
-
- /* keep this last */
- ATH11K_DBG_HTT_NUM_EXT_STATS,
-};
-
-struct debug_htt_stats_req {
- bool done;
- u8 pdev_id;
- u8 type;
- u8 peer_addr[ETH_ALEN];
- struct completion cmpln;
- u32 buf_len;
- u8 buf[];
-};
-
-struct ath_pktlog_hdr {
- u16 flags;
- u16 missed_cnt;
- u16 log_type;
- u16 size;
- u32 timestamp;
- u32 type_specific_data;
- u8 payload[];
-};
-
-#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
-
-#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
-#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
-
-enum ath11k_pktlog_filter {
- ATH11K_PKTLOG_RX = 0x000000001,
- ATH11K_PKTLOG_TX = 0x000000002,
- ATH11K_PKTLOG_RCFIND = 0x000000004,
- ATH11K_PKTLOG_RCUPDATE = 0x000000008,
- ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
- ATH11K_PKTLOG_EVENT_SW = 0x000000040,
- ATH11K_PKTLOG_ANY = 0x00000006f,
-};
-
-enum ath11k_pktlog_mode {
- ATH11K_PKTLOG_MODE_LITE = 1,
- ATH11K_PKTLOG_MODE_FULL = 2,
-};
-
-enum ath11k_pktlog_enum {
- ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
- ATH11K_PKTLOG_TYPE_TX_STAT = 2,
- ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
- ATH11K_PKTLOG_TYPE_RX_STAT = 5,
- ATH11K_PKTLOG_TYPE_RC_FIND = 6,
- ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
- ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
- ATH11K_PKTLOG_TYPE_RX_CBF = 10,
- ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
- ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
- ATH11K_PKTLOG_TYPE_LITE_RX = 24,
-};
-
-enum ath11k_dbg_aggr_mode {
- ATH11K_DBG_AGGR_MODE_AUTO,
- ATH11K_DBG_AGGR_MODE_MANUAL,
- ATH11K_DBG_AGGR_MODE_MAX,
-};
-
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
@@ -150,153 +58,6 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab,
}
#endif /* CONFIG_ATH11K_DEBUG */
-#ifdef CONFIG_ATH11K_DEBUGFS
-int ath11k_debug_soc_create(struct ath11k_base *ab);
-void ath11k_debug_soc_destroy(struct ath11k_base *ab);
-int ath11k_debug_pdev_create(struct ath11k_base *ab);
-void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
-int ath11k_debug_register(struct ath11k *ar);
-void ath11k_debug_unregister(struct ath11k *ar);
-void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb);
-void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
-
-void ath11k_debug_fw_stats_init(struct ath11k *ar);
-int ath11k_dbg_htt_stats_req(struct ath11k *ar);
-
-static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
-{
- return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
-}
-
-static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
-{
- return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
-}
-
-static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
-{
- return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
- ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
-}
-
-static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
-{
- return ar->debug.extd_tx_stats;
-}
-
-static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
-{
- return ar->debug.extd_rx_stats;
-}
-
-static inline int ath11k_debug_rx_filter(struct ath11k *ar)
-{
- return ar->debug.rx_filter;
-}
-
-void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct dentry *dir);
-void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx);
-void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts);
-#else
-static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
-{
- return 0;
-}
-
-static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
-{
-}
-
-static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
-{
- return 0;
-}
-
-static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
-{
-}
-
-static inline int ath11k_debug_register(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline void ath11k_debug_unregister(struct ath11k *ar)
-{
-}
-
-static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
-}
-
-static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
-}
-
-static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
-{
-}
-
-static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
-{
- return false;
-}
-
-static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
-{
- return false;
-}
-
-static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
-{
- return false;
-}
-
-static inline int ath11k_debug_rx_filter(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx)
-{
-}
-
-static inline void
-ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
-{
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS*/
-
#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
do { \
if (ath11k_debug_mask & dbg_mask) \
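The ath11k_dbg() macro whose head is shown above gates every call site on the module-wide ath11k_debug_mask, so a disabled category costs one bitwise AND and a branch before any formatting work happens. A standalone sketch of the same gate (mask names and values invented):

    #include <stdio.h>

    enum dbg_mask {
            DBG_WMI  = 0x2,
            DBG_BOOT = 0x20,
    };

    /* stands in for the ath11k_debug_mask module parameter */
    static unsigned int debug_mask = DBG_BOOT;

    #define drv_dbg(mask, ...)                   \
            do {                                 \
                    if (debug_mask & (mask))     \
                            printf(__VA_ARGS__); \
            } while (0)

    int main(void)
    {
            drv_dbg(DBG_BOOT, "boot: chip id %d\n", 7); /* printed */
            drv_dbg(DBG_WMI, "wmi: cmd sent\n");        /* filtered out */
            return 0;
    }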
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
new file mode 100644
index 000000000000..1b914e67d314
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -0,0 +1,1097 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include "debugfs.h"
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "peer.h"
+
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+ "REO2SW1_RING",
+ "REO2SW2_RING",
+ "REO2SW3_RING",
+ "REO2SW4_RING",
+ "WBM2REO_LINK_RING",
+ "REO2TCL_RING",
+ "REO2FW_RING",
+ "RELEASE_RING",
+ "PPE_RELEASE_RING",
+ "TCL2TQM_RING",
+ "TQM_RELEASE_RING",
+ "REO_RELEASE_RING",
+ "WBM2SW0_RELEASE_RING",
+ "WBM2SW1_RELEASE_RING",
+ "WBM2SW2_RELEASE_RING",
+ "WBM2SW3_RELEASE_RING",
+ "REO_CMD_RING",
+ "REO_STATUS_RING",
+};
+
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+ "FW2RXDMA_BUF_RING",
+ "FW2RXDMA_STATUS_RING",
+ "FW2RXDMA_LINK_RING",
+ "SW2RXDMA_BUF_RING",
+ "WBM2RXDMA_LINK_RING",
+ "RXDMA2FW_RING",
+ "RXDMA2SW_RING",
+ "RXDMA2RELEASE_RING",
+ "RXDMA2REO_RING",
+ "MONITOR_STATUS_RING",
+ "MONITOR_BUF_RING",
+ "MONITOR_DESC_RING",
+ "MONITOR_DEST_RING",
+};
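+/* The two name tables above are indexed by HTT SW ring id; they are used
+ * only to pretty-print ring names in the soc_dp_stats backpressure dump
+ * later in this file.
+ */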
+
+static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ bool is_end;
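+ /* static: firmware may split one stats request across several
+ * back-to-back events, so these counters must persist between calls
+ * in order to detect the final chunk
+ */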
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends stats for all the active VDEVs irrespective of PDEV,
+ * so only mark the train complete once entries for every started
+ * VDEV have been received
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->debug.fw_stats.vdevs);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats.bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ goto complete;
+ }
+ /* Mark the end once we have received stats for every started VDEV
+ * within the PDEV
+ */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats.bcn,
+ &ar->debug.fw_stats.bcn);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+complete:
+ complete(&ar->debug.fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_pdevs_free(&stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&stats.vdevs);
+ ath11k_fw_stats_bcn_free(&stats.bcn);
+}
+
+static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' events, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ timeout = jiffies + msecs_to_jiffies(3000);
+
+ ath11k_debugfs_fw_stats_reset(ar);
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
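+ /* the first chunk has arrived (the completion fired); now poll until
+ * the event handler marks fw_stats_done or the 3 second deadline passes
+ */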
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats is always sent for all active VDEVs from FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ /* the beacon stats request is looped over all active VDEVs, so the
+ * saved fw stats are freed only here, after requests for every active
+ * VDEV have completed
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'soft': Call wmi command causing firmware hang. This firmware hang is
+ * recoverable by warm firmware reset.
+ * 'hard': Force firmware crash by setting any vdev parameter for not allowed
+ * vdev id. This is hard firmware crash because it is recoverable only by cold
+ * firmware reset.
+ */
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+ .open = simple_open
+};
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int i;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
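+ /* unlike the single-ring version removed from debug.c, program the
+ * filter on every monitor status ring of this pdev; newer targets can
+ * have more than one (hw_params.num_rxmda_per_pdev)
+ */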
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static int ath11k_fill_bp_stats(struct ath11k_base *ab,
+ struct ath11k_bp_stats *bp_stats,
+ char *buf, int len, int size)
+{
+ lockdep_assert_held(&ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "count: %u\n",
+ bp_stats->count);
+ len += scnprintf(buf + len, size - len, "hp: %u\n",
+ bp_stats->hp);
+ len += scnprintf(buf + len, size - len, "tp: %u\n",
+ bp_stats->tp);
+ len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+ jiffies_to_msecs(jiffies - bp_stats->jiffies));
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
+ char *buf, int size)
+{
+ struct ath11k_bp_stats *bp_stats;
+ bool stats_rxd = false;
+ u8 i, pdev_idx;
+ int len = 0;
+
+ len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+ len += scnprintf(buf + len, size - len, "==================\n");
+
+ spin_lock_bh(&ab->base_lock);
+ for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_umac_ring[i]);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+
+ for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+ for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+ bp_stats =
+ &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_lmac_ring[i]);
+ len += scnprintf(buf + len, size - len, "pdev: %d\n",
+ pdev_idx);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!stats_rxd)
+ len += scnprintf(buf + len, size - len,
+ "No Ring Backpressure stats received\n\n");
+
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, soc_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&soc_stats->tx_err.misc_fail));
+
+ len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_dp_stats = {
+ .read = ath11k_debugfs_dump_soc_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
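Note that this dump formats into a fixed 4096-byte scratch buffer: each section is handed only the remaining space (size - len), and len is clamped to size before simple_read_from_buffer() copies it out, so the combined sections cannot overrun buf.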
+
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+ if (IS_ERR(ab->debugfs_soc))
+ return PTR_ERR(ab->debugfs_soc);
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_dp_stats);
+
+ return 0;
+}
+
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+
+ return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
+}
+
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_ath11k);
+ ab->debugfs_ath11k = NULL;
+}
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* All fw stats debugfs files are created under the per-pdev
+ * "fw_stats" directory.
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
+
+ init_completion(&ar->debug.fw_stats_complete);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {0};
+ int i, ret;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ } else {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
+ filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %08x\n",
+ ar->debug.pktlog_filter,
+ ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open,
+};
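As a usage note (values illustrative): writing "0x6f 1" to pktlog_filter requests every event type — 0x6f matches the ATH11K_PKTLOG_ANY mask defined in debugfs.h below — in lite mode (ATH11K_PKTLOG_MODE_LITE), matching the "0x%x %u" format that the write handler's sscanf() expects; writing "0x0 0" takes the disable path.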
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open,
+};
+
+int ath11k_debugfs_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ /* Create a symlink under ieee80211/phy* */
+ snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debugfs_htt_stats_init(ar);
+
+ ath11k_debugfs_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
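ath11k_debugfs_unregister() is intentionally empty here: the per-pdev dentries created in ath11k_debugfs_register() live under ab->debugfs_soc, so they are presumably torn down by the debugfs_remove_recursive() call in ath11k_debugfs_pdev_destroy(), and the ieee80211/phy* symlink goes away with the wiphy's own debugfs directory.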
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
new file mode 100644
index 000000000000..e5346af71f24
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_H_
+#define _ATH11K_DEBUGFS_H_
+
+#include "hal_tx.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[];
+};
+
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debugfs_soc_create(struct ath11k_base *ab);
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_register(struct ath11k *ar);
+void ath11k_debugfs_unregister(struct ath11k *ar);
+void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+#else
+static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_H_ */
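Note the stub pattern used here: when CONFIG_ATH11K_DEBUGFS is disabled, the #else branch supplies empty static inlines (or 0/false returns) with identical signatures, so callers throughout the driver can invoke these helpers unconditionally and the compiler simply folds them away.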
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 6b532dc99c98..9191ffa081c2 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -8,7 +8,7 @@
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
-#include "debug_htt_stats.h"
+#include "debugfs_htt_stats.h"
#define HTT_DBG_OUT(buf, len, fmt, ...) \
scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
@@ -3895,50 +3895,6 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
}
}
-static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
- struct debug_htt_stats_req *stats_req)
-{
- u8 *buf = stats_req->buf;
- u32 len = stats_req->buf_len;
- u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- u32 tlv_len = 0, i = 0, word_len = 0;
-
- tlv_len = FIELD_GET(HTT_TLV_LEN, *tag_buf) + HTT_TLV_HDR_LEN;
- word_len = (tlv_len % 4) == 0 ? (tlv_len / 4) : ((tlv_len / 4) + 1);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================================");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HKDBG TLV DUMP: (tag_len=%u bytes, words=%u)",
- tlv_len, word_len);
-
- for (i = 0; i + 3 < word_len; i += 4) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "0x%08x 0x%08x 0x%08x 0x%08x",
- tag_buf[i], tag_buf[i + 1],
- tag_buf[i + 2], tag_buf[i + 3]);
- }
-
- if (i + 3 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ",
- tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]);
- } else if (i + 2 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x ",
- tag_buf[i], tag_buf[i + 1]);
- } else if (i + 1 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x ",
- tag_buf[i]);
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================================");
-
- if (len >= buf_len)
- buf[buf_len - 1] = 0;
- else
- buf[len] = 0;
-
- stats_req->buf_len = len;
-}
-
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -4297,8 +4253,8 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
return 0;
}
-void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb)
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ath11k_htt_extd_stats_msg *msg;
struct debug_htt_stats_req *stats_req;
@@ -4446,7 +4402,7 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
return 0;
}
-int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
{
struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
u8 type = stats_req->type;
@@ -4520,7 +4476,7 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = type;
- ret = ath11k_dbg_htt_stats_req(ar);
+ ret = ath11k_debugfs_htt_stats_req(ar);
if (ret < 0)
goto out;
@@ -4630,7 +4586,7 @@ static const struct file_operations fops_htt_stats_reset = {
.llseek = default_llseek,
};
-void ath11k_debug_htt_stats_init(struct ath11k *ar)
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
{
spin_lock_init(&ar->debug.htt_stats.lock);
debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 682a6ff222bd..74b2086eed9d 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1660,8 +1660,6 @@ struct htt_pdev_obss_pd_stats_tlv {
u32 num_obss_tx_ppdu_failure;
};
-void ath11k_debug_htt_stats_init(struct ath11k *ar);
-
struct htt_ring_backpressure_stats_tlv {
u32 pdev_id;
u32 current_head_idx;
@@ -1687,4 +1685,29 @@ struct htt_ring_backpressure_stats_tlv {
u32 backpressure_hist[5];
};
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+static inline void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 7308ed254232..270c0edbb10f 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -5,16 +5,16 @@
#include <linux/vmalloc.h>
+#include "debugfs_sta.h"
#include "core.h"
#include "peer.h"
#include "debug.h"
#include "dp_tx.h"
-#include "debug_htt_stats.h"
+#include "debugfs_htt_stats.h"
-void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx)
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath11k_htt_tx_stats *tx_stats;
@@ -125,9 +125,9 @@ ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
tx_stats->tx_duration += peer_stats->duration;
}
-void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
@@ -200,7 +200,8 @@ void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
arsta->txrate.nss = arsta->last_txrate.nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
- ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
err_out:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
@@ -428,7 +429,7 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
- ret = ath11k_dbg_htt_stats_req(ar);
+ ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
if (ret < 0)
goto out;
@@ -820,15 +821,15 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
-void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct dentry *dir)
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath11k *ar = hw->priv;
- if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
- if (ath11k_debug_is_extd_rx_stats_enabled(ar))
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
debugfs_create_file("rx_stats", 0400, dir, sta,
&fops_rx_stats);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
new file mode 100644
index 000000000000..18dc65d9edcf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_STA_H_
+#define _ATH11K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+#include "hal_tx.h"
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_debugfs_sta_op_add NULL
+
+static inline void
+ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_STA_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 1d64c3c51ac9..59dd185a0cfc 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -7,6 +7,7 @@
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
+#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
@@ -106,13 +107,120 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
ring->vaddr_unaligned = NULL;
}
+static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
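For illustration (mask values hypothetical): with grp_mask = {0x3, 0xc}, ring 2 carries mask BIT(2) = 0x4, which is found in grp_mask[1], so the ring is serviced by ext IRQ group 1; a ring whose bit appears in no group's mask yields -ENOENT and ends up without an MSI mapping.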
+
+static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
+ } else if (ring_num == 3) {
+ grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ return -ENOENT;
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params.ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params.ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
+ break;
+ case HAL_RXDMA_DST:
+ grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
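As a worked example (numbers hypothetical): with msi_data_count = 3 and msi_data_start = 1, a ring in ext group 5 gets msi_data = (5 % 3) + 1 = 3 — the same vector as a ring in group 2, which is exactly the sharing condition the "multiple msi_groups share one msi" message above reports. The 64-bit MSI address is then assembled from the low/high halves returned by ath11k_get_msi_address().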
+
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
struct hal_srng_params params = { 0 };
- int entry_sz = ath11k_hal_srng_get_entrysize(type);
- int max_entries = ath11k_hal_srng_get_max_entries(type);
+ int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
if (max_entries < 0 || entry_sz < 0)
@@ -135,6 +243,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
params.ring_base_vaddr = ring->vaddr;
params.ring_base_paddr = ring->paddr;
params.num_entries = num_entries;
+ ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
switch (type) {
case HAL_REO_DST:
@@ -159,7 +268,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
break;
}
/* follow through when ring_num >= 3 */
- /* fall through */
+ fallthrough;
case HAL_REO_EXCEPTION:
case HAL_REO_REINJECT:
case HAL_REO_CMD:
@@ -195,11 +304,25 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
return 0;
}
+static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
+
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
+}
+
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
+ ath11k_dp_stop_shadow_timers(ab);
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
@@ -265,6 +388,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
ath11k_hal_tx_init_data_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
+ ATH11K_SHADOW_DP_TIMER_INTERVAL,
+ dp->tx_ring[i].tcl_data_ring.ring_id);
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
@@ -300,6 +427,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
ath11k_hal_reo_init_cmd_ring(ab, srng);
+ ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ dp->reo_cmd_ring.ring_id);
+
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE);
if (ret) {
@@ -367,7 +498,7 @@ static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
u32 end_offset;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
- ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
+ ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
@@ -565,7 +696,7 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
return ret;
/* Setup link desc idle list for HW internal usage */
- entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
+ entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */
@@ -622,16 +753,16 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
struct napi_struct *napi = &irq_grp->napi;
int grp_id = irq_grp->grp_id;
int work_done = 0;
- int i = 0;
+ int i = 0, j;
int tot_work_done = 0;
- while (ath11k_tx_ring_mask[grp_id] >> i) {
- if (ath11k_tx_ring_mask[grp_id] & BIT(i))
+ while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
+ if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
ath11k_dp_tx_completion_handler(ab, i);
i++;
}
- if (ath11k_rx_err_ring_mask[grp_id]) {
+ if (ab->hw_params.ring_mask->rx_err[grp_id]) {
work_done = ath11k_dp_process_rx_err(ab, napi, budget);
budget -= work_done;
tot_work_done += work_done;
@@ -639,7 +770,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
+ if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
work_done = ath11k_dp_rx_process_wbm_err(ab,
napi,
budget);
@@ -650,8 +781,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (ath11k_rx_ring_mask[grp_id]) {
- i = fls(ath11k_rx_ring_mask[grp_id]) - 1;
+ if (ab->hw_params.ring_mask->rx[grp_id]) {
+ i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
work_done = ath11k_dp_process_rx(ab, i, napi,
budget);
budget -= work_done;
@@ -660,41 +791,51 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (rx_mon_status_ring_mask[grp_id]) {
- for (i = 0; i < ab->num_radios; i++) {
- if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
- work_done =
- ath11k_dp_rx_process_mon_rings(ab,
- i, napi,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
+ BIT(id)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ id,
+ napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
}
- if (budget <= 0)
- goto done;
}
}
- if (ath11k_reo_status_ring_mask[grp_id])
+ if (ab->hw_params.ring_mask->reo_status[grp_id])
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
- if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
- work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
- budget -= work_done;
- tot_work_done += work_done;
- }
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
- if (budget <= 0)
- goto done;
+ if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
- if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
- struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ if (budget <= 0)
+ goto done;
- ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
- HAL_RX_BUF_RBM_SW3_BM,
- GFP_ATOMIC);
+ if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
+ struct ath11k *ar = ath11k_ab_to_ar(ab, id);
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
}
}
/* TODO: Implement handler for other interrupts */
@@ -709,10 +850,12 @@ void ath11k_dp_pdev_free(struct ath11k_base *ab)
struct ath11k *ar;
int i;
+ del_timer_sync(&ab->mon_reap_timer);
+
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ath11k_dp_rx_pdev_free(ab, i);
- ath11k_debug_unregister(ar);
+ ath11k_debugfs_unregister(ar);
ath11k_dp_rx_pdev_mon_detach(ar);
}
}
@@ -722,6 +865,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
struct ath11k *ar;
struct ath11k_pdev_dp *dp;
int i;
+ int j;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -731,8 +875,10 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
- idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
- spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
+ }
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
}
@@ -797,13 +943,20 @@ int ath11k_dp_htt_connect(struct ath11k_dp *dp)
static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
- /* For STA mode, enable address search index,
- * tcl uses ast_hash value in the descriptor.
+ /* When v2_map_support is true: for STA mode, enable the address
+ * search index; TCL uses the ast_hash value in the descriptor.
+ * When v2_map_support is false: for STA mode, don't enable the
+ * address search index.
*/
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
- arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ } else {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ }
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
@@ -935,3 +1088,78 @@ fail_link_desc_cleanup:
return ret;
}
+
+static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
+{
+ struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
+ t, timer);
+ struct ath11k_base *ab = update_timer->ab;
+ struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ /* When the timer fires, the handler checks whether new TX has
+ * happened. It updates the HP only when there were no TX
+ * operations during the timeout interval, and then stops the
+ * timer. The timer is started again when TX resumes.
+ */
+ if (update_timer->timer_tx_num != update_timer->tx_num) {
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+ } else {
+ update_timer->started = false;
+ ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
+ }
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num++;
+
+ if (update_timer->started)
+ return;
+
+ update_timer->started = true;
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+}
+
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ if (!update_timer->init)
+ return;
+
+ del_timer_sync(&update_timer->timer);
+}
+
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num = 0;
+ update_timer->timer_tx_num = 0;
+ update_timer->ab = ab;
+ update_timer->ring_id = ring_id;
+ update_timer->interval = interval;
+ update_timer->init = true;
+ timer_setup(&update_timer->timer,
+ ath11k_dp_shadow_timer_handler, 0);
+}
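ath11k_dp_shadow_start_timer() asserts srng->lock via lockdep, so the expected usage is to bump the timer under the ring lock immediately after queueing work; the handler above then flushes the shadow HP only once the ring has been idle for a full interval. A hypothetical caller sketch (the real call sites are in the TX path; names here are illustrative):

/* Hypothetical TX-path caller, not part of this patch. */
static void example_tx_enqueue(struct ath11k_base *ab, struct hal_srng *srng,
			       struct ath11k_hp_update_timer *t)
{
	spin_lock_bh(&srng->lock);
	/* ... write the TX descriptor into the ring ... */
	ath11k_dp_shadow_start_timer(ab, srng, t);
	spin_unlock_bh(&srng->lock);
}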
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 7587862d2e32..ee8db812589b 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -8,6 +8,8 @@
#include "hal_rx.h"
+#define MAX_RXDMA_PER_PDEV 2
+
struct ath11k_base;
struct ath11k_peer;
struct ath11k_dp;
@@ -38,6 +40,7 @@ struct dp_rx_tid {
#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+#define DP_MON_SERVICE_BUDGET 128
struct dp_reo_cache_flush_elem {
struct list_head list;
@@ -142,12 +145,13 @@ struct ath11k_pdev_dp {
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
- struct dp_srng rxdma_err_dst_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_mon_dst_ring;
struct dp_srng rxdma_mon_desc_ring;
struct dp_rxdma_ring rxdma_mon_buf_ring;
- struct dp_rxdma_ring rx_mon_status_refill_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ieee80211_rx_status rx_status;
struct ath11k_mon_data mon_data;
};
@@ -202,6 +206,20 @@ struct ath11k_pdev_dp {
#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+struct ath11k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath11k_base *ab;
+};
+
struct ath11k_dp {
struct ath11k_base *ab;
enum ath11k_htc_ep_id eid;
@@ -231,6 +249,8 @@ struct ath11k_dp {
* - reo_cmd_cache_flush_count
*/
spinlock_t reo_cmd_lock;
+ struct ath11k_hp_update_timer reo_cmd_timer;
+ struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
};
/* HTT definitions */
@@ -494,7 +514,7 @@ struct htt_ppdu_stats_cfg_cmd {
} __packed;
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
-#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
enum htt_ppdu_stats_tag_type {
@@ -936,11 +956,13 @@ struct htt_rx_ring_tlv_filter {
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x1e,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
@@ -1610,5 +1632,13 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc);
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 791d971784ce..01625327eef7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -9,6 +9,8 @@
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
@@ -260,12 +262,23 @@ static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}
+static void ath11k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+}
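This timer is self-rearming: each run reaps every monitor status ring with a fixed DP_MON_SERVICE_BUDGET and then re-queues itself ATH11K_MON_TIMER_INTERVAL milliseconds out. It is only armed for targets without rxdma1 (see the rxdma1_enable check later in this file), where no dedicated interrupt services these rings.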
+
/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp)
+ enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
@@ -312,7 +325,7 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
- rx_ring->bufs_max * 3, gfp);
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id < 0)
goto fail_dma_unmap;
@@ -375,7 +388,13 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
- rx_ring = &dp->rx_mon_status_refill_ring;
+ /* if rxdma1_enable is false, mon_status_refill_ring
+ * isn't set up, so there is nothing to clean.
+ */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
+ rx_ring = &dp->rx_mon_status_refill_ring[0];
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
@@ -390,21 +409,27 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
+
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
- rx_ring = &dp->rx_mon_status_refill_ring;
- ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ }
+
return 0;
}
@@ -416,26 +441,32 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
int num_entries;
num_entries = rx_ring->refill_buf_ring.size /
- ath11k_hal_srng_get_entrysize(ringtype);
+ ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
rx_ring->bufs_max = num_entries;
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
- HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+ HAL_RX_BUF_RBM_SW3_BM);
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
- rx_ring = &dp->rxdma_mon_buf_ring;
- ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ if (ar->ab->hw_params.rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ }
- rx_ring = &dp->rx_mon_status_refill_ring;
- ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ }
return 0;
}
@@ -443,11 +474,21 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ int i;
- ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ if (ab->hw_params.rx_mac_buf_ring)
+ ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+ ath11k_dp_srng_cleanup(ab,
+ &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
+ }
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
@@ -486,7 +527,9 @@ err_reo_cleanup:
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_srng *srng = NULL;
+ int i;
int ret;
ret = ath11k_dp_srng_setup(ar->ab,
@@ -498,24 +541,55 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
return ret;
}
- ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
- HAL_RXDMA_DST, 0, dp->mac_id,
- DP_RXDMA_ERR_DST_RING_SIZE);
- if (ret) {
- ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
- return ret;
+ if (ar->ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ dp->mac_id + i, 1024);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
}
- srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
- ret = ath11k_dp_srng_setup(ar->ab,
- srng,
- HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
- DP_RXDMA_MON_STATUS_RING_SIZE);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to setup rx_mon_status_refill_ring\n");
- return ret;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, dp->mac_id + i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
}
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ /* If rxdma1_enable is false, there is no need to set up
+ * rxdma_mon_buf_ring, rxdma_mon_dst_ring and
+ * rxdma_mon_desc_ring; only init the reap timer (QCA6390).
+ */
+ if (!ar->ab->hw_params.rxdma1_enable) {
+ /* init mon status buffer reap timer */
+ timer_setup(&ar->ab->mon_reap_timer,
+ ath11k_dp_service_mon_ring, 0);
+ return 0;
+ }
+
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
@@ -1377,9 +1451,8 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
- if (ath11k_debug_is_extd_tx_stats_enabled(ar))
- ath11k_accumulate_per_peer_tx_stats(arsta,
- peer_stats, rate_idx);
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
}
spin_unlock_bh(&ab->base_lock);
@@ -1421,7 +1494,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
}
spin_unlock_bh(&ar->data_lock);
- ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
@@ -1455,7 +1528,7 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
goto exit;
}
- if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
@@ -1577,11 +1650,23 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
resp->peer_map_ev.info2);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
resp->peer_unmap_ev.info);
ath11k_peer_unmap_event(ab, peer_id);
@@ -1590,7 +1675,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
ath11k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
- ath11k_dbg_htt_ext_stats_handler(ab, skb);
+ ath11k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_PKTLOG:
ath11k_htt_pktlog(ab, skb);
@@ -2065,8 +2150,6 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
mcast = is_multicast_ether_addr(hdr->addr1);
fill_crypto_hdr = mcast;
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
-
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
if (peer) {
@@ -2080,6 +2163,8 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
spin_unlock_bh(&ar->ab->base_lock);
err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
/* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
@@ -2282,6 +2367,9 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
/* TODO: trace rx packet */
ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
@@ -2526,7 +2614,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
@@ -2608,7 +2696,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
struct dp_rxdma_ring *rx_ring,
- int *buf_id, gfp_t gfp)
+ int *buf_id)
{
struct sk_buff *skb;
dma_addr_t paddr;
@@ -2633,7 +2721,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
spin_lock_bh(&rx_ring->idr_lock);
*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
- rx_ring->bufs_max, gfp);
+ rx_ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (*buf_id < 0)
goto fail_dma_unmap;
@@ -2653,8 +2741,7 @@ fail_alloc_skb:
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp)
+ enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
@@ -2680,7 +2767,7 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
while (num_remain > 0) {
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
- &buf_id, gfp);
+ &buf_id);
if (!skb)
break;
paddr = ATH11K_SKB_RXCB(skb)->paddr;
@@ -2719,20 +2806,25 @@ fail_desc_get:
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ struct dp_rxdma_ring *rx_ring;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb;
struct hal_tlv_hdr *tlv;
u32 cookie;
- int buf_id;
+ int buf_id, srng_id;
dma_addr_t paddr;
u8 rbm;
int num_buffs_reaped = 0;
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ dp = &ar->dp;
+ srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
@@ -2786,7 +2878,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
- &buf_id, GFP_ATOMIC);
+ &buf_id);
if (!skb) {
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
@@ -2813,7 +2905,7 @@ move_next:
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
enum hal_rx_mon_status hal_status;
struct sk_buff *skb;
struct sk_buff_head skb_list;
@@ -2833,7 +2925,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
- if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
+ if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
@@ -2861,7 +2953,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
- if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
spin_unlock_bh(&ab->base_lock);
@@ -3599,7 +3691,7 @@ exit:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
return tot_n_bufs_reaped;
@@ -3709,8 +3801,7 @@ static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
* instead, it is good to drop such packets in mac80211
* after incrementing the replay counters.
*/
-
- /* fall through */
+ fallthrough;
default:
/* TODO: Review other errors and process them to mac80211
* as appropriate.
@@ -3820,7 +3911,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
int total_num_buffs_reaped = 0;
int ret, i;
- for (i = 0; i < MAX_RADIOS; i++)
+ for (i = 0; i < ab->num_radios; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
@@ -3896,7 +3987,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
rcu_read_lock();
@@ -3923,9 +4014,9 @@ done:
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
- struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
+ struct ath11k *ar;
+ struct dp_srng *err_ring;
+ struct dp_rxdma_ring *rx_ring;
struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
struct hal_srng *srng;
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
@@ -3944,6 +4035,11 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
int i;
int buf_id;
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
+ mac_id)];
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
srng = &ab->hal.srng_list[err_ring->ring_id];
spin_lock_bh(&srng->lock);
@@ -4000,7 +4096,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
if (num_buf_freed)
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
return budget - quota;
}
@@ -4097,6 +4193,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
u32 ring_id;
+ int i;
int ret;
ret = ath11k_dp_rx_pdev_srng_alloc(ar);
@@ -4119,14 +4216,33 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
return ret;
}
- ring_id = dp->rxdma_err_dst_ring.ring_id;
- ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
- if (ret) {
- ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
- ret);
- return ret;
+ if (ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
}
+ if (!ab->hw_params.rxdma1_enable)
+ goto config_refill_ring;
+
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id, HAL_RXDMA_MONITOR_BUF);
@@ -4151,15 +4267,20 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
ret);
return ret;
}
- ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
- HAL_RXDMA_MONITOR_STATUS);
- if (ret) {
- ath11k_warn(ab,
- "failed to configure mon_status_refill_ring %d\n",
- ret);
- return ret;
+
+config_refill_ring:
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
}
+
return 0;
}
@@ -4185,8 +4306,13 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
void *src_srng_desc;
int ret = 0;
- dp_srng = &dp->rxdma_mon_desc_ring;
- hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ if (ar->ab->hw_params.rxdma1_enable) {
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ } else {
+ dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ }
ath11k_hal_srng_access_begin(ar->ab, hal_srng);
@@ -4210,16 +4336,16 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
static
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
dma_addr_t *paddr, u32 *sw_cookie,
+ u8 *rbm,
void **pp_buf_addr_info)
{
struct hal_rx_msdu_link *msdu_link =
(struct hal_rx_msdu_link *)rx_msdu_link_desc;
struct ath11k_buffer_addr *buf_addr_info;
- u8 rbm = 0;
buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
- ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
*pp_buf_addr_info = (void *)buf_addr_info;
}
@@ -4330,7 +4456,7 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu, u32 *npackets,
u32 *ppdu_id)
@@ -4355,9 +4481,15 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
struct hal_reo_entrance_ring *ent_desc =
(struct hal_reo_entrance_ring *)ring_entry;
int buf_id;
+ u32 rx_link_buf_info[2];
+ u8 rbm;
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ rx_ring = &dp->rx_refill_buf_ring;
ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
- &sw_cookie, &p_last_buf_addr_info,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
&msdu_cnt);
if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
@@ -4383,9 +4515,14 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
return rx_bufs_used;
}
- rx_msdu_link_desc =
- (void *)pmon->link_desc_banks[sw_cookie].vaddr +
- (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ if (ar->ab->hw_params.rxdma1_enable)
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ else
+ rx_msdu_link_desc =
+ (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
&num_msdus);
@@ -4481,15 +4618,22 @@ next_msdu:
spin_unlock_bh(&rx_ring->idr_lock);
}
+ ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
+
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
- &sw_cookie,
+ &sw_cookie, &rbm,
&p_buf_addr_info);
- if (ath11k_dp_rx_monitor_link_desc_return(ar,
- p_last_buf_addr_info,
- dp->mac_id))
- ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "dp_rx_monitor_link_desc_return failed");
+ if (ar->ab->hw_params.rxdma1_enable) {
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+ } else {
+ ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
p_last_buf_addr_info = p_buf_addr_info;
@@ -4673,8 +4817,8 @@ mon_deliver_fail:
return -EINVAL;
}
-static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
- struct napi_struct *napi)
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -4682,10 +4826,16 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
void *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
+ u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
- mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ if (ar->ab->hw_params.rxdma1_enable)
+ ring_id = dp->rxdma_mon_dst_ring.ring_id;
+ else
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
if (!mon_dst_srng) {
ath11k_warn(ar->ab,
@@ -4708,7 +4858,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
head_msdu = NULL;
tail_msdu = NULL;
- rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
+ rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
@@ -4735,15 +4885,21 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
- ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
- &dp->rxdma_mon_buf_ring,
- rx_bufs_used,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ if (ar->ab->hw_params.rxdma1_enable)
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ else
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rx_refill_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
}
}
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- u32 quota,
+ int mac_id, u32 quota,
struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
@@ -4767,7 +4923,7 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
dev_kfree_skb_any(status_skb);
@@ -4777,15 +4933,15 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
int num_buffs_reaped = 0;
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
&pmon->rx_status_q);
if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
+ ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
return num_buffs_reaped;
}
@@ -4793,7 +4949,7 @@ static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
@@ -4832,9 +4988,15 @@ int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
return ret;
}
+	/* if rxdma1_enable is false, there is no need to set up
+	 * rxdma_mon_desc_ring.
+	 */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
dp_srng = &dp->rxdma_mon_desc_ring;
n_link_desc = dp_srng->size /
- ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
+ ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
mon_desc_srng =
&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
@@ -4848,6 +5010,7 @@ int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index 88bbcae14e34..fbea45f79c9b 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -74,8 +74,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp);
+ enum hal_rx_buf_return_buf_manager mgr);
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
@@ -87,8 +86,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp);
+ enum hal_rx_buf_return_buf_manager mgr);
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 1af76775b1a8..3d962eee4d61 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -6,6 +6,7 @@
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
+#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
@@ -13,8 +14,12 @@ static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath11k_base *ab = arvif->ar->ab;
- if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ return HAL_TCL_ENCAP_TYPE_RAW;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
@@ -79,6 +84,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct ath11k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -93,7 +99,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
@@ -110,7 +116,12 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
tcl_ring_sel:
tcl_ring_retry = false;
- ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+	/* Some chips can transmit only on TCL ring 0 */
+ if (ar->ab->hw_params.tcl_0_only)
+ ti.ring_id = 0;
+ else
+ ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+
ring_map |= BIT(ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
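The hunk above gates ring selection on a tcl_0_only capability. A minimal sketch of the resulting policy (illustrative, not from the patch; num_tcl_rings stands in for DP_TCL_NUM_RING_MAX):

#include <linux/types.h>

static u32 sketch_select_tcl_ring(bool tcl_0_only, u32 ring_selector,
				  u32 num_tcl_rings)
{
	/* chips limited to TCL0 always use ring 0; the rest round-robin */
	return tcl_0_only ? 0 : ring_selector % num_tcl_rings;
}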
@@ -137,11 +148,17 @@ tcl_ring_sel:
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
ti.meta_data_flags = arvif->tcl_metadata;
- if (info->control.hw_key)
- ti.encrypt_type =
- ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
- else
- ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
+ if (key) {
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(key->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ }
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
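The raw-mode branch above reserves tail space on protected frames, apparently so the descriptor length covers the 8-byte CCMP MIC that the hardware appends. A standalone sketch of that step (illustrative, not from the patch; assumes the skb has tailroom):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static void sketch_reserve_ccmp_mic(struct sk_buff *skb,
				    struct ieee80211_hdr *hdr)
{
	if (ieee80211_has_protected(hdr->frame_control))
		skb_put(skb, IEEE80211_CCMP_MIC_LEN);	/* MIC is 8 bytes */
}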
@@ -151,7 +168,8 @@ tcl_ring_sel:
ti.bss_ast_hash = arvif->ast_hash;
ti.dscp_tid_tbl_idx = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@@ -171,10 +189,11 @@ tcl_ring_sel:
ath11k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
- /* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
- * is not applicable, hence manual checksum calculation using
- * skb_checksum_help() is needed
- */
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+ break;
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
@@ -221,7 +240,8 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+ if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) &&
+ !ar->ab->hw_params.tcl_0_only) {
tcl_ring_retry = true;
ring_selector++;
}
@@ -234,8 +254,13 @@ tcl_ring_sel:
ath11k_hal_srng_access_end(ab, tcl_ring);
+ ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+
spin_unlock_bh(&tcl_ring->lock);
+ ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
atomic_inc(&ar->dp.num_tx_pending);
return 0;
@@ -346,7 +371,6 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
status_desc->info0);
-
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
@@ -436,7 +460,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
@@ -444,12 +468,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ar->cached_ppdu_id == ar->last_ppdu_id) {
ar->cached_ppdu_id = ar->last_ppdu_id;
ar->cached_stats.is_ampdu = true;
- ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
} else {
ar->cached_stats.is_ampdu = false;
- ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
}
@@ -514,6 +538,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
u32 msdu_id;
u8 mac_id;
+ spin_lock_bh(&status_ring->lock);
+
ath11k_hal_srng_access_begin(ab, status_ring);
while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
@@ -533,6 +559,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
ath11k_hal_srng_access_end(ab, status_ring);
+ spin_unlock_bh(&status_ring->lock);
+
while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
struct hal_wbm_release_ring *tx_status;
u32 desc_id;
@@ -633,14 +661,28 @@ ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
switch (ring_type) {
case HAL_RXDMA_BUF:
lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
- if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
- lmac_ring_id_offset) ||
- ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
- lmac_ring_id_offset))) {
- ret = -EINVAL;
+
+	/* For QCA6390, the host supplies rx buffers to the fw and the fw
+	 * fills the rxbuf ring for each rxdma
+	 */
+ if (!ab->hw_params.rx_mac_buf_ring) {
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
}
- *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_DST:
*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
@@ -720,7 +762,7 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT;
- ret = ath11k_hal_srng_get_entrysize(ring_type);
+ ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
if (ret < 0)
goto err_free;
@@ -750,9 +792,9 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
- cmd->ring_msi_addr_lo = 0;
- cmd->ring_msi_addr_hi = 0;
- cmd->msi_data = 0;
+ cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
+ cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff;
+ cmd->msi_data = params.msi_data;
cmd->intr_info = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
@@ -768,6 +810,15 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
params.low_threshold);
}
+	ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
+ __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data);
+
+	ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
+ ring_id, ring_type, cmd->intr_info, cmd->info2);
+
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
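The hunk above starts programming real MSI parameters instead of zeros. A sketch of the 64-bit address split into the lo/hi setup fields (illustrative, not from the patch):

#include <linux/types.h>

static void sketch_fill_msi_addr(u64 msi_addr, u32 *lo, u32 *hi)
{
	*lo = msi_addr & 0xffffffff;
	*hi = (msi_addr >> 32) & 0xffffffff;
}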
@@ -832,24 +883,27 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
int len = sizeof(*cmd);
u8 pdev_mask;
int ret;
-
- skb = ath11k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put(skb, len);
- cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
- cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
- HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
-
- pdev_mask = 1 << (ar->pdev_idx);
- cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
- cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
-
- ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
- if (ret) {
- dev_kfree_skb_any(skb);
- return ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (i + 1);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
}
return 0;
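The loop above now sends one PPDU stats config message per rxdma, and the mask suggests firmware pdev ids are treated as 1-based here. A sketch of the mask computation (illustrative, not from the patch):

#include <linux/types.h>

static u8 sketch_ppdu_stats_pdev_mask(int rxdma_idx)
{
	/* bit 0 is skipped; pdev ids appear to start at 1 */
	return 1 << (rxdma_idx + 1);
}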
@@ -968,8 +1022,9 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
- int ret = 0, ring_id = 0;
+ int ret = 0, ring_id = 0, i;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
@@ -991,23 +1046,44 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
}
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
- HAL_RXDMA_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
+ if (ab->hw_params.rxdma1_enable) {
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ } else if (!reset) {
+		/* configure the filter in monitor mode only */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_BUF,
+ 1024,
+ &tlv_filter);
+ }
+ }
+
if (ret)
return ret;
- ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- if (!reset)
- tlv_filter.rx_filter =
- HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- else
- tlv_filter = ath11k_mac_mon_status_filter_default;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset)
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ else
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
return ret;
}
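When rxdma1 is absent, the code above arms a reap timer instead of relying on a dedicated monitor ring interrupt. A sketch of the self-rearming timer pattern (illustrative, not from the patch; the interval and names are assumptions):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define SKETCH_MON_TIMER_INTERVAL_MS 10	/* assumed interval */

static void sketch_mon_reap_timer(struct timer_list *t)
{
	/* reap the monitor status rings here, then re-arm */
	mod_timer(t, jiffies + msecs_to_jiffies(SKETCH_MON_TIMER_INTERVAL_MS));
}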
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index d63785178afa..9904c0eb7587 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -8,7 +8,7 @@
#include "hal_desc.h"
#include "hif.h"
-static const struct hal_srng_config hw_srng_config[] = {
+static const struct hal_srng_config hw_srng_config_template[] = {
/* TODO: max_rings can be populated by querying HW capabilities */
{ /* REO_DST */
.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
@@ -16,14 +16,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
- },
- .reg_size = {
- HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
- HAL_REO2_RING_HP - HAL_REO1_RING_HP,
- },
.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_EXCEPTION */
@@ -36,10 +28,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
- },
.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_REINJECT */
@@ -48,10 +36,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
- },
.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_CMD */
@@ -61,10 +45,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_reo_get_queue_stats)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
- },
.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_STATUS */
@@ -74,11 +54,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO_STATUS_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
- },
.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_DATA */
@@ -88,14 +63,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_data_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
- },
- .reg_size = {
- HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
- HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
- },
.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_CMD */
@@ -105,10 +72,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_gse_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
- },
.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_STATUS */
@@ -118,11 +81,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_status_ring)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG +
- HAL_TCL_STATUS_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
- },
.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_SRC */
@@ -344,7 +302,7 @@ static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
struct hal_srng *srng, int ring_num)
{
- const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
u32 addr;
u32 val;
@@ -371,33 +329,33 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
- HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
- (u32)srng->msi_addr);
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
- HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
ath11k_hif_write32(ab,
- reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
- ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
/* interrupt setup */
val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
@@ -408,21 +366,21 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
srng->entry_size));
ath11k_hif_write32(ab,
- reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
val);
hp_addr = hal->rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)hal->rdp.vaddr);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
hp_addr & HAL_ADDR_LSB_REG_MASK);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath11k_hif_write32(ab, reg_base, 0);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
*srng->u.dst_ring.hp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
@@ -435,7 +393,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
val |= HAL_REO1_RING_MISC_MSI_SWAP;
val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
@@ -450,33 +408,33 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
- (u32)srng->msi_addr);
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
val);
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_DATA_OFFSET,
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
- ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
@@ -490,7 +448,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
srng->entry_size));
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
val);
val = 0;
@@ -499,7 +457,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
srng->u.src_ring.low_threshold);
}
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
val);
if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
@@ -507,10 +465,10 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)hal->rdp.vaddr);
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
tp_addr & HAL_ADDR_LSB_REG_MASK);
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
}
@@ -534,7 +492,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
@@ -550,7 +508,7 @@ static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
enum hal_ring_type type,
int ring_num, int mac_id)
{
- const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
int ring_id;
if (ring_num >= srng_config->max_rings) {
@@ -568,26 +526,26 @@ static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
return ring_id;
}
-int ath11k_hal_srng_get_entrysize(u32 ring_type)
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
- srng_config = &hw_srng_config[ring_type];
+ srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->entry_size << 2);
}
-int ath11k_hal_srng_get_max_entries(u32 ring_type)
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
- srng_config = &hw_srng_config[ring_type];
+ srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->max_size / srng_config->entry_size);
}
@@ -602,6 +560,8 @@ void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
params->intr_batch_cntr_thres_entries =
srng->intr_batch_cntr_thres_entries;
params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi_data = srng->msi_data;
params->flags = srng->flags;
}
@@ -1003,7 +963,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params)
{
struct ath11k_hal *hal = &ab->hal;
- const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
struct hal_srng *srng;
int ring_id;
u32 lmac_idx;
@@ -1027,6 +987,8 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi_data = params->msi_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
@@ -1058,8 +1020,16 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
- srng->u.src_ring.hp_addr =
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
(u32 *)((unsigned long)ab->mem + reg_base);
+ else
+			ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
}
} else {
/* During initialization loop count in all the descriptors
@@ -1083,9 +1053,18 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
- srng->u.dst_ring.tp_addr =
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
(u32 *)((unsigned long)ab->mem + reg_base +
- (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
+ else
+				ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + (HAL_REO1_RING_TP(ab) -
+ HAL_REO1_RING_HP(ab)),
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
}
}
@@ -1102,6 +1081,162 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
return ring_id;
}
+static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+	/* update the hp/tp address in the hal structure */
+ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+	ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
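A sketch of the target-register arithmetic above (illustrative, not from the patch): reg_start[1]/reg_size[1] hold the HP register address and per-ring stride, and a destination ring shadows its TP, which sits 4 bytes past HP:

#include <linux/types.h>

static u32 sketch_shadow_target_reg(u32 hp_reg_start, u32 hp_reg_stride,
				    int ring_num, bool is_dst_ring)
{
	u32 target_reg = hp_reg_start + hp_reg_stride * ring_num;

	/* HAL_OFFSET_FROM_HP_TO_TP is 4 in this patch */
	return is_dst_ring ? target_reg + 4 : target_reg;
}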
+
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* update all the non-CE srngs. */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->lmac_ring)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath11k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+	/* check whether the ring is empty. Update the shadow
+	 * HP only when the ring isn't empty.
+	 */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath11k_hal_srng_access_end(ab, srng);
+}
+
+static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ return 0;
+}
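The function above replaces a const global table with a per-device copy. A sketch of the copy-then-patch pattern (illustrative, not from the patch; the structure layout is hypothetical):

#include <linux/slab.h>
#include <linux/types.h>

struct sketch_srng_cfg {
	u32 reg_start[2];
	u32 reg_size[2];
};

static const struct sketch_srng_cfg sketch_template[4];

static struct sketch_srng_cfg *sketch_create_config(void)
{
	/* writable copy; chip-specific offsets are patched in afterwards */
	return kmemdup(sketch_template, sizeof(sketch_template), GFP_KERNEL);
}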
+
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
@@ -1109,7 +1244,9 @@ int ath11k_hal_srng_init(struct ath11k_base *ab)
memset(hal, 0, sizeof(*hal));
- hal->srng_config = hw_srng_config;
+ ret = ath11k_hal_srng_create_config(ab);
+ if (ret)
+ goto err_hal;
ret = ath11k_hal_alloc_cont_rdp(ab);
if (ret)
@@ -1127,12 +1264,17 @@ err_free_cont_rdp:
err_hal:
return ret;
}
+EXPORT_SYMBOL(ath11k_hal_srng_init);
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
+ struct ath11k_hal *hal = &ab->hal;
+
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
}
+EXPORT_SYMBOL(ath11k_hal_srng_deinit);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
@@ -1142,10 +1284,10 @@ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
int i;
ath11k_err(ab, "Last interrupt received for each CE:\n");
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 780a3e11b609..1f1b29cd0aa3 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -31,8 +31,12 @@ struct ath11k_base;
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the address of shadow register x relative to bar0 */
-#define SHADOW_BASE_ADDRESS 0x00003024
-#define SHADOW_NUM_REGISTERS 36
+#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_NUM_REGS 36
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
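A quick worked check of the macro above (illustrative, not part of the patch): shadow register 5 resolves to 0x8fc + 4 * 5 = 0x910.

#include <linux/build_bug.h>

#define SKETCH_SHADOW_REG(x) (0x000008fc + (4 * (x)))

static_assert(SKETCH_SHADOW_REG(5) == 0x910);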
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
@@ -46,40 +50,47 @@ struct ath11k_base;
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
-#define HAL_TCL1_RING_BASE_LSB 0x00000510
-#define HAL_TCL1_RING_BASE_MSB 0x00000514
-#define HAL_TCL1_RING_ID 0x00000518
-#define HAL_TCL1_RING_MISC 0x00000520
-#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
-#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
-#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
-#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
-#define HAL_TCL1_RING_MSI1_DATA 0x00000560
-#define HAL_TCL2_RING_BASE_LSB 0x00000568
-#define HAL_TCL_RING_BASE_LSB 0x00000618
-
-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
- (HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
- (HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_ID_OFFSET \
- (HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
- (HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
- (HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MISC_OFFSET \
- (HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_lsb
+#define HAL_TCL1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_msb
+#define HAL_TCL1_RING_ID(ab) ab->hw_params.regs->hal_tcl1_ring_id
+#define HAL_TCL1_RING_MISC(ab) ab->hw_params.regs->hal_tcl1_ring_misc
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_lsb
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_msb
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_lsb
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_msb
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_data
+#define HAL_TCL2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl2_ring_base_lsb
+#define HAL_TCL_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl_ring_base_lsb
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab))
/* SW2TCL(x) R2 ring pointers (head/tail) address */
#define HAL_TCL1_RING_HP 0x00002000
@@ -91,7 +102,8 @@ struct ath11k_base;
(HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
/* TCL STATUS ring address */
-#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl_status_ring_base_lsb
#define HAL_TCL_STATUS_RING_HP 0x00002030
/* REO2SW(x) R0 ring configuration address */
@@ -100,51 +112,63 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
-#define HAL_REO1_RING_BASE_LSB 0x0000029c
-#define HAL_REO1_RING_BASE_MSB 0x000002a0
-#define HAL_REO1_RING_ID 0x000002a4
-#define HAL_REO1_RING_MISC 0x000002ac
-#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
-#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
-#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
-#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
-#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
-#define HAL_REO1_RING_MSI1_DATA 0x000002ec
-#define HAL_REO2_RING_BASE_LSB 0x000002f4
-#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
-#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
-#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
-#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
-
-#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
- (HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
- (HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MSI1_DATA_OFFSET \
- (HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_BASE_MSB_OFFSET \
- (HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
- (HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
- (HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
- (HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
+#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
+#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
+#define HAL_REO1_RING_MISC(ab) ab->hw_params.regs->hal_reo1_ring_misc
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_lsb
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_msb
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ab->hw_params.regs->hal_reo1_ring_producer_int_setup
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_lsb
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_msb
+#define HAL_REO1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_data
+#define HAL_REO2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo2_ring_base_lsb
+#define HAL_REO1_AGING_THRESH_IX_0(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_0
+#define HAL_REO1_AGING_THRESH_IX_1(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_1
+#define HAL_REO1_AGING_THRESH_IX_2(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_2
+#define HAL_REO1_AGING_THRESH_IX_3(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_3
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab) \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MISC_OFFSET(ab) \
+ (HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab))
/* REO2SW(x) R2 ring pointers (head/tail) address */
-#define HAL_REO1_RING_HP 0x00003038
-#define HAL_REO1_RING_TP 0x0000303c
-#define HAL_REO2_RING_HP 0x00003040
+#define HAL_REO1_RING_HP(ab) ab->hw_params.regs->hal_reo1_ring_hp
+#define HAL_REO1_RING_TP(ab) ab->hw_params.regs->hal_reo1_ring_tp
+#define HAL_REO2_RING_HP(ab) ab->hw_params.regs->hal_reo2_ring_hp
-#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+#define HAL_REO1_RING_TP_OFFSET(ab) (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))
/* REO2TCL R0 ring configuration address */
-#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
+#define HAL_REO_TCL_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_tcl_ring_base_lsb
/* REO2TCL R2 ring pointer (head/tail) address */
-#define HAL_REO_TCL_RING_HP 0x00003058
+#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
/* REO CMD R0 address */
#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
@@ -168,8 +192,9 @@ struct ath11k_base;
#define HAL_CE_DST_STATUS_RING_HP 0x00000408
/* REO status address */
-#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
-#define HAL_REO_STATUS_HP 0x00003070
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_status_ring_base_lsb
+#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
/* WBM Idle R0 address */
#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
@@ -458,6 +483,8 @@ struct hal_srng_params {
u32 flags;
u32 max_buffer_len;
u32 low_threshold;
+ dma_addr_t msi_addr;
+ u32 msi_data;
/* Add more params as needed */
};
@@ -839,7 +866,7 @@ struct ath11k_hal {
struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
/* SRNG configuration table */
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
/* Remote pointer memory for HW/FW updates */
struct {
@@ -859,7 +886,7 @@ struct ath11k_hal {
u8 current_blk_index;
/* shadow register configuration */
- u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
};
@@ -885,8 +912,8 @@ void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
u8 byte_swap_data);
void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
u32 ath11k_hal_ce_dst_status_get_length(void *buf);
-int ath11k_hal_srng_get_entrysize(u32 ring_type);
-int ath11k_hal_srng_get_max_entries(u32 ring_type);
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type);
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type);
void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
@@ -912,5 +939,12 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
-
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab);
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng);
#endif
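
These OFFSET macros now resolve through the per-chip register table instead of fixed constants. A worked example using the ipq8074_regs and qca6390_regs values added later in this patch (hw.c):

    HAL_REO1_RING_ID_OFFSET(ab)
        = HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab)
        = 0x000002a4 - 0x0000029c = 0x8   (IPQ8074)
        = 0x0000024c - 0x00000244 = 0x8   (QCA6390)

The relative layout inside a ring register block is the same on both chips; only the block's base address moves, which is exactly what the indirection captures.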
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 129c9e1efeb9..fac2396edf32 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -256,6 +256,8 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
break;
}
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
+
out:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
@@ -786,7 +788,7 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
memset(&params, 0, sizeof(params));
- entry_size = ath11k_hal_srng_get_entrysize(HAL_REO_CMD);
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
ath11k_hal_srng_get_params(ab, srng, &params);
entry = (u8 *)params.ring_base_vaddr;
@@ -813,13 +815,13 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
@@ -1195,7 +1197,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
u32 *sw_cookie, void **pp_buf_addr,
- u32 *msdu_cnt)
+ u8 *rbm, u32 *msdu_cnt)
{
struct hal_reo_entrance_ring *reo_ent_ring =
(struct hal_reo_entrance_ring *)rx_desc;
@@ -1217,6 +1219,8 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
*sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ buf_addr_info->info1);
*pp_buf_addr = (void *)buf_addr_info;
}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index c436191ae1e8..d464a270c049 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -321,7 +321,7 @@ void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
dma_addr_t *paddr, u32 *desc_bank);
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
dma_addr_t *paddr, u32 *sw_cookie,
- void **pp_buf_addr_info,
+ void **pp_buf_addr_info, u8 *rbm,
u32 *msdu_cnt);
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index 81937c29ffca..a755aa86c5de 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -141,7 +141,7 @@ void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
memset(&params, 0, sizeof(params));
- entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_TCL_DATA);
ath11k_hal_srng_get_params(ab, srng, &params);
desc = (u8 *)params.ring_base_vaddr;
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 165f7e51c238..dbe5568916e8 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -3,6 +3,9 @@
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
+#ifndef _HIF_H_
+#define _HIF_H_
+
#include "core.h"
struct ath11k_hif_ops {
@@ -16,6 +19,11 @@ struct ath11k_hif_ops {
void (*power_down)(struct ath11k_base *sc);
int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
};
static inline int ath11k_hif_start(struct ath11k_base *sc)
@@ -63,3 +71,25 @@ static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 ser
{
return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe);
}
+
+static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+#endif /* _HIF_H_ */
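
A minimal sketch, not part of this patch, of how a bus driver would populate the two new ops; the PCI callback names are assumptions for illustration. A bus that leaves the pointers NULL (e.g. AHB) degrades gracefully through the -EOPNOTSUPP / no-op checks in the inline wrappers above.

    /* Hypothetical PCI callbacks; real bodies would query the MSI setup
     * done at probe time.
     */
    static int ath11k_pci_get_user_msi_assignment(struct ath11k_base *ab,
                                                  char *user_name,
                                                  int *num_vectors,
                                                  u32 *user_base_data,
                                                  u32 *base_vector);
    static void ath11k_pci_get_msi_address(struct ath11k_base *ab,
                                           u32 *msi_addr_lo,
                                           u32 *msi_addr_hi);

    static const struct ath11k_hif_ops ath11k_pci_hif_ops_sketch = {
        /* start/stop/read32/write32/... elided */
        .get_user_msi_vector = ath11k_pci_get_user_msi_assignment,
        .get_msi_address = ath11k_pci_get_msi_address,
    };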
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index ad13c648b679..6b57dc273e0b 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -50,15 +50,6 @@ static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
return skb;
}
-static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
- struct sk_buff *skb)
-{
- struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
-
- dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
- skb_pull(skb, sizeof(struct ath11k_htc_hdr));
-}
-
static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
struct sk_buff *skb)
{
@@ -478,7 +469,7 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
if (!time_left) {
ath11k_warn(ab, "failed to receive control response completion, polling..\n");
- for (i = 0; i < CE_COUNT; i++)
+ for (i = 0; i < ab->hw_params.ce_count; i++)
ath11k_ce_per_engine_service(htc->ab, i);
time_left =
@@ -524,6 +515,12 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
return -ECOMM;
}
+ /* For QCA6390, the WMI endpoint uses a single credit to avoid
+ * back-to-back writes.
+ */
+ if (ab->hw_params.supports_shadow_regs)
+ htc->total_transmit_credits = 1;
+
ath11k_htc_setup_target_buffer_assignments(htc);
return 0;
@@ -748,7 +745,7 @@ int ath11k_htc_init(struct ath11k_base *ab)
htc->wmi_ep_count = 3;
break;
default:
- htc->wmi_ep_count = 3;
+ htc->wmi_ep_count = ab->hw_params.max_radios;
break;
}
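
What a single transmit credit means in practice: at most one WMI command in flight, with the next send gated on firmware returning the credit. A rough sketch under that assumption (field names mirror the driver loosely and are not taken from this patch):

    /* Consume the endpoint's only credit before sending a WMI command;
     * with total_transmit_credits == 1 this serializes the writes.
     */
    static bool ath11k_htc_try_consume_credit(struct ath11k_htc_ep *ep)
    {
        bool ok;

        spin_lock_bh(&ep->htc->tx_lock);
        ok = ep->tx_credits > 0;
        if (ok)
            ep->tx_credits--;
        spin_unlock_bh(&ep->htc->tx_lock);

        return ok;
    }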
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
new file mode 100644
index 000000000000..11a411b76fe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "hw.h"
+#include "core.h"
+#include "ce.h"
+
+/* Map from pdev index to hw mac index */
+static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_v2_support = 0;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+}
+
+static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(DBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ config->peer_map_unmap_v2_support = 1;
+ config->twt_ap_pdev_count = 2;
+ config->twt_ap_sta_count = 1000;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+const struct ath11k_hw_ops ipq8074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+};
+
+const struct ath11k_hw_ops ipq6018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+};
+
+const struct ath11k_hw_ops qca6390_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+};
+
+#define ATH11K_TX_RING_MASK_0 0x1
+#define ATH11K_TX_RING_MASK_1 0x2
+#define ATH11K_TX_RING_MASK_2 0x4
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ath11k_hw_regs ipq8074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000510,
+ .hal_tcl1_ring_base_msb = 0x00000514,
+ .hal_tcl1_ring_id = 0x00000518,
+ .hal_tcl1_ring_misc = 0x00000520,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000530,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000558,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000055c,
+ .hal_tcl1_ring_msi1_data = 0x00000560,
+ .hal_tcl2_ring_base_lsb = 0x00000568,
+ .hal_tcl_ring_base_lsb = 0x00000618,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000720,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+};
+
+const struct ath11k_hw_regs qca6390_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000684,
+ .hal_tcl1_ring_base_msb = 0x00000688,
+ .hal_tcl1_ring_id = 0x0000068c,
+ .hal_tcl1_ring_misc = 0x00000694,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006a4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006d0,
+ .hal_tcl1_ring_msi1_data = 0x000006d4,
+ .hal_tcl2_ring_base_lsb = 0x000006dc,
+ .hal_tcl_ring_base_lsb = 0x0000078c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000894,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x0000050c,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000510,
+ .hal_reo1_aging_thresh_ix_2 = 0x00000514,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000518,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003a4,
+ .hal_reo_tcl_ring_hp = 0x00003050,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x000004ac,
+ .hal_reo_status_hp = 0x00003068,
+};
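
The ops, ring masks, CE tables and register maps defined in this file are tied together per SoC by an ath11k_hw_params entry (in core.c, outside this hunk). Roughly, with the field values illustrative rather than taken from this patch:

    {
        .name = "ipq8074 hw2.0",   /* illustrative */
        .max_radios = 3,           /* assumption for the sketch */
        .hw_ops = &ipq8074_ops,
        .ring_mask = &ath11k_hw_ring_mask_ipq8074,
        .regs = &ipq8074_regs,
        .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
        .target_ce_count = ARRAY_SIZE(ath11k_target_ce_config_wlan_ipq8074),
        .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
        .svc_to_ce_map_len = ARRAY_SIZE(ath11k_target_service_to_ce_map_wlan_ipq8074),
    },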
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index dc4434aefbbe..1dda4257e6d7 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -6,6 +6,8 @@
#ifndef ATH11K_HW_H
#define ATH11K_HW_H
+#include "wmi.h"
+
/* Target configuration defines */
/* Num VDEVS per radio */
@@ -68,15 +70,12 @@
#define ATH11K_FW_DIR "ath11k"
-/* IPQ8074 definitions */
-#define IPQ8074_FW_DIR "IPQ8074"
-#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
-#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
-
#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
#define ATH11K_BOARD_API2_FILE "board-2.bin"
-#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "board.bin"
#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH11K_AMSS_FILE "amss.bin"
+#define ATH11K_M3_FILE "m3.bin"
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
@@ -104,15 +103,109 @@ enum ath11k_bus {
ATH11K_BUS_PCI,
};
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct ath11k_hw_ring_mask {
+ u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rxdma2host[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+};
+
struct ath11k_hw_params {
const char *name;
+ u16 hw_rev;
+ u8 max_radios;
+ u32 bdf_addr;
+
struct {
const char *dir;
size_t board_size;
size_t cal_size;
} fw;
+
+ const struct ath11k_hw_ops *hw_ops;
+ const struct ath11k_hw_ring_mask *ring_mask;
+
+ bool internal_sleep_clock;
+
+ const struct ath11k_hw_regs *regs;
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+
+ bool single_pdev_only;
+
+ /* For example, on QCA6390 struct
+ * wmi_init_cmd_param::band_to_mac_config needs to be false, as the
+ * firmware creates the band-to-mac mapping itself.
+ */
+ bool needs_band_to_mac;
+
+ bool rxdma1_enable;
+ int num_rxmda_per_pdev;
+ bool rx_mac_buf_ring;
+ bool vdev_start_delay;
+ bool htt_peer_map_v2;
+ bool tcl_0_only;
+ u8 spectral_fft_sz;
+
+ u16 interface_modes;
+ bool supports_monitor;
+ bool supports_shadow_regs;
+ bool idle_ps;
+};
+
+struct ath11k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ void (*wmi_init_config)(struct ath11k_base *ab,
+ struct target_resource_config *config);
+ int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
};
+extern const struct ath11k_hw_ops ipq8074_ops;
+extern const struct ath11k_hw_ops ipq6018_ops;
+extern const struct ath11k_hw_ops qca6390_ops;
+
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
+
+static inline
+int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_pdev_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_srng_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
+
struct ath11k_fw_ie {
__le32 id;
__le32 len;
@@ -130,4 +223,51 @@ enum ath11k_bd_ie_type {
ATH11K_BD_IE_BOARD_EXT = 1,
};
+struct ath11k_hw_regs {
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl2_ring_base_lsb;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo2_ring_base_lsb;
+ u32 hal_reo1_aging_thresh_ix_0;
+ u32 hal_reo1_aging_thresh_ix_1;
+ u32 hal_reo1_aging_thresh_ix_2;
+ u32 hal_reo1_aging_thresh_ix_3;
+
+ u32 hal_reo1_ring_hp;
+ u32 hal_reo1_ring_tp;
+ u32 hal_reo2_ring_hp;
+
+ u32 hal_reo_tcl_ring_base_lsb;
+ u32 hal_reo_tcl_ring_hp;
+
+ u32 hal_reo_status_ring_base_lsb;
+ u32 hal_reo_status_hp;
+};
+
+extern const struct ath11k_hw_regs ipq8074_regs;
+extern const struct ath11k_hw_regs qca6390_regs;
+
#endif
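
The two id-mapping ops encode opposite topologies. On IPQ8074-family chips every MAC is its own pdev, so mac_id passes through as the pdev id and all MACs share srng index 0; on QCA6390 there is a single pdev, so the pdev id is always 0 and mac_id selects the srng instead. For mac_id = 1:

    chip        pdev_id    srng_id
    IPQ8074     1          0
    QCA6390     0          1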
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 94ae2b9ea663..7f8dd47d2333 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "testmode.h"
#include "peer.h"
+#include "debugfs_sta.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -42,12 +43,6 @@
.max_power = 30, \
}
-/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
-static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
-module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
-MODULE_PARM_DESC(frame_mode,
- "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
-
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@@ -244,6 +239,9 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
@@ -521,6 +519,11 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
int i;
struct ath11k_pdev *pdev;
+ if (ab->hw_params.single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
if (WARN_ON(pdev_id > ab->num_radios))
return NULL;
@@ -755,21 +758,12 @@ static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath11k *ar = hw->priv;
- int ret = 0;
-
/* mac80211 requires this op to be present and that's why
* there's an empty function, this can be extended when
* required.
*/
- mutex_lock(&ar->conf_mutex);
-
- /* TODO: Handle configuration changes as appropriate */
-
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
+ return 0;
}
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
@@ -1138,13 +1132,13 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
idx_limit = -1;
switch (idx_limit) {
- case 0: /* fall through */
- case 1: /* fall through */
- case 2: /* fall through */
- case 3: /* fall through */
- case 4: /* fall through */
- case 5: /* fall through */
- case 6: /* fall through */
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
case 7:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
break;
@@ -1156,7 +1150,7 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
break;
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
@@ -1268,6 +1262,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u8 ampdu_factor;
u16 v;
if (!he_cap->has_he)
@@ -1284,6 +1279,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
+ /* As per section 26.6.1 of 11ax Draft 5.0, if the Max AMPDU Exponent
+ * Extension in the HE cap is zero, use arg->peer_max_mpdu as calculated
+ * while parsing the VHT caps (if present) or the HT caps otherwise.
+ *
+ * For a non-zero Max AMPDU Exponent Extension in the HE MAC caps:
+ * if the HE STA sends both VHT and HE cap IEs in the assoc request,
+ * use a MAX_AMPDU_LEN_FACTOR of 20 to calculate the max_ampdu length;
+ * if it sends HE and HT caps but no VHT cap, use a MAX_AMPDU_LEN_FACTOR
+ * of 16 instead.
+ */
+ ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT;
+
+ if (ampdu_factor) {
+ if (sta->vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
+
if (he_cap->he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
int bit = 7;
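
For a concrete feel of the calculation above (the extension value is an example, not from the patch): a peer with VHT support and an HE Max A-MPDU Length Exponent Extension of 1 gets

    peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + 1)) - 1
                  = (1 << (20 + 1)) - 1 = 2097151 bytes

while the same extension with HT-only support yields (1 << (16 + 1)) - 1 = 131071 bytes.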
@@ -1339,7 +1358,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
- /* fall through */
+ fallthrough;
default:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
@@ -2114,7 +2133,7 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
} else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
}
- /* fall through */
+ fallthrough;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
@@ -2372,6 +2391,9 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
reinit_completion(&ar->install_key_done);
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
if (cmd == DISABLE_KEY) {
/* TODO: Check if FW expects value other than NONE for del */
/* arg.key_cipher = WMI_CIPHER_NONE; */
@@ -2403,8 +2425,13 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
return -EOPNOTSUPP;
}
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
install:
ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+
if (ret)
return ret;
@@ -2476,6 +2503,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -2929,7 +2959,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
if (!arsta->tx_stats) {
ret = -ENOMEM;
@@ -2955,6 +2985,15 @@ static int ath11k_mac_station_add(struct ath11k *ar,
goto free_tx_stats;
}
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
return 0;
free_tx_stats:
@@ -3039,10 +3078,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
- else
- ath11k_info(ar->ab,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -3052,10 +3087,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath11k_info(ar->ab,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -3898,7 +3929,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
@@ -4025,7 +4056,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
- if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
@@ -4057,18 +4088,25 @@ void ath11k_mac_drain_tx(struct ath11k *ar)
static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
{
struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct ath11k_base *ab = ar->ab;
+ int i, ret = 0;
u32 ring_id;
if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
- tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE,
+ &tlv_filter);
+ if (ret)
+ break;
+ }
- return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
+ return ret;
}
static int ath11k_mac_op_start(struct ieee80211_hw *hw)
@@ -4170,6 +4208,15 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
+ /* allow device to enter IMPS */
+ if (ab->hw_params.idle_ps) {
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
return 0;
err:
@@ -4304,6 +4351,37 @@ static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
return ret;
}
+static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
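
The offload decision in ath11k_mac_op_update_vif_offload above reduces to three cases, checked in this order:

    encap offload allowed (frame_mode == ethernet, STA or AP vif)  -> ATH11K_HW_TXRX_ETHERNET
    ATH11K_FLAG_RAW_MODE set                                       -> ATH11K_HW_TXRX_RAW
    otherwise                                                      -> ATH11K_HW_TXRX_NATIVE_WIFI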
@@ -4313,7 +4391,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
- int hw_encap = 0;
u16 nss;
int i;
int ret;
@@ -4368,7 +4445,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
break;
@@ -4407,30 +4484,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
- if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_AP:
- hw_encap = 1;
- break;
- default:
- break;
- }
-
- if (ieee80211_set_hw_80211_encap(vif, hw_encap))
- param_value = ATH11K_HW_TXRX_ETHERNET;
- else
- param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
-
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, param_value);
- if (ret) {
- ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
- arvif->vdev_id, ret);
- goto err_vdev_del;
- }
+ ath11k_mac_op_update_vif_offload(hw, vif);
nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -4649,6 +4703,10 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath11k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
+ changed_flags, *total_flags, reset_flag);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5112,6 +5170,39 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5121,6 +5212,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
int ret;
+ struct peer_create_params param;
mutex_lock(&ar->conf_mutex);
@@ -5128,11 +5220,27 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
+ /* for QCA6390 bss peer must be created before vdev_start */
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+ }
+
if (WARN_ON(arvif->is_started)) {
mutex_unlock(&ar->conf_mutex);
return -EBUSY;
}
+ if (ab->hw_params.vdev_start_delay) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+ ret = ath11k_peer_create(ar, arvif, NULL, &param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create peer after vdev start delay: %d\n",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+ }
+
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
@@ -5178,6 +5286,11 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+ ath11k_peer_find_by_addr(ab, ar->mac_addr))
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5185,6 +5298,10 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5753,6 +5870,7 @@ static const struct ieee80211_ops ath11k_ops = {
.reconfig_complete = ath11k_mac_op_reconfig_complete,
.add_interface = ath11k_mac_op_add_interface,
.remove_interface = ath11k_mac_op_remove_interface,
+ .update_vif_offload = ath11k_mac_op_update_vif_offload,
.config = ath11k_mac_op_config,
.bss_info_changed = ath11k_mac_op_bss_info_changed,
.configure_filter = ath11k_mac_op_configure_filter,
@@ -5780,39 +5898,10 @@ static const struct ieee80211_ops ath11k_ops = {
.sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
#ifdef CONFIG_ATH11K_DEBUGFS
- .sta_add_debugfs = ath11k_sta_add_debugfs,
+ .sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
};
-static const struct ieee80211_iface_limit ath11k_if_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 16,
- .types = BIT(NL80211_IFTYPE_AP)
-#ifdef CONFIG_MAC80211_MESH
- | BIT(NL80211_IFTYPE_MESH_POINT)
-#endif
- },
-};
-
-static const struct ieee80211_iface_combination ath11k_if_comb[] = {
- {
- .limits = ath11k_if_limits,
- .n_limits = ARRAY_SIZE(ath11k_if_limits),
- .max_interfaces = 16,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .beacon_int_min_gcd = 100,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
- },
-};
-
static void ath11k_mac_update_ch_list(struct ath11k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
@@ -5829,12 +5918,29 @@ static void ath11k_mac_update_ch_list(struct ath11k *ar,
}
}
+static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
struct ath11k_hal_reg_capabilities_ext *reg_cap;
void *channels;
+ u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
ARRAY_SIZE(ath11k_5ghz_channels) +
@@ -5857,6 +5963,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_g_rates_size;
band->bitrates = ath11k_g_rates;
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_2ghz_chan,
reg_cap->high_2ghz_chan);
@@ -5901,6 +6012,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -5910,6 +6027,52 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ n_limits = 2;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ return 0;
+}
+
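
As a sanity illustration of what the combination built above advertises (one station, up to sixteen AP/mesh interfaces, sixteen interfaces total on one channel), here is a minimal stand-alone check; it is a sketch only, and mac80211 performs the real validation against these limits.

/* Toy model of the advertised limits above; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool combo_fits(int n_sta, int n_ap_mesh)
{
	return n_sta <= 1 &&            /* limits[0].max = 1  */
	       n_ap_mesh <= 16 &&       /* limits[1].max = 16 */
	       n_sta + n_ap_mesh <= 16; /* max_interfaces = 16 */
}

int main(void)
{
	printf("%d\n", combo_fits(1, 15)); /* 1: allowed */
	printf("%d\n", combo_fits(2, 0));  /* 0: only one station */
	return 0;
}
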
static const u8 ath11k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
@@ -5960,6 +6123,9 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -6006,17 +6172,21 @@ static int __ath11k_mac_register(struct ath11k *ar)
ret = ath11k_mac_setup_channels_rates(ar,
cap->supported_bands);
if (ret)
- goto err_free;
+ goto err;
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath11k_mac_setup_he_cap(ar, cap);
+ ret = ath11k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
- ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
@@ -6026,7 +6196,6 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
- ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
@@ -6034,6 +6203,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
if (ht_cap & WMI_HT_CAP_ENABLED) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
@@ -6078,9 +6248,6 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
- ar->hw->wiphy->iface_combinations = ath11k_if_comb;
- ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
-
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
@@ -6093,34 +6260,51 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_reg_init(ar);
- /* advertise HW checksum offload capabilities */
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
+ if (!ab->hw_params.supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+	 * while. But that time is so short that in practice it
+	 * makes no difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
/* Apply the regd received during initialization */
ret = ath11k_regd_update(ar, true);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
- ret = ath11k_debug_register(ar);
+ ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
return 0;
-err_free:
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err:
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
}
@@ -6194,7 +6378,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->ab = ab;
ar->pdev = pdev;
ar->pdev_idx = i;
- ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i);
+ ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
ar->wmi = &ab->wmi_ab.wmi[i];
/* FIXME wmi[0] is already initialized during attach,
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
new file mode 100644
index 000000000000..aded9a719d51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 90000
+
+static struct mhi_channel_config ath11k_mhi_channels[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = true,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .auto_start = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config ath11k_mhi_config = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels),
+ .ch_cfg = ath11k_mhi_channels,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events),
+ .event_cfg = ath11k_mhi_events,
+};
+
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pci_read32(ab, MHISTATUS);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
+
+ /* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
+	 * has the SYSERR bit set, so MHICTRL_RESET must be set
+	 * to clear SYSERR.
+ */
+ ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath11k_mhi_clear_vector(struct ath11k_base *ab)
+{
+ ath11k_mhi_reset_txvecdb(ab);
+ ath11k_mhi_reset_txvecstatus(ab);
+ ath11k_mhi_reset_rxvecdb(ab);
+ ath11k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+
+ ret = ath11k_pci_get_user_msi_assignment(ab_pci,
+ "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vectors; i++)
+ irq[i] = ath11k_pci_get_msi_irq(ab->dev,
+ base_vector + i);
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+}
+
+static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+int ath11k_mhi_register(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ int ret;
+
+ mhi_ctrl = kzalloc(sizeof(*mhi_ctrl), GFP_KERNEL);
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ mhi_ctrl->regs = ab->mem;
+
+ ret = ath11k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to get msi for mhi\n");
+ kfree(mhi_ctrl);
+ return ret;
+ }
+
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
+
+ ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config);
+ if (ret) {
+ ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ kfree(mhi_ctrl);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+}
+
+static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ return "INIT";
+ case ATH11K_MHI_DEINIT:
+ return "DEINIT";
+ case ATH11K_MHI_POWER_ON:
+ return "POWER_ON";
+ case ATH11K_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ return "FORCE_POWER_OFF";
+ case ATH11K_MHI_SUSPEND:
+ return "SUSPEND";
+ case ATH11K_MHI_RESUME:
+ return "RESUME";
+ case ATH11K_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case ATH11K_MHI_RDDM_DONE:
+ return "RDDM_DONE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_DEINIT:
+ clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_POWER_ON:
+ set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
+ clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_SUSPEND:
+ set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_RESUME:
+ clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ default:
+ ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
+ }
+}
+
+static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_DEINIT:
+ case ATH11K_MHI_POWER_ON:
+ if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ case ATH11K_MHI_SUSPEND:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_RESUME:
+ if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ return 0;
+ default:
+ ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state,
+ ab_pci->mhi_state);
+
+ return -EINVAL;
+}
+
+static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state);
+ if (ret)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_DEINIT:
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+ ret = 0;
+ break;
+ case ATH11K_MHI_POWER_ON:
+ ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, false);
+ ret = 0;
+ break;
+ case ATH11K_MHI_SUSPEND:
+ break;
+ case ATH11K_MHI_RESUME:
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ break;
+ default:
+ ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ ath11k_mhi_set_state_bit(ab_pci, mhi_state);
+
+ return 0;
+
+out:
+ ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ return ret;
+}
+
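
The check/set pair above encodes the legal ordering of MHI transitions in a single bitmask. Below is a minimal user-space model of just the INIT/POWER_ON bookkeeping, assuming nothing beyond standard C; the names are invented for illustration and it is not part of the patch.

#include <stdbool.h>
#include <stdio.h>

enum bit { B_INIT, B_POWER_ON };

static unsigned long state;

static bool tst(enum bit b) { return state & (1UL << b); }

/* Mirrors ath11k_mhi_check_state_bit() for POWER_ON: legal only
 * after INIT and before a previous POWER_ON.
 */
static bool power_on_ok(void)
{
	return tst(B_INIT) && !tst(B_POWER_ON);
}

int main(void)
{
	printf("%d\n", power_on_ok()); /* 0: INIT not done yet */
	state |= 1UL << B_INIT;        /* ATH11K_MHI_INIT */
	printf("%d\n", power_on_ok()); /* 1: may power on now */
	state |= 1UL << B_POWER_ON;    /* ATH11K_MHI_POWER_ON */
	printf("%d\n", power_on_ok()); /* 0: already powered on */
	return 0;
}
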
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+{
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+{
+ ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
+ ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
+}
+
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
new file mode 100644
index 000000000000..a7fd5e201d18
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH11K_MHI_H
+#define _ATH11K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+enum ath11k_mhi_state {
+ ATH11K_MHI_INIT,
+ ATH11K_MHI_DEINIT,
+ ATH11K_MHI_POWER_ON,
+ ATH11K_MHI_POWER_OFF,
+ ATH11K_MHI_FORCE_POWER_OFF,
+ ATH11K_MHI_SUSPEND,
+ ATH11K_MHI_RESUME,
+ ATH11K_MHI_TRIGGER_RDDM,
+ ATH11K_MHI_RDDM,
+ ATH11K_MHI_RDDM_DONE,
+};
+
+int ath11k_mhi_start(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+int ath11k_mhi_register(struct ath11k_pci *ar_pci);
+void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
+void ath11k_mhi_clear_vector(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
new file mode 100644
index 000000000000..d7eb6b7160bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+
+#define ATH11K_PCI_BAR_NUM 0
+#define ATH11K_PCI_DMA_MASK 32
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_REG_ADDRESS 0x310c
+#define WINDOW_VALUE_MASK GENMASK(24, 19)
+#define WINDOW_START 0x80000
+#define WINDOW_RANGE_MASK GENMASK(18, 0)
+
+#define TCSR_SOC_HW_VERSION 0x0224
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
+
+/* BAR0 + 4K is always accessible, and there is no
+ * need to force a wakeup.
+ * 4K - 32 = 0xFE0
+ */
+#define ACCESS_ALWAYS_OFF 0xFE0
+
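
To make the register window arithmetic above concrete: bits 19..24 of an offset select a 512 KiB window, and the low 19 bits address within it. A stand-alone sketch of the mapping, using the GENMASK values expanded by hand (the example offset is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SK_WINDOW_VALUE_MASK 0x01f80000u /* GENMASK(24, 19) */
#define SK_WINDOW_RANGE_MASK 0x0007ffffu /* GENMASK(18, 0) */
#define SK_WINDOW_START      0x80000u

int main(void)
{
	uint32_t offset = 0x00a20304; /* hypothetical register offset */
	uint32_t window = (offset & SK_WINDOW_VALUE_MASK) >> 19;
	uint32_t bar_off = SK_WINDOW_START + (offset & SK_WINDOW_RANGE_MASK);

	/* The driver programs (WINDOW_ENABLE_BIT | window) into
	 * WINDOW_REG_ADDRESS, then accesses BAR0 + bar_off.
	 */
	printf("window %u, BAR offset 0x%x\n", window, bar_off); /* 20, 0xa0304 */
	return 0;
}
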
+#define QCA6390_DEVICE_ID 0x1101
+
+static const struct pci_device_id ath11k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
+
+static const struct ath11k_bus_params ath11k_pci_bus_params = {
+ .mhi_support = true,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+};
+
+static const struct ath11k_msi_config msi_config = {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+};
+
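
The table above splits the 32 MSI vectors among four users; CE and DP interrupts are then folded onto their slice with a simple modulo, as ath11k_pci_config_irq() and ath11k_pci_ext_irq_config() do further down. A sketch of the resulting mapping; the pipe and group counts here are illustrative, not taken from hw_params:

#include <stdio.h>

int main(void)
{
	int ce_base = 3, ce_vecs = 10;  /* "CE" user above */
	int dp_base = 14, dp_vecs = 18; /* "DP" user above */
	int i;

	for (i = 0; i < 9; i++)         /* assume 9 CE pipes */
		printf("CE%d -> vector %d\n", i, i % ce_vecs + ce_base);
	for (i = 0; i < 11; i++)        /* assume 11 DP irq groups */
		printf("DP%d -> vector %d\n", i, i % dp_vecs + dp_base);
	return 0;
}
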
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+	"reo2host-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ if (window != ab_pci->register_window) {
+ iowrite32(WINDOW_ENABLE_BIT | window,
+ ab->mem + WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
+
+void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+	/* for offsets beyond BAR + 4K - 32, we may
+	 * need to wake up MHI before accessing.
+ */
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ iowrite32(value, ab->mem + offset);
+ } else {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ }
+
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 val;
+
+	/* for offsets beyond BAR + 4K - 32, we may
+	 * need to wake up MHI before accessing.
+ */
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ val = ioread32(ab->mem + offset);
+ } else {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ }
+
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+
+ return val;
+}
+
+static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+	/* Toggle the V bit back, otherwise the chip stays stuck in reset */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath11k_warn(ab, "link down error during global reset\n");
+}
+
+static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
+
+ val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+ /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
+	 * continuing the warm path and entering a dead loop.
+ */
+ ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+	/* A read-clear register; clear it to prevent
+	 * Q6 from entering a wrong code path.
+ */
+ val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
+}
+
+static void ath11k_pci_force_wake(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
+static void ath11k_pci_sw_reset(struct ath11k_base *ab)
+{
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_clear_vector(ab);
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_set_mhictrl_reset(ab);
+ ath11k_pci_clear_dbg_registers(ab);
+}
+
+int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_lo);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_hi);
+}
+
+int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int idx;
+
+ for (idx = 0; idx < msi_config.total_users; idx++) {
+ if (strcmp(user_name, msi_config.users[idx].name) == 0) {
+ *num_vectors = msi_config.users[idx].num_vectors;
+ *user_base_data = msi_config.users[idx].base_vector
+ + ab_pci->msi_ep_base_data;
+ *base_vector = msi_config.users[idx].base_vector;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+
+static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
+ num_vectors, user_base_data,
+ base_vector);
+}
+
+static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+static void ath11k_pci_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pci_free_ext_irq(ab);
+}
+
+static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pci_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pci_ce_tasklet(unsigned long data)
+{
+ struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath11k_pci_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ }
+}
+
+static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath11k_pci_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pci_ext_irq_disable(ab);
+ ath11k_pci_sync_ext_irqs(ab);
+}
+
+static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_pci_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
+
+ ath11k_pci_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+
+ ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
+ &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = base_vector + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+ ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
+ IRQF_SHARED,
+ "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_pci_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+
+ ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
+ "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ msi_data = (i % msi_data_count) + msi_irq_start;
+ irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet,
+ (unsigned long)ce_pipe);
+
+ ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
+ IRQF_SHARED, irq_name[irq_idx],
+ ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ ath11k_pci_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pci_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;
+
+ ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
+ &cfg->shadow_reg_v2_len);
+}
+
+static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pci_ce_irq_enable(ab, i);
+ }
+}
+
+static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ msi_config.total_vectors,
+ msi_config.total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors != msi_config.total_vectors) {
+		ath11k_err(ab, "failed to get %d MSI vectors, only %d available\n",
+ msi_config.total_vectors, num_vectors);
+
+ if (num_vectors >= 0)
+ return -EINVAL;
+ else
+ return num_vectors;
+ }
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+ return ret;
+}
+
+static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
+ if (ret) {
+ ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
+ if (ret) {
+ ath11k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto clear_master;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ return 0;
+
+clear_master:
+ pci_clear_master(pdev);
+release_region:
+ pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static int ath11k_pci_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ int ret;
+
+ ab_pci->register_window = 0;
+ clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_sw_reset(ab_pci->ab);
+
+ ret = ath11k_mhi_start(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath11k_pci_power_down(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_mhi_stop(ab_pci);
+ clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_force_wake(ab_pci->ab);
+ ath11k_pci_sw_reset(ab_pci->ab);
+}
+
+static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_pci_stop(struct ath11k_base *ab)
+{
+ ath11k_pci_ce_irqs_disable(ab);
+ ath11k_pci_sync_ce_irqs(ab);
+ ath11k_pci_kill_tasklets(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+static int ath11k_pci_start(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+
+ ath11k_pci_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
+ .start = ath11k_pci_start,
+ .stop = ath11k_pci_stop,
+ .read32 = ath11k_pci_read32,
+ .write32 = ath11k_pci_write32,
+ .power_down = ath11k_pci_power_down,
+ .power_up = ath11k_pci_power_up,
+ .irq_enable = ath11k_pci_ext_irq_enable,
+ .irq_disable = ath11k_pci_ext_irq_disable,
+ .get_msi_address = ath11k_pci_get_msi_address,
+ .get_user_msi_vector = ath11k_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pci_map_service_to_pipe,
+};
+
+static int ath11k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath11k_base *ab;
+ struct ath11k_pci *ab_pci;
+ u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
+ int ret;
+
+ dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
+ &ath11k_pci_bus_params);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath11k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath11k_pci_hif_ops;
+ pci_set_drvdata(pdev, ab);
+ spin_lock_init(&ab_pci->window_lock);
+
+ ret = ath11k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ switch (pci_dev->device) {
+ case QCA6390_DEVICE_ID:
+ soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_QCA6390_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pci_enable_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to enable msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_disable_msi;
+
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_pci_disable_msi;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_pci_init_qmi_ce_config(ab);
+
+ ret = ath11k_pci_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ return 0;
+
+err_free_irq:
+ ath11k_pci_free_irq(ab);
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
+err_pci_disable_msi:
+ ath11k_pci_disable_msi(ab_pci);
+
+err_pci_free_region:
+ ath11k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath11k_core_free(ab);
+
+ return ret;
+}
+
+static void ath11k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ ath11k_core_deinit(ab);
+
+ ath11k_mhi_unregister(ab_pci);
+
+ ath11k_pci_free_irq(ab);
+ ath11k_pci_disable_msi(ab_pci);
+ ath11k_pci_free_region(ab_pci);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+}
+
+static void ath11k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+
+ ath11k_pci_power_down(ab);
+}
+
+static struct pci_driver ath11k_pci_driver = {
+ .name = "ath11k_pci",
+ .id_table = ath11k_pci_id_table,
+ .probe = ath11k_pci_probe,
+ .remove = ath11k_pci_remove,
+ .shutdown = ath11k_pci_shutdown,
+};
+
+static int ath11k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath11k_pci_driver);
+ if (ret)
+ pr_err("failed to register ath11k pci driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath11k_pci_init);
+
+static void ath11k_pci_exit(void)
+{
+ pci_unregister_driver(&ath11k_pci_driver);
+}
+
+module_exit(ath11k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
new file mode 100644
index 000000000000..43562f774a37
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH11K_PCI_H
+#define _ATH11K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+};
+
+enum ath11k_pci_flags {
+ ATH11K_PCI_FLAG_INIT_DONE,
+};
+
+struct ath11k_pci {
+ struct pci_dev *pdev;
+ struct ath11k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ u32 msi_ep_base_data;
+ struct mhi_controller *mhi_ctrl;
+ unsigned long mhi_state;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath11k_pci_flags */
+ unsigned long flags;
+};
+
+static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_pci *)ab->drv_priv;
+}
+
+int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
+void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 297172538620..61ad9300eafb 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -223,9 +223,6 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
- ath11k_info(ar->ab,
- "ignoring the peer %pM creation on same pdev idx %d\n",
- param->peer_addr, ar->pdev_idx);
return -EINVAL;
}
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index c00a99ad8dbc..c2b165158225 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -3,12 +3,17 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/elf.h>
+
#include "qmi.h"
#include "core.h"
#include "debug.h"
#include <linux/of.h>
#include <linux/firmware.h>
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+
static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
@@ -1516,15 +1521,35 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- req.m3_support_valid = 0;
- req.m3_support = 0;
-
- req.m3_cache_support_valid = 0;
- req.m3_cache_support = 0;
+ if (ab->bus_params.m3_fw_support) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ } else {
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+ }
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
+ if (ab->hw_params.internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+		/* Notify firmware that this is a non-Qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+		/* Notify firmware about the sleep clock selection;
+		 * nm_modem_bit[1] is used for this purpose. The host
+		 * driver on non-Qualcomm platforms should select the
+		 * internal sleep clock.
+		 */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ }
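
For clarity, the two flags above land in a single field of the host-cap request; a trivial sketch of the value a non-Qualcomm host ends up sending:

#include <stdio.h>

#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT                 0x04

int main(void)
{
	unsigned char nm_modem = 0;

	nm_modem |= HOST_CSTATE_BIT;                 /* non-Qualcomm host */
	nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; /* internal sleep clock */
	printf("nm_modem = 0x%02x\n", nm_modem);     /* 0x06 */
	return 0;
}
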
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1634,19 +1659,30 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
memset(&resp, 0, sizeof(resp));
- req->mem_seg_len = ab->qmi.mem_seg_count;
+	/* By default the QCA6390 firmware requests one block of ~4M
+	 * contiguous DMA memory, which is hard to allocate from the OS.
+	 * So the host returns failure to the firmware, and the firmware
+	 * then requests multiple blocks of smaller chunk size.
+	 */
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ for (i = 0; i < req->mem_seg_len ; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ }
+ }
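
The deferral above implements a tiny negotiation: two or fewer requested segments is treated as the single large block the host cannot supply, so the host answers with an empty request and the firmware retries. A toy model of that exchange, with chunk counts and sizes invented for illustration:

#include <stdio.h>

static int host_respond(int nsegs, unsigned int seg_size)
{
	/* Mirror the rule above: <= 2 segments means "one big block". */
	if (nsegs <= 2) {
		printf("host: defer %u-byte block, retry smaller\n", seg_size);
		return -1;
	}
	printf("host: map %d chunks of %u bytes\n", nsegs, seg_size);
	return 0;
}

int main(void)
{
	if (host_respond(1, 4 * 1024 * 1024)) /* first try: one ~4M block */
		host_respond(8, 512 * 1024);  /* retry: 8 smaller chunks */
	return 0;
}
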
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
- for (i = 0; i < req->mem_seg_len ; i++) {
- req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
- req->mem_seg[i].size = ab->qmi.target_mem[i].size;
- req->mem_seg[i].type = ab->qmi.target_mem[i].type;
- }
-
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_RESPOND_MEM_REQ_V01,
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
@@ -1674,15 +1710,56 @@ out:
return ret;
}
+static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+
+ if (ab->bus_params.fixed_mem_region)
+ return;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if (!ab->qmi.target_mem[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].vaddr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].vaddr = NULL;
+ }
+}
+
static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
{
+ int i;
+ struct target_mem_chunk *chunk;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+ chunk->vaddr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL);
+ if (!chunk->vaddr) {
+ ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
+ chunk->size,
+ chunk->type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+{
int i, idx;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case BDF_MEM_REGION_TYPE:
- ab->qmi.target_mem[idx].paddr = ATH11K_QMI_BDF_ADDRESS;
- ab->qmi.target_mem[idx].vaddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
+ ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -1694,7 +1771,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
}
/* TODO ath11k does not support cold boot calibration */
ab->qmi.target_mem[idx].paddr = 0;
- ab->qmi.target_mem[idx].vaddr = 0;
+ ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -1772,11 +1849,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
- ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x\n",
+ ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
- ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s",
+ ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
ab->qmi.target.fw_build_id);
@@ -1790,21 +1867,19 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
void __iomem *bdf_addr)
{
- struct device *dev = ab->dev;
- char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
const struct firmware *fw_entry;
struct ath11k_board_data bd;
u32 fw_size;
- int ret = 0;
-
- memset(&bd, 0, sizeof(bd));
+ int ret;
switch (type) {
case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
+ memset(&bd, 0, sizeof(bd));
+
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
ath11k_warn(ab, "qmi failed to load BDF\n");
- goto out;
+ return ret;
}
fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
@@ -1812,12 +1887,12 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
ath11k_core_free_bdf(ab, &bd);
break;
case ATH11K_QMI_FILE_TYPE_CALDATA:
- snprintf(filename, sizeof(filename),
- "%s/%s", ab->hw_params.fw.dir, ATH11K_QMI_DEFAULT_CAL_FILE_NAME);
- ret = request_firmware(&fw_entry, filename, dev);
- if (ret) {
- ath11k_warn(ab, "qmi failed to load CAL: %s\n", filename);
- goto out;
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab, "failed to load %s: %d\n",
+ ATH11K_DEFAULT_CAL_FILE, ret);
+ return ret;
}
fw_size = min_t(u32, ab->hw_params.fw.board_size,
@@ -1825,23 +1900,18 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
fw_entry->data, fw_size);
- ath11k_info(ab, "qmi downloading BDF: %s, size: %zu\n",
- filename, fw_entry->size);
release_firmware(fw_entry);
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
req->total_size = fw_size;
-
-out:
- return ret;
+ return 0;
}
-static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
+static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
@@ -1854,7 +1924,7 @@ static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
- bdf_addr = ioremap(ATH11K_QMI_BDF_ADDRESS, ATH11K_QMI_BDF_MAX_SIZE);
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
if (!bdf_addr) {
ath11k_warn(ab, "qmi ioremap error for BDF\n");
ret = -EIO;
@@ -1905,7 +1975,6 @@ static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
goto out_qmi_bdf;
}
}
- ath11k_info(ab, "qmi BDF downloaded\n");
out_qmi_bdf:
iounmap(bdf_addr);
@@ -1914,8 +1983,151 @@ out:
return ret;
}
+static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct ath11k_board_data bd;
+ unsigned int remaining;
+ struct qmi_txn txn = {};
+ int ret;
+ const u8 *temp;
+ int bdf_type;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ memset(&bd, 0, sizeof(bd));
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load bdf:\n");
+ goto out;
+ }
+
+ temp = bd.data;
+ remaining = bd.len;
+
+ if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
+ else
+ bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = bd.len;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ req->bdf_type = bdf_type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out_qmi_bdf;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out_qmi_bdf;
+ }
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+out_qmi_bdf:
+ ath11k_core_free_bdf(ab, &bd);
+
+out:
+ kfree(req);
+ return ret;
+}
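The loop above streams the board file to the firmware in QMI-sized pieces: it sniffs the ELF magic to pick bdf_type, then sends at most QMI_WLANFW_MAX_DATA_SIZE_V01 bytes per request, flagging the final segment with end = 1 and waiting for each transaction before advancing. A minimal userspace sketch of the same segmentation logic, assuming a hypothetical send_segment() transport:

#include <stddef.h>
#include <string.h>

#define MAX_SEG 6144 /* stand-in for QMI_WLANFW_MAX_DATA_SIZE_V01 */

struct segment {
	unsigned int seg_id;
	size_t len;
	int end;
	unsigned char data[MAX_SEG];
};

static int send_file_in_segments(const unsigned char *buf, size_t total,
				 int (*send_segment)(const struct segment *))
{
	struct segment seg = { 0 };
	size_t off = 0;

	while (off < total) {
		seg.len = total - off;
		seg.end = 1;			/* assume this is the last chunk */
		if (seg.len > MAX_SEG) {
			seg.len = MAX_SEG;
			seg.end = 0;		/* more chunks follow */
		}
		memcpy(seg.data, buf + off, seg.len);

		int ret = send_segment(&seg);	/* blocks until the peer acks */
		if (ret)
			return ret;

		off += seg.len;
		seg.seg_id++;
	}
	return 0;
}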
+
+static int ath11k_qmi_m3_load(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (m3_mem->vaddr || m3_mem->size)
+ return 0;
+
+ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE,
+ path, sizeof(path));
+ ath11k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ fw->size, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ fw->size);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->vaddr, fw->data, fw->size);
+ m3_mem->size = fw->size;
+ release_firmware(fw);
+
+ return 0;
+}
+
+static void ath11k_qmi_m3_free(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!ab->bus_params.m3_fw_support || !m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+}
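The M3 image has to live in DMA-coherent memory because the target fetches it directly from the physical address handed over in the M3 info request; the early return when vaddr is already set caches the buffer across firmware restarts, and it is only released at service deinit. A kernel-style sketch of this load-once/free-late pattern, using a hypothetical struct fw_dma_buf rather than the driver's own types:

#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>

struct fw_dma_buf {			/* hypothetical, not the driver's type */
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
};

static int fw_dma_buf_load(struct device *dev, struct fw_dma_buf *m3,
			   const struct firmware *fw)
{
	if (m3->vaddr)			/* already loaded on a previous start */
		return 0;

	m3->vaddr = dma_alloc_coherent(dev, fw->size, &m3->paddr, GFP_KERNEL);
	if (!m3->vaddr)
		return -ENOMEM;

	memcpy(m3->vaddr, fw->data, fw->size);
	m3->size = fw->size;
	return 0;			/* m3->paddr is what the target is told */
}

static void fw_dma_buf_free(struct device *dev, struct fw_dma_buf *m3)
{
	if (!m3->vaddr)
		return;

	dma_free_coherent(dev, m3->size, m3->vaddr, m3->paddr);
	m3->vaddr = NULL;
}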
+
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
struct qmi_txn txn = {};
@@ -1923,8 +2135,20 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- req.addr = 0;
- req.size = 0;
+
+ if (ab->bus_params.m3_fw_support) {
+ ret = ath11k_qmi_m3_load(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+ } else {
+ req.addr = 0;
+ req.size = 0;
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
@@ -2034,7 +2258,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
req->tgt_cfg_valid = 1;
/* This is number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
- for (pipe_num = 0; pipe_num <= req->tgt_cfg_len ; pipe_num++) {
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
@@ -2051,7 +2275,18 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
}
req->shadow_reg_valid = 0;
- req->shadow_reg_v2_valid = 0;
+
+ /* set shadow v2 configuration */
+ if (ab->hw_params.supports_shadow_regs) {
+ req->shadow_reg_v2_valid = 1;
+ req->shadow_reg_v2_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v2_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
+ memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
+ sizeof(u32) * req->shadow_reg_v2_len);
+ } else {
+ req->shadow_reg_v2_valid = 0;
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
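The shadow_reg_v2 hunk above clamps the element count with min_t() before the memcpy(), so a host-side table larger than QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 cannot overrun the fixed-size array in the wire message. The same clamp-before-copy idiom in isolation, with made-up sizes:

#include <stdint.h>
#include <string.h>

#define WIRE_CAP 36 /* stand-in for QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 */

struct wire_msg {
	uint32_t shadow_v2_len;
	uint32_t shadow_v2[WIRE_CAP];
};

static void fill_shadow_v2(struct wire_msg *msg,
			   const uint32_t *table, uint32_t table_len)
{
	/* clamp first, then copy exactly that many elements */
	msg->shadow_v2_len = table_len < WIRE_CAP ? table_len : WIRE_CAP;
	memcpy(msg->shadow_v2, table, sizeof(uint32_t) * msg->shadow_v2_len);
}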
@@ -2181,7 +2416,10 @@ static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return;
}
- ret = ath11k_qmi_load_bdf(ab);
+ if (ab->bus_params.fixed_bdf_addr)
+ ret = ath11k_qmi_load_bdf_fixed_addr(ab);
+ else
+ ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
return;
@@ -2220,10 +2458,20 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- ret = ath11k_qmi_alloc_target_mem_chunk(ab);
- if (ret < 0) {
- ath11k_warn(ab, "qmi failed to alloc target memory:%d\n", ret);
- return;
+ if (ab->bus_params.fixed_mem_region) {
+ ret = ath11k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to assign target memory: %d\n",
+ ret);
+ return;
+ }
+ } else if (msg->mem_seg_len > 2) {
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
}
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
@@ -2265,21 +2513,21 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
.ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_request_mem_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_request_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
.ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_mem_ready_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_ready_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_READY_IND_V01,
.ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_ready_cb,
},
{
@@ -2287,7 +2535,7 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
.msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
.ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
.decoded_size =
- sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei),
+ sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
};
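The decoded_size fixes above correct a subtle sizeof() mix-up: the *_ei symbols are arrays of struct qmi_elem_info that describe the wire encoding, so taking sizeof() of them measures the descriptor table, not the decoded message, and the QMI core would size the handler's buffer wrong. A standalone illustration with hypothetical stand-in types:

#include <stdio.h>

struct elem_info { int type, offset; };		/* stand-in for struct qmi_elem_info */
struct fw_ready_msg { char placeholder; };	/* the decoded message */

static const struct elem_info fw_ready_ei[] = {
	{ 1, 0 },
	{ 0, 0 },				/* terminator */
};

int main(void)
{
	/* wrong: size of the encoding table, here 2 * sizeof(struct elem_info) */
	printf("sizeof(fw_ready_ei)         = %zu\n", sizeof(fw_ready_ei));
	/* right: size of the message the table describes */
	printf("sizeof(struct fw_ready_msg) = %zu\n", sizeof(struct fw_ready_msg));
	return 0;
}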
@@ -2416,9 +2664,10 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
ATH11K_QMI_WLFW_SERVICE_VERS_V01,
- ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ ab->qmi.service_ins_id);
if (ret < 0) {
ath11k_warn(ab, "failed to add qmi lookup\n");
+ destroy_workqueue(ab->qmi.event_wq);
return ret;
}
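The added destroy_workqueue() closes a leak: when qmi_add_lookup() failed, the event workqueue allocated just before was never released. The general idiom is to unwind, in reverse order, everything acquired earlier in the init path; a generic sketch with hypothetical helpers (the driver open-codes the single cleanup call instead of a goto ladder):

#include <linux/workqueue.h>

struct svc_handle { int id; };		/* hypothetical */

struct svc {				/* hypothetical */
	struct svc_handle handle;
	struct workqueue_struct *wq;
};

/* hypothetical helpers standing in for qmi_handle_init() and friends */
int handle_init(struct svc_handle *h);
void handle_release(struct svc_handle *h);
int add_lookup(struct svc_handle *h);

static int service_init(struct svc *s)
{
	int ret;

	ret = handle_init(&s->handle);
	if (ret)
		return ret;

	s->wq = alloc_workqueue("svc_wq", WQ_UNBOUND, 1);
	if (!s->wq) {
		ret = -ENOMEM;
		goto err_release_handle;
	}

	ret = add_lookup(&s->handle);
	if (ret)
		goto err_destroy_wq;	/* this is the step the fix adds */

	return 0;

err_destroy_wq:
	destroy_workqueue(s->wq);
err_release_handle:
	handle_release(&s->handle);
	return ret;
}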
@@ -2430,5 +2679,7 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
+ ath11k_qmi_m3_free(ab);
+ ath11k_qmi_free_target_mem_chunk(ab);
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3f7db642d869..b0a818f0401b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -12,18 +12,18 @@
#define ATH11K_HOST_VERSION_STRING "WIN"
#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
-#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
#define ATH11K_QMI_CALDB_SIZE 0x480000
-#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
@@ -42,6 +42,11 @@ enum ath11k_qmi_file_type {
ATH11K_QMI_MAX_FILE_TYPE,
};
+enum ath11k_qmi_bdf_type {
+ ATH11K_QMI_BDF_TYPE_BIN = 0,
+ ATH11K_QMI_BDF_TYPE_ELF = 1,
+};
+
enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_SERVER_ARRIVE,
ATH11K_QMI_EVENT_SERVER_EXIT,
@@ -72,7 +77,7 @@ struct ath11k_qmi_ce_cfg {
int svc_to_ce_map_len;
const u8 *shadow_reg;
int shadow_reg_len;
- u8 *shadow_reg_v2;
+ u32 *shadow_reg_v2;
int shadow_reg_v2_len;
};
@@ -85,7 +90,7 @@ struct target_mem_chunk {
u32 size;
u32 type;
dma_addr_t paddr;
- u32 vaddr;
+ u32 *vaddr;
};
struct target_info {
@@ -98,6 +103,12 @@ struct target_info {
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
};
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
struct ath11k_qmi {
struct ath11k_base *ab;
struct qmi_handle handle;
@@ -112,6 +123,8 @@ struct ath11k_qmi {
u32 target_mem_mode;
u8 cal_done;
struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
};
#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
@@ -254,6 +267,14 @@ struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
char placeholder;
};
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
#define QMI_WLANFW_CAP_REQ_V01 0x0024
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 7c9dc91cc48a..f6a1f0352989 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
ab = ar->ab;
pdev_id = ar->pdev_idx;
- spin_lock(&ab->base_lock);
+ spin_lock_bh(&ab->base_lock);
if (init) {
/* Apply the regd received during init through
@@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
if (!regd) {
ret = -EINVAL;
- spin_unlock(&ab->base_lock);
+ spin_unlock_bh(&ab->base_lock);
goto err;
}
@@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
if (regd_copy)
ath11k_copy_regd(regd, regd_copy);
- spin_unlock(&ab->base_lock);
+ spin_unlock_bh(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
@@ -699,7 +699,7 @@ void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < MAX_RADIOS; i++) {
+ for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
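The spin_lock_bh() conversions in ath11k_regd_update() follow the usual rule for a lock that is also taken from bottom-half context (here, WMI event processing): a process-context holder must keep softirqs disabled, or a softirq arriving on the same CPU can spin forever on the lock that CPU already owns. A sketch of the two contexts, assuming a hypothetical shared lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);	/* hypothetical lock shared by both paths */

/* bottom-half path (e.g. event processing in a tasklet) */
static void event_softirq(void)
{
	spin_lock(&shared_lock);
	/* ... update shared state ... */
	spin_unlock(&shared_lock);
}

/* process-context path must keep softirqs off while holding the lock */
static void update_from_process_context(void)
{
	spin_lock_bh(&shared_lock);	/* plain spin_lock() here could deadlock:
					 * event_softirq() may interrupt this CPU
					 * and spin on the lock we already hold */
	/* ... read/copy shared state ... */
	spin_unlock_bh(&shared_lock);
}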
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1c5d65bb411f..ac2a8cfdc1c0 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -17,8 +17,6 @@
#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32
#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256
-#define ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK 0xFF
-
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channel computed by sum of 2g and 5g band channels */
@@ -557,16 +555,16 @@ static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
return max_exp;
}
-static void ath11k_spectral_parse_16bit_fft(u8 *outbins, u8 *inbins, int num_bins)
+static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz)
{
- int i;
- __le16 *data = (__le16 *)inbins;
+ int i, j;
i = 0;
+ j = 0;
while (i < num_bins) {
- outbins[i] = (__le16_to_cpu(data[i])) &
- ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK;
+ outbins[i] = inbins[j];
i++;
+ j += fft_sz;
}
}
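ath11k_spectral_parse_fft() now copies one byte per bin at a per-hardware stride (spectral_fft_sz) instead of assuming 16-bit little-endian bins and masking with 0xFF; a stride of 2 reproduces the old behaviour on little-endian data, while hardware with wider report entries can use a larger stride. A compilable userspace sketch:

#include <stdio.h>

static void parse_fft(unsigned char *out, const unsigned char *in,
		      int num_bins, int fft_sz)
{
	int i, j;

	for (i = 0, j = 0; i < num_bins; i++, j += fft_sz)
		out[i] = in[j];		/* keep the first byte of each entry */
}

int main(void)
{
	const unsigned char report[] = { 0x11, 0x00, 0x22, 0x00, 0x33, 0x00 };
	unsigned char bins[3];

	parse_fft(bins, report, 3, 2);	/* stride 2 matches the old 16-bit path */
	printf("%02x %02x %02x\n", bins[0], bins[1], bins[2]); /* 11 22 33 */
	return 0;
}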
@@ -588,6 +586,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
lockdep_assert_held(&ar->spectral.lock);
+ if (!ab->hw_params.spectral_fft_sz) {
+ ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
tlv = (struct spectral_tlv *)data;
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert Dword into bytes */
@@ -649,9 +653,8 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
- ath11k_spectral_parse_16bit_fft(fft_sample->data,
- fft_report->bins,
- num_bins);
+ ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
+ ab->hw_params.spectral_fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
@@ -773,6 +776,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
i += sizeof(*tlv) + tlv_len;
}
+ ret = 0;
+
err:
kfree(fft_sample);
unlock:
@@ -954,10 +959,11 @@ int ath11k_spectral_init(struct ath11k_base *ab)
int i;
if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
- ab->wmi_ab.svc_map)) {
- ath11k_info(ab, "spectral not supported\n");
+ ab->wmi_ab.svc_map))
+ return 0;
+
+ if (!ab->hw_params.spectral_fft_sz)
return 0;
- }
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -966,10 +972,8 @@ int ath11k_spectral_init(struct ath11k_base *ab)
ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
WMI_DIRECT_BUF_SPECTRAL,
&db_cap);
- if (ret) {
- ath11k_info(ab, "spectral not enabled for pdev %d\n", i);
+ if (ret)
continue;
- }
idr_init(&sp->rx_ring.bufs_idr);
spin_lock_init(&sp->rx_ring.idr_lock);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 5a7e150c621b..c96b26f39a25 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -53,7 +53,7 @@ ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
return ret;
}
-static struct thermal_cooling_device_ops ath11k_thermal_ops = {
+static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
.get_max_state = ath11k_thermal_get_max_throttle_state,
.get_cur_state = ath11k_thermal_get_cur_throttle_state,
.set_cur_state = ath11k_thermal_set_cur_throttle_state,
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8e3437a65673..8eca92520837 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -338,7 +338,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
mac_phy_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = mac_phy_caps->pdev_id;
- pdev_cap->supported_bands = mac_phy_caps->supported_bands;
+ pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
@@ -371,27 +371,33 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
- cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
- sizeof(struct ath11k_ppe_threshold));
-
- cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
- sizeof(struct ath11k_ppe_threshold));
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
@@ -1593,8 +1599,8 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
- cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
@@ -3175,7 +3181,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
(param->num_band_to_mac * sizeof(*band_to_mac));
len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
- (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);
+ (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -3336,50 +3342,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
memset(&init_param, 0, sizeof(init_param));
memset(&config, 0, sizeof(config));
- config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config.num_peers = TARGET_NUM_PEERS(DBS);
- config.num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config.num_peers = TARGET_NUM_PEERS(SINGLE);
- config.num_tids = TARGET_NUM_TIDS(SINGLE);
- }
- config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
- config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
- config.num_peer_keys = TARGET_NUM_PEER_KEYS;
- config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
- config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
- config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
- config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
- config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
- config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
- config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
- config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
- config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
- config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
- config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
- config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
- config.dma_burst_size = TARGET_DMA_BURST_SIZE;
- config.rx_skip_defrag_timeout_dup_detection_check =
- TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
- config.vow_config = TARGET_VOW_CONFIG;
- config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
- config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
- config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
- config.rx_batchmode = TARGET_RX_BATCHMODE;
- config.peer_map_unmap_v2_support = 1;
- config.twt_ap_pdev_count = ab->num_radios;
- config.twt_ap_sta_count = 1000;
+ ab->hw_params.hw_ops->wmi_init_config(ab, &config);
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
@@ -3391,9 +3354,10 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
- init_param.num_band_to_mac = ab->num_radios;
-
- ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+ if (ab->hw_params.needs_band_to_mac) {
+ init_param.num_band_to_mac = ab->num_radios;
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+ }
return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}
@@ -3688,6 +3652,8 @@ static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
i++;
}
+ ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
@@ -3778,6 +3744,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
u32 phy_id_map;
+ int pdev_index = 0;
int ret;
svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
@@ -3793,7 +3760,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
svc_rdy_ext->soc_hal_reg_caps,
svc_rdy_ext->mac_phy_caps,
hw_mode_id, soc->num_radios,
- &soc->pdevs[soc->num_radios]);
+ &soc->pdevs[pdev_index]);
if (ret) {
ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
soc->num_radios);
@@ -3802,9 +3769,25 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->num_radios++;
+ /* For QCA6390, save mac_phy capability in the same pdev */
+ if (soc->hw_params.single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
/* TODO: mac_phy_cap prints */
phy_id_map >>= 1;
}
+
+ /* For QCA6390, set num_radios to 1 because the host manages
+ * both the 2G and 5G radios in one pdev. Set pdev_id to 0,
+ * since 0 means soc level.
+ */
+ if (soc->hw_params.single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
return 0;
}
@@ -5434,8 +5417,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
pdev_idx = reg_info->phy_id;
- if (pdev_idx >= ab->num_radios)
- goto fallback;
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ goto mem_free;
+ else
+ goto fallback;
+ }
/* Avoid multiple overwrites to default regd, during core
* stop-start after mac registration.
@@ -6260,7 +6252,7 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debug_fw_stats_process(ab, skb);
+ ath11k_debugfs_fw_stats_process(ab, skb);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6538,7 +6530,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
break;
/* TODO: Add remaining events */
default:
- ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
break;
}
@@ -6682,7 +6674,7 @@ int ath11k_wmi_connect(struct ath11k_base *ab)
u8 wmi_ep_count;
wmi_ep_count = ab->htc.wmi_ep_count;
- if (wmi_ep_count > MAX_RADIOS)
+ if (wmi_ep_count > ab->hw_params.max_radios)
return -1;
for (i = 0; i < wmi_ep_count; i++)
@@ -6704,7 +6696,7 @@ int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
{
struct ath11k_pdev_wmi *wmi_handle;
- if (pdev_id >= MAX_RADIOS)
+ if (pdev_id >= ab->hw_params.max_radios)
return -EINVAL;
wmi_handle = &ab->wmi_ab.wmi[pdev_id];
@@ -6728,6 +6720,10 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.ab = ab;
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+ /* It's overwritten when service_ext_ready is handled */
+ if (ab->hw_params.single_pdev_only)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
/* TODO: Init remaining wmi soc resources required */
init_completion(&ab->wmi_ab.service_ready);
init_completion(&ab->wmi_ab.unified_ready);
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 979800c6f57f..234ea939d316 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -410,7 +410,7 @@ enum ath5k_radio {
* This article claims Super G sticks to bonding of channels 5 and 6 for
* USA:
*
- * http://www.pcworld.com/article/id,113428-page,1/article.html
+ * https://www.pcworld.com/article/id,113428-page,1/article.html
*
* The channel bonding seems to be driver specific though.
*
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 65a4c142640d..4c6e57f9976d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1098,7 +1098,7 @@ err:
/**
* ath5k_drain_tx_buffs - Empty tx buffers
*
- * @ah The &struct ath5k_hw
+ * @ah: The &struct ath5k_hw
*
* Empty tx buffers from all queues in preparation
* of a reset or during shutdown.
@@ -1536,12 +1536,12 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
}
static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_tasklet_rx(struct tasklet_struct *t)
{
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, rxtq);
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
struct ath5k_desc *ds;
@@ -1784,10 +1784,10 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
}
static void
-ath5k_tasklet_tx(unsigned long data)
+ath5k_tasklet_tx(struct tasklet_struct *t)
{
int i;
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, txtq);
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
@@ -2176,9 +2176,9 @@ ath5k_beacon_config(struct ath5k_hw *ah)
spin_unlock_bh(&ah->block);
}
-static void ath5k_tasklet_beacon(unsigned long data)
+static void ath5k_tasklet_beacon(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (struct ath5k_hw *) data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, beacontq);
/*
* Software beacon alert--time to send a beacon.
@@ -2447,9 +2447,9 @@ ath5k_calibrate_work(struct work_struct *work)
static void
-ath5k_tasklet_ani(unsigned long data)
+ath5k_tasklet_ani(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, ani_tasklet);
ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
ath5k_ani_calibration(ah);
@@ -3069,10 +3069,10 @@ ath5k_init(struct ieee80211_hw *hw)
hw->queues = 1;
}
- tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
- tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
- tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
+ tasklet_setup(&ah->rxtq, ath5k_tasklet_rx);
+ tasklet_setup(&ah->txtq, ath5k_tasklet_tx);
+ tasklet_setup(&ah->beacontq, ath5k_tasklet_beacon);
+ tasklet_setup(&ah->ani_tasklet, ath5k_tasklet_ani);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 2eaba1ccab20..4b41160e5d38 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -161,33 +161,14 @@ static int reg_show(struct seq_file *seq, void *p)
return 0;
}
-static const struct seq_operations register_seq_ops = {
+static const struct seq_operations registers_sops = {
.start = reg_start,
.next = reg_next,
.stop = reg_stop,
.show = reg_show
};
-static int open_file_registers(struct inode *inode, struct file *file)
-{
- struct seq_file *s;
- int res;
- res = seq_open(file, &register_seq_ops);
- if (res == 0) {
- s = file->private_data;
- s->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations fops_registers = {
- .open = open_file_registers,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .owner = THIS_MODULE,
-};
-
+DEFINE_SEQ_ATTRIBUTE(registers);
/* debugfs: beacons */
@@ -1005,7 +986,7 @@ ath5k_debug_init_device(struct ath5k_hw *ah)
return;
debugfs_create_file("debug", 0600, phydir, ah, &fops_debug);
- debugfs_create_file("registers", 0400, phydir, ah, &fops_registers);
+ debugfs_create_file("registers", 0400, phydir, ah, &registers_fops);
debugfs_create_file("beacon", 0600, phydir, ah, &fops_beacon);
debugfs_create_file("reset", 0200, phydir, ah, &fops_reset);
debugfs_create_file("antenna", 0600, phydir, ah, &fops_antenna);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 307f1fea0a88..1fbc2c19848f 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1172,13 +1172,13 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11B) +
AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
- /* fall through */
+ fallthrough;
case AR5K_EEPROM_MODE_11B:
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11A) +
AR5K_EEPROM_N_5GHZ_CHAN / 2;
- /* fall through */
+ fallthrough;
case AR5K_EEPROM_MODE_11A:
break;
default:
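This hunk (like the ones below in pcu.c, phy.c, reset.c, ath6kl and ath9k) swaps /* fall through */ comments for the kernel's fallthrough pseudo-keyword, which maps to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify the intent instead of pattern-matching comments. A minimal illustration of the accumulating-offset pattern above, with the kernel's definition simplified (the real one lives in include/linux/compiler_attributes.h):

/* simplified; falls back to a no-op on compilers without the attribute */
#define fallthrough __attribute__((__fallthrough__))

static int cal_data_offset(int mode)
{
	int offset = 0;

	switch (mode) {
	case 2:			/* 11G: also skip the 11B and 11A tables */
		offset += 16;
		fallthrough;
	case 1:			/* 11B: also skip the 11A table */
		offset += 32;
		fallthrough;
	case 0:			/* 11A */
		break;
	}
	return offset;
}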
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 05140d8baa36..f2db7cf16566 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -101,6 +101,7 @@ static const unsigned int ack_rates_high[] =
/**
* ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
+ * @band: One of enum nl80211_band
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
* @shortpre: Indicate short preamble
@@ -670,7 +671,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
- /* fall through */
+ fallthrough;
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
@@ -913,7 +914,7 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_PWR_SV : 0);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_MONITOR:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
@@ -945,7 +946,6 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
* ath5k_hw_pcu_init() - Initialize PCU
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
- * @mode: One of enum ath5k_driver_mode
*
* This function is used to initialize PCU by setting current
* operation mode and various other settings.
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index ae08572c4b58..00f9e347d414 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3229,10 +3229,10 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
switch (pdcurves) {
case 3:
reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
- /* Fall through */
+ fallthrough;
case 2:
reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
- /* Fall through */
+ fallthrough;
case 1:
reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
break;
@@ -3353,7 +3353,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
table_min[pdg] = table_max[pdg] - 126;
}
- /* Fall through */
+ fallthrough;
case AR5K_PWRTABLE_PWR_TO_PCDAC:
case AR5K_PWRTABLE_PWR_TO_PDADC:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 56d7925a0c2c..9fdb5283b39c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -522,7 +522,7 @@ ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
switch (mode) {
case AR5K_PM_AUTO:
staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
- /* fallthrough */
+ fallthrough;
case AR5K_PM_NETWORK_SLEEP:
if (set_chip)
ath5k_hw_reg_write(ah,
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index aed34d9954c0..151935c4827f 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -42,7 +42,7 @@
* Also check out reg.h and U.S. Patent 6677779 B1 (about buffer
* registers and control registers):
*
- * http://www.google.com/patents?id=qNURAAAAEBAJ
+ * https://www.google.com/patents?id=qNURAAAAEBAJ
*/
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
index 270a319f3aeb..855ed7fc720d 100644
--- a/drivers/net/wireless/ath/ath5k/rfkill.c
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -73,9 +73,9 @@ ath5k_is_rfkill_set(struct ath5k_hw *ah)
}
static void
-ath5k_tasklet_rfkill_toggle(unsigned long data)
+ath5k_tasklet_rfkill_toggle(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, rf_kill.toggleq);
bool blocked;
blocked = ath5k_is_rfkill_set(ah);
@@ -90,8 +90,7 @@ ath5k_rfkill_hw_start(struct ath5k_hw *ah)
ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
- tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
- (unsigned long)ah);
+ tasklet_setup(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle);
ath5k_rfkill_disable(ah);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 67f8f2aa7a53..9c83e9a4299b 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3897,19 +3897,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
switch (ar->hw.cap) {
case WMI_11AN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11A_CAP:
band_5gig = true;
break;
case WMI_11GN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11G_CAP:
band_2gig = true;
break;
case WMI_11AGN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11AG_CAP:
band_2gig = true;
band_5gig = true;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 811fad6d60c0..39bf19686175 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1752,7 +1752,7 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
ret = ath6kl_init_service_ep(ar);
if (ret) {
- ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
+ ath6kl_err("Endpoint service initialization failed: %d\n", ret);
goto err_cleanup_scatter;
}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5e7ea838a921..d3aa9e7a37c2 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -389,7 +389,7 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid || ik->key_type != WAPI_CRYPT)
break;
/* for WAPI, we need to set the delayed group key, continue: */
- /* fall through */
+ fallthrough;
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+ if (aid < 1 || aid > AP_MAX_NUM_STA)
+ return;
+
if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
struct ieee80211_mgmt *mgmt =
(struct ieee80211_mgmt *) assoc_info;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 6885d2ded53a..dbc47702a268 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1201,8 +1201,7 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
struct wmi_bit_rate_reply *reply;
- s32 rate;
- u32 sgi, index;
+ u32 index;
if (len < sizeof(struct wmi_bit_rate_reply))
return -EINVAL;
@@ -1211,15 +1210,10 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
- if (reply->rate_index == (s8) RATE_AUTO) {
- rate = RATE_AUTO;
- } else {
+ if (reply->rate_index != (s8) RATE_AUTO) {
index = reply->rate_index & 0x7f;
if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
return -EINVAL;
-
- sgi = (reply->rate_index & 0x80) ? 1 : 0;
- rate = wmi_rate_tbl[index][sgi];
}
ath6kl_wakeup_event(wmi->parent_dev);
@@ -2645,6 +2639,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
return -EINVAL;
}
+ if (tsid >= 16) {
+ ath6kl_err("invalid tsid: %d\n", tsid);
+ return -EINVAL;
+ }
+
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d5e9af2dddd8..a84bb9b6573f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -22,10 +22,10 @@ config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
select ATH9K_COMMON
+ imply NEW_LEDS
+ imply LEDS_CLASS
+ imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -177,10 +177,10 @@ config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
select ATH9K_COMMON
+ imply NEW_LEDS
+ imply LEDS_CLASS
+ imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 5214dd7a3936..41d192709e8e 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -74,7 +74,7 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
* Regardless of alignment in time, the antenna signals add constructively after
* FFT and improve your reception. For more information:
*
- * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ * https://en.wikipedia.org/wiki/Maximal-ratio_combining
*/
struct ani_cck_level_entry {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index 467ccfae2cee..7da8365ae69a 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -459,12 +459,6 @@ static const u32 ar5416Common[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank0[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x1e5795e5},
- {0x000098e0, 0x02008020},
-};
-
static const u32 ar5416BB_RfGain[][3] = {
/* Addr 5G 2G */
{0x00009a00, 0x00000000, 0x00000000},
@@ -533,60 +527,6 @@ static const u32 ar5416BB_RfGain[][3] = {
{0x00009afc, 0x000000f9, 0x000000f9},
};
-static const u32 ar5416Bank1[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x02108421},
- {0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x0e73ff17},
- {0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3[][3] = {
- /* Addr 5G 2G */
- {0x000098f0, 0x01400018, 0x01c00018},
-};
-
-static const u32 ar5416Bank6[][3] = {
- /* Addr 5G 2G */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x40ff0000, 0x40ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x004210a2, 0x004210a2},
- {0x0000989c, 0x0014008f, 0x0014008f},
- {0x0000989c, 0x00c40003, 0x00c40003},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x0001805e, 0x0001805e},
- {0x0000989c, 0x0000c0ab, 0x0000c0ab},
- {0x0000989c, 0x000000f1, 0x000000f1},
- {0x0000989c, 0x00002081, 0x00002081},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
static const u32 ar5416Bank6TPC[][3] = {
/* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
@@ -624,13 +564,6 @@ static const u32 ar5416Bank6TPC[][3] = {
{0x000098d0, 0x0000000f, 0x0010000f},
};
-static const u32 ar5416Bank7[][2] = {
- /* Addr allmodes */
- {0x0000989c, 0x00000500},
- {0x0000989c, 0x00000800},
- {0x000098cc, 0x0000000e},
-};
-
static const u32 ar5416Addac[][2] = {
/* Addr allmodes */
{0x0000989c, 0x00000000},
@@ -671,4 +604,3 @@ static const u32 ar5416Addac[][2] = {
{0x0000989c, 0x00000000},
{0x000098c4, 0x00000000},
};
-
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index dae95402eb3a..2fa30834a88d 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -18,7 +18,6 @@
#include "hw-ops.h"
#include "../regd.h"
#include "ar9002_phy.h"
-#include "ar5008_initvals.h"
/* All code below is for AR5008, AR9001, AR9002 */
@@ -51,6 +50,36 @@ static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
+static const u32 ar5416Bank0[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x1e5795e5},
+ {0x000098e0, 0x02008020},
+};
+
+static const u32 ar5416Bank1[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x02108421},
+ {0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x0e73ff17},
+ {0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3[][3] = {
+ /* Addr 5G 2G */
+ {0x000098f0, 0x01400018, 0x01c00018},
+};
+
+static const u32 ar5416Bank7[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000500},
+ {0x0000989c, 0x00000800},
+ {0x000098cc, 0x0000000e},
+};
+
static const struct ar5416IniArray bank0 = STATIC_INI_ARRAY(ar5416Bank0);
static const struct ar5416IniArray bank1 = STATIC_INI_ARRAY(ar5416Bank1);
static const struct ar5416IniArray bank2 = STATIC_INI_ARRAY(ar5416Bank2);
@@ -579,14 +608,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
- /* fall through */
+ fallthrough;
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
- /* fall through */
+ fallthrough;
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index 59524e1d4678..aa5f086fa3b0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -459,43 +459,6 @@ static const u32 ar5416Common_9100[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank6_9100[][3] = {
- /* Addr 5G 2G */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x004210a2, 0x004210a2},
- {0x0000989c, 0x0014000f, 0x0014000f},
- {0x0000989c, 0x00c40002, 0x00c40002},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x000180d6, 0x000180d6},
- {0x0000989c, 0x0000c0aa, 0x0000c0aa},
- {0x0000989c, 0x000000b1, 0x000000b1},
- {0x0000989c, 0x00002000, 0x00002000},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
static const u32 ar5416Bank6TPC_9100[][3] = {
/* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 4d18c66a6790..e01b5c3728b8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -897,20 +897,6 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][5] = {
{0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
};
-static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffc},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
/* Addr allmodes */
{0x00004040, 0x9248fd00},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 4b3c9b108197..ce9a0a53771e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -267,7 +267,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
switch (i->aggr) {
case AGGR_BUF_FIRST:
ctl6 |= SM(i->aggr_len, AR_AggrLen);
- /* fall through */
+ fallthrough;
case AGGR_BUF_MIDDLE:
ctl1 |= AR_IsAggr | AR_MoreAggr;
ctl6 |= SM(i->ndelim, AR_PadDelim);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 6f32b8d2ec7f..fcfed8e59d29 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,7 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
- /* fall through */
+ fallthrough;
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index e1fe7a7c3ad8..76b538942a79 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -120,7 +120,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
switch (i->aggr) {
case AGGR_BUF_FIRST:
ctl17 |= SM(i->aggr_len, AR_AggrLen);
- /* fall through */
+ fallthrough;
case AGGR_BUF_MIDDLE:
ctl12 |= AR_IsAggr | AR_MoreAggr;
ctl17 |= SM(i->ndelim, AR_PadDelim);
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index f4c9befb3949..fab14e0a87b9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1328,27 +1328,6 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
};
-static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0835365e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0831365e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0831265e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
/* Addr 5G 2G */
{0x00009814, 0x3400c00f, 0x3400c00f},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a412b352182c..e06b74a54a69 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -713,7 +713,7 @@ struct ath_beacon {
bool tx_last;
};
-void ath9k_beacon_tasklet(unsigned long data);
+void ath9k_beacon_tasklet(struct tasklet_struct *t);
void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
bool beacons);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
@@ -1117,7 +1117,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-void ath9k_tasklet(unsigned long data);
+void ath9k_tasklet(struct tasklet_struct *t);
int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index e36f947e19fc..71e2ada86793 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,7 +365,7 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
@@ -385,9 +385,9 @@ void ath9k_csa_update(struct ath_softc *sc)
ath9k_csa_update_vif, sc);
}
-void ath9k_beacon_tasklet(unsigned long data)
+void ath9k_beacon_tasklet(struct tasklet_struct *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_softc *sc = from_tasklet(sc, t, bcon_tasklet);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index fd61ae4782b6..6cf087522157 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -706,7 +706,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
"Move chanctx state from FORCE_ACTIVE to IDLE\n");
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
- /* fall through */
+ fallthrough;
case ATH_CHANCTX_EVENT_SWITCH:
if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
@@ -1080,7 +1080,7 @@ static void ath_offchannel_timer(struct timer_list *t)
mod_timer(&sc->offchannel.timer, jiffies + HZ / 10);
break;
}
- /* fall through */
+ fallthrough;
case ATH_OFFCHANNEL_SUSPEND:
if (!sc->offchannel.scan_req)
return;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 56b44fc7a8e6..9729a69d3e2e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -402,7 +402,7 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return AR5416_PWR_TABLE_OFFSET_DB;
case EEP_ANTENNA_GAIN_2G:
band = 1;
- /* fall through */
+ fallthrough;
case EEP_ANTENNA_GAIN_5G:
return max_t(u8, max_t(u8,
pModal[band].antennaGainCh[0],
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 3f563e02d17d..860da13bfb6a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
/* The pending URBs have to be canceled. */
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
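usb_kill_urb() may sleep while it waits for the URB's completion handler, so it must not be called under a spinlock; the hunks above therefore pin each URB with usb_get_urb(), drop the lock for the kill, and re-acquire it to keep walking the list. The pattern in isolation, with hypothetical types:

#include <linux/usb.h>
#include <linux/slab.h>

struct tx_entry {			/* hypothetical */
	struct urb *urb;
	struct list_head list;
};

struct my_usb_dev {			/* hypothetical */
	spinlock_t tx_lock;
	struct list_head pending;
};

static void kill_pending_urbs(struct my_usb_dev *dev)
{
	struct tx_entry *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);
	list_for_each_entry_safe(buf, tmp, &dev->pending, list) {
		usb_get_urb(buf->urb);		/* pin it across the unlocked window */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		usb_kill_urb(buf->urb);		/* may sleep: must not hold the lock */
		list_del(&buf->list);
		usb_free_urb(buf->urb);		/* drop the reference */
		kfree(buf);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}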
@@ -1375,7 +1394,7 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_hif_usb_dev_deinit(hif_dev);
- ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
+ ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 9f64e32381f9..0a1634238e67 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -583,14 +583,14 @@ int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv);
void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
-void ath9k_tx_failed_tasklet(unsigned long data);
+void ath9k_tx_failed_tasklet(struct tasklet_struct *t);
void ath9k_htc_tx_cleanup_timer(struct timer_list *t);
bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv);
int ath9k_rx_init(struct ath9k_htc_priv *priv);
void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
-void ath9k_rx_tasklet(unsigned long data);
+void ath9k_rx_tasklet(struct tasklet_struct *t);
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f20c839aeda2..c745897aa3d6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,7 +514,7 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 1d6ad8d46607..db0c6fa9c9dc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -645,10 +645,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
spin_lock_init(&priv->tx.tx_lock);
mutex_init(&priv->mutex);
mutex_init(&priv->htc_pm_lock);
- tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
- (unsigned long)priv);
- tasklet_init(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet,
- (unsigned long)priv);
+ tasklet_setup(&priv->rx_tasklet, ath9k_rx_tasklet);
+ tasklet_setup(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet);
INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
@@ -973,7 +971,7 @@ err_init:
ath9k_stop_wmi(priv);
hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
ath9k_hif_usb_dealloc_urbs(hif_dev);
- ath9k_destoy_wmi(priv);
+ ath9k_destroy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b353995bdd45..0bdc4dcb7b8f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -570,9 +570,9 @@ void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
spin_unlock_bh(&priv->tx.tx_lock);
}
-void ath9k_tx_failed_tasklet(unsigned long data)
+void ath9k_tx_failed_tasklet(struct tasklet_struct *t)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_priv *priv = from_tasklet(priv, t, tx_failed_tasklet);
spin_lock(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
@@ -974,7 +974,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
- __be16 rs_datalen;
+ u16 rs_datalen;
bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
@@ -1062,9 +1062,9 @@ rx_next:
/*
* FIXME: Handle FLUSH later on.
*/
-void ath9k_rx_tasklet(unsigned long data)
+void ath9k_rx_tasklet(struct tasklet_struct *t)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_priv *priv = from_tasklet(priv, t, rx_tasklet);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb;
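from_tasklet() used in these conversions is a container_of() wrapper: the callback receives a pointer to the tasklet_struct embedded in the driver's private structure and recovers the enclosing structure from it, replacing the old unsigned long cast. The pointer arithmetic, spelled out in a standalone sketch:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tasklet { int dummy; };

struct priv {
	int id;
	struct tasklet rx_tasklet;	/* embedded, like priv->rx_tasklet */
};

/* New-style callback: receives the embedded member, derives the parent. */
static void rx_handler(struct tasklet *t)
{
	struct priv *p = container_of(t, struct priv, rx_tasklet);

	printf("priv id = %d\n", p->id);
}

int main(void)
{
	struct priv p = { .id = 42 };

	rx_handler(&p.rx_tasklet);
	return 0;
}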
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d2e062eaf561..510e61e97dbc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
if (skb) {
htc_hdr = (struct htc_frame_hdr *) skb->data;
+ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
+ goto ret;
endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
skb_pull(skb, sizeof(struct htc_frame_hdr));
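The added check validates endpoint_id, which comes from a device-supplied HTC frame header, before it is used to index the endpoint array, so buggy or malicious firmware can no longer trigger an out-of-bounds access. The pattern in isolation (illustrative types and sizes):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct endpoint { int id; };

static struct endpoint endpoints[22];	/* like htc_handle->endpoint[] */

/* idx comes from a device-controlled frame header: validate first. */
static struct endpoint *lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(endpoints))
		return NULL;		/* reject instead of indexing OOB */
	return &endpoints[idx];
}

int main(void)
{
	printf("%p %p\n", (void *)lookup(3), (void *)lookup(200));
	return 0;
}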
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c97db73e34c..6609ce122e6e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1277,12 +1277,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
}
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
set |= AR_STA_ID1_STA_AP;
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_STATION:
REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
@@ -2293,7 +2293,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 4d72cd7daaa2..690fe3a1b516 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -728,9 +728,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->sc_pm_lock);
spin_lock_init(&sc->chan_lock);
mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
- (unsigned long)sc);
+ tasklet_setup(&sc->intr_tq, ath9k_tasklet);
+ tasklet_setup(&sc->bcon_tasklet, ath9k_beacon_tasklet);
timer_setup(&sc->sleep_timer, ath_ps_full_sleep, 0);
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
@@ -1014,6 +1013,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a47f6e978095..8dbf68b94228 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,6 +19,9 @@
#include "ath9k.h"
#include "btcoex.h"
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+
u8 ath9k_parse_mpdudensity(u8 mpdudensity)
{
/*
@@ -368,9 +371,9 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_dynack_node_deinit(sc->sc_ah, an);
}
-void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(struct tasklet_struct *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_softc *sc = from_tasklet(sc, t, intr_tq);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
enum ath_reset_type type;
@@ -1701,6 +1704,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
+ /* There may be MPDUs queued for the outgoing PTK key. Flush queues to
+ * make sure these are not sent unencrypted or with a wrong (new) key
+ */
+ if (cmd == DISABLE_KEY && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ ieee80211_stop_queues(hw);
+ ath9k_flush(hw, vif, 0, true);
+ ieee80211_wake_queues(hw);
+ }
+
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
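The PTK0-rekey hunk above quiesces the TX path before the key changes hands: stop the queues so nothing new enters, flush what is still queued for the outgoing key, then wake the queues. Reduced to its control flow (toy state, illustrative names):

#include <stdio.h>

static int queued;	/* frames still waiting for the old key */
static int stopped;

static void stop_queues(void) { stopped = 1; }	/* no new frames enter */
static void wake_queues(void) { stopped = 0; }
static void flush(void)       { queued = 0; }	/* drain what is pending */

/* Bracket a pairwise key change so no MPDU leaves with a stale key. */
static void replace_ptk0(void)
{
	stop_queues();
	flush();
	wake_queues();
}

int main(void)
{
	queued = 5;
	replace_ptk0();
	printf("queued=%d stopped=%d\n", queued, stopped);	/* 0 0 */
	return 0;
}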
@@ -1934,7 +1946,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
flush = true;
- /* fall through */
+ fallthrough;
case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f3461b193c7a..cff9af3af38d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -825,6 +825,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
struct pci_dev *pdev = to_pci_dev(sc->dev);
struct pci_dev *parent;
u16 aspm;
+ int ret;
if (!ah->is_pciexpress)
return;
@@ -866,8 +867,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
if (AR_SREV_9462(ah))
pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
- if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
+ ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
+ if (!ret && (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1))) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
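pcie_capability_read_word() can fail, leaving aspm uninitialized, so the fix only tests the ASPM bits when the read reports success. The general shape, with a stand-in for the config-space read:

#include <stdio.h>

/* Stand-in for pcie_capability_read_word(): 0 on success, -1 on error. */
static int read_reg(int fail, unsigned short *val)
{
	if (fail)
		return -1;	/* *val intentionally left untouched */
	*val = 0x0003;		/* pretend L0s|L1 are enabled */
	return 0;
}

int main(void)
{
	unsigned short aspm;	/* uninitialized until read_reg() succeeds */
	int ret = read_reg(0, &aspm);

	/* Only inspect the output parameter when the call succeeded. */
	if (!ret && (aspm & 0x3))
		puts("ASPM enabled");
	return 0;
}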
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e7a3127395be..fe29ad4b9023 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -106,8 +106,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
mutex_init(&wmi->multi_rmw_mutex);
init_completion(&wmi->cmd_wait);
INIT_LIST_HEAD(&wmi->pending_tx_events);
- tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
- (unsigned long)wmi);
+ tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);
return wmi;
}
@@ -121,7 +120,7 @@ void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
mutex_unlock(&wmi->op_mutex);
}
-void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
{
kfree(priv->wmi);
}
@@ -136,9 +135,9 @@ void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}
-void ath9k_wmi_event_tasklet(unsigned long data)
+void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
{
- struct wmi *wmi = (struct wmi *)data;
+ struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
struct ath9k_htc_priv *priv = wmi->drv_priv;
struct wmi_cmd_hdr *hdr;
void *wmi_event;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index d8b912206232..5c3b710b8f31 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -185,11 +185,11 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
u8 *cmd_buf, u32 cmd_len,
u8 *rsp_buf, u32 rsp_len,
u32 timeout);
-void ath9k_wmi_event_tasklet(unsigned long data);
+void ath9k_wmi_event_tasklet(struct tasklet_struct *t);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
-void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 237d0cda1bcb..0d38100d6e4f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -68,7 +68,10 @@
#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
-static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 };
+static inline u8 ar9170_qmap(u8 idx)
+{
+ return 3 - idx; /* { 3, 2, 1, 0 } */
+}
#define CARL9170_MAX_RX_BUFFER_SIZE 8192
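The ar9170_qmap[] lookup table becomes an inline function computing the same { 3, 2, 1, 0 } mapping, removing both the static array and any out-of-bounds exposure from an unexpected index. A quick standalone check that the two forms agree:

#include <assert.h>
#include <stdio.h>

static const unsigned char qmap_table[4] = { 3, 2, 1, 0 };

/* Computed replacement for the lookup table. */
static inline unsigned char qmap(unsigned char idx)
{
	return 3 - idx;
}

int main(void)
{
	for (unsigned char i = 0; i < 4; i++)
		assert(qmap(i) == qmap_table[i]);
	puts("qmap(idx) == qmap_table[idx] for all queues");
	return 0;
}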
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 816929fb5b14..dbef9d8fc893 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1374,7 +1374,7 @@ static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
int ret;
mutex_lock(&ar->mutex);
- memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
+ memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
ret = carl9170_set_qos(ar);
mutex_unlock(&ar->mutex);
return ret;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 23ab8a80c18c..908c4c8b7f82 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -766,7 +766,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
goto drop;
}
- /* fall through */
+ fallthrough;
case AR9170_RX_STATUS_MPDU_MIDDLE:
/* These are just data + mac status */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 2407931440ed..235cf77cd60c 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -663,7 +663,7 @@ static void __carl9170_tx_process_status(struct ar9170 *ar,
unsigned int r, t, q;
bool success = true;
- q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
+ q = ar9170_qmap(info & CARL9170_TX_STATUS_QUEUE);
skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
if (!skb) {
@@ -830,12 +830,12 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar,
case CARL9170_ERP_AUTO:
if (ampdu)
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_RTS:
if (likely(!multi))
@@ -856,7 +856,7 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_CTS:
return true;
@@ -979,7 +979,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
((CARL9170_TX_SUPER_MISC_VIF_ID >>
CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
- hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
+ hw_queue = ar9170_qmap(carl9170_get_queue(ar, skb));
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
@@ -1279,7 +1279,7 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
super = (void *)skb->data;
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
- ar9170_qmap[carl9170_get_queue(ar, skb)]);
+ ar9170_qmap(carl9170_get_queue(ar, skb)));
__carl9170_tx_process_status(ar, super->s.cookie, q);
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ead79335823a..e4eb666c6eea 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -377,9 +377,9 @@ void carl9170_usb_handle_tx_err(struct ar9170 *ar)
}
}
-static void carl9170_usb_tasklet(unsigned long data)
+static void carl9170_usb_tasklet(struct tasklet_struct *t)
{
- struct ar9170 *ar = (struct ar9170 *) data;
+ struct ar9170 *ar = from_tasklet(ar, t, usb_tasklet);
if (!IS_INITIALIZED(ar))
return;
@@ -1082,8 +1082,7 @@ static int carl9170_usb_probe(struct usb_interface *intf,
init_completion(&ar->cmd_wait);
init_completion(&ar->fw_boot_wait);
init_completion(&ar->fw_load_wait);
- tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet,
- (unsigned long)ar);
+ tasklet_setup(&ar->usb_tasklet, carl9170_usb_tasklet);
atomic_set(&ar->tx_cmd_urbs, 0);
atomic_set(&ar->tx_anch_urbs, 0);
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a274eb0d1968..0813473793df 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -253,17 +253,15 @@ channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
static void dpd_reset(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd;
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry(cd, &dpd->channel_detectors, head)
- channel_detector_reset(dpd, cd);
+ list_for_each_entry(cd, &dpd->channel_detectors, head)
+ channel_detector_reset(dpd, cd);
}
static void dpd_exit(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd, *cd0;
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
- channel_detector_exit(dpd, cd);
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
kfree(dpd);
}
@@ -331,9 +329,8 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
return false;
/* delete all channel detectors for previous DFS domain */
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
- channel_detector_exit(dpd, cd);
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
dpd->radar_spec = rt->radar_types;
dpd->num_radar_types = rt->num_radar_types;
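The deleted list_empty() guards were redundant: the kernel's list iterators walk a circular list head, so on an empty list the cursor starts and ends at the head and the loop body runs zero times. A toy circular doubly-linked list demonstrating this:

#include <stdio.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *head)
{
	head->next = head->prev = head;	/* empty list points at itself */
}

int main(void)
{
	struct node head;
	int iterations = 0;

	list_init(&head);

	/* Equivalent of list_for_each(): starts at head->next, stops at
	 * head. On an empty list head->next == head, so the body never
	 * runs and no guard is needed. */
	for (struct node *n = head.next; n != &head; n = n->next)
		iterations++;

	printf("iterations on empty list: %d\n", iterations);	/* 0 */
	return 0;
}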
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index bab30f7a443c..63079231e48e 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -334,6 +334,7 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
spin_lock_irqsave(&wcn->dxe_lock, flags);
skb = wcn->tx_ack_skb;
wcn->tx_ack_skb = NULL;
+ del_timer(&wcn->tx_ack_timer);
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
if (!skb) {
@@ -345,6 +346,8 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
if (status == 1)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
@@ -352,6 +355,32 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
ieee80211_wake_queues(wcn->hw);
}
+static void wcn36xx_dxe_tx_timer(struct timer_list *t)
+{
+ struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ /* TX Timeout */
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
struct wcn36xx_dxe_ctl *ctl;
@@ -397,6 +426,7 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
int int_src, int_reason;
+ bool transmitted = false;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
@@ -434,8 +464,10 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
- WCN36XX_CH_STAT_INT_ED_MASK))
+ WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ transmitted = true;
+ }
}
if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
@@ -473,9 +505,27 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
- WCN36XX_CH_STAT_INT_ED_MASK))
+ WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ transmitted = true;
+ }
+ }
+
+ spin_lock(&wcn->dxe_lock);
+ if (wcn->tx_ack_skb && transmitted) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb);
+
+ /* TX complete, no need to wait for 802.11 ack indication */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS &&
+ info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ del_timer(&wcn->tx_ack_timer);
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ ieee80211_wake_queues(wcn->hw);
+ }
}
+ spin_unlock(&wcn->dxe_lock);
return IRQ_HANDLED;
}
@@ -916,6 +966,8 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
if (ret < 0)
goto out_err_irq;
+ timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
+
return 0;
out_err_irq:
@@ -934,6 +986,7 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
free_irq(wcn->tx_irq, wcn);
free_irq(wcn->rx_irq, wcn);
+ del_timer(&wcn->tx_ack_timer);
if (wcn->tx_ack_skb) {
ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index aab5a58616fc..65ef893f2736 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -726,7 +726,137 @@ enum pe_stats_mask {
#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
-#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+#define WCN36XX_HAL_CFG_ENABLE_NAT_KEEP_ALIVE_FILTER 105
+#define WCN36XX_HAL_CFG_ENABLE_SAP_OBSS_PROT 106
+#define WCN36XX_HAL_CFG_PSPOLL_DATA_RECEP_TIMEOUT 107
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_BUFFER_STA_CAPABLE 108
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_MASK 109
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_INACTIVITY_TIME 110
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD 111
+#define WCN36XX_HAL_CFG_ANTENNA_DIVERSITY 112
+#define WCN36XX_HAL_CFG_ATH_DISABLE 113
+#define WCN36XX_HAL_CFG_FLEXCONNECT_POWER_FACTOR 114
+#define WCN36XX_HAL_CFG_ENABLE_ADAPTIVE_RX_DRAIN 115
+#define WCN36XX_HAL_CFG_TDLS_OFF_CHANNEL_CAPABLE 116
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_WAN_FREQ 117
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_WLAN_FREQ 118
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG 119
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG2 120
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_WAN_FREQ 121
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_WLAN_FREQ 122
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG 123
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG2 124
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_WAN_FREQ 125
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_WLAN_FREQ 126
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG 127
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG2 128
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_WAN_FREQ 129
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_WLAN_FREQ 130
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG 131
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG2 132
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_WAN_FREQ 133
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_WLAN_FREQ 134
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG 135
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG2 136
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_WAN_FREQ 137
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_WLAN_FREQ 138
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG 139
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG2 140
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_WAN_FREQ 141
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_WLAN_FREQ 142
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG 143
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG2 144
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_WAN_FREQ 145
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_WLAN_FREQ 146
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG 147
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG2 148
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_WAN_FREQ 149
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_WLAN_FREQ 150
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG 151
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG2 152
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_WAN_FREQ 153
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_WLAN_FREQ 154
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG 155
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG2 156
+#define WCN36XX_HAL_CFG_MWS_COEX_MODEM_BACKOFF 157
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG1 158
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG2 159
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG3 160
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG4 161
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG5 162
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG6 163
+#define WCN36XX_HAL_CFG_SAR_POWER_BACKOFF 164
+#define WCN36XX_HAL_CFG_GO_LINK_MONITOR_TIMEOUT 165
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN 166
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN 167
+#define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN 168
+#define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN 169
+#define WCN36XX_HAL_CFG_RMC_FIXED_RATE 170
+#define WCN36XX_HAL_CFG_ASD_PROBE_INTERVAL 171
+#define WCN36XX_HAL_CFG_ASD_TRIGGER_THRESHOLD 172
+#define WCN36XX_HAL_CFG_ASD_RTT_RSSI_HYST_THRESHOLD 173
+#define WCN36XX_HAL_CFG_BTC_CTS2S_ON_STA_DURING_SCO 174
+#define WCN36XX_HAL_CFG_SHORT_PREAMBLE 175
+#define WCN36XX_HAL_CFG_SHORT_SLOT_TIME 176
+#define WCN36XX_HAL_CFG_DELAYED_BA 177
+#define WCN36XX_HAL_CFG_IMMEDIATE_BA 178
+#define WCN36XX_HAL_CFG_DOT11_MODE 179
+#define WCN36XX_HAL_CFG_HT_CAPS 180
+#define WCN36XX_HAL_CFG_AMPDU_PARAMS 181
+#define WCN36XX_HAL_CFG_TX_BF_INFO 182
+#define WCN36XX_HAL_CFG_ASC_CAP_INFO 183
+#define WCN36XX_HAL_CFG_EXT_HT_CAPS 184
+#define WCN36XX_HAL_CFG_QOS_ENABLED 185
+#define WCN36XX_HAL_CFG_WME_ENABLED 186
+#define WCN36XX_HAL_CFG_WSM_ENABLED 187
+#define WCN36XX_HAL_CFG_WMM_ENABLED 188
+#define WCN36XX_HAL_CFG_UAPSD_PER_AC_BITMASK 189
+#define WCN36XX_HAL_CFG_MCS_RATES 190
+#define WCN36XX_HAL_CFG_VHT_CAPS 191
+#define WCN36XX_HAL_CFG_VHT_RX_SUPP_MCS 192
+#define WCN36XX_HAL_CFG_VHT_TX_SUPP_MCS 193
+#define WCN36XX_HAL_CFG_RA_FILTER_ENABLE 194
+#define WCN36XX_HAL_CFG_RA_RATE_LIMIT_INTERVAL 195
+#define WCN36XX_HAL_CFG_BTC_FATAL_HID_NSNIFF_BLK 196
+#define WCN36XX_HAL_CFG_BTC_CRITICAL_HID_NSNIFF_BLK 197
+#define WCN36XX_HAL_CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD 198
+#define WCN36XX_HAL_CFG_BTC_DYN_OPP_TX_QUEUE_THOLD 199
+#define WCN36XX_HAL_CFG_LINK_FAIL_TIMEOUT 200
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_SP 201
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT 202
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT 203
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW 204
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW 205
+#define WCN36XX_HAL_CFG_MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE 206
+#define WCN36XX_HAL_CFG_MAX_UAPSD_INACTIVITY_INTERVALS 207
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_WMMPS 208
+#define WCN36XX_HAL_CFG_BURST_MODE_BE_TXOP_VALUE 209
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_RA_START_RATE 210
+#define WCN36XX_HAL_CFG_BTC_FAST_WLAN_CONN_PREF 211
+#define WCN36XX_HAL_CFG_ENABLE_RTSCTS_HTVHT 212
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN 213
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN 214
+#define WCN36XX_HAL_CFG_LINK_FAIL_TX_CNT 215
+#define WCN36XX_HAL_CFG_TOGGLE_ARP_BDRATES 216
+#define WCN36XX_HAL_CFG_OPTIMIZE_CA_EVENT 217
+#define WCN36XX_HAL_CFG_EXT_SCAN_CONC_MODE 218
+#define WCN36XX_HAL_CFG_BAR_WAKEUP_HOST_DISABLE 219
+#define WCN36XX_HAL_CFG_SAR_BOFFSET_CORRECTION_ENABLE 220
+#define WCN36XX_HAL_CFG_UNITS_OF_BCN_WAIT_TIME 221
+#define WCN36XX_HAL_CFG_CONS_BCNMISS_COUNT 222
+#define WCN36XX_HAL_CFG_BTC_DISABLE_WLAN_LINK_CRITICAL 223
+#define WCN36XX_HAL_CFG_DISABLE_SCAN_DURING_SCO 224
+#define WCN36XX_HAL_CFG_TRIGGER_NULLFRAME_BEFORE_HB 225
+#define WCN36XX_HAL_CFG_ENABLE_POWERSAVE_OFFLOAD 226
+#define WCN36XX_HAL_CFG_MAX_PARAMS 227
+
+/* Specify the starting bitrate. 11B and 11A/G rates can be specified in
+ * multiples of 0.5, so 5.5 Mbps => 11. For MCS 0-7 rates, bit 7 should be
+ * set to 1 and bits 0-6 represent the MCS index, so MCS2 => 130.
+ * Any invalid non-zero value or unsupported rate will set the start rate
+ * to 6 Mbps.
+ */
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_RA_START_RATE 210
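The encoding the comment above describes can be sketched directly: legacy rates in 0.5 Mbps units, MCS rates with bit 7 set and the index in bits 0-6, which is also where the value 133 (MCS 5) in the wcn3680 CFG table further down comes from:

#include <stdio.h>

/* Legacy 11b/a/g rate: value is the rate in multiples of 0.5 Mbps. */
static unsigned int encode_legacy(double mbps)
{
	return (unsigned int)(mbps * 2.0);
}

/* HT MCS rate: bit 7 set, bits 0-6 carry the MCS index. */
static unsigned int encode_mcs(unsigned int mcs)
{
	return 0x80 | (mcs & 0x7f);
}

int main(void)
{
	printf("5.5 Mbps -> %u\n", encode_legacy(5.5));	/* 11  */
	printf("MCS2     -> %u\n", encode_mcs(2));	/* 130 */
	printf("MCS5     -> %u\n", encode_mcs(5));	/* 133 */
	return 0;
}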
/* Message definitions - All the messages below need to be packed */
@@ -1405,6 +1535,76 @@ struct wcn36xx_hal_config_sta_req_msg {
struct wcn36xx_hal_config_sta_params sta_params;
} __packed;
+struct wcn36xx_hal_supported_rates_v1 {
+ /* For Self STA Entry: this represents Self Mode.
+ * For Peer Stations, this represents the mode of the peer.
+ * On Station:
+ *
+ * -- this mode is updated when PE adds the Self Entry.
+ *
+ * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+ * is used to indicate the mode of the AP.
+ *
+ * On AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+ * for that BSS is used to indicate the self mode of the AP.
+ *
+ * -- OR when a station is associated, PE sends 'ADD_STA' message
+ * with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+ /* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+ * units of 500 kbps
+ */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+ * supported). The first 26 bits are reserved for those Titan rates,
+ * the last 4 bits (28-31) are for Taurus, and 2 bits (26-27) are
+ * reserved.
+ */
+ u32 enhanced_rate_bitmap;
+
+ /* Bits 0-76 are used, the remaining are reserved;
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /* RX Highest Supported Data Rate defines the highest data
+ * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from "Supported MCS Set field" inside
+ * the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+ /* Indicates the Maximum MCS that can be received for each spatial
+ * stream.
+ */
+ u16 vht_rx_mcs_map;
+
+ /* Indicates the highest VHT data rate that the STA is able to
+ * receive.
+ */
+ u16 vht_rx_highest_data_rate;
+
+ /* Indicates the Maximum MCS that can be transmitted for each spatial
+ * stream.
+ */
+ u16 vht_tx_mcs_map;
+
+ /* Indicates the highest VHT data rate that the STA is able to
+ * transmit.
+ */
+ u16 vht_tx_highest_data_rate;
+} __packed;
+
struct wcn36xx_hal_config_sta_params_v1 {
/* BSSID of STA */
u8 bssid[ETH_ALEN];
@@ -1507,12 +1707,22 @@ struct wcn36xx_hal_config_sta_params_v1 {
u8 p2p;
/* Reserved to align next field on a dword boundary */
- u8 reserved;
+ u8 ht_ldpc_enabled:1;
+ u8 vht_ldpc_enabled:1;
+ u8 vht_tx_bf_enabled:1;
+ u8 vht_tx_mu_beamformee_capable:1;
+ u8 reserved:4;
/* These rates are the intersection of peer and self capabilities. */
- struct wcn36xx_hal_supported_rates supported_rates;
+ struct wcn36xx_hal_supported_rates_v1 supported_rates;
+
+ u8 vht_capable;
+ u8 vht_tx_channel_width_set;
+
} __packed;
+#define WCN36XX_DIFF_STA_PARAMS_V1_NOVHT 10
+
struct wcn36xx_hal_config_sta_req_msg_v1 {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_config_sta_params_v1 sta_params;
@@ -1933,8 +2143,14 @@ struct wcn36xx_hal_config_bss_params_v1 {
* "STA context"
*/
struct wcn36xx_hal_config_sta_params_v1 sta;
+
+ u8 vht_capable;
+ u8 vht_tx_channel_width_set;
+
} __packed;
+#define WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT (WCN36XX_DIFF_STA_PARAMS_V1_NOVHT + 2)
+
struct wcn36xx_hal_config_bss_req_msg_v1 {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_config_bss_params_v1 bss_params;
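The *_NOVHT constants record how many trailing VHT bytes the v1 structures gained, so smd.c can shrink header.len by that amount when the firmware predates VHT (see the header.len -= WCN36XX_DIFF_STA_PARAMS_V1_NOVHT hunk below). A sketch of trimming a trailing optional block from a wire message (illustrative struct and sizes):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct msg_v1 {
	unsigned short len;	/* bytes actually sent */
	unsigned char core[16];	/* fields every firmware understands */
	unsigned char vht[10];	/* trailing extension, newer firmware only */
};

/* Trailing-extension size, like WCN36XX_DIFF_STA_PARAMS_V1_NOVHT. */
#define MSG_V1_NOVHT (sizeof(struct msg_v1) - offsetof(struct msg_v1, vht))

static void send_msg(struct msg_v1 *m, int fw_has_vht)
{
	m->len = sizeof(*m);
	if (!fw_has_vht)
		m->len -= MSG_V1_NOVHT;	/* old firmware: stop before the VHT tail */
	printf("sending %u bytes\n", (unsigned)m->len);
}

int main(void)
{
	struct msg_v1 m;

	memset(&m, 0, sizeof(m));
	send_msg(&m, 1);	/* full message */
	send_msg(&m, 0);	/* truncated for pre-VHT firmware */
	return 0;
}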
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 702b689c06df..706728fba72d 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -39,10 +39,10 @@ MODULE_PARM_DESC(debug_mask, "Debugging mask");
.max_power = 25, \
}
-#define CHAN5G(_freq, _idx) { \
+#define CHAN5G(_freq, _idx, _phy_val) { \
.band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
- .hw_value = (_idx), \
+ .hw_value = (_phy_val) << HW_VALUE_PHY_SHIFT | HW_VALUE_CHANNEL(_idx), \
.max_power = 25, \
}
@@ -67,29 +67,29 @@ static struct ieee80211_channel wcn_2ghz_channels[] = {
};
static struct ieee80211_channel wcn_5ghz_channels[] = {
- CHAN5G(5180, 36),
- CHAN5G(5200, 40),
- CHAN5G(5220, 44),
- CHAN5G(5240, 48),
- CHAN5G(5260, 52),
- CHAN5G(5280, 56),
- CHAN5G(5300, 60),
- CHAN5G(5320, 64),
- CHAN5G(5500, 100),
- CHAN5G(5520, 104),
- CHAN5G(5540, 108),
- CHAN5G(5560, 112),
- CHAN5G(5580, 116),
- CHAN5G(5600, 120),
- CHAN5G(5620, 124),
- CHAN5G(5640, 128),
- CHAN5G(5660, 132),
- CHAN5G(5700, 140),
- CHAN5G(5745, 149),
- CHAN5G(5765, 153),
- CHAN5G(5785, 157),
- CHAN5G(5805, 161),
- CHAN5G(5825, 165)
+ CHAN5G(5180, 36, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5200, 40, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5220, 44, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5240, 48, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5260, 52, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5280, 56, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5300, 60, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5320, 64, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5500, 100, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5520, 104, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5540, 108, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5560, 112, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5580, 116, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5600, 120, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5805, 161, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5825, 165, 0)
};
#define RATE(_bitrate, _hw_rate, _flags) { \
@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- .rx_highest = cpu_to_le16(72),
+ .rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
}
}
@@ -354,8 +354,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
- cancel_work_sync(&wcn->scan_work);
-
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
struct cfg80211_scan_info scan_info = {
@@ -378,12 +376,37 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
kfree(wcn->hal_buf);
}
-static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
{
- struct wcn36xx *wcn = hw->priv;
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ if (enable && !wcn->sw_scan) {
+ if (vif->bss_conf.ps) /* ps allowed ? */
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+}
+
+static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
mutex_lock(&wcn->conf_mutex);
@@ -392,24 +415,29 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
int ch = WCN36XX_HW_CHANNEL(wcn);
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);
- list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = wcn36xx_priv_to_vif(tmp);
- wcn36xx_smd_switch_channel(wcn, vif, ch);
- }
- }
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = wcn36xx_priv_to_vif(tmp);
- if (hw->conf.flags & IEEE80211_CONF_PS) {
- if (vif->bss_conf.ps) /* ps allowed ? */
- wcn36xx_pmc_enter_bmps_state(wcn, vif);
- } else {
- wcn36xx_pmc_exit_bmps_state(wcn, vif);
- }
+ if (wcn->sw_scan_opchannel == ch) {
+ /* If the channel is the initial operating channel, we may
+ * want to receive/transmit regular data packets, so simply
+ * stop the scan session and exit PS mode.
+ */
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ } else if (wcn->sw_scan) {
+ /* A scan is ongoing, do not change the operating
+ * channel, but start a scan session on the channel.
+ */
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ wcn36xx_smd_start_scan(wcn, ch);
+ } else {
+ wcn36xx_change_opchannel(wcn, ch);
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -582,6 +610,15 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
}
+ /* FIXME: Only enable bmps support when encryption is enabled.
+ * For unknown reasons, when connected to an open/no-security BSS,
+ * the wcn36xx controller in bmps mode does not forward
+ * 'wake-up' beacons even though the AP sends DTIM with the
+ * station AID. It could be due to a firmware issue or to the
+ * way the driver configures the station.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ vif_priv->allow_bmps = true;
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
@@ -614,55 +651,26 @@ out:
return ret;
}
-static void wcn36xx_hw_scan_worker(struct work_struct *work)
+static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
- struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work);
- struct cfg80211_scan_request *req = wcn->scan_req;
- u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
- struct cfg80211_scan_info scan_info = {};
- bool aborted = false;
+ struct wcn36xx *wcn = hw->priv;
int i;
- wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
-
- for (i = 0; i < req->n_channels; i++)
- channels[i] = req->channels[i]->hw_value;
-
- wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels);
-
- wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
- for (i = 0; i < req->n_channels; i++) {
- mutex_lock(&wcn->scan_lock);
- aborted = wcn->scan_aborted;
- mutex_unlock(&wcn->scan_lock);
-
- if (aborted)
- break;
-
- wcn->scan_freq = req->channels[i]->center_freq;
- wcn->scan_band = req->channels[i]->band;
-
- wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
- msleep(30);
- wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
-
- wcn->scan_freq = 0;
+ if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ /* fall back to mac80211 software scan */
+ return 1;
}
- wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
-
- scan_info.aborted = aborted;
- ieee80211_scan_completed(wcn->hw, &scan_info);
- mutex_lock(&wcn->scan_lock);
- wcn->scan_req = NULL;
- mutex_unlock(&wcn->scan_lock);
-}
+ /* For an unknown reason, the hardware-offloaded scan only works
+ * with 2.4 GHz channels; fall back to software scan in other cases.
+ */
+ for (i = 0; i < hw_req->req.n_channels; i++) {
+ if (hw_req->req.channels[i]->band != NL80211_BAND_2GHZ)
+ return 1;
+ }
-static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
-{
- struct wcn36xx *wcn = hw->priv;
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
mutex_unlock(&wcn->scan_lock);
@@ -674,12 +682,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&wcn->scan_lock);
- if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
- /* legacy manual/sw scan */
- schedule_work(&wcn->scan_work);
- return 0;
- }
-
return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
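Returning 1 from the hw_scan callback asks mac80211 to run its own software scan instead, which is what lets this patch delete the driver's hand-rolled scan worker: firmware offload is used when the capability is present and the request is 2.4 GHz only, software scan otherwise. A toy dispatcher with the same convention:

#include <stdio.h>

/* Convention mirrored from mac80211: 0 = hw scan started,
 * 1 = caller should fall back to software scan, <0 = error. */
static int hw_scan(int offload_supported, int band_is_2ghz)
{
	if (!offload_supported || !band_is_2ghz)
		return 1;	/* fall back to software scan */
	return 0;		/* firmware scan started */
}

int main(void)
{
	printf("%d %d %d\n",
	       hw_scan(1, 1),	/* 0: offloaded */
	       hw_scan(0, 1),	/* 1: no offload capability */
	       hw_scan(1, 0));	/* 1: 5 GHz, offload not reliable */
	return 0;
}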
@@ -696,16 +698,35 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
/* ieee80211_scan_completed will be called on FW scan
* indication */
wcn36xx_smd_stop_hw_scan(wcn);
- } else {
- struct cfg80211_scan_info scan_info = {
- .aborted = true,
- };
-
- cancel_work_sync(&wcn->scan_work);
- ieee80211_scan_completed(wcn->hw, &scan_info);
}
}
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ wcn->sw_scan = true;
+ wcn->sw_scan_vif = vif;
+ if (vif_priv->sta_assoc)
+ wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
+ else
+ wcn->sw_scan_opchannel = 0;
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ /* ensure that any scan session is finished */
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
+ wcn->sw_scan = false;
+ wcn->sw_scan_opchannel = 0;
+}
+
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
enum nl80211_band band)
{
@@ -745,7 +766,16 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
sta->ht_cap.mcs.rx_mask,
sizeof(sta->ht_cap.mcs.rx_mask));
}
+
+ if (sta->vht_cap.vht_supported) {
+ sta_priv->supported_rates.op_rate_mode = STA_11ac;
+ sta_priv->supported_rates.vht_rx_mcs_map =
+ sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta_priv->supported_rates.vht_tx_mcs_map =
+ sta->vht_cap.vht_mcs.tx_mcs_map;
+ }
}
+
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
{
u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
@@ -772,6 +802,14 @@ void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
rates->supported_mcs_set[0] = 0xFF;
}
+
+void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates)
+{
+ rates->op_rate_mode = STA_11ac;
+ rates->vht_rx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ rates->vht_tx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
+}
+
static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -879,6 +917,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
vif_priv->sta_assoc = false;
+ vif_priv->allow_bmps = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@@ -1083,6 +1122,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
u16 tid = params->tid;
u16 *ssn = &params->ssn;
int ret = 0;
+ u8 session;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1092,10 +1132,11 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
sta_priv->tid = tid;
- wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
- get_sta_index(vif, sta_priv));
- wcn36xx_smd_add_ba(wcn);
- wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn, session);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid,
+ session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
@@ -1149,6 +1190,8 @@ static const struct ieee80211_ops wcn36xx_ops = {
.set_key = wcn36xx_set_key,
.hw_scan = wcn36xx_hw_scan,
.cancel_hw_scan = wcn36xx_cancel_hw_scan,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
.bss_info_changed = wcn36xx_bss_info_changed,
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
@@ -1158,6 +1201,35 @@ static const struct ieee80211_ops wcn36xx_ops = {
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
+static void
+wcn36xx_set_ieee80211_vht_caps(struct ieee80211_sta_vht_cap *vht_cap)
+{
+ vht_cap->vht_supported = true;
+
+ vht_cap->cap = (IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
+ 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
+
+ vht_cap->vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
+
+ vht_cap->vht_mcs.rx_highest = cpu_to_le16(433);
+ vht_cap->vht_mcs.tx_highest = vht_cap->vht_mcs.rx_highest;
+
+ vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+}
+
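The rx_mcs_map built above uses the 802.11ac encoding: a u16 holding two bits per spatial stream, where 2 means MCS 0-9 supported and 3 means the stream is not supported. A sketch reconstructing the same map for a single stream:

#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9   2	/* per-stream code for MCS 0..9 */
#define VHT_MCS_NOT_SUPPORTED 3	/* per-stream code for "no such stream" */

/* Build the 16-bit VHT MCS map: 2 bits per spatial stream, 8 streams. */
static unsigned short build_mcs_map(int nss, unsigned int code)
{
	unsigned short map = 0;

	for (int ss = 0; ss < 8; ss++) {
		unsigned int c = (ss < nss) ? code : VHT_MCS_NOT_SUPPORTED;

		map |= c << (2 * ss);
	}
	return map;
}

int main(void)
{
	/* One spatial stream at MCS 0-9, matching the hunk above. */
	printf("rx_mcs_map = 0x%04x\n",
	       (unsigned)build_mcs_map(1, VHT_MCS_SUPPORT_0_9));	/* 0xfffe */
	return 0;
}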
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
static const u32 cipher_suites[] = {
@@ -1173,6 +1245,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1183,6 +1256,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
if (wcn->rf_id != RF_IRIS_WCN3620)
wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ wcn36xx_set_ieee80211_vht_caps(&wcn_band_5ghz.vht_cap);
+
wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
@@ -1280,6 +1356,8 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
+ if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
+ wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
}
@@ -1326,8 +1404,6 @@ static int wcn36xx_probe(struct platform_device *pdev)
goto out_wq;
}
- INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
-
wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
if (IS_ERR(wcn->smd_channel)) {
wcn36xx_err("failed to open WLAN_CTRL channel\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 1976b80c235f..2d0780fefd47 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -23,11 +23,15 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
{
int ret = 0;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- /* TODO: Make sure the TX chain clean */
+
+ if (!vif_priv->allow_bmps)
+ return -ENOTSUPP;
+
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
vif_priv->pw_state = WCN36XX_BMPS;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
} else {
/*
* One of the reasons why HW will not enter BMPS is because
@@ -52,6 +56,7 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
return 0;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 77269ac7f352..766400f7b61c 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -45,8 +45,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
- WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6),
- WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
@@ -77,6 +77,103 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */
+};
+
+static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = {
+ WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
+ WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
+ WCN36XX_CFG_VAL(CAL_PERIOD, 5),
+ WCN36XX_CFG_VAL(CAL_CONTROL, 1),
+ WCN36XX_CFG_VAL(PROXIMITY, 0),
+ WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
+ WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 4096),
+ WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
+ WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
+ WCN36XX_CFG_VAL(FIXED_RATE, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
+ WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
+ WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_24GHZ, 1),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
+ WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
+ WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
+ WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
+ WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
+ WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
+ WCN36XX_CFG_VAL(STATS_PERIOD, 10),
+ WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
+ WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
+ WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
+ WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
+ WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
+ WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
+ WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
+ WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
+ WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_MASK, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_BUFFER_STA_CAPABLE, 1),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_INACTIVITY_TIME, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_RX_FRAME_THRESHOLD, 10),
+ WCN36XX_CFG_VAL(TDLS_OFF_CHANNEL_CAPABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_ADAPTIVE_RX_DRAIN, 1),
+ WCN36XX_CFG_VAL(FLEXCONNECT_POWER_FACTOR, 0),
+ WCN36XX_CFG_VAL(ANTENNA_DIVERSITY, 3),
+ WCN36XX_CFG_VAL(ATH_DISABLE, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN, 60000),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN, 90000),
+ WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN, 30000),
+ WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN, 30000),
+ WCN36XX_CFG_VAL(ASD_PROBE_INTERVAL, 50),
+ WCN36XX_CFG_VAL(ASD_TRIGGER_THRESHOLD, -60),
+ WCN36XX_CFG_VAL(ASD_RTT_RSSI_HYST_THRESHOLD, 3),
+ WCN36XX_CFG_VAL(BTC_CTS2S_ON_STA_DURING_SCO, 0),
+ WCN36XX_CFG_VAL(RA_FILTER_ENABLE, 0),
+ WCN36XX_CFG_VAL(RA_RATE_LIMIT_INTERVAL, 60),
+ WCN36XX_CFG_VAL(BTC_FATAL_HID_NSNIFF_BLK, 2),
+ WCN36XX_CFG_VAL(BTC_CRITICAL_HID_NSNIFF_BLK, 1),
+ WCN36XX_CFG_VAL(BTC_DYN_A2DP_TX_QUEUE_THOLD, 0),
+ WCN36XX_CFG_VAL(BTC_DYN_OPP_TX_QUEUE_THOLD, 1),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_SP, 10),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT, 50),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT, 50),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW, 500),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW, 500),
+ WCN36XX_CFG_VAL(MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE, 0),
+ WCN36XX_CFG_VAL(MAX_UAPSD_INACTIVITY_INTERVALS, 10),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_WMMPS, 1),
+ WCN36XX_CFG_VAL(BURST_MODE_BE_TXOP_VALUE, 0),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 136),
+ WCN36XX_CFG_VAL(BTC_FAST_WLAN_CONN_PREF, 1),
+ WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000),
+ WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 200),
+ WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0),
+ WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0),
+ WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0),
+ WCN36XX_CFG_VAL(BAR_WAKEUP_HOST_DISABLE, 0),
+ WCN36XX_CFG_VAL(SAR_BOFFSET_CORRECTION_ENABLE, 0),
+ WCN36XX_CFG_VAL(BTC_DISABLE_WLAN_LINK_CRITICAL, 5),
+ WCN36XX_CFG_VAL(DISABLE_SCAN_DURING_SCO, 2),
+ WCN36XX_CFG_VAL(CONS_BCNMISS_COUNT, 0),
+ WCN36XX_CFG_VAL(UNITS_OF_BCN_WAIT_TIME, 0),
+ WCN36XX_CFG_VAL(TRIGGER_NULLFRAME_BEFORE_HB, 0),
+ WCN36XX_CFG_VAL(ENABLE_POWERSAVE_OFFLOAD, 0),
};
static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
@@ -121,6 +218,7 @@ static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
{
return caps & flag ? 1 : 0;
}
+
static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
@@ -145,6 +243,15 @@ static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
}
}
+static void
+wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params_v1 *bss)
+{
+ if (sta && sta->vht_cap.vht_supported)
+ bss->vht_capable = 1;
+}
+
static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
@@ -173,6 +280,37 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
}
}
+static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (sta->vht_cap.vht_supported) {
+ unsigned long caps = sta->vht_cap.cap;
+
+ sta_params->vht_capable = sta->vht_cap.vht_supported;
+ sta_params->vht_ldpc_enabled =
+ is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
+ if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+ sta_params->vht_tx_mu_beamformee_capable =
+ is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+ if (sta_params->vht_tx_mu_beamformee_capable)
+ sta_params->vht_tx_bf_enabled = 1;
+ } else {
+ sta_params->vht_tx_mu_beamformee_capable = 0;
+ }
+ sta_params->vht_tx_channel_width_set = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ sta_params->ht_ldpc_enabled =
+ is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING);
+ }
+}
+
static void wcn36xx_smd_set_sta_default_ht_params(
struct wcn36xx_hal_config_sta_params *sta_params)
{
@@ -189,6 +327,31 @@ static void wcn36xx_smd_set_sta_default_ht_params(
sta_params->dsss_cck_mode_40mhz = 1;
}
+static void wcn36xx_smd_set_sta_default_vht_params(struct wcn36xx *wcn,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ sta_params->vht_capable = 1;
+ sta_params->vht_tx_mu_beamformee_capable = 1;
+ } else {
+ sta_params->vht_capable = 0;
+ sta_params->vht_tx_mu_beamformee_capable = 0;
+ }
+
+ sta_params->vht_ldpc_enabled = 0;
+ sta_params->vht_tx_channel_width_set = 0;
+ sta_params->vht_tx_bf_enabled = 0;
+}
+
+static void wcn36xx_smd_set_sta_default_ht_ldpc_params(struct wcn36xx *wcn,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ sta_params->ht_ldpc_enabled = 1;
+ else
+ sta_params->ht_ldpc_enabled = 0;
+}
+
static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -241,9 +404,10 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sta_params->aid = sta_priv->aid;
wcn36xx_smd_set_sta_ht_params(sta, sta_params);
memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
- sizeof(sta_priv->supported_rates));
+ sizeof(struct wcn36xx_hal_supported_rates));
} else {
- wcn36xx_set_default_rates(&sta_params->supported_rates);
+ wcn36xx_set_default_rates((struct wcn36xx_hal_supported_rates *)
+ &sta_params->supported_rates);
wcn36xx_smd_set_sta_default_ht_params(sta_params);
}
}
@@ -290,14 +454,20 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
hdr->len = msg_size + sizeof(*hdr);
}
-#define INIT_HAL_MSG(msg_body, type) \
+#define __INIT_HAL_MSG(msg_body, type, version) \
do { \
memset(&msg_body, 0, sizeof(msg_body)); \
msg_body.header.msg_type = type; \
- msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.msg_version = version; \
msg_body.header.len = sizeof(msg_body); \
} while (0) \
+#define INIT_HAL_MSG(msg_body, type) \
+ __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION0)
+
+#define INIT_HAL_MSG_V1(msg_body, type) \
+ __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION1)
+
#define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \
do { \
memset(p_msg_body, 0, sizeof(*p_msg_body) + ppt_msg_len); \
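INIT_HAL_MSG is re-layered on a __INIT_HAL_MSG(..., version) core so the VERSION1 variant shares the body, and the do { } while (0) wrapper keeps the multi-statement macro safe after a braceless if. The same layering in a standalone sketch:

#include <stdio.h>
#include <string.h>

struct header { int type, version, len; };
struct msg    { struct header header; int payload; };

#define __INIT_MSG(m, t, v)			\
	do {					\
		memset(&(m), 0, sizeof(m));	\
		(m).header.type    = (t);	\
		(m).header.version = (v);	\
		(m).header.len     = sizeof(m);	\
	} while (0)

/* Old callers keep their one-argument form... */
#define INIT_MSG(m, t)    __INIT_MSG(m, t, 0)
/* ...and version-1 messages get a parallel helper. */
#define INIT_MSG_V1(m, t) __INIT_MSG(m, t, 1)

int main(void)
{
	struct msg m;

	if (1)
		INIT_MSG_V1(m, 7);	/* safe in a braceless if thanks to do/while */
	printf("type=%d version=%d len=%d\n",
	       m.header.type, m.header.version, m.header.len);
	return 0;
}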
@@ -449,6 +619,8 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
int ret;
int i;
size_t len;
+ int cfg_elements;
+ static struct wcn36xx_cfg_val *cfg_vals;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
@@ -461,9 +633,17 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
len = body->header.len;
- for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
- ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id,
- wcn36xx_cfg_vals[i].value);
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ cfg_vals = wcn3680_cfg_vals;
+ cfg_elements = ARRAY_SIZE(wcn3680_cfg_vals);
+ } else {
+ cfg_vals = wcn36xx_cfg_vals;
+ cfg_elements = ARRAY_SIZE(wcn36xx_cfg_vals);
+ }
+
+ for (i = 0; i < cfg_elements; i++) {
+ ret = put_cfg_tlv_u32(wcn, &len, cfg_vals[i].cfg_id,
+ cfg_vals[i].value);
if (ret)
goto out;
}
@@ -517,8 +697,10 @@ out:
return ret;
}
-int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif)
{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_init_scan_req_msg msg_body;
int ret;
@@ -526,6 +708,13 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
msg_body.mode = mode;
+ if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
+ /* Notify BSSID with null DATA packet */
+ msg_body.frame_type = 2;
+ msg_body.notify = 1;
+ msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
+ msg_body.scan_entry.active_bss_count = 1;
+ }
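+ /* A frame_type of 2 requests a NULL data frame toward the associated
+ * BSS, so the AP buffers traffic while the device is off-channel.
+ */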
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -607,8 +796,10 @@ out:
}
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
- enum wcn36xx_hal_sys_mode mode)
+ enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif)
{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_finish_scan_req_msg msg_body;
int ret;
@@ -616,6 +807,14 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
msg_body.mode = mode;
+ msg_body.oper_channel = WCN36XX_HW_CHANNEL(wcn);
+ if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
+ /* Notify BSSID with null data packet */
+ msg_body.notify = 1;
+ msg_body.frame_type = 2;
+ msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
+ msg_body.scan_entry.active_bss_count = 1;
+ }
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -674,8 +873,10 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body->num_channel = min_t(u8, req->n_channels,
sizeof(msg_body->channels));
- for (i = 0; i < msg_body->num_channel; i++)
- msg_body->channels[i] = req->channels[i]->hw_value;
+ for (i = 0; i < msg_body->num_channel; i++) {
+ msg_body->channels[i] =
+ HW_VALUE_CHANNEL(req->channels[i]->hw_value);
+ }
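+ /* hw_value packs the PHY mode into its upper byte (see HW_VALUE_PHY());
+ * firmware expects the bare channel number here.
+ */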
msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN;
@@ -1163,6 +1364,31 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
v1->p2p = orig->p2p;
}
+static void
+wcn36xx_smd_set_sta_params_v1(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_par)
+{
+ struct wcn36xx_sta *sta_priv = NULL;
+ struct wcn36xx_hal_config_sta_params sta_par_v0;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, &sta_par_v0);
+ wcn36xx_smd_convert_sta_to_v1(wcn, &sta_par_v0, sta_par);
+
+ if (sta) {
+ sta_priv = wcn36xx_sta_to_priv(sta);
+ wcn36xx_smd_set_sta_vht_params(wcn, sta, sta_par);
+ wcn36xx_smd_set_sta_ht_ldpc_params(sta, sta_par);
+ memcpy(&sta_par->supported_rates, &sta_priv->supported_rates,
+ sizeof(sta_par->supported_rates));
+ } else {
+ wcn36xx_set_default_rates_v1(&sta_par->supported_rates);
+ wcn36xx_smd_set_sta_default_vht_params(wcn, sta_par);
+ wcn36xx_smd_set_sta_default_ht_ldpc_params(wcn, sta_par);
+ }
+}
+
static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
void *buf,
@@ -1197,53 +1423,69 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
}
static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
- const struct wcn36xx_hal_config_sta_req_msg *orig)
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
- struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params;
- INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ INIT_HAL_MSG_V1(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ } else {
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ msg_body.header.len -= WCN36XX_DIFF_STA_PARAMS_V1_NOVHT;
+ }
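+ /* Pre-VHT firmware still takes the v1 layout, minus the trailing VHT
+ * fields, hence the trimmed header length.
+ */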
- wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
- &msg_body.sta_params);
+ sta_params = &msg_body.sta_params;
+
+ wcn36xx_smd_set_sta_params_v1(wcn, vif, sta, sta_params);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
- sta->action, sta->sta_index, sta->bssid_index,
- sta->bssid, sta->type, sta->mac, sta->aid);
+ sta_params->action, sta_params->sta_index, sta_params->bssid_index,
+ sta_params->bssid, sta_params->type, sta_params->mac, sta_params->aid);
return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
}
-int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wcn36xx_smd_config_sta_v0(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
- int ret;
- mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
sta_params = &msg.sta_params;
wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
- } else {
- PREPARE_HAL_BUF(wcn->hal_buf, msg);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
- sta_params->action, sta_params->sta_index,
- sta_params->bssid_index, sta_params->bssid,
- sta_params->type, sta_params->mac, sta_params->aid);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
+ ret = wcn36xx_smd_config_sta_v1(wcn, vif, sta);
+ else
+ ret = wcn36xx_smd_config_sta_v0(wcn, vif, sta);
- ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
- }
if (ret) {
wcn36xx_err("Sending hal_config_sta failed\n");
goto out;
@@ -1261,189 +1503,14 @@ out:
return ret;
}
-static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
- const struct wcn36xx_hal_config_bss_req_msg *orig)
-{
- struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
- struct wcn36xx_hal_config_bss_params_v1 *bss;
- struct wcn36xx_hal_config_sta_params_v1 *sta;
- int ret;
-
- msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
- if (!msg_body)
- return -ENOMEM;
-
- INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
-
- bss = &msg_body->bss_params;
- sta = &bss->sta;
-
- /* convert orig to v1 */
- memcpy(&msg_body->bss_params.bssid,
- &orig->bss_params.bssid, ETH_ALEN);
- memcpy(&msg_body->bss_params.self_mac_addr,
- &orig->bss_params.self_mac_addr, ETH_ALEN);
-
- msg_body->bss_params.bss_type = orig->bss_params.bss_type;
- msg_body->bss_params.oper_mode = orig->bss_params.oper_mode;
- msg_body->bss_params.nw_type = orig->bss_params.nw_type;
-
- msg_body->bss_params.short_slot_time_supported =
- orig->bss_params.short_slot_time_supported;
- msg_body->bss_params.lla_coexist = orig->bss_params.lla_coexist;
- msg_body->bss_params.llb_coexist = orig->bss_params.llb_coexist;
- msg_body->bss_params.llg_coexist = orig->bss_params.llg_coexist;
- msg_body->bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
- msg_body->bss_params.lln_non_gf_coexist =
- orig->bss_params.lln_non_gf_coexist;
-
- msg_body->bss_params.lsig_tx_op_protection_full_support =
- orig->bss_params.lsig_tx_op_protection_full_support;
- msg_body->bss_params.rifs_mode = orig->bss_params.rifs_mode;
- msg_body->bss_params.beacon_interval = orig->bss_params.beacon_interval;
- msg_body->bss_params.dtim_period = orig->bss_params.dtim_period;
- msg_body->bss_params.tx_channel_width_set =
- orig->bss_params.tx_channel_width_set;
- msg_body->bss_params.oper_channel = orig->bss_params.oper_channel;
- msg_body->bss_params.ext_channel = orig->bss_params.ext_channel;
-
- msg_body->bss_params.reserved = orig->bss_params.reserved;
-
- memcpy(&msg_body->bss_params.ssid,
- &orig->bss_params.ssid,
- sizeof(orig->bss_params.ssid));
-
- msg_body->bss_params.action = orig->bss_params.action;
- msg_body->bss_params.rateset = orig->bss_params.rateset;
- msg_body->bss_params.ht = orig->bss_params.ht;
- msg_body->bss_params.obss_prot_enabled =
- orig->bss_params.obss_prot_enabled;
- msg_body->bss_params.rmf = orig->bss_params.rmf;
- msg_body->bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
- msg_body->bss_params.dual_cts_protection =
- orig->bss_params.dual_cts_protection;
-
- msg_body->bss_params.max_probe_resp_retry_limit =
- orig->bss_params.max_probe_resp_retry_limit;
- msg_body->bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
- msg_body->bss_params.proxy_probe_resp =
- orig->bss_params.proxy_probe_resp;
- msg_body->bss_params.edca_params_valid =
- orig->bss_params.edca_params_valid;
-
- memcpy(&msg_body->bss_params.acbe,
- &orig->bss_params.acbe,
- sizeof(orig->bss_params.acbe));
- memcpy(&msg_body->bss_params.acbk,
- &orig->bss_params.acbk,
- sizeof(orig->bss_params.acbk));
- memcpy(&msg_body->bss_params.acvi,
- &orig->bss_params.acvi,
- sizeof(orig->bss_params.acvi));
- memcpy(&msg_body->bss_params.acvo,
- &orig->bss_params.acvo,
- sizeof(orig->bss_params.acvo));
-
- msg_body->bss_params.ext_set_sta_key_param_valid =
- orig->bss_params.ext_set_sta_key_param_valid;
-
- memcpy(&msg_body->bss_params.ext_set_sta_key_param,
- &orig->bss_params.ext_set_sta_key_param,
- sizeof(orig->bss_params.acvo));
-
- msg_body->bss_params.wcn36xx_hal_persona =
- orig->bss_params.wcn36xx_hal_persona;
- msg_body->bss_params.spectrum_mgt_enable =
- orig->bss_params.spectrum_mgt_enable;
- msg_body->bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
- msg_body->bss_params.max_tx_power = orig->bss_params.max_tx_power;
-
- wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
- &msg_body->bss_params.sta);
-
- PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
- bss->bssid, bss->self_mac_addr, bss->bss_type,
- bss->oper_mode, bss->nw_type);
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
- sta->bssid, sta->action, sta->sta_index,
- sta->bssid_index, sta->aid, sta->type, sta->mac);
-
- ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
- kfree(msg_body);
-
- return ret;
-}
-
-
-static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *buf,
- size_t len)
-{
- struct wcn36xx_hal_config_bss_rsp_msg *rsp;
- struct wcn36xx_hal_config_bss_rsp_params *params;
- struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
- params = &rsp->bss_rsp_params;
-
- if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
- wcn36xx_warn("hal config bss response failure: %d\n",
- params->status);
- return -EIO;
- }
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
- " sta_idx %d self_idx %d bcast_idx %d mac %pM"
- " power %d ucast_dpu_signature %d\n",
- params->status, params->bss_index, params->dpu_desc_index,
- params->bss_sta_index, params->bss_self_sta_index,
- params->bss_bcast_sta_idx, params->mac,
- params->tx_mgmt_power, params->ucast_dpu_signature);
-
- vif_priv->bss_index = params->bss_index;
-
- if (sta) {
- struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
- sta_priv->bss_sta_index = params->bss_sta_index;
- sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
- }
-
- vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
-
- return 0;
-}
-
-int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, const u8 *bssid,
- bool update)
+static void wcn36xx_smd_set_bss_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const u8 *bssid,
+ bool update,
+ struct wcn36xx_hal_config_bss_params *bss)
{
- struct wcn36xx_hal_config_bss_req_msg *msg;
- struct wcn36xx_hal_config_bss_params *bss;
- struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret;
-
- mutex_lock(&wcn->hal_mutex);
- msg = kzalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg) {
- ret = -ENOMEM;
- goto out;
- }
- INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
-
- bss = &msg->bss_params;
- sta_params = &bss->sta;
WARN_ON(is_zero_ether_addr(bssid));
@@ -1498,7 +1565,6 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
bss->reserved = 0;
- wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
/* wcn->ssid is only valid in AP and IBSS mode */
bss->ssid.length = vif_priv->ssid.length;
@@ -1523,6 +1589,154 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bss->action = update;
vif_priv->bss_type = bss->bss_type;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta_80211,
+ const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss;
+ struct wcn36xx_hal_config_bss_params bss_v0;
+ struct wcn36xx_hal_config_sta_params_v1 *sta;
+ struct cfg80211_chan_def *chandef;
+ int ret;
+
+ msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
+ if (!msg_body)
+ return -ENOMEM;
+
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ INIT_HAL_MSG_V1((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
+ } else {
+ INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
+ msg_body->header.len -= WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT;
+ }
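+ /* Same header trim as wcn36xx_smd_config_sta_v1() for pre-VHT firmware */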
+
+ bss = &msg_body->bss_params;
+ sta = &bss->sta;
+
+ memset(&bss_v0, 0x00, sizeof(bss_v0));
+ wcn36xx_smd_set_bss_params(wcn, vif, sta_80211, bssid, update, &bss_v0);
+ wcn36xx_smd_set_sta_params_v1(wcn, vif, sta_80211, sta);
+
+ /* convert v0 params to v1 */
+ memcpy(bss->bssid, &bss_v0.bssid, ETH_ALEN);
+ memcpy(bss->self_mac_addr, &bss_v0.self_mac_addr, ETH_ALEN);
+
+ bss->bss_type = bss_v0.bss_type;
+ bss->oper_mode = bss_v0.oper_mode;
+ bss->nw_type = bss_v0.nw_type;
+
+ bss->short_slot_time_supported =
+ bss_v0.short_slot_time_supported;
+ bss->lla_coexist = bss_v0.lla_coexist;
+ bss->llb_coexist = bss_v0.llb_coexist;
+ bss->llg_coexist = bss_v0.llg_coexist;
+ bss->ht20_coexist = bss_v0.ht20_coexist;
+ bss->lln_non_gf_coexist = bss_v0.lln_non_gf_coexist;
+
+ bss->lsig_tx_op_protection_full_support =
+ bss_v0.lsig_tx_op_protection_full_support;
+ bss->rifs_mode = bss_v0.rifs_mode;
+ bss->beacon_interval = bss_v0.beacon_interval;
+ bss->dtim_period = bss_v0.dtim_period;
+ bss->tx_channel_width_set = bss_v0.tx_channel_width_set;
+ bss->oper_channel = bss_v0.oper_channel;
+
+ if (wcn->hw->conf.chandef.width == NL80211_CHAN_WIDTH_80) {
+ chandef = &wcn->hw->conf.chandef;
+ bss->ext_channel = HW_VALUE_PHY(chandef->chan->hw_value);
+ } else {
+ bss->ext_channel = bss_v0.ext_channel;
+ }
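+ /* For 80 MHz the PHY-mode byte of hw_value appears to double as the
+ * extended-channel encoding handed to firmware.
+ */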
+
+ bss->reserved = bss_v0.reserved;
+
+ memcpy(&bss->ssid, &bss_v0.ssid,
+ sizeof(bss_v0.ssid));
+
+ bss->action = bss_v0.action;
+ bss->rateset = bss_v0.rateset;
+ bss->ht = bss_v0.ht;
+ bss->obss_prot_enabled = bss_v0.obss_prot_enabled;
+ bss->rmf = bss_v0.rmf;
+ bss->ht_oper_mode = bss_v0.ht_oper_mode;
+ bss->dual_cts_protection = bss_v0.dual_cts_protection;
+
+ bss->max_probe_resp_retry_limit =
+ bss_v0.max_probe_resp_retry_limit;
+ bss->hidden_ssid = bss_v0.hidden_ssid;
+ bss->proxy_probe_resp = bss_v0.proxy_probe_resp;
+ bss->edca_params_valid = bss_v0.edca_params_valid;
+
+ memcpy(&bss->acbe, &bss_v0.acbe,
+ sizeof(bss_v0.acbe));
+ memcpy(&bss->acbk, &bss_v0.acbk,
+ sizeof(bss_v0.acbk));
+ memcpy(&bss->acvi, &bss_v0.acvi,
+ sizeof(bss_v0.acvi));
+ memcpy(&bss->acvo, &bss_v0.acvo,
+ sizeof(bss_v0.acvo));
+
+ bss->ext_set_sta_key_param_valid =
+ bss_v0.ext_set_sta_key_param_valid;
+
+ memcpy(&bss->ext_set_sta_key_param,
+ &bss_v0.ext_set_sta_key_param,
+ sizeof(bss_v0.ext_set_sta_key_param));
+
+ bss->wcn36xx_hal_persona = bss_v0.wcn36xx_hal_persona;
+ bss->spectrum_mgt_enable = bss_v0.spectrum_mgt_enable;
+ bss->tx_mgmt_power = bss_v0.tx_mgmt_power;
+ bss->max_tx_power = bss_v0.max_tx_power;
+
+ wcn36xx_smd_set_bss_vht_params(vif, sta_80211, bss);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ kfree(msg_body);
+
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v0(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg *msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg->bss_params;
+ sta_params = &bss->sta;
+
+ wcn36xx_smd_set_bss_params(wcn, vif, sta, bssid, update, bss);
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
@@ -1536,13 +1750,69 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
sta_params->aid, sta_params->type,
sta_params->mac);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_config_bss_v1(wcn, msg);
- } else {
- PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
+ ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
+ kfree(msg);
+
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
- ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
+ vif_priv->bss_index = params->bss_index;
+
+ if (sta) {
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ sta_priv->bss_sta_index = params->bss_sta_index;
+ sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
}
+
+ vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
+ ret = wcn36xx_smd_config_bss_v1(wcn, vif, sta, bssid, update);
+ else
+ ret = wcn36xx_smd_config_bss_v0(wcn, vif, sta, bssid, update);
+
if (ret) {
wcn36xx_err("Sending hal_config_bss failed\n");
goto out;
@@ -1552,12 +1822,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
sta,
wcn->hal_buf,
wcn->hal_rsp_len);
- if (ret) {
+ if (ret)
wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
- goto out;
- }
+
out:
- kfree(msg);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
@@ -1924,6 +2192,7 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
@@ -1953,6 +2222,7 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
/* Notice: This function should be called after association, or else it
* will be invalid
*/
@@ -2080,6 +2350,8 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ set_feat_caps(msg_body.feat_caps, DOT11AC);
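+ /* Only the VHT-capable WCN3680 advertises 802.11ac support to firmware */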
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2102,6 +2374,22 @@ out:
return ret;
}
+static int wcn36xx_smd_add_ba_session_rsp(void *buf, int len, u8 *session)
+{
+ struct wcn36xx_hal_add_ba_session_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_ba_session_rsp_msg *)buf;
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS)
+ return rsp->status;
+
+ *session = rsp->ba_session_id;
+
+ return 0;
+}
+
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
u16 tid,
@@ -2110,6 +2398,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ u8 session_id;
int ret;
mutex_lock(&wcn->hal_mutex);
@@ -2135,17 +2424,20 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_add_ba_session failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_add_ba_session_rsp(wcn->hal_buf, wcn->hal_rsp_len,
+ &session_id);
if (ret) {
wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
goto out;
}
+
+ ret = session_id;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
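+
+/*
+ * Illustrative caller sketch (assumed usage, not lifted from the driver):
+ * a non-negative return value is the firmware-assigned BA session id.
+ *
+ *	ret = wcn36xx_smd_add_ba_session(wcn, sta, tid, &ssn, dir, sta_index);
+ *	if (ret < 0)
+ *		return ret;
+ *	wcn36xx_smd_add_ba(wcn, ret);
+ */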
-int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
int ret;
@@ -2153,7 +2445,7 @@ int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
- msg_body.session_id = 0;
+ msg_body.session_id = session_id;
msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2212,7 +2504,7 @@ static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
return rsp->status;
}
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
@@ -2221,7 +2513,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
- msg_body.session_id = 0;
+ msg_body.session_id = session_id;
msg_body.candidate_cnt = 1;
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2229,7 +2521,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
(wcn->hal_buf + sizeof(msg_body));
candidate->sta_index = sta_index;
- candidate->tid_bitmap = 1;
+ candidate->tid_bitmap = 1 << tid;
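+ /* e.g. tid 5 yields a bitmap of BIT(5) == 0x20: trigger BA on that TID only */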
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
@@ -2610,6 +2902,7 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
kfree(hal_ind_msg);
}
}
+
int wcn36xx_smd_open(struct wcn36xx *wcn)
{
wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index ff15df8ab56f..b1d8083d9d9d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -59,11 +59,13 @@ void wcn36xx_smd_close(struct wcn36xx *wcn);
int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
int wcn36xx_smd_start(struct wcn36xx *wcn);
int wcn36xx_smd_stop(struct wcn36xx *wcn);
-int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel);
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
-int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
- enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif);
+
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
@@ -132,9 +134,9 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u16 *ssn,
u8 direction,
u8 sta_index);
-int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index a6902371e89c..1b831157ede1 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -23,11 +23,214 @@ static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+struct wcn36xx_rate {
+ u16 bitrate;
+ u16 mcs_or_legacy_index;
+ enum mac80211_rx_encoding encoding;
+ enum mac80211_rx_encoding_flags encoding_flags;
+ enum rate_info_bw bw;
+};
+
+static const struct wcn36xx_rate wcn36xx_rate_table[] = {
+ /* 11b rates */
+ { 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 20, 1, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 55, 2, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 110, 3, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+
+ /* 11b SP (short preamble) */
+ { 10, 0, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 20, 1, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 55, 2, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 110, 3, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+
+ /* 11ag */
+ { 60, 4, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 90, 5, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 120, 6, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 180, 7, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 240, 8, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 360, 9, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 480, 10, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 540, 11, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+
+ /* 11n */
+ { 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+
+ /* 11n SGI */
+ { 72, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 144, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 217, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 289, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 434, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 578, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 650, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+
+ /* 11n GF (greenfield) */
+ { 65, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+
+ /* 11n CB (channel bonding) */
+ { 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+
+ /* 11n CB + SGI */
+ { 150, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 300, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 450, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 600, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 900, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11n GF + CB */
+ { 135, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 20 MHz 800ns GI MCS 0-8 */
+ { 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 780, 8, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 20 MHz 400ns SGI MCS 6-8 */
+ { 655, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 866, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 40 MHz 800ns GI MCS 0-9 */
+ { 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1620, 8, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1800, 9, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 40 MHz 400ns SGI MCS 5-7 */
+ { 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 40 MHz 400ns SGI MCS 8-9 */
+ { 1800, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 2000, 9, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 800ns GI MCS 0-7 */
+ { 292, 0, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 585, 1, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 877, 2, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 1170, 3, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 1755, 4, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 2340, 5, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 2632, 6, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 2925, 7, RX_ENC_HT, 0, RATE_INFO_BW_80},
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 800ns GI MCS 8-9 */
+ { 3510, 8, RX_ENC_HT, 0, RATE_INFO_BW_80},
+ { 3900, 9, RX_ENC_HT, 0, RATE_INFO_BW_80},
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 400ns SGI MCS 6-7 */
+ { 2925, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+ { 3250, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 400ns SGI MCS 8-9 */
+ { 3900, 8, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+ { 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+};
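+
+/* bd->rate_id indexes this table directly; out-of-range ids fall back to
+ * zeroed rate info in wcn36xx_rx_skb() below.
+ */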
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
+ const struct wcn36xx_rate *rate;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd *bd;
+ struct ieee80211_supported_band *sband;
u16 fc, sn;
/*
@@ -49,19 +252,11 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
fc = __le16_to_cpu(hdr->frame_control);
sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
- /* When scanning associate beacons to this */
- if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) {
- status.freq = wcn->scan_freq;
- status.band = wcn->scan_band;
- } else {
- status.freq = WCN36XX_CENTER_FREQ(wcn);
- status.band = WCN36XX_BAND(wcn);
- }
-
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
status.mactime = 10;
status.signal = -get_rssi0(bd);
status.antenna = 1;
- status.rate_idx = 1;
status.flag = 0;
status.rx_flags = 0;
status.flag |= RX_FLAG_IV_STRIPPED |
@@ -70,6 +265,28 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
+ rate = &wcn36xx_rate_table[bd->rate_id];
+ status.encoding = rate->encoding;
+ status.enc_flags = rate->encoding_flags;
+ status.bw = rate->bw;
+ status.rate_idx = rate->mcs_or_legacy_index;
+ sband = wcn->hw->wiphy->bands[status.band];
+ status.nss = 1;
+
+ if (status.band == NL80211_BAND_5GHZ &&
+ status.encoding == RX_ENC_LEGACY &&
+ status.rate_idx >= sband->n_bitrates) {
+ /* no DSSS rates in the 5 GHz rate table */
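+ /* e.g. legacy index 4 (6 Mbit/s) becomes bitrate index 0 */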
+ status.rate_idx -= 4;
+ }
+ } else {
+ status.encoding = 0;
+ status.bw = 0;
+ status.enc_flags = 0;
+ status.rate_idx = 0;
+ }
+
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
@@ -100,7 +317,8 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
- bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
+ /* Use seq number generated by mac80211 */
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@@ -160,9 +378,11 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
bool bcast)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *__vif_priv = NULL;
- bool is_data_qos;
+ bool is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 tid = 0;
bd->bd_rate = WCN36XX_BD_RATE_DATA;
@@ -191,9 +411,21 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- (sta_priv && !sta_priv->is_data_encrypted))
+ if (is_data_qos) {
+ tid = ieee80211_get_tid(hdr);
+ /* TID->QID is one-to-one mapping */
+ bd->queue_id = tid;
+ }
+
+ if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT ||
+ (sta_priv && !sta_priv->is_data_encrypted)) {
bd->dpu_ne = 1;
+ }
+
+ if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
+ /* Don't use a regular queue for null packet (no ampdu) */
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ }
if (bcast) {
bd->ub = 1;
@@ -201,13 +433,11 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
}
*vif_priv = __vif_priv;
- is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
-
wcn36xx_set_tx_pdu(bd,
is_data_qos ?
sizeof(struct ieee80211_qos_hdr) :
sizeof(struct ieee80211_hdr_3addr),
- skb->len, sta_priv ? sta_priv->tid : 0);
+ skb->len, tid);
if (sta_priv && is_data_qos)
wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
@@ -287,9 +517,9 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.dpu_rf = WCN36XX_BMU_WQ_TX;
- bd.tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS);
- if (bd.tx_comp) {
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+
spin_lock_irqsave(&wcn->dxe_lock, flags);
if (wcn->tx_ack_skb) {
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
@@ -302,10 +532,15 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
/* Only one at a time is supported by fw. Stop the TX queues
* until the ack status gets back.
- *
- * TODO: Add watchdog in case FW does not answer
*/
ieee80211_stop_queues(wcn->hw);
+
+ /* TX watchdog if no TX irq or ack indication received */
+ mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
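+ /* i.e. the watchdog fires after roughly 100 ms without an ack indication */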
+
+ /* Request ack indication from the firmware */
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ bd.tx_comp = 1;
}
/* Data frames served first */
@@ -319,7 +554,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
- if (ret && bd.tx_comp) {
+ if (ret && (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* If the skb has not been transmitted,
* don't keep a reference to it.
*/
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index a58f313983b9..71fa9992b118 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -83,7 +83,11 @@ enum wcn36xx_ampdu_state {
WCN36XX_AMPDU_OPERATIONAL,
};
-#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define HW_VALUE_PHY_SHIFT 8
+#define HW_VALUE_PHY(hw_value) ((hw_value) >> HW_VALUE_PHY_SHIFT)
+#define HW_VALUE_CHANNEL(hw_value) ((hw_value) & 0xFF)
+#define WCN36XX_HW_CHANNEL(__wcn)\
+ HW_VALUE_CHANNEL(__wcn->hw->conf.chandef.chan->hw_value)
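+/*
+ * hw_value is thus a composite (phy_mode << 8 | channel); e.g. a
+ * hypothetical hw_value of 0x023c decodes to PHY mode 2, channel 60.
+ */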
#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
@@ -92,6 +96,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
+#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)
{
@@ -122,6 +127,7 @@ struct wcn36xx_vif {
enum wcn36xx_hal_bss_type bss_type;
/* Power management */
+ bool allow_bmps;
enum wcn36xx_power_state pw_state;
u8 bss_index;
@@ -167,7 +173,7 @@ struct wcn36xx_sta {
u8 bss_dpu_desc_index;
bool is_data_encrypted;
/* Rates */
- struct wcn36xx_hal_supported_rates supported_rates;
+ struct wcn36xx_hal_supported_rates_v1 supported_rates;
spinlock_t ampdu_lock; /* protects next two fields */
enum wcn36xx_ampdu_state ampdu_state[16];
@@ -223,10 +229,10 @@ struct wcn36xx {
spinlock_t hal_ind_lock;
struct list_head hal_ind_queue;
- struct work_struct scan_work;
struct cfg80211_scan_request *scan_req;
- int scan_freq;
- int scan_band;
+ bool sw_scan;
+ u8 sw_scan_opchannel;
+ struct ieee80211_vif *sw_scan_vif;
struct mutex scan_lock;
bool scan_aborted;
@@ -245,6 +251,7 @@ struct wcn36xx {
struct wcn36xx_dxe_mem_pool data_mem_pool;
struct sk_buff *tx_ack_skb;
+ struct timer_list tx_ack_timer;
/* RF module */
unsigned rf_id;
@@ -268,6 +275,7 @@ static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
wcn->fw_revision == revision);
}
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates);
static inline
struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 0851d2bede89..1c42410d68e1 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1739,7 +1739,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
return wil_p2p_cancel_listen(vif, cookie);
}
-/**
+/*
* find a specific IE in a list of IEs
* return a pointer to the beginning of IE in the list
* or NULL if not found
@@ -1766,7 +1766,7 @@ static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
ies_len);
}
-/**
+/*
* merge the IEs in two lists into a single list.
* do not include IEs from the second list which exist in the first list.
* add only vendor specific IEs from second list to keep
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 11d0c79e9056..2d618f90afa7 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -443,10 +443,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
/**
* wil6210_debugfs_init_offset - create set of debugfs files
- * @wil - driver's context, used for printing
- * @dbg - directory on the debugfs, where files will be created
- * @base - base address used in address calculation
- * @tbl - table with file descriptions. Should be terminated with empty element.
+ * @wil: driver's context, used for printing
+ * @dbg: directory on the debugfs, where files will be created
+ * @base: base address used in address calculation
+ * @tbl: table with file descriptions. Should be terminated with empty element.
*
* Creates files according to the @tbl.
*/
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index b1480b41cd3a..d13d081fdcc6 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -645,9 +645,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
return IRQ_HANDLED;
}
-/**
- * thread IRQ handler
- */
+/* thread IRQ handler */
static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 9b4ca6b256d2..a2f7b4c1da48 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -29,8 +29,7 @@ void wil_pmc_init(struct wil6210_priv *wil)
mutex_init(&wil->pmc.lock);
}
-/**
- * Allocate the physical ring (p-ring) and the required
+/* Allocate the physical ring (p-ring) and the required
* number of descriptors of required size.
* Initialize the descriptors as required by pmc dma.
* The descriptors' buffers dwords are initialized to hold
@@ -221,8 +220,7 @@ no_release_err:
mutex_unlock(&pmc->lock);
}
-/**
- * Traverse the p-ring and release all buffers.
+/* Traverse the p-ring and release all buffers.
* At the end release the p-ring memory
*/
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
@@ -299,8 +297,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
mutex_unlock(&pmc->lock);
}
-/**
- * Status of the last operation requested via debugfs: alloc/free/read.
+/* Status of the last operation requested via debugfs: alloc/free/read.
* 0 - success or negative errno
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
@@ -311,8 +308,7 @@ int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
return wil->pmc.last_cmd_status;
}
-/**
- * Read from required position up to the end of current descriptor,
+/* Read from required position up to the end of current descriptor,
* depends on descriptor size configured during alloc request.
*/
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 080e5aa60bea..cc830c795b33 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -249,8 +249,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
vring->ctx = NULL;
}
-/**
- * Allocate one skb for Rx VRING
+/* Allocate one skb for Rx VRING
*
* Safe to call from IRQ
*/
@@ -295,8 +294,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
return 0;
}
-/**
- * Adds radiotap header
+/* Adds radiotap header
*
* Any error indicated as "Bad FCS"
*
@@ -432,8 +430,7 @@ static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
return cid;
}
-/**
- * reap 1 frame from @swhead
+/* reap 1 frame from @swhead
*
* Rx descriptor copied to skb->cb
*
@@ -597,8 +594,7 @@ again:
return skb;
}
-/**
- * allocate and fill up to @count buffers in rx ring
+/* allocate and fill up to @count buffers in rx ring
* buffers posted at @swtail
* Note: we have a single RX queue for servicing all VIFs, but we
* allocate skbs with headroom according to main interface only. This
@@ -1002,8 +998,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
wil_netif_rx(skb, ndev, cid, stats, true);
}
-/**
- * Proceed all completed skb's from Rx VRING
+/* Proceed all completed skb's from Rx VRING
*
* Safe to call from NAPI poll, i.e. softirq with interrupts enabled
*/
@@ -1629,8 +1624,7 @@ void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
-/**
- * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
@@ -1660,8 +1654,7 @@ static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
-/**
- * Sets the descriptor @d up for csum. The corresponding
+/* Sets the descriptor @d up for csum. The corresponding
* @skb is used to obtain the protocol and headers length.
* Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
* Note, if d==NULL, the function only returns the protocol result.
@@ -2216,8 +2209,7 @@ static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return rc;
}
-/**
- * Check status of tx vrings and stop/wake net queues if needed
+/* Check status of tx vrings and stop/wake net queues if needed
* It will start/stop net queues of a specific VIF net_device.
*
* This function does one of two checks:
@@ -2419,8 +2411,7 @@ void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
sta->stats.tx_latency_max_us = skb_time_us;
}
-/**
- * Clean up transmitted skb's from the Tx VRING
+/* Clean up transmitted skb's from the Tx VRING
*
* Return number of descriptors cleared
*
@@ -2460,8 +2451,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
- /**
- * For the fragmented skb, HW will set DU bit only for the
+ /* For the fragmented skb, HW will set DU bit only for the
* last fragment. look for it.
* In TSO the first DU will include hdr desc
*/
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 7bfe867c7509..8ca2ce51c83e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -147,9 +147,7 @@ out_free:
return rc;
}
-/**
- * Allocate one skb for Rx descriptor RING
- */
+/* Allocate one skb for Rx descriptor RING */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
struct wil_ring *ring, u32 i)
{
@@ -1152,8 +1150,7 @@ wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
*msg = *_msg;
}
-/**
- * Clean up transmitted skb's from the Tx descriptor RING.
+/* Clean up transmitted skb's from the Tx descriptor RING.
* Return number of descriptors cleared.
*/
int wil_tx_sring_handler(struct wil6210_priv *wil,
@@ -1314,8 +1311,7 @@ again:
return desc_cnt;
}
-/**
- * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 10e10dc9fedf..e152dc29d177 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -15,8 +15,7 @@ void wil_platform_modexit(void)
{
}
-/**
- * wil_platform_init() - wil6210 platform module init
+/* wil_platform_init() - wil6210 platform module init
*
* The function must be called before all other functions in this module.
* It returns a handle which is used with the rest of the API
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c7136ce567ee..421aebbb49e5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(led_id,
#define WIL_WMI_PCP_STOP_TO_MS 5000
/**
- * WMI event receiving - theory of operations
+ * DOC: WMI event receiving - theory of operations
*
* When firmware about to report WMI event, it fills memory area
* in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for
@@ -49,7 +49,7 @@ MODULE_PARM_DESC(led_id,
*/
/**
- * Addressing - theory of operations
+ * DOC: Addressing - theory of operations
*
* There are several buses present on the WIL6210 card.
* Same memory areas are visible at different address on
@@ -66,8 +66,7 @@ MODULE_PARM_DESC(led_id,
* AHB address must be used.
*/
-/**
- * @sparrow_fw_mapping provides memory remapping table for sparrow
+/* sparrow_fw_mapping provides memory remapping table for sparrow
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -103,16 +102,14 @@ const struct fw_map sparrow_fw_mapping[] = {
{0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
-/**
- * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
+/* sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
-/**
- * @talyn_fw_mapping provides memory remapping table for Talyn
+/* talyn_fw_mapping provides memory remapping table for Talyn
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -154,8 +151,7 @@ const struct fw_map talyn_fw_mapping[] = {
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
-/**
- * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
+/* talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -229,7 +225,7 @@ u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
* return AHB address for given firmware internal (linker) address
- * @x - internal address
+ * @x: internal address
* If address have no valid AHB mapping, return 0
*/
static u32 wmi_addr_remap(u32 x)
@@ -247,7 +243,7 @@ static u32 wmi_addr_remap(u32 x)
/**
* find fw_mapping entry by section name
- * @section - section name
+ * @section: section name
*
* Return pointer to section or NULL if not found
*/
@@ -265,8 +261,9 @@ struct fw_map *wil_find_fw_mapping(const char *section)
/**
* Check address validity for WMI buffer; remap if needed
- * @ptr - internal (linker) fw/ucode address
- * @size - if non zero, validate the block does not
+ * @wil: driver data
+ * @ptr: internal (linker) fw/ucode address
+ * @size: if non zero, validate the block does not
* exceed the device memory (bar)
*
* Valid buffer should be DWORD aligned
@@ -300,9 +297,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
return wmi_buffer_block(wil, ptr_, 0);
}
-/**
- * Check address validity
- */
+/* Check address validity */
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
{
u32 off;
@@ -1577,8 +1572,7 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
evt->payload, payload_size);
}
-/**
- * find cid and ringid for the station vif
+/* find cid and ringid for the station vif
*
* return error, if other interfaces are used or ring was not found
*/
@@ -1868,8 +1862,7 @@ wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
}
-/**
- * Some events are ignored for purpose; and need not be interpreted as
+/* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
*/
static void wmi_evt_ignore(struct wil6210_vif *vif, int id, void *d, int len)
@@ -2578,6 +2571,7 @@ out:
/**
* wmi_rxon - turn radio on/off
+ * @wil: driver data
* @on: turn on if true, off otherwise
*
* Only switch radio. Channel should be set separately.
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index a63b5c2f1e17..404257800033 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -432,7 +432,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
case STATE_DFU_DOWNLOAD_IDLE:
at76_dbg(DBG_DFU, "DOWNLOAD...");
- /* fall through */
+ fallthrough;
case STATE_DFU_IDLE:
at76_dbg(DBG_DFU, "DFU IDLE");
@@ -1199,7 +1199,6 @@ static void at76_rx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
- priv->rx_tasklet.data = (unsigned long)urb;
tasklet_schedule(&priv->rx_tasklet);
}
@@ -1545,10 +1544,10 @@ exit:
return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
}
-static void at76_rx_tasklet(unsigned long param)
+static void at76_rx_tasklet(struct tasklet_struct *t)
{
- struct urb *urb = (struct urb *)param;
- struct at76_priv *priv = urb->context;
+ struct at76_priv *priv = from_tasklet(priv, t, rx_tasklet);
+ struct urb *urb = priv->rx_urb;
struct at76_rx_buffer *buf;
struct ieee80211_rx_status rx_status = { 0 };
@@ -2215,7 +2214,7 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
- tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0);
+ tasklet_setup(&priv->rx_tasklet, at76_rx_tasklet);
priv->pm_mode = AT76_PM_OFF;
priv->pm_period = 0;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index d5875836068c..707fe66727f8 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1227,7 +1227,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
case ISR_RxFRAMELOST:
priv->wstats.discard.misc++;
- /* fall through */
+ fallthrough;
case ISR_RxCOMPLETE:
rx_done_irq(priv);
break;
@@ -4228,7 +4228,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
/* Copyright 2003 Matthew T. Russotto */
/* But derived from the Atmel 76C502 firmware written by Atmel and */
/* included in "atmel wireless lan drivers" package */
-/**
+/*
This file is part of net.russotto.AtmelMACFW, hereto referred to
as AtmelMACFW
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index ca671fc13116..9a7c62bd5e43 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1317,7 +1317,7 @@ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
switch (queue_prio) {
default:
B43_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
ring = dev->dma.tx_ring_AC_VO;
break;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index a54dd4f7fa54..f175dbaffc30 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -781,8 +781,9 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
b43_write16(dev, B43_MMIO_XMTSEL, 0x0826);
b43_write16(dev, B43_MMIO_TXE0_CTL, 0x0000);
- if (!pa_on && phy->type == B43_PHYTYPE_N)
+ if (!pa_on && phy->type == B43_PHYTYPE_N) {
; /*b43_nphy_pa_override(dev, false) */
+ }
switch (phy->type) {
case B43_PHYTYPE_N:
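
The added braces around lone ; placeholders here (and in the b43legacy and phy_n hunks below) are cosmetic but deliberate: an if with a bare semicolon body triggers -Wempty-body in W=1 builds, while a braced empty statement makes the intentional no-op explicit. The shape of the change:

/* warns under -Wempty-body: */
if (reason & B43_IRQ_TXFIFO_FLUSH_OK)
	; /* TODO */

/* explicit and warning-free: */
if (reason & B43_IRQ_TXFIFO_FLUSH_OK) {
	; /* TODO */
}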
@@ -1873,7 +1874,7 @@ static void b43_handle_firmware_panic(struct b43_wldev *dev)
switch (reason) {
default:
b43dbg(dev->wl, "The panic reason is unknown.\n");
- /* fallthrough */
+ fallthrough;
case B43_FWPANIC_DIE:
/* Do not restart the controller or firmware.
* The device is nonfunctional from now on.
@@ -2013,8 +2014,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
handle_irq_beacon(dev);
if (reason & B43_IRQ_PMQ)
handle_irq_pmq(dev);
- if (reason & B43_IRQ_TXFIFO_FLUSH_OK)
+ if (reason & B43_IRQ_TXFIFO_FLUSH_OK) {
;/* TODO */
+ }
if (reason & B43_IRQ_NOISESAMPLE_OK)
handle_irq_noise(dev);
@@ -2266,7 +2268,7 @@ fw_ready:
size = be32_to_cpu(hdr->size);
if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
- /* fallthrough */
+ fallthrough;
case B43_FW_TYPE_IV:
if (hdr->ver != 1)
goto err_format;
@@ -3178,7 +3180,7 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
- /* fallthrough */
+ fallthrough;
case B43_PHYTYPE_B:
b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0);
@@ -5329,7 +5331,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
if (dev->phy.type != B43_PHYTYPE_G)
break;
- /* fall through */
+ fallthrough;
case 0x4313: /* BCM4311 */
case 0x431a: /* BCM4318 */
case 0x432a: /* BCM4321 */
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index 1de4de094d61..285490f6f0a1 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -458,7 +458,7 @@ void b43_software_rfkill(struct b43_wldev *dev, bool blocked)
b43_mac_enable(dev);
}
-/**
+/*
* b43_phy_txpower_adjust_work - TX power workqueue.
*
* Workqueue for updating the TX power parameters in hardware.
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index c685b4bb5ed6..d050971d150a 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -900,9 +900,6 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_phy_write(dev, 0x70, 0x50);
b43_phy_write(dev, 0x1ff, 0x30);
- if (0) /* TODO: condition */
- ; /* TODO: PHY op on reg 0x217 */
-
if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
else
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index ca2018da9753..b669dff24b6e 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -3239,7 +3239,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
if (!(dev->phy.rev >= 4 &&
b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
break;
- /* FALL THROUGH */
+ fallthrough;
case 0:
case 1:
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
@@ -3342,8 +3342,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH0, 0x0381);
b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH1, 0x0381);
- if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+ if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK) {
; /* TODO: 0x0080000000000000 HF */
+ }
}
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -4602,10 +4603,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && b43_is_40mhz(dev))
+ if (channel == 11 && b43_is_40mhz(dev)) {
; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- else
+ } else {
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
/* TODO: N PHY Adjust CRS Min Power (0x1E) */
}
@@ -4635,10 +4637,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
noise[0] = 0;
}
- if (!tone[0] && !noise[0])
+ if (!tone[0] && !noise[0]) {
; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- else
+ } else {
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
}
if (nphy->hang_avoid)
@@ -6166,8 +6169,9 @@ static int b43_phy_initn(struct b43_wldev *dev)
if (nphy->phyrxchain != 3)
b43_nphy_set_rx_core_state(dev, nphy->phyrxchain);
- if (nphy->mphase_cal_phase_id > 0)
+ if (nphy->mphase_cal_phase_id > 0) {
;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+ }
do_rssi_cal = false;
if (phy->rev >= 3) {
@@ -6211,8 +6215,9 @@ static int b43_phy_initn(struct b43_wldev *dev)
if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false))
if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
b43_nphy_save_cal(dev);
- } else if (nphy->mphase_cal_phase_id == 0)
+ } else if (nphy->mphase_cal_phase_id == 0) {
;/* N PHY Periodic Calibration with arg 3 */
+ }
} else {
b43_nphy_restore_cal(dev);
}
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 1a11c5dfb8d9..8c28a9250cd1 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -294,7 +294,7 @@ static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
switch (queue_prio) {
default:
B43_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
q = dev->pio.tx_queue_AC_VO;
break;
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index 7957db94e84c..41a25d909d0d 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3717,7 +3717,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
case 5:
if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
- /* fall through */
+ fallthrough;
case 4:
case 3:
return b43_ntab_tx_gain_epa_rev3_2g;
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index f7594e2a896e..7e2f70c4207c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -189,7 +189,7 @@ return dev->dma.tx_ring1;
switch (queue_priority) {
default:
B43legacy_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
ring = dev->dma.tx_ring3;
break;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 2eaf481f03f1..a27125b7922c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1275,9 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(unsigned long data)
+static void b43legacy_interrupt_tasklet(struct tasklet_struct *t)
{
- struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
+ struct b43legacy_wldev *dev = from_tasklet(dev, t, isr_tasklet);
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -1340,8 +1340,9 @@ static void b43legacy_interrupt_tasklet(unsigned long data)
handle_irq_beacon(dev);
if (reason & B43legacy_IRQ_PMQ)
handle_irq_pmq(dev);
- if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK)
+ if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK) {
;/*TODO*/
+ }
if (reason & B43legacy_IRQ_NOISESAMPLE_OK)
handle_irq_noise(dev);
@@ -1537,7 +1538,7 @@ static int do_request_fw(struct b43legacy_wldev *dev,
size = be32_to_cpu(hdr->size);
if (size != (*fw)->size - sizeof(struct b43legacy_fw_header))
goto err_format;
- /* fallthrough */
+ fallthrough;
case B43legacy_FW_TYPE_IV:
if (hdr->ver != 1)
goto err_format;
@@ -2076,7 +2077,7 @@ static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev)
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1);
- /* fallthrough */
+ fallthrough;
case B43legacy_PHYTYPE_B:
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0);
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0);
@@ -3741,9 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
wldev->wl = wl;
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
- tasklet_init(&wldev->isr_tasklet,
- b43legacy_interrupt_tasklet,
- (unsigned long)wldev);
+ tasklet_setup(&wldev->isr_tasklet, b43legacy_interrupt_tasklet);
if (modparam_pio)
wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.c b/drivers/net/wireless/broadcom/b43legacy/pio.c
index cbb761378619..aac413d0f629 100644
--- a/drivers/net/wireless/broadcom/b43legacy/pio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/pio.c
@@ -264,9 +264,9 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
return 0;
}
-static void tx_tasklet(unsigned long d)
+static void tx_tasklet(struct tasklet_struct *t)
{
- struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
+ struct b43legacy_pioqueue *queue = from_tasklet(queue, t, txtask);
struct b43legacy_wldev *dev = queue->dev;
unsigned long flags;
struct b43legacy_pio_txpacket *packet, *tmp_packet;
@@ -331,8 +331,7 @@ struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
INIT_LIST_HEAD(&queue->txfree);
INIT_LIST_HEAD(&queue->txqueue);
INIT_LIST_HEAD(&queue->txrunning);
- tasklet_init(&queue->txtask, tx_tasklet,
- (unsigned long)queue);
+ tasklet_setup(&queue->txtask, tx_tasklet);
value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
value &= ~B43legacy_MACCTL_BE;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 2c95a08a5871..3984fd7d918e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool inirq)
{
- brcmf_fws_rxreorder(ifp, skb);
+ brcmf_fws_rxreorder(ifp, skb, inirq);
}
static void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 1a7ab49295aa..f9ebb98b0e3c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -45,6 +45,7 @@
#define SDIO_FUNC2_BLOCKSIZE 512
#define SDIO_4373_FUNC2_BLOCKSIZE 256
#define SDIO_435X_FUNC2_BLOCKSIZE 256
+#define SDIO_4329_FUNC2_BLOCKSIZE 128
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -73,7 +74,7 @@ static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
sdiodev->irq_en = false;
}
- brcmf_sdio_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus, true);
return IRQ_HANDLED;
}
@@ -85,7 +86,7 @@ static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
brcmf_dbg(INTR, "IB intr triggered\n");
- brcmf_sdio_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus, false);
}
/* dummy handler for SDIO function 2 interrupt */
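
The new boolean tells brcmf_sdio_isr() which context it is in, something the removed in_interrupt() check used to guess: the out-of-band GPIO handler runs in hard-IRQ context (true), while in-band SDIO interrupts are dispatched from the MMC core's sdio_irq_thread and may sleep (false). A condensed sketch, with the device lookup hand-waved as hypothetical:

static irqreturn_t oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_sdio_dev *sdiodev = dev_id;	/* hypothetical wiring */

	brcmf_sdio_isr(sdiodev->bus, true);	/* hard-IRQ context */
	return IRQ_HANDLED;
}

static void ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_sdio_dev *sdiodev = func_to_sdiodev(func);	/* hypothetical */

	brcmf_sdio_isr(sdiodev->bus, false);	/* sdio_irq_thread(), may sleep */
}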
@@ -916,12 +917,13 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
break;
case SDIO_DEVICE_ID_BROADCOM_4359:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4354:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4356:
f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
break;
+ case SDIO_DEVICE_ID_BROADCOM_4329:
+ f2_blksz = SDIO_4329_FUNC2_BLOCKSIZE;
+ break;
default:
break;
}
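
Function 2 is the chip's data pipe and its block size is tuned per device; the new case gives the 4329 a 128-byte F2 block rather than the 512-byte default used by most chips. The chosen f2_blksz is presumably applied through the MMC core's sdio_set_block_size(), which must be called with the host claimed; a hedged sketch of that call pattern:

sdio_claim_host(func2);
err = sdio_set_block_size(func2, f2_blksz);	/* e.g. 128 for the 4329 */
sdio_release_host(func2);
if (err)
	pr_err("failed to set F2 block size: %d\n", err);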
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index ec2bec0999d1..f9f18ff451ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -65,11 +65,12 @@ enum brcmf_btcoex_state {
* @reg68: saved value of btc_params 68
* @saved_regs_part1: flag indicating regs 66,41,68
* have been saved
+ * @reg50: saved value of btc_params 50
* @reg51: saved value of btc_params 51
* @reg64: saved value of btc_params 64
* @reg65: saved value of btc_params 65
* @reg71: saved value of btc_params 71
- * @saved_regs_part1: flag indicating regs 50,51,64,65,71
+ * @saved_regs_part2: flag indicating regs 50,51,64,65,71
* have been saved
*/
struct brcmf_btcoex_info {
@@ -226,7 +227,7 @@ static bool brcmf_btcoex_is_sco_active(struct brcmf_if *ifp)
return res;
}
-/**
+/*
* btcmf_btcoex_save_part1() - save first step parameters.
*/
static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
@@ -246,7 +247,7 @@ static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_restore_part1() - restore first step parameters.
*/
static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
@@ -266,7 +267,7 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
static void brcmf_btcoex_timerfunc(struct timer_list *t)
@@ -441,9 +442,8 @@ static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_set_mode - set BT coex mode
- * @cfg: driver private cfg80211 data
* @mode: Wifi-Bluetooth coexistence mode
*
* return: 0 on success
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 623c0168da79..08f9d47f2e5c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -249,7 +249,8 @@ int brcmf_bus_reset(struct brcmf_bus *bus)
*/
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event,
+ bool inirq);
/* Receive async event packet from firmware. Callee disposes of rxp. */
void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ab0da2ff982e..a2dbbb977d0c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -56,6 +56,7 @@
#define RSN_AKM_PSK 2 /* Pre-shared Key */
#define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */
#define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */
+#define RSN_AKM_SAE 8 /* SAE */
#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
#define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3))
#define RSN_CAP_MFPR_MASK BIT(6)
@@ -3477,7 +3478,7 @@ brcmf_get_netinfo_array(struct brcmf_pno_scanresults_le *pfn_v1)
switch (pfn_v1->version) {
default:
WARN_ON(1);
- /* fall-thru */
+ fallthrough;
case cpu_to_le32(1):
netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
break;
@@ -3991,10 +3992,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[npmk].bssid);
- for (i = 0; i < WLAN_PMKID_LEN; i += 4)
- brcmf_dbg(CONN, "%02x %02x %02x %02x\n", pmk[npmk].pmkid[i],
- pmk[npmk].pmkid[i + 1], pmk[npmk].pmkid[i + 2],
- pmk[npmk].pmkid[i + 3]);
+ brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmk[npmk].pmkid);
err = brcmf_update_pmklist(cfg, ifp);
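
The one-line replacement above relies on the kernel's %*ph printf extension, which hex-dumps a small buffer (at most 64 bytes) with the field width giving the byte count — the same output as the removed four-bytes-per-call loop, just without the line breaks. Minimal usage:

u8 pmkid[WLAN_PMKID_LEN];	/* 16 bytes, from <linux/ieee80211.h> */

/* prints all 16 bytes as "xx xx xx ..." on one line */
pr_debug("%*ph\n", WLAN_PMKID_LEN, pmkid);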
@@ -4245,6 +4243,10 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n");
wpa_auth |= WPA2_AUTH_1X_SHA256;
break;
+ case RSN_AKM_SAE:
+ brcmf_dbg(TRACE, "RSN_AKM_SAE\n");
+ wpa_auth |= WPA3_AUTH_SAE_PSK;
+ break;
default:
bphy_err(drvr, "Invalid key mgmt info\n");
}
@@ -4262,11 +4264,12 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "MFP Required\n");
mfp = BRCMF_MFP_REQUIRED;
/* Firmware only supports mfp required in
- * combination with WPA2_AUTH_PSK_SHA256 or
- * WPA2_AUTH_1X_SHA256.
+ * combination with WPA2_AUTH_PSK_SHA256,
+ * WPA2_AUTH_1X_SHA256, or WPA3_AUTH_SAE_PSK.
*/
if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 |
- WPA2_AUTH_1X_SHA256))) {
+ WPA2_AUTH_1X_SHA256 |
+ WPA3_AUTH_SAE_PSK))) {
err = -EINVAL;
goto exit;
}
@@ -4682,6 +4685,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct cfg80211_crypto_settings *crypto = &settings->crypto;
const struct brcmf_tlv *ssid_ie;
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
@@ -4821,6 +4826,25 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
+ if (crypto->psk) {
+ brcmf_dbg(INFO, "using PSK offload\n");
+ profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK);
+ err = brcmf_set_pmk(ifp, crypto->psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
+ if (err < 0)
+ goto exit;
+ }
+ if (crypto->sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
+ err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
+ crypto->sae_pwd_len);
+ if (err < 0)
+ goto exit;
+ }
+ if (profile->use_fwauth == 0)
+ profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+
err = brcmf_parse_configure_security(ifp, settings,
NL80211_IFTYPE_AP);
if (err < 0) {
@@ -4907,6 +4931,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
struct brcmf_join_params join_params;
@@ -4918,6 +4943,14 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* first to make sure they get processed by fw. */
msleep(400);
+ if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
+ brcmf_set_pmk(ifp, NULL, 0);
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
+ brcmf_set_sae_password(ifp, NULL, 0);
+ profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+ }
+
if (ifp->vif->mbss) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
return err;
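
Start and stop are kept symmetric: brcmf_cfg80211_start_ap() records which credentials were offloaded in the use_fwauth bitmap, and the stop path above clears exactly those from the firmware — the PMK for PSK offload, the password for SAE — before resetting the bitmap. A bitmap rather than a single value is used because both offloads can be active on one AP. The bookkeeping, condensed:

/* at AP start: remember what was handed to the firmware */
if (crypto->psk)
	profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK);
if (crypto->sae_pwd)
	profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
if (!profile->use_fwauth)
	profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);

/* at AP stop: clear only what was set */
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
	brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
	brcmf_set_sae_password(ifp, NULL, 0);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);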
@@ -6476,7 +6509,7 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
default:
wiphy_warn(wiphy, "Firmware reported unsupported bandwidth %d\n",
ch.bw);
- /* fall through */
+ fallthrough;
case BRCMU_CHAN_BW_20:
/* enable the channel and disable other bandwidths
 * for now, as the mentioned order assures they are enabled
@@ -6614,10 +6647,10 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
switch (mimo_bwcap) {
case WLC_N_BW_40ALL:
bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
- /* fall-thru */
+ fallthrough;
case WLC_N_BW_20IN2G_40IN5G:
bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
- /* fall-thru */
+ fallthrough;
case WLC_N_BW_20ALL:
bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
@@ -7066,6 +7099,13 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SAE_OFFLOAD);
}
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWAUTH)) {
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
+ }
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 333fdf394f95..17817cdb5de2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -129,6 +129,19 @@ enum brcmf_profile_fwsup {
};
/**
+ * enum brcmf_profile_fwauth - firmware authenticator profile
+ *
+ * @BRCMF_PROFILE_FWAUTH_NONE: no firmware authenticator
+ * @BRCMF_PROFILE_FWAUTH_PSK: authenticator for WPA/WPA2-PSK
+ * @BRCMF_PROFILE_FWAUTH_SAE: authenticator for SAE
+ */
+enum brcmf_profile_fwauth {
+ BRCMF_PROFILE_FWAUTH_NONE,
+ BRCMF_PROFILE_FWAUTH_PSK,
+ BRCMF_PROFILE_FWAUTH_SAE
+};
+
+/**
* struct brcmf_cfg80211_profile - profile information.
*
* @bssid: bssid of joined/joining ibss.
@@ -140,6 +153,7 @@ struct brcmf_cfg80211_profile {
struct brcmf_cfg80211_security sec;
struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
enum brcmf_profile_fwsup use_fwsup;
+ u16 use_fwauth;
bool is_ft;
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a3a257089696..5bf11e46fc49 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1390,7 +1390,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4345_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
- /* fall-through */
+ fallthrough;
case BRCM_CC_43241_CHIP_ID:
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f89010a81ffb..3dd28f5fef19 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -395,7 +395,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
{
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -418,14 +418,15 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
- if (in_interrupt())
+ if (inirq) {
netif_rx(skb);
- else
+ } else {
/* If the receive is not processed inside an ISR,
* the softirqd must be woken explicitly to service
* the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
*/
netif_rx_ni(skb);
+ }
}
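
This hunk is the heart of the series' in_interrupt() removal: the caller's context is now threaded down explicitly instead of inferred, since in_interrupt() cannot distinguish "running in an ISR" from "running with bottom halves disabled" and was being phased out tree-wide. The dispatch reduces to:

static void deliver(struct sk_buff *skb, bool inirq)
{
	if (inirq)
		netif_rx(skb);		/* IRQ context: softirq runs on IRQ exit */
	else
		netif_rx_ni(skb);	/* process context: wakes NET_RX_SOFTIRQ */
}

As the later hunks show, each bus backend now states its context at the call site: the USB completion path passes true, while the SDIO read paths pass false.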
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -474,7 +475,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- brcmf_netif_rx(ifp, skb);
+ brcmf_netif_rx(ifp, skb, false);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -486,7 +487,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
if (ret || !(*ifp) || !(*ifp)->ndev) {
- if (ret != -ENODATA && *ifp)
+ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
(*ifp)->ndev->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
return -ENODATA;
@@ -496,7 +497,8 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
return 0;
}
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
+ bool inirq)
{
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -508,14 +510,16 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
return;
if (brcmf_proto_is_reorder_skb(skb)) {
- brcmf_proto_rxreorder(ifp, skb);
+ brcmf_proto_rxreorder(ifp, skb, inirq);
} else {
/* Process special event packets */
- if (handle_event)
- brcmf_fweh_process_skb(ifp->drvr, skb,
- BCMILCP_SUBTYPE_VENDOR_LONG);
+ if (handle_event) {
+ gfp_t gfp = inirq ? GFP_ATOMIC : GFP_KERNEL;
- brcmf_netif_rx(ifp, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb,
+ BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
+ }
+ brcmf_netif_rx(ifp, skb, inirq);
}
}
@@ -530,7 +534,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- brcmf_fweh_process_skb(ifp->drvr, skb, 0);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);
brcmu_pkt_buf_free_skb(skb);
}
@@ -1422,6 +1426,11 @@ void brcmf_detach(struct device *dev)
#endif
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ /* make sure primary interface removed last */
+ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
+ if (drvr->iflist[i])
+ brcmf_remove_interface(drvr->iflist[i], false);
+ }
brcmf_bus_stop(drvr->bus_if);
brcmf_fweh_detach(drvr);
@@ -1432,12 +1441,6 @@ void brcmf_detach(struct device *dev)
drvr->mon_if = NULL;
}
- /* make sure primary interface removed last */
- for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
- if (drvr->iflist[i])
- brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false);
- }
-
if (drvr->config) {
brcmf_p2p_detach(&drvr->config->p2p);
brcmf_cfg80211_detach(drvr->config);
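
Two things change here: interface removal moves before brcmf_bus_stop(), presumably so teardown can still reach the firmware over a live bus, and the loop walks the list backwards so the primary interface (index 0) is removed last, after every virtual interface that depends on it. Condensed:

/* reverse order: ifidx 0 (primary) goes last, while the bus is still up */
for (i = BRCMF_MAX_IFS - 1; i >= 0; i--)
	if (drvr->iflist[i])
		brcmf_remove_interface(drvr->iflist[i], false);
brcmf_bus_stop(drvr->bus_if);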
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 33b2ab3b54b0..5767d665cee5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 0dcefbd0c000..7c68d9849324 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -42,6 +42,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
{ BRCMF_FEAT_SAE, "sae" },
+ { BRCMF_FEAT_FWAUTH, "idauth" },
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index cda3fc1bab7f..d1f4257af696 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -28,6 +28,7 @@
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
* SAE: simultaneous authentication of equals
+ * FWAUTH: Firmware authenticator
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -49,7 +50,8 @@
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
- BRCMF_FEAT_DEF(SAE)
+ BRCMF_FEAT_DEF(SAE) \
+ BRCMF_FEAT_DEF(FWAUTH)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 3aed4c4b887a..d821a4758f8c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -59,7 +59,7 @@ struct nvram_parser {
bool boardrev_found;
};
-/**
+/*
* is_nvram_char() - check if char is a valid one for NVRAM entry
*
* It accepts all printable ASCII chars except for '#' which opens a comment.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index a5cced2c89ac..430d2cca98b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -23,6 +23,7 @@
* @ifidx: interface index related to this event.
* @ifaddr: ethernet address for interface.
* @emsg: common parameters of the firmware event message.
+ * @datalen: length of the data array
* @data: event specific data part of the firmware event.
*/
struct brcmf_fweh_queue_item {
@@ -35,7 +36,7 @@ struct brcmf_fweh_queue_item {
u8 data[];
};
-/**
+/*
* struct brcmf_fweh_event_name - code, name mapping entry.
*/
struct brcmf_fweh_event_name {
@@ -118,8 +119,8 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
* brcmf_fweh_handle_if_event() - handle IF event.
*
* @drvr: driver information object.
- * @item: queue entry.
- * @ifpp: interface object (may change upon ADD action).
+ * @emsg: event message object.
+ * @data: event object.
*/
static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
struct brcmf_event_msg *emsg,
@@ -128,7 +129,6 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
struct brcmf_if_event *ifevent = data;
struct brcmf_if *ifp;
bool is_p2pdev;
- int err = 0;
brcmf_dbg(EVENT, "action: %u ifidx: %u bsscfgidx: %u flags: %u role: %u\n",
ifevent->action, ifevent->ifidx, ifevent->bsscfgidx,
@@ -171,8 +171,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
brcmf_proto_reset_if(drvr, ifp);
- err = brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg,
- data);
+ brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg,
+ data);
if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
bool armed = brcmf_cfg80211_vif_event_armed(drvr->config);
@@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh = &drvr->fweh;
- /* cancel the worker */
- cancel_work_sync(&fweh->event_work);
- WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+ /* cancel the worker if initialized */
+ if (fweh->event_work.func) {
+ cancel_work_sync(&fweh->event_work);
+ WARN_ON(!list_empty(&fweh->event_q));
+ memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+ }
}
/**
@@ -381,18 +383,18 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
*
* @drvr: driver information object.
* @event_packet: event packet to process.
+ * @packet_len: length of the packet
*
* If the packet buffer contains a firmware event message it will
* dispatch the event to a registered handler (using worker).
*/
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
- u32 packet_len)
+ u32 packet_len, gfp_t gfp)
{
enum brcmf_fweh_event_code code;
struct brcmf_fweh_info *fweh = &drvr->fweh;
struct brcmf_fweh_queue_item *event;
- gfp_t alloc_flag = GFP_KERNEL;
void *data;
u32 datalen;
@@ -411,10 +413,7 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
datalen + sizeof(*event_packet) > packet_len)
return;
- if (in_interrupt())
- alloc_flag = GFP_ATOMIC;
-
- event = kzalloc(sizeof(*event) + datalen, alloc_flag);
+ event = kzalloc(sizeof(*event) + datalen, gfp);
if (!event)
return;
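
Same theme as the RX path: rather than testing in_interrupt() to pick allocation flags, the caller that actually knows its context chooses between GFP_ATOMIC and GFP_KERNEL and threads the choice through brcmf_fweh_process_skb() into the allocation here. The generic shape:

static void *alloc_event_item(size_t len, bool inirq)
{
	/* the caller knows its context; no in_interrupt() guessing */
	gfp_t gfp = inirq ? GFP_ATOMIC : GFP_KERNEL;

	return kzalloc(len, gfp);
}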
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index a82f51bc1e69..48414e8b9389 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -319,11 +319,12 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
int brcmf_fweh_activate_events(struct brcmf_if *ifp);
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
- u32 packet_len);
+ u32 packet_len, gfp_t gfp);
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
- struct sk_buff *skb, u16 stype)
+ struct sk_buff *skb, u16 stype,
+ gfp_t gfp)
{
struct brcmf_event *event_packet;
u16 subtype, usr_stype;
@@ -354,7 +355,7 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
return;
- brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN);
+ brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN, gfp);
}
#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2df6811c066e..437e83ea8902 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -354,6 +354,7 @@ enum brcmf_fws_mac_desc_state {
/**
* struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
*
+ * @name: name of the descriptor.
* @occupied: slot is in use.
* @mac_handle: handle for mac entry determined by firmware.
* @interface_id: interface index.
@@ -362,10 +363,15 @@ enum brcmf_fws_mac_desc_state {
* @generation: generation bit.
* @ac_bitmap: ac queue bitmap.
* @requested_credit: credits requested by firmware.
+ * @requested_packet: packet requested by firmware.
* @ea: ethernet address.
* @seq: per-node free-running sequence.
* @psq: power-save queue.
* @transit_count: packet in transit to firmware.
+ * @suppr_transit_count: suppressed packet in transit to firmware.
+ * @send_tim_signal: if set tim signal will be sent.
+ * @traffic_pending_bmp: traffic pending bitmap.
+ * @traffic_lastreported_bmp: traffic last reported bitmap.
*/
struct brcmf_fws_mac_descriptor {
char name[16];
@@ -498,20 +504,6 @@ struct brcmf_fws_info {
bool avoid_queueing;
};
-/*
- * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
- */
-static const int brcmf_fws_prio2fifo[] = {
- BRCMF_FWS_FIFO_AC_BE,
- BRCMF_FWS_FIFO_AC_BK,
- BRCMF_FWS_FIFO_AC_BK,
- BRCMF_FWS_FIFO_AC_BE,
- BRCMF_FWS_FIFO_AC_VI,
- BRCMF_FWS_FIFO_AC_VI,
- BRCMF_FWS_FIFO_AC_VO,
- BRCMF_FWS_FIFO_AC_VO
-};
-
#define BRCMF_FWS_TLV_DEF(name, id, len) \
case BRCMF_FWS_TYPE_ ## name: \
return len;
@@ -1672,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1690,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1702,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1727,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1841,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b16a9d1c0508..50e424b5880d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index f1a20db8daab..7c8e08ee8f0f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -536,7 +536,8 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
+ bool inirq)
{
}
@@ -1128,7 +1129,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_fweh_process_skb(ifp->drvr, skb, 0);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);
exit:
brcmu_pkt_buf_free_skb(skb);
@@ -1190,7 +1191,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_netif_rx(ifp, skb);
+ brcmf_netif_rx(ifp, skb, false);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
@@ -1620,6 +1621,8 @@ fail:
BRCMF_TX_IOCTL_MAX_MSG_SIZE,
msgbuf->ioctbuf,
msgbuf->ioctbuf_handle);
+ if (msgbuf->txflow_wq)
+ destroy_workqueue(msgbuf->txflow_wq);
kfree(msgbuf);
}
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index debd887e159e..ec6fc7a150a6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -145,11 +145,11 @@ struct brcmf_p2p_scan_le {
*
* @category: P2P_PUB_AF_CATEGORY
* @action: P2P_PUB_AF_ACTION
- * @oui[3]: P2P_OUI
+ * @oui: P2P_OUI
* @oui_type: OUI type - P2P_VER
* @subtype: OUI subtype - P2P_TYPE_*
* @dialog_token: nonzero, identifies req/rsp transaction
- * @elts[1]: Variable length information elements.
+ * @elts: Variable length information elements.
*/
struct brcmf_p2p_pub_act_frame {
u8 category;
@@ -165,11 +165,11 @@ struct brcmf_p2p_pub_act_frame {
* struct brcmf_p2p_action_frame - WiFi P2P Action Frame
*
* @category: P2P_AF_CATEGORY
- * @OUI[3]: OUI - P2P_OUI
+ * @oui: OUI - P2P_OUI
* @type: OUI Type - P2P_VER
* @subtype: OUI Subtype - P2P_AF_*
 * @dialog_token: nonzero, identifies req/resp transaction
- * @elts[1]: Variable length information elements.
+ * @elts: Variable length information elements.
*/
struct brcmf_p2p_action_frame {
u8 category;
@@ -186,7 +186,7 @@ struct brcmf_p2p_action_frame {
* @category: 0x04 Public Action Frame
* @action: 0x6c Advertisement Protocol
* @dialog_token: nonzero, identifies req/rsp transaction
- * @query_data[1]: Query Data. SD gas ireq SD gas iresp
+ * @query_data: Query Data. SD gas ireq SD gas iresp
*/
struct brcmf_p2psd_gas_pub_act_frame {
u8 category;
@@ -201,7 +201,7 @@ struct brcmf_p2psd_gas_pub_act_frame {
 * @mpc_onoff: to make sure the action frame is sent successfully, we have to
 * turn off mpc. 0: off, 1: on, (-1): do nothing
* @search_channel: 1: search peer's channel to send af
- * extra_listen: keep the dwell time to get af response frame.
+ * @extra_listen: keep the dwell time to get af response frame.
*/
struct brcmf_config_af_params {
s32 mpc_onoff;
@@ -763,9 +763,8 @@ exit:
* brcmf_p2p_run_escan() - escan callback for peer-to-peer.
*
* @cfg: driver private data for cfg80211 interface.
- * @ndev: net device for which scan is requested.
+ * @ifp: interface control.
* @request: scan request from cfg80211.
- * @action: scan action.
*
* Determines the P2P discovery state based to scan request parameters and
* validates the channels in the request.
@@ -913,8 +912,6 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
if (err)
return err;
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
/* override .run_escan() callback. */
cfg->escan_info.run = brcmf_p2p_run_escan;
}
@@ -969,9 +966,10 @@ exit:
* brcmf_p2p_remain_on_channel() - put device on channel and stay there.
*
* @wiphy: wiphy device.
+ * @wdev: wireless device.
* @channel: channel to stay on.
* @duration: time in ms to remain on channel.
- *
+ * @cookie: cookie.
*/
int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *channel,
@@ -1056,7 +1054,7 @@ void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
* brcmf_p2p_act_frm_search() - search function for action frame.
*
* @p2p: p2p device.
- * channel: channel on which action frame is to be trasmitted.
+ * @channel: channel on which action frame is to be transmitted.
*
 * search function to reach a common channel on which to send the action
 * frame. When channel is 0, all social channels will be used to send the af.
@@ -1331,6 +1329,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
 * brcmf_p2p_gon_req_collision() - check if GO negotiation collision
*
* @p2p: p2p device info struct.
+ * @mac: MAC address.
*
 * return true if received action frame is to be dropped.
*/
@@ -1546,7 +1545,6 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
struct brcmf_cfg80211_vif *vif;
struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
- s32 timeout = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1582,8 +1580,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
(p2p->wait_for_offchan_complete) ?
"off-channel" : "on-channel");
- timeout = wait_for_completion_timeout(&p2p->send_af_done,
- P2P_AF_MAX_WAIT_TIME);
+ wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME);
if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
brcmf_dbg(TRACE, "TX action frame operation is success\n");
@@ -2041,8 +2038,8 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
/**
* Change a P2P Role.
- * Parameters:
- * @mac: MAC address of the BSS to change a role
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type.
* Returns 0 if success.
*/
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index bd08d3aaa8f4..f4a79e217da5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
- void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
{
- ifp->drvr->proto->rxreorder(ifp, skb);
+ ifp->drvr->proto->rxreorder(ifp, skb, inirq);
}
static inline void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 3c07d1bbe1c6..99987a789e7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1704,7 +1704,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_rx_event(bus->sdiodev->dev, pfirst);
else
brcmf_rx_frame(bus->sdiodev->dev, pfirst,
- false);
+ false, false);
bus->sdcnt.rxglompkts++;
}
@@ -2038,7 +2038,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_rx_event(bus->sdiodev->dev, pkt);
else
brcmf_rx_frame(bus->sdiodev->dev, pkt,
- false);
+ false, false);
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@@ -3625,7 +3625,7 @@ void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
}
}
-void brcmf_sdio_isr(struct brcmf_sdio *bus)
+void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3636,7 +3636,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
/* Count the interrupt call */
bus->sdcnt.intrcount++;
- if (in_interrupt())
+ if (in_isr)
atomic_set(&bus->ipend, 1);
else
if (brcmf_sdio_intr_rstatus(bus)) {
@@ -4278,8 +4278,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
CY_43012_MESBUSYCTRL, &err);
break;
+ case SDIO_DEVICE_ID_BROADCOM_4329:
case SDIO_DEVICE_ID_BROADCOM_4339:
- brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4339_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
CY_4339_F2_WATERMARK, &err);
@@ -4292,7 +4293,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
CY_4339_MESBUSYCTRL, &err);
break;
case SDIO_DEVICE_ID_BROADCOM_43455:
- brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n",
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_43455_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
CY_43455_F2_WATERMARK, &err);
@@ -4305,9 +4306,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
CY_43455_MESBUSYCTRL, &err);
break;
case SDIO_DEVICE_ID_BROADCOM_4359:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4354:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4356:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_435X_F2_WATERMARK);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 12108927fb50..15d2c02fa3ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -372,7 +372,7 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdio_remove(struct brcmf_sdio *bus);
-void brcmf_sdio_isr(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active);
void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index ac5463838fcf..586f4dfc638b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -532,7 +532,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP ||
devinfo->bus_pub.state == BRCMFMAC_USB_STATE_SLEEP) {
skb_put(skb, urb->actual_length);
- brcmf_rx_frame(devinfo->dev, skb, true);
+ brcmf_rx_frame(devinfo->dev, skb, true, true);
brcmf_usb_rx_refill(devinfo, req);
usb_mark_last_busy(urb->dev);
} else {
@@ -1578,6 +1578,9 @@ void brcmf_usb_exit(void)
brcmf_dbg(USB, "Enter\n");
ret = driver_for_each_device(drv, NULL, NULL,
brcmf_usb_reset_device);
+ if (ret)
+ brcmf_err("failed to reset all usb devices %d\n", ret);
+
usb_deregister(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index fa391e4eb098..c9fb4b0cffaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -645,7 +645,7 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
u32 rspec = 0, rspec_fallback = 0;
u32 rts_rspec = 0, rts_rspec_fallback = 0;
- u8 plcp0, plcp3, is40, sgi, mcs;
+ u8 plcp0, is40, mcs;
u16 mch;
u8 preamble_type = BRCMS_GF_PREAMBLE;
u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
@@ -704,15 +704,12 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
txh->MacTxControlLow = cpu_to_le16(mcl);
fbr = txrate[1].count > 0;
- if (!fbr) {
+ if (!fbr)
plcp0 = plcp[0];
- plcp3 = plcp[3];
- } else {
+ else
plcp0 = txh->FragPLCPFallback[0];
- plcp3 = txh->FragPLCPFallback[3];
- }
+
is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
- sgi = plcp3_issgi(plcp3) ? 1 : 0;
mcs = plcp0 & ~MIMO_PLCP_40MHZ;
if (is40) {
@@ -850,10 +847,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
bool ba_recd = false, ack_recd = false;
u8 suc_mpdu = 0, tot_mpdu = 0;
uint supr_status;
- bool update_rate = true, retry = true, tx_error = false;
+ bool retry = true;
u16 mimoantsel = 0;
- u8 antselid = 0;
- u8 retry_limit, rr_retry_limit;
+ u8 retry_limit;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
#ifdef DEBUG
@@ -866,15 +862,11 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ini = &scb_ampdu->ini[tid];
retry_limit = ampdu->retry_limit_tid[tid];
- rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
memset(bitmap, 0, sizeof(bitmap));
queue = txs->frameid & TXFID_QUEUE_MASK;
supr_status = txs->status & TX_STATUS_SUPR_MASK;
if (txs->status & TX_STATUS_ACK_RCV) {
- if (TX_STATUS_SUPR_UF == supr_status)
- update_rate = false;
-
WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
start_seq = txs->sequence >> SEQNUM_SHIFT;
bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
@@ -898,7 +890,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ba_recd = true;
} else {
if (supr_status) {
- update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
brcms_dbg_ht(wlc->hw->d11core,
"%s: Pkt tx suppressed, illegal channel possibly %d\n",
@@ -923,11 +914,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
* if there were underflows, but pre-loading
* is not active, notify rate adaptation.
*/
- if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
- tx_error = true;
+ brcms_c_ffpld_check_txfunfl(wlc, queue);
}
} else if (txs->phyerr) {
- update_rate = false;
brcms_dbg_ht(wlc->hw->d11core,
"%s: ampdu tx phy error (0x%x)\n",
__func__, txs->phyerr);
@@ -1023,20 +1012,15 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
/* update rate state */
- antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
+ brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
}
void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct sk_buff *p, struct tx_status *txs)
{
- struct scb_ampdu *scb_ampdu;
struct brcms_c_info *wlc = ampdu->wlc;
- struct scb_ampdu_tid_ini *ini;
u32 s1 = 0, s2 = 0;
- struct ieee80211_tx_info *tx_info;
-
- tx_info = IEEE80211_SKB_CB(p);
/* BMAC_NOTE: For the split driver, second level txstatus comes later
* So if the ACK was received then wait for the second level else just
@@ -1060,8 +1044,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
}
if (scb) {
- scb_ampdu = &scb->scb_ampdu;
- ini = &scb_ampdu->ini[p->priority];
brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
} else {
/* loop through all pkts and free */
@@ -1069,7 +1051,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct d11txh *txh;
u16 mcl;
while (p) {
- tx_info = IEEE80211_SKB_CB(p);
txh = (struct d11txh *) p->data;
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
sizeof(*txh));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 648efcbc819f..818e523f6025 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -275,14 +275,13 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
}
}
-/**
+/*
* This function frees the WL per-device resources.
*
* This function frees resources owned by the WL device pointed to
* by the wl parameter.
*
* precondition: can both be called locked and unlocked
- *
*/
static void brcms_free(struct brcms_info *wl)
{
@@ -982,11 +981,11 @@ static const struct ieee80211_ops brcms_ops = {
.set_tim = brcms_ops_beacon_set_tim,
};
-void brcms_dpc(unsigned long data)
+void brcms_dpc(struct tasklet_struct *t)
{
struct brcms_info *wl;
- wl = (struct brcms_info *) data;
+ wl = from_tasklet(wl, t, tasklet);
spin_lock_bh(&wl->lock);
@@ -1115,7 +1114,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
return ieee_hw_rate_init(hw);
}
-/**
+/*
* attach to the WL device.
*
* Attach to the WL device identified by vendor and device parameters.
@@ -1149,7 +1148,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
init_waitqueue_head(&wl->tx_flush_wq);
/* setup the bottom half handler */
- tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
+ tasklet_setup(&wl->tasklet, brcms_dpc);
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
@@ -1210,7 +1209,7 @@ fail:
-/**
+/*
* determines if a device is a WL device, and if so, attaches it.
*
* This function determines if a device pointed to by pdev is a WL device,
@@ -1290,7 +1289,7 @@ static struct bcma_driver brcms_bcma_driver = {
.id_table = brcms_coreid_table,
};
-/**
+/*
* This is the main entry point for the brcmsmac driver.
*
* This function is scheduled upon module initialization and
@@ -1317,7 +1316,7 @@ static int __init brcms_module_init(void)
return 0;
}
-/**
+/*
* This function unloads the brcmsmac driver from the system.
*
* This function unconditionally unloads the brcmsmac driver module from the
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
index 198053dfc310..eaf926a96a88 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
@@ -106,7 +106,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
void brcms_free_timer(struct brcms_timer *timer);
void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
bool brcms_del_timer(struct brcms_timer *timer);
-void brcms_dpc(unsigned long data);
+void brcms_dpc(struct tasklet_struct *t);
void brcms_timer(struct brcms_timer *t);
void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 77494fc30c2c..763e0ec583d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -842,7 +842,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
uint supr_status;
bool lastframe;
struct ieee80211_hdr *h;
- u16 mcl;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *txrate;
int i;
@@ -879,7 +878,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
}
txh = (struct d11txh *) (p->data);
- mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr)
brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
@@ -1776,7 +1774,6 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
{
struct brcms_phy_pub *pih = wlc_hw->band->pi;
u32 phy_bw_clkbits;
- bool phy_in_reset = false;
brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
@@ -1799,7 +1796,6 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
/* reset the PHY */
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
(SICF_PRST | SICF_PCLKE));
- phy_in_reset = true;
} else {
brcms_b_core_ioctl(wlc_hw,
(SICF_PRST | SICF_PCLKE | SICF_BWMASK),
@@ -2270,11 +2266,8 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
{
- struct brcms_c_info *wlc;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- wlc = wlc_hw->wlc;
-
if (wlc_hw->ucode_loaded)
return;
@@ -3173,7 +3166,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
- u32 sflags;
u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
@@ -3206,7 +3198,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_c_gpio_init(wlc);
- sflags = bcma_aread32(core, BCMA_IOST);
+ bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3899,7 +3891,6 @@ static void brcms_c_setband(struct brcms_c_info *wlc,
static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
{
uint bandunit;
- bool switchband = false;
u16 old_chanspec = wlc->chanspec;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
@@ -3912,7 +3903,6 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
if (wlc->pub->_nbands > 1) {
bandunit = chspec_bandunit(chanspec);
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
- switchband = true;
if (wlc->bandlocked) {
brcms_err(wlc->hw->d11core,
"wl%d: %s: chspec %d band is locked!\n",
@@ -5095,13 +5085,6 @@ int brcms_c_up(struct brcms_c_info *wlc)
return 0;
}
-static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
-{
- uint callbacks = 0;
-
- return callbacks;
-}
-
static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
@@ -5179,7 +5162,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
uint callbacks = 0;
int i;
- bool dev_gone = false;
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
@@ -5197,7 +5179,7 @@ uint brcms_c_down(struct brcms_c_info *wlc)
callbacks += brcms_b_bmac_down_prep(wlc->hw);
- dev_gone = brcms_deviceremoved(wlc);
+ brcms_deviceremoved(wlc);
/* Call any registered down handlers */
for (i = 0; i < BRCMS_MAXMODULES; i++) {
@@ -5212,8 +5194,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
callbacks++;
wlc->WDarmed = false;
}
- /* cancel all other timers */
- callbacks += brcms_c_down_del_timer(wlc);
wlc->pub->up = false;
@@ -5390,15 +5370,7 @@ brcms_c_set_internal_rateset(struct brcms_c_info *wlc,
static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
{
- u8 r;
- bool war = false;
-
- if (wlc->pub->associated)
- r = wlc->bsscfg->current_bss->rateset.rates[0];
- else
- r = wlc->default_bss->rateset.rates[0];
-
- wlc_phy_ofdm_rateset_war(wlc->band->pi, war);
+ wlc_phy_ofdm_rateset_war(wlc->band->pi, false);
}
int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
@@ -5873,7 +5845,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
== NRATE_OVERRIDE_MCS_ONLY);
- int bcmerror = 0;
if (!ismcs)
return (u32) rate;
@@ -5884,7 +5855,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if (stf > PHY_TXC1_MODE_SDM) {
brcms_err(core, "wl%d: %s: Invalid stf\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
@@ -5895,7 +5865,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
&& (stf != PHY_TXC1_MODE_CDD))) {
brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
/* mcs > 7 must use stf SDM */
@@ -5917,7 +5886,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
&& (stf == PHY_TXC1_MODE_STBC))) {
brcms_err(core, "wl%d: %s: Invalid STBC\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
}
@@ -5925,7 +5893,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid OFDM\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
} else if (is_cck_rate(rate)) {
@@ -5933,20 +5900,17 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
|| (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid CCK\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
} else {
brcms_err(core, "wl%d: %s: Unknown rate type\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
"request\n", wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
@@ -6210,7 +6174,6 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
bool use_rts = false;
bool use_cts = false;
bool use_rifs = false;
- bool short_preamble[2] = { false, false };
u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
@@ -6296,10 +6259,6 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
rspec[k] =
hw->wiphy->bands[tx_info->band]->
bitrates[txrate[k]->idx].hw_value;
- short_preamble[k] =
- txrate[k]->
- flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
- true : false;
} else {
rspec[k] = BRCM_RATE_1M;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 2441714169de..ccc621b8ed9f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -1513,14 +1513,12 @@ static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi)
static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band)
{
u8 i;
- s8 temp, vbat;
for (i = 0; i < TXP_NUM_RATES; i++)
pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
- vbat = wlc_phy_env_measure_vbat(pi);
- temp = wlc_phy_env_measure_temperature(pi);
-
+ wlc_phy_env_measure_vbat(pi);
+ wlc_phy_env_measure_temperature(pi);
}
static s8
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 7ef36234a25d..7717eb85a1db 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -357,61 +357,6 @@ u16 rxiq_cal_rf_reg[11] = {
RADIO_2064_REG12A,
};
-static const
-struct lcnphy_rx_iqcomp lcnphy_rx_iqcomp_table_rev0[] = {
- {1, 0, 0},
- {2, 0, 0},
- {3, 0, 0},
- {4, 0, 0},
- {5, 0, 0},
- {6, 0, 0},
- {7, 0, 0},
- {8, 0, 0},
- {9, 0, 0},
- {10, 0, 0},
- {11, 0, 0},
- {12, 0, 0},
- {13, 0, 0},
- {14, 0, 0},
- {34, 0, 0},
- {38, 0, 0},
- {42, 0, 0},
- {46, 0, 0},
- {36, 0, 0},
- {40, 0, 0},
- {44, 0, 0},
- {48, 0, 0},
- {52, 0, 0},
- {56, 0, 0},
- {60, 0, 0},
- {64, 0, 0},
- {100, 0, 0},
- {104, 0, 0},
- {108, 0, 0},
- {112, 0, 0},
- {116, 0, 0},
- {120, 0, 0},
- {124, 0, 0},
- {128, 0, 0},
- {132, 0, 0},
- {136, 0, 0},
- {140, 0, 0},
- {149, 0, 0},
- {153, 0, 0},
- {157, 0, 0},
- {161, 0, 0},
- {165, 0, 0},
- {184, 0, 0},
- {188, 0, 0},
- {192, 0, 0},
- {196, 0, 0},
- {200, 0, 0},
- {204, 0, 0},
- {208, 0, 0},
- {212, 0, 0},
- {216, 0, 0},
-};
-
static const u32 lcnphy_23bitgaincode_table[] = {
0x200100,
0x200200,
@@ -1363,7 +1308,7 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
u16 tx_pwr_ctrl;
u8 tx_gain_index_old = 0;
bool result = false, tx_gain_override_old = false;
- u16 i, Core1TxControl_old, RFOverride0_old,
+ u16 i, Core1TxControl_old,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
@@ -1404,7 +1349,7 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
or_phy_reg(pi, 0x631, 0x0015);
- RFOverride0_old = read_phy_reg(pi, 0x44c);
+ read_phy_reg(pi, 0x44c); /* RFOverride0_old */
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
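Several hunks here drop a local that only ever held a register value but keep the read itself, annotating it with the old variable name. Discarding the result silences set-but-unused warnings while preserving any read side effects the PHY may rely on. A small sketch of the idiom; demo_read_reg() and the offset are illustrative, not driver API:

```c
#include <linux/io.h>
#include <linux/types.h>

/* Illustrative MMIO read helper; read_phy_reg() plays this role in
 * the patch above. */
static u16 demo_read_reg(void __iomem *base, unsigned long off)
{
	return readw(base + off);
}

static void demo_touch_regs(void __iomem *base)
{
	/* Read kept purely for its effect on the device; the returned
	 * value is deliberately discarded, with the removed local's
	 * name preserved as a comment for future readers. */
	demo_read_reg(base, 0x44c);	/* RFOverride0_old */
}
```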
@@ -1664,7 +1609,7 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
const struct chan_info_2064_lcnphy *ci;
u8 rfpll_doubler = 0;
u8 pll_pwrup, pll_pwrup_ovr;
- s32 qFxtal, qFref, qFvco, qFcal;
+ s32 qFcal;
u8 d15, d16, f16, e44, e45;
u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div;
u16 loop_bw, d30, setCount;
@@ -1738,10 +1683,7 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
fvco3 = (ci->freq * 3);
fref3 = 2 * fpfd;
- qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16);
- qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16);
qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ;
- qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16);
write_radio_reg(pi, RADIO_2064_REG04F, 0x02);
@@ -2853,7 +2795,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
bool suspend, tx_gain_override_old;
struct lcnphy_txgains old_gains;
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
+ u16 idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
idleTssi0_regvalue_2C;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
u16 SAVE_lpfgain = read_radio_reg(pi, RADIO_2064_REG112);
@@ -2863,7 +2805,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
- idleTssi = read_phy_reg(pi, 0x4ab);
+ read_phy_reg(pi, 0x4ab); /* idleTssi */
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -2887,8 +2829,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
wlc_lcnphy_set_bbmult(pi, 0x0);
wlc_phy_do_dummy_tx(pi, true, OFF);
- idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
- >> 0);
+ read_phy_reg(pi, 0x4ab); /* idleTssi */
idleTssi0_2C = ((read_phy_reg(pi, 0x63e) & (0x1ff << 0))
>> 0);
@@ -3858,8 +3799,6 @@ void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
{
- struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
-
wlc_lcnphy_set_cc(pi, 0, 0, 0);
wlc_lcnphy_set_cc(pi, 2, 0, 0);
wlc_lcnphy_set_cc(pi, 3, 0, 0);
@@ -3872,10 +3811,10 @@ static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
wlc_lcnphy_a1(pi, 2, 2, 1);
wlc_lcnphy_a1(pi, 0, 4, 3);
- iqcc0 = wlc_lcnphy_get_cc(pi, 0);
- locc2 = wlc_lcnphy_get_cc(pi, 2);
- locc3 = wlc_lcnphy_get_cc(pi, 3);
- locc4 = wlc_lcnphy_get_cc(pi, 4);
+ wlc_lcnphy_get_cc(pi, 0);
+ wlc_lcnphy_get_cc(pi, 2);
+ wlc_lcnphy_get_cc(pi, 3);
+ wlc_lcnphy_get_cc(pi, 4);
}
u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi)
@@ -4191,9 +4130,7 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
{
- bool suspend, full_cal;
- const struct lcnphy_rx_iqcomp *rx_iqcomp;
- int rx_iqcomp_sz;
+ bool suspend;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
s8 index;
struct phytbl_info tab;
@@ -4203,9 +4140,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi->phy_lastcal = pi->sh->now;
pi->phy_forcecal = false;
- full_cal =
- (pi_lcn->lcnphy_full_cal_channel !=
- CHSPEC_CHANNEL(pi->radio_chanspec));
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
@@ -4220,9 +4154,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
wlc_lcnphy_txpwrtbl_iqlo_cal(pi);
- rx_iqcomp = lcnphy_rx_iqcomp_table_rev0;
- rx_iqcomp_sz = ARRAY_SIZE(lcnphy_rx_iqcomp_table_rev0);
-
if (LCNREV_IS(pi->pubpi.phy_rev, 1))
wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 40);
else
@@ -4916,10 +4847,6 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
offset_ofdm >>= 4;
}
} else {
- u8 opo = 0;
-
- opo = sprom->opo;
-
for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
pi->tx_srom_max_rate_2g[i] = txpwr;
@@ -5065,8 +4992,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
+ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
+ kfree(pi->u.pi_lcnphy);
return false;
+ }
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
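Unlike the dead-store removals elsewhere in this patch, the wlc_phy_attach_lcnphy() hunk above is a real bug fix: pi->u.pi_lcnphy is allocated earlier in the attach path, so returning false on a failed SROM read used to leak it. A sketch of the error-path rule being applied, with illustrative names:

```c
#include <linux/slab.h>
#include <linux/types.h>

struct demo_phy {
	void *priv;
};

/* Stand-in for the SROM read that can fail late in attach. */
static bool demo_read_srom(struct demo_phy *pi)
{
	return false;	/* pretend the read failed */
}

static bool demo_attach(struct demo_phy *pi)
{
	pi->priv = kzalloc(64, GFP_KERNEL);
	if (!pi->priv)
		return false;

	if (!demo_read_srom(pi)) {
		/* Undo the earlier allocation before reporting failure,
		 * which is exactly what the added kfree() does. */
		kfree(pi->priv);
		pi->priv = NULL;
		return false;
	}

	return true;
}
```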
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index a3f094568cfb..8580a2754789 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -19033,7 +19033,6 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
u32 nphy_adj_noise_var_buf[] = { 0x3ff, 0x3ff };
bool isAdjustNoiseVar = false;
uint numTonesAdjust = 0;
- u32 tempval = 0;
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
if (pi->phyhang_avoid)
@@ -19139,9 +19138,6 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
numTonesAdjust,
nphy_adj_tone_id_buf,
nphy_adj_noise_var_buf);
-
- tempval = 0;
-
} else {
wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
NULL);
@@ -21980,7 +21976,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
u16 auxADC_rssi_ctrlL, auxADC_rssi_ctrlH;
s32 auxADC_Vl;
u16 RfctrlOverride5_save, RfctrlOverride6_save;
- u16 RfctrlMiscReg5_save, RfctrlMiscReg6_save;
+ u16 RfctrlMiscReg5_save;
u16 RSSIMultCoef0QPowerDet_save;
u16 tempsense_Rcal;
@@ -21995,7 +21991,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
RfctrlOverride5_save = read_phy_reg(pi, 0x346);
RfctrlOverride6_save = read_phy_reg(pi, 0x347);
RfctrlMiscReg5_save = read_phy_reg(pi, 0x344);
- RfctrlMiscReg6_save = read_phy_reg(pi, 0x345);
+ read_phy_reg(pi, 0x345); /* RfctrlMiscReg6_save */
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x0A, 16,
&auxADC_Vmid_save);
@@ -22983,7 +22979,7 @@ int
wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh)
{
s16 rxpwr, rxpwr0, rxpwr1;
- s16 phyRx0_l, phyRx2_l;
+ s16 phyRx2_l;
rxpwr = 0;
rxpwr0 = rxh->PhyRxStatus_1 & PRXS1_nphy_PWR0_MASK;
@@ -22994,7 +22990,6 @@ wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh)
if (rxpwr1 > 127)
rxpwr1 -= 256;
- phyRx0_l = rxh->PhyRxStatus_0 & 0x00ff;
phyRx2_l = rxh->PhyRxStatus_2 & 0x00ff;
if (phyRx2_l > 127)
phyRx2_l -= 256;
@@ -23097,8 +23092,7 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
u16 bb_mult;
u8 phy_bw, sample_cmd;
u16 orig_RfseqCoreActv;
- u16 lpf_bw_ctl_override3, lpf_bw_ctl_override4, lpf_bw_ctl_miscreg3,
- lpf_bw_ctl_miscreg4;
+ u16 lpf_bw_ctl_override3, lpf_bw_ctl_override4;
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
@@ -23111,12 +23105,7 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
lpf_bw_ctl_override3 = read_phy_reg(pi, 0x342) & (0x1 << 7);
lpf_bw_ctl_override4 = read_phy_reg(pi, 0x343) & (0x1 << 7);
- if (lpf_bw_ctl_override3 | lpf_bw_ctl_override4) {
- lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
- lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
- } else {
+ if (!(lpf_bw_ctl_override3 | lpf_bw_ctl_override4)) {
wlc_phy_rfctrl_override_nphy_rev7(
pi,
(0x1 << 7),
@@ -23126,12 +23115,9 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
NPHY_REV7_RFCTRLOVERRIDE_ID1);
pi->nphy_sample_play_lpf_bw_ctl_ovr = true;
-
- lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
- lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
}
+ read_phy_reg(pi, 0x340); /* lpf_bw_ctl_miscreg3 */
+ read_phy_reg(pi, 0x341); /* lpf_bw_ctl_miscreg4 */
}
if ((pi->nphy_bb_mult_save & BB_MULT_VALID_MASK) == 0) {
@@ -23403,7 +23389,6 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
struct nphy_iqcal_params *params)
{
u8 k;
- int idx;
u16 gain_index;
u8 band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
@@ -23436,13 +23421,10 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
(target_gain.pga[core_no] << 4) |
(target_gain.txgm[core_no] << 8));
- idx = -1;
for (k = 0; k < NPHY_IQCAL_NUMGAINS; k++) {
if (tbl_iqcal_gainparams_nphy[band_idx][k][0] ==
- gain_index) {
- idx = k;
+ gain_index)
break;
- }
}
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
@@ -24704,7 +24686,6 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
{
u16 phy_a1, phy_a2, phy_a3;
u16 phy_a4, phy_a5;
- bool phy_a6;
u8 phy_a7, m[2];
u32 phy_a8 = 0;
struct nphy_txgains phy_a9;
@@ -24714,9 +24695,6 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
phy_a7 = (core == PHY_CORE_0) ? 1 : 0;
- phy_a6 = ((cal_mode == CAL_GCTRL)
- || (cal_mode == CAL_SOFT)) ? true : false;
-
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
phy_a9 = wlc_phy_get_tx_gain_nphy(pi);
@@ -24996,7 +24974,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
s32 phy_a7, phy_a8;
u32 phy_a9;
int phy_a10;
- bool phy_a11 = false;
int phy_a12;
u8 phy_a13 = 0;
u8 phy_a14;
@@ -25064,8 +25041,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
if (!phy_a6 && (phy_a3 != phy_a5)) {
if (!phy_a3)
phy_a12 -= (u8) phy_a1;
-
- phy_a11 = true;
break;
}
@@ -25079,8 +25054,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
phy_a12 = phy_a14;
else
phy_a12 = phy_a13;
-
- phy_a11 = true;
break;
}
@@ -25110,8 +25083,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
if (!phy_a6 && (phy_a3 != phy_a5)) {
if (!phy_a3)
phy_a12 -= (u8) phy_a1;
-
- phy_a11 = true;
break;
}
@@ -25125,8 +25096,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
phy_a12 = 0;
else
phy_a12 = 127;
-
- phy_a11 = true;
break;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
index be703be34616..5331b5468e14 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -105,105 +105,6 @@ static const u32 dot11lcn_gain_tbl_rev0[] = {
0x00000000,
};
-static const u32 dot11lcn_gain_tbl_rev1[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00001197,
- 0x00005197,
- 0x00009197,
- 0x0000d197,
- 0x00011197,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00005157,
- 0x00009157,
- 0x0000d157,
- 0x00011157,
- 0x00015157,
- 0x00019157,
- 0x0001d157,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
static const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = {
0x0401,
0x0402,
@@ -1507,19 +1408,6 @@ const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
,
};
-static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
- {&dot11lcn_gain_tbl_rev1,
- ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
- 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_rev0,
- ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_rev0,
- ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
- ,
-};
-
const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index 7607e67d20c7..396d005f4d16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -9014,274 +9014,6 @@ static const u16 papd_comp_rfpwr_tbl_core1_rev3[] = {
0x01d6,
};
-static const u32 papd_comp_epsilon_tbl_core0_rev3[] = {
- 0x00000000,
- 0x00001fa0,
- 0x00019f78,
- 0x0001df7e,
- 0x03fa9f86,
- 0x03fd1f90,
- 0x03fe5f8a,
- 0x03fb1f94,
- 0x03fd9fa0,
- 0x00009f98,
- 0x03fd1fac,
- 0x03ff9fa2,
- 0x03fe9fae,
- 0x00001fae,
- 0x03fddfb4,
- 0x03ff1fb8,
- 0x03ff9fbc,
- 0x03ffdfbe,
- 0x03fe9fc2,
- 0x03fedfc6,
- 0x03fedfc6,
- 0x03ff9fc8,
- 0x03ff5fc6,
- 0x03fedfc2,
- 0x03ff9fc0,
- 0x03ff5fac,
- 0x03ff5fac,
- 0x03ff9fa2,
- 0x03ff9fa6,
- 0x03ff9faa,
- 0x03ff5fb0,
- 0x03ff5fb4,
- 0x03ff1fca,
- 0x03ff5fce,
- 0x03fcdfdc,
- 0x03fb4006,
- 0x00000030,
- 0x03ff808a,
- 0x03ff80da,
- 0x0000016c,
- 0x03ff8318,
- 0x03ff063a,
- 0x03fd8bd6,
- 0x00014ffe,
- 0x00034ffe,
- 0x00034ffe,
- 0x0003cffe,
- 0x00040ffe,
- 0x00040ffe,
- 0x0003cffe,
- 0x0003cffe,
- 0x00020ffe,
- 0x03fe0ffe,
- 0x03fdcffe,
- 0x03f94ffe,
- 0x03f54ffe,
- 0x03f44ffe,
- 0x03ef8ffe,
- 0x03ee0ffe,
- 0x03ebcffe,
- 0x03e8cffe,
- 0x03e74ffe,
- 0x03e4cffe,
- 0x03e38ffe,
-};
-
-static const u32 papd_cal_scalars_tbl_core0_rev3[] = {
- 0x05af005a,
- 0x0571005e,
- 0x05040066,
- 0x04bd006c,
- 0x047d0072,
- 0x04430078,
- 0x03f70081,
- 0x03cb0087,
- 0x03870091,
- 0x035e0098,
- 0x032e00a1,
- 0x030300aa,
- 0x02d800b4,
- 0x02ae00bf,
- 0x028900ca,
- 0x026400d6,
- 0x024100e3,
- 0x022200f0,
- 0x020200ff,
- 0x01e5010e,
- 0x01ca011e,
- 0x01b0012f,
- 0x01990140,
- 0x01830153,
- 0x016c0168,
- 0x0158017d,
- 0x01450193,
- 0x013301ab,
- 0x012101c5,
- 0x011101e0,
- 0x010201fc,
- 0x00f4021a,
- 0x00e6011d,
- 0x00d9012e,
- 0x00cd0140,
- 0x00c20153,
- 0x00b70167,
- 0x00ac017c,
- 0x00a30193,
- 0x009a01ab,
- 0x009101c4,
- 0x008901df,
- 0x008101fb,
- 0x007a0219,
- 0x00730239,
- 0x006d025b,
- 0x0067027e,
- 0x006102a4,
- 0x005c02cc,
- 0x005602f6,
- 0x00520323,
- 0x004d0353,
- 0x00490385,
- 0x004503bb,
- 0x004103f3,
- 0x003d042f,
- 0x003a046f,
- 0x003704b2,
- 0x003404f9,
- 0x00310545,
- 0x002e0596,
- 0x002b05f5,
- 0x00290640,
- 0x002606a4,
-};
-
-static const u32 papd_comp_epsilon_tbl_core1_rev3[] = {
- 0x00000000,
- 0x00001fa0,
- 0x00019f78,
- 0x0001df7e,
- 0x03fa9f86,
- 0x03fd1f90,
- 0x03fe5f8a,
- 0x03fb1f94,
- 0x03fd9fa0,
- 0x00009f98,
- 0x03fd1fac,
- 0x03ff9fa2,
- 0x03fe9fae,
- 0x00001fae,
- 0x03fddfb4,
- 0x03ff1fb8,
- 0x03ff9fbc,
- 0x03ffdfbe,
- 0x03fe9fc2,
- 0x03fedfc6,
- 0x03fedfc6,
- 0x03ff9fc8,
- 0x03ff5fc6,
- 0x03fedfc2,
- 0x03ff9fc0,
- 0x03ff5fac,
- 0x03ff5fac,
- 0x03ff9fa2,
- 0x03ff9fa6,
- 0x03ff9faa,
- 0x03ff5fb0,
- 0x03ff5fb4,
- 0x03ff1fca,
- 0x03ff5fce,
- 0x03fcdfdc,
- 0x03fb4006,
- 0x00000030,
- 0x03ff808a,
- 0x03ff80da,
- 0x0000016c,
- 0x03ff8318,
- 0x03ff063a,
- 0x03fd8bd6,
- 0x00014ffe,
- 0x00034ffe,
- 0x00034ffe,
- 0x0003cffe,
- 0x00040ffe,
- 0x00040ffe,
- 0x0003cffe,
- 0x0003cffe,
- 0x00020ffe,
- 0x03fe0ffe,
- 0x03fdcffe,
- 0x03f94ffe,
- 0x03f54ffe,
- 0x03f44ffe,
- 0x03ef8ffe,
- 0x03ee0ffe,
- 0x03ebcffe,
- 0x03e8cffe,
- 0x03e74ffe,
- 0x03e4cffe,
- 0x03e38ffe,
-};
-
-static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
- 0x05af005a,
- 0x0571005e,
- 0x05040066,
- 0x04bd006c,
- 0x047d0072,
- 0x04430078,
- 0x03f70081,
- 0x03cb0087,
- 0x03870091,
- 0x035e0098,
- 0x032e00a1,
- 0x030300aa,
- 0x02d800b4,
- 0x02ae00bf,
- 0x028900ca,
- 0x026400d6,
- 0x024100e3,
- 0x022200f0,
- 0x020200ff,
- 0x01e5010e,
- 0x01ca011e,
- 0x01b0012f,
- 0x01990140,
- 0x01830153,
- 0x016c0168,
- 0x0158017d,
- 0x01450193,
- 0x013301ab,
- 0x012101c5,
- 0x011101e0,
- 0x010201fc,
- 0x00f4021a,
- 0x00e6011d,
- 0x00d9012e,
- 0x00cd0140,
- 0x00c20153,
- 0x00b70167,
- 0x00ac017c,
- 0x00a30193,
- 0x009a01ab,
- 0x009101c4,
- 0x008901df,
- 0x008101fb,
- 0x007a0219,
- 0x00730239,
- 0x006d025b,
- 0x0067027e,
- 0x006102a4,
- 0x005c02cc,
- 0x005602f6,
- 0x00520323,
- 0x004d0353,
- 0x00490385,
- 0x004503bb,
- 0x004103f3,
- 0x003d042f,
- 0x003a046f,
- 0x003704b2,
- 0x003404f9,
- 0x00310545,
- 0x002e0596,
- 0x002b05f5,
- 0x00290640,
- 0x002606a4,
-};
-
const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
{&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
};
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 316672486d82..87b9398b03fd 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -321,8 +321,8 @@ static int do8bitIO /* = 0 */;
#define CMD_DELTLV 0x002b
#define CMD_FINDNEXTTLV 0x002c
#define CMD_PSPNODES 0x0030
-#define CMD_SETCW 0x0031
-#define CMD_SETPCF 0x0032
+#define CMD_SETCW 0x0031
+#define CMD_SETPCF 0x0032
#define CMD_SETPHYREG 0x003e
#define CMD_TXTEST 0x003f
#define MAC_ENABLETX 0x0101
@@ -433,7 +433,7 @@ static int do8bitIO /* = 0 */;
#define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC)
#ifdef CHECK_UNKNOWN_INTS
-#define IGNORE_INTS ( EV_CMD | EV_UNKNOWN)
+#define IGNORE_INTS (EV_CMD | EV_UNKNOWN)
#else
#define IGNORE_INTS (~STATUS_INTS)
#endif
@@ -1107,9 +1107,9 @@ static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
struct airo_info;
-static int get_dec_u16( char *buffer, int *start, int limit );
-static void OUT4500( struct airo_info *, u16 reg, u16 value );
-static unsigned short IN4500( struct airo_info *, u16 reg );
+static int get_dec_u16(char *buffer, int *start, int limit);
+static void OUT4500(struct airo_info *, u16 reg, u16 value);
+static unsigned short IN4500(struct airo_info *, u16 reg);
static u16 setup_card(struct airo_info*, u8 *mac, int lock);
static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
@@ -1127,24 +1127,24 @@ static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
static int PC4500_writerid(struct airo_info*, u16 rid, const void
*pBuf, int len, int lock);
-static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
- int len, int dummy );
+static int do_writerid(struct airo_info*, u16 rid, const void *rid_data,
+ int len, int dummy);
static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);
-static int mpi_send_packet (struct net_device *dev);
+static int mpi_send_packet(struct net_device *dev);
static void mpi_unmap_card(struct pci_dev *pci);
static void mpi_receive_802_3(struct airo_info *ai);
static void mpi_receive_802_11(struct airo_info *ai);
-static int waitbusy (struct airo_info *ai);
+static int waitbusy(struct airo_info *ai);
-static irqreturn_t airo_interrupt( int irq, void* dev_id);
+static irqreturn_t airo_interrupt(int irq, void* dev_id);
static int airo_thread(void *data);
-static void timer_func( struct net_device *dev );
+static void timer_func(struct net_device *dev);
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
-static void airo_read_wireless_stats (struct airo_info *local);
+static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
+static void airo_read_wireless_stats(struct airo_info *local);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
static int writerids(struct net_device *dev, aironet_ioctl *comp);
@@ -1155,8 +1155,8 @@ static int micsetup(struct airo_info *ai);
static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
-static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
-static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
+static u8 airo_rssi_to_dbm(tdsRssiEntry *rssi_rid, u8 rssi);
+static u8 airo_dbm_to_pct(tdsRssiEntry *rssi_rid, u8 dbm);
static void airo_networks_free(struct airo_info *ai);
@@ -1261,16 +1261,16 @@ static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen,
return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
}
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
+static int setup_proc_entry(struct net_device *dev,
+ struct airo_info *apriv);
+static int takedown_proc_entry(struct net_device *dev,
+ struct airo_info *apriv);
static int cmdreset(struct airo_info *ai);
-static int setflashmode (struct airo_info *ai);
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
+static int setflashmode(struct airo_info *ai);
+static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime);
static int flashputbuf(struct airo_info *ai);
-static int flashrestart(struct airo_info *ai,struct net_device *dev);
+static int flashrestart(struct airo_info *ai, struct net_device *dev);
#define airo_print(type, name, fmt, args...) \
printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
@@ -1294,14 +1294,14 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
***********************************************************************
*/
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
+static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
struct crypto_sync_skcipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
+static int flashpchar(struct airo_info *ai, int byte, int dwelltime);
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
struct crypto_sync_skcipher *tfm)
@@ -1361,7 +1361,8 @@ static void micinit(struct airo_info *ai)
/* micsetup - Get ready for business */
-static int micsetup(struct airo_info *ai) {
+static int micsetup(struct airo_info *ai)
+{
int i;
if (ai->tfm == NULL)
@@ -1373,32 +1374,32 @@ static int micsetup(struct airo_info *ai) {
return ERROR;
}
- for (i=0; i < NUM_MODULES; i++) {
- memset(&ai->mod[i].mCtx,0,sizeof(miccntx));
- memset(&ai->mod[i].uCtx,0,sizeof(miccntx));
+ for (i = 0; i < NUM_MODULES; i++) {
+ memset(&ai->mod[i].mCtx, 0, sizeof(miccntx));
+ memset(&ai->mod[i].uCtx, 0, sizeof(miccntx));
}
return SUCCESS;
}
-static const u8 micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
+static const u8 micsnap[] = {0xAA, 0xAA, 0x03, 0x00, 0x40, 0x96, 0x00, 0x02};
/*===========================================================================
* Description: Mic a packet
- *
+ *
* Inputs: etherHead * pointer to an 802.3 frame
- *
+ *
* Returns: BOOLEAN if successful, otherwise false.
 * PacketTxLen will be updated with the mic'd packet's size.
 *
 * Caveats: It is assumed that the frame buffer will already
 * be big enough to hold the largest mic message possible.
* (No memory allocation is done here).
- *
+ *
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*/
-static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, int payLen)
+static int encapsulate(struct airo_info *ai, etherHead *frame, MICBuffer *mic, int payLen)
{
miccntx *context;
@@ -1409,7 +1410,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
context = &ai->mod[0].mCtx;
else
context = &ai->mod[0].uCtx;
-
+
if (!context->valid)
return ERROR;
@@ -1422,10 +1423,10 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
context->tx += 2;
emmh32_init(&context->seed); // Mic the packet
- emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
- emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
- emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
+ emmh32_update(&context->seed, frame->da, ETH_ALEN * 2); // DA, SA
+ emmh32_update(&context->seed, (u8*)&mic->typelen, 10); // Type/Length and Snap
+ emmh32_update(&context->seed, (u8*)&mic->seq, sizeof(mic->seq)); //SEQ
+ emmh32_update(&context->seed, (u8*)(frame + 1), payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1444,11 +1445,11 @@ typedef enum {
/*===========================================================================
* Description: Decapsulates a MIC'd packet and returns the 802.3 packet
* (removes the MIC stuff) if packet is a valid packet.
- *
- * Inputs: etherHead pointer to the 802.3 packet
- *
+ *
+ * Inputs: etherHead pointer to the 802.3 packet
+ *
* Returns: BOOLEAN - TRUE if packet should be dropped otherwise FALSE
- *
+ *
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*---------------------------------------------------------------------------
@@ -1488,35 +1489,35 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
//Now do the mic error checking.
//Receive seq must be odd
- if ( (micSEQ & 1) == 0 ) {
+ if ((micSEQ & 1) == 0) {
ai->micstats.rxWrongSequence++;
return ERROR;
}
for (i = 0; i < NUM_MODULES; i++) {
int mcast = eth->da[0] & 1;
- //Determine proper context
+ //Determine proper context
context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;
-
+
//Make sure context is valid
if (!context->valid) {
if (i == 0)
micError = NOMICPLUMMED;
- continue;
+ continue;
}
- //DeMic it
+ //DeMic it
if (!mic->typelen)
mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);
-
+
emmh32_init(&context->seed);
- emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
- emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
- emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
+ emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
+ emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
+ emmh32_update(&context->seed, (u8 *)&mic->seq, sizeof(mic->seq));
+ emmh32_update(&context->seed, (u8 *)(eth + 1), payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
-
+
if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
//Invalid Mic
if (i == 0)
@@ -1547,22 +1548,22 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
/*===========================================================================
* Description: Checks the Rx Seq number to make sure it is valid
* and hasn't already been received
- *
+ *
* Inputs: miccntx - mic context to check seq against
* micSeq - the Mic seq number
- *
- * Returns: TRUE if valid otherwise FALSE.
+ *
+ * Returns: TRUE if valid otherwise FALSE.
*
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*---------------------------------------------------------------------------
*/
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
+static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq)
{
- u32 seq,index;
+ u32 seq, index;
- //Allow for the ap being rebooted - if it is then use the next
+ //Allow for the ap being rebooted - if it is then use the next
//sequence number of the current sequence number - might go backwards
if (mcast) {
@@ -1583,10 +1584,10 @@ static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSe
//Too old of a SEQ number to check.
if ((s32)seq < 0)
return ERROR;
-
- if ( seq > 64 ) {
+
+ if (seq > 64) {
//Window is infinite forward
- MoveWindow(context,micSeq);
+ MoveWindow(context, micSeq);
return SUCCESS;
}
@@ -1599,7 +1600,7 @@ static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSe
	//Add sequence number to the list of received numbers.
context->rx |= index;
- MoveWindow(context,micSeq);
+ MoveWindow(context, micSeq);
return SUCCESS;
}
@@ -1613,7 +1614,7 @@ static void MoveWindow(miccntx *context, u32 micSeq)
//Move window if seq greater than the middle of the window
if (micSeq > context->window) {
shift = (micSeq - context->window) >> 1;
-
+
//Shift out old
if (shift < 32)
context->rx >>= shift;
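The comment blocks above describe RxSeqValid()'s replay protection: a 32-bit mask records which MIC sequence numbers near the current window were already received, and MoveWindow() shifts the mask as newer sequence numbers arrive. A simplified, self-contained sketch of that sliding-window idea, in plain C with an illustrative layout rather than the driver's exact arithmetic:

```c
#include <stdbool.h>
#include <stdint.h>

/* 'base' anchors the window and 'seen' records nearby arrivals.
 * Valid sequence numbers are assumed to advance in steps of two,
 * mirroring the odd-only receive sequences the driver checks. */
struct seq_window {
	uint32_t base;
	uint32_t seen;
};

static bool seq_accept(struct seq_window *w, uint32_t seq)
{
	int32_t off = (int32_t)(seq - w->base) / 2;
	uint32_t bit;

	if (off < -16)
		return false;		/* too old to track: drop */

	if (off >= 16) {		/* far ahead: slide the window */
		w->seen = off < 32 ? w->seen >> off : 0;
		w->base = seq;
		return true;
	}

	bit = 1u << (off + 16);
	if (w->seen & bit)
		return false;		/* replay: already seen */
	w->seen |= bit;
	return true;
}
```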
@@ -1638,7 +1639,7 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
{
/* take the keying material, expand if necessary, truncate at 16-bytes */
/* run through AES counter mode to generate context->coeff[] */
-
+
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
u8 iv[AES_BLOCK_SIZE] = {};
@@ -1669,11 +1670,11 @@ static void emmh32_init(emmh32_context *context)
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
{
int coeff_position, byte_position;
-
+
if (len == 0) return;
-
+
coeff_position = context->position >> 2;
-
+
/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3;
if (byte_position) {
@@ -1712,12 +1713,12 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
{
int coeff_position, byte_position;
u32 val;
-
+
u64 sum, utmp;
s64 stmp;
coeff_position = context->position >> 2;
-
+
/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3;
if (byte_position) {
@@ -1750,7 +1751,7 @@ static int readBSSListRid(struct airo_info *ai, int first,
if (first == 1) {
if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
if (down_interruptible(&ai->sem))
return -ERESTARTSYS;
ai->list_bss_task = current;
@@ -1815,7 +1816,7 @@ static inline void checkThrottle(struct airo_info *ai)
int i;
/* Old hardware had a limit on encryption speed */
if (ai->config.authType != AUTH_OPEN && maxencrypt) {
- for(i=0; i<8; i++) {
+ for (i = 0; i<8; i++) {
if (ai->config.rates[i] > maxencrypt) {
ai->config.rates[i] = 0;
}
@@ -1840,7 +1841,7 @@ static int writeConfigRid(struct airo_info *ai, int lock)
else
clear_bit(FLAG_ADHOC, &ai->flags);
- return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
+ return PC4500_writerid(ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
}
static int readStatusRid(struct airo_info *ai, StatusRid *statr, int lock)
@@ -1871,7 +1872,8 @@ static void try_auto_wep(struct airo_info *ai)
}
}
-static int airo_open(struct net_device *dev) {
+static int airo_open(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
int rc = 0;
@@ -1947,7 +1949,7 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ai->aux_lock, flags);
skb_queue_tail (&ai->txq, skb);
pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
netif_wake_queue (dev);
if (pending == 0) {
@@ -2096,7 +2098,8 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
}
}
-static void airo_end_xmit(struct net_device *dev) {
+static void airo_end_xmit(struct net_device *dev)
+{
u16 status;
int i;
struct airo_info *priv = dev->ml_priv;
@@ -2110,7 +2113,7 @@ static void airo_end_xmit(struct net_device *dev) {
up(&priv->sem);
i = 0;
- if ( status == SUCCESS ) {
+ if (status == SUCCESS) {
netif_trans_update(dev);
for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
} else {
@@ -2130,7 +2133,7 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
struct airo_info *priv = dev->ml_priv;
u32 *fids = priv->fids;
- if ( skb == NULL ) {
+ if (skb == NULL) {
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
@@ -2140,10 +2143,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
}
/* Find a vacant FID */
- for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ );
+ for (i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++);
+ for (j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++);
- if ( j >= MAX_FIDS / 2 ) {
+ if (j >= MAX_FIDS / 2) {
netif_stop_queue(dev);
if (i == MAX_FIDS / 2) {
@@ -2167,7 +2170,8 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void airo_end_xmit11(struct net_device *dev) {
+static void airo_end_xmit11(struct net_device *dev)
+{
u16 status;
int i;
struct airo_info *priv = dev->ml_priv;
@@ -2181,7 +2185,7 @@ static void airo_end_xmit11(struct net_device *dev) {
up(&priv->sem);
i = MAX_FIDS / 2;
- if ( status == SUCCESS ) {
+ if (status == SUCCESS) {
netif_trans_update(dev);
for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
} else {
@@ -2208,7 +2212,7 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if ( skb == NULL ) {
+ if (skb == NULL) {
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
@@ -2218,10 +2222,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
}
/* Find a vacant FID */
- for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ );
+ for (i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++);
+ for (j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++);
- if ( j >= MAX_FIDS ) {
+ if (j >= MAX_FIDS) {
netif_stop_queue(dev);
if (i == MAX_FIDS) {
@@ -2295,19 +2299,21 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void airo_set_promisc(struct airo_info *ai) {
+static void airo_set_promisc(struct airo_info *ai)
+{
Cmd cmd;
Resp rsp;
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_SETMODE;
+ cmd.cmd = CMD_SETMODE;
clear_bit(JOB_PROMISC, &ai->jobs);
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
issuecommand(ai, &cmd, &rsp);
up(&ai->sem);
}
-static void airo_set_multicast_list(struct net_device *dev) {
+static void airo_set_multicast_list(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
@@ -2357,7 +2363,8 @@ static void del_airo_dev(struct airo_info *ai)
list_del(&ai->dev_list);
}
-static int airo_close(struct net_device *dev) {
+static int airo_close(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
netif_stop_queue(dev);
@@ -2372,7 +2379,7 @@ static int airo_close(struct net_device *dev) {
set_bit(FLAG_RADIO_DOWN, &ai->flags);
disable_MAC(ai, 1);
#endif
- disable_interrupts( ai );
+ disable_interrupts(ai);
free_irq(dev->irq, dev);
@@ -2382,16 +2389,16 @@ static int airo_close(struct net_device *dev) {
return 0;
}
-void stop_airo_card( struct net_device *dev, int freeres )
+void stop_airo_card(struct net_device *dev, int freeres)
{
struct airo_info *ai = dev->ml_priv;
set_bit(FLAG_RADIO_DOWN, &ai->flags);
disable_MAC(ai, 1);
disable_interrupts(ai);
- takedown_proc_entry( dev, ai );
+ takedown_proc_entry(dev, ai);
if (test_bit(FLAG_REGISTERED, &ai->flags)) {
- unregister_netdev( dev );
+ unregister_netdev(dev);
if (ai->wifidev) {
unregister_netdev(ai->wifidev);
free_netdev(ai->wifidev);
@@ -2415,7 +2422,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
kfree(ai->SSID);
if (freeres) {
/* PCMCIA frees this stuff, so only for PCI and ISA */
- release_region( dev->base_addr, 64 );
+ release_region(dev->base_addr, 64);
if (test_bit(FLAG_MPI, &ai->flags)) {
if (ai->pci)
mpi_unmap_card(ai->pci);
@@ -2423,13 +2430,13 @@ void stop_airo_card( struct net_device *dev, int freeres )
iounmap(ai->pcimem);
if (ai->pciaux)
iounmap(ai->pciaux);
- pci_free_consistent(ai->pci, PCI_SHARED_LEN,
- ai->shared, ai->shared_dma);
+ dma_free_coherent(&ai->pci->dev, PCI_SHARED_LEN,
+ ai->shared, ai->shared_dma);
}
}
crypto_free_sync_skcipher(ai->tfm);
del_airo_dev(ai);
- free_netdev( dev );
+ free_netdev(dev);
}
EXPORT_SYMBOL(stop_airo_card);
@@ -2468,56 +2475,56 @@ static int mpi_init_descriptors (struct airo_info *ai)
/* Alloc card RX descriptors */
netif_stop_queue(ai->dev);
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = FID_RX;
cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
cmd.parm2 = MPI_MAX_FIDS;
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
return rc;
}
- for (i=0; i<MPI_MAX_FIDS; i++) {
+ for (i = 0; i<MPI_MAX_FIDS; i++) {
memcpy_toio(ai->rxfids[i].card_ram_off,
&ai->rxfids[i].rx_desc, sizeof(RxFid));
}
/* Alloc card TX descriptors */
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = FID_TX;
cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
cmd.parm2 = MPI_MAX_FIDS;
- for (i=0; i<MPI_MAX_FIDS; i++) {
+ for (i = 0; i<MPI_MAX_FIDS; i++) {
ai->txfids[i].tx_desc.valid = 1;
memcpy_toio(ai->txfids[i].card_ram_off,
&ai->txfids[i].tx_desc, sizeof(TxFid));
}
ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
return rc;
}
/* Alloc card Rid descriptor */
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = RID_RW;
cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
cmd.parm2 = 1; /* Magic number... */
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RID");
return rc;
@@ -2574,9 +2581,10 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
}
/* Reserve PKTSIZE for each fid and 2K for the Rids */
- ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
+ ai->shared = dma_alloc_coherent(&pci->dev, PCI_SHARED_LEN,
+ &ai->shared_dma, GFP_KERNEL);
if (!ai->shared) {
- airo_print_err("", "Couldn't alloc_consistent %d",
+ airo_print_err("", "Couldn't alloc_coherent %d",
PCI_SHARED_LEN);
goto free_auxmap;
}
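The DMA changes in this file follow the removal of the legacy PCI DMA wrappers: pci_alloc_consistent() was a thin macro around dma_alloc_coherent() that hardwired GFP_ATOMIC, so the conversion both switches to the generic API on &pci->dev and makes the (now sleepable) GFP_KERNEL allocation context explicit. A minimal before/after sketch; DEMO_SHARED_LEN stands in for PCI_SHARED_LEN:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#define DEMO_SHARED_LEN 4096	/* illustrative size */

static void *demo_alloc_shared(struct pci_dev *pci, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pci, DEMO_SHARED_LEN, dma),
	 * which implied GFP_ATOMIC even in sleepable probe context. */
	return dma_alloc_coherent(&pci->dev, DEMO_SHARED_LEN, dma,
				  GFP_KERNEL);
}

static void demo_free_shared(struct pci_dev *pci, void *vaddr,
			     dma_addr_t dma)
{
	/* was: pci_free_consistent(pci, DEMO_SHARED_LEN, vaddr, dma) */
	dma_free_coherent(&pci->dev, DEMO_SHARED_LEN, vaddr, dma);
}
```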
@@ -2589,7 +2597,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
vpackoff = ai->shared;
/* RX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
+ for (i = 0; i < MPI_MAX_FIDS; i++) {
ai->rxfids[i].pending = 0;
ai->rxfids[i].card_ram_off = pciaddroff;
ai->rxfids[i].virtual_host_addr = vpackoff;
@@ -2604,7 +2612,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
}
/* TX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
+ for (i = 0; i < MPI_MAX_FIDS; i++) {
ai->txfids[i].card_ram_off = pciaddroff;
ai->txfids[i].virtual_host_addr = vpackoff;
ai->txfids[i].tx_desc.valid = 1;
@@ -2636,7 +2644,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
return 0;
free_shared:
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
+ ai->shared_dma);
free_auxmap:
iounmap(ai->pciaux);
free_memmap:
@@ -2674,7 +2683,7 @@ static void wifi_setup(struct net_device *dev)
dev->min_mtu = 68;
dev->max_mtu = MIC_MSGLEN_MAX;
dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 100;
+ dev->tx_queue_len = 100;
eth_broadcast_addr(dev->broadcast);
@@ -2703,13 +2712,14 @@ static struct net_device *init_wifidev(struct airo_info *ai,
return dev;
}
-static int reset_card( struct net_device *dev , int lock) {
+static int reset_card(struct net_device *dev, int lock)
+{
struct airo_info *ai = dev->ml_priv;
if (lock && down_interruptible(&ai->sem))
return -1;
waitbusy (ai);
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ OUT4500(ai, COMMAND, CMD_SOFTRESET);
msleep(200);
waitbusy (ai);
msleep(200);
@@ -2774,9 +2784,9 @@ static const struct net_device_ops mpi_netdev_ops = {
};
-static struct net_device *_init_airo_card( unsigned short irq, int port,
+static struct net_device *_init_airo_card(unsigned short irq, int port,
int is_pcmcia, struct pci_dev *pci,
- struct device *dmdev )
+ struct device *dmdev)
{
struct net_device *dev;
struct airo_info *ai;
@@ -2849,7 +2859,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
if (probe) {
if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
- airo_print_err(dev->name, "MAC could not be enabled" );
+ airo_print_err(dev->name, "MAC could not be enabled");
rc = -EIO;
goto err_out_map;
}
@@ -2907,8 +2917,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
/* Allocate the transmit buffers */
if (probe && !test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
+ for (i = 0; i < MAX_FIDS; i++)
+ ai->fids[i] = transmit_allocate(ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
if (setup_proc_entry(dev, dev->ml_priv) < 0)
goto err_out_wifi;
@@ -2922,14 +2932,15 @@ err_out_reg:
unregister_netdev(dev);
err_out_map:
if (test_bit(FLAG_MPI,&ai->flags) && pci) {
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
+ ai->shared_dma);
iounmap(ai->pciaux);
iounmap(ai->pcimem);
mpi_unmap_card(ai->pci);
}
err_out_res:
if (!is_pcmcia)
- release_region( dev->base_addr, 64 );
+ release_region(dev->base_addr, 64);
err_out_nets:
airo_networks_free(ai);
err_out_free:
@@ -2938,15 +2949,16 @@ err_out_free:
return NULL;
}
-struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
+struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
struct device *dmdev)
{
- return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
+ return _init_airo_card (irq, port, is_pcmcia, NULL, dmdev);
}
EXPORT_SYMBOL(init_airo_card);
-static int waitbusy (struct airo_info *ai) {
+static int waitbusy (struct airo_info *ai)
+{
int delay = 0;
while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
udelay (10);
@@ -2956,7 +2968,7 @@ static int waitbusy (struct airo_info *ai) {
return delay < 10000;
}
-int reset_airo_card( struct net_device *dev )
+int reset_airo_card(struct net_device *dev)
{
int i;
struct airo_info *ai = dev->ml_priv;
@@ -2964,24 +2976,25 @@ int reset_airo_card( struct net_device *dev )
if (reset_card (dev, 1))
return -1;
- if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
+ if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
airo_print_err(dev->name, "MAC could not be enabled");
return -1;
}
airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
/* Allocate the transmit buffers if needed */
if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
+ for (i = 0; i < MAX_FIDS; i++)
+ ai->fids[i] = transmit_allocate (ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
- enable_interrupts( ai );
+ enable_interrupts(ai);
netif_wake_queue(dev);
return 0;
}
EXPORT_SYMBOL(reset_airo_card);
-static void airo_send_event(struct net_device *dev) {
+static void airo_send_event(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
union iwreq_data wrqu;
StatusRid status_rid;
@@ -2998,7 +3011,8 @@ static void airo_send_event(struct net_device *dev) {
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
-static void airo_process_scan_results (struct airo_info *ai) {
+static void airo_process_scan_results (struct airo_info *ai)
+{
union iwreq_data wrqu;
BSSListRid bss;
int rc;
@@ -3014,14 +3028,14 @@ static void airo_process_scan_results (struct airo_info *ai) {
/* Try to read the first entry of the scan result */
rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
- if((rc) || (bss.index == cpu_to_le16(0xffff))) {
+ if ((rc) || (bss.index == cpu_to_le16(0xffff))) {
/* No scan results */
goto out;
}
/* Read and parse all entries */
tmp_net = NULL;
- while((!rc) && (bss.index != cpu_to_le16(0xffff))) {
+ while ((!rc) && (bss.index != cpu_to_le16(0xffff))) {
/* Grab a network off the free list */
if (!list_empty(&ai->network_free_list)) {
tmp_net = list_entry(ai->network_free_list.next,
@@ -3062,13 +3076,14 @@ out:
wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
}
-static int airo_thread(void *data) {
+static int airo_thread(void *data)
+{
struct net_device *dev = data;
struct airo_info *ai = dev->ml_priv;
int locked;
set_freezable();
- while(1) {
+ while (1) {
/* make swsusp happy with our thread */
try_to_freeze();
@@ -3088,11 +3103,11 @@ static int airo_thread(void *data) {
break;
if (ai->expires || ai->scan_timeout) {
if (ai->scan_timeout &&
- time_after_eq(jiffies,ai->scan_timeout)){
+ time_after_eq(jiffies, ai->scan_timeout)) {
set_bit(JOB_SCAN_RESULTS, &ai->jobs);
break;
} else if (ai->expires &&
- time_after_eq(jiffies,ai->expires)){
+ time_after_eq(jiffies, ai->expires)) {
set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
@@ -3442,11 +3457,11 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
spin_lock_irqsave(&ai->aux_lock, flags);
if (!skb_queue_empty(&ai->txq)) {
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
mpi_send_packet(ai->dev);
} else {
clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
netif_wake_queue(ai->dev);
}
OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
@@ -3526,9 +3541,9 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
if (status & (EV_TX | EV_TXCPY | EV_TXEXC))
airo_handle_tx(ai, status);
- if ( status & ~STATUS_INTS & ~IGNORE_INTS ) {
+ if (status & ~STATUS_INTS & ~IGNORE_INTS) {
airo_print_warn(ai->dev->name, "Got weird status %x",
- status & ~STATUS_INTS & ~IGNORE_INTS );
+ status & ~STATUS_INTS & ~IGNORE_INTS);
}
}
@@ -3547,27 +3562,29 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
 * NOTE: If used with 8bit mode and SMP, bad things will happen!
 * Why would someone do 8 bit IO in an SMP machine?!?
*/
-static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
+static void OUT4500(struct airo_info *ai, u16 reg, u16 val)
+{
if (test_bit(FLAG_MPI,&ai->flags))
reg <<= 1;
- if ( !do8bitIO )
- outw( val, ai->dev->base_addr + reg );
+ if (!do8bitIO)
+ outw(val, ai->dev->base_addr + reg);
else {
- outb( val & 0xff, ai->dev->base_addr + reg );
- outb( val >> 8, ai->dev->base_addr + reg + 1 );
+ outb(val & 0xff, ai->dev->base_addr + reg);
+ outb(val >> 8, ai->dev->base_addr + reg + 1);
}
}
-static u16 IN4500( struct airo_info *ai, u16 reg ) {
+static u16 IN4500(struct airo_info *ai, u16 reg)
+{
unsigned short rc;
if (test_bit(FLAG_MPI,&ai->flags))
reg <<= 1;
- if ( !do8bitIO )
- rc = inw( ai->dev->base_addr + reg );
+ if (!do8bitIO)
+ rc = inw(ai->dev->base_addr + reg);
else {
- rc = inb( ai->dev->base_addr + reg );
- rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
+ rc = inb(ai->dev->base_addr + reg);
+ rc += ((int)inb(ai->dev->base_addr + reg + 1)) << 8;
}
return rc;
}
@@ -3611,7 +3628,8 @@ static int enable_MAC(struct airo_info *ai, int lock)
return rc;
}
-static void disable_MAC( struct airo_info *ai, int lock ) {
+static void disable_MAC(struct airo_info *ai, int lock)
+{
Cmd cmd;
Resp rsp;
@@ -3630,13 +3648,15 @@ static void disable_MAC( struct airo_info *ai, int lock ) {
up(&ai->sem);
}
-static void enable_interrupts( struct airo_info *ai ) {
+static void enable_interrupts(struct airo_info *ai)
+{
/* Enable the interrupts */
- OUT4500( ai, EVINTEN, STATUS_INTS );
+ OUT4500(ai, EVINTEN, STATUS_INTS);
}
-static void disable_interrupts( struct airo_info *ai ) {
- OUT4500( ai, EVINTEN, 0 );
+static void disable_interrupts(struct airo_info *ai)
+{
+ OUT4500(ai, EVINTEN, 0);
}
static void mpi_receive_802_3(struct airo_info *ai)
@@ -3660,7 +3680,7 @@ static void mpi_receive_802_3(struct airo_info *ai)
ai->dev->stats.rx_dropped++;
goto badrx;
}
- buffer = skb_put(skb,len);
+ buffer = skb_put(skb, len);
memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
if (ai->micstats.enabled) {
memcpy(&micbuf,
@@ -3739,8 +3759,8 @@ static void mpi_receive_802_11(struct airo_info *ai)
fc = get_unaligned((__le16 *)ptr);
hdrlen = header_len(fc);
- skb = dev_alloc_skb( len + hdrlen + 2 );
- if ( !skb ) {
+ skb = dev_alloc_skb(len + hdrlen + 2);
+ if (!skb) {
ai->dev->stats.rx_dropped++;
goto badrx;
}
@@ -3784,7 +3804,7 @@ static void mpi_receive_802_11(struct airo_info *ai)
skb->dev = ai->wifidev;
skb->protocol = htons(ETH_P_802_2);
skb->ip_summed = CHECKSUM_NONE;
- netif_rx( skb );
+ netif_rx(skb);
badrx:
if (rxd.valid == 0) {
@@ -3815,7 +3835,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
WepKeyRid wkr;
int rc;
- memset( &mySsid, 0, sizeof( mySsid ) );
+ memset(&mySsid, 0, sizeof(mySsid));
kfree (ai->flash);
ai->flash = NULL;
@@ -3824,12 +3844,12 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
if (lock && down_interruptible(&ai->sem))
return ERROR;
- if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
if (lock)
up(&ai->sem);
return ERROR;
}
- disable_MAC( ai, 0);
+ disable_MAC(ai, 0);
// Let's figure out if we need to use the AUX port
if (!test_bit(FLAG_MPI,&ai->flags)) {
@@ -3859,13 +3879,13 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
ai->SSID = NULL;
// general configuration (read/modify/write)
status = readConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
status = readCapabilityRid(ai, &cap_rid, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
- status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock);
- if ( status == SUCCESS ) {
+ status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
+ if (status == SUCCESS) {
if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
}
@@ -3890,15 +3910,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
/* Save off the MAC */
- for( i = 0; i < ETH_ALEN; i++ ) {
+ for (i = 0; i < ETH_ALEN; i++) {
mac[i] = ai->config.macAddr[i];
}
/* Check to see if there are any insmod configured
rates to add */
- if ( rates[0] ) {
- memset(ai->config.rates,0,sizeof(ai->config.rates));
- for( i = 0; i < 8 && rates[i]; i++ ) {
+ if (rates[0]) {
+ memset(ai->config.rates, 0, sizeof(ai->config.rates));
+ for (i = 0; i < 8 && rates[i]; i++) {
ai->config.rates[i] = rates[i];
}
}
@@ -3906,9 +3926,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
/* Setup the SSIDs if present */
- if ( ssids[0] ) {
+ if (ssids[0]) {
int i;
- for( i = 0; i < 3 && ssids[i]; i++ ) {
+ for (i = 0; i < 3 && ssids[i]; i++) {
size_t len = strlen(ssids[i]);
if (len > 32)
len = 32;
@@ -3919,12 +3939,12 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
status = writeConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
/* Set up the SSID list */
- if ( ssids[0] ) {
+ if (ssids[0]) {
status = writeSsidRid(ai, &mySsid, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
}
status = enable_MAC(ai, lock);
@@ -3939,14 +3959,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
ai->defindex = wkr.mac[0];
}
rc = readWepKeyRid(ai, &wkr, 0, lock);
- } while(lastindex != wkr.kindex);
+ } while (lastindex != wkr.kindex);
try_auto_wep(ai);
return SUCCESS;
}
-static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
+static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp)
+{
// I'm really paranoid about letting it run forever!
int max_tries = 600000;
@@ -3966,7 +3987,7 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
schedule();
}
- if ( max_tries == -1 ) {
+ if (max_tries == -1) {
airo_print_err(ai->dev->name,
"Max tries exceeded when issuing command");
if (IN4500(ai, COMMAND) & COMMAND_BUSY)
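
A minimal sketch of the calling convention issuecommand() expects, mirroring the proc_BSSList_open() and airo_set_wap() call sites later in this patch (Cmd, Resp and CMD_LISTBSS are the driver's types and constants; ai->sem serializes access to the card):

	Cmd cmd;
	Resp rsp;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = CMD_LISTBSS;             /* kick off a BSS scan */
	if (down_interruptible(&ai->sem))
		return -ERESTARTSYS;
	issuecommand(ai, &cmd, &rsp);
	up(&ai->sem);
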
@@ -3998,7 +4019,7 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
/* Sets up the bap to start exchanging data. whichbap should
* be one of the BAP0 or BAP1 defines. Locks should be held before
* calling! */
-static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
+static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap)
{
int timeout = 50;
int max_tries = 3;
@@ -4013,15 +4034,15 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
if (timeout--) {
continue;
}
- } else if ( status & BAP_ERR ) {
+ } else if (status & BAP_ERR) {
/* invalid rid or offset */
airo_print_err(ai->dev->name, "BAP error %x %d",
- status, whichbap );
+ status, whichbap);
return ERROR;
} else if (status & BAP_DONE) { // success
return SUCCESS;
}
- if ( !(max_tries--) ) {
+ if (!(max_tries--)) {
airo_print_err(ai->dev->name,
"BAP setup error too many retries\n");
return ERROR;
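
For orientation, bap_setup() only positions the Buffer Access Path; the data itself then moves through the driver's bap_read/bap_write helpers. A sketch of the usual pairing, assuming the caller already holds the locks the comment above demands (buf and buflen are hypothetical):

	if (bap_setup(ai, rid, 0, BAP1) != SUCCESS)   /* seek to the RID */
		return ERROR;
	bap_read(ai, (__le16 *)buf, buflen, BAP1);    /* then stream it out */
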
@@ -4067,15 +4088,15 @@ static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
next = aux_setup(ai, page, offset, &len);
words = (bytelen+1)>>1;
- for (i=0; i<words;) {
+ for (i = 0; i < words;) {
int count;
count = (len>>1) < (words-i) ? (len>>1) : (words-i);
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i,count );
+ if (!do8bitIO)
+ insw(ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count);
else
- insb( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i, count << 1 );
+ insb(ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count << 1);
i += count;
if (i<words) {
next = aux_setup(ai, next, 4, &len);
@@ -4091,10 +4112,10 @@ static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
int bytelen, int whichbap)
{
bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1 );
+ if (!do8bitIO)
+ insw(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1);
else
- insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
+ insb(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen);
return SUCCESS;
}
@@ -4103,11 +4124,11 @@ static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
int bytelen, int whichbap)
{
bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- outsw( ai->dev->base_addr+DATA0+whichbap,
- pu16Src, bytelen>>1 );
+ if (!do8bitIO)
+ outsw(ai->dev->base_addr+DATA0+whichbap,
+ pu16Src, bytelen>>1);
else
- outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
+ outsb(ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen);
return SUCCESS;
}
@@ -4122,7 +4143,7 @@ static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
cmd.parm0 = rid;
status = issuecommand(ai, &cmd, &rsp);
if (status != 0) return status;
- if ( (rsp.status & 0x7F00) != 0) {
+ if ((rsp.status & 0x7F00) != 0) {
return (accmd << 8) + (rsp.rsp0 & 0xFF);
}
return 0;
@@ -4177,10 +4198,10 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in
// length for remaining part of rid
len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2;
- if ( len <= 2 ) {
+ if (len <= 2) {
airo_print_err(ai->dev->name,
"Rid %x has a length of %d which is too short",
- (int)rid, (int)len );
+ (int)rid, (int)len);
rc = ERROR;
goto done;
}
@@ -4248,7 +4269,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
}
} else {
// --- first access so that we can write the rid data
- if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
+ if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
rc = status;
goto done;
}
@@ -4285,7 +4306,7 @@ static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
txFid = ERROR;
goto done;
}
- if ( (rsp.status & 0xFF00) != 0) {
+ if ((rsp.status & 0xFF00) != 0) {
txFid = ERROR;
goto done;
}
@@ -4344,9 +4365,9 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
}
len -= ETH_ALEN * 2;
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
+ if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
(ntohs(((__be16 *)pPacket)[6]) != 0x888E)) {
- if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
+ if (encapsulate(ai, (etherHead *)pPacket, &pMic, len) != SUCCESS)
return ERROR;
miclen = sizeof(pMic);
}
@@ -4356,17 +4377,17 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
/* The hardware addresses aren't counted as part of the payload, so
* we have to subtract off the 12 bytes of addresses */
payloadLen = cpu_to_le16(len + miclen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1);
if (miclen)
bap_write(ai, (__le16*)&pMic, miclen, BAP1);
bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1);
// issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
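
A worked example of the length bookkeeping above, using a hypothetical full-size frame: the two hardware addresses are written to the card separately, so their 12 bytes come off the payload count, and the MIC (when enabled) goes back on:

	int len = 1514;                         /* hypothetical 802.3 frame */

	len -= ETH_ALEN * 2;                    /* 12 address bytes not counted */
	payloadLen = cpu_to_le16(len + miclen); /* 1502, plus MIC if enabled */
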
@@ -4395,18 +4416,18 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
/* The 802.11 header isn't counted as part of the payload, so
* we have to subtract off the header bytes */
payloadLen = cpu_to_le16(len-hdrlen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1);
bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1);
bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1);
// issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
@@ -4415,25 +4436,25 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
* like! Feel free to clean it up!
*/
-static ssize_t proc_read( struct file *file,
+static ssize_t proc_read(struct file *file,
char __user *buffer,
size_t len,
loff_t *offset);
-static ssize_t proc_write( struct file *file,
+static ssize_t proc_write(struct file *file,
const char __user *buffer,
size_t len,
- loff_t *offset );
-static int proc_close( struct inode *inode, struct file *file );
-
-static int proc_stats_open( struct inode *inode, struct file *file );
-static int proc_statsdelta_open( struct inode *inode, struct file *file );
-static int proc_status_open( struct inode *inode, struct file *file );
-static int proc_SSID_open( struct inode *inode, struct file *file );
-static int proc_APList_open( struct inode *inode, struct file *file );
-static int proc_BSSList_open( struct inode *inode, struct file *file );
-static int proc_config_open( struct inode *inode, struct file *file );
-static int proc_wepkey_open( struct inode *inode, struct file *file );
+ loff_t *offset);
+static int proc_close(struct inode *inode, struct file *file);
+
+static int proc_stats_open(struct inode *inode, struct file *file);
+static int proc_statsdelta_open(struct inode *inode, struct file *file);
+static int proc_status_open(struct inode *inode, struct file *file);
+static int proc_SSID_open(struct inode *inode, struct file *file);
+static int proc_APList_open(struct inode *inode, struct file *file);
+static int proc_BSSList_open(struct inode *inode, struct file *file);
+static int proc_config_open(struct inode *inode, struct file *file);
+static int proc_wepkey_open(struct inode *inode, struct file *file);
static const struct proc_ops proc_statsdelta_ops = {
.proc_read = proc_read,
@@ -4508,12 +4529,13 @@ struct proc_data {
void (*on_close) (struct inode *, struct file *);
};
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv ) {
+static int setup_proc_entry(struct net_device *dev,
+ struct airo_info *apriv)
+{
struct proc_dir_entry *entry;
/* First setup the device directory */
- strcpy(apriv->proc_name,dev->name);
+ strcpy(apriv->proc_name, dev->name);
apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
airo_entry);
if (!apriv->proc_entry)
@@ -4582,8 +4604,8 @@ fail:
return -ENOMEM;
}
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv )
+static int takedown_proc_entry(struct net_device *dev,
+ struct airo_info *apriv)
{
remove_proc_subtree(apriv->proc_name, airo_entry);
return 0;
@@ -4601,10 +4623,10 @@ static int takedown_proc_entry( struct net_device *dev,
* The read routine is generic; it relies on the preallocated rbuffer
* to supply the data.
*/
-static ssize_t proc_read( struct file *file,
+static ssize_t proc_read(struct file *file,
char __user *buffer,
size_t len,
- loff_t *offset )
+ loff_t *offset)
{
struct proc_data *priv = file->private_data;
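
For context, the contract between these generic routines and the proc_*_open() handlers that follow: each open handler allocates a struct proc_data, fills rbuffer/readlen for the read side, and, for writable files, sizes wbuffer/maxwritelen and points on_close at the parser that consumes the written text. A minimal sketch (error unwinding elided):

	struct proc_data *data;

	file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL);
	if (!file->private_data)
		return -ENOMEM;
	data = file->private_data;
	data->rbuffer = kmalloc(2048, GFP_KERNEL);      /* read side */
	data->maxwritelen = 80;                         /* write side */
	data->wbuffer = kzalloc(data->maxwritelen, GFP_KERNEL);
	data->on_close = proc_config_on_close;          /* runs at close */
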
@@ -4619,10 +4641,10 @@ static ssize_t proc_read( struct file *file,
* The write routine is generic; it fills in a preallocated wbuffer
* with the supplied data.
*/
-static ssize_t proc_write( struct file *file,
+static ssize_t proc_write(struct file *file,
const char __user *buffer,
size_t len,
- loff_t *offset )
+ loff_t *offset)
{
ssize_t ret;
struct proc_data *priv = file->private_data;
@@ -4648,10 +4670,10 @@ static int proc_status_open(struct inode *inode, struct file *file)
u16 mode;
int i;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -4671,7 +4693,7 @@ static int proc_status_open(struct inode *inode, struct file *file)
mode & 0x100 ? "KEY ": "",
mode & 0x200 ? "WEP ": "",
mode & 0x8000 ? "ERR ": "");
- sprintf( data->rbuffer+i, "Mode: %x\n"
+ sprintf(data->rbuffer+i, "Mode: %x\n"
"Signal Strength: %d\n"
"Signal Quality: %d\n"
"SSID: %-.*s\n"
@@ -4701,26 +4723,28 @@ static int proc_status_open(struct inode *inode, struct file *file)
le16_to_cpu(cap_rid.softVer),
le16_to_cpu(cap_rid.softSubVer),
le16_to_cpu(cap_rid.bootBlockVer));
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
static int proc_stats_rid_open(struct inode*, struct file*, u16);
-static int proc_statsdelta_open( struct inode *inode,
- struct file *file ) {
+static int proc_statsdelta_open(struct inode *inode,
+ struct file *file)
+{
if (file->f_mode&FMODE_WRITE) {
return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
}
return proc_stats_rid_open(inode, file, RID_STATSDELTA);
}
-static int proc_stats_open( struct inode *inode, struct file *file ) {
+static int proc_stats_open(struct inode *inode, struct file *file)
+{
return proc_stats_rid_open(inode, file, RID_STATS);
}
-static int proc_stats_rid_open( struct inode *inode,
+static int proc_stats_rid_open(struct inode *inode,
struct file *file,
- u16 rid )
+ u16 rid)
{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
@@ -4730,10 +4754,10 @@ static int proc_stats_rid_open( struct inode *inode,
__le32 *vals = stats.vals;
int len;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(4096, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -4742,7 +4766,7 @@ static int proc_stats_rid_open( struct inode *inode,
len = le16_to_cpu(stats.len);
j = 0;
- for(i=0; statsLabels[i]!=(char *)-1 && i*4<len; i++) {
+ for (i = 0; statsLabels[i] != (char *)-1 && i*4 < len; i++) {
if (!statsLabels[i]) continue;
if (j+strlen(statsLabels[i])+16>4096) {
airo_print_warn(apriv->dev->name,
@@ -4759,7 +4783,8 @@ static int proc_stats_rid_open( struct inode *inode,
return 0;
}
-static int get_dec_u16( char *buffer, int *start, int limit ) {
+static int get_dec_u16(char *buffer, int *start, int limit)
+{
u16 value;
int valid = 0;
for (value = 0; *start < limit && buffer[*start] >= '0' &&
@@ -4768,7 +4793,7 @@ static int get_dec_u16( char *buffer, int *start, int limit ) {
value *= 10;
value += buffer[*start] - '0';
}
- if ( !valid ) return -1;
+ if (!valid) return -1;
return value;
}
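
Usage sketch for get_dec_u16(), matching the "Channel: " handling below: it consumes digits at buffer[*start] while *start < limit, advancing *start past them, and returns -1 if no digit was found:

	int v, i = 0;

	v = get_dec_u16(line, &i, 3);           /* e.g. parses "11" from "11\n" */
	if (v != -1) {
		ai->config.channelSet = cpu_to_le16(v);
		set_bit(FLAG_COMMIT, &ai->flags);
	}
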
@@ -4789,15 +4814,15 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
struct airo_info *ai = dev->ml_priv;
char *line;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
readConfigRid(ai, 1);
set_bit (FLAG_COMMIT, &ai->flags);
line = data->wbuffer;
- while( line[0] ) {
+ while (line[0]) {
/*** Mode processing */
- if ( !strncmp( line, "Mode: ", 6 ) ) {
+ if (!strncmp(line, "Mode: ", 6)) {
line += 6;
if (sniffing_mode(ai))
set_bit (FLAG_RESET, &ai->flags);
@@ -4805,19 +4830,19 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
clear_bit (FLAG_802_11, &ai->flags);
ai->config.opmode &= ~MODE_CFG_MASK;
ai->config.scanMode = SCANMODE_ACTIVE;
- if ( line[0] == 'a' ) {
+ if (line[0] == 'a') {
ai->config.opmode |= MODE_STA_IBSS;
} else {
ai->config.opmode |= MODE_STA_ESS;
- if ( line[0] == 'r' ) {
+ if (line[0] == 'r') {
ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
ai->config.scanMode = SCANMODE_PASSIVE;
set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'y' ) {
+ } else if (line[0] == 'y') {
ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
ai->config.scanMode = SCANMODE_PASSIVE;
set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'l' )
+ } else if (line[0] == 'l')
ai->config.rmode |= RXMODE_LANMON;
}
set_bit (FLAG_COMMIT, &ai->flags);
@@ -4826,68 +4851,68 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
/*** Radio status */
else if (!strncmp(line,"Radio: ", 7)) {
line += 7;
- if (!strncmp(line,"off",3)) {
+ if (!strncmp(line,"off", 3)) {
set_bit (FLAG_RADIO_OFF, &ai->flags);
} else {
clear_bit (FLAG_RADIO_OFF, &ai->flags);
}
}
/*** NodeName processing */
- else if ( !strncmp( line, "NodeName: ", 10 ) ) {
+ else if (!strncmp(line, "NodeName: ", 10)) {
int j;
line += 10;
- memset( ai->config.nodeName, 0, 16 );
+ memset(ai->config.nodeName, 0, 16);
/* Do the name, assume a space between the mode and node name */
- for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
+ for (j = 0; j < 16 && line[j] != '\n'; j++) {
ai->config.nodeName[j] = line[j];
}
set_bit (FLAG_COMMIT, &ai->flags);
}
/*** PowerMode processing */
- else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
+ else if (!strncmp(line, "PowerMode: ", 11)) {
line += 11;
- if ( !strncmp( line, "PSPCAM", 6 ) ) {
+ if (!strncmp(line, "PSPCAM", 6)) {
ai->config.powerSaveMode = POWERSAVE_PSPCAM;
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "PSP", 3 ) ) {
+ } else if (!strncmp(line, "PSP", 3)) {
ai->config.powerSaveMode = POWERSAVE_PSP;
set_bit (FLAG_COMMIT, &ai->flags);
} else {
ai->config.powerSaveMode = POWERSAVE_CAM;
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "DataRates: ", 11 ) ) {
+ } else if (!strncmp(line, "DataRates: ", 11)) {
int v, i = 0, k = 0; /* i is index into line,
k is index to rates */
line += 11;
- while((v = get_dec_u16(line, &i, 3))!=-1) {
+ while ((v = get_dec_u16(line, &i, 3)) != -1) {
ai->config.rates[k++] = (u8)v;
line += i + 1;
i = 0;
}
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "Channel: ", 9 ) ) {
+ } else if (!strncmp(line, "Channel: ", 9)) {
int v, i = 0;
line += 9;
v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
+ if (v != -1) {
ai->config.channelSet = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
+ } else if (!strncmp(line, "XmitPower: ", 11)) {
int v, i = 0;
line += 11;
v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
+ if (v != -1) {
ai->config.txPower = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "WEP: ", 5 ) ) {
+ } else if (!strncmp(line, "WEP: ", 5)) {
line += 5;
- switch( line[0] ) {
+ switch (line[0]) {
case 's':
set_auth_type(ai, AUTH_SHAREDKEY);
break;
@@ -4899,7 +4924,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
break;
}
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
+ } else if (!strncmp(line, "LongRetryLimit: ", 16)) {
int v, i = 0;
line += 16;
@@ -4907,7 +4932,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>255) ? 255 : v);
ai->config.longRetryLimit = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
+ } else if (!strncmp(line, "ShortRetryLimit: ", 17)) {
int v, i = 0;
line += 17;
@@ -4915,7 +4940,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>255) ? 255 : v);
ai->config.shortRetryLimit = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
+ } else if (!strncmp(line, "RTSThreshold: ", 14)) {
int v, i = 0;
line += 14;
@@ -4923,7 +4948,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
ai->config.rtsThres = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
+ } else if (!strncmp(line, "TXMSDULifetime: ", 16)) {
int v, i = 0;
line += 16;
@@ -4931,7 +4956,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : v;
ai->config.txLifetime = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
+ } else if (!strncmp(line, "RXMSDULifetime: ", 16)) {
int v, i = 0;
line += 16;
@@ -4939,17 +4964,17 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : v;
ai->config.rxLifetime = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
+ } else if (!strncmp(line, "TXDiversity: ", 13)) {
ai->config.txDiversity =
(line[13]=='l') ? 1 :
((line[13]=='r')? 2: 3);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
+ } else if (!strncmp(line, "RXDiversity: ", 13)) {
ai->config.rxDiversity =
(line[13]=='l') ? 1 :
((line[13]=='r')? 2: 3);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
+ } else if (!strncmp(line, "FragThreshold: ", 15)) {
int v, i = 0;
line += 15;
@@ -4961,24 +4986,24 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
} else if (!strncmp(line, "Modulation: ", 12)) {
line += 12;
switch(*line) {
- case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'd': ai->config.modulation = MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'c': ai->config.modulation = MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'm': ai->config.modulation = MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
default: airo_print_warn(ai->dev->name, "Unknown modulation");
}
} else if (!strncmp(line, "Preamble: ", 10)) {
line += 10;
switch(*line) {
- case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'a': ai->config.preamble = PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'l': ai->config.preamble = PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 's': ai->config.preamble = PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
default: airo_print_warn(ai->dev->name, "Unknown preamble");
}
} else {
airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
}
- while( line[0] && line[0] != '\n' ) line++;
- if ( line[0] ) line++;
+ while (line[0] && line[0] != '\n') line++;
+ if (line[0]) line++;
}
airo_config_commit(dev, NULL, NULL, NULL);
}
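
An example of the line-oriented text this parser accepts: what a user might write to the Config proc file (values hypothetical; each keyword selects one strncmp() branch above):

	static const char example_config[] =
		"Mode: ESS\n"
		"Radio: on\n"
		"NodeName: testnode\n"
		"PowerMode: CAM\n"
		"DataRates: 2 4 11 22\n"
		"Channel: 6\n"
		"XmitPower: 50\n"
		"WEP: shared\n";
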
@@ -5001,14 +5026,14 @@ static int proc_config_open(struct inode *inode, struct file *file)
int i;
__le16 mode;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
- if ((data->wbuffer = kzalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(2048, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5019,7 +5044,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
readConfigRid(ai, 1);
mode = ai->config.opmode & MODE_CFG_MASK;
- i = sprintf( data->rbuffer,
+ i = sprintf(data->rbuffer,
"Mode: %s\n"
"Radio: %s\n"
"NodeName: %-16s\n"
@@ -5048,7 +5073,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
le16_to_cpu(ai->config.channelSet),
le16_to_cpu(ai->config.txPower)
);
- sprintf( data->rbuffer + i,
+ sprintf(data->rbuffer + i,
"LongRetryLimit: %d\n"
"ShortRetryLimit: %d\n"
"RTSThreshold: %d\n"
@@ -5079,7 +5104,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
ai->config.preamble == PREAMBLE_LONG ? "long" :
ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
);
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
@@ -5119,14 +5144,15 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
enable_MAC(ai, 1);
}
-static void proc_APList_on_close( struct inode *inode, struct file *file ) {
+static void proc_APList_on_close(struct inode *inode, struct file *file)
+{
struct proc_data *data = file->private_data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
APListRid *APList_rid = &ai->APList;
int i;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
memset(APList_rid, 0, sizeof(*APList_rid));
APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
@@ -5140,8 +5166,9 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
}
/* This function wraps PC4500_writerid with a MAC disable */
-static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
- int len, int dummy ) {
+static int do_writerid(struct airo_info *ai, u16 rid, const void *rid_data,
+ int len, int dummy)
+{
int rc;
disable_MAC(ai, 1);
@@ -5241,7 +5268,8 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
return rc;
}
-static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
+static void proc_wepkey_on_close(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5253,7 +5281,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
memset(key, 0, sizeof(key));
data = file->private_data;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
(data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
@@ -5273,7 +5301,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
return;
}
- for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
+ for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
switch(i%3) {
case 0:
key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
@@ -5291,7 +5319,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
}
}
-static int proc_wepkey_open( struct inode *inode, struct file *file )
+static int proc_wepkey_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
@@ -5299,20 +5327,20 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
char *ptr;
WepKeyRid wkr;
__le16 lastindex;
- int j=0;
+ int j = 0;
int rc;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
memset(&wkr, 0, sizeof(wkr));
data = file->private_data;
- if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kzalloc(180, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
data->writelen = 0;
data->maxwritelen = 80;
- if ((data->wbuffer = kzalloc( 80, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(80, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5333,9 +5361,9 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
le16_to_cpu(wkr.klen));
}
readWepKeyRid(ai, &wkr, 0, 1);
- } while((lastindex != wkr.kindex) && (j < 180-30));
+ } while ((lastindex != wkr.kindex) && (j < 180-30));
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
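
The write-side format proc_wepkey_on_close() above parses, sketched with a hypothetical key: a slot digit '0'..'3', a separator, then hex digit pairs where every third character is skipped as a delimiter:

	const char *example = "1 01:23:45:67:89";   /* hypothetical write */
	int index = example[0] - '0';               /* key slot 1 */
	/* the i%3 loop above then packs digit pairs: key[0] = 0x01, ... */
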
@@ -5348,10 +5376,10 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
char *ptr;
SsidRid SSID_rid;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -5379,11 +5407,12 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
*ptr++ = '\n';
}
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_APList_open( struct inode *inode, struct file *file ) {
+static int proc_APList_open(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5391,16 +5420,16 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
char *ptr;
APListRid *APList_rid = &ai->APList;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
data->writelen = 0;
data->maxwritelen = 4*6*3;
- if ((data->wbuffer = kzalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(data->maxwritelen, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5408,20 +5437,21 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
data->on_close = proc_APList_on_close;
ptr = data->rbuffer;
- for( i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
// We end when we find a zero MAC
- if ( !*(int*)APList_rid->ap[i] &&
+ if (!*(int*)APList_rid->ap[i] &&
!*(int*)&APList_rid->ap[i][2]) break;
ptr += sprintf(ptr, "%pM\n", APList_rid->ap[i]);
}
if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_BSSList_open( struct inode *inode, struct file *file ) {
+static int proc_BSSList_open(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5431,10 +5461,10 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
/* If doLoseSync is not 1, we won't do a Lose Sync */
int doLoseSync = -1;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(1024, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -5454,7 +5484,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
return -ENETDOWN;
}
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
if (down_interruptible(&ai->sem)) {
kfree(data->rbuffer);
kfree(file->private_data);
@@ -5472,7 +5502,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
Since it is a rare condition, we'll just live with it; otherwise
we have to add a spin lock... */
rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
- while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
+ while (rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
ptr += sprintf(ptr, "%pM %.*s rssi = %d",
BSSList_rid.bssid,
(int)BSSList_rid.ssidLen,
@@ -5487,11 +5517,11 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
rc = readBSSListRid(ai, 0, &BSSList_rid);
}
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_close( struct inode *inode, struct file *file )
+static int proc_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
@@ -5508,7 +5538,8 @@ static int proc_close( struct inode *inode, struct file *file )
will switch WEP modes to see if that will help. If the card is
associated, we will check every minute to see if anything has
changed. */
-static void timer_func( struct net_device *dev ) {
+static void timer_func(struct net_device *dev)
+{
struct airo_info *apriv = dev->ml_priv;
/* We don't have a link so try changing the authtype */
@@ -5642,7 +5673,7 @@ static int __maybe_unused airo_pci_resume(struct device *dev_d)
}
#endif
-static int __init airo_init_module( void )
+static int __init airo_init_module(void)
{
int i;
@@ -5658,9 +5689,10 @@ static int __init airo_init_module( void )
for (i = 0; i < 4 && io[i] && irq[i]; i++) {
airo_print_info("", "Trying to configure ISA adapter at irq=%d "
- "io=0x%x", irq[i], io[i] );
- if (init_airo_card( irq[i], io[i], 0, NULL ))
+ "io = 0x%x", irq[i], io[i]);
+ if (init_airo_card(irq[i], io[i], 0, NULL)) {
/* do nothing */ ;
+ }
}
#ifdef CONFIG_PCI
@@ -5680,10 +5712,10 @@ static int __init airo_init_module( void )
return 0;
}
-static void __exit airo_cleanup_module( void )
+static void __exit airo_cleanup_module(void)
{
struct airo_info *ai;
- while(!list_empty(&airo_devices)) {
+ while (!list_empty(&airo_devices)) {
ai = list_entry(airo_devices.next, struct airo_info, dev_list);
airo_print_info(ai->dev->name, "Unregistering...");
stop_airo_card(ai->dev, 1);
@@ -5783,7 +5815,7 @@ static int airo_set_freq(struct net_device *dev,
int rc = -EINPROGRESS; /* Call commit handler */
/* If setting by frequency, convert to a channel */
- if(fwrq->e == 1) {
+ if (fwrq->e == 1) {
int f = fwrq->m / 100000;
/* Hack to fall through... */
@@ -5797,7 +5829,7 @@ static int airo_set_freq(struct net_device *dev,
int channel = fwrq->m;
/* We should do a better check than that,
* based on the card capability !!! */
- if((channel < 1) || (channel > 14)) {
+ if ((channel < 1) || (channel > 14)) {
airo_print_dbg(dev->name, "New channel value of %d is invalid!",
fwrq->m);
rc = -EINVAL;
@@ -5831,7 +5863,7 @@ static int airo_get_freq(struct net_device *dev,
readStatusRid(local, &status_rid, 1);
ch = le16_to_cpu(status_rid.channel);
- if((ch > 0) && (ch < 15)) {
+ if ((ch > 0) && (ch < 15)) {
fwrq->m = 100000 *
ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
fwrq->e = 1;
@@ -5935,7 +5967,7 @@ static int airo_set_wap(struct net_device *dev,
else if (is_broadcast_ether_addr(awrq->sa_data) ||
is_zero_ether_addr(awrq->sa_data)) {
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LOSE_SYNC;
+ cmd.cmd = CMD_LOSE_SYNC;
if (down_interruptible(&local->sem))
return -ERESTARTSYS;
issuecommand(local, &cmd, &rsp);
@@ -5984,7 +6016,7 @@ static int airo_set_nick(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
/* Check the size of the string */
- if(dwrq->length > 16) {
+ if (dwrq->length > 16) {
return -E2BIG;
}
readConfigRid(local, 1);
@@ -6032,7 +6064,7 @@ static int airo_set_rate(struct net_device *dev,
readCapabilityRid(local, &cap_rid, 1);
/* Which type of value ? */
- if((vwrq->value < 8) && (vwrq->value >= 0)) {
+ if ((vwrq->value < 8) && (vwrq->value >= 0)) {
/* Setting by rate index */
/* Find value in the magic rate table */
brate = cap_rid.supportedRates[vwrq->value];
@@ -6041,36 +6073,36 @@ static int airo_set_rate(struct net_device *dev,
u8 normvalue = (u8) (vwrq->value/500000);
/* Check if rate is valid */
- for(i = 0 ; i < 8 ; i++) {
- if(normvalue == cap_rid.supportedRates[i]) {
+ for (i = 0 ; i < 8 ; i++) {
+ if (normvalue == cap_rid.supportedRates[i]) {
brate = normvalue;
break;
}
}
}
/* -1 designates the max rate (mostly auto mode) */
- if(vwrq->value == -1) {
+ if (vwrq->value == -1) {
/* Get the highest available rate */
- for(i = 0 ; i < 8 ; i++) {
- if(cap_rid.supportedRates[i] == 0)
+ for (i = 0 ; i < 8 ; i++) {
+ if (cap_rid.supportedRates[i] == 0)
break;
}
- if(i != 0)
+ if (i != 0)
brate = cap_rid.supportedRates[i - 1];
}
/* Check that it is valid */
- if(brate == 0) {
+ if (brate == 0) {
return -EINVAL;
}
readConfigRid(local, 1);
/* Now, check if we want a fixed or auto value */
- if(vwrq->fixed == 0) {
+ if (vwrq->fixed == 0) {
/* Fill all the rates up to this max rate */
memset(local->config.rates, 0, 8);
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
local->config.rates[i] = cap_rid.supportedRates[i];
- if(local->config.rates[i] == brate)
+ if (local->config.rates[i] == brate)
break;
}
} else {
@@ -6118,9 +6150,9 @@ static int airo_set_rts(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int rthr = vwrq->value;
- if(vwrq->disabled)
+ if (vwrq->disabled)
rthr = AIRO_DEF_MTU;
- if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
+ if ((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
return -EINVAL;
}
readConfigRid(local, 1);
@@ -6161,9 +6193,9 @@ static int airo_set_frag(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int fthr = vwrq->value;
- if(vwrq->disabled)
+ if (vwrq->disabled)
fthr = AIRO_DEF_MTU;
- if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
+ if ((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
return -EINVAL;
}
fthr &= ~0x1; /* Get an even value - is it really needed ??? */
@@ -6340,7 +6372,7 @@ static int airo_set_encode(struct net_device *dev,
else
key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
- if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
memset(key.key, 0, MAX_KEY_SIZE);
/* Copy the key in the driver */
@@ -6357,7 +6389,7 @@ static int airo_set_encode(struct net_device *dev,
/* WE specify that if a valid key is set, encryption
* should be enabled (user may turn it off later)
* This is also how "iwconfig ethX key on" works */
- if((index == current_index) && (key.len > 0) &&
+ if ((index == current_index) && (key.len > 0) &&
(local->config.authType == AUTH_OPEN))
set_auth_type(local, AUTH_ENCRYPT);
} else {
@@ -6380,7 +6412,7 @@ static int airo_set_encode(struct net_device *dev,
/* Read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED)
set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if(dwrq->flags & IW_ENCODE_RESTRICTED)
+ if (dwrq->flags & IW_ENCODE_RESTRICTED)
set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if (dwrq->flags & IW_ENCODE_OPEN)
set_auth_type(local, AUTH_ENCRYPT); /* Only Wep */
@@ -6458,7 +6490,7 @@ static int airo_set_encodeext(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 );
+ int perm = (encoding->flags & IW_ENCODE_TEMP ? 0 : 1);
__le16 currentAuthType = local->config.authType;
int idx, key_len, alg = ext->alg, set_key = 1, rc;
wep_key_t key;
@@ -6540,7 +6572,7 @@ static int airo_set_encodeext(struct net_device *dev,
/* Read the flags */
if (encoding->flags & IW_ENCODE_DISABLED)
set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if(encoding->flags & IW_ENCODE_RESTRICTED)
+ if (encoding->flags & IW_ENCODE_RESTRICTED)
set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if (encoding->flags & IW_ENCODE_OPEN)
set_auth_type(local, AUTH_ENCRYPT);
@@ -6606,7 +6638,7 @@ static int airo_get_encodeext(struct net_device *dev,
/* We can't return the key, so set the proper flag and return zero */
encoding->flags |= IW_ENCODE_NOKEY;
memset(extra, 0, 16);
-
+
/* Copy the key to the user buffer */
wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
if (wep_key_len < 0) {
@@ -6806,13 +6838,13 @@ static int airo_set_retry(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int rc = -EINVAL;
- if(vwrq->disabled) {
+ if (vwrq->disabled) {
return -EINVAL;
}
readConfigRid(local, 1);
- if(vwrq->flags & IW_RETRY_LIMIT) {
+ if (vwrq->flags & IW_RETRY_LIMIT) {
__le16 v = cpu_to_le16(vwrq->value);
- if(vwrq->flags & IW_RETRY_LONG)
+ if (vwrq->flags & IW_RETRY_LONG)
local->config.longRetryLimit = v;
else if (vwrq->flags & IW_RETRY_SHORT)
local->config.shortRetryLimit = v;
@@ -6824,7 +6856,7 @@ static int airo_set_retry(struct net_device *dev,
set_bit (FLAG_COMMIT, &local->flags);
rc = -EINPROGRESS; /* Call commit handler */
}
- if(vwrq->flags & IW_RETRY_LIFETIME) {
+ if (vwrq->flags & IW_RETRY_LIFETIME) {
local->config.txLifetime = cpu_to_le16(vwrq->value / 1024);
set_bit (FLAG_COMMIT, &local->flags);
rc = -EINPROGRESS; /* Call commit handler */
@@ -6847,16 +6879,16 @@ static int airo_get_retry(struct net_device *dev,
readConfigRid(local, 1);
/* Note : by default, display the min retry number */
- if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
vwrq->flags = IW_RETRY_LIFETIME;
vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024;
- } else if((vwrq->flags & IW_RETRY_LONG)) {
+ } else if ((vwrq->flags & IW_RETRY_LONG)) {
vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
vwrq->value = le16_to_cpu(local->config.longRetryLimit);
} else {
vwrq->flags = IW_RETRY_LIMIT;
vwrq->value = le16_to_cpu(local->config.shortRetryLimit);
- if(local->config.shortRetryLimit != local->config.longRetryLimit)
+ if (local->config.shortRetryLimit != local->config.longRetryLimit)
vwrq->flags |= IW_RETRY_SHORT;
}
@@ -6888,7 +6920,7 @@ static int airo_get_range(struct net_device *dev,
/* Should be based on cap_rid.country to give only
* what the current card supports */
k = 0;
- for(i = 0; i < 14; i++) {
+ for (i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
range->freq[k].m = 100000 *
ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
@@ -6918,9 +6950,9 @@ static int airo_get_range(struct net_device *dev,
}
range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
- if(range->bitrate[i] == 0)
+ if (range->bitrate[i] == 0)
break;
}
range->num_bitrates = i;
@@ -6928,7 +6960,7 @@ static int airo_get_range(struct net_device *dev,
/* Set an indication of the max TCP throughput
* in bit/s that we can expect using this interface.
* May be used for QoS stuff... Jean II */
- if(i > 2)
+ if (i > 2)
range->throughput = 5000 * 1000;
else
range->throughput = 1500 * 1000;
@@ -6938,7 +6970,7 @@ static int airo_get_range(struct net_device *dev,
range->min_frag = 256;
range->max_frag = AIRO_DEF_MTU;
- if(cap_rid.softCap & cpu_to_le16(2)) {
+ if (cap_rid.softCap & cpu_to_le16(2)) {
// WEP: RC4 40 bits
range->encoding_size[0] = 5;
// RC4 ~128 bits
@@ -6962,9 +6994,9 @@ static int airo_get_range(struct net_device *dev,
range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
/* Transmit Power - values are in mW */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]);
- if(range->txpower[i] == 0)
+ if (range->txpower[i] == 0)
break;
}
range->num_txpower = i;
@@ -7235,7 +7267,7 @@ static int airo_set_scan(struct net_device *dev,
/* Initiate a scan command */
ai->scan_timeout = RUN_AT(3*HZ);
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
issuecommand(ai, &cmd, &rsp);
wake = 1;
@@ -7276,7 +7308,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add the ESSID */
iwe.u.data.length = bss->ssidLen;
- if(iwe.u.data.length > 32)
+ if (iwe.u.data.length > 32)
iwe.u.data.length = 32;
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
@@ -7286,8 +7318,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add mode */
iwe.cmd = SIOCGIWMODE;
capabilities = bss->cap;
- if(capabilities & (CAP_ESS | CAP_IBSS)) {
- if(capabilities & CAP_ESS)
+ if (capabilities & (CAP_ESS | CAP_IBSS)) {
+ if (capabilities & CAP_ESS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
@@ -7327,7 +7359,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
- if(capabilities & CAP_PRIVACY)
+ if (capabilities & CAP_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
@@ -7343,9 +7375,9 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Those two flags are ignored... */
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
/* Max 8 values */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
/* NULL terminated */
- if(bss->rates[i] == 0)
+ if (bss->rates[i] == 0)
break;
/* Bit rate given in 500 kb/s units (+ 0x80) */
iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
@@ -7453,7 +7485,7 @@ static int airo_get_scan(struct net_device *dev,
&net->bss);
/* Check if there is space for one more entry */
- if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
+ if ((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
err = -E2BIG;
goto out;
@@ -7491,7 +7523,7 @@ static int airo_config_commit(struct net_device *dev,
readSsidRid(local, &SSID_rid);
if (test_bit(FLAG_MPI,&local->flags))
- setup_card(local, dev->dev_addr, 1 );
+ setup_card(local, dev->dev_addr, 1);
else
reset_airo_card(dev);
disable_MAC(local, 1);
@@ -7635,9 +7667,9 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int val = AIROMAGIC;
aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
+ if (copy_from_user(&com, rq->ifr_data, sizeof(com)))
rc = -EFAULT;
- else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
+ else if (copy_to_user(com.data, (char *)&val, sizeof(val)))
rc = -EFAULT;
}
break;
@@ -7651,24 +7683,24 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*/
{
aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
+ if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
rc = -EFAULT;
break;
}
/* Separate R/W functions bracket legality here
*/
- if ( com.command == AIRORSWVERSION ) {
+ if (com.command == AIRORSWVERSION) {
if (copy_to_user(com.data, swversion, sizeof(swversion)))
rc = -EFAULT;
else
rc = 0;
}
- else if ( com.command <= AIRORRID)
+ else if (com.command <= AIRORRID)
rc = readrids(dev,&com);
- else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) )
+ else if (com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2))
rc = writerids(dev,&com);
- else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART )
+ else if (com.command >= AIROFLSHRST && com.command <= AIRORESTART)
rc = flashcard(dev,&com);
else
rc = -EINVAL; /* Bad command in ioctl */
@@ -7770,7 +7802,8 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
* as needed. This represents the READ side of control I/O to
* the card
*/
-static int readrids(struct net_device *dev, aironet_ioctl *comp) {
+static int readrids(struct net_device *dev, aironet_ioctl *comp)
+{
unsigned short ridcode;
unsigned char *iobuf;
int len;
@@ -7800,7 +7833,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGSTATSC32: ridcode = RID_STATS; break;
case AIROGMICSTATS:
if (copy_to_user(comp->data, &ai->micstats,
- min((int)comp->len,(int)sizeof(ai->micstats))))
+ min((int)comp->len, (int)sizeof(ai->micstats))))
return -EFAULT;
return 0;
case AIRORRID: ridcode = comp->ridnum; break;
@@ -7817,7 +7850,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
+ PC4500_readrid(ai, ridcode, iobuf, RIDSIZE, 1);
/* get the count of bytes in the rid; docs say the 1st 2 bytes hold it.
* Then return it to the user.
* 9/22/2000 Honor user-given length
@@ -7836,7 +7869,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
* Danger, Will Robinson: write the rids here
*/
-static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+static int writerids(struct net_device *dev, aironet_ioctl *comp)
+{
struct airo_info *ai = dev->ml_priv;
int ridcode;
int enabled;
@@ -7893,10 +7927,10 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);
+ PC4500_readrid(ai, RID_STATSDELTACLEAR, iobuf, RIDSIZE, 1);
enabled = ai->micstats.enabled;
- memset(&ai->micstats,0,sizeof(ai->micstats));
+ memset(&ai->micstats, 0, sizeof(ai->micstats));
ai->micstats.enabled = enabled;
if (copy_to_user(comp->data, iobuf,
@@ -7910,13 +7944,13 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
default:
return -EOPNOTSUPP; /* Blarg! */
}
- if(comp->len > RIDSIZE)
+ if (comp->len > RIDSIZE)
return -EINVAL;
if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- if (copy_from_user(iobuf,comp->data,comp->len)) {
+ if (copy_from_user(iobuf, comp->data, comp->len)) {
kfree (iobuf);
return -EFAULT;
}
@@ -7933,7 +7967,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
clear_bit (FLAG_ADHOC, &ai->flags);
}
- if((*writer)(ai, ridcode, iobuf,comp->len,1)) {
+ if ((*writer)(ai, ridcode, iobuf, comp->len, 1)) {
kfree (iobuf);
return -EIO;
}
@@ -7950,7 +7984,8 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
* Flash command switch table
*/
-static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
+static int flashcard(struct net_device *dev, aironet_ioctl *comp)
+{
int z;
/* Only super-user can modify flash */
@@ -7969,23 +8004,23 @@ static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
return setflashmode((struct airo_info *)dev->ml_priv);
case AIROFLSHGCHR: /* Get char from aux */
- if(comp->len != sizeof(int))
+ if (comp->len != sizeof(int))
return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
+ if (copy_from_user(&z, comp->data, comp->len))
return -EFAULT;
return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);
case AIROFLSHPCHR: /* Send char to card. */
- if(comp->len != sizeof(int))
+ if (comp->len != sizeof(int))
return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
+ if (copy_from_user(&z, comp->data, comp->len))
return -EFAULT;
return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);
case AIROFLPUTBUF: /* Send 32k to card */
if (!AIRO_FLASH(dev))
return -ENOMEM;
- if(comp->len > FLASHSIZE)
+ if (comp->len > FLASHSIZE)
return -EINVAL;
if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
return -EFAULT;
@@ -8009,19 +8044,20 @@ static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
* card.
*/
-static int cmdreset(struct airo_info *ai) {
+static int cmdreset(struct airo_info *ai)
+{
disable_MAC(ai, 1);
- if(!waitbusy (ai)){
+ if (!waitbusy(ai)) {
airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
return -EBUSY;
}
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ OUT4500(ai, COMMAND, CMD_SOFTRESET);
ssleep(1); /* WAS 600 12/7/00 */
- if(!waitbusy (ai)){
+ if (!waitbusy(ai)) {
airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
return -EBUSY;
}
@@ -8033,22 +8069,23 @@ static int cmdreset(struct airo_info *ai) {
* mode
*/
-static int setflashmode (struct airo_info *ai) {
+static int setflashmode(struct airo_info *ai)
+{
set_bit (FLAG_FLASHING, &ai->flags);
OUT4500(ai, SWS0, FLASH_COMMAND);
OUT4500(ai, SWS1, FLASH_COMMAND);
if (probe) {
OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0x10);
+ OUT4500(ai, COMMAND, 0x10);
} else {
OUT4500(ai, SWS2, FLASH_COMMAND);
OUT4500(ai, SWS3, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0);
+ OUT4500(ai, COMMAND, 0);
}
msleep(500); /* 500ms delay */
- if(!waitbusy(ai)) {
+ if (!waitbusy(ai)) {
clear_bit (FLAG_FLASHING, &ai->flags);
airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
return -EIO;
@@ -8060,16 +8097,17 @@ static int setflashmode (struct airo_info *ai) {
* x 50us for echo.
*/
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
+static int flashpchar(struct airo_info *ai, int byte, int dwelltime)
+{
int echo;
int waittime;
byte |= 0x8000;
- if(dwelltime == 0 )
+ if (dwelltime == 0)
dwelltime = 200;
- waittime=dwelltime;
+ waittime = dwelltime;
/* Wait for busy bit d15 to go false indicating buffer empty */
while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
@@ -8078,20 +8116,20 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
}
/* timeout for busy clear wait */
- if(waittime <= 0 ){
+ if (waittime <= 0) {
airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
return -EBUSY;
}
/* Port is clear; now write byte and wait for it to echo back */
do {
- OUT4500(ai,SWS0,byte);
+ OUT4500(ai, SWS0, byte);
udelay(50);
dwelltime -= 50;
- echo = IN4500(ai,SWS1);
+ echo = IN4500(ai, SWS1);
} while (dwelltime >= 0 && echo != byte);
- OUT4500(ai,SWS1,0);
+ OUT4500(ai, SWS1, 0);
return (echo == byte) ? 0 : -EIO;
}
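
For orientation, the complete flashing sequence these helpers implement, as driven by the AIROFLSH* ioctl cases in flashcard() above (a sketch; dwell times as used in this file):

	cmdreset(ai);                /* soft reset, wait for busy to clear */
	setflashmode(ai);            /* magic writes to SWS0..SWS3 */
	flashpchar(ai, z, 8000);     /* handshake byte echoed via SWS0/SWS1 */
	flashputbuf(ai);             /* push the 32k image via AUX memory */
	flashrestart(ai, dev);       /* restart and re-setup the card */
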
@@ -8100,29 +8138,30 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
* Get a character from the card matching matchbyte
* Step 3)
*/
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
+static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime)
+{
int rchar;
- unsigned char rbyte=0;
+ unsigned char rbyte = 0;
do {
- rchar = IN4500(ai,SWS1);
+ rchar = IN4500(ai, SWS1);
- if(dwelltime && !(0x8000 & rchar)){
+ if (dwelltime && !(0x8000 & rchar)) {
dwelltime -= 10;
mdelay(10);
continue;
}
rbyte = 0xff & rchar;
- if( (rbyte == matchbyte) && (0x8000 & rchar) ){
- OUT4500(ai,SWS1,0);
+ if ((rbyte == matchbyte) && (0x8000 & rchar)) {
+ OUT4500(ai, SWS1, 0);
return 0;
}
- if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
+ if (rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
break;
- OUT4500(ai,SWS1,0);
+ OUT4500(ai, SWS1, 0);
- }while(dwelltime > 0);
+ } while (dwelltime > 0);
return -EIO;
}
@@ -8131,21 +8170,22 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
* send to the card
*/
-static int flashputbuf(struct airo_info *ai){
+static int flashputbuf(struct airo_info *ai)
+{
int nwords;
/* Write stuff */
if (test_bit(FLAG_MPI,&ai->flags))
memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
else {
- OUT4500(ai,AUXPAGE,0x100);
- OUT4500(ai,AUXOFF,0);
+ OUT4500(ai, AUXPAGE, 0x100);
+ OUT4500(ai, AUXOFF, 0);
- for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
- OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff);
+ for (nwords = 0; nwords != FLASHSIZE / 2; nwords++) {
+ OUT4500(ai, AUXDATA, ai->flash[nwords] & 0xffff);
}
}
- OUT4500(ai,SWS0,0x8000);
+ OUT4500(ai, SWS0, 0x8000);
return 0;
}
@@ -8153,8 +8193,9 @@ static int flashputbuf(struct airo_info *ai){
/*
*
*/
-static int flashrestart(struct airo_info *ai,struct net_device *dev){
- int i,status;
+static int flashrestart(struct airo_info *ai, struct net_device *dev)
+{
+ int i, status;
ssleep(1); /* Added 12/7/00 */
clear_bit (FLAG_FLASHING, &ai->flags);
@@ -8166,9 +8207,9 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev){
status = setup_card(ai, dev->dev_addr, 1);
if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ ) {
+ for (i = 0; i < MAX_FIDS; i++) {
ai->fids[i] = transmit_allocate
- ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 );
+ (ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2);
}
ssleep(1); /* Added 12/7/00 */
diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig
index b1e7b4470842..1650d5865aa0 100644
--- a/drivers/net/wireless/intel/ipw2x00/Kconfig
+++ b/drivers/net/wireless/intel/ipw2x00/Kconfig
@@ -160,11 +160,7 @@ config LIBIPW
select WIRELESS_EXT
select WEXT_SPY
select CRYPTO
- select CRYPTO_ARC4
- select CRYPTO_ECB
- select CRYPTO_AES
select CRYPTO_MICHAEL_MIC
- select CRYPTO_ECB
select CRC32
select LIB80211
select LIB80211_CRYPT_WEP
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 461e955aa259..23fbddd0c1f8 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -201,8 +201,7 @@ static u32 ipw2100_debug_level = IPW_DL_NONE;
#define IPW_DEBUG(level, message...) \
do { \
if (ipw2100_debug_level & (level)) { \
- printk(KERN_DEBUG "ipw2100: %c %s ", \
- in_interrupt() ? 'I' : 'U', __func__); \
+ printk(KERN_DEBUG "ipw2100: %s ", __func__); \
printk(message); \
} \
} while (0)
@@ -3204,9 +3203,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(unsigned long data)
+static void ipw2100_irq_tasklet(struct tasklet_struct *t)
{
- struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
+ struct ipw2100_priv *priv = from_tasklet(priv, t, irq_tasklet);
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -6005,7 +6004,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(unsigned long data);
+static void ipw2100_irq_tasklet(struct tasklet_struct *t);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6135,8 +6134,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet,
- ipw2100_irq_tasklet, (unsigned long)priv);
+ tasklet_setup(&priv->irq_tasklet, ipw2100_irq_tasklet);
/* NOTE: We do not start the deferred work for status checks yet */
priv->stop_rf_kill = 1;
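
This ipw2100 conversion — and the matching ones in ipw2200 and iwlegacy below — follows the tree-wide migration from tasklet_init() to tasklet_setup(): the callback now receives the tasklet_struct itself instead of an unsigned long cookie, and the enclosing private structure is recovered with from_tasklet(), a type-safe wrapper around container_of(). A minimal sketch of the pattern, with hypothetical my_priv/my_irq_tasklet names:

#include <linux/interrupt.h>

struct my_priv {
	struct tasklet_struct irq_tasklet;
	/* ... driver state ... */
};

/* New-style callback: no more casting a context pointer through an
 * unsigned long; from_tasklet() maps the tasklet_struct back to the
 * structure that embeds it. */
static void my_irq_tasklet(struct tasklet_struct *t)
{
	struct my_priv *priv = from_tasklet(priv, t, irq_tasklet);

	/* ... deferred interrupt handling using priv ... */
}

static void my_setup(struct my_priv *priv)
{
	/* Replaces tasklet_init(&priv->irq_tasklet, fn, (unsigned long)priv). */
	tasklet_setup(&priv->irq_tasklet, my_irq_tasklet);
}
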
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 129ef2f6248a..ada6ce32c1f1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,12 +1945,11 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(unsigned long data)
+static void ipw_irq_tasklet(struct tasklet_struct *t)
{
- struct ipw_priv *priv = (struct ipw_priv *)data;
+ struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
u32 inta, inta_mask, handled = 0;
unsigned long flags;
- int rc = 0;
spin_lock_irqsave(&priv->irq_lock, flags);
@@ -1980,7 +1979,7 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
IPW_DEBUG_HC("Command completed.\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
+ ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
@@ -1988,25 +1987,25 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
IPW_DEBUG_TX("TX_QUEUE_1\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
+ ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
handled |= IPW_INTA_BIT_TX_QUEUE_1;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
IPW_DEBUG_TX("TX_QUEUE_2\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
+ ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
handled |= IPW_INTA_BIT_TX_QUEUE_2;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
IPW_DEBUG_TX("TX_QUEUE_3\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
+ ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
handled |= IPW_INTA_BIT_TX_QUEUE_3;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
IPW_DEBUG_TX("TX_QUEUE_4\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
+ ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
handled |= IPW_INTA_BIT_TX_QUEUE_4;
}
@@ -2999,7 +2998,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
-/**
+/*
* Check that card is still alive.
* Reads debug register from domain0.
* If card is present, pre-defined value should
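
The recurring /**-to-/* change in the rest of this series demotes comment blocks that are not valid kernel-doc: the double-asterisk opener is reserved for scripts/kernel-doc, which expects a "name - summary" line plus "@param: description" lines and warns about headers that do not follow the format. A sketch of a block that legitimately keeps the kernel-doc opener, using a hypothetical my_queue helper:

struct my_queue {
	int n_entries;	/* ring size */
	int read;	/* consumer index */
	int write;	/* producer index */
};

/**
 * my_queue_space - return the number of free slots in a queue
 * @q: queue to inspect
 *
 * Return: free slot count, keeping one slot as a full/empty guard.
 */
static int my_queue_space(const struct my_queue *q)
{
	int s = q->read - q->write - 1;

	if (s < 0)
		s += q->n_entries;
	return s;
}
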
@@ -3114,7 +3113,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
mdelay(1);
/* write ucode */
- /**
+ /*
* @bug
* Do NOT set indirect address register once and then
* store data to indirect data register in the loop.
@@ -3667,7 +3666,7 @@ static int ipw_load(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* DMA services
*
* Theory of operation
@@ -3690,11 +3689,11 @@ static int ipw_load(struct ipw_priv *priv)
* we only utilize the first data transmit queue (queue1).
*/
-/**
+/*
* Driver allocates buffers of this size for Rx
*/
-/**
+/*
* ipw_rx_queue_space - Return number of free slots available in queue.
*/
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
@@ -3725,7 +3724,7 @@ static inline int ipw_queue_inc_wrap(int index, int n_bd)
return (++index == n_bd) ? 0 : index;
}
-/**
+/*
* Initialize common DMA queue structure
*
* @param q queue to init
@@ -3789,7 +3788,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
return 0;
}
-/**
+/*
* Free one TFD, those at index [txq->q.last_used].
* Do NOT advance any indexes
*
@@ -3812,7 +3811,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
IPW_ERROR("Too many chunks: %i\n",
le32_to_cpu(bd->u.data.num_chunks));
- /** @todo issue fatal error, it is quite serious situation */
+ /* @todo issue fatal error, it is quite a serious situation */
return;
}
@@ -3829,7 +3828,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
}
}
-/**
+/*
* Deallocate DMA queue.
*
* Empty queue by removing and destroying all BD's.
@@ -3861,7 +3860,7 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
memset(txq, 0, sizeof(*txq));
}
-/**
+/*
* Destroy all DMA queues and structures
*
* @param priv
@@ -4466,7 +4465,7 @@ static void handle_scan_event(struct ipw_priv *priv)
}
}
-/**
+/*
* Handle host notification packet.
* Called from interrupt routine
*/
@@ -4926,7 +4925,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
}
-/**
+/*
* Destroys all DMA structures and initialises them again
*
* @param priv
@@ -4935,7 +4934,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
static int ipw_queue_reset(struct ipw_priv *priv)
{
int rc = 0;
- /** @todo customize queue sizes */
+ /* @todo customize queue sizes */
int nTx = 64, nTxCmd = 8;
ipw_tx_queue_free(priv);
/* Tx CMD queue */
@@ -4991,7 +4990,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* Reclaim Tx queue entries no more used by NIC.
*
* When FW advances 'R' index, all entries between old and
@@ -8248,12 +8247,12 @@ static void ipw_rx(struct ipw_priv *priv)
struct ipw_rx_mem_buffer *rxb;
struct ipw_rx_packet *pkt;
struct libipw_hdr_4addr *header;
- u32 r, w, i;
+ u32 r, i;
u8 network_packet;
u8 fill_rx = 0;
r = ipw_read32(priv, IPW_RX_READ_INDEX);
- w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
+ ipw_read32(priv, IPW_RX_WRITE_INDEX);
i = priv->rxq->read;
if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
@@ -8446,7 +8445,7 @@ static void ipw_rx(struct ipw_priv *priv)
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-/**
+/*
* ipw_sw_reset
* @option: options to control different reset behaviour
* 0 = reset everything except the 'disable' module_param
@@ -10673,8 +10672,7 @@ static void ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet,
- ipw_irq_tasklet, (unsigned long)priv);
+ tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
}
static void shim__set_security(struct net_device *dev,
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index e1ec1c96dcd8..98fe62737888 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1382,14 +1382,12 @@ BIT_ARG16(x)
#define IPW_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
- printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME": %s " fmt, __func__ , ## args); } while (0)
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_LL_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
- printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME": %s " fmt, __func__ , ## args); } while (0)
#else
#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
#endif /* CONFIG_IPW2200_DEBUG */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index e87538a8b88b..7964ef7d15f0 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -60,8 +60,7 @@
extern u32 libipw_debug_level;
#define LIBIPW_DEBUG(level, fmt, args...) \
do { if (libipw_debug_level & (level)) \
- printk(KERN_DEBUG "libipw: %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG "libipw: %s " fmt, __func__ , ## args); } while (0)
#else
#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
#endif /* CONFIG_LIBIPW_DEBUG */
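
The three debug-macro hunks above (ipw2100.c, ipw2200.h, libipw.h) drop the in_interrupt()-based 'I'/'U' prefix; in_interrupt() is being phased out of driver code because it does not reliably describe the calling context. The simplified shape, sketched with a hypothetical MYDRV prefix:

extern u32 mydrv_debug_level;

/* The old form printed in_interrupt() ? 'I' : 'U' before __func__;
 * the replacement keeps only the function name. */
#define MYDRV_DEBUG(level, fmt, args...)				\
do {									\
	if (mydrv_debug_level & (level))				\
		printk(KERN_DEBUG "mydrv: %s " fmt, __func__, ##args);	\
} while (0)
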
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9167c3d2711d..4ca8212d4fa4 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -365,7 +365,7 @@ il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
case WLAN_CIPHER_SUITE_WEP104:
tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
tx_cmd->sec_ctl |=
TX_CMD_SEC_WEP | (info->control.hw_key->
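
This hunk, and the matching ones in 4965-mac.c and common.c further down, replace the /* fall through */ comment convention with the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) where supported, so -Wimplicit-fallthrough checks the intent instead of trusting a comment. A sketch of the usage on a hypothetical key_size_bits() helper:

static int key_size_bits(int cipher)
{
	int bits = 0;

	switch (cipher) {
	case 104:
		bits += 64;
		fallthrough;	/* WEP104 is WEP40 plus 64 key bits */
	case 40:
		bits += 40;
		break;
	default:
		bits = -1;
	}
	return bits;
}
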
@@ -807,7 +807,7 @@ il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
wake_up(&il->wait_command_queue);
}
-/**
+/*
* il3945_setup_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
@@ -907,7 +907,7 @@ il3945_setup_handlers(struct il_priv *il)
*
*/
-/**
+/*
* il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32
@@ -916,7 +916,7 @@ il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
return cpu_to_le32((u32) dma_addr);
}
-/**
+/*
* il3945_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
@@ -966,7 +966,7 @@ il3945_rx_queue_restock(struct il_priv *il)
}
}
-/**
+/*
* il3945_rx_replenish - Move all used packets from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
@@ -1167,7 +1167,7 @@ il3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
-/**
+/*
* il3945_rx_handle - Main entry function for receiving responses from uCode
*
* Uses the il->handlers callback function array to invoke
@@ -1374,9 +1374,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(unsigned long data)
+il3945_irq_tasklet(struct tasklet_struct *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_tasklet(il, t, irq_tasklet);
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -1654,7 +1654,7 @@ il3945_dealloc_ucode_pci(struct il_priv *il)
il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
-/**
+/*
* il3945_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
@@ -1693,7 +1693,7 @@ il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
return rc;
}
-/**
+/*
* il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
@@ -1730,7 +1730,7 @@ il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
return rc;
}
-/**
+/*
* il3945_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
@@ -1811,7 +1811,7 @@ IL3945_UCODE_GET(init_size);
IL3945_UCODE_GET(init_data_size);
IL3945_UCODE_GET(boot_size);
-/**
+/*
* il3945_read_ucode - Read uCode images from disk file.
*
* Copy into buffers for card to fetch via bus-mastering
@@ -2047,7 +2047,7 @@ error:
return ret;
}
-/**
+/*
* il3945_set_ucode_ptrs - Set uCode address location
*
* Tell initialization uCode where to find runtime uCode.
@@ -2081,7 +2081,7 @@ il3945_set_ucode_ptrs(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_init_alive_start - Called after N_ALIVE notification received
*
* Called after N_ALIVE notification received from "initialize" uCode.
@@ -2125,7 +2125,7 @@ restart:
queue_work(il->workqueue, &il->restart);
}
-/**
+/*
* il3945_alive_start - called after N_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by il3945_init_alive_start()).
@@ -3399,9 +3399,7 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
- tasklet_init(&il->irq_tasklet,
- il3945_irq_tasklet,
- (unsigned long)il);
+ tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
}
static void
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 0af9e997c9f6..b2478cbe558e 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -124,7 +124,7 @@ il3945_clear_win(struct il3945_rate_scale_data *win)
win->stamp = 0;
}
-/**
+/*
* il3945_rate_scale_flush_wins - flush out the rate scale wins
*
* Returns the number of wins that have gathered data but were
@@ -229,7 +229,7 @@ il3945_bg_rate_scale_flush(struct timer_list *t)
D_RATE("leave\n");
}
-/**
+/*
* il3945_collect_tx_data - Update the success/failure sliding win
*
* We keep a sliding win of the last 64 packets transmitted
@@ -416,7 +416,7 @@ il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
del_timer_sync(&rs_sta->rate_scale_flush);
}
-/**
+/*
* il3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses il_priv->retry_rate for the # of retries attempted by
@@ -584,7 +584,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
return (high << 8) | low;
}
-/**
+/*
* il3945_rs_get_rate - find the rate for the requested packet
*
* Returns the ieee80211_rate structure allocated by the driver.
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index fd63eba47ba2..0597d828bee1 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -90,7 +90,7 @@ il3945_get_prev_ieee_rate(u8 rate_idx)
#define IL_EVT_DISABLE (0)
#define IL_EVT_DISABLE_SIZE (1532/32)
-/**
+/*
* il3945_disable_events - Disable selected events in uCode event log
*
* Disable an event by writing "1"s into "disable"
@@ -261,7 +261,7 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
return next_rate;
}
-/**
+/*
* il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
*
* When FW advances 'R' idx, all entries between old and new 'R' idx
@@ -291,7 +291,7 @@ il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
il_wake_queue(il, txq);
}
-/**
+/*
* il3945_hdl_tx - Handle Tx response
*/
static void
@@ -627,7 +627,7 @@ il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
return 0;
}
-/**
+/*
* il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
*
* Does NOT advance any idxes
@@ -675,7 +675,7 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
}
}
-/**
+/*
* il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
*
*/
@@ -828,7 +828,7 @@ il3945_tx_reset(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_txq_ctx_reset - Reset TX queue context
*
* Destroys all DMA structures and initializes them again
@@ -993,7 +993,7 @@ il3945_hw_nic_init(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_hw_txq_ctx_free - Free TXQ Context
*
* Destroy all TX DMA queues and structures
@@ -1035,7 +1035,7 @@ il3945_hw_txq_ctx_stop(struct il_priv *il)
}
}
-/**
+/*
* il3945_hw_reg_adjust_power_by_temp
* return idx delta into power gain settings table
*/
@@ -1045,7 +1045,7 @@ il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
return (new_reading - old_reading) * (-11) / 100;
}
-/**
+/*
* il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
*/
static inline int
@@ -1060,7 +1060,7 @@ il3945_hw_get_temperature(struct il_priv *il)
return _il_rd(il, CSR_UCODE_DRV_GP2);
}
-/**
+/*
* il3945_hw_reg_txpower_get_temperature
* get the current temperature by reading from NIC
*/
@@ -1096,7 +1096,7 @@ il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
* Both are lower than older versions' 9 degrees */
#define IL_TEMPERATURE_LIMIT_TIMER 6
-/**
+/*
* il3945_is_temp_calib_needed - determines if new calibration is needed
*
* records new temperature in tx_mgr->temperature.
@@ -1315,7 +1315,7 @@ il3945_hw_reg_fix_power_idx(int idx)
/* Kick off thermal recalibration check every 60 seconds */
#define REG_RECALIB_PERIOD (60)
-/**
+/*
* il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
*
* Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
@@ -1372,7 +1372,7 @@ il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
power_gain_table[band_idx][power_idx].dsp_atten;
}
-/**
+/*
* il3945_send_tx_power - fill in Tx Power command with gain settings
*
* Configures power settings for all rates for the current channel,
@@ -1439,7 +1439,7 @@ il3945_send_tx_power(struct il_priv *il)
}
-/**
+/*
* il3945_hw_reg_set_new_power - Configures power tables at new levels
* @ch_info: Channel to update. Uses power_info.requested_power.
*
@@ -1510,7 +1510,7 @@ il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
return 0;
}
-/**
+/*
* il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
*
* NOTE: Returned power limit may be less (but not more) than requested,
@@ -1537,7 +1537,7 @@ il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
return min(max_power, ch_info->max_power_avg);
}
-/**
+/*
* il3945_hw_reg_comp_txpower_temp - Compensate for temperature
*
* Compensate txpower settings of *all* channels for temperature.
@@ -1699,7 +1699,7 @@ il3945_send_rxon_assoc(struct il_priv *il)
return rc;
}
-/**
+/*
* il3945_commit_rxon - commit staging_rxon to hardware
*
* The RXON command in staging_rxon is committed to the hardware and
@@ -1830,7 +1830,7 @@ il3945_commit_rxon(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_reg_txpower_periodic - called when time to check our temperature.
*
* -- reset periodic timer
@@ -1873,7 +1873,7 @@ out:
mutex_unlock(&il->mutex);
}
-/**
+/*
* il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
*
* This function is used when initializing channel-info structs.
@@ -1912,7 +1912,7 @@ il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
return group_idx;
}
-/**
+/*
* il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
*
* Interpolate to get nominal (i.e. at factory calibration temperature) idx
@@ -2035,7 +2035,7 @@ il3945_hw_reg_init_channel_groups(struct il_priv *il)
}
}
-/**
+/*
* il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
*
* Second pass (during init) to set up il->channel_info
@@ -2305,7 +2305,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
vif->bss_conf.bssid);
}
-/**
+/*
* il3945_init_hw_rate_table - Initialize the hardware rate fallback table
*/
int
@@ -2520,7 +2520,7 @@ il3945_eeprom_release_semaphore(struct il_priv *il)
return;
}
- /**
+ /*
* il3945_load_bsm - Load bootstrap instructions
*
* BSM operation:
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-calib.c b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
index e78bdefb8952..2f97cbd42320 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-calib.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
@@ -598,7 +598,7 @@ il4965_find_first_chain(u8 mask)
return CHAIN_C;
}
-/**
+/*
* Run disconnected antenna algorithm to find out which antennas are
* disconnected.
*/
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index e73c223a7d28..28675a4ad861 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -226,7 +226,7 @@ il4965_hw_nic_init(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32
@@ -235,7 +235,7 @@ il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
return cpu_to_le32((u32) (dma_addr >> 8));
}
-/**
+/*
* il4965_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
@@ -288,7 +288,7 @@ il4965_rx_queue_restock(struct il_priv *il)
}
}
-/**
+/*
* il4965_rx_replenish - Move all used packets from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
@@ -544,7 +544,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
- /* fall through - if TTAK OK */
+ fallthrough; /* if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
@@ -1127,7 +1127,7 @@ il4965_count_chain_bitmap(u32 chain_bitmap)
return res;
}
-/**
+/*
* il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
@@ -1617,7 +1617,7 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
case WLAN_CIPHER_SUITE_WEP104:
tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
tx_cmd->sec_ctl |=
(TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
@@ -1933,7 +1933,7 @@ il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-/**
+/*
* il4965_hw_txq_ctx_free - Free TXQ Context
*
* Destroy all TX DMA queues and structures
@@ -1959,12 +1959,9 @@ il4965_hw_txq_ctx_free(struct il_priv *il)
il_free_txq_mem(il);
}
-/**
+/*
* il4965_txq_ctx_alloc - allocate TX queue context
* Allocate all Tx DMA structures and initialize them
- *
- * @param il
- * @return error code
*/
int
il4965_txq_ctx_alloc(struct il_priv *il)
@@ -2060,7 +2057,7 @@ il4965_txq_ctx_unmap(struct il_priv *il)
il_tx_queue_unmap(il, txq_id);
}
-/**
+/*
* il4965_txq_ctx_stop - Stop all Tx DMA channels
*/
void
@@ -2101,7 +2098,7 @@ il4965_txq_ctx_activate_free(struct il_priv *il)
return -1;
}
-/**
+/*
* il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
*/
static void
@@ -2114,7 +2111,7 @@ il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
(1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
-/**
+/*
* il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
*/
static int
@@ -2141,7 +2138,7 @@ il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
return 0;
}
-/**
+/*
* il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
*
* NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
@@ -2276,7 +2273,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
return ret;
}
-/**
+/*
* txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
* il->lock must be held by the caller
*/
@@ -2488,7 +2485,7 @@ il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
return nfreed;
}
-/**
+/*
* il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
*
* Go through block-ack's bitmap of ACK'd frames, update driver's record of
@@ -2641,7 +2638,7 @@ il4965_tx_status_to_mac80211(u32 status)
}
}
-/**
+/*
* il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
*/
static int
@@ -2753,7 +2750,7 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
return 0;
}
-/**
+/*
* il4965_hdl_tx - Handle standard (non-aggregation) Tx response
*/
static void
@@ -2873,7 +2870,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
spin_unlock_irqrestore(&il->sta_lock, flags);
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
void
@@ -2897,7 +2894,7 @@ il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
-/**
+/*
* il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
*
* Handles block-acknowledge notification from device, which reports success
@@ -3502,7 +3499,7 @@ il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
return ret;
}
-/**
+/*
* il4965_alloc_bcast_station - add broadcast station into driver's station table.
*
* This adds the broadcast station into the driver's station table
@@ -3543,7 +3540,7 @@ il4965_alloc_bcast_station(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_update_bcast_station - update broadcast station's LQ command
*
* Only used by iwl4965. Placed here to have all bcast station management
@@ -3579,7 +3576,7 @@ il4965_update_bcast_stations(struct il_priv *il)
return il4965_update_bcast_station(il);
}
-/**
+/*
* il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
int
@@ -3903,10 +3900,8 @@ il4965_tfd_get_num_tbs(struct il_tfd *tfd)
return tfd->num_tbs & 0x1f;
}
-/**
+/*
* il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @il - driver ilate data
- * @txq - tx queue
*
* Does NOT advance any TFD circular buffer read/write idxes
* Does NOT free the TFD itself (which is within circular buffer)
@@ -4044,7 +4039,7 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
IL_WARN("uCode did not respond OK.\n");
}
-/**
+/*
* il4965_bg_stats_periodic - Timer callback to queue stats
*
* This callback is provided in order to send a stats request.
@@ -4155,7 +4150,7 @@ il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
wake_up(&il->wait_command_queue);
}
-/**
+/*
* il4965_setup_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
@@ -4199,7 +4194,7 @@ il4965_setup_handlers(struct il_priv *il)
il->handlers[C_TX] = il4965_hdl_tx;
}
-/**
+/*
* il4965_rx_handle - Main entry function for receiving responses from uCode
*
* Uses the il->handlers callback function array to invoke
@@ -4344,9 +4339,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(unsigned long data)
+il4965_irq_tasklet(struct tasklet_struct *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_tasklet(il, t, irq_tasklet);
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -4756,7 +4751,7 @@ il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
return 0;
}
-/**
+/*
* il4965_ucode_callback - callback when firmware was loaded
*
* If loaded successfully, copies the firmware into buffers
@@ -5259,7 +5254,7 @@ il4965_alive_notify(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_alive_start - called after N_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by il_init_alive_start()).
@@ -6238,9 +6233,7 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
- tasklet_init(&il->irq_tasklet,
- il4965_irq_tasklet,
- (unsigned long)il);
+ tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
}
static void
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 1f196665d21f..9a491e5db75b 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -142,7 +142,7 @@ il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
}
#endif
-/**
+/*
* The following tables contain the expected throughput metrics for all rates
*
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
@@ -393,7 +393,7 @@ il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
return 0;
}
-/**
+/*
* il4965_rs_collect_tx_data - Update the success/failure sliding win
*
* We keep a sliding win of the last 62 packets transmitted
@@ -620,7 +620,7 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
return 1;
}
-/**
+/*
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
@@ -631,7 +631,7 @@ il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
!il->ht.non_gf_sta_present;
}
-/**
+/*
* il4965_rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -2114,7 +2114,7 @@ out:
lq_sta->last_txrate_idx = i;
}
-/**
+/*
* il4965_rs_initialize_lq - Initialize a station's hardware rate table
*
* The uCode's station table contains a table of fallback rates
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index fc8fa5818de7..9fa556486511 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -25,7 +25,7 @@
#include "common.h"
#include "4965.h"
-/**
+/*
* il_verify_inst_sparse - verify runtime uCode image in card vs. host,
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
@@ -57,7 +57,7 @@ il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
return ret;
}
-/**
+/*
* il4965_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
@@ -96,7 +96,7 @@ il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
return ret;
}
-/**
+/*
* il4965_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
@@ -292,7 +292,7 @@ il4965_verify_bsm(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_load_bsm - Load bootstrap instructions
*
* BSM operation:
@@ -402,7 +402,7 @@ il4965_load_bsm(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_set_ucode_ptrs - Set uCode address location
*
* Tell initialization uCode where to find runtime uCode.
@@ -435,7 +435,7 @@ il4965_set_ucode_ptrs(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_init_alive_start - Called after N_ALIVE notification received
*
* Called after N_ALIVE notification received from "initialize" uCode.
@@ -567,7 +567,7 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res)
return 1;
}
-/**
+/*
* il4965_get_voltage_compensation - Power supply voltage comp for txpower
*
* Determines power supply voltage compensation for txpower calculations.
@@ -654,7 +654,7 @@ il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
}
}
-/**
+/*
* il4965_interpolate_chan - Interpolate factory measurements for one channel
*
* Interpolates factory measurements from the two sample channels within a
@@ -1231,7 +1231,7 @@ il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
return 0;
}
-/**
+/*
* il4965_send_tx_power - Configure the TXPOWER level user limit
*
* Uses the active RXON for channel, band, and characteristics (ht40, high)
@@ -1528,7 +1528,7 @@ il4965_hw_channel_switch(struct il_priv *il,
return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
-/**
+/*
* il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void
@@ -1553,9 +1553,8 @@ il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
bc_ent;
}
-/**
+/*
* il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @stats: Provides the temperature reading from the uCode
*
* A return of <0 indicates bogus data in the stats
*/
@@ -1619,7 +1618,7 @@ il4965_hw_get_temperature(struct il_priv *il)
/* Adjust Txpower only if temperature variance is greater than threshold. */
#define IL_TEMPERATURE_THRESHOLD 3
-/**
+/*
* il4965_is_temp_calib_needed - determines if new calibration is needed
*
* If the temperature has changed sufficiently, then a recalibration
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index f78e062df572..0651a6a416d1 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -685,7 +685,7 @@ il_eeprom_query16(const struct il_priv *il, size_t offset)
}
EXPORT_SYMBOL(il_eeprom_query16);
-/**
+/*
* il_eeprom_init - read EEPROM contents
*
* Load the EEPROM contents from adapter into il->eeprom
@@ -836,7 +836,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
-/**
+/*
* il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
*
* Does not set up a command, or touch hardware.
@@ -877,7 +877,7 @@ il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
-/**
+/*
* il_init_channel_map - Set up driver's info for all possible channels
*/
int
@@ -1024,7 +1024,7 @@ il_free_channel_map(struct il_priv *il)
}
EXPORT_SYMBOL(il_free_channel_map);
-/**
+/*
* il_get_channel_info - Find driver's private channel info
*
* Based on band and channel number.
@@ -1343,7 +1343,7 @@ il_do_scan_abort(struct il_priv *il)
D_SCAN("Successfully send scan abort\n");
}
-/**
+/*
* il_scan_cancel - Cancel any currently executing HW scan
*/
int
@@ -1355,7 +1355,7 @@ il_scan_cancel(struct il_priv *il)
}
EXPORT_SYMBOL(il_scan_cancel);
-/**
+/*
* il_scan_cancel_timeout - Cancel any currently executing HW scan
* @ms: amount of time to wait (in milliseconds) for scan to abort
*
@@ -1607,10 +1607,9 @@ il_bg_scan_check(struct work_struct *data)
mutex_unlock(&il->mutex);
}
-/**
+/*
* il_fill_probe_req - fill in all required fields and IE for probe request
*/
-
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ies, int ie_len, int left)
@@ -1913,7 +1912,7 @@ done:
return;
}
-/**
+/*
* il_prep_station - Prepare station information for addition
*
* should be called with sta_lock held
@@ -2000,7 +1999,7 @@ EXPORT_SYMBOL_GPL(il_prep_station);
#define STA_WAIT_TIMEOUT (HZ/2)
-/**
+/*
* il_add_station_common -
*/
int
@@ -2060,7 +2059,7 @@ il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
}
EXPORT_SYMBOL(il_add_station_common);
-/**
+/*
* il_sta_ucode_deactivate - deactivate ucode status for a station
*
* il->sta_lock must be held
@@ -2136,7 +2135,7 @@ il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
return ret;
}
-/**
+/*
* il_remove_station - Remove driver's knowledge of station.
*/
int
@@ -2192,7 +2191,7 @@ out_err:
}
EXPORT_SYMBOL_GPL(il_remove_station);
-/**
+/*
* il_clear_ucode_stations - clear ucode station table bits
*
* This function clears all the bits in the driver indicating
@@ -2224,7 +2223,7 @@ il_clear_ucode_stations(struct il_priv *il)
}
EXPORT_SYMBOL(il_clear_ucode_stations);
-/**
+/*
* il_restore_stations() - Restore driver known stations to device
*
* All stations considered active by driver, but not present in ucode, are
@@ -2356,7 +2355,7 @@ il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
}
#endif
-/**
+/*
* il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
* It sometimes happens when a HT rate has been in use and we
@@ -2385,7 +2384,7 @@ il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
return true;
}
-/**
+/*
* il_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
@@ -2531,7 +2530,7 @@ EXPORT_SYMBOL(il_mac_sta_remove);
*
*/
-/**
+/*
* il_rx_queue_space - Return number of free slots available in queue.
*/
int
@@ -2548,7 +2547,7 @@ il_rx_queue_space(const struct il_rx_queue *q)
}
EXPORT_SYMBOL(il_rx_queue_space);
-/**
+/*
* il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
void
@@ -2677,7 +2676,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
- /* fall through */
+ fallthrough;
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
@@ -2687,7 +2686,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
D_RX("Packet destroyed\n");
return -1;
}
- /* fall through */
+ fallthrough;
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
@@ -2703,7 +2702,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
}
EXPORT_SYMBOL(il_set_decrypted_flag);
-/**
+/*
* il_txq_update_write_ptr - Send new write idx to hardware
*/
void
@@ -2743,7 +2742,7 @@ il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
-/**
+/*
* il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
*/
void
@@ -2762,7 +2761,7 @@ il_tx_queue_unmap(struct il_priv *il, int txq_id)
}
EXPORT_SYMBOL(il_tx_queue_unmap);
-/**
+/*
* il_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
*
@@ -2805,7 +2804,7 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
}
EXPORT_SYMBOL(il_tx_queue_free);
-/**
+/*
* il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
*/
void
@@ -2843,9 +2842,8 @@ il_cmd_queue_unmap(struct il_priv *il)
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
-/**
+/*
* il_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
*
* Empty queue by removing and destroying all BD's.
* Free all buffers.
@@ -2924,7 +2922,7 @@ il_queue_space(const struct il_queue *q)
EXPORT_SYMBOL(il_queue_space);
-/**
+/*
* il_queue_init - Initialize queue's high/low-water and read/write idxes
*/
static int
@@ -2958,7 +2956,7 @@ il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
return 0;
}
-/**
+/*
* il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
*/
static int
@@ -2998,7 +2996,7 @@ error:
return -ENOMEM;
}
-/**
+/*
* il_tx_queue_init - Allocate and initialize one tx/cmd queue
*/
int
@@ -3105,7 +3103,7 @@ EXPORT_SYMBOL(il_tx_queue_reset);
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-/**
+/*
* il_enqueue_hcmd - enqueue a uCode command
* @il: device private data pointer
* @cmd: a point to the ucode command structure
@@ -3123,7 +3121,6 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
struct il_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
- int len;
u32 idx;
u16 fix_size;
@@ -3182,9 +3179,6 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct il_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IL_MAX_CMD_SIZE;
#ifdef CONFIG_IWLEGACY_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -3233,7 +3227,7 @@ out:
return idx;
}
-/**
+/*
* il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
*
* When FW advances 'R' idx, all entries between old and new 'R' idx
@@ -3266,7 +3260,7 @@ il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
}
}
-/**
+/*
* il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
*
@@ -3417,7 +3411,7 @@ il_init_ht_hw_capab(const struct il_priv *il,
}
}
-/**
+/*
* il_init_geos - Initialize mac80211's geo/channel info based from eeprom
*/
int
@@ -3763,7 +3757,7 @@ il_check_rxon_cmd(struct il_priv *il)
}
EXPORT_SYMBOL(il_check_rxon_cmd);
-/**
+/*
* il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
* @il: staging_rxon is compared to active_rxon
*
@@ -3943,7 +3937,7 @@ il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
}
EXPORT_SYMBOL(il_get_single_channel_number);
-/**
+/*
* il_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
@@ -4146,7 +4140,7 @@ il_print_rx_config_cmd(struct il_priv *il)
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
-/**
+/*
* il_irq_handle_error - called for HW or SW error interrupt from card
*/
void
@@ -5011,7 +5005,7 @@ il_update_qos(struct il_priv *il)
&il->qos_data.def_qos_parm, NULL);
}
-/**
+/*
* il_mac_config - mac80211 config callback
*/
int
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index bc9cd7e5ccb8..ea1b1bb7ddcb 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2925,8 +2925,8 @@ do { \
#define IL_DBG(level, fmt, args...) \
do { \
if (il_get_debug_level(il) & level) \
- dev_err(&il->hw->wiphy->dev, "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ##args); \
+ dev_err(&il->hw->wiphy->dev, "%s " fmt, __func__, \
+ ##args); \
} while (0)
#define il_print_hex_dump(il, level, p, len) \
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 4f741b305d96..d998a3f1b056 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -1364,9 +1364,8 @@ il_dbgfs_register(struct il_priv *il, const char *name)
}
EXPORT_SYMBOL(il_dbgfs_register);
-/**
+/*
* Remove the debugfs files and directories
- *
*/
void
il_dbgfs_unregister(struct il_priv *il)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index fbcd1405aeea..14b0db28143b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -13,9 +13,10 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
-iwlwifi-objs += fw/dbg.o
+iwlwifi-objs += fw/dbg.o fw/pnvm.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index efe427049a6e..d2bbe6a73514 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 56
+#define IWL_22000_UCODE_API_MAX 59
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -89,6 +89,9 @@
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
+#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
+#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
+#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
@@ -118,6 +121,12 @@
IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -244,7 +253,7 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 500,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
@@ -335,14 +344,32 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
.bisr_workaround = 1,
};
+const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000
+};
+
+const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
+const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
+const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
"Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
+const char iwl_ax201_killer_1650s_name[] =
+ "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)";
+const char iwl_ax201_killer_1650i_name[] =
+ "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)";
const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -539,7 +566,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -547,7 +574,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -565,7 +592,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -573,7 +600,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -583,7 +610,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
};
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -591,13 +618,35 @@ const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
};
const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = {
+ .name = iwl_ax201_name,
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = {
+ .fw_name_pre = IWL_MA_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
+ .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -612,3 +661,6 @@ MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index f84b8e5d3f0b..be4acf4a0e32 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -180,7 +180,16 @@ const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 650,
+};
+
+const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .integrated = true,
+ .xtal_latency = 2820,
};
const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
@@ -189,7 +198,7 @@ const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 670,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index 588b15697710..974e1c324ca7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -761,7 +761,7 @@ static inline u8 find_first_chain(u8 mask)
return CHAIN_C;
}
-/**
+/*
* Run disconnected antenna algorithm to find out which antennas are
* disconnected.
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index d42bc46fe566..c3e25885d194 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -58,8 +58,8 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
/**
* iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of bits needed to shift for masking
*/
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
u16 tsf_bits)
@@ -69,8 +69,8 @@ static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
/**
* iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of bits needed to shift for masking
*/
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
u16 tsf_bits)
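
Unlike the demotions elsewhere in the series, these two headers stay kernel-doc and only have their parameter syntax corrected: kernel-doc requires "@name: description", and the "--" separator breaks parsing. For reference, a correctly formed header on a simplified, hypothetical mask helper:

/**
 * beacon_time_mask - build a mask for the low bits of a beacon time
 * @tsf_bits: number of low-order bits to keep
 *
 * Return: a mask with the @tsf_bits least significant bits set.
 */
static inline u32 beacon_time_mask(u16 tsf_bits)
{
	return (1U << tsf_bits) - 1;
}
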
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index eab94d2f46b1..3b937a7dd403 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -110,7 +110,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
vif->bss_conf.bssid);
}
-/**
+/*
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
*
* pre-requirements:
@@ -769,7 +769,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
return res;
}
-/**
+/*
* iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index b882705ff66d..461af5831156 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -374,7 +374,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
-/**
+/*
* iwl_bg_statistics_periodic - Timer callback to queue statistics
*
* This callback is provided in order to send a statistics request.
@@ -533,7 +533,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
priv->event_log.next_entry = next_entry;
}
-/**
+/*
* iwl_bg_ucode_trace - Timer callback to log ucode event
*
* The timer is continually set to execute every
@@ -762,7 +762,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
-/**
+/*
* iwl_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by iwl_init_alive_start()).
@@ -1682,9 +1682,8 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
#define EVENT_START_OFFSET (4 * sizeof(u32))
-/**
+/*
* iwl_print_event_log - Dump error event log to syslog
- *
*/
static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 num_events, u32 mode,
@@ -1762,7 +1761,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
return pos;
}
-/**
+/*
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 4fa4eab2d7f3..548540dd0c0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -151,7 +151,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
{}
#endif
-/**
+/*
* The following tables contain the expected throughput metrics for all rates
*
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
@@ -318,7 +318,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-/**
+/*
* Program the device to use fixed rate for frame transmit
* This is for debugging/testing only
* once the device starts using a fixed rate, we need to reload the module
@@ -440,7 +440,7 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
return 0;
}
-/**
+/*
* rs_collect_tx_data - Update the success/failure sliding window
*
* We keep a sliding window of the last 62 packets transmitted
@@ -673,7 +673,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
return 1;
}
-/**
+/*
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
@@ -689,7 +689,7 @@ static bool rs_use_green(struct ieee80211_sta *sta)
return false;
}
-/**
+/*
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -2612,7 +2612,7 @@ out:
lq_sta->last_txrate_idx = index;
}
-/**
+/*
* rs_initialize_lq - Initialize a station's hardware rate table
*
* The uCode's station table contains a table of fallback rates
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 673d60784bfa..9d55ece05020 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -132,7 +132,7 @@ static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
-/**
+/*
* iwl_good_plcp_health - checks for plcp error.
*
* When the plcp error exceeds the thresholds, reset the radio
@@ -929,7 +929,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
kfree_rcu(old_data, rcu_head);
}
-/**
+/*
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 6f37c9fac31d..12a3d464ae64 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -689,7 +689,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
-/**
+/*
* iwl_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
@@ -826,7 +826,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
return errors ? -EINVAL : 0;
}
-/**
+/*
* iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
* @priv: staging_rxon is compared to active_rxon
*
@@ -1007,7 +1007,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
ctx->staging.ofdm_basic_rates = ofdm;
}
-/**
+/*
* iwlagn_commit_rxon - commit staging_rxon to hardware
*
* The RXON command in staging_rxon is committed to the hardware and
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 1d8590046ff7..832fcbb787e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -186,7 +186,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
}
-/**
+/*
* iwl_scan_cancel - Cancel any currently executing HW scan
*/
int iwl_scan_cancel(struct iwl_priv *priv)
@@ -196,10 +196,9 @@ int iwl_scan_cancel(struct iwl_priv *priv)
return 0;
}
-/**
+/*
* iwl_scan_cancel_timeout - Cancel any currently executing HW scan
* @ms: amount of time to wait (in milliseconds) for scan to abort
- *
*/
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
@@ -560,10 +559,9 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
return added;
}
-/**
+/*
* iwl_fill_probe_req - fill in all required fields and IE for probe request
*/
-
static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
const u8 *ies, int ie_len, const u8 *ssid,
u8 ssid_len, int left)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 51158edce15b..e622948661fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -234,7 +234,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
priv->stations[index].sta.station_flags |= flags;
}
-/**
+/*
* iwl_prep_station - Prepare station information for addition
*
* should be called with sta_lock held
@@ -323,7 +323,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
#define STA_WAIT_TIMEOUT (HZ/2)
-/**
+/*
* iwl_add_station_common -
*/
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -383,7 +383,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
-/**
+/*
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
*/
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
@@ -451,7 +451,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
return ret;
}
-/**
+/*
* iwl_remove_station - Remove driver's knowledge of station.
*/
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
@@ -601,7 +601,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
link_cmd->sta_id = sta_id;
}
-/**
+/*
* iwl_clear_ucode_stations - clear ucode station table bits
*
* This function clears all the bits in the driver indicating
@@ -636,7 +636,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
"No active stations found to be cleared\n");
}
-/**
+/*
* iwl_restore_stations() - Restore driver known stations to device
*
* All stations considered active by driver, but not present in ucode, is
@@ -773,7 +773,7 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
}
#endif
-/**
+/*
* is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
* It sometimes happens when a HT rate has been in use and we
@@ -807,7 +807,7 @@ static bool is_lq_table_valid(struct iwl_priv *priv,
return true;
}
-/**
+/*
* iwl_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
@@ -1258,7 +1258,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
return ret;
}
-/**
+/*
* iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
*
* This adds the broadcast station into the driver's station table
@@ -1298,7 +1298,7 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
return 0;
}
-/**
+/*
* iwl_update_bcast_station - update broadcast station's LQ command
*
* Only used by iwlagn. Placed here to have all bcast station management
@@ -1341,7 +1341,7 @@ int iwl_update_bcast_stations(struct iwl_priv *priv)
return ret;
}
-/**
+/*
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index fd454836adbe..e3962bb52328 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -803,7 +803,7 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
rcu_read_unlock();
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
@@ -1256,7 +1256,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
}
}
-/**
+/*
* iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
*
* Handles block-acknowledge notification from device, which reports success
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index dc769b580431..3e5a35e26ad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -118,8 +118,8 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
* method (DSM) interface. The returned acpi object must be freed by calling
* function.
*/
-void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args)
+static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args)
{
union acpi_object *obj;
@@ -400,9 +400,9 @@ out_free:
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled)
+static int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
{
int i;
@@ -418,18 +418,13 @@ int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_sar_set_profile);
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
- int prof_a, int prof_b)
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
{
- int i, j, idx;
int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
-
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
- ACPI_SAR_TABLE_SIZE);
+ int i, j, idx;
for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
struct iwl_sar_profile *prof;
@@ -461,9 +456,9 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);
IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
- idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- per_chain_restriction[i][j] =
+ for (j = 0; j < n_subbands; j++) {
+ idx = i * ACPI_SAR_NUM_SUB_BANDS + j;
+ per_chain[i * n_subbands + j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
j, prof->table[idx]);
@@ -472,6 +467,23 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
return 0;
}
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
@@ -632,25 +644,8 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_geo_tx_power_profiles_resp *resp;
- int ret;
-
- resp = (void *)cmd->resp_pkt->data;
- ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
- ret = -EIO;
- IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret);
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
-
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table)
+ struct iwl_per_chain_offset *table, u32 n_bands)
{
int ret, i, j;
@@ -666,23 +661,26 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
-
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- struct iwl_per_chain_offset *chain =
- (struct iwl_per_chain_offset *)&table[i];
-
- for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
u8 *value;
+ if (j * ACPI_GEO_PER_CHAIN_SIZE >=
+ ARRAY_SIZE(fwrt->geo_profiles[0].values))
+ /*
+ * Currently we only store lb and hb values, and
+ * don't have any special ones for uhb. So leave
+ * those empty for the time being
+ */
+ break;
+
value = &fwrt->geo_profiles[i].values[j *
ACPI_GEO_PER_CHAIN_SIZE];
- chain[j].max_tx_power = cpu_to_le16(value[0]);
- chain[j].chain_a = value[1];
- chain[j].chain_b = value[2];
+ chain->max_tx_power = cpu_to_le16(value[0]);
+ chain->chain_a = value[1];
+ chain->chain_b = value[2];
IWL_DEBUG_RADIO(fwrt,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j, value[1], value[2], value[0]);
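
The refactor above is worth restating: iwl_sar_fill_table() now writes into a flat __le16 array instead of the fixed per_chain_restriction[][IWL_NUM_SUB_BANDS] matrix, and iwl_sar_select_profile() strides over it once per table. A minimal stand-alone sketch of that index math, with stand-in values for the ACPI_SAR_* constants (the real ones live in fw/acpi.h):

#include <stdio.h>

#define N_TABLES   1	/* stand-in for ACPI_SAR_NUM_TABLES */
#define N_CHAINS   2	/* stand-in for ACPI_SAR_NUM_CHAIN_LIMITS */
#define N_SUBBANDS 5	/* stand-in for a v1-style sub-band count */

int main(void)
{
	unsigned short per_chain[N_TABLES * N_CHAINS * N_SUBBANDS];
	int t, i, j;

	/* Same layout iwl_sar_fill_table() produces: table-major,
	 * then chain, then sub-band. */
	for (t = 0; t < N_TABLES; t++)
		for (i = 0; i < N_CHAINS; i++)
			for (j = 0; j < N_SUBBANDS; j++)
				per_chain[(t * N_CHAINS + i) * N_SUBBANDS + j] =
					(unsigned short)(100 * t + 10 * i + j);

	/* Chain 1, sub-band 3 of table 0 is then: */
	printf("%d\n", per_chain[(0 * N_CHAINS + 1) * N_SUBBANDS + 3]);	/* 13 */
	return 0;
}
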
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 0ada9eddb8b1..bddf8a44e163 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -89,6 +89,7 @@
#define ACPI_SAR_NUM_CHAIN_LIMITS 2
#define ACPI_SAR_NUM_SUB_BANDS 5
+#define ACPI_SAR_NUM_TABLES 1
#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
@@ -104,13 +105,12 @@
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
-#define ACPI_PPAG_NUM_CHAINS 2
-#define ACPI_PPAG_NUM_SUB_BANDS 5
-#define ACPI_PPAG_WIFI_DATA_SIZE ((ACPI_PPAG_NUM_CHAINS * \
- ACPI_PPAG_NUM_SUB_BANDS) + 3)
+#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS) + 3)
+#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS_V2) + 3)
/* PPAG gain value bounds in 1/8 dBm */
#define ACPI_PPAG_MIN_LB -16
@@ -133,15 +133,26 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
};
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args);
-
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
@@ -171,12 +182,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev);
*/
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled);
-
int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
@@ -187,11 +194,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd);
-
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table);
+ struct iwl_per_chain_offset *table, u32 n_bands);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
int *black_list_size);
@@ -237,15 +241,8 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
return -ENOENT;
}
-static inline int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled)
-{
- return -ENOENT;
-}
-
static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b)
{
return -ENOENT;
@@ -271,18 +268,6 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
return false;
}
-static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table)
-{
- return -ENOENT;
-}
-
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
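
Note the pattern used throughout this header: every helper moved into or out of the CONFIG_ACPI block keeps a static inline stub returning -ENOENT on !CONFIG_ACPI builds, so callers never need their own ifdefs. A compressed sketch of the idiom with a hypothetical helper name:

#include <errno.h>

#ifdef CONFIG_ACPI
int my_get_table(int *out);	/* real implementation in a .c file */
#else
static inline int my_get_table(int *out)
{
	/* Behave as if the table simply is not there, so callers
	 * handle both build configurations on one error path. */
	return -ENOENT;
}
#endif
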
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index df1bd0d2450e..a1cac47395bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +30,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -129,19 +128,31 @@ struct iwl_umac_alive {
struct iwl_umac_debug_addrs dbg_ptrs;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
-struct mvm_alive_resp_v3 {
+struct iwl_sku_id {
+ __le32 data[3];
+} __packed; /* SKU_ID_API_S_VER_1 */
+
+struct iwl_alive_ntf_v3 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data;
struct iwl_umac_alive umac_data;
-} __packed; /* ALIVE_RES_API_S_VER_3 */
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_alive_ntf_v4 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_4 */
-struct mvm_alive_resp {
+struct iwl_alive_ntf_v5 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data[2];
struct iwl_umac_alive umac_data;
-} __packed; /* ALIVE_RES_API_S_VER_4 */
+ struct iwl_sku_id sku_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
/**
* enum iwl_extended_cfg_flag - commands driver may send before
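
With UCODE_ALIVE_NTFY now having three layouts, the receiving side has to identify the version before reading past the common header; since v3/v4/v5 differ in size, length-based dispatch is one workable approach. A hedged sketch with simplified stand-in structs (not the driver's actual handler):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the three notification layouts. */
struct alive_v3 { unsigned short status, flags; char lmac[8];  char umac[4]; };
struct alive_v4 { unsigned short status, flags; char lmac[16]; char umac[4]; };
struct alive_v5 { unsigned short status, flags; char lmac[16]; char umac[4];
		  unsigned int sku_id[3]; };

static void handle_alive(const void *payload, size_t len)
{
	(void)payload;
	/* Dispatch on payload length to tell the layouts apart. */
	if (len == sizeof(struct alive_v5))
		puts("v5: parse sku_id too");
	else if (len == sizeof(struct alive_v4))
		puts("v4: two LMACs, no sku_id");
	else if (len == sizeof(struct alive_v3))
		puts("v3: single LMAC");
	else
		puts("unknown alive layout");
}

int main(void)
{
	struct alive_v5 n;

	memset(&n, 0, sizeof(n));
	handle_alive(&n, sizeof(n));
	return 0;
}
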
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
index 570f19026c91..6cb22a9a9380 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
@@ -27,7 +27,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
@@ -59,10 +59,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-
#ifndef __iwl_fw_api_binding_h__
#define __iwl_fw_api_binding_h__
+#include <fw/file.h>
+#include <fw/img.h>
+
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
@@ -112,6 +114,14 @@ struct iwl_binding_cmd {
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1
+static inline u32 iwl_mvm_get_lmac_id(const struct iwl_fw *fw,
+ enum nl80211_band band)
+{
+ if (!fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
+
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
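
The new iwl_mvm_get_lmac_id() inline captures the routing rule in one place: without the CDB capability, both bands are served by LMAC 0, and only CDB firmware sends 5 GHz traffic to LMAC 1. Restated as a self-contained sketch (BAND_* and the capability flag are stand-ins for the nl80211/TLV symbols):

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };	/* stand-in for nl80211_band */

#define LMAC_24G_INDEX 0
#define LMAC_5G_INDEX  1

static unsigned int get_lmac_id(bool has_cdb, enum band band)
{
	/* Single-LMAC firmware (no CDB) handles both bands on index 0;
	 * only CDB firmware routes 5 GHz to the second LMAC. */
	if (!has_cdb || band == BAND_2GHZ)
		return LMAC_24G_INDEX;
	return LMAC_5G_INDEX;
}

int main(void)
{
	printf("%u %u\n", get_lmac_id(false, BAND_5GHZ),
	       get_lmac_id(true, BAND_5GHZ));	/* prints "0 1" */
	return 0;
}
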
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 4f46f3ed8794..8cc36dbb2311 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -104,11 +104,12 @@ enum iwl_mvm_command_groups {
*/
enum iwl_legacy_cmds {
/**
- * @MVM_ALIVE:
+ * @UCODE_ALIVE_NTFY:
* Alive data from the firmware, as described in
- * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
+ * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
+ * &struct iwl_alive_ntf_v5.
*/
- MVM_ALIVE = 0x1,
+ UCODE_ALIVE_NTFY = 0x1,
/**
* @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
@@ -410,7 +411,8 @@ enum iwl_legacy_cmds {
* one of &struct iwl_statistics_cmd,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_v10,
- * &struct iwl_notif_statistics
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy
*/
STATISTICS_CMD = 0x9c,
@@ -418,7 +420,8 @@ enum iwl_legacy_cmds {
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
- * &struct iwl_notif_statistics
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy
*/
STATISTICS_NOTIFICATION = 0x9d,
@@ -431,8 +434,7 @@ enum iwl_legacy_cmds {
/**
* @REDUCE_TX_POWER_CMD:
- * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
- * or &struct iwl_dev_tx_power_cmd
+ * &struct iwl_dev_tx_power_cmd
*/
REDUCE_TX_POWER_CMD = 0x9f,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index c4562e1f8d18..5db301a6a312 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -451,10 +451,15 @@ union iwl_all_tsc_rsc {
struct iwl_aes_rsc_tsc aes;
}; /* ALL_TSC_RSC_API_S_VER_2 */
-struct iwl_wowlan_rsc_tsc_params_cmd {
+struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 {
union iwl_all_tsc_rsc all_tsc_rsc;
} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+struct iwl_wowlan_rsc_tsc_params_cmd {
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params;
+ __le32 sta_id;
+} __packed; /* ALL_TSC_RSC_API_S_VER_4 */
+
#define IWL_MIC_KEY_SIZE 8
struct iwl_mic_keys {
u8 tx[IWL_MIC_KEY_SIZE];
@@ -469,17 +474,26 @@ struct iwl_p1k_cache {
#define IWL_NUM_RX_P1K_CACHE 2
-struct iwl_wowlan_tkip_params_cmd {
+struct iwl_wowlan_tkip_params_cmd_ver_1 {
struct iwl_mic_keys mic_keys;
struct iwl_p1k_cache tx;
struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+struct iwl_wowlan_tkip_params_cmd {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+ u8 reversed[2];
+ __le32 sta_id;
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_2 */
+
#define IWL_KCK_MAX_SIZE 32
#define IWL_KEK_MAX_SIZE 32
-struct iwl_wowlan_kek_kck_material_cmd {
+struct iwl_wowlan_kek_kck_material_cmd_v2 {
u8 kck[IWL_KCK_MAX_SIZE];
u8 kek[IWL_KEK_MAX_SIZE];
__le16 kck_len;
@@ -487,6 +501,18 @@ struct iwl_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+struct iwl_wowlan_kek_kck_material_cmd_v3 {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+ __le32 akm;
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
+
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
enum iwl_wowlan_rekey_status {
@@ -525,7 +551,7 @@ struct iwl_wowlan_gtk_status_v1 {
u8 reserved[3];
u8 decrypt_key[16];
u8 tkip_mic_key[8];
- struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
#define WOWLAN_KEY_MAX_SIZE 32
@@ -550,7 +576,7 @@ struct iwl_wowlan_gtk_status {
u8 key_flags;
u8 reserved[2];
u8 tkip_mic_key[8];
- struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
@@ -635,7 +661,7 @@ struct iwl_wowlan_status_v7 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
- * struct iwl_wowlan_status - WoWLAN status
+ * struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
@@ -653,7 +679,7 @@ struct iwl_wowlan_status_v7 {
* @reserved: unused
* @wake_packet: wakeup packet
*/
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v9 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -671,6 +697,44 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @tid_tear_down: bit mask of TIDs whose BA sessions were closed
+ * in suspend state
+ * @reserved: reserved
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status {
+ struct iwl_wowlan_gtk_status gtk[1];
+ struct iwl_wowlan_igtk_status igtk[1];
+ struct iwl_wowlan_igtk_status bigtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_11 */
+
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
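
The doubled struct definitions in this file all follow one scheme: the newer layout keeps the older fields in place and appends the additions (sta_id, or the cipher/AKM block for KEK/KCK material), so a driver can pick the payload length by API version. A hedged sketch of that sizing decision; the version query is a stand-in, not the driver's real lookup helper:

#include <stddef.h>
#include <stdio.h>

struct kek_kck_v2 {
	unsigned char kck[32], kek[32];
	unsigned short kck_len, kek_len;
	unsigned long long replay_ctr;
};

struct kek_kck_v3 {	/* v2 fields, then the v3 additions */
	unsigned char kck[32], kek[32];
	unsigned short kck_len, kek_len;
	unsigned long long replay_ctr;
	unsigned int akm, gtk_cipher, igtk_cipher, bigtk_cipher;
};

/* Stand-in: a real driver would query the firmware's advertised
 * command version instead. */
static int kek_kck_cmd_version(void) { return 3; }

int main(void)
{
	size_t len = kek_kck_cmd_version() >= 3 ?
		sizeof(struct kek_kck_v3) : sizeof(struct kek_kck_v2);

	/* Only len bytes go out, so older firmware never sees
	 * the appended cipher fields. */
	printf("sending %zu bytes\n", len);
	return 0;
}
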
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 74ac65bd545a..95ada51d3f9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -135,6 +135,25 @@ struct iwl_fw_ini_region_err_table {
} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
+ * struct iwl_fw_ini_region_special_device_memory - special device memory
+ *
+ * Configuration to read a special memory
+ *
+ * @type: type of the special memory
+ * @version: version of the special memory
+ * @base_addr: base address of the special memory
+ * @size: size of the special memory
+ * @offset: offset to add to &base_addr
+ */
+struct iwl_fw_ini_region_special_device_memory {
+ __le16 type;
+ __le16 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_SPECIAL_DEVICE_ADDR_API_S_VER_1 */
+
+/**
* struct iwl_fw_ini_region_internal_buffer - internal buffer region data
*
* Configuration to read internal monitor buffer
@@ -185,6 +204,7 @@ struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_region_fifos fifos;
struct iwl_fw_ini_region_err_table err_table;
struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ struct iwl_fw_ini_region_special_device_memory special_mem;
__le32 dram_alloc_id;
__le32 tlv_mask;
}; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
@@ -281,6 +301,7 @@ struct iwl_fw_ini_hcmd_tlv {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation meant for internal SMEM in D3
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -288,6 +309,7 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
+ IWL_FW_INI_ALLOCATION_ID_INTERNAL,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -327,6 +349,7 @@ enum iwl_fw_ini_buffer_location {
* @IWL_FW_INI_REGION_CSR: CSR registers
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
* @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
+ * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -347,6 +370,7 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_DRAM_IMR,
IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
+ IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY,
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
@@ -362,13 +386,13 @@ enum iwl_fw_ini_region_type {
* @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert
* @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error
* @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang
- * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION: DHC cmd response and notif
+ * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: DHC cmd response and notif
* @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification.
* data field holds id and group
* @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point
* @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant
* intervals. data field holds the interval time in msec
- * @IWL_FW_INI_TIME_POINT_WDG_TIMEOUT: watchdog timeout
+ * @IWL_FW_INI_TIME_POINT_RESERVED: reserved
* @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused
* @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout
* @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable
@@ -395,11 +419,11 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_FW_ASSERT,
IWL_FW_INI_TIME_POINT_FW_HW_ERROR,
IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG,
- IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION,
+ IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION,
IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF,
IWL_FW_INI_TIME_POINT_USER_TRIGGER,
IWL_FW_INI_TIME_POINT_PERIODIC,
- IWL_FW_INI_TIME_POINT_WDG_TIMEOUT,
+ IWL_FW_INI_TIME_POINT_RESERVED,
IWL_FW_INI_TIME_POINT_HOST_ASSERT,
IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 1df2e497fabf..465a8e3974e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -321,12 +321,54 @@ struct iwl_tof_responder_config_cmd {
* data (if exists) follows, and then 0-padding again to complete a
* 4-multiple long buffer.
*/
-struct iwl_tof_responder_dyn_config_cmd {
+struct iwl_tof_responder_dyn_config_cmd_v2 {
__le32 lci_len;
__le32 civic_len;
u8 lci_civic[];
} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
+#define IWL_LCI_MAX_SIZE 160
+#define IWL_CIVIC_MAX_SIZE 160
+#define HLTK_11AZ_LEN 32
+
+/**
+ * enum iwl_responder_dyn_cfg_valid_flags - valid flags for dyn_config_cmd
+ * @IWL_RESPONDER_DYN_CFG_VALID_LCI: LCI data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_CIVIC: Civic data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_PASN_STA: the pasn_addr, HLTK and cipher fields
+ * are valid.
+ */
+enum iwl_responder_dyn_cfg_valid_flags {
+ IWL_RESPONDER_DYN_CFG_VALID_LCI = BIT(0),
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC = BIT(1),
+ IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = BIT(2),
+};
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * @cipher: The negotiated cipher. see &enum iwl_location_cipher.
+ * @valid_flags: flags indicating which fields in the command are valid. see
+ * &enum iwl_responder_dyn_cfg_valid_flags.
+ * @lci_len: length of the LCI data in bytes
+ * @civic_len: length of the Civic data in bytes
+ * @lci_buf: the LCI buffer
+ * @civic_buf: the Civic buffer
+ * @hltk_buf: HLTK for secure LTF bits generation for the specified station
+ * @addr: mac address of the station for which to use the HLTK
+ * @reserved: for alignment
+ */
+struct iwl_tof_responder_dyn_config_cmd {
+ u8 cipher;
+ u8 valid_flags;
+ u8 lci_len;
+ u8 civic_len;
+ u8 lci_buf[IWL_LCI_MAX_SIZE];
+ u8 civic_buf[IWL_LCI_MAX_SIZE];
+ u8 hltk_buf[HLTK_11AZ_LEN];
+ u8 addr[ETH_ALEN];
+ u8 reserved[2];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_3 */
+
/**
* struct iwl_tof_range_req_ext_cmd - extended range req for WLS
* @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
@@ -507,7 +549,6 @@ enum iwl_location_bw {
IWL_LOCATION_BW_80MHZ,
};
-#define HLTK_11AZ_LEN 32
#define TK_11AZ_LEN 32
#define LOCATION_BW_POS 4
@@ -552,15 +593,19 @@ struct iwl_tof_range_req_ap_entry_v4 {
* @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
* @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
* @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
+ * @IWL_LOCATION_CIPHER_INVALID: security is not used.
+ * @IWL_LOCATION_CIPHER_MAX: maximum value for this enum.
*/
enum iwl_location_cipher {
IWL_LOCATION_CIPHER_CCMP_128,
IWL_LOCATION_CIPHER_GCMP_128,
IWL_LOCATION_CIPHER_GCMP_256,
+ IWL_LOCATION_CIPHER_INVALID,
+ IWL_LOCATION_CIPHER_MAX,
};
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v6 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
@@ -588,7 +633,7 @@ enum iwl_location_cipher {
* @beacon_interval: beacon interval of the AP in TUs. Only required if
* &IWL_INITIATOR_AP_FLAGS_TB is set.
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v6 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
@@ -607,6 +652,61 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */
/**
+ * struct iwl_tof_range_req_ap_entry_v7 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ */
+struct iwl_tof_range_req_ap_entry_v7 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
+
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -772,7 +872,7 @@ struct iwl_tof_range_req_cmd_v8 {
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
- * struct iwl_tof_range_req_cmd - start measurement cmd
+ * struct iwl_tof_range_req_cmd_v9 - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
@@ -787,7 +887,7 @@ struct iwl_tof_range_req_cmd_v8 {
* TSF of this mac id. 0xff to disable TSF reporting.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2.
*/
-struct iwl_tof_range_req_cmd {
+struct iwl_tof_range_req_cmd_v9 {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@@ -796,9 +896,37 @@ struct iwl_tof_range_req_cmd {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
+/**
+ * struct iwl_tof_range_req_cmd_v11 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v7.
+ */
+struct iwl_tof_range_req_cmd_v11 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
@@ -960,7 +1088,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v5 - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -992,7 +1120,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
* @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
* @reserved: for alignment
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy {
+struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1017,6 +1145,69 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used in for this AP measurement
+ * @specific_calib: val that was used in for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ * @rx_pn: the last PN used for this responder Rx in case PMF is configured in
+ * LE byte order.
+ * @tx_pn: the last PN used for this responder Tx in case PMF is configured in
+ * LE byte order.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6 */
+
+/**
* enum iwl_tof_response_status - tof response status
*
* @IWL_TOF_RESPONSE_SUCCESS: successful range.
@@ -1066,21 +1257,37 @@ struct iwl_tof_range_rsp_ntfy_v6 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
/**
- * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * struct iwl_tof_range_rsp_ntfy_v7 - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
-struct iwl_tof_range_rsp_ntfy {
+struct iwl_tof_range_rsp_ntfy_v7 {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
+/**
+ * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v8 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8 */
+
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
* struct iwl_tof_mcsi_notif - used for debug
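
Worth noting about the v3 responder dynamic config above: the variable-length lci_civic[] blob of v2 becomes fixed-size buffers gated by valid_flags, which is what lets the same command carry a PASN station's HLTK. A sketch of filling it, with the structs reduced to plain stand-in types:

#include <stdio.h>
#include <string.h>

#define LCI_MAX   160
#define CIVIC_MAX 160
#define HLTK_LEN  32
#define ETH_LEN   6

enum {
	DYN_CFG_VALID_LCI      = 1 << 0,
	DYN_CFG_VALID_CIVIC    = 1 << 1,
	DYN_CFG_VALID_PASN_STA = 1 << 2,
};

struct dyn_config_v3 {
	unsigned char cipher, valid_flags, lci_len, civic_len;
	unsigned char lci_buf[LCI_MAX], civic_buf[CIVIC_MAX];
	unsigned char hltk_buf[HLTK_LEN];
	unsigned char addr[ETH_LEN];
	unsigned char reserved[2];
};

int main(void)
{
	struct dyn_config_v3 cmd;
	const unsigned char lci[] = { 0x01, 0x00, 0x08 };	/* dummy IE */

	memset(&cmd, 0, sizeof(cmd));
	cmd.lci_len = sizeof(lci);
	memcpy(cmd.lci_buf, lci, sizeof(lci));
	/* Only fields whose bit is set are consumed by the firmware. */
	cmd.valid_flags = DYN_CFG_VALID_LCI;

	printf("flags=0x%x lci_len=%u\n", (unsigned)cmd.valid_flags,
	       (unsigned)cmd.lci_len);
	return 0;
}
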
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 73fb0030c496..260f9978a6ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,7 +70,7 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_STATION_COUNT_MAX 16
#define IWL_MVM_INVALID_STA 0xFF
enum iwl_ac {
@@ -188,9 +186,17 @@ struct iwl_mac_data_ibss {
/**
* enum iwl_mac_data_policy - policy of the data path for this MAC
* @TWT_SUPPORTED: twt is supported
+ * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to
+ * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT
+ * early termination detection.
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
+ * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
*/
enum iwl_mac_data_policy {
- TWT_SUPPORTED = BIT(0),
+ TWT_SUPPORTED = BIT(0),
+ MORE_DATA_ACK_SUPPORTED = BIT(1),
+ FLEXIBLE_TWT_SUPPORTED = BIT(2),
+ PROTECTED_TWT_SUPPORTED = BIT(3),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index b6c31f01ea9e..55573168444e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -90,6 +90,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* @TAS_CONFIG: &struct iwl_tas_config_cmd
*/
TAS_CONFIG = 0x3,
+
+ /**
+ * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
+ */
+ PNVM_INIT_COMPLETE_NTFY = 0xFE,
};
/**
@@ -476,4 +481,12 @@ struct iwl_lari_config_change_cmd {
__le32 config_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
+/**
+ * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
+ * @status: PNVM image loading status
+ */
+struct iwl_pnvm_init_complete_ntfy {
+ __le32 status;
+} __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */
+
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index b833b80ea3d6..e6a069683462 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -181,15 +179,37 @@ struct iwl_phy_context_cmd_tail {
* @ci: channel info
* @tail: command tail
*/
-struct iwl_phy_context_cmd {
+struct iwl_phy_context_cmd_v1 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
- /* PHY_CONTEXT_DATA_API_S_VER_1 */
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
struct iwl_phy_context_cmd_tail tail;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @lmac_id: the lmac id the phy context belongs to
+ * @ci: channel info
+ * @rxchain_info: ???
+ * @dsp_cfg_flags: set to 0
+ * @reserved: reserved to align to 64 bit
+ */
+struct iwl_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ struct iwl_fw_channel_info ci;
+ __le32 lmac_id;
+ __le32 rxchain_info;
+ __le32 dsp_cfg_flags;
+ __le32 reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
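
The v3 PHY context command replaces the apply_time/tx_param_color tail with a direct lmac_id binding, which pairs with iwl_mvm_get_lmac_id() from binding.h above. A stand-in sketch of filling the new layout (field types and FW_CTXT_ACTION_* values simplified):

#include <stdio.h>

struct channel_info { unsigned char band, channel, width, ctrl_pos; };

struct phy_context_cmd_v3 {
	unsigned int id_and_color;
	unsigned int action;
	struct channel_info ci;
	unsigned int lmac_id;
	unsigned int rxchain_info;
	unsigned int dsp_cfg_flags;	/* per the kernel-doc: set to 0 */
	unsigned int reserved;
};

int main(void)
{
	struct phy_context_cmd_v3 cmd = {
		.id_and_color = 1,
		.action = 1,		/* stand-in for FW_CTXT_ACTION_ADD */
		.ci = { .band = 1, .channel = 36, .width = 2, .ctrl_pos = 0 },
		.lmac_id = 1,		/* e.g. from iwl_mvm_get_lmac_id() */
	};

	printf("lmac %u, channel %u\n", cmd.lmac_id, cmd.ci.channel);
	return 0;
}
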
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index 8991ddffbf5e..0debca6dd037 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,6 +214,15 @@ struct iwl_dts_measurement_notif_v2 {
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
/**
+ * struct iwl_dts_measurement_resp - measurements response
+ *
+ * @temp: the measured temperature
+ */
+struct iwl_dts_measurement_resp {
+ __le32 temp;
+} __packed; /* CMD_DTS_MEASUREMENT_RSP_API_S_VER_1 */
+
+/**
* struct ct_kill_notif - CT-kill entry notification
*
* @temperature: the current temperature in celsius
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 6e1b9b21904e..4e6ad1793d0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,48 +329,56 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */;
+#define IWL_NUM_CHAIN_TABLES 1
+#define IWL_NUM_CHAIN_TABLES_V2 2
#define IWL_NUM_CHAIN_LIMITS 2
#define IWL_NUM_SUB_BANDS 5
+#define IWL_NUM_SUB_BANDS_V2 11
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
- * @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd_v3 {
+struct iwl_dev_tx_power_common {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
- __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+};
+
+/**
+ * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3
+ * @per_chain: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd_v3 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v3: version 3 of the command, embedded here for easier software handling
+ * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4
+ * @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
* @reserved: reserved (padding)
*/
struct iwl_dev_tx_power_cmd_v4 {
- /* v4 is just an extension of v3 - keep this here */
- struct iwl_dev_tx_power_cmd_v3 v3;
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
u8 enable_ack_reduction;
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v3: version 3 of the command, embedded here for easier software handling
+ * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
* @per_chain_restriction_changed: is per_chain_restriction has changed
@@ -381,16 +389,56 @@ struct iwl_dev_tx_power_cmd_v4 {
* @timer_period: timer in milliseconds. if expires FW will change to default
* BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
*/
-struct iwl_dev_tx_power_cmd {
- /* v5 is just an extension of v3 - keep this here */
- struct iwl_dev_tx_power_cmd_v3 v3;
+struct iwl_dev_tx_power_cmd_v5 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
u8 enable_ack_reduction;
u8 per_chain_restriction_changed;
u8 reserved[2];
__le32 timer_period;
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
+/**
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * from last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v6 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v3: version 3 part of the command
+ * @v4: version 4 part of the command
+ * @v5: version 5 part of the command
+ * @v6: version 6 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v3 v3;
+ struct iwl_dev_tx_power_cmd_v4 v4;
+ struct iwl_dev_tx_power_cmd_v5 v5;
+ struct iwl_dev_tx_power_cmd_v6 v6;
+ };
+};
+
#define IWL_NUM_GEO_PROFILES 3
+#define IWL_NUM_BANDS_PER_CHAIN_V1 2
+#define IWL_NUM_BANDS_PER_CHAIN_V2 3
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
@@ -414,11 +462,6 @@ struct iwl_per_chain_offset {
u8 chain_b;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
-struct iwl_per_chain_offset_group {
- struct iwl_per_chain_offset lb;
- struct iwl_per_chain_offset hb;
-} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
-
/**
* struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
@@ -426,20 +469,38 @@ struct iwl_per_chain_offset_group {
*/
struct iwl_geo_tx_power_profiles_cmd_v1 {
__le32 ops;
- struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * struct iwl_geo_tx_power_profile_cmd_v2 - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: BIOS table revision.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v2 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
+ __le32 table_revision;
+} __packed; /* GEO_TX_POWER_LIMIT_VER_2 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd_v3 - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
*/
-struct iwl_geo_tx_power_profiles_cmd {
+struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
- struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2];
__le32 table_revision;
-} __packed; /* GEO_TX_POWER_LIMIT */
+} __packed; /* GEO_TX_POWER_LIMIT_VER_3 */
+
+union iwl_geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd_v1 v1;
+ struct iwl_geo_tx_power_profiles_cmd_v2 v2;
+ struct iwl_geo_tx_power_profiles_cmd_v3 v3;
+};
/**
* struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd
@@ -450,16 +511,26 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* GEO_TX_POWER_LIMIT_RESP */
/**
- * struct iwl_ppag_table_cmd - struct for PER_PLATFORM_ANT_GAIN_CMD cmd.
+ * union iwl_ppag_table_cmd - union for all versions of PPAG command
+ * @v1: version 1, table revision = 0
+ * @v2: version 2, table revision = 1
+ *
* @enabled: 1 if PPAG is enabled, 0 otherwise
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
-struct iwl_ppag_table_cmd {
- __le32 enabled;
- s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
- s8 reserved[2];
-} __packed; /* PER_PLATFORM_ANT_GAIN_CMD */
+union iwl_ppag_table_cmd {
+ struct {
+ __le32 enabled;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ s8 reserved[2];
+ } v1;
+ struct {
+ __le32 enabled;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ s8 reserved[2];
+ } v2;
+} __packed;
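The two PPAG layouts differ only in the number of sub-bands per chain, so one
loop can fill either union member. A sketch under stated assumptions (the
gain source array and the v2 predicate are hypothetical):

static void iwl_ppag_fill(union iwl_ppag_table_cmd *cmd, bool v2,
			  const s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2])
{
	int nbands = v2 ? IWL_NUM_SUB_BANDS_V2 : IWL_NUM_SUB_BANDS;
	int i, j;

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		for (j = 0; j < nbands; j++) {
			if (v2)
				cmd->v2.gain[i][j] = gain[i][j];
			else
				cmd->v1.gain[i][j] = gain[i][j];
		}
	}
}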
/**
* struct iwl_beacon_filter_cmd
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 4347be6491e9..1ea54f643030 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -193,6 +193,8 @@ enum IWL_TLC_HT_BW_RATES {
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(@enum iwl_tlc_mng_cfg_cw)
* @reserved2: reserved
+ * @max_tx_op: max TXOP in usecs for all ACs (BK, BE, VO, VI);
+ * set to zero for no limit.
*/
struct iwl_tlc_config_cmd {
u8 sta_id;
@@ -206,8 +208,9 @@ struct iwl_tlc_config_cmd {
__le16 ht_rates[IWL_TLC_NSS_MAX][2];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
- u8 reserved2[1];
-} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
+ u8 reserved2;
+ __le32 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
* enum iwl_tlc_update_flags - updated fields
@@ -486,6 +489,13 @@ enum {
#define RATE_MCS_HE_106T_POS 28
#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
+/* Bit 30-31: (1) RTS, (2) CTS */
+#define RATE_MCS_RTS_REQUIRED_POS (30)
+#define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS)
+
+#define RATE_MCS_CTS_REQUIRED_POS (31)
+#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
+
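To illustrate the two new bits, a hedged decode of a rate_n_flags word (the
helper and its debug macro are assumptions, not driver code):

static void iwl_print_protection(struct iwl_trans *trans, u32 rate_n_flags)
{
	/* bits 30/31 as defined above */
	bool rts = rate_n_flags & RATE_MCS_RTS_REQUIRED_MSK;
	bool cts = rate_n_flags & RATE_MCS_CTS_REQUIRED_MSK;

	IWL_DEBUG_RATE(trans, "protection: RTS %d CTS %d\n", rts, cts);
}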
/* Link Quality definitions */
/* # entries in rate scale table to support Tx retries */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index b8b36a4f9eb9..8a8a204bfe26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -308,17 +308,11 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
-};
-enum iwl_rx_mpdu_hash_filter {
- IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f,
- IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0,
-};
+ IWL_RX_MPDU_STATUS_KEY = 0x3f0000,
+ IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22),
-enum iwl_rx_mpdu_sta_id_flags {
- IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f,
- IWL_RX_MPDU_SIF_RRF_ABORT = 0x20,
- IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0,
+ IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000,
};
#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f
@@ -560,7 +554,11 @@ struct iwl_rx_mpdu_desc_v3 {
/**
* @raw_xsum: raw xsum value
*/
- __le32 raw_xsum;
+ __be16 raw_xsum;
+ /**
+ * @reserved_xsum: reserved high bits in the raw checksum
+ */
+ __le16 reserved_xsum;
/* DW11 */
/**
* @rate_n_flags: RX rate/flags encoding
@@ -668,15 +666,8 @@ struct iwl_rx_mpdu_desc {
/**
* @status: &enum iwl_rx_mpdu_status
*/
- __le16 status;
- /**
- * @hash_filter: hash filter value
- */
- u8 hash_filter;
- /**
- * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
- */
- u8 sta_id_flags;
+ __le32 status;
+
/* DW6 */
/**
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index c010e6febbf4..d43e0d3f3a12 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -380,7 +380,7 @@ struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;
- __le32 mac_id_n_color;
+ __le32 mac_id_n_color; /* can be used for lmac id when using cmd v12 */
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
__le16 reserved2;
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 318843138490..d41cab4016fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,9 +27,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -383,14 +381,14 @@ struct mvm_statistics_load {
__le32 air_time[MAC_INDEX_AUX];
__le32 byte_count[MAC_INDEX_AUX];
__le32 pkt_count[MAC_INDEX_AUX];
- u8 avg_energy[IWL_MVM_STATION_COUNT];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
struct mvm_statistics_load_v1 {
__le32 air_time[NUM_MAC_INDEX];
__le32 byte_count[NUM_MAC_INDEX];
__le32 pkt_count[NUM_MAC_INDEX];
- u8 avg_energy[IWL_MVM_STATION_COUNT];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_rx {
@@ -466,4 +464,465 @@ struct iwl_statistics_cmd {
__le32 flags;
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTER_NUM 8
+
+/**
+ * enum iwl_fw_statistics_type
+ *
+ * @FW_STATISTICS_OPERATIONAL: operational statistics
+ * @FW_STATISTICS_PHY: phy statistics
+ * @FW_STATISTICS_MAC: mac statistics
+ * @FW_STATISTICS_RX: rx statistics
+ * @FW_STATISTICS_TX: tx statistics
+ * @FW_STATISTICS_DURATION: duration statistics
+ * @FW_STATISTICS_HE: he statistics
+ */
+enum iwl_fw_statistics_type {
+ FW_STATISTICS_OPERATIONAL,
+ FW_STATISTICS_PHY,
+ FW_STATISTICS_MAC,
+ FW_STATISTICS_RX,
+ FW_STATISTICS_TX,
+ FW_STATISTICS_DURATION,
+ FW_STATISTICS_HE,
+}; /* FW_STATISTICS_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_statistics_ntfy_hdr
+ *
+ * @type: struct type
+ * @version: version of the struct
+ * @size: size in bytes
+ */
+struct iwl_statistics_ntfy_hdr {
+ u8 type;
+ u8 version;
+ __le16 size;
+}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
+
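The type/version/size triple makes each statistics block self-describing; a
minimal sketch of walking a notification, under the assumption (not stated in
the API comments) that blocks are packed back to back:

static void iwl_stats_walk(const u8 *data, size_t len)
{
	while (len >= sizeof(struct iwl_statistics_ntfy_hdr)) {
		const struct iwl_statistics_ntfy_hdr *hdr = (const void *)data;
		u16 size = le16_to_cpu(hdr->size);

		if (size < sizeof(*hdr) || size > len)
			break;	/* malformed block, stop walking */
		/* dispatch on hdr->type, e.g. FW_STATISTICS_OPERATIONAL */
		data += size;
		len -= size;
	}
}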
+/**
+ * struct iwl_statistics_operational_ntfy
+ *
+ * @hdr: general statistics header
+ * @flags: bitmap of possible notification structures
+ * @mac_id: mac on which the beacon was received
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @beacon_filter_reason: beacon filter reason
+ * @radio_temperature: radio temperature
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: all beacons (both filtered and not
+ * filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: per MAC RX byte count
+ * @rx_time: rx time
+ * @tx_time: usec the radio is transmitting.
+ * @on_time_rf: The total time in usec the RF is awake.
+ * @on_time_scan: usec the radio is awake due to scan.
+ * @average_energy: in fact it is minus the energy (the value is negated)
+ * @reserved: reserved
+ */
+struct iwl_statistics_operational_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 flags;
+ __le32 mac_id;
+ __le32 beacon_filter_average_energy;
+ __le32 beacon_filter_reason;
+ __le32 radio_temperature;
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 beacon_counter[MAC_INDEX_AUX];
+ __le32 beacon_average_energy[MAC_INDEX_AUX];
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes[MAC_INDEX_AUX];
+ __le64 rx_time;
+ __le64 tx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le32 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 reserved;
+} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */
+
+/**
+ * struct iwl_statistics_phy_ntfy
+ *
+ * @hdr: general statistics header
+ * RX PHY related statistics
+ * @energy_and_config: ???
+ * @rssi_band: @31:24 rssiAllBand_B, 23:16 rssiInBand_B, 15:8
+ * rssiAllBand_A, 7:0 rssiInBand_A
+ * @agc_word: @31:16 agcWord_B, 15:0 agcWord_A
+ * @agc_gain: @19:10 agcGain_B, 9:0 agcGain_A
+ * @dfe_gain: @19:10 dfeGain_B, 9:0 dfeGain_A
+ * @snr_calc_main: @18:0 snrCalcMain
+ * @energy_calc_main: @18:0 energyCalcMain
+ * @snr_calc_aux: @18:0 snrCalcAux
+ * @dsp_dc_estim_a: @27:14 dspDcEstimQA, 13:0 dspDcEstimIA
+ * @dsp_dc_estim_b: @27:14 dspDcEstimQB, 13:0 dspDcEstimIB
+ * @ina_detec_type_and_ofdm_corr_comb: @31:31 inaDetectCckMrc,
+ * 30:27 inaDetectType, 26:0 ofdmCorrComb
+ * @cw_corr_comb: @26:0 cwCorrComb
+ * @rssi_comb: @25:0 rssiComb
+ * @auto_corr_cck: @23:12 autoCck, 11:00 crossCck
+ * @ofdm_fine_freq_and_pina_freq_err: @18:7 ofdmFineFreq, 6:0
+ * ofdmPinaFreqErr
+ * @snrm_evm_main: @31:0 snrmEvmMain
+ * @snrm_evm_aux: @31:0 snrmEvmAux
+ * @rx_rate: @31:0 rate
+ * TX PHY related statistics
+ * @per_chain_enums_and_dsp_atten_a: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_a: @31:16 targetPower_A, 15:0
+ * powerMeasuredCalc_A
+ * @tx_config_as_i_and_ac_a: @31:16 txConfigAsI_A, 15:0
+ * txConfigAc_A
+ * @predist_dcq_and_dci_a: @31:16 predist_dci_A, 15:0
+ * predist_dcq_A
+ * @per_chain_enums_and_dsp_atten_b: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_b: @31:16 targetPower_B, 15:0
+ * powerMeasuredCalc_B
+ * @tx_config_as_i_and_ac_b: @31:16 txConfigAsI_B, 15:0
+ * txConfigAc_B
+ * @predist_dcq_and_dci_b: @31:16 predist_dci_B, 15:0
+ * predist_dcq_B
+ * @tx_rate: @31:0 rate
+ * @tlc_backoff: @31:0 tlcBackoff
+ * @mpapd_calib_mode_mpapd_calib_type_a: @31:16
+ * mpapdCalibMode_A, 15:0 mpapdCalibType_A
+ * @psat_and_phy_power_limit_a: @31:16 psat_A, 15:0
+ * phyPowerLimit_A
+ * @sar_and_regulatory_power_limit_a: @31:16 sarPowerLimit_A,
+ * 15:0 regulatoryPowerLimit_A
+ * @mpapd_calib_mode_mpapd_calib_type_b: @31:16
+ * mpapdCalibMode_B, 15:0 mpapdCalibType_B
+ * @psat_and_phy_power_limit_b: @31:16 psat_B, 15:0
+ * phyPowerLimit_B
+ * @sar_and_regulatory_power_limit_b: @31:16 sarPowerLimit_B,
+ * 15:0 regulatoryPowerLimit_B
+ * @srd_and_driver_power_limits: @31:16 srdPowerLimit, 15:0
+ * driverPowerLimit
+ * @reserved: reserved
+ */
+struct iwl_statistics_phy_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 energy_and_config;
+ __le32 rssi_band;
+ __le32 agc_word;
+ __le32 agc_gain;
+ __le32 dfe_gain;
+ __le32 snr_calc_main;
+ __le32 energy_calc_main;
+ __le32 snr_calc_aux;
+ __le32 dsp_dc_estim_a;
+ __le32 dsp_dc_estim_b;
+ __le32 ina_detec_type_and_ofdm_corr_comb;
+ __le32 cw_corr_comb;
+ __le32 rssi_comb;
+ __le32 auto_corr_cck;
+ __le32 ofdm_fine_freq_and_pina_freq_err;
+ __le32 snrm_evm_main;
+ __le32 snrm_evm_aux;
+ __le32 rx_rate;
+ __le32 per_chain_enums_and_dsp_atten_a;
+ __le32 target_power_and_power_meas_a;
+ __le32 tx_config_as_i_and_ac_a;
+ __le32 predist_dcq_and_dci_a;
+ __le32 per_chain_enums_and_dsp_atten_b;
+ __le32 target_power_and_power_meas_b;
+ __le32 tx_config_as_i_and_ac_b;
+ __le32 predist_dcq_and_dci_b;
+ __le32 tx_rate;
+ __le32 tlc_backoff;
+ __le32 mpapd_calib_mode_mpapd_calib_type_a;
+ __le32 psat_and_phy_power_limit_a;
+ __le32 sar_and_regulatory_power_limit_a;
+ __le32 mpapd_calib_mode_mpapd_calib_type_b;
+ __le32 psat_and_phy_power_limit_b;
+ __le32 sar_and_regulatory_power_limit_b;
+ __le32 srd_and_driver_power_limits;
+ __le32 reserved;
+} __packed; /* STATISTICS_PHY_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_mac_ntfy
+ *
+ * @hdr: general statistics header
+ * @bcast_filter_passed_per_mac: bcast filter passed per mac
+ * @bcast_filter_dropped_per_mac: bcast filter dropped per mac
+ * @bcast_filter_passed_per_filter: bcast filter passed per filter
+ * @bcast_filter_dropped_per_filter: bcast filter dropped per filter
+ * @reserved: reserved
+ */
+struct iwl_statistics_mac_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 bcast_filter_passed_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_dropped_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_passed_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 bcast_filter_dropped_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 reserved;
+} __packed; /* STATISTICS_MAC_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_rx_ntfy
+ *
+ * @hdr: general statistics header
+ * @rx_agg_mpdu_cnt: aggregation frame count (number of
+ * delimiters)
+ * @rx_agg_cnt: number of RX Aggregations
+ * @unsupported_mcs: number of PLCP headers that have rate which
+ * is unsupported by DSP
+ * @bogus_cts: CTS received when not expecting CTS
+ * @bogus_ack: ACK received when not expecting ACK
+ * @rx_byte_count: ???
+ * @rx_packet_count: ???
+ * @missed_beacons: ???
+ * @unresponded_rts: un-responded RTS, due to NAV not zero
+ * @rxe_frame_limit_overrun: RXE got frame limit overrun
+ * @sent_ba_rsp_cnt: BA response TX count
+ * @late_rx_handle: count the number of times the RX path was
+ * aborted due to late entry
+ * @num_bt_kills: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_rx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_agg_mpdu_cnt;
+ __le32 rx_agg_cnt;
+ __le32 unsupported_mcs;
+ __le32 bogus_cts;
+ __le32 bogus_ack;
+ __le32 rx_byte_count[MAC_INDEX_AUX];
+ __le32 rx_packet_count[MAC_INDEX_AUX];
+ __le32 missed_beacons;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ba_rsp_cnt;
+ __le32 late_rx_handle;
+ __le32 num_bt_kills;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_tx_ntfy
+ *
+ * @hdr: general statistics header
+ * @cts_timeout: timeout when waiting for CTS
+ * @ack_timeout: timeout when waiting for ACK
+ * @dump_msdu_cnt: number of MSDUs that were dumped due to any
+ * reason
+ * @burst_abort_missing_next_frame_cnt: number of times a burst
+ * was aborted due to missing next frame bytes in txfifo
+ * number of times a timeout occurred while waiting for CTS/ACK/BA and
+ * energy was detected just after sending the RTS/DATA. These statistics
+ * may help derive interesting indicators, such as the likelihood of a
+ * collision (so the benefit of protection can be weighed against its
+ * cost), or how many of the failures are due to collision and how many
+ * to SNR.
+ * For link quality, the CTS collision indication is more reliable than
+ * the ACK collision indication, as the RTS frame is short and the
+ * frame(s) that caused the collision are more likely to continue after
+ * the RTS was sent.
+ * @cts_timeout_collision: ???
+ * ACK/BA failed and energy was detected after DATA.
+ * Note: to get the collision ratio, compute
+ * ack_or_ba_timeout_collision / (ack_timeout + ba_timeout)
+ * @ack_or_ba_timeout_collision: ???
+ * @ba_timeout: timeout when waiting for immediate BA response
+ * @ba_reschedule_frames: failed to get BA response and
+ * rescheduled all the non-ACKed frames
+ * gives the average number of frames inside an aggregation
+ * @scd_query_agg_frame_cnt: ???
+ * @scd_query_no_agg: scheduler query prevented aggregation
+ * @scd_query_agg: scheduler query allowed aggregation
+ * @scd_query_mismatch: scheduler query inaccurate, either too
+ * short or too long
+ * @agg_terminated_underrun: aggregation was terminated due to
+ * underrun
+ * @agg_terminated_bt_prio_kill: aggregation was terminated due
+ * to BT
+ * @tx_kill_on_long_retry: count the tx frames dropped due to
+ * long retry limit (DATA frame failed)
+ * @tx_kill_on_short_retry: count the tx frames dropped due to
+ * short retry limit (RTS frame failed)
+ * TX defer on energy. This counter is reset on each successful transmit.
+ * When the timer exceeds the TX defer limit, the uCode will assert.
+ * @tx_deffer_counter: ???
+ * @tx_deffer_base_time: Keep the time of the last successful
+ * transmit
+ * @tx_underrun: TX killed due to underrun
+ * @bt_defer: TX deferred due to BT priority, so probably TX was
+ * not started.
+ * @tx_kill_on_dsp_timeout: TX killed on DSP problem detected
+ * @tx_kill_on_immediate_quiet: TX killed due to immediate quiet
+ * @kill_ba_cnt: number of times sending BA failed
+ * @kill_ack_cnt: number of times sending ACK failed
+ * @kill_cts_cnt: number of times sending CTS failed
+ * @burst_terminated: Count burst or fragmentation termination
+ * occurrence
+ * @late_tx_vec_wr_cnt: ???
+ * TX is not sent because ucode failed to notify the TRM in SIFS-delta from
+ * ON_AIR deassertion.
+ * @late_rx2_tx_cnt: ???
+ * @scd_query_cnt: count the times SCD query was done to check
+ * for TX AGG
+ * @tx_frames_acked_in_agg: count the number of frames
+ * transmitted inside AGG and were successful
+ * @last_tx_ch_width_indx: ???
+ * number of deferred TX per channel width, 0 - 20, 1/2/3 - 40/80/160
+ * @rx_detected_per_ch_width: ???
+ * @success_per_ch_width: ???
+ * @fail_per_ch_width: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_tx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 agg_terminated_underrun;
+ __le32 agg_terminated_bt_prio_kill;
+ __le32 tx_kill_on_long_retry;
+ __le32 tx_kill_on_short_retry;
+ __le32 tx_deffer_counter;
+ __le32 tx_deffer_base_time;
+ __le32 tx_underrun;
+ __le32 bt_defer;
+ __le32 tx_kill_on_dsp_timeout;
+ __le32 tx_kill_on_immediate_quiet;
+ __le32 kill_ba_cnt;
+ __le32 kill_ack_cnt;
+ __le32 kill_cts_cnt;
+ __le32 burst_terminated;
+ __le32 late_tx_vec_wr_cnt;
+ __le32 late_rx2_tx_cnt;
+ __le32 scd_query_cnt;
+ __le32 tx_frames_acked_in_agg;
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+ __le32 reserved;
+} __packed; /* STATISTICS_TX_NTFY_API_S_VER_1 */
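Following the collision-ratio note in the kerneldoc above, a small worked
sketch (the zero-denominator guard and percent scaling are added assumptions):

static u32 iwl_tx_collision_pct(const struct iwl_statistics_tx_ntfy *tx)
{
	u32 timeouts = le32_to_cpu(tx->ack_timeout) +
		       le32_to_cpu(tx->ba_timeout);

	if (!timeouts)
		return 0;
	/* ack_or_ba_timeout_collision / (ack_timeout + ba_timeout), in % */
	return 100 * le32_to_cpu(tx->ack_or_ba_timeout_collision) / timeouts;
}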
+
+/**
+ * struct iwl_statistics_duration_ntfy
+ *
+ * @hdr: general statistics header
+ * @cont_burst_chk_cnt: number of times continuation or
+ * fragmentation or bursting was checked
+ * @cont_burst_cnt: number of times continuation or fragmentation
+ * or bursting was successful
+ * @wait_for_silence_timeout_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_duration_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cont_burst_chk_cnt;
+ __le32 cont_burst_cnt;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_DURATION_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_he_ntfy
+ *
+ * @hdr: general statistics header
+ * received HE frames
+ * @rx_siga_valid_cnt: rx HE SIG-A valid
+ * @rx_siga_invalid_cnt: rx HE SIG-A invalid
+ * received HE frames w/ valid Sig-A
+ * @rx_trig_based_frame_cnt: rx HE-TB (trig-based)
+ * @rx_su_frame_cnt: rx HE-SU
+ * @rx_sigb_invalid_cnt: rx (suspected) HE-MU w/ bad SIG-B
+ * @rx_our_bss_color_cnt: rx valid HE SIG-A w/ our BSS color
+ * @rx_other_bss_color_cnt: rx valid HE SIG-A w/ other BSS color
+ * @rx_zero_bss_color_cnt: ???
+ * received HE-MU frames w/ good Sig-B
+ * @rx_mu_for_us_cnt: match AID
+ * @rx_mu_not_for_us_cnt: no matched AID
+ * received HE-MU frames for us (w/ our AID)
+ * @rx_mu_nss_ar: 0 - SISO, 1 - MIMO2
+ * @rx_mu_mimo_cnt: full BW RU, compressed SIG-B
+ * @rx_mu_ru_bw_ar: MU alloc, MHz: 0 - 2, 1 - 5, 2 - 10, 3 - 20,
+ * 4 - 40, 5 - 80, 6 - 160
+ * received trigger frames
+ * @rx_trig_for_us_cnt: ???
+ * @rx_trig_not_for_us_cnt: ???
+ * trigger for us
+ * @rx_trig_with_cs_req_cnt: ???
+ * @rx_trig_type_ar: ???
+ * @rx_trig_in_agg_cnt: ???
+ * basic trigger for us allocations
+ * @rx_basic_trig_alloc_nss_ar: ???
+ * @rx_basic_trig_alloc_mu_mimo_cnt: ???
+ * @rx_basic_trig_alloc_ru_bw_ar: ???
+ * @rx_basic_trig_total_byte_cnt: ???
+ * trig-based TX
+ * @tx_trig_based_cs_req_fail_cnt: ???
+ * @tx_trig_based_sifs_ok_cnt: ???
+ * @tx_trig_based_sifs_fail_cnt: ???
+ * @tx_trig_based_byte_cnt: ???
+ * @tx_trig_based_pad_byte_cnt: ???
+ * @tx_trig_based_frame_cnt: ???
+ * @tx_trig_based_acked_frame_cnt: ???
+ * @tx_trig_based_ack_timeout_cnt: ???
+ * HE-SU TX
+ * @tx_su_frame_cnt: ???
+ * EDCA <--> MU-EDCA transitions
+ * @tx_edca_to_mu_edca_cnt: ???
+ * @tx_mu_edca_to_edca_by_timeout_cnt: ???
+ * @tx_mu_edca_to_edca_by_ack_fail_cnt: ???
+ * @tx_mu_edca_to_edca_by_small_alloc_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_he_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_siga_valid_cnt;
+ __le32 rx_siga_invalid_cnt;
+ __le32 rx_trig_based_frame_cnt;
+ __le32 rx_su_frame_cnt;
+ __le32 rx_sigb_invalid_cnt;
+ __le32 rx_our_bss_color_cnt;
+ __le32 rx_other_bss_color_cnt;
+ __le32 rx_zero_bss_color_cnt;
+ __le32 rx_mu_for_us_cnt;
+ __le32 rx_mu_not_for_us_cnt;
+ __le32 rx_mu_nss_ar[2];
+ __le32 rx_mu_mimo_cnt;
+ __le32 rx_mu_ru_bw_ar[7];
+ __le32 rx_trig_for_us_cnt;
+ __le32 rx_trig_not_for_us_cnt;
+ __le32 rx_trig_with_cs_req_cnt;
+ __le32 rx_trig_type_ar[8 + 1];
+ __le32 rx_trig_in_agg_cnt;
+ __le32 rx_basic_trig_alloc_nss_ar[2];
+ __le32 rx_basic_trig_alloc_mu_mimo_cnt;
+ __le32 rx_basic_trig_alloc_ru_bw_ar[7];
+ __le32 rx_basic_trig_total_byte_cnt;
+ __le32 tx_trig_based_cs_req_fail_cnt;
+ __le32 tx_trig_based_sifs_ok_cnt;
+ __le32 tx_trig_based_sifs_fail_cnt;
+ __le32 tx_trig_based_byte_cnt;
+ __le32 tx_trig_based_pad_byte_cnt;
+ __le32 tx_trig_based_frame_cnt;
+ __le32 tx_trig_based_acked_frame_cnt;
+ __le32 tx_trig_based_ack_timeout_cnt;
+ __le32 tx_su_frame_cnt;
+ __le32 tx_edca_to_mu_edca_cnt;
+ __le32 tx_mu_edca_to_edca_by_timeout_cnt;
+ __le32 tx_mu_edca_to_edca_by_ack_fail_cnt;
+ __le32 tx_mu_edca_to_edca_by_small_alloc_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_HE_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 82d59b5a5f8c..de2e2ca7a3ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,6 +59,7 @@
#ifndef __iwl_fw_api_tx_h__
#define __iwl_fw_api_tx_h__
+#include <linux/ieee80211.h>
/**
* enum iwl_tx_flags - bitmasks for tx_flags in TX command
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 7ea55cfdd8a8..ab4a8b942c81 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1507,6 +1507,27 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int
+iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_special_device_memory *special_mem =
+ &reg->special_mem;
+
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(special_mem->base_addr) +
+ le32_to_cpu(special_mem->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = special_mem->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(special_mem->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
@@ -1636,6 +1657,21 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
return dump->ranges;
}
+static void *
+iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_special_device_memory *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->type = reg->special_mem.type;
+ dump->version = reg->special_mem.version;
+
+ return dump->ranges;
+}
+
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1827,6 +1863,20 @@ iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
}
static u32
+iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->special_mem.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_special_device_memory) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -2125,6 +2175,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_config_iter,
},
+ [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_special_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header,
+ .fill_range = iwl_dump_ini_special_mem_iter,
+ },
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 72bfc64580ab..cb40f509ab61 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -495,6 +495,20 @@ struct iwl_fw_ini_monitor_dump {
} __packed;
/**
+ * struct iwl_fw_ini_special_device_memory - special device memory
+ * @header: header of the region
+ * @type: type of special memory
+ * @version: struct special memory version
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_special_device_memory {
+ struct iwl_fw_ini_error_dump_header header;
+ __le16 type;
+ __le16 version;
+ struct iwl_fw_ini_error_dump_range ranges[];
+} __packed;
+
+/**
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 1fb45fd30ffa..02c64b988a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -90,6 +90,7 @@ struct iwl_ucode_header {
};
#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWL_UCODE_TLV_CONST_BASE 0x100
/*
* new TLV uCode file layout
@@ -145,7 +146,13 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
- IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+ IWL_UCODE_TLV_HW_TYPE = 58,
+ IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+
+ IWL_UCODE_TLV_PNVM_VERSION = 62,
+ IWL_UCODE_TLV_PNVM_SKU = 64,
+
+ IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
@@ -405,8 +412,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* to report the CSI information with (certain) RX frames
* @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
* initiator and responder
- *
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -451,6 +458,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
+ IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index de8cff463dbe..c2a4e60518bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -25,7 +25,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,22 +57,25 @@
#include "img.h"
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
- return IWL_FW_CMD_VER_UNKNOWN;
+ return def;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd)
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
return entry->cmd_ver;
+ }
}
- return IWL_FW_CMD_VER_UNKNOWN;
+ return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
@@ -97,3 +100,43 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
+
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
+static const struct {
+ const char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+const char *iwl_fw_lookup_assert_desc(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_assert_desc);
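A hedged usage example for the new lookup; where the error id comes from
(typically the FW error log) is outside this patch:

static void iwl_report_assert(struct iwl_trans *trans, u32 err_id)
{
	/* the helper masks off the CPU bits via FW_SYSASSERT_CPU_MASK */
	IWL_ERR(trans, "0x%08X | %s\n", err_id,
		iwl_fw_lookup_assert_desc(err_id));
}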
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index a8630bf90b63..f836f3a8567b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +30,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -107,6 +106,7 @@ struct iwl_ucode_capabilities {
u32 flags;
u32 error_log_addr;
u32 error_log_size;
+ u32 num_stations;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
@@ -313,7 +313,8 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+const char *iwl_fw_lookup_assert_desc(u32 num);
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index b373606e1241..f8516c7ca767 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -134,7 +134,8 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC) >= 2 &&
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
new file mode 100644
index 000000000000..6d8f7bff1243
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/******************************************************************************
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ *****************************************************************************/
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "fw/api/commands.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/alive.h"
+
+struct iwl_pnvm_section {
+ __le32 offset;
+ const u8 data[];
+} __packed;
+
+static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_trans *trans = (struct iwl_trans *)data;
+ struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data;
+
+ IWL_DEBUG_FW(trans,
+ "PNVM complete notification received with status %d\n",
+ le32_to_cpu(pnvm_ntf->status));
+
+ return true;
+}
+
+static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+ u32 sha1 = 0;
+ u16 mac_type = 0, rf_id = 0;
+ u8 *pnvm_data = NULL, *tmp;
+ u32 size = 0;
+ int ret;
+
+ IWL_DEBUG_FW(trans, "Handling PNVM section\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_PNVM_VERSION:
+ if (tlv_len < sizeof(__le32)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_PNVM_VERSION (expected %zd, got %d)\n",
+ sizeof(__le32), tlv_len);
+ break;
+ }
+
+ sha1 = le32_to_cpup((__le32 *)data);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
+ sha1);
+ break;
+ case IWL_UCODE_TLV_HW_TYPE:
+ if (tlv_len < 2 * sizeof(__le16)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_HW_TYPE (expected %zd, got %d)\n",
+ 2 * sizeof(__le16), tlv_len);
+ break;
+ }
+
+ mac_type = le16_to_cpup((__le16 *)data);
+ rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
+ mac_type, rf_id);
+
+ if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
+ rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ IWL_DEBUG_FW(trans,
+ "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
+ CSR_HW_REV_TYPE(trans->hw_rev), CSR_HW_RFID_TYPE(trans->hw_rf_id));
+ ret = -ENOENT;
+ goto out;
+ }
+
+ break;
+ case IWL_UCODE_TLV_SEC_RT: {
+ struct iwl_pnvm_section *section = (void *)data;
+ u32 data_len = tlv_len - sizeof(*section);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_SEC_RT len %d\n",
+ tlv_len);
+
+ /* TODO: remove, this is a deprecated separator */
+ if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+ IWL_DEBUG_FW(trans, "Ignoring separator.\n");
+ break;
+ }
+
+ IWL_DEBUG_FW(trans, "Adding data (size %d)\n",
+ data_len);
+
+ tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL);
+ if (!tmp) {
+ IWL_DEBUG_FW(trans,
+ "Couldn't allocate (more) pnvm_data\n");
+
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnvm_data = tmp;
+
+ memcpy(pnvm_data + size, section->data, data_len);
+
+ size += data_len;
+
+ break;
+ }
+ case IWL_UCODE_TLV_PNVM_SKU:
+ IWL_DEBUG_FW(trans,
+ "New PNVM section started, stop parsing.\n");
+ goto done;
+ default:
+ IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+ tlv_type, tlv_len);
+ break;
+ }
+
+ len -= ALIGN(tlv_len, 4);
+ data += ALIGN(tlv_len, 4);
+ }
+
+done:
+ if (!size) {
+ IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ IWL_INFO(trans, "loaded PNVM version 0x%0x\n", sha1);
+
+ ret = iwl_trans_set_pnvm(trans, pnvm_data, size);
+out:
+ kfree(pnvm_data);
+ return ret;
+}
+
+static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+
+ IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+ struct iwl_sku_id *sku_id =
+ (void *)(data + sizeof(*tlv));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+ tlv_len);
+ IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+ le32_to_cpu(sku_id->data[0]),
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ int ret;
+
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
+ ret = iwl_pnvm_handle_section(trans, data, len);
+ if (!ret)
+ return 0;
+ } else {
+ IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+ }
+ } else {
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+ }
+ }
+
+ return -ENOENT;
+}
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait)
+{
+ const struct firmware *pnvm;
+ struct iwl_notification_wait pnvm_wait;
+ static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ PNVM_INIT_COMPLETE_NTFY) };
+ char pnvm_name[64];
+ int ret;
+
+ /* if the SKU_ID is empty, there's nothing to do */
+ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ return 0;
+
+ /* if we already have it, nothing to do either */
+ if (trans->pnvm_loaded)
+ return 0;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
+
+ release_firmware(pnvm);
+ }
+
+ iwl_init_notification_wait(notif_wait, &pnvm_wait,
+ ntf_cmds, ARRAY_SIZE(ntf_cmds),
+ iwl_pnvm_complete_fn, trans);
+
+ /* kick the doorbell */
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_PNVM);
+
+ return iwl_wait_notification(notif_wait, &pnvm_wait,
+ MVM_UCODE_PNVM_TIMEOUT);
+}
+IWL_EXPORT_SYMBOL(iwl_pnvm_load);
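A sketch of the expected call site in an op mode's alive flow; the mvm
pointer and the error-handling policy are assumptions:

static int iwl_opmode_load_pnvm(struct iwl_mvm *mvm)
{
	int ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);

	/* returns 0 right away if sku_id is empty or the PNVM is loaded */
	if (ret)
		IWL_ERR(mvm, "Timeout waiting for PNVM load: %d\n", ret);
	return ret;
}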
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
new file mode 100644
index 000000000000..e4f91bce222d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/******************************************************************************
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PNVM_H__
+#define __IWL_PNVM_H__
+
+#include "fw/notif-wait.h"
+
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait);
+
+#endif /* __IWL_PNVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index b5e5e32b6152..cddcb4d9a264 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -207,7 +207,8 @@ struct iwl_fw_runtime {
u8 sar_chain_b_profile;
struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
u32 geo_rev;
- struct iwl_ppag_table_cmd ppag_table;
+ union iwl_ppag_table_cmd ppag_table;
+ u32 ppag_ver;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e27c13263a23..ca4967b81d01 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -472,6 +472,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QU 0x33
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_QNJ 0x36
+#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -479,6 +480,8 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_JF1 0x108
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
+#define IWL_CFG_RF_TYPE_GF 0x10D
+#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -516,12 +519,14 @@ struct iwl_dev_info {
*/
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -543,7 +548,11 @@ extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const char iwl_ax200_killer_1650x_name[];
-
+extern const char iwl_ax201_killer_1650s_name[];
+extern const char iwl_ax201_killer_1650i_name[];
+extern const char iwl_ma_name[];
+extern const char iwl_ax211_name[];
+extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -641,6 +650,9 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
+extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 9d7a04833cd0..5624fe42efd9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -138,16 +138,16 @@ struct iwl_prph_scratch_control {
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
/*
- * struct iwl_prph_scratch_ror_cfg - ror config
- * @ror_base_addr: ror start address
- * @ror_size: ror size in DWs
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM config
+ * @pnvm_base_addr: PNVM start address
+ * @pnvm_size: PNVM size in DWs
* @reserved: reserved
*/
-struct iwl_prph_scratch_ror_cfg {
- __le64 ror_base_addr;
- __le32 ror_size;
+struct iwl_prph_scratch_pnvm_cfg {
+ __le64 pnvm_base_addr;
+ __le32 pnvm_size;
__le32 reserved;
-} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
/*
* struct iwl_prph_scratch_hwm_cfg - hwm config
@@ -175,14 +175,14 @@ struct iwl_prph_scratch_rbd_cfg {
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
- * @ror_cfg: ror configuration
+ * @pnvm_cfg: PNVM configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_version version;
struct iwl_prph_scratch_control control;
- struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
@@ -291,4 +291,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len);
+
#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index eeaa8cbdddce..76b7bbdf8393 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -225,5 +225,8 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
struct iwl_context_info_dram *ctxt_dram);
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const void *data, u32 len,
+ struct iwl_dram_data *dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 9ce7207d9ec5..51ce93d21ffe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -182,9 +182,13 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
goto err;
- if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
- buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
- alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ goto err;
+
+ if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
goto err;
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
@@ -233,6 +237,13 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
+ /* To safely use a string coming from the FW, make sure it has a
+ * null terminator
+ */
+ reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
+
+ IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
+
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
@@ -947,9 +958,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_rx_packet *pkt = tp_data->fw_pkt;
struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
- (pkt->hdr.cmd == wanted_hdr->cmd &&
- pkt->hdr.group_id == wanted_hdr->group_id))) {
+ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
+ pkt->hdr.group_id == wanted_hdr->group_id)) {
struct iwl_rx_packet *fw_pkt =
kmemdup(pkt,
sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
@@ -1012,6 +1022,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
+ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
+ return;
+
IWL_DEBUG_FW(fwrt,
"WRT: Generating active triggers list, domain 0x%x\n",
fwrt->trans->dbg.domains_bitmap);
@@ -1076,6 +1089,7 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
break;
case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
index e1a41fd503a8..7df173cc9ddc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -121,10 +121,9 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_printk(KERN_DEBUG, dev, "%c %s %pV",
- in_interrupt() ? 'I' : 'U', function, &vaf);
+ dev_printk(KERN_DEBUG, dev, "%s %pV", function, &vaf);
#endif
- trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
+ trace_iwlwifi_dbg(level, function, &vaf);
va_end(args);
}
IWL_EXPORT_SYMBOL(__iwl_dbg);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 063d8add147f..528eba441926 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -139,7 +139,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
-#define IWL_DL_RPM 0x00000400
+#define IWL_DL_WOWLAN 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -205,7 +205,7 @@ do { \
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
-#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 9ad93ef60890..d0467da5af03 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -54,18 +54,16 @@ DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
);
TRACE_EVENT(iwlwifi_dbg,
- TP_PROTO(u32 level, bool in_interrupt, const char *function,
+ TP_PROTO(u32 level, const char *function,
struct va_format *vaf),
- TP_ARGS(level, in_interrupt, function, vaf),
+ TP_ARGS(level, function, vaf),
TP_STRUCT__entry(
__field(u32, level)
- __field(u8, in_interrupt)
__string(function, function)
__dynamic_array(char, msg, MAX_MSG_LEN)
),
TP_fast_assign(
__entry->level = level;
- __entry->in_interrupt = in_interrupt;
__assign_str(function, function);
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
MAX_MSG_LEN, vaf->fmt,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 04f14bfdd091..9dcd2e990c9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -76,6 +76,7 @@
#include "iwl-config.h"
#include "iwl-modparams.h"
#include "fw/api/alive.h"
+#include "fw/api/mac.h"
/******************************************************************************
*
@@ -102,6 +103,9 @@ static struct dentry *iwl_dbgfs_root;
* @fw_index: firmware revision to try loading
* @firmware_name: composite filename of ucode file to load
* @request_firmware_complete: the firmware has been obtained from user space
+ * @dbgfs_drv: debugfs root directory entry
+ * @dbgfs_trans: debugfs transport directory entry
+ * @dbgfs_op_mode: debugfs op_mode directory entry
*/
struct iwl_drv {
struct list_head list;
@@ -1124,6 +1128,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
fseq_ver->version);
}
break;
+ case IWL_UCODE_TLV_FW_NUM_STATIONS:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ if (le32_to_cpup((__le32 *)tlv_data) >
+ IWL_MVM_STATION_COUNT_MAX) {
+ IWL_ERR(drv,
+ "%d is an invalid number of station\n",
+ le32_to_cpup((__le32 *)tlv_data));
+ goto tlv_error;
+ }
+ capa->num_stations =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
struct iwl_umac_debug_addrs *dbg_ptrs =
(void *)tlv_data;
@@ -1319,7 +1336,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
}
}
-/**
+/*
* iwl_req_fw_callback - callback when firmware was loaded
*
* If loaded successfully, copies the firmware into buffers
@@ -1345,6 +1362,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
/* dump all fw memory areas by default */
fw->dbg.dump_mask = 0xffffffff;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index ee410417761d..6d19de3058d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -254,6 +254,65 @@ enum iwl_reg_capa_flags {
REG_CAPA_11AX_DISABLED = BIT(10),
};
+/**
+ * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory
+ * domain (version 2).
+ * @REG_CAPA_V2_STRADDLE_DISABLED: Straddle channels (144, 142, 138) are
+ * disabled.
+ * @REG_CAPA_V2_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 2.4Ghz band is allowed.
+ * @REG_CAPA_V2_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 5Ghz band is allowed.
+ * @REG_CAPA_V2_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_V2_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_V2_WEATHER_DISABLED: Weather radar channels (120, 124, 128, 118,
+ * 126, 122) are disabled.
+ * @REG_CAPA_V2_40MHZ_ALLOWED: 11n channel with a width of 40Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_11AX_DISABLED: 11ax is forbidden for this regulatory domain.
+ */
+enum iwl_reg_capa_flags_v2 {
+ REG_CAPA_V2_STRADDLE_DISABLED = BIT(0),
+ REG_CAPA_V2_BF_CCD_LOW_BAND = BIT(1),
+ REG_CAPA_V2_BF_CCD_HIGH_BAND = BIT(2),
+ REG_CAPA_V2_160MHZ_ALLOWED = BIT(3),
+ REG_CAPA_V2_80MHZ_ALLOWED = BIT(4),
+ REG_CAPA_V2_MCS_8_ALLOWED = BIT(5),
+ REG_CAPA_V2_MCS_9_ALLOWED = BIT(6),
+ REG_CAPA_V2_WEATHER_DISABLED = BIT(7),
+ REG_CAPA_V2_40MHZ_ALLOWED = BIT(8),
+ REG_CAPA_V2_11AX_DISABLED = BIT(13),
+};
+
+/*
+ * API v2 for reg_capa_flags is relevant from version 6 and onwards of the
+ * MCC update command response.
+ */
+#define REG_CAPA_V2_RESP_VER 6
+
+/**
+ * struct iwl_reg_capa - struct for global regulatory capabilities, used for
+ * handling the different APIs of reg_capa_flags.
+ *
+ * @allow_40mhz: 11n channel with a width of 40Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @disable_11ax: 11ax is forbidden for this regulatory domain.
+ */
+struct iwl_reg_capa {
+ u16 allow_40mhz;
+ u16 allow_80mhz;
+ u16 allow_160mhz;
+ u16 disable_11ax;
+};
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -1064,7 +1123,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
- u16 cap_flags,
+ struct iwl_reg_capa reg_capa,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1104,29 +1163,46 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags |= NL80211_RRF_GO_CONCURRENT;
/*
- * cap_flags is per regulatory domain so apply it for every channel
+ * reg_capa is per regulatory domain so apply it for every channel
*/
if (ch_idx >= NUM_2GHZ_CHANNELS) {
- if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ if (!reg_capa.allow_40mhz)
flags |= NL80211_RRF_NO_HT40;
- if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ if (!reg_capa.allow_80mhz)
flags |= NL80211_RRF_NO_80MHZ;
- if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ if (!reg_capa.allow_160mhz)
flags |= NL80211_RRF_NO_160MHZ;
}
-
- if (cap_flags & REG_CAPA_11AX_DISABLED)
+ if (reg_capa.disable_11ax)
flags |= NL80211_RRF_NO_HE;
return flags;
}
+static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver)
+{
+ struct iwl_reg_capa reg_capa;
+
+ if (resp_ver >= REG_CAPA_V2_RESP_VER) {
+ reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED;
+ reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED;
+ reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED;
+ reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED;
+ } else {
+ reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN);
+ reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED;
+ reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED;
+ reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED;
+ }
+ return reg_capa;
+}
+
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u16 cap)
+ u16 geo_info, u16 cap, u8 resp_ver)
{
int ch_idx;
u16 ch_flags;
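
iwl_get_reg_capa() above folds two incompatible flag layouts into one struct; the subtlety is the inverted sense of the 40 MHz bit, which v1 encodes as "forbidden" and v2 as "allowed". A stand-alone sketch of that inversion, with illustrative bit positions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define V1_40MHZ_FORBIDDEN (1u << 8)	/* illustrative bit position */
#define V2_40MHZ_ALLOWED   (1u << 8)	/* illustrative bit position */
#define RESP_VER_V2        6		/* as REG_CAPA_V2_RESP_VER */

static bool allow_40mhz(uint16_t flags, uint8_t resp_ver)
{
        if (resp_ver >= RESP_VER_V2)
                return flags & V2_40MHZ_ALLOWED;	/* v2: allowed bit */
        return !(flags & V1_40MHZ_FORBIDDEN);		/* v1: inverted sense */
}

int main(void)
{
        assert(allow_40mhz(0, 5));			/* v1: clear = allowed */
        assert(!allow_40mhz(V1_40MHZ_FORBIDDEN, 5));
        assert(!allow_40mhz(0, 6));			/* v2: clear = forbidden */
        assert(allow_40mhz(V2_40MHZ_ALLOWED, 6));
        return 0;
}
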
@@ -1139,6 +1215,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int valid_rules = 0;
bool new_rule;
int max_num_ch;
+ struct iwl_reg_capa reg_capa;
if (cfg->uhb_supported) {
max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
@@ -1169,6 +1246,9 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd->alpha2[0] = fw_mcc >> 8;
regd->alpha2[1] = fw_mcc & 0xff;
+ /* parse regulatory capability flags */
+ reg_capa = iwl_get_reg_capa(cap, resp_ver);
+
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = iwl_nl80211_band_from_channel_idx(ch_idx);
@@ -1183,7 +1263,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cap,
+ ch_flags, reg_capa,
cfg);
/* we can't continue the same rule */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index fb0b385d10fd..50bd7fdcf852 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -104,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u16 cap);
+ u16 geo_info, u16 cap, u8 resp_ver);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 8e254c0eda13..fa3f15778fc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -460,6 +460,7 @@ enum {
#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
#define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18)
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
+#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
#define FSEQ_ERROR_CODE 0xA340C8
#define FSEQ_TOP_INIT_VERSION 0xA34038
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index f91197e4ae40..becee92a5fd6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,17 +62,20 @@
#include <linux/kernel.h>
#include <linux/bsearch.h>
+#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
+#include "queue/tx.h"
+#include <linux/dmapool.h>
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops,
- unsigned int cmd_pool_size,
- unsigned int cmd_pool_align)
+ const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
+ int txcmd_size, txcmd_align;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
#endif
@@ -79,6 +84,25 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
if (!trans)
return NULL;
+ trans->trans_cfg = cfg_trans;
+ if (!cfg_trans->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
+
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
@@ -88,22 +112,68 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->ops = ops;
trans->num_rx_queues = 1;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ else
+ trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->trans_cfg->gen2) {
+ trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
+ trans->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans->txqs.bc_pool)
+ return NULL;
+ }
+
+ if (trans->trans_cfg->use_tfh) {
+ trans->txqs.tfd.addr_size = 64;
+ trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans->txqs.tfd.addr_size = 36;
+ trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
+
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- cmd_pool_size, cmd_pool_align,
+ txcmd_size, txcmd_align,
SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+ trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans->txqs.tso_hdr_page) {
+ kmem_cache_destroy(trans->dev_cmd_pool);
+ return NULL;
+ }
+
return trans;
}
void iwl_trans_free(struct iwl_trans *trans)
{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans->txqs.tso_hdr_page, i);
+
+ if (p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans->txqs.tso_hdr_page);
+
kmem_cache_destroy(trans->dev_cmd_pool);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 34788e7afc7b..11a040e75bf3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -73,6 +73,7 @@
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
+#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
@@ -215,6 +216,12 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
+
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
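
The IWL_TRANS_MAX_FRAGS comment above budgets two TBs for the TX command and header plus one for possible linear data in the skb head, leaving max_tbs - 3 for fragments. Assuming the usual descriptor counts (20 for legacy TFDs, 25 for TFH/gen2; treat both as illustrative), the resulting budgets are:

#include <stdio.h>

#define NUM_OF_TBS	20	/* legacy TFD descriptors (assumed) */
#define TFH_NUM_TBS	25	/* TFH/gen2 TFD descriptors (assumed) */
#define MAX_FRAGS(tbs)	((tbs) - 3)	/* 2 cmd/header + 1 skb head */

int main(void)
{
        printf("legacy TFD frag budget: %d\n", MAX_FRAGS(NUM_OF_TBS));
        printf("TFH TFD frag budget:    %d\n", MAX_FRAGS(TFH_NUM_TBS));
        return 0;
}
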
@@ -316,6 +323,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
+#define IWL_9000_MAX_RX_HW_QUEUES 6
/**
* enum iwl_wowlan_status - WoWLAN image/device status
@@ -561,6 +569,8 @@ struct iwl_trans_rxq_dma_data {
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
+ * context info.
*/
struct iwl_trans_ops {
@@ -633,6 +643,7 @@ struct iwl_trans_ops {
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
+ int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
};
/**
@@ -906,19 +917,37 @@ struct iwl_txq {
/**
* struct iwl_trans_txqs - transport tx queues data
*
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ bool bc_table_dword;
+ u8 page_offs;
+ u8 dev_cmd_offs;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
struct {
u8 fifo;
u8 q_id;
unsigned int wdg_timeout;
} cmd;
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
};
/**
@@ -971,11 +1000,13 @@ struct iwl_trans {
u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
+ u32 sku_id[3];
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
bool ltr_enabled;
+ u8 pnvm_loaded:1;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -1425,6 +1456,21 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
trans->ops->sync_nmi(trans);
}
+static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ if (trans->ops->set_pnvm) {
+ int ret = trans->ops->set_pnvm(trans, data, len);
+
+ if (ret)
+ return ret;
+ }
+
+ trans->pnvm_loaded = true;
+
+ return 0;
+}
+
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
@@ -1435,10 +1481,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
- struct device *dev,
- const struct iwl_trans_ops *ops,
- unsigned int cmd_pool_size,
- unsigned int cmd_pool_align);
+ struct device *dev,
+ const struct iwl_trans_ops *ops,
+ const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 4094a4158032..5e731c57e4f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
@@ -86,11 +86,8 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
size = sizeof(cmd);
- if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
- !iwl_mvm_is_cdb_supported(mvm))
- cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
- else
- cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw,
+ phyctxt->channel->band));
} else {
size = IWL_BINDING_CMD_SIZE_V1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index b0268f44b2ea..2487871eac73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -152,12 +152,18 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_D3_DEBUG false
-#define IWL_MVM_USE_TWT false
+#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0
#define IWL_MVM_PHY_FILTER_CHAIN_C 0
#define IWL_MVM_PHY_FILTER_CHAIN_D 0
+#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
+/* 20016 ps is a 6 meter RTT, meaning a 3 meter range */
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2
#endif /* __MVM_CONSTANTS_H */
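
As a quick check of the comment on the smoothing thresholds above: a 20016 ps round-trip time at the speed of light is about 6 m of total path, i.e. about 3 m of one-way range. A stand-alone computation:

#include <stdio.h>

int main(void)
{
        const double c = 299792458.0;	/* speed of light, m/s */
        const double rtt = 20016e-12;	/* 20016 ps round-trip time */
        const double path = c * rtt;	/* total round-trip distance */

        printf("round trip: %.2f m, one-way range: %.2f m\n",
               path, path / 2.0);	/* ~6.00 m and ~3.00 m */
        return 0;
}
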
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 2a94545d737f..d21143495e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -70,6 +70,7 @@
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
+#include "fw/img.h"
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -80,8 +81,11 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
- memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ mvmvif->rekey_data.kek_len = data->kek_len;
+ mvmvif->rekey_data.kck_len = data->kck_len;
+ memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len);
+ memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
+ mvmvif->rekey_data.akm = data->akm & 0xFF;
mvmvif->rekey_data.replay_ctr =
cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
@@ -156,6 +160,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
+ struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
bool error, use_rsc_tsc, use_tkip, configure_keys;
int wep_key_idx;
};
@@ -232,7 +237,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
default:
data->error = true;
return;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ return;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
/*
* Ignore CMAC keys -- the WoWLAN firmware doesn't support them
* but we also shouldn't abort suspend due to that. It does have
@@ -245,8 +255,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
if (sta) {
u64 pn64;
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
- tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+ tkip_sc =
+ data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc =
+ &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
@@ -265,9 +277,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
rx_mic_key = data->tkip->mic_keys.rx_unicast;
} else {
tkip_sc =
- data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc;
rx_p1ks = data->tkip->rx_multi;
rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ data->kek_kck_cmd->gtk_cipher =
+ cpu_to_le32(STA_KEY_FLG_TKIP);
}
/*
@@ -299,16 +313,25 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
data->use_rsc_tsc = true;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
if (sta) {
u64 pn64;
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
- aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+ aes_sc =
+ data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc =
+ &data->rsc_tsc->params.all_tsc_rsc.aes.tsc;
pn64 = atomic64_read(&key->tx_pn);
aes_tx_sc->pn = cpu_to_le64(pn64);
} else {
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ aes_sc =
+ data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc;
+ data->kek_kck_cmd->gtk_cipher =
+ key->cipher == WLAN_CIPHER_SUITE_CCMP ?
+ cpu_to_le32(STA_KEY_FLG_CCM) :
+ cpu_to_le32(STA_KEY_FLG_GCMP);
}
/*
@@ -321,11 +344,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
const u8 *pn;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- ptk_pn = rcu_dereference_protected(
- mvmsta->ptk_pn[key->keyidx],
- lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!ptk_pn))
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
break;
+ }
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
@@ -337,6 +361,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
((u64)pn[1] << 32) |
((u64)pn[0] << 40));
}
+
+ rcu_read_unlock();
} else {
for (i = 0; i < IWL_NUM_RSC; i++) {
u8 *pn = seq.ccmp.pn;
@@ -354,6 +380,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
break;
}
+ IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher);
+
if (data->configure_keys) {
mutex_lock(&mvm->mutex);
/*
@@ -734,7 +762,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 cmd_flags)
{
- struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -743,9 +771,12 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
+ .kek_kck_cmd = &kek_kck_cmd,
};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ u8 cmd_ver;
+ size_t cmd_size;
key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
if (!key_data.rsc_tsc)
@@ -772,10 +803,29 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
}
if (key_data.use_rsc_tsc) {
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WOWLAN_TSC_RSC_PARAM, cmd_flags,
- sizeof(*key_data.rsc_tsc),
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_TSC_RSC_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int size;
+
+ if (ver == 4) {
+ size = sizeof(*key_data.rsc_tsc);
+ key_data.rsc_tsc->sta_id =
+ cpu_to_le32(mvmvif->ap_sta_id);
+
+ } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ size = sizeof(key_data.rsc_tsc->params);
+ } else {
+ ret = 0;
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
+ cmd_flags,
+ size,
key_data.rsc_tsc);
+
if (ret)
goto out;
}
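
The WOWLAN_TSC_RSC_PARAM path above sends the same buffer with a version-dependent length: v2 firmware takes only the leading params block, while v4 also takes the trailing sta_id. A stand-alone sketch of that prefix-sizing idea (the struct layout is illustrative):

#include <stdint.h>
#include <stdio.h>

struct rsc_tsc_params { uint8_t rsc[16]; };	/* illustrative layout */
struct rsc_tsc_cmd {
        struct rsc_tsc_params params;	/* v2 payload ends here */
        uint32_t sta_id;		/* v4 appends the station id */
};

static size_t cmd_payload_size(int ver)
{
        return ver >= 4 ? sizeof(struct rsc_tsc_cmd)
                        : sizeof(struct rsc_tsc_params);
}

int main(void)
{
        printf("v2 sends %zu bytes, v4 sends %zu bytes\n",
               cmd_payload_size(2), cmd_payload_size(4));
        return 0;
}
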
@@ -783,9 +833,27 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (key_data.use_tkip &&
!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_TKIP_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int size;
+
+ if (ver == 2) {
+ size = sizeof(tkip_cmd);
+ key_data.tkip->sta_id =
+ cpu_to_le32(mvmvif->ap_sta_id);
+ } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
+ } else {
+ ret = -EINVAL;
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+
+ /* send relevant data according to CMD version */
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
- cmd_flags, sizeof(tkip_cmd),
+ cmd_flags, size,
&tkip_cmd);
if (ret)
goto out;
@@ -793,18 +861,33 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
/* configure rekey data only if offloaded rekey is supported (d3) */
if (mvmvif->rekey_data.valid) {
- memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ IWL_ALWAYS_LONG_GROUP,
+ WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
+ cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
+ return -EINVAL;
+ if (cmd_ver == 3)
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+ else
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
- NL80211_KCK_LEN);
- kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ mvmvif->rekey_data.kck_len);
+ kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
- NL80211_KEK_LEN);
- kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ mvmvif->rekey_data.kek_len);
+ kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+ kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+
+ IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
+ mvmvif->rekey_data.akm);
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
- sizeof(kek_kck_cmd),
+ cmd_size,
&kek_kck_cmd);
if (ret)
goto out;
@@ -1283,10 +1366,12 @@ static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
- lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!ptk_pn))
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
return;
+ }
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct ieee80211_key_seq seq = {};
@@ -1298,6 +1383,7 @@ static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
memcpy(ptk_pn->q[i].pn[tid],
seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
}
+ rcu_read_unlock();
} else {
for (tid = 0; tid < IWL_NUM_RSC; tid++) {
struct ieee80211_key_seq seq = {};
@@ -1331,6 +1417,8 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -1367,6 +1455,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
/* ignore WEP completely, nothing to do */
return;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_TKIP:
/* we support these */
break;
@@ -1392,6 +1482,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
sta, key);
atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
@@ -1460,6 +1552,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_d3_update_keys, &gtkdata);
+ IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
+ le32_to_cpu(status->num_of_gtk_rekeys));
if (status->num_of_gtk_rekeys) {
struct ieee80211_key_conf *key;
struct {
@@ -1472,13 +1566,26 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
};
__be64 replay_ctr;
+ IWL_DEBUG_WOWLAN(mvm,
+ "Received from FW GTK cipher %d, key index %d\n",
+ conf.conf.cipher, conf.conf.keyidx);
switch (gtkdata.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
conf.conf.keylen = WLAN_KEY_LEN_CCMP;
memcpy(conf.conf.key, status->gtk[0].key,
WLAN_KEY_LEN_CCMP);
break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ memcpy(conf.conf.key, status->gtk[0].key,
+ WLAN_KEY_LEN_GCMP_256);
+ break;
case WLAN_CIPHER_SUITE_TKIP:
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
conf.conf.keylen = WLAN_KEY_LEN_TKIP;
memcpy(conf.conf.key, status->gtk[0].key, 16);
/* leave TX MIC key zeroed, we don't use it anyway */
@@ -1508,15 +1615,60 @@ out:
return true;
}
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ void *_data, int len) \
+{ \
+ struct iwl_wowlan_status *status; \
+ struct iwl_wowlan_status_ ##_ver *data = _data; \
+ int data_size; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return ERR_PTR(-EIO); \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return ERR_PTR(-EIO); \
+ } \
+ \
+ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
+ if (!status) \
+ return ERR_PTR(-ENOMEM); \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = data->replay_ctr; \
+ status->pattern_number = data->pattern_number; \
+ status->non_qos_seq_ctr = data->non_qos_seq_ctr; \
+ memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \
+ sizeof(status->qos_seq_ctr)); \
+ status->wakeup_reasons = data->wakeup_reasons; \
+ status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \
+ status->received_beacons = data->received_beacons; \
+ status->wake_packet_length = data->wake_packet_length; \
+ status->wake_packet_bufsize = data->wake_packet_bufsize; \
+ memcpy(status->wake_packet, data->wake_packet, \
+ le32_to_cpu(status->wake_packet_bufsize)); \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+iwl_mvm_parse_wowlan_status_common(v9)
+
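
The parser-generating macro above works because the version-specific status structs share a common prefix of fields, so one body can copy them regardless of version. A minimal stand-alone demonstration of the same token-pasting pattern (types and fields illustrative):

#include <stdio.h>

struct status_v1 { int wake_reason; int v1_only; };
struct status_v2 { int wake_reason; int v2_only; };
struct status    { int wake_reason; };

/* One macro stamps out a parser per version; ## pastes the suffix into
 * both the function name and the input struct type. */
#define define_parse_common(_ver)					\
static struct status parse_common_ ## _ver(const void *_data)		\
{									\
        const struct status_ ## _ver *data = _data;			\
        struct status s = { .wake_reason = data->wake_reason };	\
        return s;							\
}

define_parse_common(v1)
define_parse_common(v2)

int main(void)
{
        struct status_v1 a = { .wake_reason = 3, .v1_only = 0 };
        struct status_v2 b = { .wake_reason = 7, .v2_only = 0 };

        printf("v1: %d, v2: %d\n", parse_common_v1(&a).wake_reason,
               parse_common_v2(&b).wake_reason);
        return 0;
}
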
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
- struct iwl_wowlan_status_v7 *v7;
struct iwl_wowlan_status *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- int ret, len, status_size, data_size;
+ int ret, len;
u8 notif_ver;
lockdep_assert_held(&mvm->mutex);
@@ -1528,28 +1680,19 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+ /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 7);
+
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
- status_size = sizeof(*v6);
-
- if (len < status_size) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
-
- data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
-
- if (len != (status_size + data_size)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
-
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
- if (!status)
+ status = iwl_mvm_parse_wowlan_status_common_v6(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
goto out_free_resp;
BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
@@ -1574,47 +1717,37 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
* currently used key.
*/
status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+ } else if (notif_ver == 7) {
+ struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
- status->replay_ctr = v6->replay_ctr;
-
- /* everything starting from pattern_number is identical */
- memcpy(&status->pattern_number, &v6->pattern_number,
- offsetof(struct iwl_wowlan_status, wake_packet) -
- offsetof(struct iwl_wowlan_status, pattern_number) +
- data_size);
-
- goto out_free_resp;
- }
-
- v7 = (void *)cmd.resp_pkt->data;
- notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- WOWLAN_GET_STATUSES, 0);
+ status = iwl_mvm_parse_wowlan_status_common_v7(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
+ goto out_free_resp;
- status_size = sizeof(*status);
+ status->gtk[0] = v7->gtk[0];
+ status->igtk[0] = v7->igtk[0];
+ } else if (notif_ver == 9) {
+ struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
- if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9)
- status_size = sizeof(*v7);
+ status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
+ goto out_free_resp;
- if (len < status_size) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
- data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4);
+ status->gtk[0] = v9->gtk[0];
+ status->igtk[0] = v9->igtk[0];
- if (len != (status_size + data_size)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ status->tid_tear_down = v9->tid_tear_down;
+ } else {
+ IWL_ERR(mvm,
+ "Firmware advertises unknown WoWLAN status response %d!\n",
+ notif_ver);
status = ERR_PTR(-EIO);
- goto out_free_resp;
}
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
- if (!status)
- goto out_free_resp;
-
- memcpy(status, v7, status_size);
- memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size);
-
out_free_resp:
iwl_free_resp(&cmd);
return status;
@@ -1647,6 +1780,9 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
+ IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
+ le32_to_cpu(fw_status->wakeup_reasons));
+
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
status.qos_seq_ctr[i] =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 8fae7e707374..3395c4675988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -174,7 +174,7 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
- if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+ if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
return -EINVAL;
if (drain < 0 || drain > 1)
return -EINVAL;
@@ -403,7 +403,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 5ca45915cf7c..a0ce761d0c59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -76,6 +76,103 @@ struct iwl_mvm_loc_entry {
u8 buf[];
};
+struct iwl_mvm_smooth_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ s64 rtt_avg;
+ u64 host_time;
+};
+
+struct iwl_mvm_ftm_pasn_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ u8 cipher;
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+};
+
+int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len)
+{
+ struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
+ GFP_KERNEL);
+ u32 expected_tk_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!pasn)
+ return -ENOBUFS;
+
+ pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+
+ switch (pasn->cipher) {
+ case IWL_LOCATION_CIPHER_CCMP_128:
+ case IWL_LOCATION_CIPHER_GCMP_128:
+ expected_tk_len = WLAN_KEY_LEN_CCMP;
+ break;
+ case IWL_LOCATION_CIPHER_GCMP_256:
+ expected_tk_len = WLAN_KEY_LEN_GCMP_256;
+ break;
+ default:
+ goto out;
+ }
+
+ /*
+ * If associated to this AP and already have security context,
+ * the TK is already configured for this station, so it
+ * shouldn't be set again here.
+ */
+ if (vif->bss_conf.assoc &&
+ !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (!IS_ERR_OR_NULL(sta) && sta->mfp)
+ expected_tk_len = 0;
+ rcu_read_unlock();
+ }
+
+ if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) {
+ IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
+ tk_len, hltk_len);
+ goto out;
+ }
+
+ memcpy(pasn->addr, addr, sizeof(pasn->addr));
+ memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
+
+ if (tk && tk_len)
+ memcpy(pasn->tk, tk, sizeof(pasn->tk));
+
+ list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
+ return 0;
+out:
+ kfree(pasn);
+ return -EINVAL;
+}
+
+void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
+ list) {
+ if (memcmp(entry->addr, addr, sizeof(entry->addr)))
+ continue;
+
+ list_del(&entry->list);
+ kfree(entry);
+ return;
+ }
+}
+
static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
struct iwl_mvm_loc_entry *e, *t;
@@ -84,6 +181,7 @@ static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
mvm->ftm_initiator.req_wdev = NULL;
memset(mvm->ftm_initiator.responses, 0,
sizeof(mvm->ftm_initiator.responses));
+
list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
list_del(&e->list);
kfree(e);
@@ -120,6 +218,30 @@ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
iwl_mvm_ftm_reset(mvm);
}
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
+{
+ INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
+
+ IWL_DEBUG_INFO(mvm,
+ "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
+ IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
+}
+
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_smooth_entry *se, *st;
+
+ list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
+ list) {
+ list_del(&se->list);
+ kfree(se);
+ }
+}
+
static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
@@ -166,7 +288,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_tof_range_req_cmd *cmd,
+ struct iwl_tof_range_req_cmd_v9 *cmd,
struct cfg80211_pmsr_request *req)
{
int i;
@@ -335,7 +457,7 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+ struct iwl_tof_range_req_ap_entry_v6 *target)
{
memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period =
@@ -411,7 +533,7 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+ struct iwl_tof_range_req_ap_entry_v6 *target)
{
int ret;
@@ -421,7 +543,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return ret;
- iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
if (vif->bss_conf.assoc &&
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
@@ -539,7 +661,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
- struct iwl_tof_range_req_cmd cmd;
+ struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
@@ -553,7 +675,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+ struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
if (err)
@@ -563,6 +685,93 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static void iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_tof_range_req_ap_entry_v6 *target = data;
+
+ if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
+ return;
+
+ WARN_ON(!sta->mfp);
+
+ if (WARN_ON(key->keylen > sizeof(target->tk)))
+ return;
+
+ memcpy(target->tk, key->key, key->keylen);
+ target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
+}
+
+static void
+iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_ap_entry_v7 *target)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+ u32 flags = le32_to_cpu(target->initiator_ap_flags);
+
+ if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ IWL_INITIATOR_AP_FLAGS_TB)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
+ continue;
+
+ target->cipher = entry->cipher;
+ memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
+
+ if (vif->bss_conf.assoc &&
+ !memcmp(vif->bss_conf.bssid, target->bssid,
+ sizeof(target->bssid)))
+ ieee80211_iter_keys(mvm->hw, vif, iter, target);
+ else
+ memcpy(target->tk, entry->tk, sizeof(target->tk));
+
+ memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
+ memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
+
+ target->initiator_ap_flags |=
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ return;
+ }
+}
+
+static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v11 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
+ if (err)
+ return err;
+
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -577,9 +786,13 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (new_api) {
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD);
+ TOF_RANGE_REQ_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 11:
+ err = iwl_mvm_ftm_start_v11(mvm, vif, req);
+ break;
case 9:
case 10:
err = iwl_mvm_ftm_start_v9(mvm, vif, req);
@@ -696,6 +909,95 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
return 0;
}
+static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_result *res)
+{
+ struct iwl_mvm_smooth_entry *resp;
+ s64 rtt_avg, rtt = res->ftm.rtt_avg;
+ u32 undershoot, overshoot;
+ u8 alpha;
+ bool found;
+
+ if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
+ return;
+
+ WARN_ON(rtt < 0);
+
+ if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
+ IWL_DEBUG_INFO(mvm,
+ ": %pM: ignore failed measurement. Status=%u\n",
+ res->addr, res->status);
+ return;
+ }
+
+ found = false;
+ list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) {
+ if (!memcmp(res->addr, resp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return;
+
+ memcpy(resp->addr, res->addr, ETH_ALEN);
+ list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
+
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ if (res->host_time - resp->host_time >
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ /* Smooth the results based on the tracked RTT average */
+ undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
+ overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
+ alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
+
+ rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100;
+
+ IWL_DEBUG_INFO(mvm,
+ "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
+ resp->addr, resp->rtt_avg, rtt_avg, rtt);
+
+ /*
+ * update the responder's average RTT results regardless of
+ * the under/over shoot logic below
+ */
+ resp->rtt_avg = rtt_avg;
+
+ /* smooth the results */
+ if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
+ res->ftm.rtt_avg = rtt_avg;
+
+ IWL_DEBUG_INFO(mvm,
+ "undershoot: val=%lld\n",
+ (rtt_avg - rtt));
+ } else if (rtt_avg < rtt && (rtt - rtt_avg) >
+ overshoot) {
+ res->ftm.rtt_avg = rtt_avg;
+ IWL_DEBUG_INFO(mvm,
+ "overshoot: val=%lld\n",
+ (rtt - rtt_avg));
+ }
+
+update_time:
+ resp->host_time = res->host_time;
+}
+
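
The smoothing above is an integer exponential moving average with percent weight alpha; a sample that deviates from the updated average by more than the undershoot/overshoot threshold is reported as the average rather than raw. A stand-alone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

static int64_t smooth_rtt(int64_t *avg, int64_t rtt, unsigned int alpha,
                          int64_t undershoot, int64_t overshoot)
{
        /* integer EMA: new_avg = (alpha * rtt + (100 - alpha) * avg) / 100 */
        *avg = ((int64_t)alpha * rtt + (int64_t)(100 - alpha) * *avg) / 100;

        if (*avg > rtt && *avg - rtt > undershoot)
                return *avg;	/* sample far below average: report average */
        if (*avg < rtt && rtt - *avg > overshoot)
                return *avg;	/* sample far above average: report average */
        return rtt;		/* sample close enough: report it raw */
}

int main(void)
{
        int64_t avg = 100000;	/* running average, picoseconds */

        /* a sample 50000 ps above the average is reported as ~120000 */
        printf("reported: %lld\n",
               (long long)smooth_rtt(&avg, 150000, 40, 20016, 20016));
        return 0;
}
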
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
@@ -715,12 +1017,31 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}
+static void
+iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
+ continue;
+
+ memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
+ memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
+ return;
+ }
+}
+
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
- struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
@@ -733,12 +1054,12 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
}
if (new_api) {
- if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id,
- fw_resp->num_of_aps))
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
+ fw_resp_v8->num_of_aps))
return;
- num_of_aps = fw_resp->num_of_aps;
- last_in_batch = fw_resp->last_report;
+ num_of_aps = fw_resp_v8->num_of_aps;
+ last_in_batch = fw_resp_v8->last_report;
} else {
if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
fw_resp_v5->num_of_aps))
@@ -754,17 +1075,21 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
struct cfg80211_pmsr_result result = {};
- struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
int peer_idx;
if (new_api) {
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
- fw_ap = &fw_resp->ap[i];
- else
+ if (mvm->cmd_ver.range_resp == 8) {
+ fw_ap = &fw_resp_v8->ap[i];
+ iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
+ } else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) {
+ fw_ap = (void *)&fw_resp_v7->ap[i];
+ } else {
fw_ap = (void *)&fw_resp_v6->ap[i];
+ }
- result.final = fw_resp->ap[i].last_burst;
+ result.final = fw_ap->last_burst;
result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
result.ap_tsf_valid = 1;
} else {
@@ -830,6 +1155,8 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_ftm_get_lci_civic(mvm, &result);
+ iwl_mvm_ftm_rtt_smoothing(mvm, &result);
+
cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
mvm->ftm_initiator.req,
&result, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 0b6c32098b5a..dd3662b9a5bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,18 @@
#include "mvm.h"
#include "constants.h"
+struct iwl_mvm_pasn_sta {
+ struct list_head list;
+ struct iwl_mvm_int_sta int_sta;
+ u8 addr[ETH_ALEN];
+};
+
+struct iwl_mvm_pasn_hltk_data {
+ u8 *addr;
+ u8 cipher;
+ u8 *hltk;
+};
+
static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
u8 *bw, u8 *ctrl_ch_position)
{
@@ -137,7 +149,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
.sta_id = mvmvif->bcast_sta.sta_id,
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD);
+ TOF_RESPONDER_CONFIG_CMD, 6);
int err;
lockdep_assert_held(&mvm->mutex);
@@ -162,11 +174,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
}
static int
-iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_ftm_responder_params *params)
+iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
{
- struct iwl_tof_responder_dyn_config_cmd cmd = {
+ struct iwl_tof_responder_dyn_config_cmd_v2 cmd = {
.lci_len = cpu_to_le32(params->lci_len + 2),
.civic_len = cpu_to_le32(params->civicloc_len + 2),
};
@@ -207,6 +219,171 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd(mvm, &hcmd);
}
+static int
+iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params,
+ struct iwl_mvm_pasn_hltk_data *hltk_data)
+{
+ struct iwl_tof_responder_dyn_config_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
+ LOCATION_GROUP, 0),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ /* may not be able to DMA from stack */
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.valid_flags = 0;
+
+ if (params) {
+ if (params->lci_len + 2 > sizeof(cmd.lci_buf) ||
+ params->civicloc_len + 2 > sizeof(cmd.civic_buf)) {
+ IWL_ERR(mvm,
+ "LCI/civic data too big (lci=%zd, civic=%zd)\n",
+ params->lci_len, params->civicloc_len);
+ return -ENOBUFS;
+ }
+
+ cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.lci_buf[1] = params->lci_len;
+ memcpy(cmd.lci_buf + 2, params->lci, params->lci_len);
+ cmd.lci_len = params->lci_len + 2;
+
+ cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.civic_buf[1] = params->civicloc_len;
+ memcpy(cmd.civic_buf + 2, params->civicloc,
+ params->civicloc_len);
+ cmd.civic_len = params->civicloc_len + 2;
+
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI |
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC;
+ }
+
+ if (hltk_data) {
+ if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) {
+ IWL_ERR(mvm, "invalid cipher: %u\n",
+ hltk_data->cipher);
+ return -EINVAL;
+ }
+
+ cmd.cipher = hltk_data->cipher;
+ memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr));
+ memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf));
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA;
+ }
+
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
+{
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+
+ switch (cmd_ver) {
+ case 2:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif,
+ params);
+ break;
+ case 3:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif,
+ params, NULL);
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n",
+ cmd_ver);
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_pasn_sta *sta)
+{
+ list_del(&sta->list);
+ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
+ iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ kfree(sta);
+}
+
+int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len)
+{
+ int ret;
+ struct iwl_mvm_pasn_sta *sta = NULL;
+ struct iwl_mvm_pasn_hltk_data hltk_data = {
+ .addr = addr,
+ .hltk = hltk,
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver < 3) {
+ IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
+ return -ENOTSUPP;
+ }
+
+ hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+ if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
+ IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
+ return -EINVAL;
+ }
+
+ if (tk && tk_len) {
+ sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ if (!sta)
+ return -ENOBUFS;
+
+ ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
+ cipher, tk, tk_len);
+ if (ret) {
+ kfree(sta);
+ return ret;
+ }
+
+ memcpy(sta->addr, addr, ETH_ALEN);
+ list_add_tail(&sta->list, &mvm->resp_pasn_list);
+ }
+
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data);
+ if (ret && sta)
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+
+ return ret;
+}
+
+int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct iwl_mvm_pasn_sta *sta, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) {
+ if (!memcmp(sta->addr, addr, ETH_ALEN)) {
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+ return 0;
+ }
+ }
+
+ IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr);
+ return -EINVAL;
+}
+
int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -255,12 +432,24 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_pasn_sta *sta, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list)
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+}
+
void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
if (!vif->bss_conf.ftm_responder)
return;
+ iwl_mvm_ftm_responder_clear(mvm, vif);
iwl_mvm_ftm_start_responder(mvm, vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 95a613537047..6385b9641126 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -70,6 +70,7 @@
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"
+#include "fw/pnvm.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -77,8 +78,8 @@
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
-#define MVM_UCODE_ALIVE_TIMEOUT HZ
-#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
+#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
+#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
#define UCODE_VALID_OK cpu_to_le32(0x1)
@@ -132,7 +133,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- /* Do not configure default queue, it is configured via context info */
+ /*
+ * The default queue is configured via context info, so if we
+ * have a single queue, there's nothing to do here.
+ */
+ if (mvm->trans->num_rx_queues == 1)
+ return 0;
+
+ /* skip the default queue */
num_queues = mvm->trans->num_rx_queues - 1;
size = struct_size(cmd, data, num_queues);
@@ -210,25 +218,55 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
- struct mvm_alive_resp_v3 *palive3;
- struct mvm_alive_resp *palive;
struct iwl_umac_alive *umac;
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
- u32 lmac_error_event_table, umac_error_event_table;
+ u32 lmac_error_event_table, umac_error_table;
+
+ /*
+	 * For v5 and above we can check the notification version;
+	 * for older versions we need to check the payload size.
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0) == 5) {
+ struct iwl_alive_ntf_v5 *palive;
- if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
umac = &palive->umac_data;
lmac1 = &palive->lmac_data[0];
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- } else {
+
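+		/* the SKU ID reported here is later used to pick the matching PNVM data */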
+ mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
+ mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
+ mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+
+ IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
+ mvm->trans->sku_id[0],
+ mvm->trans->sku_id[1],
+ mvm->trans->sku_id[2]);
+ } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
+ struct iwl_alive_ntf_v4 *palive;
+
+ palive = (void *)pkt->data;
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+ } else if (iwl_rx_packet_payload_len(pkt) ==
+ sizeof(struct iwl_alive_ntf_v3)) {
+ struct iwl_alive_ntf_v3 *palive3;
+
palive3 = (void *)pkt->data;
umac = &palive3->umac_data;
lmac1 = &palive3->lmac_data;
status = le16_to_cpu(palive3->status);
+ } else {
+ WARN(1, "unsupported alive notification (size %d)\n",
+ iwl_rx_packet_payload_len(pkt));
+		/* the caller will get a timeout later */
+ return false;
}
lmac_error_event_table =
@@ -239,26 +277,22 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->trans->dbg.lmac_error_event_table[1] =
le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
- umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
- if (!umac_error_event_table) {
- mvm->support_umac_log = false;
- } else if (umac_error_event_table >=
- mvm->trans->cfg->min_umac_error_event_table) {
- mvm->support_umac_log = true;
- } else {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- umac_error_event_table,
- (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
- "Init" : "RT");
- mvm->support_umac_log = false;
+ if (umac_error_table) {
+ if (umac_error_table >=
+ mvm->trans->cfg->min_umac_error_event_table) {
+ iwl_fw_umac_set_alive_err_table(mvm->trans,
+ umac_error_table);
+ } else {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ umac_error_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ }
}
- if (mvm->support_umac_log)
- iwl_fw_umac_set_alive_err_table(mvm->trans,
- umac_error_event_table);
-
alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
@@ -310,7 +344,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
- static const u16 alive_cmd[] = { MVM_ALIVE };
+ static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
bool run_in_rfkill =
ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
@@ -390,6 +424,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
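+	/*
+	 * Load the platform NVM (PNVM) data before marking the FW alive;
+	 * the payload is matched against the SKU ID from the alive
+	 * notification.
+	 */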
+ ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
+ if (ret) {
+ IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return ret;
+ }
+
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -590,7 +631,8 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
mvm->fw->default_calib[ucode_type].flow_trigger;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONFIGURATION_CMD);
+ PHY_CONFIGURATION_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
@@ -740,28 +782,42 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
- union {
- struct iwl_dev_tx_power_cmd v5;
- struct iwl_dev_tx_power_cmd_v4 v4;
- } cmd = {
- .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
+ __le16 *per_chain;
int ret;
u16 len = 0;
-
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+ u32 n_subbands;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ REDUCE_TX_POWER_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+
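+	/*
+	 * Each command version has its own struct layout; pick the
+	 * matching length, sub-band count and per-chain table.
+	 */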
+ if (cmd_ver == 6) {
+ len = sizeof(cmd.v6);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v6.per_chain[0][0];
+ } else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
len = sizeof(cmd.v5);
- else if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(struct iwl_dev_tx_power_cmd_v4);
- else
- len = sizeof(cmd.v4.v3);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v5.per_chain[0][0];
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
+ len = sizeof(cmd.v4);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v4.per_chain[0][0];
+ } else {
+ len = sizeof(cmd.v3);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v3.per_chain[0][0];
+ }
+ /* all structs have the same common part, add it */
+ len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt,
- cmd.v5.v3.per_chain_restriction,
- prof_a, prof_b);
+ ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -773,21 +829,26 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
- union geo_tx_power_profiles_cmd geo_tx_cmd;
+ union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
+ struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
struct iwl_host_cmd cmd;
-
- if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- geo_tx_cmd.geo_cmd.ops =
- cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_tx_cmd.geo_cmd);
- } else {
- geo_tx_cmd.geo_cmd_v1.ops =
- cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_tx_cmd.geo_cmd_v1);
- }
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ GEO_TX_POWER_LIMIT,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+	/* the ops field is at the same spot for all versions, so set it via v1 */
+ geo_tx_cmd.v1.ops =
+ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
+
+ if (cmd_ver == 3)
+ len = sizeof(geo_tx_cmd.v3);
+ else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER))
+ len = sizeof(geo_tx_cmd.v2);
+ else
+ len = sizeof(geo_tx_cmd.v1);
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
@@ -804,21 +865,55 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
return ret;
}
- ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
+
+ resp = (void *)cmd.resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES))
+ ret = -EIO;
+
iwl_free_resp(&cmd);
return ret;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
- u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
- union geo_tx_power_profiles_cmd cmd;
+ union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
+ u32 n_bands;
int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ GEO_TX_POWER_LIMIT,
+ IWL_FW_CMD_VER_UNKNOWN);
- cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops));
+	/* the ops field is at the same spot for all versions, so set it via v1 */
+ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+ if (cmd_ver == 3) {
+ len = sizeof(cmd.v3);
+ n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+ cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(cmd.v2);
+ n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+ cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else {
+ len = sizeof(cmd.v1);
+ n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+ }
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table));
+	/* the table is at the same position for all versions, so use v1 */
+ ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands);
- ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
* but in that case there is no need to send the command.
@@ -826,42 +921,61 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
-
- if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
- } else {
- len = sizeof(cmd.geo_cmd);
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data, *enabled;
- int i, j, ret, tbl_rev;
+ union iwl_ppag_table_cmd ppag_table;
+ int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
+ s8 *gain;
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
+ /*
+ * The 'enabled' field is the same in v1 and v2 so we can just
+ * use v1 to access it.
+ */
+ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
+ /* try to read ppag table revision 1 */
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
-
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+ mvm->fwrt.ppag_ver = 2;
+ IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n");
+ goto read_table;
}
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS;
+ gain = mvm->fwrt.ppag_table.v1.gain[0];
+ mvm->fwrt.ppag_ver = 1;
+ IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+read_table:
enabled = &wifi_pkg->package.elements[1];
if (enabled->type != ACPI_TYPE_INTEGER ||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
@@ -869,8 +983,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->fwrt.ppag_table.enabled) {
+ ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
+ if (!ppag_table.v1.enabled) {
ret = 0;
goto out_free;
}
@@ -880,8 +994,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
* first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
* following sub-bands to High-Band (5GHz).
*/
- for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
- for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
union acpi_object *ent;
ent = &wifi_pkg->package.elements[idx++];
@@ -890,11 +1004,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
+ ppag_table.v1.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
- mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
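+			/* gain points at a flat [chain][sub-band] array, hence the manual indexing */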
+ gain[i * num_sub_bands + j] = ent->integer.value;
}
}
ret = 0;
@@ -905,34 +1019,56 @@ out_free:
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
- int i, j, ret;
+ u8 cmd_ver;
+ int i, j, ret, num_sub_bands, cmd_size;
+ union iwl_ppag_table_cmd ppag_table;
+ s8 *gain;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(mvm,
"PPAG capability not supported by FW, command not sent.\n");
return 0;
}
-
- if (!mvm->fwrt.ppag_table.enabled) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG not enabled, command not sent.\n");
+ if (!mvm->fwrt.ppag_table.v1.enabled) {
+ IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
return 0;
}
- IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
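+	/*
+	 * Send the table in the layout the FW expects, truncating or
+	 * padding the locally stored version if the revisions differ.
+	 */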
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS;
+ gain = mvm->fwrt.ppag_table.v1.gain[0];
+ cmd_size = sizeof(ppag_table.v1);
+ if (mvm->fwrt.ppag_ver == 2) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v2 but FW supports v1, sending truncated table\n");
+ }
+ } else if (cmd_ver == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+ cmd_size = sizeof(ppag_table.v2);
+ if (mvm->fwrt.ppag_ver == 1) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
+ return 0;
+ }
- for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
- for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, mvm->fwrt.ppag_table.gain[i][j]);
+ i, j, gain[i * num_sub_bands + j]);
}
}
-
+ IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, sizeof(mvm->fwrt.ppag_table),
- &mvm->fwrt.ppag_table);
+ 0, cmd_size, &ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -989,41 +1125,90 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
+static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2);
- IWL_DEBUG_RADIO(mvm,
- "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
- ret);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
+ ret);
+
+ else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ IWL_DEBUG_RADIO(mvm,
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
+ ret);
- return ret == 1;
+ else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
+ return DSM_VALUE_INDONESIA_ENABLE;
+ }
+ /* default behaviour is disabled */
+ return DSM_VALUE_INDONESIA_DISABLE;
+}
+
+static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
+{
+ int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
+ DSM_FUNC_DISABLE_SRD);
+
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
+ ret);
+
+ else if (ret >= DSM_VALUE_SRD_MAX)
+ IWL_DEBUG_RADIO(mvm,
+ "DSM function DISABLE_SRD return invalid value, ret=%d\n",
+ ret);
+
+ else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
+ return DSM_VALUE_SRD_PASSIVE;
+
+ } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
+ return DSM_VALUE_SRD_DISABLE;
+ }
+ /* default behaviour is active */
+ return DSM_VALUE_SRD_ACTIVE;
}
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
- int ret;
+ u8 ret;
+ int cmd_ret;
struct iwl_lari_config_change_cmd cmd = {};
- if (iwl_mvm_eval_dsm_indonesia_5g2(mvm))
+ if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
cmd.config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ ret = iwl_mvm_eval_dsm_disable_srd(mvm);
+ if (ret == DSM_VALUE_SRD_PASSIVE)
+ cmd.config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+
+ else if (ret == DSM_VALUE_SRD_DISABLE)
+ cmd.config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+
/* apply more config masks here */
if (cmd.config_bitmap) {
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap));
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE),
- 0, sizeof(cmd), &cmd);
- if (ret < 0)
+ IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n");
+ cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 0, sizeof(cmd), &cmd);
+ if (cmd_ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
- ret);
+ cmd_ret);
}
}
#else /* CONFIG_ACPI */
@@ -1260,7 +1445,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
@@ -1274,10 +1459,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- /* Add auxiliary station for scanning */
- ret = iwl_mvm_add_aux_sta(mvm);
- if (ret)
- goto error;
+ /*
+ * Add auxiliary station for scanning.
+	 * Newer versions of this command imply that the fw uses an
+	 * internal aux station for all aux activities that don't
+	 * require a dedicated data queue.
+ */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ /*
+		 * In older versions the aux station uses a mac id, like any
+		 * other station, rather than a lmac id.
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
/* Add all the PHY contexts */
i = 0;
@@ -1383,6 +1581,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
+ iwl_mvm_ftm_initiator_smooth_config(mvm);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -1421,13 +1621,24 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- /* Add auxiliary station for scanning */
- ret = iwl_mvm_add_aux_sta(mvm);
- if (ret)
- goto error;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ /*
+ * Add auxiliary station for scanning.
+		 * Newer versions of this command imply that the fw uses an
+		 * internal aux station for all aux activities that don't
+		 * require a dedicated data queue.
+		 * In older versions the aux station uses a mac id, like any
+		 * other station, rather than a lmac id.
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
return 0;
error:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index b78992e341d5..cbdebefb854a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -665,7 +663,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
* allow multicast data frames only as long as the station is
* authorized, i.e., GTK keys are already installed (if needed)
*/
- if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
struct ieee80211_sta *sta;
rcu_read_lock();
@@ -704,8 +702,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
- if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
+ if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) {
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
+ if (vif->bss_conf.twt_protected)
+ ctxt_sta->data_policy |=
+ cpu_to_le32(PROTECTED_TWT_SUPPORTED);
+ }
}
@@ -1300,8 +1302,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_csa_is_complete(csa_vif)) {
- int c = ieee80211_csa_update_counter(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p &&
@@ -1543,7 +1545,7 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
notif->csa_counter >= 1)
- ieee80211_csa_set_counter(vif, notif->csa_counter);
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
}
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -1595,9 +1597,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
- iwl_mvm_csa_client_absent(mvm, vif);
+ iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9374c85c5caf..688c1125e67b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -234,6 +234,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcc_update_resp *resp;
+ u8 resp_ver;
IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
@@ -252,13 +253,16 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
status == MCC_RESP_ILLEGAL);
}
+ resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+ IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- __le16_to_cpu(resp->cap));
+ __le16_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -662,14 +666,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN) == 3)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_SCAN_START_TIME);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_BSS_PARENT_TSF);
- wiphy_ext_feature_set(hw->wiphy,
- NL80211_EXT_FEATURE_SET_SCAN_DWELL);
}
if (iwl_mvm_is_oce_supported(mvm)) {
@@ -753,6 +760,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROTECTED_TWT);
+
hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
@@ -813,7 +824,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
/* mac80211 holds rcu read lock */
sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
if (IS_ERR_OR_NULL(sta))
@@ -1203,6 +1214,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_ftm_initiator_smooth_stop(mvm);
+
/* firmware counters are obviously reset now, but we shouldn't
* partially track so also clear the fw_reset_accu counters.
*/
@@ -1210,13 +1223,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
- /*
- * The work item could be running or queued if the
- * ROC time event stops just as we get here.
- */
- flush_work(&mvm->roc_done_wk);
-
- iwl_mvm_rm_aux_sta(mvm);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+ iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@@ -1271,6 +1279,12 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
@@ -1300,27 +1314,32 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
int len;
- union {
- struct iwl_dev_tx_power_cmd v5;
- struct iwl_dev_tx_power_cmd_v4 v4;
- } cmd = {
- .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v5.v3.mac_context_id =
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .common.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ REDUCE_TX_POWER_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_REDUCE_TX_POWER))
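+	/* pick the per-version struct length; the common header is added below */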
+ if (cmd_ver == 6)
+ len = sizeof(cmd.v6);
+ else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER))
len = sizeof(cmd.v5);
else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v4);
else
- len = sizeof(cmd.v4.v3);
+ len = sizeof(cmd.v3);
+
+ /* all structs have the same common part, add it */
+ len += sizeof(cmd.common);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -1352,9 +1371,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
goto out_unlock;
}
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
@@ -1392,10 +1409,14 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
- WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(MAC_CONF_GROUP,
- CHANNEL_SWITCH_TIME_EVENT_CMD),
- 0, sizeof(cmd), &cmd));
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_remove_csa_period(mvm, vif);
+ else
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
mutex_unlock(&mvm->mutex);
WARN_ON(iwl_mvm_post_channel_switch(hw, vif));
@@ -2629,6 +2650,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_update_quotas(mvm, false, NULL);
+ iwl_mvm_ftm_responder_clear(mvm, vif);
+
/*
* This is not very nice, but the simplest:
* For older FWs removing the mcast sta before the bcast station may
@@ -2864,7 +2887,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_sta *mvmsta;
bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
- if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
@@ -3428,15 +3451,16 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
*/
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
ret = -EOPNOTSUPP;
- else
- ret = 0;
+ break;
+ }
if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
!iwl_mvm_has_new_tx_api(mvm)) {
key->hw_key_idx = STA_KEY_IDX_INVALID;
+ ret = 0;
break;
}
@@ -3452,6 +3476,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
ret = -ENOSPC;
+ else
+ ret = 0;
break;
}
@@ -3693,9 +3719,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
tail->apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
- channel->hw_value, req_dur, duration, delay,
- dtim_interval);
+ "ROC: Requesting to remain on channel %u for %ums\n",
+ channel->hw_value, req_dur);
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+ duration, delay, dtim_interval);
+
/* Set the node address */
memcpy(tail->node_addr, vif->addr, ETH_ALEN);
@@ -3780,6 +3809,17 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
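+			/*
+			 * From ADD_STA version 12 the FW no longer keeps a
+			 * permanent aux station, so allocate one here, bound
+			 * to the lmac serving the requested band.
+			 */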
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12) {
+ u32 lmac_id;
+
+ lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
+ channel->band);
+ ret = iwl_mvm_add_aux_sta(mvm, lmac_id);
+ if (WARN(ret,
+ "Failed to allocate aux station"))
+ goto out_unlock;
+ }
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
goto out_unlock;
@@ -4658,7 +4698,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
}
mutex_lock(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
struct ieee80211_sta *sta;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
@@ -4700,7 +4740,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -4714,7 +4754,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
if (drop) {
- if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+ if (iwl_mvm_flush_sta(mvm, mvmsta, false))
IWL_ERR(mvm, "flush request fail\n");
} else {
msk |= mvmsta->tfd_queue_msk;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index e2f7f6ec711e..7159d1da3e77 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -184,11 +184,6 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
-union geo_tx_power_profiles_cmd {
- struct iwl_geo_tx_power_profiles_cmd geo_cmd;
- struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
-};
-
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -421,7 +416,11 @@ struct iwl_mvm_vif {
#ifdef CONFIG_PM
/* WoWLAN GTK rekey data */
struct {
- u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_EXT_LEN];
+ u8 kek[NL80211_KEK_EXT_LEN];
+ size_t kek_len;
+ size_t kck_len;
+ u32 akm;
__le64 replay_ctr;
bool valid;
} rekey_data;
@@ -852,7 +851,6 @@ struct iwl_mvm {
bool hw_registered;
bool rfkill_safe_init_done;
- bool support_umac_log;
u32 ampdu_ref;
bool ampdu_toggle;
@@ -890,7 +888,7 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
- struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -1113,10 +1111,17 @@ struct iwl_mvm {
struct wireless_dev *req_wdev;
struct list_head loc_list;
int responses[IWL_MVM_TOF_MAX_APS];
+ struct {
+ struct list_head resp;
+ } smooth;
+ struct list_head pasn_list;
} ftm_initiator;
+ struct list_head resp_pasn_list;
+
struct {
u8 d0i3_resp;
+ u8 range_resp;
} cmd_ver;
struct ieee80211_vif *nan_vif;
@@ -1200,7 +1205,7 @@ iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
- if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
return NULL;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
@@ -1217,7 +1222,7 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
- if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
return NULL;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
@@ -1523,7 +1528,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
u16 tids, u32 flags);
@@ -1996,6 +2001,14 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *addr);
+int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len);
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* FTM initiator */
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm);
@@ -2006,6 +2019,12 @@ void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm,
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request);
void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
+int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len);
+void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr);
/* TDLS */
@@ -2146,8 +2165,24 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
}
+
+static inline
+enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ return IWL_LOCATION_CIPHER_CCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return IWL_LOCATION_CIPHER_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return IWL_LOCATION_CIPHER_GCMP_256;
+ default:
+ return IWL_LOCATION_CIPHER_INVALID;
+ }
+}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d095ff847be9..f1c5b3a9c26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -133,6 +133,7 @@ module_exit(iwl_mvm_exit);
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_trans_debug *dbg = &mvm->trans->dbg;
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
u32 reg_val = 0;
u32 phy_config = iwl_mvm_get_phy_config(mvm);
@@ -169,7 +170,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
- if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
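+	/*
+	 * Also set the D3 debug bit when an INI-defined internal
+	 * monitor buffer allocation is configured.
+	 */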
+ if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) ||
+ (iwl_trans_dbg_ini_valid(mvm->trans) &&
+ dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location)
+ )
reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
@@ -319,7 +323,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
- HCMD_NAME(MVM_ALIVE),
+ HCMD_NAME(UCODE_ALIVE_NTFY),
HCMD_NAME(REPLY_ERROR),
HCMD_NAME(ECHO_CMD),
HCMD_NAME(INIT_COMPLETE_NOTIF),
@@ -463,15 +467,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
-static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
- HCMD_NAME(DBGC_SUSPEND_RESUME),
- HCMD_NAME(BUFFER_ALLOCATION),
- HCMD_NAME(MFU_ASSERT_DUMP_NTF),
-};
-
-/* Please keep this array *SORTED* by hex value.
- * Access is done through binary search
- */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
@@ -622,11 +617,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
enum iwl_amsdu_size rb_size_default;
/*
- * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
* index all over the driver - check that its value corresponds to the
* array size.
*/
- BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
+ IWL_MVM_STATION_COUNT_MAX);
/********************************
* 1. Allocating and configuring HW data
@@ -695,6 +691,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock);
INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
+ INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
+ INIT_LIST_HEAD(&mvm->resp_pasn_list);
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -724,6 +722,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;
+ mvm->cmd_ver.range_resp =
+ iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_RESPONSE_NOTIF, 5);
+ /* we only support up to version 8 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8))
+ goto out_free;
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -1106,7 +1111,7 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
mvm->tvqm_info[hw_queue].sta_id :
mvm->queue_info[hw_queue].ra_sta_id;
- if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 0243dbe8ac49..bf2fc44dcb8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -7,8 +7,8 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -125,30 +125,19 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
*/
static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- u32 action, u32 apply_time)
+ u32 action)
{
- memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
-
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
ctxt->color));
cmd->action = cpu_to_le32(action);
- cmd->apply_time = cpu_to_le32(apply_time);
}
-/*
- * Add the phy configuration to the PHY context command
- */
-static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
- struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
- u8 chains_static, u8 chains_dynamic)
+static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ __le32 *rxchain_info,
+ u8 chains_static,
+ u8 chains_dynamic)
{
u8 active_cnt, idle_cnt;
- struct iwl_phy_context_cmd_tail *tail =
- iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* Set rx the chains */
idle_cnt = chains_static;
@@ -166,20 +155,56 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
- tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
- tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
- tail->rxchain_info |= cpu_to_le32(active_cnt <<
+ *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ *rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (unlikely(mvm->dbgfs_rx_phyinfo))
- tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+ *rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd_v1 *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct iwl_phy_context_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
+ chains_static, chains_dynamic);
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw,
+ chandef->chan->band));
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
+ chains_static, chains_dynamic);
+}
+
+/*
 * Send a command to apply the current phy configuration. The command is sent
* only if something in the configuration changed: in case that this is the
* first time that the phy configuration is applied or in case that the phy
@@ -189,20 +214,46 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic,
- u32 action, u32 apply_time)
+ u32 action)
{
- struct iwl_phy_context_cmd cmd;
int ret;
- u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
-
- /* Set the command header fields */
- iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ PHY_CONTEXT_CMD, 1);
+
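+	/*
+	 * v3 moved the channel info and rxchain into the fixed part of
+	 * the command and added the lmac id, so the two layouts are
+	 * built separately.
+	 */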
+ if (ver == 3) {
+ struct iwl_phy_context_cmd cmd = {};
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, sizeof(cmd), &cmd);
+ } else if (ver < 3) {
+ struct iwl_phy_context_cmd_v1 cmd = {};
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt,
+ (struct iwl_phy_context_cmd *)&cmd,
+ action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, len, &cmd);
+ } else {
+ IWL_ERR(mvm, "PHY ctxt cmd error ver %d not supported\n", ver);
+ return -EOPNOTSUPP;
+ }
- /* Set the command data */
- iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
- chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;
@@ -223,7 +274,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD, 0);
+ FW_CTXT_ACTION_ADD);
}
/*
@@ -257,7 +308,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
/* ... remove it here ...*/
ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_REMOVE, 0);
+ FW_CTXT_ACTION_REMOVE);
if (ret)
return ret;
@@ -269,7 +320,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- action, 0);
+ action);
}
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6f4d241d47e9..e0e80906fdc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -195,14 +195,20 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
{
u16 supp;
int i, highest_mcs;
- u8 nss = sta->rx_nss;
+ u8 max_nss = sta->rx_nss;
+ struct ieee80211_vht_cap ieee_vht_cap = {
+ .vht_cap_info = cpu_to_le32(vht_cap->cap),
+ .supp_mcs = vht_cap->vht_mcs,
+ };
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- nss = 1;
+ max_nss = 1;
- for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
- highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+ for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+ int nss = i + 1;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss);
if (!highest_mcs)
continue;
@@ -211,7 +217,15 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ /*
+ * Check if VHT extended NSS indicates that the bandwidth/NSS
+ * configuration is supported - only for MCS 0 since we already
+ * decoded the MCS bits anyway ourselves.
+ */
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ ieee80211_get_vht_max_nss(&ieee_vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true, nss) >= nss)
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160];
}
@@ -454,6 +468,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
.amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
+ u16 cmd_size = sizeof(cfg_cmd);
+
+ /* In old versions of the API the struct is 4 bytes smaller */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD, 0) < 3)
+ cmd_size -= 4;
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -468,7 +488,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd),
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 86b2ebb5d5fb..ed7382e7ea17 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -830,6 +830,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
return ucode_rate;
}
+	/* Set RTS protection for all non-legacy rates.
+	 * This helps in congested environments by reducing the collision cost
+	 * to RTS retries only, instead of the entire BA packet.
+ */
+ ucode_rate |= RATE_MCS_RTS_REQUIRED_MSK;
+
if (is_ht(rate)) {
if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 77b8def26edb..0059c83c2783 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -420,7 +420,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
- if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -569,7 +569,8 @@ struct iwl_mvm_stat_data {
__le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
- void *general;
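+	/* point directly at the per-API-version arrays instead of a raw 'general' blob */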
+ __le32 *beacon_counter;
+ u8 *beacon_average_energy;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -589,23 +590,10 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
- if (iwl_mvm_has_new_rx_stats_api(mvm)) {
- struct mvm_statistics_general *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- } else {
- struct mvm_statistics_general_v8 *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- }
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(data->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -data->beacon_average_energy[vif_id];
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
@@ -701,18 +689,136 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
}
+static void iwl_mvm_update_avg_energy(struct iwl_mvm *mvm,
+ u8 energy[IWL_MVM_STATION_COUNT_MAX])
+{
+ int i;
+
+ if (WARN_ONCE(mvm->fw->ucode_capa.num_stations >
+ IWL_MVM_STATION_COUNT_MAX,
+ "Driver and FW station count mismatch %d\n",
+ mvm->fw->ucode_capa.num_stations))
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = energy[i];
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
+ __le32 *rx_bytes_le)
+{
+ int i;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 rx_bytes = le32_to_cpu(rx_bytes_le[i]);
+ u32 airtime = le32_to_cpu(air_time_le[i]);
+
+ mdata->rx.airtime += airtime;
+ mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+ if (airtime) {
+ /* re-init every time to store rate from FW */
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+ rx_bytes * 8 / airtime);
+ }
+ }
+ spin_unlock(&mvm->tcm.lock);
+}
+
+static void
+iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_stat_data data = {
+ .mvm = mvm,
+ };
+ u8 beacon_average_energy[MAC_INDEX_AUX];
+ u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_statistics_operational_ntfy *stats;
+ int expected_size;
+ __le32 flags;
+ int i;
+
+ expected_size = sizeof(*stats);
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
+ "received invalid statistics size (%d)!, expected_size: %d\n",
+ iwl_rx_packet_payload_len(pkt), expected_size))
+ return;
+
+ stats = (void *)&pkt->data;
+
+ if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL ||
+ stats->hdr.version != 1,
+ "received unsupported hdr type %d, version %d\n",
+ stats->hdr.type, stats->hdr.version))
+ return;
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan);
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+ data.mac_id = stats->mac_id;
+ data.beacon_filter_average_energy =
+ le32_to_cpu(stats->beacon_filter_average_energy);
+ data.flags = flags;
+ data.beacon_counter = stats->beacon_counter;
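+	/* the TLV format stores energies as __le32; squash them into the u8 array the iterator expects */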
+ for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
+ beacon_average_energy[i] =
+ le32_to_cpu(stats->beacon_average_energy[i]);
+
+ data.beacon_average_energy = beacon_average_energy;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] = le32_to_cpu(stats->average_energy[i]);
+ iwl_mvm_update_avg_energy(mvm, average_energy);
+
+ /*
+	 * Don't update if the statistics were not cleared, since we
+	 * would end up counting the same airtime twice, once in the
+	 * TCM request and once in the statistics notification.
+ */
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, stats->air_time,
+ stats->rx_bytes);
+}
+
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
+ __le32 *bytes, *air_time, flags;
int expected_size;
- int i;
u8 *energy;
- __le32 *bytes;
- __le32 *air_time;
- __le32 flags;
+
+ /* From ver 14 and up we use TLV statistics format */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ STATISTICS_CMD, 0) >= 14)
+ return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
if (iwl_mvm_has_new_rx_api(mvm))
@@ -746,8 +852,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.common.on_time_scan);
- data.general = &stats->general;
-
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
flags = stats->flag;
} else {
struct iwl_notif_statistics *stats = (void *)&pkt->data;
@@ -767,8 +874,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.common.on_time_scan);
- data.general = &stats->general;
-
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
flags = stats->flag;
}
data.flags = flags;
@@ -797,45 +905,16 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
air_time = (void *)&stats->load_stats.air_time;
}
- rcu_read_lock();
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
- struct iwl_mvm_sta *sta;
-
- if (!energy[i])
- continue;
-
- sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
- if (!sta)
- continue;
- sta->avg_energy = energy[i];
- }
- rcu_read_unlock();
+ iwl_mvm_update_avg_energy(mvm, energy);
/*
* Don't update in case the statistics are not cleared, since
* we will end up counting twice the same airtime, once in TCM
* request and once in statistics notification.
*/
- if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
- return;
-
- spin_lock(&mvm->tcm.lock);
- for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
- u32 airtime = le32_to_cpu(air_time[i]);
- u32 rx_bytes = le32_to_cpu(bytes[i]);
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, air_time, bytes);
- mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
- if (airtime) {
- /* re-init every time to store rate from FW */
- ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
- ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
- rx_bytes * 8 / airtime);
- }
-
- mdata->rx.airtime += airtime;
- }
- spin_unlock(&mvm->tcm.lock);
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c15f7dbc9516..838734fec502 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,31 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+ /*
+ * If we did CHECKSUM_COMPLETE, the hardware only does it right for
+ * certain cases and starts the checksum after the SNAP. Check if
+ * this is the case - it's easier to just bail out to CHECKSUM_NONE
+ * in the cases the hardware didn't handle, since such packets are
+ * rare. Even then the hardware did calculate the checksum, it just
+ * started right after the MAC header instead.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ struct {
+ u8 hdr[6];
+ __be16 type;
+ } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+ if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+ !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+ (shdr->type != htons(ETH_P_IP) &&
+ shdr->type != htons(ETH_P_ARP) &&
+ shdr->type != htons(ETH_P_IPV6) &&
+ shdr->type != htons(ETH_P_8021Q) &&
+ shdr->type != htons(ETH_P_PAE) &&
+ shdr->type != htons(ETH_P_TDLS))))
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
fraglen = len - headlen;
if (fraglen) {
@@ -308,7 +333,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct iwl_rx_mpdu_desc *desc,
u32 pkt_flags, int queue, u8 *crypt_len)
{
- u16 status = le16_to_cpu(desc->status);
+ u32 status = le32_to_cpu(desc->status);
/*
* Drop UNKNOWN frames in aggregation, unless in monitor mode
@@ -393,22 +418,36 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return 0;
}
-static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct sk_buff *skb,
- struct iwl_rx_mpdu_desc *desc)
+ struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
- u16 flags = le16_to_cpu(desc->l3l4_flags);
- u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
- IWL_RX_L3_PROTO_POS);
-
- if (mvmvif->features & NETIF_F_RXCSUM &&
- flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
- (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
- l3_prot == IWL_RX_L3_TYPE_IPV6 ||
- l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+ u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
+ }
+ } else {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif;
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
}
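On AX210-family devices the hardware hands the driver a raw 16-bit ones'-complement sum, which the new branch converts into CHECKSUM_COMPLETE for the stack. A stand-alone sketch of just that conversion (the hwsum parameter stands in for desc->v3.raw_xsum):

#include <linux/skbuff.h>
#include <net/checksum.h>

static void demo_set_complete_csum(struct sk_buff *skb, u16 hwsum)
{
	/* the hardware reports the inverted sum; unfold it for the stack */
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->csum = csum_unfold(~(__force __sum16)hwsum);
}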
/*
@@ -1668,10 +1707,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
*/
- if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
- !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
- le16_to_cpu(desc->status));
+ le32_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
@@ -1731,10 +1770,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
- if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
- u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
- if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -1796,7 +1835,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (ieee80211_is_data(hdr->frame_control))
- iwl_mvm_rx_csum(sta, skb, desc);
+ iwl_mvm_rx_csum(mvm, sta, skb, pkt);
if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 51a061b138ba..875281cf7fc0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -147,7 +147,7 @@ struct iwl_mvm_scan_params {
struct cfg80211_match_set *match_sets;
int n_scan_plans;
struct cfg80211_sched_scan_plan *scan_plans;
- u32 measurement_dwell;
+ bool iter_notif;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -337,33 +337,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
-static int
-iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
- struct cfg80211_scan_request *req,
- struct iwl_mvm_scan_params *params)
-{
- u32 duration = scan_timing[params->type].max_out_time;
-
- if (!req->duration)
- return 0;
-
- if (iwl_mvm_is_cdb_supported(mvm)) {
- u32 hb_time = scan_timing[params->hb_type].max_out_time;
-
- duration = min_t(u32, duration, hb_time);
- }
-
- if (req->duration_mandatory && req->duration > duration) {
- IWL_DEBUG_SCAN(mvm,
- "Measurement scan - too long dwell %hu (max out time %u)\n",
- req->duration,
- duration);
- return -EOPNOTSUPP;
- }
-
- return min_t(u32, (u32)req->duration, duration);
-}
-
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
@@ -725,14 +698,28 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
NL80211_BAND_2GHZ,
no_cck);
- tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+ tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+
+ /*
+ * The fw no longer uses this sta (hence the 0xff below), pending
+ * deprecation via a HOST API change
+ */
+ } else {
+ tx_cmd[0].sta_id = 0xff;
+ tx_cmd[1].sta_id = 0xff;
+ }
tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
+
tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
NL80211_BAND_5GHZ,
no_cck);
- tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
}
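The same ADD_STA version gate recurs throughout this patch; a hedged condensation as a helper (the helper name is made up, and 0xff mirrors the driver's invalid-station marker):

static u8 demo_scan_tx_sta_id(struct iwl_mvm *mvm)
{
	/* older firmware still expects the real aux station id */
	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
		return mvm->aux_sta.sta_id;

	/* newer firmware ignores the field, pending HOST API deprecation */
	return 0xff;
}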
static void
@@ -1144,6 +1131,10 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+ /* This function should not be called when using ADD_STA ver >=12 */
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12);
+
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1192,6 +1183,10 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+ /* This function should not be called when using ADD_STA ver >=12 */
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12);
+
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1305,7 +1300,16 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
- cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) < 12)
+ cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ /*
+ * The fw no longer uses this sta, pending deprecation via a HOST API
+ * change.
+ */
+ else
+ cfg.bcast_sta_id = 0xff;
+
cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
@@ -1333,10 +1337,8 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
u8 active_dwell, passive_dwell;
timing = &scan_timing[params->type];
- active_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
- passive_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
cmd->v7.adwell_default_n_aps_social =
@@ -1389,8 +1391,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
}
}
} else {
- cmd->v1.extended_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
cmd->v1.active_dwell = active_dwell;
cmd->v1.passive_dwell = passive_dwell;
cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
@@ -1443,10 +1444,8 @@ iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
u8 active_dwell, passive_dwell;
timing = &scan_timing[params->type];
- active_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
- passive_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
general_params->adwell_default_social_chn =
IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
@@ -1737,7 +1736,7 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
- if (params->measurement_dwell ||
+ if (params->iter_notif ||
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
@@ -1782,7 +1781,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
- if (params->measurement_dwell)
+ if (params->iter_notif)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2229,7 +2228,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
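iwl_fw_lookup_cmd_ver() now takes an explicit default that is returned when the firmware TLVs don't advertise a version for the command. A sketch of the lookup-with-fallback pattern (the fallback policy shown is hypothetical):

static u8 demo_scan_cmd_ver(struct iwl_mvm *mvm)
{
	u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
				       SCAN_REQ_UMAC,
				       IWL_FW_CMD_VER_UNKNOWN);

	/* hypothetical policy: treat unknown as the oldest supported layout */
	return ver == IWL_FW_CMD_VER_UNKNOWN ? 1 : ver;
}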
@@ -2293,11 +2293,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_fill_scan_type(mvm, &params, vif);
- ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
- if (ret < 0)
- return ret;
-
- params.measurement_dwell = ret;
+ if (req->duration)
+ params.iter_notif = true;
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
@@ -2569,7 +2566,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);
if (base_size)
@@ -2626,6 +2624,15 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_REGULAR);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_SCHED);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9e124755a3ce..017537944fd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -85,7 +85,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
int sta_id;
u32 reserved_ids = 0;
- BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
@@ -95,7 +95,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
+ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
@@ -770,8 +770,6 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
-
return queue;
}
@@ -1540,8 +1538,15 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
- cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
- color));
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
+ 0) >= 12 &&
+ sta->type == IWL_STA_AUX_ACTIVITY)
+ cmd.mac_id_n_color = cpu_to_le32(mac_id);
+ else
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
cmd.station_type = sta->type;
@@ -1858,7 +1863,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -1997,7 +2002,7 @@ static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
}
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
- int maccolor,
+ int maccolor, u8 *addr,
struct iwl_mvm_int_sta *sta,
u16 *queue, int fifo)
{
@@ -2007,7 +2012,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
- ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, *queue,
@@ -2034,7 +2039,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
return 0;
}
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret;
@@ -2047,7 +2052,11 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
- ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+ /*
+ * In CDB NICs we need to specify which lmac to use for the aux
+ * activity; reuse the mac_id argument to pass the lmac_id down.
+ */
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
&mvm->aux_sta, &mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST);
if (ret) {
@@ -2065,7 +2074,8 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
- &mvm->snif_sta, &mvm->snif_queue,
+ NULL, &mvm->snif_sta,
+ &mvm->snif_queue,
IWL_MVM_TX_FIFO_BE);
}
@@ -2189,7 +2199,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@@ -2438,7 +2448,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
@@ -2863,7 +2873,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- ret = 0;
+ ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
}
out:
@@ -3761,7 +3771,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u32 sta_id = le32_to_cpu(notif->sta_id);
- if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
@@ -3844,7 +3854,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Block/unblock all the stations of the given mvmvif */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3903,3 +3913,43 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
+
+int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
+ u8 *key, u32 key_len)
+{
+ int ret;
+ u16 queue;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_key_conf *keyconf;
+
+ ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_LINK);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+ addr, sta, &queue,
+ IWL_MVM_TX_FIFO_BE);
+ if (ret)
+ goto out;
+
+ keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
+ if (!keyconf) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ keyconf->cipher = cipher;
+ memcpy(keyconf->key, key, key_len);
+ keyconf->keylen = key_len;
+
+ ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
+ 0, NULL, 0, 0, true);
+ kfree(keyconf);
+ return ret; /* propagate key-programming errors to the caller */
+out:
+ iwl_mvm_dealloc_int_sta(mvm, sta);
+ return ret;
+}
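A hedged usage sketch for the new PASN helper; the caller, cipher choice and key bytes below are illustrative only:

static int demo_add_pasn_peer(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			      struct iwl_mvm_int_sta *sta, u8 *peer_addr)
{
	u8 tk[16] = {};	/* made-up temporal key material */

	return iwl_mvm_add_pasn_sta(mvm, vif, sta, peer_addr,
				    WLAN_CIPHER_SUITE_CCMP, tk, sizeof(tk));
}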
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index da2d1ac01229..d7578c981a65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -70,7 +70,7 @@
#include <linux/wait.h>
#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
-#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
#include "rs.h"
struct iwl_mvm;
@@ -540,7 +540,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start);
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -579,5 +579,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-
+int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
+ u8 *key, u32 key_len);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index d781777b6b96..2ad959b4ce0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,7 +77,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -100,7 +100,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -144,7 +144,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */
cnt = 0;
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -273,7 +273,7 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
return;
}
- if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+ if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 1babc4bb5194..7fce79c1c114 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,14 +114,9 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
- * We have to send the command asynchronously since this cannot
- * be under the mutex for locking reasons, but that's not an
- * issue as it will have to complete before the next command is
- * executed, and a new time event means a new command.
*/
- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
- /* Do the same for the P2P device queue (STA) */
+ mutex_lock(&mvm->mutex);
if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
@@ -136,10 +129,20 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (!WARN_ON(!mvm->p2p_device_vif)) {
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
- CMD_ASYNC);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
+ } else {
+ /* do the same in case of Hotspot 2.0 */
+ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
+ /*
+ * In newer versions of this command an aux station is added only
+ * when there is a dedicated tx queue, and it needs to be removed
+ * at the end of its use.
+ */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12)
+ iwl_mvm_rm_aux_sta(mvm);
}
+
+ mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -172,7 +175,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_csa_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
@@ -1013,6 +1016,28 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_roc_finished(mvm);
}
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ u32 id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!te_data->running)
+ return;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id != TE_CHANNEL_SWITCH_PERIOD)
+ return;
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
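A hypothetical caller for the new helper, e.g. tearing the absence period down when a channel switch is aborted; the locking expectation comes from the lockdep assertion above:

static void demo_abort_channel_switch(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif)
{
	lockdep_assert_held(&mvm->mutex);
	iwl_mvm_remove_csa_period(mvm, vif);	/* no-op unless the TE runs */
}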
+
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 apply_time)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 3186d7e40567..b6bac776f236 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -216,6 +216,9 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm);
void iwl_mvm_roc_done_wk(struct work_struct *wk);
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
/**
* iwl_mvm_schedule_csa_period - request channel switch absence period
* @mvm: the mvm component
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 0c95663bf9ed..340c892b30ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -228,24 +228,67 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_enter_ctkill(mvm);
}
-static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
+/*
+ * Send the DTS_MEASUREMENT_TRIGGER command with or without waiting for a
+ * response. If we get a response, the measurement is stored in 'temp'.
+ */
+static int iwl_mvm_send_temp_cmd(struct iwl_mvm *mvm, bool response, s32 *temp)
{
- struct iwl_dts_measurement_cmd cmd = {
+ struct iwl_host_cmd cmd = {};
+ struct iwl_dts_measurement_cmd dts_cmd = {
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
};
- struct iwl_ext_dts_measurement_cmd extcmd = {
+ struct iwl_ext_dts_measurement_cmd ext_cmd = {
.control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
};
- u32 cmdid;
+ struct iwl_dts_measurement_resp *resp;
+ void *cmd_ptr;
+ int ret;
+ u32 cmd_flags = 0;
+ u16 len;
+
+ /* Check which command format is used (regular/extended) */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) {
+ len = sizeof(ext_cmd);
+ cmd_ptr = &ext_cmd;
+ } else {
+ len = sizeof(dts_cmd);
+ cmd_ptr = &dts_cmd;
+ }
+ /* The command version where we get a response is zero length */
+ if (response) {
+ cmd_flags = CMD_WANT_SKB;
+ len = 0;
+ }
- cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
- PHY_OPS_GROUP, 0);
+ cmd.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE);
+ cmd.len[0] = len;
+ cmd.flags = cmd_flags;
+ cmd.data[0] = cmd_ptr;
+
+ IWL_DEBUG_TEMP(mvm,
+ "Sending temperature measurement command - %s response\n",
+ response ? "with" : "without");
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
+ IWL_ERR(mvm,
+ "Failed to send the temperature measurement command (err=%d)\n",
+ ret);
+ return ret;
+ }
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
- return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+ if (response) {
+ resp = (void *)cmd.resp_pkt->data;
+ *temp = le32_to_cpu(resp->temp);
+ IWL_DEBUG_TEMP(mvm,
+ "Got temperature measurement response: temp=%d\n",
+ *temp);
+ iwl_free_resp(&cmd);
+ }
- return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
+ return ret;
}
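The response path above relies on the CMD_WANT_SKB flag to get the firmware's reply packet back to the caller. A minimal sketch of that host-command pattern (the reply payload layout here is hypothetical):

static int demo_read_fw_u32(struct iwl_mvm *mvm, u32 cmd_id, u32 *out)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.flags = CMD_WANT_SKB,	/* keep the response packet */
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	*out = le32_to_cpu(*(__le32 *)cmd.resp_pkt->data);
	iwl_free_resp(&cmd);	/* the caller must free the response */
	return 0;
}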
int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
@@ -254,6 +297,18 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret;
+ u8 cmd_ver;
+
+ /*
+ * If command version is 1 we send the command and immediately get
+ * a response. For older versions we send the command and wait for a
+ * notification (no command TLV for previous versions).
+ */
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1)
+ return iwl_mvm_send_temp_cmd(mvm, true, temp);
lockdep_assert_held(&mvm->mutex);
@@ -261,9 +316,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
temp_notif, ARRAY_SIZE(temp_notif),
iwl_mvm_temp_notif_wait, temp);
- ret = iwl_mvm_get_temp_cmd(mvm);
+ ret = iwl_mvm_send_temp_cmd(mvm, false, temp);
if (ret) {
- IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
return ret;
}
@@ -295,6 +349,8 @@ static void check_exit_ctkill(struct work_struct *work)
duration = tt->params.ct_kill_duration;
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
if (__iwl_mvm_mac_start(mvm))
@@ -345,7 +401,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 2f6484e0d726..fe1c538cd718 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -793,11 +793,7 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
u8 ac = tid_to_mac80211_ac[tid];
unsigned int txf;
- int lmac = IWL_LMAC_24G_INDEX;
-
- if (iwl_mvm_is_cdb_supported(mvm) &&
- band == NL80211_BAND_5GHZ)
- lmac = IWL_LMAC_5G_INDEX;
+ int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
/* For HE redirect to trigger based fifos */
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
@@ -1371,7 +1367,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
}
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
@@ -1413,7 +1409,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}
-/**
+/*
* iwl_mvm_get_scd_ssn - returns the SSN of the SCD
* @tx_resp: the Tx response from the fw (agg or non-agg)
*
@@ -1768,13 +1764,13 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
struct ieee80211_tx_info *ba_info, u32 rate)
{
struct sk_buff_head reclaimed_skbs;
- struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_tid_data *tid_data = NULL;
struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_sta *mvmsta = NULL;
struct sk_buff *skb;
int freed;
- if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+ if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
tid > IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return;
@@ -1784,11 +1780,44 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/* Reclaiming frames for a station that has been deleted ? */
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ if (WARN_ON_ONCE(!sta)) {
rcu_read_unlock();
return;
}
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ /*
+ * It's possible to get a BA response after invalidating the rcu
+ * pointer (it is invalidated in order to prevent new Tx from being
+ * sent, but there may be some frames already in-flight).
+ * In this case we just want to reclaim, and can skip all the
+ * sta-dependent stuff since the station is in the middle of being
+ * removed anyway.
+ */
+ if (IS_ERR(sta))
+ goto out;
+
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
@@ -1800,15 +1829,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
return;
}
- __skb_queue_head_init(&reclaimed_skbs);
-
- /*
- * Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway).
- */
- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
-
spin_lock_bh(&mvmsta->lock);
tid_data->next_reclaimed = index;
@@ -1832,15 +1852,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
else
WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
- iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
-
- memset(&info->status, 0, sizeof(info->status));
- /* Packet was transmitted successfully, failures come as single
- * frames because before failing a frame the firmware transmits
- * it without aggregation at least once.
- */
- info->flags |= IEEE80211_TX_STAT_ACK;
-
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
if (freed == 1) {
@@ -1917,8 +1928,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
- if (!mvmsta)
- goto out_unlock;
+ /*
+ * It's possible to get a BA response after invalidating the rcu
+ * pointer (it is invalidated in order to prevent new Tx from
+ * being sent, but there may be some frames already in-flight).
+ * In this case we just want to reclaim, and can skip all the
+ * sta-dependent stuff since the station is in the middle of
+ * being removed anyway.
+ */
/* Free per TID */
for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
@@ -1929,7 +1946,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (tid == IWL_MGMT_TID)
tid = IWL_MAX_TID_COUNT;
- mvmsta->tid_data[i].lq_color = lq_color;
+ if (mvmsta)
+ mvmsta->tid_data[i].lq_color = lq_color;
+
iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
@@ -1937,9 +1956,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
- iwl_mvm_tx_airtime(mvm, mvmsta,
- le32_to_cpu(ba_res->wireless_time));
-out_unlock:
+ if (mvmsta)
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
out:
IWL_DEBUG_TX_REPLY(mvm,
@@ -2036,7 +2055,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
return ret;
}
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
{
struct iwl_mvm_int_sta *int_sta = sta;
struct iwl_mvm_sta *mvm_sta = sta;
@@ -2045,12 +2064,10 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
offsetof(struct iwl_mvm_sta, sta_id));
if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xffff, flags);
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
if (internal)
- return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
- flags);
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
- return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
}
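With the flags argument removed, every station flush now passes 0 (a plain synchronous flush) internally. A hypothetical caller:

static void demo_flush_before_removal(struct iwl_mvm *mvm,
				      struct iwl_mvm_sta *mvm_sta)
{
	/* 'false': a regular station, not an internal one */
	if (iwl_mvm_flush_sta(mvm, mvm_sta, false))
		IWL_ERR(mvm, "failed to flush sta %d\n", mvm_sta->sta_id);
}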
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index be57b8391850..3123036978a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -67,6 +67,7 @@
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
+#include "fw/img.h"
/*
* Will return 0 even if the cmd failed when RFKILL is asserted unless
@@ -289,45 +290,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-#define FW_SYSASSERT_CPU_MASK 0xf0000000
-static const struct {
- const char *name;
- u8 num;
-} advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "BAD_COMMAND", 0x39 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
- { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
- { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *desc_lookup(u32 num)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
- if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
- return advanced_lookup[i].name;
-
- /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
- return advanced_lookup[i].name;
-}
-
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
@@ -463,7 +425,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
struct iwl_umac_error_event_table table;
u32 base = mvm->trans->dbg.umac_error_event_table;
- if (!mvm->support_umac_log &&
+ if (!base &&
!(mvm->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC))
return;
@@ -480,7 +442,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
}
IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
- desc_lookup(table.error_id));
+ iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
@@ -550,7 +512,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
+ iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
@@ -658,7 +620,8 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
- * @sync: This command can be sent synchronously.
+ * @mvm: Driver data.
+ * @lq: Link quality command to send.
*
* The link quality command is sent as the last step of station creation.
* This is the special case in which init is set and we call a callback in
@@ -683,8 +646,10 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
/**
* iwl_mvm_update_smps - Get a request to change the SMPS mode
+ * @mvm: Driver data.
+ * @vif: Pointer to the ieee80211_vif structure
 * @req_type: The part of the driver that calls for a change.
- * @smps_requests: The request to change the SMPS mode.
+ * @smps_request: The request to change the SMPS mode.
*
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 1ab136600415..a0352fa873d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -301,3 +301,30 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
}
+
+int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ int ret;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+ &trans_pcie->pnvm_dram);
+ if (ret < 0) {
+ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
+ ret);
+ return ret;
+ }
+
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ cpu_to_le64(trans_pcie->pnvm_dram.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(trans_pcie->pnvm_dram.size);
+
+ return 0;
+}
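A hedged call-site sketch for the new PNVM hook; the blob and whatever loads it are hypothetical:

static int demo_apply_pnvm(struct iwl_trans *trans, const void *blob, u32 len)
{
	/* returns 0 untouched on pre-AX210 devices, so no gating needed */
	return iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, blob, len);
}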
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 23abfbd096b0..13fe9c00d7e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -73,7 +73,7 @@ static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
if (!result)
return NULL;
- if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
void *old = result;
dma_addr_t oldphys = *phys;
@@ -93,17 +93,17 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const void *data, u32 len,
+ struct iwl_dram_data *dram)
{
- dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
&dram->physical);
if (!dram->block)
return -ENOMEM;
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
+ dram->size = len;
+ memcpy(dram->block, data, len);
return 0;
}
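The refactor above generalizes the helper from firmware sections to arbitrary (data, len) blobs, which is what lets the gen3 PNVM code reuse it. A hypothetical non-section caller:

static int demo_map_config_blob(struct iwl_trans *trans,
				const void *blob, u32 len,
				struct iwl_dram_data *dram)
{
	/* allocates coherent DMA memory and copies the blob into it */
	return iwl_pcie_ctxt_info_alloc_dma(trans, blob, len, dram);
}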
@@ -156,7 +156,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
/* initialize lmac sections */
for (i = 0; i < lmac_cnt; i++) {
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
+ fw->sec[i].len,
&dram->fw[dram->fw_cnt]);
if (ret)
return ret;
@@ -169,7 +170,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
for (i = 0; i < umac_cnt; i++) {
/* access FW with +1 to make up for lmac separator */
ret = iwl_pcie_ctxt_info_alloc_dma(trans,
- &fw->sec[dram->fw_cnt + 1],
+ fw->sec[dram->fw_cnt + 1].data,
+ fw->sec[dram->fw_cnt + 1].len,
&dram->fw[dram->fw_cnt]);
if (ret)
return ret;
@@ -192,7 +194,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
/* access FW with +2 to make up for lmac & umac separators */
int fw_idx = dram->fw_cnt + i + 2;
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
+ fw->sec[fw_idx].len,
&dram->paging[i]);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e02bafb8921f..129021f26791 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -512,9 +512,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 9000 Series */
{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
{IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
{IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
@@ -540,20 +540,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0078, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x007C, iwlax201_cfg_snj_hr_b0)},
{IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0)},
{IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
{IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x2074, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x4070, iwlax201_cfg_snj_hr_b0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+/* Ma devices */
+ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7E80, PCI_ANY_ID, iwl_ma_trans_cfg)},
+
#endif /* CONFIG_IWLMVM */
{0}
@@ -595,6 +608,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
/* QnJ with Hr */
IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+ /* SnJ with HR */
+ IWL_DEV_INFO(0x2726, 0x0244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
+ IWL_DEV_INFO(0x2726, 0x1651, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
+ IWL_DEV_INFO(0x2726, 0x1652, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
+ IWL_DEV_INFO(0x2726, 0x4244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
+
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
@@ -613,6 +632,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
@@ -955,6 +975,18 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
+/* Ma */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_gf_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_mr_a0, iwl_ma_name),
+
#endif /* CONFIG_IWLMVM */
};
@@ -987,9 +1019,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = trans;
-
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 55808ba10d27..ff542d2f0054 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -79,12 +79,7 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
+#include "queue/tx.h"
/*
* RX related structures and functions
@@ -247,16 +242,6 @@ struct iwl_rb_allocator {
};
/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- */
-static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-/**
* iwl_get_closed_rb_stts - get closed rb stts from different structs
* @rxq - the rxq to get the rb stts from
*/
@@ -274,28 +259,6 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
}
}
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- */
-static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-static inline dma_addr_t
-iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
* enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
@@ -375,8 +338,8 @@ struct cont_rec {
* count for allocating and freeing the memory.
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
- * @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
+ * @pnvm_dram: DRAM area that contains the PNVM data
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
@@ -384,7 +347,6 @@ struct cont_rec {
* @cmd_queue - command queue number
* @def_rx_queue - default rx queue number
* @rx_buf_size: Rx buffer size
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
@@ -434,8 +396,6 @@ struct iwl_trans_pcie {
struct net_device napi_dev;
- struct __percpu iwl_tso_hdr_page *tso_hdr_page;
-
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@@ -449,9 +409,9 @@ struct iwl_trans_pcie {
struct mutex mutex;
u32 inta_mask;
u32 scd_base_addr;
- struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct dma_pool *bc_pool;
+
+ struct iwl_dram_data pnvm_dram;
struct iwl_txq *txq_memory;
@@ -465,17 +425,12 @@ struct iwl_trans_pcie {
wait_queue_head_t wait_command_queue;
wait_queue_head_t sx_waitq;
- u8 page_offs, dev_cmd_offs;
-
u8 def_rx_queue;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 max_tbs;
- u16 tfd_size;
u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
bool scd_set_active;
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
@@ -579,19 +534,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
-/*
- * We need this inline in case dma_addr_t is only 32-bits - since the
- * hardware is always 64-bit, the issue can still occur in that case,
- * so use u64 for 'phys' here to force the addition in 64-bit.
- */
-static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
int iwl_pcie_tx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
- int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -602,14 +545,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
-void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
- struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
@@ -617,22 +556,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
- u8 idx)
-{
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->tb_len);
- } else {
- struct iwl_tfd *tfd = _tfd;
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
- }
-}
-
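
The removed accessor above reads both TFD layouts: in the legacy struct iwl_tfd_tb, hi_n_len packs the high 4 address bits into bits 0-3 and the 12-bit length into bits 4-15 (hence the ">> 4"), while the TFH layout stores the length directly. A small userspace sketch of that packing (editor's illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t len = 0x123;		/* 291-byte buffer */
	uint16_t hi_addr = 0xA;		/* address bits 32-35 */
	uint16_t hi_n_len = (uint16_t)((len << 4) | hi_addr);

	printf("hi_n_len = 0x%04x\n", hi_n_len);	/* 0x123a */
	printf("len back = 0x%03x\n", hi_n_len >> 4);	/* 0x123 */
	printf("hi bits  = 0x%x\n", hi_n_len & 0xF);	/* 0xa */
	return 0;
}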
/*****************************************************
* Error handling
******************************************************/
@@ -800,22 +723,6 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
}
}
-static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans->trans_cfg->use_tfh)
- idx = iwl_pcie_get_cmd_index(txq, idx);
-
- return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
static inline const char *queue_name(struct device *dev,
struct iwl_trans_pcie *trans_p, int i)
{
@@ -867,37 +774,6 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
-}
-
-static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_pcie_get_cmd_index(q, i);
- int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
- int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
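
The removed iwl_queue_used() test above handles the wrapped-ring case; a short userspace sketch (editor's illustration) exercising it on a ring whose write pointer has wrapped past the read pointer:

#include <stdio.h>

#define RING_SIZE 8	/* stand-in for n_window; power of two */

static int queue_used(int r, int w, int i)
{
	return w >= r ? (i >= r && i < w) : !(i < r && i >= w);
}

int main(void)
{
	int r = 6, w = 2;	/* wrapped: slots 6, 7, 0, 1 are in use */
	int i;

	for (i = 0; i < RING_SIZE; i++)
		printf("slot %d: %s\n", i,
		       queue_used(r, w, i) ? "used" : "free");
	return 0;
}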
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -964,23 +840,12 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue);
-int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
- struct sk_buff *skb);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb);
-#endif
/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
@@ -989,28 +854,10 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq);
-int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout);
-int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd);
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout);
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 9463c108aa96..94299f259518 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1359,7 +1359,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
if (rxq->id == trans_pcie->def_rx_queue)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 97c9e9c87436..91ec9379c061 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -162,7 +162,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_pcie_gen2_tx_stop(trans);
+ iwl_txq_gen2_tx_stop(trans);
iwl_pcie_rx_stop(trans);
}
@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index e5160d620868..d2e69ad53b27 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -81,6 +81,7 @@
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
@@ -1607,11 +1608,15 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
+ u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
if (!cfg_trans->mq_rx_supported)
goto enable_msi;
- max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
+ if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
+ max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
+
+ max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
for (i = 0; i < max_irqs; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1907,6 +1912,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans->txqs.page_offs = trans_cfg->cb_data_offs;
+ trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -1924,13 +1932,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
- trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
- trans_pcie->page_offs = trans_cfg->cb_data_offs;
- trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
-
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
@@ -1951,7 +1956,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
if (trans->trans_cfg->gen2)
- iwl_pcie_gen2_tx_free(trans);
+ iwl_txq_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
@@ -1975,15 +1980,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_fw_monitor(trans);
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans_pcie->tso_hdr_page, i);
-
- if (p->page)
- __free_page(p->page);
- }
+ if (trans_pcie->pnvm_dram.size)
+ dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
+ trans_pcie->pnvm_dram.block,
+ trans_pcie->pnvm_dram.physical);
- free_percpu(trans_pcie->tso_hdr_page);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@@ -2276,36 +2277,6 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
#define IWL_FLUSH_WAIT_MS 2000
-void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->trans_cfg->use_tfh) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
@@ -2374,7 +2345,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", txq_idx);
- iwl_trans_pcie_log_scd_error(trans, txq);
+ iwl_txq_log_scd_error(trans, txq);
return -ETIMEDOUT;
}
@@ -2985,12 +2956,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans_pcie->max_tbs; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
+ for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
+ cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
@@ -3329,14 +3299,14 @@ static struct iwl_trans_dump_data
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans_pcie->tfd_size;
+ u16 tfd_size = trans->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
u8 tfdidx;
u32 caplen, cmdlen;
@@ -3359,7 +3329,7 @@ static struct iwl_trans_dump_data
txcmd = (void *)((u8 *)txcmd->data + caplen);
}
- ptr = iwl_queue_dec_wrap(trans, ptr);
+ ptr = iwl_txq_dec_wrap(trans, ptr);
}
spin_unlock_bh(&cmdq->lock);
@@ -3478,15 +3448,16 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.send_cmd = iwl_trans_pcie_gen2_send_hcmd,
- .tx = iwl_trans_pcie_gen2_tx,
+ .tx = iwl_txq_gen2_tx,
.reclaim = iwl_trans_pcie_reclaim,
.set_q_ptrs = iwl_trans_pcie_set_q_ptrs,
- .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
- .txq_free = iwl_trans_pcie_dyn_txq_free,
+ .txq_alloc = iwl_txq_dyn_alloc,
+ .txq_free = iwl_txq_dyn_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
+ .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
@@ -3498,34 +3469,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size, txcmd_size, txcmd_align;
+ int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
- if (!cfg_trans->gen2) {
+ if (!cfg_trans->gen2)
ops = &trans_ops_pcie;
- txcmd_size = sizeof(struct iwl_tx_cmd);
- txcmd_align = sizeof(void *);
- } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
- txcmd_align = 64;
- } else {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
- txcmd_align = 128;
- }
-
- txcmd_size += sizeof(struct iwl_cmd_header);
- txcmd_size += 36; /* biggest possible 802.11 header */
-
- /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
- return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
- txcmd_size, txcmd_align);
+ cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3547,11 +3502,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
- trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans_pcie->tso_hdr_page) {
- ret = -ENOMEM;
- goto out_no_pci;
- }
trans_pcie->debug_rfkill = -1;
if (!cfg_trans->base_params->pcie_l1_allowed) {
@@ -3567,19 +3517,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->def_rx_queue = 0;
- if (cfg_trans->use_tfh) {
- addr_size = 64;
- trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
- } else {
- addr_size = 36;
- trans_pcie->max_tbs = IWL_NUM_OF_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
-
pci_set_master(pdev);
+ addr_size = trans->txqs.tfd.addr_size;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
@@ -3661,24 +3601,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (cfg_trans->gen2) {
- size_t bc_tbl_size;
-
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
- bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
- else
- bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
-
- trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
- bc_tbl_size, 256, 0);
- if (!trans_pcie->bc_pool)
- goto out_no_pci;
- }
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
@@ -3712,7 +3634,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
- free_percpu(trans_pcie->tso_hdr_page);
destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
iwl_trans_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 606bef2ecc7b..baa83a0b8593 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -58,751 +58,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
-
- /*
- * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
- */
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
-{
- int txq_id;
-
- /*
- * This function can be called before the op_mode disabled the
- * queues. This happens when we have an rfkill interrupt.
- * Since we stop Tx altogether - mark the queues as stopped.
- */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
- if (!trans->txqs.txq[txq_id])
- continue;
- iwl_pcie_gen2_txq_unmap(trans, txq_id);
- }
-}
-
-/*
- * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM: 0 for one chunk, 1 for two, and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans_pcie->bc_table_dword);
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
- } else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans_pcie->bc_table_dword);
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
- }
-}
-
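
The entry layout built above differs per family: pre-AX210 devices expect the length in dwords with the fetch-chunk count in bits 12-15, while AX210 and later expect bytes with the count in bits 14-15. A worked userspace sketch of the packing (editor's illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t byte_cnt = 1000;
	uint16_t num_fetch_chunks = 1;	/* TFD spills into a 2nd 64-byte chunk */

	/* pre-AX210: length in dwords, chunk count in bits 12-15 */
	uint16_t dw = (byte_cnt + 3) / 4;	/* DIV_ROUND_UP(len, 4) */
	printf("gen2: 0x%04x\n", (uint16_t)(dw | (num_fetch_chunks << 12)));

	/* AX210+: length in bytes, chunk count in bits 14-15 */
	printf("gen3: 0x%04x\n",
	       (uint16_t)(byte_cnt | (num_fetch_chunks << 14)));
	return 0;
}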
-/*
- * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
- */
-void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans_pcie->max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- tfd->num_tbs = 0;
-}
-
-static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-
- lockdep_assert_held(&txq->lock);
-
- iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_pcie_get_tfd(trans, txq, idx));
-
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
- }
-}
-
-static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /*
- * Only WARN here so we know about the issue; we can't clean up
- * properly in the unmap path because not every caller currently
- * checks the error returned from this function - it can only fail
- * when there is no more space, so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_pcie_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct page **page_ptr;
- struct page *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- /* set the chaining pointer to the previous page if there is one */
- *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
-
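
The removed helper above chains workaround pages through their last pointer-sized bytes, so one head pointer (stashed in skb->cb) is enough to free them all later. A userspace sketch of the same scheme using malloc'd blocks (editor's illustration):

#include <stdio.h>
#include <stdlib.h>

#define BLK_SIZE 4096	/* stand-in for PAGE_SIZE */

static void *chain_alloc(void **head)
{
	void *blk = malloc(BLK_SIZE);

	if (!blk)
		return NULL;
	/* store the previous head in the block's final pointer slot */
	*(void **)((char *)blk + BLK_SIZE - sizeof(void *)) = *head;
	*head = blk;
	return blk;
}

static void chain_free(void **head)
{
	void *next = *head;

	*head = NULL;
	while (next) {
		void *tmp = next;

		next = *(void **)((char *)next + BLK_SIZE - sizeof(void *));
		free(tmp);
	}
}

int main(void)
{
	void *head = NULL;
	int i;

	for (i = 0; i < 3; i++)
		chain_alloc(&head);
	chain_free(&head);
	printf("chain freed\n");
	return 0;
}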
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
- ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
- /*
- * Work around a hardware bug. If (as expressed in the
- * condition above) the TB crosses a 4 GiB (2^32)
- * boundary, then the next TB may be accessed with the
- * wrong address.
- * To work around it, copy the data elsewhere and make
- * a new mapping for it so the device will not fail.
- */
-
- if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
-
-static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
- /* Our device supports 9 segments at most; they will fit in one page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
- return -ENOMEM;
-
- start_hdr = hdr_page->pos;
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- struct sk_buff *csum_skb = NULL;
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
-
- total_len -= data_left;
-
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
- hdr_page->pos += snap_ip_tcp_hdrlen;
-
- tb_len = hdr_page->pos - start_hdr;
- tb_phys = dma_map_single(trans->dev, start_hdr,
- tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- dev_kfree_skb(csum_skb);
- goto out_err;
- }
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it, so we can't hit
- * the buggy scenario.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = dma_map_single(trans->dev, tso.data,
- tb_len, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL);
- if (ret) {
- dev_kfree_skb(csum_skb);
- goto out_err;
- }
-
- data_left -= tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
- /* re-add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
-
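
In the removed A-MSDU builder above, each subframe must start on a 4-byte boundary, so amsdu_pad is whatever rounds the previous subframe (ethernet header + SNAP/IP/TCP headers + payload) up to a multiple of four. A worked userspace example (editor's illustration; the IPv4/TCP header sizes are assumed minimums):

#include <stdio.h>

int main(void)
{
	unsigned int eth_hdr = 14;		/* sizeof(struct ethhdr) */
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP, assumed */
	unsigned int data_left = 1357;		/* payload of this subframe */
	unsigned int subframe = eth_hdr + snap_ip_tcp + data_left;
	unsigned int pad = (4 - subframe) & 0x3;

	printf("subframe %u bytes -> pad %u (total %u, %% 4 == %u)\n",
	       subframe, pad, subframe + pad, (subframe + pad) % 4);
	return 0;
}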
-static struct
-iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct
-iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL);
- if (ret)
- goto out_err;
- }
-
- if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL);
- if (ret)
- goto out_err;
- if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
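
In the removed non-A-MSDU path above, TB1 (the TX command remainder plus the 802.11 header) is rounded up to a dword boundary unless the frame is an A-MSDU, whose subframe headers provide the alignment. A tiny userspace sketch of the rounding (editor's illustration):

#include <stdio.h>

#define ALIGN4(x) (((x) + 3) & ~3u)	/* same as kernel ALIGN(x, 4) */

int main(void)
{
	unsigned int len = 53;	/* tx cmd + 802.11 header - IWL_FIRST_TB_SIZE */

	printf("tb1_len = %u\n", ALIGN4(len));	/* 56 */
	return 0;
}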
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
- else
- len = sizeof(struct iwl_tx_cmd_gen3);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so by GSO, otherwise it may be
- * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
- * built in the higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
-
- return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_queue_space(trans, txq) < txq->high_mark) {
- iwl_stop_queue(trans, txq);
-
- /* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- out_meta->flags = 0;
-
- tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
- iwl_pcie_gen2_get_num_tbs(trans, tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
- iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
+#include "queue/tx.h"
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -902,11 +158,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -984,8 +240,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
- iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
- tb0_size);
+ iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
+ tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
@@ -995,11 +251,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
- iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
- copy_size - tb0_size);
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
+ copy_size - tb0_size);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1017,10 +273,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
- iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
}
BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
@@ -1037,8 +293,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
- iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
out:
@@ -1169,322 +425,3 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
-/*
- * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->txqs.cmd.q_id) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_pcie_free_tso_page(trans_pcie, skb);
- }
- iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock_bh(&txq->lock);
-
- /* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
-}
-
-void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans_pcie->tfd_size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
- txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_pcie_gen2_txq_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- del_timer_sync(&txq->stuck_timer);
-
- iwl_pcie_gen2_txq_free_memory(trans, txq);
-
- trans->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans->txqs.queue_used);
-}
-
-int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
- } else {
- bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
- }
-
- if (WARN_ON(size > bc_tbl_entries))
- return -EINVAL;
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return -ENOMEM;
-
- txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return -ENOMEM;
- }
-
- ret = iwl_pcie_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_pcie_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- *intxq = txq;
- return 0;
-
-error:
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout)
-{
- struct iwl_txq *txq = NULL;
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = flags,
- .sta_id = sta_id,
- .tid = tid,
- };
- struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(cmd) },
- .data = { &cmd, },
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
- if (ret)
- return ret;
-
- cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
-{
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_pcie_gen2_txq_unmap(trans, queue);
-
- iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
- trans->txqs.txq[queue] = NULL;
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
-{
- int i;
-
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
- if (!trans->txqs.txq[i])
- continue;
-
- iwl_pcie_gen2_txq_free(trans, i);
- }
-}
-
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans->txqs.txq[txq_id] = queue;
- ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans->txqs.txq[txq_id];
- }
-
- ret = iwl_pcie_txq_init(trans, queue, queue_size,
- (txq_id == trans->txqs.cmd.q_id));
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- trans->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_pcie_gen2_tx_free(trans);
- return ret;
-}
-
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index eb396c06b7fb..966be5689d63 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -77,9 +77,6 @@
#include "internal.h"
#include "fw/api/tx.h"
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*
@@ -102,60 +99,6 @@
*
***************************************************/
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be fewer than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
-
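
The removed iwl_queue_space() above relies on the ring size being a power of two, so that (write - read) masked by (size - 1) yields the in-flight count even across wraparound. A userspace sketch (editor's illustration):

#include <stdio.h>

#define Q_SIZE 256	/* stand-in for max_tfd_queue_size; power of two */

static unsigned int queue_space(unsigned int r, unsigned int w,
				unsigned int max)
{
	unsigned int used = (w - r) & (Q_SIZE - 1);

	return used > max ? 0 : max - used;
}

int main(void)
{
	/* wrapped case: write restarted at 5, read still at 250 */
	printf("space = %u\n", queue_space(250, 5, Q_SIZE - 1));	/* 244 */
	return 0;
}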
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be a power of two, otherwise
- * iwl_pcie_get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size)
@@ -180,99 +123,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_trans_pcie_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-/*
- * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
- if (trans_pcie->bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
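
The removed gen1 byte-count update above grows the MAC payload length by the CRC and delimiter sizes plus the per-cipher MIC/ICV overhead, optionally converts it to dwords, and packs the station id into bits 12-15. A worked userspace example (editor's illustration; CCMP is picked arbitrarily as the cipher):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t byte_cnt = 1500, sta_id = 5;
	uint16_t len = byte_cnt + 4 + 4;	/* + CRC + delimiter */

	len += 8;				/* CCMP MIC, 8 bytes */
	len = (len + 3) / 4;			/* bc_table_dword case */

	printf("bc_ent = 0x%04x\n", (uint16_t)(len | (sta_id << 12)));
	return 0;
}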
-static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->id;
- int read_ptr = txq->read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->txqs.cmd.q_id)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
@@ -339,35 +189,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
-
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
- return (dma_addr_t)(le64_to_cpu(tb->addr));
- } else {
- struct iwl_tfd *tfd = _tfd;
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- dma_addr_t hi_len;
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
- }
-}
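
The removed address accessor above reassembles a legacy 36-bit DMA address: the low 32 bits come from tb->lo and the top 4 bits from hi_n_len, shifted up by 32 (written as "<< 16 << 16" to keep 32-bit builds warning-free). A userspace sketch (editor's illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lo = 0xDEADBEEF;
	uint16_t hi_n_len = (0x123 << 4) | 0xA;	/* len 0x123, addr bits 0xA */
	uint64_t hi = hi_n_len & 0xF;
	uint64_t addr = lo | ((hi << 16) << 16);

	printf("addr = 0x%llx\n", (unsigned long long)addr);	/* 0xadeadbeef */
	return 0;
}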
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
u8 idx, dma_addr_t addr, u16 len)
{
@@ -384,67 +205,6 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
tfd_fh->num_tbs = idx + 1;
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
-{
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
-
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
- } else {
- struct iwl_tfd *tfd = _tfd;
-
- return tfd->num_tbs & 0x1f;
- }
-}
-
-static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
- void *tfd = iwl_pcie_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans_pcie->max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
- iwl_pcie_tfd_tb_get_len(trans, tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(trans, tfd,
- i),
- iwl_pcie_tfd_tb_get_len(trans, tfd,
- i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- }
-
-}
-
/*
* iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
@@ -460,14 +220,14 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
* idx is bounded by n_window
*/
int rd_ptr = txq->read_ptr;
- int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);
+ int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
@@ -489,21 +249,20 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
+ tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans_pcie->tfd_size);
+ memset(tfd, 0, trans->txqs.tfd.size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans_pcie->max_tbs) {
+ if (num_tbs >= trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
+ trans->txqs.tfd.max_tbs);
return -EINVAL;
}
@@ -516,126 +275,6 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return num_tbs;
}
-int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size *
- trans->trans_cfg->base_params->max_tfd_queue_size;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- if (trans->trans_cfg->use_tfh)
- tfd_sz = trans_pcie->tfd_size * slots_num;
-
- timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-
-}
-
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- int ret;
- u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
- struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct page *tmp = next;
-
- next = *(void **)(page_address(next) + PAGE_SIZE -
- sizeof(void *));
- __free_page(tmp);
- }
-}
-
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -671,10 +310,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(trans_pcie, skb);
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -708,7 +347,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -728,7 +366,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size *
+ trans->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
@@ -774,7 +412,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans_pcie->scd_bc_tbls.dma >> 10);
+ trans->txqs.scd_bc_tbls.dma >> 10);
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
@@ -939,7 +577,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+ iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}
/*
@@ -965,7 +603,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1000,8 +638,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
- slots_num, cmd_queue);
+ ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
+ cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
@@ -1053,8 +691,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
- slots_num, cmd_queue);
+ ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+ cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -1111,10 +749,9 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
- int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+ int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1137,9 +774,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
- if (!iwl_queue_used(txq, last_to_free)) {
+ if (!iwl_txq_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
@@ -1153,28 +790,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
- read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(trans_pcie, skb);
+ iwl_txq_free_tso_page(trans, skb);
__skb_queue_tail(skbs, skb);
txq->entries[read_ptr].skb = NULL;
if (!trans->trans_cfg->use_tfh)
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(trans, txq) > txq->low_mark &&
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1204,17 +841,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
+ trans->txqs.dev_cmd_offs);
/*
* Note that we can very well be overflowing again.
- * In that case, iwl_queue_space will be small again
+ * In that case, iwl_txq_space will be small again
* and we won't wake mac80211's queue.
*/
iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
}
- if (iwl_queue_space(trans, txq) > txq->low_mark)
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
spin_lock_bh(&txq->lock);
@@ -1295,11 +932,11 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
lockdep_assert_held(&txq->lock);
- idx = iwl_pcie_get_cmd_index(txq, idx);
- r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+ idx = iwl_txq_get_cmd_index(txq, idx);
+ r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
- (!iwl_queue_used(txq, idx))) {
+ (!iwl_txq_used(txq, idx))) {
WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
@@ -1308,9 +945,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
return;
}
- for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
- r = iwl_queue_inc_wrap(trans, r)) {
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+ for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
+ r = iwl_txq_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
@@ -1627,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1636,7 +1273,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
@@ -1719,7 +1356,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
iwl_pcie_txq_build_tfd(trans, txq,
- iwl_pcie_get_first_tb_dma(txq, idx),
+ iwl_txq_get_first_tb_dma(txq, idx),
tb0_size, true);
/* map first command fragment, if any remains */
@@ -1729,8 +1366,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1753,8 +1390,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1783,7 +1420,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1828,13 +1465,13 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, txq, index);
+ iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -2055,51 +1692,6 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
- struct page **page_ptr;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
- sizeof(void *))
- goto out;
-
- /* We don't have enough room on this page, get a new one. */
- __free_page(p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
- /* set the chaining pointer to NULL */
- *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
-out:
- *page_ptr = p->page;
- get_page(p->page);
- return p;
-}
-
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
bool ipv6, unsigned int len)
{
@@ -2142,8 +1734,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->tfd_size,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
@@ -2352,7 +1944,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -2365,15 +1957,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(trans, txq) < txq->high_mark) {
- iwl_stop_queue(trans, txq);
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(trans, txq) < 3)) {
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
+ trans->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -2402,7 +1994,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+ tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2452,9 +2044,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq,
- txq->write_ptr),
- trans_pcie->tfd_size,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -2486,10 +2077,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* building the A-MSDU might have changed this data, so memcpy it now */
memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
- tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_pcie_tfd_get_num_tbs(trans, tfd));
+ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_txq_gen1_tfd_get_num_tbs(trans,
+ tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -2509,7 +2101,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
@@ -2520,7 +2112,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_unlock(&txq->lock);
return 0;
out_err:
- iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
spin_unlock(&txq->lock);
return -1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
new file mode 100644
index 000000000000..af0b27a68d84
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -0,0 +1,1529 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/tso.h>
+#include <linux/tcp.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "fw/api/tx.h"
+#include "queue/tx.h"
+#include "iwl-fh.h"
+#include "iwl-scd.h"
+#include <linux/dmapool.h>
+
+/*
+ * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
+{
+ int txq_id;
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+ if (!trans->txqs.txq[txq_id])
+ continue;
+ iwl_txq_gen2_unmap(trans, txq_id);
+ }
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for two, and so on.
+ * If, for example, the TFD contains only 3 TBs, then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans->txqs.bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ } else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans->txqs.bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+}
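
As a quick reference for the packing above, here is a stand-alone user-space sketch. The 64-byte fetch-chunk size and the 14-bit (AX210+) versus 12-bit (earlier, counted in dwords) length fields follow the function; the sample numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

/* AX210+: byte count in bits 0..13, (fetch chunks - 1) in bits 14..15 */
static uint16_t pack_bc_ax210(uint16_t len, uint8_t num_fetch_chunks)
{
	return len | (num_fetch_chunks << 14);
}

/* pre-AX210: dword count in bits 0..11, (fetch chunks - 1) in bits 12..15 */
static uint16_t pack_bc_gen2(uint16_t len, uint8_t num_fetch_chunks)
{
	return (uint16_t)((len + 3) / 4) | (num_fetch_chunks << 12);
}

int main(void)
{
	unsigned int filled = 32;			/* e.g. a TFD with 3 TBs, per the comment */
	unsigned int chunks = (filled + 63) / 64 - 1;	/* 0 means one 64-byte chunk */

	printf("ax210=0x%04x gen2=0x%04x\n",
	       pack_bc_ax210(1500, chunks), pack_bc_gen2(1500, chunks));
	return 0;
}
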
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
+
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ tfd->num_tbs = 0;
+}
+
+void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* txq->read_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
+ lockdep_assert_held(&txq->lock);
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ /* free SKB */
+ if (txq->entries) {
+ struct sk_buff *skb;
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
+ struct iwl_tfh_tb *tb;
+
+ /*
+ * Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
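
The gate for this copy path, iwl_txq_crosses_4g_boundary() (declared in queue/tx.h, not shown in this hunk), plausibly reduces to comparing the upper 32 bits of the buffer's first and one-past-last addresses; a hedged sketch under that assumption:

#include <stdbool.h>
#include <stdint.h>

/* Assumed semantics: true if [addr, addr + len] spans a 2^32 boundary,
 * i.e. the low 32 bits of the DMA address would wrap inside the buffer.
 */
static bool crosses_4g_boundary(uint64_t addr, uint16_t len)
{
	return (addr >> 32) != ((addr + len) >> 32);
}

int main(void)
{
	/* ends exactly on a 4 GiB boundary -> would trigger the workaround */
	return crosses_4g_boundary(0xFFFFFF00ULL, 0x100) ? 0 : 1;
}
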
+
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
+{
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
+
+ /* We don't have enough room on this page, get a new one. */
+ __free_page(p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
+ return p;
+}
+#endif
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd, int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct iwl_tso_hdr_page *hdr_page;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports 9 segments at most, it will fit in 1 page */
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
+ if (!hdr_page)
+ return -ENOMEM;
+
+ start_hdr = hdr_page->pos;
+
+ /*
+ * Pull the ieee80211 header to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ struct sk_buff *csum_skb = NULL;
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *subf_hdrs_start = hdr_page->pos;
+
+ total_len -= data_left;
+
+ memset(hdr_page->pos, 0, amsdu_pad);
+ hdr_page->pos += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+ hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+ hdr_page->pos += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+ hdr_page->pos += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well, which will be considered
+ * part of the MAC header.
+ */
+ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+
+ hdr_page->pos += snap_ip_tcp_hdrlen;
+
+ tb_len = hdr_page->pos - start_hdr;
+ tb_phys = dma_map_single(trans->dev, start_hdr,
+ tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = hdr_page->pos;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = dma_map_single(trans->dev, tso.data,
+ tb_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+
+ data_left -= tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
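
The amsdu_pad arithmetic in the loop keeps every subframe 4-byte aligned for the one that follows; a small sketch of just that rule (the header lengths are example values for IPv4/TCP without options):

#include <stdio.h>

int main(void)
{
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP, example */
	unsigned int ethhdr_len = 14;

	for (unsigned int data_left = 1398; data_left <= 1401; data_left++) {
		/* same expression as in iwl_txq_gen2_build_amsdu() */
		unsigned int pad = (4 - (ethhdr_len + snap_ip_tcp + data_left)) & 0x3;

		printf("payload %4u -> pad %u\n", data_left, pad);
	}
	return 0;
}
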
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if doing so by GSO, otherwise it may be
+ * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
+ * built in the higher layers already.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
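
The occupancy computation relies on the queue size being a power of two, so the subtraction stays correct across pointer wrap-around; a minimal demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int size = 256;		/* must be a power of two */
	unsigned int read = 250, write = 4;	/* write pointer has wrapped */
	unsigned int used = (write - read) & (size - 1);

	/* one slot is reserved to tell "empty" from "full" */
	printf("used=%u free=%u\n", used, (size - 1) - used);
	return 0;
}
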
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring, if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans->txqs.dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ out_meta->flags = 0;
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(trans, tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
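
The sequence field written above packs the queue id and the TFD index into one 16-bit word; a sketch assuming the driver's usual QUEUE_TO_SEQ/INDEX_TO_SEQ layout (queue id above the low byte, index in it - treat the exact masks as illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int txq_id = 9, idx = 42;
	unsigned short seq = ((txq_id & 0x1f) << 8) | (idx & 0xff);

	printf("seq=0x%04x queue=%u index=%u\n",
	       seq, (seq >> 8) & 0x1f, seq & 0xffu);
	return 0;
}
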
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (WARN_ON_ONCE(!skb))
+ continue;
+
+ iwl_txq_free_tso_page(trans, skb);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans->txqs.cmd.q_id)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans->txqs.queue_used);
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue)
+{
+ int ret;
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
+void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
+{
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
+ }
+}
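
Both get_workaround_page() and get_page_hdr() stash a chaining pointer in the last sizeof(void *) bytes of every page they hand out, and the loop above walks that chain to free it. A stand-alone model of the layout, with malloc() standing in for page allocation:

#include <stdlib.h>
#include <string.h>

#define PG 4096	/* stand-in for PAGE_SIZE */

static void *chain_slot(void *page)
{
	return (char *)page + PG - sizeof(void *);
}

int main(void)
{
	void *head = NULL;

	/* "allocate" three pages, chaining each new one to the previous */
	for (int i = 0; i < 3; i++) {
		void *p = malloc(PG);

		if (!p)
			break;
		memcpy(chain_slot(p), &head, sizeof(head));
		head = p;
	}

	/* walk and free the whole chain, as iwl_txq_free_tso_page() does */
	while (head) {
		void *next;

		memcpy(&next, chain_slot(head), sizeof(next));
		free(head);
		head = next;
	}
	return 0;
}
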
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->trans_cfg->use_tfh) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue)
+{
+ size_t tfd_sz = trans->txqs.tfd.size *
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ if (trans->trans_cfg->use_tfh)
+ tfd_sz = trans->txqs.tfd.size * slots_num;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+}
+
+static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
+ struct iwl_txq **intxq, int size,
+ unsigned int timeout)
+{
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return -EINVAL;
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return -ENOMEM;
+ }
+
+ ret = iwl_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ *intxq = txq;
+ return 0;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
+ int cmd_id, int size, unsigned int timeout)
+{
+ struct iwl_txq *txq = NULL;
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = flags,
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = cmd_id,
+ .len = { sizeof(cmd) },
+ .data = { &cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
+ if (ret)
+ return ret;
+
+ cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_unmap(trans, queue);
+
+ iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]);
+
+ trans->txqs.txq[queue] = NULL;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ int i;
+
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
+ if (!trans->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans->txqs.txq[txq_id] = queue;
+ ret = iwl_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans->txqs.cmd.q_id));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
+
+static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ }
+
+ tfd = _tfd;
+ tb = &tfd->tbs[idx];
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
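
A compact model of the 36-bit gen1 TB address handled above: the low 32 bits sit in tb->lo, the top 4 bits in the low nibble of tb->hi_n_len, and the TB length in that field's upper 12 bits (the placement follows the masks in this function; field widths are otherwise illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x9ABCDEF01ULL;		/* example 36-bit DMA address */
	uint16_t len = 0x123;			/* example TB length */

	uint32_t lo = (uint32_t)addr;
	uint16_t hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));

	/* reassemble, as iwl_txq_gen1_tfd_tb_get_addr() does on 64-bit */
	uint64_t rebuilt = ((uint64_t)(hi_n_len & 0xF) << 32) | lo;

	printf("rebuilt=0x%llx len=0x%x\n",
	       (unsigned long long)rebuilt, hi_n_len >> 4);
	return 0;
}
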
+
+void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ int i, num_tbs;
+ void *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
+
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue a fatal error, this is quite a serious situation */
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(trans,
+ tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(trans,
+ tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
+}
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+ if (trans->txqs.bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans->txqs.cmd.q_id)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ bc_ent;
+}
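
Both gen1 byte-count helpers end with the same duplication step: entries
below TFD_QUEUE_SIZE_BC_DUP are mirrored TFD_QUEUE_SIZE_MAX slots later,
giving the scheduler hardware a contiguous window to read across the
ring's wrap point. A reduced sketch of the pattern (hypothetical sizes,
kernel u16 in place of __le16):

    #define Q_MAX 256       /* stand-in for TFD_QUEUE_SIZE_MAX */
    #define Q_DUP  64       /* stand-in for TFD_QUEUE_SIZE_BC_DUP */

    static void bc_tbl_write(u16 *tfd_offset, int ptr, u16 ent)
    {
            tfd_offset[ptr] = ent;
            if (ptr < Q_DUP)        /* mirror past the ring end */
                    tfd_offset[Q_MAX + ptr] = ent;
    }
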
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
new file mode 100644
index 000000000000..c67577dfa21d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_queue_tx_h__
+#define __iwl_trans_queue_tx_h__
+#include "iwl-fh.h"
+#include "fw/api/tx.h"
+
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ if (trans->trans_cfg->use_tfh)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return txq->tfds + trans->txqs.tfd.size * idx;
+}
+
+int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue);
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware is always 64-bit, a 4 GiB boundary crossing can still occur
+ * in that case, so use u64 for 'phys' here to force the addition to be
+ * done in 64 bits.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
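
iwl_txq_crosses_4g_boundary() flags a mapping whose first and last bytes
differ in their upper 32 address bits, i.e. one that straddles a 4 GiB
boundary the device cannot be handed as a single chunk. For instance:

    u64 phys = 0xfffff000ULL;   /* upper_32_bits(phys)       == 0 */
    u16 len  = 0x2000;          /* upper_32_bits(phys + len) == 1 -> crosses */
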
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, q->read_ptr);
+ int w = iwl_txq_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
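
iwl_txq_used() is the usual circular-buffer occupancy test: with no wrap
(w >= r) a slot is in use iff it lies in [r, w); once the write pointer
has wrapped, the in-use region is everything outside [w, r). The final
expression is equivalent to:

    static bool ring_slot_used(int i, int r, int w)
    {
            if (w >= r)                     /* no wrap: in use within [r, w) */
                    return i >= r && i < w;
            return i >= r || i < w;         /* wrapped: [r, size) plus [0, w) */
    }
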
+
+void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans,
+ __le16 flags, u8 sta_id, u8 tid,
+ int cmd_id, int size,
+ unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
+#endif
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
+ void *_tfd)
+{
+ struct iwl_tfd *tfd;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ return tfd->num_tbs & 0x1f;
+}
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index);
+void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq);
+void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
+#endif /* __iwl_trans_queue_tx_h__ */
diff --git a/drivers/net/wireless/intersil/hostap/Kconfig b/drivers/net/wireless/intersil/hostap/Kconfig
index 6ad88299432f..c865d3156cea 100644
--- a/drivers/net/wireless/intersil/hostap/Kconfig
+++ b/drivers/net/wireless/intersil/hostap/Kconfig
@@ -5,11 +5,7 @@ config HOSTAP
select WEXT_SPY
select WEXT_PRIV
select CRYPTO
- select CRYPTO_ARC4
- select CRYPTO_ECB
- select CRYPTO_AES
select CRYPTO_MICHAEL_MIC
- select CRYPTO_ECB
select CRC32
select LIB80211
select LIB80211_CRYPT_WEP
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
index 8130d29c7989..c4b81ff7d7e4 100644
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap.h
@@ -8,8 +8,10 @@
#include "hostap_wlan.h"
#include "hostap_ap.h"
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+static const long __maybe_unused freq_list[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
#define FREQ_COUNT ARRAY_SIZE(freq_list)
/* hostap.c */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 3ec46f48cfde..8bcc1cdcb75b 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -1504,7 +1504,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
u16 resp = WLAN_STATUS_SUCCESS;
struct sta_info *sta = NULL;
int send_deauth = 0;
- char *txt = "";
+ char __always_unused *txt = "";
u8 prev_ap[ETH_ALEN];
left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index b6c497ce12e1..22cfb6452644 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -320,12 +320,6 @@ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
iface = netdev_priv(dev);
local = iface->local;
- if (in_interrupt()) {
- printk(KERN_DEBUG "%s: hfa384x_cmd called from interrupt "
- "context\n", dev->name);
- return -1;
- }
-
if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
dev->name);
@@ -1560,12 +1554,6 @@ static void prism2_hw_reset(struct net_device *dev)
iface = netdev_priv(dev);
local = iface->local;
- if (in_interrupt()) {
- printk(KERN_DEBUG "%s: driver bug - prism2_hw_reset() called "
- "in interrupt context\n", dev->name);
- return;
- }
-
if (local->hw_downloading)
return;
@@ -1803,7 +1791,7 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
struct hfa384x_tx_frame txdesc;
struct hostap_skb_tx_data *meta;
int hdr_len, data_len, idx, res, ret = -1;
- u16 tx_control, fc;
+ u16 tx_control;
iface = netdev_priv(dev);
local = iface->local;
@@ -1826,7 +1814,6 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
/* skb->data starts with txdesc->frame_control */
hdr_len = 24;
skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
- fc = le16_to_cpu(txdesc.frame_control);
if (ieee80211_is_data(txdesc.frame_control) &&
ieee80211_has_a4(txdesc.frame_control) &&
skb->len >= 30) {
@@ -2083,9 +2070,9 @@ static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
/* Called only as a tasklet (software IRQ) */
-static void hostap_rx_tasklet(unsigned long data)
+static void hostap_rx_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, rx_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->rx_list)) != NULL)
@@ -2288,9 +2275,9 @@ static void prism2_tx_ev(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_sta_tx_exc_tasklet(unsigned long data)
+static void hostap_sta_tx_exc_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, sta_tx_exc_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
@@ -2390,9 +2377,9 @@ static void prism2_txexc(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_info_tasklet(unsigned long data)
+static void hostap_info_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, info_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->info_list)) != NULL) {
@@ -2469,9 +2456,9 @@ static void prism2_info(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_bap_tasklet(unsigned long data)
+static void hostap_bap_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, bap_tasklet);
struct net_device *dev = local->dev;
u16 ev;
int frames = 30;
@@ -3183,7 +3170,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
/* Initialize tasklets for handling hardware IRQ related operations
* outside hw IRQ handler */
#define HOSTAP_TASKLET_INIT(q, f, d) \
-do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
+do { memset((q), 0, sizeof(*(q))); (q)->func = (void(*)(unsigned long))(f); } \
while (0)
HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
(unsigned long) local);
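
The tasklet hunks in this driver and the ones below all follow the same
tree-wide conversion: the callback now receives the tasklet itself and
recovers its container through from_tasklet() (a container_of() wrapper),
and initialization normally goes through tasklet_setup() instead of
smuggling a casted pointer in the data word. In outline, for a
hypothetical driver struct:

    struct mydrv {
            struct tasklet_struct rx_tasklet;
            /* ... */
    };

    static void mydrv_rx_tasklet(struct tasklet_struct *t)
    {
            struct mydrv *drv = from_tasklet(drv, t, rx_tasklet);

            /* process drv->... */
    }

    static void mydrv_init_tasklets(struct mydrv *drv)
    {
            /* replaces tasklet_init(&drv->rx_tasklet, fn, (unsigned long)drv) */
            tasklet_setup(&drv->rx_tasklet, mydrv_rx_tasklet);
    }
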
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 1ca9731d9b14..514c7b01dbf6 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -1955,7 +1955,7 @@ static inline int prism2_translate_scan(local_info_t *local,
char *buffer, int buflen)
{
struct hfa384x_hostscan_result *scan;
- int entry, hostscan;
+ int entry;
char *current_ev = buffer;
char *end_buf = buffer + buflen;
struct list_head *ptr;
@@ -1968,7 +1968,6 @@ static inline int prism2_translate_scan(local_info_t *local,
bss->included = 0;
}
- hostscan = local->last_scan_type == PRISM2_HOSTSCAN;
for (entry = 0; entry < local->last_scan_results_count; entry++) {
int found = 0;
scan = &local->last_scan_results[entry];
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 00264a14e52c..0e73a10cc06c 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -1062,9 +1062,9 @@ static void orinoco_rx(struct net_device *dev,
stats->rx_dropped++;
}
-static void orinoco_rx_isr_tasklet(unsigned long data)
+static void orinoco_rx_isr_tasklet(struct tasklet_struct *t)
{
- struct orinoco_private *priv = (struct orinoco_private *) data;
+ struct orinoco_private *priv = from_tasklet(priv, t, rx_tasklet);
struct net_device *dev = priv->ndev;
struct orinoco_rx_data *rx_data, *temp;
struct hermes_rx_descriptor *desc;
@@ -1503,7 +1503,7 @@ void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
schedule_work(&priv->join_work);
break;
}
- /* fall through */
+ fallthrough;
case HERMES_INQ_HOSTSCAN:
case HERMES_INQ_HOSTSCAN_SYMBOL: {
/* Result of a scanning. Contains information about
@@ -1594,7 +1594,7 @@ void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
/* Ignore this frame for now */
if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
break;
- /* fall through */
+ fallthrough;
default:
printk(KERN_DEBUG "%s: Unknown information frame received: "
"type 0x%04x, length %d\n", dev->name, type, len);
@@ -2198,8 +2198,7 @@ struct orinoco_private
INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
INIT_LIST_HEAD(&priv->rx_list);
- tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
- (unsigned long) priv);
+ tasklet_setup(&priv->rx_tasklet, orinoco_rx_isr_tasklet);
spin_lock_init(&priv->scan_lock);
INIT_LIST_HEAD(&priv->scan_list);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 11fa38fedd87..b849d27bd741 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -423,13 +423,13 @@ static void ezusb_ctx_complete(struct request_context *ctx)
}
}
-/**
+/*
* ezusb_req_queue_run:
* Description:
* Note: Only one active CTX at any one time, because there's no
* other (reliable) way to match the response URB to the correct
* CTX.
- **/
+ */
static void ezusb_req_queue_run(struct ezusb_priv *upriv)
{
unsigned long flags;
@@ -535,7 +535,7 @@ static void ezusb_request_out_callback(struct urb *urb)
flags);
break;
}
- /* fall through */
+ fallthrough;
case EZUSB_CTX_RESP_RECEIVED:
/* IN already received before this OUT-ACK */
ctx->state = EZUSB_CTX_COMPLETE;
@@ -557,7 +557,7 @@ static void ezusb_request_out_callback(struct urb *urb)
case EZUSB_CTX_REQ_SUBMITTED:
case EZUSB_CTX_RESP_RECEIVED:
ctx->state = EZUSB_CTX_REQ_FAILED;
- /* fall through */
+ fallthrough;
case EZUSB_CTX_REQ_FAILED:
case EZUSB_CTX_REQ_TIMEOUT:
@@ -704,7 +704,7 @@ static inline u16 build_crc(struct ezusb_packet *data)
return crc;
}
-/**
+/*
* ezusb_fill_req:
*
* if data == NULL and length > 0 the data is assumed to be already in
@@ -897,11 +897,11 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
case EZUSB_CTX_REQ_SUBMITTED:
if (!ctx->in_rid)
break;
- /* fall through */
+ fallthrough;
default:
err("%s: Unexpected context state %d", __func__,
state);
- /* fall through */
+ fallthrough;
case EZUSB_CTX_REQ_TIMEOUT:
case EZUSB_CTX_REQ_FAILED:
case EZUSB_CTX_RESP_TIMEOUT:
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 9d96c8b8409d..e97ee547b9f3 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -278,10 +278,10 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
}
}
-static void p54p_tasklet(unsigned long dev_id)
+static void p54p_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
- struct p54p_priv *priv = dev->priv;
+ struct p54p_priv *priv = from_tasklet(priv, t, tasklet);
+ struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev);
struct p54p_ring_control *ring_control = priv->ring_control;
p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
@@ -333,10 +333,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54p_desc *desc;
dma_addr_t mapping;
u32 idx, i;
+ __le32 device_addr;
spin_lock_irqsave(&priv->lock, flags);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
+ device_addr = ((struct p54_hdr *)skb->data)->req_id;
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
@@ -350,7 +352,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
- desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
+ desc->device_addr = device_addr;
desc->len = cpu_to_le16(skb->len);
desc->flags = 0;
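
Hoisting the req_id read above dma_map_single() is the point of this
hunk: once a streaming DMA mapping exists, the CPU must not touch the
buffer until it is unmapped or synced back, so the field is snapshotted
into device_addr first. The pattern, reduced (pdev, desc and the error
path taken from context):

    __le32 device_addr = ((struct p54_hdr *)skb->data)->req_id; /* CPU read first */
    dma_addr_t mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
                                        DMA_TO_DEVICE);

    if (dma_mapping_error(&pdev->dev, mapping))
            goto drop;
    desc->device_addr = device_addr;    /* no skb->data access past the map */
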
@@ -620,7 +622,7 @@ static int p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
+ tasklet_setup(&priv->tasklet, p54p_tasklet);
err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
&priv->pdev->dev, GFP_KERNEL,
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index a1f956707887..ae964de347f7 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -223,7 +223,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
- /* fall through */
+ fallthrough;
case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
index 3ccf2a4b548c..2076f449b6e2 100644
--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
@@ -163,7 +163,6 @@ prism54_update_stats(struct work_struct *work)
{
islpci_private *priv = container_of(work, islpci_private, stats_work);
char *data;
- int j;
struct obj_bss bss, *bss2;
union oid_res_t r;
@@ -187,7 +186,7 @@ prism54_update_stats(struct work_struct *work)
kfree(data);
/* now ask for the corresponding bss */
- j = mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
+ mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
bss2 = r.ptr;
/* report the rssi and use it to calculate
* link quality through a signal-noise
@@ -1691,7 +1690,7 @@ static int prism54_get_encodeext(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
- /* fall through */
+ fallthrough;
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c
index efd64e555bb5..8eb6d5e4bd57 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c
@@ -918,7 +918,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
switch (new_state) {
case PRV_STATE_OFF:
priv->state_off++;
- /* fall through */
+ fallthrough;
default:
priv->state = new_state;
break;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9dd9d73f4484..3b3fc7c9c91d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
* domain requests. The first radio will adhere to the first custom world
* regulatory domain, the second one to the second custom world regulatory
* domain. All other devices will world roam.
- * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain
+ * @HWSIM_REGTEST_STRICT_FOLLOW: Used for testing strict regulatory domain
* settings, only the first radio will send a regulatory domain request
* and use strict settings. The rest of the radios are expected to follow.
* @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain
@@ -377,6 +377,49 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5925), /* Channel 185 */
};
+#define NUM_S1G_CHANS_US 51
+static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US];
+
+static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
+ .s1g = true,
+ .cap = { S1G_CAP0_SGI_1MHZ | S1G_CAP0_SGI_2MHZ,
+ 0,
+ 0,
+ S1G_CAP3_MAX_MPDU_LEN,
+ 0,
+ S1G_CAP5_AMPDU,
+ 0,
+ S1G_CAP7_DUP_1MHZ,
+ S1G_CAP8_TWT_RESPOND | S1G_CAP8_TWT_REQUEST,
+ 0},
+ .nss_mcs = { 0xfc | 1, /* MCS 7 for 1 SS */
+ /* RX Highest Supported Long GI Data Rate 0:7 */
+ 0,
+ /* RX Highest Supported Long GI Data Rate 0:7 */
+ /* TX S1G MCS Map 0:6 */
+ 0xfa,
+ /* TX S1G MCS Map :7 */
+ /* TX Highest Supported Long GI Data Rate 0:6 */
+ 0x80,
+ /* TX Highest Supported Long GI Data Rate 7:8 */
+ /* Rx Single spatial stream and S1G-MCS Map for 1MHz */
+ /* Tx Single spatial stream and S1G-MCS Map for 1MHz */
+ 0 },
+};
+
+static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+{
+ int ch, freq;
+
+ for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
+ freq = 902000 + (ch + 1) * 500;
+ channels[ch].band = NL80211_BAND_S1GHZ;
+ channels[ch].center_freq = KHZ_TO_MHZ(freq);
+ channels[ch].freq_offset = freq % 1000;
+ channels[ch].hw_value = ch + 1;
+ }
+}
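
The S1G grid is built in kHz and then split into mac80211's MHz-plus-
offset form, since the 500 kHz channel spacing cannot be expressed in
whole MHz. Worked out for the first and last US channels:

    ch = 0:  freq = 902000 +  1 * 500 = 902500 kHz
             -> center_freq = 902 MHz, freq_offset = 500 kHz, hw_value = 1
    ch = 50: freq = 902000 + 51 * 500 = 927500 kHz
             -> center_freq = 927 MHz, freq_offset = 500 kHz, hw_value = 51
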
+
static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 10 },
{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -505,6 +548,7 @@ struct mac80211_hwsim_data {
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
+ struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
struct ieee80211_iface_limit if_limits[3];
@@ -900,12 +944,14 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
struct hwsim_radiotap_hdr *hdr;
- u16 flags;
+ u16 flags, bitrate;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb);
struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
- if (WARN_ON(!txrate))
- return;
+ if (!txrate)
+ bitrate = 0;
+ else
+ bitrate = txrate->bitrate;
if (!netif_running(hwsim_mon))
return;
@@ -924,10 +970,10 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
(1 << IEEE80211_RADIOTAP_CHANNEL));
hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
hdr->rt_flags = 0;
- hdr->rt_rate = txrate->bitrate / 5;
+ hdr->rt_rate = bitrate / 5;
hdr->rt_channel = cpu_to_le16(chan->center_freq);
flags = IEEE80211_CHAN_2GHZ;
- if (txrate->flags & IEEE80211_RATE_ERP_G)
+ if (txrate && txrate->flags & IEEE80211_RATE_ERP_G)
flags |= IEEE80211_CHAN_OFDM;
else
flags |= IEEE80211_CHAN_CCK;
@@ -1341,6 +1387,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
memset(&rx_status, 0, sizeof(rx_status));
rx_status.flag |= RX_FLAG_MACTIME_START;
rx_status.freq = chan->center_freq;
+ rx_status.freq_offset = chan->freq_offset ? 1 : 0;
rx_status.band = chan->band;
if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
rx_status.rate_idx =
@@ -1522,14 +1569,18 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
/* fake header transmission time */
struct ieee80211_mgmt *mgmt;
struct ieee80211_rate *txrate;
+ /* TODO: get MCS */
+ int bitrate = 100;
u64 ts;
mgmt = (struct ieee80211_mgmt *)skb->data;
txrate = ieee80211_get_tx_rate(hw, txi);
+ if (txrate)
+ bitrate = txrate->bitrate;
ts = mac80211_hwsim_get_tsf_raw();
mgmt->u.probe_resp.timestamp =
cpu_to_le64(ts + data->tsf_offset +
- 24 * 8 * 10 / txrate->bitrate);
+ 24 * 8 * 10 / bitrate);
}
mac80211_hwsim_monitor_rx(hw, skb, channel);
@@ -1664,6 +1715,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
+ /* TODO: get MCS */
+ int bitrate = 100;
hwsim_check_magic(vif);
@@ -1683,13 +1736,25 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
ARRAY_SIZE(info->control.rates));
txrate = ieee80211_get_tx_rate(hw, info);
+ if (txrate)
+ bitrate = txrate->bitrate;
mgmt = (struct ieee80211_mgmt *) skb->data;
/* fake header transmission time */
data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw();
- mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
- data->tsf_offset +
- 24 * 8 * 10 / txrate->bitrate);
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ struct ieee80211_ext *ext = (void *) mgmt;
+
+ ext->u.s1g_beacon.timestamp = cpu_to_le32(data->abs_bcn_ts +
+ data->tsf_offset +
+ 10 * 8 * 10 /
+ bitrate);
+ } else {
+ mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
+ data->tsf_offset +
+ 24 * 8 * 10 /
+ bitrate);
+ }
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
@@ -1699,7 +1764,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(vif->chanctx_conf)->def.chan);
}
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1737,6 +1802,11 @@ static const char * const hwsim_chanwidths[] = {
[NL80211_CHAN_WIDTH_80] = "vht80",
[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
[NL80211_CHAN_WIDTH_160] = "vht160",
+ [NL80211_CHAN_WIDTH_1] = "1MHz",
+ [NL80211_CHAN_WIDTH_2] = "2MHz",
+ [NL80211_CHAN_WIDTH_4] = "4MHz",
+ [NL80211_CHAN_WIDTH_8] = "8MHz",
+ [NL80211_CHAN_WIDTH_16] = "16MHz",
};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
@@ -3079,6 +3149,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_2ghz));
memcpy(data->channels_5ghz, hwsim_channels_5ghz,
sizeof(hwsim_channels_5ghz));
+ memcpy(data->channels_s1g, hwsim_channels_s1g,
+ sizeof(hwsim_channels_s1g));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
@@ -3121,6 +3193,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->vht_cap.vht_mcs.tx_mcs_map =
sband->vht_cap.vht_mcs.rx_mcs_map;
break;
+ case NL80211_BAND_S1GHZ:
+ memcpy(&sband->s1g_cap, &hwsim_s1g_cap,
+ sizeof(sband->s1g_cap));
+ sband->channels = data->channels_s1g;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_s1g);
+ break;
default:
continue;
}
@@ -3886,7 +3964,7 @@ done:
}
/* Generic Netlink operations array */
-static const struct genl_ops hwsim_ops[] = {
+static const struct genl_small_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -3930,8 +4008,8 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.policy = hwsim_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = hwsim_ops,
- .n_ops = ARRAY_SIZE(hwsim_ops),
+ .small_ops = hwsim_ops,
+ .n_small_ops = ARRAY_SIZE(hwsim_ops),
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
@@ -4318,6 +4396,8 @@ static int __init init_mac80211_hwsim(void)
goto out_exit_virtio;
}
+ hwsim_init_s1g_channels(hwsim_channels_s1g);
+
for (i = 0; i < radios; i++) {
struct hwsim_new_radio_params param = { 0 };
diff --git a/drivers/net/wireless/marvell/libertas/defs.h b/drivers/net/wireless/marvell/libertas/defs.h
index 58e2ead7b0cc..f7e7bf56f924 100644
--- a/drivers/net/wireless/marvell/libertas/defs.h
+++ b/drivers/net/wireless/marvell/libertas/defs.h
@@ -50,8 +50,7 @@ extern unsigned int lbs_debug;
#ifdef DEBUG
#define LBS_DEB_LL(grp, grpnam, fmt, args...) \
do { if ((lbs_debug & (grp)) == (grp)) \
- printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
- in_interrupt() ? " (INT)" : "", ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME grpnam ": " fmt, ## args); } while (0)
#else
#define LBS_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
#endif
diff --git a/drivers/net/wireless/marvell/libertas/firmware.c b/drivers/net/wireless/marvell/libertas/firmware.c
index 69029c59a272..f124110944b7 100644
--- a/drivers/net/wireless/marvell/libertas/firmware.c
+++ b/drivers/net/wireless/marvell/libertas/firmware.c
@@ -121,12 +121,12 @@ void lbs_wait_for_firmware_load(struct lbs_private *priv)
* either a helper firmware and a main firmware (2-stage), or just the helper.
*
* @priv: Pointer to lbs_private instance
- * @dev: A pointer to &device structure
+ * @device: A pointer to &device structure
* @card_model: Bus-specific card model ID used to filter firmware table
* elements
* @fw_table: Table of firmware file names and device model numbers
* terminated by an entry with a NULL helper name
- * @callback: User callback to invoke when firmware load succeeds or fails.
+ * @callback: User callback to invoke when firmware load succeeds or fails.
*/
int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
u32 card_model, const struct lbs_fw_table *fw_table,
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 2233b59cdf44..ee4cf3437e28 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -721,7 +721,7 @@ EXPORT_SYMBOL_GPL(lbs_resume);
* lbs_cmd_timeout_handler - handles the timeout of command sending.
* It will re-send the same command again.
*
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
*/
static void lbs_cmd_timeout_handler(struct timer_list *t)
{
@@ -755,7 +755,7 @@ out:
* to the hardware. This is known to frequently happen with SD8686 when
* waking up after a Wake-on-WLAN-triggered resume.
*
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
*/
static void lbs_tx_lockup_handler(struct timer_list *t)
{
@@ -777,7 +777,7 @@ static void lbs_tx_lockup_handler(struct timer_list *t)
/**
* auto_deepsleep_timer_fn - put the device back to deep sleep mode when
* timer expires and no activity (command, event, data etc.) is detected.
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
* returns: N/A
*/
static void auto_deepsleep_timer_fn(struct timer_list *t)
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index f28aa09d1f9e..9f24b0760e1f 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -147,10 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
ret = 0;
done:
@@ -265,11 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
-
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
ret = 0;
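
netif_rx_any_context() absorbs the open-coded context test deleted here,
so drivers stop second-guessing their own execution context; it behaves
roughly like:

    if (in_interrupt())
            netif_rx(skb);          /* atomic context: queue for softirq */
    else
            netif_rx_ni(skb);       /* process context */
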
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index a0b4c9debc11..efb98304555a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -32,10 +32,10 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv);
/**
* lbtf_cmd_copyback - Simple callback that copies response back into command
*
- * @priv A pointer to struct lbtf_private structure
- * @extra A pointer to the original command structure for which
+ * @priv: A pointer to struct lbtf_private structure
+ * @extra: A pointer to the original command structure for which
* 'resp' is a response
- * @resp A pointer to the command response
+ * @resp: A pointer to the command response
*
* Returns: 0 on success, error on failure
*/
@@ -72,7 +72,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
/**
* lbtf_update_hw_spec: Updates the hardware details.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success, error on failure
*/
@@ -141,8 +141,8 @@ out:
/**
* lbtf_set_channel: Set the radio channel
*
- * @priv A pointer to struct lbtf_private structure
- * @channel The desired channel, or 0 to clear a locked channel
+ * @priv: A pointer to struct lbtf_private structure
+ * @channel: The desired channel, or 0 to clear a locked channel
*
* Returns: 0 on success, error on failure
*/
@@ -268,7 +268,7 @@ static void lbtf_submit_command(struct lbtf_private *priv,
lbtf_deb_leave(LBTF_DEB_HOST);
}
-/**
+/*
* This function inserts command node to cmdfreeq
* after cleans it. Requires priv->driver_lock held.
*/
@@ -434,7 +434,7 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
/**
* lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success.
*/
@@ -482,7 +482,7 @@ done:
/**
* lbtf_free_cmd_buffer - Frees the cmd buffer.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0
*/
@@ -519,7 +519,7 @@ done:
/**
* lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: pointer to a struct cmd_ctrl_node or NULL if none available.
*/
@@ -553,7 +553,7 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
/**
* lbtf_execute_next_command: execute next command in cmd pending queue.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success.
*/
diff --git a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
index 37a98e228b46..0b520df62f1f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
+++ b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
@@ -48,8 +48,7 @@ extern unsigned int lbtf_debug;
#ifdef DEBUG
#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
do { if ((lbtf_debug & (grp)) == (grp)) \
- printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
- in_interrupt() ? " (INT)" : "", ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME grpnam ": " fmt, ## args); } while (0)
#else
#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
#endif
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index bedc09215088..a92916dc81a9 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -50,7 +50,7 @@ static int if_usb_reset_device(struct lbtf_private *priv);
/**
* if_usb_write_bulk_callback - callback to handle URB status
*
- * @param urb pointer to urb structure
+ * @urb: pointer to urb structure
*/
static void if_usb_write_bulk_callback(struct urb *urb)
{
@@ -67,7 +67,7 @@ static void if_usb_write_bulk_callback(struct urb *urb)
/**
* if_usb_free - free tx/rx urb, skb and rx buffer
*
- * @param cardp pointer if_usb_card
+ * @cardp: pointer to if_usb_card
*/
static void if_usb_free(struct if_usb_card *cardp)
{
@@ -136,8 +136,8 @@ static const struct lbtf_ops if_usb_ops = {
/**
* if_usb_probe - sets the configuration values
*
- * @ifnum interface number
- * @id pointer to usb_device_id
+ * @intf: USB interface structure
+ * @id: pointer to usb_device_id
*
* Returns: 0 on success, error code on failure
*/
@@ -238,7 +238,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
/**
* if_usb_disconnect - free resource and cleanup
*
- * @intf USB interface structure
+ * @intf: USB interface structure
*/
static void if_usb_disconnect(struct usb_interface *intf)
{
@@ -264,7 +264,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
/**
* if_usb_send_fw_pkt - This function downloads the FW
*
- * @priv pointer to struct lbtf_private
+ * @cardp: pointer to if_usb_card
*
* Returns: 0
*/
@@ -360,10 +360,10 @@ static int if_usb_reset_device(struct lbtf_private *priv)
/**
* usb_tx_block - transfer data to the device
*
- * @priv pointer to struct lbtf_private
- * @payload pointer to payload data
- * @nb data length
- * @data non-zero for data, zero for commands
+ * @cardp: pointer to if_usb_card
+ * @payload: pointer to payload data
+ * @nb: data length
+ * @data: non-zero for data, zero for commands
*
* Returns: 0 on success, nonzero otherwise.
*/
@@ -619,7 +619,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
/**
* if_usb_receive - read data received from the device.
*
- * @urb pointer to struct urb
+ * @urb: pointer to struct urb
*/
static void if_usb_receive(struct urb *urb)
{
@@ -702,10 +702,10 @@ setup_for_next:
/**
* if_usb_host_to_card - Download data to the device
*
- * @priv pointer to struct lbtf_private structure
- * @type type of data
- * @buf pointer to data buffer
- * @len number of bytes
+ * @priv: pointer to struct lbtf_private structure
+ * @type: type of data
+ * @payload: pointer to payload buffer
+ * @nb: number of bytes
*
* Returns: 0 on success, nonzero otherwise
*/
@@ -734,7 +734,8 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
/**
* if_usb_issue_boot_command - Issue boot command to Boot2.
*
- * @ivalue 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
+ * @cardp: pointer to if_usb_card
+ * @ivalue: 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
*
* Returns: 0
*/
@@ -757,8 +758,8 @@ static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
/**
* check_fwfile_format - Check the validity of Boot2/FW image.
*
- * @data pointer to image
- * @totlen image length
+ * @data: pointer to image
+ * @totlen: image length
*
* Returns: 0 if the image is valid, nonzero otherwise.
*/
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 02bd7c99b358..71492211904b 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -15,7 +15,6 @@
/* thinfirm version: 5.132.X.pX */
#define LBTF_FW_VER_MIN 0x05840300
#define LBTF_FW_VER_MAX 0x0584ffff
-#define QOS_CONTROL_LEN 2
/* Module parameters */
unsigned int lbtf_debug;
@@ -121,7 +120,7 @@ static void lbtf_cmd_work(struct work_struct *work)
lbtf_deb_leave(LBTF_DEB_CMD);
}
-/**
+/*
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
@@ -542,11 +541,9 @@ done:
}
EXPORT_SYMBOL_GPL(lbtf_rx);
-/**
+/*
* lbtf_add_card: Add and initialize the card.
*
- * @card A pointer to card
- *
* Returns: pointer to struct lbtf_priv.
*/
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 96848fa0e417..a6b9dc6700b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1163,7 +1163,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as IBSS\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
return 0;
default:
@@ -1194,7 +1194,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as STA\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
return 0;
default:
@@ -1217,7 +1217,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as AP\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_AP: /* This shouldn't happen */
return 0;
default:
@@ -1257,7 +1257,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as P2P\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d068b9075c32..3a11342a6bde 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -322,9 +322,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->seq_num++;
sleep_cfm_buf->seq_num =
- cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
+ cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num, priv->bss_num,
- priv->bss_type)));
+ priv->bss_type));
mwifiex_dbg(adapter, CMD,
"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index d9f8bdbc817b..470d669c7f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -513,10 +513,10 @@ enum mwifiex_channel_flags {
#define RF_ANTENNA_AUTO 0xFFFF
-#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
- (((seq) & 0x00ff) | \
- (((num) & 0x000f) << 8)) | \
- (((type) & 0x000f) << 12); }
+#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
+ ((((seq) & 0x00ff) | \
+ (((num) & 0x000f) << 8)) | \
+ (((type) & 0x000f) << 12))
#define HostCmd_GET_SEQ_NO(seq) \
((seq) & HostCmd_SEQ_NUM_MASK)
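
The rewritten macro is now a plain expression; the old outer braces made
it a compound statement, which is why the caller in cmdevt.c above could
drop its extra layer of parentheses. The 16-bit word it packs (seq, num
and type taken from the macro arguments):

    /* bits  0..7  : command sequence number
     * bits  8..11 : BSS number
     * bits 12..15 : BSS type
     */
    u16 seq_word = (seq & 0x00ff) | ((num & 0x000f) << 8) |
                   ((type & 0x000f) << 12);
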
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 811abe963af2..40e99eaf5a30 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -374,7 +374,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
(const u8 *)hdr,
token_len))
break;
- /* fall through */
+ fallthrough;
default:
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 82d69bc3aaaf..f006a3d72b40 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -695,14 +695,12 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
int ret;
u32 poll_num = 1;
- if (adapter->if_ops.check_fw_status) {
- /* check if firmware is already running */
- ret = adapter->if_ops.check_fw_status(adapter, poll_num);
- if (!ret) {
- mwifiex_dbg(adapter, MSG,
- "WLAN FW already running! Skip FW dnld\n");
- return 0;
- }
+ /* check if firmware is already running */
+ ret = adapter->if_ops.check_fw_status(adapter, poll_num);
+ if (!ret) {
+ mwifiex_dbg(adapter, MSG,
+ "WLAN FW already running! Skip FW dnld\n");
+ return 0;
}
/* check if we are the winner for downloading FW */
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 9ee5600351a7..9ba8a8f64976 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -48,6 +48,8 @@ bool aggr_ctrl;
module_param(aggr_ctrl, bool, 0000);
MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");
+const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+
/*
* This function registers the device and performs all the necessary
* initializations.
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 87b4ccca4b9a..6a10ff0377a2 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -33,6 +33,155 @@
static struct mwifiex_if_ops pcie_ops;
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_SCRATCH_6_REG,
+ .tx_wrptr = PCIE_SCRATCH_7_REG,
+ .rx_rdptr = PCIE_SCRATCH_8_REG,
+ .rx_wrptr = PCIE_SCRATCH_9_REG,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 0,
+ .tx_mask = MWIFIEX_TXBD_MASK,
+ .tx_wrap_mask = 0,
+ .rx_mask = MWIFIEX_RXBD_MASK,
+ .rx_wrap_mask = 0,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .ring_flag_sop = 0,
+ .ring_flag_eop = 0,
+ .ring_flag_xs_sop = 0,
+ .ring_flag_xs_eop = 0,
+ .ring_tx_start_ptr = 0,
+ .pfu_enabled = 0,
+ .sleep_cookie = 1,
+ .msix_support = 0,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x03FF0000,
+ .tx_wrap_mask = 0x07FF0000,
+ .rx_mask = 0x000003FF,
+ .rx_wrap_mask = 0x000007FF,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
+ .fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xee,
+ .fw_dump_read_done = 0xfe,
+ .msix_support = 0,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = 0xC1A4,
+ .tx_wrptr = 0xC174,
+ .rx_rdptr = 0xC174,
+ .rx_wrptr = 0xC1A4,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x0FFF0000,
+ .tx_wrap_mask = 0x1FFF0000,
+ .rx_mask = 0x00000FFF,
+ .rx_wrap_mask = 0x00001FFF,
+ .tx_rollover_ind = BIT(28),
+ .rx_rollover_ind = BIT(12),
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
+ .fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_read_done = 0xdd,
+ .msix_support = 0,
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
+ {"ITCM", NULL, 0, 0xF0},
+ {"DTCM", NULL, 0, 0xF1},
+ {"SQRAM", NULL, 0, 0xF2},
+ {"IRAM", NULL, 0, 0xF3},
+ {"APU", NULL, 0, 0xF4},
+ {"CIU", NULL, 0, 0xF5},
+ {"ICU", NULL, 0, 0xF6},
+ {"MAC", NULL, 0, 0xF7},
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
+ {"DUMP", NULL, 0, 0xDD},
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
+ .reg = &mwifiex_reg_8766,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .can_dump_fw = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
+ .reg = &mwifiex_reg_8897,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ .reg = &mwifiex_reg_8997,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
+ .can_ext_scan = true,
+};
+
static const struct of_device_id mwifiex_pcie_of_match_table[] = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
@@ -58,8 +207,8 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
struct pcie_service_card *card = adapter->card;
struct mwifiex_dma_mapping mapping;
- mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, mapping.addr)) {
+ mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
+ if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
@@ -75,7 +224,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
struct mwifiex_dma_mapping mapping;
mwifiex_get_mapping(skb, &mapping);
- pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+ dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
}
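
From here to the end of the file the legacy PCI DMA wrappers are swapped
for the generic DMA API on &card->dev->dev; the pci_* names were thin
compatibility shims, and the generic calls make the GFP flag explicit
(these ring-setup paths can sleep, hence GFP_KERNEL). The correspondence
used throughout:

    pci_map_single(pdev, p, n, dir)        -> dma_map_single(&pdev->dev, p, n, dir)
    pci_unmap_single(pdev, a, n, dir)      -> dma_unmap_single(&pdev->dev, a, n, dir)
    pci_alloc_consistent(pdev, sz, &h)     -> dma_alloc_coherent(&pdev->dev, sz, &h,
                                                                 GFP_KERNEL)
    pci_free_consistent(pdev, sz, v, h)    -> dma_free_coherent(&pdev->dev, sz, v, h)
    PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE  -> DMA_TO_DEVICE / DMA_FROM_DEVICE
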
/*
@@ -461,10 +610,9 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
struct sk_buff *cmdrsp = card->cmdrsp_buf;
for (count = 0; count < max_delay_loop_cnt; count++) {
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie), DMA_FROM_DEVICE);
buffer = cmdrsp->data;
sleep_cookie = get_unaligned_le32(buffer);
@@ -473,10 +621,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
"sleep cookie found at count %d\n", count);
break;
}
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie),
+ DMA_FROM_DEVICE);
usleep_range(20, 30);
}
@@ -630,7 +778,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -687,7 +835,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
kfree(card->evtbd_ring_vbase);
return -1;
@@ -730,7 +878,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -739,7 +887,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -769,7 +917,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -778,7 +926,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -804,7 +952,7 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -845,18 +993,20 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
- card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->txbd_ring_size,
- &card->txbd_ring_pbase);
+ card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->txbd_ring_size,
+ &card->txbd_ring_pbase,
+ GFP_KERNEL);
if (!card->txbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->txbd_ring_size);
return -ENOMEM;
}
+
mwifiex_dbg(adapter, DATA,
- "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
- card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+ "info: txbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+ card->txbd_ring_vbase, (u32)card->txbd_ring_pbase,
(u32)((u64)card->txbd_ring_pbase >> 32),
card->txbd_ring_size);
@@ -871,9 +1021,9 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_txq_ring(adapter);
if (card->txbd_ring_vbase)
- pci_free_consistent(card->dev, card->txbd_ring_size,
- card->txbd_ring_vbase,
- card->txbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->txbd_ring_size,
+ card->txbd_ring_vbase,
+ card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
card->txbd_rdptr = 0 | reg->tx_rollover_ind;
@@ -909,12 +1059,13 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
- card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->rxbd_ring_size,
- &card->rxbd_ring_pbase);
+ card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->rxbd_ring_size,
+ &card->rxbd_ring_pbase,
+ GFP_KERNEL);
if (!card->rxbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->rxbd_ring_size);
return -ENOMEM;
}
@@ -939,9 +1090,9 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_rxq_ring(adapter);
if (card->rxbd_ring_vbase)
- pci_free_consistent(card->dev, card->rxbd_ring_size,
- card->rxbd_ring_vbase,
- card->rxbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->rxbd_ring_size,
+ card->rxbd_ring_vbase,
+ card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
@@ -972,13 +1123,14 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: evtbd_ring: Allocating %d bytes\n",
- card->evtbd_ring_size);
- card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->evtbd_ring_size,
- &card->evtbd_ring_pbase);
+ card->evtbd_ring_size);
+ card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->evtbd_ring_size,
+ &card->evtbd_ring_pbase,
+ GFP_KERNEL);
if (!card->evtbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->evtbd_ring_size);
return -ENOMEM;
}
@@ -1003,9 +1155,9 @@ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_evt_ring(adapter);
if (card->evtbd_ring_vbase)
- pci_free_consistent(card->dev, card->evtbd_ring_size,
- card->evtbd_ring_vbase,
- card->evtbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->evtbd_ring_size,
+ card->evtbd_ring_vbase,
+ card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
@@ -1032,7 +1184,7 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
return -1;
}
@@ -1056,14 +1208,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmdrsp_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
card->cmdrsp_buf = NULL;
}
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1078,11 +1230,13 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
u32 tmp;
- card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
- &card->sleep_cookie_pbase);
+ card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
+ sizeof(u32),
+ &card->sleep_cookie_pbase,
+ GFP_KERNEL);
if (!card->sleep_cookie_vbase) {
mwifiex_dbg(adapter, ERROR,
- "pci_alloc_consistent failed!\n");
+ "dma_alloc_coherent failed!\n");
return -ENOMEM;
}
/* Init val of Sleep Cookie */
@@ -1109,9 +1263,9 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->sleep_cookie_vbase) {
- pci_free_consistent(card->dev, sizeof(u32),
- card->sleep_cookie_vbase,
- card->sleep_cookie_pbase);
+ dma_free_coherent(&card->dev->dev, sizeof(u32),
+ card->sleep_cookie_vbase,
+ card->sleep_cookie_pbase);
card->sleep_cookie_vbase = NULL;
}
@@ -1183,7 +1337,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
unmap_count++;
@@ -1276,7 +1430,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
if (mwifiex_map_pci_memory(adapter, skb, skb->len,
- PCI_DMA_TODEVICE))
+ DMA_TO_DEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
@@ -1358,7 +1512,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
card->tx_buf_list[wrindx] = NULL;
atomic_dec(&adapter->tx_hw_pending);
if (reg->pfu_enabled)
@@ -1412,7 +1566,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1450,7 +1604,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb_tmp,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
@@ -1527,7 +1681,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1539,7 +1693,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1551,7 +1705,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1560,7 +1714,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1569,7 +1723,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
mwifiex_dbg(adapter, ERROR,
"%s: failed to assert door-bell intr\n", __func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1628,7 +1782,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
put_unaligned_le16((u16)skb->len, &payload[0]);
put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
card->cmd_buf = skb;
@@ -1728,17 +1882,16 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"info: Rx CMD Response\n");
if (adapter->curr_cmd)
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE);
else
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE);
/* Unmap the command as a response has been received. */
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1749,10 +1902,10 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_SLEEP_COOKIE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_SLEEP_COOKIE_SIZE,
+ DMA_FROM_DEVICE);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1763,7 +1916,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_delay_for_sleep_cookie(adapter,
MWIFIEX_MAX_DELAY_COUNT);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_pull(skb, adapter->intf_hdr_len);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1779,7 +1932,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb_push(skb, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
skb_pull(skb, adapter->intf_hdr_len);
@@ -1821,7 +1974,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
card->cmdrsp_buf = skb;
skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
}
@@ -1876,7 +2029,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1956,7 +2109,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
skb_put(skb, MAX_EVENT_SIZE - skb->len);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
card->evt_buf_list[rdptr] = skb;
desc = card->evtbd_ring[rdptr];
@@ -2238,7 +2391,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
"interrupt status during fw dnld.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
@@ -2250,12 +2403,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
offset += txlen;
} while (true);
@@ -2925,15 +3078,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- pr_err("set_dma_mask(32) failed: %d\n", ret);
- goto err_set_dma_mask;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- pr_err("set_consistent_dma_mask(64) failed\n");
+ pr_err("dma_set_mask(32) failed: %d\n", ret);
goto err_set_dma_mask;
}
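
The mwifiex PCIe conversion above is mechanical: every legacy pci_* DMA
wrapper has a one-to-one generic-API equivalent, with &pdev->dev substituted
for the pci_dev, the PCI_DMA_* direction constants replaced by enum
dma_data_direction values, and coherent allocations gaining an explicit GFP
flag. The mapping used throughout the patch, in sketch form:

    /* legacy PCI wrapper                  ->  generic DMA API */
    pci_alloc_consistent(pdev, sz, &pa)    ->  dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_KERNEL)
    pci_free_consistent(pdev, sz, va, pa)  ->  dma_free_coherent(&pdev->dev, sz, va, pa)
    pci_dma_sync_single_for_cpu(pdev, a, sz, d)
                                           ->  dma_sync_single_for_cpu(&pdev->dev, a, sz, d)
    pci_dma_sync_single_for_device(pdev, a, sz, d)
                                           ->  dma_sync_single_for_device(&pdev->dev, a, sz, d)
    pci_set_dma_mask() + pci_set_consistent_dma_mask()
                                           ->  dma_set_mask_and_coherent(&pdev->dev, mask)
    PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE  ->  DMA_TO_DEVICE / DMA_FROM_DEVICE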
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index fc59b522f670..843d57eda820 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -158,127 +158,6 @@ struct mwifiex_pcie_card_reg {
u8 msix_support;
};
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = PCIE_SCRATCH_6_REG,
- .tx_wrptr = PCIE_SCRATCH_7_REG,
- .rx_rdptr = PCIE_SCRATCH_8_REG,
- .rx_wrptr = PCIE_SCRATCH_9_REG,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 0,
- .tx_mask = MWIFIEX_TXBD_MASK,
- .tx_wrap_mask = 0,
- .rx_mask = MWIFIEX_RXBD_MASK,
- .rx_wrap_mask = 0,
- .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .ring_flag_sop = 0,
- .ring_flag_eop = 0,
- .ring_flag_xs_sop = 0,
- .ring_flag_xs_eop = 0,
- .ring_tx_start_ptr = 0,
- .pfu_enabled = 0,
- .sleep_cookie = 1,
- .msix_support = 0,
-};
-
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
- .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
- .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
- .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 16,
- .tx_mask = 0x03FF0000,
- .tx_wrap_mask = 0x07FF0000,
- .rx_mask = 0x000003FF,
- .rx_wrap_mask = 0x000007FF,
- .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
- .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
- .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
- .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
- .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
- .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
- .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
- .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
- .pfu_enabled = 1,
- .sleep_cookie = 0,
- .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
- .fw_dump_start = PCIE_SCRATCH_14_REG,
- .fw_dump_end = 0xcff,
- .fw_dump_host_ready = 0xee,
- .fw_dump_read_done = 0xfe,
- .msix_support = 0,
-};
-
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = 0xC1A4,
- .tx_wrptr = 0xC174,
- .rx_rdptr = 0xC174,
- .rx_wrptr = 0xC1A4,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 16,
- .tx_mask = 0x0FFF0000,
- .tx_wrap_mask = 0x1FFF0000,
- .rx_mask = 0x00000FFF,
- .rx_wrap_mask = 0x00001FFF,
- .tx_rollover_ind = BIT(28),
- .rx_rollover_ind = BIT(12),
- .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
- .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
- .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
- .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
- .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
- .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
- .pfu_enabled = 1,
- .sleep_cookie = 0,
- .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
- .fw_dump_start = PCIE_SCRATCH_14_REG,
- .fw_dump_end = 0xcff,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_read_done = 0xdd,
- .msix_support = 0,
-};
-
-static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
- {"ITCM", NULL, 0, 0xF0},
- {"DTCM", NULL, 0, 0xF1},
- {"SQRAM", NULL, 0, 0xF2},
- {"IRAM", NULL, 0, 0xF3},
- {"APU", NULL, 0, 0xF4},
- {"CIU", NULL, 0, 0xF5},
- {"ICU", NULL, 0, 0xF6},
- {"MAC", NULL, 0, 0xF7},
-};
-
-static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
- {"DUMP", NULL, 0, 0xDD},
-};
-
struct mwifiex_pcie_device {
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
@@ -289,34 +168,6 @@ struct mwifiex_pcie_device {
bool can_ext_scan;
};
-static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
- .reg = &mwifiex_reg_8766,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .can_dump_fw = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
- .reg = &mwifiex_reg_8897,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .can_dump_fw = true,
- .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
- .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
- .reg = &mwifiex_reg_8997,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .can_dump_fw = true,
- .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
- .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
- .can_ext_scan = true,
-};
-
struct mwifiex_evt_buf_desc {
u64 paddr;
u16 len;
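
The register maps and per-chip descriptors removed from pcie.h were static
const objects defined in a header, so every translation unit including the
header got its own copy (or an unused-const-variable warning); they now live
once in pcie.c, their only user. A minimal illustration of the split, with a
hypothetical foo_cfg standing in for the real tables:

    /* foo.h: share only the type */
    struct foo_cfg { int max_ports; bool can_dump_fw; };

    /* foo.c: the single, file-local definition */
    static const struct foo_cfg foo_cfg_a = {
            .max_ports   = 16,
            .can_dump_fw = false,
    };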
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index ff932627a46c..c2a685f63e95 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1328,7 +1328,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
case WLAN_EID_CHANNEL_SWITCH:
bss_entry->chan_sw_ie_present = true;
- /* fall through */
+ fallthrough;
case WLAN_EID_PWR_CAPABILITY:
case WLAN_EID_TPC_REPORT:
case WLAN_EID_QUIET:
@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
chan, CFG80211_BSS_FTYPE_UNKNOWN,
bssid, timestamp,
cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
+ ie_buf, ie_len, rssi, GFP_ATOMIC);
if (bss) {
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
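
The switch from GFP_KERNEL to GFP_ATOMIC in the scan-result path is the usual
fix for an allocation reachable from atomic context: GFP_KERNEL may sleep,
which is forbidden while a spinlock is held or in softirq context. The
constraint, sketched with a hypothetical lock:

    spin_lock_bh(&priv->lock);                  /* hypothetical lock   */
    bss = kzalloc(sizeof(*bss), GFP_ATOMIC);    /* must not sleep here */
    spin_unlock_bh(&priv->lock);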
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a042965962a2..bde9e4bbfffe 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -35,6 +35,433 @@ static void mwifiex_sdio_work(struct work_struct *work);
static struct mwifiex_if_ops sdio_ops;
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
+ .start_rd_port = 1,
+ .start_wr_port = 1,
+ .base_0_reg = 0x0040,
+ .base_1_reg = 0x0041,
+ .poll_reg = 0x30,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
+ .host_int_rsr_reg = 0x1,
+ .host_int_mask_reg = 0x02,
+ .host_int_status_reg = 0x03,
+ .status_reg_0 = 0x60,
+ .status_reg_1 = 0x61,
+ .sdio_int_mask = 0x3f,
+ .data_port_mask = 0x0000fffe,
+ .io_port_0_reg = 0x78,
+ .io_port_1_reg = 0x79,
+ .io_port_2_reg = 0x7A,
+ .max_mp_regs = 64,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .wr_bitmap_l = 0x06,
+ .wr_bitmap_u = 0x07,
+ .rd_len_p0_l = 0x08,
+ .rd_len_p0_u = 0x09,
+ .card_misc_cfg_reg = 0x6c,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0x9,
+ .func1_scratch_reg = 0x60,
+ .func1_spec_reg_num = 5,
+ .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0x60,
+ .base_1_reg = 0x61,
+ .poll_reg = 0x50,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x1,
+ .host_int_status_reg = 0x03,
+ .host_int_mask_reg = 0x02,
+ .status_reg_0 = 0xc0,
+ .status_reg_1 = 0xc1,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xD8,
+ .io_port_1_reg = 0xD9,
+ .io_port_2_reg = 0xDA,
+ .max_mp_regs = 184,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .rd_bitmap_1l = 0x06,
+ .rd_bitmap_1u = 0x07,
+ .wr_bitmap_l = 0x08,
+ .wr_bitmap_u = 0x09,
+ .wr_bitmap_1l = 0x0a,
+ .wr_bitmap_1u = 0x0b,
+ .rd_len_p0_l = 0x0c,
+ .rd_len_p0_u = 0x0d,
+ .card_misc_cfg_reg = 0xcc,
+ .card_cfg_2_1_reg = 0xcd,
+ .cmd_rd_len_0 = 0xb4,
+ .cmd_rd_len_1 = 0xb5,
+ .cmd_rd_len_2 = 0xb6,
+ .cmd_rd_len_3 = 0xb7,
+ .cmd_cfg_0 = 0xb8,
+ .cmd_cfg_1 = 0xb9,
+ .cmd_cfg_2 = 0xba,
+ .cmd_cfg_3 = 0xbb,
+ .fw_dump_host_ready = 0xee,
+ .fw_dump_ctrl = 0xe2,
+ .fw_dump_start = 0xe3,
+ .fw_dump_end = 0xea,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0xb,
+ .func1_scratch_reg = 0xc0,
+ .func1_spec_reg_num = 8,
+ .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
+ 0x59, 0x5c, 0x5d},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0x6C,
+ .base_1_reg = 0x6D,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0x90,
+ .status_reg_1 = 0x91,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0x90,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf9,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xE8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+ .firmware = SD8786_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = false,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+ .firmware = SD8787_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+ .firmware = SD8797_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+ .firmware = SD8897_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8897,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ .firmware = SD8977_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8977,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ .firmware = SD8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8997,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+ .firmware = SD8887_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8887,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = false,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ .firmware = SD8987_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8987,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+ .firmware = SD8801_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static struct memory_type_mapping generic_mem_type_map[] = {
{"DUMP", NULL, 0, 0xDD},
};
@@ -1976,6 +2403,8 @@ error:
kfree(card->mpa_rx.buf);
card->mpa_tx.buf_size = 0;
card->mpa_rx.buf_size = 0;
+ card->mpa_tx.buf = NULL;
+ card->mpa_rx.buf = NULL;
}
return ret;
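
Clearing mpa_tx.buf and mpa_rx.buf after the error-path kfree() makes the
cleanup safe to reach more than once: the aggregation buffers can be
reallocated later (for instance on a retry with a smaller size), and a stale
pointer left behind here could be freed a second time. Since kfree(NULL) is a
no-op, the idiom costs nothing:

    kfree(card->mpa_tx.buf);
    card->mpa_tx.buf = NULL;    /* a later kfree() sees NULL, not a dangling pointer */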
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 8b476b007c5e..dec534a6ddb1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -290,433 +290,6 @@ struct mwifiex_sdio_device {
bool can_ext_scan;
};
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
- .start_rd_port = 1,
- .start_wr_port = 1,
- .base_0_reg = 0x0040,
- .base_1_reg = 0x0041,
- .poll_reg = 0x30,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
- .host_int_rsr_reg = 0x1,
- .host_int_mask_reg = 0x02,
- .host_int_status_reg = 0x03,
- .status_reg_0 = 0x60,
- .status_reg_1 = 0x61,
- .sdio_int_mask = 0x3f,
- .data_port_mask = 0x0000fffe,
- .io_port_0_reg = 0x78,
- .io_port_1_reg = 0x79,
- .io_port_2_reg = 0x7A,
- .max_mp_regs = 64,
- .rd_bitmap_l = 0x04,
- .rd_bitmap_u = 0x05,
- .wr_bitmap_l = 0x06,
- .wr_bitmap_u = 0x07,
- .rd_len_p0_l = 0x08,
- .rd_len_p0_u = 0x09,
- .card_misc_cfg_reg = 0x6c,
- .func1_dump_reg_start = 0x0,
- .func1_dump_reg_end = 0x9,
- .func1_scratch_reg = 0x60,
- .func1_spec_reg_num = 5,
- .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0x60,
- .base_1_reg = 0x61,
- .poll_reg = 0x50,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x1,
- .host_int_status_reg = 0x03,
- .host_int_mask_reg = 0x02,
- .status_reg_0 = 0xc0,
- .status_reg_1 = 0xc1,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xD8,
- .io_port_1_reg = 0xD9,
- .io_port_2_reg = 0xDA,
- .max_mp_regs = 184,
- .rd_bitmap_l = 0x04,
- .rd_bitmap_u = 0x05,
- .rd_bitmap_1l = 0x06,
- .rd_bitmap_1u = 0x07,
- .wr_bitmap_l = 0x08,
- .wr_bitmap_u = 0x09,
- .wr_bitmap_1l = 0x0a,
- .wr_bitmap_1u = 0x0b,
- .rd_len_p0_l = 0x0c,
- .rd_len_p0_u = 0x0d,
- .card_misc_cfg_reg = 0xcc,
- .card_cfg_2_1_reg = 0xcd,
- .cmd_rd_len_0 = 0xb4,
- .cmd_rd_len_1 = 0xb5,
- .cmd_rd_len_2 = 0xb6,
- .cmd_rd_len_3 = 0xb7,
- .cmd_cfg_0 = 0xb8,
- .cmd_cfg_1 = 0xb9,
- .cmd_cfg_2 = 0xba,
- .cmd_cfg_3 = 0xbb,
- .fw_dump_host_ready = 0xee,
- .fw_dump_ctrl = 0xe2,
- .fw_dump_start = 0xe3,
- .fw_dump_end = 0xea,
- .func1_dump_reg_start = 0x0,
- .func1_dump_reg_end = 0xb,
- .func1_scratch_reg = 0xc0,
- .func1_spec_reg_num = 8,
- .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
- 0x59, 0x5c, 0x5d},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xe8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
- 0x60, 0x61, 0x62, 0x64,
- 0x65, 0x66, 0x68, 0x69,
- 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xe8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
- 0x60, 0x61, 0x62, 0x64,
- 0x65, 0x66, 0x68, 0x69,
- 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0x6C,
- .base_1_reg = 0x6D,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0x90,
- .status_reg_1 = 0x91,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0x90,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
- 0x61, 0x62, 0x64, 0x65, 0x66,
- 0x68, 0x69, 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf9,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xE8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
- 0x61, 0x62, 0x64, 0x65, 0x66,
- 0x68, 0x69, 0x6a},
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
- .firmware = SD8786_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = false,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
- .firmware = SD8787_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
- .firmware = SD8797_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
- .firmware = SD8897_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8897,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
- .firmware = SD8977_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8977,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
- .firmware = SD8997_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8997,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
- .firmware = SD8887_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8887,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = false,
- .can_auto_tdls = true,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
- .firmware = SD8987_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8987,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = true,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
- .firmware = SD8801_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
/*
* .cmdrsp_complete handler
*/
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 77c8595f84f8..9bbdb8dfce62 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -350,11 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer*/
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
-
+ netif_rx_any_context(skb);
return 0;
}
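
Here and in util.c below, the open-coded in_interrupt() check is folded into
netif_rx_any_context(), a core helper wrapping exactly the removed logic. A
sketch of its semantics (not the literal in-kernel definition):

    static inline int netif_rx_any_context_sketch(struct sk_buff *skb)
    {
            if (in_interrupt())
                    return netif_rx(skb);    /* hard/soft interrupt context */
            return netif_rx_ni(skb);         /* process context             */
    }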
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6f3cfde4654c..426e39d4ccf0 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
skb_dequeue(&port->tx_aggr.aggr_list)))
mwifiex_write_data_complete(adapter, skb_tmp,
0, -1);
- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ if (port->tx_aggr.timer_cnxt.hold_timer.function)
+ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
}
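
The guard added around del_timer_sync() suggests the hold timer is only set
up when USB TX aggregation is actually in use; calling del_timer_sync() on a
timer_list that timer_setup() never touched is a bug, and a non-NULL
.function callback serves as the "was it ever initialized" test:

    if (timer->function)            /* timer_setup() ran at least once */
            del_timer_sync(timer);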
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index de89a1e710b1..d583fa600a29 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -488,11 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
-
+ netif_rx_any_context(skb);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index a06fff199ea3..b8f19ca73414 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -40,6 +40,21 @@
static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);
+/* This table inverts the tos_to_tid operation to get a priority
+ * which is in sequential order and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07
+};
+
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
0x00, 0x50, 0xf2, 0x02,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 04d7da95e307..1cb3d1804758 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -31,22 +31,8 @@ enum ieee_types_wmm_ecw_bitmasks {
MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
};
-static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static const u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
+extern const u16 mwifiex_1d_to_wmm_queue[];
+extern const u8 tos_to_tid_inv[];
/*
* This function retrieves the TID of the given RA list.
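
tos_to_tid_inv follows the same header-cleanup rule as the SDIO register
tables: one definition in wmm.c (added above) plus an extern declaration in
wmm.h, instead of a per-includer static copy. mwifiex_1d_to_wmm_queue gets
the same treatment, with its definition presumably moved in a hunk outside
this excerpt. The resulting split:

    /* wmm.c: the one definition */
    const u8 tos_to_tid_inv[] = {
            0x02, 0x00, 0x01, 0x03, 0x04, 0x05, 0x06, 0x07
    };

    /* wmm.h: declaration shared by every user */
    extern const u8 tos_to_tid_inv[];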
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 97f23f93f6e7..23efd7075df6 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -4630,10 +4630,10 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mwl8k_tx_poll(unsigned long data)
+static void mwl8k_tx_poll(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_priv *priv = from_tasklet(priv, t, poll_tx_task);
+ struct ieee80211_hw *hw = pci_get_drvdata(priv->pdev);
int limit;
int i;
@@ -4659,10 +4659,10 @@ static void mwl8k_tx_poll(unsigned long data)
}
}
-static void mwl8k_rx_poll(unsigned long data)
+static void mwl8k_rx_poll(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_priv *priv = from_tasklet(priv, t, poll_rx_task);
+ struct ieee80211_hw *hw = pci_get_drvdata(priv->pdev);
int limit;
limit = 32;
@@ -6120,9 +6120,9 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
/* TX reclaim and RX tasklets. */
- tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_setup(&priv->poll_tx_task, mwl8k_tx_poll);
tasklet_disable(&priv->poll_tx_task);
- tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_setup(&priv->poll_rx_task, mwl8k_rx_poll);
tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
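
The mwl8k poll tasklets move from tasklet_init(), which smuggles its context
through an unsigned long, to the type-safe tasklet_setup()/from_tasklet()
pair: the callback receives the tasklet_struct itself, recovers the enclosing
private struct with from_tasklet() (a container_of() wrapper), and re-derives
the ieee80211_hw from the PCI drvdata. The shape of the conversion:

    tasklet_setup(&priv->poll_tx_task, mwl8k_tx_poll);

    static void mwl8k_tx_poll(struct tasklet_struct *t)
    {
            struct mwl8k_priv *priv = from_tasklet(priv, t, poll_tx_task);
            struct ieee80211_hw *hw = pci_get_drvdata(priv->pdev);
            /* ... reclaim completed TX descriptors ... */
    }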
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index 5d58b16bfe9f..52f583cb1418 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -31,15 +31,14 @@ int mt76_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_sw_queue *q = &dev->q_tx[i];
+ struct mt76_queue *q = dev->q_tx[i];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
- "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
- i, q->q->queued, q->q->head, q->q->tail,
- q->swq_queued);
+ "%d: queued=%d head=%d tail=%d\n",
+ i, q->queued, q->head, q->tail);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6c25859dd386..214fc95b8a33 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,76 @@
#include "mt76.h"
#include "dma.h"
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+ dma_addr_t addr;
+ u8 *txwi;
+ int size;
+
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
+ return NULL;
+
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+ t->dma_addr = addr;
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock(&dev->lock);
+ if (!list_empty(&dev->txwi_cache)) {
+ t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock(&dev->lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock(&dev->lock);
+ list_add(&t->list, &dev->txwi_cache);
+ spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ while ((t = __mt76_get_txwi(dev)) != NULL)
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+}
+
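
The txwi helpers moved into dma.c implement a small free-list cache of
DMA-mapped TX descriptor (txwi) buffers, protected by dev->lock:
mt76_get_txwi() pops a cached entry and falls back to allocating and mapping
a fresh one, mt76_put_txwi() returns an entry to the list, and
mt76_free_pending_txwi() drains the list at teardown; only dma_unmap_single()
is needed there because the memory itself is devm-managed. Intended lifecycle
on the TX path, in sketch form:

    struct mt76_txwi_cache *t = mt76_get_txwi(dev);  /* reuse or allocate+map */
    if (!t)
            return -ENOMEM;
    /* ... build the txwi, hand the frame to the DMA queue ... */
    mt76_put_txwi(dev, t);          /* back on the free list              */
    /* on device teardown: */
    mt76_free_pending_txwi(dev);    /* dma_unmap every still-cached entry */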
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -49,6 +119,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
{
+ struct mt76_queue_entry *entry;
struct mt76_desc *desc;
u32 ctrl;
int i, idx = -1;
@@ -61,10 +132,27 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
+ idx = q->head;
+ q->head = (q->head + 1) % q->ndesc;
+
+ desc = &q->desc[idx];
+ entry = &q->entry[idx];
+
+ if (buf[0].skip_unmap)
+ entry->skip_buf0 = true;
+ entry->skip_buf1 = i == nbufs - 1;
+
+ entry->dma_addr[0] = buf[0].addr;
+ entry->dma_len[0] = buf[0].len;
+
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
+ entry->dma_addr[1] = buf[1].addr;
+ entry->dma_len[1] = buf[1].len;
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ if (buf[1].skip_unmap)
+ entry->skip_buf1 = true;
}
if (i == nbufs - 1)
@@ -72,11 +160,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
- idx = q->head;
- q->head = (q->head + 1) % q->ndesc;
-
- desc = &q->desc[idx];
-
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -96,24 +179,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *prev_e)
{
struct mt76_queue_entry *e = &q->entry[idx];
- __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
- u32 ctrl = le32_to_cpu(__ctrl);
- if (!e->skip_buf0) {
- __le32 addr = READ_ONCE(q->desc[idx].buf0);
- u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
- dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ if (!e->skip_buf0)
+ dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
DMA_TO_DEVICE);
- }
- if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
- __le32 addr = READ_ONCE(q->desc[idx].buf1);
- u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
- dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ if (!e->skip_buf1)
+ dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
DMA_TO_DEVICE);
- }
if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
@@ -137,19 +210,17 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
+ wmb();
writel(q->head, &q->regs->cpu_idx);
}
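
The wmb() added to mt76_dma_kick_queue() makes the ordering requirement
explicit: descriptor writes in coherent memory must be globally visible
before the head-pointer doorbell reaches the device, or the hardware could
chase a head index into a half-written descriptor. On many architectures
writel() already orders against prior coherent stores, so the barrier is
arguably belt and braces; the contract it enforces:

    WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); /* publish the descriptor */
    wmb();                                     /* descriptors visible... */
    writel(q->head, &q->regs->cpu_idx);        /* ...before the doorbell */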
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *q = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
- unsigned int n_swq_queued[8] = {};
- unsigned int n_queued = 0;
bool wake = false;
- int i, last;
+ int last;
if (!q)
return;
@@ -159,16 +230,9 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
last = readl(&q->regs->dma_idx);
- while ((q->queued > n_queued) && q->tail != last) {
+ while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
- if (entry.schedule)
- n_swq_queued[entry.qid]++;
-
- q->tail = (q->tail + 1) % q->ndesc;
- n_queued++;
-
- if (entry.skb)
- dev->drv->tx_complete_skb(dev, qid, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -178,29 +242,14 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
- }
-
- spin_lock_bh(&q->lock);
-
- q->queued -= n_queued;
- for (i = 0; i < 4; i++) {
- if (!n_swq_queued[i])
- continue;
-
- dev->q_tx[i].swq_queued -= n_swq_queued[i];
- }
-
- /* ext PHY */
- for (i = 0; i < 4; i++) {
- if (!n_swq_queued[i])
- continue;
- dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
}
if (flush) {
+ spin_lock_bh(&q->lock);
mt76_dma_sync_idx(dev, q);
mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
}
wake = wake && q->stopped &&
@@ -211,8 +260,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
if (wake)
ieee80211_wake_queue(dev->hw, qid);
}
@@ -227,7 +274,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
void *buf = e->buf;
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
- buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+ buf_addr = e->dma_addr[0];
if (len) {
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
@@ -268,7 +315,7 @@ static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
@@ -300,7 +347,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
@@ -378,7 +425,7 @@ free:
e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, qid, &e);
+ dev->drv->tx_complete_skb(dev, &e);
mt76_put_txwi(dev, t);
return ret;
}
@@ -612,6 +659,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
+ mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
@@ -620,5 +668,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
}
+
+ mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
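The dma.c hunks above retire the tx tasklet in favour of a dedicated mt76_worker: a kthread that the completion path wakes and that reset/cleanup paths can disable synchronously via mt76_worker_disable()/mt76_worker_enable(), which a tasklet cannot offer. A minimal sketch of that kthread-backed worker pattern follows, with illustrative names; the real mt76_worker (util.h) differs in detail.

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/bitops.h>

	struct simple_worker {
		struct task_struct *task;
		unsigned long state;		/* bit 0: work pending */
		void (*fn)(struct simple_worker *w);
	};

	static int simple_worker_thread(void *arg)
	{
		struct simple_worker *w = arg;

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!test_and_clear_bit(0, &w->state)) {
				schedule();		/* sleep until woken */
				continue;
			}
			__set_current_state(TASK_RUNNING);
			w->fn(w);			/* e.g. mt76_tx_worker */
		}
		return 0;
	}

	static void simple_worker_schedule(struct simple_worker *w)
	{
		set_bit(0, &w->state);
		wake_up_process(w->task);	/* safe from IRQ context */
	}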
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 3d4bf72700a5..4befe7f937a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
+#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"
@@ -304,11 +305,11 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(hw, TX_AMSDU);
- /* TODO: avoid linearization for SDIO */
- if (!mt76_is_sdio(dev))
+ if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
+ ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
+ }
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
@@ -433,14 +434,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
skb_queue_head_init(&dev->mcu.res_q);
init_waitqueue_head(&dev->mcu.wait);
mutex_init(&dev->mcu.mutex);
+ dev->tx_worker.fn = mt76_tx_worker;
INIT_LIST_HEAD(&dev->txwi_cache);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
- tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
-
dev->wq = alloc_ordered_workqueue("mt76", 0);
if (!dev->wq) {
ieee80211_free_hw(hw);
@@ -483,7 +483,14 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
- return ieee80211_register_hw(hw);
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+ sched_set_fifo_low(dev->tx_worker.task);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
@@ -500,12 +507,11 @@ EXPORT_SYMBOL_GPL(mt76_unregister_device);
void mt76_free_device(struct mt76_dev *dev)
{
+ mt76_worker_teardown(&dev->tx_worker);
if (dev->wq) {
destroy_workqueue(dev->wq);
dev->wq = NULL;
}
- if (mt76_is_mmio(dev))
- mt76_tx_free(dev);
ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
@@ -540,7 +546,7 @@ bool mt76_has_tx_pending(struct mt76_phy *phy)
offset = __MT_TXQ_MAX * (phy != &dev->phy);
for (i = 0; i < __MT_TXQ_MAX; i++) {
- q = dev->q_tx[offset + i].q;
+ q = dev->q_tx[offset + i];
if (q && q->queued)
return true;
}
@@ -870,7 +876,6 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
bool ps;
- int i;
hw = mt76_phy_hw(dev, status->ext_phy);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
@@ -920,20 +925,6 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
dev->drv->sta_ps(dev, sta, ps);
ieee80211_sta_ps_transition(sta, ps);
-
- if (ps)
- return;
-
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
- struct mt76_txq *mtxq;
-
- if (!sta->txq[i])
- continue;
-
- mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
- if (!skb_queue_empty(&mtxq->retry_q))
- ieee80211_schedule_txq(hw, sta->txq[i]);
- }
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
@@ -995,8 +986,6 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
mtxq->wcid = wcid;
-
- mt76_txq_init(dev, sta->txq[i]);
}
ewma_signal_init(&wcid->rssi);
@@ -1024,8 +1013,6 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
dev->drv->sta_remove(dev, vif, sta);
mt76_tx_status_check(dev, wcid, true);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_mask_clear(dev->wcid_mask, idx);
mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
@@ -1095,7 +1082,7 @@ EXPORT_SYMBOL_GPL(mt76_get_txpower);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1120,7 +1107,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->csa_active)
return;
- dev->csa_complete |= ieee80211_csa_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}
void mt76_csa_check(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index af35bc388ae2..a5be66de1cff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -17,11 +17,13 @@
#include "util.h"
#include "testmode.h"
-#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128
+#define MT_MAX_NON_AQL_PKT 16
+#define MT_TXQ_FREE_THR 32
+
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
@@ -79,7 +81,8 @@ enum mt76_rxq_id {
struct mt76_queue_buf {
dma_addr_t addr;
- int len;
+ u16 len;
+ bool skip_unmap;
};
struct mt76_tx_info {
@@ -99,9 +102,11 @@ struct mt76_queue_entry {
struct urb *urb;
int buf_sz;
};
- enum mt76_txq_id qid;
+ u32 dma_addr[2];
+ u16 dma_len[2];
+ u16 wcid;
bool skip_buf0:1;
- bool schedule:1;
+ bool skip_buf1:1;
bool done:1;
};
@@ -135,13 +140,6 @@ struct mt76_queue {
struct page_frag_cache rx_page;
};
-struct mt76_sw_queue {
- struct mt76_queue *q;
-
- struct list_head swq;
- int swq_queued;
-};
-
struct mt76_mcu_ops {
u32 headroom;
u32 tailroom;
@@ -204,6 +202,7 @@ DECLARE_EWMA(signal, 10, 8);
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+ atomic_t non_aql_packets;
unsigned long flags;
struct ewma_signal rssi;
@@ -214,6 +213,7 @@ struct mt76_wcid {
u8 sta:1;
u8 ext_phy:1;
+ u8 amsdu:1;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
@@ -226,11 +226,8 @@ struct mt76_wcid {
};
struct mt76_txq {
- struct mt76_sw_queue *swq;
struct mt76_wcid *wcid;
- struct sk_buff_head retry_q;
-
u16 agg_ssn;
bool send_bar;
bool aggr;
@@ -309,6 +306,7 @@ struct mt76_hw_cap {
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
+#define MT_DRV_AMSDU_OFFLOAD BIT(5)
struct mt76_driver_ops {
u32 drv_flags;
@@ -322,7 +320,7 @@ struct mt76_driver_ops {
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
- void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ void (*tx_complete_skb)(struct mt76_dev *dev,
struct mt76_queue_entry *e);
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
@@ -445,14 +443,24 @@ struct mt76_usb {
} mcu;
};
+#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
struct mt76_sdio {
- struct task_struct *tx_kthread;
- struct task_struct *kthread;
+ struct workqueue_struct *txrx_wq;
+ struct {
+ struct work_struct xmit_work;
+ struct work_struct status_work;
+ } tx;
+ struct {
+ struct work_struct recv_work;
+ struct work_struct net_work;
+ } rx;
+
struct work_struct stat_work;
- unsigned long state;
+ u8 *xmit_buf[MT_TXQ_MCU_WA];
struct sdio_func *func;
+ void *intr_data;
struct {
struct mutex lock;
@@ -593,12 +601,12 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
+ struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
- struct tasklet_struct tx_tasklet;
+ struct mt76_worker tx_worker;
struct napi_struct tx_napi;
struct delayed_work mac_work;
@@ -892,14 +900,13 @@ static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
-void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
-void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
+void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
-void mt76_tx_tasklet(unsigned long data);
+void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -932,7 +939,7 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -996,8 +1003,6 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
return hw;
}
-void mt76_tx_free(struct mt76_dev *dev);
-struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
@@ -1005,6 +1010,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -1039,7 +1046,7 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
-int mt76_skb_adjust_pad(struct sk_buff *skb);
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
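Two themes run through the mt76.h changes above: dev->q_tx drops the mt76_sw_queue wrapper and holds struct mt76_queue pointers directly, and each mt76_queue_entry now caches its DMA addresses and lengths so the completion path can unmap buffers without re-reading descriptors the hardware may already have recycled. A sketch of that unmap step, with a hypothetical helper name:

	/* Illustrative helper: unmap one of an entry's cached buffers,
	 * honouring the per-buffer skip flags added above. */
	static void entry_unmap_buf(struct device *dev,
				    struct mt76_queue_entry *e, int idx)
	{
		bool skip = idx ? e->skip_buf1 : e->skip_buf0;

		if (!skip)
			dma_unmap_single(dev, e->dma_addr[idx],
					 e->dma_len[idx], DMA_TO_DEVICE);
	}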
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 7a41cdf1c4ae..d728c5e43783 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -29,7 +29,7 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
@@ -78,7 +78,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
+ q = dev->mt76.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -95,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
goto out;
- q = dev->mt76.q_tx[MT_TXQ_CAB].q;
+ q = dev->mt76.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
@@ -136,7 +136,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued >
hweight8(dev->mt76.beacon_mask))
dev->beacon_check++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 8ce6880b2bb8..f52165dff422 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -70,7 +70,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
mt7603_edcca_set, "%lld\n");
static int
-mt7603_ampdu_stat_read(struct seq_file *file, void *data)
+mt7603_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7603_dev *dev = file->private;
int bound[3], i, range;
@@ -91,18 +91,7 @@ mt7603_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7603_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7603_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7603_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7603_ampdu_stat);
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
@@ -112,7 +101,8 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev,
+ &mt7603_ampdu_stat_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
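The debugfs hunk above replaces hand-rolled single_open() boilerplate with DEFINE_SHOW_ATTRIBUTE(), which is why the read function is renamed to mt7603_ampdu_stat_show and the registered ops become the generated mt7603_ampdu_stat_fops. Roughly what the macro from <linux/seq_file.h> expands to, slightly simplified:

	static int mt7603_ampdu_stat_open(struct inode *inode, struct file *file)
	{
		return single_open(file, mt7603_ampdu_stat_show,
				   inode->i_private);
	}

	static const struct file_operations mt7603_ampdu_stat_fops = {
		.owner		= THIS_MODULE,
		.open		= mt7603_ampdu_stat_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

The only requirement is a <name>_show(struct seq_file *, void *) function; the macro supplies the rest.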
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index a08b85281170..d60d00f6f6a0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -5,8 +5,7 @@
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -19,8 +18,7 @@ mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -123,7 +121,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -165,7 +163,7 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
mt7603_mac_sta_poll(dev);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
return 0;
}
@@ -193,29 +191,28 @@ int mt7603_dma_init(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
- MT_TX_RING_SIZE);
+ ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
+ MT7603_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
- MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
if (ret)
return ret;
@@ -249,6 +246,5 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index 3ee06e2577b8..01f1e0da5ee1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -147,8 +147,14 @@ static int mt7603_check_eeprom(struct mt76_dev *dev)
}
}
+static inline bool is_mt7688(struct mt7603_dev *dev)
+{
+ return mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4);
+}
+
int mt7603_eeprom_init(struct mt7603_dev *dev)
{
+ u8 *eeprom;
int ret;
ret = mt7603_eeprom_load(dev);
@@ -163,9 +169,16 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
MT7603_EEPROM_SIZE);
}
+ eeprom = (u8 *)dev->mt76.eeprom.data;
dev->mt76.cap.has_2ghz = true;
- memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
- ETH_ALEN);
+ memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
+
+ /* Check for 1SS devices */
+ dev->mphy.antenna_mask = 3;
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ is_mt7688(dev))
+ dev->mphy.antenna_mask = 1;
mt76_eeprom_override(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
index b893facfba48..4687d6dc00dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -85,4 +85,7 @@ enum mt7603_eeprom_source {
MT_EE_SRC_FLASH,
};
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+
#endif
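With the MT_EE_NIC_CONF_0_{RX,TX}_PATH masks defined, the eeprom code can decode antenna paths through the bitfield helpers instead of open-coded shifts, as the eeprom.c hunk above does. A small sketch of the decode, with a hypothetical helper name:

	#include <linux/bitfield.h>

	/* Hypothetical helper: true if either path count reports 1SS. */
	static bool is_single_stream(u8 nic_conf0)
	{
		/* low nibble = RX paths, high nibble = TX paths */
		return FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) == 1 ||
		       FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) == 1;
	}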
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 94196599797e..c4848fafd270 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -536,11 +536,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
- /* Check for 7688, which only has 1SS */
- dev->mphy.antenna_mask = 3;
- if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
- dev->mphy.antenna_mask = 1;
-
dev->slottime = 9;
dev->sensitivity_limit = 28;
dev->dynamic_sensitivity = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 8060c1514396..f665a1c95eed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -445,7 +445,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
for (i = 0; i < 4; i++) {
- struct mt76_queue *q = dev->mt76.q_tx[i].q;
+ struct mt76_queue *q = dev->mt76.q_tx[i];
u8 qidx = q->hw_idx;
u8 tid = ac_to_tid[i];
u32 txtime = airtime[qidx];
@@ -592,7 +592,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -896,7 +896,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_queue *q = dev->mt76.q_tx[qid].q;
+ struct mt76_queue *q = dev->mt76.q_tx[qid];
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
@@ -1036,6 +1036,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
mt7603_wtbl_set_ps(dev, msta, false);
+
+ mt76_tx_check_agg_ssn(sta, tx_info->skb);
}
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
@@ -1161,7 +1163,7 @@ out:
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
sband = &dev->mphy.sband_5g.sband;
@@ -1269,8 +1271,7 @@ out:
rcu_read_unlock();
}
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct sk_buff *skb = e->skb;
@@ -1280,10 +1281,8 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
return;
}
- if (qid < 4)
- dev->tx_hang_check = 0;
-
- mt76_tx_complete_skb(mdev, skb);
+ dev->tx_hang_check = 0;
+ mt76_tx_complete_skb(mdev, e->wcid, skb);
}
static bool
@@ -1403,7 +1402,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
@@ -1452,7 +1451,7 @@ skip_dma_reset:
clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
@@ -1515,7 +1514,7 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 447f2c63ef38..c9226dceb510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -75,7 +75,6 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -99,7 +98,6 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7603_beacon_set_timer(dev, mvif->idx, 0);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt76_txq_remove(&dev->mt76, vif->txq);
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
@@ -514,7 +512,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].q->hw_idx;
+ queue = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index c86305241e66..2a6e4332ad06 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -17,6 +17,8 @@
#define MT7603_MCU_RX_RING_SIZE 64
#define MT7603_RX_RING_SIZE 128
+#define MT7603_TX_RING_SIZE 256
+#define MT7603_PSD_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
#define MT7603_FIRMWARE_E2 "mt7603_e2.bin"
@@ -241,8 +243,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
index 2f2f337e2201..a5845da3547a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -44,6 +44,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index de170765e938..ba927033bbe8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -35,6 +35,8 @@ mt76_wmac_probe(struct platform_device *pdev)
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 88931658a9fb..00ba550fc48f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -165,15 +165,14 @@ mt7615_reset_test_set(void *data, u64 val)
if (!mt7615_wait_for_mcu_init(dev))
return 0;
- mt7615_mutex_acquire(dev);
-
skb = alloc_skb(1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_put(skb, 1);
- mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ mt7615_mutex_acquire(dev);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
mt7615_mutex_release(dev);
return 0;
@@ -221,7 +220,7 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
}
static int
-mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+mt7615_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
@@ -235,18 +234,7 @@ mt7615_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7615_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7615_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7615_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7615_ampdu_stat);
static void
mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
@@ -340,15 +328,15 @@ mt7615_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
@@ -393,7 +381,7 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7615_ampdu_stat_fops);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 1231a5ddf9ea..bf8ae14121db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,8 +12,7 @@
#include "mac.h"
static int
-mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -26,8 +25,7 @@ mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
@@ -45,19 +43,18 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
int i;
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
+ ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2);
if (ret)
return ret;
}
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
if (ret)
return ret;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
return ret;
}
@@ -65,10 +62,9 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
- struct mt76_sw_queue *q;
int ret, i;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE);
if (ret)
@@ -77,52 +73,28 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
- MT7615_TX_RING_SIZE);
+ ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
if (ret)
return ret;
- for (i = 1; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = dev->mt76.q_tx[0].q;
- }
+ for (i = 1; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE);
return 0;
}
-static void
-mt7615_tx_cleanup(struct mt7615_dev *dev)
-{
- int i;
-
- mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
- if (is_mt7615(&dev->mt76)) {
- mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
- } else {
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- mt76_queue_tx_cleanup(dev, i, false);
- }
-}
-
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7615_dev *dev;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- mt7615_tx_cleanup(dev);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
if (napi_complete_done(napi, 0))
- mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
-
- mt7615_tx_cleanup(dev);
-
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
return 0;
}
@@ -306,7 +278,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
MT_INT_MCU_CMD);
if (is_mt7622(&dev->mt76))
@@ -325,6 +297,5 @@ void mt7615_dma_cleanup(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 22e4eabe6578..f4756bb946c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -125,6 +125,9 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
break;
+ case MT_EE_DBDC:
+ dev->dbdc_support = true;
+ fallthrough;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 1f57b43693bc..e194259c84e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -217,6 +217,22 @@ static const struct ieee80211_iface_limit if_limits[] = {
}
};
+static const struct ieee80211_iface_combination if_comb_radar[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
@@ -306,8 +322,13 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt7615_sta);
hw->vif_data_size = sizeof(struct mt7615_vif);
- wiphy->iface_combinations = if_comb;
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (is_mt7663(&phy->dev->mt76)) {
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ } else {
+ wiphy->iface_combinations = if_comb_radar;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_radar);
+ }
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 3dd8dd28690e..8dc645e398fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -378,7 +378,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -1271,7 +1271,7 @@ out:
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
mphy = &dev->mphy;
if (sta->wcid.ext_phy && dev->mt76.phy2)
@@ -1400,6 +1400,9 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
+ __le32 *txwi_data;
+ u32 val;
+ u8 wcid;
trace_mac_tx_free(dev, token);
@@ -1410,9 +1413,13 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
if (!txwi)
return;
+ txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
+ val = le32_to_cpu(txwi_data[1]);
+ wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+
mt7615_txp_skb_unmap(mdev, txwi);
if (txwi->skb) {
- mt76_tx_complete_skb(mdev, txwi->skb);
+ mt76_tx_complete_skb(mdev, wcid, txwi->skb);
txwi->skb = NULL;
}
@@ -1424,6 +1431,14 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
@@ -1439,11 +1454,15 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
+ if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
+ return;
+
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7615_pm_power_save_sched(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -1478,7 +1497,7 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -1845,7 +1864,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
- if (mt7615_driver_own(dev)) {
+ if (mt7615_mcu_set_drv_ctrl(dev)) {
dev_err(mphy->dev->dev, "failed to wake device\n");
goto out;
}
@@ -1853,12 +1872,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
spin_lock_bh(&dev->pm.txq_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
- struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid;
if (!dev->pm.tx_q[i].skb)
continue;
+ wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
if (msta && wcid->sta)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
@@ -1868,7 +1888,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
}
spin_unlock_bh(&dev->pm.txq_lock);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
out:
ieee80211_wake_queues(mphy->hw);
@@ -1943,7 +1963,7 @@ void mt7615_pm_power_save_work(struct work_struct *work)
goto out;
}
- if (!mt7615_firmware_own(dev))
+ if (!mt7615_mcu_set_fw_ctrl(dev))
return;
out:
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
@@ -2110,7 +2130,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
if (ext_phy)
mt76_txq_schedule_all(ext_phy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.tx_napi);
@@ -2131,7 +2151,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 2d0b1f49fdbc..3186b7b2ca48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -205,7 +205,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
}
ret = mt7615_mcu_add_dev_info(dev, vif, true);
@@ -256,8 +255,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- if (vif->txq)
- mt76_txq_remove(&dev->mt76, vif->txq);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
@@ -361,7 +358,10 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
wd->key.keylen = key->keylen;
wd->key.cmd = cmd;
+ spin_lock_bh(&dev->mt76.lock);
list_add_tail(&wd->node, &dev->wd_head);
+ spin_unlock_bh(&dev->mt76.lock);
+
queue_work(dev->mt76.wq, &dev->wtbl_work);
return 0;
@@ -703,7 +703,8 @@ mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
return;
}
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ dev->pm.last_activity = jiffies;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@@ -732,6 +733,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
}
if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+ dev->pm.last_activity = jiffies;
mt76_tx(mphy, control->sta, wcid, skb);
return;
}
@@ -813,7 +815,6 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_START:
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
params->ssn = ssn;
- mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index bd316dbd9041..31b40fb83f6c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -324,6 +324,97 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
sizeof(req), false);
}
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
+static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 addr;
+ int err;
+
+ addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+
+ mt7622_trigger_hif_int(dev, false);
+
+ if (err) {
+ dev_err(mdev->dev, "driver own failed\n");
+ return -ETIMEDOUT;
+ }
+
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ return 0;
+}
+
+static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int i;
+
+ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+ break;
+ }
+
+ if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ set_bit(MT76_STATE_PM, &mphy->state);
+ return -EIO;
+ }
+
+out:
+ dev->pm.last_activity = jiffies;
+
+ return 0;
+}
+
+static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int err = 0;
+ u32 addr;
+
+ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
+ mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
+ }
+
+ mt7622_trigger_hif_int(dev, false);
+
+ return err;
+}
+
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@@ -650,12 +741,12 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.csa_ie_pos = cpu_to_le16(csa_offs);
- req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+ req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]];
}
dev_kfree_skb(skb);
@@ -1106,7 +1197,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->af = sta->ht_cap.ampdu_factor;
ht->mm = sta->ht_cap.ampdu_density;
ht->ht = 1;
@@ -1124,7 +1215,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
wtbl_tlv, sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC,
+ vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = 1;
af = (sta->vht_cap.cap &
@@ -1314,6 +1405,8 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
.add_tx_ba = mt7615_mcu_wtbl_tx_ba,
.add_rx_ba = mt7615_mcu_wtbl_rx_ba,
.sta_add = mt7615_mcu_wtbl_sta_add,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int
@@ -1410,6 +1503,8 @@ static const struct mt7615_mcu_ops sta_update_ops = {
.add_tx_ba = mt7615_mcu_sta_tx_ba,
.add_rx_ba = mt7615_mcu_sta_rx_ba,
.sta_add = mt7615_mcu_add_sta,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int
@@ -1713,10 +1808,10 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
}
dev_kfree_skb(skb);
@@ -1823,6 +1918,8 @@ static const struct mt7615_mcu_ops uni_update_ops = {
.add_tx_ba = mt7615_mcu_uni_tx_ba,
.add_rx_ba = mt7615_mcu_uni_rx_ba,
.sta_add = mt7615_mcu_uni_add_sta,
+ .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
@@ -1895,81 +1992,6 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
-{
- if (!is_mt7622(&dev->mt76))
- return;
-
- regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
- MT_INFRACFG_MISC_AP2CONN_WAKE,
- !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
-}
-
-int mt7615_driver_own(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_dev *mdev = &dev->mt76;
- int i;
-
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
- mt7622_trigger_hif_int(dev, true);
-
- for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
- u32 addr;
-
- addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
-
- addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
- if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
- break;
- }
-
- mt7622_trigger_hif_int(dev, false);
-
- if (i == MT7615_DRV_OWN_RETRY_COUNT) {
- dev_err(mdev->dev, "driver own failed\n");
- set_bit(MT76_STATE_PM, &mphy->state);
- return -EIO;
- }
-
-out:
- dev->pm.last_activity = jiffies;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt7615_driver_own);
-
-int mt7615_firmware_own(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- int err = 0;
- u32 addr;
-
- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
- return 0;
-
- mt7622_trigger_hif_int(dev, true);
-
- addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
-
- if (is_mt7622(&dev->mt76) &&
- !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
- MT_CFG_LPCR_HOST_FW_OWN, 300)) {
- dev_err(dev->mt76.dev, "Timeout for firmware own\n");
- clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
- }
-
- mt7622_trigger_hif_int(dev, false);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(mt7615_firmware_own);
-
static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
@@ -2452,7 +2474,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7615_mcu_ops,
- ret = mt7615_driver_own(dev);
+ ret = mt7615_mcu_drv_pmctrl(dev);
if (ret)
return ret;
@@ -2482,7 +2504,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- mt7615_firmware_own(dev);
+ mt7615_mcu_set_fw_ctrl(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
@@ -2847,14 +2869,6 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
-#ifdef CONFIG_NL80211_TESTMODE
- if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES &&
- dev->mt76.test.tx_antenna_mask) {
- req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask);
- req.rx_streams_mask = dev->mt76.test.tx_antenna_mask;
- }
-#endif
-
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
@@ -3279,7 +3293,7 @@ static int mt7615_dcoc_freq_idx(u16 freq, u8 bw)
freq = freq_bw40[idx];
break;
}
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_40:
idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
freq);
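The pmctrl helpers moved into mcu.c above all share one handshake: write the DRV_OWN doorbell, then poll until firmware drops the FW_OWN bit, retrying a bounded number of times. Condensed to its core, mirroring the register names from the hunks (state bookkeeping and HIF-interrupt toggling stripped):

	/* Sketch of the low-power driver-own handshake from the hunks above. */
	static int drv_own_poll(struct mt7615_dev *dev)
	{
		int i;

		for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
			mt76_wr(dev, MT_PCIE_DOORBELL_PUSH,
				MT_CFG_LPCR_HOST_DRV_OWN);
			/* wait up to 50 ms for firmware to clear FW_OWN */
			if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
					   MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
				return 0;
		}

		return -EIO;
	}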
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 133f93a6ed1b..6de492a4cf02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -101,30 +101,29 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
static void mt7615_irq_tasklet(unsigned long data)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
- u32 intr, mask = 0;
+ u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
- if (intr & MT_INT_TX_DONE_ALL) {
- mask |= MT_INT_TX_DONE_ALL;
+ mask |= intr & MT_INT_RX_DONE_ALL;
+ if (intr & tx_mcu_mask)
+ mask |= tx_mcu_mask;
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+
+ if (intr & tx_mcu_mask)
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_RX_DONE(0)) {
- mask |= MT_INT_RX_DONE(0);
+ if (intr & MT_INT_RX_DONE(0))
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE(1)) {
- mask |= MT_INT_RX_DONE(1);
+ if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
- }
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -135,8 +134,6 @@ static void mt7615_irq_tasklet(unsigned long data)
wake_up(&dev->reset_wait);
}
}
-
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
@@ -227,6 +224,8 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
bus_ops->rmw = mt7615_rmw;
dev->mt76.bus = bus_ops;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 571eadc033a3..6a9f9187f76a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -220,6 +220,8 @@ struct mt7615_phy {
#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
+#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
+#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
struct mt7615_mcu_ops {
int (*add_tx_ba)(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
@@ -238,6 +240,8 @@ struct mt7615_mcu_ops {
struct ieee80211_hw *hw,
struct ieee80211_vif *vif, bool enable);
int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
+ int (*set_drv_ctrl)(struct mt7615_dev *dev);
+ int (*set_fw_ctrl)(struct mt7615_dev *dev);
};
struct mt7615_dev {
@@ -278,6 +282,7 @@ struct mt7615_dev {
bool fw_debug;
bool flash_eeprom;
+ bool dbdc_support;
spinlock_t token_lock;
struct idr token;
@@ -535,6 +540,11 @@ static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
return lmac_queue_map[ac];
}
+static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
+{
+ return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
+}
+
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_roc_work(struct work_struct *work);
@@ -608,8 +618,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -638,8 +647,6 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration);
-int mt7615_firmware_own(struct mt7615_dev *dev);
-int mt7615_driver_own(struct mt7615_dev *dev);
int mt7615_init_debugfs(struct mt7615_dev *dev);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
@@ -666,7 +673,6 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
- enum mt76_txq_id qid,
struct mt76_queue_entry *e);
void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
@@ -675,9 +681,8 @@ int mt7663u_mcu_init(struct mt7615_dev *dev);
/* sdio */
u32 mt7663s_read_pcr(struct mt7615_dev *dev);
int mt7663s_mcu_init(struct mt7615_dev *dev);
-int mt7663s_driver_own(struct mt7615_dev *dev);
-int mt7663s_firmware_own(struct mt7615_dev *dev);
-int mt7663s_kthread_run(void *data);
+void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_rx_work(struct work_struct *work);
void mt7663s_sdio_irq(struct sdio_func *func);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 2328d78e06a1..dbd29d897b29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -88,7 +88,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
napi_disable(&mdev->tx_napi);
- tasklet_kill(&mdev->tx_tasklet);
+ mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_disable(&mdev->napi[i]);
@@ -118,7 +118,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (err)
goto restore;
- err = mt7615_firmware_own(dev);
+ err = mt7615_mcu_set_fw_ctrl(dev);
if (err)
goto restore;
@@ -142,7 +142,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
bool pdma_reset;
int i, err;
- err = mt7615_driver_own(dev);
+ err = mt7615_mcu_set_drv_ctrl(dev);
if (err < 0)
return err;
@@ -162,6 +162,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
if (pdma_reset)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
+ mt76_worker_enable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 7224a0078211..06a0f8f7bc89 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -25,6 +25,9 @@ static void mt7615_init_work(struct work_struct *work)
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
+
+ if (dev->dbdc_support)
+ mt7615_register_ext_phy(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 2d67f9a148cd..4cf7c5d34325 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -14,8 +14,7 @@
#include "../dma.h"
#include "mac.h"
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
@@ -45,7 +44,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
}
if (e->skb)
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
static void
@@ -107,6 +106,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
/* pass partial skb header to fw */
tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 9137d9e6b51d..61623f480806 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -575,7 +575,7 @@ enum mt7615_reg_base {
#define MT_MCU_PTA_BASE 0x81060000
#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n))
-#define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8)
+#define MT_ANT_SWITCH_CON(_n) MT_MCU_PTA(0x0c8 + ((_n) - 1) * 4)
#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8))
#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index dabce51117b0..874c929d8552 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -323,7 +323,7 @@ static int mt7663s_probe(struct sdio_func *func,
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
- .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_RX_DMA_HDR,
.tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
@@ -346,7 +346,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int ret;
+ int i, ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@@ -364,23 +364,39 @@ static int mt7663s_probe(struct sdio_func *func,
dev->ops = ops;
sdio_set_drvdata(func, dev);
- mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev,
- "mt7663s_tx");
- if (IS_ERR(mdev->sdio.tx_kthread))
- return PTR_ERR(mdev->sdio.tx_kthread);
-
ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
goto err_free;
+ INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work);
+ INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work);
+
ret = mt7663s_hw_init(dev, func);
if (ret)
- goto err_free;
+ goto err_deinit;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mdev->sdio.intr_data = devm_kmalloc(mdev->dev,
+ sizeof(struct mt76s_intr),
+ GFP_KERNEL);
+ if (!mdev->sdio.intr_data) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
+ mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
+ MT76S_XMIT_BUF_SZ,
+ GFP_KERNEL);
+ if (!mdev->sdio.xmit_buf[i]) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+ }
+
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
goto err_deinit;
@@ -426,9 +442,11 @@ static int mt7663s_suspend(struct device *dev)
return err;
}
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
mt76s_stop_txrx(&mdev->mt76);
- return mt7663s_firmware_own(mdev);
+ return mt7615_mcu_set_fw_ctrl(mdev);
}
static int mt7663s_resume(struct device *dev)
@@ -437,7 +455,7 @@ static int mt7663s_resume(struct device *dev)
struct mt7615_dev *mdev = sdio_get_drvdata(func);
int err;
- err = mt7663s_driver_own(mdev);
+ err = mt7615_mcu_set_drv_ctrl(mdev);
if (err)
return err;
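Two details in the suspend hunk above are easy to miss: sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER) has to be issued from inside the suspend handler (it asks the host controller to keep the card powered for this suspend cycle), and only afterwards is ownership handed to the firmware. A minimal sketch of that ordering; mydev_stop_txrx() and mydev_fw_own() are hypothetical placeholders, and unlike the hunk above this sketch also checks the return value:

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/pm.h>

static void mydev_stop_txrx(struct sdio_func *func);	/* placeholder */
static int mydev_fw_own(struct sdio_func *func);	/* placeholder */

static int mydev_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	int err;

	/* only valid while this function is being suspended */
	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (err)
		return err;	/* host cannot keep power across suspend */

	mydev_stop_txrx(func);		/* quiesce tx/rx first */
	return mydev_fw_own(func);	/* then hand control to firmware */
}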
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 28b86bec7fc2..38670c00380c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -53,7 +53,7 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
goto out;
- mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q);
+ mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]);
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
@@ -63,7 +63,7 @@ out:
return ret;
}
-int mt7663s_driver_own(struct mt7615_dev *dev)
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -75,7 +75,7 @@ int mt7663s_driver_own(struct mt7615_dev *dev)
sdio_claim_host(func);
- sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0);
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
@@ -95,7 +95,7 @@ out:
return 0;
}
-int mt7663s_firmware_own(struct mt7615_dev *dev)
+static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -107,7 +107,7 @@ int mt7663s_firmware_own(struct mt7615_dev *dev)
sdio_claim_host(func);
- sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0);
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
@@ -132,9 +132,10 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.mcu_rr = mt7615_mcu_reg_rr,
.mcu_wr = mt7615_mcu_reg_wr,
};
+ struct mt7615_mcu_ops *mcu_ops;
int ret;
- ret = mt7663s_driver_own(dev);
+ ret = mt7663s_mcu_drv_pmctrl(dev);
if (ret)
return ret;
@@ -152,6 +153,15 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
+ mcu_ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*mcu_ops),
+ GFP_KERNEL);
+ if (!mcu_ops)
+ return -ENOMEM;
+
+ mcu_ops->set_drv_ctrl = mt7663s_mcu_drv_pmctrl;
+ mcu_ops->set_fw_ctrl = mt7663s_mcu_fw_pmctrl;
+ dev->mcu_ops = mcu_ops;
+
ret = mt7663s_mcu_init_sched(dev);
if (ret)
return ret;
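The devm_kmemdup() sequence above is the standard pattern for specializing a shared ops table: clone it into device-managed memory, patch the bus-specific hooks, and point the device at the copy so the original (possibly shared between buses) table is never modified. A generic sketch of the same shape, with placeholder types and hook names:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev;					/* placeholder device type */

struct my_mcu_ops {				/* placeholder ops table */
	int (*set_drv_ctrl)(struct my_dev *mdev);
	int (*set_fw_ctrl)(struct my_dev *mdev);
};

static int my_bus_drv_pmctrl(struct my_dev *mdev);	/* placeholder */
static int my_bus_fw_pmctrl(struct my_dev *mdev);	/* placeholder */

static int my_override_pm_hooks(struct device *dev, struct my_mcu_ops **ops_p)
{
	struct my_mcu_ops *ops;

	/* copy is freed automatically when the device is unbound */
	ops = devm_kmemdup(dev, *ops_p, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	ops->set_drv_ctrl = my_bus_drv_pmctrl;
	ops->set_fw_ctrl = my_bus_fw_pmctrl;
	*ops_p = ops;				/* install the patched copy */
	return 0;
}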
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 443a4ecdad3a..2486cda3243b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -19,21 +19,40 @@
#include "sdio.h"
#include "mac.h"
-static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data)
+static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
{
- struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 ple_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
+ FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
+ FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
+ FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
+ };
+ u32 pse_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
+ FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
+ FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
+ FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
+ };
+ u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
+ u32 pse_data_quota = 0, ple_data_quota = 0;
+ struct mt76_sdio *sdio = &dev->sdio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
+ pse_data_quota += pse_ac_data_quota[i];
+ ple_data_quota += ple_ac_data_quota[i];
+ }
+
+ if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
+ return 0;
mutex_lock(&sdio->sched.lock);
- sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */
- FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */
- FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */
- FIELD_GET(TXQ_CNT_H, data[1]); /* VO */
- sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */
- FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */
- FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */
- FIELD_GET(TXQ_CNT_L, data[4]); /* VO */
- sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]);
+ sdio->sched.pse_mcu_quota += pse_mcu_quota;
+ sdio->sched.pse_data_quota += pse_data_quota;
+ sdio->sched.ple_data_quota += ple_data_quota;
mutex_unlock(&sdio->sched.lock);
+
+ return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
@@ -61,11 +80,11 @@ static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
return skb;
}
-static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
+static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76s_intr *intr)
{
- struct mt76_queue *q = &dev->mt76.q_rx[qid];
- struct mt76_sdio *sdio = &dev->mt76.sdio;
+ struct mt76_queue *q = &dev->q_rx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i, order;
struct page *page;
u8 *buf;
@@ -86,15 +105,18 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
- dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err);
+ dev_err(dev->dev, "sdio read data failed:%d\n", err);
__free_pages(page, order);
return err;
}
for (i = 0; i < intr->rx.num[qid]; i++) {
- int index = (q->tail + i) % q->ndesc;
+ int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
len = intr->rx.len[qid][i];
@@ -109,160 +131,198 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
__free_pages(page, order);
spin_lock_bh(&q->lock);
- q->tail = (q->tail + i) % q->ndesc;
+ q->head = (q->head + i) % q->ndesc;
q->queued += i;
spin_unlock_bh(&q->lock);
- return err;
+ return i;
}
-static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
- struct mt76_queue_entry *e,
- bool mcu)
+static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int buf_sz, int *pse_size, int *ple_size)
{
- struct mt76_sdio *sdio = &dev->mt76.sdio;
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct ieee80211_hdr *hdr;
- int size, ret = -EBUSY;
-
- size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
+ int pse_sz;
- if (mcu) {
- if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state))
- return 0;
+ pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
- mutex_lock(&sdio->sched.lock);
- if (sdio->sched.pse_mcu_quota > size) {
- sdio->sched.pse_mcu_quota -= size;
- ret = 0;
- }
- mutex_unlock(&sdio->sched.lock);
+ if (qid == MT_TXQ_MCU) {
+ if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
+ return -EBUSY;
+ } else {
+ if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
+ sdio->sched.ple_data_quota < *ple_size)
+ return -EBUSY;
- return ret;
+ *ple_size = *ple_size + 1;
}
+ *pse_size = *pse_size + pse_sz;
- hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE);
- if (ieee80211_is_ctl(hdr->frame_control))
- return 0;
+ return 0;
+}
+static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int pse_size, int ple_size)
+{
mutex_lock(&sdio->sched.lock);
- if (sdio->sched.pse_data_quota > size &&
- sdio->sched.ple_data_quota > 0) {
- sdio->sched.pse_data_quota -= size;
- sdio->sched.ple_data_quota--;
- ret = 0;
+ if (qid == MT_TXQ_MCU) {
+ sdio->sched.pse_mcu_quota -= pse_size;
+ } else {
+ sdio->sched.pse_data_quota -= pse_size;
+ sdio->sched.ple_data_quota -= ple_size;
}
mutex_unlock(&sdio->sched.lock);
+}
+
+static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ int err;
+
+ if (len > sdio->func->cur_blksize)
+ len = roundup(len, sdio->func->cur_blksize);
+
+ sdio_claim_host(sdio->func);
+ err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
+ if (err)
+ dev_err(dev->dev, "sdio write failed: %d\n", err);
- return ret;
+ return err;
}
-static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
+static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q;
- struct mt76_sdio *sdio = &dev->mt76.sdio;
- int nframes = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
- while (q->first != q->tail) {
+ while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
- int err, len = e->skb->len;
+ struct sk_buff *iter;
- if (mt7663s_tx_update_sched(dev, e, mcu))
+ if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ __skb_put_zero(e->skb, 4);
+ err = __mt7663s_xmit_queue(dev, e->skb->data,
+ e->skb->len);
+ if (err)
+ return err;
+
+ goto next;
+ }
+
+ if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
break;
- if (len > sdio->func->cur_blksize)
- len = roundup(len, sdio->func->cur_blksize);
+ if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz,
+ &ple_sz))
+ break;
- /* TODO: skb_walk_frags and then write to SDIO port */
- err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len);
- if (err) {
- dev_err(dev->mt76.dev, "sdio write failed: %d\n", err);
- return -EIO;
- }
+ memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
+ skb_headlen(e->skb));
+ len += skb_headlen(e->skb);
+ nframes++;
- e->done = true;
+ skb_walk_frags(e->skb, iter) {
+ memcpy(sdio->xmit_buf[qid] + len, iter->data,
+ iter->len);
+ len += iter->len;
+ nframes++;
+ }
+next:
q->first = (q->first + 1) % q->ndesc;
- nframes++;
+ e->done = true;
+ }
+
+ if (nframes) {
+ memset(sdio->xmit_buf[qid] + len, 0, 4);
+ err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ if (err)
+ return err;
}
+ mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz);
return nframes;
}
-static int mt7663s_tx_run_queues(struct mt7615_dev *dev)
+void mt7663s_tx_work(struct work_struct *work)
{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.xmit_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes = 0;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
- ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q);
+ ret = mt7663s_tx_run_queue(dev, i);
if (ret < 0)
- return ret;
+ break;
nframes += ret;
}
+ if (nframes)
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
- return nframes;
+ queue_work(sdio->txrx_wq, &sdio->tx.status_work);
}
-int mt7663s_kthread_run(void *data)
+void mt7663s_rx_work(struct work_struct *work)
{
- struct mt7615_dev *dev = data;
- struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.recv_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt76s_intr *intr = sdio->intr_data;
+ int nframes = 0, ret;
- while (!kthread_should_stop()) {
- int ret;
+ /* disable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
+ sdio_release_host(sdio->func);
- cond_resched();
+ if (ret < 0)
+ goto out;
- sdio_claim_host(dev->mt76.sdio.func);
- ret = mt7663s_tx_run_queues(dev);
- sdio_release_host(dev->mt76.sdio.func);
+ trace_dev_irq(dev, intr->isr, 0);
- if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else {
- wake_up_process(dev->mt76.sdio.kthread);
+ if (intr->isr & WHIER_RX0_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 0, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
}
}
- return 0;
+ if (intr->isr & WHIER_RX1_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 1, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
+ }
+ }
+
+ if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+
+ if (nframes) {
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+ return;
+ }
+out:
+ /* enable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
}
void mt7663s_sdio_irq(struct sdio_func *func)
{
struct mt7615_dev *dev = sdio_get_drvdata(func);
struct mt76_sdio *sdio = &dev->mt76.sdio;
- struct mt76s_intr intr;
-
- /* disable interrupt */
- sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
-
- do {
- sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
- trace_dev_irq(&dev->mt76, intr.isr, 0);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
- goto out;
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+ return;
- if (intr.isr & WHIER_RX0_DONE_INT_EN) {
- mt7663s_rx_run_queue(dev, 0, &intr);
- wake_up_process(sdio->kthread);
- }
-
- if (intr.isr & WHIER_RX1_DONE_INT_EN) {
- mt7663s_rx_run_queue(dev, 1, &intr);
- wake_up_process(sdio->kthread);
- }
-
- if (intr.isr & WHIER_TX_DONE_INT_EN) {
- mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
- mt7663s_tx_run_queues(dev);
- wake_up_process(sdio->kthread);
- }
- } while (intr.isr);
-out:
- /* enable interrupt */
- sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
}
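The tx rework above splits quota handling into a reserve step and a commit step: mt7663s_tx_pick_quota() is called per frame while the burst is assembled into xmit_buf (comparing against the running totals, so the first oversubscribed frame stops the loop), and mt7663s_tx_update_quota() debits the shared counters once per burst. A condensed sketch of that reserve/commit shape, reduced to a single quota counter and a placeholder struct:

#include <linux/mutex.h>
#include <linux/errno.h>

struct sched_state {
	struct mutex lock;
	int quota;			/* replenished from tx-done interrupts */
};

/* reserve: lock-free read, compared against what this burst already holds */
static int quota_pick(struct sched_state *s, int need, int *reserved)
{
	if (s->quota < *reserved + need)
		return -EBUSY;		/* stop batching, retry after refill */
	*reserved += need;
	return 0;
}

/* commit: one debit for the whole burst, under the lock */
static void quota_commit(struct sched_state *s, int reserved)
{
	mutex_lock(&s->lock);
	s->quota -= reserved;
	mutex_unlock(&s->lock);
}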
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index 1730751133aa..e4dc62314bae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -70,7 +70,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
if (dev->mt76.test.state != MT76_TM_STATE_OFF)
tx_power = dev->mt76.test.tx_power;
- len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
if (!skb)
return -ENOMEM;
@@ -80,13 +80,12 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < target_chains; i++) {
- int index;
-
ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
- if (ret < 0)
+ if (ret < 0) {
+ dev_kfree_skb(skb);
return -EINVAL;
+ }
- index = ret - MT_EE_NIC_CONF_0;
if (tx_power && tx_power[i])
data[ret - MT_EE_NIC_CONF_0] = tx_power[i];
}
@@ -191,7 +190,7 @@ mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
for (i = 0; i < 4; i++) {
mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
MT_WF_PHY_RFINTF3_0_ANT,
- td->tx_antenna_mask & BIT(i) ? 0 : 0xa);
+ (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 23a21338c46e..f0ad83af9e00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -180,9 +180,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
}
mt76u_stop_rx(&dev->mt76);
-
mt76u_stop_tx(&dev->mt76);
- tasklet_kill(&dev->mt76.tx_tasklet);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 0b33df3e3bfe..4d8be366af31 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -18,7 +18,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- int ret, seq, ep;
+ int ret, seq, ep, len, pad;
mutex_lock(&mdev->mcu.mutex);
@@ -28,8 +28,10 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
ep = MT_EP_OUT_AC_BE;
- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
- ret = mt76_skb_adjust_pad(skb);
+ len = skb->len;
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
if (ret < 0)
goto out;
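The pad formula used here (and again in the usb_sdio tx path below) rounds the frame up to a 4-byte boundary and then adds one more 4-byte unit, so the transfer always ends with at least 4 bytes of zero padding and a 4-byte-aligned total length. A quick standalone check of the arithmetic:

#include <stdio.h>

static unsigned int round_up4(unsigned int x)
{
	return (x + 3) & ~3u;	/* round_up(x, 4) for a power-of-two step */
}

int main(void)
{
	/* len -> pad: 60 -> 4, 61 -> 7, 62 -> 6, 63 -> 5, 64 -> 4 */
	for (unsigned int len = 60; len <= 64; len++)
		printf("len=%u pad=%u total=%u\n", len,
		       round_up4(len) + 4 - len, round_up4(len) + 4);
	return 0;
}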
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 6dffdaaa9ad5..3b29a6d3dc64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -226,7 +226,6 @@ bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
- enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
unsigned int headroom = MT_USB_TXD_SIZE;
@@ -235,7 +234,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
headroom += MT_USB_HDR_SIZE;
skb_pull(e->skb, headroom);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb);
@@ -248,6 +247,7 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int pad;
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
!msta->rate_probe) {
@@ -259,10 +259,16 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
- if (mt76_is_usb(mdev))
- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
+ if (mt76_is_usb(mdev)) {
+ u32 len = skb->len;
+
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ } else {
+ pad = round_up(skb->len, 4) - skb->len;
+ }
- return mt76_skb_adjust_pad(skb);
+ return mt76_skb_adjust_pad(skb, pad);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
@@ -359,14 +365,15 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
if (err)
return err;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76))
- hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
- else
- hw->max_tx_fragments = 1;
hw->extra_tx_headroom += MT_USB_TXD_SIZE;
- if (mt76_is_usb(&dev->mt76))
+ if (mt76_is_usb(&dev->mt76)) {
hw->extra_tx_headroom += MT_USB_HDR_SIZE;
+ /* check hw sg support in order to enable AMSDU */
+ if (dev->mt76.usb.sg_en)
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = 1;
+ }
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index dc8bf4c6969a..d78866bf41ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -10,6 +10,7 @@
#include "eeprom.h"
#include "mcu.h"
#include "initvals.h"
+#include "initvals_init.h"
#include "../mt76x02_phy.h"
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 3dcd9620a126..99808ed0c6cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -11,139 +11,6 @@
#include "phy.h"
-static const struct mt76_reg_pair common_mac_reg_table[] = {
- { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
- { MT_BCN_OFFSET(1), 0x6f77d0c8 },
- { MT_LEGACY_BASIC_RATE, 0x0000013f },
- { MT_HT_BASIC_RATE, 0x00008003 },
- { MT_MAC_SYS_CTRL, 0x00000000 },
- { MT_RX_FILTR_CFG, 0x00017f97 },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TX_SW_CFG0, 0x00000000 },
- { MT_TX_SW_CFG1, 0x00080606 },
- { MT_TX_LINK_CFG, 0x00001020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2090 },
- { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
- { MT_LED_CFG, 0x7f031e46 },
- { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
- { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
- { MT_TX_RETRY_CFG, 0x47d01f0f },
- { MT_AUTO_RSP_CFG, 0x00000013 },
- { MT_CCK_PROT_CFG, 0x07f40003 },
- { MT_OFDM_PROT_CFG, 0x07f42004 },
- { MT_PBF_CFG, 0x00f40006 },
- { MT_WPDMA_GLO_CFG, 0x00000030 },
- { MT_GF20_PROT_CFG, 0x01742004 },
- { MT_GF40_PROT_CFG, 0x03f42084 },
- { MT_MM20_PROT_CFG, 0x01742004 },
- { MT_MM40_PROT_CFG, 0x03f42084 },
- { MT_TXOP_CTRL_CFG, 0x0000583f },
- { MT_TX_RTS_CFG, 0x00ffff20 },
- { MT_EXP_ACK_TIME, 0x002400ca },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { MT_XIFS_TIME_CFG, 0x33a41010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
-};
-
-static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
- { MT_IOCFG_6, 0xa0040080 },
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x77723c1f },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
- { MT_TX_SW_CFG0, 0x00000601 },
- { MT_TX_SW_CFG1, 0x00040000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_BB_PA_MODE_CFG1, 0x00500055 },
- { MT_RF_PA_MODE_CFG1, 0x00500055 },
- { MT_TX_ALC_CFG_0, 0x2F2F000C },
- { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_8, 0x0000003A },
- { MT_TX_PWR_CFG_9, 0x0000003A },
- { 0x150C, 0x00000002 },
- { 0x1238, 0x001700C8 },
- { MT_LDO_CTRL_0, 0x00A647B6 },
- { MT_LDO_CTRL_1, 0x6B006464 },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_HT_CTRL_CFG, 0x000001FF },
- { MT_TXOP_HLDR_ET, 0x00000000 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
-};
-
-static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
- { MT_BBP(CORE, 1), 0x00000002 },
- { MT_BBP(CORE, 4), 0x00000000 },
- { MT_BBP(CORE, 24), 0x00000000 },
- { MT_BBP(CORE, 32), 0x4003000a },
- { MT_BBP(CORE, 42), 0x00000000 },
- { MT_BBP(CORE, 44), 0x00000000 },
- { MT_BBP(IBI, 11), 0x0FDE8081 },
- { MT_BBP(AGC, 0), 0x00021400 },
- { MT_BBP(AGC, 1), 0x00000003 },
- { MT_BBP(AGC, 2), 0x003A6464 },
- { MT_BBP(AGC, 15), 0x88A28CB8 },
- { MT_BBP(AGC, 22), 0x00001E21 },
- { MT_BBP(AGC, 23), 0x0000272C },
- { MT_BBP(AGC, 24), 0x00002F3A },
- { MT_BBP(AGC, 25), 0x8000005A },
- { MT_BBP(AGC, 26), 0x007C2005 },
- { MT_BBP(AGC, 33), 0x00003238 },
- { MT_BBP(AGC, 34), 0x000A0C0C },
- { MT_BBP(AGC, 37), 0x2121262C },
- { MT_BBP(AGC, 41), 0x38383E45 },
- { MT_BBP(AGC, 57), 0x00001010 },
- { MT_BBP(AGC, 59), 0xBAA20E96 },
- { MT_BBP(AGC, 63), 0x00000001 },
- { MT_BBP(TXC, 0), 0x00280403 },
- { MT_BBP(TXC, 1), 0x00000000 },
- { MT_BBP(RXC, 1), 0x00000012 },
- { MT_BBP(RXC, 2), 0x00000011 },
- { MT_BBP(RXC, 3), 0x00000005 },
- { MT_BBP(RXC, 4), 0x00000000 },
- { MT_BBP(RXC, 5), 0xF977C4EC },
- { MT_BBP(RXC, 7), 0x00000090 },
- { MT_BBP(TXO, 8), 0x00000000 },
- { MT_BBP(TXBE, 0), 0x00000000 },
- { MT_BBP(TXBE, 4), 0x00000004 },
- { MT_BBP(TXBE, 6), 0x00000000 },
- { MT_BBP(TXBE, 8), 0x00000014 },
- { MT_BBP(TXBE, 9), 0x20000000 },
- { MT_BBP(TXBE, 10), 0x00000000 },
- { MT_BBP(TXBE, 12), 0x00000000 },
- { MT_BBP(TXBE, 13), 0x00000000 },
- { MT_BBP(TXBE, 14), 0x00000000 },
- { MT_BBP(TXBE, 15), 0x00000000 },
- { MT_BBP(TXBE, 16), 0x00000000 },
- { MT_BBP(TXBE, 17), 0x00000000 },
- { MT_BBP(RXFE, 1), 0x00008800 },
- { MT_BBP(RXFE, 3), 0x00000000 },
- { MT_BBP(RXFE, 4), 0x00000000 },
- { MT_BBP(RXO, 13), 0x00000192 },
- { MT_BBP(RXO, 14), 0x00060612 },
- { MT_BBP(RXO, 15), 0xC8321B18 },
- { MT_BBP(RXO, 16), 0x0000001E },
- { MT_BBP(RXO, 17), 0x00000000 },
- { MT_BBP(RXO, 18), 0xCC00A993 },
- { MT_BBP(RXO, 19), 0xB9CB9CB9 },
- { MT_BBP(RXO, 20), 0x26c00057 },
- { MT_BBP(RXO, 21), 0x00000001 },
- { MT_BBP(RXO, 24), 0x00000006 },
- { MT_BBP(RXO, 28), 0x0000003F },
-};
-
static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } },
@@ -215,16 +82,4 @@ static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } },
};
-static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
- { MT_BBP(CAL, 47), 0x000010F0 },
- { MT_BBP(CAL, 48), 0x00008080 },
- { MT_BBP(CAL, 49), 0x00000F07 },
- { MT_BBP(CAL, 50), 0x00000040 },
- { MT_BBP(CAL, 51), 0x00000404 },
- { MT_BBP(CAL, 52), 0x00080803 },
- { MT_BBP(CAL, 53), 0x00000704 },
- { MT_BBP(CAL, 54), 0x00002828 },
- { MT_BBP(CAL, 55), 0x00005050 },
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
new file mode 100644
index 000000000000..9e99ba75f490
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76X0U_INITVALS_INIT_H
+#define __MT76X0U_INITVALS_INIT_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+ { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
+ { MT_BCN_OFFSET(1), 0x6f77d0c8 },
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
+ { MT_LED_CFG, 0x7f031e46 },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x07f40003 },
+ { MT_OFDM_PROT_CFG, 0x07f42004 },
+ { MT_PBF_CFG, 0x00f40006 },
+ { MT_WPDMA_GLO_CFG, 0x00000030 },
+ { MT_GF20_PROT_CFG, 0x01742004 },
+ { MT_GF40_PROT_CFG, 0x03f42084 },
+ { MT_MM20_PROT_CFG, 0x01742004 },
+ { MT_MM40_PROT_CFG, 0x03f42084 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ { MT_IOCFG_6, 0xa0040080 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x77723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
+ { MT_TX_SW_CFG0, 0x00000601 },
+ { MT_TX_SW_CFG1, 0x00040000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_BB_PA_MODE_CFG1, 0x00500055 },
+ { MT_RF_PA_MODE_CFG1, 0x00500055 },
+ { MT_TX_ALC_CFG_0, 0x2F2F000C },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_8, 0x0000003A },
+ { MT_TX_PWR_CFG_9, 0x0000003A },
+ { 0x150C, 0x00000002 },
+ { 0x1238, 0x001700C8 },
+ { MT_LDO_CTRL_0, 0x00A647B6 },
+ { MT_LDO_CTRL_1, 0x6B006464 },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_HT_CTRL_CFG, 0x000001FF },
+ { MT_TXOP_HLDR_ET, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ { MT_BBP(CORE, 1), 0x00000002 },
+ { MT_BBP(CORE, 4), 0x00000000 },
+ { MT_BBP(CORE, 24), 0x00000000 },
+ { MT_BBP(CORE, 32), 0x4003000a },
+ { MT_BBP(CORE, 42), 0x00000000 },
+ { MT_BBP(CORE, 44), 0x00000000 },
+ { MT_BBP(IBI, 11), 0x0FDE8081 },
+ { MT_BBP(AGC, 0), 0x00021400 },
+ { MT_BBP(AGC, 1), 0x00000003 },
+ { MT_BBP(AGC, 2), 0x003A6464 },
+ { MT_BBP(AGC, 15), 0x88A28CB8 },
+ { MT_BBP(AGC, 22), 0x00001E21 },
+ { MT_BBP(AGC, 23), 0x0000272C },
+ { MT_BBP(AGC, 24), 0x00002F3A },
+ { MT_BBP(AGC, 25), 0x8000005A },
+ { MT_BBP(AGC, 26), 0x007C2005 },
+ { MT_BBP(AGC, 33), 0x00003238 },
+ { MT_BBP(AGC, 34), 0x000A0C0C },
+ { MT_BBP(AGC, 37), 0x2121262C },
+ { MT_BBP(AGC, 41), 0x38383E45 },
+ { MT_BBP(AGC, 57), 0x00001010 },
+ { MT_BBP(AGC, 59), 0xBAA20E96 },
+ { MT_BBP(AGC, 63), 0x00000001 },
+ { MT_BBP(TXC, 0), 0x00280403 },
+ { MT_BBP(TXC, 1), 0x00000000 },
+ { MT_BBP(RXC, 1), 0x00000012 },
+ { MT_BBP(RXC, 2), 0x00000011 },
+ { MT_BBP(RXC, 3), 0x00000005 },
+ { MT_BBP(RXC, 4), 0x00000000 },
+ { MT_BBP(RXC, 5), 0xF977C4EC },
+ { MT_BBP(RXC, 7), 0x00000090 },
+ { MT_BBP(TXO, 8), 0x00000000 },
+ { MT_BBP(TXBE, 0), 0x00000000 },
+ { MT_BBP(TXBE, 4), 0x00000004 },
+ { MT_BBP(TXBE, 6), 0x00000000 },
+ { MT_BBP(TXBE, 8), 0x00000014 },
+ { MT_BBP(TXBE, 9), 0x20000000 },
+ { MT_BBP(TXBE, 10), 0x00000000 },
+ { MT_BBP(TXBE, 12), 0x00000000 },
+ { MT_BBP(TXBE, 13), 0x00000000 },
+ { MT_BBP(TXBE, 14), 0x00000000 },
+ { MT_BBP(TXBE, 15), 0x00000000 },
+ { MT_BBP(TXBE, 16), 0x00000000 },
+ { MT_BBP(TXBE, 17), 0x00000000 },
+ { MT_BBP(RXFE, 1), 0x00008800 },
+ { MT_BBP(RXFE, 3), 0x00000000 },
+ { MT_BBP(RXFE, 4), 0x00000000 },
+ { MT_BBP(RXO, 13), 0x00000192 },
+ { MT_BBP(RXO, 14), 0x00060612 },
+ { MT_BBP(RXO, 15), 0xC8321B18 },
+ { MT_BBP(RXO, 16), 0x0000001E },
+ { MT_BBP(RXO, 17), 0x00000000 },
+ { MT_BBP(RXO, 18), 0xCC00A993 },
+ { MT_BBP(RXO, 19), 0xB9CB9CB9 },
+ { MT_BBP(RXO, 20), 0x26c00057 },
+ { MT_BBP(RXO, 21), 0x00000001 },
+ { MT_BBP(RXO, 24), 0x00000006 },
+ { MT_BBP(RXO, 28), 0x0000003F },
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ { MT_BBP(CAL, 47), 0x000010F0 },
+ { MT_BBP(CAL, 48), 0x00008080 },
+ { MT_BBP(CAL, 49), 0x00000F07 },
+ { MT_BBP(CAL, 50), 0x00000040 },
+ { MT_BBP(CAL, 51), 0x00000404 },
+ { MT_BBP(CAL, 52), 0x00080803 },
+ { MT_BBP(CAL, 53), 0x00000704 },
+ { MT_BBP(CAL, 54), 0x00002828 },
+ { MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f7ec3400e368..dda11c704aba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -180,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@@ -202,7 +204,7 @@ static void mt76x0e_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
- mt76x02_dma_cleanup(dev);
+ mt76_dma_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 09f34deb6ba1..3de33aadf794 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -734,7 +734,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
case 1:
if (chan->band == NL80211_BAND_2GHZ)
tssi_target += 29491; /* 3.6 * 8192 */
- /* fall through */
+ fallthrough;
case 0:
break;
default:
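fallthrough; replaces the old /* fall through */ comments throughout these patches. It is the kernel's pseudo-keyword from linux/compiler_attributes.h, expanding to __attribute__((__fallthrough__)) on compilers that support it, so -Wimplicit-fallthrough can verify the annotation instead of pattern-matching comments. A minimal standalone approximation (the real kernel definition carries more feature tests):

#if defined(__GNUC__) && __GNUC__ >= 7
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* no-op on older compilers */
#endif

int classify(int mode)
{
	int flags = 0;

	switch (mode) {
	case 1:
		flags |= 0x2;	/* mode-1 extra work ... */
		fallthrough;	/* ... then share the mode-0 path */
	case 0:
		flags |= 0x1;
		break;
	default:
		return -1;
	}
	return flags;
}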
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 4660b9691ec3..d626817a2103 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -15,6 +15,8 @@
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"
+#define MT76x02_TX_RING_SIZE 512
+#define MT76x02_PSD_RING_SIZE 128
#define MT76x02_N_WCIDS 128
#define MT_CALIBRATE_INTERVAL HZ
#define MT_MAC_WORK_INTERVAL (HZ / 10)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index ff448a1ad4e3..c4fe1c436aaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -7,7 +7,7 @@
#include "mt76x02.h"
static int
-mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
+mt76x02_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
int i, j;
@@ -31,11 +31,7 @@ mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt76x02_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x02_ampdu_stat_read, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(mt76x02_ampdu_stat);
static int read_txpower(struct seq_file *file, void *data)
{
@@ -48,15 +44,8 @@ static int read_txpower(struct seq_file *file, void *data)
return 0;
}
-static const struct file_operations fops_ampdu_stat = {
- .open = mt76x02_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int
-mt76x02_dfs_stat_read(struct seq_file *file, void *data)
+mt76x02_dfs_stat_show(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -81,18 +70,7 @@ mt76x02_dfs_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt76x02_dfs_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x02_dfs_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_dfs_stat = {
- .open = mt76x02_dfs_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt76x02_dfs_stat);
static int read_agc(struct seq_file *file, void *data)
{
@@ -150,8 +128,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
- debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt76x02_ampdu_stat_fops);
+ debugfs_create_file("dfs_stats", 0400, dir, dev, &mt76x02_dfs_stat_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
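DEFINE_SHOW_ATTRIBUTE(name), from linux/seq_file.h, generates exactly the single_open() boilerplate these hunks delete by hand: given a name##_show() function it emits name##_open() and a name##_fops file_operations, which is why debugfs_create_file() now references mt76x02_ampdu_stat_fops and mt76x02_dfs_stat_fops. The upstream macro is roughly the following (abridged; exact fields can vary by kernel version):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}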
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index ff6a9e4daac0..b29cd39dc176 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -429,11 +429,11 @@ static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params;
- u32 width_delta, with_sum, factor, cur_pri;
+ u32 width_delta, with_sum;
struct mt76x02_dfs_sequence seq, *seq_p;
struct mt76x02_dfs_event_rb *event_rb;
struct mt76x02_dfs_event *cur_event;
- int i, j, end, pri;
+ int i, j, end, pri, factor, cur_pri;
event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
: &dfs_pd->event_rb[0];
@@ -517,7 +517,7 @@ static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
struct mt76x02_dfs_sw_detector_params *sw_params;
struct mt76x02_dfs_sequence *seq, *tmp_seq;
u16 max_seq_len = 0;
- u32 factor, pri;
+ int factor, pri;
sw_params = &dfs_pd->sw_dpd_params;
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
index 4aff4f8e87b6..23b0e7d10d57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -61,6 +61,5 @@ mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
int mt76x02_dma_init(struct mt76x02_dev *dev);
void mt76x02_dma_disable(struct mt76x02_dev *dev);
-void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
#endif /* __MT76x02_DMA_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index e4e03beabe43..da6d3f51f6d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -300,7 +300,7 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
return 0;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
@@ -349,6 +349,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
memset(txwi, 0, sizeof(*txwi));
+ mt76_tx_check_agg_ssn(sta, skb);
+
if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
ieee80211_has_protected(hdr->frame_control)) {
wcid = NULL;
@@ -462,7 +464,7 @@ mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
rates[1].idx = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
rates[1].idx = max_t(int, rates[0].idx - 1, 0);
break;
@@ -677,7 +679,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
return 0;
case MT_PHY_TYPE_HT_GF:
status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
@@ -898,8 +900,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
}
}
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
@@ -916,7 +917,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
txwi = (struct mt76x02_txwi *)txwi_ptr;
trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index c70d17b2290c..0cfbaca50210 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -194,8 +194,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index bacb1f10a699..cf68731bd094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -14,7 +14,7 @@
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i;
@@ -104,8 +104,7 @@ void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -118,8 +117,7 @@ mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -151,9 +149,11 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
mt76x02_send_tx_status(dev, &stat, &update);
}
-static void mt76x02_tx_tasklet(unsigned long data)
+static void mt76x02_tx_worker(struct mt76_worker *w)
{
- struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+ struct mt76x02_dev *dev;
+
+ dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);
mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
@@ -178,7 +178,7 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
return 0;
}
@@ -197,8 +197,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (!status_fifo)
return -ENOMEM;
- tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
- (unsigned long)dev);
+ dev->mt76.tx_worker.fn = mt76x02_tx_worker;
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
@@ -210,19 +209,18 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
- mt76_ac_to_hwq(i),
- MT_TX_RING_SIZE);
+ ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
+ MT76x02_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
- MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE);
if (ret)
return ret;
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
@@ -263,9 +261,10 @@ EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
struct mt76x02_dev *dev = dev_instance;
- u32 intr;
+ u32 intr, mask;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -273,17 +272,17 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
+ mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
+ if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
+ mask |= MT_INT_TX_DONE_ALL;
+
+ mt76x02_irq_disable(dev, mask);
- if (intr & MT_INT_RX_DONE(0)) {
- mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+ if (intr & MT_INT_RX_DONE(0))
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE(1)) {
- mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+ if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
- }
if (intr & MT_INT_PRE_TBTT)
tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
@@ -293,21 +292,17 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
}
if (intr & MT_INT_TX_STAT)
mt76x02_mac_poll_tx_status(dev, true);
- if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
- mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_GPTIMER) {
- mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+ if (intr & MT_INT_GPTIMER)
tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
- }
return IRQ_HANDLED;
}
@@ -329,13 +324,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
-void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
-{
- tasklet_kill(&dev->mt76.tx_tasklet);
- mt76_dma_cleanup(&dev->mt76);
-}
-EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
-
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
@@ -369,7 +357,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
@@ -453,7 +441,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.tx_napi);
mt76_for_each_q_rx(&dev->mt76, i) {
@@ -510,7 +498,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
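Several hunks in this series swap the tx tasklet for an mt76_worker, a dedicated-kthread helper in the driver's util.h: it runs in process context (so the unified tx path may sleep, which softirq tasklets cannot), mt76_worker_schedule() coalesces wakeups through a state bit, and mt76_worker_disable()/mt76_worker_enable() park and unpark the thread so reset paths like mt76x02_watchdog_reset() can assume the worker is quiescent. A simplified sketch of that thread loop; the names and fields are trimmed from the mt76 helper, not copied verbatim:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/bitops.h>

struct my_worker {
	struct task_struct *task;
	void (*fn)(struct my_worker *w);
	unsigned long state;		/* bit 0: work pending */
};

static void my_worker_schedule(struct my_worker *w)
{
	if (!test_and_set_bit(0, &w->state))
		wake_up_process(w->task);	/* coalesce repeated requests */
}

static int my_worker_thread(void *ptr)
{
	struct my_worker *w = ptr;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_park()) {
			kthread_parkme();	/* honors my_worker_disable() */
			continue;
		}
		if (!test_and_clear_bit(0, &w->state)) {
			schedule();		/* nothing pending: sleep */
			continue;
		}
		__set_current_state(TASK_RUNNING);
		w->fn(w);			/* run the tx work */
		cond_resched();
	}
	return 0;
}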
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index a57dcc8820aa..b5be884b3549 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -19,8 +19,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 37321e656776..2c2f56112b57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -15,11 +15,10 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
mt76x02_remove_hdr_pad(skb, 2);
}
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
mt76x02u_remove_dma_hdr(e->skb);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
@@ -46,7 +45,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
- u32 info;
+ u32 info, pad;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
@@ -58,7 +57,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- return mt76_skb_adjust_pad(skb);
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ return mt76_skb_adjust_pad(skb, pad);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index dbd4077ea283..11b769af2f8f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -294,8 +294,6 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mvif->group_wcid.hw_key_idx = -1;
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->group_wcid;
-
- mt76_txq_init(&dev->mt76, vif->txq);
}
int
@@ -347,7 +345,6 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_txq_remove(&dev->mt76, vif->txq);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
@@ -490,7 +487,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].q->hw_idx;
+ qid = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 6dfb0df8ec8a..4d50dad29ddf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -63,6 +63,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@@ -111,7 +113,7 @@ mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state)
napi_disable(&mdev->tx_napi);
tasklet_kill(&mdev->pre_tbtt_tasklet);
- tasklet_kill(&mdev->tx_tasklet);
+ mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i)
napi_disable(&mdev->napi[i]);
@@ -145,6 +147,7 @@ mt76x2e_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
+ mt76_worker_enable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 101a0fe00ef3..48a3ebc9892a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -283,7 +283,7 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
- mt76x02_dma_cleanup(dev);
+ mt76_dma_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 38f473d587c9..1049927faf24 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -21,7 +21,6 @@ static int mt7915_ser_trigger_set(void *data, u64 val)
switch (val) {
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
- /* fall through */
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
if (ret)
return ret;
@@ -292,15 +291,15 @@ mt7915_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
@@ -400,7 +399,7 @@ static int mt7915_sta_fixed_rate_set(void *data, u64 rate)
struct ieee80211_sta *sta = data;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- return mt7915_mcu_set_fixed_rate(msta->vif->dev, sta, rate);
+ return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index a8832c5e6004..cfa12c4c671f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -8,7 +8,6 @@
static int
mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
int err, i;
@@ -21,18 +20,14 @@ mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
- }
+ for (i = 0; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = hwq;
return 0;
}
static int
-mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -45,8 +40,7 @@ mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
@@ -72,7 +66,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -84,8 +78,6 @@ mt7915_tx_cleanup(struct mt7915_dev *dev)
{
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -97,13 +89,7 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
mt7915_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
- mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
-
- mt7915_tx_cleanup(dev);
-
- mt7915_mac_sta_poll(dev);
-
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
return 0;
}
@@ -138,12 +124,120 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
}
+static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7915_reg_map_l1(dev, addr);
+
+ return mt7915_reg_map_l2(dev, addr);
+}
+
+static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
int mt7915_dma_init(struct mt7915_dev *dev)
{
/* Increase buffer size to receive large VHT/HE MPDUs */
+ struct mt76_bus_ops *bus_ops;
int rx_buf_size = MT_RX_BUF_SIZE * 2;
int ret;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7915_rr;
+ bus_ops->wr = mt7915_wr;
+ bus_ops->rmw = mt7915_rmw;
+ dev->mt76.bus = bus_ops;
+
mt76_dma_attach(&dev->mt76);
/* configure global setting */
@@ -168,22 +262,19 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
/* command to WM */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7915_TXQ_MCU_WM,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* command to WA */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA],
- MT7915_TXQ_MCU_WA,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* firmware download */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7915_TXQ_FWDL,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL,
MT7915_TX_FWDL_RING_SIZE);
if (ret)
return ret;
@@ -248,7 +339,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD);
return 0;
@@ -281,6 +372,5 @@ void mt7915_dma_cleanup(struct mt7915_dev *dev)
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
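
The dma.c changes above route every register access through wrapped bus ops: __mt7915_reg_addr() scans a static { phys, mapped, size } table and rewrites a documented bus address into the BAR window the host actually decodes, falling back to the dynamic L1/L2 remap on a miss. A minimal standalone sketch of that table-lookup pattern follows; the two map entries and all names here are illustrative assumptions, not the driver's real layout.

#include <stdint.h>
#include <stddef.h>

struct reg_window {
	uint32_t phys;   /* bus address the datasheet documents */
	uint32_t mapped; /* offset inside the BAR the host sees */
	uint32_t size;   /* length of the window in bytes */
};

/* Illustrative two-entry table; the real driver carries ~40 windows. */
static const struct reg_window fixed_map[] = {
	{ 0x54000000, 0x02000, 0x1000 },
	{ 0x7c000000, 0xf0000, 0x10000 },
};

/* Translate a documented bus address into a BAR offset, or 0 on miss. */
static uint32_t reg_addr(uint32_t addr)
{
	size_t i;

	if (addr < 0x100000) /* already a BAR offset */
		return addr;

	for (i = 0; i < sizeof(fixed_map) / sizeof(fixed_map[0]); i++) {
		uint32_t ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs >= fixed_map[i].size)
			continue;

		return fixed_map[i].mapped + ofs;
	}

	return 0; /* the driver falls back to dynamic L1/L2 remap here */
}
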
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 8d6ceb3b67b4..0232b66acb4f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -135,6 +135,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ /*
+ * force the firmware operation mode into normal state,
+ * which must be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
ret = mt7915_mcu_init(dev);
if (ret)
return ret;
@@ -612,6 +618,7 @@ int mt7915_register_ext_phy(struct mt7915_dev *dev)
mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
mt7915_init_wiphy(mphy->hw);
+ INIT_LIST_HEAD(&phy->stats_list);
INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work);
/*
@@ -652,7 +659,10 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
+ INIT_LIST_HEAD(&dev->phy.stats_list);
+ INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work);
+ INIT_LIST_HEAD(&dev->sta_rc_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 036207f828f3..6f159d99a596 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -88,17 +88,16 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
0, 5000);
}
-static u32 mt7915_mac_wtbl_lmac_read(struct mt7915_dev *dev, u16 wcid,
- u16 addr)
+static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
{
mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
- return mt76_rr(dev, MT_WTBL_LMAC_OFFS(wcid, addr));
+ return MT_WTBL_LMAC_OFFS(wcid, 0);
}
/* TODO: use txfree airtime info to avoid run-time register access in the long run */
-void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
static const u8 ac_to_tid[] = {
[IEEE80211_AC_BE] = 0,
@@ -106,47 +105,50 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
[IEEE80211_AC_VI] = 4,
[IEEE80211_AC_VO] = 6
};
- static const u8 hw_queue_map[] = {
- [IEEE80211_AC_BK] = 0,
- [IEEE80211_AC_BE] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
struct ieee80211_sta *sta;
struct mt7915_sta *msta;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
int i;
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
rcu_read_lock();
while (true) {
bool clear = false;
+ u32 addr;
u16 idx;
spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&dev->sta_poll_list)) {
+ if (list_empty(&sta_poll_list)) {
spin_unlock_bh(&dev->sta_poll_lock);
break;
}
- msta = list_first_entry(&dev->sta_poll_list,
+ msta = list_first_entry(&sta_poll_list,
struct mt7915_sta, poll_list);
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- for (i = 0, idx = msta->wcid.idx; i < IEEE80211_NUM_ACS; i++) {
+ idx = msta->wcid.idx;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + IEEE80211_NUM_ACS];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- msta->airtime_ac[i] =
- mt7915_mac_wtbl_lmac_read(dev, idx, 20 + i);
- msta->airtime_ac[i + IEEE80211_NUM_ACS] =
- mt7915_mac_wtbl_lmac_read(dev, idx, 21 + i);
tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + IEEE80211_NUM_ACS] -
- rx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
+
+ addr += 8;
}
if (clear) {
@@ -161,8 +163,9 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_cur = tx_time[i];
- u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 q = mt7915_lmac_mapping(dev, i);
+ u32 tx_cur = tx_time[q];
+ u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
if (!tx_cur && !rx_cur)
@@ -468,7 +471,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
switch (mode) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -487,7 +490,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
break;
case MT_PHY_TYPE_HE_MU:
status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
@@ -565,13 +568,15 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
- u16 tx_count = 4, seqno = 0;
+ u16 tx_count = 15, seqno = 0;
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u32 val;
if (vif) {
@@ -587,6 +592,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+ txwi[4] = 0;
+ txwi[5] = 0;
+ txwi[6] = 0;
+
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_BCN0;
@@ -599,6 +608,20 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
@@ -609,8 +632,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
FIELD_PREP(MT_TXD1_HDR_INFO,
ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID,
- skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_TID, tid) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
@@ -634,10 +656,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
}
txwi[2] = cpu_to_le32(val);
- txwi[4] = 0;
- txwi[5] = 0;
- txwi[6] = 0;
-
if (!ieee80211_is_data(fc) || multicast) {
u16 rate;
@@ -665,20 +683,24 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ if (wcid->amsdu)
+ val |= MT_TXD7_HW_AMSDU;
txwi[7] = cpu_to_le32(val);
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (ieee80211_is_data_qos(fc)) {
- seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- val |= MT_TXD3_SN_VALID;
- } else if (ieee80211_is_back_req(fc)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
- val |= MT_TXD3_SN_VALID;
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val |= MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
}
- val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
txwi[3] |= cpu_to_le32(val);
}
@@ -715,6 +737,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
/* pass partial skb header to fw */
tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
@@ -747,45 +770,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
-static inline bool
-mt7915_tx_check_aggr_tid(struct mt7915_sta *msta, u8 tid)
-{
- bool ret = false;
-
- spin_lock_bh(&msta->ampdu_lock);
- if (msta->ampdu_state[tid] == MT7915_AGGR_STOP)
- ret = true;
- spin_unlock_bh(&msta->ampdu_lock);
-
- return ret;
-}
-
static void
-mt7915_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
+mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt7915_sta *msta;
- u16 tid;
-
- if (!sta->ht_cap.ht_supported)
- return;
+ u16 fc, tid;
+ u32 val;
- if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ if (!sta || !sta->ht_cap.ht_supported)
return;
- if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+ tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ if (tid >= 6) /* skip VO queue */
return;
- if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
return;
msta = (struct mt7915_sta *)sta->drv_priv;
- tid = ieee80211_get_tid(hdr);
-
- if (mt7915_tx_check_aggr_tid(msta, tid)) {
+ if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_PROGRESS);
- }
}
static inline void
@@ -822,8 +829,6 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
if (info->flags & IEEE80211_TX_CTL_AMPDU)
info->flags |= IEEE80211_TX_STAT_AMPDU;
- else if (sta)
- mt7915_tx_check_aggr(sta, skb);
if (stat)
ieee80211_tx_info_clear_status(info);
@@ -864,6 +869,10 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta = NULL;
u8 i, count;
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+
/*
* TODO: MT_TX_FREE_LATENCY is the MSDU latency from the time the TXD is
* queued into the PLE to the time the ack is received or the frame is
* dropped by hw (air + hw queue time).
@@ -880,6 +889,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
*/
if (info & MT_TX_FREE_PAIR) {
struct mt7915_sta *msta;
+ struct mt7915_phy *phy;
struct mt76_wcid *wcid;
u16 idx;
@@ -891,8 +901,13 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
continue;
msta = container_of(wcid, struct mt7915_sta, wcid);
- ieee80211_queue_work(mt76_hw(dev), &msta->stats_work);
- continue;
+ phy = msta->vif->phy;
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->stats_list))
+ list_add_tail(&msta->stats_list, &phy->stats_list);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
@@ -907,6 +922,21 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_txp_skb_unmap(mdev, txwi);
if (txwi->skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
+ void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
+
+ if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi_ptr);
+
+ if (sta && !info->tx_time_est) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int pending;
+
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
mt7915_tx_complete_status(mdev, txwi->skb, sta, stat);
txwi->skb = NULL;
}
@@ -914,10 +944,12 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_put_txwi(mdev, txwi);
}
dev_kfree_skb(skb);
+
+ mt7915_mac_sta_poll(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7915_dev *dev;
@@ -1186,7 +1218,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (ext_phy)
mt76_txq_schedule_all(ext_phy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.napi[2]);
@@ -1206,7 +1238,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
@@ -1281,39 +1313,63 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
}
}
-void mt7915_mac_sta_stats_work(struct work_struct *work)
+static void
+mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&phy->stats_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, stats_list);
+ list_del_init(&msta->stats_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ /* use MT_TX_FREE_RATE to report Tx rate on newer devices */
+ mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7915_mac_sta_rc_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
- struct mt7915_sta_stats *stats;
struct mt7915_sta *msta;
- struct mt7915_dev *dev;
+ u32 changed;
+ LIST_HEAD(list);
- msta = container_of(work, struct mt7915_sta, stats_work);
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
- dev = msta->vif->dev;
- stats = &msta->stats;
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_rc_list, &list);
- /* use MT_TX_FREE_RATE to report Tx rate for further devices */
- if (time_after(jiffies, stats->jiffies + HZ)) {
- mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO,
- msta->wcid.idx);
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, rc_list);
+ list_del_init(&msta->rc_list);
+ changed = msta->stats.changed;
+ msta->stats.changed = 0;
+ spin_unlock_bh(&dev->sta_poll_lock);
- stats->jiffies = jiffies;
- }
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
- if (test_and_clear_bit(IEEE80211_RC_SUPP_RATES_CHANGED |
+ if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
- IEEE80211_RC_BW_CHANGED, &stats->changed))
- mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+ IEEE80211_RC_BW_CHANGED))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta);
- if (test_and_clear_bit(IEEE80211_RC_SMPS_CHANGED, &stats->changed))
- mt7915_mcu_add_smps(dev, vif, sta);
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7915_mcu_add_smps(dev, vif, sta);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
@@ -1335,6 +1391,11 @@ void mt7915_mac_work(struct work_struct *work)
mt7915_mac_update_mib_stats(phy);
}
+ if (++phy->sta_work_count == 10) {
+ phy->sta_work_count = 0;
+ mt7915_mac_sta_stats_work(phy);
+ }
+
mutex_unlock(&mdev->mutex);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
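
Several of the mac.c hunks above replace per-station work items with list-based batching: producers add a station to a list under sta_poll_lock, and a single consumer splices the whole list out, then drops the lock around each expensive per-entry step. A minimal sketch of that splice-and-walk pattern, assuming hypothetical names (struct station, pending, etc.) rather than the driver's:

#include <linux/list.h>
#include <linux/spinlock.h>

struct station {
	struct list_head poll_list;
	/* ... per-station state ... */
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

/* Producer side: queue a station at most once, from any context. */
static void station_mark_pending(struct station *sta)
{
	spin_lock_bh(&pending_lock);
	if (list_empty(&sta->poll_list))
		list_add_tail(&sta->poll_list, &pending);
	spin_unlock_bh(&pending_lock);
}

/* Consumer side: grab the whole batch, then work per entry. */
static void station_poll_all(void)
{
	LIST_HEAD(batch);
	struct station *sta;

	spin_lock_bh(&pending_lock);
	list_splice_init(&pending, &batch);
	spin_unlock_bh(&pending_lock);

	while (!list_empty(&batch)) {
		spin_lock_bh(&pending_lock);
		sta = list_first_entry(&batch, struct station, poll_list);
		list_del_init(&sta->poll_list);
		spin_unlock_bh(&pending_lock);

		/* expensive per-station register reads happen here,
		 * outside the lock, mirroring mt7915_mac_sta_poll() */
	}
}
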
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index f95a0b55c4a2..c48158392057 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -137,7 +137,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
goto out;
}
mvif->omac_idx = idx;
- mvif->dev = dev;
+ mvif->phy = phy;
mvif->band_idx = ext_phy;
if (ext_phy)
@@ -155,6 +155,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
idx = MT7915_WTBL_RESERVED - mvif->idx;
+ INIT_LIST_HEAD(&mvif->sta.rc_list);
+ INIT_LIST_HEAD(&mvif->sta.stats_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.ext_phy = mvif->band_idx;
@@ -167,7 +169,6 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
}
out:
@@ -190,8 +191,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt7915_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- if (vif->txq)
- mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
phy->mt76->vif_mask &= ~BIT(mvif->idx);
@@ -493,9 +492,9 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->rc_list);
+ INIT_LIST_HEAD(&msta->stats_list);
INIT_LIST_HEAD(&msta->poll_list);
- INIT_WORK(&msta->stats_work, mt7915_mac_sta_stats_work);
- spin_lock_init(&msta->ampdu_lock);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
@@ -528,6 +527,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
+ if (!list_empty(&msta->stats_list))
+ list_del_init(&msta->stats_list);
+ if (!list_empty(&msta->rc_list))
+ list_del_init(&msta->rc_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
@@ -603,23 +606,21 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_OPERATIONAL);
mt7915_mcu_add_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ clear_bit(tid, &msta->ampdu_state);
mt7915_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_START);
+ set_bit(tid, &msta->ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ clear_bit(tid, &msta->ampdu_state);
mt7915_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -789,18 +790,16 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, sta->addr);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
+ spin_lock_bh(&dev->sta_poll_lock);
+ msta->stats.changed |= changed;
+ if (list_empty(&msta->rc_list))
+ list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
- set_bit(changed, &msta->stats.changed);
- ieee80211_queue_work(hw, &msta->stats_work);
+ ieee80211_queue_work(hw, &dev->rc_work);
}
const struct ieee80211_ops mt7915_ops = {
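
The main.c hunks above also drop the spinlock-protected per-TID aggregation enum in favour of a single unsigned long bitmap: test_and_set_bit() checks and claims a TID atomically, so the BA session is started exactly once per TID without taking a lock. A short sketch of that idiom under hypothetical names:

#include <linux/bitops.h>

struct peer {
	unsigned long ampdu_state; /* one bit per TID */
};

/* Returns true exactly once per TID until the bit is cleared again. */
static bool peer_claim_ampdu(struct peer *p, unsigned int tid)
{
	return !test_and_set_bit(tid, &p->ampdu_state);
}

static void peer_release_ampdu(struct peer *p, unsigned int tid)
{
	clear_bit(tid, &p->ampdu_state);
}
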
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index eaed5ef05401..a3ccc1785661 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -522,6 +522,9 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
return;
wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ return;
+
msta = container_of(wcid, struct mt7915_sta, wcid);
stats = &msta->stats;
@@ -714,8 +717,8 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
ptlv = skb_put(skb, sub_len);
memcpy(ptlv, &tlv, sizeof(tlv));
- *sub_ntlv = cpu_to_le16(le16_to_cpu(*sub_ntlv) + 1);
- *len = cpu_to_le16(le16_to_cpu(*len) + sub_len);
+ le16_add_cpu(sub_ntlv, 1);
+ le16_add_cpu(len, sub_len);
return ptlv;
}
@@ -933,11 +936,11 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext * 4;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th * 32);
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -947,6 +950,23 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
+mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
+{
+#define TXD_CMP_MAP1 GENMASK(15, 0)
+#define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23))
+ struct bss_info_hw_amsdu *amsdu;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+
+ amsdu = (struct bss_info_hw_amsdu *)tlv;
+ amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
+ amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2);
+ amsdu->trig_thres = cpu_to_le16(2);
+ amsdu->enable = true;
+}
+
+static void
mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
{
/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
@@ -1020,6 +1040,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
mt7915_mcu_bss_bmc_tlv(skb, phy);
mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7915_mcu_bss_hw_amsdu_tlv(skb);
if (vif->bss_conf.he_support)
mt7915_mcu_bss_he_tlv(skb, vif, phy);
@@ -1178,6 +1199,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
struct sk_buff *skb;
int ret;
+ if (enable && tx && !params->amsdu)
+ msta->wcid.amsdu = false;
+
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
MT7915_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -1407,7 +1431,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->max_nss_mcs[CMD_HE_MCS_BW160] =
he_cap->he_mcs_nss_supp.rx_mcs_160;
- /* fall through */
+ fallthrough;
default:
he->max_nss_mcs[CMD_HE_MCS_BW80] =
he_cap->he_mcs_nss_supp.rx_mcs_80;
@@ -1441,6 +1465,38 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
+mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct sta_rec_uapsd *uapsd;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+ uapsd = (struct sta_rec_uapsd *)tlv;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+ uapsd->dac_map |= BIT(3);
+ uapsd->tac_map |= BIT(3);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+ uapsd->dac_map |= BIT(2);
+ uapsd->tac_map |= BIT(2);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+ uapsd->dac_map |= BIT(1);
+ uapsd->tac_map |= BIT(1);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+ uapsd->dac_map |= BIT(0);
+ uapsd->tac_map |= BIT(0);
+ }
+ uapsd->max_sp = sta->max_sp;
+}
+
+static void
mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
@@ -1512,8 +1568,39 @@ mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
static void
+mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (!sta->max_amsdu_len)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ msta->wcid.amsdu = true;
+}
+
+static bool
+mt7915_hw_amsdu_supported(struct ieee80211_vif *vif)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
{
struct tlv *tlv;
@@ -1524,6 +1611,9 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+
+ if (mt7915_hw_amsdu_supported(vif))
+ mt7915_mcu_sta_amsdu_tlv(skb, sta);
}
/* starec vht */
@@ -1540,6 +1630,9 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
/* starec he */
if (sta->he_cap.has_he)
mt7915_mcu_sta_he_tlv(skb, sta);
+
+ /* starec uapsd */
+ mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
}
static void
@@ -2176,7 +2269,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
if (enable && sta)
- mt7915_mcu_sta_tlv(dev, skb, sta);
+ mt7915_mcu_sta_tlv(dev, skb, sta, vif);
sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
@@ -2282,7 +2375,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
struct ieee80211_mutable_offsets *offs)
{
- if (offs->csa_counter_offs[0]) {
+ if (offs->cntdwn_counter_offs[0]) {
struct tlv *tlv;
struct bss_info_bcn_csa *csa;
@@ -2290,7 +2383,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
sizeof(*csa), &bcn->sub_ntlv,
&bcn->len);
csa = (struct bss_info_bcn_csa *)tlv;
- csa->cnt = skb->data[offs->csa_counter_offs[0]];
+ csa->cnt = skb->data[offs->cntdwn_counter_offs[0]];
}
}
@@ -2312,8 +2405,8 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
cont->tim_ofs = cpu_to_le16(offs->tim_offset);
- if (offs->csa_counter_offs[0])
- cont->csa_ofs = cpu_to_le16(offs->csa_counter_offs[0] - 4);
+ if (offs->cntdwn_counter_offs[0])
+ cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
@@ -2335,14 +2428,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
- if (IS_ERR(rskb))
- return PTR_ERR(rskb);
-
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
- bcn = (struct bss_info_bcn *)tlv;
- bcn->enable = en;
-
skb = ieee80211_beacon_get_template(hw, vif, &offs);
if (!skb)
return -EINVAL;
@@ -2353,6 +2438,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(rskb)) {
+ dev_kfree_skb(skb);
+ return PTR_ERR(rskb);
+ }
+
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
if (mvif->band_idx) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
@@ -2901,6 +2996,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
struct edca *e = &req.edca[ac];
+ e->set = WMM_PARAM_SET;
e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -3052,8 +3148,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
.channel_band = chandef->chan->band,
};
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
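
One small cleanup in the mcu.c hunks above is switching the nested-TLV bookkeeping to le16_add_cpu(), the kernel helper that is shorthand for cpu_to_le16(le16_to_cpu(*v) + n). A hedged sketch of a TLV appender in that style; tlv_append() and its parameter names are illustrative, not the driver's helper signature:

#include <linux/skbuff.h>
#include <linux/string.h>

struct tlv_hdr {
	__le16 tag;
	__le16 len;
};

/*
 * Append a sub-TLV to a command skb and keep the parent's running
 * counters (sub-TLV count and total length) current.
 */
static void *tlv_append(struct sk_buff *skb, u16 tag, u16 len,
			__le16 *sub_ntlv, __le16 *total_len)
{
	struct tlv_hdr hdr = {
		.tag = cpu_to_le16(tag),
		.len = cpu_to_le16(len),
	};
	void *ptlv = skb_put(skb, len);

	memcpy(ptlv, &hdr, sizeof(hdr));
	le16_add_cpu(sub_ntlv, 1);
	le16_add_cpu(total_len, len);

	return ptlv;
}
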
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index cb35e718409a..c656d66385c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -402,6 +402,16 @@ struct bss_info_ra {
__le32 fast_interval;
} __packed;
+struct bss_info_hw_amsdu {
+ __le16 tag;
+ __le16 len;
+ __le32 cmp_bitmap_0;
+ __le32 cmp_bitmap_1;
+ __le16 trig_thres;
+ u8 enable;
+ u8 rsv;
+} __packed;
+
struct bss_info_he {
__le16 tag;
__le16 len;
@@ -645,6 +655,17 @@ struct sta_rec_vht {
u8 rsv[3];
} __packed;
+struct sta_rec_uapsd {
+ __le16 tag;
+ __le16 len;
+ u8 dac_map;
+ u8 tac_map;
+ u8 max_sp;
+ u8 rsv0;
+ __le16 listen_interval;
+ u8 rsv1[2];
+} __packed;
+
struct sta_rec_muru {
__le16 tag;
__le16 len;
@@ -725,6 +746,15 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
+struct sta_rec_amsdu {
+ __le16 tag;
+ __le16 len;
+ u8 max_amsdu_num;
+ u8 max_mpdu_size;
+ u8 amsdu_en;
+ u8 rsv;
+} __packed;
+
struct sec_key {
u8 cipher_id;
u8 cipher_len;
@@ -951,6 +981,8 @@ enum {
sizeof(struct sta_rec_he) + \
sizeof(struct sta_rec_ba) + \
sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
sizeof(struct tlv) + \
MT7915_WTBL_UPDATE_MAX_SIZE)
@@ -962,6 +994,7 @@ enum {
sizeof(struct bss_info_basic) +\
sizeof(struct bss_info_rf_ch) +\
sizeof(struct bss_info_ra) + \
+ sizeof(struct bss_info_hw_amsdu) +\
sizeof(struct bss_info_he) + \
sizeof(struct bss_info_bmc_rate) +\
sizeof(struct bss_info_ext_bss) +\
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index d8a13b4a2359..4b8908fa7eda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -62,13 +62,6 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA,
};
-enum mt7915_ampdu_state {
- MT7915_AGGR_STOP,
- MT7915_AGGR_PROGRESS,
- MT7915_AGGR_START,
- MT7915_AGGR_OPERATIONAL
-};
-
struct mt7915_sta_stats {
struct rate_info prob_rate;
struct rate_info tx_rate;
@@ -83,14 +76,14 @@ struct mt7915_sta {
struct mt7915_vif *vif;
+ struct list_head stats_list;
struct list_head poll_list;
+ struct list_head rc_list;
u32 airtime_ac[8];
struct mt7915_sta_stats stats;
- struct work_struct stats_work;
- spinlock_t ampdu_lock;
- enum mt7915_ampdu_state ampdu_state[IEEE80211_NUM_TIDS];
+ unsigned long ampdu_state;
};
struct mt7915_vif {
@@ -100,7 +93,7 @@ struct mt7915_vif {
u8 wmm_idx;
struct mt7915_sta sta;
- struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
};
@@ -135,9 +128,11 @@ struct mt7915_phy {
u32 ampdu_ref;
struct mib_stats mib;
+ struct list_head stats_list;
struct delayed_work mac_work;
u8 mac_work_count;
+ u8 sta_work_count;
};
struct mt7915_dev {
@@ -146,15 +141,18 @@ struct mt7915_dev {
struct mt76_phy mphy;
};
+ const struct mt76_bus_ops *bus_ops;
struct mt7915_phy phy;
u16 chainmask;
struct work_struct init_work;
+ struct work_struct rc_work;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
u32 reset_state;
+ struct list_head sta_rc_list;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -260,26 +258,8 @@ mt7915_ext_phy(struct mt7915_dev *dev)
static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
{
- static const u8 lmac_queue_map[] = {
- [IEEE80211_AC_BK] = MT_LMAC_AC00,
- [IEEE80211_AC_BE] = MT_LMAC_AC01,
- [IEEE80211_AC_VI] = MT_LMAC_AC02,
- [IEEE80211_AC_VO] = MT_LMAC_AC03,
- };
-
- if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
- return MT_LMAC_AC01; /* BE */
-
- return lmac_queue_map[ac];
-}
-
-static inline void
-mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid,
- enum mt7915_ampdu_state state)
-{
- spin_lock_bh(&msta->ampdu_lock);
- msta->ampdu_state[tid] = state;
- spin_unlock_bh(&msta->ampdu_lock);
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
}
extern const struct ieee80211_ops mt7915_ops;
@@ -448,7 +428,6 @@ mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
-void mt7915_mac_sta_poll(struct mt7915_dev *dev);
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, bool beacon);
@@ -461,13 +440,12 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
-void mt7915_mac_sta_stats_work(struct work_struct *work);
+void mt7915_mac_sta_rc_work(struct work_struct *work);
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
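
The lookup table in mt7915_lmac_mapping() collapses to `3 - ac` because mac80211 numbers the access categories VO=0, VI=1, BE=2, BK=3 while the LMAC numbers them in the opposite order (AC00=BK ... AC03=VO). A quick self-check of that identity as a hypothetical userspace harness:

#include <assert.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };                 /* mac80211 order */
enum { LMAC_AC00, LMAC_AC01, LMAC_AC02, LMAC_AC03 }; /* LMAC order */

int main(void)
{
	/* the reversed index reproduces the old lookup table exactly */
	assert(3 - AC_BK == LMAC_AC00);
	assert(3 - AC_BE == LMAC_AC01);
	assert(3 - AC_VI == LMAC_AC02);
	assert(3 - AC_VO == LMAC_AC03);
	return 0;
}
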
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 0ec4e184b889..fe62b4d853e4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -29,9 +29,10 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
- u32 intr;
+ u32 intr, mask;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -39,27 +40,23 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
- if (intr & MT_INT_TX_DONE_ALL) {
- mt7915_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_RX_DONE_DATA) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_DATA);
+ if (intr & MT_INT_RX_DONE_DATA)
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE_WM) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_WM);
+ if (intr & MT_INT_RX_DONE_WM)
napi_schedule(&dev->mt76.napi[1]);
- }
- if (intr & MT_INT_RX_DONE_WA) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_WA);
+ if (intr & MT_INT_RX_DONE_WA)
napi_schedule(&dev->mt76.napi[2]);
- }
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -103,7 +100,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -149,6 +147,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
(mt7915_l1_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
/* master switch of PCIe interrupt enable */
mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
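
The pci.c hunk above converges on a common interrupt flow: mask the status against what was actually enabled, ack it, disable only the sources NAPI will poll, and let the poll routine re-enable them on completion. A hedged sketch of that flow; struct dev_ctx, the register offsets, and the bit masks are all assumptions for illustration:

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define INT_SOURCE   0x200                       /* hypothetical offsets */
#define INT_MASK     0x204
#define RX_DONE_ALL  (BIT(0) | BIT(1) | BIT(16))
#define TX_DONE_MCU  (BIT(15) | BIT(26) | BIT(27))

struct dev_ctx {
	void __iomem *regs;
	u32 irqmask;
	struct napi_struct tx_napi, rx_napi;
};

static void dev_irq_disable(struct dev_ctx *dev, u32 mask)
{
	dev->irqmask &= ~mask;
	writel(dev->irqmask, dev->regs + INT_MASK);
}

static irqreturn_t irq_handler_sketch(int irq, void *data)
{
	struct dev_ctx *dev = data;
	u32 intr, mask;

	intr = readl(dev->regs + INT_SOURCE);
	intr &= dev->irqmask;                 /* drop sources never enabled */
	writel(intr, dev->regs + INT_SOURCE); /* write-1-to-clear ack */

	mask = intr & (RX_DONE_ALL | TX_DONE_MCU);
	dev_irq_disable(dev, mask);           /* the NAPI poll re-enables */

	if (intr & TX_DONE_MCU)
		napi_schedule(&dev->tx_napi);
	if (intr & RX_DONE_ALL)
		napi_schedule(&dev->rx_napi);

	return IRQ_HANDLED;
}
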
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index e0989141d9da..64327153b7fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -313,9 +313,17 @@
#define MT_INT_RX_DONE_WA BIT(1)
#define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16))
#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16))
-#define MT_INT_TX_DONE_ALL (BIT(15) | GENMASK(27, 26) | BIT(30))
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
+ MT_INT_TX_DONE_MCU_WM | \
+ MT_INT_TX_DONE_FWDL)
+
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
@@ -352,6 +360,13 @@
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_SWDEF_BASE 0x41f200
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index d2b38ed7f3b4..9a4d95a2a707 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -42,15 +42,13 @@ static int mt76s_alloc_tx(struct mt76_dev *dev)
int i;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
- INIT_LIST_HEAD(&dev->q_tx[i].swq);
-
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
q->hw_idx = i;
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -68,6 +66,10 @@ void mt76s_stop_txrx(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
+ cancel_work_sync(&sdio->tx.xmit_work);
+ cancel_work_sync(&sdio->tx.status_work);
+ cancel_work_sync(&sdio->rx.recv_work);
+ cancel_work_sync(&sdio->rx.net_work);
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
@@ -94,8 +96,8 @@ mt76s_get_next_rx_entry(struct mt76_queue *q)
spin_lock_bh(&q->lock);
if (q->queued > 0) {
- e = &q->entry[q->head];
- q->head = (q->head + 1) % q->ndesc;
+ e = &q->entry[q->tail];
+ q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
spin_unlock_bh(&q->lock);
@@ -129,38 +131,26 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
return nframes;
}
-static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- u32 n_dequeued = 0, n_sw_dequeued = 0;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
- struct mt76_queue *q = sq->q;
bool wake;
- while (q->queued > n_dequeued) {
- if (!q->entry[q->head].done)
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
break;
- if (q->entry[q->head].schedule) {
- q->entry[q->head].schedule = false;
- n_sw_dequeued++;
- }
-
- entry = q->entry[q->head];
- q->entry[q->head].done = false;
- q->head = (q->head + 1) % q->ndesc;
- n_dequeued++;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- if (qid == MT_TXQ_MCU)
+ if (qid == MT_TXQ_MCU) {
dev_kfree_skb(entry.skb);
- else
- dev->drv->tx_complete_skb(dev, qid, &entry);
- }
-
- spin_lock_bh(&q->lock);
+ entry.skb = NULL;
+ }
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
@@ -169,19 +159,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
if (qid == MT_TXQ_MCU)
- goto out;
+ return;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
-
- wake_up_process(dev->sdio.tx_kthread);
-out:
- return n_dequeued;
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -214,12 +198,12 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
int err, len = skb->len;
- u16 idx = q->tail;
+ u16 idx = q->head;
if (q->queued == q->ndesc)
return -ENOSPC;
@@ -229,9 +213,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
if (err < 0)
return err;
- q->entry[q->tail].skb = tx_info.skb;
- q->entry[q->tail].buf_sz = len;
- q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[q->head].skb = tx_info.skb;
+ q->entry[q->head].buf_sz = len;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
return idx;
@@ -241,25 +225,31 @@ static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
- int ret = -ENOSPC, len = skb->len;
+ struct mt76_queue *q = dev->q_tx[qid];
+ int ret = -ENOSPC, len = skb->len, pad;
- spin_lock_bh(&q->lock);
if (q->queued == q->ndesc)
- goto out;
+ goto error;
- ret = mt76_skb_adjust_pad(skb);
+ pad = round_up(skb->len, 4) - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
if (ret)
- goto out;
+ goto error;
- q->entry[q->tail].buf_sz = len;
- q->entry[q->tail].skb = skb;
- q->tail = (q->tail + 1) % q->ndesc;
+ spin_lock_bh(&q->lock);
+
+ q->entry[q->head].buf_sz = len;
+ q->entry[q->head].skb = skb;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
-out:
spin_unlock_bh(&q->lock);
+ return 0;
+
+error:
+ dev_kfree_skb(skb);
+
return ret;
}
@@ -267,7 +257,7 @@ static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
struct mt76_sdio *sdio = &dev->sdio;
- wake_up_process(sdio->tx_kthread);
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
}
static const struct mt76_queue_ops sdio_queue_ops = {
@@ -276,41 +266,37 @@ static const struct mt76_queue_ops sdio_queue_ops = {
.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
-static int mt76s_kthread_run(void *data)
+static void mt76s_tx_work(struct work_struct *work)
{
- struct mt76_dev *dev = data;
- struct mt76_phy *mphy = &dev->phy;
-
- while (!kthread_should_stop()) {
- int i, nframes = 0;
-
- cond_resched();
-
- /* rx processing */
- local_bh_disable();
- rcu_read_lock();
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.status_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
- mt76_for_each_q_rx(dev, i)
- nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+ for (i = 0; i < MT_TXQ_MCU_WA; i++)
+ mt76s_process_tx_queue(dev, i);
- rcu_read_unlock();
- local_bh_enable();
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->sdio.stat_work);
+}
- /* tx processing */
- for (i = 0; i < MT_TXQ_MCU_WA; i++)
- nframes += mt76s_process_tx_queue(dev, i);
+static void mt76s_rx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.net_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
- if (dev->drv->tx_status_data &&
- !test_and_set_bit(MT76_READING_STATS, &mphy->state))
- queue_work(dev->wq, &dev->sdio.stat_work);
+ /* rx processing */
+ local_bh_disable();
+ rcu_read_lock();
- if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- }
+ mt76_for_each_q_rx(dev, i)
+ mt76s_process_rx_queue(dev, &dev->q_rx[i]);
- return 0;
+ rcu_read_unlock();
+ local_bh_enable();
}
void mt76s_deinit(struct mt76_dev *dev)
@@ -318,9 +304,11 @@ void mt76s_deinit(struct mt76_dev *dev)
struct mt76_sdio *sdio = &dev->sdio;
int i;
- kthread_stop(sdio->kthread);
- kthread_stop(sdio->tx_kthread);
mt76s_stop_txrx(dev);
+ if (sdio->txrx_wq) {
+ destroy_workqueue(sdio->txrx_wq);
+ sdio->txrx_wq = NULL;
+ }
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
@@ -348,11 +336,15 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
{
struct mt76_sdio *sdio = &dev->sdio;
- sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
- if (IS_ERR(sdio->kthread))
- return PTR_ERR(sdio->kthread);
+ sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!sdio->txrx_wq)
+ return -ENOMEM;
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+ INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
+ INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;
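
The sdio.c rewrite above also normalises ring-index usage: the producer (tx_queue_skb paths) now advances q->head while the consumer (status processing and rx entry fetch) advances q->tail, matching the convention used by the DMA path. A minimal single-producer/single-consumer ring in that style; the struct and sizes are illustrative, not the driver's:

#include <stdbool.h>
#include <stddef.h>

#define NDESC 16

struct ring {
	void *entry[NDESC];
	int head;   /* next free slot, written by the producer  */
	int tail;   /* next unread slot, written by the consumer */
	int queued; /* protected by the queue lock in the driver */
};

static bool ring_push(struct ring *q, void *e)
{
	if (q->queued == NDESC)
		return false;
	q->entry[q->head] = e;
	q->head = (q->head + 1) % NDESC;
	q->queued++;
	return true;
}

static void *ring_pop(struct ring *q)
{
	void *e;

	if (q->queued == 0)
		return NULL;
	e = q->entry[q->tail];
	q->tail = (q->tail + 1) % NDESC;
	q->queued--;
	return e;
}
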
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 75bb02cdfdae..883f59c7a7e4 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -29,11 +29,12 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
return;
qid = skb_get_queue_mapping(skb);
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
- while (td->tx_pending > 0 && q->queued < q->ndesc / 2) {
+ while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 &&
+ q->queued < q->ndesc / 2) {
int ret;
ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL);
@@ -160,7 +161,7 @@ mt76_testmode_tx_start(struct mt76_dev *dev)
td->tx_queued = 0;
td->tx_done = 0;
td->tx_pending = td->tx_count;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
static void
@@ -168,11 +169,11 @@ mt76_testmode_tx_stop(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
- tasklet_disable(&dev->tx_tasklet);
+ mt76_worker_disable(&dev->tx_worker);
td->tx_pending = 0;
- tasklet_enable(&dev->tx_tasklet);
+ mt76_worker_enable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
@@ -442,9 +443,13 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
mutex_lock(&dev->mutex);
if (tb[MT76_TM_ATTR_STATS]) {
+ err = -EINVAL;
+
a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
- err = mt76_testmode_dump_stats(dev, msg);
- nla_nest_end(msg, a);
+ if (a) {
+ err = mt76_testmode_dump_stats(dev, msg);
+ nla_nest_end(msg, a);
+ }
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 3afd89ecd6c9..44ef4bc7a46e 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -5,75 +5,6 @@
#include "mt76.h"
-static struct mt76_txwi_cache *
-mt76_alloc_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t;
- dma_addr_t addr;
- u8 *txwi;
- int size;
-
- size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
- txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
- if (!txwi)
- return NULL;
-
- addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
- DMA_TO_DEVICE);
- t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
- t->dma_addr = addr;
-
- return t;
-}
-
-static struct mt76_txwi_cache *
-__mt76_get_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t = NULL;
-
- spin_lock_bh(&dev->lock);
- if (!list_empty(&dev->txwi_cache)) {
- t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
- list);
- list_del(&t->list);
- }
- spin_unlock_bh(&dev->lock);
-
- return t;
-}
-
-struct mt76_txwi_cache *
-mt76_get_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
-
- if (t)
- return t;
-
- return mt76_alloc_txwi(dev);
-}
-
-void
-mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- if (!t)
- return;
-
- spin_lock_bh(&dev->lock);
- list_add(&t->list, &dev->txwi_cache);
- spin_unlock_bh(&dev->lock);
-}
-EXPORT_SYMBOL_GPL(mt76_put_txwi);
-
-void mt76_tx_free(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t;
-
- while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
- DMA_TO_DEVICE);
-}
-
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
@@ -83,17 +14,27 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-static void
-mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+void
+mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ u8 tid;
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
!ieee80211_is_data_present(hdr->frame_control))
return;
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ txq = sta->txq[tid];
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ if (!mtxq->aggr)
+ return;
+
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
+EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
@@ -231,7 +172,32 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
-void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
+static void
+mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_wcid *wcid;
+ int pending;
+
+ if (info->tx_time_est)
+ return;
+
+ if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ if (wcid) {
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
+ rcu_read_unlock();
+}
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
struct ieee80211_hw *hw;
struct sk_buff_head list;
@@ -244,6 +210,8 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
}
#endif
+ mt76_tx_check_non_aql(dev, wcid_idx, skb);
+
if (!skb->prev) {
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_free_txskb(hw, skb);
@@ -256,6 +224,32 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+static int
+__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ bool *stop)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q;
+ bool non_aql;
+ int pending;
+ int idx;
+
+ non_aql = !info->tx_time_est;
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ if (idx < 0 || !sta || !non_aql)
+ return idx;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ q = dev->q_tx[qid];
+ q->entry[idx].wcid = wcid->idx;
+ pending = atomic_inc_return(&wcid->non_aql_packets);
+ if (stop && pending >= MT_MAX_NON_AQL_PKT)
+ *stop = true;
+
+ return idx;
+}
+
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -288,26 +282,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
- if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
- struct ieee80211_txq *txq;
- struct mt76_txq *mtxq;
- u8 tid;
-
- tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- txq = sta->txq[tid];
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- if (mtxq->aggr)
- mt76_check_agg_ssn(mtxq, skb);
- }
-
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
- dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ __mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
@@ -320,23 +301,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
-mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_tx_info *info;
bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
- skb = skb_dequeue(&mtxq->retry_q);
- if (skb) {
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
- if (ps && skb_queue_empty(&mtxq->retry_q))
- ieee80211_sta_set_buffered(txq->sta, tid, false);
-
- return skb;
- }
-
skb = ieee80211_tx_dequeue(phy->hw, txq);
if (!skb)
return NULL;
@@ -361,7 +332,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
- dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
+ __mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL);
}
void
@@ -373,7 +344,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
int i;
spin_lock_bh(&hwq->lock);
@@ -386,13 +357,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
continue;
do {
- skb = mt76_txq_dequeue(phy, mtxq, true);
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
break;
- if (mtxq->aggr)
- mt76_check_agg_ssn(mtxq, skb);
-
nframes--;
if (last_skb)
mt76_queue_ps_skb(dev, sta, last_skb, false);
@@ -413,26 +381,26 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
struct mt76_txq *mtxq)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
- struct mt76_queue *hwq = sq->q;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- int n_frames = 1, limit;
- struct ieee80211_tx_rate tx_rate;
- bool ampdu;
- bool probe;
+ int n_frames = 1;
+ bool stop = false;
int idx;
if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- skb = mt76_txq_dequeue(phy, mtxq, false);
+ if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
+ return 0;
+
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
return 0;
@@ -440,62 +408,39 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
- tx_rate = info->control.rates[0];
-
- probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
- ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
- limit = ampdu ? 16 : 3;
-
- if (ampdu)
- mt76_check_agg_ssn(mtxq, skb);
-
- idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
if (idx < 0)
return idx;
do {
- bool cur_ampdu;
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state))
+ return -EBUSY;
- if (probe)
+ if (stop)
break;
- if (test_bit(MT76_RESET, &phy->state))
- return -EBUSY;
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
- skb = mt76_txq_dequeue(phy, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
break;
info = IEEE80211_SKB_CB(skb);
- cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
-
- if (ampdu != cur_ampdu ||
- (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
- skb_queue_tail(&mtxq->retry_q, skb);
- break;
- }
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
- info->control.rates[0] = tx_rate;
-
- if (cur_ampdu)
- mt76_check_agg_ssn(mtxq, skb);
-
- idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
- txq->sta);
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
if (idx < 0)
- return idx;
+ break;
n_frames++;
- } while (n_frames < limit);
-
- if (!probe) {
- hwq->entry[idx].qid = sq - dev->q_tx;
- hwq->entry[idx].schedule = true;
- sq->swq_queued++;
- }
+ } while (1);
- dev->queue_ops->kick(dev, hwq);
+ dev->queue_ops->kick(dev, q);
return n_frames;
}
@@ -504,23 +449,23 @@ static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
struct mt76_dev *dev = phy->dev;
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *hwq = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
int ret = 0;
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
while (1) {
- if (sq->swq_queued >= 4)
- break;
-
- if (test_bit(MT76_RESET, &phy->state)) {
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state)) {
ret = -EBUSY;
break;
}
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
+
txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -538,32 +483,26 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
u8 tid = txq->tid;
mtxq->send_bar = false;
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
}
- ret += mt76_txq_send_burst(phy, sq, mtxq);
- ieee80211_return_txq(phy->hw, txq,
- !skb_queue_empty(&mtxq->retry_q));
+ ret += mt76_txq_send_burst(phy, q, mtxq);
+ ieee80211_return_txq(phy->hw, txq, false);
}
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
return ret;
}
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
- struct mt76_dev *dev = phy->dev;
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
if (qid >= 4)
return;
- if (sq->swq_queued >= 4)
- return;
-
rcu_read_lock();
do {
@@ -585,9 +524,9 @@ void mt76_txq_schedule_all(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
-void mt76_tx_tasklet(unsigned long data)
+void mt76_tx_worker(struct mt76_worker *w)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
@@ -612,8 +551,8 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
if (!txq)
continue;
+ hwq = dev->q_tx[mt76_txq_get_qid(txq)];
mtxq = (struct mt76_txq *)txq->drv_priv;
- hwq = mtxq->swq->q;
spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
@@ -630,38 +569,10 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
if (!test_bit(MT76_STATE_RUNNING, &phy->state))
return;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
-void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
-{
- struct ieee80211_hw *hw;
- struct mt76_txq *mtxq;
- struct sk_buff *skb;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
- hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_free_txskb(hw, skb);
- }
-}
-EXPORT_SYMBOL_GPL(mt76_txq_remove);
-
-void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
-
- skb_queue_head_init(&mtxq->retry_q);
-
- mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
-}
-EXPORT_SYMBOL_GPL(mt76_txq_init);
-
u8 mt76_ac_to_hwq(u8 ac)
{
static const u8 wmm_queue_map[] = {
@@ -678,13 +589,9 @@ u8 mt76_ac_to_hwq(u8 ac)
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
-int mt76_skb_adjust_pad(struct sk_buff *skb)
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
struct sk_buff *iter, *last = skb;
- u32 pad;
-
- /* Add zero pad of 4 - 7 bytes */
- pad = round_up(skb->len, 4) + 4 - skb->len;
/* The first packet of an A-MSDU burst keeps track of the whole burst
* length, so update its length and that of the last packet.
@@ -706,3 +613,16 @@ int mt76_skb_adjust_pad(struct sk_buff *skb)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ if (e->skb)
+ dev->drv->tx_complete_skb(dev, e);
+
+ spin_lock_bh(&q->lock);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
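The non-AQL accounting introduced above decrements a per-station counter without holding a lock and clamps it at zero; if a concurrent increment slips in between the two atomic operations, the cmpxchg simply fails and the newer value wins. A minimal user-space sketch of the same clamp-at-zero idiom, using C11 atomics rather than the kernel's atomic_t (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the atomic_dec_return() + atomic_cmpxchg() pair used for
 * wcid->non_aql_packets: decrement, and if the result went negative,
 * try once to reset the counter to zero. Losing that race is fine,
 * since it means another path already bumped the counter again.
 */
static void dec_clamped(atomic_int *ctr)
{
	int pending = atomic_fetch_sub(ctr, 1) - 1;

	if (pending < 0)
		atomic_compare_exchange_strong(ctr, &pending, 0);
}

int main(void)
{
	atomic_int ctr = 0;

	dec_clamped(&ctr);		/* underflows, then clamps */
	printf("%d\n", atomic_load(&ctr));
	return 0;
}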
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dcab5993763a..7d3f0a2e5fa0 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -497,8 +497,8 @@ mt76u_get_next_rx_entry(struct mt76_queue *q)
spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) {
- urb = q->entry[q->head].urb;
- q->head = (q->head + 1) % q->ndesc;
+ urb = q->entry[q->tail].urb;
+ q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -616,16 +616,16 @@ static void mt76u_complete_rx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
spin_lock_irqsave(&q->lock, flags);
- if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
+ if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
goto out;
- q->tail = (q->tail + 1) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
tasklet_schedule(&dev->usb.rx_tasklet);
out:
@@ -792,43 +792,27 @@ int mt76u_resume_rx(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
-static void mt76u_tx_tasklet(unsigned long data)
+static void mt76u_tx_worker(struct mt76_worker *w)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
struct mt76_queue_entry entry;
- struct mt76_sw_queue *sq;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 n_dequeued = 0, n_sw_dequeued = 0;
-
- sq = &dev->q_tx[i];
- q = sq->q;
+ q = dev->q_tx[i];
- while (q->queued > n_dequeued) {
- if (!q->entry[q->head].done)
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
break;
- if (q->entry[q->head].schedule) {
- q->entry[q->head].schedule = false;
- n_sw_dequeued++;
- }
-
- entry = q->entry[q->head];
- q->entry[q->head].done = false;
- q->head = (q->head + 1) % q->ndesc;
- n_dequeued++;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
}
- spin_lock_bh(&q->lock);
-
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
-
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
@@ -836,8 +820,6 @@ static void mt76u_tx_tasklet(unsigned long data)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
mt76_txq_schedule(&dev->phy, i);
if (dev->drv->tx_status_data &&
@@ -882,7 +864,7 @@ static void mt76u_complete_tx(struct urb *urb)
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
e->done = true;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
static int
@@ -909,11 +891,11 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
- u16 idx = q->tail;
+ u16 idx = q->head;
int err;
if (q->queued == q->ndesc)
@@ -932,7 +914,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
q->entry[idx].urb, mt76u_complete_tx,
&q->entry[idx]);
- q->tail = (q->tail + 1) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
q->queued++;
@@ -944,7 +926,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
struct urb *urb;
int err;
- while (q->first != q->tail) {
+ while (q->first != q->head) {
urb = q->entry[q->first].urb;
trace_submit_urb(dev, urb);
@@ -987,10 +969,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
int i, j, err;
for (i = 0; i <= MT_TXQ_PSD; i++) {
- INIT_LIST_HEAD(&dev->q_tx[i].swq);
-
if (i >= IEEE80211_NUM_ACS) {
- dev->q_tx[i].q = dev->q_tx[0].q;
+ dev->q_tx[i] = dev->q_tx[0];
continue;
}
@@ -1000,7 +980,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -1027,7 +1007,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
struct mt76_queue *q;
int j;
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
@@ -1040,6 +1020,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
{
int ret;
+ mt76_worker_disable(&dev->tx_worker);
+
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
@@ -1050,7 +1032,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
@@ -1058,32 +1040,26 @@ void mt76u_stop_tx(struct mt76_dev *dev)
usb_kill_urb(q->entry[j].urb);
}
- tasklet_kill(&dev->tx_tasklet);
-
/* On device removal we might queue skb's, but mt76u_tx_kick()
* will fail to submit urb, clean up those skb's manually.
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
- /* Assure we are in sync with killed tasklet. */
- spin_lock_bh(&q->lock);
- while (q->queued) {
- entry = q->entry[q->head];
- q->head = (q->head + 1) % q->ndesc;
- q->queued--;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
- }
- spin_unlock_bh(&q->lock);
+ mt76_queue_tx_complete(dev, q, &entry);
}
}
cancel_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
+ mt76_worker_enable(&dev->tx_worker);
+
mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
@@ -1133,8 +1109,8 @@ int mt76u_init(struct mt76_dev *dev,
mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
+ dev->tx_worker.fn = mt76u_tx_worker;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
- tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
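The head/tail swaps in the hunks above are consistent throughout: after this change, head is the producer index (where mt76u_tx_queue_skb() and the rx completion write new entries) and tail is the consumer index (where mt76u_get_next_rx_entry() and the new mt76_queue_tx_complete() drain them). In sketch form:

/* Ring convention after this patch (sketch):
 *
 *   produce at head:   idx = q->head;
 *                      q->head = (q->head + 1) % q->ndesc;
 *                      q->queued++;
 *
 *   consume at tail:   e = &q->entry[q->tail];
 *                      q->tail = (q->tail + 1) % q->ndesc;
 *                      q->queued--;
 *
 * Sharing one convention is what lets mt76_queue_tx_complete()
 * serve both the DMA and USB back-ends.
 */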
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index f53bb4ae5001..581964425468 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -110,4 +110,32 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
}
EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+int __mt76_worker_fn(void *ptr)
+{
+ struct mt76_worker *w = ptr;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_park()) {
+ kthread_parkme();
+ continue;
+ }
+
+ if (!test_and_clear_bit(MT76_WORKER_SCHEDULED, &w->state)) {
+ schedule();
+ continue;
+ }
+
+ set_bit(MT76_WORKER_RUNNING, &w->state);
+ set_current_state(TASK_RUNNING);
+ w->fn(w);
+ cond_resched();
+ clear_bit(MT76_WORKER_RUNNING, &w->state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index fd1a68820e0a..1c363ea9ab9c 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -10,6 +10,19 @@
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <net/mac80211.h>
+
+struct mt76_worker
+{
+ struct task_struct *task;
+ void (*fn)(struct mt76_worker *);
+ unsigned long state;
+};
+
+enum {
+ MT76_WORKER_SCHEDULED,
+ MT76_WORKER_RUNNING,
+};
#define MT76_INCR(_var, _size) \
(_var = (((_var) + 1) % (_size)))
@@ -45,4 +58,67 @@ mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
+int __mt76_worker_fn(void *ptr);
+
+static inline int
+mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
+ void (*fn)(struct mt76_worker *),
+ const char *name)
+{
+ const char *dev_name = wiphy_name(hw->wiphy);
+ int ret;
+
+ if (fn)
+ w->fn = fn;
+ w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
+ name, dev_name);
+
+ ret = PTR_ERR_OR_ZERO(w->task);
+ if (ret) {
+ w->task = NULL;
+ return ret;
+ }
+
+ wake_up_process(w->task);
+
+ return 0;
+}
+
+static inline void mt76_worker_schedule(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
+ !test_bit(MT76_WORKER_RUNNING, &w->state))
+ wake_up_process(w->task);
+}
+
+static inline void mt76_worker_disable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_park(w->task);
+ WRITE_ONCE(w->state, 0);
+}
+
+static inline void mt76_worker_enable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_unpark(w->task);
+ mt76_worker_schedule(w);
+}
+
+static inline void mt76_worker_teardown(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_stop(w->task);
+ w->task = NULL;
+}
+
#endif
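Taken together, these helpers give mt76 a minimal kthread-backed replacement for the tx tasklet. A sketch of the intended lifecycle, using only the functions declared above (my_tx_work, my_start, my_reset and my_remove are illustrative names, not driver symbols):

static void my_tx_work(struct mt76_worker *w)
{
	/* runs in the "mt76-tx <wiphy>" kthread each time the worker
	 * is scheduled; unlike a tasklet, it may sleep
	 */
}

static int my_start(struct ieee80211_hw *hw, struct mt76_worker *w)
{
	int ret = mt76_worker_setup(hw, w, my_tx_work, "tx");

	if (ret)
		return ret;

	mt76_worker_schedule(w);	/* kick an initial pass */
	return 0;
}

static void my_reset(struct mt76_worker *w)
{
	mt76_worker_disable(w);	/* parked: queues can be touched safely */
	/* ... reset hardware queues ... */
	mt76_worker_enable(w);	/* unpark and reschedule */
}

static void my_remove(struct mt76_worker *w)
{
	mt76_worker_teardown(w);	/* kthread_stop(), task set to NULL */
}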
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 300242bce799..20669eacb66e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -30,7 +30,7 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
-mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+mt7601u_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
int i, j;
@@ -73,21 +73,10 @@ mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7601u_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7601u_ampdu_stat);
static int
-mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+mt7601u_eeprom_param_show(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
@@ -131,18 +120,7 @@ mt7601u_eeprom_param_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
-}
-
-static const struct file_operations fops_eeprom_param = {
- .open = mt7601u_eeprom_param_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7601u_eeprom_param);
void mt7601u_init_debugfs(struct mt7601u_dev *dev)
{
@@ -157,6 +135,6 @@ void mt7601u_init_debugfs(struct mt7601u_dev *dev)
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file("regval", 0600, dir, dev, &fops_regval);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
- debugfs_create_file("eeprom_param", 0400, dir, dev, &fops_eeprom_param);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7601u_ampdu_stat_fops);
+ debugfs_create_file("eeprom_param", 0400, dir, dev, &mt7601u_eeprom_param_fops);
}
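DEFINE_SHOW_ATTRIBUTE(name) generates the open handler and file_operations that were deleted by hand above; condensed from include/linux/seq_file.h, it expands to roughly:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

which is why the show functions are renamed to a *_show suffix and the registration sites switch to &mt7601u_ampdu_stat_fops and &mt7601u_eeprom_param_fops.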
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index f6a0454abe04..09f931d4598c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -196,7 +196,7 @@ static void mt7601u_complete_rx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
@@ -241,7 +241,7 @@ static void mt7601u_complete_tx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
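The fallthrough conversions that recur through the rest of this series replace free-form comments, which only some compilers recognize, with the pseudo-keyword from include/linux/compiler_attributes.h. Simplified, it is defined as:

/* Simplified from include/linux/compiler_attributes.h */
#if __has_attribute(__fallthrough__)
# define fallthrough		__attribute__((__fallthrough__))
#else
# define fallthrough		do {} while (0)	/* fallthrough */
#endif

With the attribute form, -Wimplicit-fallthrough can verify every annotated case label; the do/while fallback keeps older compilers building.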
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index cad5e81fcf77..d2ee1aaa3c81 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -45,7 +45,7 @@ mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
return;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
@@ -419,7 +419,7 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
return;
case MT_PHY_TYPE_HT_GF:
status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index d863ab4a66c9..28db24a2b5e5 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -787,7 +787,7 @@ mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
switch (phy_mode) {
case MT_PHY_TYPE_OFDM:
tx_rate += 4;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_CCK:
reg = dev->rf_pa_mode[0];
break;
@@ -1210,7 +1210,7 @@ void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
/**
* mt7601u_set_tx_dac - set which tx DAC to use
* @dev: pointer to adapter structure
- * @path: DAC index, values are 0-based
+ * @dac: DAC index, values are 0-based
*/
void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
{
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 358ac8601333..b5a1b65c087c 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -235,11 +235,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
+ free_netdev(wl->monitor_dev);
return NULL;
}
priv = netdev_priv(wl->monitor_dev);
- if (!priv)
- return NULL;
priv->real_ndev = real_dev;
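The wilc1000 monitor fix applies two standard netdev rules: on register_netdevice() failure the caller still owns the device and must free_netdev() it, and netdev_priv() returns storage embedded in the net_device allocation, so it can never be NULL and the removed check was dead code. A condensed sketch of the shape (my_setup is an illustrative name):

	ndev = alloc_netdev(sizeof(*priv), "mon%d", NET_NAME_UNKNOWN,
			    my_setup);
	if (!ndev)
		return NULL;

	if (register_netdevice(ndev)) {
		free_netdev(ndev);	/* caller-owned until registered */
		return NULL;
	}

	priv = netdev_priv(ndev);	/* embedded, never NULL */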
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 3ece7b0b0392..351ff909ab1c 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -149,9 +149,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->dev = &func->dev;
wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
+ kfree(sdio_priv);
return -EPROBE_DEFER;
- else if (!IS_ERR(wilc->rtc_clk))
+ } else if (!IS_ERR(wilc->rtc_clk))
clk_prepare_enable(wilc->rtc_clk);
dev_info(&func->dev, "Driver initialized successfully\n");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 3f19e3f38a39..a18dac0aa6b6 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -112,9 +112,10 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->dev_irq_num = spi->irq;
wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
+ kfree(spi_priv);
return -EPROBE_DEFER;
- else if (!IS_ERR(wilc->rtc_clk))
+ } else if (!IS_ERR(wilc->rtc_clk))
clk_prepare_enable(wilc->rtc_clk);
return 0;
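Both wilc1000 probe fixes have the same shape: an early -EPROBE_DEFER return must release whatever the probe has already allocated, since probe will be retried from scratch later. A condensed sketch of the pattern (my_probe and my_priv are illustrative names):

static int my_probe(struct spi_device *spi)
{
	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	struct clk *clk;

	if (!priv)
		return -ENOMEM;

	clk = devm_clk_get(&spi->dev, "rtc_clk");
	if (PTR_ERR_OR_ZERO(clk) == -EPROBE_DEFER) {
		kfree(priv);		/* the fix: no leak on deferral */
		return -EPROBE_DEFER;
	}
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);

	return 0;
}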
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index f40d8c3c3d9e..f3ccbd2b1084 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
default:
pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
vif->vifid, vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
break;
default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index e013ebe3079c..bf6dbeb61842 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -15,7 +15,6 @@
#include "util.h"
#include "switchdev.h"
-#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
static bool slave_radar = true;
@@ -140,34 +139,13 @@ static void qtnf_netdev_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &ndev->stats);
if (!vif->stats64)
return;
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(vif->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, vif->stats64);
}
/* Netdev handler for transmission timeout.
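dev_fetch_sw_netstats() is the core helper that absorbs the deleted per-CPU loop; its body is essentially the removed code, condensed here from net/core/dev.c as a reference sketch:

void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *stats;
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			rx_bytes   = stats->rx_bytes;
			tx_packets = stats->tx_packets;
			tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes   += rx_bytes;
		s->tx_packets += tx_packets;
		s->tx_bytes   += tx_bytes;
	}
}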
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index eb67b66b846b..9a20c0f29078 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1091,9 +1091,9 @@ fw_load_exit:
put_device(&pdev->dev);
}
-static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
+static void qtnf_pearl_reclaim_tasklet_fn(struct tasklet_struct *t)
{
- struct qtnf_pcie_pearl_state *ps = (void *)data;
+ struct qtnf_pcie_pearl_state *ps = from_tasklet(ps, t, base.reclaim_tq);
qtnf_pearl_data_tx_reclaim(ps);
qtnf_en_txdone_irq(ps);
@@ -1145,8 +1145,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
return ret;
}
- tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
- (unsigned long)ps);
+ tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
netif_napi_add(&bus->mux_dev, &bus->mux_napi,
qtnf_pcie_pearl_rx_poll, 10);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index d1b850aa4657..4b87d3151017 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -1105,9 +1105,9 @@ fw_load_exit:
put_device(&pdev->dev);
}
-static void qtnf_reclaim_tasklet_fn(unsigned long data)
+static void qtnf_reclaim_tasklet_fn(struct tasklet_struct *t)
{
- struct qtnf_pcie_topaz_state *ts = (void *)data;
+ struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);
qtnf_topaz_data_tx_reclaim(ts);
}
@@ -1158,8 +1158,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
return ret;
}
- tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
- (unsigned long)ts);
+ tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
netif_napi_add(&bus->mux_dev, &bus->mux_napi,
qtnf_topaz_rx_poll, 10);
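The tasklet_setup()/from_tasklet() conversions repeated across the rest of this series all rely on the tasklet_struct being embedded in the driver state, so the callback can recover its context with container_of() instead of an unsigned long cast. The pattern, in sketch form (my_state names are illustrative):

struct my_state {
	struct tasklet_struct reclaim_tq;
	/* ... */
};

static void my_reclaim_fn(struct tasklet_struct *t)
{
	struct my_state *st = from_tasklet(st, t, reclaim_tq);

	/* reclaim using st; from_tasklet() is a container_of() wrapper */
}

static void my_init(struct my_state *st)
{
	tasklet_setup(&st->reclaim_tq, my_reclaim_fn);
}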
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index c1ac933349d1..8f860c14da58 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1291,7 +1291,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -1319,9 +1319,10 @@ static inline void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt2400pci_txstatus_tasklet(unsigned long data)
+static void rt2400pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
u32 reg;
/*
@@ -1347,17 +1348,18 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
}
}
-static void rt2400pci_tbtt_tasklet(unsigned long data)
+static void rt2400pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
-static void rt2400pci_rxdone_tasklet(unsigned long data)
+static void rt2400pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 0859adebd860..e940443c52ad 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1419,7 +1419,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -1447,9 +1447,10 @@ static inline void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt2500pci_txstatus_tasklet(unsigned long data)
+static void rt2500pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
u32 reg;
/*
@@ -1475,17 +1476,18 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
}
}
-static void rt2500pci_tbtt_tasklet(unsigned long data)
+static void rt2500pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
-static void rt2500pci_rxdone_tasklet(unsigned long data)
+static void rt2500pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a779fe771a55..fed6d21cd6ce 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -928,7 +928,7 @@ static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) {
case RATE_MODE_HT_GREENFIELD:
flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case RATE_MODE_HT_MIX:
flags |= IEEE80211_TX_RC_MCS;
break;
@@ -2567,7 +2567,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
break;
@@ -2576,7 +2576,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
break;
@@ -2768,10 +2768,10 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
break;
@@ -2780,10 +2780,10 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
break;
@@ -3005,10 +3005,10 @@ static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
break;
@@ -3017,10 +3017,10 @@ static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
break;
@@ -4216,14 +4216,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
rf->channel <= 14);
- /* fall-through */
+ fallthrough;
case 2:
/* Turn on secondary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
rf->channel <= 14);
- /* fall-through */
+ fallthrough;
case 1:
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
@@ -4241,12 +4241,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on tertiary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
- /* fall-through */
+ fallthrough;
case 2:
/* Turn on secondary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
- /* fall-through */
+ fallthrough;
case 1:
/* Turn on primary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
@@ -5438,10 +5438,10 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
- /* fall through */
+ fallthrough;
case 1:
default:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
@@ -5451,10 +5451,10 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
- /* fall through */
+ fallthrough;
case 1:
default:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 1);
@@ -10100,10 +10100,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
switch (rx_chains) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
- /* fall through */
+ fallthrough;
case 2:
spec->ht.mcs.rx_mask[1] = 0xff;
- /* fall through */
+ fallthrough;
case 1:
spec->ht.mcs.rx_mask[0] = 0xff;
spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 110bb391c372..862098f753d2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -210,18 +210,19 @@ static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-void rt2800mmio_pretbtt_tasklet(unsigned long data)
+void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ pretbtt_tasklet);
rt2x00lib_pretbtt(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
-void rt2800mmio_tbtt_tasklet(unsigned long data)
+void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u32 reg;
@@ -254,9 +255,10 @@ void rt2800mmio_tbtt_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
-void rt2800mmio_rxdone_tasklet(unsigned long data)
+void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
@@ -264,9 +266,10 @@ void rt2800mmio_rxdone_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
-void rt2800mmio_autowake_tasklet(unsigned long data)
+void rt2800mmio_autowake_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ autowake_tasklet);
rt2800mmio_wakeup(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800mmio_enable_interrupt(rt2x00dev,
@@ -307,9 +310,10 @@ static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
-void rt2800mmio_txstatus_tasklet(unsigned long data)
+void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
rt2800_txdone(rt2x00dev, 16);
@@ -593,7 +597,6 @@ void rt2800mmio_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index adcd9d54ac1c..05708950f24d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -126,11 +126,11 @@ void rt2800mmio_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc);
/* Interrupt functions */
-void rt2800mmio_txstatus_tasklet(unsigned long data);
-void rt2800mmio_pretbtt_tasklet(unsigned long data);
-void rt2800mmio_tbtt_tasklet(unsigned long data);
-void rt2800mmio_rxdone_tasklet(unsigned long data);
-void rt2800mmio_autowake_tasklet(unsigned long data);
+void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t);
+void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t);
+void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t);
+void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t);
+void rt2800mmio_autowake_tasklet(struct tasklet_struct *t);
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 4cc64fe481a7..d08b251ec5a2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -746,7 +746,6 @@ static void rt2800usb_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index ecc60d8cbb01..780be81863b6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -518,11 +518,11 @@ struct rt2x00lib_ops {
/*
* TX status tasklet handler.
*/
- void (*txstatus_tasklet) (unsigned long data);
- void (*pretbtt_tasklet) (unsigned long data);
- void (*tbtt_tasklet) (unsigned long data);
- void (*rxdone_tasklet) (unsigned long data);
- void (*autowake_tasklet) (unsigned long data);
+ void (*txstatus_tasklet) (struct tasklet_struct *t);
+ void (*pretbtt_tasklet) (struct tasklet_struct *t);
+ void (*tbtt_tasklet) (struct tasklet_struct *t);
+ void (*rxdone_tasklet) (struct tasklet_struct *t);
+ void (*autowake_tasklet) (struct tasklet_struct *t);
/*
* Device init handlers.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 8c6d3099b19d..b04f76551ca4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1167,9 +1167,8 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
#define RT2X00_TASKLET_INIT(taskletname) \
if (rt2x00dev->ops->lib->taskletname) { \
- tasklet_init(&rt2x00dev->taskletname, \
- rt2x00dev->ops->lib->taskletname, \
- (unsigned long)rt2x00dev); \
+ tasklet_setup(&rt2x00dev->taskletname, \
+ rt2x00dev->ops->lib->taskletname); \
}
RT2X00_TASKLET_INIT(txstatus_tasklet);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index eefce76fc99b..02da5dd37ddd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2130,7 +2130,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
case 6: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -2190,34 +2190,38 @@ static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt61pci_txstatus_tasklet(unsigned long data)
+static void rt61pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
+
rt61pci_txdone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
}
-static void rt61pci_tbtt_tasklet(unsigned long data)
+static void rt61pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
}
-static void rt61pci_rxdone_tasklet(unsigned long data)
+static void rt61pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
}
-static void rt61pci_autowake_tasklet(unsigned long data)
+static void rt61pci_autowake_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ autowake_tasklet);
rt61pci_wakeup(rt2x00dev);
rt2x00mmio_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
@@ -2953,7 +2957,6 @@ static void rt61pci_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index e908c303b677..e69793773d87 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2373,7 +2373,6 @@ static void rt73usb_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index ba3286f732cc..2477e18c7cae 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -260,20 +260,20 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
- mapping = pci_map_single(priv->pdev,
- skb_tail_pointer(new_skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&priv->pdev->dev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, mapping)) {
kfree_skb(new_skb);
dev_err(&priv->pdev->dev, "RX DMA map error\n");
goto done;
}
- pci_unmap_single(priv->pdev,
+ dma_unmap_single(&priv->pdev->dev,
*((dma_addr_t *)skb->cb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
skb_put(skb, flags & 0xFFF);
rx_status.antenna = (flags2 >> 15) & 1;
@@ -355,8 +355,8 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
ring->idx = (ring->idx + 1) % ring->entries;
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, le32_to_cpu(entry->tx_buf),
+ skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
@@ -473,10 +473,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
- mapping = pci_map_single(priv->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, mapping)) {
kfree_skb(skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
return;
@@ -1004,8 +1004,9 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
else
priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
- priv->rx_ring = pci_zalloc_consistent(priv->pdev, priv->rx_ring_sz * 32,
- &priv->rx_ring_dma);
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ &priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
return -ENOMEM;
@@ -1018,20 +1019,23 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
dma_addr_t *mapping;
entry = priv->rx_ring + priv->rx_ring_sz*i;
if (!skb) {
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
return -ENOMEM;
}
priv->rx_buf[i] = skb;
mapping = (dma_addr_t *)skb->cb;
- *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *mapping = dma_map_single(&priv->pdev->dev,
+ skb_tail_pointer(skb), MAX_RX_SIZE,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
kfree_skb(skb);
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
return -ENOMEM;
}
@@ -1054,14 +1058,13 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
if (!skb)
continue;
- pci_unmap_single(priv->pdev,
- *((dma_addr_t *)skb->cb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, *((dma_addr_t *)skb->cb),
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
kfree_skb(skb);
}
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
priv->rx_ring = NULL;
}
@@ -1073,8 +1076,8 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
dma_addr_t dma;
int i;
- ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries,
- &dma);
+ ring = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ring) * entries,
+ &dma, GFP_KERNEL);
if (!ring || (unsigned long)ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
prio);
@@ -1103,14 +1106,15 @@ static void rtl8180_free_tx_ring(struct ieee80211_hw *dev, unsigned int prio)
struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, le32_to_cpu(entry->tx_buf),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
- pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
- ring->desc, ring->dma);
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(*ring->desc) * ring->entries, ring->desc,
+ ring->dma);
ring->desc = NULL;
}
@@ -1754,8 +1758,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
goto err_free_reg;
}
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
+ (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
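The rtl8180 hunks are a mechanical move from the legacy pci_* DMA wrappers to the generic DMA API; the correspondence is one-to-one:

/* Legacy PCI wrapper                    Generic DMA API
 * pci_map_single(pdev, p, n,            dma_map_single(&pdev->dev, p, n,
 *                PCI_DMA_FROMDEVICE)                   DMA_FROM_DEVICE)
 * pci_dma_mapping_error(pdev, a)        dma_mapping_error(&pdev->dev, a)
 * pci_zalloc_consistent(pdev, n, &d)    dma_alloc_coherent(&pdev->dev, n,
 *                                                          &d, GFP_KERNEL)
 * pci_free_consistent(pdev, n, p, d)    dma_free_coherent(&pdev->dev, n,
 *                                                         p, d)
 * pci_set_dma_mask(pdev, m)             dma_set_mask(&pdev->dev, m)
 * pci_set_consistent_dma_mask(pdev, m)  dma_set_coherent_mask(&pdev->dev, m)
 */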
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 19efae462a24..5cd7ef3625c5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
- usb_free_urb(urb);
goto error;
}
@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
error:
+ usb_free_urb(urb);
return ret;
}
@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_rx_urb *rx_urb;
struct rtl8xxxu_tx_urb *tx_urb;
+ struct sk_buff *skb;
unsigned long flags;
int ret, i;
@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
rx_urb->hw = hw;
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+ if (ret) {
+ if (ret != -ENOMEM) {
+ skb = (struct sk_buff *)rx_urb->urb.context;
+ dev_kfree_skb(skb);
+ }
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
+ }
}
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
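The rtl8xxxu_submit_int_urb() change moves usb_free_urb() to the common exit so it runs exactly once on every path; dropping the caller's reference is safe even on success, because the anchored, submitted URB is kept alive by the USB core's own references. Condensed, the resulting flow is:

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_unanchor_urb(urb);
		goto error;
	}
	/* ... enable interrupts in REG_USB_HIMR ... */
error:
	usb_free_urb(urb);	/* drops only the local reference */
	return ret;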
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index a4489b9302d4..6e8bd99e8911 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -195,8 +195,8 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
} else {
if (get_rf_type(rtlphy) == RF_1T2R ||
get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "1T2R or 2T2R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "1T2R or 2T2R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
@@ -204,7 +204,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_highest =
cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
@@ -436,6 +436,10 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
}
}
+static void rtl_watchdog_wq_callback(struct work_struct *work);
+static void rtl_fwevt_wq_callback(struct work_struct *work);
+static void rtl_c2hcmd_wq_callback(struct work_struct *work);
+
static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -454,17 +458,14 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
}
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
- (void *)rtl_watchdog_wq_callback);
+ rtl_watchdog_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
- (void *)rtl_ips_nic_off_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
- (void *)rtl_swlps_wq_callback);
+ rtl_ips_nic_off_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_work, rtl_swlps_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
- (void *)rtl_swlps_rfon_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
- (void *)rtl_fwevt_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
- (void *)rtl_c2hcmd_wq_callback);
+ rtl_swlps_rfon_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
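Dropping the (void *) casts in _rtl_init_deferred_work() is more than style: INIT_DELAYED_WORK() expects a callback with the exact work_func_t signature, and the casts hid any mismatch from the compiler (and break control-flow-integrity checking). Each callback then recovers its own context; in sketch form:

static void rtl_watchdog_wq_callback(struct work_struct *work)
{
	struct rtl_works *rtlworks = container_of(work, struct rtl_works,
						  watchdog_wq.work);

	/* ... body unchanged; rtlworks replaces the casted argument ... */
}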
@@ -1324,7 +1325,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
mac->link_state = MAC80211_LINKING;
/* Dual mac */
@@ -1385,7 +1386,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
if (mac->act_scanning)
return false;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
"%s ACT_ADDBAREQ From :%pM\n",
is_tx ? "Tx" : "Rx", hdr->addr2);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
@@ -1400,8 +1401,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
rcu_read_lock();
sta = rtl_find_sta(hw, hdr->addr3);
if (sta == NULL) {
- RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV,
- DBG_DMESG, "sta is NULL\n");
+ rtl_dbg(rtlpriv, COMP_SEND | COMP_RECV,
+ DBG_DMESG, "sta is NULL\n");
rcu_read_unlock();
return true;
}
@@ -1428,13 +1429,13 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
break;
case ACT_ADDBARSP:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_DELBA:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "ACT_ADDBADEL From :%pM\n", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "ACT_ADDBADEL From :%pM\n", hdr->addr2);
break;
}
break;
@@ -1455,7 +1456,7 @@ static void setup_special_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc,
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
rtlpriv, type);
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@@ -1519,9 +1520,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
/* 68 : UDP BOOTP client
* 67 : UDP BOOTP server
*/
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV),
+ DBG_DMESG, "dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx");
if (is_tx)
setup_special_tx(rtlpriv, ppsc,
@@ -1540,12 +1541,12 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
rtlpriv->btcoexist.btc_info.in_4way = true;
rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
if (is_tx) {
rtlpriv->ra.is_special_data = true;
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
ppsc->last_delaylps_stamp_jiffies = jiffies;
setup_special_tx(rtlpriv, ppsc, PACKET_EAPOL);
@@ -1583,12 +1584,12 @@ static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (ack) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: ack\n");
info->flags |= IEEE80211_TX_STAT_ACK;
} else {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: not ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: not ack\n");
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(hw, skb);
@@ -1626,8 +1627,8 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw,
tx_report->last_sent_time = jiffies;
tx_info->sn = sn;
tx_info->send_time = tx_report->last_sent_time;
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Send TX-Report sn=0x%X\n", sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Send TX-Report sn=0x%X\n", sn);
return sn;
}
@@ -1674,9 +1675,9 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
break;
}
}
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
- st, sn, retry);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+ st, sn, retry);
}
EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
@@ -1689,9 +1690,9 @@ bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
return true;
if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
- "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
- tx_report->last_sent_sn, tx_report->last_recv_sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+ "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
+ tx_report->last_sent_sn, tx_report->last_recv_sn);
return true; /* 3 sec. (timeout) seen as acked */
}
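The 3-second check above uses the wrap-safe jiffies idiom: time_before(a, b) compares two jiffies values correctly even across counter wrap-around. A self-contained sketch of the same test:

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* True once more than 3 seconds have elapsed since stamp. */
	static bool stamp_older_than_3s(unsigned long stamp)
	{
		return time_before(stamp + 3 * HZ, jiffies);
	}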
@@ -1707,8 +1708,8 @@ void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
if (rtl_check_tx_report_acked(hw))
break;
usleep_range(1000, 2000);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
}
}
@@ -1770,9 +1771,9 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- *ssn);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ *ssn);
tid_data->agg.agg_state = RTL_AGG_START;
@@ -1788,8 +1789,8 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1828,8 +1829,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
return 0;
@@ -1844,8 +1845,8 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1865,8 +1866,8 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1886,9 +1887,9 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
&ctrl_agg_size, &agg_size);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
- reject_agg, ctrl_agg_size, agg_size);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+ reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
(ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
@@ -1976,9 +1977,9 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw)
list_del(&entry->list);
rtlpriv->scan_list.num--;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "BSSID=%pM is expire in scan list (total=%d)\n",
- entry->bssid, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "BSSID=%pM is expire in scan list (total=%d)\n",
+ entry->bssid, rtlpriv->scan_list.num);
kfree(entry);
}
@@ -2012,9 +2013,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
list_del_init(&entry->list);
entry_found = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Update BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Update BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
break;
}
}
@@ -2028,9 +2029,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
rtlpriv->scan_list.num++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Add BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Add BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
}
entry->age = jiffies;
@@ -2042,11 +2043,10 @@ label_err:
}
EXPORT_SYMBOL(rtl_collect_scan_list);
-void rtl_watchdog_wq_callback(void *data)
+static void rtl_watchdog_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- watchdog_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ watchdog_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
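The converted callbacks, now static since their prototypes are dropped from base.h later in this patch, recover their context with plain container_of(). Note the member argument is watchdog_wq.work: the work_struct handed to a delayed-work callback is the .work field embedded in the delayed_work, not the delayed_work itself. A standalone sketch with an illustrative struct:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <net/mac80211.h>

	struct rtl_works_sketch {	/* illustrative, not the driver's layout */
		struct ieee80211_hw *hw;
		struct delayed_work watchdog_wq;
	};

	static void watchdog_sketch_callback(struct work_struct *work)
	{
		struct rtl_works_sketch *w =
			container_of(work, struct rtl_works_sketch,
				     watchdog_wq.work);

		(void)w->hw;	/* context recovered without void-pointer casts */
	}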
@@ -2147,9 +2147,9 @@ void rtl_watchdog_wq_callback(void *data)
if (rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod > 8 ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
else
- rtl_lps_enter(hw);
+ rtl_lps_enter(hw, true);
label_lps_done:
;
@@ -2190,8 +2190,8 @@ label_lps_done:
if ((rtlpriv->link_info.bcn_rx_inperiod +
rtlpriv->link_info.num_rx_inperiod) == 0) {
rtlpriv->link_info.roam_times++;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "AP off for %d s\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "AP off for %d s\n",
(rtlpriv->link_info.roam_times * 2));
/* if we can't recv beacon for 10s,
@@ -2239,10 +2239,10 @@ void rtl_watch_dog_timer_callback(struct timer_list *t)
jiffies + MSECS(RTL_WATCH_DOG_TIME));
}
-void rtl_fwevt_wq_callback(void *data)
+static void rtl_fwevt_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ fwevt_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2304,11 +2304,11 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
switch (cmd_id) {
case C2H_DBG:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
break;
case C2H_TXBF:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_TXBF!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_TXBF!!\n");
break;
case C2H_TX_REPORT:
rtl_tx_report_handler(hw, cmd_buf, cmd_len);
@@ -2318,20 +2318,20 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len);
break;
case C2H_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_INFO!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_INFO!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
case C2H_BT_MP:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_MP!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_MP!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
default:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
break;
}
}
@@ -2355,8 +2355,8 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
if (!skb)
break;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
- *((u8 *)skb->cb));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
+ *((u8 *)skb->cb));
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG,
"C2H data: ", skb->data, skb->len);
@@ -2368,11 +2368,10 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
}
}
-void rtl_c2hcmd_wq_callback(void *data)
+static void rtl_c2hcmd_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- c2hcmd_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ c2hcmd_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_c2hcmd_launcher(hw, 1);
@@ -2443,7 +2442,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* fall through */
+ fallthrough;
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
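The "/* fall through */" comment becomes the fallthrough pseudo-keyword, which the kernel defines (via __attribute__((fallthrough)) where the compiler supports it) so that -Wimplicit-fallthrough can check the annotation instead of parsing comments. A sketch of the idiom with illustrative values:

	#include <linux/compiler_attributes.h>

	static int smps_flags_sketch(int mode)
	{
		int flags = 0;

		switch (mode) {
		case 0:
			flags |= 1;
			fallthrough;	/* deliberate: mode 0 also takes case 1 */
		case 1:
			flags |= 2;
			break;
		default:
			break;
		}
		return flags;
	}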
@@ -2701,29 +2700,29 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap5_6, 3) == 0) ||
vendor == PEER_ATH) {
vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
(memcmp(mac->bssid, ap4_5, 3) == 0) ||
(memcmp(mac->bssid, ap4_1, 3) == 0) ||
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
(memcmp(mac->bssid, ap3_2, 3) == 0) ||
(memcmp(mac->bssid, ap3_3, 3) == 0) ||
vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
vendor = PEER_BROAD;
} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
vendor == PEER_MARV) {
vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
}
mac->vendor = vendor;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index fa92e29fffda..0e4f8a8ae3a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -108,9 +108,6 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
int rtl_rx_agg_stop(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv);
-void rtl_watchdog_wq_callback(void *data);
-void rtl_fwevt_wq_callback(void *data);
-void rtl_c2hcmd_wq_callback(void *data);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 658ff425c256..edcd3c879f7f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -54,8 +54,8 @@ static u8 btc8192e2ant_bt_rssi_state(struct btc_coexist *btcoexist,
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -118,8 +118,8 @@ static u8 btc8192e2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -183,26 +183,26 @@ static void btc8192e2ant_monitor_bt_enable_disable(struct btc_coexist
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -398,12 +398,12 @@ static void btc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -418,9 +418,9 @@ static void btc8192e2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -526,8 +526,8 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "No BT link exists!!!\n");
return algorithm;
}
@@ -542,29 +542,29 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "HID only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(HS) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(EDR) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR;
}
@@ -573,22 +573,22 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"SCO + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"SCO + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
@@ -598,14 +598,14 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
if (stack_info->num_of_hid >= 2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID*2 + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
@@ -613,29 +613,29 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -645,34 +645,34 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID + A2DP ==> HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -682,15 +682,15 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -702,14 +702,14 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "ErrorSCO+HID+A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO+HID+A2DP+PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO+HID+A2DP+PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -731,10 +731,10 @@ static void btc8192e2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -747,9 +747,9 @@ static void btc8192e2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_bt_pwr_lvl, h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -759,15 +759,15 @@ static void btc8192e2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power level = %d\n",
- force_exec ? "force to" : "", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ force_exec ? "force to" : "", dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
btc8192e2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
@@ -785,9 +785,9 @@ static void btc8192e2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
@@ -799,17 +799,17 @@ static void btc8192e2ant_bt_auto_report(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
((enable_auto_report) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -825,16 +825,16 @@ static void btc8192e2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -854,8 +854,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -863,8 +863,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialized, we can use coex_dm->btRf0x1eBackup
*/
if (btcoexist->initialized) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -877,17 +877,17 @@ static void btc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -905,8 +905,8 @@ static void btc8192e2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -926,22 +926,22 @@ static void btc8192e2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -961,8 +961,8 @@ static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
/* BB AGC Gain Table */
if (agc_table_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -970,8 +970,8 @@ static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -986,17 +986,17 @@ static void btc8192e2ant_agc_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- ((agc_table_en) ? "Enable" : "Disable"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ ((agc_table_en) ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en,
- coex_dm->cur_agc_table_en);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -1012,20 +1012,20 @@ static void btc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -1035,30 +1035,30 @@ static void btc8192e2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
- (force_exec ? "force to" : ""), val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+ (force_exec ? "force to" : ""), val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1113,9 +1113,9 @@ static void btc8192e2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1125,18 +1125,18 @@ static void btc8192e2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d ",
- coex_dm->pre_ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->cur_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1166,11 +1166,11 @@ static void btc8192e2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
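The converted print reconstructs the 5-byte H2C payload as byte 0 followed by bytes 1..4 packed into one 32-bit value, matching the "0x%x%08x" format. A sketch of that packing, assuming the same byte order; the helper name is hypothetical:

	#include <linux/types.h>

	static u32 h2c_tail_as_u32(const u8 *p)
	{
		/* bytes 1..4, most significant first, for the %08x half */
		return (u32)p[1] << 24 | (u32)p[2] << 16 | (u32)p[3] << 8 | p[4];
	}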
@@ -1196,20 +1196,20 @@ static void btc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1337,8 +1337,8 @@ static void btc8192e2ant_set_switch_ss_type(struct btc_coexist *btcoexist,
u8 mimops = BTC_MIMO_PS_DYNAMIC;
u32 dis_ra_mask = 0x0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], REAL set SS Type = %d\n", ss_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], REAL set SS Type = %d\n", ss_type);
dis_ra_mask = btc8192e2ant_decide_ra_mask(btcoexist, ss_type,
coex_dm->cur_ra_mask_type);
@@ -1372,9 +1372,9 @@ static void btc8192e2ant_switch_ss_type(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_ss_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec ? "force to" : ""), new_ss_type);
coex_dm->cur_ss_type = new_ss_type;
if (!force_exec) {
@@ -1456,8 +1456,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ||
@@ -1491,8 +1491,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT non connected-idle!!\n");
btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
btc8192e2ant_coex_table_with_type(btcoexist,
@@ -1517,8 +1517,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT connected-idle!!\n");
btc8192e2ant_switch_ss_type(btcoexist,
NORMAL_EXEC, 2);
@@ -1543,12 +1543,12 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Idle + BT Busy!!\n");
btc8192e2ant_switch_ss_type(btcoexist,
NORMAL_EXEC, 1);
@@ -1580,13 +1580,13 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
static int up, dn, m, n, wait_cnt;
u8 retry_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1669,11 +1669,11 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_cnt = %d\n", retry_cnt);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
- up, dn, m, n, wait_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
wait_cnt++;
/* no retry in the last 2-second duration */
if (retry_cnt == 0) {
@@ -1688,8 +1688,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
n = 3;
up = 0;
dn = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]Increase wifi duration!!\n");
}
} else if (retry_cnt <= 3) {
up--;
@@ -1711,8 +1711,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Reduce wifi duration for retry<3\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Reduce wifi duration for retry<3\n");
}
} else {
if (wait_cnt == 1)
@@ -1727,12 +1727,12 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Decrease wifi duration for retryCounter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
}
/* if current PsTdma not match with
@@ -1742,10 +1742,10 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, ");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "curPsTdma=%d, recordPsTdma=%d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, ");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curPsTdma=%d, recordPsTdma=%d\n",
coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
@@ -1756,8 +1756,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, coex_dm->tdma_adj_type);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
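For context on the messages above: TdmaDurationAdjust() is a hysteresis loop driven by the BT retry counter — stretches with no retries gradually widen the WiFi slot, repeated retries shrink it, and a pending change is skipped while scan/link/roam is in progress. A schematic sketch only; the driver's thresholds and state machine are more involved:

	/* Schematic hysteresis, not the driver's exact constants. */
	static void tdma_adjust_sketch(u8 retry_cnt, int *up, int *dn,
				       int *wifi_dur)
	{
		if (retry_cnt == 0) {		/* clean polls favour WiFi */
			(*up)++;
			*dn = 0;
			if (*up >= 3 && *wifi_dur < 3) {
				(*wifi_dur)++;	/* increase wifi duration */
				*up = 0;
			}
		} else {			/* BT retries: give time back to BT */
			(*dn)++;
			*up = 0;
			if (*dn >= 2 && *wifi_dur > 1) {
				(*wifi_dur)--;	/* reduce wifi duration */
				*dn = 0;
			}
		}
	}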
@@ -1962,8 +1962,8 @@ static void btc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
bt_rssi_state == BTC_RSSI_STATE_STAY_LOW) &&
(wifi_rssi_state == BTC_RSSI_STATE_LOW ||
wifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
@@ -2464,105 +2464,105 @@ static void btc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (btc8192e2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
coex_dm->pre_algorithm,
coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8192E_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO\n");
btc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
btc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID\n");
btc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP\n");
btc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)\n");
btc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HS mode\n");
btc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN+A2DP\n");
btc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP\n");
btc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = unknown!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = unknown!!\n");
/* btc8192e2ant_coex_all_off(btcoexist); */
break;
}
@@ -2577,8 +2577,8 @@ static void btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
u16 u16tmp = 0;
u8 u8tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
if (backup) {
/* backup rf 0x1e value */
@@ -2659,8 +2659,8 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8192e2ant_init_coex_dm(btcoexist);
}
@@ -2876,13 +2876,13 @@ void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8192e2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
}
}
@@ -2892,12 +2892,12 @@ void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2907,11 +2907,11 @@ void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_SCAN_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
@@ -2919,11 +2919,11 @@ void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -2940,11 +2940,11 @@ void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2964,10 +2964,10 @@ void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2978,8 +2978,8 @@ void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -2998,19 +2998,19 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3028,8 +3028,8 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and the info is lost.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bit1, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit1, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3045,8 +3045,8 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3)) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bit3, BT NOT ignore Wlan active!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit3, BT NOT ignore Wlan active!\n");
btc8192e2ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3102,25 +3102,25 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3145,7 +3145,7 @@ void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btc8192e2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_btc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3159,29 +3159,29 @@ void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "=======================Periodical=======================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "=======================Periodical=======================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
}
if (!btcoexist->auto_report_2ant) {
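/*
 * Editor's sketch (not part of the patch): every hunk in this series makes
 * the same mechanical change, RT_TRACE() -> rtl_dbg(), re-indenting the
 * continuation lines because the new name is one column shorter. The
 * argument shape is unchanged: (priv, component mask, verbosity level,
 * printf-style format, ...). The stand-alone model below illustrates that
 * call shape only; the struct layout, mask bit, level value, and filtering
 * policy here are assumptions for illustration -- the real macro is
 * defined in drivers/net/wireless/realtek/rtlwifi/debug.h.
 */
#include <stdarg.h>
#include <stdio.h>

struct rtl_priv { unsigned long dbg_comp; int dbg_level; }; /* hypothetical */
#define COMP_BT_COEXIST	(1UL << 0)	/* hypothetical component bit */
#define DBG_LOUD	4		/* hypothetical verbosity level */

static void rtl_dbg(struct rtl_priv *rtlpriv, unsigned long comp,
		    int level, const char *fmt, ...)
{
	va_list args;

	/* Emit only when the component is enabled at this verbosity. */
	if (!(rtlpriv->dbg_comp & comp) || level > rtlpriv->dbg_level)
		return;
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	struct rtl_priv priv = { COMP_BT_COEXIST, DBG_LOUD };

	/* Same call shape as the converted sites above. */
	rtl_dbg(&priv, COMP_BT_COEXIST, DBG_LOUD,
		"[BTCoex], FW write 0x66 = 0x%x\n", 0x120406);
	return 0;
}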
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 528e442f25a4..70492929d7e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -468,9 +468,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -496,20 +496,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -520,8 +520,8 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
(force_exec ? "force to" : ""),
val0x6c0, val0x6c4, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
@@ -636,9 +636,9 @@ halbtc8723b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -648,15 +648,15 @@ static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -682,8 +682,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
if (ap_enable) {
if ((byte1 & BIT4) && !(byte1 & BIT5)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW for 1Ant AP mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -704,13 +704,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -731,22 +731,22 @@ static void halbtc8723b1ant_lps_rpwm(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -762,8 +762,8 @@ static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -861,16 +861,16 @@ static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist,
0x49d);
cnt_bt_cal_chk++;
if (u8tmp & BIT(0)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], ########### BT is calibrating (wait cnt=%d) ###########\n",
- cnt_bt_cal_chk);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ########### BT is calibrating (wait cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
mdelay(50);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], ********** BT is NOT calibrating (wait cnt=%d)**********\n",
- cnt_bt_cal_chk);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ********** BT is NOT calibrating (wait cnt=%d)**********\n",
+ cnt_bt_cal_chk);
break;
}
}
@@ -1426,8 +1426,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if ((wifi_status ==
BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
@@ -1451,8 +1451,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->ps_tdma_du_adj_type = 2;
@@ -1490,8 +1490,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -1523,8 +1523,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, once retry count > 3, to reduce
@@ -1548,8 +1548,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1690,10 +1690,10 @@ static void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist
bt_disabled = true;
}
if (coex_sta->bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (coex_sta->bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (coex_sta->bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
coex_sta->bt_disabled = bt_disabled;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
@@ -2029,15 +2029,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false, ap_enable = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -2051,8 +2051,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
halbtc8723b1ant_action_wifi_connected_special_packet(
btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -2152,30 +2152,30 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 num_of_wifi_link = 0;
u32 wifi_bw;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
if (coex_sta->bt_whck_test) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
halbtc8723b1ant_action_bt_whck_test(btcoexist);
return;
}
@@ -2276,8 +2276,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2314,8 +2314,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u32 u32tmp = 0;
u8 u8tmpa = 0, u8tmpb = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
/* 0xf0[15:12] --> Chip Cut information */
coex_sta->cut_version =
@@ -2347,9 +2347,9 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
/**************************************************************
@@ -2363,8 +2363,8 @@ void ex_btc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
u16 u16tmp = 0x0;
u32 value;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "xxxxxxxxxxxxxxxx Execute 8723b 1-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "xxxxxxxxxxxxxxxx Execute 8723b 1-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
btcoexist->stop_coex_dm = true;
@@ -2436,8 +2436,8 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2718,8 +2718,8 @@ void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
@@ -2729,8 +2729,8 @@ void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
@@ -2747,12 +2747,12 @@ void ex_btc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2773,8 +2773,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
if (type == BTC_SCAN_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
/* Force antenna setup for no scan result issue */
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
@@ -2783,13 +2783,13 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
&coex_sta->scan_ap_num);
@@ -2824,8 +2824,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected)
/* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
@@ -2833,8 +2833,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
/* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected)
/* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
@@ -2866,13 +2866,13 @@ void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
FORCE_EXEC, false, false);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
coex_dm->arp_cnt = 0;
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
@@ -2896,12 +2896,12 @@ void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2927,8 +2927,8 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (type == BTC_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
/* Force antenna setup for no scan result issue */
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
@@ -2958,8 +2958,8 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->backup_ampdu_max_time =
btcoexist->btc_read_1byte(btcoexist, 0x456);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
coex_dm->arp_cnt = 0;
btcoexist->btc_write_1byte(btcoexist, 0x6cd, 0x0); /* CCK Tx */
@@ -2986,10 +2986,10 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3014,12 +3014,12 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (type == BTC_PACKET_DHCP || type == BTC_PACKET_EAPOL ||
type == BTC_PACKET_ARP) {
if (type == BTC_PACKET_ARP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet ARP notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet ARP notify\n");
coex_dm->arp_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ARP Packet Count = %d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
coex_dm->arp_cnt);
if ((coex_dm->arp_cnt >= 10) && (!under_4way))
@@ -3031,13 +3031,13 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
coex_sta->wifi_is_high_pri_task = true;
} else {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet DHCP or EAPOL notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet DHCP or EAPOL notify\n");
}
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet [Type = %d] notify\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet [Type = %d] notify\n",
type);
}
@@ -3065,8 +3065,8 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet(%d) notify\n", type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
}
}
@@ -3087,19 +3087,19 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
/* if 0xff, it means BT is under WHCK test */
@@ -3142,8 +3142,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* BT info is responded by BT FW and BT RF REG
* 0x3C != 0x15 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x15);
@@ -3158,8 +3158,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and the info is lost.
*/
if (coex_sta->bt_info_ext & BIT1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3173,8 +3173,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (coex_sta->bt_info_ext & BIT3) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
halbtc8723b1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3280,29 +3280,29 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
/* connection exists but is not busy */
} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3322,16 +3322,16 @@ void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
u32 u32tmp;
u8 u8tmpa, u8tmpb, u8tmpc;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF Status notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF Status notify\n");
if (type == BTC_RF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned ON!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned ON!!\n");
btcoexist->stop_coex_dm = false;
} else if (type == BTC_RF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned OFF!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned OFF!!\n");
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
@@ -3347,9 +3347,9 @@ void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmpc = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x, 0x76e=0x%x\n",
- u32tmp, u8tmpa, u8tmpb, u8tmpc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x, 0x76e=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb, u8tmpc);
}
}
@@ -3357,7 +3357,7 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -3379,11 +3379,11 @@ void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
FORCE_EXEC, false, true);
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
@@ -3401,8 +3401,8 @@ void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
coex_sta->under_lps = false;
btcoexist->stop_coex_dm = true;
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3414,8 +3414,8 @@ void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], *****************Coex DM Reset****************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], *****************Coex DM Reset****************\n");
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3426,8 +3426,8 @@ void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (!btcoexist->auto_report_1ant) {
halbtc8723b1ant_query_bt_info(btcoexist);
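/*
 * Editor's sketch (not part of the patch): the hunks below fall inside
 * btc8723b2ant_bt_rssi_state()/btc8723b2ant_wifi_rssi_state(), whose
 * "switch to"/"stay at" messages come from a hysteresis band: rising a
 * level requires rssi >= thresh + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT,
 * while falling back only requires rssi < thresh, so the state does not
 * flap near the threshold. The reduced two-level model below shows that
 * pattern; the names and signature are illustrative assumptions, not the
 * driver's API.
 */
enum rssi_state { RSSI_STATE_LOW, RSSI_STATE_HIGH };

static enum rssi_state rssi_state_update(enum rssi_state prev, int rssi,
					 int thresh, int tol)
{
	if (prev == RSSI_STATE_LOW)
		/* must clear thresh + tol before switching to High */
		return rssi >= thresh + tol ? RSSI_STATE_HIGH : RSSI_STATE_LOW;
	/* already High: drop to Low only when rssi falls below thresh */
	return rssi < thresh ? RSSI_STATE_LOW : RSSI_STATE_HIGH;
}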
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 9f7b9af5bdcd..fb57cc8b2e47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -53,28 +53,28 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -83,12 +83,12 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -97,26 +97,26 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -144,28 +144,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -176,12 +176,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -190,26 +190,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -277,12 +277,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
else
bt_link_info->slave_role = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -334,9 +334,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -446,8 +446,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -462,29 +462,29 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
@@ -493,23 +493,23 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -517,35 +517,35 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex],A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex],A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -555,36 +555,36 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -594,15 +594,15 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -614,13 +614,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -641,10 +641,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -657,8 +657,8 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -668,15 +668,15 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr_lvl,
- coex_dm->cur_dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
+ coex_dm->cur_dec_bt_pwr_lvl);
if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
return;
@@ -721,16 +721,16 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -759,9 +759,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf6; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -771,17 +771,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""), (low_penalty_ra ?
- "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -797,8 +797,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8) level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -818,20 +818,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
- (force_exec ? "force to" : ""),
- (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -851,20 +851,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -875,24 +875,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
- force_exec ? "force to" : "",
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ force_exec ? "force to" : "",
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -991,9 +991,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1030,16 +1030,16 @@ static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1070,11 +1070,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1220,10 +1220,10 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
coex_dm->switch_thres_offset;
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
@@ -1237,12 +1237,12 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
}
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1585,13 +1585,13 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
} else if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else {
@@ -1620,9 +1620,9 @@ static void btc8723b2ant_action_wifi_link_process(struct btc_coexist
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
@@ -1645,8 +1645,8 @@ static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
/* office environment */
if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
(coex_sta->a2dp_exist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1685,8 +1685,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1709,8 +1709,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1734,8 +1734,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
@@ -1759,12 +1759,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
common =
btc8723b2ant_action_wifi_idle_process(
@@ -1786,13 +1786,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1901,11 +1901,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
(coex_sta->low_priority_rx) > 1250)
retry_count++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_count = %d\n", retry_count);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
- up, dn, m, n, wait_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
result = 0;
wait_count++;
/* no retry in the last 2-second duration*/
@@ -1925,8 +1925,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
} /* <=3 retry in the last 2-second duration*/
} else if (retry_count <= 3) {
up--;
@@ -1957,8 +1957,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
/* retry count > 3, once retry count > 3, to reduce
@@ -1982,12 +1982,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1) {
if (tx_pause) {
if (coex_dm->cur_ps_tdma == 71) {
@@ -2736,17 +2736,17 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
}
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
/* if current PsTdma not match with the recorded one (scan, dhcp, ...),
* then we have to adjust it back to the previous recorded one.
*/
if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2756,8 +2756,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->ps_tdma_du_adj_type);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -3352,26 +3352,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool miracast_plus_bt = false;
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8723b2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
}
@@ -3381,8 +3381,8 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under Link Process !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
btc8723b2ant_action_wifi_link_process(btcoexist);
return;
}
@@ -3394,9 +3394,9 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if ((num_of_wifi_link >= 2) ||
(wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
if (bt_link_info->bt_link_exist)
miracast_plus_bt = true;
@@ -3415,76 +3415,76 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
&miracast_plus_bt);
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n",
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
if (btc8723b2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8723b2ant_action_sco(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8723b2ant_action_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8723b2ant_action_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8723b2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8723b2ant_action_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8723b2ant_action_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8723b2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8723b2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8723b2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8723b2ant_coex_alloff(btcoexist);
break;
}
@@ -3531,8 +3531,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
coex_dm->bt_rf0x1e_backup =
btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -3631,8 +3631,8 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8723b2ant_init_coex_dm(btcoexist);
}
@@ -3853,15 +3853,15 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8723b2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
@@ -3874,12 +3874,12 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3895,16 +3895,16 @@ void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
if (BTC_SCAN_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
&coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
u32tmp, u8tmpa, u8tmpb);
}
@@ -3913,11 +3913,11 @@ void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3930,11 +3930,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist,
@@ -3961,10 +3961,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66=0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3975,8 +3975,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3995,24 +3995,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data=[",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
if (i == length - 1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmpbuf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmpbuf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmpbuf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmpbuf[i]);
}
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -4043,8 +4043,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because BT is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -4058,8 +4058,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if ((coex_sta->bt_info_ext & BIT3)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
false);
} else {
@@ -4120,26 +4120,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -4164,7 +4164,7 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -4175,11 +4175,11 @@ void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (pnp_state == BTC_WIFI_PNP_SLEEP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
/* Driver do not leave IPS/LPS when driver is going to sleep, so
* BTCoexistence think wifi is still under IPS/LPS
@@ -4190,8 +4190,8 @@ void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
coex_sta->under_ips = false;
coex_sta->under_lps = false;
} else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
btc8723b2ant_query_bt_info(btcoexist);
@@ -4203,8 +4203,8 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (coex_sta->dis_ver_info_cnt <= 5) {
coex_sta->dis_ver_info_cnt += 1;
@@ -4212,8 +4212,8 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
/* Antenna config to set 0x765 = 0x0 (GNT_BT control by
* PTA) after initial
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set GNT_BT control by PTA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
btc8723b2ant_set_ant_path(
btcoexist, BTC_ANT_WIFI_AT_MAIN, false, false);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index fa5b73f81c57..9f5e85be9764 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -55,28 +55,28 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -85,12 +85,12 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -99,26 +99,26 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -145,28 +145,28 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -177,12 +177,12 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -191,26 +191,26 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -397,9 +397,9 @@ static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -471,8 +471,8 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -487,28 +487,28 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
}
}
@@ -516,56 +516,56 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -574,33 +574,33 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -609,14 +609,14 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -627,14 +627,14 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -660,9 +660,9 @@ static void btc8821a1ant_set_sw_penalty_tx_rate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -688,20 +688,20 @@ static void btc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -711,10 +711,10 @@ static void btc8821a1ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
coex_dm->cur_val_0x6c0 = val0x6c0;
coex_dm->cur_val_0x6c4 = val0x6c4;
coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -786,9 +786,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -798,15 +798,15 @@ static void btc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -831,8 +831,8 @@ static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
if (ap_enable) {
if (byte1 & BIT4 && !(byte1 & BIT5)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW for 1Ant AP mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -853,13 +853,13 @@ static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -878,22 +878,22 @@ static void btc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -909,8 +909,8 @@ static void btc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
btc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1010,13 +1010,13 @@ static void btc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1254,50 +1254,50 @@ static bool btc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
}
common = false;
@@ -1743,15 +1743,15 @@ static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool under_4way = false;
bool ap_enable = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -1764,8 +1764,8 @@ static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -1834,58 +1834,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!btc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = SCO\n");
btc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID\n");
btc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP\n");
btc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
btc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
btc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HS mode\n");
btc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
btc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
btc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
btc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
/*btc8821a1ant_coex_all_off(btcoexist);*/
break;
}
@@ -1906,31 +1906,31 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 num_of_wifi_link = 0;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2001,8 +2001,8 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2040,8 +2040,8 @@ static void btc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u1_tmp = 0;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (wifi_only)
return;
@@ -2096,8 +2096,8 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2353,15 +2353,15 @@ void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
@@ -2370,8 +2370,8 @@ void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
btc8821a1ant_init_hw_config(btcoexist, false, false);
@@ -2388,12 +2388,12 @@ void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2412,23 +2412,23 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (type == BTC_SCAN_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
/* Force antenna setup for no scan result issue */
btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
if (coex_sta->bt_disabled)
@@ -2461,8 +2461,8 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
/* non-connected scan */
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2471,8 +2471,8 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
btc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
/* non-connected scan */
btc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2497,21 +2497,21 @@ void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (type == BTC_ASSOCIATE_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
coex_dm->arp_cnt = 0;
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
coex_dm->arp_cnt = 0;
}
@@ -2536,12 +2536,12 @@ void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2568,18 +2568,18 @@ void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (BTC_MEDIA_CONNECT == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
coex_dm->arp_cnt = 0;
}
@@ -2602,11 +2602,11 @@ void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2628,8 +2628,8 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2639,17 +2639,17 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
coex_sta->wifi_is_high_pri_task = true;
if (type == BTC_PACKET_ARP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ARP notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet ARP notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet DHCP or EAPOL notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet DHCP or EAPOL notify\n");
}
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet [Type = %d] notify\n",
- type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet [Type = %d] notify\n",
+ type);
}
coex_sta->special_pkt_period_cnt = 0;
@@ -2678,9 +2678,9 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
type == BTC_PACKET_ARP) {
if (type == BTC_PACKET_ARP) {
coex_dm->arp_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ARP Packet Count = %d\n",
- coex_dm->arp_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
+ coex_dm->arp_cnt);
if (coex_dm->arp_cnt >= 10)
			/* if ARP PKT > 10 after connect, do not go to
* btc8821a1ant_act_wifi_conn_sp_pkt
@@ -2688,8 +2688,8 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
return;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet(%d) notify\n", type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
}
}
@@ -2715,19 +2715,19 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -2749,8 +2749,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
		/* BT info is responded by BT FW and BT RF REG 0x3C !=
* 0x15 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x15);
}
@@ -2759,8 +2759,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
	 * because bt was reset and lost the info
*/
if (coex_sta->bt_info_ext & BIT1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected) {
@@ -2775,8 +2775,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8821a1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -2827,28 +2827,28 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
		/* connection exists but is not busy */
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2868,12 +2868,12 @@ void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2897,18 +2897,18 @@ void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
/* BT should clear UnderIPS/UnderLPS state to avoid mismatch
* state after wakeup.
*/
@@ -2922,8 +2922,8 @@ void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
true);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
btc8821a1ant_init_hw_config(btcoexist, false, false);
btc8821a1ant_init_coex_dm(btcoexist);
@@ -2939,33 +2939,33 @@ void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
if (!btcoexist->auto_report_1ant) {
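/*
 * A minimal sketch of the rtl_dbg() shape these conversions rely on,
 * assuming a per-driver debug_mask/debug_level pair in mod_params and a
 * pr_debug() back end (illustrative names only, not the driver's actual
 * definition): the macro keeps the exact (priv, component, level, fmt,
 * ...) signature of the old RT_TRACE(), so each hunk is a pure rename
 * plus re-indentation of the continuation lines under the shorter name.
 */
#define rtl_dbg(rtlpriv, comp, level, fmt, ...)				\
do {									\
	/* emit only when the component bit and verbosity level match */ \
	if (((rtlpriv)->cfg->mod_params->debug_mask & (comp)) &&	\
	    (level) <= (rtlpriv)->cfg->mod_params->debug_level)		\
		pr_debug(fmt, ##__VA_ARGS__);				\
} while (0)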
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index e9e211fda264..e53789f11b08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -54,28 +54,28 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >=
rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -85,12 +85,12 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -100,26 +100,26 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -147,28 +147,28 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -180,12 +180,12 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -194,26 +194,26 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -273,12 +273,12 @@ static void btc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
else
bt_link_info->slave_role = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -330,9 +330,9 @@ static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -437,7 +437,7 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -453,28 +453,28 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -482,58 +482,58 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -543,33 +543,33 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
@@ -578,15 +578,15 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -598,14 +598,14 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
@@ -625,10 +625,10 @@ static void btc8821a2ant_set_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -641,9 +641,9 @@ static void btc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
- dec_bt_pwr_lvl, h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -653,15 +653,15 @@ static void btc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power level = %u\n",
- (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power level = %u\n",
+ (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
- coex_dm->pre_dec_bt_pwr_lvl,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
coex_dm->cur_dec_bt_pwr_lvl);
if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
@@ -677,16 +677,16 @@ static void btc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -719,9 +719,9 @@ static void btc8821a2ant_set_sw_penalty_tx_rate_adaptive(
h2c_parameter[5] = 0xa0;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -731,17 +731,17 @@ static void btc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""),
- ((low_penalty_ra) ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""),
+ ((low_penalty_ra) ? "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -758,8 +758,8 @@ static void btc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
}
@@ -779,21 +779,21 @@ static void btc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"),
- dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"),
+ dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl ==
@@ -814,20 +814,20 @@ static void btc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -837,28 +837,28 @@ static void btc8821a2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c0,
- coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8,
- coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c0,
- coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8,
- coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c0,
+ coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8,
+ coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c0,
+ coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8,
+ coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -967,9 +967,9 @@ static void btc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
@@ -1006,15 +1006,15 @@ static void btc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -1045,13 +1045,13 @@ static void btc8821a2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1137,20 +1137,20 @@ static void btc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
type = type + 100;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type = %d\n",
- (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
- type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+ type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1472,18 +1472,18 @@ static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
}
@@ -1509,8 +1509,8 @@ static void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
}
static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
@@ -1531,8 +1531,8 @@ static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
/* define the office environment */
if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
(coex_sta->a2dp_exist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1550,8 +1550,8 @@ static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
return true;
} else if (coex_sta->pan_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT PAN exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT PAN exist!!\n");
btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1592,8 +1592,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1620,8 +1620,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1650,8 +1650,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
@@ -1679,12 +1679,12 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
common =
btc8821a2ant_action_wifi_idle_process(
btcoexist);
@@ -1707,13 +1707,13 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
int result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1801,11 +1801,11 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_count = %d\n", retry_count);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
- (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+ (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
result = 0;
wait_count++;
@@ -1826,8 +1826,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -1856,8 +1856,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -1878,12 +1878,12 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1) {
if (tx_pause) {
@@ -2591,9 +2591,9 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
- coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2603,8 +2603,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->ps_tdma_du_adj_type);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
}
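The hunks above all land inside btc8821a2ant_tdma_duration_adjust(), whose up/dn counters implement a hysteresis on the BT retry count read back from BT_Info: sustained clean 2-second intervals widen the WiFi TDMA slot, a few retries narrow it gradually, and heavy retries narrow it immediately. A minimal sketch of that control loop, with placeholder thresholds standing in for the unchanged code elided between hunks:

/* Hedged sketch of the retry-count hysteresis driving
 * btc8821a2ant_tdma_duration_adjust(); example_tdma_step() is a
 * hypothetical helper, and the thresholds (30 clean polls, 2
 * retried polls) are placeholders for code elided between hunks. */
static int example_tdma_step(u8 retry_count, int *up, int *dn)
{
	if (retry_count == 0) {
		/* clean interval: vote for more WiFi air time */
		(*up)++;
		*dn = 0;
		if (*up >= 30) {
			*up = 0;
			return 1;	/* increase wifi duration */
		}
	} else if (retry_count <= 3) {
		/* a few BT retries: vote for less WiFi air time */
		*up = 0;
		(*dn)++;
		if (*dn >= 2) {
			*dn = 0;
			return -1;	/* decrease wifi duration */
		}
	} else {
		/* heavy BT retries (>3): back off immediately */
		*up = 0;
		*dn = 0;
		return -1;	/* decrease wifi duration */
	}
	return 0;		/* keep the current split */
}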
@@ -3389,31 +3389,31 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Manual control!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Manual control!!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
btc8821a2ant_coex_under_5g(btcoexist);
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8821a2ant_action_bt_inquiry(btcoexist);
return;
}
@@ -3423,8 +3423,8 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under Link Process !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
btc8821a2ant_action_wifi_link_process(btcoexist);
return;
}
@@ -3436,9 +3436,9 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if ((num_of_wifi_link >= 2) ||
(wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
if (bt_link_info->bt_link_exist)
miracast_plus_bt = true;
@@ -3457,75 +3457,75 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
&miracast_plus_bt);
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (btc8821a2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8821a2ant_coex_all_off(btcoexist);
break;
}
@@ -3561,8 +3561,8 @@ void ex_btc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
/* backup rf 0x1e value */
coex_dm->bt_rf0x1e_backup =
@@ -3629,8 +3629,8 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8821a2ant_init_coex_dm(btcoexist);
}
@@ -3840,15 +3840,15 @@ void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8821a2ant_wifi_off_hw_cfg(btcoexist);
btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8821a2ant_init_hwconfig(btcoexist);
btc8821a2ant_init_coex_dm(btcoexist);
@@ -3861,12 +3861,12 @@ void ex_btc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3876,11 +3876,11 @@ void ex_btc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
}
@@ -3889,11 +3889,11 @@ void ex_btc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
}
@@ -3907,11 +3907,11 @@ void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only for 2.4G do we need to inform BT of the channel mask */
@@ -3937,11 +3937,11 @@ void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3952,8 +3952,8 @@ void ex_btc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
}
@@ -3976,25 +3976,25 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -4016,8 +4016,8 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
/* BT info is reported by BT FW and BT RF REG 0x3C !=
* 0x01 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x01);
}
@@ -4039,31 +4039,31 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if (!btcoexist->manual_control && !wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info = 0x%x!!\n",
- coex_sta->bt_info_ext);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info = 0x%x!!\n",
+ coex_sta->bt_info_ext);
if ((coex_sta->bt_info_ext & BIT(3))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3=1, wifi_connected=%d\n",
- wifi_connected);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=1, wifi_connected=%d\n",
+ wifi_connected);
if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8821a2ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3=0, wifi_connected=%d\n",
- wifi_connected);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=0, wifi_connected=%d\n",
+ wifi_connected);
/* BT already does NOT ignore WLAN activity; do nothing
 * here.
*/
if (!wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"[BTCoex], BT ext info bit3 check, set BT to ignore Wlan active!!\n");
btc8821a2ant_ignore_wlan_act(
btcoexist, FORCE_EXEC, true);
@@ -4117,26 +4117,26 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8821A_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_2ANT_B_CONNECTION) {
/* connection exists but is not busy */
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_CON_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8821A_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8821A_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((coex_dm->bt_status == BT_8821A_2ANT_BT_STATUS_ACL_BUSY) ||
@@ -4161,8 +4161,8 @@ void ex_btc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
btc8821a2ant_wifi_off_hw_cfg(btcoexist);
btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -4173,14 +4173,14 @@ void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (pnp_state == BTC_WIFI_PNP_SLEEP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
} else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
ex_btc8821a2ant_init_hwconfig(btcoexist);
btc8821a2ant_init_coex_dm(btcoexist);
btc8821a2ant_query_bt_info(btcoexist);
@@ -4191,8 +4191,8 @@ void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (coex_sta->dis_ver_info_cnt <= 5) {
coex_sta->dis_ver_info_cnt += 1;
@@ -4200,8 +4200,8 @@ void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
/* Antenna config to set 0x765 = 0x0 (GNT_BT control by
 * PTA) after initialization
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set GNT_BT control by PTA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
btc8821a2ant_set_ant_path(btcoexist,
BTC_ANT_WIFI_AT_MAIN, false, false);
}
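Every hunk in this file is the same mechanical substitution, so one hedged sketch of the shared calling convention covers them all. example_notify() below is a hypothetical call site, and the gating described in its comment is inferred from the _rtl_dbg_trace() body removed in debug.c further down:

/* Hypothetical call site illustrating the rtl_dbg() convention used
 * by the hunks above.  Judging from the _rtl_dbg_trace() body dropped
 * in debug.c below, the message is emitted only when the COMP_* bit
 * is set in the debug_mask module parameter and the DBG_* level is
 * within debug_level. */
static void example_notify(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
		"[BTCoex], example notify, type = %d\n", type);
}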
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 2b140c1e8e8d..2c05369b79e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -129,8 +129,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "%s:%d\n", __func__, chnl);
return chnl;
}
@@ -250,16 +250,16 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
if (!wait_ms)
return true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
if (in_interrupt())
return false;
if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
msecs_to_jiffies(wait_ms)) == 0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "btmpinfo wait (req_num=%d) timeout\n", req_num);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "btmpinfo wait (req_num=%d) timeout\n", req_num);
return false; /* timeout */
}
@@ -278,14 +278,15 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "%s()<--dont leave lps under AP mode\n", __func__);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont leave lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
}
static void halbtc_enter_lps(struct btc_coexist *btcoexist)
@@ -299,14 +300,15 @@ static void halbtc_enter_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "%s()<--dont enter lps under AP mode\n", __func__);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont enter lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
btcoexist->bt_info.bt_lps_on = true;
- rtl_lps_enter(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_enter(rtlpriv->mac80211.hw, false);
}
static void halbtc_normal_lps(struct btc_coexist *btcoexist)
@@ -317,7 +319,8 @@ static void halbtc_normal_lps(struct btc_coexist *btcoexist)
if (btcoexist->bt_info.bt_ctrl_lps) {
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
btcoexist->bt_info.bt_ctrl_lps = false;
}
}
@@ -328,7 +331,8 @@ static void halbtc_pre_normal_lps(struct btc_coexist *btcoexist)
if (btcoexist->bt_info.bt_ctrl_lps) {
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
}
}
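Besides the logging rename, these hunks add a second boolean argument to rtl_lps_enter()/rtl_lps_leave(): the coexist paths above pass false next to FIXME comments about atomic context, while rtl_op_suspend() and the scan/assoc paths in core.c below pass true. A sketch of the assumed prototypes; the parameter name is a guess, since only the bool's position is visible in this diff:

struct ieee80211_hw;

/* Assumed prototypes after this patch -- "may_block" is a guessed
 * name; only the true/false split between callers is shown here. */
void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block);
void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);

/* Hypothetical wrappers mirroring the two kinds of callers: */
static void example_leave_lps_atomic(struct ieee80211_hw *hw)
{
	rtl_lps_leave(hw, false);	/* coexist paths: must not sleep */
}

static void example_leave_lps_sleepable(struct ieee80211_hw *hw)
{
	rtl_lps_leave(hw, true);	/* suspend/scan paths: may sleep */
}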
@@ -1368,11 +1372,11 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
btcoexist->board_info.tfbga_package = true;
if (btcoexist->board_info.tfbga_package)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = TFBGA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = TFBGA\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = Non-TFBGA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = Non-TFBGA\n");
btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
btcoexist->board_info.ant_div_cfg = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index b8c4536af6c0..4641999f3fe9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -191,7 +191,7 @@ void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
u8 bt_exist;
bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"%s, bt_exist is %d\n", __func__, bt_exist);
if (!btcoexist)
@@ -383,8 +383,8 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
break;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo complete req_num=%d\n", seq);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo complete req_num=%d\n", seq);
complete(&btcoexist->bt_mp_comp);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index bf0e0bb1f99b..7aa28da39409 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -43,14 +43,14 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The Key ID is %d\n", entry_no);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The Key ID is %d\n", entry_no);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command);
} else if (entry_i == 1) {
@@ -64,10 +64,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
} else {
@@ -83,15 +83,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
}
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "after set key, usconfig:%x\n", us_config);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "after set key, usconfig:%x\n", us_config);
}
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
@@ -101,14 +101,14 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr);
if (ul_key_id == TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ulKeyId exceed!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ulKeyId exceed!\n");
return 0;
}
@@ -120,7 +120,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
(u8 *)key_content, us_config);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
return 1;
@@ -133,7 +133,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
u32 ul_command;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
ul_command = ul_key_id * CAM_CONTENT_COUNT;
ul_command = ul_command | BIT(31) | BIT(16);
@@ -141,10 +141,10 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, 0);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
return 0;
@@ -195,10 +195,10 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
EXPORT_SYMBOL(rtl_cam_mark_invalid);
@@ -245,12 +245,10 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A4: %x\n",
- ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A0: %x\n",
- ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
}
@@ -313,8 +311,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "&&&&&&&&&del entry %d\n", i);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "&&&&&&&&&del entry %d\n", i);
}
}
return;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4dd82c6052f0..a7259dbc953d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -76,8 +76,8 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
@@ -214,8 +214,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
u8 retry_limit = 0x30;
if (mac->vif) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "vif has been set!! mac->vif = 0x%p\n", mac->vif);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "vif has been set!! mac->vif = 0x%p\n", mac->vif);
return -EOPNOTSUPP;
}
@@ -227,19 +227,19 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
mac->p2p = P2P_ROLE_CLIENT;
- /*fall through*/
+ fallthrough;
case NL80211_IFTYPE_STATION:
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_STATION\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_STATION\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
}
break;
case NL80211_IFTYPE_ADHOC:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_ADHOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_ADHOC\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -254,10 +254,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_P2P_GO:
mac->p2p = P2P_ROLE_GO;
- /*fall through*/
+ fallthrough;
case NL80211_IFTYPE_AP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_AP\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_AP\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -271,8 +271,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_MESH_POINT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_MESH_POINT\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -293,8 +293,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
}
if (mac->p2p) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p role %x\n", vif->type);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p role %x\n", vif->type);
mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
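The two /*fall through*/ comments replaced in this hunk become the kernel's fallthrough; pseudo-keyword, which expands via <linux/compiler_attributes.h> to __attribute__((__fallthrough__)) on compilers that support it, so -Wimplicit-fallthrough treats the drop into the next case as intentional. The pattern, condensed from the hunk above:

/* Condensed from rtl_op_add_interface() above: the P2P cases set the
 * role, then deliberately fall through into the matching plain case. */
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
	mac->p2p = P2P_ROLE_CLIENT;
	fallthrough;	/* P2P client shares the STATION setup */
case NL80211_IFTYPE_STATION:
	/* station-mode setup runs for both cases */
	break;
default:
	break;
}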
@@ -360,8 +360,8 @@ static int rtl_op_change_interface(struct ieee80211_hw *hw,
vif->type = new_type;
vif->p2p = p2p;
ret = rtl_op_add_interface(hw, vif);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p %x\n", p2p);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p %x\n", p2p);
return ret;
}
@@ -435,8 +435,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
if (patterns[i].pattern_len < 0 ||
patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
- "Pattern[%d] is too long\n", i);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_WARNING,
+ "Pattern[%d] is too long\n", i);
continue;
}
pattern_os = patterns[i].pattern;
@@ -515,8 +515,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
"pattern to hw\n", content, len);
/* 3. calculate crc */
rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
/* 4. write crc & mask_for_hw to hw */
rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
@@ -531,7 +531,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
@@ -544,7 +544,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
rtlhal->driver_is_goingto_unload = true;
rtlhal->enter_pnp_sleep = true;
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
rtl_op_stop(hw);
device_set_wakeup_enable(wiphy_dev(hw->wiphy), true);
return 0;
@@ -557,7 +557,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
time64_t now;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
rtlhal->enter_pnp_sleep = false;
rtlhal->wake_from_pnp_sleep = true;
@@ -588,8 +588,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtlpriv->locks.conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
}
/*For IPS */
@@ -632,9 +632,9 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count);
/* everything changing at bring-up (changed == ~0) indicates first
* open, so use our default value instead of that of wiphy.
*/
@@ -809,13 +809,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_ALLMULTI) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive multicast frame\n");
} else {
mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive multicast frame\n");
}
update_rcr = true;
}
@@ -823,12 +823,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive FCS error frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive FCS error frame\n");
}
if (!update_rcr)
update_rcr = true;
@@ -855,12 +855,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_CONTROL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive control frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive control frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -869,12 +869,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive other BSS's frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive other BSS's frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -923,7 +923,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta->supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Add sta addr is %pM\n", sta->addr);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
}
@@ -939,8 +939,8 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Remove sta addr is %pM\n", sta->addr);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "Remove sta addr is %pM\n", sta->addr);
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -988,8 +988,8 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
int aci;
if (queue >= AC_MAX) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "queue number %d is incorrect!\n", queue);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "queue number %d is incorrect!\n", queue);
return -EINVAL;
}
@@ -1034,8 +1034,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_BEACON_ENABLED\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_BEACON_ENABLED\n");
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
@@ -1052,8 +1052,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED &&
!bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "ADHOC DISABLE BEACON\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "ADHOC DISABLE BEACON\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -1062,8 +1062,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_BEACON_INT) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- "BSS_CHANGED_BEACON_INT\n");
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_TRACE,
+ "BSS_CHANGED_BEACON_INT\n");
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
@@ -1102,8 +1102,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
- "send PS STATIC frame\n");
+ rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
+ "send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
@@ -1143,15 +1143,15 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
HW_VAR_KEEP_ALIVE,
(u8 *)(&keep_alive));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_ASSOC\n");
} else {
struct cfg80211_bss *bss = NULL;
mstatus = RT_MEDIA_DISCONNECT;
if (mac->link_state == MAC80211_LINKED)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
@@ -1161,14 +1161,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
IEEE80211_BSS_TYPE_ESS,
IEEE80211_PRIVACY_OFF);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid = %pMF\n", mac->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
if (bss) {
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1179,8 +1179,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->chk_switch_dmdp)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_UN_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_UN_ASSOC\n");
}
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
/* For FW LPS:
@@ -1198,14 +1198,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_CTS_PROT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_CTS_PROT\n");
mac->use_cts_protect = bss_conf->use_cts_prot;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
bss_conf->use_short_preamble);
mac->short_preamble = bss_conf->use_short_preamble;
@@ -1214,8 +1214,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_SLOT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_SLOT\n");
if (bss_conf->use_short_slot)
mac->slot_time = RTL_SLOT_TIME_9;
@@ -1229,8 +1229,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HT) {
struct ieee80211_sta *sta = NULL;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_HT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_HT\n");
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
@@ -1261,8 +1261,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
(u8 *)bss_conf->bssid);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid: %pM\n", bss_conf->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid: %pM\n", bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1393,27 +1393,27 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, vif, sta, tid);
case IEEE80211_AMPDU_TX_OPERATIONAL:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
rtl_tx_agg_oper(hw, sta, tid);
break;
case IEEE80211_AMPDU_RX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
return rtl_rx_agg_start(hw, sta, tid);
case IEEE80211_AMPDU_RX_STOP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
return rtl_rx_agg_stop(hw, sta, tid);
default:
pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
@@ -1429,7 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = true;
if (rtlpriv->link_info.higher_busytraffic) {
mac->skip_scan = true;
@@ -1448,7 +1448,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
}
if (mac->link_state == MAC80211_LINKED) {
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
mac->link_state = MAC80211_LINKED_SCANNING;
} else {
rtl_ips_nic_on(hw);
@@ -1467,7 +1467,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
mac->skip_scan = false;
@@ -1517,8 +1517,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->btcoexist.btc_info.in_4way = false;
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not open hw encryption\n");
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
@@ -1526,10 +1526,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr);
rtlpriv->sec.being_setkey = true;
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -1538,28 +1538,28 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
key_type = WEP104_ENCRYPTION;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
/* HW doesn't support CMAC encryption,
* use software CMAC encryption
*/
key_type = AESCMAC_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW don't support CMAC encryption, use software CMAC encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "HW don't support CMAC encryption, use software CMAC encryption\n");
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1605,9 +1605,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type == WEP104_ENCRYPTION))
wep_only = true;
rtlpriv->sec.pairwise_enc_algorithm = key_type;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
- key_type);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
+ key_type);
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
}
@@ -1615,8 +1615,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
if (wep_only) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set WEP(group/pairwise) key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set WEP(group/pairwise) key\n");
/* Pairwise key with an assigned MAC address. */
rtlpriv->sec.pairwise_enc_algorithm = key_type;
rtlpriv->sec.group_enc_algorithm = key_type;
@@ -1626,8 +1626,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
eth_zero_addr(mac_addr);
} else if (group_key) { /* group key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
/* group key */
rtlpriv->sec.group_enc_algorithm = key_type;
/*set local buf about group key. */
@@ -1636,8 +1636,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
memcpy(mac_addr, bcast_addr, ETH_ALEN);
} else { /* pairwise key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set pairwise key\n");
if (!sta) {
WARN_ONCE(true,
"rtlwifi: pairwise key without mac_addr\n");
@@ -1669,8 +1669,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case DISABLE_KEY:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "disable key delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "disable key delete one entry\n");
/*set local buf about wep key. */
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
@@ -1718,9 +1718,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
rtlpriv->rfkill.rfkill_state = radio_state;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "wireless radio switch turned %s\n",
- radio_state ? "on" : "off");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off");
blocked = !rtlpriv->rfkill.rfkill_state;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
@@ -1765,26 +1765,27 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
do {
cfg_cmd = pwrcfgcmd[ary_idx];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
- GET_PWR_CFG_OFFSET(cfg_cmd),
- GET_PWR_CFG_CUT_MASK(cfg_cmd),
- GET_PWR_CFG_FAB_MASK(cfg_cmd),
- GET_PWR_CFG_INTF_MASK(cfg_cmd),
- GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
- GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ __func__,
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
(GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
(GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
switch (GET_PWR_CFG_CMD(cfg_cmd)) {
case PWR_CMD_READ:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
break;
case PWR_CMD_WRITE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
@@ -1797,7 +1798,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
polling_bit = false;
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
@@ -1818,8 +1819,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
} while (!polling_bit);
break;
case PWR_CMD_DELAY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_DELAY\n", __func__);
if (GET_PWR_CFG_VALUE(cfg_cmd) ==
PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
@@ -1827,8 +1828,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
break;
case PWR_CMD_END:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_END\n", __func__);
return true;
default:
WARN_ONCE(true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 55db71c766fe..901cdfe3723c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -8,26 +8,6 @@
#include <linux/vmalloc.h>
#ifdef CONFIG_RTLWIFI_DEBUG
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...)
-{
- if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- level <= rtlpriv->cfg->mod_params->debug_level)) {
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info(":<%lx> %pV", in_interrupt(), &vaf);
-
- va_end(args);
- }
-}
-EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
-
void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
@@ -404,8 +384,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
&path, &addr, &bitmask, &data);
if (num != 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Format is <path> <addr> <mask> <data>\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
return count;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 69f169d4d4ae..1c0bcf8ec1a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -149,10 +149,6 @@ enum dbgp_flag_e {
struct rtl_priv;
__printf(4, 5)
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...);
-
-__printf(4, 5)
void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...);
@@ -160,8 +156,8 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *titlestring,
const void *hexdata, int hexdatalen);
-#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
- _rtl_dbg_trace(rtlpriv, comp, level, \
+#define rtl_dbg(rtlpriv, comp, level, fmt, ...) \
+ _rtl_dbg_print(rtlpriv, comp, level, \
fmt, ##__VA_ARGS__)
#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
@@ -177,9 +173,9 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
struct rtl_priv;
__printf(4, 5)
-static inline void RT_TRACE(struct rtl_priv *rtlpriv,
- u64 comp, int level,
- const char *fmt, ...)
+static inline void rtl_dbg(struct rtl_priv *rtlpriv,
+ u64 comp, int level,
+ const char *fmt, ...)
{
}
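With _rtl_dbg_trace() removed, rtl_dbg() funnels into _rtl_dbg_print(),
which gates on the same debug_mask/debug_level module parameters. A
minimal sketch, assuming _rtl_dbg_print() matches the _rtl_dbg_trace()
body deleted above minus the in_interrupt() prefix (its actual body is
not shown in this diff):

	/* Sketch only; the real body lives in debug.c and is not
	 * touched by this patch apart from losing its duplicate sibling.
	 */
	void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
			    const char *fmt, ...)
	{
		if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
			     level <= rtlpriv->cfg->mod_params->debug_level)) {
			struct va_format vaf;
			va_list args;

			va_start(args, fmt);
			vaf.fmt = fmt;
			vaf.va = &args;
			pr_info("%pV", &vaf);	/* no ":<in_interrupt()>" prefix */
			va_end(args);
		}
	}

Dropping the in_interrupt() decoration from the log prefix also matches
the in_interrupt() removal in ps.c further down.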
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index cef9f2a9303b..2e945554ed6d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -120,8 +120,8 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
const u32 efuse_len =
rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
- address, value);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
+ address, value);
if (address < efuse_len) {
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
@@ -211,9 +211,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 efuse_usage;
if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
- _offset, _size_byte);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
return;
}
@@ -376,9 +376,9 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
(EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
result = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- totalbytes, hdr_num, words_need, efuse_used);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ __func__, totalbytes, hdr_num, words_need, efuse_used);
return result;
}
@@ -416,7 +416,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
u8 word_en = 0x0F;
u8 first_pg = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
@@ -424,8 +424,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse out of capacity!!\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse out of capacity!!\n");
return false;
}
efuse_power_switch(hw, true, true);
@@ -464,8 +464,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
tmpdata)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "PG section(%#x) fail!!\n", offset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "PG section(%#x) fail!!\n", offset);
break;
}
}
@@ -478,7 +478,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
return true;
}
@@ -616,8 +616,8 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmpidx = 0;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "Addr = %x Data=%x\n", addr, data);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "Addr = %x Data=%x\n", addr, data);
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
@@ -996,8 +996,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (efuse_addr >= (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_addr(%#x) Out of size!!\n", efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse_addr(%#x) Out of size!!\n", efuse_addr);
}
return true;
@@ -1037,8 +1037,8 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u8 tmpdata[8];
memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
@@ -1240,11 +1240,11 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != params[0]) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1255,30 +1255,30 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[params[5] + i];
*((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
rtlefuse->eeprom_channelplan = *&hwinfo[params[6]];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *&hwinfo[params[8]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = params[9];
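Unrelated to the logging rename: the raw *(u16 *)&hwinfo[...] loads kept
in rtl_get_hwinfo() assume a little-endian CPU that tolerates unaligned
access. A portable spelling (illustrative, not part of this patch) would
use the unaligned helpers:

	#include <asm/unaligned.h>

	rtlefuse->eeprom_vid = get_unaligned_le16(&hwinfo[params[1]]);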
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 25335bd2873b..3776495fd9d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -204,8 +204,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -254,8 +254,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -271,10 +271,10 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
u_pcibridge_aspmsetting);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PlatformEnableASPM(): Write reg[%x] = %x\n",
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting);
udelay(50);
@@ -331,11 +331,11 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
if (pcipriv->ndis_adapter.busnumber ==
@@ -350,8 +350,8 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "find_buddy_priv %d\n", find_buddy_priv);
if (find_buddy_priv)
*buddy_priv = tpriv;
@@ -388,8 +388,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
@@ -547,21 +547,20 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
ring->idx = (ring->idx + 1) % ring->entries;
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->
- get_desc(hw, (u8 *)entry, true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
/* remove early mode header */
if (rtlpriv->rtlhal.earlymode_enable)
skb_pull(skb, EM_HDR_LEN);
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *)(skb->data + 22));
+ rtl_dbg(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
+ "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *)(skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
@@ -608,10 +607,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
}
if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
@@ -622,7 +621,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
@@ -646,10 +645,10 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
- pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
bufferaddress = *((dma_addr_t *)skb->cb);
- if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
+ if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress))
return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
@@ -773,8 +772,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
* AAAAAAttention !!!
* We can NOT access 'skb' before 'pci_unmap_single'
*/
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
/* get a new skb - if fail, old one will be reused */
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
@@ -801,9 +800,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
skb_reserve(skb, stats.rx_drvinfo_size +
stats.rx_bufshift);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
dev_kfree_skb_any(skb);
goto new_trx_end;
}
@@ -875,7 +874,7 @@ new_trx_end:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) {
@@ -925,67 +924,67 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<1> beacon related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon ok interrupt!\n");
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon err interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon err interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "prepare beacon for interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Manage ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "HIGH_QUEUE ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BK Tx OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BE TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "VI TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Vo TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
@@ -993,8 +992,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "H2C TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "H2C TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, H2C_QUEUE);
}
}
@@ -1003,34 +1002,34 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "CMD TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<3> Rx related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "rx descriptor unavailable!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "firmware interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.fwevt_wq, 0);
}
@@ -1046,8 +1045,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
if (unlikely(intvec.inta &
rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "hsisr interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
}
}
@@ -1061,16 +1060,18 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(unsigned long data)
+static void _rtl_pci_irq_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t, works.irq_tasklet);
+ struct ieee80211_hw *hw = rtlpriv->hw;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
+static void _rtl_pci_prepare_bcn_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t,
+ works.irq_prepare_bcn_tasklet);
+ struct ieee80211_hw *hw = rtlpriv->hw;
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl8192_tx_ring *ring = NULL;
@@ -1092,10 +1093,10 @@ static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
else
entry = (u8 *)(&ring->desc[ring->idx]);
if (pskb) {
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- pskb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ pskb->len, DMA_TO_DEVICE);
kfree_skb(pskb);
}
@@ -1194,12 +1195,9 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
rtlpci->acm_method = EACMWAY2_SW;
/*task */
- tasklet_init(&rtlpriv->works.irq_tasklet,
- _rtl_pci_irq_tasklet,
- (unsigned long)hw);
- tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- _rtl_pci_prepare_bcn_tasklet,
- (unsigned long)hw);
+ tasklet_setup(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet);
+ tasklet_setup(&rtlpriv->works.irq_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
}
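tasklet_setup() registers the callbacks with the new
struct tasklet_struct * signature, and from_tasklet() recovers the
enclosing object, so the (unsigned long)hw cast round-trip disappears.
from_tasklet() is a thin container_of() wrapper; paraphrased from
include/linux/interrupt.h (from memory, verify against the tree):

	#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
		container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

so from_tasklet(rtlpriv, t, works.irq_tasklet) expands to
container_of(t, struct rtl_priv, works.irq_tasklet).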
@@ -1218,9 +1216,9 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
/* alloc tx buffer desc for new trx flow*/
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
- &buffer_desc_dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma, GFP_KERNEL);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
pr_err("Cannot allocate TX ring (prio = %d)\n",
@@ -1236,8 +1234,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
}
/* alloc dma for this ring */
- desc = pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*desc) * entries, &desc_dma);
+ desc = dma_alloc_coherent(&rtlpci->pdev->dev, sizeof(*desc) * entries,
+ &desc_dma, GFP_KERNEL);
if (!desc || (unsigned long)desc & 0xFF) {
pr_err("Cannot allocate TX ring (prio = %d)\n", prio);
@@ -1251,8 +1249,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
- prio, desc);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
+ prio, desc);
/* init every desc in this ring */
if (!rtlpriv->use_new_trx_flow) {
@@ -1280,11 +1278,10 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_rx_buffer_desc *entry = NULL;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma, GFP_KERNEL);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
pr_err("Cannot allocate RX ring\n");
@@ -1304,10 +1301,10 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
u8 tmp_one = 1;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- desc) * rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma, GFP_KERNEL);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
pr_err("Cannot allocate RX ring\n");
@@ -1347,24 +1344,23 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
else
entry = (u8 *)(&ring->desc[ring->idx]);
- pci_unmap_single(rtlpci->pdev,
+ dma_unmap_single(&rtlpci->pdev->dev,
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
- true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
/* free dma of this ring */
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->desc) * ring->entries,
- ring->desc, ring->dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*ring->desc) * ring->entries, ring->desc,
+ ring->dma);
ring->desc = NULL;
if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->buffer_desc) * ring->entries,
- ring->buffer_desc, ring->buffer_desc_dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*ring->buffer_desc) * ring->entries,
+ ring->buffer_desc, ring->buffer_desc_dma);
ring->buffer_desc = NULL;
}
}
@@ -1381,25 +1377,25 @@ static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
if (!skb)
continue;
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
kfree_skb(skb);
}
/* free dma of this ring */
if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) * rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].buffer_desc,
- rtlpci->rx_ring[rxring_idx].dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].buffer_desc,
+ rtlpci->rx_ring[rxring_idx].dma);
rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
} else {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].desc,
- rtlpci->rx_ring[rxring_idx].dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].desc,
+ rtlpci->rx_ring[rxring_idx].dma);
rtlpci->rx_ring[rxring_idx].desc = NULL;
}
}
@@ -1527,13 +1523,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
else
entry = (u8 *)(&ring->desc[ring->idx]);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->
- get_desc(hw, (u8 *)
- entry,
- true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -1649,10 +1642,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
true, HW_DESC_OWN);
if (own == 1 && hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
flags);
@@ -1662,8 +1655,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_available_desc &&
rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
}
@@ -1686,8 +1679,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
@@ -1794,8 +1787,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Failed to config hardware!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Failed to config hardware!\n");
kfree(rtlpriv->btcoexist.btc_context);
kfree(rtlpriv->btcoexist.wifi_only_context);
return err;
@@ -1804,7 +1797,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
&rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
@@ -1815,7 +1808,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
return 0;
}
@@ -1909,71 +1902,71 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8171_DID) {
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192 PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192 PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
return false;
case RTL_PCI_REVISION_ID_8192SE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192SE is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192SE is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
}
} else if (deviceid == RTL_PCI_8723AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8723AE PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192C PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192C PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192D PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192D PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8188EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8188EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8188EE\n");
} else if (deviceid == RTL_PCI_8723BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8723BE\n");
} else if (deviceid == RTL_PCI_8192EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8192EE\n");
} else if (deviceid == RTL_PCI_8821AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8821AE\n");
} else if (deviceid == RTL_PCI_8812AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8812AE\n");
} else if (deviceid == RTL_PCI_8822BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
rtlhal->bandset = BAND_ON_BOTH;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8822BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8822BE\n");
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
@@ -1982,17 +1975,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC0\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC1\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
@@ -2026,9 +2019,9 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Pci Bridge Vendor is found index: %d\n",
- tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Pci Bridge Vendor is found index: %d\n",
+ tmp);
break;
}
}
@@ -2056,22 +2049,22 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
@@ -2099,8 +2092,8 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
rtlpci->using_msi = true;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "MSI Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
return 0;
}
@@ -2117,8 +2110,8 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
return ret;
rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "Pin-based Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
return 0;
}
@@ -2172,8 +2165,8 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
if (((struct rtl_hal_cfg *)id->driver_data)->mod_params->dma64 &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
WARN_ONCE(true,
"Unable to obtain 64bit DMA for consistent allocations\n");
err = -ENOMEM;
@@ -2181,8 +2174,8 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
platform_enable_dma64(pdev, true);
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
WARN_ONCE(true,
"rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
err = -ENOMEM;
@@ -2245,10 +2238,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
goto fail2;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
@@ -2310,9 +2303,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpci = rtl_pcidev(pcipriv);
err = rtl_pci_intr_mode_decide(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy));
goto fail3;
}
rtlpci->irq_alloc = 1;
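The pci_*()/PCI_DMA_* calls replaced throughout this file were thin
compatibility aliases over the generic DMA API, so most of the conversion
is mechanical. The legacy wrappers were defined along these lines (a
sketch from memory of pci-dma-compat.h, not quoted from this patch):

	static inline dma_addr_t pci_map_single(struct pci_dev *pdev,
						void *ptr, size_t size,
						int dir)
	{
		return dma_map_single(&pdev->dev, ptr, size,
				      (enum dma_data_direction)dir);
	}
	/* PCI_DMA_TODEVICE   -> DMA_TO_DEVICE
	 * PCI_DMA_FROMDEVICE -> DMA_FROM_DEVICE
	 */

One behavioral change is worth noting: pci_zalloc_consistent() allocated
with GFP_ATOMIC, while the dma_alloc_coherent() calls above pass
GFP_KERNEL. The ring setup runs in probe/reset context where sleeping is
allowed, so the relaxed flag is safe and makes allocation failures less
likely.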
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 90f92728e16a..f99882255d48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -19,8 +19,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->intf_ops->reset_trx_ring(hw);
if (is_hal_stop(rtlhal))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Driver is already down!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Driver is already down!\n");
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
@@ -80,9 +80,9 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
if (ppsc->rfchange_inprogress) {
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "RF Change in progress! Wait to set..state_toset(%d).\n",
- state_toset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "RF Change in progress! Wait to set..state_toset(%d).\n",
+ state_toset);
/* Set RF after the previous action is done. */
while (ppsc->rfchange_inprogress) {
@@ -179,10 +179,10 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
ppsc->swrf_processing = false;
}
-void rtl_ips_nic_off_wq_callback(void *data)
+void rtl_ips_nic_off_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ips_nic_off_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -191,8 +191,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
enum rf_pwrstate rtstate;
if (mac->opmode != NL80211_IFTYPE_STATION) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not station return\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not station return\n");
return;
}
@@ -228,8 +228,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
!ppsc->swrf_processing &&
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "IPSEnter(): Turn off RF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "IPSEnter(): Turn off RF\n");
ppsc->inactive_pwrstate = ERFOFF;
ppsc->in_powersavemode = true;
@@ -307,8 +307,8 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
ppsc->last_delaylps_stamp_jiffies);
if (ps_timediff < 2000) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
return false;
}
@@ -353,9 +353,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE);
enter_fwlps = false;
ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
ppsc->smart_ps = 0;
@@ -368,9 +368,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
} else {
if (rtl_get_fwlps_doze(hw)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
enter_fwlps = true;
@@ -420,8 +420,8 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
* bt_ccoexist may ask to enter lps.
* In normal case, this constraint move to rtl_lps_set_psmode().
*/
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
mutex_unlock(&rtlpriv->locks.lps_mutex);
@@ -449,8 +449,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Busy Traffic,Leave 802.11 power save..\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Busy Traffic,Leave 802.11 power save..\n");
rtl_lps_set_psmode(hw, EACTIVE);
}
@@ -534,8 +534,8 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.ps_work, MSECS(5));
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
@@ -562,10 +562,10 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
mutex_unlock(&rtlpriv->locks.lps_mutex);
}
-void rtl_swlps_rfon_wq_callback(void *data)
+void rtl_swlps_rfon_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ps_rfon_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_swlps_rf_awake(hw);
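The container_of_dwork_rtl() helper goes away in favour of open-coded
container_of() on the embedded .work member. An equivalent spelling via
the generic to_delayed_work() helper, shown only for comparison:

	static void rtl_swlps_rfon_wq_callback(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct rtl_works *rtlworks = container_of(dwork,
							  struct rtl_works,
							  ps_rfon_wq);

		rtl_swlps_rf_awake(rtlworks->hw);
	}

Both forms are exact container_of() arithmetic on struct rtl_works;
neither depends on field ordering.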
@@ -630,9 +630,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
/* this print should always be dtim_conter = 0 &
* sleep = dtim_period, that meaons, we should
* awake before every dtim */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "dtim_counter:%x will sleep :%d beacon_intv\n",
- rtlpriv->psc.dtim_counter, sleep_intv);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv);
/* we tested that 40ms is enough for sw & hw sw delay */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
@@ -653,33 +653,32 @@ void rtl_lps_change_work_callback(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
-void rtl_lps_enter(struct ieee80211_hw *hw)
+void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- if (!in_interrupt())
+ if (may_block)
return rtl_lps_enter_core(hw);
rtlpriv->enter_ps = true;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_enter);
-void rtl_lps_leave(struct ieee80211_hw *hw)
+void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- if (!in_interrupt())
+ if (may_block)
return rtl_lps_leave_core(hw);
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_leave);
-void rtl_swlps_wq_callback(void *data)
+void rtl_swlps_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- ps_work);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ps_work.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool ps = false;
@@ -744,9 +743,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
if (ie[0] == 12) {
find_p2p_ps_ie = true;
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -757,8 +756,8 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "update NOA ie.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "update NOA ie.\n");
p2pinfo->noa_index = noa_index;
p2pinfo->opp_ps = (ie[4] >> 7);
p2pinfo->ctwindow = ie[4] & 0x7F;
@@ -829,7 +828,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
if (ie == NULL)
return;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
noa_len = le16_to_cpu(*(__le16 *)&ie[1]);
@@ -837,13 +836,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
if (ie[0] == 12) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
ie, noa_len);
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -901,7 +900,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n" , p2p_ps_state);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state);
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
@@ -953,18 +952,18 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "ctwindow %x oppps %x\n",
- p2pinfo->ctwindow , p2pinfo->opp_ps);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "count %x duration %x index %x interval %x start time %x noa num %x\n",
- p2pinfo->noa_count_type[0],
- p2pinfo->noa_duration[0],
- p2pinfo->noa_index,
- p2pinfo->noa_interval[0],
- p2pinfo->noa_start_time[0],
- p2pinfo->noa_num);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "ctwindow %x oppps %x\n",
+ p2pinfo->ctwindow, p2pinfo->opp_ps);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "count %x duration %x index %x interval %x start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
}
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
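The in_interrupt() tests in rtl_lps_enter()/rtl_lps_leave() are replaced
by an explicit may_block argument: in_interrupt() cannot see contexts
that are atomic for other reasons (spinlock held, IRQs disabled), and its
use in drivers is deprecated, so callers now state whether sleeping is
allowed. The interrupt paths in pci.c above pass false; illustrative call
sites (not from this patch):

	rtl_lps_leave(hw, false);	/* IRQ/tasklet context: defers via
					 * works.lps_change_work */
	rtl_lps_leave(hw, true);	/* process context: calls
					 * rtl_lps_leave_core() directly */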
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
index aaa2ed2bbe16..b37a929def82 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.h
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.h
@@ -10,15 +10,15 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
void rtl_ips_nic_off(struct ieee80211_hw *hw);
void rtl_ips_nic_on(struct ieee80211_hw *hw);
-void rtl_ips_nic_off_wq_callback(void *data);
-void rtl_lps_enter(struct ieee80211_hw *hw);
-void rtl_lps_leave(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(struct work_struct *work);
+void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block);
+void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);
void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
-void rtl_swlps_wq_callback(void *data);
-void rtl_swlps_rfon_wq_callback(void *data);
+void rtl_swlps_wq_callback(struct work_struct *work);
+void rtl_swlps_rfon_wq_callback(struct work_struct *work);
void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 8be31e0ad878..4cf8face0bbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -393,13 +393,13 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.country_code =
channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
- rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
@@ -414,9 +414,9 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.alpha2[1] = '0';
}
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_TRACE,
+ "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
@@ -428,7 +428,7 @@ void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
_rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 1ffa188a65c9..d10c14c694da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -415,16 +415,16 @@ static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -459,8 +459,8 @@ static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_dig->cur_cck_cca_thres = cur_cck_cca_thresh;
dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
}
static void rtl88e_dm_dig(struct ieee80211_hw *hw)
@@ -520,7 +520,7 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw)
} else {
dm_dig->rx_gain_max = dm_dig_max;
dig_dynamic_min = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -624,8 +624,8 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -637,47 +637,47 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n");
} else if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl !=
rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel);
}
@@ -690,8 +690,8 @@ void rtl88e_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_dig = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
dm_dig->cur_igvalue, dm_dig->pre_igvalue,
dm_dig->back_val);
@@ -881,17 +881,17 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
/*Initialization (7 steps in total) */
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "dm_txpower_track_cb_therm\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER,
0xfc00);
if (!thermalvalue)
return;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
/*1. Query OFDM Default Setting: Path A*/
ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) &
@@ -900,8 +900,8 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8)i;
rtldm->swing_idx_ofdm_base[RF90_PATH_A] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XATXIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
@@ -915,24 +915,24 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) {
cck_index_old = (u8)i;
rtldm->swing_idx_cck_base = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) {
cck_index_old = (u8)i;
rtldm->swing_idx_cck_base = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -987,11 +987,11 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
/* 6 If necessary, do LCK.*/
if (delta_lck >= 8) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -1072,7 +1072,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (rtldm->txpower_track_control)
rtldm->thermalvalue = thermalvalue;
rtldm->txpowercount = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
}
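The tracking callback above keys its recalibration decisions off the absolute
drift between the current thermal-meter reading and the values cached at the
last LCK/IQK runs (the delta_lck >= 8 test in context). A minimal sketch of
that pattern; thermal_delta is a hypothetical helper, and the calibration call
site is illustrative rather than a copy of the driver's exact sequence:

    /* Absolute drift since a cached calibration-time reading. */
    static u8 thermal_delta(u8 now, u8 then)
    {
            return (now > then) ? (now - then) : (then - now);
    }

    /* Re-run LC calibration once the meter has drifted far enough. */
    if (thermal_delta(thermalvalue, rtlpriv->dm.thermalvalue_lck) >= 8) {
            rtlpriv->dm.thermalvalue_lck = thermalvalue;
            rtl88e_phy_lc_calibrate(hw);
    }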
static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
@@ -1087,9 +1087,9 @@ static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] = 12;
rtlpriv->dm.swing_idx_ofdm_cur = 12;
rtlpriv->dm.swing_flag_ofdm = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtlpriv->dm.txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
@@ -1102,13 +1102,13 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17)|BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 88E Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 88E Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
dm_txpower_track_cb_therm(hw);
rtlpriv->dm.tm_trigger = 0;
}
@@ -1138,14 +1138,14 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -1180,14 +1180,14 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -1224,8 +1224,8 @@ static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw,
u32 default_ant, optional_ant;
if (pfat_table->rx_idle_ant != ant) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "need to update rx idle ant\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "need to update rx idle ant\n");
if (ant == MAIN_ANT) {
default_ant =
(pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
@@ -1260,8 +1260,8 @@ static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw,
}
}
pfat_table->rx_idle_ant = ant;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n",
- (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n",
+ (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
}
static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw,
@@ -1280,9 +1280,9 @@ static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw,
pfat_table->antsel_a[mac_id] = target_ant & BIT(0);
pfat_table->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1;
pfat_table->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n",
(ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n",
pfat_table->antsel_c[mac_id],
pfat_table->antsel_b[mac_id],
pfat_table->antsel_a[mac_id]);
@@ -1464,15 +1464,15 @@ static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw)
target_ant = (main_rssi == aux_rssi) ?
pfat_table->rx_idle_ant : ((main_rssi >= aux_rssi) ?
MAIN_ANT : AUX_ANT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"main_ant_sum %d main_ant_cnt %d\n",
pfat_table->main_ant_sum[i],
pfat_table->main_ant_cnt[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "aux_ant_sum %d aux_ant_cnt %d\n",
- pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n",
- main_rssi, aux_rssi);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "aux_ant_sum %d aux_ant_cnt %d\n",
+ pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n",
+ main_rssi, aux_rssi);
local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi;
if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40))
ant_div_max_rssi = local_max_rssi;
@@ -1699,10 +1699,10 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
struct fast_ant_training *pfat_table = &rtldm->fat_table;
if (mac->link_state < MAC80211_LINKED) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
if (pfat_table->becomelinked) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "need to turn off HW AntDiv\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "need to turn off HW AntDiv\n");
rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 0);
@@ -1716,8 +1716,8 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
return;
} else {
if (!pfat_table->becomelinked) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Need to turn on HW AntDiv\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Need to turn on HW AntDiv\n");
rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 1);
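Every hunk in this file is the same mechanical conversion: RT_TRACE is
replaced by rtl_dbg with an identical argument list, so mostly only
continuation lines needed re-indenting. An illustrative sketch of the shape
such a gated debug macro takes -- the field names below are assumptions, not
rtlwifi's actual definition:

    #define rtl_dbg(rtlpriv, comp, level, fmt, ...)                      \
            do {                                                         \
                    if (((rtlpriv)->dbg.debug_components & (comp)) &&    \
                        (rtlpriv)->dbg.debug_level >= (level))           \
                            pr_debug(fmt, ##__VA_ARGS__);                \
            } while (0)

Because the component and level checks sit inside the macro, call sites stay
one-liners, and with pr_debug underneath the messages can compile out entirely
when debugging is disabled.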
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index fc7b9ad7e5d0..7252bc621211 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -40,7 +40,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -123,14 +123,14 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "normal Firmware SIZE %d\n", fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x), Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
@@ -181,22 +181,22 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -238,17 +238,17 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -256,24 +256,24 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -309,8 +309,8 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -320,16 +320,16 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -359,8 +359,8 @@ void rtl88e_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "8051Reset88E(): 8051 reset success\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "8051Reset88E(): 8051 reset success\n");
}
@@ -370,7 +370,7 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[H2C_88E_PWEMODE_LENGTH] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 rlbm, power_state = 0;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
set_h2ccmd_pwrmode_parm_mode(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
rlbm = 0;/*YJ, temp, 120316. FW does not yet support RLBM=2.*/

@@ -610,15 +610,15 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 3);
rtl88e_fill_h2c_cmd(hw, H2C_88E_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
/*Should check FW support p2p or not.*/
@@ -643,11 +643,11 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -703,11 +703,11 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
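The H2C path converted in this file implements a small mailbox handshake: the
driver cycles through four HMEBOX registers, and before writing box N it polls
(bounded by wait_h2c_limmit) until the firmware has consumed the previous
command. A condensed sketch of that rotation, with fw_read_last_h2c and
write_h2c_box as hypothetical stand-ins for the driver's
_rtl88e_check_fw_read_last_h2c and the register writes:

    u8 boxnum = rtlhal->last_hmeboxnum;
    int wait_limit = 100;

    /* Poll (bounded) until the firmware has read the current box. */
    while (!fw_read_last_h2c(hw, boxnum) && --wait_limit)
            udelay(10);

    write_h2c_box(hw, boxnum, element_id, cmd, cmd_len);

    /* Advance round-robin over the four boxes. */
    rtlhal->last_hmeboxnum = (boxnum + 1) % 4;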
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 70716631de85..63f9ea21962f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -75,11 +75,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
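This hunk is part of the tree-wide move from the legacy pci_* DMA wrappers to
the generic DMA API; the translation is mechanical once a struct device is
passed in place of the struct pci_dev. The correspondence, shown on a sketch
buffer:

    /* Legacy wrappers */
    dma_addr_t a = pci_map_single(rtlpci->pdev, buf, len, PCI_DMA_TODEVICE);
    pci_unmap_single(rtlpci->pdev, a, len, PCI_DMA_TODEVICE);

    /* Generic DMA API equivalents */
    dma_addr_t b = dma_map_single(&rtlpci->pdev->dev, buf, len,
                                  DMA_TO_DEVICE);
    dma_unmap_single(&rtlpci->pdev->dev, b, len, DMA_TO_DEVICE);

PCI_DMA_FROMDEVICE likewise becomes DMA_FROM_DEVICE, and the error check
changes from pci_dma_mapping_error(pdev, a) to
dma_mapping_error(&pdev->dev, b).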
@@ -140,9 +139,9 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -400,8 +399,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -445,9 +444,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -459,9 +458,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -500,9 +499,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break; }
case HW_VAR_AC_PARAM:{
@@ -536,9 +535,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -559,9 +558,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break; }
case HW_VAR_RCR:
@@ -775,22 +774,22 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl88ee_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl88ee_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
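With the "true != status" comparisons gone, the LLT setup reads as the two
chains it actually builds: pages below txpktbuf_bndy are linked into a list
terminated by 0xFF, and the remaining pages form a ring that wraps back to the
boundary. A condensed sketch of the same flow:

    for (i = 0; i < txpktbuf_bndy - 1; i++)
            if (!_rtl88ee_llt_write(hw, i, i + 1))
                    return false;
    if (!_rtl88ee_llt_write(hw, txpktbuf_bndy - 1, 0xFF)) /* terminator */
            return false;
    for (i = txpktbuf_bndy; i < maxpage; i++)
            if (!_rtl88ee_llt_write(hw, i, i + 1))
                    return false;
    return _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy); /* close ring */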
@@ -834,8 +833,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8188EE_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -869,9 +868,9 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, MSR, 0x00);
if (!rtlhal->mac_func_enable) {
- if (_rtl88ee_llt_table_init(hw) == false) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LLT table init fail\n");
+ if (!_rtl88ee_llt_table_init(hw)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LLT table init fail\n");
return false;
}
}
@@ -1002,14 +1001,14 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1024,8 +1023,8 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1068,7 +1067,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
}
rtstatus = _rtl88ee_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_info("Init MAC failed\n");
err = 1;
goto exit;
@@ -1076,8 +1075,8 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
err = rtl88e_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1130,9 +1129,9 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
rtl88e_phy_set_rfpath_switch(hw, false);
rtlpriv->dm.fat_table.rx_idle_ant = AUX_ANT;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n",
- (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
- ("MAIN_ANT") : ("AUX_ANT"));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n",
+ (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
+ ("MAIN_ANT") : ("AUX_ANT"));
if (rtlphy->iqk_initialized) {
rtl88e_phy_iq_calibrate(hw, true);
@@ -1148,7 +1147,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1157,7 +1156,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
}
rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
rtl88e_dm_init(hw);
@@ -1185,9 +1184,9 @@ static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
}
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1203,26 +1202,26 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1248,9 +1247,9 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
_rtl88ee_resume_tx_beacon(hw);
_rtl88ee_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1370,7 +1369,7 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
rtlhal->mac_func_enable = false;
rtlpriv->intf_ops->enable_aspm(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
u1b_tmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, u1b_tmp & (~BIT(1)));
@@ -1427,7 +1426,7 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
enum nl80211_iftype opmode;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
mac->link_state = MAC80211_NOLINK;
opmode = NL80211_IFTYPE_UNSPECIFIED;
@@ -1486,8 +1485,8 @@ void rtl88ee_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
/*rtl88ee_disable_interrupt(hw);*/
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
/*rtl88ee_enable_interrupt(hw);*/
@@ -1499,8 +1498,8 @@ void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1559,15 +1558,15 @@ static void read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcnt = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n",
- (eeaddr+1), hwinfo[eeaddr+1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n",
+ (eeaddr + 1), hwinfo[eeaddr + 1]);
if (0xFF == hwinfo[eeaddr+1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/* 2.4G default value */
set_24g_base(pwrinfo24g, rfpath);
@@ -1826,8 +1825,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -1925,8 +1924,8 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1943,18 +1942,18 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl88ee_read_adapter_info(hw);
} else {
@@ -2049,8 +2048,8 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2169,17 +2168,17 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask);
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
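The five-byte H2C payload assembled just above packs the rate bitmap and the
station metadata as follows (the 0x80 in byte 4 is kept as-is from the code;
its firmware-side meaning is not spelled out here):

    u8 rate_mask[5];
    u32 w = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);

    memcpy(rate_mask, &w, 4); /* bytes 0-3; the driver's cast assumes LE */
    rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;

    rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask);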
@@ -2236,16 +2235,16 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if ((!ppsc->hwradiooff) &&
(e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
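The radio on/off check above is edge-triggered: bit 31 of the polled GPIO word
gives the hardware switch position, and an update only fires when that
position disagrees with the cached ppsc->hwradiooff flag. An equivalent
sketch of the two branches:

    bool gpio_radio_on = !!(u4tmp & BIT(31));

    /* Fire only when the switch position and the cached state disagree
     * about whether the radio is on.
     */
    if (gpio_radio_on == ppsc->hwradiooff) {
            ppsc->hwradiooff = !gpio_radio_on;
            b_actuallyset = true;
    }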
@@ -2295,7 +2294,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2354,27 +2353,27 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index 4ef6d5907521..006b979da1c6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -19,8 +19,8 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -35,8 +35,8 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -47,8 +47,8 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -72,8 +72,8 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -123,7 +123,7 @@ void rtl88ee_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n",
+ ledaction);
_rtl88ee_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index d13983ec09ad..9be032e8ec95 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -16,7 +16,12 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask);
+static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i = ffs(bitmask);
+
+ return i ? i - 1 : 32;
+}
static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -43,15 +48,15 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue);
return returnvalue;
@@ -63,9 +68,9 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -75,9 +80,9 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -86,9 +91,9 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -99,9 +104,9 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -112,9 +117,9 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -133,9 +138,9 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -179,9 +184,9 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -203,20 +208,9 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
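The relocated helper replaces a 32-iteration linear scan with ffs(); the two
agree on every input, including the degenerate all-zero mask (the old loop
fell through to i == 32, and ffs(0) == 0 maps back to 32). A standalone copy
for testing, under the assumption of the kernel's ffs():

    #include <linux/bitops.h> /* kernel ffs(); use <strings.h> in userspace */

    static u32 calc_bit_shift(u32 bitmask) /* hypothetical standalone copy */
    {
            u32 i = ffs(bitmask);

            return i ? i - 1 : 32;
    }

    /* calc_bit_shift(0x000000f0) == 4, calc_bit_shift(0xffffffff) == 0,
     * calc_bit_shift(0) == 32
     */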
bool rtl88e_phy_mac_config(struct ieee80211_hw *hw)
@@ -381,11 +375,11 @@ static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n");
arraylength = RTL8188EEMAC_1T_ARRAYLEN;
ptrarray = RTL8188EEMAC_1T_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength);
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
return true;
@@ -487,9 +481,9 @@ static void handle_branch2(struct ieee80211_hw *hw, u16 arraylen,
READ_NEXT_PAIR(v1, v2, i);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
- array_table[i], array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
+ array_table[i], array_table[i + 1]);
}
}
@@ -521,52 +515,52 @@ static void store_pwrindex_rate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[count][0] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[count][1] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[count][6] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[count][7] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][7]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[count][2] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][2]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[count][3] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[count][4] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][4]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[count][5] = data;
@@ -574,66 +568,66 @@ static void store_pwrindex_rate_offset(struct ieee80211_hw *hw,
count++;
rtlphy->pwrgroup_cnt = count;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[count][8] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][8]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[count][9] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][9]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[count][14] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][14]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[count][15] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][15]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[count][10] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][10]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[count][11] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][11]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[count][12] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][12]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[count][13] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][13]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][13]);
if (get_rf_type(rtlphy) != RF_1T1R) {
count++;
rtlphy->pwrgroup_cnt = count;
@@ -696,8 +690,8 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -769,9 +763,9 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_arraylen = RTL8188EE_RADIOA_1TARRAYLEN;
radioa_array_table = RTL8188EE_RADIOA_1TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
process_path_a(hw, radioa_arraylen, radioa_array_table);
@@ -798,21 +792,21 @@ void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void _rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -1081,10 +1075,10 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1138,7 +1132,7 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1155,8 +1149,8 @@ void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1169,8 +1163,8 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1188,7 +1182,7 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1208,13 +1202,13 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1315,9 +1309,9 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1581,7 +1575,7 @@ static void _rtl88e_phy_path_adda_on(struct ieee80211_hw *hw,
u32 i;
pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
+ if (!is2t) {
pathon = 0x0bdb25a0;
rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
} else {
@@ -1749,8 +1743,8 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl88e_phy_path_a_iqk(hw, is2t);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
@@ -1762,22 +1756,22 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl88e_phy_path_a_rx_iqk(hw, is2t);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path a RX iqk fail!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path a RX iqk fail!!!\n");
}
}
if (0 == patha_ok)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A IQK Success!!\n");
if (is2t) {
_rtl88e_phy_path_a_standby(hw);
_rtl88e_phy_path_adda_on(hw, adda_reg, false, is2t);
@@ -1828,7 +1822,7 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "88ee IQK Finish!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "88ee IQK Finish!!\n");
}
static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
@@ -1874,7 +1868,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -1883,7 +1877,7 @@ static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (is_hal_stop(rtlhal)) {
u8 u1btmp;
@@ -2074,24 +2068,24 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2102,7 +2096,7 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl88e_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2112,9 +2106,9 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2128,14 +2122,14 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -2180,19 +2174,18 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus &&
(initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl88ee_phy_set_rf_on(hw);
}
@@ -2213,27 +2206,27 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -2256,34 +2249,34 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl88ee_phy_set_rf_sleep(hw);
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
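The phy.c hunks above are representative of the whole series: RT_TRACE becomes rtl_dbg with the argument list unchanged, so every call site converts one-for-one and only the continuation-line indentation shifts to realign with the shorter macro name. A minimal stand-in with the same call shape (the body below is a hypothetical sketch for illustration, not the driver's actual definition):

        #include <linux/printk.h>

        /* Hypothetical stand-in: (private data, component bitmask,
         * verbosity level, printf-style format), same as RT_TRACE. */
        #define rtl_dbg(rtlpriv, comp, level, fmt, ...)         \
        do {                                                    \
                (void)(rtlpriv);                                \
                (void)(comp);                                   \
                (void)(level);                                  \
                pr_debug(fmt, ##__VA_ARGS__);                   \
        } while (0)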
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index c376817a1bf4..24dc7011b7b2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -474,13 +474,13 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index a5d2d6ece8db..b9775eec4c54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -410,9 +410,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -515,11 +515,11 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_88e));
@@ -533,9 +533,9 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
rtl88ee_insert_emcontent(ptcb_desc,
(__le32 *)(skb->data));
}
@@ -631,7 +631,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
@@ -662,7 +662,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
}
rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -674,16 +674,15 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
@@ -733,7 +732,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
{
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
set_tx_desc_own(pdesc, 1);
@@ -774,7 +773,7 @@ u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
u32 ret = 0;
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = get_tx_desc_own(pdesc);
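Besides the logging rename, the trx.c hunks above migrate from the deprecated pci_* DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), PCI_DMA_TODEVICE becomes DMA_TO_DEVICE, and the failure check becomes dma_mapping_error(). A condensed sketch of the converted pattern (the helper name is illustrative, not from the driver):

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>
        #include <linux/pci.h>
        #include <linux/skbuff.h>

        /* Map an skb for device-bound DMA, as rtl88ee_tx_fill_desc()
         * and rtl88ee_tx_fill_cmddesc() now do. */
        static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                              dma_addr_t *mapping)
        {
                *mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, *mapping))
                        return -ENOMEM; /* caller drops the frame */
                return 0;
        }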
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 06fc9b5cdd8f..265a1a336304 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -242,16 +242,16 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
@@ -408,10 +408,10 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
rtl92c_dm_write_dig(hw);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "curmultista_cstate = %x dig_ext_port_stage %x\n",
- dm_digtable->curmultista_cstate,
- dm_digtable->dig_ext_port_stage);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "curmultista_cstate = %x dig_ext_port_stage %x\n",
+ dm_digtable->curmultista_cstate,
+ dm_digtable->dig_ext_port_stage);
}
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
@@ -419,9 +419,9 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_cstate = %x, cursta_cstate = %x\n",
- dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "presta_cstate = %x, cursta_cstate = %x\n",
+ dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
@@ -537,10 +537,10 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->back_val);
if (rtlpriv->rtlhal.interface == INTF_USB &&
!dm_digtable->dig_enable_flag) {
@@ -559,12 +559,12 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING,
- "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->rssi_val_min, dm_digtable->back_val,
- dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
- dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_WARNING,
+ "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->rssi_val_min, dm_digtable->back_val,
+ dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
+ dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
@@ -713,15 +713,15 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtl92c_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
rtl92c_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
@@ -738,10 +738,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -754,11 +754,11 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (ele_d == (ofdmswing_table[i] &
MASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
@@ -774,12 +774,12 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
4) == 0) {
cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
@@ -789,12 +789,12 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
4) == 0) {
cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -823,11 +823,11 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
if (delta_lck > 1) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -846,16 +846,16 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
@@ -946,14 +946,14 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
cck_index = 0;
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, cck_index=0x%x\n",
- ofdm_index[0], cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], cck_index);
}
}
@@ -1111,7 +1111,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.thermalvalue = thermalvalue;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
@@ -1123,9 +1123,9 @@ static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1149,13 +1149,13 @@ static void rtl92c_dm_check_txpower_tracking_thermal_meter(
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
0x60);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl92c_dm_txpower_tracking_directcall(hw);
rtlpriv->dm.tm_trigger = 0;
}
@@ -1276,29 +1276,29 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
}
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
} else {
dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
/* Power Saving for 92C */
@@ -1350,8 +1350,8 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -1362,42 +1362,42 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
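One hunk above also swaps a hard-coded function name in the thermal-tracking trace for the "%s", __func__ idiom, which the compiler keeps in sync if the function is ever renamed. Illustrative only (the function name below is hypothetical):

        #include <linux/printk.h>

        /* __func__ expands to the enclosing function's name,
         * here "example_callback". */
        static void example_callback(void)
        {
                pr_debug("%s\n", __func__);
        }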
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 86b1b88cc4ed..b618f07f29b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -54,7 +54,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
bool is_version_b;
u8 *bufferptr = (u8 *)buffer;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
is_version_b = IS_NORMAL_CHIP(version);
if (is_version_b) {
u32 pagenums, remainsize;
@@ -143,10 +143,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
@@ -198,21 +198,21 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -254,8 +254,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -263,9 +263,9 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -273,24 +273,24 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -358,8 +358,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -369,16 +369,16 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -428,7 +428,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
@@ -636,16 +636,16 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n",
u1rsvdpageloc, 3);
rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
@@ -717,13 +717,13 @@ void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -751,12 +751,12 @@ void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
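For context on the code being converted: _rtl92c_fill_h2c_command() serializes firmware mailbox writes with the h2c_setinprogress flag under h2c_lock, dropping the lock while it polls in 100 us steps and giving up after 1000 tries. A stripped-down sketch of that poll-under-lock pattern, with standalone names that are illustrative rather than the driver's:

        #include <linux/delay.h>
        #include <linux/spinlock.h>

        static bool h2c_claim(spinlock_t *lock, bool *inprogress)
        {
                unsigned long flags;
                unsigned int waits = 0;

                spin_lock_irqsave(lock, flags);
                while (*inprogress) {
                        /* release the lock so the owner can clear the flag */
                        spin_unlock_irqrestore(lock, flags);
                        udelay(100);
                        if (++waits > 1000)
                                return false;   /* timed out */
                        spin_lock_irqsave(lock, flags);
                }
                *inprogress = true;             /* claim the mailbox */
                spin_unlock_irqrestore(lock, flags);
                return true;
        }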
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 661249d618c0..3d29c8dbb255 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -14,15 +14,15 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -34,9 +34,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -46,9 +46,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
@@ -112,9 +112,9 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb,
- retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb,
+ retvalue);
return retvalue;
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -137,21 +137,17 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr);
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
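The _rtl92c_phy_calculate_bit_shift() rewrite above is an exact equivalence: ffs() returns the 1-based position of the least significant set bit and 0 for a zero word, so "i ? i - 1 : 32" reproduces the old loop's result, including the fall-through value of 32 when bitmask == 0. Both forms side by side (standalone sketch):

        #include <linux/bitops.h>
        #include <linux/types.h>

        static u32 calc_shift_loop(u32 bitmask)  /* old form */
        {
                u32 i;

                for (i = 0; i <= 31; i++)
                        if ((bitmask >> i) & 0x1)
                                break;
                return i;               /* 32 when no bit is set */
        }

        static u32 calc_shift_ffs(u32 bitmask)   /* new form */
        {
                u32 i = ffs(bitmask);

                return i ? i - 1 : 32;
        }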
@@ -192,7 +188,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
}
if (rtlphy->rf_type == RF_1T2R) {
_rtl92c_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
}
if (rtlefuse->autoload_failflag == false) {
rtlphy->pwrgroup_cnt = 0;
@@ -227,145 +223,145 @@ void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][13]);
rtlphy->pwrgroup_cnt++;
@@ -387,21 +383,21 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -588,9 +584,9 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
else
ofdmtxpwridx = 0;
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx);
+ rtl_dbg(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx);
for (idx = 0; idx < 14; idx++) {
for (rf_path = 0; rf_path < 2; rf_path++) {
rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
@@ -675,8 +671,8 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -690,8 +686,8 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -709,7 +705,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
@@ -730,12 +726,12 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -884,9 +880,9 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
_rtl92c_phy_sw_rf_seting(hw, channel);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1103,7 +1099,7 @@ static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
u32 i;
pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
+ if (!is2t) {
pathon = 0x0bdb25a0;
rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
} else {
@@ -1220,10 +1216,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
const u32 retrycount = 2;
- u32 bbvalue;
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+ rtl_get_bbreg(hw, 0x800, MASKDWORD);
_rtl92c_phy_save_adda_registers(hw, adda_reg,
rtlphy->adda_backup, 16);
@@ -1522,24 +1517,24 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1550,7 +1545,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92c_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
@@ -1561,9 +1556,9 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -1576,14 +1571,14 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
rtl92c_dm_write_dig(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
EXPORT_SYMBOL(rtl92c_phy_set_io);
@@ -1622,8 +1617,8 @@ void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!.\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
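Two smaller phy_common.c changes above are worth noting: the unused bbvalue assignment in _rtl92c_phy_iq_calibrate() is dropped while the rtl_get_bbreg() read itself is kept, preserving the register access; and rtl92c_phy_set_bb_reg() remains a masked read-modify-write built on the bit-shift helper. A sketch of that update step (masked_update is an illustrative stand-in operating on an already-read register value):

        #include <linux/bitops.h>
        #include <linux/types.h>

        static u32 masked_update(u32 regval, u32 bitmask, u32 data)
        {
                u32 shift;

                if (!bitmask)
                        return regval;          /* nothing selected */
                shift = ffs(bitmask) - 1;       /* lowest selected bit */
                /* caller passes data already within the field width,
                 * as the driver assumes */
                return (regval & ~bitmask) | (data << shift);
        }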
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
index a3e2c8a60967..34a730a0d81e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
@@ -28,8 +28,8 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -40,43 +40,43 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 6402a9e09be7..bb5a0c4aec93 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -183,8 +183,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -223,9 +223,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -238,9 +238,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -287,9 +287,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -326,9 +326,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -349,9 +349,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -613,22 +613,22 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl92ce_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92ce_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl92ce_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
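The "true != status" comparisons in the LLT init loop above, like the "istx == true" and "false == is2t" tests elsewhere in the series, become direct boolean tests; for a bool the two spellings are equivalent, and the short form is kernel style. Illustrative shape of the cleaned-up loop (write_one is a hypothetical callback):

        #include <linux/types.h>

        static bool llt_write_all(bool (*write_one)(unsigned int n),
                                  unsigned int count)
        {
                unsigned int i;

                for (i = 0; i < count; i++)
                        if (!write_one(i))      /* was: true != status */
                                return false;
                return true;
        }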
@@ -690,15 +690,15 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
udelay(2);
retry = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv, 0xEC), bytetmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
while ((bytetmp & BIT(0)) && retry < 1000) {
retry++;
udelay(50);
bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv, 0xEC), bytetmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
udelay(50);
}
@@ -880,14 +880,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -902,8 +902,8 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -946,8 +946,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
err = rtl92c_download_fw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1013,12 +1013,12 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(1)) && is92c) {
rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1027,7 +1027,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
exit:
@@ -1122,8 +1122,8 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1141,30 +1141,30 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Mesh Point!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Mesh Point!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1190,9 +1190,9 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
_rtl92ce_resume_tx_beacon(hw);
_rtl92ce_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1393,8 +1393,8 @@ void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl92ce_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl92ce_enable_interrupt(hw);
@@ -1406,8 +1406,8 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1714,8 +1714,8 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1732,18 +1732,18 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92ce_read_adapter_info(hw);
} else {
@@ -1839,8 +1839,8 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -1962,14 +1962,14 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %5phC\n",
- ratr_index, ratr_bitmap, rate_mask);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
@@ -2031,15 +2031,15 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2090,7 +2090,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2150,24 +2150,24 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
@@ -2175,8 +2175,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -2184,8 +2184,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
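
The hunks above mechanically replace the old RT_TRACE() macro with rtl_dbg(). Both gate output on a per-component bit mask and a verbosity level before printing. The standalone sketch below models that gating with hypothetical names; my_dbg, struct dbg_ctx, and the COMP_*/DBG_* values here are illustrative, not the rtlwifi definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative component bits and level values; not the rtlwifi ones. */
#define COMP_INIT_BIT  (1u << 0)
#define COMP_MLME_BIT  (1u << 1)
#define DBG_LOUD_LVL   3

struct dbg_ctx {
	uint32_t comp_mask;	/* which COMP_* components are enabled */
	int level;		/* highest verbosity that still prints */
};

/* Print only when the component bit is set and the level is enabled,
 * which is the gating both RT_TRACE and rtl_dbg perform. */
#define my_dbg(ctx, comp, lvl, ...)					\
	do {								\
		if (((ctx)->comp_mask & (comp)) &&			\
		    (lvl) <= (ctx)->level)				\
			fprintf(stderr, __VA_ARGS__);			\
	} while (0)

int main(void)
{
	struct dbg_ctx ctx = { COMP_INIT_BIT, DBG_LOUD_LVL };

	my_dbg(&ctx, COMP_INIT_BIT, DBG_LOUD_LVL, "printed\n");   /* gate passes */
	my_dbg(&ctx, COMP_MLME_BIT, DBG_LOUD_LVL, "suppressed\n"); /* bit off */
	return 0;
}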
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index d6933d36ada2..57132278eb5c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -19,8 +19,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -47,8 +47,8 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -119,7 +119,7 @@ void rtl92ce_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n",
+ ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index f6574f31fa3b..04735da11168 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -25,9 +25,9 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -44,9 +44,9 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -99,9 +99,9 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -132,9 +132,9 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
@@ -144,10 +144,10 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = RTL8192CEMAC_2T_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -180,20 +180,20 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -221,8 +221,8 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -243,21 +243,21 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table = RTL8192CERADIOA_2TARRAY;
radiob_arraylen = RADIOB_2TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_2TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CERADIOA_2TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CERADIOA_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = RADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8192CE_RADIOA_1TARRAY;
radiob_arraylen = RADIOB_1TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_1TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -293,9 +293,9 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -348,7 +348,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
@@ -418,8 +418,8 @@ static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
@@ -446,18 +446,17 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -472,8 +471,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -498,27 +497,27 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92ce_phy_set_rf_sleep(hw);
break;
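
The phy.c hunks above also touch the ERFOFF/ERFSLEEP path, which polls every TX queue for up to MAX_DOZE_WAITING_TIMES_9x tries before letting the RF doze. A minimal userspace sketch of that bounded-wait shape; wait_tx_drained and fake_len are toy stand-ins for the driver's skb queue probes.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DOZE_TRIES 100	/* stand-in for MAX_DOZE_WAITING_TIMES_9x */

/* Poll each queue until it drains or the retry budget runs out;
 * queue_len is a toy probe standing in for skb_queue_len(). */
static bool wait_tx_drained(unsigned int (*queue_len)(unsigned int),
			    unsigned int nqueues)
{
	unsigned int q = 0, tries = 0;

	while (q < nqueues) {
		if (queue_len(q) == 0) {
			q++;			/* drained, check next */
			continue;
		}
		if (++tries >= MAX_DOZE_TRIES)
			return false;		/* give up and doze anyway */
		/* the driver udelay(10)s here between probes */
	}
	return true;
}

static unsigned int fake_len(unsigned int q)
{
	static unsigned int pending = 3;

	return (q == 0 && pending) ? pending-- : 0; /* drains after 3 polls */
}

int main(void)
{
	printf("%s\n", wait_tx_drained(fake_len, 4) ? "drained" : "timed out");
	return 0;
}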
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
index 713859488744..8508a711d46a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
@@ -470,13 +470,13 @@ static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 8fc3cb824066..c0635309a92d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -361,15 +361,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
bool lastseg = ((hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
rcu_read_lock();
@@ -477,8 +476,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -516,7 +515,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -528,16 +527,15 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
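
Besides the logging rename, the trx.c hunks above migrate from the legacy PCI DMA wrappers to the generic DMA API. Below is a short sketch of the resulting map/check/unmap pattern; my_map_tx() and my_unmap_tx() are illustrative helpers, but dma_map_single(), dma_mapping_error(), and dma_unmap_single() are the actual kernel calls the patch switches to.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a TX buffer for device access; dev and buf are placeholders,
 * not rtlwifi fields. */
static int my_map_tx(struct device *dev, void *buf, size_t len,
		     dma_addr_t *mapping)
{
	/* dma_map_single(&pdev->dev, ...) replaces pci_map_single(pdev, ...),
	 * and DMA_TO_DEVICE replaces PCI_DMA_TODEVICE. */
	*mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* The handle must be validated before use, exactly as the
	 * converted hunks do with dma_mapping_error(). */
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* Matching teardown once the hardware is done with the buffer. */
static void my_unmap_tx(struct device *dev, dma_addr_t mapping, size_t len)
{
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
}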
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
index 9d1167ff3b50..9823872692b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
@@ -25,8 +25,8 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -37,42 +37,42 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
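
rtl92cu_dm_dynamic_txpower() above classifies the smoothed signal strength (PWDB) into TX power levels, with deliberate gaps between the bands so the level does not flap near a threshold. A sketch of that ladder, assuming illustrative threshold values rather than the driver's TX_POWER_NEAR_FIELD_THRESH_* constants:

#include <stdio.h>

enum txpwr { TXPWR_NORMAL, TXPWR_LEVEL1 };

/* Hysteresis thresholds; the numbers are illustrative only. */
#define LVL1 67
#define LVL2 74

/* Signal bands map to a power level; values falling in the gaps
 * between bands keep the previous level (hysteresis). */
static enum txpwr classify(unsigned long pwdb, enum txpwr prev)
{
	if (pwdb >= LVL2)
		return TXPWR_LEVEL1;		/* very near field */
	if (pwdb < LVL2 - 3 && pwdb >= LVL1)
		return TXPWR_LEVEL1;		/* near field */
	if (pwdb < LVL1 - 5)
		return TXPWR_NORMAL;		/* normal range */
	return prev;				/* hysteresis band */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(80, TXPWR_NORMAL),	/* 1: above LVL2 */
	       classify(60, TXPWR_LEVEL1),	/* 0: below LVL1 - 5 */
	       classify(72, TXPWR_NORMAL));	/* 0: gap keeps prev */
	return 0;
}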
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 0ae9cfc65272..6312fddd9c00 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -386,8 +386,8 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
- rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
+ rtlhal->oem_id);
}
void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
@@ -403,11 +403,11 @@ void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->epromtype = (tmp_u1b & BOOT_FROM_EEPROM) ?
EEPROM_93C46 : EEPROM_BOOT_EFUSE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from %s\n",
- tmp_u1b & BOOT_FROM_EEPROM ? "EERROM" : "EFUSE");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from %s\n",
+ tmp_u1b & BOOT_FROM_EEPROM ? "EERROM" : "EFUSE");
rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload %s\n",
- tmp_u1b & EEPROM_EN ? "OK!!" : "ERR!!");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload %s\n",
+ tmp_u1b & EEPROM_EN ? "OK!!" : "ERR!!");
_rtl92cu_read_adapter_info(hw);
_rtl92cu_hal_customized_behavior(hw);
return;
@@ -424,8 +424,8 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
do {
if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Autoload Done!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Autoload Done!\n");
break;
}
if (pollingcount++ > 100) {
@@ -443,9 +443,9 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
if (0 == (value8 & LDV12_EN)) {
value8 |= LDV12_EN;
rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n",
- value8);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n",
+ value8);
udelay(100);
value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
value8 &= ~ISO_MD2PP;
@@ -828,7 +828,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
? WMM_CHIP_B_TX_PAGE_BOUNDARY
: WMM_CHIP_A_TX_PAGE_BOUNDARY;
}
- if (false == rtl92c_init_llt_table(hw, boundary)) {
+ if (!rtl92c_init_llt_table(hw, boundary)) {
pr_err("Failed to init LLT Table!\n");
return -EINVAL;
}
@@ -860,13 +860,13 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value = 0x0;
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open sw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open sw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
@@ -877,8 +877,8 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
if (IS_NORMAL_CHIP(rtlhal->version))
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
- sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -958,8 +958,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
}
err = rtl92c_download_fw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1280,32 +1280,32 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
_rtl92cu_resume_tx_beacon(hw);
_rtl92cu_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS:No such media status(%x)\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS:No such media status(%x)\n",
+ type);
}
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1438,9 +1438,9 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_TCR, value32);
value32 |= TSFRST;
rtl_write_dword(rtlpriv, REG_TCR, value32);
- RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
- "SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_BEACON, DBG_LOUD,
+ "SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+ value32);
/* TODO: Modify later (Find the right parameters)
* NOTE: Fix test chip's bug (about contention windows's randomness) */
if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
@@ -1458,8 +1458,8 @@ void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
- bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
+ bcn_interval);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
}
@@ -1599,7 +1599,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SIFS\n");
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SIFS\n");
break;
}
case HW_VAR_SLOT_TIME:{
@@ -1607,8 +1607,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 QOS_MODE = 1;
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
if (QOS_MODE) {
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -1672,9 +1672,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
0xf8) |
min_spacing_to_set);
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -1687,9 +1687,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set &= 0x1f;
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -1721,9 +1721,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(REG_AGGLEN_LMT + index),
p_regtoset[index]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -1740,9 +1740,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u4b_ac_param |= (u32) ((cw_max & 0xF) <<
AC_PARAM_ECW_MAX_OFFSET);
u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "queue:%x, ac_param:%x\n",
- e_aci, u4b_ac_param);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "queue:%x, ac_param:%x\n",
+ e_aci, u4b_ac_param);
switch (e_aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
@@ -1770,8 +1770,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_RCR:{
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
mac->rx_conf = ((u32 *) (val))[0];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "### Set RCR(0x%08x) ###\n", mac->rx_conf);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "### Set RCR(0x%08x) ###\n", mac->rx_conf);
break;
}
case HW_VAR_RETRY_LIMIT:{
@@ -1780,9 +1780,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
retry_limit << RETRY_LIMIT_LONG_SHIFT);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG,
- "Set HW_VAR_RETRY_LIMIT(0x%08x)\n",
- retry_limit);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_DMESG,
+ "Set HW_VAR_RETRY_LIMIT(0x%08x)\n",
+ retry_limit);
break;
}
case HW_VAR_DUAL_TSF_RST:
@@ -1987,8 +1987,8 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2121,14 +2121,14 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %5phC\n",
- ratr_index, ratr_bitmap, rate_mask);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
memcpy(rtlpriv->rate_mask, rate_mask, 5);
/* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
* "scheduled while atomic" if called directly */
@@ -2194,8 +2194,8 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
ERFOFF : ERFON;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "pwrdown, 0x5c(BIT7)=%02x\n", u1tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "pwrdown, 0x5c(BIT7)=%02x\n", u1tmp);
} else {
rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
rtl_read_byte(rtlpriv,
@@ -2203,26 +2203,26 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
ERFON : ERFOFF;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "GPIO_IN=%02x\n", u1tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "GPIO_IN=%02x\n", u1tmp);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "N-SS RF =%x\n",
- e_rfpowerstate_toset);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "N-SS RF =%x\n",
+ e_rfpowerstate_toset);
}
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
ppsc->hwradiooff = false;
actuallyset = true;
} else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "GPIOChangeRF - HW Radio OFF\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio OFF\n");
ppsc->hwradiooff = true;
actuallyset = true;
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "pHalData->bHwRadioOff and eRfPowerStateToSet do not match: pHalData->bHwRadioOff %x, eRfPowerStateToSet %x\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "pHalData->bHwRadioOff and eRfPowerStateToSet do not match: pHalData->bHwRadioOff %x, eRfPowerStateToSet %x\n",
ppsc->hwradiooff, e_rfpowerstate_toset);
}
if (actuallyset) {
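
rtl92cu_gpio_radio_on_off_checking() above reads a GPIO bit, derives the target RF state, and acts only when it differs from the cached hwradiooff flag. A compact model of that edge-triggered check, under the assumption of a single status bit (bit 3 here is arbitrary, and ps_ctx is a stand-in for the driver's power-save state):

#include <stdbool.h>
#include <stdio.h>

enum rf_state { ERFOFF, ERFON };

struct ps_ctx { bool hwradiooff; };

/* Returns true only when the state actually flipped. */
static bool radio_switch_check(struct ps_ctx *ps, unsigned char gpio)
{
	enum rf_state target = (gpio & (1u << 3)) ? ERFON : ERFOFF;

	if (ps->hwradiooff && target == ERFON) {
		ps->hwradiooff = false;		/* HW radio ON, RF ON */
		return true;			/* edge: act on it */
	}
	if (!ps->hwradiooff && target == ERFOFF) {
		ps->hwradiooff = true;		/* HW radio OFF, RF OFF */
		return true;
	}
	return false;				/* no edge: nothing to set */
}

int main(void)
{
	struct ps_ctx ps = { .hwradiooff = true };

	printf("%d %d\n",
	       radio_switch_check(&ps, 0x08),	/* 1: OFF -> ON */
	       radio_switch_check(&ps, 0x08));	/* 0: already ON */
	return 0;
}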
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index cc13a4a8f856..1488f52a2d2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -23,8 +23,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -49,8 +49,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -113,6 +113,6 @@ void rtl92cu_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92cu_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index b4b67341dc83..2890a495a23e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -91,24 +91,24 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
versionid = "UNKNOWN";
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: %s\n", versionid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: %s\n", versionid);
if (IS_92C_SERIAL(rtlhal->version))
rtlphy->rf_type =
(IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
else
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
}
/**
@@ -158,14 +158,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
for (i = 0; i < (boundary - 1); i++) {
rst = rtl92c_llt_write(hw, i , i + 1);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #1 fail\n", __func__);
return rst;
}
}
/* end of list */
rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #2 fail\n", __func__);
return rst;
}
@@ -176,14 +176,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
*/
for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
rst = rtl92c_llt_write(hw, i, (i + 1));
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #3 fail\n", __func__);
return rst;
}
}
/* Let last entry point to the start entry of ring buffer */
rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #4 fail\n", __func__);
return rst;
}
@@ -215,7 +215,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -269,30 +269,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry\n");
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -300,8 +300,8 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
@@ -383,27 +383,27 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
value = NT_NO_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
value = NT_LINK_AD_HOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
value = NT_LINK_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
value = NT_AS_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Network type %d not supported!\n", type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Network type %d not supported!\n", type);
return -EOPNOTSUPP;
}
rtl_write_byte(rtlpriv, MSR, value);
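
rtl92c_init_llt_table() above chains LLT entries into a TX list terminated by 0xFF plus a ring for the packet buffer, and the patch replaces each `true != rst` comparison with the idiomatic `!rst`. A self-contained model of that layout follows, with llt_write() standing in for the register-backed rtl92c_llt_write():

#include <stdbool.h>
#include <stdio.h>

#define N_ENTRIES 8	/* toy table; the hardware table is larger */

static bool llt_write(unsigned char *llt, unsigned int entry,
		      unsigned int next)
{
	if (entry >= N_ENTRIES)
		return false;
	llt[entry] = (unsigned char)next;
	return true;
}

/* 0..boundary-1 chained for TX and terminated with 0xFF;
 * boundary..last linked as a ring for the packet buffer. */
static bool init_llt(unsigned char *llt, unsigned int boundary)
{
	unsigned int i;

	for (i = 0; i < boundary - 1; i++)
		if (!llt_write(llt, i, i + 1))	/* was: true != rst */
			return false;
	if (!llt_write(llt, boundary - 1, 0xFF))	/* end of TX list */
		return false;
	for (i = boundary; i < N_ENTRIES - 1; i++)
		if (!llt_write(llt, i, i + 1))
			return false;
	return llt_write(llt, N_ENTRIES - 1, boundary);	/* close ring */
}

int main(void)
{
	unsigned char llt[N_ENTRIES];

	printf("%s\n", init_llt(llt, 4) ? "ok" : "fail");
	return 0;
}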
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index 9cd028cb2239..a8d9fe269f31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -22,9 +22,9 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath, regaddr);
@@ -34,9 +34,9 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
}
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -48,9 +48,9 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92c_phy_rf_serial_read(hw,
@@ -74,9 +74,9 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
}
_rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
@@ -121,10 +121,10 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n");
arraylength = rtlphy->hwparam_tables[MAC_REG].length ;
ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -158,20 +158,20 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -198,8 +198,8 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -220,21 +220,21 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CURADIOA_2TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CU_RADIOB_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CURADIOA_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CU_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CU_RADIOA_1TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CU_RADIOB_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CU_RADIOA_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CU_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -269,9 +269,9 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
return;
@@ -319,7 +319,7 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
@@ -390,17 +390,17 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
init_count++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (init_count < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -421,26 +421,26 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1,
+ queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -463,25 +463,25 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92c_phy_set_rf_sleep(hw);
break;
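
The RF register helpers above read a register, isolate a field through a bitmask, and splice new data back in; _rtl92c_phy_calculate_bit_shift() finds the field's offset from its mask. The following is a generic reimplementation of that read-modify-write, not the driver's exact helper:

#include <stdint.h>
#include <stdio.h>

/* Position of the lowest set bit in the mask: the field's shift. */
static unsigned int calc_bit_shift(uint32_t mask)
{
	unsigned int shift;

	for (shift = 0; shift < 32; shift++)
		if (mask & (1u << shift))
			break;
	return shift;
}

/* Clear the masked field, then write the shifted data into it. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t data)
{
	unsigned int shift = calc_bit_shift(mask);

	return (reg & ~mask) | ((data << shift) & mask);
}

int main(void)
{
	/* e.g. write 0x5 into bits [7:4]: 0xABCD1234 -> 0xABCD1254 */
	printf("0x%08X\n", (unsigned)set_field(0xABCD1234u, 0xF0u, 0x5u));
	return 0;
}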
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index d259794a308b..288033f02266 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -431,12 +431,12 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
phy_rf_cfg_fail:
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index fc526477740f..1ad0cf37f60b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -82,8 +82,8 @@ static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB Chip-B & WMM Setting.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB Chip-B & WMM Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 2;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -92,8 +92,8 @@ static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB typical Setting.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB typical Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 3;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 2;
@@ -110,8 +110,8 @@ static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB 3EP Setting for WMM.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB 3EP Setting for WMM.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -120,8 +120,8 @@ static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB 3EP Setting for typical.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB 3EP Setting for typical.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 5;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -248,24 +248,24 @@ static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
switch (mac80211_queue_index) {
case 0: /* VO */
qsel = QSLT_VO;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "VO queue, set qsel = 0x%x\n", QSLT_VO);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "VO queue, set qsel = 0x%x\n", QSLT_VO);
break;
case 1: /* VI */
qsel = QSLT_VI;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "VI queue, set qsel = 0x%x\n", QSLT_VI);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "VI queue, set qsel = 0x%x\n", QSLT_VI);
break;
case 3: /* BK */
qsel = QSLT_BK;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "BK queue, set qsel = 0x%x\n", QSLT_BK);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "BK queue, set qsel = 0x%x\n", QSLT_BK);
break;
case 2: /* BE */
default:
qsel = QSLT_BE;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "BE queue, set qsel = 0x%x\n", QSLT_BE);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "BE queue, set qsel = 0x%x\n", QSLT_BE);
break;
}
out:
@@ -398,18 +398,18 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
fc = hdr->frame_control;
bv = ieee80211_is_probe_resp(fc);
if (bv)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Got probe response frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Got probe response frame\n");
if (ieee80211_is_beacon(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
if (ieee80211_is_data(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
- fc,
- (u32)hdr->addr1[0], (u32)hdr->addr1[1],
- (u32)hdr->addr1[2], (u32)hdr->addr1[3],
- (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
+ fc,
+ (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+ (u32)hdr->addr1[2], (u32)hdr->addr1[3],
+ (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
ieee80211_rx(hw, skb);
}
@@ -570,8 +570,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate ? 1 : 0);
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(txdesc, 1);
set_tx_desc_htc(txdesc, 1);
}
@@ -597,7 +597,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(txdesc, 1);
_rtl_fill_usb_tx_desc(txdesc);
_rtl_tx_desc_checksum(txdesc);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
}
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc8,
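The _rtl8192cu_mq_to_descq() hunk above logs which hardware queue-selection value each mac80211 queue index receives. A hedged sketch of the same mapping in table form; the QSLT_* values are taken from the diff, but the lookup-table shape is an illustrative alternative to the driver's switch statement:

/* Illustrative alternative to the switch in _rtl8192cu_mq_to_descq():
 * mac80211 queue index (0=VO, 1=VI, 2=BE, 3=BK) -> hardware qsel.
 */
static const u8 mq_to_qsel[] = {
	[0] = QSLT_VO,
	[1] = QSLT_VI,
	[2] = QSLT_BE,
	[3] = QSLT_BK,
};

static u8 mq_to_descq(unsigned int mac80211_queue_index)
{
	if (mac80211_queue_index >= ARRAY_SIZE(mq_to_qsel))
		return QSLT_BE;	/* default case, as in the switch above */
	return mq_to_qsel[mac80211_queue_index];
}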
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index 71f3b6b5d7bd..b3f25a228532 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -194,21 +194,21 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
- falsealm_cnt->cnt_fast_fsync_fail,
- falsealm_cnt->cnt_sb_search_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
+ falsealm_cnt->cnt_fast_fsync_fail,
+ falsealm_cnt->cnt_sb_search_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
}
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
@@ -221,33 +221,33 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
de_digtable->min_undec_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
} else {
de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -287,14 +287,14 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
}
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_cstate == DIG_STA_CONNECT ?
- "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
- de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
- "Low RSSI " : "High RSSI ");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
- IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
+ de_digtable->cursta_cstate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
+ de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
@@ -303,12 +303,12 @@ void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- de_digtable->cur_igvalue, de_digtable->pre_igvalue,
- de_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ de_digtable->cur_igvalue, de_digtable->pre_igvalue,
+ de_digtable->back_val);
if (de_digtable->dig_enable_flag == false) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable->pre_igvalue = 0x17;
return;
}
@@ -327,21 +327,21 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable->last_min_undec_pwdb_for_dm >= 50
&& de_digtable->min_undec_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode Off\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode Off\n");
} else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
de_digtable->min_undec_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode On\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
@@ -352,7 +352,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
u8 value_igi = de_digtable->cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable->last_min_undec_pwdb_for_dm =
@@ -371,7 +371,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
/* Not STA mode return tmp */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable->cursta_cstate = DIG_STA_CONNECT;
@@ -387,17 +387,17 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
value_igi++;
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
/* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG(): Abnormally false alarm case\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG(): Abnormally false alarm case\n");
de_digtable->large_fa_hit++;
if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
@@ -435,12 +435,12 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
}
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
@@ -450,7 +450,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
@@ -477,8 +477,8 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
@@ -487,49 +487,49 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "IBSS Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "IBSS Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undec_sm_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < 0x33)
&& (undec_sm_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Normal\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
@@ -538,20 +538,20 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
@@ -666,8 +666,8 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===> Rx Gain %x\n", u4tmp);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
@@ -695,11 +695,11 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
*cck_index_old = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -821,12 +821,12 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
};
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
@@ -846,10 +846,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -860,11 +860,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
if (ele_d ==
(ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[1] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
@@ -889,8 +889,8 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
}
old_index_done:
for (i = 0; i < rf; i++)
@@ -934,11 +934,11 @@ old_index_done:
(thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92d_phy_lc_calibrate(hw);
@@ -974,16 +974,16 @@ old_index_done:
index_mapping_internal_pa);
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
@@ -1003,14 +1003,14 @@ old_index_done:
}
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
- ofdm_index[0], cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
@@ -1050,11 +1050,11 @@ old_index_done:
BIT(24), 0x00);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
- rtlhal->interfaceindex,
- val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y);
if (cck_index >= CCK_TABLE_SIZE)
cck_index = CCK_TABLE_SIZE - 1;
@@ -1134,17 +1134,17 @@ old_index_done:
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c,
+ ele_d, val_x, val_y);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, MASKDWORD),
- rtl_get_bbreg(hw, 0xc94, MASKDWORD),
- rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
+ RFREG_OFFSET_MASK));
check_delta:
if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
@@ -1161,7 +1161,7 @@ check_delta:
rtlpriv->dm.thermalvalue = thermalvalue;
exit:
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1171,9 +1171,9 @@ static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
@@ -1186,13 +1186,13 @@ void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
}
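The rtl92d_dm_check_txpower_tracking_thermal_meter() hunk above shows a two-phase pattern: one watchdog tick arms the RF thermal meter via rtl_set_rfreg(), the next tick reads the result back and runs the tracking callback. A minimal sketch of that pattern, assuming two hypothetical helpers in place of the register write and the tracking callback:

/* Two-phase thermal-meter pattern from the dm.c hunk above: arm the
 * measurement on one tick, consume it on the next.  Both helpers are
 * hypothetical stand-ins, not driver functions.
 */
static void trigger_measure(void);	/* start the RF ADC conversion */
static void read_and_track(void);	/* read result, adjust TX power */

static bool tm_armed;

static void check_thermal_meter(void)
{
	if (!tm_armed) {
		trigger_measure();
		tm_armed = true;	/* result is ready on the next tick */
		return;
	}
	read_and_track();
	tm_armed = false;
}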
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 2064813f9381..9ddb8478784b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -47,7 +47,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8192D_PAGE_SIZE;
@@ -104,8 +104,8 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "=====> 8051 reset success (%d)\n", delay);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "=====> 8051 reset success (%d)\n", delay);
}
static int _rtl92d_fw_init(struct ieee80211_hw *hw)
@@ -114,27 +114,27 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 counter;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n");
/* polling for FW ready */
counter = 0;
do {
if (rtlhal->interfaceindex == 0) {
if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
MAC0_READY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC0_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC0_READY));
return 0;
}
udelay(5);
} else {
if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
MAC1_READY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC1_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC1_READY));
return 0;
}
udelay(5);
@@ -142,17 +142,17 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
if (rtlhal->interfaceindex == 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC0_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY));
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC1_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
- rtl_read_dword(rtlpriv, REG_MCUFWDL));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
@@ -177,13 +177,13 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
pfwdata = rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
- rtlhal->fw_version, rtlhal->fw_subversion,
- GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Shift 32 bytes for FW header!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Shift 32 bytes for FW header!!\n");
pfwdata = pfwdata + 32;
fwsize = fwsize - 32;
}
@@ -214,8 +214,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
else if (!fwdl_in_process)
break;
else
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Wait for another mac download fw\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Wait for another mac download fw\n");
}
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
value = rtl_read_byte(rtlpriv, 0x1f);
@@ -286,25 +286,25 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
u8 idx;
if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Return as RF is off!!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Return as RF is off!!!\n");
return;
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d)\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -353,30 +353,30 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
udelay(10);
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
boxcontent[0] &= ~(BIT(7));
@@ -430,14 +430,14 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -653,15 +653,15 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
dlok = true;
}
if (dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE", u1rsvdpageloc, 3);
rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!\n");
}
void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
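The _rtl92d_fill_h2c_command() hunks above wait, with a bounded udelay() poll, for the firmware to consume the previous H2C mailbox before writing the next command into it. A minimal sketch of that bounded-wait idiom, where fw_read_box() is a hypothetical stand-in for _rtl92d_check_fw_read_last_h2c():

/* Bounded mailbox poll from the H2C path above: wait for the firmware
 * to consume the previous box, giving up after a fixed budget (the
 * driver's wait_h2c_limmit counter plays the same role).
 */
static bool fw_read_box(u8 boxnum);

static bool wait_fw_read_box(u8 boxnum)
{
	int limit = 100;

	while (!fw_read_box(boxnum)) {
		if (--limit <= 0)
			return false;	/* FW never read the box; give up */
		udelay(10);
	}
	return true;
}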
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 146fe144f5f5..f849291cc587 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -204,8 +204,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME: {
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -235,9 +235,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
min_spacing_to_set);
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -249,9 +249,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -284,9 +284,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
| (factor_toset);
}
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -318,9 +318,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -340,9 +340,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -563,13 +563,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* 18. LLT_table_init(Adapter); */
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl92de_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
/* end of list */
status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
/* Make the other pages as ring buffer */
@@ -578,13 +578,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* Otherwise used as local loopback buffer. */
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92de_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
@@ -851,13 +851,13 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
@@ -867,8 +867,8 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
}
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -902,8 +902,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
err = rtl92d_download_fw(hw);
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW..\n");
return 1;
}
rtlhal->last_hmeboxnum = 0;
@@ -914,8 +914,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x605, tmp_u1b);
if (rtlhal->earlymode_enable) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EarlyMode Enabled!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EarlyMode Enabled!!!\n");
tmp_u1b = rtl_read_byte(rtlpriv, 0x4d0);
tmp_u1b = tmp_u1b | 0x1f;
@@ -1033,10 +1033,10 @@ static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (!(value32 & 0x000f0000)) {
version = VERSION_TEST_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
} else {
version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
}
return version;
}
@@ -1060,9 +1060,9 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
_rtl92de_resume_tx_beacon(hw);
_rtl92de_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
}
bcnfunc_enable = rtl_read_byte(rtlpriv, REG_BCN_CTRL);
switch (type) {
@@ -1070,27 +1070,27 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
bcnfunc_enable |= 0x08;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
bcnfunc_enable |= 0x08;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1156,8 +1156,8 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
- "Do IQK for channel:%d\n", channel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ "Do IQK for channel:%d\n", channel);
rtl92d_phy_iq_calibrate(hw);
}
}
@@ -1255,9 +1255,9 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
/* is set as 0x18, they had ever met auto load fail problem. */
rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "In PowerOff,reg0x%x=%X\n",
- REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "In PowerOff,reg0x%x=%X\n",
+ REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
/* r. Note: for PCIe interface, PON will not turn */
/* off m-bias and BandGap in PCIe suspend mode. */
@@ -1270,7 +1270,7 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&globalmutex_power, flags);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
}
void rtl92de_card_disable(struct ieee80211_hw *hw)
@@ -1328,7 +1328,7 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
udelay(50);
rtl_write_byte(rtlpriv, REG_CR, 0x0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
if (rtl92d_phy_check_poweroff(hw))
_rtl92de_poweroff_adapter(hw);
return;
@@ -1370,8 +1370,8 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl92de_enable_interrupt(hw);
@@ -1383,8 +1383,8 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
if (rm_msr)
@@ -1560,10 +1560,10 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
!((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
rtlefuse->internal_pa_5g[1] =
!((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Is D cut,Internal PA0 %d Internal PA1 %d\n",
- rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Is D cut,Internal PA0 %d Internal PA1 %d\n",
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]);
}
rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
@@ -1612,15 +1612,15 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
rtlefuse->delta_lck = tempval[1] - 1;
if (rtlefuse->eeprom_c9 == 0xFF)
rtlefuse->eeprom_c9 = 0x00;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
- rtlefuse->delta_iqk, rtlefuse->delta_lck);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
+ rtlefuse->delta_iqk, rtlefuse->delta_lck);
for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
@@ -1655,12 +1655,12 @@ static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
if (macphy_crvalue & BIT(3)) {
rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode SINGLEMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode SINGLEMAC_SINGLEPHY\n");
} else {
rtlhal->macphymode = DUALMAC_DUALPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode DUALMAC_DUALPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode DUALMAC_DUALPHY\n");
}
}
@@ -1687,15 +1687,15 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
switch (chipvalue) {
case 0xAA55:
chipver |= CHIP_92D_C_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
break;
case 0x9966:
chipver |= CHIP_92D_D_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
break;
case 0xCC33:
chipver |= CHIP_92D_E_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
break;
default:
chipver |= CHIP_92D_D_CUT;
@@ -1737,7 +1737,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
}
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
rtlefuse->dev_addr);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
_rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
/* Read Channel Plan */
@@ -1771,14 +1771,14 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->autoload_status = tmp_u1b;
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92de_read_adapter_info(hw);
@@ -1866,8 +1866,8 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -1998,9 +1998,9 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x value0:%x value1:%x\n",
- ratr_bitmap, value[0], value[1]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x value0:%x value1:%x\n",
+ ratr_bitmap, value[0], value[1]);
rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
if (macid != 0)
sta_entry->ratr_index = ratr_index;
@@ -2059,14 +2059,14 @@ bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
actuallyset = true;
@@ -2110,7 +2110,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 idx;
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -2164,38 +2164,38 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->
sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->
sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 2b76a025deb8..93d1c6a610c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -19,8 +19,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -56,8 +56,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -128,7 +128,7 @@ void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 4b672199c81d..e34d33e73e52 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -162,14 +162,9 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
+	u32 i = ffs(bitmask);
 
-	return i;
+ return i ? i - 1 : 32;
}
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
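The hunk above replaces an open-coded lowest-set-bit scan with ffs(). ffs() returns the 1-based index of the least-significant set bit and 0 for a zero argument, so "i ? i - 1 : 32" reproduces the old loop exactly, including the 32 returned when bitmask is 0. A small userspace self-check of that equivalence (the kernel provides its own ffs(); this sketch uses the libc one from <strings.h>):

/* Userspace self-check of the ffs() rewrite shown above. */
#include <assert.h>
#include <stdint.h>
#include <strings.h>

static uint32_t bit_shift_loop(uint32_t mask)
{
	uint32_t i;

	for (i = 0; i <= 31; i++)
		if ((mask >> i) & 0x1)
			break;
	return i;	/* 32 when no bit is set */
}

static uint32_t bit_shift_ffs(uint32_t mask)
{
	int i = ffs(mask);	/* 1-based index, 0 if mask == 0 */

	return i ? i - 1 : 32;
}

int main(void)
{
	assert(bit_shift_ffs(0) == bit_shift_loop(0));
	assert(bit_shift_ffs(0x1) == 0);
	assert(bit_shift_ffs(0xf000) == 12);
	assert(bit_shift_ffs(0x80000000u) == 31);
	return 0;
}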
@@ -178,8 +173,8 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) {
u8 dbi_direct = 0;
@@ -196,9 +191,9 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
}
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -210,9 +205,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
u8 dbi_direct = 0;
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (rtlhal->during_mac1init_radioa)
dbi_direct = BIT(3);
else if (rtlhal->during_mac0init_radiob)
@@ -233,9 +228,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl92de_write_dword_dbi(hw, (u16) regaddr, data, dbi_direct);
else
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -279,8 +274,8 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -298,8 +293,8 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
/* T65 RF */
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -308,17 +303,17 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -329,9 +324,9 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (bitmask == 0)
return;
spin_lock(&rtlpriv->locks.rf_lock);
@@ -346,9 +341,9 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
_rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
}
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
@@ -358,10 +353,10 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = rtl8192de_mac_2tarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:Rtl819XMAC_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:Rtl819XMAC_Array\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
@@ -519,36 +514,36 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->interfaceindex == 0) {
agctab_arraylen = AGCTAB_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_array;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_2garray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
} else {
agctab_5garraylen = AGCTAB_5G_ARRAYLENGTH;
agctab_5garray_table = rtl8192de_agctab_5garray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
}
}
phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
phy_regarray_table = rtl8192de_phy_reg_2tarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:Rtl819XPHY_REG_Array_PG\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:Rtl819XPHY_REG_Array_PG\n");
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
if (rtlhal->interfaceindex == 0) {
@@ -559,13 +554,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF register
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Normal Chip, MAC0, load Rtl819XAGCTAB_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Normal Chip, MAC0, load Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
@@ -575,13 +570,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF register
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Load Rtl819XAGCTAB_2GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Load Rtl819XAGCTAB_2GArray\n");
} else {
for (i = 0; i < agctab_5garraylen; i = i + 2) {
rtl_set_bbreg(hw,
@@ -591,13 +586,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF register
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_5garray_table[i],
- agctab_5garray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_5garray_table[i],
+ agctab_5garray_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Load Rtl819XAGCTAB_5GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Load Rtl819XAGCTAB_5GArray\n");
}
}
}
@@ -648,10 +643,10 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
return;
rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
- rtlphy->pwrgroup_cnt, index,
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
if (index == 13)
rtlphy->pwrgroup_cnt++;
}
@@ -675,8 +670,8 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -688,7 +683,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
@@ -698,7 +693,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
/* if (rtlphy->rf_type == RF_1T2R) {
* _rtl92c_phy_bb_config_1t(hw);
- * RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ * rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
*} */
if (rtlefuse->autoload_failflag == false) {
@@ -777,18 +772,18 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
radiob_array_table = rtl8192de_radiob_2t_int_paarray;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
/* this only happens when DMDP, mac0 start on 2.4G,
* mac1 start on 5G, mac 0 has to set phy0&phy1
* pathA or mac1 has to set phy0&phy1 pathA */
if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> althougth Path A, we load radiob.txt\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> althougth Path A, we load radiob.txt\n");
radioa_arraylen = radiob_arraylen;
radioa_array_table = radiob_array_table;
}
@@ -828,19 +823,19 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
(u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] =
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
@@ -938,14 +933,14 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
if (rtlphy->set_bwmode_inprogress)
return;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "FALSE driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "FALSE driver sleep or unload\n");
return;
}
rtlphy->set_bwmode_inprogress = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
switch (rtlphy->current_chan_bw) {
@@ -1001,7 +996,7 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
}
rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
@@ -1018,7 +1013,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 value8;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
rtlhal->bandset = band;
rtlhal->current_bandtype = band;
if (IS_92D_SINGLEPHY(rtlhal->version))
@@ -1028,13 +1023,13 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* reconfig BB/RF according to wireless mode */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* BB & RF Config */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
} else {
/* 5G band */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
@@ -1062,7 +1057,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
0 ? REG_MAC0 : REG_MAC1), value8);
}
mdelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
}
static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
@@ -1074,9 +1069,9 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
u8 group, i;
unsigned long flag = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
/* fc area 0xd2c */
@@ -1097,14 +1092,14 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 1);
} else {
/* G band. */
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Load RF IMR parameters for G band. IMR already setting %d\n",
- rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. IMR already setting %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Load RF IMR parameters for G band. %d\n",
- rfpath);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1122,7 +1117,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
@@ -1132,7 +1127,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/*----Store original RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
@@ -1158,7 +1153,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
/*Set 0 to 12 bits for 8255 */
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
udelay(1);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
@@ -1168,7 +1163,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
/*----Restore RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
@@ -1181,7 +1176,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
*pu4_regval);
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
}
static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
@@ -1195,10 +1190,10 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
bool need_pwr_down = false, internal_pa = false;
u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
/* config path A for 5G */
if (rtlhal->current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
u4tmp = curveindex_5g[channel - 1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
@@ -1246,14 +1241,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_reg_pram_c_5g[index][i]);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
- rf_reg_for_c_cut_5g[i],
- rf_reg_pram_c_5g[index][i],
- path, index,
- rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_5g[i],
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK));
}
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1285,11 +1280,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_for_c_cut_5g_internal_pa[i],
RFREG_OFFSET_MASK,
rf_pram_c_5g_int_pa[index][i]);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "offset 0x%x value 0x%x path %d index %d\n",
- rf_for_c_cut_5g_internal_pa[i],
- rf_pram_c_5g_int_pa[index][i],
- rfpath, index);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "offset 0x%x value 0x%x path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index);
}
} else {
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
@@ -1297,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
}
}
} else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
u4tmp = curveindex_2g[channel - 1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
@@ -1333,14 +1328,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_reg_param_for_c_cut_2g
[index][i]);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
- rf_reg_for_c_cut_2g[i],
- rf_reg_param_for_c_cut_2g[index][i],
- rf_reg_mask_for_c_cut_2g[i], path, index,
- rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_2g[i],
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK));
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
@@ -1354,7 +1349,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (rtlhal->during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
@@ -2358,8 +2353,8 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
rtlphy->iqk_matrix[indexforchannel].iqk_done =
true;
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
- "IQK OK indexforchannel %d\n", indexforchannel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
+ "IQK OK indexforchannel %d\n", indexforchannel);
}
}
@@ -2370,26 +2365,26 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
u8 indexforchannel;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
/*------Do IQK for normal chip and test chip 5G band------- */
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
- indexforchannel,
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
+ indexforchannel,
rtlphy->iqk_matrix[indexforchannel].iqk_done);
if (0 && !rtlphy->iqk_matrix[indexforchannel].iqk_done &&
rtlphy->need_iqk) {
/* Re Do IQK. */
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
- "Do IQK Matrix reg for channel:%d....\n", channel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
+ "Do IQK Matrix reg for channel:%d....\n", channel);
rtl92d_phy_iq_calibrate(hw);
} else {
/* Just load the value. */
/* 2G band just load once. */
if (((!rtlhal->load_imrandiqk_setting_for2g) &&
indexforchannel == 0) || indexforchannel > 0) {
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Just Read IQK Matrix reg for channel:%d....\n",
- channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Just Read IQK Matrix reg for channel:%d....\n",
+ channel);
if ((rtlphy->iqk_matrix[indexforchannel].
value[0] != NULL)
/*&&(regea4 != 0) */)
@@ -2413,7 +2408,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
}
}
rtlphy->need_iqk = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
@@ -2477,7 +2472,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
u32 u4tmp = 0, u4regvalue = 0;
bool bneed_powerdown_radio = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
rtlpriv->rtlhal.current_bandtype);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
@@ -2522,7 +2517,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
if (rtlpriv->rtlhal.during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
@@ -2695,11 +2690,11 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 i;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix) /
- sizeof(struct iqk_matrix_regs)),
- IQK_MATRIX_REG_NUM);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_REG_NUM);
/* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
rtlphy->iqk_matrix[i].value[0][0] = 0x100;
@@ -2844,8 +2839,8 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
return 0;
}
while (rtlphy->lck_inprogress && timecount < timeout) {
@@ -2886,8 +2881,8 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
channel = 1;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
do {
if (!rtlphy->sw_chnl_inprogress)
@@ -2904,7 +2899,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
rtlphy->sw_chnl_inprogress = false;
return 1;
}
@@ -2915,9 +2910,9 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2935,8 +2930,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
}
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
@@ -2945,19 +2940,19 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan\n");
postprocessing = true;
break;
default:
@@ -2973,7 +2968,7 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92d_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
return true;
}
@@ -3030,8 +3025,8 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Fail !!! Switch RF timeout\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Fail !!! Switch RF timeout\n");
return;
}
/* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
@@ -3065,18 +3060,18 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "awake, sleeped:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies),
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
_rtl92d_phy_set_rfon(hw);
@@ -3091,8 +3086,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFOFF:
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -3116,35 +3111,35 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else if (rtlpci->pdev->current_state != PCI_D0) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 but lower power state!\n",
- i + 1, queue_id);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 but lower power state!\n",
+ i + 1, queue_id);
break;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x, queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "Set rfsleep awaked:%d ms\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "Set rfsleep awakened:%d ms\n",
jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "sleep awaked:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies),
- rtlpriv->psc.state_inap);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awakened:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92d_phy_set_rfsleep(hw);
break;
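The power-state messages in this hunk report elapsed sleep/awake time by diffing jiffies stamps before converting to milliseconds; doing the subtraction in jiffies first keeps the arithmetic correct across counter wrap. A sketch of the idiom as used above:

#include <linux/jiffies.h>

/* Milliseconds since a stored jiffies stamp, as in the
 * last_awake_jiffies / last_sleep_jiffies messages above.
 * The unsigned subtraction wraps correctly, so the delta stays
 * valid even after the jiffies counter has rolled over once. */
static unsigned int elapsed_ms(unsigned long stamp)
{
	return jiffies_to_msecs(jiffies - stamp);
}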
@@ -3167,18 +3162,18 @@ void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
switch (rtlhal->macphymode) {
case DUALMAC_DUALPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_DUALPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_DUALPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF3);
break;
case SINGLEMAC_SINGLEPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF4);
break;
case DUALMAC_SINGLEPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF1);
break;
}
@@ -3346,7 +3341,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 rfpath, i;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
/* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* r_select_5G for path_A/B,0x878 */
@@ -3494,8 +3489,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
BIT(13), 0x3);
} else {
rtl92d_phy_enable_anotherphy(hw, false);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MAC1 use DBI to update 0x888\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MAC1 use DBI to update 0x888\n");
/* 0x888 */
rtl92de_write_dword_dbi(hw, RFPGA0_ADDALLOCKEN,
rtl92de_read_dword_dbi(hw,
@@ -3520,9 +3515,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
RFREG_OFFSET_MASK);
}
for (i = 0; i < 2; i++)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
}
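Every hunk in this file follows the same mechanical pattern: RT_TRACE becomes rtl_dbg with an identical (rtlpriv, component, level, fmt, ...) argument list, and the wrapped arguments are re-indented to line up with the shorter call's opening parenthesis. A plausible shape for such a wrapper, assuming it forwards to the existing trace backend (the macro below is a sketch, not quoted from the tree):

/* Sketch of a drop-in replacement macro: same parameters as
 * RT_TRACE, so every caller converts by renaming the macro and
 * re-aligning its continuation lines, with no behavioural change. */
#define rtl_dbg(rtlpriv, comp, level, fmt, ...)			\
	_rtl_dbg_trace(rtlpriv, comp, level,			\
		       fmt, ##__VA_ARGS__)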
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 915a36f7af5e..83787fd293de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -23,9 +23,9 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) |
BIT(11), 0x01);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "20M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
@@ -35,9 +35,9 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff));
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
0x00);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "40M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
default:
@@ -391,11 +391,11 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
/* MAC0 Need PHY1 load radio_b.txt . Driver use DBI to write. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
/* Enable BB and RF power */
rtl92de_write_dword_dbi(hw, REG_SYS_ISO_CTRL,
rtl92de_read_dword_dbi(hw, REG_SYS_ISO_CTRL, direct) |
@@ -405,7 +405,7 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
* and radio_b.txt has been load. */
bresult = false;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
return bresult;
}
@@ -421,17 +421,17 @@ void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/* check MAC0 enable or not again now, if
* enabled, not power down radio A. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
/* power down RF radio A according to YuNan's advice. */
rtl92de_write_dword_dbi(hw, RFPGA0_XA_LSSIPARAMETER,
0x00000000, direct);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
@@ -573,8 +573,8 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
break;
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
@@ -588,7 +588,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
rtl92d_phy_powerdown_anotherphy(hw, false);
else if (need_pwrdown_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
phy_rf_cfg_fail:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index ab5b05ef168e..8944712274b5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -508,11 +508,11 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92d));
@@ -526,9 +526,9 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl92de_insert_emcontent(ptcb_desc,
(u8 *)(skb->data));
}
@@ -625,8 +625,8 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -652,7 +652,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_id(pdesc, 8);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -664,15 +664,15 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 fw_queue = QSLT_BEACON;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
__le32 *pdesc = (__le32 *)pdesc8;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
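Besides the logging rename, the trx.c hunks migrate from the deprecated PCI DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) with PCI_DMA_TODEVICE becomes dma_map_single(&pdev->dev, ...) with DMA_TO_DEVICE, and the returned handle is validated with dma_mapping_error() before it can reach a descriptor. A condensed sketch of the migrated pattern (map_tx_buf() is a hypothetical helper, not a driver function):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for device-bound DMA and refuse to continue on a
 * failed mapping; the address must never be written into a TX
 * descriptor unchecked, which is what both hunks above guard. */
static int map_tx_buf(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}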
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 551aa86825ed..997ff115b9ab 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -86,16 +86,16 @@ static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92ee_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -174,7 +174,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
} else {
dm_dig->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -334,34 +334,34 @@ static void rtl92ee_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
rtl_dm_dig->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl92ee_dm_check_rssi_monitor(struct ieee80211_hw *hw)
@@ -687,8 +687,8 @@ static void rtl92ee_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
if (rtlpriv->cfg->ops->get_btc_status()) {
if (!rtlpriv->btcoexist.btc_ops->
btc_is_bt_disabled(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
return;
}
}
@@ -718,11 +718,11 @@ static void rtl92ee_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtldm->large_cfo_hit = true;
return;
}
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
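The large_cfo_hit hunk above is a type cleanup rather than a behaviour change: a flag written as 0/1 becomes a bool assigned true/false and tested with !, matching kernel style for logical state. In sketch form, with the struct and field names abbreviated from the hunk:

#include <linux/types.h>

struct cfo_state {
	bool large_cfo_hit;	/* was an int written as 0 / 1 */
	unsigned int cfo_ave_pre;
};

static void track_cfo(struct cfo_state *s, unsigned int cfo_ave,
		      unsigned int cfo_ave_diff)
{
	if (cfo_ave_diff > 20 && !s->large_cfo_hit) {	/* was == 0 */
		s->large_cfo_hit = true;		/* was = 1 */
		return;
	}
	s->large_cfo_hit = false;			/* was = 0 */
	s->cfo_ave_pre = cfo_ave;
}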
@@ -842,8 +842,8 @@ static bool _rtl92ee_dm_ra_state_check(struct ieee80211_hw *hw,
low_rssithresh_for_ra += go_up_gap;
break;
default:
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "wrong rssi level setting %d !\n", *ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "wrong rssi level setting %d !\n", *ratr_state);
break;
}
@@ -872,14 +872,14 @@ static void rtl92ee_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 05462422d247..88b7a715f4c5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -36,7 +36,7 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -118,21 +118,21 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "normal Firmware SIZE %d\n" , fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware no Header, Signature(%#x)\n",
- pfwheader->signature);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware no Header, Signature(%#x)\n",
+ pfwheader->signature);
}
if (rtlhal->mac_func_enable) {
@@ -180,12 +180,12 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (ppsc->dot11_psmode != EACTIVE ||
ppsc->inactive_pwrstate == ERFOFF) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "FillH2CCommand8192E(): Return because RF is off!!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "FillH2CCommand8192E(): Return because RF is off!!!\n");
return;
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
/* 1. Prevent race condition in setting H2C cmd.
* (copy from MgntActSet_RF_State().)
@@ -193,17 +193,17 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -240,8 +240,8 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -263,18 +263,18 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Waiting too long for FW read clear HMEBox(%d)!!!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!!!\n",
+ boxnum);
break;
}
udelay(10);
isfw_read =
_rtl92ee_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
}
@@ -282,18 +282,18 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
* H2C cmd, break and give up this H2C.
*/
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Write H2C reg BOX[%d] fail,Fw don't read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C reg BOX[%d] fail,Fw don't read.\n",
+ boxnum);
break;
}
/* 4. Fill the H2C cmd into box */
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -329,8 +329,8 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -340,16 +340,16 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw,
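The _rtl92ee_fill_h2c_command() hunks above trace a two-stage handshake: a spinlock-protected h2c_setinprogress flag serialises callers competing for the HMEBOX registers, then a bounded udelay() poll waits for the firmware to drain the previous box before the next command is written. A condensed sketch of that flow (hw_box_free() and hw_write_box() are hypothetical stand-ins for the register I/O):

#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct h2c_ctx {
	spinlock_t h2c_lock;
	bool h2c_setinprogress;
};

static bool hw_box_free(struct h2c_ctx *c);	/* hypothetical */
static void hw_write_box(struct h2c_ctx *c, const u8 *cmd, int len);

static bool send_h2c(struct h2c_ctx *c, const u8 *cmd, int len)
{
	unsigned long flags;
	int waits = 0;

	/* Stage 1: claim the mailboxes, spinning as the driver does. */
	spin_lock_irqsave(&c->h2c_lock, flags);
	while (c->h2c_setinprogress) {
		spin_unlock_irqrestore(&c->h2c_lock, flags);
		udelay(100);
		if (++waits > 1000)
			return false;	/* give up rather than hang */
		spin_lock_irqsave(&c->h2c_lock, flags);
	}
	c->h2c_setinprogress = true;
	spin_unlock_irqrestore(&c->h2c_lock, flags);

	/* Stage 2: bounded poll until the FW has read the last box. */
	for (waits = 0; !hw_box_free(c); waits++) {
		if (waits >= 100)
			goto out;	/* FW never drained it; drop cmd */
		udelay(10);
	}
	hw_write_box(c, cmd, len);
out:
	spin_lock_irqsave(&c->h2c_lock, flags);
	c->h2c_setinprogress = false;
	spin_unlock_irqrestore(&c->h2c_lock, flags);
	return true;
}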
@@ -388,8 +388,8 @@ void rtl92ee_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD ,
- " _8051Reset92E(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " _8051Reset92E(): 8051 reset success .\n");
}
void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
@@ -408,8 +408,8 @@ void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -750,15 +750,15 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD ,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 3);
rtl92ee_fill_h2c_cmd(hw, H2C_92E_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
}
@@ -783,11 +783,11 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -838,11 +838,11 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 53011c2a44f5..88fa2e593fef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -119,9 +119,9 @@ static void _rtl92ee_set_fw_clock_on(struct ieee80211_hw *hw,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_92E;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! PSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! PSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -319,8 +319,8 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -390,8 +390,8 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Download RSVD page failed!\n");
/* Enable Bcn */
_rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
@@ -447,8 +447,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_TRACE,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_TRACE,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -494,8 +494,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(REG_AGGLEN_LMT + i),
reg[i]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR:%#x\n", fac);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR:%#x\n", fac);
}
}
break;
@@ -528,9 +528,9 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -545,16 +545,16 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
}
break;
@@ -665,8 +665,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -771,8 +771,8 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
PWR_INTF_PCI_MSK,
RTL8192E_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -794,9 +794,9 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
if (!rtlhal->mac_func_enable) {
- if (_rtl92ee_llt_table_init(hw) == false) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LLT table init fail\n");
+ if (!_rtl92ee_llt_table_init(hw)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LLT table init fail\n");
return false;
}
}
@@ -1107,14 +1107,14 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
u8 tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1130,8 +1130,8 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1153,8 +1153,8 @@ static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
*/
tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8192EE(): true!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8192EE(): true!!\n");
return true;
}
return false;
@@ -1167,8 +1167,8 @@ static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ResetPcieInterfaceDMA8192EE()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ResetPcieInterfaceDMA8192EE()\n");
/* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
* released by SD1 Alan.
@@ -1281,7 +1281,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
u8 tmp_u1b, u1byte;
u32 tmp_u4b;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8192EE hw init\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8192EE hw init\n");
rtlpriv->rtlhal.being_init_adapter = true;
rtlpriv->intf_ops->disable_aspm(hw);
@@ -1295,7 +1295,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
}
if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
_rtl8192ee_reset_pcie_interface_dma(rtlpriv,
rtlhal->mac_func_enable);
rtlhal->mac_func_enable = false;
@@ -1324,8 +1324,8 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, 0x8000);
err = rtl92ee_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
rtlhal->fw_ready = false;
return err;
@@ -1401,12 +1401,12 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
efuse_one_byte_read(hw, 0x1FA, &tmp_u1b);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
}
if ((!(tmp_u1b & BIT(1))) && (rtlphy->rf_type == RF_2T2R)) {
rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path B\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path B\n");
}
rtl_write_byte(rtlpriv, REG_NAV_UPPER, ((30000 + 127) / 128));
@@ -1421,8 +1421,8 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, 0x4fc, 0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "end of Rtl8192EE hw init %x\n", err);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "end of Rtl8192EE hw init %x\n", err);
return 0;
}
@@ -1441,9 +1441,9 @@ static enum version_8192e _rtl92ee_read_chip_version(struct ieee80211_hw *hw)
else
version = (enum version_8192e)VERSION_NORMAL_CHIP_2T2R_8192E;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1459,26 +1459,26 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1503,9 +1503,9 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
_rtl92ee_resume_tx_beacon(hw);
_rtl92ee_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1611,7 +1611,7 @@ static void _rtl92ee_poweroff_adapter(struct ieee80211_hw *hw)
rtlhal->mac_func_enable = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
/* Run LPS WL RFOFF flow */
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
@@ -1651,7 +1651,7 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
enum nl80211_iftype opmode;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8192ee card disable\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8192ee card disable\n");
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
@@ -1710,8 +1710,8 @@ void rtl92ee_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
}
@@ -1721,8 +1721,8 @@ void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1788,15 +1788,15 @@ static void _rtl8192ee_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rf, addr = EEPROM_TX_PWR_INX, group, i = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM92E(): PROMContent[0x%x]=0x%x\n",
- (addr + 1), hwinfo[addr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM92E(): PROMContent[0x%x]=0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
if (0xFF == hwinfo[addr+1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rf = 0 ; rf < MAX_RF_PATH ; rf++) {
/* 2.4G default value */
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -2113,8 +2113,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -2134,8 +2134,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E];
if (hwinfo[EEPROM_XTAL_92E] == 0xFF)
@@ -2172,8 +2172,8 @@ static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw)
rtlpriv->ledctl.led_opendrain = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
@@ -2191,18 +2191,18 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
rtlpriv->dm.rfpath_rxenable[0] = true;
rtlpriv->dm.rfpath_rxenable[1] = true;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92ee_read_adapter_info(hw);
} else {
@@ -2361,8 +2361,8 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
ratr_index = _rtl92ee_mrate_idx_to_arfr_id(hw, ratr_index);
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[0] = macid;
@@ -2372,11 +2372,11 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3], rate_mask[4],
- rate_mask[5], rate_mask[6]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3], rate_mask[4],
+ rate_mask[5], rate_mask[6]);
rtl92ee_fill_h2c_cmd(hw, H2C_92E_RA_MASK, 7, rate_mask);
_rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
@@ -2438,7 +2438,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2466,8 +2466,8 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2498,27 +2498,27 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -2603,7 +2603,7 @@ void rtl92ee_allow_all_destaddr(struct ieee80211_hw *hw,
if (write_into_reg)
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
+ rtl_dbg(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ "receive_config=0x%08X, write_into_reg=%d\n",
+ rtlpci->receive_config, write_into_reg);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 78202ad4036e..fb4ea3a8481f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -19,8 +19,8 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u32 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -38,8 +38,8 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -50,8 +50,8 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -68,8 +68,8 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -118,6 +118,6 @@ void rtl92ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction);
_rtl92ee_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index bb291b951f4d..cc0bcaf13e96 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -43,15 +43,15 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -62,9 +62,9 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -74,9 +74,9 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -85,9 +85,9 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -97,9 +97,9 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -111,9 +111,9 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- addr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ addr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -127,9 +127,9 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- addr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ addr, bitmask, data, rfpath);
}
static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -172,9 +172,9 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -196,20 +196,16 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n", rfpath,
- pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n", rfpath,
+ pphyreg->rf3wire_offset, data_and_addr);
}
static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
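
    [annotation] The _rtl92ee_phy_calculate_bit_shift() hunk above replaces a
    32-iteration bit scan with ffs(). ffs() returns the 1-based index of the
    least significant set bit and 0 for a zero argument, so "i ? i - 1 : 32"
    reproduces the old loop exactly, including the fall-through value of 32
    for an empty mask. A standalone sketch, using __builtin_ffs in place of
    the kernel's ffs():

	#include <assert.h>
	#include <stdint.h>

	/* Old form: scan bits 0..31; falls through to 32 for a zero mask. */
	static uint32_t bit_shift_loop(uint32_t bitmask)
	{
		uint32_t i;

		for (i = 0; i <= 31; i++) {
			if ((bitmask >> i) & 0x1)
				break;
		}
		return i;
	}

	/* New form: ffs() is 1-based and returns 0 for a zero mask, hence
	 * the "i ? i - 1 : 32" translation. */
	static uint32_t bit_shift_ffs(uint32_t bitmask)
	{
		uint32_t i = (uint32_t)__builtin_ffs((int)bitmask);

		return i ? i - 1 : 32;
	}

	int main(void)
	{
		assert(bit_shift_loop(0x00F0) == bit_shift_ffs(0x00F0)); /* 4 */
		assert(bit_shift_loop(0x0001) == bit_shift_ffs(0x0001)); /* 0 */
		assert(bit_shift_loop(0x0000) == bit_shift_ffs(0x0000)); /* 32 */
		return 0;
	}
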
@@ -400,8 +396,8 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d\n", path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d\n", path);
return;
}
@@ -420,14 +416,14 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d\n", band);
}
}
@@ -440,8 +436,8 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d\n", path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d\n", path);
return 0;
}
@@ -460,14 +456,14 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d()\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d()\n", band);
}
return value;
}
@@ -606,8 +602,8 @@ static void phy_convert_txpwr_dbm_to_rel_val(struct ieee80211_hw *hw)
0, 3, base);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "<==phy_convert_txpwr_dbm_to_rel_val()\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<==%s\n", __func__);
}
static void _rtl92ee_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
@@ -659,11 +655,11 @@ static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8192EMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8192EMACPHY_Array\n");
arraylength = RTL8192EE_MAC_ARRAY_LEN;
ptrarray = RTL8192EE_MAC_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img:RTL8192EE_MAC_ARRAY LEN %d\n" , arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8192EE_MAC_ARRAY LEN %d\n", arraylength);
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
return true;
@@ -776,10 +772,10 @@ static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
}
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
- array[i],
- array[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
+ array[i],
+ array[i + 1]);
}
}
return true;
@@ -843,17 +839,17 @@ static void _rtl92ee_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 section = _rtl92ee_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
return;
}
if (rfpath > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
return;
}
if (txnum > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
return;
}
@@ -888,8 +884,8 @@ static bool phy_config_bb_with_pghdrfile(struct ieee80211_hw *hw,
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -914,9 +910,9 @@ bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_A:
len = RTL8192EE_RADIOA_ARRAY_LEN;
array = RTL8192EE_RADIOA_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8192EE_RADIOA_ARRAY %d\n" , len);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8192EE_RADIOA_ARRAY %d\n", len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
for (i = 0; i < len; i = i + 2) {
v1 = array[i];
v2 = array[i+1];
@@ -961,9 +957,9 @@ bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_B:
len = RTL8192EE_RADIOB_ARRAY_LEN;
array = RTL8192EE_RADIOB_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8192EE_RADIOB_ARRAY %d\n" , len);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8192EE_RADIOB_ARRAY %d\n", len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
for (i = 0; i < len; i = i + 2) {
v1 = array[i];
v2 = array[i+1];
@@ -1025,21 +1021,21 @@ void rtl92ee_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void phy_init_bb_rf_register_def(struct ieee80211_hw *hw)
@@ -1236,8 +1232,8 @@ static u8 _rtl92ee_get_txpower_index(struct ieee80211_hw *hw,
if (channel < 1 || channel > 14) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_DMESG,
- "Illegal channel!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_DMESG,
+ "Illegal channel!!\n");
}
if (IS_CCK_RATE((s8)rate))
@@ -1395,8 +1391,8 @@ static void _rtl92ee_set_txpower_index(struct ieee80211_hw *hw, u8 pwr_idx,
pwr_idx);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else if (rfpath == RF90_PATH_B) {
@@ -1514,12 +1510,12 @@ static void _rtl92ee_set_txpower_index(struct ieee80211_hw *hw, u8 pwr_idx,
pwr_idx);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
}
}
@@ -1578,8 +1574,8 @@ static void phy_set_txpower_index_by_rate_section(struct ieee80211_hw *hw,
rtlphy->current_chan_bw,
channel, ht_rates2t, 8);
} else
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RateSection %d\n", section);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RateSection %d\n", section);
}
void rtl92ee_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
@@ -1665,10 +1661,10 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1722,7 +1718,7 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1739,8 +1735,8 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92ee_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1753,8 +1749,8 @@ void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1772,7 +1768,7 @@ void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1792,13 +1788,13 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92ee_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1900,9 +1896,9 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2248,7 +2244,7 @@ static u8 _rtl92ee_phy_path_b_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
(((reg_ecc & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "Path B Rx IQK fail!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "Path B Rx IQK fail!!\n");
return result;
}
@@ -2545,8 +2541,8 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
patha_ok = _rtl92ee_phy_path_a_iqk(hw, is2t);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw,
RTX_POWER_BEFORE_IQK_A,
MASKDWORD) & 0x3FF0000)
@@ -2556,17 +2552,17 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Tx IQK Fail!!, ret = 0x%x\n",
- patha_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Tx IQK Fail!!, ret = 0x%x\n",
+ patha_ok);
}
for (i = 0 ; i < retrycount ; i++) {
patha_ok = _rtl92ee_phy_path_a_rx_iqk(hw, is2t);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw,
RRX_POWER_BEFORE_IQK_A_2,
MASKDWORD) & 0x3FF0000)
@@ -2577,14 +2573,14 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Rx IQK Fail!!, ret = 0x%x\n",
- patha_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Rx IQK Fail!!, ret = 0x%x\n",
+ patha_ok);
}
if (0x00 == patha_ok)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A IQK failed!!, ret = 0\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A IQK failed!!, ret = 0\n");
if (is2t) {
_rtl92ee_phy_path_a_standby(hw);
/* Turn Path B ADDA on */
@@ -2598,8 +2594,8 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0 ; i < retrycount ; i++) {
pathb_ok = _rtl92ee_phy_path_b_iqk(hw);
if (pathb_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw,
RTX_POWER_BEFORE_IQK_B,
MASKDWORD) & 0x3FF0000)
@@ -2610,16 +2606,16 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Tx IQK Fail!!, ret = 0x%x\n",
- pathb_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Tx IQK Fail!!, ret = 0x%x\n",
+ pathb_ok);
}
for (i = 0 ; i < retrycount ; i++) {
pathb_ok = _rtl92ee_phy_path_b_rx_iqk(hw, is2t);
if (pathb_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Rx IQK Success!!\n");
result[t][6] = (rtl_get_bbreg(hw,
RRX_POWER_BEFORE_IQK_B_2,
MASKDWORD) & 0x3FF0000)
@@ -2630,18 +2626,18 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Rx IQK Fail!!, ret = 0x%x\n",
- pathb_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Rx IQK Fail!!, ret = 0x%x\n",
+ pathb_ok);
}
if (0x00 == pathb_ok)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B IQK failed!!, ret = 0\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B IQK failed!!, ret = 0\n");
}
/* Back to BB mode, load original value */
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "IQK:Back to BB mode, load original value!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "IQK:Back to BB mode, load original value!\n");
rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0);
if (t != 0) {
@@ -2724,7 +2720,7 @@ static void _rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD , "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (is_hal_stop(rtlhal)) {
u8 u1btmp;
@@ -2953,24 +2949,24 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2981,7 +2977,7 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92ee_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2991,14 +2987,14 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_dig = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
rtl92ee_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1);
rtl92ee_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE , "no set txpower\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "no set txpower\n");
rtl92ee_phy_set_txpower_level(hw, rtlphy->current_channel);
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
@@ -3009,14 +3005,14 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
rtl92ee_dm_write_cck_cca_thres(hw, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl92ee_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -3062,16 +3058,16 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeping:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON sleeping:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ee_phy_set_rf_on(hw);
}
@@ -3089,27 +3085,27 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -3132,32 +3128,32 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92ee_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
index 6b8ef680dc57..bbe632d56b19 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
@@ -118,12 +118,12 @@ static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index dc7b515bdc85..eef7a041e80d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -363,9 +363,9 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -468,9 +468,9 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
write_point = (u16)(tmp_4byte & 0x7ff);
if (write_point != rtlpci->rx_ring[queue_index].next_rx_rp) {
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_DMESG,
- "!!!write point is 0x%x, reg 0x3B4 value is 0x%x\n",
- write_point, tmp_4byte);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_DMESG,
+ "!!!write point is 0x%x, reg 0x3B4 value is 0x%x\n",
+ write_point, tmp_4byte);
tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX);
read_point = (u16)((tmp_4byte>>16) & 0x7ff);
write_point = (u16)(tmp_4byte & 0x7ff);
@@ -675,11 +675,11 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
skb_push(skb, EM_HDR_LEN);
memset(skb->data, 0, EM_HDR_LEN);
}
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
@@ -697,9 +697,9 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc,
USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl92ee_insert_emcontent(ptcb_desc,
(u8 *)(skb->data));
}
@@ -798,8 +798,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -824,7 +824,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -834,15 +834,14 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 txdesc_len = 40;
__le32 *pdesc = (__le32 *)pdesc8;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, txdesc_len);
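
    [annotation] Besides the logging rename, the trx.c hunks above move from
    the legacy pci_map_single()/PCI_DMA_TODEVICE interface to the generic DMA
    API, passing &pdev->dev rather than the pci_dev itself. A minimal sketch
    of the map/check/unmap pattern (my_tx_one() is a hypothetical wrapper,
    not a driver function):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	/* Map the skb for device reads, bail out on mapping failure, and
	 * unmap with the same device, size, and direction once the hardware
	 * is done with the buffer. */
	static int my_tx_one(struct pci_dev *pdev, struct sk_buff *skb)
	{
		dma_addr_t mapping;

		mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, mapping))
			return -ENOMEM;

		/* ... hand "mapping" to the hardware descriptor here ... */

		dma_unmap_single(&pdev->dev, mapping, skb->len,
				 DMA_TO_DEVICE);
		return 0;
	}
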
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
index a6e4384ceea1..5fce3db52cd9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
@@ -144,10 +144,10 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermal meter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermal meter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
if (thermalvalue) {
rtlpriv->dm.thermalvalue = thermalvalue;
@@ -158,8 +158,8 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
(rtlpriv->efuse.thermalmeter[0] << 8) |
(thermalvalue << 16));
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Write to FW Thermal Val = 0x%x\n", fw_cmd);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Write to FW Thermal Val = 0x%x\n", fw_cmd);
rtl_write_dword(rtlpriv, WFM5, fw_cmd);
rtl92s_phy_chk_fwcmd_iodone(hw);
@@ -264,10 +264,10 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
}
if (ra->pre_ratr_state != ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
- rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
- ra->pre_ratr_state, ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
+ rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
+ ra->pre_ratr_state, ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -576,8 +576,8 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
@@ -588,21 +588,21 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
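
    [annotation] In the thermal-tracking hunk above, the measured thermal
    value and the eFuse calibration byte are packed into a single 32-bit word
    before being written to the WFM5 firmware mailbox. A sketch of that field
    packing (the opcode and exact layout here are illustrative, not the
    documented WFM5 format):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack an opcode plus two calibration bytes into one command word:
	 * opcode in bits 0..7, eFuse thermal value in bits 8..15, measured
	 * thermal value in bits 16..23. */
	static uint32_t pack_thermal_cmd(uint8_t opcode, uint8_t eeprom_thermal,
					 uint8_t thermalvalue)
	{
		return (uint32_t)opcode |
		       ((uint32_t)eeprom_thermal << 8) |
		       ((uint32_t)thermalvalue << 16);
	}

	int main(void)
	{
		printf("fw_cmd = 0x%08x\n", pack_thermal_cmd(0x2f, 0x1d, 0x1b));
		return 0;
	}
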
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 47a5b95ca2b9..f570495af044 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -39,8 +39,8 @@ static bool _rtl92s_firmware_enable_cpu(struct ieee80211_hw *hw)
do {
cpustatus = rtl_read_byte(rtlpriv, TCR);
if (cpustatus & IMEM_RDY) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "IMEM Ready after CPU has refilled\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "IMEM Ready after CPU has refilled\n");
break;
}
@@ -195,8 +195,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
short pollingcnt = 1000;
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LoadStaus(%d)\n", loadfw_status);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LoadStaus(%d)\n", loadfw_status);
firmware->fwstatus = (enum fw_status)loadfw_status;
@@ -256,9 +256,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
goto status_check_fail;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "DMEM code download success, cpustatus(%#x)\n",
- cpustatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "DMEM code download success, cpustatus(%#x)\n",
+ cpustatus);
/* Prevent Delay too much and being scheduled out */
/* Polling Load Firmware ready */
@@ -270,9 +270,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
udelay(40);
} while (pollingcnt--);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Polling Load Firmware ready, cpustatus(%x)\n",
- cpustatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Polling Load Firmware ready, cpustatus(%x)\n",
+ cpustatus);
if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
(pollingcnt <= 0)) {
@@ -290,8 +290,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, RCR, (tmpu4b | RCR_APPFCS |
RCR_APP_ICV | RCR_APP_MIC));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Current RCR settings(%#x)\n", tmpu4b);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Current RCR settings(%#x)\n", tmpu4b);
/* Set to normal mode. */
rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL);
@@ -304,9 +304,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
}
status_check_fail:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "loadfw_status(%d), rtstatus(%x)\n",
- loadfw_status, rtstatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "loadfw_status(%d), rtstatus(%x)\n",
+ loadfw_status, rtstatus);
return rtstatus;
}
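
    [annotation] _rtl92s_firmware_checkready() above polls cpustatus for
    LOAD_FW_READY with a 40 us delay per read under a pollingcnt budget of
    1000 tries. The bounded-poll idiom, sketched with hypothetical stand-ins
    for the driver's register accessor and ready flag:

	#include <linux/delay.h>
	#include <linux/types.h>

	/* Re-read a status register a fixed number of times with a small
	 * delay between reads, then report failure instead of spinning
	 * forever. read_status() supplies the current register value. */
	static bool wait_fw_ready(u8 (*read_status)(void), u8 ready_bit)
	{
		short pollingcnt = 1000;
		u8 status;

		do {
			status = read_status();
			if ((status & ready_bit) == ready_bit)
				return true;
			udelay(40);
		} while (--pollingcnt > 0);

		return false;
	}
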
@@ -337,11 +337,11 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
firmware->firmwareversion = byte(pfwheader->version, 0);
firmware->pfwheader->fwpriv.hci_sel = 1;/* pcie */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
- pfwheader->signature,
- pfwheader->version, pfwheader->dmem_size,
- pfwheader->img_imem_size, pfwheader->img_sram_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
+ pfwheader->signature,
+ pfwheader->version, pfwheader->dmem_size,
+ pfwheader->img_imem_size, pfwheader->img_sram_size);
/* 2. Retrieve IMEM image. */
if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 81313e0ca834..47fabce5c235 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -111,8 +111,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, SLOT_TIME, val[0]);
@@ -156,9 +156,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -172,9 +172,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -215,9 +215,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(factorlevel[17] << 4));
rtl_write_byte(rtlpriv, AGGLEN_LMT_H, regtoset);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -253,9 +253,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -276,8 +276,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
rtl_write_byte(rtlpriv, ACMHWCTRL, acm_ctrl);
break;
}
@@ -417,14 +417,14 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value = 0x0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -435,8 +435,8 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
sec_reg_value |= SCR_RXUSEDK;
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
- sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -870,10 +870,10 @@ static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw)
/* Change Program timing */
rtl_write_byte(rtlpriv, REG_EFUSE_CTRL + 3, 0x72);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "EFUSE CONFIG OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "EFUSE CONFIG OK\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
}
@@ -960,9 +960,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 2. download firmware */
rtstatus = rtl92s_download_fw(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now... "
- "Please copy FW into /lib/firmware/rtlwifi\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now... Please copy FW into /lib/firmware/rtlwifi\n");
err = 1;
goto exit;
}
@@ -1014,7 +1013,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, RF_CTRL, 0x07);
if (!rtl92s_phy_rf_config(hw)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
err = rtstatus;
goto exit;
}
@@ -1147,23 +1146,23 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1606,8 +1605,8 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1671,11 +1670,11 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1692,16 +1691,16 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
@@ -1711,7 +1710,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
for (i = 0; i < 6; i++)
rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
/* Get Tx Power Level by Channel */
/* Read Tx power of Channel 1 ~ 14 from EEPROM. */
@@ -1906,7 +1905,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
* index diff of legacy to HT OFDM rate. */
tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
rtlefuse->eeprom_txpowerdiff = tempval;
- rtlefuse->legacy_httxpowerdiff =
+ rtlefuse->legacy_ht_txpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
@@ -1964,15 +1963,15 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
tempval = rtl_read_byte(rtlpriv, 0x07);
if (!(tempval & BIT(0))) {
rtlefuse->b1x1_recvcombine = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RF_TYPE=1T2R but only 1SS\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RF_TYPE=1T2R but only 1SS\n");
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
- rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
+ rtlefuse->eeprom_oemid);
	/* set channel plan to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
@@ -1987,15 +1986,15 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, EPROM_CMD);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92se_read_adapter_info(hw);
} else {
@@ -2101,8 +2100,8 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
else
rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_REFRESH_BG);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, ARFR0));
}
static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2256,8 +2255,8 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
mask |= (bmulticast ? 1 : 0) << 9 | (macid & 0x1f) << 4 | (band & 0xf);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_TRACE, "mask = %x, bitmap = %x\n",
- mask, ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_TRACE, "mask = %x, bitmap = %x\n",
+ mask, ratr_bitmap);
rtl_write_dword(rtlpriv, 0x2c4, ratr_bitmap);
rtl_write_dword(rtlpriv, WFM5, (FW_RA_UPDATE_MASK | (mask << 8)));
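
The rate-mask update packs three fields into one word before the WFM5 write: the band in bits 0-3, the 5-bit MAC ID in bits 4-8, and a multicast flag in bit 9; the result is then shifted left by 8 and OR-ed with FW_RA_UPDATE_MASK. A small helper distilling that packing (helper name hypothetical):

#include <linux/types.h>

/* Mirror of the expression above: band -> bits 0-3,
 * macid -> bits 4-8, multicast flag -> bit 9. */
static u16 pack_ra_mask(bool bmulticast, u8 macid, u8 band)
{
	return (bmulticast ? 1 : 0) << 9 |
	       (macid & 0x1f) << 4 |
	       (band & 0xf);
}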
@@ -2332,15 +2331,15 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
rfpwr_toset = _rtl92se_rf_onoff_detect(hw);
if ((ppsc->hwradiooff) && (rfpwr_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "RFKILL-HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "RFKILL-HW Radio ON, RF ON\n");
rfpwr_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if ((!ppsc->hwradiooff) && (rfpwr_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF,
- DBG_DMESG, "RFKILL-HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF,
+ DBG_DMESG, "RFKILL-HW Radio OFF, RF OFF\n");
rfpwr_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2404,7 +2403,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2463,26 +2462,26 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 2d18bc1ee480..ecbf425f679f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -27,8 +27,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- LEDCFG, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -57,8 +57,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtlpriv = rtl_priv(hw);
if (!rtlpriv || rtlpriv->max_fw_size)
return;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- LEDCFG, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -119,7 +119,7 @@ void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92se_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index d5c0eb462315..63283d9e7485 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -16,14 +16,9 @@
static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
+ u32 i = ffs(bitmask);
- return i;
+ return i ? i - 1 : 32;
}
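
The rewritten helper replaces an open-coded scan for the lowest set bit with ffs(). The subtlety: ffs() is 1-based and returns 0 for a zero argument, so the new code subtracts one and falls back to 32 for an empty bitmask, which is exactly what the old loop returned when it ran off the end. A user-space equivalence check (POSIX ffs() from <strings.h>; the kernel's version lives in <linux/bitops.h>):

#include <assert.h>
#include <strings.h>

/* Old behaviour: index of the lowest set bit, 32 when bitmask == 0. */
static unsigned int bitshift_loop(unsigned int bitmask)
{
	unsigned int i;

	for (i = 0; i <= 31; i++)
		if ((bitmask >> i) & 0x1)
			break;
	return i;	/* i == 32 if the loop found no set bit */
}

/* New behaviour: ffs() is 1-based and returns 0 for a zero argument. */
static unsigned int bitshift_ffs(unsigned int bitmask)
{
	unsigned int i = ffs(bitmask);

	return i ? i - 1 : 32;
}

int main(void)
{
	unsigned int masks[] = { 0x1, 0x8, 0xff00, 0x80000000u, 0 };
	unsigned int k;

	for (k = 0; k < 5; k++)
		assert(bitshift_loop(masks[k]) == bitshift_ffs(masks[k]));
	return 0;
}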
u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
@@ -31,15 +26,15 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue = 0, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
@@ -51,9 +46,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -63,9 +58,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
@@ -123,8 +118,8 @@ static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSI_READBACK_DATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
@@ -146,8 +141,8 @@ static void _rtl92s_phy_rf_serial_write(struct ieee80211_hw *hw,
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
@@ -157,8 +152,8 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -170,9 +165,9 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -187,9 +182,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
return;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -204,9 +199,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
@@ -239,9 +234,9 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u8 reg_bw_opmode;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (rtlphy->set_bwmode_inprogress)
return;
@@ -296,7 +291,7 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
rtl92s_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
@@ -434,8 +429,8 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
u32 delay;
bool ret;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n",
+ rtlphy->current_channel);
if (rtlphy->sw_chnl_inprogress)
return 0;
@@ -471,7 +466,7 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
return 1;
}
@@ -530,20 +525,19 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "awake, sleeped:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies),
- rtlpriv->psc.state_inap);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
rtl_write_word(rtlpriv, CMDR, 0x37FC);
rtl_write_byte(rtlpriv, TXPAUSE, 0x00);
@@ -560,8 +554,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -586,34 +580,34 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "sleep awaked:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies),
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awaked:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92se_phy_set_rf_sleep(hw);
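
The reworded power-state messages all report elapsed time the same way: a jiffies timestamp saved at the previous transition is subtracted from the current jiffies and converted with jiffies_to_msecs(). A minimal sketch of the pattern (the variable mirrors the driver's ppsc bookkeeping):

#include <linux/jiffies.h>

/* Stamp the transition, then report the delta in milliseconds. */
static unsigned long last_sleep_jiffies;

static unsigned int ms_asleep(void)
{
	return jiffies_to_msecs(jiffies - last_sleep_jiffies);
}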
@@ -968,7 +962,7 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
radio_b_tblen = RADIOB_ARRAYLENGTH;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
rtstatus = true;
switch (rfpath) {
@@ -1088,20 +1082,20 @@ void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] = rtl_get_bbreg(hw,
ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
/* read framesync */
rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
@@ -1163,10 +1157,10 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
_rtl92s_phy_get_txpower_index(hw, channel, &cckpowerlevel[0],
&ofdmpowerlevel[0]);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
- channel, cckpowerlevel[0], cckpowerlevel[1],
- ofdmpowerlevel[0], ofdmpowerlevel[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
+ channel, cckpowerlevel[0], cckpowerlevel[1],
+ ofdmpowerlevel[0], ofdmpowerlevel[1]);
_rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0],
&ofdmpowerlevel[0]);
@@ -1224,17 +1218,17 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
skip:
switch (rtlhal->current_fwcmd_io) {
case FW_CMD_RA_RESET:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_RESET);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_ACTIVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_ACTIVE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_ACTIVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_ACTIVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_N:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_REFRESH_N\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_REFRESH_N\n");
input = FW_RA_REFRESH;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
@@ -1242,29 +1236,29 @@ skip:
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_BG:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_BG\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_BG\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_REFRESH);
rtl92s_phy_chk_fwcmd_iodone(hw);
rtl_write_dword(rtlpriv, WFM5, FW_RA_DISABLE_RSSI_MASK);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_N_COMB:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_N_COMB\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_N_COMB\n");
input = FW_RA_IOT_N_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_BG_COMB:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_BG_COMB\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_BG_COMB\n");
input = FW_RA_IOT_BG_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_IQK_ENABLE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_IQK_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_IQK_ENABLE\n");
rtl_write_dword(rtlpriv, WFM5, FW_IQK_ENABLE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1299,7 +1293,7 @@ skip:
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
break;
case FW_CMD_LPS_ENTER:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_ENTER\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_ENTER\n");
current_aid = rtlpriv->mac80211.assoc_id;
rtl_write_dword(rtlpriv, WFM5, (FW_LPS_ENTER |
((current_aid | 0xc000) << 8)));
@@ -1308,18 +1302,18 @@ skip:
	 * turbo mode until driver leaves LPS */
break;
case FW_CMD_LPS_LEAVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_LEAVE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_LEAVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_LPS_LEAVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_ADD_A2_ENTRY:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_ADD_A2_ENTRY\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_ADD_A2_ENTRY\n");
rtl_write_dword(rtlpriv, WFM5, FW_ADD_A2_ENTRY);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_CTRL_DM_BY_DRIVER:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "FW_CMD_CTRL_DM_BY_DRIVER\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "FW_CMD_CTRL_DM_BY_DRIVER\n");
rtl_write_dword(rtlpriv, WFM5, FW_CTRL_DM_BY_DRIVER);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1344,9 +1338,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
u16 fw_cmdmap = FW_CMD_IO_QUERY(rtlpriv);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
- fw_cmdio, rtlhal->set_fwcmd_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
+ fw_cmdio, rtlhal->set_fwcmd_inprogress);
do {
/* We re-map to combined FW CMD ones if firmware version */
@@ -1383,30 +1377,30 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
* DM map table in the future. */
switch (fw_cmdio) {
case FW_CMD_RA_INIT:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "RA init!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "RA init!!\n");
fw_cmdmap |= FW_RA_INIT_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
/* Clear control flag to sync with FW. */
FW_CMD_IO_CLR(rtlpriv, FW_RA_INIT_CTL);
break;
case FW_CMD_DIG_DISABLE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG disable!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG disable!!\n");
fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
case FW_CMD_DIG_ENABLE:
case FW_CMD_DIG_RESUME:
if (!(rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE)) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG enable or resume!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG enable or resume!!\n");
fw_cmdmap |= (FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
}
break;
case FW_CMD_DIG_HALT:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG halt!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG halt!!\n");
fw_cmdmap &= ~(FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
@@ -1421,9 +1415,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
fw_param |= ((thermalval << 24) |
(rtlefuse->thermalmeter[0] << 16));
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set TxPwr tracking!! FwCmdMap(%#x), FwParam(%#x)\n",
- fw_cmdmap, fw_param);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set TxPwr tracking!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1443,9 +1437,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
/* Clear FW parameter in terms of RA parts. */
fw_param &= FW_RA_PARAM_CLR;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "[FW CMD] [New Version] Set RA/IOT Comb in n mode!! FwCmdMap(%#x), FwParam(%#x)\n",
- fw_cmdmap, fw_param);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "[FW CMD] [New Version] Set RA/IOT Comb in n mode!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1531,8 +1525,8 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
case FW_CMD_PAPE_CONTROL:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "[FW CMD] Set PAPE Control\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "[FW CMD] Set PAPE Control\n");
fw_cmdmap &= ~FW_PAPE_CTL_BY_SW_HW;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index a37855f57e76..5a493602aaf2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -25,7 +25,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
/* We only care about the path A for legacy. */
if (rtlefuse->eeprom_version < 2) {
- pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
+ pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_ht_txpowerdiff & 0xf);
} else {
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
[RF90_PATH_A][chnl - 1];
@@ -95,13 +95,13 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
}
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
- p_final_pwridx[0], p_final_pwridx[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
- p_final_pwridx[0], p_final_pwridx[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
}
}
@@ -124,9 +124,9 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
if (ant_pwr_diff < -8)
ant_pwr_diff = -8;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Antenna Diff from RF-B to RF-A = %d (0x%x)\n",
- ant_pwr_diff, ant_pwr_diff & 0xf);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Antenna Diff from RF-B to RF-A = %d (0x%x)\n",
+ ant_pwr_diff, ant_pwr_diff & 0xf);
ant_pwr_diff &= 0xf;
}
@@ -143,8 +143,8 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RFPGA0_TXGAINSTAGE, (BXBTXAGC | BXCTXAGC | BXDTXAGC),
u4reg_val);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n",
- RFPGA0_TXGAINSTAGE, u4reg_val);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n",
+ RFPGA0_TXGAINSTAGE, u4reg_val);
}
static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
@@ -169,8 +169,8 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "RTK better performance, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
case 1:
/* Realtek regulatory increase power diff defined
@@ -178,9 +178,9 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Realtek regulatory, 40MHz, writeval = 0x%x\n",
- writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Realtek regulatory, 40MHz, writeval = 0x%x\n",
+ writeval);
} else {
chnlgroup = 0;
@@ -199,16 +199,16 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
+ ((index < 2) ?
pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Realtek regulatory, 20MHz, writeval = 0x%x\n",
- writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Realtek regulatory, 20MHz, writeval = 0x%x\n",
+ writeval);
}
break;
case 2:
/* Better regulatory don't increase any power diff */
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Better regulatory, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Better regulatory, writeval = 0x%x\n", writeval);
break;
case 3:
/* Customer defined power diff. increase power diff
@@ -216,15 +216,15 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
chnlgroup = 0;
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "customer's limit, 40MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht40
- [RF90_PATH_A][chnl - 1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "customer's limit, 40MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "customer's limit, 20MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht20
- [RF90_PATH_A][chnl - 1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "customer's limit, 20MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht20
+ [RF90_PATH_A][chnl - 1]);
}
for (i = 0; i < 4; i++) {
@@ -256,20 +256,20 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
(pwrdiff_limit[2] << 16) |
(pwrdiff_limit[1] << 8) |
(pwrdiff_limit[0]);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Customer's limit = 0x%x\n", customer_limit);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Customer's limit = 0x%x\n", customer_limit);
writeval = customer_limit + ((index < 2) ?
pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Customer, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Customer, writeval = 0x%x\n", writeval);
break;
default:
chnlgroup = 0;
writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "RTK better performance, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 7a54497b7df2..6d352a3161b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -65,8 +65,8 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct rt_firmware *pfirmware = NULL;
char *fw_name = "rtlwifi/rtl8192sefw.bin";
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 9eaa5348b556..38034102aacb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -328,13 +328,13 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
bool firstseg = (!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)));
bool lastseg = (!(hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)));
- dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -488,7 +488,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
	/* DWORD 8 */
set_tx_desc_tx_buffer_address(pdesc, mapping);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
@@ -500,12 +500,12 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
/* Clear all status */
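
Beyond the RT_TRACE rename, trx.c also moves from the legacy PCI DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) with PCI_DMA_TODEVICE becomes dma_map_single(&pdev->dev, ...) with DMA_TO_DEVICE, and the error check switches from pci_dma_mapping_error() to dma_mapping_error(). A self-contained sketch of the converted shape (helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for device-bound DMA using the generic API, as the
 * converted code above does; returns -ENOMEM if the mapping failed. */
static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	return dma_mapping_error(&pdev->dev, *mapping) ? -ENOMEM : 0;
}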
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index c61a92df9d73..8ada31380efa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -82,16 +82,16 @@ static void rtl8723e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
@@ -150,9 +150,9 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
dm_digtable->back_val;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "rssi_val_min = %x back_val %x\n",
- dm_digtable->rssi_val_min, dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "rssi_val_min = %x back_val %x\n",
+ dm_digtable->rssi_val_min, dm_digtable->back_val);
rtl8723e_dm_write_dig(hw);
}
@@ -201,10 +201,10 @@ static void rtl8723e_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
rtl8723e_dm_write_dig(hw);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "curmultista_cstate = %x dig_ext_port_stage %x\n",
- dm_digtable->curmultista_cstate,
- dm_digtable->dig_ext_port_stage);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "curmultista_cstate = %x dig_ext_port_stage %x\n",
+ dm_digtable->curmultista_cstate,
+ dm_digtable->dig_ext_port_stage);
}
static void rtl8723e_dm_initial_gain_sta(struct ieee80211_hw *hw)
@@ -212,10 +212,10 @@ static void rtl8723e_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_cstate = %x, cursta_cstate = %x\n",
- dm_digtable->presta_cstate,
- dm_digtable->cursta_cstate);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "presta_cstate = %x, cursta_cstate = %x\n",
+ dm_digtable->presta_cstate,
+ dm_digtable->cursta_cstate);
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
@@ -296,8 +296,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);
}
@@ -354,8 +354,8 @@ static void rtl8723e_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -367,47 +367,47 @@ static void rtl8723e_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if (rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl8723e_phy_set_txpower_level(hw, rtlphy->current_channel);
}
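
The dynamic-txpower branch maps the smoothed PWDB onto a power level with deliberate gaps between the bands (the -3 and -5 offsets), so small RSSI wobbles near a threshold do not toggle the level on every pass. A distilled helper under that reading (band edges copied from the branch above; the final return keeps the previous level inside a gap):

/* Sketch only; constants are the driver's thresholds. */
static u8 dtp_level(long pwdb, u8 prev)
{
	if (pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2)
		return TXHIGHPWRLEVEL_LEVEL1;	/* TxPwr = 0x0  */
	if (pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1 &&
	    pwdb < TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)
		return TXHIGHPWRLEVEL_LEVEL1;	/* TxPwr = 0x10 */
	if (pwdb < TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)
		return TXHIGHPWRLEVEL_NORMAL;
	return prev;	/* inside a dead band: keep the old level */
}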
@@ -419,10 +419,10 @@ void rtl8723e_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->back_val);
if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -521,9 +521,9 @@ static void rtl8723e_dm_initialize_txpower_tracking_thermalmeter(
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl8723e_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -561,14 +561,14 @@ static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- " driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- " driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver does not control rate adaptive mask\n");
return;
}
@@ -612,14 +612,14 @@ static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -716,31 +716,31 @@ static void rtl8723e_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
rtl8723e_dm_rf_saving(hw, false);
@@ -820,21 +820,21 @@ void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp_byte = 0;
if (!rtlpriv->btcoexist.bt_coexistence) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM]{BT], BT not exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM]{BT], BT not exist!!\n");
return;
}
if (!rtlpriv->btcoexist.init_set) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM][BT], rtl8723e_dm_bt_coexist()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM][BT], %s\n", __func__);
rtl8723e_dm_init_bt_coexist(hw);
}
tmp_byte = rtl_read_byte(rtlpriv, 0x40);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM][BT], 0x40 is 0x%x\n", tmp_byte);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], bt_dm_coexist start\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM][BT], 0x40 is 0x%x\n", tmp_byte);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], bt_dm_coexist start\n");
rtl8723e_dm_bt_coexist_8723(hw);
}
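
Two call sites in this file also stop hard-coding the function name in the message and let the compiler supply it through __func__, so a later rename cannot leave a stale string behind. Where many messages share the same component and prefix, a hypothetical wrapper keeps call sites short:

/* Hypothetical convenience macro; rtl_dbg used as in the hunks above. */
#define btc_dbg(rtlpriv, fmt, ...)					\
	rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,			\
		"[DM][BT], %s: " fmt, __func__, ##__VA_ARGS__)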
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 33481232fad0..d1b50a80c191 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -43,22 +43,22 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -110,9 +110,9 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -121,24 +121,24 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
isfw_read = _rtl8723e_check_fw_read_last_h2c(hw,
boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -217,16 +217,16 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -252,7 +252,7 @@ void rtl8723e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
@@ -458,16 +458,16 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n",
u1rsvdpageloc, 3);
rtl8723e_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
void rtl8723e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
@@ -501,11 +501,11 @@ void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -564,11 +564,11 @@ void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 3ac31ec26517..6c4fedc3ed63 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -102,12 +102,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Low\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh) {
@@ -116,18 +116,18 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 thresh error!!\n");
return rtlpriv->btcoexist.bt_pre_rssi_state;
}
@@ -144,12 +144,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Low\n");
}
} else if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
@@ -164,8 +164,8 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to High\n");
} else if (undecoratedsmoothed_pwdb < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpriv->btcoexist.cstate |=
@@ -174,12 +174,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Medium\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh1) {
@@ -190,12 +190,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at High\n");
}
}
}
@@ -230,12 +230,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
|= BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Low\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh) {
@@ -244,18 +244,18 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
|= BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI thresh error!!\n");
return rtlpriv->btcoexist.bt_pre_rssi_state;
}
if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
@@ -271,12 +271,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Low\n");
}
} else if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
@@ -291,8 +291,8 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to High\n");
} else if (undecoratedsmoothed_pwdb < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpriv->btcoexist.cstate
@@ -301,12 +301,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Medium\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh1) {
@@ -317,12 +317,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at High\n");
}
}
}
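
Note: the multi-level RSSI classifier being converted above is, at heart, a small hysteresis state machine: the smoothed power reading is compared against two thresholds, and the previous state decides which transitions are reachable, which is exactly what produces the paired "switch to"/"stay at" trace lines. A condensed, compilable sketch of that logic (the thresholds and tolerance margin below are illustrative stand-ins, not the driver's tuned values):

#include <stdio.h>

enum rssi_state { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

/* The previous state gates which transitions are reachable; the tolerance
 * margin keeps the state from flapping when the reading hovers near a
 * threshold. */
static enum rssi_state classify(enum rssi_state prev, long pwdb,
				long thresh, long thresh1)
{
	const long tol = 5; /* hysteresis margin (illustrative) */

	if (thresh > thresh1)
		return prev; /* mirrors the "RSSI thresh error" early return */

	switch (prev) {
	case RSSI_LOW:
		return pwdb >= thresh + tol ? RSSI_MEDIUM : RSSI_LOW;
	case RSSI_MEDIUM:
		if (pwdb >= thresh1 + tol)
			return RSSI_HIGH;
		return pwdb < thresh ? RSSI_LOW : RSSI_MEDIUM;
	default: /* RSSI_HIGH */
		return pwdb < thresh1 ? RSSI_MEDIUM : RSSI_HIGH;
	}
}

int main(void)
{
	printf("state=%d\n", classify(RSSI_LOW, 50, 37, 47)); /* -> MEDIUM */
	return 0;
}
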
@@ -342,9 +342,9 @@ long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
undecoratedsmoothed_pwdb
= rtlpriv->dm.entry_min_undec_sm_pwdb;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_get_rx_ss() = %ld\n",
- undecoratedsmoothed_pwdb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s = %ld\n", __func__,
+ undecoratedsmoothed_pwdb);
return undecoratedsmoothed_pwdb;
}
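
Note: besides the macro rename, the hunk above drops a hard-coded function name from the format string in favor of "%s", __func__, so the trace stays correct if the function is ever renamed and identical format strings can be merged at link time. A minimal sketch (the helper name and value here are made up for illustration):

#include <stdio.h>

static long get_rx_signal_strength(void)
{
	long pwdb = -42; /* stand-in for entry_min_undec_sm_pwdb */

	/* __func__ expands to the enclosing function's name at compile time */
	printf("%s = %ld\n", __func__, pwdb);
	return pwdb;
}

int main(void)
{
	get_rx_signal_strength();
	return 0;
}
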
@@ -367,10 +367,10 @@ void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw,
}
rtlpriv->btcoexist.balance_on = balance_on;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
- balance_on ? "ON" : "OFF", ms0, ms1, h2c_parameter[0]<<16 |
- h2c_parameter[1]<<8 | h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+ balance_on ? "ON" : "OFF", ms0, ms1, h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 | h2c_parameter[2]);
rtl8723e_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
}
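
Note: this hunk also normalizes spacing around the shift operators that reassemble the three H2C payload bytes into one word for the log; the packing itself is unchanged. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	/* balance on, 20 ms / 20 ms: values are illustrative */
	unsigned char h2c_parameter[3] = { 0x01, 0x14, 0x14 };
	unsigned int packed = h2c_parameter[0] << 16 |
			      h2c_parameter[1] << 8 |
			      h2c_parameter[2];

	printf("[DM][BT], Balance=[ON:20ms:20ms], write 0xc=0x%x\n", packed);
	return 0;
}
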
@@ -381,8 +381,8 @@ void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (type == BT_AGCTABLE_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]AGCTable Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]AGCTable Off!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
@@ -400,8 +400,8 @@ void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_G1, 0xfffff, 0x30355);
} else if (type == BT_AGCTABLE_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]AGCTable On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]AGCTable On!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
@@ -428,12 +428,12 @@ void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (type == BT_BB_BACKOFF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]BBBackOffLevel Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]BBBackOffLevel Off!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
} else if (type == BT_BB_BACKOFF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]BBBackOffLevel On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]BBBackOffLevel On!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
rtlpriv->btcoexist.sw_coexist_all_off = false;
}
@@ -442,14 +442,14 @@ void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
void rtl8723e_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_fw_coex_all_off()\n");
+	rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		"%s\n", __func__);
if (rtlpriv->btcoexist.fw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_fw_coex_all_off(), real Do\n");
+	rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+		"%s, real Do\n", __func__);
rtl8723e_dm_bt_fw_coex_all_off_8723a(hw);
rtlpriv->btcoexist.fw_coexist_all_off = true;
}
@@ -458,14 +458,14 @@ void rtl8723e_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_sw_coex_all_off()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s\n", __func__);
if (rtlpriv->btcoexist.sw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_sw_coex_all_off(), real Do\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s, real Do\n", __func__);
rtl8723e_dm_bt_sw_coex_all_off_8723a(hw);
rtlpriv->btcoexist.sw_coexist_all_off = true;
}
@@ -474,13 +474,13 @@ void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_hw_coex_all_off()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s\n", __func__);
if (rtlpriv->btcoexist.hw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_hw_coex_all_off(), real Do\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s, real Do\n", __func__);
rtl8723e_dm_bt_hw_coex_all_off_8723a(hw);
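
Note: every hunk above in this file is the same mechanical substitution, RT_TRACE becomes rtl_dbg with the argument list unchanged. For readers outside rtlwifi, here is a simplified userspace model of what such a mask-filtered debug printer does; the mask values and struct layout are stand-ins for illustration, not the definitions from wifi.h/debug.h:

#include <stdarg.h>
#include <stdio.h>

#define COMP_BT_COEXIST 0x00000400 /* illustrative component bit */
#define DBG_DMESG       3
#define DBG_TRACE       4

struct rtl_debug {
	unsigned int global_debugcomponents; /* which components may log */
	int global_debuglevel;               /* max verbosity to emit */
};

struct rtl_priv {
	struct rtl_debug dbg;
};

/* Emit only when the component bit is enabled and the message's level does
 * not exceed the configured verbosity; the kernel macro wraps printk the
 * same way. */
static void rtl_dbg(struct rtl_priv *rtlpriv, unsigned int comp, int level,
		    const char *fmt, ...)
{
	va_list args;

	if (!(rtlpriv->dbg.global_debugcomponents & comp) ||
	    level > rtlpriv->dbg.global_debuglevel)
		return;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	struct rtl_priv priv = { .dbg = { COMP_BT_COEXIST, DBG_TRACE } };

	rtl_dbg(&priv, COMP_BT_COEXIST, DBG_DMESG,
		"[BTCoex], FW write 0x19=0x%x\n", 0x200020);
	return 0;
}
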
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 652d8ff9cccb..53af0d209b11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -20,7 +20,7 @@ void rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw
return;
if (ppsc->inactiveps) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BT][DM], Before enter IPS, turn off all Coexist DM\n");
rtlpriv->btcoexist.cstate = 0;
rtlpriv->btcoexist.previous_state = 0;
@@ -68,9 +68,10 @@ void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
else
h2c_parameter[2] = 0x20;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], FW write 0x19=0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], FW write 0x19=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
rtl8723e_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
}
@@ -98,7 +99,7 @@ static void rtl8723e_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
h2c_parameter[2] = byte3;
h2c_parameter[3] = byte4;
h2c_parameter[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n",
h2c_parameter[0], h2c_parameter[1]<<24 |
h2c_parameter[2]<<16 | h2c_parameter[3]<<8 |
@@ -111,7 +112,7 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Need to decrease bt power\n");
rtlpriv->btcoexist.cstate |=
BT_COEX_STATE_DEC_BT_POWER;
@@ -130,12 +131,12 @@ static bool rtl8723e_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
rtlpriv->btcoexist.cstate) &&
(rtlpriv->btcoexist.previous_state_h ==
rtlpriv->btcoexist.cstate_h)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state do not change!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], Coexist state do not change!!\n");
return true;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state changed!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], Coexist state changed!!\n");
return false;
}
}
@@ -146,16 +147,16 @@ static void rtl8723e_dm_bt_set_coex_table(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6c0=0x%x\n", val_0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6c0=0x%x\n", val_0x6c0);
rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6c8=0x%x\n", val_0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6c8=0x%x\n", val_0x6c8);
rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6cc=0x%x\n", val_0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6cc=0x%x\n", val_0x6cc);
rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
}
@@ -164,12 +165,12 @@ static void rtl8723e_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (BT_PTA_MODE_ON == b_mode) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n");
/* Enable GPIO 0/1/2/3/8 pins for bt */
rtl_write_byte(rtlpriv, 0x40, 0x20);
rtlpriv->btcoexist.hw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
rtl_write_byte(rtlpriv, 0x40, 0x0);
}
}
@@ -181,15 +182,15 @@ static void rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw,
if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
/* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "Shrink RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "Shrink RF Rx LPF corner!!\n");
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e,
0xfffff, 0xf0ff7);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
/*Resume RF Rx LPF corner*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "Resume RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "Resume RF Rx LPF corner!!\n");
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
rtlpriv->btcoexist.bt_rfreg_origin_1e);
}
@@ -204,12 +205,12 @@ static void dm_bt_set_sw_penalty_tx_rate_adapt(struct ieee80211_hw *hw,
tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd);
tmp_u1 |= BIT(0);
if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Tx rate adaptive, set low penalty!!\n");
tmp_u1 &= ~BIT(2);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else if (BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Tx rate adaptive, set normal!!\n");
tmp_u1 |= BIT(2);
}
@@ -279,14 +280,14 @@ static bool rtl8723e_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
if (!rtl8723e_dm_bt_is_wifi_busy(hw) &&
!rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
rtl8723e_dm_bt_btdm_structure_reload_all_off(hw, &btdm8723);
b_common = true;
} else if (rtl8723e_dm_bt_is_wifi_busy(hw) &&
!rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi non-idle + Bt disabled/idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi non-idle + Bt disabled/idle!!\n");
btdm8723.low_penalty_rate_adaptive = true;
btdm8723.rf_rx_lpf_shrink = false;
btdm8723.reject_aggre_pkt = false;
@@ -307,14 +308,14 @@ static bool rtl8723e_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
b_common = true;
} else if (rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Bt non-idle!\n");
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Wifi connection exist\n");
b_common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"No Wifi connection!\n");
btdm8723.rf_rx_lpf_shrink = true;
btdm8723.low_penalty_rate_adaptive = false;
@@ -359,14 +360,14 @@ static void rtl8723e_dm_bt_set_sw_full_time_dac_swing(
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (sw_dac_swing_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
sw_dac_swing_lvl);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], SwDacSwing Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], SwDacSwing Off!\n");
rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
}
}
@@ -384,9 +385,9 @@ static void rtl8723e_dm_bt_set_fw_dec_bt_pwr(
rtlpriv->btcoexist.fw_coexist_all_off = false;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
}
@@ -404,10 +405,10 @@ static void rtl8723e_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
if (b_dac_swing_on)
h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
- (b_enable ? "ON!!" : "OFF!!"), (b_dac_swing_on ? "ON" : "OFF"),
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
+ (b_enable ? "ON!!" : "OFF!!"), (b_dac_swing_on ? "ON" : "OFF"),
+ h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
}
@@ -424,56 +425,56 @@ static void rtl8723e_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
h2c_parameter1[0] = 0;
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set BT PTA update manager to trigger update!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set BT PTA update manager to trigger update!!\n");
h2c_parameter1[0] |= BIT(0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], turn TDMA mode ON!!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
if (TDMA_1ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_1ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_1ANT\n");
h2c_parameter[0] |= BIT(1);
} else if (TDMA_2ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_2ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_2ANT\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], Unknown Ant\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], Unknown Ant\n");
}
if (TDMA_NAV_OFF == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_NAV_OFF\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_NAV_OFF\n");
} else if (TDMA_NAV_ON == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_NAV_ON\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_NAV_ON\n");
h2c_parameter[0] |= BIT(2);
}
if (TDMA_DAC_SWING_OFF == dac_swing_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], TDMA_DAC_SWING_OFF\n");
} else if (TDMA_DAC_SWING_ON == dac_swing_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], TDMA_DAC_SWING_ON\n");
h2c_parameter[0] |= BIT(4);
}
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set BT PTA update manager to no update!!\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn TDMA mode OFF!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set BT PTA update manager to no update!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn TDMA mode OFF!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], FW2AntTDMA, write 0x26=0x%x\n",
- h2c_parameter1[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], FW2AntTDMA, write 0x26=0x%x\n",
+ h2c_parameter1[0]);
rtl8723e_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW2AntTDMA, write 0x14=0x%x\n",
h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
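
Note: the TDMA control hunk above is also a compact example of how these H2C command bytes are built from bit flags before being handed to rtl8723e_fill_h2c_cmd(). A standalone sketch of the flag layout as it can be read off the branches above (the layout is inferred from this diff, not taken from a register manual):

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned char h2c = 0;

	h2c |= BIT(0); /* function enable */
	h2c |= BIT(1); /* TDMA_1ANT (left clear for TDMA_2ANT) */
	/* TDMA_NAV_ON would set BIT(2); NAV off leaves it clear */
	h2c |= BIT(4); /* TDMA_DAC_SWING_ON */

	printf("[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", h2c);
	return 0;
}
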
@@ -486,18 +487,18 @@ static void rtl8723e_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], BT Ignore Wlan_Act !!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], BT don't ignore Wlan_Act !!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
+ h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
}
@@ -513,43 +514,43 @@ static void rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
/* Only 8723 B cut should do this */
if (IS_VENDOR_8723_A_CUT(rtlhal->version)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
return;
}
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn TTDMA mode ON!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn TTDMA mode ON!!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
if (TDMA_1ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_1ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_1ANT\n");
h2c_parameter[0] |= BIT(1);
} else if (TDMA_2ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_2ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_2ANT\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], Unknown Ant\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], Unknown Ant\n");
}
if (TDMA_NAV_OFF == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_NAV_OFF\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_NAV_OFF\n");
} else if (TDMA_NAV_ON == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_NAV_ON\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_NAV_ON\n");
h2c_parameter[1] |= BIT(0);
}
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], turn TTDMA mode OFF!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n",
h2c_parameter[0] << 8 | h2c_parameter[1]);
@@ -563,9 +564,9 @@ static void rtl8723e_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
@@ -582,9 +583,9 @@ static void rtl8723e_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw,
h2c_parameter[0] |= BIT(0);
rtlpriv->btcoexist.fw_coexist_all_off = false;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set BT HID information=0x%x\n", b_enable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
@@ -597,9 +598,9 @@ static void rtl8723e_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = retry_index;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set BT Retry Index=%d\n", retry_index);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
@@ -614,12 +615,12 @@ static void rtl8723e_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
h2c_parameter_hi[0] = wlan_act_hi;
h2c_parameter_lo[0] = wlan_act_lo;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n",
wlan_act_hi, wlan_act_lo);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]);
/* WLAN_ACT = High duration, unit:ms */
@@ -646,107 +647,107 @@ void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
/* check new setting is different with the old one, */
/* if all the same, don't do the setting again. */
if (memcmp(btdm_8723, btdm, sizeof(struct btdm_8723)) == 0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], the same coexist setting, return!!\n");
return;
} else { /* save the new coexist setting */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], UPDATE TO NEW COEX SETTING!!\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bAllOff=0x%x/ 0x%x\n",
btdm_8723->all_off, btdm->all_off);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new agc_table_en=0x%x/ 0x%x\n",
btdm_8723->agc_table_en, btdm->agc_table_en);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new adc_back_off_on=0x%x/ 0x%x\n",
- btdm_8723->adc_back_off_on,
- btdm->adc_back_off_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x\n",
- btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x\n",
- btdm_8723->low_penalty_rate_adaptive,
- btdm->low_penalty_rate_adaptive);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x\n",
- btdm_8723->rf_rx_lpf_shrink,
- btdm->rf_rx_lpf_shrink);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x\n",
- btdm_8723->reject_aggre_pkt,
- btdm->reject_aggre_pkt);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdma_on=0x%x/ 0x%x\n",
- btdm_8723->tdma_on, btdm->tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdmaAnt=0x%x/ 0x%x\n",
- btdm_8723->tdma_ant, btdm->tdma_ant);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdmaNav=0x%x/ 0x%x\n",
- btdm_8723->tdma_nav, btdm->tdma_nav);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x\n",
- btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x\n",
- btdm_8723->fw_dac_swing_lvl,
- btdm->fw_dac_swing_lvl);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new traTdmaNav=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x\n",
- btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new adc_back_off_on=0x%x/ 0x%x\n",
+ btdm_8723->adc_back_off_on,
+ btdm->adc_back_off_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x\n",
+ btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x\n",
+ btdm_8723->low_penalty_rate_adaptive,
+ btdm->low_penalty_rate_adaptive);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x\n",
+ btdm_8723->rf_rx_lpf_shrink,
+ btdm->rf_rx_lpf_shrink);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x\n",
+ btdm_8723->reject_aggre_pkt,
+ btdm->reject_aggre_pkt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdma_on=0x%x/ 0x%x\n",
+ btdm_8723->tdma_on, btdm->tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdmaAnt=0x%x/ 0x%x\n",
+ btdm_8723->tdma_ant, btdm->tdma_ant);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdmaNav=0x%x/ 0x%x\n",
+ btdm_8723->tdma_nav, btdm->tdma_nav);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x\n",
+ btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x\n",
+ btdm_8723->fw_dac_swing_lvl,
+ btdm->fw_dac_swing_lvl);
+
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new traTdmaNav=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x\n",
+ btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
for (i = 0; i < 5; i++) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x\n",
- btdm_8723->ps_tdma_byte[i],
- btdm->ps_tdma_byte[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x\n",
+ btdm_8723->ps_tdma_byte[i],
+ btdm->ps_tdma_byte[i]);
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x\n",
btdm_8723->ignore_wlan_act,
btdm->ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bPtaOn=0x%x/ 0x%x\n",
btdm_8723->pta_on, btdm->pta_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6c0=0x%x/ 0x%x\n",
btdm_8723->val_0x6c0, btdm->val_0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6c8=0x%x/ 0x%x\n",
btdm_8723->val_0x6c8, btdm->val_0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6cc=0x%x/ 0x%x\n",
btdm_8723->val_0x6cc, btdm->val_0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new sw_dac_swing_on=0x%x/ 0x%x\n",
- btdm_8723->sw_dac_swing_on,
- btdm->sw_dac_swing_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x\n",
- btdm_8723->sw_dac_swing_lvl,
- btdm->sw_dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new wlanActHi=0x%x/ 0x%x\n",
- btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new wlanActLo=0x%x/ 0x%x\n",
- btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new btRetryIndex=0x%x/ 0x%x\n",
- btdm_8723->bt_retry_index, btdm->bt_retry_index);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new sw_dac_swing_on=0x%x/ 0x%x\n",
+ btdm_8723->sw_dac_swing_on,
+ btdm->sw_dac_swing_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x\n",
+ btdm_8723->sw_dac_swing_lvl,
+ btdm->sw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new wlanActHi=0x%x/ 0x%x\n",
+ btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new wlanActLo=0x%x/ 0x%x\n",
+ btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new btRetryIndex=0x%x/ 0x%x\n",
+ btdm_8723->bt_retry_index, btdm->bt_retry_index);
memcpy(btdm_8723, btdm, sizeof(struct btdm_8723));
}
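
Note: the function patched here opens with a classic compare-then-cache guard: memcmp() against the last programmed btdm_8723 block short-circuits the whole update when nothing changed, and memcpy() refreshes the cache once the new settings are accepted. A reduced, compilable model (the struct is cut down to three fields for illustration):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct btdm_settings {
	bool all_off;
	bool tdma_on;
	unsigned int val_0x6c0;
};

static struct btdm_settings cached; /* last configuration written out */

static void apply_settings(const struct btdm_settings *next)
{
	if (memcmp(&cached, next, sizeof(cached)) == 0) {
		printf("same coexist setting, return\n");
		return;
	}
	/* ... program the hardware here ... */
	memcpy(&cached, next, sizeof(cached));
	printf("updated to new coex setting\n");
}

int main(void)
{
	struct btdm_settings s = { .tdma_on = true, .val_0x6c0 = 0x55555555 };

	apply_settings(&s); /* differs from the zeroed cache: applied */
	apply_settings(&s); /* identical bytes: short-circuits */
	return 0;
}
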
@@ -756,14 +757,14 @@ void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
*/
if (rtlpriv->btcoexist.hold_for_bt_operation) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], set to ignore wlanAct for BT OP!!\n");
rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, true);
return;
}
if (btdm->all_off) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], disable all coexist mechanism !!\n");
rtl8723e_btdm_coex_all_off(hw);
return;
@@ -929,34 +930,34 @@ static u8 rtl8723e_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
bt_tx_rx_cnt = rtl8723e_dm_bt_tx_rx_couter_h(hw)
+ rtl8723e_dm_bt_tx_rx_couter_l(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);
rtlpriv->btcoexist.cstate_h &= ~
(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1|
BT_COEX_STATE_BT_CNT_LEVEL_2);
if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 3\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 3\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_3;
} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 2\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 2\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_2;
} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 1\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 1\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_1;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 0\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 0\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_0;
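
Note: the counter routine above maps a raw BT Tx+Rx activity count onto four discrete levels via descending threshold checks; the trace strings in later hunks place the level-1 and level-2 boundaries at 1200 and 1400. A sketch (the level-3 threshold is a made-up placeholder, since its value never appears in this diff):

#include <stdio.h>

enum {
	BT_TXRX_CNT_THRES_1 = 1200,
	BT_TXRX_CNT_THRES_2 = 1400,
	BT_TXRX_CNT_THRES_3 = 3000, /* placeholder value */
};

static int bt_txrx_counter_level(unsigned int cnt)
{
	if (cnt >= BT_TXRX_CNT_THRES_3)
		return 3;
	if (cnt >= BT_TXRX_CNT_THRES_2)
		return 2;
	if (cnt >= BT_TXRX_CNT_THRES_1)
		return 1;
	return 0;
}

int main(void)
{
	printf("[BTCoex], BT TxRx Counters at level %d\n",
	       bt_txrx_counter_level(1350)); /* -> level 1 */
	return 0;
}
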
@@ -979,11 +980,11 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.reject_aggre_pkt = false;
bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
/* coex table */
btdm8723.val_0x6c0 = 0x55555555;
btdm8723.val_0x6c8 = 0xffff;
@@ -997,24 +998,24 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* fw mechanism */
btdm8723.ps_tdma_on = true;
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1022,8 +1023,8 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "HT20 or Legacy\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "HT20 or Legacy\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
bt_rssi_state1 =
@@ -1037,14 +1038,14 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* sw mechanism */
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
btdm8723.agc_table_en = true;
btdm8723.adc_back_off_on = true;
btdm8723.sw_dac_swing_on = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
btdm8723.agc_table_en = false;
btdm8723.adc_back_off_on = false;
btdm8723.sw_dac_swing_on = false;
@@ -1054,30 +1055,30 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 high\n");
/* only rssi high we need to do this, */
/* when rssi low, the value will modified by fw */
rtl_write_byte(rtlpriv, 0x883, 0x40);
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x83;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters>= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters>= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x83;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1085,27 +1086,27 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1120,13 +1121,13 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
- hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
if ((hal_coex_8723.bt_inq_page_start_time) ||
(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
btdm8723.ps_tdma_on = true;
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
@@ -1157,11 +1158,11 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 37, 0);
@@ -1179,27 +1180,27 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1207,11 +1208,11 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
@@ -1219,16 +1220,16 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl ==
BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1237,8 +1238,8 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
}
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "HT20 or Legacy\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "HT20 or Legacy\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
bt_rssi_state1 =
@@ -1252,14 +1253,14 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
/* sw mechanism */
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
btdm8723.agc_table_en = true;
btdm8723.adc_back_off_on = true;
btdm8723.sw_dac_swing_on = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
btdm8723.agc_table_en = false;
btdm8723.adc_back_off_on = false;
btdm8723.sw_dac_swing_on = false;
@@ -1269,30 +1270,30 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 high\n");
/* only rssi high we need to do this, */
/* when rssi low, the value will modified by fw */
rtl_write_byte(rtlpriv, 0x883, 0x40);
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1300,27 +1301,27 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1333,14 +1334,14 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
if (rtl8723e_dm_bt_need_to_dec_bt_pwr(hw))
btdm8723.dec_bt_pwr = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
- hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
if ((hal_coex_8723.bt_inq_page_start_time) ||
(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
btdm8723.ps_tdma_on = true;
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
@@ -1366,20 +1367,20 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
rtlpriv->btcoexist.cstate |=
BT_COEX_STATE_BT_INQ_PAGE;
hal_coex_8723.bt_inq_page_start_time = cur_time;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
- hal_coex_8723.bt_inq_page_start_time);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
+ hal_coex_8723.bt_inq_page_start_time);
}
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
- hal_coex_8723.bt_inq_page_start_time, cur_time);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
+ hal_coex_8723.bt_inq_page_start_time, cur_time);
if (hal_coex_8723.bt_inq_page_start_time) {
if ((((long)cur_time -
(long)hal_coex_8723.bt_inq_page_start_time) / HZ)
>= 10) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], BT Inquiry/page >= 10sec!!!\n");
hal_coex_8723.bt_inq_page_start_time = 0;
rtlpriv->btcoexist.cstate &=
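
Note: the expiry test visible in this hunk is the usual jiffies idiom: cast both timestamps to long so the subtraction stays well-defined across counter wraparound, then divide by HZ to convert ticks to seconds. A userspace sketch (HZ is fixed at 100 here purely for the demo; the kernel's value is configuration-dependent):

#include <stdio.h>

#define HZ 100 /* illustrative tick rate */

static int inq_page_expired(unsigned long cur_time, unsigned long start_time)
{
	/* signed difference tolerates wraparound; >= 10 s means expired */
	return (((long)cur_time - (long)start_time) / HZ) >= 10;
}

int main(void)
{
	unsigned long start = 500;
	unsigned long now = start + 11 * HZ;

	printf("expired: %d\n", inq_page_expired(now, start));
	return 0;
}
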
@@ -1406,14 +1407,14 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_info_original;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
_rtl8723_dm_bt_check_wifi_state(hw);
if (hal_coex_8723.c2h_bt_info_req_sent) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex] c2h for bt_info not rcvd yet!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex] c2h for bt_info not rcvd yet!!\n");
}
bt_info_original = hal_coex_8723.c2h_bt_info_original;
@@ -1426,8 +1427,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
if (rtl8723e_dm_bt_is_2_ant_common_action(hw)) {
rtlpriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON;
rtlpriv->btcoexist.bt_profile_action = BT_COEX_MECH_COMMON;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Action 2-Ant common.\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Action 2-Ant common.\n");
} else {
if ((bt_info_original & BTINFO_B_HID) ||
(bt_info_original & BTINFO_B_SCO_BUSY) ||
@@ -1438,8 +1439,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_HID_SCO_ESCO;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_HID_SCO_ESCO;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
rtl8723e_dm_bt_2_ant_hid_sco_esco(hw);
} else if ((bt_info_original & BTINFO_B_FTP) ||
(bt_info_original & BTINFO_B_A2DP)) {
@@ -1449,8 +1450,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_FTP_A2DP;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_FTP_A2DP;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "BTInfo: bFTP|bA2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "BTInfo: bFTP|bA2DP\n");
rtl8723e_dm_bt_2_ant_ftp_a2dp(hw);
} else {
rtlpriv->btcoexist.cstate |=
@@ -1459,8 +1460,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_NONE;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_NONE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BTInfo: undefined case!!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BTInfo: undefined case!!!!\n");
rtl8723e_dm_bt_2_ant_hid_sco_esco(hw);
}
}
@@ -1513,7 +1514,7 @@ static void rtl8723e_dm_bt_query_bt_information(struct ieee80211_hw *hw)
h2c_parameter[0] |= BIT(0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
@@ -1548,10 +1549,10 @@ static void rtl8723e_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
hal_coex_8723.low_priority_tx = reg_lp_tx;
hal_coex_8723.low_priority_rx = reg_lp_rx;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
rtlpriv->btcoexist.lps_counter = 0;
@@ -1584,26 +1585,26 @@ static void rtl8723e_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
if (bt_alife) {
rtlpriv->btcoexist.bt_active_zero_cnt = 0;
rtlpriv->btcoexist.cur_bt_disabled = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A BT is enabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A BT is enabled !!\n");
} else {
rtlpriv->btcoexist.bt_active_zero_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A bt all counters=0, %d times!!\n",
- rtlpriv->btcoexist.bt_active_zero_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A bt all counters=0, %d times!!\n",
+ rtlpriv->btcoexist.bt_active_zero_cnt);
if (rtlpriv->btcoexist.bt_active_zero_cnt >= 2) {
rtlpriv->btcoexist.cur_bt_disabled = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A BT is disabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A BT is disabled !!\n");
}
}
if (rtlpriv->btcoexist.pre_bt_disabled !=
rtlpriv->btcoexist.cur_bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_TRACE, "8723A BT is from %s to %s!!\n",
- (rtlpriv->btcoexist.pre_bt_disabled ?
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_TRACE, "8723A BT is from %s to %s!!\n",
+ (rtlpriv->btcoexist.pre_bt_disabled ?
"disabled" : "enabled"),
- (rtlpriv->btcoexist.cur_bt_disabled ?
+ (rtlpriv->btcoexist.cur_bt_disabled ?
"disabled" : "enabled"));
rtlpriv->btcoexist.pre_bt_disabled
= rtlpriv->btcoexist.cur_bt_disabled;
@@ -1620,22 +1621,22 @@ void rtl8723e_dm_bt_coexist_8723(struct ieee80211_hw *hw)
rtl8723e_dm_bt_bt_enable_disable_check(hw);
if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], 2 Ant mechanism\n");
_rtl8723e_dm_bt_coexist_2_ant(hw);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], 1 Ant mechanism\n");
_rtl8723e_dm_bt_coexist_1_ant(hw);
}
if (!rtl8723e_dm_bt_is_same_coexist_state(hw)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
- rtlpriv->btcoexist.previous_state_h,
- rtlpriv->btcoexist.previous_state,
- rtlpriv->btcoexist.cstate_h,
- rtlpriv->btcoexist.cstate);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+ rtlpriv->btcoexist.previous_state_h,
+ rtlpriv->btcoexist.previous_state,
+ rtlpriv->btcoexist.cstate_h,
+ rtlpriv->btcoexist.cstate);
rtlpriv->btcoexist.previous_state
= rtlpriv->btcoexist.cstate;
rtlpriv->btcoexist.previous_state_h
@@ -1658,14 +1659,14 @@ static void rtl8723e_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
else if (i == 1)
hal_coex_8723.bt_retry_cnt = tmp_buf[i];
if (i == len-1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "0x%2x]", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "0x%2x]", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "0x%2x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "0x%2x, ", tmp_buf[i]);
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"BT info bt_info (Data)= 0x%x\n",
hal_coex_8723.c2h_bt_info_original);
bt_info = hal_coex_8723.c2h_bt_info_original;
@@ -1677,12 +1678,12 @@ static void rtl8723e_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
if (bt_info & BTINFO_B_CONNECTION) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTC2H], BTInfo: bConnect=true\n");
rtlpriv->btcoexist.bt_busy = true;
rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_BT_IDLE;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTC2H], BTInfo: bConnect=false\n");
rtlpriv->btcoexist.bt_busy = false;
rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BT_IDLE;
@@ -1697,14 +1698,14 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
u8 u1b_tmp = 0;
memset(&c2h_event, 0, sizeof(c2h_event));
u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
"&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp);
c2h_event.cmd_id = u1b_tmp & 0xF;
c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4;
c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
- c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+ c2h_event.cmd_id, c2h_event.cmd_len, c2h_event.cmd_seq);
u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF);
if (u1b_tmp == C2H_EVT_HOST_CLOSE) {
return;
@@ -1714,8 +1715,8 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
}
ptmp_buf = kzalloc(c2h_event.cmd_len, GFP_KERNEL);
if (ptmp_buf == NULL) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "malloc cmd buf failed\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "malloc cmd buf failed\n");
return;
}
@@ -1733,13 +1734,13 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
break;
case C2H_V0_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[0] (ID) is 0x%x\n",
c2h_event.cmd_id);
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[1] (Seq) is 0x%x\n",
c2h_event.cmd_seq);
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]);
rtl8723e_dm_bt_parse_bt_info(hw, ptmp_buf, c2h_event.cmd_len);
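
For readers skimming this series: the bulk of these hunks mechanically rename RT_TRACE() to rtl_dbg() with the arguments unchanged — a private-data pointer, a component bitmask (COMP_*), a verbosity level (DBG_*), and a printf-style format. The sketch below is an illustrative user-space model of the filtering such a macro performs, not the kernel's actual definition (which lives in rtlwifi/debug.h); the struct and function names here are made up for the example.

#include <stdarg.h>
#include <stdio.h>

/* Illustrative stand-ins; the real masks and levels come from debug.h. */
#define COMP_INIT	0x00000002
#define DBG_LOUD	4

struct dbg_ctl {
	unsigned int debug_comp;	/* enabled component bits */
	int debug_level;		/* maximum verbosity to print */
};

/* Model of rtl_dbg()-style filtering: emit the message only when the
 * component bit is enabled and the level is within the configured bound. */
static void dbg_print(const struct dbg_ctl *ctl, unsigned int comp,
		      int level, const char *fmt, ...)
{
	va_list args;

	if (!(ctl->debug_comp & comp) || level > ctl->debug_level)
		return;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/* Usage mirroring the call sites in this patch:
 *	dbg_print(&ctl, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", ver);
 */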
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 7a46c6a9deae..a36dc6e726d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -122,8 +122,8 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -187,8 +187,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -227,9 +227,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -242,9 +242,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -289,9 +289,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
p_regtoset[index]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -328,9 +328,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -345,16 +345,16 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -526,8 +526,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -703,8 +703,8 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw)
} while (tmpu2b != 0xc290 && retry < 100);
if (retry >= 100) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "InitMAC(): ePHY configure fail!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "InitMAC(): ePHY configure fail!!!\n");
return false;
}
@@ -878,14 +878,14 @@ void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -900,8 +900,8 @@ void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -942,8 +942,8 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
err = rtl8723_download_fw(hw, false, FW_8723A_POLLING_TIMEOUT_COUNT);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1009,7 +1009,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1018,7 +1018,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl8723e_dm_init(hw);
exit:
@@ -1069,16 +1069,16 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
}
switch (version) {
case VERSION_TEST_UMC_CHIP_8723:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
- break;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
+ break;
case VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
break;
case VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
break;
default:
pr_err("Chip Version ID: Unknown. Bug?\n");
@@ -1088,7 +1088,7 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
if (IS_8723_SERIES(version))
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
(rtlphy->rf_type == RF_2T2R) ? "RF_2T2R" : "RF_1T1R");
return version;
@@ -1103,30 +1103,30 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
u8 mode = MSR_NOLINK;
rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_LOUD,
"clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to AP!\n");
break;
default:
@@ -1153,9 +1153,9 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
_rtl8723e_resume_tx_beacon(hw);
_rtl8723e_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1350,8 +1350,8 @@ void rtl8723e_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8723e_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8723e_enable_interrupt(hw);
@@ -1363,8 +1363,8 @@ void rtl8723e_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1782,8 +1782,8 @@ static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1806,19 +1806,19 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8723e_read_adapter_info(hw, false);
} else {
@@ -1914,8 +1914,8 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2036,17 +2036,17 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl8723e_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
@@ -2111,15 +2111,15 @@ bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2170,7 +2170,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2198,8 +2198,8 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2229,26 +2229,26 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -2288,9 +2288,9 @@ static void rtl8723e_bt_var_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.bt_radio_shared_type =
rtlpriv->btcoexist.eeprom_bt_radio_shared;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BT Coexistence = 0x%x\n",
- rtlpriv->btcoexist.bt_coexistence);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BT Coexistence = 0x%x\n",
+ rtlpriv->btcoexist.bt_coexistence);
if (rtlpriv->btcoexist.bt_coexistence) {
rtlpriv->btcoexist.bt_busy_traffic = false;
@@ -2301,47 +2301,47 @@ static void rtl8723e_bt_var_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.previous_state = 0;
if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_Num = Antx2\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_Num = Antx2\n");
} else if (rtlpriv->btcoexist.bt_ant_num == ANT_X1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_Num = Antx1\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_Num = Antx1\n");
}
switch (rtlpriv->btcoexist.bt_coexist_type) {
case BT_2WIRE:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_2Wire\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_2Wire\n");
break;
case BT_ISSC_3WIRE:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
break;
case BT_ACCEL:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_ACCEL\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_ACCEL\n");
break;
case BT_CSR_BC4:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
break;
case BT_CSR_BC8:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
break;
case BT_RTL8756:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_RTL8756\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_RTL8756\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = Unknown\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = Unknown\n");
break;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_isolation = %d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_isolation = %d\n",
rtlpriv->btcoexist.bt_ant_isolation);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BT_RadioSharedType = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BT_RadioSharedType = 0x%x\n",
rtlpriv->btcoexist.bt_radio_shared_type);
rtlpriv->btcoexist.bt_active_zero_cnt = 0;
rtlpriv->btcoexist.cur_bt_disabled = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index 5e503dbc463b..7fab02e01a8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -19,8 +19,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -48,8 +48,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -128,7 +128,7 @@ void rtl8723e_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
+ ledaction);
_rtl8723e_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 772aecedf0b4..fa0eed434d4f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -38,9 +38,9 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value = 0, readback_value, bitshift;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -54,9 +54,9 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -69,9 +69,9 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 original_value = 0, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -99,9 +99,9 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
@@ -185,30 +185,30 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Write BB Reg Fail!!\n");
return false;
}
if (rtlphy->rf_type == RF_1T2R) {
_rtl8723e_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
}
if (rtlefuse->autoload_failflag == false) {
rtlphy->pwrgroup_cnt = 0;
rtstatus = _rtl8723e_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
_rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("AGC Table Fail\n");
return false;
}
@@ -226,12 +226,12 @@ static bool _rtl8723e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
arraylength = RTL8723E_MACARRAYLENGTH;
ptrarray = RTL8723EMAC_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Img:RTL8192CEMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -267,20 +267,20 @@ static bool _rtl8723e_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -296,146 +296,146 @@ static void store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][7]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][2]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][4]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][8]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][9]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][14]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][15]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][10]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][11]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][12]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][13]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][13]);
rtlphy->pwrgroup_cnt++;
}
@@ -473,8 +473,8 @@ static bool _rtl8723e_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -534,21 +534,21 @@ void rtl8723e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8) rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
void rtl8723e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
@@ -622,7 +622,7 @@ void rtl8723e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 cckpowerlevel[2], ofdmpowerlevel[2];
- if (rtlefuse->txpwr_fromeprom == false)
+ if (!rtlefuse->txpwr_fromeprom)
return;
_rtl8723e_get_txpower_index(hw, channel,
&cckpowerlevel[0], &ofdmpowerlevel[0]);
@@ -650,9 +650,9 @@ bool rtl8723e_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
else
ofdmtxpwridx = 0;
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx);
+ rtl_dbg(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx);
for (idx = 0; idx < 14; idx++) {
for (rf_path = 0; rf_path < 2; rf_path++) {
rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
@@ -734,10 +734,10 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -791,7 +791,7 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -808,8 +808,8 @@ void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723e_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -822,8 +822,8 @@ void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -841,7 +841,7 @@ void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -861,12 +861,12 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723e_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -991,9 +991,9 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
_rtl8723e_phy_sw_rf_seting(hw, channel);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1444,24 +1444,24 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1472,7 +1472,7 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8723e_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -1482,9 +1482,9 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -1497,14 +1497,14 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
rtl8723e_dm_write_dig(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8723e_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -1541,8 +1541,8 @@ static void _rtl8723e_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!.\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
@@ -1569,18 +1569,17 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8723e_phy_set_rf_on(hw);
}
@@ -1594,8 +1593,8 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFOFF:
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -1619,33 +1618,33 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl8723e_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
index 9058527a7f94..b8ed80c84266 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
@@ -49,7 +49,7 @@ void rtl8723e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_regulatory != 0)
turbo_scanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
@@ -479,13 +479,13 @@ static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ if (!rtstatus) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
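
Alongside the logging rename, the phy.c and rf.c hunks drop explicit comparisons of bool values against true/false: rtstatus != true becomes !rtstatus, and mac->act_scanning == true becomes mac->act_scanning. A minimal illustration with a made-up function name:

#include <stdbool.h>

/* Bool values are tested directly; comparing against the constant true is
 * redundant, and when the flag is an int rather than a _Bool, "== true"
 * (i.e. == 1) can misfire for other non-zero values. */
static bool radio_config_ok(bool rtstatus)
{
	if (!rtstatus)		/* was: if (rtstatus != true) */
		return false;
	return true;
}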
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index a04ce15d5538..e3ee91b7ea8d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -362,14 +362,13 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
bool lastseg = ((hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -477,8 +476,8 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -517,7 +516,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -529,16 +528,15 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
@@ -591,7 +589,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
{
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
set_tx_desc_own(pdesc, 1);
@@ -632,7 +630,7 @@ u64 rtl8723e_get_desc(struct ieee80211_hw *hw,
u32 ret = 0;
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = get_tx_desc_own(pdesc);
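
The trx.c hunks above also move off the deprecated PCI-DMA wrappers onto the generic DMA API: pci_map_single(pdev, ...) with PCI_DMA_TODEVICE becomes dma_map_single(&pdev->dev, ..., DMA_TO_DEVICE), and pci_dma_mapping_error() becomes dma_mapping_error(). A condensed sketch of the resulting TX-map pattern; rtl_tx_map_sketch() is an illustrative helper, not a function from this driver:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for a device read and report failure so the caller can drop
 * the frame, mirroring the post-conversion code in tx_fill_desc(). */
static int rtl_tx_map_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			     dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}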
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index c9b3d9d09c48..c3c990cc032f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -225,9 +225,9 @@ static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- " rtlpriv->dm.txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
@@ -265,33 +265,33 @@ static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
rtl_dm_dig->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
@@ -421,7 +421,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
} else {
dm_digtable->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -576,18 +576,18 @@ static void rtl8723be_dm_false_alarm_counter_statistics(
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
}
static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
@@ -747,18 +747,18 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
/*Initilization ( 7 steps in total )*/
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtl8723be_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8)rtl_get_rfreg(hw,
RF90_PATH_A, RF_T_METER, 0xfc00);
if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
rtlefuse->eeprom_thermalmeter == 0xFF)
return;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtldm->thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtldm->thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
/*3 Initialize ThermalValues of RFCalibrateInfo*/
if (!rtldm->thermalvalue) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -792,10 +792,10 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
/* 6 If necessary, do LCK.*/
if (delta_lck >= IQK_THRESHOLD) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -876,7 +876,7 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
}
rtldm->txpowercount = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
}
@@ -890,13 +890,13 @@ void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8723be Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8723be Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
rtl8723be_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
}
@@ -914,14 +914,14 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -949,14 +949,14 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -1073,8 +1073,8 @@ static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres;
dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
}
static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
@@ -1121,8 +1121,8 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
}
if (rtlpriv->cfg->ops->get_btc_status()) {
if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
return;
}
}
@@ -1152,11 +1152,11 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtldm->large_cfo_hit = true;
return;
} else
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
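
One non-mechanical tweak in the dm.c hunks above: the tx-power-tracking callback now logs "%s", __func__ instead of a hard-coded copy of its own name, so the message tracks any future rename for free. Minimal illustration (standard C99 __func__; the function name is illustrative):

#include <stdio.h>

static void txpower_tracking_callback(void)
{
	/* __func__ expands to the enclosing function's name at compile
	 * time, so the log text can never drift from the identifier. */
	printf("%s\n", __func__);
}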
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index aa56058af56e..b3e6c91e26c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -41,22 +41,22 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -107,9 +107,9 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -118,24 +118,24 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -182,16 +182,16 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
@@ -229,8 +229,8 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -572,15 +572,15 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
u1rsvdpageloc, sizeof(u1rsvdpageloc));
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
/*Should check FW support p2p or not.*/
@@ -607,11 +607,11 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -668,11 +668,11 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
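
The _rtl8723be_fill_h2c_command() hunks above poll for the firmware to consume the previous H2C mailbox before writing the next command, bounding the wait with a retry counter and a 100 us delay so a wedged firmware cannot hang the caller. A self-contained sketch of that bounded-poll shape follows; fw_has_read_box() and delay_us() are hypothetical stand-ins for the driver's register read and udelay(), and the retry limit is illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical hardware accessors for this sketch only. */
static bool fw_has_read_box(int boxnum) { return boxnum >= 0; }
static void delay_us(unsigned int us) { (void)us; /* udelay() in-kernel */ }

/* Bounded busy-wait: spin with a short delay, give up after a fixed
 * number of tries rather than waiting forever. */
static bool wait_fw_read(int boxnum)
{
	int limit = 5000;	/* illustrative retry budget */

	while (!fw_has_read_box(boxnum)) {
		if (--limit == 0)
			return false;	/* the "Waiting too long..." path */
		delay_us(100);
	}
	return true;
}

int main(void)
{
	printf("box 0 consumed: %d\n", wait_fw_read(0));	/* 1: succeeds */
	printf("box -1 consumed: %d\n", wait_fw_read(-1));	/* 0: times out */
	return 0;
}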
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 979e5bfe5f45..0748aedce2ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -37,11 +37,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -146,9 +145,9 @@ static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -331,8 +330,8 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -436,8 +435,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -479,9 +478,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -494,9 +493,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -534,9 +533,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
}
break;
@@ -571,9 +570,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -588,16 +587,16 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
}
break;
@@ -705,8 +704,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -821,8 +820,8 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8723_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as power on failure\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as power on failure\n");
return false;
}
@@ -859,7 +858,7 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
if (!rtlhal->mac_func_enable) {
- if (_rtl8723be_llt_table_init(hw) == false)
+ if (!_rtl8723be_llt_table_init(hw))
return false;
}
@@ -1121,14 +1120,14 @@ void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1143,8 +1142,8 @@ void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1208,8 +1207,8 @@ static bool _rtl8723be_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
*/
tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8723BE(): true!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8723BE(): true!!\n");
return true;
}
return false;
@@ -1222,8 +1221,8 @@ static void _rtl8723be_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ResetPcieInterfaceDMA8723BE()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ResetPcieInterfaceDMA8723BE()\n");
/* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
* released by SD1 Alan.
@@ -1375,8 +1374,8 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw)
err = rtl8723_download_fw(hw, true, FW_8723B_POLLING_TIMEOUT_COUNT);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1460,7 +1459,7 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
if ((value32 & (CHIP_8723B)) != CHIP_8723B)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
else
version = (enum version_8723e)CHIP_8723B;
@@ -1476,9 +1475,9 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
if (((value32 & EXT_VENDOR_ID) >> 18) == 0x01)
version = (enum version_8723e)(version | CHIP_VENDOR_SMIC);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1494,26 +1493,26 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1538,9 +1537,9 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
_rtl8723be_resume_tx_beacon(hw);
_rtl8723be_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1702,8 +1701,8 @@ void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8723be_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8723be_enable_interrupt(hw);
@@ -1715,8 +1714,8 @@ void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1747,15 +1746,15 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x]=0x%x\n",
- (addr + 1), hwinfo[addr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x]=0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
if (0xFF == hwinfo[addr + 1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (path = 0; path < MAX_RF_PATH; path++) {
/* 2.4G default value */
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -2099,8 +2098,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
rtlhal->package_type = _rtl8723be_read_package_type(hw);
@@ -2237,8 +2236,8 @@ static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
@@ -2255,18 +2254,18 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8723be_read_adapter_info(hw, false);
} else {
@@ -2417,8 +2416,8 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[0] = macid;
@@ -2431,13 +2430,13 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4], rate_mask[5],
- rate_mask[6]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]);
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RA_MASK, 7, rate_mask);
_rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
@@ -2500,15 +2499,15 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2559,7 +2558,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2587,8 +2586,8 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2618,26 +2617,26 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 525f2c47da5b..3954624ab314 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -19,8 +19,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -47,8 +47,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -127,6 +127,6 @@ void rtl8723be_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
_rtl8723be_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9528ac3f3b87..f09f55b0468a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -34,9 +34,9 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -46,9 +46,9 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -59,9 +59,9 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, path);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -77,9 +77,9 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, path);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
}
@@ -158,18 +158,18 @@ static bool _rtl8723be_check_positive(struct ieee80211_hw *hw,
rtlhal->type_alna << 16 |
rtlhal->type_apa << 24;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1, cond2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1, driver2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1, cond2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1, driver2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Board, Package) = (0x%X, 0x%X)\n",
- rtlhal->board_type, rtlhal->package_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "(Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "(Board, Package) = (0x%X, 0x%X)\n",
+ rtlhal->board_type, rtlhal->package_type);
/*============== Value Defined Check ===============*/
/*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
@@ -283,9 +283,9 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
+ path);
return;
}
@@ -304,15 +304,15 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
- band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
+ band);
}
}
@@ -325,9 +325,9 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
return 0;
}
@@ -346,15 +346,15 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
- band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
+ band);
}
return value;
@@ -477,8 +477,8 @@ static void _rtl8723be_phy_convert_txpower_dbm_to_relative_value(
&rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][7],
0, 3, base);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "<===_rtl8723be_phy_convert_txpower_dbm_to_relative_value()\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<===%s\n", __func__);
}
static void phy_txpower_by_rate_config(struct ieee80211_hw *hw)
@@ -588,7 +588,7 @@ static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
return rtl8723be_phy_config_with_headerfile(hw,
RTL8723BEMAC_1T_ARRAY, RTL8723BEMAC_1T_ARRAYLEN,
@@ -684,16 +684,16 @@ static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
return;
}
if (rfpath > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
return;
}
if (txnum > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
return;
}
@@ -734,8 +734,8 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -747,7 +747,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool ret = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
ret = rtl8723be_phy_config_with_headerfile(hw,
@@ -762,8 +762,8 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_C:
break;
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpath);
break;
}
return ret;
@@ -783,21 +783,21 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
@@ -950,16 +950,16 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
if (channel > 14 || channel < 1) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Illegal channel!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Illegal channel!\n");
}
if (RX_HAL_IS_CCK_RATE(rate))
txpower = rtlefuse->txpwrlevel_cck[path][index];
else if (DESC92C_RATE6M <= rate)
txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "invalid rate\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "invalid rate\n");
if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
!RX_HAL_IS_CCK_RATE(rate))
@@ -1099,11 +1099,11 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
}
}
@@ -1187,10 +1187,10 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1244,7 +1244,7 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1261,8 +1261,8 @@ void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723be_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1275,8 +1275,8 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1296,7 +1296,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1316,13 +1316,13 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723be_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1428,9 +1428,9 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2058,7 +2058,7 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl8723be_phy_path_a_iqk(hw);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
@@ -2066,36 +2066,36 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Tx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Fail!!\n");
}
}
/* path A RX IQK */
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl8723be_phy_path_a_rx_iqk(hw);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Fail!!\n");
}
if (0x00 == patha_ok)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Fail!!\n");
if (is2t) {
/* path B TX IQK */
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723be_phy_path_b_iqk(hw);
if (pathb_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xe94,
MASKDWORD) &
0x3FF0000) >> 16;
@@ -2104,15 +2104,15 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Tx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Tx IQK Fail!!\n");
}
/* path B RX IQK */
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723be_phy_path_b_rx_iqk(hw);
if (pathb_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Rx IQK Success!!\n");
result[t][6] = (rtl_get_bbreg(hw, 0xea4,
MASKDWORD) &
0x3FF0000) >> 16;
@@ -2121,8 +2121,8 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Rx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Rx IQK Fail!!\n");
}
}
@@ -2150,7 +2150,7 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
}
static u8 _get_right_chnl_place_for_iqk(u8 chnl)
@@ -2224,14 +2224,14 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
bool bmain, bool is2t)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (bmain) /* left antenna */
rtl_set_bbreg(hw, 0x92C, MASKDWORD, 0x1);
@@ -2418,24 +2418,24 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool b_postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
b_postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
b_postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2446,7 +2446,7 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8723be_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2456,9 +2456,9 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2472,14 +2472,14 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -2522,16 +2522,16 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8723be_phy_set_rf_on(hw);
}
@@ -2555,27 +2555,27 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -2599,34 +2599,34 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl8723be_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index af72e489e31c..8a856fb42b8d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -478,12 +478,12 @@ static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index b8081e196cdf..559ab78687c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -340,9 +340,9 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -442,10 +442,10 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8723be));
@@ -459,9 +459,9 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl8723be_insert_emcontent(ptcb_desc,
(__le32 *)(skb->data));
}
@@ -551,8 +551,8 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
/* from being overwritten by retried packet rate.*/
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -583,7 +583,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
@@ -595,13 +595,12 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
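
Besides the logging rename, the trx.c hunks above (like hw.c earlier) migrate from the legacy PCI DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) with PCI_DMA_TODEVICE becomes dma_map_single(&pdev->dev, ...) with DMA_TO_DEVICE, and pci_dma_mapping_error() becomes dma_mapping_error(). A short kernel-context sketch of the pattern, shown for shape rather than as a standalone program; map_tx_skb() is a made-up helper, and returning 0 on failure is this sketch's convention, not the driver's.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static dma_addr_t map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb)
{
	dma_addr_t mapping;

	/* old: pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE) */
	mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);

	/* old: pci_dma_mapping_error(pdev, mapping) */
	if (dma_mapping_error(&pdev->dev, mapping))
		return 0;	/* caller treats 0 as "mapping failed" */

	return mapping;
}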
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 37036e653e56..36c00b89ccae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -41,7 +41,7 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
u32 page_nums, remain_size;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -63,7 +63,7 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
page = page_nums;
rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size);
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
}
EXPORT_SYMBOL_GPL(rtl8723_write_fw);
@@ -109,8 +109,8 @@ void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " _8051Reset8723be(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "_8051Reset8723be(): 8051 reset success .\n");
}
EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
@@ -143,9 +143,9 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+ value32);
err = 0;
goto exit;
}
@@ -188,10 +188,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
else
max_page = 8;
if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Firmware Version(%d), Signature(%#x), Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index debecc623a01..47b6c1aa36b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -14,15 +14,15 @@ u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue);
return returnvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
@@ -33,9 +33,9 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
- data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
+ data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -45,21 +45,17 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
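
The rtl8723_phy_calculate_bit_shift() rewrite just above replaces a 32-iteration scan for the lowest set bit with a single ffs() call. ffs() returns the 1-based index of the least-significant set bit and 0 for an empty mask, so `i ? i - 1 : 32` preserves the old loop's behaviour exactly, including the fall-through to 32 for a zero bitmask. A runnable userspace equivalent, using POSIX ffs() from <strings.h> in place of the kernel's:

#include <stdio.h>
#include <strings.h>	/* POSIX ffs() */

static unsigned int calculate_bit_shift(unsigned int bitmask)
{
	int i = ffs(bitmask);	/* 1-based index of lowest set bit, 0 if none */

	return i ? i - 1 : 32;
}

int main(void)
{
	printf("%u\n", calculate_bit_shift(0x00ff0000));	/* 16 */
	printf("%u\n", calculate_bit_shift(0x1));		/* 0 */
	printf("%u\n", calculate_bit_shift(0));			/* 32 */
	return 0;
}

The shift is then used as `(value & bitmask) >> shift` to extract a bit field, which is why the empty-mask case never occurs on the hot path.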
@@ -105,9 +101,9 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
@@ -130,10 +126,10 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 97a30ccf0b27..f6bff0ebd6b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -437,26 +437,26 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl8812ae_dm_rssi_dump_to_register(struct ieee80211_hw *hw)
@@ -626,11 +626,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
u8 dm_dig_max, dm_dig_min, offset;
u8 current_igi = dm_digtable->cur_igvalue;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "\n");
if (mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Return: In Scan Progress\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Return: In Scan Progress\n");
return;
}
@@ -666,10 +666,10 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dm_digtable->rx_gain_max =
dm_digtable->rssi_val_min + offset;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n",
- dm_digtable->rssi_val_min,
- dm_digtable->rx_gain_max);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n",
+ dm_digtable->rssi_val_min,
+ dm_digtable->rx_gain_max);
if (rtlpriv->dm.one_entry_only) {
offset = 0;
@@ -682,22 +682,21 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dig_min_0 =
dm_digtable->rssi_val_min - offset;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
- dig_min_0);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
+ dig_min_0);
} else {
dig_min_0 = dm_dig_min;
}
} else {
dm_digtable->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "No Link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Abnormally false alarm case.\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Abnormally false alarm case.\n");
if (dm_digtable->large_fa_hit != 3)
dm_digtable->large_fa_hit++;
@@ -728,23 +727,23 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dig_min_0;
dm_digtable->rx_gain_min =
dig_min_0;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Normal Case: At Lower Bound\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Normal Case: At Lower Bound\n");
} else {
dm_digtable->forbidden_igi--;
dm_digtable->rx_gain_min =
(dm_digtable->forbidden_igi + 1);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Normal Case: Approach Lower Bound\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Normal Case: Approach Lower Bound\n");
}
} else {
dm_digtable->large_fa_hit = 0;
}
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "pDM_DigTable->LargeFAHit=%d\n",
- dm_digtable->large_fa_hit);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "pDM_DigTable->LargeFAHit=%d\n",
+ dm_digtable->large_fa_hit);
if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
dm_digtable->rx_gain_min = dm_dig_min;
@@ -754,15 +753,15 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
/*Adjust initial gain by false alarm*/
if (mac->link_state >= MAC80211_LINKED) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG AfterLink\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG AfterLink\n");
if (first_connect) {
if (dm_digtable->rssi_val_min <= dig_max_of_min)
current_igi = dm_digtable->rssi_val_min;
else
current_igi = dig_max_of_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "First Connect\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "First Connect\n");
} else {
if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
current_igi = current_igi + 4;
@@ -774,17 +773,17 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if ((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) &&
(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) {
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n");
}
}
} else {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG BeforeLink\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG BeforeLink\n");
if (first_disconnect) {
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "First DisConnect\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "First DisConnect\n");
} else {
/* 2012.03.30 LukeLee: enable DIG before
* link but with very high thresholds
@@ -799,11 +798,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (current_igi >= 0x3e)
current_igi = 0x3e;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "England DIG\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "England DIG\n");
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG End Adjust IGI\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG End Adjust IGI\n");
/* Check initial gain by upper/lower bound*/
if (current_igi > dm_digtable->rx_gain_max)
@@ -811,13 +810,13 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (current_igi < dm_digtable->rx_gain_min)
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "rx_gain_max=0x%x, rx_gain_min=0x%x\n",
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "rx_gain_max=0x%x, rx_gain_min=0x%x\n",
dm_digtable->rx_gain_max, dm_digtable->rx_gain_min);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "CurIGValue=0x%x\n", current_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "CurIGValue=0x%x\n", current_igi);
rtl8821ae_dm_write_dig(hw, current_igi);
dm_digtable->media_connect_0 =
@@ -880,12 +879,12 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Cck_fail=%d\n",
- falsealm_cnt->cnt_cck_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cnt_ofdm_fail=%d\n",
- falsealm_cnt->cnt_ofdm_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Total False Alarm=%d\n",
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Cck_fail=%d\n",
+ falsealm_cnt->cnt_cck_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "cnt_ofdm_fail=%d\n",
+ falsealm_cnt->cnt_ofdm_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Total False Alarm=%d\n",
+ falsealm_cnt->cnt_all);
}
static void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
@@ -896,13 +895,13 @@ static void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E,
BIT(17) | BIT(16), 0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8812 Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8812 Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
}
@@ -981,8 +980,8 @@ void rtl8821ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 p = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Get C2H Command! Rate=0x%x\n", rate);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Get C2H Command! Rate=0x%x\n", rate);
rtldm->tx_rate = rate;
@@ -1145,9 +1144,9 @@ u8 rtl8821ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate)
ret_rate = MGN_VHT2SS_MCS9;
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "HwRateToMRate8812(): Non supported Rate [%x]!!!\n",
- rate);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "HwRateToMRate8812(): Non supported Rate [%x]!!!\n",
+ rate);
break;
}
return ret_rate;
@@ -1187,8 +1186,8 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
tx_rate =
rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
/*20130429 Mimic Modify High Rate BBSwing Limit.*/
if (tx_rate != 0xFF) {
/*CCK*/
@@ -1259,13 +1258,13 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
else
pwr_tracking_limit = 24;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxRate=0x%x, PwrTrackingLimit=%d\n",
- tx_rate, pwr_tracking_limit);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxRate=0x%x, PwrTrackingLimit=%d\n",
+ tx_rate, pwr_tracking_limit);
if (method == BBSWING) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
u32 tmp;
@@ -1276,10 +1275,10 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_A];
tmp = final_swing_idx[RF90_PATH_A];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
- rtldm->ofdm_index[RF90_PATH_A],
- final_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A],
+ final_swing_idx[RF90_PATH_A]);
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[tmp]);
@@ -1292,20 +1291,20 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_B];
tmp = final_swing_idx[RF90_PATH_B];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
- rtldm->ofdm_index[RF90_PATH_B],
- final_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_B],
+ final_swing_idx[RF90_PATH_B]);
rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
txscaling_tbl[tmp]);
}
} else if (method == MIX_MODE) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
- rtldm->default_ofdm_index,
- rtldm->absolute_ofdm_swing_idx[rf_path],
- rf_path);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index,
+ rtldm->absolute_ofdm_swing_idx[rf_path],
+ rf_path);
final_ofdm_swing_index = rtldm->default_ofdm_index +
rtldm->absolute_ofdm_swing_idx[rf_path];
@@ -1333,10 +1332,10 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel,
RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Over BBSwing Limit ,PwrTrackingLimit = %d ,Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Over BBSwing Limit ,PwrTrackingLimit = %d ,Remnant TxAGC Value = %d\n",
+ pwr_tracking_limit,
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_cck_idx = final_ofdm_swing_index;
/* CCK Follow the same compensate value as Path A*/
@@ -1352,15 +1351,15 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Compensate with BBSwing, Final_OFDM_Swing_Index = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Compensate with BBSwing, Final_OFDM_Swing_Index = %d\n",
final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_a) {
@@ -1372,9 +1371,9 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel, RF90_PATH_A);
rtldm->modify_txagc_flag_path_a = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
}
}
}
@@ -1395,9 +1394,9 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_B);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
+ pwr_tracking_limit,
rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_ofdm_swing_idx[rf_path] =
@@ -1412,15 +1411,15 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_B);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_b) {
@@ -1433,8 +1432,8 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtldm->modify_txagc_flag_path_b =
false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
}
}
}
@@ -1474,18 +1473,18 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
- rtldm->swing_idx_cck_base,
- rtldm->swing_idx_ofdm_base[RF90_PATH_A],
- rtldm->default_ofdm_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->swing_idx_cck_base,
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index);
thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A,
/*0x42: RF Reg[15:10] 88E*/
RF_T_METER_8812A, 0xfc00);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
if (!rtldm->txpower_track_control ||
rtlefuse->eeprom_thermalmeter == 0 ||
rtlefuse->eeprom_thermalmeter == 0xFF)
@@ -1494,8 +1493,8 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/* 3. Initialize ThermalValues of RFCalibrateInfo*/
if (rtlhal->reloadtxpowerindex)
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
/*4. Calculate average thermal meter*/
rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
@@ -1514,9 +1513,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
if (thermal_value_avg_count) {
thermal_value = (u8)(thermal_value_avg /
thermal_value_avg_count);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
}
/*5. Calculate delta, delta_LCK, delta_IQK.
@@ -1533,17 +1532,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
(thermal_value - rtldm->thermalvalue_iqk) :
(rtldm->thermalvalue_iqk - thermal_value);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk);
/* 6. If necessary, do LCK.
* Delta temperature is equal to or larger than 20 centigrade.
*/
if (delta_lck >= IQK_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_LCK(%d) >= Threshold_IQK(%d)\n",
- delta_lck, IQK_THRESHOLD);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD);
rtldm->thermalvalue_lck = thermal_value;
rtl8821ae_phy_lc_calibrate(hw);
}
@@ -1564,9 +1563,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_a[%d] = %d\n",
- delta, delta_swing_table_idx_tup_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
rtldm->delta_power_index[RF90_PATH_A] =
@@ -1576,13 +1575,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_a[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_b[%d] = %d\n",
- delta, delta_swing_table_idx_tup_b[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_b[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_b[delta]);
rtldm->delta_power_index_last[RF90_PATH_B] =
rtldm->delta_power_index[RF90_PATH_B];
rtldm->delta_power_index[RF90_PATH_B] =
@@ -1592,13 +1591,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_b[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tdown_a[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
@@ -1608,13 +1607,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] =
-1 * delta_swing_table_idx_tdown_a[delta];
/* Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_b[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_b[delta]);
rtldm->delta_power_index_last[RF90_PATH_B] =
rtldm->delta_power_index[RF90_PATH_B];
@@ -1625,15 +1624,15 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
-1 * delta_swing_table_idx_tdown_b[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
}
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "============================= [Path-%c]Calculating PowerIndexOffset =============================\n",
- (p == RF90_PATH_A ? 'A' : 'B'));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "============================= [Path-%c]Calculating PowerIndexOffset =============================\n",
+ (p == RF90_PATH_A ? 'A' : 'B'));
if (rtldm->delta_power_index[p] ==
rtldm->delta_power_index_last[p])
@@ -1647,12 +1646,12 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/* Power Index Diff between 2
* times Power Tracking
*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "[Path-%c] PowerIndexOffset(%d) =DeltaPowerIndex(%d) -DeltaPowerIndexLast(%d)\n",
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->power_index_offset[p],
- rtldm->delta_power_index[p] ,
- rtldm->delta_power_index_last[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "[Path-%c] PowerIndexOffset(%d) =DeltaPowerIndex(%d) -DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p],
+ rtldm->delta_power_index_last[p]);
rtldm->ofdm_index[p] =
rtldm->swing_idx_ofdm_base[p] +
@@ -1666,17 +1665,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/****Print BB Swing Base and Index Offset */
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_cck,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_cck,
rtldm->swing_idx_cck_base,
rtldm->power_index_offset[p]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_ofdm[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p],
- rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]);
/*7.1 Handle boundary conditions of index.*/
@@ -1685,32 +1684,32 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
else if (rtldm->ofdm_index[p] < ofdm_min_index)
rtldm->ofdm_index[p] = ofdm_min_index;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n====================================================================================\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n====================================================================================\n");
if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1)
rtldm->cck_index = TXSCALE_TABLE_SIZE - 1;
else if (rtldm->cck_index < 0)
rtldm->cck_index = 0;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
- rtldm->txpower_track_control,
- thermal_value,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtldm->power_index_offset[p] = 0;
}
/*Print Swing base & current*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [CCK] Swing Current Index: %d,Swing Base Index: %d\n",
- rtldm->cck_index, rtldm->swing_idx_cck_base);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [CCK] Swing Current Index: %d,Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->swing_idx_cck_base);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [OFDM] Swing Current Index: %d,Swing Base Index[%c]: %d\n",
- rtldm->ofdm_index[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [OFDM] Swing Current Index: %d,Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p]);
}
if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
@@ -1727,52 +1726,52 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
*tx power in tx agc for 88E.
*/
if (thermal_value > rtldm->thermalvalue) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d,EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(B): delta_pi: %d ,delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_B],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d,EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(B): delta_pi: %d ,delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_B],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
}
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "**********Enter POWER Tracking MIX_MODE**********\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "**********Enter POWER Tracking MIX_MODE**********\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE,
p, 0);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "**********Enter POWER Tracking MIX_MODE**********\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "**********Enter POWER Tracking MIX_MODE**********\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE,
p, index_for_channel);
@@ -1783,9 +1782,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->swing_idx_ofdm_base[p] =
rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
@@ -1794,8 +1793,8 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
if (delta_iqk >= IQK_THRESHOLD)
rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "<===%s\n", __func__);
}
static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw,
@@ -1865,7 +1864,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/
/*CCK*/
@@ -1908,33 +1907,33 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
else
pwr_tracking_limit = 24;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxRate=0x%x, PwrTrackingLimit=%d\n",
- tx_rate, pwr_tracking_limit);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxRate=0x%x, PwrTrackingLimit=%d\n",
+ tx_rate, pwr_tracking_limit);
if (method == BBSWING) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
final_swing_idx[RF90_PATH_A] =
(rtldm->ofdm_index[RF90_PATH_A] >
pwr_tracking_limit) ?
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_A];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
- rtldm->ofdm_index[RF90_PATH_A],
- final_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A],
+ final_swing_idx[RF90_PATH_A]);
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[final_swing_idx[RF90_PATH_A]]);
}
} else if (method == MIX_MODE) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
- rtldm->default_ofdm_index,
- rtldm->absolute_ofdm_swing_idx[rf_path],
- rf_path);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index,
+ rtldm->absolute_ofdm_swing_idx[rf_path],
+ rf_path);
final_ofdm_swing_index =
rtldm->default_ofdm_index +
@@ -1961,10 +1960,10 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel,
RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
" ******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ pwr_tracking_limit,
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_cck_idx = final_ofdm_swing_index;
/* CCK Follow the same compensate value as Path A*/
@@ -1980,16 +1979,16 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
- final_ofdm_swing_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
+ final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_a) {
rtldm->remnant_cck_idx = 0;
@@ -2001,9 +2000,9 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtldm->modify_txagc_flag_path_a = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "******Path_A pDM_Odm->Modify_TxAGC_Flag= FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "******Path_A pDM_Odm->Modify_TxAGC_Flag= FALSE\n");
}
}
}
@@ -2042,12 +2041,12 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
- __func__,
- rtldm->swing_idx_cck_base,
- rtldm->swing_idx_ofdm_base[RF90_PATH_A],
- rtldm->default_ofdm_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ __func__,
+ rtldm->swing_idx_cck_base,
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index);
/*0x42: RF Reg[15:10] 88E*/
thermal_value = (u8)rtl_get_rfreg(hw,
RF90_PATH_A, RF_T_METER_8812A, 0xfc00);
@@ -2059,8 +2058,8 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/* 3. Initialize ThermalValues of RFCalibrateInfo*/
if (rtlhal->reloadtxpowerindex) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
}
/*4. Calculate average thermal meter*/
@@ -2080,9 +2079,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
if (thermal_value_avg_count) {
thermal_value = (u8)(thermal_value_avg /
thermal_value_avg_count);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
}
/*5. Calculate delta, delta_LCK, delta_IQK.
@@ -2099,16 +2098,16 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
(thermal_value - rtldm->thermalvalue_iqk) :
(rtldm->thermalvalue_iqk - thermal_value);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk);
/* 6. If necessary, do LCK. */
/*Delta temperature is equal to or larger than 20 centigrade.*/
if (delta_lck >= IQK_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_LCK(%d) >= Threshold_IQK(%d)\n",
- delta_lck, IQK_THRESHOLD);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD);
rtldm->thermalvalue_lck = thermal_value;
rtl8821ae_phy_lc_calibrate(hw);
}
@@ -2129,9 +2128,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_a[%d] = %d\n",
- delta, delta_swing_table_idx_tup_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
rtldm->delta_power_index[RF90_PATH_A] =
@@ -2141,13 +2140,13 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_a[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tdown_a[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
@@ -2157,15 +2156,15 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] =
-1 * delta_swing_table_idx_tdown_a[delta];
/* Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
}
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n================================ [Path-%c]Calculating PowerIndexOffset ================================\n",
- (p == RF90_PATH_A ? 'A' : 'B'));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n================================ [Path-%c]Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B'));
/*If Thermal value changes but lookup table value
* still the same
*/
@@ -2179,9 +2178,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->delta_power_index_last[p];
/*Power Index Diff between 2 times Power Tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
- (p == RF90_PATH_A ? 'A' : 'B'),
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
rtldm->power_index_offset[p],
rtldm->delta_power_index[p] ,
rtldm->delta_power_index_last[p]);
@@ -2198,17 +2197,17 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/*********Print BB Swing Base and Index Offset********/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_cck,
- rtldm->swing_idx_cck_base,
- rtldm->power_index_offset[p]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_ofdm[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p],
- rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_cck,
+ rtldm->swing_idx_cck_base,
+ rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]);
/*7.1 Handle boundary conditions of index.*/
@@ -2217,32 +2216,32 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
else if (rtldm->ofdm_index[p] < ofdm_min_index)
rtldm->ofdm_index[p] = ofdm_min_index;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n========================================================================================================\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n========================================================================================================\n");
if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1)
rtldm->cck_index = TXSCALE_TABLE_SIZE - 1;
else if (rtldm->cck_index < 0)
rtldm->cck_index = 0;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The thermal meter is unchanged or TxPowerTracking OFF(%d):ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
- rtldm->txpower_track_control,
- thermal_value,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The thermal meter is unchanged or TxPowerTracking OFF(%d):ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->power_index_offset[p] = 0;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
- /*Print Swing base & current*/
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ /*Print Swing base & current*/
rtldm->cck_index, rtldm->swing_idx_cck_base);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
- rtldm->ofdm_index[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p]);
}
if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
@@ -2259,38 +2258,38 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
* set tx power in tx agc for 88E.
*/
if (thermal_value > rtldm->thermalvalue) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(A): delta_pi: %d , delta_t: %d,Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(A): delta_pi: %d , delta_t: %d,Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
}
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "****Enter POWER Tracking MIX_MODE****\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "****Enter POWER Tracking MIX_MODE****\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtl8821ae_dm_txpwr_track_set_pwr(hw,
MIX_MODE, p, index_for_channel);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "*****Enter POWER Tracking MIX_MODE*****\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "*****Enter POWER Tracking MIX_MODE*****\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw,
MIX_MODE, p, index_for_channel);
@@ -2300,9 +2299,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
@@ -2323,7 +2322,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
}
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
}
void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
@@ -2332,13 +2331,13 @@ void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8821ae Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8821ae Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
@@ -2357,14 +2356,14 @@ static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -2392,14 +2391,14 @@ static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -2454,22 +2453,22 @@ static void rtl8821ae_dm_edca_choose_traffic_idx(
if (b_bias_on_rx) {
if (cur_tx_bytes > (cur_rx_bytes*4)) {
*pb_is_cur_rdl_state = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Uplink Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Uplink Traffic\n");
} else {
*pb_is_cur_rdl_state = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Balance Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Balance Traffic\n");
}
} else {
if (cur_rx_bytes > (cur_tx_bytes*4)) {
*pb_is_cur_rdl_state = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Downlink Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Downlink Traffic\n");
} else {
*pb_is_cur_rdl_state = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Balance Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Balance Traffic\n");
}
}
return;
@@ -2492,11 +2491,11 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
bool b_bias_on_rx = false;
bool b_edca_turbo_on = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "rtl8821ae_dm_check_edca_turbo=====>\n");
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Original BE PARAM: 0x%x\n",
- rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N));
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "%s=====>\n", __func__);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Original BE PARAM: 0x%x\n",
+ rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N));
if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100)
rtlpriv->dm.is_any_nonbepkts = true;
@@ -2528,20 +2527,20 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
}
}
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x\n",
- rtlpriv->dm.is_any_nonbepkts,
- rtlpriv->dm.disable_framebursting);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x\n",
+ rtlpriv->dm.is_any_nonbepkts,
+ rtlpriv->dm.disable_framebursting);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
- b_edca_turbo_on, b_bias_on_rx);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
+ b_edca_turbo_on, b_bias_on_rx);
if (b_edca_turbo_on) {
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "curTxOkCnt : 0x%llx\n", cur_tx_ok_cnt);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "curRxOkCnt : 0x%llx\n", cur_rx_ok_cnt);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "curTxOkCnt : 0x%llx\n", cur_tx_ok_cnt);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "curRxOkCnt : 0x%llx\n", cur_rx_ok_cnt);
if (b_bias_on_rx)
rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
cur_rx_ok_cnt, true, pb_is_cur_rdl_state);
@@ -2553,14 +2552,14 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "EDCA Turbo on: EDCA_BE:0x%x\n", edca_be);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "EDCA Turbo on: EDCA_BE:0x%x\n", edca_be);
rtlpriv->dm.current_turbo_edca = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x\n",
- edca_be_dl, edca_be_ul, edca_be);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x\n",
+ edca_be_dl, edca_be_ul, edca_be);
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
@@ -2606,8 +2605,8 @@ static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres;
dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
}
static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
@@ -2626,9 +2625,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtldm->atc_status = ATC_STATUS_ON;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No link!!\n");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "atc_status = %d\n", rtldm->atc_status);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No link!!\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "atc_status = %d\n", rtldm->atc_status);
if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
@@ -2643,8 +2642,8 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
0xfff000, (crystal_cap |
(crystal_cap << 6)));
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "crystal_cap = 0x%x\n",
- rtldm->crystal_cap);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "crystal_cap = 0x%x\n",
+ rtldm->crystal_cap);
} else{
/*1. Calculate CFO for path-A & path-B*/
cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
@@ -2653,15 +2652,15 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
/*2.No new packet*/
if (packet_count == rtldm->packet_count_pre) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "packet counter doesn't change\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "packet counter doesn't change\n");
return;
}
rtldm->packet_count_pre = packet_count;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "packet counter = %d\n",
- rtldm->packet_count);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "packet counter = %d\n",
+ rtldm->packet_count);
/*3.Average CFO*/
if (rtlpriv->phy.rf_type == RF_1T1R)
@@ -2669,22 +2668,22 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
else
cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
- cfo_khz_a, cfo_khz_b, cfo_ave);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
+ cfo_khz_a, cfo_khz_b, cfo_ave);
/*4.Avoid abnormal large CFO*/
cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "first large CFO hit\n");
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "first large CFO hit\n");
+ rtldm->large_cfo_hit = true;
return;
} else
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
@@ -2701,9 +2700,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Dynamic threshold = %d\n",
- rtldm->cfo_threshold);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Dynamic threshold = %d\n",
+ rtldm->cfo_threshold);
/* 2.Calculate Xtal offset*/
if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
@@ -2711,9 +2710,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
rtlpriv->dm.crystal_cap > 0)
adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Crystal cap = 0x%x, Crystal cap offset = %d\n",
- rtldm->crystal_cap, adjust_xtal);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Crystal cap = 0x%x, Crystal cap offset = %d\n",
+ rtldm->crystal_cap, adjust_xtal);
/*3.Adjudt Crystal Cap.*/
if (adjust_xtal != 0) {
@@ -2735,9 +2734,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, REG_MAC_PHY_CTRL,
0xfff000, (crystal_cap |
(crystal_cap << 6)));
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "New crystal cap = 0x%x\n",
- rtldm->crystal_cap);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "New crystal cap = 0x%x\n",
+ rtldm->crystal_cap);
}
}
}
@@ -2781,7 +2780,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_DMESG, "\n");
}
void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
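Beyond the rename, a few hunks in dm.c also drop hard-coded function names from the format strings (for example "===>rtl8812ae_dm_txpwr_track_set_pwr\n" becomes "===>%s\n", __func__). A hard-coded name goes stale the moment the function is renamed; __func__ always expands to the current name. A minimal sketch of the pattern, with a hypothetical demo function name:

```c
/*
 * Sketch of the __func__ substitution used in several hunks above.
 * demo_entry_trace() is a made-up name for illustration.
 */
#include <stdio.h>

static void demo_entry_trace(void)
{
	/* Before: printf("===>demo_entry_trace\n");  -- breaks on rename */
	printf("===>%s\n", __func__);
}

int main(void)
{
	demo_entry_trace();
	return 0;
}
```
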
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index fe32d397d287..1ae56e15ca7f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -39,7 +39,7 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -75,9 +75,9 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "chksum report fail! REG_MCUFWDL:0x%08x .\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "chksum report fail! REG_MCUFWDL:0x%08x .\n",
+ value32);
goto exit;
}
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
@@ -154,15 +154,15 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
fwsize = rtlhal->fwsize;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "%s Firmware SIZE %d\n",
- buse_wake_on_wlan_fw ? "Wowlan" : "Normal", fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "%s Firmware SIZE %d\n",
+ buse_wake_on_wlan_fw ? "Wowlan" : "Normal", fwsize);
if (IS_FW_HEADER_EXIST_8812(pfwheader) ||
IS_FW_HEADER_EXIST_8821(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x)\n",
- pfwheader->version, pfwheader->signature);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x)\n",
+ pfwheader->version, pfwheader->signature);
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
@@ -180,11 +180,11 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
err = _rtl8821ae_fw_free_to_go(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Firmware is not ready to run!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Firmware is not ready to run!\n");
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Firmware is ready to run!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware is ready to run!\n");
}
return 0;
@@ -199,13 +199,13 @@ void rtl8821ae_set_fw_related_for_wowlan(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
/* 1. Before or after WoWLAN we need to re-download the Fw. */
if (rtl8821ae_download_fw(hw, used_wowlan_fw)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Re-Download Firmware failed!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Re-Download Firmware failed!!\n");
rtlhal->fw_ready = false;
return;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Re-Download Firmware Success !!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Re-Download Firmware Success !!\n");
rtlhal->fw_ready = true;
/* 2. Re-init the variables for Fw-related settings. */
@@ -249,22 +249,22 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag = 0;
u8 idx = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -300,8 +300,8 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -324,9 +324,9 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
/*wait until Fw read*/
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -335,25 +335,25 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read =
_rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -389,8 +389,8 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -400,16 +400,16 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -458,8 +458,8 @@ void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "_8051Reset8812ae(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "_8051Reset8812ae(): 8051 reset success .\n");
}
void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
@@ -478,8 +478,8 @@ void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -590,7 +590,7 @@ void rtl8821ae_set_fw_wowlan_mode(struct ieee80211_hw *hw, bool func_en)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 fw_wowlan_info[H2C_8821AE_WOWLAN_LENGTH] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "enable(%d)\n", func_en);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "enable(%d)\n", func_en);
SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(fw_wowlan_info,
(func_en ? true : false));
@@ -624,9 +624,9 @@ void rtl8821ae_set_fw_remote_wake_ctrl_cmd(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 remote_wake_ctrl_parm[H2C_8821AE_REMOTE_WAKE_CTRL_LEN] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "enable=%d, ARP offload=%d, GTK offload=%d\n",
- enable, ppsc->arp_offload_enable, ppsc->gtk_offload_enable);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "enable=%d, ARP offload=%d, GTK offload=%d\n",
+ enable, ppsc->arp_offload_enable, ppsc->gtk_offload_enable);
SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(remote_wake_ctrl_parm, enable);
SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(remote_wake_ctrl_parm,
@@ -651,7 +651,7 @@ void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 keep_alive_info[H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable(%d)\n", func_en);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Enable(%d)\n", func_en);
SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(keep_alive_info, func_en);
/* 1: the period is controlled by driver, 0: by Fw default */
@@ -690,9 +690,9 @@ void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw)
struct rtl_security *sec = &rtlpriv->sec;
u8 remote_wakeup_sec_info[H2C_8821AE_AOAC_GLOBAL_INFO_LEN] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PairwiseEncAlgorithm=%d, GroupEncAlgorithm=%d\n",
- sec->pairwise_enc_algorithm, sec->group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PairwiseEncAlgorithm=%d, GroupEncAlgorithm=%d\n",
+ sec->pairwise_enc_algorithm, sec->group_enc_algorithm);
SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(
remote_wakeup_sec_info,
@@ -1646,8 +1646,8 @@ out:
}
if (!b_dlok)
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
@@ -1771,8 +1771,8 @@ out:
b_dlok = true;
if (!b_dl_finished && b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 5);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
@@ -1788,8 +1788,8 @@ out:
}
if (!b_dlok) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
}
@@ -1815,11 +1815,11 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -1873,11 +1873,11 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
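Every _rtl8821ae_fill_h2c_command() hunk above sits inside the same concurrency pattern: one h2c_setinprogress flag guarded by a spinlock, polled with udelay(100) until the previous writer drains its mailbox, giving up after roughly 100 ms. Distilled into a standalone sketch (claim_h2c is an illustrative name, not the driver's; the real function additionally rotates through four HMEBOX registers):

/* Sketch of the claim-the-mailbox pattern used above, under the
 * assumption that the locks and flags match the fields visible in
 * the hunks (rtlpriv->locks.h2c_lock, rtlhal->h2c_setinprogress).
 */
static bool claim_h2c(struct rtl_priv *rtlpriv, struct rtl_hal *rtlhal)
{
	unsigned long flag;
	unsigned int waited = 0;

	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	while (rtlhal->h2c_setinprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
		udelay(100);			/* let the FW drain the box */
		if (++waited > 1000)		/* ~100 ms: give up */
			return false;
		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	}
	rtlhal->h2c_setinprogress = true;	/* we own the mailbox now */
	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
	return true;
}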
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 198d419ebb9c..33ffc24d3675 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -33,11 +33,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -143,9 +142,9 @@ change_done:
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set rtlhal->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set rtlhal->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -330,8 +329,8 @@ static void _rtl8821ae_download_rsvd_page(struct ieee80211_hw *hw,
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Download RSVD page failed!\n");
if (bcnvalid_reg & BIT(0) && rtlhal->enter_pnp_sleep) {
rtl_write_byte(rtlpriv, REG_TDECTRL + 2, bcnvalid_reg | BIT(0));
_rtl8821ae_return_beacon_queue_skb(hw);
@@ -365,8 +364,8 @@ static void _rtl8821ae_download_rsvd_page(struct ieee80211_hw *hw,
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "2 Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "2 Download RSVD page failed!\n");
}
}
@@ -458,8 +457,8 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((bool *)(val)) = false;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -511,8 +510,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -558,9 +557,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -572,9 +571,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -632,9 +631,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -649,16 +648,16 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break; }
case HW_VAR_RCR:
@@ -761,9 +760,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u32 us_nav_upper = *(u32 *)val;
if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) {
- RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING,
- "The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n",
- us_nav_upper, HAL_92C_NAV_UPPER_UNIT);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n",
+ us_nav_upper, HAL_92C_NAV_UPPER_UNIT);
break;
}
rtl_write_byte(rtlpriv, REG_NAV_UPPER,
@@ -779,8 +778,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
array);
break; }
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -910,16 +909,16 @@ static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8812_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init 8812 MAC Fail as power on failure\n");
- return false;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init 8812 MAC Fail as power on failure\n");
+ return false;
}
} else {
/* HW Power on sequence */
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8821A_NIC_ENABLE_FLOW)){
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"init 8821 MAC Fail as power on failure\n");
return false;
}
@@ -1161,14 +1160,14 @@ void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
u8 tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1184,8 +1183,8 @@ void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1207,10 +1206,10 @@ static void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Initialize MacId media status: from %d to %d\n",
- MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
- MAC_ID_STATIC_FOR_BT_CLIENT_END);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Initialize MacId media status: from %d to %d\n",
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
+ MAC_ID_STATIC_FOR_BT_CLIENT_END);
}
static bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
@@ -1229,8 +1228,8 @@ static bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
/* read reg 0x350 Bit[24] if 1 : TX hang */
tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8821AE(): true! Reset PCIE DMA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8821AE(): true! Reset PCIE DMA!\n");
return true;
} else {
return false;
@@ -1247,7 +1246,7 @@ static bool _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
/* 1. Disable register write lock. 0x1c[1] = 0 */
tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
@@ -1346,8 +1345,8 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
fw_reason = rtl_read_byte(rtlpriv, REG_MCUTST_WOWLAN);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "WOL Read 0x1c7 = %02X\n",
- fw_reason);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "WOL Read 0x1c7 = %02X\n",
+ fw_reason);
ppsc->wakeup_reason = 0;
@@ -1356,63 +1355,63 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
switch (fw_reason) {
case FW_WOW_V2_PTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_PTK_UPDATE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a WOL PTK Key update event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a WOL PTK Key update event!\n");
break;
case FW_WOW_V2_GTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_GTK_UPDATE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a WOL GTK Key update event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a WOL GTK Key update event!\n");
break;
case FW_WOW_V2_DISASSOC_EVENT:
ppsc->wakeup_reason = WOL_REASON_DISASSOC;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a disassociation event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a disassociation event!\n");
break;
case FW_WOW_V2_DEAUTH_EVENT:
ppsc->wakeup_reason = WOL_REASON_DEAUTH;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a deauth event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a deauth event!\n");
break;
case FW_WOW_V2_FW_DISCONNECT_EVENT:
ppsc->wakeup_reason = WOL_REASON_AP_LOST;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a Fw disconnect decision (AP lost) event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a Fw disconnect decision (AP lost) event!\n");
break;
case FW_WOW_V2_MAGIC_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_MAGIC_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a magic packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a magic packet event!\n");
break;
case FW_WOW_V2_UNICAST_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_UNICAST_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an unicast packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an unicast packet event!\n");
break;
case FW_WOW_V2_PATTERN_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_PATTERN_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a pattern match event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a pattern match event!\n");
break;
case FW_WOW_V2_RTD3_SSID_MATCH_EVENT:
ppsc->wakeup_reason = WOL_REASON_RTD3_SSID_MATCH;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RTD3 Ssid match event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RTD3 Ssid match event!\n");
break;
case FW_WOW_V2_REALWOW_V2_WAKEUPPKT:
ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_WAKEUPPKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RealWoW wake packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RealWoW wake packet event!\n");
break;
case FW_WOW_V2_REALWOW_V2_ACKLOST:
ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_ACKLOST;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RealWoW ack lost event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RealWoW ack lost event!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "WOL Read 0x1c7 = %02X, Unknown reason!\n",
- fw_reason);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "WOL Read 0x1c7 = %02X, Unknown reason!\n",
+ fw_reason);
break;
}
}
@@ -1484,9 +1483,9 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
(u8 *)(&support_remote_wakeup));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
- boundary, npq_rqpn_value, rqpn_val);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
+ boundary, npq_rqpn_value, rqpn_val);
/* stop PCIe DMA
* 1. 0x301[7:0] = 0xFE */
@@ -1500,12 +1499,12 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
tmp16 = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
count++;
if ((count % 200) == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Tx queue is not empty for 20ms!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue is not empty for 20ms!\n");
}
if (count >= 1000) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait for Tx FIFO empty timeout!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait for Tx FIFO empty timeout!\n");
break;
}
}
@@ -1521,8 +1520,8 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
udelay(100);
count++;
if (count >= 500) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait for TX State Machine ready timeout !!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait for TX State Machine ready timeout !!\n");
break;
}
}
@@ -1540,9 +1539,9 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
count++;
} while (!(tmp & BIT(1)) && count < 100);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait until Rx DMA Idle. count=%d REG[0x286]=0x%x\n",
- count, tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait until Rx DMA Idle. count=%d REG[0x286]=0x%x\n",
+ count, tmp);
/* reset BB
* 7. 0x02 [0] = 0 */
@@ -1599,8 +1598,8 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
/* init LLT
* 17. init LLT */
if (!_rtl8821ae_init_llt_table(hw, boundary)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING,
- "Failed to init LLT table!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "Failed to init LLT table!\n");
return false;
}
@@ -1620,7 +1619,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp&~BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "End.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "End.\n");
return ret;
}
@@ -1655,12 +1654,12 @@ static void _rtl8821ae_enable_l1off(struct ieee80211_hw *hw)
u8 tmp = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x160);
if (!(tmp & (BIT(2) | BIT(3)))) {
- RT_TRACE(rtlpriv, COMP_POWER | COMP_INIT, DBG_LOUD,
- "0x160(%#x)return!!\n", tmp);
+ rtl_dbg(rtlpriv, COMP_POWER | COMP_INIT, DBG_LOUD,
+ "0x160(%#x)return!!\n", tmp);
return;
}
@@ -1670,7 +1669,7 @@ static void _rtl8821ae_enable_l1off(struct ieee80211_hw *hw)
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718);
_rtl8821ae_dbi_write(rtlpriv, 0x718, tmp | BIT(5));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
}
static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
@@ -1678,13 +1677,13 @@ static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
u8 tmp = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
/* Check 0x98[10] */
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x99);
if (!(tmp & BIT(2))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "<---0x99(%#x) return!!\n", tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "<---0x99(%#x) return!!\n", tmp);
return;
}
@@ -1701,7 +1700,7 @@ static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x7a4, (tmp & (~BIT(0))));
rtl_write_byte(rtlpriv, 0x7a4, (tmp | BIT(0)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
}
static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw)
@@ -1724,14 +1723,14 @@ static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw)
/* Release Pcie Interface Rx DMA to allow wake packet DMA. */
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable PCIE Rx DMA.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Enable PCIE Rx DMA.\n");
/* Check wake up event.
 * We should check the wake packet bit before disabling wowlan by
 * H2C, or Fw will clear the bit. */
tmp = rtl_read_byte(rtlpriv, REG_FTISR + 3);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Read REG_FTISR 0x13f = %#X\n", tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Read REG_FTISR 0x13f = %#X\n", tmp);
/* Set the WoWLAN related function control disable. */
rtl8821ae_set_fw_wowlan_mode(hw, false);
@@ -1796,7 +1795,7 @@ static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
/* Combo (PCIe + USB) Card and PCIe-MF Card */
/* 1. Run LPS WL RFOFF flow */
- /* RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ /* rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n");
*/
rtl_hal_pwrseqcmdparsing(rtlpriv,
@@ -1862,8 +1861,8 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_CR);
if (tmp_u1b != 0 && tmp_u1b != 0xEA) {
rtlhal->mac_func_enable = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MAC has already power on.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MAC has already power on.\n");
} else {
rtlhal->mac_func_enable = false;
rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
@@ -1895,7 +1894,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
}
rtstatus = _rtl8821ae_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Init MAC failed\n");
err = 1;
return err;
@@ -1907,8 +1906,8 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
err = rtl8821ae_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now\n");
err = 1;
rtlhal->fw_ready = false;
return err;
@@ -1987,7 +1986,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
rtl8821ae_dm_init(hw);
rtl8821ae_macid_initialize_mediastatus(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_hw_init() <====\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "%s() <====\n", __func__);
return err;
}
@@ -2000,16 +1999,16 @@ static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ReadChipVersion8812A 0xF0 = 0x%x\n", value32);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ReadChipVersion8812A 0xF0 = 0x%x\n", value32);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
rtlphy->rf_type = RF_2T2R;
else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RF_Type is %x!!\n", rtlphy->rf_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RF_Type is %x!!\n", rtlphy->rf_type);
if (value32 & TRP_VAUX_EN) {
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
@@ -2049,44 +2048,44 @@ static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
switch (version) {
case VERSION_TEST_CHIP_1T1R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_1T1R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_1T1R_8812\n");
break;
case VERSION_TEST_CHIP_2T2R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_2T2R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_2T2R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_1T1R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID:VERSION_NORMAL_TSMC_CHIP_1T1R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID:VERSION_NORMAL_TSMC_CHIP_1T1R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_2T2R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT\n");
break;
case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT\n");
break;
case VERSION_TEST_CHIP_8821:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_8821\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_8821\n");
break;
case VERSION_NORMAL_TSMC_CHIP_8821:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT\n");
break;
case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: Unknown (0x%X)\n", version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: Unknown (0x%X)\n", version);
break;
}
@@ -2102,7 +2101,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
bt_msr &= 0xfc;
rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_LOUD,
"clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
if (type == NL80211_IFTYPE_UNSPECIFIED ||
@@ -2114,33 +2113,33 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
_rtl8821ae_resume_tx_beacon(hw);
_rtl8821ae_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ type);
}
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -2183,7 +2182,7 @@ int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_set_network_type!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "%s!\n", __func__);
if (_rtl8821ae_set_media_status(hw, type))
return -EOPNOTSUPP;
@@ -2283,16 +2282,16 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
* offset 0x34 from the Function Header */
pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
do {
pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
cap_id = cap_hdr & 0xFF;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "in pci configuration, cap_pointer%x = %x\n",
- cap_pointer, cap_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "in pci configuration, cap_pointer%x = %x\n",
+ cap_pointer, cap_id);
if (cap_id == 0x01) {
break;
@@ -2322,17 +2321,17 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
/* Read it back to check */
pci_read_config_byte(rtlpci->pdev, cap_pointer + 5,
&pmcs_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Clear PME status 0x%2x to 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Clear PME status 0x%2x to 0x%2x\n",
+ cap_pointer + 5, pmcs_reg);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PME status(0x%2x) = 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PME status(0x%2x) = 0x%2x\n",
+ cap_pointer + 5, pmcs_reg);
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING,
- "Cannot find PME Capability\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "Cannot find PME Capability\n");
}
}
@@ -2354,13 +2353,13 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
if (!(support_remote_wakeup && mac->opmode == NL80211_IFTYPE_STATION)
|| !rtlhal->enter_pnp_sleep) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Normal Power off\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Normal Power off\n");
mac->link_state = MAC80211_NOLINK;
opmode = NL80211_IFTYPE_UNSPECIFIED;
_rtl8821ae_set_media_status(hw, opmode);
_rtl8821ae_poweroff_adapter(hw);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Wowlan Supported.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Wowlan Supported.\n");
/* 3 <1> Prepare for configuring wowlan related information */
/* Clear Fw WoWLAN event. */
rtl_write_byte(rtlpriv, REG_MCUTST_WOWLAN, 0x0);
@@ -2410,9 +2409,9 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
udelay(10);
tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait Rx DMA Finished before host sleep. count=%d\n",
- count);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait Rx DMA Finished before host sleep. count=%d\n",
+ count);
/* reset trx ring */
rtlpriv->intf_ops->reset_trx_ring(hw);
@@ -2438,7 +2437,7 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
/* Stop Pcie Interface Tx DMA. */
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Stop PCIE Tx DMA.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Stop PCIE Tx DMA.\n");
/* Wait for TxDMA idle. */
count = 0;
@@ -2447,9 +2446,9 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
udelay(10);
count++;
} while ((tmp != 0) && (count < 100));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait Tx DMA Finished before host sleep. count=%d\n",
- count);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait Tx DMA Finished before host sleep. count=%d\n",
+ count);
if (rtlhal->hw_rof_enable) {
printk("hw_rof_enable\n");
@@ -2501,8 +2500,8 @@ void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8821ae_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8821ae_enable_interrupt(hw);
@@ -2514,8 +2513,8 @@ void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -2586,15 +2585,15 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcount = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n",
- (eeaddr + 1), hwinfo[eeaddr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n",
+ (eeaddr + 1), hwinfo[eeaddr + 1]);
if (hwinfo[eeaddr + 1] == 0xFF) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/*2.4G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -3048,8 +3047,8 @@ static void _rtl8821ae_read_rfe_type(struct ieee80211_hw *hw, u8 *hwinfo,
rtlhal->rfe_type = 0x04;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RFE Type: 0x%2x\n", rtlhal->rfe_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RFE Type: 0x%2x\n", rtlhal->rfe_type);
}
static void _rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
@@ -3153,8 +3152,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
rtlefuse->board_type |= ODM_BOARD_BT;
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
if (rtlefuse->eeprom_channelplan == 0xff)
@@ -3176,8 +3175,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
}
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
if (!rtlefuse->autoload_failflag) {
rtlefuse->antenna_div_cfg =
@@ -3197,7 +3196,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
rtlefuse->antenna_div_type = 0;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type);
@@ -3246,8 +3245,8 @@ exit:
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}*/
void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
@@ -3264,20 +3263,20 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8821ae_read_adapter_info(hw, false);
} else {
@@ -3378,8 +3377,8 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
@@ -3524,8 +3523,8 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
wirelessmode = sta_entry->wireless_mode;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD,
- "wireless mode = 0x%x\n", wirelessmode);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_LOUD,
+ "wireless mode = 0x%x\n", wirelessmode);
if (mac->opmode == NL80211_IFTYPE_STATION ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
curtxbw_40mhz = mac->bw_40;
@@ -3675,8 +3674,8 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
ratr_bitmap);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_LOUD,
+ "ratr_bitmap :%x\n", ratr_bitmap);
/* *(u32 *)& rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
(ratr_index << 28)); */
@@ -3692,10 +3691,10 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
rate_mask[2], rate_mask[3],
rate_mask[4], rate_mask[5],
rate_mask[6]);
@@ -3710,7 +3709,7 @@ void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
if (rtlpriv->dm.useramask)
rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
- /*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD,
+ /*rtl_dbg(rtlpriv, COMP_RATR,DBG_LOUD,
"rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only\n");*/
rtl8821ae_update_hal_rate_table(hw, sta);
}
@@ -3782,16 +3781,16 @@ bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if ((!ppsc->hwradiooff)
&& (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -3841,7 +3840,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -3868,8 +3867,8 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -3898,26 +3897,26 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -3982,7 +3981,7 @@ void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
if (write_into_reg)
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
"receive_config=0x%08X, write_into_reg=%d\n",
rtlpci->receive_config, write_into_reg);
}
@@ -4035,9 +4034,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
cam |= BIT(26);
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_L, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_L, cam);
/* Write to Rx packet buffer. */
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
@@ -4045,18 +4044,18 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_L, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_L, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
} else if (addr == 3 || addr == 5) {/* WKFM[127:0] */
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_H, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_H, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_H, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0xf001);
}
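Alongside the debug-macro rename, the hw.c diff above converts _rtl8821ae_return_beacon_queue_skb() from the deprecated pci_unmap_single()/PCI_DMA_TODEVICE pair to the generic DMA API. The mapping side translates the same way; a minimal sketch where pdev, buf and len are illustrative placeholders rather than the driver's variables:

/* Legacy PCI DMA wrappers (since removed upstream):
 *	addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);
 *
 * Generic DMA API equivalent, as in the hunk above; note it takes a
 * struct device *, not a struct pci_dev *:
 */
dma_addr_t addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(&pdev->dev, addr))
	goto err;		/* mapping can fail: always check */
/* ... hand addr to the hardware descriptor, let the device DMA ... */
dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);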
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index dd7553e80ab1..7d6fb134c10f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -20,8 +20,8 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -37,8 +37,8 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -64,9 +64,9 @@ void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "In SwLedOn, LedAddr:%X LEDPIN=%d\n",
- ledreg, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "In SwLedOn, LedAddr:%X LEDPIN=%d\n",
+ ledreg, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, ledreg);
ledcfg |= BIT(5); /*Set 0x4c[21]*/
@@ -81,8 +81,8 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -109,8 +109,8 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -135,9 +135,9 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "In SwLedOff,LedAddr:%X LEDPIN=%d\n",
- ledreg, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "In SwLedOff,LedAddr:%X LEDPIN=%d\n",
+ ledreg, pled->ledpin);
/*Open-drain arrangement for controlling the LED*/
if (rtlpriv->ledctl.led_opendrain) {
u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
@@ -207,7 +207,7 @@ void rtl8821ae_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
+ ledaction);
_rtl8821ae_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index b8a2b2326902..f41a7643b9c4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -27,7 +27,12 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i = ffs(bitmask);
+
+ return i ? i - 1 : 32;
+}
static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -96,16 +101,16 @@ u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -115,9 +120,9 @@ void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -128,9 +133,9 @@ void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -140,9 +145,9 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -152,9 +157,9 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -166,9 +171,9 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -183,8 +188,8 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
}
@@ -267,20 +272,9 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
data_and_addr = ((newoffset << 20) |
(data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
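Together with the forward-declaration change at the top of phy.c, the hunk above replaces the open-coded lowest-set-bit loop with ffs(). The kernel's ffs() returns the 1-based index of the lowest set bit and 0 for an empty mask, so ffs(bitmask) - 1 reproduces the old loop for any non-zero mask, and returning 32 for a zero mask preserves the old loop's fall-off value:

/* Behavioural check of the ffs() rewrite shown above. */
static u32 calc_bit_shift(u32 bitmask)
{
	u32 i = ffs(bitmask);	/* 1-based lowest set bit, 0 if none */

	return i ? i - 1 : 32;	/* 32 == the old loop falling off the end */
}

/* calc_bit_shift(0x00f00000) == 20, so a masked field read becomes
 *	(rtl_read_dword(rtlpriv, regaddr) & 0x00f00000) >> 20
 * exactly as rtl8821ae_phy_query_bb_reg() computes returnvalue.
 */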
@@ -370,7 +364,7 @@ static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
- /* fall through */
+ fallthrough;
case 0:
case 2:
default:
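fallthrough is the pseudo-keyword from include/linux/compiler_attributes.h; unlike the old /* fall through */ comment it survives preprocessing and satisfies -Wimplicit-fallthrough. A portable standalone reduction (case values and helper name are hypothetical):

#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)
#endif

static int rfe_to_group(int rfe_type)
{
	int group = 0;

	switch (rfe_type) {
	case 3:
		group = 1;
		fallthrough;	/* deliberately continue into the shared path */
	case 0:
	case 2:
	default:
		group += 10;
		break;
	}
	return group;
}

int main(void)
{
	printf("%d %d\n", rfe_to_group(3), rfe_to_group(0));
	return 0;
}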
@@ -450,10 +444,10 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
u32 out = 0x200;
const s8 auto_temp = -1;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
- (int)swing_2g, (int)swing_5g,
- (int)rtlefuse->autoload_failflag);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
+ (int)swing_2g, (int)swing_5g,
+ (int)rtlefuse->autoload_failflag);
if (rtlefuse->autoload_failflag) {
if (band == BAND_ON_2_4G) {
@@ -531,9 +525,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
swing_a = (swing & 0x3) >> 0; /* 0xC6/C7[1:0] */
swing_b = (swing & 0xC) >> 2; /* 0xC6/C7[3:2] */
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
- swing_a, swing_b);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+ swing_a, swing_b);
/* 3 Path-A */
if (swing_a == 0x0) {
@@ -589,8 +583,8 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
}
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out);
return out;
}
@@ -652,23 +646,23 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
count = 0;
reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d\n", reg_41a);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Reg41A value %d\n", reg_41a);
reg_41a &= 0x30;
while ((reg_41a != 0x30) && (count < 50)) {
udelay(50);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "Delay 50us\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "Delay 50us\n");
reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
reg_41a &= 0x30;
count++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d\n", reg_41a);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Reg41A value %d\n", reg_41a);
}
if (count != 0)
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "PHY_SwitchWirelessBand8812(): Switch to 5G Band. Count = %d reg41A=0x%x\n",
- count, reg_41a);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "PHY_SwitchWirelessBand8812(): Switch to 5G Band. Count = %d reg41A=0x%x\n",
+ count, reg_41a);
/* 2012/02/01, Sinda add registry to switch workaround
without long-run verification for scan issue. */
@@ -693,9 +687,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
- rtlpriv->dm.ofdm_index[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
+ rtlpriv->dm.ofdm_index[RF90_PATH_A]);
}
if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) ||
@@ -722,8 +716,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
rtl8821ae_dm_clear_txpower_tracking_state(hw);
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "<==%s():Switch Band OK.\n", __func__);
return;
}
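Several hunks also replace hard-coded function names in format strings with %s/__func__ — note the retained string above still says PHY_SwitchWirelessBand8812() while the function is actually rtl8821ae_phy_switch_wirelessband(), exactly the drift __func__ prevents:

#include <stdio.h>

static void switch_band_demo(void)
{
	/* __func__ always matches the enclosing function, even after a
	 * rename; no string needs editing. */
	printf("<==%s():Switch Band OK.\n", __func__);
}

int main(void)
{
	switch_band_demo();
	return 0;
}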
@@ -756,18 +750,18 @@ static bool _rtl8821ae_check_positive(struct ieee80211_hw *hw,
rtlhal->type_alna << 16 |
rtlhal->type_apa << 24;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1, cond2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1, driver2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1, cond2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1, driver2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Board, Package) = (0x%X, 0x%X)\n",
- rtlhal->board_type, rtlhal->package_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ rtlhal->board_type, rtlhal->package_type);
/*============== Value Defined Check ===============*/
/*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
@@ -918,7 +912,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path);
return;
}
@@ -944,9 +938,9 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else if (band == BAND_ON_5G) {
@@ -967,13 +961,13 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band);
}
}
@@ -987,9 +981,9 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
return 0;
}
@@ -1014,9 +1008,9 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else if (band == BAND_ON_5G) {
@@ -1037,14 +1031,14 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
}
return value;
@@ -1144,7 +1138,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
[bw][rate_section][channel][RF90_PATH_A];
if (temp_pwrlmt == MAX_POWER_INDEX) {
if (bw == 0 || bw == 1) { /*5G 20M 40M VHT and HT can cross reference*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"No power limit table of the specified band %d, bandwidth %d, ratesection %d, channel %d, rf path %d\n",
1, bw, rate_section, channel, RF90_PATH_A);
if (rate_section == 2) {
@@ -1161,7 +1155,9 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A];
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d\n", temp_pwrlmt);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "use other value %d\n",
+ temp_pwrlmt);
}
}
}
@@ -1218,7 +1214,7 @@ static u8 _rtl8812ae_phy_get_txpower_by_rate_base_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Wrong rate 0x%x to obtain index in 2.4G in PHY_GetTxPowerByRateBaseIndex()\n",
rate);
break;
@@ -1285,7 +1281,7 @@ static u8 _rtl8812ae_phy_get_txpower_by_rate_base_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Wrong rate 0x%x to obtain index in 5G in PHY_GetTxPowerByRateBaseIndex()\n",
rate);
break;
@@ -1306,7 +1302,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
s8 temp_value = 0, temp_pwrlmt = 0;
u8 rf_path = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"=====> _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n");
_rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw);
@@ -1355,7 +1351,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
temp_value;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfpath %d] %d)\n",
regulation, bw, rate_section, channel,
rtlphy->txpwr_limit_2_4g[regulation][bw]
@@ -1420,7 +1416,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
[rf_path] = temp_value;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfpath %d] %d)\n",
regulation, bw, rate_section,
channel, rtlphy->txpwr_limit_5g[regulation]
@@ -1431,8 +1427,8 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
}
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "<===== _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "<===== %s()\n", __func__);
}
static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
@@ -1441,8 +1437,8 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 i, j, k, l, m;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "=====> _rtl8821ae_phy_init_txpower_limit()!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "=====>`%s()!\n", __func__);
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
@@ -1463,8 +1459,8 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
= MAX_POWER_INDEX;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "<===== _rtl8821ae_phy_init_txpower_limit()!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "<===== %s()!\n", __func__);
}
static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
@@ -1574,7 +1570,7 @@ static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee8021
0, 3, base);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
"<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n");
}
@@ -1630,13 +1626,13 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
channel_index = i;
}
} else
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n",
- band, __func__);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n",
+ band, __func__);
if (channel_index == -1)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Channel %d of Band %d in %s\n", channel,
- band, __func__);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Channel %d of Band %d in %s\n", channel,
+ band, __func__);
return channel_index;
}
@@ -1655,9 +1651,9 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) ||
!_rtl8812ae_get_integer_from_string((char *)ppower_limit,
&power_limit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Illegal index of pwr_lmt table [chnl %d][val %d]\n",
- channel, power_limit);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Illegal index of pwr_lmt table [chnl %d][val %d]\n",
+ channel, power_limit);
}
power_limit = power_limit > MAX_POWER_INDEX ?
@@ -1717,10 +1713,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
[rate_section][channel_index][RF90_PATH_A] =
power_limit;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel_index,
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n",
+ regulation, bandwidth, rate_section, channel_index,
+ rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
} else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
@@ -1740,14 +1736,14 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
rtlphy->txpwr_limit_5g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A] = power_limit;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel,
- rtlphy->txpwr_limit_5g[regulation][bandwidth]
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n",
+ regulation, bandwidth, rate_section, channel,
+ rtlphy->txpwr_limit_5g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Cannot recognize the band info in %s\n", pband);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Cannot recognize the band info in %s\n", pband);
return;
}
}
@@ -1779,8 +1775,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw)
array = RTL8821AE_TXPWR_LMT;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
for (i = 0; i < array_len; i += 7) {
u8 *regulation = array[i];
@@ -1812,7 +1807,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -1821,7 +1816,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("BB_PG Reg Fail!!\n");
return false;
}
@@ -1835,7 +1830,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("AGC Table Fail\n");
return false;
}
@@ -1903,7 +1898,7 @@ static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
arraylength = RTL8821AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8821AE_MAC_REG_ARRAY;
@@ -1911,8 +1906,8 @@ static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
arraylength = RTL8812AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8812AE_MAC_REG_ARRAY;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img: MAC_REG_ARRAY LEN %d\n", arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img: MAC_REG_ARRAY LEN %d\n", arraylength);
return __rtl8821ae_phy_config_with_headerfile(hw,
ptrarray, arraylength, rtl_write_byte_with_val32);
@@ -1978,22 +1973,22 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
band = BAND_ON_2_4G;
}
if (rfpath >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
rfpath = MAX_RF_PATH - 1;
}
if (txnum >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
txnum = MAX_RF_PATH - 1;
}
rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
- band, rfpath, txnum, rate_section,
- rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
+ band, rfpath, txnum, rate_section,
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]);
}
static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
@@ -2015,8 +2010,8 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
if (configtype != BASEBAND_CONFIG_PHY_REG) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
return true;
}
for (i = 0; i < arraylen; i += 6) {
@@ -2082,9 +2077,9 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table_a = RTL8812AE_RADIOA_ARRAY;
radioa_arraylen_b = RTL8812AE_RADIOB_1TARRAYLEN;
radioa_array_table_b = RTL8812AE_RADIOB_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2113,9 +2108,9 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
radioa_array_table = RTL8821AE_RADIOA_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2146,21 +2141,21 @@ void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -2427,14 +2422,14 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
rate_section = 5;
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
"Wrong rate 0x%x\n", rate);
break;
}
if (band_temp == BAND_ON_5G && rate_section == 0)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong rate 0x%x: No CCK in 5G Band\n", rate);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Wrong rate 0x%x: No CCK in 5G Band\n", rate);
/*workaround for wrong index combination to obtain tx power limit,
OFDM only exists in BW 20M*/
@@ -2459,10 +2454,10 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
if (band_temp == -1 || regulation == -1 || bandwidth_temp == -1 ||
rate_section == -1 || channel_temp == -1) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n",
- band_temp, regulation, bandwidth_temp, rf_path,
- rate_section, channel_temp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n",
+ band_temp, regulation, bandwidth_temp, rf_path,
+ rate_section, channel_temp);
return MAX_POWER_INDEX;
}
@@ -2496,8 +2491,8 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
rtlphy->txpwr_limit_5g[regu][chnl]
[sec][chnl][rf_path];
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "No power limit table of the specified band\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "No power limit table of the specified band\n");
}
return power_limit;
}
@@ -2605,7 +2600,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
else
tx_pwr_diff = tx_pwr_diff > limit ? limit : tx_pwr_diff;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Maximum power by rate %d, final power by rate %d\n",
limit, tx_pwr_diff);
}
@@ -2628,7 +2623,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
(channel > 14 || channel < 1)) ||
((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Illegal channel!!\n");
}
@@ -2639,7 +2634,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
else if (DESC_RATE6M <= rate)
txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "invalid rate\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "invalid rate\n");
if (DESC_RATE6M <= rate && rate <= DESC_RATE54M &&
!RTL8821AE_RX_HAL_IS_CCK_RATE(rate))
@@ -2673,8 +2668,8 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
if (DESC_RATE6M <= rate)
txpower = rtlefuse->txpwr_5g_bw40base[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING,
- "INVALID Rate.\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING,
+ "INVALID Rate.\n");
if (DESC_RATE6M <= rate && rate <= DESC_RATE54M &&
!RTL8821AE_RX_HAL_IS_CCK_RATE(rate))
@@ -2940,7 +2935,7 @@ static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
MASKBYTE3, power_index);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
"Invalid Rate!!\n");
break;
}
@@ -3139,13 +3134,13 @@ static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
MASKBYTE3, power_index);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid RFPath!!\n");
}
}
@@ -3352,7 +3347,7 @@ static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv *rtlpriv, u8 bw)
rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "unknown Bandwidth: 0x%x\n", bw);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "unknown Bandwidth: 0x%x\n", bw);
break;
}
}
@@ -3403,11 +3398,11 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 sub_chnl = 0;
u8 l1pk_val = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" :
- (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" :
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
"40MHz" : "80MHz")));
_rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw);
@@ -3477,7 +3472,7 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -3494,8 +3489,8 @@ void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
rtl8821ae_phy_set_bw_mode_callback(hw);
else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "FALSE driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "FALSE driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -3510,8 +3505,8 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
u8 path;
u32 data;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
@@ -3553,7 +3548,7 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
}
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -3570,8 +3565,8 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
return 0;
}
while (rtlphy->lck_inprogress && timecount < timeout) {
@@ -3588,16 +3583,16 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
if (channel == 0)
channel = 1;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d, band type is %d\n",
- rtlphy->current_channel, rtlhal->current_bandtype);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d, band type is %d\n",
+ rtlphy->current_channel, rtlhal->current_bandtype);
rtl8821ae_phy_sw_chnl_callback(hw);
rtl8821ae_dm_clear_txpower_tracking_state(hw);
rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
rtlphy->sw_chnl_inprogress = false;
return 1;
}
@@ -3638,7 +3633,7 @@ static void _rtl8821ae_iqk_backup_macbb(struct ieee80211_hw *hw,
for (i = 0; i < mac_bb_num; i++)
macbb_backup[i] = rtl_read_dword(rtlpriv, backup_macbb_reg[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupMacBB Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupMacBB Success!!!!\n");
}
static void _rtl8821ae_iqk_backup_afe(struct ieee80211_hw *hw, u32 *afe_backup,
@@ -3651,7 +3646,7 @@ static void _rtl8821ae_iqk_backup_afe(struct ieee80211_hw *hw, u32 *afe_backup,
/*Save AFE Parameters */
for (i = 0; i < afe_num; i++)
afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupAFE Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupAFE Success!!!!\n");
}
static void _rtl8821ae_iqk_backup_rf(struct ieee80211_hw *hw, u32 *rfa_backup,
@@ -3669,7 +3664,7 @@ static void _rtl8821ae_iqk_backup_rf(struct ieee80211_hw *hw, u32 *rfa_backup,
rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i],
BMASKDWORD);
}
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupRF Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupRF Success!!!!\n");
}
static void _rtl8821ae_iqk_configure_mac(
@@ -3698,13 +3693,13 @@ static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
- tx_x, tx_y);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
- rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
- rtl_get_bbreg(hw, 0xccc, 0x000007ff));
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x, tx_y);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xccc, 0x000007ff));
break;
default:
break;
@@ -3720,12 +3715,12 @@ static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1);
rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "rx_x = %x;;rx_y = %x ====>fill to IQC\n",
- rx_x>>1, rx_y>>1);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "0xc10 = %x ====>fill to IQC\n",
- rtl_read_dword(rtlpriv, 0xc10));
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "rx_x = %x;;rx_y = %x ====>fill to IQC\n",
+ rx_x >> 1, rx_y >> 1);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "0xc10 = %x ====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xc10));
break;
default:
break;
@@ -3750,9 +3745,9 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
int i, k, vdf_y[3], vdf_x[3],
ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0;
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "BandWidth = %d.\n",
- rtlphy->current_chan_bw);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "BandWidth = %d.\n",
+ rtlphy->current_chan_bw);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
vdf_enable = true;
@@ -3856,7 +3851,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
if (vdf_enable == 1) {
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n");
for (k = 0; k <= 2; k++) {
switch (k) {
case 0:
@@ -3870,9 +3865,9 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
break;
case 2:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff);
tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
tx_dt[cal] = ((16*tx_dt[cal])*10000/15708);
@@ -3992,7 +3987,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
if (vdf_enable == 1) {
rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); /* TX VDF Disable */
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RXVDF Start\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RXVDF Start\n");
for (k = 0; k <= 2; k++) {
/* ====== RX mode TXK (RXK Step 1) ====== */
rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
@@ -4029,14 +4024,17 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
break;
case 2:
{
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "VDF_Y[1] = %x;;;VDF_Y[0] = %x\n",
- vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "VDF_X[1] = %x;;;VDF_X[0] = %x\n",
- vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "VDF_Y[1] = %x;;;VDF_Y[0] = %x\n",
+ vdf_y[1] >> 21 & 0x00007ff,
+ vdf_y[0] >> 21 & 0x00007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "VDF_X[1] = %x;;;VDF_X[0] = %x\n",
+ vdf_x[1] >> 21 & 0x00007ff,
+ vdf_x[0] >> 21 & 0x00007ff);
rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "Rx_dt = %d\n", rx_dt[cal]);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "Rx_dt = %d\n",
+ rx_dt[cal]);
rx_dt[cal] = ((16*rx_dt[cal])*10000/13823);
rx_dt[cal] = (rx_dt[cal] >> 1)+(rx_dt[cal] & BIT(0));
rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */
@@ -4098,10 +4096,10 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
- RT_TRACE(rtlpriv,
- COMP_IQK,
- DBG_LOUD,
- "RXK Step 1 fail\n");
+ rtl_dbg(rtlpriv,
+ COMP_IQK,
+ DBG_LOUD,
+ "RXK Step 1 fail\n");
}
/* ====== RX IQK ====== */
@@ -4257,8 +4255,8 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
- RT_TRACE(rtlpriv, COMP_IQK,
- DBG_LOUD, "1");
+ rtl_dbg(rtlpriv, COMP_IQK,
+ DBG_LOUD, "1");
}
/* ====== RX IQK ====== */
@@ -4352,20 +4350,20 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
/* FillIQK Result */
switch (path) {
case RF90_PATH_A:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "========Path_A =======\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "========Path_A =======\n");
if (tx_average == 0)
break;
for (i = 0; i < tx_average; i++) {
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i,
- (tx_x0_rxk[i])>>21&0x000007ff, i,
- (tx_y0_rxk[i])>>21&0x000007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i,
- (tx_x0[i])>>21&0x000007ff, i,
- (tx_y0[i])>>21&0x000007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i,
+ (tx_x0_rxk[i]) >> 21 & 0x000007ff, i,
+ (tx_y0_rxk[i]) >> 21 & 0x000007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i,
+ (tx_x0[i]) >> 21 & 0x000007ff, i,
+ (tx_y0[i]) >> 21 & 0x000007ff);
}
for (i = 0; i < tx_average; i++) {
for (ii = i+1; ii < tx_average; ii++) {
@@ -4393,7 +4391,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
break;
for (i = 0; i < rx_average; i++)
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i,
(rx_x0[i])>>21&0x000007ff, i,
(rx_y0[i])>>21&0x000007ff);
@@ -4439,8 +4437,8 @@ static void _rtl8821ae_iqk_restore_rf(struct ieee80211_hw *hw,
switch (path) {
case RF90_PATH_A:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "RestoreRF Path A Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "RestoreRF Path A Success!!!!\n");
break;
default:
break;
@@ -4468,7 +4466,7 @@ static void _rtl8821ae_iqk_restore_afe(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
rtl_write_dword(rtlpriv, 0xcb8, 0x0);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreAFE Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreAFE Success!!!!\n");
}
static void _rtl8821ae_iqk_restore_macbb(struct ieee80211_hw *hw,
@@ -4483,7 +4481,7 @@ static void _rtl8821ae_iqk_restore_macbb(struct ieee80211_hw *hw,
/* Reload MacBB Parameters */
for (i = 0; i < macbb_num; i++)
rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreMacBB Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreMacBB Success!!!!\n");
}
#undef MACBB_REG_NUM
@@ -4531,7 +4529,7 @@ static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */
/* struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (main)
rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1);
@@ -4579,11 +4577,11 @@ void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 i;
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix) /
- sizeof(struct iqk_matrix_regs)),
- IQK_MATRIX_SETTINGS_NUM);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM);
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
rtlphy->iqk_matrix[i].value[0][0] = 0x100;
@@ -4630,20 +4628,20 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
@@ -4659,7 +4657,7 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8821ae_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -4669,9 +4667,9 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
@@ -4696,8 +4694,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -4731,18 +4729,17 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8821ae_phy_set_rf_on(hw);
}
@@ -4763,27 +4760,27 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
index a6e56872e063..e339f2383e6d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
@@ -428,13 +428,13 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
/*put arrays in dm.c*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index cd809c992245..9d6f8dcbf2d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -391,7 +391,7 @@ static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, __le32 *pdesc)
rx_rate = get_rx_desc_rxmcs(pdesc);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15))
return true;
@@ -405,7 +405,7 @@ static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, __le32 *pdesc)
rx_rate = get_rx_desc_rxmcs(pdesc);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
if (rx_rate >= DESC_RATEVHT1SS_MCS0)
return true;
@@ -461,12 +461,12 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
status->vht_nss = rtl8821ae_get_rx_vht_nss(hw, pdesc);
status->is_cck = RTL8821AE_RX_HAL_IS_CCK_RATE(status->rate);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n",
- (status->rx_packet_bw == 2) ? "80M" :
- (status->rx_packet_bw == 1) ? "40M" : "20M",
- status->is_ht, status->is_vht, status->vht_nss,
- status->is_short_gi);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n",
+ (status->rx_packet_bw == 2) ? "80M" :
+ (status->rx_packet_bw == 1) ? "40M" : "20M",
+ status->is_ht, status->is_vht, status->vht_nss,
+ status->is_short_gi);
if (get_rx_status_desc_rpt_sel(pdesc))
status->packet_report_type = C2H_PACKET;
@@ -483,9 +483,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -558,9 +558,10 @@ static u8 rtl8821ae_bw_mapping(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 bw_setting_of_desc = 0;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "rtl8821ae_bw_mapping, current_chan_bw %d, packet_bw %d\n",
- rtlphy->current_chan_bw, ptcb_desc->packet_bw);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "%s, current_chan_bw %d, packet_bw %d\n",
+ __func__,
+ rtlphy->current_chan_bw, ptcb_desc->packet_bw);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
@@ -602,8 +603,9 @@ static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_40_UPPER_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
} else {
if ((mac->cur_40_prime_sc ==
HAL_PRIME_CHNL_OFFSET_LOWER) &&
@@ -630,8 +632,9 @@ static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_20_UPPERST_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
}
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
@@ -690,11 +693,11 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8821ae));
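This hunk moves from the deprecated pci_map_single()/pci_dma_mapping_error() wrappers to the generic DMA API on &pdev->dev. A sketch of the resulting streaming-DMA pattern — the helper name and error code here are illustrative:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_tx_skb(struct device *dev, struct sk_buff *skb,
		      dma_addr_t *mapping)
{
	/* dma_map_single() on the generic struct device replaces the
	 * PCI-specific wrapper; the returned handle must be checked
	 * with dma_mapping_error() before use. */
	*mapping = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;

	/* ... program the TX descriptor with *mapping, and later undo
	 * with dma_unmap_single(dev, *mapping, skb->len, DMA_TO_DEVICE). */
	return 0;
}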
@@ -708,9 +711,9 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
rtl8821ae_insert_emcontent(ptcb_desc,
(__le32 *)skb->data);
}
@@ -789,8 +792,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -822,7 +825,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
}
rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -834,13 +837,12 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d05e709536ea..06e073defad6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -259,15 +259,15 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
? USB_HIGH_SPEED_BULK_SIZE
: USB_FULL_SPEED_BULK_SIZE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
- rtlusb->max_bulk_out_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size);
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Invalid endpoint map setting!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Invalid endpoint map setting!\n");
return -EINVAL;
}
}
@@ -289,7 +289,7 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
return 0;
}
-static void _rtl_rx_work(unsigned long param);
+static void _rtl_rx_work(struct tasklet_struct *t);
static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
{
@@ -310,8 +310,8 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
init_usb_anchor(&rtlusb->rx_cleanup_urbs);
skb_queue_head_init(&rtlusb->rx_queue);
- rtlusb->rx_work_tasklet.func = _rtl_rx_work;
- rtlusb->rx_work_tasklet.data = (unsigned long)rtlusb;
+ tasklet_setup(&rtlusb->rx_work_tasklet, _rtl_rx_work);
return 0;
}
@@ -337,10 +337,10 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
else if (usb_endpoint_dir_out(pep_desc))
rtlusb->out_ep_nums++;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
- pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
- pep_desc->bInterval);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval);
}
if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
pr_err("Too few input end points found\n");
@@ -528,9 +528,9 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
#define __RX_SKB_MAX_QUEUED 64
-static void _rtl_rx_work(unsigned long param)
+static void _rtl_rx_work(struct tasklet_struct *t)
{
- struct rtl_usb *rtlusb = (struct rtl_usb *)param;
+ struct rtl_usb *rtlusb = from_tasklet(rtlusb, t, rx_work_tasklet);
struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
struct sk_buff *skb;
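With the tasklet_setup() API the callback receives the tasklet pointer itself and recovers its containing object through from_tasklet(), a container_of() wrapper, instead of an opaque unsigned long. A sketch with a hypothetical struct standing in for rtl_usb:

#include <linux/interrupt.h>

struct demo_usb {
	struct tasklet_struct rx_work_tasklet;
	/* ... rx queue, hw state ... */
};

static void demo_rx_work(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, field) is container_of() keyed on the
	 * embedded tasklet, so no .data cookie is needed. */
	struct demo_usb *priv = from_tasklet(priv, t, rx_work_tasklet);

	(void)priv;	/* drain priv's rx queue here */
}

static void demo_init_rx(struct demo_usb *priv)
{
	/* Replaces the open-coded .func/.data assignments. */
	tasklet_setup(&priv->rx_work_tasklet, demo_rx_work);
}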
@@ -933,7 +933,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
}
if (rtlpriv->psc.sw_ps_enabled) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 13421cf2d201..fdccfd29fd61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1966,7 +1966,6 @@ struct rtl_efuse {
u8 txpwr_safetyflag; /* Band edge enable flag */
u16 eeprom_txpowerdiff;
- u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
u8 antenna_txpwdiff[3];
u8 eeprom_regulatory;
@@ -2936,9 +2935,6 @@ enum bt_radio_shared {
#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
(ppsc->cur_ps_level |= _ps_flg)
-#define container_of_dwork_rtl(x, y, z) \
- container_of(to_delayed_work(x), y, z)
-
#define FILL_OCTET_STRING(_os, _octet, _len) \
(_os).octet = (u8 *)(_octet); \
(_os).length = (_len);
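container_of_dwork_rtl() merely composed to_delayed_work() with container_of(); open-coding that at the call sites reads the same and drops a driver-private macro. The pattern, with a hypothetical embedding struct:

#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work watchdog_wq;
	/* ... */
};

static void demo_watchdog(struct work_struct *work)
{
	/* to_delayed_work() is itself container_of() on the embedded
	 * work_struct; chaining the two recovers the private struct. */
	struct demo_priv *priv =
		container_of(to_delayed_work(work), struct demo_priv,
			     watchdog_wq);

	(void)priv;	/* kick the watchdog using priv state */
}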
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index f769c982cc91..3852c4f0ac0b 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -229,7 +229,8 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
if (!buf)
return -ENOMEM;
- ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf);
+ ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, offset,
+ buf_size, (u32 *)buf);
if (ret) {
rtw_err(rtwdev, "failed to dump rsvd page\n");
vfree(buf);
@@ -427,12 +428,11 @@ static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
- val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
@@ -447,12 +447,11 @@ static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
- val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
@@ -545,6 +544,28 @@ static void rtw_print_rate(struct seq_file *m, u8 rate)
}
}
+#define case_REGD(src) \
+ case RTW_REGD_##src: return #src
+
+static const char *rtw_get_regd_string(u8 regd)
+{
+ switch (regd) {
+ case_REGD(FCC);
+ case_REGD(MKK);
+ case_REGD(ETSI);
+ case_REGD(IC);
+ case_REGD(KCC);
+ case_REGD(ACMA);
+ case_REGD(CHILE);
+ case_REGD(UKRAINE);
+ case_REGD(MEXICO);
+ case_REGD(CN);
+ case_REGD(WW);
+ default:
+ return "Unknown";
+ }
+}
+
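case_REGD() leans on the preprocessor: ## pastes the enumerator together, # stringizes the argument, so each regulatory domain is spelled exactly once. A self-contained reduction with a hypothetical enum:

#include <stdio.h>

#define case_DEMO(src) case DEMO_REGD_##src: return #src

enum demo_regd { DEMO_REGD_FCC, DEMO_REGD_ETSI, DEMO_REGD_WW };

static const char *demo_regd_string(enum demo_regd regd)
{
	switch (regd) {
	case_DEMO(FCC);		/* expands to: case DEMO_REGD_FCC: return "FCC"; */
	case_DEMO(ETSI);
	case_DEMO(WW);
	default:
		return "Unknown";
	}
}

int main(void)
{
	printf("%s\n", demo_regd_string(DEMO_REGD_ETSI));
	return 0;
}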
static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
@@ -556,6 +577,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
u8 ch = hal->current_channel;
u8 regd = rtwdev->regd.txpwr_regd;
+ seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
"path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 63b00bc19000..042015bc8055 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -193,6 +193,15 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
+void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
+{
+ if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
+ rtw_fw_recovery(rtwdev);
+ else
+ rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
+}
+EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
+
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 *h2c)
{
@@ -1404,29 +1413,16 @@ free:
return ret;
}
-int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
- u32 offset, u32 size, u32 *buf)
+static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
+ u32 *buf, u32 residue, u16 start_pg)
{
- struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- u32 residue, i;
- u16 start_pg;
+ u32 i;
u16 idx = 0;
u16 ctl;
u8 rcr;
- if (size & 0x3) {
- rtw_warn(rtwdev, "should be 4-byte aligned\n");
- return -EINVAL;
- }
-
- offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
- residue = offset & (FIFO_PAGE_SIZE - 1);
- start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
- start_pg += RSVD_PAGE_START_ADDR;
-
rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
-
/* disable rx clock gate */
rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
@@ -1448,6 +1444,64 @@ int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
rtw_write8(rtwdev, REG_RCR + 2, rcr);
+}
+
+static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
+ u32 offset, u32 size, u32 *buf)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 start_pg, residue;
+
+ if (sel >= RTW_FW_FIFO_MAX) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
+ return;
+ }
+ if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
+ offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
+ residue = offset & (FIFO_PAGE_SIZE - 1);
+ start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];
+
+ rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
+}
+
+static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
+ enum rtw_fw_fifo_sel sel,
+ u32 start_addr, u32 size)
+{
+ switch (sel) {
+ case RTW_FW_FIFO_SEL_TX:
+ case RTW_FW_FIFO_SEL_RX:
+ if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
+ return false;
+ fallthrough;
+ default:
+ return true;
+ }
+}
+
+int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
+ u32 *buffer)
+{
+ if (!rtwdev->chip->fw_fifo_addr) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
+ return -ENOTSUPP;
+ }
+
+ if (size == 0 || !buffer)
+ return -EINVAL;
+
+ if (size & 0x3) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
+ return -EINVAL;
+ }
+
+ if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
+ return -EINVAL;
+ }
+
+ rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);
+
return 0;
}
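For readers of the new export: a sketch of a hypothetical caller, mirroring the validation rtw_fw_dump_fifo() performs itself (the 256-byte length is an arbitrary example value; the buffer must hold `size` bytes and `size` must be a multiple of 4):

	u32 size = 256;	/* example value, 4-byte aligned */
	u32 *buf = vmalloc(size);

	if (buf) {
		if (!rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW,
				      0, size, buf))
			print_hex_dump_bytes("fifo: ", DUMP_PREFIX_OFFSET,
					     buf, size);
		vfree(buf);
	}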
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 686dcd3bbda6..08644540d259 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -16,7 +16,6 @@
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
-#define RSVD_PAGE_START_ADDR 0x780
#define FIFO_DUMP_ADDR 0x8000
#define DLFW_PAGE_SIZE_SHIFT_LEGACY 12
@@ -508,6 +507,20 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define GET_FW_DUMP_LEN(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(15, 0))
+#define GET_FW_DUMP_SEQ(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(22, 16))
+#define GET_FW_DUMP_MORE(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), BIT(23))
+#define GET_FW_DUMP_VERSION(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(31, 24))
+#define GET_FW_DUMP_TLV_TYPE(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(15, 0))
+#define GET_FW_DUMP_TLV_LEN(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16))
+#define GET_FW_DUMP_TLV_VAL(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -564,5 +577,8 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
struct cfg80211_ssid *ssid);
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
+void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
+int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
+ u32 *buffer);
#endif
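The GET_FW_DUMP_* accessors read little-endian 32-bit words from the crash-dump header the firmware places at the start of its RX buffer. A sketch of decoding a dumped buffer, matching how the new crash-log code in main.c consumes these fields (FW_CD_TYPE is defined in main.h later in this series):

	u16 len   = GET_FW_DUMP_LEN(buf);	/* word 0, bits 15:0 */
	u8  seq   = GET_FW_DUMP_SEQ(buf);	/* word 0, bits 22:16 */
	bool more = GET_FW_DUMP_MORE(buf);	/* word 0, bit 23 */

	if (seq == 0 && GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE)
		pr_debug("not a crash-dump TLV\n");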
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 19b9b7ab016b..59028b121b00 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -114,18 +114,13 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
- u32 cnt;
+ u32 val;
target &= mask;
- for (cnt = 0; cnt < RTW_PWR_POLLING_CNT; cnt++) {
- if ((rtw_read8(rtwdev, addr) & mask) == target)
- return true;
-
- udelay(50);
- }
-
- return false;
+ return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
+ 50, 50 * RTW_PWR_POLLING_CNT, false,
+ rtwdev, addr) == 0;
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
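read_poll_timeout_atomic() (from linux/iopoll.h) busy-polls `op(args...)` into `val` until `cond` is true or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise, so the conversion keeps the old 50 us delay and RTW_PWR_POLLING_CNT iteration budget. Roughly equivalent open-coded logic, for reference:

	u32 elapsed = 0;

	do {
		val = rtw_read8(rtwdev, addr);
		if ((val & mask) == target)
			return true;
		udelay(50);
		elapsed += 50;
	} while (elapsed < 50 * RTW_PWR_POLLING_CNT);

	return false;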
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6b199152abcf..c92fba2fa480 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -358,13 +358,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_leave_lps_deep(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
- enum rtw_net_type net_type;
-
+ rtw_vif_assoc_changed(rtwvif, conf);
if (conf->assoc) {
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
- net_type = RTW_NET_MGD_LINKED;
- rtwvif->aid = conf->aid;
rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
@@ -372,12 +369,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_bf_assoc(rtwdev, vif, conf);
} else {
rtw_leave_lps(rtwdev);
- net_type = RTW_NET_NO_LINK;
- rtwvif->aid = 0;
rtw_bf_disassoc(rtwdev, vif, conf);
}
- rtwvif->net_type = net_type;
config |= PORT_SET_NET_TYPE;
config |= PORT_SET_AID;
}
@@ -429,56 +423,17 @@ static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
return 0;
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
-static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
-{
- clear_bit(mac_id, rtwdev->mac_id_map);
-}
-
static int rtw_ops_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- int i;
int ret = 0;
mutex_lock(&rtwdev->mutex);
-
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM) {
- ret = -ENOSPC;
- goto out;
- }
-
- si->sta = sta;
- si->vif = vif;
- si->init_ra_lv = 1;
- ewma_rssi_init(&si->avg_rssi);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- rtw_txq_init(rtwdev, sta->txq[i]);
-
- rtw_update_sta_info(rtwdev, si);
- rtw_fw_media_status_report(rtwdev, si->mac_id, true);
-
- rtwdev->sta_cnt++;
-
- rtw_info(rtwdev, "sta %pM joined with macid %d\n",
- sta->addr, si->mac_id);
-
-out:
+ ret = rtw_sta_add(rtwdev, sta, vif);
mutex_unlock(&rtwdev->mutex);
+
return ret;
}
@@ -487,25 +442,11 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- int i;
mutex_lock(&rtwdev->mutex);
-
- rtw_release_macid(rtwdev, si->mac_id);
- rtw_fw_media_status_report(rtwdev, si->mac_id, false);
-
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- rtw_txq_cleanup(rtwdev, sta->txq[i]);
-
- kfree(si->mask);
-
- rtwdev->sta_cnt--;
-
- rtw_info(rtwdev, "sta %pM with macid %d left\n",
- sta->addr, si->mac_id);
-
+ rtw_sta_remove(rtwdev, sta, true);
mutex_unlock(&rtwdev->mutex);
+
return 0;
}
@@ -845,6 +786,17 @@ static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
}
#endif
+static void rtw_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -871,6 +823,7 @@ const struct ieee80211_ops rtw_ops = {
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
.set_antenna = rtw_ops_set_antenna,
.get_antenna = rtw_ops_get_antenna,
+ .reconfig_complete = rtw_reconfig_complete,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
.resume = rtw_ops_resume,
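The new reconfig_complete hook closes the recovery loop: rtw_fw_recovery() (added in main.c below) sets RTW_FLAG_RESTARTING and ends by calling ieee80211_restart_hw(); once mac80211 has replayed its configuration it invokes this callback with IEEE80211_RECONFIG_TYPE_RESTART and the flag is cleared. A condensed sketch of the flag lifecycle (the real code splits this across the recovery worker and the hook above):

	/* recovery side: refuse to stack restarts */
	if (!test_and_set_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
		ieee80211_restart_hw(rtwdev->hw);

	/* completion side: restart finished, allow recovery again */
	if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
		clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags);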
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 54044abf30d7..565efd880624 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -259,6 +259,198 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
+int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
+
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+
+ si->sta = sta;
+ si->vif = vif;
+ si->init_ra_lv = 1;
+ ewma_rssi_init(&si->avg_rssi);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_init(rtwdev, sta->txq[i]);
+
+ rtw_update_sta_info(rtwdev, si);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, true);
+
+ rtwdev->sta_cnt++;
+ rtw_info(rtwdev, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
+
+ return 0;
+}
+
+void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ bool fw_exist)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
+
+ rtw_release_macid(rtwdev, si->mac_id);
+ if (fw_exist)
+ rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_cleanup(rtwdev, sta->txq[i]);
+
+ kfree(si->mask);
+
+ rtwdev->sta_cnt--;
+ rtw_info(rtwdev, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
+}
+
+static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
+{
+ u32 size = rtwdev->chip->fw_rxff_size;
+ u32 *buf;
+ u8 seq;
+ bool ret = true;
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto exit;
+
+ if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n");
+ goto free_buf;
+ }
+
+ if (GET_FW_DUMP_LEN(buf) == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n");
+ goto free_buf;
+ }
+
+ seq = GET_FW_DUMP_SEQ(buf);
+ if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW,
+ "fw crash dump's seq is wrong: %d\n", seq);
+ goto free_buf;
+ }
+ if (seq == 0 &&
+ (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE ||
+ GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN ||
+ GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n");
+ goto free_buf;
+ }
+
+ print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size);
+
+ if (GET_FW_DUMP_MORE(buf) == 1) {
+ rtwdev->fw.prev_dump_seq = seq;
+ ret = false;
+ }
+
+free_buf:
+ vfree(buf);
+exit:
+ rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
+
+ return ret;
+}
+
+void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
+ struct ieee80211_bss_conf *conf)
+{
+ if (conf && conf->assoc) {
+ rtwvif->aid = conf->aid;
+ rtwvif->net_type = RTW_NET_MGD_LINKED;
+ } else {
+ rtwvif->aid = 0;
+ rtwvif->net_type = RTW_NET_NO_LINK;
+ }
+}
+
+static void rtw_reset_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+
+ rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
+}
+
+static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+ if (rtwdev->sta_cnt == 0) {
+ rtw_warn(rtwdev, "sta count before reset should not be 0\n");
+ return;
+ }
+ rtw_sta_remove(rtwdev, sta, false);
+}
+
+static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ rtw_bf_disassoc(rtwdev, vif, NULL);
+ rtw_vif_assoc_changed(rtwvif, NULL);
+ rtw_txq_cleanup(rtwdev, vif->txq);
+}
+
+void rtw_fw_recovery(struct rtw_dev *rtwdev)
+{
+ if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work);
+}
+
+static void rtw_fw_recovery_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ fw_recovery_work);
+
+ /* A false return from rtw_fw_dump_crash_log() means there is still
+ * more log to dump. The driver sets 0x1cf[7:0] = 0x1 to tell the
+ * firmware to dump the remaining part of the log, and the firmware
+ * will trigger an IMR_C2HCMD interrupt to inform the driver once the
+ * log is ready.
+ */
+ if (!rtw_fw_dump_crash_log(rtwdev)) {
+ rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+ return;
+ }
+ rtwdev->fw.prev_dump_seq = 0;
+
+ WARN(1, "firmware crash, starting reset and recovery\n");
+
+ mutex_lock(&rtwdev->mutex);
+
+ set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ rcu_read_lock();
+ rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev);
+ rcu_read_unlock();
+ rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev);
+ rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev);
+ rtw_enter_ips(rtwdev);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ ieee80211_restart_hw(rtwdev->hw);
+}
+
struct rtw_txq_ba_iter_data {
};
@@ -474,10 +666,10 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
case EFUSE_HW_CAP_IGNORE:
case EFUSE_HW_CAP_SUPP_BW80:
bw |= BIT(RTW_CHANNEL_WIDTH_80);
- /* fall through */
+ fallthrough;
case EFUSE_HW_CAP_SUPP_BW40:
bw |= BIT(RTW_CHANNEL_WIDTH_40);
- /* fall through */
+ fallthrough;
default:
bw |= BIT(RTW_CHANNEL_WIDTH_20);
break;
@@ -1422,8 +1614,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
- tasklet_init(&rtwdev->tx_tasklet, rtw_tx_tasklet,
- (unsigned long)rtwdev);
+ tasklet_setup(&rtwdev->tx_tasklet, rtw_tx_tasklet);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
@@ -1432,6 +1623,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work);
INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
@@ -1473,6 +1665,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
if (ret) {
rtw_warn(rtwdev, "no wow firmware loaded\n");
+ wait_for_completion(&rtwdev->fw.completion);
+ if (rtwdev->fw.firmware)
+ release_firmware(rtwdev->fw.firmware);
return ret;
}
}
@@ -1487,6 +1682,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
+ rtw_wait_firmware_completion(rtwdev);
+
if (fw->firmware)
release_firmware(fw->firmware);
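A note on the crash-log fragmenting that rtw_fw_dump_crash_log() and rtw_fw_recovery_work() implement together: fragment 0 must carry the FW_CD_* TLV magic, each later fragment must carry the next sequence number, and the MORE bit says another IMR_C2HCMD interrupt will follow once the firmware has staged the next chunk. The core of that hand-off, extracted from the functions above:

	if (GET_FW_DUMP_MORE(buf) == 1) {
		rtwdev->fw.prev_dump_seq = seq;		/* expect seq + 1 next */
		rtw_write8(rtwdev, REG_HRCV_MSG, 1);	/* request next chunk */
	} else {
		rtwdev->fw.prev_dump_seq = 0;		/* dump complete */
	}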
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 276b5d381467..ffb02e614217 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -359,6 +359,7 @@ enum rtw_flags {
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
RTW_FLAG_WOWLAN,
+ RTW_FLAG_RESTARTING,
NUM_OF_RTW_FLAGS,
};
@@ -1082,6 +1083,17 @@ enum rtw_wlan_cpu {
RTW_WCPU_11N,
};
+enum rtw_fw_fifo_sel {
+ RTW_FW_FIFO_SEL_TX,
+ RTW_FW_FIFO_SEL_RX,
+ RTW_FW_FIFO_SEL_RSVD_PAGE,
+ RTW_FW_FIFO_SEL_REPORT,
+ RTW_FW_FIFO_SEL_LLT,
+ RTW_FW_FIFO_SEL_RXBUF_FW,
+
+ RTW_FW_FIFO_MAX,
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -1098,6 +1110,7 @@ struct rtw_chip_info {
u32 ptct_efuse_size;
u32 txff_size;
u32 rxff_size;
+ u32 fw_rxff_size;
u8 band;
u8 page_size;
u8 csi_buf_pg_num;
@@ -1108,6 +1121,8 @@ struct rtw_chip_info {
bool rx_ldpc;
u8 max_power_index;
+ u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
+
bool ht_supported;
bool vht_supported;
u8 lps_deep_mode_supported;
@@ -1606,6 +1621,9 @@ struct rtw_fifo_conf {
const struct rtw_rqpn *rqpn;
};
+#define FW_CD_TYPE 0xffff
+#define FW_CD_LEN 4
+#define FW_CD_VAL 0xaabbccdd
struct rtw_fw_state {
const struct firmware *firmware;
struct rtw_dev *rtwdev;
@@ -1614,6 +1632,7 @@ struct rtw_fw_state {
u8 sub_version;
u8 sub_index;
u16 h2c_version;
+ u8 prev_dump_seq;
};
struct rtw_hal {
@@ -1699,6 +1718,7 @@ struct rtw_dev {
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct fw_recovery_work;
/* used to protect txqs list */
spinlock_t txq_lock;
@@ -1799,6 +1819,11 @@ static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev)
return rtwdev->chip->rx_ldpc;
}
+static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -1821,5 +1846,12 @@ void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
u16 rtw_desc_to_bitrate(u8 desc_rate);
+void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
+ struct ieee80211_bss_conf *conf);
+int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif);
+void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ bool fw_exist);
+void rtw_fw_recovery(struct rtw_dev *rtwdev);
#endif
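The fw_fifo_addr[] table added to struct rtw_chip_info is indexed by enum rtw_fw_fifo_sel; rtw_fw_read_fifo() in fw.c adds the selected base page to the page derived from the caller's byte offset. A sketch of the arithmetic, using the 8822B table declared later in this series ({0x780, 0x700, 0x780, 0x660, 0x650, 0x680}):

	/* e.g. reserved-page dump on 8822B: base page 0x780 */
	u16 base = chip->fw_fifo_addr[RTW_FW_FIFO_SEL_RSVD_PAGE];
	u32 start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + base;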
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 3413973bc475..676d861aaf99 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -109,7 +109,7 @@ static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
dma = tx_data->dma;
- pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -125,7 +125,7 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
/* free the ring itself */
- pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
tx_ring->r.head = NULL;
}
@@ -144,7 +144,7 @@ static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
continue;
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_ring->buf[i] = NULL;
}
@@ -159,7 +159,7 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
- pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}
static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
@@ -194,7 +194,7 @@ static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
return -EINVAL;
}
- head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
rtw_err(rtwdev, "failed to allocate tx ring\n");
return -ENOMEM;
@@ -223,8 +223,8 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
if (!skb)
return -EINVAL;
- dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma))
+ dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma))
return -EBUSY;
*((dma_addr_t *)skb->cb) = dma;
@@ -272,7 +272,7 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
return -EINVAL;
}
- head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
rtw_err(rtwdev, "failed to allocate rx ring\n");
return -ENOMEM;
@@ -311,11 +311,11 @@ err_out:
if (!skb)
continue;
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
rx_ring->buf[i] = NULL;
}
- pci_free_consistent(pdev, ring_sz, head, dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
rtw_err(rtwdev, "failed to init rx buffer\n");
@@ -389,6 +389,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev)
IMR_VODOK |
IMR_ROK |
IMR_BCNDMAINT_E |
+ IMR_C2HCMD |
0;
rtwpci->irq_mask[1] = IMR_TXFOVW |
0;
@@ -675,8 +676,7 @@ static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
tx_data = rtw_pci_get_tx_data(prev);
dma = tx_data->dma;
- pci_unmap_single(rtwpci->pdev, dma, prev->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
dev_kfree_skb_any(prev);
}
@@ -755,9 +755,9 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
memset(pkt_desc, 0, tx_pkt_desc_sz);
pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
rtw_tx_fill_tx_desc(pkt_info, skb);
- dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtwpci->pdev, dma))
+ dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtwpci->pdev->dev, dma))
return -EBUSY;
/* after this we got dma mapped, there is no way back */
@@ -896,8 +896,8 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
break;
}
tx_data = rtw_pci_get_tx_data(skb);
- pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
+ DMA_TO_DEVICE);
/* just free command packets from host to card */
if (hw_queue == RTW_TX_QUEUE_H2C) {
@@ -1080,6 +1080,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
if (irq_status[0] & IMR_ROK)
rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+ if (unlikely(irq_status[0] & IMR_C2HCMD))
+ rtw_fw_c2h_cmd_isr(rtwdev);
/* all of the jobs for this interrupt have been done */
rtw_pci_enable_interrupt(rtwdev, rtwpci);
@@ -1599,6 +1601,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
if (chip->ops->shutdown)
chip->ops->shutdown(rtwdev);
+
+ pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);
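The pci_* DMA helpers dropped throughout pci.c were thin wrappers around the generic DMA API and are deprecated; the conversion is mechanical. The correspondence used in this hunk (dma_alloc_coherent() returns zeroed memory, so it also covers the zalloc variant):

	/* pci_map_single(pdev, p, len, PCI_DMA_TODEVICE) becomes: */
	dma = dma_map_single(&pdev->dev, p, len, DMA_TO_DEVICE);
	/* pci_zalloc_consistent(pdev, sz, &dma) becomes: */
	head = dma_alloc_coherent(&pdev->dev, sz, &dma, GFP_KERNEL);
	/* pci_free_consistent(pdev, sz, head, dma) becomes: */
	dma_free_coherent(&pdev->dev, sz, head, dma);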
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 024c2bc275cb..ca17aa9cf7dc 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -9,8 +9,8 @@
#define RTK_BEQ_TX_DESC_NUM 256
#define RTK_MAX_RX_DESC_NUM 512
-/* 8K + rx desc size */
-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
+/* max VHT MPDU (11454 bytes) + rx desc size */
+#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
#define RTK_PCI_CTRL 0x300
#define BIT_RST_TRXDMA_INTF BIT(20)
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8d93f3159746..5cd9cc42648e 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
u32 addr, mask;
u8 path;
- if (dig_cck)
+ if (chip->dig_cck) {
+ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
+ }
for (path = 0; path < hal->rf_path_num; path++) {
addr = chip->dig[path].addr;
@@ -1522,7 +1523,7 @@ static u8 rtw_get_channel_group(u8 channel)
switch (channel) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case 1:
case 2:
case 36:
@@ -1668,7 +1669,7 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
switch (bandwidth) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
if (above_2ss)
@@ -1712,7 +1713,7 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
switch (bandwidth) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
if (above_2ss)
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 8f468d6b5f78..86b94c008a27 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -126,6 +126,9 @@
BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
#define FW_READY_MASK 0xffff
+#define REG_MCU_TST_CFG 0x84
+#define VAL_FW_TRIGGER 0x1
+
#define REG_EFUSE_ACCESS 0x00CF
#define EFUSE_ACCESS_ON 0x69
#define EFUSE_ACCESS_OFF 0x00
@@ -616,6 +619,8 @@
#define BIT_ANAPAR_BTPS BIT(22)
#define REG_RSTB_SEL 0x1c38
+#define REG_HRCV_MSG 0x1cf
+
#define REG_IGN_GNTBT4 0x4160
#define RF_MODE 0x00
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index d8863d8a5468..da2e7415be8f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -68,7 +68,7 @@ static const u32 rtw8821c_txscale_tbl[] = {
0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
};
-static const u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev)
+static u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev)
{
u8 i = 0;
u32 swing, table_value;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 351cd055a295..22d0dd640ac9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1009,12 +1009,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2442,6 +2442,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.ptct_efuse_size = 96,
.txff_size = 262144,
.rxff_size = 24576,
+ .fw_rxff_size = 12288,
.txgi_factor = 1,
.is_pwr_by_rate_dec = true,
.max_power_index = 0x3f,
@@ -2504,6 +2505,8 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822b),
.coex_info_hw_regs = coex_info_hw_regs_8822b,
+
+ .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 426808413baa..e37300e98517 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -154,25 +154,16 @@ static void rtw8822c_rf_minmax_cmp(struct rtw_dev *rtwdev, u32 value,
}
}
-static void swap_u32(u32 *v1, u32 *v2)
-{
- u32 tmp;
-
- tmp = *v1;
- *v1 = *v2;
- *v2 = tmp;
-}
-
static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2)
{
if (*v1 >= 0x200 && *v2 >= 0x200) {
if (*v1 > *v2)
- swap_u32(v1, v2);
+ swap(*v1, *v2);
} else if (*v1 < 0x200 && *v2 < 0x200) {
if (*v1 > *v2)
- swap_u32(v1, v2);
+ swap(*v1, *v2);
} else if (*v1 < 0x200 && *v2 >= 0x200) {
- swap_u32(v1, v2);
+ swap(*v1, *v2);
}
}
@@ -2014,7 +2005,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
@@ -2024,7 +2015,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -4303,6 +4294,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.ptct_efuse_size = 124,
.txff_size = 262144,
.rxff_size = 24576,
+ .fw_rxff_size = 12288,
.txgi_factor = 2,
.is_pwr_by_rate_dec = false,
.max_power_index = 0x7f,
@@ -4373,6 +4365,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822c),
.coex_info_hw_regs = coex_info_hw_regs_8822c,
+
+ .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 08d01a7bb1bf..3a204a7533df 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -23889,7 +23889,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
{ 9, 0, 0, 0, 11, 60, },
- { 0, 0, 0, 0, 12, 52, },
+ { 0, 0, 0, 0, 12, 44, },
{ 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
@@ -23899,7 +23899,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
{ 9, 0, 0, 0, 12, 60, },
- { 0, 0, 0, 0, 13, 48, },
+ { 0, 0, 0, 0, 13, 40, },
{ 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
@@ -24029,7 +24029,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
{ 9, 0, 0, 1, 11, 60, },
- { 0, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 12, 32, },
{ 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
@@ -24039,7 +24039,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
{ 9, 0, 0, 1, 12, 60, },
- { 0, 0, 0, 1, 13, 28, },
+ { 0, 0, 0, 1, 13, 20, },
{ 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
@@ -24169,7 +24169,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
{ 9, 0, 0, 2, 11, 60, },
- { 0, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 12, 32, },
{ 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
@@ -24179,7 +24179,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
{ 9, 0, 0, 2, 12, 60, },
- { 0, 0, 0, 2, 13, 28, },
+ { 0, 0, 0, 2, 13, 20, },
{ 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
@@ -24309,7 +24309,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
{ 9, 0, 0, 3, 11, 36, },
- { 0, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 12, 32, },
{ 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
@@ -24319,7 +24319,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
{ 9, 0, 0, 3, 12, 36, },
- { 0, 0, 0, 3, 13, 28, },
+ { 0, 0, 0, 3, 13, 20, },
{ 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
@@ -25844,7 +25844,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
{ 9, 0, 0, 0, 11, 60, },
- { 0, 0, 0, 0, 12, 52, },
+ { 0, 0, 0, 0, 12, 44, },
{ 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
@@ -25854,7 +25854,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
{ 9, 0, 0, 0, 12, 60, },
- { 0, 0, 0, 0, 13, 48, },
+ { 0, 0, 0, 0, 13, 40, },
{ 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
@@ -25984,7 +25984,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
{ 9, 0, 0, 1, 11, 60, },
- { 0, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 12, 32, },
{ 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
@@ -25994,7 +25994,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
{ 9, 0, 0, 1, 12, 60, },
- { 0, 0, 0, 1, 13, 28, },
+ { 0, 0, 0, 1, 13, 20, },
{ 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
@@ -26124,7 +26124,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
{ 9, 0, 0, 2, 11, 60, },
- { 0, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 12, 32, },
{ 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
@@ -26134,7 +26134,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
{ 9, 0, 0, 2, 12, 60, },
- { 0, 0, 0, 2, 13, 28, },
+ { 0, 0, 0, 2, 13, 20, },
{ 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
@@ -26264,7 +26264,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
{ 9, 0, 0, 3, 11, 36, },
- { 0, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 12, 32, },
{ 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
@@ -26274,7 +26274,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
{ 9, 0, 0, 3, 12, 36, },
- { 0, 0, 0, 3, 13, 28, },
+ { 0, 0, 0, 3, 13, 20, },
{ 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 7fcc992b01a8..ca8072177ae3 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -587,9 +587,9 @@ static void rtw_txq_push(struct rtw_dev *rtwdev,
rcu_read_unlock();
}
-void rtw_tx_tasklet(unsigned long data)
+void rtw_tx_tasklet(struct tasklet_struct *t)
{
- struct rtw_dev *rtwdev = (void *)data;
+ struct rtw_dev *rtwdev = from_tasklet(rtwdev, t, tx_tasklet);
struct rtw_txq *rtwtxq, *tmp;
spin_lock_bh(&rtwdev->txq_lock);
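tasklet_setup()/from_tasklet() replace the old unsigned-long data cookie: the callback now receives the tasklet_struct itself and recovers its container by field offset. from_tasklet() is a typed container_of(), so the line above expands to roughly:

	struct rtw_dev *rtwdev = container_of(t, struct rtw_dev, tx_tasklet);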
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index cfe84eef5923..6673dbcaa21c 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -94,7 +94,7 @@ void rtw_tx(struct rtw_dev *rtwdev,
struct sk_buff *skb);
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
-void rtw_tx_tasklet(unsigned long data);
+void rtw_tx_tasklet(struct tasklet_struct *t);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
index 41c10e7144df..0c23b5069be0 100644
--- a/drivers/net/wireless/realtek/rtw88/util.h
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -17,6 +17,8 @@ struct rtw_dev;
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
#define rtw_iterate_keys(rtwdev, vif, iterator, data) \
ieee80211_iter_keys(rtwdev->hw, vif, iterator, data)
+#define rtw_iterate_keys_rcu(rtwdev, vif, iterator, data) \
+ ieee80211_iter_keys_rcu((rtwdev)->hw, vif, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8852a1832951..75b5d545b49e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3112,7 +3112,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
retval = rndis_query_oid(usbdev,
RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
&networks_supported, &len);
- if (retval >= 0) {
+ if (!retval) {
n = le32_to_cpu(networks_supported.num_items);
if (n > 8)
n = 8;
@@ -3137,7 +3137,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
retval = rndis_query_oid(usbdev,
RNDIS_OID_802_11_CAPABILITY,
&caps, &len);
- if (retval >= 0) {
+ if (!retval) {
netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
"ver %d, pmkids %d, auth-encr-pairs %d\n",
le32_to_cpu(caps.length),
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index c8ba148f8c6c..a0c5d02ae88c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2018 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 3644d7d99463..2d49c5b5eefb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index c71b41e45423..24a417ea2ae7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 6f8d5f9a9f7e..3f7e3cfb6f00 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 5c0adb0efc5d..16025300cddb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -731,7 +731,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
/**
* rsi_get_connected_channel() - This function is used to get the current
* connected channel number.
- * @adapter: Pointer to the adapter structure.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: Current connected AP's channel number is returned.
*/
@@ -855,7 +855,7 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
/**
* rsi_mac80211_conf_filter() - This function configure the device's RX filter.
* @hw: Pointer to the ieee80211_hw structure.
- * @changed: Changed flags set.
+ * @changed_flags: Changed flags set.
* @total_flags: Total initial flags set.
* @multicast: Multicast.
*
@@ -936,6 +936,7 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
* @hw: Pointer to the ieee80211_hw structure.
* @vif: Pointer to the ieee80211_vif structure.
* @key: Pointer to the ieee80211_key_conf structure.
+ * @sta: Pointer to the ieee80211_sta structure.
*
* Return: status: 0 on success, negative error codes on failure.
*/
@@ -1237,6 +1238,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
* @common: Pointer to the driver private structure.
* @bssid: pointer to the bssid.
* @rssi: RSSI value.
+ * @vif: Pointer to the ieee80211_vif structure.
*/
static void rsi_perform_cqm(struct rsi_common *common,
u8 *bssid,
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 29d83049c5f5..9a3d2439a8e7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -148,6 +148,7 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
/**
* rsi_read_pkt() - This function reads frames from the card.
* @common: Pointer to the driver private structure.
+ * @rx_pkt: Received pkt.
* @rcv_pkt_len: Received pkt length. In case of USB it is 0.
*
* Return: 0 on success, -1 on failure.
@@ -279,7 +280,7 @@ void rsi_set_bt_context(void *priv, void *bt_context)
/**
* rsi_91x_init() - This function initializes os interface operations.
- * @void: Void.
+ * @oper_mode: One of DEV_OPMODE_*.
*
* Return: Pointer to the adapter structure on success, NULL on failure .
*/
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 9cc8a335d519..33c76d39a8e9 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -477,7 +477,6 @@ static int rsi_load_radio_caps(struct rsi_common *common)
* @common: Pointer to the driver private structure.
* @msg: Pointer to received packet.
* @msg_len: Length of the received packet.
- * @type: Type of received packet.
*
* Return: 0 on success, -1 on failure.
*/
@@ -528,6 +527,8 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
* @bssid: bssid.
* @qos_enable: Qos is enabled.
* @aid: Aid (unique for all STA).
+ * @sta_id: station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: status: 0 on success, corresponding negative error code on failure.
*/
@@ -603,6 +604,7 @@ int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode,
* @ssn: ssn.
* @buf_size: buffer size.
* @event: notification about station connection.
+ * @sta_id: station id.
*
* Return: 0 on success, corresponding negative error code on failure.
*/
@@ -699,7 +701,10 @@ static int rsi_program_bb_rf(struct rsi_common *common)
/**
* rsi_set_vap_capabilities() - This function send vap capability to firmware.
* @common: Pointer to the driver private structure.
- * @opmode: Operating mode of device.
+ * @mode: Operating mode of device.
+ * @mac_addr: MAC address
+ * @vap_id: VAP id.
+ * @vap_status: VAP status - ADD, DELETE or UPDATE
*
* Return: 0 on success, corresponding negative error code on failure.
*/
@@ -780,6 +785,8 @@ int rsi_set_vap_capabilities(struct rsi_common *common,
* @key_type: Type of key: GROUP/PAIRWISE.
* @key_id: Key index.
* @cipher: Type of cipher used.
+ * @sta_id: Station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: 0 on success, -1 on failure.
*/
@@ -1045,6 +1052,7 @@ static int rsi_send_reset_mac(struct rsi_common *common)
/**
* rsi_band_check() - This function programs the band
* @common: Pointer to the driver private structure.
+ * @curchan: Pointer to the current channel structure.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1165,7 +1173,6 @@ int rsi_set_channel(struct rsi_common *common,
* rsi_send_radio_params_update() - This function sends the radio
* parameters update to device
* @common: Pointer to the driver private structure.
- * @channel: Channel value to be set.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1289,6 +1296,9 @@ static bool rsi_map_rates(u16 rate, int *offset)
* rsi_send_auto_rate_request() - This function is to set rates for connection
* and send autorate request to firmware.
* @common: Pointer to the driver private structure.
+ * @sta: mac80211 station.
+ * @sta_id: station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1439,10 +1449,15 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
* help of sta notify params by sending an internal
* management frame to firmware.
* @common: Pointer to the driver private structure.
+ * @opmode: Operating mode of device.
* @status: Bss status type.
- * @bssid: Bssid.
+ * @addr: BSS or station MAC address.
* @qos_enable: Qos is enabled.
* @aid: Aid (unique for all STAs).
+ * @sta: mac80211 station.
+ * @sta_id: station id.
+ * @assoc_cap: capabilities.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: None.
*/
@@ -1535,9 +1550,9 @@ static int rsi_eeprom_read(struct rsi_common *common)
* This function sends a frame to block/unblock
* data queues in the firmware
*
- * @param common Pointer to the driver private structure.
- * @param block event - block if true, unblock if false
- * @return 0 on success, -1 on failure.
+ * @common: Pointer to the driver private structure.
+ * @block_event: block if true, unblock if false
+ *
+ * Return: 0 on success, -1 on failure.
*/
int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
{
@@ -1581,7 +1596,7 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
* @common: Pointer to the driver private structure.
* @rx_filter_word: Flags of filter packets
*
- * @Return: 0 on success, -1 on failure.
+ * Return: 0 on success, -1 on failure.
*/
int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word)
{
diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c
index 01472fac8b9a..fdaa5a7260dd 100644
--- a/drivers/net/wireless/rsi/rsi_91x_ps.c
+++ b/drivers/net/wireless/rsi/rsi_91x_ps.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index a04ff75c409f..a7b8684143f4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -799,7 +799,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
/**
* rsi_sdio_host_intf_read_pkt() - This function reads the packet
- from the device.
+ * from the device.
* @adapter: Pointer to the adapter data structure.
* @pkt: Pointer to the packet data to be read from the the device.
* @length: Length of the data to be read from the device.
@@ -832,11 +832,10 @@ int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
* rsi_init_sdio_interface() - This function does init specific to SDIO.
*
* @adapter: Pointer to the adapter data structure.
- * @pkt: Pointer to the packet data to be read from the the device.
+ * @pfunction: Pointer to the sdio_func structure.
*
* Return: 0 on success, -1 on failure.
*/
-
static int rsi_init_sdio_interface(struct rsi_hw *adapter,
struct sdio_func *pfunction)
{
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 449f6d23c5e3..7825c9a889d3 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index c86f31dcc981..d9b6147bbb52 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -1028,14 +1028,12 @@ static int wsm_find_complete_indication(struct cw1200_common *priv,
static int wsm_ba_timeout_indication(struct cw1200_common *priv,
struct wsm_buf *buf)
{
- u32 dummy;
u8 tid;
- u8 dummy2;
u8 addr[ETH_ALEN];
- dummy = WSM_GET32(buf);
+ WSM_GET32(buf);
tid = WSM_GET8(buf);
- dummy2 = WSM_GET8(buf);
+ WSM_GET8(buf);
WSM_GET(buf, addr, ETH_ALEN);
pr_info("BlockACK timeout, tid %d, addr %pM\n",
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 480a8d084878..136a0d3b23c9 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -558,7 +558,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
out:
dev_kfree_skb(skb);
if (ret)
- wl1251_warning("cmd buld null data failed: %d", ret);
+ wl1251_warning("cmd build null data failed: %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/ti/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
index e03f8321ea60..890176c915ab 100644
--- a/drivers/net/wireless/ti/wl1251/reg.h
+++ b/drivers/net/wireless/ti/wl1251/reg.h
@@ -217,7 +217,7 @@ enum wl12xx_acx_int_reg {
Halt eCPU - 32bit RW
------------------------------------------
0 HALT_ECPU Halt Embedded CPU - This bit is the
- compliment of bit 1 (MDATA2) in the SOR_CFG register.
+ complement of bit 1 (MDATA2) in the SOR_CFG register.
During a hardware reset, this bit holds
the inverse of MDATA2.
When downloading firmware from the host,
diff --git a/drivers/net/wireless/ti/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
index 247f558ba630..8ff018808020 100644
--- a/drivers/net/wireless/ti/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -139,7 +139,7 @@
Halt eCPU - 32bit RW
------------------------------------------
0 HALT_ECPU Halt Embedded CPU - This bit is the
- compliment of bit 1 (MDATA2) in the SOR_CFG register.
+ complement of bit 1 (MDATA2) in the SOR_CFG register.
During a hardware reset, this bit holds
the inverse of MDATA2.
When downloading firmware from the host,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 6ef8fc9ae627..32a2e27cc561 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -825,7 +825,7 @@ out:
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
- * @len: length of the buffer
+ * @buf_len: length of the buffer
* @answer: is answer needed
*/
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
@@ -855,7 +855,8 @@ EXPORT_SYMBOL_GPL(wl1271_cmd_test);
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
- * @len: length of buf
+ * @cmd_len: length of command
+ * @res_len: length of payload
*/
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
size_t cmd_len, size_t res_len)
@@ -1080,7 +1081,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
out:
dev_kfree_skb(skb);
if (ret)
- wl1271_warning("cmd buld null data failed %d", ret);
+ wl1271_warning("cmd build null data failed %d", ret);
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 48adb1876ab9..cce8d75d8b81 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -122,13 +122,6 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
pm_runtime_put_autosuspend(wl->dev);
}
-
-static inline void no_write_handler(struct wl1271 *wl,
- unsigned long value,
- unsigned long param)
-{
-}
-
#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
min_val, max_val, write_handler_locked, \
write_handler_arg) \
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index fc3bb0d2ab8d..b143293e694f 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -78,13 +78,13 @@ static ssize_t sub## _ ##name## _read(struct file *file, \
struct wl1271 *wl = file->private_data; \
struct struct_type *stats = wl->stats.fw_stats; \
char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
- int res, i; \
+ int i; \
\
wl1271_debugfs_update_stats(wl); \
\
for (i = 0; i < len; i++) \
- res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
- buf, i, stats->sub.name[i]); \
+ snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
+ buf, i, stats->sub.name[i]); \
\
return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
} \
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d2bbd5108f7e..6863fd552d5e 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -30,7 +30,6 @@
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
-#define WL1271_SUSPEND_SLEEP 100
#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 686161db8706..026e88b80bfc 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -134,8 +134,8 @@ static const struct {
/**
* iw_valid_channel - validate channel in regulatory domain
- * @reg_comain - regulatory domain
- * @channel - channel to validate
+ * @reg_domain: regulatory domain
+ * @channel: channel to validate
*
* Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
*/
@@ -154,7 +154,7 @@ static int iw_valid_channel(int reg_domain, int channel)
/**
* iw_default_channel - get default channel for a regulatory domain
- * @reg_comain - regulatory domain
+ * @reg_domain: regulatory domain
*
* Returns the default channel for a regulatory domain
*/
@@ -237,6 +237,7 @@ static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
/**
* wl3501_set_to_wla - Move 'size' bytes from PC to card
+ * @this: Card
* @dest: Card addressing space
* @src: PC addressing space
* @size: Bytes to move
@@ -259,6 +260,7 @@ static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
/**
* wl3501_get_from_wla - Move 'size' bytes from card to PC
+ * @this: Card
* @src: Card addressing space
* @dest: PC addressing space
* @size: Bytes to move
@@ -455,7 +457,7 @@ out:
/**
* wl3501_send_pkt - Send a packet.
- * @this - card
+ * @this: Card
*
* Send a packet.
*
@@ -720,7 +722,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
/**
* wl3501_block_interrupt - Mask interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -737,7 +739,7 @@ static int wl3501_block_interrupt(struct wl3501_card *this)
/**
* wl3501_unblock_interrupt - Enable interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -1110,8 +1112,8 @@ static inline void wl3501_ack_interrupt(struct wl3501_card *this)
/**
* wl3501_interrupt - Hardware interrupt from card.
- * @irq - Interrupt number
- * @dev_id - net_device
+ * @irq: Interrupt number
+ * @dev_id: net_device
*
* We must acknowledge the interrupt as soon as possible, and block the
* interrupt from the same card immediately to prevent re-entry.
@@ -1247,7 +1249,7 @@ static int wl3501_close(struct net_device *dev)
/**
* wl3501_reset - Reset the SUTRO.
- * @dev - network device
+ * @dev: network device
*
* It is almost the same as wl3501_open(). In fact, we may just wl3501_close()
* and wl3501_open() again, but I wouldn't like to free_irq() when the driver
@@ -1410,7 +1412,7 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
/**
* wl3501_detach - deletes a driver "instance"
- * @link - FILL_IN
+ * @link: FILL_IN
*
* This deletes a driver "instance". The device is de-registered with Card
* Services. If it has been released, all local data structures are freed.
@@ -1431,9 +1433,7 @@ static void wl3501_detach(struct pcmcia_device *link)
wl3501_release(link);
unregister_netdev(dev);
-
- if (link->priv)
- free_netdev(link->priv);
+ free_netdev(dev);
}
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 41641fc2be74..718c4ee865ba 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -1652,15 +1652,11 @@ static int zd1201_set_maxassoc(struct net_device *dev,
struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
struct zd1201 *zd = netdev_priv(dev);
- int err;
if (!zd->ap)
return -EOPNOTSUPP;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
- if (err)
- return err;
- return 0;
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
}
static int zd1201_get_maxassoc(struct net_device *dev,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 0af4b1986e48..3bb51dc8d035 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1375,8 +1375,8 @@ static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
/**
* zd_rx_rate - report zd-rate
- * @rx_frame - received frame
- * @rx_status - rx_status as given by the device
+ * @rx_frame: received frame
+ * @status: rx_status as given by the device
*
* This function converts the rate as encoded in the received packet to the
* zd-rate, we are using on other places in the driver.
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index a9999d10ae81..3ef8533205f9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -416,11 +416,10 @@ int zd_restore_settings(struct zd_mac *mac)
/**
* zd_mac_tx_status - reports tx status of a packet if required
- * @hw - a &struct ieee80211_hw pointer
- * @skb - a sk-buffer
- * @flags: extra flags to set in the TX status info
+ * @hw: a &struct ieee80211_hw pointer
+ * @skb: a sk-buffer
* @ackssi: ACK signal strength
- * @success - True for successful transmission of the frame
+ * @tx_status: success and/or retry
*
* This information calls ieee80211_tx_status_irqsafe() if required by the
* control information. It copies the control information into the status
@@ -477,7 +476,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
/**
* zd_mac_tx_failed - callback for failed frames
- * @dev: the mac80211 wireless device
+ * @urb: pointer to the urb structure
*
* This function is called if a frame couldn't be successfully
* transferred. The first frame from the tx queue, will be selected and
@@ -913,9 +912,9 @@ static int fill_ctrlset(struct zd_mac *mac,
/**
* zd_op_tx - transmits a network frame to the device
*
- * @dev: mac80211 hardware device
- * @skb: socket buffer
+ * @hw: a &struct ieee80211_hw pointer
* @control: the control structure
+ * @skb: socket buffer
*
* This function transmit an IEEE 802.11 network frame to the device. The
* control block of the skbuff will be initialized. If necessary the incoming
@@ -946,7 +945,7 @@ fail:
/**
* filter_ack - filters incoming packets for acknowledgements
- * @dev: the mac80211 device
+ * @hw: a &struct ieee80211_hw pointer
* @rx_hdr: received header
* @stats: the status for the received packet
*
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 65b5985ad402..66367ab7e4c1 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -378,7 +378,6 @@ static inline void handle_regs_int(struct urb *urb)
int len;
u16 int_num;
- ZD_ASSERT(in_interrupt());
spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
@@ -1140,9 +1139,9 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
zd_usb_reset_rx(usb);
}
-static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+static void zd_usb_reset_rx_idle_timer_tasklet(struct tasklet_struct *t)
{
- struct zd_usb *usb = (struct zd_usb *)param;
+ struct zd_usb *usb = from_tasklet(usb, t, rx.reset_timer_tasklet);
zd_usb_reset_rx_idle_timer(usb);
}
@@ -1178,8 +1177,9 @@ static inline void init_usb_rx(struct zd_usb *usb)
}
ZD_ASSERT(rx->fragment_length == 0);
INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
- rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
- rx->reset_timer_tasklet.data = (unsigned long)usb;
+ rx->reset_timer_tasklet.func = (void (*))
+ zd_usb_reset_rx_idle_timer_tasklet;
+ rx->reset_timer_tasklet.data = (unsigned long)&rx->reset_timer_tasklet;
}
static inline void init_usb_tx(struct zd_usb *usb)
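
The from_tasklet() conversion above is a container_of() wrapper: given a pointer to the tasklet_struct embedded in struct zd_usb, it recovers the enclosing object. A minimal standalone sketch of the idiom, with illustrative "demo" names rather than the driver's real types:

#include <stddef.h>

struct demo_tasklet {
	void (*func)(struct demo_tasklet *t);
};

struct demo_usb {
	int id;
	struct demo_tasklet reset_timer_tasklet;
};

/* container_of(): walk back from a member to its enclosing struct.
 * from_tasklet(usb, t, rx.reset_timer_tasklet) expands the same way. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void demo_callback(struct demo_tasklet *t)
{
	struct demo_usb *usb =
		demo_container_of(t, struct demo_usb, reset_timer_tasklet);

	(void)usb;	/* usb now points at the object that embeds t */
}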
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ae477f7756af..8ee24e351bdc 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
struct xenvif *vif; /* Parent VIF */
+ /*
+ * TX/RX common EOI handling.
+ * When feature-split-event-channels = 0, the interrupt handler sets
+ * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
+ * by the RX and TX interrupt handlers.
+ * RX and TX handler threads will issue an EOI when either
+ * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
+ * NETBK_TX_EOI) are set and they will reset those bits.
+ */
+ atomic_t eoi_pending;
+#define NETBK_RX_EOI 0x01
+#define NETBK_TX_EOI 0x02
+#define NETBK_COMMON_EOI 0x04
+
/* Use NAPI for guest TX */
struct napi_struct napi;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -378,6 +392,7 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
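
The comment above defines a small bitmask protocol between the interrupt handlers and the RX/TX threads. A hedged consolidation of the two halves, restating what the interface.c and rx.c hunks below implement (kernel context assumed, error paths omitted):

/* Interrupt side: claim the EOI bit before looking for work; if the
 * wakeup turns out to be spurious, clear the bit and EOI immediately. */
static irqreturn_t demo_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}
	return IRQ_HANDLED;	/* otherwise the RX thread issues the EOI */
}

/* Thread side: consume both the split-channel and the common bit in a
 * single atomic step, and EOI only if one of them was actually set. */
static void demo_rx_thread_eoi(struct xenvif_queue *queue)
{
	if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
				&queue->eoi_pending) &
	    (NETBK_RX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->rx_irq, 0);
}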
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 8af497285691..acb786d8b1d8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -77,12 +77,28 @@ int xenvif_schedulable(struct xenvif *vif)
!vif->disabled;
}
+static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
+ if (rc)
+ napi_schedule(&queue->napi);
+ return rc;
+}
+
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
- napi_schedule(&queue->napi);
+ old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_tx_interrupt(queue)) {
+ atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -116,19 +132,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
+{
+ bool rc;
+
+ rc = xenvif_have_rx_work(queue, false);
+ if (rc)
+ xenvif_kick_thread(queue);
+ return rc;
+}
+
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
+ int old;
- xenvif_kick_thread(queue);
+ old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
+ WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
+
+ if (!xenvif_handle_rx_interrupt(queue)) {
+ atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
- xenvif_tx_interrupt(irq, dev_id);
- xenvif_rx_interrupt(irq, dev_id);
+ struct xenvif_queue *queue = dev_id;
+ int old;
+
+ old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ WARN(old, "Interrupt while EOI pending\n");
+
+ /* Use bitwise or as we need to call both functions. */
+ if ((!xenvif_handle_tx_interrupt(queue) |
+ !xenvif_handle_rx_interrupt(queue))) {
+ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
return IRQ_HANDLED;
}
@@ -605,7 +648,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
goto err_unmap;
- err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
if (err < 0)
goto err_unmap;
@@ -709,7 +752,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
@@ -720,7 +763,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
/* feature-split-event-channels == 1 */
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
@@ -730,7 +773,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6dfca7265644..bc3421d14576 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -169,6 +169,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
if (more_to_do)
napi_schedule(&queue->napi);
+ else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_TX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->tx_irq, 0);
}
static void tx_add_credit(struct xenvif_queue *queue)
@@ -1643,9 +1647,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
+ unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
- while (xenvif_ctrl_work_todo(vif))
+ while (xenvif_ctrl_work_todo(vif)) {
xenvif_ctrl_action(vif);
+ eoi_flag = 0;
+ }
+
+ xen_irq_lateeoi(irq, eoi_flag);
return IRQ_HANDLED;
}
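
The xenvif_ctrl_irq_fn() change above shows the general lateeoi idiom: assume the interrupt was spurious, and drop the flag the moment any real work is found. A minimal hedged sketch with hypothetical demo_* helpers:

static irqreturn_t demo_irq_fn(int irq, void *data)
{
	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;

	while (demo_work_todo(data)) {
		demo_do_work(data);
		eoi_flag = 0;		/* real work done: not spurious */
	}

	/* Always ack with a late EOI; the flag tells Xen whether this
	 * event carried work, so it can police event storms. */
	xen_irq_lateeoi(irq, eoi_flag);
	return IRQ_HANDLED;
}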
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ac034f69a170..b8febe1d1bfd 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -503,13 +503,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
return queue->stalled && prod - cons >= 1;
}
-static bool xenvif_have_rx_work(struct xenvif_queue *queue)
+bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
return xenvif_rx_ring_slots_available(queue) ||
(queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue) ||
xenvif_rx_queue_ready(queue))) ||
- kthread_should_stop() ||
+ (test_kthread && kthread_should_stop()) ||
queue->vif->disabled;
}
@@ -540,15 +540,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
DEFINE_WAIT(wait);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
return;
for (;;) {
long ret;
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
- if (xenvif_have_rx_work(queue))
+ if (xenvif_have_rx_work(queue, true))
break;
+ if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
+ &queue->eoi_pending) &
+ (NETBK_RX_EOI | NETBK_COMMON_EOI))
+ xen_irq_lateeoi(queue->rx_irq, 0);
+
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
if (!ret)
break;
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index f5bb7ace2ff5..84f2983bf384 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -210,7 +210,7 @@ static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags)
usb_kill_urb(phy->in_urb);
}
-/* ACR122 specific structs and fucntions */
+/* ACR122 specific structs and functions */
/* ACS ACR122 pn533 frame definitions */
#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index af9d18690afe..3f8b6da58280 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -2,6 +2,7 @@
config NFC_S3FWRN5
tristate
select CRYPTO
+ select CRYPTO_HASH
help
Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
of chip. It's intended to be used by PHYs to avoid duplicating lots
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 69857f080704..ec930ee2c847 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -348,7 +348,7 @@ static int s3fwrn5_fw_get_base_addr(
}
static inline bool
-s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
{
return !!bootinfo->hw_version[2];
}
@@ -399,7 +399,7 @@ err:
return ret;
}
-bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version)
{
struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
struct s3fwrn5_fw_version *old = (void *) &version;
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
index cf1a83a5a525..3c83e6730d30 100644
--- a/drivers/nfc/s3fwrn5/firmware.h
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -91,7 +91,7 @@ struct s3fwrn5_fw_info {
void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
-bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version);
int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index b4eb926d220a..dc995286be84 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -164,7 +164,6 @@ out:
static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct s3fwrn5_i2c_phy *phy = phy_id;
- int ret = 0;
if (!phy || !phy->ndev) {
WARN_ON_ONCE(1);
@@ -179,10 +178,9 @@ static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
switch (phy->mode) {
case S3FWRN5_MODE_NCI:
case S3FWRN5_MODE_FW:
- ret = s3fwrn5_i2c_read(phy);
+ s3fwrn5_i2c_read(phy);
break;
case S3FWRN5_MODE_COLD:
- ret = -EREMOTEIO;
break;
}
@@ -200,13 +198,21 @@ static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
if (!np)
return -ENODEV;
- phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
- if (!gpio_is_valid(phy->gpio_en))
- return -ENODEV;
+ phy->gpio_en = of_get_named_gpio(np, "en-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_en)) {
+ /* Support also deprecated property */
+ phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_en))
+ return -ENODEV;
+ }
- phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
- if (!gpio_is_valid(phy->gpio_fw_wake))
- return -ENODEV;
+ phy->gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_fw_wake)) {
+ /* Support also deprecated property */
+ phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_fw_wake))
+ return -ENODEV;
+ }
return 0;
}
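
The parse_dt change above prefers the generic property names and keeps the vendor-prefixed ones only as a deprecated fallback. The lookup order is easier to see factored into a helper; demo_get_gpio_with_fallback() is hypothetical, not part of the driver:

static int demo_get_gpio_with_fallback(struct device_node *np,
				       const char *name,
				       const char *deprecated_name)
{
	int gpio = of_get_named_gpio(np, name, 0);

	if (gpio_is_valid(gpio))
		return gpio;
	/* Support also the deprecated property */
	return of_get_named_gpio(np, deprecated_name, 0);
}

A caller would then keep a single validity check, e.g. phy->gpio_en = demo_get_gpio_with_fallback(np, "en-gpios", "s3fwrn5,en-gpios") followed by gpio_is_valid().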
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f25f1ec5f9e9..807eae04c1e3 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -331,8 +331,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
- skb->len - 2, GFP_KERNEL);
+ transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index 2ce17932a073..6ca0d2f56b18 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -794,7 +794,6 @@ static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev,
skb->len,
st21nfca_hci_data_exchange_cb,
info);
- break;
default:
return 1;
}
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 6586378cacb0..c8bdf078d111 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -315,8 +315,7 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
- skb->len - 2, GFP_KERNEL);
+ transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 3bd97c73f983..c70f62fe321e 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -1382,7 +1382,6 @@ static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
case ISO15693_CMD_WRITE_DSFID:
case ISO15693_CMD_LOCK_DSFID:
return 1;
- break;
default:
return 0;
}
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 88e1db65be02..71428d8cbcfc 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1203,6 +1203,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
err_dma_mask:
pci_clear_master(pdev);
+ pci_release_regions(pdev);
err_pci_regions:
pci_disable_device(pdev);
err_pci_enable:
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 3185efeab487..093dd20057b9 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -1893,7 +1893,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
goto err_init_dev;
} else {
rc = -EINVAL;
- goto err_ndev;
+ goto err_init_pci;
}
ndev_reset_unsafe_flags(ndev);
diff --git a/drivers/ntb/test/ntb_msi_test.c b/drivers/ntb/test/ntb_msi_test.c
index 99d826ed9c34..7095ecd6223a 100644
--- a/drivers/ntb/test/ntb_msi_test.c
+++ b/drivers/ntb/test/ntb_msi_test.c
@@ -319,7 +319,6 @@ static void ntb_msit_remove_dbgfs(struct ntb_msit_ctx *nm)
static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
struct ntb_msit_ctx *nm;
- size_t struct_size;
int peers;
int ret;
@@ -352,9 +351,7 @@ static int ntb_msit_probe(struct ntb_client *client, struct ntb_dev *ntb)
return ret;
}
- struct_size = sizeof(*nm) + sizeof(*nm->peers) * peers;
-
- nm = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ nm = devm_kzalloc(&ntb->dev, struct_size(nm, peers, peers), GFP_KERNEL);
if (!nm)
return -ENOMEM;
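
struct_size(nm, peers, peers) replaces the open-coded sizeof arithmetic and adds overflow checking for the trailing flexible array. A standalone sketch of the shape it expects, with illustrative fields rather than the real ntb_msit_ctx layout:

#include <stddef.h>

struct demo_peer {
	int idx;
};

struct demo_ctx {
	int num_peers;
	struct demo_peer peers[];	/* flexible array member */
};

/* Open-coded equivalent of struct_size(ctx, peers, n), minus the
 * saturate-on-overflow behaviour the kernel macro provides. */
static size_t demo_struct_size(size_t n)
{
	return sizeof(struct demo_ctx) + n * sizeof(struct demo_peer);
}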
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index b9eeefa27e3a..aaf6e215a8c6 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -211,7 +211,7 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
}
static void badblocks_populate(struct badrange *badrange,
- struct badblocks *bb, const struct resource *res)
+ struct badblocks *bb, const struct range *range)
{
struct badrange_entry *bre;
@@ -222,34 +222,34 @@ static void badblocks_populate(struct badrange *badrange,
u64 bre_end = bre->start + bre->length - 1;
/* Discard intervals with no intersection */
- if (bre_end < res->start)
+ if (bre_end < range->start)
continue;
- if (bre->start > res->end)
+ if (bre->start > range->end)
continue;
/* Deal with any overlap after start of the namespace */
- if (bre->start >= res->start) {
+ if (bre->start >= range->start) {
u64 start = bre->start;
u64 len;
- if (bre_end <= res->end)
+ if (bre_end <= range->end)
len = bre->length;
else
- len = res->start + resource_size(res)
+ len = range->start + range_len(range)
- bre->start;
- __add_badblock_range(bb, start - res->start, len);
+ __add_badblock_range(bb, start - range->start, len);
continue;
}
/*
* Deal with overlap for badrange starting before
* the namespace.
*/
- if (bre->start < res->start) {
+ if (bre->start < range->start) {
u64 len;
- if (bre_end < res->end)
- len = bre->start + bre->length - res->start;
+ if (bre_end < range->end)
+ len = bre->start + bre->length - range->start;
else
- len = resource_size(res);
+ len = range_len(range);
__add_badblock_range(bb, 0, len);
}
}
@@ -267,7 +267,7 @@ static void badblocks_populate(struct badrange *badrange,
* and add badblocks entries for all matching sub-ranges
*/
void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res)
+ struct badblocks *bb, const struct range *range)
{
struct nvdimm_bus *nvdimm_bus;
@@ -279,7 +279,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
nvdimm_bus_lock(&nvdimm_bus->dev);
- badblocks_populate(&nvdimm_bus->badrange, bb, res);
+ badblocks_populate(&nvdimm_bus->badrange, bb, range);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
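
The loop in badblocks_populate() is interval clipping: each badrange entry is clamped against the namespace range and only the overlap is reported. The arithmetic is easier to check in isolation; a hedged standalone sketch with simplified types:

#include <stdbool.h>
#include <stdint.h>

struct demo_range {
	uint64_t start, end;		/* inclusive bounds */
};

/* Clamp a badrange entry 'bre' to the namespace range 'ns'; on
 * overlap, report the offset into the namespace and the length. */
static bool demo_clip(const struct demo_range *bre,
		      const struct demo_range *ns,
		      uint64_t *off, uint64_t *len)
{
	uint64_t start, end;

	if (bre->end < ns->start || bre->start > ns->end)
		return false;		/* no intersection, skip */

	start = bre->start > ns->start ? bre->start : ns->start;
	end = bre->end < ns->end ? bre->end : ns->end;
	*off = start - ns->start;
	*len = end - start + 1;		/* inclusive bounds */
	return true;
}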
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 1f718381a045..22e5617b2cea 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -226,7 +226,6 @@ static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
static const struct block_device_operations nd_blk_fops = {
.owner = THIS_MODULE,
.submit_bio = nd_blk_submit_bio,
- .revalidate_disk = nvdimm_revalidate_disk,
};
static void nd_blk_release_queue(void *q)
@@ -284,7 +283,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
device_add_disk(dev, disk, NULL);
- revalidate_disk(disk);
+ nvdimm_check_and_set_ro(disk);
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0ff610e728ff..12ff6f8784ac 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1513,7 +1513,6 @@ static const struct block_device_operations btt_fops = {
.submit_bio = btt_submit_bio,
.rw_page = btt_rw_page,
.getgeo = btt_getgeo,
- .revalidate_disk = nvdimm_revalidate_disk,
};
static int btt_blk_init(struct btt *btt)
@@ -1538,8 +1537,6 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->private_data = btt;
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
- btt->btt_disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
@@ -1558,7 +1555,7 @@ static int btt_blk_init(struct btt *btt)
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
- revalidate_disk(btt->btt_disk);
+ nvdimm_check_and_set_ro(btt->btt_disk);
return 0;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 955265656b96..2304c6183822 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -628,7 +628,7 @@ int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
}
EXPORT_SYMBOL(__nd_driver_register);
-int nvdimm_revalidate_disk(struct gendisk *disk)
+void nvdimm_check_and_set_ro(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -639,16 +639,13 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
* read-only if the disk is already read-only.
*/
if (disk_ro || nd_region->ro == disk_ro)
- return 0;
+ return;
dev_info(dev, "%s read-only, marking %s read-only\n",
dev_name(&nd_region->dev), disk->disk_name);
set_disk_ro(disk, 1);
-
- return 0;
-
}
-EXPORT_SYMBOL(nvdimm_revalidate_disk);
+EXPORT_SYMBOL(nvdimm_check_and_set_ro);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 45964acba944..5a7c80053c62 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -268,7 +268,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
if (rw == READ) {
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
- if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
+ if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
return -EIO;
return 0;
}
@@ -303,13 +303,16 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
resource_size_t size)
{
- struct resource *res = &nsio->res;
struct nd_namespace_common *ndns = &nsio->common;
+ struct range range = {
+ .start = nsio->res.start,
+ .end = nsio->res.end,
+ };
nsio->size = size;
- if (!devm_request_mem_region(dev, res->start, size,
+ if (!devm_request_mem_region(dev, range.start, size,
dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve region %pR\n", res);
+ dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
return -EBUSY;
}
@@ -317,9 +320,9 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
if (devm_init_badblocks(dev, &nsio->bb))
return -ENOMEM;
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
- &nsio->res);
+ &range);
- nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
+ nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85c1ae813ea3..696b55556d4d 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -361,7 +361,7 @@ u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
-int nvdimm_revalidate_disk(struct gendisk *disk);
+void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
@@ -377,8 +377,9 @@ int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
+struct range;
void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res);
+ struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
resource_size_t size);
void devm_namespace_disable(struct device *dev,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3e11ef8d3f5b..b499df630d4d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -672,7 +672,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
- struct resource *res = &pgmap->res;
+ struct range *range = &pgmap->range;
struct vmem_altmap *altmap = &pgmap->altmap;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = le64_to_cpu(pfn_sb->dataoff);
@@ -689,16 +689,17 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
.end_pfn = PHYS_PFN(end),
};
- memcpy(res, &nsio->res, sizeof(*res));
- res->start += start_pad;
- res->end -= end_trunc;
-
+ *range = (struct range) {
+ .start = nsio->res.start + start_pad,
+ .end = nsio->res.end - end_trunc,
+ };
+ pgmap->nr_range = 1;
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
+ nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fab29b514372..875076b0ea6c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -125,7 +125,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
while (len) {
mem = kmap_atomic(page);
chunk = min_t(unsigned int, len, PAGE_SIZE - off);
- rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
+ rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
kunmap_atomic(mem);
if (rem)
return BLK_STS_IOERR;
@@ -281,7 +281,6 @@ static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.submit_bio = pmem_submit_bio,
.rw_page = pmem_rw_page,
- .revalidate_disk = nvdimm_revalidate_disk,
};
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
@@ -304,7 +303,7 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
/*
* Use the 'no check' versions of copy_from_iter_flushcache() and
- * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
+ * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
* checking, both file offset and device offset, is handled by
* dax_iomap_actor()
*/
@@ -317,7 +316,7 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i)
{
- return _copy_to_iter_mcsafe(addr, bytes, i);
+ return _copy_mc_to_iter(addr, bytes, i);
}
static const struct dax_operations pmem_dax_ops = {
@@ -376,7 +375,7 @@ static int pmem_attach_disk(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
- struct resource bb_res;
+ struct range bb_range;
struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev;
struct nd_pfn_sb *pfn_sb;
@@ -435,24 +434,27 @@ static int pmem_attach_disk(struct device *dev,
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
pmem->pfn_pad = resource_size(res) -
- resource_size(&pmem->pgmap.res);
+ range_len(&pmem->pgmap.range);
pmem->pfn_flags |= PFN_MAP;
- memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
- bb_res.start += pmem->data_offset;
+ bb_range = pmem->pgmap.range;
+ bb_range.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
- memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+ pmem->pgmap.range.start = res->start;
+ pmem->pgmap.range.end = res->end;
+ pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
- memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+ bb_range = pmem->pgmap.range;
} else {
if (devm_add_action_or_reset(dev, pmem_release_queue,
&pmem->pgmap))
return -ENOMEM;
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
- memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+ bb_range.start = res->start;
+ bb_range.end = res->end;
}
if (IS_ERR(addr))
@@ -476,13 +478,12 @@ static int pmem_attach_disk(struct device *dev,
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
disk->private_data = pmem;
- disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
disk->bb = &pmem->bb;
if (is_nvdimm_sync(nd_region))
@@ -501,7 +502,7 @@ static int pmem_attach_disk(struct device *dev,
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
- revalidate_disk(disk);
+ nvdimm_check_and_set_ro(disk);
pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
"badblocks");
@@ -593,8 +594,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
resource_size_t offset = 0, end_trunc = 0;
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
- struct resource res;
struct badblocks *bb;
+ struct range range;
struct kernfs_node *bb_state;
if (event != NVDIMM_REVALIDATE_POISON)
@@ -630,9 +631,9 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
nsio = to_nd_namespace_io(&ndns->dev);
}
- res.start = nsio->res.start + offset;
- res.end = nsio->res.end - end_trunc;
- nvdimm_badblocks_populate(nd_region, bb, &res);
+ range.start = nsio->res.start + offset;
+ range.end = nsio->res.end - end_trunc;
+ nvdimm_badblocks_populate(nd_region, bb, &range);
if (bb_state)
sysfs_notify_dirent(bb_state);
}
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 0f6978e72e7c..bfce87ed72ab 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -35,7 +35,10 @@ static int nd_region_probe(struct device *dev)
return rc;
if (is_memory(&nd_region->dev)) {
- struct resource ndr_res;
+ struct range range = {
+ .start = nd_region->ndr_start,
+ .end = nd_region->ndr_start + nd_region->ndr_size - 1,
+ };
if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV;
@@ -44,9 +47,7 @@ static int nd_region_probe(struct device *dev)
if (!nd_region->bb_state)
dev_warn(&nd_region->dev,
"'badblocks' notification disabled\n");
- ndr_res.start = nd_region->ndr_start;
- ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
- nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+ nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
}
rc = nd_region_register_namespaces(nd_region, &err);
@@ -121,14 +122,16 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
if (event == NVDIMM_REVALIDATE_POISON) {
struct nd_region *nd_region = to_nd_region(dev);
- struct resource res;
if (is_memory(&nd_region->dev)) {
- res.start = nd_region->ndr_start;
- res.end = nd_region->ndr_start +
- nd_region->ndr_size - 1;
+ struct range range = {
+ .start = nd_region->ndr_start,
+ .end = nd_region->ndr_start +
+ nd_region->ndr_size - 1,
+ };
+
nvdimm_badblocks_populate(nd_region,
- &nd_region->bb, &res);
+ &nd_region->bb, &range);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 893e29624c16..9b01afcb7777 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -89,26 +89,38 @@ static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
-static int _nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid);
+static void nvme_update_bdev_size(struct gendisk *disk)
+{
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (bdev) {
+ bd_set_nr_sectors(bdev, get_capacity(disk));
+ bdput(bdev);
+ }
+}
+
+/*
+ * Prepare a queue for teardown.
+ *
+ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+ * the capacity to 0 after that to avoid blocking dispatchers that may be
+ * holding bd_mutex. This will end buffered writers dirtying pages that can't
+ * be synced.
+ */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
- /*
- * Revalidating a dead namespace sets capacity to 0. This will end
- * buffered writers dirtying pages that can't be synced.
- */
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
+
blk_set_queue_dying(ns->queue);
- /* Forcibly unquiesce queues to avoid blocking dispatch */
blk_mq_unquiesce_queue(ns->queue);
- /*
- * Revalidate after unblocking dispatchers that may be holding bd_butex
- */
- revalidate_disk(ns->disk);
+
+ set_capacity(ns->disk, 0);
+ nvme_update_bdev_size(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -236,6 +248,10 @@ static blk_status_t nvme_error_status(u16 status)
return BLK_STS_NEXUS;
case NVME_SC_HOST_PATH_ERROR:
return BLK_STS_TRANSPORT;
+ case NVME_SC_ZONE_TOO_MANY_ACTIVE:
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+ case NVME_SC_ZONE_TOO_MANY_OPEN:
+ return BLK_STS_ZONE_OPEN_RESOURCE;
default:
return BLK_STS_IOERR;
}
@@ -955,10 +971,10 @@ static u32 nvme_known_admin_effects(u8 opcode)
{
switch (opcode) {
case nvme_admin_format_nvm:
- return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
NVME_CMD_EFFECTS_CSE_MASK;
case nvme_admin_sanitize_nvm:
- return NVME_CMD_EFFECTS_CSE_MASK;
+ return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
default:
break;
}
@@ -996,7 +1012,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
* For simplicity, IO to all namespaces is quiesced even if the command
* effects say only one namespace is affected.
*/
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
mutex_lock(&ctrl->scan_lock);
mutex_lock(&ctrl->subsys->lock);
nvme_mpath_start_freeze(ctrl->subsys);
@@ -1007,36 +1023,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
-{
- struct nvme_ns *ns;
-
- down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (_nvme_revalidate_disk(ns->disk))
- nvme_set_queue_dying(ns);
- else if (blk_queue_is_zoned(ns->disk->queue)) {
- /*
- * IO commands are required to fully revalidate a zoned
- * device. Force the command effects to trigger rescan
- * work so report zones can run in a context with
- * unfrozen IO queues.
- */
- *effects |= NVME_CMD_EFFECTS_NCC;
- }
- up_read(&ctrl->namespaces_rwsem);
-}
-
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
- /*
- * Revalidate LBA changes prior to unfreezing. This is necessary to
- * prevent memory corruption if a logical block size was changed by
- * this command.
- */
- if (effects & NVME_CMD_EFFECTS_LBCC)
- nvme_update_formats(ctrl, &effects);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
nvme_mpath_unfreeze(ctrl->subsys);
mutex_unlock(&ctrl->subsys->lock);
@@ -1296,6 +1285,8 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
int status, pos, len;
void *data;
+ if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
+ return 0;
if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
return 0;
@@ -1339,19 +1330,8 @@ free_data:
return status;
}
-static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
-{
- struct nvme_command c = { };
-
- c.identify.opcode = nvme_admin_identify;
- c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
- c.identify.nsid = cpu_to_le32(nsid);
- return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list,
- NVME_IDENTIFY_DATA_SIZE);
-}
-
-static int nvme_identify_ns(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_id_ns **id)
+static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1368,9 +1348,24 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
- kfree(*id);
+ goto out_free_id;
}
+ error = -ENODEV;
+ if ((*id)->ncap == 0) /* namespace not allocated or attached */
+ goto out_free_id;
+
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ return 0;
+
+out_free_id:
+ kfree(*id);
return error;
}
@@ -1892,20 +1887,6 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
nvme_lba_to_sect(ns, max_blocks));
}
-static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
- struct nvme_id_ns *id, struct nvme_ns_ids *ids)
-{
- memset(ids, 0, sizeof(*ids));
-
- if (ctrl->vs >= NVME_VS(1, 1, 0))
- memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0))
- memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
- if (ctrl->vs >= NVME_VS(1, 3, 0) || nvme_multi_css(ctrl))
- return nvme_identify_ns_descs(ctrl, nsid, ids);
- return 0;
-}
-
static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
return !uuid_is_null(&ids->uuid) ||
@@ -1946,6 +1927,68 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
+static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+
+ /*
+ * The PI implementation requires the metadata size to be equal to the
+ * t10 pi tuple size.
+ */
+ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+ if (ns->ms == sizeof(struct t10_pi_tuple))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
+
+ ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ return 0;
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ /*
+ * The NVMe over Fabrics specification only supports metadata as
+ * part of the extended data LBA. We rely on HCA/HBA support to
+ * remap the separate metadata buffer from the block layer.
+ */
+ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
+ return -EINVAL;
+ if (ctrl->max_integrity_segments)
+ ns->features |=
+ (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ } else {
+ /*
+ * For PCIe controllers, we can't easily remap the separate
+ * metadata buffer from the block layer and thus require a
+ * separate metadata buffer for block layer metadata/PI support.
+ * We allow extended LBAs for the passthrough interface, though.
+ */
+ if (id->flbas & NVME_NS_FLBAS_META_EXT)
+ ns->features |= NVME_NS_EXT_LBAS;
+ else
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
+ }
+
+ return 0;
+}
+
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+ struct request_queue *q)
+{
+ bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+
+ if (ctrl->max_hw_sectors) {
+ u32 max_segments =
+ (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
+ blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+ blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
+ }
+ blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+ blk_queue_dma_alignment(q, 7);
+ blk_queue_write_cache(q, vwc, vwc);
+}
+
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
@@ -1953,11 +1996,15 @@ static void nvme_update_disk_info(struct gendisk *disk,
unsigned short bs = 1 << ns->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
+ /*
+ * The block layer can't support LBA sizes larger than the page size
+ * yet, so catch this early and don't allow block I/O.
+ */
if (ns->lba_shift > PAGE_SHIFT) {
- /* unsupported block size, set capacity to 0 later */
+ capacity = 0;
bs = (1 << 9);
}
- blk_mq_freeze_queue(disk->queue);
+
blk_integrity_unregister(disk);
atomic_bs = phys_bs = bs;
@@ -1992,13 +2039,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_queue_io_opt(disk->queue, io_opt);
/*
- * The block layer can't support LBA sizes larger than the page size
- * yet, so catch this early and don't allow block I/O.
- */
- if (ns->lba_shift > PAGE_SHIFT)
- capacity = 0;
-
- /*
* Register a metadata profile for PI, or the plain non-integrity NVMe
* metadata masquerading as Type 0 if supported, otherwise reject block
* I/O to namespaces with metadata except when the namespace supports
@@ -2020,10 +2060,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
if (id->nsattr & NVME_NS_ATTR_RO)
set_disk_ro(disk, true);
- else
- set_disk_ro(disk, false);
-
- blk_mq_unfreeze_queue(disk->queue);
}
static inline bool nvme_first_scan(struct gendisk *disk)
@@ -2063,150 +2099,49 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
{
unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
- struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- /*
- * If identify namespace failed, use default 512 byte block size so
- * block layer can use before failing read/write for 0 capacity.
- */
+ blk_mq_freeze_queue(ns->disk->queue);
ns->lba_shift = id->lbaf[lbaf].ds;
- if (ns->lba_shift == 0)
- ns->lba_shift = 9;
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
- switch (ns->head->ids.csi) {
- case NVME_CSI_NVM:
- break;
- case NVME_CSI_ZNS:
- ret = nvme_update_zone_info(disk, ns, lbaf);
- if (ret) {
- dev_warn(ctrl->device,
- "failed to add zoned namespace:%u ret:%d\n",
- ns->head->ns_id, ret);
- return ret;
- }
- break;
- default:
- dev_warn(ctrl->device, "unknown csi:%u ns:%u\n",
- ns->head->ids.csi, ns->head->ns_id);
- return -ENODEV;
+ if (ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf);
+ if (ret)
+ goto out_unfreeze;
}
- ns->features = 0;
- ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
- /* the PI implementation requires metadata equal t10 pi tuple size */
- if (ns->ms == sizeof(struct t10_pi_tuple))
- ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- else
- ns->pi_type = 0;
+ ret = nvme_configure_metadata(ns, id);
+ if (ret)
+ goto out_unfreeze;
+ nvme_set_chunk_sectors(ns, id);
+ nvme_update_disk_info(ns->disk, ns, id);
+ blk_mq_unfreeze_queue(ns->disk->queue);
- if (ns->ms) {
- /*
- * For PCIe only the separate metadata pointer is supported,
- * as the block layer supplies metadata in a separate bio_vec
- * chain. For Fabrics, only metadata as part of extended data
- * LBA is supported on the wire per the Fabrics specification,
- * but the HBA/HCA will do the remapping from the separate
- * metadata buffers for us.
- */
- if (id->flbas & NVME_NS_FLBAS_META_EXT) {
- ns->features |= NVME_NS_EXT_LBAS;
- if ((ctrl->ops->flags & NVME_F_FABRICS) &&
- (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED) &&
- ctrl->max_integrity_segments)
- ns->features |= NVME_NS_METADATA_SUPPORTED;
- } else {
- if (WARN_ON_ONCE(ctrl->ops->flags & NVME_F_FABRICS))
- return -EINVAL;
- if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
- ns->features |= NVME_NS_METADATA_SUPPORTED;
- }
+ if (blk_queue_is_zoned(ns->queue)) {
+ ret = nvme_revalidate_zones(ns);
+ if (ret && !nvme_first_scan(ns->disk))
+ return ret;
}
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
- nvme_mpath_update_disk_size(ns->head->disk);
+ blk_queue_update_readahead(ns->head->disk->queue);
+ nvme_update_bdev_size(ns->head->disk);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
}
#endif
return 0;
-}
-
-static int _nvme_revalidate_disk(struct gendisk *disk)
-{
- struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_id_ns *id;
- struct nvme_ns_ids ids;
- int ret = 0;
-
- if (test_bit(NVME_NS_DEAD, &ns->flags)) {
- set_capacity(disk, 0);
- return -ENODEV;
- }
-
- ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id);
- if (ret)
- goto out;
-
- if (id->ncap == 0) {
- ret = -ENODEV;
- goto free_id;
- }
-
- ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
- if (ret)
- goto free_id;
-
- if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
- dev_err(ctrl->device,
- "identifiers changed for nsid %d\n", ns->head->ns_id);
- ret = -ENODEV;
- goto free_id;
- }
- ret = __nvme_revalidate_disk(disk, id);
-free_id:
- kfree(id);
-out:
- /*
- * Only fail the function if we got a fatal error back from the
- * device, otherwise ignore the error and just move on.
- */
- if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR)))
- ret = 0;
- else if (ret > 0)
- ret = blk_status_to_errno(nvme_error_status(ret));
- return ret;
-}
-
-static int nvme_revalidate_disk(struct gendisk *disk)
-{
- int ret;
-
- ret = _nvme_revalidate_disk(disk);
- if (ret)
- return ret;
-
-#ifdef CONFIG_BLK_DEV_ZONED
- if (blk_queue_is_zoned(disk->queue)) {
- struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- ret = blk_revalidate_disk_zones(disk, NULL);
- if (!ret)
- blk_queue_max_zone_append_sectors(disk->queue,
- ctrl->max_zone_append);
- }
-#endif
+out_unfreeze:
+ blk_mq_unfreeze_queue(ns->disk->queue);
return ret;
}
@@ -2339,7 +2274,6 @@ static const struct block_device_operations nvme_fops = {
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
- .revalidate_disk= nvme_revalidate_disk,
.report_zones = nvme_report_zones,
.pr_ops = &nvme_pr_ops,
};
@@ -2489,26 +2423,6 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
-{
- bool vwc = false;
-
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
-
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
- }
- blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 7);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- vwc = true;
- blk_queue_write_cache(q, vwc, vwc);
-}
-
static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
{
__le64 ts;
@@ -3012,26 +2926,10 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
}
-static struct nvme_cel *nvme_find_cel(struct nvme_ctrl *ctrl, u8 csi)
-{
- struct nvme_cel *cel, *ret = NULL;
-
- spin_lock_irq(&ctrl->lock);
- list_for_each_entry(cel, &ctrl->cels, entry) {
- if (cel->csi == csi) {
- ret = cel;
- break;
- }
- }
- spin_unlock_irq(&ctrl->lock);
-
- return ret;
-}
-
static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
struct nvme_effects_log **log)
{
- struct nvme_cel *cel = nvme_find_cel(ctrl, csi);
+ struct nvme_cel *cel = xa_load(&ctrl->cels, csi);
int ret;
if (cel)
@@ -3049,10 +2947,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
}
cel->csi = csi;
-
- spin_lock_irq(&ctrl->lock);
- list_add_tail(&cel->entry, &ctrl->cels);
- spin_unlock_irq(&ctrl->lock);
+ xa_store(&ctrl->cels, cel->csi, cel, GFP_KERNEL);
out:
*log = &cel->log;
return 0;
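
Moving the effects-log cache from a lock-protected list to an xarray keyed by CSI makes the lookup a single xa_load() with no explicit locking. A hedged sketch of the cache pattern in isolation (demo names, kernel context assumed):

struct demo_entry {
	u8 csi;
	/* cached log payload ... */
};

static struct demo_entry *demo_get_cached(struct xarray *cache, u8 csi)
{
	struct demo_entry *e = xa_load(cache, csi);	/* lockless read */

	if (e)
		return e;			/* fast path: cached */

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;
	e->csi = csi;
	xa_store(cache, csi, e, GFP_KERNEL);	/* publish the entry */
	return e;
}

Teardown then mirrors nvme_free_ctrl(): free the stored entries, then call xa_destroy() on the array.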
@@ -3833,25 +3728,16 @@ out:
}
static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_id_ns *id)
+ struct nvme_ns_ids *ids, bool is_shared)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- bool is_shared = id->nmic & NVME_NS_NMIC_SHARED;
struct nvme_ns_head *head = NULL;
- struct nvme_ns_ids ids;
int ret = 0;
- ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
- if (ret) {
- if (ret < 0)
- return ret;
- return blk_status_to_errno(nvme_error_status(ret));
- }
-
mutex_lock(&ctrl->subsys->lock);
head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
- head = nvme_alloc_ns_head(ctrl, nsid, &ids);
+ head = nvme_alloc_ns_head(ctrl, nsid, ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
@@ -3864,7 +3750,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
"Duplicate unshared namespace %d\n", nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
nsid);
@@ -3912,7 +3798,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ struct nvme_ns_ids *ids)
{
struct nvme_ns *ns;
struct gendisk *disk;
@@ -3920,17 +3807,19 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
char disk_name[DISK_NAME_LEN];
int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret;
+ if (nvme_identify_ns(ctrl, nsid, ids, &id))
+ return;
+
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- return;
+ goto out_free_id;
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
goto out_free_ns;
if (ctrl->opts && ctrl->opts->data_digest)
- ns->queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
@@ -3938,23 +3827,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
-
kref_init(&ns->kref);
- ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
- blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
- nvme_set_queue_limits(ctrl, ns->queue);
-
- ret = nvme_identify_ns(ctrl, nsid, &id);
+ ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED);
if (ret)
goto out_free_queue;
-
- if (id->ncap == 0) /* no namespace (legacy quirk) */
- goto out_free_id;
-
- ret = nvme_init_ns_head(ns, nsid, id);
- if (ret)
- goto out_free_id;
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
disk = alloc_disk_node(0, node);
@@ -3968,7 +3845,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
ns->disk = disk;
- if (__nvme_revalidate_disk(disk, id))
+ if (nvme_update_ns_info(ns, id))
goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
@@ -4003,12 +3880,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
list_del_init(&ns->head->entry);
mutex_unlock(&ctrl->subsys->lock);
nvme_put_ns_head(ns->head);
- out_free_id:
- kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
out_free_ns:
kfree(ns);
+ out_free_id:
+ kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4016,6 +3893,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
+ set_capacity(ns->disk, 0);
nvme_fault_inject_fini(&ns->fault_inject);
mutex_lock(&ns->ctrl->subsys->lock);
@@ -4053,17 +3931,75 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+{
+ struct nvme_id_ns *id;
+ int ret = -ENODEV;
+
+ if (test_bit(NVME_NS_DEAD, &ns->flags))
+ goto out;
+
+ ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
+ if (ret)
+ goto out;
+
+ ret = -ENODEV;
+ if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ dev_err(ns->ctrl->device,
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
+ goto out_free_id;
+ }
+
+ ret = nvme_update_ns_info(ns, id);
+
+out_free_id:
+ kfree(id);
+out:
+ /*
+ * Only remove the namespace if we got a fatal error back from the
+ * device, otherwise ignore the error and just move on.
+ *
+ * TODO: we should probably schedule a delayed retry here.
+ */
+ if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
+ nvme_ns_remove(ns);
+ else
+ revalidate_disk_size(ns->disk, true);
+}
+
+static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
+ struct nvme_ns_ids ids = { };
struct nvme_ns *ns;
+ if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ return;
+
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- if (revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
+ nvme_validate_ns(ns, &ids);
nvme_put_ns(ns);
- } else
- nvme_alloc_ns(ctrl, nsid);
+ return;
+ }
+
+ switch (ids.csi) {
+ case NVME_CSI_NVM:
+ nvme_alloc_ns(ctrl, nsid, &ids);
+ break;
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_warn(ctrl->device,
+ "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ nsid);
+ break;
+ }
+ nvme_alloc_ns(ctrl, nsid, &ids);
+ break;
+ default:
+ dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
+ ids.csi, nsid);
+ break;
+ }
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -4099,7 +4035,14 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
return -ENOMEM;
for (;;) {
- ret = nvme_identify_ns_list(ctrl, prev, ns_list);
+ struct nvme_command cmd = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
+ .identify.nsid = cpu_to_le32(prev),
+ };
+
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
+ NVME_IDENTIFY_DATA_SIZE);
if (ret)
goto free;
@@ -4108,7 +4051,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_ns(ctrl, nsid);
+ nvme_validate_or_alloc_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4131,7 +4074,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_ns(ctrl, i);
+ nvme_validate_or_alloc_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4436,15 +4379,11 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_ctrl *ctrl =
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
- struct nvme_cel *cel, *next;
if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
- list_for_each_entry_safe(cel, next, &ctrl->cels, entry) {
- list_del(&cel->entry);
- kfree(cel);
- }
+ xa_destroy(&ctrl->cels);
nvme_mpath_uninit(ctrl);
__free_page(ctrl->discard_page);
@@ -4476,7 +4415,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
- INIT_LIST_HEAD(&ctrl->cels);
+ xa_init(&ctrl->cels);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
ctrl->ops = ops;
@@ -4641,8 +4580,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
-
-void nvme_sync_queues(struct nvme_ctrl *ctrl)
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
@@ -4650,34 +4588,24 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
list_for_each_entry(ns, &ctrl->namespaces, list)
blk_sync_queue(ns->queue);
up_read(&ctrl->namespaces_rwsem);
+}
+EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
+void nvme_sync_queues(struct nvme_ctrl *ctrl)
+{
+ nvme_sync_io_queues(ctrl);
if (ctrl->admin_q)
blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
-struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path)
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
- struct nvme_ctrl *ctrl;
- struct file *f;
-
- f = filp_open(path, O_RDWR, 0);
- if (IS_ERR(f))
- return ERR_CAST(f);
-
- if (f->f_op != &nvme_dev_fops) {
- ctrl = ERR_PTR(-EINVAL);
- goto out_close;
- }
-
- ctrl = f->private_data;
- nvme_get_ctrl(ctrl);
-
-out_close:
- filp_close(f, NULL);
- return ctrl;
+ if (file->f_op != &nvme_dev_fops)
+ return NULL;
+ return file->private_data;
}
-EXPORT_SYMBOL_NS_GPL(nvme_ctrl_get_by_path, NVME_TARGET_PASSTHRU);
+EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);
/*
* Check we didn't inadvertently grow the command structure sizes:
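
nvme_ctrl_from_file() narrows the old path-based helper to a pure type check, leaving file lookup and reference counting to the caller. A hedged sketch of a caller-side flow; the fd-based wrapper here is illustrative, the real user is the NVMe target passthru code:

static struct nvme_ctrl *demo_ctrl_from_fd(unsigned int fd)
{
	struct file *file = fget(fd);
	struct nvme_ctrl *ctrl;

	if (!file)
		return NULL;

	ctrl = nvme_ctrl_from_file(file);	/* NULL if not an nvme dev */
	if (ctrl)
		nvme_get_ctrl(ctrl);		/* take our own reference */

	fput(file);
	return ctrl;
}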
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index e2e09e25c056..f4c246462658 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -26,6 +26,10 @@ enum nvme_fc_queue_flags {
};
#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+#define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnect
+ * attempts after a
+ * connection failure.
+ */
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
@@ -142,7 +146,8 @@ struct nvme_fc_rport {
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE 0
-#define FCCTRL_TERMIO 1
+#define ASSOC_FAILED 1
+#define FCCTRL_TERMIO 2
struct nvme_fc_ctrl {
spinlock_t lock;
@@ -153,7 +158,6 @@ struct nvme_fc_ctrl {
u32 cnum;
bool ioq_live;
- atomic_t err_work_active;
u64 association_id;
struct nvmefc_ls_rcv_op *rcv_disconn;
@@ -163,7 +167,6 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct delayed_work connect_work;
- struct work_struct err_work;
struct kref ref;
unsigned long flags;
@@ -1837,8 +1840,10 @@ __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
if (opstate != FCPOP_STATE_ACTIVE)
atomic_set(&op->state, opstate);
- else if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
+ else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
+ op->flags |= FCOP_FLAGS_TERMIO;
ctrl->iocnt++;
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
if (opstate != FCPOP_STATE_ACTIVE)
@@ -1874,7 +1879,8 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
if (opstate == FCPOP_STATE_ABORTED) {
spin_lock_irqsave(&ctrl->lock, flags);
- if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
+ if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
+ op->flags & FCOP_FLAGS_TERMIO) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
@@ -2314,7 +2320,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
return 0;
delete_queues:
- for (; i >= 0; i--)
+ for (; i > 0; i--)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
return ret;
}
@@ -2407,24 +2413,97 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
nvme_fc_ctrl_put(ctrl);
}
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, and then invokes
+ * this routine to kill them one by one.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static bool
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+ struct nvme_ctrl *nctrl = data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+
+ __nvme_fc_abort_op(ctrl, op);
+ return true;
+}
+
+/*
+ * This routine runs through all outstanding commands on the association
+ * and aborts them. This routine is typically called by the
+ * delete_association routine. It is also called due to an error during
+ * reconnect. In that scenario, it is most likely a command that initializes
+ * the controller, including fabric Connect commands on io queues, that
+ * may have timed out or failed, thus the io must be killed for the connect
+ * thread to see the error.
+ */
static void
-nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
- int active;
+ /*
+ * If io queues are present, stop them and terminate all outstanding
+ * ios on them. As FC allocates FC exchange for each io, the
+ * transport must contact the LLDD to terminate the exchange,
+ * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+ * to tell us what io's are busy and invoke a transport routine
+ * to kill them with the LLDD. After terminating the exchange
+ * the LLDD will call the transport's normal io done path, but it
+ * will have an aborted status. The done path will return the
+ * io requests back to the block layer as part of normal completions
+ * (but with error status).
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ if (start_queues)
+ nvme_start_queues(&ctrl->ctrl);
+ }
/*
- * if an error (io timeout, etc) while (re)connecting,
- * it's an error on creating the new association.
- * Start the error recovery thread if it hasn't already
- * been started. It is expected there could be multiple
- * ios hitting this path before things are cleaned up.
+ * Other transports, which don't have link-level contexts bound
+ * to sqe's, would try to gracefully shut down the controller by
+ * writing the registers for shutdown and polling (call
+ * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
+ * just aborted and we will wait on those contexts, and given
+ * there was no indication of how live the controller is on the
+ * link, don't send more io to create more contexts for the
+ * shutdown. Let the controller fail via keepalive failure if
+ * it's still present.
+ */
+
+ /*
+ * clean up the admin queue. Same thing as above.
+ */
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_fc_terminate_exchange, &ctrl->ctrl);
+ blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+}
+
+static void
+nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+{
+ /*
+ * If an error (io timeout, etc) occurred while (re)connecting, the
+ * remote port requested termination of the association (disconnect_ls),
+ * or an error (timeout or abort) occurred on an io while creating
+ * the controller: abort any ios on the association and let the
+ * create_association error path resolve things.
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
- active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
- atomic_set(&ctrl->err_work_active, 0);
- WARN_ON(1);
- }
+ __nvme_fc_abort_outstanding_ios(ctrl, true);
+ set_bit(ASSOC_FAILED, &ctrl->flags);
return;
}
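Editor's note: the old err_work indirection is gone. During CONNECTING, recovery now aborts outstanding ios inline and merely marks the association failed, and nvme_fc_create_association() (further down) re-checks ASSOC_FAILED after each blocking step so the normal error unwind runs. A toy standalone model of that handshake (the step names and failure point are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool assoc_failed;			/* models the ASSOC_FAILED bit */

static void error_recovery(void)		/* models recovery in CONNECTING */
{
	/* aborting outstanding ios would happen here */
	assoc_failed = true;
}

static int step(const char *name, bool hits_timeout)
{
	if (hits_timeout)
		error_recovery();
	if (assoc_failed) {			/* re-check after each step */
		printf("%s: association failed, unwinding\n", name);
		return -1;
	}
	printf("%s: ok\n", name);
	return 0;
}

int main(void)
{
	assoc_failed = false;			/* clear_bit() at entry */
	if (step("enable controller", false))
		return 1;
	if (step("init identify", true))	/* an io times out here */
		return 1;
	return 0;
}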
@@ -2433,7 +2512,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
return;
dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: transport association error detected: %s\n",
+ "NVME-FC{%d}: transport association event: %s\n",
ctrl->cnum, errmsg);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
@@ -2446,15 +2525,20 @@ nvme_fc_timeout(struct request *rq, bool reserved)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ struct nvme_command *sqe = &cmdiu->sqe;
/*
- * we can't individually ABTS an io without affecting the queue,
- * thus killing the queue, and thus the association.
- * So resolve by performing a controller reset, which will stop
- * the host/io stack, terminate the association on the link,
- * and recreate an association on the link.
+ * Attempt to abort the offending command. Command completion
+ * will detect the aborted io and will fail the connection.
*/
- nvme_fc_error_recovery(ctrl, "io timeout error");
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "x%08x/x%08x\n",
+ ctrl->cnum, op->queue->qnum, sqe->common.opcode,
+ sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ if (__nvme_fc_abort_op(ctrl, op))
+ nvme_fc_error_recovery(ctrl, "io timeout abort failed");
/*
* the io abort has been initiated. Have the reset timer
@@ -2726,36 +2810,13 @@ nvme_fc_complete_rq(struct request *rq)
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
+ op->flags &= ~FCOP_FLAGS_TERMIO;
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
}
-/*
- * This routine is used by the transport when it needs to find active
- * io on a queue that is to be terminated. The transport uses
- * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke
- * this routine to kill them on a 1 by 1 basis.
- *
- * As FC allocates FC exchange for each io, the transport must contact
- * the LLDD to terminate the exchange, thus releasing the FC exchange.
- * After terminating the exchange the LLDD will call the transport's
- * normal io done path for the request, but it will have an aborted
- * status. The done path will return the io request back to the block
- * layer with an error status.
- */
-static bool
-nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
-{
- struct nvme_ctrl *nctrl = data;
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-
- __nvme_fc_abort_op(ctrl, op);
- return true;
-}
-
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2876,11 +2937,14 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
- if (prior_ioq_cnt != nr_io_queues)
+ if (prior_ioq_cnt != nr_io_queues) {
dev_info(ctrl->ctrl.device,
"reconnect: revising io queue count from %d to %d\n",
prior_ioq_cnt, nr_io_queues);
- blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ nvme_wait_freeze(&ctrl->ctrl);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ nvme_unfreeze(&ctrl->ctrl);
+ }
return 0;
@@ -2972,6 +3036,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->cnum, ctrl->lport->localport.port_name,
ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
+ clear_bit(ASSOC_FAILED, &ctrl->flags);
+
/*
* Create the admin queue
*/
@@ -3000,7 +3066,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
*/
ret = nvme_enable_ctrl(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
@@ -3010,7 +3076,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
ret = nvme_init_identify(&ctrl->ctrl);
- if (ret)
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;
/* sanity checks */
@@ -3055,9 +3121,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ret = nvme_fc_create_io_queues(ctrl);
else
ret = nvme_fc_recreate_io_queues(ctrl);
- if (ret)
- goto out_term_aen_ops;
}
+ if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
+ goto out_term_aen_ops;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -3090,6 +3156,7 @@ out_free_queue:
return ret;
}
+
/*
* This routine stops operation of the controller on the host side.
* On the host os stack side: Admin and IO queues are stopped,
@@ -3110,46 +3177,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
ctrl->iocnt = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
- /*
- * If io queues are present, stop them and terminate all outstanding
- * ios on them. As FC allocates FC exchange for each io, the
- * transport must contact the LLDD to terminate the exchange,
- * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr()
- * to tell us what io's are busy and invoke a transport routine
- * to kill them with the LLDD. After terminating the exchange
- * the LLDD will call the transport's normal io done path, but it
- * will have an aborted status. The done path will return the
- * io requests back to the block layer as part of normal completions
- * (but with error status).
- */
- if (ctrl->ctrl.queue_count > 1) {
- nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
- }
-
- /*
- * Other transports, which don't have link-level contexts bound
- * to sqe's, would try to gracefully shutdown the controller by
- * writing the registers for shutdown and polling (call
- * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
- * just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
- * link, don't send more io to create more contexts for the
- * shutdown. Let the controller fail via keepalive failure if
- * its still present.
- */
-
- /*
- * clean up the admin queue. Same thing as above.
- * use blk_mq_tagset_busy_itr() and the transport routine to
- * terminate the exchanges.
- */
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_fc_terminate_exchange, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ __nvme_fc_abort_outstanding_ios(ctrl, false);
/* kill the aens as they are a separate path */
nvme_fc_abort_aen_ops(ctrl);
@@ -3205,7 +3233,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3260,74 +3287,35 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
}
static void
-__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
-{
- /*
- * if state is connecting - the error occurred as part of a
- * reconnect attempt. The create_association error paths will
- * clean up any outstanding io.
- *
- * if it's a different state - ensure all pending io is
- * terminated. Given this can delay while waiting for the
- * aborted io to return, we recheck adapter state below
- * before changing state.
- */
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
- nvme_stop_keep_alive(&ctrl->ctrl);
-
- /* will block will waiting for io to terminate */
- nvme_fc_delete_association(ctrl);
- }
-
- if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Couldn't change state "
- "to CONNECTING\n", ctrl->cnum);
-}
-
-static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
-
- __nvme_fc_terminate_io(ctrl);
nvme_stop_ctrl(&ctrl->ctrl);
- if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
- ret = nvme_fc_create_association(ctrl);
- else
- ret = -ENOTCONN;
-
- if (ret)
- nvme_fc_reconnect_or_delete(ctrl, ret);
- else
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n",
- ctrl->cnum);
-}
-
-static void
-nvme_fc_connect_err_work(struct work_struct *work)
-{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, err_work);
-
- __nvme_fc_terminate_io(ctrl);
+ /* will block while waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
- atomic_set(&ctrl->err_work_active, 0);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to CONNECTING\n", ctrl->cnum);
- /*
- * Rescheduling the connection after recovering
- * from the io error is left to the reconnect work
- * item, which is what should have stalled waiting on
- * the io that had the error that scheduled this work.
- */
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
+ if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: failed to schedule connect "
+ "after reset\n", ctrl->cnum);
+ } else {
+ flush_delayed_work(&ctrl->connect_work);
+ }
+ } else {
+ nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
+ }
}
+
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -3403,7 +3391,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, ctrl_loss_tmo;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -3429,6 +3417,19 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_free_ctrl;
}
+ /*
+ * if ctrl_loss_tmo is being enforced and the default reconnect delay
+ * is being used, change to a shorter reconnect delay for FC.
+ */
+ if (opts->max_reconnects != -1 &&
+ opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
+ opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
+ ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
+ opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
+ opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
+ opts->reconnect_delay);
+ }
+
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
if (lport->dev)
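Editor's note: the rescaling keeps the configured controller-loss window constant while shortening the per-attempt delay. Worked numbers, assuming the fabrics defaults of a 10-second reconnect delay and a 600-second ctrl_loss_tmo (those defaults are assumptions, not part of this patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int reconnect_delay = 10;	/* assumed NVMF_DEF_RECONNECT_DELAY */
	int max_reconnects = 60;	/* assumed: 600s ctrl_loss_tmo / 10s */

	int ctrl_loss_tmo = max_reconnects * reconnect_delay;	/* 600s */

	reconnect_delay = 2;		/* NVME_FC_DEFAULT_RECONNECT_TMO */
	max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, reconnect_delay);

	/* same 600s window, now as 300 attempts every 2s */
	printf("%ds window: %d attempts x %ds\n",
	       ctrl_loss_tmo, max_reconnects, reconnect_delay);
	return 0;
}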
@@ -3441,7 +3442,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->dev = lport->dev;
ctrl->cnum = idx;
ctrl->ioq_live = false;
- atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3449,7 +3449,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
- INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3542,7 +3541,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ctrl.reset_work);
- cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d4ba736c6c89..74896be40c17 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -673,13 +673,9 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
nvme_mpath_set_live(ns);
}
- if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct gendisk *disk = ns->head->disk;
-
- if (disk)
- disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
- }
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2aaedfa43ed8..bc330bf0d3bd 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -176,7 +176,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
static inline u16 nvme_req_qid(struct request *req)
{
- if (!req->rq_disk)
+ if (!req->q->queuedata)
return 0;
return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
@@ -300,7 +300,7 @@ struct nvme_ctrl {
unsigned long quirks;
struct nvme_id_power_state psd[32];
struct nvme_effects_log *effects;
- struct list_head cels;
+ struct xarray cels;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
@@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
+void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
@@ -682,16 +683,6 @@ static inline void nvme_trace_bio_complete(struct request *req,
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
-static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
-{
- struct block_device *bdev = bdget_disk(disk, 0);
-
- if (bdev) {
- bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
- bdput(bdev);
- }
-}
-
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
@@ -766,15 +757,11 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
-static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
-{
-}
#endif /* CONFIG_NVME_MULTIPATH */
+int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
- unsigned lbaf);
-
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
@@ -791,9 +778,7 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
return BLK_STS_NOTSUPP;
}
-static inline int nvme_update_zone_info(struct gendisk *disk,
- struct nvme_ns *ns,
- unsigned lbaf)
+static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
dev_warn(ns->ctrl->device,
"Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
@@ -838,7 +823,7 @@ static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
-struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path);
+struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8984796db0c8..0578ff253c47 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -198,6 +198,7 @@ struct nvme_queue {
u32 q_depth;
u16 cq_vector;
u16 sq_tail;
+ u16 last_sq_tail;
u16 cq_head;
u16 qid;
u8 cq_phase;
@@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
return 0;
}
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
+/*
+ * Write sq tail if we are asked to, or if the next command would wrap.
+ */
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
+ if (!write_sq) {
+ u16 next_tail = nvmeq->sq_tail + 1;
+
+ if (next_tail == nvmeq->q_depth)
+ next_tail = 0;
+ if (next_tail != nvmeq->last_sq_tail)
+ return;
+ }
+
if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
writel(nvmeq->sq_tail, nvmeq->q_db);
+ nvmeq->last_sq_tail = nvmeq->sq_tail;
}
/**
@@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- if (write_sq)
- nvme_write_sq_db(nvmeq);
+ nvme_write_sq_db(nvmeq, write_sq);
spin_unlock(&nvmeq->sq_lock);
}
@@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
struct nvme_queue *nvmeq = hctx->driver_data;
spin_lock(&nvmeq->sq_lock);
- nvme_write_sq_db(nvmeq);
+ if (nvmeq->sq_tail != nvmeq->last_sq_tail)
+ nvme_write_sq_db(nvmeq, true);
spin_unlock(&nvmeq->sq_lock);
}
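Editor's note: together these two hunks defer the SQ doorbell write. nvme_submit_cmd() only rings it when write_sq is set or the queue is about to wrap past the last value the device has seen, and nvme_commit_rqs() flushes whatever was batched. A standalone toy model of the wrap test (the queue depth and submission loop are assumed):

#include <stdbool.h>
#include <stdio.h>

struct sq {
	unsigned q_depth, sq_tail, last_sq_tail, doorbell;
};

static void write_sq_db(struct sq *q, bool write_sq)
{
	if (!write_sq) {
		unsigned next_tail = q->sq_tail + 1;

		if (next_tail == q->q_depth)
			next_tail = 0;
		if (next_tail != q->last_sq_tail)
			return;			/* keep batching */
	}
	q->doorbell = q->sq_tail;		/* models writel(sq_tail, q_db) */
	q->last_sq_tail = q->sq_tail;
}

int main(void)
{
	struct sq q = { .q_depth = 4 };

	for (int i = 0; i < 6; i++) {		/* submissions without write_sq */
		q.sq_tail = (q.sq_tail + 1) % q.q_depth;
		write_sq_db(&q, false);
		printf("tail=%u doorbell=%u\n", q.sq_tail, q.doorbell);
	}
	write_sq_db(&q, true);			/* commit_rqs-style flush */
	printf("flushed doorbell=%u\n", q.doorbell);
	return 0;
}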
@@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
struct nvme_dev *dev = nvmeq->dev;
nvmeq->sq_tail = 0;
+ nvmeq->last_sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -2038,32 +2053,30 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
.calc_sets = nvme_calc_irq_sets,
.priv = dev,
};
- unsigned int irq_queues, this_p_queues;
+ unsigned int irq_queues, poll_queues;
/*
- * Poll queues don't need interrupts, but we need at least one IO
- * queue left over for non-polled IO.
+ * Poll queues don't need interrupts, but we need at least one I/O queue
+ * left over for non-polled I/O.
*/
- this_p_queues = dev->nr_poll_queues;
- if (this_p_queues >= nr_io_queues) {
- this_p_queues = nr_io_queues - 1;
- irq_queues = 1;
- } else {
- irq_queues = nr_io_queues - this_p_queues + 1;
- }
- dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
+ poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
+ dev->io_queues[HCTX_TYPE_POLL] = poll_queues;
- /* Initialize for the single interrupt case */
+ /*
+ * Initialize for the single interrupt case, will be updated in
+ * nvme_calc_irq_sets().
+ */
dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
dev->io_queues[HCTX_TYPE_READ] = 0;
/*
- * Some Apple controllers require all queues to use the
- * first vector.
+ * We need interrupts for the admin queue and each non-polled I/O queue,
+ * but some Apple controllers require all queues to use the first
+ * vector.
*/
- if (dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)
- irq_queues = 1;
-
+ irq_queues = 1;
+ if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
+ irq_queues += (nr_io_queues - poll_queues);
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
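Editor's note: the rewritten accounting asks for one vector for the admin queue plus one per non-polled I/O queue, unless the single-vector quirk caps everything at one. Worked numbers with assumed inputs (8 I/O queues requested, 3 of them polled):

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned nr_io_queues = 8, nr_poll_queues = 3;	/* assumed inputs */
	int single_vector_quirk = 0;

	unsigned poll_queues = MIN(nr_poll_queues, nr_io_queues - 1);
	unsigned irq_queues = 1;		/* admin queue always gets one */

	if (!single_vector_quirk)
		irq_queues += nr_io_queues - poll_queues;

	/* prints: poll=3, vectors=6 (admin + 5 non-polled I/O queues) */
	printf("poll=%u, vectors=%u\n", poll_queues, irq_queues);
	return 0;
}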
@@ -3187,7 +3200,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
- { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
@@ -3195,6 +3209,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
NVME_QUIRK_SHARED_TAGS },
+
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, nvme_id_table);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9e378d0a0c01..65e3d0ef36e1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
- struct mutex teardown_lock;
bool use_inline_data;
u32 io_queues[HCTX_MAX_TYPES];
};
@@ -1010,8 +1009,8 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- mutex_lock(&ctrl->teardown_lock);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (ctrl->ctrl.admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
- mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- mutex_lock(&ctrl->teardown_lock);
if (ctrl->ctrl.queue_count > 1) {
nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
if (ctrl->ctrl.tagset) {
blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
}
- mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@ -1730,10 +1727,11 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
req->result = cqe->result;
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
- if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+ if (unlikely(!req->mr ||
+ wc->ex.invalidate_rkey != req->mr->rkey)) {
dev_err(queue->ctrl->ctrl.device,
"Bogus remote invalidation for rkey %#x\n",
- req->mr->rkey);
+ req->mr ? req->mr->rkey : 0);
nvme_rdma_error_recovery(queue->ctrl);
}
} else if (req->mr) {
@@ -1767,6 +1765,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
+ /* sanity checking for received data length */
+ if (unlikely(wc->byte_len < len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected nvme completion length(%d)\n", wc->byte_len);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return;
+ }
+
ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
/*
* AEN requests are special as they don't time out and can
@@ -1889,10 +1895,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
}
- ret = rdma_connect(queue->cm_id, &param);
+ ret = rdma_connect_locked(queue->cm_id, &param);
if (ret) {
dev_err(ctrl->ctrl.device,
- "rdma_connect failed (%d).\n", ret);
+ "rdma_connect_locked failed (%d).\n", ret);
goto out_destroy_queue_ib;
}
@@ -1926,7 +1932,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
complete(&queue->cm_done);
return 0;
case RDMA_CM_EVENT_REJECTED:
- nvme_rdma_destroy_queue_ib(queue);
cm_error = nvme_rdma_conn_rejected(queue, ev);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
@@ -1968,16 +1973,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- /* fence other contexts that may complete the command */
- mutex_lock(&ctrl->teardown_lock);
nvme_rdma_stop_queue(queue);
- if (!blk_mq_request_completed(rq)) {
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(rq);
}
- mutex_unlock(&ctrl->teardown_lock);
}
static enum blk_eh_timer_return
@@ -2312,7 +2313,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
return ERR_PTR(-ENOMEM);
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d6a3e1487354..c0c33320fe65 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -124,7 +124,6 @@ struct nvme_tcp_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
- struct mutex teardown_lock;
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
@@ -1886,8 +1885,8 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
if (ctrl->admin_tagset) {
blk_mq_tagset_busy_iter(ctrl->admin_tagset,
@@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
bool remove)
{
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
if (ctrl->queue_count <= 1)
- goto out;
+ return;
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
if (ctrl->tagset) {
blk_mq_tagset_busy_iter(ctrl->tagset,
@@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
-out:
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
@@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
- /* fence other contexts that may complete the command */
- mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
- if (!blk_mq_request_completed(rq)) {
+ if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
blk_mq_complete_request(rq);
}
- mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static enum blk_eh_timer_return
@@ -2455,7 +2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
nvme_tcp_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
- mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 57cfd78731fb..67e87e9f306f 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -7,6 +7,17 @@
#include <linux/vmalloc.h>
#include "nvme.h"
+int nvme_revalidate_zones(struct nvme_ns *ns)
+{
+ struct request_queue *q = ns->queue;
+ int ret;
+
+ ret = blk_revalidate_disk_zones(ns->disk, NULL);
+ if (!ret)
+ blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+ return ret;
+}
+
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
struct nvme_command c = { };
@@ -35,11 +46,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct gendisk *disk, struct nvme_ns *ns,
- unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
struct nvme_effects_log *log = ns->head->effects;
- struct request_queue *q = disk->queue;
+ struct request_queue *q = ns->queue;
struct nvme_command c = { };
struct nvme_id_ns_zns *id;
int status;
@@ -133,28 +143,6 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL;
}
-static int __nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
- struct nvme_zone_report *report,
- size_t buflen)
-{
- struct nvme_command c = { };
- int ret;
-
- c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
- c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
- c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
- c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
- c.zmr.zra = NVME_ZRA_ZONE_REPORT;
- c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
- c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
-
- ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
- if (ret)
- return ret;
-
- return le64_to_cpu(report->nr_zones);
-}
-
static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb,
@@ -182,6 +170,7 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nvme_zone_report *report;
+ struct nvme_command c = { };
int ret, zone_idx = 0;
unsigned int nz, i;
size_t buflen;
@@ -190,14 +179,26 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
if (!report)
return -ENOMEM;
+ c.zmr.opcode = nvme_cmd_zone_mgmt_recv;
+ c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
+ c.zmr.numd = cpu_to_le32(nvme_bytes_to_numd(buflen));
+ c.zmr.zra = NVME_ZRA_ZONE_REPORT;
+ c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
+ c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
+
sector &= ~(ns->zsze - 1);
while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
memset(report, 0, buflen);
- ret = __nvme_ns_report_zones(ns, sector, report, buflen);
- if (ret < 0)
+
+ c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns, sector));
+ ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
goto out_free;
+ }
- nz = min_t(unsigned int, ret, nr_zones);
+ nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
if (!nz)
break;
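Editor's note: with the helper folded in, one command template is reused across passes — only slba changes per iteration, a positive NVMe status is mapped to -EIO, and the descriptor count is clamped to what the caller asked for. A compact standalone model of that paging (device geometry and per-report capacity are assumed):

#include <stdio.h>

#define DEV_ZONES	8	/* assumed zones on the device */
#define PER_REPORT	3	/* assumed descriptors per report buffer */

/* Models the sync zone-mgmt-recv command: fills nr with the number of
 * descriptors starting at zone slba, returns NVMe status (0 = success). */
static int report_zones(unsigned slba, unsigned *nr)
{
	*nr = slba < DEV_ZONES ? DEV_ZONES - slba : 0;
	if (*nr > PER_REPORT)
		*nr = PER_REPORT;
	return 0;
}

int main(void)
{
	unsigned nr_zones = 10, zone_idx = 0, zone = 0;

	while (zone_idx < nr_zones && zone < DEV_ZONES) {
		unsigned nz;
		int ret = report_zones(zone, &nz);

		if (ret)
			return ret > 0 ? 5 /* treat NVMe status as -EIO */ : -ret;
		if (nz > nr_zones - zone_idx)
			nz = nr_zones - zone_idx;	/* min() clamp */
		if (!nz)
			break;
		printf("zones %u..%u reported\n", zone, zone + nz - 1);
		zone_idx += nz;
		zone += nz;
	}
	return 0;
}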
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e9fe91786bbb..dca34489a1dc 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -727,7 +727,9 @@ u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+ nvmet_stop_keep_alive_timer(req->sq->ctrl);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_start_keep_alive_timer(req->sq->ctrl);
nvmet_set_result(req, req->sq->ctrl->kato);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b7b63330b5ef..957b39a82431 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -395,7 +395,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
nvmet_ctrl_fatal_error(ctrl);
}
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -407,7 +407,7 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -907,8 +907,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
- trace_nvmet_req_init(req, req->cmd);
-
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -938,6 +936,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
if (status)
goto fail;
+ trace_nvmet_req_init(req, req->cmd);
+
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
@@ -1126,7 +1126,8 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* in case a host died before it enabled the controller. Hence, simply
* reset the keep alive timer when the controller is enabled.
*/
- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ if (ctrl->kato)
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index e6861cc10e7d..cd4e73aa9807 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1019,7 +1019,7 @@ static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{
/* if LLDD not implemented, leave as NULL */
- if (!hostport->hosthandle)
+ if (!hostport || !hostport->hosthandle)
return;
nvmet_fc_hostport_put(hostport);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0d6008cf66a2..f6d81239be21 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -579,7 +579,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
if (ret)
- goto out_put_ctrl;
+ goto out;
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
@@ -635,8 +635,8 @@ out_free_queues:
kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
-out_put_ctrl:
nvme_put_ctrl(&ctrl->ctrl);
+out:
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 47ee3fb193bd..559a15ccc322 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -395,6 +395,8 @@ void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index dacfa7435d0b..8ee94f056898 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
u16 status = NVME_SC_SUCCESS;
struct nvme_id_ctrl *id;
- u32 max_hw_sectors;
+ int max_hw_sectors;
int page_shift;
id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -48,6 +48,13 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
pctrl->max_hw_sectors);
+ /*
+ * nvmet_passthru_map_sg is limited to using a single bio, so limit
+ * the mdts based on BIO_MAX_PAGES as well
+ */
+ max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
+ max_hw_sectors);
+
page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
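Editor's note: the clamp keeps a passthru transfer inside a single bio. Worked numbers, assuming BIO_MAX_PAGES = 256, 4 KiB pages, and MPSMIN = 0 (all assumptions for illustration):

#include <stdio.h>

static int ilog2u(unsigned v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned max_hw_sectors = 256 << (12 - 9);	/* BIO_MAX_PAGES << (PAGE_SHIFT - 9) = 2048 */
	unsigned page_shift = 0 + 12;			/* NVME_CAP_MPSMIN(cap) + 12 */

	int mdts = ilog2u(max_hw_sectors) + 9 - page_shift;

	/* mdts=8 -> 2^8 pages -> 1024 KiB max transfer */
	printf("mdts=%d (%u KiB)\n", mdts, (1u << mdts) << (page_shift - 10));
	return 0;
}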
@@ -180,18 +187,20 @@ static void nvmet_passthru_req_done(struct request *rq,
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
- int sg_cnt = req->sg_cnt;
struct scatterlist *sg;
int op_flags = 0;
struct bio *bio;
int i, ret;
+ if (req->sg_cnt > BIO_MAX_PAGES)
+ return -EINVAL;
+
if (req->cmd->common.opcode == nvme_cmd_flush)
op_flags = REQ_FUA;
else if (nvme_is_write(req->cmd))
op_flags = REQ_SYNC | REQ_IDLE;
- bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
bio->bi_end_io = bio_put;
bio->bi_opf = req_op(rq) | op_flags;
@@ -201,7 +210,6 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
bio_put(bio);
return -EINVAL;
}
- sg_cnt--;
}
ret = blk_rq_append_bio(rq, &bio);
@@ -236,7 +244,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
q = ns->queue;
}
- rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, req->cmd, 0, NVME_QID_ANY);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
@@ -456,10 +464,26 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_CTRL:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
case NVME_ID_CNS_NS:
req->execute = nvmet_passthru_execute_cmd;
req->p.use_workqueue = true;
return NVME_SC_SUCCESS;
+ case NVME_ID_CNS_CS_NS:
+ switch (req->cmd->identify.csi) {
+ case NVME_CSI_ZNS:
+ req->execute = nvmet_passthru_execute_cmd;
+ req->p.use_workqueue = true;
+ return NVME_SC_SUCCESS;
+ }
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
default:
return nvmet_setup_passthru_command(req);
}
@@ -474,6 +498,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
struct nvme_ctrl *ctrl;
+ struct file *file;
int ret = -EINVAL;
void *old;
@@ -488,24 +513,29 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
goto out_unlock;
}
- ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
- if (IS_ERR(ctrl)) {
- ret = PTR_ERR(ctrl);
+ file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_unlock;
+ }
+
+ ctrl = nvme_ctrl_from_file(file);
+ if (!ctrl) {
pr_err("failed to open nvme controller %s\n",
subsys->passthru_ctrl_path);
- goto out_unlock;
+ goto out_put_file;
}
old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
subsys, GFP_KERNEL);
if (xa_is_err(old)) {
ret = xa_err(old);
- goto out_put_ctrl;
+ goto out_put_file;
}
if (old)
- goto out_put_ctrl;
+ goto out_put_file;
subsys->passthru_ctrl = ctrl;
subsys->ver = ctrl->vs;
@@ -516,13 +546,12 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
NVME_TERTIARY(subsys->ver));
subsys->ver = NVME_VS(1, 2, 1);
}
-
+ nvme_get_ctrl(ctrl);
__module_get(subsys->passthru_ctrl->ops->module);
- mutex_unlock(&subsys->lock);
- return 0;
+ ret = 0;
-out_put_ctrl:
- nvme_put_ctrl(ctrl);
+out_put_file:
+ filp_close(file, NULL);
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 8e0d766d2722..dc1f0f647189 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -94,7 +94,6 @@ struct nvmet_tcp_queue {
struct socket *sock;
struct nvmet_tcp_port *port;
struct work_struct io_work;
- int cpu;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
@@ -144,7 +143,6 @@ struct nvmet_tcp_port {
struct work_struct accept_work;
struct nvmet_port *nport;
struct sockaddr_storage addr;
- int last_cpu;
void (*data_ready)(struct sock *);
};
@@ -219,6 +217,11 @@ static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+ return queue->sock->sk->sk_incoming_cpu;
+}
+
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -506,7 +509,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_queue *queue = cmd->queue;
llist_add(&cmd->lentry, &queue->resp_list);
- queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -1223,7 +1226,7 @@ static void nvmet_tcp_io_work(struct work_struct *w)
* We exahusted our budget, requeue our selves
*/
if (pending)
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
@@ -1383,7 +1386,7 @@ static void nvmet_tcp_data_ready(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -1403,7 +1406,7 @@ static void nvmet_tcp_write_space(struct sock *sk)
if (sk_stream_is_writeable(sk)) {
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -1512,9 +1515,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_free_connect;
- port->last_cpu = cpumask_next_wrap(port->last_cpu,
- cpu_online_mask, -1, false);
- queue->cpu = port->last_cpu;
nvmet_prepare_receive_pdu(queue);
mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1525,7 +1525,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
return 0;
out_destroy_sq:
@@ -1612,7 +1612,6 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
}
port->nport = nport;
- port->last_cpu = -1;
INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
if (port->nport->inline_data_size < 0)
port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index 0458046d6501..c14e3249a14d 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
return req->sq->ctrl;
}
-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
- bool init)
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
- struct nvmet_ns *ns;
-
- if ((init && req->sq->qid) || (!init && req->cq->qid)) {
- ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
- strncpy(name, ns->device_path, DISK_NAME_LEN);
- return;
- }
-
- memset(name, 0, DISK_NAME_LEN);
+ if (req->ns)
+ strncpy(name, req->ns->device_path, DISK_NAME_LEN);
+ else
+ memset(name, 0, DISK_NAME_LEN);
}
#endif
@@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
TP_fast_assign(
__entry->cmd = cmd;
__entry->ctrl = nvmet_req_to_ctrl(req);
- __assign_disk_name(__entry->disk, req, true);
+ __assign_req_name(__entry->disk, req);
__entry->qid = req->sq->qid;
__entry->cid = cmd->common.command_id;
__entry->opcode = cmd->common.opcode;
@@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
__entry->cid = req->cqe->command_id;
__entry->result = le64_to_cpu(req->cqe->result.u64);
__entry->status = le16_to_cpu(req->cqe->status) >> 1;
- __assign_disk_name(__entry->disk, req, false);
+ __assign_req_name(__entry->disk, req);
),
TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
__print_ctrl_name(__entry->ctrl),
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6cd3edb2eaf6..a09ff8409f60 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -128,7 +128,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
if (attr->private)
dev = attr->private;
else
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
/* Stop the user from reading */
@@ -168,7 +168,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
if (attr->private)
dev = attr->private;
else
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
nvmem = to_nvmem_device(dev);
/* Stop the user from writing */
@@ -219,7 +219,7 @@ static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr, int i)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
return nvmem_bin_attr_get_umode(nvmem);
@@ -321,7 +321,7 @@ static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
- ida_simple_remove(&nvmem_ida, nvmem->id);
+ ida_free(&nvmem_ida, nvmem->id);
gpiod_put(nvmem->wp_gpio);
kfree(nvmem);
}
@@ -361,16 +361,14 @@ static void nvmem_cell_add(struct nvmem_cell *cell)
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
-static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
- const struct nvmem_cell_info *info,
- struct nvmem_cell *cell)
+static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ struct nvmem_cell *cell)
{
cell->nvmem = nvmem;
cell->offset = info->offset;
cell->bytes = info->bytes;
- cell->name = kstrdup_const(info->name, GFP_KERNEL);
- if (!cell->name)
- return -ENOMEM;
+ cell->name = info->name;
cell->bit_offset = info->bit_offset;
cell->nbits = info->nbits;
@@ -382,13 +380,30 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev,
"cell %s unaligned to nvmem stride %d\n",
- cell->name, nvmem->stride);
+ cell->name ?: "<unknown>", nvmem->stride);
return -EINVAL;
}
return 0;
}
+static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info,
+ struct nvmem_cell *cell)
+{
+ int err;
+
+ err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
+ if (err)
+ return err;
+
+ cell->name = kstrdup_const(info->name, GFP_KERNEL);
+ if (!cell->name)
+ return -ENOMEM;
+
+ return 0;
+}
+
/**
* nvmem_add_cells() - Add cell information to an nvmem device
*
@@ -596,7 +611,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!nvmem)
return ERR_PTR(-ENOMEM);
- rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
+ rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
if (rval < 0) {
kfree(nvmem);
return ERR_PTR(rval);
@@ -608,7 +623,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
- ida_simple_remove(&nvmem_ida, nvmem->id);
+ ida_free(&nvmem_ida, nvmem->id);
rval = PTR_ERR(nvmem->wp_gpio);
kfree(nvmem);
return ERR_PTR(rval);
@@ -835,6 +850,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
struct device_node *nvmem_np;
+ struct nvmem_device *nvmem;
int index = 0;
if (id)
@@ -844,7 +860,9 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-ENOENT);
- return __nvmem_device_get(nvmem_np, device_match_of_node);
+ nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
+ of_node_put(nvmem_np);
+ return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
@@ -1460,7 +1478,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
if (!nvmem)
return -EINVAL;
- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
if (rc)
return rc;
@@ -1490,7 +1508,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
if (!nvmem)
return -EINVAL;
- rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+ rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
if (rc)
return rc;
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 856d9c3fc38e..6a537d959f14 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -28,19 +28,6 @@ static int mtk_reg_read(void *context,
return 0;
}
-static int mtk_reg_write(void *context,
- unsigned int reg, void *_val, size_t bytes)
-{
- struct mtk_efuse_priv *priv = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
-
- while (words--)
- writel(*val++, priv->base + reg + (i++ * 4));
-
- return 0;
-}
-
static int mtk_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -61,7 +48,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
econfig.stride = 4;
econfig.word_size = 4;
econfig.reg_read = mtk_reg_read;
- econfig.reg_write = mtk_reg_write;
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d91618641be6..18450437d5d5 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,13 +74,6 @@ config OF_NET
depends on NETDEVICES
def_bool y
-config OF_MDIO
- def_tristate PHYLIB
- depends on PHYLIB
- select FIXED_PHY
- help
- OpenFirmware MDIO bus (Ethernet PHY) accessors
-
config OF_RESERVED_MEM
bool
depends on OF_EARLY_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af0cccd..6e1e5212f058 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
-obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index da4f7341323f..1c3257a2d4e3 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -13,6 +13,7 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/dma-direct.h> /* for bus_dma_region */
#include "of_private.h"
@@ -937,33 +938,33 @@ void __iomem *of_io_request_and_map(struct device_node *np, int index,
}
EXPORT_SYMBOL(of_io_request_and_map);
+#ifdef CONFIG_HAS_DMA
/**
- * of_dma_get_range - Get DMA range info
+ * of_dma_get_range - Get DMA range info and put it into a map array
* @np: device node to get DMA range info
- * @dma_addr: pointer to store initial DMA address of DMA range
- * @paddr: pointer to store initial CPU address of DMA range
- * @size: pointer to store size of DMA range
+ * @map: dma range structure to return
*
* Look in bottom up direction for the first "dma-ranges" property
- * and parse it.
- * dma-ranges format:
+ * and parse it. Put the information into a DMA offset map array.
+ *
+ * dma-ranges format:
* DMA addr (dma_addr) : naddr cells
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * It returns -ENODEV if "dma-ranges" property was not found for this
+ * device in the DT.
*/
-int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
+int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
{
struct device_node *node = of_node_get(np);
const __be32 *ranges = NULL;
- int len;
- int ret = 0;
bool found_dma_ranges = false;
struct of_range_parser parser;
struct of_range range;
- u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+ struct bus_dma_region *r;
+ int len, num_ranges = 0;
+ int ret = 0;
while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
@@ -989,49 +990,39 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
}
of_dma_range_parser_init(&parser, node);
+ for_each_of_range(&parser, &range)
+ num_ranges++;
+
+ r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*
+ * Record all info in the generic DMA ranges array for struct device.
+ */
+ *map = r;
+ of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
range.bus_addr, range.cpu_addr, range.size);
-
- if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
- pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
- /* Don't error out as we'd break some existing DTs */
- continue;
- }
if (range.cpu_addr == OF_BAD_ADDR) {
pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
range.bus_addr, node);
continue;
}
- dma_offset = range.cpu_addr - range.bus_addr;
-
- /* Take lower and upper limits */
- if (range.bus_addr < dma_start)
- dma_start = range.bus_addr;
- if (range.bus_addr + range.size > dma_end)
- dma_end = range.bus_addr + range.size;
- }
-
- if (dma_start >= dma_end) {
- ret = -EINVAL;
- pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
- node);
- goto out;
+ r->cpu_start = range.cpu_addr;
+ r->dma_start = range.bus_addr;
+ r->size = range.size;
+ r->offset = range.cpu_addr - range.bus_addr;
+ r++;
}
-
- *dma_addr = dma_start;
- *size = dma_end - dma_start;
- *paddr = dma_start + dma_offset;
-
- pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
- *dma_addr, *paddr, *size);
-
out:
of_node_put(node);
-
return ret;
}
+#endif /* CONFIG_HAS_DMA */
/**
* of_dma_is_coherent - Check if device is coherent
@@ -1043,11 +1034,13 @@ out:
*/
bool of_dma_is_coherent(struct device_node *np)
{
- struct device_node *node = of_node_get(np);
+ struct device_node *node;
if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
return true;
+ node = of_node_get(np);
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
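The zero-terminated bus_dma_region array built by of_dma_get_range() above is what the DMA core later walks to translate addresses; the real lookup is translate_phys_to_dma() in <linux/dma-direct.h>. A minimal sketch of the same arithmetic, with the helper name invented for illustration:

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

/* Sketch: translate a CPU address through a dma_range_map array. */
static dma_addr_t example_phys_to_dma(const struct bus_dma_region *map,
				      phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = map; m->size; m++)	/* kcalloc(num + 1, ...) zero-terminates */
		if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
			return (dma_addr_t)(paddr - m->offset);

	return DMA_MAPPING_ERROR;	/* not covered by any dma-ranges entry */
}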
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ea44fea99813..161a23631472 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1869,6 +1869,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
return rc;
}
+EXPORT_SYMBOL_GPL(of_remove_property);
int __of_update_property(struct device_node *np, struct property *newprop,
struct property **oldpropp)
diff --git a/drivers/of/device.c b/drivers/of/device.c
index b439c1e05434..aedfaaafd3e7 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -5,7 +5,8 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h> /* for bus_dma_region */
+#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -90,14 +91,14 @@ int of_device_add(struct platform_device *ofdev)
int of_dma_configure_id(struct device *dev, struct device_node *np,
bool force_dma, const u32 *id)
{
- u64 dma_addr, paddr, size = 0;
- int ret;
- bool coherent;
- unsigned long offset;
const struct iommu_ops *iommu;
- u64 mask, end;
+ const struct bus_dma_region *map = NULL;
+ u64 dma_start = 0;
+ u64 mask, end, size = 0;
+ bool coherent;
+ int ret;
- ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ ret = of_dma_get_range(np, &map);
if (ret < 0) {
/*
* For legacy reasons, we have to assume some devices need
@@ -106,26 +107,35 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
*/
if (!force_dma)
return ret == -ENODEV ? 0 : ret;
-
- dma_addr = offset = 0;
} else {
- offset = PFN_DOWN(paddr - dma_addr);
+ const struct bus_dma_region *r = map;
+ u64 dma_end = 0;
+
+ /* Determine the overall bounds of all DMA regions */
+ for (dma_start = ~0; r->size; r++) {
+ /* Take lower and upper limits */
+ if (r->dma_start < dma_start)
+ dma_start = r->dma_start;
+ if (r->dma_start + r->size > dma_end)
+ dma_end = r->dma_start + r->size;
+ }
+ size = dma_end - dma_start;
/*
* Add a workaround to treat the size as mask + 1 in case
* it is defined in DT as a mask.
*/
if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
+ dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
size);
size = size + 1;
}
if (!size) {
dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
+ kfree(map);
return -EINVAL;
}
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
/*
@@ -144,13 +154,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
else if (!size)
size = 1ULL << 32;
- dev->dma_pfn_offset = offset;
-
/*
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- end = dma_addr + size - 1;
+ end = dma_start + size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
@@ -163,14 +171,17 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np, id);
- if (PTR_ERR(iommu) == -EPROBE_DEFER)
+ if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+ kfree(map);
return -EPROBE_DEFER;
+ }
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
- arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+ arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
+ dev->dma_range_map = map;
return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
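The odd-size workaround above is easiest to follow with concrete numbers; a hedged sketch, helper name invented:

#include <linux/log2.h>
#include <linux/dma-mapping.h>

/* A DT that encodes a mask (e.g. 0xffffffff) instead of a size is odd,
 * so it is bumped to mask + 1 before the DMA mask is derived from it.
 */
static u64 example_mask_from_range(u64 dma_start, u64 size)
{
	if (size & 1)
		size += 1;	/* 0xffffffff -> 0x100000000 (4 GiB) */

	return DMA_BIT_MASK(ilog2(dma_start + size - 1) + 1);
}

/* example_mask_from_range(0, 0xffffffff) == DMA_BIT_MASK(32) */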
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index edc682249c00..d9e6a324de0a 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -157,12 +157,13 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
extern int of_bus_n_addr_cells(struct device_node *np);
extern int of_bus_n_size_cells(struct device_node *np);
-#ifdef CONFIG_OF_ADDRESS
-extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size);
+struct bus_dma_region;
+#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
+int of_dma_get_range(struct device_node *np,
+ const struct bus_dma_region **map);
#else
-static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size)
+static inline int of_dma_get_range(struct device_node *np,
+ const struct bus_dma_region **map)
{
return -ENODEV;
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 46b9371c8a33..a7fbc5e37e19 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -162,7 +162,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
static const struct of_device_id __rmem_of_table_sentinel
- __used __section(__reservedmem_of_table_end);
+ __used __section("__reservedmem_of_table_end");
/**
* __reserved_mem_init_node() - call region specific reserved memory init code
@@ -200,6 +200,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
if (ra->base > rb->base)
return 1;
+ /*
+ * Put the dynamic allocations (address == 0, size == 0) before static
+ * allocations at address 0x0 so that overlap detection works
+ * correctly.
+ */
+ if (ra->size < rb->size)
+ return -1;
+ if (ra->size > rb->size)
+ return 1;
+
return 0;
}
@@ -217,8 +227,7 @@ static void __init __rmem_check_for_overlap(void)
this = &reserved_mem[i];
next = &reserved_mem[i + 1];
- if (!(this->base && next->base))
- continue;
+
if (this->base + this->size > next->base) {
phys_addr_t this_end, next_end;
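The effect of the extra size comparison is that a dynamic placeholder (base 0, size 0) sorts ahead of a static reservation at address 0x0, so the overlap walk compares real neighbours. Ordering for three assumed entries:

/* Sorted order produced by __rmem_cmp() after this change:
 *
 *   { .base = 0x0,        .size = 0x0      }  dynamic, not yet allocated
 *   { .base = 0x0,        .size = 0x10000  }  static reservation at 0x0
 *   { .base = 0x80000000, .size = 0x100000 }  static reservation higher up
 *
 * Bases compare first; equal bases fall back to sizes, so the zero-sized
 * dynamic entry lands first and never triggers a bogus overlap warning.
 */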
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 071f04da32c8..b557a0fcd4ba 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -590,7 +590,7 @@ EXPORT_SYMBOL_GPL(of_platform_device_destroy);
void of_platform_depopulate(struct device *parent)
{
if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) {
- device_for_each_child(parent, NULL, of_platform_device_destroy);
+ device_for_each_child_reverse(parent, NULL, of_platform_device_destroy);
of_node_clear_flag(parent->of_node, OF_POPULATED_BUS);
}
}
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 9b7e84bdc7d4..06cc988faf78 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -7,6 +7,7 @@
#include <linux/memblock.h>
#include <linux/clk.h>
+#include <linux/dma-direct.h> /* to test phys_to_dma/dma_to_phys */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
@@ -869,10 +870,11 @@ static void __init of_unittest_changeset(void)
}
static void __init of_unittest_dma_ranges_one(const char *path,
- u64 expect_dma_addr, u64 expect_paddr, u64 expect_size)
+ u64 expect_dma_addr, u64 expect_paddr)
{
+#ifdef CONFIG_HAS_DMA
struct device_node *np;
- u64 dma_addr, paddr, size;
+ const struct bus_dma_region *map = NULL;
int rc;
np = of_find_node_by_path(path);
@@ -881,28 +883,40 @@ static void __init of_unittest_dma_ranges_one(const char *path,
return;
}
- rc = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ rc = of_dma_get_range(np, &map);
unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc);
+
if (!rc) {
- unittest(size == expect_size,
- "of_dma_get_range wrong size on node %pOF size=%llx\n", np, size);
+ phys_addr_t paddr;
+ dma_addr_t dma_addr;
+ struct device dev_bogus;
+
+ dev_bogus.dma_range_map = map;
+ paddr = dma_to_phys(&dev_bogus, expect_dma_addr);
+ dma_addr = phys_to_dma(&dev_bogus, expect_paddr);
+
unittest(paddr == expect_paddr,
- "of_dma_get_range wrong phys addr (%llx) on node %pOF", paddr, np);
+ "of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n",
+ &paddr, expect_paddr, np);
unittest(dma_addr == expect_dma_addr,
- "of_dma_get_range wrong DMA addr (%llx) on node %pOF", dma_addr, np);
+ "of_dma_get_range: wrong DMA addr %pad (expecting %llx) on node %pOF\n",
+ &dma_addr, expect_dma_addr, np);
+
+ kfree(map);
}
of_node_put(np);
+#endif
}
static void __init of_unittest_parse_dma_ranges(void)
{
of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
- 0x0, 0x20000000, 0x40000000);
+ 0x0, 0x20000000);
of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
- 0x100000000, 0x20000000, 0x2000000000);
+ 0x100000000, 0x20000000);
of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
- 0x80000000, 0x20000000, 0x10000000);
+ 0x80000000, 0x20000000);
}
static void __init of_unittest_pci_dma_ranges(void)
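The test can get away with a stack-allocated struct device because phys_to_dma() and dma_to_phys() consult only dev->dma_range_map in the direct-mapping path. A standalone sketch mirroring the first test case above:

#include <linux/device.h>
#include <linux/dma-direct.h>

static void example_check_map(const struct bus_dma_region *map)
{
	struct device dev_bogus = { .dma_range_map = map };

	/* For a range mapping CPU 0x20000000 to DMA 0x0: */
	WARN_ON(phys_to_dma(&dev_bogus, 0x20000000) != 0x0);
	WARN_ON(dma_to_phys(&dev_bogus, 0x0) != 0x20000000);
}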
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 3ca7543142bf..0e0a5269dc82 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -703,12 +703,10 @@ static int _generic_set_opp_regulator(struct opp_table *opp_table,
* Enable the regulator after setting its voltages, otherwise it breaks
* some boot-enabled regulators.
*/
- if (unlikely(!opp_table->regulator_enabled)) {
+ if (unlikely(!opp_table->enabled)) {
ret = regulator_enable(reg);
if (ret < 0)
dev_warn(dev, "Failed to enable regulator: %d", ret);
- else
- opp_table->regulator_enabled = true;
}
return 0;
@@ -781,29 +779,39 @@ static int _set_opp_custom(const struct opp_table *opp_table,
return opp_table->set_opp(data);
}
+static int _set_required_opp(struct device *dev, struct device *pd_dev,
+ struct dev_pm_opp *opp, int i)
+{
+ unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
+ int ret;
+
+ if (!pd_dev)
+ return 0;
+
+ ret = dev_pm_genpd_set_performance_state(pd_dev, pstate);
+ if (ret) {
+ dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_name(pd_dev), pstate, ret);
+ }
+
+ return ret;
+}
+
/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev,
struct opp_table *opp_table,
- struct dev_pm_opp *opp)
+ struct dev_pm_opp *opp, bool up)
{
struct opp_table **required_opp_tables = opp_table->required_opp_tables;
struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
- unsigned int pstate;
int i, ret = 0;
if (!required_opp_tables)
return 0;
/* Single genpd case */
- if (!genpd_virt_devs) {
- pstate = likely(opp) ? opp->required_opps[0]->pstate : 0;
- ret = dev_pm_genpd_set_performance_state(dev, pstate);
- if (ret) {
- dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
- dev_name(dev), pstate, ret);
- }
- return ret;
- }
+ if (!genpd_virt_devs)
+ return _set_required_opp(dev, dev, opp, 0);
/* Multiple genpd case */
@@ -813,19 +821,21 @@ static int _set_required_opps(struct device *dev,
*/
mutex_lock(&opp_table->genpd_virt_dev_lock);
- for (i = 0; i < opp_table->required_opp_count; i++) {
- pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
-
- if (!genpd_virt_devs[i])
- continue;
-
- ret = dev_pm_genpd_set_performance_state(genpd_virt_devs[i], pstate);
- if (ret) {
- dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
- dev_name(genpd_virt_devs[i]), pstate, ret);
- break;
+ /* Scaling up? Set required OPPs in normal order, else reverse */
+ if (up) {
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
+ if (ret)
+ break;
+ }
+ } else {
+ for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
+ ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i);
+ if (ret)
+ break;
}
}
+
mutex_unlock(&opp_table->genpd_virt_dev_lock);
return ret;
@@ -862,6 +872,34 @@ int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw);
+static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table)
+{
+ int ret;
+
+ if (!opp_table->enabled)
+ return 0;
+
+ /*
+ * Some drivers need to support cases where some platforms may
+ * have OPP table for the device, while others don't and
+ * opp_set_rate() just needs to behave like clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table))
+ return 0;
+
+ ret = _set_opp_bw(opp_table, NULL, dev, true);
+ if (ret)
+ return ret;
+
+ if (opp_table->regulators)
+ regulator_disable(opp_table->regulators[0]);
+
+ ret = _set_required_opps(dev, opp_table, NULL, false);
+
+ opp_table->enabled = false;
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
@@ -888,33 +926,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
}
if (unlikely(!target_freq)) {
- /*
- * Some drivers need to support cases where some platforms may
- * have OPP table for the device, while others don't and
- * opp_set_rate() just needs to behave like clk_set_rate().
- */
- if (!_get_opp_count(opp_table)) {
- ret = 0;
- goto put_opp_table;
- }
-
- if (!opp_table->required_opp_tables && !opp_table->regulators &&
- !opp_table->paths) {
- dev_err(dev, "target frequency can't be 0\n");
- ret = -EINVAL;
- goto put_opp_table;
- }
-
- ret = _set_opp_bw(opp_table, NULL, dev, true);
- if (ret)
- goto put_opp_table;
-
- if (opp_table->regulator_enabled) {
- regulator_disable(opp_table->regulators[0]);
- opp_table->regulator_enabled = false;
- }
-
- ret = _set_required_opps(dev, opp_table, NULL);
+ ret = _opp_set_rate_zero(dev, opp_table);
goto put_opp_table;
}
@@ -933,14 +945,11 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
old_freq = clk_get_rate(clk);
/* Return early if nothing to do */
- if (old_freq == freq) {
- if (!opp_table->required_opp_tables && !opp_table->regulators &&
- !opp_table->paths) {
- dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
- __func__, freq);
- ret = 0;
- goto put_opp_table;
- }
+ if (opp_table->enabled && old_freq == freq) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ ret = 0;
+ goto put_opp_table;
}
/*
@@ -976,7 +985,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Scaling up? Configure required OPPs before frequency */
if (freq >= old_freq) {
- ret = _set_required_opps(dev, opp_table, opp);
+ ret = _set_required_opps(dev, opp_table, opp, true);
if (ret)
goto put_opp;
}
@@ -996,13 +1005,16 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Scaling down? Configure required OPPs after frequency */
if (!ret && freq < old_freq) {
- ret = _set_required_opps(dev, opp_table, opp);
+ ret = _set_required_opps(dev, opp_table, opp, false);
if (ret)
dev_err(dev, "Failed to set required opps: %d\n", ret);
}
- if (!ret)
+ if (!ret) {
ret = _set_opp_bw(opp_table, opp, dev, false);
+ if (!ret)
+ opp_table->enabled = true;
+ }
put_opp:
dev_pm_opp_put(opp);
@@ -1068,7 +1080,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
*/
opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
if (!opp_table)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&opp_table->lock);
mutex_init(&opp_table->genpd_virt_dev_lock);
@@ -1079,8 +1091,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
opp_dev = _add_opp_dev(dev, opp_table);
if (!opp_dev) {
- kfree(opp_table);
- return NULL;
+ ret = -ENOMEM;
+ goto err;
}
_of_init_opp_table(opp_table, dev, index);
@@ -1089,16 +1101,21 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
opp_table->clk = clk_get(dev, NULL);
if (IS_ERR(opp_table->clk)) {
ret = PTR_ERR(opp_table->clk);
- if (ret != -EPROBE_DEFER)
- dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
- ret);
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
}
/* Find interconnect path(s) for the device */
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
- if (ret)
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto err;
+
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
+ }
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
@@ -1107,6 +1124,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
/* Secure the device table modification */
list_add(&opp_table->node, &opp_tables);
return opp_table;
+
+err:
+ kfree(opp_table);
+ return ERR_PTR(ret);
}
void _get_opp_table_kref(struct opp_table *opp_table)
@@ -1129,7 +1150,7 @@ static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
if (opp_table) {
if (!_add_opp_dev_unlocked(dev, opp_table)) {
dev_pm_opp_put_opp_table(opp_table);
- opp_table = NULL;
+ opp_table = ERR_PTR(-ENOMEM);
}
goto unlock;
}
@@ -1160,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref)
struct opp_device *opp_dev, *temp;
int i;
+ /* Drop the lock as soon as we can */
+ list_del(&opp_table->node);
+ mutex_unlock(&opp_table_lock);
+
_of_clear_opp_table(opp_table);
/* Release clk */
@@ -1187,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
- list_del(&opp_table->node);
kfree(opp_table);
-
- mutex_unlock(&opp_table_lock);
}
void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
@@ -1581,8 +1603,8 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
struct opp_table *opp_table;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1640,8 +1662,8 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
struct opp_table *opp_table;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1733,8 +1755,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
int ret, i;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
@@ -1804,11 +1826,9 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
- if (opp_table->regulator_enabled) {
+ if (opp_table->enabled) {
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_disable(opp_table->regulators[i]);
-
- opp_table->regulator_enabled = false;
}
for (i = opp_table->regulator_count - 1; i >= 0; i--)
@@ -1843,8 +1863,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
int ret;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
@@ -1911,8 +1931,8 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
return ERR_PTR(-EINVAL);
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
@@ -1949,6 +1969,9 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
{
int index;
+ if (!opp_table->genpd_virt_devs)
+ return;
+
for (index = 0; index < opp_table->required_opp_count; index++) {
if (!opp_table->genpd_virt_devs[index])
continue;
@@ -1992,8 +2015,11 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
const char **name = names;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(opp_table))
+ return opp_table;
+
+ if (opp_table->genpd_virt_devs)
+ return opp_table;
/*
* If the genpd's OPP table isn't already initialized, parsing of the
@@ -2020,12 +2046,6 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
goto err;
}
- if (opp_table->genpd_virt_devs[index]) {
- dev_err(dev, "Genpd virtual device already set %s\n",
- *name);
- goto err;
- }
-
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
if (IS_ERR(virt_dev)) {
ret = PTR_ERR(virt_dev);
@@ -2098,9 +2118,6 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
int dest_pstate = -EINVAL;
int i;
- if (!pstate)
- return 0;
-
/*
* Normally the src_table will have the "required_opps" property set to
* point to one of the OPPs in the dst_table, but in some cases the
@@ -2163,8 +2180,8 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
int ret;
opp_table = dev_pm_opp_get_opp_table(dev);
- if (!opp_table)
- return -ENOMEM;
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
/* Fix regulator count for dynamic OPPs */
opp_table->regulator_count = 1;
@@ -2405,7 +2422,14 @@ int dev_pm_opp_unregister_notifier(struct device *dev,
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
-void _dev_pm_opp_find_and_remove_table(struct device *dev)
+/**
+ * dev_pm_opp_remove_table() - Free all OPPs associated with the device
+ * @dev: device pointer used to lookup OPP table.
+ *
+ * Free both OPPs created using static entries present in DT and the
+ * dynamically added entries.
+ */
+void dev_pm_opp_remove_table(struct device *dev)
{
struct opp_table *opp_table;
@@ -2432,16 +2456,4 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
}
-
-/**
- * dev_pm_opp_remove_table() - Free all OPPs associated with the device
- * @dev: device pointer used to lookup OPP table.
- *
- * Free both OPPs created using static entries present in DT and the
- * dynamically added entries.
- */
-void dev_pm_opp_remove_table(struct device *dev)
-{
- _dev_pm_opp_find_and_remove_table(dev);
-}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
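The new 'up' flag encodes the usual sequencing rule for shared power domains: raise the required performance states before increasing the clock, lower them after decreasing it. A condensed sketch of the flow in dev_pm_opp_set_rate(), error paths elided; it would have to live inside opp/core.c, since _set_required_opps() and struct opp_table are private there:

#include <linux/clk.h>
#include <linux/pm_opp.h>

static int example_set_rate_ordered(struct device *dev,
				    struct opp_table *opp_table,
				    struct dev_pm_opp *opp, struct clk *clk,
				    unsigned long freq, unsigned long old_freq)
{
	bool up = freq >= old_freq;
	int ret = 0;

	if (up)			/* scaling up: genpd states before the clock */
		ret = _set_required_opps(dev, opp_table, opp, true);
	if (!ret)
		ret = clk_set_rate(clk, freq);
	if (!ret && !up)	/* scaling down: genpd states after the clock */
		ret = _set_required_opps(dev, opp_table, opp, false);

	return ret;
}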
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index b5055cc886ef..5004335cf0de 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -124,7 +124,7 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
continue;
}
- _dev_pm_opp_find_and_remove_table(cpu_dev);
+ dev_pm_opp_remove_table(cpu_dev);
}
}
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 0430290670ab..9faeb83e4b32 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -434,9 +434,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
- unsigned int count = opp_table->supported_hw_count;
- u32 version;
- int ret;
+ unsigned int levels = opp_table->supported_hw_count;
+ int count, versions, ret, i, j;
+ u32 val;
if (!opp_table->supported_hw) {
/*
@@ -451,21 +451,40 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
return true;
}
- while (count--) {
- ret = of_property_read_u32_index(np, "opp-supported-hw", count,
- &version);
- if (ret) {
- dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
- __func__, count, ret);
- return false;
+ count = of_property_count_u32_elems(np, "opp-supported-hw");
+ if (count <= 0 || count % levels) {
+ dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
+ __func__, count);
+ return false;
+ }
+
+ versions = count / levels;
+
+ /* All levels in at least one of the versions should match */
+ for (i = 0; i < versions; i++) {
+ bool supported = true;
+
+ for (j = 0; j < levels; j++) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw",
+ i * levels + j, &val);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, i * levels + j, ret);
+ return false;
+ }
+
+ /* Check if the level is supported */
+ if (!(val & opp_table->supported_hw[j])) {
+ supported = false;
+ break;
+ }
}
- /* Both of these are bitwise masks of the versions */
- if (!(version & opp_table->supported_hw[count]))
- return false;
+ if (supported)
+ return true;
}
- return true;
+ return false;
}
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
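A worked example of the new matrix matching in _opp_is_supported(), with invented values:

/* Driver:  dev_pm_opp_set_supported_hw(dev, (u32 []){ 0x2, 0x4 }, 2);
 *          -> levels = 2
 * DT node: opp-supported-hw = <0x1 0x4>, <0x2 0x4>;
 *          -> count = 4, versions = count / levels = 2
 *
 * Version 0 fails (0x1 & 0x2 == 0); version 1 matches bitwise on both
 * levels, so the OPP node is supported. Previously every cell of the
 * property had to match its corresponding supported_hw entry.
 */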
@@ -616,7 +635,7 @@ free_microvolt:
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- _dev_pm_opp_find_and_remove_table(dev);
+ dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
@@ -823,7 +842,7 @@ free_opp:
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
struct device_node *np;
- int ret, count = 0, pstate_count = 0;
+ int ret, count = 0;
struct dev_pm_opp *opp;
/* OPP table is already initialized for the device */
@@ -857,20 +876,14 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
goto remove_static_opp;
}
- list_for_each_entry(opp, &opp_table->opp_list, node)
- pstate_count += !!opp->pstate;
-
- /* Either all or none of the nodes shall have performance state set */
- if (pstate_count && pstate_count != count) {
- dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
- count, pstate_count);
- ret = -ENOENT;
- goto remove_static_opp;
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ /* Any non-zero performance state would enable the feature */
+ if (opp->pstate) {
+ opp_table->genpd_performance_state = true;
+ break;
+ }
}
- if (pstate_count)
- opp_table->genpd_performance_state = true;
-
return 0;
remove_static_opp:
@@ -886,11 +899,25 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
const __be32 *val;
int nr, ret = 0;
+ mutex_lock(&opp_table->lock);
+ if (opp_table->parsed_static_opps) {
+ opp_table->parsed_static_opps++;
+ mutex_unlock(&opp_table->lock);
+ return 0;
+ }
+
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
+
prop = of_find_property(dev->of_node, "operating-points", NULL);
- if (!prop)
- return -ENODEV;
- if (!prop->value)
- return -ENODATA;
+ if (!prop) {
+ ret = -ENODEV;
+ goto remove_static_opp;
+ }
+ if (!prop->value) {
+ ret = -ENODATA;
+ goto remove_static_opp;
+ }
/*
* Each OPP is a set of tuples consisting of frequency and
@@ -899,13 +926,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
nr = prop->length / sizeof(u32);
if (nr % 2) {
dev_err(dev, "%s: Invalid OPP table\n", __func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto remove_static_opp;
}
- mutex_lock(&opp_table->lock);
- opp_table->parsed_static_opps = 1;
- mutex_unlock(&opp_table->lock);
-
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
@@ -915,12 +939,16 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
if (ret) {
dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
__func__, freq, ret);
- _opp_remove_all_static(opp_table);
- return ret;
+ goto remove_static_opp;
}
nr -= 2;
}
+ return 0;
+
+remove_static_opp:
+ _opp_remove_all_static(opp_table);
+
return ret;
}
@@ -947,8 +975,8 @@ int dev_pm_opp_of_add_table(struct device *dev)
int ret;
opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
- if (!opp_table)
- return -ENOMEM;
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
/*
* OPPs have two version of bindings now. Also try the old (v1)
@@ -1002,8 +1030,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
}
opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
- if (!opp_table)
- return -ENOMEM;
+ if (IS_ERR(opp_table))
+ return PTR_ERR(opp_table);
ret = _of_add_opp_table_v2(dev, opp_table);
if (ret)
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index c3fcd571e446..ebd930e0b3ca 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -147,11 +147,11 @@ enum opp_table_access {
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
- * @regulator_enabled: Set to true if regulators were previously enabled.
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
* property).
* @paths: Interconnect path handles
* @path_count: Number of interconnect paths
+ * @enabled: Set to true if the device's resources are enabled/configured.
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
@@ -195,9 +195,9 @@ struct opp_table {
struct clk *clk;
struct regulator **regulators;
int regulator_count;
- bool regulator_enabled;
struct icc_path **paths;
unsigned int path_count;
+ bool enabled;
bool genpd_performance_state;
bool is_genpd;
@@ -217,7 +217,6 @@ void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 4d7695289eda..cc917865f13a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -116,7 +116,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
if (val != MODULE_STATE_COMING)
- return 0;
+ return NOTIFY_DONE;
/* FIXME: should we process all CPU buffers ? */
mutex_lock(&buffer_mutex);
@@ -124,7 +124,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
add_event_entry(MODULE_LOADED_CODE);
mutex_unlock(&buffer_mutex);
#endif
- return 0;
+ return NOTIFY_OK;
}
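Both return codes let the chain continue; the change just brings the notifier into line with the documented convention. The relevant definitions from include/linux/notifier.h:

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)	/* Bad/Veto action */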
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index a5507f75b524..b5f9ee81a46c 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -39,6 +39,7 @@
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>
@@ -356,8 +357,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
** ggg sacrifices another 710 to the computer gods.
*/
- boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
- 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
+ boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);
if (pages_needed <= 8) {
/*
@@ -1025,6 +1025,8 @@ static const struct dma_map_ops ccio_ops = {
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
#ifdef CONFIG_PROC_FS
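dma_get_seg_boundary_nr_pages(), declared in <linux/dma-mapping.h>, is shorthand for the same arithmetic, which only works out because segment boundaries are power-of-two masks. A worked example:

/* dma_get_seg_boundary_nr_pages(dev, shift) computes
 *
 *	(dma_get_seg_boundary(dev) >> shift) + 1
 *
 * For the default 4 GiB boundary mask 0xffffffff and IOVP_SHIFT == 12:
 *
 *	old: ALIGN(0xffffffffULL + 1, 1ULL << 12) >> 12  == 0x100000
 *	new: (0xffffffff >> 12) + 1                      == 0x100000
 */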
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index d4314fba0269..dce4cdf786cd 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
+#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
@@ -342,8 +343,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
unsigned long shift;
int ret;
- boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
- 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
+ boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);
#if defined(ZX1_SUPPORT)
BUG_ON(ioc->ibase & ~IOVP_MASK);
@@ -1077,6 +1077,8 @@ static const struct dma_map_ops sba_ops = {
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4bef5c2bae9f..0c473d75e625 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -56,6 +56,9 @@ config PCI_MSI_IRQ_DOMAIN
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
+config PCI_MSI_ARCH_FALLBACKS
+ bool
+
config PCI_QUIRKS
default y
bool "Enable PCI quirk workarounds" if EXPERT
@@ -187,6 +190,68 @@ config PCI_HYPERV
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
+choice
+ prompt "PCI Express hierarchy optimization setting"
+ default PCIE_BUS_DEFAULT
+ depends on PCI && EXPERT
+ help
+ MPS (Max Payload Size) and MRRS (Max Read Request Size) are PCIe
+ device parameters that affect performance and the ability to
+ support hotplug and peer-to-peer DMA.
+
+ The following choices set the MPS and MRRS optimization strategy
+ at compile-time. The choices are the same as those offered for
+ the kernel command-line parameter 'pci', i.e.,
+ 'pci=pcie_bus_tune_off', 'pci=pcie_bus_safe',
+ 'pci=pcie_bus_perf', and 'pci=pcie_bus_peer2peer'.
+
+ This is a compile-time setting and can be overridden by the above
+ command-line parameters. If unsure, choose PCIE_BUS_DEFAULT.
+
+config PCIE_BUS_TUNE_OFF
+ bool "Tune Off"
+ depends on PCI
+ help
+ Use the BIOS defaults; don't touch MPS at all. This is the same
+ as booting with 'pci=pcie_bus_tune_off'.
+
+config PCIE_BUS_DEFAULT
+ bool "Default"
+ depends on PCI
+ help
+ Default choice; ensure that the MPS matches the upstream bridge.
+
+config PCIE_BUS_SAFE
+ bool "Safe"
+ depends on PCI
+ help
+ Use the largest MPS that boot-time devices support. If you have a
+ closed system with no possibility of adding new devices, this
+ will use the largest MPS that's supported by all devices. This
+ is the same as booting with 'pci=pcie_bus_safe'.
+
+config PCIE_BUS_PERFORMANCE
+ bool "Performance"
+ depends on PCI
+ help
+ Use MPS and MRRS for best performance. Ensure that a given
+ device's MPS is no larger than its parent MPS, which allows us to
+ keep all switches/bridges to the max MPS supported by their
+ parent. This is the same as booting with 'pci=pcie_bus_perf'.
+
+config PCIE_BUS_PEER2PEER
+ bool "Peer2peer"
+ depends on PCI
+ help
+ Set MPS = 128 for all devices. MPS configuration effected by the
+ other options could cause the MPS on one root port to be
+ different from the MPS on another, which may cause
+ hot-added devices or peer-to-peer DMA to fail. Set MPS to the
+ smallest possible value (128B) system-wide to avoid these issues.
+ This is the same as booting with 'pci=pcie_bus_peer2peer'.
+
+endchoice
+
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index f18c3725ef80..64e2f5e379aa 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -12,7 +12,7 @@ config PCI_MVEBU
select PCI_BRIDGE_EMUL
config PCI_AARDVARK
- bool "Aardvark PCIe controller"
+ tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
@@ -41,6 +41,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -67,6 +68,7 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe host controller"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_MSI_ARCH_FALLBACKS
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -95,6 +97,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on OF || COMPILE_TEST
+ select PCI_MSI_ARCH_FALLBACKS
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -270,9 +273,10 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ default ARCH_BRCMSTB
help
Say Y here to enable PCIe host controller support for
Broadcom STB based SoCs, like the Raspberry Pi 4.
@@ -294,6 +298,13 @@ config PCI_LOONGSON
Say Y here if you want to enable PCI controller support on
Loongson systems.
+config PCIE_HISI_ERR
+ depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon HIP PCIe controller error handling driver"
+ help
+ Say Y here if you want support for handling errors
+ reported by the PCIe controllers on HiSilicon HIP SoCs.
+
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index bcdbf49ab1e4..04c6edc285c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
+obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 254a3e1eff50..84cc58dc8512 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -328,7 +328,6 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
* The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
- * from drivers/pci/dwc/pci-dra7xx.c
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 4550e0d469ca..811c1cb2e8de 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -337,7 +337,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
struct resource_entry *entry;
u64 cpu_addr = cfg_res->start;
u32 addr0, addr1, desc1;
- int r, err, busnr = 0;
+ int r, busnr = 0;
entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
if (entry)
@@ -383,11 +383,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
r++;
}
- err = cdns_pcie_host_map_dma_ranges(rc);
- if (err)
- return err;
-
- return 0;
+ return cdns_pcie_host_map_dma_ranges(rc);
}
static int cdns_pcie_host_init(struct device *dev,
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 044a3761c44f..bc049865f8e0 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -237,8 +237,9 @@ config PCIE_HISI_STB
Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
- bool "MESON PCIe controller"
+ tristate "MESON PCIe controller"
depends on PCI_MSI_IRQ_DOMAIN
+ default m if ARCH_MESON
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Amlogic
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dc387724cf08..6d012d2b1e90 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -73,8 +73,6 @@
#define LINK_UP BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
-#define EXP_CAP_ID_OFFSET 0x70
-
#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
@@ -91,7 +89,6 @@ struct dra7xx_pcie {
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
- int link_gen;
struct irq_domain *irq_domain;
enum dw_pcie_device_mode mode;
};
@@ -142,33 +139,12 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pci)) {
dev_err(dev, "link is already up\n");
return 0;
}
- if (dra7xx->link_gen == 1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, reg);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, reg);
- }
- }
-
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
@@ -490,7 +466,9 @@ static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
u32 ctrl, num_ctrls;
+ int ret;
pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
@@ -506,7 +484,21 @@ static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
~0);
}
- return dw_pcie_allocate_domains(pp);
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg,
+ sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(dev, pp->msi_data);
+ if (ret) {
+ dev_err(dev, "Failed to map MSI data\n");
+ pp->msi_data = 0;
+ dw_pcie_free_msi(pp);
+ }
+ return ret;
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
@@ -937,10 +929,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- dra7xx->link_gen = of_pci_get_max_link_speed(np);
- if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
- dra7xx->link_gen = 2;
-
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 8d82c43ae299..242683cde04a 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -336,32 +336,37 @@ static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_r_mode(ep, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_w_mode(ep, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
+
static int exynos_pcie_link_up(struct dw_pcie *pci)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
@@ -379,6 +384,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
+ pp->bridge->ops = &exynos_pci_ops;
+
exynos_pcie_establish_link(ep);
exynos_pcie_enable_interrupts(ep);
@@ -386,8 +393,6 @@ static int exynos_pcie_host_init(struct pcie_port *pp)
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .rd_own_conf = exynos_pcie_rd_own_conf,
- .wr_own_conf = exynos_pcie_wr_own_conf,
.host_init = exynos_pcie_host_init,
};
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 5fef2613b223..5cf1ef12fb9b 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -79,7 +79,6 @@ struct imx6_pcie {
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
- int link_gen;
struct regulator *vpcie;
void __iomem *phy_base;
@@ -94,15 +93,6 @@ struct imx6_pcie {
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
-/* PCIe Root Complex registers (memory-mapped) */
-#define PCIE_RC_IMX6_MSI_CAP 0x50
-#define PCIE_RC_LCR 0x7c
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
-
-#define PCIE_RC_LCSR 0x80
-
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
@@ -116,8 +106,6 @@ struct imx6_pcie {
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK BIT(16)
-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
#define PCIE_PHY_ATEOVRD_EN BIT(2)
@@ -761,6 +749,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
@@ -769,10 +758,10 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/* Start LTSSM. */
imx6_pcie_ltssm_enable(dev);
@@ -781,12 +770,12 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
if (ret)
goto err_reset_phy;
- if (imx6_pcie->link_gen == 2) {
+ if (pci->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_5_0GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
* Start Directed Speed Change so the best possible
@@ -824,8 +813,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dev_info(dev, "Link: Gen2 disabled\n");
}
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
- dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+ tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+ dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
@@ -847,9 +836,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_setup_phy_mpll(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(imx6_pcie);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ dw_pcie_msi_init(pp);
return 0;
}
@@ -1073,38 +1060,33 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(dev, "pcie_phy clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_phy);
- }
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(dev, "pcie_bus clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_bus);
- }
+ if (IS_ERR(imx6_pcie->pcie_bus))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
+ "pcie_bus clock source missing or invalid\n");
imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(dev, "pcie clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie);
- }
+ if (IS_ERR(imx6_pcie->pcie))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
+ "pcie clock source missing or invalid\n");
switch (imx6_pcie->drvdata->variant) {
case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_inbound_axi);
- }
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
+ "pcie_inbound_axi clock missing or invalid\n");
break;
case IMX8MQ:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux)) {
- dev_err(dev, "pcie_aux clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_aux);
- }
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
fallthrough;
case IMX7D:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
@@ -1165,10 +1147,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(node, "fsl,max-link-speed",
- &imx6_pcie->link_gen);
- if (ret)
- imx6_pcie->link_gen = 1;
+ pci->link_gen = 1;
+ ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx6_pcie->vpcie)) {
@@ -1188,11 +1168,10 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return ret;
if (pci_msi_enabled()) {
- val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
- PCI_MSI_FLAGS);
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
- val);
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
}
return 0;
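dev_err_probe() logs the failure and returns the error code in one statement, and for -EPROBE_DEFER it stays silent and records the reason in /sys/kernel/debug/devices_deferred instead, which is why each clock error path above collapses to a single return. The general shape:

struct clk *clk = devm_clk_get(dev, "pcie_phy");

if (IS_ERR(clk))
	return dev_err_probe(dev, PTR_ERR(clk),
			     "pcie_phy clock source missing or invalid\n");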
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index c8c9d6a75f17..a222728238ca 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -96,8 +96,6 @@
#define LEG_EP 0x1
#define RC 0x2
-#define EXP_CAP_ID_OFFSET 0x70
-
#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
#define AM654_PCIE_DEV_TYPE_MASK 0x3
@@ -123,7 +121,6 @@ struct keystone_pcie {
int msi_host_irq;
int num_lanes;
- u32 num_viewport;
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
@@ -397,13 +394,17 @@ static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
- u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
- u64 start = pp->mem->start;
- u64 end = pp->mem->end;
+ u32 num_viewport = pci->num_viewport;
+ u64 start, end;
+ struct resource *mem;
int i;
+ mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ start = mem->start;
+ end = mem->end;
+
/* Disable BARs for inbound access */
ks_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
@@ -430,10 +431,10 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
-static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
@@ -444,36 +445,29 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
reg |= CFG_TYPE1;
ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
- return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+ return pp->va_cfg0_base + where;
}
-static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 reg;
-
- reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
- CFG_FUNC(PCI_FUNC(devfn));
- if (!pci_is_root_bus(bus->parent))
- reg |= CFG_TYPE1;
- ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
-
- return dw_pcie_write(pp->va_cfg0_base + where, size, val);
-}
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
/**
- * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
-static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ if (!pci_is_root_bus(bus))
+ return 0;
+
/* Configure and set up BAR0 */
ks_pcie_set_dbi_mode(ks_pcie);
@@ -488,8 +482,17 @@ static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
* be sufficient. Use physical address to avoid any conflicts.
*/
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ return 0;
}
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = ks_pcie_v3_65_add_bus,
+};
+
/**
* ks_pcie_link_up() - Check if link up
*/
@@ -807,6 +810,9 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
+ pp->bridge->ops = &ks_pcie_ops;
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
ret = ks_pcie_config_legacy_irq(ks_pcie);
if (ret)
return ret;
@@ -842,11 +848,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .rd_other_conf = ks_pcie_rd_other_conf,
- .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
.msi_host_init = ks_pcie_msi_host_init,
- .scan_bus = ks_pcie_v3_65_scan_bus,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
@@ -867,16 +870,8 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
-
- pp->va_cfg1_base = pp->va_cfg0_base;
-
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
@@ -886,18 +881,6 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
return 0;
}
-static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base,
- u32 reg, size_t size)
-{
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
-
- ks_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_read(base + reg, size, &val);
- ks_pcie_clear_dbi_mode(ks_pcie);
- return val;
-}
-
static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
@@ -912,7 +895,6 @@ static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
.start_link = ks_pcie_start_link,
.stop_link = ks_pcie_stop_link,
.link_up = ks_pcie_link_up,
- .read_dbi2 = ks_pcie_am654_read_dbi2,
.write_dbi2 = ks_pcie_am654_write_dbi2,
};
@@ -1125,31 +1107,6 @@ static int ks_pcie_am654_set_mode(struct device *dev,
return 0;
}
-static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed)
-{
- u32 val;
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP,
- val);
- }
-
- val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2);
- if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= link_speed;
- dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2,
- val);
- }
-
- dw_pcie_dbi_ro_wr_dis(pci);
-}
-
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
.version = 0x365A,
@@ -1197,13 +1154,10 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct keystone_pcie *ks_pcie;
struct device_link **link;
struct gpio_desc *gpiod;
- void __iomem *atu_base;
struct resource *res;
unsigned int version;
void __iomem *base;
- u32 num_viewport;
struct phy **phy;
- int link_speed;
u32 num_lanes;
char name[10];
int ret;
@@ -1320,29 +1274,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- if (pci->version >= 0x480A) {
- atu_base = devm_platform_ioremap_resource_byname(pdev, "atu");
- if (IS_ERR(atu_base)) {
- ret = PTR_ERR(atu_base);
- goto err_get_sync;
- }
-
- pci->atu_base = atu_base;
-
+ if (pci->version >= 0x480A)
ret = ks_pcie_am654_set_mode(dev, mode);
- if (ret < 0)
- goto err_get_sync;
- } else {
+ else
ret = ks_pcie_set_mode(dev);
- if (ret < 0)
- goto err_get_sync;
- }
-
- link_speed = of_pci_get_max_link_speed(np);
- if (link_speed < 0)
- link_speed = 2;
-
- ks_pcie_set_link_speed(pci, link_speed);
+ if (ret < 0)
+ goto err_get_sync;
switch (mode) {
case DW_PCIE_RC_TYPE:
@@ -1351,12 +1288,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = of_property_read_u32(np, "num-viewport", &num_viewport);
- if (ret < 0) {
- dev_err(dev, "unable to read *num-viewport* property\n");
- goto err_get_sync;
- }
-
/*
* "Power Sequencing and Reset Signal Timings" table in
* PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
@@ -1370,7 +1301,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
gpiod_set_value_cansleep(gpiod, 1);
}
- ks_pcie->num_viewport = num_viewport;
pci->pp.ops = host_ops;
ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
if (ret < 0)
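
The keystone changes above follow the config-access scheme now shared by
DWC hosts: a struct pci_ops whose map_bus() returns a virtual address for
(bus, devfn, where), with the pci_generic_config_* helpers doing the
actual access. A minimal sketch of that shape (illustrative only; the
real ks_pcie_other_map_bus also programs the CFG_SETUP window before
returning, as the removed rd/wr_other_conf code did):

static void __iomem *example_map_bus(struct pci_bus *bus,
				     unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;	/* set by dw_pcie_host_init() */

	/* select the target bus/devfn in controller registers here ... */

	return pp->va_cfg0_base + where;	/* NULL means "no such device" */
}

static struct pci_ops example_child_ops = {
	.map_bus = example_map_bus,
	.read	 = pci_generic_config_read,
	.write	 = pci_generic_config_write,
};
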
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 0d151cead1b7..84206f265e54 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -20,50 +20,58 @@
 #define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address */
-struct ls_pcie_ep {
- struct dw_pcie *pci;
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
};
-#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+};
static int ls_pcie_establish_link(struct dw_pcie *pci)
{
return 0;
}
-static const struct dw_pcie_ops ls_pcie_ep_ops = {
+static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
.start_link = ls_pcie_establish_link,
};
-static const struct of_device_id ls_pcie_ep_of_match[] = {
- { .compatible = "fsl,ls-pcie-ep",},
- { },
-};
-
-static const struct pci_epc_features ls_pcie_epc_features = {
- .linkup_notifier = false,
- .msi_capable = true,
- .msix_capable = false,
- .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &ls_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
}
static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
enum pci_barno bar;
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -73,21 +81,51 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
case PCI_EPC_IRQ_MSIX:
- return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
return -EINVAL;
}
}
-static const struct dw_pcie_ep_ops pcie_ep_ops = {
+static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
.ep_init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
+ .func_conf_select = ls_pcie_ep_func_conf_select,
+};
+
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+ .dw_pcie_ops = &dw_ls_pcie_ep_ops,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { },
};
static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
@@ -96,7 +134,7 @@ static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,
int ret;
ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
+ ep->ops = pcie->drvdata->ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
@@ -119,6 +157,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
struct resource *dbi_base;
int ret;
@@ -130,15 +169,26 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
+	ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET;
- pci->dev = dev;
- pci->ops = &ls_pcie_ep_ops;
- pcie->pci = pci;
platform_set_drvdata(pdev, pcie);
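
The multi-function support above hangs on the new func_conf_select hook:
the DWC endpoint core asks the driver for a per-function offset and adds
it to every DBI access for that function. With the ls2 drvdata
(func_offset = 0x20000), function N's config header simply lives
0x20000 * N past function 0's. Illustrative arithmetic only, with a
hypothetical vendor-ID write using the accessor from the core diff
further below:

	/* write function 1's vendor ID on an ls2-class controller */
	unsigned int off = 0x20000 * 1;	/* drvdata->func_offset * func_no */

	dw_pcie_writew_dbi(pci, off + PCI_VENDOR_ID, 0x1957);
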
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 4f183b96afbb..1913dc2c8fa0 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -17,37 +17,13 @@
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/phy/phy.h>
+#include <linux/module.h>
#include "pcie-designware.h"
#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
-/* External local bus interface registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
-#define FAST_LINK_MODE BIT(7)
-#define LINK_CAPABLE_MASK GENMASK(21, 16)
-#define LINK_CAPABLE_X1 BIT(16)
-
-#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
-#define NUM_OF_LANES_MASK GENMASK(12, 8)
-#define NUM_OF_LANES_X1 BIT(8)
-#define DIRECT_SPEED_CHANGE BIT(17)
-
-#define TYPE1_HDR_OFFSET 0x0
-#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
-#define PCI_IO_EN BIT(0)
-#define PCI_MEM_SPACE_EN BIT(1)
-#define PCI_BUS_MASTER_EN BIT(2)
-
-#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
-#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
-
-#define PCIE_CAP_OFFSET 0x70
-#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
-#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
-#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
/* PCIe specific config registers */
@@ -77,11 +53,6 @@ enum pcie_data_rate {
PCIE_GEN4
};
-struct meson_pcie_mem_res {
- void __iomem *elbi_base;
- void __iomem *cfg_base;
-};
-
struct meson_pcie_clk_res {
struct clk *clk;
struct clk *port_clk;
@@ -95,7 +66,7 @@ struct meson_pcie_rc_reset {
struct meson_pcie {
struct dw_pcie pci;
- struct meson_pcie_mem_res mem_res;
+ void __iomem *cfg_base;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
@@ -134,28 +105,18 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
return 0;
}
-static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
-
- return devm_ioremap_resource(dev, res);
-}
-
static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
- mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
- if (IS_ERR(mp->mem_res.elbi_base))
- return PTR_ERR(mp->mem_res.elbi_base);
+ struct dw_pcie *pci = &mp->pci;
+
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
- mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
- if (IS_ERR(mp->mem_res.cfg_base))
- return PTR_ERR(mp->mem_res.cfg_base);
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
return 0;
}
@@ -253,24 +214,14 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
return 0;
}
-static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
-{
- writel(val, mp->mem_res.elbi_base + reg);
-}
-
-static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
-{
- return readl(mp->mem_res.elbi_base + reg);
-}
-
static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
- return readl(mp->mem_res.cfg_base + reg);
+ return readl(mp->cfg_base + reg);
}
static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
- writel(val, mp->mem_res.cfg_base + reg);
+ writel(val, mp->cfg_base + reg);
}
static void meson_pcie_assert_reset(struct meson_pcie *mp)
@@ -287,25 +238,6 @@ static void meson_pcie_init_dw(struct meson_pcie *mp)
val = meson_cfg_readl(mp, PCIE_CFG0);
val |= APP_LTSSM_ENABLE;
meson_cfg_writel(mp, val, PCIE_CFG0);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val &= ~NUM_OF_LANES_MASK;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}
static int meson_size_to_payload(struct meson_pcie *mp, int size)
@@ -327,37 +259,34 @@ static int meson_size_to_payload(struct meson_pcie *mp, int size)
static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_payload_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_rd_req_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
-}
-
-static inline void meson_enable_memory_space(struct meson_pcie *mp)
-{
- /* Set the RC Bus Master, Memory Space and I/O Space enables */
- meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
- PCIE_STATUS_COMMAND);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
static int meson_pcie_establish_link(struct meson_pcie *mp)
@@ -370,26 +299,18 @@ static int meson_pcie_establish_link(struct meson_pcie *mp)
meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
dw_pcie_setup_rc(pp);
- meson_enable_memory_space(mp);
meson_pcie_assert_reset(mp);
return dw_pcie_wait_for_link(pci);
}
-static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
{
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(&mp->pci.pp);
-}
-
-static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret;
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -410,13 +331,11 @@ static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return PCIBIOS_SUCCESSFUL;
}
-static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
static int meson_pcie_link_up(struct dw_pcie *pci)
{
@@ -463,18 +382,18 @@ static int meson_pcie_host_init(struct pcie_port *pp)
struct meson_pcie *mp = to_meson_pcie(pci);
int ret;
+ pp->bridge->ops = &meson_pci_ops;
+
ret = meson_pcie_establish_link(mp);
if (ret)
return ret;
- meson_pcie_enable_interrupts(mp);
+ dw_pcie_msi_init(pp);
return 0;
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .rd_own_conf = meson_pcie_rd_own_conf,
- .wr_own_conf = meson_pcie_wr_own_conf,
.host_init = meson_pcie_host_init,
};
@@ -493,7 +412,6 @@ static int meson_add_pcie_port(struct meson_pcie *mp,
}
pp->ops = &meson_pcie_host_ops;
- pci->dbi_base = mp->mem_res.elbi_base;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -522,6 +440,7 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->num_lanes = 1;
mp->phy = devm_phy_get(dev, "pcie");
if (IS_ERR(mp->phy)) {
@@ -589,6 +508,7 @@ static const struct of_device_id meson_pcie_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
static struct platform_driver meson_pcie_driver = {
.probe = meson_pcie_probe,
@@ -598,4 +518,8 @@ static struct platform_driver meson_pcie_driver = {
},
};
-builtin_platform_driver(meson_pcie_driver);
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
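
The meson payload/read-request changes above swap hard-coded ELBI
offsets for the standard PCIe capability layout: find the capability,
then read-modify-write PCI_EXP_DEVCTL with the generic field masks. A
condensed sketch of that sequence (the size value is illustrative; an
encoding of 1 selects a 256-byte payload per the PCIe spec):

	u16 off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = dw_pcie_readl_dbi(pci, off + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_PAYLOAD;			/* bits 7:5 */
	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(1);		/* 256-byte MPS */
	dw_pcie_writel_dbi(pci, off + PCI_EXP_DEVCTL, val);
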
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index d57d4ee15848..f973fbca90cf 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -217,14 +217,15 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
reg);
}
-static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
- unsigned int busnr,
- unsigned int devfn)
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct pcie_port *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
- struct pcie_port *pp = &pcie->pci->pp;
void __iomem *pci_base_addr;
pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
@@ -240,52 +241,14 @@ static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,
target_bus_cfg->reg_mask);
}
- return pci_base_addr;
-}
-
-static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_read(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), *val);
-
- return rc;
+ return pci_base_addr + where;
}
-static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct al_pcie *pcie = to_al_pcie(pci);
- unsigned int busnr = bus->number;
- void __iomem *pci_addr;
- int rc;
-
- pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn);
-
- rc = dw_pcie_write(pci_addr + where, size, val);
-
- dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n",
- size, pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where,
- (pci_addr + where), val);
-
- return rc;
-}
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
@@ -297,6 +260,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
u8 secondary_bus;
u32 cfg_control;
u32 reg;
+ struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
target_bus_cfg = &pcie->target_bus_cfg;
@@ -310,13 +274,13 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
target_bus_cfg->ecam_mask = ecam_bus_mask;
/* This portion is taken from the cfg_target_bus reg */
target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
- target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
target_bus_cfg->reg_mask);
- secondary_bus = pp->busn->start + 1;
- subordinate_bus = pp->busn->end;
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
/* Set the valid values of secondary and subordinate buses */
cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
@@ -339,6 +303,8 @@ static int al_pcie_host_init(struct pcie_port *pp)
struct al_pcie *pcie = to_al_pcie(pci);
int rc;
+ pp->bridge->child_ops = &al_child_pci_ops;
+
rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
if (rc)
return rc;
@@ -353,8 +319,6 @@ static int al_pcie_host_init(struct pcie_port *pp)
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .rd_other_conf = al_pcie_rd_other_conf,
- .wr_other_conf = al_pcie_wr_other_conf,
.host_init = al_pcie_host_init,
};
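
With pp->busn gone from struct pcie_port, the bus range above is pulled
from the host bridge's window list instead. A minimal sketch of that
lookup (same call as in al_pcie_config_prepare()):

	struct resource *bus_res =
		resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
	u8 secondary   = bus_res->start + 1;	/* first bus behind the port */
	u8 subordinate = bus_res->end;		/* last bus behind the port */
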
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 97d50bb50f06..929448e9e0bc 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -44,13 +44,6 @@ struct artpec_pcie_of_data {
static const struct of_device_id artpec6_pcie_of_match[];
-/* PCIe Port Logic registers (memory-mapped) */
-#define PL_OFFSET 0x700
-
-#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
-#define ACK_N_FTS_MASK GENMASK(15, 8)
-#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
-
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN BIT(24)
@@ -289,30 +282,6 @@ static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
}
}
-static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- u32 val;
-
- if (artpec6_pcie->variant != ARTPEC7)
- return;
-
- /*
- * Increase the N_FTS (Number of Fast Training Sequences)
- * to be transmitted when transitioning from L0s to L0.
- */
- val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
- val &= ~ACK_N_FTS_MASK;
- val |= ACK_N_FTS(180);
- dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
-
- /*
- * Set the Number of Fast Training Sequences that the core
- * advertises as its N_FTS during Gen2 or Gen3 link training.
- */
- dw_pcie_link_set_n_fts(pci, 180);
-}
-
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
@@ -346,29 +315,23 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-}
-
static int artpec6_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
dw_pcie_setup_rc(pp);
artpec6_pcie_establish_link(pci);
dw_pcie_wait_for_link(pci);
- artpec6_pcie_enable_interrupts(artpec6_pcie);
+ dw_pcie_msi_init(pp);
return 0;
}
@@ -412,7 +375,6 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 305bfec2424d..ad7da4ea43a5 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -12,6 +12,8 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -28,12 +30,39 @@ void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
- int flags)
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie_ep_func *ep_func;
+
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
+}
+
+static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
+{
+ unsigned int func_offset = 0;
+
+ if (ep->ops->func_conf_select)
+ func_offset = ep->ops->func_conf_select(ep, func_no);
+
+ return func_offset;
+}
+
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
{
u32 reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep *ep = &pci->ep;
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
@@ -46,7 +75,53 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
- __dw_pcie_ep_reset_bar(pci, bar, 0);
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
+}
+
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
+ u8 cap_ptr, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
+ if (!cap_ptr)
+ return 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
+ cap_id = (reg & 0x00ff);
+
+ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
+ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+ u8 next_cap_ptr;
+ u16 reg;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+ return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
@@ -54,28 +129,31 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+ dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
- dma_addr_t cpu_addr,
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_barno bar, dma_addr_t cpu_addr,
enum dw_pcie_as_type as_type)
{
int ret;
@@ -88,7 +166,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
as_type);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
@@ -101,7 +179,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
+ phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
u32 free_win;
@@ -113,8 +192,8 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
return -EINVAL;
}
- dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
@@ -130,7 +209,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
enum pci_barno bar = epf_bar->barno;
u32 atu_index = ep->bar_to_atu[bar];
- __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
clear_bit(atu_index, ep->ib_window_map);
@@ -147,14 +226,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
size_t size = epf_bar->size;
int flags = epf_bar->flags;
enum dw_pcie_as_type as_type;
- u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ u32 reg;
+ unsigned int func_offset = 0;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
as_type = DW_PCIE_AS_MEM;
else
as_type = DW_PCIE_AS_IO;
- ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
+ epf_bar->phys_addr, as_type);
if (ret)
return ret;
@@ -213,7 +298,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -227,11 +312,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
@@ -246,11 +336,16 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
@@ -266,11 +361,16 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
@@ -286,23 +386,28 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
+ unsigned int func_offset = 0;
+ struct dw_pcie_ep_func *ep_func;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
dw_pcie_dbi_ro_wr_en(pci);
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
dw_pcie_writew_dbi(pci, reg, val);
- reg = ep->msix_cap + PCI_MSIX_TABLE;
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
val = offset | bir;
dw_pcie_writel_dbi(pci, reg, val);
- reg = ep->msix_cap + PCI_MSIX_PBA;
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
dw_pcie_writel_dbi(pci, reg, val);
@@ -385,31 +490,36 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
unsigned int aligned_offset;
+ unsigned int func_offset = 0;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep->msi_cap + PCI_MSI_FLAGS;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
- reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep->msi_cap + PCI_MSI_DATA_64;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
- reg = ep->msi_cap + PCI_MSI_DATA_32;
+ reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
@@ -427,12 +537,33 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
+
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
+
+ return 0;
+}
+
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
- u16 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
+ unsigned int func_offset = 0;
u32 reg, msg_data, vec_ctrl;
unsigned int aligned_offset;
u32 tbl_offset;
@@ -440,7 +571,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int ret;
u8 bir;
- reg = ep->msix_cap + PCI_MSIX_TABLE;
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ func_offset = dw_pcie_ep_func_select(ep, func_no);
+
+ reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
@@ -505,7 +642,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
u32 reg;
int i;
- hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE);
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
dev_err(pci->dev,
"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
@@ -513,23 +651,21 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX);
+ dw_pcie_dbi_ro_wr_en(pci);
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
if (offset) {
reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
- dw_pcie_dbi_ro_wr_en(pci);
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
- dw_pcie_dbi_ro_wr_dis(pci);
}
dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -539,11 +675,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
void *addr;
+ u8 func_no;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
+ struct dw_pcie_ep_func *ep_func;
+
+ INIT_LIST_HEAD(&ep->func_list);
if (!pci->dbi_base || !pci->dbi_base2) {
dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
@@ -590,6 +730,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
ep->outbound_addr = addr;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -599,13 +742,27 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
-
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ return -ENOMEM;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
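
dw_pcie_ep_find_capability() above is the classic PCI capability-list
walk, with every read biased by the function's config offset. An
iterative restatement of the recursive helper (same logic, condensed):

	u8 found = 0;
	u8 ptr = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST) & 0xff;

	while (ptr) {
		u16 reg = dw_pcie_readw_dbi(pci, func_offset + ptr);
		u8 id = reg & 0xff;		/* capability ID */

		if (id > PCI_CAP_ID_MAX)	/* corrupt list: give up */
			break;
		if (id == cap) {
			found = ptr;		/* offset of the capability */
			break;
		}
		ptr = reg >> 8;			/* next-capability pointer */
	}
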
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 9dafecba347f..44c2a6572199 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -20,30 +20,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
-
-static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->rd_own_conf)
- return pp->ops->rd_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
-static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->wr_own_conf)
- return pp->ops->wr_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops dw_child_pcie_ops;
static void dw_msi_ack_irq(struct irq_data *d)
{
@@ -82,13 +59,13 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
unsigned long val;
u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &status);
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
if (!status)
continue;
@@ -148,6 +125,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -158,8 +136,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] |= BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -167,6 +144,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)
static void dw_pci_bottom_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
@@ -177,8 +155,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] &= ~BIT(bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -186,13 +163,14 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
@@ -288,32 +266,26 @@ void dw_pcie_free_msi(struct pcie_port *pp)
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
- if (pp->msi_page)
- __free_page(pp->msi_page);
+ if (pp->msi_data) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+
+ dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ }
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- u64 msi_target;
+ u64 msi_target = (u64)pp->msi_data;
- pp->msi_page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
return;
- }
- msi_target = (u64)pp->msi_data;
/* Program the msi_data */
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- lower_32_bits(msi_target));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
- upper_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
@@ -324,20 +296,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
- u32 hdr_type;
int ret;
raw_spin_lock_init(&pci->pp.lock);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) >> 1;
- pp->cfg1_size = resource_size(cfg_res) >> 1;
+ pp->cfg0_size = resource_size(cfg_res);
pp->cfg0_base = cfg_res->start;
- pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
dev_err(dev, "Missing *config* reg space\n");
}
@@ -346,47 +314,33 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!bridge)
return -ENOMEM;
+ pp->bridge = bridge;
+
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- pp->io_base = pci_pio_to_address(pp->io->start);
- break;
- case IORESOURCE_MEM:
- pp->mem = win->res;
- pp->mem->name = "MEM";
- pp->mem_size = resource_size(pp->mem);
- pp->mem_bus_addr = pp->mem->start - win->offset;
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
break;
case 0:
- pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) >> 1;
- pp->cfg1_size = resource_size(pp->cfg) >> 1;
- pp->cfg0_base = pp->cfg->start;
- pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
- break;
- case IORESOURCE_BUS:
- pp->busn = win->res;
+ dev_err(dev, "Missing *config* reg space\n");
+ pp->cfg0_size = resource_size(win->res);
+ pp->cfg0_base = win->res->start;
+ if (!pci->dbi_base) {
+ pci->dbi_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg0_base,
+ pp->cfg0_size);
+ if (!pci->dbi_base) {
+ dev_err(dev, "Error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
break;
}
}
- if (!pci->dbi_base) {
- pci->dbi_base = devm_pci_remap_cfgspace(dev,
- pp->cfg->start,
- resource_size(pp->cfg));
- if (!pci->dbi_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
- }
-
- pp->mem_base = pp->mem->start;
-
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
@@ -396,20 +350,13 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
- pp->cfg1_base,
- pp->cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(dev, "Error with ioremap\n");
- return -ENOMEM;
- }
- }
-
ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
if (ret)
pci->num_viewport = 2;
+ if (pci->link_gen < 1)
+ pci->link_gen = of_pci_get_max_link_speed(np);
+
if (pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
@@ -440,6 +387,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
+
+ pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
+ sizeof(pp->msi_msg),
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(pci->dev, pp->msi_data)) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ pp->msi_data = 0;
+ goto err_free_msi;
+ }
} else {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
@@ -447,47 +404,21 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
+
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
goto err_free_msi;
}
- ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
- if (ret != PCIBIOS_SUCCESSFUL) {
- dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n",
- ret);
- ret = pcibios_err_to_errno(ret);
- goto err_free_msi;
- }
- if (hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- dev_err(pci->dev,
- "PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n",
- hdr_type);
- ret = -EIO;
- goto err_free_msi;
- }
-
bridge->sysdata = pp;
- bridge->ops = &dw_pcie_ops;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret)
- goto err_free_msi;
-
- pp->root_bus = bridge->bus;
-
- if (pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
- pci_bus_size_bridges(pp->root_bus);
- pci_bus_assign_resources(pp->root_bus);
-
- list_for_each_entry(child, &pp->root_bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(pp->root_bus);
- return 0;
+ ret = pci_host_probe(bridge);
+ if (!ret)
+ return 0;
err_free_msi:
if (pci_msi_enabled() && !pp->ops->msi_host_init)
@@ -498,125 +429,104 @@ EXPORT_SYMBOL_GPL(dw_pcie_host_init);
void dw_pcie_host_deinit(struct pcie_port *pp)
{
- pci_stop_root_bus(pp->root_bus);
- pci_remove_root_bus(pp->root_bus);
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
if (pci_msi_enabled() && !pp->ops->msi_host_init)
dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
-static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val,
- bool write)
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
+ int type;
+ u32 busdev;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- if (pci_is_root_bus(bus->parent)) {
+ if (pci_is_root_bus(bus->parent))
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- if (write)
- ret = dw_pcie_write(va_cfg_base + where, size, *val);
else
- ret = dw_pcie_read(va_cfg_base + where, size, val);
-
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
-
- return ret;
-}
-
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
-{
- if (pp->ops->rd_other_conf)
- return pp->ops->rd_other_conf(pp, bus, devfn, where,
- size, val);
+ type = PCIE_ATU_TYPE_CFG1;
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
- false);
-}
-static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 val)
-{
- if (pp->ops->wr_other_conf)
- return pp->ops->wr_other_conf(pp, bus, devfn, where,
- size, val);
+ dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+ type, pp->cfg0_base,
+ busdev, pp->cfg0_size);
- return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
- true);
+ return pp->va_cfg0_base + where;
}
-static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
- int dev)
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
+ int ret;
+ struct pcie_port *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- /* If there is no link, then there is no device */
- if (!pci_is_root_bus(bus)) {
- if (!dw_pcie_link_up(pci))
- return 0;
- } else if (dev > 0)
- /* Access only one slot on each root port */
- return 0;
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+
+ if (!ret && pci->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return 1;
+ return ret;
}
-static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
+ int ret;
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
- if (pci_is_root_bus(bus))
- return dw_pcie_rd_own_conf(pp, where, size, val);
+ if (!ret && pci->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
- return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+ return ret;
}
-static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
+
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (pci_is_root_bus(bus))
- return dw_pcie_wr_own_conf(pp, where, size, val);
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
- return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+ return pci->dbi_base + where;
}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
static struct pci_ops dw_pcie_ops = {
- .read = dw_pcie_rd_conf,
- .write = dw_pcie_wr_conf,
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
void dw_pcie_setup_rc(struct pcie_port *pp)
@@ -632,18 +542,18 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_setup(pci);
- if (!pp->ops->msi_host_init) {
+ if (pci_msi_enabled() && !pp->ops->msi_host_init) {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
pp->irq_mask[ctrl] = ~0;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, pp->irq_mask[ctrl]);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
+ ~0);
}
}
@@ -671,28 +581,36 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
/*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf) {
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ struct resource_entry *tmp, *entry = NULL;
+
+ /* Get last memory resource entry */
+ resource_list_for_each_entry(tmp, &pp->bridge->windows)
+ if (resource_type(tmp->res) == IORESOURCE_MEM)
+ entry = tmp;
+
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
+ PCIE_ATU_TYPE_MEM, entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
if (pci->num_viewport > 2)
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
}
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
/* Program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
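
The MSI rework above also changes where the MSI target address comes
from: instead of allocating and mapping a whole page, dw_pcie_host_init()
DMA-maps the pcie_port's own msi_msg field once, and dw_pcie_msi_init()
just programs that address. Condensed from the hunks above (same calls,
nothing new):

	pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg,
					    sizeof(pp->msi_msg),
					    DMA_FROM_DEVICE,
					    DMA_ATTR_SKIP_CPU_SYNC);
	/* ...later, in dw_pcie_msi_init()... */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(pp->msi_data));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(pp->msi_data));
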
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 712456f6ce36..e3e300669ed5 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -39,9 +39,7 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
dw_pcie_wait_for_link(pci);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ dw_pcie_msi_init(pp);
return 0;
}
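
The IS_ENABLED(CONFIG_PCI_MSI) guard disappears from the platform drivers in this series because the check now lives inside the core helper. A sketch of the assumed shape of that helper (not the verbatim upstream body), which lets callers invoke it unconditionally:

void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the DMA address that MSI writes are steered to */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}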
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index b723e0cc41fb..c2dea8fc97c8 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/types.h>
#include "../../pci.h"
@@ -166,21 +167,6 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
-{
- int ret;
- u32 val;
-
- if (pci->ops->read_dbi2)
- return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
-
- ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
- if (ret)
- dev_err(pci->dev, "read DBI address failed\n");
-
- return val;
-}
-
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
@@ -195,31 +181,31 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
dev_err(pci->dev, "write DBI address failed\n");
}
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
int ret;
u32 val;
if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
+ return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, size, &val);
+ ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
int ret;
if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
+ pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, size, val);
+ ret = dw_pcie_write(pci->atu_base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
@@ -239,9 +225,10 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr,
- u64 pci_addr, u32 size)
+static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int type,
+ u64 cpu_addr, u64 pci_addr,
+ u32 size)
{
u32 retries, val;
u64 limit_addr = cpu_addr + size - 1;
@@ -259,7 +246,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
upper_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
+ type | PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
PCIE_ATU_ENABLE);
@@ -278,8 +265,9 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u32 size)
{
u32 retries, val;
@@ -287,8 +275,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
- pci_addr, size);
+ dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
return;
}
@@ -304,7 +292,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
lower_32_bits(pci_addr));
dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
/*
@@ -321,6 +310,21 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
+void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u32 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
+}
+
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u32 size)
+{
+ __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
+}
+
static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
@@ -336,8 +340,8 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
dw_pcie_writel_atu(pci, offset + reg, val);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
- int bar, u64 cpu_addr,
+static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
+ int index, int bar, u64 cpu_addr,
enum dw_pcie_as_type as_type)
{
int type;
@@ -359,8 +363,10 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EINVAL;
}
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
PCIE_ATU_ENABLE |
PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
@@ -381,14 +387,15 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
return -EBUSY;
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type)
{
int type;
u32 retries, val;
if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+ return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
cpu_addr, as_type);
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
@@ -407,9 +414,11 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
return -EINVAL;
}
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
- | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
+ PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
@@ -444,7 +453,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
}
dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
@@ -488,50 +497,41 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
- u32 reg, val;
+ u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
- reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
- reg &= ~PCI_EXP_LNKCTL2_TLS;
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
switch (pcie_link_speed[link_gen]) {
case PCIE_SPEED_2_5GT:
- reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
case PCIE_SPEED_5_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
break;
case PCIE_SPEED_8_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
break;
case PCIE_SPEED_16_0GT:
- reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
break;
default:
/* Use hardware capability */
- val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
- val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
- reg &= ~PCI_EXP_LNKCTL2_HASD;
- reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
break;
}
- dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
-void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
-{
- u32 val;
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~PORT_LOGIC_N_FTS_MASK;
- val |= n_fts & PORT_LOGIC_N_FTS_MASK;
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
-EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
@@ -546,32 +546,58 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
void dw_pcie_setup(struct dw_pcie *pci)
{
- int ret;
u32 val;
- u32 lanes;
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
if (pci->version >= 0x480A || (!pci->version &&
dw_pcie_iatu_unroll_enabled(pci))) {
pci->iatu_unroll_enabled = true;
if (!pci->atu_base)
+ pci->atu_base =
+ devm_platform_ioremap_resource_byname(pdev, "atu");
+ if (IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
+ if (pci->link_gen > 0)
+ dw_pcie_link_set_max_speed(pci, pci->link_gen);
- ret = of_property_read_u32(np, "num-lanes", &lanes);
- if (ret) {
- dev_dbg(pci->dev, "property num-lanes isn't found\n");
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[pci->link_gen - 1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+ if (!pci->num_lanes) {
+ dev_dbg(pci->dev, "Using h/w default number of lanes\n");
return;
}
/* Set the number of lanes */
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
val &= ~PORT_LINK_MODE_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LINK_MODE_1_LANES;
break;
@@ -585,7 +611,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_MODE_8_LANES;
break;
default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
return;
}
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
@@ -593,7 +619,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
/* Set link width speed control register */
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (lanes) {
+ switch (pci->num_lanes) {
case 1:
val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index f911760dcc69..9d2f511f13fa 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -32,10 +32,18 @@
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
@@ -80,9 +88,11 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
@@ -95,6 +105,9 @@
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@@ -160,14 +173,7 @@ enum dw_pcie_device_mode {
};
struct dw_pcie_host_ops {
- int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
- int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
- int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
- int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
int (*host_init)(struct pcie_port *pp);
- void (*scan_bus)(struct pcie_port *pp);
void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
};
@@ -176,30 +182,20 @@ struct pcie_port {
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u32 cfg1_size;
resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
- u64 mem_base;
- phys_addr_t mem_bus_addr;
- u32 mem_size;
- struct resource *cfg;
- struct resource *io;
- struct resource *mem;
- struct resource *busn;
int irq;
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
+ u16 msi_msg;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
- struct pci_bus *root_bus;
+ struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -215,10 +211,26 @@ struct dw_pcie_ep_ops {
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+ /*
+ * Provide a way to implement per-function config space access when
+ * platforms differ. If each function sits at a different offset in
+ * the DBI space, return that function's offset; if the platform
+ * instead selects the function by writing a register, perform the
+ * write in this callback in the platform driver and return 0.
+ */
+ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+};
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ep {
struct pci_epc *epc;
+ struct list_head func_list;
const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
@@ -231,8 +243,6 @@ struct dw_pcie_ep {
u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
- u8 msi_cap; /* MSI capability offset */
- u8 msix_cap; /* MSI-X capability offset */
struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
@@ -242,8 +252,6 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
- size_t size);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
@@ -263,6 +271,9 @@ struct dw_pcie {
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
unsigned int version;
+ int num_lanes;
+ int link_gen;
+ u8 n_fts[2];
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -278,20 +289,19 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size);
-void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
-void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen);
-void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
u32 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int bar, u64 cpu_addr,
+ enum dw_pcie_as_type as_type);
void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
@@ -331,21 +341,6 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
-static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_dbi2(pci, reg, 0x4);
-}
-
-static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
-{
- dw_pcie_write_atu(pci, reg, 0x4, val);
-}
-
-static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
-{
- return dw_pcie_read_atu(pci, reg, 0x4);
-}
-
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -376,6 +371,8 @@ void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_host_deinit(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
@@ -407,6 +404,12 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_PCIE_DW_EP
@@ -420,7 +423,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
@@ -461,8 +468,21 @@ static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
#endif
#endif /* _PCIE_DESIGNWARE_H */
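
PCIE_ATU_FUNC_NUM() tags an ATU region with a physical function number in CTRL1, and PCIE_ATU_FUNC_NUM_MATCH_EN in CTRL2 makes inbound BAR matching honour that tag. A minimal sketch of how an endpoint driver could use the func_no-aware helper above to back BAR2 of physical function 1 with local memory (ep_init_bar and buf_phys are illustrative names):

static int ep_init_bar(struct dw_pcie *pci, dma_addr_t buf_phys)
{
	/* Claim inbound TLPs that target function 1, BAR 2 */
	return dw_pcie_prog_inbound_atu(pci, 1, PCIE_ATU_REGION_INDEX0,
					BAR_2, buf_phys, DW_PCIE_AS_MEM);
}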
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 2a2835746077..afc1abbe49aa 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -122,32 +122,37 @@ static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
histb_pcie_dbi_w_mode(&pci->pp, false);
}
-static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
- int size, u32 *val)
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_r_mode(pp, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- histb_pcie_dbi_r_mode(pp, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_w_mode(pp, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- histb_pcie_dbi_w_mode(pp, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
static int histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -194,17 +199,15 @@ static int histb_pcie_establish_link(struct pcie_port *pp)
static int histb_pcie_host_init(struct pcie_port *pp)
{
- histb_pcie_establish_link(pp);
+ pp->bridge->ops = &histb_pci_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ histb_pcie_establish_link(pp);
+ dw_pcie_msi_init(pp);
return 0;
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .rd_own_conf = histb_pcie_rd_own_conf,
- .wr_own_conf = histb_pcie_wr_own_conf,
.host_init = histb_pcie_host_init,
};
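
With the rd_own_conf/wr_own_conf hooks gone, a driver that needs special root-bus accessors installs ordinary pci_ops on the host bridge from its host_init hook, which runs after dw_pcie_host_init() has allocated pp->bridge. A sketch under that assumption (my_pci_ops is hypothetical):

static int my_pcie_host_init(struct pcie_port *pp)
{
	/* pp->bridge is valid here; override the default root-bus ops */
	pp->bridge->ops = &my_pci_ops;

	return 0;
}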
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index c3b3a1d162b5..5650cb78acba 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -67,14 +67,9 @@ struct intel_pcie_port {
void __iomem *app_base;
struct gpio_desc *reset_gpio;
u32 rst_intrvl;
- u32 max_speed;
- u32 link_gen;
- u32 max_width;
- u32 n_fts;
struct clk *core_clk;
struct reset_control *core_rst;
struct phy *phy;
- u8 pcie_cap_ofst;
};
static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
@@ -134,11 +129,7 @@ static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
{
u32 val;
- u8 offset = lpp->pcie_cap_ofst;
-
- val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP);
- lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
- lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val);
+ u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
@@ -146,41 +137,29 @@ static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
}
-static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp)
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- u32 val, mask;
-
- switch (pcie_link_speed[lpp->max_speed]) {
- case PCIE_SPEED_8_0GT:
- lpp->n_fts = PORT_AFR_N_FTS_GEN3;
+ switch (pci->link_gen) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
- case PCIE_SPEED_16_0GT:
- lpp->n_fts = PORT_AFR_N_FTS_GEN4;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
break;
default:
- lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT;
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
break;
}
-
- mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK;
- val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) |
- FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts);
- pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val);
-
- /* Port Link Control Register */
- pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN,
- PORT_LINK_DLL_LINK_EN);
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}
static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)
{
intel_pcie_ltssm_disable(lpp);
intel_pcie_link_setup(lpp);
+ intel_pcie_init_n_fts(&lpp->pci);
dw_pcie_setup_rc(&lpp->pci.pp);
dw_pcie_upconfig_setup(&lpp->pci);
- intel_pcie_port_logic_setup(lpp);
- dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen);
- dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);
}
static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
@@ -275,20 +254,11 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return ret;
}
- ret = device_property_match_string(dev, "device_type", "pci");
- if (ret) {
- dev_err(dev, "Failed to find pci device type: %d\n", ret);
- return ret;
- }
-
ret = device_property_read_u32(dev, "reset-assert-ms",
&lpp->rst_intrvl);
if (ret)
lpp->rst_intrvl = RESET_INTERVAL_MS;
- ret = of_pci_get_max_link_speed(dev->of_node);
- lpp->link_gen = ret < 0 ? 0 : ret;
-
lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(lpp->app_base))
return PTR_ERR(lpp->app_base);
@@ -313,8 +283,9 @@ static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
{
u32 value;
int ret;
+ struct dw_pcie *pci = &lpp->pci;
- if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT)
+ if (pci->link_gen < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -343,7 +314,6 @@ static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
{
- struct device *dev = lpp->pci.dev;
int ret;
intel_pcie_core_rst_assert(lpp);
@@ -361,17 +331,6 @@ static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
goto clk_err;
}
- if (!lpp->pcie_cap_ofst) {
- ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
- if (!ret) {
- ret = -ENXIO;
- dev_err(dev, "Invalid PCIe capability offset\n");
- goto app_init_err;
- }
-
- lpp->pcie_cap_ofst = ret;
- }
-
intel_pcie_rc_setup(lpp);
ret = intel_pcie_app_logic_setup(lpp);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index e496f51e0152..d0a6a2dee6f5 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -330,34 +330,37 @@ static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
}
-static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+};
+
static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size)
{
@@ -423,10 +426,10 @@ static int kirin_pcie_establish_link(struct pcie_port *pp)
static int kirin_pcie_host_init(struct pcie_port *pp)
{
- kirin_pcie_establish_link(pp);
+ pp->bridge->ops = &kirin_pci_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ kirin_pcie_establish_link(pp);
+ dw_pcie_msi_init(pp);
return 0;
}
@@ -438,8 +441,6 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .rd_own_conf = kirin_pcie_rd_own_conf,
- .wr_own_conf = kirin_pcie_wr_own_conf,
.host_init = kirin_pcie_host_init,
};
@@ -507,8 +508,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
"reset-gpios", 0);
- if (kirin_pcie->gpio_id_reset < 0)
+ if (kirin_pcie->gpio_id_reset == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(kirin_pcie->gpio_id_reset)) {
+ dev_err(dev, "unable to get a valid gpio pin\n");
return -ENODEV;
+ }
ret = kirin_pcie_power_on(kirin_pcie);
if (ret)
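
of_get_named_gpio() can return -EPROBE_DEFER when the GPIO provider has not bound yet, and that code must be propagated so the driver core retries the probe, rather than being folded into -ENODEV. A sketch of the lookup pattern the hunk above adopts (np and the property name are illustrative):

int gpio = of_get_named_gpio(np, "reset-gpios", 0);

if (gpio == -EPROBE_DEFER)
	return -EPROBE_DEFER;	/* provider not ready: probe again later */
if (!gpio_is_valid(gpio))
	return -ENODEV;		/* property missing or malformed */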
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 3aac77a295ba..b4761640ffd9 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -67,10 +67,6 @@
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE20_CAP 0x70
-#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2)
-#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP)
-#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL 0x2FD7F
#define PCIE20_PARF_Q2A_FLUSH 0x1AC
@@ -193,7 +189,6 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
const struct qcom_pcie_ops *ops;
- int gen;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -302,6 +297,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
reset_control_assert(res->por_reset);
reset_control_assert(res->ext_reset);
reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -314,6 +312,16 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
u32 val;
int ret;
+ /* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+ reset_control_assert(res->ext_reset);
+ reset_control_assert(res->phy_reset);
+
+ writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
if (ret < 0) {
dev_err(dev, "cannot enable regulators\n");
@@ -394,12 +402,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
/* wait for clock acquisition */
usleep_range(1000, 1500);
- if (pcie->gen == 1) {
- val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
- val |= PCI_EXP_LNKSTA_CLS_2_5GB;
- writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2);
- }
-
/* Set the Max TLP size to 2K, instead of using default of 4K */
writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
@@ -1017,6 +1019,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int i, ret;
u32 val;
@@ -1092,14 +1095,14 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+ writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
- val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_ASPMS;
- writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
- writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base +
- PCIE20_DEVICE_CONTROL2_STATUS2);
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
return 0;
@@ -1252,7 +1255,8 @@ static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
- u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
@@ -1280,9 +1284,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
}
dw_pcie_setup_rc(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ dw_pcie_msi_init(pp);
qcom_ep_reset_deassert(pcie);
@@ -1399,10 +1401,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node);
- if (pcie->gen < 0)
- pcie->gen = 2;
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
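
Several hunks above replace the hardcoded 0x70 capability offset with a runtime walk of the standard capability list through the DBI window. A minimal sketch of that usage; the -ENXIO fallback is an assumption, not part of this patch:

u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 lnkcap;

if (!offset)
	return -ENXIO;	/* no PCI Express capability found */

lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);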
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 62846562da0b..e348225f651f 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -26,7 +26,6 @@ struct spear13xx_pcie {
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- bool is_gen1;
};
struct pcie_app_reg {
@@ -65,8 +64,6 @@ struct pcie_app_reg {
/* CR6 */
#define MSI_CTRL_INT (1 << 26)
-#define EXP_CAP_ID_OFFSET 0x70
-
#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
@@ -75,7 +72,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
struct pcie_port *pp = &pci->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
u32 val;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
if (dw_pcie_link_up(pci)) {
dev_err(pci->dev, "link already up\n");
@@ -89,36 +86,12 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
* default value in capability register is 512 bytes. So force
* it to 128 here.
*/
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
-
- dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
- /*
- * if is_gen1 is set then handle it, so that some buggy card
- * also works
- */
- if (spear13xx_pcie->is_gen1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, val);
- }
- }
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -278,7 +251,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- spear13xx_pcie->is_gen1 = true;
+ pci->link_gen = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
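
The per-driver is_gen1 flag becomes the generic pci->link_gen field, which dw_pcie_link_set_max_speed() consumes during dw_pcie_setup(). A sketch of the resulting flow, assuming the core reads the standard max-link-speed DT property before the board quirk overrides it:

pci->link_gen = of_pci_get_max_link_speed(np);	/* "max-link-speed" */

if (of_property_read_bool(np, "st,pcie-is-gen1"))
	pci->link_gen = 1;	/* clamp buggy cards to Gen1 */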
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 70498689d0c0..f920e7efe118 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -183,19 +183,7 @@
#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
#define EVENT_COUNTER_GROUP_5 0x5
-#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C
-#define ENTER_ASPM BIT(30)
-#define L0S_ENTRANCE_LAT_SHIFT 24
-#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
-#define L1_ENTRANCE_LAT_SHIFT 27
-#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
-#define N_FTS_SHIFT 8
-#define N_FTS_MASK GENMASK(7, 0)
#define N_FTS_VAL 52
-
-#define PORT_LOGIC_GEN2_CTRL 0x80C
-#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17)
-#define FTS_MASK GENMASK(7, 0)
#define FTS_VAL 52
#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
@@ -296,7 +284,6 @@ struct tegra_pcie_dw {
u8 init_link_width;
u32 msi_ctrl_int;
u32 num_lanes;
- u32 max_speed;
u32 cid;
u32 cfg_link_cap_l1sub;
u32 pcie_cap_base;
@@ -401,9 +388,9 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
}
@@ -568,42 +555,44 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
/*
 * This is an endpoint-mode-specific register that happens to appear
 * even when the controller is operating in Root Port mode, and the
 * system hangs when it is accessed while the link is in the ASPM L1
 * state, so skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL) {
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
*val = 0x00000000;
return PCIBIOS_SUCCESSFUL;
}
- return dw_pcie_read(pci->dbi_base + where, size, val);
+ return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
/*
 * This is an endpoint-mode-specific register that happens to appear
 * even when the controller is operating in Root Port mode, and the
 * system hangs when it is accessed while the link is in the ASPM L1
 * state, so skip accessing it altogether.
*/
- if (where == PORT_LOGIC_MSIX_DOORBELL)
+ if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
return PCIBIOS_SUCCESSFUL;
- return dw_pcie_write(pci->dbi_base + where, size, val);
+ return pci_generic_config_write(bus, devfn, where, size, val);
}
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
@@ -692,30 +681,23 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
/* Program L0s and L1 entrance latencies */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~L0S_ENTRANCE_LAT_MASK;
- val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
- val |= ENTER_ASPM;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static int init_debugfs(struct tegra_pcie_dw *pcie)
+static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- struct dentry *d;
-
- d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
- pcie->debugfs, aspm_state_cnt);
- if (IS_ERR_OR_NULL(d))
- dev_err(pcie->dev,
- "Failed to create debugfs file \"aspm_state_cnt\"\n");
-
- return 0;
+ debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
-static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
@@ -827,26 +809,24 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
/* Program init preset */
for (i = 0; i < pcie->num_lanes; i++) {
- dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, &val);
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
- dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
- + (i * 2), 2, val);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
offset = dw_pcie_find_ext_capability(pci,
PCI_EXT_CAP_ID_PL_16GT) +
PCI_PL_16GT_LE_CTRL;
- dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
+ val = dw_pcie_readb_dbi(pci, offset + i);
val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
val |= GEN3_GEN4_EQ_PRESET_INIT;
val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
val |= (GEN3_GEN4_EQ_PRESET_INIT <<
PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
- dw_pcie_write(pci->dbi_base + offset + i, 1, val);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
}
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -892,17 +872,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Configure FTS */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~(N_FTS_MASK << N_FTS_SHIFT);
- val |= N_FTS_VAL << N_FTS_SHIFT;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
-
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val &= ~FTS_MASK;
- val |= FTS_VAL;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
-
/* Enable as 0xFFFF0001 response for CRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
@@ -910,16 +879,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
AMBA_ERROR_RESPONSE_CRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
- /* Configure Max Speed from DT */
- if (pcie->max_speed && pcie->max_speed != -EINVAL) {
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_SLS;
- val |= pcie->max_speed;
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
- val);
- }
-
/* Configure Max lane width from DT */
val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_MLW;
@@ -970,6 +929,8 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val, tmp, offset, speed;
+ pp->bridge->ops = &tegra_pci_ops;
+
tegra_pcie_prepare_host(pp);
if (dw_pcie_wait_for_link(pci)) {
@@ -1057,8 +1018,6 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
};
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .rd_own_conf = tegra_pcie_dw_rd_own_conf,
- .wr_own_conf = tegra_pcie_dw_wr_own_conf,
.host_init = tegra_pcie_dw_host_init,
.set_num_vectors = tegra_pcie_set_msi_vec_num,
};
@@ -1129,8 +1088,6 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return ret;
}
- pcie->max_speed = of_pci_get_max_link_speed(np);
-
ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
if (ret) {
dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
@@ -1262,9 +1219,9 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
* 5.2 Link State Power Management (Page #428).
*/
- list_for_each_entry(child, &pp->root_bus->children, node) {
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
/* Bring downstream devices to D0 if they are not already in */
- if (child->parent == pp->root_bus) {
+ if (child->parent == pp->bridge->bus) {
root_bus = child;
break;
}
@@ -1641,10 +1598,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
}
pcie->debugfs = debugfs_create_dir(name, NULL);
- if (!pcie->debugfs)
- dev_err(dev, "Failed to create debugfs\n");
- else
- init_debugfs(pcie);
+ init_debugfs(pcie);
return ret;
@@ -1817,27 +1771,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
- /* Configure N_FTS & FTS */
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
- val &= ~(N_FTS_MASK << N_FTS_SHIFT);
- val |= N_FTS_VAL << N_FTS_SHIFT;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
-
- val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
- val &= ~FTS_MASK;
- val |= FTS_VAL;
- dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
-
- /* Configure Max Speed from DT */
- if (pcie->max_speed && pcie->max_speed != -EINVAL) {
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
- PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_SLS;
- val |= pcie->max_speed;
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
- val);
- }
-
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
@@ -2066,6 +1999,9 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pci = &pcie->pci;
pci->dev = &pdev->dev;
pci->ops = &tegra_dw_pcie_ops;
+ pci->n_fts[0] = N_FTS_VAL;
+ pci->n_fts[1] = FTS_VAL;
+
pp = &pci->pp;
pcie->dev = &pdev->dev;
pcie->mode = (enum dw_pcie_device_mode)data->mode;
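
Tegra's private FTS programming goes away because dw_pcie_setup() now writes PCIE_PORT_AFR for Gen1 and PCIE_LINK_WIDTH_SPEED_CONTROL for Gen2+ from pci->n_fts[]. A sketch of the platform side, which only needs to seed the two values before the core setup runs:

pci->n_fts[0] = N_FTS_VAL;	/* Gen1 fast training sequences */
pci->n_fts[1] = FTS_VAL;	/* Gen2 and above */

dw_pcie_setup_rc(pp);		/* core programs the port logic */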
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 3a7f403b57b8..48176265c867 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -322,8 +322,7 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ dw_pcie_msi_init(pp);
return 0;
}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 3adec419a45b..a2632d02ce8f 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -480,7 +480,6 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
- int ret;
/* setup INTx */
rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
@@ -494,11 +493,7 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
raw_spin_lock_init(&rp->intx_mask_lock);
/* setup MSI */
- ret = mobiveil_allocate_msi_domains(pcie);
- if (ret)
- return ret;
-
- return 0;
+ return mobiveil_allocate_msi_domains(pcie);
}
static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 1559f79e63b6..0be485a25327 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -9,11 +9,12 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
@@ -251,6 +252,25 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
}
}
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ u32 reg;
+
+ if (!pcie->reset_gpio)
+ return;
+
+ /* PERST does not work for some cards when link training is enabled */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
{
int ret, neg_gen;
@@ -299,6 +319,21 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
int neg_gen = -1, gen;
/*
+ * Reset PCIe card via PERST# signal. Some cards are not detected
+ * during link training when they are in a non-initial state.
+ */
+ advk_pcie_issue_perst(pcie);
+
+ /*
+ * The PERST# signal could have been asserted by the pinctrl subsystem
+ * before the probe() callback was called, or issued explicitly by the
+ * reset GPIO function advk_pcie_issue_perst(), putting the endpoint
+ * into fundamental reset. As required by the PCI Express spec, a delay
+ * of at least 100ms is needed after such a reset before link training.
+ */
+ msleep(PCI_PM_D3COLD_WAIT);
+
+ /*
* Try link training at link gen specified by device tree property
* 'max-link-speed'. If this fails, iteratively train at lower gen.
*/
@@ -330,31 +365,10 @@ err:
dev_err(dev, "link never came up\n");
}
-static void advk_pcie_issue_perst(struct advk_pcie *pcie)
-{
- u32 reg;
-
- if (!pcie->reset_gpio)
- return;
-
- /* PERST does not work for some cards when link training is enabled */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
- /* 10ms delay is needed for some cards */
- dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
- gpiod_set_value_cansleep(pcie->reset_gpio, 1);
- usleep_range(10000, 11000);
- gpiod_set_value_cansleep(pcie->reset_gpio, 0);
-}
-
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
- advk_pcie_issue_perst(pcie);
-
/* Enable TX */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
@@ -431,15 +445,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
- /*
- * PERST# signal could have been asserted by pinctrl subsystem before
- * probe() callback has been called or issued explicitly by reset gpio
- * function advk_pcie_issue_perst(), making the endpoint going into
- * fundamental reset. As required by PCI Express spec a delay for at
- * least 100ms after such a reset before link training is needed.
- */
- msleep(PCI_PM_D3COLD_WAIT);
-
advk_pcie_train_link(pcie);
/*
@@ -607,7 +612,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
* Initialize the configuration space of the PCI-to-PCI bridge
* associated with the given PCIe interface.
*/
-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
@@ -633,8 +638,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge, 0);
-
+ return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -1077,7 +1081,9 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)
}
ret = phy_power_on(pcie->phy);
- if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
+ } else if (ret) {
phy_exit(pcie->phy);
return ret;
}
@@ -1122,6 +1128,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1167,7 +1174,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
advk_pcie_setup_hw(pcie);
- advk_sw_pci_bridge_init(pcie);
+ ret = advk_sw_pci_bridge_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register emulated root PCI bridge\n");
+ return ret;
+ }
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
@@ -1195,18 +1206,37 @@ static int advk_pcie_probe(struct platform_device *pdev)
return 0;
}
+static int advk_pcie_remove(struct platform_device *pdev)
+{
+ struct advk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+
+ return 0;
+}
+
static const struct of_device_id advk_pcie_of_match_table[] = {
{ .compatible = "marvell,armada-3700-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);
static struct platform_driver advk_pcie_driver = {
.driver = {
.name = "advk-pcie",
.of_match_table = advk_pcie_of_match_table,
- /* Driver unloading/unbinding currently not supported */
- .suppress_bind_attrs = true,
},
.probe = advk_pcie_probe,
+ .remove = advk_pcie_remove,
};
-builtin_platform_driver(advk_pcie_driver);
+module_platform_driver(advk_pcie_driver);
+
+MODULE_DESCRIPTION("Aardvark PCIe controller");
+MODULE_LICENSE("GPL v2");
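
Converting the driver to a module requires a remove() that tears things down in the reverse of probe: the PCI core must stop using the bus before the IRQ domains it depends on are disposed. A sketch of that ordering with hypothetical my_pcie helpers:

static int my_pcie_remove(struct platform_device *pdev)
{
	struct my_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);		/* unbind device drivers */
	pci_remove_root_bus(bridge->bus);	/* free bus/device objects */
	pci_unlock_rescan_remove();

	/* Nothing can raise interrupts through these domains any more */
	my_pcie_remove_msi_irq_domain(pcie);
	my_pcie_remove_irq_domain(pcie);

	return 0;
}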
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index fc4c3a15e570..03ed5cb1c4b2 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1276,11 +1276,25 @@ static void hv_irq_unmask(struct irq_data *data)
exit_unlock:
spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
- if (res) {
+ /*
+ * During hibernation, when a CPU is offlined, the kernel tries
+ * to move the interrupt to the remaining CPUs that haven't
+ * been offlined yet. In this case, the below hv_do_hypercall()
+ * always fails since the vmbus channel has been closed:
+ * refer to cpu_disable_common() -> fixup_irqs() ->
+ * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
+ *
+ * Suppress the error message for hibernation because the failure
+ * during hibernation does not matter (at this time all the devices
+ * have been frozen). Note: the correct affinity info is still updated
+ * into the irqdata data structure in migrate_one_irq() ->
+ * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
+ * resumes, hv_pci_restore_msi_state() is able to correctly restore
+ * the interrupt with the correct affinity.
+ */
+ if (res && hbus->state != hv_pcibus_removing)
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
- return;
- }
pci_msi_unmask_irq(data);
}
@@ -1531,16 +1545,8 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_unmask = hv_irq_unmask,
};
-static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return arg->msi_hwirq;
-}
-
static struct msi_domain_ops hv_msi_ops = {
- .get_hwirq = hv_msi_domain_ops_get_hwirq,
.msi_prepare = pci_msi_prepare,
- .set_desc = pci_msi_set_desc,
.msi_free = hv_msi_free,
};
@@ -2515,7 +2521,10 @@ static void hv_pci_onchannelcallback(void *context)
/**
* hv_pci_protocol_negotiation() - Set up protocol
- * @hdev: VMBus's tracking struct for this root PCI bus
+ * @hdev: VMBus's tracking struct for this root PCI bus.
+ * @version: Array of supported channel protocol versions in
+ * the order of probing - highest goes first.
+ * @num_version: Number of elements in the version array.
*
* This driver is intended to support running on Windows 10
* (server) and later versions. It will not run on earlier
@@ -3372,6 +3381,34 @@ static int hv_pci_suspend(struct hv_device *hdev)
return 0;
}
+static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
+{
+ struct msi_desc *entry;
+ struct irq_data *irq_data;
+
+ for_each_pci_msi_entry(entry, pdev) {
+ irq_data = irq_get_irq_data(entry->irq);
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
+
+ hv_compose_msi_msg(irq_data, &entry->msg);
+ }
+
+ return 0;
+}
+
+/*
+ * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
+ * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
+ * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
+ * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
+ * Table entries.
+ */
+static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
+{
+ pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
+}
+
static int hv_pci_resume(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
@@ -3405,6 +3442,8 @@ static int hv_pci_resume(struct hv_device *hdev)
prepopulate_bars(hbus);
+ hv_pci_restore_msi_state(hbus);
+
hbus->state = hv_pcibus_installed;
return 0;
out:
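
hv_pci_restore_msi_state() above relies on pci_walk_bus(), which calls a callback for every device below the given bus and aborts the walk if the callback returns non-zero. A minimal sketch of the same traversal pattern; the counting callback is hypothetical:

#include <linux/pci.h>

/* Hypothetical callback: count the devices below a bus. */
static int count_dev(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;			/* non-zero would stop the walk */
}

static int count_devices(struct pci_bus *bus)
{
	int n = 0;

	pci_walk_bus(bus, count_dev, &n);
	return n;
}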
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 719c19fe2bfb..48169b1e3817 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -183,7 +183,6 @@ static int loongson_pci_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct pci_host_bridge *bridge;
struct resource *regs;
- int err;
if (!node)
return -ENODEV;
@@ -222,11 +221,7 @@ static int loongson_pci_probe(struct platform_device *pdev)
bridge->ops = &loongson_pci_ops;
bridge->map_irq = loongson_map_irq;
- err = pci_host_probe(bridge);
- if (err)
- return err;
-
- return 0;
+ return pci_host_probe(bridge);
}
static struct platform_driver loongson_pci_driver = {
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index c39978b750ec..ed13e81cd691 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -12,7 +12,6 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mbus.h>
-#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
@@ -70,7 +69,6 @@ struct mvebu_pcie_port;
struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
- struct msi_controller *msi;
struct resource io;
struct resource realio;
struct resource mem;
@@ -960,25 +958,16 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * We can't use devm_of_pci_get_host_bridge_resources() because we
- * need to parse our special DT properties encoding the MEM and IO
- * apertures.
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
+ * so we need extra resource setup parsing our special DT properties encoding
+ * the MEM and IO apertures.
*/
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
- /* Get the bus range */
- ret = of_pci_parse_bus_range(np, &pcie->busn);
- if (ret) {
- dev_err(dev, "failed to parse bus-range property: %d\n", ret);
- return ret;
- }
- pci_add_resource(&bridge->windows, &pcie->busn);
-
/* Get the PCIe memory aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
@@ -988,6 +977,9 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
pcie->mem.name = "PCI MEM";
pci_add_resource(&bridge->windows, &pcie->mem);
+ ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
+ if (ret)
+ return ret;
/* Get the PCIe IO aperture */
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
@@ -1001,9 +993,12 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
pcie->realio.name = "PCI I/O";
pci_add_resource(&bridge->windows, &pcie->realio);
+ ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
+ if (ret)
+ return ret;
}
- return devm_request_pci_bus_resources(dev, &bridge->windows);
+ return 0;
}
/*
@@ -1127,7 +1122,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->ops = &mvebu_pcie_ops;
bridge->align_resource = mvebu_pcie_align_resource;
- bridge->msi = pcie->msi;
return mvebu_pci_host_probe(bridge);
}
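
The two devm_request_resource() calls added above claim the MEM and IO apertures in the global iomem_resource/ioport_resource trees, replacing the bulk devm_request_pci_bus_resources() call. A condensed sketch of the pattern, assuming a hypothetical statically-described aperture:

#include <linux/ioport.h>
#include <linux/pci.h>

/* Hypothetical aperture discovered from SoC-specific registers */
static struct resource mem_aperture = {
	.name	= "PCI MEM",
	.flags	= IORESOURCE_MEM,
	.start	= 0xe0000000,
	.end	= 0xefffffff,
};

static int claim_aperture(struct device *dev, struct pci_host_bridge *bridge)
{
	/* Expose the window to the PCI core ... */
	pci_add_resource(&bridge->windows, &mem_aperture);
	/* ... and reserve it in the global resource tree. */
	return devm_request_resource(dev, &iomem_resource, &mem_aperture);
}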
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index c1d34353c29b..8fcabed7c6a6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2564,36 +2564,14 @@ static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
return 0;
}
-static const struct seq_operations tegra_pcie_ports_seq_ops = {
+static const struct seq_operations tegra_pcie_ports_sops = {
.start = tegra_pcie_ports_seq_start,
.next = tegra_pcie_ports_seq_next,
.stop = tegra_pcie_ports_seq_stop,
.show = tegra_pcie_ports_seq_show,
};
-static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
-{
- struct tegra_pcie *pcie = inode->i_private;
- struct seq_file *s;
- int err;
-
- err = seq_open(file, &tegra_pcie_ports_seq_ops);
- if (err)
- return err;
-
- s = file->private_data;
- s->private = pcie;
-
- return 0;
-}
-
-static const struct file_operations tegra_pcie_ports_ops = {
- .owner = THIS_MODULE,
- .open = tegra_pcie_ports_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
{
@@ -2601,24 +2579,12 @@ static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
pcie->debugfs = NULL;
}
-static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
- struct dentry *file;
-
pcie->debugfs = debugfs_create_dir("pcie", NULL);
- if (!pcie->debugfs)
- return -ENOMEM;
- file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
- pcie, &tegra_pcie_ports_ops);
- if (!file)
- goto remove;
-
- return 0;
-
-remove:
- tegra_pcie_debugfs_exit(pcie);
- return -ENOMEM;
+ debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
+ &tegra_pcie_ports_fops);
}
static int tegra_pcie_probe(struct platform_device *pdev)
@@ -2672,11 +2638,8 @@ static int tegra_pcie_probe(struct platform_device *pdev)
goto pm_runtime_put;
}
- if (IS_ENABLED(CONFIG_DEBUG_FS)) {
- err = tegra_pcie_debugfs_init(pcie);
- if (err < 0)
- dev_err(dev, "failed to setup debugfs: %d\n", err);
- }
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ tegra_pcie_debugfs_init(pcie);
return 0;
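
DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports) generates the tegra_pcie_ports_fops referenced above from the tegra_pcie_ports_sops table, including an open handler that copies inode->i_private into the seq_file private pointer - exactly what the hand-rolled tegra_pcie_ports_open() used to do. A simplified sketch of what the macro expands to for a given name:

/* Simplified sketch of DEFINE_SEQ_ATTRIBUTE(name); see <linux/seq_file.h>. */
static int name_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &name_sops);

	if (!ret && inode->i_private) {
		struct seq_file *s = file->private_data;

		s->private = inode->i_private;	/* replaces the manual open() */
	}
	return ret;
}

static const struct file_operations name_fops = {
	.owner		= THIS_MODULE,
	.open		= name_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};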
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 1f54334f09f7..154a5398633c 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -658,7 +658,6 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
default:
dev_err(v3->dev, "illegal dma memory chunk size\n");
return -EINVAL;
- break;
}
val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
*pci_map = val;
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 02271c6d17a1..2470782cb01a 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -493,8 +493,8 @@ static int xgene_msi_probe(struct platform_device *pdev)
*/
for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
- msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
- msi_idx);
+ xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
+
/* Read MSIINTn to confirm */
msi_val = xgene_msi_int_read(xgene_msi, irq_index);
if (msi_val) {
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 85fa7d54f11f..bea86899bd5d 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -23,13 +23,12 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/printk.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <soc/bcm2835/raspberrypi-firmware.h>
-
#include "../pci.h"
/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
@@ -54,8 +53,11 @@
#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
-#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128 0x0
+
#define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000
+#define PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK 0x07c00000
+#define PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK 0x0000001f
+#define SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
#define PCIE_MEM_WIN0_LO(win) \
@@ -79,10 +81,12 @@
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
#define PCIE_MISC_MSI_DATA_CONFIG 0x404c
-#define PCIE_MISC_MSI_DATA_CONFIG_VAL 0xffe06540
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_32 0xffe06540
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_8 0xfff86540
#define PCIE_MISC_PCIE_CTRL 0x4064
#define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1
+#define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK 0x4
#define PCIE_MISC_PCIE_STATUS 0x4068
#define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80
@@ -90,6 +94,9 @@
#define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10
#define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40
+#define PCIE_MISC_REVISION 0x406c
+#define BRCM_PCIE_HW_REV_33 0x0303
+
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0
@@ -110,10 +117,14 @@
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
-#define PCIE_MSI_INTR2_STATUS 0x4500
-#define PCIE_MSI_INTR2_CLR 0x4508
-#define PCIE_MSI_INTR2_MASK_SET 0x4510
-#define PCIE_MSI_INTR2_MASK_CLR 0x4514
+
+#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MSI_INTR2_BASE 0x4500
+/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+#define MSI_INT_STATUS 0x0
+#define MSI_INT_CLR 0x8
+#define MSI_INT_MASK_SET 0x10
+#define MSI_INT_MASK_CLR 0x14
#define PCIE_EXT_CFG_DATA 0x8000
@@ -122,13 +133,19 @@
#define PCIE_EXT_SLOT_SHIFT 15
#define PCIE_EXT_FUNC_SHIFT 12
-#define PCIE_RGR1_SW_INIT_1 0x9210
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
-#define PCIE_RGR1_SW_INIT_1_INIT_MASK 0x2
+#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
+
+#define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
+#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
+#define RGR1_SW_INIT_1_INIT_7278_MASK 0x1
+#define RGR1_SW_INIT_1_INIT_7278_SHIFT 0x0
/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS 0x4
#define BRCM_INT_PCI_MSI_NR 32
+#define BRCM_INT_PCI_MSI_LEGACY_NR 8
+#define BRCM_INT_PCI_MSI_SHIFT 0
/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
@@ -153,6 +170,85 @@
#define SSC_STATUS_OFFSET 0x1
#define SSC_STATUS_SSC_MASK 0x400
#define SSC_STATUS_PLL_LOCK_MASK 0x800
+#define PCIE_BRCM_MAX_MEMC 3
+
+#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+
+/* Rescal registers */
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS 0x3
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK 0x4
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT 0x0
+
+/* Forward declarations */
+struct brcm_pcie;
+static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
+
+enum {
+ RGR1_SW_INIT_1,
+ EXT_CFG_INDEX,
+ EXT_CFG_DATA,
+};
+
+enum {
+ RGR1_SW_INIT_1_INIT_MASK,
+ RGR1_SW_INIT_1_INIT_SHIFT,
+};
+
+enum pcie_type {
+ GENERIC,
+ BCM7278,
+ BCM2711,
+};
+
+struct pcie_cfg_data {
+ const int *offsets;
+ const enum pcie_type type;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+};
+
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
struct brcm_msi {
struct device *dev;
@@ -165,6 +261,12 @@ struct brcm_msi {
int irq;
/* used indicates which MSI interrupts have been alloc'd */
unsigned long used;
+ bool legacy;
+ /* Some chips have MSIs in bits [31..24] of a shared register. */
+ int legacy_shift;
+ int nr; /* Number of MSIs available, depends on chip */
+ /* This is the base pointer for interrupt status/set/clr regs */
+ void __iomem *intr_base;
};
/* Internal PCIe Host Controller Information.*/
@@ -177,6 +279,14 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
+ const int *reg_offsets;
+ enum pcie_type type;
+ struct reset_control *rescal;
+ int num_memc;
+ u64 memc_size[PCIE_BRCM_MAX_MEMC];
+ u32 hw_rev;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
/*
@@ -367,8 +477,10 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)
msi = irq_desc_get_handler_data(desc);
dev = msi->dev;
- status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
- for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
+ status = readl(msi->intr_base + MSI_INT_STATUS);
+ status >>= msi->legacy_shift;
+
+ for_each_set_bit(bit, &status, msi->nr) {
virq = irq_find_mapping(msi->inner_domain, bit);
if (virq)
generic_handle_irq(virq);
@@ -385,7 +497,7 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = lower_32_bits(msi->target_addr);
msg->address_hi = upper_32_bits(msi->target_addr);
- msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
+ msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
static int brcm_msi_set_affinity(struct irq_data *irq_data,
@@ -397,8 +509,9 @@ static int brcm_msi_set_affinity(struct irq_data *irq_data,
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+ const int shift_amt = data->hwirq + msi->legacy_shift;
- writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
+ writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
}
@@ -414,7 +527,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
int hwirq;
mutex_lock(&msi->lock);
- hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
+ hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
mutex_unlock(&msi->lock);
return hwirq;
@@ -463,8 +576,7 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -501,7 +613,10 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
- writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);
+ u32 val = __GENMASK(31, msi->legacy_shift);
+
+ writel(val, msi->intr_base + MSI_INT_MASK_CLR);
+ writel(val, msi->intr_base + MSI_INT_CLR);
/*
* The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
@@ -512,8 +627,8 @@ static void brcm_msi_set_regs(struct brcm_msi *msi)
writel(upper_32_bits(msi->target_addr),
msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
- writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
- msi->base + PCIE_MISC_MSI_DATA_CONFIG);
+ val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
+ writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}
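
__GENMASK(h, l) produces a mask with bits l through h set, so the value written to MSI_INT_MASK_CLR covers exactly the MSI bits for the chip at hand. A worked example for the two configurations set up later in brcm_pcie_enable_msi():

/* legacy_shift == 0: HW rev >= 3.3, 32 MSI vectors */
u32 m32 = __GENMASK(31, 0);	/* 0xffffffff - all 32 vectors */

/* legacy_shift == 24: older HW, 8 MSI vectors in bits 31..24 */
u32 m8 = __GENMASK(31, 24);	/* 0xff000000 - the upper 8 bits only */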
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
@@ -538,6 +653,17 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
msi->np = pcie->np;
msi->target_addr = pcie->msi_target_addr;
msi->irq = irq;
+ msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
+
+ if (msi->legacy) {
+ msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
+ msi->legacy_shift = 24;
+ } else {
+ msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_NR;
+ msi->legacy_shift = 0;
+ }
ret = brcm_allocate_domains(msi);
if (ret)
@@ -601,22 +727,43 @@ static struct pci_ops brcm_pcie_ops = {
.write = pci_generic_config_write,
};
-static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
- tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
- u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK);
- writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+ /* Perst bit has moved and assert value is 0 */
+ tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
+ writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
}
-static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val)
+static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
- tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
- writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
@@ -624,22 +771,44 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
u64 *rc_bar2_offset)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct device *dev = pcie->dev;
struct resource_entry *entry;
+ struct device *dev = pcie->dev;
+ u64 lowest_pcie_addr = ~(u64)0;
+ int ret, i = 0;
+ u64 size = 0;
- entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
- if (!entry)
- return -ENODEV;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pcie_beg = entry->res->start - entry->offset;
+ size += entry->res->end - entry->res->start + 1;
+ if (pcie_beg < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_beg;
+ }
- /*
- * The controller expects the inbound window offset to be calculated as
- * the difference between PCIe's address space and CPU's. The offset
- * provided by the firmware is calculated the opposite way, so we
- * negate it.
- */
- *rc_bar2_offset = -entry->offset;
- *rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start);
+ if (lowest_pcie_addr == ~(u64)0) {
+ dev_err(dev, "DT node has no dma-ranges\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
+ PCIE_BRCM_MAX_MEMC);
+
+ if (ret <= 0) {
+ /* Make an educated guess */
+ pcie->num_memc = 1;
+ pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ } else {
+ pcie->num_memc = ret;
+ }
+
+ /* Each memc is viewed through a "port" whose size is a power of 2 */
+ for (i = 0, size = 0; i < pcie->num_memc; i++)
+ size += pcie->memc_size[i];
+
+ /* System memory starts at this address in PCIe-space */
+ *rc_bar2_offset = lowest_pcie_addr;
+ /* The sum of all memc views must also be a power of 2 */
+ *rc_bar2_size = 1ULL << fls64(size - 1);
/*
* We validate the inbound memory view even though we should trust
@@ -691,22 +860,19 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
void __iomem *base = pcie->base;
struct device *dev = pcie->dev;
struct resource_entry *entry;
- unsigned int scb_size_val;
bool ssc_good = false;
struct resource *res;
int num_out_wins = 0;
u16 nlw, cls, lnksta;
- int i, ret;
- u32 tmp, aspm_support;
+ int i, ret, memc;
+ u32 tmp, burst, aspm_support;
/* Reset the bridge */
- brcm_pcie_bridge_sw_init_set(pcie, 1);
- brcm_pcie_perst_set(pcie, 1);
-
+ pcie->bridge_sw_init_set(pcie, 1);
usleep_range(100, 200);
/* Take the bridge out of reset */
- brcm_pcie_bridge_sw_init_set(pcie, 0);
+ pcie->bridge_sw_init_set(pcie, 0);
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
@@ -714,11 +880,22 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
/* Wait for SerDes to be stable */
usleep_range(100, 200);
+ /*
+ * SCB_MAX_BURST_SIZE is a two-bit field. For GENERIC chips it
+ * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
+ * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
+ */
+ if (pcie->type == BCM2711)
+ burst = 0x0; /* 128 bytes */
+ else if (pcie->type == BCM7278)
+ burst = 0x3; /* 512 bytes */
+ else
+ burst = 0x2; /* 512 bytes */
+
/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
- u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128,
- PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+ u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
@@ -733,11 +910,17 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
writel(upper_32_bits(rc_bar2_offset),
base + PCIE_MISC_RC_BAR2_CONFIG_HI);
- scb_size_val = rc_bar2_size ?
- ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */
tmp = readl(base + PCIE_MISC_MISC_CTRL);
- u32p_replace_bits(&tmp, scb_size_val,
- PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK);
+ for (memc = 0; memc < pcie->num_memc; memc++) {
+ u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;
+
+ if (memc == 0)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
+ else if (memc == 1)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
+ else if (memc == 2)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
+ }
writel(tmp, base + PCIE_MISC_MISC_CTRL);
/*
@@ -762,17 +945,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- /* Mask all interrupts since we are not handling any yet */
- writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET);
-
- /* clear any interrupts we find on boot */
- writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR);
-
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
/* Unassert the fundamental reset */
- brcm_pcie_perst_set(pcie, 0);
+ pcie->perst_set(pcie, 0);
/*
* Give the RC/EP time to wake up, before trying to configure RC.
@@ -884,6 +1061,52 @@ static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
dev_err(pcie->dev, "failed to enter low-power link state\n");
}
+static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
+{
+ static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
+ static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
+ const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
+ const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
+ u32 tmp, combined_mask = 0;
+ u32 val;
+ void __iomem *base = pcie->base;
+ int i, ret;
+
+ for (i = beg; i != end; start ? i++ : i--) {
+ val = start ? BIT_MASK(shifts[i]) : 0;
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ tmp = (tmp & ~masks[i]) | (val & masks[i]);
+ writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ usleep_range(50, 200);
+ combined_mask |= masks[i];
+ }
+
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ val = start ? combined_mask : 0;
+
+ ret = (tmp & combined_mask) == val ? 0 : -EIO;
+ if (ret)
+ dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));
+
+ return ret;
+}
+
+static inline int brcm_phy_start(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+}
+
+static inline int brcm_phy_stop(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+}
+
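+
Note that brcm_phy_start()/brcm_phy_stop() degrade to no-ops when pcie->rescal is NULL. That works because the *_get_optional_* reset API returns NULL, not an error, when the DT node carries no matching reset line, and the reset core accepts a NULL handle. A sketch of the semantics, mirroring the probe path further down:

/* Sketch: optional reset handling as used in brcm_pcie_probe(). */
struct reset_control *rstc;

rstc = devm_reset_control_get_optional_shared(dev, "rescal");
if (IS_ERR(rstc))		/* real error, e.g. -EPROBE_DEFER */
	return PTR_ERR(rstc);
/*
 * rstc == NULL simply means "no rescal line": reset_control_assert()
 * and reset_control_deassert() accept NULL and return success, so
 * chips without the RESCAL PHY skip the sequencing automatically.
 */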
static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
@@ -892,7 +1115,7 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- brcm_pcie_perst_set(pcie, 1);
+ pcie->perst_set(pcie, 1);
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -905,13 +1128,66 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
/* Shutdown PCIe bridge */
- brcm_pcie_bridge_sw_init_set(pcie, 1);
+ pcie->bridge_sw_init_set(pcie, 1);
+}
+
+static int brcm_pcie_suspend(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ brcm_pcie_turn_off(pcie);
+ ret = brcm_phy_stop(pcie);
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static int brcm_pcie_resume(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ void __iomem *base;
+ u32 tmp;
+ int ret;
+
+ base = pcie->base;
+ clk_prepare_enable(pcie->clk);
+
+ ret = brcm_phy_start(pcie);
+ if (ret)
+ goto err;
+
+ /* Take bridge out of reset so we can access the SERDES reg */
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ /* SERDES_IDDQ = 0 */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ /* Wait for SerDes to be stable */
+ udelay(100);
+
+ ret = brcm_pcie_setup(pcie);
+ if (ret)
+ goto err;
+
+ if (pcie->msi)
+ brcm_msi_set_regs(pcie->msi);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(pcie->clk);
+ return ret;
}
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
+ brcm_phy_stop(pcie);
+ reset_control_assert(pcie->rescal);
clk_disable_unprepare(pcie->clk);
}
@@ -927,35 +1203,40 @@ static int brcm_pcie_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id brcm_pcie_match[] = {
+ { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ {},
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
struct pci_host_bridge *bridge;
- struct device_node *fw_np;
+ const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
int ret;
- /*
- * We have to wait for Raspberry Pi's firmware interface to be up as a
- * PCI fixup, rpi_firmware_init_vl805(), depends on it. This driver's
- * probe can race with the firmware interface's (see
- * drivers/firmware/raspberrypi.c) and potentially break the PCI fixup.
- */
- fw_np = of_find_compatible_node(NULL, NULL,
- "raspberrypi,bcm2835-firmware");
- if (fw_np && !rpi_firmware_get(fw_np)) {
- of_node_put(fw_np);
- return -EPROBE_DEFER;
- }
- of_node_put(fw_np);
-
bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
if (!bridge)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
+ pcie->reg_offsets = data->offsets;
+ pcie->type = data->type;
+ pcie->perst_set = data->perst_set;
+ pcie->bridge_sw_init_set = data->bridge_sw_init_set;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -975,11 +1256,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "could not enable clock\n");
return ret;
}
+ pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
+ if (IS_ERR(pcie->rescal)) {
+ clk_disable_unprepare(pcie->clk);
+ return PTR_ERR(pcie->rescal);
+ }
+
+ ret = reset_control_deassert(pcie->rescal);
+ if (ret)
+ dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+
+ ret = brcm_phy_start(pcie);
+ if (ret) {
+ reset_control_assert(pcie->rescal);
+ clk_disable_unprepare(pcie->clk);
+ return ret;
+ }
ret = brcm_pcie_setup(pcie);
if (ret)
goto fail;
+ pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+
msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (pci_msi_enabled() && msi_np == pcie->np) {
ret = brcm_pcie_enable_msi(pcie);
@@ -1000,18 +1299,20 @@ fail:
return ret;
}
-static const struct of_device_id brcm_pcie_match[] = {
- { .compatible = "brcm,bcm2711-pcie" },
- {},
-};
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
+static const struct dev_pm_ops brcm_pcie_pm_ops = {
+ .suspend = brcm_pcie_suspend,
+ .resume = brcm_pcie_resume,
+};
+
static struct platform_driver brcm_pcie_driver = {
.probe = brcm_pcie_probe,
.remove = brcm_pcie_remove,
.driver = {
.name = "brcm-pcie",
.of_match_table = brcm_pcie_match,
+ .pm = &brcm_pcie_pm_ops,
},
};
module_platform_driver(brcm_pcie_driver);
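
All per-SoC differences above are keyed off the .data pointer in brcm_pcie_match; of_device_get_match_data() hands back whichever entry's data matched the device. A minimal sketch of the pattern with a hypothetical two-variant driver:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_cfg {			/* hypothetical per-SoC data */
	int max_windows;
};

static const struct foo_cfg cfg_a = { .max_windows = 4 };
static const struct foo_cfg cfg_b = { .max_windows = 8 };

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-a", .data = &cfg_a },
	{ .compatible = "vendor,foo-b", .data = &cfg_b },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)			/* no match data: refuse to bind */
		return -EINVAL;
	dev_info(&pdev->dev, "%d windows\n", cfg->max_windows);
	return 0;
}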
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
new file mode 100644
index 000000000000..7959c9c8d2bc
--- /dev/null
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for handling the PCIe controller errors on
+ * HiSilicon HIP SoCs.
+ *
+ * Copyright (c) 2020 HiSilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <acpi/ghes.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+
+/* HISI PCIe controller error definitions */
+#define HISI_PCIE_ERR_MISC_REGS 33
+
+#define HISI_PCIE_LOCAL_VALID_VERSION BIT(0)
+#define HISI_PCIE_LOCAL_VALID_SOC_ID BIT(1)
+#define HISI_PCIE_LOCAL_VALID_SOCKET_ID BIT(2)
+#define HISI_PCIE_LOCAL_VALID_NIMBUS_ID BIT(3)
+#define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID BIT(4)
+#define HISI_PCIE_LOCAL_VALID_CORE_ID BIT(5)
+#define HISI_PCIE_LOCAL_VALID_PORT_ID BIT(6)
+#define HISI_PCIE_LOCAL_VALID_ERR_TYPE BIT(7)
+#define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY BIT(8)
+#define HISI_PCIE_LOCAL_VALID_ERR_MISC 9
+
+static guid_t hisi_pcie_sec_guid =
+ GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D,
+ 0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72);
+
+/*
+ * Firmware reports the socket port ID where the error occurred. These
+ * macros convert that to the core ID and core port ID required by the
+ * ACPI reset method.
+ */
+#define HISI_PCIE_PORT_ID(core, v) (((v) >> 1) + ((core) << 3))
+#define HISI_PCIE_CORE_ID(v) ((v) >> 3)
+#define HISI_PCIE_CORE_PORT_ID(v) (((v) & 7) << 1)
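
A worked example of the conversion round trip (values hypothetical): with core_id = 1 and a firmware-reported socket port of 4,

u32 port = HISI_PCIE_PORT_ID(1, 4);	/* (4 >> 1) + (1 << 3) = 10 */
u32 core = HISI_PCIE_CORE_ID(port);	/* 10 >> 3 = 1 */
u32 cport = HISI_PCIE_CORE_PORT_ID(port);	/* (10 & 7) << 1 = 4 */

so the core ID and core port ID handed to the ACPI reset method recover the original values.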
+
+struct hisi_pcie_error_data {
+ u64 val_bits;
+ u8 version;
+ u8 soc_id;
+ u8 socket_id;
+ u8 nimbus_id;
+ u8 sub_module_id;
+ u8 core_id;
+ u8 port_id;
+ u8 err_severity;
+ u16 err_type;
+ u8 reserv[2];
+ u32 err_misc[HISI_PCIE_ERR_MISC_REGS];
+};
+
+struct hisi_pcie_error_private {
+ struct notifier_block nb;
+ struct device *dev;
+};
+
+enum hisi_pcie_submodule_id {
+ HISI_PCIE_SUB_MODULE_ID_AP,
+ HISI_PCIE_SUB_MODULE_ID_TL,
+ HISI_PCIE_SUB_MODULE_ID_MAC,
+ HISI_PCIE_SUB_MODULE_ID_DL,
+ HISI_PCIE_SUB_MODULE_ID_SDI,
+};
+
+static const char * const hisi_pcie_sub_module[] = {
+ [HISI_PCIE_SUB_MODULE_ID_AP] = "AP Layer",
+ [HISI_PCIE_SUB_MODULE_ID_TL] = "TL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_MAC] = "MAC Layer",
+ [HISI_PCIE_SUB_MODULE_ID_DL] = "DL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_SDI] = "SDI Layer",
+};
+
+enum hisi_pcie_err_severity {
+ HISI_PCIE_ERR_SEV_RECOVERABLE,
+ HISI_PCIE_ERR_SEV_FATAL,
+ HISI_PCIE_ERR_SEV_CORRECTED,
+ HISI_PCIE_ERR_SEV_NONE,
+};
+
+static const char * const hisi_pcie_error_sev[] = {
+ [HISI_PCIE_ERR_SEV_RECOVERABLE] = "recoverable",
+ [HISI_PCIE_ERR_SEV_FATAL] = "fatal",
+ [HISI_PCIE_ERR_SEV_CORRECTED] = "corrected",
+ [HISI_PCIE_ERR_SEV_NONE] = "none",
+};
+
+static const char *hisi_pcie_get_string(const char * const *array,
+ size_t n, u32 id)
+{
+ u32 index;
+
+ for (index = 0; index < n; index++) {
+ if (index == id && array[index])
+ return array[index];
+ }
+
+ return "unknown";
+}
+
+static int hisi_pcie_port_reset(struct platform_device *pdev,
+ u32 chip_id, u32 port_id)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object arg[3];
+ struct acpi_object_list arg_list;
+ acpi_status s;
+ unsigned long long data = 0;
+
+ arg[0].type = ACPI_TYPE_INTEGER;
+ arg[0].integer.value = chip_id;
+ arg[1].type = ACPI_TYPE_INTEGER;
+ arg[1].integer.value = HISI_PCIE_CORE_ID(port_id);
+ arg[2].type = ACPI_TYPE_INTEGER;
+ arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id);
+
+ arg_list.count = 3;
+ arg_list.pointer = arg;
+
+ s = acpi_evaluate_integer(handle, "RST", &arg_list, &data);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "No RST method\n");
+ return -EIO;
+ }
+
+ if (data) {
+ dev_err(dev, "Failed to Reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hisi_pcie_port_do_recovery(struct platform_device *dev,
+ u32 chip_id, u32 port_id)
+{
+ acpi_status s;
+ struct device *device = &dev->dev;
+ acpi_handle root_handle = ACPI_HANDLE(device);
+ struct acpi_pci_root *pci_root;
+ struct pci_bus *root_bus;
+ struct pci_dev *pdev;
+ u32 domain, busnr, devfn;
+
+ s = acpi_get_parent(root_handle, &root_handle);
+ if (ACPI_FAILURE(s))
+ return -ENODEV;
+ pci_root = acpi_pci_find_root(root_handle);
+ if (!pci_root)
+ return -ENODEV;
+ root_bus = pci_root->bus;
+ domain = pci_root->segment;
+
+ busnr = root_bus->number;
+ devfn = PCI_DEVFN(port_id, 0);
+ pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn);
+ if (!pdev) {
+ dev_info(device, "Fail to get root port %04x:%02x:%02x.%d device\n",
+ domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ return -ENODEV;
+ }
+
+ pci_stop_and_remove_bus_device_locked(pdev);
+ pci_dev_put(pdev);
+
+ if (hisi_pcie_port_reset(dev, chip_id, port_id))
+ return -EIO;
+
+ /*
+ * The PCI spec v5.0, sec 6.6.1 requires subordinate devices to be
+ * ready no more than 1s after a hot reset. The wait could be
+ * shortened if Readiness Notifications are used, but wait the
+ * full 1s here to cover all conditions.
+ */
+ ssleep(1UL);
+
+ /* add root port and downstream devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(root_bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static void hisi_pcie_handle_error(struct platform_device *pdev,
+ const struct hisi_pcie_error_data *edata)
+{
+ struct device *dev = &pdev->dev;
+ int idx, rc;
+ const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)};
+
+ if (edata->val_bits == 0) {
+ dev_warn(dev, "%s: no valid error information\n", __func__);
+ return;
+ }
+
+ dev_info(dev, "\nHISI : HIP : PCIe controller error\n");
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID)
+ dev_info(dev, "Table version = %d\n", edata->version);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID)
+ dev_info(dev, "Socket ID = %d\n", edata->socket_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID)
+ dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID)
+ dev_info(dev, "Sub Module = %s\n",
+ hisi_pcie_get_string(hisi_pcie_sub_module,
+ ARRAY_SIZE(hisi_pcie_sub_module),
+ edata->sub_module_id));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID)
+ dev_info(dev, "Core ID = core%d\n", edata->core_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID)
+ dev_info(dev, "Port ID = port%d\n", edata->port_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY)
+ dev_info(dev, "Error severity = %s\n",
+ hisi_pcie_get_string(hisi_pcie_error_sev,
+ ARRAY_SIZE(hisi_pcie_error_sev),
+ edata->err_severity));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE)
+ dev_info(dev, "Error type = 0x%x\n", edata->err_type);
+
+ dev_info(dev, "Reg Dump:\n");
+ idx = HISI_PCIE_LOCAL_VALID_ERR_MISC;
+ for_each_set_bit_from(idx, valid_bits,
+ HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS)
+ dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC,
+ edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]);
+
+ if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE)
+ return;
+
+ /*
+ * Recover from recoverable PCIe controller errors by trying to
+ * reset the PCIe port.
+ */
+ rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id,
+ HISI_PCIE_PORT_ID(edata->core_id, edata->port_id));
+ if (rc)
+ dev_info(dev, "fail to do hisi pcie port reset\n");
+}
+
+static int hisi_pcie_notify_error(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct acpi_hest_generic_data *gdata = data;
+ const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata);
+ struct hisi_pcie_error_private *priv;
+ struct device *dev;
+ struct platform_device *pdev;
+ guid_t err_sec_guid;
+ u8 socket;
+
+ import_guid(&err_sec_guid, gdata->section_type);
+ if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid))
+ return NOTIFY_DONE;
+
+ priv = container_of(nb, struct hisi_pcie_error_private, nb);
+ dev = priv->dev;
+
+ if (device_property_read_u8(dev, "socket", &socket))
+ return NOTIFY_DONE;
+
+ if (error_data->socket_id != socket)
+ return NOTIFY_DONE;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ hisi_pcie_handle_error(pdev, error_data);
+
+ return NOTIFY_OK;
+}
+
+static int hisi_pcie_error_handler_probe(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->nb.notifier_call = hisi_pcie_notify_error;
+ priv->dev = &pdev->dev;
+ ret = ghes_register_vendor_record_notifier(&priv->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register hisi pcie controller error handler with apei\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int hisi_pcie_error_handler_remove(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev);
+
+ ghes_unregister_vendor_record_notifier(&priv->nb);
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_pcie_acpi_match[] = {
+ { "HISI0361", 0 },
+ { }
+};
+
+static struct platform_driver hisi_pcie_error_handler_driver = {
+ .driver = {
+ .name = "hisi-pcie-error-handler",
+ .acpi_match_table = hisi_pcie_acpi_match,
+ },
+ .probe = hisi_pcie_error_handler_probe,
+ .remove = hisi_pcie_error_handler_remove,
+};
+module_platform_driver(hisi_pcie_error_handler_driver);
+
+MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index aa55b064f64d..56b8ee7bf330 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -94,18 +94,7 @@ static struct bcma_driver iproc_pcie_bcma_driver = {
.probe = iproc_pcie_bcma_probe,
.remove = iproc_pcie_bcma_remove,
};
-
-static int __init iproc_pcie_bcma_init(void)
-{
- return bcma_driver_register(&iproc_pcie_bcma_driver);
-}
-module_init(iproc_pcie_bcma_init);
-
-static void __exit iproc_pcie_bcma_exit(void)
-{
- bcma_driver_unregister(&iproc_pcie_bcma_driver);
-}
-module_exit(iproc_pcie_bcma_exit);
+module_bcma_driver(iproc_pcie_bcma_driver);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 3176ad3ab0e5..908475d27e0e 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,
struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
int target_cpu = cpumask_first(mask);
int curr_cpu;
+ int ret;
curr_cpu = hwirq_to_cpu(msi, data->hwirq);
if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
+ ret = IRQ_SET_MASK_OK_DONE;
+ else {
+ /* steer MSI to the target CPU */
+ data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ ret = IRQ_SET_MASK_OK;
+ }
- /* steer MSI to the target CPU */
- data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(data, cpumask_of(target_cpu));
- return IRQ_SET_MASK_OK;
+ return ret;
}
static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index a956b0c18bd1..b93e7bda101b 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -99,7 +99,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
switch (pcie->type) {
case IPROC_PCIE_PAXC:
case IPROC_PCIE_PAXC_V2:
- pcie->map_irq = 0;
+ pcie->map_irq = NULL;
break;
default:
break;
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index f3082de44e8a..f92e0152e65e 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -572,12 +572,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
goto err_setup_irq;
}
- bridge->dev.parent = dev;
bridge->sysdata = port->cfg;
- bridge->busnr = port->cfg->busr.start;
bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
err = pci_host_probe(bridge);
if (err < 0)
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index f69ef8c89f72..f375c21ceeb1 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -298,6 +298,33 @@ static struct msi_domain_info vmd_msi_domain_info = {
.chip = &vmd_msi_controller,
};
+static int vmd_create_irq_domain(struct vmd_dev *vmd)
+{
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+ if (!fn)
+ return -ENODEV;
+
+ vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void vmd_remove_irq_domain(struct vmd_dev *vmd)
+{
+ if (vmd->irq_domain) {
+ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+ irq_domain_remove(vmd->irq_domain);
+ irq_domain_free_fwnode(fn);
+ }
+}
+
static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
unsigned int devfn, int reg, int len)
{
@@ -417,97 +444,175 @@ static int vmd_find_free_domain(void)
return domain + 1;
}
-static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
+ resource_size_t *offset1,
+ resource_size_t *offset2)
{
- struct pci_sysdata *sd = &vmd->sysdata;
- struct fwnode_handle *fn;
- struct resource *res;
- u32 upper_bits;
- unsigned long flags;
- LIST_HEAD(resources);
- resource_size_t offset[2] = {0};
- resource_size_t membar2_offset = 0x2000;
- struct pci_bus *child;
+ struct pci_dev *dev = vmd->dev;
+ u64 phys1, phys2;
- /*
- * Shadow registers may exist in certain VMD device ids which allow
- * guests to correctly assign host physical addresses to the root ports
- * and child devices. These registers will either return the host value
- * or 0, depending on an enable bit in the VMD device.
- */
- if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ if (native_hint) {
u32 vmlock;
int ret;
- membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
- ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
+ ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
if (ret || vmlock == ~0)
return -ENODEV;
if (MB2_SHADOW_EN(vmlock)) {
void __iomem *membar2;
- membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
+ membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
if (!membar2)
return -ENOMEM;
- offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- (readq(membar2 + MB2_SHADOW_OFFSET) &
- PCI_BASE_ADDRESS_MEM_MASK);
- offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- (readq(membar2 + MB2_SHADOW_OFFSET + 8) &
- PCI_BASE_ADDRESS_MEM_MASK);
- pci_iounmap(vmd->dev, membar2);
- }
- }
-
- if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
- int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR);
+ phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
+ phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ pci_iounmap(dev, membar2);
+ } else
+ return 0;
+ } else {
+ /* Hypervisor-Emulated Vendor-Specific Capability */
+ int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
u32 reg, regu;
- pci_read_config_dword(vmd->dev, pos + 4, &reg);
+ pci_read_config_dword(dev, pos + 4, &reg);
/* "SHDW" */
if (pos && reg == 0x53484457) {
- pci_read_config_dword(vmd->dev, pos + 8, &reg);
- pci_read_config_dword(vmd->dev, pos + 12, &regu);
- offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- (((u64) regu << 32 | reg) &
- PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_dword(vmd->dev, pos + 16, &reg);
- pci_read_config_dword(vmd->dev, pos + 20, &regu);
- offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- (((u64) regu << 32 | reg) &
- PCI_BASE_ADDRESS_MEM_MASK);
+ pci_read_config_dword(dev, pos + 8, &reg);
+ pci_read_config_dword(dev, pos + 12, &regu);
+ phys1 = (u64) regu << 32 | reg;
+
+ pci_read_config_dword(dev, pos + 16, &reg);
+ pci_read_config_dword(dev, pos + 20, &regu);
+ phys2 = (u64) regu << 32 | reg;
+ } else
+ return 0;
+ }
+
+ *offset1 = dev->resource[VMD_MEMBAR1].start -
+ (phys1 & PCI_BASE_ADDRESS_MEM_MASK);
+ *offset2 = dev->resource[VMD_MEMBAR2].start -
+ (phys2 & PCI_BASE_ADDRESS_MEM_MASK);
+
+ return 0;
+}
+
+static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+{
+ struct pci_dev *dev = vmd->dev;
+ u16 reg;
+
+ pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
+ if (BUS_RESTRICT_CAP(reg)) {
+ pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);
+
+ switch (BUS_RESTRICT_CFG(reg)) {
+ case 0:
+ vmd->busn_start = 0;
+ break;
+ case 1:
+ vmd->busn_start = 128;
+ break;
+ case 2:
+ vmd->busn_start = 224;
+ break;
+ default:
+ pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
+ BUS_RESTRICT_CFG(reg));
+ return -ENODEV;
}
}
+ return 0;
+}
+
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+ struct vmd_irq_list *irqs = data;
+ struct vmd_irq *vmdirq;
+ int idx;
+
+ idx = srcu_read_lock(&irqs->srcu);
+ list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+ generic_handle_irq(vmdirq->virq);
+ srcu_read_unlock(&irqs->srcu, idx);
+
+ return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+ struct pci_dev *dev = vmd->dev;
+ int i, err;
+
+ vmd->msix_count = pci_msix_vec_count(dev);
+ if (vmd->msix_count < 0)
+ return -ENODEV;
+
+ vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+ PCI_IRQ_MSIX);
+ if (vmd->msix_count < 0)
+ return vmd->msix_count;
+
+ vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+ GFP_KERNEL);
+ if (!vmd->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vmd->msix_count; i++) {
+ err = init_srcu_struct(&vmd->irqs[i].srcu);
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+ err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+ vmd_irq, IRQF_NO_THREAD,
+ "vmd", &vmd->irqs[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+{
+ struct pci_sysdata *sd = &vmd->sysdata;
+ struct resource *res;
+ u32 upper_bits;
+ unsigned long flags;
+ LIST_HEAD(resources);
+ resource_size_t offset[2] = {0};
+ resource_size_t membar2_offset = 0x2000;
+ struct pci_bus *child;
+ int ret;
+
+ /*
+ * Shadow registers may exist in certain VMD device ids which allow
+ * guests to correctly assign host physical addresses to the root ports
+ * and child devices. These registers will either return the host value
+ * or 0, depending on an enable bit in the VMD device.
+ */
+ if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
+ membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
+ ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
+ if (ret)
+ return ret;
+ } else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+ ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
+ if (ret)
+ return ret;
+ }
+
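
The offsets computed here are the deltas between guest-visible BAR addresses and the host physical addresses the shadow registers report. A worked example with hypothetical numbers:

/*
 * Suppose the host programmed MEMBAR1 at physical 0x80000000 while the
 * guest sees the VMD resource at 0xc0000000:
 *
 *	offset1 = 0xc0000000 - (0x80000000 & PCI_BASE_ADDRESS_MEM_MASK)
 *		= 0x40000000
 *
 * pci_add_resource_offset() later applies this delta so the child BARs
 * read back from hardware resolve to the correct guest addresses.
 */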
/*
* Certain VMD devices may have a root port configuration option which
* limits the bus range to 0-127, 128-255, or 224-255
*/
if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
- u16 reg16;
-
- pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
- if (BUS_RESTRICT_CAP(reg16)) {
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
- &reg16);
-
- switch (BUS_RESTRICT_CFG(reg16)) {
- case 1:
- vmd->busn_start = 128;
- break;
- case 2:
- vmd->busn_start = 224;
- break;
- case 3:
- pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
- return -ENODEV;
- default:
- break;
- }
- }
+ ret = vmd_get_bus_number_start(vmd);
+ if (ret)
+ return ret;
}
res = &vmd->dev->resource[VMD_CFGBAR];
@@ -568,16 +673,15 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
sd->node = pcibus_to_node(vmd->dev->bus);
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
- return -ENODEV;
+ ret = vmd_create_irq_domain(vmd);
+ if (ret)
+ return ret;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
- x86_vector_domain);
- if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
- return -ENODEV;
- }
+ /*
+ * Override the irq domain bus token so the domain can be distinguished
+ * from a regular PCI/MSI domain.
+ */
+ irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
@@ -587,13 +691,13 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
&vmd_ops, sd, &resources);
if (!vmd->bus) {
pci_free_resource_list(&resources);
- irq_domain_remove(vmd->irq_domain);
- irq_domain_free_fwnode(fn);
+ vmd_remove_irq_domain(vmd);
return -ENODEV;
}
vmd_attach_resources(vmd);
- dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ if (vmd->irq_domain)
+ dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
pci_scan_child_bus(vmd->bus);
pci_assign_unassigned_bus_resources(vmd->bus);
@@ -613,24 +717,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
return 0;
}
-static irqreturn_t vmd_irq(int irq, void *data)
-{
- struct vmd_irq_list *irqs = data;
- struct vmd_irq *vmdirq;
- int idx;
-
- idx = srcu_read_lock(&irqs->srcu);
- list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
- generic_handle_irq(vmdirq->virq);
- srcu_read_unlock(&irqs->srcu, idx);
-
- return IRQ_HANDLED;
-}
-
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vmd_dev *vmd;
- int i, err;
+ int err;
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -653,32 +743,9 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
- vmd->msix_count = pci_msix_vec_count(dev);
- if (vmd->msix_count < 0)
- return -ENODEV;
-
- vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
- PCI_IRQ_MSIX);
- if (vmd->msix_count < 0)
- return vmd->msix_count;
-
- vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
- GFP_KERNEL);
- if (!vmd->irqs)
- return -ENOMEM;
-
- for (i = 0; i < vmd->msix_count; i++) {
- err = init_srcu_struct(&vmd->irqs[i].srcu);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
- err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
- vmd_irq, IRQF_NO_THREAD,
- "vmd", &vmd->irqs[i]);
- if (err)
- return err;
- }
+ err = vmd_alloc_irqs(vmd);
+ if (err)
+ return err;
spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
@@ -702,15 +769,13 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- struct fwnode_handle *fn = vmd->irq_domain->fwnode;
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
- irq_domain_remove(vmd->irq_domain);
- irq_domain_free_fwnode(fn);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
@@ -723,7 +788,6 @@ static int vmd_suspend(struct device *dev)
for (i = 0; i < vmd->msix_count; i++)
devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
- pci_save_state(pdev);
return 0;
}
@@ -741,7 +805,6 @@ static int vmd_resume(struct device *dev)
return err;
}
- pci_restore_state(pdev);
return 0;
}
#endif
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 8f065a42fc1a..b54d32a31669 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -168,4 +168,14 @@ const struct pci_ecam_ops pci_32b_ops = {
.write = pci_generic_config_write32,
}
};
+
+/* ECAM ops for 32-bit read only (non-compliant) */
+const struct pci_ecam_ops pci_32b_read_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
#endif
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 9f85815b4f53..529c34808440 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -73,10 +73,8 @@ static int board_added(struct controller *ctrl)
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
- if (retval) {
- ctrl_err(ctrl, "Failed to check link status\n");
+ if (retval)
goto err_exit;
- }
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) {
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 53433b37e181..fb3840e222ad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -283,8 +283,6 @@ static void pcie_wait_for_presence(struct pci_dev *pdev)
msleep(10);
timeout -= 10;
} while (timeout > 0);
-
- pci_info(pdev, "Timeout waiting for Presence Detect\n");
}
int pciehp_check_link_status(struct controller *ctrl)
@@ -293,8 +291,10 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- if (!pcie_wait_for_link(pdev, true))
+ if (!pcie_wait_for_link(pdev, true)) {
+ ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
return -1;
+ }
if (ctrl->inband_presence_disabled)
pcie_wait_for_presence(pdev);
@@ -311,15 +311,18 @@ int pciehp_check_link_status(struct controller *ctrl)
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
- ctrl_err(ctrl, "link training error: status %#06x\n",
- lnk_status);
+ ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
+ slot_name(ctrl), lnk_status);
return -1;
}
pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
- if (!found)
+ if (!found) {
+ ctrl_info(ctrl, "Slot(%s): No device found\n",
+ slot_name(ctrl));
return -1;
+ }
return 0;
}
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index f979b7098acf..0a3c80ba66be 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -40,13 +40,13 @@ static DEFINE_MUTEX(rpadlpar_mutex);
static struct device_node *find_vio_slot_node(char *drc_name)
{
struct device_node *parent = of_find_node_by_name(NULL, "vdevice");
- struct device_node *dn = NULL;
+ struct device_node *dn;
int rc;
if (!parent)
return NULL;
- while ((dn = of_get_next_child(parent, dn))) {
+ for_each_child_of_node(parent, dn) {
rc = rpaphp_check_drc_props(dn, drc_name, NULL);
if (rc == 0)
break;
@@ -60,10 +60,10 @@ static struct device_node *find_vio_slot_node(char *drc_name)
static struct device_node *find_php_slot_pci_node(char *drc_name,
char *drc_type)
{
- struct device_node *np = NULL;
+ struct device_node *np;
int rc;
- while ((np = of_find_node_by_name(np, "pci"))) {
+ for_each_node_by_name(np, "pci") {
rc = rpaphp_check_drc_props(np, drc_name, drc_type);
if (rc == 0)
break;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 65502e3f7b4f..6a6705e0cf17 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -299,7 +299,6 @@ static int board_added(struct slot *p_slot)
if (p_slot->status == 0xFF) {
/* power fault occurred, but it was benign */
ctrl_dbg(ctrl, "%s: Power fault\n", __func__);
- rc = POWER_FAILURE;
p_slot->status = 0;
goto err_exit;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b37e08c4f9d1..4afd4ee4f7f0 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -180,6 +180,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
virtfn->device = iov->vf_device;
virtfn->is_virtfn = 1;
virtfn->physfn = pci_dev_get(dev);
+ virtfn->no_command_memory = 1;
if (id == 0)
pci_read_vf_config_common(virtfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 30ae4ffda5c1..d52d118979a6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -58,8 +58,8 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
#endif
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/* Arch hooks */
-
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_controller *chip = dev->bus->msi;
@@ -132,6 +132,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
return default_teardown_msi_irqs(dev);
}
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
@@ -1346,14 +1347,14 @@ void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
/**
* pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
- * @dev: Pointer to the PCI device
* @desc: Pointer to the MSI descriptor
*
* The ID number is only used within the irqdomain.
*/
-irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
- struct msi_desc *desc)
+static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
return (irq_hw_number_t)desc->msi_attrib.entry_nr |
pci_dev_id(dev) << 11 |
(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
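
The hwirq produced here is just a bit-packed tuple of (MSI entry, device ID, domain). A sketch of the same packing with plain integer types and hypothetical inputs (pci_dev_id() is the bus number in the high byte, devfn in the low byte):

	#include <stdint.h>

	/* Mirror of the packing above: MSI entry in bits 0-10,
	 * bus/devfn in bits 11-26, PCI domain from bit 27 up.
	 */
	static uint64_t calc_hwirq(uint32_t entry_nr, uint8_t bus,
				   uint8_t devfn, uint32_t domain)
	{
		uint32_t dev_id = ((uint32_t)bus << 8) | devfn;

		return (uint64_t)entry_nr |
		       (uint64_t)dev_id << 11 |
		       (uint64_t)domain << 27;
	}
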
@@ -1401,17 +1402,12 @@ static int pci_msi_domain_handle_error(struct irq_domain *domain,
return error;
}
-#ifdef GENERIC_MSI_DOMAIN_OPS
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
- arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
- desc);
+ arg->hwirq = pci_msi_domain_calc_hwirq(desc);
}
-#else
-#define pci_msi_domain_set_desc NULL
-#endif
static struct msi_domain_ops pci_msi_domain_ops_default = {
.set_desc = pci_msi_domain_set_desc,
@@ -1558,4 +1554,26 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
DOMAIN_BUS_PCI_MSI);
return dom;
}
+
+/**
+ * pci_dev_has_special_msi_domain - Check whether the device is handled by
+ * a non-standard PCI-MSI domain
+ * @pdev: The PCI device to check.
+ *
+ * Returns: True if the device irqdomain or the bus irqdomain is
+ * non-standard PCI/MSI.
+ */
+bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
+{
+ struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
+
+ if (!dom)
+ dom = dev_get_msi_domain(&pdev->bus->dev);
+
+ if (!dom)
+ return true;
+
+ return dom->bus_token != DOMAIN_BUS_PCI_MSI;
+}
+
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index f357f9a32b3a..de1c331dbed4 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -53,7 +53,7 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
if (pdev->p2pdma->pool)
size = gen_pool_size(pdev->p2pdma->pool);
- return snprintf(buf, PAGE_SIZE, "%zd\n", size);
+ return scnprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);
@@ -66,7 +66,7 @@ static ssize_t available_show(struct device *dev, struct device_attribute *attr,
if (pdev->p2pdma->pool)
avail = gen_pool_avail(pdev->p2pdma->pool);
- return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
+ return scnprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);
@@ -75,8 +75,8 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- pdev->p2pdma->p2pmem_published);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);
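
These snprintf()-to-scnprintf() conversions are not cosmetic: snprintf() returns the length the output would have had without truncation, which can exceed the buffer, while scnprintf() returns the bytes actually stored — the only value a sysfs show() may legitimately return. A user-space sketch of the difference:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		/* Returns 13 (the untruncated length) even though only 7
		 * characters plus a NUL fit in buf; scnprintf() would
		 * return 7, the count actually written.
		 */
		int n = snprintf(buf, sizeof(buf), "%s\n", "peer-to-peer");

		printf("%d '%s'\n", n, buf);
		return 0;
	}
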
@@ -185,9 +185,9 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
return -ENOMEM;
pgmap = &p2p_pgmap->pgmap;
- pgmap->res.start = pci_resource_start(pdev, bar) + offset;
- pgmap->res.end = pgmap->res.start + size - 1;
- pgmap->res.flags = pci_resource_flags(pdev, bar);
+ pgmap->range.start = pci_resource_start(pdev, bar) + offset;
+ pgmap->range.end = pgmap->range.start + size - 1;
+ pgmap->nr_range = 1;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
p2p_pgmap->provider = pdev;
@@ -202,13 +202,13 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
- resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+ range_len(&pgmap->range), dev_to_node(&pdev->dev),
pgmap->ref);
if (error)
goto pages_free;
- pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
- &pgmap->res);
+ pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
+ pgmap->range.start, pgmap->range.end);
return 0;
@@ -762,7 +762,7 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
struct scatterlist *sg;
void *addr;
- sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d5869a03f748..bf03648c2072 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -944,6 +944,16 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
if (!dev->is_hotplug_bridge)
return false;
+ /* Assume D3 support if the bridge is power-manageable by ACPI. */
+ adev = ACPI_COMPANION(&dev->dev);
+ if (!adev && !pci_dev_is_added(dev)) {
+ adev = acpi_pci_find_companion(&dev->dev);
+ ACPI_COMPANION_SET(&dev->dev, adev);
+ }
+
+ if (adev && acpi_device_power_manageable(adev))
+ return true;
+
/*
* Look for a special _DSD property for the root port and if it
* is set we know the hierarchy behind it supports D3 just fine.
@@ -1167,7 +1177,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
* @pdev: the PCI device whose delay is to be updated
* @handle: ACPI handle of this device
*
- * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
+ * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
* control method of either the device itself or the PCI host bridge.
*
* Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
@@ -1206,8 +1216,8 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
}
if (elements[3].type == ACPI_TYPE_INTEGER) {
value = (int)elements[3].integer.value / 1000;
- if (value < PCI_PM_D3_WAIT)
- pdev->d3_delay = value;
+ if (value < PCI_PM_D3HOT_WAIT)
+ pdev->d3hot_delay = value;
}
}
ACPI_FREE(obj);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index ccf26d12ec61..139869d50eb2 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -294,6 +294,7 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
return 0;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_init);
/*
* Cleanup a pci_bridge_emul structure that was previously initialized
@@ -305,6 +306,7 @@ void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
kfree(bridge->pcie_cap_regs_behavior);
kfree(bridge->pci_regs_behavior);
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_cleanup);
/*
* Should be called by the PCI controller driver when reading the PCI
@@ -366,6 +368,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_read);
/*
* Should be called by the PCI controller driver when writing the PCI
@@ -430,3 +433,4 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_write);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 449466f71040..8b587fc97f7b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
#include <linux/kexec.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
+#include <linux/dma-map-ops.h>
#include "pci.h"
#include "pcie/portdrv.h"
@@ -969,12 +970,6 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-/*
- * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
- * a hibernate transition
- */
-struct dev_pm_ops __weak pcibios_pm_ops;
-
static int pci_pm_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -1033,9 +1028,6 @@ static int pci_pm_freeze_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
- if (pcibios_pm_ops.freeze_noirq)
- return pcibios_pm_ops.freeze_noirq(dev);
-
return 0;
}
@@ -1043,13 +1035,6 @@ static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error;
-
- if (pcibios_pm_ops.thaw_noirq) {
- error = pcibios_pm_ops.thaw_noirq(dev);
- if (error)
- return error;
- }
/*
* The pm->thaw_noirq() callback assumes the device has been
@@ -1174,9 +1159,6 @@ static int pci_pm_poweroff_noirq(struct device *dev)
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
- if (pcibios_pm_ops.poweroff_noirq)
- return pcibios_pm_ops.poweroff_noirq(dev);
-
return 0;
}
@@ -1184,13 +1166,6 @@ static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error;
-
- if (pcibios_pm_ops.restore_noirq) {
- error = pcibios_pm_ops.restore_noirq(dev);
- if (error)
- return error;
- }
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index a0b2bd6c918a..45855a5e9fca 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -37,18 +37,6 @@ static struct pci_driver pf_stub_driver = {
.probe = pci_pf_stub_probe,
.sriov_configure = pci_sriov_configure_simple,
};
-
-static int __init pci_pf_stub_init(void)
-{
- return pci_register_driver(&pf_stub_driver);
-}
-
-static void __exit pci_pf_stub_exit(void)
-{
- pci_unregister_driver(&pf_stub_driver);
-}
-
-module_init(pci_pf_stub_init);
-module_exit(pci_pf_stub_exit);
+module_pci_driver(pf_stub_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 6d78df981d41..d15c881e2e7e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -574,7 +574,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
@@ -708,6 +708,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
data[off - init_off + 3] = (val >> 24) & 0xff;
off += 4;
size -= 4;
+ cond_resched();
}
if (size >= 2) {
@@ -1196,10 +1197,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
}
return 0;
}
-#else /* !HAVE_PCI_MMAP */
+#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
-#endif /* HAVE_PCI_MMAP */
+#endif
/**
* pci_write_rom - used to enable access to the PCI ROM display
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e39c5499770f..e578d34095e9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
-#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -30,8 +29,6 @@
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
-#include <linux/pci-ats.h>
-#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"
@@ -49,7 +46,7 @@ EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
-unsigned int pci_pm_d3_delay;
+unsigned int pci_pm_d3hot_delay;
static void pci_pme_list_scan(struct work_struct *work);
@@ -66,10 +63,10 @@ struct pci_pme_device {
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
- unsigned int delay = dev->d3_delay;
+ unsigned int delay = dev->d3hot_delay;
- if (delay < pci_pm_d3_delay)
- delay = pci_pm_d3_delay;
+ if (delay < pci_pm_d3hot_delay)
+ delay = pci_pm_d3hot_delay;
if (delay)
msleep(delay);
@@ -101,7 +98,19 @@ unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
#define DEFAULT_HOTPLUG_BUS_SIZE 1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
+
+/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
+#ifdef CONFIG_PCIE_BUS_TUNE_OFF
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+#elif defined CONFIG_PCIE_BUS_SAFE
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+#elif defined CONFIG_PCIE_BUS_PERFORMANCE
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+#elif defined CONFIG_PCIE_BUS_PEER2PEER
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
+#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
+#endif
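
The command-line override mentioned in the comment is the pci= early parameter; a rough sketch of the string matching involved (the real parsing lives in pci_setup() later in this file — treat this as illustrative, not a verbatim excerpt):

	static void parse_pcie_bus_arg(const char *str)
	{
		if (!strncmp(str, "pcie_bus_tune_off", 17))
			pcie_bus_config = PCIE_BUS_TUNE_OFF;
		else if (!strncmp(str, "pcie_bus_safe", 13))
			pcie_bus_config = PCIE_BUS_SAFE;
		else if (!strncmp(str, "pcie_bus_perf", 13))
			pcie_bus_config = PCIE_BUS_PERFORMANCE;
		else if (!strncmp(str, "pcie_bus_peer2peer", 18))
			pcie_bus_config = PCIE_BUS_PEER2PEER;
	}
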
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -876,6 +885,10 @@ static void pci_std_enable_acs(struct pci_dev *dev)
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
+ /* Enable Translation Blocking for external devices */
+ if (dev->external_facing || dev->untrusted)
+ ctrl |= (cap & PCI_ACS_TB);
+
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
@@ -1065,7 +1078,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
- msleep(PCI_PM_D2_DELAY);
+ udelay(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
@@ -3013,7 +3026,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
- dev->d3_delay = PCI_PM_D3_WAIT;
+ dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
dev->bridge_d3 = pci_bridge_d3_possible(dev);
dev->d3cold_allowed = true;
@@ -3038,7 +3051,7 @@ void pci_pm_init(struct pci_dev *dev)
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
- (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
+ (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
dev->pme_poll = true;
@@ -3503,8 +3516,13 @@ void pci_acs_init(struct pci_dev *dev)
{
dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
- if (dev->acs_cap)
- pci_enable_acs(dev);
+ /*
+ * Attempt to enable ACS regardless of capability because some Root
+ * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
+ * the standard ACS capability but still support ACS via those
+ * quirks.
+ */
+ pci_enable_acs(dev);
}
/**
@@ -4621,7 +4639,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
- * by default (i.e. unless the @dev's d3_delay field has a different value).
+ * by default (i.e. unless the @dev's d3hot_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
@@ -4701,9 +4719,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
}
if (active && ret)
msleep(delay);
- else if (ret != active)
- pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
+
return ret == active;
}
@@ -4828,6 +4844,7 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
+ pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
return;
}
}
@@ -4920,16 +4937,10 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
- struct pci_dev *pdev;
-
- if (dev->subordinate || !dev->slot ||
+ if (dev->multifunction || dev->subordinate || !dev->slot ||
dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
- list_for_each_entry(pdev, &dev->bus->devices, bus_list)
- if (pdev != dev && pdev->slot == dev->slot)
- return -ENOTTY;
-
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
@@ -6005,7 +6016,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (decode == true)
+ if (decode)
cmd |= command_bits;
else
cmd &= ~command_bits;
@@ -6021,7 +6032,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
if (bridge) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&cmd);
- if (decode == true)
+ if (decode)
cmd |= PCI_BRIDGE_CTL_VGA;
else
cmd &= ~PCI_BRIDGE_CTL_VGA;
@@ -6350,7 +6361,7 @@ static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
spin_lock(&resource_alignment_lock);
if (resource_alignment_param)
- count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
+ count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
spin_unlock(&resource_alignment_lock);
/*
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fa12f7cbc1a0..f86cae9aa1f4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -43,10 +43,9 @@ int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
-#define PCI_PM_D2_DELAY 200
-#define PCI_PM_D3_WAIT 10
-#define PCI_PM_D3COLD_WAIT 100
-#define PCI_PM_BUS_WAIT 50
+#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */
+#define PCI_PM_D3HOT_WAIT 10 /* msec */
+#define PCI_PM_D3COLD_WAIT 100 /* msec */
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -178,7 +177,7 @@ extern struct mutex pci_slot_mutex;
extern raw_spinlock_t pci_lock;
-extern unsigned int pci_pm_d3_delay;
+extern unsigned int pci_pm_d3hot_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 253c30cc1967..ac0557a305af 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -74,14 +74,6 @@ struct pcie_link_state {
* has one slot under it, so at most there are 8 functions.
*/
struct aspm_latency acceptable[8];
-
- /* L1 PM Substate info */
- struct {
- u32 up_cap_ptr; /* L1SS cap ptr in upstream dev */
- u32 dw_cap_ptr; /* L1SS cap ptr in downstream dev */
- u32 ctl1; /* value to be programmed in ctl1 */
- u32 ctl2; /* value to be programmed in ctl2 */
- } l1ss;
};
static int aspm_disabled, aspm_force;
@@ -308,8 +300,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
}
/* Convert L0s latency encoding to ns */
-static u32 calc_l0s_latency(u32 encoding)
+static u32 calc_l0s_latency(u32 lnkcap)
{
+ u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
+
if (encoding == 0x7)
return (5 * 1000); /* > 4us */
return (64 << encoding);
@@ -324,8 +318,10 @@ static u32 calc_l0s_acceptable(u32 encoding)
}
/* Convert L1 latency encoding to ns */
-static u32 calc_l1_latency(u32 encoding)
+static u32 calc_l1_latency(u32 lnkcap)
{
+ u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
+
if (encoding == 0x7)
return (65 * 1000); /* > 64us */
return (1000 << encoding);
@@ -380,58 +376,6 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
}
}
-struct aspm_register_info {
- u32 support:2;
- u32 enabled:2;
- u32 latency_encoding_l0s;
- u32 latency_encoding_l1;
-
- /* L1 substates */
- u32 l1ss_cap_ptr;
- u32 l1ss_cap;
- u32 l1ss_ctl1;
- u32 l1ss_ctl2;
-};
-
-static void pcie_get_aspm_reg(struct pci_dev *pdev,
- struct aspm_register_info *info)
-{
- u16 reg16;
- u32 reg32;
-
- pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
- info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
- info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
-
- /* Read L1 PM substate capabilities */
- info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
- info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!info->l1ss_cap_ptr)
- return;
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
- &info->l1ss_cap);
- if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
- info->l1ss_cap = 0;
- return;
- }
-
- /*
- * If we don't have LTR for the entire path from the Root Complex
- * to this device, we can't use ASPM L1.2 because it relies on the
- * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
- */
- if (!pdev->ltr_path)
- info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
- &info->l1ss_ctl1);
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
- &info->l1ss_ctl2);
-}
-
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
u32 latency, l1_switch_latency = 0;
@@ -493,39 +437,49 @@ static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
return NULL;
}
+static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(pdev, pos, val);
+}
+
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
- struct aspm_register_info *upreg,
- struct aspm_register_info *dwreg)
+ u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 val1, val2, scale1, scale2;
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
-
- link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
- link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
- link->l1ss.ctl1 = link->l1ss.ctl2 = 0;
+ u32 ctl1 = 0, ctl2 = 0;
+ u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
/* Choose the greater of the two Port Common_Mode_Restore_Times */
- val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
- val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
t_common_mode = max(val1, val2);
/* Choose the greater of the two Port T_POWER_ON times */
- val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
- val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
-
- if (calc_l1ss_pwron(link->pdev, scale1, val1) >
- calc_l1ss_pwron(link->downstream, scale2, val2)) {
- link->l1ss.ctl2 |= scale1 | (val1 << 3);
- t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
+ val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+
+ if (calc_l1ss_pwron(parent, scale1, val1) >
+ calc_l1ss_pwron(child, scale2, val2)) {
+ ctl2 |= scale1 | (val1 << 3);
+ t_power_on = calc_l1ss_pwron(parent, scale1, val1);
} else {
- link->l1ss.ctl2 |= scale2 | (val2 << 3);
- t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
+ ctl2 |= scale2 | (val2 << 3);
+ t_power_on = calc_l1ss_pwron(child, scale2, val2);
}
/*
@@ -540,14 +494,60 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
*/
l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
encode_l12_threshold(l1_2_threshold, &scale, &value);
- link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+ ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
+
+ if (ctl1 == pctl1 && ctl1 == cctl1 &&
+ ctl2 == pctl2 && ctl2 == cctl2)
+ return;
+
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 parent_lnkcap, child_lnkcap;
+ u16 parent_lnkctl, child_lnkctl;
+ u32 parent_l1ss_cap, child_l1ss_cap;
+ u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
struct pci_bus *linkbus = parent->subordinate;
- struct aspm_register_info upreg, dwreg;
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
@@ -556,26 +556,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
return;
}
- /* Get upstream/downstream components' register state */
- pcie_get_aspm_reg(parent, &upreg);
- pcie_get_aspm_reg(child, &dwreg);
-
/*
* If ASPM not supported, don't mess with the clocks and link,
* bail out now.
*/
- if (!(upreg.support & dwreg.support))
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
+ if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
return;
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
/*
- * Re-read upstream/downstream components' register state
- * after clock configuration
+ * Re-read upstream/downstream components' register state after
+ * clock configuration. L0s & L1 exit latencies in the otherwise
+ * read-only Link Capabilities may change depending on common clock
+ * configuration (PCIe r5.0, sec 7.5.3.6).
*/
- pcie_get_aspm_reg(parent, &upreg);
- pcie_get_aspm_reg(child, &dwreg);
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
/*
* Setup L0s state
@@ -584,44 +586,71 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* given link unless components on both sides of the link each
* support L0s.
*/
- if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
link->aspm_support |= ASPM_STATE_L0S;
- if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+
+ if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_UP;
- if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_DW;
- link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
- link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+ link->latency_up.l0s = calc_l0s_latency(parent_lnkcap);
+ link->latency_dw.l0s = calc_l0s_latency(child_lnkcap);
/* Setup L1 state */
- if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
link->aspm_support |= ASPM_STATE_L1;
- if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+
+ if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
- link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+ link->latency_up.l1 = calc_l1_latency(parent_lnkcap);
+ link->latency_dw.l1 = calc_l1_latency(child_lnkcap);
/* Setup L1 substate */
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+ &parent_l1ss_cap);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+ &child_l1ss_cap);
+
+ if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ parent_l1ss_cap = 0;
+ if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ child_l1ss_cap = 0;
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!child->ltr_path)
+ child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ if (parent_l1ss_cap)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ &parent_l1ss_ctl1);
+ if (child_l1ss_cap)
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ &child_l1ss_ctl1);
+
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
if (link->aspm_support & ASPM_STATE_L1SS)
- aspm_calc_l1ss_info(link, &upreg, &dwreg);
+ aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -651,24 +680,11 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
}
}
-static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
- u32 clear, u32 set)
-{
- u32 val;
-
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
-}
-
/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
u32 val, enable_req;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- u32 up_cap_ptr = link->l1ss.up_cap_ptr;
- u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;
enable_req = (link->aspm_enabled ^ state) & state;
@@ -686,9 +702,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
*/
/* Disable all L1 substates */
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
/*
* If needed, disable L1, and it gets enabled later
@@ -701,30 +717,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_EXP_LNKCTL_ASPM_L1, 0);
}
- if (enable_req & ASPM_STATE_L1_2_MASK) {
-
- /* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
- link->l1ss.ctl2);
- pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
- link->l1ss.ctl2);
-
- /* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME,
- link->l1ss.ctl1);
-
- /* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- link->l1ss.ctl1);
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- link->l1ss.ctl1);
- }
-
val = 0;
if (state & ASPM_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
@@ -736,9 +728,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
}
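
At its core the relocated pci_clear_and_set_dword() helper is a single read-modify-write: new = (old & ~clear) | set. The L1.2 reprogramming sequence above is that primitive applied in a safe order — quiesce both ports, program the CTL2/CTL1 timing fields, then restore the saved enables. The bit arithmetic in isolation, with a worked value:

	#include <stdint.h>

	/* new = (old & ~clear) | set — the helper's effect on one dword */
	static uint32_t clear_and_set(uint32_t old, uint32_t clear, uint32_t set)
	{
		return (old & ~clear) | set;
	}
	/* e.g. clear_and_set(0xf0f0, 0x00ff, 0x0005) == 0xf005 */
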
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index 77e685771487..565d23cccb8b 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -14,6 +14,8 @@
* and warns when links become degraded in operation.
*/
+#define dev_fmt(fmt) "bw_notification: " fmt
+
#include "../pci.h"
#include "portdrv.h"
@@ -97,6 +99,7 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
return ret;
pcie_enable_link_bandwidth_notification(srv->port);
+ pci_info(srv->port, "enabled with IRQ %d\n", srv->irq);
return 0;
}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index daa9a4153776..e05aba86a317 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -103,7 +103,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
- pcie_wait_for_link(pdev, false);
+ if (!pcie_wait_for_link(pdev, false))
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
return PCI_ERS_RESULT_DISCONNECT;
@@ -111,8 +112,10 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
- if (!pcie_wait_for_link(pdev, true))
+ if (!pcie_wait_for_link(pdev, true)) {
+ pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
return PCI_ERS_RESULT_DISCONNECT;
+ }
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 03d37128a24f..4289030b0fff 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -941,6 +941,12 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
pcibios_add_bus(bus);
+ if (bus->ops->add_bus) {
+ err = bus->ops->add_bus(bus);
+ if (WARN_ON(err < 0))
+ dev_err(&bus->dev, "failed to add bus: %d\n", err);
+ }
+
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(bus);
@@ -1036,6 +1042,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
struct pci_bus *child;
+ struct pci_host_bridge *host;
int i;
int ret;
@@ -1045,11 +1052,16 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
return NULL;
child->parent = parent;
- child->ops = parent->ops;
child->msi = parent->msi;
child->sysdata = parent->sysdata;
child->bus_flags = parent->bus_flags;
+ host = pci_find_host_bridge(parent);
+ if (host->child_ops)
+ child->ops = host->child_ops;
+ else
+ child->ops = parent->ops;
+
/*
* Initialize some portions of the bus device, but don't register
* it now as the parent is not properly set up yet.
@@ -2106,6 +2118,9 @@ static void pci_configure_ltr(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
+ /* Read L1 PM substate capabilities */
+ dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
+
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_LTR))
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a589b6d6ed8..f70692ac79c5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1846,7 +1846,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pci
*/
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
- pci_pm_d3_delay = 120;
+ pci_pm_d3hot_delay = 120;
dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
@@ -1873,12 +1873,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
{
- if (dev->d3_delay >= delay)
+ if (dev->d3hot_delay >= delay)
return;
- dev->d3_delay = delay;
+ dev->d3hot_delay = delay;
pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
- dev->d3_delay);
+ dev->d3hot_delay);
}
static void quirk_radeon_pm(struct pci_dev *dev)
@@ -3387,36 +3387,36 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
* PCI devices which are on Intel chips can skip the 10ms delay
* before entering D3 mode.
*/
-static void quirk_remove_d3_delay(struct pci_dev *dev)
-{
- dev->d3_delay = 0;
-}
-/* C600 Series devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
-/* Lynxpoint-H PCH devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
-/* Intel Cherrytrail devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+static void quirk_remove_d3hot_delay(struct pci_dev *dev)
+{
+ dev->d3hot_delay = 0;
+}
+/* C600 Series devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
+/* Lynxpoint-H PCH devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
+/* Intel Cherrytrail devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported() if
@@ -3673,63 +3673,6 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_poweroff_thunderbolt);
-
-/*
- * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels
- *
- * During suspend the Thunderbolt controller is reset and all PCI
- * tunnels are lost. The NHI driver will try to reestablish all tunnels
- * during resume. We have to manually wait for the NHI since there is
- * no parent child relationship between the NHI and the tunneled
- * bridges.
- */
-static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
-{
- struct pci_dev *sibling = NULL;
- struct pci_dev *nhi = NULL;
-
- if (!x86_apple_machine)
- return;
- if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
- return;
-
- /*
- * Find the NHI and confirm that we are a bridge on the Thunderbolt
- * host controller and not on a Thunderbolt endpoint.
- */
- sibling = pci_get_slot(dev->bus, 0x0);
- if (sibling == dev)
- goto out; /* we are the downstream bridge to the NHI */
- if (!sibling || !sibling->subordinate)
- goto out;
- nhi = pci_get_slot(sibling->subordinate, 0x0);
- if (!nhi)
- goto out;
- if (nhi->vendor != PCI_VENDOR_ID_INTEL
- || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
- nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
- nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
- nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
- || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
- goto out;
- pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
- device_pm_wait_for_dev(&dev->dev, &nhi->dev);
-out:
- pci_dev_put(nhi);
- pci_dev_put(sibling);
-}
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
- quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
- quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
- quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
- quirk_apple_wait_for_thunderbolt);
#endif
/*
@@ -4949,6 +4892,13 @@ static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
}
}
+/*
+ * Currently this quirk does the equivalent of
+ * PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
+ *
+ * TODO: This quirk also needs to do the equivalent of PCI_ACS_TB
+ * if dev->external_facing || dev->untrusted.

+ */
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
if (!pci_quirk_intel_pch_acs_match(dev))
@@ -4988,6 +4938,9 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
+ if (dev->external_facing || dev->untrusted)
+ ctrl |= (cap & PCI_ACS_TB);
+
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c0e85be598c1..c6fe0cfec0f6 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/time.h>
#include <linux/ktime.h>
+#include <linux/swiotlb.h>
#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 09d06b082f8b..72114907c0e4 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -516,7 +516,7 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
p_dev->dev.parent = s->dev.parent;
p_dev->dev.release = pcmcia_release_dev;
/* by default don't allow DMA */
- p_dev->dma_mask = DMA_MASK_NONE;
+ p_dev->dma_mask = 0;
p_dev->dev.dma_mask = &p_dev->dma_mask;
dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
if (!dev_name(&p_dev->dev))
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 7305d57d1890..130327ff0b0e 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -41,6 +41,13 @@ config ARM_CCN
PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
interconnect.
+config ARM_CMN
+ tristate "Arm CMN-600 PMU support"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ help
+ Support for PMU event monitoring on the Arm CMN-600 Coherent Mesh
+ Network interconnect.
+
config ARM_PMU
depends on ARM || ARM64
bool "ARM PMU framework"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 2ebb4de17815..5365fd56f88f 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o
obj-$(CONFIG_ARM_CCN) += arm-ccn.o
+obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
new file mode 100644
index 000000000000..a76ff594f3ca
--- /dev/null
+++ b/drivers/perf/arm-cmn.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016-2020 Arm Limited
+// CMN-600 Coherent Mesh Network PMU driver
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+/* Common register stuff */
+#define CMN_NODE_INFO 0x0000
+#define CMN_NI_NODE_TYPE GENMASK_ULL(15, 0)
+#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
+#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
+
+#define CMN_NODEID_DEVID(reg) ((reg) & 3)
+#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
+#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
+#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
+
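
These macros slice a packed node ID into mesh coordinates: two device-ID bits at the bottom, one port bit, then Y, then X, where 'bits' is the per-axis coordinate width (2 for meshes up to 4x4, 3 otherwise, per arm_cmn_xyidbits() later in the file). A standalone decode sketch:

	#include <stdint.h>

	/* Decode a packed CMN node ID, per the CMN_NODEID_* macros above */
	static void decode_nodeid(uint16_t id, int bits)
	{
		int devid = id & 3;				/* DEVID */
		int pid = (id >> 2) & 1;			/* PID */
		int y = (id >> 3) & ((1u << bits) - 1);		/* Y */
		int x = id >> (3 + bits);			/* X */

		(void)devid; (void)pid; (void)x; (void)y;
	}
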
+#define CMN_CHILD_INFO 0x0080
+#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
+#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
+
+#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
+#define CMN_CHILD_NODE_EXTERNAL BIT(31)
+
+#define CMN_ADDR_NODE_PTR GENMASK(27, 14)
+
+#define CMN_NODE_PTR_DEVID(ptr) (((ptr) >> 2) & 3)
+#define CMN_NODE_PTR_PID(ptr) ((ptr) & 1)
+#define CMN_NODE_PTR_X(ptr, bits) ((ptr) >> (6 + (bits)))
+#define CMN_NODE_PTR_Y(ptr, bits) (((ptr) >> 6) & ((1U << (bits)) - 1))
+
+#define CMN_MAX_XPS (8 * 8)
+
+/* The CFG node has one other useful purpose */
+#define CMN_CFGM_PERIPH_ID_2 0x0010
+#define CMN_CFGM_PID2_REVISION GENMASK(7, 4)
+
+/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */
+#define CMN_PMU_OFFSET 0x2000
+
+/* For most nodes, this is all there is */
+#define CMN_PMU_EVENT_SEL 0x000
+#define CMN_PMU_EVENTn_ID_SHIFT(n) ((n) * 8)
+
+/* DTMs live in the PMU space of XP registers */
+#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
+#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
+#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6)
+#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
+#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4)
+#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
+#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
+#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
+#define CMN_DTM_WPn_MASK(n) (CMN_DTM_WPn(n) + 0x10)
+
+#define CMN_DTM_PMU_CONFIG 0x210
+#define CMN__PMEVCNT0_INPUT_SEL GENMASK_ULL(37, 32)
+#define CMN__PMEVCNT0_INPUT_SEL_WP 0x00
+#define CMN__PMEVCNT0_INPUT_SEL_XP 0x04
+#define CMN__PMEVCNT0_INPUT_SEL_DEV 0x10
+#define CMN__PMEVCNT0_GLOBAL_NUM GENMASK_ULL(18, 16)
+#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n) ((n) * 4)
+#define CMN__PMEVCNT_PAIRED(n) BIT(4 + (n))
+#define CMN__PMEVCNT23_COMBINED BIT(2)
+#define CMN__PMEVCNT01_COMBINED BIT(1)
+#define CMN_DTM_PMU_CONFIG_PMU_EN BIT(0)
+
+#define CMN_DTM_PMEVCNT 0x220
+
+#define CMN_DTM_PMEVCNTSR 0x240
+
+#define CMN_DTM_NUM_COUNTERS 4
+
+/* The DTC node is where the magic happens */
+#define CMN_DT_DTC_CTL 0x0a00
+#define CMN_DT_DTC_CTL_DT_EN BIT(0)
+
+/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
+#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
+#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40)
+
+#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90)
+
+#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100)
+#define CMN_DT_PMCR_PMU_EN BIT(0)
+#define CMN_DT_PMCR_CNTR_RST BIT(5)
+#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)
+
+#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118)
+#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120)
+
+#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128)
+#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)
+
+#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130)
+#define CMN_DT_PMSRR_SS_REQ BIT(0)
+
+#define CMN_DT_NUM_COUNTERS 8
+#define CMN_MAX_DTCS 4
+
+/*
+ * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
+ * so throwing away one bit to make overflow handling easy is no big deal.
+ */
+#define CMN_COUNTER_INIT 0x80000000
+/* Similarly for the 40-bit cycle counter */
+#define CMN_CC_INIT 0x8000000000ULL
+
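
Arming counters at the half-way point means the accumulated count is simply the distance travelled from that mid-point, and an overflow can never go unnoticed within half the counter range. A sketch of the delta accounting for the 32-bit case:

	#include <stdint.h>

	#define COUNTER_INIT 0x80000000u

	/* Events since the counter was (re)armed at COUNTER_INIT;
	 * unsigned wraparound keeps this correct across an overflow.
	 */
	static uint32_t counter_delta(uint32_t now)
	{
		return now - COUNTER_INIT;
	}
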
+
+/* Event attributes */
+#define CMN_CONFIG_TYPE GENMASK(15, 0)
+#define CMN_CONFIG_EVENTID GENMASK(23, 16)
+#define CMN_CONFIG_OCCUPID GENMASK(27, 24)
+#define CMN_CONFIG_BYNODEID BIT(31)
+#define CMN_CONFIG_NODEID GENMASK(47, 32)
+
+#define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
+#define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
+#define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
+#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
+#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
+
+#define CMN_CONFIG_WP_COMBINE GENMASK(27, 24)
+#define CMN_CONFIG_WP_DEV_SEL BIT(48)
+#define CMN_CONFIG_WP_CHN_SEL GENMASK(50, 49)
+#define CMN_CONFIG_WP_GRP BIT(52)
+#define CMN_CONFIG_WP_EXCLUSIVE BIT(53)
+#define CMN_CONFIG1_WP_VAL GENMASK(63, 0)
+#define CMN_CONFIG2_WP_MASK GENMASK(63, 0)
+
+#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
+#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
+#define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
+#define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
+#define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
+#define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
+#define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)
+
+/* Made-up event IDs for watchpoint direction */
+#define CMN_WP_UP 0
+#define CMN_WP_DOWN 2
+
+
+/* r0px probably don't exist in silicon, thankfully */
+enum cmn_revision {
+ CMN600_R1P0,
+ CMN600_R1P1,
+ CMN600_R1P2,
+ CMN600_R1P3,
+ CMN600_R2P0,
+ CMN600_R3P0,
+};
+
+enum cmn_node_type {
+ CMN_TYPE_INVALID,
+ CMN_TYPE_DVM,
+ CMN_TYPE_CFG,
+ CMN_TYPE_DTC,
+ CMN_TYPE_HNI,
+ CMN_TYPE_HNF,
+ CMN_TYPE_XP,
+ CMN_TYPE_SBSX,
+ CMN_TYPE_RNI = 0xa,
+ CMN_TYPE_RND = 0xd,
+ CMN_TYPE_RNSAM = 0xf,
+ CMN_TYPE_CXRA = 0x100,
+ CMN_TYPE_CXHA = 0x101,
+ CMN_TYPE_CXLA = 0x102,
+ /* Not a real node type */
+ CMN_TYPE_WP = 0x7770
+};
+
+struct arm_cmn_node {
+ void __iomem *pmu_base;
+ u16 id, logid;
+ enum cmn_node_type type;
+
+ union {
+ /* Device node */
+ struct {
+ int to_xp;
+ /* DN/HN-F/CXHA */
+ unsigned int occupid_val;
+ unsigned int occupid_count;
+ };
+ /* XP */
+ struct {
+ int dtc;
+ u32 pmu_config_low;
+ union {
+ u8 input_sel[4];
+ __le32 pmu_config_high;
+ };
+ s8 wp_event[4];
+ };
+ };
+
+ union {
+ u8 event[4];
+ __le32 event_sel;
+ };
+};
+
+struct arm_cmn_dtc {
+ void __iomem *base;
+ int irq;
+ int irq_friend;
+ bool cc_active;
+
+ struct perf_event *counters[CMN_DT_NUM_COUNTERS];
+ struct perf_event *cycles;
+};
+
+#define CMN_STATE_DISABLED BIT(0)
+#define CMN_STATE_TXN BIT(1)
+
+struct arm_cmn {
+ struct device *dev;
+ void __iomem *base;
+
+ enum cmn_revision rev;
+ u8 mesh_x;
+ u8 mesh_y;
+ u16 num_xps;
+ u16 num_dns;
+ struct arm_cmn_node *xps;
+ struct arm_cmn_node *dns;
+
+ struct arm_cmn_dtc *dtc;
+ unsigned int num_dtcs;
+
+ int cpu;
+ struct hlist_node cpuhp_node;
+
+ unsigned int state;
+ struct pmu pmu;
+};
+
+#define to_cmn(p) container_of(p, struct arm_cmn, pmu)
+
+static int arm_cmn_hp_state;
+
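+/*
+ * Per-event state for scheduling; to_cmn_hw() below overlays this on the
+ * generic struct hw_perf_event, so it must stay small enough to fit.
+ */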
+struct arm_cmn_hw_event {
+ struct arm_cmn_node *dn;
+ u64 dtm_idx[2];
+ unsigned int dtc_idx;
+ u8 dtcs_used;
+ u8 num_dns;
+};
+
+#define for_each_hw_dn(hw, dn, i) \
+ for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
+
+static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
+{
+ BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
+ return (struct arm_cmn_hw_event *)&event->hw;
+}
+
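+/*
+ * A DTM counter index is only ever 0-3, so we pack them as 2-bit fields,
+ * 32 to a u64, to keep the per-event bookkeeping compact.
+ */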
+static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
+{
+ x[pos / 32] |= (u64)val << ((pos % 32) * 2);
+}
+
+static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
+{
+ return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
+}
+
+struct arm_cmn_event_attr {
+ struct device_attribute attr;
+ enum cmn_node_type type;
+ u8 eventid;
+ u8 occupid;
+};
+
+struct arm_cmn_format_attr {
+ struct device_attribute attr;
+ u64 field;
+ int config;
+};
+
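+/* Meshes larger than 4x4 need 3 bits for each X/Y coordinate in node IDs */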
+static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
+{
+ return cmn->mesh_x > 4 || cmn->mesh_y > 4 ? 3 : 2;
+}
+
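+/* Device nodes link to their XP via a relative offset within the node array */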
+static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn,
+ struct arm_cmn_node *dn)
+{
+ int bits = arm_cmn_xyidbits(cmn);
+ int x = CMN_NODEID_X(dn->id, bits);
+ int y = CMN_NODEID_Y(dn->id, bits);
+ int xp_idx = cmn->mesh_x * y + x;
+
+ dn->to_xp = (cmn->xps + xp_idx) - dn;
+}
+
+static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn)
+{
+ return dn->type == CMN_TYPE_XP ? dn : dn + dn->to_xp;
+}
+
+static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ enum cmn_node_type type)
+{
+ int i;
+
+ for (i = 0; i < cmn->num_dns; i++)
+ if (cmn->dns[i].type == type)
+ return &cmn->dns[i];
+ return NULL;
+}
+
+#define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \
+ (&((struct arm_cmn_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \
+ .type = _type, \
+ .eventid = _eventid, \
+ .occupid = _occupid, \
+ }})[0].attr.attr)
+
+static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id)
+{
+ return (type == CMN_TYPE_DVM && id == 0x05) ||
+ (type == CMN_TYPE_HNF && id == 0x0f);
+}
+
+static ssize_t arm_cmn_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_cmn_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == CMN_TYPE_DTC)
+ return snprintf(buf, PAGE_SIZE, "type=0x%x\n", eattr->type);
+
+ if (eattr->type == CMN_TYPE_WP)
+ return snprintf(buf, PAGE_SIZE,
+ "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
+ eattr->type, eattr->eventid);
+
+ if (arm_cmn_is_occup_event(eattr->type, eattr->eventid))
+ return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
+ eattr->type, eattr->eventid, eattr->occupid);
+
+ return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n",
+ eattr->type, eattr->eventid);
+}
+
+static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
+ struct arm_cmn_event_attr *eattr;
+ enum cmn_node_type type;
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+ type = eattr->type;
+
+ /* Watchpoints aren't nodes */
+ if (type == CMN_TYPE_WP)
+ type = CMN_TYPE_XP;
+
+ /* Revision-specific differences */
+ if (cmn->rev < CMN600_R1P2) {
+ if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
+ return 0;
+ }
+
+ if (!arm_cmn_node(cmn, type))
+ return 0;
+
+ return attr->mode;
+}
+
+#define _CMN_EVENT_DVM(_name, _event, _occup) \
+ CMN_EVENT_ATTR(dn_##_name, CMN_TYPE_DVM, _event, _occup)
+#define CMN_EVENT_DTC(_name) \
+ CMN_EVENT_ATTR(dtc_##_name, CMN_TYPE_DTC, 0, 0)
+#define _CMN_EVENT_HNF(_name, _event, _occup) \
+ CMN_EVENT_ATTR(hnf_##_name, CMN_TYPE_HNF, _event, _occup)
+#define CMN_EVENT_HNI(_name, _event) \
+ CMN_EVENT_ATTR(hni_##_name, CMN_TYPE_HNI, _event, 0)
+#define __CMN_EVENT_XP(_name, _event) \
+ CMN_EVENT_ATTR(mxp_##_name, CMN_TYPE_XP, _event, 0)
+#define CMN_EVENT_SBSX(_name, _event) \
+ CMN_EVENT_ATTR(sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
+#define CMN_EVENT_RNID(_name, _event) \
+ CMN_EVENT_ATTR(rnid_##_name, CMN_TYPE_RNI, _event, 0)
+
+#define CMN_EVENT_DVM(_name, _event) \
+ _CMN_EVENT_DVM(_name, _event, 0)
+#define CMN_EVENT_HNF(_name, _event) \
+ _CMN_EVENT_HNF(_name, _event, 0)
+#define _CMN_EVENT_XP(_name, _event) \
+ __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
+ __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
+ __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \
+ __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \
+ __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \
+ __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2))
+
+/* Good thing there are only 3 fundamental XP events... */
+#define CMN_EVENT_XP(_name, _event) \
+ _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \
+ _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \
+ _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \
+ _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5))
+
+
+static struct attribute *arm_cmn_event_attrs[] = {
+ CMN_EVENT_DTC(cycles),
+
+ /*
+ * DVM node events conflict with HN-I events in the equivalent PMU
+ * slot, but our lazy short-cut of using the DTM counter index for
+ * the PMU index as well happens to avoid that by construction.
+ */
+ CMN_EVENT_DVM(rxreq_dvmop, 0x01),
+ CMN_EVENT_DVM(rxreq_dvmsync, 0x02),
+ CMN_EVENT_DVM(rxreq_dvmop_vmid_filtered, 0x03),
+ CMN_EVENT_DVM(rxreq_retried, 0x04),
+ _CMN_EVENT_DVM(rxreq_trk_occupancy_all, 0x05, 0),
+ _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmop, 0x05, 1),
+ _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmsync, 0x05, 2),
+
+ CMN_EVENT_HNF(cache_miss, 0x01),
+ CMN_EVENT_HNF(slc_sf_cache_access, 0x02),
+ CMN_EVENT_HNF(cache_fill, 0x03),
+ CMN_EVENT_HNF(pocq_retry, 0x04),
+ CMN_EVENT_HNF(pocq_reqs_recvd, 0x05),
+ CMN_EVENT_HNF(sf_hit, 0x06),
+ CMN_EVENT_HNF(sf_evictions, 0x07),
+ CMN_EVENT_HNF(dir_snoops_sent, 0x08),
+ CMN_EVENT_HNF(brd_snoops_sent, 0x09),
+ CMN_EVENT_HNF(slc_eviction, 0x0a),
+ CMN_EVENT_HNF(slc_fill_invalid_way, 0x0b),
+ CMN_EVENT_HNF(mc_retries, 0x0c),
+ CMN_EVENT_HNF(mc_reqs, 0x0d),
+ CMN_EVENT_HNF(qos_hh_retry, 0x0e),
+ _CMN_EVENT_HNF(qos_pocq_occupancy_all, 0x0f, 0),
+ _CMN_EVENT_HNF(qos_pocq_occupancy_read, 0x0f, 1),
+ _CMN_EVENT_HNF(qos_pocq_occupancy_write, 0x0f, 2),
+ _CMN_EVENT_HNF(qos_pocq_occupancy_atomic, 0x0f, 3),
+ _CMN_EVENT_HNF(qos_pocq_occupancy_stash, 0x0f, 4),
+ CMN_EVENT_HNF(pocq_addrhaz, 0x10),
+ CMN_EVENT_HNF(pocq_atomic_addrhaz, 0x11),
+ CMN_EVENT_HNF(ld_st_swp_adq_full, 0x12),
+ CMN_EVENT_HNF(cmp_adq_full, 0x13),
+ CMN_EVENT_HNF(txdat_stall, 0x14),
+ CMN_EVENT_HNF(txrsp_stall, 0x15),
+ CMN_EVENT_HNF(seq_full, 0x16),
+ CMN_EVENT_HNF(seq_hit, 0x17),
+ CMN_EVENT_HNF(snp_sent, 0x18),
+ CMN_EVENT_HNF(sfbi_dir_snp_sent, 0x19),
+ CMN_EVENT_HNF(sfbi_brd_snp_sent, 0x1a),
+ CMN_EVENT_HNF(snp_sent_untrk, 0x1b),
+ CMN_EVENT_HNF(intv_dirty, 0x1c),
+ CMN_EVENT_HNF(stash_snp_sent, 0x1d),
+ CMN_EVENT_HNF(stash_data_pull, 0x1e),
+ CMN_EVENT_HNF(snp_fwded, 0x1f),
+
+ CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
+ CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
+ CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22),
+ CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23),
+ CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24),
+ CMN_EVENT_HNI(rrt_rd_alloc, 0x25),
+ CMN_EVENT_HNI(rrt_wr_alloc, 0x26),
+ CMN_EVENT_HNI(rdt_rd_alloc, 0x27),
+ CMN_EVENT_HNI(rdt_wr_alloc, 0x28),
+ CMN_EVENT_HNI(wdb_alloc, 0x29),
+ CMN_EVENT_HNI(txrsp_retryack, 0x2a),
+ CMN_EVENT_HNI(arvalid_no_arready, 0x2b),
+ CMN_EVENT_HNI(arready_no_arvalid, 0x2c),
+ CMN_EVENT_HNI(awvalid_no_awready, 0x2d),
+ CMN_EVENT_HNI(awready_no_awvalid, 0x2e),
+ CMN_EVENT_HNI(wvalid_no_wready, 0x2f),
+ CMN_EVENT_HNI(txdat_stall, 0x30),
+ CMN_EVENT_HNI(nonpcie_serialization, 0x31),
+ CMN_EVENT_HNI(pcie_serialization, 0x32),
+
+ CMN_EVENT_XP(txflit_valid, 0x01),
+ CMN_EVENT_XP(txflit_stall, 0x02),
+ CMN_EVENT_XP(partial_dat_flit, 0x03),
+ /* We treat watchpoints as a special made-up class of XP events */
+ CMN_EVENT_ATTR(watchpoint_up, CMN_TYPE_WP, 0, 0),
+ CMN_EVENT_ATTR(watchpoint_down, CMN_TYPE_WP, 2, 0),
+
+ CMN_EVENT_SBSX(rd_req, 0x01),
+ CMN_EVENT_SBSX(wr_req, 0x02),
+ CMN_EVENT_SBSX(cmo_req, 0x03),
+ CMN_EVENT_SBSX(txrsp_retryack, 0x04),
+ CMN_EVENT_SBSX(txdat_flitv, 0x05),
+ CMN_EVENT_SBSX(txrsp_flitv, 0x06),
+ CMN_EVENT_SBSX(rd_req_trkr_occ_cnt_ovfl, 0x11),
+ CMN_EVENT_SBSX(wr_req_trkr_occ_cnt_ovfl, 0x12),
+ CMN_EVENT_SBSX(cmo_req_trkr_occ_cnt_ovfl, 0x13),
+ CMN_EVENT_SBSX(wdb_occ_cnt_ovfl, 0x14),
+ CMN_EVENT_SBSX(rd_axi_trkr_occ_cnt_ovfl, 0x15),
+ CMN_EVENT_SBSX(cmo_axi_trkr_occ_cnt_ovfl, 0x16),
+ CMN_EVENT_SBSX(arvalid_no_arready, 0x21),
+ CMN_EVENT_SBSX(awvalid_no_awready, 0x22),
+ CMN_EVENT_SBSX(wvalid_no_wready, 0x23),
+ CMN_EVENT_SBSX(txdat_stall, 0x24),
+ CMN_EVENT_SBSX(txrsp_stall, 0x25),
+
+ CMN_EVENT_RNID(s0_rdata_beats, 0x01),
+ CMN_EVENT_RNID(s1_rdata_beats, 0x02),
+ CMN_EVENT_RNID(s2_rdata_beats, 0x03),
+ CMN_EVENT_RNID(rxdat_flits, 0x04),
+ CMN_EVENT_RNID(txdat_flits, 0x05),
+ CMN_EVENT_RNID(txreq_flits_total, 0x06),
+ CMN_EVENT_RNID(txreq_flits_retried, 0x07),
+ CMN_EVENT_RNID(rrt_occ_ovfl, 0x08),
+ CMN_EVENT_RNID(wrt_occ_ovfl, 0x09),
+ CMN_EVENT_RNID(txreq_flits_replayed, 0x0a),
+ CMN_EVENT_RNID(wrcancel_sent, 0x0b),
+ CMN_EVENT_RNID(s0_wdata_beats, 0x0c),
+ CMN_EVENT_RNID(s1_wdata_beats, 0x0d),
+ CMN_EVENT_RNID(s2_wdata_beats, 0x0e),
+ CMN_EVENT_RNID(rrt_alloc, 0x0f),
+ CMN_EVENT_RNID(wrt_alloc, 0x10),
+ CMN_EVENT_RNID(rdb_unord, 0x11),
+ CMN_EVENT_RNID(rdb_replay, 0x12),
+ CMN_EVENT_RNID(rdb_hybrid, 0x13),
+ CMN_EVENT_RNID(rdb_ord, 0x14),
+
+ NULL
+};
+
+static const struct attribute_group arm_cmn_event_attrs_group = {
+ .name = "events",
+ .attrs = arm_cmn_event_attrs,
+ .is_visible = arm_cmn_event_attr_is_visible,
+};
+
+static ssize_t arm_cmn_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
+ int lo = __ffs(fmt->field), hi = __fls(fmt->field);
+
+ if (lo == hi)
+ return snprintf(buf, PAGE_SIZE, "config:%d\n", lo);
+
+ if (!fmt->config)
+ return snprintf(buf, PAGE_SIZE, "config:%d-%d\n", lo, hi);
+
+ return snprintf(buf, PAGE_SIZE, "config%d:%d-%d\n", fmt->config, lo, hi);
+}
+
+#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
+ (&((struct arm_cmn_format_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL), \
+ .config = _cfg, \
+ .field = _fld, \
+ }})[0].attr.attr)
+#define CMN_FORMAT_ATTR(_name, _fld) _CMN_FORMAT_ATTR(_name, 0, _fld)
+
+static struct attribute *arm_cmn_format_attrs[] = {
+ CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
+ CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
+ CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
+ CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
+ CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),
+
+ CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
+ CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
+ CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
+ CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
+ CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),
+
+ _CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
+ _CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),
+
+ NULL
+};
+
+static const struct attribute_group arm_cmn_format_attrs_group = {
+ .name = "format",
+ .attrs = arm_cmn_format_attrs,
+};
+
+static ssize_t arm_cmn_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
+}
+
+static struct device_attribute arm_cmn_cpumask_attr =
+ __ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);
+
+static struct attribute *arm_cmn_cpumask_attrs[] = {
+ &arm_cmn_cpumask_attr.attr,
+ NULL,
+};
+
+static struct attribute_group arm_cmn_cpumask_attr_group = {
+ .attrs = arm_cmn_cpumask_attrs,
+};
+
+static const struct attribute_group *arm_cmn_attr_groups[] = {
+ &arm_cmn_event_attrs_group,
+ &arm_cmn_format_attrs_group,
+ &arm_cmn_cpumask_attr_group,
+ NULL
+};
+
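+/*
+ * With the direction pseudo-eventids defined as 0 and 2, adding the group
+ * bit maps each watchpoint event straight onto one of the four DTM
+ * watchpoints.
+ */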
+static int arm_cmn_wp_idx(struct perf_event *event)
+{
+ return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
+}
+
+static u32 arm_cmn_wp_config(struct perf_event *event)
+{
+ u32 config;
+ u32 dev = CMN_EVENT_WP_DEV_SEL(event);
+ u32 chn = CMN_EVENT_WP_CHN_SEL(event);
+ u32 grp = CMN_EVENT_WP_GRP(event);
+ u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
+ u32 combine = CMN_EVENT_WP_COMBINE(event);
+
+ config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
+ FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc);
+ if (combine && !grp)
+ config |= CMN_DTM_WPn_CONFIG_WP_COMBINE;
+
+ return config;
+}
+
+static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
+{
+ if (!cmn->state)
+ writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
+ cmn->state |= state;
+}
+
+static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
+{
+ cmn->state &= ~state;
+ if (!cmn->state)
+ writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
+ cmn->dtc[0].base + CMN_DT_PMCR);
+}
+
+static void arm_cmn_pmu_enable(struct pmu *pmu)
+{
+ arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
+}
+
+static void arm_cmn_pmu_disable(struct pmu *pmu)
+{
+ arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
+}
+
+static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
+ bool snapshot)
+{
+ struct arm_cmn_node *dn;
+ unsigned int i, offset;
+ u64 count = 0;
+
+ offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
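+ /* Each XP DTM packs its four 16-bit event counters into one 64-bit register */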
+ for_each_hw_dn(hw, dn, i) {
+ struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn);
+ int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
+ u64 reg = readq_relaxed(xp->pmu_base + offset);
+ u16 dtm_count = reg >> (dtm_idx * 16);
+
+ count += dtm_count;
+ }
+ return count;
+}
+
+static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
+{
+ u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);
+
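+ /* Re-arm at the mid-point and mask the delta to the 40-bit counter width */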
+ writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
+ return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
+}
+
+static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
+{
+ u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);
+
+ val = readl_relaxed(dtc->base + pmevcnt);
+ writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
+ return val - CMN_COUNTER_INIT;
+}
+
+static void arm_cmn_init_counter(struct perf_event *event)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
+ u64 count;
+
+ for (i = 0; hw->dtcs_used & (1U << i); i++) {
+ writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
+ cmn->dtc[i].counters[hw->dtc_idx] = event;
+ }
+
+ count = arm_cmn_read_dtm(cmn, hw, false);
+ local64_set(&event->hw.prev_count, count);
+}
+
+static void arm_cmn_event_read(struct perf_event *event)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ u64 delta, new, prev;
+ unsigned long flags;
+ unsigned int i;
+
+ if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
+ i = __ffs(hw->dtcs_used);
+ delta = arm_cmn_read_cc(cmn->dtc + i);
+ local64_add(delta, &event->count);
+ return;
+ }
+ new = arm_cmn_read_dtm(cmn, hw, false);
+ prev = local64_xchg(&event->hw.prev_count, new);
+
+ delta = new - prev;
+
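+ /*
+ * Each DTM counter holds the low 16 bits; its paired DTC counter
+ * accumulates the overflows, hence the shift when combining.
+ */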
+ local_irq_save(flags);
+ for (i = 0; hw->dtcs_used & (1U << i); i++) {
+ new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
+ delta += new << 16;
+ }
+ local_irq_restore(flags);
+ local64_add(delta, &event->count);
+}
+
+static void arm_cmn_event_start(struct perf_event *event, int flags)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_node *dn;
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+ int i;
+
+ if (type == CMN_TYPE_DTC) {
+ i = __ffs(hw->dtcs_used);
+ writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
+ cmn->dtc[i].cc_active = true;
+ } else if (type == CMN_TYPE_WP) {
+ int wp_idx = arm_cmn_wp_idx(event);
+ u64 val = CMN_EVENT_WP_VAL(event);
+ u64 mask = CMN_EVENT_WP_MASK(event);
+
+ for_each_hw_dn(hw, dn, i) {
+ writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
+ writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
+ }
+ } else for_each_hw_dn(hw, dn, i) {
+ int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
+
+ dn->event[dtm_idx] = CMN_EVENT_EVENTID(event);
+ writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ }
+}
+
+static void arm_cmn_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_node *dn;
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+ int i;
+
+ if (type == CMN_TYPE_DTC) {
+ i = __ffs(hw->dtcs_used);
+ cmn->dtc[i].cc_active = false;
+ } else if (type == CMN_TYPE_WP) {
+ int wp_idx = arm_cmn_wp_idx(event);
+
+ for_each_hw_dn(hw, dn, i) {
+ writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx));
+ writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx));
+ }
+ } else for_each_hw_dn(hw, dn, i) {
+ int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
+
+ dn->event[dtm_idx] = 0;
+ writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ }
+
+ arm_cmn_event_read(event);
+}
+
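+/*
+ * A dry-run tally of DTC and DTM counter usage, letting us check whether
+ * a candidate group could ever be scheduled without claiming any hardware.
+ */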
+struct arm_cmn_val {
+ u8 dtm_count[CMN_MAX_XPS];
+ u8 occupid[CMN_MAX_XPS];
+ u8 wp[CMN_MAX_XPS][4];
+ int dtc_count;
+ bool cycles;
+};
+
+static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event)
+{
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_node *dn;
+ enum cmn_node_type type;
+ int i;
+ u8 occupid;
+
+ if (is_software_event(event))
+ return;
+
+ type = CMN_EVENT_TYPE(event);
+ if (type == CMN_TYPE_DTC) {
+ val->cycles = true;
+ return;
+ }
+
+ val->dtc_count++;
+ if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ occupid = CMN_EVENT_OCCUPID(event) + 1;
+ else
+ occupid = 0;
+
+ for_each_hw_dn(hw, dn, i) {
+ int wp_idx, xp = arm_cmn_node_to_xp(dn)->logid;
+
+ val->dtm_count[xp]++;
+ val->occupid[xp] = occupid;
+
+ if (type != CMN_TYPE_WP)
+ continue;
+
+ wp_idx = arm_cmn_wp_idx(event);
+ val->wp[xp][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
+ }
+}
+
+static int arm_cmn_validate_group(struct perf_event *event)
+{
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_node *dn;
+ struct perf_event *sibling, *leader = event->group_leader;
+ enum cmn_node_type type;
+ struct arm_cmn_val val;
+ int i;
+ u8 occupid;
+
+ if (leader == event)
+ return 0;
+
+ if (event->pmu != leader->pmu && !is_software_event(leader))
+ return -EINVAL;
+
+ memset(&val, 0, sizeof(val));
+
+ arm_cmn_val_add_event(&val, leader);
+ for_each_sibling_event(sibling, leader)
+ arm_cmn_val_add_event(&val, sibling);
+
+ type = CMN_EVENT_TYPE(event);
+ if (type == CMN_TYPE_DTC)
+ return val.cycles ? -EINVAL : 0;
+
+ if (val.dtc_count == CMN_DT_NUM_COUNTERS)
+ return -EINVAL;
+
+ if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ occupid = CMN_EVENT_OCCUPID(event) + 1;
+ else
+ occupid = 0;
+
+ for_each_hw_dn(hw, dn, i) {
+ int wp_idx, wp_cmb, xp = arm_cmn_node_to_xp(dn)->logid;
+
+ if (val.dtm_count[xp] == CMN_DTM_NUM_COUNTERS)
+ return -EINVAL;
+
+ if (occupid && val.occupid[xp] && occupid != val.occupid[xp])
+ return -EINVAL;
+
+ if (type != CMN_TYPE_WP)
+ continue;
+
+ wp_idx = arm_cmn_wp_idx(event);
+ if (val.wp[xp][wp_idx])
+ return -EINVAL;
+
+ wp_cmb = val.wp[xp][wp_idx ^ 1];
+ if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arm_cmn_event_init(struct perf_event *event)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ enum cmn_node_type type;
+ unsigned int i;
+ bool bynodeid;
+ u16 nodeid, eventid;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ event->cpu = cmn->cpu;
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ type = CMN_EVENT_TYPE(event);
+ /* DTC events (i.e. cycles) already have everything they need */
+ if (type == CMN_TYPE_DTC)
+ return 0;
+
+ /* For watchpoints we need the actual XP node here */
+ if (type == CMN_TYPE_WP) {
+ type = CMN_TYPE_XP;
+ /* ...and we need a "real" direction */
+ eventid = CMN_EVENT_EVENTID(event);
+ if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
+ return -EINVAL;
+ }
+
+ bynodeid = CMN_EVENT_BYNODEID(event);
+ nodeid = CMN_EVENT_NODEID(event);
+
+ hw->dn = arm_cmn_node(cmn, type);
+ for (i = hw->dn - cmn->dns; i < cmn->num_dns && cmn->dns[i].type == type; i++) {
+ if (!bynodeid) {
+ hw->num_dns++;
+ } else if (cmn->dns[i].id != nodeid) {
+ hw->dn++;
+ } else {
+ hw->num_dns = 1;
+ break;
+ }
+ }
+
+ if (!hw->num_dns) {
+ int bits = arm_cmn_xyidbits(cmn);
+
+ dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
+ nodeid, CMN_NODEID_X(nodeid, bits), CMN_NODEID_Y(nodeid, bits),
+ CMN_NODEID_PID(nodeid), CMN_NODEID_DEVID(nodeid), type);
+ return -EINVAL;
+ }
+ /*
+ * By assuming events count in all DTC domains, we cunningly avoid
+ * needing to know anything about how XPs are assigned to domains.
+ */
+ hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
+
+ return arm_cmn_validate_group(event);
+}
+
+static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
+ int i)
+{
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+
+ while (i--) {
+ struct arm_cmn_node *xp = arm_cmn_node_to_xp(hw->dn + i);
+ unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
+
+ if (type == CMN_TYPE_WP)
+ hw->dn[i].wp_event[arm_cmn_wp_idx(event)] = -1;
+
+ if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event)))
+ hw->dn[i].occupid_count--;
+
+ xp->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
+ writel_relaxed(xp->pmu_config_low, xp->pmu_base + CMN_DTM_PMU_CONFIG);
+ }
+ memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
+
+ for (i = 0; hw->dtcs_used & (1U << i); i++)
+ cmn->dtc[i].counters[hw->dtc_idx] = NULL;
+}
+
+static int arm_cmn_event_add(struct perf_event *event, int flags)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ struct arm_cmn_dtc *dtc = &cmn->dtc[0];
+ struct arm_cmn_node *dn;
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+ unsigned int i, dtc_idx, input_sel;
+
+ if (type == CMN_TYPE_DTC) {
+ i = 0;
+ while (cmn->dtc[i].cycles)
+ if (++i == cmn->num_dtcs)
+ return -ENOSPC;
+
+ cmn->dtc[i].cycles = event;
+ hw->dtc_idx = CMN_DT_NUM_COUNTERS;
+ hw->dtcs_used = 1U << i;
+
+ if (flags & PERF_EF_START)
+ arm_cmn_event_start(event, 0);
+ return 0;
+ }
+
+ /* Grab a free global counter first... */
+ dtc_idx = 0;
+ while (dtc->counters[dtc_idx])
+ if (++dtc_idx == CMN_DT_NUM_COUNTERS)
+ return -ENOSPC;
+
+ hw->dtc_idx = dtc_idx;
+
+ /* ...then the local counters to feed it. */
+ for_each_hw_dn(hw, dn, i) {
+ struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn);
+ unsigned int dtm_idx, shift;
+ u64 reg;
+
+ dtm_idx = 0;
+ while (xp->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
+ if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
+ goto free_dtms;
+
+ if (type == CMN_TYPE_XP) {
+ input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
+ } else if (type == CMN_TYPE_WP) {
+ int tmp, wp_idx = arm_cmn_wp_idx(event);
+ u32 cfg = arm_cmn_wp_config(event);
+
+ if (dn->wp_event[wp_idx] >= 0)
+ goto free_dtms;
+
+ tmp = dn->wp_event[wp_idx ^ 1];
+ if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
+ CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
+ goto free_dtms;
+
+ input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
+ dn->wp_event[wp_idx] = dtc_idx;
+ writel_relaxed(cfg, dn->pmu_base + CMN_DTM_WPn_CONFIG(wp_idx));
+ } else {
+ unsigned int port = CMN_NODEID_PID(dn->id);
+ unsigned int dev = CMN_NODEID_DEVID(dn->id);
+
+ input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
+ (port << 4) + (dev << 2);
+
+ if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) {
+ int occupid = CMN_EVENT_OCCUPID(event);
+
+ if (dn->occupid_count == 0) {
+ dn->occupid_val = occupid;
+ writel_relaxed(occupid,
+ dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
+ } else if (dn->occupid_val != occupid) {
+ goto free_dtms;
+ }
+ dn->occupid_count++;
+ }
+ }
+
+ arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
+
+ xp->input_sel[dtm_idx] = input_sel;
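+ /* Pair this DTM counter with the chosen global DTC counter */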
+ shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
+ xp->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
+ xp->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
+ xp->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
+ reg = (u64)le32_to_cpu(xp->pmu_config_high) << 32 | xp->pmu_config_low;
+ writeq_relaxed(reg, xp->pmu_base + CMN_DTM_PMU_CONFIG);
+ }
+
+ /* Go go go! */
+ arm_cmn_init_counter(event);
+
+ if (flags & PERF_EF_START)
+ arm_cmn_event_start(event, 0);
+
+ return 0;
+
+free_dtms:
+ arm_cmn_event_clear(cmn, event, i);
+ return -ENOSPC;
+}
+
+static void arm_cmn_event_del(struct perf_event *event, int flags)
+{
+ struct arm_cmn *cmn = to_cmn(event->pmu);
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+ enum cmn_node_type type = CMN_EVENT_TYPE(event);
+
+ arm_cmn_event_stop(event, PERF_EF_UPDATE);
+
+ if (type == CMN_TYPE_DTC)
+ cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
+ else
+ arm_cmn_event_clear(cmn, event, hw->num_dns);
+}
+
+/*
+ * We stop the PMU for both add and read, to avoid skew across DTM counters.
+ * In theory we could use snapshots to read without stopping, but then it
+ * becomes a lot trickier to deal with overflow and racing against interrupts,
+ * plus it seems they don't work properly on some hardware anyway :(
+ */
+static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
+{
+ arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
+}
+
+static void arm_cmn_end_txn(struct pmu *pmu)
+{
+ arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
+}
+
+static int arm_cmn_commit_txn(struct pmu *pmu)
+{
+ arm_cmn_end_txn(pmu);
+ return 0;
+}
+
+static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_cmn *cmn;
+ unsigned int target;
+
+ cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node);
+ if (cpu != cmn->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&cmn->pmu, cpu, target);
+ cmn->cpu = target;
+ return 0;
+}
+
+static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
+{
+ struct arm_cmn_dtc *dtc = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (;;) {
+ u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
+ u64 delta;
+ int i;
+
+ for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
+ if (status & (1U << i)) {
+ ret = IRQ_HANDLED;
+ if (WARN_ON(!dtc->counters[i]))
+ continue;
+ delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
+ local64_add(delta, &dtc->counters[i]->count);
+ }
+ }
+
+ if (status & (1U << CMN_DT_NUM_COUNTERS)) {
+ ret = IRQ_HANDLED;
+ if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
+ delta = arm_cmn_read_cc(dtc);
+ local64_add(delta, &dtc->cycles->count);
+ }
+ }
+
+ writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
+
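+ /* A nonzero irq_friend points onwards to the next DTC sharing this IRQ */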
+ if (!dtc->irq_friend)
+ return ret;
+ dtc += dtc->irq_friend;
+ }
+}
+
+/* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
+static int arm_cmn_init_irqs(struct arm_cmn *cmn)
+{
+ int i, j, irq, err;
+
+ for (i = 0; i < cmn->num_dtcs; i++) {
+ irq = cmn->dtc[i].irq;
+ for (j = i; j--; ) {
+ if (cmn->dtc[j].irq == irq) {
+ cmn->dtc[j].irq_friend = i - j;
+ goto next;
+ }
+ }
+ err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(cmn->dev), &cmn->dtc[i]);
+ if (err)
+ return err;
+
+ err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu));
+ if (err)
+ return err;
+ next:
+ ; /* isn't C great? */
+ }
+ return 0;
+}
+
+static void arm_cmn_init_dtm(struct arm_cmn_node *xp)
+{
+ int i;
+
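+ /* Park the watchpoints in the same quiescent state that event_stop uses */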
+ for (i = 0; i < 4; i++) {
+ xp->wp_event[i] = -1;
+ writeq_relaxed(0, xp->pmu_base + CMN_DTM_WPn_MASK(i));
+ writeq_relaxed(~0ULL, xp->pmu_base + CMN_DTM_WPn_VAL(i));
+ }
+ xp->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
+ xp->dtc = -1;
+}
+
+static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
+{
+ struct arm_cmn_dtc *dtc = cmn->dtc + idx;
+ struct arm_cmn_node *xp;
+
+ dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
+ dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
+ if (dtc->irq < 0)
+ return dtc->irq;
+
+ writel_relaxed(0, dtc->base + CMN_DT_PMCR);
+ writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
+ writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
+
+ /* We do at least know that a DTC's XP must be in that DTC's domain */
+ xp = arm_cmn_node_to_xp(dn);
+ xp->dtc = idx;
+
+ return 0;
+}
+
+static int arm_cmn_node_cmp(const void *a, const void *b)
+{
+ const struct arm_cmn_node *dna = a, *dnb = b;
+ int cmp;
+
+ cmp = dna->type - dnb->type;
+ if (!cmp)
+ cmp = dna->logid - dnb->logid;
+ return cmp;
+}
+
+static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
+{
+ struct arm_cmn_node *dn;
+ int dtc_idx = 0;
+
+ cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
+ if (!cmn->dtc)
+ return -ENOMEM;
+
+ sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);
+
+ cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
+
+ for (dn = cmn->dns; dn < cmn->dns + cmn->num_dns; dn++) {
+ if (dn->type != CMN_TYPE_XP)
+ arm_cmn_init_node_to_xp(cmn, dn);
+ else if (cmn->num_dtcs == 1)
+ dn->dtc = 0;
+
+ if (dn->type == CMN_TYPE_DTC)
+ arm_cmn_init_dtc(cmn, dn, dtc_idx++);
+
+ /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
+ if (dn->type == CMN_TYPE_RND)
+ dn->type = CMN_TYPE_RNI;
+ }
+
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
+
+ return 0;
+}
+
+static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
+{
+ int level;
+ u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);
+
+ node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
+ node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
+ node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
+
+ node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
+
+ if (node->type == CMN_TYPE_CFG)
+ level = 0;
+ else if (node->type == CMN_TYPE_XP)
+ level = 1;
+ else
+ level = 2;
+
+ dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
+ (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
+ node->type, node->logid, offset);
+}
+
+static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+{
+ void __iomem *cfg_region;
+ struct arm_cmn_node cfg, *dn;
+ u16 child_count, child_poff;
+ u32 xp_offset[CMN_MAX_XPS];
+ u64 reg;
+ int i, j;
+
+ cfg_region = cmn->base + rgn_offset;
+ reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
+ cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
+ dev_dbg(cmn->dev, "periph_id_2 revision: %d\n", cmn->rev);
+
+ arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
+ if (cfg.type != CMN_TYPE_CFG)
+ return -ENODEV;
+
+ reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
+ child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
+ child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
+
+ cmn->num_xps = child_count;
+ cmn->num_dns = cmn->num_xps;
+
+ /* Pass 1: visit the XPs, enumerate their children */
+ for (i = 0; i < cmn->num_xps; i++) {
+ reg = readq_relaxed(cfg_region + child_poff + i * 8);
+ xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;
+
+ reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
+ cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
+ }
+
+ /* Cheeky +1 to help terminate pointer-based iteration */
+ cmn->dns = devm_kcalloc(cmn->dev, cmn->num_dns + 1,
+ sizeof(*cmn->dns), GFP_KERNEL);
+ if (!cmn->dns)
+ return -ENOMEM;
+
+ /* Pass 2: now we can actually populate the nodes */
+ dn = cmn->dns;
+ for (i = 0; i < cmn->num_xps; i++) {
+ void __iomem *xp_region = cmn->base + xp_offset[i];
+ struct arm_cmn_node *xp = dn++;
+
+ arm_cmn_init_node_info(cmn, xp_offset[i], xp);
+ arm_cmn_init_dtm(xp);
+ /*
+ * Thanks to the order in which XP logical IDs seem to be
+ * assigned, we can handily infer the mesh X dimension by
+ * looking out for the XP at (0,1) without needing to know
+ * the exact node ID format, which we can later derive.
+ */
+ if (xp->id == (1 << 3))
+ cmn->mesh_x = xp->logid;
+
+ reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
+ child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
+ child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
+
+ for (j = 0; j < child_count; j++) {
+ reg = readq_relaxed(xp_region + child_poff + j * 8);
+ /*
+ * Don't even try to touch anything external, since in general
+ * we haven't a clue how to power up arbitrary CHI requesters.
+ * As of CMN-600r1 these could only be RN-SAMs or CXLAs,
+ * neither of which have any PMU events anyway.
+ * (Actually, CXLAs do seem to have grown some events in r1p2,
+ * but they don't go to regular XP DTMs, and they depend on
+ * secure configuration which we can't easily deal with)
+ */
+ if (reg & CMN_CHILD_NODE_EXTERNAL) {
+ dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
+ continue;
+ }
+
+ arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
+
+ switch (dn->type) {
+ case CMN_TYPE_DTC:
+ cmn->num_dtcs++;
+ dn++;
+ break;
+ /* These guys have PMU events */
+ case CMN_TYPE_DVM:
+ case CMN_TYPE_HNI:
+ case CMN_TYPE_HNF:
+ case CMN_TYPE_SBSX:
+ case CMN_TYPE_RNI:
+ case CMN_TYPE_RND:
+ case CMN_TYPE_CXRA:
+ case CMN_TYPE_CXHA:
+ dn++;
+ break;
+ /* Nothing to see here */
+ case CMN_TYPE_RNSAM:
+ case CMN_TYPE_CXLA:
+ break;
+ /* Something has gone horribly wrong */
+ default:
+ dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
+ return -ENODEV;
+ }
+ }
+ }
+
+ /* Correct for any nodes we skipped */
+ cmn->num_dns = dn - cmn->dns;
+
+ /*
+ * If mesh_x wasn't set during discovery then we never saw
+ * an XP at (0,1), thus we must have an Nx1 configuration.
+ */
+ if (!cmn->mesh_x)
+ cmn->mesh_x = cmn->num_xps;
+ cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
+
+ dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n",
+ cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn));
+
+ return 0;
+}
+
+static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
+{
+ struct resource *cfg, *root;
+
+ cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!cfg)
+ return -EINVAL;
+
+ root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!root)
+ return -EINVAL;
+
+ if (!resource_contains(cfg, root))
+ swap(cfg, root);
+ /*
+ * Note that devm_ioremap_resource() is dumb and won't let the platform
+ * device claim cfg when the ACPI companion device has already claimed
+ * root within it. But since they *are* already both claimed under the
+ * appropriate name, we don't really need to claim them again here anyway.
+ */
+ cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
+ if (!cmn->base)
+ return -ENOMEM;
+
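+ /* The caller wants the root (configuration) node's offset within the region */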
+ return root->start - cfg->start;
+}
+
+static int arm_cmn_of_probe(struct platform_device *pdev, struct arm_cmn *cmn)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 rootnode;
+ int ret;
+
+ cmn->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cmn->base))
+ return PTR_ERR(cmn->base);
+
+ ret = of_property_read_u32(np, "arm,root-node", &rootnode);
+ if (ret)
+ return ret;
+
+ return rootnode;
+}
+
+static int arm_cmn_probe(struct platform_device *pdev)
+{
+ struct arm_cmn *cmn;
+ const char *name;
+ static atomic_t id;
+ int err, rootnode, this_id;
+
+ cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
+ if (!cmn)
+ return -ENOMEM;
+
+ cmn->dev = &pdev->dev;
+ platform_set_drvdata(pdev, cmn);
+
+ if (has_acpi_companion(cmn->dev))
+ rootnode = arm_cmn_acpi_probe(pdev, cmn);
+ else
+ rootnode = arm_cmn_of_probe(pdev, cmn);
+ if (rootnode < 0)
+ return rootnode;
+
+ err = arm_cmn_discover(cmn, rootnode);
+ if (err)
+ return err;
+
+ err = arm_cmn_init_dtcs(cmn);
+ if (err)
+ return err;
+
+ err = arm_cmn_init_irqs(cmn);
+ if (err)
+ return err;
+
+ cmn->cpu = raw_smp_processor_id();
+ cmn->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .attr_groups = arm_cmn_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = arm_cmn_pmu_enable,
+ .pmu_disable = arm_cmn_pmu_disable,
+ .event_init = arm_cmn_event_init,
+ .add = arm_cmn_event_add,
+ .del = arm_cmn_event_del,
+ .start = arm_cmn_event_start,
+ .stop = arm_cmn_event_stop,
+ .read = arm_cmn_event_read,
+ .start_txn = arm_cmn_start_txn,
+ .commit_txn = arm_cmn_commit_txn,
+ .cancel_txn = arm_cmn_end_txn,
+ };
+
+ this_id = atomic_fetch_inc(&id);
+ if (this_id == 0) {
+ name = "arm_cmn";
+ } else {
+ name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
+ if (!name)
+ return -ENOMEM;
+ }
+
+ err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ if (err)
+ return err;
+
+ err = perf_pmu_register(&cmn->pmu, name, -1);
+ if (err)
+ cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ return err;
+}
+
+static int arm_cmn_remove(struct platform_device *pdev)
+{
+ struct arm_cmn *cmn = platform_get_drvdata(pdev);
+ int i;
+
+ writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
+
+ perf_pmu_unregister(&cmn->pmu);
+ cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+
+ for (i = 0; i < cmn->num_dtcs; i++)
+ irq_set_affinity_hint(cmn->dtc[i].irq, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id arm_cmn_of_match[] = {
+ { .compatible = "arm,cmn-600", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id arm_cmn_acpi_match[] = {
+ { "ARMHC600", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
+#endif
+
+static struct platform_driver arm_cmn_driver = {
+ .driver = {
+ .name = "arm-cmn",
+ .of_match_table = of_match_ptr(arm_cmn_of_match),
+ .acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
+ },
+ .probe = arm_cmn_probe,
+ .remove = arm_cmn_remove,
+};
+
+static int __init arm_cmn_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/cmn:online", NULL,
+ arm_cmn_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ arm_cmn_hp_state = ret;
+ ret = platform_driver_register(&arm_cmn_driver);
+ if (ret)
+ cpuhp_remove_multi_state(arm_cmn_hp_state);
+ return ret;
+}
+
+static void __exit arm_cmn_exit(void)
+{
+ platform_driver_unregister(&arm_cmn_driver);
+ cpuhp_remove_multi_state(arm_cmn_hp_state);
+}
+
+module_init(arm_cmn_init);
+module_exit(arm_cmn_exit);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 96ed93cc78e6..98e68ed7db85 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -11,6 +11,7 @@
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -603,18 +604,19 @@ static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
}
/**
- * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
+ * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
+ * from device tree.
*/
-static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
+static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
{
int i = 0, n, cpu;
struct device_node *cpu_node;
- n = of_count_phandle_with_args(dev, "cpus", NULL);
+ n = of_count_phandle_with_args(dev->of_node, "cpus", NULL);
if (n <= 0)
return -ENODEV;
for (; i < n; i++) {
- cpu_node = of_parse_phandle(dev, "cpus", i);
+ cpu_node = of_parse_phandle(dev->of_node, "cpus", i);
if (!cpu_node)
break;
cpu = of_cpu_node_to_id(cpu_node);
@@ -631,6 +633,36 @@ static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
return 0;
}
+/**
+ * dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
+ * from ACPI.
+ */
+static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
+{
+#ifdef CONFIG_ACPI
+ int cpu;
+
+ /*
+ * A DSU PMU node sits inside a cluster parent node along with the CPU
+ * nodes, so we need to find all CPUs that share this PMU's parent.
+ */
+ for_each_possible_cpu(cpu) {
+ struct acpi_device *acpi_dev;
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpu_dev)
+ continue;
+
+ acpi_dev = ACPI_COMPANION(cpu_dev);
+ if (acpi_dev &&
+ acpi_dev->parent == ACPI_COMPANION(dev)->parent)
+ cpumask_set_cpu(cpu, mask);
+ }
+#endif
+
+ return 0;
+}
+
/*
* dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
*/
@@ -676,6 +708,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
{
int irq, rc;
struct dsu_pmu *dsu_pmu;
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
char *name;
static atomic_t pmu_idx = ATOMIC_INIT(-1);
@@ -683,7 +716,16 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
if (IS_ERR(dsu_pmu))
return PTR_ERR(dsu_pmu);
- rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
+ if (IS_ERR_OR_NULL(fwnode))
+ return -ENOENT;
+
+ if (is_of_node(fwnode))
+ rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
+ else if (is_acpi_device_node(fwnode))
+ rc = dsu_pmu_acpi_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus);
+ else
+ return -ENOENT;
+
if (rc) {
dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
return rc;
@@ -752,11 +794,21 @@ static const struct of_device_id dsu_pmu_of_match[] = {
{ .compatible = "arm,dsu-pmu", },
{},
};
+MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dsu_pmu_acpi_match[] = {
+ { "ARMHD500", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, dsu_pmu_acpi_match);
+#endif
static struct platform_driver dsu_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(dsu_pmu_of_match),
+ .acpi_match_table = ACPI_PTR(dsu_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
@@ -826,7 +878,6 @@ static void __exit dsu_pmu_exit(void)
module_init(dsu_pmu_init);
module_exit(dsu_pmu_exit);
-MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index df352b334ea7..cb2f55f450e4 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,8 +26,84 @@
#include <asm/irq_regs.h>
+static int armpmu_count_irq_users(const int irq);
+
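+/*
+ * Indirect the enable/disable/free operations so NMI- and IRQ-based PMU
+ * interrupts can share the same request and teardown paths.
+ */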
+struct pmu_irq_ops {
+ void (*enable_pmuirq)(unsigned int irq);
+ void (*disable_pmuirq)(unsigned int irq);
+ void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
+};
+
+static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
+{
+ free_irq(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmuirq_ops = {
+ .enable_pmuirq = enable_irq,
+ .disable_pmuirq = disable_irq_nosync,
+ .free_pmuirq = armpmu_free_pmuirq
+};
+
+static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
+{
+ free_nmi(irq, per_cpu_ptr(devid, cpu));
+}
+
+static const struct pmu_irq_ops pmunmi_ops = {
+ .enable_pmuirq = enable_nmi,
+ .disable_pmuirq = disable_nmi_nosync,
+ .free_pmuirq = armpmu_free_pmunmi
+};
+
+static void armpmu_enable_percpu_pmuirq(unsigned int irq)
+{
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
+ void __percpu *devid)
+{
+ if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_irq(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmuirq_ops = {
+ .enable_pmuirq = armpmu_enable_percpu_pmuirq,
+ .disable_pmuirq = disable_percpu_irq,
+ .free_pmuirq = armpmu_free_percpu_pmuirq
+};
+
+static void armpmu_enable_percpu_pmunmi(unsigned int irq)
+{
+ if (!prepare_percpu_nmi(irq))
+ enable_percpu_nmi(irq, IRQ_TYPE_NONE);
+}
+
+static void armpmu_disable_percpu_pmunmi(unsigned int irq)
+{
+ disable_percpu_nmi(irq);
+ teardown_percpu_nmi(irq);
+}
+
+static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
+ void __percpu *devid)
+{
+ if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_nmi(irq, devid);
+}
+
+static const struct pmu_irq_ops percpu_pmunmi_ops = {
+ .enable_pmuirq = armpmu_enable_percpu_pmunmi,
+ .disable_pmuirq = armpmu_disable_percpu_pmunmi,
+ .free_pmuirq = armpmu_free_percpu_pmunmi
+};
+
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
+
+static bool has_nmi;
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
@@ -544,6 +620,23 @@ static int armpmu_count_irq_users(const int irq)
return count;
}
+static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+{
+ const struct pmu_irq_ops *ops = NULL;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(cpu_irq, cpu) != irq)
+ continue;
+
+ ops = per_cpu(cpu_irq_ops, cpu);
+ if (ops)
+ break;
+ }
+
+ return ops;
+}
+
void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
@@ -551,18 +644,18 @@ void armpmu_free_irq(int irq, int cpu)
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
- if (!irq_is_percpu_devid(irq))
- free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
- else if (armpmu_count_irq_users(irq) == 1)
- free_percpu_irq(irq, &cpu_armpmu);
+ per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
per_cpu(cpu_irq, cpu) = 0;
+ per_cpu(cpu_irq_ops, cpu) = NULL;
}
int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
const irq_handler_t handler = armpmu_dispatch_irq;
+ const struct pmu_irq_ops *irq_ops;
+
if (!irq)
return 0;
@@ -582,17 +675,44 @@ int armpmu_request_irq(int irq, int cpu)
IRQF_NO_THREAD;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
- err = request_irq(irq, handler, irq_flags, "arm-pmu",
+
+ err = request_nmi(irq, handler, irq_flags, "arm-pmu",
per_cpu_ptr(&cpu_armpmu, cpu));
+
+ /* If cannot get an NMI, get a normal interrupt */
+ if (err) {
+ err = request_irq(irq, handler, irq_flags, "arm-pmu",
+ per_cpu_ptr(&cpu_armpmu, cpu));
+ irq_ops = &pmuirq_ops;
+ } else {
+ has_nmi = true;
+ irq_ops = &pmunmi_ops;
+ }
} else if (armpmu_count_irq_users(irq) == 0) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &cpu_armpmu);
+ err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
+
+ /* If cannot get an NMI, get a normal interrupt */
+ if (err) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &cpu_armpmu);
+ irq_ops = &percpu_pmuirq_ops;
+ } else {
+ has_nmi = true;
+ irq_ops = &percpu_pmunmi_ops;
+ }
+ } else {
+ /* A per-CPU devid IRQ was already requested by another CPU */
+ irq_ops = armpmu_find_irq_ops(irq);
+
+ if (WARN_ON(!irq_ops))
+ err = -EINVAL;
}
if (err)
goto err_out;
per_cpu(cpu_irq, cpu) = irq;
+ per_cpu(cpu_irq_ops, cpu) = irq_ops;
return 0;
err_out:
@@ -625,12 +745,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
per_cpu(cpu_armpmu, cpu) = pmu;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
- else
- enable_irq(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
return 0;
}
@@ -644,12 +760,8 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq) {
- if (irq_is_percpu_devid(irq))
- disable_percpu_irq(irq);
- else
- disable_irq_nosync(irq);
- }
+ if (irq)
+ per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
per_cpu(cpu_armpmu, cpu) = NULL;
@@ -870,8 +982,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (!__oprofile_cpu_pmu)
__oprofile_cpu_pmu = pmu;
- pr_info("enabled with %s PMU driver, %d counters available\n",
- pmu->name, pmu->num_events);
+ pr_info("enabled with %s PMU driver, %d counters available%s\n",
+ pmu->name, pmu->num_events,
+ has_nmi ? ", using NMIs" : "");
return 0;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 25b0c97b3eb0..b59ec22169ab 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -14,6 +14,7 @@
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/types.h>
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index aac9823b0c6b..e116815fa809 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -805,14 +805,17 @@ static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
list_for_each_entry(rentry, &list, node) {
if (resource_type(rentry->res) == IORESOURCE_MEM) {
res = *rentry->res;
+ rentry = NULL;
break;
}
}
+ acpi_dev_free_resource_list(&list);
- if (!rentry->res)
+ if (rentry) {
+ dev_err(dev, "PMU type %d: Fail to find resource\n", type);
return NULL;
+ }
- acpi_dev_free_resource_list(&list);
base = devm_ioremap_resource(dev, &res);
if (IS_ERR(base)) {
dev_err(dev, "PMU type %d: Fail to map resource\n", type);
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index edac28cd25dd..633cf07ba672 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1453,17 +1453,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
}
#if defined(CONFIG_ACPI)
-static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
-{
- struct resource *res = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
- acpi_dev_resource_memory(ares, res);
-
- /* Always tell the ACPI core to skip this resource */
- return 1;
-}
-
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct acpi_device *adev, u32 type)
@@ -1475,6 +1464,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct hw_pmu_info *inf;
void __iomem *dev_csr;
struct resource res;
+ struct resource_entry *rentry;
int enable_bit;
int rc;
@@ -1483,11 +1473,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
return NULL;
INIT_LIST_HEAD(&resource_list);
- rc = acpi_dev_get_resources(adev, &resource_list,
- acpi_pmu_dev_add_resource, &res);
+ rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (rc <= 0) {
+ dev_err(dev, "PMU type %d: No resources found\n", type);
+ return NULL;
+ }
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ res = *rentry->res;
+ rentry = NULL;
+ break;
+ }
+ }
acpi_dev_free_resource_list(&resource_list);
- if (rc < 0) {
- dev_err(dev, "PMU type %d: No resource address found\n", type);
+
+ if (rentry) {
+ dev_err(dev, "PMU type %d: No memory resource found\n", type);
return NULL;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index de9362c25c07..01b53f86004c 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -49,6 +49,17 @@ config PHY_XGENE
help
This option enables support for APM X-Gene SoC multi-purpose PHY.
+config USB_LGM_PHY
+ tristate "INTEL Lightning Mountain USB PHY Driver"
+ depends on USB_SUPPORT
+ select USB_PHY
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+ help
+ Enable this to support the Intel DWC3 USB PHY. This driver provides
+ an interface for interacting with the USB Gen-II and USB 3.x PHYs
+ that are part of the Intel network SoC.
+
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c27408e4daae..6eb2916773c5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o
obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
+obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o
obj-y += allwinner/ \
amlogic/ \
broadcom/ \
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index 14f45bc35cc5..47b029fbebbd 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -13,6 +13,7 @@
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -258,29 +259,24 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = {
**************************************************/
static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr,
- u32 mask, u32 value, unsigned long timeout)
+ u32 mask, u32 value, int usec)
{
- unsigned long deadline = jiffies + timeout;
u32 val;
+ int ret;
- do {
- val = readl(addr);
- if ((val & mask) == value)
- return 0;
- cpu_relax();
- udelay(10);
- } while (!time_after_eq(jiffies, deadline));
+ ret = readl_poll_timeout_atomic(addr, val, ((val & mask) == value),
+ 10, usec);
+ if (ret)
+ dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
- dev_err(usb3->dev, "Timeout waiting for register %p\n", addr);
-
- return -EBUSY;
+ return ret;
}
static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
{
return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL,
0x0100, 0x0000,
- usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US));
+ BCM_NS_USB3_MII_MNG_TIMEOUT_US);
}
static int bcm_ns_usb3_platform_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 527625912b78..9630ac127366 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -87,17 +88,11 @@ static const unsigned int usb_extcon_cable[] = {
static inline int pll_lock_stat(u32 usb_reg, int reg_mask,
struct ns2_phy_driver *driver)
{
- int retry = PLL_LOCK_RETRY;
u32 val;
- do {
- udelay(1);
- val = readl(driver->icfgdrd_regs + usb_reg);
- if (val & reg_mask)
- return 0;
- } while (--retry > 0);
-
- return -EBUSY;
+ return readl_poll_timeout_atomic(driver->icfgdrd_regs + usb_reg,
+ val, (val & reg_mask), 1,
+ PLL_LOCK_RETRY);
}
static int ns2_drd_phy_init(struct phy *phy)
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index 77c025a0720c..c3e99ad17487 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
@@ -109,19 +110,15 @@ static inline void bcm_usb_reg32_setbits(void __iomem *addr, uint32_t set)
static int bcm_usb_pll_lock_check(void __iomem *addr, u32 bit)
{
- int retry;
- u32 rd_data;
+ u32 data;
+ int ret;
- retry = PLL_LOCK_RETRY_COUNT;
- do {
- rd_data = readl(addr);
- if (rd_data & bit)
- return 0;
- udelay(1);
- } while (--retry > 0);
+ ret = readl_poll_timeout_atomic(addr, data, (data & bit), 1,
+ PLL_LOCK_RETRY_COUNT);
+ if (ret)
+ pr_err("%s: FAIL\n", __func__);
- pr_err("%s: FAIL\n", __func__);
- return -ETIMEDOUT;
+ return ret;
}
static int bcm_usb_ss_phy_init(struct bcm_usb_phy_cfg *phy_cfg)
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
index 016514e4aa54..88e239adc3b8 100644
--- a/drivers/phy/cadence/phy-cadence-salvo.c
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -97,7 +97,7 @@ struct cdns_reg_pairs {
struct cdns_salvo_data {
u8 reg_offset_shift;
- struct cdns_reg_pairs *init_sequence_val;
+ const struct cdns_reg_pairs *init_sequence_val;
u8 init_sequence_length;
};
@@ -126,7 +126,7 @@ static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy,
* Below bringup sequence pair are from Cadence PHY's User Guide
* and NXP platform tuning results.
*/
-static struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
+static const struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
{0x0830, PHY_PMA_CMN_CTRL1},
{0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL},
{0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR},
@@ -217,7 +217,7 @@ static int cdns_salvo_phy_init(struct phy *phy)
return ret;
for (i = 0; i < data->init_sequence_length; i++) {
- struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
+ const struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val);
}
@@ -251,7 +251,7 @@ static int cdns_salvo_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops cdns_salvo_phy_ops = {
+static const struct phy_ops cdns_salvo_phy_ops = {
.init = cdns_salvo_phy_init,
.power_on = cdns_salvo_phy_power_on,
.power_off = cdns_salvo_phy_power_off,
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index faed652b73f7..453ef26fa1c7 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -172,10 +172,10 @@ struct cdns_sierra_data {
u32 pcie_ln_regs;
u32 usb_cmn_regs;
u32 usb_ln_regs;
- struct cdns_reg_pairs *pcie_cmn_vals;
- struct cdns_reg_pairs *pcie_ln_vals;
- struct cdns_reg_pairs *usb_cmn_vals;
- struct cdns_reg_pairs *usb_ln_vals;
+ const struct cdns_reg_pairs *pcie_cmn_vals;
+ const struct cdns_reg_pairs *pcie_ln_vals;
+ const struct cdns_reg_pairs *usb_cmn_vals;
+ const struct cdns_reg_pairs *usb_ln_vals;
};
struct cdns_regmap_cdb_context {
@@ -233,7 +233,7 @@ static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val)
.reg_read = cdns_regmap_read, \
}
-static struct regmap_config cdns_sierra_lane_cdb_config[] = {
+static const struct regmap_config cdns_sierra_lane_cdb_config[] = {
SIERRA_LANE_CDB_REGMAP_CONF("0"),
SIERRA_LANE_CDB_REGMAP_CONF("1"),
SIERRA_LANE_CDB_REGMAP_CONF("2"),
@@ -252,7 +252,7 @@ static struct regmap_config cdns_sierra_lane_cdb_config[] = {
SIERRA_LANE_CDB_REGMAP_CONF("15"),
};
-static struct regmap_config cdns_sierra_common_cdb_config = {
+static const struct regmap_config cdns_sierra_common_cdb_config = {
.name = "sierra_common_cdb",
.reg_stride = 1,
.fast_io = true,
@@ -260,7 +260,7 @@ static struct regmap_config cdns_sierra_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static struct regmap_config cdns_sierra_phy_config_ctrl_config = {
+static const struct regmap_config cdns_sierra_phy_config_ctrl_config = {
.name = "sierra_phy_config_ctrl",
.reg_stride = 1,
.fast_io = true,
@@ -274,7 +274,7 @@ static int cdns_sierra_phy_init(struct phy *gphy)
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
struct regmap *regmap;
int i, j;
- struct cdns_reg_pairs *cmn_vals, *ln_vals;
+ const struct cdns_reg_pairs *cmn_vals, *ln_vals;
u32 num_cmn_regs, num_ln_regs;
/* Initialise the PHY registers, unless auto configured */
@@ -654,7 +654,7 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
}
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
-static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
@@ -663,7 +663,7 @@ static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
};
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
-static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
@@ -674,7 +674,7 @@ static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
-static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
+static const struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
{0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
{0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
@@ -682,7 +682,7 @@ static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
};
/* refclk100MHz_20b_USB_ln_ext_ssc */
-static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
+static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
{0x000F, SIERRA_DET_STANDEC_B_PREG},
{0x55A5, SIERRA_DET_STANDEC_C_PREG},
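Sierra receives the same const treatment for its regmap_config arrays and register-sequence tables; devm_regmap_init() already takes a const struct regmap_config *, so the change is purely a hardening of the data.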
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 7116127358ee..f310e15d94cb 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -25,11 +25,14 @@
#define REF_CLK_19_2MHz 19200000
#define REF_CLK_25MHz 25000000
-#define DEFAULT_NUM_LANES 4
#define MAX_NUM_LANES 4
#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 6
+
#define POLL_TIMEOUT_US 5000
+#define PLL_LOCK_TIMEOUT 100000
#define TORRENT_COMMON_CDB_OFFSET 0x0
@@ -79,6 +82,8 @@
#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU
#define CMN_PLLSM1_PLLPRE_TMR 0x0032U
#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U
+#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U
+#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U
#define CMN_BGCAL_INIT_TMR 0x0064U
#define CMN_BGCAL_ITER_TMR 0x0065U
#define CMN_IBCAL_INIT_TMR 0x0074U
@@ -99,6 +104,14 @@
#define CMN_PLL0_LOCK_REFCNT_START 0x009CU
#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU
#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU
+#define CMN_PLL0_INTDIV_M1 0x00A0U
+#define CMN_PLL0_FRACDIVH_M1 0x00A2U
+#define CMN_PLL0_HIGH_THR_M1 0x00A3U
+#define CMN_PLL0_DSM_DIAG_M1 0x00A4U
+#define CMN_PLL0_SS_CTRL1_M1 0x00A8U
+#define CMN_PLL0_SS_CTRL2_M1 0x00A9U
+#define CMN_PLL0_SS_CTRL3_M1 0x00AAU
+#define CMN_PLL0_SS_CTRL4_M1 0x00ABU
#define CMN_PLL1_VCOCAL_TCTRL 0x00C2U
#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U
#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U
@@ -116,8 +129,10 @@
#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU
#define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU
#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU
+#define CMN_TXPUCAL_TUNE 0x0103U
#define CMN_TXPUCAL_INIT_TMR 0x0104U
#define CMN_TXPUCAL_ITER_TMR 0x0105U
+#define CMN_TXPDCAL_TUNE 0x010BU
#define CMN_TXPDCAL_INIT_TMR 0x010CU
#define CMN_TXPDCAL_ITER_TMR 0x010DU
#define CMN_RXCAL_INIT_TMR 0x0114U
@@ -131,24 +146,31 @@
#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U
#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U
#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U
+#define CMN_PDIAG_PLL0_CTRL_M1 0x01B0U
+#define CMN_PDIAG_PLL0_CLK_SEL_M1 0x01B1U
#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U
#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U
+#define CMN_PDIAG_PLL0_FILT_PADJ_M1 0x01B6U
#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U
#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U
#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U
#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U
#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U
+#define CMN_DIAG_BIAS_OVRD1 0x01E1U
/* PMA TX Lane registers */
#define TX_TXCC_CTRL 0x0040U
#define TX_TXCC_CPOST_MULT_00 0x004CU
+#define TX_TXCC_CPOST_MULT_01 0x004DU
#define TX_TXCC_MGNFS_MULT_000 0x0050U
#define DRV_DIAG_TX_DRV 0x00C6U
#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U
#define XCVR_DIAG_HSCLK_SEL 0x00E6U
#define XCVR_DIAG_HSCLK_DIV 0x00E7U
#define XCVR_DIAG_BIDI_CTRL 0x00EAU
+#define XCVR_DIAG_PSC_OVRD 0x00EBU
#define TX_PSC_A0 0x0100U
+#define TX_PSC_A1 0x0101U
#define TX_PSC_A2 0x0102U
#define TX_PSC_A3 0x0103U
#define TX_RCVDET_ST_TMR 0x0123U
@@ -157,23 +179,49 @@
/* PMA RX Lane registers */
#define RX_PSC_A0 0x0000U
+#define RX_PSC_A1 0x0001U
#define RX_PSC_A2 0x0002U
#define RX_PSC_A3 0x0003U
#define RX_PSC_CAL 0x0006U
+#define RX_CDRLF_CNFG 0x0080U
+#define RX_CDRLF_CNFG3 0x0082U
+#define RX_SIGDET_HL_FILT_TMR 0x0090U
#define RX_REE_GCSM1_CTRL 0x0108U
+#define RX_REE_GCSM1_EQENM_PH1 0x0109U
+#define RX_REE_GCSM1_EQENM_PH2 0x010AU
#define RX_REE_GCSM2_CTRL 0x0110U
#define RX_REE_PERGCSM_CTRL 0x0118U
+#define RX_REE_ATTEN_THR 0x0149U
+#define RX_REE_TAP1_CLIP 0x0171U
+#define RX_REE_TAP2TON_CLIP 0x0172U
+#define RX_REE_SMGM_CTRL1 0x0177U
+#define RX_REE_SMGM_CTRL2 0x0178U
+#define RX_DIAG_DFE_CTRL 0x01E0U
+#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U
+#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U
+#define RX_DIAG_NQST_CTRL 0x01E5U
+#define RX_DIAG_SIGDET_TUNE 0x01E8U
+#define RX_DIAG_PI_RATE 0x01F4U
+#define RX_DIAG_PI_CAP 0x01F5U
+#define RX_DIAG_ACYA 0x01FFU
/* PHY PCS common registers */
#define PHY_PLL_CFG 0x000EU
+#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U
+#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U
+#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U
/* PHY PMA common registers */
+#define PHY_PMA_CMN_CTRL1 0x0000U
#define PHY_PMA_CMN_CTRL2 0x0001U
#define PHY_PMA_PLL_RAW_CTRL 0x0003U
static const struct reg_field phy_pll_cfg =
REG_FIELD(PHY_PLL_CFG, 0, 1);
+static const struct reg_field phy_pma_cmn_ctrl_1 =
+ REG_FIELD(PHY_PMA_CMN_CTRL1, 0, 0);
+
static const struct reg_field phy_pma_cmn_ctrl_2 =
REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7);
@@ -183,14 +231,28 @@ static const struct reg_field phy_pma_pll_raw_ctrl =
static const struct reg_field phy_reset_ctrl =
REG_FIELD(PHY_RESET, 8, 8);
-static const struct of_device_id cdns_torrent_phy_of_match[];
+enum cdns_torrent_phy_type {
+ TYPE_NONE,
+ TYPE_DP,
+ TYPE_PCIE,
+ TYPE_SGMII,
+ TYPE_QSGMII,
+ TYPE_USB,
+};
+
+enum cdns_torrent_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
struct cdns_torrent_inst {
struct phy *phy;
u32 mlane;
- u32 phy_type;
+ enum cdns_torrent_phy_type phy_type;
u32 num_lanes;
struct reset_control *lnk_rst;
+ enum cdns_torrent_ssc_mode ssc_mode;
};
struct cdns_torrent_phy {
@@ -198,11 +260,13 @@ struct cdns_torrent_phy {
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
struct reset_control *phy_rst;
+ struct reset_control *apb_rst;
struct device *dev;
struct clk *clk;
unsigned long ref_clk_rate;
struct cdns_torrent_inst phys[MAX_NUM_LANES];
int nsubnodes;
+ const struct cdns_torrent_data *init_data;
struct regmap *regmap;
struct regmap *regmap_common_cdb;
struct regmap *regmap_phy_pcs_common_cdb;
@@ -211,6 +275,7 @@ struct cdns_torrent_phy {
struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES];
struct regmap *regmap_dptx_phy_reg;
struct regmap_field *phy_pll_cfg;
+ struct regmap_field *phy_pma_cmn_ctrl_1;
struct regmap_field *phy_pma_cmn_ctrl_2;
struct regmap_field *phy_pma_pll_raw_ctrl;
struct regmap_field *phy_reset_ctrl;
@@ -223,8 +288,8 @@ enum phy_powerstate {
POWERSTATE_A3 = 3,
};
+static int cdns_torrent_phy_init(struct phy *phy);
static int cdns_torrent_dp_init(struct phy *phy);
-static int cdns_torrent_dp_exit(struct phy *phy);
static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
u32 num_lanes);
static
@@ -254,17 +319,38 @@ static int cdns_torrent_phy_on(struct phy *phy);
static int cdns_torrent_phy_off(struct phy *phy);
static const struct phy_ops cdns_torrent_phy_ops = {
- .init = cdns_torrent_dp_init,
- .exit = cdns_torrent_dp_exit,
+ .init = cdns_torrent_phy_init,
.configure = cdns_torrent_dp_configure,
.power_on = cdns_torrent_phy_on,
.power_off = cdns_torrent_phy_off,
.owner = THIS_MODULE,
};
+struct cdns_reg_pairs {
+ u32 val;
+ u32 off;
+};
+
+struct cdns_torrent_vals {
+ struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
struct cdns_torrent_data {
- u8 block_offset_shift;
- u8 reg_offset_shift;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *tx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_torrent_vals *rx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
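The reworked cdns_torrent_data indexes every table by [own PHY type][neighbour PHY type][SSC mode], with TYPE_NONE as the neighbour for single-link configurations and a NULL entry meaning "no overrides for this combination". A hedged sketch of the lookup-and-apply pattern, using simplified, hypothetical types:

#include <linux/regmap.h>

#define DEMO_NUM_TYPE	6
#define DEMO_NUM_SSC	3

struct demo_reg_pair {
	u32 val;
	u32 off;
};

struct demo_vals {
	const struct demo_reg_pair *reg_pairs;
	u32 num_regs;
};

static const struct demo_vals
*demo_table[DEMO_NUM_TYPE][DEMO_NUM_TYPE][DEMO_NUM_SSC];

static void demo_apply(struct regmap *rm, int t1, int t2, int ssc)
{
	const struct demo_vals *v = demo_table[t1][t2][ssc];
	u32 i;

	if (!v)		/* NULL entry: nothing to program */
		return;

	for (i = 0; i < v->num_regs; i++)
		regmap_write(rm, v->reg_pairs[i].off, v->reg_pairs[i].val);
}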
@@ -331,21 +417,21 @@ static int cdns_regmap_dptx_read(void *context, unsigned int reg,
.reg_read = cdns_regmap_read, \
}
-static struct regmap_config cdns_torrent_tx_lane_cdb_config[] = {
+static const struct regmap_config cdns_torrent_tx_lane_cdb_config[] = {
TORRENT_TX_LANE_CDB_REGMAP_CONF("0"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("1"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("2"),
TORRENT_TX_LANE_CDB_REGMAP_CONF("3"),
};
-static struct regmap_config cdns_torrent_rx_lane_cdb_config[] = {
+static const struct regmap_config cdns_torrent_rx_lane_cdb_config[] = {
TORRENT_RX_LANE_CDB_REGMAP_CONF("0"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("1"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("2"),
TORRENT_RX_LANE_CDB_REGMAP_CONF("3"),
};
-static struct regmap_config cdns_torrent_common_cdb_config = {
+static const struct regmap_config cdns_torrent_common_cdb_config = {
.name = "torrent_common_cdb",
.reg_stride = 1,
.fast_io = true,
@@ -353,7 +439,7 @@ static struct regmap_config cdns_torrent_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
+static const struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
.name = "torrent_phy_pcs_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
@@ -361,7 +447,7 @@ static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
+static const struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
.name = "torrent_phy_pma_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
@@ -369,7 +455,7 @@ static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static struct regmap_config cdns_torrent_dptx_phy_config = {
+static const struct regmap_config cdns_torrent_dptx_phy_config = {
.name = "torrent_dptx_phy",
.reg_stride = 1,
.fast_io = true,
@@ -848,19 +934,6 @@ static int cdns_torrent_dp_init(struct phy *phy)
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
- ret = clk_prepare_enable(cdns_phy->clk);
- if (ret) {
- dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
- return ret;
- }
-
- cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
- if (!(cdns_phy->ref_clk_rate)) {
- dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
- clk_disable_unprepare(cdns_phy->clk);
- return -EINVAL;
- }
-
switch (cdns_phy->ref_clk_rate) {
case REF_CLK_19_2MHz:
case REF_CLK_25MHz:
@@ -920,14 +993,6 @@ static int cdns_torrent_dp_init(struct phy *phy)
return ret;
}
-static int cdns_torrent_dp_exit(struct phy *phy)
-{
- struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
-
- clk_disable_unprepare(cdns_phy->clk);
- return 0;
-}
-
static
int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
{
@@ -1543,15 +1608,34 @@ static int cdns_torrent_phy_on(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ u32 read_val;
int ret;
- /* Take the PHY out of reset */
- ret = reset_control_deassert(cdns_phy->phy_rst);
- if (ret)
+ if (cdns_phy->nsubnodes == 1) {
+ /* Take the PHY lane group out of reset */
+ reset_control_deassert(inst->lnk_rst);
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL1[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1,
+ read_val, read_val, 1000,
+ PLL_LOCK_TIMEOUT);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n");
return ret;
+ }
+
+ mdelay(10);
- /* Take the PHY lane group out of reset */
- return reset_control_deassert(inst->lnk_rst);
+ return 0;
}
static int cdns_torrent_phy_off(struct phy *phy)
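regmap_field_read_poll_timeout() used above re-reads a regmap field into the given variable until the condition is true or timeout_us elapses, sleeping sleep_us between reads, and returns -ETIMEDOUT (or a read error) on failure. A minimal sketch, assuming a one-bit ready field:

#include <linux/regmap.h>

/* Poll every 1 ms, give up after 100 ms; 'val' is the last value read. */
static int demo_wait_cmn_ready(struct regmap_field *ready)
{
	u32 val;

	return regmap_field_read_poll_timeout(ready, val, val,
					      1000, 100000);
}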
@@ -1560,6 +1644,9 @@ static int cdns_torrent_phy_off(struct phy *phy)
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
int ret;
+ if (cdns_phy->nsubnodes != 1)
+ return 0;
+
ret = reset_control_assert(cdns_phy->phy_rst);
if (ret)
return ret;
@@ -1585,7 +1672,24 @@ static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
return devm_regmap_init(dev, NULL, ctx, config);
}
-static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy)
+static int cdns_torrent_dp_regfield_init(struct cdns_torrent_phy *cdns_phy)
+{
+ struct device *dev = cdns_phy->dev;
+ struct regmap_field *field;
+ struct regmap *regmap;
+
+ regmap = cdns_phy->regmap_dptx_phy_reg;
+ field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_RESET reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_reset_ctrl = field;
+
+ return 0;
+}
+
+static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy)
{
struct device *dev = cdns_phy->dev;
struct regmap_field *field;
@@ -1600,6 +1704,14 @@ static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy)
cdns_phy->phy_pll_cfg = field;
regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL1 reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ cdns_phy->phy_pma_cmn_ctrl_1 = field;
+
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n");
@@ -1615,28 +1727,44 @@ static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy)
}
cdns_phy->phy_pma_pll_raw_ctrl = field;
- regmap = cdns_phy->regmap_dptx_phy_reg;
- field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl);
- if (IS_ERR(field)) {
- dev_err(dev, "PHY_RESET reg field init failed\n");
- return PTR_ERR(field);
+ return 0;
+}
+
+static int cdns_torrent_dp_regmap_init(struct cdns_torrent_phy *cdns_phy)
+{
+ void __iomem *base = cdns_phy->base;
+ struct device *dev = cdns_phy->dev;
+ struct regmap *regmap;
+ u8 reg_offset_shift;
+ u32 block_offset;
+
+ reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
+
+ block_offset = TORRENT_DPTX_PHY_OFFSET;
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_torrent_dptx_phy_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init DPTX PHY regmap\n");
+ return PTR_ERR(regmap);
}
- cdns_phy->phy_reset_ctrl = field;
+ cdns_phy->regmap_dptx_phy_reg = regmap;
return 0;
}
-static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy,
- void __iomem *sd_base,
- void __iomem *base,
- u8 block_offset_shift,
- u8 reg_offset_shift)
+static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy)
{
+ void __iomem *sd_base = cdns_phy->sd_base;
+ u8 block_offset_shift, reg_offset_shift;
struct device *dev = cdns_phy->dev;
struct regmap *regmap;
u32 block_offset;
int i;
+ block_offset_shift = cdns_phy->init_data->block_offset_shift;
+ reg_offset_shift = cdns_phy->init_data->reg_offset_shift;
+
for (i = 0; i < MAX_NUM_LANES; i++) {
block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
@@ -1691,43 +1819,282 @@ static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy,
}
cdns_phy->regmap_phy_pma_common_cdb = regmap;
- block_offset = TORRENT_DPTX_PHY_OFFSET;
- regmap = cdns_regmap_init(dev, base, block_offset,
- reg_offset_shift,
- &cdns_torrent_dptx_phy_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "Failed to init DPTX PHY regmap\n");
- return PTR_ERR(regmap);
+ return 0;
+}
+
+static int cdns_torrent_phy_init(struct phy *phy)
+{
+ struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ const struct cdns_torrent_data *init_data = cdns_phy->init_data;
+ struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
+ enum cdns_torrent_phy_type phy_type = inst->phy_type;
+ enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
+ struct cdns_torrent_vals *pcs_cmn_vals;
+ struct cdns_reg_pairs *reg_pairs;
+ struct regmap *regmap;
+ u32 num_regs;
+ int i, j;
+
+ if (cdns_phy->nsubnodes > 1)
+ return 0;
+
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_init(phy);
+
+ /*
+ * Spread spectrum generation is neither required nor supported
+ * for SGMII/QSGMII.
+ */
+ if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII)
+ ssc = NO_SSC;
+
+ /* PHY configuration specific registers for single link */
+ link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+ /*
+ * The first array value in link_cmn_vals must be for the
+ * PHY_PLL_CFG register.
+ */
+ regmap_field_write(cdns_phy->phy_pll_cfg, reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc];
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA common registers configurations */
+ cmn_vals = init_data->cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = init_data->tx_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = init_data->rx_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < inst->num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + inst->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
}
- cdns_phy->regmap_dptx_phy_reg = regmap;
+
+ return 0;
+}
+
+static
+int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
+{
+ const struct cdns_torrent_data *init_data = cdns_phy->init_data;
+ struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
+ struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
+ enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type;
+ struct cdns_torrent_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ struct cdns_reg_pairs *reg_pairs;
+ enum cdns_torrent_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Maximum 2 links (subnodes) are supported */
+ if (cdns_phy->nsubnodes != 2)
+ return -EINVAL;
+
+ phy_t1 = cdns_phy->phys[0].phy_type;
+ phy_t2 = cdns_phy->phys[1].phy_type;
+
+ /*
+ * First configure the PHY for the first link with phy_t1. Get the
+ * array values as [phy_t1][phy_t2][ssc].
+ */
+ for (node = 0; node < cdns_phy->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+ * If the first link with phy_t1 is configured, then
+ * configure the PHY for the second link with phy_t2.
+ * Get the array values as [phy_t2][phy_t1][ssc].
+ */
+ tmp_phy_type = phy_t1;
+ phy_t1 = phy_t2;
+ phy_t2 = tmp_phy_type;
+ }
+
+ mlane = cdns_phy->phys[node].mlane;
+ ssc = cdns_phy->phys[node].ssc_mode;
+ num_lanes = cdns_phy->phys[node].num_lanes;
+
+ /*
+ * PHY configuration specific registers:
+ * link_cmn_vals depend on the combination of PHY types being
+ * configured and are common to both PHY types, so the array
+ * values should be the same for [phy_t1][phy_t2][ssc] and
+ * [phy_t2][phy_t1][ssc].
+ * xcvr_diag_vals also depend on the combination of PHY types
+ * being configured, but they can differ per PHY type and are
+ * applied per lane.
+ */
+ link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc];
+ if (link_cmn_vals) {
+ reg_pairs = link_cmn_vals->reg_pairs;
+ num_regs = link_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+
+ /*
+ * The first array value in link_cmn_vals must be for the
+ * PHY_PLL_CFG register.
+ */
+ regmap_field_write(cdns_phy->phy_pll_cfg,
+ reg_pairs[0].val);
+
+ for (i = 1; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc];
+ if (xcvr_diag_vals) {
+ reg_pairs = xcvr_diag_vals->reg_pairs;
+ num_regs = xcvr_diag_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA common registers configurations */
+ cmn_vals = init_data->cmn_vals[phy_t1][phy_t2][ssc];
+ if (cmn_vals) {
+ reg_pairs = cmn_vals->reg_pairs;
+ num_regs = cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
+ /* PMA TX lane registers configurations */
+ tx_ln_vals = init_data->tx_ln_vals[phy_t1][phy_t2][ssc];
+ if (tx_ln_vals) {
+ reg_pairs = tx_ln_vals->reg_pairs;
+ num_regs = tx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ /* PMA RX lane registers configurations */
+ rx_ln_vals = init_data->rx_ln_vals[phy_t1][phy_t2][ssc];
+ if (rx_ln_vals) {
+ reg_pairs = rx_ln_vals->reg_pairs;
+ num_regs = rx_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off,
+ reg_pairs[j].val);
+ }
+ }
+
+ reset_control_deassert(cdns_phy->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(cdns_phy->phy_rst);
+ if (ret)
+ return ret;
return 0;
}
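Note the index swap for node 1: pair-common tables (link_cmn_vals) resolve to the same entry whether indexed [phy_t1][phy_t2] or [phy_t2][phy_t1], while the per-type tables pick up the counterpart entry for the second link. Each link's lane-group reset is released as its registers are programmed, and the top-level PHY reset is deasserted only after both links are configured.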
static int cdns_torrent_phy_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct cdns_torrent_phy *cdns_phy;
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
- const struct of_device_id *match;
- struct cdns_torrent_data *data;
+ const struct cdns_torrent_data *data;
struct device_node *child;
int ret, subnodes, node = 0, i;
+ u32 total_num_lanes = 0;
+ u8 init_dp_regmap = 0;
+ u32 phy_type;
/* Get init data for this PHY */
- match = of_match_device(cdns_torrent_phy_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_torrent_data *)match->data;
-
cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
if (!cdns_phy)
return -ENOMEM;
dev_set_drvdata(dev, cdns_phy);
cdns_phy->dev = dev;
+ cdns_phy->init_data = data;
cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0);
if (IS_ERR(cdns_phy->phy_rst)) {
@@ -1736,14 +2103,20 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
return PTR_ERR(cdns_phy->phy_rst);
}
+ cdns_phy->apb_rst = devm_reset_control_get_optional(dev, "torrent_apb");
+ if (IS_ERR(cdns_phy->apb_rst)) {
+ dev_err(dev, "%s: failed to get apb reset\n",
+ dev->of_node->full_name);
+ return PTR_ERR(cdns_phy->apb_rst);
+ }
+
cdns_phy->clk = devm_clk_get(dev, "refclk");
if (IS_ERR(cdns_phy->clk)) {
dev_err(dev, "phy ref clock not found\n");
return PTR_ERR(cdns_phy->clk);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
+ cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cdns_phy->sd_base))
return PTR_ERR(cdns_phy->sd_base);
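Two probe-time modernizations land here: of_device_get_match_data() replaces the of_match_device() lookup plus cast, and devm_platform_ioremap_resource() folds platform_get_resource() + devm_ioremap_resource() into one call. A minimal sketch of both (struct demo_data is hypothetical):

#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_data {
	u32 flags;	/* hypothetical per-compatible data */
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_data *data;
	void __iomem *base;

	data = of_device_get_match_data(&pdev->dev);	/* NULL if absent */
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}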
@@ -1751,14 +2124,39 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
if (subnodes == 0) {
dev_err(dev, "No available link subnodes found\n");
return -EINVAL;
- } else if (subnodes != 1) {
- dev_err(dev, "Driver supports only one link subnode.\n");
+ }
+
+ ret = cdns_torrent_regmap_init(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = cdns_torrent_regfield_init(cdns_phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(cdns_phy->clk);
+ if (ret) {
+ dev_err(cdns_phy->dev, "Failed to prepare ref clock\n");
+ return ret;
+ }
+
+ cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk);
+ if (!(cdns_phy->ref_clk_rate)) {
+ dev_err(cdns_phy->dev, "Failed to get ref clock rate\n");
+ clk_disable_unprepare(cdns_phy->clk);
return -EINVAL;
}
+ /* Enable APB */
+ reset_control_deassert(cdns_phy->apb_rst);
+
for_each_available_child_of_node(dev->of_node, child) {
struct phy *gphy;
+ /* PHY subnode name must be 'phy'. */
+ if (!(of_node_name_eq(child, "phy")))
+ continue;
+
cdns_phy->phys[node].lnk_rst =
of_reset_control_array_get_exclusive(child);
if (IS_ERR(cdns_phy->phys[node].lnk_rst)) {
@@ -1776,27 +2174,57 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
goto put_child;
}
- if (cdns_phy->phys[node].mlane != 0) {
- dev_err(dev,
- "%s: Driver supports only lane-0 as master lane.\n",
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type)) {
+ dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n",
child->full_name);
ret = -EINVAL;
goto put_child;
}
- if (of_property_read_u32(child, "cdns,phy-type",
- &cdns_phy->phys[node].phy_type)) {
- dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n",
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ cdns_phy->phys[node].phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_DP:
+ cdns_phy->phys[node].phy_type = TYPE_DP;
+ break;
+ case PHY_TYPE_SGMII:
+ cdns_phy->phys[node].phy_type = TYPE_SGMII;
+ break;
+ case PHY_TYPE_QSGMII:
+ cdns_phy->phys[node].phy_type = TYPE_QSGMII;
+ break;
+ case PHY_TYPE_USB3:
+ cdns_phy->phys[node].phy_type = TYPE_USB;
+ break;
+ default:
+ dev_err(dev, "Unsupported protocol\n");
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (of_property_read_u32(child, "cdns,num-lanes",
+ &cdns_phy->phys[node].num_lanes)) {
+ dev_err(dev, "%s: No \"cdns,num-lanes\"-property.\n",
child->full_name);
ret = -EINVAL;
goto put_child;
}
- cdns_phy->phys[node].num_lanes = DEFAULT_NUM_LANES;
- of_property_read_u32(child, "cdns,num-lanes",
- &cdns_phy->phys[node].num_lanes);
+ total_num_lanes += cdns_phy->phys[node].num_lanes;
+
+ /* Get SSC mode */
+ cdns_phy->phys[node].ssc_mode = NO_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode",
+ &cdns_phy->phys[node].ssc_mode);
+
+ gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops);
+ if (IS_ERR(gphy)) {
+ ret = PTR_ERR(gphy);
+ goto put_child;
+ }
- if (cdns_phy->phys[node].phy_type == PHY_TYPE_DP) {
+ if (cdns_phy->phys[node].phy_type == TYPE_DP) {
switch (cdns_phy->phys[node].num_lanes) {
case 1:
case 2:
@@ -1833,30 +2261,34 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
}
/* DPTX registers */
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cdns_phy->base = devm_ioremap_resource(&pdev->dev,
- regs);
+ cdns_phy->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cdns_phy->base)) {
ret = PTR_ERR(cdns_phy->base);
goto put_child;
}
- gphy = devm_phy_create(dev, child,
- &cdns_torrent_phy_ops);
- if (IS_ERR(gphy)) {
- ret = PTR_ERR(gphy);
- goto put_child;
+ if (!init_dp_regmap) {
+ ret = cdns_torrent_dp_regmap_init(cdns_phy);
+ if (ret)
+ goto put_child;
+
+ ret = cdns_torrent_dp_regfield_init(cdns_phy);
+ if (ret)
+ goto put_child;
+
+ init_dp_regmap++;
}
dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
cdns_phy->phys[node].num_lanes,
cdns_phy->max_bit_rate / 1000,
cdns_phy->max_bit_rate % 1000);
- } else {
- dev_err(dev, "Driver supports only PHY_TYPE_DP\n");
- ret = -ENOTSUPP;
- goto put_child;
+
+ gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes;
+ gphy->attrs.max_link_rate = cdns_phy->max_bit_rate;
+ gphy->attrs.mode = PHY_MODE_DP;
}
+
cdns_phy->phys[node].phy = gphy;
phy_set_drvdata(gphy, &cdns_phy->phys[node]);
@@ -1864,16 +2296,16 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
}
cdns_phy->nsubnodes = node;
- ret = cdns_regmap_init_torrent_dp(cdns_phy, cdns_phy->sd_base,
- cdns_phy->base,
- data->block_offset_shift,
- data->reg_offset_shift);
- if (ret)
+ if (total_num_lanes > MAX_NUM_LANES) {
+ dev_err(dev, "Invalid lane configuration\n");
+ ret = -EINVAL;
goto put_lnk_rst;
+ }
- ret = cdns_regfield_init(cdns_phy);
- if (ret)
- goto put_lnk_rst;
+ if (cdns_phy->nsubnodes > 1) {
+ ret = cdns_torrent_phy_configure_multilink(cdns_phy);
+ if (ret)
+ goto put_lnk_rst;
+ }
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
@@ -1889,6 +2321,8 @@ put_lnk_rst:
for (i = 0; i < node; i++)
reset_control_put(cdns_phy->phys[i].lnk_rst);
of_node_put(child);
+ reset_control_assert(cdns_phy->apb_rst);
+ clk_disable_unprepare(cdns_phy->clk);
return ret;
}
@@ -1898,22 +2332,1505 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev)
int i;
reset_control_assert(cdns_phy->phy_rst);
+ reset_control_assert(cdns_phy->apb_rst);
for (i = 0; i < cdns_phy->nsubnodes; i++) {
reset_control_assert(cdns_phy->phys[i].lnk_rst);
reset_control_put(cdns_phy->phys[i].lnk_rst);
}
+ clk_disable_unprepare(cdns_phy->clk);
+
return 0;
}
+/* USB and SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x009B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = {
+ .reg_pairs = usb_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs),
+};
+
+/* PCIe and USB Unique SSC link configuration */
+static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x00C9, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_usb_link_cmn_vals = {
+ .reg_pairs = pcie_usb_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs),
+};
+
+/* USB 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+};
+
+static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = {
+ .reg_pairs = usb_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs),
+};
+
+/* Single USB link configuration */
+static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_usb_link_cmn_vals = {
+ .reg_pairs = sl_usb_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs),
+};
+
+/* USB PHY PCS common configuration */
+static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = {
+ {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0},
+ {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0},
+ {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1}
+};
+
+static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = {
+ .reg_pairs = usb_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs),
+};
+
+/* USB 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = {
+ {0x02FF, TX_PSC_A0},
+ {0x06AF, TX_PSC_A1},
+ {0x06AE, TX_PSC_A2},
+ {0x06AE, TX_PSC_A3},
+ {0x2A82, TX_TXCC_CTRL},
+ {0x0014, TX_TXCC_CPOST_MULT_01},
+ {0x0003, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = {
+ {0x0D1D, RX_PSC_A0},
+ {0x0D1D, RX_PSC_A1},
+ {0x0D00, RX_PSC_A2},
+ {0x0500, RX_PSC_A3},
+ {0x0013, RX_SIGDET_HL_FILT_TMR},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0C02, RX_REE_ATTEN_THR},
+ {0x0330, RX_REE_SMGM_CTRL1},
+ {0x0300, RX_REE_SMGM_CTRL2},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x1004, RX_DIAG_SIGDET_TUNE},
+ {0x00F9, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0031, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+ {0x0003, RX_CDRLF_CNFG3}
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = {
+ .reg_pairs = usb_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = usb_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = usb_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs),
+};
+
+/* Single link USB, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x8200, CMN_CDIAG_CDB_PWRI_OVRD},
+ {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}
+};
+
+static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = {
+ .reg_pairs = sl_usb_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs),
+};
+
+/* PCIe and SGMII/QSGMII Unique SSC link configuration */
+static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1},
+ {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = {
+ {0x0011, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x009B, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = {
+ .reg_pairs = pcie_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs),
+};
+
+/* SGMII 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x3700, CMN_DIAG_BIAS_OVRD1},
+ {0x0008, CMN_TXPUCAL_TUNE},
+ {0x0008, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x00B3, DRV_DIAG_TX_DRV}
+};
+
+static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = sgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = sgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = sgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs),
+};
+
+/* SGMII 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR},
+ {0x3700, CMN_DIAG_BIAS_OVRD1},
+ {0x0008, CMN_TXPUCAL_TUNE},
+ {0x0008, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = {
+ .reg_pairs = sgmii_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs),
+};
+
+/* QSGMII 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = {
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0003, DRV_DIAG_TX_DRV}
+};
+
+static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = {
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0098, RX_DIAG_NQST_CTRL},
+ {0x0C01, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0000, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0000, RX_DIAG_PI_CAP},
+ {0x0010, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG},
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs),
+};
+
+/* QSGMII 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = {
+ .reg_pairs = qsgmii_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs),
+};
+
+/* Single SGMII/QSGMII link configuration */
+static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0003, XCVR_DIAG_HSCLK_DIV},
+ {0x0013, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = {
+ .reg_pairs = sl_sgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_sgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs),
+};
+
+/* Multi link PCIe, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0064, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0044, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x006E, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x000E, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs),
+};
+
+/* Single link PCIe, 100 MHz Ref clk, internal SSC */
+static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = {
+ {0x0004, CMN_PLL0_DSM_DIAG_M0},
+ {0x0004, CMN_PLL0_DSM_DIAG_M1},
+ {0x0004, CMN_PLL1_DSM_DIAG_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0},
+ {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1},
+ {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0},
+ {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1},
+ {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0},
+ {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1},
+ {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0},
+ {0x0064, CMN_PLL0_INTDIV_M0},
+ {0x0050, CMN_PLL0_INTDIV_M1},
+ {0x0050, CMN_PLL1_INTDIV_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M0},
+ {0x0002, CMN_PLL0_FRACDIVH_M1},
+ {0x0002, CMN_PLL1_FRACDIVH_M0},
+ {0x0044, CMN_PLL0_HIGH_THR_M0},
+ {0x0036, CMN_PLL0_HIGH_THR_M1},
+ {0x0036, CMN_PLL1_HIGH_THR_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M0},
+ {0x0002, CMN_PDIAG_PLL0_CTRL_M1},
+ {0x0002, CMN_PDIAG_PLL1_CTRL_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M0},
+ {0x0001, CMN_PLL0_SS_CTRL1_M1},
+ {0x0001, CMN_PLL1_SS_CTRL1_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M0},
+ {0x011B, CMN_PLL0_SS_CTRL2_M1},
+ {0x011B, CMN_PLL1_SS_CTRL2_M0},
+ {0x006E, CMN_PLL0_SS_CTRL3_M0},
+ {0x0058, CMN_PLL0_SS_CTRL3_M1},
+ {0x0058, CMN_PLL1_SS_CTRL3_M0},
+ {0x000E, CMN_PLL0_SS_CTRL4_M0},
+ {0x0012, CMN_PLL0_SS_CTRL4_M1},
+ {0x0012, CMN_PLL1_SS_CTRL4_M0},
+ {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x00C7, CMN_PLL0_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_REFCNT_START},
+ {0x00C7, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x00C7, CMN_PLL1_LOCK_PLLCNT_START},
+ {0x0005, CMN_PLL0_LOCK_PLLCNT_THR},
+ {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}
+};
+
+static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = sl_pcie_100_int_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs),
+};
+
+/* PCIe, 100 MHz Ref clk, no SSC & external SSC */
+static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = {
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL}
+};
+
+static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = {
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x0001, RX_DIAG_ACYA}
+};
+
+static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
+};
+
static const struct cdns_torrent_data cdns_map_torrent = {
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
+ .link_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ },
+ .xcvr_diag_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ },
+ },
+ .pcs_cmn_vals = {
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ },
+ },
+ .tx_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ .rx_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ },
+ },
};
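The TI J721E descriptor below reuses the same value tables as the reference map and differs only in block_offset_shift/reg_offset_shift, which suggests that per-platform register addresses are formed by shifting block and register offsets. A speculative illustration (function name and parameters are invented):

/* Hypothetical address math implied by the two shift fields. */
static u32 cdns_torrent_reg_addr(const struct cdns_torrent_data *data,
				 u32 block_offset, u32 reg_offset)
{
	return (block_offset << data->block_offset_shift) +
	       (reg_offset << data->reg_offset_shift);
}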
static const struct cdns_torrent_data ti_j721e_map_torrent = {
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
+ .link_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_usb_link_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_link_cmn_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
+ },
+ },
+ },
+ .xcvr_diag_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
+ },
+ },
+ },
+ .pcs_cmn_vals = {
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
+ [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
+ },
+ },
+ },
+ .tx_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = NULL,
+ [EXTERNAL_SSC] = NULL,
+ [INTERNAL_SSC] = NULL,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
+ },
+ },
+ .rx_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_PCIE] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_SGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
+ },
+ },
};
static const struct of_device_id cdns_torrent_phy_of_match[] = {
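The nested initializers above form a three-level lookup table: the configured PHY type, the type sharing the multi-protocol link (TYPE_NONE for a single-link PHY), and the SSC mode. Selecting a configuration then reduces to an array index; a hedged illustration (the accessor is our own, with plain integer indices standing in for the driver's enum values):

/* Sketch: fetch the common-block values for one PHY instance. */
static const struct cdns_torrent_vals *
get_cmn_vals(const struct cdns_torrent_data *data,
	     u32 phy_type, u32 link_partner_type, u32 ssc_mode)
{
	/* NULL means this {type, partner, SSC} combination needs no
	 * additional common-block programming. */
	return data->cmn_vals[phy_type][link_partner_type][ssc_mode];
}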
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index 0c4833da7be0..62d6d6849ad6 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -1,15 +1,20 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2017 NXP. */
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define PHY_CTRL0 0x0
#define PHY_CTRL0_REF_SSP_EN BIT(2)
+#define PHY_CTRL0_FSEL_MASK GENMASK(10, 5)
+#define PHY_CTRL0_FSEL_24M 0x2a
#define PHY_CTRL1 0x4
#define PHY_CTRL1_RESET BIT(0)
@@ -20,6 +25,11 @@
#define PHY_CTRL2 0x8
#define PHY_CTRL2_TXENABLEN0 BIT(8)
+#define PHY_CTRL2_OTG_DISABLE BIT(9)
+
+#define PHY_CTRL6 0x18
+#define PHY_CTRL6_ALT_CLK_EN BIT(1)
+#define PHY_CTRL6_ALT_CLK_SEL BIT(0)
struct imx8mq_usb_phy {
struct phy *phy;
@@ -54,6 +64,44 @@ static int imx8mq_usb_phy_init(struct phy *phy)
return 0;
}
+static int imx8mp_usb_phy_init(struct phy *phy)
+{
+ struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
+ u32 value;
+
+ /* USB3.0 PHY signal fsel for 24M ref */
+ value = readl(imx_phy->base + PHY_CTRL0);
+ value &= ~PHY_CTRL0_FSEL_MASK;
+ value |= FIELD_PREP(PHY_CTRL0_FSEL_MASK, PHY_CTRL0_FSEL_24M);
+ writel(value, imx_phy->base + PHY_CTRL0);
+
+ /* Disable alt_clk_en and use internal MPLL clocks */
+ value = readl(imx_phy->base + PHY_CTRL6);
+ value &= ~(PHY_CTRL6_ALT_CLK_SEL | PHY_CTRL6_ALT_CLK_EN);
+ writel(value, imx_phy->base + PHY_CTRL6);
+
+ value = readl(imx_phy->base + PHY_CTRL1);
+ value &= ~(PHY_CTRL1_VDATSRCENB0 | PHY_CTRL1_VDATDETENB0);
+ value |= PHY_CTRL1_RESET | PHY_CTRL1_ATERESET;
+ writel(value, imx_phy->base + PHY_CTRL1);
+
+ value = readl(imx_phy->base + PHY_CTRL0);
+ value |= PHY_CTRL0_REF_SSP_EN;
+ writel(value, imx_phy->base + PHY_CTRL0);
+
+ value = readl(imx_phy->base + PHY_CTRL2);
+ value |= PHY_CTRL2_TXENABLEN0 | PHY_CTRL2_OTG_DISABLE;
+ writel(value, imx_phy->base + PHY_CTRL2);
+
+ udelay(10);
+
+ value = readl(imx_phy->base + PHY_CTRL1);
+ value &= ~(PHY_CTRL1_RESET | PHY_CTRL1_ATERESET);
+ writel(value, imx_phy->base + PHY_CTRL1);
+
+ return 0;
+}
+
static int imx8mq_phy_power_on(struct phy *phy)
{
struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy);
@@ -76,19 +124,36 @@ static int imx8mq_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops imx8mq_usb_phy_ops = {
+static const struct phy_ops imx8mq_usb_phy_ops = {
.init = imx8mq_usb_phy_init,
.power_on = imx8mq_phy_power_on,
.power_off = imx8mq_phy_power_off,
.owner = THIS_MODULE,
};
+static struct phy_ops imx8mp_usb_phy_ops = {
+ .init = imx8mp_usb_phy_init,
+ .power_on = imx8mq_phy_power_on,
+ .power_off = imx8mq_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id imx8mq_usb_phy_of_match[] = {
+ {.compatible = "fsl,imx8mq-usb-phy",
+ .data = &imx8mq_usb_phy_ops,},
+ {.compatible = "fsl,imx8mp-usb-phy",
+ .data = &imx8mp_usb_phy_ops,},
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match);
+
static int imx8mq_usb_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct imx8mq_usb_phy *imx_phy;
struct resource *res;
+ const struct phy_ops *phy_ops;
imx_phy = devm_kzalloc(dev, sizeof(*imx_phy), GFP_KERNEL);
if (!imx_phy)
@@ -105,7 +170,11 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
if (IS_ERR(imx_phy->base))
return PTR_ERR(imx_phy->base);
- imx_phy->phy = devm_phy_create(dev, NULL, &imx8mq_usb_phy_ops);
+ phy_ops = of_device_get_match_data(dev);
+ if (!phy_ops)
+ return -EINVAL;
+
+ imx_phy->phy = devm_phy_create(dev, NULL, phy_ops);
if (IS_ERR(imx_phy->phy))
return PTR_ERR(imx_phy->phy);
@@ -120,12 +189,6 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
-static const struct of_device_id imx8mq_usb_phy_of_match[] = {
- {.compatible = "fsl,imx8mq-usb-phy",},
- { },
-};
-MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match);
-
static struct platform_driver imx8mq_usb_phy_driver = {
.probe = imx8mq_usb_phy_probe,
.driver = {
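As a side note, FIELD_PREP() shifts a value into the bit positions selected by its mask, so the FSEL write in imx8mp_usb_phy_init() above lands 0x2a in bits 10:5. A standalone arithmetic check of that idiom (assumes the PHY_CTRL0_* macros from the patch are in scope):

#include <linux/bitfield.h>

/* GENMASK(10, 5) == 0x7e0, and
 * FIELD_PREP(GENMASK(10, 5), 0x2a) == 0x2a << 5 == 0x540,
 * which sits entirely inside the mask. */
static u32 fsel_24m_field(void)
{
	return FIELD_PREP(PHY_CTRL0_FSEL_MASK, PHY_CTRL0_FSEL_24M);
}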
diff --git a/drivers/phy/hisilicon/phy-hi3660-usb3.c b/drivers/phy/hisilicon/phy-hi3660-usb3.c
index cc0af2c044d0..84adce9b4277 100644
--- a/drivers/phy/hisilicon/phy-hi3660-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3660-usb3.c
@@ -161,7 +161,7 @@ out:
return ret;
}
-static struct phy_ops hi3660_phy_ops = {
+static const struct phy_ops hi3660_phy_ops = {
.init = hi3660_phy_init,
.exit = hi3660_phy_exit,
.owner = THIS_MODULE,
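This hunk and the similar ones below constify phy_ops tables that are never written at runtime. devm_phy_create() already takes a const struct phy_ops *, so only the definitions change; the payoff is that the tables move into read-only memory. Schematically:

/* Before: the ops table lives in writable .data. */
static struct phy_ops example_ops_rw = { .owner = THIS_MODULE };

/* After: the table is placed in .rodata and cannot be corrupted
 * at runtime; callers of devm_phy_create() are unaffected. */
static const struct phy_ops example_ops_ro = { .owner = THIS_MODULE };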
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index 7b47682a4e0e..58ec695c92ec 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -1,9 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Phy drivers for Intel Lightning Mountain(LGM) platform
+# Phy drivers for Intel platforms
#
-config PHY_INTEL_COMBO
- bool "Intel ComboPHY driver"
+config PHY_INTEL_KEEMBAY_EMMC
+ tristate "Intel Keem Bay EMMC PHY driver"
+ depends on (OF && ARM64) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select REGMAP_MMIO
+ help
+ Choose this option if you have an Intel Keem Bay SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-keembay-emmc.ko.
+
+config PHY_INTEL_LGM_COMBO
+ bool "Intel Lightning Mountain ComboPHY driver"
depends on X86 || COMPILE_TEST
depends on OF && HAS_IOMEM
select MFD_SYSCON
@@ -16,8 +28,8 @@ config PHY_INTEL_COMBO
chipsets which provides PHYs for various controllers, EMAC,
SATA and PCIe.
-config PHY_INTEL_EMMC
- tristate "Intel EMMC PHY driver"
+config PHY_INTEL_LGM_EMMC
+ tristate "Intel Lightning Mountain EMMC PHY driver"
depends on X86 || COMPILE_TEST
select GENERIC_PHY
help
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index 233d530dadde..a5e0af5ccd75 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_INTEL_COMBO) += phy-intel-combo.o
-obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
+obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o
+obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o
+obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o
diff --git a/drivers/phy/intel/phy-intel-keembay-emmc.c b/drivers/phy/intel/phy-intel-keembay-emmc.c
new file mode 100644
index 000000000000..eb7c635ed89a
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-keembay-emmc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay eMMC PHY driver
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* eMMC/SD/SDIO core/phy configuration registers */
+#define PHY_CFG_0 0x24
+#define SEL_DLY_TXCLK_MASK BIT(29)
+#define OTAP_DLY_ENA_MASK BIT(27)
+#define OTAP_DLY_SEL_MASK GENMASK(26, 23)
+#define DLL_EN_MASK BIT(10)
+#define PWR_DOWN_MASK BIT(0)
+
+#define PHY_CFG_2 0x2c
+#define SEL_FREQ_MASK GENMASK(12, 10)
+
+#define PHY_STAT 0x40
+#define CAL_DONE_MASK BIT(6)
+#define IS_CALDONE(x) ((x) & CAL_DONE_MASK)
+#define DLL_RDY_MASK BIT(5)
+#define IS_DLLRDY(x) ((x) & DLL_RDY_MASK)
+
+/* From ACS_eMMC51_16nFFC_RO1100_Userguide_v1p0.pdf p17 */
+#define FREQSEL_200M_170M 0x0
+#define FREQSEL_170M_140M 0x1
+#define FREQSEL_140M_110M 0x2
+#define FREQSEL_110M_80M 0x3
+#define FREQSEL_80M_50M 0x4
+
+struct keembay_emmc_phy {
+ struct regmap *syscfg;
+ struct clk *emmcclk;
+};
+
+static const struct regmap_config keembay_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int keembay_emmc_phy_power(struct phy *phy, bool on_off)
+{
+ struct keembay_emmc_phy *priv = phy_get_drvdata(phy);
+ unsigned int caldone;
+ unsigned int dllrdy;
+ unsigned int freqsel;
+ unsigned int mhz;
+ int ret;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, PWR_DOWN_MASK,
+ FIELD_PREP(PWR_DOWN_MASK, 0));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, DLL_EN_MASK,
+ FIELD_PREP(DLL_EN_MASK, 0));
+ if (ret) {
+ dev_err(&phy->dev, "turn off the dll failed: %d\n", ret);
+ return ret;
+ }
+
+	/* Power off was already completed above */
+ if (!on_off)
+ return 0;
+
+ mhz = DIV_ROUND_CLOSEST(clk_get_rate(priv->emmcclk), 1000000);
+ if (mhz <= 200 && mhz >= 170)
+ freqsel = FREQSEL_200M_170M;
+ else if (mhz <= 170 && mhz >= 140)
+ freqsel = FREQSEL_170M_140M;
+ else if (mhz <= 140 && mhz >= 110)
+ freqsel = FREQSEL_140M_110M;
+ else if (mhz <= 110 && mhz >= 80)
+ freqsel = FREQSEL_110M_80M;
+ else if (mhz <= 80 && mhz >= 50)
+ freqsel = FREQSEL_80M_50M;
+ else
+ freqsel = 0x0;
+
+ if (mhz < 50 || mhz > 200)
+ dev_warn(&phy->dev, "Unsupported rate: %d MHz\n", mhz);
+
+ /*
+	 * According to the user manual, the calpad calibration
+	 * cycle takes more than 2us, with no minimal recommended
+	 * value given, so we allow a little margin here.
+ */
+ udelay(5);
+
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, PWR_DOWN_MASK,
+ FIELD_PREP(PWR_DOWN_MASK, 1));
+ if (ret) {
+ dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * The user manual asks the driver to wait 5us for calpad busy
+	 * trimming. However, this value is documented to be PVT (process,
+	 * voltage and temperature) dependent, and failure cases have been
+	 * observed, so we should be more tolerant of slow calpad busy
+	 * trimming.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg, PHY_STAT,
+ caldone, IS_CALDONE(caldone),
+ 0, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Set the frequency of the DLL operation */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_2, SEL_FREQ_MASK,
+ FIELD_PREP(SEL_FREQ_MASK, freqsel));
+ if (ret) {
+ dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret);
+ return ret;
+ }
+
+ /* Turn on the DLL */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, DLL_EN_MASK,
+ FIELD_PREP(DLL_EN_MASK, 1));
+ if (ret) {
+ dev_err(&phy->dev, "turn on the dll failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * We turned on the DLL even though the rate was 0 because the
+	 * clock might be turned on later. ...but we can't wait for the DLL
+ * to lock when the rate is 0 because it will never lock with no
+ * input clock.
+ *
+ * Technically we should be checking the lock later when the clock
+ * is turned on, but for now we won't.
+ */
+ if (mhz == 0)
+ return 0;
+
+ /*
+	 * After enabling the analog DLL circuits, the docs say we need
+	 * 10.2 us if our source clock is at 50 MHz, and that the lock time
+	 * scales inversely with clock speed. If we are powering on the PHY
+	 * and the card clock is super slow (like 100 kHz), this could take
+	 * as long as 5.1 ms, as per the math:
+	 * 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms.
+	 * Hopefully we won't be running at 100 kHz, but we should still
+	 * make sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take up to over 10ms (!). We'll be
+ * generous and give it 50ms.
+ */
+ ret = regmap_read_poll_timeout(priv->syscfg, PHY_STAT,
+ dllrdy, IS_DLLRDY(dllrdy),
+ 0, 50 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(&phy->dev, "dllrdy failed, ret=%d\n", ret);
+
+ return ret;
+}
+
+static int keembay_emmc_phy_init(struct phy *phy)
+{
+ struct keembay_emmc_phy *priv = phy_get_drvdata(phy);
+
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+	 * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+	 * The clock is optional, so on any error just return it to the
+	 * caller like any other error.
+ */
+ priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+
+ return PTR_ERR_OR_ZERO(priv->emmcclk);
+}
+
+static int keembay_emmc_phy_exit(struct phy *phy)
+{
+ struct keembay_emmc_phy *priv = phy_get_drvdata(phy);
+
+ clk_put(priv->emmcclk);
+
+ return 0;
+};
+
+static int keembay_emmc_phy_power_on(struct phy *phy)
+{
+ struct keembay_emmc_phy *priv = phy_get_drvdata(phy);
+ int ret;
+
+ /* Delay chain based txclk: enable */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ FIELD_PREP(SEL_DLY_TXCLK_MASK, 1));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: delay chain txclk set: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay: enable */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ FIELD_PREP(OTAP_DLY_ENA_MASK, 1));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: output tap delay set: %d\n", ret);
+ return ret;
+ }
+
+ /* Output tap delay */
+ ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ FIELD_PREP(OTAP_DLY_SEL_MASK, 2));
+ if (ret) {
+ dev_err(&phy->dev, "ERROR: output tap delay select: %d\n", ret);
+ return ret;
+ }
+
+ /* Power up eMMC phy analog blocks */
+ return keembay_emmc_phy_power(phy, true);
+}
+
+static int keembay_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down eMMC phy analog blocks */
+ return keembay_emmc_phy_power(phy, false);
+}
+
+static const struct phy_ops ops = {
+ .init = keembay_emmc_phy_init,
+ .exit = keembay_emmc_phy_exit,
+ .power_on = keembay_emmc_phy_power_on,
+ .power_off = keembay_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int keembay_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct keembay_emmc_phy *priv;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->syscfg = devm_regmap_init_mmio(dev, base, &keembay_regmap_config);
+ if (IS_ERR(priv->syscfg))
+ return PTR_ERR(priv->syscfg);
+
+ generic_phy = devm_phy_create(dev, np, &ops);
+ if (IS_ERR(generic_phy))
+ return dev_err_probe(dev, PTR_ERR(generic_phy),
+ "failed to create PHY\n");
+
+ phy_set_drvdata(generic_phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id keembay_emmc_phy_dt_ids[] = {
+ { .compatible = "intel,keembay-emmc-phy" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, keembay_emmc_phy_dt_ids);
+
+static struct platform_driver keembay_emmc_phy_driver = {
+ .probe = keembay_emmc_phy_probe,
+ .driver = {
+ .name = "keembay-emmc-phy",
+ .of_match_table = keembay_emmc_phy_dt_ids,
+ },
+};
+module_platform_driver(keembay_emmc_phy_driver);
+
+MODULE_AUTHOR("Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>");
+MODULE_DESCRIPTION("Intel Keem Bay eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
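The 50 ms DLL poll in keembay_emmc_phy_power() is derived from the documented 10.2 us lock time at 50 MHz, scaled inversely with the clock rate and padded for the slow corner cases noted in the comment. A hedged helper making the scaling explicit (for illustration only, not part of the driver):

/* Lock time ~= 10.2 us * (50 MHz / rate); at 100 kHz this is
 * 10.2 us * 500 = 5100 us = 5.1 ms. */
static unsigned long dll_lock_time_us(unsigned long rate_hz)
{
	if (!rate_hz)
		return 0;	/* no input clock: the DLL never locks */

	return DIV_ROUND_UP(510000000UL, rate_hz);
}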
diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index 360b1eb2ebd6..360b1eb2ebd6 100644
--- a/drivers/phy/intel/phy-intel-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-lgm-emmc.c
index 703aeb122541..703aeb122541 100644
--- a/drivers/phy/intel/phy-intel-emmc.c
+++ b/drivers/phy/intel/phy-intel-lgm-emmc.c
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index be09b1530ae6..a7d126192cf1 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -141,7 +141,7 @@ static int ltq_rcu_usb2_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops ltq_rcu_usb2_phy_ops = {
+static const struct phy_ops ltq_rcu_usb2_phy_ops = {
.init = ltq_rcu_usb2_phy_init,
.power_on = ltq_rcu_usb2_phy_power_on,
.power_off = ltq_rcu_usb2_phy_power_off,
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 2ff9a48d833e..22c5698123cf 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -349,7 +349,7 @@ static int ltq_vrx200_pcie_phy_power_off(struct phy *phy)
return 0;
}
-static struct phy_ops ltq_vrx200_pcie_phy_ops = {
+static const struct phy_ops ltq_vrx200_pcie_phy_ops = {
.init = ltq_vrx200_pcie_phy_init,
.exit = ltq_vrx200_pcie_phy_exit,
.power_on = ltq_vrx200_pcie_phy_power_on,
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index 1a138be8bd6a..810f25a47632 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -26,7 +26,6 @@
#define COMPHY_SIP_POWER_ON 0x82000001
#define COMPHY_SIP_POWER_OFF 0x82000002
#define COMPHY_SIP_PLL_LOCK 0x82000003
-#define COMPHY_FW_NOT_SUPPORTED (-1)
#define COMPHY_FW_MODE_SATA 0x1
#define COMPHY_FW_MODE_SGMII 0x2
@@ -112,10 +111,19 @@ static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane,
unsigned long mode)
{
struct arm_smccc_res res;
+ s32 ret;
arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
- return res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return 0;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
}
static int mvebu_a3700_comphy_get_fw_mode(int lane, int port,
@@ -220,7 +228,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
}
ret = mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param);
- if (ret == COMPHY_FW_NOT_SUPPORTED)
+ if (ret == -EOPNOTSUPP)
dev_err(lane->dev,
"unsupported SMC call, try updating your firmware\n");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index e41367f36ee1..53ad127b100f 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -123,7 +123,6 @@
#define COMPHY_SIP_POWER_ON 0x82000001
#define COMPHY_SIP_POWER_OFF 0x82000002
-#define COMPHY_FW_NOT_SUPPORTED (-1)
/*
* A lane is described by the following bitfields:
@@ -273,10 +272,19 @@ static int mvebu_comphy_smc(unsigned long function, unsigned long phys,
unsigned long lane, unsigned long mode)
{
struct arm_smccc_res res;
+ s32 ret;
arm_smccc_smc(function, phys, lane, mode, 0, 0, 0, 0, &res);
+ ret = res.a0;
- return res.a0;
+ switch (ret) {
+ case SMCCC_RET_SUCCESS:
+ return 0;
+ case SMCCC_RET_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
}
static int mvebu_comphy_get_mode(bool fw_mode, int lane, int port,
@@ -819,7 +827,7 @@ static int mvebu_comphy_power_on(struct phy *phy)
if (!ret)
return ret;
- if (ret == COMPHY_FW_NOT_SUPPORTED)
+ if (ret == -EOPNOTSUPP)
dev_err(priv->dev,
"unsupported SMC call, try updating your firmware\n");
diff --git a/drivers/phy/marvell/phy-pxa-28nm-hsic.c b/drivers/phy/marvell/phy-pxa-28nm-hsic.c
index ae8370af59c0..31b43d2ee39a 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-hsic.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-hsic.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/module.h>
@@ -44,15 +45,12 @@ struct mv_hsic_phy {
struct clk *clk;
};
-static bool wait_for_reg(void __iomem *reg, u32 mask, unsigned long timeout)
+static int wait_for_reg(void __iomem *reg, u32 mask, u32 ms)
{
- timeout += jiffies;
- while (time_is_after_eq_jiffies(timeout)) {
- if ((readl(reg) & mask) == mask)
- return true;
- msleep(1);
- }
- return false;
+ u32 val;
+
+ return readl_poll_timeout(reg, val, ((val & mask) == mask),
+ 1000, 1000 * ms);
}
static int mv_hsic_phy_init(struct phy *phy)
@@ -60,6 +58,7 @@ static int mv_hsic_phy_init(struct phy *phy)
struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy);
struct platform_device *pdev = mv_phy->pdev;
void __iomem *base = mv_phy->base;
+ int ret;
clk_prepare_enable(mv_phy->clk);
@@ -75,14 +74,14 @@ static int mv_hsic_phy_init(struct phy *phy)
base + PHY_28NM_HSIC_PLL_CTRL2);
/* Make sure PHY PLL is locked */
- if (!wait_for_reg(base + PHY_28NM_HSIC_PLL_CTRL2,
- PHY_28NM_HSIC_H2S_PLL_LOCK, HZ / 10)) {
+ ret = wait_for_reg(base + PHY_28NM_HSIC_PLL_CTRL2,
+ PHY_28NM_HSIC_H2S_PLL_LOCK, 100);
+ if (ret) {
dev_err(&pdev->dev, "HSIC PHY PLL not locked after 100mS.");
clk_disable_unprepare(mv_phy->clk);
- return -ETIMEDOUT;
}
- return 0;
+ return ret;
}
static int mv_hsic_phy_power_on(struct phy *phy)
@@ -91,6 +90,7 @@ static int mv_hsic_phy_power_on(struct phy *phy)
struct platform_device *pdev = mv_phy->pdev;
void __iomem *base = mv_phy->base;
u32 reg;
+ int ret;
reg = readl(base + PHY_28NM_HSIC_CTRL);
/* Avoid SE0 state when resume for some device will take it as reset */
@@ -108,20 +108,20 @@ static int mv_hsic_phy_power_on(struct phy *phy)
*/
/* Make sure PHY Calibration is ready */
- if (!wait_for_reg(base + PHY_28NM_HSIC_IMPCAL_CAL,
- PHY_28NM_HSIC_H2S_IMPCAL_DONE, HZ / 10)) {
+ ret = wait_for_reg(base + PHY_28NM_HSIC_IMPCAL_CAL,
+ PHY_28NM_HSIC_H2S_IMPCAL_DONE, 100);
+ if (ret) {
dev_warn(&pdev->dev, "HSIC PHY READY not set after 100mS.");
- return -ETIMEDOUT;
+ return ret;
}
/* Waiting for HSIC connect int*/
- if (!wait_for_reg(base + PHY_28NM_HSIC_INT,
- PHY_28NM_HSIC_CONNECT_INT, HZ / 5)) {
+ ret = wait_for_reg(base + PHY_28NM_HSIC_INT,
+ PHY_28NM_HSIC_CONNECT_INT, 200);
+ if (ret)
dev_warn(&pdev->dev, "HSIC wait for connect interrupt timeout.");
- return -ETIMEDOUT;
- }
- return 0;
+ return ret;
}
static int mv_hsic_phy_power_off(struct phy *phy)
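The open-coded jiffies loop is replaced by readl_poll_timeout() from <linux/iopoll.h>: it re-reads the register, sleeping about sleep_us between reads, until the condition holds or timeout_us elapses, and returns 0 on success or -ETIMEDOUT. The same conversion in isolation, with a placeholder register and mask:

#include <linux/iopoll.h>

/* Wait until every bit in @mask is set, sleeping ~1 ms between
 * reads, for at most @ms milliseconds; returns 0 or -ETIMEDOUT. */
static int wait_bits_set(void __iomem *reg, u32 mask, u32 ms)
{
	u32 val;

	return readl_poll_timeout(reg, val, (val & mask) == mask,
				  1000, 1000 * ms);
}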
diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
index 9fd881787fa6..a175ae915f02 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/module.h>
@@ -138,15 +139,12 @@ struct mv_usb2_phy {
struct clk *clk;
};
-static bool wait_for_reg(void __iomem *reg, u32 mask, unsigned long timeout)
+static int wait_for_reg(void __iomem *reg, u32 mask, u32 ms)
{
- timeout += jiffies;
- while (time_is_after_eq_jiffies(timeout)) {
- if ((readl(reg) & mask) == mask)
- return true;
- msleep(1);
- }
- return false;
+ u32 val;
+
+ return readl_poll_timeout(reg, val, ((val & mask) == mask),
+ 1000, 1000 * ms);
}
static int mv_usb2_phy_28nm_init(struct phy *phy)
@@ -208,24 +206,23 @@ static int mv_usb2_phy_28nm_init(struct phy *phy)
*/
/* Make sure PHY Calibration is ready */
- if (!wait_for_reg(base + PHY_28NM_CAL_REG,
- PHY_28NM_PLL_PLLCAL_DONE | PHY_28NM_PLL_IMPCAL_DONE,
- HZ / 10)) {
+ ret = wait_for_reg(base + PHY_28NM_CAL_REG,
+ PHY_28NM_PLL_PLLCAL_DONE | PHY_28NM_PLL_IMPCAL_DONE,
+ 100);
+ if (ret) {
dev_warn(&pdev->dev, "USB PHY PLL calibrate not done after 100mS.");
- ret = -ETIMEDOUT;
goto err_clk;
}
- if (!wait_for_reg(base + PHY_28NM_RX_REG1,
- PHY_28NM_RX_SQCAL_DONE, HZ / 10)) {
+ ret = wait_for_reg(base + PHY_28NM_RX_REG1,
+ PHY_28NM_RX_SQCAL_DONE, 100);
+ if (ret) {
dev_warn(&pdev->dev, "USB PHY RX SQ calibrate not done after 100mS.");
- ret = -ETIMEDOUT;
goto err_clk;
}
/* Make sure PHY PLL is ready */
- if (!wait_for_reg(base + PHY_28NM_PLL_REG0,
- PHY_28NM_PLL_READY, HZ / 10)) {
+ ret = wait_for_reg(base + PHY_28NM_PLL_REG0, PHY_28NM_PLL_READY, 100);
+ if (ret) {
dev_warn(&pdev->dev, "PLL_READY not set after 100mS.");
- ret = -ETIMEDOUT;
goto err_clk;
}
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index dee757c957f2..50c5e9306e19 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -35,3 +35,10 @@ config PHY_MTK_XSPHY
Enable this to support the SuperSpeedPlus XS-PHY transceiver for
USB3.1 GEN2 controllers on MediaTek chips. The driver supports
multiple USB2.0, USB3.1 GEN2 ports.
+
+config PHY_MTK_HDMI
+ tristate "MediaTek HDMI-PHY Driver"
+ depends on ARCH_MEDIATEK && OF
+ select GENERIC_PHY
+ help
+	  Enable this to support the HDMI PHY on MediaTek SoCs.
diff --git a/drivers/phy/mediatek/Makefile b/drivers/phy/mediatek/Makefile
index 08a8e6a97b1e..6325e38709ed 100644
--- a/drivers/phy/mediatek/Makefile
+++ b/drivers/phy/mediatek/Makefile
@@ -6,3 +6,8 @@
obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o
obj-$(CONFIG_PHY_MTK_UFS) += phy-mtk-ufs.o
obj-$(CONFIG_PHY_MTK_XSPHY) += phy-mtk-xsphy.o
+
+phy-mtk-hdmi-drv-y := phy-mtk-hdmi.o
+phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt2701.o
+phy-mtk-hdmi-drv-y += phy-mtk-hdmi-mt8173.o
+obj-$(CONFIG_PHY_MTK_HDMI) += phy-mtk-hdmi-drv.o
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
index d3cc4022e988..b74c65a1762c 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
@@ -4,7 +4,7 @@
* Author: Chunhui Dai <chunhui.dai@mediatek.com>
*/
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_DRV_IBIAS 0
@@ -237,8 +237,8 @@ static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
- .tz_disabled = true,
.flags = CLK_SET_RATE_GATE,
+ .pll_default_off = true,
.hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
.hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
.hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
index 827b93786fac..6cdfdf5a698a 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
@@ -4,7 +4,7 @@
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_PLL_EN BIT(31)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index 5223498502c4..47c029d4b270 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -4,7 +4,7 @@
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include "mtk_hdmi_phy.h"
+#include "phy-mtk-hdmi.h"
static int mtk_hdmi_phy_power_on(struct phy *phy);
static int mtk_hdmi_phy_power_off(struct phy *phy);
@@ -184,6 +184,9 @@ static int mtk_hdmi_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy_provider);
}
+ if (hdmi_phy->conf->pll_default_off)
+ hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
hdmi_phy->pll);
}
@@ -205,6 +208,7 @@ struct platform_driver mtk_hdmi_phy_driver = {
.of_match_table = mtk_hdmi_phy_match,
},
};
+module_platform_driver(mtk_hdmi_phy_driver);
MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index 2d8b3182470d..dcf9bb13699b 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -20,8 +20,8 @@
struct mtk_hdmi_phy;
struct mtk_hdmi_phy_conf {
- bool tz_disabled;
unsigned long flags;
+ bool pll_default_off;
const struct clk_ops *hdmi_phy_clk_ops;
void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
@@ -50,7 +50,6 @@ void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-extern struct platform_driver mtk_hdmi_phy_driver;
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
diff --git a/drivers/phy/phy-lgm-usb.c b/drivers/phy/phy-lgm-usb.c
new file mode 100644
index 000000000000..309c8f0e0724
--- /dev/null
+++ b/drivers/phy/phy-lgm-usb.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel LGM USB PHY driver
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/phy.h>
+#include <linux/workqueue.h>
+
+#define CTRL1_OFFSET 0x14
+#define SRAM_EXT_LD_DONE BIT(25)
+#define SRAM_INIT_DONE BIT(26)
+
+#define TCPC_OFFSET 0x1014
+#define TCPC_MUX_CTL GENMASK(1, 0)
+#define MUX_NC 0
+#define MUX_USB 1
+#define MUX_DP 2
+#define MUX_USBDP 3
+#define TCPC_FLIPPED BIT(2)
+#define TCPC_LOW_POWER_EN BIT(3)
+#define TCPC_VALID BIT(4)
+#define TCPC_CONN \
+ (TCPC_VALID | FIELD_PREP(TCPC_MUX_CTL, MUX_USB))
+#define TCPC_DISCONN \
+ (TCPC_VALID | FIELD_PREP(TCPC_MUX_CTL, MUX_NC) | TCPC_LOW_POWER_EN)
+
+static const char *const PHY_RESETS[] = { "phy31", "phy", };
+static const char *const CTL_RESETS[] = { "apb", "ctrl", };
+
+struct tca_apb {
+ struct reset_control *resets[ARRAY_SIZE(PHY_RESETS)];
+ struct regulator *vbus;
+ struct work_struct wk;
+ struct usb_phy phy;
+
+ bool regulator_enabled;
+ bool phy_initialized;
+ bool connected;
+};
+
+static int get_flipped(struct tca_apb *ta, bool *flipped)
+{
+ union extcon_property_value property;
+ int ret;
+
+ ret = extcon_get_property(ta->phy.edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY, &property);
+ if (ret) {
+ dev_err(ta->phy.dev, "no polarity property from extcon\n");
+ return ret;
+ }
+
+ *flipped = property.intval;
+
+ return 0;
+}
+
+static int phy_init(struct usb_phy *phy)
+{
+ struct tca_apb *ta = container_of(phy, struct tca_apb, phy);
+ void __iomem *ctrl1 = phy->io_priv + CTRL1_OFFSET;
+ int val, ret, i;
+
+ if (ta->phy_initialized)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++)
+ reset_control_deassert(ta->resets[i]);
+
+ ret = readl_poll_timeout(ctrl1, val, val & SRAM_INIT_DONE, 10, 10 * 1000);
+ if (ret) {
+ dev_err(ta->phy.dev, "SRAM init failed, 0x%x\n", val);
+ return ret;
+ }
+
+ writel(readl(ctrl1) | SRAM_EXT_LD_DONE, ctrl1);
+
+ ta->phy_initialized = true;
+ if (!ta->phy.edev) {
+ writel(TCPC_CONN, ta->phy.io_priv + TCPC_OFFSET);
+ return phy->set_vbus(phy, true);
+ }
+
+ schedule_work(&ta->wk);
+
+ return ret;
+}
+
+static void phy_shutdown(struct usb_phy *phy)
+{
+ struct tca_apb *ta = container_of(phy, struct tca_apb, phy);
+ int i;
+
+ if (!ta->phy_initialized)
+ return;
+
+ ta->phy_initialized = false;
+ flush_work(&ta->wk);
+ ta->phy.set_vbus(&ta->phy, false);
+
+ ta->connected = false;
+ writel(TCPC_DISCONN, ta->phy.io_priv + TCPC_OFFSET);
+
+ for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++)
+ reset_control_assert(ta->resets[i]);
+}
+
+static int phy_set_vbus(struct usb_phy *phy, int on)
+{
+ struct tca_apb *ta = container_of(phy, struct tca_apb, phy);
+ int ret;
+
+ if (!!on == ta->regulator_enabled)
+ return 0;
+
+ if (on)
+ ret = regulator_enable(ta->vbus);
+ else
+ ret = regulator_disable(ta->vbus);
+
+ if (!ret)
+ ta->regulator_enabled = on;
+
+ dev_dbg(ta->phy.dev, "set vbus: %d\n", on);
+ return ret;
+}
+
+static void tca_work(struct work_struct *work)
+{
+ struct tca_apb *ta = container_of(work, struct tca_apb, wk);
+ bool connected;
+ bool flipped = false;
+ u32 val;
+ int ret;
+
+ ret = get_flipped(ta, &flipped);
+ if (ret)
+ return;
+
+ connected = extcon_get_state(ta->phy.edev, EXTCON_USB_HOST);
+ if (connected == ta->connected)
+ return;
+
+ ta->connected = connected;
+ if (connected) {
+ val = TCPC_CONN;
+ if (flipped)
+ val |= TCPC_FLIPPED;
+ dev_dbg(ta->phy.dev, "connected%s\n", flipped ? " flipped" : "");
+ } else {
+ val = TCPC_DISCONN;
+ dev_dbg(ta->phy.dev, "disconnected\n");
+ }
+
+ writel(val, ta->phy.io_priv + TCPC_OFFSET);
+
+ ret = ta->phy.set_vbus(&ta->phy, connected);
+ if (ret)
+ dev_err(ta->phy.dev, "failed to set VBUS\n");
+}
+
+static int id_notifier(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct tca_apb *ta = container_of(nb, struct tca_apb, phy.id_nb);
+
+ if (ta->phy_initialized)
+ schedule_work(&ta->wk);
+
+ return NOTIFY_DONE;
+}
+
+static int vbus_notifier(struct notifier_block *nb, unsigned long evnt, void *ptr)
+{
+ return NOTIFY_DONE;
+}
+
+static int phy_probe(struct platform_device *pdev)
+{
+ struct reset_control *resets[ARRAY_SIZE(CTL_RESETS)];
+ struct device *dev = &pdev->dev;
+ struct usb_phy *phy;
+ struct tca_apb *ta;
+ int i;
+
+ ta = devm_kzalloc(dev, sizeof(*ta), GFP_KERNEL);
+ if (!ta)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ta);
+ INIT_WORK(&ta->wk, tca_work);
+
+ phy = &ta->phy;
+ phy->dev = dev;
+ phy->label = dev_name(dev);
+ phy->type = USB_PHY_TYPE_USB3;
+ phy->init = phy_init;
+ phy->shutdown = phy_shutdown;
+ phy->set_vbus = phy_set_vbus;
+ phy->id_nb.notifier_call = id_notifier;
+ phy->vbus_nb.notifier_call = vbus_notifier;
+
+ phy->io_priv = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->io_priv))
+ return PTR_ERR(phy->io_priv);
+
+ ta->vbus = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(ta->vbus))
+ return PTR_ERR(ta->vbus);
+
+ for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++) {
+ resets[i] = devm_reset_control_get_exclusive(dev, CTL_RESETS[i]);
+ if (IS_ERR(resets[i])) {
+ dev_err(dev, "%s reset not found\n", CTL_RESETS[i]);
+ return PTR_ERR(resets[i]);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++) {
+ ta->resets[i] = devm_reset_control_get_exclusive(dev, PHY_RESETS[i]);
+ if (IS_ERR(ta->resets[i])) {
+ dev_err(dev, "%s reset not found\n", PHY_RESETS[i]);
+ return PTR_ERR(ta->resets[i]);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++)
+ reset_control_assert(resets[i]);
+
+ for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++)
+ reset_control_assert(ta->resets[i]);
+ /*
+	 * An out-of-band reset of the controller after PHY reset would cause
+	 * the controller to malfunction, so use in-band controller reset
+	 * only and leave the controller reset de-asserted here.
+ */
+ for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++)
+ reset_control_deassert(resets[i]);
+
+	/* Wait at least 20us after de-asserting the controller */
+ usleep_range(20, 100);
+
+ return usb_add_phy_dev(phy);
+}
+
+static int phy_remove(struct platform_device *pdev)
+{
+ struct tca_apb *ta = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&ta->phy);
+
+ return 0;
+}
+
+static const struct of_device_id intel_usb_phy_dt_ids[] = {
+ { .compatible = "intel,lgm-usb-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, intel_usb_phy_dt_ids);
+
+static struct platform_driver lgm_phy_driver = {
+ .driver = {
+ .name = "lgm-usb-phy",
+ .of_match_table = intel_usb_phy_dt_ids,
+ },
+ .probe = phy_probe,
+ .remove = phy_remove,
+};
+
+module_platform_driver(lgm_phy_driver);
+
+MODULE_DESCRIPTION("Intel LGM USB PHY driver");
+MODULE_AUTHOR("Li Yin <yin1.li@intel.com>");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
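The TCPC_CONN/TCPC_DISCONN macros above fold the Type-C mux state into a single register word: TCPC_VALID plus the mux selection, with TCPC_LOW_POWER_EN only while disconnected and TCPC_FLIPPED added for a reversed cable orientation. The value tca_work() writes could be computed as follows (illustrative helper, not in the driver):

/* Sketch of the mux word written to TCPC_OFFSET. */
static u32 tcpc_mux_word(bool connected, bool flipped)
{
	u32 val = connected ? TCPC_CONN : TCPC_DISCONN;

	if (connected && flipped)
		val |= TCPC_FLIPPED;

	return val;
}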
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index febe0aef68d4..ce91ae7f8dbd 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -4,6 +4,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -72,18 +73,12 @@ struct qcom_apq8064_sata_phy {
};
/* Helper function to do poll and timeout */
-static int read_poll_timeout(void __iomem *addr, u32 mask)
+static int poll_timeout(void __iomem *addr, u32 mask)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
+ u32 val;
- do {
- if (readl_relaxed(addr) & mask)
- return 0;
-
- usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
- } while (!time_after(jiffies, timeout));
-
- return (readl_relaxed(addr) & mask) ? 0 : -ETIMEDOUT;
+ return readl_relaxed_poll_timeout(addr, val, (val & mask),
+ DELAY_INTERVAL_US, TIMEOUT_MS * 1000);
}
static int qcom_apq8064_sata_phy_init(struct phy *generic_phy)
@@ -137,21 +132,21 @@ static int qcom_apq8064_sata_phy_init(struct phy *generic_phy)
writel_relaxed(0x05, base + UNIPHY_PLL_LKDET_CFG2);
/* PLL Lock wait */
- ret = read_poll_timeout(base + UNIPHY_PLL_STATUS, UNIPHY_PLL_LOCK);
+ ret = poll_timeout(base + UNIPHY_PLL_STATUS, UNIPHY_PLL_LOCK);
if (ret) {
dev_err(phy->dev, "poll timeout UNIPHY_PLL_STATUS\n");
return ret;
}
/* TX Calibration */
- ret = read_poll_timeout(base + SATA_PHY_TX_IMCAL_STAT, SATA_PHY_TX_CAL);
+ ret = poll_timeout(base + SATA_PHY_TX_IMCAL_STAT, SATA_PHY_TX_CAL);
if (ret) {
dev_err(phy->dev, "poll timeout SATA_PHY_TX_IMCAL_STAT\n");
return ret;
}
/* RX Calibration */
- ret = read_poll_timeout(base + SATA_PHY_RX_IMCAL_STAT, SATA_PHY_RX_CAL);
+ ret = poll_timeout(base + SATA_PHY_RX_IMCAL_STAT, SATA_PHY_RX_CAL);
if (ret) {
dev_err(phy->dev, "poll timeout SATA_PHY_RX_IMCAL_STAT\n");
return ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
index b8ef331e1545..fc7f9df80a7b 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -48,7 +48,7 @@ static int ipq4019_ss_phy_power_on(struct phy *_phy)
return 0;
}
-static struct phy_ops ipq4019_usb_ss_phy_ops = {
+static const struct phy_ops ipq4019_usb_ss_phy_ops = {
.power_on = ipq4019_ss_phy_power_on,
.power_off = ipq4019_ss_phy_power_off,
};
@@ -80,7 +80,7 @@ static int ipq4019_hs_phy_power_on(struct phy *_phy)
return 0;
}
-static struct phy_ops ipq4019_usb_hs_phy_ops = {
+static const struct phy_ops ipq4019_usb_hs_phy_ops = {
.power_on = ipq4019_hs_phy_power_on,
.power_off = ipq4019_hs_phy_power_off,
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 6e6f992a9524..5d33ad4d06f2 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -946,6 +946,88 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
};
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x8c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x2a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x08),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRANSCEIVER_BIAS_EN, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_INTERFACE_SELECT, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRAN_DRVR_EMP_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_BAND, 0x4),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_POL_INV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_DRV_LVL, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_EMP_POST1_LVL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
@@ -1761,6 +1843,16 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *pcs_misc_tbl;
int pcs_misc_tbl_num;
+ /* Per-link-rate init sequences for the DP PHY block */
+ const struct qmp_phy_init_tbl *serdes_tbl_rbr;
+ int serdes_tbl_rbr_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr;
+ int serdes_tbl_hbr_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr2;
+ int serdes_tbl_hbr2_num;
+ const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
+ int serdes_tbl_hbr3_num;
+
/* clock ids to be requested */
const char * const *clk_list;
int num_clks;
@@ -1797,10 +1889,17 @@ struct qmp_phy_cfg {
bool no_pcs_sw_reset;
};
+struct qmp_phy_combo_cfg {
+ const struct qmp_phy_cfg *usb_cfg;
+ const struct qmp_phy_cfg *dp_cfg;
+};
+
/**
* struct qmp_phy - per-lane phy descriptor
*
* @phy: generic phy
+ * @cfg: phy specific configuration
+ * @serdes: iomapped memory space for phy's serdes (i.e. PLL)
* @tx: iomapped memory space for lane's tx
* @rx: iomapped memory space for lane's rx
* @pcs: iomapped memory space for lane's pcs
@@ -1811,9 +1910,12 @@ struct qmp_phy_cfg {
* @index: lane index
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
+ * @mode: current PHY mode
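+ * @dp_aux_cfg: index into the DP AUX CFG1 calibration settings
+ * @dp_opts: current configuration of the DP phy
+ * @dp_clks: DP link/pixel clock provider data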
*/
struct qmp_phy {
struct phy *phy;
+ const struct qmp_phy_cfg *cfg;
+ void __iomem *serdes;
void __iomem *tx;
void __iomem *rx;
void __iomem *pcs;
@@ -1824,43 +1926,45 @@ struct qmp_phy {
unsigned int index;
struct qcom_qmp *qmp;
struct reset_control *lane_rst;
+ enum phy_mode mode;
+ unsigned int dp_aux_cfg;
+ struct phy_configure_opts_dp dp_opts;
+ struct qmp_phy_dp_clks *dp_clks;
+};
+
+struct qmp_phy_dp_clks {
+ struct qmp_phy *qphy;
+ struct clk_hw dp_link_hw;
+ struct clk_hw dp_pixel_hw;
};
/**
* struct qcom_qmp - structure holding QMP phy block attributes
*
* @dev: device
- * @serdes: iomapped memory space for phy's serdes
* @dp_com: iomapped memory space for phy's dp_com control block
*
* @clks: array of clocks required by phy
* @resets: array of resets required by phy
* @vregs: regulator supplies bulk data
*
- * @cfg: phy specific configuration
* @phys: array of per-lane phy descriptors
* @phy_mutex: mutex lock for PHY common block initialization
* @init_count: phy common block initialization count
- * @phy_initialized: indicate if PHY has been initialized
- * @mode: current PHY mode
* @ufs_reset: optional UFS PHY reset handle
*/
struct qcom_qmp {
struct device *dev;
- void __iomem *serdes;
void __iomem *dp_com;
struct clk_bulk_data *clks;
struct reset_control **resets;
struct regulator_bulk_data *vregs;
- const struct qmp_phy_cfg *cfg;
struct qmp_phy **phys;
struct mutex phy_mutex;
int init_count;
- bool phy_initialized;
- enum phy_mode mode;
struct reset_control *ufs_reset;
};
@@ -2203,6 +2307,41 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
+ .type = PHY_TYPE_DP,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
+ .tx_tbl = qmp_v3_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
+
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
+ .usb_cfg = &sc7180_usb3phy_cfg,
+ .dp_cfg = &sc7180_dpphy_cfg,
+};
+
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -2479,11 +2618,300 @@ static void qcom_qmp_phy_configure(void __iomem *base,
qcom_qmp_phy_configure_lane(base, regs, tbl, num, 0xff);
}
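+/*
+ * Program the common serdes/PLL block. For DP PHYs, the per-link-rate
+ * table is applied on top and the common PCS block is started if present.
+ */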
+static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy)
+{
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+ int ret;
+
+ qcom_qmp_phy_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+
+ if (cfg->type == PHY_TYPE_DP) {
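+ /*
+ * link_rate is in Mbps per lane: 1620 = RBR, 2700 = HBR,
+ * 5400 = HBR2, 8100 = HBR3.
+ */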
+ switch (dp_opts->link_rate) {
+ case 1620:
+ qcom_qmp_phy_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_rbr,
+ cfg->serdes_tbl_rbr_num);
+ break;
+ case 2700:
+ qcom_qmp_phy_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr,
+ cfg->serdes_tbl_hbr_num);
+ break;
+ case 5400:
+ qcom_qmp_phy_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr2,
+ cfg->serdes_tbl_hbr2_num);
+ break;
+ case 8100:
+ qcom_qmp_phy_configure(serdes, cfg->regs,
+ cfg->serdes_tbl_hbr3,
+ cfg->serdes_tbl_hbr3_num);
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy)
+{
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
+ QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
+ qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN |
+ DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN |
+ DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+
+ writel(QSERDES_V3_COM_BIAS_EN |
+ QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
+ QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL |
+ QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
+ qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+
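+ /* AUX channel tuning values; CFG1 gets recalibrated in the calibrate hook */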
+ writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG0);
+ writel(0x13, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
+ writel(0x24, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
+ writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG3);
+ writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG4);
+ writel(0x26, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG5);
+ writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG6);
+ writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG7);
+ writel(0xbb, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG8);
+ writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG9);
+ qphy->dp_aux_cfg = 0;
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK,
+ qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
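+/*
+ * Lookup tables indexed as [voltage swing level][pre-emphasis level];
+ * 0xff marks combinations the hardware does not support (DP caps the
+ * combined swing + pre-emphasis level at 3).
+ */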
+static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x00, 0x0c, 0x14, 0x19 },
+ { 0x00, 0x0b, 0x12, 0xff },
+ { 0x00, 0x0b, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ unsigned int v_level = 0, p_level = 0;
+ u32 bias_en, drvr_en;
+ u8 voltage_swing_cfg, pre_emphasis_cfg;
+ int i;
+
+ for (i = 0; i < dp_opts->lanes; i++) {
+ v_level = max(v_level, dp_opts->voltage[i]);
+ p_level = max(p_level, dp_opts->pre[i]);
+ }
+
+ if (dp_opts->lanes == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
+ pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
+
+ /* TODO: Move check to config check */
+ if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF)
+ return;
+
+ /* Enable MUX to use Cursor values from these registers */
+ voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
+ pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
+
+ writel(voltage_swing_cfg, qphy->tx + QSERDES_V3_TX_TX_DRV_LVL);
+ writel(pre_emphasis_cfg, qphy->tx + QSERDES_V3_TX_TX_EMP_POST1_LVL);
+ writel(voltage_swing_cfg, qphy->tx2 + QSERDES_V3_TX_TX_DRV_LVL);
+ writel(pre_emphasis_cfg, qphy->tx2 + QSERDES_V3_TX_TX_EMP_POST1_LVL);
+
+ writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN);
+ writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
+}
+
+static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+
+ memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts));
+ if (qphy->dp_opts.set_voltages) {
+ qcom_qmp_phy_configure_dp_tx(qphy);
+ qphy->dp_opts.set_voltages = 0;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 val, phy_vco_div, status;
+ unsigned long pixel_freq;
+
+ val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN;
+
+ /*
+ * TODO: Orientation is assumed to be CC1 with two lanes for now; the
+ * type-c connector needs to be consulted to learn the real orientation
+ * and lane count.
+ *
+ * Otherwise val changes to be like below if this code understood
+ * the orientation of the type-c cable.
+ *
+ * if (lane_cnt == 4 || orientation == ORIENTATION_CC2)
+ * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN;
+ * if (lane_cnt == 4 || orientation == ORIENTATION_CC1)
+ * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+ * if (orientation == ORIENTATION_CC2)
+ * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
+ */
+ val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN;
+ writel(val, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+
+ writel(0x5c, qphy->pcs + QSERDES_V3_DP_PHY_MODE);
+ writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
+
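+ /*
+ * Select the VCO output divider for the pixel clock:
+ * 0x1 = div 2, 0x2 = div 4, 0x0 = div 6.
+ */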
+ switch (dp_opts->link_rate) {
+ case 1620:
+ phy_vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+ case 2700:
+ phy_vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+ case 5400:
+ phy_vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+ case 8100:
+ phy_vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+ writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV);
+
+ clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq);
+
+ writel(0x04, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2);
+ writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ writel(0x09, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+
+ writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL);
+
+ if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS,
+ status,
+ ((status & BIT(0)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x18, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG);
+
+ return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000);
+}
+
+/*
+ * The AUX settings need recalibrating on every attempt: each call picks
+ * the next CFG1 value, however many times the caller retries.
+ */
+static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d };
+ u8 val;
+
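+ /* Advance to the next CFG1 value, wrapping around the table */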
+ qphy->dp_aux_cfg++;
+ qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings);
+ val = cfg1_settings[qphy->dp_aux_cfg];
+
+ writel(val, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1);
+
+ return 0;
+}
+
static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- void __iomem *serdes = qmp->serdes;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
int ret, i;
@@ -2514,7 +2942,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
ret = reset_control_deassert(qmp->resets[i]);
if (ret) {
dev_err(qmp->dev, "%s reset deassert failed\n",
- qmp->cfg->reset_list[i]);
+ qphy->cfg->reset_list[i]);
goto err_rst;
}
}
@@ -2533,6 +2961,9 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ /* Default type-c orientation, i.e. CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+
qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
USB3_MODE | DP_MODE);
@@ -2540,6 +2971,9 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
}
if (cfg->has_phy_com_ctrl) {
@@ -2555,36 +2989,10 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
cfg->pwrdn_ctrl);
}
- /* Serdes configuration */
- qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
- cfg->serdes_tbl_num);
-
- if (cfg->has_phy_com_ctrl) {
- void __iomem *status;
- unsigned int mask, val;
-
- qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
- qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
- SERDES_START | PCS_START);
-
- status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
- mask = cfg->mask_com_pcs_ready;
-
- ret = readl_poll_timeout(status, val, (val & mask), 10,
- PHY_INIT_COMPLETE_TIMEOUT);
- if (ret) {
- dev_err(qmp->dev,
- "phy common block init timed-out\n");
- goto err_com_init;
- }
- }
-
mutex_unlock(&qmp->phy_mutex);
return 0;
-err_com_init:
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
err_rst:
while (++i < cfg->num_resets)
reset_control_assert(qmp->resets[i]);
@@ -2596,10 +3004,11 @@ err_reg_enable:
return ret;
}
-static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+static int qcom_qmp_phy_com_exit(struct qmp_phy *qphy)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- void __iomem *serdes = qmp->serdes;
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *serdes = qphy->serdes;
int i = cfg->num_resets;
mutex_lock(&qmp->phy_mutex);
@@ -2630,20 +3039,12 @@ static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
return 0;
}
-static int qcom_qmp_phy_enable(struct phy *phy)
+static int qcom_qmp_phy_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- void __iomem *tx = qphy->tx;
- void __iomem *rx = qphy->rx;
- void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
- void __iomem *dp_com = qmp->dp_com;
- void __iomem *status;
- unsigned int mask, val, ready;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
int ret;
-
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
if (cfg->no_pcs_sw_reset) {
@@ -2670,13 +3071,34 @@ static int qcom_qmp_phy_enable(struct phy *phy)
ret = reset_control_assert(qmp->ufs_reset);
if (ret)
- goto err_lane_rst;
+ return ret;
}
ret = qcom_qmp_phy_com_init(qphy);
if (ret)
return ret;
+ if (cfg->type == PHY_TYPE_DP)
+ qcom_qmp_phy_dp_aux_init(qphy);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_power_on(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *pcs_misc = qphy->pcs_misc;
+ void __iomem *status;
+ unsigned int mask, val, ready;
+ int ret;
+
+ qcom_qmp_phy_serdes_init(qphy);
+
if (cfg->has_lane_rst) {
ret = reset_control_deassert(qphy->lane_rst);
if (ret) {
@@ -2700,13 +3122,23 @@ static int qcom_qmp_phy_enable(struct phy *phy)
qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs,
cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ /* Configure special DP tx tunings */
+ if (cfg->type == PHY_TYPE_DP)
+ qcom_qmp_phy_configure_dp_tx(qphy);
+
qcom_qmp_phy_configure_lane(rx, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num, 2);
- qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ /* Configure link rate, swing, etc. */
+ if (cfg->type == PHY_TYPE_DP)
+ qcom_qmp_phy_configure_dp_phy(qphy);
+ else
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
goto err_lane_rst;
@@ -2724,102 +3156,129 @@ static int qcom_qmp_phy_enable(struct phy *phy)
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
- /* Pull PHY out of reset state */
- if (!cfg->no_pcs_sw_reset)
- qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
-
- if (cfg->has_phy_dp_com_ctrl)
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
-
- /* start SerDes and Phy-Coding-Sublayer */
- qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
-
- if (cfg->type == PHY_TYPE_UFS) {
- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
- mask = PCS_READY;
- ready = PCS_READY;
- } else {
- status = pcs + cfg->regs[QPHY_PCS_STATUS];
- mask = PHYSTATUS;
- ready = 0;
- }
+ if (cfg->type != PHY_TYPE_DP) {
+ /* Pull PHY out of reset state */
+ if (!cfg->no_pcs_sw_reset)
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ if (cfg->type == PHY_TYPE_UFS) {
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = PCS_READY;
+ ready = PCS_READY;
+ } else {
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ mask = PHYSTATUS;
+ ready = 0;
+ }
- ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
- PHY_INIT_COMPLETE_TIMEOUT);
- if (ret) {
- dev_err(qmp->dev, "phy initialization timed-out\n");
- goto err_pcs_ready;
+ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
}
- qmp->phy_initialized = true;
return 0;
err_pcs_ready:
- reset_control_assert(qmp->ufs_reset);
clk_disable_unprepare(qphy->pipe_clk);
err_clk_enable:
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
err_lane_rst:
- qcom_qmp_phy_com_exit(qmp);
-
return ret;
}
-static int qcom_qmp_phy_disable(struct phy *phy)
+static int qcom_qmp_phy_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
clk_disable_unprepare(qphy->pipe_clk);
- /* PHY reset */
- if (!cfg->no_pcs_sw_reset)
- qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (cfg->type == PHY_TYPE_DP) {
+ /* Assert DP PHY power down */
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL);
+ } else {
+ /* PHY reset */
+ if (!cfg->no_pcs_sw_reset)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
- /* stop SerDes and Phy-Coding-Sublayer */
- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
- /* Put PHY into POWER DOWN state: active low */
- if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
- qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
- cfg->pwrdn_ctrl);
- } else {
- qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL,
- cfg->pwrdn_ctrl);
+ /* Put PHY into POWER DOWN state: active low */
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
}
+ return 0;
+}
+
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
+
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
- qcom_qmp_phy_com_exit(qmp);
-
- qmp->phy_initialized = false;
+ qcom_qmp_phy_com_exit(qphy);
return 0;
}
+static int qcom_qmp_phy_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_power_on(phy);
+ if (ret)
+ qcom_qmp_phy_exit(phy);
+
+ return ret;
+}
+
+static int qcom_qmp_phy_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qcom_qmp_phy_power_off(phy);
+ if (ret)
+ return ret;
+ return qcom_qmp_phy_exit(phy);
+}
+
static int qcom_qmp_phy_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- qmp->mode = mode;
+ qphy->mode = mode;
return 0;
}
static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
{
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
void __iomem *pcs_misc = qphy->pcs_misc;
u32 intr_mask;
- if (qmp->mode == PHY_MODE_USB_HOST_SS ||
- qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ if (qphy->mode == PHY_MODE_USB_HOST_SS ||
+ qphy->mode == PHY_MODE_USB_DEVICE_SS)
intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
else
intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
@@ -2842,8 +3301,7 @@ static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy)
static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy)
{
- struct qcom_qmp *qmp = qphy->qmp;
- const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
void __iomem *pcs_misc = qphy->pcs_misc;
@@ -2863,15 +3321,15 @@ static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
- const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
- dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY */
+ /* Supported only for USB3 PHY and luckily USB3 is the first phy */
if (cfg->type != PHY_TYPE_USB3)
return 0;
- if (!qmp->phy_initialized) {
+ if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
}
@@ -2888,16 +3346,16 @@ static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
- const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
int ret = 0;
- dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY */
+ /* Supported only for USB3 PHY and luckily USB3 is the first phy */
if (cfg->type != PHY_TYPE_USB3)
return 0;
- if (!qmp->phy_initialized) {
+ if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
}
@@ -2920,10 +3378,10 @@ static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev)
return 0;
}
-static int qcom_qmp_phy_vreg_init(struct device *dev)
+static int qcom_qmp_phy_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
- int num = qmp->cfg->num_vregs;
+ int num = cfg->num_vregs;
int i;
qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
@@ -2931,24 +3389,24 @@ static int qcom_qmp_phy_vreg_init(struct device *dev)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+ qmp->vregs[i].supply = cfg->vreg_list[i];
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_reset_init(struct device *dev)
+static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
- qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
sizeof(*qmp->resets), GFP_KERNEL);
if (!qmp->resets)
return -ENOMEM;
- for (i = 0; i < qmp->cfg->num_resets; i++) {
+ for (i = 0; i < cfg->num_resets; i++) {
struct reset_control *rst;
- const char *name = qmp->cfg->reset_list[i];
+ const char *name = cfg->reset_list[i];
rst = devm_reset_control_get(dev, name);
if (IS_ERR(rst)) {
@@ -2961,10 +3419,10 @@ static int qcom_qmp_phy_reset_init(struct device *dev)
return 0;
}
-static int qcom_qmp_phy_clk_init(struct device *dev)
+static int qcom_qmp_phy_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
- int num = qmp->cfg->num_clks;
+ int num = cfg->num_clks;
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -2972,12 +3430,12 @@ static int qcom_qmp_phy_clk_init(struct device *dev)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = qmp->cfg->clk_list[i];
+ qmp->clks[i].id = cfg->clk_list[i];
return devm_clk_bulk_get(dev, num, qmp->clks);
}
-static void phy_pipe_clk_release_provider(void *res)
+static void phy_clk_release_provider(void *res)
{
of_clk_del_provider(res);
}
@@ -3006,12 +3464,6 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
struct clk_init_data init = { };
int ret;
- if ((qmp->cfg->type != PHY_TYPE_USB3) &&
- (qmp->cfg->type != PHY_TYPE_PCIE)) {
- /* not all phys register pipe clocks, so return success */
- return 0;
- }
-
ret = of_property_read_string(np, "clock-output-names", &init.name);
if (ret) {
dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
@@ -3040,9 +3492,202 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
* Roll a devm action because the clock provider is the child node, but
* the child node is not actually a device.
*/
- ret = devm_add_action(qmp->dev, phy_pipe_clk_release_provider, np);
+ ret = devm_add_action(qmp->dev, phy_clk_release_provider, np);
+ if (ret)
+ phy_clk_release_provider(np);
+
+ return ret;
+}
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | DP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (DP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +---------<---------v------------>----------+
+ * | |
+ * +--------v----------------+ |
+ * | dp_phy_pll_link_clk | |
+ * | link_clk | |
+ * +--------+----------------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +----v---------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v---->----------v-------------<------v
+ * |
+ * +----------+-----------------+
+ * | dp_phy_pll_vco_div_clk |
+ * +---------+------------------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for DP pixel clock
+ *
+ */
+static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 1620000000UL / 2:
+ case 2700000000UL / 2:
+ /* 5.4 and 8.1 Gbps use div 4 and div 6, matching the 2.7 Gbps / 2 rate above */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qmp_phy_dp_clks *dp_clks;
+ const struct qmp_phy *qphy;
+ const struct phy_configure_opts_dp *dp_opts;
+
+ dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw);
+ qphy = dp_clks->qphy;
+ dp_opts = &qphy->dp_opts;
+
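+ /*
+ * Pixel clock is the link rate divided by the VCO divider programmed
+ * in qcom_qmp_phy_configure_dp_phy() (div 2/2/4/6 by link rate).
+ */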
+ switch (dp_opts->link_rate) {
+ case 1620:
+ return 1620000000UL / 2;
+ case 2700:
+ return 2700000000UL / 2;
+ case 5400:
+ return 5400000000UL / 4;
+ case 8100:
+ return 8100000000UL / 6;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = {
+ .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate,
+ .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate,
+};
+
+static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 162000000:
+ case 270000000:
+ case 540000000:
+ case 810000000:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qmp_phy_dp_clks *dp_clks;
+ const struct qmp_phy *qphy;
+ const struct phy_configure_opts_dp *dp_opts;
+
+ dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw);
+ qphy = dp_clks->qphy;
+ dp_opts = &qphy->dp_opts;
+
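+ /*
+ * The link clock runs at 1/10 of the per-lane link rate (8b/10b
+ * symbol clock), i.e. link_rate in Mbps times 100000 gives Hz.
+ */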
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ case 8100:
+ return dp_opts->link_rate * 100000;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_qmp_dp_link_clk_ops = {
+ .determine_rate = qcom_qmp_dp_link_clk_determine_rate,
+ .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate,
+};
+
+static struct clk_hw *
+qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct qmp_phy_dp_clks *dp_clks = data;
+ unsigned int idx = clkspec->args[0];
+
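+ /* Clock cells: 0 = DP link clock, 1 = DP pixel (VCO div) clock */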
+ if (idx >= 2) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (idx == 0)
+ return &dp_clks->dp_link_hw;
+
+ return &dp_clks->dp_pixel_hw;
+}
+
+static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy,
+ struct device_node *np)
+{
+ struct clk_init_data init = { };
+ struct qmp_phy_dp_clks *dp_clks;
+ int ret;
+
+ dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL);
+ if (!dp_clks)
+ return -ENOMEM;
+
+ dp_clks->qphy = qphy;
+ qphy->dp_clks = dp_clks;
+
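+ /*
+ * The clk core copies what it needs out of clk_init_data during
+ * registration, so the same local init can be reused for both clocks.
+ */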
+ init.ops = &qcom_qmp_dp_link_clk_ops;
+ init.name = "qmp_dp_phy_pll_link_clk";
+ dp_clks->dp_link_hw.init = &init;
+ ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw);
+ if (ret)
+ return ret;
+
+ init.ops = &qcom_qmp_dp_pixel_clk_ops;
+ init.name = "qmp_dp_phy_pll_vco_div_clk";
+ dp_clks->dp_pixel_hw.init = &init;
+ ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ ret = devm_add_action(qmp->dev, phy_clk_release_provider, np);
if (ret)
- phy_pipe_clk_release_provider(np);
+ phy_clk_release_provider(np);
return ret;
}
@@ -3054,6 +3699,17 @@ static const struct phy_ops qcom_qmp_phy_gen_ops = {
.owner = THIS_MODULE,
};
+static const struct phy_ops qcom_qmp_phy_dp_ops = {
+ .init = qcom_qmp_phy_init,
+ .configure = qcom_qmp_dp_phy_configure,
+ .power_on = qcom_qmp_phy_power_on,
+ .calibrate = qcom_qmp_dp_phy_calibrate,
+ .power_off = qcom_qmp_phy_power_off,
+ .exit = qcom_qmp_phy_exit,
+ .set_mode = qcom_qmp_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
.power_on = qcom_qmp_phy_enable,
.power_off = qcom_qmp_phy_disable,
@@ -3062,12 +3718,13 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = {
};
static
-int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id,
+ void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- const struct phy_ops *ops = &qcom_qmp_phy_gen_ops;
+ const struct phy_ops *ops;
char prop_name[MAX_PROP_NAME];
int ret;
@@ -3075,6 +3732,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
if (!qphy)
return -ENOMEM;
+ qphy->cfg = cfg;
+ qphy->serdes = serdes;
/*
* Get memory resources for each phy lane:
* Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
@@ -3099,7 +3758,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
* back to old legacy behavior of assuming they can be reached at an
* offset from the first lane.
*/
- if (qmp->cfg->is_dual_lane_phy) {
+ if (cfg->is_dual_lane_phy) {
qphy->tx2 = of_iomap(np, 3);
qphy->rx2 = of_iomap(np, 4);
if (!qphy->tx2 || !qphy->rx2) {
@@ -3132,8 +3791,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
if (IS_ERR(qphy->pipe_clk)) {
- if (qmp->cfg->type == PHY_TYPE_PCIE ||
- qmp->cfg->type == PHY_TYPE_USB3) {
+ if (cfg->type == PHY_TYPE_PCIE ||
+ cfg->type == PHY_TYPE_USB3) {
ret = PTR_ERR(qphy->pipe_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev,
@@ -3145,7 +3804,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
}
/* Get lane reset, if any */
- if (qmp->cfg->has_lane_rst) {
+ if (cfg->has_lane_rst) {
snprintf(prop_name, sizeof(prop_name), "lane%d", id);
qphy->lane_rst = of_reset_control_get(np, prop_name);
if (IS_ERR(qphy->lane_rst)) {
@@ -3154,8 +3813,12 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
}
}
- if (qmp->cfg->type == PHY_TYPE_UFS || qmp->cfg->type == PHY_TYPE_PCIE)
+ if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE)
ops = &qcom_qmp_pcie_ufs_ops;
+ else if (cfg->type == PHY_TYPE_DP)
+ ops = &qcom_qmp_phy_dp_ops;
+ else
+ ops = &qcom_qmp_phy_gen_ops;
generic_phy = devm_phy_create(dev, np, ops);
if (IS_ERR(generic_phy)) {
@@ -3199,6 +3862,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sc7180-qmp-usb3-phy",
.data = &sc7180_usb3phy_cfg,
}, {
+ .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
+ /* It's a combo phy */
+ }, {
.compatible = "qcom,sdm845-qhp-pcie-phy",
.data = &sdm845_qhp_pciephy_cfg,
}, {
@@ -3239,6 +3905,14 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,sc7180-qmp-usb3-dp-phy",
+ .data = &sc7180_usb3dpphy_cfg,
+ },
+ { }
+};
+
static const struct dev_pm_ops qcom_qmp_phy_pm_ops = {
SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend,
qcom_qmp_phy_runtime_resume, NULL)
@@ -3248,11 +3922,16 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
- struct resource *res;
struct device_node *child;
struct phy_provider *phy_provider;
- void __iomem *base;
- int num, id;
+ void __iomem *serdes;
+ void __iomem *usb_serdes;
+ void __iomem *dp_serdes;
+ const struct qmp_phy_combo_cfg *combo_cfg = NULL;
+ const struct qmp_phy_cfg *cfg = NULL;
+ const struct qmp_phy_cfg *usb_cfg = NULL;
+ const struct qmp_phy_cfg *dp_cfg = NULL;
+ int num, id, expected_phys;
int ret;
qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
@@ -3263,40 +3942,57 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, qmp);
/* Get the specific init parameters of QMP phy */
- qmp->cfg = of_device_get_match_data(dev);
- if (!qmp->cfg)
- return -EINVAL;
+ cfg = of_device_get_match_data(dev);
+ if (!cfg) {
+ const struct of_device_id *match;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ match = of_match_device(qcom_qmp_combo_phy_of_match_table, dev);
+ if (!match)
+ return -EINVAL;
+
+ combo_cfg = match->data;
+ if (!combo_cfg)
+ return -EINVAL;
+
+ usb_cfg = combo_cfg->usb_cfg;
+ cfg = usb_cfg; /* Setup clks and regulators */
+ }
/* per PHY serdes; usually located at base address */
- qmp->serdes = base;
+ usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(serdes))
+ return PTR_ERR(serdes);
/* per PHY dp_com; if PHY has dp_com control block */
- if (qmp->cfg->has_phy_dp_com_ctrl) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dp_com");
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- qmp->dp_com = base;
+ if (combo_cfg || cfg->has_phy_dp_com_ctrl) {
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
+ }
+
+ if (combo_cfg) {
+ /* Only two serdes for combo PHY */
+ dp_serdes = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(dp_serdes))
+ return PTR_ERR(dp_serdes);
+
+ dp_cfg = combo_cfg->dp_cfg;
+ expected_phys = 2;
+ } else {
+ expected_phys = cfg->nlanes;
}
mutex_init(&qmp->phy_mutex);
- ret = qcom_qmp_phy_clk_init(dev);
+ ret = qcom_qmp_phy_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_reset_init(dev);
+ ret = qcom_qmp_phy_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_vreg_init(dev);
+ ret = qcom_qmp_phy_vreg_init(dev, cfg);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get regulator supplies: %d\n",
@@ -3306,14 +4002,13 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
- if (num > qmp->cfg->nlanes)
+ if (num > expected_phys)
return -EINVAL;
qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
if (!qmp->phys)
return -ENOMEM;
- id = 0;
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
/*
@@ -3322,9 +4017,18 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
*/
pm_runtime_forbid(dev);
+ id = 0;
for_each_available_child_of_node(dev->of_node, child) {
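+ /*
+ * Combo PHY: pick the per-child config and serdes block by DT
+ * node name ("usb3-phy" or "dp-phy").
+ */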
+ if (of_node_name_eq(child, "dp-phy")) {
+ cfg = dp_cfg;
+ serdes = dp_serdes;
+ } else if (of_node_name_eq(child, "usb3-phy")) {
+ cfg = usb_cfg;
+ serdes = usb_serdes;
+ }
+
/* Create per-lane phy */
- ret = qcom_qmp_phy_create(dev, child, id);
+ ret = qcom_qmp_phy_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -3335,11 +4039,20 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
* Register the pipe clock provided by phy.
* See function description to see details of this pipe clock.
*/
- ret = phy_pipe_clk_register(qmp, child);
- if (ret) {
- dev_err(qmp->dev,
- "failed to register pipe clock source\n");
- goto err_node_put;
+ if (cfg->type == PHY_TYPE_USB3 || cfg->type == PHY_TYPE_PCIE) {
+ ret = phy_pipe_clk_register(qmp, child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ goto err_node_put;
+ }
+ } else if (cfg->type == PHY_TYPE_DP) {
+ ret = phy_dp_clks_register(qmp, qmp->phys[id], child);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register DP clock source\n");
+ goto err_node_put;
+ }
}
id++;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 904b80ab9009..b7c530088a6c 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -137,6 +137,9 @@
#define QPHY_V3_DP_COM_RESET_OVRD_CTRL 0x1c
/* Only for QMP V3 PHY - QSERDES COM registers */
+#define QSERDES_V3_COM_ATB_SEL1 0x000
+#define QSERDES_V3_COM_ATB_SEL2 0x004
+#define QSERDES_V3_COM_FREQ_UPDATE 0x008
#define QSERDES_V3_COM_BG_TIMER 0x00c
#define QSERDES_V3_COM_SSC_EN_CENTER 0x010
#define QSERDES_V3_COM_SSC_ADJ_PER1 0x014
@@ -146,6 +149,13 @@
#define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024
#define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028
#define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034
+# define QSERDES_V3_COM_BIAS_EN 0x0001
+# define QSERDES_V3_COM_BIAS_EN_MUX 0x0002
+# define QSERDES_V3_COM_CLKBUF_R_EN 0x0004
+# define QSERDES_V3_COM_CLKBUF_L_EN 0x0008
+# define QSERDES_V3_COM_EN_SYSCLK_TX_SEL 0x0010
+# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_L 0x0020
+# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_R 0x0040
#define QSERDES_V3_COM_CLK_ENABLE1 0x038
#define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c
#define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040
@@ -207,12 +217,36 @@
#define QSERDES_V3_COM_CMN_MODE 0x184
/* Only for QMP V3 PHY - TX registers */
+#define QSERDES_V3_TX_BIST_MODE_LANENO 0x000
+#define QSERDES_V3_TX_CLKBUF_ENABLE 0x008
+#define QSERDES_V3_TX_TX_EMP_POST1_LVL 0x00c
+# define DP_PHY_TXn_TX_EMP_POST1_LVL_MASK 0x001f
+# define DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN 0x0020
+
+#define QSERDES_V3_TX_TX_DRV_LVL 0x01c
+# define DP_PHY_TXn_TX_DRV_LVL_MASK 0x001f
+# define DP_PHY_TXn_TX_DRV_LVL_MUX_EN 0x0020
+
+#define QSERDES_V3_TX_RESET_TSYNC_EN 0x024
+#define QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN 0x028
+
+#define QSERDES_V3_TX_TX_BAND 0x02c
+#define QSERDES_V3_TX_SLEW_CNTL 0x030
+#define QSERDES_V3_TX_INTERFACE_SELECT 0x034
+#define QSERDES_V3_TX_RES_CODE_LANE_TX 0x03c
+#define QSERDES_V3_TX_RES_CODE_LANE_RX 0x040
#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044
#define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX 0x048
#define QSERDES_V3_TX_DEBUG_BUS_SEL 0x058
+#define QSERDES_V3_TX_TRANSCEIVER_BIAS_EN 0x05c
#define QSERDES_V3_TX_HIGHZ_DRVR_EN 0x060
+#define QSERDES_V3_TX_TX_POL_INV 0x064
+#define QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN 0x068
#define QSERDES_V3_TX_LANE_MODE_1 0x08c
#define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4
+#define QSERDES_V3_TX_TRAN_DRVR_EMP_EN 0x0c0
+#define QSERDES_V3_TX_TX_INTERFACE_MODE 0x0c4
+#define QSERDES_V3_TX_VMODE_CTRL1 0x0f0
/* Only for QMP V3 PHY - RX registers */
#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008
@@ -315,6 +349,52 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+/* Only for QMP V3 PHY - DP PHY registers */
+#define QSERDES_V3_DP_PHY_REVISION_ID0 0x000
+#define QSERDES_V3_DP_PHY_REVISION_ID1 0x004
+#define QSERDES_V3_DP_PHY_REVISION_ID2 0x008
+#define QSERDES_V3_DP_PHY_REVISION_ID3 0x00c
+#define QSERDES_V3_DP_PHY_CFG 0x010
+#define QSERDES_V3_DP_PHY_PD_CTL 0x018
+# define DP_PHY_PD_CTL_PWRDN 0x001
+# define DP_PHY_PD_CTL_PSR_PWRDN 0x002
+# define DP_PHY_PD_CTL_AUX_PWRDN 0x004
+# define DP_PHY_PD_CTL_LANE_0_1_PWRDN 0x008
+# define DP_PHY_PD_CTL_LANE_2_3_PWRDN 0x010
+# define DP_PHY_PD_CTL_PLL_PWRDN 0x020
+# define DP_PHY_PD_CTL_DP_CLAMP_EN 0x040
+#define QSERDES_V3_DP_PHY_MODE 0x01c
+#define QSERDES_V3_DP_PHY_AUX_CFG0 0x020
+#define QSERDES_V3_DP_PHY_AUX_CFG1 0x024
+#define QSERDES_V3_DP_PHY_AUX_CFG2 0x028
+#define QSERDES_V3_DP_PHY_AUX_CFG3 0x02c
+#define QSERDES_V3_DP_PHY_AUX_CFG4 0x030
+#define QSERDES_V3_DP_PHY_AUX_CFG5 0x034
+#define QSERDES_V3_DP_PHY_AUX_CFG6 0x038
+#define QSERDES_V3_DP_PHY_AUX_CFG7 0x03c
+#define QSERDES_V3_DP_PHY_AUX_CFG8 0x040
+#define QSERDES_V3_DP_PHY_AUX_CFG9 0x044
+
+#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK 0x048
+# define PHY_AUX_STOP_ERR_MASK 0x01
+# define PHY_AUX_DEC_ERR_MASK 0x02
+# define PHY_AUX_SYNC_ERR_MASK 0x04
+# define PHY_AUX_ALIGN_ERR_MASK 0x08
+# define PHY_AUX_REQ_ERR_MASK 0x10
+
+#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_CLEAR 0x04c
+#define QSERDES_V3_DP_PHY_AUX_BIST_CFG 0x050
+
+#define QSERDES_V3_DP_PHY_VCO_DIV 0x064
+#define QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL 0x06c
+#define QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL 0x088
+
+#define QSERDES_V3_DP_PHY_SPARE0 0x0ac
+#define DP_PHY_SPARE0_MASK 0x0f
+#define DP_PHY_SPARE0_ORIENTATION_INFO_SHIFT 0x04
+
+#define QSERDES_V3_DP_PHY_STATUS 0x0c0
+
/* Only for QMP V4 PHY - QSERDES COM registers */
#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
#define QSERDES_V4_COM_SSC_PER1 0x01c
diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c
index ba3c197fc5b0..95dfa9fd284d 100644
--- a/drivers/phy/ralink/phy-ralink-usb.c
+++ b/drivers/phy/ralink/phy-ralink-usb.c
@@ -142,7 +142,7 @@ static int ralink_usb_phy_power_off(struct phy *_phy)
return 0;
}
-static struct phy_ops ralink_usb_phy_ops = {
+static const struct phy_ops ralink_usb_phy_ops = {
.power_on = ralink_usb_phy_power_on,
.power_off = ralink_usb_phy_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 0824b9dd5683..c2f22f90736c 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -9,6 +9,18 @@ config PHY_ROCKCHIP_DP
help
Enable this to support the Rockchip Display Port PHY.
+config PHY_ROCKCHIP_DPHY_RX0
+ tristate "Rockchip MIPI Synopsys DPHY RX0 driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select GENERIC_PHY_MIPI_DPHY
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI Synopsys DPHY RX0
+ associated with the Rockchip ISP module present in RK3399 SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-rockchip-dphy-rx0.
+
config PHY_ROCKCHIP_EMMC
tristate "Rockchip EMMC PHY Driver"
depends on ARCH_ROCKCHIP && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 9f59a81e4e0d..c3cfc7f0af5c 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
+obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
index 7c4df6d48c43..4df9476ef2a9 100644
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c
+++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 0d818b77a0d8..cfa9b8b7e5ac 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -556,41 +557,25 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy)
static int crport_handshake(struct exynos5_usbdrd_phy *phy_drd,
u32 val, u32 cmd)
{
- u32 usec = 100;
unsigned int result;
+ int err;
writel(val | cmd, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
- do {
- result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1);
- if (result & PHYREG1_CR_ACK)
- break;
-
- udelay(1);
- } while (usec-- > 0);
-
- if (!usec) {
- dev_err(phy_drd->dev,
- "CRPORT handshake timeout1 (0x%08x)\n", val);
- return -ETIME;
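+ /* Poll for ACK, 1us interval, 100us total budget */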
+ err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1,
+ result, (result & PHYREG1_CR_ACK), 1, 100);
+ if (err == -ETIMEDOUT) {
+ dev_err(phy_drd->dev, "CRPORT handshake timeout1 (0x%08x)\n", val);
+ return err;
}
- usec = 100;
-
writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0);
- do {
- result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1);
- if (!(result & PHYREG1_CR_ACK))
- break;
-
- udelay(1);
- } while (usec-- > 0);
-
- if (!usec) {
- dev_err(phy_drd->dev,
- "CRPORT handshake timeout2 (0x%08x)\n", val);
- return -ETIME;
+ err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1,
+ result, !(result & PHYREG1_CR_ACK), 1, 100);
+ if (err == -ETIMEDOUT) {
+ dev_err(phy_drd->dev, "CRPORT handshake timeout2 (0x%08x)\n", val);
+ return err;
}
return 0;
diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c
index 9832599a0283..dd9ab1519d83 100644
--- a/drivers/phy/samsung/phy-samsung-ufs.c
+++ b/drivers/phy/samsung/phy-samsung-ufs.c
@@ -268,7 +268,7 @@ static int samsung_ufs_phy_exit(struct phy *phy)
return 0;
}
-static struct phy_ops samsung_ufs_phy_ops = {
+static const struct phy_ops samsung_ufs_phy_ops = {
.init = samsung_ufs_phy_init,
.exit = samsung_ufs_phy_exit,
.power_on = samsung_ufs_phy_power_on,
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index 8c9d7c37536a..a3970e0f89da 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -34,3 +34,13 @@ config PHY_UNIPHIER_PCIE
help
Enable this to support PHY implemented in PCIe controller
on UniPhier SoCs. This driver supports LD20 and PXs3 SoCs.
+
+config PHY_UNIPHIER_AHCI
+ tristate "UniPhier AHCI PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ default SATA_AHCI_PLATFORM
+ select GENERIC_PHY
+ help
+ Enable this to support the PHY implemented in the AHCI controller
+ on UniPhier SoCs. This driver supports PXs2 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/Makefile b/drivers/phy/socionext/Makefile
index 7dc9095b5bb7..e67c2da6675c 100644
--- a/drivers/phy/socionext/Makefile
+++ b/drivers/phy/socionext/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PHY_UNIPHIER_USB2) += phy-uniphier-usb2.o
obj-$(CONFIG_PHY_UNIPHIER_USB3) += phy-uniphier-usb3hs.o phy-uniphier-usb3ss.o
obj-$(CONFIG_PHY_UNIPHIER_PCIE) += phy-uniphier-pcie.o
+obj-$(CONFIG_PHY_UNIPHIER_AHCI) += phy-uniphier-ahci.o
diff --git a/drivers/phy/socionext/phy-uniphier-ahci.c b/drivers/phy/socionext/phy-uniphier-ahci.c
new file mode 100644
index 000000000000..7427c40bf4ae
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-ahci.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-ahci.c - PHY driver for UniPhier AHCI controller
+ * Copyright 2016-2020, Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+struct uniphier_ahciphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_parent;
+ struct reset_control *rst, *rst_parent;
+ const struct uniphier_ahciphy_soc_data *data;
+};
+
+struct uniphier_ahciphy_soc_data {
+ int (*init)(struct uniphier_ahciphy_priv *priv);
+ int (*power_on)(struct uniphier_ahciphy_priv *priv);
+ int (*power_off)(struct uniphier_ahciphy_priv *priv);
+ bool is_ready_high;
+ bool is_phy_clk;
+};
+
+/* for PXs2/PXs3 */
+#define CKCTRL 0x0
+#define CKCTRL_P0_READY BIT(15)
+#define CKCTRL_P0_RESET BIT(10)
+#define CKCTRL_REF_SSP_EN BIT(9)
+#define TXCTRL0 0x4
+#define TXCTRL0_AMP_G3_MASK GENMASK(22, 16)
+#define TXCTRL0_AMP_G2_MASK GENMASK(14, 8)
+#define TXCTRL0_AMP_G1_MASK GENMASK(6, 0)
+#define TXCTRL1 0x8
+#define TXCTRL1_DEEMPH_G3_MASK GENMASK(21, 16)
+#define TXCTRL1_DEEMPH_G2_MASK GENMASK(13, 8)
+#define TXCTRL1_DEEMPH_G1_MASK GENMASK(5, 0)
+#define RXCTRL 0xc
+#define RXCTRL_LOS_LVL_MASK GENMASK(20, 16)
+#define RXCTRL_LOS_BIAS_MASK GENMASK(10, 8)
+#define RXCTRL_RX_EQ_MASK GENMASK(2, 0)
+
+static void uniphier_ahciphy_pxs2_enable(struct uniphier_ahciphy_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + CKCTRL);
+
+ if (enable) {
+ val |= CKCTRL_REF_SSP_EN;
+ writel(val, priv->base + CKCTRL);
+ val &= ~CKCTRL_P0_RESET;
+ writel(val, priv->base + CKCTRL);
+ } else {
+ val |= CKCTRL_P0_RESET;
+ writel(val, priv->base + CKCTRL);
+ val &= ~CKCTRL_REF_SSP_EN;
+ writel(val, priv->base + CKCTRL);
+ }
+}
+
+static int uniphier_ahciphy_pxs2_power_on(struct uniphier_ahciphy_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ uniphier_ahciphy_pxs2_enable(priv, true);
+
+ /* wait until PLL is ready */
+ if (priv->data->is_ready_high)
+ ret = readl_poll_timeout(priv->base + CKCTRL, val,
+ (val & CKCTRL_P0_READY), 200, 400);
+ else
+ ret = readl_poll_timeout(priv->base + CKCTRL, val,
+ !(val & CKCTRL_P0_READY), 200, 400);
+ if (ret) {
+ dev_err(priv->dev, "Failed to check whether PHY PLL is ready\n");
+ uniphier_ahciphy_pxs2_enable(priv, false);
+ }
+
+ return ret;
+}
+
+static int uniphier_ahciphy_pxs2_power_off(struct uniphier_ahciphy_priv *priv)
+{
+ uniphier_ahciphy_pxs2_enable(priv, false);
+
+ return 0;
+}
+
+static int uniphier_ahciphy_pxs3_init(struct uniphier_ahciphy_priv *priv)
+{
+ int i;
+ u32 val;
+
+ /* setup port parameter */
+ val = readl(priv->base + TXCTRL0);
+ val &= ~TXCTRL0_AMP_G3_MASK;
+ val |= FIELD_PREP(TXCTRL0_AMP_G3_MASK, 0x73);
+ val &= ~TXCTRL0_AMP_G2_MASK;
+ val |= FIELD_PREP(TXCTRL0_AMP_G2_MASK, 0x46);
+ val &= ~TXCTRL0_AMP_G1_MASK;
+ val |= FIELD_PREP(TXCTRL0_AMP_G1_MASK, 0x42);
+ writel(val, priv->base + TXCTRL0);
+
+ val = readl(priv->base + TXCTRL1);
+ val &= ~TXCTRL1_DEEMPH_G3_MASK;
+ val |= FIELD_PREP(TXCTRL1_DEEMPH_G3_MASK, 0x23);
+ val &= ~TXCTRL1_DEEMPH_G2_MASK;
+ val |= FIELD_PREP(TXCTRL1_DEEMPH_G2_MASK, 0x05);
+ val &= ~TXCTRL1_DEEMPH_G1_MASK;
+ val |= FIELD_PREP(TXCTRL1_DEEMPH_G1_MASK, 0x05);
+ writel(val, priv->base + TXCTRL1);
+
+ val = readl(priv->base + RXCTRL);
+ val &= ~RXCTRL_LOS_LVL_MASK;
+ val |= FIELD_PREP(RXCTRL_LOS_LVL_MASK, 0x9);
+ val &= ~RXCTRL_LOS_BIAS_MASK;
+ val |= FIELD_PREP(RXCTRL_LOS_BIAS_MASK, 0x2);
+ val &= ~RXCTRL_RX_EQ_MASK;
+ val |= FIELD_PREP(RXCTRL_RX_EQ_MASK, 0x1);
+ writel(val, priv->base + RXCTRL);
+
+ /* dummy-read 25 times to give the PHY time to stabilize */
+ for (i = 0; i < 25; i++)
+ readl(priv->base + CKCTRL);
+
+ return 0;
+}
+
+static int uniphier_ahciphy_init(struct phy *phy)
+{
+ struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->data->init) {
+ ret = priv->data->init(priv);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_ahciphy_exit(struct phy *phy)
+{
+ struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static int uniphier_ahciphy_power_on(struct phy *phy)
+{
+ struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->data->power_on) {
+ ret = priv->data->power_on(priv);
+ if (ret)
+ goto out_reset_assert;
+ }
+
+ return 0;
+
+out_reset_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int uniphier_ahciphy_power_off(struct phy *phy)
+{
+ struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->data->power_off)
+ ret = priv->data->power_off(priv);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct phy_ops uniphier_ahciphy_ops = {
+ .init = uniphier_ahciphy_init,
+ .exit = uniphier_ahciphy_exit,
+ .power_on = uniphier_ahciphy_power_on,
+ .power_off = uniphier_ahciphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_ahciphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_ahciphy_priv *priv;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ if (priv->data->is_phy_clk) {
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+ }
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_ahciphy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
+ }
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+static const struct uniphier_ahciphy_soc_data uniphier_pxs2_data = {
+ .power_on = uniphier_ahciphy_pxs2_power_on,
+ .power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_ready_high = false,
+ .is_phy_clk = false,
+};
+
+static const struct uniphier_ahciphy_soc_data uniphier_pxs3_data = {
+ .init = uniphier_ahciphy_pxs3_init,
+ .power_on = uniphier_ahciphy_pxs2_power_on,
+ .power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_ready_high = true,
+ .is_phy_clk = true,
+};
+
+static const struct of_device_id uniphier_ahciphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pxs2-ahci-phy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-ahci-phy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_ahciphy_match);
+
+static struct platform_driver uniphier_ahciphy_driver = {
+ .probe = uniphier_ahciphy_probe,
+ .driver = {
+ .name = "uniphier-ahci-phy",
+ .of_match_table = uniphier_ahciphy_match,
+ },
+};
+module_platform_driver(uniphier_ahciphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for AHCI controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 819c49af169a..2ff56ce77b30 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -19,15 +19,38 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#define CMU_R004 0x4
+#define CMU_R060 0x60
#define CMU_R07C 0x7c
-
+#define CMU_R088 0x88
+#define CMU_R0D0 0xd0
+#define CMU_R0E8 0xe8
+
+#define LANE_R048 0x248
+#define LANE_R058 0x258
+#define LANE_R06c 0x26c
+#define LANE_R070 0x270
+#define LANE_R19C 0x39c
+
+#define COMLANE_R004 0xa04
#define COMLANE_R138 0xb38
-#define VERSION 0x70
+#define VERSION_VAL 0x70
#define COMLANE_R190 0xb90
-
#define COMLANE_R194 0xb94
+#define COMRXEQ_R004 0x1404
+#define COMRXEQ_R008 0x1408
+#define COMRXEQ_R00C 0x140c
+#define COMRXEQ_R014 0x1414
+#define COMRXEQ_R018 0x1418
+#define COMRXEQ_R01C 0x141c
+#define COMRXEQ_R04C 0x144c
+#define COMRXEQ_R088 0x1488
+#define COMRXEQ_R094 0x1494
+#define COMRXEQ_R098 0x1498
+
#define SERDES_CTRL 0x1fd0
#define WIZ_LANEXCTL_STS 0x1fe0
@@ -80,27 +103,136 @@ static const struct regmap_config serdes_am654_regmap_config = {
.max_register = 0x1ffc,
};
-static const struct reg_field cmu_master_cdn_o = REG_FIELD(CMU_R07C, 24, 24);
-static const struct reg_field config_version = REG_FIELD(COMLANE_R138, 16, 23);
-static const struct reg_field l1_master_cdn_o = REG_FIELD(COMLANE_R190, 9, 9);
-static const struct reg_field cmu_ok_i_0 = REG_FIELD(COMLANE_R194, 19, 19);
-static const struct reg_field por_en = REG_FIELD(SERDES_CTRL, 29, 29);
-static const struct reg_field tx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31);
-static const struct reg_field rx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15);
-static const struct reg_field pll_enable = REG_FIELD(WIZ_PLL_CTRL, 29, 31);
-static const struct reg_field pll_ok = REG_FIELD(WIZ_PLL_CTRL, 28, 28);
+enum serdes_am654_fields {
+ /* CMU PLL Control */
+ CMU_PLL_CTRL,
+
+ LANE_PLL_CTRL_RXEQ_RXIDLE,
+
+ /* CMU VCO bias current and VREG setting */
+ AHB_PMA_CM_VCO_VBIAS_VREG,
+ AHB_PMA_CM_VCO_BIAS_VREG,
+
+ AHB_PMA_CM_SR,
+ AHB_SSC_GEN_Z_O_20_13,
+
+ /* AHB PMA Lane Configuration */
+ AHB_PMA_LN_AGC_THSEL_VREGH,
+
+ /* AGC and Signal detect threshold for Gen3 */
+ AHB_PMA_LN_GEN3_AGC_SD_THSEL,
+
+ AHB_PMA_LN_RX_SELR_GEN3,
+ AHB_PMA_LN_TX_DRV,
+
+ /* CMU Master Reset */
+ CMU_MASTER_CDN,
+
+ /* P2S ring buffer initial startup pointer difference */
+ P2S_RBUF_PTR_DIFF,
+
+ CONFIG_VERSION,
+
+ /* Lane 1 Master Reset */
+ L1_MASTER_CDN,
+
+ /* CMU OK Status */
+ CMU_OK_I_0,
+
+ /* Mid-speed initial calibration control */
+ COMRXEQ_MS_INIT_CTRL_7_0,
+
+ /* High-speed initial calibration control */
+ COMRXEQ_HS_INIT_CAL_7_0,
+
+ /* Mid-speed recalibration control */
+ COMRXEQ_MS_RECAL_CTRL_7_0,
+
+ /* High-speed recalibration control */
+ COMRXEQ_HS_RECAL_CTRL_7_0,
+
+ /* ATT configuration */
+ COMRXEQ_CSR_ATT_CONFIG,
+
+ /* Edge based boost adaptation window length */
+ COMRXEQ_CSR_EBSTADAPT_WIN_LEN,
+
+ /* COMRXEQ control 3 & 4 */
+ COMRXEQ_CTRL_3_4,
+
+ /* COMRXEQ control 14, 15 and 16 */
+ COMRXEQ_CTRL_14_15_16,
+
+ /* Threshold for errors in pattern data */
+ COMRXEQ_CSR_DLEV_ERR_THRESH,
+
+ /* COMRXEQ control 25 */
+ COMRXEQ_CTRL_25,
+
+ /* Mid-speed rate change calibration control */
+ CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O,
+
+ /* High-speed rate change calibration control */
+ COMRXEQ_HS_RCHANGE_CTRL_7_0,
+
+ /* Serdes reset */
+ POR_EN,
+
+ /* Tx Enable Value */
+ TX0_ENABLE,
+
+ /* Rx Enable Value */
+ RX0_ENABLE,
+
+ /* PLL Enable Value */
+ PLL_ENABLE,
+
+ /* PLL ready for use */
+ PLL_OK,
+
+ /* sentinel */
+ MAX_FIELDS
+};
+
+static const struct reg_field serdes_am654_reg_fields[] = {
+ [CMU_PLL_CTRL] = REG_FIELD(CMU_R004, 8, 15),
+ [AHB_PMA_CM_VCO_VBIAS_VREG] = REG_FIELD(CMU_R060, 8, 15),
+ [CMU_MASTER_CDN] = REG_FIELD(CMU_R07C, 24, 31),
+ [AHB_PMA_CM_VCO_BIAS_VREG] = REG_FIELD(CMU_R088, 24, 31),
+ [AHB_PMA_CM_SR] = REG_FIELD(CMU_R0D0, 24, 31),
+ [AHB_SSC_GEN_Z_O_20_13] = REG_FIELD(CMU_R0E8, 8, 15),
+ [LANE_PLL_CTRL_RXEQ_RXIDLE] = REG_FIELD(LANE_R048, 8, 15),
+ [AHB_PMA_LN_AGC_THSEL_VREGH] = REG_FIELD(LANE_R058, 16, 23),
+ [AHB_PMA_LN_GEN3_AGC_SD_THSEL] = REG_FIELD(LANE_R06c, 0, 7),
+ [AHB_PMA_LN_RX_SELR_GEN3] = REG_FIELD(LANE_R070, 16, 23),
+ [AHB_PMA_LN_TX_DRV] = REG_FIELD(LANE_R19C, 16, 23),
+ [P2S_RBUF_PTR_DIFF] = REG_FIELD(COMLANE_R004, 0, 7),
+ [CONFIG_VERSION] = REG_FIELD(COMLANE_R138, 16, 23),
+ [L1_MASTER_CDN] = REG_FIELD(COMLANE_R190, 8, 15),
+ [CMU_OK_I_0] = REG_FIELD(COMLANE_R194, 19, 19),
+ [COMRXEQ_MS_INIT_CTRL_7_0] = REG_FIELD(COMRXEQ_R004, 24, 31),
+ [COMRXEQ_HS_INIT_CAL_7_0] = REG_FIELD(COMRXEQ_R008, 0, 7),
+ [COMRXEQ_MS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 8, 15),
+ [COMRXEQ_HS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 16, 23),
+ [COMRXEQ_CSR_ATT_CONFIG] = REG_FIELD(COMRXEQ_R014, 16, 23),
+ [COMRXEQ_CSR_EBSTADAPT_WIN_LEN] = REG_FIELD(COMRXEQ_R018, 16, 23),
+ [COMRXEQ_CTRL_3_4] = REG_FIELD(COMRXEQ_R01C, 8, 15),
+ [COMRXEQ_CTRL_14_15_16] = REG_FIELD(COMRXEQ_R04C, 0, 7),
+ [COMRXEQ_CSR_DLEV_ERR_THRESH] = REG_FIELD(COMRXEQ_R088, 16, 23),
+ [COMRXEQ_CTRL_25] = REG_FIELD(COMRXEQ_R094, 24, 31),
+ [CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O] = REG_FIELD(COMRXEQ_R098, 8, 15),
+ [COMRXEQ_HS_RCHANGE_CTRL_7_0] = REG_FIELD(COMRXEQ_R098, 16, 23),
+ [POR_EN] = REG_FIELD(SERDES_CTRL, 29, 29),
+ [TX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31),
+ [RX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15),
+ [PLL_ENABLE] = REG_FIELD(WIZ_PLL_CTRL, 29, 31),
+ [PLL_OK] = REG_FIELD(WIZ_PLL_CTRL, 28, 28),
+};
struct serdes_am654 {
struct regmap *regmap;
- struct regmap_field *cmu_master_cdn_o;
- struct regmap_field *config_version;
- struct regmap_field *l1_master_cdn_o;
- struct regmap_field *cmu_ok_i_0;
- struct regmap_field *por_en;
- struct regmap_field *tx0_enable;
- struct regmap_field *rx0_enable;
- struct regmap_field *pll_enable;
- struct regmap_field *pll_ok;
+ struct regmap_field *fields[MAX_FIELDS];
struct device *dev;
struct mux_control *control;
@@ -116,12 +248,12 @@ static int serdes_am654_enable_pll(struct serdes_am654 *phy)
int ret;
u32 val;
- ret = regmap_field_write(phy->pll_enable, PLL_ENABLE_STATE);
+ ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_ENABLE_STATE);
if (ret)
return ret;
- return regmap_field_read_poll_timeout(phy->pll_ok, val, val, 1000,
- PLL_LOCK_TIME);
+ return regmap_field_read_poll_timeout(phy->fields[PLL_OK], val, val,
+ 1000, PLL_LOCK_TIME);
}
static void serdes_am654_disable_pll(struct serdes_am654 *phy)
@@ -129,41 +261,39 @@ static void serdes_am654_disable_pll(struct serdes_am654 *phy)
struct device *dev = phy->dev;
int ret;
- ret = regmap_field_write(phy->pll_enable, PLL_DISABLE_STATE);
+ ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_DISABLE_STATE);
if (ret)
dev_err(dev, "Failed to disable PLL\n");
}
static int serdes_am654_enable_txrx(struct serdes_am654 *phy)
{
- int ret;
+ int ret = 0;
/* Enable TX */
- ret = regmap_field_write(phy->tx0_enable, TX0_ENABLE_STATE);
- if (ret)
- return ret;
+ ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_ENABLE_STATE);
/* Enable RX */
- ret = regmap_field_write(phy->rx0_enable, RX0_ENABLE_STATE);
+ ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_ENABLE_STATE);
+
if (ret)
- return ret;
+ return -EIO;
return 0;
}
static int serdes_am654_disable_txrx(struct serdes_am654 *phy)
{
- int ret;
+ int ret = 0;
/* Disable TX */
- ret = regmap_field_write(phy->tx0_enable, TX0_DISABLE_STATE);
- if (ret)
- return ret;
+ ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_DISABLE_STATE);
/* Disable RX */
- ret = regmap_field_write(phy->rx0_enable, RX0_DISABLE_STATE);
+ ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_DISABLE_STATE);
+
if (ret)
- return ret;
+ return -EIO;
return 0;
}
@@ -187,8 +317,8 @@ static int serdes_am654_power_on(struct phy *x)
return ret;
}
- return regmap_field_read_poll_timeout(phy->cmu_ok_i_0, val, val,
- SLEEP_TIME, PLL_LOCK_TIME);
+ return regmap_field_read_poll_timeout(phy->fields[CMU_OK_I_0], val,
+ val, SLEEP_TIME, PLL_LOCK_TIME);
}
static int serdes_am654_power_off(struct phy *x)
@@ -286,19 +416,37 @@ static int serdes_am654_usb3_init(struct serdes_am654 *phy)
static int serdes_am654_pcie_init(struct serdes_am654 *phy)
{
- int ret;
-
- ret = regmap_field_write(phy->config_version, VERSION);
- if (ret)
- return ret;
+ int ret = 0;
- ret = regmap_field_write(phy->cmu_master_cdn_o, 0x1);
- if (ret)
- return ret;
+ ret |= regmap_field_write(phy->fields[CMU_PLL_CTRL], 0x2);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_VBIAS_VREG], 0x98);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_BIAS_VREG], 0x98);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_CM_SR], 0x45);
+ ret |= regmap_field_write(phy->fields[AHB_SSC_GEN_Z_O_20_13], 0xe);
+ ret |= regmap_field_write(phy->fields[LANE_PLL_CTRL_RXEQ_RXIDLE], 0x5);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_LN_AGC_THSEL_VREGH], 0x83);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_LN_GEN3_AGC_SD_THSEL], 0x83);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_LN_RX_SELR_GEN3], 0x81);
+ ret |= regmap_field_write(phy->fields[AHB_PMA_LN_TX_DRV], 0x3b);
+ ret |= regmap_field_write(phy->fields[P2S_RBUF_PTR_DIFF], 0x3);
+ ret |= regmap_field_write(phy->fields[CONFIG_VERSION], VERSION_VAL);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_MS_INIT_CTRL_7_0], 0xf);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_HS_INIT_CAL_7_0], 0x4f);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_MS_RECAL_CTRL_7_0], 0xf);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RECAL_CTRL_7_0], 0x4f);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_ATT_CONFIG], 0x7);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_EBSTADAPT_WIN_LEN], 0x7f);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_3_4], 0xf);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_14_15_16], 0x9a);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_DLEV_ERR_THRESH], 0x32);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_25], 0x80);
+ ret |= regmap_field_write(phy->fields[CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O], 0xf);
+ ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RCHANGE_CTRL_7_0], 0x4f);
+ ret |= regmap_field_write(phy->fields[CMU_MASTER_CDN], 0x1);
+ ret |= regmap_field_write(phy->fields[L1_MASTER_CDN], 0x2);
- ret = regmap_field_write(phy->l1_master_cdn_o, 0x1);
if (ret)
- return ret;
+ return -EIO;
return 0;
}
@@ -320,20 +468,19 @@ static int serdes_am654_init(struct phy *x)
static int serdes_am654_reset(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
- int ret;
+ int ret = 0;
serdes_am654_disable_pll(phy);
serdes_am654_disable_txrx(phy);
- ret = regmap_field_write(phy->por_en, 0x1);
- if (ret)
- return ret;
+ ret |= regmap_field_write(phy->fields[POR_EN], 0x1);
mdelay(1);
- ret = regmap_field_write(phy->por_en, 0x0);
+ ret |= regmap_field_write(phy->fields[POR_EN], 0x0);
+
if (ret)
- return ret;
+ return -EIO;
return 0;
}
@@ -587,66 +734,16 @@ static int serdes_am654_regfield_init(struct serdes_am654 *am654_phy)
{
struct regmap *regmap = am654_phy->regmap;
struct device *dev = am654_phy->dev;
+ int i;
- am654_phy->cmu_master_cdn_o = devm_regmap_field_alloc(dev, regmap,
- cmu_master_cdn_o);
- if (IS_ERR(am654_phy->cmu_master_cdn_o)) {
- dev_err(dev, "CMU_MASTER_CDN_O reg field init failed\n");
- return PTR_ERR(am654_phy->cmu_master_cdn_o);
- }
-
- am654_phy->config_version = devm_regmap_field_alloc(dev, regmap,
- config_version);
- if (IS_ERR(am654_phy->config_version)) {
- dev_err(dev, "CONFIG_VERSION reg field init failed\n");
- return PTR_ERR(am654_phy->config_version);
- }
-
- am654_phy->l1_master_cdn_o = devm_regmap_field_alloc(dev, regmap,
- l1_master_cdn_o);
- if (IS_ERR(am654_phy->l1_master_cdn_o)) {
- dev_err(dev, "L1_MASTER_CDN_O reg field init failed\n");
- return PTR_ERR(am654_phy->l1_master_cdn_o);
- }
-
- am654_phy->cmu_ok_i_0 = devm_regmap_field_alloc(dev, regmap,
- cmu_ok_i_0);
- if (IS_ERR(am654_phy->cmu_ok_i_0)) {
- dev_err(dev, "CMU_OK_I_0 reg field init failed\n");
- return PTR_ERR(am654_phy->cmu_ok_i_0);
- }
-
- am654_phy->por_en = devm_regmap_field_alloc(dev, regmap, por_en);
- if (IS_ERR(am654_phy->por_en)) {
- dev_err(dev, "POR_EN reg field init failed\n");
- return PTR_ERR(am654_phy->por_en);
- }
-
- am654_phy->tx0_enable = devm_regmap_field_alloc(dev, regmap,
- tx0_enable);
- if (IS_ERR(am654_phy->tx0_enable)) {
- dev_err(dev, "TX0_ENABLE reg field init failed\n");
- return PTR_ERR(am654_phy->tx0_enable);
- }
-
- am654_phy->rx0_enable = devm_regmap_field_alloc(dev, regmap,
- rx0_enable);
- if (IS_ERR(am654_phy->rx0_enable)) {
- dev_err(dev, "RX0_ENABLE reg field init failed\n");
- return PTR_ERR(am654_phy->rx0_enable);
- }
-
- am654_phy->pll_enable = devm_regmap_field_alloc(dev, regmap,
- pll_enable);
- if (IS_ERR(am654_phy->pll_enable)) {
- dev_err(dev, "PLL_ENABLE reg field init failed\n");
- return PTR_ERR(am654_phy->pll_enable);
- }
-
- am654_phy->pll_ok = devm_regmap_field_alloc(dev, regmap, pll_ok);
- if (IS_ERR(am654_phy->pll_ok)) {
- dev_err(dev, "PLL_OK reg field init failed\n");
- return PTR_ERR(am654_phy->pll_ok);
+ for (i = 0; i < MAX_FIELDS; i++) {
+ am654_phy->fields[i] = devm_regmap_field_alloc(dev,
+ regmap,
+ serdes_am654_reg_fields[i]);
+ if (IS_ERR(am654_phy->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap field %d\n", i);
+ return PTR_ERR(am654_phy->fields[i]);
+ }
}
return 0;
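
The rewrite above replaces nine hand-rolled devm_regmap_field_alloc() calls with an indexed reg_field table walked in a loop. A self-contained sketch of the pattern (the FIELD_* names and offsets are invented):

enum { FIELD_A, FIELD_B, MAX_EXAMPLE_FIELDS };

static const struct reg_field example_fields[] = {
	[FIELD_A] = REG_FIELD(0x04, 8, 15),	/* register offset, lsb, msb */
	[FIELD_B] = REG_FIELD(0x08, 28, 28),
};

static int example_regfield_init(struct device *dev, struct regmap *regmap,
				 struct regmap_field *fields[])
{
	int i;

	for (i = 0; i < MAX_EXAMPLE_FIELDS; i++) {
		fields[i] = devm_regmap_field_alloc(dev, regmap,
						    example_fields[i]);
		if (IS_ERR(fields[i]))
			return PTR_ERR(fields[i]);
	}

	return 0;	/* then e.g. regmap_field_write(fields[FIELD_A], 0x2) */
}
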
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 7edd5c3bc536..5fd2e8a08bfc 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
@@ -22,7 +23,7 @@
#define AM33XX_GMII_SEL_MODE_RGMII 2
enum {
- PHY_GMII_SEL_PORT_MODE,
+ PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
PHY_GMII_SEL_RMII_IO_CLK_EN,
PHY_GMII_SEL_LAST,
@@ -41,6 +42,7 @@ struct phy_gmii_sel_soc_data {
u32 num_ports;
u32 features;
const struct reg_field (*regfields)[PHY_GMII_SEL_LAST];
+ bool use_of_data;
};
struct phy_gmii_sel_priv {
@@ -49,6 +51,8 @@ struct phy_gmii_sel_priv {
struct regmap *regmap;
struct phy_provider *phy_provider;
struct phy_gmii_sel_phy_priv *if_phys;
+ u32 num_ports;
+ u32 reg_offset;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -147,13 +151,9 @@ static const
struct reg_field phy_gmii_sel_fields_dra7[][PHY_GMII_SEL_LAST] = {
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 0, 1),
- [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0),
- [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0),
},
{
[PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 4, 5),
- [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0),
- [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0),
},
};
@@ -172,16 +172,19 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = {
static const
struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = {
- {
- [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4040, 0, 1),
- [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0),
- [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0),
- },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2), },
+ { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2), },
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
- .num_ports = 1,
+ .use_of_data = true,
.regfields = phy_gmii_sel_fields_am654,
};
@@ -228,7 +231,7 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
args->args_count < 2)
return ERR_PTR(-EINVAL);
- if (phy_id > priv->soc_data->num_ports)
+ if (phy_id > priv->num_ports)
return ERR_PTR(-EINVAL);
if (phy_id != priv->if_phys[phy_id - 1].id)
return ERR_PTR(-EINVAL);
@@ -242,68 +245,97 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
return priv->if_phys[phy_id].if_phy;
}
-static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
+static int phy_gmii_init_phy(struct phy_gmii_sel_priv *priv, int port,
+ struct phy_gmii_sel_phy_priv *if_phy)
{
const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data;
struct device *dev = priv->dev;
+ const struct reg_field *fields;
+ struct regmap_field *regfield;
+ struct reg_field field;
+ int ret;
+
+ if_phy->id = port;
+ if_phy->priv = priv;
+
+ fields = soc_data->regfields[port - 1];
+ field = *fields++;
+ field.reg += priv->reg_offset;
+ dev_dbg(dev, "%s field %x %d %d\n", __func__,
+ field.reg, field.msb, field.lsb);
+
+ regfield = devm_regmap_field_alloc(dev, priv->regmap, field);
+ if (IS_ERR(regfield))
+ return PTR_ERR(regfield);
+ if_phy->fields[PHY_GMII_SEL_PORT_MODE] = regfield;
+
+ field = *fields++;
+ field.reg += priv->reg_offset;
+ if (soc_data->features & BIT(PHY_GMII_SEL_RGMII_ID_MODE)) {
+ regfield = devm_regmap_field_alloc(dev,
+ priv->regmap,
+ field);
+ if (IS_ERR(regfield))
+ return PTR_ERR(regfield);
+ if_phy->fields[PHY_GMII_SEL_RGMII_ID_MODE] = regfield;
+ dev_dbg(dev, "%s field %x %d %d\n", __func__,
+ field.reg, field.msb, field.lsb);
+ }
+
+ field = *fields;
+ field.reg += priv->reg_offset;
+ if (soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN)) {
+ regfield = devm_regmap_field_alloc(dev,
+ priv->regmap,
+ field);
+ if (IS_ERR(regfield))
+ return PTR_ERR(regfield);
+ if_phy->fields[PHY_GMII_SEL_RMII_IO_CLK_EN] = regfield;
+ dev_dbg(dev, "%s field %x %d %d\n", __func__,
+ field.reg, field.msb, field.lsb);
+ }
+
+ if_phy->if_phy = devm_phy_create(dev,
+ priv->dev->of_node,
+ &phy_gmii_sel_ops);
+ if (IS_ERR(if_phy->if_phy)) {
+ ret = PTR_ERR(if_phy->if_phy);
+ dev_err(dev, "Failed to create phy%d %d\n", port, ret);
+ return ret;
+ }
+ phy_set_drvdata(if_phy->if_phy, if_phy);
+
+ return 0;
+}
+
+static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
+{
+ const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data;
struct phy_gmii_sel_phy_priv *if_phys;
- int i, num_ports, ret;
+ struct device *dev = priv->dev;
+ int i, ret;
- num_ports = priv->soc_data->num_ports;
+ if (soc_data->use_of_data) {
+ const __be32 *offset;
+ u64 size;
- if_phys = devm_kcalloc(priv->dev, num_ports,
+ offset = of_get_address(dev->of_node, 0, &size, NULL);
+ if (!offset)
+ return -EINVAL;
+ priv->num_ports = size / sizeof(u32);
+ if (!priv->num_ports)
+ return -EINVAL;
+ priv->reg_offset = __be32_to_cpu(*offset);
+ }
+
+ if_phys = devm_kcalloc(dev, priv->num_ports,
sizeof(*if_phys), GFP_KERNEL);
if (!if_phys)
return -ENOMEM;
- dev_dbg(dev, "%s %d\n", __func__, num_ports);
-
- for (i = 0; i < num_ports; i++) {
- const struct reg_field *field;
- struct regmap_field *regfield;
+ dev_dbg(dev, "%s %d\n", __func__, priv->num_ports);
- if_phys[i].id = i + 1;
- if_phys[i].priv = priv;
-
- field = &soc_data->regfields[i][PHY_GMII_SEL_PORT_MODE];
- dev_dbg(dev, "%s field %x %d %d\n", __func__,
- field->reg, field->msb, field->lsb);
-
- regfield = devm_regmap_field_alloc(dev, priv->regmap, *field);
- if (IS_ERR(regfield))
- return PTR_ERR(regfield);
- if_phys[i].fields[PHY_GMII_SEL_PORT_MODE] = regfield;
-
- field = &soc_data->regfields[i][PHY_GMII_SEL_RGMII_ID_MODE];
- if (field->reg != (~0)) {
- regfield = devm_regmap_field_alloc(dev,
- priv->regmap,
- *field);
- if (IS_ERR(regfield))
- return PTR_ERR(regfield);
- if_phys[i].fields[PHY_GMII_SEL_RGMII_ID_MODE] =
- regfield;
- }
-
- field = &soc_data->regfields[i][PHY_GMII_SEL_RMII_IO_CLK_EN];
- if (field->reg != (~0)) {
- regfield = devm_regmap_field_alloc(dev,
- priv->regmap,
- *field);
- if (IS_ERR(regfield))
- return PTR_ERR(regfield);
- if_phys[i].fields[PHY_GMII_SEL_RMII_IO_CLK_EN] =
- regfield;
- }
-
- if_phys[i].if_phy = devm_phy_create(dev,
- priv->dev->of_node,
- &phy_gmii_sel_ops);
- if (IS_ERR(if_phys[i].if_phy)) {
- ret = PTR_ERR(if_phys[i].if_phy);
- dev_err(dev, "Failed to create phy%d %d\n", i, ret);
+ for (i = 0; i < priv->num_ports; i++) {
+ ret = phy_gmii_init_phy(priv, i + 1, &if_phys[i]);
+ if (ret)
return ret;
- }
- phy_set_drvdata(if_phys[i].if_phy, &if_phys[i]);
}
priv->if_phys = if_phys;
@@ -328,6 +360,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->soc_data = of_id->data;
+ priv->num_ports = priv->soc_data->num_ports;
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
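
The AM65 variant now derives its port count from the size of the node's reg property instead of a hard-coded num_ports. A sketch of that derivation, assuming one 32-bit mode register per port, with an explicit NULL check on the returned pointer:

	const __be32 *offset;
	u64 size;

	/* e.g. reg = <0x4040 0x20> yields 0x20 / 4 = 8 ports */
	offset = of_get_address(dev->of_node, 0, &size, NULL);
	if (!offset)
		return -EINVAL;

	num_ports = size / sizeof(u32);
	reg_offset = __be32_to_cpu(*offset);	/* base of the per-port fields */
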
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 33c4cf0105a4..c9cfafe89cbf 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -20,7 +20,6 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#include <dt-bindings/phy/phy.h>
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 507f79d14adb..4fec90d2624f 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -6,23 +6,23 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/io.h>
-#include <linux/phy/omap_usb.h>
-#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/pm_runtime.h>
#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/phy/omap_control_phy.h>
+#include <linux/phy/omap_usb.h>
#include <linux/phy/phy.h>
-#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/of_platform.h>
+#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/usb/phy_companion.h>
#define USB2PHY_ANA_CONFIG1 0x4c
#define USB2PHY_DISCON_BYP_LATCH BIT(31)
@@ -89,7 +89,7 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
}
/**
- * omap_usb2_set_comparator - links the comparator present in the sytem with
+ * omap_usb2_set_comparator - links the comparator present in the system with
* this phy
* @comparator - the companion phy(comparator) for this phy
*
@@ -142,7 +142,7 @@ static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
}
static int omap_usb_set_peripheral(struct usb_otg *otg,
- struct usb_gadget *gadget)
+ struct usb_gadget *gadget)
{
otg->gadget = gadget;
if (!gadget)
@@ -409,7 +409,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
return PTR_ERR(phy->phy_base);
phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
- "syscon-phy-power");
+ "syscon-phy-power");
if (IS_ERR(phy->syscon_phy_power)) {
dev_dbg(&pdev->dev,
"can't get syscon-phy-power, using control device\n");
@@ -438,7 +438,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
}
}
-
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER)
@@ -452,10 +451,10 @@ static int omap_usb2_probe(struct platform_device *pdev)
if (PTR_ERR(phy->wkupclk) != -EPROBE_DEFER)
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
return PTR_ERR(phy->wkupclk);
- } else {
- dev_warn(&pdev->dev,
- "found usb_phy_cm_clk32k, please fix DTS\n");
}
+
+ dev_warn(&pdev->dev,
+ "found usb_phy_cm_clk32k, please fix DTS\n");
}
phy->optclk = devm_clk_get(phy->dev, "refclk");
@@ -504,7 +503,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
return PTR_ERR(phy_provider);
}
-
usb_add_phy_dev(&phy->phy);
return 0;
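
omap_usb2_probe() keeps a fallback from the current clock name to a deprecated DT name, deferring when the primary lookup defers and warning only when the legacy name actually resolves. A condensed sketch of that pattern:

	clk = devm_clk_get(dev, "wkupclk");
	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet */

		/* fall back to the deprecated binding */
		clk = devm_clk_get(dev, "usb_phy_cm_clk32k");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		dev_warn(dev, "found usb_phy_cm_clk32k, please fix DTS\n");
	}
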
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 8828613c4e0e..815095326e2d 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -208,42 +208,12 @@ config PINCTRL_OXNAS
config PINCTRL_ROCKCHIP
bool
+ depends on OF
select PINMUX
select GENERIC_PINCONF
select GENERIC_IRQ_CHIP
select MFD_SYSCON
-
-config PINCTRL_RZA1
- bool "Renesas RZ/A1 gpio and pinctrl driver"
- depends on OF
- depends on ARCH_R7S72100 || COMPILE_TEST
- select GPIOLIB
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- help
- This selects pinctrl driver for Renesas RZ/A1 platforms.
-
-config PINCTRL_RZA2
- bool "Renesas RZ/A2 gpio and pinctrl driver"
- depends on OF
- depends on ARCH_R7S9210 || COMPILE_TEST
- select GPIOLIB
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- help
- This selects GPIO and pinctrl driver for Renesas RZ/A2 platforms.
-
-config PINCTRL_RZN1
- bool "Renesas RZ/N1 pinctrl driver"
- depends on OF
- depends on ARCH_RZN1 || COMPILE_TEST
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- help
- This selects pinctrl driver for Renesas RZ/N1 devices.
+ select OF_GPIO
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
@@ -415,8 +385,8 @@ source "drivers/pinctrl/nomadik/Kconfig"
source "drivers/pinctrl/nuvoton/Kconfig"
source "drivers/pinctrl/pxa/Kconfig"
source "drivers/pinctrl/qcom/Kconfig"
+source "drivers/pinctrl/renesas/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
-source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sprd/Kconfig"
source "drivers/pinctrl/stm32/Kconfig"
@@ -429,6 +399,7 @@ source "drivers/pinctrl/mediatek/Kconfig"
source "drivers/pinctrl/zte/Kconfig"
source "drivers/pinctrl/meson/Kconfig"
source "drivers/pinctrl/cirrus/Kconfig"
+source "drivers/pinctrl/visconti/Kconfig"
config PINCTRL_XWAY
bool
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 1731b2154df9..f53933b2ff02 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -30,9 +30,6 @@ obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
-obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
-obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
-obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
@@ -62,8 +59,8 @@ obj-y += nomadik/
obj-$(CONFIG_ARCH_NPCM7XX) += nuvoton/
obj-$(CONFIG_PINCTRL_PXA) += pxa/
obj-$(CONFIG_ARCH_QCOM) += qcom/
+obj-$(CONFIG_PINCTRL_RENESAS) += renesas/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
-obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
@@ -74,3 +71,4 @@ obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-y += mediatek/
obj-$(CONFIG_PINCTRL_ZX) += zte/
obj-y += cirrus/
+obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/
diff --git a/drivers/pinctrl/actions/Kconfig b/drivers/pinctrl/actions/Kconfig
index 966f1c2c89d6..a1d16e8280e5 100644
--- a/drivers/pinctrl/actions/Kconfig
+++ b/drivers/pinctrl/actions/Kconfig
@@ -10,6 +10,12 @@ config PINCTRL_OWL
help
Say Y here to enable Actions Semi OWL pinctrl driver
+config PINCTRL_S500
+ bool "Actions Semi S500 pinctrl driver"
+ depends on PINCTRL_OWL
+ help
+ Say Y here to enable Actions Semi S500 pinctrl driver
+
config PINCTRL_S700
bool "Actions Semi S700 pinctrl driver"
depends on PINCTRL_OWL
diff --git a/drivers/pinctrl/actions/Makefile b/drivers/pinctrl/actions/Makefile
index 61aa9107a43a..b9e2c527c9d3 100644
--- a/drivers/pinctrl/actions/Makefile
+++ b/drivers/pinctrl/actions/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PINCTRL_OWL) += pinctrl-owl.o
+obj-$(CONFIG_PINCTRL_S500) += pinctrl-s500.o
obj-$(CONFIG_PINCTRL_S700) += pinctrl-s700.o
obj-$(CONFIG_PINCTRL_S900) += pinctrl-s900.o
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 7efdfb4f3e9b..903a4baf3846 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -125,7 +125,7 @@ static void owl_pin_dbg_show(struct pinctrl_dev *pctrldev,
seq_printf(s, "%s", dev_name(pctrl->dev));
}
-static struct pinctrl_ops owl_pinctrl_ops = {
+static const struct pinctrl_ops owl_pinctrl_ops = {
.get_groups_count = owl_get_groups_count,
.get_group_name = owl_get_group_name,
.get_group_pins = owl_get_group_pins,
@@ -212,7 +212,7 @@ static int owl_set_mux(struct pinctrl_dev *pctrldev,
return 0;
}
-static struct pinmux_ops owl_pinmux_ops = {
+static const struct pinmux_ops owl_pinmux_ops = {
.get_functions_count = owl_get_funcs_count,
.get_function_name = owl_get_func_name,
.get_function_groups = owl_get_func_groups,
diff --git a/drivers/pinctrl/actions/pinctrl-s500.c b/drivers/pinctrl/actions/pinctrl-s500.c
new file mode 100644
index 000000000000..38e30914af6e
--- /dev/null
+++ b/drivers/pinctrl/actions/pinctrl-s500.c
@@ -0,0 +1,1727 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Actions Semi S500 SoC Pinctrl driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-owl.h"
+
+/* Pinctrl registers offset */
+#define MFCTL0 (0x0040)
+#define MFCTL1 (0x0044)
+#define MFCTL2 (0x0048)
+#define MFCTL3 (0x004C)
+#define PAD_PULLCTL0 (0x0060)
+#define PAD_PULLCTL1 (0x0064)
+#define PAD_PULLCTL2 (0x0068)
+#define PAD_ST0 (0x006C)
+#define PAD_ST1 (0x0070)
+#define PAD_CTL (0x0074)
+#define PAD_DRV0 (0x0080)
+#define PAD_DRV1 (0x0084)
+#define PAD_DRV2 (0x0088)
+
+#define _GPIOA(offset) (offset)
+#define _GPIOB(offset) (32 + (offset))
+#define _GPIOC(offset) (64 + (offset))
+#define _GPIOD(offset) (96 + (offset))
+#define _GPIOE(offset) (128 + (offset))
+
+#define NUM_GPIOS (_GPIOE(3) + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
+
+#define DNAND_DQS _GPIOA(12)
+#define DNAND_DQSN _GPIOA(13)
+#define ETH_TXD0 _GPIOA(14)
+#define ETH_TXD1 _GPIOA(15)
+#define ETH_TXEN _GPIOA(16)
+#define ETH_RXER _GPIOA(17)
+#define ETH_CRS_DV _GPIOA(18)
+#define ETH_RXD1 _GPIOA(19)
+#define ETH_RXD0 _GPIOA(20)
+#define ETH_REF_CLK _GPIOA(21)
+#define ETH_MDC _GPIOA(22)
+#define ETH_MDIO _GPIOA(23)
+#define SIRQ0 _GPIOA(24)
+#define SIRQ1 _GPIOA(25)
+#define SIRQ2 _GPIOA(26)
+#define I2S_D0 _GPIOA(27)
+#define I2S_BCLK0 _GPIOA(28)
+#define I2S_LRCLK0 _GPIOA(29)
+#define I2S_MCLK0 _GPIOA(30)
+#define I2S_D1 _GPIOA(31)
+
+#define I2S_BCLK1 _GPIOB(0)
+#define I2S_LRCLK1 _GPIOB(1)
+#define I2S_MCLK1 _GPIOB(2)
+#define KS_IN0 _GPIOB(3)
+#define KS_IN1 _GPIOB(4)
+#define KS_IN2 _GPIOB(5)
+#define KS_IN3 _GPIOB(6)
+#define KS_OUT0 _GPIOB(7)
+#define KS_OUT1 _GPIOB(8)
+#define KS_OUT2 _GPIOB(9)
+#define LVDS_OEP _GPIOB(10)
+#define LVDS_OEN _GPIOB(11)
+#define LVDS_ODP _GPIOB(12)
+#define LVDS_ODN _GPIOB(13)
+#define LVDS_OCP _GPIOB(14)
+#define LVDS_OCN _GPIOB(15)
+#define LVDS_OBP _GPIOB(16)
+#define LVDS_OBN _GPIOB(17)
+#define LVDS_OAP _GPIOB(18)
+#define LVDS_OAN _GPIOB(19)
+#define LVDS_EEP _GPIOB(20)
+#define LVDS_EEN _GPIOB(21)
+#define LVDS_EDP _GPIOB(22)
+#define LVDS_EDN _GPIOB(23)
+#define LVDS_ECP _GPIOB(24)
+#define LVDS_ECN _GPIOB(25)
+#define LVDS_EBP _GPIOB(26)
+#define LVDS_EBN _GPIOB(27)
+#define LVDS_EAP _GPIOB(28)
+#define LVDS_EAN _GPIOB(29)
+#define LCD0_D18 _GPIOB(30)
+#define LCD0_D17 _GPIOB(31)
+
+#define DSI_DP3 _GPIOC(0)
+#define DSI_DN3 _GPIOC(1)
+#define DSI_DP1 _GPIOC(2)
+#define DSI_DN1 _GPIOC(3)
+#define DSI_CP _GPIOC(4)
+#define DSI_CN _GPIOC(5)
+#define DSI_DP0 _GPIOC(6)
+#define DSI_DN0 _GPIOC(7)
+#define DSI_DP2 _GPIOC(8)
+#define DSI_DN2 _GPIOC(9)
+#define SD0_D0 _GPIOC(10)
+#define SD0_D1 _GPIOC(11)
+#define SD0_D2 _GPIOC(12)
+#define SD0_D3 _GPIOC(13)
+#define SD1_D0 _GPIOC(14) /* SD0_D4 */
+#define SD1_D1 _GPIOC(15) /* SD0_D5 */
+#define SD1_D2 _GPIOC(16) /* SD0_D6 */
+#define SD1_D3 _GPIOC(17) /* SD0_D7 */
+#define SD0_CMD _GPIOC(18)
+#define SD0_CLK _GPIOC(19)
+#define SD1_CMD _GPIOC(20)
+#define SD1_CLK _GPIOC(21)
+#define SPI0_SCLK _GPIOC(22)
+#define SPI0_SS _GPIOC(23)
+#define SPI0_MISO _GPIOC(24)
+#define SPI0_MOSI _GPIOC(25)
+#define UART0_RX _GPIOC(26)
+#define UART0_TX _GPIOC(27)
+#define I2C0_SCLK _GPIOC(28)
+#define I2C0_SDATA _GPIOC(29)
+#define SENSOR0_PCLK _GPIOC(31)
+
+#define SENSOR0_CKOUT _GPIOD(10)
+#define DNAND_ALE _GPIOD(12)
+#define DNAND_CLE _GPIOD(13)
+#define DNAND_CEB0 _GPIOD(14)
+#define DNAND_CEB1 _GPIOD(15)
+#define DNAND_CEB2 _GPIOD(16)
+#define DNAND_CEB3 _GPIOD(17)
+#define UART2_RX _GPIOD(18)
+#define UART2_TX _GPIOD(19)
+#define UART2_RTSB _GPIOD(20)
+#define UART2_CTSB _GPIOD(21)
+#define UART3_RX _GPIOD(22)
+#define UART3_TX _GPIOD(23)
+#define UART3_RTSB _GPIOD(24)
+#define UART3_CTSB _GPIOD(25)
+#define PCM1_IN _GPIOD(28)
+#define PCM1_CLK _GPIOD(29)
+#define PCM1_SYNC _GPIOD(30)
+#define PCM1_OUT _GPIOD(31)
+
+#define I2C1_SCLK _GPIOE(0)
+#define I2C1_SDATA _GPIOE(1)
+#define I2C2_SCLK _GPIOE(2)
+#define I2C2_SDATA _GPIOE(3)
+
+#define CSI_DN0 _PIN(0)
+#define CSI_DP0 _PIN(1)
+#define CSI_DN1 _PIN(2)
+#define CSI_DP1 _PIN(3)
+#define CSI_CN _PIN(4)
+#define CSI_CP _PIN(5)
+#define CSI_DN2 _PIN(6)
+#define CSI_DP2 _PIN(7)
+#define CSI_DN3 _PIN(8)
+#define CSI_DP3 _PIN(9)
+
+#define DNAND_D0 _PIN(10)
+#define DNAND_D1 _PIN(11)
+#define DNAND_D2 _PIN(12)
+#define DNAND_D3 _PIN(13)
+#define DNAND_D4 _PIN(14)
+#define DNAND_D5 _PIN(15)
+#define DNAND_D6 _PIN(16)
+#define DNAND_D7 _PIN(17)
+#define DNAND_WRB _PIN(18)
+#define DNAND_RDB _PIN(19)
+#define DNAND_RDBN _PIN(20)
+#define DNAND_RB _PIN(21)
+
+#define PORB _PIN(22)
+#define CLKO_25M _PIN(23)
+#define BSEL _PIN(24)
+#define PKG0 _PIN(25)
+#define PKG1 _PIN(26)
+#define PKG2 _PIN(27)
+#define PKG3 _PIN(28)
+
+#define _FIRSTPAD _GPIOA(0)
+#define _LASTPAD PKG3
+#define NUM_PADS (_PIN(28) + 1)
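+
+/*
+ * Pad numbering: the GPIO banks are 32 pins apart, so a pad index is
+ * bank * 32 + offset (e.g. KS_IN0 = _GPIOB(3) = 35); dedicated pads
+ * continue after the last GPIO, so CSI_DN0 = _PIN(0) = NUM_GPIOS =
+ * _GPIOE(3) + 1 = 132.
+ */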
+
+static const struct pinctrl_pin_desc s500_pads[] = {
+ PINCTRL_PIN(DNAND_DQS, "dnand_dqs"),
+ PINCTRL_PIN(DNAND_DQSN, "dnand_dqsn"),
+ PINCTRL_PIN(ETH_TXD0, "eth_txd0"),
+ PINCTRL_PIN(ETH_TXD1, "eth_txd1"),
+ PINCTRL_PIN(ETH_TXEN, "eth_txen"),
+ PINCTRL_PIN(ETH_RXER, "eth_rxer"),
+ PINCTRL_PIN(ETH_CRS_DV, "eth_crs_dv"),
+ PINCTRL_PIN(ETH_RXD1, "eth_rxd1"),
+ PINCTRL_PIN(ETH_RXD0, "eth_rxd0"),
+ PINCTRL_PIN(ETH_REF_CLK, "eth_ref_clk"),
+ PINCTRL_PIN(ETH_MDC, "eth_mdc"),
+ PINCTRL_PIN(ETH_MDIO, "eth_mdio"),
+ PINCTRL_PIN(SIRQ0, "sirq0"),
+ PINCTRL_PIN(SIRQ1, "sirq1"),
+ PINCTRL_PIN(SIRQ2, "sirq2"),
+ PINCTRL_PIN(I2S_D0, "i2s_d0"),
+ PINCTRL_PIN(I2S_BCLK0, "i2s_bclk0"),
+ PINCTRL_PIN(I2S_LRCLK0, "i2s_lrclk0"),
+ PINCTRL_PIN(I2S_MCLK0, "i2s_mclk0"),
+ PINCTRL_PIN(I2S_D1, "i2s_d1"),
+ PINCTRL_PIN(I2S_BCLK1, "i2s_bclk1"),
+ PINCTRL_PIN(I2S_LRCLK1, "i2s_lrclk1"),
+ PINCTRL_PIN(I2S_MCLK1, "i2s_mclk1"),
+ PINCTRL_PIN(KS_IN0, "ks_in0"),
+ PINCTRL_PIN(KS_IN1, "ks_in1"),
+ PINCTRL_PIN(KS_IN2, "ks_in2"),
+ PINCTRL_PIN(KS_IN3, "ks_in3"),
+ PINCTRL_PIN(KS_OUT0, "ks_out0"),
+ PINCTRL_PIN(KS_OUT1, "ks_out1"),
+ PINCTRL_PIN(KS_OUT2, "ks_out2"),
+ PINCTRL_PIN(LVDS_OEP, "lvds_oep"),
+ PINCTRL_PIN(LVDS_OEN, "lvds_oen"),
+ PINCTRL_PIN(LVDS_ODP, "lvds_odp"),
+ PINCTRL_PIN(LVDS_ODN, "lvds_odn"),
+ PINCTRL_PIN(LVDS_OCP, "lvds_ocp"),
+ PINCTRL_PIN(LVDS_OCN, "lvds_ocn"),
+ PINCTRL_PIN(LVDS_OBP, "lvds_obp"),
+ PINCTRL_PIN(LVDS_OBN, "lvds_obn"),
+ PINCTRL_PIN(LVDS_OAP, "lvds_oap"),
+ PINCTRL_PIN(LVDS_OAN, "lvds_oan"),
+ PINCTRL_PIN(LVDS_EEP, "lvds_eep"),
+ PINCTRL_PIN(LVDS_EEN, "lvds_een"),
+ PINCTRL_PIN(LVDS_EDP, "lvds_edp"),
+ PINCTRL_PIN(LVDS_EDN, "lvds_edn"),
+ PINCTRL_PIN(LVDS_ECP, "lvds_ecp"),
+ PINCTRL_PIN(LVDS_ECN, "lvds_ecn"),
+ PINCTRL_PIN(LVDS_EBP, "lvds_ebp"),
+ PINCTRL_PIN(LVDS_EBN, "lvds_ebn"),
+ PINCTRL_PIN(LVDS_EAP, "lvds_eap"),
+ PINCTRL_PIN(LVDS_EAN, "lvds_ean"),
+ PINCTRL_PIN(LCD0_D18, "lcd0_d18"),
+ PINCTRL_PIN(LCD0_D17, "lcd0_d17"),
+ PINCTRL_PIN(DSI_DP3, "dsi_dp3"),
+ PINCTRL_PIN(DSI_DN3, "dsi_dn3"),
+ PINCTRL_PIN(DSI_DP1, "dsi_dp1"),
+ PINCTRL_PIN(DSI_DN1, "dsi_dn1"),
+ PINCTRL_PIN(DSI_CP, "dsi_cp"),
+ PINCTRL_PIN(DSI_CN, "dsi_cn"),
+ PINCTRL_PIN(DSI_DP0, "dsi_dp0"),
+ PINCTRL_PIN(DSI_DN0, "dsi_dn0"),
+ PINCTRL_PIN(DSI_DP2, "dsi_dp2"),
+ PINCTRL_PIN(DSI_DN2, "dsi_dn2"),
+ PINCTRL_PIN(SD0_D0, "sd0_d0"),
+ PINCTRL_PIN(SD0_D1, "sd0_d1"),
+ PINCTRL_PIN(SD0_D2, "sd0_d2"),
+ PINCTRL_PIN(SD0_D3, "sd0_d3"),
+ PINCTRL_PIN(SD1_D0, "sd1_d0"),
+ PINCTRL_PIN(SD1_D1, "sd1_d1"),
+ PINCTRL_PIN(SD1_D2, "sd1_d2"),
+ PINCTRL_PIN(SD1_D3, "sd1_d3"),
+ PINCTRL_PIN(SD0_CMD, "sd0_cmd"),
+ PINCTRL_PIN(SD0_CLK, "sd0_clk"),
+ PINCTRL_PIN(SD1_CMD, "sd1_cmd"),
+ PINCTRL_PIN(SD1_CLK, "sd1_clk"),
+ PINCTRL_PIN(SPI0_SCLK, "spi0_sclk"),
+ PINCTRL_PIN(SPI0_SS, "spi0_ss"),
+ PINCTRL_PIN(SPI0_MISO, "spi0_miso"),
+ PINCTRL_PIN(SPI0_MOSI, "spi0_mosi"),
+ PINCTRL_PIN(UART0_RX, "uart0_rx"),
+ PINCTRL_PIN(UART0_TX, "uart0_tx"),
+ PINCTRL_PIN(I2C0_SCLK, "i2c0_sclk"),
+ PINCTRL_PIN(I2C0_SDATA, "i2c0_sdata"),
+ PINCTRL_PIN(SENSOR0_PCLK, "sensor0_pclk"),
+ PINCTRL_PIN(SENSOR0_CKOUT, "sensor0_ckout"),
+ PINCTRL_PIN(DNAND_ALE, "dnand_ale"),
+ PINCTRL_PIN(DNAND_CLE, "dnand_cle"),
+ PINCTRL_PIN(DNAND_CEB0, "dnand_ceb0"),
+ PINCTRL_PIN(DNAND_CEB1, "dnand_ceb1"),
+ PINCTRL_PIN(DNAND_CEB2, "dnand_ceb2"),
+ PINCTRL_PIN(DNAND_CEB3, "dnand_ceb3"),
+ PINCTRL_PIN(UART2_RX, "uart2_rx"),
+ PINCTRL_PIN(UART2_TX, "uart2_tx"),
+ PINCTRL_PIN(UART2_RTSB, "uart2_rtsb"),
+ PINCTRL_PIN(UART2_CTSB, "uart2_ctsb"),
+ PINCTRL_PIN(UART3_RX, "uart3_rx"),
+ PINCTRL_PIN(UART3_TX, "uart3_tx"),
+ PINCTRL_PIN(UART3_RTSB, "uart3_rtsb"),
+ PINCTRL_PIN(UART3_CTSB, "uart3_ctsb"),
+ PINCTRL_PIN(PCM1_IN, "pcm1_in"),
+ PINCTRL_PIN(PCM1_CLK, "pcm1_clk"),
+ PINCTRL_PIN(PCM1_SYNC, "pcm1_sync"),
+ PINCTRL_PIN(PCM1_OUT, "pcm1_out"),
+ PINCTRL_PIN(I2C1_SCLK, "i2c1_sclk"),
+ PINCTRL_PIN(I2C1_SDATA, "i2c1_sdata"),
+ PINCTRL_PIN(I2C2_SCLK, "i2c2_sclk"),
+ PINCTRL_PIN(I2C2_SDATA, "i2c2_sdata"),
+ PINCTRL_PIN(CSI_DN0, "csi_dn0"),
+ PINCTRL_PIN(CSI_DP0, "csi_dp0"),
+ PINCTRL_PIN(CSI_DN1, "csi_dn1"),
+ PINCTRL_PIN(CSI_DP1, "csi_dp1"),
+ PINCTRL_PIN(CSI_DN2, "csi_dn2"),
+ PINCTRL_PIN(CSI_DP2, "csi_dp2"),
+ PINCTRL_PIN(CSI_DN3, "csi_dn3"),
+ PINCTRL_PIN(CSI_DP3, "csi_dp3"),
+ PINCTRL_PIN(CSI_CN, "csi_cn"),
+ PINCTRL_PIN(CSI_CP, "csi_cp"),
+ PINCTRL_PIN(DNAND_D0, "dnand_d0"),
+ PINCTRL_PIN(DNAND_D1, "dnand_d1"),
+ PINCTRL_PIN(DNAND_D2, "dnand_d2"),
+ PINCTRL_PIN(DNAND_D3, "dnand_d3"),
+ PINCTRL_PIN(DNAND_D4, "dnand_d4"),
+ PINCTRL_PIN(DNAND_D5, "dnand_d5"),
+ PINCTRL_PIN(DNAND_D6, "dnand_d6"),
+ PINCTRL_PIN(DNAND_D7, "dnand_d7"),
+ PINCTRL_PIN(DNAND_RB, "dnand_rb"),
+ PINCTRL_PIN(DNAND_RDB, "dnand_rdb"),
+ PINCTRL_PIN(DNAND_RDBN, "dnand_rdbn"),
+ PINCTRL_PIN(DNAND_WRB, "dnand_wrb"),
+ PINCTRL_PIN(PORB, "porb"),
+ PINCTRL_PIN(CLKO_25M, "clko_25m"),
+ PINCTRL_PIN(BSEL, "bsel"),
+ PINCTRL_PIN(PKG0, "pkg0"),
+ PINCTRL_PIN(PKG1, "pkg1"),
+ PINCTRL_PIN(PKG2, "pkg2"),
+ PINCTRL_PIN(PKG3, "pkg3"),
+};
+
+enum s500_pinmux_functions {
+ S500_MUX_NOR,
+ S500_MUX_ETH_RMII,
+ S500_MUX_ETH_SMII,
+ S500_MUX_SPI0,
+ S500_MUX_SPI1,
+ S500_MUX_SPI2,
+ S500_MUX_SPI3,
+ S500_MUX_SENS0,
+ S500_MUX_SENS1,
+ S500_MUX_UART0,
+ S500_MUX_UART1,
+ S500_MUX_UART2,
+ S500_MUX_UART3,
+ S500_MUX_UART4,
+ S500_MUX_UART5,
+ S500_MUX_UART6,
+ S500_MUX_I2S0,
+ S500_MUX_I2S1,
+ S500_MUX_PCM1,
+ S500_MUX_PCM0,
+ S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_PWM0,
+ S500_MUX_PWM1,
+ S500_MUX_PWM2,
+ S500_MUX_PWM3,
+ S500_MUX_PWM4,
+ S500_MUX_PWM5,
+ S500_MUX_P0,
+ S500_MUX_SD0,
+ S500_MUX_SD1,
+ S500_MUX_SD2,
+ S500_MUX_I2C0,
+ S500_MUX_I2C1,
+ /*S500_MUX_I2C2,*/
+ S500_MUX_I2C3,
+ S500_MUX_DSI,
+ S500_MUX_LVDS,
+ S500_MUX_USB30,
+ S500_MUX_CLKO_25M,
+ S500_MUX_MIPI_CSI,
+ S500_MUX_NAND,
+ S500_MUX_SPDIF,
+ /*S500_MUX_SIRQ0,*/
+ /*S500_MUX_SIRQ1,*/
+ /*S500_MUX_SIRQ2,*/
+ S500_MUX_TS,
+ S500_MUX_LCD0,
+ S500_MUX_RESERVED,
+};
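+
+/*
+ * Each *_mfp_funcs[] table below lists, in bit-field-value order, the
+ * function selected by each value of the corresponding MFCTL field;
+ * the same function may appear more than once when several field
+ * values select it.
+ */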
+
+/* MFPCTL group data */
+/* mfp0_31_26 reserved */
+/* mfp0_25_23 */
+static unsigned int lcd0_d18_mfp_pads[] = { LCD0_D18 };
+static unsigned int lcd0_d18_mfp_funcs[] = { S500_MUX_NOR,
+ S500_MUX_SENS1,
+ S500_MUX_PWM2,
+ S500_MUX_PWM4,
+ S500_MUX_LCD0 };
+/* mfp0_22_20 */
+static unsigned int rmii_crs_dv_mfp_pads[] = { ETH_CRS_DV };
+static unsigned int rmii_crs_dv_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_ETH_SMII,
+ S500_MUX_SPI2,
+ S500_MUX_UART4,
+ S500_MUX_PWM4 };
+/* mfp0_18_16_eth_txd0 */
+static unsigned int rmii_txd0_mfp_pads[] = { ETH_TXD0 };
+static unsigned int rmii_txd0_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_ETH_SMII,
+ S500_MUX_SPI2,
+ S500_MUX_UART6,
+ S500_MUX_PWM4 };
+/* mfp0_18_16_eth_txd1 */
+static unsigned int rmii_txd1_mfp_pads[] = { ETH_TXD1 };
+static unsigned int rmii_txd1_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_ETH_SMII,
+ S500_MUX_SPI2,
+ S500_MUX_UART6,
+ S500_MUX_PWM5 };
+/* mfp0_15_13_rmii_txen */
+static unsigned int rmii_txen_mfp_pads[] = { ETH_TXEN };
+static unsigned int rmii_txen_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_UART2,
+ S500_MUX_SPI3,
+ S500_MUX_PWM0 };
+/* mfp0_15_13_rmii_rxen */
+static unsigned int rmii_rxen_mfp_pads[] = { ETH_RXER };
+static unsigned int rmii_rxen_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_UART2,
+ S500_MUX_SPI3,
+ S500_MUX_PWM1 };
+/* mfp0_12_11 reserved */
+
+/* mfp0_10_8_rmii_rxd1 */
+static unsigned int rmii_rxd1_mfp_pads[] = { ETH_RXD1 };
+static unsigned int rmii_rxd1_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_UART2,
+ S500_MUX_SPI3,
+ S500_MUX_PWM2,
+ S500_MUX_UART5 };
+/* mfp0_10_8_rmii_rxd0 */
+static unsigned int rmii_rxd0_mfp_pads[] = { ETH_RXD0 };
+static unsigned int rmii_rxd0_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_UART2,
+ S500_MUX_SPI3,
+ S500_MUX_PWM3,
+ S500_MUX_UART5 };
+/* mfp0_7_6 */
+static unsigned int rmii_ref_clk_mfp_pads[] = { ETH_REF_CLK };
+static unsigned int rmii_ref_clk_mfp_funcs[] = { S500_MUX_ETH_RMII,
+ S500_MUX_UART4,
+ S500_MUX_SPI2,
+ S500_MUX_RESERVED,
+ S500_MUX_ETH_SMII };
+/* mfp0_5 */
+static unsigned int i2s_d0_mfp_pads[] = { I2S_D0 };
+static unsigned int i2s_d0_mfp_funcs[] = { S500_MUX_I2S0,
+ S500_MUX_NOR };
+/* mfp0_4_3 */
+static unsigned int i2s_pcm1_mfp_pads[] = { I2S_LRCLK0, I2S_MCLK0 };
+static unsigned int i2s_pcm1_mfp_funcs[] = { S500_MUX_I2S0,
+ S500_MUX_NOR,
+ S500_MUX_PCM1 };
+/* mfp0_2_1_i2s0 */
+static unsigned int i2s0_pcm0_mfp_pads[] = { I2S_BCLK0 };
+static unsigned int i2s0_pcm0_mfp_funcs[] = { S500_MUX_I2S0,
+ S500_MUX_NOR,
+ S500_MUX_PCM0 };
+/* mfp0_2_1_i2s1 */
+static unsigned int i2s1_pcm0_mfp_pads[] = { I2S_BCLK1, I2S_LRCLK1,
+ I2S_MCLK1 };
+static unsigned int i2s1_pcm0_mfp_funcs[] = { S500_MUX_I2S1,
+ S500_MUX_NOR,
+ S500_MUX_PCM0 };
+/* mfp0_0 */
+static unsigned int i2s_d1_mfp_pads[] = { I2S_D1 };
+static unsigned int i2s_d1_mfp_funcs[] = { S500_MUX_I2S1,
+ S500_MUX_NOR };
+/* mfp1_31_29_ks_in0 */
+static unsigned int ks_in0_mfp_pads[] = { KS_IN0 };
+static unsigned int ks_in0_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_NOR,
+ S500_MUX_PWM0,
+ S500_MUX_PWM4,
+ S500_MUX_SENS1,
+ S500_MUX_PWM4,
+ S500_MUX_P0 };
+/* mfp1_31_29_ks_in1 */
+static unsigned int ks_in1_mfp_pads[] = { KS_IN1 };
+static unsigned int ks_in1_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_NOR,
+ S500_MUX_PWM1,
+ S500_MUX_PWM5,
+ S500_MUX_SENS1,
+ S500_MUX_PWM1,
+ S500_MUX_USB30 };
+/* mfp1_31_29_ks_in2 */
+static unsigned int ks_in2_mfp_pads[] = { KS_IN2 };
+static unsigned int ks_in2_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_NOR,
+ S500_MUX_PWM0,
+ S500_MUX_PWM0,
+ S500_MUX_SENS1,
+ S500_MUX_PWM0,
+ S500_MUX_P0 };
+/* mfp1_28_26_ks_in3 */
+static unsigned int ks_in3_mfp_pads[] = { KS_IN3 };
+static unsigned int ks_in3_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_NOR,
+ S500_MUX_PWM1,
+ S500_MUX_RESERVED,
+ S500_MUX_SENS1 };
+/* mfp1_28_26_ks_out0 */
+static unsigned int ks_out0_mfp_pads[] = { KS_OUT0 };
+static unsigned int ks_out0_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_UART5,
+ S500_MUX_NOR,
+ S500_MUX_PWM2,
+ S500_MUX_RESERVED,
+ S500_MUX_SENS1,
+ S500_MUX_SD0 };
+/* mfp1_28_26_ks_out1 */
+static unsigned int ks_out1_mfp_pads[] = { KS_OUT1 };
+static unsigned int ks_out1_mfp_funcs[] = { S500_MUX_KS,
+ S500_MUX_JTAG,
+ S500_MUX_NOR,
+ S500_MUX_PWM3,
+ S500_MUX_RESERVED,
+ S500_MUX_SENS1,
+ S500_MUX_SD0 };
+/* mfp1_25_23 */
+static unsigned int ks_out2_mfp_pads[] = { KS_OUT2 };
+static unsigned int ks_out2_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_KS,
+ S500_MUX_NOR,
+ S500_MUX_PWM2,
+ S500_MUX_UART5,
+ S500_MUX_SENS1 };
+/* mfp1_22_21 */
+static unsigned int lvds_o_pn_mfp_pads[] = { LVDS_OEP, LVDS_OEN,
+ LVDS_ODP, LVDS_ODN,
+ LVDS_OCP, LVDS_OCN,
+ LVDS_OBP, LVDS_OBN,
+ LVDS_OAP, LVDS_OAN };
+static unsigned int lvds_o_pn_mfp_funcs[] = { S500_MUX_LVDS,
+ S500_MUX_TS,
+ S500_MUX_LCD0 };
+/* mfp1_20_19 */
+static unsigned int dsi_dn0_mfp_pads[] = { DSI_DN0 };
+static unsigned int dsi_dn0_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_UART2,
+ S500_MUX_SPI0 };
+/* mfp1_18_17 */
+static unsigned int dsi_dp2_mfp_pads[] = { DSI_DP2 };
+static unsigned int dsi_dp2_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_UART2,
+ S500_MUX_SPI0,
+ S500_MUX_SD1 };
+/* mfp1_16_14 */
+static unsigned int lcd0_d17_mfp_pads[] = { LCD0_D17 };
+static unsigned int lcd0_d17_mfp_funcs[] = { S500_MUX_NOR,
+ S500_MUX_SD0,
+ S500_MUX_SD1,
+ S500_MUX_PWM3,
+ S500_MUX_LCD0 };
+/* mfp1_13_12 */
+static unsigned int dsi_dp3_mfp_pads[] = { DSI_DP3 };
+static unsigned int dsi_dp3_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_SD0,
+ S500_MUX_SD1,
+ S500_MUX_LCD0 };
+/* mfp1_11_10 */
+static unsigned int dsi_dn3_mfp_pads[] = { DSI_DN3 };
+static unsigned int dsi_dn3_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_RESERVED,
+ S500_MUX_SD1,
+ S500_MUX_LCD0 };
+/* mfp1_9_7 */
+static unsigned int dsi_dp0_mfp_pads[] = { DSI_DP0 };
+static unsigned int dsi_dp0_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_RESERVED,
+ S500_MUX_SD0,
+ S500_MUX_UART2,
+ S500_MUX_SPI0 };
+/* mfp1_6_5 */
+static unsigned int lvds_ee_pn_mfp_pads[] = { LVDS_EEP, LVDS_EEN };
+static unsigned int lvds_ee_pn_mfp_funcs[] = { S500_MUX_LVDS,
+ S500_MUX_NOR,
+ S500_MUX_TS,
+ S500_MUX_LCD0 };
+/* mfp1_4_3 */
+static unsigned int spi0_i2c_pcm_mfp_pads[] = { SPI0_SCLK, SPI0_MOSI };
+static unsigned int spi0_i2c_pcm_mfp_funcs[] = { S500_MUX_SPI0,
+ S500_MUX_NOR,
+ S500_MUX_I2C3,
+ S500_MUX_PCM0 };
+/* mfp1_2_0 */
+static unsigned int spi0_i2s_pcm_mfp_pads[] = { SPI0_SS, SPI0_MISO };
+static unsigned int spi0_i2s_pcm_mfp_funcs[] = { S500_MUX_SPI0,
+ S500_MUX_NOR,
+ S500_MUX_I2S1,
+ S500_MUX_PCM1,
+ S500_MUX_PCM0 };
+/* mfp2_31 reserved */
+/* mfp2_30_29 */
+static unsigned int dsi_dnp1_cp_mfp_pads[] = { DSI_DP1, DSI_CP, DSI_CN };
+static unsigned int dsi_dnp1_cp_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_SD1,
+ S500_MUX_LCD0 };
+/* mfp2_28_27 */
+static unsigned int lvds_e_pn_mfp_pads[] = { LVDS_EDP, LVDS_EDN,
+ LVDS_ECP, LVDS_ECN,
+ LVDS_EBP, LVDS_EBN,
+ LVDS_EAP, LVDS_EAN };
+static unsigned int lvds_e_pn_mfp_funcs[] = { S500_MUX_LVDS,
+ S500_MUX_NOR,
+ S500_MUX_LCD0 };
+/* mfp2_26_24 */
+static unsigned int dsi_dn2_mfp_pads[] = { DSI_DN2 };
+static unsigned int dsi_dn2_mfp_funcs[] = { S500_MUX_DSI,
+ S500_MUX_RESERVED,
+ S500_MUX_SD1,
+ S500_MUX_UART2,
+ S500_MUX_SPI0 };
+/* mfp2_23 */
+static unsigned int uart2_rtsb_mfp_pads[] = { UART2_RTSB };
+static unsigned int uart2_rtsb_mfp_funcs[] = { S500_MUX_UART2,
+ S500_MUX_UART0 };
+/* mfp2_22 */
+static unsigned int uart2_ctsb_mfp_pads[] = { UART2_CTSB };
+static unsigned int uart2_ctsb_mfp_funcs[] = { S500_MUX_UART2,
+ S500_MUX_UART0 };
+/* mfp2_21 */
+static unsigned int uart3_rtsb_mfp_pads[] = { UART3_RTSB };
+static unsigned int uart3_rtsb_mfp_funcs[] = { S500_MUX_UART3,
+ S500_MUX_UART5 };
+/* mfp2_20 */
+static unsigned int uart3_ctsb_mfp_pads[] = { UART3_CTSB };
+static unsigned int uart3_ctsb_mfp_funcs[] = { S500_MUX_UART3,
+ S500_MUX_UART5 };
+/* mfp2_19_17 */
+static unsigned int sd0_d0_mfp_pads[] = { SD0_D0 };
+static unsigned int sd0_d0_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_NOR,
+ S500_MUX_RESERVED,
+ S500_MUX_JTAG,
+ S500_MUX_UART2,
+ S500_MUX_UART5 };
+/* mfp2_16_14 */
+static unsigned int sd0_d1_mfp_pads[] = { SD0_D1 };
+static unsigned int sd0_d1_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_NOR,
+ S500_MUX_RESERVED,
+ S500_MUX_RESERVED,
+ S500_MUX_UART2,
+ S500_MUX_UART5 };
+/* mfp2_13_11 */
+static unsigned int sd0_d2_d3_mfp_pads[] = { SD0_D2, SD0_D3 };
+static unsigned int sd0_d2_d3_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_NOR,
+ S500_MUX_RESERVED,
+ S500_MUX_JTAG,
+ S500_MUX_UART2,
+ S500_MUX_UART1 };
+/* mfp2_10_9 */
+static unsigned int sd1_d0_d3_mfp_pads[] = { SD1_D0, SD1_D1,
+ SD1_D2, SD1_D3 };
+static unsigned int sd1_d0_d3_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_NOR,
+ S500_MUX_RESERVED,
+ S500_MUX_SD1 };
+/* mfp2_8_7 */
+static unsigned int sd0_cmd_mfp_pads[] = { SD0_CMD };
+static unsigned int sd0_cmd_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_NOR,
+ S500_MUX_RESERVED,
+ S500_MUX_JTAG };
+/* mfp2_6_5 */
+static unsigned int sd0_clk_mfp_pads[] = { SD0_CLK };
+static unsigned int sd0_clk_mfp_funcs[] = { S500_MUX_SD0,
+ S500_MUX_RESERVED,
+ S500_MUX_JTAG };
+/* mfp2_4_3 */
+static unsigned int sd1_cmd_mfp_pads[] = { SD1_CMD };
+static unsigned int sd1_cmd_mfp_funcs[] = { S500_MUX_SD1,
+ S500_MUX_NOR };
+/* mfp2_2_0 */
+static unsigned int uart0_rx_mfp_pads[] = { UART0_RX };
+static unsigned int uart0_rx_mfp_funcs[] = { S500_MUX_UART0,
+ S500_MUX_UART2,
+ S500_MUX_SPI1,
+ S500_MUX_I2C0,
+ S500_MUX_PCM1,
+ S500_MUX_I2S1 };
+/* mfp3_31 reserved */
+/* mfp3_30 */
+static unsigned int clko_25m_mfp_pads[] = { CLKO_25M };
+static unsigned int clko_25m_mfp_funcs[] = { S500_MUX_RESERVED,
+ S500_MUX_CLKO_25M };
+/* mfp3_29_28 */
+static unsigned int csi_cn_cp_mfp_pads[] = { CSI_CN, CSI_CP };
+static unsigned int csi_cn_cp_mfp_funcs[] = { S500_MUX_MIPI_CSI,
+ S500_MUX_SENS0 };
+/* mfp3_27_24 reserved */
+/* mfp3_23_22 */
+static unsigned int sens0_ckout_mfp_pads[] = { SENSOR0_CKOUT };
+static unsigned int sens0_ckout_mfp_funcs[] = { S500_MUX_SENS0,
+ S500_MUX_NOR,
+ S500_MUX_SENS1,
+ S500_MUX_PWM1 };
+/* mfp3_21_19 */
+static unsigned int uart0_tx_mfp_pads[] = { UART0_TX };
+static unsigned int uart0_tx_mfp_funcs[] = { S500_MUX_UART0,
+ S500_MUX_UART2,
+ S500_MUX_SPI1,
+ S500_MUX_I2C0,
+ S500_MUX_SPDIF,
+ S500_MUX_PCM1,
+ S500_MUX_I2S1 };
+/* mfp3_18_16 */
+static unsigned int i2c0_mfp_pads[] = { I2C0_SCLK,
+ I2C0_SDATA };
+static unsigned int i2c0_mfp_funcs[] = { S500_MUX_I2C0,
+ S500_MUX_UART2,
+ S500_MUX_I2C1,
+ S500_MUX_UART1,
+ S500_MUX_SPI1 };
+/* mfp3_15_14 */
+static unsigned int csi_dn_dp_mfp_pads[] = { CSI_DN0, CSI_DN1,
+ CSI_DN2, CSI_DN3,
+ CSI_DP0, CSI_DP1,
+ CSI_DP2, CSI_DP3 };
+static unsigned int csi_dn_dp_mfp_funcs[] = { S500_MUX_MIPI_CSI,
+ S500_MUX_SENS0 };
+/* mfp3_13_12 */
+static unsigned int sen0_pclk_mfp_pads[] = { SENSOR0_PCLK };
+static unsigned int sen0_pclk_mfp_funcs[] = { S500_MUX_SENS0,
+ S500_MUX_NOR,
+ S500_MUX_PWM0 };
+/* mfp3_11_10 */
+static unsigned int pcm1_in_mfp_pads[] = { PCM1_IN };
+static unsigned int pcm1_in_mfp_funcs[] = { S500_MUX_PCM1,
+ S500_MUX_SENS1,
+ S500_MUX_UART4,
+ S500_MUX_PWM4 };
+/* mfp3_9_8 */
+static unsigned int pcm1_clk_mfp_pads[] = { PCM1_CLK };
+static unsigned int pcm1_clk_mfp_funcs[] = { S500_MUX_PCM1,
+ S500_MUX_SENS1,
+ S500_MUX_UART4,
+ S500_MUX_PWM5 };
+/* mfp3_7_6 */
+static unsigned int pcm1_sync_mfp_pads[] = { PCM1_SYNC };
+static unsigned int pcm1_sync_mfp_funcs[] = { S500_MUX_PCM1,
+ S500_MUX_SENS1,
+ S500_MUX_UART6,
+ S500_MUX_I2C3 };
+/* mfp3_5_4 */
+static unsigned int pcm1_out_mfp_pads[] = { PCM1_OUT };
+static unsigned int pcm1_out_mfp_funcs[] = { S500_MUX_PCM1,
+ S500_MUX_SENS1,
+ S500_MUX_UART6,
+ S500_MUX_I2C3 };
+/* mfp3_3 */
+static unsigned int dnand_data_wr_mfp_pads[] = { DNAND_D0, DNAND_D1,
+ DNAND_D2, DNAND_D3,
+ DNAND_D4, DNAND_D5,
+ DNAND_D6, DNAND_D7,
+ DNAND_RDB, DNAND_RDBN };
+static unsigned int dnand_data_wr_mfp_funcs[] = { S500_MUX_NAND,
+ S500_MUX_SD2 };
+/* mfp3_2 */
+static unsigned int dnand_acle_ce0_mfp_pads[] = { DNAND_ALE,
+ DNAND_CLE,
+ DNAND_CEB0,
+ DNAND_CEB1 };
+static unsigned int dnand_acle_ce0_mfp_funcs[] = { S500_MUX_NAND,
+ S500_MUX_SPI2 };
+/* mfp3_1_0_nand_ceb2 */
+static unsigned int nand_ceb2_mfp_pads[] = { DNAND_CEB2 };
+static unsigned int nand_ceb2_mfp_funcs[] = { S500_MUX_NAND,
+ S500_MUX_PWM5 };
+/* mfp3_1_0_nand_ceb3 */
+static unsigned int nand_ceb3_mfp_pads[] = { DNAND_CEB3 };
+static unsigned int nand_ceb3_mfp_funcs[] = { S500_MUX_NAND,
+ S500_MUX_PWM4 };
+
+/* PADDRV group data */
+/* paddrv0_29_28 */
+static unsigned int sirq_drv_pads[] = { SIRQ0, SIRQ1, SIRQ2 };
+/* paddrv0_23_22 */
+static unsigned int rmii_txd01_txen_drv_pads[] = { ETH_TXD0, ETH_TXD1,
+ ETH_TXEN };
+/* paddrv0_21_20 */
+static unsigned int rmii_rxer_drv_pads[] = { ETH_RXER };
+/* paddrv0_19_18 */
+static unsigned int rmii_crs_drv_pads[] = { ETH_CRS_DV };
+/* paddrv0_17_16 */
+static unsigned int rmii_rxd10_drv_pads[] = { ETH_RXD0, ETH_RXD1 };
+/* paddrv0_15_14 */
+static unsigned int rmii_ref_clk_drv_pads[] = { ETH_REF_CLK };
+/* paddrv0_13_12 */
+static unsigned int smi_mdc_mdio_drv_pads[] = { ETH_MDC, ETH_MDIO };
+/* paddrv0_11_10 */
+static unsigned int i2s_d0_drv_pads[] = { I2S_D0 };
+/* paddrv0_9_8 */
+static unsigned int i2s_bclk0_drv_pads[] = { I2S_BCLK0 };
+/* paddrv0_7_6 */
+static unsigned int i2s3_drv_pads[] = { I2S_LRCLK0, I2S_MCLK0,
+ I2S_D1 };
+/* paddrv0_5_4 */
+static unsigned int i2s13_drv_pads[] = { I2S_BCLK1, I2S_LRCLK1,
+ I2S_MCLK1 };
+/* paddrv0_3_2 */
+static unsigned int pcm1_drv_pads[] = { PCM1_IN, PCM1_CLK,
+ PCM1_SYNC, PCM1_OUT };
+/* paddrv0_1_0 */
+static unsigned int ks_in_drv_pads[] = { KS_IN0, KS_IN1,
+ KS_IN2, KS_IN3 };
+/* paddrv1_31_30 */
+static unsigned int ks_out_drv_pads[] = { KS_OUT0, KS_OUT1, KS_OUT2 };
+/* paddrv1_29_28 */
+static unsigned int lvds_all_drv_pads[] = { LVDS_OEP, LVDS_OEN,
+ LVDS_ODP, LVDS_ODN,
+ LVDS_OCP, LVDS_OCN,
+ LVDS_OBP, LVDS_OBN,
+ LVDS_OAP, LVDS_OAN,
+ LVDS_EEP, LVDS_EEN,
+ LVDS_EDP, LVDS_EDN,
+ LVDS_ECP, LVDS_ECN,
+ LVDS_EBP, LVDS_EBN,
+ LVDS_EAP, LVDS_EAN };
+/* paddrv1_27_26 */
+static unsigned int lcd_dsi_drv_pads[] = { DSI_DP3, DSI_DN3, DSI_DP1,
+ DSI_DN1, DSI_CP, DSI_CN };
+/* paddrv1_25_24 */
+static unsigned int dsi_drv_pads[] = { DSI_DP0, DSI_DN0,
+ DSI_DP2, DSI_DN2 };
+/* paddrv1_23_22 */
+static unsigned int sd0_d0_d3_drv_pads[] = { SD0_D0, SD0_D1,
+ SD0_D2, SD0_D3 };
+/* paddrv1_21_20 */
+static unsigned int sd1_d0_d3_drv_pads[] = { SD1_D0, SD1_D1,
+ SD1_D2, SD1_D3 };
+/* paddrv1_19_18 */
+static unsigned int sd0_cmd_drv_pads[] = { SD0_CMD };
+/* paddrv1_17_16 */
+static unsigned int sd0_clk_drv_pads[] = { SD0_CLK };
+/* paddrv1_15_14 */
+static unsigned int sd1_cmd_drv_pads[] = { SD1_CMD };
+/* paddrv1_13_12 */
+static unsigned int sd1_clk_drv_pads[] = { SD1_CLK };
+/* paddrv1_11_10 */
+static unsigned int spi0_all_drv_pads[] = { SPI0_SCLK, SPI0_SS,
+ SPI0_MISO, SPI0_MOSI };
+/* paddrv2_31_30 */
+static unsigned int uart0_rx_drv_pads[] = { UART0_RX };
+/* paddrv2_29_28 */
+static unsigned int uart0_tx_drv_pads[] = { UART0_TX };
+/* paddrv2_27_26 */
+static unsigned int uart2_all_drv_pads[] = { UART2_RX, UART2_TX,
+ UART2_RTSB, UART2_CTSB };
+/* paddrv2_24_23 */
+static unsigned int i2c0_all_drv_pads[] = { I2C0_SCLK, I2C0_SDATA };
+/* paddrv2_22_21 */
+static unsigned int i2c12_all_drv_pads[] = { I2C1_SCLK, I2C1_SDATA,
+ I2C2_SCLK, I2C2_SDATA };
+/* paddrv2_19_18 */
+static unsigned int sens0_pclk_drv_pads[] = { SENSOR0_PCLK };
+/* paddrv2_13_12 */
+static unsigned int sens0_ckout_drv_pads[] = { SENSOR0_CKOUT };
+/* paddrv2_3_2 */
+static unsigned int uart3_all_drv_pads[] = { UART3_RX, UART3_TX,
+ UART3_RTSB, UART3_CTSB };
+
+/* Pinctrl groups */
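+/*
+ * Each MUX_PG(group, reg, shift, width) entry locates the group's
+ * function-select field: MFP control register index, bit offset and
+ * field width, matching the mfp<reg>_<bits> comments above. DRV_PG()
+ * does the same for the drive-strength fields in the PADDRV registers.
+ */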
+static const struct owl_pingroup s500_groups[] = {
+ MUX_PG(lcd0_d18_mfp, 0, 23, 3),
+ MUX_PG(rmii_crs_dv_mfp, 0, 20, 3),
+ MUX_PG(rmii_txd0_mfp, 0, 16, 3),
+ MUX_PG(rmii_txd1_mfp, 0, 16, 3),
+ MUX_PG(rmii_txen_mfp, 0, 13, 3),
+ MUX_PG(rmii_rxen_mfp, 0, 13, 3),
+ MUX_PG(rmii_rxd1_mfp, 0, 8, 3),
+ MUX_PG(rmii_rxd0_mfp, 0, 8, 3),
+ MUX_PG(rmii_ref_clk_mfp, 0, 6, 2),
+ MUX_PG(i2s_d0_mfp, 0, 5, 1),
+ MUX_PG(i2s_pcm1_mfp, 0, 3, 2),
+ MUX_PG(i2s0_pcm0_mfp, 0, 1, 2),
+ MUX_PG(i2s1_pcm0_mfp, 0, 1, 2),
+ MUX_PG(i2s_d1_mfp, 0, 0, 1),
+ MUX_PG(ks_in2_mfp, 1, 29, 3),
+ MUX_PG(ks_in1_mfp, 1, 29, 3),
+ MUX_PG(ks_in0_mfp, 1, 29, 3),
+ MUX_PG(ks_in3_mfp, 1, 26, 3),
+ MUX_PG(ks_out0_mfp, 1, 26, 3),
+ MUX_PG(ks_out1_mfp, 1, 26, 3),
+ MUX_PG(ks_out2_mfp, 1, 23, 3),
+ MUX_PG(lvds_o_pn_mfp, 1, 21, 2),
+ MUX_PG(dsi_dn0_mfp, 1, 19, 2),
+ MUX_PG(dsi_dp2_mfp, 1, 17, 2),
+ MUX_PG(lcd0_d17_mfp, 1, 14, 3),
+ MUX_PG(dsi_dp3_mfp, 1, 12, 2),
+ MUX_PG(dsi_dn3_mfp, 1, 10, 2),
+ MUX_PG(dsi_dp0_mfp, 1, 7, 3),
+ MUX_PG(lvds_ee_pn_mfp, 1, 5, 2),
+ MUX_PG(spi0_i2c_pcm_mfp, 1, 3, 2),
+ MUX_PG(spi0_i2s_pcm_mfp, 1, 0, 3),
+ MUX_PG(dsi_dnp1_cp_mfp, 2, 29, 2),
+ MUX_PG(lvds_e_pn_mfp, 2, 27, 2),
+ MUX_PG(dsi_dn2_mfp, 2, 24, 3),
+ MUX_PG(uart2_rtsb_mfp, 2, 23, 1),
+ MUX_PG(uart2_ctsb_mfp, 2, 22, 1),
+ MUX_PG(uart3_rtsb_mfp, 2, 21, 1),
+ MUX_PG(uart3_ctsb_mfp, 2, 20, 1),
+ MUX_PG(sd0_d0_mfp, 2, 17, 3),
+ MUX_PG(sd0_d1_mfp, 2, 14, 3),
+ MUX_PG(sd0_d2_d3_mfp, 2, 11, 3),
+ MUX_PG(sd1_d0_d3_mfp, 2, 9, 2),
+ MUX_PG(sd0_cmd_mfp, 2, 7, 2),
+ MUX_PG(sd0_clk_mfp, 2, 5, 2),
+ MUX_PG(sd1_cmd_mfp, 2, 3, 2),
+ MUX_PG(uart0_rx_mfp, 2, 0, 3),
+ MUX_PG(clko_25m_mfp, 3, 30, 1),
+ MUX_PG(csi_cn_cp_mfp, 3, 28, 2),
+ MUX_PG(sens0_ckout_mfp, 3, 22, 2),
+ MUX_PG(uart0_tx_mfp, 3, 19, 3),
+ MUX_PG(i2c0_mfp, 3, 16, 3),
+ MUX_PG(csi_dn_dp_mfp, 3, 14, 2),
+ MUX_PG(sen0_pclk_mfp, 3, 12, 2),
+ MUX_PG(pcm1_in_mfp, 3, 10, 2),
+ MUX_PG(pcm1_clk_mfp, 3, 8, 2),
+ MUX_PG(pcm1_sync_mfp, 3, 6, 2),
+ MUX_PG(pcm1_out_mfp, 3, 4, 2),
+ MUX_PG(dnand_data_wr_mfp, 3, 3, 1),
+ MUX_PG(dnand_acle_ce0_mfp, 3, 2, 1),
+ MUX_PG(nand_ceb2_mfp, 3, 0, 2),
+ MUX_PG(nand_ceb3_mfp, 3, 0, 2),
+
+ DRV_PG(sirq_drv, 0, 28, 2),
+ DRV_PG(rmii_txd01_txen_drv, 0, 22, 2),
+ DRV_PG(rmii_rxer_drv, 0, 20, 2),
+ DRV_PG(rmii_crs_drv, 0, 18, 2),
+ DRV_PG(rmii_rxd10_drv, 0, 16, 2),
+ DRV_PG(rmii_ref_clk_drv, 0, 14, 2),
+ DRV_PG(smi_mdc_mdio_drv, 0, 12, 2),
+ DRV_PG(i2s_d0_drv, 0, 10, 2),
+ DRV_PG(i2s_bclk0_drv, 0, 8, 2),
+ DRV_PG(i2s3_drv, 0, 6, 2),
+ DRV_PG(i2s13_drv, 0, 4, 2),
+ DRV_PG(pcm1_drv, 0, 2, 2),
+ DRV_PG(ks_in_drv, 0, 0, 2),
+ DRV_PG(ks_out_drv, 1, 30, 2),
+ DRV_PG(lvds_all_drv, 1, 28, 2),
+ DRV_PG(lcd_dsi_drv, 1, 26, 2),
+ DRV_PG(dsi_drv, 1, 24, 2),
+ DRV_PG(sd0_d0_d3_drv, 1, 22, 2),
+ DRV_PG(sd1_d0_d3_drv, 1, 20, 2),
+ DRV_PG(sd0_cmd_drv, 1, 18, 2),
+ DRV_PG(sd0_clk_drv, 1, 16, 2),
+ DRV_PG(sd1_cmd_drv, 1, 14, 2),
+ DRV_PG(sd1_clk_drv, 1, 12, 2),
+ DRV_PG(spi0_all_drv, 1, 10, 2),
+ DRV_PG(uart0_rx_drv, 2, 30, 2),
+ DRV_PG(uart0_tx_drv, 2, 28, 2),
+ DRV_PG(uart2_all_drv, 2, 26, 2),
+ DRV_PG(i2c0_all_drv, 2, 23, 2),
+ DRV_PG(i2c12_all_drv, 2, 21, 2),
+ DRV_PG(sens0_pclk_drv, 2, 18, 2),
+ DRV_PG(sens0_ckout_drv, 2, 12, 2),
+ DRV_PG(uart3_all_drv, 2, 2, 2),
+};
+
+static const char * const nor_groups[] = {
+ "lcd0_d18_mfp",
+ "i2s_d0_mfp",
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d17_mfp",
+ "lvds_ee_pn_mfp",
+ "spi0_i2c_pcm_mfp",
+ "spi0_i2s_pcm_mfp",
+ "lvds_e_pn_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd1_cmd_mfp",
+ "sens0_ckout_mfp",
+ "sen0_pclk_mfp",
+};
+
+static const char * const eth_rmii_groups[] = {
+ "rmii_crs_dv_mfp",
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_txen_mfp",
+ "rmii_rxen_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "rmii_ref_clk_mfp",
+};
+
+static const char * const eth_smii_groups[] = {
+ "rmii_crs_dv_mfp",
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_ref_clk_mfp",
+};
+
+static const char * const spi0_groups[] = {
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "spi0_i2c_pcm_mfp",
+ "spi0_i2s_pcm_mfp",
+ "dsi_dn2_mfp",
+};
+
+static const char * const spi1_groups[] = {
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const spi2_groups[] = {
+ "rmii_crs_dv_mfp",
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "rmii_ref_clk_mfp",
+ "dnand_acle_ce0_mfp",
+};
+
+static const char * const spi3_groups[] = {
+ "rmii_txen_mfp",
+ "rmii_rxen_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+};
+
+static const char * const sens0_groups[] = {
+ "csi_cn_cp_mfp",
+ "sens0_ckout_mfp",
+ "csi_dn_dp_mfp",
+ "sen0_pclk_mfp",
+};
+
+static const char * const sens1_groups[] = {
+ "lcd0_d18_mfp",
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "sens0_ckout_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
+};
+
+static const char * const uart0_groups[] = {
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+static const char * const uart1_groups[] = {
+ "sd0_d2_d3_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const uart2_groups[] = {
+ "rmii_txen_mfp",
+ "rmii_rxen_mfp",
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp0_mfp",
+ "dsi_dn2_mfp",
+ "uart2_rtsb_mfp",
+ "uart2_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const uart3_groups[] = {
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+};
+
+static const char * const uart4_groups[] = {
+ "rmii_crs_dv_mfp",
+ "rmii_ref_clk_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+};
+
+static const char * const uart5_groups[] = {
+ "rmii_rxd1_mfp",
+ "rmii_rxd0_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
+ "uart3_rtsb_mfp",
+ "uart3_ctsb_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+};
+
+static const char * const uart6_groups[] = {
+ "rmii_txd0_mfp",
+ "rmii_txd1_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
+};
+
+static const char * const i2s0_groups[] = {
+ "i2s_d0_mfp",
+ "i2s_pcm1_mfp",
+ "i2s0_pcm0_mfp",
+};
+
+static const char * const i2s1_groups[] = {
+ "i2s1_pcm0_mfp",
+ "i2s_d1_mfp",
+ "spi0_i2s_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+};
+
+static const char * const pcm1_groups[] = {
+ "i2s_pcm1_mfp",
+ "spi0_i2s_pcm_mfp",
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "pcm1_in_mfp",
+ "pcm1_clk_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
+};
+
+static const char * const pcm0_groups[] = {
+ "i2s0_pcm0_mfp",
+ "i2s1_pcm0_mfp",
+ "spi0_i2c_pcm_mfp",
+ "spi0_i2s_pcm_mfp",
+};
+
+static const char * const ks_groups[] = {
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+};
+
+static const char * const jtag_groups[] = {
+ "ks_in2_mfp",
+ "ks_in1_mfp",
+ "ks_in0_mfp",
+ "ks_in3_mfp",
+ "ks_out1_mfp",
+ "sd0_d0_mfp",
+ "sd0_d2_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const pwm0_groups[] = {
+ "ks_in2_mfp",
+ "ks_in0_mfp",
+ "rmii_txen_mfp",
+ "sen0_pclk_mfp",
+};
+
+static const char * const pwm1_groups[] = {
+ "rmii_rxen_mfp",
+ "ks_in1_mfp",
+ "ks_in3_mfp",
+ "sens0_ckout_mfp",
+};
+
+static const char * const pwm2_groups[] = {
+ "lcd0_d18_mfp",
+ "rmii_rxd1_mfp",
+ "ks_out0_mfp",
+ "ks_out2_mfp",
+};
+
+static const char * const pwm3_groups[] = {
+ "rmii_rxd0_mfp",
+ "ks_out1_mfp",
+ "lcd0_d17_mfp",
+};
+
+static const char * const pwm4_groups[] = {
+ "lcd0_d18_mfp",
+ "rmii_crs_dv_mfp",
+ "rmii_txd0_mfp",
+ "ks_in0_mfp",
+ "pcm1_in_mfp",
+ "nand_ceb3_mfp",
+};
+
+static const char * const pwm5_groups[] = {
+ "rmii_txd1_mfp",
+ "ks_in1_mfp",
+ "pcm1_clk_mfp",
+ "nand_ceb2_mfp",
+};
+
+static const char * const p0_groups[] = {
+ "ks_in2_mfp",
+ "ks_in0_mfp",
+};
+
+static const char * const sd0_groups[] = {
+ "ks_out0_mfp",
+ "ks_out1_mfp",
+ "ks_out2_mfp",
+ "lcd0_d17_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dp0_mfp",
+ "sd0_d0_mfp",
+ "sd0_d1_mfp",
+ "sd0_d2_d3_mfp",
+ "sd1_d0_d3_mfp",
+ "sd0_cmd_mfp",
+ "sd0_clk_mfp",
+};
+
+static const char * const sd1_groups[] = {
+ "dsi_dp2_mfp",
+ "lcd0_d17_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dnp1_cp_mfp",
+ "dsi_dn2_mfp",
+ "sd1_d0_d3_mfp",
+ "sd1_cmd_mfp",
+};
+
+static const char * const sd2_groups[] = {
+ "dnand_data_wr_mfp",
+};
+
+static const char * const i2c0_groups[] = {
+ "uart0_rx_mfp",
+ "uart0_tx_mfp",
+ "i2c0_mfp",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c0_mfp",
+};
+
+static const char * const i2c3_groups[] = {
+ "spi0_i2c_pcm_mfp",
+ "pcm1_sync_mfp",
+ "pcm1_out_mfp",
+};
+
+static const char * const lvds_groups[] = {
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+ "lvds_e_pn_mfp",
+};
+
+static const char * const ts_groups[] = {
+ "lvds_o_pn_mfp",
+ "lvds_ee_pn_mfp",
+};
+
+static const char * const lcd0_groups[] = {
+ "lcd0_d18_mfp",
+ "lcd0_d17_mfp",
+ "lvds_o_pn_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "lvds_ee_pn_mfp",
+ "dsi_dnp1_cp_mfp",
+ "lvds_e_pn_mfp",
+};
+
+static const char * const usb30_groups[] = {
+ "ks_in1_mfp",
+};
+
+static const char * const clko_25m_groups[] = {
+ "clko_25m_mfp",
+};
+
+static const char * const mipi_csi_groups[] = {
+ "csi_cn_cp_mfp",
+ "csi_dn_dp_mfp",
+};
+
+static const char * const dsi_groups[] = {
+ "dsi_dn0_mfp",
+ "dsi_dp2_mfp",
+ "dsi_dp3_mfp",
+ "dsi_dn3_mfp",
+ "dsi_dp0_mfp",
+ "dsi_dnp1_cp_mfp",
+ "dsi_dn2_mfp",
+};
+
+static const char * const nand_groups[] = {
+ "dnand_data_wr_mfp",
+ "dnand_acle_ce0_mfp",
+ "nand_ceb2_mfp",
+ "nand_ceb3_mfp",
+};
+
+static const char * const spdif_groups[] = {
+ "uart0_tx_mfp",
+};
+
+static const struct owl_pinmux_func s500_functions[] = {
+ [S500_MUX_NOR] = FUNCTION(nor),
+ [S500_MUX_ETH_RMII] = FUNCTION(eth_rmii),
+ [S500_MUX_ETH_SMII] = FUNCTION(eth_smii),
+ [S500_MUX_SPI0] = FUNCTION(spi0),
+ [S500_MUX_SPI1] = FUNCTION(spi1),
+ [S500_MUX_SPI2] = FUNCTION(spi2),
+ [S500_MUX_SPI3] = FUNCTION(spi3),
+ [S500_MUX_SENS0] = FUNCTION(sens0),
+ [S500_MUX_SENS1] = FUNCTION(sens1),
+ [S500_MUX_UART0] = FUNCTION(uart0),
+ [S500_MUX_UART1] = FUNCTION(uart1),
+ [S500_MUX_UART2] = FUNCTION(uart2),
+ [S500_MUX_UART3] = FUNCTION(uart3),
+ [S500_MUX_UART4] = FUNCTION(uart4),
+ [S500_MUX_UART5] = FUNCTION(uart5),
+ [S500_MUX_UART6] = FUNCTION(uart6),
+ [S500_MUX_I2S0] = FUNCTION(i2s0),
+ [S500_MUX_I2S1] = FUNCTION(i2s1),
+ [S500_MUX_PCM1] = FUNCTION(pcm1),
+ [S500_MUX_PCM0] = FUNCTION(pcm0),
+ [S500_MUX_KS] = FUNCTION(ks),
+ [S500_MUX_JTAG] = FUNCTION(jtag),
+ [S500_MUX_PWM0] = FUNCTION(pwm0),
+ [S500_MUX_PWM1] = FUNCTION(pwm1),
+ [S500_MUX_PWM2] = FUNCTION(pwm2),
+ [S500_MUX_PWM3] = FUNCTION(pwm3),
+ [S500_MUX_PWM4] = FUNCTION(pwm4),
+ [S500_MUX_PWM5] = FUNCTION(pwm5),
+ [S500_MUX_P0] = FUNCTION(p0),
+ [S500_MUX_SD0] = FUNCTION(sd0),
+ [S500_MUX_SD1] = FUNCTION(sd1),
+ [S500_MUX_SD2] = FUNCTION(sd2),
+ [S500_MUX_I2C0] = FUNCTION(i2c0),
+ [S500_MUX_I2C1] = FUNCTION(i2c1),
+ /*[S500_MUX_I2C2] = FUNCTION(i2c2),*/
+ [S500_MUX_I2C3] = FUNCTION(i2c3),
+ [S500_MUX_DSI] = FUNCTION(dsi),
+ [S500_MUX_LVDS] = FUNCTION(lvds),
+ [S500_MUX_USB30] = FUNCTION(usb30),
+ [S500_MUX_CLKO_25M] = FUNCTION(clko_25m),
+ [S500_MUX_MIPI_CSI] = FUNCTION(mipi_csi),
+ [S500_MUX_NAND] = FUNCTION(nand),
+ [S500_MUX_SPDIF] = FUNCTION(spdif),
+ /*[S500_MUX_SIRQ0] = FUNCTION(sirq0),*/
+ /*[S500_MUX_SIRQ1] = FUNCTION(sirq1),*/
+ /*[S500_MUX_SIRQ2] = FUNCTION(sirq2),*/
+ [S500_MUX_TS] = FUNCTION(ts),
+ [S500_MUX_LCD0] = FUNCTION(lcd0),
+};
+
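+/*
+ * PAD_ST_CONF(pad, reg, shift, width) records the pad's Schmitt-trigger
+ * enable field in PAD_ST<reg>; PAD_PULLCTL_CONF() records its pull-up/down
+ * control field in PAD_PULLCTL<reg>.
+ */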
+/* PAD_ST0 */
+static PAD_ST_CONF(I2C0_SDATA, 0, 30, 1);
+static PAD_ST_CONF(UART0_RX, 0, 29, 1);
+static PAD_ST_CONF(I2S_MCLK1, 0, 23, 1);
+static PAD_ST_CONF(ETH_REF_CLK, 0, 22, 1);
+static PAD_ST_CONF(ETH_TXEN, 0, 21, 1);
+static PAD_ST_CONF(ETH_TXD0, 0, 20, 1);
+static PAD_ST_CONF(I2S_LRCLK1, 0, 19, 1);
+static PAD_ST_CONF(DSI_DP0, 0, 16, 1);
+static PAD_ST_CONF(DSI_DN0, 0, 15, 1);
+static PAD_ST_CONF(UART0_TX, 0, 14, 1);
+static PAD_ST_CONF(SPI0_SCLK, 0, 13, 1);
+static PAD_ST_CONF(SD0_CLK, 0, 12, 1);
+static PAD_ST_CONF(KS_IN0, 0, 11, 1);
+static PAD_ST_CONF(SENSOR0_PCLK, 0, 9, 1);
+static PAD_ST_CONF(I2C0_SCLK, 0, 7, 1);
+static PAD_ST_CONF(KS_OUT0, 0, 6, 1);
+static PAD_ST_CONF(KS_OUT1, 0, 5, 1);
+static PAD_ST_CONF(KS_OUT2, 0, 4, 1);
+
+/* PAD_ST1 */
+static PAD_ST_CONF(DSI_DP2, 1, 31, 1);
+static PAD_ST_CONF(DSI_DN2, 1, 30, 1);
+static PAD_ST_CONF(I2S_LRCLK0, 1, 29, 1);
+static PAD_ST_CONF(UART3_CTSB, 1, 27, 1);
+static PAD_ST_CONF(UART3_RTSB, 1, 26, 1);
+static PAD_ST_CONF(UART3_RX, 1, 25, 1);
+static PAD_ST_CONF(UART2_RTSB, 1, 24, 1);
+static PAD_ST_CONF(UART2_CTSB, 1, 23, 1);
+static PAD_ST_CONF(UART2_RX, 1, 22, 1);
+static PAD_ST_CONF(ETH_RXD0, 1, 21, 1);
+static PAD_ST_CONF(ETH_RXD1, 1, 20, 1);
+static PAD_ST_CONF(ETH_CRS_DV, 1, 19, 1);
+static PAD_ST_CONF(ETH_RXER, 1, 18, 1);
+static PAD_ST_CONF(ETH_TXD1, 1, 17, 1);
+static PAD_ST_CONF(LVDS_OAP, 1, 12, 1);
+static PAD_ST_CONF(PCM1_CLK, 1, 11, 1);
+static PAD_ST_CONF(PCM1_IN, 1, 10, 1);
+static PAD_ST_CONF(PCM1_SYNC, 1, 9, 1);
+static PAD_ST_CONF(I2C1_SCLK, 1, 8, 1);
+static PAD_ST_CONF(I2C1_SDATA, 1, 7, 1);
+static PAD_ST_CONF(I2C2_SCLK, 1, 6, 1);
+static PAD_ST_CONF(I2C2_SDATA, 1, 5, 1);
+static PAD_ST_CONF(SPI0_MOSI, 1, 4, 1);
+static PAD_ST_CONF(SPI0_MISO, 1, 3, 1);
+static PAD_ST_CONF(SPI0_SS, 1, 2, 1);
+static PAD_ST_CONF(I2S_BCLK0, 1, 1, 1);
+static PAD_ST_CONF(I2S_MCLK0, 1, 0, 1);
+
+/* PAD_PULLCTL0 */
+static PAD_PULLCTL_CONF(PCM1_SYNC, 0, 30, 1);
+static PAD_PULLCTL_CONF(PCM1_OUT, 0, 29, 1);
+static PAD_PULLCTL_CONF(KS_OUT2, 0, 28, 1);
+static PAD_PULLCTL_CONF(LCD0_D17, 0, 27, 1);
+static PAD_PULLCTL_CONF(DSI_DN3, 0, 26, 1);
+static PAD_PULLCTL_CONF(ETH_RXER, 0, 16, 1);
+static PAD_PULLCTL_CONF(SIRQ0, 0, 14, 2);
+static PAD_PULLCTL_CONF(SIRQ1, 0, 12, 2);
+static PAD_PULLCTL_CONF(SIRQ2, 0, 10, 2);
+static PAD_PULLCTL_CONF(I2C0_SDATA, 0, 9, 1);
+static PAD_PULLCTL_CONF(I2C0_SCLK, 0, 8, 1);
+static PAD_PULLCTL_CONF(KS_IN0, 0, 7, 1);
+static PAD_PULLCTL_CONF(KS_IN1, 0, 6, 1);
+static PAD_PULLCTL_CONF(KS_IN2, 0, 5, 1);
+static PAD_PULLCTL_CONF(KS_IN3, 0, 4, 1);
+static PAD_PULLCTL_CONF(KS_OUT0, 0, 2, 1);
+static PAD_PULLCTL_CONF(KS_OUT1, 0, 1, 1);
+static PAD_PULLCTL_CONF(DSI_DP1, 0, 0, 1);
+
+/* PAD_PULLCTL1 */
+static PAD_PULLCTL_CONF(DSI_CP, 1, 31, 1);
+static PAD_PULLCTL_CONF(DSI_CN, 1, 30, 1);
+static PAD_PULLCTL_CONF(DSI_DN2, 1, 28, 1);
+static PAD_PULLCTL_CONF(DNAND_RDBN, 1, 25, 1);
+static PAD_PULLCTL_CONF(SD0_D0, 1, 17, 1);
+static PAD_PULLCTL_CONF(SD0_D1, 1, 16, 1);
+static PAD_PULLCTL_CONF(SD0_D2, 1, 15, 1);
+static PAD_PULLCTL_CONF(SD0_D3, 1, 14, 1);
+static PAD_PULLCTL_CONF(SD0_CMD, 1, 13, 1);
+static PAD_PULLCTL_CONF(SD0_CLK, 1, 12, 1);
+static PAD_PULLCTL_CONF(SD1_CMD, 1, 11, 1);
+static PAD_PULLCTL_CONF(SD1_D0, 1, 6, 1);
+static PAD_PULLCTL_CONF(SD1_D1, 1, 5, 1);
+static PAD_PULLCTL_CONF(SD1_D2, 1, 4, 1);
+static PAD_PULLCTL_CONF(SD1_D3, 1, 3, 1);
+static PAD_PULLCTL_CONF(UART0_RX, 1, 2, 1);
+static PAD_PULLCTL_CONF(UART0_TX, 1, 1, 1);
+static PAD_PULLCTL_CONF(CLKO_25M, 1, 0, 1);
+
+/* PAD_PULLCTL2 */
+static PAD_PULLCTL_CONF(SPI0_SCLK, 2, 12, 1);
+static PAD_PULLCTL_CONF(SPI0_MOSI, 2, 11, 1);
+static PAD_PULLCTL_CONF(I2C1_SDATA, 2, 10, 1);
+static PAD_PULLCTL_CONF(I2C1_SCLK, 2, 9, 1);
+static PAD_PULLCTL_CONF(I2C2_SDATA, 2, 8, 1);
+static PAD_PULLCTL_CONF(I2C2_SCLK, 2, 7, 1);
+static PAD_PULLCTL_CONF(DNAND_DQSN, 2, 5, 2);
+static PAD_PULLCTL_CONF(DNAND_DQS, 2, 3, 2);
+static PAD_PULLCTL_CONF(DNAND_D0, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D1, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D2, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D3, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D4, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D5, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D6, 2, 2, 1);
+static PAD_PULLCTL_CONF(DNAND_D7, 2, 2, 1);
+
+/* Pad info table */
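+/*
+ * Plain PAD_INFO() entries have no configurable bits; the _ST, _PULLCTL
+ * and _PULLCTL_ST variants attach the Schmitt-trigger and/or pull-control
+ * descriptors defined above.
+ */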
+static const struct owl_padinfo s500_padinfo[NUM_PADS] = {
+ [DNAND_DQS] = PAD_INFO_PULLCTL(DNAND_DQS),
+ [DNAND_DQSN] = PAD_INFO_PULLCTL(DNAND_DQSN),
+ [ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
+ [ETH_TXD1] = PAD_INFO_ST(ETH_TXD1),
+ [ETH_TXEN] = PAD_INFO_ST(ETH_TXEN),
+ [ETH_RXER] = PAD_INFO_PULLCTL_ST(ETH_RXER),
+ [ETH_CRS_DV] = PAD_INFO_ST(ETH_CRS_DV),
+ [ETH_RXD1] = PAD_INFO_ST(ETH_RXD1),
+ [ETH_RXD0] = PAD_INFO_ST(ETH_RXD0),
+ [ETH_REF_CLK] = PAD_INFO_ST(ETH_REF_CLK),
+ [ETH_MDC] = PAD_INFO(ETH_MDC),
+ [ETH_MDIO] = PAD_INFO(ETH_MDIO),
+ [SIRQ0] = PAD_INFO_PULLCTL(SIRQ0),
+ [SIRQ1] = PAD_INFO_PULLCTL(SIRQ1),
+ [SIRQ2] = PAD_INFO_PULLCTL(SIRQ2),
+ [I2S_D0] = PAD_INFO(I2S_D0),
+ [I2S_BCLK0] = PAD_INFO_ST(I2S_BCLK0),
+ [I2S_LRCLK0] = PAD_INFO_ST(I2S_LRCLK0),
+ [I2S_MCLK0] = PAD_INFO_ST(I2S_MCLK0),
+ [I2S_D1] = PAD_INFO(I2S_D1),
+ [I2S_BCLK1] = PAD_INFO(I2S_BCLK1),
+ [I2S_LRCLK1] = PAD_INFO_ST(I2S_LRCLK1),
+ [I2S_MCLK1] = PAD_INFO_ST(I2S_MCLK1),
+ [KS_IN0] = PAD_INFO_PULLCTL_ST(KS_IN0),
+ [KS_IN1] = PAD_INFO_PULLCTL(KS_IN1),
+ [KS_IN2] = PAD_INFO_PULLCTL(KS_IN2),
+ [KS_IN3] = PAD_INFO_PULLCTL(KS_IN3),
+ [KS_OUT0] = PAD_INFO_PULLCTL_ST(KS_OUT0),
+ [KS_OUT1] = PAD_INFO_PULLCTL_ST(KS_OUT1),
+ [KS_OUT2] = PAD_INFO_PULLCTL_ST(KS_OUT2),
+ [LVDS_OEP] = PAD_INFO(LVDS_OEP),
+ [LVDS_OEN] = PAD_INFO(LVDS_OEN),
+ [LVDS_ODP] = PAD_INFO(LVDS_ODP),
+ [LVDS_ODN] = PAD_INFO(LVDS_ODN),
+ [LVDS_OCP] = PAD_INFO(LVDS_OCP),
+ [LVDS_OCN] = PAD_INFO(LVDS_OCN),
+ [LVDS_OBP] = PAD_INFO(LVDS_OBP),
+ [LVDS_OBN] = PAD_INFO(LVDS_OBN),
+ [LVDS_OAP] = PAD_INFO_ST(LVDS_OAP),
+ [LVDS_OAN] = PAD_INFO(LVDS_OAN),
+ [LVDS_EEP] = PAD_INFO(LVDS_EEP),
+ [LVDS_EEN] = PAD_INFO(LVDS_EEN),
+ [LVDS_EDP] = PAD_INFO(LVDS_EDP),
+ [LVDS_EDN] = PAD_INFO(LVDS_EDN),
+ [LVDS_ECP] = PAD_INFO(LVDS_ECP),
+ [LVDS_ECN] = PAD_INFO(LVDS_ECN),
+ [LVDS_EBP] = PAD_INFO(LVDS_EBP),
+ [LVDS_EBN] = PAD_INFO(LVDS_EBN),
+ [LVDS_EAP] = PAD_INFO(LVDS_EAP),
+ [LVDS_EAN] = PAD_INFO(LVDS_EAN),
+ [LCD0_D18] = PAD_INFO(LCD0_D18),
+ [LCD0_D17] = PAD_INFO_PULLCTL(LCD0_D17),
+ [DSI_DP3] = PAD_INFO(DSI_DP3),
+ [DSI_DN3] = PAD_INFO_PULLCTL(DSI_DN3),
+ [DSI_DP1] = PAD_INFO_PULLCTL(DSI_DP1),
+ [DSI_DN1] = PAD_INFO(DSI_DN1),
+ [DSI_CP] = PAD_INFO_PULLCTL(DSI_CP),
+ [DSI_CN] = PAD_INFO_PULLCTL(DSI_CN),
+ [DSI_DP0] = PAD_INFO_ST(DSI_DP0),
+ [DSI_DN0] = PAD_INFO_ST(DSI_DN0),
+ [DSI_DP2] = PAD_INFO_ST(DSI_DP2),
+ [DSI_DN2] = PAD_INFO_PULLCTL_ST(DSI_DN2),
+ [SD0_D0] = PAD_INFO_PULLCTL(SD0_D0),
+ [SD0_D1] = PAD_INFO_PULLCTL(SD0_D1),
+ [SD0_D2] = PAD_INFO_PULLCTL(SD0_D2),
+ [SD0_D3] = PAD_INFO_PULLCTL(SD0_D3),
+ [SD1_D0] = PAD_INFO_PULLCTL(SD1_D0),
+ [SD1_D1] = PAD_INFO_PULLCTL(SD1_D1),
+ [SD1_D2] = PAD_INFO_PULLCTL(SD1_D2),
+ [SD1_D3] = PAD_INFO_PULLCTL(SD1_D3),
+ [SD0_CMD] = PAD_INFO_PULLCTL(SD0_CMD),
+ [SD0_CLK] = PAD_INFO_PULLCTL_ST(SD0_CLK),
+ [SD1_CMD] = PAD_INFO_PULLCTL(SD1_CMD),
+ [SD1_CLK] = PAD_INFO(SD1_CLK),
+ [SPI0_SCLK] = PAD_INFO_PULLCTL_ST(SPI0_SCLK),
+ [SPI0_SS] = PAD_INFO_ST(SPI0_SS),
+ [SPI0_MISO] = PAD_INFO_ST(SPI0_MISO),
+ [SPI0_MOSI] = PAD_INFO_PULLCTL_ST(SPI0_MOSI),
+ [UART0_RX] = PAD_INFO_PULLCTL_ST(UART0_RX),
+ [UART0_TX] = PAD_INFO_PULLCTL_ST(UART0_TX),
+ [I2C0_SCLK] = PAD_INFO_PULLCTL_ST(I2C0_SCLK),
+ [I2C0_SDATA] = PAD_INFO_PULLCTL_ST(I2C0_SDATA),
+ [SENSOR0_PCLK] = PAD_INFO_ST(SENSOR0_PCLK),
+ [SENSOR0_CKOUT] = PAD_INFO(SENSOR0_CKOUT),
+ [DNAND_ALE] = PAD_INFO(DNAND_ALE),
+ [DNAND_CLE] = PAD_INFO(DNAND_CLE),
+ [DNAND_CEB0] = PAD_INFO(DNAND_CEB0),
+ [DNAND_CEB1] = PAD_INFO(DNAND_CEB1),
+ [DNAND_CEB2] = PAD_INFO(DNAND_CEB2),
+ [DNAND_CEB3] = PAD_INFO(DNAND_CEB3),
+ [UART2_RX] = PAD_INFO_ST(UART2_RX),
+ [UART2_TX] = PAD_INFO(UART2_TX),
+ [UART2_RTSB] = PAD_INFO_ST(UART2_RTSB),
+ [UART2_CTSB] = PAD_INFO_ST(UART2_CTSB),
+ [UART3_RX] = PAD_INFO_ST(UART3_RX),
+ [UART3_TX] = PAD_INFO(UART3_TX),
+ [UART3_RTSB] = PAD_INFO_ST(UART3_RTSB),
+ [UART3_CTSB] = PAD_INFO_ST(UART3_CTSB),
+ [PCM1_IN] = PAD_INFO_ST(PCM1_IN),
+ [PCM1_CLK] = PAD_INFO_ST(PCM1_CLK),
+ [PCM1_SYNC] = PAD_INFO_PULLCTL_ST(PCM1_SYNC),
+ [PCM1_OUT] = PAD_INFO_PULLCTL(PCM1_OUT),
+ [I2C1_SCLK] = PAD_INFO_PULLCTL_ST(I2C1_SCLK),
+ [I2C1_SDATA] = PAD_INFO_PULLCTL_ST(I2C1_SDATA),
+ [I2C2_SCLK] = PAD_INFO_PULLCTL_ST(I2C2_SCLK),
+ [I2C2_SDATA] = PAD_INFO_PULLCTL_ST(I2C2_SDATA),
+ [CSI_DN0] = PAD_INFO(CSI_DN0),
+ [CSI_DP0] = PAD_INFO(CSI_DP0),
+ [CSI_DN1] = PAD_INFO(CSI_DN1),
+ [CSI_DP1] = PAD_INFO(CSI_DP1),
+ [CSI_CN] = PAD_INFO(CSI_CN),
+ [CSI_CP] = PAD_INFO(CSI_CP),
+ [CSI_DN2] = PAD_INFO(CSI_DN2),
+ [CSI_DP2] = PAD_INFO(CSI_DP2),
+ [CSI_DN3] = PAD_INFO(CSI_DN3),
+ [CSI_DP3] = PAD_INFO(CSI_DP3),
+ [DNAND_D0] = PAD_INFO_PULLCTL(DNAND_D0),
+ [DNAND_D1] = PAD_INFO_PULLCTL(DNAND_D1),
+ [DNAND_D2] = PAD_INFO_PULLCTL(DNAND_D2),
+ [DNAND_D3] = PAD_INFO_PULLCTL(DNAND_D3),
+ [DNAND_D4] = PAD_INFO_PULLCTL(DNAND_D4),
+ [DNAND_D5] = PAD_INFO_PULLCTL(DNAND_D5),
+ [DNAND_D6] = PAD_INFO_PULLCTL(DNAND_D6),
+ [DNAND_D7] = PAD_INFO_PULLCTL(DNAND_D7),
+ [DNAND_WRB] = PAD_INFO(DNAND_WRB),
+ [DNAND_RDB] = PAD_INFO(DNAND_RDB),
+ [DNAND_RDBN] = PAD_INFO_PULLCTL(DNAND_RDBN),
+ [DNAND_RB] = PAD_INFO(DNAND_RB),
+ [PORB] = PAD_INFO(PORB),
+ [CLKO_25M] = PAD_INFO_PULLCTL(CLKO_25M),
+ [BSEL] = PAD_INFO(BSEL),
+ [PKG0] = PAD_INFO(PKG0),
+ [PKG1] = PAD_INFO(PKG1),
+ [PKG2] = PAD_INFO(PKG2),
+ [PKG3] = PAD_INFO(PKG3),
+};
+
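+/*
+ * Per-bank GPIO data: each OWL_GPIO_PORT() entry gives the bank's MMIO
+ * base offset and pin count, followed by the bank's control/data and
+ * interrupt register offsets.
+ */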
+static const struct owl_gpio_port s500_gpio_ports[] = {
+ OWL_GPIO_PORT(A, 0x0000, 32, 0x0, 0x4, 0x8, 0x204, 0x208, 0x20C, 0x230, 0),
+ OWL_GPIO_PORT(B, 0x000C, 32, 0x0, 0x4, 0x8, 0x1F8, 0x204, 0x208, 0x22C, 1),
+ OWL_GPIO_PORT(C, 0x0018, 32, 0x0, 0x4, 0x8, 0x1EC, 0x200, 0x204, 0x228, 2),
+ OWL_GPIO_PORT(D, 0x0024, 32, 0x0, 0x4, 0x8, 0x1E0, 0x1FC, 0x200, 0x224, 3),
+ OWL_GPIO_PORT(E, 0x0030, 4, 0x0, 0x4, 0x8, 0x1D4, 0x1F8, 0x1FC, 0x220, 4),
+};
+
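+/* Register encoding of the pull-bias selection used by the pinconf callbacks */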
+enum s500_pinconf_pull {
+ OWL_PINCONF_PULL_DOWN,
+ OWL_PINCONF_PULL_UP,
+};
+
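+/*
+ * Convert a generic pinconf argument into the raw value programmed into
+ * the pad's PULLCTL/ST field; the Schmitt-trigger argument is clamped to
+ * a single enable bit.
+ */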
+static int s500_pad_pinconf_arg2val(const struct owl_padinfo *info,
+ unsigned int param, u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = (*arg >= 1 ? 1 : 0);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
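+/*
+ * Inverse of s500_pad_pinconf_arg2val(): translate a raw field value read
+ * from the register into the boolean argument reported for the queried
+ * pinconf parameter.
+ */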
+static int s500_pad_pinconf_val2arg(const struct owl_padinfo *padinfo,
+ unsigned int param, u32 *arg)
+{
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ *arg = *arg == OWL_PINCONF_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ *arg = *arg == OWL_PINCONF_PULL_UP;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ *arg = *arg == 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
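+/* SoC description handed to the shared Owl pinctrl core at probe time */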
+static struct owl_pinctrl_soc_data s500_pinctrl_data = {
+ .padinfo = s500_padinfo,
+ .pins = (const struct pinctrl_pin_desc *)s500_pads,
+ .npins = ARRAY_SIZE(s500_pads),
+ .functions = s500_functions,
+ .nfunctions = ARRAY_SIZE(s500_functions),
+ .groups = s500_groups,
+ .ngroups = ARRAY_SIZE(s500_groups),
+ .ngpios = NUM_GPIOS,
+ .ports = s500_gpio_ports,
+ .nports = ARRAY_SIZE(s500_gpio_ports),
+ .padctl_arg2val = s500_pad_pinconf_arg2val,
+ .padctl_val2arg = s500_pad_pinconf_val2arg,
+};
+
+static int s500_pinctrl_probe(struct platform_device *pdev)
+{
+ return owl_pinctrl_probe(pdev, &s500_pinctrl_data);
+}
+
+static const struct of_device_id s500_pinctrl_of_match[] = {
+ { .compatible = "actions,s500-pinctrl", },
+ { }
+};
+
+static struct platform_driver s500_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-s500",
+ .of_match_table = of_match_ptr(s500_pinctrl_of_match),
+ },
+ .probe = s500_pinctrl_probe,
+};
+
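+/* Register early (arch_initcall) so pads are muxed before dependent drivers probe */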
+static int __init s500_pinctrl_init(void)
+{
+ return platform_driver_register(&s500_pinctrl_driver);
+}
+arch_initcall(s500_pinctrl_init);
+
+static void __exit s500_pinctrl_exit(void)
+{
+ platform_driver_unregister(&s500_pinctrl_driver);
+}
+module_exit(s500_pinctrl_exit);
+
+MODULE_AUTHOR("Actions Semi Inc.");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
+MODULE_DESCRIPTION("Actions Semi S500 SoC Pinctrl Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/actions/pinctrl-s700.c b/drivers/pinctrl/actions/pinctrl-s700.c
index f579a6593f37..fd00940a5799 100644
--- a/drivers/pinctrl/actions/pinctrl-s700.c
+++ b/drivers/pinctrl/actions/pinctrl-s700.c
@@ -1685,7 +1685,7 @@ static PAD_PULLCTL_CONF(I2C2_SDATA, 2, 8, 1);
static PAD_PULLCTL_CONF(I2C2_SCLK, 2, 7, 1);
/* Pad info table for the pinmux subsystem */
-static struct owl_padinfo s700_padinfo[NUM_PADS] = {
+static const struct owl_padinfo s700_padinfo[NUM_PADS] = {
[ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
[ETH_TXD1] = PAD_INFO_ST(ETH_TXD1),
[ETH_TXEN] = PAD_INFO_ST(ETH_TXEN),
diff --git a/drivers/pinctrl/actions/pinctrl-s900.c b/drivers/pinctrl/actions/pinctrl-s900.c
index 9492b86852e7..811249a8011e 100644
--- a/drivers/pinctrl/actions/pinctrl-s900.c
+++ b/drivers/pinctrl/actions/pinctrl-s900.c
@@ -1556,7 +1556,7 @@ static PAD_ST_CONF(I2S_BCLK0, 1, 1, 1);
static PAD_ST_CONF(I2S_MCLK0, 1, 0, 1);
/* Pad info table */
-static struct owl_padinfo s900_padinfo[NUM_PADS] = {
+static const struct owl_padinfo s900_padinfo[NUM_PADS] = {
[ETH_TXD0] = PAD_INFO_ST(ETH_TXD0),
[ETH_TXD1] = PAD_INFO_ST(ETH_TXD1),
[ETH_TXEN] = PAD_INFO_ST(ETH_TXEN),
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index 7efe6dbe4398..34803a6c7664 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -19,6 +19,7 @@
#define SCU400 0x400 /* Multi-function Pin Control #1 */
#define SCU404 0x404 /* Multi-function Pin Control #2 */
+#define SCU40C 0x40C /* Multi-function Pin Control #3 */
#define SCU410 0x410 /* Multi-function Pin Control #4 */
#define SCU414 0x414 /* Multi-function Pin Control #5 */
#define SCU418 0x418 /* Multi-function Pin Control #6 */
@@ -2591,6 +2592,22 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
/* MAC4 */
{ PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)},
{ PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+
+ /* GPIO18E */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, Y4, SCU40C, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, Y4, SCU40C, 4),
+ /* GPIO18D */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, AB4, AC5, SCU40C, 3),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, AB4, AC5, SCU40C, 3),
+ /* GPIO18C */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, E4, E1, SCU40C, 2),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, E4, E1, SCU40C, 2),
+ /* GPIO18B */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, B2, D3, SCU40C, 1),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, B2, D3, SCU40C, 1),
+ /* GPIO18A */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, C6, A2, SCU40C, 0),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, C6, A2, SCU40C, 0),
};
/**
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 53f3f8aec695..d6b849552a1e 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -286,13 +286,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
/*
- * The signal type is GPIO if the signal name has "GPIO" as a prefix.
+ * The signal type is GPIO if the signal name has "GPI" as a prefix.
* strncmp (rather than strcmp) is used to implement the prefix
* requirement.
*
- * expr->signal might look like "GPIOT3" in the GPIO case.
+ * expr->signal might look like "GPIOB1" in the GPIO case.
+ * expr->signal might look like "GPIT0" in the GPI case.
*/
- return strncmp(expr->signal, "GPIO", 4) == 0;
+ return strncmp(expr->signal, "GPI", 3) == 0;
}
static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
@@ -534,14 +535,14 @@ int aspeed_pin_config_set(struct pinctrl_dev *pctldev, unsigned int offset,
val = pmap->val << __ffs(pconf->mask);
rc = regmap_update_bits(pdata->scu, pconf->reg,
- pmap->mask, val);
+ pconf->mask, val);
if (rc < 0)
return rc;
- pr_debug("%s: Set SCU%02X[%lu]=%d for param %d(=%d) on pin %d\n",
- __func__, pconf->reg, __ffs(pconf->mask),
- pmap->val, param, arg, offset);
+ pr_debug("%s: Set SCU%02X[0x%08X]=0x%X for param %d(=%d) on pin %d\n",
+ __func__, pconf->reg, pconf->mask,
+ val, param, arg, offset);
}
return 0;
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index dcf7df797af7..0ed14de0134c 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -23,6 +23,7 @@ config PINCTRL_BCM2835
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
default ARCH_BCM2835 || ARCH_BRCMSTB
help
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 5eff8c296552..3fb238714718 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -130,9 +130,8 @@ static int dt_to_map_one_config(struct pinctrl *p,
if (!np_pctldev || of_node_is_root(np_pctldev)) {
of_node_put(np_pctldev);
ret = driver_deferred_probe_check_state(p->dev);
- /* keep deferring if modules are enabled unless we've timed out */
- if (IS_ENABLED(CONFIG_MODULES) && !allow_default &&
- (ret == -ENODEV))
+ /* keep deferring if modules are enabled */
+ if (IS_ENABLED(CONFIG_MODULES) && !allow_default && ret < 0)
ret = -EPROBE_DEFER;
return ret;
}
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 08fcf5c79296..a1fbb3b9ae34 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -1,13 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
config PINCTRL_IMX
- bool
+ tristate
+ depends on OF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
select REGMAP
config PINCTRL_IMX_SCU
- bool
+ tristate
depends on IMX_SCU
select PINCTRL_IMX
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 507e4affcd73..daf28bc5661d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -373,7 +374,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
const struct imx_pinctrl_soc_info *info = ipctl->info;
if (info->flags & IMX_USE_SCU)
- return imx_pinconf_get_scu(pctldev, pin_id, config);
+ return info->imx_pinconf_get(pctldev, pin_id, config);
else
return imx_pinconf_get_mmio(pctldev, pin_id, config);
}
@@ -423,7 +424,7 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
const struct imx_pinctrl_soc_info *info = ipctl->info;
if (info->flags & IMX_USE_SCU)
- return imx_pinconf_set_scu(pctldev, pin_id,
+ return info->imx_pinconf_set(pctldev, pin_id,
configs, num_configs);
else
return imx_pinconf_set_mmio(pctldev, pin_id,
@@ -440,7 +441,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
int ret;
if (info->flags & IMX_USE_SCU) {
- ret = imx_pinconf_get_scu(pctldev, pin_id, &config);
+ ret = info->imx_pinconf_get(pctldev, pin_id, &config);
if (ret) {
dev_err(ipctl->dev, "failed to get %s pinconf\n",
pin_get_name(pctldev, pin_id));
@@ -629,7 +630,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
for (i = 0; i < grp->num_pins; i++) {
pin = &((struct imx_pin *)(grp->data))[i];
if (info->flags & IMX_USE_SCU)
- imx_pinctrl_parse_pin_scu(ipctl, &grp->pins[i],
+ info->imx_pinctrl_parse_pin(ipctl, &grp->pins[i],
pin, &list);
else
imx_pinctrl_parse_pin_mmio(ipctl, &grp->pins[i],
@@ -898,3 +899,7 @@ const struct dev_pm_ops imx_pinctrl_pm_ops = {
imx_pinctrl_resume)
};
EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX common pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 333d32b947b1..fd8c4b6b3e36 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -75,6 +75,21 @@ struct imx_cfg_params_decode {
bool invert;
};
+/**
+ * @dev: a pointer back to containing device
+ * @base: the offset to the controller in virtual memory
+ */
+struct imx_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ void __iomem *base;
+ void __iomem *input_sel_base;
+ const struct imx_pinctrl_soc_info *info;
+ struct imx_pin_reg *pin_regs;
+ unsigned int group_index;
+ struct mutex mutex;
+};
+
struct imx_pinctrl_soc_info {
const struct pinctrl_pin_desc *pins;
unsigned int npins;
@@ -98,21 +113,13 @@ struct imx_pinctrl_soc_info {
struct pinctrl_gpio_range *range,
unsigned offset,
bool input);
-};
-
-/**
- * @dev: a pointer back to containing device
- * @base: the offset to the controller in virtual memory
- */
-struct imx_pinctrl {
- struct device *dev;
- struct pinctrl_dev *pctl;
- void __iomem *base;
- void __iomem *input_sel_base;
- const struct imx_pinctrl_soc_info *info;
- struct imx_pin_reg *pin_regs;
- unsigned int group_index;
- struct mutex mutex;
+ int (*imx_pinconf_get)(struct pinctrl_dev *pctldev, unsigned int pin_id,
+ unsigned long *config);
+ int (*imx_pinconf_set)(struct pinctrl_dev *pctldev, unsigned int pin_id,
+ unsigned long *configs, unsigned int num_configs);
+ void (*imx_pinctrl_parse_pin)(struct imx_pinctrl *ipctl,
+ unsigned int *pin_id, struct imx_pin *pin,
+ const __be32 **list_p);
};
#define IMX_CFG_PARAMS_DECODE(p, m, o) \
@@ -137,7 +144,6 @@ struct imx_pinctrl {
int imx_pinctrl_probe(struct platform_device *pdev,
const struct imx_pinctrl_soc_info *info);
-#ifdef CONFIG_PINCTRL_IMX_SCU
#define BM_PAD_CTL_GP_ENABLE BIT(30)
#define BM_PAD_CTL_IFMUX_ENABLE BIT(31)
#define BP_PAD_CTL_IFMUX 27
@@ -150,23 +156,4 @@ int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id,
void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl,
unsigned int *pin_id, struct imx_pin *pin,
const __be32 **list_p);
-#else
-static inline int imx_pinconf_get_scu(struct pinctrl_dev *pctldev,
- unsigned pin_id, unsigned long *config)
-{
- return -EINVAL;
-}
-static inline int imx_pinconf_set_scu(struct pinctrl_dev *pctldev,
- unsigned pin_id, unsigned long *configs,
- unsigned num_configs)
-{
- return -EINVAL;
-}
-static inline void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl,
- unsigned int *pin_id,
- struct imx_pin *pin,
- const __be32 **list_p)
-{
-}
-#endif
#endif /* __DRIVERS_PINCTRL_IMX_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
index 12b97daa0407..d3020c0cd55d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -159,6 +159,9 @@ static struct imx_pinctrl_soc_info imx8dxl_pinctrl_info = {
.pins = imx8dxl_pinctrl_pads,
.npins = ARRAY_SIZE(imx8dxl_pinctrl_pads),
.flags = IMX_USE_SCU,
+ .imx_pinconf_get = imx_pinconf_get_scu,
+ .imx_pinconf_set = imx_pinconf_set_scu,
+ .imx_pinctrl_parse_pin = imx_pinctrl_parse_pin_scu,
};
static const struct of_device_id imx8dxl_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
index 095acf494641..8f46b9404cd7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qm.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c
@@ -292,6 +292,9 @@ static const struct imx_pinctrl_soc_info imx8qm_pinctrl_info = {
.pins = imx8qm_pinctrl_pads,
.npins = ARRAY_SIZE(imx8qm_pinctrl_pads),
.flags = IMX_USE_SCU,
+ .imx_pinconf_get = imx_pinconf_get_scu,
+ .imx_pinconf_set = imx_pinconf_set_scu,
+ .imx_pinctrl_parse_pin = imx_pinctrl_parse_pin_scu,
};
static const struct of_device_id imx8qm_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
index 81ebd4c952ec..6776ad6a3a27 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c
@@ -198,6 +198,9 @@ static struct imx_pinctrl_soc_info imx8qxp_pinctrl_info = {
.pins = imx8qxp_pinctrl_pads,
.npins = ARRAY_SIZE(imx8qxp_pinctrl_pads),
.flags = IMX_USE_SCU,
+ .imx_pinconf_get = imx_pinconf_get_scu,
+ .imx_pinconf_set = imx_pinconf_set_scu,
+ .imx_pinctrl_parse_pin = imx_pinctrl_parse_pin_scu,
};
static const struct of_device_id imx8qxp_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c
index 9df45d3e3226..59b5f8a35111 100644
--- a/drivers/pinctrl/freescale/pinctrl-scu.c
+++ b/drivers/pinctrl/freescale/pinctrl-scu.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
@@ -123,3 +124,7 @@ void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl,
pin_scu->mux_mode, pin_scu->config);
}
EXPORT_SYMBOL_GPL(imx_pinctrl_parse_pin_scu);
+
+MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX SCU common pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index b3e6060db52d..28e5f824ba45 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -6,11 +6,7 @@ if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
depends on ACPI
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
+ select PINCTRL_INTEL
help
driver for memory mapped GPIO functionality on Intel Baytrail
platforms. Supports 3 banks with 102, 28 and 44 gpios.
@@ -22,11 +18,7 @@ config PINCTRL_BAYTRAIL
config PINCTRL_CHERRYVIEW
tristate "Intel Cherryview/Braswell pinctrl and GPIO driver"
depends on ACPI
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB
- select GPIOLIB_IRQCHIP
+ select PINCTRL_INTEL
help
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d6e35cba3065..d49aab3cfbaa 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1635,28 +1635,14 @@ static const struct acpi_device_id byt_gpio_acpi_match[] = {
static int byt_pinctrl_probe(struct platform_device *pdev)
{
- const struct intel_pinctrl_soc_data *soc_data = NULL;
- const struct intel_pinctrl_soc_data **soc_table;
+ const struct intel_pinctrl_soc_data *soc_data;
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev;
struct intel_pinctrl *vg;
- int i, ret;
-
- acpi_dev = ACPI_COMPANION(dev);
- if (!acpi_dev)
- return -ENODEV;
-
- soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
-
- for (i = 0; soc_table[i]; i++) {
- if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
- soc_data = soc_table[i];
- break;
- }
- }
+ int ret;
- if (!soc_data)
- return -ENODEV;
+ soc_data = intel_pinctrl_get_soc_data(pdev);
+ if (IS_ERR(soc_data))
+ return PTR_ERR(soc_data);
vg = devm_kzalloc(dev, sizeof(*vg), GFP_KERNEL);
if (!vg)
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index 515f57a0d180..8078c7739d6a 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -30,12 +30,12 @@
.gpio_base = (g), \
}
-#define CNL_COMMUNITY(b, s, e, o, g) \
+#define CNL_COMMUNITY(b, s, e, ho, g) \
{ \
.barno = (b), \
.padown_offset = CNL_PAD_OWN, \
.padcfglock_offset = CNL_PADCFGLOCK, \
- .hostown_offset = (o), \
+ .hostown_offset = (ho), \
.is_offset = CNL_GPI_IS, \
.ie_offset = CNL_GPI_IE, \
.pin_base = (s), \
@@ -44,10 +44,10 @@
.ngpps = ARRAY_SIZE(g), \
}
-#define CNLLP_COMMUNITY(b, s, e, g) \
+#define CNL_LP_COMMUNITY(b, s, e, g) \
CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
-#define CNLH_COMMUNITY(b, s, e, g) \
+#define CNL_H_COMMUNITY(b, s, e, g) \
CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
/* Cannon Lake-H */
@@ -449,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
};
static const struct intel_community cnlh_communities[] = {
- CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
- CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
- CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
- CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+ CNL_H_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+ CNL_H_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+ CNL_H_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+ CNL_H_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -810,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
};
static const struct intel_community cnllp_communities[] = {
- CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
- CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
- CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+ CNL_LP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+ CNL_LP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+ CNL_LP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnllp_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 06521097513a..2ed17cdf946d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -74,35 +74,11 @@ struct intel_pad_context {
};
/**
- * struct chv_pinctrl - CHV pinctrl private structure
- * @dev: Pointer to the parent device
- * @pctldesc: Pin controller description
- * @pctldev: Pointer to the pin controller device
- * @chip: GPIO chip in this pin controller
- * @irqchip: IRQ chip in this pin controller
- * @soc: Community specific pin configuration data
- * @communities: All communities in this pin controller
- * @ncommunities: Number of communities in this pin controller
- * @context: Configuration saved over system sleep
- * @irq: Our parent irq
+ * struct intel_community_context - community context for Cherryview
* @intr_lines: Mapping between 16 HW interrupt wires and GPIO offset (in GPIO number space)
* @saved_intmask: Interrupt mask saved for system sleep
- *
- * The first group in @groups is expected to contain all pins that can be
- * used as GPIOs.
*/
-struct chv_pinctrl {
- struct device *dev;
- struct pinctrl_desc pctldesc;
- struct pinctrl_dev *pctldev;
- struct gpio_chip chip;
- struct irq_chip irqchip;
- const struct intel_pinctrl_soc_data *soc;
- struct intel_community *communities;
- size_t ncommunities;
- struct intel_pinctrl_context context;
- int irq;
-
+struct intel_community_context {
unsigned int intr_lines[16];
u32 saved_intmask;
};
@@ -588,14 +564,14 @@ static const struct intel_pinctrl_soc_data *chv_soc_data[] = {
*/
static DEFINE_RAW_SPINLOCK(chv_lock);
-static u32 chv_pctrl_readl(struct chv_pinctrl *pctrl, unsigned int offset)
+static u32 chv_pctrl_readl(struct intel_pinctrl *pctrl, unsigned int offset)
{
const struct intel_community *community = &pctrl->communities[0];
return readl(community->regs + offset);
}
-static void chv_pctrl_writel(struct chv_pinctrl *pctrl, unsigned int offset, u32 value)
+static void chv_pctrl_writel(struct intel_pinctrl *pctrl, unsigned int offset, u32 value)
{
const struct intel_community *community = &pctrl->communities[0];
void __iomem *reg = community->regs + offset;
@@ -605,7 +581,7 @@ static void chv_pctrl_writel(struct chv_pinctrl *pctrl, unsigned int offset, u32
readl(reg);
}
-static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned int offset,
+static void __iomem *chv_padreg(struct intel_pinctrl *pctrl, unsigned int offset,
unsigned int reg)
{
const struct intel_community *community = &pctrl->communities[0];
@@ -617,12 +593,12 @@ static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned int offset,
return community->pad_regs + offset + reg;
}
-static u32 chv_readl(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset)
+static u32 chv_readl(struct intel_pinctrl *pctrl, unsigned int pin, unsigned int offset)
{
return readl(chv_padreg(pctrl, pin, offset));
}
-static void chv_writel(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset, u32 value)
+static void chv_writel(struct intel_pinctrl *pctrl, unsigned int pin, unsigned int offset, u32 value)
{
void __iomem *reg = chv_padreg(pctrl, pin, offset);
@@ -632,14 +608,14 @@ static void chv_writel(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int
}
/* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */
-static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned int offset)
+static bool chv_pad_locked(struct intel_pinctrl *pctrl, unsigned int offset)
{
return chv_readl(pctrl, offset, CHV_PADCTRL1) & CHV_PADCTRL1_CFGLOCK;
}
static int chv_get_groups_count(struct pinctrl_dev *pctldev)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->ngroups;
}
@@ -647,7 +623,7 @@ static int chv_get_groups_count(struct pinctrl_dev *pctldev)
static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
unsigned int group)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->groups[group].name;
}
@@ -655,7 +631,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev,
static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
const unsigned int **pins, unsigned int *npins)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*pins = pctrl->soc->groups[group].pins;
*npins = pctrl->soc->groups[group].npins;
@@ -665,7 +641,7 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned int offset)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
u32 ctrl0, ctrl1;
bool locked;
@@ -704,7 +680,7 @@ static const struct pinctrl_ops chv_pinctrl_ops = {
static int chv_get_functions_count(struct pinctrl_dev *pctldev)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->nfunctions;
}
@@ -712,7 +688,7 @@ static int chv_get_functions_count(struct pinctrl_dev *pctldev)
static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
unsigned int function)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->functions[function].name;
}
@@ -722,7 +698,7 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int * const ngroups)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*groups = pctrl->soc->functions[function].groups;
*ngroups = pctrl->soc->functions[function].ngroups;
@@ -732,7 +708,7 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev,
static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct intel_pingroup *grp;
unsigned long flags;
int i;
@@ -790,7 +766,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static void chv_gpio_clear_triggering(struct chv_pinctrl *pctrl,
+static void chv_gpio_clear_triggering(struct intel_pinctrl *pctrl,
unsigned int offset)
{
u32 invrxtx_mask = CHV_PADCTRL1_INVRXTX_MASK;
@@ -816,7 +792,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
u32 value;
@@ -830,12 +806,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
return -EBUSY;
}
} else {
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
int i;
/* Reset the interrupt mapping */
- for (i = 0; i < ARRAY_SIZE(pctrl->intr_lines); i++) {
- if (pctrl->intr_lines[i] == offset) {
- pctrl->intr_lines[i] = 0;
+ for (i = 0; i < ARRAY_SIZE(cctx->intr_lines); i++) {
+ if (cctx->intr_lines[i] == offset) {
+ cctx->intr_lines[i] = 0;
break;
}
}
@@ -869,7 +846,7 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
raw_spin_lock_irqsave(&chv_lock, flags);
@@ -884,7 +861,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned int offset, bool input)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
unsigned long flags;
u32 ctrl0;
@@ -915,7 +892,7 @@ static const struct pinmux_ops chv_pinmux_ops = {
static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *config)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
unsigned long flags;
u32 ctrl0, ctrl1;
@@ -992,7 +969,7 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
-static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
+static int chv_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
enum pin_config_param param, u32 arg)
{
unsigned long flags;
@@ -1057,7 +1034,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin,
return 0;
}
-static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin,
+static int chv_config_set_oden(struct intel_pinctrl *pctrl, unsigned int pin,
bool enable)
{
unsigned long flags;
@@ -1080,7 +1057,7 @@ static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin,
static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int nconfigs)
{
- struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
int i, ret;
u32 arg;
@@ -1181,7 +1158,7 @@ static struct pinctrl_desc chv_pinctrl_desc = {
static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
u32 ctrl0, cfg;
@@ -1199,7 +1176,7 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
unsigned long flags;
u32 ctrl0;
@@ -1219,7 +1196,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
u32 ctrl0, direction;
unsigned long flags;
@@ -1262,7 +1239,7 @@ static const struct gpio_chip chv_gpio_chip = {
static void chv_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
int pin = irqd_to_hwirq(d);
u32 intr_line;
@@ -1279,7 +1256,7 @@ static void chv_gpio_irq_ack(struct irq_data *d)
static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
int pin = irqd_to_hwirq(d);
u32 value, intr_line;
unsigned long flags;
@@ -1324,7 +1301,8 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
*/
if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
irq_flow_handler_t handler;
unsigned long flags;
@@ -1341,9 +1319,9 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
else
handler = handle_edge_irq;
- if (!pctrl->intr_lines[intsel]) {
+ if (!cctx->intr_lines[intsel]) {
irq_set_handler_locked(d, handler);
- pctrl->intr_lines[intsel] = pin;
+ cctx->intr_lines[intsel] = pin;
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
}
@@ -1355,7 +1333,8 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
@@ -1400,7 +1379,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
value &= CHV_PADCTRL0_INTSEL_MASK;
value >>= CHV_PADCTRL0_INTSEL_SHIFT;
- pctrl->intr_lines[value] = pin;
+ cctx->intr_lines[value] = pin;
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
@@ -1415,8 +1394,9 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
const struct intel_community *community = &pctrl->communities[0];
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
unsigned long flags;
@@ -1431,7 +1411,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(intr_line, &pending, community->nirqs) {
unsigned int irq, offset;
- offset = pctrl->intr_lines[intr_line];
+ offset = cctx->intr_lines[intr_line];
irq = irq_find_mapping(gc->irq.domain, offset);
generic_handle_irq(irq);
}
@@ -1484,7 +1464,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
const struct intel_community *community = &pctrl->communities[0];
int i;
@@ -1506,7 +1486,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
const struct intel_community *community = &pctrl->communities[0];
/*
@@ -1532,7 +1512,7 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
{
- struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
int ret, i;
@@ -1551,7 +1531,7 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
return 0;
}
-static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
@@ -1617,7 +1597,7 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
acpi_physical_address address, u32 bits, u64 *value,
void *handler_context, void *region_context)
{
- struct chv_pinctrl *pctrl = region_context;
+ struct intel_pinctrl *pctrl = region_context;
unsigned long flags;
acpi_status ret = AE_OK;
@@ -1637,34 +1617,23 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
static int chv_pinctrl_probe(struct platform_device *pdev)
{
- const struct intel_pinctrl_soc_data *soc_data = NULL;
- const struct intel_pinctrl_soc_data **soc_table;
+ const struct intel_pinctrl_soc_data *soc_data;
struct intel_community *community;
struct device *dev = &pdev->dev;
- struct chv_pinctrl *pctrl;
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct intel_pinctrl *pctrl;
acpi_status status;
- int ret, irq, i;
+ int ret, irq;
- adev = ACPI_COMPANION(&pdev->dev);
- if (!adev)
- return -ENODEV;
-
- soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev);
- for (i = 0; soc_table[i]; i++) {
- if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
- soc_data = soc_table[i];
- break;
- }
- }
- if (!soc_data)
- return -ENODEV;
+ soc_data = intel_pinctrl_get_soc_data(pdev);
+ if (IS_ERR(soc_data))
+ return PTR_ERR(soc_data);
pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
- pctrl->dev = &pdev->dev;
+ pctrl->dev = dev;
pctrl->soc = soc_data;
pctrl->ncommunities = pctrl->soc->ncommunities;
@@ -1689,19 +1658,24 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
#endif
+ pctrl->context.communities = devm_kcalloc(dev, pctrl->soc->ncommunities,
+ sizeof(*pctrl->context.communities),
+ GFP_KERNEL);
+ if (!pctrl->context.communities)
+ return -ENOMEM;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
pctrl->pctldesc = chv_pinctrl_desc;
- pctrl->pctldesc.name = dev_name(&pdev->dev);
+ pctrl->pctldesc.name = dev_name(dev);
pctrl->pctldesc.pins = pctrl->soc->pins;
pctrl->pctldesc.npins = pctrl->soc->npins;
- pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
- pctrl);
+ pctrl->pctldev = devm_pinctrl_register(dev, &pctrl->pctldesc, pctrl);
if (IS_ERR(pctrl->pctldev)) {
- dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+ dev_err(dev, "failed to register pinctrl driver\n");
return PTR_ERR(pctrl->pctldev);
}
@@ -1714,7 +1688,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
chv_pinctrl_mmio_access_handler,
NULL, pctrl);
if (ACPI_FAILURE(status))
- dev_err(&pdev->dev, "failed to install ACPI addr space handler\n");
+ dev_err(dev, "failed to install ACPI addr space handler\n");
platform_set_drvdata(pdev, pctrl);
@@ -1723,7 +1697,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
static int chv_pinctrl_remove(struct platform_device *pdev)
{
- struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
+ struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
const struct intel_community *community = &pctrl->communities[0];
acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev),
@@ -1736,13 +1710,14 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int chv_pinctrl_suspend_noirq(struct device *dev)
{
- struct chv_pinctrl *pctrl = dev_get_drvdata(dev);
+ struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned long flags;
int i;
raw_spin_lock_irqsave(&chv_lock, flags);
- pctrl->saved_intmask = chv_pctrl_readl(pctrl, CHV_INTMASK);
+ cctx->saved_intmask = chv_pctrl_readl(pctrl, CHV_INTMASK);
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc;
@@ -1765,7 +1740,8 @@ static int chv_pinctrl_suspend_noirq(struct device *dev)
static int chv_pinctrl_resume_noirq(struct device *dev)
{
- struct chv_pinctrl *pctrl = dev_get_drvdata(dev);
+ struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned long flags;
int i;
@@ -1809,7 +1785,7 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
* the interrupt mask register as well.
*/
chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff);
- chv_pctrl_writel(pctrl, CHV_INTMASK, pctrl->saved_intmask);
+ chv_pctrl_writel(pctrl, CHV_INTMASK, cctx->saved_intmask);
raw_spin_unlock_irqrestore(&chv_lock, flags);
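The Cherryview hunks above fold the driver-private struct chv_pinctrl into the shared struct intel_pinctrl, with the interrupt-line map and the saved interrupt mask moving into per-community context. A minimal sketch of the migrated state, assuming the field names seen in the diff (the real layout lives in pinctrl-intel.h, and the array size of 16 CHV interrupt lines is an assumption):

	/* Sketch only, not the exact kernel layout. */
	struct intel_community_context {
		unsigned int intr_lines[16];	/* interrupt line -> pin offset */
		u32 saved_intmask;		/* CHV_INTMASK across suspend */
	};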
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index b64997b303e0..1c10ab184783 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -62,10 +62,10 @@
#define PADCFG1_TERM_UP BIT(13)
#define PADCFG1_TERM_SHIFT 10
#define PADCFG1_TERM_MASK GENMASK(12, 10)
-#define PADCFG1_TERM_20K 4
-#define PADCFG1_TERM_2K 3
-#define PADCFG1_TERM_5K 2
-#define PADCFG1_TERM_1K 1
+#define PADCFG1_TERM_20K BIT(2)
+#define PADCFG1_TERM_5K BIT(1)
+#define PADCFG1_TERM_1K BIT(0)
+#define PADCFG1_TERM_833 (BIT(1) | BIT(0))
#define PADCFG2 0x008
#define PADCFG2_DEBEN BIT(0)
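The termination rework above drops the unused 2 kOhm value and adds 833 Ohm, spelling each field value with BIT() so it is visible that TERM_833 is the 1 k and 5 k bits combined. An illustrative decode under these definitions (a sketch, not a helper from the driver):

	static unsigned int term_to_ohms(u32 term)
	{
		switch (term) {
		case PADCFG1_TERM_833:		/* BIT(1) | BIT(0) */
			return 833;
		case PADCFG1_TERM_1K:
			return 1000;
		case PADCFG1_TERM_5K:
			return 5000;
		case PADCFG1_TERM_20K:
			return 20000;
		default:
			return 0;		/* no/unknown termination */
		}
	}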
@@ -549,12 +549,12 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return -EINVAL;
switch (term) {
+ case PADCFG1_TERM_833:
+ *arg = 833;
+ break;
case PADCFG1_TERM_1K:
*arg = 1000;
break;
- case PADCFG1_TERM_2K:
- *arg = 2000;
- break;
case PADCFG1_TERM_5K:
*arg = 5000;
break;
@@ -570,6 +570,11 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return -EINVAL;
switch (term) {
+ case PADCFG1_TERM_833:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD))
+ return -EINVAL;
+ *arg = 833;
+ break;
case PADCFG1_TERM_1K:
if (!(community->features & PINCTRL_FEATURE_1K_PD))
return -EINVAL;
@@ -678,6 +683,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
value |= PADCFG1_TERM_UP;
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -685,12 +694,12 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
- case 2000:
- value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
- break;
case 1000:
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
+ case 833:
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ break;
default:
ret = -EINVAL;
}
@@ -700,6 +709,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
+
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -714,6 +727,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
}
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
+ case 833:
+ if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
+ ret = -EINVAL;
+ break;
+ }
+ value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
+ break;
default:
ret = -EINVAL;
}
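Both pull directions now treat an argument of 1 as a request for the default 5 kOhm strength. Generic pinconf packs argument 1 for a bare boolean property, so a device tree that says only "bias-pull-up" still gets a sane strength. A sketch of how the two cases reach intel_config_set_pull():

	unsigned long cfgs[] = {
		/* bare "bias-pull-up": arg 1, defaulted to 5000 ohms above */
		pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1),
		/* an explicit strength passes through the switch unchanged */
		pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 20000),
	};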
@@ -1414,9 +1434,6 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
struct intel_pinctrl *pctrl;
int i, ret, irq;
- if (!soc_data)
- return -EINVAL;
-
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
@@ -1505,12 +1522,27 @@ int intel_pinctrl_probe_by_hid(struct platform_device *pdev)
const struct intel_pinctrl_soc_data *data;
data = device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODATA;
+
return intel_pinctrl_probe(pdev, data);
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_hid);
int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
{
+ const struct intel_pinctrl_soc_data *data;
+
+ data = intel_pinctrl_get_soc_data(pdev);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return intel_pinctrl_probe(pdev, data);
+}
+EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
+
+const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
+{
const struct intel_pinctrl_soc_data *data = NULL;
const struct intel_pinctrl_soc_data **table;
struct acpi_device *adev;
@@ -1532,15 +1564,15 @@ int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
id = platform_get_device_id(pdev);
if (!id)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
table = (const struct intel_pinctrl_soc_data **)id->driver_data;
data = table[pdev->id];
}
- return intel_pinctrl_probe(pdev, data);
+ return data ?: ERR_PTR(-ENODATA);
}
-EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
+EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
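The extracted helper reports failure through ERR_PTR() codes, and the closing "data ?: ERR_PTR(-ENODATA)" maps a missing table entry to -ENODATA. The caller pattern, mirroring the probe paths above:

	const struct intel_pinctrl_soc_data *data;

	data = intel_pinctrl_get_soc_data(pdev);
	if (IS_ERR(data))
		return PTR_ERR(data);	/* -ENODEV or -ENODATA */

	return intel_pinctrl_probe(pdev, data);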
#ifdef CONFIG_PM_SLEEP
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 4e17308d33e9..ad34b7a3f6ed 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -10,12 +10,15 @@
#ifndef PINCTRL_INTEL_H
#define PINCTRL_INTEL_H
+#include <linux/bits.h>
+#include <linux/compiler_types.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/spinlock_types.h>
-struct pinctrl_pin_desc;
struct platform_device;
struct device;
@@ -194,6 +197,8 @@ struct intel_pinctrl_soc_data {
size_t ncommunities;
};
+const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev);
+
struct intel_pad_context;
struct intel_community_context;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 4d7a86a5a37b..14eac924d43d 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -22,21 +22,26 @@
#define SPT_GPI_IS 0x100
#define SPT_GPI_IE 0x120
-#define SPT_COMMUNITY(b, s, e) \
+#define SPT_COMMUNITY(b, s, e, pl, gs, gn, g, n) \
{ \
.barno = (b), \
.padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_LP_PADCFGLOCK, \
+ .padcfglock_offset = (pl), \
.hostown_offset = SPT_HOSTSW_OWN, \
.is_offset = SPT_GPI_IS, \
.ie_offset = SPT_GPI_IE, \
- .gpp_size = 24, \
- .gpp_num_padown_regs = 4, \
+ .gpp_size = (gs), \
+ .gpp_num_padown_regs = (gn), \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = (n), \
}
-#define SPTH_GPP(r, s, e, g) \
+#define SPT_LP_COMMUNITY(b, s, e) \
+ SPT_COMMUNITY(b, s, e, SPT_LP_PADCFGLOCK, 24, 4, NULL, 0)
+
+#define SPT_H_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
.base = (s), \
@@ -44,19 +49,8 @@
.gpio_base = (g), \
}
-#define SPTH_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_H_PADCFGLOCK, \
- .hostown_offset = SPT_HOSTSW_OWN, \
- .is_offset = SPT_GPI_IS, \
- .ie_offset = SPT_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+#define SPT_H_COMMUNITY(b, s, e, g) \
+ SPT_COMMUNITY(b, s, e, SPT_H_PADCFGLOCK, 0, 0, g, ARRAY_SIZE(g))
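SPT_COMMUNITY is now one parameterized macro: the LP flavour passes a fixed 24-pin gpp geometry with no pad-group table, while the H flavour passes a table and derives ngpps with ARRAY_SIZE(). For illustration, SPT_LP_COMMUNITY(0, 0, 47) expands to roughly:

	{
		.barno = 0,
		.padown_offset = SPT_PAD_OWN,
		.padcfglock_offset = SPT_LP_PADCFGLOCK,
		.hostown_offset = SPT_HOSTSW_OWN,
		.is_offset = SPT_GPI_IS,
		.ie_offset = SPT_GPI_IE,
		.gpp_size = 24,
		.gpp_num_padown_regs = 4,
		.pin_base = 0,
		.npins = 48,	/* (47 - 0 + 1) */
		.gpps = NULL,
		.ngpps = 0,
	}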
/* Sunrisepoint-LP */
static const struct pinctrl_pin_desc sptlp_pins[] = {
@@ -292,9 +286,9 @@ static const struct intel_function sptlp_functions[] = {
};
static const struct intel_community sptlp_communities[] = {
- SPT_COMMUNITY(0, 0, 47),
- SPT_COMMUNITY(1, 48, 119),
- SPT_COMMUNITY(2, 120, 151),
+ SPT_LP_COMMUNITY(0, 0, 47),
+ SPT_LP_COMMUNITY(1, 48, 119),
+ SPT_LP_COMMUNITY(2, 120, 151),
};
static const struct intel_pinctrl_soc_data sptlp_soc_data = {
@@ -554,27 +548,27 @@ static const struct intel_function spth_functions[] = {
};
static const struct intel_padgroup spth_community0_gpps[] = {
- SPTH_GPP(0, 0, 23, 0), /* GPP_A */
- SPTH_GPP(1, 24, 47, 24), /* GPP_B */
+ SPT_H_GPP(0, 0, 23, 0), /* GPP_A */
+ SPT_H_GPP(1, 24, 47, 24), /* GPP_B */
};
static const struct intel_padgroup spth_community1_gpps[] = {
- SPTH_GPP(0, 48, 71, 48), /* GPP_C */
- SPTH_GPP(1, 72, 95, 72), /* GPP_D */
- SPTH_GPP(2, 96, 108, 96), /* GPP_E */
- SPTH_GPP(3, 109, 132, 120), /* GPP_F */
- SPTH_GPP(4, 133, 156, 144), /* GPP_G */
- SPTH_GPP(5, 157, 180, 168), /* GPP_H */
+ SPT_H_GPP(0, 48, 71, 48), /* GPP_C */
+ SPT_H_GPP(1, 72, 95, 72), /* GPP_D */
+ SPT_H_GPP(2, 96, 108, 96), /* GPP_E */
+ SPT_H_GPP(3, 109, 132, 120), /* GPP_F */
+ SPT_H_GPP(4, 133, 156, 144), /* GPP_G */
+ SPT_H_GPP(5, 157, 180, 168), /* GPP_H */
};
static const struct intel_padgroup spth_community3_gpps[] = {
- SPTH_GPP(0, 181, 191, 192), /* GPP_I */
+ SPT_H_GPP(0, 181, 191, 192), /* GPP_I */
};
static const struct intel_community spth_communities[] = {
- SPTH_COMMUNITY(0, 0, 47, spth_community0_gpps),
- SPTH_COMMUNITY(1, 48, 180, spth_community1_gpps),
- SPTH_COMMUNITY(2, 181, 191, spth_community3_gpps),
+ SPT_H_COMMUNITY(0, 0, 47, spth_community0_gpps),
+ SPT_H_COMMUNITY(1, 48, 180, spth_community1_gpps),
+ SPT_H_COMMUNITY(2, 181, 191, spth_community3_gpps),
};
static const struct intel_pinctrl_soc_data spth_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 8c162dd5f5a1..3e354e02f408 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -15,11 +15,13 @@
#include "pinctrl-intel.h"
-#define TGL_PAD_OWN 0x020
-#define TGL_PADCFGLOCK 0x080
-#define TGL_HOSTSW_OWN 0x0b0
-#define TGL_GPI_IS 0x100
-#define TGL_GPI_IE 0x120
+#define TGL_PAD_OWN 0x020
+#define TGL_LP_PADCFGLOCK 0x080
+#define TGL_H_PADCFGLOCK 0x090
+#define TGL_LP_HOSTSW_OWN 0x0b0
+#define TGL_H_HOSTSW_OWN 0x0c0
+#define TGL_GPI_IS 0x100
+#define TGL_GPI_IE 0x120
#define TGL_GPP(r, s, e, g) \
{ \
@@ -29,12 +31,12 @@
.gpio_base = (g), \
}
-#define TGL_COMMUNITY(b, s, e, g) \
+#define TGL_COMMUNITY(b, s, e, pl, ho, g) \
{ \
.barno = (b), \
.padown_offset = TGL_PAD_OWN, \
- .padcfglock_offset = TGL_PADCFGLOCK, \
- .hostown_offset = TGL_HOSTSW_OWN, \
+ .padcfglock_offset = (pl), \
+ .hostown_offset = (ho), \
.is_offset = TGL_GPI_IS, \
.ie_offset = TGL_GPI_IE, \
.pin_base = (s), \
@@ -43,6 +45,12 @@
.ngpps = ARRAY_SIZE(g), \
}
+#define TGL_LP_COMMUNITY(b, s, e, g) \
+ TGL_COMMUNITY(b, s, e, TGL_LP_PADCFGLOCK, TGL_LP_HOSTSW_OWN, g)
+
+#define TGL_H_COMMUNITY(b, s, e, g) \
+ TGL_COMMUNITY(b, s, e, TGL_H_PADCFGLOCK, TGL_H_HOSTSW_OWN, g)
+
/* Tiger Lake-LP */
static const struct pinctrl_pin_desc tgllp_pins[] = {
/* GPP_B */
@@ -367,10 +375,10 @@ static const struct intel_padgroup tgllp_community5_gpps[] = {
};
static const struct intel_community tgllp_communities[] = {
- TGL_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
- TGL_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
- TGL_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
- TGL_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
+ TGL_LP_COMMUNITY(0, 0, 66, tgllp_community0_gpps),
+ TGL_LP_COMMUNITY(1, 67, 170, tgllp_community1_gpps),
+ TGL_LP_COMMUNITY(2, 171, 259, tgllp_community4_gpps),
+ TGL_LP_COMMUNITY(3, 260, 276, tgllp_community5_gpps),
};
static const struct intel_pinctrl_soc_data tgllp_soc_data = {
@@ -723,11 +731,11 @@ static const struct intel_padgroup tglh_community5_gpps[] = {
};
static const struct intel_community tglh_communities[] = {
- TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps),
- TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps),
- TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps),
- TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps),
- TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps),
+ TGL_H_COMMUNITY(0, 0, 78, tglh_community0_gpps),
+ TGL_H_COMMUNITY(1, 79, 180, tglh_community1_gpps),
+ TGL_H_COMMUNITY(2, 181, 217, tglh_community3_gpps),
+ TGL_H_COMMUNITY(3, 218, 266, tglh_community4_gpps),
+ TGL_H_COMMUNITY(4, 267, 290, tglh_community5_gpps),
};
static const struct intel_pinctrl_soc_data tglh_soc_data = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1cedc5f2aadb..eef17f228669 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -119,6 +119,13 @@ config PINCTRL_MT7622
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
+config PINCTRL_MT8167
+ bool "Mediatek MT8167 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK
+
config PINCTRL_MT8173
bool "Mediatek MT8173 pin control"
depends on OF
@@ -133,6 +140,13 @@ config PINCTRL_MT8183
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_PARIS
+config PINCTRL_MT8192
+ bool "Mediatek MT8192 pin control"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_PARIS
+
config PINCTRL_MT8516
bool "Mediatek MT8516 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index b0b07c541d11..01218bf4dc30 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -17,7 +17,9 @@ obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
+obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
+obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o
obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index aa1068d2867f..5e00f93ac998 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -589,7 +589,6 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc)
{
struct pinctrl_pin_desc *pins;
- struct resource *res;
struct mtk_pinctrl *hw;
int err, i;
@@ -612,14 +611,8 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < hw->soc->nbase_names; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- hw->soc->base_names[i]);
- if (!res) {
- dev_err(&pdev->dev, "missing IO resource\n");
- return -ENXIO;
- }
-
- hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ hw->base[i] = devm_platform_ioremap_resource_byname(pdev,
+ hw->soc->base_names[i]);
if (IS_ERR(hw->base[i]))
return PTR_ERR(hw->base[i]);
}
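devm_platform_ioremap_resource_byname() folds the lookup, the missing-resource check, and the error message into one call; devm_ioremap_resource() already logs and returns an ERR_PTR for a NULL resource, which is why the explicit -ENXIO branch could go. For contrast, the removed sequence was equivalent to:

	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   hw->soc->base_names[i]);
	/* devm_ioremap_resource() copes with res == NULL: it prints an
	 * error on the device and returns ERR_PTR(-EINVAL). */
	hw->base[i] = devm_ioremap_resource(&pdev->dev, res);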
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index ce4a8a0cc19c..38c5e166fd0f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -263,6 +263,68 @@ static const struct mtk_pin_desc mt7622_pins[] = {
* hardware probably has multiple combinations of these pinouts.
*/
+/* ANTSEL */
+static int mt7622_antsel0_pins[] = { 91, };
+static int mt7622_antsel0_funcs[] = { 5, };
+static int mt7622_antsel1_pins[] = { 92, };
+static int mt7622_antsel1_funcs[] = { 5, };
+static int mt7622_antsel2_pins[] = { 93, };
+static int mt7622_antsel2_funcs[] = { 5, };
+static int mt7622_antsel3_pins[] = { 94, };
+static int mt7622_antsel3_funcs[] = { 5, };
+static int mt7622_antsel4_pins[] = { 95, };
+static int mt7622_antsel4_funcs[] = { 5, };
+static int mt7622_antsel5_pins[] = { 96, };
+static int mt7622_antsel5_funcs[] = { 5, };
+static int mt7622_antsel6_pins[] = { 97, };
+static int mt7622_antsel6_funcs[] = { 5, };
+static int mt7622_antsel7_pins[] = { 98, };
+static int mt7622_antsel7_funcs[] = { 5, };
+static int mt7622_antsel8_pins[] = { 99, };
+static int mt7622_antsel8_funcs[] = { 5, };
+static int mt7622_antsel9_pins[] = { 100, };
+static int mt7622_antsel9_funcs[] = { 5, };
+static int mt7622_antsel10_pins[] = { 101, };
+static int mt7622_antsel10_funcs[] = { 5, };
+static int mt7622_antsel11_pins[] = { 102, };
+static int mt7622_antsel11_funcs[] = { 5, };
+static int mt7622_antsel12_pins[] = { 73, };
+static int mt7622_antsel12_funcs[] = { 5, };
+static int mt7622_antsel13_pins[] = { 74, };
+static int mt7622_antsel13_funcs[] = { 5, };
+static int mt7622_antsel14_pins[] = { 75, };
+static int mt7622_antsel14_funcs[] = { 5, };
+static int mt7622_antsel15_pins[] = { 76, };
+static int mt7622_antsel15_funcs[] = { 5, };
+static int mt7622_antsel16_pins[] = { 77, };
+static int mt7622_antsel16_funcs[] = { 5, };
+static int mt7622_antsel17_pins[] = { 22, };
+static int mt7622_antsel17_funcs[] = { 5, };
+static int mt7622_antsel18_pins[] = { 79, };
+static int mt7622_antsel18_funcs[] = { 5, };
+static int mt7622_antsel19_pins[] = { 80, };
+static int mt7622_antsel19_funcs[] = { 5, };
+static int mt7622_antsel20_pins[] = { 81, };
+static int mt7622_antsel20_funcs[] = { 5, };
+static int mt7622_antsel21_pins[] = { 82, };
+static int mt7622_antsel21_funcs[] = { 5, };
+static int mt7622_antsel22_pins[] = { 14, };
+static int mt7622_antsel22_funcs[] = { 5, };
+static int mt7622_antsel23_pins[] = { 15, };
+static int mt7622_antsel23_funcs[] = { 5, };
+static int mt7622_antsel24_pins[] = { 16, };
+static int mt7622_antsel24_funcs[] = { 5, };
+static int mt7622_antsel25_pins[] = { 17, };
+static int mt7622_antsel25_funcs[] = { 5, };
+static int mt7622_antsel26_pins[] = { 18, };
+static int mt7622_antsel26_funcs[] = { 5, };
+static int mt7622_antsel27_pins[] = { 19, };
+static int mt7622_antsel27_funcs[] = { 5, };
+static int mt7622_antsel28_pins[] = { 20, };
+static int mt7622_antsel28_funcs[] = { 5, };
+static int mt7622_antsel29_pins[] = { 21, };
+static int mt7622_antsel29_funcs[] = { 5, };
+
/* EMMC */
static int mt7622_emmc_pins[] = { 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, };
static int mt7622_emmc_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
@@ -543,6 +605,36 @@ static int mt7622_wled_pins[] = { 85, };
static int mt7622_wled_funcs[] = { 0, };
static const struct group_desc mt7622_groups[] = {
+ PINCTRL_PIN_GROUP("antsel0", mt7622_antsel0),
+ PINCTRL_PIN_GROUP("antsel1", mt7622_antsel1),
+ PINCTRL_PIN_GROUP("antsel2", mt7622_antsel2),
+ PINCTRL_PIN_GROUP("antsel3", mt7622_antsel3),
+ PINCTRL_PIN_GROUP("antsel4", mt7622_antsel4),
+ PINCTRL_PIN_GROUP("antsel5", mt7622_antsel5),
+ PINCTRL_PIN_GROUP("antsel6", mt7622_antsel6),
+ PINCTRL_PIN_GROUP("antsel7", mt7622_antsel7),
+ PINCTRL_PIN_GROUP("antsel8", mt7622_antsel8),
+ PINCTRL_PIN_GROUP("antsel9", mt7622_antsel9),
+ PINCTRL_PIN_GROUP("antsel10", mt7622_antsel10),
+ PINCTRL_PIN_GROUP("antsel11", mt7622_antsel11),
+ PINCTRL_PIN_GROUP("antsel12", mt7622_antsel12),
+ PINCTRL_PIN_GROUP("antsel13", mt7622_antsel13),
+ PINCTRL_PIN_GROUP("antsel14", mt7622_antsel14),
+ PINCTRL_PIN_GROUP("antsel15", mt7622_antsel15),
+ PINCTRL_PIN_GROUP("antsel16", mt7622_antsel16),
+ PINCTRL_PIN_GROUP("antsel17", mt7622_antsel17),
+ PINCTRL_PIN_GROUP("antsel18", mt7622_antsel18),
+ PINCTRL_PIN_GROUP("antsel19", mt7622_antsel19),
+ PINCTRL_PIN_GROUP("antsel20", mt7622_antsel20),
+ PINCTRL_PIN_GROUP("antsel21", mt7622_antsel21),
+ PINCTRL_PIN_GROUP("antsel22", mt7622_antsel22),
+ PINCTRL_PIN_GROUP("antsel23", mt7622_antsel23),
+ PINCTRL_PIN_GROUP("antsel24", mt7622_antsel24),
+ PINCTRL_PIN_GROUP("antsel25", mt7622_antsel25),
+ PINCTRL_PIN_GROUP("antsel26", mt7622_antsel26),
+ PINCTRL_PIN_GROUP("antsel27", mt7622_antsel27),
+ PINCTRL_PIN_GROUP("antsel28", mt7622_antsel28),
+ PINCTRL_PIN_GROUP("antsel29", mt7622_antsel29),
PINCTRL_PIN_GROUP("emmc", mt7622_emmc),
PINCTRL_PIN_GROUP("emmc_rst", mt7622_emmc_rst),
PINCTRL_PIN_GROUP("ephy_leds", mt7622_ephy_leds),
@@ -663,6 +755,16 @@ static const struct group_desc mt7622_groups[] = {
/* Join those groups that share the same capability from the user's point of
 * view, so that they can be referred to through the device tree.
 */
+static const char *mt7622_antsel_groups[] = { "antsel0", "antsel1", "antsel2",
+ "antsel3", "antsel4", "antsel5",
+ "antsel6", "antsel7", "antsel8",
+ "antsel9", "antsel10", "antsel11",
+ "antsel12", "antsel13", "antsel14",
+ "antsel15", "antsel16", "antsel17",
+ "antsel18", "antsel19", "antsel20",
+ "antsel21", "antsel22", "antsel23",
+ "antsel24", "antsel25", "antsel26",
+ "antsel27", "antsel28", "antsel29",};
static const char *mt7622_emmc_groups[] = { "emmc", "emmc_rst", };
static const char *mt7622_ethernet_groups[] = { "esw", "esw_p0_p1",
"esw_p2_p3_p4", "mdc_mdio",
@@ -732,6 +834,7 @@ static const char *mt7622_uart_groups[] = { "uart0_0_tx_rx",
static const char *mt7622_wdt_groups[] = { "watchdog", };
static const struct function_desc mt7622_functions[] = {
+ {"antsel", mt7622_antsel_groups, ARRAY_SIZE(mt7622_antsel_groups)},
{"emmc", mt7622_emmc_groups, ARRAY_SIZE(mt7622_emmc_groups)},
{"eth", mt7622_ethernet_groups, ARRAY_SIZE(mt7622_ethernet_groups)},
{"i2c", mt7622_i2c_groups, ARRAY_SIZE(mt7622_i2c_groups)},
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8167.c b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
new file mode 100644
index 000000000000..7b68886bad16
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8167.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Min.Guo <min.guo@mediatek.com>
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt8167.h"
+
+static const struct mtk_drv_group_desc mt8167_drv_grp[] = {
+ /* 0E4E8SR 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* 0E2E4SR 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 1, 2, 2),
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+static const struct mtk_pin_drv_grp mt8167_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(1, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(2, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(3, 0xd00, 0, 0),
+ MTK_PIN_DRV_GRP(4, 0xd00, 0, 0),
+
+ MTK_PIN_DRV_GRP(5, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(6, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(7, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(8, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(9, 0xd00, 4, 0),
+ MTK_PIN_DRV_GRP(10, 0xd00, 4, 0),
+
+ MTK_PIN_DRV_GRP(11, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(12, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(13, 0xd00, 8, 0),
+
+ MTK_PIN_DRV_GRP(14, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(15, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(16, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(17, 0xd00, 12, 2),
+
+ MTK_PIN_DRV_GRP(18, 0xd10, 0, 0),
+ MTK_PIN_DRV_GRP(19, 0xd10, 0, 0),
+ MTK_PIN_DRV_GRP(20, 0xd10, 0, 0),
+
+ MTK_PIN_DRV_GRP(21, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(22, 0xd00, 12, 2),
+ MTK_PIN_DRV_GRP(23, 0xd00, 12, 2),
+
+ MTK_PIN_DRV_GRP(24, 0xd00, 8, 0),
+ MTK_PIN_DRV_GRP(25, 0xd00, 8, 0),
+
+ MTK_PIN_DRV_GRP(26, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(27, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(28, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(29, 0xd10, 4, 1),
+ MTK_PIN_DRV_GRP(30, 0xd10, 4, 1),
+
+ MTK_PIN_DRV_GRP(31, 0xd10, 8, 1),
+ MTK_PIN_DRV_GRP(32, 0xd10, 8, 1),
+ MTK_PIN_DRV_GRP(33, 0xd10, 8, 1),
+
+ MTK_PIN_DRV_GRP(34, 0xd10, 12, 0),
+ MTK_PIN_DRV_GRP(35, 0xd10, 12, 0),
+
+ MTK_PIN_DRV_GRP(36, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(37, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(38, 0xd20, 0, 0),
+ MTK_PIN_DRV_GRP(39, 0xd20, 0, 0),
+
+ MTK_PIN_DRV_GRP(40, 0xd20, 4, 1),
+
+ MTK_PIN_DRV_GRP(41, 0xd20, 8, 1),
+ MTK_PIN_DRV_GRP(42, 0xd20, 8, 1),
+ MTK_PIN_DRV_GRP(43, 0xd20, 8, 1),
+
+ MTK_PIN_DRV_GRP(44, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(45, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(46, 0xd20, 12, 1),
+ MTK_PIN_DRV_GRP(47, 0xd20, 12, 1),
+
+ MTK_PIN_DRV_GRP(48, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(49, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(50, 0xd30, 0, 1),
+ MTK_PIN_DRV_GRP(51, 0xd30, 0, 1),
+
+ MTK_PIN_DRV_GRP(54, 0xd30, 8, 1),
+
+ MTK_PIN_DRV_GRP(55, 0xd30, 12, 1),
+ MTK_PIN_DRV_GRP(56, 0xd30, 12, 1),
+ MTK_PIN_DRV_GRP(57, 0xd30, 12, 1),
+
+ MTK_PIN_DRV_GRP(62, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(63, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(64, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(65, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(66, 0xd40, 8, 1),
+ MTK_PIN_DRV_GRP(67, 0xd40, 8, 1),
+
+ MTK_PIN_DRV_GRP(68, 0xd40, 12, 2),
+
+ MTK_PIN_DRV_GRP(69, 0xd50, 0, 2),
+
+ MTK_PIN_DRV_GRP(70, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(71, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(72, 0xd50, 4, 2),
+ MTK_PIN_DRV_GRP(73, 0xd50, 4, 2),
+
+ MTK_PIN_DRV_GRP(100, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(101, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(102, 0xd50, 8, 1),
+ MTK_PIN_DRV_GRP(103, 0xd50, 8, 1),
+
+ MTK_PIN_DRV_GRP(104, 0xd50, 12, 2),
+
+ MTK_PIN_DRV_GRP(105, 0xd60, 0, 2),
+
+ MTK_PIN_DRV_GRP(106, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(107, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(108, 0xd60, 4, 2),
+ MTK_PIN_DRV_GRP(109, 0xd60, 4, 2),
+
+ MTK_PIN_DRV_GRP(110, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(111, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(112, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(113, 0xd70, 0, 2),
+
+ MTK_PIN_DRV_GRP(114, 0xd70, 4, 2),
+
+ MTK_PIN_DRV_GRP(115, 0xd60, 12, 2),
+
+ MTK_PIN_DRV_GRP(116, 0xd60, 8, 2),
+
+ MTK_PIN_DRV_GRP(117, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(118, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(119, 0xd70, 0, 2),
+ MTK_PIN_DRV_GRP(120, 0xd70, 0, 2),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt8167_spec_pupd[] = {
+ MTK_PIN_PUPD_SPEC_SR(14, 0xe50, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(15, 0xe60, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(16, 0xe60, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(17, 0xe60, 10, 9, 8),
+
+ MTK_PIN_PUPD_SPEC_SR(21, 0xe60, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(22, 0xe70, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(23, 0xe70, 6, 5, 4),
+
+ MTK_PIN_PUPD_SPEC_SR(40, 0xe80, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(41, 0xe80, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(42, 0xe90, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(43, 0xe90, 6, 5, 4),
+
+ MTK_PIN_PUPD_SPEC_SR(68, 0xe50, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(69, 0xe50, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(70, 0xe40, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(71, 0xe40, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(72, 0xe40, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(73, 0xe50, 2, 1, 0),
+
+ MTK_PIN_PUPD_SPEC_SR(104, 0xe40, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(105, 0xe30, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(106, 0xe20, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(107, 0xe30, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(108, 0xe30, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(109, 0xe30, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(110, 0xe10, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(111, 0xe10, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(112, 0xe10, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(113, 0xe10, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(114, 0xe20, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(115, 0xe20, 2, 1, 0),
+ MTK_PIN_PUPD_SPEC_SR(116, 0xe20, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(117, 0xe00, 14, 13, 12),
+ MTK_PIN_PUPD_SPEC_SR(118, 0xe00, 10, 9, 8),
+ MTK_PIN_PUPD_SPEC_SR(119, 0xe00, 6, 5, 4),
+ MTK_PIN_PUPD_SPEC_SR(120, 0xe00, 2, 1, 0),
+};
+
+static int mt8167_spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ return mtk_pctrl_spec_pull_set_samereg(regmap, mt8167_spec_pupd,
+ ARRAY_SIZE(mt8167_spec_pupd), pin, align, isup, r1r0);
+}
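Pins listed in mt8167_spec_pupd use dedicated PUPD/R1/R0 bits rather than the generic pull registers, and r1r0 picks one of four resistor combinations. A minimal usage sketch, assuming a valid regmap and the driver's port alignment; the MTK_PUPD_SET_R1R0_* selectors come from dt-bindings/pinctrl/mt65xx.h:

	/* Usage sketch only: pull pin 40 up via the R1R0=01 resistor pair. */
	static int mt8167_example_pull(struct regmap *regmap, unsigned char align)
	{
		return mt8167_spec_pull_set(regmap, 40, align, true,
					    MTK_PUPD_SET_R1R0_01);
	}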
+
+static const struct mtk_pin_ies_smt_set mt8167_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0x900, 2),
+ MTK_PIN_IES_SMT_SPEC(7, 10, 0x900, 3),
+ MTK_PIN_IES_SMT_SPEC(11, 13, 0x900, 12),
+ MTK_PIN_IES_SMT_SPEC(14, 17, 0x900, 13),
+ MTK_PIN_IES_SMT_SPEC(18, 20, 0x910, 10),
+ MTK_PIN_IES_SMT_SPEC(21, 23, 0x900, 13),
+ MTK_PIN_IES_SMT_SPEC(24, 25, 0x900, 12),
+ MTK_PIN_IES_SMT_SPEC(26, 30, 0x900, 0),
+ MTK_PIN_IES_SMT_SPEC(31, 33, 0x900, 1),
+ MTK_PIN_IES_SMT_SPEC(34, 39, 0x900, 2),
+ MTK_PIN_IES_SMT_SPEC(40, 40, 0x910, 11),
+ MTK_PIN_IES_SMT_SPEC(41, 43, 0x900, 10),
+ MTK_PIN_IES_SMT_SPEC(44, 47, 0x900, 11),
+ MTK_PIN_IES_SMT_SPEC(48, 51, 0x900, 14),
+ MTK_PIN_IES_SMT_SPEC(52, 53, 0x910, 0),
+ MTK_PIN_IES_SMT_SPEC(54, 54, 0x910, 2),
+ MTK_PIN_IES_SMT_SPEC(55, 57, 0x910, 4),
+ MTK_PIN_IES_SMT_SPEC(58, 59, 0x900, 15),
+ MTK_PIN_IES_SMT_SPEC(60, 61, 0x910, 1),
+ MTK_PIN_IES_SMT_SPEC(62, 65, 0x910, 5),
+ MTK_PIN_IES_SMT_SPEC(66, 67, 0x910, 6),
+ MTK_PIN_IES_SMT_SPEC(68, 68, 0x930, 2),
+ MTK_PIN_IES_SMT_SPEC(69, 69, 0x930, 1),
+ MTK_PIN_IES_SMT_SPEC(70, 70, 0x930, 6),
+ MTK_PIN_IES_SMT_SPEC(71, 71, 0x930, 5),
+ MTK_PIN_IES_SMT_SPEC(72, 72, 0x930, 4),
+ MTK_PIN_IES_SMT_SPEC(73, 73, 0x930, 3),
+ MTK_PIN_IES_SMT_SPEC(100, 103, 0x910, 7),
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0x920, 12),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0x920, 11),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0x930, 0),
+ MTK_PIN_IES_SMT_SPEC(107, 107, 0x920, 15),
+ MTK_PIN_IES_SMT_SPEC(108, 108, 0x920, 14),
+ MTK_PIN_IES_SMT_SPEC(109, 109, 0x920, 13),
+ MTK_PIN_IES_SMT_SPEC(110, 110, 0x920, 9),
+ MTK_PIN_IES_SMT_SPEC(111, 111, 0x920, 8),
+ MTK_PIN_IES_SMT_SPEC(112, 112, 0x920, 7),
+ MTK_PIN_IES_SMT_SPEC(113, 113, 0x920, 6),
+ MTK_PIN_IES_SMT_SPEC(114, 114, 0x920, 10),
+ MTK_PIN_IES_SMT_SPEC(115, 115, 0x920, 1),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0x920, 0),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0x920, 5),
+ MTK_PIN_IES_SMT_SPEC(118, 118, 0x920, 4),
+ MTK_PIN_IES_SMT_SPEC(119, 119, 0x920, 3),
+ MTK_PIN_IES_SMT_SPEC(120, 120, 0x920, 2),
+ MTK_PIN_IES_SMT_SPEC(121, 124, 0x910, 9),
+};
+
+static const struct mtk_pin_ies_smt_set mt8167_smt_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0xA00, 2),
+ MTK_PIN_IES_SMT_SPEC(7, 10, 0xA00, 3),
+ MTK_PIN_IES_SMT_SPEC(11, 13, 0xA00, 12),
+ MTK_PIN_IES_SMT_SPEC(14, 17, 0xA00, 13),
+ MTK_PIN_IES_SMT_SPEC(18, 20, 0xA10, 10),
+ MTK_PIN_IES_SMT_SPEC(21, 23, 0xA00, 13),
+ MTK_PIN_IES_SMT_SPEC(24, 25, 0xA00, 12),
+ MTK_PIN_IES_SMT_SPEC(26, 30, 0xA00, 0),
+ MTK_PIN_IES_SMT_SPEC(31, 33, 0xA00, 1),
+ MTK_PIN_IES_SMT_SPEC(34, 39, 0xA00, 2),
+ MTK_PIN_IES_SMT_SPEC(40, 40, 0xA10, 11),
+ MTK_PIN_IES_SMT_SPEC(41, 43, 0xA00, 10),
+ MTK_PIN_IES_SMT_SPEC(44, 47, 0xA00, 11),
+ MTK_PIN_IES_SMT_SPEC(48, 51, 0xA00, 14),
+ MTK_PIN_IES_SMT_SPEC(52, 53, 0xA10, 0),
+ MTK_PIN_IES_SMT_SPEC(54, 54, 0xA10, 2),
+ MTK_PIN_IES_SMT_SPEC(55, 57, 0xA10, 4),
+ MTK_PIN_IES_SMT_SPEC(58, 59, 0xA00, 15),
+ MTK_PIN_IES_SMT_SPEC(60, 61, 0xA10, 1),
+ MTK_PIN_IES_SMT_SPEC(62, 65, 0xA10, 5),
+ MTK_PIN_IES_SMT_SPEC(66, 67, 0xA10, 6),
+ MTK_PIN_IES_SMT_SPEC(68, 68, 0xA30, 2),
+ MTK_PIN_IES_SMT_SPEC(69, 69, 0xA30, 1),
+ MTK_PIN_IES_SMT_SPEC(70, 70, 0xA30, 3),
+ MTK_PIN_IES_SMT_SPEC(71, 71, 0xA30, 4),
+ MTK_PIN_IES_SMT_SPEC(72, 72, 0xA30, 5),
+ MTK_PIN_IES_SMT_SPEC(73, 73, 0xA30, 6),
+
+ MTK_PIN_IES_SMT_SPEC(100, 103, 0xA10, 7),
+ MTK_PIN_IES_SMT_SPEC(104, 104, 0xA20, 12),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0xA20, 11),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0xA30, 13),
+ MTK_PIN_IES_SMT_SPEC(107, 107, 0xA20, 14),
+ MTK_PIN_IES_SMT_SPEC(108, 108, 0xA20, 15),
+ MTK_PIN_IES_SMT_SPEC(109, 109, 0xA30, 0),
+ MTK_PIN_IES_SMT_SPEC(110, 110, 0xA20, 9),
+ MTK_PIN_IES_SMT_SPEC(111, 111, 0xA20, 8),
+ MTK_PIN_IES_SMT_SPEC(112, 112, 0xA20, 7),
+ MTK_PIN_IES_SMT_SPEC(113, 113, 0xA20, 6),
+ MTK_PIN_IES_SMT_SPEC(114, 114, 0xA20, 10),
+ MTK_PIN_IES_SMT_SPEC(115, 115, 0xA20, 1),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0xA20, 0),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0xA20, 5),
+ MTK_PIN_IES_SMT_SPEC(118, 118, 0xA20, 4),
+ MTK_PIN_IES_SMT_SPEC(119, 119, 0xA20, 3),
+ MTK_PIN_IES_SMT_SPEC(120, 120, 0xA20, 2),
+ MTK_PIN_IES_SMT_SPEC(121, 124, 0xA10, 9),
+};
+
+static int mt8167_ies_smt_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, int value, enum pin_config_param arg)
+{
+ if (arg == PIN_CONFIG_INPUT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt8167_ies_set,
+ ARRAY_SIZE(mt8167_ies_set), pin, align, value);
+ else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt8167_smt_set,
+ ARRAY_SIZE(mt8167_smt_set), pin, align, value);
+ return -EINVAL;
+}
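The dispatcher routes PIN_CONFIG_INPUT_ENABLE to the IES table and PIN_CONFIG_INPUT_SCHMITT_ENABLE to the SMT table, rejecting everything else. A usage sketch under the same regmap/align assumptions as above:

	/* Enable pin 0's input buffer, then its Schmitt trigger. */
	ret = mt8167_ies_smt_set(regmap, 0, align, 1, PIN_CONFIG_INPUT_ENABLE);
	if (!ret)
		ret = mt8167_ies_smt_set(regmap, 0, align, 1,
					 PIN_CONFIG_INPUT_SCHMITT_ENABLE);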
+
+static const struct mtk_pinctrl_devdata mt8167_pinctrl_data = {
+ .pins = mtk_pins_mt8167,
+ .npins = ARRAY_SIZE(mtk_pins_mt8167),
+ .grp_desc = mt8167_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt8167_drv_grp),
+ .pin_drv_grp = mt8167_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt8167_pin_drv),
+ .spec_pull_set = mt8167_spec_pull_set,
+ .spec_ies_smt_set = mt8167_ies_smt_set,
+ .dir_offset = 0x0000,
+ .pullen_offset = 0x0500,
+ .pullsel_offset = 0x0600,
+ .dout_offset = 0x0100,
+ .din_offset = 0x0200,
+ .pinmux_offset = 0x0300,
+ .type1_start = 125,
+ .type1_end = 125,
+ .port_shf = 4,
+ .port_mask = 0xf,
+ .port_align = 4,
+ .eint_hw = {
+ .port_mask = 7,
+ .ports = 6,
+ .ap_num = 169,
+ .db_cnt = 64,
+ },
+};
+
+static int mt8167_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt8167_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt8167_pctrl_match[] = {
+ {
+ .compatible = "mediatek,mt8167-pinctrl",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mt8167_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt8167_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt8167-pinctrl",
+ .of_match_table = mt8167_pctrl_match,
+ .pm = &mtk_eint_pm_ops,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
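Registering at arch_initcall level brings the pin controller up before ordinary device initcalls run, which matters because almost every peripheral's probe depends on pinmux being available. The conventional alternative, shown only for contrast (not what this driver does):

	/* device_initcall-level registration; too late for early consumers. */
	module_platform_driver(mtk_pinctrl_driver);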
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
new file mode 100644
index 000000000000..0c16b2c756bf
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -0,0 +1,1409 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8192.h"
+#include "pinctrl-paris.h"
+
+/* MT8192 has multiple bases for programming pin configuration, listed below:
+ * iocfg0:0x10005000, iocfg_rm:0x11C20000, iocfg_bm:0x11D10000,
+ * iocfg_bl:0x11D30000, iocfg_br:0x11D40000, iocfg_lm:0x11E20000,
+ * iocfg_lb:0x11E70000, iocfg_rt:0x11EA0000, iocfg_lt:0x11F20000,
+ * iocfg_tl:0x11F30000
+ * _i_base_ can be used to indicate which base the pin should be mapped into.
+ */
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+ PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+ 32, 1)
+
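Both wrappers feed PIN_FIELD_CALC with a 32-bit register width; the trailing flag chooses whether all pins in the range share the first pin's field (1) or each pin gets its own (0). For the non-shared case the lookup arithmetic is roughly as follows (a sketch of the common code's behaviour, not a quote of it):

	/* Locating pin p's field for a non-fixed PIN_FIELD_BASE entry. */
	bits   = s_bit + (p - s_pin) * x_bits;	 /* bit offset within range */
	offset = s_addr + x_addrs * (bits / 32); /* which register */
	bitpos = bits % 32;			 /* position inside it */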
+static const struct mtk_pin_field_calc mt8192_pin_mode_range[] = {
+ PIN_FIELD(0, 228, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_dir_range[] = {
+ PIN_FIELD(0, 228, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_di_range[] = {
+ PIN_FIELD(0, 228, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_do_range[] = {
+ PIN_FIELD(0, 228, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x00f0, 0x10, 8, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x00f0, 0x10, 9, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x00f0, 0x10, 5, 1),
+ PIN_FIELD_BASE(10, 10, 6, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(11, 11, 6, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(12, 12, 6, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 8, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(17, 17, 8, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x0100, 0x10, 5, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(36, 36, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(38, 38, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(39, 39, 2, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0100, 0x10, 4, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x00c0, 0x10, 12, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x00c0, 0x10, 13, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(57, 57, 3, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(58, 58, 3, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(59, 59, 3, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(60, 60, 3, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x00c0, 0x10, 9, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00c0, 0x10, 4, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00c0, 0x10, 5, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x00c0, 0x10, 6, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x00c0, 0x10, 7, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x00c0, 0x10, 8, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x00f0, 0x10, 12, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x00f0, 0x10, 18, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x00f0, 0x10, 17, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00f0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x00f0, 0x10, 16, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00f0, 0x10, 22, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x00f0, 0x10, 15, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00f0, 0x10, 21, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x00f0, 0x10, 6, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x00f0, 0x10, 7, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x00f0, 0x10, 4, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(134, 134, 4, 0x00f0, 0x10, 10, 1),
+ PIN_FIELD_BASE(135, 135, 4, 0x00f0, 0x10, 11, 1),
+ PIN_FIELD_BASE(136, 136, 4, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00f0, 0x10, 14, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00f0, 0x10, 20, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00f0, 0x10, 13, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00f0, 0x10, 19, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x00c0, 0x10, 11, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x00c0, 0x10, 10, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(155, 155, 7, 0x0100, 0x10, 6, 1),
+ PIN_FIELD_BASE(156, 156, 7, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(157, 157, 7, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(158, 158, 7, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0100, 0x10, 7, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0100, 0x10, 12, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0100, 0x10, 13, 1),
+ PIN_FIELD_BASE(162, 162, 7, 0x0100, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 7, 0x0100, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(165, 165, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(166, 166, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(167, 167, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(168, 168, 7, 0x0100, 0x10, 2, 1),
+ PIN_FIELD_BASE(169, 169, 7, 0x0100, 0x10, 3, 1),
+ PIN_FIELD_BASE(170, 170, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0100, 0x10, 8, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0100, 0x10, 9, 1),
+ PIN_FIELD_BASE(178, 178, 7, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(179, 179, 7, 0x0100, 0x10, 10, 1),
+ PIN_FIELD_BASE(180, 180, 7, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(181, 181, 7, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(182, 182, 7, 0x0100, 0x10, 11, 1),
+ PIN_FIELD_BASE(183, 183, 9, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(184, 184, 9, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(185, 185, 9, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 9, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 9, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 9, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 9, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 190, 9, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(191, 191, 9, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 9, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 9, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 9, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(195, 195, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(196, 196, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(197, 197, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(198, 198, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(199, 199, 5, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(206, 206, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(207, 207, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(208, 208, 5, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(209, 209, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(210, 210, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(211, 211, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(212, 212, 5, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(213, 213, 5, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(214, 214, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(215, 215, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(216, 216, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(217, 217, 5, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(218, 218, 5, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(219, 219, 5, 0x0080, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(10, 10, 6, 0x0010, 0x10, 0, 1),
+ PIN_FIELD_BASE(11, 11, 6, 0x0010, 0x10, 1, 1),
+ PIN_FIELD_BASE(12, 12, 6, 0x0010, 0x10, 2, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0010, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0010, 0x10, 4, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0010, 0x10, 5, 1),
+ PIN_FIELD_BASE(16, 16, 8, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(17, 17, 8, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x0030, 0x10, 27, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x0030, 0x10, 24, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x0030, 0x10, 26, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x0030, 0x10, 23, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0030, 0x10, 25, 1),
+ PIN_FIELD_BASE(36, 36, 2, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(37, 37, 2, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 2, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(39, 39, 2, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0030, 0x10, 18, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0030, 0x10, 20, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0030, 0x10, 17, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(57, 57, 3, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(58, 58, 3, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 3, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 3, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0030, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0030, 0x10, 22, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0030, 0x10, 17, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0030, 0x10, 18, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0030, 0x10, 20, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0030, 0x10, 21, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0030, 0x10, 31, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0030, 0x10, 29, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0030, 0x10, 30, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0050, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0050, 0x10, 28, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(134, 134, 4, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(135, 135, 4, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(136, 136, 4, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(155, 155, 7, 0x0050, 0x10, 28, 1),
+ PIN_FIELD_BASE(156, 156, 7, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(157, 157, 7, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(158, 158, 7, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0050, 0x10, 31, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(162, 162, 7, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 7, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 7, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(165, 165, 7, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(166, 166, 7, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(167, 167, 7, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(168, 168, 7, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(169, 169, 7, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(170, 170, 7, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(178, 178, 7, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(179, 179, 7, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(180, 180, 7, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(181, 181, 7, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(182, 182, 7, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(183, 183, 9, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(184, 184, 9, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(185, 185, 9, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 9, 0x0020, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 9, 0x0020, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 9, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 9, 0x0020, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 190, 9, 0x0020, 0x10, 9, 1),
+ PIN_FIELD_BASE(191, 191, 9, 0x0020, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 9, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 9, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 9, 0x0020, 0x10, 11, 1),
+ PIN_FIELD_BASE(195, 195, 5, 0x0030, 0x10, 16, 1),
+ PIN_FIELD_BASE(196, 196, 5, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 5, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 5, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 5, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0030, 0x10, 15, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0030, 0x10, 17, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(206, 206, 5, 0x0030, 0x10, 18, 1),
+ PIN_FIELD_BASE(207, 207, 5, 0x0030, 0x10, 19, 1),
+ PIN_FIELD_BASE(208, 208, 5, 0x0030, 0x10, 20, 1),
+ PIN_FIELD_BASE(209, 209, 5, 0x0030, 0x10, 12, 1),
+ PIN_FIELD_BASE(210, 210, 5, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(211, 211, 5, 0x0030, 0x10, 13, 1),
+ PIN_FIELD_BASE(212, 212, 5, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(213, 213, 5, 0x0030, 0x10, 14, 1),
+ PIN_FIELD_BASE(214, 214, 5, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 5, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 216, 5, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(217, 217, 5, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(218, 218, 5, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(219, 219, 5, 0x0030, 0x10, 2, 1),
+};
+
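+/* Pull-up enable: one bit per pin, for pads with simple PU/PD resistors. */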
+static const struct mtk_pin_field_calc mt8192_pin_pu_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x00b0, 0x10, 9, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x00b0, 0x10, 10, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x00b0, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x00b0, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x00b0, 0x10, 13, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x00b0, 0x10, 14, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x00b0, 0x10, 15, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x00b0, 0x10, 16, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x00b0, 0x10, 17, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x00b0, 0x10, 18, 1),
+ PIN_FIELD_BASE(16, 16, 8, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(17, 17, 8, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x00a0, 0x10, 21, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x00a0, 0x10, 22, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x00a0, 0x10, 23, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x00a0, 0x10, 24, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(36, 36, 2, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(37, 37, 2, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 2, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(39, 39, 2, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x00a0, 0x10, 25, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x00a0, 0x10, 26, 1),
+ PIN_FIELD_BASE(57, 57, 3, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(58, 58, 3, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 3, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 3, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x00b0, 0x10, 23, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x00b0, 0x10, 29, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x00b0, 0x10, 28, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x00b0, 0x10, 27, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x00b0, 0x10, 26, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x00b0, 0x10, 19, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x00b0, 0x10, 20, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x00b0, 0x10, 21, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x00b0, 0x10, 22, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x00b0, 0x10, 6, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x00b0, 0x10, 7, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x00b0, 0x10, 8, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x00b0, 0x10, 3, 1),
+ PIN_FIELD_BASE(134, 134, 4, 0x00b0, 0x10, 4, 1),
+ PIN_FIELD_BASE(135, 135, 4, 0x00b0, 0x10, 5, 1),
+ PIN_FIELD_BASE(136, 136, 4, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x00b0, 0x10, 1, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x00b0, 0x10, 2, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00b0, 0x10, 25, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00b0, 0x10, 31, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00b0, 0x10, 24, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00b0, 0x10, 30, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(156, 156, 7, 0x00a0, 0x10, 29, 1),
+ PIN_FIELD_BASE(157, 157, 7, 0x00a0, 0x10, 30, 1),
+ PIN_FIELD_BASE(158, 158, 7, 0x00a0, 0x10, 31, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x00b0, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x00a0, 0x10, 27, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x00a0, 0x10, 28, 1),
+ PIN_FIELD_BASE(162, 162, 7, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 7, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 7, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(165, 165, 7, 0x00a0, 0x10, 3, 1),
+ PIN_FIELD_BASE(166, 166, 7, 0x00a0, 0x10, 4, 1),
+ PIN_FIELD_BASE(167, 167, 7, 0x00a0, 0x10, 5, 1),
+ PIN_FIELD_BASE(168, 168, 7, 0x00a0, 0x10, 6, 1),
+ PIN_FIELD_BASE(169, 169, 7, 0x00a0, 0x10, 7, 1),
+ PIN_FIELD_BASE(170, 170, 7, 0x00a0, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x00a0, 0x10, 9, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x00a0, 0x10, 13, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x00a0, 0x10, 14, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x00a0, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x00a0, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x00a0, 0x10, 10, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x00a0, 0x10, 11, 1),
+ PIN_FIELD_BASE(178, 178, 7, 0x00a0, 0x10, 16, 1),
+ PIN_FIELD_BASE(179, 179, 7, 0x00a0, 0x10, 17, 1),
+ PIN_FIELD_BASE(180, 180, 7, 0x00a0, 0x10, 18, 1),
+ PIN_FIELD_BASE(181, 181, 7, 0x00a0, 0x10, 19, 1),
+ PIN_FIELD_BASE(182, 182, 7, 0x00a0, 0x10, 20, 1),
+ PIN_FIELD_BASE(195, 195, 5, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(196, 196, 5, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 5, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 5, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 5, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(206, 206, 5, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(207, 207, 5, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(208, 208, 5, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(209, 209, 5, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(210, 210, 5, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(211, 211, 5, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(212, 212, 5, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(213, 213, 5, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(214, 214, 5, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 5, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 216, 5, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(217, 217, 5, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(218, 218, 5, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(219, 219, 5, 0x0050, 0x10, 2, 1),
+};
+
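+/* Pull-down enable: mirrors the pull-up table above. */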
+static const struct mtk_pin_field_calc mt8192_pin_pd_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(1, 1, 4, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(2, 2, 4, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x0090, 0x10, 12, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x0090, 0x10, 13, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x0090, 0x10, 14, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x0090, 0x10, 15, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x0090, 0x10, 16, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x0090, 0x10, 17, 1),
+ PIN_FIELD_BASE(9, 9, 4, 0x0090, 0x10, 18, 1),
+ PIN_FIELD_BASE(16, 16, 8, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(17, 17, 8, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(18, 18, 7, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(19, 19, 7, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(20, 20, 7, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(21, 21, 7, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(26, 26, 3, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(27, 27, 3, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(28, 28, 3, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(29, 29, 3, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(30, 30, 3, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(31, 31, 3, 0x0050, 0x10, 27, 1),
+ PIN_FIELD_BASE(32, 32, 3, 0x0050, 0x10, 24, 1),
+ PIN_FIELD_BASE(33, 33, 3, 0x0050, 0x10, 26, 1),
+ PIN_FIELD_BASE(34, 34, 3, 0x0050, 0x10, 23, 1),
+ PIN_FIELD_BASE(35, 35, 3, 0x0050, 0x10, 25, 1),
+ PIN_FIELD_BASE(36, 36, 2, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(37, 37, 2, 0x0070, 0x10, 21, 1),
+ PIN_FIELD_BASE(38, 38, 2, 0x0070, 0x10, 22, 1),
+ PIN_FIELD_BASE(39, 39, 2, 0x0070, 0x10, 23, 1),
+ PIN_FIELD_BASE(40, 40, 8, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(41, 41, 8, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(42, 42, 8, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(57, 57, 3, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(58, 58, 3, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(59, 59, 3, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(60, 60, 3, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 28, 1),
+ PIN_FIELD_BASE(62, 62, 3, 0x0050, 0x10, 22, 1),
+ PIN_FIELD_BASE(63, 63, 3, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(64, 64, 3, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(65, 65, 3, 0x0050, 0x10, 12, 1),
+ PIN_FIELD_BASE(66, 66, 3, 0x0050, 0x10, 15, 1),
+ PIN_FIELD_BASE(67, 67, 3, 0x0050, 0x10, 16, 1),
+ PIN_FIELD_BASE(68, 68, 3, 0x0050, 0x10, 17, 1),
+ PIN_FIELD_BASE(69, 69, 3, 0x0050, 0x10, 18, 1),
+ PIN_FIELD_BASE(70, 70, 3, 0x0050, 0x10, 19, 1),
+ PIN_FIELD_BASE(71, 71, 3, 0x0050, 0x10, 20, 1),
+ PIN_FIELD_BASE(72, 72, 3, 0x0050, 0x10, 21, 1),
+ PIN_FIELD_BASE(73, 73, 3, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(74, 74, 3, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(75, 75, 3, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(76, 76, 3, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(77, 77, 3, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(78, 78, 3, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(79, 79, 3, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(80, 80, 3, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(81, 81, 3, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(82, 82, 3, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(83, 83, 3, 0x0050, 0x10, 13, 1),
+ PIN_FIELD_BASE(84, 84, 3, 0x0050, 0x10, 14, 1),
+ PIN_FIELD_BASE(85, 85, 3, 0x0050, 0x10, 31, 1),
+ PIN_FIELD_BASE(86, 86, 3, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(87, 87, 3, 0x0050, 0x10, 29, 1),
+ PIN_FIELD_BASE(88, 88, 3, 0x0050, 0x10, 30, 1),
+ PIN_FIELD_BASE(89, 89, 2, 0x0070, 0x10, 24, 1),
+ PIN_FIELD_BASE(90, 90, 2, 0x0070, 0x10, 25, 1),
+ PIN_FIELD_BASE(91, 91, 2, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(92, 92, 2, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(93, 93, 2, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(94, 94, 2, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(95, 95, 2, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(96, 96, 2, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(97, 97, 2, 0x0070, 0x10, 26, 1),
+ PIN_FIELD_BASE(98, 98, 2, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(99, 99, 2, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(100, 100, 2, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(101, 101, 2, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(102, 102, 2, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(107, 107, 2, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(108, 108, 2, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(109, 109, 2, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(110, 110, 2, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(111, 111, 2, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(112, 112, 2, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(113, 113, 2, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(114, 114, 2, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(115, 115, 2, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(116, 116, 2, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(117, 117, 2, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x0090, 0x10, 23, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x0090, 0x10, 29, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x0090, 0x10, 28, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00a0, 0x10, 2, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0090, 0x10, 27, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00a0, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0090, 0x10, 26, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00a0, 0x10, 0, 1),
+ PIN_FIELD_BASE(126, 126, 4, 0x0090, 0x10, 19, 1),
+ PIN_FIELD_BASE(127, 127, 4, 0x0090, 0x10, 20, 1),
+ PIN_FIELD_BASE(128, 128, 4, 0x0090, 0x10, 21, 1),
+ PIN_FIELD_BASE(129, 129, 4, 0x0090, 0x10, 22, 1),
+ PIN_FIELD_BASE(130, 130, 4, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(131, 131, 4, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(132, 132, 4, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(133, 133, 4, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(134, 134, 4, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(135, 135, 4, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(136, 136, 4, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(137, 137, 4, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(138, 138, 4, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0090, 0x10, 25, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0090, 0x10, 31, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0090, 0x10, 24, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0090, 0x10, 30, 1),
+ PIN_FIELD_BASE(143, 143, 1, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(144, 144, 1, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(145, 145, 1, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(146, 146, 1, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(147, 147, 1, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(148, 148, 1, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(149, 149, 1, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(150, 150, 1, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(151, 151, 1, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(156, 156, 7, 0x0070, 0x10, 29, 1),
+ PIN_FIELD_BASE(157, 157, 7, 0x0070, 0x10, 30, 1),
+ PIN_FIELD_BASE(158, 158, 7, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(159, 159, 7, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0070, 0x10, 27, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0070, 0x10, 28, 1),
+ PIN_FIELD_BASE(162, 162, 7, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(163, 163, 7, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(164, 164, 7, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(165, 165, 7, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(166, 166, 7, 0x0070, 0x10, 4, 1),
+ PIN_FIELD_BASE(167, 167, 7, 0x0070, 0x10, 5, 1),
+ PIN_FIELD_BASE(168, 168, 7, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(169, 169, 7, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(170, 170, 7, 0x0070, 0x10, 8, 1),
+ PIN_FIELD_BASE(171, 171, 7, 0x0070, 0x10, 9, 1),
+ PIN_FIELD_BASE(172, 172, 7, 0x0070, 0x10, 13, 1),
+ PIN_FIELD_BASE(173, 173, 7, 0x0070, 0x10, 14, 1),
+ PIN_FIELD_BASE(174, 174, 7, 0x0070, 0x10, 12, 1),
+ PIN_FIELD_BASE(175, 175, 7, 0x0070, 0x10, 15, 1),
+ PIN_FIELD_BASE(176, 176, 7, 0x0070, 0x10, 10, 1),
+ PIN_FIELD_BASE(177, 177, 7, 0x0070, 0x10, 11, 1),
+ PIN_FIELD_BASE(178, 178, 7, 0x0070, 0x10, 16, 1),
+ PIN_FIELD_BASE(179, 179, 7, 0x0070, 0x10, 17, 1),
+ PIN_FIELD_BASE(180, 180, 7, 0x0070, 0x10, 18, 1),
+ PIN_FIELD_BASE(181, 181, 7, 0x0070, 0x10, 19, 1),
+ PIN_FIELD_BASE(182, 182, 7, 0x0070, 0x10, 20, 1),
+ PIN_FIELD_BASE(195, 195, 5, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(196, 196, 5, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(197, 197, 5, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(198, 198, 5, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(199, 199, 5, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(206, 206, 5, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(207, 207, 5, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(208, 208, 5, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(209, 209, 5, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(210, 210, 5, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(211, 211, 5, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(212, 212, 5, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(213, 213, 5, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(214, 214, 5, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(215, 215, 5, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(216, 216, 5, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(217, 217, 5, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(218, 218, 5, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(219, 219, 5, 0x0040, 0x10, 2, 1),
+};
+
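+/* Drive-strength selectors: mostly 3-bit (a few 2-bit) fields; neighbouring pins may share a field. */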
+static const struct mtk_pin_field_calc mt8192_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 4, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(1, 1, 4, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(2, 2, 4, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(3, 3, 4, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(4, 4, 4, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(5, 5, 4, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(6, 6, 4, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(7, 7, 4, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(9, 9, 4, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(10, 10, 6, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(11, 11, 6, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(12, 12, 6, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(13, 13, 6, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(14, 14, 6, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(15, 15, 6, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(16, 16, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(17, 17, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(18, 18, 7, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(19, 19, 7, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(20, 20, 7, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(21, 21, 7, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(26, 26, 3, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(27, 27, 3, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(28, 28, 3, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(29, 29, 3, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(30, 30, 3, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(31, 31, 3, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(32, 32, 3, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(33, 33, 3, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(34, 34, 3, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(35, 35, 3, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(36, 36, 2, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(37, 37, 2, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(38, 38, 2, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(39, 39, 2, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(40, 40, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(41, 41, 8, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 8, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(43, 43, 7, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(44, 44, 7, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(45, 45, 1, 0x0010, 0x10, 6, 2),
+ PIN_FIELD_BASE(46, 46, 1, 0x0010, 0x10, 6, 2),
+ PIN_FIELD_BASE(47, 47, 1, 0x0010, 0x10, 6, 2),
+ PIN_FIELD_BASE(48, 48, 1, 0x0010, 0x10, 8, 2),
+ PIN_FIELD_BASE(49, 49, 1, 0x0010, 0x10, 8, 2),
+ PIN_FIELD_BASE(50, 50, 1, 0x0010, 0x10, 8, 2),
+ PIN_FIELD_BASE(51, 51, 1, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(52, 52, 1, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(53, 53, 1, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(54, 54, 1, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(55, 55, 1, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(56, 56, 1, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(57, 57, 3, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(58, 58, 3, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(59, 59, 3, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(60, 60, 3, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(61, 61, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(62, 62, 3, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(63, 63, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(64, 64, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(65, 65, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(66, 66, 3, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(67, 67, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(68, 68, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(69, 69, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(70, 70, 3, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(71, 71, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(72, 72, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(73, 73, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(74, 74, 3, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(75, 75, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(76, 76, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(77, 77, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(78, 78, 3, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(79, 79, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(80, 80, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(82, 82, 3, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(83, 83, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(84, 84, 3, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(85, 85, 3, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(86, 86, 3, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(87, 87, 3, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(88, 88, 3, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(89, 89, 2, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(90, 90, 2, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(91, 91, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(92, 92, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(93, 93, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(94, 94, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(95, 95, 2, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(98, 98, 2, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(99, 99, 2, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(100, 100, 2, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(101, 101, 2, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(102, 102, 2, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(103, 103, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(104, 104, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(105, 105, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(106, 106, 2, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(107, 107, 2, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(108, 108, 2, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(109, 109, 2, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(110, 110, 2, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(111, 111, 2, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(112, 112, 2, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(113, 113, 2, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(114, 114, 2, 0x0010, 0x10, 15, 3),
+ PIN_FIELD_BASE(115, 115, 2, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(116, 116, 2, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(117, 117, 2, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(118, 118, 4, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(119, 119, 4, 0x0020, 0x10, 21, 3),
+ PIN_FIELD_BASE(120, 120, 4, 0x0020, 0x10, 18, 3),
+ PIN_FIELD_BASE(121, 121, 4, 0x0030, 0x10, 6, 3),
+ PIN_FIELD_BASE(122, 122, 4, 0x0020, 0x10, 15, 3),
+ PIN_FIELD_BASE(123, 123, 4, 0x0030, 0x10, 3, 3),
+ PIN_FIELD_BASE(124, 124, 4, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(125, 125, 4, 0x0030, 0x10, 0, 3),
+ PIN_FIELD_BASE(126, 126, 4, 0x0010, 0x10, 18, 3),
+ PIN_FIELD_BASE(127, 127, 4, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(128, 128, 4, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(129, 129, 4, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(130, 130, 4, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(131, 131, 4, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(132, 132, 4, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(133, 133, 4, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(134, 134, 4, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(135, 135, 4, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(136, 136, 4, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(137, 137, 4, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(138, 138, 4, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(139, 139, 4, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(140, 140, 4, 0x0020, 0x10, 27, 3),
+ PIN_FIELD_BASE(141, 141, 4, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(142, 142, 4, 0x0020, 0x10, 24, 3),
+ PIN_FIELD_BASE(143, 143, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(144, 144, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(145, 145, 1, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(146, 146, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(147, 147, 1, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(148, 148, 1, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(149, 149, 1, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(150, 150, 1, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(151, 151, 1, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(152, 152, 7, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(153, 153, 7, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(154, 154, 7, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(155, 155, 7, 0x0010, 0x10, 21, 3),
+ PIN_FIELD_BASE(156, 156, 7, 0x0020, 0x10, 3, 3),
+ PIN_FIELD_BASE(157, 157, 7, 0x0020, 0x10, 6, 3),
+ PIN_FIELD_BASE(158, 158, 7, 0x0020, 0x10, 9, 3),
+ PIN_FIELD_BASE(159, 159, 7, 0x0020, 0x10, 12, 3),
+ PIN_FIELD_BASE(160, 160, 7, 0x0010, 0x10, 27, 3),
+ PIN_FIELD_BASE(161, 161, 7, 0x0020, 0x10, 0, 3),
+ PIN_FIELD_BASE(162, 162, 7, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(163, 163, 7, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(164, 164, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(165, 165, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(166, 166, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(167, 167, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(168, 168, 7, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(169, 169, 7, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(170, 170, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(171, 171, 7, 0x0010, 0x10, 24, 3),
+ PIN_FIELD_BASE(172, 172, 7, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(173, 173, 7, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(174, 174, 7, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(175, 175, 7, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(176, 176, 7, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(177, 177, 7, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(178, 178, 7, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(179, 179, 7, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(180, 180, 7, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(181, 181, 7, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(182, 182, 7, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(183, 183, 9, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(184, 184, 9, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(185, 185, 9, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(186, 186, 9, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(187, 187, 9, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(188, 188, 9, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(189, 189, 9, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(190, 190, 9, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(191, 191, 9, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(192, 192, 9, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(193, 193, 9, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(194, 194, 9, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(195, 195, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(196, 196, 5, 0x0000, 0x10, 18, 3),
+ PIN_FIELD_BASE(197, 197, 5, 0x0000, 0x10, 24, 3),
+ PIN_FIELD_BASE(198, 198, 5, 0x0000, 0x10, 21, 3),
+ PIN_FIELD_BASE(199, 199, 5, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(200, 200, 8, 0x0000, 0x10, 9, 3),
+ PIN_FIELD_BASE(201, 201, 8, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(202, 202, 5, 0x0010, 0x10, 6, 3),
+ PIN_FIELD_BASE(203, 203, 5, 0x0010, 0x10, 9, 3),
+ PIN_FIELD_BASE(204, 204, 8, 0x0000, 0x10, 6, 3),
+ PIN_FIELD_BASE(205, 205, 8, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(206, 206, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(207, 207, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(208, 208, 5, 0x0010, 0x10, 12, 3),
+ PIN_FIELD_BASE(209, 209, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(210, 210, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(211, 211, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(212, 212, 5, 0x0010, 0x10, 0, 3),
+ PIN_FIELD_BASE(213, 213, 5, 0x0010, 0x10, 3, 3),
+ PIN_FIELD_BASE(214, 214, 5, 0x0000, 0x10, 0, 3),
+ PIN_FIELD_BASE(215, 215, 5, 0x0000, 0x10, 27, 3),
+ PIN_FIELD_BASE(216, 216, 5, 0x0000, 0x10, 12, 3),
+ PIN_FIELD_BASE(217, 217, 5, 0x0000, 0x10, 15, 3),
+ PIN_FIELD_BASE(218, 218, 5, 0x0000, 0x10, 3, 3),
+ PIN_FIELD_BASE(219, 219, 5, 0x0000, 0x10, 6, 3),
+};
+
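+/*
+ * PUPD/R0/R1: pads wired to fixed resistors (e.g. the MSDC lines) pick the
+ * pull direction with PUPD and the resistance with the R0/R1 pair.
+ */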
+static const struct mtk_pin_field_calc mt8192_pin_pupd_range[] = {
+ PIN_FIELD_BASE(10, 10, 6, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(11, 11, 6, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(12, 12, 6, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0060, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0060, 0x10, 11, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0060, 0x10, 10, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0060, 0x10, 7, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0060, 0x10, 8, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0060, 0x10, 6, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0060, 0x10, 0, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0060, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0060, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0060, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0060, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0060, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 31, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(155, 155, 7, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 31, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 31, 1),
+ PIN_FIELD_BASE(183, 183, 9, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(184, 184, 9, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(185, 185, 9, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 9, 0x0030, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 9, 0x0030, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 9, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 9, 0x0030, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 190, 9, 0x0030, 0x10, 9, 1),
+ PIN_FIELD_BASE(191, 191, 9, 0x0030, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 9, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 9, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 9, 0x0030, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 31, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 31, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_r0_range[] = {
+ PIN_FIELD_BASE(10, 10, 6, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(11, 11, 6, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(12, 12, 6, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0080, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0080, 0x10, 11, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0080, 0x10, 10, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0080, 0x10, 7, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0080, 0x10, 8, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0080, 0x10, 6, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0080, 0x10, 0, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0080, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0080, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0080, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0080, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0080, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 0, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 12, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 10, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 22, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 8, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 20, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 6, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 18, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 4, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 16, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 2, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 14, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x00c0, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x00c0, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x00c0, 0x10, 0, 1),
+ PIN_FIELD_BASE(155, 155, 7, 0x00c0, 0x10, 1, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 0, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 2, 1),
+ PIN_FIELD_BASE(183, 183, 9, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(184, 184, 9, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(185, 185, 9, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 9, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 9, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 9, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 9, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 190, 9, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(191, 191, 9, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 9, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 9, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 9, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 6, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 2, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 0, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 4, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_r1_range[] = {
+ PIN_FIELD_BASE(10, 10, 6, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(11, 11, 6, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(12, 12, 6, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(13, 13, 6, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(14, 14, 6, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(15, 15, 6, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(45, 45, 1, 0x0090, 0x10, 9, 1),
+ PIN_FIELD_BASE(46, 46, 1, 0x0090, 0x10, 11, 1),
+ PIN_FIELD_BASE(47, 47, 1, 0x0090, 0x10, 10, 1),
+ PIN_FIELD_BASE(48, 48, 1, 0x0090, 0x10, 7, 1),
+ PIN_FIELD_BASE(49, 49, 1, 0x0090, 0x10, 8, 1),
+ PIN_FIELD_BASE(50, 50, 1, 0x0090, 0x10, 6, 1),
+ PIN_FIELD_BASE(51, 51, 1, 0x0090, 0x10, 0, 1),
+ PIN_FIELD_BASE(52, 52, 1, 0x0090, 0x10, 1, 1),
+ PIN_FIELD_BASE(53, 53, 1, 0x0090, 0x10, 5, 1),
+ PIN_FIELD_BASE(54, 54, 1, 0x0090, 0x10, 2, 1),
+ PIN_FIELD_BASE(55, 55, 1, 0x0090, 0x10, 4, 1),
+ PIN_FIELD_BASE(56, 56, 1, 0x0090, 0x10, 3, 1),
+ PIN_FIELD_BASE(118, 118, 4, 0x00e0, 0x10, 1, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x00e0, 0x10, 13, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x00e0, 0x10, 11, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x00e0, 0x10, 23, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x00e0, 0x10, 9, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x00e0, 0x10, 21, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x00e0, 0x10, 7, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x00e0, 0x10, 19, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x00e0, 0x10, 5, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x00e0, 0x10, 17, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x00e0, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x00e0, 0x10, 15, 1),
+ PIN_FIELD_BASE(152, 152, 7, 0x00d0, 0x10, 3, 1),
+ PIN_FIELD_BASE(153, 153, 7, 0x00d0, 0x10, 2, 1),
+ PIN_FIELD_BASE(154, 154, 7, 0x00d0, 0x10, 0, 1),
+ PIN_FIELD_BASE(155, 155, 7, 0x00d0, 0x10, 1, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x00f0, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x00f0, 0x10, 3, 1),
+ PIN_FIELD_BASE(183, 183, 9, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(184, 184, 9, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(185, 185, 9, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(186, 186, 9, 0x0050, 0x10, 6, 1),
+ PIN_FIELD_BASE(187, 187, 9, 0x0050, 0x10, 8, 1),
+ PIN_FIELD_BASE(188, 188, 9, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(189, 189, 9, 0x0050, 0x10, 7, 1),
+ PIN_FIELD_BASE(190, 190, 9, 0x0050, 0x10, 9, 1),
+ PIN_FIELD_BASE(191, 191, 9, 0x0050, 0x10, 10, 1),
+ PIN_FIELD_BASE(192, 192, 9, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(193, 193, 9, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(194, 194, 9, 0x0050, 0x10, 11, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0070, 0x10, 7, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0070, 0x10, 3, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0070, 0x10, 1, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0070, 0x10, 5, 1),
+};
+
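+/* EN/E0/E1: advanced drive-strength controls, driven by the adv_drive callbacks below. */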
+static const struct mtk_pin_field_calc mt8192_pin_e1e0en_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 0, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 18, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 15, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 3, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 12, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 0, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 9, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 27, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 6, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 24, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 3, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 21, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 0, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 3, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 3, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 9, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 0, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 3, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 0, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 6, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_e0_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 1, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 19, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 16, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 4, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 13, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 1, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 10, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 28, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 7, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 25, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 4, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 22, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 1, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 4, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 4, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 10, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 1, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 4, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 1, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 7, 1),
+};
+
+static const struct mtk_pin_field_calc mt8192_pin_e1_range[] = {
+ PIN_FIELD_BASE(118, 118, 4, 0x0040, 0x10, 2, 1),
+ PIN_FIELD_BASE(119, 119, 4, 0x0040, 0x10, 20, 1),
+ PIN_FIELD_BASE(120, 120, 4, 0x0040, 0x10, 17, 1),
+ PIN_FIELD_BASE(121, 121, 4, 0x0050, 0x10, 5, 1),
+ PIN_FIELD_BASE(122, 122, 4, 0x0040, 0x10, 14, 1),
+ PIN_FIELD_BASE(123, 123, 4, 0x0050, 0x10, 2, 1),
+ PIN_FIELD_BASE(124, 124, 4, 0x0040, 0x10, 11, 1),
+ PIN_FIELD_BASE(125, 125, 4, 0x0040, 0x10, 29, 1),
+ PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 8, 1),
+ PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 26, 1),
+ PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 5, 1),
+ PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 23, 1),
+ PIN_FIELD_BASE(160, 160, 7, 0x0030, 0x10, 2, 1),
+ PIN_FIELD_BASE(161, 161, 7, 0x0030, 0x10, 5, 1),
+ PIN_FIELD_BASE(200, 200, 8, 0x0010, 0x10, 5, 1),
+ PIN_FIELD_BASE(201, 201, 8, 0x0010, 0x10, 11, 1),
+ PIN_FIELD_BASE(202, 202, 5, 0x0020, 0x10, 2, 1),
+ PIN_FIELD_BASE(203, 203, 5, 0x0020, 0x10, 5, 1),
+ PIN_FIELD_BASE(204, 204, 8, 0x0010, 0x10, 2, 1),
+ PIN_FIELD_BASE(205, 205, 8, 0x0010, 0x10, 8, 1),
+};
+
+static const char * const mt8192_pinctrl_register_base_names[] = {
+ "iocfg0", "iocfg_rm", "iocfg_bm", "iocfg_bl", "iocfg_br",
+ "iocfg_lm", "iocfg_lb", "iocfg_rt", "iocfg_lt", "iocfg_tl",
+};
+
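+/* EINT controller layout: up to 224 interrupt-capable pins, 32 debounce channels. */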
+static const struct mtk_eint_hw mt8192_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = 224,
+ .db_cnt = 32,
+};
+
+static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8192_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
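+ /* SR shares the DIR range here, as in other MediaTek v2 pinctrl drivers. */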
+ [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8192_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8192_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8192_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8192_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8192_pin_r1_range),
+ [PINCTRL_PIN_REG_DRV_EN] = MTK_RANGE(mt8192_pin_e1e0en_range),
+ [PINCTRL_PIN_REG_DRV_E0] = MTK_RANGE(mt8192_pin_e0_range),
+ [PINCTRL_PIN_REG_DRV_E1] = MTK_RANGE(mt8192_pin_e1_range),
+};
+
+static const struct mtk_pin_soc mt8192_data = {
+ .reg_cal = mt8192_reg_cals,
+ .pins = mtk_pins_mt8192,
+ .npins = ARRAY_SIZE(mtk_pins_mt8192),
+ .ngrps = ARRAY_SIZE(mtk_pins_mt8192),
+ .base_names = mt8192_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt8192_pinctrl_register_base_names),
+ .eint_hw = &mt8192_eint_hw,
+ .nfuncs = 8,
+ .gpio_m = 0,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_raw,
+ .drive_get = mtk_pinconf_drive_get_raw,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+ .adv_drive_get = mtk_pinconf_adv_drive_get,
+ .adv_drive_set = mtk_pinconf_adv_drive_set,
+};
+
+static const struct of_device_id mt8192_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt8192-pinctrl", },
+ { }
+};
+
+static int mt8192_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_paris_pinctrl_probe(pdev, &mt8192_data);
+}
+
+static struct platform_driver mt8192_pinctrl_driver = {
+ .driver = {
+ .name = "mt8192-pinctrl",
+ .of_match_table = mt8192_pinctrl_of_match,
+ .pm = &mtk_paris_pinctrl_pm_ops,
+ },
+ .probe = mt8192_pinctrl_probe,
+};
+
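+/* Register at arch_initcall time so pin state is ready before consumer drivers probe. */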
+static int __init mt8192_pinctrl_init(void)
+{
+ return platform_driver_register(&mt8192_pinctrl_driver);
+}
+arch_initcall(mt8192_pinctrl_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek MT8192 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 35bbe5935708..7e950f5d62d0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -358,7 +358,7 @@ static const struct mtk_eint_xt mtk_eint_xt = {
int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
+ int ret;

if (!IS_ENABLED(CONFIG_EINT_MTK))
return 0;
@@ -370,22 +370,22 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
if (!hw->eint)
return -ENOMEM;

- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eint");
- if (!res) {
- dev_err(&pdev->dev, "Unable to get eint resource\n");
- return -ENODEV;
+ hw->eint->base = devm_platform_ioremap_resource_byname(pdev, "eint");
+ if (IS_ERR(hw->eint->base)) {
+ ret = PTR_ERR(hw->eint->base);
+ goto err_free_eint;
}

- hw->eint->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hw->eint->base))
- return PTR_ERR(hw->eint->base);
-
hw->eint->irq = irq_of_parse_and_map(np, 0);
- if (!hw->eint->irq)
- return -EINVAL;
+ if (!hw->eint->irq) {
+ ret = -EINVAL;
+ goto err_free_eint;
+ }

- if (!hw->soc->eint_hw)
- return -ENODEV;
+ if (!hw->soc->eint_hw) {
+ ret = -ENODEV;
+ goto err_free_eint;
+ }

hw->eint->dev = &pdev->dev;
hw->eint->hw = hw->soc->eint_hw;
@@ -393,6 +393,11 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
hw->eint->gpio_xlate = &mtk_eint_xt;

return mtk_eint_do_init(hw->eint);
+
+err_free_eint:
+ devm_kfree(hw->dev, hw->eint);
+ hw->eint = NULL;
+ return ret;
}
EXPORT_SYMBOL_GPL(mtk_build_eint);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h
new file mode 100644
index 000000000000..225c41fc9b75
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8167.h
@@ -0,0 +1,1248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+#ifndef __PINCTRL_MTK_MT8167_H
+#define __PINCTRL_MTK_MT8167_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
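+/*
+ * Per-pin descriptors: function 0 is always GPIO; the numbered selectors are
+ * the pad's alternate functions.
+ */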
+static const struct mtk_desc_pin mtk_pins_mt8167[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "EINT0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 0),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "PWM_B"),
+ MTK_FUNCTION(2, "DPI_CK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "EXT_TXD0"),
+ MTK_FUNCTION(6, "SQICS"),
+ MTK_FUNCTION(7, "DBG_MON_A[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "EINT1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 1),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "PWM_C"),
+ MTK_FUNCTION(2, "DPI_D12"),
+ MTK_FUNCTION(3, "I2S2_DI"),
+ MTK_FUNCTION(4, "EXT_TXD1"),
+ MTK_FUNCTION(5, "CONN_MCU_TDO"),
+ MTK_FUNCTION(6, "SQISO"),
+ MTK_FUNCTION(7, "DBG_MON_A[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "EINT2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 2),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "CLKM0"),
+ MTK_FUNCTION(2, "DPI_D13"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "EXT_TXD2"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(6, "SQISI"),
+ MTK_FUNCTION(7, "DBG_MON_A[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "EINT3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 3),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "CLKM1"),
+ MTK_FUNCTION(2, "DPI_D14"),
+ MTK_FUNCTION(3, "SPI_MI"),
+ MTK_FUNCTION(4, "EXT_TXD3"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(6, "SQIWP"),
+ MTK_FUNCTION(7, "DBG_MON_A[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "EINT4"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 4),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "CLKM2"),
+ MTK_FUNCTION(2, "DPI_D15"),
+ MTK_FUNCTION(3, "SPI_MO"),
+ MTK_FUNCTION(4, "EXT_TXC"),
+ MTK_FUNCTION(5, "CONN_MCU_TCK"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_JCKC"),
+ MTK_FUNCTION(7, "DBG_MON_A[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "EINT5"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 5),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "UCTS2"),
+ MTK_FUNCTION(2, "DPI_D16"),
+ MTK_FUNCTION(3, "SPI_CSB"),
+ MTK_FUNCTION(4, "EXT_RXER"),
+ MTK_FUNCTION(5, "CONN_MCU_TDI"),
+ MTK_FUNCTION(6, "CONN_TEST_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "EINT6"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 6),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "URTS2"),
+ MTK_FUNCTION(2, "DPI_D17"),
+ MTK_FUNCTION(3, "SPI_CLK"),
+ MTK_FUNCTION(4, "EXT_RXC"),
+ MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(6, "MM_TEST_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "EINT7"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 7),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SQIRST"),
+ MTK_FUNCTION(2, "DPI_D6"),
+ MTK_FUNCTION(3, "SDA1_0"),
+ MTK_FUNCTION(4, "EXT_RXDV"),
+ MTK_FUNCTION(5, "CONN_MCU_TMS"),
+ MTK_FUNCTION(6, "CONN_MCU_AICE_JMSC"),
+ MTK_FUNCTION(7, "DBG_MON_A[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "EINT8"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 8),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SQICK"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "SCL1_0"),
+ MTK_FUNCTION(4, "EXT_RXD0"),
+ MTK_FUNCTION(5, "ANT_SEL0"),
+ MTK_FUNCTION(6, "DPI_D7"),
+ MTK_FUNCTION(7, "DBG_MON_A[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "EINT9"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 9),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "CLKM4"),
+ MTK_FUNCTION(2, "SDA2_0"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "EXT_RXD1"),
+ MTK_FUNCTION(5, "ANT_SEL1"),
+ MTK_FUNCTION(6, "DPI_D8"),
+ MTK_FUNCTION(7, "DBG_MON_A[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "EINT10"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 10),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "CLKM5"),
+ MTK_FUNCTION(2, "SCL2_0"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "EXT_RXD2"),
+ MTK_FUNCTION(5, "ANT_SEL2"),
+ MTK_FUNCTION(6, "DPI_D9"),
+ MTK_FUNCTION(7, "DBG_MON_A[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "EINT11"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 11),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "CLKM4"),
+ MTK_FUNCTION(2, "PWM_C"),
+ MTK_FUNCTION(3, "CONN_TEST_CK"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "DPI_D10"),
+ MTK_FUNCTION(6, "EXT_RXD3"),
+ MTK_FUNCTION(7, "DBG_MON_A[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "EINT12"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 12),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "CLKM5"),
+ MTK_FUNCTION(2, "PWM_A"),
+ MTK_FUNCTION(3, "SPDIF_OUT"),
+ MTK_FUNCTION(4, "ANT_SEL4"),
+ MTK_FUNCTION(5, "DPI_D11"),
+ MTK_FUNCTION(6, "EXT_TXEN"),
+ MTK_FUNCTION(7, "DBG_MON_A[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "EINT13"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 13),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(3, "TSF_IN"),
+ MTK_FUNCTION(4, "ANT_SEL5"),
+ MTK_FUNCTION(5, "DPI_D0"),
+ MTK_FUNCTION(6, "SPDIF_IN"),
+ MTK_FUNCTION(7, "DBG_MON_A[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "EINT14"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 14),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(2, "I2S_8CH_DO1"),
+ MTK_FUNCTION(3, "TDM_RX_MCK"),
+ MTK_FUNCTION(4, "ANT_SEL1"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(6, "NCLE"),
+ MTK_FUNCTION(7, "DBG_MON_B[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "EINT15"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 15),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(3, "TDM_RX_BCK"),
+ MTK_FUNCTION(4, "ANT_SEL2"),
+ MTK_FUNCTION(5, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(6, "NCEB1"),
+ MTK_FUNCTION(7, "DBG_MON_B[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "EINT16"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 16),
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(2, "I2S_8CH_BCK"),
+ MTK_FUNCTION(3, "TDM_RX_LRCK"),
+ MTK_FUNCTION(4, "ANT_SEL3"),
+ MTK_FUNCTION(5, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(6, "NCEB0"),
+ MTK_FUNCTION(7, "DBG_MON_B[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "EINT17"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 17),
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(2, "I2S_8CH_MCK"),
+ MTK_FUNCTION(3, "TDM_RX_DI"),
+ MTK_FUNCTION(4, "IDDIG"),
+ MTK_FUNCTION(5, "ANT_SEL4"),
+ MTK_FUNCTION(6, "NREB"),
+ MTK_FUNCTION(7, "DBG_MON_B[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "EINT18"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 18),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "I2S3_LRCK"),
+ MTK_FUNCTION(4, "CLKM1"),
+ MTK_FUNCTION(5, "ANT_SEL3"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "EINT19"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 19),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "I2S3_BCK"),
+ MTK_FUNCTION(4, "CLKM2"),
+ MTK_FUNCTION(5, "ANT_SEL4"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "EINT20"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 20),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(3, "I2S3_DO"),
+ MTK_FUNCTION(4, "CLKM3"),
+ MTK_FUNCTION(5, "ANT_SEL5"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "EINT21"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 21),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "NRNB"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "I2S_8CH_DO4"),
+ MTK_FUNCTION(7, "DBG_MON_B[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "EINT22"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 22),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(2, "I2S_8CH_DO2"),
+ MTK_FUNCTION(3, "TSF_IN"),
+ MTK_FUNCTION(4, "USB_DRVVBUS"),
+ MTK_FUNCTION(5, "SPDIF_OUT"),
+ MTK_FUNCTION(6, "NRE_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "EINT23"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 23),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(2, "I2S_8CH_DO3"),
+ MTK_FUNCTION(3, "CLKM0"),
+ MTK_FUNCTION(4, "IR"),
+ MTK_FUNCTION(5, "SPDIF_IN"),
+ MTK_FUNCTION(6, "NDQS_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "EINT24"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 24),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "DPI_D20"),
+ MTK_FUNCTION(2, "DPI_DE"),
+ MTK_FUNCTION(3, "ANT_SEL1"),
+ MTK_FUNCTION(4, "UCTS2"),
+ MTK_FUNCTION(5, "PWM_A"),
+ MTK_FUNCTION(6, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "EINT25"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 25),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "DPI_D19"),
+ MTK_FUNCTION(2, "DPI_VSYNC"),
+ MTK_FUNCTION(3, "ANT_SEL0"),
+ MTK_FUNCTION(4, "URTS2"),
+ MTK_FUNCTION(5, "PWM_B"),
+ MTK_FUNCTION(6, "I2S_8CH_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "PWRAP_SPI0_MI"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 26),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "PWRAP_SPI0_MO"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 27),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "PWRAP_INT"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 28),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S_8CH_MCK"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(6, "I2S3_MCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "PWRAP_SPI0_CK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 29),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "PWRAP_SPI0_CSN"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 30),
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "RTC32K_CK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 31),
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "WATCHDOG"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 32),
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "SRCLKENA"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 33),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "URXD2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 34),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "DPI_D5"),
+ MTK_FUNCTION(3, "UTXD2"),
+ MTK_FUNCTION(4, "DBG_SCL"),
+ MTK_FUNCTION(6, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "UTXD2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 35),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "DPI_HSYNC"),
+ MTK_FUNCTION(3, "URXD2"),
+ MTK_FUNCTION(4, "DBG_SDA"),
+ MTK_FUNCTION(5, "DPI_D18"),
+ MTK_FUNCTION(6, "I2S3_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "MRG_CLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 36),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "MRG_CLK"),
+ MTK_FUNCTION(2, "DPI_D4"),
+ MTK_FUNCTION(3, "I2S0_BCK"),
+ MTK_FUNCTION(4, "I2S3_BCK"),
+ MTK_FUNCTION(5, "PCM0_CLK"),
+ MTK_FUNCTION(6, "IR"),
+ MTK_FUNCTION(7, "DBG_MON_A[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "MRG_SYNC"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 37),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "MRG_SYNC"),
+ MTK_FUNCTION(2, "DPI_D3"),
+ MTK_FUNCTION(3, "I2S0_LRCK"),
+ MTK_FUNCTION(4, "I2S3_LRCK"),
+ MTK_FUNCTION(5, "PCM0_SYNC"),
+ MTK_FUNCTION(6, "EXT_COL"),
+ MTK_FUNCTION(7, "DBG_MON_A[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "MRG_DI"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 38),
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "MRG_DI"),
+ MTK_FUNCTION(2, "DPI_D1"),
+ MTK_FUNCTION(3, "I2S0_DI"),
+ MTK_FUNCTION(4, "I2S3_DO"),
+ MTK_FUNCTION(5, "PCM0_DI"),
+ MTK_FUNCTION(6, "EXT_MDIO"),
+ MTK_FUNCTION(7, "DBG_MON_A[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "MRG_DO"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 39),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "MRG_DO"),
+ MTK_FUNCTION(2, "DPI_D2"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S3_MCK"),
+ MTK_FUNCTION(5, "PCM0_DO"),
+ MTK_FUNCTION(6, "EXT_MDC"),
+ MTK_FUNCTION(7, "DBG_MON_A[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "KPROW0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 40),
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(4, "IMG_TEST_CK"),
+ MTK_FUNCTION(7, "DBG_MON_B[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "KPROW1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 41),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "MFG_TEST_CK"),
+ MTK_FUNCTION(7, "DBG_MON_B[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "KPCOL0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 42),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "KPCOL1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 43),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "TSF_IN"),
+ MTK_FUNCTION(5, "DFD_NTRST_XI"),
+ MTK_FUNCTION(6, "UDI_NTRST_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "JTMS"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 44),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, "CONN_MCU_TMS"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC"),
+ MTK_FUNCTION(4, "GPUDFD_TMS_XI"),
+ MTK_FUNCTION(5, "DFD_TMS_XI"),
+ MTK_FUNCTION(6, "UDI_TMS_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "JTCK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 45),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, "CONN_MCU_TCK"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC"),
+ MTK_FUNCTION(4, "GPUDFD_TCK_XI"),
+ MTK_FUNCTION(5, "DFD_TCK_XI"),
+ MTK_FUNCTION(6, "UDI_TCK_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "JTDI"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 46),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, "CONN_MCU_TDI"),
+ MTK_FUNCTION(4, "GPUDFD_TDI_XI"),
+ MTK_FUNCTION(5, "DFD_TDI_XI"),
+ MTK_FUNCTION(6, "UDI_TDI_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "JTDO"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 47),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "CONN_MCU_TDO"),
+ MTK_FUNCTION(4, "GPUDFD_TDO"),
+ MTK_FUNCTION(5, "DFD_TDO"),
+ MTK_FUNCTION(6, "UDI_TDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "SPI_CS"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 48),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "SPI_CSB"),
+ MTK_FUNCTION(3, "I2S0_DI"),
+ MTK_FUNCTION(4, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "SPI_CK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 49),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "SPI_CLK"),
+ MTK_FUNCTION(3, "I2S0_LRCK"),
+ MTK_FUNCTION(4, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "SPI_MI"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 50),
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "SPI_MI"),
+ MTK_FUNCTION(2, "SPI_MO"),
+ MTK_FUNCTION(3, "I2S0_BCK"),
+ MTK_FUNCTION(4, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "SPI_MO"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 51),
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "SPI_MO"),
+ MTK_FUNCTION(2, "SPI_MI"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(4, "I2S2_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "SDA1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 52),
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "SDA1_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "SCL1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 53),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SCL1_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "DISP_PWM"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 54),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(2, "PWM_B"),
+ MTK_FUNCTION(7, "DBG_MON_B[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "I2S_DATA_IN"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 55),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "UCTS0"),
+ MTK_FUNCTION(3, "I2S3_DO"),
+ MTK_FUNCTION(4, "I2S_8CH_DO1"),
+ MTK_FUNCTION(5, "PWM_A"),
+ MTK_FUNCTION(6, "I2S2_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "I2S_LRCK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 56),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "I2S0_LRCK"),
+ MTK_FUNCTION(3, "I2S3_LRCK"),
+ MTK_FUNCTION(4, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(5, "PWM_B"),
+ MTK_FUNCTION(6, "I2S2_DI"),
+ MTK_FUNCTION(7, "DBG_MON_A[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "I2S_BCK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 57),
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(2, "URTS0"),
+ MTK_FUNCTION(3, "I2S3_BCK"),
+ MTK_FUNCTION(4, "I2S_8CH_BCK"),
+ MTK_FUNCTION(5, "PWM_C"),
+ MTK_FUNCTION(6, "I2S2_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_A[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "SDA0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 58),
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "SDA0_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "SCL0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 59),
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "SCL0_0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "SDA2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 60),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "SDA2_0"),
+ MTK_FUNCTION(2, "PWM_B")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "SCL2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 61),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "SCL2_0"),
+ MTK_FUNCTION(2, "PWM_C")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "URXD0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 62),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "UTXD0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 63),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "URXD1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 64),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "UTXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "UTXD1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 65),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "URXD1"),
+ MTK_FUNCTION(7, "DBG_MON_A[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "LCM_RST"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 66),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(3, "I2S0_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "DSI_TE"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 67),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(3, "I2S_8CH_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "MSDC2_CMD"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 68),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "I2S_8CH_DO4"),
+ MTK_FUNCTION(3, "SDA1_0"),
+ MTK_FUNCTION(5, "USB_SDA"),
+ MTK_FUNCTION(6, "I2S3_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "MSDC2_CLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 69),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "I2S_8CH_DO3"),
+ MTK_FUNCTION(3, "SCL1_0"),
+ MTK_FUNCTION(4, "DPI_D21"),
+ MTK_FUNCTION(5, "USB_SCL"),
+ MTK_FUNCTION(6, "I2S3_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "MSDC2_DAT0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 70),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "I2S_8CH_DO2"),
+ MTK_FUNCTION(4, "DPI_D22"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "I2S3_DO"),
+ MTK_FUNCTION(7, "DBG_MON_B[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "MSDC2_DAT1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 71),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "I2S_8CH_DO1"),
+ MTK_FUNCTION(3, "PWM_A"),
+ MTK_FUNCTION(4, "I2S3_MCK"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "PWM_B"),
+ MTK_FUNCTION(7, "DBG_MON_B[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "MSDC2_DAT2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 72),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "I2S_8CH_LRCK"),
+ MTK_FUNCTION(3, "SDA2_0"),
+ MTK_FUNCTION(4, "DPI_D23"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "PWM_C"),
+ MTK_FUNCTION(7, "DBG_MON_B[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "MSDC2_DAT3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 73),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "I2S_8CH_BCK"),
+ MTK_FUNCTION(3, "SCL2_0"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "PWM_A"),
+ MTK_FUNCTION(7, "DBG_MON_B[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "TDN3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 74),
+ MTK_FUNCTION(0, "GPI74"),
+ MTK_FUNCTION(1, "TDN3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "TDP3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 75),
+ MTK_FUNCTION(0, "GPI75"),
+ MTK_FUNCTION(1, "TDP3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "TDN2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 76),
+ MTK_FUNCTION(0, "GPI76"),
+ MTK_FUNCTION(1, "TDN2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "TDP2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 77),
+ MTK_FUNCTION(0, "GPI77"),
+ MTK_FUNCTION(1, "TDP2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "TCN"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 78),
+ MTK_FUNCTION(0, "GPI78"),
+ MTK_FUNCTION(1, "TCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "TCP"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 79),
+ MTK_FUNCTION(0, "GPI79"),
+ MTK_FUNCTION(1, "TCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "TDN1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 80),
+ MTK_FUNCTION(0, "GPI80"),
+ MTK_FUNCTION(1, "TDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "TDP1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 81),
+ MTK_FUNCTION(0, "GPI81"),
+ MTK_FUNCTION(1, "TDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "TDN0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 82),
+ MTK_FUNCTION(0, "GPI82"),
+ MTK_FUNCTION(1, "TDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "TDP0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 83),
+ MTK_FUNCTION(0, "GPI83"),
+ MTK_FUNCTION(1, "TDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "RDN0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 84),
+ MTK_FUNCTION(0, "GPI84"),
+ MTK_FUNCTION(1, "RDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "RDP0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 85),
+ MTK_FUNCTION(0, "GPI85"),
+ MTK_FUNCTION(1, "RDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "RDN1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 86),
+ MTK_FUNCTION(0, "GPI86"),
+ MTK_FUNCTION(1, "RDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "RDP1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 87),
+ MTK_FUNCTION(0, "GPI87"),
+ MTK_FUNCTION(1, "RDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "RCN"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 88),
+ MTK_FUNCTION(0, "GPI88"),
+ MTK_FUNCTION(1, "RCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "RCP"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 89),
+ MTK_FUNCTION(0, "GPI89"),
+ MTK_FUNCTION(1, "RCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "RDN2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 90),
+ MTK_FUNCTION(0, "GPI90"),
+ MTK_FUNCTION(1, "RDN2"),
+ MTK_FUNCTION(2, "CMDAT8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "RDP2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 91),
+ MTK_FUNCTION(0, "GPI91"),
+ MTK_FUNCTION(1, "RDP2"),
+ MTK_FUNCTION(2, "CMDAT9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "RDN3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 92),
+ MTK_FUNCTION(0, "GPI92"),
+ MTK_FUNCTION(1, "RDN3"),
+ MTK_FUNCTION(2, "CMDAT4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "RDP3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 93),
+ MTK_FUNCTION(0, "GPI93"),
+ MTK_FUNCTION(1, "RDP3"),
+ MTK_FUNCTION(2, "CMDAT5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "RCN_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 94),
+ MTK_FUNCTION(0, "GPI94"),
+ MTK_FUNCTION(1, "RCN_A"),
+ MTK_FUNCTION(2, "CMDAT6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "RCP_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 95),
+ MTK_FUNCTION(0, "GPI95"),
+ MTK_FUNCTION(1, "RCP_A"),
+ MTK_FUNCTION(2, "CMDAT7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "RDN1_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 96),
+ MTK_FUNCTION(0, "GPI96"),
+ MTK_FUNCTION(1, "RDN1_A"),
+ MTK_FUNCTION(2, "CMDAT2"),
+ MTK_FUNCTION(3, "CMCSD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "RDP1_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 97),
+ MTK_FUNCTION(0, "GPI97"),
+ MTK_FUNCTION(1, "RDP1_A"),
+ MTK_FUNCTION(2, "CMDAT3"),
+ MTK_FUNCTION(3, "CMCSD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "RDN0_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 98),
+ MTK_FUNCTION(0, "GPI98"),
+ MTK_FUNCTION(1, "RDN0_A"),
+ MTK_FUNCTION(2, "CMHSYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "RDP0_A"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 99),
+ MTK_FUNCTION(0, "GPI99"),
+ MTK_FUNCTION(1, "RDP0_A"),
+ MTK_FUNCTION(2, "CMVSYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "CMDAT0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 100),
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "CMDAT0"),
+ MTK_FUNCTION(2, "CMCSD0"),
+ MTK_FUNCTION(3, "ANT_SEL2"),
+ MTK_FUNCTION(5, "TDM_RX_MCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "CMDAT1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 101),
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "CMDAT1"),
+ MTK_FUNCTION(2, "CMCSD1"),
+ MTK_FUNCTION(3, "ANT_SEL3"),
+ MTK_FUNCTION(4, "CMFLASH"),
+ MTK_FUNCTION(5, "TDM_RX_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "CMMCLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 102),
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "CMMCLK"),
+ MTK_FUNCTION(3, "ANT_SEL4"),
+ MTK_FUNCTION(5, "TDM_RX_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "CMPCLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 103),
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "CMPCLK"),
+ MTK_FUNCTION(2, "CMCSK"),
+ MTK_FUNCTION(3, "ANT_SEL5"),
+ MTK_FUNCTION(5, " TDM_RX_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "MSDC1_CMD"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 104),
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(4, "SQICS"),
+ MTK_FUNCTION(7, "DBG_MON_B[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "MSDC1_CLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 105),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "UDI_NTRST_XI"),
+ MTK_FUNCTION(3, "DFD_NTRST_XI"),
+ MTK_FUNCTION(4, "SQISO"),
+ MTK_FUNCTION(5, "GPUEJ_NTRST_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "MSDC1_DAT0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 106),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "UDI_TMS_XI"),
+ MTK_FUNCTION(3, "DFD_TMS_XI"),
+ MTK_FUNCTION(4, "SQISI"),
+ MTK_FUNCTION(5, "GPUEJ_TMS_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "MSDC1_DAT1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 107),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "UDI_TCK_XI"),
+ MTK_FUNCTION(3, "DFD_TCK_XI"),
+ MTK_FUNCTION(4, "SQIWP"),
+ MTK_FUNCTION(5, "GPUEJ_TCK_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "MSDC1_DAT2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 108),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "UDI_TDI_XI"),
+ MTK_FUNCTION(3, "DFD_TDI_XI"),
+ MTK_FUNCTION(4, "SQIRST"),
+ MTK_FUNCTION(5, "GPUEJ_TDI_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "MSDC1_DAT3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 109),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "UDI_TDO"),
+ MTK_FUNCTION(3, "DFD_TDO"),
+ MTK_FUNCTION(4, "SQICK"),
+ MTK_FUNCTION(5, "GPUEJ_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_B[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "MSDC0_DAT7"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 110),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(4, "NLD7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "MSDC0_DAT6"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 111),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(4, "NLD6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "MSDC0_DAT5"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 112),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(4, "NLD4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "MSDC0_DAT4"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 113),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(4, "NLD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "MSDC0_RSTB"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 114),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(4, "NLD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "MSDC0_CMD"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 115),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(4, "NALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "MSDC0_CLK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 116),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(4, "NWEB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "MSDC0_DAT3"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 117),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(4, "NLD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "MSDC0_DAT2"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 118),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(4, "NLD5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "MSDC0_DAT1"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 119),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(4, "NLD8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "MSDC0_DAT0"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 120),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(4, "WATCHDOG"),
+ MTK_FUNCTION(5, "NLD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "CEC"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 121),
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "CEC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "HTPLG"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 122),
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "HTPLG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "HDMISCK"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 123),
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "HDMISCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "HDMISD"),
+ NULL, "mt8167",
+ MTK_EINT_FUNCTION(0, 124),
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "HDMISD")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8167_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8192.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8192.h
new file mode 100644
index 000000000000..071162141376
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8192.h
@@ -0,0 +1,2275 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#ifndef __PINCTRL_MTK_MT8192_H
+#define __PINCTRL_MTK_MT8192_H
+
+#include "pinctrl-paris.h"
+
+static const struct mtk_pin_desc mtk_pins_mt8192[] = {
+ MTK_PIN(
+ 0, "GPIO0",
+ MTK_EINT_FUNCTION(0, 0),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "SPI6_CLK"),
+ MTK_FUNCTION(2, "I2S5_MCK"),
+ MTK_FUNCTION(3, "PWM_0"),
+ MTK_FUNCTION(4, "TDM_LRCK"),
+ MTK_FUNCTION(5, "TP_GPIO0_AO"),
+ MTK_FUNCTION(6, "MD_INT0")
+ ),
+ MTK_PIN(
+ 1, "GPIO1",
+ MTK_EINT_FUNCTION(0, 1),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "SPI6_CSB"),
+ MTK_FUNCTION(2, "I2S5_BCK"),
+ MTK_FUNCTION(3, "PWM_1"),
+ MTK_FUNCTION(4, "TDM_BCK"),
+ MTK_FUNCTION(5, "TP_GPIO1_AO"),
+ MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(7, "DBG_MON_A9")
+ ),
+ MTK_PIN(
+ 2, "GPIO2",
+ MTK_EINT_FUNCTION(0, 2),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "SPI6_MI"),
+ MTK_FUNCTION(2, "I2S5_LRCK"),
+ MTK_FUNCTION(3, "PWM_2"),
+ MTK_FUNCTION(4, "TDM_MCK"),
+ MTK_FUNCTION(5, "TP_GPIO2_AO"),
+ MTK_FUNCTION(6, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(7, "DBG_MON_A10")
+ ),
+ MTK_PIN(
+ 3, "GPIO3",
+ MTK_EINT_FUNCTION(0, 3),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "SPI6_MO"),
+ MTK_FUNCTION(2, "I2S5_DO"),
+ MTK_FUNCTION(3, "PWM_3"),
+ MTK_FUNCTION(4, "TDM_DATA0"),
+ MTK_FUNCTION(5, "TP_GPIO3_AO"),
+ MTK_FUNCTION(6, "CLKM0"),
+ MTK_FUNCTION(7, "DBG_MON_A11")
+ ),
+ MTK_PIN(
+ 4, "GPIO4",
+ MTK_EINT_FUNCTION(0, 4),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "SPI4_A_CLK"),
+ MTK_FUNCTION(2, "I2S2_MCK"),
+ MTK_FUNCTION(3, "DMIC1_CLK"),
+ MTK_FUNCTION(4, "TDM_DATA1"),
+ MTK_FUNCTION(5, "TP_GPIO4_AO"),
+ MTK_FUNCTION(6, "PCM1_DI"),
+ MTK_FUNCTION(7, "IDDIG")
+ ),
+ MTK_PIN(
+ 5, "GPIO5",
+ MTK_EINT_FUNCTION(0, 5),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "SPI4_A_CSB"),
+ MTK_FUNCTION(2, "I2S2_BCK"),
+ MTK_FUNCTION(3, "DMIC1_DAT"),
+ MTK_FUNCTION(4, "TDM_DATA2"),
+ MTK_FUNCTION(5, "TP_GPIO5_AO"),
+ MTK_FUNCTION(6, "PCM1_CLK"),
+ MTK_FUNCTION(7, "USB_DRVVBUS")
+ ),
+ MTK_PIN(
+ 6, "GPIO6",
+ MTK_EINT_FUNCTION(0, 6),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "SPI4_A_MI"),
+ MTK_FUNCTION(2, "I2S2_LRCK"),
+ MTK_FUNCTION(3, "DMIC_CLK"),
+ MTK_FUNCTION(4, "TDM_DATA3"),
+ MTK_FUNCTION(5, "TP_GPIO6_AO"),
+ MTK_FUNCTION(6, "PCM1_SYNC")
+ ),
+ MTK_PIN(
+ 7, "GPIO7",
+ MTK_EINT_FUNCTION(0, 7),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI4_A_MO"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "DMIC_DAT"),
+ MTK_FUNCTION(4, "WIFI_TXD"),
+ MTK_FUNCTION(5, "TP_GPIO7_AO"),
+ MTK_FUNCTION(6, "PCM1_DO0")
+ ),
+ MTK_PIN(
+ 8, "GPIO8",
+ MTK_EINT_FUNCTION(0, 8),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "I2S2_DI2"),
+ MTK_FUNCTION(3, "KPCOL2"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "CLKM1"),
+ MTK_FUNCTION(6, "PCM1_DO1"),
+ MTK_FUNCTION(7, "DBG_MON_A12")
+ ),
+ MTK_PIN(
+ 9, "GPIO9",
+ MTK_EINT_FUNCTION(0, 9),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(3, "KPROW2"),
+ MTK_FUNCTION(4, "CMMCLK4"),
+ MTK_FUNCTION(5, "CLKM3"),
+ MTK_FUNCTION(6, "PCM1_DO2"),
+ MTK_FUNCTION(7, "DBG_MON_A13")
+ ),
+ MTK_PIN(
+ 10, "GPIO10",
+ MTK_EINT_FUNCTION(0, 10),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "SPI4_B_CLK"),
+ MTK_FUNCTION(3, "I2S8_MCK"),
+ MTK_FUNCTION(5, "MD_INT0"),
+ MTK_FUNCTION(6, "TP_GPIO8_AO")
+ ),
+ MTK_PIN(
+ 11, "GPIO11",
+ MTK_EINT_FUNCTION(0, 11),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "SPI4_B_CSB"),
+ MTK_FUNCTION(3, "I2S8_BCK"),
+ MTK_FUNCTION(4, "PCIE_CLKREQ_N"),
+ MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO9_AO")
+ ),
+ MTK_PIN(
+ 12, "GPIO12",
+ MTK_EINT_FUNCTION(0, 12),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "SPI4_B_MI"),
+ MTK_FUNCTION(3, "I2S8_LRCK"),
+ MTK_FUNCTION(4, "DMIC1_CLK"),
+ MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(6, "TP_GPIO10_AO")
+ ),
+ MTK_PIN(
+ 13, "GPIO13",
+ MTK_EINT_FUNCTION(0, 13),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "SPI4_B_MO"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "DMIC1_DAT"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "TP_GPIO11_AO")
+ ),
+ MTK_PIN(
+ 14, "GPIO14",
+ MTK_EINT_FUNCTION(0, 14),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "SCL_6306"),
+ MTK_FUNCTION(4, "PCIE_PERESET_N"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "TP_GPIO12_AO")
+ ),
+ MTK_PIN(
+ 15, "GPIO15",
+ MTK_EINT_FUNCTION(0, 15),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "SDA_6306"),
+ MTK_FUNCTION(4, "PCIE_WAKE_N"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "TP_GPIO13_AO")
+ ),
+ MTK_PIN(
+ 16, "GPIO16",
+ MTK_EINT_FUNCTION(0, 16),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(3, "TP_GPIO14_AO"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "SPI7_A_MI"),
+ MTK_FUNCTION(7, "DBG_MON_A0")
+ ),
+ MTK_PIN(
+ 17, "GPIO17",
+ MTK_EINT_FUNCTION(0, 17),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "USB_DRVVBUS"),
+ MTK_FUNCTION(3, "TP_GPIO15_AO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(6, "SPI7_A_MO"),
+ MTK_FUNCTION(7, "DBG_MON_A1")
+ ),
+ MTK_PIN(
+ 18, "GPIO18",
+ MTK_EINT_FUNCTION(0, 18),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI4_C_MI"),
+ MTK_FUNCTION(3, "SPI1_B_MI"),
+ MTK_FUNCTION(4, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(5, "ANT_SEL10"),
+ MTK_FUNCTION(6, "MD_INT0"),
+ MTK_FUNCTION(7, "DBG_MON_B2")
+ ),
+ MTK_PIN(
+ 19, "GPIO19",
+ MTK_EINT_FUNCTION(0, 19),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI4_C_MO"),
+ MTK_FUNCTION(3, "SPI1_B_MO"),
+ MTK_FUNCTION(5, "ANT_SEL11"),
+ MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(7, "DBG_MON_B3")
+ ),
+ MTK_PIN(
+ 20, "GPIO20",
+ MTK_EINT_FUNCTION(0, 20),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI4_C_CLK"),
+ MTK_FUNCTION(3, "SPI1_B_CLK"),
+ MTK_FUNCTION(4, "PWM_3"),
+ MTK_FUNCTION(5, "ANT_SEL12"),
+ MTK_FUNCTION(6, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(7, "DBG_MON_B4")
+ ),
+ MTK_PIN(
+ 21, "GPIO21",
+ MTK_EINT_FUNCTION(0, 21),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(2, "SPI4_C_CSB"),
+ MTK_FUNCTION(3, "SPI1_B_CSB"),
+ MTK_FUNCTION(6, "IDDIG"),
+ MTK_FUNCTION(7, "DBG_MON_B5")
+ ),
+ MTK_PIN(
+ 22, "GPIO22",
+ MTK_EINT_FUNCTION(0, 22),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(2, "SPI0_C_CLK"),
+ MTK_FUNCTION(3, "SPI7_B_CLK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK"),
+ MTK_FUNCTION(6, "SCL_6306")
+ ),
+ MTK_PIN(
+ 23, "GPIO23",
+ MTK_EINT_FUNCTION(0, 23),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(2, "SPI0_C_CSB"),
+ MTK_FUNCTION(3, "SPI7_B_CSB"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(6, "SDA_6306")
+ ),
+ MTK_PIN(
+ 24, "GPIO24",
+ MTK_EINT_FUNCTION(0, 24),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "SRCLKENAI1"),
+ MTK_FUNCTION(2, "SPI0_C_MI"),
+ MTK_FUNCTION(3, "SPI7_B_MI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI"),
+ MTK_FUNCTION(6, "SPINOR_CS")
+ ),
+ MTK_PIN(
+ 25, "GPIO25",
+ MTK_EINT_FUNCTION(0, 25),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "SRCLKENAI0"),
+ MTK_FUNCTION(2, "SPI0_C_MO"),
+ MTK_FUNCTION(3, "SPI7_B_MO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "SPINOR_CK")
+ ),
+ MTK_PIN(
+ 26, "GPIO26",
+ MTK_EINT_FUNCTION(0, 26),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "SPI5_C_MI"),
+ MTK_FUNCTION(5, "I2S9_BCK")
+ ),
+ MTK_PIN(
+ 27, "GPIO27",
+ MTK_EINT_FUNCTION(0, 27),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "PWM_3"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(4, "SPI5_C_MO"),
+ MTK_FUNCTION(5, "I2S9_LRCK"),
+ MTK_FUNCTION(6, "SPINOR_IO0")
+ ),
+ MTK_PIN(
+ 28, "GPIO28",
+ MTK_EINT_FUNCTION(0, 28),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(4, "SPI5_C_CSB"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "SPINOR_IO1")
+ ),
+ MTK_PIN(
+ 29, "GPIO29",
+ MTK_EINT_FUNCTION(0, 29),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(4, "SPI5_C_CLK"),
+ MTK_FUNCTION(5, "I2S9_DO"),
+ MTK_FUNCTION(6, "SPINOR_IO2")
+ ),
+ MTK_PIN(
+ 30, "GPIO30",
+ MTK_EINT_FUNCTION(0, 30),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "PWM_2"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK"),
+ MTK_FUNCTION(6, "SPINOR_IO3")
+ ),
+ MTK_PIN(
+ 31, "GPIO31",
+ MTK_EINT_FUNCTION(0, 31),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "I2S3_MCK"),
+ MTK_FUNCTION(2, "I2S1_MCK"),
+ MTK_FUNCTION(3, "I2S5_MCK"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "I2S0_MCK")
+ ),
+ MTK_PIN(
+ 32, "GPIO32",
+ MTK_EINT_FUNCTION(0, 32),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "I2S3_BCK"),
+ MTK_FUNCTION(2, "I2S1_BCK"),
+ MTK_FUNCTION(3, "I2S5_BCK"),
+ MTK_FUNCTION(4, "PCM0_CLK"),
+ MTK_FUNCTION(5, "I2S0_BCK")
+ ),
+ MTK_PIN(
+ 33, "GPIO33",
+ MTK_EINT_FUNCTION(0, 33),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "I2S3_LRCK"),
+ MTK_FUNCTION(2, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "I2S5_LRCK"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(5, "I2S0_LRCK")
+ ),
+ MTK_PIN(
+ 34, "GPIO34",
+ MTK_EINT_FUNCTION(0, 34),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "I2S0_DI"),
+ MTK_FUNCTION(2, "I2S2_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(5, "I2S0_DI")
+ ),
+ MTK_PIN(
+ 35, "GPIO35",
+ MTK_EINT_FUNCTION(0, 35),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "I2S3_DO"),
+ MTK_FUNCTION(2, "I2S1_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "PCM0_DO")
+ ),
+ MTK_PIN(
+ 36, "GPIO36",
+ MTK_EINT_FUNCTION(0, 36),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "SPI5_A_CLK"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(4, "MD_URXD0"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(6, "URXD1")
+ ),
+ MTK_PIN(
+ 37, "GPIO37",
+ MTK_EINT_FUNCTION(0, 37),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "SPI5_A_CSB"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(4, "MD_UTXD0"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(6, "UTXD1")
+ ),
+ MTK_PIN(
+ 38, "GPIO38",
+ MTK_EINT_FUNCTION(0, 38),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(1, "SPI5_A_MI"),
+ MTK_FUNCTION(2, "DMIC_CLK"),
+ MTK_FUNCTION(4, "MD_URXD1"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "UCTS1")
+ ),
+ MTK_PIN(
+ 39, "GPIO39",
+ MTK_EINT_FUNCTION(0, 39),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "SPI5_A_MO"),
+ MTK_FUNCTION(2, "DMIC_DAT"),
+ MTK_FUNCTION(4, "MD_UTXD1"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "URTS1")
+ ),
+ MTK_PIN(
+ 40, "GPIO40",
+ MTK_EINT_FUNCTION(0, 40),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "DISP_PWM"),
+ MTK_FUNCTION(7, "DBG_MON_A6")
+ ),
+ MTK_PIN(
+ 41, "GPIO41",
+ MTK_EINT_FUNCTION(0, 41),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(7, "DBG_MON_A7")
+ ),
+ MTK_PIN(
+ 42, "GPIO42",
+ MTK_EINT_FUNCTION(0, 42),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(7, "DBG_MON_A8")
+ ),
+ MTK_PIN(
+ 43, "GPIO43",
+ MTK_EINT_FUNCTION(0, 43),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(3, "SCL_6306"),
+ MTK_FUNCTION(4, "ADSP_URXD0"),
+ MTK_FUNCTION(5, "PTA_RXD"),
+ MTK_FUNCTION(6, "SSPM_URXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B0")
+ ),
+ MTK_PIN(
+ 44, "GPIO44",
+ MTK_EINT_FUNCTION(0, 44),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"),
+ MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"),
+ MTK_FUNCTION(3, "SDA_6306"),
+ MTK_FUNCTION(4, "ADSP_UTXD0"),
+ MTK_FUNCTION(5, "PTA_TXD"),
+ MTK_FUNCTION(6, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(7, "DBG_MON_B1")
+ ),
+ MTK_PIN(
+ 45, "GPIO45",
+ MTK_EINT_FUNCTION(0, 45),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDI"),
+ MTK_FUNCTION(4, "APU_JTAG_TDI"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDI"),
+ MTK_FUNCTION(6, "LVTS_SCK"),
+ MTK_FUNCTION(7, "CONN_DSP_JDI")
+ ),
+ MTK_PIN(
+ 46, "GPIO46",
+ MTK_EINT_FUNCTION(0, 46),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TMS"),
+ MTK_FUNCTION(4, "APU_JTAG_TMS"),
+ MTK_FUNCTION(5, "CCU_JTAG_TMS"),
+ MTK_FUNCTION(6, "LVTS_SDI"),
+ MTK_FUNCTION(7, "CONN_DSP_JMS")
+ ),
+ MTK_PIN(
+ 47, "GPIO47",
+ MTK_EINT_FUNCTION(0, 47),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TDO"),
+ MTK_FUNCTION(4, "APU_JTAG_TDO"),
+ MTK_FUNCTION(5, "CCU_JTAG_TDO"),
+ MTK_FUNCTION(6, "LVTS_SCF"),
+ MTK_FUNCTION(7, "CONN_DSP_JDO")
+ ),
+ MTK_PIN(
+ 48, "GPIO48",
+ MTK_EINT_FUNCTION(0, 48),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "MD1_SIM1_SIO"),
+ MTK_FUNCTION(2, "MD1_SIM2_SIO"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"),
+ MTK_FUNCTION(4, "APU_JTAG_TRST"),
+ MTK_FUNCTION(5, "CCU_JTAG_TRST"),
+ MTK_FUNCTION(6, "LVTS_FOUT"),
+ MTK_FUNCTION(7, "CONN_DSP_JINTP")
+ ),
+ MTK_PIN(
+ 49, "GPIO49",
+ MTK_EINT_FUNCTION(0, 49),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "MD1_SIM1_SRST"),
+ MTK_FUNCTION(2, "MD1_SIM2_SRST"),
+ MTK_FUNCTION(3, "MCUPM_JTAG_TCK"),
+ MTK_FUNCTION(4, "APU_JTAG_TCK"),
+ MTK_FUNCTION(5, "CCU_JTAG_TCK"),
+ MTK_FUNCTION(6, "LVTS_SDO"),
+ MTK_FUNCTION(7, "CONN_DSP_JCK")
+ ),
+ MTK_PIN(
+ 50, "GPIO50",
+ MTK_EINT_FUNCTION(0, 50),
+ DRV_GRP0,
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "MD1_SIM1_SCLK"),
+ MTK_FUNCTION(2, "MD1_SIM2_SCLK"),
+ MTK_FUNCTION(6, "LVTS_26M")
+ ),
+ MTK_PIN(
+ 51, "GPIO51",
+ MTK_EINT_FUNCTION(0, 51),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "PCM1_CLK"),
+ MTK_FUNCTION(3, "CONN_DSP_JCK"),
+ MTK_FUNCTION(4, "UDI_TCK"),
+ MTK_FUNCTION(5, "IPU_JTAG_TCK"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL3")
+ ),
+ MTK_PIN(
+ 52, "GPIO52",
+ MTK_EINT_FUNCTION(0, 52),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "PCM1_SYNC"),
+ MTK_FUNCTION(3, "CONN_DSP_JMS"),
+ MTK_FUNCTION(4, "UDI_TMS"),
+ MTK_FUNCTION(5, "IPU_JTAG_TMS"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL3")
+ ),
+ MTK_PIN(
+ 53, "GPIO53",
+ MTK_EINT_FUNCTION(0, 53),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "PCM1_DI"),
+ MTK_FUNCTION(3, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(4, "CONN_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 54, "GPIO54",
+ MTK_EINT_FUNCTION(0, 54),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "PCM1_DO0"),
+ MTK_FUNCTION(3, "CONN_DSP_JDI"),
+ MTK_FUNCTION(4, "UDI_TDI"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDI"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL3")
+ ),
+ MTK_PIN(
+ 55, "GPIO55",
+ MTK_EINT_FUNCTION(0, 55),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "PCM1_DO2"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_TCKC"),
+ MTK_FUNCTION(4, "UDI_NTRST"),
+ MTK_FUNCTION(5, "IPU_JTAG_TRST"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL3")
+ ),
+ MTK_PIN(
+ 56, "GPIO56",
+ MTK_EINT_FUNCTION(0, 56),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "PCM1_DO1"),
+ MTK_FUNCTION(3, "CONN_DSP_JDO"),
+ MTK_FUNCTION(4, "UDI_TDO"),
+ MTK_FUNCTION(5, "IPU_JTAG_TDO"),
+ MTK_FUNCTION(6, "SSPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL3")
+ ),
+ MTK_PIN(
+ 57, "GPIO57",
+ MTK_EINT_FUNCTION(0, 57),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "MIPI2_D_SCLK")
+ ),
+ MTK_PIN(
+ 58, "GPIO58",
+ MTK_EINT_FUNCTION(0, 58),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "MIPI2_D_SDATA")
+ ),
+ MTK_PIN(
+ 59, "GPIO59",
+ MTK_EINT_FUNCTION(0, 59),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "MIPI_M_SCLK")
+ ),
+ MTK_PIN(
+ 60, "GPIO60",
+ MTK_EINT_FUNCTION(0, 60),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "MIPI_M_SDATA")
+ ),
+ MTK_PIN(
+ 61, "GPIO61",
+ MTK_EINT_FUNCTION(0, 61),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "MD_UCNT_A_TGL")
+ ),
+ MTK_PIN(
+ 62, "GPIO62",
+ MTK_EINT_FUNCTION(0, 62),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "DIGRF_IRQ")
+ ),
+ MTK_PIN(
+ 63, "GPIO63",
+ MTK_EINT_FUNCTION(0, 63),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "BPI_BUS0"),
+ MTK_FUNCTION(3, "PCIE_WAKE_N")
+ ),
+ MTK_PIN(
+ 64, "GPIO64",
+ MTK_EINT_FUNCTION(0, 64),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "BPI_BUS1"),
+ MTK_FUNCTION(3, "PCIE_PERESET_N")
+ ),
+ MTK_PIN(
+ 65, "GPIO65",
+ MTK_EINT_FUNCTION(0, 65),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "BPI_BUS2"),
+ MTK_FUNCTION(3, "PCIE_CLKREQ_N")
+ ),
+ MTK_PIN(
+ 66, "GPIO66",
+ MTK_EINT_FUNCTION(0, 66),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "BPI_BUS3")
+ ),
+ MTK_PIN(
+ 67, "GPIO67",
+ MTK_EINT_FUNCTION(0, 67),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "BPI_BUS4")
+ ),
+ MTK_PIN(
+ 68, "GPIO68",
+ MTK_EINT_FUNCTION(0, 68),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "BPI_BUS5")
+ ),
+ MTK_PIN(
+ 69, "GPIO69",
+ MTK_EINT_FUNCTION(0, 69),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "BPI_BUS6"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS6")
+ ),
+ MTK_PIN(
+ 70, "GPIO70",
+ MTK_EINT_FUNCTION(0, 70),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "BPI_BUS7"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS7")
+ ),
+ MTK_PIN(
+ 71, "GPIO71",
+ MTK_EINT_FUNCTION(0, 71),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "BPI_BUS8"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS8")
+ ),
+ MTK_PIN(
+ 72, "GPIO72",
+ MTK_EINT_FUNCTION(0, 72),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "BPI_BUS9"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS9")
+ ),
+ MTK_PIN(
+ 73, "GPIO73",
+ MTK_EINT_FUNCTION(0, 73),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "BPI_BUS10"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS10")
+ ),
+ MTK_PIN(
+ 74, "GPIO74",
+ MTK_EINT_FUNCTION(0, 74),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "BPI_BUS11_OLAT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS11_OLAT0")
+ ),
+ MTK_PIN(
+ 75, "GPIO75",
+ MTK_EINT_FUNCTION(0, 75),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "BPI_BUS12_OLAT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS12_OLAT1")
+ ),
+ MTK_PIN(
+ 76, "GPIO76",
+ MTK_EINT_FUNCTION(0, 76),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "BPI_BUS13_OLAT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS13_OLAT2")
+ ),
+ MTK_PIN(
+ 77, "GPIO77",
+ MTK_EINT_FUNCTION(0, 77),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "BPI_BUS14_OLAT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS14_OLAT3")
+ ),
+ MTK_PIN(
+ 78, "GPIO78",
+ MTK_EINT_FUNCTION(0, 78),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "BPI_BUS15_OLAT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS15_OLAT4")
+ ),
+ MTK_PIN(
+ 79, "GPIO79",
+ MTK_EINT_FUNCTION(0, 79),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "BPI_BUS16_OLAT5"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS16_OLAT5")
+ ),
+ MTK_PIN(
+ 80, "GPIO80",
+ MTK_EINT_FUNCTION(0, 80),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "BPI_BUS17_ANT0"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS17_ANT0"),
+ MTK_FUNCTION(3, "PCIE_WAKE_N")
+ ),
+ MTK_PIN(
+ 81, "GPIO81",
+ MTK_EINT_FUNCTION(0, 81),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "BPI_BUS18_ANT1"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS18_ANT1"),
+ MTK_FUNCTION(3, "PCIE_PERESET_N")
+ ),
+ MTK_PIN(
+ 82, "GPIO82",
+ MTK_EINT_FUNCTION(0, 82),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "BPI_BUS19_ANT2"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS19_ANT2"),
+ MTK_FUNCTION(3, "PCIE_CLKREQ_N")
+ ),
+ MTK_PIN(
+ 83, "GPIO83",
+ MTK_EINT_FUNCTION(0, 83),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "BPI_BUS20_ANT3"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS20_ANT3")
+ ),
+ MTK_PIN(
+ 84, "GPIO84",
+ MTK_EINT_FUNCTION(0, 84),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "BPI_BUS21_ANT4"),
+ MTK_FUNCTION(2, "CONN_BPI_BUS21_ANT4")
+ ),
+ MTK_PIN(
+ 85, "GPIO85",
+ MTK_EINT_FUNCTION(0, 85),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "MIPI1_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SCLK")
+ ),
+ MTK_PIN(
+ 86, "GPIO86",
+ MTK_EINT_FUNCTION(0, 86),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "MIPI1_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI1_SDATA")
+ ),
+ MTK_PIN(
+ 87, "GPIO87",
+ MTK_EINT_FUNCTION(0, 87),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "MIPI0_D_SCLK"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SCLK")
+ ),
+ MTK_PIN(
+ 88, "GPIO88",
+ MTK_EINT_FUNCTION(0, 88),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "MIPI0_D_SDATA"),
+ MTK_FUNCTION(2, "CONN_MIPI0_SDATA")
+ ),
+ MTK_PIN(
+ 89, "GPIO89",
+ MTK_EINT_FUNCTION(0, 89),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "SPMI_SCL"),
+ MTK_FUNCTION(2, "SCL10")
+ ),
+ MTK_PIN(
+ 90, "GPIO90",
+ MTK_EINT_FUNCTION(0, 90),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "SPMI_SDA"),
+ MTK_FUNCTION(2, "SDA10")
+ ),
+ MTK_PIN(
+ 91, "GPIO91",
+ MTK_EINT_FUNCTION(0, 91),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO91"),
+ MTK_FUNCTION(1, "AP_GOOD")
+ ),
+ MTK_PIN(
+ 92, "GPIO92",
+ MTK_EINT_FUNCTION(0, 92),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO92"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "MD_URXD0"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "CONN_UART0_RXD")
+ ),
+ MTK_PIN(
+ 93, "GPIO93",
+ MTK_EINT_FUNCTION(0, 93),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO93"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "MD_UTXD0"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "CONN_UART0_TXD"),
+ MTK_FUNCTION(6, "WIFI_TXD")
+ ),
+ MTK_PIN(
+ 94, "GPIO94",
+ MTK_EINT_FUNCTION(0, 94),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO94"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "ADSP_URXD0"),
+ MTK_FUNCTION(3, "MD32_0_RXD"),
+ MTK_FUNCTION(4, "SSPM_URXD_AO"),
+ MTK_FUNCTION(5, "TP_URXD1_AO"),
+ MTK_FUNCTION(6, "TP_URXD2_AO"),
+ MTK_FUNCTION(7, "MBISTREADEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 95, "GPIO95",
+ MTK_EINT_FUNCTION(0, 95),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "ADSP_UTXD0"),
+ MTK_FUNCTION(3, "MD32_0_TXD"),
+ MTK_FUNCTION(4, "SSPM_UTXD_AO"),
+ MTK_FUNCTION(5, "TP_UTXD1_AO"),
+ MTK_FUNCTION(6, "TP_UTXD2_AO"),
+ MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER")
+ ),
+ MTK_PIN(
+ 96, "GPIO96",
+ MTK_EINT_FUNCTION(0, 96),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "TDM_LRCK"),
+ MTK_FUNCTION(2, "I2S7_LRCK"),
+ MTK_FUNCTION(3, "I2S9_LRCK"),
+ MTK_FUNCTION(4, "DPI_D0"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDI"),
+ MTK_FUNCTION(7, "IO_JTAG_TDI")
+ ),
+ MTK_PIN(
+ 97, "GPIO97",
+ MTK_EINT_FUNCTION(0, 97),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "TDM_BCK"),
+ MTK_FUNCTION(2, "I2S7_BCK"),
+ MTK_FUNCTION(3, "I2S9_BCK"),
+ MTK_FUNCTION(4, "DPI_D1"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"),
+ MTK_FUNCTION(7, "IO_JTAG_TRSTN")
+ ),
+ MTK_PIN(
+ 98, "GPIO98",
+ MTK_EINT_FUNCTION(0, 98),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "TDM_MCK"),
+ MTK_FUNCTION(2, "I2S7_MCK"),
+ MTK_FUNCTION(3, "I2S9_MCK"),
+ MTK_FUNCTION(4, "DPI_D2"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TCK"),
+ MTK_FUNCTION(7, "IO_JTAG_TCK")
+ ),
+ MTK_PIN(
+ 99, "GPIO99",
+ MTK_EINT_FUNCTION(0, 99),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "TDM_DATA0"),
+ MTK_FUNCTION(2, "I2S6_DI"),
+ MTK_FUNCTION(3, "I2S8_DI"),
+ MTK_FUNCTION(4, "DPI_D3"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TDO"),
+ MTK_FUNCTION(7, "IO_JTAG_TDO")
+ ),
+ MTK_PIN(
+ 100, "GPIO100",
+ MTK_EINT_FUNCTION(0, 100),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "TDM_DATA1"),
+ MTK_FUNCTION(2, "I2S7_DO"),
+ MTK_FUNCTION(3, "I2S9_DO"),
+ MTK_FUNCTION(4, "DPI_D4"),
+ MTK_FUNCTION(5, "ADSP_JTAG0_TMS"),
+ MTK_FUNCTION(7, "IO_JTAG_TMS")
+ ),
+ MTK_PIN(
+ 101, "GPIO101",
+ MTK_EINT_FUNCTION(0, 101),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "TDM_DATA2"),
+ MTK_FUNCTION(2, "DMIC1_CLK"),
+ MTK_FUNCTION(3, "SRCLKENAI0"),
+ MTK_FUNCTION(4, "DPI_D5"),
+ MTK_FUNCTION(5, "CLKM0"),
+ MTK_FUNCTION(7, "DAP_MD32_SWD")
+ ),
+ MTK_PIN(
+ 102, "GPIO102",
+ MTK_EINT_FUNCTION(0, 102),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "TDM_DATA3"),
+ MTK_FUNCTION(2, "DMIC1_DAT"),
+ MTK_FUNCTION(3, "SRCLKENAI1"),
+ MTK_FUNCTION(4, "DPI_D6"),
+ MTK_FUNCTION(6, "DVFSRC_EXT_REQ"),
+ MTK_FUNCTION(7, "DAP_MD32_SWCK")
+ ),
+ MTK_PIN(
+ 103, "GPIO103",
+ MTK_EINT_FUNCTION(0, 103),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SPI0_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI0_MI"),
+ MTK_FUNCTION(4, "DPI_D7"),
+ MTK_FUNCTION(5, "DFD_TDO"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDO"),
+ MTK_FUNCTION(7, "JTDO_SEL1")
+ ),
+ MTK_PIN(
+ 104, "GPIO104",
+ MTK_EINT_FUNCTION(0, 104),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SPI0_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI0_CS"),
+ MTK_FUNCTION(4, "DPI_D8"),
+ MTK_FUNCTION(5, "DFD_TMS"),
+ MTK_FUNCTION(6, "SPM_JTAG_TMS"),
+ MTK_FUNCTION(7, "JTMS_SEL1")
+ ),
+ MTK_PIN(
+ 105, "GPIO105",
+ MTK_EINT_FUNCTION(0, 105),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "SPI0_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI0_MO"),
+ MTK_FUNCTION(3, "SCP_SDA0"),
+ MTK_FUNCTION(4, "DPI_D9"),
+ MTK_FUNCTION(5, "DFD_TDI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TDI"),
+ MTK_FUNCTION(7, "JTDI_SEL1")
+ ),
+ MTK_PIN(
+ 106, "GPIO106",
+ MTK_EINT_FUNCTION(0, 106),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "SPI0_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI0_CK"),
+ MTK_FUNCTION(3, "SCP_SCL0"),
+ MTK_FUNCTION(4, "DPI_D10"),
+ MTK_FUNCTION(5, "DFD_TCK_XI"),
+ MTK_FUNCTION(6, "SPM_JTAG_TCK"),
+ MTK_FUNCTION(7, "JTCK_SEL1")
+ ),
+ MTK_PIN(
+ 107, "GPIO107",
+ MTK_EINT_FUNCTION(0, 107),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "DMIC_CLK"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "CLKM2"),
+ MTK_FUNCTION(6, "SPM_JTAG_TRSTN"),
+ MTK_FUNCTION(7, "JTRSTN_SEL1")
+ ),
+ MTK_PIN(
+ 108, "GPIO108",
+ MTK_EINT_FUNCTION(0, 108),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "DMIC_DAT"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "CLKM3"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWD")
+ ),
+ MTK_PIN(
+ 109, "GPIO109",
+ MTK_EINT_FUNCTION(0, 109),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "I2S1_MCK"),
+ MTK_FUNCTION(2, "I2S3_MCK"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "DPI_DE"),
+ MTK_FUNCTION(5, "I2S2_MCK"),
+ MTK_FUNCTION(6, "SRCLKENAI0"),
+ MTK_FUNCTION(7, "DAP_SONIC_SWCK")
+ ),
+ MTK_PIN(
+ 110, "GPIO110",
+ MTK_EINT_FUNCTION(0, 110),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(2, "I2S3_BCK"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "DPI_D11"),
+ MTK_FUNCTION(5, "I2S2_BCK"),
+ MTK_FUNCTION(6, "CONN_MCU_TDO")
+ ),
+ MTK_PIN(
+ 111, "GPIO111",
+ MTK_EINT_FUNCTION(0, 111),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(2, "I2S3_LRCK"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "DPI_VSYNC"),
+ MTK_FUNCTION(5, "I2S2_LRCK"),
+ MTK_FUNCTION(6, "CONN_MCU_TDI")
+ ),
+ MTK_PIN(
+ 112, "GPIO112",
+ MTK_EINT_FUNCTION(0, 112),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "I2S2_DI"),
+ MTK_FUNCTION(2, "I2S0_DI"),
+ MTK_FUNCTION(3, "I2S2_DI2"),
+ MTK_FUNCTION(4, "DPI_CK"),
+ MTK_FUNCTION(5, "I2S2_DI"),
+ MTK_FUNCTION(6, "CONN_MCU_TMS")
+ ),
+ MTK_PIN(
+ 113, "GPIO113",
+ MTK_EINT_FUNCTION(0, 113),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "I2S1_DO"),
+ MTK_FUNCTION(2, "I2S3_DO"),
+ MTK_FUNCTION(3, "I2S5_DO"),
+ MTK_FUNCTION(4, "DPI_HSYNC"),
+ MTK_FUNCTION(5, "I2S2_DI2"),
+ MTK_FUNCTION(6, "CONN_MCU_TCK")
+ ),
+ MTK_PIN(
+ 114, "GPIO114",
+ MTK_EINT_FUNCTION(0, 114),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "SPI2_MI"),
+ MTK_FUNCTION(2, "SCP_SPI2_MI"),
+ MTK_FUNCTION(4, "PCM0_DI"),
+ MTK_FUNCTION(6, "CONN_MCU_TRST_B")
+ ),
+ MTK_PIN(
+ 115, "GPIO115",
+ MTK_EINT_FUNCTION(0, 115),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "SPI2_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI2_CS"),
+ MTK_FUNCTION(4, "PCM0_SYNC"),
+ MTK_FUNCTION(6, "CONN_MCU_DBGI_N")
+ ),
+ MTK_PIN(
+ 116, "GPIO116",
+ MTK_EINT_FUNCTION(0, 116),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "SPI2_MO"),
+ MTK_FUNCTION(2, "SCP_SPI2_MO"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(4, "PCM0_DO"),
+ MTK_FUNCTION(6, "CONN_MCU_DBGACK_N")
+ ),
+ MTK_PIN(
+ 117, "GPIO117",
+ MTK_EINT_FUNCTION(0, 117),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "SPI2_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI2_CK"),
+ MTK_FUNCTION(3, "SCP_SCL1"),
+ MTK_FUNCTION(4, "PCM0_CLK")
+ ),
+ MTK_PIN(
+ 118, "GPIO118",
+ MTK_EINT_FUNCTION(0, 118),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "SCL1"),
+ MTK_FUNCTION(2, "SCP_SCL0"),
+ MTK_FUNCTION(3, "SCP_SCL1")
+ ),
+ MTK_PIN(
+ 119, "GPIO119",
+ MTK_EINT_FUNCTION(0, 119),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "SDA1"),
+ MTK_FUNCTION(2, "SCP_SDA0"),
+ MTK_FUNCTION(3, "SCP_SDA1")
+ ),
+ MTK_PIN(
+ 120, "GPIO120",
+ MTK_EINT_FUNCTION(0, 120),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "SCL9"),
+ MTK_FUNCTION(2, "SCP_SCL0")
+ ),
+ MTK_PIN(
+ 121, "GPIO121",
+ MTK_EINT_FUNCTION(0, 121),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "SDA9"),
+ MTK_FUNCTION(2, "SCP_SDA0")
+ ),
+ MTK_PIN(
+ 122, "GPIO122",
+ MTK_EINT_FUNCTION(0, 122),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "SCL8"),
+ MTK_FUNCTION(2, "SCP_SDA0")
+ ),
+ MTK_PIN(
+ 123, "GPIO123",
+ MTK_EINT_FUNCTION(0, 123),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "SDA8"),
+ MTK_FUNCTION(2, "SCP_SCL0")
+ ),
+ MTK_PIN(
+ 124, "GPIO124",
+ MTK_EINT_FUNCTION(0, 124),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "SCL7"),
+ MTK_FUNCTION(2, "DMIC1_CLK")
+ ),
+ MTK_PIN(
+ 125, "GPIO125",
+ MTK_EINT_FUNCTION(0, 125),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "SDA7"),
+ MTK_FUNCTION(2, "DMIC1_DAT")
+ ),
+ MTK_PIN(
+ 126, "GPIO126",
+ MTK_EINT_FUNCTION(0, 126),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "CMFLASH0"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "UCTS0"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(7, "DBG_MON_A14")
+ ),
+ MTK_PIN(
+ 127, "GPIO127",
+ MTK_EINT_FUNCTION(0, 127),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "CMFLASH1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "URTS0"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(7, "DBG_MON_A15")
+ ),
+ MTK_PIN(
+ 128, "GPIO128",
+ MTK_EINT_FUNCTION(0, 128),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "CMFLASH2"),
+ MTK_FUNCTION(2, "PWM_0"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "UCTS1"),
+ MTK_FUNCTION(5, "SCL_6306"),
+ MTK_FUNCTION(7, "DBG_MON_A16")
+ ),
+ MTK_PIN(
+ 129, "GPIO129",
+ MTK_EINT_FUNCTION(0, 129),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "CMFLASH3"),
+ MTK_FUNCTION(2, "PWM_1"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "URTS1"),
+ MTK_FUNCTION(5, "SDA_6306"),
+ MTK_FUNCTION(7, "DBG_MON_A17")
+ ),
+ MTK_PIN(
+ 130, "GPIO130",
+ MTK_EINT_FUNCTION(0, 130),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "ANT_SEL10"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDO"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDO"),
+ MTK_FUNCTION(5, "SCL11"),
+ MTK_FUNCTION(6, "SPI5_B_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A22")
+ ),
+ MTK_PIN(
+ 131, "GPIO131",
+ MTK_EINT_FUNCTION(0, 131),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "ANT_SEL11"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TDI"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TDI"),
+ MTK_FUNCTION(5, "SDA11"),
+ MTK_FUNCTION(6, "SPI5_B_MO"),
+ MTK_FUNCTION(7, "DBG_MON_A25")
+ ),
+ MTK_PIN(
+ 132, "GPIO132",
+ MTK_EINT_FUNCTION(0, 132),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "ANT_SEL12"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TMS"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_A28")
+ ),
+ MTK_PIN(
+ 133, "GPIO133",
+ MTK_EINT_FUNCTION(0, 133),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TCK"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TCK"),
+ MTK_FUNCTION(6, "SPI5_B_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A23")
+ ),
+ MTK_PIN(
+ 134, "GPIO134",
+ MTK_EINT_FUNCTION(0, 134),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(3, "SCP_JTAG0_TRSTN"),
+ MTK_FUNCTION(4, "MD32_0_JTAG_TRST"),
+ MTK_FUNCTION(7, "DBG_MON_A26")
+ ),
+ MTK_PIN(
+ 135, "GPIO135",
+ MTK_EINT_FUNCTION(0, 135),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "PWM_0"),
+ MTK_FUNCTION(2, "SRCLKENAI1"),
+ MTK_FUNCTION(3, "MD_URXD0"),
+ MTK_FUNCTION(4, "MD32_0_RXD"),
+ MTK_FUNCTION(5, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(7, "DBG_MON_A29")
+ ),
+ MTK_PIN(
+ 136, "GPIO136",
+ MTK_EINT_FUNCTION(0, 136),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "CMMCLK3"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD_UTXD0"),
+ MTK_FUNCTION(4, "MD32_0_TXD"),
+ MTK_FUNCTION(6, "SPI5_B_MI"),
+ MTK_FUNCTION(7, "DBG_MON_A24")
+ ),
+ MTK_PIN(
+ 137, "GPIO137",
+ MTK_EINT_FUNCTION(0, 137),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "CMMCLK4"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD_URXD1"),
+ MTK_FUNCTION(6, "CONN_UART0_RXD"),
+ MTK_FUNCTION(7, "DBG_MON_A27")
+ ),
+ MTK_PIN(
+ 138, "GPIO138",
+ MTK_EINT_FUNCTION(0, 138),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "CMMCLK5"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(3, "MD_UTXD1"),
+ MTK_FUNCTION(6, "CONN_UART0_TXD"),
+ MTK_FUNCTION(7, "DBG_MON_A30")
+ ),
+ MTK_PIN(
+ 139, "GPIO139",
+ MTK_EINT_FUNCTION(0, 139),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "SCL4"),
+ MTK_FUNCTION(7, "DBG_MON_A21")
+ ),
+ MTK_PIN(
+ 140, "GPIO140",
+ MTK_EINT_FUNCTION(0, 140),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "SDA4"),
+ MTK_FUNCTION(7, "DBG_MON_A20")
+ ),
+ MTK_PIN(
+ 141, "GPIO141",
+ MTK_EINT_FUNCTION(0, 141),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "SCL2"),
+ MTK_FUNCTION(7, "DBG_MON_A18")
+ ),
+ MTK_PIN(
+ 142, "GPIO142",
+ MTK_EINT_FUNCTION(0, 142),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "SDA2"),
+ MTK_FUNCTION(7, "DBG_MON_A19")
+ ),
+ MTK_PIN(
+ 143, "GPIO143",
+ MTK_EINT_FUNCTION(0, 143),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "CMVREF0"),
+ MTK_FUNCTION(2, "SPI3_CLK"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDO"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A31")
+ ),
+ MTK_PIN(
+ 144, "GPIO144",
+ MTK_EINT_FUNCTION(0, 144),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "CMVREF1"),
+ MTK_FUNCTION(2, "SPI3_CSB"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TDI"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TDI")
+ ),
+ MTK_PIN(
+ 145, "GPIO145",
+ MTK_EINT_FUNCTION(0, 145),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "CMVREF2"),
+ MTK_FUNCTION(2, "SPI3_MI"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TMS"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TMS")
+ ),
+ MTK_PIN(
+ 146, "GPIO146",
+ MTK_EINT_FUNCTION(0, 146),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "CMVREF3"),
+ MTK_FUNCTION(2, "SPI3_MO"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TCK"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_A32")
+ ),
+ MTK_PIN(
+ 147, "GPIO147",
+ MTK_EINT_FUNCTION(0, 147),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "CMVREF4"),
+ MTK_FUNCTION(2, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(3, "ADSP_JTAG1_TRSTN"),
+ MTK_FUNCTION(4, "SCP_JTAG1_TRSTN")
+ ),
+ MTK_PIN(
+ 148, "GPIO148",
+ MTK_EINT_FUNCTION(0, 148),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "PWM_1"),
+ MTK_FUNCTION(2, "AGPS_SYNC"),
+ MTK_FUNCTION(3, "CMMCLK5")
+ ),
+ MTK_PIN(
+ 149, "GPIO149",
+ MTK_EINT_FUNCTION(0, 149),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "CMMCLK0"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "MD32_0_GPIO0")
+ ),
+ MTK_PIN(
+ 150, "GPIO150",
+ MTK_EINT_FUNCTION(0, 150),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "CMMCLK1"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "MD32_0_GPIO1"),
+ MTK_FUNCTION(7, "CONN_MCU_AICE_TMSC")
+ ),
+ MTK_PIN(
+ 151, "GPIO151",
+ MTK_EINT_FUNCTION(0, 151),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "CMMCLK2"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(3, "MD32_0_GPIO2"),
+ MTK_FUNCTION(7, "CONN_MCU_AICE_TCKC")
+ ),
+ MTK_PIN(
+ 152, "GPIO152",
+ MTK_EINT_FUNCTION(0, 152),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "KPROW1"),
+ MTK_FUNCTION(2, "PWM_2"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(6, "MBISTREADEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B9")
+ ),
+ MTK_PIN(
+ 153, "GPIO153",
+ MTK_EINT_FUNCTION(0, 153),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "KPROW0"),
+ MTK_FUNCTION(7, "DBG_MON_B8")
+ ),
+ MTK_PIN(
+ 154, "GPIO154",
+ MTK_EINT_FUNCTION(0, 154),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "KPCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B6")
+ ),
+ MTK_PIN(
+ 155, "GPIO155",
+ MTK_EINT_FUNCTION(0, 155),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "KPCOL1"),
+ MTK_FUNCTION(2, "PWM_3"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER"),
+ MTK_FUNCTION(7, "DBG_MON_B7")
+ ),
+ MTK_PIN(
+ 156, "GPIO156",
+ MTK_EINT_FUNCTION(0, 156),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "SPI1_A_CLK"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CK"),
+ MTK_FUNCTION(3, "MRG_CLK"),
+ MTK_FUNCTION(4, "AGPS_SYNC"),
+ MTK_FUNCTION(5, "MD_URXD0"),
+ MTK_FUNCTION(6, "UDI_TMS"),
+ MTK_FUNCTION(7, "DBG_MON_B10")
+ ),
+ MTK_PIN(
+ 157, "GPIO157",
+ MTK_EINT_FUNCTION(0, 157),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "SPI1_A_CSB"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_CS"),
+ MTK_FUNCTION(3, "MRG_SYNC"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "MD_UTXD0"),
+ MTK_FUNCTION(6, "UDI_TCK"),
+ MTK_FUNCTION(7, "DBG_MON_B11")
+ ),
+ MTK_PIN(
+ 158, "GPIO158",
+ MTK_EINT_FUNCTION(0, 158),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "SPI1_A_MI"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MI"),
+ MTK_FUNCTION(3, "MRG_DI"),
+ MTK_FUNCTION(4, "PTA_RXD"),
+ MTK_FUNCTION(5, "MD_URXD1"),
+ MTK_FUNCTION(6, "UDI_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_B12")
+ ),
+ MTK_PIN(
+ 159, "GPIO159",
+ MTK_EINT_FUNCTION(0, 159),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "SPI1_A_MO"),
+ MTK_FUNCTION(2, "SCP_SPI1_A_MO"),
+ MTK_FUNCTION(3, "MRG_DO"),
+ MTK_FUNCTION(4, "PTA_TXD"),
+ MTK_FUNCTION(5, "MD_UTXD1"),
+ MTK_FUNCTION(6, "UDI_NTRST"),
+ MTK_FUNCTION(7, "DBG_MON_B13")
+ ),
+ MTK_PIN(
+ 160, "GPIO160",
+ MTK_EINT_FUNCTION(0, 160),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "SCL3"),
+ MTK_FUNCTION(3, "SCP_SCL1"),
+ MTK_FUNCTION(7, "DBG_MON_B14")
+ ),
+ MTK_PIN(
+ 161, "GPIO161",
+ MTK_EINT_FUNCTION(0, 161),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "SDA3"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(7, "DBG_MON_B15")
+ ),
+ MTK_PIN(
+ 162, "GPIO162",
+ MTK_EINT_FUNCTION(0, 162),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "ANT_SEL0"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(6, "UDI_TDI"),
+ MTK_FUNCTION(7, "DBG_MON_B16")
+ ),
+ MTK_PIN(
+ 163, "GPIO163",
+ MTK_EINT_FUNCTION(0, 163),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "ANT_SEL1"),
+ MTK_FUNCTION(2, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(7, "DBG_MON_B17")
+ ),
+ MTK_PIN(
+ 164, "GPIO164",
+ MTK_EINT_FUNCTION(0, 164),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "ANT_SEL2"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CK"),
+ MTK_FUNCTION(3, "TP_URXD1_AO"),
+ MTK_FUNCTION(5, "UCTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B18")
+ ),
+ MTK_PIN(
+ 165, "GPIO165",
+ MTK_EINT_FUNCTION(0, 165),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "ANT_SEL3"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_CS"),
+ MTK_FUNCTION(3, "TP_UTXD1_AO"),
+ MTK_FUNCTION(4, "CONN_TCXOENA_REQ"),
+ MTK_FUNCTION(5, "URTS0"),
+ MTK_FUNCTION(7, "DBG_MON_B19")
+ ),
+ MTK_PIN(
+ 166, "GPIO166",
+ MTK_EINT_FUNCTION(0, 166),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "ANT_SEL4"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MI"),
+ MTK_FUNCTION(3, "TP_URXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B20")
+ ),
+ MTK_PIN(
+ 167, "GPIO167",
+ MTK_EINT_FUNCTION(0, 167),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "ANT_SEL5"),
+ MTK_FUNCTION(2, "SCP_SPI1_B_MO"),
+ MTK_FUNCTION(3, "TP_UTXD2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B21")
+ ),
+ MTK_PIN(
+ 168, "GPIO168",
+ MTK_EINT_FUNCTION(0, 168),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "ANT_SEL6"),
+ MTK_FUNCTION(2, "SPI0_B_CLK"),
+ MTK_FUNCTION(3, "TP_UCTS1_AO"),
+ MTK_FUNCTION(4, "KPCOL2"),
+ MTK_FUNCTION(5, "MD_UCTS0"),
+ MTK_FUNCTION(6, "SCL11"),
+ MTK_FUNCTION(7, "DBG_MON_B22")
+ ),
+ MTK_PIN(
+ 169, "GPIO169",
+ MTK_EINT_FUNCTION(0, 169),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "ANT_SEL7"),
+ MTK_FUNCTION(2, "SPI0_B_CSB"),
+ MTK_FUNCTION(3, "TP_URTS1_AO"),
+ MTK_FUNCTION(4, "KPROW2"),
+ MTK_FUNCTION(5, "MD_URTS0"),
+ MTK_FUNCTION(6, "SDA11"),
+ MTK_FUNCTION(7, "DBG_MON_B23")
+ ),
+ MTK_PIN(
+ 170, "GPIO170",
+ MTK_EINT_FUNCTION(0, 170),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "ANT_SEL8"),
+ MTK_FUNCTION(2, "SPI0_B_MI"),
+ MTK_FUNCTION(3, "TP_UCTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI1"),
+ MTK_FUNCTION(5, "MD_UCTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B24")
+ ),
+ MTK_PIN(
+ 171, "GPIO171",
+ MTK_EINT_FUNCTION(0, 171),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "ANT_SEL9"),
+ MTK_FUNCTION(2, "SPI0_B_MO"),
+ MTK_FUNCTION(3, "TP_URTS2_AO"),
+ MTK_FUNCTION(4, "SRCLKENAI0"),
+ MTK_FUNCTION(5, "MD_URTS1"),
+ MTK_FUNCTION(7, "DBG_MON_B25")
+ ),
+ MTK_PIN(
+ 172, "GPIO172",
+ MTK_EINT_FUNCTION(0, 172),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "CONN_TOP_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK0"),
+ MTK_FUNCTION(7, "DBG_MON_B29")
+ ),
+ MTK_PIN(
+ 173, "GPIO173",
+ MTK_EINT_FUNCTION(0, 173),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "CONN_TOP_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST0"),
+ MTK_FUNCTION(7, "DBG_MON_B30")
+ ),
+ MTK_PIN(
+ 174, "GPIO174",
+ MTK_EINT_FUNCTION(0, 174),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "CONN_HRST_B"),
+ MTK_FUNCTION(7, "DBG_MON_B28")
+ ),
+ MTK_PIN(
+ 175, "GPIO175",
+ MTK_EINT_FUNCTION(0, 175),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "CONN_WB_PTA"),
+ MTK_FUNCTION(7, "DBG_MON_B31")
+ ),
+ MTK_PIN(
+ 176, "GPIO176",
+ MTK_EINT_FUNCTION(0, 176),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "CONN_BT_CLK"),
+ MTK_FUNCTION(2, "AUXIF_CLK1"),
+ MTK_FUNCTION(7, "DBG_MON_B26")
+ ),
+ MTK_PIN(
+ 177, "GPIO177",
+ MTK_EINT_FUNCTION(0, 177),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "CONN_BT_DATA"),
+ MTK_FUNCTION(2, "AUXIF_ST1"),
+ MTK_FUNCTION(7, "DBG_MON_B27")
+ ),
+ MTK_PIN(
+ 178, "GPIO178",
+ MTK_EINT_FUNCTION(0, 178),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL0")
+ ),
+ MTK_PIN(
+ 179, "GPIO179",
+ MTK_EINT_FUNCTION(0, 179),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL1"),
+ MTK_FUNCTION(2, "UFS_MPHY_SCL")
+ ),
+ MTK_PIN(
+ 180, "GPIO180",
+ MTK_EINT_FUNCTION(0, 180),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL2"),
+ MTK_FUNCTION(2, "UFS_MPHY_SDA")
+ ),
+ MTK_PIN(
+ 181, "GPIO181",
+ MTK_EINT_FUNCTION(0, 181),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL3")
+ ),
+ MTK_PIN(
+ 182, "GPIO182",
+ MTK_EINT_FUNCTION(0, 182),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "CONN_WF_CTRL4")
+ ),
+ MTK_PIN(
+ 183, "GPIO183",
+ MTK_EINT_FUNCTION(0, 183),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "MSDC0_CMD")
+ ),
+ MTK_PIN(
+ 184, "GPIO184",
+ MTK_EINT_FUNCTION(0, 184),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "MSDC0_DAT0")
+ ),
+ MTK_PIN(
+ 185, "GPIO185",
+ MTK_EINT_FUNCTION(0, 185),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "MSDC0_DAT2")
+ ),
+ MTK_PIN(
+ 186, "GPIO186",
+ MTK_EINT_FUNCTION(0, 186),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "MSDC0_DAT4")
+ ),
+ MTK_PIN(
+ 187, "GPIO187",
+ MTK_EINT_FUNCTION(0, 187),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "MSDC0_DAT6")
+ ),
+ MTK_PIN(
+ 188, "GPIO188",
+ MTK_EINT_FUNCTION(0, 188),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "MSDC0_DAT1")
+ ),
+ MTK_PIN(
+ 189, "GPIO189",
+ MTK_EINT_FUNCTION(0, 189),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(1, "MSDC0_DAT5")
+ ),
+ MTK_PIN(
+ 190, "GPIO190",
+ MTK_EINT_FUNCTION(0, 190),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "MSDC0_DAT7")
+ ),
+ MTK_PIN(
+ 191, "GPIO191",
+ MTK_EINT_FUNCTION(0, 191),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "MSDC0_DSL"),
+ MTK_FUNCTION(2, "GPS_L1_ELNA_EN"),
+ MTK_FUNCTION(3, "IDDIG"),
+ MTK_FUNCTION(4, "DMIC_CLK")
+ ),
+ MTK_PIN(
+ 192, "GPIO192",
+ MTK_EINT_FUNCTION(0, 192),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(3, "USB_DRVVBUS"),
+ MTK_FUNCTION(4, "DMIC_DAT")
+ ),
+ MTK_PIN(
+ 193, "GPIO193",
+ MTK_EINT_FUNCTION(0, 193),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "MSDC0_DAT3")
+ ),
+ MTK_PIN(
+ 194, "GPIO194",
+ MTK_EINT_FUNCTION(0, 194),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "MSDC0_RSTB")
+ ),
+ MTK_PIN(
+ 195, "GPIO195",
+ MTK_EINT_FUNCTION(0, 195),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "SCP_VREQ_VAO"),
+ MTK_FUNCTION(2, "DVFSRC_EXT_REQ")
+ ),
+ MTK_PIN(
+ 196, "GPIO196",
+ MTK_EINT_FUNCTION(0, 196),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI2")
+ ),
+ MTK_PIN(
+ 197, "GPIO197",
+ MTK_EINT_FUNCTION(0, 197),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI1"),
+ MTK_FUNCTION(2, "AUD_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_MCK"),
+ MTK_FUNCTION(4, "I2S6_MCK"),
+ MTK_FUNCTION(5, "I2S8_MCK")
+ ),
+ MTK_PIN(
+ 198, "GPIO198",
+ MTK_EINT_FUNCTION(0, 198),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "AUD_NLE_MOSI0"),
+ MTK_FUNCTION(2, "AUD_SYNC_MISO"),
+ MTK_FUNCTION(3, "I2S2_BCK"),
+ MTK_FUNCTION(4, "I2S6_BCK"),
+ MTK_FUNCTION(5, "I2S8_BCK")
+ ),
+ MTK_PIN(
+ 199, "GPIO199",
+ MTK_EINT_FUNCTION(0, 199),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO2"),
+ MTK_FUNCTION(3, "I2S2_DI2")
+ ),
+ MTK_PIN(
+ 200, "GPIO200",
+ MTK_EINT_FUNCTION(0, 200),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SCL6"),
+ MTK_FUNCTION(3, "SCP_SCL1"),
+ MTK_FUNCTION(4, "SCL_6306"),
+ MTK_FUNCTION(7, "DBG_MON_A4")
+ ),
+ MTK_PIN(
+ 201, "GPIO201",
+ MTK_EINT_FUNCTION(0, 201),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SDA6"),
+ MTK_FUNCTION(3, "SCP_SDA1"),
+ MTK_FUNCTION(4, "SDA_6306"),
+ MTK_FUNCTION(7, "DBG_MON_A5")
+ ),
+ MTK_PIN(
+ 202, "GPIO202",
+ MTK_EINT_FUNCTION(0, 202),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SCL5")
+ ),
+ MTK_PIN(
+ 203, "GPIO203",
+ MTK_EINT_FUNCTION(0, 203),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "SDA5")
+ ),
+ MTK_PIN(
+ 204, "GPIO204",
+ MTK_EINT_FUNCTION(0, 204),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "SCL0"),
+ MTK_FUNCTION(6, "SPI7_A_CLK"),
+ MTK_FUNCTION(7, "DBG_MON_A2")
+ ),
+ MTK_PIN(
+ 205, "GPIO205",
+ MTK_EINT_FUNCTION(0, 205),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "SDA0"),
+ MTK_FUNCTION(6, "SPI7_A_CSB"),
+ MTK_FUNCTION(7, "DBG_MON_A3")
+ ),
+ MTK_PIN(
+ 206, "GPIO206",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(1, "SRCLKENA0")
+ ),
+ MTK_PIN(
+ 207, "GPIO207",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "SRCLKENA1")
+ ),
+ MTK_PIN(
+ 208, "GPIO208",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ 209, "GPIO209",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MI"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MO")
+ ),
+ MTK_PIN(
+ 210, "GPIO210",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CSN")
+ ),
+ MTK_PIN(
+ 211, "GPIO211",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_MO"),
+ MTK_FUNCTION(2, "PWRAP_SPI0_MI")
+ ),
+ MTK_PIN(
+ 212, "GPIO212",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "PWRAP_SPI0_CK")
+ ),
+ MTK_PIN(
+ 213, "GPIO213",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ 214, "GPIO214",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "AUD_CLK_MOSI"),
+ MTK_FUNCTION(3, "I2S1_MCK"),
+ MTK_FUNCTION(4, "I2S7_MCK"),
+ MTK_FUNCTION(5, "I2S9_MCK")
+ ),
+ MTK_PIN(
+ 215, "GPIO215",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "AUD_SYNC_MOSI"),
+ MTK_FUNCTION(3, "I2S1_BCK"),
+ MTK_FUNCTION(4, "I2S7_BCK"),
+ MTK_FUNCTION(5, "I2S9_BCK")
+ ),
+ MTK_PIN(
+ 216, "GPIO216",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI0"),
+ MTK_FUNCTION(3, "I2S1_LRCK"),
+ MTK_FUNCTION(4, "I2S7_LRCK"),
+ MTK_FUNCTION(5, "I2S9_LRCK")
+ ),
+ MTK_PIN(
+ 217, "GPIO217",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "AUD_DAT_MOSI1"),
+ MTK_FUNCTION(3, "I2S1_DO"),
+ MTK_FUNCTION(4, "I2S7_DO"),
+ MTK_FUNCTION(5, "I2S9_DO")
+ ),
+ MTK_PIN(
+ 218, "GPIO218",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO0"),
+ MTK_FUNCTION(2, "VOW_DAT_MISO"),
+ MTK_FUNCTION(3, "I2S2_LRCK"),
+ MTK_FUNCTION(4, "I2S6_LRCK"),
+ MTK_FUNCTION(5, "I2S8_LRCK")
+ ),
+ MTK_PIN(
+ 219, "GPIO219",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "AUD_DAT_MISO1"),
+ MTK_FUNCTION(2, "VOW_CLK_MISO"),
+ MTK_FUNCTION(3, "I2S2_DI"),
+ MTK_FUNCTION(4, "I2S6_DI"),
+ MTK_FUNCTION(5, "I2S8_DI")
+ ),
+ MTK_PIN(
+ 220, "GPIO220",
+ MTK_EINT_FUNCTION(0, 208),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO220")
+ ),
+ MTK_PIN(
+ 221, "GPIO221",
+ MTK_EINT_FUNCTION(0, 209),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO221")
+ ),
+ MTK_PIN(
+ 222, "GPIO222",
+ MTK_EINT_FUNCTION(0, 210),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO222")
+ ),
+ MTK_PIN(
+ 223, "GPIO223",
+ MTK_EINT_FUNCTION(0, 211),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO223")
+ ),
+ MTK_PIN(
+ 224, "GPIO224",
+ MTK_EINT_FUNCTION(0, 212),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO224")
+ ),
+ MTK_PIN(
+ 225, "GPIO225",
+ MTK_EINT_FUNCTION(0, 214),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO225")
+ ),
+ MTK_PIN(
+ 226, "GPIO226",
+ MTK_EINT_FUNCTION(0, 215),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO226")
+ ),
+ MTK_PIN(
+ 227, "GPIO227",
+ MTK_EINT_FUNCTION(0, 216),
+ DRV_GRP4,
+ MTK_FUNCTION(0, "GPIO227")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT8192_H */
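
Every MTK_PIN entry in the table above has the same shape: the GPIO number and its name, an EINT mapping (pins 206-219 are declared NO_EINT_SUPPORT, so they cannot raise external interrupts, while pins 220-227 map onto the non-contiguous EINT numbers 208-216), a drive-strength group, and up to eight MTK_FUNCTION mux options, with function 0 always the plain-GPIO mode. As a reading aid, here is a minimal, self-contained sketch of searching such a table for a named function; the struct layout is hypothetical and only mirrors the macro arguments, not the driver's real mtk_pin_desc:

#include <stddef.h>
#include <string.h>

struct pin_func { int mux; const char *name; };	/* hypothetical */

struct pin_desc {				/* hypothetical */
	int number;
	const char *name;
	struct pin_func funcs[8];
};

/* Return the mux value that selects @func on @pin, or -1 if the pin
 * does not offer that function at all. */
static int pin_find_mux(const struct pin_desc *pin, const char *func)
{
	size_t i;

	for (i = 0; i < 8; i++)
		if (pin->funcs[i].name && !strcmp(pin->funcs[i].name, func))
			return pin->funcs[i].mux;
	return -1;
}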
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index a23c18251965..623af4410b07 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -940,7 +940,6 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
{
struct pinctrl_pin_desc *pins;
struct mtk_pinctrl *hw;
- struct resource *res;
int err, i;
hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
@@ -963,14 +962,8 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < hw->soc->nbase_names; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- hw->soc->base_names[i]);
- if (!res) {
- dev_err(&pdev->dev, "missing IO resource\n");
- return -ENXIO;
- }
-
- hw->base[i] = devm_ioremap_resource(&pdev->dev, res);
+ hw->base[i] = devm_platform_ioremap_resource_byname(pdev,
+ hw->soc->base_names[i]);
if (IS_ERR(hw->base[i]))
return PTR_ERR(hw->base[i]);
}
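
The pinctrl-paris hunk above is a pure refactor: devm_platform_ioremap_resource_byname() combines the resource lookup and the mapping in one call, and because devm_ioremap_resource() already returns an ERR_PTR and logs an error when handed a NULL resource, the explicit "missing IO resource" branch was redundant. Roughly, the helper folds together the two calls that were removed (simplified sketch; the in-tree implementation lives in drivers/base/platform.c):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *sketch_ioremap_byname(struct platform_device *pdev,
					   const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res); /* handles res == NULL */
}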
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 953126bf6657..68894e9e05d2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -197,7 +197,7 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
- PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"),
+ PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"), /* this actually controls "pcie1_reset" */
PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ba25c4654391..657e35a75d84 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -931,11 +931,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
[NMK_GPIO_ALT_C+3] = "altC3",
[NMK_GPIO_ALT_C+4] = "altC4",
};
- const char *pulls[] = {
- "none ",
- "pull down",
- "pull up ",
- };
clk_enable(nmk_chip->clk);
is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
@@ -946,7 +941,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
if (is_out) {
- seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
+ seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
gpio,
label ?: "(none)",
data_out ? "hi" : "lo",
@@ -954,11 +949,12 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
} else {
int irq = chip->to_irq(chip, offset);
struct irq_desc *desc = irq_to_desc(irq);
- int pullidx = 0;
+ const int pullidx = pull ? 1 : 0;
int val;
-
- if (pull)
- pullidx = data_out ? 2 : 1;
+ static const char * const pulls[] = {
+ "none ",
+ "pull enabled",
+ };
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,
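
In the nomadik hunk above, the old debugfs code inferred the pull direction from the data register (a high data_out was reported as "pull up"), which is at best a heuristic for an input pin; presumably that is why the rewrite reports only whether a pull is enabled. Making the table static const also keeps a single copy in .rodata instead of rebuilding the array on every call. The idiom in isolation (illustrative only):

#include <stdbool.h>

/* A boolean flag indexing a two-entry, read-only string table. */
static const char * const pulls[] = {
	"none        ",
	"pull enabled",
};

const char *describe_pull(bool pull)
{
	return pulls[pull ? 1 : 0];
}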
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index a935065cdac4..6de31b5ee358 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1601,7 +1601,7 @@ static void npcm7xx_dt_free_map(struct pinctrl_dev *pctldev,
kfree(map);
}
-static struct pinctrl_ops npcm7xx_pinctrl_ops = {
+static const struct pinctrl_ops npcm7xx_pinctrl_ops = {
.get_groups_count = npcm7xx_get_groups_count,
.get_group_name = npcm7xx_get_group_name,
.get_group_pins = npcm7xx_get_group_pins,
@@ -1701,7 +1701,7 @@ static int npcm_gpio_set_direction(struct pinctrl_dev *pctldev,
return 0;
}
-static struct pinmux_ops npcm7xx_pinmux_ops = {
+static const struct pinmux_ops npcm7xx_pinmux_ops = {
.get_functions_count = npcm7xx_get_functions_count,
.get_function_name = npcm7xx_get_function_name,
.get_function_groups = npcm7xx_get_function_groups,
@@ -1842,7 +1842,7 @@ static int npcm7xx_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return 0;
}
-static struct pinconf_ops npcm7xx_pinconf_ops = {
+static const struct pinconf_ops npcm7xx_pinconf_ops = {
.is_generic = true,
.pin_config_get = npcm7xx_config_get,
.pin_config_set = npcm7xx_config_set,
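
Constifying the three npcm7xx ops tables above is a standard hardening pattern: the pinctrl core only reads these structures through const pointers, so marking them const lets the compiler emit them into .rodata, where a stray write faults instead of silently corrupting a function pointer. In miniature (names hypothetical):

struct ops {				/* hypothetical */
	int (*get_count)(void);
};

static int my_get_count(void)
{
	return 0;
}

/* const: placed in .rodata; accidental writes now trap. */
static const struct ops my_ops = {
	.get_count = my_get_count,
};

The same treatment is worth applying to any ops structure that is only ever handed to a const-taking registration API.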
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9a760f5cd7ed..4aea3e05e8c6 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -156,7 +156,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
} else if (debounce < 250000) {
- time = debounce / 15600;
+ time = debounce / 15625;
pin_reg |= time & DB_TMR_OUT_MASK;
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
@@ -166,14 +166,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg |= BIT(DB_TMR_LARGE_OFF);
} else {
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
ret = -EINVAL;
}
} else {
pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
pin_reg &= ~DB_TMR_OUT_MASK;
- pin_reg &= ~DB_CNTRl_MASK;
+ pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
}
writel(pin_reg, gpio_dev->base + offset * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
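
Two distinct fixes ride in the pinctrl-amd hunk above. First, the divisor for the long-debounce range: the timer counts in units of 512 RtcClk cycles, and with RtcClk at 32.768 kHz one unit is 512 / 32768 s = 15.625 ms = 15625 us, so the old 15600 was slightly off. Second, DB_CNTRl_MASK (the lowercase 'l' is the header's own spelling) describes a field that sits at bit offset DB_CNTRL_OFF in the register, so clearing it requires shifting the mask into place; the unshifted and-not was clearing the wrong, low-order bits. The unit arithmetic is easy to check in isolation:

#include <assert.h>

int main(void)
{
	/* One "large" debounce unit = 512 RtcClk cycles at 32.768 kHz. */
	const unsigned int unit_us = 512u * 1000000u / 32768u;

	assert(unit_us == 15625);
	/* The divisors disagree near the top of the < 250000 us range: */
	assert(249999u / 15600u == 16);		/* old: one tick too many */
	assert(249999u / unit_us == 15);	/* new */
	return 0;
}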
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index d4a192df5fab..95e763424042 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -123,13 +123,31 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(18, "GPIO_18"),
PINCTRL_PIN(19, "GPIO_19"),
PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
PINCTRL_PIN(23, "GPIO_23"),
PINCTRL_PIN(24, "GPIO_24"),
PINCTRL_PIN(25, "GPIO_25"),
PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
PINCTRL_PIN(39, "GPIO_39"),
PINCTRL_PIN(40, "GPIO_40"),
- PINCTRL_PIN(43, "GPIO_42"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
PINCTRL_PIN(46, "GPIO_46"),
PINCTRL_PIN(47, "GPIO_47"),
PINCTRL_PIN(48, "GPIO_48"),
@@ -150,14 +168,23 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(64, "GPIO_64"),
PINCTRL_PIN(65, "GPIO_65"),
PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
PINCTRL_PIN(70, "GPIO_70"),
PINCTRL_PIN(71, "GPIO_71"),
PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
PINCTRL_PIN(74, "GPIO_74"),
PINCTRL_PIN(75, "GPIO_75"),
PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
PINCTRL_PIN(84, "GPIO_84"),
PINCTRL_PIN(85, "GPIO_85"),
PINCTRL_PIN(86, "GPIO_86"),
@@ -168,6 +195,7 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(91, "GPIO_91"),
PINCTRL_PIN(92, "GPIO_92"),
PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
PINCTRL_PIN(95, "GPIO_95"),
PINCTRL_PIN(96, "GPIO_96"),
PINCTRL_PIN(97, "GPIO_97"),
@@ -176,6 +204,16 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(100, "GPIO_100"),
PINCTRL_PIN(101, "GPIO_101"),
PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
PINCTRL_PIN(113, "GPIO_113"),
PINCTRL_PIN(114, "GPIO_114"),
PINCTRL_PIN(115, "GPIO_115"),
@@ -186,12 +224,18 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(120, "GPIO_120"),
PINCTRL_PIN(121, "GPIO_121"),
PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
PINCTRL_PIN(129, "GPIO_129"),
PINCTRL_PIN(130, "GPIO_130"),
PINCTRL_PIN(131, "GPIO_131"),
PINCTRL_PIN(132, "GPIO_132"),
PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
PINCTRL_PIN(135, "GPIO_135"),
PINCTRL_PIN(136, "GPIO_136"),
PINCTRL_PIN(137, "GPIO_137"),
@@ -206,6 +250,23 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(146, "GPIO_146"),
PINCTRL_PIN(147, "GPIO_147"),
PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
PINCTRL_PIN(166, "GPIO_166"),
PINCTRL_PIN(167, "GPIO_167"),
PINCTRL_PIN(168, "GPIO_168"),
@@ -218,6 +279,12 @@ static const struct pinctrl_pin_desc kerncz_pins[] = {
PINCTRL_PIN(175, "GPIO_175"),
PINCTRL_PIN(176, "GPIO_176"),
PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
};
static const unsigned i2c0_pins[] = {145, 146};
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 8e5a5053a47e..578b387100d9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -983,11 +983,18 @@ static const struct atmel_pioctrl_data atmel_sama5d2_pioctrl_data = {
.nbanks = 4,
};
+static const struct atmel_pioctrl_data microchip_sama7g5_pioctrl_data = {
+ .nbanks = 5,
+};
+
static const struct of_device_id atmel_pctrl_of_match[] = {
{
.compatible = "atmel,sama5d2-pinctrl",
.data = &atmel_sama5d2_pioctrl_data,
}, {
+ .compatible = "microchip,sama7g5-pinctrl",
+ .data = &microchip_sama7g5_pioctrl_data,
+ }, {
/* sentinel */
}
};
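
The sama7g5 addition above is the usual of_device_id pattern: the new compatible string carries per-SoC data (here just the bank count, five banks against the sama5d2's four), so probe code can pick it up without SoC-specific branching. A hedged sketch of the retrieval side; atmel_pioctrl_probe() may structure this differently:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	const struct atmel_pioctrl_data *data;

	/* Returns the .data of whichever of_device_id entry matched. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	return data->nbanks;	/* 4 on sama5d2, 5 on sama7g5 */
}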
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a8d1b53ec4c1..621909b01deb 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -633,6 +633,46 @@ static int jz4770_uart2_data_pins[] = { 0x5c, 0x5e, };
static int jz4770_uart2_hwflow_pins[] = { 0x5d, 0x5f, };
static int jz4770_uart3_data_pins[] = { 0x6c, 0x85, };
static int jz4770_uart3_hwflow_pins[] = { 0x88, 0x89, };
+static int jz4770_ssi0_dt_a_pins[] = { 0x15, };
+static int jz4770_ssi0_dt_b_pins[] = { 0x35, };
+static int jz4770_ssi0_dt_d_pins[] = { 0x75, };
+static int jz4770_ssi0_dt_e_pins[] = { 0x91, };
+static int jz4770_ssi0_dr_a_pins[] = { 0x14, };
+static int jz4770_ssi0_dr_b_pins[] = { 0x34, };
+static int jz4770_ssi0_dr_d_pins[] = { 0x74, };
+static int jz4770_ssi0_dr_e_pins[] = { 0x8e, };
+static int jz4770_ssi0_clk_a_pins[] = { 0x12, };
+static int jz4770_ssi0_clk_b_pins[] = { 0x3c, };
+static int jz4770_ssi0_clk_d_pins[] = { 0x78, };
+static int jz4770_ssi0_clk_e_pins[] = { 0x8f, };
+static int jz4770_ssi0_gpc_b_pins[] = { 0x3e, };
+static int jz4770_ssi0_gpc_d_pins[] = { 0x76, };
+static int jz4770_ssi0_gpc_e_pins[] = { 0x93, };
+static int jz4770_ssi0_ce0_a_pins[] = { 0x13, };
+static int jz4770_ssi0_ce0_b_pins[] = { 0x3d, };
+static int jz4770_ssi0_ce0_d_pins[] = { 0x79, };
+static int jz4770_ssi0_ce0_e_pins[] = { 0x90, };
+static int jz4770_ssi0_ce1_b_pins[] = { 0x3f, };
+static int jz4770_ssi0_ce1_d_pins[] = { 0x77, };
+static int jz4770_ssi0_ce1_e_pins[] = { 0x92, };
+static int jz4770_ssi1_dt_b_pins[] = { 0x35, };
+static int jz4770_ssi1_dt_d_pins[] = { 0x75, };
+static int jz4770_ssi1_dt_e_pins[] = { 0x91, };
+static int jz4770_ssi1_dr_b_pins[] = { 0x34, };
+static int jz4770_ssi1_dr_d_pins[] = { 0x74, };
+static int jz4770_ssi1_dr_e_pins[] = { 0x8e, };
+static int jz4770_ssi1_clk_b_pins[] = { 0x3c, };
+static int jz4770_ssi1_clk_d_pins[] = { 0x78, };
+static int jz4770_ssi1_clk_e_pins[] = { 0x8f, };
+static int jz4770_ssi1_gpc_b_pins[] = { 0x3e, };
+static int jz4770_ssi1_gpc_d_pins[] = { 0x76, };
+static int jz4770_ssi1_gpc_e_pins[] = { 0x93, };
+static int jz4770_ssi1_ce0_b_pins[] = { 0x3d, };
+static int jz4770_ssi1_ce0_d_pins[] = { 0x79, };
+static int jz4770_ssi1_ce0_e_pins[] = { 0x90, };
+static int jz4770_ssi1_ce1_b_pins[] = { 0x3f, };
+static int jz4770_ssi1_ce1_d_pins[] = { 0x77, };
+static int jz4770_ssi1_ce1_e_pins[] = { 0x92, };
static int jz4770_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
static int jz4770_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
@@ -703,6 +743,46 @@ static int jz4770_uart2_data_funcs[] = { 0, 0, };
static int jz4770_uart2_hwflow_funcs[] = { 0, 0, };
static int jz4770_uart3_data_funcs[] = { 0, 1, };
static int jz4770_uart3_hwflow_funcs[] = { 0, 0, };
+static int jz4770_ssi0_dt_a_funcs[] = { 2, };
+static int jz4770_ssi0_dt_b_funcs[] = { 1, };
+static int jz4770_ssi0_dt_d_funcs[] = { 1, };
+static int jz4770_ssi0_dt_e_funcs[] = { 0, };
+static int jz4770_ssi0_dr_a_funcs[] = { 1, };
+static int jz4770_ssi0_dr_b_funcs[] = { 1, };
+static int jz4770_ssi0_dr_d_funcs[] = { 1, };
+static int jz4770_ssi0_dr_e_funcs[] = { 0, };
+static int jz4770_ssi0_clk_a_funcs[] = { 2, };
+static int jz4770_ssi0_clk_b_funcs[] = { 1, };
+static int jz4770_ssi0_clk_d_funcs[] = { 1, };
+static int jz4770_ssi0_clk_e_funcs[] = { 0, };
+static int jz4770_ssi0_gpc_b_funcs[] = { 1, };
+static int jz4770_ssi0_gpc_d_funcs[] = { 1, };
+static int jz4770_ssi0_gpc_e_funcs[] = { 0, };
+static int jz4770_ssi0_ce0_a_funcs[] = { 2, };
+static int jz4770_ssi0_ce0_b_funcs[] = { 1, };
+static int jz4770_ssi0_ce0_d_funcs[] = { 1, };
+static int jz4770_ssi0_ce0_e_funcs[] = { 0, };
+static int jz4770_ssi0_ce1_b_funcs[] = { 1, };
+static int jz4770_ssi0_ce1_d_funcs[] = { 1, };
+static int jz4770_ssi0_ce1_e_funcs[] = { 0, };
+static int jz4770_ssi1_dt_b_funcs[] = { 2, };
+static int jz4770_ssi1_dt_d_funcs[] = { 2, };
+static int jz4770_ssi1_dt_e_funcs[] = { 1, };
+static int jz4770_ssi1_dr_b_funcs[] = { 2, };
+static int jz4770_ssi1_dr_d_funcs[] = { 2, };
+static int jz4770_ssi1_dr_e_funcs[] = { 1, };
+static int jz4770_ssi1_clk_b_funcs[] = { 2, };
+static int jz4770_ssi1_clk_d_funcs[] = { 2, };
+static int jz4770_ssi1_clk_e_funcs[] = { 1, };
+static int jz4770_ssi1_gpc_b_funcs[] = { 2, };
+static int jz4770_ssi1_gpc_d_funcs[] = { 2, };
+static int jz4770_ssi1_gpc_e_funcs[] = { 1, };
+static int jz4770_ssi1_ce0_b_funcs[] = { 2, };
+static int jz4770_ssi1_ce0_d_funcs[] = { 2, };
+static int jz4770_ssi1_ce0_e_funcs[] = { 1, };
+static int jz4770_ssi1_ce1_b_funcs[] = { 2, };
+static int jz4770_ssi1_ce1_d_funcs[] = { 2, };
+static int jz4770_ssi1_ce1_e_funcs[] = { 1, };
static int jz4770_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
static int jz4770_mmc0_1bit_e_funcs[] = { 0, 0, 0, };
@@ -763,6 +843,46 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow),
INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
+ INGENIC_PIN_GROUP("ssi0-dt-a", jz4770_ssi0_dt_a),
+ INGENIC_PIN_GROUP("ssi0-dt-b", jz4770_ssi0_dt_b),
+ INGENIC_PIN_GROUP("ssi0-dt-d", jz4770_ssi0_dt_d),
+ INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e),
+ INGENIC_PIN_GROUP("ssi0-dr-a", jz4770_ssi0_dr_a),
+ INGENIC_PIN_GROUP("ssi0-dr-b", jz4770_ssi0_dr_b),
+ INGENIC_PIN_GROUP("ssi0-dr-d", jz4770_ssi0_dr_d),
+ INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e),
+ INGENIC_PIN_GROUP("ssi0-clk-a", jz4770_ssi0_clk_a),
+ INGENIC_PIN_GROUP("ssi0-clk-b", jz4770_ssi0_clk_b),
+ INGENIC_PIN_GROUP("ssi0-clk-d", jz4770_ssi0_clk_d),
+ INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e),
+ INGENIC_PIN_GROUP("ssi0-gpc-b", jz4770_ssi0_gpc_b),
+ INGENIC_PIN_GROUP("ssi0-gpc-d", jz4770_ssi0_gpc_d),
+ INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e),
+ INGENIC_PIN_GROUP("ssi0-ce0-a", jz4770_ssi0_ce0_a),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", jz4770_ssi0_ce0_b),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", jz4770_ssi0_ce0_d),
+ INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e),
+ INGENIC_PIN_GROUP("ssi0-ce1-b", jz4770_ssi0_ce1_b),
+ INGENIC_PIN_GROUP("ssi0-ce1-d", jz4770_ssi0_ce1_d),
+ INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e),
+ INGENIC_PIN_GROUP("ssi1-dt-b", jz4770_ssi1_dt_b),
+ INGENIC_PIN_GROUP("ssi1-dt-d", jz4770_ssi1_dt_d),
+ INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e),
+ INGENIC_PIN_GROUP("ssi1-dr-b", jz4770_ssi1_dr_b),
+ INGENIC_PIN_GROUP("ssi1-dr-d", jz4770_ssi1_dr_d),
+ INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e),
+ INGENIC_PIN_GROUP("ssi1-clk-b", jz4770_ssi1_clk_b),
+ INGENIC_PIN_GROUP("ssi1-clk-d", jz4770_ssi1_clk_d),
+ INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e),
+ INGENIC_PIN_GROUP("ssi1-gpc-b", jz4770_ssi1_gpc_b),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", jz4770_ssi1_gpc_d),
+ INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e),
+ INGENIC_PIN_GROUP("ssi1-ce0-b", jz4770_ssi1_ce0_b),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", jz4770_ssi1_ce0_d),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e),
+ INGENIC_PIN_GROUP("ssi1-ce1-b", jz4770_ssi1_ce1_b),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", jz4770_ssi1_ce1_d),
+ INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e),
INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
@@ -815,6 +935,22 @@ static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
static const char *jz4770_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
static const char *jz4770_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
static const char *jz4770_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4770_ssi0_groups[] = {
+ "ssi0-dt-a", "ssi0-dt-b", "ssi0-dt-d", "ssi0-dt-e",
+ "ssi0-dr-a", "ssi0-dr-b", "ssi0-dr-d", "ssi0-dr-e",
+ "ssi0-clk-a", "ssi0-clk-b", "ssi0-clk-d", "ssi0-clk-e",
+ "ssi0-gpc-b", "ssi0-gpc-d", "ssi0-gpc-e",
+ "ssi0-ce0-a", "ssi0-ce0-b", "ssi0-ce0-d", "ssi0-ce0-e",
+ "ssi0-ce1-b", "ssi0-ce1-d", "ssi0-ce1-e",
+};
+static const char *jz4770_ssi1_groups[] = {
+ "ssi1-dt-b", "ssi1-dt-d", "ssi1-dt-e",
+ "ssi1-dr-b", "ssi1-dr-d", "ssi1-dr-e",
+ "ssi1-clk-b", "ssi1-clk-d", "ssi1-clk-e",
+ "ssi1-gpc-b", "ssi1-gpc-d", "ssi1-gpc-e",
+ "ssi1-ce0-b", "ssi1-ce0-d", "ssi1-ce0-e",
+ "ssi1-ce1-b", "ssi1-ce1-d", "ssi1-ce1-e",
+};
static const char *jz4770_mmc0_groups[] = {
"mmc0-1bit-a", "mmc0-4bit-a",
"mmc0-1bit-e", "mmc0-4bit-e", "mmc0-8bit-e",
@@ -858,6 +994,8 @@ static const struct function_desc jz4770_functions[] = {
{ "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
{ "uart2", jz4770_uart2_groups, ARRAY_SIZE(jz4770_uart2_groups), },
{ "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
+ { "ssi0", jz4770_ssi0_groups, ARRAY_SIZE(jz4770_ssi0_groups), },
+ { "ssi1", jz4770_ssi1_groups, ARRAY_SIZE(jz4770_ssi1_groups), },
{ "mmc0", jz4770_mmc0_groups, ARRAY_SIZE(jz4770_mmc0_groups), },
{ "mmc1", jz4770_mmc1_groups, ARRAY_SIZE(jz4770_mmc1_groups), },
{ "mmc2", jz4770_mmc2_groups, ARRAY_SIZE(jz4770_mmc2_groups), },
@@ -897,22 +1035,106 @@ static const struct ingenic_chip_info jz4770_chip_info = {
.pull_downs = jz4770_pull_downs,
};
+static const u32 jz4780_pull_ups[6] = {
+ 0x3fffffff, 0xfff0f3fc, 0x0fffffff, 0xffff4fff, 0xfffffb7c, 0x7fa7f00f,
+};
+
+static const u32 jz4780_pull_downs[6] = {
+ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
+};
+
static int jz4780_uart2_data_pins[] = { 0x66, 0x67, };
static int jz4780_uart2_hwflow_pins[] = { 0x65, 0x64, };
static int jz4780_uart4_data_pins[] = { 0x54, 0x4a, };
+static int jz4780_ssi0_dt_a_19_pins[] = { 0x13, };
+static int jz4780_ssi0_dt_a_21_pins[] = { 0x15, };
+static int jz4780_ssi0_dt_a_28_pins[] = { 0x1c, };
+static int jz4780_ssi0_dt_b_pins[] = { 0x3d, };
+static int jz4780_ssi0_dt_d_pins[] = { 0x79, };
+static int jz4780_ssi0_dr_a_20_pins[] = { 0x14, };
+static int jz4780_ssi0_dr_a_27_pins[] = { 0x1b, };
+static int jz4780_ssi0_dr_b_pins[] = { 0x34, };
+static int jz4780_ssi0_dr_d_pins[] = { 0x74, };
+static int jz4780_ssi0_clk_a_pins[] = { 0x12, };
+static int jz4780_ssi0_clk_b_5_pins[] = { 0x25, };
+static int jz4780_ssi0_clk_b_28_pins[] = { 0x3c, };
+static int jz4780_ssi0_clk_d_pins[] = { 0x78, };
+static int jz4780_ssi0_gpc_b_pins[] = { 0x3e, };
+static int jz4780_ssi0_gpc_d_pins[] = { 0x76, };
+static int jz4780_ssi0_ce0_a_23_pins[] = { 0x17, };
+static int jz4780_ssi0_ce0_a_25_pins[] = { 0x19, };
+static int jz4780_ssi0_ce0_b_pins[] = { 0x3f, };
+static int jz4780_ssi0_ce0_d_pins[] = { 0x77, };
+static int jz4780_ssi0_ce1_b_pins[] = { 0x35, };
+static int jz4780_ssi0_ce1_d_pins[] = { 0x75, };
+static int jz4780_ssi1_dt_b_pins[] = { 0x3d, };
+static int jz4780_ssi1_dt_d_pins[] = { 0x79, };
+static int jz4780_ssi1_dr_b_pins[] = { 0x34, };
+static int jz4780_ssi1_dr_d_pins[] = { 0x74, };
+static int jz4780_ssi1_clk_b_pins[] = { 0x3c, };
+static int jz4780_ssi1_clk_d_pins[] = { 0x78, };
+static int jz4780_ssi1_gpc_b_pins[] = { 0x3e, };
+static int jz4780_ssi1_gpc_d_pins[] = { 0x76, };
+static int jz4780_ssi1_ce0_b_pins[] = { 0x3f, };
+static int jz4780_ssi1_ce0_d_pins[] = { 0x77, };
+static int jz4780_ssi1_ce1_b_pins[] = { 0x35, };
+static int jz4780_ssi1_ce1_d_pins[] = { 0x75, };
static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, };
static int jz4780_i2c4_e_pins[] = { 0x8c, 0x8d, };
static int jz4780_i2c4_f_pins[] = { 0xb9, 0xb8, };
+static int jz4780_i2s_data_tx_pins[] = { 0x87, };
+static int jz4780_i2s_data_rx_pins[] = { 0x86, };
+static int jz4780_i2s_clk_txrx_pins[] = { 0x6c, 0x6d, };
+static int jz4780_i2s_clk_rx_pins[] = { 0x88, 0x89, };
+static int jz4780_i2s_sysclk_pins[] = { 0x85, };
static int jz4780_hdmi_ddc_pins[] = { 0xb9, 0xb8, };
static int jz4780_uart2_data_funcs[] = { 1, 1, };
static int jz4780_uart2_hwflow_funcs[] = { 1, 1, };
static int jz4780_uart4_data_funcs[] = { 2, 2, };
+static int jz4780_ssi0_dt_a_19_funcs[] = { 2, };
+static int jz4780_ssi0_dt_a_21_funcs[] = { 2, };
+static int jz4780_ssi0_dt_a_28_funcs[] = { 2, };
+static int jz4780_ssi0_dt_b_funcs[] = { 1, };
+static int jz4780_ssi0_dt_d_funcs[] = { 1, };
+static int jz4780_ssi0_dr_a_20_funcs[] = { 2, };
+static int jz4780_ssi0_dr_a_27_funcs[] = { 2, };
+static int jz4780_ssi0_dr_b_funcs[] = { 1, };
+static int jz4780_ssi0_dr_d_funcs[] = { 1, };
+static int jz4780_ssi0_clk_a_funcs[] = { 2, };
+static int jz4780_ssi0_clk_b_5_funcs[] = { 1, };
+static int jz4780_ssi0_clk_b_28_funcs[] = { 1, };
+static int jz4780_ssi0_clk_d_funcs[] = { 1, };
+static int jz4780_ssi0_gpc_b_funcs[] = { 1, };
+static int jz4780_ssi0_gpc_d_funcs[] = { 1, };
+static int jz4780_ssi0_ce0_a_23_funcs[] = { 2, };
+static int jz4780_ssi0_ce0_a_25_funcs[] = { 2, };
+static int jz4780_ssi0_ce0_b_funcs[] = { 1, };
+static int jz4780_ssi0_ce0_d_funcs[] = { 1, };
+static int jz4780_ssi0_ce1_b_funcs[] = { 1, };
+static int jz4780_ssi0_ce1_d_funcs[] = { 1, };
+static int jz4780_ssi1_dt_b_funcs[] = { 2, };
+static int jz4780_ssi1_dt_d_funcs[] = { 2, };
+static int jz4780_ssi1_dr_b_funcs[] = { 2, };
+static int jz4780_ssi1_dr_d_funcs[] = { 2, };
+static int jz4780_ssi1_clk_b_funcs[] = { 2, };
+static int jz4780_ssi1_clk_d_funcs[] = { 2, };
+static int jz4780_ssi1_gpc_b_funcs[] = { 2, };
+static int jz4780_ssi1_gpc_d_funcs[] = { 2, };
+static int jz4780_ssi1_ce0_b_funcs[] = { 2, };
+static int jz4780_ssi1_ce0_d_funcs[] = { 2, };
+static int jz4780_ssi1_ce1_b_funcs[] = { 2, };
+static int jz4780_ssi1_ce1_d_funcs[] = { 2, };
static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
static int jz4780_i2c3_funcs[] = { 1, 1, };
static int jz4780_i2c4_e_funcs[] = { 1, 1, };
static int jz4780_i2c4_f_funcs[] = { 1, 1, };
+static int jz4780_i2s_data_tx_funcs[] = { 0, };
+static int jz4780_i2s_data_rx_funcs[] = { 0, };
+static int jz4780_i2s_clk_txrx_funcs[] = { 1, 0, };
+static int jz4780_i2s_clk_rx_funcs[] = { 1, 1, };
+static int jz4780_i2s_sysclk_funcs[] = { 2, };
static int jz4780_hdmi_ddc_funcs[] = { 0, 0, };
static const struct group_desc jz4780_groups[] = {
@@ -925,6 +1147,51 @@ static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
INGENIC_PIN_GROUP("uart4-data", jz4780_uart4_data),
+ INGENIC_PIN_GROUP("ssi0-dt-a-19", jz4780_ssi0_dt_a_19),
+ INGENIC_PIN_GROUP("ssi0-dt-a-21", jz4780_ssi0_dt_a_21),
+ INGENIC_PIN_GROUP("ssi0-dt-a-28", jz4780_ssi0_dt_a_28),
+ INGENIC_PIN_GROUP("ssi0-dt-b", jz4780_ssi0_dt_b),
+ INGENIC_PIN_GROUP("ssi0-dt-d", jz4780_ssi0_dt_d),
+ INGENIC_PIN_GROUP("ssi0-dt-e", jz4770_ssi0_dt_e),
+ INGENIC_PIN_GROUP("ssi0-dr-a-20", jz4780_ssi0_dr_a_20),
+ INGENIC_PIN_GROUP("ssi0-dr-a-27", jz4780_ssi0_dr_a_27),
+ INGENIC_PIN_GROUP("ssi0-dr-b", jz4780_ssi0_dr_b),
+ INGENIC_PIN_GROUP("ssi0-dr-d", jz4780_ssi0_dr_d),
+ INGENIC_PIN_GROUP("ssi0-dr-e", jz4770_ssi0_dr_e),
+ INGENIC_PIN_GROUP("ssi0-clk-a", jz4780_ssi0_clk_a),
+ INGENIC_PIN_GROUP("ssi0-clk-b-5", jz4780_ssi0_clk_b_5),
+ INGENIC_PIN_GROUP("ssi0-clk-b-28", jz4780_ssi0_clk_b_28),
+ INGENIC_PIN_GROUP("ssi0-clk-d", jz4780_ssi0_clk_d),
+ INGENIC_PIN_GROUP("ssi0-clk-e", jz4770_ssi0_clk_e),
+ INGENIC_PIN_GROUP("ssi0-gpc-b", jz4780_ssi0_gpc_b),
+ INGENIC_PIN_GROUP("ssi0-gpc-d", jz4780_ssi0_gpc_d),
+ INGENIC_PIN_GROUP("ssi0-gpc-e", jz4770_ssi0_gpc_e),
+ INGENIC_PIN_GROUP("ssi0-ce0-a-23", jz4780_ssi0_ce0_a_23),
+ INGENIC_PIN_GROUP("ssi0-ce0-a-25", jz4780_ssi0_ce0_a_25),
+ INGENIC_PIN_GROUP("ssi0-ce0-b", jz4780_ssi0_ce0_b),
+ INGENIC_PIN_GROUP("ssi0-ce0-d", jz4780_ssi0_ce0_d),
+ INGENIC_PIN_GROUP("ssi0-ce0-e", jz4770_ssi0_ce0_e),
+ INGENIC_PIN_GROUP("ssi0-ce1-b", jz4780_ssi0_ce1_b),
+ INGENIC_PIN_GROUP("ssi0-ce1-d", jz4780_ssi0_ce1_d),
+ INGENIC_PIN_GROUP("ssi0-ce1-e", jz4770_ssi0_ce1_e),
+ INGENIC_PIN_GROUP("ssi1-dt-b", jz4780_ssi1_dt_b),
+ INGENIC_PIN_GROUP("ssi1-dt-d", jz4780_ssi1_dt_d),
+ INGENIC_PIN_GROUP("ssi1-dt-e", jz4770_ssi1_dt_e),
+ INGENIC_PIN_GROUP("ssi1-dr-b", jz4780_ssi1_dr_b),
+ INGENIC_PIN_GROUP("ssi1-dr-d", jz4780_ssi1_dr_d),
+ INGENIC_PIN_GROUP("ssi1-dr-e", jz4770_ssi1_dr_e),
+ INGENIC_PIN_GROUP("ssi1-clk-b", jz4780_ssi1_clk_b),
+ INGENIC_PIN_GROUP("ssi1-clk-d", jz4780_ssi1_clk_d),
+ INGENIC_PIN_GROUP("ssi1-clk-e", jz4770_ssi1_clk_e),
+ INGENIC_PIN_GROUP("ssi1-gpc-b", jz4780_ssi1_gpc_b),
+ INGENIC_PIN_GROUP("ssi1-gpc-d", jz4780_ssi1_gpc_d),
+ INGENIC_PIN_GROUP("ssi1-gpc-e", jz4770_ssi1_gpc_e),
+ INGENIC_PIN_GROUP("ssi1-ce0-b", jz4780_ssi1_ce0_b),
+ INGENIC_PIN_GROUP("ssi1-ce0-d", jz4780_ssi1_ce0_d),
+ INGENIC_PIN_GROUP("ssi1-ce0-e", jz4770_ssi1_ce0_e),
+ INGENIC_PIN_GROUP("ssi1-ce1-b", jz4780_ssi1_ce1_b),
+ INGENIC_PIN_GROUP("ssi1-ce1-d", jz4780_ssi1_ce1_d),
+ INGENIC_PIN_GROUP("ssi1-ce1-e", jz4770_ssi1_ce1_e),
INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
INGENIC_PIN_GROUP("mmc0-8bit-a", jz4780_mmc0_8bit_a),
@@ -956,6 +1223,11 @@ static const struct group_desc jz4780_groups[] = {
INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3),
INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e),
INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f),
+ INGENIC_PIN_GROUP("i2s-data-tx", jz4780_i2s_data_tx),
+ INGENIC_PIN_GROUP("i2s-data-rx", jz4780_i2s_data_rx),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", jz4780_i2s_clk_txrx),
+ INGENIC_PIN_GROUP("i2s-clk-rx", jz4780_i2s_clk_rx),
+ INGENIC_PIN_GROUP("i2s-sysclk", jz4780_i2s_sysclk),
INGENIC_PIN_GROUP("hdmi-ddc", jz4780_hdmi_ddc),
INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit),
INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit),
@@ -972,6 +1244,22 @@ static const struct group_desc jz4780_groups[] = {
static const char *jz4780_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
static const char *jz4780_uart4_groups[] = { "uart4-data", };
+static const char *jz4780_ssi0_groups[] = {
+ "ssi0-dt-a-19", "ssi0-dt-a-21", "ssi0-dt-a-28", "ssi0-dt-b", "ssi0-dt-d", "ssi0-dt-e",
+ "ssi0-dr-a-20", "ssi0-dr-a-27", "ssi0-dr-b", "ssi0-dr-d", "ssi0-dr-e",
+ "ssi0-clk-a", "ssi0-clk-b-5", "ssi0-clk-b-28", "ssi0-clk-d", "ssi0-clk-e",
+ "ssi0-gpc-b", "ssi0-gpc-d", "ssi0-gpc-e",
+ "ssi0-ce0-a-23", "ssi0-ce0-a-25", "ssi0-ce0-b", "ssi0-ce0-d", "ssi0-ce0-e",
+ "ssi0-ce1-b", "ssi0-ce1-d", "ssi0-ce1-e",
+};
+static const char *jz4780_ssi1_groups[] = {
+ "ssi1-dt-b", "ssi1-dt-d", "ssi1-dt-e",
+ "ssi1-dr-b", "ssi1-dr-d", "ssi1-dr-e",
+ "ssi1-clk-b", "ssi1-clk-d", "ssi1-clk-e",
+ "ssi1-gpc-b", "ssi1-gpc-d", "ssi1-gpc-e",
+ "ssi1-ce0-b", "ssi1-ce0-d", "ssi1-ce0-e",
+ "ssi1-ce1-b", "ssi1-ce1-d", "ssi1-ce1-e",
+};
static const char *jz4780_mmc0_groups[] = {
"mmc0-1bit-a", "mmc0-4bit-a", "mmc0-8bit-a",
"mmc0-1bit-e", "mmc0-4bit-e",
@@ -988,6 +1276,9 @@ static const char *jz4780_nemc_groups[] = {
};
static const char *jz4780_i2c3_groups[] = { "i2c3-data", };
static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
+static const char *jz4780_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
+};
static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
@@ -997,6 +1288,8 @@ static const struct function_desc jz4780_functions[] = {
{ "uart2", jz4780_uart2_groups, ARRAY_SIZE(jz4780_uart2_groups), },
{ "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
{ "uart4", jz4780_uart4_groups, ARRAY_SIZE(jz4780_uart4_groups), },
+ { "ssi0", jz4780_ssi0_groups, ARRAY_SIZE(jz4780_ssi0_groups), },
+ { "ssi1", jz4780_ssi1_groups, ARRAY_SIZE(jz4780_ssi1_groups), },
{ "mmc0", jz4780_mmc0_groups, ARRAY_SIZE(jz4780_mmc0_groups), },
{ "mmc1", jz4780_mmc1_groups, ARRAY_SIZE(jz4780_mmc1_groups), },
{ "mmc2", jz4780_mmc2_groups, ARRAY_SIZE(jz4780_mmc2_groups), },
@@ -1012,6 +1305,7 @@ static const struct function_desc jz4780_functions[] = {
{ "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
{ "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), },
{ "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), },
+ { "i2s", jz4780_i2s_groups, ARRAY_SIZE(jz4780_i2s_groups), },
{ "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), },
{ "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
{ "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
@@ -1034,8 +1328,8 @@ static const struct ingenic_chip_info jz4780_chip_info = {
.num_groups = ARRAY_SIZE(jz4780_groups),
.functions = jz4780_functions,
.num_functions = ARRAY_SIZE(jz4780_functions),
- .pull_ups = jz4770_pull_ups,
- .pull_downs = jz4770_pull_downs,
+ .pull_ups = jz4780_pull_ups,
+ .pull_downs = jz4780_pull_downs,
};
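The two-line fix above is easy to read past: the JZ4780 chip_info had been wired to the JZ4770 bias tables. A minimal sketch of how those bitmaps are consumed; the helper name is hypothetical, the real lookup lives in the driver's pinconf callbacks:

/*
 * Illustrative only: pull_ups/pull_downs hold one 32-bit word per GPIO
 * port, one bit per pin, so a wrong table silently misreports bias
 * capability wherever the JZ4770 and JZ4780 layouts differ.
 */
static bool example_has_pull_up(const struct ingenic_chip_info *info,
                                unsigned int pin)
{
        return info->pull_ups[pin / 32] & BIT(pin % 32);
}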
static const u32 x1000_pull_ups[4] = {
@@ -1093,6 +1387,10 @@ static int x1000_i2c0_pins[] = { 0x38, 0x37, };
static int x1000_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1000_i2c1_c_pins[] = { 0x5b, 0x5a, };
static int x1000_i2c2_pins[] = { 0x61, 0x60, };
+static int x1000_i2s_data_tx_pins[] = { 0x24, };
+static int x1000_i2s_data_rx_pins[] = { 0x23, };
+static int x1000_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
+static int x1000_i2s_sysclk_pins[] = { 0x20, };
static int x1000_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1155,6 +1453,10 @@ static int x1000_i2c0_funcs[] = { 0, 0, };
static int x1000_i2c1_a_funcs[] = { 2, 2, };
static int x1000_i2c1_c_funcs[] = { 0, 0, };
static int x1000_i2c2_funcs[] = { 1, 1, };
+static int x1000_i2s_data_tx_funcs[] = { 1, };
+static int x1000_i2s_data_rx_funcs[] = { 1, };
+static int x1000_i2s_clk_txrx_funcs[] = { 1, 1, };
+static int x1000_i2s_sysclk_funcs[] = { 1, };
static int x1000_cim_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
static int x1000_lcd_8bit_funcs[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -1208,6 +1510,10 @@ static const struct group_desc x1000_groups[] = {
INGENIC_PIN_GROUP("i2c1-data-a", x1000_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1000_i2c1_c),
INGENIC_PIN_GROUP("i2c2-data", x1000_i2c2),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1000_i2s_data_tx),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1000_i2s_data_rx),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1000_i2s_clk_txrx),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1000_i2s_sysclk),
INGENIC_PIN_GROUP("cim-data", x1000_cim),
INGENIC_PIN_GROUP("lcd-8bit", x1000_lcd_8bit),
INGENIC_PIN_GROUP("lcd-16bit", x1000_lcd_16bit),
@@ -1249,6 +1555,9 @@ static const char *x1000_cs2_groups[] = { "emc-cs2", };
static const char *x1000_i2c0_groups[] = { "i2c0-data", };
static const char *x1000_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1000_i2c2_groups[] = { "i2c2-data", };
+static const char *x1000_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
+};
static const char *x1000_cim_groups[] = { "cim-data", };
static const char *x1000_lcd_groups[] = {
"lcd-8bit", "lcd-16bit", "lcd-no-pins",
@@ -1274,6 +1583,7 @@ static const struct function_desc x1000_functions[] = {
{ "i2c0", x1000_i2c0_groups, ARRAY_SIZE(x1000_i2c0_groups), },
{ "i2c1", x1000_i2c1_groups, ARRAY_SIZE(x1000_i2c1_groups), },
{ "i2c2", x1000_i2c2_groups, ARRAY_SIZE(x1000_i2c2_groups), },
+ { "i2s", x1000_i2s_groups, ARRAY_SIZE(x1000_i2s_groups), },
{ "cim", x1000_cim_groups, ARRAY_SIZE(x1000_cim_groups), },
{ "lcd", x1000_lcd_groups, ARRAY_SIZE(x1000_lcd_groups), },
{ "pwm0", x1000_pwm0_groups, ARRAY_SIZE(x1000_pwm0_groups), },
@@ -1309,6 +1619,10 @@ static int x1500_i2c0_pins[] = { 0x38, 0x37, };
static int x1500_i2c1_a_pins[] = { 0x01, 0x00, };
static int x1500_i2c1_c_pins[] = { 0x5b, 0x5a, };
static int x1500_i2c2_pins[] = { 0x61, 0x60, };
+static int x1500_i2s_data_tx_pins[] = { 0x24, };
+static int x1500_i2s_data_rx_pins[] = { 0x23, };
+static int x1500_i2s_clk_txrx_pins[] = { 0x21, 0x22, };
+static int x1500_i2s_sysclk_pins[] = { 0x20, };
static int x1500_cim_pins[] = {
0x08, 0x09, 0x0a, 0x0b,
0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0c,
@@ -1332,6 +1646,10 @@ static int x1500_i2c0_funcs[] = { 0, 0, };
static int x1500_i2c1_a_funcs[] = { 2, 2, };
static int x1500_i2c1_c_funcs[] = { 0, 0, };
static int x1500_i2c2_funcs[] = { 1, 1, };
+static int x1500_i2s_data_tx_funcs[] = { 1, };
+static int x1500_i2s_data_rx_funcs[] = { 1, };
+static int x1500_i2s_clk_txrx_funcs[] = { 1, 1, };
+static int x1500_i2s_sysclk_funcs[] = { 1, };
static int x1500_cim_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
static int x1500_pwm_pwm0_funcs[] = { 0, };
static int x1500_pwm_pwm1_funcs[] = { 1, };
@@ -1354,6 +1672,10 @@ static const struct group_desc x1500_groups[] = {
INGENIC_PIN_GROUP("i2c1-data-a", x1500_i2c1_a),
INGENIC_PIN_GROUP("i2c1-data-c", x1500_i2c1_c),
INGENIC_PIN_GROUP("i2c2-data", x1500_i2c2),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1500_i2s_data_tx),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1500_i2s_data_rx),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1500_i2s_clk_txrx),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1500_i2s_sysclk),
INGENIC_PIN_GROUP("cim-data", x1500_cim),
{ "lcd-no-pins", },
INGENIC_PIN_GROUP("pwm0", x1500_pwm_pwm0),
@@ -1372,6 +1694,9 @@ static const char *x1500_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
static const char *x1500_i2c0_groups[] = { "i2c0-data", };
static const char *x1500_i2c1_groups[] = { "i2c1-data-a", "i2c1-data-c", };
static const char *x1500_i2c2_groups[] = { "i2c2-data", };
+static const char *x1500_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-sysclk",
+};
static const char *x1500_cim_groups[] = { "cim-data", };
static const char *x1500_lcd_groups[] = { "lcd-no-pins", };
static const char *x1500_pwm0_groups[] = { "pwm0", };
@@ -1389,6 +1714,7 @@ static const struct function_desc x1500_functions[] = {
{ "i2c0", x1500_i2c0_groups, ARRAY_SIZE(x1500_i2c0_groups), },
{ "i2c1", x1500_i2c1_groups, ARRAY_SIZE(x1500_i2c1_groups), },
{ "i2c2", x1500_i2c2_groups, ARRAY_SIZE(x1500_i2c2_groups), },
+ { "i2s", x1500_i2s_groups, ARRAY_SIZE(x1500_i2s_groups), },
{ "cim", x1500_cim_groups, ARRAY_SIZE(x1500_cim_groups), },
{ "lcd", x1500_lcd_groups, ARRAY_SIZE(x1500_lcd_groups), },
{ "pwm0", x1500_pwm0_groups, ARRAY_SIZE(x1500_pwm0_groups), },
@@ -1447,6 +1773,11 @@ static int x1830_mmc1_4bit_pins[] = { 0x45, 0x46, 0x47, };
static int x1830_i2c0_pins[] = { 0x0c, 0x0d, };
static int x1830_i2c1_pins[] = { 0x39, 0x3a, };
static int x1830_i2c2_pins[] = { 0x5b, 0x5c, };
+static int x1830_i2s_data_tx_pins[] = { 0x53, };
+static int x1830_i2s_data_rx_pins[] = { 0x54, };
+static int x1830_i2s_clk_txrx_pins[] = { 0x58, 0x52, };
+static int x1830_i2s_clk_rx_pins[] = { 0x56, 0x55, };
+static int x1830_i2s_sysclk_pins[] = { 0x57, };
static int x1830_lcd_rgb_18bit_pins[] = {
0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6c, 0x6d, 0x6e, 0x6f,
@@ -1509,6 +1840,11 @@ static int x1830_mmc1_4bit_funcs[] = { 0, 0, 0, };
static int x1830_i2c0_funcs[] = { 1, 1, };
static int x1830_i2c1_funcs[] = { 0, 0, };
static int x1830_i2c2_funcs[] = { 1, 1, };
+static int x1830_i2s_data_tx_funcs[] = { 0, };
+static int x1830_i2s_data_rx_funcs[] = { 0, };
+static int x1830_i2s_clk_txrx_funcs[] = { 0, 0, };
+static int x1830_i2s_clk_rx_funcs[] = { 0, 0, };
+static int x1830_i2s_sysclk_funcs[] = { 0, };
static int x1830_lcd_rgb_18bit_funcs[] = {
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
@@ -1567,6 +1903,11 @@ static const struct group_desc x1830_groups[] = {
INGENIC_PIN_GROUP("i2c0-data", x1830_i2c0),
INGENIC_PIN_GROUP("i2c1-data", x1830_i2c1),
INGENIC_PIN_GROUP("i2c2-data", x1830_i2c2),
+ INGENIC_PIN_GROUP("i2s-data-tx", x1830_i2s_data_tx),
+ INGENIC_PIN_GROUP("i2s-data-rx", x1830_i2s_data_rx),
+ INGENIC_PIN_GROUP("i2s-clk-txrx", x1830_i2s_clk_txrx),
+ INGENIC_PIN_GROUP("i2s-clk-rx", x1830_i2s_clk_rx),
+ INGENIC_PIN_GROUP("i2s-sysclk", x1830_i2s_sysclk),
INGENIC_PIN_GROUP("lcd-rgb-18bit", x1830_lcd_rgb_18bit),
INGENIC_PIN_GROUP("lcd-slcd-8bit", x1830_lcd_slcd_8bit),
INGENIC_PIN_GROUP("lcd-slcd-16bit", x1830_lcd_slcd_16bit),
@@ -1609,6 +1950,9 @@ static const char *x1830_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", };
static const char *x1830_i2c0_groups[] = { "i2c0-data", };
static const char *x1830_i2c1_groups[] = { "i2c1-data", };
static const char *x1830_i2c2_groups[] = { "i2c2-data", };
+static const char *x1830_i2s_groups[] = {
+ "i2s-data-tx", "i2s-data-rx", "i2s-clk-txrx", "i2s-clk-rx", "i2s-sysclk",
+};
static const char *x1830_lcd_groups[] = {
"lcd-rgb-18bit", "lcd-slcd-8bit", "lcd-slcd-16bit", "lcd-no-pins",
};
@@ -1633,6 +1977,7 @@ static const struct function_desc x1830_functions[] = {
{ "i2c0", x1830_i2c0_groups, ARRAY_SIZE(x1830_i2c0_groups), },
{ "i2c1", x1830_i2c1_groups, ARRAY_SIZE(x1830_i2c1_groups), },
{ "i2c2", x1830_i2c2_groups, ARRAY_SIZE(x1830_i2c2_groups), },
+ { "i2s", x1830_i2s_groups, ARRAY_SIZE(x1830_i2s_groups), },
{ "lcd", x1830_lcd_groups, ARRAY_SIZE(x1830_lcd_groups), },
{ "pwm0", x1830_pwm0_groups, ARRAY_SIZE(x1830_pwm0_groups), },
{ "pwm1", x1830_pwm1_groups, ARRAY_SIZE(x1830_pwm1_groups), },
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 42b12ea14d6b..ce2d8014b7e0 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -87,7 +87,7 @@ const struct regmap_config mcp23x08_regmap = {
};
EXPORT_SYMBOL_GPL(mcp23x08_regmap);
-static const struct reg_default mcp23x16_defaults[] = {
+static const struct reg_default mcp23x17_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff},
{.reg = MCP_IPOL << 1, .def = 0x0000},
{.reg = MCP_GPINTEN << 1, .def = 0x0000},
@@ -98,23 +98,23 @@ static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_OLAT << 1, .def = 0x0000},
};
-static const struct regmap_range mcp23x16_volatile_range = {
+static const struct regmap_range mcp23x17_volatile_range = {
.range_min = MCP_INTF << 1,
.range_max = MCP_GPIO << 1,
};
-static const struct regmap_access_table mcp23x16_volatile_table = {
- .yes_ranges = &mcp23x16_volatile_range,
+static const struct regmap_access_table mcp23x17_volatile_table = {
+ .yes_ranges = &mcp23x17_volatile_range,
.n_yes_ranges = 1,
};
-static const struct regmap_range mcp23x16_precious_range = {
- .range_min = MCP_GPIO << 1,
+static const struct regmap_range mcp23x17_precious_range = {
+ .range_min = MCP_INTCAP << 1,
.range_max = MCP_GPIO << 1,
};
-static const struct regmap_access_table mcp23x16_precious_table = {
- .yes_ranges = &mcp23x16_precious_range,
+static const struct regmap_access_table mcp23x17_precious_table = {
+ .yes_ranges = &mcp23x17_precious_range,
.n_yes_ranges = 1,
};
@@ -124,10 +124,10 @@ const struct regmap_config mcp23x17_regmap = {
.reg_stride = 2,
.max_register = MCP_OLAT << 1,
- .volatile_table = &mcp23x16_volatile_table,
- .precious_table = &mcp23x16_precious_table,
- .reg_defaults = mcp23x16_defaults,
- .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
+ .volatile_table = &mcp23x17_volatile_table,
+ .precious_table = &mcp23x17_precious_table,
+ .reg_defaults = mcp23x17_defaults,
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
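Beyond the s/23x16/23x17/ rename, the functional change above is the precious range: it now starts at MCP_INTCAP rather than MCP_GPIO. A hedged sketch of what that protects; the helper is illustrative and not part of the patch:

/*
 * On the MCP23x17, reading INTCAP or GPIO clears the pending-interrupt
 * state. Marking MCP_INTCAP..MCP_GPIO precious stops regmap itself
 * (debugfs dumps, cache syncs) from issuing such reads behind the
 * driver's back; only explicit reads like this one touch the range.
 */
static int example_read_intcap(struct mcp23s08 *mcp, unsigned int *val)
{
        return regmap_read(mcp->regmap, MCP_INTCAP << 1, val);
}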
@@ -564,7 +564,7 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
ret = mcp_read(mcp, MCP_IOCON, &status);
if (ret < 0)
- goto fail;
+ return dev_err_probe(dev, ret, "can't identify chip %d\n", addr);
mcp->irq_controller =
device_property_read_bool(dev, "interrupt-controller");
@@ -598,7 +598,7 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
ret = mcp_write(mcp, MCP_IOCON, status);
if (ret < 0)
- goto fail;
+ return dev_err_probe(dev, ret, "can't write IOCON %d\n", addr);
}
if (mcp->irq && mcp->irq_controller) {
@@ -616,7 +616,7 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
if (ret < 0)
- goto fail;
+ return dev_err_probe(dev, ret, "can't add GPIO chip\n");
mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
@@ -628,18 +628,17 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->pinctrl_desc.owner = THIS_MODULE;
mcp->pctldev = devm_pinctrl_register(dev, &mcp->pinctrl_desc, mcp);
- if (IS_ERR(mcp->pctldev)) {
- ret = PTR_ERR(mcp->pctldev);
- goto fail;
- }
+ if (IS_ERR(mcp->pctldev))
+ return dev_err_probe(dev, PTR_ERR(mcp->pctldev), "can't register controller\n");
- if (mcp->irq)
+ if (mcp->irq) {
ret = mcp23s08_irq_setup(mcp);
+ if (ret)
+ return dev_err_probe(dev, ret, "can't setup IRQ\n");
+ }
-fail:
- if (ret < 0)
- dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(mcp23s08_probe_one);
+
MODULE_LICENSE("GPL");
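The probe conversion above replaces the shared fail: label with dev_err_probe(). A minimal sketch of the idiom, under its usual semantics: the message is logged (quietly for -EPROBE_DEFER, where the reason is also recorded for devices_deferred) and the error is passed straight through, so each failure path collapses to a single return statement. The function below is illustrative, not from the patch:

static int example_probe_step(struct device *dev)
{
        int ret = -ENXIO;       /* pretend a bus access failed */

        if (ret < 0)
                return dev_err_probe(dev, ret, "bus access failed\n");

        return 0;
}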
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
index 1f47a661b0a7..9ae10318f6f3 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -119,13 +119,15 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
return -EINVAL;
}
- copy = devm_kmemdup(dev, &config, sizeof(config), GFP_KERNEL);
+ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
if (!copy)
return -ENOMEM;
copy->name = name;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
+ if (IS_ERR(mcp->regmap))
+ dev_err(dev, "regmap init failed for %s\n", mcp->chip.label);
return PTR_ERR_OR_ZERO(mcp->regmap);
}
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 425a3d764f00..a4a1b00f7f0d 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1120,7 +1120,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
{
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
- int ret, irq;
+ int irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -1147,11 +1147,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
girq->handler = handle_edge_irq;
}
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, info);
}
static const struct of_device_id ocelot_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 0401c1da79dd..aa1a1c850d05 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3155,7 +3155,9 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
if (!bank->domain)
return -ENXIO;
+ clk_enable(bank->clk);
virq = irq_create_mapping(bank->domain, offset);
+ clk_disable(bank->clk);
return (virq) ? : -ENXIO;
}
@@ -3194,7 +3196,7 @@ static void rockchip_irq_demux(struct irq_desc *desc)
irq = __ffs(pend);
pend &= ~BIT(irq);
- virq = irq_linear_revmap(bank->domain, irq);
+ virq = irq_find_mapping(bank->domain, irq);
if (!virq) {
dev_err(bank->drvdata->dev, "unmapped irq %d\n", irq);
@@ -3373,7 +3375,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
int ret;
- int i, j;
+ int i;
for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
if (!bank->valid) {
@@ -3400,7 +3402,7 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
ret = irq_alloc_domain_generic_chips(bank->domain, 32, 1,
"rockchip_gpio_irq", handle_level_irq,
- clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ clr, 0, 0);
if (ret) {
dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
bank->name);
@@ -3409,14 +3411,6 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
continue;
}
- /*
- * Linux assumes that all interrupts start out disabled/masked.
- * Our driver only uses the concept of masked and always keeps
- * things enabled, so for us that's all masked and all enabled.
- */
- writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
- writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
-
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
@@ -3433,13 +3427,17 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
+ /*
+ * Linux assumes that all interrupts start out disabled/masked.
+ * Our driver only uses the concept of masked and always keeps
+ * things enabled, so for us that's all masked and all enabled.
+ */
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTMASK);
+ writel_relaxed(0xffffffff, bank->reg_base + GPIO_INTEN);
+ gc->mask_cache = 0xffffffff;
+
irq_set_chained_handler_and_data(bank->irq,
rockchip_irq_demux, bank);
-
- /* map the gpio irqs here, when the clock is still running */
- for (j = 0 ; j < 32 ; j++)
- irq_create_mapping(bank->domain, j);
-
clk_disable(bank->clk);
}
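Two of the Rockchip hunks are coupled: the eager loop that pre-mapped all 32 IRQs per bank is gone, so mappings are now created on demand. That is exactly why rockchip_gpio_to_irq() grows the clk_enable()/clk_disable() bracket, and why mask_cache is seeded by hand once IRQ_GC_INIT_MASK_CACHE no longer does it during the (now lazy) mapping. A hedged consumer-side sketch; names are illustrative:

/*
 * The first gpio-to-irq conversion for a line triggers the mapping,
 * with the bank clock held inside so generic-chip init can touch
 * registers safely.
 */
static int example_request_gpio_irq(struct gpio_chip *gc,
                                    unsigned int offset,
                                    irq_handler_t handler, void *data)
{
        int virq = gc->to_irq(gc, offset);      /* lazy irq_create_mapping() */

        if (virq < 0)
                return virq;

        return request_irq(virq, handler, IRQF_TRIGGER_RISING,
                           "example-gpio", data);
}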
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index efe41abc5d47..f3cd7e296712 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1014,7 +1014,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
if (res)
return res;
- if (pinctrl_spec.args_count < 2) {
+ if (pinctrl_spec.args_count < 2 || pinctrl_spec.args_count > 3) {
dev_err(pcs->dev, "invalid args_count for spec: %i\n",
pinctrl_spec.args_count);
break;
@@ -1033,7 +1033,7 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
}
dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n",
- pinctrl_spec.np, offset, pinctrl_spec.args[1]);
+ pinctrl_spec.np, offset, vals[found].val);
pin = pcs_get_pin_by_offset(pcs, offset);
if (pin < 0) {
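Two fixes travel together in the pinctrl-single hunks: the specifier is now rejected unless it has exactly two or three cells, and the debug print reports vals[found].val, the value actually programmed, instead of args[1] alone. A hedged sketch of why the two differ; the real assembly of the value happens between the hunks, outside this context window:

/*
 * Illustrative only: with a three-cell #pinctrl-cells specifier the
 * register value is built from two cells, so logging args[1] by
 * itself was misleading.
 */
switch (pinctrl_spec.args_count) {
case 2:
        vals[found].val = pinctrl_spec.args[1];
        break;
case 3:
        vals[found].val = pinctrl_spec.args[1] | pinctrl_spec.args[2];
        break;
}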
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index b325a136ac48..c110f780407b 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1154,12 +1154,6 @@ static int sx150x_probe(struct i2c_client *client,
return ret;
}
- ret = pinctrl_enable(pctl->pctldev);
- if (ret) {
- dev_err(dev, "Failed to enable pinctrl device\n");
- return ret;
- }
-
/* Register GPIO controller */
pctl->gpio.base = -1;
pctl->gpio.ngpio = pctl->data->npins;
@@ -1238,6 +1232,17 @@ static int sx150x_probe(struct i2c_client *client,
if (ret)
return ret;
+ /*
+ * Pin control functions need to be enabled AFTER registering the
+ * GPIO chip because sx150x_pinconf_set() calls
+ * sx150x_gpio_direction_output().
+ */
+ ret = pinctrl_enable(pctl->pctldev);
+ if (ret) {
+ dev_err(dev, "Failed to enable pinctrl device\n");
+ return ret;
+ }
+
ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev),
0, 0, pctl->data->npins);
if (ret)
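The sx150x change is purely an ordering one, and the new comment states the invariant: pinctrl_enable() must follow gpiochip registration, because enabling can immediately trigger sx150x_pinconf_set(), which calls back into the GPIO side. A condensed sketch of the resulting probe order, assuming the driver's usual field names:

static int example_probe_order(struct device *dev,
                               struct sx150x_pinctrl *pctl)
{
        int ret;

        /* 1. register the controller, deliberately not enabled yet */
        ret = devm_pinctrl_register_and_init(dev, &pctl->pinctrl_desc,
                                             pctl, &pctl->pctldev);
        if (ret)
                return ret;

        /* 2. register the GPIO chip that the pinconf hooks call into */
        ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl);
        if (ret)
                return ret;

        /* 3. only now flip the pin controller live */
        return pinctrl_enable(pctl->pctldev);
}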
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index f8ff30cdafa6..5fe7b8aaf69d 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -62,6 +62,15 @@ config PINCTRL_IPQ6018
Qualcomm Technologies Inc. IPQ6018 platform. Select this for
IPQ6018.
+config PINCTRL_MSM8226
+ tristate "Qualcomm 8226 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc MSM8226 platform.
+
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 061ec9fb659b..9e3d9c91a444 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o
obj-$(CONFIG_PINCTRL_IPQ6018) += pinctrl-ipq6018.o
+obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index a2567e772cd5..77a25bdf0da7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -815,21 +815,14 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
- /*
- * Clear the interrupt that may be pending before we enable
- * the line.
- * This is especially a problem with the GPIOs routed to the
- * PDC. These GPIOs are direct-connect interrupts to the GIC.
- * Disabling the interrupt line at the PDC does not prevent
- * the interrupt from being latched at the GIC. The state at
- * GIC needs to be cleared before enabling.
- */
- if (d->parent_data) {
- irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
irq_chip_enable_parent(d);
- }
- msm_gpio_irq_clear_unmask(d, true);
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_clear_unmask(d, true);
}
static void msm_gpio_irq_disable(struct irq_data *d)
@@ -1077,12 +1070,10 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
* when TLMM is powered on. To allow that, enable the GPIO
* summary line to be wakeup capable at GIC.
*/
- if (d->parent_data)
- irq_chip_set_wake_parent(d, on);
-
- irq_set_irq_wake(pctrl->irq, on);
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return irq_chip_set_wake_parent(d, on);
- return 0;
+ return irq_set_irq_wake(pctrl->irq, on);
}
static int msm_gpio_irq_reqres(struct irq_data *d)
@@ -1106,6 +1097,19 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
ret = -EINVAL;
goto out;
}
+
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+
return 0;
out:
module_put(gc->owner);
@@ -1243,6 +1247,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
+ pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
if (np) {
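The three pinctrl-msm hunks share one dispatch rule worth stating once: a GPIO whose hwirq bit is set in skip_wake_irqs is owned by the wakeup parent (the PDC), so enable, wake and pending-clear operations are routed to the parent chip, and the stale-pending clear now happens once at request time instead of on every enable. A one-line predicate capturing the rule; the helper itself is illustrative, not in the patch:

static bool example_pdc_owned(struct msm_pinctrl *pctrl, struct irq_data *d)
{
        return d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs);
}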
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
new file mode 100644
index 000000000000..98779e62e951
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8226_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+
+ PINCTRL_PIN(117, "SDC1_CLK"),
+ PINCTRL_PIN(118, "SDC1_CMD"),
+ PINCTRL_PIN(119, "SDC1_DATA"),
+ PINCTRL_PIN(120, "SDC2_CLK"),
+ PINCTRL_PIN(121, "SDC2_CMD"),
+ PINCTRL_PIN(122, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+
+static const unsigned int sdc1_clk_pins[] = { 117 };
+static const unsigned int sdc1_cmd_pins[] = { 118 };
+static const unsigned int sdc1_data_pins[] = { 119 };
+static const unsigned int sdc2_clk_pins[] = { 120 };
+static const unsigned int sdc2_cmd_pins[] = { 121 };
+static const unsigned int sdc2_data_pins[] = { 122 };
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7 \
+ }, \
+ .nfuncs = 8, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x1008 + 0x10 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+/*
+ * TODO: Add the rest of the possible functions and fill out
+ * the pingroup table below.
+ */
+enum msm8226_functions {
+ MSM_MUX_gpio,
+ MSM_MUX_cci_i2c0,
+ MSM_MUX_blsp_i2c1,
+ MSM_MUX_blsp_i2c2,
+ MSM_MUX_blsp_i2c3,
+ MSM_MUX_blsp_i2c5,
+ MSM_MUX_blsp_spi1,
+ MSM_MUX_blsp_spi2,
+ MSM_MUX_blsp_spi3,
+ MSM_MUX_blsp_spi5,
+ MSM_MUX_blsp_uart1,
+ MSM_MUX_blsp_uart2,
+ MSM_MUX_blsp_uart3,
+ MSM_MUX_blsp_uart5,
+ MSM_MUX_blsp_uim1,
+ MSM_MUX_blsp_uim2,
+ MSM_MUX_blsp_uim3,
+ MSM_MUX_blsp_uim5,
+ MSM_MUX_cam_mclk0,
+ MSM_MUX_cam_mclk1,
+ MSM_MUX_wlan,
+ MSM_MUX_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+
+static const char * const blsp_uim1_groups[] = { "gpio0", "gpio1" };
+static const char * const blsp_i2c1_groups[] = { "gpio2", "gpio3" };
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3"
+};
+
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+
+static const char * const blsp_uim2_groups[] = { "gpio4", "gpio5" };
+static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7"
+};
+
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+
+static const char * const blsp_uim3_groups[] = { "gpio8", "gpio9" };
+static const char * const blsp_i2c3_groups[] = { "gpio10", "gpio11" };
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+
+static const char * const blsp_uart5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+
+static const char * const blsp_uim5_groups[] = { "gpio16", "gpio17" };
+static const char * const blsp_i2c5_groups[] = { "gpio18", "gpio19" };
+static const char * const blsp_spi5_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19"
+};
+
+static const char * const cci_i2c0_groups[] = { "gpio29", "gpio30" };
+
+static const char * const cam_mclk0_groups[] = { "gpio26" };
+static const char * const cam_mclk1_groups[] = { "gpio27" };
+
+static const char * const wlan_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
+};
+
+static const struct msm_function msm8226_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(cci_i2c0),
+ FUNCTION(blsp_uim1),
+ FUNCTION(blsp_uim2),
+ FUNCTION(blsp_uim3),
+ FUNCTION(blsp_uim5),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart5),
+ FUNCTION(cam_mclk0),
+ FUNCTION(cam_mclk1),
+ FUNCTION(wlan),
+};
+
+static const struct msm_pingroup msm8226_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, blsp_uim1, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, blsp_uim2, NA, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, blsp_uart3, blsp_uim3, NA, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, blsp_uart3, blsp_uim3, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, NA, NA, NA, NA),
+ PINGROUP(12, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi5, blsp_uart5, blsp_uim5, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA),
+ PINGROUP(20, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(27, cam_mclk1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, cci_i2c0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(30, cci_i2c0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, wlan, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, wlan, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, wlan, NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, wlan, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, wlan, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(47, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, NA, NA, NA, NA, NA, NA, NA),
+ SDC_PINGROUP(sdc1_clk, 0x2044, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x2044, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x2044, 9, 0),
+ SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6),
+ SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3),
+ SDC_PINGROUP(sdc2_data, 0x2048, 9, 0),
+};
+
+#define NUM_GPIO_PINGROUPS 117
+
+static const struct msm_pinctrl_soc_data msm8226_pinctrl = {
+ .pins = msm8226_pins,
+ .npins = ARRAY_SIZE(msm8226_pins),
+ .functions = msm8226_functions,
+ .nfunctions = ARRAY_SIZE(msm8226_functions),
+ .groups = msm8226_groups,
+ .ngroups = ARRAY_SIZE(msm8226_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int msm8226_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8226_pinctrl);
+}
+
+static const struct of_device_id msm8226_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8226-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8226_pinctrl_driver = {
+ .driver = {
+ .name = "msm8226-pinctrl",
+ .of_match_table = msm8226_pinctrl_of_match,
+ },
+ .probe = msm8226_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8226_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8226_pinctrl_driver);
+}
+arch_initcall(msm8226_pinctrl_init);
+
+static void __exit msm8226_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8226_pinctrl_driver);
+}
+module_exit(msm8226_pinctrl_exit);
+
+MODULE_AUTHOR("Bartosz Dudziak <bartosz.dudziak@snejp.pl>");
+MODULE_DESCRIPTION("Qualcomm MSM8226 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8226_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index 826df0d637ea..af144e724bd9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1313,6 +1313,22 @@ static const struct msm_pingroup sm8250_groups[] = {
[183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
};
+static const struct msm_gpio_wakeirq_map sm8250_pdc_map[] = {
+ { 0, 79 }, { 1, 84 }, { 2, 80 }, { 3, 82 }, { 4, 107 }, { 7, 43 },
+ { 11, 42 }, { 14, 44 }, { 15, 52 }, { 19, 67 }, { 23, 68 }, { 24, 105 },
+ { 27, 92 }, { 28, 106 }, { 31, 69 }, { 35, 70 }, { 39, 37 },
+ { 40, 108 }, { 43, 71 }, { 45, 72 }, { 47, 83 }, { 51, 74 }, { 55, 77 },
+ { 59, 78 }, { 63, 75 }, { 64, 81 }, { 65, 87 }, { 66, 88 }, { 67, 89 },
+ { 68, 54 }, { 70, 85 }, { 77, 46 }, { 80, 90 }, { 81, 91 }, { 83, 97 },
+ { 84, 98 }, { 86, 99 }, { 87, 100 }, { 88, 101 }, { 89, 102 },
+ { 92, 103 }, { 93, 104 }, { 100, 53 }, { 103, 47 }, { 104, 48 },
+ { 108, 49 }, { 109, 94 }, { 110, 95 }, { 111, 96 }, { 112, 55 },
+ { 113, 56 }, { 118, 50 }, { 121, 51 }, { 122, 57 }, { 123, 58 },
+ { 124, 45 }, { 126, 59 }, { 128, 76 }, { 129, 86 }, { 132, 93 },
+ { 133, 65 }, { 134, 66 }, { 136, 62 }, { 137, 63 }, { 138, 64 },
+ { 142, 60 }, { 143, 61 }
+};
+
static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.pins = sm8250_pins,
.npins = ARRAY_SIZE(sm8250_pins),
@@ -1323,6 +1339,8 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.ngpios = 181,
.tiles = sm8250_tiles,
.ntiles = ARRAY_SIZE(sm8250_tiles),
+ .wakeirq_map = sm8250_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
};
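The sm8250 map pairs each wake-capable TLMM GPIO with its PDC interrupt; wiring it into the SoC data is what lets the skip_wake_irqs routing above take effect on this platform. A hedged sketch of the lookup the core performs, a linear scan; the helper name is illustrative:

static int example_gpio_to_pdc_irq(const struct msm_pinctrl_soc_data *soc,
                                   unsigned int gpio)
{
        unsigned int i;

        for (i = 0; i < soc->nwakeirq_map; i++)
                if (soc->wakeirq_map[i].gpio == gpio)
                        return soc->wakeirq_map[i].wakeirq;

        return -ENOENT;
}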
static int sm8250_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 7fdc7ed8bd2e..e941b8440dbc 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -3,12 +3,11 @@
# Renesas SH and SH Mobile PINCTRL drivers
#
-config PINCTRL_SH_PFC
+menu "Renesas pinctrl drivers"
+
+config PINCTRL_RENESAS
bool "Renesas SoC pin control support" if COMPILE_TEST && !(ARCH_RENESAS || SUPERH)
default y if ARCH_RENESAS || SUPERH
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
select PINCTRL_PFC_EMEV2 if ARCH_EMEV2
select PINCTRL_PFC_R8A73A4 if ARCH_R8A73A4
select PINCTRL_PFC_R8A7740 if ARCH_R8A7740
@@ -53,153 +52,220 @@ config PINCTRL_SH_PFC
help
This enables pin control drivers for Renesas SuperH and ARM platforms
+config PINCTRL_SH_PFC
+ bool
+ select GENERIC_PINCONF
+ select PINMUX
+ select PINCONF
+ help
+ This enables common pin control functionality for EMMA Mobile, R-Car,
+ R-Mobile, RZ/G, SH, and SH-Mobile platforms.
+
config PINCTRL_SH_PFC_GPIO
- select GPIOLIB
bool
+ select GPIOLIB
+ select PINCTRL_SH_PFC
help
This enables pin control and GPIO drivers for SH/SH Mobile platforms
config PINCTRL_SH_FUNC_GPIO
- select PINCTRL_SH_PFC_GPIO
bool
+ select PINCTRL_SH_PFC_GPIO
help
This enables legacy function GPIOs for SH platforms
config PINCTRL_PFC_EMEV2
- bool "Emma Mobile AV2 pin control support" if COMPILE_TEST
+ bool "pin control support for Emma Mobile EV2" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A73A4
- bool "R-Mobile APE6 pin control support" if COMPILE_TEST
- select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_R8A77995
+ bool "pin control support for R-Car D3" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7740
- bool "R-Mobile A1 pin control support" if COMPILE_TEST
- select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_R8A7794
+ bool "pin control support for R-Car E2" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7742
- bool "RZ/G1H pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77990
+ bool "pin control support for R-Car E3" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7743
- bool "RZ/G1M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7779
+ bool "pin control support for R-Car H1" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7744
- bool "RZ/G1N pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7790
+ bool "pin control support for R-Car H2" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7745
- bool "RZ/G1E pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77950
+ bool "pin control support for R-Car H3 ES1.x" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77470
- bool "RZ/G1C pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77951
+ bool "pin control support for R-Car H3 ES2.0+" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A774A1
- bool "RZ/G2M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7778
+ bool "pin control support for R-Car M1A" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A774B1
- bool "RZ/G2N pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7793
+ bool "pin control support for R-Car M2-N" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A774C0
- bool "RZ/G2E pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7791
+ bool "pin control support for R-Car M2-W" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A774E1
- bool "RZ/G2H pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77965
+ bool "pin control support for R-Car M3-N" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7778
- bool "R-Car M1A pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77960
+ bool "pin control support for R-Car M3-W" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7779
- bool "R-Car H1 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77961
+ bool "pin control support for R-Car M3-W+" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7790
- bool "R-Car H2 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7792
+ bool "pin control support for R-Car V2H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7791
- bool "R-Car M2-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77980
+ bool "pin control support for R-Car V3H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7792
- bool "R-Car V2H pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77970
+ bool "pin control support for R-Car V3M" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A7793
- bool "R-Car M2-N pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7740
+ bool "pin control support for R-Mobile A1" if COMPILE_TEST
+ select PINCTRL_SH_PFC_GPIO
-config PINCTRL_PFC_R8A7794
- bool "R-Car E2 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A73A4
+ bool "pin control support for R-Mobile APE6" if COMPILE_TEST
+ select PINCTRL_SH_PFC_GPIO
-config PINCTRL_PFC_R8A77950
- bool "R-Car H3 ES1.x pin control support" if COMPILE_TEST
+config PINCTRL_RZA1
+ bool "pin control support for RZ/A1"
+ depends on OF
+ depends on ARCH_R7S72100 || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ help
+ This selects the pinctrl driver for Renesas RZ/A1 platforms.
+
+config PINCTRL_RZA2
+ bool "pin control support for RZ/A2"
+ depends on OF
+ depends on ARCH_R7S9210 || COMPILE_TEST
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ help
+ This selects the GPIO and pinctrl driver for Renesas RZ/A2 platforms.
-config PINCTRL_PFC_R8A77951
- bool "R-Car H3 ES2.0+ pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77470
+ bool "pin control support for RZ/G1C" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77960
- bool "R-Car M3-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7745
+ bool "pin control support for RZ/G1E" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77961
- bool "R-Car M3-W+ pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7742
+ bool "pin control support for RZ/G1H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77965
- bool "R-Car M3-N pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7743
+ bool "pin control support for RZ/G1M" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77970
- bool "R-Car V3M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A7744
+ bool "pin control support for RZ/G1N" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77980
- bool "R-Car V3H pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774C0
+ bool "pin control support for RZ/G2E" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77990
- bool "R-Car E3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774E1
+ bool "pin control support for RZ/G2H" if COMPILE_TEST
+ select PINCTRL_SH_PFC
-config PINCTRL_PFC_R8A77995
- bool "R-Car D3 pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774A1
+ bool "pin control support for RZ/G2M" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A774B1
+ bool "pin control support for RZ/G2N" if COMPILE_TEST
+ select PINCTRL_SH_PFC
+
+config PINCTRL_RZN1
+ bool "pin control support for RZ/N1"
+ depends on OF
+ depends on ARCH_RZN1 || COMPILE_TEST
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Renesas RZ/N1 devices.
config PINCTRL_PFC_SH7203
- bool "SH7203 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7203" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7264
- bool "SH7264 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7264" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7269
- bool "SH7269 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7269" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
-config PINCTRL_PFC_SH73A0
- bool "SH-Mobile AG5 pin control support" if COMPILE_TEST
- select PINCTRL_SH_PFC_GPIO
- select REGULATOR
-
config PINCTRL_PFC_SH7720
- bool "SH7720 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7720" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7722
- bool "SH7722 pin control support" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7723
- bool "SH-Mobile R2 pin control support" if COMPILE_TEST
- select PINCTRL_SH_FUNC_GPIO
-
-config PINCTRL_PFC_SH7724
- bool "SH-Mobile R2R pin control support" if COMPILE_TEST
+ bool "pin control support for SH7722" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7734
- bool "SH7734 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7734" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7757
- bool "SH7757 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7757" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7785
- bool "SH7785 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7785" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SH7786
- bool "SH7786 pin control support" if COMPILE_TEST
+ bool "pin control support for SH7786" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH73A0
+ bool "pin control support for SH-Mobile AG5" if COMPILE_TEST
+ select PINCTRL_SH_PFC_GPIO
+ select REGULATOR
+
+config PINCTRL_PFC_SH7723
+ bool "pin control support for SH-Mobile R2" if COMPILE_TEST
+ select PINCTRL_SH_FUNC_GPIO
+
+config PINCTRL_PFC_SH7724
+ bool "pin control support for SH-Mobile R2R" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
config PINCTRL_PFC_SHX3
- bool "SH-X3 pin control support" if COMPILE_TEST
+ bool "pin control support for SH-X3" if COMPILE_TEST
select PINCTRL_SH_FUNC_GPIO
+
+endmenu
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/renesas/Makefile
index 7bb99187cd8e..1f6d7dd019d8 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PINCTRL_SH_PFC) += core.o pinctrl.o
+obj-$(CONFIG_PINCTRL_SH_PFC) += core.o pinctrl.o
obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
-obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
+obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
obj-$(CONFIG_PINCTRL_PFC_R8A7742) += pfc-r8a7790.o
@@ -43,6 +43,10 @@ obj-$(CONFIG_PINCTRL_PFC_SH7785) += pfc-sh7785.o
obj-$(CONFIG_PINCTRL_PFC_SH7786) += pfc-sh7786.o
obj-$(CONFIG_PINCTRL_PFC_SHX3) += pfc-shx3.o
+obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
+obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
+obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+
ifeq ($(CONFIG_COMPILE_TEST),y)
CFLAGS_pfc-sh7203.o += -I$(srctree)/arch/sh/include/cpu-sh2a
CFLAGS_pfc-sh7264.o += -I$(srctree)/arch/sh/include/cpu-sh2a
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/renesas/core.c
index c528c124fb0e..c528c124fb0e 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/renesas/core.c
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/renesas/core.h
index b5b1d163e98a..b5b1d163e98a 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/renesas/core.h
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/renesas/gpio.c
index 9c6e931ae766..9c6e931ae766 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/renesas/pfc-emev2.c
index 6c66fc335d2f..6c66fc335d2f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/renesas/pfc-emev2.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/renesas/pfc-r8a73a4.c
index b21f5afe610f..b21f5afe610f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/renesas/pfc-r8a73a4.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/renesas/pfc-r8a7740.c
index fdf1b0f09f57..fdf1b0f09f57 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7740.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c
index b3b116da1bb0..b3b116da1bb0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77470.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77470.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c
index a9875038ed9b..a9875038ed9b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7778.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/renesas/pfc-r8a7779.c
index 3e47cdc1411d..3e47cdc1411d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7779.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/renesas/pfc-r8a7790.c
index f524401fec5f..60f973c5dffe 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7790.c
@@ -1871,6 +1871,86 @@ static const unsigned int avb_gmii_mux[] = {
AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
AVB_COL_MARK,
};
+/* - CAN0 ----------------------------------------------------------------- */
+static const unsigned int can0_data_pins[] = {
+ /* CAN0 RX */
+ RCAR_GP_PIN(1, 17),
+ /* CAN0 TX */
+ RCAR_GP_PIN(1, 19),
+};
+static const unsigned int can0_data_mux[] = {
+ CAN0_RX_MARK,
+ CAN0_TX_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+ /* CAN0 RXB */
+ RCAR_GP_PIN(4, 5),
+ /* CAN0 TXB */
+ RCAR_GP_PIN(4, 4),
+};
+static const unsigned int can0_data_b_mux[] = {
+ CAN0_RX_B_MARK,
+ CAN0_TX_B_MARK,
+};
+static const unsigned int can0_data_c_pins[] = {
+ /* CAN0 RXC */
+ RCAR_GP_PIN(4, 26),
+ /* CAN0 TXC */
+ RCAR_GP_PIN(4, 23),
+};
+static const unsigned int can0_data_c_mux[] = {
+ CAN0_RX_C_MARK,
+ CAN0_TX_C_MARK,
+};
+static const unsigned int can0_data_d_pins[] = {
+ /* CAN0 RXD */
+ RCAR_GP_PIN(4, 26),
+ /* CAN0 TXD */
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int can0_data_d_mux[] = {
+ CAN0_RX_D_MARK,
+ CAN0_TX_D_MARK,
+};
+/* - CAN1 ----------------------------------------------------------------- */
+static const unsigned int can1_data_pins[] = {
+ /* CAN1 RX */
+ RCAR_GP_PIN(1, 22),
+ /* CAN1 TX */
+ RCAR_GP_PIN(1, 18),
+};
+static const unsigned int can1_data_mux[] = {
+ CAN1_RX_MARK,
+ CAN1_TX_MARK,
+};
+static const unsigned int can1_data_b_pins[] = {
+ /* CAN1 RXB */
+ RCAR_GP_PIN(4, 7),
+ /* CAN1 TXB */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int can1_data_b_mux[] = {
+ CAN1_RX_B_MARK,
+ CAN1_TX_B_MARK,
+};
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(1, 21),
+};
+
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+static const unsigned int can_clk_b_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(4, 3),
+};
+
+static const unsigned int can_clk_b_mux[] = {
+ CAN_CLK_B_MARK,
+};
/* - DU RGB ----------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -3611,6 +3691,13 @@ static const unsigned int usb1_pins[] = {
static const unsigned int usb1_mux[] = {
USB1_PWEN_MARK, USB1_OVC_MARK,
};
+static const unsigned int usb1_pwen_pins[] = {
+ /* PWEN */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int usb1_pwen_mux[] = {
+ USB1_PWEN_MARK,
+};
/* - USB2 ------------------------------------------------------------------- */
static const unsigned int usb2_pins[] = {
/* PWEN, OVC */
@@ -3939,7 +4026,7 @@ static const unsigned int vin3_clk_mux[] = {
};
static const struct {
- struct sh_pfc_pin_group common[289];
+ struct sh_pfc_pin_group common[298];
struct sh_pfc_pin_group automotive[1];
} pinmux_groups = {
.common = {
@@ -3956,6 +4043,14 @@ static const struct {
SH_PFC_PIN_GROUP(avb_mdio),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(can0_data),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can0_data_c),
+ SH_PFC_PIN_GROUP(can0_data_d),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can1_data_b),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(can_clk_b),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -4193,6 +4288,7 @@ static const struct {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb0_ovc_vbus),
SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb1_pwen),
SH_PFC_PIN_GROUP(usb2),
VIN_DATA_PIN_GROUP(vin0_data, 24),
VIN_DATA_PIN_GROUP(vin0_data, 20),
@@ -4257,6 +4353,23 @@ static const char * const avb_groups[] = {
"avb_gmii",
};
+static const char * const can0_groups[] = {
+ "can0_data",
+ "can0_data_b",
+ "can0_data_c",
+ "can0_data_d",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data",
+ "can1_data_b",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+ "can_clk_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -4640,6 +4753,7 @@ static const char * const usb0_groups[] = {
static const char * const usb1_groups[] = {
"usb1",
+ "usb1_pwen",
};
static const char * const usb2_groups[] = {
@@ -4697,13 +4811,16 @@ static const char * const vin3_groups[] = {
};
static const struct {
- struct sh_pfc_function common[55];
+ struct sh_pfc_function common[58];
struct sh_pfc_function automotive[1];
} pinmux_functions = {
.common = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
SH_PFC_FUNCTION(du2),
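
For orientation: each <group>_pins[]/<group>_mux[] pair added above is stitched into the driver's tables by the SH_PFC_PIN_GROUP() and SH_PFC_FUNCTION() helpers, which is why the common[289] -> common[298] and common[55] -> common[58] bounds are bumped in the same patch (8 new CAN groups plus usb1_pwen, and 3 new functions). A rough sketch of those helpers, paraphrased from sh_pfc.h rather than quoted from this patch:

struct sh_pfc_pin_group {
	const char *name;
	const unsigned int *pins;	/* physical pins, e.g. RCAR_GP_PIN(1, 17) */
	const unsigned int *mux;	/* matching *_MARK mux settings */
	unsigned int nr_pins;
};

/* SH_PFC_PIN_GROUP(can0_data) pastes can0_data_pins[] and
 * can0_data_mux[] into one group entry named "can0_data".
 */
#define SH_PFC_PIN_GROUP(n)				\
	{						\
		.name = #n,				\
		.pins = n##_pins,			\
		.mux = n##_mux,				\
		.nr_pins = ARRAY_SIZE(n##_pins),	\
	}

/* SH_PFC_FUNCTION(can0) exposes can0_groups[] as a selectable function. */
#define SH_PFC_FUNCTION(n)				\
	{						\
		.name = #n,				\
		.groups = n##_groups,			\
		.nr_groups = ARRAY_SIZE(n##_groups),	\
	}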
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c
index bc9caf812fc1..bc9caf812fc1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7791.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/renesas/pfc-r8a7792.c
index 258f82fb31c0..258f82fb31c0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7792.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/renesas/pfc-r8a7794.c
index 34481b6c4328..34481b6c4328 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7794.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 04812e62f3a4..04812e62f3a4 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c
index a94ebe0bf5d0..a94ebe0bf5d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77951.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77951.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c
index a2496baca85d..55f0344a3d3e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/renesas/pfc-r8a7796.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7795.c
*
* R-Car Gen3 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c
index 6616f5210b9d..7a50b9b69a7d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77965.c
@@ -5,7 +5,7 @@
* Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7796.c
*
* R-Car Gen3 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/renesas/pfc-r8a77970.c
index 9f7d9c9238fc..e8a0fc468eb2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77970.c
@@ -5,7 +5,7 @@
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2017 Cogent Embedded, Inc. <source@cogentembedded.com>
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7795.c
*
* R-Car Gen3 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/renesas/pfc-r8a77980.c
index 1055f9853404..ebd07bebaeeb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77980.c
@@ -5,7 +5,7 @@
* Copyright (C) 2018 Renesas Electronics Corp.
* Copyright (C) 2018 Cogent Embedded, Inc.
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7795.c
*
* R-Car Gen3 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c
index c926a59dd21c..aed04a4c6116 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77990.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2018-2019 Renesas Electronics Corp.
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7796.c
*
* R8A7796 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c
index c10b756476b1..672251d86c2d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77995.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2017 Renesas Electronics Corp.
*
- * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+ * This file is based on the drivers/pinctrl/renesas/pfc-r8a7796.c
*
* R-Car Gen3 processor support - PFC hardware block.
*
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/renesas/pfc-sh7203.c
index 811a6f2cb1fc..811a6f2cb1fc 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/renesas/pfc-sh7203.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/renesas/pfc-sh7264.c
index 908837ea487b..908837ea487b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/renesas/pfc-sh7264.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/renesas/pfc-sh7269.c
index e2916aaa8304..e2916aaa8304 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/renesas/pfc-sh7269.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c
index afabd95105d5..afabd95105d5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/renesas/pfc-sh73a0.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/renesas/pfc-sh7720.c
index 37bcae6b3208..37bcae6b3208 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/renesas/pfc-sh7720.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/renesas/pfc-sh7722.c
index 95295be4e703..95295be4e703 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/renesas/pfc-sh7722.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/renesas/pfc-sh7723.c
index 6f08f527c010..6f08f527c010 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/renesas/pfc-sh7723.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/renesas/pfc-sh7724.c
index 7a18afecda2c..7a18afecda2c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/renesas/pfc-sh7724.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/renesas/pfc-sh7734.c
index dbc36079c381..dbc36079c381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/renesas/pfc-sh7734.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/renesas/pfc-sh7757.c
index 064e987b09cb..064e987b09cb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/renesas/pfc-sh7757.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/renesas/pfc-sh7785.c
index c4c1e288c53e..c4c1e288c53e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/renesas/pfc-sh7785.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/renesas/pfc-sh7786.c
index b8a098cd7721..b8a098cd7721 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/renesas/pfc-sh7786.c
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/renesas/pfc-shx3.c
index 22e812850964..22e812850964 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/renesas/pfc-shx3.c
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 511f232ab7bc..15dd007700c2 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -26,10 +26,10 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/slab.h>
-#include "core.h"
-#include "devicetree.h"
-#include "pinconf.h"
-#include "pinmux.h"
+#include "../core.h"
+#include "../devicetree.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
#define DRIVER_NAME "pinctrl-rza1"
@@ -928,7 +928,8 @@ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
case PIN_CONFIG_INPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_INPUT;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_OUTPUT: /* for DT backwards compatibility */
+ case PIN_CONFIG_OUTPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT;
default:
break;
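
Note the semantics behind this two-label case: in generic pinconf, PIN_CONFIG_OUTPUT carries the initial output level as its argument, while the newer PIN_CONFIG_OUTPUT_ENABLE only switches the pin driver on, so mapping both onto MUX_FLAGS_SWIO_OUTPUT keeps old "output-high"/"output-low" device trees working while accepting "output-enable". A minimal sketch of how the two configs are packed (illustrative, not from this patch), using PIN_CONF_PACKED() from <linux/pinctrl/pinconf-generic.h>:

#include <linux/pinctrl/pinconf-generic.h>

static const unsigned long output_cfgs[] = {
	/* legacy: "output-high" — the argument is the initial level */
	PIN_CONF_PACKED(PIN_CONFIG_OUTPUT, 1),
	/* newer: "output-enable" — the argument only enables the driver */
	PIN_CONF_PACKED(PIN_CONFIG_OUTPUT_ENABLE, 1),
};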
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index c5bf98c86b2b..32829eb9656c 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -17,8 +17,8 @@
#include <linux/of_device.h>
#include <linux/pinctrl/pinmux.h>
-#include "core.h"
-#include "pinmux.h"
+#include "../core.h"
+#include "../pinmux.h"
#define DRIVER_NAME "pinctrl-rza2"
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c
index 39538d40dbf3..ef5fb25b6016 100644
--- a/drivers/pinctrl/pinctrl-rzn1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c
@@ -17,9 +17,9 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "core.h"
-#include "pinconf.h"
-#include "pinctrl-utils.h"
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
/* Field positions and masks in the pinmux registers */
#define RZN1_L1_PIN_DRIVE_STRENGTH 10
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 212a4a9c3a8f..212a4a9c3a8f 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h
index eff1bb872325..eff1bb872325 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/renesas/sh_pfc.h
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 393b2b97d527..9d9facc4a6e4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -379,8 +379,6 @@ static const struct of_device_id spear310_pinctrl_of_match[] = {
static int spear310_pinctrl_probe(struct platform_device *pdev)
{
- int ret;
-
spear3xx_machdata.groups = spear310_pingroups;
spear3xx_machdata.ngroups = ARRAY_SIZE(spear310_pingroups);
spear3xx_machdata.functions = spear310_functions;
@@ -392,11 +390,7 @@ static int spear310_pinctrl_probe(struct platform_device *pdev)
spear3xx_machdata.modes_supported = false;
- ret = spear_pinctrl_probe(pdev, &spear3xx_machdata);
- if (ret)
- return ret;
-
- return 0;
+ return spear_pinctrl_probe(pdev, &spear3xx_machdata);
}
static struct platform_driver spear310_pinctrl_driver = {
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 99c10fc3d9b5..e629e3035543 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3418,8 +3418,6 @@ static const struct of_device_id spear320_pinctrl_of_match[] = {
static int spear320_pinctrl_probe(struct platform_device *pdev)
{
- int ret;
-
spear3xx_machdata.groups = spear320_pingroups;
spear3xx_machdata.ngroups = ARRAY_SIZE(spear320_pingroups);
spear3xx_machdata.functions = spear320_functions;
@@ -3433,11 +3431,7 @@ static int spear320_pinctrl_probe(struct platform_device *pdev)
pmx_init_gpio_pingroup_addr(spear3xx_machdata.gpio_pingroups,
spear3xx_machdata.ngpio_pingroups, PMX_CONFIG_REG);
- ret = spear_pinctrl_probe(pdev, &spear3xx_machdata);
- if (ret)
- return ret;
-
- return 0;
+ return spear_pinctrl_probe(pdev, &spear3xx_machdata);
}
static struct platform_driver spear320_pinctrl_driver = {
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
index 06c8671b40e7..d14f382f2392 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c
@@ -946,18 +946,7 @@ static struct platform_driver sprd_pinctrl_driver = {
.remove = sprd_pinctrl_remove,
.shutdown = sprd_pinctrl_shutdown,
};
-
-static int sprd_pinctrl_init(void)
-{
- return platform_driver_register(&sprd_pinctrl_driver);
-}
-module_init(sprd_pinctrl_init);
-
-static void sprd_pinctrl_exit(void)
-{
- platform_driver_unregister(&sprd_pinctrl_driver);
-}
-module_exit(sprd_pinctrl_exit);
+module_platform_driver(sprd_pinctrl_driver);
MODULE_DESCRIPTION("SPREADTRUM Pin Controller Driver");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index f7aae200ee15..593293584ecc 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -94,6 +94,16 @@ config PINCTRL_SUN50I_A64_R
default ARM64 && ARCH_SUNXI
select PINCTRL_SUNXI

+config PINCTRL_SUN50I_A100
+ bool "Support for the Allwinner A100 PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN50I_A100_R
+ bool "Support for the Allwinner A100 R-PIO"
+ default ARM64 && ARCH_SUNXI
+ select PINCTRL_SUNXI
+
config PINCTRL_SUN50I_H5
bool "Support for the Allwinner H5 PIO"
default ARM64 && ARCH_SUNXI
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index fafcdae8134f..8b7ff0dc3bdf 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o
obj-$(CONFIG_PINCTRL_SUN50I_A64) += pinctrl-sun50i-a64.o
obj-$(CONFIG_PINCTRL_SUN50I_A64_R) += pinctrl-sun50i-a64-r.o
+obj-$(CONFIG_PINCTRL_SUN50I_A100) += pinctrl-sun50i-a100.o
+obj-$(CONFIG_PINCTRL_SUN50I_A100_R) += pinctrl-sun50i-a100-r.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
new file mode 100644
index 000000000000..21054fcacd34
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ *
+ * Based on:
+ * huangshuosheng <huangshuosheng@allwinnertech.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin a100_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_cir"), /* IN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)),
+};
+
+static const struct sunxi_pinctrl_desc a100_r_pinctrl_data = {
+ .pins = a100_r_pins,
+ .npins = ARRAY_SIZE(a100_r_pins),
+ .pin_base = PL_BASE,
+ .irq_banks = 1,
+};
+
+static int a100_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev, &a100_r_pinctrl_data);
+}
+
+static const struct of_device_id a100_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-a100-r-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
+
+static struct platform_driver a100_r_pinctrl_driver = {
+ .probe = a100_r_pinctrl_probe,
+ .driver = {
+ .name = "sun50iw10p1-r-pinctrl",
+ .of_match_table = a100_r_pinctrl_match,
+ },
+};
+module_platform_driver(a100_r_pinctrl_driver);
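
Each SUNXI_FUNCTION(val, name) entry above records the value the driver writes into the pin's mux-register field to select that alternate function, and SUNXI_FUNCTION_IRQ_BANK() additionally records the external-interrupt bank and line the pad serves; so SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3) on PL3 means mux value 6 selects the IRQ function, bank 0, line 3. The approximate shape of these helpers, paraphrased from pinctrl-sunxi.h (not part of this patch):

#define SUNXI_FUNCTION(_val, _name)			\
	{						\
		.name = _name,				\
		.muxval = _val,	/* mux register value */\
	}

#define SUNXI_FUNCTION_IRQ_BANK(_val, _bank, _irq)	\
	{						\
		.name = "irq",				\
		.muxval = _val,				\
		.irqbank = _bank,			\
		.irqnum = _irq,				\
	}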
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
new file mode 100644
index 000000000000..19cfd1e76ee2
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+ *
+ * Based on:
+ * huangshuosheng <huangshuosheng@allwinnertech.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin a100_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CS */
+ SUNXI_FUNCTION(0x4, "jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x4, "jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x4, "jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x4, "jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* MS_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* CK_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* DO_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* DIN */
+ SUNXI_FUNCTION(0x3, "i2s0_dout0"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2s0_din1"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* DOUT */
+ SUNXI_FUNCTION(0x3, "i2s0_din0"), /* DIN0 */
+ SUNXI_FUNCTION(0x4, "i2s0_dout1"), /* DOUT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* DI_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x3, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x4, "pwm1"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* DS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* RST */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x4, "spi0"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE0 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CS1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION(0x4, "spi0"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* WP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
+ SUNXI_FUNCTION(0x4, "spi0"), /* HOLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D0P */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DP0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D0N */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DM0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D1P */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DP1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D1N */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DM1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D2P */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* CKP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* D2N */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* CKM */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKP */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DP2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* CKN */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DM2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DP3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x4, "dsi0"), /* DM3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x4, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 19)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "pwm2"),
+ SUNXI_FUNCTION(0x4, "uart4"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 20)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "pwm3"),
+ SUNXI_FUNCTION(0x4, "uart4"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 21)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm1"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 22)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0"),
+ SUNXI_FUNCTION(0x4, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 23)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "pll"), /* LOCK_DBG */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x5, "ledc"), /* LEDC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "bist0"), /* RESULT0 */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SM_VS */
+ SUNXI_FUNCTION(0x3, "bist0"), /* RESULT1 */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x5, "tcon0"), /* TRIG */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "bist0"), /* RESULT2 */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* DOUT0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "bist0"), /* RESULT3 */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* DIN0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS1 */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* MS_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI1 */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* DI_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* DO_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK */
+ SUNXI_FUNCTION(0x4, "jtag_gpu"), /* CK_GPU */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 4, 6)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s1_dout0"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2s1_din1"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s1_din0"), /* DIN0 */
+ SUNXI_FUNCTION(0x4, "i2s1_dout1"), /* DOUT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 5, 13)),
+ /* HOLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 0)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 1)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXCTL */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 2)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION(0x3, "cir0"), /* OUT */
+ SUNXI_FUNCTION(0x5, "emac0"), /* CLKIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 3)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CS */
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXD1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 4)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x4, "ledc"),
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXD0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 5)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x3, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 6)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x3, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x4, "spdif"), /* OUT */
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXCTL */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 7)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* CLK */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CS */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* MCLK */
+ SUNXI_FUNCTION(0x5, "i2s2_din2"), /* DIN2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA0 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* BCLK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* MDC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA1 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x4, "i2s2"), /* LRCK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* MDIO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 10)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA2 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x4, "i2s2_dout0"), /* DOUT0 */
+ SUNXI_FUNCTION(0x5, "i2s2_din1"), /* DIN1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 11)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "dmic"), /* DATA3 */
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "i2s2_din0"), /* DIN0 */
+ SUNXI_FUNCTION(0x5, "i2s2_dout1"), /* DOUT1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 12)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x4, "i2s3"), /* MCLK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* EPHY */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 13)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "i2s3"), /* BCLK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "i2s3"), /* LRCK */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 15)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DOUT0 */
+ SUNXI_FUNCTION(0x4, "i2s3_din1"), /* DIN1 */
+ SUNXI_FUNCTION(0x5, "emac0"), /* RXCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 16)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s3_dout1"), /* DOUT1 */
+ SUNXI_FUNCTION(0x4, "i2s3_din0"), /* DIN0 */
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXD3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 17)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "cir0"), /* OUT */
+ SUNXI_FUNCTION(0x3, "i2s3_dout2"), /* DOUT2 */
+ SUNXI_FUNCTION(0x4, "i2s3_din2"), /* DIN2 */
+ SUNXI_FUNCTION(0x5, "emac0"), /* TXD2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 18)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "cir0"), /* IN */
+ SUNXI_FUNCTION(0x3, "i2s3_dout3"), /* DOUT3 */
+ SUNXI_FUNCTION(0x4, "i2s3_din3"), /* DIN3 */
+ SUNXI_FUNCTION(0x5, "ledc"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 19)),
+};
+
+static const unsigned int a100_irq_bank_map[] = { 0, 1, 2, 3, 4, 5, 6};
+
+static const struct sunxi_pinctrl_desc a100_pinctrl_data = {
+ .pins = a100_pins,
+ .npins = ARRAY_SIZE(a100_pins),
+ .irq_banks = 7,
+ .irq_bank_map = a100_irq_bank_map,
+ .io_bias_cfg_variant = BIAS_VOLTAGE_PIO_POW_MODE_SEL,
+};
+
+static int a100_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev, &a100_pinctrl_data);
+}
+
+static const struct of_device_id a100_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-a100-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, a100_pinctrl_match);
+
+static struct platform_driver a100_pinctrl_driver = {
+ .probe = a100_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-a100-pinctrl",
+ .of_match_table = a100_pinctrl_match,
+ },
+};
+module_platform_driver(a100_pinctrl_driver);
diff --git a/drivers/pinctrl/visconti/Kconfig b/drivers/pinctrl/visconti/Kconfig
new file mode 100644
index 000000000000..42653fc60413
--- /dev/null
+++ b/drivers/pinctrl/visconti/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config PINCTRL_VISCONTI
+ bool
+ select PINMUX
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+
+config PINCTRL_TMPV7700
+ bool "Toshiba Visconti TMPV7700 series pinctrl driver"
+ depends on OF
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ select PINCTRL_VISCONTI
+ default ARCH_VISCONTI
diff --git a/drivers/pinctrl/visconti/Makefile b/drivers/pinctrl/visconti/Makefile
new file mode 100644
index 000000000000..43b2eb663bce
--- /dev/null
+++ b/drivers/pinctrl/visconti/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PINCTRL_VISCONTI) += pinctrl-common.o
+obj-$(CONFIG_PINCTRL_TMPV7700) += pinctrl-tmpv7700.o
diff --git a/drivers/pinctrl/visconti/pinctrl-common.c b/drivers/pinctrl/visconti/pinctrl-common.c
new file mode 100644
index 000000000000..0cb10b7b4430
--- /dev/null
+++ b/drivers/pinctrl/visconti/pinctrl-common.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 TOSHIBA CORPORATION
+ * Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
+ * Copyright (c) 2020 Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "pinctrl-common.h"
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+
+#define DSEL_MASK GENMASK(3, 0)
+
+/* private data */
+struct visconti_pinctrl {
+ void __iomem *base;
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc pctl_desc;
+
+ const struct visconti_pinctrl_devdata *devdata;
+
+ spinlock_t lock; /* protect pinctrl register */
+};
+
+/* pinconf */
+static int visconti_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned int _pin,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct visconti_desc_pin *pin = &priv->devdata->pins[_pin];
+ enum pin_config_param param;
+ unsigned int arg;
+ int i, ret = 0;
+ unsigned int val, set_val, pude_val;
+ unsigned long flags;
+
+ dev_dbg(priv->dev, "%s: pin = %d (%s)\n", __func__, _pin, pin->pin.name);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ for (i = 0; i < num_configs; i++) {
+ set_val = 0;
+ pude_val = 0;
+
+ param = pinconf_to_config_param(configs[i]);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ set_val = 1;
+ fallthrough;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ /* update pudsel setting */
+ val = readl(priv->base + pin->pudsel_offset);
+ val &= ~BIT(pin->pud_shift);
+ val |= set_val << pin->pud_shift;
+ writel(val, priv->base + pin->pudsel_offset);
+ pude_val = 1;
+ fallthrough;
+ case PIN_CONFIG_BIAS_DISABLE:
+ /* update pude setting */
+ val = readl(priv->base + pin->pude_offset);
+ val &= ~BIT(pin->pud_shift);
+ val |= pude_val << pin->pud_shift;
+ writel(val, priv->base + pin->pude_offset);
+ dev_dbg(priv->dev, "BIAS(%d): off = 0x%x val = 0x%x\n",
+ param, pin->pude_offset, val);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ arg = pinconf_to_config_argument(configs[i]);
+ dev_dbg(priv->dev, "DRV_STR arg = %d\n", arg);
+ switch (arg) {
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ /*
+ * I/O drive capacity setting:
+ * 2mA: 0
+ * 4mA: 1
+ * 8mA: 3
+ * 16mA: 7
+ * 24mA: 11
+ * 32mA: 15
+ */
+ set_val = DIV_ROUND_CLOSEST(arg, 2) - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ /* update drive setting */
+ val = readl(priv->base + pin->dsel_offset);
+ val &= ~(DSEL_MASK << pin->dsel_shift);
+ val |= set_val << pin->dsel_shift;
+ writel(val, priv->base + pin->dsel_offset);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ }
+err:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static int visconti_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+ const unsigned int *pins;
+ unsigned int num_pins;
+ int i, ret;
+
+ pins = priv->devdata->groups[selector].pins;
+ num_pins = priv->devdata->groups[selector].nr_pins;
+
+ dev_dbg(priv->dev, "%s: select = %d, n_pin = %d, n_config = %d\n",
+ __func__, selector, num_pins, num_configs);
+
+ for (i = 0; i < num_pins; i++) {
+ ret = visconti_pin_config_set(pctldev, pins[i],
+ configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops visconti_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_set = visconti_pin_config_set,
+ .pin_config_group_set = visconti_pin_config_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+/* pinctrl */
+static int visconti_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->devdata->nr_groups;
+}
+
+static const char *visconti_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->devdata->groups[selector].name;
+}
+
+static int visconti_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = priv->devdata->groups[selector].pins;
+ *num_pins = priv->devdata->groups[selector].nr_pins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops visconti_pinctrl_ops = {
+ .get_groups_count = visconti_get_groups_count,
+ .get_group_name = visconti_get_group_name,
+ .get_group_pins = visconti_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+/* pinmux */
+static int visconti_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->devdata->nr_functions;
+}
+
+static const char *visconti_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ return priv->devdata->functions[selector].name;
+}
+
+static int visconti_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = priv->devdata->functions[selector].groups;
+ *num_groups = priv->devdata->functions[selector].nr_groups;
+
+ return 0;
+}
+
+static int visconti_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct visconti_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+ const struct visconti_pin_function *func = &priv->devdata->functions[function];
+ const struct visconti_pin_group *grp = &priv->devdata->groups[group];
+ const struct visconti_mux *mux = &grp->mux;
+ unsigned int val;
+ unsigned long flags;
+
+ dev_dbg(priv->dev, "%s: function = %d(%s) group = %d(%s)\n", __func__,
+ function, func->name, group, grp->name);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* update mux */
+ val = readl(priv->base + mux->offset);
+ val &= ~mux->mask;
+ val |= mux->val;
+ writel(val, priv->base + mux->offset);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ dev_dbg(priv->dev, "[%x]: 0x%x\n", mux->offset, val);
+
+ return 0;
+}
+
+static const struct pinmux_ops visconti_pinmux_ops = {
+ .get_functions_count = visconti_get_functions_count,
+ .get_function_name = visconti_get_function_name,
+ .get_function_groups = visconti_get_function_groups,
+ .set_mux = visconti_set_mux,
+ .strict = true,
+};
+
+int visconti_pinctrl_probe(struct platform_device *pdev,
+ const struct visconti_pinctrl_devdata *devdata)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pinctrl *priv;
+ struct pinctrl_pin_desc *pins;
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->devdata = devdata;
+ spin_lock_init(&priv->lock);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ dev_err(dev, "unable to map I/O space\n");
+ return PTR_ERR(priv->base);
+ }
+
+ pins = devm_kcalloc(dev, devdata->nr_pins,
+ sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < devdata->nr_pins; i++)
+ pins[i] = devdata->pins[i].pin;
+
+ priv->pctl_desc.name = dev_name(dev);
+ priv->pctl_desc.owner = THIS_MODULE;
+ priv->pctl_desc.pins = pins;
+ priv->pctl_desc.npins = devdata->nr_pins;
+ priv->pctl_desc.confops = &visconti_pinconf_ops;
+ priv->pctl_desc.pctlops = &visconti_pinctrl_ops;
+ priv->pctl_desc.pmxops = &visconti_pinmux_ops;
+
+ ret = devm_pinctrl_register_and_init(dev, &priv->pctl_desc,
+ priv, &priv->pctl);
+ if (ret) {
+ dev_err(dev, "couldn't register pinctrl: %d\n", ret);
+ return ret;
+ }
+
+ if (devdata->unlock)
+ devdata->unlock(priv->base);
+
+ return pinctrl_enable(priv->pctl);
+}
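Both the drive-strength update and visconti_set_mux() above use the same locked read-modify-write sequence on a register field. A minimal sketch of that pattern as a standalone helper; visconti_field_update() is a hypothetical name, not part of this series, and the caller is assumed to hold priv->lock:

	static void visconti_field_update(void __iomem *base, unsigned int offset,
					  u32 mask, u32 val)
	{
		u32 tmp;

		tmp = readl(base + offset);	/* fetch the current register value */
		tmp &= ~mask;			/* clear the target field */
		tmp |= val & mask;		/* install the new field value */
		writel(tmp, base + offset);
	}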
diff --git a/drivers/pinctrl/visconti/pinctrl-common.h b/drivers/pinctrl/visconti/pinctrl-common.h
new file mode 100644
index 000000000000..56a2eb0225fb
--- /dev/null
+++ b/drivers/pinctrl/visconti/pinctrl-common.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 TOSHIBA CORPORATION
+ * Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
+ * Copyright (c) 2020 Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef __VISCONTI_PINCTRL_COMMON_H__
+#define __VISCONTI_PINCTRL_COMMON_H__
+
+struct pinctrl_pin_desc;
+
+/* PIN */
+#define VISCONTI_PINS(pins_name, ...) \
+ static const unsigned int pins_name ## _pins[] = { __VA_ARGS__ }
+
+struct visconti_desc_pin {
+ struct pinctrl_pin_desc pin;
+ unsigned int dsel_offset;
+ unsigned int dsel_shift;
+ unsigned int pude_offset;
+ unsigned int pudsel_offset;
+ unsigned int pud_shift;
+};
+
+#define VISCONTI_PIN(_pin, dsel, d_sh, pude, pudsel, p_sh) \
+{ \
+ .pin = _pin, \
+ .dsel_offset = dsel, \
+ .dsel_shift = d_sh, \
+ .pude_offset = pude, \
+ .pudsel_offset = pudsel, \
+ .pud_shift = p_sh, \
+}
+
+/* Group */
+#define VISCONTI_GROUPS(groups_name, ...) \
+ static const char * const groups_name ## _grps[] = { __VA_ARGS__ }
+
+struct visconti_mux {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int val;
+};
+
+struct visconti_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ unsigned int nr_pins;
+ struct visconti_mux mux;
+};
+
+#define VISCONTI_PIN_GROUP(group_name, off, msk, v) \
+{ \
+ .name = __stringify(group_name) "_grp", \
+ .pins = group_name ## _pins, \
+ .nr_pins = ARRAY_SIZE(group_name ## _pins), \
+ .mux = { \
+ .offset = off, \
+ .mask = msk, \
+ .val = v, \
+ } \
+}
+
+/* MUX */
+struct visconti_pin_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int nr_groups;
+};
+
+#define VISCONTI_PIN_FUNCTION(func) \
+{ \
+ .name = #func, \
+ .groups = func ## _grps, \
+ .nr_groups = ARRAY_SIZE(func ## _grps), \
+}
+
+/* chip dependent data */
+struct visconti_pinctrl_devdata {
+ const struct visconti_desc_pin *pins;
+ unsigned int nr_pins;
+ const struct visconti_pin_group *groups;
+ unsigned int nr_groups;
+ const struct visconti_pin_function *functions;
+ unsigned int nr_functions;
+
+ const struct visconti_mux *gpio_mux;
+
+ void (*unlock)(void __iomem *base);
+};
+
+int visconti_pinctrl_probe(struct platform_device *pdev,
+ const struct visconti_pinctrl_devdata *devdata);
+
+#endif /* __VISCONTI_PINCTRL_COMMON_H__ */
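The macros above encode a naming contract: VISCONTI_PINS() defines <name>_pins[], VISCONTI_GROUPS() defines <name>_grps[], and VISCONTI_PIN_GROUP() appends "_grp" to the stringified group name. A compressed, hypothetical sketch of how a SoC file ties them together (made-up pin numbers and register values; pinctrl-tmpv7700.c below is the real instance):

	VISCONTI_PINS(uart9, 40, 41);		/* defines uart9_pins[] */
	VISCONTI_GROUPS(uart9, "uart9_grp");	/* defines uart9_grps[]; the name
						 * matches the "_grp" suffix that
						 * VISCONTI_PIN_GROUP() adds */

	static const struct visconti_pin_group example_groups[] = {
		VISCONTI_PIN_GROUP(uart9, 0x3000, GENMASK(7, 0), 0x22),
	};

	static const struct visconti_pin_function example_functions[] = {
		VISCONTI_PIN_FUNCTION(uart9),	/* .groups = uart9_grps */
	};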
diff --git a/drivers/pinctrl/visconti/pinctrl-tmpv7700.c b/drivers/pinctrl/visconti/pinctrl-tmpv7700.c
new file mode 100644
index 000000000000..38a00d514f74
--- /dev/null
+++ b/drivers/pinctrl/visconti/pinctrl-tmpv7700.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 TOSHIBA CORPORATION
+ * Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
+ * Copyright (c) 2020 Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-common.h"
+
+#define tmpv7700_MAGIC_NUM 0x4932f70e
+
+/* register offset */
+#define REG_KEY_CTRL 0x0000
+#define REG_KEY_CMD 0x0004
+#define REG_PINMUX1 0x3000
+#define REG_PINMUX2 0x3004
+#define REG_PINMUX3 0x3008
+#define REG_PINMUX4 0x300c
+#define REG_PINMUX5 0x3010
+#define REG_IOSET 0x3014
+#define REG_IO_VSEL 0x3018
+#define REG_IO_DSEL1 0x301c
+#define REG_IO_DSEL2 0x3020
+#define REG_IO_DSEL3 0x3024
+#define REG_IO_DSEL4 0x3028
+#define REG_IO_DSEL5 0x302c
+#define REG_IO_DSEL6 0x3030
+#define REG_IO_DSEL7 0x3034
+#define REG_IO_DSEL8 0x3038
+#define REG_IO_PUDE1 0x303c
+#define REG_IO_PUDE2 0x3040
+#define REG_IO_PUDSEL1 0x3044
+#define REG_IO_PUDSEL2 0x3048
+
+/* PIN */
+static const struct visconti_desc_pin pins_tmpv7700[] = {
+ VISCONTI_PIN(PINCTRL_PIN(0, "gpio0"), REG_IO_DSEL4, 24,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 30),
+ VISCONTI_PIN(PINCTRL_PIN(1, "gpio1"), REG_IO_DSEL4, 28,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 31),
+ VISCONTI_PIN(PINCTRL_PIN(2, "gpio2"), REG_IO_DSEL5, 0,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 0),
+ VISCONTI_PIN(PINCTRL_PIN(3, "gpio3"), REG_IO_DSEL5, 4,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 1),
+ VISCONTI_PIN(PINCTRL_PIN(4, "gpio4"), REG_IO_DSEL5, 8,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 2),
+ VISCONTI_PIN(PINCTRL_PIN(5, "gpio5"), REG_IO_DSEL5, 12,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 3),
+ VISCONTI_PIN(PINCTRL_PIN(6, "gpio6"), REG_IO_DSEL5, 16,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 4),
+ VISCONTI_PIN(PINCTRL_PIN(7, "gpio7"), REG_IO_DSEL5, 20,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 5),
+ VISCONTI_PIN(PINCTRL_PIN(8, "gpio8"), REG_IO_DSEL5, 24,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 6),
+ VISCONTI_PIN(PINCTRL_PIN(9, "gpio9"), REG_IO_DSEL5, 28,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 7),
+ VISCONTI_PIN(PINCTRL_PIN(10, "gpio10"), REG_IO_DSEL6, 0,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 8),
+ VISCONTI_PIN(PINCTRL_PIN(11, "gpio11"), REG_IO_DSEL6, 4,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 9),
+ VISCONTI_PIN(PINCTRL_PIN(12, "gpio12"), REG_IO_DSEL6, 8,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 10),
+ VISCONTI_PIN(PINCTRL_PIN(13, "gpio13"), REG_IO_DSEL6, 12,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 11),
+ VISCONTI_PIN(PINCTRL_PIN(14, "gpio14"), REG_IO_DSEL6, 16,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 12),
+ VISCONTI_PIN(PINCTRL_PIN(15, "gpio15"), REG_IO_DSEL6, 20,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 13),
+ VISCONTI_PIN(PINCTRL_PIN(16, "gpio16"), REG_IO_DSEL6, 24,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 14),
+ VISCONTI_PIN(PINCTRL_PIN(17, "gpio17"), REG_IO_DSEL6, 28,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 15),
+ VISCONTI_PIN(PINCTRL_PIN(18, "gpio18"), REG_IO_DSEL7, 0,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 16),
+ VISCONTI_PIN(PINCTRL_PIN(19, "gpio19"), REG_IO_DSEL7, 4,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 17),
+ VISCONTI_PIN(PINCTRL_PIN(20, "gpio20"), REG_IO_DSEL7, 8,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 18),
+ VISCONTI_PIN(PINCTRL_PIN(21, "gpio21"), REG_IO_DSEL7, 12,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 19),
+ VISCONTI_PIN(PINCTRL_PIN(22, "gpio22"), REG_IO_DSEL7, 16,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 20),
+ VISCONTI_PIN(PINCTRL_PIN(23, "gpio23"), REG_IO_DSEL7, 20,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 21),
+ VISCONTI_PIN(PINCTRL_PIN(24, "gpio24"), REG_IO_DSEL7, 24,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 22),
+ VISCONTI_PIN(PINCTRL_PIN(25, "gpio25"), REG_IO_DSEL7, 28,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 23),
+ VISCONTI_PIN(PINCTRL_PIN(26, "gpio26"), REG_IO_DSEL8, 0,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 24),
+ VISCONTI_PIN(PINCTRL_PIN(27, "gpio27"), REG_IO_DSEL8, 4,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 25),
+ VISCONTI_PIN(PINCTRL_PIN(28, "gpio28"), REG_IO_DSEL8, 8,
+ REG_IO_PUDE2, REG_IO_PUDSEL2, 26),
+ VISCONTI_PIN(PINCTRL_PIN(29, "gpio29"), REG_IO_DSEL4, 8,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 26),
+ VISCONTI_PIN(PINCTRL_PIN(30, "gpio30"), REG_IO_DSEL4, 4,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 25),
+ VISCONTI_PIN(PINCTRL_PIN(31, "gpio31"), REG_IO_DSEL4, 0,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 24),
+ VISCONTI_PIN(PINCTRL_PIN(32, "spi_sck"), REG_IO_DSEL4, 12,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 27),
+ VISCONTI_PIN(PINCTRL_PIN(33, "spi_sdo"), REG_IO_DSEL4, 16,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 28),
+ VISCONTI_PIN(PINCTRL_PIN(34, "spi_sdi"), REG_IO_DSEL4, 20,
+ REG_IO_PUDE1, REG_IO_PUDSEL1, 29),
+};
+
+/* Group */
+VISCONTI_PINS(i2c0, 0, 1);
+VISCONTI_PINS(i2c1, 2, 3);
+VISCONTI_PINS(i2c2, 12, 13);
+VISCONTI_PINS(i2c3, 14, 15);
+VISCONTI_PINS(i2c4, 16, 17);
+VISCONTI_PINS(i2c5, 18, 19);
+VISCONTI_PINS(i2c6, 33, 34);
+VISCONTI_PINS(i2c7, 29, 32);
+VISCONTI_PINS(i2c8, 30, 31);
+VISCONTI_PINS(spi0_cs0, 29);
+VISCONTI_PINS(spi0_cs1, 30);
+VISCONTI_PINS(spi0_cs2, 31);
+VISCONTI_PINS(spi1_cs, 3);
+VISCONTI_PINS(spi2_cs, 7);
+VISCONTI_PINS(spi3_cs, 11);
+VISCONTI_PINS(spi4_cs, 15);
+VISCONTI_PINS(spi5_cs, 19);
+VISCONTI_PINS(spi6_cs, 27);
+VISCONTI_PINS(spi0, 32, 33, 34);
+VISCONTI_PINS(spi1, 0, 1, 2);
+VISCONTI_PINS(spi2, 4, 5, 6);
+VISCONTI_PINS(spi3, 8, 9, 10);
+VISCONTI_PINS(spi4, 12, 13, 14);
+VISCONTI_PINS(spi5, 16, 17, 18);
+VISCONTI_PINS(spi6, 24, 25, 26);
+VISCONTI_PINS(uart0, 4, 5, 6, 7);
+VISCONTI_PINS(uart1, 8, 9, 10, 11);
+VISCONTI_PINS(uart2, 12, 13, 14, 15);
+VISCONTI_PINS(uart3, 16, 17, 18, 19);
+VISCONTI_PINS(pwm0_gpio4, 4);
+VISCONTI_PINS(pwm1_gpio5, 5);
+VISCONTI_PINS(pwm2_gpio6, 6);
+VISCONTI_PINS(pwm3_gpio7, 7);
+VISCONTI_PINS(pwm0_gpio8, 8);
+VISCONTI_PINS(pwm1_gpio9, 9);
+VISCONTI_PINS(pwm2_gpio10, 10);
+VISCONTI_PINS(pwm3_gpio11, 11);
+VISCONTI_PINS(pwm0_gpio12, 12);
+VISCONTI_PINS(pwm1_gpio13, 13);
+VISCONTI_PINS(pwm2_gpio14, 14);
+VISCONTI_PINS(pwm3_gpio15, 15);
+VISCONTI_PINS(pwm0_gpio16, 16);
+VISCONTI_PINS(pwm1_gpio17, 17);
+VISCONTI_PINS(pwm2_gpio18, 18);
+VISCONTI_PINS(pwm3_gpio19, 19);
+VISCONTI_PINS(pcmif_out, 20, 21, 22);
+VISCONTI_PINS(pcmif_in, 24, 25, 26);
+
+static const struct visconti_pin_group groups_tmpv7700[] = {
+ VISCONTI_PIN_GROUP(i2c0, REG_PINMUX2, GENMASK(7, 0), 0x00000022),
+ VISCONTI_PIN_GROUP(i2c1, REG_PINMUX2, GENMASK(15, 8), 0x00002200),
+ VISCONTI_PIN_GROUP(i2c2, REG_PINMUX3, GENMASK(23, 16), 0x00770000),
+ VISCONTI_PIN_GROUP(i2c3, REG_PINMUX3, GENMASK(31, 24), 0x77000000),
+ VISCONTI_PIN_GROUP(i2c4, REG_PINMUX4, GENMASK(7, 0), 0x00000077),
+ VISCONTI_PIN_GROUP(i2c5, REG_PINMUX4, GENMASK(15, 8), 0x00007700),
+	VISCONTI_PIN_GROUP(i2c6, REG_PINMUX1, GENMASK(3, 0), 0x00000002),
+ VISCONTI_PIN_GROUP(i2c7, REG_PINMUX5, GENMASK(23, 20), 0x00200000),
+ VISCONTI_PIN_GROUP(i2c8, REG_PINMUX5, GENMASK(31, 24), 0x22000000),
+ VISCONTI_PIN_GROUP(spi0_cs0, REG_PINMUX5, GENMASK(23, 20), 0x00100000),
+ VISCONTI_PIN_GROUP(spi0_cs1, REG_PINMUX5, GENMASK(27, 24), 0x01000000),
+ VISCONTI_PIN_GROUP(spi0_cs2, REG_PINMUX5, GENMASK(31, 28), 0x10000000),
+ VISCONTI_PIN_GROUP(spi1_cs, REG_PINMUX2, GENMASK(15, 12), 0x00001000),
+ VISCONTI_PIN_GROUP(spi2_cs, REG_PINMUX2, GENMASK(31, 28), 0x10000000),
+ VISCONTI_PIN_GROUP(spi3_cs, REG_PINMUX3, GENMASK(15, 12), 0x00001000),
+ VISCONTI_PIN_GROUP(spi4_cs, REG_PINMUX4, GENMASK(31, 28), 0x10000000),
+ VISCONTI_PIN_GROUP(spi5_cs, REG_PINMUX4, GENMASK(15, 12), 0x00001000),
+ VISCONTI_PIN_GROUP(spi6_cs, REG_PINMUX5, GENMASK(15, 12), 0x00001000),
+ VISCONTI_PIN_GROUP(spi0, REG_PINMUX1, GENMASK(3, 0), 0x00000001),
+ VISCONTI_PIN_GROUP(spi1, REG_PINMUX2, GENMASK(11, 0), 0x00000111),
+ VISCONTI_PIN_GROUP(spi2, REG_PINMUX2, GENMASK(27, 16), 0x01110000),
+ VISCONTI_PIN_GROUP(spi3, REG_PINMUX3, GENMASK(11, 0), 0x00000111),
+ VISCONTI_PIN_GROUP(spi4, REG_PINMUX3, GENMASK(27, 16), 0x01110000),
+ VISCONTI_PIN_GROUP(spi5, REG_PINMUX4, GENMASK(11, 0), 0x00000111),
+ VISCONTI_PIN_GROUP(spi6, REG_PINMUX5, GENMASK(11, 0), 0x00000111),
+ VISCONTI_PIN_GROUP(uart0, REG_PINMUX2, GENMASK(31, 16), 0x22220000),
+ VISCONTI_PIN_GROUP(uart1, REG_PINMUX3, GENMASK(15, 0), 0x00002222),
+ VISCONTI_PIN_GROUP(uart2, REG_PINMUX3, GENMASK(31, 16), 0x22220000),
+ VISCONTI_PIN_GROUP(uart3, REG_PINMUX4, GENMASK(15, 0), 0x00002222),
+ VISCONTI_PIN_GROUP(pwm0_gpio4, REG_PINMUX2, GENMASK(19, 16), 0x00050000),
+ VISCONTI_PIN_GROUP(pwm1_gpio5, REG_PINMUX2, GENMASK(23, 20), 0x00500000),
+ VISCONTI_PIN_GROUP(pwm2_gpio6, REG_PINMUX2, GENMASK(27, 24), 0x05000000),
+ VISCONTI_PIN_GROUP(pwm3_gpio7, REG_PINMUX2, GENMASK(31, 28), 0x50000000),
+ VISCONTI_PIN_GROUP(pwm0_gpio8, REG_PINMUX3, GENMASK(3, 0), 0x00000005),
+ VISCONTI_PIN_GROUP(pwm1_gpio9, REG_PINMUX3, GENMASK(7, 4), 0x00000050),
+ VISCONTI_PIN_GROUP(pwm2_gpio10, REG_PINMUX3, GENMASK(11, 8), 0x00000500),
+ VISCONTI_PIN_GROUP(pwm3_gpio11, REG_PINMUX3, GENMASK(15, 12), 0x00005000),
+ VISCONTI_PIN_GROUP(pwm0_gpio12, REG_PINMUX3, GENMASK(19, 16), 0x00050000),
+ VISCONTI_PIN_GROUP(pwm1_gpio13, REG_PINMUX3, GENMASK(23, 20), 0x00500000),
+ VISCONTI_PIN_GROUP(pwm2_gpio14, REG_PINMUX3, GENMASK(27, 24), 0x05000000),
+ VISCONTI_PIN_GROUP(pwm3_gpio15, REG_PINMUX3, GENMASK(31, 28), 0x50000000),
+ VISCONTI_PIN_GROUP(pwm0_gpio16, REG_PINMUX4, GENMASK(3, 0), 0x00000005),
+ VISCONTI_PIN_GROUP(pwm1_gpio17, REG_PINMUX4, GENMASK(7, 4), 0x00000050),
+ VISCONTI_PIN_GROUP(pwm2_gpio18, REG_PINMUX4, GENMASK(11, 8), 0x00000500),
+ VISCONTI_PIN_GROUP(pwm3_gpio19, REG_PINMUX4, GENMASK(15, 12), 0x00005000),
+ VISCONTI_PIN_GROUP(pcmif_out, REG_PINMUX4, GENMASK(27, 16), 0x01110000),
+ VISCONTI_PIN_GROUP(pcmif_in, REG_PINMUX5, GENMASK(11, 0), 0x00000222),
+};
+
+/* MUX */
+VISCONTI_GROUPS(i2c0, "i2c0_grp");
+VISCONTI_GROUPS(i2c1, "i2c1_grp");
+VISCONTI_GROUPS(i2c2, "i2c2_grp");
+VISCONTI_GROUPS(i2c3, "i2c3_grp");
+VISCONTI_GROUPS(i2c4, "i2c4_grp");
+VISCONTI_GROUPS(i2c5, "i2c5_grp");
+VISCONTI_GROUPS(i2c6, "i2c6_grp");
+VISCONTI_GROUPS(i2c7, "i2c7_grp");
+VISCONTI_GROUPS(i2c8, "i2c8_grp");
+VISCONTI_GROUPS(spi0, "spi0_grp", "spi0_cs0_grp",
+ "spi0_cs1_grp", "spi0_cs2_grp");
+VISCONTI_GROUPS(spi1, "spi1_grp", "spi1_cs_grp");
+VISCONTI_GROUPS(spi2, "spi2_grp", "spi2_cs_grp");
+VISCONTI_GROUPS(spi3, "spi3_grp", "spi3_cs_grp");
+VISCONTI_GROUPS(spi4, "spi4_grp", "spi4_cs_grp");
+VISCONTI_GROUPS(spi5, "spi5_grp", "spi5_cs_grp");
+VISCONTI_GROUPS(spi6, "spi6_grp", "spi6_cs_grp");
+VISCONTI_GROUPS(uart0, "uart0_grp");
+VISCONTI_GROUPS(uart1, "uart1_grp");
+VISCONTI_GROUPS(uart2, "uart2_grp");
+VISCONTI_GROUPS(uart3, "uart3_grp");
+VISCONTI_GROUPS(pwm, "pwm0_gpio4_grp", "pwm0_gpio8_grp",
+ "pwm0_gpio12_grp", "pwm0_gpio16_grp",
+ "pwm1_gpio5_grp", "pwm1_gpio9_grp",
+ "pwm1_gpio13_grp", "pwm1_gpio17_grp",
+ "pwm2_gpio6_grp", "pwm2_gpio10_grp",
+ "pwm2_gpio14_grp", "pwm2_gpio18_grp",
+ "pwm3_gpio7_grp", "pwm3_gpio11_grp",
+ "pwm3_gpio15_grp", "pwm3_gpio19_grp");
+VISCONTI_GROUPS(pcmif_out, "pcmif_out_grp");
+VISCONTI_GROUPS(pcmif_in, "pcmif_in_grp");
+
+static const struct visconti_pin_function functions_tmpv7700[] = {
+ VISCONTI_PIN_FUNCTION(i2c0),
+ VISCONTI_PIN_FUNCTION(i2c1),
+ VISCONTI_PIN_FUNCTION(i2c2),
+ VISCONTI_PIN_FUNCTION(i2c3),
+ VISCONTI_PIN_FUNCTION(i2c4),
+ VISCONTI_PIN_FUNCTION(i2c5),
+ VISCONTI_PIN_FUNCTION(i2c6),
+ VISCONTI_PIN_FUNCTION(i2c7),
+ VISCONTI_PIN_FUNCTION(i2c8),
+ VISCONTI_PIN_FUNCTION(spi0),
+ VISCONTI_PIN_FUNCTION(spi1),
+ VISCONTI_PIN_FUNCTION(spi2),
+ VISCONTI_PIN_FUNCTION(spi3),
+ VISCONTI_PIN_FUNCTION(spi4),
+ VISCONTI_PIN_FUNCTION(spi5),
+ VISCONTI_PIN_FUNCTION(spi6),
+ VISCONTI_PIN_FUNCTION(uart0),
+ VISCONTI_PIN_FUNCTION(uart1),
+ VISCONTI_PIN_FUNCTION(uart2),
+ VISCONTI_PIN_FUNCTION(uart3),
+ VISCONTI_PIN_FUNCTION(pwm),
+ VISCONTI_PIN_FUNCTION(pcmif_in),
+ VISCONTI_PIN_FUNCTION(pcmif_out),
+};
+
+/* GPIO MUX */
+#define tmpv7700_GPIO_MUX(off, msk) \
+{ \
+ .offset = off, \
+ .mask = msk, \
+ .val = 0, \
+}
+
+static const struct visconti_mux gpio_mux_tmpv7700[] = {
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(3, 0)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(7, 4)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(11, 8)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(15, 12)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(19, 16)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(23, 20)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(27, 24)),
+ tmpv7700_GPIO_MUX(REG_PINMUX2, GENMASK(31, 28)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(3, 0)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(7, 4)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(11, 8)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(15, 12)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(19, 16)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(23, 20)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(27, 24)),
+ tmpv7700_GPIO_MUX(REG_PINMUX3, GENMASK(31, 28)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(3, 0)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(7, 4)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(11, 8)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(15, 12)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(19, 16)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(23, 20)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(27, 24)),
+ tmpv7700_GPIO_MUX(REG_PINMUX4, GENMASK(31, 28)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(3, 0)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(7, 4)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(11, 8)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(15, 12)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(19, 16)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(23, 20)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(27, 24)),
+ tmpv7700_GPIO_MUX(REG_PINMUX5, GENMASK(31, 28)),
+};
+
+static void tmpv7700_pinctrl_unlock(void __iomem *base)
+{
+ writel(1, base + REG_KEY_CTRL);
+ writel(tmpv7700_MAGIC_NUM, base + REG_KEY_CMD);
+}
+
+/* chip dependent data */
+static const struct visconti_pinctrl_devdata tmpv7700_pinctrl_data = {
+ .pins = pins_tmpv7700,
+ .nr_pins = ARRAY_SIZE(pins_tmpv7700),
+ .groups = groups_tmpv7700,
+ .nr_groups = ARRAY_SIZE(groups_tmpv7700),
+ .functions = functions_tmpv7700,
+ .nr_functions = ARRAY_SIZE(functions_tmpv7700),
+ .gpio_mux = gpio_mux_tmpv7700,
+ .unlock = tmpv7700_pinctrl_unlock,
+};
+
+static int tmpv7700_pinctrl_probe(struct platform_device *pdev)
+{
+ return visconti_pinctrl_probe(pdev, &tmpv7700_pinctrl_data);
+}
+
+static const struct of_device_id tmpv7700_pctrl_of_match[] = {
+ { .compatible = "toshiba,tmpv7708-pinctrl", },
+ {},
+};
+
+static struct platform_driver tmpv7700_pinctrl_driver = {
+ .probe = tmpv7700_pinctrl_probe,
+ .driver = {
+ .name = "tmpv7700-pinctrl",
+ .of_match_table = tmpv7700_pctrl_of_match,
+ },
+};
+
+static int __init tmpv7700_pinctrl_init(void)
+{
+ return platform_driver_register(&tmpv7700_pinctrl_driver);
+}
+arch_initcall(tmpv7700_pinctrl_init);
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index a056031dee81..ccc23d8686e8 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -3,16 +3,6 @@
# Platform support for Chrome OS hardware (Chromebooks and Chromeboxes)
#
-config MFD_CROS_EC
- tristate "Platform support for Chrome hardware (transitional)"
- select CHROME_PLATFORMS
- select CROS_EC
- select MFD_CROS_EC_DEV
- depends on X86 || ARM || ARM64 || COMPILE_TEST
- help
- This is a transitional Kconfig option and will be removed after
- everyone enables the parts individually.
-
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
depends on X86 || ARM || ARM64 || COMPILE_TEST
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index b59180bff5a3..de8dfb12e486 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -116,8 +116,10 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
param = (struct ec_params_lightbar *)msg->data;
param->cmd = LIGHTBAR_CMD_VERSION;
+ msg->outsize = sizeof(param->cmd);
+	msg->insize = sizeof(resp->version);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
+ if (ret < 0 && ret != -EINVAL) {
ret = 0;
goto exit;
}
@@ -298,11 +300,9 @@ static ssize_t sequence_show(struct device *dev,
goto exit;
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret == -EPROTO) {
- ret = scnprintf(buf, PAGE_SIZE,
- "ERROR: EC returned %d\n", msg->result);
- goto exit;
- } else if (ret < 0) {
+ if (ret < 0) {
+ ret = scnprintf(buf, PAGE_SIZE, "XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
goto exit;
}
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 8d52b3b4bd4e..0ecee8b8773d 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -15,6 +15,43 @@
#define EC_COMMAND_RETRIES 50
+static const int cros_ec_error_map[] = {
+ [EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
+ [EC_RES_ERROR] = -EIO,
+ [EC_RES_INVALID_PARAM] = -EINVAL,
+ [EC_RES_ACCESS_DENIED] = -EACCES,
+ [EC_RES_INVALID_RESPONSE] = -EPROTO,
+ [EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
+ [EC_RES_INVALID_CHECKSUM] = -EBADMSG,
+ [EC_RES_IN_PROGRESS] = -EINPROGRESS,
+ [EC_RES_UNAVAILABLE] = -ENODATA,
+ [EC_RES_TIMEOUT] = -ETIMEDOUT,
+ [EC_RES_OVERFLOW] = -EOVERFLOW,
+ [EC_RES_INVALID_HEADER] = -EBADR,
+ [EC_RES_REQUEST_TRUNCATED] = -EBADR,
+ [EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
+ [EC_RES_BUS_ERROR] = -EFAULT,
+ [EC_RES_BUSY] = -EBUSY,
+ [EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
+ [EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
+ [EC_RES_INVALID_DATA_CRC] = -EBADMSG,
+ [EC_RES_DUP_UNAVAILABLE] = -ENODATA,
+};
+
+static int cros_ec_map_error(uint32_t result)
+{
+ int ret = 0;
+
+ if (result != EC_RES_SUCCESS) {
+ if (result < ARRAY_SIZE(cros_ec_error_map) && cros_ec_error_map[result])
+ ret = cros_ec_error_map[result];
+ else
+ ret = -EPROTO;
+ }
+
+ return ret;
+}
+
static int prepare_packet(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -512,19 +549,22 @@ exit:
EXPORT_SYMBOL(cros_ec_query_all);
/**
- * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
* @ec_dev: EC device.
* @msg: Message to write.
*
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
+ * Call this to send a command to the ChromeOS EC. This should be used instead of calling the EC's
+ * cmd_xfer() callback directly. It returns success status only if both the command was transmitted
+ * successfully and the EC replied with success status.
*
- * Return: 0 on success or negative error code.
+ * Return:
+ * >=0 - The number of bytes transferred
+ * <0 - Linux error code
*/
-static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
- int ret;
+ int ret, mapped;
mutex_lock(&ec_dev->lock);
if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
@@ -561,42 +601,15 @@ static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return -EMSGSIZE;
}
}
+
ret = send_command(ec_dev, msg);
mutex_unlock(&ec_dev->lock);
- return ret;
-}
-
-/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * Return:
- * >=0 - The number of bytes transferred
- * -ENOTSUPP - Operation not supported
- * -EPROTO - Protocol error
- */
-int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg)
-{
- int ret;
-
- ret = cros_ec_cmd_xfer(ec_dev, msg);
- if (ret < 0) {
- dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
- } else if (msg->result == EC_RES_INVALID_VERSION) {
- dev_dbg(ec_dev->dev, "Command invalid version (err:%d)\n",
- msg->result);
- return -ENOTSUPP;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
- return -EPROTO;
+ mapped = cros_ec_map_error(msg->result);
+ if (mapped) {
+ dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
+ msg->result, mapped);
+ ret = mapped;
}
return ret;
@@ -615,7 +628,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
msg->insize = size;
msg->outsize = 0;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
ec_dev->event_data = *event;
@@ -659,7 +672,7 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
msg->insize = sizeof(ec_dev->event_data.data);
msg->outsize = 0;
- ec_dev->event_size = cros_ec_cmd_xfer(ec_dev, msg);
+ ec_dev->event_size = cros_ec_cmd_xfer_status(ec_dev, msg);
ec_dev->event_data.event_type = EC_MKBP_EVENT_KEY_MATRIX;
memcpy(&ec_dev->event_data.data, msg->data,
sizeof(ec_dev->event_data.data));
@@ -848,11 +861,9 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
params = (struct ec_params_motion_sense *)msg->data;
params->cmd = MOTIONSENSE_CMD_DUMP;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
sensor_count = ret;
- } else if (msg->result != EC_RES_SUCCESS) {
- sensor_count = -EPROTO;
} else {
resp = (struct ec_response_motion_sense *)msg->data;
sensor_count = resp->dump.sensor_count;
@@ -863,9 +874,7 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
* Check legacy mode: Let's find out if sensors are accessible
* via LPC interface.
*/
- if (sensor_count == -EPROTO &&
- ec->cmd_offset == 0 &&
- ec_dev->cmd_readmem) {
+ if (sensor_count < 0 && ec->cmd_offset == 0 && ec_dev->cmd_readmem) {
ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
1, &status);
if (ret >= 0 &&
@@ -880,9 +889,6 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
*/
sensor_count = 0;
}
- } else if (sensor_count == -EPROTO) {
- /* EC responded, but does not understand DUMP command. */
- sensor_count = 0;
}
return sensor_count;
}
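With EC result codes folded into negative errnos by cros_ec_map_error(), callers need a single ret < 0 check instead of the old -EPROTO and msg->result special cases. A hypothetical caller, not part of this series, illustrating the convention:

	static int example_get_sensor_count(struct cros_ec_dev *ec,
					    struct cros_ec_command *msg)
	{
		struct ec_response_motion_sense *resp;
		int ret;

		ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
		if (ret < 0)	/* transport error or mapped EC error */
			return ret;

		resp = (struct ec_response_motion_sense *)msg->data;
		return resp->dump.sensor_count;
	}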
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index d45ea5d5bfa4..f521a5c65091 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -150,12 +150,10 @@ static ssize_t version_show(struct device *dev,
msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset;
msg->insize = EC_HOST_PARAM_SIZE;
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret == -EPROTO) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Build info: EC error %d\n", msg->result);
- } else if (ret < 0) {
+ if (ret < 0) {
count += scnprintf(buf + count, PAGE_SIZE - count,
- "Build info: XFER ERROR %d\n", ret);
+ "Build info: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
} else {
msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
@@ -166,12 +164,10 @@ static ssize_t version_show(struct device *dev,
msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset;
msg->insize = sizeof(*r_chip);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret == -EPROTO) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip info: EC error %d\n", msg->result);
- } else if (ret < 0) {
+ if (ret < 0) {
count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip info: XFER ERROR %d\n", ret);
+ "Chip info: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
} else {
r_chip = (struct ec_response_get_chip_info *)msg->data;
@@ -190,12 +186,10 @@ static ssize_t version_show(struct device *dev,
msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset;
msg->insize = sizeof(*r_board);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret == -EPROTO) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Board version: EC error %d\n", msg->result);
- } else if (ret < 0) {
+ if (ret < 0) {
count += scnprintf(buf + count, PAGE_SIZE - count,
- "Board version: XFER ERROR %d\n", ret);
+ "Board version: XFER / EC ERROR %d / %d\n",
+ ret, msg->result);
} else {
r_board = (struct ec_response_board_version *)msg->data;
@@ -326,7 +320,7 @@ static struct attribute *__ec_attrs[] = {
static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct cros_ec_dev *ec = to_cros_ec_dev(dev);
if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h
index e9fb05f89ef0..f744b21bc655 100644
--- a/drivers/platform/chrome/cros_ec_trace.h
+++ b/drivers/platform/chrome/cros_ec_trace.h
@@ -23,14 +23,22 @@ TRACE_EVENT(cros_ec_request_start,
TP_ARGS(cmd),
TP_STRUCT__entry(
__field(uint32_t, version)
+ __field(uint32_t, offset)
__field(uint32_t, command)
+ __field(uint32_t, outsize)
+ __field(uint32_t, insize)
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->command = cmd->command;
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->outsize = cmd->outsize;
+ __entry->insize = cmd->insize;
),
- TP_printk("version: %u, command: %s", __entry->version,
- __print_symbolic(__entry->command, EC_CMDS))
+	TP_printk("version: %u, offset: %u, command: %s, outsize: %u, insize: %u",
+ __entry->version, __entry->offset,
+ __print_symbolic(__entry->command, EC_CMDS),
+ __entry->outsize, __entry->insize)
);
TRACE_EVENT(cros_ec_request_done,
@@ -38,19 +46,26 @@ TRACE_EVENT(cros_ec_request_done,
TP_ARGS(cmd, retval),
TP_STRUCT__entry(
__field(uint32_t, version)
+ __field(uint32_t, offset)
__field(uint32_t, command)
+ __field(uint32_t, outsize)
+ __field(uint32_t, insize)
__field(uint32_t, result)
__field(int, retval)
),
TP_fast_assign(
__entry->version = cmd->version;
- __entry->command = cmd->command;
+ __entry->offset = cmd->command / EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->command = cmd->command % EC_CMD_PASSTHRU_OFFSET(1);
+ __entry->outsize = cmd->outsize;
+ __entry->insize = cmd->insize;
__entry->result = cmd->result;
__entry->retval = retval;
),
- TP_printk("version: %u, command: %s, ec result: %s, retval: %d",
- __entry->version,
+	TP_printk("version: %u, offset: %u, command: %s, outsize: %u, insize: %u, ec result: %s, retval: %d",
+ __entry->version, __entry->offset,
__print_symbolic(__entry->command, EC_CMDS),
+ __entry->outsize, __entry->insize,
__print_symbolic(__entry->result, EC_RESULT),
__entry->retval)
);
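The offset/command split in the TP_fast_assign() blocks above recovers which EC (host or passthru) a command was addressed to. A worked example, assuming EC_CMD_PASSTHRU_OFFSET(n) expands to 0x4000 * (n) as defined in cros_ec_commands.h:

	uint32_t cmd = 0x4002;	/* a command sent to the passthru EC */
	uint32_t offset  = cmd / EC_CMD_PASSTHRU_OFFSET(1);	/* = 1      */
	uint32_t command = cmd % EC_CMD_PASSTHRU_OFFSET(1);	/* = 0x0002 */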
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 3fcd27ec9ad8..31be31161350 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
+#include <linux/usb/pd.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -496,6 +497,34 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
return typec_mux_set(port->mux, &port->state);
}
+static int cros_typec_enable_usb4(struct cros_typec_data *typec,
+ int port_num,
+ struct ec_response_usb_pd_control_v2 *pd_ctrl)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct enter_usb_data data;
+
+ data.eudo = EUDO_USB_MODE_USB4 << EUDO_USB_MODE_SHIFT;
+
+ /* Cable Speed */
+ data.eudo |= pd_ctrl->cable_speed << EUDO_CABLE_SPEED_SHIFT;
+
+ /* Cable Type */
+ if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
+ data.eudo |= EUDO_CABLE_TYPE_OPTICAL << EUDO_CABLE_TYPE_SHIFT;
+ else if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.eudo |= EUDO_CABLE_TYPE_RE_TIMER << EUDO_CABLE_TYPE_SHIFT;
+
+ data.active_link_training = !!(pd_ctrl->control_flags &
+ USB_PD_CTRL_ACTIVE_LINK_UNIDIR);
+
+ port->state.alt = NULL;
+ port->state.data = &data;
+ port->state.mode = TYPEC_MODE_USB4;
+
+ return typec_mux_set(port->mux, &port->state);
+}
+
static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
uint8_t mux_flags,
struct ec_response_usb_pd_control_v2 *pd_ctrl)
@@ -516,7 +545,15 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
if (ret)
return ret;
- if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
+ ret = usb_role_switch_set_role(typec->ports[port_num]->role_sw,
+ pd_ctrl->role & PD_CTRL_RESP_ROLE_DATA
+ ? USB_ROLE_HOST : USB_ROLE_DEVICE);
+ if (ret)
+ return ret;
+
+ if (mux_flags & USB_PD_MUX_USB4_ENABLED) {
+ ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
+ } else if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
} else if (mux_flags & USB_PD_MUX_DP_ENABLED) {
ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
@@ -590,8 +627,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
if (ret)
dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);
- return usb_role_switch_set_role(typec->ports[port_num]->role_sw,
- !!(resp.role & PD_CTRL_RESP_ROLE_DATA));
+ return ret;
}
static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 46482d12cffe..f3a70a312b43 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -17,7 +17,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *att, char *buf,
loff_t pos, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct cros_ec_dev *ec = to_cros_ec_dev(dev);
struct cros_ec_device *ecdev = ec->ec_dev;
struct ec_params_vbnvcontext *params;
@@ -57,7 +57,7 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct cros_ec_dev *ec = to_cros_ec_dev(dev);
struct cros_ec_device *ecdev = ec->ec_dev;
struct ec_params_vbnvcontext *params;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1762f335bac9..ecd477964d11 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -81,6 +81,7 @@ enum hp_wmi_commandtype {
HPWMI_FEATURE2_QUERY = 0x0d,
HPWMI_WIRELESS2_QUERY = 0x1b,
HPWMI_POSTCODEERROR_QUERY = 0x2a,
+ HPWMI_THERMAL_POLICY_QUERY = 0x4c,
};
enum hp_wmi_command {
@@ -861,6 +862,26 @@ fail:
return err;
}
+static int thermal_policy_setup(struct platform_device *device)
+{
+ int err, tp;
+
+ tp = hp_wmi_read_int(HPWMI_THERMAL_POLICY_QUERY);
+ if (tp < 0)
+ return tp;
+
+	/*
+	 * Call the thermal policy write command to ensure that the firmware
+	 * correctly sets the OEM variables for DPTF.
+	 */
+ err = hp_wmi_perform_query(HPWMI_THERMAL_POLICY_QUERY, HPWMI_WRITE, &tp,
+ sizeof(tp), 0);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int __init hp_wmi_bios_setup(struct platform_device *device)
{
/* clear detected rfkill devices */
@@ -872,6 +893,8 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
+ thermal_policy_setup(device);
+
return 0;
}
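thermal_policy_setup() reads the current policy and writes the identical value straight back; the write is what makes the firmware latch its OEM variables. The same read-back/write-back idiom as a standalone sketch; rewrite_wmi_int() is a hypothetical name, not part of this patch:

	static int rewrite_wmi_int(enum hp_wmi_commandtype query)
	{
		int val = hp_wmi_read_int(query);	/* current firmware value */

		if (val < 0)
			return val;

		/* write the unchanged value back to trigger firmware side effects */
		return hp_wmi_perform_query(query, HPWMI_WRITE, &val,
					    sizeof(val), 0);
	}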
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 338ea5222555..3e5fe66333f1 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -118,6 +118,10 @@ static const struct pmc_bit_map spt_pfear_map[] = {
};
static const struct pmc_bit_map *ext_spt_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of spt_reg_map for
+ * a list of core SoCs using this.
+ */
spt_pfear_map,
NULL
};
@@ -154,6 +158,7 @@ static const struct pmc_reg_map spt_reg_map = {
.ltr_show_sts = spt_ltr_show_map,
.msr_sts = msr_map,
.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
.regmap_length = SPT_PMC_MMIO_REG_LEN,
.ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
@@ -166,7 +171,6 @@ static const struct pmc_reg_map spt_reg_map = {
/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
- /* Reserved for Cannon Lake but valid for Comet Lake */
{"PMC", BIT(0)},
{"OPI-DMI", BIT(1)},
{"SPI/eSPI", BIT(2)},
@@ -192,10 +196,6 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /*
- * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
- * Tiger Lake, Elkhart Lake and Jasper Lake.
- */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -239,10 +239,6 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /*
- * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
- * Tiger Lake, ELkhart Lake and Jasper Lake.
- */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
@@ -250,12 +246,15 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
};
static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
cnp_pfear_map,
NULL
};
static const struct pmc_bit_map icl_pfear_map[] = {
- /* Ice Lake and Jasper Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -268,13 +267,16 @@ static const struct pmc_bit_map icl_pfear_map[] = {
};
static const struct pmc_bit_map *ext_icl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of icl_reg_map for
+ * a list of core SoCs using this.
+ */
cnp_pfear_map,
icl_pfear_map,
NULL
};
static const struct pmc_bit_map tgl_pfear_map[] = {
- /* Tiger Lake and Elkhart Lake generation onwards only */
{"PSF9", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -286,6 +288,10 @@ static const struct pmc_bit_map tgl_pfear_map[] = {
};
static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+ /*
+ * Check intel_pmc_core_ids[] users of tgl_reg_map for
+ * a list of core SoCs using this.
+ */
cnp_pfear_map,
tgl_pfear_map,
NULL
@@ -369,7 +375,10 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
{"ISH", CNP_PMC_LTR_ISH},
{"UFSX2", CNP_PMC_LTR_UFSX2},
{"EMMC", CNP_PMC_LTR_EMMC},
- /* Reserved for Cannon Lake but valid for Ice Lake */
+ /*
+ * Check intel_pmc_core_ids[] users of cnp_reg_map for
+ * a list of core SoCs using this.
+ */
{"WIGIG", ICL_PMC_LTR_WIGIG},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
@@ -380,6 +389,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
static const struct pmc_reg_map cnp_reg_map = {
.pfear_sts = ext_cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = SPT_PMC_SLP_S0_RES_COUNTER_STEP,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
.msr_sts = msr_map,
@@ -396,6 +406,7 @@ static const struct pmc_reg_map cnp_reg_map = {
static const struct pmc_reg_map icl_reg_map = {
.pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
.ltr_show_sts = cnp_ltr_show_map,
.msr_sts = msr_map,
@@ -409,7 +420,7 @@ static const struct pmc_reg_map icl_reg_map = {
.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
-static const struct pmc_bit_map tgl_lpm0_map[] = {
+static const struct pmc_bit_map tgl_clocksource_status_map[] = {
{"USB2PLL_OFF_STS", BIT(18)},
{"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
{"PCIe_Gen3PLL_OFF_STS", BIT(20)},
@@ -425,35 +436,35 @@ static const struct pmc_bit_map tgl_lpm0_map[] = {
{}
};
-static const struct pmc_bit_map tgl_lpm1_map[] = {
- {"SPI_PG_STS", BIT(2)},
- {"xHCI_PG_STS", BIT(3)},
- {"PCIe_Ctrller_A_PG_STS", BIT(4)},
- {"PCIe_Ctrller_B_PG_STS", BIT(5)},
- {"PCIe_Ctrller_C_PG_STS", BIT(6)},
- {"GBE_PG_STS", BIT(7)},
- {"SATA_PG_STS", BIT(8)},
- {"HDA0_PG_STS", BIT(9)},
- {"HDA1_PG_STS", BIT(10)},
- {"HDA2_PG_STS", BIT(11)},
- {"HDA3_PG_STS", BIT(12)},
- {"PCIe_Ctrller_D_PG_STS", BIT(13)},
- {"ISIO_PG_STS", BIT(14)},
- {"SMB_PG_STS", BIT(16)},
- {"ISH_PG_STS", BIT(17)},
- {"ITH_PG_STS", BIT(19)},
- {"SDX_PG_STS", BIT(20)},
- {"xDCI_PG_STS", BIT(25)},
- {"DCI_PG_STS", BIT(26)},
- {"CSME0_PG_STS", BIT(27)},
- {"CSME_KVM_PG_STS", BIT(28)},
- {"CSME1_PG_STS", BIT(29)},
- {"CSME_CLINK_PG_STS", BIT(30)},
- {"CSME2_PG_STS", BIT(31)},
+static const struct pmc_bit_map tgl_power_gating_status_map[] = {
+ {"CSME_PG_STS", BIT(0)},
+ {"SATA_PG_STS", BIT(1)},
+ {"xHCI_PG_STS", BIT(2)},
+ {"UFSX2_PG_STS", BIT(3)},
+ {"OTG_PG_STS", BIT(5)},
+ {"SPA_PG_STS", BIT(6)},
+ {"SPB_PG_STS", BIT(7)},
+ {"SPC_PG_STS", BIT(8)},
+ {"SPD_PG_STS", BIT(9)},
+ {"SPE_PG_STS", BIT(10)},
+ {"SPF_PG_STS", BIT(11)},
+ {"LSX_PG_STS", BIT(13)},
+ {"P2SB_PG_STS", BIT(14)},
+ {"PSF_PG_STS", BIT(15)},
+ {"SBR_PG_STS", BIT(16)},
+ {"OPIDMI_PG_STS", BIT(17)},
+ {"THC0_PG_STS", BIT(18)},
+ {"THC1_PG_STS", BIT(19)},
+ {"GBETSN_PG_STS", BIT(20)},
+ {"GBE_PG_STS", BIT(21)},
+ {"LPSS_PG_STS", BIT(22)},
+ {"MMP_UFSX2_PG_STS", BIT(23)},
+ {"MMP_UFSX2B_PG_STS", BIT(24)},
+ {"FIA_PG_STS", BIT(25)},
{}
};
-static const struct pmc_bit_map tgl_lpm2_map[] = {
+static const struct pmc_bit_map tgl_d3_status_map[] = {
{"ADSP_D3_STS", BIT(0)},
{"SATA_D3_STS", BIT(1)},
{"xHCI0_D3_STS", BIT(2)},
@@ -468,7 +479,7 @@ static const struct pmc_bit_map tgl_lpm2_map[] = {
{}
};
-static const struct pmc_bit_map tgl_lpm3_map[] = {
+static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
{"GPIO_COM0_VNN_REQ_STS", BIT(1)},
{"GPIO_COM1_VNN_REQ_STS", BIT(2)},
{"GPIO_COM2_VNN_REQ_STS", BIT(3)},
@@ -493,7 +504,7 @@ static const struct pmc_bit_map tgl_lpm3_map[] = {
{}
};
-static const struct pmc_bit_map tgl_lpm4_map[] = {
+static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS_0", BIT(0)},
{"PCIe_LPM_En_REQ_STS_3", BIT(3)},
{"ITH_REQ_STS_5", BIT(5)},
@@ -509,7 +520,7 @@ static const struct pmc_bit_map tgl_lpm4_map[] = {
{}
};
-static const struct pmc_bit_map tgl_lpm5_map[] = {
+static const struct pmc_bit_map tgl_signal_status_map[] = {
{"LSX_Wake0_En_STS", BIT(0)},
{"LSX_Wake0_Pol_STS", BIT(1)},
{"LSX_Wake1_En_STS", BIT(2)},
@@ -546,18 +557,19 @@ static const struct pmc_bit_map tgl_lpm5_map[] = {
};
static const struct pmc_bit_map *tgl_lpm_maps[] = {
- tgl_lpm0_map,
- tgl_lpm1_map,
- tgl_lpm2_map,
- tgl_lpm3_map,
- tgl_lpm4_map,
- tgl_lpm5_map,
+ tgl_clocksource_status_map,
+ tgl_power_gating_status_map,
+ tgl_d3_status_map,
+ tgl_vnn_req_status_map,
+ tgl_vnn_misc_status_map,
+ tgl_signal_status_map,
NULL
};
static const struct pmc_reg_map tgl_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
.ltr_show_sts = cnp_ltr_show_map,
.msr_sts = msr_map,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
@@ -586,9 +598,9 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
writel(val, pmcdev->regbase + reg_offset);
}
-static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
+static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
{
- return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+ return (u64)value * pmcdev->map->slp_s0_res_counter_step;
}
static int pmc_core_dev_state_get(void *data, u64 *val)
@@ -598,7 +610,7 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
u32 value;
value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
- *val = pmc_core_adjust_slp_s0_step(value);
+ *val = pmc_core_adjust_slp_s0_step(pmcdev, value);
return 0;
}
@@ -628,7 +640,7 @@ static void pmc_core_slps0_display(struct pmc_dev *pmcdev, struct device *dev,
offset += 4;
while (map->name) {
if (dev)
- dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
+ dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
map->name,
data & map->bit_mask ? "Yes" : "No");
if (s)
@@ -671,7 +683,7 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
for (idx = 0; idx < arr_size; idx++) {
if (dev)
- dev_dbg(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
+ dev_info(dev, "\nLPM_%s_%d:\t0x%x\n", str, idx,
lpm_regs[idx]);
if (s)
seq_printf(s, "\nLPM_%s_%d:\t0x%x\n", str, idx,
@@ -679,7 +691,7 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
for (index = 0; maps[idx][index].name && index < len; index++) {
bit_mask = maps[idx][index].bit_mask;
if (dev)
- dev_dbg(dev, "%-30s %-30d\n",
+ dev_info(dev, "%-30s %-30d\n",
maps[idx][index].name,
lpm_regs[idx] & bit_mask ? 1 : 0);
if (s)
@@ -1147,6 +1159,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map),
{}
};
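The new slp_s0_res_counter_step field makes the residency scaling per-platform instead of hardcoding the Sunrise Point step. Assuming the step is the counter granularity in microseconds (the debugfs node is slp_s0_residency_usec), a worked example with the step values introduced by this patch:

	u32 raw = 1000;			/* raw SLP_S0 residency counter reading */
	u64 spt_us = (u64)raw * 0x68;	/* Sunrise Point: 104 us/tick -> 104000 us */
	u64 icl_us = (u64)raw * 0x64;	/* Ice Lake:      100 us/tick -> 100000 us */
	u64 tgl_us = (u64)raw * 0x7A;	/* Tiger Lake:    122 us/tick -> 122000 us */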
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index 5eae55d80226..f33cd2c34835 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -30,7 +30,7 @@
#define SPT_PMC_MPHY_CORE_STS_1 0x1142
#define SPT_PMC_MPHY_COM_STS_0 0x1155
#define SPT_PMC_MMIO_REG_LEN 0x1000
-#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x68
#define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
#define MTPMC_MASK 0xffff0000
#define PPFEAR_MAX_NUM_ENTRIES 12
@@ -185,8 +185,10 @@ enum ppfear_regs {
#define ICL_PPFEAR_NUM_ENTRIES 9
#define ICL_NUM_IP_IGN_ALLOWED 20
#define ICL_PMC_LTR_WIGIG 0x1BFC
+#define ICL_PMC_SLP_S0_RES_COUNTER_STEP 0x64
#define TGL_NUM_IP_IGN_ALLOWED 22
+#define TGL_PMC_SLP_S0_RES_COUNTER_STEP 0x7A
/*
* Tigerlake Power Management Controller register offsets
@@ -245,6 +247,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *msr_sts;
const struct pmc_bit_map **lpm_sts;
const u32 slp_s0_offset;
+ const int slp_s0_res_counter_step;
const u32 ltr_ignore_offset;
const int regmap_length;
const u32 ppfear0_offset;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 1506ec0a4777..986ad3dda1c1 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -328,15 +328,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
},
};
-static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
- {
- I2C_BOARD_INFO("24c32", 0x51),
- },
- {
- I2C_BOARD_INFO("24c32", 0x50),
- },
-};
-
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
@@ -770,15 +761,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
- .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
@@ -1950,6 +1939,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.data = mlxplat_mlxcpld_default_fan_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
};
/* Watchdog type1: hardware implementation version1
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eae3579f106f..e3810675090a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1913,6 +1913,10 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_CALCULATOR,
TP_ACPI_HOTKEYSCAN_BLUETOOTH,
TP_ACPI_HOTKEYSCAN_KEYBOARD,
+ TP_ACPI_HOTKEYSCAN_FN_RIGHT_SHIFT, /* Used by "Lenovo Quick Clean" */
+ TP_ACPI_HOTKEYSCAN_NOTIFICATION_CENTER,
+ TP_ACPI_HOTKEYSCAN_PICKUP_PHONE,
+ TP_ACPI_HOTKEYSCAN_HANGUP_PHONE,
/* Hotkey keymap size */
TPACPI_HOTKEY_MAP_LEN
@@ -3429,11 +3433,15 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN,
- KEY_BOOKMARKS, /* Favorite app, 0x311 */
- KEY_RESERVED, /* Clipping tool */
- KEY_CALC, /* Calculator (above numpad, P52) */
- KEY_BLUETOOTH, /* Bluetooth */
- KEY_KEYBOARD /* Keyboard, 0x315 */
+ KEY_BOOKMARKS, /* Favorite app, 0x311 */
+ KEY_SELECTIVE_SCREENSHOT, /* Clipping tool */
+ KEY_CALC, /* Calculator (above numpad, P52) */
+ KEY_BLUETOOTH, /* Bluetooth */
+ KEY_KEYBOARD, /* Keyboard, 0x315 */
+ KEY_FN_RIGHT_SHIFT, /* Fn + right Shift */
+ KEY_NOTIFICATION_CENTER, /* Notification Center */
+ KEY_PICKUP_PHONE, /* Answer incoming call */
+ KEY_HANGUP_PHONE, /* Decline incoming call */
},
};
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 3bf18d718975..a50ab002e9e4 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -51,7 +51,7 @@ static void pnp_remove_protocol(struct pnp_protocol *protocol)
}
/**
- * pnp_protocol_register - adds a pnp protocol to the pnp layer
+ * pnp_register_protocol - adds a pnp protocol to the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*
* Ex protocols: ISAPNP, PNPBIOS, etc
@@ -91,7 +91,7 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
}
/**
- * pnp_protocol_unregister - removes a pnp protocol from the pnp layer
+ * pnp_unregister_protocol - removes a pnp protocol from the pnp layer
* @protocol: pointer to the corresponding pnp_protocol structure
*/
void pnp_unregister_protocol(struct pnp_protocol *protocol)
diff --git a/drivers/pnp/isapnp/compat.c b/drivers/pnp/isapnp/compat.c
index 6c845b628316..035e95092489 100644
--- a/drivers/pnp/isapnp/compat.c
+++ b/drivers/pnp/isapnp/compat.c
@@ -21,28 +21,6 @@ static void pnp_convert_id(char *buf, unsigned short vendor,
(device >> 12) & 0x0f, (device >> 8) & 0x0f);
}
-struct pnp_card *pnp_find_card(unsigned short vendor, unsigned short device,
- struct pnp_card *from)
-{
- char id[8];
- char any[8];
- struct list_head *list;
-
- pnp_convert_id(id, vendor, device);
- pnp_convert_id(any, ISAPNP_ANY_ID, ISAPNP_ANY_ID);
-
- list = from ? from->global_list.next : pnp_cards.next;
-
- while (list != &pnp_cards) {
- struct pnp_card *card = global_to_pnp_card(list);
-
- if (compare_pnp_id(card->id, id) || (memcmp(id, any, 7) == 0))
- return card;
- list = list->next;
- }
- return NULL;
-}
-
struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
unsigned short function, struct pnp_dev *from)
{
@@ -86,5 +64,4 @@ struct pnp_dev *pnp_find_dev(struct pnp_card *card, unsigned short vendor,
return NULL;
}
-EXPORT_SYMBOL(pnp_find_card);
EXPORT_SYMBOL(pnp_find_dev);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index de99f371d362..ac98b9919029 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -226,8 +226,6 @@ static void quirk_ad1815_mpu_resources(struct pnp_dev *dev)
dev_info(&dev->dev, "made independent IRQ optional\n");
}
-#include <linux/pci.h>
-
static void quirk_system_pci_resources(struct pnp_dev *dev)
{
struct pci_dev *pdev = NULL;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index ff0350ca3b74..696bf77a7042 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-source "drivers/power/avs/Kconfig"
source "drivers/power/reset/Kconfig"
source "drivers/power/supply/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b7c2e372186b..effbf0377f32 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_POWER_RESET) += reset/
obj-$(CONFIG_POWER_SUPPLY) += supply/
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
deleted file mode 100644
index cdb4237bfd02..000000000000
--- a/drivers/power/avs/Kconfig
+++ /dev/null
@@ -1,37 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menuconfig POWER_AVS
- bool "Adaptive Voltage Scaling class support"
- help
- AVS is a power management technique which finely controls the
- operating voltage of a device in order to optimize (i.e. reduce)
- its power consumption.
- At a given operating point the voltage is adapted depending on
- static factors (chip manufacturing process) and dynamic factors
- (temperature depending performance).
- AVS is also called SmartReflex on OMAP devices.
-
- Say Y here to enable Adaptive Voltage Scaling class support.
-
-config QCOM_CPR
- tristate "QCOM Core Power Reduction (CPR) support"
- depends on POWER_AVS && HAS_IOMEM
- select PM_OPP
- select REGMAP
- help
- Say Y here to enable support for the CPR hardware found on Qualcomm
- SoCs like QCS404.
-
- This driver populates CPU OPPs tables and makes adjustments to the
- tables based on feedback from the CPR hardware. If you want to do
- CPUfrequency scaling say Y here.
-
- To compile this driver as a module, choose M here: the module will
- be called qcom-cpr
-
-config ROCKCHIP_IODOMAIN
- tristate "Rockchip IO domain support"
- depends on POWER_AVS && ARCH_ROCKCHIP && OF
- help
- Say y here to enable support io domains on Rockchip SoCs. It is
- necessary for the io domain setting of the SoC to match the
- voltage supplied by the regulators.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
deleted file mode 100644
index 9007d05853e2..000000000000
--- a/drivers/power/avs/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
-obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
-obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 0a1fb5c74f83..d55b3727e00e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -129,10 +129,10 @@ config POWER_RESET_QCOM_PON
config POWER_RESET_OCELOT_RESET
bool "Microsemi Ocelot reset driver"
- depends on MSCC_OCELOT || COMPILE_TEST
+ depends on MSCC_OCELOT || ARCH_SPARX5 || COMPILE_TEST
select MFD_SYSCON
help
- This driver supports restart for Microsemi Ocelot SoC.
+	  This driver supports restart for the Microsemi Ocelot SoC and
+	  similar devices.
config POWER_RESET_OXNAS
bool "OXNAS SoC restart driver"
diff --git a/drivers/power/reset/ocelot-reset.c b/drivers/power/reset/ocelot-reset.c
index 419952c61fd0..f74e1dbb4ba3 100644
--- a/drivers/power/reset/ocelot-reset.c
+++ b/drivers/power/reset/ocelot-reset.c
@@ -15,15 +15,20 @@
#include <linux/reboot.h>
#include <linux/regmap.h>
+struct reset_props {
+ const char *syscon;
+ u32 protect_reg;
+ u32 vcore_protect;
+ u32 if_si_owner_bit;
+};
+
struct ocelot_reset_context {
void __iomem *base;
struct regmap *cpu_ctrl;
+ const struct reset_props *props;
struct notifier_block restart_handler;
};
-#define ICPU_CFG_CPU_SYSTEM_CTRL_RESET 0x20
-#define CORE_RST_PROTECT BIT(2)
-
#define SOFT_CHIP_RST BIT(0)
#define ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
@@ -31,7 +36,6 @@ struct ocelot_reset_context {
#define IF_SI_OWNER_SISL 0
#define IF_SI_OWNER_SIBM 1
#define IF_SI_OWNER_SIMC 2
-#define IF_SI_OWNER_OFFSET 4
static int ocelot_restart_handle(struct notifier_block *this,
unsigned long mode, void *cmd)
@@ -39,15 +43,18 @@ static int ocelot_restart_handle(struct notifier_block *this,
struct ocelot_reset_context *ctx = container_of(this, struct
ocelot_reset_context,
restart_handler);
+ u32 if_si_owner_bit = ctx->props->if_si_owner_bit;
/* Make sure the core is not protected from reset */
- regmap_update_bits(ctx->cpu_ctrl, ICPU_CFG_CPU_SYSTEM_CTRL_RESET,
- CORE_RST_PROTECT, 0);
+ regmap_update_bits(ctx->cpu_ctrl, ctx->props->protect_reg,
+ ctx->props->vcore_protect, 0);
/* Make the SI back to boot mode */
regmap_update_bits(ctx->cpu_ctrl, ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
- IF_SI_OWNER_MASK << IF_SI_OWNER_OFFSET,
- IF_SI_OWNER_SIBM << IF_SI_OWNER_OFFSET);
+ IF_SI_OWNER_MASK << if_si_owner_bit,
+ IF_SI_OWNER_SIBM << if_si_owner_bit);
+
+ pr_emerg("Resetting SoC\n");
writel(SOFT_CHIP_RST, ctx->base);
@@ -72,9 +79,13 @@ static int ocelot_reset_probe(struct platform_device *pdev)
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
- ctx->cpu_ctrl = syscon_regmap_lookup_by_compatible("mscc,ocelot-cpu-syscon");
- if (IS_ERR(ctx->cpu_ctrl))
+ ctx->props = device_get_match_data(dev);
+
+ ctx->cpu_ctrl = syscon_regmap_lookup_by_compatible(ctx->props->syscon);
+ if (IS_ERR(ctx->cpu_ctrl)) {
+ dev_err(dev, "No syscon map: %s\n", ctx->props->syscon);
return PTR_ERR(ctx->cpu_ctrl);
+ }
ctx->restart_handler.notifier_call = ocelot_restart_handle;
ctx->restart_handler.priority = 192;
@@ -85,9 +96,29 @@ static int ocelot_reset_probe(struct platform_device *pdev)
return err;
}
+static const struct reset_props reset_props_ocelot = {
+ .syscon = "mscc,ocelot-cpu-syscon",
+ .protect_reg = 0x20,
+ .vcore_protect = BIT(2),
+ .if_si_owner_bit = 4,
+};
+
+static const struct reset_props reset_props_sparx5 = {
+ .syscon = "microchip,sparx5-cpu-syscon",
+ .protect_reg = 0x84,
+ .vcore_protect = BIT(10),
+ .if_si_owner_bit = 6,
+};
+
static const struct of_device_id ocelot_reset_of_match[] = {
- { .compatible = "mscc,ocelot-chip-reset" },
- {}
+ {
+ .compatible = "mscc,ocelot-chip-reset",
+ .data = &reset_props_ocelot
+ }, {
+ .compatible = "microchip,sparx5-chip-reset",
+ .data = &reset_props_sparx5
+ },
+ { /*sentinel*/ }
};
static struct platform_driver ocelot_reset_driver = {
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index faf2830aa152..eec646c568b7 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -164,7 +164,7 @@ config BATTERY_DS2782
config BATTERY_LEGO_EV3
tristate "LEGO MINDSTORMS EV3 battery"
- depends on OF && IIO && GPIOLIB
+ depends on OF && IIO && GPIOLIB && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
help
Say Y here to enable support for the LEGO MINDSTORMS EV3 battery.
@@ -367,10 +367,15 @@ config AXP288_FUEL_GAUGE
config BATTERY_MAX17040
tristate "Maxim MAX17040 Fuel Gauge"
depends on I2C
+ select REGMAP_I2C
help
- MAX17040 is fuel-gauge systems for lithium-ion (Li+) batteries
- in handheld and portable equipment. The MAX17040 is configured
- to operate with a single lithium cell
+ Maxim ModelGauge devices are fuel-gauge systems for lithium-ion
+ (Li+) batteries in handheld and portable equipment, including
+ max17040, max17041, max17043, max17044, max17048, max17049,
+ max17058 and max17059. The same gauge is also integrated in some
+ chips such as the max77836.
+
+ The driver supports reporting SOC (State of Charge, i.e. capacity),
+ voltage and a configurable low-SOC wakeup interrupt.
config BATTERY_MAX17042
tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
@@ -631,13 +636,22 @@ config CHARGER_BQ25890
help
Say Y to enable support for the TI BQ25890 battery charger.
+config CHARGER_BQ25980
+ tristate "TI BQ25980 battery charger driver"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the TI BQ25980, BQ25975 and BQ25960
+ series of fast battery chargers.
+
config CHARGER_SMB347
- tristate "Summit Microelectronics SMB347 Battery Charger"
+ tristate "Summit Microelectronics SMB3XX Battery Charger"
depends on I2C
select REGMAP_I2C
help
- Say Y to include support for Summit Microelectronics SMB347
- Battery Charger.
+ Say Y to include support for Summit Microelectronics SMB345,
+ SMB347 or SMB358 Battery Charger.
config CHARGER_TPS65090
tristate "TPS65090 battery charger driver"
@@ -752,4 +766,12 @@ config CHARGER_WILCO
information can be found in
Documentation/ABI/testing/sysfs-class-power-wilco
+config RN5T618_POWER
+ tristate "RN5T618 charger/fuel gauge support"
+ depends on MFD_RN5T618
+ help
+ Say Y here to enable support for the fuel gauge and charger of the
+ RN5T618 PMIC family.
+ This driver can also be built as a module. If so, the module will be
+ called rn5t618_power.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b3c694a65114..dd4b86318cd9 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o
obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o
obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
+obj-$(CONFIG_CHARGER_BQ25980) += bq25980_charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
@@ -96,3 +97,4 @@ obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o
obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
+obj-$(CONFIG_RN5T618_POWER) += rn5t618_power.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 7eec415c82a3..592a73d4dde6 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -653,7 +653,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
/*
* negative value for Discharging
- * convert 2's compliment into decimal
+ * convert 2's complement into decimal
*/
if (high & 0x10)
val = (low | (high << 8) | 0xFFFFE000);
@@ -781,7 +781,7 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
if (ret < 0)
goto exit;
- /* Check for sign bit in case of negative value, 2's compliment */
+ /* Check for sign bit in case of negative value, 2's complement */
if (high & 0x10)
val = (low | (med << 8) | (high << 16) | 0xFFE00000);
else
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index 8e60cb0f3c3f..96cb3290bcaa 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -1152,6 +1152,7 @@ static const struct of_device_id bq24257_of_match[] = {
};
MODULE_DEVICE_TABLE(of, bq24257_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id bq24257_acpi_match[] = {
{ "BQ242500", BQ24250 },
{ "BQ242510", BQ24251 },
@@ -1159,6 +1160,7 @@ static const struct acpi_device_id bq24257_acpi_match[] = {
{},
};
MODULE_DEVICE_TABLE(acpi, bq24257_acpi_match);
+#endif
static struct i2c_driver bq24257_driver = {
.driver = {
diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c
index 36b0c8c98d40..374b112f712a 100644
--- a/drivers/power/supply/bq2515x_charger.c
+++ b/drivers/power/supply/bq2515x_charger.c
@@ -168,7 +168,7 @@ enum bq2515x_id {
* @device_id: value of device_id
* @mains_online: boolean value indicating power supply online
*
- * @bq2515x_init_data init_data: charger initialization data structure
+ * @init_data: charger initialization data structure
*/
struct bq2515x_device {
struct power_supply *mains;
@@ -188,7 +188,7 @@ struct bq2515x_device {
struct bq2515x_init_data init_data;
};
-static struct reg_default bq25150_reg_defaults[] = {
+static const struct reg_default bq25150_reg_defaults[] = {
{BQ2515X_FLAG0, 0x0},
{BQ2515X_FLAG1, 0x0},
{BQ2515X_FLAG2, 0x0},
@@ -227,7 +227,7 @@ static struct reg_default bq25150_reg_defaults[] = {
{BQ2515X_DEVICE_ID, 0x20},
};
-static struct reg_default bq25155_reg_defaults[] = {
+static const struct reg_default bq25155_reg_defaults[] = {
{BQ2515X_FLAG0, 0x0},
{BQ2515X_FLAG1, 0x0},
{BQ2515X_FLAG2, 0x0},
@@ -886,14 +886,14 @@ static int bq2515x_battery_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property bq2515x_battery_properties[] = {
+static const enum power_supply_property bq2515x_battery_properties[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
};
-static enum power_supply_property bq2515x_mains_properties[] = {
+static const enum power_supply_property bq2515x_mains_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
@@ -905,7 +905,7 @@ static enum power_supply_property bq2515x_mains_properties[] = {
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
};
-static struct power_supply_desc bq2515x_mains_desc = {
+static const struct power_supply_desc bq2515x_mains_desc = {
.name = "bq2515x-mains",
.type = POWER_SUPPLY_TYPE_MAINS,
.get_property = bq2515x_mains_get_property,
@@ -915,7 +915,7 @@ static struct power_supply_desc bq2515x_mains_desc = {
.property_is_writeable = bq2515x_power_supply_property_is_writeable,
};
-static struct power_supply_desc bq2515x_battery_desc = {
+static const struct power_supply_desc bq2515x_battery_desc = {
.name = "bq2515x-battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.get_property = bq2515x_battery_get_property,
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 77150667e36b..34c21c51bac1 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -83,6 +83,8 @@ struct bq25890_init_data {
u8 boostf; /* boost frequency */
u8 ilim_en; /* enable ILIM pin */
u8 treg; /* thermal regulation threshold */
+ u8 rbatcomp; /* IBAT sense resistor value */
+ u8 vclamp; /* IBAT compensation voltage limit */
};
struct bq25890_state {
@@ -258,6 +260,8 @@ enum bq25890_table_ids {
TBL_VREG,
TBL_BOOSTV,
TBL_SYSVMIN,
+ TBL_VBATCOMP,
+ TBL_RBATCOMP,
/* lookup tables */
TBL_TREG,
@@ -299,6 +303,8 @@ static const union {
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
+ [TBL_VBATCOMP] = { .rt = {0, 224000, 32000} }, /* uV */
+ [TBL_RBATCOMP] = { .rt = {0, 140000, 20000} }, /* uOhm */
/* lookup tables */
[TBL_TREG] = { .lt = {bq25890_treg_tbl, BQ25890_TREG_TBL_SIZE} },
@@ -648,7 +654,9 @@ static int bq25890_hw_init(struct bq25890_device *bq)
{F_BOOSTI, bq->init_data.boosti},
{F_BOOSTF, bq->init_data.boostf},
{F_EN_ILIM, bq->init_data.ilim_en},
- {F_TREG, bq->init_data.treg}
+ {F_TREG, bq->init_data.treg},
+ {F_BATCMP, bq->init_data.rbatcomp},
+ {F_VCLAMP, bq->init_data.vclamp},
};
ret = bq25890_chip_reset(bq);
@@ -859,11 +867,14 @@ static int bq25890_fw_read_u32_props(struct bq25890_device *bq)
{"ti,boost-max-current", false, TBL_BOOSTI, &init->boosti},
/* optional properties */
- {"ti,thermal-regulation-threshold", true, TBL_TREG, &init->treg}
+ {"ti,thermal-regulation-threshold", true, TBL_TREG, &init->treg},
+ {"ti,ibatcomp-micro-ohms", true, TBL_RBATCOMP, &init->rbatcomp},
+ {"ti,ibatcomp-clamp-microvolt", true, TBL_VBATCOMP, &init->vclamp},
};
/* initialize data for optional properties */
init->treg = 3; /* 120 degrees Celsius */
+ init->rbatcomp = init->vclamp = 0; /* IBAT compensation disabled */
for (i = 0; i < ARRAY_SIZE(props); i++) {
ret = device_property_read_u32(bq->dev, props[i].name,
@@ -1073,11 +1084,13 @@ static const struct of_device_id bq25890_of_match[] = {
};
MODULE_DEVICE_TABLE(of, bq25890_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id bq25890_acpi_match[] = {
{"BQ258900", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bq25890_acpi_match);
+#endif
static struct i2c_driver bq25890_driver = {
.driver = {
diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
new file mode 100644
index 000000000000..c936f311eb4f
--- /dev/null
+++ b/drivers/power/supply/bq25980_charger.c
@@ -0,0 +1,1314 @@
+// SPDX-License-Identifier: GPL-2.0
+// BQ25980 Battery Charger Driver
+// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+
+#include "bq25980_charger.h"
+
+struct bq25980_state {
+ bool dischg;
+ bool ovp;
+ bool ocp;
+ bool wdt;
+ bool tflt;
+ bool online;
+ bool ce;
+ bool hiz;
+ bool bypass;
+
+ u32 vbat_adc;
+ u32 vsys_adc;
+ u32 ibat_adc;
+};
+
+enum bq25980_id {
+ BQ25980,
+ BQ25975,
+ BQ25960,
+};
+
+struct bq25980_chip_info {
+
+ int model_id;
+
+ const struct regmap_config *regmap_config;
+
+ int busocp_def;
+ int busocp_sc_max;
+ int busocp_byp_max;
+ int busocp_sc_min;
+ int busocp_byp_min;
+
+ int busovp_sc_def;
+ int busovp_byp_def;
+ int busovp_sc_step;
+
+ int busovp_sc_offset;
+ int busovp_byp_step;
+ int busovp_byp_offset;
+ int busovp_sc_min;
+ int busovp_sc_max;
+ int busovp_byp_min;
+ int busovp_byp_max;
+
+ int batovp_def;
+ int batovp_max;
+ int batovp_min;
+ int batovp_step;
+ int batovp_offset;
+
+ int batocp_def;
+ int batocp_max;
+};
+
+struct bq25980_init_data {
+ u32 ichg;
+ u32 bypass_ilim;
+ u32 sc_ilim;
+ u32 vreg;
+ u32 iterm;
+ u32 iprechg;
+ u32 bypass_vlim;
+ u32 sc_vlim;
+ u32 ichg_max;
+ u32 vreg_max;
+};
+
+struct bq25980_device {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply *charger;
+ struct power_supply *battery;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ char model_name[I2C_NAME_SIZE];
+
+ struct bq25980_init_data init_data;
+ const struct bq25980_chip_info *chip_info;
+ struct bq25980_state state;
+ int watchdog_timer;
+};
+
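+/* Per-variant register defaults used to seed the regmap cache */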
+static const struct reg_default bq25980_reg_defs[] = {
+ {BQ25980_BATOVP, 0x5A},
+ {BQ25980_BATOVP_ALM, 0x46},
+ {BQ25980_BATOCP, 0x51},
+ {BQ25980_BATOCP_ALM, 0x50},
+ {BQ25980_BATUCP_ALM, 0x28},
+ {BQ25980_CHRGR_CTRL_1, 0x0},
+ {BQ25980_BUSOVP, 0x26},
+ {BQ25980_BUSOVP_ALM, 0x22},
+ {BQ25980_BUSOCP, 0xD},
+ {BQ25980_BUSOCP_ALM, 0xC},
+ {BQ25980_TEMP_CONTROL, 0x30},
+ {BQ25980_TDIE_ALM, 0xC8},
+ {BQ25980_TSBUS_FLT, 0x15},
+ {BQ25980_TSBAT_FLG, 0x15},
+ {BQ25980_VAC_CONTROL, 0x0},
+ {BQ25980_CHRGR_CTRL_2, 0x0},
+ {BQ25980_CHRGR_CTRL_3, 0x20},
+ {BQ25980_CHRGR_CTRL_4, 0x1D},
+ {BQ25980_CHRGR_CTRL_5, 0x18},
+ {BQ25980_STAT1, 0x0},
+ {BQ25980_STAT2, 0x0},
+ {BQ25980_STAT3, 0x0},
+ {BQ25980_STAT4, 0x0},
+ {BQ25980_STAT5, 0x0},
+ {BQ25980_FLAG1, 0x0},
+ {BQ25980_FLAG2, 0x0},
+ {BQ25980_FLAG3, 0x0},
+ {BQ25980_FLAG4, 0x0},
+ {BQ25980_FLAG5, 0x0},
+ {BQ25980_MASK1, 0x0},
+ {BQ25980_MASK2, 0x0},
+ {BQ25980_MASK3, 0x0},
+ {BQ25980_MASK4, 0x0},
+ {BQ25980_MASK5, 0x0},
+ {BQ25980_DEVICE_INFO, 0x8},
+ {BQ25980_ADC_CONTROL1, 0x0},
+ {BQ25980_ADC_CONTROL2, 0x0},
+ {BQ25980_IBUS_ADC_LSB, 0x0},
+ {BQ25980_IBUS_ADC_MSB, 0x0},
+ {BQ25980_VBUS_ADC_LSB, 0x0},
+ {BQ25980_VBUS_ADC_MSB, 0x0},
+ {BQ25980_VAC1_ADC_LSB, 0x0},
+ {BQ25980_VAC2_ADC_LSB, 0x0},
+ {BQ25980_VOUT_ADC_LSB, 0x0},
+ {BQ25980_VBAT_ADC_LSB, 0x0},
+ {BQ25980_IBAT_ADC_MSB, 0x0},
+ {BQ25980_IBAT_ADC_LSB, 0x0},
+ {BQ25980_TSBUS_ADC_LSB, 0x0},
+ {BQ25980_TSBAT_ADC_LSB, 0x0},
+ {BQ25980_TDIE_ADC_LSB, 0x0},
+ {BQ25980_DEGLITCH_TIME, 0x0},
+ {BQ25980_CHRGR_CTRL_6, 0x0},
+};
+
+static const struct reg_default bq25975_reg_defs[] = {
+ {BQ25980_BATOVP, 0x5A},
+ {BQ25980_BATOVP_ALM, 0x46},
+ {BQ25980_BATOCP, 0x51},
+ {BQ25980_BATOCP_ALM, 0x50},
+ {BQ25980_BATUCP_ALM, 0x28},
+ {BQ25980_CHRGR_CTRL_1, 0x0},
+ {BQ25980_BUSOVP, 0x26},
+ {BQ25980_BUSOVP_ALM, 0x22},
+ {BQ25980_BUSOCP, 0xD},
+ {BQ25980_BUSOCP_ALM, 0xC},
+ {BQ25980_TEMP_CONTROL, 0x30},
+ {BQ25980_TDIE_ALM, 0xC8},
+ {BQ25980_TSBUS_FLT, 0x15},
+ {BQ25980_TSBAT_FLG, 0x15},
+ {BQ25980_VAC_CONTROL, 0x0},
+ {BQ25980_CHRGR_CTRL_2, 0x0},
+ {BQ25980_CHRGR_CTRL_3, 0x20},
+ {BQ25980_CHRGR_CTRL_4, 0x1D},
+ {BQ25980_CHRGR_CTRL_5, 0x18},
+ {BQ25980_STAT1, 0x0},
+ {BQ25980_STAT2, 0x0},
+ {BQ25980_STAT3, 0x0},
+ {BQ25980_STAT4, 0x0},
+ {BQ25980_STAT5, 0x0},
+ {BQ25980_FLAG1, 0x0},
+ {BQ25980_FLAG2, 0x0},
+ {BQ25980_FLAG3, 0x0},
+ {BQ25980_FLAG4, 0x0},
+ {BQ25980_FLAG5, 0x0},
+ {BQ25980_MASK1, 0x0},
+ {BQ25980_MASK2, 0x0},
+ {BQ25980_MASK3, 0x0},
+ {BQ25980_MASK4, 0x0},
+ {BQ25980_MASK5, 0x0},
+ {BQ25980_DEVICE_INFO, 0x8},
+ {BQ25980_ADC_CONTROL1, 0x0},
+ {BQ25980_ADC_CONTROL2, 0x0},
+ {BQ25980_IBUS_ADC_LSB, 0x0},
+ {BQ25980_IBUS_ADC_MSB, 0x0},
+ {BQ25980_VBUS_ADC_LSB, 0x0},
+ {BQ25980_VBUS_ADC_MSB, 0x0},
+ {BQ25980_VAC1_ADC_LSB, 0x0},
+ {BQ25980_VAC2_ADC_LSB, 0x0},
+ {BQ25980_VOUT_ADC_LSB, 0x0},
+ {BQ25980_VBAT_ADC_LSB, 0x0},
+ {BQ25980_IBAT_ADC_MSB, 0x0},
+ {BQ25980_IBAT_ADC_LSB, 0x0},
+ {BQ25980_TSBUS_ADC_LSB, 0x0},
+ {BQ25980_TSBAT_ADC_LSB, 0x0},
+ {BQ25980_TDIE_ADC_LSB, 0x0},
+ {BQ25980_DEGLITCH_TIME, 0x0},
+ {BQ25980_CHRGR_CTRL_6, 0x0},
+};
+
+static const struct reg_default bq25960_reg_defs[] = {
+ {BQ25980_BATOVP, 0x5A},
+ {BQ25980_BATOVP_ALM, 0x46},
+ {BQ25980_BATOCP, 0x51},
+ {BQ25980_BATOCP_ALM, 0x50},
+ {BQ25980_BATUCP_ALM, 0x28},
+ {BQ25980_CHRGR_CTRL_1, 0x0},
+ {BQ25980_BUSOVP, 0x26},
+ {BQ25980_BUSOVP_ALM, 0x22},
+ {BQ25980_BUSOCP, 0xD},
+ {BQ25980_BUSOCP_ALM, 0xC},
+ {BQ25980_TEMP_CONTROL, 0x30},
+ {BQ25980_TDIE_ALM, 0xC8},
+ {BQ25980_TSBUS_FLT, 0x15},
+ {BQ25980_TSBAT_FLG, 0x15},
+ {BQ25980_VAC_CONTROL, 0x0},
+ {BQ25980_CHRGR_CTRL_2, 0x0},
+ {BQ25980_CHRGR_CTRL_3, 0x20},
+ {BQ25980_CHRGR_CTRL_4, 0x1D},
+ {BQ25980_CHRGR_CTRL_5, 0x18},
+ {BQ25980_STAT1, 0x0},
+ {BQ25980_STAT2, 0x0},
+ {BQ25980_STAT3, 0x0},
+ {BQ25980_STAT4, 0x0},
+ {BQ25980_STAT5, 0x0},
+ {BQ25980_FLAG1, 0x0},
+ {BQ25980_FLAG2, 0x0},
+ {BQ25980_FLAG3, 0x0},
+ {BQ25980_FLAG4, 0x0},
+ {BQ25980_FLAG5, 0x0},
+ {BQ25980_MASK1, 0x0},
+ {BQ25980_MASK2, 0x0},
+ {BQ25980_MASK3, 0x0},
+ {BQ25980_MASK4, 0x0},
+ {BQ25980_MASK5, 0x0},
+ {BQ25980_DEVICE_INFO, 0x8},
+ {BQ25980_ADC_CONTROL1, 0x0},
+ {BQ25980_ADC_CONTROL2, 0x0},
+ {BQ25980_IBUS_ADC_LSB, 0x0},
+ {BQ25980_IBUS_ADC_MSB, 0x0},
+ {BQ25980_VBUS_ADC_LSB, 0x0},
+ {BQ25980_VBUS_ADC_MSB, 0x0},
+ {BQ25980_VAC1_ADC_LSB, 0x0},
+ {BQ25980_VAC2_ADC_LSB, 0x0},
+ {BQ25980_VOUT_ADC_LSB, 0x0},
+ {BQ25980_VBAT_ADC_LSB, 0x0},
+ {BQ25980_IBAT_ADC_MSB, 0x0},
+ {BQ25980_IBAT_ADC_LSB, 0x0},
+ {BQ25980_TSBUS_ADC_LSB, 0x0},
+ {BQ25980_TSBAT_ADC_LSB, 0x0},
+ {BQ25980_TDIE_ADC_LSB, 0x0},
+ {BQ25980_DEGLITCH_TIME, 0x0},
+ {BQ25980_CHRGR_CTRL_6, 0x0},
+};
+
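+/* Supported watchdog timeout steps, in milliseconds */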
+static const int bq25980_watchdog_time[BQ25980_NUM_WD_VAL] = {5000, 10000,
+ 50000, 300000};
+
+static int bq25980_get_input_curr_lim(struct bq25980_device *bq)
+{
+ unsigned int busocp_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_BUSOCP, &busocp_reg_code);
+ if (ret)
+ return ret;
+
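+ /* Convert the register code back to microamps: 250 mA per step, 1 A offset */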
+ return (busocp_reg_code * BQ25980_BUSOCP_STEP_uA) + BQ25980_BUSOCP_OFFSET_uA;
+}
+
+static int bq25980_set_hiz(struct bq25980_device *bq, int setting)
+{
+ return regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_EN_HIZ, setting);
+}
+
+static int bq25980_set_input_curr_lim(struct bq25980_device *bq, int busocp)
+{
+ unsigned int busocp_reg_code;
+ int ret;
+
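+ /* A limit of zero disconnects the input by entering Hi-Z mode */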
+ if (!busocp)
+ return bq25980_set_hiz(bq, BQ25980_ENABLE_HIZ);
+
+ bq25980_set_hiz(bq, BQ25980_DISABLE_HIZ);
+
+ if (busocp < BQ25980_BUSOCP_MIN_uA)
+ busocp = BQ25980_BUSOCP_MIN_uA;
+
+ if (bq->state.bypass)
+ busocp = min(busocp, bq->chip_info->busocp_byp_max);
+ else
+ busocp = min(busocp, bq->chip_info->busocp_sc_max);
+
+ busocp_reg_code = (busocp - BQ25980_BUSOCP_OFFSET_uA)
+ / BQ25980_BUSOCP_STEP_uA;
+
+ ret = regmap_write(bq->regmap, BQ25980_BUSOCP, busocp_reg_code);
+ if (ret)
+ return ret;
+
+ return regmap_write(bq->regmap, BQ25980_BUSOCP_ALM, busocp_reg_code);
+}
+
+static int bq25980_get_input_volt_lim(struct bq25980_device *bq)
+{
+ unsigned int busovp_reg_code;
+ unsigned int busovp_offset;
+ unsigned int busovp_step;
+ int ret;
+
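+ /* Bypass and switched-cap modes use different OVP step/offset scales */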
+ if (bq->state.bypass) {
+ busovp_step = bq->chip_info->busovp_byp_step;
+ busovp_offset = bq->chip_info->busovp_byp_offset;
+ } else {
+ busovp_step = bq->chip_info->busovp_sc_step;
+ busovp_offset = bq->chip_info->busovp_sc_offset;
+ }
+
+ ret = regmap_read(bq->regmap, BQ25980_BUSOVP, &busovp_reg_code);
+ if (ret)
+ return ret;
+
+ return (busovp_reg_code * busovp_step) + busovp_offset;
+}
+
+static int bq25980_set_input_volt_lim(struct bq25980_device *bq, int busovp)
+{
+ unsigned int busovp_reg_code;
+ unsigned int busovp_step;
+ unsigned int busovp_offset;
+ int ret;
+
+ if (bq->state.bypass) {
+ busovp_step = bq->chip_info->busovp_byp_step;
+ busovp_offset = bq->chip_info->busovp_byp_offset;
+ if (busovp > bq->chip_info->busovp_byp_max)
+ busovp = bq->chip_info->busovp_byp_max;
+ else if (busovp < bq->chip_info->busovp_byp_min)
+ busovp = bq->chip_info->busovp_byp_min;
+ } else {
+ busovp_step = bq->chip_info->busovp_sc_step;
+ busovp_offset = bq->chip_info->busovp_sc_offset;
+ if (busovp > bq->chip_info->busovp_sc_max)
+ busovp = bq->chip_info->busovp_sc_max;
+ else if (busovp < bq->chip_info->busovp_sc_min)
+ busovp = bq->chip_info->busovp_sc_min;
+ }
+
+ busovp_reg_code = (busovp - busovp_offset) / busovp_step;
+
+ ret = regmap_write(bq->regmap, BQ25980_BUSOVP, busovp_reg_code);
+ if (ret)
+ return ret;
+
+ return regmap_write(bq->regmap, BQ25980_BUSOVP_ALM, busovp_reg_code);
+}
+
+static int bq25980_get_const_charge_curr(struct bq25980_device *bq)
+{
+ unsigned int batocp_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_BATOCP, &batocp_reg_code);
+ if (ret)
+ return ret;
+
+ return (batocp_reg_code & BQ25980_BATOCP_MASK) *
+ BQ25980_BATOCP_STEP_uA;
+}
+
+static int bq25980_set_const_charge_curr(struct bq25980_device *bq, int batocp)
+{
+ unsigned int batocp_reg_code;
+ int ret;
+
+ batocp = max(batocp, BQ25980_BATOCP_MIN_uA);
+ batocp = min(batocp, bq->chip_info->batocp_max);
+
+ batocp_reg_code = batocp / BQ25980_BATOCP_STEP_uA;
+
+ ret = regmap_update_bits(bq->regmap, BQ25980_BATOCP,
+ BQ25980_BATOCP_MASK, batocp_reg_code);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(bq->regmap, BQ25980_BATOCP_ALM,
+ BQ25980_BATOCP_MASK, batocp_reg_code);
+}
+
+static int bq25980_get_const_charge_volt(struct bq25980_device *bq)
+{
+ unsigned int batovp_reg_code;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_BATOVP, &batovp_reg_code);
+ if (ret)
+ return ret;
+
+ return ((batovp_reg_code * bq->chip_info->batovp_step) +
+ bq->chip_info->batovp_offset);
+}
+
+static int bq25980_set_const_charge_volt(struct bq25980_device *bq, int batovp)
+{
+ unsigned int batovp_reg_code;
+ int ret;
+
+ if (batovp < bq->chip_info->batovp_min)
+ batovp = bq->chip_info->batovp_min;
+
+ if (batovp > bq->chip_info->batovp_max)
+ batovp = bq->chip_info->batovp_max;
+
+ batovp_reg_code = (batovp - bq->chip_info->batovp_offset) /
+ bq->chip_info->batovp_step;
+
+ ret = regmap_write(bq->regmap, BQ25980_BATOVP, batovp_reg_code);
+ if (ret)
+ return ret;
+
+ return regmap_write(bq->regmap, BQ25980_BATOVP_ALM, batovp_reg_code);
+}
+
+static int bq25980_set_bypass(struct bq25980_device *bq, bool en_bypass)
+{
+ int ret;
+
+ if (en_bypass)
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_EN_BYPASS, BQ25980_EN_BYPASS);
+ else
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_EN_BYPASS, en_bypass);
+ if (ret)
+ return ret;
+
+ bq->state.bypass = en_bypass;
+
+ return 0;
+}
+
+static int bq25980_set_chg_en(struct bq25980_device *bq, bool en_chg)
+{
+ int ret;
+
+ if (en_chg)
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_CHG_EN, BQ25980_CHG_EN);
+ else
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_CHG_EN, en_chg);
+ if (ret)
+ return ret;
+
+ bq->state.ce = en_chg;
+
+ return 0;
+}
+
+static int bq25980_get_adc_ibus(struct bq25980_device *bq)
+{
+ int ibus_adc_lsb, ibus_adc_msb;
+ u16 ibus_adc;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_IBUS_ADC_MSB, &ibus_adc_msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_IBUS_ADC_LSB, &ibus_adc_lsb);
+ if (ret)
+ return ret;
+
+ ibus_adc = (ibus_adc_msb << 8) | ibus_adc_lsb;
+
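+ /* Bit 7 of the MSB flags a negative (discharge) two's complement reading */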
+ if (ibus_adc_msb & BQ25980_ADC_POLARITY_BIT)
+ return ((ibus_adc ^ 0xffff) + 1) * BQ25980_ADC_CURR_STEP_uA;
+
+ return ibus_adc * BQ25980_ADC_CURR_STEP_uA;
+}
+
+static int bq25980_get_adc_vbus(struct bq25980_device *bq)
+{
+ int vbus_adc_lsb, vbus_adc_msb;
+ u16 vbus_adc;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_VBUS_ADC_MSB, &vbus_adc_msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_VBUS_ADC_LSB, &vbus_adc_lsb);
+ if (ret)
+ return ret;
+
+ vbus_adc = (vbus_adc_msb << 8) | vbus_adc_lsb;
+
+ return vbus_adc * BQ25980_ADC_VOLT_STEP_uV;
+}
+
+static int bq25980_get_ibat_adc(struct bq25980_device *bq)
+{
+ int ret;
+ int ibat_adc_lsb, ibat_adc_msb;
+ int ibat_adc;
+
+ ret = regmap_read(bq->regmap, BQ25980_IBAT_ADC_MSB, &ibat_adc_msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_IBAT_ADC_LSB, &ibat_adc_lsb);
+ if (ret)
+ return ret;
+
+ ibat_adc = (ibat_adc_msb << 8) | ibat_adc_lsb;
+
+ if (ibat_adc_msb & BQ25980_ADC_POLARITY_BIT)
+ return ((ibat_adc ^ 0xffff) + 1) * BQ25980_ADC_CURR_STEP_uA;
+
+ return ibat_adc * BQ25980_ADC_CURR_STEP_uA;
+}
+
+static int bq25980_get_adc_vbat(struct bq25980_device *bq)
+{
+ int vbat_adc_lsb, vbat_adc_msb;
+ u16 vbat_adc;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_VBAT_ADC_MSB, &vbat_adc_msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_VBAT_ADC_LSB, &vbat_adc_lsb);
+ if (ret)
+ return ret;
+
+ vbat_adc = (vbat_adc_msb << 8) | vbat_adc_lsb;
+
+ return vbat_adc * BQ25980_ADC_VOLT_STEP_uV;
+}
+
+static int bq25980_get_state(struct bq25980_device *bq,
+ struct bq25980_state *state)
+{
+ unsigned int chg_ctrl_2;
+ unsigned int stat1;
+ unsigned int stat2;
+ unsigned int stat3;
+ unsigned int stat4;
+ unsigned int ibat_adc_msb;
+ int ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_STAT1, &stat1);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_STAT2, &stat2);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_STAT3, &stat3);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_STAT4, &stat4);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_CHRGR_CTRL_2, &chg_ctrl_2);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(bq->regmap, BQ25980_IBAT_ADC_MSB, &ibat_adc_msb);
+ if (ret)
+ return ret;
+
+ state->dischg = ibat_adc_msb & BQ25980_ADC_POLARITY_BIT;
+ state->ovp = (stat1 & BQ25980_STAT1_OVP_MASK) |
+ (stat3 & BQ25980_STAT3_OVP_MASK);
+ state->ocp = (stat1 & BQ25980_STAT1_OCP_MASK) |
+ (stat2 & BQ25980_STAT2_OCP_MASK);
+ state->tflt = stat4 & BQ25980_STAT4_TFLT_MASK;
+ state->wdt = stat4 & BQ25980_WD_STAT;
+ state->online = stat3 & BQ25980_PRESENT_MASK;
+ state->ce = chg_ctrl_2 & BQ25980_CHG_EN;
+ state->hiz = chg_ctrl_2 & BQ25980_EN_HIZ;
+ state->bypass = chg_ctrl_2 & BQ25980_EN_BYPASS;
+
+ return 0;
+}
+
+static int bq25980_set_battery_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq25980_device *bq = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq25980_set_const_charge_curr(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq25980_set_const_charge_volt(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int bq25980_get_battery_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq25980_device *bq = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = bq->init_data.ichg_max;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = bq->init_data.vreg_max;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = bq25980_get_ibat_adc(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq25980_get_adc_vbat(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int bq25980_set_charger_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ struct bq25980_device *bq = power_supply_get_drvdata(psy);
+ int ret = -EINVAL;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = bq25980_set_input_curr_lim(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = bq25980_set_input_volt_lim(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = bq25980_set_bypass(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq25980_set_chg_en(bq, val->intval);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int bq25980_get_charger_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq25980_device *bq = power_supply_get_drvdata(psy);
+ struct bq25980_state state;
+ int ret = 0;
+
+ mutex_lock(&bq->lock);
+ ret = bq25980_get_state(bq, &state);
+ mutex_unlock(&bq->lock);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BQ25980_MANUFACTURER;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bq->model_name;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = state.online;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = bq25980_get_input_volt_lim(bq);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = bq25980_get_input_curr_lim(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+ if (state.tflt)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (state.ovp)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (state.ocp)
+ val->intval = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else if (state.wdt)
+ val->intval =
+ POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+
+ if ((state.ce) && (!state.hiz))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (state.dischg)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (!state.ce)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+ if (!state.ce)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (state.bypass)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = bq25980_get_adc_ibus(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq25980_get_adc_vbus(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = bq25980_get_const_charge_curr(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = bq25980_get_const_charge_volt(bq);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static bool bq25980_state_changed(struct bq25980_device *bq,
+ struct bq25980_state *new_state)
+{
+ struct bq25980_state old_state;
+
+ mutex_lock(&bq->lock);
+ old_state = bq->state;
+ mutex_unlock(&bq->lock);
+
+ return (old_state.dischg != new_state->dischg ||
+ old_state.ovp != new_state->ovp ||
+ old_state.ocp != new_state->ocp ||
+ old_state.online != new_state->online ||
+ old_state.wdt != new_state->wdt ||
+ old_state.tflt != new_state->tflt ||
+ old_state.ce != new_state->ce ||
+ old_state.hiz != new_state->hiz ||
+ old_state.bypass != new_state->bypass);
+}
+
+static irqreturn_t bq25980_irq_handler_thread(int irq, void *private)
+{
+ struct bq25980_device *bq = private;
+ struct bq25980_state state;
+ int ret;
+
+ ret = bq25980_get_state(bq, &state);
+ if (ret < 0)
+ goto irq_out;
+
+ if (!bq25980_state_changed(bq, &state))
+ goto irq_out;
+
+ mutex_lock(&bq->lock);
+ bq->state = state;
+ mutex_unlock(&bq->lock);
+
+ power_supply_changed(bq->charger);
+
+irq_out:
+ return IRQ_HANDLED;
+}
+
+static enum power_supply_property bq25980_power_supply_props[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+static enum power_supply_property bq25980_battery_props[] = {
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+static char *bq25980_charger_supplied_to[] = {
+ "main-battery",
+};
+
+static int bq25980_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct power_supply_desc bq25980_power_supply_desc = {
+ .name = "bq25980-charger",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = bq25980_power_supply_props,
+ .num_properties = ARRAY_SIZE(bq25980_power_supply_props),
+ .get_property = bq25980_get_charger_property,
+ .set_property = bq25980_set_charger_property,
+ .property_is_writeable = bq25980_property_is_writeable,
+};
+
+static const struct power_supply_desc bq25980_battery_desc = {
+ .name = "bq25980-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = bq25980_get_battery_property,
+ .set_property = bq25980_set_battery_property,
+ .properties = bq25980_battery_props,
+ .num_properties = ARRAY_SIZE(bq25980_battery_props),
+ .property_is_writeable = bq25980_property_is_writeable,
+};
+
+static bool bq25980_is_volatile_reg(struct device *dev, unsigned int reg)
+{
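+ /* Status, flag and live ADC registers must always be read from hardware */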
+ switch (reg) {
+ case BQ25980_CHRGR_CTRL_2:
+ case BQ25980_STAT1...BQ25980_FLAG5:
+ case BQ25980_ADC_CONTROL1...BQ25980_TDIE_ADC_LSB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bq25980_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ25980_CHRGR_CTRL_6,
+ .reg_defaults = bq25980_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq25980_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = bq25980_is_volatile_reg,
+};
+
+static const struct regmap_config bq25975_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ25980_CHRGR_CTRL_6,
+ .reg_defaults = bq25975_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq25975_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = bq25980_is_volatile_reg,
+};
+
+static const struct regmap_config bq25960_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BQ25980_CHRGR_CTRL_6,
+ .reg_defaults = bq25960_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(bq25960_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = bq25980_is_volatile_reg,
+};
+
+static const struct bq25980_chip_info bq25980_chip_info_tbl[] = {
+ [BQ25980] = {
+ .model_id = BQ25980,
+ .regmap_config = &bq25980_regmap_config,
+
+ .busocp_def = BQ25980_BUSOCP_DFLT_uA,
+ .busocp_sc_min = BQ25960_BUSOCP_SC_MAX_uA,
+ .busocp_sc_max = BQ25980_BUSOCP_SC_MAX_uA,
+ .busocp_byp_max = BQ25980_BUSOCP_BYP_MAX_uA,
+ .busocp_byp_min = BQ25980_BUSOCP_MIN_uA,
+
+ .busovp_sc_def = BQ25980_BUSOVP_DFLT_uV,
+ .busovp_byp_def = BQ25980_BUSOVP_BYPASS_DFLT_uV,
+ .busovp_sc_step = BQ25980_BUSOVP_SC_STEP_uV,
+ .busovp_sc_offset = BQ25980_BUSOVP_SC_OFFSET_uV,
+ .busovp_byp_step = BQ25980_BUSOVP_BYP_STEP_uV,
+ .busovp_byp_offset = BQ25980_BUSOVP_BYP_OFFSET_uV,
+ .busovp_sc_min = BQ25980_BUSOVP_SC_MIN_uV,
+ .busovp_sc_max = BQ25980_BUSOVP_SC_MAX_uV,
+ .busovp_byp_min = BQ25980_BUSOVP_BYP_MIN_uV,
+ .busovp_byp_max = BQ25980_BUSOVP_BYP_MAX_uV,
+
+ .batovp_def = BQ25980_BATOVP_DFLT_uV,
+ .batovp_max = BQ25980_BATOVP_MAX_uV,
+ .batovp_min = BQ25980_BATOVP_MIN_uV,
+ .batovp_step = BQ25980_BATOVP_STEP_uV,
+ .batovp_offset = BQ25980_BATOVP_OFFSET_uV,
+
+ .batocp_def = BQ25980_BATOCP_DFLT_uA,
+ .batocp_max = BQ25980_BATOCP_MAX_uA,
+ },
+
+ [BQ25975] = {
+ .model_id = BQ25975,
+ .regmap_config = &bq25975_regmap_config,
+
+ .busocp_def = BQ25975_BUSOCP_DFLT_uA,
+ .busocp_sc_min = BQ25975_BUSOCP_SC_MAX_uA,
+ .busocp_sc_max = BQ25975_BUSOCP_SC_MAX_uA,
+ .busocp_byp_min = BQ25980_BUSOCP_MIN_uA,
+ .busocp_byp_max = BQ25975_BUSOCP_BYP_MAX_uA,
+
+ .busovp_sc_def = BQ25975_BUSOVP_DFLT_uV,
+ .busovp_byp_def = BQ25975_BUSOVP_BYPASS_DFLT_uV,
+ .busovp_sc_step = BQ25975_BUSOVP_SC_STEP_uV,
+ .busovp_sc_offset = BQ25975_BUSOVP_SC_OFFSET_uV,
+ .busovp_byp_step = BQ25975_BUSOVP_BYP_STEP_uV,
+ .busovp_byp_offset = BQ25975_BUSOVP_BYP_OFFSET_uV,
+ .busovp_sc_min = BQ25975_BUSOVP_SC_MIN_uV,
+ .busovp_sc_max = BQ25975_BUSOVP_SC_MAX_uV,
+ .busovp_byp_min = BQ25975_BUSOVP_BYP_MIN_uV,
+ .busovp_byp_max = BQ25975_BUSOVP_BYP_MAX_uV,
+
+ .batovp_def = BQ25975_BATOVP_DFLT_uV,
+ .batovp_max = BQ25975_BATOVP_MAX_uV,
+ .batovp_min = BQ25975_BATOVP_MIN_uV,
+ .batovp_step = BQ25975_BATOVP_STEP_uV,
+ .batovp_offset = BQ25975_BATOVP_OFFSET_uV,
+
+ .batocp_def = BQ25980_BATOCP_DFLT_uA,
+ .batocp_max = BQ25980_BATOCP_MAX_uA,
+ },
+
+ [BQ25960] = {
+ .model_id = BQ25960,
+ .regmap_config = &bq25960_regmap_config,
+
+ .busocp_def = BQ25960_BUSOCP_DFLT_uA,
+ .busocp_sc_min = BQ25960_BUSOCP_SC_MAX_uA,
+ .busocp_sc_max = BQ25960_BUSOCP_SC_MAX_uA,
+ .busocp_byp_min = BQ25960_BUSOCP_SC_MAX_uA,
+ .busocp_byp_max = BQ25960_BUSOCP_BYP_MAX_uA,
+
+ .busovp_sc_def = BQ25975_BUSOVP_DFLT_uV,
+ .busovp_byp_def = BQ25975_BUSOVP_BYPASS_DFLT_uV,
+ .busovp_sc_step = BQ25960_BUSOVP_SC_STEP_uV,
+ .busovp_sc_offset = BQ25960_BUSOVP_SC_OFFSET_uV,
+ .busovp_byp_step = BQ25960_BUSOVP_BYP_STEP_uV,
+ .busovp_byp_offset = BQ25960_BUSOVP_BYP_OFFSET_uV,
+ .busovp_sc_min = BQ25960_BUSOVP_SC_MIN_uV,
+ .busovp_sc_max = BQ25960_BUSOVP_SC_MAX_uV,
+ .busovp_byp_min = BQ25960_BUSOVP_BYP_MIN_uV,
+ .busovp_byp_max = BQ25960_BUSOVP_BYP_MAX_uV,
+
+ .batovp_def = BQ25960_BATOVP_DFLT_uV,
+ .batovp_max = BQ25960_BATOVP_MAX_uV,
+ .batovp_min = BQ25960_BATOVP_MIN_uV,
+ .batovp_step = BQ25960_BATOVP_STEP_uV,
+ .batovp_offset = BQ25960_BATOVP_OFFSET_uV,
+
+ .batocp_def = BQ25960_BATOCP_DFLT_uA,
+ .batocp_max = BQ25960_BATOCP_MAX_uA,
+ },
+};
+
+static int bq25980_power_supply_init(struct bq25980_device *bq,
+ struct device *dev)
+{
+ struct power_supply_config psy_cfg = { .drv_data = bq,
+ .of_node = dev->of_node, };
+
+ psy_cfg.supplied_to = bq25980_charger_supplied_to;
+ psy_cfg.num_supplicants = ARRAY_SIZE(bq25980_charger_supplied_to);
+
+ bq->charger = devm_power_supply_register(bq->dev,
+ &bq25980_power_supply_desc,
+ &psy_cfg);
+ if (IS_ERR(bq->charger))
+ return -EINVAL;
+
+ bq->battery = devm_power_supply_register(bq->dev,
+ &bq25980_battery_desc,
+ &psy_cfg);
+ if (IS_ERR(bq->battery))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bq25980_hw_init(struct bq25980_device *bq)
+{
+ struct power_supply_battery_info bat_info = { };
+ int wd_reg_val = BQ25980_WATCHDOG_DIS;
+ int wd_max_val = BQ25980_NUM_WD_VAL - 1;
+ int ret = 0;
+ int curr_val;
+ int volt_val;
+ int i;
+
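+ /* Zero disables the watchdog; otherwise round down to a supported step */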
+ if (bq->watchdog_timer) {
+ if (bq->watchdog_timer >= bq25980_watchdog_time[wd_max_val])
+ wd_reg_val = wd_max_val;
+ else {
+ for (i = 0; i < wd_max_val; i++) {
+ if (bq->watchdog_timer >= bq25980_watchdog_time[i] &&
+ bq->watchdog_timer < bq25980_watchdog_time[i + 1]) {
+ wd_reg_val = i;
+ break;
+ }
+ }
+ }
+ }
+
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_3,
+ BQ25980_WATCHDOG_MASK, wd_reg_val);
+ if (ret)
+ return ret;
+
+ ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret) {
+ dev_warn(bq->dev, "battery info missing\n");
+ return -EINVAL;
+ }
+
+ bq->init_data.ichg_max = bat_info.constant_charge_current_max_ua;
+ bq->init_data.vreg_max = bat_info.constant_charge_voltage_max_uv;
+
+ if (bq->state.bypass) {
+ ret = regmap_update_bits(bq->regmap, BQ25980_CHRGR_CTRL_2,
+ BQ25980_EN_BYPASS, BQ25980_EN_BYPASS);
+ if (ret)
+ return ret;
+
+ curr_val = bq->init_data.bypass_ilim;
+ volt_val = bq->init_data.bypass_vlim;
+ } else {
+ curr_val = bq->init_data.sc_ilim;
+ volt_val = bq->init_data.sc_vlim;
+ }
+
+ ret = bq25980_set_input_curr_lim(bq, curr_val);
+ if (ret)
+ return ret;
+
+ ret = bq25980_set_input_volt_lim(bq, volt_val);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(bq->regmap, BQ25980_ADC_CONTROL1,
+ BQ25980_ADC_EN, BQ25980_ADC_EN);
+}
+
+static int bq25980_parse_dt(struct bq25980_device *bq)
+{
+ int ret;
+
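+ /* Each property is optional; fall back to the chip defaults when absent */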
+ ret = device_property_read_u32(bq->dev, "ti,watchdog-timeout-ms",
+ &bq->watchdog_timer);
+ if (ret)
+ bq->watchdog_timer = BQ25980_WATCHDOG_MIN;
+
+ if (bq->watchdog_timer > BQ25980_WATCHDOG_MAX ||
+ bq->watchdog_timer < BQ25980_WATCHDOG_MIN)
+ return -EINVAL;
+
+ ret = device_property_read_u32(bq->dev,
+ "ti,sc-ovp-limit-microvolt",
+ &bq->init_data.sc_vlim);
+ if (ret)
+ bq->init_data.sc_vlim = bq->chip_info->busovp_sc_def;
+
+ if (bq->init_data.sc_vlim > bq->chip_info->busovp_sc_max ||
+ bq->init_data.sc_vlim < bq->chip_info->busovp_sc_min) {
+ dev_err(bq->dev, "SC ovp limit is out of range\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(bq->dev,
+ "ti,sc-ocp-limit-microamp",
+ &bq->init_data.sc_ilim);
+ if (ret)
+ bq->init_data.sc_ilim = bq->chip_info->busocp_def;
+
+ if (bq->init_data.sc_ilim > bq->chip_info->busocp_sc_max ||
+ bq->init_data.sc_ilim < bq->chip_info->busocp_sc_min) {
+ dev_err(bq->dev, "SC ocp limit is out of range\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(bq->dev,
+ "ti,bypass-ovp-limit-microvolt",
+ &bq->init_data.bypass_vlim);
+ if (ret)
+ bq->init_data.bypass_vlim = bq->chip_info->busovp_byp_def;
+
+ if (bq->init_data.bypass_vlim > bq->chip_info->busovp_byp_max ||
+ bq->init_data.bypass_vlim < bq->chip_info->busovp_byp_min) {
+ dev_err(bq->dev, "Bypass ovp limit is out of range\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(bq->dev,
+ "ti,bypass-ocp-limit-microamp",
+ &bq->init_data.bypass_ilim);
+ if (ret)
+ bq->init_data.bypass_ilim = bq->chip_info->busocp_def;
+
+ if (bq->init_data.bypass_ilim > bq->chip_info->busocp_byp_max ||
+ bq->init_data.bypass_ilim < bq->chip_info->busocp_byp_min) {
+ dev_err(bq->dev, "Bypass ocp limit is out of range\n");
+ return -EINVAL;
+ }
+
+ bq->state.bypass = device_property_read_bool(bq->dev,
+ "ti,bypass-enable");
+ return 0;
+}
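+
+/*
+ * Illustrative device tree node consumed by bq25980_parse_dt() and
+ * bq25980_hw_init(); the unit address, interrupt wiring and limit
+ * values below are example assumptions, not requirements:
+ *
+ *	charger@65 {
+ *		compatible = "ti,bq25980";
+ *		reg = <0x65>;
+ *		interrupt-parent = <&gpio1>;
+ *		interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
+ *		ti,watchdog-timeout-ms = <5000>;
+ *		ti,sc-ovp-limit-microvolt = <17800000>;
+ *		ti,sc-ocp-limit-microamp = <4250000>;
+ *		monitored-battery = <&battery>;
+ *	};
+ */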
+
+static int bq25980_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct bq25980_device *bq;
+ int ret;
+
+ bq = devm_kzalloc(dev, sizeof(*bq), GFP_KERNEL);
+ if (!bq)
+ return -ENOMEM;
+
+ bq->client = client;
+ bq->dev = dev;
+
+ mutex_init(&bq->lock);
+
+ strncpy(bq->model_name, id->name, I2C_NAME_SIZE);
+ bq->chip_info = &bq25980_chip_info_tbl[id->driver_data];
+
+ bq->regmap = devm_regmap_init_i2c(client,
+ bq->chip_info->regmap_config);
+ if (IS_ERR(bq->regmap)) {
+ dev_err(dev, "Failed to allocate register map\n");
+ return PTR_ERR(bq->regmap);
+ }
+
+ i2c_set_clientdata(client, bq);
+
+ ret = bq25980_parse_dt(bq);
+ if (ret) {
+ dev_err(dev, "Failed to read device tree properties%d\n", ret);
+ return ret;
+ }
+
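+ /* IRQ is optional; without it state changes are only seen on userspace polls */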
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ bq25980_irq_handler_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(&client->dev), bq);
+ if (ret)
+ return ret;
+ }
+
+ ret = bq25980_power_supply_init(bq, dev);
+ if (ret) {
+ dev_err(dev, "Failed to register power supply\n");
+ return ret;
+ }
+
+ ret = bq25980_hw_init(bq);
+ if (ret) {
+ dev_err(dev, "Cannot initialize the chip.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id bq25980_i2c_ids[] = {
+ { "bq25980", BQ25980 },
+ { "bq25975", BQ25975 },
+ { "bq25975", BQ25975 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, bq25980_i2c_ids);
+
+static const struct of_device_id bq25980_of_match[] = {
+ { .compatible = "ti,bq25980", .data = (void *)BQ25980 },
+ { .compatible = "ti,bq25975", .data = (void *)BQ25975 },
+ { .compatible = "ti,bq25960", .data = (void *)BQ25960 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bq25980_of_match);
+
+static struct i2c_driver bq25980_driver = {
+ .driver = {
+ .name = "bq25980-charger",
+ .of_match_table = bq25980_of_match,
+ },
+ .probe = bq25980_probe,
+ .id_table = bq25980_i2c_ids,
+};
+module_i2c_driver(bq25980_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_AUTHOR("Ricardo Rivera-Matos <r-rivera-matos@ti.com>");
+MODULE_DESCRIPTION("bq25980 charger driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/bq25980_charger.h b/drivers/power/supply/bq25980_charger.h
new file mode 100644
index 000000000000..39f94eba5f6c
--- /dev/null
+++ b/drivers/power/supply/bq25980_charger.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ */
+
+#ifndef BQ25980_CHARGER_H
+#define BQ25980_CHARGER_H
+
+#define BQ25980_MANUFACTURER "Texas Instruments"
+
+#define BQ25980_BATOVP 0x0
+#define BQ25980_BATOVP_ALM 0x1
+#define BQ25980_BATOCP 0x2
+#define BQ25980_BATOCP_ALM 0x3
+#define BQ25980_BATUCP_ALM 0x4
+#define BQ25980_CHRGR_CTRL_1 0x5
+#define BQ25980_BUSOVP 0x6
+#define BQ25980_BUSOVP_ALM 0x7
+#define BQ25980_BUSOCP 0x8
+#define BQ25980_BUSOCP_ALM 0x9
+#define BQ25980_TEMP_CONTROL 0xA
+#define BQ25980_TDIE_ALM 0xB
+#define BQ25980_TSBUS_FLT 0xC
+#define BQ25980_TSBAT_FLG 0xD
+#define BQ25980_VAC_CONTROL 0xE
+#define BQ25980_CHRGR_CTRL_2 0xF
+#define BQ25980_CHRGR_CTRL_3 0x10
+#define BQ25980_CHRGR_CTRL_4 0x11
+#define BQ25980_CHRGR_CTRL_5 0x12
+#define BQ25980_STAT1 0x13
+#define BQ25980_STAT2 0x14
+#define BQ25980_STAT3 0x15
+#define BQ25980_STAT4 0x16
+#define BQ25980_STAT5 0x17
+#define BQ25980_FLAG1 0x18
+#define BQ25980_FLAG2 0x19
+#define BQ25980_FLAG3 0x1A
+#define BQ25980_FLAG4 0x1B
+#define BQ25980_FLAG5 0x1C
+#define BQ25980_MASK1 0x1D
+#define BQ25980_MASK2 0x1E
+#define BQ25980_MASK3 0x1F
+#define BQ25980_MASK4 0x20
+#define BQ25980_MASK5 0x21
+#define BQ25980_DEVICE_INFO 0x22
+#define BQ25980_ADC_CONTROL1 0x23
+#define BQ25980_ADC_CONTROL2 0x24
+#define BQ25980_IBUS_ADC_MSB 0x25
+#define BQ25980_IBUS_ADC_LSB 0x26
+#define BQ25980_VBUS_ADC_MSB 0x27
+#define BQ25980_VBUS_ADC_LSB 0x28
+#define BQ25980_VAC1_ADC_MSB 0x29
+#define BQ25980_VAC1_ADC_LSB 0x2A
+#define BQ25980_VAC2_ADC_MSB 0x2B
+#define BQ25980_VAC2_ADC_LSB 0x2C
+#define BQ25980_VOUT_ADC_MSB 0x2D
+#define BQ25980_VOUT_ADC_LSB 0x2E
+#define BQ25980_VBAT_ADC_MSB 0x2F
+#define BQ25980_VBAT_ADC_LSB 0x30
+#define BQ25980_IBAT_ADC_MSB 0x31
+#define BQ25980_IBAT_ADC_LSB 0x32
+#define BQ25980_TSBUS_ADC_MSB 0x33
+#define BQ25980_TSBUS_ADC_LSB 0x34
+#define BQ25980_TSBAT_ADC_MSB 0x35
+#define BQ25980_TSBAT_ADC_LSB 0x36
+#define BQ25980_TDIE_ADC_MSB 0x37
+#define BQ25980_TDIE_ADC_LSB 0x38
+#define BQ25980_DEGLITCH_TIME 0x39
+#define BQ25980_CHRGR_CTRL_6 0x3A
+
+#define BQ25980_BUSOCP_STEP_uA 250000
+#define BQ25980_BUSOCP_OFFSET_uA 1000000
+
+#define BQ25980_BUSOCP_DFLT_uA 4250000
+#define BQ25975_BUSOCP_DFLT_uA 4250000
+#define BQ25960_BUSOCP_DFLT_uA 3250000
+
+#define BQ25980_BUSOCP_MIN_uA 1000000
+
+#define BQ25980_BUSOCP_SC_MAX_uA 5750000
+#define BQ25975_BUSOCP_SC_MAX_uA 5750000
+#define BQ25960_BUSOCP_SC_MAX_uA 3750000
+
+#define BQ25980_BUSOCP_BYP_MAX_uA 8500000
+#define BQ25975_BUSOCP_BYP_MAX_uA 8500000
+#define BQ25960_BUSOCP_BYP_MAX_uA 5750000
+
+#define BQ25980_BUSOVP_SC_STEP_uV 100000
+#define BQ25975_BUSOVP_SC_STEP_uV 50000
+#define BQ25960_BUSOVP_SC_STEP_uV 50000
+#define BQ25980_BUSOVP_SC_OFFSET_uV 14000000
+#define BQ25975_BUSOVP_SC_OFFSET_uV 7000000
+#define BQ25960_BUSOVP_SC_OFFSET_uV 7000000
+
+#define BQ25980_BUSOVP_BYP_STEP_uV 50000
+#define BQ25975_BUSOVP_BYP_STEP_uV 25000
+#define BQ25960_BUSOVP_BYP_STEP_uV 25000
+#define BQ25980_BUSOVP_BYP_OFFSET_uV 7000000
+#define BQ25975_BUSOVP_BYP_OFFSET_uV 3500000
+#define BQ25960_BUSOVP_BYP_OFFSET_uV 3500000
+
+#define BQ25980_BUSOVP_DFLT_uV 17800000
+#define BQ25980_BUSOVP_BYPASS_DFLT_uV 8900000
+#define BQ25975_BUSOVP_DFLT_uV 8900000
+#define BQ25975_BUSOVP_BYPASS_DFLT_uV 4450000
+#define BQ25960_BUSOVP_DFLT_uV 8900000
+
+#define BQ25980_BUSOVP_SC_MIN_uV 14000000
+#define BQ25975_BUSOVP_SC_MIN_uV 7000000
+#define BQ25960_BUSOVP_SC_MIN_uV 7000000
+#define BQ25980_BUSOVP_BYP_MIN_uV 7000000
+#define BQ25975_BUSOVP_BYP_MIN_uV 3500000
+#define BQ25960_BUSOVP_BYP_MIN_uV 3500000
+
+#define BQ25980_BUSOVP_SC_MAX_uV 22000000
+#define BQ25975_BUSOVP_SC_MAX_uV 12750000
+#define BQ25960_BUSOVP_SC_MAX_uV 12750000
+
+#define BQ25980_BUSOVP_BYP_MAX_uV 12750000
+#define BQ25975_BUSOVP_BYP_MAX_uV 6500000
+#define BQ25960_BUSOVP_BYP_MAX_uV 6500000
+
+#define BQ25980_BATOVP_STEP_uV 20000
+#define BQ25975_BATOVP_STEP_uV 10000
+#define BQ25960_BATOVP_STEP_uV 10000
+
+#define BQ25980_BATOVP_OFFSET_uV 7000000
+#define BQ25975_BATOVP_OFFSET_uV 3500000
+#define BQ25960_BATOVP_OFFSET_uV 3500000
+
+#define BQ25980_BATOVP_DFLT_uV 14000000
+#define BQ25975_BATOVP_DFLT_uV 8900000
+#define BQ25960_BATOVP_DFLT_uV 8900000
+
+#define BQ25980_BATOVP_MIN_uV 7000000
+#define BQ25975_BATOVP_MIN_uV 3500000
+#define BQ25960_BATOVP_MIN_uV 3500000
+
+#define BQ25980_BATOVP_MAX_uV 9540000
+#define BQ25975_BATOVP_MAX_uV 4770000
+#define BQ25960_BATOVP_MAX_uV 4770000
+
+#define BQ25980_BATOCP_STEP_uA 100000
+
+#define BQ25980_BATOCP_MASK GENMASK(6, 0)
+
+#define BQ25980_BATOCP_DFLT_uA 8100000
+#define BQ25960_BATOCP_DFLT_uA 6100000
+
+#define BQ25980_BATOCP_MIN_uA 2000000
+
+#define BQ25980_BATOCP_MAX_uA 11000000
+#define BQ25975_BATOCP_MAX_uA 11000000
+#define BQ25960_BATOCP_MAX_uA 7000000
+
+#define BQ25980_ENABLE_HIZ 0xff
+#define BQ25980_DISABLE_HIZ 0x0
+#define BQ25980_EN_BYPASS BIT(3)
+#define BQ25980_STAT1_OVP_MASK (BIT(6) | BIT(5) | BIT(0))
+#define BQ25980_STAT3_OVP_MASK (BIT(7) | BIT(6))
+#define BQ25980_STAT1_OCP_MASK BIT(3)
+#define BQ25980_STAT2_OCP_MASK (BIT(6) | BIT(1))
+#define BQ25980_STAT4_TFLT_MASK GENMASK(5, 1)
+#define BQ25980_WD_STAT BIT(0)
+#define BQ25980_PRESENT_MASK GENMASK(4, 2)
+#define BQ25980_CHG_EN BIT(4)
+#define BQ25980_EN_HIZ BIT(6)
+#define BQ25980_ADC_EN BIT(7)
+
+#define BQ25980_ADC_VOLT_STEP_uV 1000
+#define BQ25980_ADC_CURR_STEP_uA 1000
+#define BQ25980_ADC_POLARITY_BIT BIT(7)
+
+#define BQ25980_WATCHDOG_MASK GENMASK(4, 3)
+#define BQ25980_WATCHDOG_DIS BIT(2)
+#define BQ25980_WATCHDOG_MAX 300000
+#define BQ25980_WATCHDOG_MIN 0
+#define BQ25980_NUM_WD_VAL 4
+
+#endif /* BQ25980_CHARGER_H */
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index a123f6e21f08..315e0909e6a4 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BQ27xxx battery driver
*
@@ -9,14 +10,6 @@
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
*
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
* Datasheets:
* https://www.ti.com/product/bq27000
* https://www.ti.com/product/bq27200
@@ -45,6 +38,7 @@
* https://www.ti.com/product/bq27621-g1
* https://www.ti.com/product/bq27z561
* https://www.ti.com/product/bq28z610
+ * https://www.ti.com/product/bq34z100-g1
*/
#include <linux/device.h>
@@ -83,7 +77,7 @@
/* BQ27Z561 has different layout for Flags register */
#define BQ27Z561_FLAG_FDC BIT(4) /* Battery fully discharged */
-#define BQ27Z561_FLAG_FC BIT(5) /* Battery fully charged */
+#define BQ27Z561_FLAG_FC BIT(5) /* Battery fully charged */
#define BQ27Z561_FLAG_DIS_CH BIT(6) /* Battery is discharging */
/* control register params */
@@ -483,6 +477,26 @@ static u8
[BQ27XXX_REG_DCAP] = 0x3c,
[BQ27XXX_REG_AP] = 0x22,
BQ27XXX_DM_REG_ROWS,
+ },
+ bq34z100_regs[BQ27XXX_REG_MAX] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x0c,
+ [BQ27XXX_REG_INT_TEMP] = 0x2a,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x0a,
+ [BQ27XXX_REG_FLAGS] = 0x0e,
+ [BQ27XXX_REG_TTE] = 0x18,
+ [BQ27XXX_REG_TTF] = 0x1a,
+ [BQ27XXX_REG_TTES] = 0x1e,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_FCC] = 0x06,
+ [BQ27XXX_REG_CYCT] = 0x2c,
+ [BQ27XXX_REG_AE] = 0x24,
+ [BQ27XXX_REG_SOC] = 0x02,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x22,
+ BQ27XXX_DM_REG_ROWS,
};
static enum power_supply_property bq27000_props[] = {
@@ -757,6 +771,27 @@ static enum power_supply_property bq28z610_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq34z100_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
struct bq27xxx_dm_reg {
u8 subclass_id;
u8 offset;
@@ -854,13 +889,17 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
#define bq27z561_dm_regs 0
#define bq28z610_dm_regs 0
-
-#define BQ27XXX_O_ZERO 0x00000001
-#define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */
-#define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */
-#define BQ27XXX_O_CFGUP 0x00000008
-#define BQ27XXX_O_RAM 0x00000010
-#define BQ27Z561_O_BITS 0x00000020
+#define bq34z100_dm_regs 0
+
+#define BQ27XXX_O_ZERO BIT(0)
+#define BQ27XXX_O_OTDC BIT(1) /* has OTC/OTD overtemperature flags */
+#define BQ27XXX_O_UTOT BIT(2) /* has OT overtemperature flag */
+#define BQ27XXX_O_CFGUP BIT(3)
+#define BQ27XXX_O_RAM BIT(4)
+#define BQ27Z561_O_BITS BIT(5)
+#define BQ27XXX_O_SOC_SI BIT(6) /* SoC is single register */
+#define BQ27XXX_O_HAS_CI BIT(7) /* has Capacity Inaccurate flag */
+#define BQ27XXX_O_MUL_CHEM BIT(8) /* multiple chemistries supported */
#define BQ27XXX_DATA(ref, key, opt) { \
.opts = (opt), \
@@ -878,8 +917,8 @@ static struct {
enum power_supply_property *props;
size_t props_size;
} bq27xxx_chip_data[] = {
- [BQ27000] = BQ27XXX_DATA(bq27000, 0 , BQ27XXX_O_ZERO),
- [BQ27010] = BQ27XXX_DATA(bq27010, 0 , BQ27XXX_O_ZERO),
+ [BQ27000] = BQ27XXX_DATA(bq27000, 0 , BQ27XXX_O_ZERO | BQ27XXX_O_SOC_SI | BQ27XXX_O_HAS_CI),
+ [BQ27010] = BQ27XXX_DATA(bq27010, 0 , BQ27XXX_O_ZERO | BQ27XXX_O_SOC_SI | BQ27XXX_O_HAS_CI),
[BQ2750X] = BQ27XXX_DATA(bq2750x, 0 , BQ27XXX_O_OTDC),
[BQ2751X] = BQ27XXX_DATA(bq2751x, 0 , BQ27XXX_O_OTDC),
[BQ2752X] = BQ27XXX_DATA(bq2752x, 0 , BQ27XXX_O_OTDC),
@@ -907,6 +946,8 @@ static struct {
[BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27Z561] = BQ27XXX_DATA(bq27z561, 0 , BQ27Z561_O_BITS),
[BQ28Z610] = BQ27XXX_DATA(bq28z610, 0 , BQ27Z561_O_BITS),
+ [BQ34Z100] = BQ27XXX_DATA(bq34z100, 0 , BQ27XXX_O_OTDC | BQ27XXX_O_SOC_SI | \
+ BQ27XXX_O_HAS_CI | BQ27XXX_O_MUL_CHEM),
};
static DEFINE_MUTEX(bq27xxx_list_lock);
@@ -1426,7 +1467,7 @@ static int bq27xxx_battery_read_soc(struct bq27xxx_device_info *di)
{
int soc;
- if (di->opts & BQ27XXX_O_ZERO)
+ if (di->opts & BQ27XXX_O_SOC_SI)
soc = bq27xxx_read(di, BQ27XXX_REG_SOC, true);
else
soc = bq27xxx_read(di, BQ27XXX_REG_SOC, false);
@@ -1664,7 +1705,7 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
{
struct bq27xxx_reg_cache cache = {0, };
- bool has_ci_flag = di->opts & BQ27XXX_O_ZERO;
+ bool has_ci_flag = di->opts & BQ27XXX_O_HAS_CI;
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
@@ -1772,8 +1813,6 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27000_FLAG_CHGS)
status = POWER_SUPPLY_STATUS_CHARGING;
- else if (power_supply_am_i_supplied(di->bat) > 0)
- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
status = POWER_SUPPLY_STATUS_DISCHARGING;
} else if (di->opts & BQ27Z561_O_BITS) {
@@ -1792,6 +1831,10 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
status = POWER_SUPPLY_STATUS_CHARGING;
}
+ if ((status == POWER_SUPPLY_STATUS_DISCHARGING) &&
+ (power_supply_am_i_supplied(di->bat) > 0))
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
val->intval = status;
return 0;
@@ -1916,7 +1959,10 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
ret = bq27xxx_simple_value(di->cache.time_to_full, val);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ if (di->opts & BQ27XXX_O_MUL_CHEM)
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ else
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
ret = bq27xxx_simple_value(bq27xxx_battery_read_nac(di), val);
@@ -1992,13 +2038,9 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
psy_desc->external_power_changed = bq27xxx_external_power_changed;
di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
- if (IS_ERR(di->bat)) {
- if (PTR_ERR(di->bat) == -EPROBE_DEFER)
- dev_dbg(di->dev, "failed to register battery, deferring probe\n");
- else
- dev_err(di->dev, "failed to register battery\n");
- return PTR_ERR(di->bat);
- }
+ if (IS_ERR(di->bat))
+ return dev_err_probe(di->dev, PTR_ERR(di->bat),
+ "failed to register battery\n");
bq27xxx_battery_settings(di);
bq27xxx_battery_update(di);
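
The BIT()-based option flags introduced above are consumed by masking di->opts at each
decision point, as bq27xxx_battery_read_soc() now does. A minimal standalone sketch of
that pattern, assuming only <linux/bits.h> and <linux/types.h>; the reduced struct is a
stand-in for struct bq27xxx_device_info, not the real layout:

#include <linux/bits.h>
#include <linux/types.h>

#define BQ27XXX_O_SOC_SI	BIT(6)	/* SoC is a single register */
#define BQ27XXX_O_MUL_CHEM	BIT(8)	/* multiple chemistries supported */

struct bq27xxx_opts_sketch {
	u32 opts;	/* stand-in for bq27xxx_device_info::opts */
};

/* single-byte vs. word-sized read of the SoC register */
static bool bq27xxx_soc_is_single(const struct bq27xxx_opts_sketch *di)
{
	return di->opts & BQ27XXX_O_SOC_SI;
}

/* chips supporting several chemistries report TECHNOLOGY as unknown */
static bool bq27xxx_technology_unknown(const struct bq27xxx_opts_sketch *di)
{
	return di->opts & BQ27XXX_O_MUL_CHEM;
}

Splitting the old BQ27XXX_O_ZERO behaviour into separate _SOC_SI and _HAS_CI bits is
what lets the bq34z100 reuse the single-register SoC read and the capacity-inaccurate
flag without also inheriting the bq27000-era register layout.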
diff --git a/drivers/power/supply/bq27xxx_battery_hdq.c b/drivers/power/supply/bq27xxx_battery_hdq.c
index 29771967df2e..922759ab2e04 100644
--- a/drivers/power/supply/bq27xxx_battery_hdq.c
+++ b/drivers/power/supply/bq27xxx_battery_hdq.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BQ27xxx battery monitor HDQ/1-wire driver
*
* Copyright (C) 2007-2017 Texas Instruments Incorporated - https://www.ti.com/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -104,7 +97,7 @@ static void bq27xxx_battery_hdq_remove_slave(struct w1_slave *sl)
bq27xxx_battery_teardown(di);
}
-static struct w1_family_ops bq27xxx_battery_hdq_fops = {
+static const struct w1_family_ops bq27xxx_battery_hdq_fops = {
.add_slave = bq27xxx_battery_hdq_add_slave,
.remove_slave = bq27xxx_battery_hdq_remove_slave,
};
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index ab02456d69e5..eb4f4284982f 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BQ27xxx battery monitor I2C driver
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/i2c.h>
@@ -255,6 +247,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27621", BQ27621 },
{ "bq27z561", BQ27Z561 },
{ "bq28z610", BQ28Z610 },
+ { "bq34z100", BQ34Z100 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table);
@@ -290,6 +283,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27621" },
{ .compatible = "ti,bq27z561" },
{ .compatible = "ti,bq28z610" },
+ { .compatible = "ti,bq34z100" },
{},
};
MODULE_DEVICE_TABLE(of, bq27xxx_battery_i2c_of_match_table);
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 2ef53dc1f2fb..6fcebe441552 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -26,6 +26,29 @@
#include <linux/of.h>
#include <linux/thermal.h>
+static struct {
+ const char *name;
+ u64 extcon_type;
+} extcon_mapping[] = {
+ /* Current textual representations */
+ { "USB", EXTCON_USB },
+ { "USB-HOST", EXTCON_USB_HOST },
+ { "SDP", EXTCON_CHG_USB_SDP },
+ { "DCP", EXTCON_CHG_USB_DCP },
+ { "CDP", EXTCON_CHG_USB_CDP },
+ { "ACA", EXTCON_CHG_USB_ACA },
+ { "FAST-CHARGER", EXTCON_CHG_USB_FAST },
+ { "SLOW-CHARGER", EXTCON_CHG_USB_SLOW },
+ { "WPT", EXTCON_CHG_WPT },
+ { "PD", EXTCON_CHG_USB_PD },
+ { "DOCK", EXTCON_DOCK },
+ { "JIG", EXTCON_JIG },
+ { "MECHANICAL", EXTCON_MECHANICAL },
+ /* Deprecated textual representations */
+ { "TA", EXTCON_CHG_USB_SDP },
+ { "CHARGE-DOWNSTREAM", EXTCON_CHG_USB_CDP },
+};
+
/*
* Default temperature threshold for charging.
* Every temperature units are in tenth of centigrade.
@@ -33,18 +56,6 @@
#define CM_DEFAULT_RECHARGE_TEMP_DIFF 50
#define CM_DEFAULT_CHARGE_TEMP_MAX 500
-static const char * const default_event_names[] = {
- [CM_EVENT_UNKNOWN] = "Unknown",
- [CM_EVENT_BATT_FULL] = "Battery Full",
- [CM_EVENT_BATT_IN] = "Battery Inserted",
- [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
- [CM_EVENT_BATT_OVERHEAT] = "Battery Overheat",
- [CM_EVENT_BATT_COLD] = "Battery Cold",
- [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
- [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
- [CM_EVENT_OTHERS] = "Other battery events"
-};
-
/*
* Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
* delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -61,8 +72,6 @@ static const char * const default_event_names[] = {
*/
#define CM_RTC_SMALL (2)
-#define UEVENT_BUF_SIZE 32
-
static LIST_HEAD(cm_list);
static DEFINE_MUTEX(cm_list_mtx);
@@ -285,6 +294,19 @@ static bool is_full_charged(struct charger_manager *cm)
if (!fuel_gauge)
return false;
+ /* Full, if it's over the fullbatt voltage */
+ if (desc->fullbatt_uV > 0) {
+ ret = get_batt_uV(cm, &uV);
+ if (!ret) {
+ /* Battery is already full, check the voltage drop. */
+ if (cm->battery_status == POWER_SUPPLY_STATUS_FULL
+ && desc->fullbatt_vchkdrop_uV)
+ uV += desc->fullbatt_vchkdrop_uV;
+ if (uV >= desc->fullbatt_uV)
+ return true;
+ }
+ }
+
if (desc->fullbatt_full_capacity > 0) {
val.intval = 0;
@@ -297,15 +319,6 @@ static bool is_full_charged(struct charger_manager *cm)
}
}
- /* Full, if it's over the fullbatt voltage */
- if (desc->fullbatt_uV > 0) {
- ret = get_batt_uV(cm, &uV);
- if (!ret && uV >= desc->fullbatt_uV) {
- is_full = true;
- goto out;
- }
- }
-
/* Full, if the capacity is more than fullbatt_soc */
if (desc->fullbatt_soc > 0) {
val.intval = 0;
@@ -427,122 +440,6 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
}
/**
- * try_charger_restart - Restart charging.
- * @cm: the Charger Manager representing the battery.
- *
- * Restart charging by turning off and on the charger.
- */
-static int try_charger_restart(struct charger_manager *cm)
-{
- int err;
-
- if (cm->emergency_stop)
- return -EAGAIN;
-
- err = try_charger_enable(cm, false);
- if (err)
- return err;
-
- return try_charger_enable(cm, true);
-}
-
-/**
- * uevent_notify - Let users know something has changed.
- * @cm: the Charger Manager representing the battery.
- * @event: the event string.
- *
- * If @event is null, it implies that uevent_notify is called
- * by resume function. When called in the resume function, cm_suspended
- * should be already reset to false in order to let uevent_notify
- * notify the recent event during the suspend to users. While
- * suspended, uevent_notify does not notify users, but tracks
- * events so that uevent_notify can notify users later after resumed.
- */
-static void uevent_notify(struct charger_manager *cm, const char *event)
-{
- static char env_str[UEVENT_BUF_SIZE + 1] = "";
- static char env_str_save[UEVENT_BUF_SIZE + 1] = "";
-
- if (cm_suspended) {
- /* Nothing in suspended-event buffer */
- if (env_str_save[0] == 0) {
- if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
- return; /* status not changed */
- strncpy(env_str_save, event, UEVENT_BUF_SIZE);
- return;
- }
-
- if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
- return; /* Duplicated. */
- strncpy(env_str_save, event, UEVENT_BUF_SIZE);
- return;
- }
-
- if (event == NULL) {
- /* No messages pending */
- if (!env_str_save[0])
- return;
-
- strncpy(env_str, env_str_save, UEVENT_BUF_SIZE);
- kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
- env_str_save[0] = 0;
-
- return;
- }
-
- /* status not changed */
- if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
- return;
-
- /* save the status and notify the update */
- strncpy(env_str, event, UEVENT_BUF_SIZE);
- kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
-
- dev_info(cm->dev, "%s\n", event);
-}
-
-/**
- * fullbatt_vchk - Check voltage drop some times after "FULL" event.
- * @work: the work_struct appointing the function
- *
- * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
- * charger_desc, Charger Manager checks voltage drop after the battery
- * "FULL" event. It checks whether the voltage has dropped more than
- * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkrop_ms.
- */
-static void fullbatt_vchk(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct charger_manager *cm = container_of(dwork,
- struct charger_manager, fullbatt_vchk_work);
- struct charger_desc *desc = cm->desc;
- int batt_uV, err, diff;
-
- /* remove the appointment for fullbatt_vchk */
- cm->fullbatt_vchk_jiffies_at = 0;
-
- if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
- return;
-
- err = get_batt_uV(cm, &batt_uV);
- if (err) {
- dev_err(cm->dev, "%s: get_batt_uV error(%d)\n", __func__, err);
- return;
- }
-
- diff = desc->fullbatt_uV - batt_uV;
- if (diff < 0)
- return;
-
- dev_info(cm->dev, "VBATT dropped %duV after full-batt\n", diff);
-
- if (diff > desc->fullbatt_vchkdrop_uV) {
- try_charger_restart(cm);
- uevent_notify(cm, "Recharging");
- }
-}
-
-/**
* check_charging_duration - Monitor charging/discharging duration
* @cm: the Charger Manager representing the battery.
*
@@ -569,19 +466,14 @@ static int check_charging_duration(struct charger_manager *cm)
if (duration > desc->charging_max_duration_ms) {
dev_info(cm->dev, "Charging duration exceed %ums\n",
desc->charging_max_duration_ms);
- uevent_notify(cm, "Discharging");
- try_charger_enable(cm, false);
ret = true;
}
- } else if (is_ext_pwr_online(cm) && !cm->charger_enabled) {
+ } else if (cm->battery_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
duration = curr - cm->charging_end_time;
- if (duration > desc->discharging_max_duration_ms &&
- is_ext_pwr_online(cm)) {
+ if (duration > desc->discharging_max_duration_ms) {
dev_info(cm->dev, "Discharging duration exceed %ums\n",
desc->discharging_max_duration_ms);
- uevent_notify(cm, "Recharging");
- try_charger_enable(cm, true);
ret = true;
}
}
@@ -657,14 +549,53 @@ static int cm_check_thermal_status(struct charger_manager *cm)
}
if (temp > upper_limit)
- ret = CM_EVENT_BATT_OVERHEAT;
+ ret = CM_BATT_OVERHEAT;
else if (temp < lower_limit)
- ret = CM_EVENT_BATT_COLD;
+ ret = CM_BATT_COLD;
+ else
+ ret = CM_BATT_OK;
+
+ cm->emergency_stop = ret;
return ret;
}
/**
+ * cm_get_target_status - Check current status and get next target status.
+ * @cm: the Charger Manager representing the battery.
+ */
+static int cm_get_target_status(struct charger_manager *cm)
+{
+ if (!is_ext_pwr_online(cm))
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (cm_check_thermal_status(cm)) {
+ /* Check if discharging duration exceeds limit. */
+ if (check_charging_duration(cm))
+ goto charging_ok;
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ }
+
+ switch (cm->battery_status) {
+ case POWER_SUPPLY_STATUS_CHARGING:
+ /* Check if charging duration exceeds limit. */
+ if (check_charging_duration(cm))
+ return POWER_SUPPLY_STATUS_FULL;
+ fallthrough;
+ case POWER_SUPPLY_STATUS_FULL:
+ if (is_full_charged(cm))
+ return POWER_SUPPLY_STATUS_FULL;
+ fallthrough;
+ default:
+ break;
+ }
+
+charging_ok:
+ /* Charging is allowed. */
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+/**
* _cm_monitor - Monitor the temperature and return true for exceptions.
* @cm: the Charger Manager representing the battery.
*
@@ -673,60 +604,18 @@ static int cm_check_thermal_status(struct charger_manager *cm)
*/
static bool _cm_monitor(struct charger_manager *cm)
{
- int temp_alrt;
-
- temp_alrt = cm_check_thermal_status(cm);
-
- /* It has been stopped already */
- if (temp_alrt && cm->emergency_stop)
- return false;
-
- /*
- * Check temperature whether overheat or cold.
- * If temperature is out of range normal state, stop charging.
- */
- if (temp_alrt) {
- cm->emergency_stop = temp_alrt;
- if (!try_charger_enable(cm, false))
- uevent_notify(cm, default_event_names[temp_alrt]);
-
- /*
- * Check whole charging duration and discharging duration
- * after full-batt.
- */
- } else if (!cm->emergency_stop && check_charging_duration(cm)) {
- dev_dbg(cm->dev,
- "Charging/Discharging duration is out of range\n");
- /*
- * Check dropped voltage of battery. If battery voltage is more
- * dropped than fullbatt_vchkdrop_uV after fully charged state,
- * charger-manager have to recharge battery.
- */
- } else if (!cm->emergency_stop && is_ext_pwr_online(cm) &&
- !cm->charger_enabled) {
- fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+ int target;
- /*
- * Check whether fully charged state to protect overcharge
- * if charger-manager is charging for battery.
- */
- } else if (!cm->emergency_stop && is_full_charged(cm) &&
- cm->charger_enabled) {
- dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged\n");
- uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+ target = cm_get_target_status(cm);
- try_charger_enable(cm, false);
+ try_charger_enable(cm, (target == POWER_SUPPLY_STATUS_CHARGING));
- fullbatt_vchk(&cm->fullbatt_vchk_work.work);
- } else {
- cm->emergency_stop = 0;
- if (is_ext_pwr_online(cm)) {
- if (!try_charger_enable(cm, true))
- uevent_notify(cm, "CHARGING");
- }
+ if (cm->battery_status != target) {
+ cm->battery_status = target;
+ power_supply_changed(cm->charger_psy);
}
- return true;
+ return (cm->battery_status == POWER_SUPPLY_STATUS_NOT_CHARGING);
}
/**
@@ -819,66 +708,6 @@ static void cm_monitor_poller(struct work_struct *work)
schedule_work(&setup_polling);
}
-/**
- * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
- * @cm: the Charger Manager representing the battery.
- */
-static void fullbatt_handler(struct charger_manager *cm)
-{
- struct charger_desc *desc = cm->desc;
-
- if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
- goto out;
-
- if (cm_suspended)
- device_set_wakeup_capable(cm->dev, true);
-
- mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
- msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
- cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
- desc->fullbatt_vchkdrop_ms);
-
- if (cm->fullbatt_vchk_jiffies_at == 0)
- cm->fullbatt_vchk_jiffies_at = 1;
-
-out:
- dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged\n");
- uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
-}
-
-/**
- * battout_handler - Event handler for CM_EVENT_BATT_OUT
- * @cm: the Charger Manager representing the battery.
- */
-static void battout_handler(struct charger_manager *cm)
-{
- if (cm_suspended)
- device_set_wakeup_capable(cm->dev, true);
-
- if (!is_batt_present(cm)) {
- dev_emerg(cm->dev, "Battery Pulled Out!\n");
- uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
- } else {
- uevent_notify(cm, "Battery Reinserted?");
- }
-}
-
-/**
- * misc_event_handler - Handler for other events
- * @cm: the Charger Manager representing the battery.
- * @type: the Charger Manager representing the battery.
- */
-static void misc_event_handler(struct charger_manager *cm,
- enum cm_event_types type)
-{
- if (cm_suspended)
- device_set_wakeup_capable(cm->dev, true);
-
- if (is_polling_required(cm) && cm->desc->polling_interval_ms)
- schedule_work(&setup_polling);
- uevent_notify(cm, default_event_names[type]);
-}
-
static int charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -891,12 +720,7 @@ static int charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (is_charging(cm))
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else if (is_ext_pwr_online(cm))
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ val->intval = cm->battery_status;
break;
case POWER_SUPPLY_PROP_HEALTH:
if (cm->emergency_stop > 0)
@@ -925,7 +749,6 @@ static int charger_get_property(struct power_supply *psy,
POWER_SUPPLY_PROP_CURRENT_NOW, val);
break;
case POWER_SUPPLY_PROP_TEMP:
- case POWER_SUPPLY_PROP_TEMP_AMBIENT:
return cm_get_battery_temperature(cm, &val->intval);
case POWER_SUPPLY_PROP_CAPACITY:
if (!is_batt_present(cm)) {
@@ -981,35 +804,13 @@ static int charger_get_property(struct power_supply *psy,
val->intval = 0;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- if (is_full_charged(cm))
- val->intval = 1;
- else
- val->intval = 0;
- ret = 0;
- break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
- if (is_charging(cm)) {
- fuel_gauge = power_supply_get_by_name(
- cm->desc->psy_fuel_gauge);
- if (!fuel_gauge) {
- ret = -ENODEV;
- break;
- }
-
- ret = power_supply_get_property(fuel_gauge,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- val);
- if (ret) {
- val->intval = 1;
- ret = 0;
- } else {
- /* If CHARGE_NOW is supplied, use it */
- val->intval = (val->intval > 0) ?
- val->intval : 1;
- }
- } else {
- val->intval = 0;
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge) {
+ ret = -ENODEV;
+ break;
}
+ ret = power_supply_get_property(fuel_gauge, psp, val);
break;
default:
return -EINVAL;
@@ -1028,13 +829,12 @@ static enum power_supply_property default_charger_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_CHARGE_FULL,
/*
* Optional properties are:
+ * POWER_SUPPLY_PROP_CHARGE_FULL,
* POWER_SUPPLY_PROP_CHARGE_NOW,
* POWER_SUPPLY_PROP_CURRENT_NOW,
- * POWER_SUPPLY_PROP_TEMP, and
- * POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ * POWER_SUPPLY_PROP_TEMP,
*/
};
@@ -1069,21 +869,6 @@ static bool cm_setup_timer(void)
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
- unsigned int fbchk_ms = 0;
-
- /* fullbatt_vchk is required. setup timer for that */
- if (cm->fullbatt_vchk_jiffies_at) {
- fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
- - jiffies);
- if (time_is_before_eq_jiffies(
- cm->fullbatt_vchk_jiffies_at) ||
- msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
- fullbatt_vchk(&cm->fullbatt_vchk_work.work);
- fbchk_ms = 0;
- }
- }
- CM_MIN_VALID(wakeup_ms, fbchk_ms);
-
/* Skip if polling is not required for this CM */
if (!is_polling_required(cm) && !cm->emergency_stop)
continue;
@@ -1145,7 +930,8 @@ static void charger_extcon_work(struct work_struct *work)
cable->min_uA, cable->max_uA);
}
- try_charger_enable(cable->cm, cable->attached);
+ cancel_delayed_work(&cm_monitor_work);
+ queue_delayed_work(cm_wq, &cm_monitor_work, 0);
}
/**
@@ -1169,15 +955,6 @@ static int charger_extcon_notifier(struct notifier_block *self,
cable->attached = event;
/*
- * Setup monitoring to check battery state
- * when charger cable is attached.
- */
- if (cable->attached && is_polling_required(cable->cm)) {
- cancel_work_sync(&setup_polling);
- schedule_work(&setup_polling);
- }
-
- /*
* Setup work for controlling charger(regulator)
* according to charger cable.
*/
@@ -1196,7 +973,8 @@ static int charger_extcon_notifier(struct notifier_block *self,
static int charger_extcon_init(struct charger_manager *cm,
struct charger_cable *cable)
{
- int ret;
+ int ret, i;
+ u64 extcon_type = EXTCON_NONE;
/*
* Charger manager use Extcon framework to identify
@@ -1205,14 +983,39 @@ static int charger_extcon_init(struct charger_manager *cm,
*/
INIT_WORK(&cable->wq, charger_extcon_work);
cable->nb.notifier_call = charger_extcon_notifier;
- ret = extcon_register_interest(&cable->extcon_dev,
- cable->extcon_name, cable->name, &cable->nb);
+
+ cable->extcon_dev = extcon_get_extcon_dev(cable->extcon_name);
+ if (IS_ERR_OR_NULL(cable->extcon_dev)) {
+ pr_err("Cannot find extcon_dev for %s (cable: %s)\n",
+ cable->extcon_name, cable->name);
+ if (cable->extcon_dev == NULL)
+ return -EPROBE_DEFER;
+ else
+ return PTR_ERR(cable->extcon_dev);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(extcon_mapping); i++) {
+ if (!strcmp(cable->name, extcon_mapping[i].name)) {
+ extcon_type = extcon_mapping[i].extcon_type;
+ break;
+ }
+ }
+ if (extcon_type == EXTCON_NONE) {
+ pr_err("Cannot find cable for type %s", cable->name);
+ return -EINVAL;
+ }
+
+ cable->extcon_type = extcon_type;
+
+ ret = devm_extcon_register_notifier(cm->dev, cable->extcon_dev,
+ cable->extcon_type, &cable->nb);
if (ret < 0) {
- pr_info("Cannot register extcon_dev for %s(cable: %s)\n",
+ pr_err("Cannot register extcon_dev for %s (cable: %s)\n",
cable->extcon_name, cable->name);
+ return ret;
}
- return ret;
+ return 0;
}
/**
@@ -1229,6 +1032,7 @@ static int charger_manager_register_extcon(struct charger_manager *cm)
{
struct charger_desc *desc = cm->desc;
struct charger_regulator *charger;
+ unsigned long event;
int ret;
int i;
int j;
@@ -1256,6 +1060,11 @@ static int charger_manager_register_extcon(struct charger_manager *cm)
}
cable->charger = charger;
cable->cm = cm;
+
+ event = extcon_get_state(cable->extcon_dev,
+ cable->extcon_type);
+ charger_extcon_notifier(&cable->nb,
+ event, NULL);
}
}
@@ -1447,7 +1256,7 @@ static int cm_init_thermal_data(struct charger_manager *cm,
return PTR_ERR(cm->tzd_batt);
/* Use external thermometer */
- properties[*num_properties] = POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ properties[*num_properties] = POWER_SUPPLY_PROP_TEMP;
(*num_properties)++;
cm->desc->measure_battery_temp = true;
ret = 0;
@@ -1491,8 +1300,6 @@ static struct charger_desc *of_cm_parse_desc(struct device *dev)
of_property_read_u32(np, "cm-poll-interval",
&desc->polling_interval_ms);
- of_property_read_u32(np, "cm-fullbatt-vchkdrop-ms",
- &desc->fullbatt_vchkdrop_ms);
of_property_read_u32(np, "cm-fullbatt-vchkdrop-volt",
&desc->fullbatt_vchkdrop_uV);
of_property_read_u32(np, "cm-fullbatt-voltage", &desc->fullbatt_uV);
@@ -1504,8 +1311,8 @@ static struct charger_desc *of_cm_parse_desc(struct device *dev)
desc->battery_present = battery_stat;
/* chargers */
- of_property_read_u32(np, "cm-num-chargers", &num_chgs);
- if (num_chgs) {
+ num_chgs = of_property_count_strings(np, "cm-chargers");
+ if (num_chgs > 0) {
int i;
/* Allocate empty bin at the tail of array */
@@ -1618,7 +1425,6 @@ static int charger_manager_probe(struct platform_device *pdev)
struct charger_desc *desc = cm_get_drv_data(pdev);
struct charger_manager *cm;
int ret, i = 0;
- int j = 0;
union power_supply_propval val;
struct power_supply *fuel_gauge;
enum power_supply_property *properties;
@@ -1654,9 +1460,8 @@ static int charger_manager_probe(struct platform_device *pdev)
if (desc->fullbatt_uV == 0) {
dev_info(&pdev->dev, "Ignoring full-battery voltage threshold as it is not supplied\n");
}
- if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+ if (!desc->fullbatt_vchkdrop_uV) {
dev_info(&pdev->dev, "Disabling full-battery voltage drop checking mechanism as it is not supplied\n");
- desc->fullbatt_vchkdrop_ms = 0;
desc->fullbatt_vchkdrop_uV = 0;
}
if (desc->fullbatt_soc == 0) {
@@ -1739,6 +1544,12 @@ static int charger_manager_probe(struct platform_device *pdev)
return -ENODEV;
}
if (!power_supply_get_property(fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_FULL, &val)) {
+ properties[num_properties] =
+ POWER_SUPPLY_PROP_CHARGE_FULL;
+ num_properties++;
+ }
+ if (!power_supply_get_property(fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
properties[num_properties] =
POWER_SUPPLY_PROP_CHARGE_NOW;
@@ -1762,8 +1573,6 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy_desc.properties = properties;
cm->charger_psy_desc.num_properties = num_properties;
- INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
-
/* Register sysfs entry for charger(regulator) */
ret = charger_manager_prepare_sysfs(cm);
if (ret < 0) {
@@ -1813,19 +1622,8 @@ static int charger_manager_probe(struct platform_device *pdev)
return 0;
err_reg_extcon:
- for (i = 0; i < desc->num_charger_regulators; i++) {
- struct charger_regulator *charger;
-
- charger = &desc->charger_regulators[i];
- for (j = 0; j < charger->num_cables; j++) {
- struct charger_cable *cable = &charger->cables[j];
- /* Remove notifier block if only edev exists */
- if (cable->extcon_dev.edev)
- extcon_unregister_interest(&cable->extcon_dev);
- }
-
+ for (i = 0; i < desc->num_charger_regulators; i++)
regulator_put(desc->charger_regulators[i].consumer);
- }
power_supply_unregister(cm->charger_psy);
@@ -1837,7 +1635,6 @@ static int charger_manager_remove(struct platform_device *pdev)
struct charger_manager *cm = platform_get_drvdata(pdev);
struct charger_desc *desc = cm->desc;
int i = 0;
- int j = 0;
/* Remove from the list */
mutex_lock(&cm_list_mtx);
@@ -1847,15 +1644,6 @@ static int charger_manager_remove(struct platform_device *pdev)
cancel_work_sync(&setup_polling);
cancel_delayed_work_sync(&cm_monitor_work);
- for (i = 0 ; i < desc->num_charger_regulators ; i++) {
- struct charger_regulator *charger
- = &desc->charger_regulators[i];
- for (j = 0 ; j < charger->num_cables ; j++) {
- struct charger_cable *cable = &charger->cables[j];
- extcon_unregister_interest(&cable->extcon_dev);
- }
- }
-
for (i = 0 ; i < desc->num_charger_regulators ; i++)
regulator_put(desc->charger_regulators[i].consumer);
@@ -1903,8 +1691,6 @@ static bool cm_need_to_awake(void)
static int cm_suspend_prepare(struct device *dev)
{
- struct charger_manager *cm = dev_get_drvdata(dev);
-
if (cm_need_to_awake())
return -EBUSY;
@@ -1916,7 +1702,6 @@ static int cm_suspend_prepare(struct device *dev)
if (cm_timer_set) {
cancel_work_sync(&setup_polling);
cancel_delayed_work_sync(&cm_monitor_work);
- cancel_delayed_work(&cm->fullbatt_vchk_work);
}
return 0;
@@ -1941,31 +1726,6 @@ static void cm_suspend_complete(struct device *dev)
_cm_monitor(cm);
- /* Re-enqueue delayed work (fullbatt_vchk_work) */
- if (cm->fullbatt_vchk_jiffies_at) {
- unsigned long delay = 0;
- unsigned long now = jiffies + CM_JIFFIES_SMALL;
-
- if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
- delay = (unsigned long)((long)now
- - (long)(cm->fullbatt_vchk_jiffies_at));
- delay = jiffies_to_msecs(delay);
- } else {
- delay = 0;
- }
-
- /*
- * Account for cm_suspend_duration_ms with assuming that
- * timer stops in suspend.
- */
- if (delay > cm_suspend_duration_ms)
- delay -= cm_suspend_duration_ms;
- else
- delay = 0;
-
- queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
- msecs_to_jiffies(delay));
- }
device_set_wakeup_capable(cm->dev, false);
}
@@ -2007,56 +1767,6 @@ static void __exit charger_manager_cleanup(void)
}
module_exit(charger_manager_cleanup);
-/**
- * cm_notify_event - charger driver notify Charger Manager of charger event
- * @psy: pointer to instance of charger's power_supply
- * @type: type of charger event
- * @msg: optional message passed to uevent_notify function
- */
-void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
- char *msg)
-{
- struct charger_manager *cm;
- bool found_power_supply = false;
-
- if (psy == NULL)
- return;
-
- mutex_lock(&cm_list_mtx);
- list_for_each_entry(cm, &cm_list, entry) {
- if (match_string(cm->desc->psy_charger_stat, -1,
- psy->desc->name) >= 0) {
- found_power_supply = true;
- break;
- }
- }
- mutex_unlock(&cm_list_mtx);
-
- if (!found_power_supply)
- return;
-
- switch (type) {
- case CM_EVENT_BATT_FULL:
- fullbatt_handler(cm);
- break;
- case CM_EVENT_BATT_OUT:
- battout_handler(cm);
- break;
- case CM_EVENT_BATT_IN:
- case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
- misc_event_handler(cm, type);
- break;
- case CM_EVENT_UNKNOWN:
- case CM_EVENT_OTHERS:
- uevent_notify(cm, msg ? msg : default_event_names[type]);
- break;
- default:
- dev_err(cm->dev, "%s: type not specified\n", __func__);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(cm_notify_event);
-
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("Charger Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 90eba364664b..295611b3b15e 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -747,11 +747,8 @@ static int cpcap_battery_init_iio(struct cpcap_battery_ddata *ddata)
return 0;
out_err:
- if (error != -EPROBE_DEFER)
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
-
- return error;
+ return dev_err_probe(ddata->dev, error,
+ "could not initialize VBUS or ID IIO\n");
}
/* Calibrate coulomb counter */
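
This cpcap change, like the bq27xxx, ingenic, lego_ev3 and gpio-charger hunks in this
series, converts the open-coded -EPROBE_DEFER special case to dev_err_probe(). A
sketch of roughly what the helper replaces, assuming a v5.9+ kernel where
dev_err_probe() exists (on recent kernels it also records the deferral reason for
the devices_deferred debugfs file); the function name and message are illustrative
only:

#include <linux/device.h>
#include <linux/errno.h>

/* hand-rolled equivalent of: return dev_err_probe(dev, err, "no channel\n"); */
static int report_probe_error(struct device *dev, int err)
{
	if (err == -EPROBE_DEFER)
		dev_dbg(dev, "no channel\n");	/* quiet: probe is retried */
	else
		dev_err(dev, "no channel\n");	/* real failure: make noise */
	return err;
}

The payoff is that each call site shrinks to a single return statement and probe
deferral no longer spams the log at err level.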
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 11bed88a89fa..695bb6747400 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -795,7 +795,7 @@ static const struct of_device_id w1_ds2760_of_ids[] = {
};
#endif
-static struct w1_family_ops w1_ds2760_fops = {
+static const struct w1_family_ops w1_ds2760_fops = {
.add_slave = w1_ds2760_add_slave,
.remove_slave = w1_ds2760_remove_slave,
.groups = w1_ds2760_groups,
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index db3a25404c9f..dd57a472e878 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -160,7 +160,7 @@ static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
/*
* The voltage value is located in 10 bits across the voltage MSB
- * and LSB registers in two's compliment form
+ * and LSB registers in two's complement form
* Sign bit of the voltage value is in bit 7 of the voltage MSB register
* Bits 9 - 3 of the voltage value are in bits 6 - 0 of the
* voltage MSB register
@@ -188,7 +188,7 @@ static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
/*
* The temperature value is located in 10 bits across the temperature
- * MSB and LSB registers in two's compliment form
+ * MSB and LSB registers in two's complement form
* Sign bit of the temperature value is in bit 7 of the temperature
* MSB register
* Bits 9 - 3 of the temperature value are in bits 6 - 0 of the
@@ -241,7 +241,7 @@ static int ds2780_get_current(struct ds2780_device_info *dev_info,
/*
* The current value is located in 16 bits across the current MSB
- * and LSB registers in two's compliment form
+ * and LSB registers in two's complement form
* Sign bit of the current value is in bit 7 of the current MSB register
* Bits 14 - 8 of the current value are in bits 6 - 0 of the current
* MSB register
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index 130cbdfc14eb..3df3c820b38c 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -168,7 +168,7 @@ static int ds2781_get_voltage(struct ds2781_device_info *dev_info,
return ret;
/*
* The voltage value is located in 10 bits across the voltage MSB
- * and LSB registers in two's compliment form
+ * and LSB registers in two's complement form
* Sign bit of the voltage value is in bit 7 of the voltage MSB register
* Bits 9 - 3 of the voltage value are in bits 6 - 0 of the
* voltage MSB register
@@ -197,7 +197,7 @@ static int ds2781_get_temperature(struct ds2781_device_info *dev_info,
return ret;
/*
* The temperature value is located in 10 bits across the temperature
- * MSB and LSB registers in two's compliment form
+ * MSB and LSB registers in two's complement form
* Sign bit of the temperature value is in bit 7 of the temperature
* MSB register
* Bits 9 - 3 of the temperature value are in bits 6 - 0 of the
@@ -242,7 +242,7 @@ static int ds2781_get_current(struct ds2781_device_info *dev_info,
/*
* The current value is located in 16 bits across the current MSB
- * and LSB registers in two's compliment form
+ * and LSB registers in two's complement form
* Sign bit of the current value is in bit 7 of the current MSB register
* Bits 14 - 8 of the current value are in bits 6 - 0 of the current
* MSB register
diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c
index c2644a9fe80f..bf1754355c9f 100644
--- a/drivers/power/supply/goldfish_battery.c
+++ b/drivers/power/supply/goldfish_battery.c
@@ -266,11 +266,13 @@ static const struct of_device_id goldfish_battery_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_battery_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id goldfish_battery_acpi_match[] = {
{ "GFSH0001", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
+#endif
static struct platform_driver goldfish_battery_device = {
.probe = goldfish_battery_probe,
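
Guarding the ACPI ID table with #ifdef CONFIG_ACPI avoids a defined-but-unused
warning when ACPI is compiled out, because the only reference to the table disappears
with it. A sketch of the usual pairing, assuming the driver wires the table up
through ACPI_PTR(), which expands to NULL without CONFIG_ACPI (driver and device
names here are illustrative):

#include <linux/acpi.h>
#include <linux/platform_device.h>

#ifdef CONFIG_ACPI
static const struct acpi_device_id example_acpi_match[] = {
	{ "GFSH0001", 0 },
	{ },
};
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name			= "example-battery",
		/* NULL when CONFIG_ACPI is off, so the #ifdef is safe */
		.acpi_match_table	= ACPI_PTR(example_acpi_match),
	},
};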
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 875735d50716..68212b39785b 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -5,7 +5,6 @@
*/
#include <linux/device.h>
-#include <linux/gpio.h> /* For legacy platform data */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -18,7 +17,13 @@
#include <linux/power/gpio-charger.h>
+struct gpio_mapping {
+ u32 limit_ua;
+ u32 gpiodata;
+} __packed;
+
struct gpio_charger {
+ struct device *dev;
unsigned int irq;
unsigned int charge_status_irq;
bool wakeup_enabled;
@@ -27,6 +32,11 @@ struct gpio_charger {
struct power_supply_desc charger_desc;
struct gpio_desc *gpiod;
struct gpio_desc *charge_status;
+
+ struct gpio_descs *current_limit_gpios;
+ struct gpio_mapping *current_limit_map;
+ u32 current_limit_map_size;
+ u32 charge_current_limit;
};
static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -43,6 +53,35 @@ static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy)
return power_supply_get_drvdata(psy);
}
+static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val)
+{
+ struct gpio_mapping mapping;
+ int ndescs = gpio_charger->current_limit_gpios->ndescs;
+ struct gpio_desc **gpios = gpio_charger->current_limit_gpios->desc;
+ int i;
+
+ if (!gpio_charger->current_limit_map_size)
+ return -EINVAL;
+
+ for (i = 0; i < gpio_charger->current_limit_map_size; i++) {
+ if (gpio_charger->current_limit_map[i].limit_ua <= val)
+ break;
+ }
+ mapping = gpio_charger->current_limit_map[i];
+
+ for (i = 0; i < ndescs; i++) {
+ bool val = (mapping.gpiodata >> i) & 1;
+ gpiod_set_value_cansleep(gpios[ndescs-i-1], val);
+ }
+
+ gpio_charger->charge_current_limit = mapping.limit_ua;
+
+ dev_dbg(gpio_charger->dev, "set charge current limit to %d (requested: %d)\n",
+ gpio_charger->charge_current_limit, val);
+
+ return 0;
+}
+
static int gpio_charger_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -58,6 +97,24 @@ static int gpio_charger_get_property(struct power_supply *psy,
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = gpio_charger->charge_current_limit;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gpio_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp, const union power_supply_propval *val)
+{
+ struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return set_charge_current_limit(gpio_charger, val->intval);
default:
return -EINVAL;
}
@@ -65,6 +122,19 @@ static int gpio_charger_get_property(struct power_supply *psy,
return 0;
}
+static int gpio_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static enum power_supply_type gpio_charger_get_type(struct device *dev)
{
const char *chargetype;
@@ -112,6 +182,61 @@ static int gpio_charger_get_irq(struct device *dev, void *dev_id,
return irq;
}
+static int init_charge_current_limit(struct device *dev,
+ struct gpio_charger *gpio_charger)
+{
+ int i, len;
+ u32 cur_limit = U32_MAX;
+
+ gpio_charger->current_limit_gpios = devm_gpiod_get_array_optional(dev,
+ "charge-current-limit", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_charger->current_limit_gpios)) {
+ dev_err(dev, "error getting current-limit GPIOs\n");
+ return PTR_ERR(gpio_charger->current_limit_gpios);
+ }
+
+ if (!gpio_charger->current_limit_gpios)
+ return 0;
+
+ len = device_property_read_u32_array(dev, "charge-current-limit-mapping",
+ NULL, 0);
+ if (len < 0)
+ return len;
+
+ if (len == 0 || len % 2) {
+ dev_err(dev, "invalid charge-current-limit-mapping length\n");
+ return -EINVAL;
+ }
+
+ gpio_charger->current_limit_map = devm_kmalloc_array(dev,
+ len / 2, sizeof(*gpio_charger->current_limit_map), GFP_KERNEL);
+ if (!gpio_charger->current_limit_map)
+ return -ENOMEM;
+
+ gpio_charger->current_limit_map_size = len / 2;
+
+ len = device_property_read_u32_array(dev, "charge-current-limit-mapping",
+ (u32 *)gpio_charger->current_limit_map, len);
+ if (len < 0)
+ return len;
+
+ for (i = 0; i < gpio_charger->current_limit_map_size; i++) {
+ if (gpio_charger->current_limit_map[i].limit_ua > cur_limit) {
+ dev_err(dev, "charge-current-limit-mapping not sorted by current in descending order\n");
+ return -EINVAL;
+ }
+
+ cur_limit = gpio_charger->current_limit_map[i].limit_ua;
+ }
+
+ /* default to smallest current limitation for safety reasons */
+ len = gpio_charger->current_limit_map_size - 1;
+ set_charge_current_limit(gpio_charger,
+ gpio_charger->current_limit_map[len].limit_ua);
+
+ return 0;
+}
+
/*
* The entries will be overwritten by driver's probe routine depending
* on the available features. This list ensures that the array is big
@@ -120,6 +245,7 @@ static int gpio_charger_get_irq(struct device *dev, void *dev_id,
static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
};
static int gpio_charger_probe(struct platform_device *pdev)
@@ -131,7 +257,6 @@ static int gpio_charger_probe(struct platform_device *pdev)
struct power_supply_desc *charger_desc;
struct gpio_desc *charge_status;
int charge_status_irq;
- unsigned long flags;
int ret;
int num_props = 0;
@@ -143,40 +268,17 @@ static int gpio_charger_probe(struct platform_device *pdev)
gpio_charger = devm_kzalloc(dev, sizeof(*gpio_charger), GFP_KERNEL);
if (!gpio_charger)
return -ENOMEM;
+ gpio_charger->dev = dev;
/*
* This will fetch a GPIO descriptor from device tree, ACPI or
* boardfile descriptor tables. It's good to try this first.
*/
gpio_charger->gpiod = devm_gpiod_get_optional(dev, NULL, GPIOD_IN);
-
- /*
- * Fallback to legacy platform data method, if no GPIO is specified
- * using boardfile descriptor tables.
- */
- if (!gpio_charger->gpiod && pdata) {
- /* Non-DT: use legacy GPIO numbers */
- if (!gpio_is_valid(pdata->gpio)) {
- dev_err(dev, "Invalid gpio pin in pdata\n");
- return -EINVAL;
- }
- flags = GPIOF_IN;
- if (pdata->gpio_active_low)
- flags |= GPIOF_ACTIVE_LOW;
- ret = devm_gpio_request_one(dev, pdata->gpio, flags,
- dev_name(dev));
- if (ret) {
- dev_err(dev, "Failed to request gpio pin: %d\n", ret);
- return ret;
- }
- /* Then convert this to gpiod for now */
- gpio_charger->gpiod = gpio_to_desc(pdata->gpio);
- } else if (IS_ERR(gpio_charger->gpiod)) {
+ if (IS_ERR(gpio_charger->gpiod)) {
/* Just try again if this happens */
- if (PTR_ERR(gpio_charger->gpiod) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(dev, "error getting GPIO descriptor\n");
- return PTR_ERR(gpio_charger->gpiod);
+ return dev_err_probe(dev, PTR_ERR(gpio_charger->gpiod),
+ "error getting GPIO descriptor\n");
}
if (gpio_charger->gpiod) {
@@ -193,10 +295,22 @@ static int gpio_charger_probe(struct platform_device *pdev)
num_props++;
}
+ ret = init_charge_current_limit(dev, gpio_charger);
+ if (ret < 0)
+ return ret;
+ if (gpio_charger->current_limit_map) {
+ gpio_charger_properties[num_props] =
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
+ num_props++;
+ }
+
charger_desc = &gpio_charger->charger_desc;
charger_desc->properties = gpio_charger_properties;
charger_desc->num_properties = num_props;
charger_desc->get_property = gpio_charger_get_property;
+ charger_desc->set_property = gpio_charger_set_property;
+ charger_desc->property_is_writeable =
+ gpio_charger_property_is_writeable;
psy_cfg.of_node = dev->of_node;
psy_cfg.drv_data = gpio_charger;
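
init_charge_current_limit() above requires the mapping to be sorted by limit_ua in
descending order; set_charge_current_limit() then walks it and takes the first entry
at or below the request. A standalone sketch of that selection with made-up example
currents, clamped to the last (smallest) entry so an undershooting request cannot
index past the table:

#include <linux/kernel.h>
#include <linux/types.h>

struct gpio_mapping_sketch {
	u32 limit_ua;
	u32 gpiodata;	/* one bit per limit GPIO */
};

/* example values only -- descending by limit_ua, last entry is safest */
static const struct gpio_mapping_sketch example_map[] = {
	{ 2000000, 0x3 },
	{ 1000000, 0x1 },
	{  500000, 0x0 },
};

static const struct gpio_mapping_sketch *pick_limit(u32 requested_ua)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_map) - 1; i++)
		if (example_map[i].limit_ua <= requested_ua)
			break;
	return &example_map[i];	/* falls through to the smallest limit */
}

This ordering is also why the probe path programs current_limit_map[size - 1] as the
default: the table's last entry is the lowest, safest limit.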
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
index dd3d93dfe3eb..32dc77fd9a95 100644
--- a/drivers/power/supply/ingenic-battery.c
+++ b/drivers/power/supply/ingenic-battery.c
@@ -147,11 +147,9 @@ static int ingenic_battery_probe(struct platform_device *pdev)
psy_cfg.of_node = dev->of_node;
bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
- if (IS_ERR(bat->battery)) {
- if (PTR_ERR(bat->battery) != -EPROBE_DEFER)
- dev_err(dev, "Unable to register battery\n");
- return PTR_ERR(bat->battery);
- }
+ if (IS_ERR(bat->battery))
+ return dev_err_probe(dev, PTR_ERR(bat->battery),
+ "Unable to register battery\n");
ret = power_supply_get_battery_info(bat->battery, &bat->info);
if (ret) {
diff --git a/drivers/power/supply/lego_ev3_battery.c b/drivers/power/supply/lego_ev3_battery.c
index 1ae3710909b7..ccb00be38e2c 100644
--- a/drivers/power/supply/lego_ev3_battery.c
+++ b/drivers/power/supply/lego_ev3_battery.c
@@ -166,27 +166,21 @@ static int lego_ev3_battery_probe(struct platform_device *pdev)
batt->iio_v = devm_iio_channel_get(dev, "voltage");
err = PTR_ERR_OR_ZERO(batt->iio_v);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get voltage iio channel\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to get voltage iio channel\n");
batt->iio_i = devm_iio_channel_get(dev, "current");
err = PTR_ERR_OR_ZERO(batt->iio_i);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get current iio channel\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to get current iio channel\n");
batt->rechargeable_gpio = devm_gpiod_get(dev, "rechargeable", GPIOD_IN);
err = PTR_ERR_OR_ZERO(batt->rechargeable_gpio);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get rechargeable gpio\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to get rechargeable gpio\n");
/*
* The rechargeable battery indication switch cannot be changed without
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 30a9014b2f95..10cd617516ec 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -473,7 +473,8 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
np = of_node_get(client->dev.of_node);
- info->id = (enum ltc294x_id)of_device_get_match_data(&client->dev);
+ info->id = (enum ltc294x_id) (uintptr_t) of_device_get_match_data(
+ &client->dev);
info->supply_desc.name = np->name;
/* r_sense can be negative, when sense+ is connected to the battery
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 6cb31b9a958d..d956c67d5155 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -15,196 +15,289 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/power_supply.h>
+#include <linux/of_device.h>
#include <linux/max17040_battery.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#define MAX17040_VCELL 0x02
#define MAX17040_SOC 0x04
#define MAX17040_MODE 0x06
#define MAX17040_VER 0x08
-#define MAX17040_RCOMP 0x0C
+#define MAX17040_CONFIG 0x0C
+#define MAX17040_STATUS 0x1A
#define MAX17040_CMD 0xFE
#define MAX17040_DELAY 1000
#define MAX17040_BATTERY_FULL 95
+#define MAX17040_RCOMP_DEFAULT 0x9700
-#define MAX17040_ATHD_MASK 0xFFC0
+#define MAX17040_ATHD_MASK 0x3f
+#define MAX17040_ALSC_MASK 0x40
#define MAX17040_ATHD_DEFAULT_POWER_UP 4
+#define MAX17040_STATUS_HD_MASK 0x1000
+#define MAX17040_STATUS_SC_MASK 0x2000
+#define MAX17040_CFG_RCOMP_MASK 0xff00
+
+enum chip_id {
+ ID_MAX17040,
+ ID_MAX17041,
+ ID_MAX17043,
+ ID_MAX17044,
+ ID_MAX17048,
+ ID_MAX17049,
+ ID_MAX17058,
+ ID_MAX17059,
+};
+
+/* values that differ by chip_id */
+struct chip_data {
+ u16 reset_val;
+ u16 vcell_shift;
+ u16 vcell_mul;
+ u16 vcell_div;
+ u8 has_low_soc_alert;
+ u8 rcomp_bytes;
+ u8 has_soc_alert;
+};
+
+static struct chip_data max17040_family[] = {
+ [ID_MAX17040] = {
+ .reset_val = 0x0054,
+ .vcell_shift = 4,
+ .vcell_mul = 1250,
+ .vcell_div = 1,
+ .has_low_soc_alert = 0,
+ .rcomp_bytes = 2,
+ .has_soc_alert = 0,
+ },
+ [ID_MAX17041] = {
+ .reset_val = 0x0054,
+ .vcell_shift = 4,
+ .vcell_mul = 2500,
+ .vcell_div = 1,
+ .has_low_soc_alert = 0,
+ .rcomp_bytes = 2,
+ .has_soc_alert = 0,
+ },
+ [ID_MAX17043] = {
+ .reset_val = 0x0054,
+ .vcell_shift = 4,
+ .vcell_mul = 1250,
+ .vcell_div = 1,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 0,
+ },
+ [ID_MAX17044] = {
+ .reset_val = 0x0054,
+ .vcell_shift = 4,
+ .vcell_mul = 2500,
+ .vcell_div = 1,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 0,
+ },
+ [ID_MAX17048] = {
+ .reset_val = 0x5400,
+ .vcell_shift = 0,
+ .vcell_mul = 625,
+ .vcell_div = 8,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 1,
+ },
+ [ID_MAX17049] = {
+ .reset_val = 0x5400,
+ .vcell_shift = 0,
+ .vcell_mul = 625,
+ .vcell_div = 4,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 1,
+ },
+ [ID_MAX17058] = {
+ .reset_val = 0x5400,
+ .vcell_shift = 0,
+ .vcell_mul = 625,
+ .vcell_div = 8,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 0,
+ },
+ [ID_MAX17059] = {
+ .reset_val = 0x5400,
+ .vcell_shift = 0,
+ .vcell_mul = 625,
+ .vcell_div = 4,
+ .has_low_soc_alert = 1,
+ .rcomp_bytes = 1,
+ .has_soc_alert = 0,
+ },
+};
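
The vcell_* fields in max17040_family encode each chip's ADC format: the raw VCELL
word is shifted right by vcell_shift, then scaled by vcell_mul/vcell_div to
microvolts, as max17040_raw_vcell_to_uvolts() below does. A worked sketch with two
entries from the table (the raw sample 0xC350 is an arbitrary example value):

#include <linux/types.h>

static inline u32 vcell_to_uv(u16 raw, u16 shift, u16 mul, u16 div)
{
	return ((u32)raw >> shift) * mul / div;
}

/*
 * MAX17040: 12-bit reading in bits 15..4, 1.25 mV per LSB:
 *   vcell_to_uv(0xC350, 4, 1250, 1) = 3125 * 1250     = 3906250 uV
 * MAX17048: full 16-bit reading, 78.125 uV per LSB (625/8):
 *   vcell_to_uv(0xC350, 0,  625, 8) = 50000 * 625 / 8 = 3906250 uV
 * Both decode the same sample to ~3.906 V.
 */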
struct max17040_chip {
struct i2c_client *client;
+ struct regmap *regmap;
struct delayed_work work;
struct power_supply *battery;
struct max17040_platform_data *pdata;
+ struct chip_data data;
- /* State Of Connect */
- int online;
- /* battery voltage */
- int vcell;
/* battery capacity */
int soc;
/* State Of Charge */
int status;
/* Low alert threshold from 32% to 1% of the State of Charge */
u32 low_soc_alert;
+ /* some devices return twice the capacity */
+ bool quirk_double_soc;
+ /* higher 8 bits for 17043+, 16 bits for 17040,41 */
+ u16 rcomp;
};
-static int max17040_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int max17040_reset(struct max17040_chip *chip)
{
- struct max17040_chip *chip = power_supply_get_drvdata(psy);
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = chip->status;
- break;
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = chip->online;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = chip->vcell;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = chip->soc;
- break;
- case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
- val->intval = chip->low_soc_alert;
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ return regmap_write(chip->regmap, MAX17040_CMD, chip->data.reset_val);
}
-static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
+static int max17040_set_low_soc_alert(struct max17040_chip *chip, u32 level)
{
- int ret;
-
- ret = i2c_smbus_write_word_swapped(client, reg, value);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
+ level = 32 - level * (chip->quirk_double_soc ? 2 : 1);
+ return regmap_update_bits(chip->regmap, MAX17040_CONFIG,
+ MAX17040_ATHD_MASK, level);
}
-static int max17040_read_reg(struct i2c_client *client, int reg)
+static int max17040_set_soc_alert(struct max17040_chip *chip, bool enable)
{
- int ret;
-
- ret = i2c_smbus_read_word_swapped(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "%s: err %d\n", __func__, ret);
-
- return ret;
+ return regmap_update_bits(chip->regmap, MAX17040_CONFIG,
+ MAX17040_ALSC_MASK, enable ? MAX17040_ALSC_MASK : 0);
}
-static void max17040_reset(struct i2c_client *client)
+static int max17040_set_rcomp(struct max17040_chip *chip, u16 rcomp)
{
- max17040_write_reg(client, MAX17040_CMD, 0x0054);
+ u16 mask = chip->data.rcomp_bytes == 2 ?
+ 0xffff : MAX17040_CFG_RCOMP_MASK;
+
+ return regmap_update_bits(chip->regmap, MAX17040_CONFIG, mask, rcomp);
}
-static int max17040_set_low_soc_alert(struct i2c_client *client, u32 level)
+static int max17040_raw_vcell_to_uvolts(struct max17040_chip *chip, u16 vcell)
{
- int ret;
- u16 data;
+ struct chip_data *d = &chip->data;
- level = 32 - level;
- data = max17040_read_reg(client, MAX17040_RCOMP);
- /* clear the alrt bit and set LSb 5 bits */
- data &= MAX17040_ATHD_MASK;
- data |= level;
- ret = max17040_write_reg(client, MAX17040_RCOMP, data);
-
- return ret;
+ return (vcell >> d->vcell_shift) * d->vcell_mul / d->vcell_div;
}
-static void max17040_get_vcell(struct i2c_client *client)
+
+static int max17040_get_vcell(struct max17040_chip *chip)
{
- struct max17040_chip *chip = i2c_get_clientdata(client);
- u16 vcell;
+ u32 vcell;
- vcell = max17040_read_reg(client, MAX17040_VCELL);
+ regmap_read(chip->regmap, MAX17040_VCELL, &vcell);
- chip->vcell = (vcell >> 4) * 1250;
+ return max17040_raw_vcell_to_uvolts(chip, vcell);
}
-static void max17040_get_soc(struct i2c_client *client)
+static int max17040_get_soc(struct max17040_chip *chip)
{
- struct max17040_chip *chip = i2c_get_clientdata(client);
- u16 soc;
+ u32 soc;
- soc = max17040_read_reg(client, MAX17040_SOC);
+ regmap_read(chip->regmap, MAX17040_SOC, &soc);
- chip->soc = (soc >> 8);
+ return soc >> (chip->quirk_double_soc ? 9 : 8);
}
-static void max17040_get_version(struct i2c_client *client)
+static int max17040_get_version(struct max17040_chip *chip)
{
- u16 version;
+ int ret;
+ u32 version;
- version = max17040_read_reg(client, MAX17040_VER);
+ ret = regmap_read(chip->regmap, MAX17040_VER, &version);
- dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
+ return ret ? ret : version;
}
-static void max17040_get_online(struct i2c_client *client)
+static int max17040_get_online(struct max17040_chip *chip)
{
- struct max17040_chip *chip = i2c_get_clientdata(client);
-
- if (chip->pdata && chip->pdata->battery_online)
- chip->online = chip->pdata->battery_online();
- else
- chip->online = 1;
+ return chip->pdata && chip->pdata->battery_online ?
+ chip->pdata->battery_online() : 1;
}
-static void max17040_get_status(struct i2c_client *client)
+static int max17040_get_status(struct max17040_chip *chip)
{
- struct max17040_chip *chip = i2c_get_clientdata(client);
-
if (!chip->pdata || !chip->pdata->charger_online
- || !chip->pdata->charger_enable) {
- chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
- return;
- }
+ || !chip->pdata->charger_enable)
+ return POWER_SUPPLY_STATUS_UNKNOWN;
- if (chip->pdata->charger_online()) {
+ if (max17040_get_soc(chip) > MAX17040_BATTERY_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ if (chip->pdata->charger_online())
if (chip->pdata->charger_enable())
- chip->status = POWER_SUPPLY_STATUS_CHARGING;
+ return POWER_SUPPLY_STATUS_CHARGING;
else
- chip->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- } else {
- chip->status = POWER_SUPPLY_STATUS_DISCHARGING;
- }
-
- if (chip->soc > MAX17040_BATTERY_FULL)
- chip->status = POWER_SUPPLY_STATUS_FULL;
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ return POWER_SUPPLY_STATUS_DISCHARGING;
}
static int max17040_get_of_data(struct max17040_chip *chip)
{
struct device *dev = &chip->client->dev;
+ struct chip_data *data = &max17040_family[
+ (uintptr_t) of_device_get_match_data(dev)];
+ int rcomp_len;
+ u8 rcomp[2];
+
+ chip->quirk_double_soc = device_property_read_bool(dev,
+ "maxim,double-soc");
chip->low_soc_alert = MAX17040_ATHD_DEFAULT_POWER_UP;
device_property_read_u32(dev,
"maxim,alert-low-soc-level",
&chip->low_soc_alert);
- if (chip->low_soc_alert <= 0 || chip->low_soc_alert >= 33)
+ if (chip->low_soc_alert <= 0 ||
+ chip->low_soc_alert > (chip->quirk_double_soc ? 16 : 32)) {
+ dev_err(dev, "maxim,alert-low-soc-level out of bounds\n");
return -EINVAL;
+ }
+
+ rcomp_len = device_property_count_u8(dev, "maxim,rcomp");
+ chip->rcomp = MAX17040_RCOMP_DEFAULT;
+ if (rcomp_len == data->rcomp_bytes) {
+ device_property_read_u8_array(dev, "maxim,rcomp",
+ rcomp, rcomp_len);
+ chip->rcomp = rcomp_len == 2 ?
+ rcomp[0] << 8 | rcomp[1] :
+ rcomp[0] << 8;
+ } else if (rcomp_len > 0) {
+ dev_err(dev, "maxim,rcomp has incorrect length\n");
+ return -EINVAL;
+ }
return 0;
}
-static void max17040_check_changes(struct i2c_client *client)
+static void max17040_check_changes(struct max17040_chip *chip)
{
- max17040_get_vcell(client);
- max17040_get_soc(client);
- max17040_get_online(client);
- max17040_get_status(client);
+ chip->soc = max17040_get_soc(chip);
+ chip->status = max17040_get_status(chip);
+}
+
+static void max17040_queue_work(struct max17040_chip *chip)
+{
+ queue_delayed_work(system_power_efficient_wq, &chip->work,
+ MAX17040_DELAY);
+}
+
+static void max17040_stop_work(void *data)
+{
+ struct max17040_chip *chip = data;
+
+ cancel_delayed_work_sync(&chip->work);
}
static void max17040_work(struct work_struct *work)
@@ -217,30 +310,51 @@ static void max17040_work(struct work_struct *work)
/* store SOC and status to check changes */
last_soc = chip->soc;
last_status = chip->status;
- max17040_check_changes(chip->client);
+ max17040_check_changes(chip);
/* check changes and send uevent */
if (last_soc != chip->soc || last_status != chip->status)
power_supply_changed(chip->battery);
- queue_delayed_work(system_power_efficient_wq, &chip->work,
- MAX17040_DELAY);
+ max17040_queue_work(chip);
+}
+
+/* Returns true if alert cause was SOC change, not low SOC */
+static bool max17040_handle_soc_alert(struct max17040_chip *chip)
+{
+ bool ret = true;
+ u32 data;
+
+ regmap_read(chip->regmap, MAX17040_STATUS, &data);
+
+ if (data & MAX17040_STATUS_HD_MASK) {
+ // this alert was caused by low soc
+ ret = false;
+ }
+ if (data & MAX17040_STATUS_SC_MASK) {
+ // soc change bit -- deassert to mark as handled
+ regmap_write(chip->regmap, MAX17040_STATUS,
+ data & ~MAX17040_STATUS_SC_MASK);
+ }
+
+ return ret;
}
static irqreturn_t max17040_thread_handler(int id, void *dev)
{
struct max17040_chip *chip = dev;
- struct i2c_client *client = chip->client;
- dev_warn(&client->dev, "IRQ: Alert battery low level");
+ if (!(chip->data.has_soc_alert && max17040_handle_soc_alert(chip)))
+ dev_warn(&chip->client->dev, "IRQ: Alert battery low level\n");
+
/* read registers */
- max17040_check_changes(chip->client);
+ max17040_check_changes(chip);
/* send uevent */
power_supply_changed(chip->battery);
/* reset alert bit */
- max17040_set_low_soc_alert(client, chip->low_soc_alert);
+ max17040_set_low_soc_alert(chip, chip->low_soc_alert);
return IRQ_HANDLED;
}
@@ -279,12 +393,13 @@ static int max17040_set_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
- /* alert threshold can be programmed from 1% up to 32% */
- if ((val->intval < 1) || (val->intval > 32)) {
+ /* alert threshold can be programmed from 1% up to 16/32% */
+ if ((val->intval < 1) ||
+ (val->intval > (chip->quirk_double_soc ? 16 : 32))) {
ret = -EINVAL;
break;
}
- ret = max17040_set_low_soc_alert(chip->client, val->intval);
+ ret = max17040_set_low_soc_alert(chip, val->intval);
chip->low_soc_alert = val->intval;
break;
default:
@@ -294,6 +409,41 @@ static int max17040_set_property(struct power_supply *psy,
return ret;
}
+static int max17040_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max17040_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = max17040_get_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = max17040_get_online(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = max17040_get_vcell(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = max17040_get_soc(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
+ val->intval = chip->low_soc_alert;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct regmap_config max17040_regmap = {
+ .reg_bits = 8,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
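The regmap declared above encodes the chip's register layout: 8-bit register addresses at a stride of 2, each holding a 16-bit big-endian value, so a regmap_read() returns one full register word with the byte order already converted. As a minimal sketch of a raw read built on top of it (struct max17040_chip and chip->regmap are set up in the probe hunk further down; the helper name is illustrative, not part of the patch):

static int max17040_raw_read(struct max17040_chip *chip, u8 reg, u16 *out)
{
        u32 data;
        int ret;

        /* regmap applies the 16-bit big-endian conversion declared above */
        ret = regmap_read(chip->regmap, reg, &data);
        if (ret)
                return ret;

        *out = data;
        return 0;
}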
static enum power_supply_property max17040_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
@@ -318,6 +468,8 @@ static int max17040_probe(struct i2c_client *client,
struct i2c_adapter *adapter = client->adapter;
struct power_supply_config psy_cfg = {};
struct max17040_chip *chip;
+ enum chip_id chip_id;
+ bool enable_irq = false;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
@@ -328,37 +480,68 @@ static int max17040_probe(struct i2c_client *client,
return -ENOMEM;
chip->client = client;
+ chip->regmap = devm_regmap_init_i2c(client, &max17040_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
chip->pdata = client->dev.platform_data;
- ret = max17040_get_of_data(chip);
- if (ret) {
- dev_err(&client->dev,
- "failed: low SOC alert OF data out of bounds\n");
- return ret;
+ chip_id = (enum chip_id) id->driver_data;
+ if (client->dev.of_node) {
+ ret = max17040_get_of_data(chip);
+ if (ret)
+ return ret;
+ chip_id = (enum chip_id) (uintptr_t)
+ of_device_get_match_data(&client->dev);
}
+ chip->data = max17040_family[chip_id];
i2c_set_clientdata(client, chip);
psy_cfg.drv_data = chip;
- chip->battery = power_supply_register(&client->dev,
+ chip->battery = devm_power_supply_register(&client->dev,
&max17040_battery_desc, &psy_cfg);
if (IS_ERR(chip->battery)) {
dev_err(&client->dev, "failed: power supply register\n");
return PTR_ERR(chip->battery);
}
- max17040_reset(client);
- max17040_get_version(client);
+ ret = max17040_get_version(chip);
+ if (ret < 0)
+ return ret;
+ dev_dbg(&chip->client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", ret);
+
+ if (chip_id == ID_MAX17040 || chip_id == ID_MAX17041)
+ max17040_reset(chip);
+
+ max17040_set_rcomp(chip, chip->rcomp);
/* check interrupt */
- if (client->irq && of_device_is_compatible(client->dev.of_node,
- "maxim,max77836-battery")) {
- ret = max17040_set_low_soc_alert(client, chip->low_soc_alert);
+ if (client->irq && chip->data.has_low_soc_alert) {
+ ret = max17040_set_low_soc_alert(chip, chip->low_soc_alert);
if (ret) {
dev_err(&client->dev,
"Failed to set low SOC alert: err %d\n", ret);
return ret;
}
+ enable_irq = true;
+ }
+
+ if (client->irq && chip->data.has_soc_alert) {
+ ret = max17040_set_soc_alert(chip, 1);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to set SOC alert: err %d\n", ret);
+ return ret;
+ }
+ enable_irq = true;
+ } else {
+ /* SOC alerts negate the need for polling */
+ INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
+ ret = devm_add_action(&client->dev, max17040_stop_work, chip);
+ if (ret)
+ return ret;
+ max17040_queue_work(chip);
+ }
+
+ if (enable_irq) {
ret = max17040_enable_alert_irq(chip);
if (ret) {
client->irq = 0;
@@ -367,19 +550,6 @@ static int max17040_probe(struct i2c_client *client,
}
}
- INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
- queue_delayed_work(system_power_efficient_wq, &chip->work,
- MAX17040_DELAY);
-
- return 0;
-}
-
-static int max17040_remove(struct i2c_client *client)
-{
- struct max17040_chip *chip = i2c_get_clientdata(client);
-
- power_supply_unregister(chip->battery);
- cancel_delayed_work(&chip->work);
return 0;
}
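Dropping max17040_remove() is safe because every teardown step is now device-managed: devm_power_supply_register() unregisters the supply, and the devm_add_action() call in probe cancels the delayed work, with devres running both in reverse registration order. A related variant, sketched here only as an alternative and not what this patch uses, is devm_add_action_or_reset(), which invokes the action immediately if the registration itself fails:

        ret = devm_add_action_or_reset(&client->dev, max17040_stop_work, chip);
        if (ret)
                return ret;     /* work already cancelled by the helper */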
@@ -390,7 +560,11 @@ static int max17040_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct max17040_chip *chip = i2c_get_clientdata(client);
- cancel_delayed_work(&chip->work);
+ if (client->irq && chip->data.has_soc_alert)
+ /* disable SOC alert to prevent wakeup */
+ max17040_set_soc_alert(chip, 0);
+ else
+ cancel_delayed_work(&chip->work);
if (client->irq && device_may_wakeup(dev))
enable_irq_wake(client->irq);
@@ -403,12 +577,14 @@ static int max17040_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct max17040_chip *chip = i2c_get_clientdata(client);
- queue_delayed_work(system_power_efficient_wq, &chip->work,
- MAX17040_DELAY);
-
if (client->irq && device_may_wakeup(dev))
disable_irq_wake(client->irq);
+ if (client->irq && chip->data.has_soc_alert)
+ max17040_set_soc_alert(chip, 1);
+ else
+ max17040_queue_work(chip);
+
return 0;
}
@@ -422,16 +598,30 @@ static SIMPLE_DEV_PM_OPS(max17040_pm_ops, max17040_suspend, max17040_resume);
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id max17040_id[] = {
- { "max17040" },
- { "max77836-battery" },
- { }
+ { "max17040", ID_MAX17040 },
+ { "max17041", ID_MAX17041 },
+ { "max17043", ID_MAX17043 },
+ { "max77836-battery", ID_MAX17043 },
+ { "max17044", ID_MAX17044 },
+ { "max17048", ID_MAX17048 },
+ { "max17049", ID_MAX17049 },
+ { "max17058", ID_MAX17058 },
+ { "max17059", ID_MAX17059 },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, max17040_id);
static const struct of_device_id max17040_of_match[] = {
- { .compatible = "maxim,max17040" },
- { .compatible = "maxim,max77836-battery" },
- { },
+ { .compatible = "maxim,max17040", .data = (void *) ID_MAX17040 },
+ { .compatible = "maxim,max17041", .data = (void *) ID_MAX17041 },
+ { .compatible = "maxim,max17043", .data = (void *) ID_MAX17043 },
+ { .compatible = "maxim,max77836-battery", .data = (void *) ID_MAX17043 },
+ { .compatible = "maxim,max17044", .data = (void *) ID_MAX17044 },
+ { .compatible = "maxim,max17048", .data = (void *) ID_MAX17048 },
+ { .compatible = "maxim,max17049", .data = (void *) ID_MAX17049 },
+ { .compatible = "maxim,max17058", .data = (void *) ID_MAX17058 },
+ { .compatible = "maxim,max17059", .data = (void *) ID_MAX17059 },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, max17040_of_match);
@@ -442,7 +632,6 @@ static struct i2c_driver max17040_i2c_driver = {
.pm = MAX17040_PM_OPS,
},
.probe = max17040_probe,
- .remove = max17040_remove,
.id_table = max17040_id,
};
module_i2c_driver(max17040_i2c_driver);
diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c
index 9ca895b0dabb..1b1a36f8e929 100644
--- a/drivers/power/supply/max1721x_battery.c
+++ b/drivers/power/supply/max1721x_battery.c
@@ -431,7 +431,7 @@ static int devm_w1_max1721x_add_device(struct w1_slave *sl)
return 0;
}
-static struct w1_family_ops w1_max1721x_fops = {
+static const struct w1_family_ops w1_max1721x_fops = {
.add_slave = devm_w1_max1721x_add_device,
};
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
index 17749fc90e16..2df6a2459d1f 100644
--- a/drivers/power/supply/pm2301_charger.c
+++ b/drivers/power/supply/pm2301_charger.c
@@ -104,11 +104,6 @@ static int pm2xxx_charger_current_map[] = {
3000,
};
-static const struct i2c_device_id pm2xxx_ident[] = {
- { "pm2301", 0 },
- { }
-};
-
static void set_lpn_pin(struct pm2xxx_charger *pm2)
{
if (!pm2->ac.charger_connected && gpio_is_valid(pm2->lpn_pin)) {
@@ -396,7 +391,7 @@ static int pm2_int_reg3(void *pm2_data, int val)
if (val & (PM2XXX_INT4_ITCHARGINGON)) {
dev_dbg(pm2->dev ,
- "chargind operation has started\n");
+ "charging operation has started\n");
}
if (val & (PM2XXX_INT4_ITVRESUME)) {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ccbad435ed12..38e3aa642131 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -579,6 +579,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
info->charge_term_current_ua = -EINVAL;
info->constant_charge_current_max_ua = -EINVAL;
info->constant_charge_voltage_max_uv = -EINVAL;
+ info->temp_ambient_alert_min = INT_MIN;
+ info->temp_ambient_alert_max = INT_MAX;
+ info->temp_alert_min = INT_MIN;
+ info->temp_alert_max = INT_MAX;
+ info->temp_min = INT_MIN;
+ info->temp_max = INT_MAX;
info->factory_internal_resistance_uohm = -EINVAL;
info->resist_table = NULL;
@@ -639,6 +645,19 @@ int power_supply_get_battery_info(struct power_supply *psy,
of_property_read_u32(battery_np, "factory-internal-resistance-micro-ohms",
&info->factory_internal_resistance_uohm);
+ of_property_read_u32_index(battery_np, "ambient-celsius",
+ 0, &info->temp_ambient_alert_min);
+ of_property_read_u32_index(battery_np, "ambient-celsius",
+ 1, &info->temp_ambient_alert_max);
+ of_property_read_u32_index(battery_np, "alert-celsius",
+ 0, &info->temp_alert_min);
+ of_property_read_u32_index(battery_np, "alert-celsius",
+ 1, &info->temp_alert_max);
+ of_property_read_u32_index(battery_np, "operating-range-celsius",
+ 0, &info->temp_min);
+ of_property_read_u32_index(battery_np, "operating-range-celsius",
+ 1, &info->temp_max);
+
len = of_property_count_u32_elems(battery_np, "ocv-capacity-celsius");
if (len < 0 && len != -EINVAL) {
err = len;
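With the temperature fields pre-set to INT_MIN/INT_MAX rather than 0, a driver can distinguish "firmware supplied no limit" from a genuine 0-degree limit by comparing against the sentinel; the smb347 changes later in this diff follow exactly this pattern. A minimal consumer sketch (apply_soft_hot_limit() and apply_hard_cold_limit() are hypothetical callbacks, not kernel API):

        struct power_supply_battery_info info = {};

        if (!power_supply_get_battery_info(psy, &info)) {
                if (info.temp_alert_max != INT_MAX)
                        apply_soft_hot_limit(info.temp_alert_max);
                if (info.temp_min != INT_MIN)
                        apply_hard_cold_limit(info.temp_min);
        }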
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 3d383086018c..a616b9d8f43c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -56,6 +56,7 @@ static const char * const POWER_SUPPLY_TYPE_TEXT[] = {
[POWER_SUPPLY_TYPE_USB_PD] = "USB_PD",
[POWER_SUPPLY_TYPE_USB_PD_DRP] = "USB_PD_DRP",
[POWER_SUPPLY_TYPE_APPLE_BRICK_ID] = "BrickID",
+ [POWER_SUPPLY_TYPE_WIRELESS] = "Wireless",
};
static const char * const POWER_SUPPLY_USB_TYPE_TEXT[] = {
diff --git a/drivers/power/supply/rn5t618_power.c b/drivers/power/supply/rn5t618_power.c
new file mode 100644
index 000000000000..dee520f0fdf5
--- /dev/null
+++ b/drivers/power/supply/rn5t618_power.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power supply driver for the RICOH RN5T618 power management chip family
+ *
+ * Copyright (C) 2020 Andreas Kemnade
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mfd/rn5t618.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CHG_STATE_ADP_INPUT 0x40
+#define CHG_STATE_USB_INPUT 0x80
+#define CHG_STATE_MASK 0x1f
+#define CHG_STATE_CHG_OFF 0
+#define CHG_STATE_CHG_READY_VADP 1
+#define CHG_STATE_CHG_TRICKLE 2
+#define CHG_STATE_CHG_RAPID 3
+#define CHG_STATE_CHG_COMPLETE 4
+#define CHG_STATE_SUSPEND 5
+#define CHG_STATE_VCHG_OVER_VOL 6
+#define CHG_STATE_BAT_ERROR 7
+#define CHG_STATE_NO_BAT 8
+#define CHG_STATE_BAT_OVER_VOL 9
+#define CHG_STATE_BAT_TEMP_ERR 10
+#define CHG_STATE_DIE_ERR 11
+#define CHG_STATE_DIE_SHUTDOWN 12
+#define CHG_STATE_NO_BAT2 13
+#define CHG_STATE_CHG_READY_VUSB 14
+
+#define FG_ENABLE 1
+
+struct rn5t618_power_info {
+ struct rn5t618 *rn5t618;
+ struct platform_device *pdev;
+ struct power_supply *battery;
+ struct power_supply *usb;
+ struct power_supply *adp;
+ int irq;
+};
+
+static enum power_supply_property rn5t618_usb_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property rn5t618_adp_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property rn5t618_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+};
+
+static int rn5t618_battery_read_doublereg(struct rn5t618_power_info *info,
+ u8 reg, u16 *result)
+{
+ int ret, i;
+ u8 data[2];
+ u16 old, new;
+
+ old = 0;
+ /* Prevent races when registers are changing. */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_bulk_read(info->rn5t618->regmap,
+ reg, data, sizeof(data));
+ if (ret)
+ return ret;
+
+ new = data[0] << 8;
+ new |= data[1];
+ if (new == old)
+ break;
+
+ old = new;
+ }
+
+ *result = new;
+
+ return 0;
+}
+
+static int rn5t618_decode_status(unsigned int status)
+{
+ switch (status & CHG_STATE_MASK) {
+ case CHG_STATE_CHG_OFF:
+ case CHG_STATE_SUSPEND:
+ case CHG_STATE_VCHG_OVER_VOL:
+ case CHG_STATE_DIE_SHUTDOWN:
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ case CHG_STATE_CHG_TRICKLE:
+ case CHG_STATE_CHG_RAPID:
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ case CHG_STATE_CHG_COMPLETE:
+ return POWER_SUPPLY_STATUS_FULL;
+
+ default:
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+ }
+}
+
+static int rn5t618_battery_status(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGSTATE, &v);
+ if (ret)
+ return ret;
+
+ if (v & 0xc0) { /* USB or ADP plugged */
+ val->intval = rn5t618_decode_status(v);
+ } else {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ return ret;
+}
+
+static int rn5t618_battery_present(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGSTATE, &v);
+ if (ret)
+ return ret;
+
+ v &= CHG_STATE_MASK;
+ if ((v == CHG_STATE_NO_BAT) || (v == CHG_STATE_NO_BAT2))
+ val->intval = 0;
+ else
+ val->intval = 1;
+
+ return ret;
+}
+
+static int rn5t618_battery_voltage_now(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_VOLTAGE_1, &res);
+ if (ret)
+ return ret;
+
+ val->intval = res * 2 * 2500 / 4095 * 1000;
+
+ return 0;
+}
+
+static int rn5t618_battery_current_now(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_CC_AVEREG1, &res);
+ if (ret)
+ return ret;
+
+ /* current is negative when discharging */
+ val->intval = sign_extend32(res, 13) * 1000;
+
+ return 0;
+}
+
+static int rn5t618_battery_capacity(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_SOC, &v);
+ if (ret)
+ return ret;
+
+ val->intval = v;
+
+ return 0;
+}
+
+static int rn5t618_battery_temp(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_TEMP_1, &res);
+ if (ret)
+ return ret;
+
+ val->intval = sign_extend32(res, 11) * 10 / 16;
+
+ return 0;
+}
+
+static int rn5t618_battery_tte(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_TT_EMPTY_H, &res);
+ if (ret)
+ return ret;
+
+ if (res == 65535)
+ return -ENODATA;
+
+ val->intval = res * 60;
+
+ return 0;
+}
+
+static int rn5t618_battery_ttf(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_TT_FULL_H, &res);
+ if (ret)
+ return ret;
+
+ if (res == 65535)
+ return -ENODATA;
+
+ val->intval = res * 60;
+
+ return 0;
+}
+
+static int rn5t618_battery_charge_full(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_FA_CAP_H, &res);
+ if (ret)
+ return ret;
+
+ val->intval = res * 1000;
+
+ return 0;
+}
+
+static int rn5t618_battery_charge_now(struct rn5t618_power_info *info,
+ union power_supply_propval *val)
+{
+ u16 res;
+ int ret;
+
+ ret = rn5t618_battery_read_doublereg(info, RN5T618_RE_CAP_H, &res);
+ if (ret)
+ return ret;
+
+ val->intval = res * 1000;
+
+ return 0;
+}
+
+static int rn5t618_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret = 0;
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = rn5t618_battery_status(info, val);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = rn5t618_battery_present(info, val);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = rn5t618_battery_voltage_now(info, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = rn5t618_battery_current_now(info, val);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = rn5t618_battery_capacity(info, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = rn5t618_battery_temp(info, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = rn5t618_battery_tte(info, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ ret = rn5t618_battery_ttf(info, val);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ ret = rn5t618_battery_charge_full(info, val);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ ret = rn5t618_battery_charge_now(info, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rn5t618_adp_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+ unsigned int chgstate;
+ bool online;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGSTATE, &chgstate);
+ if (ret)
+ return ret;
+
+ online = !!(chgstate & CHG_STATE_ADP_INPUT);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = online;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!online) {
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ }
+ val->intval = rn5t618_decode_status(chgstate);
+ if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rn5t618_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rn5t618_power_info *info = power_supply_get_drvdata(psy);
+ unsigned int chgstate;
+ bool online;
+ int ret;
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CHGSTATE, &chgstate);
+ if (ret)
+ return ret;
+
+ online = !!(chgstate & CHG_STATE_USB_INPUT);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = online;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!online) {
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ }
+ val->intval = rn5t618_decode_status(chgstate);
+ if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct power_supply_desc rn5t618_battery_desc = {
+ .name = "rn5t618-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = rn5t618_battery_props,
+ .num_properties = ARRAY_SIZE(rn5t618_battery_props),
+ .get_property = rn5t618_battery_get_property,
+};
+
+static const struct power_supply_desc rn5t618_adp_desc = {
+ .name = "rn5t618-adp",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = rn5t618_adp_props,
+ .num_properties = ARRAY_SIZE(rn5t618_adp_props),
+ .get_property = rn5t618_adp_get_property,
+};
+
+static const struct power_supply_desc rn5t618_usb_desc = {
+ .name = "rn5t618-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = rn5t618_usb_props,
+ .num_properties = ARRAY_SIZE(rn5t618_usb_props),
+ .get_property = rn5t618_usb_get_property,
+};
+
+static irqreturn_t rn5t618_charger_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct rn5t618_power_info *info = dev_get_drvdata(dev);
+
+ unsigned int ctrl, stat1, stat2, err;
+
+ regmap_read(info->rn5t618->regmap, RN5T618_CHGERR_IRR, &err);
+ regmap_read(info->rn5t618->regmap, RN5T618_CHGCTRL_IRR, &ctrl);
+ regmap_read(info->rn5t618->regmap, RN5T618_CHGSTAT_IRR1, &stat1);
+ regmap_read(info->rn5t618->regmap, RN5T618_CHGSTAT_IRR2, &stat2);
+
+ regmap_write(info->rn5t618->regmap, RN5T618_CHGERR_IRR, 0);
+ regmap_write(info->rn5t618->regmap, RN5T618_CHGCTRL_IRR, 0);
+ regmap_write(info->rn5t618->regmap, RN5T618_CHGSTAT_IRR1, 0);
+ regmap_write(info->rn5t618->regmap, RN5T618_CHGSTAT_IRR2, 0);
+
+ dev_dbg(dev, "chgerr: %x chgctrl: %x chgstat: %x chgstat2: %x\n",
+ err, ctrl, stat1, stat2);
+
+ power_supply_changed(info->usb);
+ power_supply_changed(info->adp);
+ power_supply_changed(info->battery);
+
+ return IRQ_HANDLED;
+}
+
+static int rn5t618_power_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ unsigned int v;
+ struct power_supply_config psy_cfg = {};
+ struct rn5t618_power_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pdev = pdev;
+ info->rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ info->irq = -1;
+
+ platform_set_drvdata(pdev, info);
+
+ ret = regmap_read(info->rn5t618->regmap, RN5T618_CONTROL, &v);
+ if (ret)
+ return ret;
+
+ if (!(v & FG_ENABLE)) {
+ /* E.g. the vendor kernels of various Kobo and Tolino Ebook
+ * readers disable the fuel gauge on shutdown. If a kernel
+ * without fuel gauge support is booted after that, the fuel
+ * gauge will get decalibrated.
+ */
+ dev_info(&pdev->dev, "Fuel gauge not enabled, enabling now\n");
+ dev_info(&pdev->dev, "Expect imprecise results\n");
+ regmap_update_bits(info->rn5t618->regmap, RN5T618_CONTROL,
+ FG_ENABLE, FG_ENABLE);
+ }
+
+ psy_cfg.drv_data = info;
+ info->battery = devm_power_supply_register(&pdev->dev,
+ &rn5t618_battery_desc,
+ &psy_cfg);
+ if (IS_ERR(info->battery)) {
+ ret = PTR_ERR(info->battery);
+ dev_err(&pdev->dev, "failed to register battery: %d\n", ret);
+ return ret;
+ }
+
+ info->adp = devm_power_supply_register(&pdev->dev,
+ &rn5t618_adp_desc,
+ &psy_cfg);
+ if (IS_ERR(info->adp)) {
+ ret = PTR_ERR(info->adp);
+ dev_err(&pdev->dev, "failed to register adp: %d\n", ret);
+ return ret;
+ }
+
+ info->usb = devm_power_supply_register(&pdev->dev,
+ &rn5t618_usb_desc,
+ &psy_cfg);
+ if (IS_ERR(info->usb)) {
+ ret = PTR_ERR(info->usb);
+ dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
+ return ret;
+ }
+
+ if (info->rn5t618->irq_data)
+ info->irq = regmap_irq_get_virq(info->rn5t618->irq_data,
+ RN5T618_IRQ_CHG);
+
+ if (info->irq < 0) {
+ info->irq = -1;
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+ rn5t618_charger_irq,
+ IRQF_ONESHOT,
+ "rn5t618_power",
+ &pdev->dev);
+
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request IRQ:%d fail\n",
+ info->irq);
+ info->irq = -1;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver rn5t618_power_driver = {
+ .driver = {
+ .name = "rn5t618-power",
+ },
+ .probe = rn5t618_power_probe,
+};
+
+module_platform_driver(rn5t618_power_driver);
+MODULE_ALIAS("platform:rn5t618-power");
+MODULE_DESCRIPTION("Power supply driver for RICOH RN5T618");
+MODULE_LICENSE("GPL");
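A note on rn5t618_battery_read_doublereg() above: the fuel gauge's 16-bit quantities span two 8-bit registers that the hardware can update between the two byte accesses, so the function re-reads until two consecutive reads agree, bounded at three attempts. The same technique in isolation, as a sketch (get_unaligned_be16() comes from asm/unaligned.h):

static int read_stable_be16(struct regmap *map, unsigned int reg, u16 *out)
{
        u16 prev = 0, cur = 0;
        u8 buf[2];
        int i, ret;

        for (i = 0; i < 3; i++) {
                ret = regmap_bulk_read(map, reg, buf, sizeof(buf));
                if (ret)
                        return ret;

                cur = get_unaligned_be16(buf);
                if (i > 0 && cur == prev)
                        break;  /* two consecutive reads agree */
                prev = cur;
        }

        *out = cur;
        return 0;
}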
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index 29161ae90245..594bb3b8a4d1 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1731,11 +1731,13 @@ static const struct of_device_id rt9455_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rt9455_of_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id rt9455_i2c_acpi_match[] = {
{ "RT945500", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, rt9455_i2c_acpi_match);
+#endif
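Guarding the table with CONFIG_ACPI silences a defined-but-unused warning when ACPI is compiled out. The .driver sub-struct of rt9455_driver, which this hunk does not show, conventionally references the table through ACPI_PTR() so the pointer also compiles away, roughly:

        .driver = {
                .name = "rt9455",
                .acpi_match_table = ACPI_PTR(rt9455_i2c_acpi_match),
        },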
static struct i2c_driver rt9455_driver = {
.probe = rt9455_probe,
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 3d00b35cafc9..60b7f41ab063 100644
--- a/drivers/power/supply/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <plat/adc.h>
+#include <linux/soc/samsung/s3c-adc.h>
#define BAT_POLL_INTERVAL 10000 /* ms */
#define JITTER_DELAY 500 /* ms */
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 7439753fac87..b6a538ebb378 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -193,7 +193,6 @@ struct sbs_info {
struct power_supply *power_supply;
bool is_present;
struct gpio_desc *gpio_detect;
- bool enable_detection;
bool charger_broadcasts;
int last_state;
int poll_time;
@@ -480,37 +479,6 @@ static bool sbs_bat_needs_calibration(struct i2c_client *client)
return !!(ret & BIT(7));
}
-static int sbs_get_battery_presence_and_health(
- struct i2c_client *client, enum power_supply_property psp,
- union power_supply_propval *val)
-{
- int ret;
-
- /* Dummy command; if it succeeds, battery is present. */
- ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
-
- if (ret < 0) { /* battery not present*/
- if (psp == POWER_SUPPLY_PROP_PRESENT) {
- val->intval = 0;
- return 0;
- }
- return ret;
- }
-
- if (psp == POWER_SUPPLY_PROP_PRESENT)
- val->intval = 1; /* battery present */
- else { /* POWER_SUPPLY_PROP_HEALTH */
- if (sbs_bat_needs_calibration(client)) {
- val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
- } else {
- /* SBS spec doesn't have a general health command. */
- val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
- }
- }
-
- return 0;
-}
-
static int sbs_get_ti_battery_presence_and_health(
struct i2c_client *client, enum power_supply_property psp,
union power_supply_propval *val)
@@ -569,6 +537,41 @@ static int sbs_get_ti_battery_presence_and_health(
return 0;
}
+static int sbs_get_battery_presence_and_health(
+ struct i2c_client *client, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sbs_info *chip = i2c_get_clientdata(client);
+ int ret;
+
+ if (chip->flags & SBS_FLAGS_TI_BQ20ZX5)
+ return sbs_get_ti_battery_presence_and_health(client, psp, val);
+
+ /* Dummy command; if it succeeds, battery is present. */
+ ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+
+ if (ret < 0) { /* battery not present */
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ val->intval = 0;
+ return 0;
+ }
+ return ret;
+ }
+
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ val->intval = 1; /* battery present */
+ else { /* POWER_SUPPLY_PROP_HEALTH */
+ if (sbs_bat_needs_calibration(client)) {
+ val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
+ } else {
+ /* SBS spec doesn't have a general health command. */
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
+
+ return 0;
+}
+
static int sbs_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
@@ -871,12 +874,7 @@ static int sbs_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
- if (chip->flags & SBS_FLAGS_TI_BQ20ZX5)
- ret = sbs_get_ti_battery_presence_and_health(client,
- psp, val);
- else
- ret = sbs_get_battery_presence_and_health(client, psp,
- val);
+ ret = sbs_get_battery_presence_and_health(client, psp, val);
/* this can only be true if no gpio is used */
if (psp == POWER_SUPPLY_PROP_PRESENT)
@@ -967,32 +965,30 @@ static int sbs_get_property(struct power_supply *psy,
return -EINVAL;
}
- if (!chip->enable_detection)
- goto done;
+ if (!chip->gpio_detect && chip->is_present != (ret >= 0)) {
+ bool old_present = chip->is_present;
+ union power_supply_propval val;
+ int err = sbs_get_battery_presence_and_health(
+ client, POWER_SUPPLY_PROP_PRESENT, &val);
- if (!chip->gpio_detect &&
- chip->is_present != (ret >= 0)) {
- sbs_update_presence(chip, (ret >= 0));
- power_supply_changed(chip->power_supply);
+ sbs_update_presence(chip, !err && val.intval);
+
+ if (old_present != chip->is_present)
+ power_supply_changed(chip->power_supply);
}
done:
if (!ret) {
/* Convert units to match requirements for power supply class */
sbs_unit_adjustment(client, psp, val);
+ dev_dbg(&client->dev,
+ "%s: property = %d, value = %x\n", __func__,
+ psp, val->intval);
+ } else if (!chip->is_present) {
+ /* battery not present, so return NODATA for properties */
+ ret = -ENODATA;
}
-
- dev_dbg(&client->dev,
- "%s: property = %d, value = %x\n", __func__, psp, val->intval);
-
- if (ret && chip->is_present)
- return ret;
-
- /* battery not present, so return NODATA for properties */
- if (ret)
- return -ENODATA;
-
- return 0;
+ return ret;
}
static void sbs_supply_changed(struct sbs_info *chip)
@@ -1098,7 +1094,6 @@ static int sbs_probe(struct i2c_client *client)
chip->flags = (u32)(uintptr_t)device_get_match_data(&client->dev);
chip->client = client;
- chip->enable_detection = false;
psy_cfg.of_node = client->dev.of_node;
psy_cfg.drv_data = chip;
chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
@@ -1159,15 +1154,19 @@ skip_gpio:
* to the battery.
*/
if (!(force_load || chip->gpio_detect)) {
- rc = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+ union power_supply_propval val;
- if (rc < 0) {
- dev_err(&client->dev, "%s: Failed to get device status\n",
- __func__);
+ rc = sbs_get_battery_presence_and_health(
+ client, POWER_SUPPLY_PROP_PRESENT, &val);
+ if (rc < 0 || !val.intval) {
+ dev_err(&client->dev, "Failed to get present status\n");
+ rc = -ENODEV;
goto exit_psupply;
}
}
+ INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
+
chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc,
&psy_cfg);
if (IS_ERR(chip->power_supply)) {
@@ -1180,10 +1179,6 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
- INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
-
- chip->enable_detection = true;
-
return 0;
exit_psupply:
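The refactored tail of sbs_get_property() keeps the long-standing convention that property reads on an absent battery fail with -ENODATA, which user space sees as a failing read() on the sysfs attribute. A small illustration (the supply name below is an example; the real one depends on the I2C topology):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[32];
        int fd = open("/sys/class/power_supply/sbs-0-000b/voltage_now",
                      O_RDONLY);

        if (fd < 0)
                return 1;
        if (read(fd, buf, sizeof(buf)) < 0 && errno == ENODATA)
                puts("battery not present");
        close(fd);
        return 0;
}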
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index f99026d81f2a..d3bf35ed12ce 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -16,11 +16,18 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/mutex.h>
#include <linux/power_supply.h>
-#include <linux/power/smb347-charger.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#include <dt-bindings/power/summit,smb347-charger.h>
+
+/* Use the default compensation method */
+#define SMB3XX_SOFT_TEMP_COMPENSATE_DEFAULT -1
+
+/* Use default factory programmed value for hard/soft temperature limit */
+#define SMB3XX_TEMP_USE_DEFAULT -273
+
/*
* Configuration registers. These are mirrored to volatile RAM and can be
* written once %CMD_A_ALLOW_WRITE is set in %CMD_A register. They will be
@@ -122,82 +129,140 @@
/**
* struct smb347_charger - smb347 charger instance
- * @lock: protects concurrent access to online variables
* @dev: pointer to device
* @regmap: pointer to driver regmap
* @mains: power_supply instance for AC/DC power
* @usb: power_supply instance for USB power
- * @battery: power_supply instance for battery
+ * @id: SMB charger ID
* @mains_online: is AC/DC input connected
* @usb_online: is USB input connected
* @charging_enabled: is charging enabled
- * @pdata: pointer to platform data
+ * @max_charge_current: maximum current (in uA) the battery can be charged
+ * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+ * @pre_charge_current: current (in uA) to use in pre-charging phase
+ * @termination_current: current (in uA) used to determine when the
+ * charging cycle terminates
+ * @pre_to_fast_voltage: voltage (in uV) threshold used for transitioning
+ * from pre-charge to fast charge mode
+ * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
+ * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
+ * input
+ * @chip_temp_threshold: die temperature where device starts limiting charge
+ * current [%100 - %130] (in degree C)
+ * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
+ * granularity is 5 deg C.
+ * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
+ * granularity is 5 deg C.
+ * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
+ * @soft_temp_limit_compensation: compensation method when soft temperature
+ * limit is hit
+ * @charge_current_compensation: current (in uA) for charging compensation
+ * current when temperature hits soft limits
+ * @use_mains: AC/DC input can be used
+ * @use_usb: USB input can be used
+ * @use_usb_otg: USB OTG output can be used (not implemented yet)
+ * @enable_control: how charging enable/disable is controlled
+ * (driver/pin controls)
+ *
+ * @use_mains, @use_usb, and @use_usb_otg are means to enable/disable
+ * hardware support for these inputs. This is useful when, for example,
+ * OTG charging is controlled by an OTG transceiver driver and not by
+ * the SMB347 hardware.
+ *
+ * Hard and soft temperature limit values are given as described in the
+ * device data sheet, assuming an NTC beta value of %3750. Even if the
+ * actual NTC beta differs, these values should still be used; they can
+ * be mapped to the
+ * corresponding NTC beta values with the help of table %2 in the data
+ * sheet. So for example if NTC beta is %3375 and we want to program hard
+ * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
+ *
+ * If zero value is given in any of the current and voltage values, the
+ * factory programmed default will be used. For soft/hard temperature
+ * values, pass in %SMB3XX_TEMP_USE_DEFAULT instead.
*/
struct smb347_charger {
- struct mutex lock;
struct device *dev;
struct regmap *regmap;
struct power_supply *mains;
struct power_supply *usb;
- struct power_supply *battery;
+ unsigned int id;
bool mains_online;
bool usb_online;
bool charging_enabled;
- const struct smb347_charger_platform_data *pdata;
+
+ unsigned int max_charge_current;
+ unsigned int max_charge_voltage;
+ unsigned int pre_charge_current;
+ unsigned int termination_current;
+ unsigned int pre_to_fast_voltage;
+ unsigned int mains_current_limit;
+ unsigned int usb_hc_current_limit;
+ unsigned int chip_temp_threshold;
+ int soft_cold_temp_limit;
+ int soft_hot_temp_limit;
+ int hard_cold_temp_limit;
+ int hard_hot_temp_limit;
+ bool suspend_on_hard_temp_limit;
+ unsigned int soft_temp_limit_compensation;
+ unsigned int charge_current_compensation;
+ bool use_mains;
+ bool use_usb;
+ bool use_usb_otg;
+ unsigned int enable_control;
};
-/* Fast charge current in uA */
-static const unsigned int fcc_tbl[] = {
- 700000,
- 900000,
- 1200000,
- 1500000,
- 1800000,
- 2000000,
- 2200000,
- 2500000,
+enum smb_charger_chipid {
+ SMB345,
+ SMB347,
+ SMB358,
+ NUM_CHIP_TYPES,
};
+/* Fast charge current in uA */
+static const unsigned int fcc_tbl[NUM_CHIP_TYPES][8] = {
+ [SMB345] = { 200000, 450000, 600000, 900000,
+ 1300000, 1500000, 1800000, 2000000 },
+ [SMB347] = { 700000, 900000, 1200000, 1500000,
+ 1800000, 2000000, 2200000, 2500000 },
+ [SMB358] = { 200000, 450000, 600000, 900000,
+ 1300000, 1500000, 1800000, 2000000 },
+};
/* Pre-charge current in uA */
-static const unsigned int pcc_tbl[] = {
- 100000,
- 150000,
- 200000,
- 250000,
+static const unsigned int pcc_tbl[NUM_CHIP_TYPES][4] = {
+ [SMB345] = { 150000, 250000, 350000, 450000 },
+ [SMB347] = { 100000, 150000, 200000, 250000 },
+ [SMB358] = { 150000, 250000, 350000, 450000 },
};
/* Termination current in uA */
-static const unsigned int tc_tbl[] = {
- 37500,
- 50000,
- 100000,
- 150000,
- 200000,
- 250000,
- 500000,
- 600000,
+static const unsigned int tc_tbl[NUM_CHIP_TYPES][8] = {
+ [SMB345] = { 30000, 40000, 60000, 80000,
+ 100000, 125000, 150000, 200000 },
+ [SMB347] = { 37500, 50000, 100000, 150000,
+ 200000, 250000, 500000, 600000 },
+ [SMB358] = { 30000, 40000, 60000, 80000,
+ 100000, 125000, 150000, 200000 },
};
/* Input current limit in uA */
-static const unsigned int icl_tbl[] = {
- 300000,
- 500000,
- 700000,
- 900000,
- 1200000,
- 1500000,
- 1800000,
- 2000000,
- 2200000,
- 2500000,
+static const unsigned int icl_tbl[NUM_CHIP_TYPES][10] = {
+ [SMB345] = { 300000, 500000, 700000, 1000000, 1500000,
+ 1800000, 2000000, 2000000, 2000000, 2000000 },
+ [SMB347] = { 300000, 500000, 700000, 900000, 1200000,
+ 1500000, 1800000, 2000000, 2200000, 2500000 },
+ [SMB358] = { 300000, 500000, 700000, 1000000, 1500000,
+ 1800000, 2000000, 2000000, 2000000, 2000000 },
};
/* Charge current compensation in uA */
-static const unsigned int ccc_tbl[] = {
- 250000,
- 700000,
- 900000,
- 1200000,
+static const unsigned int ccc_tbl[NUM_CHIP_TYPES][4] = {
+ [SMB345] = { 200000, 450000, 600000, 900000 },
+ [SMB347] = { 250000, 700000, 900000, 1200000 },
+ [SMB358] = { 200000, 450000, 600000, 900000 },
};
/* Convert register value to current using lookup table */
@@ -242,16 +307,14 @@ static int smb347_update_ps_status(struct smb347_charger *smb)
* Dc and usb are set depending on whether they are enabled in
* platform data _and_ whether corresponding undervoltage is set.
*/
- if (smb->pdata->use_mains)
+ if (smb->use_mains)
dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
- if (smb->pdata->use_usb)
+ if (smb->use_usb)
usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
- mutex_lock(&smb->lock);
ret = smb->mains_online != dc || smb->usb_online != usb;
smb->mains_online = dc;
smb->usb_online = usb;
- mutex_unlock(&smb->lock);
return ret;
}
@@ -267,13 +330,7 @@ static int smb347_update_ps_status(struct smb347_charger *smb)
*/
static bool smb347_is_ps_online(struct smb347_charger *smb)
{
- bool ret;
-
- mutex_lock(&smb->lock);
- ret = smb->usb_online || smb->mains_online;
- mutex_unlock(&smb->lock);
-
- return ret;
+ return smb->usb_online || smb->mains_online;
}
/**
@@ -302,19 +359,18 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
{
int ret = 0;
- if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
+ if (smb->enable_control != SMB3XX_CHG_ENABLE_SW) {
dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
return 0;
}
- mutex_lock(&smb->lock);
if (smb->charging_enabled != enable) {
ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
enable ? CMD_A_CHG_ENABLED : 0);
if (!ret)
smb->charging_enabled = enable;
}
- mutex_unlock(&smb->lock);
+
return ret;
}
@@ -352,11 +408,12 @@ static int smb347_start_stop_charging(struct smb347_charger *smb)
static int smb347_set_charge_current(struct smb347_charger *smb)
{
+ unsigned int id = smb->id;
int ret;
- if (smb->pdata->max_charge_current) {
- ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
- smb->pdata->max_charge_current);
+ if (smb->max_charge_current) {
+ ret = current_to_hw(fcc_tbl[id], ARRAY_SIZE(fcc_tbl[id]),
+ smb->max_charge_current);
if (ret < 0)
return ret;
@@ -367,9 +424,9 @@ static int smb347_set_charge_current(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->pre_charge_current) {
- ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
- smb->pdata->pre_charge_current);
+ if (smb->pre_charge_current) {
+ ret = current_to_hw(pcc_tbl[id], ARRAY_SIZE(pcc_tbl[id]),
+ smb->pre_charge_current);
if (ret < 0)
return ret;
@@ -380,9 +437,9 @@ static int smb347_set_charge_current(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->termination_current) {
- ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
- smb->pdata->termination_current);
+ if (smb->termination_current) {
+ ret = current_to_hw(tc_tbl[id], ARRAY_SIZE(tc_tbl[id]),
+ smb->termination_current);
if (ret < 0)
return ret;
@@ -397,11 +454,12 @@ static int smb347_set_charge_current(struct smb347_charger *smb)
static int smb347_set_current_limits(struct smb347_charger *smb)
{
+ unsigned int id = smb->id;
int ret;
- if (smb->pdata->mains_current_limit) {
- ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
- smb->pdata->mains_current_limit);
+ if (smb->mains_current_limit) {
+ ret = current_to_hw(icl_tbl[id], ARRAY_SIZE(icl_tbl[id]),
+ smb->mains_current_limit);
if (ret < 0)
return ret;
@@ -412,9 +470,9 @@ static int smb347_set_current_limits(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->usb_hc_current_limit) {
- ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
- smb->pdata->usb_hc_current_limit);
+ if (smb->usb_hc_current_limit) {
+ ret = current_to_hw(icl_tbl[id], ARRAY_SIZE(icl_tbl[id]),
+ smb->usb_hc_current_limit);
if (ret < 0)
return ret;
@@ -431,8 +489,8 @@ static int smb347_set_voltage_limits(struct smb347_charger *smb)
{
int ret;
- if (smb->pdata->pre_to_fast_voltage) {
- ret = smb->pdata->pre_to_fast_voltage;
+ if (smb->pre_to_fast_voltage) {
+ ret = smb->pre_to_fast_voltage;
/* uV */
ret = clamp_val(ret, 2400000, 3000000) - 2400000;
@@ -445,8 +503,8 @@ static int smb347_set_voltage_limits(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->max_charge_voltage) {
- ret = smb->pdata->max_charge_voltage;
+ if (smb->max_charge_voltage) {
+ ret = smb->max_charge_voltage;
/* uV */
ret = clamp_val(ret, 3500000, 4500000) - 3500000;
@@ -463,12 +521,13 @@ static int smb347_set_voltage_limits(struct smb347_charger *smb)
static int smb347_set_temp_limits(struct smb347_charger *smb)
{
+ unsigned int id = smb->id;
bool enable_therm_monitor = false;
int ret = 0;
int val;
- if (smb->pdata->chip_temp_threshold) {
- val = smb->pdata->chip_temp_threshold;
+ if (smb->chip_temp_threshold) {
+ val = smb->chip_temp_threshold;
/* degree C */
val = clamp_val(val, 100, 130) - 100;
@@ -481,8 +540,8 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
- val = smb->pdata->soft_cold_temp_limit;
+ if (smb->soft_cold_temp_limit != SMB3XX_TEMP_USE_DEFAULT) {
+ val = smb->soft_cold_temp_limit;
val = clamp_val(val, 0, 15);
val /= 5;
@@ -498,8 +557,8 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
enable_therm_monitor = true;
}
- if (smb->pdata->soft_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
- val = smb->pdata->soft_hot_temp_limit;
+ if (smb->soft_hot_temp_limit != SMB3XX_TEMP_USE_DEFAULT) {
+ val = smb->soft_hot_temp_limit;
val = clamp_val(val, 40, 55) - 40;
val /= 5;
@@ -513,8 +572,8 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
enable_therm_monitor = true;
}
- if (smb->pdata->hard_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
- val = smb->pdata->hard_cold_temp_limit;
+ if (smb->hard_cold_temp_limit != SMB3XX_TEMP_USE_DEFAULT) {
+ val = smb->hard_cold_temp_limit;
val = clamp_val(val, -5, 10) + 5;
val /= 5;
@@ -530,8 +589,8 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
enable_therm_monitor = true;
}
- if (smb->pdata->hard_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
- val = smb->pdata->hard_hot_temp_limit;
+ if (smb->hard_hot_temp_limit != SMB3XX_TEMP_USE_DEFAULT) {
+ val = smb->hard_hot_temp_limit;
val = clamp_val(val, 50, 65) - 50;
val /= 5;
@@ -562,16 +621,16 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->suspend_on_hard_temp_limit) {
+ if (smb->suspend_on_hard_temp_limit) {
ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
if (ret < 0)
return ret;
}
- if (smb->pdata->soft_temp_limit_compensation !=
- SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
- val = smb->pdata->soft_temp_limit_compensation & 0x3;
+ if (smb->soft_temp_limit_compensation !=
+ SMB3XX_SOFT_TEMP_COMPENSATE_DEFAULT) {
+ val = smb->soft_temp_limit_compensation & 0x3;
ret = regmap_update_bits(smb->regmap, CFG_THERM,
CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
@@ -586,9 +645,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
return ret;
}
- if (smb->pdata->charge_current_compensation) {
- val = current_to_hw(ccc_tbl, ARRAY_SIZE(ccc_tbl),
- smb->pdata->charge_current_compensation);
+ if (smb->charge_current_compensation) {
+ val = current_to_hw(ccc_tbl[id], ARRAY_SIZE(ccc_tbl[id]),
+ smb->charge_current_compensation);
if (val < 0)
return val;
@@ -647,7 +706,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
goto fail;
/* If USB charging is disabled we put the USB in suspend mode */
- if (!smb->pdata->use_usb) {
+ if (!smb->use_usb) {
ret = regmap_update_bits(smb->regmap, CMD_A,
CMD_A_SUSPEND_ENABLED,
CMD_A_SUSPEND_ENABLED);
@@ -660,7 +719,7 @@ static int smb347_hw_init(struct smb347_charger *smb)
* support for driving VBUS. Otherwise we disable it.
*/
ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
- smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
+ smb->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
if (ret < 0)
goto fail;
@@ -669,11 +728,11 @@ static int smb347_hw_init(struct smb347_charger *smb)
* command register unless pin control is specified in the platform
* data.
*/
- switch (smb->pdata->enable_control) {
- case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
+ switch (smb->enable_control) {
+ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
break;
- case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
+ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
break;
default:
@@ -742,7 +801,10 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
*/
if (stat_c & STAT_C_CHARGER_ERROR) {
dev_err(smb->dev, "charging stopped due to charger error\n");
- power_supply_changed(smb->battery);
+ if (smb->use_mains)
+ power_supply_changed(smb->mains);
+ if (smb->use_usb)
+ power_supply_changed(smb->usb);
handled = true;
}
@@ -752,8 +814,12 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* disabled by the hardware.
*/
if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
- if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
- power_supply_changed(smb->battery);
+ if (irqstat_c & IRQSTAT_C_TERMINATION_STAT) {
+ if (smb->use_mains)
+ power_supply_changed(smb->mains);
+ if (smb->use_usb)
+ power_supply_changed(smb->usb);
+ }
dev_dbg(smb->dev, "going to HW maintenance mode\n");
handled = true;
}
@@ -767,7 +833,10 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
if (irqstat_d & IRQSTAT_D_CHARGE_TIMEOUT_STAT)
dev_warn(smb->dev, "charging stopped due to timeout\n");
- power_supply_changed(smb->battery);
+ if (smb->use_mains)
+ power_supply_changed(smb->mains);
+ if (smb->use_usb)
+ power_supply_changed(smb->usb);
handled = true;
}
@@ -778,9 +847,9 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
if (smb347_update_ps_status(smb) > 0) {
smb347_start_stop_charging(smb);
- if (smb->pdata->use_mains)
+ if (smb->use_mains)
power_supply_changed(smb->mains);
- if (smb->pdata->use_usb)
+ if (smb->use_usb)
power_supply_changed(smb->usb);
}
handled = true;
@@ -835,22 +904,17 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
static int smb347_irq_init(struct smb347_charger *smb,
struct i2c_client *client)
{
- const struct smb347_charger_platform_data *pdata = smb->pdata;
- int ret, irq = gpio_to_irq(pdata->irq_gpio);
-
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
- if (ret < 0)
- goto fail;
+ int ret;
- ret = request_threaded_irq(irq, NULL, smb347_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->name, smb);
+ ret = devm_request_threaded_irq(smb->dev, client->irq, NULL,
+ smb347_interrupt, IRQF_ONESHOT,
+ client->name, smb);
if (ret < 0)
- goto fail_gpio;
+ return ret;
ret = smb347_set_writable(smb, true);
if (ret < 0)
- goto fail_irq;
+ return ret;
/*
* Configure the STAT output to be suitable for interrupts: disable
@@ -860,20 +924,10 @@ static int smb347_irq_init(struct smb347_charger *smb,
CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
CFG_STAT_DISABLED);
if (ret < 0)
- goto fail_readonly;
+ client->irq = 0;
smb347_set_writable(smb, false);
- client->irq = irq;
- return 0;
-fail_readonly:
- smb347_set_writable(smb, false);
-fail_irq:
- free_irq(irq, smb);
-fail_gpio:
- gpio_free(pdata->irq_gpio);
-fail:
- client->irq = 0;
return ret;
}
@@ -883,6 +937,7 @@ fail:
*/
static int get_const_charge_current(struct smb347_charger *smb)
{
+ unsigned int id = smb->id;
int ret, intval;
unsigned int v;
@@ -898,10 +953,12 @@ static int get_const_charge_current(struct smb347_charger *smb)
* and we can detect which table to use from bit 5.
*/
if (v & 0x20) {
- intval = hw_to_current(fcc_tbl, ARRAY_SIZE(fcc_tbl), v & 7);
+ intval = hw_to_current(fcc_tbl[id],
+ ARRAY_SIZE(fcc_tbl[id]), v & 7);
} else {
v >>= 3;
- intval = hw_to_current(pcc_tbl, ARRAY_SIZE(pcc_tbl), v & 7);
+ intval = hw_to_current(pcc_tbl[id],
+ ARRAY_SIZE(pcc_tbl[id]), v & 7);
}
return intval;
@@ -932,95 +989,19 @@ static int get_const_charge_voltage(struct smb347_charger *smb)
return intval;
}
-static int smb347_mains_get_property(struct power_supply *psy,
- enum power_supply_property prop,
- union power_supply_propval *val)
-{
- struct smb347_charger *smb = power_supply_get_drvdata(psy);
- int ret;
-
- switch (prop) {
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = smb->mains_online;
- break;
-
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- ret = get_const_charge_voltage(smb);
- if (ret < 0)
- return ret;
- else
- val->intval = ret;
- break;
-
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = get_const_charge_current(smb);
- if (ret < 0)
- return ret;
- else
- val->intval = ret;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static enum power_supply_property smb347_mains_properties[] = {
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
-};
-
-static int smb347_usb_get_property(struct power_supply *psy,
- enum power_supply_property prop,
- union power_supply_propval *val)
-{
- struct smb347_charger *smb = power_supply_get_drvdata(psy);
- int ret;
-
- switch (prop) {
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = smb->usb_online;
- break;
-
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- ret = get_const_charge_voltage(smb);
- if (ret < 0)
- return ret;
- else
- val->intval = ret;
- break;
-
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = get_const_charge_current(smb);
- if (ret < 0)
- return ret;
- else
- val->intval = ret;
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static enum power_supply_property smb347_usb_properties[] = {
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
-};
-
-static int smb347_get_charging_status(struct smb347_charger *smb)
+static int smb347_get_charging_status(struct smb347_charger *smb,
+ struct power_supply *psy)
{
int ret, status;
unsigned int val;
- if (!smb347_is_ps_online(smb))
- return POWER_SUPPLY_STATUS_DISCHARGING;
+ if (psy->desc->type == POWER_SUPPLY_TYPE_USB) {
+ if (!smb->usb_online)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+ } else {
+ if (!smb->mains_online)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+ }
ret = regmap_read(smb->regmap, STAT_C, &val);
if (ret < 0)
@@ -1059,29 +1040,29 @@ static int smb347_get_charging_status(struct smb347_charger *smb)
return status;
}
-static int smb347_battery_get_property(struct power_supply *psy,
- enum power_supply_property prop,
- union power_supply_propval *val)
+static int smb347_get_property_locked(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
{
struct smb347_charger *smb = power_supply_get_drvdata(psy);
- const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret;
- ret = smb347_update_ps_status(smb);
- if (ret < 0)
- return ret;
-
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- ret = smb347_get_charging_status(smb);
+ ret = smb347_get_charging_status(smb, psy);
if (ret < 0)
return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- if (!smb347_is_ps_online(smb))
- return -ENODATA;
+ if (psy->desc->type == POWER_SUPPLY_TYPE_USB) {
+ if (!smb->usb_online)
+ return -ENODATA;
+ } else {
+ if (!smb->mains_online)
+ return -ENODATA;
+ }
/*
* We handle trickle and pre-charging the same, and taper
@@ -1100,24 +1081,25 @@ static int smb347_battery_get_property(struct power_supply *psy,
}
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = pdata->battery_info.technology;
- break;
-
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = pdata->battery_info.voltage_min_design;
- break;
-
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = pdata->battery_info.voltage_max_design;
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (psy->desc->type == POWER_SUPPLY_TYPE_USB)
+ val->intval = smb->usb_online;
+ else
+ val->intval = smb->mains_online;
break;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = pdata->battery_info.charge_full_design;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = get_const_charge_voltage(smb);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
- case POWER_SUPPLY_PROP_MODEL_NAME:
- val->strval = pdata->battery_info.name;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = get_const_charge_current(smb);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
default:
@@ -1127,14 +1109,27 @@ static int smb347_battery_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property smb347_battery_properties[] = {
+static int smb347_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb347_charger *smb = power_supply_get_drvdata(psy);
+ struct i2c_client *client = to_i2c_client(smb->dev);
+ int ret;
+
+ disable_irq(client->irq);
+ ret = smb347_get_property_locked(psy, prop, val);
+ enable_irq(client->irq);
+
+ return ret;
+}
+
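The wrapper above is what replaces the dropped mutex: masking the interrupt serialises property reads against the threaded handler, and disable_irq() additionally waits for a handler already in flight to finish before returning. The idiom in generic form (a sketch, not additional driver code):

        disable_irq(irq);       /* masks the line and waits out a running handler */
        /* ... touch state shared with the interrupt handler ... */
        enable_irq(irq);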
+static enum power_supply_property smb347_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CHARGE_TYPE,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
};
static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
@@ -1180,6 +1175,96 @@ static bool smb347_readable_reg(struct device *dev, unsigned int reg)
return smb347_volatile_reg(dev, reg);
}
+static void smb347_dt_parse_dev_info(struct smb347_charger *smb)
+{
+ struct device *dev = smb->dev;
+
+ smb->soft_temp_limit_compensation =
+ SMB3XX_SOFT_TEMP_COMPENSATE_DEFAULT;
+ /*
+ * These properties come from the battery info, still we need to
+ * pre-initialize the values. See smb347_get_battery_info() below.
+ */
+ smb->soft_cold_temp_limit = SMB3XX_TEMP_USE_DEFAULT;
+ smb->hard_cold_temp_limit = SMB3XX_TEMP_USE_DEFAULT;
+ smb->soft_hot_temp_limit = SMB3XX_TEMP_USE_DEFAULT;
+ smb->hard_hot_temp_limit = SMB3XX_TEMP_USE_DEFAULT;
+
+ /* Charging constraints */
+ device_property_read_u32(dev, "summit,fast-voltage-threshold-microvolt",
+ &smb->pre_to_fast_voltage);
+ device_property_read_u32(dev, "summit,mains-current-limit-microamp",
+ &smb->mains_current_limit);
+ device_property_read_u32(dev, "summit,usb-current-limit-microamp",
+ &smb->usb_hc_current_limit);
+
+ /* For thermometer monitoring */
+ device_property_read_u32(dev, "summit,chip-temperature-threshold-celsius",
+ &smb->chip_temp_threshold);
+ device_property_read_u32(dev, "summit,soft-compensation-method",
+ &smb->soft_temp_limit_compensation);
+ device_property_read_u32(dev, "summit,charge-current-compensation-microamp",
+ &smb->charge_current_compensation);
+
+ /* Supported charging mode */
+ smb->use_mains = device_property_read_bool(dev, "summit,enable-mains-charging");
+ smb->use_usb = device_property_read_bool(dev, "summit,enable-usb-charging");
+ smb->use_usb_otg = device_property_read_bool(dev, "summit,enable-otg-charging");
+
+ /* Select charging control */
+ device_property_read_u32(dev, "summit,enable-charge-control",
+ &smb->enable_control);
+}
+
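Because the parser above uses the firmware-agnostic device_property_*() helpers rather than OF-only calls, the same keys can also be supplied from board code as static properties. A sketch of such a table (how it gets attached to the I2C device is an assumption, not part of this patch; SMB3XX_CHG_ENABLE_SW comes from the dt-bindings header included above):

static const struct property_entry smb347_props[] = {
        PROPERTY_ENTRY_U32("summit,mains-current-limit-microamp", 1800000),
        PROPERTY_ENTRY_U32("summit,enable-charge-control",
                           SMB3XX_CHG_ENABLE_SW),
        PROPERTY_ENTRY_BOOL("summit,enable-mains-charging"),
        { }
};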
+static int smb347_get_battery_info(struct smb347_charger *smb)
+{
+ struct power_supply_battery_info info = {};
+ struct power_supply *supply;
+ int err;
+
+ if (smb->mains)
+ supply = smb->mains;
+ else
+ supply = smb->usb;
+
+ err = power_supply_get_battery_info(supply, &info);
+ if (err == -ENXIO || err == -ENODEV)
+ return 0;
+ if (err)
+ return err;
+
+ if (info.constant_charge_current_max_ua != -EINVAL)
+ smb->max_charge_current = info.constant_charge_current_max_ua;
+
+ if (info.constant_charge_voltage_max_uv != -EINVAL)
+ smb->max_charge_voltage = info.constant_charge_voltage_max_uv;
+
+ if (info.precharge_current_ua != -EINVAL)
+ smb->pre_charge_current = info.precharge_current_ua;
+
+ if (info.charge_term_current_ua != -EINVAL)
+ smb->termination_current = info.charge_term_current_ua;
+
+ if (info.temp_alert_min != INT_MIN)
+ smb->soft_cold_temp_limit = info.temp_alert_min;
+
+ if (info.temp_alert_max != INT_MAX)
+ smb->soft_hot_temp_limit = info.temp_alert_max;
+
+ if (info.temp_min != INT_MIN)
+ smb->hard_cold_temp_limit = info.temp_min;
+
+ if (info.temp_max != INT_MAX)
+ smb->hard_hot_temp_limit = info.temp_max;
+
+ /* Suspend when battery temperature is outside hard limits */
+ if (smb->hard_cold_temp_limit != SMB3XX_TEMP_USE_DEFAULT ||
+ smb->hard_hot_temp_limit != SMB3XX_TEMP_USE_DEFAULT)
+ smb->suspend_on_hard_temp_limit = true;
+
+ return 0;
+}
+
static const struct regmap_config smb347_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -1191,98 +1276,71 @@ static const struct regmap_config smb347_regmap = {
static const struct power_supply_desc smb347_mains_desc = {
.name = "smb347-mains",
.type = POWER_SUPPLY_TYPE_MAINS,
- .get_property = smb347_mains_get_property,
- .properties = smb347_mains_properties,
- .num_properties = ARRAY_SIZE(smb347_mains_properties),
+ .get_property = smb347_get_property,
+ .properties = smb347_properties,
+ .num_properties = ARRAY_SIZE(smb347_properties),
};
static const struct power_supply_desc smb347_usb_desc = {
.name = "smb347-usb",
.type = POWER_SUPPLY_TYPE_USB,
- .get_property = smb347_usb_get_property,
- .properties = smb347_usb_properties,
- .num_properties = ARRAY_SIZE(smb347_usb_properties),
-};
-
-static const struct power_supply_desc smb347_battery_desc = {
- .name = "smb347-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .get_property = smb347_battery_get_property,
- .properties = smb347_battery_properties,
- .num_properties = ARRAY_SIZE(smb347_battery_properties),
+ .get_property = smb347_get_property,
+ .properties = smb347_properties,
+ .num_properties = ARRAY_SIZE(smb347_properties),
};
static int smb347_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static char *battery[] = { "smb347-battery" };
- const struct smb347_charger_platform_data *pdata;
- struct power_supply_config mains_usb_cfg = {}, battery_cfg = {};
+ struct power_supply_config mains_usb_cfg = {};
struct device *dev = &client->dev;
struct smb347_charger *smb;
int ret;
- pdata = dev->platform_data;
- if (!pdata)
- return -EINVAL;
-
- if (!pdata->use_mains && !pdata->use_usb)
- return -EINVAL;
-
smb = devm_kzalloc(dev, sizeof(*smb), GFP_KERNEL);
if (!smb)
return -ENOMEM;
-
+ smb->dev = &client->dev;
+ smb->id = id->driver_data;
i2c_set_clientdata(client, smb);
- mutex_init(&smb->lock);
- smb->dev = &client->dev;
- smb->pdata = pdata;
+ smb347_dt_parse_dev_info(smb);
+ if (!smb->use_mains && !smb->use_usb)
+ return -EINVAL;
smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
if (IS_ERR(smb->regmap))
return PTR_ERR(smb->regmap);
- ret = smb347_hw_init(smb);
- if (ret < 0)
- return ret;
-
- mains_usb_cfg.supplied_to = battery;
- mains_usb_cfg.num_supplicants = ARRAY_SIZE(battery);
mains_usb_cfg.drv_data = smb;
- if (smb->pdata->use_mains) {
- smb->mains = power_supply_register(dev, &smb347_mains_desc,
- &mains_usb_cfg);
+ mains_usb_cfg.of_node = dev->of_node;
+ if (smb->use_mains) {
+ smb->mains = devm_power_supply_register(dev, &smb347_mains_desc,
+ &mains_usb_cfg);
if (IS_ERR(smb->mains))
return PTR_ERR(smb->mains);
}
- if (smb->pdata->use_usb) {
- smb->usb = power_supply_register(dev, &smb347_usb_desc,
- &mains_usb_cfg);
- if (IS_ERR(smb->usb)) {
- if (smb->pdata->use_mains)
- power_supply_unregister(smb->mains);
+ if (smb->use_usb) {
+ smb->usb = devm_power_supply_register(dev, &smb347_usb_desc,
+ &mains_usb_cfg);
+ if (IS_ERR(smb->usb))
return PTR_ERR(smb->usb);
- }
}
- battery_cfg.drv_data = smb;
- smb->battery = power_supply_register(dev, &smb347_battery_desc,
- &battery_cfg);
- if (IS_ERR(smb->battery)) {
- if (smb->pdata->use_usb)
- power_supply_unregister(smb->usb);
- if (smb->pdata->use_mains)
- power_supply_unregister(smb->mains);
- return PTR_ERR(smb->battery);
- }
+ ret = smb347_get_battery_info(smb);
+ if (ret)
+ return ret;
+
+ ret = smb347_hw_init(smb);
+ if (ret < 0)
+ return ret;
/*
* Interrupt pin is optional. If it is connected, we setup the
* interrupt support here.
*/
- if (pdata->irq_gpio >= 0) {
+ if (client->irq) {
ret = smb347_irq_init(smb, client);
if (ret < 0) {
dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
@@ -1299,29 +1357,31 @@ static int smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
- if (client->irq) {
+ if (client->irq)
smb347_irq_disable(smb);
- free_irq(client->irq, smb);
- gpio_free(smb->pdata->irq_gpio);
- }
-
- power_supply_unregister(smb->battery);
- if (smb->pdata->use_usb)
- power_supply_unregister(smb->usb);
- if (smb->pdata->use_mains)
- power_supply_unregister(smb->mains);
return 0;
}
static const struct i2c_device_id smb347_id[] = {
- { "smb347", 0 },
- { }
+ { "smb345", SMB345 },
+ { "smb347", SMB347 },
+ { "smb358", SMB358 },
+ { },
};
MODULE_DEVICE_TABLE(i2c, smb347_id);
+static const struct of_device_id smb3xx_of_match[] = {
+ { .compatible = "summit,smb345" },
+ { .compatible = "summit,smb347" },
+ { .compatible = "summit,smb358" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, smb3xx_of_match);
+
static struct i2c_driver smb347_driver = {
.driver = {
.name = "smb347",
+ .of_match_table = smb3xx_of_match,
},
.probe = smb347_probe,
.remove = smb347_remove,
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 04acd76bbaa1..5f510ddc946d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -352,8 +352,8 @@ static int param_set_ac_online(const char *key, const struct kernel_param *kp)
static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
{
- strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_ac_online, ac_online, "unknown"));
}
static int param_set_usb_online(const char *key, const struct kernel_param *kp)
@@ -365,8 +365,8 @@ static int param_set_usb_online(const char *key, const struct kernel_param *kp)
static int param_get_usb_online(char *buffer, const struct kernel_param *kp)
{
- strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_ac_online, usb_online, "unknown"));
}
static int param_set_battery_status(const char *key,
@@ -379,8 +379,8 @@ static int param_set_battery_status(const char *key,
static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
{
- strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_status, battery_status, "unknown"));
}
static int param_set_battery_health(const char *key,
@@ -393,8 +393,8 @@ static int param_set_battery_health(const char *key,
static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
{
- strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_health, battery_health, "unknown"));
}
static int param_set_battery_present(const char *key,
@@ -408,8 +408,8 @@ static int param_set_battery_present(const char *key,
static int param_get_battery_present(char *buffer,
const struct kernel_param *kp)
{
- strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_present, battery_present, "unknown"));
}
static int param_set_battery_technology(const char *key,
@@ -424,9 +424,9 @@ static int param_set_battery_technology(const char *key,
static int param_get_battery_technology(char *buffer,
const struct kernel_param *kp)
{
- strcpy(buffer,
- map_get_key(map_technology, battery_technology, "unknown"));
- return strlen(buffer);
+ return sprintf(buffer, "%s\n",
+ map_get_key(map_technology, battery_technology,
+ "unknown"));
}
static int param_set_battery_capacity(const char *key,
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index cdb9a23d825f..ef673ec3db56 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -38,6 +38,7 @@
/* Interrupt Status */
#define UCS1002_REG_INTERRUPT_STATUS 0x10
+# define F_ERR BIT(7)
# define F_DISCHARGE_ERR BIT(6)
# define F_RESET BIT(5)
# define F_MIN_KEEP_OUT BIT(4)
@@ -103,6 +104,9 @@ struct ucs1002_info {
struct regulator_dev *rdev;
bool present;
bool output_disable;
+ struct delayed_work health_poll;
+ int health;
};
static enum power_supply_property ucs1002_props[] = {
@@ -362,32 +366,6 @@ static int ucs1002_get_usb_type(struct ucs1002_info *info,
return 0;
}
-static int ucs1002_get_health(struct ucs1002_info *info,
- union power_supply_propval *val)
-{
- unsigned int reg;
- int ret, health;
-
- ret = regmap_read(info->regmap, UCS1002_REG_INTERRUPT_STATUS, &reg);
- if (ret)
- return ret;
-
- if (reg & F_TSD)
- health = POWER_SUPPLY_HEALTH_OVERHEAT;
- else if (reg & (F_OVER_VOLT | F_BACK_VOLT))
- health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- else if (reg & F_OVER_ILIM)
- health = POWER_SUPPLY_HEALTH_OVERCURRENT;
- else if (reg & (F_DISCHARGE_ERR | F_MIN_KEEP_OUT))
- health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
- else
- health = POWER_SUPPLY_HEALTH_GOOD;
-
- val->intval = health;
-
- return 0;
-}
-
static int ucs1002_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -406,7 +384,7 @@ static int ucs1002_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_USB_TYPE:
return ucs1002_get_usb_type(info, val);
case POWER_SUPPLY_PROP_HEALTH:
- return ucs1002_get_health(info, val);
+ val->intval = info->health;
+ return 0;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = info->present;
return 0;
@@ -458,6 +436,38 @@ static const struct power_supply_desc ucs1002_charger_desc = {
.num_properties = ARRAY_SIZE(ucs1002_props),
};
+static void ucs1002_health_poll(struct work_struct *work)
+{
+ struct ucs1002_info *info = container_of(work, struct ucs1002_info,
+ health_poll.work);
+ int ret;
+ u32 reg;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_INTERRUPT_STATUS, &reg);
+ if (ret)
+ return;
+
+ /* bad health and no status change, just schedule us again in a while */
+ if ((reg & F_ERR) && info->health != POWER_SUPPLY_HEALTH_GOOD) {
+ schedule_delayed_work(&info->health_poll,
+ msecs_to_jiffies(2000));
+ return;
+ }
+
+ if (reg & F_TSD)
+ info->health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (reg & (F_OVER_VOLT | F_BACK_VOLT))
+ info->health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (reg & F_OVER_ILIM)
+ info->health = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else if (reg & (F_DISCHARGE_ERR | F_MIN_KEEP_OUT))
+ info->health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else
+ info->health = POWER_SUPPLY_HEALTH_GOOD;
+
+ sysfs_notify(&info->charger->dev.kobj, NULL, "health");
+}
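The poll routine above is an instance of a common pattern: the alert interrupt kicks the delayed work immediately (the mod_delayed_work() call in ucs1002_alert_irq() below), and the work re-arms itself for as long as the fault persists. A minimal sketch of that shape, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/sysfs.h>

    struct example_dev {
    	struct kobject kobj;
    	struct delayed_work poll;
    };

    bool example_fault_pending(struct example_dev *ed);	/* hypothetical */

    static void example_poll(struct work_struct *work)
    {
    	struct example_dev *ed =
    		container_of(work, struct example_dev, poll.work);

    	if (example_fault_pending(ed)) {
    		/* Fault still present: check again in two seconds. */
    		schedule_delayed_work(&ed->poll, msecs_to_jiffies(2000));
    		return;
    	}

    	/* State settled: tell user space the attribute changed. */
    	sysfs_notify(&ed->kobj, NULL, "health");
    }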
+
static irqreturn_t ucs1002_charger_irq(int irq, void *data)
{
int ret, regval;
@@ -484,7 +494,7 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
{
struct ucs1002_info *info = data;
- power_supply_changed(info->charger);
+ mod_delayed_work(system_wq, &info->health_poll, 0);
return IRQ_HANDLED;
}
@@ -632,6 +642,9 @@ static int ucs1002_probe(struct i2c_client *client,
return ret;
}
+ info->health = POWER_SUPPLY_HEALTH_GOOD;
+ INIT_DELAYED_WORK(&info->health_poll, ucs1002_health_poll);
+
if (irq_a_det > 0) {
ret = devm_request_threaded_irq(dev, irq_a_det, NULL,
ucs1002_charger_irq,
@@ -645,10 +658,8 @@ static int ucs1002_probe(struct i2c_client *client,
}
if (irq_alert > 0) {
- ret = devm_request_threaded_irq(dev, irq_alert, NULL,
- ucs1002_alert_irq,
- IRQF_ONESHOT,
- "ucs1002-alert", info);
+ ret = devm_request_irq(dev, irq_alert, ucs1002_alert_irq,
+ 0,"ucs1002-alert", info);
if (ret) {
dev_err(dev, "Failed to request ALERT threaded irq: %d\n",
ret);
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index ebc4d4578339..bc228725346b 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -30,7 +30,7 @@ config INTEL_RAPL
In RAPL, the platform level settings are divided into domains for
fine grained control. These domains include processor package, DRAM
- controller, CPU core (Power Plance 0), graphics uncore (Power Plane
+ controller, CPU core (Power Plane 0), graphics uncore (Power Plane
1), etc.
config IDLE_INJECT
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 4310901a074e..6e1a0043c411 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -43,6 +43,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smpboot.h>
+#include <linux/idle_inject.h>
#include <uapi/linux/sched/types.h>
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 983d75bd5bd1..70d6d52bc1e2 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -544,7 +544,14 @@ static void rapl_init_domains(struct rapl_package *rp)
continue;
rd->rp = rp;
- rd->name = rapl_domain_names[i];
+
+ if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) {
+ snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d",
+ cpu_data(rp->lead_cpu).phys_proc_id);
+ } else {
+ snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s",
+ rapl_domain_names[i]);
+ }
+
rd->id = i;
rd->rpl[0].prim_id = PL1_ENABLE;
rd->rpl[0].name = pl1_name;
@@ -613,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
case ARBITRARY_UNIT:
default:
return value;
- };
+ }
if (to_raw)
return div64_u64(value, units) * scale;
@@ -1112,13 +1119,17 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
}
/* now register domains as children of the socket/package */
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+ struct powercap_zone *parent = rp->power_zone;
+
if (rd->id == RAPL_DOMAIN_PACKAGE)
continue;
+ if (rd->id == RAPL_DOMAIN_PLATFORM)
+ parent = NULL;
/* number of power limits per domain varies */
nr_pl = find_nr_power_limit(rd);
power_zone = powercap_register_zone(&rd->power_zone,
rp->priv->control_type,
- rd->name, rp->power_zone,
+ rd->name, parent,
&zone_ops[rd->id], nr_pl,
&constraint_ops);
@@ -1145,67 +1156,6 @@ err_cleanup:
return ret;
}
-int rapl_add_platform_domain(struct rapl_if_priv *priv)
-{
- struct rapl_domain *rd;
- struct powercap_zone *power_zone;
- struct reg_action ra;
- int ret;
-
- ra.reg = priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS];
- ra.mask = ~0;
- ret = priv->read_raw(0, &ra);
- if (ret || !ra.value)
- return -ENODEV;
-
- ra.reg = priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT];
- ra.mask = ~0;
- ret = priv->read_raw(0, &ra);
- if (ret || !ra.value)
- return -ENODEV;
-
- rd = kzalloc(sizeof(*rd), GFP_KERNEL);
- if (!rd)
- return -ENOMEM;
-
- rd->name = rapl_domain_names[RAPL_DOMAIN_PLATFORM];
- rd->id = RAPL_DOMAIN_PLATFORM;
- rd->regs[RAPL_DOMAIN_REG_LIMIT] =
- priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT];
- rd->regs[RAPL_DOMAIN_REG_STATUS] =
- priv->regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS];
- rd->rpl[0].prim_id = PL1_ENABLE;
- rd->rpl[0].name = pl1_name;
- rd->rpl[1].prim_id = PL2_ENABLE;
- rd->rpl[1].name = pl2_name;
- rd->rp = rapl_find_package_domain(0, priv);
-
- power_zone = powercap_register_zone(&rd->power_zone, priv->control_type,
- "psys", NULL,
- &zone_ops[RAPL_DOMAIN_PLATFORM],
- 2, &constraint_ops);
-
- if (IS_ERR(power_zone)) {
- kfree(rd);
- return PTR_ERR(power_zone);
- }
-
- priv->platform_rapl_domain = rd;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rapl_add_platform_domain);
-
-void rapl_remove_platform_domain(struct rapl_if_priv *priv)
-{
- if (priv->platform_rapl_domain) {
- powercap_unregister_zone(priv->control_type,
- &priv->platform_rapl_domain->power_zone);
- kfree(priv->platform_rapl_domain);
- }
-}
-EXPORT_SYMBOL_GPL(rapl_remove_platform_domain);
-
static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
{
struct reg_action ra;
@@ -1215,11 +1165,9 @@ static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
case RAPL_DOMAIN_PP0:
case RAPL_DOMAIN_PP1:
case RAPL_DOMAIN_DRAM:
+ case RAPL_DOMAIN_PLATFORM:
ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS];
break;
- case RAPL_DOMAIN_PLATFORM:
- /* PSYS(PLATFORM) is not a CPU domain, so avoid printng error */
- return -EINVAL;
default:
pr_err("invalid domain id %d\n", domain);
return -EINVAL;
@@ -1228,7 +1176,7 @@ static int rapl_check_domain(int cpu, int domain, struct rapl_package *rp)
* values, otherwise skip it.
*/
- ra.mask = ~0;
+ ra.mask = ENERGY_STATUS_MASK;
if (rp->priv->read_raw(cpu, &ra) || !ra.value)
return -ENODEV;
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index d2a2627507a9..1646808d354c 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -44,6 +44,7 @@ static struct rapl_if_priv rapl_msr_priv = {
.regs[RAPL_DOMAIN_PLATFORM] = {
MSR_PLATFORM_POWER_LIMIT, MSR_PLATFORM_ENERGY_STATUS, 0, 0, 0},
.limits[RAPL_DOMAIN_PACKAGE] = 2,
+ .limits[RAPL_DOMAIN_PLATFORM] = 2,
};
/* Handles CPU hotplug on multi-socket systems.
@@ -157,9 +158,6 @@ static int rapl_msr_probe(struct platform_device *pdev)
goto out;
rapl_msr_priv.pcap_rapl_online = ret;
- /* Don't bail out if PSys is not supported */
- rapl_add_platform_domain(&rapl_msr_priv);
-
return 0;
out:
@@ -171,7 +169,6 @@ out:
static int rapl_msr_remove(struct platform_device *pdev)
{
cpuhp_remove_state(rapl_msr_priv.pcap_rapl_online);
- rapl_remove_platform_domain(&rapl_msr_priv);
powercap_unregister_control_type(rapl_msr_priv.control_type);
return 0;
}
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index f808c5fa9838..3f0b8e2ef3d4 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -367,9 +367,9 @@ static void create_power_zone_common_attributes(
&dev_attr_max_energy_range_uj.attr;
if (power_zone->ops->get_energy_uj) {
if (power_zone->ops->reset_energy_uj)
- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
else
- dev_attr_energy_uj.attr.mode = S_IRUGO;
+ dev_attr_energy_uj.attr.mode = S_IRUSR;
power_zone->zone_dev_attrs[count++] =
&dev_attr_energy_uj.attr;
}
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 7711651ff19e..4700ffbdfced 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -93,9 +93,6 @@ MODULE_LICENSE("GPL");
#define TC_E2E_PTP_V2 2
#define TC_P2P_PTP_V2 3
-#define OFF_PTP_CLOCK_ID 20
-#define OFF_PTP_PORT_NUM 28
-
#define PHY_SPEED_10 0
#define PHY_SPEED_100 1
#define PHY_SPEED_1000 2
@@ -443,57 +440,41 @@ static void ines_link_state(struct mii_timestamper *mii_ts,
static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
struct ines_timestamp *ts, struct device *dev)
{
- u8 *msgtype, *data = skb_mac_header(skb);
- unsigned int offset = 0;
- __be16 *portn, *seqid;
- __be64 *clkid;
+ struct ptp_header *hdr;
+ u16 portn, seqid;
+ u8 msgtype;
+ u64 clkid;
if (unlikely(ptp_class & PTP_CLASS_V1))
return false;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return false;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return false;
- msgtype = data + offset;
- clkid = (__be64 *)(data + offset + OFF_PTP_CLOCK_ID);
- portn = (__be16 *)(data + offset + OFF_PTP_PORT_NUM);
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ clkid = be64_to_cpup((__be64 *)&hdr->source_port_identity.clock_identity.id[0]);
+ portn = be16_to_cpu(hdr->source_port_identity.port_number);
+ seqid = be16_to_cpu(hdr->sequence_id);
- if (tag_to_msgtype(ts->tag & 0x7) != (*msgtype & 0xf)) {
+ if (tag_to_msgtype(ts->tag & 0x7) != msgtype) {
dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
- tag_to_msgtype(ts->tag & 0x7), *msgtype & 0xf);
+ tag_to_msgtype(ts->tag & 0x7), msgtype);
return false;
}
- if (cpu_to_be64(ts->clkid) != *clkid) {
+ if (ts->clkid != clkid) {
dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
- cpu_to_be64(ts->clkid), *clkid);
+ ts->clkid, clkid);
return false;
}
- if (ts->portnum != ntohs(*portn)) {
+ if (ts->portnum != portn) {
dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
- ts->portnum, ntohs(*portn));
+ ts->portnum, portn);
return false;
}
- if (ts->seqid != ntohs(*seqid)) {
+ if (ts->seqid != seqid) {
dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
- ts->seqid, ntohs(*seqid));
+ ts->seqid, seqid);
return false;
}
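The rewrite above swaps the open-coded offset arithmetic for the ptp_parse_header()/ptp_get_msgtype() helpers. For reference, a minimal sketch of how a caller typically uses them; ptp_classify_raw() classifies the frame first:

    #include <linux/ptp_classify.h>

    /* Hedged sketch: classify once, parse once, then read fields through
     * struct ptp_header instead of raw buffer offsets. */
    static bool example_is_sync(struct sk_buff *skb)
    {
    	unsigned int type = ptp_classify_raw(skb);
    	struct ptp_header *hdr;

    	if (type == PTP_CLASS_NONE)
    		return false;

    	hdr = ptp_parse_header(skb, type);
    	if (!hdr)
    		return false;

    	return ptp_get_msgtype(hdr, type) == SYNC;
    }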
@@ -663,8 +644,7 @@ static void ines_txtstamp(struct mii_timestamper *mii_ts,
spin_unlock_irqrestore(&port->lock, flags);
- if (old_skb)
- kfree_skb(old_skb);
+ kfree_skb(old_skb);
schedule_delayed_work(&port->ts_work, 1);
}
@@ -694,35 +674,16 @@ static void ines_txtstamp_work(struct work_struct *work)
static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
+ struct ptp_header *hdr;
+ u8 msgtype;
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
-
- if (skb->len < offset + 1)
- return 0;
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- switch ((*msgtype & 0xf)) {
+ switch ((msgtype & 0xf)) {
case SYNC:
case PDELAY_RESP:
return true;
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index c09c16be0edf..beb5f74944cd 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -72,6 +72,10 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
}
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
@@ -366,6 +370,7 @@ static u32 ptp_qoriq_nominal_freq(u32 clk_src)
* "fsl,tmr-add"
* "fsl,tmr-fiper1"
* "fsl,tmr-fiper2"
+ * "fsl,tmr-fiper3" (required only for DPAA2 and ENETC hardware)
* "fsl,max-adj"
*
* Return 0 if success
@@ -412,6 +417,7 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
ptp_qoriq->tmr_add = freq_comp;
ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper3 = DEFAULT_FIPER3_PERIOD - ptp_qoriq->tclk_period;
/* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
* freq_ratio = reference_clock_freq / nominal_freq
@@ -446,6 +452,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
else
ptp_qoriq->extts_fifo_support = false;
+ if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
+ of_device_is_compatible(node, "fsl,enetc-ptp"))
+ ptp_qoriq->fiper3_support = true;
+
if (of_property_read_u32(node,
"fsl,tclk-period", &ptp_qoriq->tclk_period) ||
of_property_read_u32(node,
@@ -457,7 +467,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
of_property_read_u32(node,
"fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
of_property_read_u32(node,
- "fsl,max-adj", &ptp_qoriq->caps.max_adj)) {
+ "fsl,max-adj", &ptp_qoriq->caps.max_adj) ||
+ (ptp_qoriq->fiper3_support &&
+ of_property_read_u32(node, "fsl,tmr-fiper3",
+ &ptp_qoriq->tmr_fiper3))) {
pr_warn("device tree node missing required elements, try automatic configuration\n");
if (ptp_qoriq_auto_config(ptp_qoriq, node))
@@ -502,6 +515,11 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
+
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl,
tmr_ctrl|FIPERST|RTPE|TE|FRD);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 7dbcf6973d33..63be5362fd3a 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -410,7 +410,7 @@ config PWM_ROCKCHIP
config PWM_SAMSUNG
tristate "Samsung PWM support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
+ depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
Generic PWM framework driver for Samsung.
@@ -428,6 +428,16 @@ config PWM_SIFIVE
To compile this driver as a module, choose M here: the module
will be called pwm-sifive.
+config PWM_SL28CPLD
+ tristate "Kontron sl28cpld PWM support"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ help
+ Generic PWM framework driver for the board management controller
+ found on the Kontron sl28 CPLD.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sl28cpld.
+
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 2c2ba0a03557..cbdcd55d69ee 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
+obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 276e939a5684..1f16f5365d3c 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -1327,30 +1327,19 @@ static int pwm_seq_show(struct seq_file *s, void *v)
return 0;
}
-static const struct seq_operations pwm_seq_ops = {
+static const struct seq_operations pwm_debugfs_sops = {
.start = pwm_seq_start,
.next = pwm_seq_next,
.stop = pwm_seq_stop,
.show = pwm_seq_show,
};
-static int pwm_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &pwm_seq_ops);
-}
-
-static const struct file_operations pwm_debugfs_ops = {
- .owner = THIS_MODULE,
- .open = pwm_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(pwm_debugfs);
static int __init pwm_debugfs_init(void)
{
debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
- &pwm_debugfs_ops);
+ &pwm_debugfs_fops);
return 0;
}
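The conversion works because DEFINE_SEQ_ATTRIBUTE(name) expects a seq_operations table named name_sops (hence the rename above) and emits a matching name_open() plus name_fops. A minimal sketch with a hypothetical name and callbacks:

    #include <linux/seq_file.h>
    #include <linux/debugfs.h>

    /* foo_seq_start/next/stop/show are hypothetical callbacks; the macro
     * generates foo_open() and foo_fops from the foo_sops table. */
    static const struct seq_operations foo_sops = {
    	.start = foo_seq_start,
    	.next  = foo_seq_next,
    	.stop  = foo_seq_stop,
    	.show  = foo_seq_show,
    };
    DEFINE_SEQ_ATTRIBUTE(foo);

    /* usage: debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops); */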
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index d78f86f8e462..6841dcfe27fc 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -152,13 +152,9 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pc->base);
pc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pc->clk)) {
- ret = PTR_ERR(pc->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "clock not found: %d\n", ret);
-
- return ret;
- }
+ if (IS_ERR(pc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
+ "clock not found\n");
ret = clk_prepare_enable(pc->clk);
if (ret)
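This and the similar clock-lookup conversions below all rely on the dev_err_probe() idiom: it returns the error code, logs the message for real failures, and for -EPROBE_DEFER records a defer reason instead of logging. A minimal sketch:

    #include <linux/clk.h>
    #include <linux/device.h>

    /* Hedged sketch: one statement replaces the old "log unless it is
     * -EPROBE_DEFER, then return" dance. */
    static int example_get_clk(struct device *dev, struct clk **out)
    {
    	struct clk *clk = devm_clk_get(dev, NULL);

    	if (IS_ERR(clk))
    		return dev_err_probe(dev, PTR_ERR(clk), "clock not found\n");

    	*out = clk;
    	return 0;
    }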
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 272eeb071147..ecfdfac0c2d9 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -21,8 +21,8 @@
#define PWM_MAX_LEVEL 0xFF
-#define PWM_BASE_CLK 6000000 /* 6 MHz */
-#define PWM_MAX_PERIOD_NS 21333 /* 46.875KHz */
+#define PWM_BASE_CLK_MHZ 6 /* 6 MHz */
+#define PWM_MAX_PERIOD_NS 5461334 /* 183 Hz */
/**
* struct crystalcove_pwm - Crystal Cove PWM controller
@@ -39,59 +39,121 @@ static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
return container_of(pc, struct crystalcove_pwm, chip);
}
-static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+static int crc_pwm_calc_clk_div(int period_ns)
{
- struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+ int clk_div;
- regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+ clk_div = PWM_BASE_CLK_MHZ * period_ns / (256 * NSEC_PER_USEC);
+ /* clk_div 1 - 128, maps to register values 0-127 */
+ if (clk_div > 0)
+ clk_div--;
- return 0;
-}
-
-static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
-{
- struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
-
- regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+ return clk_div;
}
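For reference, the divisor math above with the 6 MHz base clock and 256-step counter: the new PWM_MAX_PERIOD_NS of 5461334 ns yields a divisor of 128, which maps to register value 127, the top of the valid range. A standalone check in plain user-space C (not driver code):

    #include <stdio.h>

    #define NSEC_PER_USEC 1000L

    int main(void)
    {
    	long period_ns = 5461334;	/* PWM_MAX_PERIOD_NS */
    	long clk_div = 6 * period_ns / (256 * NSEC_PER_USEC);

    	if (clk_div > 0)
    		clk_div--;	/* divisor 1..128 -> register 0..127 */

    	printf("register value: %ld\n", clk_div);	/* prints 127 */
    	return 0;
    }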
-static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
- struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+ struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
struct device *dev = crc_pwm->chip.dev;
- int level;
+ int err;
- if (period_ns > PWM_MAX_PERIOD_NS) {
+ if (state->period > PWM_MAX_PERIOD_NS) {
dev_err(dev, "un-supported period_ns\n");
return -EINVAL;
}
- if (pwm_get_period(pwm) != period_ns) {
- int clk_div;
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EOPNOTSUPP;
- /* changing the clk divisor, need to disable fisrt */
- crc_pwm_disable(c, pwm);
- clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
+ if (pwm_is_enabled(pwm) && !state->enabled) {
+ err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+ if (err) {
+ dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err);
+ return err;
+ }
+ }
- regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
- clk_div | PWM_OUTPUT_ENABLE);
+ if (pwm_get_duty_cycle(pwm) != state->duty_cycle ||
+ pwm_get_period(pwm) != state->period) {
+ u64 level = state->duty_cycle * PWM_MAX_LEVEL;
+
+ do_div(level, state->period);
+
+ err = regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+ if (err) {
+ dev_err(dev, "Error writing PWM0_DUTY_CYCLE %d\n", err);
+ return err;
+ }
+ }
+
+ if (pwm_is_enabled(pwm) && state->enabled &&
+ pwm_get_period(pwm) != state->period) {
+ /* changing the clk divisor, clear PWM_OUTPUT_ENABLE first */
+ err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV, 0);
+ if (err) {
+ dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err);
+ return err;
+ }
+ }
- /* enable back */
- crc_pwm_enable(c, pwm);
+ if (pwm_get_period(pwm) != state->period ||
+ pwm_is_enabled(pwm) != state->enabled) {
+ int clk_div = crc_pwm_calc_clk_div(state->period);
+ int pwm_output_enable = state->enabled ? PWM_OUTPUT_ENABLE : 0;
+
+ err = regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+ clk_div | pwm_output_enable);
+ if (err) {
+ dev_err(dev, "Error writing PWM0_CLK_DIV %d\n", err);
+ return err;
+ }
}
- /* change the pwm duty cycle */
- level = duty_ns * PWM_MAX_LEVEL / period_ns;
- regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+ if (!pwm_is_enabled(pwm) && state->enabled) {
+ err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+ if (err) {
+ dev_err(dev, "Error writing BACKLIGHT_EN %d\n", err);
+ return err;
+ }
+ }
return 0;
}
+static void crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
+ struct device *dev = crc_pwm->chip.dev;
+ unsigned int clk_div, clk_div_reg, duty_cycle_reg;
+ int error;
+
+ error = regmap_read(crc_pwm->regmap, PWM0_CLK_DIV, &clk_div_reg);
+ if (error) {
+ dev_err(dev, "Error reading PWM0_CLK_DIV %d\n", error);
+ return;
+ }
+
+ error = regmap_read(crc_pwm->regmap, PWM0_DUTY_CYCLE, &duty_cycle_reg);
+ if (error) {
+ dev_err(dev, "Error reading PWM0_DUTY_CYCLE %d\n", error);
+ return;
+ }
+
+ clk_div = (clk_div_reg & ~PWM_OUTPUT_ENABLE) + 1;
+
+ state->period =
+ DIV_ROUND_UP(clk_div * NSEC_PER_USEC * 256, PWM_BASE_CLK_MHZ);
+ state->duty_cycle =
+ DIV_ROUND_UP_ULL(duty_cycle_reg * state->period, PWM_MAX_LEVEL);
+ state->polarity = PWM_POLARITY_NORMAL;
+ state->enabled = !!(clk_div_reg & PWM_OUTPUT_ENABLE);
+}
+
static const struct pwm_ops crc_pwm_ops = {
- .config = crc_pwm_config,
- .enable = crc_pwm_enable,
- .disable = crc_pwm_disable,
+ .apply = crc_pwm_apply,
+ .get_state = crc_pwm_get_state,
};
static int crystalcove_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 09c08dee099e..c1c337969e4e 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -81,8 +81,7 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
return cros_ec_cmd_xfer_status(ec, msg);
}
-static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
- u32 *result)
+static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
{
struct {
struct cros_ec_command msg;
@@ -107,19 +106,12 @@ static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
params->index = index;
ret = cros_ec_cmd_xfer_status(ec, msg);
- if (result)
- *result = msg->result;
if (ret < 0)
return ret;
return resp->duty;
}
-static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
-{
- return __cros_ec_pwm_get_duty(ec, index, NULL);
-}
-
static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -204,29 +196,34 @@ static const struct pwm_ops cros_ec_pwm_ops = {
.owner = THIS_MODULE,
};
+/*
+ * Determine the number of supported PWMs. The EC does not return the number
+ * of PWMs it supports directly, so we have to read the PWM duty cycle for
+ * subsequent channels until we get an error.
+ */
static int cros_ec_num_pwms(struct cros_ec_device *ec)
{
int i, ret;
/* The index field is only 8 bits */
for (i = 0; i <= U8_MAX; i++) {
- u32 result = 0;
-
- ret = __cros_ec_pwm_get_duty(ec, i, &result);
- /* We want to parse EC protocol errors */
- if (ret < 0 && !(ret == -EPROTO && result))
- return ret;
-
+ ret = cros_ec_pwm_get_duty(ec, i);
/*
* We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
* responses; everything else is treated as an error.
+ * The EC error codes map to -EOPNOTSUPP and -EINVAL,
+ * so check for those.
*/
- if (result == EC_RES_INVALID_COMMAND)
+ switch (ret) {
+ case -EOPNOTSUPP: /* invalid command */
return -ENODEV;
- else if (result == EC_RES_INVALID_PARAM)
+ case -EINVAL: /* invalid parameter */
return i;
- else if (result)
- return -EPROTO;
+ default:
+ if (ret < 0)
+ return ret;
+ break;
+ }
}
return U8_MAX;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 599a0f66a384..a34d95ed70b2 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -277,6 +277,8 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
+ platform_set_drvdata(pdev, pwm);
+
pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -313,7 +315,6 @@ static int img_pwm_probe(struct platform_device *pdev)
goto err_suspend;
}
- platform_set_drvdata(pdev, pwm);
return 0;
err_suspend:
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 5830ac2bdf6a..00c642fa2eed 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -60,12 +60,9 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
clk = clk_get(chip->dev, name);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(chip->dev, "Failed to get clock: %pe", clk);
-
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(chip->dev, PTR_ERR(clk),
+ "Failed to get clock\n");
err = clk_prepare_enable(clk);
if (err < 0) {
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 48f34d20aecd..c6502cf7a7af 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -89,7 +89,6 @@ static int pwm_lpss_prepare(struct device *dev)
static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
.prepare = pwm_lpss_prepare,
- SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume)
};
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 9d965ffe66d1..3444c56b4bed 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -85,7 +85,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
unsigned long long on_time_div;
unsigned long c = lpwm->info->clk_rate, base_unit_range;
unsigned long long base_unit, freq = NSEC_PER_SEC;
- u32 orig_ctrl, ctrl;
+ u32 ctrl;
do_div(freq, period_ns);
@@ -93,26 +93,25 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
* The equation is:
* base_unit = round(base_unit_range * freq / c)
*/
- base_unit_range = BIT(lpwm->info->base_unit_bits) - 1;
+ base_unit_range = BIT(lpwm->info->base_unit_bits);
freq *= base_unit_range;
base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
+ /* base_unit must not be 0 and we also want to avoid overflowing it */
+ base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
on_time_div = 255ULL * duty_ns;
do_div(on_time_div, period_ns);
on_time_div = 255ULL - on_time_div;
- orig_ctrl = ctrl = pwm_lpss_read(pwm);
+ ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK;
- ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
- base_unit &= base_unit_range;
+ ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div;
- if (orig_ctrl != ctrl) {
- pwm_lpss_write(pwm, ctrl);
- pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
- }
+ pwm_lpss_write(pwm, ctrl);
+ pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
}
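To put numbers on the updated base_unit computation, assume a 19.2 MHz input clock and 22 base-unit bits (both hypothetical here; the real values are per-board): a 5 ms period gives freq = 200 Hz, range = 4194304, and base_unit = round(200 * 4194304 / 19200000) = 44, comfortably inside the clamped [1, range - 1] window. A standalone check in plain C:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t clk_rate = 19200000;		/* assumed clock */
    	unsigned int base_unit_bits = 22;	/* assumed width */
    	uint64_t period_ns = 5000000;		/* 200 Hz */

    	uint64_t range = 1ULL << base_unit_bits;
    	uint64_t freq = 1000000000ULL / period_ns;
    	/* DIV_ROUND_CLOSEST_ULL(freq * range, clk_rate) */
    	uint64_t base_unit = (freq * range + clk_rate / 2) / clk_rate;

    	if (base_unit < 1)		/* clamp_val(..., 1, range - 1) */
    		base_unit = 1;
    	if (base_unit > range - 1)
    		base_unit = range - 1;

    	printf("base_unit = %llu\n",
    	       (unsigned long long)base_unit);	/* prints 44 */
    	return 0;
    }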
static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
@@ -121,41 +120,47 @@ static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
}
+static int pwm_lpss_prepare_enable(struct pwm_lpss_chip *lpwm,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int ret;
+
+ ret = pwm_lpss_is_updating(pwm);
+ if (ret)
+ return ret;
+
+ pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
+ pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+ ret = pwm_lpss_wait_for_update(pwm);
+ if (ret)
+ return ret;
+
+ pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
+ return 0;
+}
+
static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
- int ret;
+ int ret = 0;
if (state->enabled) {
if (!pwm_is_enabled(pwm)) {
pm_runtime_get_sync(chip->dev);
- ret = pwm_lpss_is_updating(pwm);
- if (ret) {
- pm_runtime_put(chip->dev);
- return ret;
- }
- pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
- ret = pwm_lpss_wait_for_update(pwm);
- if (ret) {
+ ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
+ if (ret)
pm_runtime_put(chip->dev);
- return ret;
- }
- pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
} else {
- ret = pwm_lpss_is_updating(pwm);
- if (ret)
- return ret;
- pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
- return pwm_lpss_wait_for_update(pwm);
+ ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
pm_runtime_put(chip->dev);
}
- return 0;
+ return ret;
}
static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -255,30 +260,6 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
}
EXPORT_SYMBOL_GPL(pwm_lpss_remove);
-int pwm_lpss_suspend(struct device *dev)
-{
- struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < lpwm->info->npwm; i++)
- lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
-
-int pwm_lpss_resume(struct device *dev)
-{
- struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < lpwm->info->npwm; i++)
- writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_lpss_resume);
-
MODULE_DESCRIPTION("PWM driver for Intel LPSS");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index 7909fa12fca2..70db7e389d66 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -19,7 +19,6 @@ struct pwm_lpss_chip {
struct pwm_chip chip;
void __iomem *regs;
const struct pwm_lpss_boardinfo *info;
- u32 saved_ctrl[MAX_PWMS];
};
struct pwm_lpss_boardinfo {
@@ -37,7 +36,5 @@ struct pwm_lpss_boardinfo {
struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
const struct pwm_lpss_boardinfo *info);
int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
-int pwm_lpss_suspend(struct device *dev);
-int pwm_lpss_resume(struct device *dev);
#endif /* __PWM_LPSS_H */
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 76cd22bd6614..4a55dc18656c 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -57,10 +57,14 @@
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
-#define LED_FULL (1 << 4)
-#define MODE1_SLEEP (1 << 4)
-#define MODE2_INVRT (1 << 4)
-#define MODE2_OUTDRV (1 << 2)
+#define LED_FULL BIT(4)
+#define MODE1_ALLCALL BIT(0)
+#define MODE1_SUB3 BIT(1)
+#define MODE1_SUB2 BIT(2)
+#define MODE1_SUB1 BIT(3)
+#define MODE1_SLEEP BIT(4)
+#define MODE2_INVRT BIT(4)
+#define MODE2_OUTDRV BIT(2)
#define LED_N_ON_H(N) (PCA9685_LEDX_ON_H + (4 * (N)))
#define LED_N_ON_L(N) (PCA9685_LEDX_ON_L + (4 * (N)))
@@ -91,7 +95,7 @@ static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
mutex_lock(&pca->lock);
if (pwm_idx >= PCA9685_MAXCHAN) {
/*
- * "all LEDs" channel:
+ * "All LEDs" channel:
* pretend already in use if any of the PWMs are requested
*/
if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
@@ -100,7 +104,7 @@ static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
}
} else {
/*
- * regular channel:
+ * Regular channel:
* pretend already in use if the "all LEDs" channel is requested
*/
if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
@@ -257,7 +261,7 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (prescale >= PCA9685_PRESCALE_MIN &&
prescale <= PCA9685_PRESCALE_MAX) {
/*
- * putting the chip briefly into SLEEP mode
+ * Putting the chip briefly into SLEEP mode
* at this point won't interfere with the
* pm_runtime framework, because the pm_runtime
* state is guaranteed active here.
@@ -443,8 +447,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca9685 *pca;
+ unsigned int reg;
int ret;
- int mode2;
pca = devm_kzalloc(&client->dev, sizeof(*pca), GFP_KERNEL);
if (!pca)
@@ -461,26 +465,31 @@ static int pca9685_pwm_probe(struct i2c_client *client,
i2c_set_clientdata(client, pca);
- regmap_read(pca->regmap, PCA9685_MODE2, &mode2);
+ regmap_read(pca->regmap, PCA9685_MODE2, &reg);
if (device_property_read_bool(&client->dev, "invert"))
- mode2 |= MODE2_INVRT;
+ reg |= MODE2_INVRT;
else
- mode2 &= ~MODE2_INVRT;
+ reg &= ~MODE2_INVRT;
if (device_property_read_bool(&client->dev, "open-drain"))
- mode2 &= ~MODE2_OUTDRV;
+ reg &= ~MODE2_OUTDRV;
else
- mode2 |= MODE2_OUTDRV;
+ reg |= MODE2_OUTDRV;
+
+ regmap_write(pca->regmap, PCA9685_MODE2, reg);
- regmap_write(pca->regmap, PCA9685_MODE2, mode2);
+ /* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
+ regmap_read(pca->regmap, PCA9685_MODE1, &reg);
+ reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
+ regmap_write(pca->regmap, PCA9685_MODE1, reg);
- /* clear all "full off" bits */
+ /* Clear all "full off" bits */
regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_L, 0);
regmap_write(pca->regmap, PCA9685_ALL_LED_OFF_H, 0);
pca->chip.ops = &pca9685_pwm_ops;
- /* add an extra channel for ALL_LED */
+ /* Add an extra channel for ALL_LED */
pca->chip.npwm = PCA9685_MAXCHAN + 1;
pca->chip.dev = &client->dev;
@@ -496,10 +505,10 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return ret;
}
- /* the chip comes out of power-up in the active state */
+ /* The chip comes out of power-up in the active state */
pm_runtime_set_active(&client->dev);
/*
- * enable will put the chip into suspend, which is what we
+ * Enable will put the chip into suspend, which is what we
* want as all outputs are disabled at this point
*/
pm_runtime_enable(&client->dev);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index eb8c9cb645a6..77c23a2c6d71 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
struct resource *r;
+ u32 enable_conf, ctrl;
int ret, count;
id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
@@ -306,13 +307,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(pc->clk)) {
pc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pc->clk)) {
- ret = PTR_ERR(pc->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get bus clk: %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(pc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
+ "Can't get bus clk\n");
}
count = of_count_phandle_with_args(pdev->dev.of_node,
@@ -362,7 +359,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
}
/* Keep the PWM clk enabled if the PWM appears to be up and running. */
- if (!pwm_is_enabled(pc->chip.pwms))
+ enable_conf = pc->data->enable_conf;
+ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((ctrl & enable_conf) != enable_conf)
clk_disable(pc->clk);
return 0;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 62de0bb85921..2485fbaaead2 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -254,11 +254,9 @@ static int pwm_sifive_probe(struct platform_device *pdev)
return PTR_ERR(ddata->regs);
ddata->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddata->clk)) {
- if (PTR_ERR(ddata->clk) != -EPROBE_DEFER)
- dev_err(dev, "Unable to find controller clock\n");
- return PTR_ERR(ddata->clk);
- }
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(dev, PTR_ERR(ddata->clk),
+ "Unable to find controller clock\n");
ret = clk_prepare_enable(ddata->clk);
if (ret) {
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
new file mode 100644
index 000000000000..5046b6b7fd35
--- /dev/null
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld PWM driver
+ *
+ * Copyright (c) 2020 Michael Walle <michael@walle.cc>
+ *
+ * There is no public datasheet available for this PWM core, but it is
+ * simple enough to explain briefly. It consists of one 8-bit counter. The PWM
+ * supports four distinct frequencies by selecting when to reset the counter.
+ * With the prescaler setting you can select which bit of the counter is used
+ * to reset it. This implies that the higher the frequency, the fewer bits
+ * remain available for the actual counter.
+ *
+ * Let cnt[7:0] be the counter, clocked at 32kHz:
+ * +-----------+--------+--------------+-----------+---------------+
+ * | prescaler | reset | counter bits | frequency | period length |
+ * +-----------+--------+--------------+-----------+---------------+
+ * | 0 | cnt[7] | cnt[6:0] | 250 Hz | 4000000 ns |
+ * | 1 | cnt[6] | cnt[5:0] | 500 Hz | 2000000 ns |
+ * | 2 | cnt[5] | cnt[4:0] | 1 kHz | 1000000 ns |
+ * | 3 | cnt[4] | cnt[3:0] | 2 kHz | 500000 ns |
+ * +-----------+--------+--------------+-----------+---------------+
+ *
+ * Limitations:
+ * - The hardware cannot generate a 100% duty cycle if the prescaler is 0.
+ * - The hardware cannot atomically set the prescaler and the counter value,
+ * which might lead to glitches and inconsistent states if a write fails.
+ * - The counter is not reset when the prescaler is switched, which also
+ *   leads to glitches.
+ * - The duty cycle will switch immediately and not after a complete cycle.
+ * - Depending on the actual implementation, disabling the PWM might have
+ * side effects. For example, if the output pin is shared with a GPIO pin
+ * it will automatically switch back to GPIO mode.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+/*
+ * PWM timer block registers.
+ */
+#define SL28CPLD_PWM_CTRL 0x00
+#define SL28CPLD_PWM_CTRL_ENABLE BIT(7)
+#define SL28CPLD_PWM_CTRL_PRESCALER_MASK GENMASK(1, 0)
+#define SL28CPLD_PWM_CYCLE 0x01
+#define SL28CPLD_PWM_CYCLE_MAX GENMASK(6, 0)
+
+#define SL28CPLD_PWM_CLK 32000 /* 32 kHz */
+#define SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler) (1 << (7 - (prescaler)))
+#define SL28CPLD_PWM_PERIOD(prescaler) \
+ (NSEC_PER_SEC / SL28CPLD_PWM_CLK * SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler))
+
+/*
+ * We calculate the duty cycle like this:
+ * duty_cycle_ns = pwm_cycle_reg * max_period_ns / max_duty_cycle
+ *
+ * With
+ * max_period_ns = 1 << (7 - prescaler) / SL28CPLD_PWM_CLK * NSEC_PER_SEC
+ * max_duty_cycle = 1 << (7 - prescaler)
+ * this then simplifies to:
+ * duty_cycle_ns = pwm_cycle_reg / SL28CPLD_PWM_CLK * NSEC_PER_SEC
+ * = NSEC_PER_SEC / SL28CPLD_PWM_CLK * pwm_cycle_reg
+ *
+ * NSEC_PER_SEC is a multiple of SL28CPLD_PWM_CLK, therefore we're not losing
+ * precision by doing the division first.
+ */
+#define SL28CPLD_PWM_TO_DUTY_CYCLE(reg) \
+ (NSEC_PER_SEC / SL28CPLD_PWM_CLK * (reg))
+#define SL28CPLD_PWM_FROM_DUTY_CYCLE(duty_cycle) \
+ (DIV_ROUND_DOWN_ULL((duty_cycle), NSEC_PER_SEC / SL28CPLD_PWM_CLK))
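The macros above reproduce the table from the header comment exactly; a standalone check in plain C (not driver code):

    #include <stdio.h>

    #define PWM_CLK		32000	/* SL28CPLD_PWM_CLK */
    #define MAX_DUTY(p)	(1 << (7 - (p)))
    #define PERIOD_NS(p)	(1000000000L / PWM_CLK * MAX_DUTY(p))

    int main(void)
    {
    	/* Prints 4000000/2000000/1000000/500000 ns, i.e. 250 Hz .. 2 kHz. */
    	for (int p = 0; p < 4; p++)
    		printf("prescaler %d: %ld ns (%ld Hz)\n",
    		       p, PERIOD_NS(p), 1000000000L / PERIOD_NS(p));
    	return 0;
    }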
+
+#define sl28cpld_pwm_read(priv, reg, val) \
+ regmap_read((priv)->regmap, (priv)->offset + (reg), (val))
+#define sl28cpld_pwm_write(priv, reg, val) \
+ regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
+
+struct sl28cpld_pwm {
+ struct pwm_chip pwm_chip;
+ struct regmap *regmap;
+ u32 offset;
+};
+
+static void sl28cpld_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct sl28cpld_pwm *priv = dev_get_drvdata(chip->dev);
+ unsigned int reg;
+ int prescaler;
+
+ sl28cpld_pwm_read(priv, SL28CPLD_PWM_CTRL, &reg);
+
+ state->enabled = reg & SL28CPLD_PWM_CTRL_ENABLE;
+
+ prescaler = FIELD_GET(SL28CPLD_PWM_CTRL_PRESCALER_MASK, reg);
+ state->period = SL28CPLD_PWM_PERIOD(prescaler);
+
+ sl28cpld_pwm_read(priv, SL28CPLD_PWM_CYCLE, &reg);
+ state->duty_cycle = SL28CPLD_PWM_TO_DUTY_CYCLE(reg);
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ /*
+ * Sanitize values for the PWM core. Depending on the prescaler it
+ * might happen that we calculate a duty_cycle greater than the actual
+ * period. This might happen if someone (e.g. the bootloader) sets an
+ * invalid combination of values. The behavior of the hardware is
+ * undefined in this case. But we need to report sane values back to
+ * the PWM core.
+ */
+ state->duty_cycle = min(state->duty_cycle, state->period);
+}
+
+static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct sl28cpld_pwm *priv = dev_get_drvdata(chip->dev);
+ unsigned int cycle, prescaler;
+ bool write_duty_cycle_first;
+ int ret;
+ u8 ctrl;
+
+ /* Polarity inversion is not supported */
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ /*
+ * Calculate the prescaler. Pick the biggest period that isn't
+ * bigger than the requested period.
+ */
+ prescaler = DIV_ROUND_UP_ULL(SL28CPLD_PWM_PERIOD(0), state->period);
+ prescaler = order_base_2(prescaler);
+
+ if (prescaler > field_max(SL28CPLD_PWM_CTRL_PRESCALER_MASK))
+ return -ERANGE;
+
+ ctrl = FIELD_PREP(SL28CPLD_PWM_CTRL_PRESCALER_MASK, prescaler);
+ if (state->enabled)
+ ctrl |= SL28CPLD_PWM_CTRL_ENABLE;
+
+ cycle = SL28CPLD_PWM_FROM_DUTY_CYCLE(state->duty_cycle);
+ cycle = min_t(unsigned int, cycle, SL28CPLD_PWM_MAX_DUTY_CYCLE(prescaler));
+
+ /*
+ * Work around the hardware limitation. See also above. Trap a 100%
+ * duty cycle if the prescaler is 0 and set the prescaler to 1
+ * instead. We don't care about the frequency because the output is
+ * "all-one" in either case.
+ *
+ * We don't need to check the actual prescaler setting, because this
+ * particular value can only occur if the prescaler is 0.
+ */
+ if (cycle == SL28CPLD_PWM_MAX_DUTY_CYCLE(0)) {
+ ctrl &= ~SL28CPLD_PWM_CTRL_PRESCALER_MASK;
+ ctrl |= FIELD_PREP(SL28CPLD_PWM_CTRL_PRESCALER_MASK, 1);
+ cycle = SL28CPLD_PWM_MAX_DUTY_CYCLE(1);
+ }
+
+ /*
+ * To avoid glitches when we switch the prescaler, we have to make sure
+ * we have a valid duty cycle for the new mode.
+ *
+ * Take the current prescaler (or the current period length) into
+ * account to decide whether we have to write the duty cycle or the new
+ * prescaler first. If the period length is decreasing we have to
+ * write the duty cycle first.
+ */
+ write_duty_cycle_first = pwm->state.period > state->period;
+
+ if (write_duty_cycle_first) {
+ ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CYCLE, cycle);
+ if (ret)
+ return ret;
+ }
+
+ ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CTRL, ctrl);
+ if (ret)
+ return ret;
+
+ if (!write_duty_cycle_first) {
+ ret = sl28cpld_pwm_write(priv, SL28CPLD_PWM_CYCLE, cycle);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pwm_ops sl28cpld_pwm_ops = {
+ .apply = sl28cpld_pwm_apply,
+ .get_state = sl28cpld_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int sl28cpld_pwm_probe(struct platform_device *pdev)
+{
+ struct sl28cpld_pwm *priv;
+ struct pwm_chip *chip;
+ int ret;
+
+ if (!pdev->dev.parent) {
+ dev_err(&pdev->dev, "no parent device\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "could not get parent regmap\n");
+ return -ENODEV;
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &priv->offset);
+ if (ret) {
+ dev_err(&pdev->dev, "no 'reg' property found (%pe)\n",
+ ERR_PTR(ret));
+ return -EINVAL;
+ }
+
+ /* Initialize the pwm_chip structure */
+ chip = &priv->pwm_chip;
+ chip->dev = &pdev->dev;
+ chip->ops = &sl28cpld_pwm_ops;
+ chip->base = -1;
+ chip->npwm = 1;
+
+ ret = pwmchip_add(&priv->pwm_chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int sl28cpld_pwm_remove(struct platform_device *pdev)
+{
+ struct sl28cpld_pwm *priv = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&priv->pwm_chip);
+}
+
+static const struct of_device_id sl28cpld_pwm_of_match[] = {
+ { .compatible = "kontron,sl28cpld-pwm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_pwm_of_match);
+
+static struct platform_driver sl28cpld_pwm_driver = {
+ .probe = sl28cpld_pwm_probe,
+ .remove = sl28cpld_pwm_remove,
+ .driver = {
+ .name = "sl28cpld-pwm",
+ .of_match_table = sl28cpld_pwm_of_match,
+ },
+};
+module_platform_driver(sl28cpld_pwm_driver);
+
+MODULE_DESCRIPTION("sl28cpld PWM Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index be2394227423..5123d948efd6 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -228,11 +228,8 @@ static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
if (ret == -ENOENT)
break;
- if (ret != -EPROBE_DEFER)
- dev_err(spc->dev,
- "failed to get channel clocks\n");
-
- return ret;
+ return dev_err_probe(spc->dev, ret,
+ "failed to get channel clocks\n");
}
clk_pwm = chn->clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 961c59c99bb3..38a4c5c1317b 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -423,38 +423,26 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
* back to the first clock of the PWM.
*/
pwm->clk = devm_clk_get_optional(&pdev->dev, "mod");
- if (IS_ERR(pwm->clk)) {
- if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "get mod clock failed %pe\n",
- pwm->clk);
- return PTR_ERR(pwm->clk);
- }
+ if (IS_ERR(pwm->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pwm->clk),
+ "get mod clock failed\n");
if (!pwm->clk) {
pwm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm->clk)) {
- if (PTR_ERR(pwm->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "get unnamed clock failed %pe\n",
- pwm->clk);
- return PTR_ERR(pwm->clk);
- }
+ if (IS_ERR(pwm->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pwm->clk),
+ "get unnamed clock failed\n");
}
pwm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
- if (IS_ERR(pwm->bus_clk)) {
- if (PTR_ERR(pwm->bus_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "get bus clock failed %pe\n",
- pwm->bus_clk);
- return PTR_ERR(pwm->bus_clk);
- }
+ if (IS_ERR(pwm->bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pwm->bus_clk),
+ "get bus clock failed\n");
pwm->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
- if (IS_ERR(pwm->rst)) {
- if (PTR_ERR(pwm->rst) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "get reset failed %pe\n",
- pwm->rst);
- return PTR_ERR(pwm->rst);
- }
+ if (IS_ERR(pwm->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pwm->rst),
+ "get reset failed\n");
/* Deassert reset */
ret = reset_control_deassert(pwm->rst);
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 449dbc0f49ed..9903c3a7eced 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -87,10 +87,10 @@ static ssize_t duty_cycle_store(struct device *child,
struct pwm_export *export = child_to_pwm_export(child);
struct pwm_device *pwm = export->pwm;
struct pwm_state state;
- unsigned int val;
+ u64 val;
int ret;
- ret = kstrtouint(buf, 0, &val);
+ ret = kstrtou64(buf, 0, &val);
if (ret)
return ret;
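The type change matters because struct pwm_state stores period and duty_cycle as 64-bit nanosecond counts, while the old kstrtouint() parse rejected any sysfs write above UINT_MAX nanoseconds with -ERANGE:

    UINT_MAX = 4294967295 ns ~ 4.295 s  (old effective limit)
    kstrtou64() matches the u64 width of pwm_state.duty_cycle and lifts the cap.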
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index a30342942e26..94331d999d27 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -871,15 +871,16 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
rmcd_error("pin_user_pages_fast err=%ld",
pinned);
nr_pages = 0;
- } else
+ } else {
rmcd_error("pinned %ld out of %ld pages",
pinned, nr_pages);
+ /*
+ * Set nr_pages to mean "how many pages to unpin" in
+ * the error handler:
+ */
+ nr_pages = pinned;
+ }
ret = -EFAULT;
- /*
- * Set nr_pages up to mean "how many pages to unpin, in
- * the error handler:
- */
- nr_pages = pinned;
goto err_pg;
}
@@ -1679,6 +1680,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
struct rio_dev *rdev;
struct rio_switch *rswitch = NULL;
struct rio_mport *mport;
+ struct device *dev;
size_t size;
u32 rval;
u32 swpinfo = 0;
@@ -1693,8 +1695,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);
- if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
+ dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
+ if (dev) {
rmcd_debug(RDEV, "device %s already exists", dev_info.name);
+ put_device(dev);
return -EEXIST;
}
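bus_find_device_by_name() takes a reference on the device it returns, so even the "already exists" path has to drop it - which is what the added put_device() does. A minimal sketch of the pairing, with a hypothetical bus and device name:

    struct device *dev;

    dev = bus_find_device_by_name(&example_bus_type, NULL, "example0");
    if (dev) {
        /* ... inspect the existing device ... */
        put_device(dev); /* balance the reference taken by the lookup */
    }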
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 569d9ad2c594..ddecf25b5dd4 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -435,7 +435,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(action_threshold_ops, u64_get, action_threshold_set, "%
static const char * const bins[] = { "00", "01", "10", "11" };
-static int array_dump(struct seq_file *m, void *v)
+static int array_show(struct seq_file *m, void *v)
{
struct ce_array *ca = &ce_arr;
int i;
@@ -467,18 +467,7 @@ static int array_dump(struct seq_file *m, void *v)
return 0;
}
-static int array_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, array_dump, NULL);
-}
-
-static const struct file_operations array_ops = {
- .owner = THIS_MODULE,
- .open = array_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(array);
static int __init create_debugfs_nodes(void)
{
@@ -513,7 +502,7 @@ static int __init create_debugfs_nodes(void)
goto err;
}
- array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_ops);
+ array = debugfs_create_file("array", S_IRUSR, d, NULL, &array_fops);
if (!array) {
pr_warn("Error creating array debugfs node!\n");
goto err;
@@ -553,20 +542,20 @@ static struct notifier_block cec_nb = {
.priority = MCE_PRIO_CEC,
};
-static void __init cec_init(void)
+static int __init cec_init(void)
{
if (ce_arr.disabled)
- return;
+ return -ENODEV;
ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
if (!ce_arr.array) {
pr_err("Error allocating CE array page!\n");
- return;
+ return -ENOMEM;
}
if (create_debugfs_nodes()) {
free_page((unsigned long)ce_arr.array);
- return;
+ return -ENOMEM;
}
INIT_DELAYED_WORK(&cec_work, cec_work_fn);
@@ -575,6 +564,7 @@ static void __init cec_init(void)
mce_register_decode_chain(&cec_nb);
pr_info("Correctable Errors collector initialized.\n");
+ return 0;
}
late_initcall(cec_init);
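In the cec.c changes above, DEFINE_SHOW_ATTRIBUTE(array) is why array_dump() became array_show() and array_ops became array_fops: the macro derives both names from its argument. Roughly, from include/linux/seq_file.h:

    #define DEFINE_SHOW_ATTRIBUTE(__name)                               \
    static int __name ## _open(struct inode *inode, struct file *file)  \
    {                                                                   \
        return single_open(file, __name ## _show, inode->i_private);    \
    }                                                                   \
                                                                        \
    static const struct file_operations __name ## _fops = {             \
        .owner   = THIS_MODULE,                                         \
        .open    = __name ## _open,                                     \
        .read    = seq_read,                                            \
        .llseek  = seq_lseek,                                           \
        .release = single_release,                                      \
    }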
diff --git a/drivers/regulator/88pg86x.c b/drivers/regulator/88pg86x.c
index 71cfa2c5de5e..e91d5885c5ef 100644
--- a/drivers/regulator/88pg86x.c
+++ b/drivers/regulator/88pg86x.c
@@ -84,7 +84,7 @@ static int pg86x_i2c_probe(struct i2c_client *i2c)
return 0;
}
-static const struct of_device_id pg86x_dt_ids [] = {
+static const struct of_device_id __maybe_unused pg86x_dt_ids[] = {
{ .compatible = "marvell,88pg867" },
{ .compatible = "marvell,88pg868" },
{ }
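The __maybe_unused annotation is needed because the table is presumably referenced through of_match_ptr(), which compiles to NULL when CONFIG_OF is disabled, leaving pg86x_dt_ids otherwise unreferenced and triggering -Wunused-const-variable. From include/linux/of.h, abbreviated:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr) (_ptr)
    #else
    #define of_match_ptr(_ptr) NULL
    #endif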
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index de17ef7e18f0..020a00d6696b 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -231,6 +231,16 @@ config REGULATOR_BD9571MWV
This driver can also be built as a module. If so, the module
will be called bd9571mwv-regulator.
+config REGULATOR_BD957XMUF
+ tristate "ROHM BD9576MUF and BD9573MUF Regulators"
+ depends on MFD_ROHM_BD957XMUF
+ help
+ This driver supports voltage regulators on ROHM BD9576MUF and
+ BD9573MUF PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called bd9576-regulator.
+
config REGULATOR_CPCAP
tristate "Motorola CPCAP regulator"
depends on MFD_CPCAP
@@ -500,7 +510,7 @@ config REGULATOR_MAX1586
config REGULATOR_MAX77620
tristate "Maxim 77620/MAX20024 voltage regulator"
- depends on MFD_MAX77620
+ depends on MFD_MAX77620 || COMPILE_TEST
help
This driver controls Maxim MAX77620 voltage output regulator
via I2C bus. The provided regulator is suitable for Tegra
@@ -509,7 +519,7 @@ config REGULATOR_MAX77620
config REGULATOR_MAX77650
tristate "Maxim MAX77650/77651 regulator support"
- depends on MFD_MAX77650
+ depends on MFD_MAX77650 || COMPILE_TEST
help
Regulator driver for MAX77650/77651 PMIC from Maxim
Semiconductor. This device has a SIMO with three independent
@@ -532,7 +542,7 @@ config REGULATOR_MAX8660
config REGULATOR_MAX8907
tristate "Maxim 8907 voltage regulator"
- depends on MFD_MAX8907
+ depends on MFD_MAX8907 || COMPILE_TEST
help
This driver controls a Maxim 8907 voltage output regulator
via I2C bus. The provided regulator is suitable for Tegra
@@ -582,7 +592,7 @@ config REGULATOR_MAX8998
config REGULATOR_MAX77686
tristate "Maxim 77686 regulator"
- depends on MFD_MAX77686
+ depends on MFD_MAX77686 || COMPILE_TEST
help
This driver controls a Maxim 77686 regulator
via I2C bus. The provided regulator is suitable for
@@ -590,7 +600,7 @@ config REGULATOR_MAX77686
config REGULATOR_MAX77693
tristate "Maxim 77693/77843 regulator"
- depends on (MFD_MAX77693 || MFD_MAX77843)
+ depends on MFD_MAX77693 || MFD_MAX77843 || COMPILE_TEST
help
This driver controls a Maxim 77693/77843 regulators via I2C bus.
The regulators include two LDOs, 'SAFEOUT1', 'SAFEOUT2'
@@ -599,7 +609,7 @@ config REGULATOR_MAX77693
config REGULATOR_MAX77802
tristate "Maxim 77802 regulator"
- depends on MFD_MAX77686
+ depends on MFD_MAX77686 || COMPILE_TEST
help
This driver controls a Maxim 77802 regulator
via I2C bus. The provided regulator is suitable for
@@ -711,6 +721,15 @@ config REGULATOR_MT6358
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6360
+ tristate "MT6360 SubPMIC Regulator"
+ depends on MFD_MT6360
+ help
+ Say Y here to enable MT6360 regulator support.
+ This supports the MT6360 PMIC/LDO part, including a
+ 2-channel buck with thermal shutdown and overload protection
+ and a 6-channel high-PSRR, low-dropout LDO.
+
config REGULATOR_MT6380
tristate "MediaTek MT6380 PMIC"
depends on MTK_PMIC_WRAP
@@ -864,6 +883,16 @@ config REGULATOR_QCOM_USB_VBUS
Say M here if you want to include support for enabling the VBUS output
as a module. The module will be named "qcom_usb_vbus_regulator".
+config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY
+ tristate "Raspberry Pi 7-inch touchscreen panel ATTINY regulator"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports the ATTINY regulator on the Raspberry Pi 7-inch
+ touchscreen unit. The regulator is used to enable power to the
+ TC358762 and the display, and to control the backlight.
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
@@ -894,6 +923,14 @@ config REGULATOR_RN5T618
config REGULATOR_ROHM
tristate
+config REGULATOR_RT4801
+ tristate "Richtek RT4801 Regulators"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This adds support for voltage regulators in the Richtek RT4801 Display Bias IC.
+ The device supports two regulators (DSVP/DSVN).
+
config REGULATOR_RT5033
tristate "Richtek RT5033 Regulators"
depends on MFD_RT5033
@@ -902,16 +939,25 @@ config REGULATOR_RT5033
RT5033 PMIC. The device supports multiple regulators like
current source, LDO and Buck.
+config REGULATOR_RTMV20
+ tristate "RTMV20 Laser Diode Regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver adds support for the load switch current regulator on
+ the Richtek RTMV20. It supports load currents up to 6A and
+ integrates the strobe/vsync/fsin signals to synchronize the IR camera.
+
config REGULATOR_S2MPA01
tristate "Samsung S2MPA01 voltage regulator"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
help
This driver controls Samsung S2MPA01 voltage output regulator
via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
config REGULATOR_S2MPS11
tristate "Samsung S2MPS11/13/14/15/S2MPU02 voltage regulator"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
help
This driver supports a Samsung S2MPS11/13/14/15/S2MPU02 voltage
output regulator via I2C bus. The chip is comprised of high efficient
@@ -920,7 +966,7 @@ config REGULATOR_S2MPS11
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
- depends on MFD_SEC_CORE
+ depends on MFD_SEC_CORE || COMPILE_TEST
help
This driver supports a Samsung S5M8767A voltage output regulator
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index d8d3ecf526a8..6ebae516258e 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
+obj-$(CONFIG_REGULATOR_BD957XMUF) += bd9576-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
+obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_LABIBB) += qcom-labibb-regulator.o
@@ -107,11 +109,14 @@ obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY) += rpi-panel-attiny-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_RK808) += rk808-regulator.o
obj-$(CONFIG_REGULATOR_RN5T618) += rn5t618-regulator.o
obj-$(CONFIG_REGULATOR_ROHM) += rohm-regulator.o
+obj-$(CONFIG_REGULATOR_RT4801) += rt4801-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
+obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 7b311389f925..0774467994fb 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -16,6 +16,39 @@
#include <linux/slab.h>
/*
+ * The BD718(37/47/50) have two "enable control modes": ON/OFF can be
+ * controlled either by software or by the PMIC's internal HW state machine.
+ * Whether a regulator should be under SW or HW control can be defined in the
+ * device tree. Let's provide separate ops for regulators to use depending on
+ * the "enable control mode".
+ */
+#define BD718XX_HWOPNAME(swopname) swopname##_hwcontrol
+
+#define BD718XX_OPS(name, _list_voltage, _map_voltage, _set_voltage_sel, \
+ _get_voltage_sel, _set_voltage_time_sel, _set_ramp_delay) \
+static const struct regulator_ops name = { \
+ .enable = regulator_enable_regmap, \
+ .disable = regulator_disable_regmap, \
+ .is_enabled = regulator_is_enabled_regmap, \
+ .list_voltage = (_list_voltage), \
+ .map_voltage = (_map_voltage), \
+ .set_voltage_sel = (_set_voltage_sel), \
+ .get_voltage_sel = (_get_voltage_sel), \
+ .set_voltage_time_sel = (_set_voltage_time_sel), \
+ .set_ramp_delay = (_set_ramp_delay), \
+}; \
+ \
+static const struct regulator_ops BD718XX_HWOPNAME(name) = { \
+ .is_enabled = always_enabled_by_hwstate, \
+ .list_voltage = (_list_voltage), \
+ .map_voltage = (_map_voltage), \
+ .set_voltage_sel = (_set_voltage_sel), \
+ .get_voltage_sel = (_get_voltage_sel), \
+ .set_voltage_time_sel = (_set_voltage_time_sel), \
+ .set_ramp_delay = (_set_ramp_delay), \
+} \
+
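+/*
+ * Usage sketch (hypothetical name): BD718XX_OPS(example_ops, ...) expands
+ * to two regulator_ops structs - 'example_ops', with SW enable/disable/
+ * is_enabled via regmap, and 'example_ops_hwcontrol', whose .is_enabled is
+ * always_enabled_by_hwstate and which has no enable/disable callbacks.
+ */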
+/*
* BUCK1/2/3/4
* BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
* 00: 10.00mV/usec 10mV 1uS
@@ -55,6 +88,38 @@ static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
BUCK_RAMPRATE_MASK, ramp_value << 6);
}
+/* These functions are used when regulators are under HW state machine control.
+ * We assume the PMIC is in RUN state because SW is running and able to query
+ * the status. Most regulators have a fixed ON or OFF state at RUN/IDLE so for
+ * them we just return a constant. BD71837 BUCK3 and BUCK4 are exceptions as
+ * they support configuring the ON/OFF state for RUN.
+ *
+ * Note for the next hacker - these PMICs have a register from which the HW
+ * state can be read. If the RUN assumption turns out to be false in your
+ * use-case, you can implement state reading (although that is not going to
+ * be atomic) before returning the enable state.
+ */
+static int always_enabled_by_hwstate(struct regulator_dev *rdev)
+{
+ return 1;
+}
+
+static int never_enabled_by_hwstate(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static int bd71837_get_buck34_enable_hwctrl(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(BD718XX_BUCK_RUN_ON & val);
+}
/*
* On BD71837 (not on BD71847, BD71850, ...)
* Bucks 1 to 4 support DVS. PWM mode is used when voltage is changed.
@@ -71,7 +136,7 @@ static int bd718xx_buck1234_set_ramp_delay(struct regulator_dev *rdev,
static int bd71837_set_voltage_sel_restricted(struct regulator_dev *rdev,
unsigned int sel)
{
- if (regulator_is_enabled_regmap(rdev))
+ if (rdev->desc->ops->is_enabled(rdev))
return -EBUSY;
return regulator_set_voltage_sel_regmap(rdev, sel);
@@ -113,7 +178,7 @@ static int voltage_change_prepare(struct regulator_dev *rdev, unsigned int sel,
int ret;
*mask = 0;
- if (regulator_is_enabled_regmap(rdev)) {
+ if (rdev->desc->ops->is_enabled(rdev)) {
int now, new;
now = rdev->desc->ops->get_voltage_sel(rdev);
@@ -195,133 +260,90 @@ static int bd718xx_set_voltage_sel_pickable_restricted(
static int bd71837_set_voltage_sel_pickable_restricted(
struct regulator_dev *rdev, unsigned int sel)
{
- if (regulator_is_enabled_regmap(rdev))
+ if (rdev->desc->ops->is_enabled(rdev))
return -EBUSY;
return regulator_set_voltage_sel_pickable_regmap(rdev, sel);
}
-static const struct regulator_ops bd718xx_pickable_range_ldo_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
+/*
+ * OPS common for BD71847 and BD71850
+ */
+BD718XX_OPS(bd718xx_pickable_range_ldo_ops,
+ regulator_list_voltage_pickable_linear_range, NULL,
+ bd718xx_set_voltage_sel_pickable_restricted,
+ regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
+
+/* BD71847 and BD71850 LDO 5 is by default OFF at RUN state */
+static const struct regulator_ops bd718xx_ldo5_ops_hwstate = {
+ .is_enabled = never_enabled_by_hwstate,
.list_voltage = regulator_list_voltage_pickable_linear_range,
.set_voltage_sel = bd718xx_set_voltage_sel_pickable_restricted,
.get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
-
};
-static const struct regulator_ops bd71837_pickable_range_ldo_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_pickable_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_pickable_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
-};
-
-static const struct regulator_ops bd718xx_pickable_range_buck_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_pickable_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static const struct regulator_ops bd71837_pickable_range_buck_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_pickable_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_pickable_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
-
-static const struct regulator_ops bd71837_ldo_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static const struct regulator_ops bd718xx_ldo_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static const struct regulator_ops bd71837_ldo_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
-
-static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
-};
+BD718XX_OPS(bd718xx_pickable_range_buck_ops,
+ regulator_list_voltage_pickable_linear_range, NULL,
+ regulator_set_voltage_sel_pickable_regmap,
+ regulator_get_voltage_sel_pickable_regmap,
+ regulator_set_voltage_time_sel, NULL);
-static const struct regulator_ops bd718xx_buck_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
+BD718XX_OPS(bd718xx_ldo_regulator_ops, regulator_list_voltage_linear_range,
+ NULL, bd718xx_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, NULL, NULL);
-static const struct regulator_ops bd71837_buck_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_linear_range,
- .set_voltage_sel = bd71837_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
+BD718XX_OPS(bd718xx_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+ NULL, bd718xx_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, NULL, NULL);
-static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
+BD718XX_OPS(bd718xx_buck_regulator_ops, regulator_list_voltage_linear_range,
+ NULL, regulator_set_voltage_sel_regmap,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ NULL);
-static const struct regulator_ops bd71837_buck_regulator_nolinear_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
- .set_voltage_sel = bd718xx_set_voltage_sel_restricted,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
-};
+BD718XX_OPS(bd718xx_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+ regulator_map_voltage_ascend, regulator_set_voltage_sel_regmap,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ NULL);
-static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
+/*
+ * OPS for BD71837
+ */
+BD718XX_OPS(bd71837_pickable_range_ldo_ops,
+ regulator_list_voltage_pickable_linear_range, NULL,
+ bd71837_set_voltage_sel_pickable_restricted,
+ regulator_get_voltage_sel_pickable_regmap, NULL, NULL);
+
+BD718XX_OPS(bd71837_pickable_range_buck_ops,
+ regulator_list_voltage_pickable_linear_range, NULL,
+ bd71837_set_voltage_sel_pickable_restricted,
+ regulator_get_voltage_sel_pickable_regmap,
+ regulator_set_voltage_time_sel, NULL);
+
+BD718XX_OPS(bd71837_ldo_regulator_ops, regulator_list_voltage_linear_range,
+ NULL, bd71837_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, NULL, NULL);
+
+BD718XX_OPS(bd71837_ldo_regulator_nolinear_ops, regulator_list_voltage_table,
+ NULL, bd71837_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, NULL, NULL);
+
+BD718XX_OPS(bd71837_buck_regulator_ops, regulator_list_voltage_linear_range,
+ NULL, bd71837_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ NULL);
+
+BD718XX_OPS(bd71837_buck_regulator_nolinear_ops, regulator_list_voltage_table,
+ regulator_map_voltage_ascend, bd718xx_set_voltage_sel_restricted,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ NULL);
+/*
+ * BD71837 bucks 3 and 4 support defining their enable/disable state also
+ * when buck enable state is under HW state machine control. In that case the
+ * bit [2] in CTRL register is used to indicate if regulator should be ON.
+ */
+static const struct regulator_ops bd71837_buck34_ops_hwctrl = {
+ .is_enabled = bd71837_get_buck34_enable_hwctrl,
.list_voltage = regulator_list_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -330,6 +352,14 @@ static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = {
};
/*
+ * OPS for all of the ICs - BD718(37/47/50)
+ */
+BD718XX_OPS(bd718xx_dvs_buck_regulator_ops, regulator_list_voltage_linear_range,
+ NULL, regulator_set_voltage_sel_regmap,
+ regulator_get_voltage_sel_regmap, regulator_set_voltage_time_sel,
+ bd718xx_buck1234_set_ramp_delay);
+
+/*
* BD71837 BUCK1/2/3/4
* BD71847 BUCK1/2
* 0.70 to 1.30V (10mV step)
@@ -543,14 +573,37 @@ static int buck_set_hw_dvs_levels(struct device_node *np,
return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap);
}
-static const struct bd718xx_regulator_data bd71847_regulators[] = {
+static const struct regulator_ops *bd71847_swcontrol_ops[] = {
+ &bd718xx_dvs_buck_regulator_ops, &bd718xx_dvs_buck_regulator_ops,
+ &bd718xx_pickable_range_buck_ops, &bd718xx_pickable_range_buck_ops,
+ &bd718xx_buck_regulator_nolinear_ops, &bd718xx_buck_regulator_ops,
+ &bd718xx_pickable_range_ldo_ops, &bd718xx_ldo_regulator_nolinear_ops,
+ &bd718xx_ldo_regulator_ops, &bd718xx_ldo_regulator_ops,
+ &bd718xx_pickable_range_ldo_ops, &bd718xx_ldo_regulator_ops,
+};
+
+static const struct regulator_ops *bd71847_hwcontrol_ops[] = {
+ &BD718XX_HWOPNAME(bd718xx_dvs_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd718xx_dvs_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd718xx_pickable_range_buck_ops),
+ &BD718XX_HWOPNAME(bd718xx_pickable_range_buck_ops),
+ &BD718XX_HWOPNAME(bd718xx_buck_regulator_nolinear_ops),
+ &BD718XX_HWOPNAME(bd718xx_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd718xx_pickable_range_ldo_ops),
+ &BD718XX_HWOPNAME(bd718xx_ldo_regulator_nolinear_ops),
+ &BD718XX_HWOPNAME(bd718xx_ldo_regulator_ops),
+ &BD718XX_HWOPNAME(bd718xx_ldo_regulator_ops),
+ &bd718xx_ldo5_ops_hwstate,
+ &BD718XX_HWOPNAME(bd718xx_ldo_regulator_ops),
+};
+
+static struct bd718xx_regulator_data bd71847_regulators[] = {
{
.desc = {
.name = "buck1",
.of_match = of_match_ptr("BUCK1"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK1,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -585,7 +638,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("BUCK2"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK2,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -616,7 +668,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("BUCK3"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK3,
- .ops = &bd718xx_pickable_range_buck_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71847_BUCK3_VOLTAGE_NUM,
.linear_ranges = bd71847_buck3_volts,
@@ -643,7 +694,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("BUCK4"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK4,
- .ops = &bd718xx_pickable_range_buck_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71847_BUCK4_VOLTAGE_NUM,
.linear_ranges = bd71847_buck4_volts,
@@ -670,7 +720,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("BUCK5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK5,
- .ops = &bd718xx_buck_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &bd718xx_3rd_nodvs_buck_volts[0],
.n_voltages = ARRAY_SIZE(bd718xx_3rd_nodvs_buck_volts),
@@ -692,7 +741,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("BUCK6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK6,
- .ops = &bd718xx_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_4th_nodvs_buck_volts,
@@ -716,7 +764,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO1"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO1,
- .ops = &bd718xx_pickable_range_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO1_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo1_volts,
@@ -742,7 +789,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO2"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO2,
- .ops = &bd718xx_ldo_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &ldo_2_volts[0],
.vsel_reg = BD718XX_REG_LDO2_VOLT,
@@ -764,7 +810,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO3"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO3,
- .ops = &bd718xx_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO3_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo3_volts,
@@ -787,7 +832,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO4,
- .ops = &bd718xx_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO4_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo4_volts,
@@ -810,7 +854,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO5,
- .ops = &bd718xx_pickable_range_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71847_LDO5_VOLTAGE_NUM,
.linear_ranges = bd71847_ldo5_volts,
@@ -836,7 +879,6 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
.of_match = of_match_ptr("LDO6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO6,
- .ops = &bd718xx_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO6_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo6_volts,
@@ -857,14 +899,41 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
},
};
-static const struct bd718xx_regulator_data bd71837_regulators[] = {
+static const struct regulator_ops *bd71837_swcontrol_ops[] = {
+ &bd718xx_dvs_buck_regulator_ops, &bd718xx_dvs_buck_regulator_ops,
+ &bd718xx_dvs_buck_regulator_ops, &bd718xx_dvs_buck_regulator_ops,
+ &bd71837_pickable_range_buck_ops, &bd71837_buck_regulator_ops,
+ &bd71837_buck_regulator_nolinear_ops, &bd71837_buck_regulator_ops,
+ &bd71837_pickable_range_ldo_ops, &bd71837_ldo_regulator_nolinear_ops,
+ &bd71837_ldo_regulator_ops, &bd71837_ldo_regulator_ops,
+ &bd71837_ldo_regulator_ops, &bd71837_ldo_regulator_ops,
+ &bd71837_ldo_regulator_ops,
+};
+
+static const struct regulator_ops *bd71837_hwcontrol_ops[] = {
+ &BD718XX_HWOPNAME(bd718xx_dvs_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd718xx_dvs_buck_regulator_ops),
+ &bd71837_buck34_ops_hwctrl, &bd71837_buck34_ops_hwctrl,
+ &BD718XX_HWOPNAME(bd71837_pickable_range_buck_ops),
+ &BD718XX_HWOPNAME(bd71837_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_buck_regulator_nolinear_ops),
+ &BD718XX_HWOPNAME(bd71837_buck_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_pickable_range_ldo_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_nolinear_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_ops),
+ &BD718XX_HWOPNAME(bd71837_ldo_regulator_ops),
+};
+
+static struct bd718xx_regulator_data bd71837_regulators[] = {
{
.desc = {
.name = "buck1",
.of_match = of_match_ptr("BUCK1"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK1,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -898,7 +967,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK2"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK2,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -929,7 +997,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK3"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK3,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -958,7 +1025,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK4"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK4,
- .ops = &bd718xx_dvs_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_DVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_dvs_buck_volts,
@@ -987,7 +1053,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK5,
- .ops = &bd71837_pickable_range_buck_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_BUCK5_VOLTAGE_NUM,
.linear_ranges = bd71837_buck5_volts,
@@ -1014,7 +1079,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK6,
- .ops = &bd71837_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_BUCK6_VOLTAGE_NUM,
.linear_ranges = bd71837_buck6_volts,
@@ -1038,7 +1102,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK7"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK7,
- .ops = &bd71837_buck_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &bd718xx_3rd_nodvs_buck_volts[0],
.n_voltages = ARRAY_SIZE(bd718xx_3rd_nodvs_buck_volts),
@@ -1060,7 +1123,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("BUCK8"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_BUCK8,
- .ops = &bd71837_buck_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM,
.linear_ranges = bd718xx_4th_nodvs_buck_volts,
@@ -1084,7 +1146,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO1"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO1,
- .ops = &bd71837_pickable_range_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO1_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo1_volts,
@@ -1110,7 +1171,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO2"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO2,
- .ops = &bd71837_ldo_regulator_nolinear_ops,
.type = REGULATOR_VOLTAGE,
.volt_table = &ldo_2_volts[0],
.vsel_reg = BD718XX_REG_LDO2_VOLT,
@@ -1132,7 +1192,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO3"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO3,
- .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO3_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo3_volts,
@@ -1155,7 +1214,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO4"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO4,
- .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO4_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo4_volts,
@@ -1178,7 +1236,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO5,
- .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_LDO5_VOLTAGE_NUM,
.linear_ranges = bd71837_ldo5_volts,
@@ -1205,7 +1262,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO6"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO6,
- .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD718XX_LDO6_VOLTAGE_NUM,
.linear_ranges = bd718xx_ldo6_volts,
@@ -1232,7 +1288,6 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
.of_match = of_match_ptr("LDO7"),
.regulators_node = of_match_ptr("regulators"),
.id = BD718XX_LDO7,
- .ops = &bd71837_ldo_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = BD71837_LDO7_VOLTAGE_NUM,
.linear_ranges = bd71837_ldo7_volts,
@@ -1251,15 +1306,57 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
},
};
+static void mark_hw_controlled(struct device *dev, struct device_node *np,
+ struct bd718xx_regulator_data *reg_data,
+ unsigned int num_reg_data, int *info)
+{
+ int i;
+
+ for (i = 1; i <= num_reg_data; i++) {
+ if (!of_node_name_eq(np, reg_data[i-1].desc.of_match))
+ continue;
+
+ *info |= 1 << (i - 1);
+ dev_dbg(dev, "regulator %d runlevel controlled\n", i);
+ return;
+ }
+ dev_warn(dev, "Bad regulator node\n");
+}
+
+static int get_hw_controlled_regulators(struct device *dev,
+ struct bd718xx_regulator_data *reg_data,
+ unsigned int num_reg_data, int *info)
+{
+ struct device_node *np;
+ struct device_node *nproot = dev->of_node;
+ const char *prop = "rohm,no-regulator-enable-control";
+
+ *info = 0;
+
+ nproot = of_get_child_by_name(nproot, "regulators");
+ if (!nproot) {
+ dev_err(dev, "failed to find regulators node\n");
+ return -ENODEV;
+ }
+ for_each_child_of_node(nproot, np)
+ if (of_property_read_bool(np, prop))
+ mark_hw_controlled(dev, np, reg_data, num_reg_data,
+ info);
+
+ of_node_put(nproot);
+ return 0;
+}
+
static int bd718xx_probe(struct platform_device *pdev)
{
struct bd718xx *mfd;
struct regulator_config config = { 0 };
- int i, j, err;
+ int i, j, err, omit_enable;
bool use_snvs;
- const struct bd718xx_regulator_data *reg_data;
+ struct bd718xx_regulator_data *reg_data;
unsigned int num_reg_data;
enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
+ const struct regulator_ops **swops, **hwops;
mfd = dev_get_drvdata(pdev->dev.parent);
if (!mfd) {
@@ -1272,10 +1369,14 @@ static int bd718xx_probe(struct platform_device *pdev)
case ROHM_CHIP_TYPE_BD71837:
reg_data = bd71837_regulators;
num_reg_data = ARRAY_SIZE(bd71837_regulators);
+ swops = &bd71837_swcontrol_ops[0];
+ hwops = &bd71837_hwcontrol_ops[0];
break;
case ROHM_CHIP_TYPE_BD71847:
reg_data = bd71847_regulators;
num_reg_data = ARRAY_SIZE(bd71847_regulators);
+ swops = &bd71847_swcontrol_ops[0];
+ hwops = &bd71847_hwcontrol_ops[0];
break;
default:
dev_err(&pdev->dev, "Unsupported chip type\n");
@@ -1319,17 +1420,35 @@ static int bd718xx_probe(struct platform_device *pdev)
}
}
+ config.dev = pdev->dev.parent;
+ config.regmap = mfd->chip.regmap;
+ /*
+ * There are cases when we want to leave the enable-control for
+ * the HW state machine and use this driver only for voltage control.
+ * One special case is when we use the PMIC_STBY_REQ line from the SoC
+ * to the PMIC in order to set the system to SUSPEND state.
+ *
+ * If a regulator is taken under SW control its state will not be
+ * affected by the PMIC state machine - e.g. the regulator is likely
+ * to stay on even in SUSPEND.
+ */
+ get_hw_controlled_regulators(pdev->dev.parent, reg_data, num_reg_data,
+ &omit_enable);
+
for (i = 0; i < num_reg_data; i++) {
- const struct regulator_desc *desc;
+ struct regulator_desc *desc;
struct regulator_dev *rdev;
- const struct bd718xx_regulator_data *r;
+ struct bd718xx_regulator_data *r;
+ int no_enable_control = omit_enable & (1 << i);
r = &reg_data[i];
desc = &r->desc;
- config.dev = pdev->dev.parent;
- config.regmap = mfd->chip.regmap;
+ if (no_enable_control)
+ desc->ops = hwops[i];
+ else
+ desc->ops = swops[i];
rdev = devm_regulator_register(&pdev->dev, desc, &config);
if (IS_ERR(rdev)) {
@@ -1356,8 +1475,9 @@ static int bd718xx_probe(struct platform_device *pdev)
* enable SW control for crucial regulators if snvs state is
* used
*/
- if (!use_snvs || !rdev->constraints->always_on ||
- !rdev->constraints->boot_on) {
+ if (!no_enable_control && (!use_snvs ||
+ !rdev->constraints->always_on ||
+ !rdev->constraints->boot_on)) {
err = regmap_update_bits(mfd->chip.regmap, r->init.reg,
r->init.mask, r->init.val);
if (err) {
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
new file mode 100644
index 000000000000..a8b5832a5a1b
--- /dev/null
+++ b/drivers/regulator/bd9576-regulator.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 ROHM Semiconductors
+// ROHM BD9576MUF/BD9573MUF regulator driver
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rohm-bd957x.h>
+#include <linux/mfd/rohm-generic.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+#define BD957X_VOUTS1_VOLT 3300000
+#define BD957X_VOUTS4_BASE_VOLT 1030000
+#define BD957X_VOUTS34_NUM_VOLT 32
+
+static int vout1_volt_table[] = {5000000, 4900000, 4800000, 4700000, 4600000,
+ 4500000, 4500000, 4500000, 5000000, 5100000,
+ 5200000, 5300000, 5400000, 5500000, 5500000,
+ 5500000};
+
+static int vout2_volt_table[] = {1800000, 1780000, 1760000, 1740000, 1720000,
+ 1700000, 1680000, 1660000, 1800000, 1820000,
+ 1840000, 1860000, 1880000, 1900000, 1920000,
+ 1940000};
+
+static int voutl1_volt_table[] = {2500000, 2540000, 2580000, 2620000, 2660000,
+ 2700000, 2740000, 2780000, 2500000, 2460000,
+ 2420000, 2380000, 2340000, 2300000, 2260000,
+ 2220000};
+
+struct bd957x_regulator_data {
+ struct regulator_desc desc;
+ int base_voltage;
+};
+
+static int bd957x_vout34_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct regulator_desc *desc = rdev->desc;
+ int multiplier = selector & desc->vsel_mask & 0x7f;
+ int tune;
+
+ /* VOUT3 and 4 have a 10mV step */
+ tune = multiplier * 10000;
+
+ if (!(selector & 0x80))
+ return desc->fixed_uV - tune;
+
+ return desc->fixed_uV + tune;
+}
+
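+/*
+ * Worked example for the tune encoding above (selectors hypothetical,
+ * assuming fixed_uV = 1030000 as for VOUT4):
+ * selector 0x05: tune = 5 * 10000 -> 1030000 - 50000 = 980000 uV
+ * selector 0x85: bit 7 set -> 1030000 + 50000 = 1080000 uV
+ */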
+static int bd957x_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct regulator_desc *desc = rdev->desc;
+ int index = selector & desc->vsel_mask & 0x7f;
+
+ if (!(selector & 0x80))
+ index += desc->n_voltages / 2;
+
+ if (index >= desc->n_voltages)
+ return -EINVAL;
+
+ return desc->volt_table[index];
+}
+
+static const struct regulator_ops bd957x_vout34_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_vout34_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops bd957X_vouts1_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops bd957x_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = bd957x_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct bd957x_regulator_data bd9576_regulators[] = {
+ {
+ .desc = {
+ .name = "VD50",
+ .of_match = of_match_ptr("regulator-vd50"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD50,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &bd957x_ops,
+ .volt_table = &vout1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout1_volt_table),
+ .vsel_reg = BD957X_REG_VOUT1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "VD18",
+ .of_match = of_match_ptr("regulator-vd18"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD18,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &bd957x_ops,
+ .volt_table = &vout2_volt_table[0],
+ .n_voltages = ARRAY_SIZE(vout2_volt_table),
+ .vsel_reg = BD957X_REG_VOUT2_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT2_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER2,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "VDDDR",
+ .of_match = of_match_ptr("regulator-vdddr"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VDDDR,
+ .ops = &bd957x_vout34_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT3_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT3_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER3,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "VD10",
+ .of_match = of_match_ptr("regulator-vd10"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VD10,
+ .ops = &bd957x_vout34_ops,
+ .type = REGULATOR_VOLTAGE,
+ .fixed_uV = BD957X_VOUTS4_BASE_VOLT,
+ .n_voltages = BD957X_VOUTS34_NUM_VOLT,
+ .vsel_reg = BD957X_REG_VOUT4_TUNE,
+ .vsel_mask = BD957X_MASK_VOUT4_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGER4,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "VOUTL1",
+ .of_match = of_match_ptr("regulator-voutl1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTL1,
+ .ops = &bd957x_ops,
+ .type = REGULATOR_VOLTAGE,
+ .volt_table = &voutl1_volt_table[0],
+ .n_voltages = ARRAY_SIZE(voutl1_volt_table),
+ .vsel_reg = BD957X_REG_VOUTL1_TUNE,
+ .vsel_mask = BD957X_MASK_VOUTL1_TUNE,
+ .enable_reg = BD957X_REG_POW_TRIGGERL1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "VOUTS1",
+ .of_match = of_match_ptr("regulator-vouts1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = BD957X_VOUTS1,
+ .ops = &bd957X_vouts1_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1,
+ .fixed_uV = BD957X_VOUTS1_VOLT,
+ .enable_reg = BD957X_REG_POW_TRIGGERS1,
+ .enable_mask = BD957X_REGULATOR_EN_MASK,
+ .enable_val = BD957X_REGULATOR_DIS_VAL,
+ .enable_is_inverted = true,
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+static int bd957x_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct regulator_config config = { 0 };
+ int i, err = 0;
+ bool vout_mode, ddr_sel;
+ const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
+ unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
+ enum rohm_chip_type chip = platform_get_device_id(pdev)->driver_data;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "No regmap\n");
+ return -EINVAL;
+ }
+ vout_mode = of_property_read_bool(pdev->dev.parent->of_node,
+ "rohm,vout1-en-low");
+ if (vout_mode) {
+ struct gpio_desc *en;
+
+ dev_dbg(&pdev->dev, "GPIO controlled mode\n");
+
+ /* VOUT1 enable state is judged by the VOUT1_EN pin */
+ /* See if we have a GPIO defined */
+ en = devm_gpiod_get_from_of_node(&pdev->dev,
+ pdev->dev.parent->of_node,
+ "rohm,vout1-en-gpios", 0,
+ GPIOD_OUT_LOW, "vout1-en");
+ if (!IS_ERR(en)) {
+ /* VOUT1_OPS gpio ctrl */
+ /*
+ * The regulator core prioritizes ena_gpiod over the
+ * enable/disable/is_enabled callbacks so there is no need
+ * to clear them. We can still use the same ops.
+ */
+ config.ena_gpiod = en;
+ } else {
+ /*
+ * In theory it is possible someone wants to set
+ * vout1-en LOW during OTP loading and set VOUT1 to be
+ * controlled by GPIO - but control the GPIO from
+ * somewhere other than this driver. For that to work we
+ * should unset the is_enabled callback here.
+ *
+ * I believe such a case, where rohm,vout1-en-low is set
+ * and vout1-en-gpios is not, is likely to be a
+ * misconfiguration. So let's just err out for now.
+ */
+ dev_err(&pdev->dev,
+ "Failed to get VOUT1 control GPIO\n");
+ return PTR_ERR(en);
+ }
+ }
+
+ /*
+ * If more than one PMIC needs to be controlled by the same processor
+ * then allocate the regulator data array here and use bd9576_regulators
+ * as a template. At the moment I see no such use-case so I save some
+ * bytes and use bd9576_regulators directly for non-constant configs
+ * like DDR voltage selection.
+ */
+ ddr_sel = of_property_read_bool(pdev->dev.parent->of_node,
+ "rohm,ddr-sel-low");
+ if (ddr_sel)
+ bd9576_regulators[2].desc.fixed_uV = 1350000;
+ else
+ bd9576_regulators[2].desc.fixed_uV = 1500000;
+
+ switch (chip) {
+ case ROHM_CHIP_TYPE_BD9576:
+ dev_dbg(&pdev->dev, "Found BD9576MUF\n");
+ break;
+ case ROHM_CHIP_TYPE_BD9573:
+ dev_dbg(&pdev->dev, "Found BD9573MUF\n");
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported chip type\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ config.dev = pdev->dev.parent;
+ config.regmap = regmap;
+
+ for (i = 0; i < num_reg_data; i++) {
+
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+ const struct bd957x_regulator_data *r;
+
+ r = &reg_data[i];
+ desc = &r->desc;
+
+ rdev = devm_regulator_register(&pdev->dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ desc->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+ /*
+ * Clear the VOUT1 GPIO setting - rest of the regulators do not
+ * support GPIO control
+ */
+ config.ena_gpiod = NULL;
+ }
+
+err:
+ return err;
+}
+
+static const struct platform_device_id bd957x_pmic_id[] = {
+ { "bd9573-pmic", ROHM_CHIP_TYPE_BD9573 },
+ { "bd9576-pmic", ROHM_CHIP_TYPE_BD9576 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, bd957x_pmic_id);
+
+static struct platform_driver bd957x_regulator = {
+ .driver = {
+ .name = "bd957x-pmic",
+ },
+ .probe = bd957x_probe,
+ .id_table = bd957x_pmic_id,
+};
+
+module_platform_driver(bd957x_regulator);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD9576/BD9573 voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd957x-pmic");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7ff507ec875a..a5ad553da8cd 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -190,11 +190,10 @@ static inline int regulator_lock_nested(struct regulator_dev *rdev,
* than the one, which initially locked the mutex, it will
* wait on mutex.
*/
-void regulator_lock(struct regulator_dev *rdev)
+static void regulator_lock(struct regulator_dev *rdev)
{
regulator_lock_nested(rdev, NULL);
}
-EXPORT_SYMBOL_GPL(regulator_lock);
/**
* regulator_unlock - unlock a single regulator
@@ -203,7 +202,7 @@ EXPORT_SYMBOL_GPL(regulator_lock);
* This function unlocks the mutex when the
* reference counter reaches 0.
*/
-void regulator_unlock(struct regulator_dev *rdev)
+static void regulator_unlock(struct regulator_dev *rdev)
{
mutex_lock(&regulator_nesting_mutex);
@@ -216,7 +215,6 @@ void regulator_unlock(struct regulator_dev *rdev)
mutex_unlock(&regulator_nesting_mutex);
}
-EXPORT_SYMBOL_GPL(regulator_unlock);
static bool regulator_supply_is_couple(struct regulator_dev *rdev)
{
@@ -409,11 +407,11 @@ err_node_put:
static struct device_node *of_get_regulator(struct device *dev, const char *supply)
{
struct device_node *regnode = NULL;
- char prop_name[32]; /* 32 is max size of property name */
+ char prop_name[64]; /* 64 is max size of property name */
dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
- snprintf(prop_name, 32, "%s-supply", supply);
+ snprintf(prop_name, 64, "%s-supply", supply);
regnode = of_parse_phandle(dev->of_node, prop_name, 0);
if (!regnode) {
@@ -568,6 +566,30 @@ regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
}
}
+static const struct regulator_state *
+regulator_get_suspend_state_check(struct regulator_dev *rdev, suspend_state_t state)
+{
+ const struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return NULL;
+
+ /* If we have no suspend mode configuration don't set anything;
+ * only warn if the driver implements set_suspend_voltage or
+ * set_suspend_mode callback.
+ */
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
+ if (rdev->desc->ops->set_suspend_voltage ||
+ rdev->desc->ops->set_suspend_mode)
+ rdev_warn(rdev, "No configuration\n");
+ return NULL;
+ }
+
+ return rstate;
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -945,7 +967,8 @@ static int drms_uA_update(struct regulator_dev *rdev)
/* set the optimum mode for our new total regulator load */
err = rdev->desc->ops->set_load(rdev, current_uA);
if (err < 0)
- rdev_err(rdev, "failed to set load %d\n", current_uA);
+ rdev_err(rdev, "failed to set load %d: %pe\n",
+ current_uA, ERR_PTR(err));
} else {
/* get output voltage */
output_uV = regulator_get_voltage_rdev(rdev);
@@ -972,40 +995,24 @@ static int drms_uA_update(struct regulator_dev *rdev)
/* check the new mode is allowed */
err = regulator_mode_constrain(rdev, &mode);
if (err < 0) {
- rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
- current_uA, input_uV, output_uV);
+ rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV: %pe\n",
+ current_uA, input_uV, output_uV, ERR_PTR(err));
return err;
}
err = rdev->desc->ops->set_mode(rdev, mode);
if (err < 0)
- rdev_err(rdev, "failed to set optimum mode %x\n", mode);
+ rdev_err(rdev, "failed to set optimum mode %x: %pe\n",
+ mode, ERR_PTR(err));
}
return err;
}
-static int suspend_set_state(struct regulator_dev *rdev,
- suspend_state_t state)
+static int __suspend_set_state(struct regulator_dev *rdev,
+ const struct regulator_state *rstate)
{
int ret = 0;
- struct regulator_state *rstate;
-
- rstate = regulator_get_suspend_state(rdev, state);
- if (rstate == NULL)
- return 0;
-
- /* If we have no suspend mode configuration don't set anything;
- * only warn if the driver implements set_suspend_voltage or
- * set_suspend_mode callback.
- */
- if (rstate->enabled != ENABLE_IN_SUSPEND &&
- rstate->enabled != DISABLE_IN_SUSPEND) {
- if (rdev->desc->ops->set_suspend_voltage ||
- rdev->desc->ops->set_suspend_mode)
- rdev_warn(rdev, "No configuration\n");
- return 0;
- }
if (rstate->enabled == ENABLE_IN_SUSPEND &&
rdev->desc->ops->set_suspend_enable)
@@ -1017,14 +1024,14 @@ static int suspend_set_state(struct regulator_dev *rdev,
ret = 0;
if (ret < 0) {
- rdev_err(rdev, "failed to enabled/disable\n");
+ rdev_err(rdev, "failed to enable/disable: %pe\n", ERR_PTR(ret));
return ret;
}
if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
if (ret < 0) {
- rdev_err(rdev, "failed to set voltage\n");
+ rdev_err(rdev, "failed to set voltage: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1032,7 +1039,7 @@ static int suspend_set_state(struct regulator_dev *rdev,
if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
if (ret < 0) {
- rdev_err(rdev, "failed to set mode\n");
+ rdev_err(rdev, "failed to set mode: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1040,7 +1047,20 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
-static void print_constraints(struct regulator_dev *rdev)
+static int suspend_set_initial_state(struct regulator_dev *rdev)
+{
+ const struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state_check(rdev,
+ rdev->constraints->initial_state);
+ if (!rstate)
+ return 0;
+
+ return __suspend_set_state(rdev, rstate);
+}
+
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+static void print_constraints_debug(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
char buf[160] = "";
@@ -1097,12 +1117,27 @@ static void print_constraints(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_IDLE)
count += scnprintf(buf + count, len - count, "idle ");
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
- count += scnprintf(buf + count, len - count, "standby");
+ count += scnprintf(buf + count, len - count, "standby ");
if (!count)
- scnprintf(buf, len, "no parameters");
+ count = scnprintf(buf, len, "no parameters");
+ else
+ --count;
+
+ count += scnprintf(buf + count, len - count, ", %s",
+ _regulator_is_enabled(rdev) ? "enabled" : "disabled");
rdev_dbg(rdev, "%s\n", buf);
+}
+#else /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */
+static inline void print_constraints_debug(struct regulator_dev *rdev) {}
+#endif /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */
+
+static void print_constraints(struct regulator_dev *rdev)
+{
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ print_constraints_debug(rdev);
if ((constraints->min_uV != constraints->max_uV) &&
!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE))
@@ -1135,8 +1170,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
if (current_uV < 0) {
rdev_err(rdev,
- "failed to get the current voltage(%d)\n",
- current_uV);
+ "failed to get the current voltage: %pe\n",
+ ERR_PTR(current_uV));
return current_uV;
}
@@ -1165,8 +1200,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
rdev, target_min, target_max);
if (ret < 0) {
rdev_err(rdev,
- "failed to apply %d-%duV constraint(%d)\n",
- target_min, target_max, ret);
+ "failed to apply %d-%duV constraint: %pe\n",
+ target_min, target_max, ERR_PTR(ret));
return ret;
}
}
@@ -1315,16 +1350,16 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_input_current_limit(rdev,
rdev->constraints->ilim_uA);
if (ret < 0) {
- rdev_err(rdev, "failed to set input limit\n");
+ rdev_err(rdev, "failed to set input limit: %pe\n", ERR_PTR(ret));
return ret;
}
}
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
- ret = suspend_set_state(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_initial_state(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to set suspend state\n");
+ rdev_err(rdev, "failed to set suspend state: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1337,7 +1372,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
if (ret < 0) {
- rdev_err(rdev, "failed to set initial mode: %d\n", ret);
+ rdev_err(rdev, "failed to set initial mode: %pe\n", ERR_PTR(ret));
return ret;
}
} else if (rdev->constraints->system_load) {
@@ -1352,7 +1387,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
&& ops->set_ramp_delay) {
ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
if (ret < 0) {
- rdev_err(rdev, "failed to set ramp_delay\n");
+ rdev_err(rdev, "failed to set ramp_delay: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1360,7 +1395,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (rdev->constraints->pull_down && ops->set_pull_down) {
ret = ops->set_pull_down(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to set pull down\n");
+ rdev_err(rdev, "failed to set pull down: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1368,7 +1403,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (rdev->constraints->soft_start && ops->set_soft_start) {
ret = ops->set_soft_start(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to set soft start\n");
+ rdev_err(rdev, "failed to set soft start: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1377,7 +1412,8 @@ static int set_machine_constraints(struct regulator_dev *rdev,
&& ops->set_over_current_protection) {
ret = ops->set_over_current_protection(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to set over current protection\n");
+ rdev_err(rdev, "failed to set over current protection: %pe\n",
+ ERR_PTR(ret));
return ret;
}
}
@@ -1388,7 +1424,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_active_discharge(rdev, ad_state);
if (ret < 0) {
- rdev_err(rdev, "failed to set active discharge\n");
+ rdev_err(rdev, "failed to set active discharge: %pe\n", ERR_PTR(ret));
return ret;
}
}
@@ -1408,7 +1444,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = _regulator_do_enable(rdev);
if (ret < 0 && ret != -EINVAL) {
- rdev_err(rdev, "failed to enable\n");
+ rdev_err(rdev, "failed to enable: %pe\n", ERR_PTR(ret));
return ret;
}
@@ -1632,8 +1668,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
supply_name);
if (err) {
- rdev_dbg(rdev, "could not add device link %s err %d\n",
- dev->kobj.name, err);
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
/* non-fatal */
}
}
@@ -2421,7 +2457,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
if (ret >= 0) {
delay = ret;
} else {
- rdev_warn(rdev, "enable_time() failed: %d\n", ret);
+ rdev_warn(rdev, "enable_time() failed: %pe\n", ERR_PTR(ret));
delay = 0;
}
@@ -2610,7 +2646,7 @@ static int _regulator_enable(struct regulator *regulator)
_notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE,
NULL);
} else if (ret < 0) {
- rdev_err(rdev, "is_enabled() failed: %d\n", ret);
+ rdev_err(rdev, "is_enabled() failed: %pe\n", ERR_PTR(ret));
goto err_consumer_disable;
}
/* Fallthrough on positive return values - already enabled */
@@ -2712,7 +2748,7 @@ static int _regulator_disable(struct regulator *regulator)
ret = _regulator_do_disable(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to disable\n");
+ rdev_err(rdev, "failed to disable: %pe\n", ERR_PTR(ret));
_notifier_call_chain(rdev,
REGULATOR_EVENT_ABORT_DISABLE,
NULL);
@@ -2779,7 +2815,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
ret = _regulator_do_disable(rdev);
if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
+ rdev_err(rdev, "failed to force disable: %pe\n", ERR_PTR(ret));
_notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
REGULATOR_EVENT_ABORT_DISABLE, NULL);
return ret;
@@ -2858,7 +2894,8 @@ static void regulator_disable_work(struct work_struct *work)
for (i = 0; i < count; i++) {
ret = _regulator_disable(regulator);
if (ret != 0)
- rdev_err(rdev, "Deferred disable failed: %d\n", ret);
+ rdev_err(rdev, "Deferred disable failed: %pe\n",
+ ERR_PTR(ret));
}
}
WARN_ON(!total_count);
@@ -3051,7 +3088,7 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
*vsel_reg = rdev->desc->vsel_reg;
*vsel_mask = rdev->desc->vsel_mask;
- return 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(regulator_get_hardware_vsel_register);
@@ -3383,7 +3420,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
if (delay < 0) {
- rdev_warn(rdev, "failed to get delay: %d\n", delay);
+ rdev_warn(rdev, "failed to get delay: %pe\n", ERR_PTR(delay));
delay = 0;
}
@@ -3535,8 +3572,8 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
ret = regulator_set_voltage_unlocked(rdev->supply,
best_supply_uV, INT_MAX, state);
if (ret) {
- dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
- ret);
+ dev_err(&rdev->dev, "Failed to increase supply voltage: %pe\n",
+ ERR_PTR(ret));
goto out;
}
}
@@ -3553,8 +3590,8 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
ret = regulator_set_voltage_unlocked(rdev->supply,
best_supply_uV, INT_MAX, state);
if (ret)
- dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
- ret);
+ dev_warn(&rdev->dev, "Failed to decrease supply voltage: %pe\n",
+ ERR_PTR(ret));
/* No need to fail here */
ret = 0;
}
@@ -4128,6 +4165,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev)
ret = rdev->desc->fixed_uV;
} else if (rdev->supply) {
ret = regulator_get_voltage_rdev(rdev->supply->rdev);
+ } else if (rdev->supply_name) {
+ return -EPROBE_DEFER;
} else {
return -EINVAL;
}
@@ -4540,8 +4579,8 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
err:
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get supply '%s': %d\n",
- consumers[i].supply, ret);
+ dev_err(dev, "Failed to get supply '%s': %pe\n",
+ consumers[i].supply, ERR_PTR(ret));
else
dev_dbg(dev, "Failed to get supply '%s', deferring\n",
consumers[i].supply);
@@ -4599,8 +4638,8 @@ int regulator_bulk_enable(int num_consumers,
err:
for (i = 0; i < num_consumers; i++) {
if (consumers[i].ret < 0)
- pr_err("Failed to enable %s: %d\n", consumers[i].supply,
- consumers[i].ret);
+ pr_err("Failed to enable %s: %pe\n", consumers[i].supply,
+ ERR_PTR(consumers[i].ret));
else
regulator_disable(consumers[i].consumer);
}
@@ -4636,12 +4675,12 @@ int regulator_bulk_disable(int num_consumers,
return 0;
err:
- pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
+ pr_err("Failed to disable %s: %pe\n", consumers[i].supply, ERR_PTR(ret));
for (++i; i < num_consumers; ++i) {
r = regulator_enable(consumers[i].consumer);
if (r != 0)
- pr_err("Failed to re-enable %s: %d\n",
- consumers[i].supply, r);
+ pr_err("Failed to re-enable %s: %pe\n",
+ consumers[i].supply, ERR_PTR(r));
}
return ret;
@@ -4709,14 +4748,11 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
* @data: callback-specific data.
*
* Called by regulator drivers to notify clients a regulator event has
- * occurred. We also notify regulator clients downstream.
- * Note lock must be held by caller.
+ * occurred.
*/
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
- lockdep_assert_held_once(&rdev->mutex.base);
-
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -5023,8 +5059,8 @@ static void regulator_remove_coupling(struct regulator_dev *rdev)
if (coupler && coupler->detach_regulator) {
err = coupler->detach_regulator(coupler, rdev);
if (err)
- rdev_err(rdev, "failed to detach from coupler: %d\n",
- err);
+ rdev_err(rdev, "failed to detach from coupler: %pe\n",
+ ERR_PTR(err));
}
kfree(rdev->coupling_desc.coupled_rdevs);
@@ -5033,20 +5069,20 @@ static void regulator_remove_coupling(struct regulator_dev *rdev)
static int regulator_init_coupling(struct regulator_dev *rdev)
{
+ struct regulator_dev **coupled;
int err, n_phandles;
- size_t alloc_size;
if (!IS_ENABLED(CONFIG_OF))
n_phandles = 0;
else
n_phandles = of_get_n_coupled(rdev);
- alloc_size = sizeof(*rdev) * (n_phandles + 1);
-
- rdev->coupling_desc.coupled_rdevs = kzalloc(alloc_size, GFP_KERNEL);
- if (!rdev->coupling_desc.coupled_rdevs)
+ coupled = kcalloc(n_phandles + 1, sizeof(*coupled), GFP_KERNEL);
+ if (!coupled)
return -ENOMEM;
+ rdev->coupling_desc.coupled_rdevs = coupled;
+
/*
* Every regulator should always have coupling descriptor filled with
* at least pointer to itself.
@@ -5068,7 +5104,7 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
if (IS_ERR(rdev->coupling_desc.coupler)) {
err = PTR_ERR(rdev->coupling_desc.coupler);
- rdev_err(rdev, "failed to get coupler: %d\n", err);
+ rdev_err(rdev, "failed to get coupler: %pe\n", ERR_PTR(err));
return err;
}
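
The allocation change above also fixes a sizing bug: the old code reserved sizeof(*rdev) — a whole struct regulator_dev — per slot, while the array actually stores pointers. A minimal sketch of the corrected pattern (variable names are illustrative):

	struct regulator_dev **coupled;

	/* kcalloc() sizes by element (a pointer here, via sizeof(*coupled))
	 * and checks the count * size multiplication for overflow. */
	coupled = kcalloc(n_phandles + 1, sizeof(*coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;
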
@@ -5231,8 +5267,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (config->ena_gpiod) {
ret = regulator_ena_gpio_request(rdev, config);
if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO: %d\n",
- ret);
+ rdev_err(rdev, "Failed to request enable GPIO: %pe\n",
+ ERR_PTR(ret));
goto clean;
}
/* The regulator core took over the GPIO descriptor */
@@ -5256,15 +5292,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name;
- /*
- * Attempt to resolve the regulator supply, if specified,
- * but don't return an error if we fail because we will try
- * to resolve it again later as more regulators are added.
- */
- if (regulator_resolve_supply(rdev))
- rdev_dbg(rdev, "unable to resolve supply\n");
-
ret = set_machine_constraints(rdev, constraints);
+ if (ret == -EPROBE_DEFER) {
+ /* Regulator might be in bypass mode and so needs its supply
+ * to set the constraints */
+ /* FIXME: this currently triggers a chicken-and-egg problem
+ * when creating -SUPPLY symlink in sysfs to a regulator
+ * that is just being created */
+ ret = regulator_resolve_supply(rdev);
+ if (!ret)
+ ret = set_machine_constraints(rdev, constraints);
+ else
+ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
+ ERR_PTR(ret));
+ }
if (ret < 0)
goto wash;
@@ -5375,9 +5416,14 @@ static int regulator_suspend(struct device *dev)
struct regulator_dev *rdev = dev_to_rdev(dev);
suspend_state_t state = pm_suspend_target_state;
int ret;
+ const struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state_check(rdev, state);
+ if (!rstate)
+ return 0;
regulator_lock(rdev);
- ret = suspend_set_state(rdev, state);
+ ret = __suspend_set_state(rdev, rstate);
regulator_unlock(rdev);
return ret;
@@ -5394,11 +5440,14 @@ static int regulator_resume(struct device *dev)
if (rstate == NULL)
return 0;
+ /* Avoid grabbing the lock if we don't need to */
+ if (!rdev->desc->ops->resume)
+ return 0;
+
regulator_lock(rdev);
- if (rdev->desc->ops->resume &&
- (rstate->enabled == ENABLE_IN_SUSPEND ||
- rstate->enabled == DISABLE_IN_SUSPEND))
+ if (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND)
ret = rdev->desc->ops->resume(rdev);
regulator_unlock(rdev);
@@ -5809,7 +5858,7 @@ static int regulator_late_cleanup(struct device *dev, void *data)
rdev_info(rdev, "disabling\n");
ret = _regulator_do_disable(rdev);
if (ret != 0)
- rdev_err(rdev, "couldn't disable: %d\n", ret);
+ rdev_err(rdev, "couldn't disable: %pe\n", ERR_PTR(ret));
} else {
/* The intention is that in future we will
* assume that full constraints are provided
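
Most of the core.c hunks above are one convention change: error codes are now printed with %pe and ERR_PTR(), so that with CONFIG_SYMBOLIC_ERRNAME=y the logs show symbolic names such as -EPROBE_DEFER instead of bare integers. A minimal sketch of the pattern (the reg and dev variables are illustrative):

	int ret = regulator_enable(reg);

	if (ret < 0)
		/* prints e.g. "failed to enable: -EINVAL" */
		dev_err(dev, "failed to enable: %pe\n", ERR_PTR(ret));
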
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index c025ccb1a30a..73ff5fc7d8d7 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -485,10 +485,8 @@ static irqreturn_t da9055_ldo5_6_oc_irq(int irq, void *data)
{
struct da9055_regulator *regulator = data;
- regulator_lock(regulator->rdev);
regulator_notifier_call_chain(regulator->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(regulator->rdev);
return IRQ_HANDLED;
}
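
The da9055 hunk above is the first of many identical cleanups in this series: since regulator_notifier_call_chain() no longer asserts that the rdev lock is held, IRQ handlers can drop the regulator_lock()/regulator_unlock() pair around it. A sketch of the resulting handler shape (foo_oc_irq is hypothetical):

static irqreturn_t foo_oc_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	/* No locking needed; the notifier chain is safe to call as-is. */
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
	return IRQ_HANDLED;
}
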
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index d8112f56e94e..1a6324001027 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -907,10 +907,8 @@ static irqreturn_t da9062_ldo_lim_event(int irq, void *data)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
- regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(regl->rdev);
handled = IRQ_HANDLED;
}
}
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index fe65b5acaf28..cf7d5341750e 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -574,10 +574,8 @@ static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
continue;
if (BIT(regl->info->oc_event.lsb) & bits) {
- regulator_lock(regl->rdev);
regulator_notifier_call_chain(regl->rdev,
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(regl->rdev);
}
}
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 0cdeb6186529..7493af0b5c04 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -77,8 +77,6 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
if (error < 0)
goto error_i2c;
- regulator_lock(chip->rdev);
-
if (val & DA9210_E_OVCURR) {
regulator_notifier_call_chain(chip->rdev,
REGULATOR_EVENT_OVER_CURRENT,
@@ -103,8 +101,6 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
handled |= DA9210_E_VMAX;
}
- regulator_unlock(chip->rdev);
-
if (handled) {
/* Clear handled events */
error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
@@ -125,7 +121,7 @@ error_i2c:
* I2C driver interface functions
*/
-static const struct of_device_id da9210_dt_ids[] = {
+static const struct of_device_id __maybe_unused da9210_dt_ids[] = {
{ .compatible = "dlg,da9210", },
{ }
};
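
The __maybe_unused annotation added above pairs with of_match_ptr(): when CONFIG_OF=n the macro evaluates to NULL, the table is then unreferenced, and the attribute silences the resulting unused-variable warning. A sketch under those assumptions (driver and compatible names are illustrative):

static const struct of_device_id __maybe_unused foo_dt_ids[] = {
	{ .compatible = "vendor,foo" },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name		= "foo",
		/* NULL when !CONFIG_OF, leaving foo_dt_ids unreferenced */
		.of_match_table	= of_match_ptr(foo_dt_ids),
	},
};
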
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 297b3aa7c753..e01b32d1fa17 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -51,10 +51,24 @@ static const struct regmap_range_cfg da9211_regmap_range[] = {
},
};
+static bool da9211_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9211_REG_STATUS_A:
+ case DA9211_REG_STATUS_B:
+ case DA9211_REG_EVENT_A:
+ case DA9211_REG_EVENT_B:
+ return true;
+ }
+ return false;
+}
+
static const struct regmap_config da9211_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 5 * 128,
+ .volatile_reg = da9211_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
.ranges = da9211_regmap_range,
.num_ranges = ARRAY_SIZE(da9211_regmap_range),
};
@@ -332,10 +346,8 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
goto error_i2c;
if (reg_val & DA9211_E_OV_CURR_A) {
- regulator_lock(chip->rdev[0]);
regulator_notifier_call_chain(chip->rdev[0],
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(chip->rdev[0]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_A);
@@ -346,10 +358,8 @@ static irqreturn_t da9211_irq_handler(int irq, void *data)
}
if (reg_val & DA9211_E_OV_CURR_B) {
- regulator_lock(chip->rdev[1]);
regulator_notifier_call_chain(chip->rdev[1],
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(chip->rdev[1]);
err = regmap_write(chip->regmap, DA9211_REG_EVENT_B,
DA9211_E_OV_CURR_B);
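
With the cache configuration added above, regmap serves non-volatile registers from its rbtree cache after the first access, while registers flagged by .volatile_reg (the status/event registers) always hit the I2C bus. Roughly (0x00 stands in for any configuration register):

static void foo_poll(struct da9211 *chip)
{
	unsigned int val;

	/* volatile: always performs an I2C transfer */
	regmap_read(chip->regmap, DA9211_REG_EVENT_B, &val);

	/* non-volatile: answered from the cache after the first read */
	regmap_read(chip->regmap, 0x00, &val);
}
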
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index c3ad6aa6b5d3..8b70bfe88019 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -67,8 +67,6 @@ static int power_state_active_get(void)
static struct ux500_regulator_debug {
struct dentry *dir;
- struct dentry *status_file;
- struct dentry *power_state_cnt_file;
struct dbx500_regulator_info *regulator_array;
int num_regulators;
u8 *state_before_suspend;
@@ -117,22 +115,14 @@ ux500_regulator_debug_init(struct platform_device *pdev,
{
/* create directory */
rdebug.dir = debugfs_create_dir("ux500-regulator", NULL);
- if (!rdebug.dir)
- goto exit_no_debugfs;
/* create "status" file */
- rdebug.status_file = debugfs_create_file("status",
- S_IRUGO, rdebug.dir, &pdev->dev,
- &ux500_regulator_status_fops);
- if (!rdebug.status_file)
- goto exit_destroy_dir;
+ debugfs_create_file("status", S_IRUGO, rdebug.dir, &pdev->dev,
+ &ux500_regulator_status_fops);
/* create "power-state-count" file */
- rdebug.power_state_cnt_file = debugfs_create_file("power-state-count",
- S_IRUGO, rdebug.dir, &pdev->dev,
- &ux500_regulator_power_state_cnt_fops);
- if (!rdebug.power_state_cnt_file)
- goto exit_destroy_status;
+ debugfs_create_file("power-state-count", S_IRUGO, rdebug.dir,
+ &pdev->dev, &ux500_regulator_power_state_cnt_fops);
rdebug.regulator_array = regulator_info;
rdebug.num_regulators = num_regulators;
@@ -150,13 +140,7 @@ ux500_regulator_debug_init(struct platform_device *pdev,
exit_free:
kfree(rdebug.state_before_suspend);
exit_destroy_power_state:
- debugfs_remove(rdebug.power_state_cnt_file);
-exit_destroy_status:
- debugfs_remove(rdebug.status_file);
-exit_destroy_dir:
- debugfs_remove(rdebug.dir);
-exit_no_debugfs:
- dev_err(&pdev->dev, "failed to create debugfs entries.\n");
+ debugfs_remove_recursive(rdebug.dir);
return -ENOMEM;
}
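
The dbx500 hunks follow the current debugfs convention: creation calls are not error-checked (a failed dentry degrades gracefully when passed back into debugfs), and teardown is a single debugfs_remove_recursive() on the directory. A minimal sketch (foo_status_fops is hypothetical):

static struct dentry *foo_dir;

static void foo_debugfs_init(struct device *dev)
{
	foo_dir = debugfs_create_dir("foo", NULL);
	debugfs_create_file("status", 0444, foo_dir, dev, &foo_status_fops);
}

static void foo_debugfs_exit(void)
{
	debugfs_remove_recursive(foo_dir);
}
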
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 74de6983c61a..d8059f596391 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -21,13 +21,13 @@
struct regulator_dev *dummy_regulator_rdev;
-static struct regulator_init_data dummy_initdata = {
+static const struct regulator_init_data dummy_initdata = {
.constraints = {
.always_on = 1,
},
};
-static struct regulator_ops dummy_ops;
+static const struct regulator_ops dummy_ops;
static const struct regulator_desc dummy_desc = {
.name = "regulator-dummy",
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 00c83492f774..aa426183b6a1 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -436,7 +436,7 @@ static struct fan53555_platform_data *fan53555_parse_dt(struct device *dev,
return pdata;
}
-static const struct of_device_id fan53555_dt_ids[] = {
+static const struct of_device_id __maybe_unused fan53555_dt_ids[] = {
{
.compatible = "fcs,fan53526",
.data = (void *)FAN53526_VENDOR_FAIRCHILD,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 142a70a89153..3de7709bdcd4 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -41,14 +41,6 @@ struct fixed_dev_type {
bool has_enable_clock;
};
-static const struct fixed_dev_type fixed_voltage_data = {
- .has_enable_clock = false,
-};
-
-static const struct fixed_dev_type fixed_clkenable_data = {
- .has_enable_clock = true,
-};
-
static int reg_clock_enable(struct regulator_dev *rdev)
{
struct fixed_voltage_data *priv = rdev_get_drvdata(rdev);
@@ -131,10 +123,10 @@ of_get_fixed_voltage_config(struct device *dev,
return config;
}
-static struct regulator_ops fixed_voltage_ops = {
+static const struct regulator_ops fixed_voltage_ops = {
};
-static struct regulator_ops fixed_voltage_clkenabled_ops = {
+static const struct regulator_ops fixed_voltage_clkenabled_ops = {
.enable = reg_clock_enable,
.disable = reg_clock_disable,
.is_enabled = reg_clock_is_enabled,
@@ -260,6 +252,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
}
#if defined(CONFIG_OF)
+static const struct fixed_dev_type fixed_voltage_data = {
+ .has_enable_clock = false,
+};
+
+static const struct fixed_dev_type fixed_clkenable_data = {
+ .has_enable_clock = true,
+};
+
static const struct of_device_id fixed_of_match[] = {
{
.compatible = "regulator-fixed",
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index 5ea3e4141684..cb71fa5f43c3 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -98,6 +98,7 @@ static const struct regulator_ops lochnagar_vddcore_ops = {
};
static const struct linear_range lochnagar_vddcore_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0),
REGULATOR_LINEAR_RANGE(600000, 0x8, 0x41, 12500),
};
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 4291df077c39..13c535711265 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -49,53 +49,15 @@ struct lp8755_chip {
struct regulator_dev *rdev[LP8755_BUCK_MAX];
};
-/**
- *lp8755_read : read a single register value from lp8755.
- *@pchip : device to read from
- *@reg : register to read from
- *@val : pointer to store read value
- */
-static int lp8755_read(struct lp8755_chip *pchip, unsigned int reg,
- unsigned int *val)
-{
- return regmap_read(pchip->regmap, reg, val);
-}
-
-/**
- *lp8755_write : write a single register value to lp8755.
- *@pchip : device to write to
- *@reg : register to write to
- *@val : value to be written
- */
-static int lp8755_write(struct lp8755_chip *pchip, unsigned int reg,
- unsigned int val)
-{
- return regmap_write(pchip->regmap, reg, val);
-}
-
-/**
- *lp8755_update_bits : set the values of bit fields in lp8755 register.
- *@pchip : device to read from
- *@reg : register to update
- *@mask : bitmask to be changed
- *@val : value for bitmask
- */
-static int lp8755_update_bits(struct lp8755_chip *pchip, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- return regmap_update_bits(pchip->regmap, reg, mask, val);
-}
-
static int lp8755_buck_enable_time(struct regulator_dev *rdev)
{
int ret;
unsigned int regval;
enum lp8755_bucks id = rdev_get_id(rdev);
- struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
- ret = lp8755_read(pchip, 0x12 + id, &regval);
+ ret = regmap_read(rdev->regmap, 0x12 + id, &regval);
if (ret < 0) {
- dev_err(pchip->dev, "i2c access error %s\n", __func__);
+ dev_err(&rdev->dev, "i2c access error %s\n", __func__);
return ret;
}
return (regval & 0xff) * 100;
@@ -115,17 +77,17 @@ static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
break;
case REGULATOR_MODE_NORMAL:
/* enable automatic pwm/pfm mode */
- ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x00);
+ ret = regmap_update_bits(rdev->regmap, 0x08 + id, 0x20, 0x00);
if (ret < 0)
goto err_i2c;
break;
case REGULATOR_MODE_IDLE:
/* enable automatic pwm/pfm/lppfm mode */
- ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x20);
+ ret = regmap_update_bits(rdev->regmap, 0x08 + id, 0x20, 0x20);
if (ret < 0)
goto err_i2c;
- ret = lp8755_update_bits(pchip, 0x10, 0x01, 0x01);
+ ret = regmap_update_bits(rdev->regmap, 0x10, 0x01, 0x01);
if (ret < 0)
goto err_i2c;
break;
@@ -135,12 +97,12 @@ static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
regbval = (0x01 << id);
}
- ret = lp8755_update_bits(pchip, 0x06, 0x01 << id, regbval);
+ ret = regmap_update_bits(rdev->regmap, 0x06, 0x01 << id, regbval);
if (ret < 0)
goto err_i2c;
return ret;
err_i2c:
- dev_err(pchip->dev, "i2c access error %s\n", __func__);
+ dev_err(&rdev->dev, "i2c access error %s\n", __func__);
return ret;
}
@@ -149,9 +111,8 @@ static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
int ret;
unsigned int regval;
enum lp8755_bucks id = rdev_get_id(rdev);
- struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
- ret = lp8755_read(pchip, 0x06, &regval);
+ ret = regmap_read(rdev->regmap, 0x06, &regval);
if (ret < 0)
goto err_i2c;
@@ -159,7 +120,7 @@ static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
if (regval & (0x01 << id))
return REGULATOR_MODE_FAST;
- ret = lp8755_read(pchip, 0x08 + id, &regval);
+ ret = regmap_read(rdev->regmap, 0x08 + id, &regval);
if (ret < 0)
goto err_i2c;
@@ -171,7 +132,7 @@ static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_NORMAL;
err_i2c:
- dev_err(pchip->dev, "i2c access error %s\n", __func__);
+ dev_err(&rdev->dev, "i2c access error %s\n", __func__);
return 0;
}
@@ -180,7 +141,6 @@ static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
int ret;
unsigned int regval = 0x00;
enum lp8755_bucks id = rdev_get_id(rdev);
- struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
/* uV/us */
switch (ramp) {
@@ -209,17 +169,17 @@ static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
regval = 0x00;
break;
default:
- dev_err(pchip->dev,
+ dev_err(&rdev->dev,
"Not supported ramp value %d %s\n", ramp, __func__);
return -EINVAL;
}
- ret = lp8755_update_bits(pchip, 0x07 + id, 0x07, regval);
+ ret = regmap_update_bits(rdev->regmap, 0x07 + id, 0x07, regval);
if (ret < 0)
goto err_i2c;
return ret;
err_i2c:
- dev_err(pchip->dev, "i2c access error %s\n", __func__);
+ dev_err(&rdev->dev, "i2c access error %s\n", __func__);
return ret;
}
@@ -278,7 +238,7 @@ static int lp8755_init_data(struct lp8755_chip *pchip)
struct lp8755_platform_data *pdata = pchip->pdata;
/* read back multi-phase configuration */
- ret = lp8755_read(pchip, 0x3D, &regval);
+ ret = regmap_read(pchip->regmap, 0x3D, &regval);
if (ret < 0)
goto out_i2c_error;
pchip->mphase = regval & 0x0F;
@@ -356,11 +316,11 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
struct lp8755_chip *pchip = data;
/* read flag0 register */
- ret = lp8755_read(pchip, 0x0D, &flag0);
+ ret = regmap_read(pchip->regmap, 0x0D, &flag0);
if (ret < 0)
goto err_i2c;
/* clear flag register to pull up int. pin */
- ret = lp8755_write(pchip, 0x0D, 0x00);
+ ret = regmap_write(pchip->regmap, 0x0D, 0x00);
if (ret < 0)
goto err_i2c;
@@ -369,19 +329,17 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
if ((flag0 & (0x4 << icnt))
&& (pchip->irqmask & (0x04 << icnt))
&& (pchip->rdev[icnt] != NULL)) {
- regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_PWR_FAULT,
NULL);
- regulator_unlock(pchip->rdev[icnt]);
}
/* read flag1 register */
- ret = lp8755_read(pchip, 0x0E, &flag1);
+ ret = regmap_read(pchip->regmap, 0x0E, &flag1);
if (ret < 0)
goto err_i2c;
/* clear flag register to pull up int. pin */
- ret = lp8755_write(pchip, 0x0E, 0x00);
+ ret = regmap_write(pchip->regmap, 0x0E, 0x00);
if (ret < 0)
goto err_i2c;
@@ -389,22 +347,18 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data)
if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL) {
- regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OCP,
NULL);
- regulator_unlock(pchip->rdev[icnt]);
}
/* send OVP event to all regulator devices */
if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
if (pchip->rdev[icnt] != NULL) {
- regulator_lock(pchip->rdev[icnt]);
regulator_notifier_call_chain(pchip->rdev[icnt],
LP8755_EVENT_OVP,
NULL);
- regulator_unlock(pchip->rdev[icnt]);
}
return IRQ_HANDLED;
@@ -423,7 +377,7 @@ static int lp8755_int_config(struct lp8755_chip *pchip)
return 0;
}
- ret = lp8755_read(pchip, 0x0F, &regval);
+ ret = regmap_read(pchip->regmap, 0x0F, &regval);
if (ret < 0) {
dev_err(pchip->dev, "i2c access error %s\n", __func__);
return ret;
@@ -502,7 +456,7 @@ static int lp8755_probe(struct i2c_client *client,
err:
/* output disable */
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- lp8755_write(pchip, icnt, 0x00);
+ regmap_write(pchip->regmap, icnt, 0x00);
return ret;
}
@@ -513,7 +467,7 @@ static int lp8755_remove(struct i2c_client *client)
struct lp8755_chip *pchip = i2c_get_clientdata(client);
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
- lp8755_write(pchip, icnt, 0x00);
+ regmap_write(pchip->regmap, icnt, 0x00);
return 0;
}
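
The lp8755 conversion relies on the regulator core storing the regmap passed at registration time in rdev->regmap, so ops callbacks can call regmap helpers directly instead of going through per-driver wrappers. A sketch under that assumption (register and mask names are illustrative):

static int foo_get_status(struct regulator_dev *rdev)
{
	unsigned int val;
	int ret;

	ret = regmap_read(rdev->regmap, FOO_STATUS_REG, &val);
	if (ret < 0)
		return ret;

	return (val & FOO_ON_MASK) ? REGULATOR_STATUS_ON
				   : REGULATOR_STATUS_OFF;
}
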
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 9a037fdc5fc5..38f7ccb63b52 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -357,22 +357,16 @@ static irqreturn_t ltc3589_isr(int irq, void *dev_id)
if (irqstat & LTC3589_IRQSTAT_THERMAL_WARN) {
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
- regulator_lock(ltc3589->regulators[i]);
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
- regulator_unlock(ltc3589->regulators[i]);
- }
}
if (irqstat & LTC3589_IRQSTAT_UNDERVOLT_WARN) {
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3589_NUM_REGULATORS; i++) {
- regulator_lock(ltc3589->regulators[i]);
+ for (i = 0; i < LTC3589_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3589->regulators[i],
event, NULL);
- regulator_unlock(ltc3589->regulators[i]);
- }
}
/* Clear warning condition */
@@ -457,7 +451,7 @@ static const struct i2c_device_id ltc3589_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc3589_i2c_id);
-static const struct of_device_id ltc3589_of_match[] = {
+static const struct of_device_id __maybe_unused ltc3589_of_match[] = {
{
.compatible = "lltc,ltc3589",
.data = (void *)LTC3589,
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index 093b3e4a6303..eb3d6bed6d54 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -276,23 +276,17 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id)
if (irqstat & LTC3676_IRQSTAT_THERMAL_WARN) {
dev_warn(dev, "Over-temperature Warning\n");
event = REGULATOR_EVENT_OVER_TEMP;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
- regulator_lock(ltc3676->regulators[i]);
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
- regulator_unlock(ltc3676->regulators[i]);
- }
}
if (irqstat & LTC3676_IRQSTAT_UNDERVOLT_WARN) {
dev_info(dev, "Undervoltage Warning\n");
event = REGULATOR_EVENT_UNDER_VOLTAGE;
- for (i = 0; i < LTC3676_NUM_REGULATORS; i++) {
- regulator_lock(ltc3676->regulators[i]);
+ for (i = 0; i < LTC3676_NUM_REGULATORS; i++)
regulator_notifier_call_chain(ltc3676->regulators[i],
event, NULL);
- regulator_unlock(ltc3676->regulators[i]);
- }
}
/* Clear warning condition */
@@ -368,7 +362,7 @@ static const struct i2c_device_id ltc3676_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc3676_i2c_id);
-static const struct of_device_id ltc3676_of_match[] = {
+static const struct of_device_id __maybe_unused ltc3676_of_match[] = {
{ .compatible = "lltc,ltc3676" },
{ },
};
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index f8941025780b..d4958394e608 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -201,7 +201,7 @@ static int of_get_max1586_platform_data(struct device *dev,
return 0;
}
-static const struct of_device_id max1586_of_match[] = {
+static const struct of_device_id __maybe_unused max1586_of_match[] = {
{ .compatible = "maxim,max1586", },
{},
};
diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c
index 502ab6afc814..f9e2e884ff54 100644
--- a/drivers/regulator/max77826-regulator.c
+++ b/drivers/regulator/max77826-regulator.c
@@ -274,7 +274,7 @@ static int max77826_i2c_probe(struct i2c_client *client)
return max77826_read_device_id(regmap, dev);
}
-static const struct of_device_id max77826_of_match[] = {
+static const struct of_device_id __maybe_unused max77826_of_match[] = {
{ .compatible = "maxim,max77826" },
{ /* sentinel */ }
};
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index d3d475f717f4..a84fd74081de 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -18,18 +18,70 @@
#define MP886X_V_BOOT (1 << 7)
#define MP886X_SYSCNTLREG1 0x01
#define MP886X_MODE (1 << 0)
+#define MP886X_SLEW_SHIFT 3
+#define MP886X_SLEW_MASK (0x7 << MP886X_SLEW_SHIFT)
#define MP886X_GO (1 << 6)
#define MP886X_EN (1 << 7)
+#define MP8869_SYSCNTLREG2 0x02
+
+struct mp886x_cfg_info {
+ const struct regulator_ops *rops;
+ const int slew_rates[8];
+ const int switch_freq[4];
+ const u8 fs_reg;
+ const u8 fs_shift;
+};
struct mp886x_device_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_init_data *regulator;
struct gpio_desc *en_gpio;
+ const struct mp886x_cfg_info *ci;
u32 r[2];
unsigned int sel;
};
+static int mp886x_set_ramp(struct regulator_dev *rdev, int ramp)
+{
+ struct mp886x_device_info *di = rdev_get_drvdata(rdev);
+ const struct mp886x_cfg_info *ci = di->ci;
+ int reg = -1, i;
+
+ for (i = 0; i < ARRAY_SIZE(ci->slew_rates); i++) {
+ if (ramp <= ci->slew_rates[i])
+ reg = i;
+ else
+ break;
+ }
+
+ if (reg < 0) {
+ dev_err(di->dev, "unsupported ramp value %d\n", ramp);
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, MP886X_SYSCNTLREG1,
+ MP886X_SLEW_MASK, reg << MP886X_SLEW_SHIFT);
+}
+
+static void mp886x_set_switch_freq(struct mp886x_device_info *di,
+ struct regmap *regmap,
+ u32 freq)
+{
+ const struct mp886x_cfg_info *ci = di->ci;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ci->switch_freq); i++) {
+ if (freq == ci->switch_freq[i]) {
+ regmap_update_bits(regmap, ci->fs_reg,
+ 0x3 << ci->fs_shift, i << ci->fs_shift);
+ return;
+ }
+ }
+
+ dev_err(di->dev, "invalid frequency %d\n", freq);
+}
+
static int mp886x_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
switch (mode) {
@@ -117,6 +169,29 @@ static const struct regulator_ops mp8869_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
+ .set_ramp_delay = mp886x_set_ramp,
+};
+
+static const struct mp886x_cfg_info mp8869_ci = {
+ .rops = &mp8869_regulator_ops,
+ .slew_rates = {
+ 40000,
+ 30000,
+ 20000,
+ 10000,
+ 5000,
+ 2500,
+ 1250,
+ 625,
+ },
+ .switch_freq = {
+ 500000,
+ 750000,
+ 1000000,
+ 1250000,
+ },
+ .fs_reg = MP8869_SYSCNTLREG2,
+ .fs_shift = 4,
};
static int mp8867_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
@@ -173,6 +248,29 @@ static const struct regulator_ops mp8867_regulator_ops = {
.is_enabled = regulator_is_enabled_regmap,
.set_mode = mp886x_set_mode,
.get_mode = mp886x_get_mode,
+ .set_ramp_delay = mp886x_set_ramp,
+};
+
+static const struct mp886x_cfg_info mp8867_ci = {
+ .rops = &mp8867_regulator_ops,
+ .slew_rates = {
+ 64000,
+ 32000,
+ 16000,
+ 8000,
+ 4000,
+ 2000,
+ 1000,
+ 500,
+ },
+ .switch_freq = {
+ 500000,
+ 750000,
+ 1000000,
+ 1500000,
+ },
+ .fs_reg = MP886X_SYSCNTLREG1,
+ .fs_shift = 1,
};
static int mp886x_regulator_register(struct mp886x_device_info *di,
@@ -183,7 +281,7 @@ static int mp886x_regulator_register(struct mp886x_device_info *di,
rdesc->name = "mp886x-reg";
rdesc->supply_name = "vin";
- rdesc->ops = of_device_get_match_data(di->dev);
+ rdesc->ops = di->ci->rops;
rdesc->type = REGULATOR_VOLTAGE;
rdesc->n_voltages = 128;
rdesc->enable_reg = MP886X_SYSCNTLREG1;
@@ -213,6 +311,7 @@ static int mp886x_i2c_probe(struct i2c_client *client)
struct mp886x_device_info *di;
struct regulator_config config = { };
struct regmap *regmap;
+ u32 freq;
int ret;
di = devm_kzalloc(dev, sizeof(struct mp886x_device_info), GFP_KERNEL);
@@ -234,6 +333,7 @@ static int mp886x_i2c_probe(struct i2c_client *client)
if (IS_ERR(di->en_gpio))
return PTR_ERR(di->en_gpio);
+ di->ci = of_device_get_match_data(dev);
di->dev = dev;
regmap = devm_regmap_init_i2c(client, &mp886x_regmap_config);
@@ -249,6 +349,9 @@ static int mp886x_i2c_probe(struct i2c_client *client)
config.driver_data = di;
config.of_node = np;
+ if (!of_property_read_u32(np, "mps,switch-frequency-hz", &freq))
+ mp886x_set_switch_freq(di, regmap, freq);
+
ret = mp886x_regulator_register(di, &config);
if (ret < 0)
dev_err(dev, "Failed to register regulator!\n");
@@ -258,11 +361,11 @@ static int mp886x_i2c_probe(struct i2c_client *client)
static const struct of_device_id mp886x_dt_ids[] = {
{
.compatible = "mps,mp8867",
- .data = &mp8867_regulator_ops
+ .data = &mp8867_ci
},
{
.compatible = "mps,mp8869",
- .data = &mp8869_regulator_ops
+ .data = &mp8869_ci
},
{ }
};
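
The mp886x change swaps the of_device_id .data payload from bare regulator ops to a per-variant config struct, which probe() fetches once with of_device_get_match_data(). A reduced sketch of the pattern (struct and function names are illustrative):

struct foo_cfg {
	const struct regulator_ops *rops;
	/* per-variant tables (slew rates, switch frequencies, ...) */
};

static int foo_probe(struct i2c_client *client)
{
	const struct foo_cfg *cfg = of_device_get_match_data(&client->dev);

	if (!cfg)
		return -ENODEV;
	/* cfg->rops etc. parameterize the rest of probe */
	return 0;
}
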
diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
new file mode 100644
index 000000000000..15308ee29c13
--- /dev/null
+++ b/drivers/regulator/mt6360-regulator.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020 MediaTek Inc.
+//
+// Author: Gene Chen <gene_chen@richtek.com>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
+
+enum {
+ MT6360_REGULATOR_BUCK1 = 0,
+ MT6360_REGULATOR_BUCK2,
+ MT6360_REGULATOR_LDO6,
+ MT6360_REGULATOR_LDO7,
+ MT6360_REGULATOR_LDO1,
+ MT6360_REGULATOR_LDO2,
+ MT6360_REGULATOR_LDO3,
+ MT6360_REGULATOR_LDO5,
+ MT6360_REGULATOR_MAX,
+};
+
+struct mt6360_irq_mapping {
+ const char *name;
+ irq_handler_t handler;
+};
+
+struct mt6360_regulator_desc {
+ const struct regulator_desc desc;
+ unsigned int mode_reg;
+ unsigned int mode_mask;
+ unsigned int state_reg;
+ unsigned int state_mask;
+ const struct mt6360_irq_mapping *irq_tables;
+ int irq_table_size;
+};
+
+struct mt6360_regulator_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t mt6360_pgb_event_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_FAIL, NULL);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6360_oc_event_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6360_ov_event_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_REGULATION_OUT, NULL);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6360_uv_event_handler(int irq, void *data)
+{
+ struct regulator_dev *rdev = data;
+
+ regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE, NULL);
+ return IRQ_HANDLED;
+}
+
+static const struct mt6360_irq_mapping buck1_irq_tbls[] = {
+ { "buck1_pgb_evt", mt6360_pgb_event_handler },
+ { "buck1_oc_evt", mt6360_oc_event_handler },
+ { "buck1_ov_evt", mt6360_ov_event_handler },
+ { "buck1_uv_evt", mt6360_uv_event_handler },
+};
+
+static const struct mt6360_irq_mapping buck2_irq_tbls[] = {
+ { "buck2_pgb_evt", mt6360_pgb_event_handler },
+ { "buck2_oc_evt", mt6360_oc_event_handler },
+ { "buck2_ov_evt", mt6360_ov_event_handler },
+ { "buck2_uv_evt", mt6360_uv_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo6_irq_tbls[] = {
+ { "ldo6_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo6_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo7_irq_tbls[] = {
+ { "ldo7_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo7_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo1_irq_tbls[] = {
+ { "ldo1_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo1_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo2_irq_tbls[] = {
+ { "ldo2_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo2_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo3_irq_tbls[] = {
+ { "ldo3_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo3_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct mt6360_irq_mapping ldo5_irq_tbls[] = {
+ { "ldo5_pgb_evt", mt6360_pgb_event_handler },
+ { "ldo5_oc_evt", mt6360_oc_event_handler },
+};
+
+static const struct linear_range buck_vout_ranges[] = {
+ REGULATOR_LINEAR_RANGE(300000, 0x00, 0xc7, 5000),
+ REGULATOR_LINEAR_RANGE(1300000, 0xc8, 0xff, 0),
+};
+
+static const struct linear_range ldo_vout_ranges1[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0x09, 10000),
+ REGULATOR_LINEAR_RANGE(600000, 0x0a, 0x10, 0),
+ REGULATOR_LINEAR_RANGE(610000, 0x11, 0x19, 10000),
+ REGULATOR_LINEAR_RANGE(700000, 0x1a, 0x20, 0),
+ REGULATOR_LINEAR_RANGE(710000, 0x21, 0x29, 10000),
+ REGULATOR_LINEAR_RANGE(800000, 0x2a, 0x30, 0),
+ REGULATOR_LINEAR_RANGE(810000, 0x31, 0x39, 10000),
+ REGULATOR_LINEAR_RANGE(900000, 0x3a, 0x40, 0),
+ REGULATOR_LINEAR_RANGE(910000, 0x41, 0x49, 10000),
+ REGULATOR_LINEAR_RANGE(1000000, 0x4a, 0x50, 0),
+ REGULATOR_LINEAR_RANGE(1010000, 0x51, 0x59, 10000),
+ REGULATOR_LINEAR_RANGE(1100000, 0x5a, 0x60, 0),
+ REGULATOR_LINEAR_RANGE(1110000, 0x61, 0x69, 10000),
+ REGULATOR_LINEAR_RANGE(1200000, 0x6a, 0x70, 0),
+ REGULATOR_LINEAR_RANGE(1210000, 0x71, 0x79, 10000),
+ REGULATOR_LINEAR_RANGE(1300000, 0x7a, 0x80, 0),
+ REGULATOR_LINEAR_RANGE(1310000, 0x81, 0x89, 10000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x8a, 0x90, 0),
+ REGULATOR_LINEAR_RANGE(1410000, 0x91, 0x99, 10000),
+ REGULATOR_LINEAR_RANGE(1500000, 0x9a, 0xa0, 0),
+ REGULATOR_LINEAR_RANGE(1510000, 0xa1, 0xa9, 10000),
+ REGULATOR_LINEAR_RANGE(1600000, 0xaa, 0xb0, 0),
+ REGULATOR_LINEAR_RANGE(1610000, 0xb1, 0xb9, 10000),
+ REGULATOR_LINEAR_RANGE(1700000, 0xba, 0xc0, 0),
+ REGULATOR_LINEAR_RANGE(1710000, 0xc1, 0xc9, 10000),
+ REGULATOR_LINEAR_RANGE(1800000, 0xca, 0xd0, 0),
+ REGULATOR_LINEAR_RANGE(1810000, 0xd1, 0xd9, 10000),
+ REGULATOR_LINEAR_RANGE(1900000, 0xda, 0xe0, 0),
+ REGULATOR_LINEAR_RANGE(1910000, 0xe1, 0xe9, 10000),
+ REGULATOR_LINEAR_RANGE(2000000, 0xea, 0xf0, 0),
+ REGULATOR_LINEAR_RANGE(2010000, 0xf1, 0xf9, 10000),
+ REGULATOR_LINEAR_RANGE(2100000, 0xfa, 0xff, 0),
+};
+
+static const struct linear_range ldo_vout_ranges2[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x09, 10000),
+ REGULATOR_LINEAR_RANGE(1300000, 0x0a, 0x10, 0),
+ REGULATOR_LINEAR_RANGE(1310000, 0x11, 0x19, 10000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x1a, 0x1f, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 0x20, 0x29, 10000),
+ REGULATOR_LINEAR_RANGE(1600000, 0x2a, 0x2f, 0),
+ REGULATOR_LINEAR_RANGE(1700000, 0x30, 0x39, 10000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x3a, 0x40, 0),
+ REGULATOR_LINEAR_RANGE(1810000, 0x41, 0x49, 10000),
+ REGULATOR_LINEAR_RANGE(1900000, 0x4a, 0x4f, 0),
+ REGULATOR_LINEAR_RANGE(2000000, 0x50, 0x59, 10000),
+ REGULATOR_LINEAR_RANGE(2100000, 0x5a, 0x60, 0),
+ REGULATOR_LINEAR_RANGE(2110000, 0x61, 0x69, 10000),
+ REGULATOR_LINEAR_RANGE(2200000, 0x6a, 0x6f, 0),
+ REGULATOR_LINEAR_RANGE(2500000, 0x70, 0x79, 10000),
+ REGULATOR_LINEAR_RANGE(2600000, 0x7a, 0x7f, 0),
+ REGULATOR_LINEAR_RANGE(2700000, 0x80, 0x89, 10000),
+ REGULATOR_LINEAR_RANGE(2800000, 0x8a, 0x90, 0),
+ REGULATOR_LINEAR_RANGE(2810000, 0x91, 0x99, 10000),
+ REGULATOR_LINEAR_RANGE(2900000, 0x9a, 0xa0, 0),
+ REGULATOR_LINEAR_RANGE(2910000, 0xa1, 0xa9, 10000),
+ REGULATOR_LINEAR_RANGE(3000000, 0xaa, 0xb0, 0),
+ REGULATOR_LINEAR_RANGE(3010000, 0xb1, 0xb9, 10000),
+ REGULATOR_LINEAR_RANGE(3100000, 0xba, 0xc0, 0),
+ REGULATOR_LINEAR_RANGE(3110000, 0xc1, 0xc9, 10000),
+ REGULATOR_LINEAR_RANGE(3200000, 0xca, 0xcf, 0),
+ REGULATOR_LINEAR_RANGE(3300000, 0xd0, 0xd9, 10000),
+ REGULATOR_LINEAR_RANGE(3400000, 0xda, 0xe0, 0),
+ REGULATOR_LINEAR_RANGE(3410000, 0xe1, 0xe9, 10000),
+ REGULATOR_LINEAR_RANGE(3500000, 0xea, 0xf0, 0),
+ REGULATOR_LINEAR_RANGE(3510000, 0xf1, 0xf9, 10000),
+ REGULATOR_LINEAR_RANGE(3600000, 0xfa, 0xff, 0),
+};
+
+static const struct linear_range ldo_vout_ranges3[] = {
+ REGULATOR_LINEAR_RANGE(2700000, 0x00, 0x09, 10000),
+ REGULATOR_LINEAR_RANGE(2800000, 0x0a, 0x10, 0),
+ REGULATOR_LINEAR_RANGE(2810000, 0x11, 0x19, 10000),
+ REGULATOR_LINEAR_RANGE(2900000, 0x1a, 0x20, 0),
+ REGULATOR_LINEAR_RANGE(2910000, 0x21, 0x29, 10000),
+ REGULATOR_LINEAR_RANGE(3000000, 0x2a, 0x30, 0),
+ REGULATOR_LINEAR_RANGE(3010000, 0x31, 0x39, 10000),
+ REGULATOR_LINEAR_RANGE(3100000, 0x3a, 0x40, 0),
+ REGULATOR_LINEAR_RANGE(3110000, 0x41, 0x49, 10000),
+ REGULATOR_LINEAR_RANGE(3200000, 0x4a, 0x4f, 0),
+ REGULATOR_LINEAR_RANGE(3300000, 0x50, 0x59, 10000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x5a, 0x60, 0),
+ REGULATOR_LINEAR_RANGE(3410000, 0x61, 0x69, 10000),
+ REGULATOR_LINEAR_RANGE(3500000, 0x6a, 0x70, 0),
+ REGULATOR_LINEAR_RANGE(3510000, 0x71, 0x79, 10000),
+ REGULATOR_LINEAR_RANGE(3600000, 0x7a, 0x7f, 0),
+};
+
+static int mt6360_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ const struct mt6360_regulator_desc *rdesc = (struct mt6360_regulator_desc *)rdev->desc;
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int shift = ffs(rdesc->mode_mask) - 1;
+ unsigned int val;
+ int ret;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = MT6360_OPMODE_NORMAL;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = MT6360_OPMODE_ULP;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = MT6360_OPMODE_LP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(regmap, rdesc->mode_reg, rdesc->mode_mask, val << shift);
+ if (ret) {
+ dev_err(&rdev->dev, "%s: fail (%d)\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned int mt6360_regulator_get_mode(struct regulator_dev *rdev)
+{
+ const struct mt6360_regulator_desc *rdesc = (struct mt6360_regulator_desc *)rdev->desc;
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ int shift = ffs(rdesc->mode_mask) - 1;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, rdesc->mode_reg, &val);
+ if (ret)
+ return ret;
+
+ val &= rdesc->mode_mask;
+ val >>= shift;
+
+ switch (val) {
+ case MT6360_OPMODE_LP:
+ return REGULATOR_MODE_IDLE;
+ case MT6360_OPMODE_ULP:
+ return REGULATOR_MODE_STANDBY;
+ case MT6360_OPMODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt6360_regulator_get_status(struct regulator_dev *rdev)
+{
+ const struct mt6360_regulator_desc *rdesc = (struct mt6360_regulator_desc *)rdev->desc;
+ struct regmap *regmap = rdev_get_regmap(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, rdesc->state_reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & rdesc->state_mask)
+ return REGULATOR_STATUS_ON;
+
+ return REGULATOR_STATUS_OFF;
+}
+
+static const struct regulator_ops mt6360_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_mode = mt6360_regulator_set_mode,
+ .get_mode = mt6360_regulator_get_mode,
+ .get_status = mt6360_regulator_get_status,
+};
+
+static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
+{
+ switch (hw_mode) {
+ case MT6360_OPMODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case MT6360_OPMODE_LP:
+ return REGULATOR_MODE_IDLE;
+ case MT6360_OPMODE_ULP:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
+ mreg, mmask, streg, stmask, vranges, \
+ vcnts, offon_delay, irq_tbls) \
+{ \
+ .desc = { \
+ .name = #_name, \
+ .supply_name = #_sname, \
+ .id = MT6360_REGULATOR_##_name, \
+ .of_match = of_match_ptr(#_name), \
+ .regulators_node = of_match_ptr("regulator"), \
+ .of_map_mode = mt6360_regulator_of_map_mode, \
+ .owner = THIS_MODULE, \
+ .ops = &mt6360_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+ .enable_reg = ereg, \
+ .enable_mask = emask, \
+ .linear_ranges = vranges, \
+ .n_linear_ranges = ARRAY_SIZE(vranges), \
+ .n_voltages = vcnts, \
+ .off_on_delay = offon_delay, \
+ }, \
+ .mode_reg = mreg, \
+ .mode_mask = mmask, \
+ .state_reg = streg, \
+ .state_mask = stmask, \
+ .irq_tables = irq_tbls, \
+ .irq_table_size = ARRAY_SIZE(irq_tbls), \
+}
+
+static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
+ MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ buck_vout_ranges, 256, 0, buck1_irq_tbls),
+ MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ buck_vout_ranges, 256, 0, buck2_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
+ MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
+};
+
+static int mt6360_regulator_irq_register(struct platform_device *pdev,
+ struct regulator_dev *rdev,
+ const struct mt6360_irq_mapping *tbls,
+ int tbl_size)
+{
+ int i, irq, ret;
+
+ for (i = 0; i < tbl_size; i++) {
+ const struct mt6360_irq_mapping *irq_desc = tbls + i;
+
+ irq = platform_get_irq_byname(pdev, irq_desc->name);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Fail to get %s irq\n", irq_desc->name);
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, irq_desc->handler, 0,
+ irq_desc->name, rdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to request %s irq\n", irq_desc->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mt6360_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6360_regulator_data *mrd;
+ struct regulator_config config = {};
+ int i, ret;
+
+ mrd = devm_kzalloc(&pdev->dev, sizeof(*mrd), GFP_KERNEL);
+ if (!mrd)
+ return -ENOMEM;
+
+ mrd->dev = &pdev->dev;
+
+ mrd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!mrd->regmap) {
+ dev_err(&pdev->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ config.dev = pdev->dev.parent;
+ config.driver_data = mrd;
+ config.regmap = mrd->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(mt6360_regulator_descs); i++) {
+ const struct mt6360_regulator_desc *rdesc = mt6360_regulator_descs + i;
+ struct regulator_dev *rdev;
+
+ rdev = devm_regulator_register(&pdev->dev, &rdesc->desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register %d regulator\n", i);
+ return PTR_ERR(rdev);
+ }
+
+ ret = mt6360_regulator_irq_register(pdev, rdev, rdesc->irq_tables,
+ rdesc->irq_table_size);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register %d regulator irqs\n", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id mt6360_regulator_id_table[] = {
+ { "mt6360-regulator", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, mt6360_regulator_id_table);
+
+static struct platform_driver mt6360_regulator_driver = {
+ .driver = {
+ .name = "mt6360-regulator",
+ },
+ .probe = mt6360_regulator_probe,
+ .id_table = mt6360_regulator_id_table,
+};
+module_platform_driver(mt6360_regulator_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 Regulator Driver");
+MODULE_LICENSE("GPL v2");
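
As a worked example of the mt6360 tables above: with buck_vout_ranges, the generic helper resolves a selector as 300000 + sel * 5000 uV up to 0xc7, then a flat 1300000 uV plateau for 0xc8..0xff:

	int uV;

	uV = regulator_list_voltage_linear_range(rdev, 0x64);	/* 800000 uV */
	uV = regulator_list_voltage_linear_range(rdev, 0xd0);	/* 1300000 uV */
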
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index eb5822bf53e0..cb29421d745a 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -90,7 +90,7 @@ static int pca9450_dvs_set_ramp_delay(struct regulator_dev *rdev,
BUCK1_RAMP_MASK, ramp_value << 6);
}
-static struct regulator_ops pca9450_dvs_buck_regulator_ops = {
+static const struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -101,7 +101,7 @@ static struct regulator_ops pca9450_dvs_buck_regulator_ops = {
.set_ramp_delay = pca9450_dvs_set_ramp_delay,
};
-static struct regulator_ops pca9450_buck_regulator_ops = {
+static const struct regulator_ops pca9450_buck_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -111,7 +111,7 @@ static struct regulator_ops pca9450_buck_regulator_ops = {
.set_voltage_time_sel = regulator_set_voltage_time_sel,
};
-static struct regulator_ops pca9450_ldo_regulator_ops = {
+static const struct regulator_ops pca9450_ldo_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 787ced918372..48238846f45c 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -233,13 +233,10 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_VDD_FLT) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
@@ -252,13 +249,10 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
if (reg_val & PV88060_E_OVER_TEMP) {
for (i = 0; i < PV88060_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index a444f68af1a8..2a74cc05acfe 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -334,13 +334,10 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_VDD_FLT) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
@@ -353,13 +350,10 @@ static irqreturn_t pv88080_irq_handler(int irq, void *data)
if (reg_val & PV88080_E_OVER_TEMP) {
for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 784729ec2182..a80176bdf8ec 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -226,13 +226,10 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_VDD_FLT) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
@@ -245,13 +242,10 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
if (reg_val & PV88090_E_OVER_TEMP) {
for (i = 0; i < PV88090_MAX_REGULATORS; i++) {
- if (chip->rdev[i] != NULL) {
- regulator_lock(chip->rdev[i]);
+ if (chip->rdev[i] != NULL)
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP,
NULL);
- regulator_unlock(chip->rdev[i]);
- }
}
err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 990bd50771d8..7629476d94ae 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -390,7 +390,7 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id pwm_of_match[] = {
+static const struct of_device_id __maybe_unused pwm_of_match[] = {
{ .compatible = "pwm-regulator" },
{ },
};
diff --git a/drivers/regulator/qcom-labibb-regulator.c b/drivers/regulator/qcom-labibb-regulator.c
index 8c7dd1928380..8ccf572394a2 100644
--- a/drivers/regulator/qcom-labibb-regulator.c
+++ b/drivers/regulator/qcom-labibb-regulator.c
@@ -44,16 +44,16 @@ struct labibb_regulator_data {
const char *name;
u8 type;
u16 base;
- struct regulator_desc *desc;
+ const struct regulator_desc *desc;
};
-static struct regulator_ops qcom_labibb_ops = {
+static const struct regulator_ops qcom_labibb_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_desc pmi8998_lab_desc = {
+static const struct regulator_desc pmi8998_lab_desc = {
.enable_mask = LAB_ENABLE_CTL_MASK,
.enable_reg = (PMI8998_LAB_REG_BASE + REG_LABIBB_ENABLE_CTL),
.enable_val = LABIBB_CONTROL_ENABLE,
@@ -65,7 +65,7 @@ static struct regulator_desc pmi8998_lab_desc = {
.ops = &qcom_labibb_ops,
};
-static struct regulator_desc pmi8998_ibb_desc = {
+static const struct regulator_desc pmi8998_ibb_desc = {
.enable_mask = IBB_ENABLE_CTL_MASK,
.enable_reg = (PMI8998_IBB_REG_BASE + REG_LABIBB_ENABLE_CTL),
.enable_val = LABIBB_CONTROL_ENABLE,
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 08dcc614efa7..d488325499a9 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -967,7 +967,7 @@ static int rpmh_regulator_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id rpmh_regulator_match_table[] = {
+static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
{
.compatible = "qcom,pm8005-rpmh-regulators",
.data = pm8005_vreg_data,
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index a87b56bc29fa..bb944ee5fe3b 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -403,6 +403,24 @@ static const struct regulator_desc pm8950_pldo = {
.ops = &rpm_smps_ldo_ops,
};
+static const struct regulator_desc pm8953_lnldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1380000, 8, 15, 120000),
+ REGULATOR_LINEAR_RANGE(690000, 0, 7, 60000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 16,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8953_ult_nldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 94,
+ .ops = &rpm_smps_ldo_ops,
+};
static const struct regulator_desc pm8994_hfsmps = {
.linear_ranges = (struct linear_range[]) {
@@ -541,6 +559,69 @@ static const struct regulator_desc pmi8998_bob = {
.ops = &rpm_bob_ops,
};
+static const struct regulator_desc pm660_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(355000, 0, 199, 5000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 200,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660_hfsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 216, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 217,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660_ht_nldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(312000, 0, 124, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 125,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660_ht_lvpldo = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 62, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 63,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660_nldo660 = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 123, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 124,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660_pldo660 = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1504000, 0, 255, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 256,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm660l_bob = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1800000, 0, 84, 32000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 85,
+ .ops = &rpm_bob_ops,
+};
+
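As a reading aid for the new descriptors (not part of the patch): REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) maps a selector linearly onto microvolts, and the .n_voltages counts above follow directly from the selector span:

	/* uV(sel) = min_uV + (sel - min_sel) * step_uV
	 * e.g. pm660_ftsmps: REGULATOR_LINEAR_RANGE(355000, 0, 199, 5000)
	 *   sel = 0   -> 355000 uV
	 *   sel = 199 -> 355000 + 199 * 5000 = 1350000 uV
	 * hence .n_voltages = 199 - 0 + 1 = 200
	 */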
static const struct regulator_desc pms405_hfsmps3 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
@@ -791,6 +872,41 @@ static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8953_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8998_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8998_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8950_ftsmps2p5, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8950_ftsmps2p5, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_hfsmps, "vdd_s7" },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8953_ult_nldo, "vdd_l1" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8953_ult_nldo, "vdd_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8953_ult_nldo, "vdd_l2_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8950_ult_pldo, "vdd_l8_l11_l12_l13_l14_l15" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8950_ult_pldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8953_ult_nldo, "vdd_l4_l5_l6_l7_l16_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8953_lnldo, "vdd_l20" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8953_lnldo, "vdd_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8950_ult_pldo, "vdd_l9_l10_l17_l18_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8953_ult_nldo, "vdd_l23" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
@@ -902,6 +1018,54 @@ static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm660_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
+ /* l4 is inaccessible on PM660 */
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
+ { "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
+ { "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
+ { "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
+ { "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
+ { "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
+ { }
+};
+
static const struct rpm_regulator_data rpm_pms405_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pms405_hfsmps3, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pms405_hfsmps3, "vdd_s2" },
@@ -930,8 +1094,11 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
+ { .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
+ { .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
+ { .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{ .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 5ee7c5305d95..e62e1d72d943 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -135,6 +135,18 @@ enum spmi_regulator_subtype {
SPMI_REGULATOR_SUBTYPE_LV_P600 = 0x2b,
SPMI_REGULATOR_SUBTYPE_LV_P1200 = 0x2c,
SPMI_REGULATOR_SUBTYPE_LV_P450 = 0x2d,
+ SPMI_REGULATOR_SUBTYPE_HT_N300_ST = 0x30,
+ SPMI_REGULATOR_SUBTYPE_HT_N600_ST = 0x31,
+ SPMI_REGULATOR_SUBTYPE_HT_N1200_ST = 0x32,
+ SPMI_REGULATOR_SUBTYPE_HT_LVP150 = 0x3b,
+ SPMI_REGULATOR_SUBTYPE_HT_LVP300 = 0x3c,
+ SPMI_REGULATOR_SUBTYPE_L660_N300_ST = 0x42,
+ SPMI_REGULATOR_SUBTYPE_L660_N600_ST = 0x43,
+ SPMI_REGULATOR_SUBTYPE_L660_P50 = 0x46,
+ SPMI_REGULATOR_SUBTYPE_L660_P150 = 0x47,
+ SPMI_REGULATOR_SUBTYPE_L660_P600 = 0x49,
+ SPMI_REGULATOR_SUBTYPE_L660_LVP150 = 0x4d,
+ SPMI_REGULATOR_SUBTYPE_L660_LVP600 = 0x4f,
SPMI_REGULATOR_SUBTYPE_LV100 = 0x01,
SPMI_REGULATOR_SUBTYPE_LV300 = 0x02,
SPMI_REGULATOR_SUBTYPE_MV300 = 0x08,
@@ -511,6 +523,22 @@ static struct spmi_voltage_range ult_pldo_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
};
+static struct spmi_voltage_range pldo660_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 3544000, 3544000, 8000),
+};
+
+static struct spmi_voltage_range nldo660_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 320000, 320000, 1304000, 1304000, 8000),
+};
+
+static struct spmi_voltage_range ht_lvpldo_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 1504000, 1504000, 2000000, 2000000, 8000),
+};
+
+static struct spmi_voltage_range ht_nldo_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 312000, 312000, 1304000, 1304000, 8000),
+};
+
static struct spmi_voltage_range hfs430_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 320000, 320000, 2040000, 2040000, 8000),
};
@@ -530,6 +558,10 @@ static DEFINE_SPMI_SET_POINTS(ult_lo_smps);
static DEFINE_SPMI_SET_POINTS(ult_ho_smps);
static DEFINE_SPMI_SET_POINTS(ult_nldo);
static DEFINE_SPMI_SET_POINTS(ult_pldo);
+static DEFINE_SPMI_SET_POINTS(pldo660);
+static DEFINE_SPMI_SET_POINTS(nldo660);
+static DEFINE_SPMI_SET_POINTS(ht_lvpldo);
+static DEFINE_SPMI_SET_POINTS(ht_nldo);
static DEFINE_SPMI_SET_POINTS(hfs430);
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
@@ -1443,6 +1475,30 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(LDO, LV_P300, 0, INF, LDO, ldo, pldo, 10000),
SPMI_VREG(LDO, LV_P600, 0, INF, LDO, ldo, pldo, 10000),
SPMI_VREG(LDO, LV_P1200, 0, INF, LDO, ldo, pldo, 10000),
+ SPMI_VREG(LDO, HT_N300_ST, 0, INF, FTSMPS426, ftsmps426,
+ ht_nldo, 30000),
+ SPMI_VREG(LDO, HT_N600_ST, 0, INF, FTSMPS426, ftsmps426,
+ ht_nldo, 30000),
+ SPMI_VREG(LDO, HT_N1200_ST, 0, INF, FTSMPS426, ftsmps426,
+ ht_nldo, 30000),
+ SPMI_VREG(LDO, HT_LVP150, 0, INF, FTSMPS426, ftsmps426,
+ ht_lvpldo, 10000),
+ SPMI_VREG(LDO, HT_LVP300, 0, INF, FTSMPS426, ftsmps426,
+ ht_lvpldo, 10000),
+ SPMI_VREG(LDO, L660_N300_ST, 0, INF, FTSMPS426, ftsmps426,
+ nldo660, 10000),
+ SPMI_VREG(LDO, L660_N600_ST, 0, INF, FTSMPS426, ftsmps426,
+ nldo660, 10000),
+ SPMI_VREG(LDO, L660_P50, 0, INF, FTSMPS426, ftsmps426,
+ pldo660, 10000),
+ SPMI_VREG(LDO, L660_P150, 0, INF, FTSMPS426, ftsmps426,
+ pldo660, 10000),
+ SPMI_VREG(LDO, L660_P600, 0, INF, FTSMPS426, ftsmps426,
+ pldo660, 10000),
+ SPMI_VREG(LDO, L660_LVP150, 0, INF, FTSMPS426, ftsmps426,
+ ht_lvpldo, 10000),
+ SPMI_VREG(LDO, L660_LVP600, 0, INF, FTSMPS426, ftsmps426,
+ ht_lvpldo, 10000),
SPMI_VREG_VS(LV100, 0, INF),
SPMI_VREG_VS(LV300, 0, INF),
SPMI_VREG_VS(MV300, 0, INF),
@@ -1633,45 +1689,43 @@ static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
return ret;
/* Set up enable pin control. */
- if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
- || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO
- || type == SPMI_REGULATOR_LOGICAL_TYPE_VS)
- && !(data->pin_ctrl_enable
- & SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
- ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
- ~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
- ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
- data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ if (!(data->pin_ctrl_enable & SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+ switch (type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_LDO:
+ case SPMI_REGULATOR_LOGICAL_TYPE_VS:
+ ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
+ ~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
+ data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ break;
+ default:
+ break;
+ }
}
/* Set up mode pin control. */
- if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
- || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO)
- && !(data->pin_ctrl_hpm
- & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
- ctrl_reg[SPMI_COMMON_IDX_MODE] &=
- ~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
- ctrl_reg[SPMI_COMMON_IDX_MODE] |=
- data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
- }
-
- if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS
- && !(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
- ctrl_reg[SPMI_COMMON_IDX_MODE] &=
- ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
- ctrl_reg[SPMI_COMMON_IDX_MODE] |=
- data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
- }
-
- if ((type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
- || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
- || type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO)
- && !(data->pin_ctrl_hpm
- & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
- ctrl_reg[SPMI_COMMON_IDX_MODE] &=
- ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
- ctrl_reg[SPMI_COMMON_IDX_MODE] |=
- data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ if (!(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ switch (type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_LDO:
+ ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+ ~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+ data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+ break;
+ case SPMI_REGULATOR_LOGICAL_TYPE_VS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO:
+ ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+ ~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+ data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ break;
+ default:
+ break;
+ }
}
/* Write back any control register values that were modified. */
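The refactor above folds the previous if-chains into two switch statements without changing which mask each logical type receives; the effective mapping is:

	/* Enable pin control (unless HW default is requested):
	 *   SMPS, LDO, VS                         -> SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK
	 * Mode pin control (unless HW default is requested):
	 *   SMPS, LDO                             -> SPMI_COMMON_MODE_FOLLOW_ALL_MASK
	 *   VS, ULT_LO_SMPS, ULT_HO_SMPS, ULT_LDO -> SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK
	 * All other logical types are left untouched.
	 */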
@@ -1960,6 +2014,55 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm660_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s3", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l6_l7", },
+ { "l2", 0x4100, "vdd_l2_l3", },
+ { "l3", 0x4200, "vdd_l2_l3", },
+ /* l4 is inaccessible on PM660 */
+ { "l5", 0x4400, "vdd_l5", },
+ { "l6", 0x4500, "vdd_l1_l6_l7", },
+ { "l7", 0x4600, "vdd_l1_l6_l7", },
+ { "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
+ { "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
+ { "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
+ { "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
+ { "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
+ { }
+};
+
+static const struct spmi_regulator_data pm660l_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "l1", 0x4000, "vdd_l1_l9_l10", },
+ { "l2", 0x4100, "vdd_l2", },
+ { "l3", 0x4200, "vdd_l3_l5_l7_l8", },
+ { "l4", 0x4300, "vdd_l4_l6", },
+ { "l5", 0x4400, "vdd_l3_l5_l7_l8", },
+ { "l6", 0x4500, "vdd_l4_l6", },
+ { "l7", 0x4600, "vdd_l3_l5_l7_l8", },
+ { "l8", 0x4700, "vdd_l3_l5_l7_l8", },
+ { "l9", 0x4800, "vdd_l1_l9_l10", },
+ { "l10", 0x4900, "vdd_l1_l9_l10", },
+ { }
+};
+
static const struct spmi_regulator_data pm8004_regulators[] = {
{ "s2", 0x1700, "vdd_s2", },
{ "s5", 0x2000, "vdd_s5", },
@@ -1988,6 +2091,8 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
+ { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
+ { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
{ }
};
diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
index 8ba947f3585f..457788b50572 100644
--- a/drivers/regulator/qcom_usb_vbus-regulator.c
+++ b/drivers/regulator/qcom_usb_vbus-regulator.c
@@ -63,6 +63,7 @@ static int qcom_usb_vbus_regulator_probe(struct platform_device *pdev)
qcom_usb_vbus_rdesc.enable_mask = OTG_EN;
config.dev = dev;
config.init_data = init_data;
+ config.of_node = dev->of_node;
config.regmap = regmap;
rdev = devm_regulator_register(dev, &qcom_usb_vbus_rdesc, &config);
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
new file mode 100644
index 000000000000..ee46bfbf5eee
--- /dev/null
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ *
+ * Based on rpi_touchscreen.c by Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* I2C registers of the Atmel microcontroller. */
+#define REG_ID 0x80
+#define REG_PORTA 0x81
+#define REG_PORTA_HF BIT(2)
+#define REG_PORTA_VF BIT(3)
+#define REG_PORTB 0x82
+#define REG_POWERON 0x85
+#define REG_PWM 0x86
+
+static const struct regmap_config attiny_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_PWM,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int attiny_lcd_power_enable(struct regulator_dev *rdev)
+{
+ unsigned int data;
+
+ regmap_write(rdev->regmap, REG_POWERON, 1);
+ /* Wait for nPWRDWN to go low to indicate poweron is done. */
+ regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data,
+ data & BIT(0), 10, 1000000);
+
+ /* Default to the same orientation as the closed source
+ * firmware used for the panel. Runtime rotation
+ * configuration will be supported using VC4's plane
+ * orientation bits.
+ */
+ regmap_write(rdev->regmap, REG_PORTA, BIT(2));
+
+ return 0;
+}
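For readers unfamiliar with the helper used above: regmap_read_poll_timeout(map, reg, val, cond, sleep_us, timeout_us) re-reads reg into val about every sleep_us microseconds until cond is true, returning -ETIMEDOUT otherwise; here it allows up to one second for PORTB bit 0 to signal power-on. A loose open-coded equivalent (error handling elided):

	/* simplified sketch of the poll above */
	unsigned int data;
	long budget_us = 1000000;			/* timeout_us */

	for (;;) {
		regmap_read(rdev->regmap, REG_PORTB, &data);
		if (data & BIT(0))
			break;				/* power-on complete */
		if ((budget_us -= 10) <= 0)
			return -ETIMEDOUT;
		usleep_range(10, 20);			/* sleep_us */
	}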
+
+static int attiny_lcd_power_disable(struct regulator_dev *rdev)
+{
+ regmap_write(rdev->regmap, REG_PWM, 0);
+ regmap_write(rdev->regmap, REG_POWERON, 0);
+ udelay(1);
+ return 0;
+}
+
+static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev)
+{
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, REG_POWERON, &data);
+ if (ret < 0)
+ return ret;
+
+ if (!(data & BIT(0)))
+ return 0;
+
+ ret = regmap_read(rdev->regmap, REG_PORTB, &data);
+ if (ret < 0)
+ return ret;
+
+ return data & BIT(0);
+}
+
+static const struct regulator_init_data attiny_regulator_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static const struct regulator_ops attiny_regulator_ops = {
+ .enable = attiny_lcd_power_enable,
+ .disable = attiny_lcd_power_disable,
+ .is_enabled = attiny_lcd_power_is_enabled,
+};
+
+static const struct regulator_desc attiny_regulator = {
+ .name = "tc358762-power",
+ .ops = &attiny_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int attiny_update_status(struct backlight_device *bl)
+{
+ struct regmap *regmap = bl_get_data(bl);
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return regmap_write(regmap, REG_PWM, brightness);
+}
+
+static int attiny_get_brightness(struct backlight_device *bl)
+{
+ struct regmap *regmap = bl_get_data(bl);
+ int ret, brightness;
+
+ ret = regmap_read(regmap, REG_PWM, &brightness);
+ if (ret)
+ return ret;
+
+ return brightness;
+}
+
+static const struct backlight_ops attiny_bl = {
+ .update_status = attiny_update_status,
+ .get_brightness = attiny_get_brightness,
+};
+
+/*
+ * I2C driver interface functions
+ */
+static int attiny_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct backlight_properties props = { };
+ struct regulator_config config = { };
+ struct backlight_device *bl;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ unsigned int data;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &attiny_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_read(regmap, REG_ID, &data);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read REG_ID reg: %d\n", ret);
+ return ret;
+ }
+
+ switch (data) {
+ case 0xde: /* ver 1 */
+ case 0xc3: /* ver 2 */
+ break;
+ default:
+ dev_err(&i2c->dev, "Unknown Atmel firmware revision: 0x%02x\n", data);
+ return -ENODEV;
+ }
+
+ regmap_write(regmap, REG_POWERON, 0);
+ mdelay(1);
+
+ config.dev = &i2c->dev;
+ config.regmap = regmap;
+ config.of_node = i2c->dev.of_node;
+ config.init_data = &attiny_regulator_default;
+
+ rdev = devm_regulator_register(&i2c->dev, &attiny_regulator, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register ATTINY regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = 0xff;
+ bl = devm_backlight_device_register(&i2c->dev,
+ "7inch-touchscreen-panel-bl",
+ &i2c->dev, regmap, &attiny_bl,
+ &props);
+ if (IS_ERR(bl))
+ return PTR_ERR(bl);
+
+ bl->props.brightness = 0xff;
+
+ return 0;
+}
+
+static const struct of_device_id attiny_dt_ids[] = {
+ { .compatible = "raspberrypi,7inch-touchscreen-panel-regulator" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, attiny_dt_ids);
+
+static struct i2c_driver attiny_regulator_driver = {
+ .driver = {
+ .name = "rpi_touchscreen_attiny",
+ .of_match_table = of_match_ptr(attiny_dt_ids),
+ },
+ .probe = attiny_i2c_probe,
+};
+
+module_i2c_driver(attiny_regulator_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Regulator device driver for Raspberry Pi 7-inch touchscreen");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
new file mode 100644
index 000000000000..2055a9cb13ba
--- /dev/null
+++ b/drivers/regulator/rt4801-regulator.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define RT4801_REG_VOP 0x00
+#define RT4801_REG_VON 0x01
+#define RT4801_REG_APPS 0x03
+
+#define VOUT_MASK 0x1F
+
+#define MIN_UV 4000000
+#define STEP_UV 100000
+#define MAX_UV 6000000
+#define N_VOLTAGES ((MAX_UV - MIN_UV) / STEP_UV + 1)
+
+#define DSV_OUT_POS 0
+#define DSV_OUT_NEG 1
+#define DSV_OUT_MAX 2
+
+#define DSVP_ENABLE BIT(0)
+#define DSVN_ENABLE BIT(1)
+#define DSVALL_ENABLE (DSVP_ENABLE | DSVN_ENABLE)
+
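For reference, a worked evaluation of the selector math implied by the defines above (not patch content):

	/* N_VOLTAGES = (6000000 - 4000000) / 100000 + 1 = 21 selectors,
	 * covering 4.0 V .. 6.0 V in 100 mV steps:
	 *   uV(sel) = 4000000 + sel * 100000, for sel in [0, 20]
	 */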
+struct rt4801_priv {
+ struct device *dev;
+ struct gpio_descs *enable_gpios;
+ unsigned int enable_flag;
+ unsigned int volt_sel[DSV_OUT_MAX];
+};
+
+static int rt4801_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector)
+{
+ struct rt4801_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev), ret;
+
+ if (priv->enable_flag & BIT(id)) {
+ ret = regulator_set_voltage_sel_regmap(rdev, selector);
+ if (ret)
+ return ret;
+ }
+
+ priv->volt_sel[id] = selector;
+ return 0;
+}
+
+static int rt4801_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct rt4801_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+
+ if (priv->enable_flag & BIT(id))
+ return regulator_get_voltage_sel_regmap(rdev);
+
+ return priv->volt_sel[id];
+}
+
+static int rt4801_enable(struct regulator_dev *rdev)
+{
+ struct rt4801_priv *priv = rdev_get_drvdata(rdev);
+ struct gpio_descs *gpios = priv->enable_gpios;
+ int id = rdev_get_id(rdev), ret;
+
+ if (gpios->ndescs <= id) {
+ dev_warn(&rdev->dev, "no dedicated gpio to control this output\n");
+ goto bypass_gpio;
+ }
+
+ gpiod_set_value(gpios->desc[id], 1);
+
+bypass_gpio:
+ ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, priv->volt_sel[id]);
+ if (ret)
+ return ret;
+
+ priv->enable_flag |= BIT(id);
+ return 0;
+}
+
+static int rt4801_disable(struct regulator_dev *rdev)
+{
+ struct rt4801_priv *priv = rdev_get_drvdata(rdev);
+ struct gpio_descs *gpios = priv->enable_gpios;
+ int id = rdev_get_id(rdev);
+
+ if (gpios->ndescs <= id) {
+ dev_warn(&rdev->dev, "no dedicated gpio to control this output\n");
+ goto bypass_gpio;
+ }
+
+ gpiod_set_value(gpios->desc[id], 0);
+
+bypass_gpio:
+ priv->enable_flag &= ~BIT(id);
+ return 0;
+}
+
+static int rt4801_is_enabled(struct regulator_dev *rdev)
+{
+ struct rt4801_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+
+ return !!(priv->enable_flag & BIT(id));
+}
+
+static const struct regulator_ops rt4801_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = rt4801_set_voltage_sel,
+ .get_voltage_sel = rt4801_get_voltage_sel,
+ .enable = rt4801_enable,
+ .disable = rt4801_disable,
+ .is_enabled = rt4801_is_enabled,
+};
+
+static const struct regulator_desc rt4801_regulator_descs[] = {
+ {
+ .name = "DSVP",
+ .ops = &rt4801_regulator_ops,
+ .of_match = of_match_ptr("DSVP"),
+ .type = REGULATOR_VOLTAGE,
+ .id = DSV_OUT_POS,
+ .min_uV = MIN_UV,
+ .uV_step = STEP_UV,
+ .n_voltages = N_VOLTAGES,
+ .owner = THIS_MODULE,
+ .vsel_reg = RT4801_REG_VOP,
+ .vsel_mask = VOUT_MASK,
+ },
+ {
+ .name = "DSVN",
+ .ops = &rt4801_regulator_ops,
+ .of_match = of_match_ptr("DSVN"),
+ .type = REGULATOR_VOLTAGE,
+ .id = DSV_OUT_NEG,
+ .min_uV = MIN_UV,
+ .uV_step = STEP_UV,
+ .n_voltages = N_VOLTAGES,
+ .owner = THIS_MODULE,
+ .vsel_reg = RT4801_REG_VON,
+ .vsel_mask = VOUT_MASK,
+ },
+};
+
+static const struct regmap_config rt4801_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT4801_REG_APPS,
+};
+
+static int rt4801_probe(struct i2c_client *i2c)
+{
+ struct rt4801_priv *priv;
+ struct regmap *regmap;
+ int i;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &i2c->dev;
+ /* the bootloader leaves the outputs on; the driver only reconfigures the enables to all-output-high */
+ priv->enable_flag = DSVALL_ENABLE;
+
+ regmap = devm_regmap_init_i2c(i2c, &rt4801_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to init regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->enable_gpios = devm_gpiod_get_array_optional(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpios)) {
+ dev_err(&i2c->dev, "Failed to get gpios\n");
+ return PTR_ERR(priv->enable_gpios);
+ }
+
+ for (i = 0; i < DSV_OUT_MAX; i++) {
+ const struct regulator_desc *desc = rt4801_regulator_descs + i;
+ struct regulator_config config = { .dev = &i2c->dev, .driver_data = priv,
+ .regmap = regmap, };
+ struct regulator_dev *rdev;
+ unsigned int val;
+ int ret;
+
+ /* initialize volt_sel variable */
+ ret = regmap_read(regmap, desc->vsel_reg, &val);
+ if (ret)
+ return ret;
+
+ priv->volt_sel[i] = val & desc->vsel_mask;
+
+ rdev = devm_regulator_register(&i2c->dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register [%d] regulator\n", i);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt4801_of_id[] = {
+ { .compatible = "richtek,rt4801", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rt4801_of_id);
+
+static struct i2c_driver rt4801_driver = {
+ .driver = {
+ .name = "rt4801",
+ .of_match_table = of_match_ptr(rt4801_of_id),
+ },
+ .probe_new = rt4801_probe,
+};
+module_i2c_driver(rt4801_driver);
+
+MODULE_AUTHOR("ChiYuan Hwang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT4801 Display Bias Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
new file mode 100644
index 000000000000..852fb2596ffd
--- /dev/null
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define RTMV20_REG_DEVINFO 0x00
+#define RTMV20_REG_PULSEDELAY 0x01
+#define RTMV20_REG_PULSEWIDTH 0x03
+#define RTMV20_REG_LDCTRL1 0x05
+#define RTMV20_REG_ESPULSEWIDTH 0x06
+#define RTMV20_REG_ESLDCTRL1 0x08
+#define RTMV20_REG_LBP 0x0A
+#define RTMV20_REG_LDCTRL2 0x0B
+#define RTMV20_REG_FSIN1CTRL1 0x0D
+#define RTMV20_REG_FSIN1CTRL3 0x0F
+#define RTMV20_REG_FSIN2CTRL1 0x10
+#define RTMV20_REG_FSIN2CTRL3 0x12
+#define RTMV20_REG_ENCTRL 0x13
+#define RTMV20_REG_STRBVSYNDLYL 0x29
+#define RTMV20_REG_LDIRQ 0x30
+#define RTMV20_REG_LDSTAT 0x40
+#define RTMV20_REG_LDMASK 0x50
+
+#define RTMV20_VID_MASK GENMASK(7, 4)
+#define RICHTEK_VID 0x80
+#define RTMV20_LDCURR_MASK GENMASK(7, 0)
+#define RTMV20_DELAY_MASK GENMASK(9, 0)
+#define RTMV20_WIDTH_MASK GENMASK(13, 0)
+#define RTMV20_WIDTH2_MASK GENMASK(7, 0)
+#define RTMV20_LBPLVL_MASK GENMASK(3, 0)
+#define RTMV20_LBPEN_MASK BIT(7)
+#define RTMV20_STROBEPOL_MASK BIT(1)
+#define RTMV20_VSYNPOL_MASK BIT(1)
+#define RTMV20_FSINEN_MASK BIT(7)
+#define RTMV20_ESEN_MASK BIT(6)
+#define RTMV20_FSINOUT_MASK BIT(2)
+#define LDENABLE_MASK (BIT(3) | BIT(0))
+
+#define OTPEVT_MASK BIT(4)
+#define SHORTEVT_MASK BIT(3)
+#define OPENEVT_MASK BIT(2)
+#define LBPEVT_MASK BIT(1)
+#define OCPEVT_MASK BIT(0)
+#define FAILEVT_MASK (SHORTEVT_MASK | OPENEVT_MASK | LBPEVT_MASK)
+
+#define RTMV20_LSW_MINUA 0
+#define RTMV20_LSW_MAXUA 6000000
+#define RTMV20_LSW_STEPUA 30000
+
+#define RTMV20_LSW_DEFAULTUA 3000000
+
+#define RTMV20_I2CRDY_TIMEUS 200
+#define RTMV20_CSRDY_TIMEUS 2000
+
+struct rtmv20_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator_dev *rdev;
+};
+
+static int rtmv20_lsw_enable(struct regulator_dev *rdev)
+{
+ struct rtmv20_priv *priv = rdev_get_drvdata(rdev);
+ int ret;
+
+ gpiod_set_value(priv->enable_gpio, 1);
+
+ /* Wait until I2C can be accessed */
+ usleep_range(RTMV20_I2CRDY_TIMEUS, RTMV20_I2CRDY_TIMEUS + 100);
+
+ /* HW re-enabled: leave cache-only mode and sync the regcache */
+ regcache_cache_only(priv->regmap, false);
+ ret = regcache_sync(priv->regmap);
+ if (ret)
+ return ret;
+
+ return regulator_enable_regmap(rdev);
+}
+
+static int rtmv20_lsw_disable(struct regulator_dev *rdev)
+{
+ struct rtmv20_priv *priv = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = regulator_disable_regmap(rdev);
+ if (ret)
+ return ret;
+
+ /* Mark the regcache dirty and go cache-only before the HW is disabled */
+ regcache_cache_only(priv->regmap, true);
+ regcache_mark_dirty(priv->regmap);
+
+ gpiod_set_value(priv->enable_gpio, 0);
+
+ return 0;
+}
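The enable/disable pair above is the standard regcache idiom for a part that loses its register state whenever the enable line drops; in isolation, and with hypothetical names, the pattern is:

	static void example_hw_off(struct example_priv *priv)
	{
		/* stop touching HW; remember current state for later replay */
		regcache_cache_only(priv->regmap, true);
		regcache_mark_dirty(priv->regmap);
		gpiod_set_value(priv->enable_gpio, 0);
	}

	static int example_hw_on(struct example_priv *priv)
	{
		gpiod_set_value(priv->enable_gpio, 1);
		usleep_range(200, 300);		/* chip-specific I2C-ready delay */
		regcache_cache_only(priv->regmap, false);
		return regcache_sync(priv->regmap);	/* replay dirty registers */
	}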
+
+static const struct regulator_ops rtmv20_regulator_ops = {
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+ .enable = rtmv20_lsw_enable,
+ .disable = rtmv20_lsw_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_desc rtmv20_lsw_desc = {
+ .name = "rtmv20,lsw",
+ .of_match = of_match_ptr("lsw"),
+ .type = REGULATOR_CURRENT,
+ .owner = THIS_MODULE,
+ .ops = &rtmv20_regulator_ops,
+ .csel_reg = RTMV20_REG_LDCTRL1,
+ .csel_mask = RTMV20_LDCURR_MASK,
+ .enable_reg = RTMV20_REG_ENCTRL,
+ .enable_mask = LDENABLE_MASK,
+ .enable_time = RTMV20_CSRDY_TIMEUS,
+};
+
+static irqreturn_t rtmv20_irq_handler(int irq, void *data)
+{
+ struct rtmv20_priv *priv = data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, RTMV20_REG_LDIRQ, &val);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get irq flags\n");
+ return IRQ_NONE;
+ }
+
+ if (val & OTPEVT_MASK)
+ regulator_notifier_call_chain(priv->rdev, REGULATOR_EVENT_OVER_TEMP, NULL);
+
+ if (val & OCPEVT_MASK)
+ regulator_notifier_call_chain(priv->rdev, REGULATOR_EVENT_OVER_CURRENT, NULL);
+
+ if (val & FAILEVT_MASK)
+ regulator_notifier_call_chain(priv->rdev, REGULATOR_EVENT_FAIL, NULL);
+
+ return IRQ_HANDLED;
+}
+
+static u32 clamp_to_selector(u32 val, u32 min, u32 max, u32 step)
+{
+ u32 retval = clamp_val(val, min, max);
+
+ return (retval - min) / step;
+}
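A worked example of clamp_to_selector() against one entry of the props[] table below:

	/* "richtek,lbp-level-microvolt": min 2400000, max 3700000, step 100000
	 *   clamp_to_selector(2700000, 2400000, 3700000, 100000)
	 *     = (2700000 - 2400000) / 100000 = 3
	 * out-of-range requests are clamped first:
	 *   clamp_to_selector(4000000, ...) = (3700000 - 2400000) / 100000 = 13
	 */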
+
+static int rtmv20_properties_init(struct rtmv20_priv *priv)
+{
+ const struct {
+ const char *name;
+ u32 def;
+ u32 min;
+ u32 max;
+ u32 step;
+ u32 addr;
+ u32 mask;
+ } props[] = {
+ { "richtek,ld-pulse-delay-us", 0, 0, 100000, 100, RTMV20_REG_PULSEDELAY,
+ RTMV20_DELAY_MASK },
+ { "richtek,ld-pulse-width-us", 1200, 0, 10000, 1, RTMV20_REG_PULSEWIDTH,
+ RTMV20_WIDTH_MASK },
+ { "richtek,fsin1-delay-us", 23000, 0, 100000, 100, RTMV20_REG_FSIN1CTRL1,
+ RTMV20_DELAY_MASK },
+ { "richtek,fsin1-width-us", 160, 40, 10000, 40, RTMV20_REG_FSIN1CTRL3,
+ RTMV20_WIDTH2_MASK },
+ { "richtek,fsin2-delay-us", 23000, 0, 100000, 100, RTMV20_REG_FSIN2CTRL1,
+ RTMV20_DELAY_MASK },
+ { "richtek,fsin2-width-us", 160, 40, 10000, 40, RTMV20_REG_FSIN2CTRL3,
+ RTMV20_WIDTH2_MASK },
+ { "richtek,es-pulse-width-us", 1200, 0, 10000, 1, RTMV20_REG_ESPULSEWIDTH,
+ RTMV20_WIDTH_MASK },
+ { "richtek,es-ld-current-microamp", 3000000, 0, 6000000, 30000,
+ RTMV20_REG_ESLDCTRL1, RTMV20_LDCURR_MASK },
+ { "richtek,lbp-level-microvolt", 2700000, 2400000, 3700000, 100000, RTMV20_REG_LBP,
+ RTMV20_LBPLVL_MASK },
+ { "richtek,lbp-enable", 0, 0, 1, 1, RTMV20_REG_LBP, RTMV20_LBPEN_MASK },
+ { "richtek,strobe-polarity-high", 1, 0, 1, 1, RTMV20_REG_LDCTRL2,
+ RTMV20_STROBEPOL_MASK },
+ { "richtek,vsync-polarity-high", 1, 0, 1, 1, RTMV20_REG_LDCTRL2,
+ RTMV20_VSYNPOL_MASK },
+ { "richtek,fsin-enable", 0, 0, 1, 1, RTMV20_REG_ENCTRL, RTMV20_FSINEN_MASK },
+ { "richtek,fsin-output", 0, 0, 1, 1, RTMV20_REG_ENCTRL, RTMV20_FSINOUT_MASK },
+ { "richtek,es-enable", 0, 0, 1, 1, RTMV20_REG_ENCTRL, RTMV20_ESEN_MASK },
+ };
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ __be16 bval16;
+ u16 val16;
+ u32 temp;
+ int significant_bit = fls(props[i].mask);
+ int shift = ffs(props[i].mask) - 1;
+
+ if (props[i].max > 1) {
+ ret = device_property_read_u32(priv->dev, props[i].name, &temp);
+ if (ret)
+ temp = props[i].def;
+ } else
+ temp = device_property_read_bool(priv->dev, props[i].name);
+
+ temp = clamp_to_selector(temp, props[i].min, props[i].max, props[i].step);
+
+ /* If the highest mask bit is above bit 8, use a two-byte access; otherwise one byte */
+ if (significant_bit > 8) {
+ ret = regmap_raw_read(priv->regmap, props[i].addr, &bval16, sizeof(bval16));
+ if (ret)
+ return ret;
+
+ val16 = be16_to_cpu(bval16);
+ val16 &= ~props[i].mask;
+ val16 |= (temp << shift);
+ bval16 = cpu_to_be16(val16);
+
+ ret = regmap_raw_write(priv->regmap, props[i].addr, &bval16,
+ sizeof(bval16));
+ } else {
+ ret = regmap_update_bits(priv->regmap, props[i].addr, props[i].mask,
+ temp << shift);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtmv20_check_chip_exist(struct rtmv20_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, RTMV20_REG_DEVINFO, &val);
+ if (ret)
+ return ret;
+
+ if ((val & RTMV20_VID_MASK) != RICHTEK_VID)
+ return -ENODEV;
+
+ return 0;
+}
+
+static bool rtmv20_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RTMV20_REG_DEVINFO ... RTMV20_REG_STRBVSYNDLYL:
+ case RTMV20_REG_LDIRQ:
+ case RTMV20_REG_LDSTAT:
+ case RTMV20_REG_LDMASK:
+ return true;
+ }
+ return false;
+}
+
+static bool rtmv20_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == RTMV20_REG_LDIRQ || reg == RTMV20_REG_LDSTAT)
+ return true;
+ return false;
+}
+
+static const struct regmap_config rtmv20_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = RTMV20_REG_LDMASK,
+
+ .writeable_reg = rtmv20_is_accessible_reg,
+ .readable_reg = rtmv20_is_accessible_reg,
+ .volatile_reg = rtmv20_is_volatile_reg,
+};
+
+static int rtmv20_probe(struct i2c_client *i2c)
+{
+ struct rtmv20_priv *priv;
+ struct regulator_config config = {};
+ int ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &i2c->dev;
+
+ /* Before registering the regmap, assert the HW enable so I2C becomes accessible */
+ priv->enable_gpio = devm_gpiod_get(&i2c->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ dev_err(&i2c->dev, "Failed to get enable gpio\n");
+ return PTR_ERR(priv->enable_gpio);
+ }
+
+ /* Wait until I2C can be accessed */
+ usleep_range(RTMV20_I2CRDY_TIMEUS, RTMV20_I2CRDY_TIMEUS + 100);
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &rtmv20_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&i2c->dev, "Failed to allocate register map\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = rtmv20_check_chip_exist(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Chip vendor info is not matched\n");
+ return ret;
+ }
+
+ ret = rtmv20_properties_init(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to init properties\n");
+ return ret;
+ }
+
+ /*
+ * Keep the device in shutdown mode to minimize current consumption,
+ * and mark the regcache as dirty
+ */
+ regcache_cache_only(priv->regmap, true);
+ regcache_mark_dirty(priv->regmap);
+ gpiod_set_value(priv->enable_gpio, 0);
+
+ config.dev = &i2c->dev;
+ config.regmap = priv->regmap;
+ config.driver_data = priv;
+ priv->rdev = devm_regulator_register(&i2c->dev, &rtmv20_lsw_desc, &config);
+ if (IS_ERR(priv->rdev)) {
+ dev_err(&i2c->dev, "Failed to register regulator\n");
+ return PTR_ERR(priv->rdev);
+ }
+
+ /* Unmask all events before the IRQ is registered */
+ ret = regmap_write(priv->regmap, RTMV20_REG_LDMASK, 0);
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL, rtmv20_irq_handler,
+ IRQF_ONESHOT, dev_name(&i2c->dev), priv);
+}
+
+static int __maybe_unused rtmv20_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ /*
+ * On system suspend, disable the irq so it cannot trigger while
+ * the I2C bus is suspended
+ */
+ disable_irq(i2c->irq);
+ if (device_may_wakeup(dev))
+ enable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static int __maybe_unused rtmv20_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ /* Re-enable the irq after the I2C bus has resumed */
+ enable_irq(i2c->irq);
+ if (device_may_wakeup(dev))
+ disable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtmv20_pm, rtmv20_suspend, rtmv20_resume);
+
+static const struct of_device_id __maybe_unused rtmv20_of_id[] = {
+ { .compatible = "richtek,rtmv20", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtmv20_of_id);
+
+static struct i2c_driver rtmv20_driver = {
+ .driver = {
+ .name = "rtmv20",
+ .of_match_table = of_match_ptr(rtmv20_of_id),
+ .pm = &rtmv20_pm,
+ },
+ .probe_new = rtmv20_probe,
+};
+module_i2c_driver(rtmv20_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTMV20 Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 4abd3ed31f60..3fa472127e9a 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -1000,18 +1000,7 @@ static struct platform_driver s5m8767_pmic_driver = {
.probe = s5m8767_pmic_probe,
.id_table = s5m8767_pmic_id,
};
-
-static int __init s5m8767_pmic_init(void)
-{
- return platform_driver_register(&s5m8767_pmic_driver);
-}
-subsys_initcall(s5m8767_pmic_init);
-
-static void __exit s5m8767_pmic_exit(void)
-{
- platform_driver_unregister(&s5m8767_pmic_driver);
-}
-module_exit(s5m8767_pmic_exit);
+module_platform_driver(s5m8767_pmic_driver);
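module_platform_driver() generates exactly the init/exit boilerplate deleted above. One hedged caveat: the generated initcall runs at module_init (device_initcall) level, whereas the removed code used subsys_initcall, so conversions like this can shift probe ordering slightly. Roughly, the macro expands to:

	/* module_platform_driver(drv) is approximately: */
	static int __init drv_init(void)
	{
		return platform_driver_register(&drv);
	}
	module_init(drv_init);

	static void __exit drv_exit(void)
	{
		platform_driver_unregister(&drv);
	}
	module_exit(drv_exit);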
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 87b020d0b958..75a941fb3c2b 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -386,10 +386,8 @@ static irqreturn_t slg51000_irq_handler(int irq, void *data)
for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
if (!(evt[i][R2] & SLG51000_IRQ_ILIM_FLAG_MASK) &&
(evt[i][R0] & SLG51000_EVT_ILIM_FLAG_MASK)) {
- regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_CURRENT, NULL);
- regulator_unlock(chip->rdev[i]);
if (evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK)
dev_warn(chip->dev,
@@ -403,10 +401,8 @@ static irqreturn_t slg51000_irq_handler(int irq, void *data)
for (i = 0; i < SLG51000_MAX_REGULATORS; i++) {
if (!(evt[i][R1] & SLG51000_STA_ILIM_FLAG_MASK) &&
(evt[i][R1] & SLG51000_STA_VOUT_OK_FLAG_MASK)) {
- regulator_lock(chip->rdev[i]);
regulator_notifier_call_chain(chip->rdev[i],
REGULATOR_EVENT_OVER_TEMP, NULL);
- regulator_unlock(chip->rdev[i]);
}
}
handled = IRQ_HANDLED;
diff --git a/drivers/regulator/stm32-booster.c b/drivers/regulator/stm32-booster.c
index 03f162ffd144..3136ea8a35d5 100644
--- a/drivers/regulator/stm32-booster.c
+++ b/drivers/regulator/stm32-booster.c
@@ -101,7 +101,7 @@ static int stm32_booster_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id stm32_booster_of_match[] = {
+static const struct of_device_id __maybe_unused stm32_booster_of_match[] = {
{
.compatible = "st,stm32h7-booster",
.data = (void *)&stm32h7_booster_desc
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index e0e627b0106e..2a42acb7c24e 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -166,7 +166,7 @@ static int stm32_pwr_regulator_probe(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id stm32_pwr_of_match[] = {
+static const struct of_device_id __maybe_unused stm32_pwr_of_match[] = {
{ .compatible = "st,stm32mp1,pwr-reg", },
{},
};
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 992bc18101ef..161622ea7259 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -284,7 +284,7 @@ static const struct dev_pm_ops stm32_vrefbuf_pm_ops = {
NULL)
};
-static const struct of_device_id stm32_vrefbuf_of_match[] = {
+static const struct of_device_id __maybe_unused stm32_vrefbuf_of_match[] = {
{ .compatible = "st,stm32-vrefbuf", },
{},
};
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index 73e0ab2baeaa..cf10fdb72e32 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -505,15 +505,11 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
- regulator_lock(rdev);
-
/* Send an overcurrent notification */
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
- regulator_unlock(rdev);
-
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
index 6dc2316daad3..127ab43add49 100644
--- a/drivers/regulator/stw481x-vmmc.c
+++ b/drivers/regulator/stw481x-vmmc.c
@@ -27,7 +27,7 @@ static const unsigned int stw481x_vmmc_voltages[] = {
3300000,
};
-static struct regulator_ops stw481x_vmmc_ops = {
+static const struct regulator_ops stw481x_vmmc_ops = {
.list_voltage = regulator_list_voltage_table,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -36,7 +36,7 @@ static struct regulator_ops stw481x_vmmc_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
-static struct regulator_desc vmmc_regulator = {
+static const struct regulator_desc vmmc_regulator = {
.name = "VMMC",
.id = 0,
.ops = &stw481x_vmmc_ops,
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index 2222e739e62b..c119f85259a5 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -123,7 +123,7 @@ static int sy8106a_i2c_probe(struct i2c_client *i2c)
return 0;
}
-static const struct of_device_id sy8106a_i2c_of_match[] = {
+static const struct of_device_id __maybe_unused sy8106a_i2c_of_match[] = {
{ .compatible = "silergy,sy8106a" },
{ },
};
diff --git a/drivers/regulator/sy8827n.c b/drivers/regulator/sy8827n.c
index b207217f74d8..52e8c17afe24 100644
--- a/drivers/regulator/sy8827n.c
+++ b/drivers/regulator/sy8827n.c
@@ -156,6 +156,7 @@ static int sy8827n_i2c_probe(struct i2c_client *client)
return ret;
}
+#ifdef CONFIG_OF
static const struct of_device_id sy8827n_dt_ids[] = {
{
.compatible = "silergy,sy8827n",
@@ -163,6 +164,7 @@ static const struct of_device_id sy8827n_dt_ids[] = {
{ }
};
MODULE_DEVICE_TABLE(of, sy8827n_dt_ids);
+#endif
static const struct i2c_device_id sy8827n_id[] = {
{ "sy8827n", },
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index af9abcd9c166..3e60bff76194 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -619,7 +619,7 @@ check_abb:
return 0;
}
-static struct regulator_ops ti_abb_reg_ops = {
+static const struct regulator_ops ti_abb_reg_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = ti_abb_set_voltage_sel,
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index c139890c1514..a15e415e61d5 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -108,7 +108,7 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
return ret;
}
-static struct regulator_ops tps51632_dcdc_ops = {
+static const struct regulator_ops tps51632_dcdc_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index f8939af0bd2c..a6469fe05635 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -26,7 +26,7 @@ static const unsigned int tps6105x_voltages[] = {
5000000, /* There is an additional 5V */
};
-static struct regulator_ops tps6105x_regulator_ops = {
+static const struct regulator_ops tps6105x_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index f6a6d36a6533..315cd5daf480 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -233,7 +233,7 @@ static unsigned int tps62360_get_mode(struct regulator_dev *rdev)
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
}
-static struct regulator_ops tps62360_dcdc_ops = {
+static const struct regulator_ops tps62360_dcdc_ops = {
.get_voltage_sel = tps62360_dcdc_get_voltage_sel,
.set_voltage_sel = tps62360_dcdc_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 795d459ff3cf..f25806531c7e 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -316,7 +316,7 @@ static int tps_65023_probe(struct i2c_client *client,
return 0;
}
-static const struct of_device_id tps65023_of_match[] = {
+static const struct of_device_id __maybe_unused tps65023_of_match[] = {
{ .compatible = "ti,tps65020", .data = &tps65020_drv_data},
{ .compatible = "ti,tps65021", .data = &tps65021_drv_data},
{ .compatible = "ti,tps65023", .data = &tps65023_drv_data},
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 23528475a962..070c956216b0 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -101,7 +101,7 @@ static const struct linear_range tps65086_ldoa23_ranges[] = {
};
/* Operations permitted on regulators */
-static struct regulator_ops reg_ops = {
+static const struct regulator_ops reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -112,7 +112,7 @@ static struct regulator_ops reg_ops = {
};
/* Operations permitted on load switches */
-static struct regulator_ops switch_ops = {
+static const struct regulator_ops switch_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index f0b660e9f15f..1d2e04f452d4 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -47,7 +47,7 @@ struct tps65090_regulator {
int overcurrent_wait;
};
-static struct regulator_ops tps65090_ext_control_ops = {
+static const struct regulator_ops tps65090_ext_control_ops = {
};
/**
@@ -167,19 +167,19 @@ err:
return ret;
}
-static struct regulator_ops tps65090_reg_control_ops = {
+static const struct regulator_ops tps65090_reg_control_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops tps65090_fet_control_ops = {
+static const struct regulator_ops tps65090_fet_control_ops = {
.enable = tps65090_fet_enable,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
-static struct regulator_ops tps65090_ldo_ops = {
+static const struct regulator_ops tps65090_ldo_ops = {
};
#define tps65090_REG_DESC(_id, _sname, _en_reg, _en_bits, _nvolt, _volt, _ops) \
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 09e994e1f9a9..18bf4b885b08 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -60,7 +60,7 @@ struct tps6586x_regulator {
int enable_reg[2];
};
-static struct regulator_ops tps6586x_rw_regulator_ops = {
+static const struct regulator_ops tps6586x_rw_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -71,7 +71,7 @@ static struct regulator_ops tps6586x_rw_regulator_ops = {
.disable = regulator_disable_regmap,
};
-static struct regulator_ops tps6586x_rw_linear_regulator_ops = {
+static const struct regulator_ops tps6586x_rw_linear_regulator_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -81,7 +81,7 @@ static struct regulator_ops tps6586x_rw_linear_regulator_ops = {
.disable = regulator_disable_regmap,
};
-static struct regulator_ops tps6586x_ro_regulator_ops = {
+static const struct regulator_ops tps6586x_ro_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -91,7 +91,7 @@ static struct regulator_ops tps6586x_ro_regulator_ops = {
.disable = regulator_disable_regmap,
};
-static struct regulator_ops tps6586x_sys_regulator_ops = {
+static const struct regulator_ops tps6586x_sys_regulator_ops = {
};
static const unsigned int tps6586x_ldo0_voltages[] = {
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4eb5b19d2344..1d5b0a1b86f7 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -390,8 +390,8 @@ static int tps65911_get_ctrl_register(int id)
static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- struct tps65910 *mfd = pmic->mfd;
- int reg, value, id = rdev_get_id(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
+ int reg, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
@@ -399,14 +399,14 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_NORMAL:
- return tps65910_reg_update_bits(pmic->mfd, reg,
- LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
- LDO_ST_ON_BIT);
+ return regmap_update_bits(regmap, reg,
+ LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
+ LDO_ST_ON_BIT);
case REGULATOR_MODE_IDLE:
- value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
- return tps65910_reg_set_bits(mfd, reg, value);
+ return regmap_set_bits(regmap, reg,
+ LDO_ST_ON_BIT | LDO_ST_MODE_BIT);
case REGULATOR_MODE_STANDBY:
- return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ return regmap_clear_bits(regmap, reg, LDO_ST_ON_BIT);
}
return -EINVAL;
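This file's conversion swaps the tps65910 MFD register helpers for the generic regmap API, with the map obtained via rdev_get_regmap(dev); the correspondence visible in these hunks is one-to-one:

	/* tps65910_reg_read(mfd, reg, &val)        -> regmap_read(regmap, reg, &val)
	 * tps65910_reg_update_bits(mfd, reg, m, v) -> regmap_update_bits(regmap, reg, m, v)
	 * tps65910_reg_set_bits(mfd, reg, m)       -> regmap_set_bits(regmap, reg, m)
	 * tps65910_reg_clear_bits(mfd, reg, m)     -> regmap_clear_bits(regmap, reg, m)
	 */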
@@ -415,13 +415,14 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
static unsigned int tps65910_get_mode(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ ret = regmap_read(regmap, reg, &value);
if (ret < 0)
return ret;
@@ -435,20 +436,20 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int ret, id = rdev_get_id(dev);
int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
switch (id) {
case TPS65910_REG_VDD1:
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_OP, &opvsel);
+ ret = regmap_read(regmap, TPS65910_VDD1_OP, &opvsel);
if (ret < 0)
return ret;
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1, &mult);
+ ret = regmap_read(regmap, TPS65910_VDD1, &mult);
if (ret < 0)
return ret;
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_SR, &srvsel);
+ ret = regmap_read(regmap, TPS65910_VDD1_SR, &srvsel);
if (ret < 0)
return ret;
sr = opvsel & VDD1_OP_CMD_MASK;
@@ -457,14 +458,14 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_OP, &opvsel);
+ ret = regmap_read(regmap, TPS65910_VDD2_OP, &opvsel);
if (ret < 0)
return ret;
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2, &mult);
+ ret = regmap_read(regmap, TPS65910_VDD2, &mult);
if (ret < 0)
return ret;
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_SR, &srvsel);
+ ret = regmap_read(regmap, TPS65910_VDD2_SR, &srvsel);
if (ret < 0)
return ret;
sr = opvsel & VDD2_OP_CMD_MASK;
@@ -473,12 +474,10 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_OP,
- &opvsel);
+ ret = regmap_read(regmap, TPS65911_VDDCTRL_OP, &opvsel);
if (ret < 0)
return ret;
- ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_SR,
- &srvsel);
+ ret = regmap_read(regmap, TPS65911_VDDCTRL_SR, &srvsel);
if (ret < 0)
return ret;
sr = opvsel & VDDCTRL_OP_CMD_MASK;
@@ -514,13 +513,14 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
static int tps65910_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ ret = regmap_read(regmap, reg, &value);
if (ret < 0)
return ret;
@@ -556,12 +556,13 @@ static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
static int tps65911_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int ret, id = rdev_get_id(dev);
unsigned int value, reg;
reg = pmic->get_ctrl_reg(id);
- ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ ret = regmap_read(regmap, reg, &value);
if (ret < 0)
return ret;
@@ -594,7 +595,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
unsigned selector)
{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int id = rdev_get_id(dev), vsel;
int dcdc_mult = 0;
@@ -605,10 +606,9 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD1,
- VDD1_VGAIN_SEL_MASK,
- dcdc_mult << VDD1_VGAIN_SEL_SHIFT);
- tps65910_reg_write(pmic->mfd, TPS65910_VDD1_OP, vsel);
+ regmap_update_bits(regmap, TPS65910_VDD1, VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD1_VGAIN_SEL_SHIFT);
+ regmap_write(regmap, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -616,14 +616,14 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD2,
- VDD1_VGAIN_SEL_MASK,
- dcdc_mult << VDD2_VGAIN_SEL_SHIFT);
- tps65910_reg_write(pmic->mfd, TPS65910_VDD2_OP, vsel);
+ regmap_update_bits(regmap, TPS65910_VDD2, VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD2_VGAIN_SEL_SHIFT);
+ regmap_write(regmap, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write(pmic->mfd, TPS65911_VDDCTRL_OP, vsel);
+ regmap_write(regmap, TPS65911_VDDCTRL_OP, vsel);
+ break;
}
return 0;
@@ -633,6 +633,7 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int reg, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
@@ -649,11 +650,11 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VAUX2:
case TPS65910_REG_VAUX33:
case TPS65910_REG_VMMC:
- return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
- selector << LDO_SEL_SHIFT);
+ return regmap_update_bits(regmap, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65910_REG_VBB:
- return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
- selector << BBCH_BBSEL_SHIFT);
+ return regmap_update_bits(regmap, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -663,6 +664,7 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+ struct regmap *regmap = rdev_get_regmap(dev);
int reg, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
@@ -673,21 +675,21 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
case TPS65911_REG_LDO1:
case TPS65911_REG_LDO2:
case TPS65911_REG_LDO4:
- return tps65910_reg_update_bits(pmic->mfd, reg, LDO1_SEL_MASK,
- selector << LDO_SEL_SHIFT);
+ return regmap_update_bits(regmap, reg, LDO1_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65911_REG_LDO3:
case TPS65911_REG_LDO5:
case TPS65911_REG_LDO6:
case TPS65911_REG_LDO7:
case TPS65911_REG_LDO8:
- return tps65910_reg_update_bits(pmic->mfd, reg, LDO3_SEL_MASK,
- selector << LDO_SEL_SHIFT);
+ return regmap_update_bits(regmap, reg, LDO3_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65910_REG_VIO:
- return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
- selector << LDO_SEL_SHIFT);
+ return regmap_update_bits(regmap, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65910_REG_VBB:
- return tps65910_reg_update_bits(pmic->mfd, reg, BBCH_BBSEL_MASK,
- selector << BBCH_BBSEL_SHIFT);
+ return regmap_update_bits(regmap, reg, BBCH_BBSEL_MASK,
+ selector << BBCH_BBSEL_SHIFT);
}
return -EINVAL;
@@ -757,7 +759,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
}
/* Regulator ops (except VRTC) */
-static struct regulator_ops tps65910_ops_dcdc = {
+static const struct regulator_ops tps65910_ops_dcdc = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -770,7 +772,7 @@ static struct regulator_ops tps65910_ops_dcdc = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct regulator_ops tps65910_ops_vdd3 = {
+static const struct regulator_ops tps65910_ops_vdd3 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -781,7 +783,7 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct regulator_ops tps65910_ops_vbb = {
+static const struct regulator_ops tps65910_ops_vbb = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -793,7 +795,7 @@ static struct regulator_ops tps65910_ops_vbb = {
.map_voltage = regulator_map_voltage_iterate,
};
-static struct regulator_ops tps65910_ops = {
+static const struct regulator_ops tps65910_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -805,7 +807,7 @@ static struct regulator_ops tps65910_ops = {
.map_voltage = regulator_map_voltage_ascend,
};
-static struct regulator_ops tps65911_ops = {
+static const struct regulator_ops tps65911_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -850,10 +852,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN1 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
- ret = tps65910_reg_set_bits(mfd,
+ ret = regmap_set_bits(mfd->regmap,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -863,10 +865,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN2 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
- ret = tps65910_reg_set_bits(mfd,
+ ret = regmap_set_bits(mfd->regmap,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -878,10 +880,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
if ((tps65910_chip_id(mfd) == TPS65910) &&
(id >= TPS65910_REG_VDIG1)) {
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
- ret = tps65910_reg_set_bits(mfd,
+ ret = regmap_set_bits(mfd->regmap,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -893,10 +895,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* Return if no external control is selected */
if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
/* Clear all sleep controls */
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret)
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
if (ret < 0)
dev_err(mfd->dev,
@@ -917,39 +919,38 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
int opvsel, srvsel;
- ret = tps65910_reg_read(pmic->mfd, op_reg_add, &opvsel);
+ ret = regmap_read(mfd->regmap, op_reg_add, &opvsel);
if (ret < 0)
return ret;
- ret = tps65910_reg_read(pmic->mfd, sr_reg_add, &srvsel);
+ ret = regmap_read(mfd->regmap, sr_reg_add, &srvsel);
if (ret < 0)
return ret;
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write(pmic->mfd, op_reg_add,
- reg_val);
+ ret = regmap_write(mfd->regmap, op_reg_add, reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
+ ret = regmap_write(mfd->regmap, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in setting sr register\n");
return ret;
}
}
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret) {
if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
- ret = tps65910_reg_set_bits(mfd,
+ ret = regmap_set_bits(mfd->regmap,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
else
- ret = tps65910_reg_clear_bits(mfd,
+ ret = regmap_clear_bits(mfd->regmap,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
}
if (ret < 0)
@@ -1097,7 +1098,7 @@ static int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
	/* Give control of all registers to the control port */
- err = tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ err = regmap_set_bits(pmic->mfd->regmap, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
if (err < 0)
return err;
@@ -1113,7 +1114,7 @@ static int tps65910_probe(struct platform_device *pdev)
* voltage level can go higher than expected or crash
* Workaround: use no synchronization of DCDC clocks
*/
- tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
+ regmap_clear_bits(pmic->mfd->regmap, TPS65910_DCDCCTRL,
DCDCCTRL_DCDCCKSYNC_MASK);
break;
case TPS65911:
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 63d6bbd4969b..b52d4f2874b7 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -57,7 +57,7 @@ static const struct linear_range tps65912_ldo_ranges[] = {
};
/* Operations permitted on DCDCx */
-static struct regulator_ops tps65912_ops_dcdc = {
+static const struct regulator_ops tps65912_ops_dcdc = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -67,7 +67,7 @@ static struct regulator_ops tps65912_ops_dcdc = {
};
/* Operations permitted on LDOx */
-static struct regulator_ops tps65912_ops_ldo = {
+static const struct regulator_ops tps65912_ops_ldo = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index ad2203d11a88..e43ed4d93f71 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -178,11 +178,9 @@ static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
- regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
@@ -191,11 +189,9 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data)
{
struct wm831x_dcdc *dcdc = data;
- regulator_lock(dcdc->regulator);
regulator_notifier_call_chain(dcdc->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
- regulator_unlock(dcdc->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index ff3d2bf50410..eade3ae3e333 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -99,11 +99,9 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
{
struct wm831x_isink *isink = data;
- regulator_lock(isink->regulator);
regulator_notifier_call_chain(isink->regulator,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
- regulator_unlock(isink->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 7b6cf4810cb7..e091b189ecc0 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -46,11 +46,9 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
{
struct wm831x_ldo *ldo = data;
- regulator_lock(ldo->regulator);
regulator_notifier_call_chain(ldo->regulator,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(ldo->regulator);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 2e7bfdf7c87b..6579bfdb0c26 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1089,7 +1089,6 @@ static irqreturn_t pmic_uv_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
- regulator_lock(rdev);
if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2)
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_REGULATION_OUT,
@@ -1098,7 +1097,6 @@ static irqreturn_t pmic_uv_handler(int irq, void *data)
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- regulator_unlock(rdev);
return IRQ_HANDLED;
}
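The wm831x and wm8350 hunks above all make the same change: the rdev lock is no longer taken around regulator_notifier_call_chain(), on the premise that the notifier chain supplies its own locking. The resulting handler shape, as a minimal sketch (hypothetical handler name):

static irqreturn_t sketch_uv_irq(int irq, void *data)
{
	struct regulator_dev *rdev = data;

	/* the notifier chain serializes its listeners itself; no rdev lock */
	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE,
				      NULL);
	return IRQ_HANDLED;
}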
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index c6659dfea7c7..d99548fb5dde 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -43,7 +43,7 @@ config INGENIC_VPU_RPROC
config MTK_SCP
tristate "Mediatek SCP support"
- depends on ARCH_MEDIATEK
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select RPMSG_MTK_SCP
help
Say y here to support Mediatek's System Companion Processor (SCP) via
@@ -275,6 +275,19 @@ config TI_K3_DSP_REMOTEPROC
It's safe to say N here if you're not interested in utilizing
the DSP slave processors.
+config TI_K3_R5_REMOTEPROC
+ tristate "TI K3 R5 remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's R5F remote processor subsystems
+	  on various SoCs in TI's K3 family through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a slave processor.
+
endif # REMOTEPROC
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 3dfa28e6c701..da2ace4ec86c 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 0066c83636d0..47b4561443a9 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -32,6 +32,23 @@
#define MT8183_SCP_CACHESIZE_8KB BIT(8)
#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
+#define MT8192_L2TCM_SRAM_PD_0 0x210C0
+#define MT8192_L2TCM_SRAM_PD_1 0x210C4
+#define MT8192_L2TCM_SRAM_PD_2 0x210C8
+#define MT8192_L1TCM_SRAM_PDN 0x2102C
+#define MT8192_CPU0_SRAM_PD 0x21080
+
+#define MT8192_SCP2APMCU_IPC_SET 0x24080
+#define MT8192_SCP2APMCU_IPC_CLR 0x24084
+#define MT8192_SCP_IPC_INT_BIT BIT(0)
+#define MT8192_SCP2SPM_IPC_CLR 0x24094
+#define MT8192_GIPC_IN_SET 0x24098
+#define MT8192_HOST_IPC_INT_BIT BIT(0)
+
+#define MT8192_CORE0_SW_RSTN_CLR 0x30000
+#define MT8192_CORE0_SW_RSTN_SET 0x30004
+#define MT8192_CORE0_WDT_CFG 0x30034
+
#define SCP_FW_VER_LEN 32
#define SCP_SHARE_BUFFER_SIZE 288
@@ -50,6 +67,19 @@ struct scp_ipi_desc {
void *priv;
};
+struct mtk_scp;
+
+struct mtk_scp_of_data {
+ int (*scp_before_load)(struct mtk_scp *scp);
+ void (*scp_irq_handler)(struct mtk_scp *scp);
+ void (*scp_reset_assert)(struct mtk_scp *scp);
+ void (*scp_reset_deassert)(struct mtk_scp *scp);
+ void (*scp_stop)(struct mtk_scp *scp);
+
+ u32 host_to_scp_reg;
+ u32 host_to_scp_int_bit;
+};
+
struct mtk_scp {
struct device *dev;
struct rproc *rproc;
@@ -58,6 +88,8 @@ struct mtk_scp {
void __iomem *sram_base;
size_t sram_size;
+ const struct mtk_scp_of_data *data;
+
struct mtk_share_obj __iomem *recv_buf;
struct mtk_share_obj __iomem *send_buf;
struct scp_run run;
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index ac13e7b046a6..577cbd5d421e 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -124,9 +124,6 @@ static int scp_ipi_init(struct mtk_scp *scp)
size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj);
size_t recv_offset = send_offset - sizeof(struct mtk_share_obj);
- /* Disable SCP to host interrupt */
- writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
-
/* shared buffer initialization */
scp->recv_buf =
(struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
@@ -138,7 +135,7 @@ static int scp_ipi_init(struct mtk_scp *scp)
return 0;
}
-static void scp_reset_assert(const struct mtk_scp *scp)
+static void mt8183_scp_reset_assert(struct mtk_scp *scp)
{
u32 val;
@@ -147,7 +144,7 @@ static void scp_reset_assert(const struct mtk_scp *scp)
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
-static void scp_reset_deassert(const struct mtk_scp *scp)
+static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
{
u32 val;
@@ -156,17 +153,19 @@ static void scp_reset_deassert(const struct mtk_scp *scp)
writel(val, scp->reg_base + MT8183_SW_RSTN);
}
-static irqreturn_t scp_irq_handler(int irq, void *priv)
+static void mt8192_scp_reset_assert(struct mtk_scp *scp)
{
- struct mtk_scp *scp = priv;
- u32 scp_to_host;
- int ret;
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+}
- ret = clk_prepare_enable(scp->clk);
- if (ret) {
- dev_err(scp->dev, "failed to enable clocks\n");
- return IRQ_NONE;
- }
+static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
+{
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+}
+
+static void mt8183_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
@@ -177,6 +176,40 @@ static irqreturn_t scp_irq_handler(int irq, void *priv)
/* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
scp->reg_base + MT8183_SCP_TO_HOST);
+}
+
+static void mt8192_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
+ scp_ipi_handler(scp);
+ else
+ scp_wdt_handler(scp, scp_to_host);
+
+ /*
+ * SCP won't send another interrupt until we clear
+ * MT8192_SCP2APMCU_IPC.
+ */
+ writel(MT8192_SCP_IPC_INT_BIT,
+ scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+ struct mtk_scp *scp = priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return IRQ_NONE;
+ }
+
+ scp->data->scp_irq_handler(scp);
+
clk_disable_unprepare(scp->clk);
return IRQ_HANDLED;
@@ -238,20 +271,10 @@ static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
return ret;
}
-static int scp_load(struct rproc *rproc, const struct firmware *fw)
+static int mt8183_scp_before_load(struct mtk_scp *scp)
{
- const struct mtk_scp *scp = rproc->priv;
- struct device *dev = scp->dev;
- int ret;
-
- ret = clk_prepare_enable(scp->clk);
- if (ret) {
- dev_err(dev, "failed to enable clocks\n");
- return ret;
- }
-
- /* Hold SCP in reset while loading FW. */
- scp_reset_assert(scp);
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
/* Reset clocks before loading FW */
writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
@@ -272,6 +295,63 @@ static int scp_load(struct rproc *rproc, const struct firmware *fw)
scp->reg_base + MT8183_SCP_CACHE_CON);
writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+ return 0;
+}
+
+static void mt8192_power_on_sram(void __iomem *addr)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--)
+ writel(GENMASK(i, 0), addr);
+ writel(0, addr);
+}
+
+static void mt8192_power_off_sram(void __iomem *addr)
+{
+ int i;
+
+ writel(0, addr);
+ for (i = 0; i < 32; i++)
+ writel(GENMASK(i, 0), addr);
+}
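The two helpers above stage the SRAM power-down bits one bank at a time rather than flipping the whole register at once (presumably to limit in-rush current; the patch itself does not state the rationale). A standalone plain-C illustration of the write sequence mt8192_power_on_sram() produces:

#include <stdio.h>
#include <stdint.h>

/* GENMASK(i, 0) for 32-bit values; i == 31 is special-cased because
 * 1u << 32 would be undefined behavior. */
static uint32_t genmask32(int i)
{
	return (i >= 31) ? 0xffffffffu : ((1u << (i + 1)) - 1u);
}

int main(void)
{
	/* power-on: first write asserts every power-down bit, then each
	 * subsequent write clears one more of them */
	for (int i = 31; i >= 0; i--)
		printf("writel(0x%08x)\n", genmask32(i));
	printf("writel(0x00000000)\n");	/* all banks powered */
	return 0;
}

mt8192_power_off_sram() is the mirror image: it starts from zero and sets the bits back one per write.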
+
+static int mt8192_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
+ mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
+ mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
+ mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
+ mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+
+ return 0;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* Hold SCP in reset while loading FW. */
+ scp->data->scp_reset_assert(scp);
+
+ ret = scp->data->scp_before_load(scp);
+ if (ret < 0)
+ return ret;
+
ret = scp_elf_load_segments(rproc, fw);
clk_disable_unprepare(scp->clk);
@@ -293,7 +373,7 @@ static int scp_start(struct rproc *rproc)
run->signaled = false;
- scp_reset_deassert(scp);
+ scp->data->scp_reset_deassert(scp);
ret = wait_event_interruptible_timeout(
run->wq,
@@ -309,13 +389,14 @@ static int scp_start(struct rproc *rproc)
dev_err(dev, "wait SCP interrupted by a signal!\n");
goto stop;
}
+
clk_disable_unprepare(scp->clk);
dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
return 0;
stop:
- scp_reset_assert(scp);
+ scp->data->scp_reset_assert(scp);
clk_disable_unprepare(scp->clk);
return ret;
}
@@ -329,7 +410,7 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
offset = da;
if (offset >= 0 && (offset + len) < scp->sram_size)
return (void __force *)scp->sram_base + offset;
- } else {
+ } else if (scp->dram_size) {
offset = da - scp->dma_addr;
if (offset >= 0 && (offset + len) < scp->dram_size)
return (void __force *)scp->cpu_addr + offset;
@@ -338,6 +419,25 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len)
return NULL;
}
+static void mt8183_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8183_WDT_CFG);
+}
+
+static void mt8192_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0);
+ mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1);
+ mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2);
+ mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN);
+ mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
static int scp_stop(struct rproc *rproc)
{
struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
@@ -349,9 +449,8 @@ static int scp_stop(struct rproc *rproc)
return ret;
}
- scp_reset_assert(scp);
- /* Disable SCP watchdog */
- writel(0, scp->reg_base + MT8183_WDT_CFG);
+ scp->data->scp_reset_assert(scp);
+ scp->data->scp_stop(scp);
clk_disable_unprepare(scp->clk);
return 0;
@@ -443,6 +542,13 @@ static int scp_map_memory_region(struct mtk_scp *scp)
int ret;
ret = of_reserved_mem_device_init(scp->dev);
+
+ /* reserved memory is optional. */
+ if (ret == -ENODEV) {
+ dev_info(scp->dev, "skipping reserved memory initialization.");
+ return 0;
+ }
+
if (ret) {
dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
return -ENOMEM;
@@ -460,6 +566,9 @@ static int scp_map_memory_region(struct mtk_scp *scp)
static void scp_unmap_memory_region(struct mtk_scp *scp)
{
+ if (scp->dram_size == 0)
+ return;
+
dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
scp->dma_addr);
of_reserved_mem_device_release(scp->dev);
@@ -536,6 +645,7 @@ static int scp_probe(struct platform_device *pdev)
scp = (struct mtk_scp *)rproc->priv;
scp->rproc = rproc;
scp->dev = dev;
+ scp->data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, scp);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
@@ -642,8 +752,29 @@ static int scp_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_scp_of_data mt8183_of_data = {
+ .scp_before_load = mt8183_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data mt8192_of_data = {
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
static const struct of_device_id mtk_scp_of_match[] = {
- { .compatible = "mediatek,mt8183-scp"},
+ { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
+ { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{},
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
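With the match table now carrying per-SoC data, probe resolves the hook set once and every SoC-specific step dispatches through it. A distilled sketch of that flow (simplified from the probe and load-path hunks above, not new driver code):

static int sketch_bind_of_data(struct platform_device *pdev,
			       struct mtk_scp *scp)
{
	scp->data = of_device_get_match_data(&pdev->dev);
	if (!scp->data)
		return -EINVAL;		/* unsupported compatible */

	/* later steps funnel through the per-SoC hooks */
	scp->data->scp_reset_assert(scp);
	return scp->data->scp_before_load(scp);
}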
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index 3d3d87210ef2..6dc955ecab80 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -30,10 +30,8 @@ int scp_ipi_register(struct mtk_scp *scp,
scp_ipi_handler_t handler,
void *priv)
{
- if (!scp) {
- dev_err(scp->dev, "scp device is not ready\n");
+ if (!scp)
return -EPROBE_DEFER;
- }
if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
return -EINVAL;
@@ -182,7 +180,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
ret = -ETIMEDOUT;
goto clock_disable;
}
- } while (readl(scp->reg_base + MT8183_HOST_TO_SCP));
+ } while (readl(scp->reg_base + scp->data->host_to_scp_reg));
scp_memcpy_aligned(send_obj->share_buf, buf, len);
@@ -191,7 +189,8 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
scp->ipi_id_ack[id] = false;
/* send the command to SCP */
- writel(MT8183_HOST_IPC_INT_BIT, scp->reg_base + MT8183_HOST_TO_SCP);
+ writel(scp->data->host_to_scp_int_bit,
+ scp->reg_base + scp->data->host_to_scp_reg);
if (wait) {
/* wait for SCP's ACK */
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index c401bcc263fa..eb3457a6c3b7 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -931,6 +931,17 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto assert_reset;
}
+ /*
+ * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
+ * the Q6 access to this region.
+ */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
/* Assign MBA image access in DDR to q6 */
ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size);
@@ -1135,10 +1146,9 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
- /**
+ /*
* In case of a modem subsystem restart on secure devices, the modem
- * memory can be reclaimed only after MBA is loaded. For modem cold
- * boot this will be a nop
+ * memory can be reclaimed only after MBA is loaded.
*/
q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
qproc->mpss_phys, qproc->mpss_size);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 7f90eeea67e2..dab2c0f5caf0 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -22,7 +22,9 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
@@ -458,6 +460,25 @@ static void rproc_rvdev_release(struct device *dev)
kfree(rvdev);
}
+static int copy_dma_range_map(struct device *to, struct device *from)
+{
+ const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
+ int num_ranges = 0;
+
+ if (!map)
+ return 0;
+
+ for (r = map; r->size; r++)
+ num_ranges++;
+
+ new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+ to->dma_range_map = new_map;
+ return 0;
+}
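For context (an illustration with hypothetical addresses, not from the patch): dma_range_map points at a table of struct bus_dma_region entries terminated by a zero-sized sentinel, which is why copy_dma_range_map() counts entries until r->size == 0 and allocates num_ranges + 1:

static const struct bus_dma_region sketch_map[] = {
	{
		.cpu_start	= 0x80000000,	/* CPU/physical base */
		.dma_start	= 0x00000000,	/* device-view base */
		.size		= 0x10000000,	/* 256 MiB window */
	},
	{ /* zero-sized sentinel ends the table */ }
};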
+
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
@@ -529,7 +550,9 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* Initialise vdev subdevice */
snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
rvdev->dev.parent = &rproc->dev;
- rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
+ ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent);
+ if (ret)
+ return ret;
rvdev->dev.release = rproc_rvdev_release;
dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
dev_set_drvdata(&rvdev->dev, rvdev);
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index bb15a29038e8..34530dc20cb4 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -257,7 +257,7 @@ void rproc_coredump(struct rproc *rproc)
* directly read from device memory.
*/
data_size += elf_size_of_phdr(class);
- if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
data_size += segment->size;
phnum++;
@@ -297,14 +297,14 @@ void rproc_coredump(struct rproc *rproc)
elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
elf_phdr_set_p_align(class, phdr, 0);
- if (dump_conf == RPROC_COREDUMP_DEFAULT)
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
rproc_copy_segment(rproc, data + offset, segment, 0,
segment->size);
offset += elf_phdr_get_p_filesz(class, phdr);
phdr += elf_size_of_phdr(class);
}
- if (dump_conf == RPROC_COREDUMP_DEFAULT) {
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
return;
}
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 2e3b3e22e1d0..7e5845376e9f 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -33,9 +33,9 @@ static struct dentry *rproc_dbg;
* enum rproc_coredump_mechanism
*/
static const char * const rproc_coredump_str[] = {
- [RPROC_COREDUMP_DEFAULT] = "default",
- [RPROC_COREDUMP_INLINE] = "inline",
[RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
};
/* Expose the current coredump configuration via debugfs */
@@ -54,20 +54,19 @@ static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
/*
* By writing to the 'coredump' debugfs entry, we control the behavior of the
- * coredump mechanism dynamically. The default value of this entry is "default".
+ * coredump mechanism dynamically. The default value of this entry is "disabled".
*
* The 'coredump' debugfs entry supports these commands:
*
- * default: This is the default coredump mechanism. When the remoteproc
- * crashes the entire coredump will be copied to a separate buffer
- * and exposed to userspace.
+ * disabled: By default coredump collection is disabled. Recovery will
+ * proceed without collecting any dump.
+ *
+ * enabled: When the remoteproc crashes the entire coredump will be copied
+ * to a separate buffer and exposed to userspace.
*
* inline: The coredump will not be copied to a separate buffer and the
* recovery process will have to wait until data is read by
 *	   userspace. But this avoids usage of extra memory.
- *
- * disabled: This will disable coredump. Recovery will proceed without
- * collecting any dump.
*/
static ssize_t rproc_coredump_write(struct file *filp,
const char __user *user_buf, size_t count,
@@ -94,12 +93,12 @@ static ssize_t rproc_coredump_write(struct file *filp,
goto out;
}
- if (!strncmp(buf, "disable", count)) {
+ if (!strncmp(buf, "disabled", count)) {
rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "enabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
} else if (!strncmp(buf, "inline", count)) {
rproc->dump_conf = RPROC_COREDUMP_INLINE;
- } else if (!strncmp(buf, "default", count)) {
- rproc->dump_conf = RPROC_COREDUMP_DEFAULT;
} else {
dev_err(&rproc->dev, "Invalid coredump configuration\n");
err = -EINVAL;
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index eea514cec50e..d1cf7bf277c4 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -10,6 +10,123 @@
#define to_rproc(d) container_of(d, struct rproc, dev)
+static ssize_t recovery_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sprintf(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
+}
+
+/*
+ * By writing to the 'recovery' sysfs entry, we control the behavior of the
+ * recovery mechanism dynamically. The default value of this entry is "enabled".
+ *
+ * The 'recovery' sysfs entry supports these commands:
+ *
+ * enabled: When enabled, the remote processor will be automatically
+ * recovered whenever it crashes. Moreover, if the remote
+ * processor crashes while recovery is disabled, it will
+ * be automatically recovered too as soon as recovery is enabled.
+ *
+ * disabled: When disabled, a remote processor will remain in a crashed
+ * state if it crashes. This is useful for debugging purposes;
+ * without it, debugging a crash is substantially harder.
+ *
+ * recover: This function will trigger an immediate recovery if the
+ * remote processor is in a crashed state, without changing
+ * or checking the recovery state (enabled/disabled).
+ * This is useful during debugging sessions, when one expects
+ * additional crashes to happen after enabling recovery. In this
+ * case, enabling recovery will make it hard to debug subsequent
+ * crashes, so it's recommended to keep recovery disabled, and
+ * instead use the "recover" command as needed.
+ */
+static ssize_t recovery_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ if (sysfs_streq(buf, "enabled")) {
+ /* change the flag and begin the recovery process if needed */
+ rproc->recovery_disabled = false;
+ rproc_trigger_recovery(rproc);
+ } else if (sysfs_streq(buf, "disabled")) {
+ rproc->recovery_disabled = true;
+ } else if (sysfs_streq(buf, "recover")) {
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(recovery);
+
+/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via sysfs. Always keep in sync with
+ * enum rproc_coredump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t coredump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sprintf(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+}
+
+/*
+ * By writing to the 'coredump' sysfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "disabled".
+ *
+ * The 'coredump' sysfs entry supports these commands:
+ *
+ * disabled: This is the default. Recovery will proceed without collecting
+ *	   any dump.
+ *
+ * enabled:  When the remoteproc crashes the entire coredump will be
+ *	   copied to a separate buffer and exposed to userspace.
+ *
+ * inline: The coredump will not be copied to a separate buffer and the
+ * recovery process will have to wait until data is read by
+ *	   userspace. But this avoids usage of extra memory.
+ */
+static ssize_t coredump_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ return -EBUSY;
+ }
+
+ if (sysfs_streq(buf, "disabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (sysfs_streq(buf, "enabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+ } else if (sysfs_streq(buf, "inline")) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(coredump);
+
/* Expose the loaded / running firmware name via sysfs */
static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -138,6 +255,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(name);
static struct attribute *rproc_attrs[] = {
+ &dev_attr_coredump.attr,
+ &dev_attr_recovery.attr,
&dev_attr_firmware.attr,
&dev_attr_state.attr,
&dev_attr_name.attr,
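For context (a sketch of the surrounding code, which this hunk does not show): the array above feeds a standard attribute group on the rproc device, which is what makes the new coredump and recovery files appear under /sys/class/remoteproc/remoteprocN/:

static const struct attribute_group rproc_devgroup = {
	.attrs = rproc_attrs
};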
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index dfd3808c34fd..0cc617f76068 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -9,7 +9,7 @@
* Brian Swetland <swetland@google.com>
*/
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index f4da42fc0eeb..d2414cc1d90d 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -685,7 +685,7 @@ static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
* We couldn't get the coprocessor's state, assume
* it is not running.
*/
- state = M4_STATE_OFF;
+ *state = M4_STATE_OFF;
return 0;
}
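The one-character fix above addresses a classic out-parameter bug; reduced to plain C for illustration:

#include <stdio.h>

static void get_state_buggy(unsigned int *state)
{
	state = 0;	/* only overwrites the local pointer copy */
}

static void get_state_fixed(unsigned int *state)
{
	*state = 0;	/* writes through to the caller's variable */
}

int main(void)
{
	unsigned int s = 42;

	get_state_buggy(&s);
	printf("buggy: %u\n", s);	/* still 42 */
	get_state_fixed(&s);
	printf("fixed: %u\n", s);	/* 0 */
	return 0;
}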
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
new file mode 100644
index 000000000000..d9307935441d
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 R5F (MCU) Remote Processor driver
+ *
+ * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+/* This address can either be for ATCM or BTCM with the other at address 0x0 */
+#define K3_R5_TCM_DEV_ADDR 0x41010000
+
+/* R5 TI-SCI Processor Configuration Flags */
+#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
+#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
+#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
+#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
+#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
+#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
+#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
+#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
+
+/* R5 TI-SCI Processor Control Flags */
+#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
+
+/* R5 TI-SCI Processor Status Flags */
+#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
+#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
+#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
+#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
+
+/**
+ * struct k3_r5_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from remoteproc view
+ * @size: Size of the memory region
+ */
+struct k3_r5_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+enum cluster_mode {
+ CLUSTER_MODE_SPLIT = 0,
+ CLUSTER_MODE_LOCKSTEP,
+};
+
+/**
+ * struct k3_r5_cluster - K3 R5F Cluster structure
+ * @dev: cached device pointer
+ * @mode: Mode to configure the Cluster - Split or LockStep
+ * @cores: list of R5 cores within the cluster
+ */
+struct k3_r5_cluster {
+ struct device *dev;
+ enum cluster_mode mode;
+ struct list_head cores;
+};
+
+/**
+ * struct k3_r5_core - K3 R5 core structure
+ * @elem: linked list item
+ * @dev: cached device pointer
+ * @rproc: rproc handle representing this core
+ * @mem: internal memory regions data
+ * @sram: on-chip SRAM memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_sram: number of on-chip SRAM memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @atcm_enable: flag to control ATCM enablement
+ * @btcm_enable: flag to control BTCM enablement
+ * @loczrama: flag to dictate which TCM is at device address 0x0
+ */
+struct k3_r5_core {
+ struct list_head elem;
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_r5_mem *mem;
+ struct k3_r5_mem *sram;
+ int num_mems;
+ int num_sram;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ u32 atcm_enable;
+ u32 btcm_enable;
+ u32 loczrama;
+};
+
+/**
+ * struct k3_r5_rproc - K3 remote processor state
+ * @dev: cached device pointer
+ * @cluster: cached pointer to parent cluster structure
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @rproc: rproc handle
+ * @core: cached pointer to r5 core structure being used
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ */
+struct k3_r5_rproc {
+ struct device *dev;
+ struct k3_r5_cluster *cluster;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ struct rproc *rproc;
+ struct k3_r5_core *core;
+ struct k3_r5_mem *rmem;
+ int num_rmems;
+};
+
+/**
+ * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 R5F rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/* kick a virtqueue */
+static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_r5_split_reset(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_split_release(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ ret);
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* assert local reset on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ core = list_prev_entry(core, elem);
+ goto unroll_local_reset;
+ }
+ }
+
+ /* disable PSC modules on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ goto unroll_module_reset;
+ }
+ }
+
+ return 0;
+
+unroll_module_reset:
+ list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+ core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_local_reset:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* enable PSC modules on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ core = list_next_entry(core, elem);
+ goto unroll_module_reset;
+ }
+ }
+
+ /* deassert local reset on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ ret);
+ goto unroll_local_reset;
+ }
+ }
+
+ return 0;
+
+unroll_local_reset:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (reset_control_assert(core->reset))
+ dev_warn(core->dev, "local-reset assert back failed\n");
+ }
+ core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_module_reset:
+ list_for_each_entry_from(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
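The error paths in the two lockstep helpers above use a mid-list unwind: on failure, back up to the last core that succeeded and undo in reverse. The pattern in miniature (do_step()/undo_step() are hypothetical stand-ins for the reset/PSC calls):

static int sketch_walk(struct k3_r5_cluster *cluster)
{
	struct k3_r5_core *core;
	int ret;

	list_for_each_entry(core, &cluster->cores, elem) {
		ret = do_step(core);
		if (ret) {
			core = list_prev_entry(core, elem);
			goto unwind;
		}
	}
	return 0;

unwind:
	list_for_each_entry_from_reverse(core, &cluster->cores, elem)
		undo_step(core);	/* undo completed steps only */
	return ret;
}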
+
+static inline int k3_r5_core_halt(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
+}
+
+static inline int k3_r5_core_run(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
+}
+
+/*
+ * The R5F cores have controls for both a reset and a halt/run. The code
+ * execution from DDR requires the initial boot-strapping code to be run
+ * from the internal TCMs. This function is used to release the resets on
+ * applicable cores to allow loading into the TCMs. The .prepare() ops is
+ * invoked by remoteproc core before any firmware loading, and is followed
+ * by the .start() ops after loading to actually let the R5 cores run.
+ */
+static int k3_r5_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ if (ret) {
+ dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Zero out both TCMs unconditionally (access from v8 Arm core is not
+ * affected by ATCM & BTCM enable configuration values) so that ECC
+ * can be effective on all TCM addresses.
+ */
+ dev_dbg(dev, "zeroing out ATCM memory\n");
+ memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+
+ dev_dbg(dev, "zeroing out BTCM memory\n");
+ memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to those of the .prepare() ops. The function is used to assert the
+ * resets on all applicable cores for the rproc device (depending on LockStep
+ * or Split mode). This completes the second portion of powering down the R5F
+ * cores. The cores themselves are only halted in the .stop() ops, and the
+ * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
+ * stopped.
+ */
+static int k3_r5_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ if (ret)
+ dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * The R5F start sequence includes two different operations
+ * 1. Configure the boot vector for R5F core(s)
+ * 2. Unhalt/Run the R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes. The LockStep
+ * mode requires the boot vector to be configured only for Core0, and then
+ * unhalt both the cores to start the execution - Core1 needs to be unhalted
+ * first followed by Core0. Split mode requires Core0 to always be maintained
+ * in a higher power state than Core1 (implying Core1 must be started only
+ * after Core0 is started).
+ */
+static int k3_r5_rproc_start(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core;
+ u32 boot_addr;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_r5_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+	 * Ping the remote processor, this is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ goto put_mbox;
+ }
+
+ boot_addr = rproc->bootaddr;
+ /* TODO: add boot_addr sanity checking */
+ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+
+ /* boot vector need not be programmed for Core1 in LockStep mode */
+ core = kproc->core;
+ ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ /* unhalt/run all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto unroll_core_run;
+ }
+ } else {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto put_mbox;
+ }
+
+ return 0;
+
+unroll_core_run:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (k3_r5_core_halt(core))
+ dev_warn(core->dev, "core halt back failed\n");
+ }
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * The R5F stop function includes the following operations
+ * 1. Halt R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes, and the order
+ * of cores the operations are performed are also in general reverse to that
+ * of the start function. The LockStep mode requires each operation to be
+ * performed first on Core0 followed by Core1. Split mode requires Core0 to
+ * always be maintained in a higher power state than Core1 (implying Core1
+ * needs to be stopped before Core0).
+ *
+ * Note that the R5F halt operation in general is not effective when the R5F
+ * core is running, but is needed to make sure the core won't run after
+ * deasserting the reset the next time. Asserting the reset could be done
+ * here, but it is preferable to do it in the .unprepare() ops - this
+ * maintains the symmetric behavior between the .start(), .stop(), .prepare()
+ * and .unprepare() ops, and also balances them well between sysfs 'state'
+ * flow and device bind/unbind or module removal.
+ */
+static int k3_r5_rproc_stop(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ int ret;
+
+ /* halt all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(core);
+ if (ret) {
+ core = list_prev_entry(core, elem);
+ goto unroll_core_halt;
+ }
+ }
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+ }
+
+ mbox_free_channel(kproc->mbox);
+
+ return 0;
+
+unroll_core_halt:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (k3_r5_core_run(core))
+ dev_warn(core->dev, "core run back failed\n");
+ }
+out:
+ return ret;
+}
+
+/*
+ * Internal Memory translation helper
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in the remote processor device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ */
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->core;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ /* handle both R5 and SoC views of ATCM and BTCM */
+ for (i = 0; i < core->num_mems; i++) {
+ bus_addr = core->mem[i].bus_addr;
+ dev_addr = core->mem[i].dev_addr;
+ size = core->mem[i].size;
+
+ /* handle R5-view addresses of TCMs */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses of TCMs */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle any SRAM regions using SoC-view addresses */
+ for (i = 0; i < core->num_sram; i++) {
+ dev_addr = core->sram[i].dev_addr;
+ size = core->sram[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->sram[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
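+/*
+ * Example (illustrative only, using assumed AM65x values): with a 32 KB
+ * BTCM at SoC bus address 0x41010000, a request for da = 0x41010100 and
+ * len = 0x100 matches the SoC-view TCM check above, so the helper returns
+ * core->mem[1].cpu_addr + (0x41010100 - 0x41010000), i.e. an offset of
+ * 0x100 into the kernel mapping of the BTCM.
+ */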
+
+static const struct rproc_ops k3_r5_rproc_ops = {
+ .prepare = k3_r5_rproc_prepare,
+ .unprepare = k3_r5_rproc_unprepare,
+ .start = k3_r5_rproc_start,
+ .stop = k3_r5_rproc_stop,
+ .kick = k3_r5_rproc_kick,
+ .da_to_va = k3_r5_rproc_da_to_va,
+};
+
+/*
+ * Internal R5F Core configuration
+ *
+ * Each R5FSS has a cluster-level setting for configuring the processor
+ * subsystem either in a safety/fault-tolerant LockStep mode or a performance
+ * oriented Split mode. Each R5F core has a number of settings to enable or
+ * disable each of the TCMs and to control which TCM appears at the R5F core's
+ * address 0x0. These settings need to be configured before the resets for the
+ * corresponding core are released. These settings are all protected and managed
+ * by the System Processor.
+ *
+ * This function is used to pre-configure these settings for each R5F core, and
+ * the configuration is all done through various ti_sci_proc functions that
+ * communicate with the System Processor. The function also ensures that both
+ * the cores are halted before the .prepare() step.
+ *
+ * The function is called from k3_r5_cluster_rproc_init() and is invoked either
+ * once (in LockStep mode) or twice (in Split mode). Support for LockStep-mode
+ * is dictated by an eFUSE register bit, and the config settings retrieved from
+ * DT are adjusted as per the permitted cluster mode. All cluster-level
+ * settings like Cluster mode and TEINIT (exception handling state
+ * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
+ *
+ * The function behavior is different based on the cluster mode. The R5F cores
+ * are configured independently as per their individual settings in Split mode.
+ * They are identically configured in LockStep mode using the primary Core0
+ * settings. However, some individual settings cannot be set in LockStep mode.
+ * This is overcome by switching to Split-mode initially and then programming
+ * both the cores with the same settings, before reconfiguring again for
+ * LockStep mode.
+ */
+static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core0, *core, *temp;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u32 set_cfg = 0, clr_cfg = 0;
+ u64 boot_vec = 0;
+ bool lockstep_en;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
+ boot_vec, cfg, ctrl, stat);
+
+ lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
+ if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
+ cluster->mode = CLUSTER_MODE_SPLIT;
+ }
+
+ /* always enable ARM mode and set boot vector to 0 */
+ boot_vec = 0x0;
+ if (core == core0) {
+ clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ /*
+ * LockStep configuration bit is Read-only on Split-mode _only_
+ * devices and system firmware will NACK any requests with the
+ * bit configured, so program it only on permitted devices
+ */
+ if (lockstep_en)
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ }
+
+ if (core->atcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+
+ if (core->btcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+
+ if (core->loczrama)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ /*
+ * work around system firmware limitations to make sure both
+ * cores are programmed symmetrically in LockStep mode. LockStep
+ * and TEINIT config are only allowed on Core0.
+ */
+ list_for_each_entry(temp, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(temp);
+ if (ret)
+ goto out;
+
+ if (temp != core) {
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ }
+ ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ if (ret)
+ goto out;
+ }
+
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg = 0;
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ }
+
+out:
+ return ret;
+}
+
+static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs atleast two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /*
+ * R5Fs do not have an MMU, but have a Region Address Translator
+ * (RAT) module that provides a fixed entry translation from the
+ * 32-bit processor addresses to 64-bit bus addresses. The RAT is
+ * programmable only by the R5F cores. The RAT is not currently
+ * supported by this driver, so 64-bit address regions are not
+ * supported. The absence of MMUs implies that the R5F device
+ * addresses/supported memory regions are restricted to 32-bit
+ * bus addresses, and are identical to the bus addresses
+ */
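+ /*
+ * Illustrative example (addresses are assumptions, not from
+ * this patch): a carveout at bus address 0x9b800000 yields
+ * dev_addr = 0x9b800000 as well; the (u32) cast below is
+ * lossless as long as the carveout sits below 4 GB.
+ */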
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(dev);
+ return ret;
+}
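+/*
+ * Example (illustrative) of a "memory-region" property this function
+ * consumes; the phandle names are assumptions, only the ordering matters
+ * (region 0 backs the vring DMA pool, the rest become static carveouts):
+ *
+ * memory-region = <&r5f0_dma_memory_region>,
+ * <&r5f0_memory_region>;
+ */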
+
+static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core, *core1;
+ struct device *cdev;
+ const char *fw_name;
+ struct rproc *rproc;
+ int ret;
+
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* K3 R5s have a Region Address Translator (RAT) but no MMU */
+ rproc->has_iommu = false;
+ /* error recovery is not supported at present */
+ rproc->recovery_disabled = true;
+
+ kproc = rproc->priv;
+ kproc->cluster = cluster;
+ kproc->core = core;
+ kproc->dev = cdev;
+ kproc->rproc = rproc;
+ core->rproc = rproc;
+
+ ret = k3_r5_rproc_configure(kproc);
+ if (ret) {
+ dev_err(dev, "initial configure failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = k3_r5_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed, ret = %d\n", ret);
+ goto err_add;
+ }
+
+ /* create only one rproc in lockstep mode */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
+ break;
+ }
+
+ return 0;
+
+err_split:
+ rproc_del(rproc);
+err_add:
+ k3_r5_reserved_mem_exit(kproc);
+err_config:
+ rproc_free(rproc);
+ core->rproc = NULL;
+out:
+ /* undo core0 upon any failures on core1 in split-mode */
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
+ core = list_prev_entry(core, elem);
+ rproc = core->rproc;
+ kproc = rproc->priv;
+ goto err_split;
+ }
+ return ret;
+}
+
+static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core;
+ struct rproc *rproc;
+
+ /*
+ * lockstep mode has only one rproc associated with the first core,
+ * whereas split-mode has one rproc for each of the two cores, and
+ * requires that core1 be powered down first
+ */
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
+ list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
+ list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ rproc = core->rproc;
+ kproc = rproc->priv;
+
+ rproc_del(rproc);
+
+ k3_r5_reserved_mem_exit(kproc);
+
+ rproc_free(rproc);
+ core->rproc = NULL;
+ }
+
+ return 0;
+}
+
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ static const char * const mem_names[] = {"atcm", "btcm"};
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
+ if (!core->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ /*
+ * TCMs are designed in general to support RAM-like backing
+ * memories. So, map these as Normal Non-Cached memories. This
+ * also avoids/fixes any potential alignment faults due to
+ * unaligned data accesses when using memcpy() or memset()
+ * functions (normally seen with device type memory).
+ */
+ core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!core->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n", mem_names[i]);
+ return -ENOMEM;
+ }
+ core->mem[i].bus_addr = res->start;
+
+ /*
+ * TODO:
+ * The R5F cores can place the ATCM & BTCM anywhere in their
+ * address space based on the corresponding Region Registers in
+ * the System Control coprocessor. For now, place ATCM and BTCM
+ * at addresses 0 and 0x41010000 (same as the bus address on
+ * AM65x SoCs) based on the loczrama setting
+ */
+ if (!strcmp(mem_names[i], "atcm")) {
+ core->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ core->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+ core->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &core->mem[i].bus_addr,
+ core->mem[i].size, core->mem[i].cpu_addr,
+ core->mem[i].dev_addr);
+ }
+ core->num_mems = num_mems;
+
+ return 0;
+}
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
+static
+struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
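+/*
+ * Example (illustrative) of the two-cell "ti,sci-proc-ids" property read
+ * above, in <proc_id host_id> order; the values are assumptions for an
+ * MCU R5F core, not mandated by this driver:
+ *
+ * ti,sci-proc-ids = <0x01 0xff>;
+ */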
+
+static int k3_r5_core_of_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_core *core;
+ int ret;
+
+ if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
+ return -ENOMEM;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ core->dev = dev;
+ /*
+ * Use SoC Power-on-Reset values as the defaults if no DT properties
+ * are provided to dictate the TCM configurations
+ */
+ core->atcm_enable = 0;
+ core->btcm_enable = 1;
+ core->loczrama = 1;
+
+ ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
+ goto err;
+ }
+
+ core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(core->ti_sci)) {
+ ret = PTR_ERR(core->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ core->ti_sci = NULL;
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto err;
+ }
+
+ core->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR_OR_NULL(core->reset)) {
+ ret = PTR_ERR_OR_ZERO(core->reset);
+ if (!ret)
+ ret = -ENODEV;
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get reset handle, ret = %d\n",
+ ret);
+ }
+ goto err;
+ }
+
+ core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ if (IS_ERR(core->tsp)) {
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ ret = PTR_ERR(core->tsp);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_sram_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
+ goto err;
+ }
+
+ ret = ti_sci_proc_request(core->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, core);
+ devres_close_group(dev, k3_r5_core_of_init);
+
+ return 0;
+
+err:
+ devres_release_group(dev, k3_r5_core_of_init);
+ return ret;
+}
+
+/*
+ * Free the resources explicitly since the driver model is not being used
+ * for the child R5F devices
+ */
+static void k3_r5_core_of_exit(struct platform_device *pdev)
+{
+ struct k3_r5_core *core = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = ti_sci_proc_release(core->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ platform_set_drvdata(pdev, NULL);
+ devres_release_group(dev, k3_r5_core_of_init);
+}
+
+static void k3_r5_cluster_of_exit(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct platform_device *cpdev;
+ struct k3_r5_core *core, *temp;
+
+ list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
+ list_del(&core->elem);
+ cpdev = to_platform_device(core->dev);
+ k3_r5_core_of_exit(cpdev);
+ }
+}
+
+static int k3_r5_cluster_of_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct platform_device *cpdev;
+ struct device_node *child;
+ struct k3_r5_core *core;
+ int ret;
+
+ for_each_available_child_of_node(np, child) {
+ cpdev = of_find_device_by_node(child);
+ if (!cpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get R5 core platform device\n");
+ goto fail;
+ }
+
+ ret = k3_r5_core_of_init(cpdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
+ ret);
+ put_device(&cpdev->dev);
+ goto fail;
+ }
+
+ core = platform_get_drvdata(cpdev);
+ put_device(&cpdev->dev);
+ list_add_tail(&core->elem, &cluster->cores);
+ }
+
+ return 0;
+
+fail:
+ k3_r5_cluster_of_exit(pdev);
+ return ret;
+}
+
+static int k3_r5_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_cluster *cluster;
+ int ret;
+ int num_cores;
+
+ cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
+ if (!cluster)
+ return -ENOMEM;
+
+ cluster->dev = dev;
+ cluster->mode = CLUSTER_MODE_LOCKSTEP;
+ INIT_LIST_HEAD(&cluster->cores);
+
+ ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_cores = of_get_available_child_count(np);
+ if (num_cores != 2) {
+ dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
+ num_cores);
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, cluster);
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = k3_r5_cluster_of_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))k3_r5_cluster_of_exit,
+ pdev);
+ if (ret)
+ return ret;
+
+ ret = k3_r5_cluster_rproc_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev,
+ (void(*)(void *))k3_r5_cluster_rproc_exit,
+ pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id k3_r5_of_match[] = {
+ { .compatible = "ti,am654-r5fss", },
+ { .compatible = "ti,j721e-r5fss", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_r5_of_match);
+
+static struct platform_driver k3_r5_rproc_driver = {
+ .probe = k3_r5_probe,
+ .driver = {
+ .name = "k3_r5_rproc",
+ .of_match_table = k3_r5_of_match,
+ },
+};
+
+module_platform_driver(k3_r5_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index d9efbfd29646..07d162b179fc 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -65,9 +65,10 @@ config RESET_HSDK
This enables the reset controller driver for HSDK board.
config RESET_IMX7
- bool "i.MX7/8 Reset Driver" if COMPILE_TEST
+ tristate "i.MX7/8 Reset Driver"
depends on HAS_IOMEM
- default SOC_IMX7D || (ARM64 && ARCH_MXC)
+ depends on SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST
+ default y if SOC_IMX7D
select MFD_SYSCON
help
This enables the reset controller driver for i.MX7 SoCs.
@@ -140,6 +141,17 @@ config RESET_QCOM_PDC
to control reset signals provided by PDC for Modem, Compute,
Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS.
+config RESET_RASPBERRYPI
+ tristate "Raspberry Pi 4 Firmware Reset Driver"
+ depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
+ default USB_XHCI_PCI
+ help
+ Raspberry Pi 4's co-processor controls some of the board's HW
+ initialization process, but it's up to Linux to trigger it when
+ relevant. This driver provides a reset controller capable of
+ interfacing with the RPi4's co-processor, modeling these firmware
+ initialization routines as reset lines.
+
config RESET_SCMI
tristate "Reset driver controlled via ARM SCMI interface"
depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 249ed357c997..16947610cc3b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
+obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 01c0c7aa835c..a2df88e90011 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,7 +32,8 @@ static LIST_HEAD(reset_lookup_list);
* @refcnt: Number of gets of this reset_control
* @acquired: Only one reset_control may be acquired for a given rcdev and id.
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
- * @deassert_cnt: Number of times this reset line has been deasserted
+ * @array: Is this an array of reset controls (1)?
+ * @deassert_count: Number of times this reset line has been deasserted
* @triggered_count: Number of times this reset line has been reset. Currently
* only used for shared resets, which means that the value
* will be either 0 or 1.
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index e8aa8691deb2..185a333df66c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -8,7 +8,7 @@
*/
#include <linux/mfd/syscon.h>
-#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -178,6 +178,9 @@ static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
[IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
[IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MQ_RESET_SW_M4C_RST] = { SRC_M4RCR, BIT(1) },
+ [IMX8MQ_RESET_SW_M4P_RST] = { SRC_M4RCR, BIT(2) },
+ [IMX8MQ_RESET_M4_ENABLE] = { SRC_M4RCR, BIT(3) },
[IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
[IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
@@ -238,6 +241,7 @@ static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N:
+ case IMX8MQ_RESET_M4_ENABLE:
value = assert ? 0 : bit;
break;
}
@@ -386,6 +390,7 @@ static const struct of_device_id imx7_reset_dt_ids[] = {
{ .compatible = "fsl,imx8mp-src", .data = &variant_imx8mp },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, imx7_reset_dt_ids);
static struct platform_driver imx7_reset_driver = {
.probe = imx7_reset_probe,
@@ -394,4 +399,8 @@ static struct platform_driver imx7_reset_driver = {
.of_match_table = imx7_reset_dt_ids,
},
};
-builtin_platform_driver(imx7_reset_driver);
+module_platform_driver(imx7_reset_driver);
+
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("NXP i.MX7 reset driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-raspberrypi.c b/drivers/reset/reset-raspberrypi.c
new file mode 100644
index 000000000000..02f59c06f69b
--- /dev/null
+++ b/drivers/reset/reset-raspberrypi.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Raspberry Pi 4 firmware reset driver
+ *
+ * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+#include <dt-bindings/reset/raspberrypi,firmware-reset.h>
+
+struct rpi_reset {
+ struct reset_controller_dev rcdev;
+ struct rpi_firmware *fw;
+};
+
+static inline struct rpi_reset *to_rpi(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct rpi_reset, rcdev);
+}
+
+static int rpi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct rpi_reset *priv = to_rpi(rcdev);
+ u32 dev_addr;
+ int ret;
+
+ switch (id) {
+ case RASPBERRYPI_FIRMWARE_RESET_ID_USB:
+ /*
+ * The Raspberry Pi 4 gets its USB functionality from VL805, a
+ * PCIe chip that implements xHCI. After a PCI reset, VL805's
+ * firmware may either be loaded directly from an EEPROM or, if
+ * not present, by the SoC's co-processor, VideoCore. The RPi's
+ * VideoCore OS contains both the non-public firmware load
+ * logic and the VL805 firmware blob. This reset triggers the
+ * aforementioned process.
+ *
+ * The PCI device address is expected by the firmware encoded
+ * like this:
+ *
+ * PCI_BUS << 20 | PCI_SLOT << 15 | PCI_FUNC << 12
+ *
+ * But since the RPi's PCIe is hardwired, we know the address in
+ * advance.
+ */
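+ /*
+ * Worked example for the hardwired topology (bus 1, slot 0,
+ * function 0): (1 << 20) | (0 << 15) | (0 << 12) = 0x100000.
+ */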
+ dev_addr = 0x100000;
+ ret = rpi_firmware_property(priv->fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
+ &dev_addr, sizeof(dev_addr));
+ if (ret)
+ return ret;
+
+ /* Wait for the VL805 to start up */
+ usleep_range(200, 1000);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
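+/*
+ * Illustrative consumer sketch (assumed, not part of this patch): a host
+ * controller driver could trigger the firmware routine through the generic
+ * reset API, e.g.:
+ *
+ * struct reset_control *rst = devm_reset_control_get(dev, NULL);
+ *
+ * if (!IS_ERR(rst))
+ * reset_control_reset(rst);
+ */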
+
+static const struct reset_control_ops rpi_reset_ops = {
+ .reset = rpi_reset_reset,
+};
+
+static int rpi_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpi_firmware *fw;
+ struct device_node *np;
+ struct rpi_reset *priv;
+
+ np = of_get_parent(dev->of_node);
+ if (!np) {
+ dev_err(dev, "Missing firmware node\n");
+ return -ENOENT;
+ }
+
+ fw = rpi_firmware_get(np);
+ of_node_put(np);
+ if (!fw)
+ return -EPROBE_DEFER;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->fw = fw;
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = RASPBERRYPI_FIRMWARE_RESET_NUM_IDS;
+ priv->rcdev.ops = &rpi_reset_ops;
+ priv->rcdev.of_node = dev->of_node;
+
+ return devm_reset_controller_register(dev, &priv->rcdev);
+}
+
+static const struct of_device_id rpi_reset_of_match[] = {
+ { .compatible = "raspberrypi,firmware-reset" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rpi_reset_of_match);
+
+static struct platform_driver rpi_reset_driver = {
+ .probe = rpi_reset_probe,
+ .driver = {
+ .name = "raspberrypi-reset",
+ .of_match_table = rpi_reset_of_match,
+ },
+};
+module_platform_driver(rpi_reset_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>");
+MODULE_DESCRIPTION("Raspberry Pi 4 firmware reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 373ea8d4f7a1..ebd433fa09dd 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -9,12 +9,20 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/of_device.h>
#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
+#define VERSAL_NR_RESETS 95
+
+struct zynqmp_reset_soc_data {
+ u32 reset_id;
+ u32 num_resets;
+};
struct zynqmp_reset_data {
struct reset_controller_dev rcdev;
+ const struct zynqmp_reset_soc_data *data;
};
static inline struct zynqmp_reset_data *
@@ -26,23 +34,28 @@ to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_ASSERT);
}
static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_RELEASE);
}
static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
int val, err;
- err = zynqmp_pm_reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ err = zynqmp_pm_reset_get_status(priv->data->reset_id + id, &val);
if (err)
return err;
@@ -52,10 +65,28 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_PULSE);
}
+static int zynqmp_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return reset_spec->args[0];
+}
+
+static const struct zynqmp_reset_soc_data zynqmp_reset_data = {
+ .reset_id = ZYNQMP_RESET_ID,
+ .num_resets = ZYNQMP_NR_RESETS,
+};
+
+static const struct zynqmp_reset_soc_data versal_reset_data = {
+ .reset_id = 0,
+ .num_resets = VERSAL_NR_RESETS,
+};
+
static const struct reset_control_ops zynqmp_reset_ops = {
.reset = zynqmp_reset_reset,
.assert = zynqmp_reset_assert,
@@ -71,18 +102,25 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->data = of_device_get_match_data(&pdev->dev);
+ if (!priv->data)
+ return -EINVAL;
+
platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.of_node = pdev->dev.of_node;
- priv->rcdev.nr_resets = ZYNQMP_NR_RESETS;
+ priv->rcdev.nr_resets = priv->data->num_resets;
+ priv->rcdev.of_reset_n_cells = 1;
+ priv->rcdev.of_xlate = zynqmp_reset_of_xlate;
return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id zynqmp_reset_dt_ids[] = {
- { .compatible = "xlnx,zynqmp-reset", },
+ { .compatible = "xlnx,zynqmp-reset", .data = &zynqmp_reset_data, },
+ { .compatible = "xlnx,versal-reset", .data = &versal_reset_data, },
{ /* sentinel */ },
};
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index 91215bb88f62..99b63035fe72 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -17,7 +17,7 @@
#include "reset-syscfg.h"
/**
- * Reset channel regmap configuration
+ * struct syscfg_reset_channel - Reset channel regmap configuration
*
* @reset: regmap field for the channel's reset bit.
* @ack: regmap field for the channel's ack bit (optional).
@@ -28,8 +28,9 @@ struct syscfg_reset_channel {
};
/**
- * A reset controller which groups together a set of related reset bits, which
- * may be located in different system configuration registers.
+ * struct syscfg_reset_controller - A reset controller which groups together
+ * a set of related reset bits, which may be located in different system
+ * configuration registers.
*
* @rst: base reset controller structure.
* @active_low: are the resets in this controller active low, i.e. clearing
diff --git a/drivers/rpmsg/mtk_rpmsg.c b/drivers/rpmsg/mtk_rpmsg.c
index 83f2b8804ee9..96a17ec29140 100644
--- a/drivers/rpmsg/mtk_rpmsg.c
+++ b/drivers/rpmsg/mtk_rpmsg.c
@@ -200,7 +200,6 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
struct rpmsg_device *rpdev;
struct mtk_rpmsg_device *mdev;
struct platform_device *pdev = mtk_subdev->pdev;
- int ret;
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
@@ -219,13 +218,7 @@ static int mtk_rpmsg_register_device(struct mtk_rpmsg_rproc_subdev *mtk_subdev,
rpdev->dev.parent = &pdev->dev;
rpdev->dev.release = mtk_rpmsg_release_device;
- ret = rpmsg_register_device(rpdev);
- if (ret) {
- kfree(mdev);
- return ret;
- }
-
- return 0;
+ return rpmsg_register_device(rpdev);
}
static void mtk_register_device_work_function(struct work_struct *register_work)
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index f40312b16da0..27a05167c18c 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -970,7 +970,7 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return -EINVAL;
}
- complete(&channel->open_ack);
+ complete_all(&channel->open_ack);
return 0;
}
@@ -1178,7 +1178,7 @@ static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
__be32 *val = defaults;
int size;
- if (glink->intentless)
+ if (glink->intentless || !completion_done(&channel->open_ack))
return 0;
prop = of_find_property(np, "qcom,intents", NULL);
@@ -1413,7 +1413,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
channel->rcid = ret;
spin_unlock_irqrestore(&glink->idr_lock, flags);
- complete(&channel->open_req);
+ complete_all(&channel->open_req);
if (create_device) {
rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
@@ -1574,6 +1574,60 @@ static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
kfree(dcmd);
}
+static ssize_t rpmsg_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret = 0;
+ const char *name;
+
+ ret = of_property_read_string(dev->of_node, "label", &name);
+ if (ret < 0)
+ name = dev->of_node->name;
+
+ return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
+}
+static DEVICE_ATTR_RO(rpmsg_name);
+
+static struct attribute *qcom_glink_attrs[] = {
+ &dev_attr_rpmsg_name.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_glink);
+
+static void qcom_glink_device_release(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct glink_channel *channel = to_glink_channel(rpdev->ept);
+
+ /* Release qcom_glink_alloc_channel() reference */
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+ kfree(rpdev);
+}
+
+static int qcom_glink_create_chrdev(struct qcom_glink *glink)
+{
+ struct rpmsg_device *rpdev;
+ struct glink_channel *channel;
+
+ rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
+ if (!rpdev)
+ return -ENOMEM;
+
+ channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
+ if (IS_ERR(channel)) {
+ kfree(rpdev);
+ return PTR_ERR(channel);
+ }
+ channel->rpdev = rpdev;
+
+ rpdev->ept = &channel->ept;
+ rpdev->ops = &glink_device_ops;
+ rpdev->dev.parent = glink->dev;
+ rpdev->dev.release = qcom_glink_device_release;
+
+ return rpmsg_chrdev_register_device(rpdev);
+}
+
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
struct qcom_glink_pipe *rx,
@@ -1604,6 +1658,12 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->lcids);
idr_init(&glink->rcids);
+ glink->dev->groups = qcom_glink_groups;
+
+ ret = device_add_groups(dev, qcom_glink_groups);
+ if (ret)
+ dev_err(dev, "failed to add groups\n");
+
ret = of_property_read_string(dev->of_node, "label", &glink->name);
if (ret < 0)
glink->name = dev->of_node->name;
@@ -1633,6 +1693,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
if (ret)
return ERR_PTR(ret);
+ ret = qcom_glink_create_chrdev(glink);
+ if (ret)
+ dev_err(glink->dev, "failed to register chrdev\n");
+
return glink;
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 4abbeea782fa..19903de6268d 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1338,7 +1338,7 @@ static int qcom_smd_parse_edge(struct device *dev,
ret = of_property_read_u32(node, key, &edge->edge_id);
if (ret) {
dev_err(dev, "edge missing %s property\n", key);
- return -EINVAL;
+ goto put_node;
}
edge->remote_pid = QCOM_SMEM_HOST_ANY;
@@ -1349,32 +1349,37 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
- if (PTR_ERR(edge->mbox_chan) != -ENODEV)
- return PTR_ERR(edge->mbox_chan);
+ if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
+ ret = PTR_ERR(edge->mbox_chan);
+ goto put_node;
+ }
edge->mbox_chan = NULL;
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put_node;
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(edge->ipc_regmap))
- return PTR_ERR(edge->ipc_regmap);
+ if (IS_ERR(edge->ipc_regmap)) {
+ ret = PTR_ERR(edge->ipc_regmap);
+ goto put_node;
+ }
key = "qcom,ipc";
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
if (ret < 0) {
dev_err(dev, "no offset in %s\n", key);
- return -EINVAL;
+ goto put_node;
}
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
if (ret < 0) {
dev_err(dev, "no bit in %s\n", key);
- return -EINVAL;
+ goto put_node;
}
}
@@ -1385,7 +1390,8 @@ static int qcom_smd_parse_edge(struct device *dev,
irq = irq_of_parse_and_map(node, 0);
if (irq < 0) {
dev_err(dev, "required smd interrupt missing\n");
- return -EINVAL;
+ ret = irq;
+ goto put_node;
}
ret = devm_request_irq(dev, irq,
@@ -1393,12 +1399,18 @@ static int qcom_smd_parse_edge(struct device *dev,
node->name, edge);
if (ret) {
dev_err(dev, "failed to request smd irq\n");
- return ret;
+ goto put_node;
}
edge->irq = irq;
return 0;
+
+put_node:
+ of_node_put(node);
+ edge->of_node = NULL;
+
+ return ret;
}
/*
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index a6361cad608b..91de940896e3 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(rpmsg_create_ept);
*/
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
- if (ept)
+ if (ept && ept->ops)
ept->ops->destroy_ept(ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 9006fc7f73d0..7d7ed4e5cce7 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -123,7 +123,12 @@ enum rpmsg_ns_flags {
};
/**
- * @vrp: the remote processor this channel belongs to
+ * struct virtio_rpmsg_channel - rpmsg channel descriptor
+ * @rpdev: the rpmsg channel device
+ * @vrp: the virtio remote processor device this channel belongs to
+ *
+ * This structure stores the channel that links the rpmsg device to the virtio
+ * remote processor device.
*/
struct virtio_rpmsg_channel {
struct rpmsg_device rpdev;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 48c536acd777..65ad9d0b47ab 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -669,6 +669,16 @@ config RTC_DRV_RV3028
This driver can also be built as a module. If so, the module
will be called rtc-rv3028.
+config RTC_DRV_RV3032
+ tristate "Micro Crystal RV3032"
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Micro Crystal
+ RV3032.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rv3032.
+
config RTC_DRV_RV8803
tristate "Micro Crystal RV8803, Epson RX8900"
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 880e08a409c3..bfb57464118d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_RTD119X) += rtc-rtd119x.o
obj-$(CONFIG_RTC_DRV_RV3028) += rtc-rv3028.o
obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
+obj-$(CONFIG_RTC_DRV_RV3032) += rtc-rv3032.o
obj-$(CONFIG_RTC_DRV_RV8803) += rtc-rv8803.o
obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o
obj-$(CONFIG_RTC_DRV_RX6110) += rtc-rx6110.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index bcc96ab7793f..c633319cdb91 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1006,6 +1006,7 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
+ memset(&cmos->saved_wkalrm, 0, sizeof(struct rtc_wkalrm));
cmos_read_alarm(dev, &cmos->saved_wkalrm);
dev_dbg(dev, "suspend%s, ctrl %02x\n",
@@ -1054,6 +1055,7 @@ static void cmos_check_wkalrm(struct device *dev)
return;
}
+ memset(&current_alarm, 0, sizeof(struct rtc_wkalrm));
cmos_read_alarm(dev, &current_alarm);
t_current_expires = rtc_tm_to_time64(&current_alarm.time);
t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 54c85cdd019d..9f5f54ca039d 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -122,6 +122,9 @@ enum ds_type {
#define RX8130_REG_FLAG_AF BIT(3)
#define RX8130_REG_CONTROL0 0x1e
#define RX8130_REG_CONTROL0_AIE BIT(3)
+#define RX8130_REG_CONTROL1 0x1f
+#define RX8130_REG_CONTROL1_INIEN BIT(4)
+#define RX8130_REG_CONTROL1_CHGEN BIT(5)
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
@@ -153,6 +156,7 @@ enum ds_type {
#define DS1388_REG_CONTROL 0x0c
# define DS1388_BIT_RST BIT(0)
# define DS1388_BIT_WDE BIT(1)
+# define DS1388_BIT_nEOSC BIT(7)
/* negative offset step is -2.034ppm */
#define M41TXX_NEG_OFFSET_STEP_PPB 2034
@@ -190,6 +194,15 @@ struct chip_desc {
u16 trickle_charger_reg;
u8 (*do_trickle_setup)(struct ds1307 *, u32,
bool);
+ /* Does the RTC require trickle-resistor-ohms to select the value of
+ * the resistor between Vcc and Vbackup?
+ */
+ bool requires_trickle_resistor;
+ /* Some RTCs' batteries and supercaps were charged by default, others
+ * allow charging but were not configured previously to do so.
+ * Remember this behavior to stay backwards compatible.
+ */
+ bool charge_default;
};
static const struct chip_desc chips[last_ds_type];
@@ -352,6 +365,10 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG,
DS1340_BIT_OSF, 0);
break;
+ case ds_1388:
+ regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG,
+ DS1388_BIT_OSF, 0);
+ break;
case mcp794xx:
/*
* these bits were cleared when preparing the date/time
@@ -507,6 +524,8 @@ static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, u32 ohms, bool diode)
u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE :
DS1307_TRICKLE_CHARGER_NO_DIODE;
+ setup |= DS13XX_TRICKLE_CHARGER_MAGIC;
+
switch (ohms) {
case 250:
setup |= DS1307_TRICKLE_CHARGER_250_OHM;
@@ -525,6 +544,16 @@ static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, u32 ohms, bool diode)
return setup;
}
+static u8 do_trickle_setup_rx8130(struct ds1307 *ds1307, u32 ohms, bool diode)
+{
+ /* make sure that the backup battery is enabled */
+ u8 setup = RX8130_REG_CONTROL1_INIEN;
+ if (diode)
+ setup |= RX8130_REG_CONTROL1_CHGEN;
+
+ return setup;
+}
+
static irqreturn_t rx8130_irq(int irq, void *dev_id)
{
struct ds1307 *ds1307 = dev_id;
@@ -979,6 +1008,8 @@ static const struct chip_desc chips[last_ds_type] = {
.bbsqi_bit = DS1339_BIT_BBSQI,
.trickle_charger_reg = 0x10,
.do_trickle_setup = &do_trickle_setup_ds1339,
+ .requires_trickle_resistor = true,
+ .charge_default = true,
},
[ds_1340] = {
.century_reg = DS1307_REG_HOUR,
@@ -986,6 +1017,8 @@ static const struct chip_desc chips[last_ds_type] = {
.century_bit = DS1340_BIT_CENTURY,
.do_trickle_setup = &do_trickle_setup_ds1339,
.trickle_charger_reg = 0x08,
+ .requires_trickle_resistor = true,
+ .charge_default = true,
},
[ds_1341] = {
.century_reg = DS1307_REG_MONTH,
@@ -1009,6 +1042,8 @@ static const struct chip_desc chips[last_ds_type] = {
.offset = 0x10,
.irq_handler = rx8130_irq,
.rtc_ops = &rx8130_rtc_ops,
+ .trickle_charger_reg = RX8130_REG_CONTROL1,
+ .do_trickle_setup = &do_trickle_setup_rx8130,
},
[m41t0] = {
.rtc_ops = &m41txx_rtc_ops,
@@ -1293,18 +1328,37 @@ static int ds1307_nvram_write(void *priv, unsigned int offset, void *val,
static u8 ds1307_trickle_init(struct ds1307 *ds1307,
const struct chip_desc *chip)
{
- u32 ohms;
- bool diode = true;
+ u32 ohms, chargeable;
+ bool diode = chip->charge_default;
if (!chip->do_trickle_setup)
return 0;
if (device_property_read_u32(ds1307->dev, "trickle-resistor-ohms",
- &ohms))
+ &ohms) && chip->requires_trickle_resistor)
return 0;
- if (device_property_read_bool(ds1307->dev, "trickle-diode-disable"))
+ /* aux-voltage-chargeable takes precedence over the deprecated
+ * trickle-diode-disable
+ */
+ if (!device_property_read_u32(ds1307->dev, "aux-voltage-chargeable",
+ &chargeable)) {
+ switch (chargeable) {
+ case 0:
+ diode = false;
+ break;
+ case 1:
+ diode = true;
+ break;
+ default:
+ dev_warn(ds1307->dev,
+ "unsupported aux-voltage-chargeable value\n");
+ break;
+ }
+ } else if (device_property_read_bool(ds1307->dev,
+ "trickle-diode-disable")) {
diode = false;
+ }
return chip->do_trickle_setup(ds1307, ohms, diode);
}
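/*
 * Example (illustrative) of the device properties evaluated above; the
 * values are assumptions for a DS1339-class trickle charger, not
 * requirements of this patch:
 *
 * trickle-resistor-ohms = <250>;
 * aux-voltage-chargeable = <1>;
 */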
@@ -1758,7 +1812,6 @@ static int ds1307_probe(struct i2c_client *client,
trickle_charger_setup = pdata->trickle_charger_setup;
if (trickle_charger_setup && chip->trickle_charger_reg) {
- trickle_charger_setup |= DS13XX_TRICKLE_CHARGER_MAGIC;
dev_dbg(ds1307->dev,
"writing trickle charger info 0x%x to 0x%x\n",
trickle_charger_setup, chip->trickle_charger_reg);
@@ -1881,6 +1934,19 @@ static int ds1307_probe(struct i2c_client *client,
DS1307_REG_HOUR << 4 | 0x08, hour);
}
break;
+ case ds_1388:
+ err = regmap_read(ds1307->regmap, DS1388_REG_CONTROL, &tmp);
+ if (err) {
+ dev_dbg(ds1307->dev, "read error %d\n", err);
+ goto exit;
+ }
+
+ /* oscillator off? turn it on, so clock can tick. */
+ if (tmp & DS1388_BIT_nEOSC) {
+ tmp &= ~DS1388_BIT_nEOSC;
+ regmap_write(ds1307->regmap, DS1388_REG_CONTROL, tmp);
+ }
+ break;
default:
break;
}
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 56c670af2e50..dfbd7b88b2b9 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -193,12 +193,12 @@ ds1685_rtc_begin_data_access(struct ds1685_priv *rtc)
rtc->write(rtc, RTC_CTRL_B,
(rtc->read(rtc, RTC_CTRL_B) | RTC_CTRL_B_SET));
+ /* Switch to Bank 1 */
+ ds1685_rtc_switch_to_bank1(rtc);
+
/* Read Ext Ctrl 4A and check the INCR bit to avoid a lockout. */
while (rtc->read(rtc, RTC_EXT_CTRL_4A) & RTC_CTRL_4A_INCR)
cpu_relax();
-
- /* Switch to Bank 1 */
- ds1685_rtc_switch_to_bank1(rtc);
}
/**
@@ -213,7 +213,7 @@ static inline void
ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
{
/* Switch back to Bank 0 */
- ds1685_rtc_switch_to_bank1(rtc);
+ ds1685_rtc_switch_to_bank0(rtc);
/* Clear the SET bit in Ctrl B */
rtc->write(rtc, RTC_CTRL_B,
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 68f0a1801a2e..48d3b38ea348 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -3,7 +3,7 @@
* Freescale FlexTimer Module (FTM) alarm device driver.
*
* Copyright 2014 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019-2020 NXP
*
*/
@@ -312,7 +312,7 @@ static const struct of_device_id ftm_rtc_match[] = {
};
static const struct acpi_device_id ftm_imx_acpi_ids[] = {
- {"NXP0011",},
+ {"NXP0014",},
{ }
};
MODULE_DEVICE_TABLE(acpi, ftm_imx_acpi_ids);
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 89e5ba0dae69..e6bd0808a092 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -65,7 +65,6 @@ static const struct rtc_class_ops meson_vrtc_ops = {
static int meson_vrtc_probe(struct platform_device *pdev)
{
struct meson_vrtc_data *vrtc;
- int ret;
vrtc = devm_kzalloc(&pdev->dev, sizeof(*vrtc), GFP_KERNEL);
if (!vrtc)
@@ -84,11 +83,7 @@ static int meson_vrtc_probe(struct platform_device *pdev)
return PTR_ERR(vrtc->rtc);
vrtc->rtc->ops = &meson_vrtc_ops;
- ret = rtc_register_device(vrtc->rtc);
- if (ret)
- return ret;
-
- return 0;
+ return rtc_register_device(vrtc->rtc);
}
static int __maybe_unused meson_vrtc_suspend(struct device *dev)
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index f8b1353777ba..1894aded4c85 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -31,7 +31,8 @@ static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
MTK_RTC_POLL_DELAY_US,
MTK_RTC_POLL_TIMEOUT);
if (ret < 0)
- dev_err(rtc->dev, "failed to write WRTGE: %d\n", ret);
+ dev_err(rtc->rtc_dev->dev.parent,
+ "failed to write WRTGR: %d\n", ret);
return ret;
}
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index ed6316992cbb..07a5630ec841 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -559,7 +559,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
pcf2127->rtc->uie_unsupported = 1;
- if (alarm_irq >= 0) {
+ if (alarm_irq > 0) {
ret = devm_request_threaded_irq(dev, alarm_irq, NULL,
pcf2127_rtc_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
@@ -570,7 +570,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
}
}
- if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) {
+ if (alarm_irq > 0 || device_property_read_bool(dev, "wakeup-source")) {
device_init_wakeup(dev, true);
pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
}
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 84f0d25259ae..7ceb968f0e44 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -75,8 +75,6 @@ static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
if (ret)
return ret;
- memset(dt, 0, sizeof(*dt));
-
dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */
dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */
dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */
@@ -85,20 +83,12 @@ static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */
dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */
- /* the rtc device may contain illegal values on power up
- * according to the data sheet. make sure they are valid.
- */
-
return 0;
}
static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
{
- int ret, year;
-
- year = dt->tm_year + 1900;
- if (year >= 2100 || year < 2000)
- return -EINVAL;
+ int ret;
ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour));
ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min));
@@ -106,7 +96,6 @@ static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday));
ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1));
ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100));
- ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday);
return ret;
}
@@ -119,7 +108,6 @@ static const struct rtc_class_ops r9701_rtc_ops = {
static int r9701_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
- struct rtc_time dt;
unsigned char tmp;
int res;
@@ -130,35 +118,16 @@ static int r9701_probe(struct spi_device *spi)
return -ENODEV;
}
- /*
- * The device seems to be present. Now check if the registers
- * contain invalid values. If so, try to write a default date:
- * 2000/1/1 00:00:00
- */
- if (r9701_get_datetime(&spi->dev, &dt)) {
- dev_info(&spi->dev, "trying to repair invalid date/time\n");
- dt.tm_sec = 0;
- dt.tm_min = 0;
- dt.tm_hour = 0;
- dt.tm_mday = 1;
- dt.tm_mon = 0;
- dt.tm_year = 100;
-
- if (r9701_set_datetime(&spi->dev, &dt) ||
- r9701_get_datetime(&spi->dev, &dt)) {
- dev_err(&spi->dev, "cannot repair RTC register\n");
- return -ENODEV;
- }
- }
-
- rtc = devm_rtc_device_register(&spi->dev, "r9701",
- &r9701_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
spi_set_drvdata(spi, rtc);
+ rtc->ops = &r9701_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- return 0;
+ return rtc_register_device(rtc);
}
static struct spi_driver r9701_driver = {
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index 89f38e3e917d..e98f85f34206 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -366,15 +366,15 @@ static const struct rtc_class_ops rs5c313_rtc_ops = {
static int rs5c313_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc = devm_rtc_device_register(&pdev->dev, "rs5c313",
- &rs5c313_rtc_ops, THIS_MODULE);
+ struct rtc_device *rtc;
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ rs5c313_init_port();
+ rs5c313_check_xstp_bit();
- platform_set_drvdata(pdev, rtc);
+ rtc = devm_rtc_device_register(&pdev->dev, "rs5c313", &rs5c313_rtc_ops,
+ THIS_MODULE);
- return 0;
+ return PTR_ERR_OR_ZERO(rtc);
}
static struct platform_driver rs5c313_rtc_platform_driver = {
@@ -384,27 +384,7 @@ static struct platform_driver rs5c313_rtc_platform_driver = {
.probe = rs5c313_rtc_probe,
};
-static int __init rs5c313_rtc_init(void)
-{
- int err;
-
- err = platform_driver_register(&rs5c313_rtc_platform_driver);
- if (err)
- return err;
-
- rs5c313_init_port();
- rs5c313_check_xstp_bit();
-
- return 0;
-}
-
-static void __exit rs5c313_rtc_exit(void)
-{
- platform_driver_unregister(&rs5c313_rtc_platform_driver);
-}
-
-module_init(rs5c313_rtc_init);
-module_exit(rs5c313_rtc_exit);
+module_platform_driver(rs5c313_rtc_platform_driver);
MODULE_AUTHOR("kogiidena , Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
MODULE_DESCRIPTION("Ricoh RS5C313 RTC device driver");
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index ec84db0b3d7a..fa226f0fe67d 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -71,6 +71,7 @@
#define RV3028_EVT_CTRL_TSR BIT(2)
+#define RV3028_EEPROM_CMD_UPDATE 0x11
#define RV3028_EEPROM_CMD_WRITE 0x21
#define RV3028_EEPROM_CMD_READ 0x22
@@ -95,7 +96,7 @@ struct rv3028_data {
#endif
};
-static u16 rv3028_trickle_resistors[] = {1000, 3000, 6000, 11000};
+static u16 rv3028_trickle_resistors[] = {3000, 5000, 9000, 15000};
static ssize_t timestamp0_store(struct device *dev,
struct device_attribute *attr,
@@ -171,6 +172,88 @@ static const struct attribute_group rv3028_attr_group = {
.attrs = rv3028_attrs,
};
+static int rv3028_exit_eerd(struct rv3028_data *rv3028, u32 eerd)
+{
+ if (eerd)
+ return 0;
+
+ return regmap_update_bits(rv3028->regmap, RV3028_CTRL1, RV3028_CTRL1_EERD, 0);
+}
+
+static int rv3028_enter_eerd(struct rv3028_data *rv3028, u32 *eerd)
+{
+ u32 ctrl1, status;
+ int ret;
+
+ ret = regmap_read(rv3028->regmap, RV3028_CTRL1, &ctrl1);
+ if (ret)
+ return ret;
+
+ *eerd = ctrl1 & RV3028_CTRL1_EERD;
+ if (*eerd)
+ return 0;
+
+ ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL1,
+ RV3028_CTRL1_EERD, RV3028_CTRL1_EERD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rv3028->regmap, RV3028_STATUS, status,
+ !(status & RV3028_STATUS_EEBUSY),
+ RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT);
+ if (ret) {
+ rv3028_exit_eerd(rv3028, *eerd);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rv3028_update_eeprom(struct rv3028_data *rv3028, u32 eerd)
+{
+ u32 status;
+ int ret;
+
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD, 0x0);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD, RV3028_EEPROM_CMD_UPDATE);
+ if (ret)
+ goto exit_eerd;
+
+ usleep_range(63000, RV3028_EEBUSY_TIMEOUT);
+
+ ret = regmap_read_poll_timeout(rv3028->regmap, RV3028_STATUS, status,
+ !(status & RV3028_STATUS_EEBUSY),
+ RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT);
+
+exit_eerd:
+ rv3028_exit_eerd(rv3028, eerd);
+
+ return ret;
+}
+
+static int rv3028_update_cfg(struct rv3028_data *rv3028, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ u32 eerd;
+ int ret;
+
+ ret = rv3028_enter_eerd(rv3028, &eerd);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rv3028->regmap, reg, mask, val);
+ if (ret) {
+ rv3028_exit_eerd(rv3028, eerd);
+ return ret;
+ }
+
+ return rv3028_update_eeprom(rv3028, eerd);
+}
+
static irqreturn_t rv3028_handle_irq(int irq, void *dev_id)
{
struct rv3028_data *rv3028 = dev_id;
@@ -404,17 +487,32 @@ static int rv3028_read_offset(struct device *dev, long *offset)
static int rv3028_set_offset(struct device *dev, long offset)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
+ u32 eerd;
int ret;
offset = clamp(offset, -244141L, 243187L) * 1000;
offset = DIV_ROUND_CLOSEST(offset, OFFSET_STEP_PPT);
+ ret = rv3028_enter_eerd(rv3028, &eerd);
+ if (ret)
+ return ret;
+
ret = regmap_write(rv3028->regmap, RV3028_OFFSET, offset >> 1);
if (ret < 0)
- return ret;
+ goto exit_eerd;
+
+ ret = regmap_update_bits(rv3028->regmap, RV3028_BACKUP, BIT(7),
+ offset << 7);
+ if (ret < 0)
+ goto exit_eerd;
+
+ return rv3028_update_eeprom(rv3028, eerd);
+
+exit_eerd:
+ rv3028_exit_eerd(rv3028, eerd);
+
+ return ret;
- return regmap_update_bits(rv3028->regmap, RV3028_BACKUP, BIT(7),
- offset << 7);
}
static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -451,49 +549,36 @@ static int rv3028_nvram_read(void *priv, unsigned int offset, void *val,
static int rv3028_eeprom_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- u32 status, ctrl1;
- int i, ret, err;
+ struct rv3028_data *rv3028 = priv;
+ u32 status, eerd;
+ int i, ret;
u8 *buf = val;
- ret = regmap_read(priv, RV3028_CTRL1, &ctrl1);
+ ret = rv3028_enter_eerd(rv3028, &eerd);
if (ret)
return ret;
- if (!(ctrl1 & RV3028_CTRL1_EERD)) {
- ret = regmap_update_bits(priv, RV3028_CTRL1,
- RV3028_CTRL1_EERD, RV3028_CTRL1_EERD);
- if (ret)
- return ret;
-
- ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status,
- !(status & RV3028_STATUS_EEBUSY),
- RV3028_EEBUSY_POLL,
- RV3028_EEBUSY_TIMEOUT);
- if (ret)
- goto restore_eerd;
- }
-
for (i = 0; i < bytes; i++) {
- ret = regmap_write(priv, RV3028_EEPROM_ADDR, offset + i);
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_ADDR, offset + i);
if (ret)
goto restore_eerd;
- ret = regmap_write(priv, RV3028_EEPROM_DATA, buf[i]);
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_DATA, buf[i]);
if (ret)
goto restore_eerd;
- ret = regmap_write(priv, RV3028_EEPROM_CMD, 0x0);
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD, 0x0);
if (ret)
goto restore_eerd;
- ret = regmap_write(priv, RV3028_EEPROM_CMD,
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD,
RV3028_EEPROM_CMD_WRITE);
if (ret)
goto restore_eerd;
usleep_range(RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT);
- ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status,
+ ret = regmap_read_poll_timeout(rv3028->regmap, RV3028_STATUS, status,
!(status & RV3028_STATUS_EEBUSY),
RV3028_EEBUSY_POLL,
RV3028_EEBUSY_TIMEOUT);
@@ -502,13 +587,7 @@ static int rv3028_eeprom_write(void *priv, unsigned int offset, void *val,
}
restore_eerd:
- if (!(ctrl1 & RV3028_CTRL1_EERD))
- {
- err = regmap_update_bits(priv, RV3028_CTRL1, RV3028_CTRL1_EERD,
- 0);
- if (err && !ret)
- ret = err;
- }
+ rv3028_exit_eerd(rv3028, eerd);
return ret;
}
@@ -516,63 +595,44 @@ restore_eerd:
static int rv3028_eeprom_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- u32 status, ctrl1, data;
- int i, ret, err;
+ struct rv3028_data *rv3028 = priv;
+ u32 status, eerd, data;
+ int i, ret;
u8 *buf = val;
- ret = regmap_read(priv, RV3028_CTRL1, &ctrl1);
+ ret = rv3028_enter_eerd(rv3028, &eerd);
if (ret)
return ret;
- if (!(ctrl1 & RV3028_CTRL1_EERD)) {
- ret = regmap_update_bits(priv, RV3028_CTRL1,
- RV3028_CTRL1_EERD, RV3028_CTRL1_EERD);
- if (ret)
- return ret;
-
- ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status,
- !(status & RV3028_STATUS_EEBUSY),
- RV3028_EEBUSY_POLL,
- RV3028_EEBUSY_TIMEOUT);
- if (ret)
- goto restore_eerd;
- }
-
for (i = 0; i < bytes; i++) {
- ret = regmap_write(priv, RV3028_EEPROM_ADDR, offset + i);
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_ADDR, offset + i);
if (ret)
goto restore_eerd;
- ret = regmap_write(priv, RV3028_EEPROM_CMD, 0x0);
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD, 0x0);
if (ret)
goto restore_eerd;
- ret = regmap_write(priv, RV3028_EEPROM_CMD,
+ ret = regmap_write(rv3028->regmap, RV3028_EEPROM_CMD,
RV3028_EEPROM_CMD_READ);
if (ret)
goto restore_eerd;
- ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status,
+ ret = regmap_read_poll_timeout(rv3028->regmap, RV3028_STATUS, status,
!(status & RV3028_STATUS_EEBUSY),
RV3028_EEBUSY_POLL,
RV3028_EEBUSY_TIMEOUT);
if (ret)
goto restore_eerd;
- ret = regmap_read(priv, RV3028_EEPROM_DATA, &data);
+ ret = regmap_read(rv3028->regmap, RV3028_EEPROM_DATA, &data);
if (ret)
goto restore_eerd;
buf[i] = data;
}
restore_eerd:
- if (!(ctrl1 & RV3028_CTRL1_EERD))
- {
- err = regmap_update_bits(priv, RV3028_CTRL1, RV3028_CTRL1_EERD,
- 0);
- if (err && !ret)
- ret = err;
- }
+ rv3028_exit_eerd(rv3028, eerd);
return ret;
}
@@ -619,24 +679,23 @@ static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int i, ret;
+ u32 enabled;
struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &enabled);
+ if (ret < 0)
+ return ret;
+
ret = regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
if (ret < 0)
return ret;
- for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) {
- if (clkout_rates[i] == rate) {
- ret = regmap_update_bits(rv3028->regmap,
- RV3028_CLKOUT,
- RV3028_CLKOUT_FD_MASK, i);
- if (ret < 0)
- return ret;
+ enabled &= RV3028_CLKOUT_CLKOE;
- return regmap_write(rv3028->regmap, RV3028_CLKOUT,
- RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
- }
- }
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] == rate)
+ return rv3028_update_cfg(rv3028, RV3028_CLKOUT, 0xff,
+ RV3028_CLKOUT_CLKSY | enabled | i);
return -EINVAL;
}
@@ -811,10 +870,8 @@ static int rv3028_probe(struct i2c_client *client)
break;
if (i < ARRAY_SIZE(rv3028_trickle_resistors)) {
- ret = regmap_update_bits(rv3028->regmap, RV3028_BACKUP,
- RV3028_BACKUP_TCE |
- RV3028_BACKUP_TCR_MASK,
- RV3028_BACKUP_TCE | i);
+ ret = rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_TCE |
+ RV3028_BACKUP_TCR_MASK, RV3028_BACKUP_TCE | i);
if (ret)
return ret;
} else {
@@ -835,7 +892,7 @@ static int rv3028_probe(struct i2c_client *client)
nvmem_cfg.priv = rv3028->regmap;
rtc_nvmem_register(rv3028->rtc, &nvmem_cfg);
- eeprom_cfg.priv = rv3028->regmap;
+ eeprom_cfg.priv = rv3028;
rtc_nvmem_register(rv3028->rtc, &eeprom_cfg);
rv3028->rtc->max_user_freq = 1;
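
Every configuration write in the RV-3028 changes above, and in the new RV-3032 driver that follows, is wrapped in the same EEPROM handshake: suspend the automatic EEPROM refresh (EERD), wait for EEBUSY to clear, change the RAM mirror, trigger the update command, poll EEBUSY again, then restore EERD only if this caller was the one to set it. A condensed sketch of the sequence with error handling trimmed (register and timing macros as defined in the driver):

    static int eerd_protected_update(struct regmap *map, unsigned int reg,
                                     unsigned int mask, unsigned int val)
    {
        u32 status;

        /* stop automatic refresh so the EEPROM cannot overwrite our write */
        regmap_update_bits(map, RV3028_CTRL1, RV3028_CTRL1_EERD,
                           RV3028_CTRL1_EERD);
        regmap_read_poll_timeout(map, RV3028_STATUS, status,
                                 !(status & RV3028_STATUS_EEBUSY),
                                 RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT);

        regmap_update_bits(map, reg, mask, val);  /* change the RAM mirror */

        /* copy the mirror into EEPROM and wait for completion */
        regmap_write(map, RV3028_EEPROM_CMD, 0x0);
        regmap_write(map, RV3028_EEPROM_CMD, RV3028_EEPROM_CMD_UPDATE);
        regmap_read_poll_timeout(map, RV3028_STATUS, status,
                                 !(status & RV3028_STATUS_EEBUSY),
                                 RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT);

        /* re-enable automatic refresh */
        return regmap_update_bits(map, RV3028_CTRL1, RV3028_CTRL1_EERD, 0);
    }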
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
new file mode 100644
index 000000000000..3e67f71f4261
--- /dev/null
+++ b/drivers/rtc/rtc-rv3032.c
@@ -0,0 +1,925 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RTC driver for the Micro Crystal RV3032
+ *
+ * Copyright (C) 2020 Micro Crystal SA
+ *
+ * Alexandre Belloni <alexandre.belloni@bootlin.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/bcd.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define RV3032_SEC 0x01
+#define RV3032_MIN 0x02
+#define RV3032_HOUR 0x03
+#define RV3032_WDAY 0x04
+#define RV3032_DAY 0x05
+#define RV3032_MONTH 0x06
+#define RV3032_YEAR 0x07
+#define RV3032_ALARM_MIN 0x08
+#define RV3032_ALARM_HOUR 0x09
+#define RV3032_ALARM_DAY 0x0A
+#define RV3032_STATUS 0x0D
+#define RV3032_TLSB 0x0E
+#define RV3032_TMSB 0x0F
+#define RV3032_CTRL1 0x10
+#define RV3032_CTRL2 0x11
+#define RV3032_CTRL3 0x12
+#define RV3032_TS_CTRL 0x13
+#define RV3032_CLK_IRQ 0x14
+#define RV3032_EEPROM_ADDR 0x3D
+#define RV3032_EEPROM_DATA 0x3E
+#define RV3032_EEPROM_CMD 0x3F
+#define RV3032_RAM1 0x40
+#define RV3032_PMU 0xC0
+#define RV3032_OFFSET 0xC1
+#define RV3032_CLKOUT1 0xC2
+#define RV3032_CLKOUT2 0xC3
+#define RV3032_TREF0 0xC4
+#define RV3032_TREF1 0xC5
+
+#define RV3032_STATUS_VLF BIT(0)
+#define RV3032_STATUS_PORF BIT(1)
+#define RV3032_STATUS_EVF BIT(2)
+#define RV3032_STATUS_AF BIT(3)
+#define RV3032_STATUS_TF BIT(4)
+#define RV3032_STATUS_UF BIT(5)
+#define RV3032_STATUS_TLF BIT(6)
+#define RV3032_STATUS_THF BIT(7)
+
+#define RV3032_TLSB_CLKF BIT(1)
+#define RV3032_TLSB_EEBUSY BIT(2)
+#define RV3032_TLSB_TEMP GENMASK(7, 4)
+
+#define RV3032_CLKOUT2_HFD_MSK GENMASK(4, 0)
+#define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5)
+#define RV3032_CLKOUT2_OS BIT(7)
+
+#define RV3032_CTRL1_EERD BIT(3)
+#define RV3032_CTRL1_WADA BIT(5)
+
+#define RV3032_CTRL2_STOP BIT(0)
+#define RV3032_CTRL2_EIE BIT(2)
+#define RV3032_CTRL2_AIE BIT(3)
+#define RV3032_CTRL2_TIE BIT(4)
+#define RV3032_CTRL2_UIE BIT(5)
+#define RV3032_CTRL2_CLKIE BIT(6)
+#define RV3032_CTRL2_TSE BIT(7)
+
+#define RV3032_PMU_TCM GENMASK(1, 0)
+#define RV3032_PMU_TCR GENMASK(3, 2)
+#define RV3032_PMU_BSM GENMASK(5, 4)
+#define RV3032_PMU_NCLKE BIT(6)
+
+#define RV3032_PMU_BSM_DSM 1
+#define RV3032_PMU_BSM_LSM 2
+
+#define RV3032_OFFSET_MSK GENMASK(5, 0)
+
+#define RV3032_EVT_CTRL_TSR BIT(2)
+
+#define RV3032_EEPROM_CMD_UPDATE 0x11
+#define RV3032_EEPROM_CMD_WRITE 0x21
+#define RV3032_EEPROM_CMD_READ 0x22
+
+#define RV3032_EEPROM_USER 0xCB
+
+#define RV3032_EEBUSY_POLL 10000
+#define RV3032_EEBUSY_TIMEOUT 100000
+
+#define OFFSET_STEP_PPT 238419
+
+struct rv3032_data {
+ struct regmap *regmap;
+ struct rtc_device *rtc;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
+};
+
+static u16 rv3032_trickle_resistors[] = {1000, 2000, 7000, 11000};
+static u16 rv3032_trickle_voltages[] = {0, 1750, 3000, 4400};
+
+static int rv3032_exit_eerd(struct rv3032_data *rv3032, u32 eerd)
+{
+ if (eerd)
+ return 0;
+
+ return regmap_update_bits(rv3032->regmap, RV3032_CTRL1, RV3032_CTRL1_EERD, 0);
+}
+
+static int rv3032_enter_eerd(struct rv3032_data *rv3032, u32 *eerd)
+{
+ u32 ctrl1, status;
+ int ret;
+
+ ret = regmap_read(rv3032->regmap, RV3032_CTRL1, &ctrl1);
+ if (ret)
+ return ret;
+
+ *eerd = ctrl1 & RV3032_CTRL1_EERD;
+ if (*eerd)
+ return 0;
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1,
+ RV3032_CTRL1_EERD, RV3032_CTRL1_EERD);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status,
+ !(status & RV3032_TLSB_EEBUSY),
+ RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+ if (ret) {
+ rv3032_exit_eerd(rv3032, *eerd);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rv3032_update_cfg(struct rv3032_data *rv3032, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ u32 status, eerd;
+ int ret;
+
+ ret = rv3032_enter_eerd(rv3032, &eerd);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rv3032->regmap, reg, mask, val);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_UPDATE);
+ if (ret)
+ goto exit_eerd;
+
+ usleep_range(46000, RV3032_EEBUSY_TIMEOUT);
+
+ ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status,
+ !(status & RV3032_TLSB_EEBUSY),
+ RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+
+exit_eerd:
+ rv3032_exit_eerd(rv3032, eerd);
+
+ return ret;
+}
+
+static irqreturn_t rv3032_handle_irq(int irq, void *dev_id)
+{
+ struct rv3032_data *rv3032 = dev_id;
+ unsigned long events = 0;
+ u32 status = 0, ctrl = 0;
+
+ if (regmap_read(rv3032->regmap, RV3032_STATUS, &status) < 0 ||
+ status == 0) {
+ return IRQ_NONE;
+ }
+
+ if (status & RV3032_STATUS_TF) {
+ status |= RV3032_STATUS_TF;
+ ctrl |= RV3032_CTRL2_TIE;
+ events |= RTC_PF;
+ }
+
+ if (status & RV3032_STATUS_AF) {
+ status |= RV3032_STATUS_AF;
+ ctrl |= RV3032_CTRL2_AIE;
+ events |= RTC_AF;
+ }
+
+ if (status & RV3032_STATUS_UF) {
+ status |= RV3032_STATUS_UF;
+ ctrl |= RV3032_CTRL2_UIE;
+ events |= RTC_UF;
+ }
+
+ if (events) {
+ rtc_update_irq(rv3032->rtc, 1, events);
+ regmap_update_bits(rv3032->regmap, RV3032_STATUS, status, 0);
+ regmap_update_bits(rv3032->regmap, RV3032_CTRL2, ctrl, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rv3032_get_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ u8 date[7];
+ int ret, status;
+
+ ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ if (status & (RV3032_STATUS_PORF | RV3032_STATUS_VLF))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(rv3032->regmap, RV3032_SEC, date, sizeof(date));
+ if (ret)
+ return ret;
+
+ tm->tm_sec = bcd2bin(date[0] & 0x7f);
+ tm->tm_min = bcd2bin(date[1] & 0x7f);
+ tm->tm_hour = bcd2bin(date[2] & 0x3f);
+ tm->tm_wday = date[3] & 0x7;
+ tm->tm_mday = bcd2bin(date[4] & 0x3f);
+ tm->tm_mon = bcd2bin(date[5] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(date[6]) + 100;
+
+ return 0;
+}
+
+static int rv3032_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ u8 date[7];
+ int ret;
+
+ date[0] = bin2bcd(tm->tm_sec);
+ date[1] = bin2bcd(tm->tm_min);
+ date[2] = bin2bcd(tm->tm_hour);
+ date[3] = tm->tm_wday;
+ date[4] = bin2bcd(tm->tm_mday);
+ date[5] = bin2bcd(tm->tm_mon + 1);
+ date[6] = bin2bcd(tm->tm_year - 100);
+
+ ret = regmap_bulk_write(rv3032->regmap, RV3032_SEC, date,
+ sizeof(date));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS,
+ RV3032_STATUS_PORF | RV3032_STATUS_VLF, 0);
+
+ return ret;
+}
+
+static int rv3032_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ u8 alarmvals[3];
+ int status, ctrl, ret;
+
+ ret = regmap_bulk_read(rv3032->regmap, RV3032_ALARM_MIN, alarmvals,
+ sizeof(alarmvals));
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(rv3032->regmap, RV3032_CTRL2, &ctrl);
+ if (ret < 0)
+ return ret;
+
+ alrm->time.tm_sec = 0;
+ alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
+ alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
+ alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f);
+
+ alrm->enabled = !!(ctrl & RV3032_CTRL2_AIE);
+ alrm->pending = (status & RV3032_STATUS_AF) && alrm->enabled;
+
+ return 0;
+}
+
+static int rv3032_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ u8 alarmvals[3];
+ u8 ctrl = 0;
+ int ret;
+
+ /* The alarm has no seconds, round up to nearest minute */
+ if (alrm->time.tm_sec) {
+ time64_t alarm_time = rtc_tm_to_time64(&alrm->time);
+
+ alarm_time += 60 - alrm->time.tm_sec;
+ rtc_time64_to_tm(alarm_time, &alrm->time);
+ }
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2,
+ RV3032_CTRL2_AIE | RV3032_CTRL2_UIE, 0);
+ if (ret)
+ return ret;
+
+ alarmvals[0] = bin2bcd(alrm->time.tm_min);
+ alarmvals[1] = bin2bcd(alrm->time.tm_hour);
+ alarmvals[2] = bin2bcd(alrm->time.tm_mday);
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS,
+ RV3032_STATUS_AF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(rv3032->regmap, RV3032_ALARM_MIN, alarmvals,
+ sizeof(alarmvals));
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ if (rv3032->rtc->uie_rtctimer.enabled)
+ ctrl |= RV3032_CTRL2_UIE;
+ if (rv3032->rtc->aie_timer.enabled)
+ ctrl |= RV3032_CTRL2_AIE;
+ }
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2,
+ RV3032_CTRL2_UIE | RV3032_CTRL2_AIE, ctrl);
+
+ return ret;
+}
+
+static int rv3032_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ int ctrl = 0, ret;
+
+ if (enabled) {
+ if (rv3032->rtc->uie_rtctimer.enabled)
+ ctrl |= RV3032_CTRL2_UIE;
+ if (rv3032->rtc->aie_timer.enabled)
+ ctrl |= RV3032_CTRL2_AIE;
+ }
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS,
+ RV3032_STATUS_AF | RV3032_STATUS_UF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2,
+ RV3032_CTRL2_UIE | RV3032_CTRL2_AIE, ctrl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rv3032_read_offset(struct device *dev, long *offset)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ int ret, value, steps;
+
+ ret = regmap_read(rv3032->regmap, RV3032_OFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ steps = sign_extend32(FIELD_GET(RV3032_OFFSET_MSK, value), 5);
+
+ *offset = DIV_ROUND_CLOSEST(steps * OFFSET_STEP_PPT, 1000);
+
+ return 0;
+}
+
+static int rv3032_set_offset(struct device *dev, long offset)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+
+ offset = clamp(offset, -7629L, 7391L) * 1000;
+ offset = DIV_ROUND_CLOSEST(offset, OFFSET_STEP_PPT);
+
+ return rv3032_update_cfg(rv3032, RV3032_OFFSET, RV3032_OFFSET_MSK,
+ FIELD_PREP(RV3032_OFFSET_MSK, offset));
+}
+
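
OFFSET_STEP_PPT expresses the 0.2384 ppm trim granularity in parts per trillion so the conversion stays in integer arithmetic; the clamp bounds are exactly what the 6-bit signed OFFSET field can represent (-32 to +31 steps). A worked example using the constants above:

    /* request the maximum positive trim of 7391 ppb */
    long offset = 7391;
    offset = clamp(offset, -7629L, 7391L) * 1000;        /* 7391000 ppt */
    offset = DIV_ROUND_CLOSEST(offset, OFFSET_STEP_PPT); /* 7391000 / 238419 = 31 */
    /* 31 (0x1F) is the largest value OFFSET[5:0] can hold */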
+static int rv3032_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ int status, val = 0, ret = 0;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ if (status & (RV3032_STATUS_PORF | RV3032_STATUS_VLF))
+ val = RTC_VL_DATA_INVALID;
+ return put_user(val, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int rv3032_nvram_write(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ return regmap_bulk_write(priv, RV3032_RAM1 + offset, val, bytes);
+}
+
+static int rv3032_nvram_read(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ return regmap_bulk_read(priv, RV3032_RAM1 + offset, val, bytes);
+}
+
+static int rv3032_eeprom_write(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct rv3032_data *rv3032 = priv;
+ u32 status, eerd;
+ int i, ret;
+ u8 *buf = val;
+
+ ret = rv3032_enter_eerd(rv3032, &eerd);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bytes; i++) {
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_ADDR,
+ RV3032_EEPROM_USER + offset + i);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_DATA, buf[i]);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD,
+ RV3032_EEPROM_CMD_WRITE);
+ if (ret)
+ goto exit_eerd;
+
+ usleep_range(RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+
+ ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status,
+ !(status & RV3032_TLSB_EEBUSY),
+ RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+ if (ret)
+ goto exit_eerd;
+ }
+
+exit_eerd:
+ rv3032_exit_eerd(rv3032, eerd);
+
+ return ret;
+}
+
+static int rv3032_eeprom_read(void *priv, unsigned int offset, void *val, size_t bytes)
+{
+ struct rv3032_data *rv3032 = priv;
+ u32 status, eerd, data;
+ int i, ret;
+ u8 *buf = val;
+
+ ret = rv3032_enter_eerd(rv3032, &eerd);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bytes; i++) {
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_ADDR,
+ RV3032_EEPROM_USER + offset + i);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD,
+ RV3032_EEPROM_CMD_READ);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status,
+ !(status & RV3032_TLSB_EEBUSY),
+ RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_read(rv3032->regmap, RV3032_EEPROM_DATA, &data);
+ if (ret)
+ goto exit_eerd;
+ buf[i] = data;
+ }
+
+exit_eerd:
+ rv3032_exit_eerd(rv3032, eerd);
+
+ return ret;
+}
+
+static int rv3032_trickle_charger_setup(struct device *dev, struct rv3032_data *rv3032)
+{
+ u32 val, ohms, voltage;
+ int i;
+
+ val = FIELD_PREP(RV3032_PMU_TCM, 1) | FIELD_PREP(RV3032_PMU_BSM, RV3032_PMU_BSM_DSM);
+ if (!device_property_read_u32(dev, "trickle-voltage-millivolt", &voltage)) {
+ for (i = 0; i < ARRAY_SIZE(rv3032_trickle_voltages); i++)
+ if (voltage == rv3032_trickle_voltages[i])
+ break;
+ if (i < ARRAY_SIZE(rv3032_trickle_voltages))
+ val = FIELD_PREP(RV3032_PMU_TCM, i) |
+ FIELD_PREP(RV3032_PMU_BSM, RV3032_PMU_BSM_LSM);
+ }
+
+ if (device_property_read_u32(dev, "trickle-resistor-ohms", &ohms))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(rv3032_trickle_resistors); i++)
+ if (ohms == rv3032_trickle_resistors[i])
+ break;
+
+ if (i >= ARRAY_SIZE(rv3032_trickle_resistors)) {
+ dev_warn(dev, "invalid trickle resistor value\n");
+
+ return 0;
+ }
+
+ return rv3032_update_cfg(rv3032, RV3032_PMU,
+ RV3032_PMU_TCR | RV3032_PMU_TCM | RV3032_PMU_BSM,
+ val | FIELD_PREP(RV3032_PMU_TCR, i));
+}
+
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_rv3032(hw) container_of(hw, struct rv3032_data, clkout_hw)
+
+static int clkout_xtal_rates[] = {
+ 32768,
+ 1024,
+ 64,
+ 1,
+};
+
+#define RV3032_HFD_STEP 8192
+
+static unsigned long rv3032_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int clkout, ret;
+ struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw);
+
+ ret = regmap_read(rv3032->regmap, RV3032_CLKOUT2, &clkout);
+ if (ret < 0)
+ return 0;
+
+ if (clkout & RV3032_CLKOUT2_OS) {
+ unsigned long rate = FIELD_GET(RV3032_CLKOUT2_HFD_MSK, clkout) << 8;
+
+ ret = regmap_read(rv3032->regmap, RV3032_CLKOUT1, &clkout);
+ if (ret < 0)
+ return 0;
+
+ rate += clkout + 1;
+
+ return rate * RV3032_HFD_STEP;
+ }
+
+ return clkout_xtal_rates[FIELD_GET(RV3032_CLKOUT2_FD_MSK, clkout)];
+}
+
+static long rv3032_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i, hfd;
+
+ if (rate < RV3032_HFD_STEP)
+ for (i = 0; i < ARRAY_SIZE(clkout_xtal_rates); i++)
+ if (clkout_xtal_rates[i] <= rate)
+ return clkout_xtal_rates[i];
+
+ hfd = DIV_ROUND_CLOSEST(rate, RV3032_HFD_STEP);
+
+ return RV3032_HFD_STEP * clamp(hfd, 0, 8192);
+}
+
+static int rv3032_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw);
+ u32 status, eerd;
+ int i, hfd, ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_xtal_rates); i++) {
+ if (clkout_xtal_rates[i] == rate) {
+ return rv3032_update_cfg(rv3032, RV3032_CLKOUT2, 0xff,
+ FIELD_PREP(RV3032_CLKOUT2_FD_MSK, i));
+ }
+ }
+
+ hfd = DIV_ROUND_CLOSEST(rate, RV3032_HFD_STEP);
+ hfd = clamp(hfd, 1, 8192) - 1;
+
+ ret = rv3032_enter_eerd(rv3032, &eerd);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rv3032->regmap, RV3032_CLKOUT1, hfd & 0xff);
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_CLKOUT2, RV3032_CLKOUT2_OS |
+ FIELD_PREP(RV3032_CLKOUT2_HFD_MSK, hfd >> 8));
+ if (ret)
+ goto exit_eerd;
+
+ ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_UPDATE);
+ if (ret)
+ goto exit_eerd;
+
+ usleep_range(46000, RV3032_EEBUSY_TIMEOUT);
+
+ ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status,
+ !(status & RV3032_TLSB_EEBUSY),
+ RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT);
+
+exit_eerd:
+ rv3032_exit_eerd(rv3032, eerd);
+
+ return ret;
+}
+
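
For rates of RV3032_HFD_STEP (8192 Hz) and above, the output frequency is (HFD + 1) * 8192 Hz, with the 13-bit HFD value split between CLKOUT1 (low byte) and CLKOUT2[4:0] (high five bits), so the achievable range runs from 8192 Hz to 67.108864 MHz. A worked example:

    /* request 1 MHz */
    int hfd = DIV_ROUND_CLOSEST(1000000, RV3032_HFD_STEP); /* = 122 */
    hfd = clamp(hfd, 1, 8192) - 1;                         /* = 121 */
    /* CLKOUT1 = 121 & 0xff = 0x79, CLKOUT2[4:0] = 121 >> 8 = 0 */
    /* output = (121 + 1) * 8192 = 999424 Hz, about 0.06% low */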
+static int rv3032_clkout_prepare(struct clk_hw *hw)
+{
+ struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw);
+
+ return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_NCLKE, 0);
+}
+
+static void rv3032_clkout_unprepare(struct clk_hw *hw)
+{
+ struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw);
+
+ rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_NCLKE, RV3032_PMU_NCLKE);
+}
+
+static int rv3032_clkout_is_prepared(struct clk_hw *hw)
+{
+ int val, ret;
+ struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw);
+
+ ret = regmap_read(rv3032->regmap, RV3032_PMU, &val);
+ if (ret < 0)
+ return ret;
+
+ return !(val & RV3032_PMU_NCLKE);
+}
+
+static const struct clk_ops rv3032_clkout_ops = {
+ .prepare = rv3032_clkout_prepare,
+ .unprepare = rv3032_clkout_unprepare,
+ .is_prepared = rv3032_clkout_is_prepared,
+ .recalc_rate = rv3032_clkout_recalc_rate,
+ .round_rate = rv3032_clkout_round_rate,
+ .set_rate = rv3032_clkout_set_rate,
+};
+
+static int rv3032_clkout_register_clk(struct rv3032_data *rv3032,
+ struct i2c_client *client)
+{
+ int ret;
+ struct clk *clk;
+ struct clk_init_data init;
+ struct device_node *node = client->dev.of_node;
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_TLSB, RV3032_TLSB_CLKF, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_CLKIE, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(rv3032->regmap, RV3032_CLK_IRQ, 0);
+ if (ret < 0)
+ return ret;
+
+ init.name = "rv3032-clkout";
+ init.ops = &rv3032_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ rv3032->clkout_hw.init = &init;
+
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ clk = devm_clk_register(&client->dev, &rv3032->clkout_hw);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return 0;
+}
+#endif
+
+static int rv3032_hwmon_read_temp(struct device *dev, long *mC)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+ u8 buf[2];
+ int temp, prev = 0;
+ int ret;
+
+ ret = regmap_bulk_read(rv3032->regmap, RV3032_TLSB, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ temp = sign_extend32(buf[1], 7) << 4;
+ temp |= FIELD_GET(RV3032_TLSB_TEMP, buf[0]);
+
+ /* No blocking or shadowing on RV3032_TLSB and RV3032_TMSB */
+ do {
+ prev = temp;
+
+ ret = regmap_bulk_read(rv3032->regmap, RV3032_TLSB, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ temp = sign_extend32(buf[1], 7) << 4;
+ temp |= FIELD_GET(RV3032_TLSB_TEMP, buf[0]);
+ } while (temp != prev);
+
+ *mC = (temp * 1000) / 16;
+
+ return 0;
+}
+
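
TLSB and TMSB are live registers with no latching, so a two-byte read can straddle a temperature update; re-reading until two consecutive samples match guarantees a coherent 12-bit value in 1/16 degree steps. An example decode (sample register values assumed):

    /* assume TMSB = 0xE8 and TLSB[7:4] = 0x5 */
    int temp = sign_extend32(0xE8, 7) << 4;  /* -24 << 4 = -384 */
    temp |= 0x5;                             /* -379 sixteenths of a degree */
    /* mC = (-379 * 1000) / 16 = -23687, i.e. roughly -23.7 degrees C */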
+static umode_t rv3032_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int rv3032_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = rv3032_hwmon_read_temp(dev, temp);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct hwmon_channel_info *rv3032_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ NULL
+};
+
+static const struct hwmon_ops rv3032_hwmon_hwmon_ops = {
+ .is_visible = rv3032_hwmon_is_visible,
+ .read = rv3032_hwmon_read,
+};
+
+static const struct hwmon_chip_info rv3032_hwmon_chip_info = {
+ .ops = &rv3032_hwmon_hwmon_ops,
+ .info = rv3032_hwmon_info,
+};
+
+static void rv3032_hwmon_register(struct device *dev)
+{
+ struct rv3032_data *rv3032 = dev_get_drvdata(dev);
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return;
+
+ devm_hwmon_device_register_with_info(dev, "rv3032", rv3032, &rv3032_hwmon_chip_info, NULL);
+}
+
+static struct rtc_class_ops rv3032_rtc_ops = {
+ .read_time = rv3032_get_time,
+ .set_time = rv3032_set_time,
+ .read_offset = rv3032_read_offset,
+ .set_offset = rv3032_set_offset,
+ .ioctl = rv3032_ioctl,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xCA,
+};
+
+static int rv3032_probe(struct i2c_client *client)
+{
+ struct rv3032_data *rv3032;
+ int ret, status;
+ struct nvmem_config nvmem_cfg = {
+ .name = "rv3032_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = 16,
+ .type = NVMEM_TYPE_BATTERY_BACKED,
+ .reg_read = rv3032_nvram_read,
+ .reg_write = rv3032_nvram_write,
+ };
+ struct nvmem_config eeprom_cfg = {
+ .name = "rv3032_eeprom",
+ .word_size = 1,
+ .stride = 1,
+ .size = 32,
+ .type = NVMEM_TYPE_EEPROM,
+ .reg_read = rv3032_eeprom_read,
+ .reg_write = rv3032_eeprom_write,
+ };
+
+ rv3032 = devm_kzalloc(&client->dev, sizeof(struct rv3032_data),
+ GFP_KERNEL);
+ if (!rv3032)
+ return -ENOMEM;
+
+ rv3032->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(rv3032->regmap))
+ return PTR_ERR(rv3032->regmap);
+
+ i2c_set_clientdata(client, rv3032);
+
+ ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ rv3032->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rv3032->rtc))
+ return PTR_ERR(rv3032->rtc);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, rv3032_handle_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "rv3032", rv3032);
+ if (ret) {
+ dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
+ client->irq = 0;
+ } else {
+ rv3032_rtc_ops.read_alarm = rv3032_get_alarm;
+ rv3032_rtc_ops.set_alarm = rv3032_set_alarm;
+ rv3032_rtc_ops.alarm_irq_enable = rv3032_alarm_irq_enable;
+ }
+ }
+
+ ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1,
+ RV3032_CTRL1_WADA, RV3032_CTRL1_WADA);
+ if (ret)
+ return ret;
+
+ rv3032_trickle_charger_setup(&client->dev, rv3032);
+
+ rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rv3032->rtc->ops = &rv3032_rtc_ops;
+ ret = rtc_register_device(rv3032->rtc);
+ if (ret)
+ return ret;
+
+ nvmem_cfg.priv = rv3032;
+ rtc_nvmem_register(rv3032->rtc, &nvmem_cfg);
+ eeprom_cfg.priv = rv3032;
+ rtc_nvmem_register(rv3032->rtc, &eeprom_cfg);
+
+ rv3032->rtc->max_user_freq = 1;
+
+#ifdef CONFIG_COMMON_CLK
+ rv3032_clkout_register_clk(rv3032, client);
+#endif
+
+ rv3032_hwmon_register(&client->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rv3032_of_match[] = {
+ { .compatible = "microcrystal,rv3032", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rv3032_of_match);
+
+static struct i2c_driver rv3032_driver = {
+ .driver = {
+ .name = "rtc-rv3032",
+ .of_match_table = of_match_ptr(rv3032_of_match),
+ },
+ .probe_new = rv3032_probe,
+};
+module_i2c_driver(rv3032_driver);
+
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_DESCRIPTION("Micro Crystal RV3032 RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 93c3a6b627bd..c6d8e3425688 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -454,13 +454,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
static int rv8803_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- int ret;
-
- ret = rv8803_write_reg(priv, RV8803_RAM, *(u8 *)val);
- if (ret)
- return ret;
-
- return 0;
+ return rv8803_write_reg(priv, RV8803_RAM, *(u8 *)val);
}
static int rv8803_nvram_read(void *priv, unsigned int offset,
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index fe010151ec8f..dca41a2a39b2 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -11,42 +11,43 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/rtc.h>
-#define RX8010_SEC 0x10
-#define RX8010_MIN 0x11
-#define RX8010_HOUR 0x12
-#define RX8010_WDAY 0x13
-#define RX8010_MDAY 0x14
-#define RX8010_MONTH 0x15
-#define RX8010_YEAR 0x16
-#define RX8010_RESV17 0x17
-#define RX8010_ALMIN 0x18
-#define RX8010_ALHOUR 0x19
-#define RX8010_ALWDAY 0x1A
-#define RX8010_TCOUNT0 0x1B
-#define RX8010_TCOUNT1 0x1C
-#define RX8010_EXT 0x1D
-#define RX8010_FLAG 0x1E
-#define RX8010_CTRL 0x1F
+#define RX8010_SEC 0x10
+#define RX8010_MIN 0x11
+#define RX8010_HOUR 0x12
+#define RX8010_WDAY 0x13
+#define RX8010_MDAY 0x14
+#define RX8010_MONTH 0x15
+#define RX8010_YEAR 0x16
+#define RX8010_RESV17 0x17
+#define RX8010_ALMIN 0x18
+#define RX8010_ALHOUR 0x19
+#define RX8010_ALWDAY 0x1A
+#define RX8010_TCOUNT0 0x1B
+#define RX8010_TCOUNT1 0x1C
+#define RX8010_EXT 0x1D
+#define RX8010_FLAG 0x1E
+#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
-#define RX8010_RESV30 0x30
-#define RX8010_RESV31 0x31
-#define RX8010_IRQ 0x32
+#define RX8010_RESV30 0x30
+#define RX8010_RESV31 0x31
+#define RX8010_IRQ 0x32
-#define RX8010_EXT_WADA BIT(3)
+#define RX8010_EXT_WADA BIT(3)
-#define RX8010_FLAG_VLF BIT(1)
-#define RX8010_FLAG_AF BIT(3)
-#define RX8010_FLAG_TF BIT(4)
-#define RX8010_FLAG_UF BIT(5)
+#define RX8010_FLAG_VLF BIT(1)
+#define RX8010_FLAG_AF BIT(3)
+#define RX8010_FLAG_TF BIT(4)
+#define RX8010_FLAG_UF BIT(5)
-#define RX8010_CTRL_AIE BIT(3)
-#define RX8010_CTRL_UIE BIT(5)
-#define RX8010_CTRL_STOP BIT(6)
-#define RX8010_CTRL_TEST BIT(7)
+#define RX8010_CTRL_AIE BIT(3)
+#define RX8010_CTRL_UIE BIT(5)
+#define RX8010_CTRL_STOP BIT(6)
+#define RX8010_CTRL_TEST BIT(7)
-#define RX8010_ALARM_AE BIT(7)
+#define RX8010_ALARM_AE BIT(7)
static const struct i2c_device_id rx8010_id[] = {
{ "rx8010", 0 },
@@ -61,7 +62,7 @@ static const struct of_device_id rx8010_of_match[] = {
MODULE_DEVICE_TABLE(of, rx8010_of_match);
struct rx8010_data {
- struct i2c_client *client;
+ struct regmap *regs;
struct rtc_device *rtc;
u8 ctrlreg;
};
@@ -70,13 +71,12 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8010_data *rx8010 = i2c_get_clientdata(client);
- int flagreg;
+ int flagreg, err;
mutex_lock(&rx8010->rtc->ops_lock);
- flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG);
-
- if (flagreg <= 0) {
+ err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
+ if (err) {
mutex_unlock(&rx8010->rtc->ops_lock);
return IRQ_NONE;
}
@@ -99,32 +99,29 @@ static irqreturn_t rx8010_irq_1_handler(int irq, void *dev_id)
rtc_update_irq(rx8010->rtc, 1, RTC_UF | RTC_IRQF);
}
- i2c_smbus_write_byte_data(client, RX8010_FLAG, flagreg);
-
+ err = regmap_write(rx8010->regs, RX8010_FLAG, flagreg);
mutex_unlock(&rx8010->rtc->ops_lock);
- return IRQ_HANDLED;
+ return err ? IRQ_NONE : IRQ_HANDLED;
}
static int rx8010_get_time(struct device *dev, struct rtc_time *dt)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- u8 date[7];
- int flagreg;
- int err;
+ u8 date[RX8010_YEAR - RX8010_SEC + 1];
+ int flagreg, err;
- flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
- if (flagreg < 0)
- return flagreg;
+ err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
+ if (err)
+ return err;
if (flagreg & RX8010_FLAG_VLF) {
dev_warn(dev, "Frequency stop detected\n");
return -EINVAL;
}
- err = i2c_smbus_read_i2c_block_data(rx8010->client, RX8010_SEC,
- 7, date);
- if (err != 7)
- return err < 0 ? err : -EIO;
+ err = regmap_bulk_read(rx8010->regs, RX8010_SEC, date, sizeof(date));
+ if (err)
+ return err;
dt->tm_sec = bcd2bin(date[RX8010_SEC - RX8010_SEC] & 0x7f);
dt->tm_min = bcd2bin(date[RX8010_MIN - RX8010_SEC] & 0x7f);
@@ -140,22 +137,13 @@ static int rx8010_get_time(struct device *dev, struct rtc_time *dt)
static int rx8010_set_time(struct device *dev, struct rtc_time *dt)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- u8 date[7];
- int ctrl, flagreg;
- int ret;
-
- if ((dt->tm_year < 100) || (dt->tm_year > 199))
- return -EINVAL;
+ u8 date[RX8010_YEAR - RX8010_SEC + 1];
+ int err;
/* set STOP bit before changing clock/calendar */
- ctrl = i2c_smbus_read_byte_data(rx8010->client, RX8010_CTRL);
- if (ctrl < 0)
- return ctrl;
- rx8010->ctrlreg = ctrl | RX8010_CTRL_STOP;
- ret = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL,
- rx8010->ctrlreg);
- if (ret < 0)
- return ret;
+ err = regmap_set_bits(rx8010->regs, RX8010_CTRL, RX8010_CTRL_STOP);
+ if (err)
+ return err;
date[RX8010_SEC - RX8010_SEC] = bin2bcd(dt->tm_sec);
date[RX8010_MIN - RX8010_SEC] = bin2bcd(dt->tm_min);
@@ -165,66 +153,54 @@ static int rx8010_set_time(struct device *dev, struct rtc_time *dt)
date[RX8010_YEAR - RX8010_SEC] = bin2bcd(dt->tm_year - 100);
date[RX8010_WDAY - RX8010_SEC] = bin2bcd(1 << dt->tm_wday);
- ret = i2c_smbus_write_i2c_block_data(rx8010->client,
- RX8010_SEC, 7, date);
- if (ret < 0)
- return ret;
+ err = regmap_bulk_write(rx8010->regs, RX8010_SEC, date, sizeof(date));
+ if (err)
+ return err;
/* clear STOP bit after changing clock/calendar */
- ctrl = i2c_smbus_read_byte_data(rx8010->client, RX8010_CTRL);
- if (ctrl < 0)
- return ctrl;
- rx8010->ctrlreg = ctrl & ~RX8010_CTRL_STOP;
- ret = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL,
- rx8010->ctrlreg);
- if (ret < 0)
- return ret;
-
- flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
- if (flagreg < 0) {
- return flagreg;
- }
+ err = regmap_clear_bits(rx8010->regs, RX8010_CTRL, RX8010_CTRL_STOP);
+ if (err)
+ return err;
- if (flagreg & RX8010_FLAG_VLF)
- ret = i2c_smbus_write_byte_data(rx8010->client, RX8010_FLAG,
- flagreg & ~RX8010_FLAG_VLF);
+ err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_VLF);
+ if (err)
+ return err;
return 0;
}
-static int rx8010_init_client(struct i2c_client *client)
+static int rx8010_init(struct device *dev)
{
- struct rx8010_data *rx8010 = i2c_get_clientdata(client);
+ struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 ctrl[2];
- int need_clear = 0, err = 0;
+ int need_clear = 0, err;
/* Initialize reserved registers as specified in datasheet */
- err = i2c_smbus_write_byte_data(client, RX8010_RESV17, 0xD8);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_RESV17, 0xD8);
+ if (err)
return err;
- err = i2c_smbus_write_byte_data(client, RX8010_RESV30, 0x00);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_RESV30, 0x00);
+ if (err)
return err;
- err = i2c_smbus_write_byte_data(client, RX8010_RESV31, 0x08);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_RESV31, 0x08);
+ if (err)
return err;
- err = i2c_smbus_write_byte_data(client, RX8010_IRQ, 0x00);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_IRQ, 0x00);
+ if (err)
return err;
- err = i2c_smbus_read_i2c_block_data(rx8010->client, RX8010_FLAG,
- 2, ctrl);
- if (err != 2)
- return err < 0 ? err : -EIO;
+ err = regmap_bulk_read(rx8010->regs, RX8010_FLAG, ctrl, 2);
+ if (err)
+ return err;
if (ctrl[0] & RX8010_FLAG_VLF)
- dev_warn(&client->dev, "Frequency stop was detected\n");
+ dev_warn(dev, "Frequency stop was detected\n");
if (ctrl[0] & RX8010_FLAG_AF) {
- dev_warn(&client->dev, "Alarm was detected\n");
+ dev_warn(dev, "Alarm was detected\n");
need_clear = 1;
}
@@ -236,8 +212,8 @@ static int rx8010_init_client(struct i2c_client *client)
if (need_clear) {
ctrl[0] &= ~(RX8010_FLAG_AF | RX8010_FLAG_TF | RX8010_FLAG_UF);
- err = i2c_smbus_write_byte_data(client, RX8010_FLAG, ctrl[0]);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_FLAG, ctrl[0]);
+ if (err)
return err;
}
@@ -249,18 +225,16 @@ static int rx8010_init_client(struct i2c_client *client)
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- struct i2c_client *client = rx8010->client;
u8 alarmvals[3];
- int flagreg;
- int err;
+ int flagreg, err;
- err = i2c_smbus_read_i2c_block_data(client, RX8010_ALMIN, 3, alarmvals);
- if (err != 3)
- return err < 0 ? err : -EIO;
+ err = regmap_bulk_read(rx8010->regs, RX8010_ALMIN, alarmvals, 3);
+ if (err)
+ return err;
- flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG);
- if (flagreg < 0)
- return flagreg;
+ err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
+ if (err)
+ return err;
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
@@ -277,55 +251,38 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
- struct i2c_client *client = to_i2c_client(dev);
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
u8 alarmvals[3];
- int extreg, flagreg;
int err;
- flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG);
- if (flagreg < 0) {
- return flagreg;
- }
-
if (rx8010->ctrlreg & (RX8010_CTRL_AIE | RX8010_CTRL_UIE)) {
rx8010->ctrlreg &= ~(RX8010_CTRL_AIE | RX8010_CTRL_UIE);
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL,
- rx8010->ctrlreg);
- if (err < 0) {
+ err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
+ if (err)
return err;
- }
}
- flagreg &= ~RX8010_FLAG_AF;
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_FLAG, flagreg);
- if (err < 0)
+ err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_AF);
+ if (err)
return err;
alarmvals[0] = bin2bcd(t->time.tm_min);
alarmvals[1] = bin2bcd(t->time.tm_hour);
alarmvals[2] = bin2bcd(t->time.tm_mday);
- err = i2c_smbus_write_i2c_block_data(rx8010->client, RX8010_ALMIN,
- 2, alarmvals);
- if (err < 0)
+ err = regmap_bulk_write(rx8010->regs, RX8010_ALMIN, alarmvals, 2);
+ if (err)
return err;
- extreg = i2c_smbus_read_byte_data(client, RX8010_EXT);
- if (extreg < 0)
- return extreg;
-
- extreg |= RX8010_EXT_WADA;
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_EXT, extreg);
- if (err < 0)
+ err = regmap_clear_bits(rx8010->regs, RX8010_EXT, RX8010_EXT_WADA);
+ if (err)
return err;
if (alarmvals[2] == 0)
alarmvals[2] |= RX8010_ALARM_AE;
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_ALWDAY,
- alarmvals[2]);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_ALWDAY, alarmvals[2]);
+ if (err)
return err;
if (t->enabled) {
@@ -335,9 +292,8 @@ static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
rx8010->ctrlreg |=
(RX8010_CTRL_AIE | RX8010_CTRL_UIE);
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL,
- rx8010->ctrlreg);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
+ if (err)
return err;
}
@@ -347,11 +303,9 @@ static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
static int rx8010_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
- struct i2c_client *client = to_i2c_client(dev);
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- int flagreg;
- u8 ctrl;
int err;
+ u8 ctrl;
ctrl = rx8010->ctrlreg;
@@ -367,20 +321,14 @@ static int rx8010_alarm_irq_enable(struct device *dev,
ctrl &= ~RX8010_CTRL_AIE;
}
- flagreg = i2c_smbus_read_byte_data(client, RX8010_FLAG);
- if (flagreg < 0)
- return flagreg;
-
- flagreg &= ~RX8010_FLAG_AF;
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_FLAG, flagreg);
- if (err < 0)
+ err = regmap_clear_bits(rx8010->regs, RX8010_FLAG, RX8010_FLAG_AF);
+ if (err)
return err;
if (ctrl != rx8010->ctrlreg) {
rx8010->ctrlreg = ctrl;
- err = i2c_smbus_write_byte_data(rx8010->client, RX8010_CTRL,
- rx8010->ctrlreg);
- if (err < 0)
+ err = regmap_write(rx8010->regs, RX8010_CTRL, rx8010->ctrlreg);
+ if (err)
return err;
}
@@ -390,14 +338,13 @@ static int rx8010_alarm_irq_enable(struct device *dev,
static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rx8010_data *rx8010 = dev_get_drvdata(dev);
- int tmp;
- int flagreg;
+ int tmp, flagreg, err;
switch (cmd) {
case RTC_VL_READ:
- flagreg = i2c_smbus_read_byte_data(rx8010->client, RX8010_FLAG);
- if (flagreg < 0)
- return flagreg;
+ err = regmap_read(rx8010->regs, RX8010_FLAG, &flagreg);
+ if (err)
+ return err;
tmp = flagreg & RX8010_FLAG_VLF ? RTC_VL_DATA_INVALID : 0;
return put_user(tmp, (unsigned int __user *)arg);
@@ -407,65 +354,72 @@ static int rx8010_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
}
}
-static struct rtc_class_ops rx8010_rtc_ops = {
+static const struct rtc_class_ops rx8010_rtc_ops_default = {
+ .read_time = rx8010_get_time,
+ .set_time = rx8010_set_time,
+ .ioctl = rx8010_ioctl,
+};
+
+static const struct rtc_class_ops rx8010_rtc_ops_alarm = {
.read_time = rx8010_get_time,
.set_time = rx8010_set_time,
.ioctl = rx8010_ioctl,
+ .read_alarm = rx8010_read_alarm,
+ .set_alarm = rx8010_set_alarm,
+ .alarm_irq_enable = rx8010_alarm_irq_enable,
+};
+
+static const struct regmap_config rx8010_regmap_config = {
+ .name = "rx8010-rtc",
+ .reg_bits = 8,
+ .val_bits = 8,
};
-static int rx8010_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rx8010_probe(struct i2c_client *client)
{
- struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
struct rx8010_data *rx8010;
int err = 0;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
- | I2C_FUNC_SMBUS_I2C_BLOCK)) {
- dev_err(&adapter->dev, "doesn't support required functionality\n");
- return -EIO;
- }
-
- rx8010 = devm_kzalloc(&client->dev, sizeof(struct rx8010_data),
- GFP_KERNEL);
+ rx8010 = devm_kzalloc(dev, sizeof(*rx8010), GFP_KERNEL);
if (!rx8010)
return -ENOMEM;
- rx8010->client = client;
i2c_set_clientdata(client, rx8010);
- err = rx8010_init_client(client);
+ rx8010->regs = devm_regmap_init_i2c(client, &rx8010_regmap_config);
+ if (IS_ERR(rx8010->regs))
+ return PTR_ERR(rx8010->regs);
+
+ err = rx8010_init(dev);
if (err)
return err;
+ rx8010->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rx8010->rtc))
+ return PTR_ERR(rx8010->rtc);
+
if (client->irq > 0) {
- dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
- err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ dev_info(dev, "IRQ %d supplied\n", client->irq);
+ err = devm_request_threaded_irq(dev, client->irq, NULL,
rx8010_irq_1_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"rx8010", client);
-
if (err) {
- dev_err(&client->dev, "unable to request IRQ\n");
- client->irq = 0;
- } else {
- rx8010_rtc_ops.read_alarm = rx8010_read_alarm;
- rx8010_rtc_ops.set_alarm = rx8010_set_alarm;
- rx8010_rtc_ops.alarm_irq_enable = rx8010_alarm_irq_enable;
+ dev_err(dev, "unable to request IRQ\n");
+ return err;
}
- }
- rx8010->rtc = devm_rtc_device_register(&client->dev, client->name,
- &rx8010_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rx8010->rtc)) {
- dev_err(&client->dev, "unable to register the class device\n");
- return PTR_ERR(rx8010->rtc);
+ rx8010->rtc->ops = &rx8010_rtc_ops_alarm;
+ } else {
+ rx8010->rtc->ops = &rx8010_rtc_ops_default;
}
rx8010->rtc->max_user_freq = 1;
+ rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099;
- return 0;
+ return rtc_register_device(rx8010->rtc);
}
static struct i2c_driver rx8010_driver = {
@@ -473,7 +427,7 @@ static struct i2c_driver rx8010_driver = {
.name = "rtc-rx8010",
.of_match_table = of_match_ptr(rx8010_of_match),
},
- .probe = rx8010_probe,
+ .probe_new = rx8010_probe,
.id_table = rx8010_id,
};
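
Besides the regmap conversion, the rx8010 patch replaces the single writable ops structure, previously patched at probe time, with two const tables; probe simply points rtc->ops at the alarm-capable one when an interrupt is wired up. The selection pattern in miniature (hypothetical foo_* names):

    static const struct rtc_class_ops foo_ops = {
        .read_time = foo_read_time,
        .set_time  = foo_set_time,
    };

    static const struct rtc_class_ops foo_ops_alarm = {
        .read_time        = foo_read_time,
        .set_time         = foo_set_time,
        .read_alarm       = foo_read_alarm,
        .set_alarm        = foo_set_alarm,
        .alarm_irq_enable = foo_alarm_irq_enable,
    };

    /* in probe, after devm_rtc_allocate_device(): */
    rtc->ops = client->irq > 0 ? &foo_ops_alarm : &foo_ops;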
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e1b50e682fc4..24a41909f049 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -494,13 +494,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (info->data->needs_src_clk) {
info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
if (IS_ERR(info->rtc_src_clk)) {
- ret = PTR_ERR(info->rtc_src_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to find rtc source clock\n");
- else
- dev_dbg(&pdev->dev,
- "probe deferred due to missing rtc src clk\n");
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(info->rtc_src_clk),
+ "failed to find rtc source clock\n");
goto err_src_clk;
}
ret = clk_prepare_enable(info->rtc_src_clk);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 51041dc08af4..0c65448b85ee 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -173,7 +173,7 @@ static int st_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-static struct rtc_class_ops st_rtc_ops = {
+static const struct rtc_class_ops st_rtc_ops = {
.read_time = st_rtc_read_time,
.set_time = st_rtc_set_time,
.read_alarm = st_rtc_read_alarm,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index af5b0ecb8f89..a9698fba9b76 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -101,18 +101,11 @@ int dasd_scan_partitions(struct dasd_block *block)
struct block_device *bdev;
int rc;
- bdev = bdget_disk(block->gdp, 0);
- if (!bdev) {
- DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
- "scan partitions error, bdget returned NULL");
- return -ENODEV;
- }
-
- rc = blkdev_get(bdev, FMODE_READ, NULL);
- if (rc < 0) {
+ bdev = blkdev_get_by_dev(disk_devt(block->gdp), FMODE_READ, NULL);
+ if (IS_ERR(bdev)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
- "scan partitions error, blkdev_get returned %d",
- rc);
+ "scan partitions error, blkdev_get returned %ld",
+ PTR_ERR(bdev));
return -ENODEV;
}
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 777734d1b4e5..cb6427fb9f3d 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -55,10 +55,7 @@ dasd_ioctl_enable(struct block_device *bdev)
dasd_enable_device(base);
/* Formatting the dasd device can change the capacity. */
- mutex_lock(&bdev->bd_mutex);
- i_size_write(bdev->bd_inode,
- (loff_t)get_capacity(base->block->gdp) << 9);
- mutex_unlock(&bdev->bd_mutex);
+ bd_set_nr_sectors(bdev, get_capacity(base->block->gdp));
dasd_put_device(base);
return 0;
}
@@ -91,9 +88,7 @@ dasd_ioctl_disable(struct block_device *bdev)
* Set i_size to zero, since read, write, etc. check against this
* value.
*/
- mutex_lock(&bdev->bd_mutex);
- i_size_write(bdev->bd_inode, 0);
- mutex_unlock(&bdev->bd_mutex);
+ bd_set_nr_sectors(bdev, 0);
dasd_put_device(base);
return 0;
}
@@ -282,7 +277,7 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
dasd_put_device(base);
return -EFAULT;
}
- if (bdev != bdev->bd_contains) {
+ if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
dev_name(&base->cdev->dev));
dasd_put_device(base);
@@ -309,7 +304,7 @@ static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
- if (bdev != bdev->bd_contains) {
+ if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
@@ -367,7 +362,7 @@ static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp
rc = -EROFS;
goto out_err;
}
- if (bdev != bdev->bd_contains) {
+ if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
@@ -545,7 +540,7 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (bdev != bdev->bd_contains)
+ if (bdev_is_partition(bdev))
// ro setting is not allowed for partitions
return -EINVAL;
if (get_user(intval, (int __user *)argp))
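
bdev_is_partition() replaces the open-coded `bdev != bdev->bd_contains` comparison, so DASD no longer reaches into block-layer internals; the helper reports whether the block_device describes a partition rather than a whole disk. Each of these ioctls now uses the same guard:

    /* these operations only make sense on the whole device */
    if (bdev_is_partition(bdev))
        return -EINVAL;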
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 845e12ac5954..c6fdb81a068a 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_PCI) += sclp_pci.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += sclp_ap.o
+
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 92757f9bd010..d8acabbb1ed3 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -978,7 +978,6 @@ static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
static int tty3215_open(struct tty_struct *tty, struct file * filp)
{
struct raw3215_info *raw = tty->driver_data;
- int retval;
tty_port_tty_set(&raw->port, tty);
@@ -986,11 +985,7 @@ static int tty3215_open(struct tty_struct *tty, struct file * filp)
/*
* Start up 3215 device
*/
- retval = raw3215_startup(raw);
- if (retval)
- return retval;
-
- return 0;
+ return raw3215_startup(raw);
}
/*
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 08f36e973b43..8d979e0ee605 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -110,7 +110,6 @@ struct raw3270_request {
};
struct raw3270_request *raw3270_request_alloc(size_t size);
-struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
void raw3270_request_free(struct raw3270_request *);
void raw3270_request_reset(struct raw3270_request *);
void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 196333013e54..69d9cde9ff5a 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -229,7 +229,7 @@ static inline void sclp_fill_core_info(struct sclp_core_info *info,
#define SCLP_HAS_CPU_INFO (sclp.facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp.facilities & 0x0400000000000000ULL)
#define SCLP_HAS_PCI_RECONFIG (sclp.facilities & 0x0000000040000000ULL)
-
+#define SCLP_HAS_AP_RECONFIG (sclp.facilities & 0x0000000100000000ULL)
struct gds_subvector {
u8 length;
@@ -305,9 +305,7 @@ int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
-
int sclp_sdias_init(void);
-void sclp_sdias_exit(void);
enum {
sclp_init_state_uninitialized,
diff --git a/drivers/s390/char/sclp_ap.c b/drivers/s390/char/sclp_ap.c
new file mode 100644
index 000000000000..0dd1ca712795
--- /dev/null
+++ b/drivers/s390/char/sclp_ap.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * s390 crypto adapter related sclp functions.
+ *
+ * Copyright IBM Corp. 2020
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/sclp.h>
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_AP 0x001f0001
+#define SCLP_CMDW_DECONFIGURE_AP 0x001e0001
+
+struct ap_cfg_sccb {
+ struct sccb_header header;
+} __packed;
+
+static int do_ap_configure(sclp_cmdw_t cmd, u32 apid)
+{
+ struct ap_cfg_sccb *sccb;
+ int rc;
+
+ if (!SCLP_HAS_AP_RECONFIG)
+ return -EOPNOTSUPP;
+
+ sccb = (struct ap_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+
+ sccb->header.length = PAGE_SIZE;
+ cmd |= (apid & 0xFF) << 8;
+ rc = sclp_sync_request(cmd, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020: case 0x0120: case 0x0440: case 0x0450:
+ break;
+ default:
+ pr_warn("configure AP adapter %u failed: cmd=0x%08x response=0x%04x\n",
+ apid, cmd, sccb->header.response_code);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long) sccb);
+ return rc;
+}
+
+int sclp_ap_configure(u32 apid)
+{
+ return do_ap_configure(SCLP_CMDW_CONFIGURE_AP, apid);
+}
+EXPORT_SYMBOL(sclp_ap_configure);
+
+int sclp_ap_deconfigure(u32 apid)
+{
+ return do_ap_configure(SCLP_CMDW_DECONFIGURE_AP, apid);
+}
+EXPORT_SYMBOL(sclp_ap_deconfigure);
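
A minimal caller sketch for the two new exports (the wrapper is hypothetical; only sclp_ap_configure()/sclp_ap_deconfigure() and the error codes above come from the patch):

/* Hypothetical: toggle the SCLP config state of AP adapter 'apid'.
 * -EOPNOTSUPP means the SCLP_HAS_AP_RECONFIG facility bit is not set.
 */
static int toggle_ap_adapter(u32 apid, bool configure)
{
	return configure ? sclp_ap_configure(apid)
			 : sclp_ap_deconfigure(apid);
}
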
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index a864b21af602..f6e97f0830f6 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -406,7 +406,7 @@ static void __init add_memory_merged(u16 rn)
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size);
+ add_memory(0, addr, block_size, MHP_NONE);
skip_add:
first_rn = rn;
num = 1;
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 7737470f8498..a960afa974bf 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -17,12 +17,12 @@
static struct read_info_sccb __bootdata(sclp_info_sccb);
static int __bootdata(sclp_info_sccb_valid);
char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET;
-int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
+int sclp_init_state = sclp_init_state_uninitialized;
/*
* Used to keep track of the size of the event masks. Qemu until version 2.11
* only supports 4 and needs a workaround.
*/
-bool sclp_mask_compat_mode __section(.data);
+bool sclp_mask_compat_mode;
void sclp_early_wait_irq(void)
{
@@ -214,11 +214,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
-void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
+void __sclp_early_printk(const char *str, unsigned int len)
{
int have_linemode, have_vt220;
- if (!force && sclp_init_state != sclp_init_state_uninitialized)
+ if (sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
@@ -231,12 +231,7 @@ void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
void sclp_early_printk(const char *str)
{
- __sclp_early_printk(str, strlen(str), 0);
-}
-
-void sclp_early_printk_force(const char *str)
-{
- __sclp_early_printk(str, strlen(str), 1);
+ __sclp_early_printk(str, strlen(str));
}
int __init sclp_early_read_info(void)
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 44594a492553..d6c84e354df5 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -337,24 +337,6 @@ sclp_chars_in_buffer(struct sclp_buffer *buffer)
}
/*
- * sets or provides some values that influence the drivers behaviour
- */
-void
-sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
-{
- buffer->columns = columns;
- if (buffer->current_line != NULL &&
- buffer->current_length > buffer->columns)
- sclp_finalize_mto(buffer);
-}
-
-void
-sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
-{
- buffer->htab = htab;
-}
-
-/*
* called by sclp_console_init and/or sclp_tty_init
*/
int
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index a2eb22f67393..93d706e4935c 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -86,8 +86,6 @@ void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
-void sclp_set_columns(struct sclp_buffer *, unsigned short);
-void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 644b61013679..215d4b4a5ff5 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -257,7 +257,7 @@ static int __init sclp_sdias_init_async(void)
int __init sclp_sdias_init(void)
{
- if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ if (!is_ipl_type_dump())
return 0;
sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
BUG_ON(!sclp_sdias_sccb);
@@ -275,9 +275,3 @@ out:
TRACE("init done\n");
return 0;
}
-
-void __exit sclp_sdias_exit(void)
-{
- debug_unregister(sdias_dbf);
- sclp_unregister(&sclp_sdias_register);
-}
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 8bec5f9ea92c..e2c60475dfa8 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -238,7 +238,6 @@ extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *);
-void tape_hotplug_event(struct tape_device *, int major, int action);
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
@@ -258,8 +257,6 @@ tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
tape_do_io_async(device, request);
}
-extern int tape_oper_handler(int irq, int status);
-extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int);
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 53ec8e2870d4..dcc63ff587f9 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -101,7 +101,6 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
-void tape_std_check_locate(struct tape_device *, struct tape_request *);
/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
@@ -131,19 +130,8 @@ int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);
/* Event handlers */
-void tape_std_default_handler(struct tape_device *);
-void tape_std_unexpect_uchk_handler(struct tape_device *);
-void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);
-// the error recovery stuff:
-void tape_std_error_recovery(struct tape_device *);
-void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
-void tape_std_error_recovery_succeded(struct tape_device *);
-void tape_std_error_recovery_do_retry(struct tape_device *);
-void tape_std_error_recovery_read_opposite(struct tape_device *);
-void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);
-
/* S390 tape types */
enum s390_tape_type {
tape_3480,
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index d29f1b71618e..1515fdc3c1ab 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
- * dumps on SCSI disks (zfcpdump).
+ * dumps on SCSI/NVMe disks (zfcp/nvme dump).
*
* For more information please refer to Documentation/s390/zfcpdump.rst
*
@@ -243,7 +243,7 @@ static int __init zcore_init(void)
unsigned char arch;
int rc;
- if (ipl_info.type != IPL_TYPE_FCP_DUMP)
+ if (!is_ipl_type_dump())
return -ENODATA;
if (OLDMEM_BASE)
return -ENODATA;
@@ -252,9 +252,16 @@ static int __init zcore_init(void)
debug_register_view(zcore_dbf, &debug_sprintf_view);
debug_set_level(zcore_dbf, 6);
- TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
- TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
- TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ TRACE("type: fcp\n");
+ TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
+ TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
+ TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
+ } else if (ipl_info.type == IPL_TYPE_NVME_DUMP) {
+ TRACE("type: nvme\n");
+ TRACE("fid: %x\n", ipl_info.data.nvme.fid);
+ TRACE("nsid: %x\n", ipl_info.data.nvme.nsid);
+ }
rc = sclp_sdias_init();
if (rc)
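
Both sclp_sdias.c and zcore.c now gate on is_ipl_type_dump() instead of checking IPL_TYPE_FCP_DUMP directly, which matches the new NVMe dump handling above. Assumed semantics of the helper (a sketch; the definition is not part of this patch):

static inline bool is_ipl_type_dump(void)
{
	return ipl_info.type == IPL_TYPE_FCP_DUMP ||
	       ipl_info.type == IPL_TYPE_NVME_DUMP;
}
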
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c314e9495c1b..fc06a4002168 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -65,6 +65,8 @@ int chsc_error_from_response(int response)
case 0x0100:
case 0x0102:
return -ENOMEM;
+ case 0x0108: /* "HW limit exceeded" for the op 0x003d */
+ return -EUSERS;
default:
return -EIO;
}
@@ -1114,7 +1116,7 @@ int chsc_enable_facility(int operation_code)
return ret;
}
-int __init chsc_get_cssid(int idx)
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
{
struct {
struct chsc_header request;
@@ -1125,7 +1127,8 @@ int __init chsc_get_cssid(int idx)
u32 reserved2[3];
struct {
u8 cssid;
- u32 : 24;
+ u8 iid;
+ u32 : 16;
} list[0];
} *sdcal_area;
int ret;
@@ -1151,8 +1154,10 @@ int __init chsc_get_cssid(int idx)
}
if ((addr_t) &sdcal_area->list[idx] <
- (addr_t) &sdcal_area->response + sdcal_area->response.length)
- ret = sdcal_area->list[idx].cssid;
+ (addr_t) &sdcal_area->response + sdcal_area->response.length) {
+ *cssid = sdcal_area->list[idx].cssid;
+ *iid = sdcal_area->list[idx].iid;
+ }
else
ret = -ENODEV;
exit:
@@ -1260,6 +1265,27 @@ int chsc_sstpi(void *page, void *result, size_t size)
return (rr->response.code == 0x0001) ? 0 : -EIO;
}
+int chsc_stzi(void *page, void *result, size_t size)
+{
+ struct {
+ struct chsc_header request;
+ unsigned int rsvd0[3];
+ struct chsc_header response;
+ char data[];
+ } *rr;
+ int rc;
+
+ memset(page, 0, PAGE_SIZE);
+ rr = page;
+ rr->request.length = 0x0010;
+ rr->request.code = 0x003e;
+ rc = chsc(rr);
+ if (rc)
+ return -EIO;
+ memcpy(result, &rr->data, size);
+ return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
int chsc_siosl(struct subchannel_id schid)
{
struct {
@@ -1340,6 +1366,7 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
* chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -1347,10 +1374,8 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
*
* Returns 0 on success.
*/
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
{
memset(pnso_area, 0, sizeof(*pnso_area));
pnso_area->request.length = 0x0030;
@@ -1359,7 +1384,7 @@ int chsc_pnso(struct subchannel_id schid,
pnso_area->ssid = schid.ssid;
pnso_area->sch = schid.sch_no;
pnso_area->cssid = schid.cssid;
- pnso_area->oc = 0; /* Store-network-bridging-information list */
+ pnso_area->oc = oc;
pnso_area->resume_token = resume_token;
pnso_area->n = (cnc != 0);
if (chsc(pnso_area))
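
chsc_pnso() now takes the operation code from its caller instead of hard-coding 0. A hedged caller sketch preserving the old behaviour (wrapper name is hypothetical; the patch only documents that oc 0 means the store-network-bridging-information list):

/* hypothetical: request the bridging-information list, as before */
static int pnso_bridge_info(struct subchannel_id schid,
			    struct chsc_pnso_area *area,
			    struct chsc_pnso_resume_token rtok)
{
	return chsc_pnso(schid, area, 0 /* bridging-info list */, rtok, 0);
}
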
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 7ecf7e4c402e..c2b83b68bc57 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -205,12 +205,10 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc);
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc);
-int __init chsc_get_cssid(int idx);
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aca022239b33..cca1a7c4bb33 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -854,7 +854,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid =
- (css->cssid < 0) ? 0 : css->cssid;
+ css->id_valid ? css->cssid : 0;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
@@ -877,7 +877,7 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
{
struct channel_subsystem *css = to_css(dev);
- if (css->cssid < 0)
+ if (!css->id_valid)
return -EINVAL;
return sprintf(buf, "%x\n", css->cssid);
@@ -975,7 +975,12 @@ static int __init setup_css(int nr)
css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
- css->cssid = chsc_get_cssid(nr);
+ ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
+ if (!ret) {
+ css->id_valid = true;
+ pr_info("Partition identifier %01x.%01x\n", css->cssid,
+ css->iid);
+ }
css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
ret = device_register(&css->device);
@@ -1350,20 +1355,6 @@ static int __init channel_subsystem_init_sync(void)
}
subsys_initcall_sync(channel_subsystem_init_sync);
-void channel_subsystem_reinit(void)
-{
- struct channel_path *chp;
- struct chp_id chpid;
-
- chsc_enable_facility(CHSC_SDA_OC_MSS);
- chp_id_for_each(&chpid) {
- chp = chpid_to_chp(chpid);
- if (chp)
- chp_update_desc(chp);
- }
- cmf_reactivate();
-}
-
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 8d832900a63d..3f322ea0f498 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -115,7 +115,9 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
- int cssid;
+ u8 cssid;
+ u8 iid;
+ bool id_valid; /* cssid,iid */
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
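
This struct change is what drives the css.c rework above: cssid was a signed int using -1 as the "unknown" sentinel, and as a u8 it can no longer carry one, so validity moves into the explicit id_valid flag set from chsc_get_cssid_iid(). In short:

/*
 * old: int cssid, -1 == unknown   ->  if (css->cssid < 0) ...
 * new: u8 cssid + bool id_valid   ->  if (!css->id_valid) ...
 */
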
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index f5c427ec24b1..853b6a8ca095 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -96,7 +96,6 @@ int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
-void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 963fcc9054c6..0fe7b2f2e7f5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -714,6 +714,7 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* ccw_device_pnso() - Perform Network-Subchannel Operation
* @cdev: device on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -722,17 +723,101 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* Returns 0 on success.
*/
int ccw_device_pnso(struct ccw_device *cdev,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc)
{
struct subchannel_id schid;
ccw_device_get_schid(cdev, &schid);
- return chsc_pnso(schid, pnso_area, resume_token, cnc);
+ return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
}
EXPORT_SYMBOL_GPL(ccw_device_pnso);
+/**
+ * ccw_device_get_cssid() - obtain Channel Subsystem ID
+ * @cdev: device to obtain the CSSID for
+ * @cssid: The resulting Channel Subsystem ID
+ */
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *cssid = css->cssid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
+
+/**
+ * ccw_device_get_iid() - obtain MIF-image ID
+ * @cdev: device to obtain the MIF-image ID for
+ * @iid: The resulting MIF-image ID
+ */
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *iid = css->iid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_iid);
+
+/**
+ * ccw_device_get_chpid() - obtain Channel Path ID
+ * @cdev: device to obtain the Channel Path ID for
+ * @chp_idx: Index of the channel path
+ * @chpid: The resulting Channel Path ID
+ */
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int mask;
+
+ if ((chp_idx < 0) || (chp_idx > 7))
+ return -EINVAL;
+ mask = 0x80 >> chp_idx;
+ if (!(sch->schib.pmcw.pim & mask))
+ return -ENODEV;
+
+ *chpid = sch->schib.pmcw.chpid[chp_idx];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
+
+/**
+ * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
+ * @cdev: device to obtain the Channel ID for
+ * @chp_idx: Index of the channel path
+ * @chid: The resulting Channel ID
+ */
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
+{
+ struct chp_id cssid_chpid;
+ struct channel_path *chp;
+ int rc;
+
+ chp_id_init(&cssid_chpid);
+ rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
+ if (rc)
+ return rc;
+ chp = chpid_to_chp(cssid_chpid);
+ if (!chp)
+ return -ENODEV;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ *chid = chp->desc_fmt1.chid;
+ else
+ rc = -ENODEV;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chid);
+
/*
* Allocate zeroed dma coherent 31 bit addressable memory using
* the subchannels dma pool. Maximal size of allocation supported
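
The four new ccw_device_get_*() accessors expose per-path identity data to CCW device drivers. A hypothetical consumer walking all eight possible channel paths (only the accessors themselves come from the patch):

/* hypothetical: log chpid/chid for every installed path of a device */
static void show_paths(struct ccw_device *cdev)
{
	u8 chpid;
	u16 chid;
	int i;

	for (i = 0; i < 8; i++) {
		if (ccw_device_get_chpid(cdev, i, &chpid))
			continue;	/* path not installed (pim bit off) */
		if (ccw_device_get_chid(cdev, i, &chid) == 0)
			dev_info(&cdev->dev, "path %d: chpid %02x chid %04x\n",
				 i, chpid, chid);
	}
}
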
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4fab8bba2cdd..f9a31c7819ae 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,26 +531,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
return 1;
}
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
-{
- unsigned char state = 0;
- int j, b = start;
-
- for (j = 0; j < count; ++j) {
- get_buf_state(q, b, &state, 0);
- if (state == SLSB_P_OUTPUT_PENDING) {
- struct qaob *aob = q->u.out.aobs[b];
- if (aob == NULL)
- continue;
-
- q->u.out.sbal_state[b].flags |=
- QDIO_OUTBUF_STATE_FLAG_PENDING;
- q->u.out.aobs[b] = NULL;
- }
- b = next_buf(b);
- }
-}
-
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
int bufnr)
{
@@ -640,6 +620,19 @@ void qdio_inbound_processing(unsigned long data)
__qdio_inbound_processing(q);
}
+static void qdio_check_pending(struct qdio_q *q, unsigned int index)
+{
+ unsigned char state;
+
+ if (get_buf_state(q, index, &state, 0) > 0 &&
+ state == SLSB_P_OUTPUT_PENDING &&
+ q->u.out.aobs[index]) {
+ q->u.out.sbal_state[index].flags |=
+ QDIO_OUTBUF_STATE_FLAG_PENDING;
+ q->u.out.aobs[index] = NULL;
+ }
+}
+
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
@@ -712,8 +705,13 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
if (count) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- if (q->u.out.use_cq)
- qdio_handle_aobs(q, start, count);
+
+ if (q->u.out.use_cq) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ qdio_check_pending(q, QDIO_BUFNR(start + i));
+ }
}
return count;
@@ -1221,7 +1219,6 @@ static void qdio_trace_init_data(struct qdio_irq *irq,
struct qdio_initialize *data)
{
DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
- DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR);
DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
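
The replacement loop indexes with QDIO_BUFNR(start + i) where the removed qdio_handle_aobs() stepped via next_buf(); both wrap at the ring size. Assumed definitions (the usual power-of-two ring idiom; the values are not shown in this patch):

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_BUFNR(num)		((num) & QDIO_MAX_BUFFERS_MASK)	/* wraps */
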
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 2c5cc6ec668e..a5b2e16b7aa8 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -9,6 +9,8 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
+
+#include <asm/ebcdic.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -403,28 +405,22 @@ void qdio_free_async_data(struct qdio_irq *irq_ptr)
}
}
-static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
- struct qdio_q **irq_ptr_qs,
- int i, int nr)
+static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
{
- irq_ptr->qdr->qdf0[i + nr].sliba =
- (unsigned long)irq_ptr_qs[i]->slib;
-
- irq_ptr->qdr->qdf0[i + nr].sla =
- (unsigned long)irq_ptr_qs[i]->sl;
-
- irq_ptr->qdr->qdf0[i + nr].slsba =
- (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
-
- irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
- irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
- irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
- irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
+ desc->sliba = virt_to_phys(queue->slib);
+ desc->sla = virt_to_phys(queue->sl);
+ desc->slsba = virt_to_phys(&queue->slsb);
+
+ desc->akey = PAGE_DEFAULT_KEY >> 4;
+ desc->bkey = PAGE_DEFAULT_KEY >> 4;
+ desc->ckey = PAGE_DEFAULT_KEY >> 4;
+ desc->dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
+ struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
@@ -433,15 +429,14 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
- irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+ irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
- __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+ qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]);
for (i = 0; i < qdio_init->no_output_qs; i++)
- __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
- qdio_init->no_input_qs);
+ qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]);
}
static void setup_qib(struct qdio_irq *irq_ptr,
@@ -459,7 +454,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
if (init_data->no_output_qs)
irq_ptr->qib.osliba =
(unsigned long)(irq_ptr->output_qs[0]->slib);
- memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+ memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
+ ASCEBC(irq_ptr->qib.ebcnam, 8);
}
int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
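
setup_qib() now takes the adapter name from the ccw bus id and converts it in place to EBCDIC, which assumes bus ids always have the fixed 8-character form. Illustrative sketch (example id assumed):

char ebcnam[8];

memcpy(ebcnam, "0.0.4711", 8);	/* ccw bus id: exactly 8 chars, no NUL copied */
ASCEBC(ebcnam, 8);		/* ASCII -> EBCDIC, in place */
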
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 24a1940b829e..ef738b42a092 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -214,7 +214,7 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
static void __init ap_init_qci_info(void)
{
if (!ap_qci_available()) {
- AP_DBF(DBF_INFO, "%s QCI not supported\n", __func__);
+ AP_DBF_INFO("%s QCI not supported\n", __func__);
return;
}
@@ -226,18 +226,18 @@ static void __init ap_init_qci_info(void)
ap_qci_info = NULL;
return;
}
- AP_DBF(DBF_INFO, "%s successful fetched initial qci info\n", __func__);
+ AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
if (ap_qci_info->apxa) {
if (ap_qci_info->Na) {
ap_max_adapter_id = ap_qci_info->Na;
- AP_DBF(DBF_INFO, "%s new ap_max_adapter_id is %d\n",
- __func__, ap_max_adapter_id);
+ AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
+ __func__, ap_max_adapter_id);
}
if (ap_qci_info->Nd) {
ap_max_domain_id = ap_qci_info->Nd;
- AP_DBF(DBF_INFO, "%s new ap_max_domain_id is %d\n",
- __func__, ap_max_domain_id);
+ AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
+ __func__, ap_max_domain_id);
}
}
}
@@ -307,7 +307,7 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
* false otherwise.
*/
static bool ap_queue_info(ap_qid_t qid, int *q_type,
- unsigned int *q_fac, int *q_depth)
+ unsigned int *q_fac, int *q_depth, bool *q_decfg)
{
struct ap_queue_status status;
unsigned long info = 0;
@@ -322,6 +322,9 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type,
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
/*
* According to the architecture in all these cases the
* info should be filled. All bits 0 is not possible as
@@ -332,6 +335,7 @@ static bool ap_queue_info(ap_qid_t qid, int *q_type,
*q_type = (int)((info >> 24) & 0xff);
*q_fac = (unsigned int)(info >> 32);
*q_depth = (int)(info & 0xff);
+ *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
switch (*q_type) {
/* For CEX2 and CEX3 the available functions
* are not reflected by the facilities bits.
@@ -618,8 +622,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
- card, queue);
+ AP_DBF_DBG("reprobing queue=%02x.%04x\n",
+ card, queue);
rc = device_reprobe(dev);
}
}
@@ -676,7 +680,10 @@ static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
- int card, queue, devres, drvres, rc;
+ int card, queue, devres, drvres, rc = -ENODEV;
+
+ if (!get_device(dev))
+ return rc;
if (is_queue_dev(dev)) {
/*
@@ -693,7 +700,7 @@ static int ap_device_probe(struct device *dev)
mutex_unlock(&ap_perms_mutex);
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
- return -ENODEV;
+ goto out;
}
/* Add queue/card to list of active queues/cards */
@@ -714,6 +721,9 @@ static int ap_device_probe(struct device *dev)
ap_dev->drv = NULL;
}
+out:
+ if (rc)
+ put_device(dev);
return rc;
}
@@ -740,6 +750,8 @@ static int ap_device_remove(struct device *dev)
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
+ put_device(dev);
+
return 0;
}
@@ -796,7 +808,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
*/
void ap_bus_cfg_chg(void)
{
- AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+ AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
}
@@ -947,7 +959,7 @@ static ssize_t ap_domain_store(struct bus_type *bus,
ap_domain_index = domain;
spin_unlock_bh(&ap_domain_lock);
- AP_DBF(DBF_INFO, "stored new default domain=%d\n", domain);
+ AP_DBF_INFO("stored new default domain=%d\n", domain);
return count;
}
@@ -1208,8 +1220,8 @@ static void ap_select_domain(void)
}
if (dom <= ap_max_domain_id) {
ap_domain_index = dom;
- AP_DBF(DBF_DEBUG, "%s new default domain is %d\n",
- __func__, ap_domain_index);
+ AP_DBF_INFO("%s new default domain is %d\n",
+ __func__, ap_domain_index);
}
out:
spin_unlock_bh(&ap_domain_lock);
@@ -1225,8 +1237,11 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
int comp_type = 0;
/* < CEX2A is not supported */
- if (rawtype < AP_DEVICE_TYPE_CEX2A)
+ if (rawtype < AP_DEVICE_TYPE_CEX2A) {
+ AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
return 0;
+ }
/* up to CEX7 known and fully supported */
if (rawtype <= AP_DEVICE_TYPE_CEX7)
return rawtype;
@@ -1248,11 +1263,12 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
comp_type = apinfo.cat;
}
if (!comp_type)
- AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
- AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
else if (comp_type != rawtype)
- AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
- AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+ AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ rawtype, comp_type);
return comp_type;
}
@@ -1286,155 +1302,280 @@ static int __match_queue_device_with_queue_id(struct device *dev, const void *da
/*
* Helper function for ap_scan_bus().
- * Does the scan bus job for the given adapter id.
+ * Remove card device and associated queue devices.
*/
-static void _ap_scan_bus_adapter(int id)
+static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
- bool broken;
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long) ac->id,
+ __ap_queue_devices_with_id_unregister);
+ device_unregister(&ac->ap_dev.device);
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for all the domains within
+ * a valid adapter given by an ap_card ptr.
+ */
+static inline void ap_scan_domains(struct ap_card *ac)
+{
+ bool decfg;
ap_qid_t qid;
unsigned int func;
- struct ap_card *ac;
struct device *dev;
struct ap_queue *aq;
+ int rc, dom, depth, type;
+
+ /*
+ * Go through the configuration for the domains and compare them
+ * to the existing queue devices. Also take care of the config
+ * and error state for the queue devices.
+ */
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ qid = AP_MKQID(ac->id, dom);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long) qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_usage_domain(dom)) {
+ if (dev) {
+ AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ /* domain is valid, get info from this APQN */
+ if (!ap_queue_info(qid, &type, &func, &depth, &decfg)) {
+ if (aq) {
+ AP_DBF_INFO(
+ "%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ put_device(dev);
+ }
+ continue;
+ }
+ /* if no queue device exists, create a new one */
+ if (!aq) {
+ aq = ap_queue_create(qid, ac->ap_dev.device_type);
+ if (!aq) {
+ AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
+ __func__, ac->id, dom);
+ continue;
+ }
+ aq->card = ac;
+ aq->config = !decfg;
+ dev = &aq->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = &ac->ap_dev.device;
+ dev_set_name(dev, "%02x.%04x", ac->id, dom);
+ /* register queue device */
+ rc = device_register(dev);
+ if (rc) {
+ AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg)
+ AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
+ __func__, ac->id, dom);
+ else
+ AP_DBF_INFO("%s(%d,%d) new queue device created\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* Check config state on the already existing queue device */
+ spin_lock_bh(&aq->lock);
+ if (decfg && aq->config) {
+ /* config off this queue device */
+ aq->config = false;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
+ }
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_INFO("%s(%d,%d) queue device config off\n",
+ __func__, ac->id, dom);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ goto put_dev_and_continue;
+ }
+ if (!decfg && !aq->config) {
+ /* config on this queue device */
+ aq->config = true;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ }
+ spin_unlock_bh(&aq->lock);
+ AP_DBF_INFO("%s(%d,%d) queue device config on\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* handle other error states */
+ if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
+ spin_unlock_bh(&aq->lock);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ /* re-init (with reset) the queue device */
+ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ spin_unlock_bh(&aq->lock);
+put_dev_and_continue:
+ put_device(dev);
+ }
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static inline void ap_scan_adapter(int ap)
+{
+ bool decfg;
+ ap_qid_t qid;
+ unsigned int func;
+ struct device *dev;
+ struct ap_card *ac;
int rc, dom, depth, type, comp_type;
- /* check if there is a card device registered with this id */
+ /* Is there currently a card device for this adapter ? */
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) id,
+ (void *)(long) ap,
__match_card_device_with_id);
ac = dev ? to_ap_card(dev) : NULL;
- if (!ap_test_config_card_id(id)) {
- if (dev) {
- /* Card device has been removed from configuration */
- bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) id,
- __ap_queue_devices_with_id_unregister);
- device_unregister(dev);
+
+ /* Adapter not in configuration ? */
+ if (!ap_test_config_card_id(ap)) {
+ if (ac) {
+ AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
}
return;
}
/*
- * This card id is enabled in the configuration. If we already have
- * a card device with this id, check if type and functions are still
- * the very same. Also verify that at least one queue is available.
+ * Adapter ap is valid in the current configuration. So do some checks:
+ * If no card device exists, build one. If a card device exists, check
+ * for type and functions changed. For all this we need to find a valid
+ * APQN first.
*/
- if (ac) {
- /* find the first valid queue */
- for (dom = 0; dom < AP_DOMAINS; dom++) {
- qid = AP_MKQID(id, dom);
- if (ap_queue_info(qid, &type, &func, &depth))
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++)
+ if (ap_test_config_usage_domain(dom)) {
+ qid = AP_MKQID(ap, dom);
+ if (ap_queue_info(qid, &type, &func, &depth, &decfg))
break;
}
- broken = false;
- if (dom >= AP_DOMAINS) {
- /* no accessible queue on this card */
- broken = true;
- } else if (ac->raw_hwtype != type) {
- /* card type has changed */
- AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
- broken = true;
- } else if (ac->functions != func) {
- /* card functions have changed */
- AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
- broken = true;
+ if (dom > ap_max_domain_id) {
+ /* Could not find a valid APQN for this adapter */
+ if (ac) {
+ AP_DBF_INFO(
+ "%s(%d) no type info (no APQN found), rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ } else {
+ AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
+ __func__, ap);
}
- if (broken) {
- /* unregister card device and associated queues */
- bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) id,
- __ap_queue_devices_with_id_unregister);
- device_unregister(dev);
+ return;
+ }
+ if (!type) {
+ /* No adapter type info available, an unusable adapter */
+ if (ac) {
+ AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
- /* go back if there is no valid queue on this card */
- if (dom >= AP_DOMAINS)
- return;
- ac = NULL;
+ } else {
+ AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
+ __func__, ap);
}
+ return;
}
- /*
- * Go through all possible queue ids. Check and maybe create or release
- * queue devices for this card. If there exists no card device yet,
- * create a card device also.
- */
- for (dom = 0; dom < AP_DOMAINS; dom++) {
- qid = AP_MKQID(id, dom);
- dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) qid,
- __match_queue_device_with_qid);
- aq = dev ? to_ap_queue(dev) : NULL;
- if (!ap_test_config_usage_domain(dom)) {
- if (dev) {
- /* Queue device exists but has been
- * removed from configuration.
- */
- device_unregister(dev);
- put_device(dev);
- }
- continue;
- }
- /* try to fetch infos about this queue */
- broken = !ap_queue_info(qid, &type, &func, &depth);
- if (dev) {
- if (!broken) {
- spin_lock_bh(&aq->lock);
- broken = aq->sm_state == AP_SM_STATE_BORKED;
- spin_unlock_bh(&aq->lock);
+ if (ac) {
+ /* Check APQN against existing card device for changes */
+ if (ac->raw_hwtype != type) {
+ AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
+ __func__, ap, type);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else if (ac->functions != func) {
+ AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
+ __func__, ap, func);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else {
+ if (decfg && ac->config) {
+ ac->config = false;
+ AP_DBF_INFO("%s(%d) card device config off\n",
+ __func__, ap);
+
}
- if (broken) {
- /* Remove broken device */
- AP_DBF(DBF_DEBUG,
- "removing broken queue=%02x.%04x\n",
- id, dom);
- device_unregister(dev);
+ if (!decfg && !ac->config) {
+ ac->config = true;
+ AP_DBF_INFO("%s(%d) card device config on\n",
+ __func__, ap);
}
- put_device(dev);
- continue;
}
- if (broken)
- continue;
- /* a new queue device is needed, check out comp type */
+ }
+
+ if (!ac) {
+ /* Build a new card device */
comp_type = ap_get_compatible_type(qid, type, func);
- if (!comp_type)
- continue;
- /* maybe a card device needs to be created first */
+ if (!comp_type) {
+ AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
+ __func__, ap, type);
+ return;
+ }
+ ac = ap_card_create(ap, depth, type, comp_type, func);
if (!ac) {
- ac = ap_card_create(id, depth, type, comp_type, func);
- if (!ac)
- continue;
- ac->ap_dev.device.bus = &ap_bus_type;
- ac->ap_dev.device.parent = ap_root_device;
- dev_set_name(&ac->ap_dev.device, "card%02x", id);
- /* Register card device with AP bus */
- rc = device_register(&ac->ap_dev.device);
- if (rc) {
- put_device(&ac->ap_dev.device);
- ac = NULL;
- break;
- }
- /* get it and thus adjust reference counter */
- get_device(&ac->ap_dev.device);
+ AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
+ __func__, ap);
+ return;
}
- /* now create the new queue device */
- aq = ap_queue_create(qid, comp_type);
- if (!aq)
- continue;
- aq->card = ac;
- aq->ap_dev.device.bus = &ap_bus_type;
- aq->ap_dev.device.parent = &ac->ap_dev.device;
- dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom);
- /* Register queue device */
- rc = device_register(&aq->ap_dev.device);
+ ac->config = !decfg;
+ dev = &ac->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = ap_root_device;
+ dev_set_name(dev, "card%02x", ap);
+ /* Register the new card device with AP bus */
+ rc = device_register(dev);
if (rc) {
- put_device(&aq->ap_dev.device);
- continue;
+ AP_DBF_WARN("%s(%d) device_register() failed\n",
+ __func__, ap);
+ put_device(dev);
+ return;
}
- } /* end domain loop */
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg)
+ AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
+ __func__, ap, type, func);
+ else
+ AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
+ __func__, ap, type, func);
+ }
+
+ /* Verify the domains and the queue devices for this card */
+ ap_scan_domains(ac);
- if (ac)
- put_device(&ac->ap_dev.device);
+ /* release the card device */
+ put_device(&ac->ap_dev.device);
}
/**
@@ -1443,16 +1584,16 @@ static void _ap_scan_bus_adapter(int id)
*/
static void ap_scan_bus(struct work_struct *unused)
{
- int id;
+ int ap;
ap_fetch_qci_info(ap_qci_info);
ap_select_domain();
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+ AP_DBF_DBG("%s running\n", __func__);
/* loop over all possible adapters */
- for (id = 0; id < AP_DEVICES; id++)
- _ap_scan_bus_adapter(id);
+ for (ap = 0; ap <= ap_max_adapter_id; ap++)
+ ap_scan_adapter(ap);
/* check if there is at least one queue available with default domain */
if (ap_domain_index >= 0) {
@@ -1463,9 +1604,8 @@ static void ap_scan_bus(struct work_struct *unused)
if (dev)
put_device(dev);
else
- AP_DBF(DBF_INFO,
- "no queue device with default domain %d available\n",
- ap_domain_index);
+ AP_DBF_INFO("no queue device with default domain %d available\n",
+ ap_domain_index);
}
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
@@ -1575,7 +1715,6 @@ static int __init ap_module_init(void)
*/
if (MACHINE_IS_VM)
poll_timeout = 1500000;
- spin_lock_init(&ap_poll_timer_lock);
hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ap_poll_timer.function = ap_poll_timeout;
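
Taken together, the ap_bus.c rework splits the old monolithic per-adapter scan into a two-level walk and adds a config dimension to the card/queue lifecycle. A condensed map of the new flow, drawn from the patch itself:

/*
 * ap_scan_bus()
 *   for (ap = 0; ap <= ap_max_adapter_id; ap++)
 *     ap_scan_adapter(ap)     creates/updates/removes the card device
 *       ap_scan_domains(ac)   per APQN: creates/updates/removes queues
 *
 * queue config transitions inside ap_scan_domains():
 *   decfg  && aq->config     -> config off, dev_state = ERROR
 *                               (DECONFIGURED), flush msgs with -EAGAIN
 *   !decfg && !aq->config    -> config on, dev_state = OPERATING,
 *                               sm_state = RESET_START
 *   !decfg && state == ERROR -> flush, then ap_queue_init_state() re-init
 */
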
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 1ea046324e8f..5029b80132aa 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -50,6 +50,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_RESPONSE_NO_FIRST_PART 0x13
#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_INVALID_DOMAIN 0x42
/*
* Known device types
@@ -86,15 +87,12 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
* AP queue state machine states
*/
enum ap_sm_state {
- AP_SM_STATE_RESET_START,
+ AP_SM_STATE_RESET_START = 0,
AP_SM_STATE_RESET_WAIT,
AP_SM_STATE_SETIRQ_WAIT,
AP_SM_STATE_IDLE,
AP_SM_STATE_WORKING,
AP_SM_STATE_QUEUE_FULL,
- AP_SM_STATE_REMOVE, /* about to be removed from driver */
- AP_SM_STATE_UNBOUND, /* momentary not bound to a driver */
- AP_SM_STATE_BORKED, /* broken */
NR_AP_SM_STATES
};
@@ -118,6 +116,17 @@ enum ap_sm_wait {
NR_AP_SM_WAIT
};
+/*
+ * AP queue device states
+ */
+enum ap_dev_state {
+ AP_DEV_STATE_UNINITIATED = 0, /* fresh and virgin, not touched */
+ AP_DEV_STATE_OPERATING, /* queue dev is working normal */
+ AP_DEV_STATE_SHUTDOWN, /* remove/unbind/shutdown in progress */
+ AP_DEV_STATE_ERROR, /* device is in error state */
+ NR_AP_DEV_STATES
+};
+
struct ap_device;
struct ap_message;
@@ -158,6 +167,7 @@ struct ap_card {
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
+ bool config; /* configured state */
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
@@ -169,10 +179,11 @@ struct ap_queue {
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
void *private; /* ap driver private pointer. */
+ enum ap_dev_state dev_state; /* queue device state */
+ bool config; /* configured state */
ap_qid_t qid; /* AP queue id. */
int interrupt; /* indicate if interrupts are enabled */
int queue_count; /* # messages currently on AP queue. */
- enum ap_sm_state sm_state; /* ap queue state machine state */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
u64 total_request_count; /* # requests ever for this AP device.*/
@@ -181,18 +192,45 @@ struct ap_queue {
struct list_head pendingq; /* List of message sent to AP queue. */
struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
+ enum ap_sm_state sm_state; /* ap queue state machine state */
+ int last_err_rc; /* last error state response code */
};
#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+/* failure injection cmd struct */
+struct ap_fi {
+ union {
+ u16 cmd; /* fi flags + action */
+ struct {
+ u8 flags; /* fi flags only */
+ u8 action; /* fi action only */
+ };
+ };
+};
+
+/* all currently known fi actions */
+enum ap_fi_actions {
+ AP_FI_ACTION_CCA_AGENT_FF = 0x01,
+ AP_FI_ACTION_CCA_DOM_INVAL = 0x02,
+ AP_FI_ACTION_NQAP_QID_INVAL = 0x03,
+};
+
+/* all currently known fi flags */
+enum ap_fi_flags {
+ AP_FI_FLAG_NO_RETRY = 0x01,
+ AP_FI_FLAG_TOGGLE_SPECIAL = 0x02,
+};
+
struct ap_message {
struct list_head list; /* Request queueing. */
unsigned long long psmid; /* Message id. */
void *msg; /* Pointer to message buffer. */
unsigned int len; /* Message length. */
- u32 flags; /* Flags, see AP_MSG_FLAG_xxx */
+ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
+ struct ap_fi fi; /* Failure Injection cmd */
int rc; /* Return code for this message */
void *private; /* ap driver private pointer. */
/* receive is called from tasklet context */
@@ -200,7 +238,7 @@ struct ap_message {
struct ap_message *);
};
-#define AP_MSG_FLAG_SPECIAL (1 << 16) /* flag msg as 'special' with NQAP */
+#define AP_MSG_FLAG_SPECIAL 1 /* flag msg as 'special' with NQAP */
/**
* ap_init_message() - Initialize ap_message.
@@ -234,7 +272,7 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
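
The new ap_fi union overlays the 16-bit cmd with a flags/action byte pair; on big-endian s390 the high byte is the flags and the low byte the action. A hedged illustration (values taken from the enums above):

struct ap_fi fi;

fi.cmd = 0x0103;	/* big-endian: flags = 0x01, action = 0x03, i.e. */
			/* AP_FI_FLAG_NO_RETRY + AP_FI_ACTION_NQAP_QID_INVAL */
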
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 6588713319ba..d98bdd28d23e 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
+#include <asm/sclp.h>
#include "ap_bus.h"
@@ -139,6 +140,38 @@ static ssize_t modalias_show(struct device *dev,
static DEVICE_ATTR_RO(modalias);
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ac->config ? 1 : 0);
+}
+
+static ssize_t config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = 0, cfg;
+ struct ap_card *ac = to_ap_card(dev);
+
+ if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
+ return -EINVAL;
+
+ if (cfg && !ac->config)
+ rc = sclp_ap_configure(ac->id);
+ else if (!cfg && ac->config)
+ rc = sclp_ap_deconfigure(ac->id);
+ if (rc)
+ return rc;
+
+ ac->config = cfg ? true : false;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(config);
+
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
@@ -148,6 +181,7 @@ static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
+ &dev_attr_config.attr,
NULL
};
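
Since the per-card config attribute is writable, the state can also be toggled from user space, e.g. echo 0 > /sys/bus/ap/devices/card00/config to deconfigure card 0 via SCLP (exact sysfs path assumed from the usual AP bus layout). The per-queue config attribute added in ap_queue.c below is deliberately read-only: queues follow their card's state.
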
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index dc675eb5aef6..34b0350d0b1a 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -20,6 +20,14 @@
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+#define AP_DBF_ERR(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define AP_DBF_WARN(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define AP_DBF_INFO(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define AP_DBF_DBG(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 688ebebbf98c..ecefc25eff0c 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -195,7 +195,11 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
aq->sm_state = AP_SM_STATE_IDLE;
return AP_SM_WAIT_NONE;
default:
- aq->sm_state = AP_SM_STATE_BORKED;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
@@ -210,12 +214,20 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ ap_qid_t qid = aq->qid;
if (aq->requestq_count <= 0)
return AP_SM_WAIT_NONE;
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
- status = __ap_send(aq->qid, ap_msg->psmid,
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
+ AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
+ __func__, ap_msg->fi.cmd);
+ qid = 0xFF00;
+ }
+#endif
+ status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
switch (status.response_code) {
@@ -237,6 +249,9 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
case AP_RESPONSE_RESET_IN_PROGRESS:
aq->sm_state = AP_SM_STATE_RESET_WAIT;
return AP_SM_WAIT_TIMEOUT;
+ case AP_RESPONSE_INVALID_DOMAIN:
+ AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
+ fallthrough;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
@@ -245,7 +260,11 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, NULL);
return AP_SM_WAIT_AGAIN;
default:
- aq->sm_state = AP_SM_STATE_BORKED;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
@@ -278,13 +297,12 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
aq->sm_state = AP_SM_STATE_RESET_WAIT;
aq->interrupt = AP_INTR_DISABLED;
return AP_SM_WAIT_TIMEOUT;
- case AP_RESPONSE_BUSY:
- return AP_SM_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
default:
- aq->sm_state = AP_SM_STATE_BORKED;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
@@ -323,7 +341,11 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
- aq->sm_state = AP_SM_STATE_BORKED;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
@@ -360,7 +382,11 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
case AP_RESPONSE_NO_PENDING_REPLY:
return AP_SM_WAIT_TIMEOUT;
default:
- aq->sm_state = AP_SM_STATE_BORKED;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return AP_SM_WAIT_NONE;
}
}
@@ -393,23 +419,14 @@ static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
[AP_SM_EVENT_POLL] = ap_sm_read,
[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_SM_STATE_REMOVE] = {
- [AP_SM_EVENT_POLL] = ap_sm_nop,
- [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_SM_STATE_UNBOUND] = {
- [AP_SM_EVENT_POLL] = ap_sm_nop,
- [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_SM_STATE_BORKED] = {
- [AP_SM_EVENT_POLL] = ap_sm_nop,
- [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
- },
};
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
- return ap_jumptable[aq->sm_state][event](aq);
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ return ap_jumptable[aq->sm_state][event](aq);
+ else
+ return AP_SM_WAIT_NONE;
}
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
@@ -429,12 +446,20 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
+ bool valid = false;
u64 req_cnt;
spin_lock_bh(&aq->lock);
- req_cnt = aq->total_request_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ req_cnt = aq->total_request_count;
+ valid = true;
+ }
spin_unlock_bh(&aq->lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+
+ if (valid)
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ else
+ return scnprintf(buf, PAGE_SIZE, "-\n");
}
static ssize_t request_count_store(struct device *dev,
@@ -459,7 +484,8 @@ static ssize_t requestq_count_show(struct device *dev,
unsigned int reqq_cnt = 0;
spin_lock_bh(&aq->lock);
- reqq_cnt = aq->requestq_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
@@ -473,7 +499,8 @@ static ssize_t pendingq_count_show(struct device *dev,
unsigned int penq_cnt = 0;
spin_lock_bh(&aq->lock);
- penq_cnt = aq->pendingq_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
@@ -542,12 +569,138 @@ static ssize_t interrupt_show(struct device *dev,
static DEVICE_ATTR_RO(interrupt);
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RO(config);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+static ssize_t states_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ /* queue device state */
+ switch (aq->dev_state) {
+ case AP_DEV_STATE_UNINITIATED:
+ rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
+ break;
+ case AP_DEV_STATE_OPERATING:
+ rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
+ break;
+ case AP_DEV_STATE_SHUTDOWN:
+ rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
+ break;
+ case AP_DEV_STATE_ERROR:
+ rc = scnprintf(buf, PAGE_SIZE, "ERROR");
+ break;
+ default:
+ rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
+ }
+ /* state machine state */
+ if (aq->dev_state) {
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [RESET_START]\n");
+ break;
+ case AP_SM_STATE_RESET_WAIT:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [RESET_WAIT]\n");
+ break;
+ case AP_SM_STATE_SETIRQ_WAIT:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [SETIRQ_WAIT]\n");
+ break;
+ case AP_SM_STATE_IDLE:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [IDLE]\n");
+ break;
+ case AP_SM_STATE_WORKING:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [WORKING]\n");
+ break;
+ case AP_SM_STATE_QUEUE_FULL:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [FULL]\n");
+ break;
+ default:
+ rc += scnprintf(buf + rc, PAGE_SIZE - rc,
+ " [UNKNOWN]\n");
+ }
+ }
+ spin_unlock_bh(&aq->lock);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(states);
+
+static ssize_t last_err_rc_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = aq->last_err_rc;
+ spin_unlock_bh(&aq->lock);
+
+ switch (rc) {
+ case AP_RESPONSE_NORMAL:
+ return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
+ case AP_RESPONSE_DECONFIGURED:
+ return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
+ case AP_RESPONSE_CHECKSTOPPED:
+ return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
+ case AP_RESPONSE_BUSY:
+ return scnprintf(buf, PAGE_SIZE, "BUSY\n");
+ case AP_RESPONSE_INVALID_ADDRESS:
+ return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
+ case AP_RESPONSE_Q_FULL:
+ return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
+ case AP_RESPONSE_INDEX_TOO_BIG:
+ return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
+ case AP_RESPONSE_NO_FIRST_PART:
+ return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
+ default:
+ return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
+ }
+}
+static DEVICE_ATTR_RO(last_err_rc);
+#endif
+
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
+ &dev_attr_config.attr,
+#ifdef CONFIG_ZCRYPT_DEBUG
+ &dev_attr_states.attr,
+ &dev_attr_last_err_rc.attr,
+#endif
NULL
};
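
The new per-queue 'config' attribute is readable from userspace like any
other sysfs file. A minimal sketch, assuming the usual AP bus naming where
queue devices appear as e.g. "01.0005" under /sys/bus/ap/devices/:

    #include <stdio.h>

    /* Read the 'config' attribute of an AP queue device.
     * Returns 1 (configured), 0 (deconfigured) or -1 on error. */
    static int ap_queue_config(const char *qdev)
    {
            char path[128];
            int v = -1;
            FILE *f;

            snprintf(path, sizeof(path), "/sys/bus/ap/devices/%s/config", qdev);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &v) != 1)
                    v = -1;
            fclose(f);
            return v;
    }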
@@ -587,7 +740,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
aq->qid = qid;
- aq->sm_state = AP_SM_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
INIT_LIST_HEAD(&aq->pendingq);
@@ -612,22 +764,30 @@ EXPORT_SYMBOL(ap_queue_init_reply);
* @aq: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
- /* For asynchronous message handling a valid receive-callback
- * is required.
- */
+ int rc = 0;
+
+ /* msg needs to have a valid receive-callback */
BUG_ON(!ap_msg->receive);
spin_lock_bh(&aq->lock);
- /* Queue the message. */
- list_add_tail(&ap_msg->list, &aq->requestq);
- aq->requestq_count++;
- aq->total_request_count++;
- atomic64_inc(&aq->card->total_request_count);
+
+ /* only allow new messages to be queued if the device state is ok */
+ if (aq->dev_state == AP_DEV_STATE_OPERATING) {
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+ atomic64_inc(&aq->card->total_request_count);
+ } else {
+ rc = -ENODEV;
+ }
+
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+
spin_unlock_bh(&aq->lock);
+
+ return rc;
}
EXPORT_SYMBOL(ap_queue_message);
@@ -698,8 +858,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
spin_lock_bh(&aq->lock);
/* flush queue */
__ap_flush_queue(aq);
- /* set REMOVE state to prevent new messages are queued in */
- aq->sm_state = AP_SM_STATE_REMOVE;
+ /* move queue device state to SHUTDOWN (removal in progress) */
+ aq->dev_state = AP_DEV_STATE_SHUTDOWN;
spin_unlock_bh(&aq->lock);
del_timer_sync(&aq->timeout);
}
@@ -707,21 +867,21 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
void ap_queue_remove(struct ap_queue *aq)
{
/*
- * all messages have been flushed and the state is
- * AP_SM_STATE_REMOVE. Now reset with zero which also
- * clears the irq registration and move the state
- * to AP_SM_STATE_UNBOUND to signal that this queue
- * is not used by any driver currently.
+ * all messages have been flushed and the device state
+ * is SHUTDOWN. Now reset with zero which also clears
+ * the irq registration and move the device state
+ * to the initial value AP_DEV_STATE_UNINITIATED.
*/
spin_lock_bh(&aq->lock);
ap_zapq(aq->qid);
- aq->sm_state = AP_SM_STATE_UNBOUND;
+ aq->dev_state = AP_DEV_STATE_UNINITIATED;
spin_unlock_bh(&aq->lock);
}
void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
+ aq->dev_state = AP_DEV_STATE_OPERATING;
aq->sm_state = AP_SM_STATE_RESET_START;
ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
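
With ap_queue_message() now returning int, in-kernel callers can no longer
assume a message was accepted. A hedged sketch of the adjusted calling
pattern; the completion-based wait is illustrative and not part of this
patch:

    #include <linux/completion.h>
    #include "ap_bus.h"             /* struct ap_queue, ap_queue_message() */

    /* Queue a message and propagate -ENODEV if the queue device
     * state is no longer AP_DEV_STATE_OPERATING. */
    static int example_send_and_wait(struct ap_queue *aq,
                                     struct ap_message *ap_msg,
                                     struct completion *work_done)
    {
            int rc;

            rc = ap_queue_message(aq, ap_msg);
            if (rc)
                    return rc;
            wait_for_completion(work_done);
            return 0;
    }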
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 5896e5282a4e..dd84995049b9 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -31,11 +31,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 protected key interface");
-#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
-#define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */
-
-/* mask of available pckmo subfunctions, fetched once at module init */
-static cpacf_mask_t pckmo_functions;
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internally */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
/*
* debug feature data and functions
@@ -90,6 +88,9 @@ static int pkey_clr2protkey(u32 keytype,
const struct pkey_clrkey *clrkey,
struct pkey_protkey *protkey)
{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
long fc;
int keysize;
u8 paramblock[64];
@@ -113,11 +114,13 @@ static int pkey_clr2protkey(u32 keytype,
return -EINVAL;
}
- /*
- * Check if the needed pckmo subfunction is available.
- * These subfunctions can be enabled/disabled by customers
- * in the LPAR profile or may even change on the fly.
- */
+ /* Did we already check for PCKMO? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ return -ENODEV;
+ }
+ /* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
DEBUG_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
@@ -237,8 +240,9 @@ static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
- rc = ep11_key2protkey(card, dom, key, kb->head.len,
- pkey->protkey, &pkey->len, &pkey->type);
+ pkey->len = sizeof(pkey->protkey);
+ rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len, &pkey->type);
if (rc == 0)
break;
}
@@ -449,15 +453,21 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
break;
}
case TOKVER_EP11_AES: {
- if (keylen < MINEP11AESKEYBLOBSIZE)
- goto out;
/* check ep11 key for exportable as protected key */
- rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, protkey);
break;
}
+ case TOKVER_EP11_AES_WITH_HEADER:
+ /* check whether ep11 key with header is exportable as protected key */
+ rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
+ protkey);
+ break;
default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version);
@@ -661,13 +671,14 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*ksize = (enum pkey_key_size) t->bitsize;
rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX3C, t->mkvp, 0, 1);
+ ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&_apqns, &_nr_apqns,
*cardnr, *domain,
- ZCRYPT_CEX3C, 0, t->mkvp, 1);
+ ZCRYPT_CEX3C, AES_MK_SET,
+ 0, t->mkvp, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
@@ -697,13 +708,14 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
}
rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX6, t->mkvp0, 0, 1);
+ ZCRYPT_CEX6, AES_MK_SET, t->mkvp0, 0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
if (rc == -ENODEV) {
rc = cca_findcard2(&_apqns, &_nr_apqns,
*cardnr, *domain,
- ZCRYPT_CEX6, 0, t->mkvp0, 1);
+ ZCRYPT_CEX6, AES_MK_SET,
+ 0, t->mkvp0, 1);
if (rc == 0 && flags)
*flags = PKEY_FLAGS_MATCH_ALT_MKVP;
}
@@ -717,7 +729,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
&& hdr->version == TOKVER_EP11_AES) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
- rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -778,7 +790,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (hdr->version == TOKVER_EP11_AES) {
if (keylen < sizeof(struct ep11keyblob))
return -EINVAL;
- if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+ if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen, pkey);
@@ -804,9 +816,10 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
else { /* EP11 AES secure key blob */
struct ep11keyblob *kb = (struct ep11keyblob *) key;
- rc = ep11_key2protkey(card, dom, key, kb->head.len,
- pkey->protkey, &pkey->len,
- &pkey->type);
+ pkey->len = sizeof(pkey->protkey);
+ rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len,
+ &pkey->type);
}
if (rc == 0)
break;
@@ -825,7 +838,27 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL;
- if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *)
+ (key + sizeof(struct ep11kblob_header));
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key)) {
int minhwtype = 0, api = 0;
struct ep11keyblob *kb = (struct ep11keyblob *) key;
@@ -863,7 +896,26 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, cur_mkvp, old_mkvp, 1);
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+
+ if (t->secid == 0x20) {
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+ /* unknown cca internal 2 token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
} else
@@ -900,10 +952,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
if (ktype == PKEY_TYPE_CCA_CIPHER)
minhwtype = ZCRYPT_CEX6;
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- minhwtype, cur_mkvp, old_mkvp, 1);
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, 1);
+ if (rc)
+ goto out;
+ } else if (ktype == PKEY_TYPE_CCA_ECC) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = *((u64 *) cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = *((u64 *) alt_mkvp);
+ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- } else if (ktype == PKEY_TYPE_EP11) {
+
+ } else if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES ||
+ ktype == PKEY_TYPE_EP11_ECC) {
u8 *wkvp = NULL;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
@@ -929,6 +997,111 @@ out:
return rc;
}
+static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen, u32 *protkeytype,
+ u8 *protkey, u32 *protkeylen)
+{
+ int i, card, dom, rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /* check for at least one apqn given */
+ if (!apqns || !nr_apqns)
+ return -EINVAL;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES key blob with header */
+ if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_ECC_WITH_HEADER
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 ECC key blob with header */
+ if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key)) {
+ /* EP11 AES key blob with header in session field */
+ if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
+ /* CCA AES data key */
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ /* CCA AES cipher key */
+ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ DEBUG_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ /* CCA ECC (private) key */
+ if (keylen < sizeof(struct eccprivkeytoken))
+ return -EINVAL;
+ if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ struct pkey_protkey pkey;
+
+ rc = pkey_nonccatok2pkey(key, keylen, &pkey);
+ if (rc)
+ return rc;
+ memcpy(protkey, pkey.protkey, pkey.len);
+ *protkeylen = pkey.len;
+ *protkeytype = pkey.type;
+ return 0;
+ } else {
+ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
+ return -EINVAL;
+ }
+
+ /* simply try all apqns from the list */
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ card = apqns[i].card;
+ dom = apqns[i].domain;
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
+ rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+ protkey, protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(key))
+ rc = ep11_kblob2protkey(card, dom, key, hdr->len,
+ protkey, protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES)
+ rc = cca_sec2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC)
+ rc = cca_cipher2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA)
+ rc = cca_ecc2protkey(card, dom, key, protkey,
+ protkeylen, protkeytype);
+ else
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
/*
* File io functions
*/
@@ -1329,6 +1502,55 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kfree(apqns);
break;
}
+ case PKEY_KBLOB2PROTK3: {
+ struct pkey_kblob2pkey3 __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey3 ktp;
+ struct pkey_apqn *apqns = NULL;
+ u32 protkeylen = PROTKEYBLOBBUFSIZE;
+ u8 *kkey, *protkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ protkey = kmalloc(protkeylen, GFP_KERNEL);
+ if (!protkey) {
+ kfree(apqns);
+ kfree(kkey);
+ return -ENOMEM;
+ }
+ rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries, kkey,
+ ktp.keylen, &ktp.pkeytype,
+ protkey, &protkeylen);
+ DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ kfree(apqns);
+ kfree(kkey);
+ if (rc) {
+ kfree(protkey);
+ break;
+ }
+ if (ktp.pkey && ktp.pkeylen) {
+ if (protkeylen > ktp.pkeylen) {
+ kfree(protkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+ kfree(protkey);
+ return -EFAULT;
+ }
+ }
+ kfree(protkey);
+ ktp.pkeylen = protkeylen;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
+ break;
+ }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1589,7 +1811,7 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
/* build a list of apqns able to generate an cipher key */
rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX6, 0, 0, 0);
+ ZCRYPT_CEX6, 0, 0, 0, 0);
if (rc)
return rc;
@@ -1838,7 +2060,7 @@ static struct miscdevice pkey_dev = {
*/
static int __init pkey_init(void)
{
- cpacf_mask_t kmc_functions;
+ cpacf_mask_t func_mask;
/*
* The pckmo instruction should be available - even if we don't
@@ -1846,15 +2068,15 @@ static int __init pkey_init(void)
* is also the minimum level for the kmc instructions which
* are able to work with protected keys.
*/
- if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
return -ENODEV;
/* check for kmc instructions available */
- if (!cpacf_query(CPACF_KMC, &kmc_functions))
+ if (!cpacf_query(CPACF_KMC, &func_mask))
return -ENODEV;
- if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
+ if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) ||
+ !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256))
return -ENODEV;
pkey_debug_init();
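
A hedged userspace sketch of the new PKEY_KBLOB2PROTK3 ioctl handled above;
the struct field names mirror the kernel code, while the device path and
error handling are illustrative:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/pkey.h>

    /* Convert an arbitrary key blob into a protected key via /dev/pkey.
     * On success *plen and *ptype carry the resulting length and type. */
    static int kblob2protkey3(struct pkey_apqn *apqns, unsigned int nr_apqns,
                              unsigned char *key, unsigned int keylen,
                              unsigned char *pkeybuf, unsigned int *plen,
                              unsigned int *ptype)
    {
            struct pkey_kblob2pkey3 ktp;
            int fd, rc;

            memset(&ktp, 0, sizeof(ktp));
            ktp.apqns = apqns;
            ktp.apqn_entries = nr_apqns;
            ktp.key = key;
            ktp.keylen = keylen;
            ktp.pkey = pkeybuf;
            ktp.pkeylen = *plen;

            fd = open("/dev/pkey", O_RDWR);
            if (fd < 0)
                    return -1;
            rc = ioctl(fd, PKEY_KBLOB2PROTK3, &ktp);
            close(fd);
            if (rc != 0)
                    return -1;
            *plen = ktp.pkeylen;
            *ptype = ktp.pkeytype;
            return 0;
    }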
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index f314936b5462..f60f9fb25214 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
+#include <linux/capability.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
@@ -602,13 +603,13 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
unsigned int pref_weight)
{
if (!pref_zc)
- return false;
+ return true;
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
- return atomic64_read(&zc->card->total_request_count) >
+ return atomic64_read(&zc->card->total_request_count) <
atomic64_read(&pref_zc->card->total_request_count);
- return weight > pref_weight;
+ return weight < pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
@@ -617,30 +618,39 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
unsigned int pref_weight)
{
if (!pref_zq)
- return false;
+ return true;
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
- return zq->queue->total_request_count >
+ return zq->queue->total_request_count <
pref_zq->queue->total_request_count;
- return weight > pref_weight;
+ return weight < pref_weight;
}
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ica_rsa_modexpo *mex)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight = 0, pref_weight = 0;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
- int qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
if (mex->outputdatalength < mex->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@@ -662,8 +672,9 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online accelarator and CCA cards */
- if (!zc->online || !(zc->card->functions & 0x18000000))
+ /* Check for usable accelerator or CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > mex->inputdatalength ||
@@ -673,26 +684,35 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rsa_modexpo)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo ||
+ !zq->queue->config)
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -701,30 +721,44 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+ rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(mex, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
static long zcrypt_rsa_crt(struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ica_rsa_modexpo_crt *crt)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight = 0, pref_weight = 0;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
- int qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+ ap_init_message(&ap_msg);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
if (crt->outputdatalength < crt->inputdatalength) {
func_code = 0;
rc = -EINVAL;
@@ -746,8 +780,9 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online accelarator and CCA cards */
- if (!zc->online || !(zc->card->functions & 0x18000000))
+ /* Check for usable accelerator or CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x18000000))
continue;
/* Check for size limits */
if (zc->min_mod_size > crt->inputdatalength ||
@@ -757,26 +792,35 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rsa_modexpo_crt)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo_crt ||
+ !zq->queue->config)
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -785,35 +829,52 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+ rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(crt, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
-static long _zcrypt_send_cprb(struct ap_perms *perms,
+static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ica_xcRB *xcRB)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
- unsigned int weight = 0, pref_weight = 0;
+ unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
unsigned short *domain, tdom;
- int qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
xcRB->status = 0;
ap_init_message(&ap_msg);
- rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+ if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
+ ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
+ __func__, tr->fi.cmd);
+ xcRB->agent_ID = 0x4646;
+ }
+#endif
+
+ rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -832,8 +893,9 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online CCA cards */
- if (!zc->online || !(zc->card->functions & 0x10000000))
+ /* Check for usable CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x10000000))
continue;
/* Check for user selected CCA card */
if (xcRB->user_defined != AUTOSELECT &&
@@ -843,13 +905,18 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
+ /* check if device is usable and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
+ !zq->queue->config ||
(tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
@@ -857,15 +924,19 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -878,14 +949,26 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
- rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
+ ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
+ __func__, tr->fi.cmd);
+ *domain = 99;
+ }
+#endif
+
+ rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(xcRB, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
@@ -893,7 +976,7 @@ out:
long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
- return _zcrypt_send_cprb(&ap_perms, xcRB);
+ return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB);
}
EXPORT_SYMBOL(zcrypt_send_cprb);
@@ -924,23 +1007,29 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
return false;
}
-static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ep11_target_dev *targets;
unsigned short target_num;
- unsigned int weight = 0, pref_weight = 0;
+ unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
struct ap_message ap_msg;
- int qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc = -ENODEV;
struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
ap_init_message(&ap_msg);
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (tr && tr->fi.cmd)
+ ap_msg.fi.cmd = tr->fi.cmd;
+#endif
+
target_num = (unsigned short) xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
@@ -956,7 +1045,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
}
uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
- if (copy_from_user(targets, uptr,
+ if (z_copy_from_user(userspace, targets, uptr,
target_num * sizeof(*targets))) {
func_code = 0;
rc = -EFAULT;
@@ -964,7 +1053,7 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
}
}
- rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+ rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code);
if (rc)
goto out_free;
@@ -972,8 +1061,9 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online EP11 cards */
- if (!zc->online || !(zc->card->functions & 0x04000000))
+ /* Check for usable EP11 card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x04000000))
continue;
/* Check for user selected EP11 card */
if (targets &&
@@ -983,13 +1073,18 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
+ /* check if device is usable and eligible */
if (!zq->online ||
!zq->ops->send_ep11_cprb ||
+ !zq->queue->config ||
(targets &&
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
@@ -998,15 +1093,19 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1015,16 +1114,20 @@ static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+ rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out_free:
kfree(targets);
out:
ap_release_message(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(xcrb, func_code, rc,
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
@@ -1032,7 +1135,7 @@ out:
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+ return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
@@ -1040,7 +1143,7 @@ static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight = 0, pref_weight = 0;
+ unsigned int wgt = 0, pref_wgt = 0;
unsigned int func_code;
struct ap_message ap_msg;
unsigned int domain;
@@ -1058,26 +1161,27 @@ static long zcrypt_rng(char *buffer)
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online CCA cards */
- if (!zc->online || !(zc->card->functions & 0x10000000))
+ /* Check for usable CCA card */
+ if (!zc->online || !zc->card->config ||
+ !(zc->card->functions & 0x10000000))
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rng)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rng ||
+ !zq->queue->config)
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
@@ -1089,7 +1193,7 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
@@ -1301,19 +1405,39 @@ static int zcrypt_requestq_count(void)
static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
+ struct zcrypt_track tr;
struct ica_rsa_modexpo mex;
struct ica_rsa_modexpo __user *umex = (void __user *) arg;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&mex, umex, sizeof(mex)))
return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (mex.inputdatalength & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
+ }
+ mex.inputdatalength &= 0x0000FFFF;
+#endif
+
do {
- rc = zcrypt_rsa_modexpo(perms, &mex);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_modexpo(perms, &mex);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
@@ -1324,19 +1448,39 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
+ struct zcrypt_track tr;
struct ica_rsa_modexpo_crt crt;
struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&crt, ucrt, sizeof(crt)))
return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (crt.inputdatalength & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
+ }
+ crt.inputdatalength &= 0x0000FFFF;
+#endif
+
do {
- rc = zcrypt_rsa_crt(perms, &crt);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_crt(perms, &crt);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc) {
ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
return rc;
@@ -1348,18 +1492,38 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ica_xcRB xcRB;
+ struct zcrypt_track tr;
struct ica_xcRB __user *uxcRB = (void __user *) arg;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (xcRB.status & (1U << 31)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(xcRB.status >> 16);
+ }
+ xcRB.status &= 0x0000FFFF;
+#endif
+
do {
- rc = _zcrypt_send_cprb(perms, &xcRB);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = _zcrypt_send_cprb(perms, &xcRB);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
rc, xcRB.status);
@@ -1372,18 +1536,38 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
struct ep11_urb xcrb;
+ struct zcrypt_track tr;
struct ep11_urb __user *uxcrb = (void __user *)arg;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (xcrb.req_len & (1ULL << 63)) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tr.fi.cmd = (u16)(xcrb.req_len >> 48);
+ }
+ xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
+#endif
+
do {
- rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
+ break;
+#endif
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
@@ -1536,8 +1720,10 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo mex32;
struct ica_rsa_modexpo mex64;
+ struct zcrypt_track tr;
long rc;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&mex32, umex32, sizeof(mex32)))
return -EFAULT;
mex64.inputdata = compat_ptr(mex32.inputdata);
@@ -1547,13 +1733,17 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
mex64.b_key = compat_ptr(mex32.b_key);
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
- rc = zcrypt_rsa_modexpo(perms, &mex64);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_modexpo(perms, &mex64);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc)
return rc;
return put_user(mex64.outputdatalength,
@@ -1578,8 +1768,10 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
struct compat_ica_rsa_modexpo_crt crt32;
struct ica_rsa_modexpo_crt crt64;
+ struct zcrypt_track tr;
long rc;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
return -EFAULT;
crt64.inputdata = compat_ptr(crt32.inputdata);
@@ -1592,13 +1784,17 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
crt64.nq_prime = compat_ptr(crt32.nq_prime);
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
- rc = zcrypt_rsa_crt(perms, &crt64);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_rsa_crt(perms, &crt64);
- } while (rc == -EAGAIN);
+ rc = zcrypt_rsa_crt(perms, &tr, &crt64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
if (rc)
return rc;
return put_user(crt64.outputdatalength,
@@ -1630,9 +1826,11 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
{
struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
struct compat_ica_xcRB xcRB32;
+ struct zcrypt_track tr;
struct ica_xcRB xcRB64;
long rc;
+ memset(&tr, 0, sizeof(tr));
if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
return -EFAULT;
xcRB64.agent_ID = xcRB32.agent_ID;
@@ -1656,13 +1854,17 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
xcRB64.priority_window = xcRB32.priority_window;
xcRB64.status = xcRB32.status;
do {
- rc = _zcrypt_send_cprb(perms, &xcRB64);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = _zcrypt_send_cprb(perms, &xcRB64);
- } while (rc == -EAGAIN);
+ rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64);
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
xcRB32.reply_data_length = xcRB64.reply_data_length;
xcRB32.status = xcRB64.status;
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 599e68bf53f7..51c0b8bdef50 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -55,13 +55,30 @@ enum crypto_ops {
struct zcrypt_queue;
+/* struct to hold tracking information for a userspace request/response */
+struct zcrypt_track {
+ int again_counter; /* retry attempts counter */
+ int last_qid; /* last qid used */
+ int last_rc; /* last return code */
+#ifdef CONFIG_ZCRYPT_DEBUG
+ struct ap_fi fi; /* failure injection cmd */
+#endif
+};
+
+/* defines related to message tracking */
+#define TRACK_AGAIN_MAX 10
+#define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000
+#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+
struct zcrypt_ops {
- long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+ long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
+ struct ap_message *);
long (*rsa_modexpo_crt)(struct zcrypt_queue *,
- struct ica_rsa_modexpo_crt *);
- long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+ struct ica_rsa_modexpo_crt *,
+ struct ap_message *);
+ long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
struct ap_message *);
- long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+ long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
struct ap_message *);
long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */
@@ -82,7 +99,7 @@ struct zcrypt_card {
int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */
int max_exp_bit_length;
- int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
+ const int *speed_rating; /* Speed idx of crypto ops. */
atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */
@@ -145,4 +162,26 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
int zcrypt_device_status_ext(int card, int queue,
struct zcrypt_device_status_ext *devstatus);
+static inline unsigned long z_copy_from_user(bool userspace,
+ void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_from_user(to, from, n);
+ memcpy(to, (void __force *) from, n);
+ return 0;
+}
+
+static inline unsigned long z_copy_to_user(bool userspace,
+ void __user *to,
+ const void *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_to_user(to, from, n);
+ memcpy((void __force *) to, from, n);
+ return 0;
+}
+
#endif /* _ZCRYPT_API_H_ */
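
The z_copy helpers replace the removed set_fs(KERNEL_DS) trick: callers on
the in-kernel path pass userspace == false together with plain kernel
pointers. A sketch mirroring the targets copy in _zcrypt_send_ep11_cprb()
above:

    #include <linux/errno.h>
    #include "zcrypt_api.h"         /* z_copy_from_user() */

    /* With userspace == false the __user-annotated pointer is by design
     * a kernel address and the copy degrades to a plain memcpy(). */
    static int fetch_targets(bool userspace, struct ep11_target_dev *dst,
                             const struct ep11_target_dev __user *src,
                             unsigned short n)
    {
            if (z_copy_from_user(userspace, dst, src, n * sizeof(*dst)))
                    return -EFAULT;
            return 0;
    }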
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index c53cab4b0c9e..33b23884b133 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -50,22 +50,28 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+ int online = ac->config && zc->online ? 1 : 0;
- return scnprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
struct zcrypt_queue *zq;
int online, id;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
+ if (online && !ac->config)
+ return -ENODEV;
+
zc->online = online;
id = zc->card->id;
@@ -151,11 +157,6 @@ int zcrypt_card_register(struct zcrypt_card *zc)
{
int rc;
- rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
- &zcrypt_card_attr_group);
- if (rc)
- return rc;
-
spin_lock(&zcrypt_list_lock);
list_add_tail(&zc->list, &zcrypt_card_list);
spin_unlock(&zcrypt_list_lock);
@@ -164,6 +165,14 @@ int zcrypt_card_register(struct zcrypt_card *zc)
ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc) {
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ }
+
return rc;
}
EXPORT_SYMBOL(zcrypt_card_register);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index c793dcabd551..b1046811450f 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -173,6 +173,49 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
EXPORT_SYMBOL(cca_check_secaescipherkey);
/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, size_t keysize,
+ int checkcpacfexport)
+{
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *) token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA);
+ return -EINVAL;
+ }
+ if (t->len > keysize) {
+ if (dbg)
+ DBF("%s token check failed, len %d > keysize %zu\n",
+ __func__, (int) t->len, keysize);
+ return -EINVAL;
+ }
+ if (t->secid != 0x20) {
+ if (dbg)
+ DBF("%s token check failed, secid 0x%02x != 0x20\n",
+ __func__, (int) t->secid);
+ return -EINVAL;
+ }
+ if (checkcpacfexport && !(t->kutc & 0x01)) {
+ if (dbg)
+ DBF("%s token check failed, XPRTCPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_sececckeytoken);
+
+/*
* Allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block and fill in values
* for the common fields. Returns 0 on success or errno value
@@ -249,24 +292,6 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb,
}
/*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
-{
- int rc;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- rc = zcrypt_send_cprb(xcrb);
- set_fs(old_fs);
-
- return rc;
-}
-
-/*
* Generate (random) CCA AES DATA secure key.
*/
int cca_genseckey(u16 cardnr, u16 domain,
@@ -359,7 +384,7 @@ int cca_genseckey(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
__func__, (int) cardnr, (int) domain, rc);
@@ -497,7 +522,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int) cardnr, (int) domain, rc);
@@ -624,7 +649,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int) cardnr, (int) domain, rc);
@@ -850,7 +875,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1018,7 +1043,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1235,7 +1260,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
@@ -1316,6 +1341,156 @@ out:
EXPORT_SYMBOL(cca_cipher2protkey);
/*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[0];
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[0]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keylen = ((struct eccprivkeytoken *)key)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm __force *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, key, keylen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ DEBUG_WARN(
+ "%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *) ptr;
+ prepparm = (struct aurepparm *) ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x02) {
+ DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int) prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x81) {
+ DEBUG_ERR(
+ "%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int) prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ if (prepparm->vud.ckb.keylen > *protkeylen) {
+ DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ *protkeylen = prepparm->vud.ckb.keylen;
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_ECC;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(cca_ecc2protkey);
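
A hedged in-kernel sketch of the new helper; the 256-byte buffer matches
PROTKEYBLOBBUFSIZE used by the pkey driver above, and the printout is
illustrative:

    #include <linux/printk.h>
    #include "zcrypt_ccamisc.h"     /* cca_ecc2protkey() */

    /* Derive a CPACF protected key from a CCA ECC private key token
     * on the APQN given by cardnr/domain. */
    static int example_ecc2protkey(u16 cardnr, u16 domain, const u8 *token)
    {
            u8 protkey[256];
            u32 protkeylen = sizeof(protkey), protkeytype = 0;
            int rc;

            rc = cca_ecc2protkey(cardnr, domain, token, protkey,
                                 &protkeylen, &protkeytype);
            if (rc == 0)
                    pr_info("protected key: %u bytes, type %u\n",
                            protkeylen, protkeytype);
            return rc;
    }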
+
+/*
* query cryptographic facility from CCA adapter
*/
int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -1366,7 +1541,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
+ rc = zcrypt_send_cprb(&xcrb);
if (rc) {
DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
__func__, (int) cardnr, (int) domain, rc);
@@ -1524,21 +1699,38 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
rarray, &rlen, varray, &vlen);
if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
memcpy(ci->serial, rarray, 8);
- ci->new_mk_state = (char) rarray[7*8];
- ci->cur_mk_state = (char) rarray[8*8];
- ci->old_mk_state = (char) rarray[9*8];
- if (ci->old_mk_state == '2')
- memcpy(&ci->old_mkvp, varray + 172, 8);
- if (ci->cur_mk_state == '2')
- memcpy(&ci->cur_mkvp, varray + 184, 8);
- if (ci->new_mk_state == '3')
- memcpy(&ci->new_mkvp, varray + 196, 8);
- found = 1;
+ ci->new_aes_mk_state = (char) rarray[7*8];
+ ci->cur_aes_mk_state = (char) rarray[8*8];
+ ci->old_aes_mk_state = (char) rarray[9*8];
+ if (ci->old_aes_mk_state == '2')
+ memcpy(&ci->old_aes_mkvp, varray + 172, 8);
+ if (ci->cur_aes_mk_state == '2')
+ memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
+ if (ci->new_aes_mk_state == '3')
+ memcpy(&ci->new_aes_mkvp, varray + 196, 8);
+ found++;
+ }
+ if (!found)
+ goto out;
+ rlen = vlen = PAGE_SIZE/2;
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
+ ci->new_apka_mk_state = (char) rarray[7*8];
+ ci->cur_apka_mk_state = (char) rarray[8*8];
+ ci->old_apka_mk_state = (char) rarray[9*8];
+ if (ci->old_apka_mk_state == '2')
+ memcpy(&ci->old_apka_mkvp, varray + 208, 8);
+ if (ci->cur_apka_mk_state == '2')
+ memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
+ if (ci->new_apka_mk_state == '3')
+ memcpy(&ci->new_apka_mkvp, varray + 232, 8);
+ found++;
}
+out:
free_page((unsigned long) pg);
-
- return found ? 0 : -ENOENT;
+ return found == 2 ? 0 : -ENOENT;
}
/*
@@ -1592,16 +1784,16 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
/* enabled CCA card, check current mkvp from cache */
if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
ci.hwtype >= minhwtype &&
- ci.cur_mk_state == '2' &&
- ci.cur_mkvp == mkvp) {
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp) {
if (!verify)
break;
/* verify: refresh card info */
if (fetch_cca_info(card, dom, &ci) == 0) {
cca_info_cache_update(card, dom, &ci);
if (ci.hwtype >= minhwtype &&
- ci.cur_mk_state == '2' &&
- ci.cur_mkvp == mkvp)
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp)
break;
}
}
@@ -1623,12 +1815,12 @@ static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
if (fetch_cca_info(card, dom, &ci) == 0) {
cca_info_cache_update(card, dom, &ci);
if (ci.hwtype >= minhwtype &&
- ci.cur_mk_state == '2' &&
- ci.cur_mkvp == mkvp)
+ ci.cur_aes_mk_state == '2' &&
+ ci.cur_aes_mkvp == mkvp)
break;
if (ci.hwtype >= minhwtype &&
- ci.old_mk_state == '2' &&
- ci.old_mkvp == mkvp &&
+ ci.old_aes_mk_state == '2' &&
+ ci.old_aes_mkvp == mkvp &&
oi < 0)
oi = i;
}
@@ -1682,15 +1874,14 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
EXPORT_SYMBOL(cca_findcard);
int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
- int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ int verify)
{
struct zcrypt_device_status_ext *device_status;
- int i, n, card, dom, curmatch, oldmatch, rc = 0;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, curmatch, oldmatch, rc = 0;
struct cca_info ci;
- *apqns = NULL;
- *nr_apqns = 0;
-
/* fetch status of all crypto cards */
device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
sizeof(struct zcrypt_device_status_ext),
@@ -1699,67 +1890,73 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
- /* loop two times: first gather eligible apqns, then store them */
- while (1) {
- n = 0;
- /* walk through all the crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- /* check online state */
- if (!device_status[i].online)
- continue;
- /* check for cca functions */
- if (!(device_status[i].functions & 0x04))
- continue;
- /* check cardnr */
- if (cardnr != 0xFFFF && card != cardnr)
- continue;
- /* check domain */
- if (domain != 0xFFFF && dom != domain)
- continue;
- /* get cca info on this apqn */
- if (cca_get_info(card, dom, &ci, verify))
- continue;
- /* current master key needs to be valid */
- if (ci.cur_mk_state != '2')
- continue;
- /* check min hardware type */
- if (minhwtype > 0 && minhwtype > ci.hwtype)
- continue;
- if (cur_mkvp || old_mkvp) {
- /* check mkvps */
- curmatch = oldmatch = 0;
- if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kvfree(device_status);
+ return -ENOMEM;
+ }
+
+ /* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for cca functions */
+ if (!(device_status[i].functions & 0x04))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* get cca info on this apqn */
+ if (cca_get_info(card, dom, &ci, verify))
+ continue;
+ /* current master key needs to be valid */
+ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
+ continue;
+ if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
+ continue;
+ /* check min hardware type */
+ if (minhwtype > 0 && minhwtype > ci.hwtype)
+ continue;
+ if (cur_mkvp || old_mkvp) {
+ /* check mkvps */
+ curmatch = oldmatch = 0;
+ if (mktype == AES_MK_SET) {
+ if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_aes_mk_state == '2' &&
+ old_mkvp == ci.old_aes_mkvp)
+ oldmatch = 1;
+ } else {
+ if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
curmatch = 1;
- if (old_mkvp && ci.old_mk_state == '2' &&
- old_mkvp == ci.old_mkvp)
+ if (old_mkvp && ci.old_apka_mk_state == '2' &&
+ old_mkvp == ci.old_apka_mkvp)
oldmatch = 1;
- if ((cur_mkvp || old_mkvp) &&
- (curmatch + oldmatch < 1))
- continue;
}
- /* apqn passed all filtering criterons */
- if (*apqns && n < *nr_apqns)
- (*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
- n++;
- }
- /* loop 2nd time: array has been filled */
- if (*apqns)
- break;
- /* loop 1st time: have # of eligible apqns in n */
- if (!n) {
- rc = -ENODEV; /* no eligible apqns found */
- break;
- }
- *nr_apqns = n;
- /* allocate array to store n apqns into */
- *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
- if (!*apqns) {
- rc = -ENOMEM;
- break;
+ if (curmatch + oldmatch < 1)
+ continue;
}
- verify = 0;
+ /* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+ /* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
}
kvfree(device_status);
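A hedged caller sketch for the reworked cca_findcard2() (not part of
this patch; mkvp is assumed given): the callee kmalloc's the apqn
array, and each entry packs the card number into the upper and the
domain into the lower 16 bits:

	u32 *apqns, nr_apqns, i;
	int rc;

	rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   0, AES_MK_SET, mkvp, 0, 0);
	if (!rc) {
		for (i = 0; i < nr_apqns; i++) {
			u16 card = apqns[i] >> 16;
			u16 dom = apqns[i] & 0xFFFF;
			/* ... use card/dom ... */
		}
		kfree(apqns);	/* caller owns the returned array */
	}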
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 8b7a641671c9..e7105443d5cb 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -14,8 +14,9 @@
#include <asm/pkey.h>
/* Key token types */
-#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal sym key token */
+#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */
/* For TOKTYPE_NON_CCA: */
#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
@@ -93,6 +94,31 @@ struct cipherkeytoken {
u8 vdata[]; /* variable part data follows */
} __packed;
+/* inside view of a CCA secure ECC private key */
+struct eccprivkeytoken {
+ u8 type; /* 0x1f for internal asym key token */
+ u8 version; /* should be 0x00 */
+ u16 len; /* total key token length in bytes */
+ u8 res1[4];
+ u8 secid; /* 0x20 for ECC priv key section marker */
+ u8 secver; /* section version */
+ u16 seclen; /* section length */
+ u8 wtype; /* wrapping method, 0x00 clear, 0x01 AES */
+ u8 htype; /* hash method, 0x02 for SHA-256 */
+ u8 res2[2];
+ u8 kutc; /* key usage and translation control */
+ u8 ctype; /* curve type */
+ u8 kfs; /* key format and security */
+ u8 ksrc; /* key source */
+ u16 pbitlen; /* length of prime p in bits */
+ u16 ibmadlen; /* IBM associated data length in bytes */
+ u64 mkvp; /* master key verification pattern */
+ u8 opk[48]; /* encrypted object protection key data */
+ u16 adatalen; /* associated data length in bytes */
+ u16 fseclen; /* formatted section length in bytes */
+ u8 more_data[]; /* more data follows */
+} __packed;
+
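The checks done by cca_check_sececckeytoken() (declared further down)
are not visible in this hunk; based on the field comments above, a
plausibility check would look roughly like this sketch (not the actual
implementation):

	static int ecc_token_plausible(const u8 *token, size_t keysize)
	{
		const struct eccprivkeytoken *t = (const void *) token;

		if (keysize < sizeof(*t))
			return -EINVAL;
		if (t->type != TOKTYPE_CCA_INTERNAL_PKA)  /* 0x1f */
			return -EINVAL;
		if (t->secid != 0x20)	/* ECC priv key section marker */
			return -EINVAL;
		if (t->len > keysize)	/* token must fit the buffer */
			return -EINVAL;
		return 0;
	}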
/* Some defines for the CCA AES cipherkeytoken kmf1 field */
#define KMF1_XPRT_SYM 0x8000
#define KMF1_XPRT_UASY 0x4000
@@ -123,6 +149,14 @@ int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
int checkcpacfexport);
/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, size_t keysize,
+ int checkcpacfexport);
+
+/*
* Generate (random) CCA AES DATA secure key.
*/
int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
@@ -159,6 +193,12 @@ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
/*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
* Query cryptographic facility from CCA adapter
*/
int cca_query_crypto_facility(u16 cardnr, u16 domain,
@@ -186,6 +226,8 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
* - if verify is enabled and a cur_mkvp and/or old_mkvp
* value is given, then refetch the cca_info and make sure the current
* cur_mkvp or old_mkvp values of the apqn are used.
+ * The mktype determines which set of master keys to use:
+ * 0 = AES_MK_SET - AES MK set, 1 = APKA_MK_SET - APKA MK set
* The array of apqn entries is allocated with kmalloc and returned in *apqns;
* the number of apqns stored into the list is returned in *nr_apqns. One apqn
* entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
@@ -194,18 +236,28 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
* -ENODEV is returned.
*/
int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
- int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ int verify);
+
+#define AES_MK_SET 0
+#define APKA_MK_SET 1
/* struct to hold info for each CCA queue */
struct cca_info {
- int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
- char new_mk_state; /* '1' empty, '2' partially full, '3' full */
- char cur_mk_state; /* '1' invalid, '2' valid */
- char old_mk_state; /* '1' invalid, '2' valid */
- u64 new_mkvp; /* truncated sha256 hash of new master key */
- u64 cur_mkvp; /* truncated sha256 hash of current master key */
- u64 old_mkvp; /* truncated sha256 hash of old master key */
- char serial[9]; /* serial number string (8 ascii numbers + 0x00) */
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_aes_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_aes_mk_state; /* '1' invalid, '2' valid */
+ char old_aes_mk_state; /* '1' invalid, '2' valid */
+ char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_apka_mk_state; /* '1' invalid, '2' valid */
+ char old_apka_mk_state; /* '1' invalid, '2' valid */
+ u64 new_aes_mkvp; /* truncated sha256 of new aes master key */
+ u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */
+ u64 old_aes_mkvp; /* truncated sha256 of old aes master key */
+ u64 new_apka_mkvp; /* truncated sha256 of new apka master key */
+ u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */
+ u64 old_apka_mkvp; /* truncated sha256 of old apka mk */
+ char serial[9]; /* serial number (8 ascii numbers + 0x00) */
};
/*
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index b447f3e9e4a2..226a5612e855 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -94,8 +94,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
- memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
- sizeof(CEX2A_SPEED_IDX));
+ zc->speed_rating = CEX2A_SPEED_IDX;
zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
zc->type_string = "CEX2A";
zc->user_space_type = ZCRYPT_CEX2A;
@@ -108,8 +107,7 @@ static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
}
- memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
- sizeof(CEX3A_SPEED_IDX));
+ zc->speed_rating = CEX3A_SPEED_IDX;
zc->type_string = "CEX3A";
zc->user_space_type = ZCRYPT_CEX3A;
} else {
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index f00127a78bab..7a8cbdbe4408 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -109,26 +109,53 @@ static ssize_t cca_mkvps_show(struct device *dev,
AP_QID_QUEUE(zq->queue->qid),
&ci, zq->online);
- if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
+ if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
else
n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
- if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
+ if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
- if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
+ if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+ if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+ if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
return n;
}
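Illustrative output of the extended cca_mkvps sysfs attribute (hash
values invented; the state strings come from the new_state[] and
cao_state[] tables not shown in this hunk):

	AES NEW: empty 0x0000000000000000
	AES CUR: valid 0xb072bc5c245aac8a
	AES OLD: invalid 0x0000000000000000
	APKA NEW: empty 0x0000000000000000
	APKA CUR: valid 0x8f3a9e27c1d05b44
	APKA OLD: invalid 0x0000000000000000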
@@ -239,8 +266,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
case AP_DEVICE_TYPE_CEX2C:
zc->user_space_type = ZCRYPT_CEX2C;
zc->type_string = "CEX2C";
- memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
- sizeof(CEX2C_SPEED_IDX));
+ zc->speed_rating = CEX2C_SPEED_IDX;
zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
@@ -248,8 +274,7 @@ static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
case AP_DEVICE_TYPE_CEX3C:
zc->user_space_type = ZCRYPT_CEX3C;
zc->type_string = "CEX3C";
- memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
- sizeof(CEX3C_SPEED_IDX));
+ zc->speed_rating = CEX3C_SPEED_IDX;
zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index dc20d983e468..f5195bca1d85 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -121,26 +121,53 @@ static ssize_t cca_mkvps_show(struct device *dev,
AP_QID_QUEUE(zq->queue->qid),
&ci, zq->online);
- if (ci.new_mk_state >= '1' && ci.new_mk_state <= '3')
+ if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_mk_state - '1'], ci.new_mkvp);
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
else
n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n");
- if (ci.cur_mk_state >= '1' && ci.cur_mk_state <= '2')
+ if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_mk_state - '1'], ci.cur_mkvp);
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES CUR: - -\n");
- if (ci.old_mk_state >= '1' && ci.old_mk_state <= '2')
+ if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
n += scnprintf(buf + n, PAGE_SIZE - n,
"AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_mk_state - '1'], ci.old_mkvp);
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
else
n += scnprintf(buf + n, PAGE_SIZE - n, "AES OLD: - -\n");
+ if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA NEW: - -\n");
+
+ if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA CUR: - -\n");
+
+ if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+ n += scnprintf(buf + n, PAGE_SIZE - n,
+ "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
+ else
+ n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n");
+
return n;
}
@@ -382,31 +409,31 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
- static const int CEX4A_SPEED_IDX[] = {
+ static const int CEX4A_SPEED_IDX[NUM_OPS] = {
14, 19, 249, 42, 228, 1458, 0, 0};
- static const int CEX5A_SPEED_IDX[] = {
+ static const int CEX5A_SPEED_IDX[NUM_OPS] = {
8, 9, 20, 18, 66, 458, 0, 0};
- static const int CEX6A_SPEED_IDX[] = {
+ static const int CEX6A_SPEED_IDX[NUM_OPS] = {
6, 9, 20, 17, 65, 438, 0, 0};
- static const int CEX7A_SPEED_IDX[] = {
+ static const int CEX7A_SPEED_IDX[NUM_OPS] = {
6, 8, 17, 15, 54, 362, 0, 0};
- static const int CEX4C_SPEED_IDX[] = {
+ static const int CEX4C_SPEED_IDX[NUM_OPS] = {
59, 69, 308, 83, 278, 2204, 209, 40};
- static const int CEX5C_SPEED_IDX[] = {
+ static const int CEX5C_SPEED_IDX[NUM_OPS] = {
24, 31, 50, 37, 90, 479, 27, 10};
- static const int CEX6C_SPEED_IDX[] = {
+ static const int CEX6C_SPEED_IDX[NUM_OPS] = {
16, 20, 32, 27, 77, 455, 24, 9};
- static const int CEX7C_SPEED_IDX[] = {
+ static const int CEX7C_SPEED_IDX[NUM_OPS] = {
14, 16, 26, 23, 64, 376, 23, 8};
- static const int CEX4P_SPEED_IDX[] = {
+ static const int CEX4P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 50};
- static const int CEX5P_SPEED_IDX[] = {
+ static const int CEX5P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 10};
- static const int CEX6P_SPEED_IDX[] = {
+ static const int CEX6P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 9};
- static const int CEX7P_SPEED_IDX[] = {
+ static const int CEX7P_SPEED_IDX[NUM_OPS] = {
0, 0, 0, 0, 0, 0, 0, 8};
struct ap_card *ac = to_ap_card(&ap_dev->device);
@@ -422,26 +449,22 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4A";
zc->user_space_type = ZCRYPT_CEX4;
- memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
- sizeof(CEX4A_SPEED_IDX));
+ zc->speed_rating = CEX4A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
- memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
- sizeof(CEX5A_SPEED_IDX));
+ zc->speed_rating = CEX5A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6A";
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
- sizeof(CEX6A_SPEED_IDX));
+ zc->speed_rating = CEX6A_SPEED_IDX;
} else {
zc->type_string = "CEX7A";
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX7A_SPEED_IDX,
- sizeof(CEX7A_SPEED_IDX));
+ zc->speed_rating = CEX7A_SPEED_IDX;
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -461,32 +484,28 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
- sizeof(CEX4C_SPEED_IDX));
+ zc->speed_rating = CEX4C_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
/* wrong user space type, must be CEX5
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
- sizeof(CEX5C_SPEED_IDX));
+ zc->speed_rating = CEX5C_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6C";
/* wrong user space type, must be CEX6
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
- sizeof(CEX6C_SPEED_IDX));
+ zc->speed_rating = CEX6C_SPEED_IDX;
} else {
zc->type_string = "CEX7C";
/* wrong user space type, must be CEX7
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX7C_SPEED_IDX,
- sizeof(CEX7C_SPEED_IDX));
+ zc->speed_rating = CEX7C_SPEED_IDX;
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -495,26 +514,22 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4P";
zc->user_space_type = ZCRYPT_CEX4;
- memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
- sizeof(CEX4P_SPEED_IDX));
+ zc->speed_rating = CEX4P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
- memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
- sizeof(CEX5P_SPEED_IDX));
+ zc->speed_rating = CEX5P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6P";
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
- sizeof(CEX6P_SPEED_IDX));
+ zc->speed_rating = CEX6P_SPEED_IDX;
} else {
zc->type_string = "CEX7P";
/* wrong user space type, just for compatibility
* with the ZCRYPT_STATUS_MASK ioctl.
*/
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX7P_SPEED_IDX,
- sizeof(CEX7P_SPEED_IDX));
+ zc->speed_rating = CEX7P_SPEED_IDX;
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
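The probe functions now store a pointer to the static const speed
tables instead of copying them. This presumes the corresponding field
in struct zcrypt_card was changed from an array to a pointer, roughly
(a sketch; the struct change itself is outside this section):

	-	int speed_rating[NUM_OPS];
	+	const int *speed_rating; /* points at a static const table */

Since every table is static const and lives for the lifetime of the
module, keeping a pointer is safe and avoids a NUM_OPS-sized copy per
card.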
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 241dbb5f75bf..3225489a1c41 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -21,6 +21,14 @@
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+#define ZCRYPT_DBF_ERR(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define ZCRYPT_DBF_WARN(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define ZCRYPT_DBF_INFO(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define ZCRYPT_DBF_DBG(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *zcrypt_dbf_info;
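The new level-specific wrappers remove the need to pass a DBF_* level
at every call site; a call used later in this patch reads simply:

	ZCRYPT_DBF_WARN("dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
			card, queue, ehdr->reply_code);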
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 3c3d403abe92..9ce5a71da69b 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <asm/zcrypt.h>
#include <asm/pkey.h>
+#include <crypto/aes.h>
#include "ap_bus.h"
#include "zcrypt_api.h"
@@ -113,79 +114,199 @@ static void __exit card_cache_free(void)
}
/*
- * Simple check if the key blob is a valid EP11 secure AES key.
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
*/
-int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
- const u8 *key, int keybitsize,
- int checkcpacfexport)
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
{
- struct ep11keyblob *kb = (struct ep11keyblob *) key;
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
- if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
if (dbg)
DBF("%s key check failed, type 0x%02x != 0x%02x\n",
- __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ __func__, (int) hdr->type, TOKTYPE_NON_CCA);
return -EINVAL;
}
- if (kb->head.version != TOKVER_EP11_AES) {
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int) hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
if (dbg)
DBF("%s key check failed, version 0x%02x != 0x%02x\n",
- __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+ return -EINVAL;
+ }
+ if (hdr->len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
return -EINVAL;
}
+
if (kb->version != EP11_STRUCT_MAGIC) {
if (dbg)
- DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
__func__, (int) kb->version, EP11_STRUCT_MAGIC);
return -EINVAL;
}
- switch (kb->head.keybitlen) {
- case 128:
- case 192:
- case 256:
- break;
- default:
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
if (dbg)
- DBF("%s key check failed, keybitlen %d invalid\n",
- __func__, (int) kb->head.keybitlen);
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
return -EINVAL;
}
- if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
- DBF("%s key check failed, keybitsize %d\n",
- __func__, keybitsize);
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
+{
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *) key;
+ struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int) hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
return -EINVAL;
}
- if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (hdr->len > keylen) {
if (dbg)
- DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
__func__);
return -EINVAL;
}
+
#undef DBF
return 0;
}
-EXPORT_SYMBOL(ep11_check_aeskeyblob);
+EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
/*
- * Helper function which calls zcrypt_send_ep11_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
*/
-static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp)
{
- int rc;
- mm_segment_t old_fs = get_fs();
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
- set_fs(KERNEL_DS);
- rc = zcrypt_send_ep11_cprb(urb);
- set_fs(old_fs);
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
- return rc;
+ if (keylen < sizeof(*kb)) {
+ DBF("%s key check failed, keylen %zu < %zu\n",
+ __func__, keylen, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->head.len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %zu mismatch\n",
+ __func__, (int) kb->head.len, keylen);
+ return -EINVAL;
+ }
+ if (kb->head.len < sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int) kb->head.len, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
}
+EXPORT_SYMBOL(ep11_check_aes_key);
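Taken together, the three check helpers cover the three blob layouts;
a dispatcher keyed on the header version byte might look like this
sketch (dbg, dbflvl, key and keylen are assumed given; struct
ep11kblob_header is defined in the header file, not in this hunk):

	const struct ep11kblob_header *hdr = (const void *) key;
	int rc = -EINVAL;

	if (hdr->type == TOKTYPE_NON_CCA) {
		switch (hdr->version) {
		case TOKVER_EP11_AES:			/* 0x03, old style */
			rc = ep11_check_aes_key(dbg, dbflvl, key, keylen, 1);
			break;
		case TOKVER_EP11_AES_WITH_HEADER:	/* 0x06 */
			rc = ep11_check_aes_key_with_hdr(dbg, dbflvl,
							 key, keylen, 1);
			break;
		case TOKVER_EP11_ECC_WITH_HEADER:	/* 0x07 */
			rc = ep11_check_ecc_key_with_hdr(dbg, dbflvl,
							 key, keylen, 1);
			break;
		}
	}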
/*
* Allocate and prepare ep11 cprb plus additional payload.
@@ -399,7 +520,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
req, sizeof(*req) + sizeof(*req_pl),
rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
- rc = _zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
@@ -637,7 +758,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
req, sizeof(*req) + sizeof(*req_pl),
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = _zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
@@ -757,7 +878,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + rep_pl_size);
- rc = _zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
@@ -905,7 +1026,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = _zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
@@ -972,7 +1093,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
u8 data_tag;
u8 data_lenfmt;
u16 data_len;
- u8 data[512];
+ u8 data[1024];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
@@ -980,8 +1101,17 @@ static int ep11_wrapkey(u16 card, u16 domain,
struct ep11keyblob *kb;
size_t req_pl_size;
int api, rc = -ENOMEM;
+ bool has_header = false;
u8 *p;
+ /* maybe the session field holds a header with key info */
+ kb = (struct ep11keyblob *) key;
+ if (kb->head.type == TOKTYPE_NON_CCA &&
+ kb->head.version == TOKVER_EP11_AES) {
+ has_header = true;
+ keysize = kb->head.len < keysize ? kb->head.len : keysize;
+ }
+
/* request cprb and payload */
req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + 4;
@@ -1007,9 +1137,10 @@ static int ep11_wrapkey(u16 card, u16 domain,
/* key blob */
p += asn1tag_write(p, 0x04, key, keysize);
/* maybe the key argument needs the head data cleaned out */
- kb = (struct ep11keyblob *)(p - keysize);
- if (kb->head.version == TOKVER_EP11_AES)
+ if (has_header) {
+ kb = (struct ep11keyblob *)(p - keysize);
memset(&kb->head, 0, sizeof(kb->head));
+ }
/* empty kek tag */
*p++ = 0x04;
*p++ = 0;
@@ -1033,7 +1164,7 @@ static int ep11_wrapkey(u16 card, u16 domain,
req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
- rc = _zcrypt_send_ep11_cprb(urb);
+ rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
DEBUG_ERR(
"%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
@@ -1132,12 +1263,12 @@ out:
}
EXPORT_SYMBOL(ep11_clr2keyblob);
-int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
int rc = -EIO;
u8 *wkbuf = NULL;
- size_t wkbuflen = 256;
+ size_t wkbuflen, keylen;
struct wk_info {
u16 version;
u8 res1[16];
@@ -1147,8 +1278,33 @@ int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
u8 res2[8];
u8 pkey[0];
} __packed * wki;
+ const u8 *key;
+ struct ep11kblob_header *hdr;
+
+ /* key with or without header ? */
+ hdr = (struct ep11kblob_header *) keyblob;
+ if (hdr->type == TOKTYPE_NON_CCA
+ && (hdr->version == TOKVER_EP11_AES_WITH_HEADER
+ || hdr->version == TOKVER_EP11_ECC_WITH_HEADER)
+ && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES or ECC key with header */
+ key = keyblob + sizeof(struct ep11kblob_header);
+ keylen = hdr->len - sizeof(struct ep11kblob_header);
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES
+ && is_ep11_keyblob(keyblob)) {
+ /* EP11 AES key (old style) */
+ key = keyblob;
+ keylen = hdr->len;
+ } else if (is_ep11_keyblob(keyblob)) {
+ /* raw EP11 key blob */
+ key = keyblob;
+ keylen = keybloblen;
+ } else
+ return -EINVAL;
/* alloc temp working buffer */
+ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
if (!wkbuf)
return -ENOMEM;
@@ -1165,46 +1321,68 @@ int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
wki = (struct wk_info *) wkbuf;
/* check struct version and pkey type */
- if (wki->version != 1 || wki->pkeytype != 1) {
+ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
__func__, (int) wki->version, (int) wki->pkeytype);
rc = -EIO;
goto out;
}
- /* copy the tanslated protected key */
- switch (wki->pkeysize) {
- case 16+32:
- /* AES 128 protected key */
- if (protkeytype)
- *protkeytype = PKEY_KEYTYPE_AES_128;
- break;
- case 24+32:
- /* AES 192 protected key */
- if (protkeytype)
- *protkeytype = PKEY_KEYTYPE_AES_192;
+ /* check protected key type field */
+ switch (wki->pkeytype) {
+ case 1: /* AES */
+ switch (wki->pkeysize) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int) wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
break;
- case 32+32:
- /* AES 256 protected key */
+ case 3: /* EC-P */
+ case 4: /* EC-ED */
+ case 5: /* EC-BP */
if (protkeytype)
- *protkeytype = PKEY_KEYTYPE_AES_256;
+ *protkeytype = PKEY_KEYTYPE_ECC;
break;
+ case 2: /* TDES */
default:
- DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
- __func__, (int) wki->pkeysize);
+ DEBUG_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int) wki->pkeytype);
rc = -EIO;
goto out;
}
+
+ /* copy the translated protected key */
+ if (wki->pkeysize > *protkeylen) {
+ DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
+ rc = -EINVAL;
+ goto out;
+ }
memcpy(protkey, wki->pkey, wki->pkeysize);
- if (protkeylen)
- *protkeylen = (u32) wki->pkeysize;
- rc = 0;
+ *protkeylen = wki->pkeysize;
out:
kfree(wkbuf);
return rc;
}
-EXPORT_SYMBOL(ep11_key2protkey);
+EXPORT_SYMBOL(ep11_kblob2protkey);
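A note on the working buffer size above: with AES_BLOCK_SIZE == 16,
wkbuflen = (keylen + 16) & ~15 always yields the next 16-byte boundary
strictly above keylen, e.g.

	keylen 64 -> wkbuflen 80
	keylen 70 -> wkbuflen 80
	keylen 80 -> wkbuflen 96

so the wrapped key plus any block padding is guaranteed to fit.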
int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int minapi, const u8 *wkvp)
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index e3ed5ed1de86..1e02b197c003 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -12,22 +12,28 @@
#include <asm/zcrypt.h>
#include <asm/pkey.h>
-#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */
-
#define EP11_API_V 4 /* highest known and supported EP11 API version */
-
#define EP11_STRUCT_MAGIC 0x1234
-#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
+
+/*
+ * Internal used values for the version field of the key header.
+ * Should match to the enum pkey_key_type in pkey.h.
+ */
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob (old style) */
+#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */
+#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */
/* inside view of an EP11 secure key blob */
struct ep11keyblob {
union {
u8 session[32];
+ /* only used for PKEY_TYPE_EP11: */
struct {
u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
u8 res0; /* unused */
u16 len; /* total length in bytes of this blob */
- u8 version; /* 0x06 (TOKVER_EP11_AES) */
+ u8 version; /* 0x03 (TOKVER_EP11_AES) */
u8 res1; /* unused */
u16 keybitlen; /* clear key bit len, 0 for unknown */
} head;
@@ -41,16 +47,41 @@ struct ep11keyblob {
u8 mac[32];
} __packed;
+/* check ep11 key magic to find out if this is an ep11 key blob */
+static inline bool is_ep11_keyblob(const u8 *key)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ return (kb->version == EP11_STRUCT_MAGIC);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
+
/*
- * Simple check if the key blob is a valid EP11 secure AES key.
- * If keybitsize is given, the bitsize of the key is also checked.
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
* If checkcpacfexport is enabled, the key is also checked for the
* attributes needed to export this key for CPACF use.
* Returns 0 on success or errno value on failure.
*/
-int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
- const u8 *key, int keybitsize,
- int checkcpacfexport);
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, size_t keylen, int checkcpacfexp);
/* EP11 card info struct */
struct ep11_card_info {
@@ -92,12 +123,6 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
/*
- * Derive proteced key from EP11 AES secure key blob.
- */
-int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
- u8 *protkey, u32 *protkeylen, u32 *protkeytype);
-
-/*
* Build a list of ep11 apqns meeting the following constraints:
* - apqn is online and is in fact an EP11 apqn
* - if cardnr is not FFFF only apqns with this cardnr
@@ -119,6 +144,12 @@ int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
int minhwtype, int minapi, const u8 *wkvp);
+/*
+ * Derive protected key from EP11 key blob (AES and ECC keys).
+ */
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
void zcrypt_ep11misc_exit(void);
#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 54a04f8c38ef..39e626e3a379 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,7 +52,6 @@ struct error_hdr {
#define REP82_ERROR_INVALID_COMMAND 0x30
#define REP82_ERROR_MALFORMED_MSG 0x40
#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
#define REP82_ERROR_WORD_ALIGNMENT 0x60
#define REP82_ERROR_MESSAGE_LENGTH 0x80
@@ -67,7 +66,6 @@ struct error_hdr {
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
#define REP88_ERROR_MODULE_FAILURE 0x10
-
#define REP88_ERROR_MESSAGE_TYPE 0x20
#define REP88_ERROR_MESSAGE_MALFORMD 0x22
#define REP88_ERROR_MESSAGE_LENGTH 0x23
@@ -85,78 +83,56 @@ static inline int convert_error(struct zcrypt_queue *zq,
int queue = AP_QID_QUEUE(zq->queue->qid);
switch (ehdr->reply_code) {
- case REP82_ERROR_OPERAND_INVALID:
- case REP82_ERROR_OPERAND_SIZE:
- case REP82_ERROR_EVEN_MOD_IN_OPND:
- case REP88_ERROR_MESSAGE_MALFORMD:
- case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
- case REP82_ERROR_INVALID_DOMAIN_PENDING:
- case REP82_ERROR_INVALID_SPECIAL_CMD:
- case REP82_ERROR_FILTERED_BY_HYPERVISOR:
- // REP88_ERROR_INVALID_KEY // '82' CEX2A
- // REP88_ERROR_OPERAND // '84' CEX2A
- // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
- /* Invalid input data. */
+ case REP82_ERROR_INVALID_MSG_LEN: /* 0x23 */
+ case REP82_ERROR_RESERVD_FIELD: /* 0x24 */
+ case REP82_ERROR_FORMAT_FIELD: /* 0x29 */
+ case REP82_ERROR_MALFORMED_MSG: /* 0x40 */
+ case REP82_ERROR_INVALID_SPECIAL_CMD: /* 0x41 */
+ case REP82_ERROR_MESSAGE_LENGTH: /* 0x80 */
+ case REP82_ERROR_OPERAND_INVALID: /* 0x82 */
+ case REP82_ERROR_OPERAND_SIZE: /* 0x84 */
+ case REP82_ERROR_EVEN_MOD_IN_OPND: /* 0x85 */
+ case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
+ case REP82_ERROR_PACKET_TRUNCATED: /* 0xA0 */
+ case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
+ case REP88_ERROR_KEY_TYPE: /* 0x34 */
+ /* RY indicates malformed request */
ZCRYPT_DBF(DBF_WARN,
- "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+ "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
card, queue, ehdr->reply_code);
return -EINVAL;
- case REP82_ERROR_MESSAGE_TYPE:
- // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
+ case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
+ case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
+ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
/*
- * To sent a message of the wrong type is a bug in the
- * device driver. Send error msg, disable the device
- * and then repeat the request.
+ * Msg of wrong type or card/infrastructure failure.
+ * Trigger a rescan of the AP bus and a retry of the request.
*/
atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
- return -EAGAIN;
- case REP82_ERROR_TRANSPORT_FAIL:
- /* Card or infrastructure failure, disable card */
- atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
/* For type 86 response show the apfs value (failure reason) */
- if (ehdr->type == TYPE86_RSP_CODE) {
+ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
+ ehdr->type == TYPE86_RSP_CODE) {
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
} __packed * head = reply->msg;
unsigned int apfs = *((u32 *)head->fmt2.apfs);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
- card, queue, apfs, ehdr->reply_code);
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+ card, queue, ehdr->reply_code, apfs);
} else
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
card, queue, ehdr->reply_code);
return -EAGAIN;
- case REP82_ERROR_MACHINE_FAILURE:
- // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
- /* If a card fails disable it and repeat the request. */
- atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
- return -EAGAIN;
default:
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+ /* Assume request is valid and a retry will be worth it */
+ ZCRYPT_DBF(DBF_WARN,
+ "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
card, queue, ehdr->reply_code);
- return -EAGAIN; /* repeat the request on a different device. */
+ return -EAGAIN;
}
}
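With this rework convert_error() stops taking queues offline for most
reply codes: -EINVAL marks a malformed request that must not be
retried, while -EAGAIN asks the caller to resubmit, possibly on
another APQN after the triggered AP bus rescan. A hypothetical retry
loop on the caller side (send_and_convert and retries are stand-in
names, not driver API):

	int rc, retries = 3;

	do {
		rc = send_and_convert(zq, &reply);	/* stand-in name */
	} while (rc == -EAGAIN && --retries > 0);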
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 7aedc338b445..bf14ee445f89 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -246,6 +246,12 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
copy_from_user(exp, mex->b_key, mod_len) ||
copy_from_user(inp, mex->inputdata, mod_len))
return -EFAULT;
+
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
return 0;
}
@@ -332,6 +338,11 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
copy_from_user(inp, crt->inputdata, mod_len))
return -EFAULT;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
return 0;
}
@@ -356,15 +367,15 @@ static int convert_type80(struct zcrypt_queue *zq,
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEXxA card may not do that.. */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- t80h->code);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ t80h->code);
+ return -EAGAIN;
}
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
@@ -376,10 +387,10 @@ static int convert_type80(struct zcrypt_queue *zq,
return 0;
}
-static int convert_response(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+static int convert_response_cex2a(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
unsigned char rtype = ((unsigned char *) reply->msg)[1];
@@ -393,15 +404,15 @@ static int convert_response(struct zcrypt_queue *zq,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (unsigned int) rtype);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) rtype);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) rtype);
+ return -EAGAIN;
}
}
@@ -450,39 +461,41 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo *mex)
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
struct completion work;
int rc;
- ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
- ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.msg)
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
+ ap_msg->receive = zcrypt_cex2a_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &work;
+ rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
if (rc)
- goto out_free;
+ goto out;
init_completion(&work);
- ap_queue_message(zq->queue, &ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response(zq, &ap_msg, mex->outputdata,
- mex->outputdatalength);
+ rc = convert_response_cex2a(zq, ap_msg,
+ mex->outputdata,
+ mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
-out_free:
- kfree(ap_msg.msg);
+ ap_cancel_message(zq->queue, ap_msg);
+out:
+ ap_msg->private = NULL;
return rc;
}
@@ -494,39 +507,41 @@ out_free:
* @crt: pointer to the modexpoc_crt request buffer
*/
static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo_crt *crt)
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
struct completion work;
int rc;
- ap_init_message(&ap_msg);
if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, GFP_KERNEL);
else
- ap_msg.msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg.msg)
+ ap_msg->msg = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
+ ap_msg->receive = zcrypt_cex2a_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &work;
+ rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
if (rc)
- goto out_free;
+ goto out;
init_completion(&work);
- ap_queue_message(zq->queue, &ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
rc = wait_for_completion_interruptible(&work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response(zq, &ap_msg, crt->outputdata,
- crt->outputdatalength);
+ rc = convert_response_cex2a(zq, ap_msg,
+ crt->outputdata,
+ crt->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
-out_free:
- kfree(ap_msg.msg);
+ ap_cancel_message(zq->queue, ap_msg);
+out:
+ ap_msg->private = NULL;
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index d77991c74c25..307f90657d1d 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -388,7 +388,7 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2;
} __packed;
-static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg,
struct ica_xcRB *xcRB,
unsigned int *fcode,
unsigned short **dom)
@@ -465,8 +465,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
msg->hdr.FromCardLen2 = xcRB->reply_data_length;
/* prepare CPRB */
- if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
- xcRB->request_control_blk_length))
+ if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr,
+ xcRB->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length)
@@ -482,18 +482,23 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
|| memcmp(function_code, "AU", 2) == 0)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
/* copy data block */
if (xcRB->request_data_length &&
- copy_from_user(req_data, xcRB->request_data_address,
- xcRB->request_data_length))
+ z_copy_from_user(userspace, req_data, xcRB->request_data_address,
+ xcRB->request_data_length))
return -EFAULT;
return 0;
}
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
- struct ep11_urb *xcRB,
- unsigned int *fcode)
+static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
+ struct ep11_urb *xcRB,
+ unsigned int *fcode)
{
unsigned int lfmt;
static struct type6_hdr static_type6_ep11_hdr = {
@@ -543,8 +548,8 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
msg->hdr.FromCardLen1 = xcRB->resp_len;
/* Import CPRB data from the ioctl input parameter */
- if (copy_from_user(&(msg->cprbx.cprb_len),
- (char __force __user *)xcRB->req, xcRB->req_len)) {
+ if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len),
+ (char __force __user *)xcRB->req, xcRB->req_len)) {
return -EFAULT;
}
@@ -569,6 +574,11 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
if (msg->cprbx.flags & 0x20)
ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+#ifdef CONFIG_ZCRYPT_DEBUG
+ if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL)
+ ap_msg->flags ^= AP_MSG_FLAG_SPECIAL;
+#endif
+
return 0;
}
@@ -650,23 +660,22 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
(service_rc == 8 && service_rs == 72) ||
(service_rc == 8 && service_rs == 770) ||
(service_rc == 12 && service_rs == 769)) {
- ZCRYPT_DBF(DBF_DEBUG,
- "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
+ ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
return -EINVAL;
}
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) service_rc, (int) service_rs);
+ return -EAGAIN;
}
data = msg->text;
reply_len = msg->length - 2;
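The ZCRYPT_DBF_WARN/ZCRYPT_DBF_ERR helpers used above fold the severity level into the macro name instead of passing DBF_* at every call site. A hedged sketch of what such wrappers look like (assumption — the real definitions live in the zcrypt debug header):

	#define ZCRYPT_DBF_WARN(fmt, ...) ZCRYPT_DBF(DBF_WARN, fmt, ##__VA_ARGS__)
	#define ZCRYPT_DBF_ERR(fmt, ...)  ZCRYPT_DBF(DBF_ERR, fmt, ##__VA_ARGS__)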
@@ -707,7 +716,7 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
struct ica_xcRB *xcRB)
{
@@ -715,15 +724,15 @@ static int convert_type86_xcrb(struct zcrypt_queue *zq,
char *data = reply->msg;
/* Copy CPRB to user */
- if (copy_to_user(xcRB->reply_control_blk_addr,
- data + msg->fmt2.offset1, msg->fmt2.count1))
+ if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcRB->reply_control_blk_length = msg->fmt2.count1;
/* Copy data buffer to user */
if (msg->fmt2.count2)
- if (copy_to_user(xcRB->reply_data_addr,
- data + msg->fmt2.offset2, msg->fmt2.count2))
+ if (z_copy_to_user(userspace, xcRB->reply_data_addr,
+ data + msg->fmt2.offset2, msg->fmt2.count2))
return -EFAULT;
xcRB->reply_data_length = msg->fmt2.count2;
return 0;
@@ -738,7 +747,7 @@ static int convert_type86_xcrb(struct zcrypt_queue *zq,
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
struct ep11_urb *xcRB)
{
@@ -749,8 +758,8 @@ static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
return -EINVAL;
/* Copy response CPRB to user */
- if (copy_to_user((char __force __user *)xcRB->resp,
- data + msg->fmt2.offset1, msg->fmt2.count1))
+ if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
xcRB->resp_len = msg->fmt2.count1;
return 0;
@@ -800,23 +809,24 @@ static int convert_response_ica(struct zcrypt_queue *zq,
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
fallthrough; /* wrong cprb version is an unknown response */
- default: /* Unknown response type, this should NEVER EVER happen */
+ default:
+ /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
}
}
-static int convert_response_xcrb(struct zcrypt_queue *zq,
- struct ap_message *reply,
- struct ica_xcRB *xcRB)
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ struct ica_xcRB *xcRB)
{
struct type86x_reply *msg = reply->msg;
@@ -831,25 +841,25 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(zq, reply, xcRB);
+ return convert_type86_xcrb(userspace, zq, reply, xcRB);
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
}
}
-static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
- struct ap_message *reply, struct ep11_urb *xcRB)
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply, struct ep11_urb *xcRB)
{
struct type86_ep11_reply *msg = reply->msg;
@@ -861,19 +871,19 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
- return convert_type86_ep11_xcrb(zq, reply, xcRB);
+ return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB);
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
}
}
@@ -895,15 +905,15 @@ static int convert_response_rng(struct zcrypt_queue *zq,
fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int) msg->hdr.type);
+ return -EAGAIN;
}
}
@@ -1007,39 +1017,42 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo *mex)
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
- ap_init_message(&ap_msg);
- ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.msg)
+ ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &resp_type;
+ rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zq->queue, &ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out_free;
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ica(zq, &ap_msg,
+ rc = convert_response_ica(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
} else
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
out_free:
- free_page((unsigned long) ap_msg.msg);
+ free_page((unsigned long) ap_msg->msg);
+ ap_msg->private = NULL;
+ ap_msg->msg = NULL;
return rc;
}
@@ -1051,40 +1064,43 @@ out_free:
* @crt: pointer to the modexpo_crt request buffer
*/
static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo_crt *crt)
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_ICA,
};
int rc;
- ap_init_message(&ap_msg);
- ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.msg)
+ ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ ap_msg->private = &resp_type;
+ rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt);
if (rc)
goto out_free;
init_completion(&resp_type.work);
- ap_queue_message(zq->queue, &ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out_free;
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ica(zq, &ap_msg,
+ rc = convert_response_ica(zq, ap_msg,
crt->outputdata,
crt->outputdatalength);
} else {
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
}
out_free:
- free_page((unsigned long) ap_msg.msg);
+ free_page((unsigned long) ap_msg->msg);
+ ap_msg->private = NULL;
+ ap_msg->msg = NULL;
return rc;
}
@@ -1095,9 +1111,9 @@ out_free:
* by the caller with ap_init_message(). Also the caller has to
* make sure ap_release_message() is always called even on failure.
*/
-unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
- struct ap_message *ap_msg,
- unsigned int *func_code, unsigned short **dom)
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned short **dom)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_XCRB,
@@ -1112,7 +1128,7 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+ return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom);
}
/**
@@ -1122,24 +1138,26 @@ unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
* CEXxC device to the request distributor
* @xcRB: pointer to the send_cprb request buffer
*/
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
- struct ica_xcRB *xcRB,
- struct ap_message *ap_msg)
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+ struct ica_xcRB *xcRB,
+ struct ap_message *ap_msg)
{
int rc;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_xcrb(zq, ap_msg, xcRB);
+ rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB);
} else
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
-
+out:
return rc;
}
@@ -1150,9 +1168,9 @@ static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
* by the caller with ap_init_message(). Also the caller has to
* make sure ap_release_message() is always called even on failure.
*/
-unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
- struct ap_message *ap_msg,
- unsigned int *func_code)
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code)
{
struct response_type resp_type = {
.type = CEXXC_RESPONSE_TYPE_EP11,
@@ -1167,7 +1185,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
if (!ap_msg->private)
return -ENOMEM;
- return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+ return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code);
}
/**
@@ -1177,7 +1195,7 @@ unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
* CEX4P device to the request distributor
* @xcRB: pointer to the ep11 user request block
*/
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
struct ep11_urb *xcrb,
struct ap_message *ap_msg)
{
@@ -1232,16 +1250,18 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
}
init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
+ rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
} else
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
-
+out:
return rc;
}
@@ -1293,7 +1313,9 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
rc = wait_for_completion_interruptible(&rtype->work);
if (rc == 0) {
rc = ap_msg->rc;
@@ -1302,7 +1324,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
} else
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
-
+out:
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 0de280a81dd4..0a0bf074206b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -96,9 +96,9 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */
} __packed;
-unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *,
unsigned int *, unsigned short **);
-unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *,
unsigned int *);
unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 8bae6ad159a7..5062eae73d4a 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -40,22 +40,27 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct zcrypt_queue *zq = aq->private;
+ int online = aq->config && zq->online ? 1 : 0;
- return scnprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct zcrypt_queue *zq = aq->private;
struct zcrypt_card *zc = zq->zcard;
int online;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
+ if (online && (!aq->config || !aq->card->config))
+ return -ENODEV;
if (online && !zc->online)
return -EINVAL;
zq->online = online;
@@ -175,7 +180,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
&zcrypt_queue_attr_group);
if (rc)
goto out;
- get_device(&zq->queue->ap_dev.device);
if (zq->ops->rng) {
rc = zcrypt_rng_device_add();
@@ -187,7 +191,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
out_unregister:
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
out:
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
@@ -215,12 +218,10 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
list_del_init(&zq->list);
zcrypt_device_count--;
spin_unlock(&zcrypt_list_lock);
- zcrypt_card_put(zc);
if (zq->ops->rng)
zcrypt_rng_device_remove();
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
- zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 53120e68796e..bf236d474538 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -107,7 +107,7 @@ config QETH_OSX
config CCWGROUP
tristate
- default (LCS || CTCM || QETH)
+ default (LCS || CTCM || QETH || SMC)
config ISM
tristate "Support for ISM vPCI Adapter"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 225737295cb4..d98c486724d4 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -159,7 +159,6 @@ extern const char *ctc_ch_state_names[];
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
void ctcm_purge_skb_queue(struct sk_buff_head *q);
-void fsm_action_nop(fsm_instance *fi, int event, void *arg);
/*
* ----- non-static actions for ctcm channel statemachine -----
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 441d7b211f0f..da41b26f76d1 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -228,7 +228,6 @@ static inline void ctcmpc_dump32(char *buf, int len)
ctcmpc_dumpit(buf, 32);
}
-int ctcmpc_open(struct net_device *);
void ctcm_ccw_check_rc(struct channel *, int, char *);
void mpc_group_ready(unsigned long adev);
void mpc_channel_action(struct channel *ch, int direction, int action);
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 1901e9c80ed8..38fe90c2597d 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -16,6 +16,7 @@
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
#define ISM_NR_DMBS 1920
+#define ISM_IDENT_MASK 0x00FFFF
#define ISM_REG_SBA 0x1
#define ISM_REG_IEQ 0x2
@@ -206,6 +207,12 @@ struct ism_dev {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+struct ism_systemeid {
+ u8 seid_string[24];
+ u8 serial_number[4];
+ u8 type[4];
+};
+
static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
unsigned long offset, unsigned long len)
{
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 5fbe9eae84d1..26cc943d2034 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -13,6 +13,8 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/processor.h>
#include <net/smc.h>
#include <asm/debug.h>
@@ -387,6 +389,42 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
return 0;
}
+static struct ism_systemeid SYSTEM_EID = {
+ .seid_string = "IBM-SYSZ-ISMSEID00000000",
+ .serial_number = "0000",
+ .type = "0000",
+};
+
+static void ism_create_system_eid(void)
+{
+ struct cpuid id;
+ u16 ident_tail;
+ char tmp[5];
+
+ get_cpu_id(&id);
+ ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
+ snprintf(tmp, 5, "%04X", ident_tail);
+ memcpy(&SYSTEM_EID.serial_number, tmp, 4);
+ snprintf(tmp, 5, "%04X", id.machine);
+ memcpy(&SYSTEM_EID.type, tmp, 4);
+}
+
+static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+{
+ *eid = &SYSTEM_EID.seid_string[0];
+}
+
+static u16 ism_get_chid(struct smcd_dev *smcd)
+{
+ struct ism_dev *ismdev;
+
+ ismdev = (struct ism_dev *)smcd->priv;
+ if (!ismdev || !ismdev->pdev)
+ return 0;
+
+ return to_zpci(ismdev->pdev)->pchid;
+}
+
static void ism_handle_event(struct ism_dev *ism)
{
struct smcd_event *entry;
@@ -443,6 +481,8 @@ static const struct smcd_ops ism_ops = {
.reset_vlan_required = ism_reset_vlan_required,
.signal_event = ism_signal_ieq,
.move_data = ism_move,
+ .get_system_eid = ism_get_system_eid,
+ .get_chid = ism_get_chid,
};
static int ism_dev_init(struct ism_dev *ism)
@@ -471,6 +511,10 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_ieq;
+ if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
+ /* hardware is V2 capable */
+ ism_create_system_eid();
+
ret = smcd_register_dev(ism->smcd);
if (ret)
goto unreg_ieq;
@@ -550,6 +594,9 @@ static void ism_dev_exit(struct ism_dev *ism)
struct pci_dev *pdev = ism->pdev;
smcd_unregister_dev(ism->smcd);
+ if (SYSTEM_EID.serial_number[0] != '0' ||
+ SYSTEM_EID.type[0] != '0')
+ ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
free_irq(pci_irq_vector(pdev, 0), ism);
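Taken together, the ISM hunks add SMC-Dv2 system-EID support: if adding ISM_RESERVED_VLANID succeeds, the hardware is V2-capable and ism_create_system_eid() stamps the machine identity into SYSTEM_EID. A worked example of the string assembly (the CPU id values below are assumptions for illustration only):

	u16 ident_tail = 0x2345;	/* assumed id.ident & ISM_IDENT_MASK */
	char tmp[5];

	snprintf(tmp, sizeof(tmp), "%04X", ident_tail);
	/* tmp == "2345"; with an assumed machine type 0x8561 the 32 bytes
	 * of SYSTEM_EID then read back-to-back as
	 *   "IBM-SYSZ-ISMSEID00000000" "2345" "8561"
	 * The '0' checks in ism_dev_exit() detect whether this ever ran. */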
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ecfd6d152e86..f73b4756ed5e 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -177,8 +177,8 @@ struct qeth_vnicc_info {
/**
* some more defs
*/
-#define QETH_TX_TIMEOUT 100 * HZ
-#define QETH_RCD_TIMEOUT 60 * HZ
+#define QETH_TX_TIMEOUT (100 * HZ)
+#define QETH_RCD_TIMEOUT (60 * HZ)
#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_MAX_PORTNO 15
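Parenthesizing the timeout macros is a macro-hygiene fix: the old definitions could silently mis-associate inside a larger expression. A minimal illustration (deadline and remaining are hypothetical variables):

	/* Old: #define QETH_TX_TIMEOUT 100 * HZ */
	remaining = deadline % QETH_TX_TIMEOUT;
	/* expanded to (deadline % 100) * HZ rather than the intended
	 * deadline % (100 * HZ); the parenthesized form removes the trap. */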
@@ -195,8 +195,8 @@ struct qeth_vnicc_info {
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
-#define QETH_IN_BUF_COUNT_MIN 8
-#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_IN_BUF_COUNT_MIN 8U
+#define QETH_IN_BUF_COUNT_MAX 128U
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
((card)->qdio.in_buf_pool.buf_count / 2)
@@ -278,6 +278,26 @@ struct qeth_hdr {
} hdr;
} __attribute__ ((packed));
+#define QETH_QIB_PQUE_ORDER_RR 0
+#define QETH_QIB_PQUE_UNITS_SBAL 2
+#define QETH_QIB_PQUE_PRIO_DEFAULT 4
+
+struct qeth_qib_parms {
+ char pcit_magic[4];
+ u32 pcit_a;
+ u32 pcit_b;
+ u32 pcit_c;
+ char blkt_magic[4];
+ u32 blkt_total;
+ u32 blkt_inter_packet;
+ u32 blkt_inter_packet_jumbo;
+ char pque_magic[4];
+ u8 pque_order;
+ u8 pque_units;
+ u16 reserved;
+ u32 pque_priority[4];
+};
+
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
@@ -420,12 +440,6 @@ struct qeth_qdio_out_buffer {
struct qeth_card;
-enum qeth_out_q_states {
- QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- QETH_OUT_Q_LOCKED_FLUSH,
-};
-
#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
@@ -486,12 +500,13 @@ struct qeth_qdio_out_q {
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
+ spinlock_t lock;
+ unsigned int priority;
u8 next_buf_to_fill;
u8 max_elements;
u8 queue_no;
u8 do_pack;
struct qeth_card *card;
- atomic_t state;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -544,7 +559,7 @@ struct qeth_qdio_info {
int in_buf_size;
/* output */
- int no_out_queues;
+ unsigned int no_out_queues;
struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
struct qdio_outbuf_state *out_bufstates;
@@ -680,14 +695,27 @@ struct qeth_card_blkt {
int inter_packet_jumbo;
};
+enum qeth_pnso_mode {
+ QETH_PNSO_NONE,
+ QETH_PNSO_BRIDGEPORT,
+ QETH_PNSO_ADDR_INFO,
+};
+
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
- u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ /* doubleword below corresponds to net_if_token */
+ u16 ddev_devno;
+ u8 cssid;
+ u8 iid;
+ u8 ssid;
+ u8 chpid;
+ u16 chid;
+ u8 ids_valid:1; /* cssid,iid,chid */
u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
@@ -696,6 +724,7 @@ struct qeth_card_info {
/* no bitfield, we take a pointer on these two: */
u8 has_lp2lp_cso_v6;
u8 has_lp2lp_cso_v4;
+ enum qeth_pnso_mode pnso_mode;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
@@ -745,7 +774,7 @@ struct qeth_discipline {
const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online)(struct qeth_card *card);
+ int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
@@ -780,6 +809,9 @@ struct qeth_switch_info {
struct qeth_priv {
unsigned int rx_copybreak;
+ unsigned int tx_wanted_queues;
+ u32 brport_hw_features;
+ u32 brport_features;
};
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
@@ -804,12 +836,16 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
+
+ struct mutex ip_lock;
+ /* protected by ip_lock: */
DECLARE_HASHTABLE(ip_htable, 4);
+ struct qeth_ipato ipato;
+
DECLARE_HASHTABLE(local_addrs4, 4);
DECLARE_HASHTABLE(local_addrs6, 4);
spinlock_t local_addrs4_lock;
spinlock_t local_addrs6_lock;
- struct mutex ip_lock;
DECLARE_HASHTABLE(rx_mode_addrs, 4);
struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
@@ -817,13 +853,12 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
int read_or_write_problem;
struct qeth_osn_info osn_info;
- struct qeth_discipline *discipline;
+ const struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
@@ -857,8 +892,20 @@ struct qeth_trap_id {
__u16 devno;
} __packed;
-/*some helper functions*/
-#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
+{
+ return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
+}
+
+static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
+{
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ if (qeth_uses_tx_prio_queueing(card))
+ return min(card->dev->num_tx_queues, card->qdio.no_out_queues);
+
+ return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
+}
static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
@@ -1001,8 +1048,8 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
-extern struct qeth_discipline qeth_l2_discipline;
-extern struct qeth_discipline qeth_l3_discipline;
+extern const struct qeth_discipline qeth_l2_discipline;
+extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -1022,13 +1069,11 @@ extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
-void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+ int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
-int qeth_stop_channel(struct qeth_channel *channel);
int qeth_set_offline(struct qeth_card *card, bool resetting);
-void qeth_print_status_message(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1052,12 +1097,7 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
-void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_clear_ipacmd_list(struct qeth_card *);
-int qeth_qdio_clear_card(struct qeth_card *, int);
-void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
@@ -1081,9 +1121,7 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
-int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6a7398251423..93c9b30ab17a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
@@ -35,7 +36,6 @@
#include <asm/ebcdic.h>
#include <asm/chpid.h>
-#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
@@ -201,7 +201,7 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-void qeth_clear_working_pool_list(struct qeth_card *card)
+static void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -209,14 +209,12 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
- &card->qdio.in_buf_pool.entry_list, list){
- list_del(&pool_entry->list);
- }
+ &card->qdio.in_buf_pool.entry_list, list)
+ list_del(&pool_entry->list);
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
-EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
@@ -482,6 +480,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
atomic_read(&c->state) ==
QETH_QDIO_BUF_HANDLED_DELAYED) {
struct qeth_qdio_out_buffer *f = c;
+
QETH_CARD_TEXT(f->q->card, 5, "fp");
QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
/* release here to avoid interleaving between
@@ -508,7 +507,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
}
}
-
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
@@ -658,12 +656,11 @@ static void qeth_flush_local_addrs6(struct qeth_card *card)
spin_unlock_irq(&card->local_addrs6_lock);
}
-void qeth_flush_local_addrs(struct qeth_card *card)
+static void qeth_flush_local_addrs(struct qeth_card *card)
{
qeth_flush_local_addrs4(card);
qeth_flush_local_addrs6(card);
}
-EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
static void qeth_add_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
@@ -886,6 +883,7 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
{
const char *ipa_name;
int com = cmd->hdr.command;
+
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
@@ -917,12 +915,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
schedule_work(&card->close_dev_work);
} else {
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
- QETH_CARD_IFNAME(card), card->info.chpid);
+ netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
}
@@ -930,7 +928,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
"The link for %s on CHPID 0x%X has been restored\n",
- QETH_CARD_IFNAME(card), card->info.chpid);
+ netdev_name(card->dev), card->info.chpid);
if (card->info.hwtrap)
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
@@ -965,7 +963,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
}
}
-void qeth_clear_ipacmd_list(struct qeth_card *card)
+static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
unsigned long flags;
@@ -977,7 +975,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_notify_cmd(iob, -ECANCELED);
spin_unlock_irqrestore(&card->lock, flags);
}
-EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
@@ -1256,7 +1253,7 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
- return -EIO;
+ return -EIO;
}
return 0;
}
@@ -1502,7 +1499,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
}
}
-void qeth_drain_output_queues(struct qeth_card *card)
+static void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
@@ -1513,25 +1510,13 @@ void qeth_drain_output_queues(struct qeth_card *card)
qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
-static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int max = single ? 1 : card->dev->num_tx_queues;
- unsigned int count;
- int rc;
-
- count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
-
- rtnl_lock();
- rc = netif_set_real_num_tx_queues(card->dev, count);
- rtnl_unlock();
-
- if (rc)
- return rc;
if (card->qdio.no_out_queues == max)
- return 0;
+ return;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
@@ -1540,14 +1525,12 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
card->qdio.no_out_queues = max;
- return 0;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
- int rc = 0;
QETH_CARD_TEXT(card, 2, "chp_desc");
@@ -1560,12 +1543,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
- rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+ qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
- return rc;
+ return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1617,7 +1600,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
- QETH_CARD_TEXT(card , 2, "strthrd");
+ QETH_CARD_TEXT(card, 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
@@ -1754,7 +1737,7 @@ static int qeth_halt_channel(struct qeth_card *card,
return 0;
}
-int qeth_stop_channel(struct qeth_channel *channel)
+static int qeth_stop_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
@@ -1772,7 +1755,6 @@ int qeth_stop_channel(struct qeth_channel *channel)
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_stop_channel);
static int qeth_start_channel(struct qeth_channel *channel)
{
@@ -1842,7 +1824,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
return qeth_clear_channels(card);
}
-int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
@@ -1870,7 +1852,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
@@ -2311,12 +2292,10 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
struct ccw1 *ccw = __ccw_from_cmd(iob);
- struct ccw_dev_id dev_id;
qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
iob->data);
qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
port |= QETH_IDX_ACT_INVAL_FRAME;
@@ -2325,7 +2304,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
&card->info.func_level, 2);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
@@ -2599,7 +2578,6 @@ static int qeth_ulp_setup(struct qeth_card *card)
{
__u16 temp;
struct qeth_cmd_buffer *iob;
- struct ccw_dev_id dev_id;
QETH_CARD_TEXT(card, 2, "ulpsetup");
@@ -2614,8 +2592,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
- memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
@@ -2702,9 +2679,11 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
card->qdio.out_qs[i] = queue;
queue->card = card;
queue->queue_no = i;
+ spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2765,30 +2744,44 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
}
}
-static void qeth_create_qib_param_field(struct qeth_card *card,
- char *param_field)
+static void qeth_fill_qib_parms(struct qeth_card *card,
+ struct qeth_qib_parms *parms)
{
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
- param_field[0] = _ascebc['P'];
- param_field[1] = _ascebc['C'];
- param_field[2] = _ascebc['I'];
- param_field[3] = _ascebc['T'];
- *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
- *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
- *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
-}
+ parms->pcit_magic[0] = 'P';
+ parms->pcit_magic[1] = 'C';
+ parms->pcit_magic[2] = 'I';
+ parms->pcit_magic[3] = 'T';
+ ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
+ parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
+ parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
+ parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
+
+ parms->blkt_magic[0] = 'B';
+ parms->blkt_magic[1] = 'L';
+ parms->blkt_magic[2] = 'K';
+ parms->blkt_magic[3] = 'T';
+ ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
+ parms->blkt_total = card->info.blkt.time_total;
+ parms->blkt_inter_packet = card->info.blkt.inter_packet;
+ parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
+
+ /* Prio-queueing implicitly uses the default priorities: */
+ if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
+ return;
-static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
- char *param_field)
-{
- param_field[16] = _ascebc['B'];
- param_field[17] = _ascebc['L'];
- param_field[18] = _ascebc['K'];
- param_field[19] = _ascebc['T'];
- *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
- *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
- *((unsigned int *) (&param_field[28])) =
- card->info.blkt.inter_packet_jumbo;
+ parms->pque_magic[0] = 'P';
+ parms->pque_magic[1] = 'Q';
+ parms->pque_magic[2] = 'U';
+ parms->pque_magic[3] = 'E';
+ ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
+ parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
+ parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
+
+ qeth_for_each_output_queue(card, queue, i)
+ parms->pque_priority[i] = queue->priority;
}
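Since struct qeth_qib_parms replaces hand-computed byte offsets into the QIB parameter field, its layout must reproduce the old wire format exactly. One way to pin that down at build time, with offsets inferred from the removed code (these BUILD_BUG_ON lines are illustrative, not part of the patch):

	BUILD_BUG_ON(offsetof(struct qeth_qib_parms, pcit_a) != 4);
	BUILD_BUG_ON(offsetof(struct qeth_qib_parms, pcit_c) != 12);
	BUILD_BUG_ON(offsetof(struct qeth_qib_parms, blkt_magic) != 16);
	BUILD_BUG_ON(offsetof(struct qeth_qib_parms, pque_magic) != 32);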
static int qeth_qdio_activate(struct qeth_card *card)
@@ -2870,7 +2863,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
}
-void qeth_print_status_message(struct qeth_card *card)
+static void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
@@ -2911,7 +2904,6 @@ void qeth_print_status_message(struct qeth_card *card)
(card->info.mcl_level[0]) ? ")" : "",
qeth_get_cardname_short(card));
}
-EXPORT_SYMBOL_GPL(qeth_print_status_message);
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
@@ -3068,7 +3060,6 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
}
return 0;
@@ -3425,7 +3416,6 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
}
free_page(info);
- return;
}
static int qeth_hw_trap_cb(struct qeth_card *card,
@@ -3549,8 +3539,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
- struct qeth_card *card = container_of(work, struct qeth_card,
- buffer_reclaim_work.work);
+ struct qeth_card *card = container_of(to_delayed_work(work),
+ struct qeth_card,
+ buffer_reclaim_work);
local_bh_disable();
napi_schedule(&card->napi);
@@ -3740,37 +3731,31 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
- int index;
- int flush_cnt = 0;
- int q_was_packing = 0;
-
/*
* check if we have to switch to non-packing mode or if
* we have to get a pci flag out on the queue
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)) {
- if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
- QETH_OUT_Q_UNLOCKED) {
- /*
- * If we get in here, there was no action in
- * do_send_packet. So, we check if there is a
- * packing buffer to be flushed here.
- */
- index = queue->next_buf_to_fill;
- q_was_packing = queue->do_pack;
- /* queue->do_pack may change */
- barrier();
- flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
- if (!flush_cnt &&
- !atomic_read(&queue->set_pci_flags_count))
- flush_cnt += qeth_prep_flush_pack_buffer(queue);
+ unsigned int index, flush_cnt;
+ bool q_was_packing;
+
+ spin_lock(&queue->lock);
+
+ index = queue->next_buf_to_fill;
+ q_was_packing = queue->do_pack;
+
+ flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt = qeth_prep_flush_pack_buffer(queue);
+
+ if (flush_cnt) {
+ qeth_flush_buffers(queue, index, flush_cnt);
if (q_was_packing)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
- if (flush_cnt)
- qeth_flush_buffers(queue, index, flush_cnt);
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
+
+ spin_unlock(&queue->lock);
}
}
@@ -4282,29 +4267,22 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
unsigned int offset, unsigned int hd_len,
int elements_needed)
{
+ unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
- int start_index;
int flush_count = 0;
int do_pack = 0;
- int tmp;
int rc = 0;
- /* spin until we get the queue ... */
- while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
- start_index = queue->next_buf_to_fill;
buffer = queue->bufs[queue->next_buf_to_fill];
/* Just a sanity check, the wake/stop logic should ensure that we always
* get a free buffer.
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- }
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
@@ -4327,8 +4305,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
flush_count);
- atomic_set(&queue->state,
- QETH_OUT_Q_UNLOCKED);
rc = -EBUSY;
goto out;
}
@@ -4360,31 +4336,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
- else if (!atomic_read(&queue->set_pci_flags_count))
- atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
- /*
- * queue->state will go from LOCKED -> UNLOCKED or from
- * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
- * (switch packing state or flush buffer to get another pci flag out).
- * In that case we will enter this loop
- */
- while (atomic_dec_return(&queue->state)) {
- start_index = queue->next_buf_to_fill;
- /* check if we can go back to non-packing state */
- tmp = qeth_switch_to_nonpacking_if_needed(queue);
- /*
- * check if we need to flush a packing buffer to get a pci
- * flag out on the queue
- */
- if (!tmp && !atomic_read(&queue->set_pci_flags_count))
- tmp = qeth_prep_flush_pack_buffer(queue);
- if (tmp) {
- qeth_flush_buffers(queue, start_index, tmp);
- flush_count += tmp;
- }
- }
+
out:
- /* at this point the queue is UNLOCKED again */
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
@@ -4458,8 +4411,10 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
/* TODO: drop skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
+ spin_lock(&queue->lock);
rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
hd_len, elements);
+ spin_unlock(&queue->lock);
}
if (rc && !push_len)
@@ -4955,7 +4910,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
{
struct diag26c_mac_resp *response;
struct diag26c_mac_req *request;
- struct ccw_dev_id id;
int rc;
QETH_CARD_TEXT(card, 2, "vmreqmac");
@@ -4967,11 +4921,10 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
- request->devno = id.devno;
+ request->devno = card->info.ddev_devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
@@ -5044,7 +4997,6 @@ static void qeth_determine_capabilities(struct qeth_card *card)
card->options.cq = QETH_CQ_NOTAVAILABLE;
}
-
out_offline:
if (ddev_offline == 1)
qeth_stop_channel(channel);
@@ -5052,25 +5004,51 @@ out:
return;
}
+static void qeth_read_ccw_conf_data(struct qeth_card *card)
+{
+ struct qeth_card_info *info = &card->info;
+ struct ccw_device *cdev = CARD_DDEV(card);
+ struct ccw_dev_id dev_id;
+
+ QETH_CARD_TEXT(card, 2, "ccwconfd");
+ ccw_device_get_id(cdev, &dev_id);
+
+ info->ddev_devno = dev_id.devno;
+ info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
+ !ccw_device_get_iid(cdev, &info->iid) &&
+ !ccw_device_get_chid(cdev, 0, &info->chid);
+ info->ssid = dev_id.ssid;
+
+ dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
+ info->chid, info->chpid);
+
+ QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
+ QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
+ QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
+ QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
+ QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
+ QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
+ QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
+}
+
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
+ struct qeth_qib_parms *qib_parms = NULL;
struct qdio_initialize init_data;
- char *qib_param_field;
unsigned int i;
int rc = 0;
QETH_CARD_TEXT(card, 2, "qdioest");
- qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
- if (!qib_param_field) {
- rc = -ENOMEM;
- goto out_free_nothing;
- }
+ if (!IS_IQD(card) && !IS_VM_NIC(card)) {
+ qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
+ if (!qib_parms)
+ return -ENOMEM;
- qeth_create_qib_param_field(card, qib_param_field);
- qeth_create_qib_param_field_blkt(card, qib_param_field);
+ qeth_fill_qib_parms(card, qib_parms);
+ }
in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
if (card->options.cq == QETH_CQ_ENABLED)
@@ -5083,7 +5061,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
- init_data.qib_param_field = qib_param_field;
+ init_data.qib_param_field = (void *)qib_parms;
init_data.no_input_qs = card->qdio.no_in_queues;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
@@ -5120,9 +5098,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
default:
break;
}
+
out:
- kfree(qib_param_field);
-out_free_nothing:
+ kfree(qib_parms);
return rc;
}
@@ -5138,7 +5116,7 @@ static void qeth_core_free_card(struct qeth_card *card)
kfree(card);
}
-void qeth_trace_features(struct qeth_card *card)
+static void qeth_trace_features(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "features");
QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
@@ -5147,7 +5125,6 @@ void qeth_trace_features(struct qeth_card *card)
QETH_CARD_HEX(card, 2, &card->info.diagass_support,
sizeof(card->info.diagass_support));
}
-EXPORT_SYMBOL_GPL(qeth_trace_features);
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
@@ -5178,7 +5155,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
+static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@@ -5220,6 +5197,7 @@ retriable:
}
qeth_determine_capabilities(card);
+ qeth_read_ccw_conf_data(card);
qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
@@ -5291,6 +5269,8 @@ retriable:
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
}
+ qeth_trace_features(card);
+
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
(card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
card->info.hwtrap = 0;
@@ -5316,21 +5296,53 @@ out:
CARD_DEVID(card), rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
static int qeth_set_online(struct qeth_card *card)
{
+ bool carrier_ok;
int rc;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
- rc = card->discipline->set_online(card);
+ rc = qeth_hardsetup_card(card, &carrier_ok);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
+ rc = -ENODEV;
+ goto err_hardsetup;
+ }
+
+ qeth_print_status_message(card);
+
+ if (card->dev->reg_state != NETREG_REGISTERED)
+ /* no need for locking / error handling at this early stage: */
+ qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
+
+ rc = card->discipline->set_online(card, carrier_ok);
+ if (rc)
+ goto err_online;
+
+ /* let user_space know that device is online */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
+ return 0;
+
+err_online:
+err_hardsetup:
+ qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ qeth_stop_channel(&card->data);
+ qeth_stop_channel(&card->write);
+ qeth_stop_channel(&card->read);
+ qdio_free(CARD_DDEV(card));
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
@@ -5347,6 +5359,9 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
card->info.hwtrap = 1;
}
+ /* cancel any stalled cmd that might block the rtnl: */
+ qeth_clear_ipacmd_list(card);
+
rtnl_lock();
card->info.open_when_online = card->dev->flags & IFF_UP;
dev_close(card->dev);
@@ -5354,8 +5369,16 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
netif_carrier_off(card->dev);
rtnl_unlock();
+ cancel_work_sync(&card->rx_mode_work);
+
card->discipline->set_offline(card);
+ qeth_qdio_clear_card(card, 0);
+ qeth_drain_output_queues(card);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ card->info.promisc_mode = 0;
+
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
@@ -6025,6 +6048,7 @@ EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
int x;
+
for (x = 0; x < QETH_DBF_INFOS; x++) {
debug_unregister(qeth_dbf[x].id);
qeth_dbf[x].id = NULL;
@@ -6220,6 +6244,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
priv = netdev_priv(dev);
priv->rx_copybreak = QETH_RX_COPYBREAK;
+ priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
@@ -6230,8 +6255,16 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
- &qeth_ethtool_ops;
+ if (IS_OSN(card)) {
+ dev->ethtool_ops = &qeth_osn_ethtool_ops;
+ } else {
+ dev->ethtool_ops = &qeth_ethtool_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
+ }
return dev;
}
@@ -6247,28 +6280,6 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
return clone;
}
-int qeth_setup_netdev(struct qeth_card *card)
-{
- struct net_device *dev = card->dev;
- unsigned int num_tx_queues;
-
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
-
- if (IS_IQD(card)) {
- dev->features |= NETIF_F_SG;
- num_tx_queues = QETH_IQD_MIN_TXQ;
- } else if (IS_VM_NIC(card)) {
- num_tx_queues = 1;
- } else {
- num_tx_queues = dev->real_num_tx_queues;
- }
-
- return qeth_set_real_num_tx_queues(card, num_tx_queues);
-}
-EXPORT_SYMBOL_GPL(qeth_setup_netdev);
-
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -6401,6 +6412,7 @@ static int qeth_core_set_offline(struct ccwgroup_device *gdev)
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -6939,6 +6951,7 @@ int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
return rc;
}
+EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index b459def0fb26..6541bab96822 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -719,15 +719,8 @@ struct qeth_sbp_port_entry {
struct net_if_token token;
} __packed;
-struct qeth_sbp_query_ports {
- __u8 primary_bp_supported;
- __u8 secondary_bp_supported;
- __u8 num_entries;
- __u8 entry_length;
- struct qeth_sbp_port_entry entry[];
-} __packed;
-
-struct qeth_sbp_state_change {
+/* For IPA_SBP_QUERY_BRIDGE_PORTS, IPA_SBP_BRIDGE_PORT_STATE_CHANGE */
+struct qeth_sbp_port_data {
__u8 primary_bp_supported;
__u8 secondary_bp_supported;
__u8 num_entries;
@@ -741,8 +734,7 @@ struct qeth_ipacmd_setbridgeport {
union {
struct qeth_sbp_query_cmds_supp query_cmds_supp;
struct qeth_sbp_set_primary set_primary;
- struct qeth_sbp_query_ports query_ports;
- struct qeth_sbp_state_change state_change;
+ struct qeth_sbp_port_data port_data;
} data;
} __packed;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 8def82336f53..4441b3393eaf 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -52,7 +52,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+ return sprintf(buf, "%s\n", netdev_name(card->dev));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
@@ -103,21 +103,21 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
unsigned int portno, limit;
int rc = 0;
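+	/* parse and range-check the input before taking the conf_mutex: */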
+ rc = kstrtouint(buf, 16, &portno);
+ if (rc)
+ return rc;
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
- rc = -EINVAL;
- goto out;
- }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
if (portno > limit) {
rc = -EINVAL;
@@ -164,9 +164,11 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
return sprintf(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
return sprintf(buf, "%s\n", "by VLAN headers");
- default:
+ case QETH_PRIO_Q_ING_FIXED:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
+ default:
+ return sprintf(buf, "disabled\n");
}
}
@@ -248,19 +250,19 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
unsigned int cnt;
- char *tmp;
int rc = 0;
+ rc = kstrtouint(buf, 10, &cnt);
+ if (rc)
+ return rc;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- cnt = simple_strtoul(buf, &tmp, 10);
- cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
- ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-
+ cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);
rc = qeth_resize_buffer_pool(card, cnt);
out:
@@ -341,18 +343,15 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct net_device *ndev;
- char *tmp;
- int i, rc = 0;
enum qeth_discipline_id newdis;
+ unsigned int input;
+ int rc;
- mutex_lock(&card->discipline_mutex);
- if (card->state != CARD_STATE_DOWN) {
- rc = -EPERM;
- goto out;
- }
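+	/* parse and validate the input before taking the discipline_mutex: */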
+ rc = kstrtouint(buf, 16, &input);
+ if (rc)
+ return rc;
- i = simple_strtoul(buf, &tmp, 16);
- switch (i) {
+ switch (input) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
@@ -360,7 +359,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- rc = -EINVAL;
+ return -EINVAL;
+ }
+
+ mutex_lock(&card->discipline_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
goto out;
}
@@ -551,20 +555,21 @@ static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
- char *tmp;
- int i, rc = 0;
+ unsigned int input;
+ int rc;
+
+ rc = kstrtouint(buf, 10, &input);
+ if (rc)
+ return rc;
+
+ if (input > max_value)
+ return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->state != CARD_STATE_DOWN) {
+ if (card->state != CARD_STATE_DOWN)
rc = -EPERM;
- goto out;
- }
- i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value)
- *value = i;
else
- rc = -EINVAL;
-out:
+ *value = input;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index f870c5322bfe..b5caa723326e 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -211,13 +211,19 @@ static void qeth_get_channels(struct net_device *dev,
static int qeth_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
+ struct qeth_priv *priv = netdev_priv(dev);
struct qeth_card *card = dev->ml_priv;
+ int rc;
if (channels->rx_count == 0 || channels->tx_count == 0)
return -EINVAL;
if (channels->tx_count > card->qdio.no_out_queues)
return -EINVAL;
+ /* Prio-queueing needs all TX queues: */
+ if (qeth_uses_tx_prio_queueing(card))
+ return -EPERM;
+
if (IS_IQD(card)) {
if (channels->tx_count < QETH_IQD_MIN_TXQ)
return -EINVAL;
@@ -228,13 +234,13 @@ static int qeth_set_channels(struct net_device *dev,
if (netif_running(dev) &&
channels->tx_count < dev->real_num_tx_queues)
return -EPERM;
- } else {
- /* OSA still uses the legacy prio-queue mechanism: */
- if (!IS_VM_NIC(card))
- return -EOPNOTSUPP;
}
- return qeth_set_real_num_tx_queues(card, channels->tx_count);
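+	/* remember the request, so it can be re-applied when the device goes online again: */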
+ rc = qeth_set_real_num_tx_queues(card, channels->tx_count);
+ if (!rc)
+ priv->tx_wanted_queues = channels->tx_count;
+
+ return rc;
}
static int qeth_get_ts_info(struct net_device *dev,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index adf25c9fd2b3..296d73d84326 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -23,7 +23,7 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+bool qeth_bridgeport_allowed(struct qeth_card *card);
struct qeth_mac {
u8 mac_addr[ETH_ALEN];
@@ -31,4 +31,11 @@ struct qeth_mac {
struct hlist_node hnode;
};
+static inline bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+ return card->options.sbp.role ||
+ card->options.sbp.reflect_promisc ||
+ card->options.sbp.hostnotification;
+}
+
#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6384f7adba66..28f6dda95736 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,24 +17,17 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
+#include <net/switchdev.h>
#include <asm/chsc.h>
+#include <asm/css_chars.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
-static void qeth_bridgeport_query_support(struct qeth_card *card);
-static void qeth_bridge_state_change(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_addr_change_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
-static void qeth_l2_vnicc_init(struct qeth_card *card);
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout);
-
static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
@@ -190,7 +183,7 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
/* VSWITCH relies on the VLAN
* information to be present in
* the QDIO header */
- if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
}
@@ -273,26 +266,31 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
-static void qeth_l2_stop_card(struct qeth_card *card)
+static void qeth_l2_set_pnso_mode(struct qeth_card *card,
+ enum qeth_pnso_mode mode)
{
- QETH_CARD_TEXT(card, 2, "stopcard");
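+	/* the cmd IRQ handler runs with the ccwdev lock held and checks pnso_mode: */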
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ WRITE_ONCE(card->info.pnso_mode, mode);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
- qeth_set_allowed_threads(card, 0, 1);
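+	/* ensure that all queued address-change work has finished before returning: */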
+ if (mode == QETH_PNSO_NONE)
+ drain_workqueue(card->event_wq);
+}
- cancel_work_sync(&card->rx_mode_work);
- qeth_l2_drain_rx_mode_cache(card);
+static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
+{
+ struct switchdev_notifier_fdb_info info;
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_DOWN;
- }
+ QETH_CARD_TEXT(card, 2, "fdbflush");
- qeth_qdio_clear_card(card, 0);
- qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
- flush_workqueue(card->event_wq);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
+ info.addr = NULL;
+ /* flush all VLANs: */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
+ card->dev, &info.info, NULL);
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -573,52 +571,10 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
return qeth_iqd_select_queue(dev, skb,
qeth_get_ether_cast_type(skb),
sb_dev);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
-}
-
-static const struct device_type qeth_l2_devtype = {
- .name = "qeth_layer2",
- .groups = qeth_l2_attr_groups,
-};
-
-static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
- qeth_l2_vnicc_set_defaults(card);
- mutex_init(&card->sbp_lock);
-
- if (gdev->dev.type == &qeth_generic_devtype) {
- rc = qeth_l2_create_device_attributes(&gdev->dev);
- if (rc)
- return rc;
- }
-
- INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
- return 0;
-}
-
-static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
-{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-
- if (cgdev->dev.type == &qeth_generic_devtype)
- qeth_l2_remove_device_attributes(&cgdev->dev);
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
-
- if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
-
- cancel_work_sync(&card->close_dev_work);
- if (card->dev->reg_state == NETREG_REGISTERED)
- unregister_netdev(card->dev);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -631,6 +587,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
/**
* qeth_l2_pnso() - perform network subchannel operation
* @card: qeth_card structure pointer
+ * @oc: Operation Code
* @cnc: Boolean Change-Notification Control
* @cb: Callback function will be executed for each element
* of the address list
@@ -641,7 +598,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
* control" is set, further changes in the address list will be reported
* via the IPA command.
*/
-static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
void *priv)
{
@@ -652,13 +609,14 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
int i, size, elems;
int rc;
- QETH_CARD_TEXT(card, 2, "PNSO");
rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
if (rr == NULL)
return -ENOMEM;
do {
+ QETH_CARD_TEXT(card, 2, "PNSO");
/* on the first iteration, naihdr.resume_token will be zero */
- rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+ rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
+ cnc);
if (rc)
continue;
if (cb == NULL)
@@ -694,6 +652,218 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
return rc;
}
+static bool qeth_is_my_net_if_token(struct qeth_card *card,
+ struct net_if_token *token)
+{
+ return ((card->info.ddev_devno == token->devnum) &&
+ (card->info.cssid == token->cssid) &&
+ (card->info.iid == token->iid) &&
+ (card->info.ssid == token->ssid) &&
+ (card->info.chpid == token->chpid) &&
+ (card->info.chid == token->chid));
+}
+
+/**
+ * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
+ * @card: qeth_card structure pointer
+ * @code: event bitmask: high order bit 0x80 set to
+ * 1 - removal of an object
+ * 0 - addition of an object
+ * Object type(s):
+ * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
+ * @token: "network token" structure identifying 'physical' location
+ * of the target
+ * @addr_lnid: structure with MAC address and VLAN ID of the target
+ */
+static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
+ struct net_if_token *token,
+ struct mac_addr_lnid *addr_lnid)
+{
+ struct switchdev_notifier_fdb_info info;
+ u8 ntfy_mac[ETH_ALEN];
+
+ ether_addr_copy(ntfy_mac, addr_lnid->mac);
+ /* Ignore VLAN only changes */
+ if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
+ return;
+ /* Ignore mcast entries */
+ if (is_multicast_ether_addr(ntfy_mac))
+ return;
+ /* Ignore my own addresses */
+ if (qeth_is_my_net_if_token(card, token))
+ return;
+
+ info.addr = ntfy_mac;
+ /* don't report VLAN IDs */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
+
+ if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "andelmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ } else {
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "anaddmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ }
+}
+
+static void qeth_l2_dev2br_an_set_cb(void *priv,
+ struct chsc_pnso_naid_l2 *entry)
+{
+ u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
+ struct qeth_card *card = priv;
+
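+	/* lnid values below VLAN_N_VID carry a valid VLAN ID: */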
+ if (entry->addr_lnid.lnid < VLAN_N_VID)
+ code |= IPA_ADDR_CHANGE_CODE_VLANID;
+ qeth_l2_dev2br_fdb_notify(card, code,
+ (struct net_if_token *)&entry->nit,
+ (struct mac_addr_lnid *)&entry->addr_lnid);
+}
+
+/**
+ * qeth_l2_dev2br_an_set() -
+ * Enable or disable 'dev to bridge network address notification'
+ * @card: qeth_card structure pointer
+ * @enable: Enable or disable 'dev to bridge network address notification'
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notifications for all
+ * currently registered hosts.
+ *
+ * Must be called under rtnl_lock
+ */
+static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
+{
+ int rc;
+
+ if (enable) {
+ QETH_CARD_TEXT(card, 2, "anseton");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
+ qeth_l2_dev2br_an_set_cb, card);
+ if (rc == -EAGAIN)
+ /* address notification enabled, but inconsistent
+ * addresses reported -> disable address notification
+ */
+ qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
+ NULL, NULL);
+ } else {
+ QETH_CARD_TEXT(card, 2, "ansetoff");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ }
+
+ return rc;
+}
+
+static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
+ u16 mode = BRIDGE_MODE_UNDEF;
+
+ /* Do not even show qeth devs that cannot do bridge_setlink */
+ if (!priv->brport_hw_features || !netif_device_present(dev) ||
+ qeth_bridgeport_is_in_use(card))
+ return -EOPNOTSUPP;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ mode, priv->brport_features,
+ priv->brport_hw_features,
+ nlflags, filter_mask, NULL);
+}
+
+static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
+};
+
+/**
+ * qeth_l2_bridge_setlink() - set bridgeport attributes
+ * @dev: netdevice
+ * @nlh: netlink message header
+ * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
+ * @extack: extended ACK report struct
+ *
+ * Called under rtnl_lock
+ */
+static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
+ struct qeth_card *card = dev->ml_priv;
+ struct nlattr *attr, *nested_attr;
+ bool enable, has_protinfo = false;
+ int rem1, rem2;
+ int rc;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ if (!(priv->brport_hw_features))
+ return -EOPNOTSUPP;
+
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
+ if (nla_type(attr) == IFLA_PROTINFO) {
+ rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
+ qeth_brport_policy, extack);
+ if (rc)
+ return rc;
+ has_protinfo = true;
+ } else if (nla_type(attr) == IFLA_AF_SPEC) {
+ nla_for_each_nested(nested_attr, attr, rem2) {
+ if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
+ continue;
+ NL_SET_ERR_MSG_ATTR(extack, nested_attr,
+ "Unsupported attribute");
+ return -EINVAL;
+ }
+ } else {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
+ return -EINVAL;
+ }
+ }
+ if (!has_protinfo)
+ return 0;
+ if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
+ return -EINVAL;
+ enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
+
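+	/* nothing to do if the requested state matches the current one: */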
+ if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
+ return 0;
+
+ mutex_lock(&card->sbp_lock);
+ /* do not change anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card)) {
+ NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
+ rc = -EBUSY;
+ } else if (enable) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc)
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ else
+ priv->brport_features |= BR_LEARNING_SYNC;
+ } else {
+ rc = qeth_l2_dev2br_an_set(card, false);
+ if (!rc) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ qeth_l2_dev2br_fdb_flush(card);
+ }
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ return rc;
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -707,9 +877,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
- .ndo_tx_timeout = qeth_tx_timeout,
+ .ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
- .ndo_set_features = qeth_set_features
+ .ndo_set_features = qeth_set_features,
+ .ndo_bridge_getlink = qeth_l2_bridge_getlink,
+ .ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
static const struct net_device_ops qeth_osn_netdev_ops = {
@@ -723,18 +895,12 @@ static const struct net_device_ops qeth_osn_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- int rc;
-
if (IS_OSN(card)) {
card->dev->netdev_ops = &qeth_osn_netdev_ops;
card->dev->flags |= IFF_NOARP;
goto add_napi;
}
- rc = qeth_setup_netdev(card);
- if (rc)
- return rc;
-
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -810,135 +976,81 @@ static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
if (card->options.sbp.hostnotification) {
if (qeth_bridgeport_an_set(card, 1))
card->options.sbp.hostnotification = 0;
- } else {
- qeth_bridgeport_an_set(card, 0);
}
}
-static int qeth_l2_set_online(struct qeth_card *card)
+/**
+ * qeth_l2_detect_dev2br_support() -
+ * Detect whether this card supports 'dev to bridge fdb network address
+ * change notification' and thus can support the learning_sync bridgeport
+ * attribute
+ * @card: qeth_card structure pointer
+ *
+ * This is a destructive test and must be called before dev2br or
+ * bridgeport address notification is enabled!
+ */
+static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
{
- struct ccwgroup_device *gdev = card->gdev;
- struct net_device *dev = card->dev;
- int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- mutex_lock(&card->sbp_lock);
- qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs) {
- qeth_l2_setup_bridgeport_attrs(card);
- dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
- }
- mutex_unlock(&card->sbp_lock);
-
- qeth_l2_register_dev_addr(card);
-
- /* for the rx_bcast characteristic, init VNICC after setmac */
- qeth_l2_vnicc_init(card);
-
- qeth_trace_features(card);
- qeth_l2_trace_features(card);
-
- qeth_print_status_message(card);
-
- /* softsetup */
- QETH_CARD_TEXT(card, 2, "softsetp");
-
- card->state = CARD_STATE_SOFTSETUP;
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ bool dev2br_supported;
+ int rc;
- if (dev->reg_state != NETREG_REGISTERED) {
- rc = qeth_l2_setup_netdev(card);
- if (rc)
- goto out_remove;
+ QETH_CARD_TEXT(card, 2, "d2brsup");
+ if (!IS_IQD(card))
+ return;
- if (carrier_ok)
- netif_carrier_on(dev);
+ /* dev2br requires valid cssid,iid,chid */
+ if (!card->info.ids_valid) {
+ dev2br_supported = false;
+ } else if (css_general_characteristics.enarf) {
+ dev2br_supported = true;
} else {
- rtnl_lock();
- if (carrier_ok)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
- netif_device_attach(dev);
- qeth_enable_hw_features(dev);
-
- if (card->info.open_when_online) {
- card->info.open_when_online = 0;
- dev_open(dev, NULL);
- }
- rtnl_unlock();
+ /* Old machines don't have the feature bit:
+ * Probe by testing whether a disable succeeds
+ */
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ dev2br_supported = !rc;
}
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
-out_remove:
- qeth_l2_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
- return rc;
-}
-
-static void qeth_l2_set_offline(struct qeth_card *card)
-{
- qeth_l2_stop_card(card);
-}
-
-static int __init qeth_l2_init(void)
-{
- pr_info("register layer 2 discipline\n");
- return 0;
+ if (dev2br_supported)
+ priv->brport_hw_features |= BR_LEARNING_SYNC;
+ else
+ priv->brport_hw_features &= ~BR_LEARNING_SYNC;
}
-static void __exit qeth_l2_exit(void)
+static void qeth_l2_enable_brport_features(struct qeth_card *card)
{
- pr_info("unregister layer 2 discipline\n");
-}
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ int rc;
-/* Returns zero if the command is successfully "consumed" */
-static int qeth_l2_control_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd)
-{
- switch (cmd->hdr.command) {
- case IPA_CMD_SETBRIDGEPORT_OSA:
- case IPA_CMD_SETBRIDGEPORT_IQD:
- if (cmd->data.sbp.hdr.command_code ==
- IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
- qeth_bridge_state_change(card, cmd);
- return 0;
- } else
- return 1;
- case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- qeth_addr_change_event(card, cmd);
- return 0;
- default:
- return 1;
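+	/* re-arm learning_sync if it was active before the card went offline: */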
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ if (priv->brport_hw_features & BR_LEARNING_SYNC) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc == -EAGAIN) {
+ /* Recoverable error, retry once */
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ }
+ if (rc) {
+ netdev_err(card->dev,
+ "failed to enable bridge learning_sync: %d\n",
+ rc);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
+ } else {
+ dev_warn(&card->gdev->dev,
+ "bridge learning_sync not supported\n");
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
}
}
-struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
- .setup = qeth_l2_probe_device,
- .remove = qeth_l2_remove_device,
- .set_online = qeth_l2_set_online,
- .set_offline = qeth_l2_set_offline,
- .do_ioctl = NULL,
- .control_event_handler = qeth_l2_control_event,
-};
-EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-
#ifdef CONFIG_QETH_OSN
static void qeth_osn_assist_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -1013,7 +1125,6 @@ void qeth_osn_deregister(struct net_device *dev)
QETH_CARD_TEXT(card, 2, "osndereg");
card->osn_info.assist_cb = NULL;
card->osn_info.data_cb = NULL;
- return;
}
EXPORT_SYMBOL(qeth_osn_deregister);
#endif
@@ -1090,15 +1201,14 @@ static void qeth_bridge_emit_host_event(struct qeth_card *card,
struct qeth_bridge_state_data {
struct work_struct worker;
struct qeth_card *card;
- struct qeth_sbp_state_change qports;
+ u8 role;
+ u8 state;
};
static void qeth_bridge_state_change_worker(struct work_struct *work)
{
struct qeth_bridge_state_data *data =
container_of(work, struct qeth_bridge_state_data, worker);
- /* We are only interested in the first entry - local port */
- struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
char env_locrem[32];
char env_role[32];
char env_state[32];
@@ -1109,22 +1219,16 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
NULL
};
- /* Role should not change by itself, but if it did, */
- /* information from the hardware is authoritative. */
- mutex_lock(&data->card->sbp_lock);
- data->card->options.sbp.role = entry->role;
- mutex_unlock(&data->card->sbp_lock);
-
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
snprintf(env_role, sizeof(env_role), "ROLE=%s",
- (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
- (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
- (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ (data->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
"<INVALID>");
snprintf(env_state, sizeof(env_state), "STATE=%s",
- (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
- (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
- (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
"<INVALID>");
kobject_uevent_env(&data->card->gdev->dev.kobj,
KOBJ_CHANGE, env);
@@ -1134,10 +1238,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
- struct qeth_sbp_state_change *qports =
- &cmd->data.sbp.data.state_change;
+ struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
struct qeth_bridge_state_data *data;
- int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng");
if (qports->num_entries == 0) {
@@ -1148,44 +1250,136 @@ static void qeth_bridge_state_change(struct qeth_card *card,
QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
}
- extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
- data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
- GFP_ATOMIC);
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data) {
QETH_CARD_TEXT(card, 2, "BPSalloc");
return;
}
INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
data->card = card;
- memcpy(&data->qports, qports,
- sizeof(struct qeth_sbp_state_change) + extrasize);
+ /* Information for the local port: */
+ data->role = qports->entry[0].role;
+ data->state = qports->entry[0].state;
+
queue_work(card->event_wq, &data->worker);
}
struct qeth_addr_change_data {
- struct work_struct worker;
+ struct delayed_work dwork;
struct qeth_card *card;
struct qeth_ipacmd_addr_change ac_event;
};
+static void qeth_l2_dev2br_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qeth_addr_change_data *data;
+ struct qeth_card *card;
+ struct qeth_priv *priv;
+ unsigned int i;
+ int rc;
+
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
+ card = data->card;
+ priv = netdev_priv(card->dev);
+
+ QETH_CARD_TEXT(card, 4, "dev2brew");
+
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+ goto free;
+
+ /* Potential re-config in progress, try again later: */
+ if (!rtnl_trylock()) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+ if (!netif_device_present(card->dev))
+ goto out_unlock;
+
+ if (data->ac_event.lost_event_mask) {
+ QETH_DBF_MESSAGE(3,
+ "Address change notification overflow on device %x\n",
+ CARD_DEVID(card));
+ /* Card fdb and bridge fdb are out of sync, card has stopped
+ * notifications (no need to drain_workqueue). Purge all
+ * 'extern_learn' entries from the parent bridge and restart
+ * the notifications.
+ */
+ qeth_l2_dev2br_fdb_flush(card);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc) {
+ /* TODO: if we want to retry after -EAGAIN, be
+ * aware there could be stale entries in the
+ * workqueue now, that need to be drained.
+ * For now we give up:
+ */
+ netdev_err(card->dev,
+ "bridge learning_sync failed to recover: %d\n",
+ rc);
+ WRITE_ONCE(card->info.pnso_mode,
+ QETH_PNSO_NONE);
+ /* To remove fdb entries reported by an_set: */
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ } else {
+ QETH_DBF_MESSAGE(3,
+				"Address notification resynced on device %x\n",
+ CARD_DEVID(card));
+ }
+ } else {
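+		/* no overflow: relay each reported address change into the bridge fdb: */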
+ for (i = 0; i < data->ac_event.num_entries; i++) {
+ struct qeth_ipacmd_addr_change_entry *entry =
+ &data->ac_event.entry[i];
+ qeth_l2_dev2br_fdb_notify(card,
+ entry->change_code,
+ &entry->token,
+ &entry->addr_lnid);
+ }
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+free:
+ kfree(data);
+}
+
static void qeth_addr_change_event_worker(struct work_struct *work)
{
- struct qeth_addr_change_data *data =
- container_of(work, struct qeth_addr_change_data, worker);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qeth_addr_change_data *data;
+ struct qeth_card *card;
int i;
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
+ card = data->card;
+
QETH_CARD_TEXT(data->card, 4, "adrchgew");
+
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+ goto free;
+
if (data->ac_event.lost_event_mask) {
+ /* Potential re-config in progress, try again later: */
+ if (!mutex_trylock(&card->sbp_lock)) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+
dev_info(&data->card->gdev->dev,
"Address change notification stopped on %s (%s)\n",
- data->card->dev->name,
+ netdev_name(card->dev),
(data->ac_event.lost_event_mask == 0x01)
? "Overflow"
: (data->ac_event.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
- mutex_lock(&data->card->sbp_lock);
+
data->card->options.sbp.hostnotification = 0;
+ card->info.pnso_mode = QETH_PNSO_NONE;
mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
@@ -1199,6 +1393,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
&entry->token,
&entry->addr_lnid);
}
+
+free:
kfree(data);
}
@@ -1210,6 +1406,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
struct qeth_addr_change_data *data;
int extrasize;
+ if (card->info.pnso_mode == QETH_PNSO_NONE)
+ return;
+
QETH_CARD_TEXT(card, 4, "adrchgev");
if (cmd->hdr.return_code != 0x0000) {
if (cmd->hdr.return_code == 0x0010) {
@@ -1229,11 +1428,14 @@ static void qeth_addr_change_event(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
- INIT_WORK(&data->worker, qeth_addr_change_event_worker);
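+	/* dispatch to the worker that matches the active notification mode: */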
+ if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
+ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
+ else
+ INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
memcpy(&data->ac_event, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
- queue_work(card->event_wq, &data->worker);
+ queue_delayed_work(card->event_wq, &data->dwork, 0);
}
/* SETBRIDGEPORT support; sending commands */
@@ -1418,8 +1620,8 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ struct qeth_sbp_port_data *qports;
int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb");
@@ -1427,6 +1629,7 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
if (rc)
return rc;
+ qports = &cmd->data.sbp.data.port_data;
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
return -EINVAL;
@@ -1554,18 +1757,18 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
- rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
- } else
- rc = qeth_l2_pnso(card, 0, NULL, NULL);
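+		/* set the mode first, so events arriving during enablement are not dropped: */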
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
+ qeth_bridgeport_an_set_cb, card);
+ if (rc)
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ } else {
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ }
return rc;
}
-static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
-{
- return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
- card->options.sbp.hostnotification);
-}
-
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
@@ -1711,6 +1914,19 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+ u32 *timeout)
+{
+ if (card->options.vnicc.sup_chars & vnicc &&
+ card->options.vnicc.getset_timeout_sup & vnicc &&
+ !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+ timeout))
+ return false;
+ *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+ return true;
+}
+
/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
@@ -1851,7 +2067,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
}
/* check if VNICC is currently enabled */
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
if (!card->options.vnicc.sup_chars)
return false;
@@ -1866,17 +2082,19 @@ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
return true;
}
-/* recover user timeout setting */
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout)
+/**
+ * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
+ * @card: qeth_card structure pointer
+ *
+ * qeth_bridgeport functionality is mutually exclusive with use of the
+ * VNIC Characteristics and with dev2br address notifications
+ */
+bool qeth_bridgeport_allowed(struct qeth_card *card)
{
- if (card->options.vnicc.sup_chars & vnicc &&
- card->options.vnicc.getset_timeout_sup & vnicc &&
- !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
- timeout))
- return false;
- *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
- return true;
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ return (!_qeth_l2_vnicc_is_in_use(card) &&
+ !(priv->brport_features & BR_LEARNING_SYNC));
}
/* recover user characteristic setting */
@@ -1967,6 +2185,182 @@ static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ if (IS_OSN(card))
+ dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
+ qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
+
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
+
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
+ return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+ if (gdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+ if (gdev->state == CCWGROUP_ONLINE)
+ qeth_set_offline(card, false);
+
+ cancel_work_sync(&card->close_dev_work);
+ if (card->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(card->dev);
+}
+
+static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+{
+ struct net_device *dev = card->dev;
+ int rc = 0;
+
+	/* query support before bridgeport notification can be enabled */
+ qeth_l2_detect_dev2br_support(card);
+
+ mutex_lock(&card->sbp_lock);
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
+ dev_info(&card->gdev->dev,
+ "The device represents a Bridge Capable Port\n");
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ qeth_l2_register_dev_addr(card);
+
+ /* for the rx_bcast characteristic, init VNICC after setmac */
+ qeth_l2_vnicc_init(card);
+
+ qeth_l2_trace_features(card);
+
+ /* softsetup */
+ QETH_CARD_TEXT(card, 2, "softsetp");
+
+ card->state = CARD_STATE_SOFTSETUP;
+
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l2_setup_netdev(card);
+ if (rc)
+ goto err_setup;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ } else {
+ rtnl_lock();
+ rc = qeth_set_real_num_tx_queues(card,
+ qeth_tx_actual_queues(card));
+ if (rc) {
+ rtnl_unlock();
+ goto err_set_queues;
+ }
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+ qeth_l2_enable_brport_features(card);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
+ }
+ rtnl_unlock();
+ }
+ return 0;
+
+err_set_queues:
+err_setup:
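+	/* partial unwind; qeth_set_online()'s error path tears down the HW state */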
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ return rc;
+}
+
+static void qeth_l2_set_offline(struct qeth_card *card)
+{
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l2_drain_rx_mode_cache(card);
+
+ if (card->state == CARD_STATE_SOFTSETUP)
+ card->state = CARD_STATE_DOWN;
+
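+	/* stop address notifications and drop any fdb entries synced to the bridge: */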
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ rtnl_lock();
+ qeth_l2_dev2br_fdb_flush(card);
+ rtnl_unlock();
+ }
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ switch (cmd->hdr.command) {
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ if (cmd->data.sbp.hdr.command_code ==
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+ qeth_bridge_state_change(card, cmd);
+ return 0;
+ }
+
+ return 1;
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ qeth_addr_change_event(card, cmd);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+const struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
+ .setup = qeth_l2_probe_device,
+ .remove = qeth_l2_remove_device,
+ .set_online = qeth_l2_set_online,
+ .set_offline = qeth_l2_set_offline,
+ .do_ioctl = NULL,
+ .control_event_handler = qeth_l2_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+
+static int __init qeth_l2_init(void)
+{
+ pr_info("register layer 2 discipline\n");
+ return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+ pr_info("unregister layer 2 discipline\n");
+}
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 86bcae992f72..4ba3bc57263f 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,7 +18,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
@@ -65,7 +65,7 @@ static ssize_t qeth_bridge_port_role_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
@@ -90,7 +90,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.reflect_promisc)
/* Forbid direct manipulation */
@@ -116,7 +116,7 @@ static ssize_t qeth_bridge_port_state_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
@@ -131,7 +131,7 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
@@ -153,10 +153,11 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
+ /* sbp_lock ensures ordering vs notifications-stopped events */
if (!rc)
card->options.sbp.hostnotification = enable;
} else
@@ -178,7 +179,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
@@ -214,7 +215,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
rc = -EPERM;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 6ccfe2121095..acd130cfbab3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -96,7 +96,7 @@ struct qeth_ipato_entry {
struct list_head entry;
enum qeth_prot_versions proto;
char addr[16];
- int mask_bits;
+ unsigned int mask_bits;
};
extern const struct attribute_group *qeth_l3_attr_groups[];
@@ -110,7 +110,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits);
+ unsigned int mask_bits);
void qeth_l3_update_ipato(struct qeth_card *card);
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 09ef518ca1ea..b1c1d2510d55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -97,7 +97,7 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return false;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
- (addr->proto == QETH_PROT_IPV4)? 4:16);
+ (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (addr->proto != ipatoe->proto)
continue;
@@ -105,11 +105,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
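+		/* mask_bits is presumably range-checked at parse time, so no min() clamp is needed */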
if (addr->proto == QETH_PROT_IPV4)
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(32, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
else
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(128, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
@@ -314,7 +312,8 @@ static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
}
static int qeth_l3_send_setdelmc(struct qeth_card *card,
- struct qeth_ipaddr *addr, int ipacmd)
+ struct qeth_ipaddr *addr,
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
@@ -535,14 +534,13 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
continue;
if (!memcmp(ipatoe->addr, new->addr,
- (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
+ (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == new->mask_bits)) {
rc = -EEXIST;
break;
@@ -555,28 +553,26 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits)
+ unsigned int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
if (!memcmp(ipatoe->addr, addr,
- (proto == QETH_PROT_IPV4)? 4:16) &&
+ (proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
qeth_l3_update_ipato(card);
@@ -586,7 +582,6 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -596,7 +591,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -604,11 +598,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_modify_ip(card, &addr, add);
- mutex_unlock(&card->conf_mutex);
-
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -716,16 +706,16 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
- "ARP processing not supported on %s!\n",
- QETH_CARD_IFNAME(card));
+ "ARP processing not supported on %s!\n",
+ netdev_name(card->dev));
return 0;
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting ARP processing support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting ARP processing support for %s failed\n",
+ netdev_name(card->dev));
}
return rc;
}
@@ -738,8 +728,8 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
- "Inbound source MAC-address not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Inbound source MAC-address not supported on %s\n",
+ netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -747,8 +737,8 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc)
dev_warn(&card->gdev->dev,
- "Starting source MAC-address support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting source MAC-address support for %s failed\n",
+ netdev_name(card->dev));
return rc;
}
@@ -760,7 +750,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
- "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+ "VLAN not supported on %s\n", netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -768,8 +758,8 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting VLAN support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting VLAN support for %s failed\n",
+ netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "VLAN enabled\n");
}
@@ -784,8 +774,8 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
- "Multicast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Multicast not supported on %s\n",
+ netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -793,8 +783,8 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting multicast support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting multicast support for %s failed\n",
+ netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "Multicast enabled\n");
card->dev->flags |= IFF_MULTICAST;
@@ -817,7 +807,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
@@ -825,15 +815,15 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Enabling the passthrough mode for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Enabling the passthrough mode for %s failed\n",
+ netdev_name(card->dev));
return rc;
}
out:
@@ -847,7 +837,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
- "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
+ "IPv6 not supported on %s\n", netdev_name(card->dev));
return 0;
}
return qeth_l3_softsetup_ipv6(card);
@@ -862,16 +852,17 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
- "Broadcast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Broadcast not supported on %s\n",
+ netdev_name(card->dev));
rc = -EOPNOTSUPP;
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_START, NULL);
if (rc) {
- dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
- "%s failed\n", QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Enabling broadcast filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
@@ -879,8 +870,8 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
IPA_CMD_ASS_CONFIGURE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
- "Setting up broadcast filtering for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Setting up broadcast filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
@@ -888,8 +879,9 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_ENABLE, &filter_data);
if (rc) {
- dev_warn(&card->gdev->dev, "Setting up broadcast echo "
- "filtering for %s failed\n", QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Setting up broadcast echo filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
@@ -1152,33 +1144,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_stop_card(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "stopcard");
-
- qeth_set_allowed_threads(card, 0, 1);
-
- cancel_work_sync(&card->rx_mode_work);
- qeth_l3_drain_rx_mode_cache(card);
-
- if (card->options.sniffer &&
- (card->info.promisc_mode == SET_PROMISC_MODE_ON))
- qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
-
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l3_clear_ip_htable(card, 1);
- qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_DOWN;
- }
-
- qeth_qdio_clear_card(card, 0);
- qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
- flush_workqueue(card->event_wq);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
-}
-
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
@@ -1234,7 +1199,6 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
kfree(addr);
break;
}
- addr->ref_counter = 1;
fallthrough;
default:
/* for next call to set_rx_mode(): */
@@ -1869,8 +1833,10 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
+
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -1913,10 +1879,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
unsigned int headroom;
int rc;
- rc = qeth_setup_netdev(card);
- if (rc)
- return rc;
-
if (IS_OSD(card) || IS_OSX(card)) {
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
@@ -2024,21 +1986,10 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct qeth_card *card)
+static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
- struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
@@ -2065,12 +2016,19 @@ static int qeth_l3_set_online(struct qeth_card *card)
if (dev->reg_state != NETREG_REGISTERED) {
rc = qeth_l3_setup_netdev(card);
if (rc)
- goto out_remove;
+ goto err_setup;
if (carrier_ok)
netif_carrier_on(dev);
} else {
rtnl_lock();
+ rc = qeth_set_real_num_tx_queues(card,
+ qeth_tx_actual_queues(card));
+ if (rc) {
+ rtnl_unlock();
+ goto err_set_queues;
+ }
+
if (carrier_ok)
netif_carrier_on(dev);
else
@@ -2085,22 +2043,29 @@ static int qeth_l3_set_online(struct qeth_card *card)
}
rtnl_unlock();
}
- qeth_trace_features(card);
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
return 0;
-out_remove:
- qeth_l3_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
+
+err_set_queues:
+err_setup:
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
return rc;
}
static void qeth_l3_set_offline(struct qeth_card *card)
{
- qeth_l3_stop_card(card);
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l3_drain_rx_mode_cache(card);
+
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+
+ if (card->state == CARD_STATE_SOFTSETUP) {
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
+ }
}
/* Returns zero if the command is successfully "consumed" */
@@ -2110,7 +2075,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
return 1;
}
-struct qeth_discipline qeth_l3_discipline = {
+const struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
@@ -2174,7 +2139,6 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
static int qeth_l3_ip_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
-
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct qeth_ipaddr addr;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index dd0b39082534..997fbb7006a7 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -285,7 +285,7 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
@@ -301,19 +301,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
goto out;
}
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
enable = !card->ipato.enabled;
} else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
- goto out;
+ goto unlock_ip;
}
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
+unlock_ip:
+ mutex_unlock(&card->ip_lock);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -328,7 +330,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
@@ -339,7 +341,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
} else if (kstrtobool(buf, &invert)) {
@@ -349,12 +351,11 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
@@ -406,29 +407,29 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
}
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
- u8 *addr, int *mask_bits)
+ u8 *addr, unsigned int *mask_bits)
{
- const char *start, *end;
- char *tmp;
- char buffer[40] = {0, };
+ char *sep;
+ int rc;
- start = buf;
- /* get address string */
- end = strchr(start, '/');
- if (!end || (end - start >= 40)) {
+ /* Expected input pattern: %addr/%mask */
+ sep = strnchr(buf, 40, '/');
+ if (!sep)
return -EINVAL;
- }
- strncpy(buffer, start, end - start);
- if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
- return -EINVAL;
- }
- start = end + 1;
- *mask_bits = simple_strtoul(start, &tmp, 10);
- if (!strlen(start) ||
- (tmp == start) ||
- (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+
+ /* Terminate the %addr sub-string, and parse it: */
+ *sep = '\0';
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
+ if (rc)
+ return rc;
+
+ rc = kstrtouint(sep + 1, 10, mask_bits);
+ if (rc)
+ return rc;
+
+ if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))
return -EINVAL;
- }
+
return 0;
}
@@ -436,8 +437,8 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -449,7 +450,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
return -ENOMEM;
ipatoe->proto = proto;
- memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
+ memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
@@ -474,8 +475,8 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -500,7 +501,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
@@ -510,7 +511,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
} else if (kstrtobool(buf, &invert)) {
@@ -520,12 +521,11 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 59e662df5774..78d52a4c55f5 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1607,7 +1607,6 @@ check_target:
static int zfcp_erp_thread(void *data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
- struct list_head *next;
struct zfcp_erp_action *act;
unsigned long flags;
@@ -1620,12 +1619,11 @@ static int zfcp_erp_thread(void *data)
break;
write_lock_irqsave(&adapter->erp_lock, flags);
- next = adapter->erp_ready_head.next;
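+ /* peek the next ready ERP action, if any */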
+ act = list_first_entry_or_null(&adapter->erp_ready_head,
+ struct zfcp_erp_action, list);
write_unlock_irqrestore(&adapter->erp_lock, flags);
- if (next != &adapter->erp_ready_head) {
- act = list_entry(next, struct zfcp_erp_action, list);
-
+ if (act) {
/* there is more to come after dismission, no notify */
if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
zfcp_erp_wakeup(adapter);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 140186fe1d1e..6cb963a06777 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -426,9 +426,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
* or it has been dismissed due to a queue shutdown, this function
* is called to process the completion status and trigger further
* events related to the FSF request.
+ * Caller must ensure that the request has been removed from
+ * adapter->req_list, to protect against concurrent modification
+ * by zfcp_erp_strategy_check_fsfreq().
*/
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
+ struct zfcp_erp_action *erp_action;
+
if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
zfcp_fsf_status_read_handler(req);
return;
@@ -439,8 +444,9 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
- if (req->erp_action)
- zfcp_erp_notify(req->erp_action, 0);
+ erp_action = req->erp_action;
+ if (erp_action)
+ zfcp_erp_notify(erp_action, 0);
if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
zfcp_fsf_req_free(req);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e78d65bd46b1..a8a514074084 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -380,8 +380,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
&qdio->adapter->status);
init_data.q_format = QDIO_ZFCP_QFMT;
- memcpy(init_data.adapter_name, dev_name(&cdev->dev), 8);
- ASCEBC(init_data.adapter_name, 8);
init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
if (enable_multibuffer)
init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 84b57a8f86bf..3242ff63986f 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -269,6 +269,27 @@ NCR_700_get_SXFER(struct scsi_device *SDp)
spi_period(SDp->sdev_target));
}
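+/* translate a pointer within the DMA-mapped script buffer to its bus address */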
+static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p)
+{
+ return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
+}
+
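+/* flush CPU writes to the buffer so the device sees them (non-coherent DMA only) */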
+static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
+ void *addr, size_t size)
+{
+ if (h->noncoherent)
+ dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
+ size, DMA_BIDIRECTIONAL);
+}
+
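+/* make device writes visible to the CPU before reading them (non-coherent DMA only) */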
+static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
+ void *addr, size_t size)
+{
+ if (h->noncoherent)
+ dma_sync_single_for_cpu(h->dev, virt_to_dma(h, addr), size,
+ DMA_BIDIRECTIONAL);
+}
+
struct Scsi_Host *
NCR_700_detect(struct scsi_host_template *tpnt,
struct NCR_700_Host_Parameters *hostdata, struct device *dev)
@@ -283,9 +304,13 @@ NCR_700_detect(struct scsi_host_template *tpnt,
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
- memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
- if(memory == NULL) {
+ memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
+ if (!memory) {
+ hostdata->noncoherent = 1;
+ memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ }
+ if (!memory) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
return NULL;
}
@@ -339,11 +364,11 @@ NCR_700_detect(struct scsi_host_template *tpnt,
for (j = 0; j < PATCHES; j++)
script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
/* now patch up fixed addresses. */
- script_patch_32(hostdata->dev, script, MessageLocation,
+ script_patch_32(hostdata, script, MessageLocation,
pScript + MSGOUT_OFFSET);
- script_patch_32(hostdata->dev, script, StatusAddress,
+ script_patch_32(hostdata, script, StatusAddress,
pScript + STATUS_OFFSET);
- script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
+ script_patch_32(hostdata, script, ReceiveMsgAddress,
pScript + MSGIN_OFFSET);
hostdata->script = script;
@@ -395,8 +420,13 @@ NCR_700_release(struct Scsi_Host *host)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
- hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
+ if (hostdata->noncoherent)
+ dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
+ hostdata->script, hostdata->pScript,
+ DMA_BIDIRECTIONAL);
+ else
+ dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
+ hostdata->script, hostdata->pScript);
return 1;
}
@@ -804,8 +834,8 @@ process_extended_message(struct Scsi_Host *host,
shost_printk(KERN_WARNING, host,
"Unexpected SDTR msg\n");
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script,
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script,
MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
@@ -817,9 +847,8 @@ process_extended_message(struct Scsi_Host *host,
printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
host->host_no, pun, lun);
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
break;
@@ -832,9 +861,8 @@ process_extended_message(struct Scsi_Host *host,
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -917,9 +945,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
printk("\n");
/* just reject it */
hostdata->msgout[0] = A_REJECT_MSG;
- dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
- script_patch_16(hostdata->dev, hostdata->script, MessageCount,
- 1);
+ dma_sync_to_dev(hostdata, hostdata->msgout, 1);
+ script_patch_16(hostdata, hostdata->script, MessageCount, 1);
/* SendMsgOut returns, so set up the return
* address */
resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -928,7 +955,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
}
NCR_700_writel(temp, host, TEMP_REG);
/* set us up to receive another message */
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+ dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
return resume_offset;
}
@@ -1008,8 +1035,8 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[1].pAddr = 0;
slot->resume_offset = hostdata->pScript;
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
+ dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
/* queue the command for reissue */
slot->state = NCR_700_SLOT_QUEUED;
@@ -1129,11 +1156,11 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
hostdata->cmd = slot->cmnd;
/* re-patch for this command */
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
CommandAddress, slot->pCmd);
- script_patch_16(hostdata->dev, hostdata->script,
+ script_patch_16(hostdata, hostdata->script,
CommandCount, slot->cmnd->cmd_len);
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
SGScriptStartAddress,
to32bit(&slot->pSG[0].ins));
@@ -1144,14 +1171,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
* should therefore always clear ACK */
NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
host, SXFER_REG);
- dma_cache_sync(hostdata->dev, hostdata->msgin,
- MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->msgout,
- MSG_ARRAY_SIZE, DMA_TO_DEVICE);
+ dma_sync_from_dev(hostdata, hostdata->msgin,
+ MSG_ARRAY_SIZE);
+ dma_sync_to_dev(hostdata, hostdata->msgout,
+ MSG_ARRAY_SIZE);
/* I'm just being paranoid here, the command should
* already have been flushed from the cache */
- dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
- slot->cmnd->cmd_len, DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
+ slot->cmnd->cmd_len);
@@ -1214,8 +1241,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
hostdata->reselection_id = reselection_id;
/* just in case we have a stale simple tag message, clear it */
hostdata->msgin[1] = 0;
- dma_cache_sync(hostdata->dev, hostdata->msgin,
- MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
+ dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
if(hostdata->tag_negotiated & (1<<reselection_id)) {
resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
} else {
@@ -1329,8 +1355,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp)
hostdata->cmd = NULL;
/* clear any stale simple tag message */
hostdata->msgin[1] = 0;
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
if(id == 0xff) {
/* Selected as target, Ignore */
@@ -1427,30 +1452,26 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
}
- script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
-
+ script_patch_16(hostdata, hostdata->script, MessageCount, count);
- script_patch_ID(hostdata->dev, hostdata->script,
- Device_ID, 1<<scmd_id(SCp));
+ script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
- script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
+ script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
slot->pCmd);
- script_patch_16(hostdata->dev, hostdata->script, CommandCount,
- SCp->cmd_len);
+ script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
/* finally plumb the beginning of the SG list into the script
* */
- script_patch_32_abs(hostdata->dev, hostdata->script,
+ script_patch_32_abs(hostdata, hostdata->script,
SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
NCR_700_clear_fifo(SCp->device->host);
if(slot->resume_offset == 0)
slot->resume_offset = hostdata->pScript;
/* now perform all the writebacks and invalidates */
- dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
- DMA_FROM_DEVICE);
- dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
- dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
+ dma_sync_to_dev(hostdata, hostdata->msgout, count);
+ dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
+ dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
+ dma_sync_from_dev(hostdata, hostdata->status, 1);
/* set the synchronous period/offset */
NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1485,10 +1506,8 @@ NCR_700_intr(int irq, void *dev_id)
__u8 sstat0 = 0, dstat = 0;
__u32 dsp;
struct scsi_cmnd *SCp = hostdata->cmd;
- enum NCR_700_Host_State state;
handled = 1;
- state = hostdata->state;
SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
@@ -1626,7 +1645,7 @@ NCR_700_intr(int irq, void *dev_id)
slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
slot->SG[i].pAddr = 0;
}
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
/* and pretend we disconnected after
* the command phase */
resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1739,7 +1758,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
__u32 move_ins;
- enum dma_data_direction direction;
struct NCR_700_command_slot *slot;
if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
@@ -1856,7 +1874,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
}
/* now build the scatter gather list */
- direction = SCp->sc_data_direction;
if(move_ins != 0) {
int i;
int sg_count;
@@ -1878,7 +1895,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
}
slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[i].pAddr = 0;
- dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+ dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
DEBUG((" SETTING %p to %x\n",
(&slot->pSG[i].ins),
slot->SG[i].ins));
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 05fe439b66af..c9f8c497babb 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -209,6 +209,7 @@ struct NCR_700_Host_Parameters {
#endif
__u32 chip710:1; /* set if really a 710 not 700 */
__u32 burst_length:4; /* set to 0 to disable 710 bursting */
+ __u32 noncoherent:1; /* needs to use non-coherent DMA */
/* NOTHING BELOW HERE NEEDS ALTERING */
__u32 fast:1; /* if we can alter the SCSI bus clock
@@ -422,33 +423,33 @@ struct NCR_700_Host_Parameters {
#define NCR_710_MIN_XFERP 0
#define NCR_700_MIN_PERIOD 25 /* for SDTR message, 100ns */
-#define script_patch_32(dev, script, symbol, value) \
+#define script_patch_32(h, script, symbol, value) \
{ \
int i; \
dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching %s at %d to %pad\n", \
#symbol, A_##symbol##_used[i], &da)); \
} \
}
-#define script_patch_32_abs(dev, script, symbol, value) \
+#define script_patch_32_abs(h, script, symbol, value) \
{ \
int i; \
dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
(script)[A_##symbol##_used[i]] = bS_to_host(da); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching %s at %d to %pad\n", \
#symbol, A_##symbol##_used[i], &da)); \
} \
}
/* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(dev, script, symbol, value) \
+#define script_patch_ID(h, script, symbol, value) \
{ \
int i; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -456,13 +457,13 @@ struct NCR_700_Host_Parameters {
val &= 0xff00ffff; \
val |= ((value) & 0xff) << 16; \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
#symbol, A_##symbol##_used[i], val)); \
} \
}
-#define script_patch_16(dev, script, symbol, value) \
+#define script_patch_16(h, script, symbol, value) \
{ \
int i; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -470,7 +471,7 @@ struct NCR_700_Host_Parameters {
val &= 0xffff0000; \
val |= ((value) & 0xffff); \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
- dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+ dma_sync_to_dev((h), &(script)[A_##symbol##_used[i]], 4); \
DEBUG((" script, patching short field %s at %d to 0x%x\n", \
#symbol, A_##symbol##_used[i], val)); \
} \
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index fd6ae5c38086..31233f6a0274 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -242,7 +242,7 @@ int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode; /* Only Sync. transfer - disabled */
-int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
+static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
@@ -290,7 +290,7 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
" blocks (FIB) allocated. Valid values are 512 and down. Default is"
" to use suggestion from Firmware.");
-int acbsize = -1;
+static int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
@@ -321,7 +321,7 @@ int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
-int aac_wwn = 1;
+static int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
"\t0 - Disable\n"
@@ -2229,10 +2229,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (dev->dac_support) {
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
- } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) {
dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
dev->dac_support = 0;
} else {
@@ -3253,7 +3253,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case START_STOP:
return aac_start_stop(scsicmd);
- fallthrough;
default:
/*
* Unhandled commands
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 59e82a832042..e3e157a74988 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -670,8 +670,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, sg_count[i],
- data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
+ data_dir);
hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
hbacmd->sge[i].addr_lo = cpu_to_le32(
(u32)(addr & 0xffffffff));
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -844,7 +844,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ usg->sg[i].count,
+ data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
@@ -883,8 +885,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += sg_count[i];
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 383e74fea6ed..b99ca1b0c553 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1551,6 +1551,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac_fib_map_free(aac);
dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
+ aac_adapter_ioremap(aac, 0);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
@@ -1561,15 +1562,15 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
dmamask = DMA_BIT_MASK(32);
quirks = aac_get_driver_ident(index)->quirks;
if (quirks & AAC_QUIRK_31BIT)
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else if (!(quirks & AAC_QUIRK_SRC))
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
if (quirks & AAC_QUIRK_31BIT && !retval) {
dmamask = DMA_BIT_MASK(31);
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
}
if (retval)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a3aee146537b..8f3772480582 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1659,7 +1659,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (error) {
dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
goto out_disable_pdev;
@@ -1678,7 +1678,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mask_bits = 32;
}
- error = pci_set_consistent_dma_mask(pdev, dmamask);
+ error = dma_set_coherent_mask(&pdev->dev, dmamask);
if (error) {
dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
, mask_bits);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 1c617c0d5899..98b02e7d38bb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -9402,10 +9402,9 @@ ahd_loadseq(struct ahd_softc *ahd)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahd->critical_sections == NULL)
panic("ahd_loadseq: Could not malloc");
- memcpy(ahd->critical_sections, cs_table, cs_count);
}
ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7c321303969e..f32398939f74 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -952,8 +952,8 @@ int
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
int flags, bus_dmamap_t *mapp)
{
- *vaddr = pci_alloc_consistent(ahd->dev_softc,
- dmat->maxsize, mapp);
+ *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
+ GFP_ATOMIC);
if (*vaddr == NULL)
return (ENOMEM);
return(0);
@@ -963,8 +963,7 @@ void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
void* vaddr, bus_dmamap_t map)
{
- pci_free_consistent(ahd->dev_softc, dmat->maxsize,
- vaddr, map);
+ dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
}
int
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 2231c4afa531..725bb7f58054 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -6879,10 +6879,9 @@ ahc_loadseq(struct ahc_softc *ahc)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahc->critical_sections == NULL)
panic("ahc_loadseq: Could not malloc");
- memcpy(ahc->critical_sections, cs_table, cs_count);
}
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index e7ccb8b80fc1..7bba961d1ae0 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -730,7 +730,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_abort returns 0x%x\n", error);
return (error);
}
@@ -744,7 +744,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_dev_reset returns 0x%x\n", error);
return (error);
}
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index c23bbb609126..98978bc199ff 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -42,14 +42,6 @@
extern struct kmem_cache *asd_dma_token_cache;
extern struct kmem_cache *asd_ascb_cache;
-static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
- snprintf(p, 3, "%02X", sas_addr[i]);
- *p = '\0';
-}
-
struct asd_ha_struct;
struct asd_ascb;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 9220bcf8388f..5d054d5c70a5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.10-20190116"
+#define ARCMSR_DRIVER_VERSION "v1.50.00.02-20200819"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -80,6 +80,7 @@ struct device_attribute;
#ifndef PCI_DEVICE_ID_ARECA_1884
#define PCI_DEVICE_ID_ARECA_1884 0x1884
#endif
+#define PCI_DEVICE_ID_ARECA_1886 0x188A
#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
#define ARCMSR_MINUTES (1000 * 60 * 60)
/*
@@ -436,6 +437,21 @@ struct FIRMWARE_INFO
#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+
+/*
+*******************************************************************************
+** SPEC. for Areca Type F adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1886 0x188617D3
+/* Doorbell and interrupt definitions are the same as on the Type E adapter */
+/* ARC-1886 doorbell sync */
+#define ARCMSR_HBFMU_DOORBELL_SYNC 0x100
+/* set host RW buffer physical address at inbound message 0, 1 (low, high) */
+#define ARCMSR_HBFMU_DOORBELL_SYNC1 0x300
+#define ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK 0x80000000
+#define ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE 0x20000000
+
/*
*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
@@ -720,6 +736,80 @@ struct MessageUnit_E{
uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};
+/*
+*********************************************************************
+** Messaging Unit (MU) of Type F processor(LSI)
+*********************************************************************
+*/
+struct MessageUnit_F {
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[46]; /*0100 01B7*/
+ uint32_t reply_post_producer_index1; /*01B8 01BB*/
+ uint32_t reply_post_consumer_index1; /*01BC 01BF*/
+};
+
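+/* three consecutive 256-byte message areas: write, read and msgcode buffers */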
+#define MESG_RW_BUFFER_SIZE (256 * 3)
+
typedef struct deliver_completeQ {
uint16_t cmdFlag;
uint16_t cmdSMID;
@@ -739,6 +829,7 @@ struct AdapterControlBlock
#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+#define ACB_ADAPTER_TYPE_F 0x00000005 /* hba L IOP */
u32 ioqueue_size;
struct pci_dev * pdev;
struct Scsi_Host * host;
@@ -760,10 +851,16 @@ struct AdapterControlBlock
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
struct MessageUnit_E __iomem *pmuE;
+ struct MessageUnit_F __iomem *pmuF;
};
/* message unit ATU inbound base address0 */
void __iomem *mem_base0;
void __iomem *mem_base1;
+ /* 0x000 - COMPORT_IN (Host sent to ROC) */
+ uint32_t *message_wbuffer;
+ /* 0x100 - COMPORT_OUT (ROC sent to Host) */
+ uint32_t *message_rbuffer;
+ /* 0x200 - BIOS_AREA */
+ uint32_t *msgcode_rwbuffer;
uint32_t acb_flags;
u16 dev_id;
uint8_t adapter_index;
@@ -836,8 +933,6 @@ struct AdapterControlBlock
#define FW_NORMAL 0x0000
#define FW_BOG 0x0001
#define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
uint32_t maxOutstanding;
int vector_count;
uint32_t maxFreeCCB;
@@ -848,6 +943,7 @@ struct AdapterControlBlock
uint32_t out_doorbell;
uint32_t completionQ_entry;
pCompletion_Q pCompletionQ;
+ uint32_t completeQ_size;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ec895d0319f0..e4fdb473b990 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -133,6 +133,7 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
@@ -209,6 +210,8 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_C},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
.driver_data = ACB_ADAPTER_TYPE_E},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
+ .driver_data = ACB_ADAPTER_TYPE_F},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
@@ -232,12 +235,12 @@ static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
case ACB_ADAPTER_TYPE_D:
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
}
- }
}
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
@@ -310,6 +313,19 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->out_doorbell = 0;
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!acb->pmuF) {
+ pr_notice("arcmsr%d: memory mapping region failed\n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -317,26 +333,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A:{
+ case ACB_ADAPTER_TYPE_A:
iounmap(acb->pmuA);
- }
- break;
- case ACB_ADAPTER_TYPE_B:{
+ break;
+ case ACB_ADAPTER_TYPE_B:
iounmap(acb->mem_base0);
iounmap(acb->mem_base1);
- }
-
- break;
- case ACB_ADAPTER_TYPE_C:{
+ break;
+ case ACB_ADAPTER_TYPE_C:
iounmap(acb->pmuC);
- }
- break;
+ break;
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
case ACB_ADAPTER_TYPE_E:
iounmap(acb->pmuE);
break;
+ case ACB_ADAPTER_TYPE_F:
+ iounmap(acb->pmuF);
+ break;
}
}
@@ -552,23 +567,20 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
arcmsr_hbaA_flush_cache(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
arcmsr_hbaB_flush_cache(acb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
arcmsr_hbaC_flush_cache(acb);
- }
break;
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_flush_cache(acb);
break;
}
@@ -626,6 +638,27 @@ static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
+static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb)
+{
+ dma_addr_t host_buffer_dma;
+ struct MessageUnit_F __iomem *pmuF;
+
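+ /* the three message buffers are laid out right behind the completion queue */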
+ memset(acb->dma_coherent2, 0xff, acb->completeQ_size);
+ acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 +
+ acb->completeQ_size, 4);
+ acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100;
+ acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200;
+ memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE);
+ host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4);
+ pmuF = acb->pmuF;
+ /* host buffer low address, bit0:1 all buffer active */
+ writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0);
+ /* host buffer high address */
+ writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1);
+ /* set host buffer physical address */
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell);
+}
+
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
bool rtn = true;
@@ -679,6 +712,28 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
acb->doneq_index = 0;
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ uint32_t QueueDepth;
+ uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32};
+
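+ /* the firmware reports its queue-depth selector in the low 3 bits of outbound_msgaddr1 */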
+ arcmsr_wait_firmware_ready(acb);
+ QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7];
+ acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128;
+ acb->ioqueue_size = roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32);
+ dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent) {
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ arcmsr_hbaF_assign_regAddr(acb);
+ }
+ break;
default:
break;
}
@@ -713,7 +768,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
- acb->uncache_size += acb->ioqueue_size;
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
+ acb->uncache_size += acb->ioqueue_size;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -736,6 +792,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
@@ -754,8 +811,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
dma_coherent_handle = next_ccb_phy;
}
- acb->dma_coherent_handle2 = dma_coherent_handle;
- acb->dma_coherent2 = ccb_tmp;
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F) {
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = ccb_tmp;
+ }
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2;
@@ -785,7 +844,6 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
- acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -822,8 +880,12 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]);
+ break;
+ }
}
- atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
return;
for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
@@ -854,6 +916,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap++;
acb_dev_map++;
}
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
}
static int
@@ -906,8 +969,6 @@ out_free_irq:
static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
{
INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&pacb->rq_map_token, 16);
- atomic_set(&pacb->ante_token_value, 16);
pacb->fw_flag = FW_NORMAL;
timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
@@ -1009,7 +1070,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if(!error){
goto free_hbb_mu;
}
- arcmsr_free_io_queue(acb);
+ if (acb->adapter_type != ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
error = arcmsr_alloc_ccb_pool(acb);
if(error){
goto unmap_pci_region;
@@ -1122,6 +1184,14 @@ static int arcmsr_resume(struct pci_dev *pdev)
acb->out_doorbell = 0;
acb->doneq_index = 0;
break;
+ case ACB_ADAPTER_TYPE_F:
+ writel(0, &acb->pmuF->host_int_status);
+ writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ acb->doneq_index = 0;
+ arcmsr_hbaF_assign_regAddr(acb);
+ break;
}
arcmsr_iop_init(acb);
arcmsr_init_get_devmap_timer(acb);
@@ -1134,6 +1204,8 @@ controller_stop:
controller_unregister:
scsi_remove_host(host);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1213,25 +1285,20 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
rtnval = arcmsr_hbaA_abort_allcmd(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
rtnval = arcmsr_hbaB_abort_allcmd(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
rtnval = arcmsr_hbaC_abort_allcmd(acb);
- }
break;
-
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
rtnval = arcmsr_hbaE_abort_allcmd(acb);
break;
}
@@ -1307,7 +1374,8 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
orig_mask = readl(&reg->host_int_mask);
writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
@@ -1514,6 +1582,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_E:
arcmsr_hbaE_postqueue_isr(acb);
break;
+ case ACB_ADAPTER_TYPE_F:
+ arcmsr_hbaF_postqueue_isr(acb);
+ break;
}
}
@@ -1568,6 +1639,8 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
pdev = acb->pdev;
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1625,6 +1698,8 @@ static void arcmsr_remove(struct pci_dev *pdev)
}
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
+ if (acb->adapter_type == ACB_ADAPTER_TYPE_F)
+ arcmsr_free_io_queue(acb);
arcmsr_unmap_pciregion(acb);
pci_release_regions(pdev);
scsi_host_put(host);
@@ -1702,7 +1777,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
@@ -1846,6 +1922,19 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
writel(ccb_post_stamp, &pmu->inbound_queueport_low);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *pmu = acb->pmuF;
+ u32 ccb_post_stamp, arc_cdb_size;
+
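+ /* fold the encoded CDB size and the SMID into a single queue-port word */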
+ if (ccb->arc_cdb_size <= 0x300)
+ arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
+ else
+ arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
+ ccb_post_stamp = (ccb->smid | arc_cdb_size);
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
@@ -1916,23 +2005,20 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
arcmsr_hbaA_stop_bgrb(acb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
arcmsr_hbaB_stop_bgrb(acb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
arcmsr_hbaC_stop_bgrb(acb);
- }
break;
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_stop_bgrb(acb);
break;
}
@@ -1951,7 +2037,6 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
@@ -1969,7 +2054,8 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
writel(acb->out_doorbell, &reg->iobound_doorbell);
@@ -2015,7 +2101,8 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
writel(acb->out_doorbell, &reg->iobound_doorbell);
@@ -2034,7 +2121,6 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
@@ -2055,6 +2141,10 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -2069,7 +2159,6 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
}
break;
-
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
@@ -2090,6 +2179,9 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_F:
+ pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
+ break;
}
return pqbuffer;
}
@@ -2504,6 +2596,36 @@ static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_F __iomem *phbcmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ phbcmu = acb->pmuF;
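+ /* a cmdSMID of 0xffff marks a completion entry not yet written by the adapter */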
+ while (1) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ if (cmdSMID == 0xffff)
+ break;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &phbcmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
/*
**********************************************************************************
** Handle a message interrupt
@@ -2694,21 +2816,46 @@ static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;
+
+ host_interrupt_status = readl(&phbcmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU post queue interrupts */
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
+ arcmsr_hbaF_postqueue_isr(pACB);
+
+ /* MU ioctl transfer doorbell interrupts */
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
+ arcmsr_hbaE_doorbell_isr(pACB);
+
+ host_interrupt_status = readl(&phbcmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
return arcmsr_hbaA_handle_isr(acb);
- break;
case ACB_ADAPTER_TYPE_B:
return arcmsr_hbaB_handle_isr(acb);
- break;
case ACB_ADAPTER_TYPE_C:
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
case ACB_ADAPTER_TYPE_E:
return arcmsr_hbaE_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_F:
+ return arcmsr_hbaF_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -3257,6 +3404,31 @@ static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
return true;
}
+static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_F __iomem *reg = pACB->pmuF;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
+ pACB->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
+ return true;
+}
+
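
Note the handshake used here and in the other E/F-series message paths: the driver keeps a shadow of the outbound doorbell word, flips the MESSAGE_CMD_DONE bit in the shadow, and writes the whole word back, so the IOP observes an edge on that bit for every command rather than a level. In miniature:

    /* Edge-signalled doorbell: each command toggles the bit and the
     * IOP reacts to the change, not to the absolute value. */
    acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
    writel(acb->out_doorbell, &reg->iobound_doorbell);
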
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
bool rtn = false;
@@ -3277,6 +3449,9 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_E:
rtn = arcmsr_hbaE_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_F:
+ rtn = arcmsr_hbaF_get_config(acb);
+ break;
default:
break;
}
@@ -3634,23 +3809,20 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
int rtn = 0;
switch (acb->adapter_type) {
- case ACB_ADAPTER_TYPE_A: {
+ case ACB_ADAPTER_TYPE_A:
rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
- }
break;
-
- case ACB_ADAPTER_TYPE_B: {
+ case ACB_ADAPTER_TYPE_B:
rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
- }
break;
- case ACB_ADAPTER_TYPE_C: {
+ case ACB_ADAPTER_TYPE_C:
rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
- }
break;
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
break;
}
@@ -3731,6 +3903,16 @@ static void arcmsr_set_iop_datetime(struct timer_list *t)
writel(pacb->out_doorbell, &reg->iobound_doorbell);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = pacb->pmuF;
+
+ pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
+ pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
}
if (sys_tz.tz_minuteswest)
next_time = ARCMSR_HOURS;
@@ -3756,6 +3938,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
dma_coherent_handle = acb->dma_coherent_handle2;
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
dma_coherent_handle = acb->dma_coherent_handle +
offsetof(struct CommandControlBlock, arcmsr_cdb);
break;
@@ -3873,11 +4056,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
- dma_coherent_handle = acb->dma_coherent_handle2;
- cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
- cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
- writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
- writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
+ writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
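
The open-coded split deleted above used (h >> 16) >> 16 because shifting a 32-bit dma_addr_t right by 32 in one step is undefined behaviour in C; lower_32_bits()/upper_32_bits() wrap exactly that trick. Roughly how the kernel headers define them:

    /* The double shift sidesteps the undefined ">> 32" on a 32-bit type. */
    #define lower_32_bits(n) ((u32)((n) & 0xffffffff))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
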
@@ -3889,6 +4069,27 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = acb->pmuF;
+
+ acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
+ acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
+ acb->msgcode_rwbuffer[2] = cdb_phyaddr;
+ acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
+ acb->msgcode_rwbuffer[4] = acb->ccbsize;
+ acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
+ acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
+ acb->msgcode_rwbuffer[7] = acb->completeQ_size;
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
@@ -3937,7 +4138,8 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
do {
if (!(acb->acb_flags & ACB_F_IOP_INITED))
@@ -3952,24 +4154,10 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
static void arcmsr_request_device_map(struct timer_list *t)
{
struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- (acb->acb_flags & ACB_F_BUS_RESET) ||
- (acb->acb_flags & ACB_F_ABORT)) {
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
+ if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
} else {
acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) ==
- atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value,
- atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- return;
- }
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -3999,10 +4187,23 @@ static void arcmsr_request_device_map(struct timer_list *t)
writel(acb->out_doorbell, &reg->iobound_doorbell);
break;
}
+ case ACB_ADAPTER_TYPE_F: {
+ struct MessageUnit_F __iomem *reg = acb->pmuF;
+ uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);
+
+ if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
+ (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
+ goto nxt6s;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
default:
return;
}
acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+nxt6s:
mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
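
With the request-map token accounting gone, the periodic poll reduces to the canonical self-rearming timer_list pattern: the callback recovers its container with from_timer(), skips the work while a reset or abort is in flight, and unconditionally re-queues itself with mod_timer(). A minimal sketch, with hypothetical adapter fields:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_adapter {			/* hypothetical container */
    	struct timer_list eternal_timer;
    	unsigned long flags;
    };

    static void poll_fn(struct timer_list *t)
    {
    	struct my_adapter *a = from_timer(a, t, eternal_timer);

    	if (!(a->flags & BUSY_FLAGS))	/* placeholder busy condition */
    		request_device_map(a);	/* hypothetical poll work */
    	/* always rearm so the timer keeps itself alive */
    	mod_timer(&a->eternal_timer, jiffies + msecs_to_jiffies(6000));
    }
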
@@ -4084,6 +4285,7 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
arcmsr_hbaD_start_bgrb(acb);
break;
case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:
arcmsr_hbaE_start_bgrb(acb);
break;
}
@@ -4163,7 +4365,8 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
- case ACB_ADAPTER_TYPE_E: {
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F: {
struct MessageUnit_E __iomem *reg = acb->pmuE;
uint32_t i, tmp;
@@ -4290,7 +4493,8 @@ static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
true : false;
}
break;
- case ACB_ADAPTER_TYPE_E:{
+ case ACB_ADAPTER_TYPE_E:
+ case ACB_ADAPTER_TYPE_F:{
struct MessageUnit_E __iomem *reg = acb->pmuE;
rtn = (readl(&reg->host_diagnostic_3xxx) &
ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
@@ -4389,8 +4593,6 @@ wait_reset_done:
goto wait_reset_done;
}
arcmsr_iop_init(acb);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies +
msecs_to_jiffies(6 * HZ));
@@ -4399,8 +4601,6 @@ wait_reset_done:
pr_notice("arcmsr: scsi bus reset eh returns with success\n");
} else {
acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
mod_timer(&acb->eternal_timer, jiffies +
msecs_to_jiffies(6 * HZ));
@@ -4493,6 +4693,9 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
+ case PCI_DEVICE_ID_ARECA_1886:
+ type = "NVMe/SAS/SATA";
+ break;
default:
type = "unknown";
raid6 = 0;
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 29294f0ef8a9..9dcd912267e6 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -166,14 +166,15 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
- dma_dir = DMA_MODE_WRITE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
+ dma_dir = DMA_MODE_WRITE;
alatch_dir = ALATCH_DMA_OUT;
- else
- map_dir = DMA_FROM_DEVICE,
- dma_dir = DMA_MODE_READ,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
+ dma_dir = DMA_MODE_READ;
alatch_dir = ALATCH_DMA_IN;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
@@ -326,10 +327,12 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
cumanascsi_2_terminator_ctl(host, 0);
else
ret = -EINVAL;
- } else
+ } else {
ret = -EINVAL;
- } else
+ }
+ } else {
ret = -EINVAL;
+ }
return ret;
}
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 591ae2a6dd74..5eb2415dda9d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -165,12 +165,13 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 7c9d361e91a9..78f33d57c3e8 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -120,7 +120,7 @@ static struct scsi_host_template oakscsi_template = {
static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
- int ret = -ENOMEM;
+ int ret;
ret = ecard_request_resources(ec);
if (ret)
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index d99ef014528e..9cc73da4e876 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -138,12 +138,13 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
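
The three ARM-card hunks above (cumana_2, eesox, powertec) fix the same fragile construct: comma-joined assignments after an unbraced if. The comma operator makes the original code one legal statement, so it behaved correctly, but the form is a trap, since turning any comma into a semicolon (or appending a line) silently moves statements out of the conditional. The hazard in miniature:

    /* Legal: all three assignments form ONE comma expression,
     * so every one of them is guarded by the if. */
    if (out)
    	a = 1,
    	b = 2,
    	c = 3;

    /* One comma becomes a semicolon and c = 3 now runs
     * unconditionally, with no compiler diagnostic: */
    if (out)
    	a = 1,
    	b = 2;
    	c = 3;	/* always executes; braces prevent this bug class */
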
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5c3513a4b450..202ba925c494 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error;
}
+ mem->dma = paddr;
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
sizeof(struct be_eq_entry), eq_vaddress);
@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error;
}
- mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
BEISCSI_EQ_DELAY_DEF);
if (ret) {
@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error;
}
+ mem->dma = paddr;
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error;
}
- mem->dma = paddr;
ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
false, 0);
if (ret) {
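
Both be2iscsi hunks move the mem->dma assignment ahead of be_fill_queue(): the create_eq_error/create_cq_error unwind paths free the ring via mem->dma, so with the old ordering a be_fill_queue() failure would unwind using a stale or uninitialised DMA handle. The general rule, sketched with hypothetical names:

    buf->va  = vaddr;
    buf->dma = paddr;	/* record handles BEFORE the first fallible step */
    ret = fill_queue(buf);	/* hypothetical call that may fail */
    if (ret)
    	goto unwind;	/* unwind frees via buf->dma, which is now valid */
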
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index bc5d84f87d8f..440ef32be048 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
if (bfad->pci_bar0_kva == NULL) {
printk(KERN_ERR "Fail to map bar0\n");
+ rc = -ENODEV;
goto out_release_region;
}
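
The bfad hunk fixes a classic error path that returns success: rc still held 0 from the preceding call, so bailing out on a failed ioremap() without assigning an errno made probe report success with a NULL BAR mapping. The defensive shape, with a hypothetical earlier step:

    rc = setup_step(pdev);	/* earlier step succeeded, rc == 0 */
    kva = ioremap(start, len);
    if (!kva) {
    	rc = -ENODEV;		/* set an error before every goto */
    	goto out_release_region;
    }
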
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 5cdeeb3539fd..6890bbe04a8c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -50,7 +50,7 @@ struct workqueue_struct *bnx2fc_wq;
* Here the io threads are per cpu but the l2 thread is just one
*/
struct fcoe_percpu_s bnx2fc_global;
-DEFINE_SPINLOCK(bnx2fc_global_lock);
+static DEFINE_SPINLOCK(bnx2fc_global_lock);
static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
@@ -108,22 +108,22 @@ MODULE_PARM_DESC(debug_logging,
"\t\t0x10 - fcoe L2 fame related logs.\n"
"\t\t0xff - LOG all messages.");
-uint bnx2fc_devloss_tmo;
+static uint bnx2fc_devloss_tmo;
module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
"attached via bnx2fc.");
-uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+static uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
"0xffff.");
-uint bnx2fc_queue_depth;
+static uint bnx2fc_queue_depth;
module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
"attached via bnx2fc.");
-uint bnx2fc_log_fka;
+static uint bnx2fc_log_fka;
module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1aba5897ccb0..1a0dc18d6915 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -864,7 +864,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
if (!abts_io_req) {
- printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
rc = FAILED;
goto abts_err;
}
@@ -957,7 +957,7 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
if (!seq_clnp_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -ENOMEM;
kfree(cb_arg);
goto cleanup_err;
@@ -1015,7 +1015,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
if (!cleanup_io_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -1;
goto cleanup_err;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6018cdd17702..2b3f0c10478e 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -474,8 +474,6 @@ static int __init bnx2i_mod_init(void)
if (sq_size && !is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
- mutex_init(&bnx2i_dev_lock);
-
bnx2i_scsi_xport_template =
iscsi_register_transport(&bnx2i_iscsi_transport);
if (!bnx2i_scsi_xport_template) {
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 7fa20609d5e7..e43c5413ce29 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 00cf33573136..55e74da2f3cb 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -933,14 +933,14 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
* abort for that I/O by the FW crossed each other.
* The FW returned FW_EINVAL. The original I/O would have
* returned with FW_SUCCESS or any other SCSI error.
- * 3. The FW couldnt sent the abort out on the wire, as there
+ * 3. The FW couldn't send the abort out on the wire, as there
* was an I-T nexus loss (link down, remote device logged
* out etc). FW sent back an appropriate IT nexus loss status
* for the abort.
* 4. FW sent an abort, but abort timed out (remote device
* didnt respond). FW replied back with
* FW_SCSI_ABORT_TIMEDOUT.
- * 5. FW couldnt genuinely abort the request for some reason,
+ * 5. FW couldn't genuinely abort the request for some reason,
* and sent us an error.
*
* The first 3 scenarios are treated as succesful abort
@@ -1859,7 +1859,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
- csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
ioreq, retval);
CSIO_INC_STATS(scsim, n_busy_error);
goto err_put_req;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 0e8621a6956d..f078b3c4e083 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -77,9 +77,9 @@ int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
{
struct cxgbi_ports_map *pmap = &cdev->pmap;
- pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
- sizeof(struct cxgbi_sock *),
- GFP_KERNEL);
+ pmap->port_csk = kvzalloc(array_size(max_conn,
+ sizeof(struct cxgbi_sock *)),
+ GFP_KERNEL | __GFP_NOWARN);
if (!pmap->port_csk) {
pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
return -ENOMEM;
@@ -124,7 +124,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
if (cdev->cdev2ppm)
cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
- cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kvfree(cdev->pmap.port_csk);
kfree(cdev);
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index fc7255fefcd3..3687b5c0cf90 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -575,22 +575,6 @@ struct cxgbi_iso_info {
u32 buffer_offset;
};
-static inline void *cxgbi_alloc_big_mem(unsigned int size,
- gfp_t gfp)
-{
- void *p = kzalloc(size, gfp | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
-
- return p;
-}
-
-static inline void cxgbi_free_big_mem(void *addr)
-{
- kvfree(addr);
-}
-
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
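
The libcxgbi change deletes a hand-rolled try-kmalloc-then-vmalloc helper in favour of kvzalloc(), which implements the same fallback in core code, with kvfree() releasing either kind of allocation. array_size() additionally saturates on multiplication overflow rather than wrapping. Before and after, sketched:

    #include <linux/mm.h>	/* kvzalloc, kvfree */
    #include <linux/overflow.h>	/* array_size */

    /* old pattern: open-coded fallback */
    p = kzalloc(n * sizeof(*p), GFP_KERNEL | __GFP_NOWARN);
    if (!p)
    	p = vzalloc(n * sizeof(*p));

    /* new pattern: one call, overflow-checked size */
    p = kvzalloc(array_size(n, sizeof(*p)), GFP_KERNEL | __GFP_NOWARN);
    /* ... use p ... */
    kvfree(p);	/* handles both physically and virtually contiguous memory */
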
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 7018cd802569..e4e0d767b98e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -15,7 +15,8 @@
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
-
+#include <linux/interrupt.h>
+#include <asm/xive.h>
#include <misc/ocxl.h>
#include <uapi/misc/cxl.h>
@@ -180,7 +181,7 @@ static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct device *dev = afu->dev;
struct ocxlflash_irqs *irq;
- void __iomem *vtrig;
+ struct xive_irq_data *xd;
u32 virq;
int rc = 0;
@@ -204,15 +205,15 @@ static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
goto err1;
}
- vtrig = ioremap(irq->ptrig, PAGE_SIZE);
- if (unlikely(!vtrig)) {
- dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
- rc = -ENOMEM;
+ xd = irq_get_handler_data(virq);
+ if (unlikely(!xd)) {
+ dev_err(dev, "%s: Can't get interrupt data\n", __func__);
+ rc = -ENXIO;
goto err2;
}
irq->virq = virq;
- irq->vtrig = vtrig;
+ irq->vtrig = xd->trig_mmio;
out:
return rc;
err2:
@@ -259,8 +260,6 @@ static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
}
irq = &ctx->irqs[num];
- if (irq->vtrig)
- iounmap(irq->vtrig);
if (irq_find_mapping(NULL, irq->hwirq)) {
free_irq(irq->virq, cookie);
@@ -615,7 +614,6 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct device *dev = afu->dev;
struct ocxlflash_irqs *irqs;
- u64 addr;
int rc = 0;
int hwirq;
int i;
@@ -640,7 +638,7 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
}
for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
+ rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
if (unlikely(rc)) {
dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
__func__, rc);
@@ -648,7 +646,6 @@ static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
}
irqs[i].hwirq = hwirq;
- irqs[i].ptrig = addr;
}
ctx->irqs = irqs;
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
index fc6ad4f985de..f2fe88816bea 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ b/drivers/scsi/cxlflash/ocxl_hw.h
@@ -13,7 +13,6 @@
struct ocxlflash_irqs {
int hwirq;
u32 virq;
- u64 ptrig;
void __iomem *vtrig;
};
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 37c6cc374079..fa16894d8758 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -902,7 +902,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
- if (dir == PCI_DMA_NONE || !nseg) {
+ if (dir == DMA_NONE || !nseg) {
dprintkdbg(DBG_0,
"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
@@ -3135,7 +3135,7 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
- if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
+ if (scsi_sg_count(cmd) && dir != DMA_NONE) {
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3333,7 +3333,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (!ckc_only && (cmd->result & RES_DID) == 0
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
- && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
+ && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
/*if( srb->cmd->cmnd[0] == INQUIRY && */
@@ -4504,14 +4504,8 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
/*seq_printf(m, "\n"); */
seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
- seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
- acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
- acb->dcb_map[6], acb->dcb_map[7]);
- seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
- acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
- acb->dcb_map[14], acb->dcb_map[15]);
+ seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
+ seq_printf(m, " %8ph\n", &acb->dcb_map[8]);
seq_puts(m,
"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
@@ -4727,30 +4721,7 @@ static struct pci_driver dc395x_driver = {
.probe = dc395x_init_one,
.remove = dc395x_remove_one,
};
-
-
-/**
- * dc395x_module_init - Module initialization function
- *
- * Used by both module and built-in driver to initialise this driver.
- **/
-static int __init dc395x_module_init(void)
-{
- return pci_register_driver(&dc395x_driver);
-}
-
-
-/**
- * dc395x_module_exit - Module cleanup function.
- **/
-static void __exit dc395x_module_exit(void)
-{
- pci_unregister_driver(&dc395x_driver);
-}
-
-
-module_init(dc395x_module_init);
-module_exit(dc395x_module_exit);
+module_pci_driver(dc395x_driver);
MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
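
module_pci_driver() generates exactly the init/exit boilerplate removed above. Its expansion is approximately:

    /* module_pci_driver(dc395x_driver) expands to roughly: */
    static int __init dc395x_driver_init(void)
    {
    	return pci_register_driver(&dc395x_driver);
    }
    module_init(dc395x_driver_init);

    static void __exit dc395x_driver_exit(void)
    {
    	pci_unregister_driver(&dc395x_driver);
    }
    module_exit(dc395x_driver_exit);
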
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f32da0ca529e..308bda2e9c00 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_lock();
list_for_each_entry_rcu(h,
&tmp_pg->dh_list, node) {
- /* h->sdev should always be valid */
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state = desc[0];
}
rcu_read_unlock();
@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg->expiry = 0;
rcu_read_lock();
list_for_each_entry_rcu(h, &pg->dh_list, node) {
- BUG_ON(!h->sdev);
+ if (!h->sdev)
+ continue;
h->sdev->access_state =
(pg->state & SCSI_ACCESS_STATE_MASK);
if (pg->pref)
@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
spin_lock(&h->pg_lock);
pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
rcu_assign_pointer(h->pg, NULL);
- h->sdev = NULL;
spin_unlock(&h->pg_lock);
if (pg) {
spin_lock_irq(&pg->lock);
@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
kref_put(&pg->kref, release_port_group);
}
sdev->handler_data = NULL;
+ synchronize_rcu();
kfree(h);
}
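
The alua detach change is a use-after-free repair: h lives on an RCU-protected list that the RTPG worker walks under rcu_read_lock(), dereferencing h->sdev. Rather than NULLing h->sdev under the writer lock (which raced with readers, hence the new NULL checks), detach now leaves it valid and inserts synchronize_rcu() before kfree(h), guaranteeing that every reader which may still see h has left its critical section. The general pattern, not alua's exact code:

    /* writer: unpublish, wait out readers, then free */
    spin_lock(&lock);
    list_del_rcu(&h->node);	/* new readers can no longer find h ... */
    spin_unlock(&lock);
    synchronize_rcu();		/* ... and existing readers have finished */
    kfree(h);

    /* reader */
    rcu_read_lock();
    list_for_each_entry_rcu(h, &head, node)
    	use(h->sdev);		/* safe: h stays intact for the read side */
    rcu_read_unlock();
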
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index f654ad8a3d69..4251212acbbe 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -408,9 +408,6 @@ static void adpt_inquiry(adpt_hba* pHba)
static int adpt_slave_configure(struct scsi_device * device)
{
struct Scsi_Host *host = device->host;
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
if (host->can_queue && device->tagged_supported) {
scsi_change_queue_depth(device,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index cc620f10eabc..08f4e43c7d9e 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1548,11 +1548,10 @@ static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
a->firmware.orig_len = length;
- a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)length,
- (dma_addr_t *)&a->firmware.
- phys,
- GFP_KERNEL);
+ a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.phys,
+ GFP_KERNEL);
if (!a->firmware.data) {
esas2r_debug("buffer alloc failed!");
@@ -1895,11 +1894,11 @@ int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
if (!a->vda_buffer) {
dma_addr_t dma_addr;
- a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)
- VDA_MAX_BUFFER_SIZE,
- &dma_addr,
- GFP_KERNEL);
+ a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
a->ppvda_buffer = dma_addr;
}
@@ -2064,11 +2063,10 @@ int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
re_allocate_buffer:
a->fs_api_buffer_size = length;
- a->fs_api_buffer = (u8 *)dma_alloc_coherent(
- &a->pcid->dev,
- (size_t)a->fs_api_buffer_size,
- (dma_addr_t *)&a->ppfs_api_buffer,
- GFP_KERNEL);
+ a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
}
}
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 2cb7a8c93a15..ffef2c8eddc6 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -1053,16 +1053,10 @@ EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
int __init fcoe_sysfs_setup(void)
{
- int error;
-
atomic_set(&ctlr_num, 0);
atomic_set(&fcf_num, 0);
- error = bus_register(&fcoe_bus_type);
- if (error)
- return error;
-
- return 0;
+ return bus_register(&fcoe_bus_type);
}
void __exit fcoe_sysfs_teardown(void)
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index f2da4fa382e8..e0cdcd2003d0 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -111,12 +111,11 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
base = readb(p + sig->base_offset) +
(readb(p + sig->base_offset + 1) << 8);
iounmap(p);
- if (base)
+ if (base) {
dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
bios_base, base);
- else
+ } else { /* no I/O base in BIOS area */
dev_info(dev, "BIOS at 0x%lx\n", bios_base);
- if (!base) { /* no I/O base in BIOS area */
/* save BIOS signature for later use in port probing */
saved_sig = sig;
return 0;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 13f7d88d6e57..6c049360f136 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -120,11 +120,11 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
len = 0;
trace_type = (u8 *)filp->private_data;
if (*trace_type == fc_trc_flag->fnic_trace)
- len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_trace)
- len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_clear)
- len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+ len = sprintf(buf, "%d\n", fnic_fc_trace_cleared);
else
pr_err("fnic: Cannot read to any debugfs file\n");
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 673887e383cc..e3384afb7cbd 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -309,12 +309,10 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
- enum fip_desc_type els_dtype = 0;
u16 op;
u8 els_op;
u8 sub;
- size_t els_len = 0;
size_t rlen;
size_t dlen = 0;
@@ -346,10 +344,8 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
- els_len = dlen - sizeof(*els);
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
- els_dtype = desc->fip_dtype;
if (!fh)
return 0;
@@ -376,7 +372,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
- int fr_len;
struct fip_vlan *vlan;
u64 vlan_tov;
@@ -391,7 +386,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
if (!skb)
return;
- fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
@@ -837,7 +831,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct sk_buff *skb;
struct fc_frame *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
@@ -867,7 +860,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
- eth_hdrs_stripped = 1;
skb_trim(skb, fcp_bytes_written);
fr_sof(fp) = sof;
fr_eof(fp) = eof;
@@ -884,7 +876,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
- eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 7910b573bacb..5f8a7ef8f6a8 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -443,7 +443,7 @@ static void fnic_notify_timer_start(struct fnic *fnic)
default:
/* Using intr for notification for INTx/MSI-X */
break;
- };
+ }
}
static int fnic_dev_wait(struct vnic_dev *vdev,
@@ -552,8 +552,7 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- u16 old_vlan;
- old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 03b1805b106c..d1f7b84bbfe8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1402,7 +1402,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
}
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- goto cleanup_scsi_cmd;
+ continue;
}
CMD_SP(sc) = NULL;
@@ -1417,7 +1417,6 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
-cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
index 9eab7e7caf38..7b18635df7e6 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.c
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -79,8 +79,6 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
unsigned int index, unsigned int desc_count,
unsigned int desc_size)
{
- int err;
-
wq->index = index;
wq->vdev = vdev;
wq->to_use_index = wq->to_clean_index = 0;
@@ -92,11 +90,7 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
vnic_wq_copy_disable(wq);
- err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
}
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 7f150d52b4a6..5d801388680b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3007,7 +3007,6 @@ static char *async_cache_tab[] = {
static int gdth_async_event(gdth_ha_str *ha)
{
gdth_cmd_str *cmdp;
- int cmd_index;
cmdp= ha->pccb;
TRACE2(("gdth_async_event() ha %d serv %d\n",
@@ -3019,7 +3018,6 @@ static int gdth_async_event(gdth_ha_str *ha)
gdth_delay(0);
cmdp->Service = SCREENSERVICE;
cmdp->RequestBuffer = SCREEN_CMND;
- cmd_index = gdth_get_cmd_index(ha);
gdth_set_sema0(ha);
cmdp->OpCode = GDT_READ;
cmdp->BoardNode = LOCALBOARD;
@@ -3170,81 +3168,6 @@ static inline void gdth_timer_init(void)
}
#endif
-static void __init internal_setup(char *str,int *ints)
-{
- int i;
- char *cur_str, *argv;
-
- TRACE2(("internal_setup() str %s ints[0] %d\n",
- str ? str:"NULL", ints ? ints[0]:0));
-
- /* analyse string */
- argv = str;
- while (argv && (cur_str = strchr(argv, ':'))) {
- int val = 0, c = *++cur_str;
-
- if (c == 'n' || c == 'N')
- val = 0;
- else if (c == 'y' || c == 'Y')
- val = 1;
- else
- val = (int)simple_strtoul(cur_str, NULL, 0);
-
- if (!strncmp(argv, "disable:", 8))
- disable = val;
- else if (!strncmp(argv, "reserve_mode:", 13))
- reserve_mode = val;
- else if (!strncmp(argv, "reverse_scan:", 13))
- reverse_scan = val;
- else if (!strncmp(argv, "hdr_channel:", 12))
- hdr_channel = val;
- else if (!strncmp(argv, "max_ids:", 8))
- max_ids = val;
- else if (!strncmp(argv, "rescan:", 7))
- rescan = val;
- else if (!strncmp(argv, "shared_access:", 14))
- shared_access = val;
- else if (!strncmp(argv, "reserve_list:", 13)) {
- reserve_list[0] = val;
- for (i = 1; i < MAX_RES_ARGS; i++) {
- cur_str = strchr(cur_str, ',');
- if (!cur_str)
- break;
- if (!isdigit((int)*++cur_str)) {
- --cur_str;
- break;
- }
- reserve_list[i] =
- (int)simple_strtoul(cur_str, NULL, 0);
- }
- if (!cur_str)
- break;
- argv = ++cur_str;
- continue;
- }
-
- if ((argv = strchr(argv, ',')))
- ++argv;
- }
-}
-
-int __init option_setup(char *str)
-{
- int ints[MAXHA];
- char *cur = str;
- int i = 1;
-
- TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
-
- while (cur && isdigit(*cur) && i < MAXHA) {
- ints[i++] = simple_strtoul(cur, NULL, 0);
- if ((cur = strchr(cur, ',')) != NULL) cur++;
- }
-
- ints[0] = i - 1;
- internal_setup(cur, ints);
- return 1;
-}
static const char *gdth_ctr_name(gdth_ha_str *ha)
{
@@ -4319,5 +4242,81 @@ module_init(gdth_init);
module_exit(gdth_exit);
#ifndef MODULE
+static void __init internal_setup(char *str,int *ints)
+{
+ int i;
+ char *cur_str, *argv;
+
+ TRACE2(("internal_setup() str %s ints[0] %d\n",
+ str ? str:"NULL", ints ? ints[0]:0));
+
+ /* analyse string */
+ argv = str;
+ while (argv && (cur_str = strchr(argv, ':'))) {
+ int val = 0, c = *++cur_str;
+
+ if (c == 'n' || c == 'N')
+ val = 0;
+ else if (c == 'y' || c == 'Y')
+ val = 1;
+ else
+ val = (int)simple_strtoul(cur_str, NULL, 0);
+
+ if (!strncmp(argv, "disable:", 8))
+ disable = val;
+ else if (!strncmp(argv, "reserve_mode:", 13))
+ reserve_mode = val;
+ else if (!strncmp(argv, "reverse_scan:", 13))
+ reverse_scan = val;
+ else if (!strncmp(argv, "hdr_channel:", 12))
+ hdr_channel = val;
+ else if (!strncmp(argv, "max_ids:", 8))
+ max_ids = val;
+ else if (!strncmp(argv, "rescan:", 7))
+ rescan = val;
+ else if (!strncmp(argv, "shared_access:", 14))
+ shared_access = val;
+ else if (!strncmp(argv, "reserve_list:", 13)) {
+ reserve_list[0] = val;
+ for (i = 1; i < MAX_RES_ARGS; i++) {
+ cur_str = strchr(cur_str, ',');
+ if (!cur_str)
+ break;
+ if (!isdigit((int)*++cur_str)) {
+ --cur_str;
+ break;
+ }
+ reserve_list[i] =
+ (int)simple_strtoul(cur_str, NULL, 0);
+ }
+ if (!cur_str)
+ break;
+ argv = ++cur_str;
+ continue;
+ }
+
+ if ((argv = strchr(argv, ',')))
+ ++argv;
+ }
+}
+
+static int __init option_setup(char *str)
+{
+ int ints[MAXHA];
+ char *cur = str;
+ int i = 1;
+
+ TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
+
+ while (cur && isdigit(*cur) && i < MAXHA) {
+ ints[i++] = simple_strtoul(cur, NULL, 0);
+ if ((cur = strchr(cur, ',')) != NULL) cur++;
+ }
+
+ ints[0] = i - 1;
+ internal_setup(cur, ints);
+ return 1;
+}
+
__setup("gdth=", option_setup);
#endif
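
Relocating option_setup() under #ifndef MODULE matches how __setup() works: it registers a handler for a kernel command-line token, which only exists for built-in code (a module takes parameters through module_param() instead, so the handler would be dead weight there). Usage in miniature, with a hypothetical option name:

    #ifndef MODULE
    static int __init foo_setup(char *str)
    {
    	/* str points past "foo=" on the kernel command line */
    	foo_parse_options(str);	/* hypothetical parser */
    	return 1;		/* 1 == option consumed */
    }
    __setup("foo=", foo_setup);
    #endif
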
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 13ed9073fc72..b8148b1733f8 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -15,5 +15,6 @@ config SCSI_HISI_SAS_PCI
tristate "HiSilicon SAS on PCI bus"
depends on SCSI_HISI_SAS
depends on PCI
+ depends on ACPI
help
This driver supports HiSilicon's SAS HBA based on PCI device
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2bdd64648ef0..a25cfc11c96d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,6 +8,8 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
@@ -19,6 +21,7 @@
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/timer.h>
@@ -32,6 +35,7 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_PM_BIT 2
#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
#define HISI_SAS_RESERVED_IPTT 96
#define HISI_SAS_UNRESERVED_IPTT \
@@ -273,6 +277,39 @@ enum hisi_sas_debugfs_cache_type {
HISI_SAS_IOST_CACHE,
};
+enum hisi_sas_debugfs_bist_ffe_cfg {
+ FFE_SAS_1_5_GBPS,
+ FFE_SAS_3_0_GBPS,
+ FFE_SAS_6_0_GBPS,
+ FFE_SAS_12_0_GBPS,
+ FFE_RESV,
+ FFE_SATA_1_5_GBPS,
+ FFE_SATA_3_0_GBPS,
+ FFE_SATA_6_0_GBPS,
+ FFE_CFG_MAX
+};
+
+enum hisi_sas_debugfs_bist_fixed_code {
+ FIXED_CODE,
+ FIXED_CODE_1,
+ FIXED_CODE_MAX
+};
+
+enum {
+ HISI_SAS_BIST_CODE_MODE_PRBS7,
+ HISI_SAS_BIST_CODE_MODE_PRBS23,
+ HISI_SAS_BIST_CODE_MODE_PRBS31,
+ HISI_SAS_BIST_CODE_MODE_JTPAT,
+ HISI_SAS_BIST_CODE_MODE_CJTPAT,
+ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
+ HISI_SAS_BIST_CODE_MODE_TRAIN,
+ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+ HISI_SAS_BIST_CODE_MODE_HFTP,
+ HISI_SAS_BIST_CODE_MODE_MFTP,
+ HISI_SAS_BIST_CODE_MODE_LFTP,
+ HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -431,7 +468,6 @@ struct hisi_hba {
u32 intr_coal_count; /* Interrupt count to coalesce */
int cq_nvecs;
- unsigned int *reply_map;
/* bist */
enum sas_linkrate debugfs_bist_linkrate;
@@ -440,6 +476,8 @@ struct hisi_hba {
int debugfs_bist_mode;
u32 debugfs_bist_cnt;
int debugfs_bist_enable;
+ u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX];
+ u32 debugfs_bist_fixed_code[FIXED_CODE_MAX];
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d9d21d23372e..c8dd8588f800 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -229,17 +229,18 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- struct sas_ssp_task *ssp_task = &task->ssp_task;
- struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);
- if (slot->n_elem_dif)
+ if (slot->n_elem_dif) {
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
scsi_prot_sg_count(scsi_cmnd),
task->data_dir);
+ }
}
}
@@ -334,7 +335,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
}
if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
*n_elem);
rc = -EINVAL;
goto err_out_dma_unmap;
@@ -417,6 +418,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
+ struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq;
unsigned long flags;
int wr_q_index;
@@ -432,10 +434,23 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
- if (hisi_hba->reply_map) {
- int cpu = raw_smp_processor_id();
- unsigned int dq_index = hisi_hba->reply_map[cpu];
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (scmd && hisi_hba->shost->nr_hw_queues) {
+ unsigned int dq_index;
+ u32 blk_tag;
+
+ blk_tag = blk_mq_unique_tag(scmd->request);
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
*dq_pointer = dq = &hisi_hba->dq[dq_index];
} else {
*dq_pointer = dq = sas_dev->dq;
@@ -464,21 +479,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
- else {
- struct scsi_cmnd *scsi_cmnd = NULL;
-
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scsi_cmnd = qc->scsicmd;
- } else {
- scsi_cmnd = task->uldd_task;
- }
- }
- rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
- }
if (rc < 0)
goto err_out_dif_dma_unmap;
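
This hunk is the heart of the hisi_sas managed-interrupt conversion: the driver-private reply_map (a CPU-to-queue table) is gone, and the delivery queue is instead derived from the block layer, which already steers each request to a hardware queue whose IRQ affinity covers the submitting CPU. blk_mq_unique_tag() packs that queue number into the tag. Sketch of the lookup:

    #include <linux/blk-mq.h>

    u32 tag = blk_mq_unique_tag(scmd->request); /* hwq number | per-queue tag */
    u16 hwq = blk_mq_unique_tag_to_hwq(tag);    /* upper 16 bits: queue index */
    u16 rtag = blk_mq_unique_tag_to_tag(tag);   /* lower 16 bits: request tag */

    dq = &hisi_hba->dq[hwq];	/* deliver on the queue blk-mq selected */
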
@@ -618,6 +621,12 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
if (!phy->phy_attached)
return;
+ if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
+ !sas_phy->suspended) {
+ dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
+ return;
+ }
+
sas_ha = &hisi_hba->sha;
sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
@@ -1429,7 +1438,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
} else {
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
}
}
@@ -1545,7 +1553,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- u32 state;
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
@@ -1560,8 +1567,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, state);
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
@@ -3333,21 +3339,6 @@ enum {
HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};
-enum {
- HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
- HISI_SAS_BIST_CODE_MODE_PRBS23,
- HISI_SAS_BIST_CODE_MODE_PRBS31,
- HISI_SAS_BIST_CODE_MODE_JTPAT,
- HISI_SAS_BIST_CODE_MODE_CJTPAT,
- HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
- HISI_SAS_BIST_CODE_MODE_TRAIN,
- HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
- HISI_SAS_BIST_CODE_MODE_HFTP,
- HISI_SAS_BIST_CODE_MODE_MFTP,
- HISI_SAS_BIST_CODE_MODE_LFTP,
- HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
-};
-
static const struct {
int value;
char *name;
@@ -3703,6 +3694,58 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static const struct {
+ char *name;
+} hisi_sas_debugfs_ffe_name[FFE_CFG_MAX] = {
+ { "SAS_1_5_GBPS" },
+ { "SAS_3_0_GBPS" },
+ { "SAS_6_0_GBPS" },
+ { "SAS_12_0_GBPS" },
+ { "FFE_RESV" },
+ { "SATA_1_5_GBPS" },
+ { "SATA_3_0_GBPS" },
+ { "SATA_6_0_GBPS" },
+};
+
+static ssize_t hisi_sas_debugfs_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ u32 *val = m->private;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, val);
+ if (res)
+ return res;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_show(struct seq_file *s, void *p)
+{
+ u32 *val = s->private;
+
+ seq_printf(s, "0x%x\n", *val);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_ops = {
+ .open = hisi_sas_debugfs_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
@@ -3900,6 +3943,9 @@ static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
+ struct dentry *ports_dentry;
+ int phy_no;
+
hisi_hba->debugfs_bist_dentry =
debugfs_create_dir("bist", hisi_hba->debugfs_dir);
debugfs_create_file("link_rate", 0600,
@@ -3910,6 +3956,16 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
hisi_hba->debugfs_bist_dentry, hisi_hba,
&hisi_sas_debugfs_bist_code_mode_ops);
+ debugfs_create_file("fixed_code", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[0],
+ &hisi_sas_debugfs_ops);
+
+ debugfs_create_file("fixed_code_1", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[1],
+ &hisi_sas_debugfs_ops);
+
debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
@@ -3923,6 +3979,27 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
+ ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct dentry *port_dentry;
+ struct dentry *ffe_dentry;
+ char name[256];
+ int i;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name, ports_dentry);
+ ffe_dentry = debugfs_create_dir("ffe", port_dentry);
+ for (i = 0; i < FFE_CFG_MAX; i++) {
+ if (i == FFE_RESV)
+ continue;
+ debugfs_create_file(hisi_sas_debugfs_ffe_name[i].name,
+ 0600, ffe_dentry,
+ &hisi_hba->debugfs_bist_ffe[phy_no][i],
+ &hisi_sas_debugfs_ops);
+ }
+ }
+
hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 7922a9bb1b28..45e866cb9164 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -752,7 +752,7 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v1_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -1166,7 +1166,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- dev_err(dev, "slot err: SATA/STP not supported");
+ dev_err(dev, "slot err: SATA/STP not supported\n");
}
break;
default:
@@ -1218,35 +1218,35 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO);
if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq force phy err",
+ dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq phy id err",
+ dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq abort flag err",
+ dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq order frame len err",
+ dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
ts->stat = SAS_OPEN_REJECT;
@@ -1294,7 +1294,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- dev_err(dev, "slot complete: SATA/STP not supported");
+ dev_err(dev, "slot complete: SATA/STP not supported\n");
break;
default:
@@ -1417,7 +1417,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) {
- dev_err(dev, "bcast: irq_value = %x not set enable bit",
+ dev_err(dev, "bcast: irq_value = %x not set enable bit\n",
irq_value);
res = IRQ_NONE;
goto end;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 043f47ba3600..b57177b52fac 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1202,7 +1202,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
@@ -1382,7 +1382,7 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v2_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 60adf5c32143..7133ca859b5e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -191,8 +191,10 @@
#define PHY_CFG_PHY_RST_OFF 3
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
-#define CFG_PROG_PHY_LINK_RATE_OFF 8
-#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_PHY_LINK_RATE_OFF 0
+#define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8
+#define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
@@ -295,6 +297,7 @@
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
#define COARSETUNE_TIME (PORT_BASE + 0x304)
+#define TXDEEMPH_G1 (PORT_BASE + 0x350)
#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
@@ -565,7 +568,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
- int i;
+ int i, j;
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
@@ -593,25 +596,24 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
for (i = 0; i < hisi_hba->n_phy; i++) {
+ enum sas_linkrate max;
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
- SAS_LINK_RATE_1_5_GBPS)) {
- prog_phy_link_rate = 0x855;
- } else {
- enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
-
- prog_phy_link_rate =
- hisi_sas_get_prog_phy_linkrate_mask(max) |
- 0x800;
- }
+ SAS_LINK_RATE_1_5_GBPS))
+ max = SAS_LINK_RATE_12_0_GBPS;
+ else
+ max = sas_phy->phy->maximum_linkrate;
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
@@ -636,6 +638,13 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
+
+ /* get default FFE configuration for BIST */
+ for (j = 0; j < FFE_CFG_MAX; j++) {
+ u32 val = hisi_sas_phy_read32(hisi_hba, i,
+ TXDEEMPH_G1 + (j * 0x4));
+ hisi_hba->debugfs_bist_ffe[i][j] = val;
+ }
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -894,13 +903,14 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ struct acpi_device *acpi_dev;
union acpi_object *obj;
guid_t guid;
int rc;
rc = reset_hw_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -924,6 +934,9 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
else
ACPI_FREE(obj);
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_device_power_manageable(acpi_dev))
+ dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
return 0;
}
@@ -1341,7 +1354,6 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
}
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
@@ -1447,7 +1459,6 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
/* dw7 */
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
-
}
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2362,68 +2373,36 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_WAKE_THREAD;
}
-static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
+static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
- const struct cpumask *mask;
- int queue, cpu;
+ int vectors;
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = BASE_VECTORS_V3_HW,
+ };
- for (queue = 0; queue < nvecs; queue++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
+ min_msi = MIN_AFFINE_VECTORS_V3_HW;
+ vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+ min_msi, max_msi,
+ PCI_IRQ_MSI |
+ PCI_IRQ_AFFINITY,
+ &desc);
+ if (vectors < 0)
+ return -ENOENT;
- mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
- BASE_VECTORS_V3_HW);
- if (!mask)
- goto fallback;
- cq->irq_mask = mask;
- for_each_cpu(cpu, mask)
- hisi_hba->reply_map[cpu] = queue;
- }
- return;
-fallback:
- for_each_possible_cpu(cpu)
- hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
- /* Don't clean all CQ masks */
+ hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ shost->nr_hw_queues = hisi_hba->cq_nvecs;
+
+ return 0;
}
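
/*
 * Pattern above, in brief: .pre_vectors = BASE_VECTORS_V3_HW keeps the
 * fixed event interrupts out of the affinity spread, while
 * PCI_IRQ_AFFINITY has the PCI core distribute the remaining
 * completion-queue vectors across CPUs. The driver-private reply_map
 * becomes redundant, since blk-mq can derive the same CPU-to-queue
 * mapping from those affinity masks (see hisi_sas_map_queues() below).
 */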
static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct pci_dev *pdev = hisi_hba->pci_dev;
- int vectors, rc, i;
- int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
-
- if (auto_affine_msi_experimental) {
- struct irq_affinity desc = {
- .pre_vectors = BASE_VECTORS_V3_HW,
- };
-
- dev_info(dev, "Enable MSI auto-affinity\n");
-
- min_msi = MIN_AFFINE_VECTORS_V3_HW;
-
- hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
- sizeof(unsigned int),
- GFP_KERNEL);
- if (!hisi_hba->reply_map)
- return -ENOMEM;
- vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
- min_msi, max_msi,
- PCI_IRQ_MSI |
- PCI_IRQ_AFFINITY,
- &desc);
- if (vectors < 0)
- return -ENOENT;
- setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
- } else {
- min_msi = max_msi;
- vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
- max_msi, PCI_IRQ_MSI);
- if (vectors < 0)
- return vectors;
- }
-
- hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ int rc, i;
rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
int_phy_up_down_bcast_v3_hw, 0,
@@ -2501,8 +2480,10 @@ static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
enum sas_linkrate max = r->maximum_linkrate;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
@@ -2516,10 +2497,11 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
synchronize_irq(pci_irq_vector(pdev, 1));
synchronize_irq(pci_irq_vector(pdev, 2));
synchronize_irq(pci_irq_vector(pdev, 11));
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++)
synchronize_irq(pci_irq_vector(pdev, i + 16));
- }
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2744,6 +2726,33 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+static int slave_configure_v3_hw(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct device *dev = hisi_hba->dev;
+ int ret = sas_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+ if (!dev_is_sata(ddev))
+ sas_change_queue_depth(sdev, 64);
+
+ if (sdev->type == TYPE_ENCLOSURE)
+ return 0;
+
+ if (!device_link_add(&sdev->sdev_gendev, dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
+ if (pm_runtime_enabled(dev)) {
+ dev_info(dev, "add device link failed, disable runtime PM for the host\n");
+ pm_runtime_disable(dev);
+ }
+ }
+
+ return 0;
+}
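+
+/*
+ * In brief: the device link makes each SCSI device a runtime-PM
+ * consumer of the HBA, so the controller is resumed before child I/O.
+ * DL_FLAG_RPM_ACTIVE starts the link with the supplier treated as
+ * runtime-active; if the link cannot be created, runtime PM is disabled
+ * rather than risk suspending the HBA underneath an active device.
+ */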
+
static struct device_attribute *host_attrs_v3_hw[] = {
&dev_attr_phy_event_threshold,
&dev_attr_intr_conv_v3_hw,
@@ -2968,42 +2977,48 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
+ int i;
/* disable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
+
+ /* update FFE */
+ for (i = 0; i < FFE_CFG_MAX; i++)
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
+ hisi_hba->debugfs_bist_ffe[phy_no][i]);
/* disable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}
static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
/* disable loopback */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);
/* enable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
/* restore the linkrate */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbit/s */
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
#define SAS_PHY_BIST_CODE_INIT 0x1
@@ -3012,66 +3027,90 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
u32 linkrate = hisi_hba->debugfs_bist_linkrate;
- u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+ u32 phy_no = hisi_hba->debugfs_bist_phy_no;
+ u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
u32 code_mode = hisi_hba->debugfs_bist_code_mode;
u32 path_mode = hisi_hba->debugfs_bist_mode;
+ u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
struct device *dev = hisi_hba->dev;
- dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
- linkrate, phy_id, code_mode, path_mode);
+ dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
+ phy_no, linkrate, code_mode, path_mode,
+ ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
+ ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
+ ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
+ ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
+ fix_code[FIXED_CODE_1]);
mode_tmp = path_mode ? 2 : 1;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
/* set linkrate of bist test */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
PROG_PHY_LINK_RATE);
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ reg_val);
/* set code mode of bist test */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
SAS_PHY_BIST_CTRL);
- reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
- CFG_LOOP_TEST_MODE_MSK |
- CFG_RX_BIST_EN_MSK |
- CFG_TX_BIST_EN_MSK |
- CFG_BIST_TEST_MSK);
+ reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
+ CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+ CFG_BIST_TEST_MSK);
reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
(mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* set the bist init value */
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE,
- SAS_PHY_BIST_CODE_INIT);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE1,
- SAS_PHY_BIST_CODE1_INIT);
+ if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
+ reg_val = hisi_hba->debugfs_bist_fixed_code[0];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE, reg_val);
+
+ reg_val = hisi_hba->debugfs_bist_fixed_code[1];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1, reg_val);
+ } else {
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE,
+ SAS_PHY_BIST_CODE_INIT);
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1,
+ SAS_PHY_BIST_CODE1_INIT);
+ }
mdelay(100);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* clear error bit */
mdelay(100);
- hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+ hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
- phy_id, SAS_BIST_ERR_CNT);
+ phy_no, SAS_BIST_ERR_CNT);
hisi_sas_bist_test_restore_v3_hw(hisi_hba);
}
return 0;
}
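
/*
 * The HISI_SAS_BIST_CODE_MODE_FIXED_DATA branch above lets a
 * user-supplied pattern (debugfs_bist_fixed_code[0]/[1]) drive the BIST
 * generator instead of the default SAS_PHY_BIST_CODE_INIT values.
 */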
+static int hisi_sas_map_queues(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
+ BASE_VECTORS_V3_HW);
+}
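+
+/*
+ * Paired with host_tagset below: blk_mq_pci_map_queues() places each hw
+ * queue on the CPUs of its MSI vector (offset by BASE_VECTORS_V3_HW),
+ * while all queues draw from a single host-wide tag space.
+ */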
+
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3079,9 +3118,10 @@ static struct scsi_host_template sht_v3_hw = {
.queuecommand = sas_queuecommand,
.dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
+ .slave_configure = slave_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
+ .map_queues = hisi_sas_map_queues,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
.this_id = -1,
@@ -3098,6 +3138,7 @@ static struct scsi_host_template sht_v3_hw = {
.shost_attrs = host_attrs_v3_hw,
.tag_alloc_policy = BLK_TAG_ALLOC_RR,
.host_reset = hisi_sas_host_reset,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -3269,6 +3310,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (hisi_sas_debugfs_enable)
hisi_sas_debugfs_init(hisi_hba);
+ rc = interrupt_preinit_v3_hw(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+ dev_info(dev, "%d hw queues\n", shost->nr_hw_queues);
rc = scsi_add_host(shost, dev);
if (rc)
goto err_out_ha;
@@ -3283,6 +3328,17 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(shost);
+ /*
+ * When ATA disks are connected to the SAS controller, an ata_port is
+ * additionally created, which affects the child_count of
+ * hisi_hba->dev. Even after all the disks are suspended, the ata_port
+ * remains, so the child_count of hisi_hba->dev is not 0. Use
+ * pm_suspend_ignore_children() to ignore this effect on
+ * hisi_hba->dev.
+ */
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
err_out_register_ha:
@@ -3322,6 +3378,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
+ pm_runtime_get_noresume(dev);
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3376,8 +3433,9 @@ enum {
hip08,
};
-static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+static int _suspend_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
@@ -3408,7 +3466,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
hisi_sas_init_mem(hisi_hba);
- device_state = pci_choose_state(pdev, state);
+ device_state = pci_choose_state(pdev, PMSG_SUSPEND);
dev_warn(dev, "entering operating state [D%d]\n",
device_state);
pci_save_state(pdev);
@@ -3421,8 +3479,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int hisi_sas_v3_resume(struct pci_dev *pdev)
+static int _resume_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = hisi_hba->shost;
@@ -3459,6 +3518,34 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
return 0;
}
+static int suspend_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc;
+
+ set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ rc = _suspend_v3_hw(device);
+ if (rc)
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
+
+static int resume_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc = _resume_v3_hw(device);
+
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
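+
+/*
+ * HISI_SAS_PM_BIT stays set for the whole suspend/resume window: set
+ * before _suspend_v3_hw(), cleared if suspend fails, and cleared again
+ * once _resume_v3_hw() completes, so other paths can tell a PM
+ * transition is in flight.
+ */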
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
@@ -3470,14 +3557,29 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.reset_done = hisi_sas_reset_done_v3_hw,
};
+static int runtime_suspend_v3_hw(struct device *dev)
+{
+ return suspend_v3_hw(dev);
+}
+
+static int runtime_resume_v3_hw(struct device *dev)
+{
+ return resume_v3_hw(dev);
+}
+
+static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
+ SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
+ runtime_resume_v3_hw, NULL)
+};
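+
+/*
+ * Switching from the legacy pci_driver .suspend/.resume hooks to
+ * dev_pm_ops is what makes SET_RUNTIME_PM_OPS() possible here: the same
+ * suspend/resume bodies now back both system sleep and runtime PM.
+ */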
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
- .suspend = hisi_sas_v3_suspend,
- .resume = hisi_sas_v3_resume,
.err_handler = &hisi_sas_err_handler,
+ .driver.pm = &hisi_sas_v3_pm_ops,
};
module_pci_driver(sas_v3_pci_driver);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 37d1c5565d90..2f162603876f 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -421,6 +421,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->cmd_per_lun = sht->cmd_per_lun;
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->no_write_same = sht->no_write_same;
+ shost->host_tagset = sht->host_tagset;
if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler)
shost->eh_deadline = -1;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 48d5da59262b..8df70c92911d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
@@ -8854,7 +8855,7 @@ reinit_after_soft_reset:
/* hook into SCSI subsystem */
rc = hpsa_scsi_add_host(h);
if (rc)
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8869,6 +8870,8 @@ reinit_after_soft_reset:
HPSA_EVENT_MONITOR_INTERVAL);
return 0;
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+ kfree(h->lastlogicals);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
@@ -9329,10 +9332,10 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
if (h->ioaccel_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- h->ioaccel_cmd_pool,
- h->ioaccel_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool,
+ h->ioaccel_cmd_pool_dhandle);
h->ioaccel_cmd_pool = NULL;
h->ioaccel_cmd_pool_dhandle = 0;
}
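
/*
 * Conversion pattern in these hunks: pci_free_consistent(pdev, ...)
 * becomes dma_free_coherent(&pdev->dev, ...) with identical size/vaddr/
 * handle arguments; on the allocation side, pci_alloc_consistent()
 * (which implied GFP_ATOMIC) becomes dma_alloc_coherent() with an
 * explicit gfp, GFP_KERNEL where the caller may sleep.
 */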
@@ -9382,10 +9385,10 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
hpsa_free_ioaccel2_sg_chain_blocks(h);
if (h->ioaccel2_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- h->ioaccel2_cmd_pool,
- h->ioaccel2_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool,
+ h->ioaccel2_cmd_pool_dhandle);
h->ioaccel2_cmd_pool = NULL;
h->ioaccel2_cmd_pool_dhandle = 0;
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6b87d9815b35..99b0750850b2 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 7825cbfea4dc..46df2e3ff89b 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea7c8930592d..070cf516b98f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -134,6 +134,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
@@ -431,7 +432,20 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
}
break;
case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
+ action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
tgt->action = action;
rc = 0;
}
@@ -441,16 +455,18 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
tgt->action = action;
rc = 0;
}
+ break;
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
- tgt->add_rport = 0;
tgt->action = action;
rc = 0;
break;
}
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ tgt->add_rport = 0;
+
return rc;
}
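
/*
 * Transitions added above, as a sketch of the state machine:
 *   LOGOUT_RPORT_WAIT    -> DEL_RPORT or DEL_AND_LOGOUT_RPORT
 *   LOGOUT_DELETED_RPORT -> LOGOUT_RPORT
 *   DEL_AND_LOGOUT_RPORT -> LOGOUT_DELETED_RPORT
 * and any action at or past LOGOUT_RPORT now also clears tgt->add_rport.
 */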
@@ -548,7 +564,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
**/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
- if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
+ vhost->state == IBMVFC_ACTIVE) {
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
@@ -2574,7 +2591,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *dev_rport;
struct scsi_device *sdev;
- unsigned long rc;
+ struct ibmvfc_target *tgt;
+ unsigned long rc, flags;
+ unsigned int found;
ENTER;
shost_for_each_device(sdev, shost) {
@@ -2588,6 +2607,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ found = 0;
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == rport->port_id) {
+ found++;
+ break;
+ }
+ }
+
+ if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * If we get here, that means we previously attempted to send
+ * an implicit logout to the target but it failed, most likely
+ * due to I/O being pending, so we need to send it again
+ */
+ ibmvfc_del_tgt(tgt);
+ ibmvfc_reinit_host(vhost);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
LEAVE;
}
@@ -3623,7 +3663,18 @@ static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
vhost->discovery_threads--;
ibmvfc_free_event(evt);
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ /*
+ * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
+ * driver, in which case we need to free up all the targets. If we are
+ * not unloading, we will still go through a hard reset to get out of
+ * offline state, so there is no need to track the old targets in that
+ * case.
+ */
+ if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -3662,6 +3713,94 @@ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_move_login_done - Completion handler for Move Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->ids.port_id = tgt->scsi_id;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+
+ tgt_log(tgt, level,
+ "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ status);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_move_login - Initiate a move login for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_move_login *move;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ move = &evt->iu.move_login;
+ memset(move, 0, sizeof(*move));
+ move->common.version = cpu_to_be32(1);
+ move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
+ move->common.length = cpu_to_be16(sizeof(*move));
+
+ move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->wwpn = cpu_to_be64(tgt->wwpn);
+ move->node_name = cpu_to_be64(tgt->ids.node_name);
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+}
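+
+/*
+ * Like the other MADs, Move Login is built in wire (big-endian) format
+ * via cpu_to_be*() and completes asynchronously in
+ * ibmvfc_tgt_move_login_done(); if the send itself fails, the target
+ * falls back to DEL_RPORT instead of retrying.
+ */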
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3979,31 +4118,77 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
+ struct ibmvfc_discover_targets_entry *target)
{
+ struct ibmvfc_target *stgt = NULL;
+ struct ibmvfc_target *wtgt = NULL;
struct ibmvfc_target *tgt;
unsigned long flags;
+ u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
+ u64 wwpn = be64_to_cpu(target->wwpn);
+ /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->wwpn == wwpn) {
+ wtgt = tgt;
+ break;
+ }
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == scsi_id) {
- if (tgt->need_login)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ stgt = tgt;
+ break;
+ }
+ }
+
+ if (wtgt && !stgt) {
+ /*
+ * A WWPN target has moved and we still are tracking the old
+ * SCSI ID. The only way we should be able to get here is if
+ * we attempted to send an implicit logout for the old SCSI ID
+ * and it failed for some reason, such as there being I/O
+ * pending to the target. In this case, we will have already
+ * deleted the rport from the FC transport so we do a move
+ * login, which works even with I/O pending, as it will cancel
+ * any active commands.
+ */
+ if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer. We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->old_scsi_id = wtgt->scsi_id;
+ wtgt->scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
goto unlock_out;
+ } else {
+ tgt_err(wtgt, "Unexpected target state: %d, %p\n",
+ wtgt->action, wtgt->rport);
}
+ } else if (stgt) {
+ if (tgt->need_login)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
+ tgt->wwpn = wwpn;
tgt->vhost = vhost;
tgt->need_login = 1;
- tgt->cancel_key = vhost->task_set++;
timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ tgt->cancel_key = vhost->task_set++;
list_add_tail(&tgt->queue, &vhost->targets);
unlock_out:
@@ -4023,9 +4208,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
- rc = ibmvfc_alloc_target(vhost,
- be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
- IBMVFC_DISC_TGT_SCSI_ID_MASK);
+ rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
return rc;
}
@@ -4085,6 +4268,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -4420,6 +4604,13 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ tgt->rport = NULL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
@@ -4543,6 +4734,15 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ return;
}
}
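
/*
 * In both paths above, the rport pointer is snapshotted and cleared
 * under host_lock, and fc_remote_port_delete() is only called after the
 * lock is dropped; the target then parks in LOGOUT_DELETED_RPORT until
 * the implicit logout can be retried.
 */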
@@ -4775,7 +4975,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_sg_pool;
}
- vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+ vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
&vhost->disc_buf_dma, GFP_KERNEL);
@@ -4928,6 +5128,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
goto free_host_mem;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 907889f1fa9d..34debccfb142 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -120,10 +120,14 @@ enum ibmvfc_mad_types {
IBMVFC_PORT_LOGIN = 0x0004,
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
+ IBMVFC_MOVE_LOGIN = 0x0020,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
IBMVFC_NPIV_LOGOUT = 0x0800,
+ IBMVFC_CHANNEL_ENQUIRY = 0x1000,
+ IBMVFC_CHANNEL_SETUP = 0x2000,
+ IBMVFC_CONNECTION_INFO = 0x4000,
};
struct ibmvfc_mad_common {
@@ -133,16 +137,16 @@ struct ibmvfc_mad_common {
__be16 status;
__be16 length;
__be64 tag;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_login_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_logout_mad {
struct ibmvfc_mad_common common;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
#define IBMVFC_MAX_NAME 256
@@ -162,13 +166,15 @@ struct ibmvfc_npiv_login {
__be32 max_cmds;
__be64 capabilities;
#define IBMVFC_CAN_MIGRATE 0x01
+#define IBMVFC_CAN_USE_CHANNELS 0x02
+#define IBMVFC_CAN_HANDLE_FPIN 0x04
__be64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_common_svc_parms {
__be16 fcph_version;
@@ -177,7 +183,7 @@ struct ibmvfc_common_svc_parms {
__be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
__be32 ratov;
__be32 edtov;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_service_parms {
struct ibmvfc_common_svc_parms common;
@@ -192,7 +198,8 @@ struct ibmvfc_service_parms {
__be32 ext_len;
__be32 reserved[30];
__be32 clk_sync_qos[2];
-}__attribute__((packed, aligned (4)));
+ __be32 reserved2;
+} __packed __aligned(4);
struct ibmvfc_npiv_login_resp {
__be32 version;
@@ -204,6 +211,7 @@ struct ibmvfc_npiv_login_resp {
__be64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
+#define IBMVFC_CAN_SUPPORT_CHANNELS 0x20
__be32 max_cmds;
__be32 scsi_id_sz;
__be64 max_dma_len;
@@ -217,29 +225,32 @@ struct ibmvfc_npiv_login_resp {
u8 drc_name[IBMVFC_MAX_NAME];
struct ibmvfc_service_parms service_parms;
__be64 reserved2;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
union ibmvfc_npiv_login_data {
struct ibmvfc_npiv_login login;
struct ibmvfc_npiv_login_resp resp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
-struct ibmvfc_discover_targets_buf {
- __be32 scsi_id[1];
+struct ibmvfc_discover_targets_entry {
+ __be32 scsi_id;
+ __be32 pad;
+ __be64 wwpn;
#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
-};
+} __packed __aligned(8);
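+
+/*
+ * Each discovery entry now carries both the 24-bit port ID (the masked
+ * low bits of scsi_id) and the target WWPN, which is what allows
+ * ibmvfc_alloc_target() to detect a WWPN that has moved to a new port ID.
+ */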
struct ibmvfc_discover_targets {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
__be32 flags;
+#define IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST 0x02
__be16 status;
__be16 error;
__be32 bufflen;
__be32 num_avail;
__be32 num_written;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fc_reason {
IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
@@ -283,7 +294,27 @@ struct ibmvfc_port_login {
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_move_login {
+ struct ibmvfc_mad_common common;
+ __be64 old_scsi_id;
+ __be64 new_scsi_id;
+ __be64 wwpn;
+ __be64 node_name;
+ __be32 flags;
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_OLD_FAILED 0x01
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_NEW_FAILED 0x02
+#define IBMVFC_MOVE_LOGIN_PORT_LOGIN_FAILED 0x04
+ __be32 reserved;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ __be32 reserved2;
+ __be16 service_class;
+ __be16 vios_flags;
+#define IBMVFC_MOVE_LOGIN_VF_NOT_SENT_ADAPTER 0x01
+ __be64 reserved3;
+} __packed __aligned(8);
struct ibmvfc_prli_svc_parms {
u8 type;
@@ -303,7 +334,7 @@ struct ibmvfc_prli_svc_parms {
#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_process_login {
struct ibmvfc_mad_common common;
@@ -314,7 +345,7 @@ struct ibmvfc_process_login {
__be16 error; /* also fc_reason */
__be32 reserved2;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_query_tgt {
struct ibmvfc_mad_common common;
@@ -325,13 +356,13 @@ struct ibmvfc_query_tgt {
__be16 fc_explain;
__be16 fc_type;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_implicit_logout {
struct ibmvfc_mad_common common;
__be64 old_scsi_id;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_tmf {
struct ibmvfc_mad_common common;
@@ -348,7 +379,7 @@ struct ibmvfc_tmf {
__be32 my_cancel_key;
__be32 pad;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fcp_rsp_info_codes {
RSP_NO_FAILURE = 0x00,
@@ -361,7 +392,7 @@ struct ibmvfc_fcp_rsp_info {
u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
-}__attribute__((packed, aligned (2)));
+} __packed __aligned(2);
enum ibmvfc_fcp_rsp_flags {
FCP_BIDI_RSP = 0x80,
@@ -377,7 +408,7 @@ enum ibmvfc_fcp_rsp_flags {
union ibmvfc_fcp_rsp_data {
struct ibmvfc_fcp_rsp_info info;
u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_fcp_rsp {
__be64 reserved;
@@ -388,7 +419,7 @@ struct ibmvfc_fcp_rsp {
__be32 fcp_sense_len;
__be32 fcp_rsp_len;
union ibmvfc_fcp_rsp_data data;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_cmd_flags {
IBMVFC_SCATTERLIST = 0x0001,
@@ -422,7 +453,7 @@ struct ibmvfc_fcp_cmd_iu {
#define IBMVFC_WRDATA 0x01
u8 cdb[IBMVFC_MAX_CDB_LEN];
__be32 xfer_len;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_cmd {
__be64 task_tag;
@@ -446,7 +477,7 @@ struct ibmvfc_cmd {
__be64 reserved3[2];
struct ibmvfc_fcp_cmd_iu iu;
struct ibmvfc_fcp_rsp rsp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_passthru_fc_iu {
__be32 payload[7];
@@ -473,18 +504,64 @@ struct ibmvfc_passthru_iu {
__be64 scsi_id;
__be64 tag;
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_passthru_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf cmd_ioba;
struct ibmvfc_passthru_iu iu;
struct ibmvfc_passthru_fc_iu fc_iu;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_channel_enquiry {
+ struct ibmvfc_mad_common common;
+ __be32 flags;
+#define IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT 0x01
+#define IBMVFC_SUPPORT_VARIABLE_SUBQ_MSG 0x02
+#define IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT 0x04
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+} __packed __aligned(8);
+
+struct ibmvfc_channel_setup_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+} __packed __aligned(8);
+
+#define IBMVFC_MAX_CHANNELS 502
+
+struct ibmvfc_channel_setup {
+ __be32 flags;
+#define IBMVFC_CANCEL_CHANNELS 0x01
+#define IBMVFC_USE_BUFFER 0x02
+#define IBMVFC_CHANNELS_CANCELED 0x04
+ __be32 reserved;
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+ struct srp_direct_buf buffer;
+ __be64 reserved2[5];
+ __be64 channel_handles[IBMVFC_MAX_CHANNELS];
+} __packed __aligned(8);
+
+struct ibmvfc_connection_info {
+ struct ibmvfc_mad_common common;
+ __be64 information_bits;
+#define IBMVFC_NO_FC_IO_CHANNEL 0x01
+#define IBMVFC_NO_PHYP_VAS 0x02
+#define IBMVFC_NO_PHYP_SUBQ 0x04
+#define IBMVFC_PHYP_DEPRECATED_SUBQ 0x08
+#define IBMVFC_PHYP_PRESERVED_SUBQ 0x10
+#define IBMVFC_PHYP_FULL_SUBQ 0x20
+ __be64 reserved[16];
+} __packed __aligned(8);
struct ibmvfc_trace_start_entry {
u32 xfer_len;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_end_entry {
u16 status;
@@ -493,7 +570,7 @@ struct ibmvfc_trace_end_entry {
u8 rsp_code;
u8 scsi_status;
u8 reserved;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_entry {
struct ibmvfc_event *evt;
@@ -510,7 +587,7 @@ struct ibmvfc_trace_entry {
struct ibmvfc_trace_start_entry start;
struct ibmvfc_trace_end_entry end;
} u;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_crq_formats {
IBMVFC_CMD_FORMAT = 0x01,
@@ -532,6 +609,7 @@ enum ibmvfc_async_event {
IBMVFC_AE_HALT = 0x0400,
IBMVFC_AE_RESUME = 0x0800,
IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+ IBMVFC_AE_FPIN = 0x2000,
};
struct ibmvfc_async_desc {
@@ -545,7 +623,7 @@ struct ibmvfc_crq {
volatile u8 format;
u8 reserved[6];
volatile __be64 ioba;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_crq_queue {
struct ibmvfc_crq *msgs;
@@ -560,17 +638,25 @@ enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_DEAD = 0x08,
};
+enum ibmvfc_ae_fpin_status {
+ IBMVFC_AE_FPIN_LINK_CONGESTED = 0x1,
+ IBMVFC_AE_FPIN_PORT_CONGESTED = 0x2,
+ IBMVFC_AE_FPIN_PORT_CLEARED = 0x3,
+ IBMVFC_AE_FPIN_PORT_DEGRADED = 0x4,
+};
+
struct ibmvfc_async_crq {
volatile u8 valid;
u8 link_state;
- u8 pad[2];
+ u8 fpin_status;
+ u8 pad;
__be32 pad2;
volatile __be64 event;
volatile __be64 scsi_id;
volatile __be64 wwpn;
volatile __be64 node_name;
__be64 reserved;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_async_crq_queue {
struct ibmvfc_async_crq *msgs;
@@ -585,12 +671,16 @@ union ibmvfc_iu {
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
+ struct ibmvfc_move_login move_login;
struct ibmvfc_query_tgt query_tgt;
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
struct ibmvfc_passthru_mad passthru;
-}__attribute__((packed, aligned (8)));
+ struct ibmvfc_channel_enquiry channel_enquiry;
+ struct ibmvfc_channel_setup_mad channel_setup;
+ struct ibmvfc_connection_info connection_info;
+} __packed __aligned(8);
enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
@@ -600,12 +690,16 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
+ IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
};
struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
+ u64 wwpn;
+ u64 old_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
@@ -701,7 +795,7 @@ struct ibmvfc_host {
dma_addr_t login_buf_dma;
int disc_buf_sz;
int log_level;
- struct ibmvfc_discover_targets_buf *disc_buf;
+ struct ibmvfc_discover_targets_entry *disc_buf;
struct mutex passthru_mutex;
int task_set;
int init_retries;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b1f3017b6547..29fcc44be2d5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -807,13 +807,29 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
}
/**
+ * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
+ * an adapter failure, reset, or SRP Login. Done under host lock to prevent
+ * race with SCSI command submission.
+ * @hostdata: adapter to adjust
+ * @limit: new request limit
+ */
+static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ atomic_set(&hostdata->request_limit, limit);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
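+
+/*
+ * Callers must not already hold host_lock when using this helper (it
+ * takes the lock itself); hence ibmvscsi_do_work() below now drops the
+ * lock before reporting an error, and send_srp_login() sets the limit
+ * before taking the lock.
+ */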
+
+/**
* ibmvscsi_reset_host - Reset the connection to the server
* @hostdata: struct ibmvscsi_host_data to reset
*/
static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
{
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
purge_requests(hostdata, DID_ERROR);
hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
@@ -1146,13 +1162,13 @@ static void login_rsp(struct srp_event_struct *evt_struct)
dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
default:
dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
return;
}
@@ -1163,7 +1179,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
* This value is set rather than added to request_limit because
* request_limit could have been set to -1 by this client.
*/
- atomic_set(&hostdata->request_limit,
+ ibmvscsi_set_request_limit(hostdata,
be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
/* If we had any pending I/Os, kick them */
@@ -1195,13 +1211,13 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- spin_lock_irqsave(hostdata->host->host_lock, flags);
/* Start out with a request limit of 0, since this is negotiated in
* the login request we are just sending and login requests always
* get sent by the driver regardless of request_limit.
*/
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
dev_info(hostdata->dev, "sent SRP login\n");
@@ -1781,7 +1797,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
return;
case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
+ ibmvscsi_set_request_limit(hostdata, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
dev_info(hostdata->dev, "Re-enabling adapter!\n");
@@ -2137,12 +2153,12 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
}
hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc) {
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
dev_err(hostdata->dev, "error after %s\n", action);
}
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
scsi_unblock_requests(hostdata->host);
}
@@ -2226,7 +2242,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
init_waitqueue_head(&hostdata->work_wait_q);
hostdata->host = host;
hostdata->dev = dev;
- atomic_set(&hostdata->request_limit, -1);
+ ibmvscsi_set_request_limit(hostdata, -1);
hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
if (map_persist_bufs(hostdata)) {
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 1d39628ac947..ca16ef45d8dc 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2962,20 +2962,8 @@ static struct pci_driver initio_pci_driver = {
.probe = initio_probe_one,
.remove = initio_remove_one,
};
-
-static int __init initio_init_driver(void)
-{
- return pci_register_driver(&initio_pci_driver);
-}
-
-static void __exit initio_exit_driver(void)
-{
- pci_unregister_driver(&initio_pci_driver);
-}
+module_pci_driver(initio_pci_driver);
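+
+/*
+ * module_pci_driver() expands to exactly the module_init()/module_exit()
+ * registration boilerplate removed above; the module_platform_driver()
+ * conversions for the ESP drivers below do the same for platform drivers.
+ */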
MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("GPL");
-
-module_init(initio_init_driver);
-module_exit(initio_exit_driver);
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 7b5deae68d33..7ebfa3c8cdc7 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2671,7 +2671,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
struct isci_request *ireq)
{
enum sci_status status;
- u16 index;
switch (ihost->sm.current_state_id) {
case SCIC_STOPPING:
@@ -2682,7 +2681,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- index = ISCI_TAG_TCI(ireq->io_tag);
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 085e285f427d..93bc9019667f 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -142,7 +142,7 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-struct device_attribute *isci_host_attrs[] = {
+static struct device_attribute *isci_host_attrs[] = {
&dev_attr_isci_id,
NULL
};
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 4cacb800b530..7041e2e3ab48 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -669,7 +669,7 @@ static const char *phy_event_name(u32 event_code)
phy_state_name(state), phy_event_name(code), code)
-void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
{
u32 val;
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
index 721ab982d2ac..0ddfdda2b248 100644
--- a/drivers/scsi/isci/remote_node_table.h
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -61,7 +61,7 @@
/**
*
*
- * Remote node sets are sets of remote node index in the remtoe node table The
+ * Remote node sets are sets of remote node index in the remote node table. The
* SCU hardware requires that STP remote node entries take three consecutive
* remote node index so the table is arranged in sets of three. The bits are
* used as 0111 0111 to make a byte and the bits define the set of three remote
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index d10efb66cf19..df47557a02a3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -970,8 +970,8 @@ static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
struct iscsi_conn *conn = session->leadconn;
if (conn->datadgst_en)
- sdev->request_queue->backing_dev_info->capabilities
- |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ sdev->request_queue);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
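
/*
 * Stable writes matter here because, with data digests enabled, the CRC
 * is computed over payload pages before they hit the wire; the queue
 * flag is the replacement for the old BDI_CAP_STABLE_WRITES capability.
 */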
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 7f683e42c798..f0ed6863cc70 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -201,21 +201,9 @@ static struct platform_driver esp_jazz_driver = {
.name = "jazz_esp",
},
};
-
-static int __init jazz_esp_init(void)
-{
- return platform_driver_register(&esp_jazz_driver);
-}
-
-static void __exit jazz_esp_exit(void)
-{
- platform_driver_unregister(&esp_jazz_driver);
-}
+module_platform_driver(esp_jazz_driver);
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(jazz_esp_init);
-module_exit(jazz_esp_exit);
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index e67abb184a8a..942fc60f7c21 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -301,8 +301,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
struct fc_lport *lport = fc_disc_lport(disc);
unsigned long delay = 0;
- FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
- PTR_ERR(fp), disc->retry_count,
+ FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
+ PTR_ERR_OR_ZERO(fp), disc->retry_count,
FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index a4887985aad6..024e5a550759 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -726,19 +726,13 @@ void sas_resume_sata(struct asd_sas_port *port)
*/
int sas_discover_sata(struct domain_device *dev)
{
- int res;
-
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
static void async_sas_ata_eh(void *data, async_cookie_t cookie)
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d0f9e90e3279..161c9b387da7 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -278,13 +278,7 @@ static void sas_resume_devices(struct work_struct *work)
*/
int sas_discover_end_dev(struct domain_device *dev)
{
- int res;
-
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
/* ---------- Device registration and unregistration ---------- */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ece6c250ebaf..e94eac194676 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5338,8 +5338,7 @@ static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int status = -EINVAL;
- return status;
+ return -EINVAL;
}
/*
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d0141a23a833..a8bf4d0d58f0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -387,6 +387,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
+ geniocb->context_un.ndlp = NULL;
+ lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, geniocb);
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ae0a8252128c..c9a327b13e5c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1696,7 +1696,6 @@ static int
lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
struct lpfc_hdwq_stat *c_stat;
int i, j, len;
uint32_t tot_xmt;
@@ -1726,8 +1725,6 @@ lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
goto buffer_done;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
-
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
@@ -5944,7 +5941,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_lockstat);
if (!phba->debug_lockstat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4610 Cant create debugfs lockstat\n");
+ "4610 Can't create debugfs lockstat\n");
goto debug_failed;
}
#endif
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 1c78bc10c790..6d23ab5aee56 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -439,22 +439,10 @@ static struct platform_driver esp_mac_driver = {
.name = DRV_MODULE_NAME,
},
};
-
-static int __init mac_esp_init(void)
-{
- return platform_driver_register(&esp_mac_driver);
-}
-
-static void __exit mac_esp_exit(void)
-{
- platform_driver_unregister(&esp_mac_driver);
-}
+module_platform_driver(esp_mac_driver);
MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
-
-module_init(mac_esp_init);
-module_exit(mac_esp_exit);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ac406049e7c8..80f546976c7e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -133,8 +133,10 @@ mega_setup_mailbox(adapter_t *adapter)
{
unsigned long align;
- adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
- sizeof(mbox64_t), &adapter->una_mbox64_dma);
+ adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mbox64_t),
+ &adapter->una_mbox64_dma,
+ GFP_KERNEL);
if( !adapter->una_mbox64 ) return -1;
@@ -222,8 +224,9 @@ mega_query_adapter(adapter_t *adapter)
mraid_inquiry *inq;
dma_addr_t dma_handle;
- ext_inq = pci_alloc_consistent(adapter->dev,
- sizeof(mraid_ext_inquiry), &dma_handle);
+ ext_inq = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry),
+ &dma_handle, GFP_KERNEL);
if( ext_inq == NULL ) return -1;
@@ -243,8 +246,9 @@ mega_query_adapter(adapter_t *adapter)
mega_8_to_40ld(inq, inquiry3,
(mega_product_info *)&adapter->product_info);
- pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
- ext_inq, dma_handle);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry), ext_inq,
+ dma_handle);
} else { /*adapter supports 40ld */
adapter->flag |= BOARD_40LD;
@@ -253,9 +257,10 @@ mega_query_adapter(adapter_t *adapter)
* get product_info, which is static information and will be
* unchanged
*/
- prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
- &adapter->product_info,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
+ (void *)&adapter->product_info,
+ sizeof(mega_product_info),
+ DMA_FROM_DEVICE);
mbox->m_out.xferaddr = prod_info_dma_handle;
@@ -267,8 +272,8 @@ mega_query_adapter(adapter_t *adapter)
"Product_info cmd failed with error: %d\n",
retval);
- pci_unmap_single(adapter->dev, prod_info_dma_handle,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
+ sizeof(mega_product_info), DMA_FROM_DEVICE);
}
@@ -645,7 +650,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#else
@@ -709,7 +714,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
}
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -839,10 +844,10 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
* If it is a read command
*/
if( (*cmd->cmnd & 0x0F) == 0x08 ) {
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
}
else {
- scb->dma_direction = PCI_DMA_TODEVICE;
+ scb->dma_direction = DMA_TO_DEVICE;
}
/* Calculate Scatter-Gather info */
@@ -877,7 +882,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#endif
@@ -971,7 +976,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
switch (cmd->cmnd[0]) {
@@ -1035,7 +1040,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
switch(cmd->cmnd[0]) {
case INQUIRY:
@@ -1813,25 +1818,25 @@ mega_free_sgl(adapter_t *adapter)
scb = &adapter->scb_list[i];
if( scb->sgl64 ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- scb->sgl64,
- scb->sgl_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ scb->sgl64, scb->sgl_dma_addr);
scb->sgl64 = NULL;
}
if( scb->pthru ) {
- pci_free_consistent(adapter->dev, sizeof(mega_passthru),
- scb->pthru, scb->pthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru), scb->pthru,
+ scb->pthru_dma_addr);
scb->pthru = NULL;
}
if( scb->epthru ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- scb->epthru, scb->epthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ scb->epthru, scb->epthru_dma_addr);
scb->epthru = NULL;
}
@@ -2004,7 +2009,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
- if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
+ if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
kfree(*pdev);
return -1;
}
@@ -2028,14 +2033,16 @@ free_local_pdev(struct pci_dev *pdev)
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
- return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
+ return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
+ dma_handle, GFP_KERNEL);
}
static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
- pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
+ dma_handle);
}
@@ -2349,7 +2356,8 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
- scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
+ scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
+ GFP_KERNEL);
if( scsi_inq == NULL ) {
seq_puts(m, "memory not available for scsi inq.\n");
goto free_inquiry;
@@ -2422,7 +2430,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
free_pci:
- pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+ dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2542,8 +2550,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
raid_inq.logdrv_info.num_ldrv;
}
- disk_array = pci_alloc_consistent(pdev, array_sz,
- &disk_array_dma_handle);
+ disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
+ &disk_array_dma_handle, GFP_KERNEL);
if( disk_array == NULL ) {
seq_puts(m, "memory not available.\n");
@@ -2662,8 +2670,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
}
free_pci:
- pci_free_consistent(pdev, array_sz, disk_array,
- disk_array_dma_handle);
+ dma_free_coherent(&pdev->dev, array_sz, disk_array,
+ disk_array_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2881,9 +2889,9 @@ mega_init_scb(adapter_t *adapter)
scb->idx = i;
- scb->sgl64 = pci_alloc_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- &scb->sgl_dma_addr);
+ scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ &scb->sgl_dma_addr, GFP_KERNEL);
scb->sgl = (mega_sglist *)scb->sgl64;
@@ -2893,9 +2901,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->pthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_passthru),
- &scb->pthru_dma_addr);
+ scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru),
+ &scb->pthru_dma_addr, GFP_KERNEL);
if( !scb->pthru ) {
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
@@ -2903,9 +2911,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->epthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- &scb->epthru_dma_addr);
+ scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ &scb->epthru_dma_addr, GFP_KERNEL);
if( !scb->epthru ) {
dev_warn(&adapter->dev->dev,
@@ -3145,9 +3153,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
/* Passthru commands */
- pthru = pci_alloc_consistent(pdev,
- sizeof(mega_passthru),
- &pthru_dma_hndl);
+ pthru = dma_alloc_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ &pthru_dma_hndl, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -3165,9 +3173,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( copy_from_user(pthru, upthru,
sizeof(mega_passthru)) ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru), pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3178,15 +3186,16 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
* Is there a data transfer
*/
if( pthru->dataxferlen ) {
- data = pci_alloc_consistent(pdev,
- pthru->dataxferlen,
- &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ pthru->dataxferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru),
- pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru,
+ pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3251,13 +3260,13 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
freemem_and_return:
if( pthru->dataxferlen ) {
- pci_free_consistent(pdev,
- pthru->dataxferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ pthru->dataxferlen, data,
+ data_dma_hndl);
}
- pci_free_consistent(pdev, sizeof(mega_passthru),
- pthru, pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3270,8 +3279,10 @@ freemem_and_return:
* Is there a data transfer
*/
if( uioc.xferlen ) {
- data = pci_alloc_consistent(pdev,
- uioc.xferlen, &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ uioc.xferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
free_local_pdev(pdev);
@@ -3291,9 +3302,9 @@ freemem_and_return:
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
- pci_free_consistent(pdev,
- uioc.xferlen,
- data, data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
free_local_pdev(pdev);
@@ -3314,9 +3325,9 @@ freemem_and_return:
if( rval ) {
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
}
free_local_pdev(pdev);
@@ -3336,9 +3347,8 @@ freemem_and_return:
}
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev, uioc.xferlen,
+ data, data_dma_hndl);
}
free_local_pdev(pdev);
@@ -4004,8 +4014,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
*/
if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
- pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
- &pthru_dma_handle);
+ pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
+ &pthru_dma_handle, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -4041,8 +4051,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
rval = mega_internal_command(adapter, &mc, pthru);
- pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
- pthru_dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
+ pthru_dma_handle);
free_local_pdev(pdev);
@@ -4267,8 +4277,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* Allocate buffer to issue internal commands.
*/
- adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
- MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
+ adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
+ MEGA_BUFFER_SIZE,
+ &adapter->buf_dma_handle,
+ GFP_KERNEL);
if (!adapter->mega_buffer) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_host_put;
@@ -4427,10 +4439,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the Mode of addressing to 64 bit if we can */
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
adapter->has_64bit_addr = 1;
} else {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
adapter->has_64bit_addr = 0;
}
@@ -4469,15 +4481,15 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_mbox:
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
free_irq(adapter->host->irq, adapter);
out_free_scb_list:
kfree(adapter->scb_list);
out_free_cmd_buffer:
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
scsi_host_put(host);
out_iounmap:
@@ -4551,11 +4563,11 @@ megaraid_remove_one(struct pci_dev *pdev)
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_subtree(buf, mega_proc_dir_entry);
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
kfree(adapter->scb_list);
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
scsi_host_put(host);
pci_disable_device(pdev);
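The megaraid.c hunks above are a mechanical conversion from the legacy PCI DMA wrappers to the generic DMA API. The correspondence, sketched as a comment (note that pci_alloc_consistent() historically implied GFP_ATOMIC, so GFP_KERNEL at these sleepable call sites is a deliberate, safe relaxation):

    /* Legacy PCI wrapper                      Generic DMA API replacement
     * pci_alloc_consistent(pdev, sz, &h)   -> dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
     * pci_free_consistent(pdev, sz, p, h)  -> dma_free_coherent(&pdev->dev, sz, p, h)
     * pci_map_single(pdev, p, sz,
     *                PCI_DMA_FROMDEVICE)   -> dma_map_single(&pdev->dev, p, sz, DMA_FROM_DEVICE)
     * pci_set_dma_mask(pdev, mask)         -> dma_set_mask(&pdev->dev, mask)
     */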
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2b7e7b5f38ed..41cd66fc7d81 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -77,7 +78,7 @@ unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
-int smp_affinity_enable = 1;
+static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+	/* Use the shared host tagset only for fusion adapters
+	 * if there are managed interrupts (smp affinity enabled case).
+	 * kdump runs with a single MSI-X vector, so the shared host
+	 * tagset is disabled there as well.
+ */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
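A note on the shared tagset enabled here (summary, not from the patch itself): with shost->host_tagset set, blk-mq allocates a single tag space of can_queue tags shared across all nr_hw_queues instead of can_queue tags per hardware queue, so the firmware command pool cannot be oversubscribed. Roughly:

    /* outstanding commands blk-mq may issue to the HBA:
     *   host_tagset == 1  ->  can_queue                  (one shared tag space)
     *   host_tagset == 0  ->  can_queue * nr_hw_queues   (per-queue tag spaces)
     */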
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+	/* TBD - if the SML removes device_busy in the future, the
+	 * driver should track the counter in an internal structure.
+ */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
/**
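For the nr_hw_queues > 1 branch added above: blk_mq_unique_tag() packs the hardware queue index into the upper 16 bits of the returned value, which is how the reply queue is recovered from a command. A sketch of the packing as used by the blk-mq helpers (illustrative only):

    u32 unique = blk_mq_unique_tag(scmd->request); /* (hwq << 16) | per-queue tag */
    u16 hwq = blk_mq_unique_tag_to_hwq(unique);    /* unique >> 16 */
    u16 tag = blk_mq_unique_tag_to_tag(unique);    /* unique & 0xffff */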
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8062bd99add8..e4cc92bc4d94 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -129,8 +129,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
-static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
@@ -680,7 +678,7 @@ _base_fault_reset_work(struct work_struct *work)
ioc->shost_recovery = 1;
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
_base_clear_outstanding_commands(ioc);
}
@@ -1466,13 +1464,13 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
- * _base_mask_interrupts - disable interrupts
+ * mpt3sas_base_mask_interrupts - disable interrupts
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
*/
-static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1484,13 +1482,13 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_unmask_interrupts - enable interrupts
+ * mpt3sas_base_unmask_interrupts - enable interrupts
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
*/
-static void
-_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1628,7 +1626,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
* So that FW can find enough entries to post the Reply
* Descriptors in the reply descriptor post queue.
*/
- if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
+ if (completed_cmds >= ioc->thresh_hold) {
if (ioc->combined_reply_queue) {
writel(reply_q->reply_post_host_index |
((msix_index & 7) <<
@@ -1742,6 +1740,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q->irq_poll_scheduled = false;
reply_q->irq_line_enable = true;
enable_irq(reply_q->os_irq);
+ /*
+ * Go for one more round of processing the
+	 * reply descriptor post queue in case the HBA
+	 * firmware has posted some reply descriptors
+	 * while re-enabling the IRQ.
+ */
+ _base_process_reply_queue(reply_q);
}
return num_entries;
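On the completed_cmds test changed above (my reading of the two conditions): the old modulo test updated the reply post host index only on every thresh_hold-th completion, while the new comparison updates it on every loop pass once the threshold is crossed. With thresh_hold == 8, for example:

    /* old: true at completed_cmds == 8, 16, 24, ...
     *   if (!base_mod64(completed_cmds, ioc->thresh_hold))
     * new: true at completed_cmds == 8, 9, 10, ... (every pass past the threshold)
     *   if (completed_cmds >= ioc->thresh_hold)
     */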
@@ -1787,12 +1792,14 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
* @ioc: per adapter object
+ * @poll: poll over reply descriptor pools in case the interrupt for a
+ * timed-out SCSI command got delayed
* Context: non-ISR context
*
* Called when a Task Management request has completed.
*/
void
-mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
struct adapter_reply_queue *reply_q;
@@ -1809,19 +1816,25 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
if (reply_q->irq_poll_scheduled) {
/* Calling irq_poll_disable will wait for any pending
* callbacks to have completed.
*/
irq_poll_disable(&reply_q->irqpoll);
irq_poll_enable(&reply_q->irqpoll);
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- enable_irq(reply_q->os_irq);
- continue;
+ /* check how the scheduled poll has ended,
+ * clean up only if necessary
+ */
+ if (reply_q->irq_poll_scheduled) {
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ enable_irq(reply_q->os_irq);
+ }
}
- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
}
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
/**
@@ -3372,7 +3385,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc);
if (r) {
@@ -5257,7 +5270,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
_base_release_memory_pools(ioc);
goto retry_allocation;
}
- memset(ioc->request, 0, sz);
if (retry_sz)
ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5619,6 +5631,23 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
}
/**
+ * _base_dump_reg_set - print a hexdump of the register set
+ * @ioc: per adapter object
+ *
+ * Return: Nothing.
+ */
+static inline void
+_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned int i, sz = 256;
+ u32 __iomem *reg = (u32 __iomem *)ioc->chip;
+
+ ioc_info(ioc, "System Register set:\n");
+ for (i = 0; i < (sz / sizeof(u32)); i++)
+ pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
@@ -6797,6 +6826,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20) {
ioc_info(ioc,
"Stop writing magic sequence after 20 retries\n");
+ _base_dump_reg_set(ioc);
goto out;
}
@@ -6825,6 +6855,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
"Invalid host diagnostic register value\n");
+ _base_dump_reg_set(ioc);
goto out;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
@@ -6859,6 +6890,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (ioc_state) {
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
+ _base_dump_reg_set(ioc);
goto out;
}
@@ -7101,7 +7133,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_host_index:
- _base_unmask_interrupts(ioc);
+ mpt3sas_base_unmask_interrupts(ioc);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
r = _base_display_fwpkg_version(ioc);
@@ -7150,7 +7182,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
if (ioc->chip_phys && ioc->chip) {
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
@@ -7716,7 +7748,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
}
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ff876c31272..bc8beb10f3fc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "34.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 34
+#define MPT3SAS_DRIVER_VERSION "35.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 35
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1036,6 +1036,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @firmware_event_thread: ""
* @fw_event_lock:
* @fw_event_list: list of fw events
+ * @current_event: firmware event currently being processed
+ * @fw_events_cleanup: set to one while cleaning up the fw events
* @aen_event_read_flag: event log was read
* @broadcast_aen_busy: broadcast aen waiting to be serviced
* @shost_recovery: host reset in progress
@@ -1217,6 +1219,8 @@ struct MPT3SAS_ADAPTER {
struct workqueue_struct *firmware_event_thread;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
+ struct fw_event_work *current_event;
+ u8 fw_events_cleanup;
/* misc flags */
int aen_event_read_flag;
@@ -1524,7 +1528,9 @@ __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll);
+void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
@@ -1604,11 +1610,12 @@ void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method);
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 11026e0ef3d0..4a0ddc7c95e4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -371,7 +371,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = 0;
- memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));
ioc->config_cmds.status = MPT3_CMD_PENDING;
config_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->config_cmds.smid = smid;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7c119b904834..0f2b681449e6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1109,13 +1109,15 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
pcie_device->device_info))))
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
@@ -3384,12 +3386,10 @@ host_trace_buffer_enable_store(struct device *cdev,
&&
(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[
- MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer_dma[
- MPI2_DIAG_BUF_TYPE_TRACE]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
NULL;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 2e2756d8a49b..5f845d7094fc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1513,6 +1513,66 @@ _scsih_is_nvme_pciescsi_device(u32 device_info)
}
/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+ for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -2701,9 +2761,101 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Check whether the TM has aborted the timed-out SCSI command; if
+ * so, return SUCCESS, else return FAILED.
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task)
+{
+
+ if (smid_task <= ioc->shost->can_queue) {
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!(_scsih_scsi_lookup_find_by_target(ioc,
+ id, channel)))
+ return SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+ lun, channel)))
+ return SUCCESS;
+ break;
+ default:
+ return SUCCESS;
+ }
+ } else if (smid_task == ioc->scsih_cmds.smid) {
+ if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ } else if (smid_task == ioc->ctl_cmds.smid) {
+ if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ }
+
+ return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues it is possible that the interrupt for the aborted IO has
+ * not been received yet. So before returning a failure status, poll
+ * the reply descriptor pools for the reply of the timed-out SCSI
+ * command. Return FAILED if the reply for the timed-out command is
+ * not received, otherwise return SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+ int rc;
+
+ rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+	ioc_info(ioc,
+	    "Poll ReplyDescriptor queues for completion of smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+	    smid_task, type, handle);
+
+ /*
+	 * Due to interrupt latency issues, the driver may receive the
+	 * interrupt for the TM first and only then the one for the aborted
+	 * SCSI IO. So poll all the ReplyDescriptor pools before returning
+	 * the FAILED status to the SML.
+ */
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 1);
+ mpt3sas_base_unmask_interrupts(ioc);
+
+ return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
+
+/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
@@ -2720,11 +2872,13 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return: SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi25SCSIIORequest_t *request;
u16 smid = 0;
u32 ioc_state;
int rc;
@@ -2780,7 +2934,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
- mpi_request->MsgFlags = tr_method;
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2800,7 +2956,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
/* sync IRQs in case those were busy during flush. */
- mpt3sas_base_sync_reply_irqs(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 0);
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
@@ -2817,7 +2973,44 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
- rc = SUCCESS;
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ /*
+	 * If the DevHandle field in smid_task's entry of the request
+	 * pool doesn't match the device handle on which this task abort
+	 * TM was received, it means that TM has successfully aborted the
+	 * timed-out command: smid_task's entry in the request pool is
+	 * memset to zero once the timed-out command is returned to the
+	 * SML. If the command was not aborted, smid_task's entry won't be
+	 * cleared; it will still hold the DevHandle on which this task
+	 * abort TM was received, and the driver will return the TM status
+	 * as FAILED.
+ */
+ request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+ if (le16_to_cpu(request->DevHandle) != handle)
+ break;
+
+ ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
+ "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+ handle, timeout, tr_method, smid_task, msix_task);
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+ type, smid_task);
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2826,14 +3019,14 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method)
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
- msix_task, timeout, tr_method);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+ smid_task, msix_task, timeout, tr_method);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2980,7 +3173,8 @@ scsih_abort(struct scsi_cmnd *scmd)
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
timeout = ioc->nvme_abort_timeout;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
st->smid, st->msix_io, timeout, 0);
/* Command must be cleared after abort */
@@ -3056,7 +3250,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
@@ -3134,7 +3329,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
@@ -3323,11 +3519,13 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (list_empty(&ioc->fw_event_list) ||
+ if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
!ioc->firmware_event_thread || in_interrupt())
return;
- while ((fw_event = dequeue_next_fw_event(ioc))) {
+ ioc->fw_events_cleanup = 1;
+ while ((fw_event = dequeue_next_fw_event(ioc)) ||
+ (fw_event = ioc->current_event)) {
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
@@ -3341,6 +3539,7 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
fw_event_work_put(fw_event);
}
+ ioc->fw_events_cleanup = 0;
}
/**
@@ -7527,7 +7726,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
st->msix_io, 30, 0);
if (r == FAILED) {
@@ -7568,9 +7767,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
- st->msix_io, 30, 0);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30, 0);
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -9421,11 +9620,13 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
+ ioc->current_event = fw_event;
_scsih_fw_event_del_from_list(ioc, fw_event);
/* the queue is being flushed so ignore this event */
if (ioc->remove_host || ioc->pci_error_recovery) {
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
return;
}
@@ -9439,10 +9640,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
while (scsi_host_in_recovery(ioc->shost) ||
ioc->shost_recovery) {
/*
- * If we're unloading, bail. Otherwise, this can become
- * an infinite loop.
+ * If we're unloading or cancelling the work, bail.
+ * Otherwise, this can become an infinite loop.
*/
- if (ioc->remove_host)
+ if (ioc->remove_host || ioc->fw_events_cleanup)
goto out;
ssleep(1);
}
@@ -9503,11 +9704,13 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
+ ioc->current_event = NULL;
return;
break;
}
out:
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
}
/**
@@ -9889,6 +10092,34 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_get_shost_and_ioc - get shost and ioc and verify they are non-NULL
+ * @pdev: PCI device struct
+ * @shost: address of scsi host pointer
+ * @ioc: address of HBA adapter pointer
+ *
+ * Return zero if *shost and *ioc are not NULL, otherwise return an error number.
+ */
+static int
+_scsih_get_shost_and_ioc(struct pci_dev *pdev,
+ struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
+{
+ *shost = pci_get_drvdata(pdev);
+ if (*shost == NULL) {
+ dev_err(&pdev->dev, "pdev's driver data is null\n");
+ return -ENXIO;
+ }
+
+ *ioc = shost_priv(*shost);
+ if (*ioc == NULL) {
+ dev_err(&pdev->dev, "shost's private data is null\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/**
* scsih_remove - detach and remove add host
* @pdev: PCI device struct
*
@@ -9896,8 +10127,8 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
*/
static void scsih_remove(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -9906,6 +10137,9 @@ static void scsih_remove(struct pci_dev *pdev)
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
+
ioc->remove_host = 1;
if (!pci_device_is_present(pdev))
@@ -9985,12 +10219,15 @@ static void scsih_remove(struct pci_dev *pdev)
static void
scsih_shutdown(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct workqueue_struct *wq;
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
+
ioc->remove_host = 1;
if (!pci_device_is_present(pdev))
@@ -10560,6 +10797,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
return MPI26_VERSION;
}
return 0;
@@ -10649,6 +10890,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_ATLAS_PCIe_SWITCH_DEVID:
ioc->is_gen35_ioc = 1;
break;
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
dev_info(&pdev->dev,
@@ -10840,9 +11095,14 @@ out_add_shost_fail:
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state;
+ int rc;
+
+ rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (rc)
+ return rc;
mpt3sas_base_stop_watchdog(ioc);
flush_scheduled_work();
@@ -10867,11 +11127,15 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
static int
scsih_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state = pdev->current_state;
int r;
+ r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (r)
+ return r;
+
ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
@@ -10902,8 +11166,11 @@ scsih_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
@@ -10938,10 +11205,13 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
int rc;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
+
ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
@@ -10974,8 +11244,11 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
static void
scsih_pci_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc_info(ioc, "PCI error: resume callback!!\n");
@@ -10990,8 +11263,11 @@ scsih_pci_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
@@ -11139,6 +11415,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
/* Atlas PCIe Switch Management Port */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
PCI_ANY_ID, PCI_ANY_ID },
@@ -11151,6 +11435,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 978f5283c883..6aa2697c4a15 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -246,19 +246,16 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->tx_dma, GFP_KERNEL);
if (!mvi->tx)
goto err_out;
- memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
&mvi->rx_fis_dma, GFP_KERNEL);
if (!mvi->rx_fis)
goto err_out;
- memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
mvi->rx = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
&mvi->rx_dma, GFP_KERNEL);
if (!mvi->rx)
goto err_out;
- memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
mvi->rx[0] = cpu_to_le32(0xfff);
mvi->rx_cons = 0xfff;
@@ -267,7 +264,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->slot_dma, GFP_KERNEL);
if (!mvi->slot)
goto err_out;
- memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
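The memset() deletions in mvs_alloc() above are safe because dma_alloc_coherent() returns zeroed memory (the DMA core has zeroed coherent allocations unconditionally since v5.0), leaving the pattern simply (sketch):

    mvi->tx = dma_alloc_coherent(mvi->dev, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
    			     &mvi->tx_dma, GFP_KERNEL);
    if (!mvi->tx)
    	goto err_out;
    /* buffer arrives zero-filled; no explicit memset() required */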
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 8906aceda4c4..0354898d7cac 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
+ ret = PTR_ERR(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index b2869c5dd7fb..5fa0f4ed6565 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1050,7 +1050,7 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
enquiry2->fw.turn_id = 0;
}
snprintf(cb->fw_version, sizeof(cb->fw_version),
- "%d.%02d-%c-%02d",
+ "%u.%02u-%c-%02u",
enquiry2->fw.major_version,
enquiry2->fw.minor_version,
enquiry2->fw.firmware_type,
@@ -2167,7 +2167,7 @@ static ssize_t ctlr_num_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct myrb_hba *cb = shost_priv(shost);
- return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+ return snprintf(buf, 20, "%u\n", cb->ctlr_num);
}
static DEVICE_ATTR_RO(ctlr_num);
@@ -2226,7 +2226,7 @@ static struct device_attribute *myrb_shost_attrs[] = {
NULL,
};
-struct scsi_host_template myrb_template = {
+static struct scsi_host_template myrb_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrb",
@@ -2315,7 +2315,7 @@ static void myrb_get_state(struct device *dev)
raid_set_state(myrb_raid_template, dev, state);
}
-struct raid_function_template myrb_raid_functions = {
+static struct raid_function_template myrb_raid_functions = {
.cookie = &myrb_template,
.is_raid = myrb_is_raid,
.get_resync = myrb_get_resync,
@@ -2489,7 +2489,7 @@ static void myrb_monitor(struct work_struct *work)
*
* Return: true for fatal errors and false otherwise.
*/
-bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
unsigned char parm0, unsigned char parm1)
{
struct pci_dev *pdev = cb->pdev;
@@ -2732,7 +2732,6 @@ static int DAC960_LA_hw_init(struct pci_dev *pdev,
DAC960_LA_disable_intr(base);
DAC960_LA_ack_hw_mbox_status(base);
udelay(1000);
- timeout = 0;
while (DAC960_LA_init_in_progress(base) &&
timeout < MYRB_MAILBOX_TIMEOUT) {
if (DAC960_LA_read_error_status(base, &error,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 103803e779f2..7a3ade765ce3 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1529,7 +1529,7 @@ static struct device_attribute *myrs_shost_attrs[] = {
/*
* SCSI midlayer interface
*/
-int myrs_host_reset(struct scsi_cmnd *scmd)
+static int myrs_host_reset(struct scsi_cmnd *scmd)
{
struct Scsi_Host *shost = scmd->device->host;
struct myrs_hba *cs = shost_priv(shost);
@@ -1919,7 +1919,7 @@ static void myrs_slave_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-struct scsi_host_template myrs_template = {
+static struct scsi_host_template myrs_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrs",
@@ -2033,7 +2033,7 @@ myrs_get_state(struct device *dev)
raid_set_state(myrs_raid_template, dev, state);
}
-struct raid_function_template myrs_raid_functions = {
+static struct raid_function_template myrs_raid_functions = {
.cookie = &myrs_template,
.is_raid = myrs_is_raid,
.get_resync = myrs_get_resync,
@@ -2043,7 +2043,7 @@ struct raid_function_template myrs_raid_functions = {
/*
* PCI interface functions
*/
-void myrs_flush_cache(struct myrs_hba *cs)
+static void myrs_flush_cache(struct myrs_hba *cs)
{
myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b6e04d14292d..da814c2da16d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1247,7 +1247,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* ---> AutoSCSI with MSGOUTreg is processed.
*/
data->msgout_len = 0;
- };
+ }
nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
}
@@ -1839,7 +1839,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
nsp32_read1(base, SCSI_BUS_MONITOR));
- };
+ }
data->msgout_len = 0;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 77c805db2724..3587f7c8a428 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -408,9 +408,10 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
+ u32 ib_offset = pm8001_ha->ib_offset;
#define IB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[IB].virt_ptr + \
+ memoryMap.region[ib_offset].virt_ptr + \
pm8001_ha->evtlog_ib_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
@@ -442,9 +443,10 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
int offset;
char *str = buf;
int start = 0;
+ u32 ob_offset = pm8001_ha->ob_offset;
#define OB_MEMMAP(c) \
(*(u32 *)((u8 *)pm8001_ha-> \
- memoryMap.region[OB].virt_ptr + \
+ memoryMap.region[ob_offset].virt_ptr + \
pm8001_ha->evtlog_ob_offset + (c)))
for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 1c7f15fd69ce..501b574239e8 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -75,12 +75,10 @@ enum port_type {
};
/* driver compile-time configuration */
-#define PM8001_MAX_CCB 256 /* max ccbs supported */
+#define PM8001_MAX_CCB 1024 /* max ccbs supported */
#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
-#define PM8001_MAX_INB_NUM 1
-#define PM8001_MAX_OUTB_NUM 1
-#define PM8001_MAX_SPCV_INB_NUM 1
-#define PM8001_MAX_SPCV_OUTB_NUM 4
+#define PM8001_MAX_INB_NUM 64
+#define PM8001_MAX_OUTB_NUM 64
#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
/* Inbound/Outbound queue size */
@@ -92,26 +90,27 @@ enum port_type {
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
+#define PM8001_RESERVE_SLOT 8
-#define USI_MAX_MEMCNT_BASE 5
-#define IB (USI_MAX_MEMCNT_BASE + 1)
-#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
-#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
-#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
-#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
#define CONFIG_SCSI_PM8001_MAX_DMA_SG 528
#define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG
+
enum memory_region_num {
AAP1 = 0x0, /* application acceleration processor */
IOP, /* IO processor */
NVMD, /* NVM device */
- DEV_MEM, /* memory for devices */
- CCB_MEM, /* memory for command control block */
FW_FLASH, /* memory for fw flash update */
- FORENSIC_MEM /* memory for fw forensic data */
+ FORENSIC_MEM, /* memory for fw forensic data */
+ USI_MAX_MEMCNT_BASE
};
#define PM8001_EVENT_LOG_SIZE (128 * 1024)
+/**
+ * maximum DMA memory regions (number of IBQ + number of IBQ CI
+ * + number of OBQ + number of OBQ PI)
+ */
+#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \
+ + (2 * PM8001_MAX_OUTB_NUM)))
/*error code*/
enum mpi_err {
MPI_IO_STATUS_SUCCESS = 0x0,
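A quick check of the new sizing: the reworked enum makes USI_MAX_MEMCNT_BASE evaluate to 5 (AAP1, IOP, NVMD, FW_FLASH, FORENSIC_MEM), so the worst case is 5 + (2 * 64) + (2 * 64) = 261 memoryMap slots. A hypothetical compile-time assertion, not part of the patch, pins that down:

/* Sketch only: worst-case region count implied by the new defines;
 * pm8001_alloc() touches just max_memcnt of these per controller.
 */
_Static_assert(USI_MAX_MEMCNT == 261, "5 base regions + 128 IB/CI + 128 OB/PI");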
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index e9a939230b15..2b7b2954ec31 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -189,6 +189,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
u32 offsetib, offsetob;
void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+ u32 ib_offset = pm8001_ha->ib_offset;
+ u32 ob_offset = pm8001_ha->ob_offset;
+ u32 ci_offset = pm8001_ha->ci_offset;
+ u32 pi_offset = pm8001_ha->pi_offset;
pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0;
pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0;
@@ -223,19 +227,19 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
pm8001_ha->inbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->memoryMap.region[ib_offset + i].total_len;
pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
- pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -249,21 +253,21 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
pm8001_ha->outbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->memoryMap.region[ob_offset + i].total_len;
pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
0 | (10 << 16) | (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
- pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
@@ -4371,8 +4375,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.esgl = cpu_to_le32(1<<31);
@@ -4445,8 +4448,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
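Both hunks above depend on the pm8001_init.c and pm8001_sas.h changes later in this diff: buf_prd stops being an array embedded in struct pm8001_ccb_info and becomes a per-CCB coherent allocation, whose bus address is what ccb_dma_handle now stores. A before/after sketch of the PRD address math:

/* before: PRD table lived inside the CCB, addressed relative to it */
phys_addr = ccb->ccb_dma_handle +
	    offsetof(struct pm8001_ccb_info, buf_prd[0]);

/* after: ccb_dma_handle is the bus address of the PRD table itself */
phys_addr = ccb->ccb_dma_handle;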
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 20fa96cbc9d3..3cf3e58b6979 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,6 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *,
+			       struct pci_dev *);
/*
* chip info structure to identify chip key functionality as
@@ -264,12 +265,36 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
const struct pci_device_id *ent)
{
- int i;
+ int i, count = 0, rc = 0;
+ u32 ci_offset, ib_offset, ob_offset, pi_offset;
+ struct inbound_queue_table *circularQ;
+
spin_lock_init(&pm8001_ha->lock);
spin_lock_init(&pm8001_ha->bitmap_lock);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("pm8001_alloc: PHY:%x\n",
pm8001_ha->chip->n_phy));
+
+ /* Setup Interrupt */
+ rc = pm8001_setup_irq(pm8001_ha);
+ if (rc) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "pm8001_setup_irq failed [ret: %d]\n", rc));
+ goto err_out_shost;
+ }
+ /* Request Interrupt */
+ rc = pm8001_request_irq(pm8001_ha);
+ if (rc)
+ goto err_out_shost;
+
+ count = pm8001_ha->max_q_num;
+	/* Queue count follows CPU core count and MSI-X vector availability */
+ ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE;
+ ci_offset = pm8001_ha->ci_offset = ib_offset + count;
+ ob_offset = pm8001_ha->ob_offset = ci_offset + count;
+ pi_offset = pm8001_ha->pi_offset = ob_offset + count;
+ pm8001_ha->max_memcnt = pi_offset + count;
+
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
pm8001_ha->port[i].wide_port_phymap = 0;
@@ -278,9 +303,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
INIT_LIST_HEAD(&pm8001_ha->port[i].list);
}
- pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
- if (!pm8001_ha->tags)
- goto err_out;
/* MPI Memory region 1 for AAP Event Log for fw */
pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
@@ -293,54 +315,62 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[IOP].alignment = 32;
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < count; i++) {
+ circularQ = &pm8001_ha->inbnd_q_tbl[i];
+ spin_lock_init(&circularQ->iq_lock);
/* MPI Memory region 3 for consumer Index of inbound queues */
- pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
- pm8001_ha->memoryMap.region[CI+i].element_size = 4;
- pm8001_ha->memoryMap.region[CI+i].total_len = 4;
- pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4;
+ pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4;
if ((ent->driver_data) != chip_8001) {
/* MPI Memory region 5 inbound queues */
- pm8001_ha->memoryMap.region[IB+i].num_elements =
+ pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[IB+i].element_size = 128;
- pm8001_ha->memoryMap.region[IB+i].total_len =
+ pm8001_ha->memoryMap.region[ib_offset+i].element_size
+ = 128;
+ pm8001_ha->memoryMap.region[ib_offset+i].total_len =
PM8001_MPI_QUEUE * 128;
- pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+ pm8001_ha->memoryMap.region[ib_offset+i].alignment
+ = 128;
} else {
- pm8001_ha->memoryMap.region[IB+i].num_elements =
+ pm8001_ha->memoryMap.region[ib_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[IB+i].element_size = 64;
- pm8001_ha->memoryMap.region[IB+i].total_len =
+ pm8001_ha->memoryMap.region[ib_offset+i].element_size
+ = 64;
+ pm8001_ha->memoryMap.region[ib_offset+i].total_len =
PM8001_MPI_QUEUE * 64;
- pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+ pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64;
}
}
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < count; i++) {
/* MPI Memory region 4 for producer Index of outbound queues */
- pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
- pm8001_ha->memoryMap.region[PI+i].element_size = 4;
- pm8001_ha->memoryMap.region[PI+i].total_len = 4;
- pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1;
+ pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4;
+ pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4;
if (ent->driver_data != chip_8001) {
/* MPI Memory region 6 Outbound queues */
- pm8001_ha->memoryMap.region[OB+i].num_elements =
+ pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[OB+i].element_size = 128;
- pm8001_ha->memoryMap.region[OB+i].total_len =
+ pm8001_ha->memoryMap.region[ob_offset+i].element_size
+ = 128;
+ pm8001_ha->memoryMap.region[ob_offset+i].total_len =
PM8001_MPI_QUEUE * 128;
- pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+ pm8001_ha->memoryMap.region[ob_offset+i].alignment
+ = 128;
} else {
/* MPI Memory region 6 Outbound queues */
- pm8001_ha->memoryMap.region[OB+i].num_elements =
+ pm8001_ha->memoryMap.region[ob_offset+i].num_elements =
PM8001_MPI_QUEUE;
- pm8001_ha->memoryMap.region[OB+i].element_size = 64;
- pm8001_ha->memoryMap.region[OB+i].total_len =
+ pm8001_ha->memoryMap.region[ob_offset+i].element_size
+ = 64;
+ pm8001_ha->memoryMap.region[ob_offset+i].total_len =
PM8001_MPI_QUEUE * 64;
- pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+ pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64;
}
}
@@ -348,19 +378,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
- /* Memory region for devices*/
- pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
- pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
- sizeof(struct pm8001_device);
- pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
- sizeof(struct pm8001_device);
-
- /* Memory region for ccb_info*/
- pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
- pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
- sizeof(struct pm8001_ccb_info);
- pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
- sizeof(struct pm8001_ccb_info);
/* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
@@ -369,7 +386,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
- for (i = 0; i < USI_MAX_MEMCNT; i++) {
+ for (i = 0; i < pm8001_ha->max_memcnt; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr,
&pm8001_ha->memoryMap.region[i].phys_addr,
@@ -384,27 +401,36 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
}
}
- pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
+ /* Memory region for devices*/
+	pm8001_ha->devices = kcalloc(PM8001_MAX_DEVICES,
+		sizeof(struct pm8001_device), GFP_KERNEL);
+ if (!pm8001_ha->devices) {
+ rc = -ENOMEM;
+ goto err_out_nodev;
+ }
for (i = 0; i < PM8001_MAX_DEVICES; i++) {
pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
pm8001_ha->devices[i].id = i;
pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
pm8001_ha->devices[i].running_req = 0;
}
- pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
- for (i = 0; i < PM8001_MAX_CCB; i++) {
- pm8001_ha->ccb_info[i].ccb_dma_handle =
- pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
- i * sizeof(struct pm8001_ccb_info);
- pm8001_ha->ccb_info[i].task = NULL;
- pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
- pm8001_ha->ccb_info[i].device = NULL;
- ++pm8001_ha->tags_num;
- }
pm8001_ha->flags = PM8001F_INIT_TIME;
/* Initialize tags */
pm8001_tag_init(pm8001_ha);
return 0;
+
+err_out_shost:
+ scsi_remove_host(pm8001_ha->shost);
+err_out_nodev:
+ for (i = 0; i < pm8001_ha->max_memcnt; i++) {
+ if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
+ pci_free_consistent(pm8001_ha->pdev,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
+ pm8001_ha->memoryMap.region[i].virt_ptr,
+ pm8001_ha->memoryMap.region[i].phys_addr);
+ }
+ }
err_out:
return 1;
}
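The queue regions are laid out back to back after the five base regions; a sketch of the resulting memoryMap index layout, writing count for pm8001_ha->max_q_num:

/*
 * [0 .. 4]                  AAP1, IOP, NVMD, FW_FLASH, FORENSIC_MEM
 * [ib_offset .. +count-1]   inbound queue rings    (ib_offset = 5)
 * [ci_offset .. +count-1]   inbound consumer idx   (ci_offset = ib_offset + count)
 * [ob_offset .. +count-1]   outbound queue rings   (ob_offset = ci_offset + count)
 * [pi_offset .. +count-1]   outbound producer idx  (pi_offset = ob_offset + count)
 *
 * max_memcnt = pi_offset + count = 5 + 4 * count regions are then
 * DMA-allocated in a single loop.
 */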
@@ -899,7 +925,8 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
u32 number_of_intr;
- int rc;
+ int rc, cpu_online_count;
+ unsigned int allocated_irq_vectors;
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
@@ -908,13 +935,21 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
number_of_intr = PM8001_MAX_MSIX_VEC;
}
+ cpu_online_count = num_online_cpus();
+ number_of_intr = min_t(int, cpu_online_count, number_of_intr);
rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
number_of_intr, PCI_IRQ_MSIX);
- number_of_intr = rc;
	if (rc < 0)
		return rc;
+	allocated_irq_vectors = rc;
+
+	/* Use no more interrupts than were actually allocated */
+ number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr);
pm8001_ha->number_of_intr = number_of_intr;
+	/* Record the resulting queue count in the HBA structure */
+ pm8001_ha->max_q_num = number_of_intr;
+
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
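Reduced to its arithmetic, the sizing above works as follows (a sketch; since pci_alloc_irq_vectors() is called with min == max, a successful return grants exactly the requested count):

number_of_intr = min_t(int, num_online_cpus(), number_of_intr);
rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
			   number_of_intr, PCI_IRQ_MSIX);
if (rc < 0)
	return rc;
/* rc == vectors granted, so this min_t() is a defensive no-op */
pm8001_ha->number_of_intr = min_t(int, rc, number_of_intr);
pm8001_ha->max_q_num = pm8001_ha->number_of_intr;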
@@ -1069,13 +1104,6 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
rc = -ENOMEM;
goto err_out_free;
}
- /* Setup Interrupt */
- rc = pm8001_setup_irq(pm8001_ha);
- if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "pm8001_setup_irq failed [ret: %d]\n", rc));
- goto err_out_shost;
- }
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
@@ -1085,16 +1113,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_ha_free;
}
+ rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev);
+ if (rc)
+ goto err_out_enable;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
- /* Request Interrupt */
- rc = pm8001_request_irq(pm8001_ha);
- if (rc) {
- PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
- "pm8001_request_irq failed [ret: %d]\n", rc));
- goto err_out_shost;
- }
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
@@ -1137,6 +1162,60 @@ err_out_enable:
return rc;
}
+/*
+ * pm8001_init_ccb_tag - allocate memory for CCBs and tags.
+ * @pm8001_ha: our hba card information.
+ * @shost: scsi host which has been allocated outside.
+ * @pdev: pci device used for the coherent PRD allocations.
+ */
+static int
+pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
+ struct pci_dev *pdev)
+{
+ int i = 0;
+ u32 max_out_io, ccb_count;
+ u32 can_queue;
+
+ max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
+ ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
+
+	/* Update the SCSI host's queue depth */
+ can_queue = ccb_count - PM8001_RESERVE_SLOT;
+ shost->can_queue = can_queue;
+
+ pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL);
+ if (!pm8001_ha->tags)
+ goto err_out;
+
+	/* Memory region for ccb_info */
+	pm8001_ha->ccb_info =
+		kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL);
+ if (!pm8001_ha->ccb_info) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk
+ ("Unable to allocate memory for ccb\n"));
+ goto err_out_noccb;
+ }
+ for (i = 0; i < ccb_count; i++) {
+ pm8001_ha->ccb_info[i].buf_prd = pci_alloc_consistent(pdev,
+ sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
+ &pm8001_ha->ccb_info[i].ccb_dma_handle);
+ if (!pm8001_ha->ccb_info[i].buf_prd) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk
+ ("pm80xx: ccb prd memory allocation error\n"));
+ goto err_out;
+ }
+ pm8001_ha->ccb_info[i].task = NULL;
+ pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
+ pm8001_ha->ccb_info[i].device = NULL;
+ ++pm8001_ha->tags_num;
+ }
+ return 0;
+
+err_out_noccb:
+ kfree(pm8001_ha->devices);
+err_out:
+ return -ENOMEM;
+}
+
static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ae7ba9b3c4bc..95663e138083 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.39"
+#define DRV_VERSION "0.1.40"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -315,7 +315,7 @@ struct pm8001_ccb_info {
u32 ccb_tag;
dma_addr_t ccb_dma_handle;
struct pm8001_device *device;
- struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
+ struct pm8001_prd *buf_prd;
struct fw_control_ex *fw_control_context;
u8 open_retry;
};
@@ -468,6 +468,7 @@ struct inbound_queue_table {
u32 reserved;
__le32 consumer_index;
u32 producer_idx;
+ spinlock_t iq_lock;
};
struct outbound_queue_table {
u32 element_size_cnt;
@@ -524,8 +525,8 @@ struct pm8001_hba_info {
void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */
union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl;
- struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
- struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+ struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM];
+ struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
struct sas_phy_attribute_table phy_attr_table;
/* MPI SAS PHY attributes */
u8 sas_addr[SAS_ADDR_SIZE];
@@ -561,6 +562,12 @@ struct pm8001_hba_info {
u32 reset_in_progress;
u32 non_fatal_count;
u32 non_fatal_read_length;
+ u32 max_q_num;
+ u32 ib_offset;
+ u32 ob_offset;
+ u32 ci_offset;
+ u32 pi_offset;
+ u32 max_memcnt;
};
struct pm8001_work {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index b42f41d1ed49..7593f248afb2 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -720,7 +720,7 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
u32 offset = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
@@ -738,7 +738,7 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int i;
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
u32 offset = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(address,
@@ -758,6 +758,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
u32 offsetib, offsetob;
void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+ u32 ib_offset = pm8001_ha->ib_offset;
+ u32 ob_offset = pm8001_ha->ob_offset;
+ u32 ci_offset = pm8001_ha->ci_offset;
+ u32 pi_offset = pm8001_ha->pi_offset;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr =
pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
@@ -778,23 +782,23 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr;
pm8001_ha->inbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[IB + i].total_len;
+ pm8001_ha->memoryMap.region[ib_offset + i].total_len;
pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
- pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo;
pm8001_ha->inbnd_q_tbl[i].ci_virt =
- pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr;
offsetib = i * 0x20;
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
get_pci_bar_index(pm8001_mr32(addressib,
@@ -809,25 +813,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
pm8001_ha->inbnd_q_tbl[i].pi_offset));
}
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
- pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo;
pm8001_ha->outbnd_q_tbl[i].base_virt =
- (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+ (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr;
pm8001_ha->outbnd_q_tbl[i].total_length =
- pm8001_ha->memoryMap.region[OB + i].total_len;
+ pm8001_ha->memoryMap.region[ob_offset + i].total_len;
pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
- pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+ pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo;
/* interrupt vector based on oq */
pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
pm8001_ha->outbnd_q_tbl[i].pi_virt =
- pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+ pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr;
offsetob = i * 0x24;
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
get_pci_bar_index(pm8001_mr32(addressob,
@@ -871,7 +875,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
/* Update Fatal error interrupt vector */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
- ((pm8001_ha->number_of_intr - 1) << 8);
+ ((pm8001_ha->max_q_num - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
@@ -1010,8 +1014,12 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
value &= SPCv_MSGU_CFG_TABLE_UPDATE;
} while ((value != 0) && (--max_wait_count));
- if (!max_wait_count)
- return -1;
+ if (!max_wait_count) {
+		/* Report which initialization stage timed out */
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"Inb doorbell clear not toggled [value:%x]\n", value));
+ return -EBUSY;
+ }
/* check the MPI-State for initialization upto 100ms*/
max_wait_count = 100 * 1000;/* 100 msec */
do {
@@ -1022,12 +1030,12 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
} while ((GST_MPI_STATE_INIT !=
(gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
if (!max_wait_count)
- return -1;
+ return -EBUSY;
/* check MPI Initialization error */
gst_len_mpistate = gst_len_mpistate >> 16;
if (0x0000 != gst_len_mpistate)
- return -1;
+ return -EBUSY;
return 0;
}
@@ -1469,11 +1477,10 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
/* update main config table ,inbound table and outbound table */
update_main_config_table(pm8001_ha);
- for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+ for (i = 0; i < pm8001_ha->max_q_num; i++) {
update_inbnd_queue_table(pm8001_ha, i);
- for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
update_outbnd_queue_table(pm8001_ha, i);
-
+ }
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
PM8001_INIT_DBG(pm8001_ha,
@@ -4191,7 +4198,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
unsigned long flags;
u32 regval;
- if (vec == (pm8001_ha->number_of_intr - 1)) {
+ if (vec == (pm8001_ha->max_q_num - 1)) {
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
SCRATCH_PAD_MIPSALL_READY) {
@@ -4274,6 +4281,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
char *preq_dma_addr = NULL;
__le64 tmp_addr;
u32 i, length;
+ unsigned long flags;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4369,8 +4377,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
+ spin_lock_irqsave(&circularQ->iq_lock, flags);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
sizeof(smp_cmd), 0);
+ spin_unlock_irqrestore(&circularQ->iq_lock, flags);
if (rc)
goto err_out_2;
return 0;
@@ -4434,7 +4444,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u64 phys_addr, start_addr, end_addr;
u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ;
- u32 q_index;
+ unsigned long flags;
+ u32 q_index, cpu_id;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -4453,7 +4464,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ cpu_id = smp_processor_id();
+ q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
/* Check if encryption is set */
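The submission pattern this hunk introduces, in isolation (a sketch with the patch's names): pick the inbound queue from the submitting CPU rather than the device id, and take the new per-queue iq_lock around the ring write:

cpu_id = smp_processor_id();
q_index = (u32)cpu_id % pm8001_ha->max_q_num;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];

spin_lock_irqsave(&circularQ->iq_lock, flags);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
			   &ssp_cmd, sizeof(ssp_cmd), q_index);
spin_unlock_irqrestore(&circularQ->iq_lock, flags);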
@@ -4471,8 +4483,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
@@ -4501,9 +4512,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
@@ -4531,8 +4540,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high =
@@ -4560,9 +4568,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
ssp_cmd.addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high =
@@ -4576,9 +4582,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ spin_lock_irqsave(&circularQ->iq_lock, flags);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&ssp_cmd, sizeof(ssp_cmd), q_index);
+ spin_unlock_irqrestore(&circularQ->iq_lock, flags);
return ret;
}
@@ -4590,7 +4597,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
int ret;
- u32 q_index;
+ u32 q_index, cpu_id;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr, start_addr, end_addr;
@@ -4601,7 +4608,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+ cpu_id = smp_processor_id();
+ q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
if (task->data_dir == DMA_NONE) {
@@ -4652,8 +4660,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
@@ -4678,9 +4685,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low =
lower_32_bits(phys_addr);
sata_cmd.enc_addr_high =
@@ -4718,8 +4723,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info, buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
@@ -4744,9 +4748,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
- phys_addr = ccb->ccb_dma_handle +
- offsetof(struct pm8001_ccb_info,
- buf_prd[0]);
+ phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low =
lower_32_bits(phys_addr);
sata_cmd.addr_high =
@@ -4817,9 +4819,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
- q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ spin_lock_irqsave(&circularQ->iq_lock, flags);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, sizeof(sata_cmd), q_index);
+ spin_unlock_irqrestore(&circularQ->iq_lock, flags);
return ret;
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index aa9ae2ae8579..cbe5fab793eb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2860,10 +2860,8 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_cmd *cancel_cmd;
struct pmcraid_instance *pinstance;
- struct pmcraid_resource_entry *res;
pinstance = (struct pmcraid_instance *)cmd->drv_inst;
- res = cmd->scsi_cmd->device->hostdata;
cancel_cmd = pmcraid_get_free_cmd(pinstance);
@@ -4716,7 +4714,6 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
return -ENOMEM;
}
- memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index e163be8af965..0e2cbb164eeb 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -389,6 +389,7 @@ struct qedf_ctx {
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
struct delayed_work grcdump_work;
struct delayed_work stag_work;
@@ -541,9 +542,17 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
+extern void qedf_board_disable_work(struct work_struct *work);
+extern void qedf_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
+#define QL45xxx 0x165C
+#define QL41xxx 0x8080
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
struct fip_vlan {
struct ethhdr eth;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 542ba9454257..625e58ccb8c8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -124,7 +124,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task, sqe);
- /* Put timer on original I/O request */
+	/* Put timer on ELS request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
@@ -143,10 +143,33 @@ void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
struct fcoe_cqe_midpath_info *mp_info;
+ struct qedf_rport *fcport;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
+ if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			"ELS completion xid=0x%x after flush event=0x%x\n",
+ els_req->xid, els_req->event);
+ return;
+ }
+
+ fcport = els_req->fcport;
+
+ /* When flush is active,
+ * let the cmds be completed from the cleanup context
+ */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+ test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			"Dropping ELS completion xid=0x%x as fcport is flushing\n",
+ els_req->xid);
+ return;
+ }
+
clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
/* Kill the ELS timer */
@@ -185,10 +208,6 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
goto out_free;
}
- if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
- rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
- cancel_delayed_work_sync(&orig_io_req->timeout_work);
-
refcount = kref_read(&orig_io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
" orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
@@ -883,6 +902,11 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt) {
+			QEDF_ERR(&qedf->dbg_ctx, "payload get failed\n");
+ goto out_free_frame;
+ }
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Received LS_RJT for REC: er_reason=0x%x, "
"er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index acd9774a9387..4869ef813dc4 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -85,13 +85,13 @@ static void qedf_cmd_timeout(struct work_struct *work)
*/
QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
io_req->xid);
+ qedf_initiate_cleanup(io_req, true);
io_req->event = QEDF_IOREQ_EV_ELS_TMO;
/* Call callback function to complete command */
if (io_req->cb_func && io_req->cb_arg) {
io_req->cb_func(io_req->cb_arg);
io_req->cb_arg = NULL;
}
- qedf_initiate_cleanup(io_req, true);
kref_put(&io_req->refcount, qedf_release_cmd);
break;
case QEDF_SEQ_CLEANUP:
@@ -1562,6 +1562,8 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
*/
els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
+ clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
/* Cancel the timer */
cancel_delayed_work_sync(&els_req->timeout_work);
@@ -1704,8 +1706,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
io_req, io_req->xid);
continue;
}
+ qedf_initiate_cleanup(io_req, false);
flush_cnt++;
qedf_flush_els_req(qedf, io_req);
+
/*
* Release the kref and go back to the top of the
* loop.
@@ -2159,7 +2163,6 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
/* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "tgt not offloaded\n");
- rc = 1;
return SUCCESS;
}
@@ -2169,6 +2172,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
return SUCCESS;
}
+	/* ELS requests skip the QEDF_CMD_OUTSTANDING bookkeeping below */
+	if (io_req->cmd_type == QEDF_ELS)
+		goto process_els;
+
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
@@ -2178,6 +2185,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
}
set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+process_els:
/* Ensure room on SQ */
if (!atomic_read(&fcport->free_sqes)) {
QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 5ca424df355c..46d185cb9ea8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
"remote ports (default 60)");
uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
" mask");
@@ -105,6 +105,12 @@ module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+ bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+	"interface-level errors: 0 = Disabled, 1 = Enabled (Default: 1).");
+
struct workqueue_struct *qedf_io_wq;
static struct fcoe_percpu_s qedf_global;
@@ -690,6 +696,7 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+ .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
}
};
@@ -726,7 +733,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
rdata = fcport->rdata;
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
- rc = 1;
+ rc = SUCCESS;
goto out;
}
@@ -1333,7 +1340,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
- conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+ conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
@@ -1558,6 +1565,17 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (port_id == FC_FID_DIR_SERV)
break;
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "No action since spp type isn't FCP\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not FCP target so no action\n");
+ break;
+ }
+
if (!rport) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1634,11 +1652,13 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
- struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
- u64 dsn;
+ u8 buf[8];
+ int pos;
+	u32 i;
/*
- * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ * fdmi_enabled needs to be set for libfc
+	 * to execute FDMI registration.
*/
lport->fdmi_enabled = 1;
@@ -1648,32 +1668,53 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
*/
/* Get the PCI-e Device Serial Number Capability */
- dsn = pci_get_dsn(qedf->pdev);
- if (dsn)
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "%016llX", dsn);
- else
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "Unknown");
+ pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
+
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+	} else {
+		snprintf(fc_host_serial_number(lport->host),
+			 FC_SERIAL_NUMBER_SIZE, "Unknown");
+	}
+
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
+
+ if (qedf->pdev->device == QL45xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
+
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL45xxx FCoE Adapter");
+ }
- snprintf(fc_host->manufacturer,
- sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+ if (qedf->pdev->device == QL41xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
- snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL41xxx FCoE Adapter");
+ }
- snprintf(fc_host->model_description, sizeof(fc_host->model_description),
- "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
- "(FCoE)");
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
- snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
- "Rev %d", qedf->pdev->revision);
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
- snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
- "%s", QEDF_VERSION);
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+ FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
- snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
- "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1720,8 +1761,13 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
/* Set symbolic node name */
- snprintf(fc_host_symbolic_name(lport->host), 256,
- "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ if (qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
qedf_setup_fdmi(qedf);
@@ -3221,11 +3267,16 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
void *task_start, *task_end;
struct qed_slowpath_params slowpath_params;
struct qed_probe_params qed_params;
+ u16 retry_cnt = 10;
/*
* When doing error recovery we didn't reap the lport so don't try
* to reallocate it.
*/
+retry_probe:
+ if (mode == QEDF_MODE_RECOVERY)
+ msleep(2000);
+
if (mode != QEDF_MODE_RECOVERY) {
lport = libfc_host_alloc(&qedf_host_template,
sizeof(struct qedf_ctx));
@@ -3312,6 +3363,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
if (!qedf->cdev) {
+ if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+ QEDF_ERR(&qedf->dbg_ctx,
+				"Retrying hardware initialization, %d attempts left\n", retry_cnt);
+ retry_cnt--;
+ goto retry_probe;
+ }
QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
rc = -ENODEV;
goto err1;
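The recovery retry added above, reduced to its control flow (a sketch):

u16 retry_cnt = 10;

retry_probe:
	if (mode == QEDF_MODE_RECOVERY)
		msleep(2000);	/* give the qed core time to settle */
	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
	if (!qedf->cdev && mode == QEDF_MODE_RECOVERY && retry_cnt--)
		goto retry_probe;	/* up to 10 attempts, 2 s apart */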
@@ -3760,6 +3817,44 @@ void qedf_wq_grcdump(struct work_struct *work)
qedf_capture_grc_dump(qedf);
}
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Hardware error handler scheduled, event=%d.\n",
+ err_type);
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Already in recovery, not scheduling board disable work.\n");
+ return;
+ }
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedf->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+ break;
+ case QED_HW_ERR_RAMROD_FAIL:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+ if (qedf_enable_recovery)
+ qed_ops->common->recovery_process(qedf->cdev);
+
+ break;
+ default:
+ break;
+ }
+}
+
/*
* Protocol TLV handler
*/
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 9498279ae80d..c342defc3f52 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -274,6 +274,10 @@ struct qedi_ctx {
spinlock_t ll2_lock; /* Light L2 lock */
spinlock_t hba_lock; /* per port lock */
struct task_struct *ll2_recv_thread;
+ unsigned long qedi_err_flags;
+#define QEDI_ERR_ATTN_CLR_EN 0
+#define QEDI_ERR_IS_RECOVERABLE 2
+#define QEDI_ERR_OVERRIDE_EN 31
unsigned long flags;
#define UIO_DEV_OPENED 1
#define QEDI_IOTHREAD_WAKE 2
@@ -305,6 +309,7 @@ struct qedi_ctx {
u32 max_sqes;
u8 num_queues;
u32 max_active_conns;
+ s32 msix_count;
struct iscsi_cid_queue cid_que;
struct qedi_endpoint **ep_tbl;
@@ -334,6 +339,7 @@ struct qedi_ctx {
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
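How the new qedi_err_flags bits are consumed later in this diff (a sketch): probe sets the defaults, and the qedi_flags_override module parameter replaces them wholesale when its bit 31 (QEDI_ERR_OVERRIDE_EN) is set:

/* defaults, set in __qedi_probe() */
set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);		/* bit 0 */
set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);	/* bit 2 */

/* operator override, checked in qedi_schedule_hw_err_handler() */
if (test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))		/* bit 31 */
	qedi->qedi_err_flags = qedi_flags_override;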
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 6ed74583b1b9..440ddd2309f1 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+ spin_lock(&qedi_conn->list_lock);
if (likely(qedi_cmd->io_cmd_in_list)) {
qedi_cmd->io_cmd_in_list = false;
list_del_init(&qedi_cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
memset(task_ctx, '\0', sizeof(*task_ctx));
@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_clear_task_idx(qedi_conn->qedi, rtid);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
qedi_cmd->state = CLEANUP_RECV;
wake_up_interruptible(&qedi_conn->wait_queue);
@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->cmd_cleanup_req++;
qedi_iscsi_cleanup_task(ctask, true);
+ cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
QEDI_WARN(&qedi->dbg_ctx,
@@ -1255,7 +1267,8 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
((qedi_conn->cmd_cleanup_req ==
qedi_conn->cmd_cleanup_cmpl) ||
- qedi_conn->ep),
+ test_bit(QEDI_IN_RECOVERY,
+ &qedi->flags)),
5 * HZ);
if (rval) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
@@ -1280,7 +1293,9 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
/* Enable IOs for all other sessions except current.*/
if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
(qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl),
+ qedi_conn->cmd_cleanup_cmpl) ||
+ test_bit(QEDI_IN_RECOVERY,
+ &qedi->flags),
5 * HZ)) {
iscsi_host_for_each_session(qedi->shost,
qedi_mark_device_available);
@@ -1446,8 +1461,11 @@ ldel_exit:
spin_unlock_bh(&qedi_conn->tmf_work_lock);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
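The recurring fix in this file, shown once (sketch): the io_cmd_in_list test and list_del_init() must form a single critical section under list_lock so a command can only be unlinked, and active_cmd_count only decremented, once:

spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
	cmd->io_cmd_in_list = false;
	list_del_init(&cmd->io_cmd);
	qedi_conn->active_cmd_count--;
}
spin_unlock(&qedi_conn->list_lock);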
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index c14ac7882afa..08c05403cd72 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
struct qedi_cmd *cmd, *cmd_tmp;
+ spin_lock(&qedi_conn->list_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
io_cmd) {
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
}
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
@@ -1069,6 +1071,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
qedi_ep->state = EP_STATE_DISCONN_START;
+
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ goto ep_release_conn;
+
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
QEDI_WARN(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 6f038ae5efca..61fab01d2d52 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -50,6 +50,10 @@ module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
+static uint qedi_flags_override;
+module_param(qedi_flags_override, uint, 0644);
+MODULE_PARM_DESC(qedi_flags_override, "Override the default error-handling flag bits; bit 31 must be set for the override to take effect.");
+
const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
@@ -63,6 +67,8 @@ static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static void qedi_recovery_handler(struct work_struct *work);
+static void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -789,8 +795,7 @@ static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
spin_lock_bh(&qedi->ll2_lock);
list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
list_del(&work->list);
- if (work->skb)
- kfree_skb(work->skb);
+ kfree_skb(work->skb);
kfree(work);
}
spin_unlock_bh(&qedi->ll2_lock);
@@ -1113,6 +1118,42 @@ exit_get_data:
return;
}
+void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+ unsigned long override_flags = qedi_flags_override;
+
+ if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))
+ qedi->qedi_err_flags = qedi_flags_override;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+		  "HW error handler scheduled, err=%d err_flags=0x%lx\n",
+ err_type, qedi->qedi_err_flags);
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedi->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags))
+ qedi_ops->common->attn_clr_enable(qedi->cdev, true);
+
+ if (err_type == QED_HW_ERR_RAMROD_FAIL &&
+ test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags))
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ break;
+ default:
+ break;
+ }
+}
+
static void qedi_schedule_recovery_handler(void *dev)
{
struct qedi_ctx *qedi = dev;
@@ -1127,6 +1168,15 @@ static void qedi_schedule_recovery_handler(void *dev)
schedule_delayed_work(&qedi->recovery_work, 0);
}
+static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -1138,6 +1188,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Link Down event.\n");
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
}
}
@@ -1145,6 +1196,7 @@ static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
.schedule_recovery_handler = qedi_schedule_recovery_handler,
+ .schedule_hw_err_handler = qedi_schedule_hw_err_handler,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
@@ -1357,7 +1409,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < qedi->int_info.msix_cnt; i++) {
+ for (i = 0; i < qedi->msix_count; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -1387,7 +1439,12 @@ static int qedi_setup_int(struct qedi_ctx *qedi)
{
int rc = 0;
- rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
+ if (rc < 0)
+ goto exit_setup_int;
+
+ qedi->msix_count = rc;
+
rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
if (rc)
goto exit_setup_int;
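The interrupt sizing above in one place (a sketch using the patch's names): request one fastpath vector per queue, record how many the qed core actually granted, and bound the per-vector request loop by that count instead of int_info.msix_cnt:

rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
if (rc < 0)
	goto exit_setup_int;
qedi->msix_count = rc;	/* fastpath vectors actually granted */

for (i = 0; i < qedi->msix_count; i++) {
	/* per-vector request_irq(), as in qedi_request_msix_irq() */
}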
@@ -2336,10 +2393,30 @@ kset_free:
return -ENOMEM;
}
+static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n",
+ __func__, state);
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recovery already in progress.\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
int rval;
+ u16 retry = 10;
if (mode == QEDI_MODE_SHUTDOWN)
iscsi_host_for_each_session(qedi->shost,
@@ -2368,7 +2445,13 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_sync_free_irqs(qedi);
if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
- qedi_ops->stop(qedi->cdev);
+ while (retry--) {
+ rval = qedi_ops->stop(qedi->cdev);
+ if (rval < 0)
+ msleep(1000);
+ else
+ break;
+ }
qedi_ops->ll2->stop(qedi->cdev);
}
@@ -2405,6 +2488,21 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
}
}
+static void qedi_board_disable_work(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx,
+ board_disable_work.work);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Fan failure, Unloading firmware context.\n");
+
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+}
+
static void qedi_shutdown(struct pci_dev *pdev)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -2427,6 +2525,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
struct qed_probe_params qed_params;
void *task_start, *task_end;
int rc;
+ u16 retry = 10;
if (mode != QEDI_MODE_RECOVERY) {
qedi = qedi_host_alloc(pdev);
@@ -2438,6 +2537,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi = pci_get_drvdata(pdev);
}
+retry_probe:
+ if (mode == QEDI_MODE_RECOVERY)
+ msleep(2000);
+
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
qed_params.dp_module = qedi_qed_debug;
@@ -2445,11 +2548,20 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
if (!qedi->cdev) {
+ if (mode == QEDI_MODE_RECOVERY && retry) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Retry %d initialize hardware\n", retry);
+ retry--;
+ goto retry_probe;
+ }
+
rc = -ENODEV;
QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
goto free_host;
}
+ set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);
+ set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
@@ -2533,7 +2645,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
qedi->mac);
- sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no);
qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
@@ -2658,6 +2770,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
}
INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
+ INIT_DELAYED_WORK(&qedi->board_disable_work,
+ qedi_board_disable_work);
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
@@ -2744,12 +2858,17 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
+static struct pci_error_handlers qedi_err_handler = {
+ .error_detected = qedi_io_error_detected,
+};
+
static struct pci_driver qedi_pci_driver = {
.name = QEDI_MODULE_NAME,
.id_table = qedi_pci_tbl,
.probe = qedi_probe,
.remove = qedi_remove,
.shutdown = qedi_shutdown,
+ .err_handler = &qedi_err_handler,
};
static int __init qedi_init(void)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 441a45349349..545936cb3980 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1241,7 +1241,7 @@ qla1280_done(struct scsi_qla_host *ha)
{
struct srb *sp;
struct list_head *done_q;
- int bus, target, lun;
+ int bus, target;
struct scsi_cmnd *cmd;
ENTER("qla1280_done");
@@ -1256,7 +1256,6 @@ qla1280_done(struct scsi_qla_host *ha)
cmd = sp->cmd;
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
switch ((CMD_RESULT(cmd) >> 16)) {
case DID_RESET:
@@ -2185,13 +2184,12 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
nv->cntr_flags_1.disable_loading_risc_code;
if (IS_ISP1040(ha)) {
- uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
+ uint16_t hwrev, cfg1, cdma_conf;
hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
- ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
/* Busted fifo, says mjacob. */
if (hwrev != ISP_CFG0_1040A)
@@ -2427,7 +2425,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
int cnt;
uint16_t *optr, *iptr;
uint16_t __iomem *mptr;
- uint16_t data;
DECLARE_COMPLETION_ONSTACK(wait);
ENTER("qla1280_mailbox_command");
@@ -2462,7 +2459,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
spin_unlock_irq(ha->host->host_lock);
WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
- data = qla1280_debounce_register(&reg->istatus);
+ qla1280_debounce_register(&reg->istatus);
wait_for_completion(&wait);
del_timer_sync(&ha->mailbox_timer);
@@ -3604,7 +3601,6 @@ static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
struct list_head *done_q)
{
- unsigned int bus, target, lun;
int sense_sz;
struct srb *sp;
struct scsi_cmnd *cmd;
@@ -3630,11 +3626,6 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
cmd = sp->cmd;
- /* Generate LU queue on cntrl, target, LUN */
- bus = SCSI_BUS_32(cmd);
- target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
-
if (comp_status || scsi_status) {
dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
"0x%x, handle = 0x%x\n", comp_status,
@@ -3673,7 +3664,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", bus, target, lun);
+ "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
(char *)cmd->sense_buffer,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5d93ccc73153..ab45ac1e5a72 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -157,6 +156,14 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 10:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ql_log(ql_log_info, vha, 0x70e9,
+ "Issuing MPI firmware dump on host#%ld.\n",
+ vha->host_no);
+ ha->isp_ops->mpi_fw_dump(vha, 0);
+ }
+ break;
}
return count;
}
@@ -744,8 +751,6 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
qla83xx_idc_unlock(vha, 0);
break;
- } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- qla27xx_reset_mpi(vha);
} else {
/* Make sure FC side is not in reset */
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -2726,6 +2731,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
struct link_statistics *stats;
dma_addr_t stats_dma;
struct fc_host_statistics *p = &vha->fc_host_stat;
+ struct qla_qpair *qpair;
+ int i;
+ u64 ib = 0, ob = 0, ir = 0, or = 0;
memset(p, -1, sizeof(*p));
@@ -2762,6 +2770,27 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
+ /* Aggregate I/O stats from the base qpair, extra qpairs and legacy counters. */
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ ir += qpair->counters.input_requests;
+ or += qpair->counters.output_requests;
+ ib += qpair->counters.input_bytes;
+ ob += qpair->counters.output_bytes;
+ }
+ ir += ha->base_qpair->counters.input_requests;
+ or += ha->base_qpair->counters.output_requests;
+ ib += ha->base_qpair->counters.input_bytes;
+ ob += ha->base_qpair->counters.output_bytes;
+
+ ir += vha->qla_stats.input_requests;
+ or += vha->qla_stats.output_requests;
+ ib += vha->qla_stats.input_bytes;
+ ob += vha->qla_stats.output_bytes;
+
p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
@@ -2781,15 +2810,16 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
} else {
- p->rx_words = vha->qla_stats.input_bytes;
- p->tx_words = vha->qla_stats.output_bytes;
+ p->rx_words = ib >> 2;
+ p->tx_words = ob >> 2;
}
}
+
p->fcp_control_requests = vha->qla_stats.control_requests;
- p->fcp_input_requests = vha->qla_stats.input_requests;
- p->fcp_output_requests = vha->qla_stats.output_requests;
- p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
- p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ p->fcp_input_requests = ir;
+ p->fcp_output_requests = or;
+ p->fcp_input_megabytes = ib >> 20;
+ p->fcp_output_megabytes = ob >> 20;
p->seconds_since_last_reset =
get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
do_div(p->seconds_since_last_reset, HZ);
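The shift conversions above follow directly from FC framing and binary units: a Fibre Channel word is 4 bytes (hence >> 2) and a megabyte here is 2^20 bytes (hence >> 20). A worked example with an illustrative byte count:

/* 6 MiB transferred on a port without FPM hardware word counters: */
u64 ib = 6291456;		/* 6 * 2^20 bytes                     */
u64 rx_words = ib >> 2;	/* 1572864 FC words (4 bytes each)    */
u64 rx_mb    = ib >> 20;	/* 6 megabytes reported to fc_host    */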
@@ -2809,9 +2839,18 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats;
dma_addr_t stats_dma;
+ int i;
+ struct qla_qpair *qpair;
memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ memset(&qpair->counters, 0, sizeof(qpair->counters));
+ }
+ memset(&ha->base_qpair->counters, 0, sizeof(ha->base_qpair->counters));
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
@@ -3214,46 +3253,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_CNA_CAPABLE(ha))
- speeds = FC_PORTSPEED_10GBIT;
- else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
- if (ha->max_supported_speed == 2) {
- if (ha->min_supported_speed <= 6)
- speeds |= FC_PORTSPEED_64GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1) {
- if (ha->min_supported_speed <= 5)
- speeds |= FC_PORTSPEED_32GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 4)
- speeds |= FC_PORTSPEED_16GBIT;
- }
- if (ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 3)
- speeds |= FC_PORTSPEED_8GBIT;
- }
- if (ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 2)
- speeds |= FC_PORTSPEED_4GBIT;
- }
- } else if (IS_QLA2031(ha))
- speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
- FC_PORTSPEED_4GBIT;
- else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
- speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
- FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else if (IS_QLA24XX_TYPE(ha))
- speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
- FC_PORTSPEED_1GBIT;
- else if (IS_QLA23XX(ha))
- speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else
- speeds = FC_PORTSPEED_1GBIT;
+ speeds = qla25xx_fdmi_port_speed_capability(ha);
fc_host_supported_speeds(vha->host) = speeds;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 67efde1d4b8e..23b604832a54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 7594fad7b5b5..1a09b5512267 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_BSG_H
#define __QLA_BSG_H
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1be811a5d69d..bb7431912d41 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
@@ -16,7 +15,7 @@
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
- * | | | 0x212a-0x2134 |
+ * | | | 0x212c-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -2449,7 +2448,7 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
const struct pci_dev *pdev = vha->hw->pdev;
/* <module-name> [<dev-name>]-<msg-id>:<host>: */
- snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
dev_name(&(pdev->dev)), msg_id, vha->host_no);
} else {
/* <module-name> [<dev-name>]-<msg-id>: : */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1d7de63e8f8..2e59e75c62b5 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a165120d2976..4f0486fe30dd 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_DEF_H
#define __QLA_DEF_H
@@ -624,6 +623,12 @@ enum {
TYPE_TGT_TMCMD, /* task management */
};
+struct iocb_resource {
+ u8 res_type;
+ u8 pad;
+ u16 iocb_cnt;
+};
+
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
@@ -631,6 +636,7 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
+ struct iocb_resource iores;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
@@ -2443,12 +2449,6 @@ typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- uint8_t node_name[WWN_SIZE];
- uint8_t port_name[WWN_SIZE];
- port_id_t d_id;
- uint16_t loop_id;
- uint16_t old_loop_id;
-
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
unsigned int free_pending:1;
@@ -2465,15 +2465,24 @@ typedef struct fc_port {
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
unsigned int prli_pend_timer:1;
+ uint8_t nvme_flag;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t d_id;
+ uint16_t loop_id;
+ uint16_t old_loop_id;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_PI_CTRL BIT_9
+#define NVME_PRLI_SP_SLER BIT_8
#define NVME_PRLI_SP_CONF BIT_7
#define NVME_PRLI_SP_INITIATOR BIT_5
#define NVME_PRLI_SP_TARGET BIT_4
#define NVME_PRLI_SP_DISCOVERY BIT_3
#define NVME_PRLI_SP_FIRST_BURST BIT_0
- uint8_t nvme_flag;
+
uint32_t nvme_first_burst_size;
#define NVME_FLAG_REGISTERED 4
#define NVME_FLAG_DELETING 2
@@ -2544,6 +2553,8 @@ typedef struct fc_port {
u8 last_login_state;
u16 n2n_link_reset_cnt;
u16 n2n_chip_reset;
+
+ struct dentry *dfs_rport_dir;
} fc_port_t;
enum {
@@ -3508,6 +3519,14 @@ struct qla_tgt_counters {
uint64_t num_term_xchg_sent;
};
+struct qla_counters {
+ uint64_t input_bytes;
+ uint64_t input_requests;
+ uint64_t output_bytes;
+ uint64_t output_requests;
+};
+
struct qla_qpair;
/* Response queue data structure */
@@ -3566,6 +3585,15 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+struct qla_fw_resources {
+ u16 iocbs_total;
+ u16 iocbs_limit;
+ u16 iocbs_qp_limit;
+ u16 iocbs_used;
+};
+
+#define QLA_IOCB_PCT_LIMIT 95
+
/*Queue pair data structure */
struct qla_qpair {
spinlock_t qp_lock;
@@ -3592,6 +3620,7 @@ struct qla_qpair {
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
uint32_t use_shadow_reg:1;
+ uint32_t rcv_intr:1;
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
@@ -3607,13 +3636,17 @@ struct qla_qpair {
struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
struct qla_hw_data *hw;
struct work_struct q_work;
+ struct qla_counters counters;
+
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
- uint16_t cpuid;
+
uint16_t retry_term_cnt;
__le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
+ uint16_t cpuid;
+ struct qla_fw_resources fwres ____cacheline_aligned;
};
/* Place holder for FW buffer parameters */
@@ -3881,6 +3914,7 @@ struct qla_hw_data {
/* Enabled in Driver */
uint32_t scm_enabled:1;
uint32_t max_req_queue_warned:1;
+ uint32_t plogi_template_valid:1;
} flags;
uint16_t max_exchg;
@@ -4127,6 +4161,10 @@ struct qla_hw_data {
#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_ZIO_THRESHOLD_CAPABLE(ha) \
+ ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+
/* HBA serial number */
uint8_t serial0;
uint8_t serial1;
@@ -4214,7 +4252,7 @@ struct qla_hw_data {
/* Extended Logins */
void *exlogin_buf;
dma_addr_t exlogin_buf_dma;
- int exlogin_size;
+ uint32_t exlogin_size;
#define ENABLE_EXCHANGE_OFFLD BIT_2
@@ -4225,7 +4263,8 @@ struct qla_hw_data {
int exchoffld_count;
/* n2n */
- struct els_plogi_payload plogi_els_payld;
+ struct fc_els_flogi plogi_els_payld;
+#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
void *swl;
@@ -4273,6 +4312,7 @@ struct qla_hw_data {
#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000
/* Cisco fabric attached */
#define FW_ATTR_EXT0_SCM_CISCO 0x00002000
+#define FW_ATTR_EXT0_NVME2 BIT_13
uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
@@ -4622,6 +4662,7 @@ typedef struct scsi_qla_host {
uint32_t qpairs_rsp_created:1;
uint32_t nvme_enabled:1;
uint32_t nvme_first_burst:1;
+ uint32_t nvme2_enabled:1;
} flags;
atomic_t loop_state;
@@ -4780,6 +4821,8 @@ typedef struct scsi_qla_host {
uint16_t ql2xexchoffld;
uint16_t ql2xiniexchg;
+ struct dentry *dfs_rport_root;
+
struct purex_list {
struct list_head head;
spinlock_t lock;
@@ -5103,6 +5146,8 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define QLA_N2N_WAIT_TIME 5 /* 2 * ra_tov(n2n) + 1 */
+
#define NVME_TYPE(fcport) \
(fcport->fc4_type & FS_FC4TYPE_NVME) \
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index e62b2115235e..d5ebcf7d70ff 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -12,6 +11,140 @@
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
+#define QLA_DFS_RPORT_DEVLOSS_TMO 1
+
+static int
+qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+ *val = fp->nvme_remote_port->dev_loss_tmo;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
+ val);
+#else /* CONFIG_NVME_FC */
+ return -EINVAL;
+#endif /* CONFIG_NVME_FC */
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
+static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_get(fp, _attr_id, val); \
+} \
+static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_set(fp, _attr_id, val); \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
+ qla_dfs_rport_##_attr##_get, \
+ qla_dfs_rport_##_attr##_set, "%llu\n")
+
+/*
+ * Wrapper for getting fc_port fields.
+ *
+ * _attr : Attribute name.
+ * _get_val : Accessor macro to retrieve the value.
+ */
+#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
+static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ *val = _get_val; \
+ return 0; \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
+ qla_dfs_rport_field_##_attr##_get, \
+ NULL, "%llu\n")
+
+#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)
+
+#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
+
+DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
+
+DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
+DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
+DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
+DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
+DEFINE_QLA_DFS_RPORT_FIELD(flags);
+DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
+DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
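For reference, DEFINE_QLA_DFS_RPORT_FIELD(loop_id) expands (via DEFINE_QLA_DFS_RPORT_FIELD_GET) to a read-only debugfs attribute, roughly:

static int qla_dfs_rport_field_loop_id_get(void *data, u64 *val)
{
	struct fc_port *fp = data;

	*val = fp->loop_id;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_loop_id_fops,
			 qla_dfs_rport_field_loop_id_get, NULL, "%llu\n");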
+
+void
+qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ char wwn[32];
+
+#define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
+ debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
+ fp, &qla_dfs_rport_field_##_attr##_fops)
+
+ if (!vha->dfs_rport_root || fp->dfs_rport_dir)
+ return;
+
+ sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+ fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+ if (!fp->dfs_rport_dir)
+ return;
+ if (NVME_TARGET(vha->hw, fp))
+ debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+ fp, &qla_dfs_rport_dev_loss_tmo_fops);
+
+ QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
+ QLA_CREATE_RPORT_FIELD_ATTR(flags);
+ QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
+ QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(port_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
+}
+
+void
+qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
+ return;
+ debugfs_remove_recursive(fp->dfs_rport_dir);
+ fp->dfs_rport_dir = NULL;
+}
+
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
@@ -37,89 +170,63 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
return 0;
}
-static int
-qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
-{
- scsi_qla_host_t *vha = inode->i_private;
-
- return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
-}
-
-static const struct file_operations dfs_tgt_sess_ops = {
- .open = qla2x00_dfs_tgt_sess_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
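DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the single_open() boilerplate deleted above:

static int qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
{
	return single_open(file, qla2x00_dfs_tgt_sess_show, inode->i_private);
}

static const struct file_operations qla2x00_dfs_tgt_sess_fops = {
	.owner		= THIS_MODULE,
	.open		= qla2x00_dfs_tgt_sess_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};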
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
- struct gid_list_info *gid_list, *gid;
+ struct gid_list_info *gid_list;
dma_addr_t gid_list_dma;
fc_port_t fc_port;
+ char *id_iter;
int rc, i;
uint16_t entries, loop_id;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
seq_printf(s, "%s\n", vha->host_str);
- if (tgt) {
- gid_list = dma_alloc_coherent(&ha->pdev->dev,
- qla2x00_gid_list_size(ha),
- &gid_list_dma, GFP_KERNEL);
- if (!gid_list) {
- ql_dbg(ql_dbg_user, vha, 0x7018,
- "DMA allocation failed for %u\n",
- qla2x00_gid_list_size(ha));
- return 0;
- }
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x7018,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
- rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
- &entries);
- if (rc != QLA_SUCCESS)
- goto out_free_id_list;
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
- gid = gid_list;
+ id_iter = (char *)gid_list;
- seq_puts(s, "Port Name Port ID Loop ID\n");
+ seq_puts(s, "Port Name Port ID Loop ID\n");
- for (i = 0; i < entries; i++) {
- loop_id = le16_to_cpu(gid->loop_id);
- memset(&fc_port, 0, sizeof(fc_port_t));
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
- fc_port.loop_id = loop_id;
+ fc_port.loop_id = loop_id;
- rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
- seq_printf(s, "%8phC %02x%02x%02x %d\n",
- fc_port.port_name, fc_port.d_id.b.domain,
- fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
- fc_port.loop_id);
- gid = (void *)gid + ha->gid_list_info_size;
- }
-out_free_id_list:
- dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
- gid_list, gid_list_dma);
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
}
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
return 0;
}
-static int
-qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
-{
- scsi_qla_host_t *vha = inode->i_private;
-
- return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
-}
-
-static const struct file_operations dfs_tgt_port_database_ops = {
- .open = qla2x00_dfs_tgt_port_database_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
@@ -127,6 +234,8 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
struct scsi_qla_host *vha = s->private;
uint16_t mb[MAX_IOCB_MB_REG];
int rc;
+ struct qla_hw_data *ha = vha->hw;
+ u16 iocbs_used, i;
rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
if (rc != QLA_SUCCESS) {
@@ -151,23 +260,22 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
mb[23]);
}
- return 0;
-}
+ if (ql2xenforce_iocb_limit) {
+ /* No lock required; this is only an estimate. */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
-static int
-qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
-{
- struct scsi_qla_host *vha = inode->i_private;
+ seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ }
- return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+ return 0;
}
-static const struct file_operations dfs_fw_resource_cnt_ops = {
- .open = qla_dfs_fw_resource_cnt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
@@ -244,20 +352,7 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
return 0;
}
-static int
-qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
-{
- struct scsi_qla_host *vha = inode->i_private;
-
- return single_open(file, qla_dfs_tgt_counters_show, vha);
-}
-
-static const struct file_operations dfs_tgt_counters_ops = {
- .open = qla_dfs_tgt_counters_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
@@ -459,23 +554,35 @@ create_dir:
create_nodes:
ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
- S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);
ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
- ha->dfs_dir, vha, &dfs_tgt_counters_ops);
+ ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);
ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
- S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
- S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+ S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ if (!ha->tgt.dfs_naqp) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "Unable to create debugFS naqp node.\n");
+ goto out;
+ }
+ }
+ vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+ if (!vha->dfs_rport_root) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "Unable to create debugFS rports node.\n");
+ goto out;
+ }
out:
return 0;
}
@@ -515,6 +622,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->dfs_fce = NULL;
}
+ if (vha->dfs_rport_root) {
+ debugfs_remove_recursive(vha->dfs_rport_root);
+ vha->dfs_rport_root = NULL;
+ }
+
if (ha->dfs_dir) {
debugfs_remove(ha->dfs_dir);
ha->dfs_dir = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index bba1b77fba7e..12b689e32883 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_FW_H
#define __QLA_FW_H
@@ -619,7 +618,7 @@ struct sts_entry_24xx {
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- __le16 retry_delay;
+ __le16 status_qualifier;
__le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0ced18f3104e..e39b4f2da73a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_GBL_H
#define __QLA_GBL_H
@@ -129,6 +128,8 @@ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
void qla_rscn_replay(fc_port_t *fcport);
void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
+void qla_init_iocb_limit(scsi_qla_host_t *);
+
/*
* Global Data in qla_os.c source file.
@@ -175,6 +176,7 @@ extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
+extern int ql2xenforce_iocb_limit;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -704,6 +706,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
void qla_scan_work_fn(struct work_struct *);
+uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *);
+uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -935,9 +939,10 @@ void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
extern void qla24xx_process_purex_list(struct purex_list *);
+extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
-void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index b569fd6e96d6..e28c4b7ec55f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -1502,7 +1501,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
-static uint
+uint
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
{
uint speeds = 0;
@@ -1546,7 +1545,7 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
}
return speeds;
}
- if (IS_QLA25XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
if (IS_QLA24XX_TYPE(ha))
@@ -1556,7 +1555,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
return FDMI_PORT_SPEED_1GB;
}
-static uint
+
+uint
qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
{
switch (ha->link_data_rate) {
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0bd04a62af83..898c70b8ebbf 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
@@ -63,6 +62,16 @@ void qla2x00_sp_free(srb_t *sp)
qla2x00_rel_sp(sp);
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res)
+{
+ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
+}
+
+void qla2xxx_rel_free_warning(srb_t *sp)
+{
+ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
+}
+
/* Asynchronous Login/Logout Routines -------------------------------------- */
unsigned long
@@ -3288,6 +3297,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
j, fwdt->dump_size);
dump_size += fwdt->dump_size;
}
+ /* Add space for spare MPI fw dump. */
+ dump_size += ha->fwdt[1].dump_size;
} else {
req_q_size = req->length * sizeof(request_t);
rsp_q_size = rsp->length * sizeof(response_t);
@@ -3622,6 +3633,31 @@ out:
return ha->flags.lr_detected;
}
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u16 i, num_qps;
+ u32 limit;
+ struct qla_hw_data *ha = vha->hw;
+
+ num_qps = ha->num_qpairs + 1;
+ limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ ha->base_qpair->fwres.iocbs_limit = limit;
+ ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+ ha->base_qpair->fwres.iocbs_used = 0;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i]) {
+ ha->queue_pair_map[i]->fwres.iocbs_total =
+ ha->orig_fw_iocb_count;
+ ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+ ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+ limit / num_qps;
+ ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+ }
+ }
+}
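With QLA_IOCB_PCT_LIMIT at 95, the limits work out as follows for illustrative numbers (2048 firmware IOCBs, three extra qpairs plus the base one, so num_qps = 4):

limit          = (2048 * 95) / 100;	/* = 1945, global high-water mark */
iocbs_qp_limit = 1945 / 4;		/* =  486, soft per-qpair budget  */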
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @vha: HA context
@@ -3690,9 +3726,7 @@ execute_fw_with_lr:
goto execute_fw_with_lr;
}
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) &&
- (ha->zio_mode == QLA_ZIO_MODE_6))
+ if (IS_ZIO_THRESHOLD_CAPABLE(ha))
qla27xx_set_zio_threshold(vha,
ha->last_zio_threshold);
@@ -3723,6 +3757,7 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha);
+ qla_init_iocb_limit(vha);
/*
* Allocate the array of outstanding commands
@@ -4957,6 +4992,29 @@ qla2x00_free_fcport(fc_port_t *fcport)
kfree(fcport);
}
+static void qla_get_login_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ u32 *bp, sz;
+ __be32 *q;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ return;
+ }
+ q = (__be32 *)&ha->plogi_els_payld.fl_csp;
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ ha->flags.plogi_template_valid = 1;
+}
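cpu_to_be32_array() converts the template, read back through the little-endian init_cb DMA buffer, into the big-endian wire order that ELS payloads expect; it is equivalent to applying cpu_to_be32() element-wise over the sz / 4 words:

u32 i;

for (i = 0; i < sz / 4; i++)
	q[i] = cpu_to_be32(bp[i]);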
+
/*
* qla2x00_configure_loop
* Updates Fibre Channel Device Database with what is actually on loop.
@@ -5000,6 +5058,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
qla2x00_get_data_rate(vha);
+ qla_get_login_template(vha);
/* Determine what we need to do */
if ((ha->current_topology == ISP_CFG_FL ||
@@ -5084,32 +5143,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = vha->hw;
unsigned long flags;
fc_port_t *fcport;
- int rval;
-
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
- ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- __be32 *q = &ha->plogi_els_payld.data[0];
- bp = (uint32_t *)ha->init_cb;
- cpu_to_be32_array(q, bp, sz / 4);
- memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->n2n_flag) {
@@ -5118,7 +5156,6 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
}
}
-skip_login:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_retry++;
spin_unlock_irqrestore(&vha->work_lock, flags);
@@ -5486,6 +5523,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
+ qla2x00_dfs_create_rport(vha, fcport);
+
if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
@@ -7109,10 +7148,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- int rval = QLA_SUCCESS;
if (IS_P3P_TYPE(ha))
- return rval;
+ return QLA_SUCCESS;
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -7127,7 +7165,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
- return rval;
+ return QLA_SUCCESS;
}
/* On sparc systems, obtain port and node WWN from firmware
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 861dc522723c..e80e41b6c9e1 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_target.h"
@@ -207,10 +206,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
return sp;
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res);
+void qla2xxx_rel_free_warning(srb_t *sp);
+
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
sp->qpair = NULL;
+ sp->done = qla2xxx_rel_done_warning;
+ sp->free = qla2xxx_rel_free_warning;
mempool_free(sp, qpair->srb_mempool);
QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
@@ -266,11 +270,41 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
}
static inline void
-qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
- if (retry_delay)
- fcport->retry_delay_timestamp = jiffies +
- (retry_delay * HZ / 10);
+ u8 scope;
+ u16 qual;
+#define SQ_SCOPE_MASK 0xc000 /* SAM-6 rev5 5.3.2 */
+#define SQ_SCOPE_SHIFT 14
+#define SQ_QUAL_MASK 0x3fff
+
+#define SQ_MAX_WAIT_SEC 60 /* Max I/O hold off time in seconds. */
+#define SQ_MAX_WAIT_TIME (SQ_MAX_WAIT_SEC * 10) /* in 100ms. */
+
+ if (!sts_qual) /* Common case. */
+ return;
+
+ scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
+ /* Handle only scope 1 or 2, which is for I-T nexus. */
+ if (scope != 1 && scope != 2)
+ return;
+
+ /* Skip processing if a retry delay timer is already in effect. */
+ if (fcport->retry_delay_timestamp &&
+ time_before(jiffies, fcport->retry_delay_timestamp))
+ return;
+
+ qual = sts_qual & SQ_QUAL_MASK;
+ if (qual < 1 || qual > 0x3fef)
+ return;
+ qual = min(qual, (u16)SQ_MAX_WAIT_TIME);
+
+ /* qual is expressed in 100ms increments. */
+ fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);
+
+ ql_log(ql_log_warn, fcport->vha, 0x5101,
+ "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
+ fcport->port_name, sts_qual, qual * 100);
}
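A worked example: a SAM_STAT_BUSY response carrying sts_qual = 0x4005 decodes as scope 1 (I-T nexus) with qualifier 5, so I/O to the port is held off for 500ms:

scope = (0x4005 & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;	/* 0x4000 >> 14 = 1   */
qual  =  0x4005 & SQ_QUAL_MASK;			/* 5, in 100ms units  */
fcport->retry_delay_timestamp = jiffies + (5 * HZ / 10);	/* 500ms from now */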
static inline bool
@@ -343,3 +377,58 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}
+
+enum {
+ RESOURCE_NONE,
+ RESOURCE_INI,
+};
+
+static inline int
+qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ u16 iocbs_used, i;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ if (!ql2xenforce_iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return 0;
+ }
+
+ if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ /* No need to acquire the qpair lock; it's just a rough calculation. */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
+
+ if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+}
+
+static inline void
+qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ switch (iores->res_type) {
+ case RESOURCE_NONE:
+ break;
+ default:
+ if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ qp->fwres.iocbs_used -= iores->iocb_cnt;
+ } else {
+ /* Should not happen; clamp to avoid underflow. */
+ qp->fwres.iocbs_used = 0;
+ }
+ break;
+ }
+ iores->res_type = RESOURCE_NONE;
+}
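The intended pairing, visible in the qla24xx_start_scsi() changes later in this patch: reserve the IOCB count before queuing, and return it when the command completes or queuing fails:

sp->iores.res_type = RESOURCE_INI;
sp->iores.iocb_cnt = req_cnt;
if (qla_get_iocbs(sp->qpair, &sp->iores))
	goto queuing_error;		/* firmware IOCB budget exhausted */
/* ... later, on completion or on the error path ... */
qla_put_iocbs(sp->qpair, &sp->iores);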
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 0954fa41911c..c532c74ca1ab 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -594,6 +593,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -612,12 +612,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
cur_seg = scsi_sglist(cmd);
@@ -704,6 +704,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -721,12 +722,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1635,6 +1636,12 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -1707,6 +1714,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1820,6 +1828,12 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -1894,6 +1908,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
@@ -1955,6 +1970,12 @@ qla2xxx_start_scsi_mq(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -2027,6 +2048,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2155,6 +2177,12 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -2232,6 +2260,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
}
@@ -2348,6 +2377,14 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
if (sp->vha->flags.nvme_first_burst)
logio->io_parameter[0] =
cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+ if (sp->vha->flags.nvme2_enabled) {
+ /* Set service parameter BIT_8 for SLER support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_SLER);
+ /* Set service parameter BIT_9 for PI control support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2975,8 +3012,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
- &ha->plogi_els_payld.data,
- sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+ &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 25e0a1684763..a24b82de4aab 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -767,7 +766,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
ql_log(ql_log_warn, vha, 0x02f0,
"MPI Heartbeat stop. MPI reset is%s needed. "
"MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0] & BIT_8 ? "" : " not",
+ mb[1] & BIT_8 ? "" : " not",
mb[0], mb[1], mb[2], mb[3]);
if ((mb[1] & BIT_8) == 0)
@@ -1716,35 +1715,35 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
- srb_t *sp = NULL;
+ srb_t *sp;
uint16_t index;
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
- "Invalid command index (%x) type %8ph.\n",
- index, iocb);
+ "%s: Invalid command index (%x) type %8ph.\n",
+ func, index, iocb);
if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- goto done;
+ return NULL;
}
sp = req->outstanding_cmds[index];
if (!sp) {
ql_log(ql_log_warn, vha, 0x5032,
- "Invalid completion handle (%x) -- timed-out.\n", index);
- return sp;
+ "%s: Invalid completion handle (%x) -- timed-out.\n",
+ func, index);
+ return NULL;
}
if (sp->handle != index) {
ql_log(ql_log_warn, vha, 0x5033,
- "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+ "%s: SRB handle (%x) mismatch %x.\n", func,
+ sp->handle, index);
return NULL;
}
req->outstanding_cmds[index] = NULL;
-
-done:
return sp;
}
@@ -1839,6 +1838,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct mbx_24xx_entry *pkt)
{
const char func[] = "MBX-IOCB2";
+ struct qla_hw_data *ha = vha->hw;
srb_t *sp;
struct srb_iocb *si;
u16 sz, i;
@@ -1848,6 +1848,18 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (!sp)
return;
+ if (sp->type == SRB_SCSI_CMD ||
+ sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_TM_CMD) {
+ ql_log(ql_log_warn, vha, 0x509d,
+ "Inconsistent event entry type %d\n", sp->type);
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
si = &sp->u.iocb_cmd;
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
@@ -2855,7 +2867,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int logit = 1;
int res = 0;
uint16_t state_flags = 0;
- uint16_t retry_delay = 0;
+ uint16_t sts_qual = 0;
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
@@ -2901,6 +2913,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
return;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
@@ -2953,8 +2966,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
- u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
-
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2968,13 +2979,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
- /* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
- retry_delay = sts24_retry_delay & 0x3fff;
- ql_dbg(ql_dbg_io, sp->vha, 0x3033,
- "%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24_retry_delay >> 14, retry_delay);
- }
+ sts_qual = le16_to_cpu(sts24->status_qualifier);
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -3012,9 +3017,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* Check retry_delay_timer value if we receive a busy or
* queue full.
*/
- if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
- lscsi_status == SAM_STAT_BUSY)
- qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+ if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY))
+ qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
/*
* Based on Host and scsi status generate status code for Linux
@@ -3321,6 +3326,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
+ qla_put_iocbs(sp->qpair, &sp->iores);
sp->done(sp, res);
return 0;
}
@@ -3422,8 +3428,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!ha->flags.fw_started)
return;
- if (rsp->qpair->cpuid != smp_processor_id())
+ if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+ rsp->qpair->rcv_intr = 1;
qla_cpu_update(rsp->qpair, smp_processor_id());
+ }
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -3873,7 +3881,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- queue_work(ha->wq, &qpair->q_work);
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
@@ -3899,7 +3907,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- queue_work(ha->wq, &qpair->q_work);
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 226f1428d3e5..07afd0d8a8f3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_target.h"
@@ -845,7 +844,7 @@ qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
* Context:
* Kernel context.
*/
-#define CONFIG_XLOGINS_MEM 0x3
+#define CONFIG_XLOGINS_MEM 0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
@@ -872,8 +871,9 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x111b,
+ "EXlogin Failed=%x. MB0=%x MB11=%x\n",
+ rval, mcp->mb[0], mcp->mb[11]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
@@ -1092,6 +1092,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: FC-NVMe is Enabled (0x%x)\n",
__func__, ha->fw_attributes_h);
}
+
+ /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
+ if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
+ ql_log(ql_log_info, vha, 0xd302,
+ "Firmware supports NVMe2 0x%x\n",
+ ha->fw_attributes_ext[0]);
+ vha->flags.nvme2_enabled = 1;
+ }
}
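[Annotation] A userspace sketch of the attribute test added here (assumption: FW_ATTR_EXT0_NVME2 is bit 13 of the extended attribute word, per the comment in the hunk; the sample value is made up):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)             (1u << (n))
#define FW_ATTR_EXT0_NVME2 BIT(13)

int main(void)
{
	uint32_t fw_attributes_ext0 = 0x2000;	/* bit 13 set */

	if (fw_attributes_ext0 & FW_ATTR_EXT0_NVME2)
		printf("Firmware supports NVMe2 0x%x\n", fw_attributes_ext0);
	return 0;
}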
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -1121,12 +1129,18 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (ha->flags.scm_supported_a &&
(ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
ha->flags.scm_supported_f = 1;
- memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
ha->sf_init_cb->flags |= BIT_13;
}
ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
(ha->flags.scm_supported_f) ? "Supported" :
"Not Supported");
+
+ if (vha->flags.nvme2_enabled) {
+ /* set BIT_15 of special feature control block for SLER */
+ ha->sf_init_cb->flags |= BIT_15;
+ /* set BIT_14 of special feature control block for PI CTRL*/
+ ha->sf_init_cb->flags |= BIT_14;
+ }
}
failed:
@@ -1822,7 +1836,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
}
- if (ha->flags.scm_supported_f) {
+ if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
mcp->mb[1] |= BIT_1;
mcp->mb[16] = MSW(ha->sf_init_cb_dma);
mcp->mb[17] = LSW(ha->sf_init_cb_dma);
@@ -3979,7 +3993,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
- fcport->dm_login_expire = jiffies + 2*HZ;
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
@@ -4925,8 +4940,6 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE);
-
/* List of Purex ELS */
cmd_opcode[0] = ELS_FPIN;
cmd_opcode[1] = ELS_RDP;
@@ -4958,51 +4971,12 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
"Done %s.\n", __func__);
}
- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
els_cmd_map, els_cmd_map_dma);
return rval;
}
-int
-qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
- dma_addr_t bbc_dma)
-{
- mbx_cmd_t mc;
- mbx_cmd_t *mcp = &mc;
- int rval;
-
- if (!IS_FWI2_CAPABLE(vha->hw))
- return QLA_FUNCTION_FAILED;
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
- "Entered %s.\n", __func__);
-
- mcp->mb[0] = MBC_GET_RNID_PARAMS;
- mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
- mcp->mb[2] = MSW(LSD(bbc_dma));
- mcp->mb[3] = LSW(LSD(bbc_dma));
- mcp->mb[6] = MSW(MSD(bbc_dma));
- mcp->mb[7] = LSW(MSD(bbc_dma));
- mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
- mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_1|MBX_0;
- mcp->buf_size = sizeof(*bbc);
- mcp->flags = MBX_DMA_IN;
- mcp->tov = MBX_TOV_SECONDS;
- rval = qla2x00_mailbox_command(vha, mcp);
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x118f,
- "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
- "Done %s.\n", __func__);
- }
-
- return rval;
-}
-
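[Annotation] The deleted function packed a 64-bit DMA address into four 16-bit mailbox registers. A userspace sketch of that word split (assumption: the macros below approximate the driver's LSD/MSD/LSW/MSW helpers; the address is made up):

#include <stdint.h>
#include <stdio.h>

#define LSD(x)  ((uint32_t)((uint64_t)(x) & 0xffffffffu))
#define MSD(x)  ((uint32_t)(((uint64_t)(x)) >> 32))
#define LSW(x)  ((uint16_t)((uint32_t)(x) & 0xffffu))
#define MSW(x)  ((uint16_t)(((uint32_t)(x)) >> 16))

int main(void)
{
	uint64_t dma = 0x000000012345abcdULL;

	/* mb2/mb3 carry the low dword, mb6/mb7 the high dword. */
	printf("mb2=%#x mb3=%#x mb6=%#x mb7=%#x\n",
	       MSW(LSD(dma)), LSW(LSD(dma)), MSW(MSD(dma)), LSW(MSD(dma)));
	return 0;
}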
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 15efe2f04b86..c7caf322f445 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_gbl.h"
@@ -808,11 +807,9 @@ static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
- struct scsi_qla_host *vha;
- struct qla_hw_data *ha = qpair->hw;
+ struct scsi_qla_host *vha = qpair->vha;
spin_lock_irqsave(&qpair->qp_lock, flags);
- vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, qpair->rsp);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index a8fe4f725fa0..ca7306685325 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/delay.h>
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 762250891a8f..73be8348402a 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_MR_H
#define __QLA_MR_H
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 90bbc61f361b..b7a1dc24db38 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_nvme.h"
#include <linux/scatterlist.h>
@@ -42,7 +41,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
+ req.dev_loss_tmo = 0;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -69,6 +68,14 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
+ ql_log(ql_log_info, vha, 0x212a,
+ "PortID:%06x Supports SLER\n", req.port_id);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
+ ql_log(ql_log_info, vha, 0x212b,
+ "PortID:%06x Supports PI control\n", req.port_id);
+
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
@@ -368,6 +375,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
struct srb_iocb *nvme = &sp->u.iocb_cmd;
struct scatterlist *sgl, *sg;
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
uint32_t rval = QLA_SUCCESS;
/* Setup qpair pointers */
@@ -399,8 +407,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
if (unlikely(!fd->sqid)) {
- struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
-
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
@@ -428,8 +434,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
	/* No data transfer; how do we check buffer len == 0? */
if (fd->io_dir == NVMEFC_FCP_READ) {
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
- vha->qla_stats.input_bytes += fd->payload_length;
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += fd->payload_length;
+ qpair->counters.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
@@ -441,11 +447,16 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
cmd_pkt->control_flags |=
cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
- vha->qla_stats.output_bytes += fd->payload_length;
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += fd->payload_length;
+ qpair->counters.output_requests++;
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = 0;
}
+ /* Set BIT_13 of control flags for Async event */
+ if (vha->flags.nvme2_enabled &&
+ cmd->sqe.common.opcode == nvme_admin_async_event) {
+ cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
+ }
/* Set NPORT-ID */
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -530,7 +541,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
- int rval = -ENODEV;
+ int rval;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
struct nvme_private *priv = fd->private;
@@ -538,16 +549,26 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (!priv) {
/* nvme association has been torn down */
- return rval;
+ return -ENODEV;
}
fcport = qla_rport->fcport;
- if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
- (fcport && fcport->deleted))
- return rval;
+ if (!qpair || !fcport)
+ return -ENODEV;
+
+ if (!qpair->fw_started || fcport->deleted)
+ return -EBUSY;
vha = fcport->vha;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return -ENODEV;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ (qpair && !qpair->fw_started) || fcport->deleted)
+ return -EBUSY;
+
/*
* If we know the dev is going away while the transport is still sending
* IO's return busy back to stall the IO Q. This happens when the
@@ -683,7 +704,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret = EINVAL;
+ int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index fbb844226630..f81f219c7c7d 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2017 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NVME_H
#define __QLA_NVME_H
@@ -14,9 +13,6 @@
#include "qla_def.h"
#include "qla_dsd.h"
-/* default dev loss time (seconds) before transport tears down ctrl */
-#define NVME_FC_DEV_LOSS_TMO 30
-
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
@@ -57,6 +53,7 @@ struct cmd_nvme {
uint64_t rsvd;
__le16 control_flags; /* Control Flags */
+#define CF_ADMIN_ASYNC_EVENT BIT_13
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 71273eb634d3..b3ba0de5d4fb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/delay.h>
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 93344a05910a..8567eaf1bddd 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NX_H
#define __QLA_NX_H
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 50e57603ce3d..01ccd4526707 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include <linux/vmalloc.h>
@@ -660,7 +659,7 @@ static int
qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
int duration, uint32_t test_mask, uint32_t test_result)
{
- uint32_t value;
+ uint32_t value = 0;
int timeout_error;
uint8_t retries;
int ret_val = QLA_SUCCESS;
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 8ba7c1db07c3..2fc902a9fade 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_NX2_H
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8da00ba54aec..f9c8ae9d669e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
@@ -40,6 +39,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
+int ql2xenforce_iocb_limit = 1;
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+
/*
* CT6 CTX allocation cache
*/
@@ -1885,7 +1889,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
- !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
@@ -1895,7 +1899,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
}
dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
- pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
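[Annotation] The two hunks above replace the deprecated pci_set_consistent_dma_mask() with dma_set_coherent_mask(). A condensed sketch of the try-64-bit-then-fall-back pattern (assumption: hypothetical probe helper, error handling elided):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void demo_config_dma(struct pci_dev *pdev)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return;		/* 64-bit streaming and coherent DMA OK */

	/* Otherwise settle for 32-bit masks. */
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}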
static void
@@ -3316,6 +3320,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < ha->max_qpairs; i++)
qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
+ qla_init_iocb_limit(base_vha);
if (ha->flags.running_gold_fw)
goto skip_dpc;
@@ -4225,6 +4230,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->sf_init_cb_dma);
if (!ha->sf_init_cb)
goto fail_sf_init_cb;
+ memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
@@ -4379,11 +4385,12 @@ int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
int rval;
- uint16_t size, max_cnt, temp;
+ uint16_t size, max_cnt;
+ uint32_t temp;
struct qla_hw_data *ha = vha->hw;
	/* Return if we don't need to allocate any extended logins */
- if (!ql2xexlogins)
+ if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
return QLA_SUCCESS;
if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
@@ -4872,7 +4879,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
}
INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
vha->host, vha->hw, vha,
@@ -5001,7 +5008,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
switch (code) {
case QLA_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
vha->host_no);
break;
default:
@@ -5089,6 +5096,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->fc4_type = e->u.new_sess.fc4_type;
if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
if (vha->flags.nvme_enabled)
@@ -5810,98 +5819,6 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
return true;
}
-static uint
-qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha)
-{
- if (IS_CNA_CAPABLE(ha))
- return RDP_PORT_SPEED_10GB;
-
- if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- unsigned int speeds = 0;
-
- if (ha->max_supported_speed == 2) {
- if (ha->min_supported_speed <= 6)
- speeds |= RDP_PORT_SPEED_64GB;
- }
-
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1) {
- if (ha->min_supported_speed <= 5)
- speeds |= RDP_PORT_SPEED_32GB;
- }
-
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 4)
- speeds |= RDP_PORT_SPEED_16GB;
- }
-
- if (ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 3)
- speeds |= RDP_PORT_SPEED_8GB;
- }
-
- if (ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 2)
- speeds |= RDP_PORT_SPEED_4GB;
- }
-
- return speeds;
- }
-
- if (IS_QLA2031(ha))
- return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB|
- RDP_PORT_SPEED_4GB;
-
- if (IS_QLA25XX(ha))
- return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB|
- RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
- if (IS_QLA24XX_TYPE(ha))
- return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB|
- RDP_PORT_SPEED_1GB;
-
- if (IS_QLA23XX(ha))
- return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
- return RDP_PORT_SPEED_1GB;
-}
-
-static uint
-qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha)
-{
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- return RDP_PORT_SPEED_1GB;
-
- case PORT_SPEED_2GB:
- return RDP_PORT_SPEED_2GB;
-
- case PORT_SPEED_4GB:
- return RDP_PORT_SPEED_4GB;
-
- case PORT_SPEED_8GB:
- return RDP_PORT_SPEED_8GB;
-
- case PORT_SPEED_10GB:
- return RDP_PORT_SPEED_10GB;
-
- case PORT_SPEED_16GB:
- return RDP_PORT_SPEED_16GB;
-
- case PORT_SPEED_32GB:
- return RDP_PORT_SPEED_32GB;
-
- case PORT_SPEED_64GB:
- return RDP_PORT_SPEED_64GB;
-
- default:
- return RDP_PORT_SPEED_UNKNOWN;
- }
-}
-
/*
* Function Name: qla24xx_process_purex_iocb
*
@@ -5921,12 +5838,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
dma_addr_t rsp_els_dma;
dma_addr_t rsp_payload_dma;
dma_addr_t stat_dma;
- dma_addr_t bbc_dma;
dma_addr_t sfp_dma;
struct els_entry_24xx *rsp_els = NULL;
struct rdp_rsp_payload *rsp_payload = NULL;
struct link_statistics *stat = NULL;
- struct buffer_credit_24xx *bbc = NULL;
uint8_t *sfp = NULL;
uint16_t sfp_flags = 0;
uint rsp_payload_length = sizeof(*rsp_payload);
@@ -5970,9 +5885,6 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
&stat_dma, GFP_KERNEL);
- bbc = dma_alloc_coherent(&ha->pdev->dev, sizeof(*bbc),
- &bbc_dma, GFP_KERNEL);
-
/* Prepare Response IOCB */
rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1;
@@ -6068,9 +5980,9 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
rsp_payload->port_speed_desc.desc_len =
cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
- qla25xx_rdp_port_speed_capability(ha));
+ qla25xx_fdmi_port_speed_capability(ha));
rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
- qla25xx_rdp_port_speed_currently(ha));
+ qla25xx_fdmi_port_speed_currently(ha));
/* Link Error Status Descriptor */
rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
@@ -6126,13 +6038,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
- if (bbc) {
- memset(bbc, 0, sizeof(*bbc));
- rval = qla24xx_get_buffer_credits(vha, bbc, bbc_dma);
- if (!rval) {
- rsp_payload->buffer_credit_desc.fcport_b2b =
- cpu_to_be32(LSW(bbc->parameter[0]));
- }
+ if (ha->flags.plogi_template_valid) {
+ uint32_t tmp =
+ be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
+ rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
}
if (rsp_payload_length < sizeof(*rsp_payload))
@@ -6310,9 +6219,6 @@ send:
}
dealloc:
- if (bbc)
- dma_free_coherent(&ha->pdev->dev, sizeof(*bbc),
- bbc, bbc_dma);
if (stat)
dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
stat, stat_dma);
@@ -7289,8 +7195,10 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
+ index = atomic_read(&ha->nvme_active_aen_cnt);
if (!vha->vp_idx &&
- (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+ (index != ha->nvme_last_rptd_aen) &&
+ (index >= DEFAULT_ZIO_THRESHOLD) &&
ha->zio_mode == QLA_ZIO_MODE_6 &&
!ha->flags.host_shutting_down) {
ql_log(ql_log_info, vha, 0x3002,
@@ -7302,9 +7210,8 @@ qla2x00_timer(struct timer_list *t)
}
if (!vha->vp_idx &&
- (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
- (ha->zio_mode == QLA_ZIO_MODE_6) &&
- (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+ atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
+ IS_ZIO_THRESHOLD_CAPABLE(ha)) {
ql_log(ql_log_info, vha, 0x3002,
"Sched: Set ZIO exchange threshold to %d.\n",
ha->last_zio_threshold);
@@ -8044,7 +7951,6 @@ module_exit(qla2x00_module_exit);
MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 2fb7ebfbbc38..a5f3000ae53b 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#define MAX_RETRIES_OF_ISP_ABORT 5
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 411b8a9ff393..0f92e9a044dc 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2d445bdb2129..a27a625839e6 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1111,6 +1111,8 @@ void qlt_free_session_done(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
+ qla2x00_dfs_remove_rport(vha, sess);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1229,14 +1231,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
case DSC_DELETE_PEND:
return;
case DSC_DELETED:
- if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
- wake_up_all(&tgt->waitQ);
- if (sess->vha->fcport_count == 0)
- wake_up_all(&sess->vha->fcport_waitQ);
-
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
- !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
+ if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
return;
+ }
break;
case DSC_UPD_FCPORT:
/*
@@ -2025,7 +2028,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_tgt_mgmt_cmd *mcmd =
container_of(work, struct qla_tgt_mgmt_cmd, work);
struct qla_hw_data *ha = mcmd->vha->hw;
- int rc = EIO;
+ int rc;
uint32_t tag;
unsigned long flags;
@@ -3781,7 +3784,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
"multiple abort. %p transport_state %x, t_state %x, "
"se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return EIO;
+ return -EIO;
}
cmd->aborted = 1;
cmd->trc_flags |= TRC_ABORT;
@@ -5668,7 +5671,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
/* found existing exchange */
qpair->retry_term_cnt++;
if (qpair->retry_term_cnt >= 5) {
- rc = EIO;
+ rc = -EIO;
qpair->retry_term_cnt = 0;
ql_log(ql_log_warn, vha, 0xffff,
"Unable to send ABTS Respond. Dumping firmware.\n");
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8dc82cfd38b2..bd8623ee156a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include "qla_tmpl.h"
@@ -12,33 +11,6 @@
#define IOBASE(vha) IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
-/* hardware_lock assumed held. */
-static void
-qla27xx_write_remote_reg(struct scsi_qla_host *vha,
- u32 addr, u32 data)
-{
- struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
-
- ql_dbg(ql_dbg_misc, vha, 0xd300,
- "%s: addr/data = %xh/%xh\n", __func__, addr, data);
-
- wrt_reg_dword(&reg->iobase_addr, 0x40);
- wrt_reg_dword(&reg->iobase_c4, data);
- wrt_reg_dword(&reg->iobase_window, addr);
-}
-
-void
-qla27xx_reset_mpi(scsi_qla_host_t *vha)
-{
- ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
- "Entered %s.\n", __func__);
-
- qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
- qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
-
- vha->hw->stat.num_mpi_reset++;
-}
-
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
@@ -906,8 +878,8 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
WARN_ON_ONCE(sscanf(qla2x00_version_str,
- "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
- v+0, v+1, v+2, v+3, v+4, v+5) != 6);
+ "%hhu.%hhu.%hhu.%hhu",
+ v + 0, v + 1, v + 2, v + 3) != 4);
tmp->driver_info[0] = cpu_to_le32(
v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
@@ -1028,22 +1000,25 @@ void
qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
- bool need_mpi_reset = true;
-#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-#endif
if (!vha->hw->mpi_fw_dump) {
ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
- } else if (vha->hw->mpi_fw_dumped) {
- ql_log(ql_log_warn, vha, 0x02f4,
- "-> MPI firmware already dumped (%p) -- ignoring request\n",
- vha->hw->mpi_fw_dump);
} else {
struct fwdt *fwdt = &vha->hw->fwdt[1];
ulong len;
void *buf = vha->hw->mpi_fw_dump;
+ bool walk_template_only = false;
+
+ if (vha->hw->mpi_fw_dumped) {
+ /* Use the spare area for any further dumps. */
+ buf += fwdt->dump_size;
+ walk_template_only = true;
+ ql_log(ql_log_warn, vha, 0x02f4,
+ "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
+ buf);
+ }
ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
if (!fwdt->template) {
@@ -1058,9 +1033,10 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0x02f7,
"-> fwdt1 fwdump residual=%+ld\n",
fwdt->dump_size - len);
- } else {
- need_mpi_reset = false;
}
+ vha->hw->stat.num_mpi_reset++;
+ if (walk_template_only)
+ goto bailout;
vha->hw->mpi_fw_dump_len = len;
vha->hw->mpi_fw_dumped = 1;
@@ -1072,12 +1048,8 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
bailout:
- if (need_mpi_reset)
- qla27xx_reset_mpi(vha);
-#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
void
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 89280b3477aa..c47184db5081 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#ifndef __QLA_DMP27_H__
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8ccd9ba1ddef..c2d4da52f4a9 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.25-k"
+#define QLA2XXX_VERSION "10.02.00.103-k"
#define QLA_DRIVER_MAJOR_VER 10
-#define QLA_DRIVER_MINOR_VER 1
+#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 0
+#define QLA_DRIVER_BETA_VER 103
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 44bfe162654a..61017acd3458 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -850,7 +850,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
- return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+ return sprintf(page, "%d\n", tpg->tpg_attrib.name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index de10e67de8c0..5f56122f6664 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/ratelimit.h>
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index f34583e5f8de..f10167c71dd3 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL483XX_H
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 463239c972b0..ec4352818fbf 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 9231917066d3..c447a9d598a1 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
index 88c2401910c0..06db38561566 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.h
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2011 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_BSG_H
#define __QL4_BSG_H
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 5649e9ef59a8..f43e675c5693 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index 51c365bcf912..171c89165009 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2012 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 817f312023a9..f5b382ed0a1b 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_DEF_H
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 699575efc9ba..b9142464d3f0 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QLA4X_FW_H
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index bce96a58f14e..b8f02210aeb0 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA4x_GBL_H
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4a7ef971a387..f786ac2f5548 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <scsi/iscsi_if.h>
@@ -1170,7 +1169,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry *ddb_entry;
- int status = QLA_ERROR;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
@@ -1192,7 +1190,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
- return status;
+ return QLA_ERROR;
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 655b7bb644d9..9ced6b325cb3 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a8df2d7eb069..cbd1e6ffcd67 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index ade5eafdf81e..a51910ae9525 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index bc8de7d402d5..17b719a8b6fb 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/ctype.h>
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 3bf418fbd432..f08a5abcb31a 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index e97d79ff16f7..b96c06f50402 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QL4XNVRM_H_
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 038e19b1e3c2..f1767b21076f 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/delay.h>
#include <linux/io.h>
@@ -3226,7 +3225,7 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
switch (code) {
case QL4_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
ha->host_no);
break;
default:
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index b7a6e7f169ca..52a5209ae42a 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA_NX_H
#define __QLA_NX_H
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 676778cbc550..2c23b692e318 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
@@ -1254,7 +1253,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
if (ql_iscsi_stats)
- dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ dma_free_coherent(&ha->pdev->dev, stats_size,
ql_iscsi_stats, iscsi_stats_dma);
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f11eaa773339..fb1c14269f00 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2013 QLogic Corporation
- *
- * See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 48ff7d88af86..d84e218d32cb 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1468,22 +1468,10 @@ static struct platform_driver qpti_sbus_driver = {
.probe = qpti_sbus_probe,
.remove = qpti_sbus_remove,
};
-
-static int __init qpti_init(void)
-{
- return platform_driver_register(&qpti_sbus_driver);
-}
-
-static void __exit qpti_exit(void)
-{
- platform_driver_unregister(&qpti_sbus_driver);
-}
+module_platform_driver(qpti_sbus_driver);
MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");
-
-module_init(qpti_init);
-module_exit(qpti_exit);
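[Annotation] module_platform_driver() generates exactly the module_init()/module_exit() pair deleted here, calling platform_driver_register()/platform_driver_unregister(). A minimal sketch of the resulting shape (assumption: hypothetical "demo" device):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device */
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = { .name = "demo" },
	.probe  = demo_probe,
	.remove = demo_remove,
};

module_platform_driver(demo_driver);	/* init/exit boilerplate implied */

MODULE_LICENSE("GPL");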
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1ad7260d4758..24c0f7ec0351 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -209,10 +209,6 @@ static const char *sdebug_version_date = "20200710";
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
-/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
- * or "peripheral device" addressing (value 0) */
-#define SAM2_LUN_ADDRESS_METHOD 0
-
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
* (for response) per submit queue at one time. Can be reduced by max_queue
* option. Command responses are not queued when jdelay=0 and ndelay=0. The
@@ -791,6 +787,13 @@ static bool sdebug_wp;
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
+enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
+ SAM_LUN_AM_FLAT = 0x1,
+ SAM_LUN_AM_LOGICAL_UNIT = 0x2,
+ SAM_LUN_AM_EXTENDED = 0x3};
+static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
+
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -4179,6 +4182,8 @@ static int resp_report_luns(struct scsi_cmnd *scp,
if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
break;
int_to_scsilun(lun++, lun_p);
+ if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
+ lun_p->scsi_lun[0] |= 0x40;
}
if (j < RL_BUCKET_ELEMS)
break;
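[Annotation] A userspace sketch of the flat-space encoding applied above (assumption: mirrors int_to_scsilun() for LUNs below 16384; the address method occupies the top two bits of byte 0, so ORing in 0x40 selects method 1, flat space):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t lun = 300;			/* >= 256, needs flat addressing */
	uint8_t scsi_lun[8] = { 0 };

	scsi_lun[0] = (lun >> 8) & 0x3f;	/* high LUN bits */
	scsi_lun[1] = lun & 0xff;		/* low LUN bits */
	scsi_lun[0] |= 0x40;			/* flat space address method */

	printf("byte0=%#x byte1=%#x\n", scsi_lun[0], scsi_lun[1]);
	return 0;
}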
@@ -4696,19 +4701,14 @@ fini:
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u16 hwq;
+ u32 tag = blk_mq_unique_tag(cmnd->request);
- if (sdebug_host_max_queue) {
- /* Provide a simple method to choose the hwq */
- hwq = smp_processor_id() % submit_queues;
- } else {
- u32 tag = blk_mq_unique_tag(cmnd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
- hwq = blk_mq_unique_tag_to_hwq(tag);
+ pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
+ if (WARN_ON_ONCE(hwq >= submit_queues))
+ hwq = 0;
- pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
- if (WARN_ON_ONCE(hwq >= submit_queues))
- hwq = 0;
- }
return sdebug_q_arr + hwq;
}
@@ -4944,6 +4944,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
pr_err("Host info NULL\n");
return NULL;
}
+
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
(devip->target == sdev->id) &&
@@ -5257,7 +5258,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
struct msdos_partition *pp;
- int starts[SDEBUG_MAX_PARTS + 2];
+ int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
int sectors_per_part, num_sectors, k;
int heads_by_sects, start_sec, end_sec;
@@ -5268,14 +5269,18 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
sdebug_num_parts = SDEBUG_MAX_PARTS;
pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
}
- num_sectors = (int)sdebug_store_sectors;
+ num_sectors = (int)get_sdebug_capacity();
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
starts[0] = sdebug_sectors_per;
- for (k = 1; k < sdebug_num_parts; ++k)
+ max_part_secs = sectors_per_part;
+ for (k = 1; k < sdebug_num_parts; ++k) {
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
+ if (starts[k] - starts[k - 1] < max_part_secs)
+ max_part_secs = starts[k] - starts[k - 1];
+ }
starts[sdebug_num_parts] = num_sectors;
starts[sdebug_num_parts + 1] = 0;
@@ -5284,7 +5289,7 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
pp = (struct msdos_partition *)(ramp + 0x1be);
for (k = 0; starts[k + 1]; ++k, ++pp) {
start_sec = starts[k];
- end_sec = starts[k + 1] - 1;
+ end_sec = starts[k] + max_part_secs - 1;
pp->boot_ind = 0;
pp->cyl = start_sec / heads_by_sects;
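[Annotation] The fix caps every partition at max_part_secs, the smallest gap between consecutive cylinder-aligned starts, so the rounded-down starts can no longer overlap the previous partition's end. A runnable userspace model with a made-up geometry:

#include <stdio.h>

int main(void)
{
	int num_sectors = 2048, sectors_per = 32, heads = 8, num_parts = 4;
	int heads_by_sects = heads * sectors_per;
	int sectors_per_part = (num_sectors - sectors_per) / num_parts;
	int starts[5], max_part_secs = sectors_per_part, k;

	starts[0] = sectors_per;
	for (k = 1; k < num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;	/* cylinder aligned */
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	for (k = 0; k < num_parts; ++k)
		printf("part %d: start=%d end=%d\n",
		       k, starts[k], starts[k] + max_part_secs - 1);
	return 0;
}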
@@ -5584,6 +5589,7 @@ module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
+module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
@@ -5657,6 +5663,7 @@ MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
+MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
@@ -6104,6 +6111,43 @@ every_nth_done:
}
static DRIVER_ATTR_RW(every_nth);
+static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
+}
+static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+ bool changed;
+
+ if (kstrtoint(buf, 0, &n))
+ return -EINVAL;
+ if (n >= 0) {
+ if (n > (int)SAM_LUN_AM_FLAT) {
+ pr_warn("only LUN address methods 0 and 1 are supported\n");
+ return -EINVAL;
+ }
+ changed = ((int)sdebug_lun_am != n);
+ sdebug_lun_am = n;
+ if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(lun_format);
+
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
@@ -6542,6 +6586,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_dev_size_mb.attr,
&driver_attr_num_parts.attr,
&driver_attr_every_nth.attr,
+ &driver_attr_lun_format.attr,
&driver_attr_max_luns.attr,
&driver_attr_max_queue.attr,
&driver_attr_no_uld.attr,
@@ -6634,9 +6679,19 @@ static int __init scsi_debug_init(void)
pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
return -EINVAL;
}
+
+ sdebug_lun_am = sdebug_lun_am_i;
+ if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
+ pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
+ sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+ }
+
if (sdebug_max_luns > 256) {
- pr_warn("max_luns can be no more than 256, use default\n");
- sdebug_max_luns = DEF_MAX_LUNS;
+		if (sdebug_max_luns > 16384) {
+			pr_warn("max_luns can be no more than 16384, use default\n");
+			sdebug_max_luns = DEF_MAX_LUNS;
+		} else {
+			sdebug_lun_am = SAM_LUN_AM_FLAT;
+		}
}
if (sdebug_lowest_aligned > 0x3fff) {
@@ -7158,6 +7213,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
int k, na;
int errsts = 0;
+ u64 lun_index = sdp->lun & 0x3FFF;
u32 flags;
u16 sa;
u8 opcode = cmd[0];
@@ -7191,7 +7247,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
- if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
+ if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
goto err_out;
sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
@@ -7347,10 +7403,7 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = to_sdebug_host(dev);
- if (sdebug_host_max_queue)
- sdebug_driver_template.can_queue = sdebug_host_max_queue;
- else
- sdebug_driver_template.can_queue = sdebug_max_queue;
+ sdebug_driver_template.can_queue = sdebug_max_queue;
if (!sdebug_clustering)
sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
@@ -7367,11 +7420,11 @@ static int sdebug_driver_probe(struct device *dev)
}
/*
* Decide whether to tell scsi subsystem that we want mq. The
- * following should give the same answer for each host. If the host
- * has a limit of hostwide max commands, then do not set.
+ * following should give the same answer for each host.
*/
- if (!sdebug_host_max_queue)
- hpnt->nr_hw_queues = submit_queues;
+ hpnt->nr_hw_queues = submit_queues;
+ if (sdebug_host_max_queue)
+ hpnt->host_tagset = 1;
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7d3571a2bd89..f11f51e2465f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -116,6 +116,14 @@ static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
return 1;
}
+static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd)
+{
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return true;
+
+ return ++cmd->retries <= cmd->allowed;
+}
+
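[Annotation] A userspace model of the new retry gate (assumption: SCSI_CMD_RETRIES_NO_LIMIT is the -1 sentinel the SCSI core uses for unlimited retries):

#include <stdbool.h>
#include <stdio.h>

#define SCSI_CMD_RETRIES_NO_LIMIT (-1)

struct cmd { int retries, allowed; };

static bool cmd_retry_allowed(struct cmd *c)
{
	if (c->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return true;			/* never give up */
	return ++c->retries <= c->allowed;
}

int main(void)
{
	struct cmd bounded = { 0, 2 };
	struct cmd unbounded = { 0, SCSI_CMD_RETRIES_NO_LIMIT };

	while (cmd_retry_allowed(&bounded))
		;				/* retry attempts */
	printf("bounded gave up at retries=%d (allowed=%d)\n",
	       bounded.retries, bounded.allowed);
	printf("unbounded still retriable: %d\n", cmd_retry_allowed(&unbounded));
	return 0;
}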
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -151,7 +159,7 @@ scmd_eh_abort_handler(struct work_struct *work)
"eh timeout, not retrying "
"aborted command\n"));
} else if (!scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
+ scsi_cmd_retry_allowed(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
@@ -1264,11 +1272,18 @@ int scsi_eh_get_sense(struct list_head *work_q,
* upper level.
*/
if (rtn == SUCCESS)
- /* we don't want this command reissued, just
- * finished with the sense data, so set
- * retries to the max allowed to ensure it
- * won't get reissued */
- scmd->retries = scmd->allowed;
+ /*
+ * We don't want this command reissued, just finished
+ * with the sense data, so set retries to the max
+ * allowed to ensure it won't get reissued. If the user
+ * has requested infinite retries, we also want to
+ * finish this command, so force completion by setting
+ * retries and allowed to the same value.
+ */
+ if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ scmd->retries = scmd->allowed = 1;
+ else
+ scmd->retries = scmd->allowed;
else if (rtn != NEEDS_RETRY)
continue;
@@ -1755,8 +1770,8 @@ check_type:
if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
blk_rq_is_passthrough(scmd->request))
return 1;
- else
- return 0;
+
+ return 0;
}
/**
@@ -1944,8 +1959,7 @@ maybe_retry:
* the request was not marked fast fail. Note that above,
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
- if ((++scmd->retries) <= scmd->allowed
- && !scsi_noretry_cmd(scmd)) {
+ if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@@ -2091,8 +2105,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
+ !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7affaaf8b98e..60c7a7d74852 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -293,21 +293,6 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);
-/**
- * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
- * @cmd: command that is ready to be queued.
- *
- * This function has the job of initializing a number of fields related to error
- * handling. Typically this will be called once for each command, as required.
- */
-static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
-{
- scsi_set_resid(cmd, 0);
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (cmd->cmd_len == 0)
- cmd->cmd_len = scsi_command_size(cmd->cmnd);
-}
-
/*
* Wake up the error handler if necessary. Avoid as follows that the error
* handler is not woken up if host in-flight requests number ==
@@ -530,7 +515,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_free_sgtables(struct scsi_cmnd *cmd)
+void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -539,6 +524,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd)
sg_free_table_chained(&cmd->prot_sdb->table,
SCSI_INLINE_PROT_SG_CNT);
}
+EXPORT_SYMBOL_GPL(scsi_free_sgtables);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
@@ -549,10 +535,27 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_run_queue_async(struct scsi_device *sdev)
{
if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
+ !list_empty(&sdev->host->starved_list)) {
kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(sdev->request_queue, true);
+ } else {
+ /*
+ * smp_mb() present in sbitmap_queue_clear() or implied in
+ * .end_io is for ordering writing .device_busy in
+ * scsi_device_unbusy() and reading sdev->restarts.
+ */
+ int old = atomic_read(&sdev->restarts);
+
+ /*
+ * ->restarts has to be kept as non-zero if new budget
+ * contention occurs.
+ *
+ * No need to run queue when either another re-run
+ * queue wins in updating ->restarts or a new budget
+ * contention occurs.
+ */
+ if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+ }
}
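[Annotation] A userspace model of the ->restarts coalescing above (assumption: C11 atomics stand in for the kernel's atomic_t helpers). Only the caller whose compare-and-swap clears the counter re-runs the queues; a racing new budget contention leaves it non-zero, so a later re-run still fires:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int restarts;

static void run_queue_async(void)
{
	int old = atomic_load(&restarts);

	if (old && atomic_compare_exchange_strong(&restarts, &old, 0))
		printf("re-running hw queues (coalesced %d restart(s))\n", old);
}

int main(void)
{
	atomic_fetch_add(&restarts, 1);	/* budget contention recorded */
	atomic_fetch_add(&restarts, 1);	/* second waiter piggybacks */
	run_queue_async();		/* one re-run covers both */
	run_queue_async();		/* nothing left to do */
	return 0;
}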
/* Returns false when no more bytes to process, true if there are more */
@@ -652,6 +655,23 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
scsi_mq_requeue_cmd(cmd);
}
+static bool scsi_cmd_runtime_exceeded(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+ unsigned long wait_for;
+
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return false;
+
+ wait_for = (cmd->allowed + 1) * req->timeout;
+ if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
+ wait_for/HZ);
+ return true;
+ }
+ return false;
+}
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
@@ -660,7 +680,6 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
- unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -758,6 +777,15 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
/* See SSC3rXX or current. */
action = ACTION_FAIL;
break;
+ case DATA_PROTECT:
+ action = ACTION_FAIL;
+ if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
+ (sshdr.asc == 0x55 &&
+ (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
+ /* Insufficient zone resources */
+ blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
+ }
+ break;
default:
action = ACTION_FAIL;
break;
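[Annotation] A userspace sketch of the new DATA PROTECT decode (the asc/ascq pairs are taken verbatim from the hunk above):

#include <stdbool.h>
#include <stdio.h>

static bool insufficient_zone_resources(unsigned char asc, unsigned char ascq)
{
	return (asc == 0x0C && ascq == 0x12) ||
	       (asc == 0x55 && (ascq == 0x0E || ascq == 0x0F));
}

int main(void)
{
	printf("0x55/0x0E -> %d\n", insufficient_zone_resources(0x55, 0x0E));
	printf("0x55/0x10 -> %d\n", insufficient_zone_resources(0x55, 0x10));
	return 0;
}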
@@ -765,8 +793,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
} else
action = ACTION_FAIL;
- if (action != ACTION_FAIL &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
+	if (action != ACTION_FAIL && scsi_cmd_runtime_exceeded(cmd))
action = ACTION_FAIL;
switch (action) {
@@ -966,7 +993,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
}
/**
- * scsi_init_io - SCSI I/O initialization function.
+ * scsi_alloc_sgtables - allocate S/G tables for a command
* @cmd: command descriptor we wish to initialize
*
* Returns:
@@ -974,7 +1001,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
* * BLK_STS_RESOURCE - if the failure is retryable
* * BLK_STS_IOERR - if the failure is fatal
*/
-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
+blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
@@ -1066,7 +1093,7 @@ out_free_sgtables:
scsi_free_sgtables(cmd);
return ret;
}
-EXPORT_SYMBOL(scsi_init_io);
+EXPORT_SYMBOL(scsi_alloc_sgtables);
/**
* scsi_initialize_rq - initialize struct scsi_cmnd partially
@@ -1154,7 +1181,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
* submit a request without an attached bio.
*/
if (req->bio) {
- blk_status_t ret = scsi_init_io(cmd);
+ blk_status_t ret = scsi_alloc_sgtables(cmd);
if (unlikely(ret != BLK_STS_OK))
return ret;
} else {
@@ -1164,58 +1191,16 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
}
cmd->cmd_len = scsi_req(req)->cmd_len;
+ if (cmd->cmd_len == 0)
+ cmd->cmd_len = scsi_command_size(cmd->cmnd);
cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = scsi_req(req)->retries;
return BLK_STS_OK;
}
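The cmd_len fallback above derives the CDB length from the opcode when a passthrough submitter left it at zero. For fixed-format CDBs, scsi_command_size()-style helpers map the opcode's top three group bits to a length table; a sketch of that idea, assuming the conventional SCSI group sizes and ignoring variable-length CDBs:

#include <stdio.h>

/* One length per SCSI opcode group (top three bits of the opcode). */
static const unsigned char cdb_group_size[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

static unsigned int cdb_size(unsigned char opcode)
{
	return cdb_group_size[(opcode >> 5) & 7];
}

int main(void)
{
	printf("READ(6)  0x08 -> %u bytes\n", cdb_size(0x08));
	printf("READ(10) 0x28 -> %u bytes\n", cdb_size(0x28));
	printf("READ(16) 0x88 -> %u bytes\n", cdb_size(0x88));
	return 0;
}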
-/*
- * Setup a normal block command. These are simple request from filesystems
- * that still need to be translated to SCSI CDBs from the ULD.
- */
-static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-
- if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
- blk_status_t ret = sdev->handler->prep_fn(sdev, req);
- if (ret != BLK_STS_OK)
- return ret;
- }
-
- cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
- memset(cmd->cmnd, 0, BLK_MAX_CDB);
- return scsi_cmd_to_driver(cmd)->init_command(cmd);
-}
-
-static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- blk_status_t ret;
-
- if (!blk_rq_bytes(req))
- cmd->sc_data_direction = DMA_NONE;
- else if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else
- cmd->sc_data_direction = DMA_FROM_DEVICE;
-
- if (blk_rq_is_scsi(req))
- ret = scsi_setup_scsi_cmnd(sdev, req);
- else
- ret = scsi_setup_fs_cmnd(sdev, req);
-
- if (ret != BLK_STS_OK)
- scsi_free_sgtables(cmd);
-
- return ret;
-}
-
static blk_status_t
-scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
case SDEV_OFFLINE:
@@ -1439,7 +1424,6 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
static void scsi_softirq_done(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -1449,13 +1433,8 @@ static void scsi_softirq_done(struct request *rq)
atomic_inc(&cmd->device->ioerr_cnt);
disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- scmd_printk(KERN_ERR, cmd,
- "timing out command, waited %lus\n",
- wait_for/HZ);
+ if (disposition != SUCCESS && scsi_cmd_runtime_exceeded(cmd))
disposition = SUCCESS;
- }
scsi_log_completion(cmd, disposition);
@@ -1563,7 +1542,7 @@ static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
sizeof(struct scatterlist);
}
-static blk_status_t scsi_mq_prep_fn(struct request *req)
+static blk_status_t scsi_prepare_cmd(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
@@ -1575,6 +1554,10 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
cmd->request = req;
cmd->tag = req->tag;
cmd->prot_op = SCSI_PROT_NORMAL;
+ if (blk_rq_bytes(req))
+ cmd->sc_data_direction = rq_dma_dir(req);
+ else
+ cmd->sc_data_direction = DMA_NONE;
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
@@ -1586,9 +1569,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
(struct scatterlist *)(cmd->prot_sdb + 1);
}
- blk_mq_start_request(req);
+ /*
+ * Special handling for passthrough commands, which don't go to the ULP
+ * at all:
+ */
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+
+ if (sdev->handler && sdev->handler->prep_fn) {
+ blk_status_t ret = sdev->handler->prep_fn(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ return ret;
+ }
- return scsi_setup_cmnd(sdev, req);
+ cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
+ memset(cmd->cmnd, 0, BLK_MAX_CDB);
+ return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
@@ -1612,7 +1609,30 @@ static bool scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
- return scsi_dev_queue_ready(q, sdev);
+ if (scsi_dev_queue_ready(q, sdev))
+ return true;
+
+ atomic_inc(&sdev->restarts);
+
+ /*
+ * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
+ * .restarts must be incremented before .device_busy is read because the
+ * code in scsi_run_queue_async() depends on the order of these operations.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * If all in-flight requests originated from this LUN are completed
+ * before reading .device_busy, sdev->device_busy will be observed as
+ * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
+ * soon. Otherwise, completion of one of these requests will observe
+ * the .restarts flag, and the request queue will be run for handling
+ * this request, see scsi_end_request().
+ */
+ if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+ !scsi_device_blocked(sdev)))
+ blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
+ return false;
}
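The barrier comments above describe a symmetric publish-then-check protocol: the budget path increments ->restarts and then reads ->device_busy, while the completion path decrements ->device_busy and then reads ->restarts, so at least one side must observe the other's update. A userspace sketch of that ordering with C11 fences; the function names and the single-threaded main() are only illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int restarts;
static atomic_int device_busy;

/* Budget-failure side: publish the restart, then check in-flight count. */
static int budget_side(void)
{
	atomic_fetch_add(&restarts, 1);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(&device_busy);	/* 0 => re-run ourselves */
}

/* Completion side: drop the in-flight count, then check restarts. */
static int completion_side(void)
{
	atomic_fetch_sub(&device_busy, 1);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(&restarts);		/* non-zero => re-run queue */
}

int main(void)
{
	atomic_store(&device_busy, 1);		/* one request in flight */

	/*
	 * Whatever the interleaving, budget_side() sees device_busy == 0
	 * or completion_side() sees restarts != 0 (or both), never
	 * neither; that is what the paired full fences guarantee.
	 */
	printf("busy=%d\n", budget_side());
	printf("restarts=%d\n", completion_side());
	return 0;
}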
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1631,7 +1651,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
* commands.
*/
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_prep_state_check(sdev, req);
+ ret = scsi_device_state_check(sdev, req);
if (ret != BLK_STS_OK)
goto out_put_budget;
}
@@ -1643,13 +1663,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
- ret = scsi_mq_prep_fn(req);
+ ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
- blk_mq_start_request(req);
}
cmd->flags &= SCMD_PRESERVED_FLAGS;
@@ -1658,9 +1677,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (bd->last)
cmd->flags |= SCMD_LAST;
- scsi_init_cmd_errh(cmd);
+ scsi_set_resid(cmd, 0);
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
cmd->scsi_done = scsi_mq_done;
+ blk_mq_start_request(req);
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -1891,6 +1912,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
tag_set->driver_data = shost;
+ if (shost->host_tagset)
+ tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
return blk_mq_alloc_tag_set(tag_set);
}
@@ -1919,7 +1942,6 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
return sdev;
}
-EXPORT_SYMBOL_GPL(scsi_device_from_queue);
/**
* scsi_block_requests - Utility function used by low-level drivers to prevent
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d12ada035961..180636d54982 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -15,6 +15,7 @@ struct scsi_host_template;
struct Scsi_Host;
struct scsi_nl_hdr;
+#define SCSI_CMD_RETRIES_NO_LIMIT -1
/*
* Scsi Error Handler Flags
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f2437a7570ce..9af50e6f94c4 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1714,15 +1714,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
*/
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
- struct async_scan_data *data;
+ struct async_scan_data *data = NULL;
unsigned long flags;
if (strncmp(scsi_scan_type, "sync", 4) == 0)
return NULL;
+ mutex_lock(&shost->scan_mutex);
if (shost->async_scan) {
shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
- return NULL;
+ goto err;
}
data = kmalloc(sizeof(*data), GFP_KERNEL);
@@ -1733,7 +1734,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
goto err;
init_completion(&data->prev_finished);
- mutex_lock(&shost->scan_mutex);
spin_lock_irqsave(shost->host_lock, flags);
shost->async_scan = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1748,6 +1748,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return data;
err:
+ mutex_unlock(&shost->scan_mutex);
kfree(data);
return NULL;
}
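The fix above widens the scan_mutex critical section so the async_scan check and the flag update happen under the same lock, with one unlock shared by every error exit. A minimal pthread sketch of that shape, assuming made-up names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static int async_scan;

static void *prep_async_scan(void)
{
	void *data = NULL;

	pthread_mutex_lock(&scan_mutex);
	if (async_scan) {			/* now checked under the lock */
		fprintf(stderr, "called twice\n");
		goto err;
	}

	data = malloc(64);
	if (!data)
		goto err;

	async_scan = 1;
	pthread_mutex_unlock(&scan_mutex);
	return data;
err:
	pthread_mutex_unlock(&scan_mutex);	/* one unlock for all errors */
	free(data);
	return NULL;
}

int main(void)
{
	void *a = prep_async_scan();
	void *b = prep_async_scan();		/* rejected: already scanning */

	printf("first=%p second=%p\n", a, b);
	free(a);
	return 0;
}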
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 163dbcb741c1..d6e344fa33ad 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -393,6 +393,16 @@ show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL);
+static ssize_t
+show_nr_hw_queues(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
+
+ return snprintf(buf, 20, "%d\n", tag_set->nr_hw_queues);
+}
+static DEVICE_ATTR(nr_hw_queues, S_IRUGO, show_nr_hw_queues, NULL);
+
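Once registered, the queue count can be read back through sysfs like any other host attribute. A hypothetical userspace check; the host0 path is an example and may not exist on a given system:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/scsi_host/host0/nr_hw_queues", "r");

	if (!f) {
		perror("nr_hw_queues");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("host0 hardware queues: %s", buf);
	fclose(f);
	return 0;
}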
static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_use_blk_mq.attr,
&dev_attr_unique_id.attr,
@@ -411,6 +421,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_prot_guard_type.attr,
&dev_attr_host_reset.attr,
&dev_attr_eh_deadline.attr,
+ &dev_attr_nr_hw_queues.attr,
NULL
};
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2732fa65119c..2ff7f06203da 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -253,6 +253,7 @@ static const struct {
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_64GBIT, "64 Gbit" },
{ FC_PORTSPEED_128GBIT, "128 Gbit" },
+ { FC_PORTSPEED_256GBIT, "256 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
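fc_bitfield_name_search() generates a name-lookup helper over tables like the one above. A simplified single-flag version of that lookup, with illustrative flag values rather than the kernel's FC_PORTSPEED_* bits:

#include <stdio.h>

struct speed_name {
	unsigned int flag;
	const char *name;
};

static const struct speed_name port_speeds[] = {
	{ 1u << 0, "1 Gbit" },
	{ 1u << 1, "2 Gbit" },
	{ 1u << 2, "256 Gbit" },	/* the new entry would slot in here */
};

static const char *port_speed_name(unsigned int flag)
{
	for (size_t i = 0; i < sizeof(port_speeds) / sizeof(port_speeds[0]); i++)
		if (port_speeds[i].flag == flag)
			return port_speeds[i].name;
	return "Unknown";
}

int main(void)
{
	printf("%s\n", port_speed_name(1u << 2));
	return 0;
}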
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 16503e22691e..656bcf4940d6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -194,7 +194,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
}
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
- SD_MAX_RETRIES, &data, NULL))
+ sdkp->max_retries, &data, NULL))
return -EINVAL;
len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
data.block_descriptor_length);
@@ -212,12 +212,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
data.device_specific = 0;
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr)) {
+ sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
- revalidate_disk(sdkp->disk);
+ sd_revalidate_disk(sdkp->disk);
return count;
}
@@ -543,6 +543,39 @@ zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(zoned_cap);
+static ssize_t
+max_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdev = sdkp->device;
+ int retries, err;
+
+ err = kstrtoint(buf, 10, &retries);
+ if (err)
+ return err;
+
+ if (retries == SCSI_CMD_RETRIES_NO_LIMIT ||
+     (retries >= 0 && retries <= SD_MAX_RETRIES)) {
+ sdkp->max_retries = retries;
+ return count;
+ }
+
+ sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
+ SD_MAX_RETRIES);
+ return -EINVAL;
+}
+
+static ssize_t
+max_retries_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return sprintf(buf, "%d\n", sdkp->max_retries);
+}
+
+static DEVICE_ATTR_RW(max_retries);
+
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -557,6 +590,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
&dev_attr_zoned_cap.attr,
+ &dev_attr_max_retries.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
@@ -665,7 +699,8 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
{
- struct scsi_device *sdev = data;
+ struct scsi_disk *sdkp = data;
+ struct scsi_device *sdev = sdkp->device;
u8 cdb[12] = { 0, };
int ret;
@@ -676,7 +711,7 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
ret = scsi_execute_req(sdev, cdb,
send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
@@ -839,6 +874,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -862,11 +898,11 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
put_unaligned_be64(lba, &buf[8]);
put_unaligned_be32(nr_blocks, &buf[16]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = SD_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
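sd_setup_unmap_cmnd() above packs a 24-byte UNMAP parameter list: two big-endian lengths in the header, then one block descriptor holding the LBA at offset 8 and the block count at offset 16. A standalone sketch of that layout; the descriptor offsets come from the code above, the header lengths follow SBC's UNMAP layout, and the put_be*() helpers are local stand-ins for the kernel's put_unaligned_be*():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint16_t v, unsigned char *p)
{
	p[0] = (unsigned char)(v >> 8);
	p[1] = (unsigned char)v;
}

static void put_be32(uint32_t v, unsigned char *p)
{
	for (int i = 0; i < 4; i++)
		p[i] = (unsigned char)(v >> (24 - 8 * i));
}

static void put_be64(uint64_t v, unsigned char *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = (unsigned char)(v >> (56 - 8 * i));
}

int main(void)
{
	unsigned char buf[24];
	uint64_t lba = 0x123456789AULL;
	uint32_t nr_blocks = 2048;

	memset(buf, 0, sizeof(buf));
	put_be16(24 - 2, &buf[0]);	/* UNMAP data length */
	put_be16(24 - 8, &buf[2]);	/* block descriptor data length */
	put_be64(lba, &buf[8]);
	put_be32(nr_blocks, &buf[16]);

	for (int i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}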
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
@@ -874,6 +910,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -893,11 +930,11 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
@@ -905,6 +942,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -924,11 +962,11 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
- return scsi_init_io(cmd);
+ return scsi_alloc_sgtables(cmd);
}
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
@@ -1056,7 +1094,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
}
cmd->transfersize = sdp->sector_size;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
/*
* For WRITE SAME the data transferred via the DATA OUT buffer is
@@ -1069,7 +1107,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
* knows how much to actually write.
*/
rq->__data_len = sdp->sector_size;
- ret = scsi_init_io(cmd);
+ ret = scsi_alloc_sgtables(cmd);
rq->__data_len = blk_rq_bytes(rq);
return ret;
@@ -1078,6 +1116,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1085,7 +1124,7 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
cmd->cmnd[0] = SYNCHRONIZE_CACHE;
cmd->cmd_len = 10;
cmd->transfersize = 0;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
return BLK_STS_OK;
@@ -1187,23 +1226,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
unsigned int dif;
bool dix;
- ret = scsi_init_io(cmd);
+ ret = scsi_alloc_sgtables(cmd);
if (ret != BLK_STS_OK)
return ret;
+ ret = BLK_STS_IOERR;
if (!scsi_device_online(sdp) || sdp->changed) {
scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
- return BLK_STS_IOERR;
+ goto fail;
}
if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
- return BLK_STS_IOERR;
+ goto fail;
}
if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
- return BLK_STS_IOERR;
+ goto fail;
}
/*
@@ -1225,7 +1265,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
if (req_op(rq) == REQ_OP_ZONE_APPEND) {
ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
if (ret)
- return ret;
+ goto fail;
}
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
@@ -1253,7 +1293,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
}
if (unlikely(ret != BLK_STS_OK))
- return ret;
+ goto fail;
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -1262,7 +1302,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
*/
cmd->transfersize = sdp->sector_size;
cmd->underflow = nr_blocks << 9;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->sdb.length = nr_blocks * sdp->sector_size;
SCSI_LOG_HLQUEUE(1,
@@ -1277,10 +1317,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
blk_rq_sectors(rq)));
/*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
*/
return BLK_STS_OK;
+fail:
+ scsi_free_sgtables(cmd);
+ return ret;
}
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
@@ -1381,8 +1423,10 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
if (!scsi_block_when_processing_errors(sdev))
goto error_out;
- if (sdev->removable || sdkp->write_prot)
- check_disk_change(bdev);
+ if (sdev->removable || sdkp->write_prot) {
+ if (bdev_check_media_change(bdev))
+ sd_revalidate_disk(bdev->bd_disk);
+ }
/*
* If the drive is empty, just let the open fail.
@@ -1609,7 +1653,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
if (scsi_block_when_processing_errors(sdp)) {
struct scsi_sense_hdr sshdr = { 0, };
- retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
&sshdr);
/* failed to execute TUR, assume media not present */
@@ -1666,7 +1710,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
* flush everything.
*/
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
- timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ timeout, sdkp->max_retries, 0, RQF_PM, NULL);
if (res == 0)
break;
}
@@ -1706,8 +1750,10 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
static void sd_rescan(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ int ret;
- revalidate_disk(sdkp->disk);
+ ret = sd_revalidate_disk(sdkp->disk);
+ revalidate_disk_size(sdkp->disk, ret == 0);
}
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1761,7 +1807,8 @@ static char sd_pr_type(enum pr_type type)
static int sd_pr_command(struct block_device *bdev, u8 sa,
u64 key, u64 sa_key, u8 type, u8 flags)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
int result;
u8 cmd[16] = { 0, };
@@ -1777,7 +1824,7 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
data[20] = flags;
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
- &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
if (driver_byte(result) == DRIVER_SENSE &&
scsi_sense_valid(&sshdr)) {
@@ -1841,7 +1888,6 @@ static const struct block_device_operations sd_fops = {
.compat_ioctl = sd_compat_ioctl,
#endif
.check_events = sd_check_events,
- .revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
.pr_ops = &sd_pr_ops,
@@ -2114,7 +2160,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
the_result = scsi_execute_req(sdkp->device, cmd,
DMA_NONE, NULL, 0,
&sshdr, SD_TIMEOUT,
- SD_MAX_RETRIES, NULL);
+ sdkp->max_retries, NULL);
/*
* If the drive has indicated to us that it
@@ -2170,7 +2216,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] |= 1 << 4;
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES,
+ SD_TIMEOUT, sdkp->max_retries,
NULL);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
@@ -2312,7 +2358,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, RC16_LEN, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2397,7 +2443,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, 8, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2582,12 +2628,12 @@ sd_print_capacity(struct scsi_disk *sdkp,
/* called with buffer of length 512 */
static inline int
-sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
+sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
unsigned char *buffer, int len, struct scsi_mode_data *data,
struct scsi_sense_hdr *sshdr)
{
- return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
- SD_TIMEOUT, SD_MAX_RETRIES, data,
+ return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
+ SD_TIMEOUT, sdkp->max_retries, data,
sshdr);
}
@@ -2610,14 +2656,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (sdp->use_192_bytes_for_3f) {
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
} else {
/*
* First attempt: ask for all pages (0x3F), but only 4 bytes.
* We have to start carefully: some devices hang if we ask
* for more than is available.
*/
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
/*
* Second attempt: ask for page 0. When only page 0 is
@@ -2626,13 +2672,13 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
* CDB.
*/
if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
/*
* Third attempt: ask 255 bytes, as we did earlier.
*/
if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
&data, NULL);
}
@@ -2694,7 +2740,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
&data, &sshdr);
if (!scsi_status_is_good(res))
@@ -2726,7 +2772,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
/* Get the data */
if (len > first_len)
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
&data, &sshdr);
if (scsi_status_is_good(res)) {
@@ -2845,7 +2891,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
return;
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr);
+ sdkp->max_retries, &data, &sshdr);
if (!scsi_status_is_good(res) || !data.header_length ||
data.length < 6) {
@@ -3368,6 +3414,7 @@ static int sd_probe(struct device *dev)
sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
+ sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
@@ -3431,7 +3478,7 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
if (sdkp->security) {
- sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
if (sdkp->opal_dev)
sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
}
@@ -3546,7 +3593,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) == DRIVER_SENSE)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a3aad608bc38..b59136c4125b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -90,6 +90,7 @@ struct scsi_disk {
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
+ int max_retries;
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
index 201a536688de..805d4c13d070 100644
--- a/drivers/scsi/sense_codes.h
+++ b/drivers/scsi/sense_codes.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20200817]
*/
SENSE_CODE(0x0000, "No additional sense information")
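This header is an X-macro list: each consumer defines SENSE_CODE() before including it, so the same entries can build a lookup table in one translation unit and something else elsewhere. A sketch of one plausible consumer, with a few entries copied from the list and an assumed struct shape:

#include <stdio.h>

struct sense_entry {
	unsigned short code;
	const char *text;
};

static const struct sense_entry sense_table[] = {
#define SENSE_CODE(c, s) { c, s },
	SENSE_CODE(0x0000, "No additional sense information")
	SENSE_CODE(0x0C12, "Write error - insufficient zone resources")
	SENSE_CODE(0x550E, "Insufficient zone resources")
#undef SENSE_CODE
};

int main(void)
{
	for (size_t i = 0; i < sizeof(sense_table) / sizeof(sense_table[0]); i++)
		printf("%04x: %s\n", sense_table[i].code, sense_table[i].text);
	return 0;
}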
@@ -29,6 +29,7 @@ SENSE_CODE(0x001E, "Conflicting SA creation request")
SENSE_CODE(0x001F, "Logical unit transitioning to another power condition")
SENSE_CODE(0x0020, "Extended copy information available")
SENSE_CODE(0x0021, "Atomic command aborted due to ACA")
+SENSE_CODE(0x0022, "Deferred microcode is pending")
SENSE_CODE(0x0100, "No index/sector signal")
@@ -72,6 +73,9 @@ SENSE_CODE(0x041F, "Logical unit not ready, microcode download required")
SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required")
SENSE_CODE(0x0421, "Logical unit not ready, hard reset required")
SENSE_CODE(0x0422, "Logical unit not ready, power cycle required")
+SENSE_CODE(0x0423, "Logical unit not ready, affiliation required")
+SENSE_CODE(0x0424, "Depopulation in progress")
+SENSE_CODE(0x0425, "Depopulation restoration in progress")
SENSE_CODE(0x0500, "Logical unit does not respond to selection")
@@ -104,6 +108,17 @@ SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile")
SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache")
SENSE_CODE(0x0B08, "Warning - power loss expected")
SENSE_CODE(0x0B09, "Warning - device statistics notification active")
+SENSE_CODE(0x0B0A, "Warning - high critical temperature limit exceeded")
+SENSE_CODE(0x0B0B, "Warning - low critical temperature limit exceeded")
+SENSE_CODE(0x0B0C, "Warning - high operating temperature limit exceeded")
+SENSE_CODE(0x0B0D, "Warning - low operating temperature limit exceeded")
+SENSE_CODE(0x0B0E, "Warning - high critical humidity limit exceeded")
+SENSE_CODE(0x0B0F, "Warning - low critical humidity limit exceeded")
+SENSE_CODE(0x0B10, "Warning - high operating humidity limit exceeded")
+SENSE_CODE(0x0B11, "Warning - low operating humidity limit exceeded")
+SENSE_CODE(0x0B12, "Warning - microcode security at risk")
+SENSE_CODE(0x0B13, "Warning - microcode digital signature validation failure")
+SENSE_CODE(0x0B14, "Warning - physical element status change")
SENSE_CODE(0x0C00, "Write error")
SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation")
@@ -122,6 +137,8 @@ SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data")
SENSE_CODE(0x0C0E, "Multiple write errors")
SENSE_CODE(0x0C0F, "Defects in error window")
SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations")
+SENSE_CODE(0x0C11, "Write error - recovery scan needed")
+SENSE_CODE(0x0C12, "Write error - insufficient zone resources")
SENSE_CODE(0x0D00, "Error detected by third party temporary initiator")
SENSE_CODE(0x0D01, "Third party device failure")
@@ -242,6 +259,9 @@ SENSE_CODE(0x2009, "Access denied - invalid LU identifier")
SENSE_CODE(0x200A, "Access denied - invalid proxy token")
SENSE_CODE(0x200B, "Access denied - ACL LUN conflict")
SENSE_CODE(0x200C, "Illegal command when not in append-only mode")
+SENSE_CODE(0x200D, "Not an administrative logical unit")
+SENSE_CODE(0x200E, "Not a subsidiary logical unit")
+SENSE_CODE(0x200F, "Not a conglomerate logical unit")
SENSE_CODE(0x2100, "Logical block address out of range")
SENSE_CODE(0x2101, "Invalid element address")
@@ -251,6 +271,8 @@ SENSE_CODE(0x2104, "Unaligned write command")
SENSE_CODE(0x2105, "Write boundary violation")
SENSE_CODE(0x2106, "Attempt to read invalid data")
SENSE_CODE(0x2107, "Read boundary violation")
+SENSE_CODE(0x2108, "Misaligned write command")
+SENSE_CODE(0x2109, "Attempt to access gap zone")
SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)")
@@ -275,6 +297,7 @@ SENSE_CODE(0x2405, "Security working key frozen")
SENSE_CODE(0x2406, "Nonce not unique")
SENSE_CODE(0x2407, "Nonce timestamp out of range")
SENSE_CODE(0x2408, "Invalid XCDB")
+SENSE_CODE(0x2409, "Invalid fast format")
SENSE_CODE(0x2500, "Logical unit not supported")
@@ -297,6 +320,10 @@ SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value")
SENSE_CODE(0x2610, "Data decryption key fail limit reached")
SENSE_CODE(0x2611, "Incomplete key-associated data set")
SENSE_CODE(0x2612, "Vendor specific key reference not found")
+SENSE_CODE(0x2613, "Application tag mode page is invalid")
+SENSE_CODE(0x2614, "Tape stream mirroring prevented")
+SENSE_CODE(0x2615, "Copy source or copy destination not authorized")
+SENSE_CODE(0x2616, "Fast copy not possible")
SENSE_CODE(0x2700, "Write protected")
SENSE_CODE(0x2701, "Hardware write protected")
@@ -342,6 +369,7 @@ SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event"
SENSE_CODE(0x2A13, "Data encryption key instance counter has changed")
SENSE_CODE(0x2A14, "SA creation capabilities data has changed")
SENSE_CODE(0x2A15, "Medium removal prevention preempted")
+SENSE_CODE(0x2A16, "Zone reset write pointer recommended")
SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect")
@@ -360,6 +388,11 @@ SENSE_CODE(0x2C0B, "Not reserved")
SENSE_CODE(0x2C0C, "Orwrite generation does not match")
SENSE_CODE(0x2C0D, "Reset write pointer not allowed")
SENSE_CODE(0x2C0E, "Zone is offline")
+SENSE_CODE(0x2C0F, "Stream not open")
+SENSE_CODE(0x2C10, "Unwritten data in zone")
+SENSE_CODE(0x2C11, "Descriptor format sense data required")
+SENSE_CODE(0x2C12, "Zone is inactive")
+SENSE_CODE(0x2C13, "Well known logical unit access required")
SENSE_CODE(0x2D00, "Overwrite error on update in place")
@@ -395,6 +428,8 @@ SENSE_CODE(0x3100, "Medium format corrupted")
SENSE_CODE(0x3101, "Format command failed")
SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking")
SENSE_CODE(0x3103, "Sanitize command failed")
+SENSE_CODE(0x3104, "Depopulation failed")
+SENSE_CODE(0x3105, "Depopulation restoration failed")
SENSE_CODE(0x3200, "No defect spare location available")
SENSE_CODE(0x3201, "Defect list update failure")
@@ -419,6 +454,7 @@ SENSE_CODE(0x3802, "Esn - power management class event")
SENSE_CODE(0x3804, "Esn - media class event")
SENSE_CODE(0x3806, "Esn - device busy class event")
SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached")
+SENSE_CODE(0x3808, "Depopulation interrupted")
SENSE_CODE(0x3900, "Saving parameters not supported")
@@ -456,6 +492,7 @@ SENSE_CODE(0x3B19, "Element enabled")
SENSE_CODE(0x3B1A, "Data transfer device removed")
SENSE_CODE(0x3B1B, "Data transfer device inserted")
SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation")
+SENSE_CODE(0x3B20, "Element static information changed")
SENSE_CODE(0x3D00, "Invalid bits in identify message")
@@ -488,6 +525,11 @@ SENSE_CODE(0x3F13, "iSCSI IP address removed")
SENSE_CODE(0x3F14, "iSCSI IP address changed")
SENSE_CODE(0x3F15, "Inspect referrals sense descriptors")
SENSE_CODE(0x3F16, "Microcode has been changed without reset")
+SENSE_CODE(0x3F17, "Zone transition to full")
+SENSE_CODE(0x3F18, "Bind completed")
+SENSE_CODE(0x3F19, "Bind redirected")
+SENSE_CODE(0x3F1A, "Subsidiary binding changed")
+
/*
* SENSE_CODE(0x40NN, "Ram failure")
* SENSE_CODE(0x40NN, "Diagnostic failure on component nn")
@@ -589,6 +631,9 @@ SENSE_CODE(0x550B, "Insufficient power for operation")
SENSE_CODE(0x550C, "Insufficient resources to create rod")
SENSE_CODE(0x550D, "Insufficient resources to create rod token")
SENSE_CODE(0x550E, "Insufficient zone resources")
+SENSE_CODE(0x550F, "Insufficient zone resources to complete write")
+SENSE_CODE(0x5510, "Maximum number of streams open")
+SENSE_CODE(0x5511, "Insufficient resources to bind")
SENSE_CODE(0x5700, "Unable to recover table-of-contents")
@@ -692,6 +737,7 @@ SENSE_CODE(0x5D69, "Firmware impending failure throughput performance")
SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance")
SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count")
SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count")
+SENSE_CODE(0x5D73, "Media impending failure endurance limit met")
SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)")
SENSE_CODE(0x5E00, "Low power condition on")
@@ -744,6 +790,8 @@ SENSE_CODE(0x6708, "Assign failure occurred")
SENSE_CODE(0x6709, "Multiply assigned logical unit")
SENSE_CODE(0x670A, "Set target port groups command failed")
SENSE_CODE(0x670B, "ATA device feature not enabled")
+SENSE_CODE(0x670C, "Command rejected")
+SENSE_CODE(0x670D, "Explicit bind not allowed")
SENSE_CODE(0x6800, "Logical unit not configured")
SENSE_CODE(0x6801, "Subsidiary logical unit not configured")
@@ -772,6 +820,10 @@ SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region")
SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error")
SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording")
SENSE_CODE(0x6F07, "Conflict in binding nonce recording")
+SENSE_CODE(0x6F08, "Insufficient permission")
+SENSE_CODE(0x6F09, "Invalid drive-host pairing server")
+SENSE_CODE(0x6F0A, "Drive-host pairing suspended")
+
/*
* SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn")
*/
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 20472aaaf630..bfa8d77322d7 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1820,14 +1820,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
struct iovec *iov = NULL;
struct iov_iter i;
-#ifdef CONFIG_COMPAT
- if (in_compat_syscall())
- res = compat_import_iovec(rw, hp->dxferp, iov_count,
- 0, &iov, &i);
- else
-#endif
- res = import_iovec(rw, hp->dxferp, iov_count,
- 0, &iov, &i);
+ res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
if (res < 0)
return res;
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 3bdf0deb8f15..cf1030c9dda1 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -95,7 +95,7 @@ void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
*/
hcp->desc.pbuf = 0;
hcp->desc.cntinfo = HPCDMA_EOX;
- dma_cache_sync(hd->dev, hd->cpu,
+ dma_sync_single_for_device(hd->dev, hd->dma,
(unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
DMA_TO_DEVICE);
}
@@ -234,8 +234,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
- hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+ hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
+ &hdata->dma, DMA_TO_DEVICE, GFP_KERNEL);
if (!hdata->cpu) {
printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
"host %d buffer.\n", unit);
@@ -274,8 +274,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
out_irq:
free_irq(irq, host);
out_free:
- dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_TO_DEVICE);
out_put:
scsi_host_put(host);
out:
@@ -291,8 +291,8 @@ static int sgiwd93_remove(struct platform_device *pdev)
scsi_remove_host(host);
free_irq(pd->irq, host);
- dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_TO_DEVICE);
scsi_host_put(host);
return 0;
}
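The conversion above pairs dma_alloc_noncoherent(), which takes an explicit DMA direction, with dma_sync_single_for_device() before the hardware consumes CPU-written descriptors. A kernel-style sketch of that pairing; this is not a standalone program, the names are illustrative, and error handling is trimmed:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_ring {
	void *cpu;
	dma_addr_t dma;
};

static int my_ring_alloc(struct device *dev, struct my_ring *r, size_t size)
{
	r->cpu = dma_alloc_noncoherent(dev, size, &r->dma, DMA_TO_DEVICE,
				       GFP_KERNEL);
	return r->cpu ? 0 : -ENOMEM;
}

static void my_ring_publish(struct device *dev, struct my_ring *r,
			    size_t offset, size_t len)
{
	/* The CPU filled descriptors; make them visible to the device. */
	dma_sync_single_for_device(dev, r->dma + offset, len, DMA_TO_DEVICE);
}

static void my_ring_free(struct device *dev, struct my_ring *r, size_t size)
{
	dma_free_noncoherent(dev, size, r->cpu, r->dma, DMA_TO_DEVICE);
}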
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 8eec241f074b..cb9e4e968b60 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,11 +1,11 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
-# (mailto:esc.storagedev@microsemi.com)
+# (mailto:storagedev@microchip.com)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 1129fe7a27ed..3e54590e6e92 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -359,7 +359,7 @@ struct pqi_event_response {
struct pqi_iu_header header;
u8 event_type;
u8 reserved2 : 7;
- u8 request_acknowlege : 1;
+ u8 request_acknowledge : 1;
__le16 event_id;
__le32 additional_event_id;
union {
@@ -927,6 +927,7 @@ struct pqi_scsi_dev {
u8 new_device : 1;
u8 keep_device : 1;
u8 volume_offline : 1;
+ u8 rescan : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_reset;
bool in_remove;
@@ -962,6 +963,7 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
atomic_t scsi_cmds_outstanding;
+ atomic_t raid_bypass_cnt;
};
/* VPD inquiry pages */
@@ -1255,6 +1257,7 @@ struct bmic_sense_subsystem_info {
#define SA_DEVICE_TYPE_SATA 0x1
#define SA_DEVICE_TYPE_SAS 0x2
#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_SES 0x6
#define SA_DEVICE_TYPE_CONTROLLER 0x7
#define SA_DEVICE_TYPE_NVME 0x9
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ca1e6cf6a38e..9d0229656681 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.10-025"
+#define DRIVER_VERSION "1.2.16-010"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 10
-#define DRIVER_REVISION 25
+#define DRIVER_RELEASE 16
+#define DRIVER_REVISION 10
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be16(cdb_length, &cdb[7]);
break;
default:
- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
- cmd);
+ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
break;
}
@@ -1300,33 +1299,59 @@ no_buffer:
device->volume_offline = volume_offline;
}
-#define PQI_INQUIRY_PAGE0_RETRIES 3
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ rc = pqi_identify_physical_device(ctrl_info, device,
+ id_phys, sizeof(*id_phys));
+ if (rc) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return rc;
+ }
+
+ scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+ scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+ memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+ memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+ device->box_index = id_phys->box_index;
+ device->phys_box_on_bus = id_phys->phys_box_on_bus;
+ device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+ device->queue_depth =
+ get_unaligned_le16(&id_phys->current_queue_depth_limit);
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+
+ return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- unsigned int retries;
-
- if (device->is_expander_smp_device)
- return 0;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- for (retries = 0;;) {
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
- buffer, 64);
- if (rc == 0)
- break;
- if (pqi_is_logical_device(device) ||
- rc != PQI_CMD_STATUS_ABORTED ||
- ++retries > PQI_INQUIRY_PAGE0_RETRIES)
- goto out;
- }
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+ if (rc)
+ goto out;
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1335,7 +1360,7 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
memcpy(device->model, &buffer[16], sizeof(device->model));
- if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+ if (device->devtype == TYPE_DISK) {
if (device->is_external_raid_device) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = CISS_LV_OK;
@@ -1353,36 +1378,21 @@ out:
return rc;
}
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
{
int rc;
- memset(id_phys, 0, sizeof(*id_phys));
+ if (device->is_expander_smp_device)
+ return 0;
- rc = pqi_identify_physical_device(ctrl_info, device,
- id_phys, sizeof(*id_phys));
- if (rc) {
- device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- return;
- }
+ if (pqi_is_logical_device(device))
+ rc = pqi_get_logical_device_info(ctrl_info, device);
+ else
+ rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
- device->box_index = id_phys->box_index;
- device->phys_box_on_bus = id_phys->phys_box_on_bus;
- device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
- device->queue_depth =
- get_unaligned_le16(&id_phys->current_queue_depth_limit);
- device->device_type = id_phys->device_type;
- device->active_path_index = id_phys->active_path_number;
- device->path_map = id_phys->redundant_path_present_map;
- memcpy(&device->box,
- &id_phys->alternate_paths_phys_box_on_port,
- sizeof(device->box));
- memcpy(&device->phys_connector,
- &id_phys->alternate_paths_phys_connector,
- sizeof(device->phys_connector));
- device->bay = id_phys->phys_bay_in_box;
+ return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1521,11 +1531,10 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
pqi_device_remove_start(device);
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_PENDING_IO_TIMEOUT_SECS);
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
if (rc)
dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus,
device->target, device->lun,
atomic_read(&device->scsi_cmds_outstanding));
@@ -1543,10 +1552,8 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- if (device->bus == bus && device->target == target &&
- device->lun == lun)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->bus == bus && device->target == target && device->lun == lun)
return device;
return NULL;
@@ -1572,15 +1579,12 @@ enum pqi_find_result {
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device_to_find,
- struct pqi_scsi_dev **matching_device)
+ struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
- device->scsi3addr)) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
*matching_device = device;
if (pqi_device_equal(device_to_find, device)) {
if (device_to_find->volume_offline)
@@ -1677,6 +1681,11 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->target_lun_valid = true;
}
+ if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
+ existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
+ new_device->volume_status == CISS_LV_OK)
+ existing_device->rescan = true;
+
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
@@ -1775,8 +1784,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
/* Assume that all devices in the existing list have gone away. */
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
device->device_gone = true;
for (i = 0; i < num_new_devices; i++) {
@@ -1816,7 +1824,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
if (device->device_gone) {
- list_del(&device->scsi_device_list_entry);
+ list_del_init(&device->scsi_device_list_entry);
list_add_tail(&device->delete_list_entry, &delete_list);
}
}
@@ -1841,18 +1849,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
pqi_ctrl_ofa_done(ctrl_info);
/* Remove all devices that have gone away. */
- list_for_each_entry_safe(device, next, &delete_list,
- delete_list_entry) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
- } else {
- pqi_dev_info(ctrl_info, "removed", device);
}
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
list_del(&device->delete_list_entry);
- pqi_free_device(device);
+ if (pqi_is_device_added(device)) {
+ pqi_remove_device(ctrl_info, device);
+ } else {
+ if (!device->volume_offline)
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+ }
}
/*
@@ -1861,20 +1870,27 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
- if (device->sdev && device->queue_depth !=
- device->advertised_queue_depth) {
- device->advertised_queue_depth = device->queue_depth;
- scsi_change_queue_depth(device->sdev,
- device->advertised_queue_depth);
+ if (device->sdev) {
+ if (device->queue_depth !=
+ device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev,
+ device->advertised_queue_depth);
+ }
+ if (device->rescan) {
+ scsi_rescan_device(&device->sdev->sdev_gendev);
+ device->rescan = false;
+ }
}
}
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
if (!pqi_is_device_added(device)) {
- pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
- if (rc) {
+ if (rc == 0) {
+ pqi_dev_info(ctrl_info, "added", device);
+ } else {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d addition failed, device not added\n",
ctrl_info->scsi_host->host_no,
@@ -1886,36 +1902,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
- bool is_supported;
-
- if (device->is_expander_smp_device)
- return true;
-
- is_supported = false;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_TAPE:
- case TYPE_MEDIUM_CHANGER:
- case TYPE_ENCLOSURE:
- is_supported = true;
- break;
- case TYPE_RAID:
- /*
- * Only support the HBA controller itself as a RAID
- * controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, for
- * example), we don't support it.
- */
- if (pqi_is_hba_lunid(device->scsi3addr))
- is_supported = true;
- break;
- }
+ /*
+ * Only support the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
+ */
+ if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
+ !pqi_is_hba_lunid(device->scsi3addr))
+ return false;
- return is_supported;
+ return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1934,16 +1933,10 @@ static inline void pqi_mask_device(u8 *scsi3addr)
static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
- if (!device->is_physical_device)
- return false;
-
- if (device->is_expander_smp_device)
- return true;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_ENCLOSURE:
+ switch (device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ case SA_DEVICE_TYPE_SES:
return true;
}
@@ -2085,16 +2078,19 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
if (is_physical_device) {
- if (phys_lun_ext_entry->device_type ==
- SA_DEVICE_TYPE_EXPANDER_SMP)
+ device->device_type = phys_lun_ext_entry->device_type;
+ if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
pqi_is_external_raid_addr(scsi3addr);
}
+ if (!pqi_is_supported_device(device))
+ continue;
+
/* Gather information about the device. */
- rc = pqi_get_device_info(ctrl_info, device);
+ rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
out_of_memory_msg);
@@ -2115,9 +2111,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
- if (!pqi_is_supported_device(device))
- continue;
-
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
@@ -2129,7 +2122,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->aio_handle =
phys_lun_ext_entry->aio_handle;
}
- pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -2160,31 +2152,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
-
- while (1) {
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
- device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
- struct pqi_scsi_dev, scsi_device_list_entry);
- if (device)
- list_del(&device->scsi_device_list_entry);
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!device)
- break;
-
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- pqi_free_device(device);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc = 0;
@@ -2462,7 +2429,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
offload_to_mirror =
(offload_to_mirror >= layout_map_count - 1) ?
0 : offload_to_mirror + 1;
- WARN_ON(offload_to_mirror >= layout_map_count);
device->offload_to_mirror = offload_to_mirror;
/*
* Avoid direct use of device->offload_to_mirror within this
@@ -2915,10 +2881,14 @@ static int pqi_interpret_task_management_response(
return rc;
}
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_responses;
+ pqi_take_ctrl_offline(ctrl_info);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+{
+ int num_responses;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_io_request *io_request;
@@ -2930,6 +2900,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
while (1) {
oq_pi = readl(queue_group->oq_pi);
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+ return -1;
+ }
if (oq_pi == oq_ci)
break;
@@ -2938,10 +2915,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
request_id = get_unaligned_le16(&response->request_id);
- WARN_ON(request_id >= ctrl_info->max_io_slots);
+ if (request_id >= ctrl_info->max_io_slots) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+ return -1;
+ }
io_request = &ctrl_info->io_request_pool[request_id];
- WARN_ON(atomic_read(&io_request->refcount) == 0);
+ if (atomic_read(&io_request->refcount) == 0) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
+ request_id, oq_pi, oq_ci);
+ return -1;
+ }
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
@@ -2971,24 +2960,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
io_request->error_info = ctrl_info->error_buffer +
(get_unaligned_le16(&response->error_index) *
PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- pqi_process_io_error(response->header.iu_type,
- io_request);
+ pqi_process_io_error(response->header.iu_type, io_request);
break;
default:
+ pqi_invalid_response(ctrl_info);
dev_err(&ctrl_info->pci_dev->dev,
- "unexpected IU type: 0x%x\n",
- response->header.iu_type);
- break;
+ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+ response->header.iu_type, oq_pi, oq_ci);
+ return -1;
}
- io_request->io_complete_callback(io_request,
- io_request->context);
+ io_request->io_complete_callback(io_request, io_request->context);
/*
* Note that the I/O request structure CANNOT BE TOUCHED after
* returning from the I/O completion callback!
*/
-
oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
}
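The checks added above treat every device-supplied index as untrusted: the producer index is validated against the ring size before any element is dereferenced, and an out-of-range value aborts processing instead of walking past the ring. A userspace sketch of that consumer loop, with an illustrative ring and a simulated corruption:

#include <stdio.h>

#define RING_ELEMENTS 16

static unsigned int producer_index;	/* device-written in the real driver */
static unsigned int consumer_index;
static int ring[RING_ELEMENTS];

static int consume_all(void)
{
	int handled = 0;

	for (;;) {
		unsigned int pi = producer_index;

		if (pi >= RING_ELEMENTS) {
			fprintf(stderr,
				"producer index (%u) out of range (0-%u)\n",
				pi, RING_ELEMENTS - 1);
			return -1;	/* i.e. take the controller offline */
		}
		if (pi == consumer_index)
			break;

		printf("element %d\n", ring[consumer_index]);
		consumer_index = (consumer_index + 1) % RING_ELEMENTS;
		handled++;
	}
	return handled;
}

int main(void)
{
	ring[0] = 42;
	producer_index = 1;
	printf("handled=%d\n", consume_all());

	producer_index = 99;		/* simulated firmware corruption */
	printf("handled=%d\n", consume_all());
	return 0;
}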
@@ -3300,9 +3287,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
}
}
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_events;
+ int num_events;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
@@ -3316,26 +3303,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
while (1) {
oq_pi = readl(event_queue->oq_pi);
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+ return -1;
+ }
+
if (oq_pi == oq_ci)
break;
num_events++;
- response = event_queue->oq_element_array +
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
event_index =
pqi_event_type_to_event_index(response->event_type);
- if (event_index >= 0) {
- if (response->request_acknowlege) {
- event = &ctrl_info->events[event_index];
- event->pending = true;
- event->event_type = response->event_type;
- event->event_id = response->event_id;
- event->additional_event_id =
- response->additional_event_id;
+ if (event_index >= 0 && response->request_acknowledge) {
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = response->event_id;
+ event->additional_event_id = response->additional_event_id;
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
pqi_ofa_capture_event_payload(event, response);
- }
}
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3450,7 +3442,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_queue_group *queue_group;
- unsigned int num_responses_handled;
+ int num_io_responses_handled;
+ int num_events_handled;
queue_group = data;
ctrl_info = queue_group->ctrl_info;
@@ -3458,17 +3451,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ if (num_io_responses_handled < 0)
+ goto out;
- if (irq == ctrl_info->event_irq)
- num_responses_handled += pqi_process_event_intr(ctrl_info);
+ if (irq == ctrl_info->event_irq) {
+ num_events_handled = pqi_process_event_intr(ctrl_info);
+ if (num_events_handled < 0)
+ goto out;
+ } else {
+ num_events_handled = 0;
+ }
- if (num_responses_handled)
+ if (num_io_responses_handled + num_events_handled > 0)
atomic_inc(&ctrl_info->num_interrupts);
pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+out:
return IRQ_HANDLED;
}
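
Note how the handler reacts to a negative return: it jumps to the new out label, skipping the pqi_start_io() calls so no further I/O is issued to a controller that was just taken offline, yet it still returns IRQ_HANDLED, since the interrupt did belong to this device even if its payload was invalid.
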
@@ -5375,19 +5376,18 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
!blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
- if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
+ atomic_inc(&device->raid_bypass_cnt);
+ }
}
if (!raid_bypassed)
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
} else {
if (device->aio_enabled)
- rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
else
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
}
out:
@@ -5830,8 +5830,42 @@ static int pqi_map_queues(struct Scsi_Host *shost)
ctrl_info->pci_dev, 0);
}
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
- void __user *arg)
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+ struct pqi_scsi_dev *device;
+
+ device = sdev->hostdata;
+ device->devtype = sdev->type;
+
+ return 0;
+}
+
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (device) {
+ sdev->hostdata = NULL;
+ if (!list_empty(&device->scsi_device_list_entry))
+ list_del(&device->scsi_device_list_entry);
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ if (device) {
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+ }
+}
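
pqi_slave_destroy() follows the usual unlink-then-free discipline: the device is removed from the controller's list under the spinlock, but logging and freeing happen only after the lock is dropped. A rough userspace model of the same shape, with a pthread mutex standing in for the spinlock (types and names are made up for illustration):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* ...per-device state... */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void destroy(struct node *n)
{
	struct node **pp;

	pthread_mutex_lock(&list_lock);
	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;	/* unlink under the lock */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	free(n);			/* teardown outside the lock */
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return 1;
	n->next = head;
	head = n;
	destroy(n);
	return 0;
}
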
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
u32 subsystem_vendor;
@@ -5848,8 +5882,7 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
pciinfo.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
- subsystem_vendor;
+ pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
return -EFAULT;
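
The board_id packing is simply the PCI subsystem device ID in the high 16 bits and the subsystem vendor ID in the low 16; the & 0xffff0000 mask is redundant for 16-bit IDs but harmless. A quick worked example with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t subsystem_vendor = 0x9005;	/* illustrative IDs */
	uint32_t subsystem_device = 0x0800;
	uint32_t board_id = ((subsystem_device << 16) & 0xffff0000) |
			    subsystem_vendor;

	printf("0x%08x\n", board_id);		/* prints 0x08009005 */
	return 0;
}
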
@@ -6258,8 +6291,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6296,8 +6328,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6332,8 +6363,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6409,9 +6439,8 @@ static ssize_t pqi_sas_address_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (pqi_is_logical_device(device)) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ if (!device || !pqi_is_device_with_sas_address(device)) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6436,6 +6465,11 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -6460,6 +6494,10 @@ static ssize_t pqi_raid_level_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
if (pqi_is_logical_device(device))
raid_level = pqi_raid_level_to_string(device->raid_level);
@@ -6471,13 +6509,40 @@ static ssize_t pqi_raid_level_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int raid_bypass_cnt;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+}
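
Once registered, the counter appears per SCSI device in sysfs next to the other smartpqi attributes. A small example of reading it from userspace; the h:c:t:l path below is hypothetical and depends on how the device enumerated on your system:

#include <stdio.h>

int main(void)
{
	/* Hypothetical sdev path -- substitute the real host:channel:target:lun. */
	const char *path =
		"/sys/class/scsi_device/0:0:0:0/device/raid_bypass_cnt";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("raid_bypass_cnt = %s", buf);	/* e.g. "0x1a" */
	fclose(f);
	return 0;
}
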
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
- pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid,
@@ -6486,6 +6551,7 @@ static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_ssd_smart_path_enabled,
&dev_attr_raid_level,
+ &dev_attr_raid_bypass_cnt,
NULL
};
@@ -6500,6 +6566,8 @@ static struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
+ .slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
@@ -7590,7 +7658,6 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8300,6 +8367,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x080a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
@@ -8501,8 +8572,7 @@ static int __init pqi_init(void)
pr_info(DRIVER_NAME "\n");
- pqi_sas_transport_template =
- sas_attach_transport(&pqi_sas_transport_functions);
+ pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
return -ENODEV;
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b7289112455c..999870eb9ed8 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index f0d6e88ba2c1..26ea6b9d4199 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 86b0e484d921..878d34ca6532 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 03d43f016397..9e2e196bc202 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -124,16 +124,4 @@ static struct platform_driver snirm710_driver = {
.name = "snirm_53c710",
},
};
-
-static int __init snirm710_init(void)
-{
- return platform_driver_register(&snirm710_driver);
-}
-
-static void __exit snirm710_exit(void)
-{
- platform_driver_unregister(&snirm710_driver);
-}
-
-module_init(snirm710_init);
-module_exit(snirm710_exit);
+module_platform_driver(snirm710_driver);
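
module_platform_driver() is pure boilerplate removal: via the generic module_driver() helper it generates essentially the same init/exit pair the patch deletes. Roughly:

/* What module_platform_driver(snirm710_driver) expands to, approximately: */
static int __init snirm710_driver_init(void)
{
	return platform_driver_register(&snirm710_driver);
}
module_init(snirm710_driver_init);

static void __exit snirm710_driver_exit(void)
{
	platform_driver_unregister(&snirm710_driver);
}
module_exit(snirm710_driver_exit);
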
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 2b349365592f..4471c4c8aafa 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -439,26 +439,14 @@ snic_trc_seq_show(struct seq_file *sfp, void *data)
return 0;
}
-static const struct seq_operations snic_trc_seq_ops = {
+static const struct seq_operations snic_trc_sops = {
.start = snic_trc_seq_start,
.next = snic_trc_seq_next,
.stop = snic_trc_seq_stop,
.show = snic_trc_seq_show,
};
-static int
-snic_trc_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &snic_trc_seq_ops);
-}
-
-static const struct file_operations snic_trc_fops = {
- .owner = THIS_MODULE,
- .open = snic_trc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(snic_trc);
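
The rename from snic_trc_seq_ops to snic_trc_sops is what makes this work: DEFINE_SEQ_ATTRIBUTE(snic_trc) expects a seq_operations table named <name>_sops and emits <name>_open plus <name>_fops, roughly equivalent to the block being deleted (the real macro additionally passes inode->i_private through to the seq_file's private field):

/* Approximately what DEFINE_SEQ_ATTRIBUTE(snic_trc) generates: */
static int snic_trc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &snic_trc_sops);
}

static const struct file_operations snic_trc_fops = {
	.owner   = THIS_MODULE,
	.open    = snic_trc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
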
/*
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index b3650c989ed4..6dd0ff188bb4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1387,19 +1387,15 @@ snic_issue_tm_req(struct snic *snic,
}
ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
- if (ret)
- goto tmreq_err;
-
- ret = 0;
tmreq_err:
if (ret) {
SNIC_HOST_ERR(snic->shost,
- "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
tmf, sc, rqi, req_id, tag, ret);
} else {
SNIC_SCSI_DBG(snic->shost,
- "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
tmf, sc, rqi, req_id, tag);
}
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
index 4c8e64e4fba6..3455dd7e73f4 100644
--- a/drivers/scsi/snic/vnic_cq.c
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -31,8 +31,6 @@ void svnic_cq_free(struct vnic_cq *cq)
int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
unsigned int index, unsigned int desc_count, unsigned int desc_size)
{
- int err;
-
cq->index = index;
cq->vdev = vdev;
@@ -43,11 +41,7 @@ int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
return -EINVAL;
}
- err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
}
void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 3b3a53c6a0de..fd4b582110b2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
struct request *rq = SCpnt->request;
blk_status_t ret;
- ret = scsi_init_io(SCpnt);
+ ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK)
- goto out;
+ return ret;
cd = scsi_cd(rq->rq_disk);
- /* from here on until we're complete, any goto out
- * is used for a killable error condition */
- ret = BLK_STS_IOERR;
-
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -507,14 +503,26 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
+ SCpnt->cmd_len = 10;
/*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
*/
- ret = BLK_STS_OK;
+ return BLK_STS_OK;
out:
- return ret;
+ scsi_free_sgtables(SCpnt);
+ return BLK_STS_IOERR;
+}
+
+static void sr_revalidate_disk(struct scsi_cd *cd)
+{
+ struct scsi_sense_hdr sshdr;
+
+ /* if the unit is not ready, nothing more to do */
+ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+ return;
+ sr_cd_check(&cd->cdi);
+ get_sectorsize(cd);
}
static int sr_block_open(struct block_device *bdev, fmode_t mode)
@@ -529,7 +537,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
sdev = cd->device;
scsi_autopm_get_device(sdev);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ sr_revalidate_disk(cd);
mutex_lock(&cd->lock);
ret = cdrom_open(&cd->cdi, bdev, mode);
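
This is part of a broader conversion in this cycle: check_disk_change() used to invoke the driver's ->revalidate_disk internally, whereas bdev_check_media_change() only reports whether the media changed and leaves revalidation to the caller -- hence the new explicit sr_revalidate_disk() call. The resulting open-path convention, sketched with illustrative names:

static int foo_block_open(struct block_device *bdev, fmode_t mode)
{
	struct foo_disk *disk = bdev->bd_disk->private_data;

	if (bdev_check_media_change(bdev))	/* true iff media changed */
		foo_revalidate(disk);		/* driver revalidates itself */

	return foo_do_open(disk, mode);		/* normal open path */
}
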
@@ -658,26 +667,6 @@ static unsigned int sr_block_check_events(struct gendisk *disk,
return ret;
}
-static int sr_block_revalidate_disk(struct gendisk *disk)
-{
- struct scsi_sense_hdr sshdr;
- struct scsi_cd *cd;
-
- cd = scsi_cd_get(disk);
- if (!cd)
- return -ENXIO;
-
- /* if the unit is not ready, nothing more to do */
- if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
- goto out;
-
- sr_cd_check(&cd->cdi);
- get_sectorsize(cd);
-out:
- scsi_cd_put(cd);
- return 0;
-}
-
static const struct block_device_operations sr_bdops =
{
.owner = THIS_MODULE,
@@ -688,7 +677,6 @@ static const struct block_device_operations sr_bdops =
.compat_ioctl = sr_block_compat_ioctl,
#endif
.check_events = sr_block_check_events,
- .revalidate_disk = sr_block_revalidate_disk,
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -802,6 +790,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
+ sr_revalidate_disk(cd);
device_add_disk(&sdev->sdev_gendev, disk, NULL);
sdev_printk(KERN_DEBUG, sdev,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8f5f5dc863a4..0c65fbd41035 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1739,23 +1739,65 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
payload_sz = sizeof(cmd_request->mpb);
if (sg_count) {
- if (sg_count > MAX_PAGE_BUFFER_COUNT) {
+ unsigned int hvpgoff = 0;
+ unsigned long offset_in_hvpg = sgl->offset & ~HV_HYP_PAGE_MASK;
+ unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
+ u64 hvpfn;
- payload_sz = (sg_count * sizeof(u64) +
+ if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+ payload_sz = (hvpg_count * sizeof(u64) +
sizeof(struct vmbus_packet_mpb_array));
payload = kzalloc(payload_sz, GFP_ATOMIC);
if (!payload)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ /*
+ * sgl is a list of PAGEs, and payload->range.pfn_array
+ * expects page numbers in units of HV_HYP_PAGE_SIZE (the
+ * page size that Hyper-V uses), so here we need to divide PAGEs
+ * into HV_HYP_PAGEs in case PAGE_SIZE > HV_HYP_PAGE_SIZE.
+ * Besides, payload->range.offset should be the offset within one
+ * HV_HYP_PAGE.
+ */
payload->range.len = length;
- payload->range.offset = sgl[0].offset;
+ payload->range.offset = offset_in_hvpg;
+ hvpgoff = sgl->offset >> HV_HYP_PAGE_SHIFT;
cur_sgl = sgl;
- for (i = 0; i < sg_count; i++) {
- payload->range.pfn_array[i] =
- page_to_pfn(sg_page((cur_sgl)));
- cur_sgl = sg_next(cur_sgl);
+ for (i = 0; i < hvpg_count; i++) {
+ /*
+ * 'i' is the index of hv pages in the payload and
+ * 'hvpgoff' is the offset (in hv pages) of the first
+ * hv page in the first page. The relationship
+ * between the sum of 'i' and 'hvpgoff' and the offset
+ * (in hv pages) in a payload page ('hvpgoff_in_page')
+ * is as follows:
+ *
+ * |------------------ PAGE -------------------|
+ * |   NR_HV_HYP_PAGES_IN_PAGE hvpgs in total  |
+ * |hvpg|hvpg|     ...     |hvpg| ...     |hvpg|
+ * ^         ^             ^                 ^
+ * +-hvpgoff-+             +-hvpgoff_in_page-+
+ *           ^                               |
+ *           +-------------- i --------------+
+ */
+ unsigned int hvpgoff_in_page =
+ (i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;
+
+ /*
+ * Two cases in which we need to fetch a page:
+ * 1) i == 0: the first step, or
+ * 2) hvpgoff_in_page == 0: when we reach the boundary
+ * of a page.
+ */
+ if (hvpgoff_in_page == 0 || i == 0) {
+ hvpfn = page_to_hvpfn(sg_page(cur_sgl));
+ cur_sgl = sg_next(cur_sgl);
+ }
+
+ payload->range.pfn_array[i] = hvpfn + hvpgoff_in_page;
}
}
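
The index arithmetic is easiest to check with concrete numbers. Assume 64 KiB PAGEs and 4 KiB Hyper-V pages (NR_HV_HYP_PAGES_IN_PAGE == 16), with the first sg entry starting at byte offset 0x5000 into its PAGE, so hvpgoff == 5. A runnable sketch of just the traversal logic:

#include <stdio.h>

#define HV_HYP_PAGE_SHIFT	12	/* 4 KiB Hyper-V pages */
#define NR_HV_HYP_PAGES_IN_PAGE	16	/* assumes 64 KiB PAGE_SIZE */

int main(void)
{
	unsigned long sg_offset = 0x5000;	/* offset into the first PAGE */
	unsigned int hvpgoff = sg_offset >> HV_HYP_PAGE_SHIFT;	/* == 5 */
	unsigned int i;

	for (i = 0; i < 12; i++) {
		unsigned int hvpgoff_in_page =
			(i + hvpgoff) % NR_HV_HYP_PAGES_IN_PAGE;

		/* A new PAGE is fetched at i == 0 and again whenever the
		 * hv-page index wraps to the start of the next PAGE
		 * (here at i == 11, where 5 + 11 == 16 wraps to 0). */
		printf("i=%2u hvpgoff_in_page=%2u fetch=%s\n", i,
		       hvpgoff_in_page,
		       (hvpgoff_in_page == 0 || i == 0) ? "yes" : "no");
	}
	return 0;
}
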
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index f37df79e37e1..7de82f2c9757 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -270,22 +270,10 @@ static struct platform_driver esp_sun3x_driver = {
.name = "sun3x_esp",
},
};
-
-static int __init sun3x_esp_init(void)
-{
- return platform_driver_register(&esp_sun3x_driver);
-}
-
-static void __exit sun3x_esp_exit(void)
-{
- platform_driver_unregister(&esp_sun3x_driver);
-}
+module_platform_driver(esp_sun3x_driver);
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sun3x_esp_init);
-module_exit(sun3x_esp_exit);
MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 964130d2c8a6..5dc38d35745b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -606,21 +606,9 @@ static struct platform_driver esp_sbus_driver = {
.probe = esp_sbus_probe,
.remove = esp_sbus_remove,
};
-
-static int __init sunesp_init(void)
-{
- return platform_driver_register(&esp_sbus_driver);
-}
-
-static void __exit sunesp_exit(void)
-{
- platform_driver_unregister(&esp_sbus_driver);
-}
+module_platform_driver(esp_sbus_driver);
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sunesp_init);
-module_exit(sunesp_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index c6db61b61de3..c536d2a9a657 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -369,7 +369,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
sym_name(np), (int) (cur-start));
++cur;
continue;
- };
+ }
/*
* We use the bogus value 0xf00ff00f ;-)
@@ -477,7 +477,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
default:
relocs = 0;
break;
- };
+ }
/*
* Scriptify:) the opcode.
@@ -533,5 +533,5 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
*cur++ = cpu_to_scr(new);
}
- };
+ }
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 28edb6e53ea2..d9a045f9858c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -156,12 +156,8 @@ void sym_xpt_async_bus_reset(struct sym_hcb *np)
static int sym_xerr_cam_status(int cam_status, int x_status)
{
if (x_status) {
- if (x_status & XE_PARITY_ERR)
+ if (x_status & XE_PARITY_ERR)
cam_status = DID_PARITY;
- else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
- cam_status = DID_ERROR;
- else if (x_status & XE_BAD_PHASE)
- cam_status = DID_ERROR;
else
cam_status = DID_ERROR;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index cc11daa1222b..a9fe092a4906 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5656,7 +5656,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram
/*
* Allocate the array of lists of CCBs hashed by DSA.
*/
- np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
+ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(*np->ccbh), GFP_KERNEL);
if (!np->ccbh)
goto attach_failed;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index f6394999b98c..dcdb4eb1f90b 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -165,7 +165,6 @@ config SCSI_UFS_BSG
config SCSI_UFS_EXYNOS
tristate "EXYNOS specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
- select PHY_SAMSUNG_UFS
help
This selects the EXYNOS specific additions to UFSHCD platform driver.
UFS host on EXYNOS includes HCI and UNIPRO layer, and associates with
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 8f1b6f61a776..5e6b95dbb578 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -940,7 +940,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_ufs *ufs;
- struct resource *res;
int ret;
ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
@@ -948,24 +947,21 @@ static int exynos_ufs_init(struct ufs_hba *hba)
return -ENOMEM;
/* exynos-specific hci */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vs_hci");
- ufs->reg_hci = devm_ioremap_resource(dev, res);
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
if (IS_ERR(ufs->reg_hci)) {
dev_err(dev, "cannot ioremap for hci vendor register\n");
return PTR_ERR(ufs->reg_hci);
}
/* unipro */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "unipro");
- ufs->reg_unipro = devm_ioremap_resource(dev, res);
+ ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
if (IS_ERR(ufs->reg_unipro)) {
dev_err(dev, "cannot ioremap for unipro register\n");
return PTR_ERR(ufs->reg_unipro);
}
/* ufs protector */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ufsp");
- ufs->reg_ufsp = devm_ioremap_resource(dev, res);
+ ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
if (IS_ERR(ufs->reg_ufsp)) {
dev_err(dev, "cannot ioremap for ufs protector register\n");
return PTR_ERR(ufs->reg_ufsp);
@@ -1252,7 +1248,8 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR,
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 1755dd6b04ae..8df73bc2f8cb 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -10,9 +10,11 @@
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
@@ -43,6 +45,28 @@ static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
END_FIX
};
+static const struct ufs_mtk_host_cfg ufs_mtk_mt8192_cfg = {
+ .caps = UFS_MTK_CAP_BOOST_CRYPT_ENGINE,
+};
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+ {
+ .compatible = "mediatek,mt8183-ufshci",
+ },
+ {
+ .compatible = "mediatek,mt8192-ufshci",
+ .data = &ufs_mtk_mt8192_cfg
+ },
+ {},
+};
+
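Keying per-SoC capabilities off the compatible string via of_device_id::data is the standard pattern here: mt8183 matches with no data (plain defaults), while mt8192 carries a config that enables the crypto-boost capability. The consuming side, sketched with hypothetical names (ufs_mtk_init() later in this diff does the equivalent):

static int foo_init(struct device *dev)
{
	const struct of_device_id *id;
	const struct foo_cfg *cfg = NULL;

	id = of_match_device(foo_of_match, dev);
	if (!id)
		return -EINVAL;		/* device is not one of ours */

	if (id->data)
		cfg = id->data;		/* per-compatible configuration */

	/* ...apply cfg-driven capabilities, or defaults when cfg is NULL... */
	return 0;
}
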
+static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -91,16 +115,57 @@ static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
}
}
+static void ufs_mtk_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ reset_control_assert(host->hci_reset);
+ reset_control_assert(host->crypto_reset);
+ reset_control_assert(host->unipro_reset);
+
+ usleep_range(100, 110);
+
+ reset_control_deassert(host->unipro_reset);
+ reset_control_deassert(host->crypto_reset);
+ reset_control_deassert(host->hci_reset);
+}
+
+static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
+ struct reset_control **rc,
+ char *str)
+{
+ *rc = devm_reset_control_get(hba->dev, str);
+ if (IS_ERR(*rc)) {
+ dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
+ str, PTR_ERR(*rc));
+ *rc = NULL;
+ }
+}
+
+static void ufs_mtk_init_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ufs_mtk_init_reset_control(hba, &host->hci_reset,
+ "hci_rst");
+ ufs_mtk_init_reset_control(hba, &host->unipro_reset,
+ "unipro_rst");
+ ufs_mtk_init_reset_control(hba, &host->crypto_reset,
+ "crypto_rst");
+}
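
The fallback to NULL on failure is deliberate: the reset core treats a NULL reset_control as a no-op in reset_control_assert()/reset_control_deassert(), so ufs_mtk_host_reset() still works on platforms whose device tree omits one of the reset lines -- the controls are effectively optional.
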
+
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
- if (host->unipro_lpm)
+ if (host->unipro_lpm) {
hba->vps->hba_enable_delay_us = 0;
- else
+ } else {
hba->vps->hba_enable_delay_us = 600;
+ ufs_mtk_host_reset(hba);
+ }
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_mtk_crypto_enable(hba);
@@ -129,7 +194,10 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
__func__, err);
} else if (IS_ERR(host->mphy)) {
err = PTR_ERR(host->mphy);
- dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
+ if (err != -ENODEV) {
+ dev_info(dev, "%s: PHY get failed %d\n", __func__,
+ err);
+ }
}
if (err)
@@ -249,6 +317,144 @@ static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
host->mphy_powered_on = on;
}
+static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
+ struct clk **clk_out)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk))
+ err = PTR_ERR(clk);
+ else
+ *clk_out = clk;
+
+ return err;
+}
+
+static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct regulator *reg;
+ int volt, ret;
+
+ if (!ufs_mtk_is_boost_crypt_enabled(hba))
+ return;
+
+ cfg = host->crypt;
+ volt = cfg->vcore_volt;
+ reg = cfg->reg_vcore;
+
+ ret = clk_prepare_enable(cfg->clk_crypt_mux);
+ if (ret) {
+ dev_info(hba->dev, "clk_prepare_enable(): %d\n",
+ ret);
+ return;
+ }
+
+ if (boost) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to %d\n", volt);
+ goto out;
+ }
+
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_perf);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_perf\n");
+ regulator_set_voltage(reg, 0, INT_MAX);
+ goto out;
+ }
+ } else {
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_lp);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_lp\n");
+ goto out;
+ }
+
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
+out:
+ clk_disable_unprepare(cfg->clk_crypt_mux);
+}
+
+static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
+ struct clk **clk)
+{
+ int ret;
+
+ ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
+ if (ret) {
+ dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
+ name, ret);
+ }
+
+ return ret;
+}
+
+static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
+
+ host->caps = host->cfg->caps;
+
+ if (!ufs_mtk_is_boost_crypt_enabled(hba))
+ return;
+
+ host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
+ GFP_KERNEL);
+ if (!host->crypt)
+ goto disable_caps;
+
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ goto disable_caps;
+ }
+
+ if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get boost-crypt-vcore-min");
+ goto disable_caps;
+ }
+
+ cfg = host->crypt;
+ if (ufs_mtk_init_host_clk(hba, "crypt_mux",
+ &cfg->clk_crypt_mux))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_lp",
+ &cfg->clk_crypt_lp))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_perf",
+ &cfg->clk_crypt_perf))
+ goto disable_caps;
+
+ cfg->reg_vcore = reg;
+ cfg->vcore_volt = volt;
+ dev_info(dev, "caps: boost-crypt");
+ return;
+
+disable_caps:
+ host->caps &= ~UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
+}
+
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -291,12 +497,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
}
if (clk_pwr_off) {
+ ufs_mtk_boost_crypt(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
ufs_mtk_mphy_power_on(hba, on);
}
} else if (on && status == POST_CHANGE) {
ufs_mtk_mphy_power_on(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
}
return ret;
@@ -314,8 +522,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
*/
static int ufs_mtk_init(struct ufs_hba *hba)
{
- struct ufs_mtk_host *host;
+ const struct of_device_id *id;
struct device *dev = hba->dev;
+ struct ufs_mtk_host *host;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -328,10 +537,24 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Get host capability and platform data */
+ id = of_match_device(ufs_mtk_of_match, dev);
+ if (!id) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (id->data) {
+ host->cfg = (struct ufs_mtk_host_cfg *)id->data;
+ ufs_mtk_init_host_caps(hba);
+ }
+
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
+ ufs_mtk_init_reset(hba);
+
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
@@ -416,7 +639,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
-static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
+static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
{
int ret;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -424,8 +647,14 @@ static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
lpm);
- if (!ret)
+ if (!ret || !lpm) {
+ /*
+ * Forcibly set to non-LPM mode if the UIC command fails, so
+ * that the default hba_enable_delay_us value is used when
+ * re-enabling the host.
+ */
host->unipro_lpm = lpm;
+ }
return ret;
}
@@ -435,7 +664,9 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
int ret;
u32 tmp;
- ufs_mtk_unipro_set_pm(hba, 0);
+ ret = ufs_mtk_unipro_set_pm(hba, false);
+ if (ret)
+ return ret;
/*
* Setting PA_Local_TX_LCC_Enable to 0 before link startup
@@ -543,7 +774,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
- err = ufs_mtk_unipro_set_pm(hba, 0);
+ err = ufs_mtk_unipro_set_pm(hba, false);
if (err)
return err;
@@ -564,10 +795,10 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
- err = ufs_mtk_unipro_set_pm(hba, 1);
+ err = ufs_mtk_unipro_set_pm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
- ufs_mtk_unipro_set_pm(hba, 0);
+ ufs_mtk_unipro_set_pm(hba, false);
return err;
}
@@ -669,22 +900,16 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u16 mid = dev_info->wmanufacturerid;
-
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
-
- if (mid == UFS_VENDOR_SAMSUNG)
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
}
-/**
+/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
* The variant operations configure the necessary controller and PHY
* handshake during initialization.
*/
-static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
.setup_clocks = ufs_mtk_setup_clocks,
@@ -733,11 +958,6 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ufs_mtk_of_match[] = {
- { .compatible = "mediatek,mt8183-ufshci"},
- {},
-};
-
static const struct dev_pm_ops ufs_mtk_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 8ed24d5fcff9..2b6a1312c9bc 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -33,8 +33,8 @@
/*
* Vendor specific pre-defined parameters
*/
-#define UFS_MTK_LIMIT_NUM_LANES_RX 1
-#define UFS_MTK_LIMIT_NUM_LANES_TX 1
+#define UFS_MTK_LIMIT_NUM_LANES_RX 2
+#define UFS_MTK_LIMIT_NUM_LANES_TX 2
#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
@@ -89,9 +89,34 @@ enum {
TX_CLK_GATE_EN = 3,
};
+/*
+ * Host capability
+ */
+enum ufs_mtk_host_caps {
+ UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
+};
+
+struct ufs_mtk_crypt_cfg {
+ struct regulator *reg_vcore;
+ struct clk *clk_crypt_perf;
+ struct clk *clk_crypt_mux;
+ struct clk *clk_crypt_lp;
+ int vcore_volt;
+};
+
+struct ufs_mtk_host_cfg {
+ enum ufs_mtk_host_caps caps;
+};
+
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ struct ufs_mtk_host_cfg *cfg;
+ struct ufs_mtk_crypt_cfg *crypt;
+ enum ufs_mtk_host_caps caps;
+ struct reset_control *hci_reset;
+ struct reset_control *unipro_reset;
+ struct reset_control *crypto_reset;
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d0d75527830e..f9d6ef356540 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -621,218 +621,6 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return 0;
}
-#ifdef CONFIG_MSM_BUS_SCALING
-static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
- const char *speed_mode)
-{
- struct device *dev = host->hba->dev;
- struct device_node *np = dev->of_node;
- int err;
- const char *key = "qcom,bus-vector-names";
-
- if (!speed_mode) {
- err = -EINVAL;
- goto out;
- }
-
- if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
- err = of_property_match_string(np, key, "MAX");
- else
- err = of_property_match_string(np, key, speed_mode);
-
-out:
- if (err < 0)
- dev_err(dev, "%s: Invalid %s mode %d\n",
- __func__, speed_mode, err);
- return err;
-}
-
-static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
-{
- int gear = max_t(u32, p->gear_rx, p->gear_tx);
- int lanes = max_t(u32, p->lane_rx, p->lane_tx);
- int pwr;
-
- /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
- if (!gear)
- gear = 1;
-
- if (!lanes)
- lanes = 1;
-
- if (!p->pwr_rx && !p->pwr_tx) {
- pwr = SLOWAUTO_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
- } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
- p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
- pwr = FAST_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
- p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
- } else {
- pwr = SLOW_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
- "PWM", gear, lanes);
- }
-}
-
-static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
-{
- int err = 0;
-
- if (vote != host->bus_vote.curr_vote) {
- err = msm_bus_scale_client_update_request(
- host->bus_vote.client_handle, vote);
- if (err) {
- dev_err(host->hba->dev,
- "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
- __func__, host->bus_vote.client_handle,
- vote, err);
- goto out;
- }
-
- host->bus_vote.curr_vote = vote;
- }
-out:
- return err;
-}
-
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- int vote;
- int err = 0;
- char mode[BUS_VECTOR_NAME_LEN];
-
- ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
-
- vote = ufs_qcom_get_bus_vote(host, mode);
- if (vote >= 0)
- err = __ufs_qcom_set_bus_vote(host, vote);
- else
- err = vote;
-
- if (err)
- dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
- else
- host->bus_vote.saved_vote = vote;
- return err;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int vote, err;
-
- /*
- * In case ufs_qcom_init() is not yet done, simply ignore.
- * This ufs_qcom_set_bus_vote() shall be called from
- * ufs_qcom_init() after init is done.
- */
- if (!host)
- return 0;
-
- if (on) {
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
- } else {
- vote = host->bus_vote.min_bw_vote;
- }
-
- err = __ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
- return err;
-}
-
-static ssize_t
-show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- host->bus_vote.is_max_bw_needed);
-}
-
-static ssize_t
-store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- uint32_t value;
-
- if (!kstrtou32(buf, 0, &value)) {
- host->bus_vote.is_max_bw_needed = !!value;
- ufs_qcom_update_bus_bw_vote(host);
- }
-
- return count;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- int err;
- struct msm_bus_scale_pdata *bus_pdata;
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *np = dev->of_node;
-
- bus_pdata = msm_bus_cl_get_pdata(pdev);
- if (!bus_pdata) {
- dev_err(dev, "%s: failed to get bus vectors\n", __func__);
- err = -ENODATA;
- goto out;
- }
-
- err = of_property_count_strings(np, "qcom,bus-vector-names");
- if (err < 0 || err != bus_pdata->num_usecases) {
- dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
- __func__, err);
- goto out;
- }
-
- host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
- if (!host->bus_vote.client_handle) {
- dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
- __func__);
- err = -EFAULT;
- goto out;
- }
-
- /* cache the vote index for minimum and maximum bandwidth */
- host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
- host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
-
- host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
- host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
- sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
- host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
- host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
- err = device_create_file(dev, &host->bus_vote.max_bus_bw);
-out:
- return err;
-}
-#else /* CONFIG_MSM_BUS_SCALING */
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- return 0;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_hba *host, bool on)
-{
- return 0;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- return 0;
-}
-#endif /* CONFIG_MSM_BUS_SCALING */
-
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
if (host->dev_ref_clk_ctrl_mmio &&
@@ -976,7 +764,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
- ufs_qcom_update_bus_bw_vote(host);
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -1107,9 +894,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (on) {
- err = ufs_qcom_set_bus_vote(hba, true);
- } else {
+ if (!on) {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
@@ -1121,8 +906,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
- } else {
- err = ufs_qcom_set_bus_vote(hba, false);
}
break;
}
@@ -1264,10 +1047,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out_variant_clear;
}
- err = ufs_qcom_bus_register(host);
- if (err)
- goto out_variant_clear;
-
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1307,7 +1086,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
- ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -1446,7 +1224,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
- ufs_qcom_update_bus_bw_vote(host);
}
out:
@@ -1614,9 +1391,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
(u32)host->testbus.select_major << 19,
REG_UFS_CFG1);
@@ -1629,50 +1403,16 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
* committed before returning.
*/
mb();
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
return 0;
}
-static void ufs_qcom_testbus_read(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
-}
-
-static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 *testbus = NULL;
- int i, nminor = 256, testbus_len = nminor * sizeof(u32);
-
- testbus = kmalloc(testbus_len, GFP_KERNEL);
- if (!testbus)
- return;
-
- host->testbus.select_major = TSTBUS_UNIPRO;
- for (i = 0; i < nminor; i++) {
- host->testbus.select_minor = i;
- ufs_qcom_testbus_config(host);
- testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
- }
- print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
- 16, 4, testbus, testbus_len, false);
- kfree(testbus);
-}
-
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
- /* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- udelay(1000);
- ufs_qcom_testbus_read(hba);
- udelay(1000);
- ufs_qcom_print_unipro_testbus(hba);
- udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 97247d17e258..3f4922743b3e 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -174,16 +174,6 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
mb();
}
-struct ufs_qcom_bus_vote {
- uint32_t client_handle;
- uint32_t curr_vote;
- int min_bw_vote;
- int max_bw_vote;
- int saved_vote;
- bool is_max_bw_needed;
- struct device_attribute max_bus_bw;
-};
-
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
@@ -216,7 +206,6 @@ struct ufs_qcom_host {
struct phy *generic_phy;
struct ufs_hba *hba;
- struct ufs_qcom_bus_vote bus_vote;
struct ufs_pa_layer_attr dev_req_params;
struct clk *rx_l0_sync_clk;
struct clk *tx_l0_sync_clk;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 2d71d232a69d..bdcd27faa054 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -16,6 +16,7 @@ static const char *ufschd_uic_link_state_to_string(
case UIC_LINK_OFF_STATE: return "OFF";
case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
+ case UIC_LINK_BROKEN_STATE: return "BROKEN";
default: return "UNKNOWN";
}
}
@@ -145,12 +146,19 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ u32 ahit;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
- return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit));
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(ahit));
}
static ssize_t auto_hibern8_store(struct device *dev,
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index d2edbd960ebf..07310b12a5dc 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -59,7 +59,7 @@ static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
int i;
int cap_idx = -1;
- union ufs_crypto_cfg_entry cfg = { 0 };
+ union ufs_crypto_cfg_entry cfg = {};
int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
@@ -100,7 +100,7 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
- union ufs_crypto_cfg_entry cfg = { 0 };
+ union ufs_crypto_cfg_entry cfg = {};
return ufshcd_program_key(hba, &cfg, slot);
}
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 5a95a7bfbab0..df3a564c3e33 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -13,6 +13,14 @@
#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/debugfs.h>
+
+struct intel_host {
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs_root;
+};
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
@@ -44,20 +52,134 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
+#define INTEL_ACTIVELTR 0x804
+#define INTEL_IDLELTR 0x808
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US (2 << 10)
+#define INTEL_LTR_SCALE_32US (3 << 10)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+static void intel_cache_ltr(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+ host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct intel_host *host = ufshcd_get_variant(hba);
+ u32 ltr;
+
+ pm_runtime_get_sync(dev);
+
+ /*
+ * Program the latency tolerance (LTR) according to what the
+ * PM QoS layer has requested, or disable it if we were passed
+ * a negative value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
+ }
+
+ if (ltr == host->active_ltr)
+ goto out;
+
+ writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
+ writel(ltr, hba->mmio_base + INTEL_IDLELTR);
+
+ /* Cache the values into intel_host structure */
+ intel_cache_ltr(hba);
+out:
+ pm_runtime_put(dev);
+}
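
The two scales make the encoding worth a worked example: values up to INTEL_LTR_VALUE_MASK (1023) are programmed with 1 µs granularity, anything larger is divided by 32 (the >>= 5) and programmed with 32 µs granularity, saturating at 1023. A runnable sketch of just the encoding step:

#include <stdio.h>
#include <stdint.h>

#define LTR_REQ		(1u << 15)
#define LTR_SCALE_1US	(2u << 10)
#define LTR_SCALE_32US	(3u << 10)
#define LTR_VALUE_MASK	0x3ffu		/* GENMASK(9, 0) */

static uint32_t encode_ltr(int32_t val)	/* val in microseconds */
{
	uint32_t ltr = LTR_REQ;

	if (val > (int32_t)LTR_VALUE_MASK) {
		val >>= 5;			/* switch to 32 us units */
		if (val > (int32_t)LTR_VALUE_MASK)
			val = LTR_VALUE_MASK;	/* saturate */
		ltr |= LTR_SCALE_32US | val;
	} else {
		ltr |= LTR_SCALE_1US | val;
	}
	return ltr;
}

int main(void)
{
	printf("0x%04x\n", encode_ltr(100));	/* 1 us scale: 0x8864 */
	printf("0x%04x\n", encode_ltr(3000));	/* 32 us scale: 0x8c5d */
	return 0;
}
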
+
+static void intel_ltr_expose(struct device *dev)
+{
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct ufs_hba *hba)
+{
+ struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ intel_cache_ltr(hba);
+
+ host->debugfs_root = dir;
+ debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static int ufs_intel_common_init(struct ufs_hba *hba)
+{
+ struct intel_host *host;
+
+ host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
+ intel_ltr_expose(hba->dev);
+ intel_add_debugfs(hba);
+ return 0;
+}
+
+static void ufs_intel_common_exit(struct ufs_hba *hba)
+{
+ intel_remove_debugfs(hba);
+ intel_ltr_hide(hba->dev);
+}
+
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- return 0;
+ return ufs_intel_common_init(hba);
}
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
+ .init = ufs_intel_common_init,
+ .exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_ehl_init,
+ .exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
};
@@ -162,6 +284,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
+ pci_set_drvdata(pdev, hba);
+
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
@@ -171,7 +295,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1d157ff58d81..7a160b86adc6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -35,8 +36,8 @@
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
-/* Timeout after 30 msecs if NOP OUT hangs without response */
-#define NOP_OUT_TIMEOUT 30 /* msecs */
+/* Timeout after 50 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 50 /* msecs */
/* Query request retries */
#define QUERY_REQ_RETRIES 3
@@ -73,6 +74,9 @@
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+/* Polling time to wait for fDeviceInit */
+#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -125,7 +129,8 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
};
/* UFSHCD error handling flags */
@@ -141,6 +146,7 @@ enum {
UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
+ UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
@@ -228,6 +234,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg);
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
@@ -411,15 +423,6 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
- hba->ufs_version, hba->capabilities);
- dev_err(hba->dev,
- "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
- (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
- dev_err(hba->dev,
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
- hba->ufs_stats.hibern8_exit_cnt);
ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
@@ -438,8 +441,6 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
- ufshcd_print_clk_freqs(hba);
-
ufshcd_vops_dbg_register_dump(hba);
}
@@ -474,6 +475,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ prdt_length /= sizeof(struct ufshcd_sg_entry);
+
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
@@ -499,6 +503,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
+ struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
+
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
hba->outstanding_reqs, hba->outstanding_tasks);
@@ -511,12 +517,24 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
hba->auto_bkops_enabled, hba->host->host_self_blocked);
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+ dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
+ ktime_to_us(hba->ufs_stats.last_intr_ts),
+ hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
- dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
- hba->capabilities, hba->caps);
+ dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
+ hba->ufs_version, hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_quirks);
+ if (sdev_ufs)
+ dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
+ sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
+
+ ufshcd_print_clk_freqs(hba);
}
/**
@@ -1569,11 +1587,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
- }
-
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
@@ -1614,12 +1627,12 @@ start:
*/
fallthrough;
case CLKS_OFF:
- ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work);
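+ /*
+ * queue_work() returns false if the ungate work is
+ * already queued; block SCSI requests only when the
+ * work was actually queued, so each block is paired
+ * with exactly one unblock in ufshcd_ungate_work().
+ */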
+ if (queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work))
+ ufshcd_scsi_block_requests(hba);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -1653,6 +1666,7 @@ static void ufshcd_gate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
unsigned long flags;
+ int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
/*
@@ -1679,8 +1693,11 @@ static void ufshcd_gate_work(struct work_struct *work)
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
- if (ufshcd_uic_hibern8_enter(hba)) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret) {
hba->clk_gating.state = CLKS_ON;
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto out;
@@ -1725,11 +1742,10 @@ static void __ufshcd_release(struct ufs_hba *hba)
hba->clk_gating.active_reqs--;
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done
- || ufshcd_eh_in_progress(hba))
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+ ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -1842,6 +1858,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba))
return;
+ hba->clk_gating.state = CLKS_ON;
+
hba->clk_gating.delay_ms = 150;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
@@ -2097,10 +2115,20 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
unsigned long flags;
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- else
+ } else {
ret = -ETIMEDOUT;
+ dev_err(hba->dev,
+ "uic cmd 0x%x with arg3 0x%x completion timeout\n",
+ uic_cmd->command, uic_cmd->argument3);
+
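+ /*
+ * cmd_active is cleared by the ISR when the command
+ * completes, so if it is clear here the completion
+ * simply raced with the timeout and the result in
+ * argument2 is valid.
+ */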
+ if (!uic_cmd->cmd_active) {
+ dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
+ __func__);
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
+ }
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
@@ -2132,6 +2160,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
if (completion)
init_completion(&uic_cmd->done);
+ uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
@@ -2394,12 +2423,13 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
* for Device Management Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
{
u8 upiu_flags;
int ret = 0;
@@ -2509,34 +2539,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED:
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_unlock;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
-
- /* if error handling is in progress, don't issue commands */
- if (ufshcd_eh_in_progress(hba)) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
hba->req_abort_count = 0;
err = ufshcd_hold(hba, true);
@@ -2544,7 +2546,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- WARN_ON(hba->clk_gating.state != CLKS_ON);
+ WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON));
lrbp = &hba->lrb[tag];
@@ -2571,11 +2574,51 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- /* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+ /*
+ * pm_runtime_get_sync() is used at the error handling
+ * preparation stage. If a SCSI cmd, e.g. the SSU cmd, is sent
+ * from the hba's PM ops, it can never finish if we let the SCSI
+ * layer keep retrying it, which gets the err handler stuck
+ * forever. Neither can we let the SCSI cmd pass through, because
+ * UFS is in a bad state and the SCSI cmd may eventually time
+ * out, which would block the err handler for too long. So just
+ * fail SCSI cmds sent from PM ops; the err handler can recover
+ * the PM error anyway.
+ */
+ if (hba->pm_op_in_progress) {
+ hba->force_reset = true;
+ set_host_byte(cmd, DID_BAD_TARGET);
+ goto out_compl_cmd;
+ }
+ fallthrough;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_compl_cmd;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ goto out_compl_cmd;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ goto out_compl_cmd;
+ }
ufshcd_send_command(hba, tag);
-out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto out;
+
+out_compl_cmd:
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ if (!err)
+ cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2593,7 +2636,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
- return ufshcd_comp_devman_upiu(hba, lrbp);
+ return ufshcd_compose_devman_upiu(hba, lrbp);
}
static int
@@ -3747,6 +3790,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ufshcd_is_link_broken(hba)) {
+ ret = -ENOLINK;
+ goto out_unlock;
+ }
hba->uic_async_done = &uic_async_done;
if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
@@ -3771,10 +3818,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
+
+ if (!cmd->cmd_active) {
+ dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
+ __func__);
+ goto check_upmcrs;
+ }
+
ret = -ETIMEDOUT;
goto out;
}
+check_upmcrs:
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
@@ -3794,6 +3849,11 @@ out:
hba->uic_async_done = NULL;
if (reenable_intr)
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ if (ret) {
+ ufshcd_set_link_broken(hba);
+ ufshcd_schedule_eh_work(hba);
+ }
+out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
@@ -3863,7 +3923,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
@@ -3876,45 +3936,16 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- if (ret) {
- int err;
-
+ if (ret)
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
-
- /*
- * If link recovery fails then return error code returned from
- * ufshcd_link_recovery().
- * If link recovery succeeds then return -EAGAIN to attempt
- * hibern8 enter retry again.
- */
- err = ufshcd_link_recovery(hba);
- if (err) {
- dev_err(hba->dev, "%s: link recovery failed", __func__);
- ret = err;
- } else {
- ret = -EAGAIN;
- }
- } else
+ else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
return ret;
}
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
- int ret = 0, retries;
-
- for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
- ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret)
- goto out;
- }
-out:
- return ret;
-}
-
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
@@ -3931,7 +3962,6 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
- ret = ufshcd_link_recovery(hba);
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
@@ -3972,7 +4002,7 @@ void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
- if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
+ if (!ufshcd_is_auto_hibern8_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4065,7 +4095,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
int ret;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ if (!hba->force_pmc &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
@@ -4175,9 +4206,9 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
- int i;
int err;
bool flag_res = true;
+ ktime_t timeout;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
@@ -4188,20 +4219,26 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
goto out;
}
- /* poll for max. 1000 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 1000 && !err && flag_res; i++)
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
+ /* Poll for the fDeviceInit flag to be cleared */
+ timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
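+ /* Re-check every 5-10 ms until the 1500 ms budget expires. */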
+ do {
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
+ if (!flag_res)
+ break;
+ usleep_range(5000, 10000);
+ } while (ktime_before(ktime_get(), timeout));
- if (err)
+ if (err) {
dev_err(hba->dev,
- "%s reading fDeviceInit flag failed with error %d\n",
- __func__, err);
- else if (flag_res)
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ } else if (flag_res) {
dev_err(hba->dev,
- "%s fDeviceInit was not cleared by the device\n",
- __func__);
-
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+ err = -EBUSY;
+ }
out:
return err;
}
@@ -4258,10 +4295,8 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
- goto out;
}
-out:
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
@@ -4495,6 +4530,8 @@ link_startup:
if (ret)
goto out;
+ /* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
+ ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
if (ret) {
@@ -4884,11 +4921,14 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
+ if (!hba->uic_async_done)
+ hba->active_uic_cmd->cmd_active = 0;
complete(&hba->active_uic_cmd->done);
retval = IRQ_HANDLED;
}
if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
+ hba->active_uic_cmd->cmd_active = 0;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
@@ -5299,6 +5339,9 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
+ if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
+ return;
+
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
@@ -5544,16 +5587,129 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
hba->saved_err &= ~UIC_ERROR;
/* clear NAC error */
hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- if (!hba->saved_uic_err) {
+ if (!hba->saved_uic_err)
err_handling = false;
- goto out;
- }
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
return err_handling;
}
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
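+ /*
+ * eh_wq is a single-threaded workqueue, so back-to-back
+ * recovery requests are serialized rather than run
+ * concurrently.
+ */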
+ queue_work(hba->eh_wq, &hba->eh_work);
+ }
+}
+
+static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+{
+ pm_runtime_get_sync(hba->dev);
+ if (pm_runtime_suspended(hba->dev)) {
+ /*
+ * Don't assume anything of pm_runtime_get_sync(), if
+ * resume fails, irq and clocks can be OFF, and powers
+ * can be OFF or in LPM.
+ */
+ ufshcd_setup_hba_vreg(hba, true);
+ ufshcd_enable_irq(hba);
+ ufshcd_setup_vreg(hba, true);
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ ufshcd_hold(hba, false);
+ if (!ufshcd_is_clkgating_allowed(hba))
+ ufshcd_setup_clocks(hba, true);
+ ufshcd_release(hba);
+ ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
+ } else {
+ ufshcd_hold(hba, false);
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
+ }
+}
+
+static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
+{
+ ufshcd_release(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ pm_runtime_put(hba->dev);
+}
+
+static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
+{
+ return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
+ ufshcd_is_link_broken(hba))));
+}
+
+#ifdef CONFIG_PM
+static void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+ struct Scsi_Host *shost = hba->host;
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int ret;
+
+ /*
+ * Set the RPM status of the hba device to RPM_ACTIVE;
+ * this also clears its runtime error.
+ */
+ ret = pm_runtime_set_active(hba->dev);
+ /*
+ * If the hba device had a runtime error, we also need to resume
+ * the scsi devices under the hba in case any of them failed to
+ * resume due to the hba runtime resume failure. This unblocks
+ * blk_queue_enter() in case there are bios waiting inside it.
+ */
+ if (!ret) {
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+ }
+ }
+}
+#else
+static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+}
+#endif
+
+static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
+ u32 mode;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+
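+ /*
+ * PA_PWRMODE packs the RX power mode in bits 7:4 and
+ * the TX power mode in bits 3:0; any mismatch with the
+ * cached pwr_info means the link has fallen back, e.g.
+ * to PWM mode after a LINERESET.
+ */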
+ if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
+ return true;
+
+ if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
+ return true;
+
+ return false;
+}
+
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
* @work: pointer to work structure
@@ -5562,23 +5718,36 @@ static void ufshcd_err_handler(struct work_struct *work)
{
struct ufs_hba *hba;
unsigned long flags;
- u32 err_xfer = 0;
- u32 err_tm = 0;
- int err = 0;
+ bool err_xfer = false;
+ bool err_tm = false;
+ int err = 0, pmc_err;
int tag;
- bool needs_reset = false;
+ bool needs_reset = false, needs_restore = false;
hba = container_of(work, struct ufs_hba, eh_work);
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
-
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ if (ufshcd_err_handling_should_stop(hba)) {
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return;
+ }
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_err_handling_prepare(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_scsi_block_requests(hba);
+ /*
+ * A full reset and restore might have happened after preparation
+ * is finished, so double-check whether we should stop.
+ */
+ if (ufshcd_err_handling_should_stop(hba)) {
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
goto out;
-
+ }
hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
@@ -5590,30 +5759,61 @@ static void ufshcd_err_handler(struct work_struct *work)
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret)
+ if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
goto skip_err_handling;
}
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
- (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
+
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba) ||
((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
needs_reset = true;
+ if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
+ (hba->saved_uic_err &&
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
+ bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
+
/*
* if host reset is required then skip clearing the pending
* transfers forcefully because they will get cleared during
* host reset and restore
*/
if (needs_reset)
- goto skip_pending_xfer_clear;
+ goto do_reset;
+ /*
+ * If a LINERESET was caught, UFS might have been put into PWM
+ * mode; check whether a power mode restore is needed.
+ */
+ if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
+ hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
+ if (!hba->saved_uic_err)
+ hba->saved_err &= ~UIC_ERROR;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ufshcd_is_pwr_mode_restore_needed(hba))
+ needs_restore = true;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->saved_err && !needs_restore)
+ goto skip_err_handling;
+ }
+
+ hba->silence_err_logs = true;
/* release lock as clear command might sleep */
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Clear pending transfer requests */
for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, tag)) {
+ if (ufshcd_try_to_abort_task(hba, tag)) {
err_xfer = true;
goto lock_skip_pending_xfer_clear;
}
@@ -5632,11 +5832,38 @@ lock_skip_pending_xfer_clear:
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
- if (err_xfer || err_tm)
+ if (err_xfer || err_tm) {
needs_reset = true;
+ goto do_reset;
+ }
+
+ /*
+ * After all reqs and tasks are cleared from doorbell,
+ * now it is safe to retore power mode.
+ */
+ if (needs_restore) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /*
+ * Hold the scaling lock just in case dev cmds
+ * are sent via bsg and/or sysfs.
+ */
+ down_write(&hba->clk_scaling_lock);
+ hba->force_pmc = true;
+ pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+ if (pmc_err) {
+ needs_reset = true;
+ dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
+ __func__, pmc_err);
+ }
+ hba->force_pmc = false;
+ ufshcd_print_pwr_info(hba);
+ up_write(&hba->clk_scaling_lock);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
-skip_pending_xfer_clear:
+do_reset:
/* Fatal errors need reset */
if (needs_reset) {
unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
@@ -5652,38 +5879,31 @@ skip_pending_xfer_clear:
__ufshcd_transfer_req_compl(hba,
(1UL << (hba->nutrs - 1)));
+ hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
+ if (err)
+ dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
+ __func__, err);
+ else
+ ufshcd_recover_pm_error(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (err) {
- dev_err(hba->dev, "%s: reset and restore failed\n",
- __func__);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- /*
- * Inform scsi mid-layer that we did reset and allow to handle
- * Unit Attention properly.
- */
- scsi_report_bus_reset(hba->host, 0);
- hba->saved_err = 0;
- hba->saved_uic_err = 0;
}
skip_err_handling:
if (!needs_reset) {
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
if (hba->saved_err || hba->saved_uic_err)
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
- ufshcd_clear_eh_in_progress(hba);
-
out:
+ ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_err_handling_unprepare(hba);
}
/**
@@ -5699,17 +5919,33 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
u32 reg;
irqreturn_t retval = IRQ_NONE;
- /* PHY layer lane error */
+ /* PHY layer error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- /* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
- ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
+ __func__);
+
+ /* Got a LINERESET indication. */
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+ struct uic_command *cmd = NULL;
+
+ hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
+ if (hba->uic_async_done && hba->active_uic_cmd)
+ cmd = hba->active_uic_cmd;
+ /*
+ * Ignore the LINERESET during power mode change
+ * operation via DME_SET command.
+ */
+ if (cmd && (cmd->command == UIC_CMD_DME_SET))
+ hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
+ }
retval |= IRQ_HANDLED;
}
@@ -5813,6 +6049,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->errors, ufshcd_get_upmcrs(hba));
ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
hba->errors);
+ ufshcd_set_link_broken(hba);
queue_eh_work = true;
}
@@ -5824,30 +6061,18 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->saved_err |= hba->errors;
hba->saved_uic_err |= hba->uic_error;
- /* handle fatal errors only when link is functional */
- if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- ufshcd_scsi_block_requests(hba);
-
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
-
- /* dump controller state before resetting */
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
- bool pr_prdt = !!(hba->saved_err &
- SYSTEM_BUS_FATAL_ERROR);
-
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ /* dump controller state before resetting */
+ if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
+ (hba->saved_uic_err &&
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
__func__, hba->saved_err,
hba->saved_uic_err);
-
- ufshcd_print_host_regs(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
- ufshcd_print_trs(hba, hba->outstanding_reqs,
- pr_prdt);
- }
- schedule_work(&hba->eh_work);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
+ "host_regs: ");
+ ufshcd_print_pwr_info(hba);
}
+ ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -5951,6 +6176,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ hba->ufs_stats.last_intr_status = intr_status;
+ hba->ufs_stats.last_intr_ts = ktime_get();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -6383,7 +6610,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
}
/**
- * ufshcd_abort - abort a specific command
+ * ufshcd_try_to_abort_task - abort a specific task
- * @cmd: SCSI command pointer
+ * @hba: Pointer to adapter instance
+ * @tag: Task tag/index to be aborted
*
* Abort the pending command in device by sending UFS_ABORT_TASK task management
@@ -6392,6 +6619,80 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
* issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
* really issued and then try to abort it.
*
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
+{
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ u32 reg;
+
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+ goto out;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err) {
+ err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err)
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_abort - scsi host template eh_abort_handler callback
+ * @cmd: SCSI command pointer
+ *
* Returns SUCCESS/FAILED
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
@@ -6401,8 +6702,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
unsigned long flags;
unsigned int tag;
int err = 0;
- int poll_cnt;
- u8 resp = 0xF;
struct ufshcd_lrb *lrbp;
u32 reg;
@@ -6467,79 +6766,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
/* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip) {
+ if (lrbp->req_abort_skip)
err = -EIO;
- goto out;
- }
-
- for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
- if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
- /* cmd pending in the device */
- dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
- __func__, tag);
- break;
- } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- /*
- * cmd not pending in the device, check if it is
- * in transition.
- */
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
- __func__, tag);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto cleanup;
- } else {
- dev_err(hba->dev,
- "%s: no response from device. tag = %d, err %d\n",
- __func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
- }
- }
-
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
-
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err) {
- err = resp; /* service response error */
- dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
- __func__, tag, err);
- }
- goto out;
- }
-
- err = ufshcd_clear_cmd(hba, tag);
- if (err) {
- dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
- __func__, tag, err);
- goto out;
- }
+ else
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (!err) {
cleanup:
- scsi_dma_unmap(cmd);
-
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_outstanding_req_clear(hba, tag);
- hba->lrb[tag].cmd = NULL;
- spin_unlock_irqrestore(host->host_lock, flags);
-
+ spin_lock_irqsave(host->host_lock, flags);
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ spin_unlock_irqrestore(host->host_lock, flags);
out:
- if (!err) {
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
@@ -6592,8 +6829,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Establish the link again and restore the device */
err = ufshcd_probe_hba(hba, false);
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
- err = -EIO;
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6612,9 +6847,23 @@ out:
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
+ u32 saved_err;
+ u32 saved_uic_err;
int err = 0;
+ unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
+ /*
+ * This is a fresh start, cache and clear saved error first,
+ * in case new error generated during reset and restore.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ saved_err = hba->saved_err;
+ saved_uic_err = hba->saved_uic_err;
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
do {
/* Reset the attached device */
ufshcd_vops_device_reset(hba);
@@ -6622,6 +6871,18 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Inform scsi mid-layer that we did reset and allow to handle
+ * Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ if (err) {
+ hba->saved_err |= saved_err;
+ hba->saved_uic_err |= saved_uic_err;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return err;
}
@@ -6633,48 +6894,25 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int err;
+ int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
- ufshcd_hold(hba, false);
- /*
- * Check if there is any race with fatal error handling.
- * If so, wait for it to complete. Even though fatal error
- * handling does reset and restore in some cases, don't assume
- * anything out of it. We are just avoiding race here.
- */
- do {
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!(work_pending(&hba->eh_work) ||
- hba->ufshcd_state == UFSHCD_STATE_RESET ||
- hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
- break;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
- flush_work(&hba->eh_work);
- } while (1);
-
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_reset_and_restore(hba);
+ flush_work(&hba->eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
+ if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
err = FAILED;
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
return err;
}
@@ -7395,6 +7633,7 @@ out:
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
{
int ret;
+ unsigned long flags;
ktime_t start = ktime_get();
ret = ufshcd_link_startup(hba);
@@ -7459,14 +7698,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
*/
ufshcd_set_active_icc_lvl(hba);
- /* set the state as operational after switching to desired gear */
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
out:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7606,12 +7848,10 @@ static int ufshcd_config_vreg(struct device *dev,
if (vreg->min_uV && vreg->max_uV) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
+ if (ret)
dev_err(dev,
"%s: %s set voltage failed, err=%d\n",
__func__, name, ret);
- goto out;
- }
}
}
out:
@@ -7674,8 +7914,6 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
goto out;
ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
- if (ret)
- goto out;
out:
if (ret) {
@@ -7721,10 +7959,8 @@ static int ufshcd_init_vreg(struct ufs_hba *hba)
goto out;
ret = ufshcd_get_vreg(dev, info->vccq);
- if (ret)
- goto out;
-
- ret = ufshcd_get_vreg(dev, info->vccq2);
+ if (!ret)
+ ret = ufshcd_get_vreg(dev, info->vccq2);
out:
return ret;
}
@@ -7868,12 +8104,7 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_setup_regulators(hba, true);
if (err)
- goto out_exit;
-
- goto out;
-
-out_exit:
- ufshcd_vops_exit(hba);
+ ufshcd_vops_exit(hba);
out:
if (err)
dev_err(hba->dev, "%s: variant %s init failed err %d\n",
@@ -8073,10 +8304,13 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
if (req_link_state == UIC_LINK_HIBERN8_STATE) {
ret = ufshcd_uic_hibern8_enter(hba);
- if (!ret)
+ if (!ret) {
ufshcd_set_link_hibern8(hba);
- else
+ } else {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
goto out;
+ }
}
/*
* If autobkops is enabled, link can't be turned off because
@@ -8092,8 +8326,11 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* unipro. But putting the link in hibern8 is much faster.
*/
ret = ufshcd_uic_hibern8_enter(hba);
- if (ret)
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
goto out;
+ }
/*
* Change controller state to "reset state" which
* should also put the link in off/reset state
@@ -8331,8 +8568,11 @@ disable_clks:
/* If link is active, device ref_clk can't be switched off */
__ufshcd_setup_clocks(hba, false, true);
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
@@ -8410,10 +8650,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
- if (!ret)
+ if (!ret) {
ufshcd_set_link_active(hba);
- else
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
goto vendor_suspend;
+ }
} else if (ufshcd_is_link_off(hba)) {
/*
* A full initialization of the host and the device is
@@ -8472,6 +8715,11 @@ disable_irq_and_vops_clks:
if (hba->clk_scaling.is_allowed)
ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
out:
hba->pm_op_in_progress = 0;
if (ret)
@@ -8680,6 +8928,7 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
+ destroy_workqueue(hba->eh_wq);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
@@ -8782,6 +9031,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -8843,6 +9093,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+ hba->host->host_no);
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ if (!hba->eh_wq) {
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_disable;
+ }
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8970,6 +9229,7 @@ out_remove_scsi_host:
exit_gating:
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
+ destroy_workqueue(hba->eh_wq);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 363589c0bd37..e0f00a42371c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -64,6 +64,7 @@ enum dev_cmd_type {
* @argument1: UIC command argument 1
* @argument2: UIC command argument 2
* @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
* @done: UIC command completion
*/
struct uic_command {
@@ -71,6 +72,7 @@ struct uic_command {
u32 argument1;
u32 argument2;
u32 argument3;
+ int cmd_active;
struct completion done;
};
@@ -90,6 +92,7 @@ enum uic_link_state {
UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+ UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
};
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
@@ -97,11 +100,15 @@ enum uic_link_state {
UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
+ UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
+ UIC_LINK_BROKEN_STATE)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
@@ -409,6 +416,8 @@ struct ufs_err_reg_hist {
/**
* struct ufs_stats - keeps usage/err statistics
+ * @last_intr_status: record the last interrupt status.
+ * @last_intr_ts: record the last interrupt timestamp.
* @hibern8_exit_cnt: Counter to keep track of number of exits,
* reset this after link-startup.
* @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
@@ -428,6 +437,9 @@ struct ufs_err_reg_hist {
* @tsk_abort: tracks task abort events
*/
struct ufs_stats {
+ u32 last_intr_status;
+ ktime_t last_intr_ts;
+
u32 hibern8_exit_cnt;
ktime_t last_hibern8_exit_tstamp;
@@ -526,6 +538,12 @@ enum ufshcd_quirks {
* auto-hibernate capability but it doesn't work.
*/
UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
+
+ /*
+ * This quirk disables manual flush control for WriteBooster
+ */
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+
};
enum ufshcd_caps {
@@ -617,12 +635,15 @@ struct ufs_hba_variant_params {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
+ * @eh_wq: Workqueue that eh_work works on
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @force_reset: flag to force eh_work to perform a full reset
+ * @force_pmc: flag to force a power mode change
* @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
@@ -711,6 +732,7 @@ struct ufs_hba {
bool is_powered;
/* Work Queues */
+ struct workqueue_struct *eh_wq;
struct work_struct eh_work;
struct work_struct eeh_work;
@@ -720,6 +742,8 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool force_reset;
+ bool force_pmc;
bool silence_err_logs;
/* Device management request data */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index ba31b090f784..6795e1f0e8f8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -171,6 +171,7 @@ enum {
#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 4ee64782fd48..f6b52ce36de6 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -205,6 +205,9 @@ enum {
UNCHANGED = 7,
};
+#define PWRMODE_MASK 0xF
+#define PWRMODE_RX_OFFSET 4
+
/* PA TX/RX Frequency Series */
enum {
PA_HS_MODE_A = 1,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3b1803432090..b9c86a7e3b97 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -284,7 +284,12 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
- scsi_add_device(shost, 0, target, lun);
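+ /*
+ * A rescan event on LUN 0 may mean new LUNs were added
+ * to the target, so scan the whole target rather than
+ * only LUN 0.
+ */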
+ if (lun == 0) {
+ scsi_scan_target(&shost->shost_gendev, 0, target,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ } else {
+ scsi_add_device(shost, 0, target, lun);
+ }
break;
case VIRTIO_SCSI_EVT_RESET_REMOVED:
sdev = scsi_device_lookup(shost, 0, target, lun);
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index ae1e248a8fb8..1d2bc181da05 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -301,8 +301,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
{
/* Remove all clients */
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
- /* Enter Clock Pause */
- slim_ctrl_clk_pause(ctrl, false, 0);
ida_simple_remove(&ctrl_ida, ctrl->id);
return 0;
@@ -326,8 +324,8 @@ void slim_report_absent(struct slim_device *sbdev)
mutex_lock(&ctrl->lock);
sbdev->is_laddr_valid = false;
mutex_unlock(&ctrl->lock);
-
- ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
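+ /*
+ * The logical address was allocated from the IDA only when
+ * the controller has no get_laddr callback, so only free it
+ * in that case.
+ */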
+ if (!ctrl->get_laddr)
+ ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
}
EXPORT_SYMBOL_GPL(slim_report_absent);
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 743ee7b4e63f..218aefc3531c 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1277,9 +1277,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
{
struct qcom_slim_ngd_qmi *qmi =
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
+ struct qcom_slim_ngd_ctrl *ctrl =
+ container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
qmi->svc_info.sq_node = 0;
qmi->svc_info.sq_port = 0;
+
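+ /* The QMI server is gone; tear down the NGD controller. */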
+ qcom_slim_ngd_enable(ctrl, false);
}
static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
diff --git a/drivers/soc/actions/owl-sps-helper.c b/drivers/soc/actions/owl-sps-helper.c
index 291a206d6f04..e3f36603dd53 100644
--- a/drivers/soc/actions/owl-sps-helper.c
+++ b/drivers/soc/actions/owl-sps-helper.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/soc/actions/owl-sps.h>
#define OWL_SPS_PG_CTL 0x0
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 43665b77aa9e..5164a4dc2352 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -15,6 +15,7 @@
#include <linux/reset.h>
#include <linux/clk.h>
#include <dt-bindings/power/meson8-power.h>
+#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/power/meson-sm1-power.h>
@@ -134,6 +135,11 @@ static struct meson_ee_pwrc_top_domain sm1_pwrc_ge2d = SM1_EE_PD(19);
{ __reg, BIT(14) }, \
{ __reg, BIT(15) }
+static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
@@ -190,6 +196,10 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_ge2d[] = {
{ HHI_MEM_PD_REG0, GENMASK(25, 18) },
};
+static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_audio[] = {
+ { HHI_MEM_PD_REG0, GENMASK(5, 4) },
+};
+
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) },
@@ -231,6 +241,13 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
+static struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
+ [PWRC_AXG_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, axg_pwrc_mem_vpu,
+ pwrc_ee_get_power, 5, 2),
+ [PWRC_AXG_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+ [PWRC_AXG_AUDIO_ID] = MEM_PD("AUDIO", axg_pwrc_mem_audio),
+};
+
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
[PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_get_power, 11, 2),
@@ -433,8 +450,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
if (ret)
return ret;
- ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
- false);
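+ /*
+ * GENPD_FLAG_ALWAYS_ON keeps the domain powered without
+ * relying on the removed always-on governor.
+ */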
+ dom->base.flags = GENPD_FLAG_ALWAYS_ON;
+ ret = pm_genpd_init(&dom->base, NULL, false);
if (ret)
return ret;
} else {
@@ -529,6 +546,11 @@ static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
.domains = g12a_pwrc_domains,
};
+static struct meson_ee_pwrc_domain_data meson_ee_axg_pwrc_data = {
+ .count = ARRAY_SIZE(axg_pwrc_domains),
+ .domains = axg_pwrc_domains,
+};
+
static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
.count = ARRAY_SIZE(gxbb_pwrc_domains),
.domains = gxbb_pwrc_domains,
@@ -563,6 +585,10 @@ static const struct of_device_id meson_ee_pwrc_match_table[] = {
.data = &meson_ee_m8b_pwrc_data,
},
{
+ .compatible = "amlogic,meson-axg-pwrc",
+ .data = &meson_ee_axg_pwrc_data,
+ },
+ {
.compatible = "amlogic,meson-gxbb-pwrc",
.data = &meson_ee_gxbb_pwrc_data,
},
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 511b6856225d..21b4bc811c00 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -339,8 +339,8 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return ret;
}
- pm_genpd_init(&vpu_pd->genpd, &pm_domain_always_on_gov,
- powered_off);
+ vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
+ pm_genpd_init(&vpu_pd->genpd, NULL, powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
&vpu_pd->genpd);
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 648e32693b7e..24f92a6e882a 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -22,6 +22,15 @@ config RASPBERRYPI_POWER
This enables support for the RPi power domains which can be enabled
or disabled via the RPi firmware.
+config SOC_BCM63XX
+ bool "Broadcom 63xx SoC drivers"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ help
+ Enables drivers for the Broadcom 63xx series of chips.
+ Drivers can be enabled individually within this menu.
+
+ If unsure, say N.
+
config SOC_BRCMSTB
bool "Broadcom STB SoC drivers"
depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
@@ -33,6 +42,7 @@ config SOC_BRCMSTB
If unsure, say N.
+source "drivers/soc/bcm/bcm63xx/Kconfig"
source "drivers/soc/bcm/brcmstb/Kconfig"
endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index d92268a829a9..7bc90e0bd773 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
+obj-$(CONFIG_SOC_BCM63XX) += bcm63xx/
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm63xx/Kconfig b/drivers/soc/bcm/bcm63xx/Kconfig
new file mode 100644
index 000000000000..16f648a6c70a
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if SOC_BCM63XX
+
+config BCM63XX_POWER
+ bool "BCM63xx power domain driver"
+ depends on BMIPS_GENERIC || (COMPILE_TEST && OF)
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the BCM63xx power domains controller on
+ BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
+
+endif # SOC_BCM63XX
diff --git a/drivers/soc/bcm/bcm63xx/Makefile b/drivers/soc/bcm/bcm63xx/Makefile
new file mode 100644
index 000000000000..0710d5e018cc
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM63XX_POWER) += bcm63xx-power.o
diff --git a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
new file mode 100644
index 000000000000..515fe182dc34
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM63xx Power Domain Controller Driver
+ *
+ * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <dt-bindings/soc/bcm6318-pm.h>
+#include <dt-bindings/soc/bcm6328-pm.h>
+#include <dt-bindings/soc/bcm6362-pm.h>
+#include <dt-bindings/soc/bcm63268-pm.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+struct bcm63xx_power_dev {
+ struct generic_pm_domain genpd;
+ struct bcm63xx_power *power;
+ uint32_t mask;
+};
+
+struct bcm63xx_power {
+ void __iomem *base;
+ spinlock_t lock;
+ struct bcm63xx_power_dev *dev;
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain **genpd;
+};
+
+struct bcm63xx_power_data {
+ const char * const name;
+ uint8_t bit;
+ unsigned int flags;
+};
+
+static int bcm63xx_power_get_state(struct bcm63xx_power_dev *pmd, bool *is_on)
+{
+ struct bcm63xx_power *power = pmd->power;
+
+ if (!pmd->mask) {
+ *is_on = false;
+ return -EINVAL;
+ }
+
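+ /* A set bit gates the domain, so it is on when its bit is clear. */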
+ *is_on = !(__raw_readl(power->base) & pmd->mask);
+
+ return 0;
+}
+
+static int bcm63xx_power_set_state(struct bcm63xx_power_dev *pmd, bool on)
+{
+ struct bcm63xx_power *power = pmd->power;
+ unsigned long flags;
+ uint32_t val;
+
+ if (!pmd->mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&power->lock, flags);
+ val = __raw_readl(power->base);
+ if (on)
+ val &= ~pmd->mask;
+ else
+ val |= pmd->mask;
+ __raw_writel(val, power->base);
+ spin_unlock_irqrestore(&power->lock, flags);
+
+ return 0;
+}
+
+static int bcm63xx_power_on(struct generic_pm_domain *genpd)
+{
+ struct bcm63xx_power_dev *pmd = container_of(genpd,
+ struct bcm63xx_power_dev, genpd);
+
+ return bcm63xx_power_set_state(pmd, true);
+}
+
+static int bcm63xx_power_off(struct generic_pm_domain *genpd)
+{
+ struct bcm63xx_power_dev *pmd = container_of(genpd,
+ struct bcm63xx_power_dev, genpd);
+
+ return bcm63xx_power_set_state(pmd, false);
+}
+
+static int bcm63xx_power_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ const struct bcm63xx_power_data *entry, *table;
+ struct bcm63xx_power *power;
+ unsigned int ndom;
+ uint8_t max_bit = 0;
+ int ret;
+
+ power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ power->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(power->base))
+ return PTR_ERR(power->base);
+
+ table = of_device_get_match_data(dev);
+ if (!table)
+ return -EINVAL;
+
+ power->genpd_data.num_domains = 0;
+ ndom = 0;
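+ /*
+ * First pass: count the domains and find the highest bit
+ * so the genpd array can be indexed directly by bit.
+ */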
+ for (entry = table; entry->name; entry++) {
+ max_bit = max(max_bit, entry->bit);
+ ndom++;
+ }
+
+ if (!ndom)
+ return -ENODEV;
+
+ power->genpd_data.num_domains = max_bit + 1;
+
+ power->dev = devm_kcalloc(dev, power->genpd_data.num_domains,
+ sizeof(struct bcm63xx_power_dev),
+ GFP_KERNEL);
+ if (!power->dev)
+ return -ENOMEM;
+
+ power->genpd = devm_kcalloc(dev, power->genpd_data.num_domains,
+ sizeof(struct generic_pm_domain *),
+ GFP_KERNEL);
+ if (!power->genpd)
+ return -ENOMEM;
+
+ power->genpd_data.domains = power->genpd;
+
+ ndom = 0;
+ for (entry = table; entry->name; entry++) {
+ struct bcm63xx_power_dev *pmd = &power->dev[ndom];
+ bool is_on;
+
+ pmd->power = power;
+ pmd->mask = BIT(entry->bit);
+ pmd->genpd.name = entry->name;
+ pmd->genpd.flags = entry->flags;
+
+ ret = bcm63xx_power_get_state(pmd, &is_on);
+ if (ret)
+ dev_warn(dev, "unable to get current state for %s\n",
+ pmd->genpd.name);
+
+ pmd->genpd.power_on = bcm63xx_power_on;
+ pmd->genpd.power_off = bcm63xx_power_off;
+
+ pm_genpd_init(&pmd->genpd, NULL, !is_on);
+ power->genpd[entry->bit] = &pmd->genpd;
+
+ ndom++;
+ }
+
+ spin_lock_init(&power->lock);
+
+ ret = of_genpd_add_provider_onecell(np, &power->genpd_data);
+ if (ret) {
+ dev_err(dev, "failed to register genpd driver: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "registered %u power domains\n", ndom);
+
+ return 0;
+}
+
+static const struct bcm63xx_power_data bcm6318_power_domains[] = {
+ {
+ .name = "pcie",
+ .bit = BCM6318_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "usb",
+ .bit = BCM6318_POWER_DOMAIN_USB,
+ }, {
+ .name = "ephy0",
+ .bit = BCM6318_POWER_DOMAIN_EPHY0,
+ }, {
+ .name = "ephy1",
+ .bit = BCM6318_POWER_DOMAIN_EPHY1,
+ }, {
+ .name = "ephy2",
+ .bit = BCM6318_POWER_DOMAIN_EPHY2,
+ }, {
+ .name = "ephy3",
+ .bit = BCM6318_POWER_DOMAIN_EPHY3,
+ }, {
+ .name = "ldo2p5",
+ .bit = BCM6318_POWER_DOMAIN_LDO2P5,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "ldo2p9",
+ .bit = BCM6318_POWER_DOMAIN_LDO2P9,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "sw1p0",
+ .bit = BCM6318_POWER_DOMAIN_SW1P0,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "pad",
+ .bit = BCM6318_POWER_DOMAIN_PAD,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm6328_power_domains[] = {
+ {
+ .name = "adsl2-mips",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_MIPS,
+ }, {
+ .name = "adsl2-phy",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_PHY,
+ }, {
+ .name = "adsl2-afe",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_AFE,
+ }, {
+ .name = "sar",
+ .bit = BCM6328_POWER_DOMAIN_SAR,
+ }, {
+ .name = "pcm",
+ .bit = BCM6328_POWER_DOMAIN_PCM,
+ }, {
+ .name = "usbd",
+ .bit = BCM6328_POWER_DOMAIN_USBD,
+ }, {
+ .name = "usbh",
+ .bit = BCM6328_POWER_DOMAIN_USBH,
+ }, {
+ .name = "pcie",
+ .bit = BCM6328_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "robosw",
+ .bit = BCM6328_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "ephy",
+ .bit = BCM6328_POWER_DOMAIN_EPHY,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm6362_power_domains[] = {
+ {
+ .name = "sar",
+ .bit = BCM6362_POWER_DOMAIN_SAR,
+ }, {
+ .name = "ipsec",
+ .bit = BCM6362_POWER_DOMAIN_IPSEC,
+ }, {
+ .name = "mips",
+ .bit = BCM6362_POWER_DOMAIN_MIPS,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "dect",
+ .bit = BCM6362_POWER_DOMAIN_DECT,
+ }, {
+ .name = "usbh",
+ .bit = BCM6362_POWER_DOMAIN_USBH,
+ }, {
+ .name = "usbd",
+ .bit = BCM6362_POWER_DOMAIN_USBD,
+ }, {
+ .name = "robosw",
+ .bit = BCM6362_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM6362_POWER_DOMAIN_PCM,
+ }, {
+ .name = "periph",
+ .bit = BCM6362_POWER_DOMAIN_PERIPH,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "adsl-phy",
+ .bit = BCM6362_POWER_DOMAIN_ADSL_PHY,
+ }, {
+ .name = "gmii-pads",
+ .bit = BCM6362_POWER_DOMAIN_GMII_PADS,
+ }, {
+ .name = "fap",
+ .bit = BCM6362_POWER_DOMAIN_FAP,
+ }, {
+ .name = "pcie",
+ .bit = BCM6362_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "wlan-pads",
+ .bit = BCM6362_POWER_DOMAIN_WLAN_PADS,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm63268_power_domains[] = {
+ {
+ .name = "sar",
+ .bit = BCM63268_POWER_DOMAIN_SAR,
+ }, {
+ .name = "ipsec",
+ .bit = BCM63268_POWER_DOMAIN_IPSEC,
+ }, {
+ .name = "mips",
+ .bit = BCM63268_POWER_DOMAIN_MIPS,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "dect",
+ .bit = BCM63268_POWER_DOMAIN_DECT,
+ }, {
+ .name = "usbh",
+ .bit = BCM63268_POWER_DOMAIN_USBH,
+ }, {
+ .name = "usbd",
+ .bit = BCM63268_POWER_DOMAIN_USBD,
+ }, {
+ .name = "robosw",
+ .bit = BCM63268_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM63268_POWER_DOMAIN_PCM,
+ }, {
+ .name = "periph",
+ .bit = BCM63268_POWER_DOMAIN_PERIPH,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "vdsl-phy",
+ .bit = BCM63268_POWER_DOMAIN_VDSL_PHY,
+ }, {
+ .name = "vdsl-mips",
+ .bit = BCM63268_POWER_DOMAIN_VDSL_MIPS,
+ }, {
+ .name = "fap",
+ .bit = BCM63268_POWER_DOMAIN_FAP,
+ }, {
+ .name = "pcie",
+ .bit = BCM63268_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "wlan-pads",
+ .bit = BCM63268_POWER_DOMAIN_WLAN_PADS,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct of_device_id bcm63xx_power_of_match[] = {
+ {
+ .compatible = "brcm,bcm6318-power-controller",
+ .data = &bcm6318_power_domains,
+ }, {
+ .compatible = "brcm,bcm6328-power-controller",
+ .data = &bcm6328_power_domains,
+ }, {
+ .compatible = "brcm,bcm6362-power-controller",
+ .data = &bcm6362_power_domains,
+ }, {
+ .compatible = "brcm,bcm63268-power-controller",
+ .data = &bcm63268_power_domains,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver bcm63xx_power_driver = {
+ .driver = {
+ .name = "bcm63xx-power-controller",
+ .of_match_table = bcm63xx_power_of_match,
+ },
+ .probe = bcm63xx_power_probe,
+};
+builtin_platform_driver(bcm63xx_power_driver);
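
The driver above only registers a genpd provider: once a consumer device's node carries a power-domains phandle to one of these domains, the genpd core attaches the device before probe and runtime PM drives the domain on and off through the power_on/power_off callbacks. A minimal consumer sketch, assuming a hypothetical demo device whose node references one of these domains (the demo_* names are illustrative, not part of this patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);	/* powers the attached domain on */
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... the block is powered here; pm_runtime_put() lets it power off ... */
	return 0;
}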
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 61731e01f94b..7f8dc302ae6e 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -13,6 +13,22 @@
#include <linux/syscore_ops.h>
#include <linux/soc/brcmstb/brcmstb.h>
+#define RACENPREF_MASK 0x3
+#define RACPREFINST_SHIFT 0
+#define RACENINST_SHIFT 2
+#define RACPREFDATA_SHIFT 4
+#define RACENDATA_SHIFT 6
+#define RAC_CPU_SHIFT 8
+#define RACCFG_MASK 0xff
+#define DPREF_LINE_2_SHIFT 24
+#define DPREF_LINE_2_MASK 0xff
+
+/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
+#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
+ RACENPREF_MASK << RACENINST_SHIFT | \
+ 1 << RACPREFDATA_SHIFT | \
+ RACENPREF_MASK << RACENDATA_SHIFT)
+
#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK 0xf
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK 0xf
@@ -31,11 +47,21 @@ static void __iomem *cpubiuctrl_base;
static bool mcp_wr_pairing_en;
static const int *cpubiuctrl_regs;
+enum cpubiuctrl_regs {
+ CPU_CREDIT_REG = 0,
+ CPU_MCP_FLOW_REG,
+ CPU_WRITEBACK_CTRL_REG,
+ RAC_CONFIG0_REG,
+ RAC_CONFIG1_REG,
+ NUM_CPU_BIUCTRL_REGS,
+};
+
static inline u32 cbc_readl(int reg)
{
int offset = cpubiuctrl_regs[reg];
- if (offset == -1)
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
return (u32)-1;
return readl_relaxed(cpubiuctrl_base + offset);
@@ -45,22 +71,19 @@ static inline void cbc_writel(u32 val, int reg)
{
int offset = cpubiuctrl_regs[reg];
- if (offset == -1)
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
return;
writel(val, cpubiuctrl_base + offset);
}
-enum cpubiuctrl_regs {
- CPU_CREDIT_REG = 0,
- CPU_MCP_FLOW_REG,
- CPU_WRITEBACK_CTRL_REG
-};
-
static const int b15_cpubiuctrl_regs[] = {
[CPU_CREDIT_REG] = 0x184,
[CPU_MCP_FLOW_REG] = -1,
[CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = -1,
+ [RAC_CONFIG1_REG] = -1,
};
/* Odd cases, e.g: 7260A0 */
@@ -68,22 +91,26 @@ static const int b53_cpubiuctrl_no_wb_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
[CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
};
static const int b53_cpubiuctrl_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
[CPU_WRITEBACK_CTRL_REG] = 0x22c,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
};
static const int a72_cpubiuctrl_regs[] = {
[CPU_CREDIT_REG] = 0x18,
[CPU_MCP_FLOW_REG] = 0x1c,
[CPU_WRITEBACK_CTRL_REG] = 0x20,
+ [RAC_CONFIG0_REG] = 0x08,
+ [RAC_CONFIG1_REG] = 0x0c,
};
-#define NUM_CPU_BIUCTRL_REGS 3
-
static int __init mcp_write_pairing_set(void)
{
u32 creds = 0;
@@ -110,6 +137,8 @@ static int __init mcp_write_pairing_set(void)
static const u32 a72_b53_mach_compat[] = {
0x7211,
0x7216,
+ 0x72164,
+ 0x72165,
0x7255,
0x7260,
0x7268,
@@ -117,6 +146,61 @@ static const u32 a72_b53_mach_compat[] = {
0x7278,
};
+/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of
+ * hardware after the integrated L2 cache of the B53 CPU complex whose purpose
+ * is to prefetch instruction and/or data with a line size of either 64 bytes
+ * or 256 bytes. The rationale is that the data-bus of the CPU interface is
+ * optimized for 256-byte transactions, and enabling the read-ahead cache
+ * provides a significant performance boost (typically twice the performance
+ * for a memcpy benchmark application).
+ *
+ * The read-ahead cache is transparent for Virtual Address cache maintenance
+ * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC. So no special
+ * handling is needed for the DMA API above and beyond what is included in the
+ * arm64 implementation.
+ *
+ * In addition, since the Point of Unification is typically between L1 and L2
+ * for the Brahma-B53 processor no special read-ahead cache handling is needed
+ * for the IC IALLU and IC IALLUIS cache maintenance operations.
+ *
+ * However, it is not possible to specify the cache level (L3) for the cache
+ * maintenance instructions operating by set/way to operate on the read-ahead
+ * cache. The read-ahead cache will maintain coherency when inner cache lines
+ * are cleaned by set/way, but if it is necessary to invalidate inner cache
+ * lines by set/way to maintain coherency with system masters operating on
+ * shared memory that does not have hardware support for coherency, then it
+ * will also be necessary to explicitly invalidate the read-ahead cache.
+ */
+static void __init a72_b53_rac_enable_all(struct device_node *np)
+{
+ unsigned int cpu;
+ u32 enable = 0, pref_dist, shift;
+
+ if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
+ return;
+
+ if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ return;
+
+ pref_dist = cbc_readl(RAC_CONFIG1_REG);
+ for_each_possible_cpu(cpu) {
+ shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
+ enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
+ if (cpubiuctrl_regs == a72_cpubiuctrl_regs) {
+ enable &= ~(RACENPREF_MASK << shift);
+ enable |= 3 << shift;
+ pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
+ }
+ }
+
+ cbc_writel(enable, RAC_CONFIG0_REG);
+ cbc_writel(pref_dist, RAC_CONFIG1_REG);
+
+ pr_info("%pOF: Broadcom %s read-ahead cache\n",
+ np, cpubiuctrl_regs == a72_cpubiuctrl_regs ?
+ "Cortex-A72" : "Brahma-B53");
+}
+
static void __init mcp_a72_b53_set(void)
{
unsigned int i;
@@ -262,6 +346,7 @@ static int __init brcmstb_biuctrl_init(void)
return ret;
}
+ a72_b53_rac_enable_all(np);
mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
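
In the enable-word arithmetic above, each CPU owns an 8-bit field of RAC_CONFIG0 (fields are RAC_CPU_SHIFT bits apart) holding its instruction and data prefetch enables, and on A72 parts RAC_CONFIG1 additionally selects the 256-byte prefetch distance per CPU. A standalone userspace sketch of the same bit arithmetic for a 4-CPU B53 (illustration only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define RACENPREF_MASK		0x3
#define RACPREFINST_SHIFT	0
#define RACENINST_SHIFT		2
#define RACPREFDATA_SHIFT	4
#define RACENDATA_SHIFT		6
#define RAC_CPU_SHIFT		8

#define RAC_DATA_INST_EN_MASK	(1 << RACPREFINST_SHIFT | \
				 RACENPREF_MASK << RACENINST_SHIFT | \
				 1 << RACPREFDATA_SHIFT | \
				 RACENPREF_MASK << RACENDATA_SHIFT)

int main(void)
{
	uint32_t enable = 0;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)	/* one 0xdd byte per CPU */
		enable |= (uint32_t)RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);

	printf("RAC_CONFIG0 = 0x%08x\n", enable);	/* prints 0xdddddddd */
	return 0;
}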
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 0ab85bfb116f..659b4a570d5b 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -647,7 +647,6 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
const uint32_t *cl = (uint32_t *)d;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
- uint64_t addr_cena;
spin_lock(&s->access_spinlock);
half_mask = (s->eqcr.pi_ci_mask>>1);
@@ -701,7 +700,6 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- addr_cena = (size_t)s->addr_cena;
for (i = 0; i < num_enqueued; i++)
eqcr_pi++;
s->eqcr.pi = eqcr_pi & full_mask;
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index f4fb527d8301..c5dd026fe889 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -660,7 +660,7 @@ int bm_shutdown_pool(u32 bpid)
}
done:
put_affine_portal();
- return 0;
+ return err;
}
struct gen_pool *bm_bpalloc;
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 2895d062cf51..7066b2f1467c 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -86,7 +86,7 @@ static void fd_inc(struct qm_fd *fd)
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
+ be32_add_cpu(&fd->cmd, 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index cac0fb7693a0..21dbcd787cd5 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -523,7 +523,7 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
qe_mux_reg = &qe_immr->qmx;
- if (tdm_num > 7 || tdm_num < 0)
+ if (tdm_num > 7)
return -EINVAL;
/* The communications direction must be RX or TX */
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 6cf8a7a412bd..db7e7fc321b1 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -487,22 +487,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->regulator = devm_regulator_get_optional(domain->dev, "power");
if (IS_ERR(domain->regulator)) {
- if (PTR_ERR(domain->regulator) != -ENODEV) {
- if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
- dev_err(domain->dev, "Failed to get domain's regulator\n");
- return PTR_ERR(domain->regulator);
- }
+ if (PTR_ERR(domain->regulator) != -ENODEV)
+ return dev_err_probe(domain->dev, PTR_ERR(domain->regulator),
+ "Failed to get domain's regulator\n");
} else if (domain->voltage) {
regulator_set_voltage(domain->regulator,
domain->voltage, domain->voltage);
}
ret = imx_pgc_get_clocks(domain);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(domain->dev, "Failed to get domain's clocks\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(domain->dev, ret, "Failed to get domain's clocks\n");
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
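
The gpcv2 conversion above relies on dev_err_probe(), which collapses the usual -EPROBE_DEFER special-casing into one call: it returns the error code it is given, prints with dev_err() for real errors, and for -EPROBE_DEFER only records the deferral reason (visible in debugfs devices_deferred) at debug level. A minimal sketch with hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* quiet on -EPROBE_DEFER, dev_err() otherwise; returns the code */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	return 0;
}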
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index dc644cfb6419..505651b0d715 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -13,11 +13,16 @@
#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
#define CMDQ_REG_TYPE 1
+#define CMDQ_JUMP_RELATIVE 1
struct cmdq_instruction {
union {
u32 value;
u32 mask;
+ struct {
+ u16 arg_c;
+ u16 src_reg;
+ };
};
union {
u16 offset;
@@ -223,15 +228,104 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
+int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
+ u16 reg_idx)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_READ_S;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.reg_dst = reg_idx;
+ inst.src_reg = addr_low;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_read_s);
+
+int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s);
+
+int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.mask = 0;
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
+
+int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_value);
+
+int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
+
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
struct cmdq_instruction inst = { {0} };
+ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
inst.op = CMDQ_CODE_WFE;
- inst.value = CMDQ_WFE_OPTION;
+ inst.value = CMDQ_WFE_OPTION | clear_option;
inst.event = event;
return cmdq_pkt_append_command(pkt, inst);
@@ -315,6 +409,18 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
}
EXPORT_SYMBOL(cmdq_pkt_assign);
+int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_JUMP;
+ inst.offset = CMDQ_JUMP_RELATIVE;
+ inst.value = addr >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump);
+
int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
struct cmdq_instruction inst = { {0} };
@@ -329,7 +435,8 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
/* JUMP to end */
inst.op = CMDQ_CODE_JUMP;
- inst.value = CMDQ_JUMP_PASS;
+ inst.value = CMDQ_JUMP_PASS >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
err = cmdq_pkt_append_command(pkt, inst);
return err;
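
The new read_s/write_s helpers extend GCE accesses beyond a 32-bit subsys offset: the high bits of the physical address are latched in an internal GCE register (named by high_addr_reg_idx) and each op supplies only the low 16 bits. A hedged client-side sketch; CMDQ_THR_SPR_IDX0 and the CMDQ_ADDR_HIGH()/CMDQ_ADDR_LOW() split helpers are assumed from the cmdq client header, not defined in this file:

#include <linux/soc/mediatek/mtk-cmdq.h>

static int demo_write_pa(struct cmdq_client *cl, dma_addr_t pa, u32 value)
{
	struct cmdq_pkt *pkt;
	int err;

	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
	if (IS_ERR(pkt))
		return PTR_ERR(pkt);

	/* Latch the high part of the address in internal register SPR0 */
	err = cmdq_pkt_assign(pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(pa));
	if (!err)
		/* Store using SPR0 as base plus the low 16 address bits */
		err = cmdq_pkt_write_s_value(pkt, CMDQ_THR_SPR_IDX0,
					     CMDQ_ADDR_LOW(pa), value);
	if (!err)
		err = cmdq_pkt_finalize(pkt);
	if (err)
		cmdq_pkt_destroy(pkt);
	return err;
}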
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 341c7ac250e3..4a123796aad3 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -19,7 +19,7 @@
/**
* mtk_infracfg_set_bus_protection - enable bus protection
- * @regmap: The infracfg regmap
+ * @infracfg: The infracfg regmap
* @mask: The mask containing the protection bits to be enabled.
* @reg_update: The boolean flag determines to set the protection bits
* by regmap_update_bits with enable register(PROTECTEN) or
@@ -50,7 +50,7 @@ int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask,
/**
* mtk_infracfg_clear_bus_protection - disable bus protection
- * @regmap: The infracfg regmap
+ * @infracfg: The infracfg regmap
* @mask: The mask containing the protection bits to be disabled.
* @reg_update: The boolean flag determines to clear the protection bits
* by regmap_update_bits with enable register(PROTECTEN) or
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 3dc3e3d61ea3..6a3b69b43ad5 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -26,6 +26,22 @@ config QCOM_COMMAND_DB
resource on a RPM-hardened platform must use this database to get
SoC specific identifier and information for the shared resources.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on ARCH_QCOM && HAS_IOMEM
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates CPU OPP tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
config QCOM_GENI_SE
tristate "QCOM GENI Serial Engine Driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 93392d9dc7f7..ad675a6593d0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,7 @@ CFLAGS_rpmh-rsc.o := -I$(src)
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
+obj-$(CONFIG_QCOM_CPR) += cpr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 1f35b097c635..7abfc8c4fdc7 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -328,7 +328,7 @@ static int of_apr_add_pd_lookups(struct device *dev)
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
- dev_err(dev, "pdr add lookup failed: %d\n", ret);
+ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
return PTR_ERR(pds);
}
}
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/soc/qcom/cpr.c
index bd7c3e48b386..b24cc77d1889 100644
--- a/drivers/power/avs/qcom-cpr.c
+++ b/drivers/soc/qcom/cpr.c
@@ -665,8 +665,6 @@ static int cpr_enable(struct cpr_drv *drv)
static int cpr_disable(struct cpr_drv *drv)
{
- int ret;
-
mutex_lock(&drv->lock);
if (cpr_is_allowed(drv)) {
@@ -676,11 +674,7 @@ static int cpr_disable(struct cpr_drv *drv)
mutex_unlock(&drv->lock);
- ret = regulator_disable(drv->vdd_apc);
- if (ret)
- return ret;
-
- return 0;
+ return regulator_disable(drv->vdd_apc);
}
static int cpr_config(struct cpr_drv *drv)
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 429b5a60a1ba..70fbe70c6213 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -387,7 +387,6 @@ static int qcom_llcc_remove(struct platform_device *pdev)
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
const char *name)
{
- struct resource *res;
void __iomem *base;
struct regmap_config llcc_regmap_config = {
.reg_bits = 32,
@@ -396,11 +395,7 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
.fast_io = true,
};
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, name);
if (IS_ERR(base))
return ERR_CAST(base);
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 15b5002e4127..ab9ae8cdfa54 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -185,7 +185,7 @@ struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
.data_type = QMI_STRUCT,
.elem_len = SERVREG_DOMAIN_LIST_LENGTH,
.elem_size = sizeof(struct servreg_location_entry),
- .array_type = NO_ARRAY,
+ .array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct servreg_get_domain_list_resp,
domain_list),
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index ef60e790a750..344ba687c13b 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -8,6 +8,7 @@
#define __RPM_INTERNAL_H__
#include <linux/bitmap.h>
+#include <linux/wait.h>
#include <soc/qcom/tcs.h>
#define TCS_TYPE_NR 4
@@ -106,6 +107,8 @@ struct rpmh_ctrlr {
* @lock: Synchronize state of the controller. If RPMH's cache
* lock will also be held, the order is: drv->lock then
* cache_lock.
+ * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
+ * slot
* @client: Handle to the DRV's client.
*/
struct rsc_drv {
@@ -118,6 +121,7 @@ struct rsc_drv {
struct tcs_group tcs[TCS_TYPE_NR];
DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
spinlock_t lock;
+ wait_queue_head_t tcs_wait;
struct rpmh_ctrlr client;
};
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index ae6675782581..a297911afe57 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
@@ -453,6 +454,7 @@ skip:
if (!drv->tcs[ACTIVE_TCS].num_tcs)
enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
+ wake_up(&drv->tcs_wait);
if (req)
rpmh_tx_done(req, err);
}
@@ -571,73 +573,34 @@ static int find_free_tcs(struct tcs_group *tcs)
}
/**
- * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
+ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
* @drv: The controller.
+ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
* @msg: The data to be sent.
*
- * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
+ * Claims a tcs in the given tcs_group while making sure that no existing cmd
+ * is in flight that would conflict with the one in @msg.
*
- * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
- * the same address is already transferring returns -EBUSY which means the
- * client should retry shortly.
+ * Context: Must be called with the drv->lock held since that protects
+ * tcs_in_use.
*
- * Return: 0 on success, -EBUSY if client should retry, or an error.
- * Client should have interrupts enabled for a bit before retrying.
+ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
+ * or the tcs_group is full.
*/
-static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
{
- struct tcs_group *tcs;
- int tcs_id;
- unsigned long flags;
int ret;
- tcs = get_tcs_for_msg(drv, msg);
- if (IS_ERR(tcs))
- return PTR_ERR(tcs);
-
- spin_lock_irqsave(&drv->lock, flags);
/*
* The h/w does not like if we send a request to the same address,
* when one is already in-flight or being processed.
*/
ret = check_for_req_inflight(drv, tcs, msg);
if (ret)
- goto unlock;
-
- ret = find_free_tcs(tcs);
- if (ret < 0)
- goto unlock;
- tcs_id = ret;
-
- tcs->req[tcs_id - tcs->offset] = msg;
- set_bit(tcs_id, drv->tcs_in_use);
- if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
- /*
- * Clear previously programmed WAKE commands in selected
- * repurposed TCS to avoid triggering them. tcs->slots will be
- * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
- */
- write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
- write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
- enable_tcs_irq(drv, tcs_id, true);
- }
- spin_unlock_irqrestore(&drv->lock, flags);
-
- /*
- * These two can be done after the lock is released because:
- * - We marked "tcs_in_use" under lock.
- * - Once "tcs_in_use" has been marked nobody else could be writing
- * to these registers until the interrupt goes off.
- * - The interrupt can't go off until we trigger w/ the last line
- * of __tcs_set_trigger() below.
- */
- __tcs_buffer_write(drv, tcs_id, 0, msg);
- __tcs_set_trigger(drv, tcs_id, true);
+ return ret;
- return 0;
-unlock:
- spin_unlock_irqrestore(&drv->lock, flags);
- return ret;
+ return find_free_tcs(tcs);
}
/**
@@ -664,18 +627,47 @@ unlock:
*/
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
- int ret;
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
- do {
- ret = tcs_write(drv, msg);
- if (ret == -EBUSY) {
- pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
- msg->cmds[0].addr);
- udelay(10);
- }
- } while (ret == -EBUSY);
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
- return ret;
+ spin_lock_irqsave(&drv->lock, flags);
+
+ /* Wait forever for a free tcs. It better be there eventually! */
+ wait_event_lock_irq(drv->tcs_wait,
+ (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
+ drv->lock);
+
+ tcs->req[tcs_id - tcs->offset] = msg;
+ set_bit(tcs_id, drv->tcs_in_use);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
+ __tcs_buffer_write(drv, tcs_id, 0, msg);
+ __tcs_set_trigger(drv, tcs_id, true);
+
+ return 0;
}
/**
@@ -983,6 +975,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
return ret;
spin_lock_init(&drv->lock);
+ init_waitqueue_head(&drv->tcs_wait);
bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
irq = platform_get_irq(pdev, drv->id);
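
rpmh_rsc_send_data() above no longer spins on -EBUSY: wait_event_lock_irq() sleeps with drv->lock dropped and interrupts enabled, re-acquires the lock before every re-test of claim_tcs_for_req(), and the wake_up(&drv->tcs_wait) in the tx-done path is what releases the waiters once a slot can be claimed. The same pattern in isolation, reduced to a generic slot counter (a sketch, not RPMH code):

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(slot_lock);
static DECLARE_WAIT_QUEUE_HEAD(slot_wait);
static unsigned int free_slots = 4;

static void claim_slot(void)
{
	spin_lock_irq(&slot_lock);
	/* Drops slot_lock while sleeping, re-takes it to test the condition */
	wait_event_lock_irq(slot_wait, free_slots > 0, slot_lock);
	free_slots--;
	spin_unlock_irq(&slot_lock);
}

static void release_slot(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slot_lock, flags);
	free_slots++;
	spin_unlock_irqrestore(&slot_lock, flags);
	wake_up(&slot_wait);	/* lets a sleeping claimer re-check */
}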
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b25d0f7dac9e..b44ede48decc 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -194,6 +194,7 @@ static const struct soc_id soc_id[] = {
{ 186, "MSM8674" },
{ 194, "MSM8974PRO" },
{ 206, "MSM8916" },
+ { 207, "MSM8994" },
{ 208, "APQ8074-AA" },
{ 209, "APQ8074-AB" },
{ 210, "APQ8074PRO" },
@@ -214,6 +215,8 @@ static const struct soc_id soc_id[] = {
{ 248, "MSM8216" },
{ 249, "MSM8116" },
{ 250, "MSM8616" },
+ { 251, "MSM8992" },
+ { 253, "APQ8094" },
{ 291, "APQ8096" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
@@ -223,6 +226,8 @@ static const struct soc_id soc_id[] = {
{ 321, "SDM845" },
{ 341, "SDA845" },
{ 356, "SM8250" },
+ { 402, "IPQ6018" },
+ { 425, "SC7180" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 30984659df90..b70bbc38efc6 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-config SOC_RENESAS
+menuconfig SOC_RENESAS
bool "Renesas SoC driver support" if COMPILE_TEST && !ARCH_RENESAS
default y if ARCH_RENESAS
select SOC_BUS
@@ -49,126 +49,126 @@ if ARM && ARCH_RENESAS
#comment "Renesas ARM SoCs System Type"
config ARCH_EMEV2
- bool "Emma Mobile EV2"
+ bool "ARM32 Platform support for Emma Mobile EV2"
select HAVE_ARM_SCU if SMP
select SYS_SUPPORTS_EM_STI
-config ARCH_R7S72100
- bool "RZ/A1H (R7S72100)"
- select ARM_ERRATA_754322
- select PM
- select PM_GENERIC_DOMAINS
- select RENESAS_OSTM
- select RENESAS_RZA1_IRQC
- select SYS_SUPPORTS_SH_MTU2
+config ARCH_R8A7794
+ bool "ARM32 Platform support for R-Car E2"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7794
-config ARCH_R7S9210
- bool "RZ/A2 (R7S9210)"
- select PM
- select PM_GENERIC_DOMAINS
- select RENESAS_OSTM
- select RENESAS_RZA1_IRQC
+config ARCH_R8A7779
+ bool "ARM32 Platform support for R-Car H1"
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select SYSC_R8A7779
-config ARCH_R8A73A4
- bool "R-Mobile APE6 (R8A73A40)"
- select ARCH_RMOBILE
+config ARCH_R8A7790
+ bool "ARM32 Platform support for R-Car H2"
+ select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select HAVE_ARM_ARCH_TIMER
- select RENESAS_IRQC
+ select I2C
+ select SYSC_R8A7790
-config ARCH_R8A7740
- bool "R-Mobile A1 (R8A77400)"
- select ARCH_RMOBILE
+config ARCH_R8A7778
+ bool "ARM32 Platform support for R-Car M1A"
+ select ARCH_RCAR_GEN1
select ARM_ERRATA_754322
- select RENESAS_INTC_IRQPIN
-config ARCH_R8A7742
- bool "RZ/G1H (R8A77420)"
+config ARCH_R8A7793
+ bool "ARM32 Platform support for R-Car M2-N"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select ARM_ERRATA_814220
- select SYSC_R8A7742
+ select I2C
+ select SYSC_R8A7791
-config ARCH_R8A7743
- bool "RZ/G1M (R8A77430)"
+config ARCH_R8A7791
+ bool "ARM32 Platform support for R-Car M2-W"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7743
+ select I2C
+ select SYSC_R8A7791
-config ARCH_R8A7744
- bool "RZ/G1N (R8A77440)"
+config ARCH_R8A7792
+ bool "ARM32 Platform support for R-Car V2H"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7743
+ select SYSC_R8A7792
-config ARCH_R8A7745
- bool "RZ/G1E (R8A77450)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A7745
+config ARCH_R8A7740
+ bool "ARM32 Platform support for R-Mobile A1"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_754322
+ select RENESAS_INTC_IRQPIN
-config ARCH_R8A77470
- bool "RZ/G1C (R8A77470)"
- select ARCH_RCAR_GEN2
+config ARCH_R8A73A4
+ bool "ARM32 Platform support for R-Mobile APE6"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select SYSC_R8A77470
+ select HAVE_ARM_ARCH_TIMER
+ select RENESAS_IRQC
-config ARCH_R8A7778
- bool "R-Car M1A (R8A77781)"
- select ARCH_RCAR_GEN1
+config ARCH_R7S72100
+ bool "ARM32 Platform support for RZ/A1H"
select ARM_ERRATA_754322
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
+ select SYS_SUPPORTS_SH_MTU2
-config ARCH_R8A7779
- bool "R-Car H1 (R8A77790)"
- select ARCH_RCAR_GEN1
- select ARM_ERRATA_754322
- select ARM_GLOBAL_TIMER
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select SYSC_R8A7779
+config ARCH_R7S9210
+ bool "ARM32 Platform support for RZ/A2"
+ select PM
+ select PM_GENERIC_DOMAINS
+ select RENESAS_OSTM
+ select RENESAS_RZA1_IRQC
-config ARCH_R8A7790
- bool "R-Car H2 (R8A77900)"
+config ARCH_R8A77470
+ bool "ARM32 Platform support for RZ/G1C"
select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select I2C
- select SYSC_R8A7790
+ select SYSC_R8A77470
-config ARCH_R8A7791
- bool "R-Car M2-W (R8A77910)"
+config ARCH_R8A7745
+ bool "ARM32 Platform support for RZ/G1E"
select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
+ select ARM_ERRATA_814220
+ select SYSC_R8A7745
-config ARCH_R8A7792
- bool "R-Car V2H (R8A77920)"
+config ARCH_R8A7742
+ bool "ARM32 Platform support for RZ/G1H"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7792
+ select ARM_ERRATA_814220
+ select SYSC_R8A7742
-config ARCH_R8A7793
- bool "R-Car M2-N (R8A7793)"
+config ARCH_R8A7743
+ bool "ARM32 Platform support for RZ/G1M"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
+ select SYSC_R8A7743
-config ARCH_R8A7794
- bool "R-Car E2 (R8A77940)"
+config ARCH_R8A7744
+ bool "ARM32 Platform support for RZ/G1N"
select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A7794
+ select ARM_ERRATA_798181 if SMP
+ select SYSC_R8A7743
config ARCH_R9A06G032
- bool "RZ/N1D (R9A06G032)"
+ bool "ARM32 Platform support for RZ/N1D"
select ARCH_RZN1
select ARM_ERRATA_814220
config ARCH_SH73A0
- bool "SH-Mobile AG5 (R8A73A00)"
+ bool "ARM32 Platform support for SH-Mobile AG5"
select ARCH_RMOBILE
select ARM_ERRATA_754322
select ARM_GLOBAL_TIMER
@@ -180,193 +180,201 @@ endif # ARM
if ARM64
-config ARCH_R8A774A1
- bool "Renesas RZ/G2M SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774A1
- help
- This enables support for the Renesas RZ/G2M SoC.
-
-config ARCH_R8A774B1
- bool "Renesas RZ/G2N SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774B1
- help
- This enables support for the Renesas RZ/G2N SoC.
-
-config ARCH_R8A774C0
- bool "Renesas RZ/G2E SoC Platform"
+config ARCH_R8A77995
+ bool "ARM64 Platform support for R-Car D3"
select ARCH_RCAR_GEN3
- select SYSC_R8A774C0
+ select SYSC_R8A77995
help
- This enables support for the Renesas RZ/G2E SoC.
+ This enables support for the Renesas R-Car D3 SoC.
-config ARCH_R8A774E1
- bool "Renesas RZ/G2H SoC Platform"
+config ARCH_R8A77990
+ bool "ARM64 Platform support for R-Car E3"
select ARCH_RCAR_GEN3
- select SYSC_R8A774E1
+ select SYSC_R8A77990
help
- This enables support for the Renesas RZ/G2H SoC.
+ This enables support for the Renesas R-Car E3 SoC.
config ARCH_R8A77950
- bool "Renesas R-Car H3 ES1.x SoC Platform"
+ bool "ARM64 Platform support for R-Car H3 ES1.x"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC (revision 1.x).
config ARCH_R8A77951
- bool "Renesas R-Car H3 ES2.0+ SoC Platform"
+ bool "ARM64 Platform support for R-Car H3 ES2.0+"
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
later).
+config ARCH_R8A77965
+ bool "ARM64 Platform support for R-Car M3-N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77965
+ help
+ This enables support for the Renesas R-Car M3-N SoC.
+
config ARCH_R8A77960
- bool "Renesas R-Car M3-W SoC Platform"
+ bool "ARM64 Platform support for R-Car M3-W"
select ARCH_RCAR_GEN3
select SYSC_R8A77960
help
This enables support for the Renesas R-Car M3-W SoC.
config ARCH_R8A77961
- bool "Renesas R-Car M3-W+ SoC Platform"
+ bool "ARM64 Platform support for R-Car M3-W+"
select ARCH_RCAR_GEN3
select SYSC_R8A77961
help
This enables support for the Renesas R-Car M3-W+ SoC.
-config ARCH_R8A77965
- bool "Renesas R-Car M3-N SoC Platform"
+config ARCH_R8A77980
+ bool "ARM64 Platform support for R-Car V3H"
select ARCH_RCAR_GEN3
- select SYSC_R8A77965
+ select SYSC_R8A77980
help
- This enables support for the Renesas R-Car M3-N SoC.
+ This enables support for the Renesas R-Car V3H SoC.
config ARCH_R8A77970
- bool "Renesas R-Car V3M SoC Platform"
+ bool "ARM64 Platform support for R-Car V3M"
select ARCH_RCAR_GEN3
select SYSC_R8A77970
help
This enables support for the Renesas R-Car V3M SoC.
-config ARCH_R8A77980
- bool "Renesas R-Car V3H SoC Platform"
+config ARCH_R8A779A0
+ bool "ARM64 Platform support for R-Car V3U"
select ARCH_RCAR_GEN3
- select SYSC_R8A77980
+ select SYSC_R8A779A0
help
- This enables support for the Renesas R-Car V3H SoC.
+ This enables support for the Renesas R-Car V3U SoC.
-config ARCH_R8A77990
- bool "Renesas R-Car E3 SoC Platform"
+config ARCH_R8A774C0
+ bool "ARM64 Platform support for RZ/G2E"
select ARCH_RCAR_GEN3
- select SYSC_R8A77990
+ select SYSC_R8A774C0
help
- This enables support for the Renesas R-Car E3 SoC.
+ This enables support for the Renesas RZ/G2E SoC.
-config ARCH_R8A77995
- bool "Renesas R-Car D3 SoC Platform"
+config ARCH_R8A774E1
+ bool "ARM64 Platform support for RZ/G2H"
select ARCH_RCAR_GEN3
- select SYSC_R8A77995
+ select SYSC_R8A774E1
help
- This enables support for the Renesas R-Car D3 SoC.
+ This enables support for the Renesas RZ/G2H SoC.
+
+config ARCH_R8A774A1
+ bool "ARM64 Platform support for RZ/G2M"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774A1
+ help
+ This enables support for the Renesas RZ/G2M SoC.
+
+config ARCH_R8A774B1
+ bool "ARM64 Platform support for RZ/G2N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774B1
+ help
+ This enables support for the Renesas RZ/G2N SoC.
endif # ARM64
-# SoC
-config SYSC_R8A7742
- bool "RZ/G1H System Controller support" if COMPILE_TEST
- select SYSC_RCAR
+config RST_RCAR
+ bool "Reset Controller support for R-Car" if COMPILE_TEST
-config SYSC_R8A7743
- bool "RZ/G1M System Controller support" if COMPILE_TEST
+config SYSC_RCAR
+ bool "System Controller support for R-Car" if COMPILE_TEST
+
+config SYSC_R8A77995
+ bool "System Controller support for R-Car D3" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7745
- bool "RZ/G1E System Controller support" if COMPILE_TEST
+config SYSC_R8A7794
+ bool "System Controller support for R-Car E2" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77470
- bool "RZ/G1C System Controller support" if COMPILE_TEST
+config SYSC_R8A77990
+ bool "System Controller support for R-Car E3" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A774A1
- bool "RZ/G2M System Controller support" if COMPILE_TEST
+config SYSC_R8A7779
+ bool "System Controller support for R-Car H1" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A774B1
- bool "RZ/G2N System Controller support" if COMPILE_TEST
+config SYSC_R8A7790
+ bool "System Controller support for R-Car H2" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A774C0
- bool "RZ/G2E System Controller support" if COMPILE_TEST
+config SYSC_R8A7795
+ bool "System Controller support for R-Car H3" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A774E1
- bool "RZ/G2H System Controller support" if COMPILE_TEST
+config SYSC_R8A7791
+ bool "System Controller support for R-Car M2-W/N" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7779
- bool "R-Car H1 System Controller support" if COMPILE_TEST
+config SYSC_R8A77965
+ bool "System Controller support for R-Car M3-N" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7790
- bool "R-Car H2 System Controller support" if COMPILE_TEST
+config SYSC_R8A77960
+ bool "System Controller support for R-Car M3-W" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7791
- bool "R-Car M2-W/N System Controller support" if COMPILE_TEST
+config SYSC_R8A77961
+ bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
select SYSC_RCAR
config SYSC_R8A7792
- bool "R-Car V2H System Controller support" if COMPILE_TEST
+ bool "System Controller support for R-Car V2H" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7794
- bool "R-Car E2 System Controller support" if COMPILE_TEST
+config SYSC_R8A77980
+ bool "System Controller support for R-Car V3H" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A7795
- bool "R-Car H3 System Controller support" if COMPILE_TEST
+config SYSC_R8A77970
+ bool "System Controller support for R-Car V3M" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77960
- bool "R-Car M3-W System Controller support" if COMPILE_TEST
- select SYSC_RCAR
+config SYSC_R8A779A0
+ bool "System Controller support for R-Car V3U" if COMPILE_TEST
-config SYSC_R8A77961
- bool "R-Car M3-W+ System Controller support" if COMPILE_TEST
- select SYSC_RCAR
+config SYSC_RMOBILE
+ bool "System Controller support for R-Mobile" if COMPILE_TEST
-config SYSC_R8A77965
- bool "R-Car M3-N System Controller support" if COMPILE_TEST
+config SYSC_R8A77470
+ bool "System Controller support for RZ/G1C" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77970
- bool "R-Car V3M System Controller support" if COMPILE_TEST
+config SYSC_R8A7745
+ bool "System Controller support for RZ/G1E" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77980
- bool "R-Car V3H System Controller support" if COMPILE_TEST
+config SYSC_R8A7742
+ bool "System Controller support for RZ/G1H" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77990
- bool "R-Car E3 System Controller support" if COMPILE_TEST
+config SYSC_R8A7743
+ bool "System Controller support for RZ/G1M" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77995
- bool "R-Car D3 System Controller support" if COMPILE_TEST
+config SYSC_R8A774C0
+ bool "System Controller support for RZ/G2E" if COMPILE_TEST
select SYSC_RCAR
-# Family
-config RST_RCAR
- bool "R-Car Reset Controller support" if COMPILE_TEST
+config SYSC_R8A774E1
+ bool "System Controller support for RZ/G2H" if COMPILE_TEST
+ select SYSC_RCAR
-config SYSC_RCAR
- bool "R-Car System Controller support" if COMPILE_TEST
+config SYSC_R8A774A1
+ bool "System Controller support for RZ/G2M" if COMPILE_TEST
+ select SYSC_RCAR
-config SYSC_RMOBILE
- bool "R-Mobile System Controller support" if COMPILE_TEST
+config SYSC_R8A774B1
+ bool "System Controller support for RZ/G2N" if COMPILE_TEST
+ select SYSC_RCAR
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 10a399fc486a..9b29bed2a597 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
+obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
new file mode 100644
index 000000000000..d464ffa1be33
--- /dev/null
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V3U System Controller
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779a0-sysc.h>
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+/*
+ * Description of a Power Area
+ */
+struct r8a779a0_sysc_area {
+ const char *name;
+ u8 pdr; /* PDRn */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+/*
+ * SoC-specific Power Area Description
+ */
+struct r8a779a0_sysc_info {
+ const struct r8a779a0_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
+ { "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
+ { "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779A0_PD_A2E0D0, R8A779A0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779A0_PD_A2E0D1, R8A779A0_PD_A3E0, PD_SCU },
+ { "a2e1d0", R8A779A0_PD_A2E1D0, R8A779A0_PD_A3E1, PD_SCU },
+ { "a2e1d1", R8A779A0_PD_A2E1D1, R8A779A0_PD_A3E1, PD_SCU },
+ { "a1e0d0c0", R8A779A0_PD_A1E0D0C0, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779A0_PD_A1E0D0C1, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779A0_PD_A1E0D1C0, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779A0_PD_A1E0D1C1, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e1d0c0", R8A779A0_PD_A1E1D0C0, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d0c1", R8A779A0_PD_A1E1D0C1, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d1c0", R8A779A0_PD_A1E1D1C0, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
+ { "a1e1d1c1", R8A779A0_PD_A1E1D1C1, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
+ { "3dg-a", R8A779A0_PD_3DG_A, R8A779A0_PD_ALWAYS_ON },
+ { "3dg-b", R8A779A0_PD_3DG_B, R8A779A0_PD_3DG_A },
+ { "a3vip0", R8A779A0_PD_A3VIP0, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip1", R8A779A0_PD_A3VIP1, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip3", R8A779A0_PD_A3VIP3, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip2", R8A779A0_PD_A3VIP2, R8A779A0_PD_ALWAYS_ON },
+ { "a3isp01", R8A779A0_PD_A3ISP01, R8A779A0_PD_ALWAYS_ON },
+ { "a3isp23", R8A779A0_PD_A3ISP23, R8A779A0_PD_ALWAYS_ON },
+ { "a3ir", R8A779A0_PD_A3IR, R8A779A0_PD_ALWAYS_ON },
+ { "a2cn0", R8A779A0_PD_A2CN0, R8A779A0_PD_A3IR },
+ { "a2imp01", R8A779A0_PD_A2IMP01, R8A779A0_PD_A3IR },
+ { "a2dp0", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
+ { "a2cv0", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
+ { "a2cv1", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
+ { "a2cv4", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
+ { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
+ { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
+ { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
+ { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
+ { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
+ { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
+ { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
+ { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
+ { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
+ { "a1dsp0", R8A779A0_PD_A1DSP0, R8A779A0_PD_A2CN2 },
+ { "a1cnn1", R8A779A0_PD_A1CNN1, R8A779A0_PD_A2CN1 },
+ { "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
+};
+
+static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = {
+ .areas = r8a779a0_areas,
+ .num_areas = ARRAY_SIZE(r8a779a0_areas),
+};
+
+/* SYSC Common */
+#define SYSCSR 0x000 /* SYSC Status Register */
+#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
+#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
+#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
+#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
+#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
+
+/* Power Domain Registers */
+#define PDRSR(n) (0x1000 + ((n) * 0x40))
+#define PDRONCR(n) (0x1004 + ((n) * 0x40))
+#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
+#define PDRESR(n) (0x100C + ((n) * 0x40))
+
+/* PWRON/PWROFF */
+#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
+
+/* PDRESR */
+#define PDRESR_ERR BIT(0)
+
+/* PDRSR */
+#define PDRSR_OFF BIT(0) /* Power-OFF state */
+#define PDRSR_ON BIT(4) /* Power-ON state */
+#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
+#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
+
+#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */
+
+#define SYSCSR_TIMEOUT 10000
+#define SYSCSR_DELAY_US 10
+
+#define PDRESR_RETRIES 1000
+#define PDRESR_DELAY_US 10
+
+#define SYSCISR_TIMEOUT 10000
+#define SYSCISR_DELAY_US 10
+
+#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
+
+static void __iomem *r8a779a0_sysc_base;
+static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
+{
+ unsigned int reg_offs;
+ u32 val;
+ int ret;
+
+ if (on)
+ reg_offs = PDRONCR(pdr);
+ else
+ reg_offs = PDROFFCR(pdr);
+
+ /* Wait until SYSC is ready to accept a power request */
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
+ (val & SYSCSR_BUSY) == SYSCSR_BUSY,
+ SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
+ if (ret < 0)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);
+
+ return 0;
+}
+
+static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
+{
+ u32 val;
+ int ret;
+
+ iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));
+
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
+ val, !(val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int r8a779a0_sysc_power(u8 pdr, bool on)
+{
+ unsigned int isr_mask;
+ unsigned int reg_idx, bit_idx;
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+ int k;
+
+ spin_lock_irqsave(&r8a779a0_sysc_lock, flags);
+
+ reg_idx = pdr / NUM_DOMAINS_EACH_REG;
+ bit_idx = pdr % NUM_DOMAINS_EACH_REG;
+
+ isr_mask = BIT(bit_idx);
+
+ /*
+ * The interrupt source needs to be enabled, but masked, to prevent the
+ * CPU from receiving it.
+ */
+ iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
+ r8a779a0_sysc_base + SYSCIER(reg_idx));
+ iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
+ r8a779a0_sysc_base + SYSCIMR(reg_idx));
+
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ /* Submit the power shutoff or resume request until it is accepted */
+ for (k = 0; k < PDRESR_RETRIES; k++) {
+ ret = r8a779a0_sysc_pwr_on_off(pdr, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
+ if (!(status & PDRESR_ERR))
+ break;
+
+ udelay(PDRESR_DELAY_US);
+ }
+
+ if (k == PDRESR_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Wait until the power shutoff or resume request has completed */
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
+ val, (val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear interrupt flags */
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ out:
+ spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);
+ return ret;
+}
+
+static bool r8a779a0_sysc_power_is_off(u8 pdr)
+{
+ unsigned int st;
+
+ st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));
+
+ if (st & PDRSR_OFF)
+ return true;
+
+ return false;
+}
+
+struct r8a779a0_sysc_pd {
+ struct generic_pm_domain genpd;
+ u8 pdr;
+ unsigned int flags;
+ char name[];
+};
+
+static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct r8a779a0_sysc_pd, genpd);
+}
+
+static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return r8a779a0_sysc_power(pd->pdr, false);
+}
+
+static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return r8a779a0_sysc_power(pd->pdr, true);
+}
+
+static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ int error;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+
+ genpd->power_off = r8a779a0_sysc_pd_power_off;
+ genpd->power_on = r8a779a0_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ r8a779a0_sysc_power(pd->pdr, true);
+
+finalize:
+ error = pm_genpd_init(genpd, &simple_qos_governor, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
+}
+
+static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
+ { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
+ { /* sentinel */ }
+};
+
+struct r8a779a0_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
+};
+
+static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
+
+static int __init r8a779a0_sysc_pd_init(void)
+{
+ const struct r8a779a0_sysc_info *info;
+ const struct of_device_id *match;
+ struct r8a779a0_pm_domains *domains;
+ struct device_node *np;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ r8a779a0_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ r8a779a0_sysc_onecell_data = &domains->onecell_data;
+
+ for (i = 0; i < info->num_areas; i++) {
+ const struct r8a779a0_sysc_area *area = &info->areas[i];
+ struct r8a779a0_sysc_pd *pd;
+
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
+ pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ strcpy(pd->name, area->name);
+ pd->genpd.name = pd->name;
+ pd->pdr = area->pdr;
+ pd->flags = area->flags;
+
+ error = r8a779a0_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
+
+ domains->domains[area->pdr] = &pd->genpd;
+
+ if (area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ &pd->genpd);
+ if (error) {
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ goto out_put;
+ }
+ }
+
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(r8a779a0_sysc_pd_init);
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index a932015ce9c1..8a1e402ea799 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -37,6 +37,10 @@ static const struct rst_config rcar_rst_gen3 __initconst = {
.modemr = 0x60,
};
+static const struct rst_config rcar_rst_r8a779a0 __initconst = {
+	.modemr = 0x00,		/* MODEMR0, which also contains CPG-related bits */
+};
+
static const struct of_device_id rcar_rst_matches[] __initconst = {
/* RZ/G1 is handled like R-Car Gen2 */
{ .compatible = "renesas,r8a7742-rst", .data = &rcar_rst_gen2 },
@@ -67,6 +71,8 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
+ /* R-Car V3U */
+ { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index f815a6a8b88b..0f8eff4a641a 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -200,6 +200,11 @@ static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
.id = 0x58,
};
+static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x59,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -291,6 +296,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
#endif
+#ifdef CONFIG_ARCH_R8A779A0
+ { .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
+#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index b71b73bf5fc5..2c13bf4dd5db 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -14,6 +14,14 @@ config ROCKCHIP_GRF
In a lot of cases there also need to be default settings initialized
to make some of them conform to expectations of the kernel.
+config ROCKCHIP_IODOMAIN
+ tristate "Rockchip IO domain support"
+ depends on OF
+ help
+	  Say y here to enable support for IO domains on Rockchip SoCs. The
+	  IO domain setting of the SoC must match the voltage supplied by
+	  the regulators.
+
config ROCKCHIP_PM_DOMAINS
bool "Rockchip generic power domain"
depends on PM
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
index afca0a4c4b72..875032f7344e 100644
--- a/drivers/soc/rockchip/Makefile
+++ b/drivers/soc/rockchip/Makefile
@@ -3,4 +3,5 @@
# Rockchip Soc drivers
#
obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
+obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o
obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/soc/rockchip/io-domain.c
index eece97f97ef8..eece97f97ef8 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 264185664594..fc7f48a92288 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -35,7 +35,54 @@ config EXYNOS_PMU_ARM_DRIVERS
config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
- depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+ depends on (ARCH_EXYNOS && PM_GENERIC_DOMAINS) || COMPILE_TEST
+
+config SAMSUNG_PM_DEBUG
+ bool "Samsung PM Suspend debug"
+ depends on PM && DEBUG_KERNEL
+ depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
+ depends on DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
+ depends on DEBUG_LL && MMU
+ help
+ Say Y here if you want verbose debugging from the PM Suspend and
+ Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+ for more information.
+
+config S3C_PM_DEBUG_LED_SMDK
+ bool "SMDK LED suspend/resume debugging"
+ depends on PM && (MACH_SMDK6410)
+ help
+ Say Y here to enable the use of the SMDK LEDs on the baseboard
+ for debugging of the state of the suspend and resume process.
+
+ Note, this currently only works for S3C64XX based SMDK boards.
+
+config SAMSUNG_PM_CHECK
+ bool "S3C2410 PM Suspend Memory CRC"
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
+ select CRC32
+ help
+ Enable the PM code's memory area checksum over sleep. This option
+ will generate CRCs of all blocks of memory, and store them before
+ going to sleep. The blocks are then checked on resume for any
+ errors.
+
+ Note, this can take several seconds depending on memory size
+ and CPU speed.
+
+ See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+
+config SAMSUNG_PM_CHECK_CHUNKSIZE
+ int "S3C2410 PM Suspend CRC Chunksize (KiB)"
+ depends on PM && SAMSUNG_PM_CHECK
+ default 64
+ help
+ Set the chunksize in Kilobytes of the CRC for checking memory
+ corruption over suspend and resume. A smaller value will mean that
+ the CRC data block will take more memory, but will identify any
+ faults with better precision.
+
+ See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
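To make the trade-off in this help text concrete, here is a small user-space sketch (not part of the patch; the 64 MiB figure is an assumed example) of the arithmetic behind the chunk size:

#include <stdio.h>

int main(void)
{
	unsigned long ram = 64UL * 1024 * 1024;	/* assumed: 64 MiB of system RAM */
	unsigned long chunk = 64UL * 1024;	/* default CHUNKSIZE of 64 KiB */
	unsigned long blocks = (ram + chunk - 1) / chunk;	/* round up */

	/* one 4-byte CRC per chunk: 1024 blocks -> 4 KiB of save area */
	printf("%lu blocks, %lu bytes of CRC storage\n", blocks, blocks * 4);
	return 0;
}

Halving the chunk size doubles the save area but halves the region a mismatched CRC can point to.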
config EXYNOS_REGULATOR_COUPLER
bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index ecc3a32f6406..59e8e9453f27 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -10,3 +10,6 @@ obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
+
+obj-$(CONFIG_SAMSUNG_PM_CHECK) += s3c-pm-check.o
+obj-$(CONFIG_SAMSUNG_PM_DEBUG) += s3c-pm-debug.o
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 30bb7b7cc769..8abf4dfaa5c5 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -93,7 +93,7 @@ static int exynos_asv_update_opps(struct exynos_asv *asv)
continue;
opp_table = dev_pm_opp_get_opp_table(cpu);
- if (IS_ERR_OR_NULL(opp_table))
+ if (IS_ERR(opp_table))
continue;
if (!last_opp_table || opp_table != last_opp_table) {
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
new file mode 100644
index 000000000000..ff3e099fc208
--- /dev/null
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// originally in linux/arch/arm/plat-s3c24xx/pm.c
+//
+// Copyright (c) 2004-2008 Simtec Electronics
+// http://armlinux.simtec.co.uk
+// Ben Dooks <ben@simtec.co.uk>
+//
+// S3C Power Management - suspend/resume memory corruption check.
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include <linux/soc/samsung/s3c-pm.h>
+
+#if CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE < 1
+#error CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE must be a positive non-zero value
+#endif
+
+/* suspend checking code...
+ *
+ * this next area does a set of CRC checks over all the installed
+ * memory, so the system can verify whether the resume was ok.
+ *
+ * CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE defines the block size for the CRC;
+ * increasing it means a corrupted area is harder to pin down, while
+ * reducing it makes the CRC save area grow.
+ */
+
+#define CHECK_CHUNKSIZE (CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE * 1024)
+
+static u32 crc_size; /* size needed for the crc block */
+static u32 *crcs; /* allocated over suspend/resume */
+
+typedef u32 *(run_fn_t)(struct resource *ptr, u32 *arg);
+
+/* s3c_pm_run_res
+ *
+ * go through the given resource list, and look for system ram
+*/
+
+static void s3c_pm_run_res(struct resource *ptr, run_fn_t fn, u32 *arg)
+{
+ while (ptr != NULL) {
+ if (ptr->child != NULL)
+ s3c_pm_run_res(ptr->child, fn, arg);
+
+ if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+ == IORESOURCE_SYSTEM_RAM) {
+ S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
+ (unsigned long)ptr->start,
+ (unsigned long)ptr->end);
+ arg = (fn)(ptr, arg);
+ }
+
+ ptr = ptr->sibling;
+ }
+}
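For readers unfamiliar with the child/sibling layout of struct resource, a minimal user-space model of the walk above (toy types, not the kernel's):

#include <stdio.h>

struct node {
	const char *name;
	struct node *child;
	struct node *sibling;
	int is_ram;
};

static void walk(struct node *n, void (*fn)(struct node *))
{
	while (n) {
		if (n->child)
			walk(n->child, fn);	/* recurse into children first */
		if (n->is_ram)
			fn(n);			/* visit nodes that match */
		n = n->sibling;			/* then continue along siblings */
	}
}

static void show(struct node *n)
{
	printf("found %s\n", n->name);
}

int main(void)
{
	struct node ram = { "System RAM", NULL, NULL, 1 };
	struct node mmio = { "MMIO", NULL, &ram, 0 };
	struct node root = { "root", &mmio, NULL, 0 };

	walk(&root, show);	/* prints "found System RAM" */
	return 0;
}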
+
+static void s3c_pm_run_sysram(run_fn_t fn, u32 *arg)
+{
+ s3c_pm_run_res(&iomem_resource, fn, arg);
+}
+
+static u32 *s3c_pm_countram(struct resource *res, u32 *val)
+{
+ u32 size = (u32)resource_size(res);
+
+ size += CHECK_CHUNKSIZE-1;
+ size /= CHECK_CHUNKSIZE;
+
+ S3C_PMDBG("Area %08lx..%08lx, %d blocks\n",
+ (unsigned long)res->start, (unsigned long)res->end, size);
+
+ *val += size * sizeof(u32);
+ return val;
+}
+
+/* s3c_pm_prepare_check
+ *
+ * prepare the necessary information for creating the CRCs. This
+ * must be done before the final save, as it will require memory
+ * allocating, and thus touching bits of the kernel we do not
+ * know about.
+*/
+
+void s3c_pm_check_prepare(void)
+{
+ crc_size = 0;
+
+ s3c_pm_run_sysram(s3c_pm_countram, &crc_size);
+
+ S3C_PMDBG("s3c_pm_prepare_check: %u checks needed\n", crc_size);
+
+ crcs = kmalloc(crc_size+4, GFP_KERNEL);
+ if (crcs == NULL)
+		printk(KERN_ERR "Cannot allocate CRC save area\n");
+}
+
+static u32 *s3c_pm_makecheck(struct resource *res, u32 *val)
+{
+ unsigned long addr, left;
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ *val = crc32_le(~0, phys_to_virt(addr), left);
+ val++;
+ }
+
+ return val;
+}
+
+/* s3c_pm_check_store
+ *
+ * compute the CRC values for the memory blocks before the final
+ * sleep.
+*/
+
+void s3c_pm_check_store(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_makecheck, crcs);
+}
+
+/* in_region
+ *
+ * return TRUE if the area defined by ptr..ptr+size overlaps the
+ * area what..what+whatsz
+*/
+
+static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
+{
+ if ((what+whatsz) < ptr)
+ return 0;
+
+ if (what > (ptr+size))
+ return 0;
+
+ return 1;
+}
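The double negation above is easy to misread; the same test in a runnable user-space form (char pointers instead of the original's void-pointer arithmetic, which GNU C permits):

#include <assert.h>
#include <stddef.h>

static int overlaps(char *ptr, int size, char *what, size_t whatsz)
{
	if ((what + whatsz) < ptr)	/* target ends before the region */
		return 0;
	if (what > (ptr + size))	/* target starts after the region */
		return 0;
	return 1;			/* otherwise the ranges overlap */
}

int main(void)
{
	char buf[256];

	assert(overlaps(buf, 128, buf + 64, 16));	/* fully inside */
	assert(!overlaps(buf, 64, buf + 128, 16));	/* disjoint, after */
	return 0;
}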
+
+/**
+ * s3c_pm_runcheck() - helper to check a resource on restore.
+ * @res: The resource to check
+ * @val: Pointer to list of CRC32 values to check.
+ *
+ * Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
+ * function runs the given memory resource checking it against the stored
+ * CRC to ensure that memory is restored. The function tries to skip as
+ * many as possible of the areas used during the suspend process.
+ */
+static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
+{
+ unsigned long addr;
+ unsigned long left;
+ void *stkpage;
+ void *ptr;
+ u32 calc;
+
+ stkpage = (void *)((u32)&calc & ~PAGE_MASK);
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ ptr = phys_to_virt(addr);
+
+ if (in_region(ptr, left, stkpage, 4096)) {
+ S3C_PMDBG("skipping %08lx, has stack in\n", addr);
+ goto skip_check;
+ }
+
+ if (in_region(ptr, left, crcs, crc_size)) {
+ S3C_PMDBG("skipping %08lx, has crc block in\n", addr);
+ goto skip_check;
+ }
+
+ /* calculate and check the checksum */
+
+ calc = crc32_le(~0, ptr, left);
+ if (calc != *val) {
+ printk(KERN_ERR "Restore CRC error at "
+ "%08lx (%08x vs %08x)\n", addr, calc, *val);
+
+ S3C_PMDBG("Restore CRC error at %08lx (%08x vs %08x)\n",
+ addr, calc, *val);
+ }
+
+ skip_check:
+ val++;
+ }
+
+ return val;
+}
+
+/**
+ * s3c_pm_check_restore() - memory check called on resume
+ *
+ * check the CRCs after the restore event and free the memory used
+ * to hold them
+*/
+void s3c_pm_check_restore(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_runcheck, crcs);
+}
+
+/**
+ * s3c_pm_check_cleanup() - free memory resources
+ *
+ * Free the resources that where allocated by the suspend
+ * memory check code. We do this separately from the
+ * s3c_pm_check_restore() function as we cannot call any
+ * functions that might sleep during that resume.
+ */
+void s3c_pm_check_cleanup(void)
+{
+ kfree(crcs);
+ crcs = NULL;
+}
+
diff --git a/drivers/soc/samsung/s3c-pm-debug.c b/drivers/soc/samsung/s3c-pm-debug.c
new file mode 100644
index 000000000000..b5ce0e9a41e5
--- /dev/null
+++ b/drivers/soc/samsung/s3c-pm-debug.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2013 Samsung Electronics Co., Ltd.
+// Tomasz Figa <t.figa@samsung.com>
+// Copyright (C) 2008 Openmoko, Inc.
+// Copyright (C) 2004-2008 Simtec Electronics
+// Ben Dooks <ben@simtec.co.uk>
+// http://armlinux.simtec.co.uk/
+//
+// Samsung common power management (suspend to RAM) debug support
+
+#include <linux/serial_core.h>
+#include <linux/serial_s3c.h>
+#include <linux/io.h>
+
+#include <asm/mach/map.h>
+
+#include <linux/soc/samsung/s3c-pm.h>
+
+static struct pm_uart_save uart_save;
+
+extern void printascii(const char *);
+
+void s3c_pm_dbg(const char *fmt, ...)
+{
+ va_list va;
+ char buff[256];
+
+ va_start(va, fmt);
+ vsnprintf(buff, sizeof(buff), fmt, va);
+ va_end(va);
+
+ printascii(buff);
+}
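s3c_pm_dbg() is the classic bounded-vsnprintf wrapper; the same shape in portable user-space C (fputs() standing in for printascii()):

#include <stdarg.h>
#include <stdio.h>

/* format into a bounded buffer, then hand the string to a low-level sink */
static void dbg(const char *fmt, ...)
{
	va_list va;
	char buff[256];

	va_start(va, fmt);
	vsnprintf(buff, sizeof(buff), fmt, va);
	va_end(va);

	fputs(buff, stdout);
}

int main(void)
{
	dbg("resumed after %d ms\n", 42);
	return 0;
}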
+
+static inline void __iomem *s3c_pm_uart_base(void)
+{
+ unsigned long paddr;
+ unsigned long vaddr;
+
+ debug_ll_addr(&paddr, &vaddr);
+
+ return (void __iomem *)vaddr;
+}
+
+void s3c_pm_save_uarts(bool is_s3c2410)
+{
+ void __iomem *regs = s3c_pm_uart_base();
+ struct pm_uart_save *save = &uart_save;
+
+ save->ulcon = __raw_readl(regs + S3C2410_ULCON);
+ save->ucon = __raw_readl(regs + S3C2410_UCON);
+ save->ufcon = __raw_readl(regs + S3C2410_UFCON);
+ save->umcon = __raw_readl(regs + S3C2410_UMCON);
+ save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV);
+
+ if (!is_s3c2410)
+ save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT);
+
+ S3C_PMDBG("UART[%p]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n",
+ regs, save->ulcon, save->ucon, save->ufcon, save->ubrdiv);
+}
+
+void s3c_pm_restore_uarts(bool is_s3c2410)
+{
+ void __iomem *regs = s3c_pm_uart_base();
+ struct pm_uart_save *save = &uart_save;
+
+ s3c_pm_arch_update_uart(regs, save);
+
+ __raw_writel(save->ulcon, regs + S3C2410_ULCON);
+ __raw_writel(save->ucon, regs + S3C2410_UCON);
+ __raw_writel(save->ufcon, regs + S3C2410_UFCON);
+ __raw_writel(save->umcon, regs + S3C2410_UMCON);
+ __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV);
+
+ if (!is_s3c2410)
+ __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT);
+}
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 1b0d50f36349..d4c7bd59429e 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -194,7 +194,7 @@ static const struct sunxi_sram_data *sunxi_sram_of_parse(struct device_node *nod
if (!data) {
ret = -EINVAL;
goto err;
- };
+ }
for (func = data->func; func->func; func++) {
if (val == func->val) {
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 6bc603d0b9d9..976dee036470 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -119,6 +119,16 @@ config ARCH_TEGRA_194_SOC
help
Enable support for the NVIDIA Tegra194 SoC.
+config ARCH_TEGRA_234_SOC
+ bool "NVIDIA Tegra234 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra234 SoC.
+
endif
endif
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index d1f8dd0289e6..94b60a692b51 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -49,6 +49,9 @@ static struct tegra_fuse *fuse = &(struct tegra_fuse) {
};
static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-efuse", .data = &tegra234_fuse_soc },
+#endif
#ifdef CONFIG_ARCH_TEGRA_194_SOC
{ .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc },
#endif
@@ -326,7 +329,8 @@ const struct attribute_group tegra_soc_attr_group = {
.attrs = tegra_soc_attr,
};
-#ifdef CONFIG_ARCH_TEGRA_194_SOC
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -336,7 +340,7 @@ static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
* platform type is silicon and all other non-zero values indicate
* the type of simulation platform is being used.
*/
- return sprintf(buf, "%d\n", (tegra_read_chipid() >> 20) & 0xf);
+ return sprintf(buf, "%d\n", tegra_get_platform());
}
static DEVICE_ATTR_RO(platform);
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 85accef41fa1..9ea7f0168457 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -356,3 +356,33 @@ const struct tegra_fuse_soc tegra194_fuse_soc = {
.soc_attr_group = &tegra194_soc_attr_group,
};
#endif
+
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct tegra_fuse_info tegra234_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra234_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra234_fuse_info,
+ .lookups = tegra234_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra234_fuse_lookups),
+ .soc_attr_group = &tegra194_soc_attr_group,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 9d4fc315a007..e057a58e2060 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -115,9 +115,17 @@ extern const struct tegra_fuse_soc tegra210_fuse_soc;
extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct attribute_group tegra194_soc_attr_group;
+#endif
+
#ifdef CONFIG_ARCH_TEGRA_194_SOC
extern const struct tegra_fuse_soc tegra194_fuse_soc;
-extern const struct attribute_group tegra194_soc_attr_group;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_fuse_soc tegra234_fuse_soc;
#endif
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 8e416ad91ee2..cee207d10024 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -47,6 +47,31 @@ u8 tegra_get_minor_rev(void)
return (tegra_read_chipid() >> 16) & 0xf;
}
+u8 tegra_get_platform(void)
+{
+ return (tegra_read_chipid() >> 20) & 0xf;
+}
+
+bool tegra_is_silicon(void)
+{
+ switch (tegra_get_chip_id()) {
+ case TEGRA194:
+ case TEGRA234:
+ if (tegra_get_platform() == 0)
+ return true;
+
+ return false;
+ }
+
+ /*
+ * Chips prior to Tegra194 have a different way of determining whether
+ * they are silicon or not. Since we never supported simulation on the
+ * older Tegra chips, don't bother extracting the information and just
+ * report that we're running on silicon.
+ */
+ return true;
+}
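For concreteness, the field extraction above in user-space form, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* same extraction as tegra_get_platform(): platform lives in bits [23:20] */
static uint8_t platform_of(uint32_t chipid)
{
	return (chipid >> 20) & 0xf;
}

int main(void)
{
	uint32_t chipid = 0x00300000;	/* hypothetical chip-id register value */

	/* 0 means silicon; any non-zero value is a simulation platform */
	printf("platform = %u\n", platform_of(chipid));
	return 0;
}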
+
u32 tegra_read_straps(void)
{
WARN(!chipid, "Tegra ABP MISC not yet available\n");
@@ -70,6 +95,7 @@ static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{ .compatible = "nvidia,tegra186-misc", },
{ .compatible = "nvidia,tegra194-misc", },
+ { .compatible = "nvidia,tegra234-misc", },
{},
};
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index d332e5d9abac..df9a5ca8c99c 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -336,45 +336,6 @@ struct tegra_pmc_soc {
bool has_blink_output;
};
-static const char * const tegra186_reset_sources[] = {
- "SYS_RESET",
- "AOWDT",
- "MCCPLEXWDT",
- "BPMPWDT",
- "SCEWDT",
- "SPEWDT",
- "APEWDT",
- "BCCPLEXWDT",
- "SENSOR",
- "AOTAG",
- "VFSENSOR",
- "SWREST",
- "SC7",
- "HSM",
- "CORESIGHT"
-};
-
-static const char * const tegra186_reset_levels[] = {
- "L0", "L1", "L2", "WARM"
-};
-
-static const char * const tegra30_reset_sources[] = {
- "POWER_ON_RESET",
- "WATCHDOG",
- "SENSOR",
- "SW_MAIN",
- "LP0"
-};
-
-static const char * const tegra210_reset_sources[] = {
- "POWER_ON_RESET",
- "WATCHDOG",
- "SENSOR",
- "SW_MAIN",
- "LP0",
- "AOTAG"
-};
-
/**
* struct tegra_pmc - NVIDIA Tegra PMC
* @dev: pointer to PMC device structure
@@ -1990,44 +1951,17 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
event->id,
&pmc->irq, pmc);
- /*
- * GPIOs don't have an equivalent interrupt in the
- * parent controller (GIC). However some code, such
- * as the one in irq_get_irqchip_state(), require a
- * valid IRQ chip to be set. Make sure that's the
- * case by passing NULL here, which will install a
- * dummy IRQ chip for the interrupt in the parent
- * domain.
- */
- if (domain->parent)
- irq_domain_set_hwirq_and_chip(domain->parent,
- virq, 0, NULL,
- NULL);
-
+ /* GPIO hierarchies stop at the PMC level */
+ if (!err && domain->parent)
+ err = irq_domain_disconnect_hierarchy(domain->parent,
+ virq);
break;
}
}
- /*
- * For interrupts that don't have associated wake events, assign a
- * dummy hardware IRQ number. This is used in the ->irq_set_type()
- * and ->irq_set_wake() callbacks to return early for these IRQs.
- */
- if (i == soc->num_wake_events) {
- err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
- &pmc->irq, pmc);
-
- /*
- * Interrupts without a wake event don't have a corresponding
- * interrupt in the parent controller (GIC). Pass NULL for the
- * chip here, which causes a dummy IRQ chip to be installed
- * for the interrupt in the parent domain, to make this
- * explicit.
- */
- if (domain->parent)
- irq_domain_set_hwirq_and_chip(domain->parent, virq, 0,
- NULL, NULL);
- }
+ /* If there is no wake-up event, there is no PMC mapping */
+ if (i == soc->num_wake_events)
+ err = irq_domain_disconnect_hierarchy(domain, virq);
return err;
}
@@ -2043,9 +1977,6 @@ static int tegra210_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
unsigned int offset, bit;
u32 value;
- if (data->hwirq == ULONG_MAX)
- return 0;
-
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -2080,9 +2011,6 @@ static int tegra210_pmc_irq_set_type(struct irq_data *data, unsigned int type)
unsigned int offset, bit;
u32 value;
- if (data->hwirq == ULONG_MAX)
- return 0;
-
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -2123,10 +2051,6 @@ static int tegra186_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
unsigned int offset, bit;
u32 value;
- /* nothing to do if there's no associated wake event */
- if (WARN_ON(data->hwirq == ULONG_MAX))
- return 0;
-
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -2154,10 +2078,6 @@ static int tegra186_pmc_irq_set_type(struct irq_data *data, unsigned int type)
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
u32 value;
- /* nothing to do if there's no associated wake event */
- if (data->hwirq == ULONG_MAX)
- return 0;
-
value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
switch (type) {
@@ -2184,6 +2104,34 @@ static int tegra186_pmc_irq_set_type(struct irq_data *data, unsigned int type)
return 0;
}
+static void tegra_irq_mask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_mask_parent(data);
+}
+
+static void tegra_irq_unmask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_unmask_parent(data);
+}
+
+static void tegra_irq_eoi_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_eoi_parent(data);
+}
+
+static int tegra_irq_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
{
struct irq_domain *parent = NULL;
@@ -2199,10 +2147,10 @@ static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
return 0;
pmc->irq.name = dev_name(pmc->dev);
- pmc->irq.irq_mask = irq_chip_mask_parent;
- pmc->irq.irq_unmask = irq_chip_unmask_parent;
- pmc->irq.irq_eoi = irq_chip_eoi_parent;
- pmc->irq.irq_set_affinity = irq_chip_set_affinity_parent;
+ pmc->irq.irq_mask = tegra_irq_mask_parent;
+ pmc->irq.irq_unmask = tegra_irq_unmask_parent;
+ pmc->irq.irq_eoi = tegra_irq_eoi_parent;
+ pmc->irq.irq_set_affinity = tegra_irq_set_affinity_parent;
pmc->irq.irq_set_type = pmc->soc->irq_set_type;
pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
@@ -2784,6 +2732,14 @@ static const u8 tegra30_cpu_powergates[] = {
TEGRA_POWERGATE_CPU3,
};
+static const char * const tegra30_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0"
+};
+
static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra30_powergates),
.powergates = tegra30_powergates,
@@ -3061,6 +3017,15 @@ static const struct pinctrl_pin_desc tegra210_pin_descs[] = {
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
};
+static const char * const tegra210_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0",
+ "AOTAG"
+};
+
static const struct tegra_wake_event tegra210_wake_events[] = {
TEGRA_WAKE_IRQ("rtc", 16, 2),
TEGRA_WAKE_IRQ("pmu", 51, 86),
@@ -3193,6 +3158,28 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
iounmap(wake);
}
+static const char * const tegra186_reset_sources[] = {
+ "SYS_RESET",
+ "AOWDT",
+ "MCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "BCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "SWREST",
+ "SC7",
+ "HSM",
+ "CORESIGHT"
+};
+
+static const char * const tegra186_reset_levels[] = {
+ "L0", "L1", "L2", "WARM"
+};
+
static const struct tegra_wake_event tegra186_wake_events[] = {
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
@@ -3362,7 +3349,75 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.has_blink_output = false,
};
+static const struct tegra_pmc_regs tegra234_pmc_regs = {
+ .scratch0 = 0x2000,
+ .dpd_req = 0,
+ .dpd_status = 0,
+ .dpd2_req = 0,
+ .dpd2_status = 0,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0xfc,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra234_reset_sources[] = {
+ "SYS_RESET_N",
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ "CSITE",
+ "RCEWDT",
+ "PVA0WDT",
+ "PVA1WDT",
+ "L1A_ASYNC",
+ "BPMPBOOT",
+ "FUSECRC",
+};
+
+static const struct tegra_pmc_soc tegra234_pmc_soc = {
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .num_pin_descs = 0,
+ .pin_descs = NULL,
+ .regs = &tegra234_pmc_regs,
+ .init = NULL,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra234_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra234_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .num_wake_events = 0,
+ .wake_events = NULL,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
+};
+
static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra234-pmc", .data = &tegra234_pmc_soc },
{ .compatible = "nvidia,tegra194-pmc", .data = &tegra194_pmc_soc },
{ .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc },
{ .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index e192fb788836..f5b82ffa637b 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -101,6 +101,17 @@ config TI_K3_SOCINFO
platforms to provide information about the SoC family and
variant to user space.
+config TI_PRUSS
+ tristate "TI PRU-ICSS Subsystem Platform drivers"
+ depends on SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ select MFD_SYSCON
+ help
+ TI PRU-ICSS Subsystem platform specific support.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 1110e5c98685..cc3c972fad2e 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
+obj-$(CONFIG_TI_PRUSS) += pruss.o
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 6dcc21dde0cb..1147dc4c1d59 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -208,6 +209,15 @@ struct k3_ringacc {
const struct k3_ringacc_ops *ops;
};
+/**
+ * struct k3_ringacc_soc_data - Rings accelerator SoC data
+ *
+ * @dma_ring_reset_quirk: DMA ring reset workaround enable
+ */
+struct k3_ringacc_soc_data {
+ unsigned dma_ring_reset_quirk:1;
+};
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -1051,9 +1061,6 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
return ret;
}
- ringacc->dma_ring_reset_quirk =
- of_property_read_bool(node, "ti,dma-ring-reset-quirk");
-
ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
if (IS_ERR(ringacc->tisci)) {
ret = PTR_ERR(ringacc->tisci);
@@ -1084,9 +1091,22 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
ringacc->rm_gp_range);
}
+static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
+ .dma_ring_reset_quirk = 1,
+};
+
+static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR1.0",
+ .data = &k3_ringacc_soc_data_sr1
+ },
+ {/* sentinel */}
+};
+
static int k3_ringacc_init(struct platform_device *pdev,
struct k3_ringacc *ringacc)
{
+ const struct soc_device_attribute *soc;
void __iomem *base_fifo, *base_rt;
struct device *dev = &pdev->dev;
struct resource *res;
@@ -1103,6 +1123,13 @@ static int k3_ringacc_init(struct platform_device *pdev,
if (ret)
return ret;
+ soc = soc_device_match(k3_ringacc_socinfo);
+ if (soc && soc->data) {
+ const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
base_rt = devm_ioremap_resource(dev, res);
if (IS_ERR(base_rt))
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index af0ba5288e58..bbbc2d2b7091 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -39,6 +39,7 @@ static const struct k3_soc_id {
} k3_soc_ids[] = {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
+ { 0xBB6D, "J7200" },
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 6285cd8efb21..8c863ecb1c60 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -355,7 +355,7 @@ static void dma_debug_show_devices(struct seq_file *s,
}
}
-static int dma_debug_show(struct seq_file *s, void *v)
+static int knav_dma_debug_show(struct seq_file *s, void *v)
{
struct knav_dma_device *dma;
@@ -370,17 +370,7 @@ static int dma_debug_show(struct seq_file *s, void *v)
return 0;
}
-static int knav_dma_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dma_debug_show, NULL);
-}
-
-static const struct file_operations knav_dma_debug_ops = {
- .open = knav_dma_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
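The rename above is what makes DEFINE_SHOW_ATTRIBUTE() usable: the macro derives every identifier from one stem, requiring a <stem>_show() and producing a <stem>_fops. Paraphrased from include/linux/seq_file.h (see the real header for the authoritative definition), the expansion is roughly:

static int knav_dma_debug_open(struct inode *inode, struct file *file)
{
	/* reuses the knav_dma_debug_show() defined above */
	return single_open(file, knav_dma_debug_show, inode->i_private);
}

static const struct file_operations knav_dma_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= knav_dma_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

which is why the debugfs_create_file() call below now passes knav_dma_debug_fops.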
static int of_channel_match_helper(struct device_node *np, const char *name,
const char **dma_instance)
@@ -778,7 +768,7 @@ static int knav_dma_probe(struct platform_device *pdev)
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_dma_debug_ops);
+ &knav_dma_debug_fops);
device_ready = true;
return ret;
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index aa071d96ef36..a460f201bf8e 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -478,17 +478,7 @@ static int knav_queue_debug_show(struct seq_file *s, void *v)
return 0;
}
-static int knav_queue_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, knav_queue_debug_show, NULL);
-}
-
-static const struct file_operations knav_queue_debug_ops = {
- .open = knav_queue_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
u32 flags)
@@ -1878,7 +1868,7 @@ static int knav_queue_probe(struct platform_device *pdev)
}
debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_queue_debug_ops);
+ &knav_queue_debug_fops);
device_ready = true;
return 0;
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index c9b3f9ebf0bb..980b04c38fd9 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -10,14 +10,39 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
#include <linux/platform_data/ti-prm.h>
+enum omap_prm_domain_mode {
+ OMAP_PRMD_OFF,
+ OMAP_PRMD_RETENTION,
+ OMAP_PRMD_ON_INACTIVE,
+ OMAP_PRMD_ON_ACTIVE,
+};
+
+struct omap_prm_domain_map {
+ unsigned int usable_modes; /* Mask of hardware supported modes */
+ unsigned long statechange:1; /* Optional low-power state change */
+ unsigned long logicretstate:1; /* Optional logic off mode */
+};
+
+struct omap_prm_domain {
+ struct device *dev;
+ struct omap_prm *prm;
+ struct generic_pm_domain pd;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *cap;
+ u32 pwrstctrl_saved;
+};
+
struct omap_rst_map {
s8 rst;
s8 st;
@@ -27,6 +52,9 @@ struct omap_prm_data {
u32 base;
const char *name;
const char *clkdm_name;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *dmap;
u16 rstctrl;
u16 rstst;
const struct omap_rst_map *rstmap;
@@ -36,6 +64,7 @@ struct omap_prm_data {
struct omap_prm {
const struct omap_prm_data *data;
void __iomem *base;
+ struct omap_prm_domain *prmd;
};
struct omap_reset_data {
@@ -47,6 +76,7 @@ struct omap_reset_data {
struct device *dev;
};
+#define genpd_to_prm_domain(gpd) container_of(gpd, struct omap_prm_domain, pd)
#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
#define OMAP_MAX_RESETS 8
@@ -58,6 +88,39 @@ struct omap_reset_data {
#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
+#define PRM_STATE_MAX_WAIT 10000
+#define PRM_LOGICRETSTATE BIT(2)
+#define PRM_LOWPOWERSTATECHANGE BIT(4)
+#define PRM_POWERSTATE_MASK OMAP_PRMD_ON_ACTIVE
+
+#define PRM_ST_INTRANSITION BIT(20)
+
+static const struct omap_prm_domain_map omap_prm_all = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_noinact = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
+ BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_nooff = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+};
+
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
@@ -78,6 +141,10 @@ static const struct omap_rst_map rst_map_012[] = {
static const struct omap_prm_data omap4_prm_data[] = {
{ .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4a306500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
+ },
{ .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
{ .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
{ .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -86,6 +153,10 @@ static const struct omap_prm_data omap4_prm_data[] = {
static const struct omap_prm_data omap5_prm_data[] = {
{ .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4ae06500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
+ },
{ .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
{ .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
{ .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -119,7 +190,11 @@ static const struct omap_prm_data am3_prm_data[] = {
{ .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
{ .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
{ .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
- { .name = "gfx", .base = 0x44e01100, .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ {
+ .name = "gfx", .base = 0x44e01100,
+ .pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
{ },
};
@@ -135,7 +210,11 @@ static const struct omap_rst_map am4_device_rst_map[] = {
};
static const struct omap_prm_data am4_prm_data[] = {
- { .name = "gfx", .base = 0x44df0400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ {
+ .name = "gfx", .base = 0x44df0400,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
{ .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
{ .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
{ .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -151,6 +230,180 @@ static const struct of_device_id omap_prm_id_table[] = {
{ },
};
+#ifdef DEBUG
+static void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+ dev_dbg(prmd->dev, "%s %s: %08x/%08x\n",
+ prmd->pd.name, desc,
+ readl_relaxed(prmd->prm->base + prmd->pwrstctrl),
+ readl_relaxed(prmd->prm->base + prmd->pwrstst));
+}
+#else
+static inline void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+}
+#endif
+
+static int omap_prm_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "on: previous state");
+
+ if (prmd->pwrstctrl_saved)
+ v = prmd->pwrstctrl_saved;
+ else
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+
+ writel_relaxed(v | OMAP_PRMD_ON_ACTIVE,
+ prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_err(prmd->dev, "%s: %s timed out\n",
+ prmd->pd.name, __func__);
+
+ omap_prm_domain_show_state(prmd, "on: new state");
+
+ return ret;
+}
+
+/* No need to check for holes in the mask for the lowest mode */
+static int omap_prm_domain_find_lowest(struct omap_prm_domain *prmd)
+{
+ return __ffs(prmd->cap->usable_modes);
+}
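A worked example of why __ffs() is enough here, using one of the maps defined above (user-space stand-in, with __builtin_ctz() playing the role of __ffs()):

#include <stdio.h>

#define BIT(n)	(1U << (n))

/* lowest set bit index, as the kernel's __ffs() returns */
static unsigned int lowest_bit(unsigned int mask)
{
	return (unsigned int)__builtin_ctz(mask);
}

int main(void)
{
	/* e.g. omap_prm_nooff: ON_ACTIVE(3) | ON_INACTIVE(2) | RETENTION(1) */
	unsigned int usable = BIT(3) | BIT(2) | BIT(1);

	/* lowest usable mode is RETENTION (1); OFF (0) is simply absent */
	printf("lowest mode = %u\n", lowest_bit(usable));
	return 0;
}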
+
+static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "off: previous state");
+
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+ prmd->pwrstctrl_saved = v;
+
+ v &= ~PRM_POWERSTATE_MASK;
+ v |= omap_prm_domain_find_lowest(prmd);
+
+ if (prmd->cap->statechange)
+ v |= PRM_LOWPOWERSTATECHANGE;
+ if (prmd->cap->logicretstate)
+ v &= ~PRM_LOGICRETSTATE;
+ else
+ v |= PRM_LOGICRETSTATE;
+
+ writel_relaxed(v, prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_warn(prmd->dev, "%s: %s timed out\n",
+ __func__, prmd->pd.name);
+
+ omap_prm_domain_show_state(prmd, "off: new state");
+
+ return 0;
+}
+
+static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+ struct of_phandle_args pd_args;
+ struct omap_prm_domain *prmd;
+ struct device_node *np;
+ int ret;
+
+ prmd = genpd_to_prm_domain(domain);
+ np = dev->of_node;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 0)
+		dev_warn(dev, "%s: unsupported #power-domain-cells: %i\n",
+ prmd->pd.name, pd_args.args_count);
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+
+ return 0;
+}
+
+static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+}
+
+static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+{
+ struct omap_prm_domain *prmd;
+ struct device_node *np = dev->of_node;
+ const struct omap_prm_data *data;
+ const char *name;
+ int error;
+
+ if (!of_find_property(dev->of_node, "#power-domain-cells", NULL))
+ return 0;
+
+ of_node_put(dev->of_node);
+
+ prmd = devm_kzalloc(dev, sizeof(*prmd), GFP_KERNEL);
+ if (!prmd)
+ return -ENOMEM;
+
+ data = prm->data;
+ name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ data->name);
+
+ prmd->dev = dev;
+ prmd->prm = prm;
+ prmd->cap = prmd->prm->data->dmap;
+ prmd->pwrstctrl = prmd->prm->data->pwrstctrl;
+ prmd->pwrstst = prmd->prm->data->pwrstst;
+
+ prmd->pd.name = name;
+ prmd->pd.power_on = omap_prm_domain_power_on;
+ prmd->pd.power_off = omap_prm_domain_power_off;
+ prmd->pd.attach_dev = omap_prm_domain_attach_dev;
+ prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+
+ pm_genpd_init(&prmd->pd, NULL, true);
+ error = of_genpd_add_provider_simple(np, &prmd->pd);
+ if (error)
+ pm_genpd_remove(&prmd->pd);
+ else
+ prm->prmd = prmd;
+
+ return error;
+}
+
static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
{
if (reset->mask & BIT(id))
@@ -351,6 +604,7 @@ static int omap_prm_probe(struct platform_device *pdev)
const struct omap_prm_data *data;
struct omap_prm *prm;
const struct of_device_id *match;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -378,7 +632,21 @@ static int omap_prm_probe(struct platform_device *pdev)
if (IS_ERR(prm->base))
return PTR_ERR(prm->base);
- return omap_prm_reset_init(pdev, prm);
+ ret = omap_prm_domain_init(&pdev->dev, prm);
+ if (ret)
+ return ret;
+
+ ret = omap_prm_reset_init(pdev, prm);
+ if (ret)
+ goto err_domain;
+
+ return 0;
+
+err_domain:
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&prm->prmd->pd);
+
+ return ret;
}
static struct platform_driver omap_prm_driver = {
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index de0123ec8ad6..d2f5e7001a93 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -39,6 +40,8 @@
#define GIC_INT_SET_PENDING_BASE 0x200
#define AM43XX_GIC_DIST_BASE 0x48241000
+static void __iomem *rtc_base_virt;
+static struct clk *rtc_fck;
static u32 rtc_magic_val;
static int (*am33xx_do_wfi_sram)(unsigned long unused);
@@ -90,7 +93,7 @@ static int am33xx_push_sram_idle(void)
ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
ro_sram_data.amx3_pm_sram_data_phys =
gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
- ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+ ro_sram_data.rtc_base_virt = rtc_base_virt;
/* Save physical address to calculate resume offset during pm init */
am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
@@ -158,7 +161,7 @@ static struct wkup_m3_wakeup_src rtc_wake_src(void)
{
u32 i;
- i = __raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40;
+ i = __raw_readl(rtc_base_virt + 0x44) & 0x40;
if (i) {
retrigger_irq = rtc_alarm_wakeup.irq_nr;
@@ -177,13 +180,24 @@ static int am33xx_rtc_only_idle(unsigned long wfi_flags)
return 0;
}
+/*
+ * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
+ * It looks like the module can stay in the SYSC_IDLE_SMART_WKUP mode
+ * configured by the interconnect code just fine for both rtc+ddr suspend
+ * and retention suspend.
+ */
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
if (suspend_state == PM_SUSPEND_MEM &&
pm_ops->check_off_mode_enable()) {
- pm_ops->prepare_rtc_suspend();
+ ret = clk_prepare_enable(rtc_fck);
+ if (ret) {
+ dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
+ return ret;
+ }
+
pm_ops->save_context();
suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
clk_save_context();
@@ -236,7 +250,7 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
}
if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
- pm_ops->prepare_rtc_resume();
+ clk_disable_unprepare(rtc_fck);
return ret;
}
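The replaced prepare_rtc_suspend()/prepare_rtc_resume() hooks boil down to standard clk API calls; a hedged fragment of the lifecycle the patch now open-codes (the function name is made up for illustration, <linux/clk.h> and <linux/of.h> assumed):

static int rtc_fck_example(struct device_node *rtc_np)
{
	struct clk *fck = of_clk_get_by_name(rtc_np->parent, "fck");

	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_prepare_enable(fck);	/* before entering rtc+ddr suspend */
	/* ... rtc+ddr suspend and resume happen here ... */
	clk_disable_unprepare(fck);	/* after resume */
	clk_put(fck);			/* on driver teardown */
	return 0;
}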
@@ -425,14 +439,28 @@ static int am33xx_pm_rtc_setup(void)
struct device_node *np;
unsigned long val = 0;
struct nvmem_device *nvmem;
+ int error;
np = of_find_node_by_name(NULL, "rtc");
if (of_device_is_available(np)) {
+ /* RTC interconnect target module clock */
+ rtc_fck = of_clk_get_by_name(np->parent, "fck");
+ if (IS_ERR(rtc_fck))
+ return PTR_ERR(rtc_fck);
+
+ rtc_base_virt = of_iomap(np, 0);
+ if (!rtc_base_virt) {
+			pr_warn("PM: could not iomap rtc\n");
+ error = -ENODEV;
+ goto err_clk_put;
+ }
+
omap_rtc = rtc_class_open("rtc0");
if (!omap_rtc) {
pr_warn("PM: rtc0 not available");
- return -EPROBE_DEFER;
+ error = -EPROBE_DEFER;
+ goto err_iounmap;
}
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
@@ -454,6 +482,13 @@ static int am33xx_pm_rtc_setup(void)
}
return 0;
+
+err_iounmap:
+ iounmap(rtc_base_virt);
+err_clk_put:
+ clk_put(rtc_fck);
+
+ return error;
}
static int am33xx_pm_probe(struct platform_device *pdev)
@@ -544,6 +579,8 @@ static int am33xx_pm_remove(struct platform_device *pdev)
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
return 0;
}
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
new file mode 100644
index 000000000000..cc0b4ad7a3d3
--- /dev/null
+++ b/drivers/soc/ti/pruss.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS platform driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pruss_driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct pruss_private_data - PRUSS driver private data
+ * @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
+ * @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ */
+struct pruss_private_data {
+ bool has_no_sharedram;
+ bool has_core_mux_clock;
+};
+
+static void pruss_of_free_clk_provider(void *data)
+{
+ struct device_node *clk_mux_np = data;
+
+ of_clk_del_provider(clk_mux_np);
+ of_node_put(clk_mux_np);
+}
+
+static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux,
+ char *mux_name, struct device_node *clks_np)
+{
+ struct device_node *clk_mux_np;
+ struct device *dev = pruss->dev;
+ char *clk_mux_name;
+ unsigned int num_parents;
+ const char **parent_names;
+ void __iomem *reg;
+ u32 reg_offset;
+ int ret;
+
+ clk_mux_np = of_get_child_by_name(clks_np, mux_name);
+ if (!clk_mux_np) {
+ dev_err(dev, "%pOF is missing its '%s' node\n", clks_np,
+ mux_name);
+ return -ENODEV;
+ }
+
+ num_parents = of_clk_get_parent_count(clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(dev, "mux-clock %pOF must have parents\n", clk_mux_np);
+ ret = -EINVAL;
+ goto put_clk_mux_np;
+ }
+
+ parent_names = devm_kcalloc(dev, sizeof(*parent_names), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ of_clk_parent_fill(clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(dev), clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ ret = of_property_read_u32(clk_mux_np, "reg", &reg_offset);
+ if (ret)
+ goto put_clk_mux_np;
+
+ reg = pruss->cfg_base + reg_offset;
+
+ clk_mux = clk_register_mux(NULL, clk_mux_name, parent_names,
+ num_parents, 0, reg, 0, 1, 0, NULL);
+ if (IS_ERR(clk_mux)) {
+ ret = PTR_ERR(clk_mux);
+ goto put_clk_mux_np;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))clk_unregister_mux,
+ clk_mux);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux unregister action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ ret = of_clk_add_provider(clk_mux_np, of_clk_src_simple_get, clk_mux);
+ if (ret)
+ goto put_clk_mux_np;
+
+ ret = devm_add_action_or_reset(dev, pruss_of_free_clk_provider,
+ clk_mux_np);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux free action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ return 0;
+
+put_clk_mux_np:
+ of_node_put(clk_mux_np);
+ return ret;
+}
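An illustrative fragment of the cleanup pattern used twice above (names are hypothetical, <linux/device.h> assumed): on success the action fires automatically at driver detach; if registration itself fails, the action is invoked immediately, so neither case needs a manual unwind.

static void my_cleanup(void *data)
{
	/* undo the matching setup step, e.g. unregister a clock */
}

static int setup_with_cleanup(struct device *dev, void *obj)
{
	/* ... set up obj here ... */
	return devm_add_action_or_reset(dev, my_cleanup, obj);
}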
+
+static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
+{
+ const struct pruss_private_data *data;
+ struct device_node *clks_np;
+ struct device *dev = pruss->dev;
+ int ret = 0;
+
+ data = of_device_get_match_data(dev);
+ if (IS_ERR(data))
+ return -ENODEV;
+
+ clks_np = of_get_child_by_name(cfg_node, "clocks");
+ if (!clks_np) {
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np);
+ return -ENODEV;
+ }
+
+ if (data && data->has_core_mux_clock) {
+ ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
+ "coreclk-mux", clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto put_clks_node;
+ }
+ }
+
+ ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
+ clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup iepclk-mux\n");
+ goto put_clks_node;
+ }
+
+put_clks_node:
+ of_node_put(clks_np);
+
+ return ret;
+}
+
+static struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
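For reference, a small user-space calculation (region size assumed) of the geometry pruss_probe() fills in below: with 32-bit registers at a 4-byte stride, max_register is the offset of the last register in the window.

#include <stdio.h>

int main(void)
{
	unsigned int size = 0x2000;	/* assumed size of the "cfg" region */
	unsigned int stride = 4;	/* one 32-bit register every 4 bytes */

	/* matches "max_register = resource_size(&res) - 4" in the probe */
	printf("max_register = %#x\n", size - stride);
	return 0;
}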
+
+static int pruss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct pruss *pruss;
+ struct resource res;
+ int ret, i, index;
+ const struct pruss_private_data *data;
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (IS_ERR(data)) {
+ dev_err(dev, "missing private data\n");
+ return -ENODEV;
+ }
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set the DMA coherent mask");
+ return ret;
+ }
+
+ pruss = devm_kzalloc(dev, sizeof(*pruss), GFP_KERNEL);
+ if (!pruss)
+ return -ENOMEM;
+
+ pruss->dev = dev;
+
+ child = of_get_child_by_name(np, "memories");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'memories' node\n", child);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ /*
+		 * On AM437x, one of the two PRUSS units does not contain
+		 * Shared RAM; skip it
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0) {
+ of_node_put(child);
+ return index;
+ }
+
+ if (of_address_to_resource(child, index, &res)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ of_node_put(child);
+ return -ENOMEM;
+ }
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+ of_node_put(child);
+
+ platform_set_drvdata(pdev, pruss);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "couldn't enable module\n");
+ pm_runtime_put_noidle(dev);
+ goto rpm_disable;
+ }
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
+ ret = -ENODEV;
+ goto rpm_put;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto node_put;
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "failed to register child devices\n");
+ goto node_put;
+ }
+
+ of_node_put(child);
+
+ return 0;
+
+node_put:
+ of_node_put(child);
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int pruss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ devm_of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/* instance-specific driver private data */
+static const struct pruss_private_data am437x_pruss1_data = {
+ .has_no_sharedram = false,
+};
+
+static const struct pruss_private_data am437x_pruss0_data = {
+ .has_no_sharedram = true,
+};
+
+static const struct pruss_private_data am65x_j721e_pruss_data = {
+ .has_core_mux_clock = true,
+};
+
+static const struct of_device_id pruss_of_match[] = {
+ { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
+ { .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
+ { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,k2g-pruss" },
+ { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pruss_of_match);
+
+static struct platform_driver pruss_driver = {
+ .driver = {
+ .name = "pruss",
+ .of_match_table = pruss_of_match,
+ },
+ .probe = pruss_probe,
+ .remove = pruss_remove,
+};
+module_platform_driver(pruss_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("PRU-ICSS Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/avs/smartreflex.c b/drivers/soc/ti/smartreflex.c
index 5376f3d22f31..5376f3d22f31 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/soc/ti/smartreflex.c
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8c2a2f23982c..8afb3f45d263 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -9,7 +9,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -18,150 +17,95 @@
#include <dt-bindings/soc/ti,sci_pm_domain.h>
/**
- * struct ti_sci_genpd_dev_data: holds data needed for every device attached
- * to this genpd
- * @idx: index of the device that identifies it with the system
- * control processor.
- * @exclusive: Permissions for exclusive request or shared request of the
- * device.
+ * struct ti_sci_genpd_provider: holds common TI SCI genpd provider data
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd_list: list of all the power domains on the device
+ * @data: onecell data for genpd core
*/
-struct ti_sci_genpd_dev_data {
- int idx;
- u8 exclusive;
+struct ti_sci_genpd_provider {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct list_head pd_list;
+ struct genpd_onecell_data data;
};
/**
* struct ti_sci_pm_domain: TI specific data needed for power domain
- * @ti_sci: handle to TI SCI protocol driver that provides ops to
- * communicate with system control processor.
- * @dev: pointer to dev for the driver for devm allocs
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ * @exclusive: Permissions for exclusive request or shared request of the
+ * device.
* @pd: generic_pm_domain for use with the genpd framework
+ * @node: link for the genpd list
+ * @parent: link to the parent TI SCI genpd provider
*/
struct ti_sci_pm_domain {
- const struct ti_sci_handle *ti_sci;
- struct device *dev;
+ int idx;
+ u8 exclusive;
struct generic_pm_domain pd;
+ struct list_head node;
+ struct ti_sci_genpd_provider *parent;
};
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
-/**
- * ti_sci_dev_id(): get prepopulated ti_sci id from struct dev
- * @dev: pointer to device associated with this genpd
- *
- * Returns device_id stored from ti,sci_id property
- */
-static int ti_sci_dev_id(struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- return sci_dev_data->idx;
-}
-
-static u8 is_ti_sci_dev_exclusive(struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- return sci_dev_data->exclusive;
-}
-
-/**
- * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
- * @dev: pointer to device associated with this genpd
- *
- * Returns ti_sci_handle to be used to communicate with system
- * control processor.
+/*
+ * ti_sci_pd_power_off(): genpd power down hook
+ * @domain: pointer to the powerdomain to power off
*/
-static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev)
+static int ti_sci_pd_power_off(struct generic_pm_domain *domain)
{
- struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- return ti_sci_genpd->ti_sci;
+ return ti_sci->ops.dev_ops.put_device(ti_sci, pd->idx);
}
-/**
- * ti_sci_dev_start(): genpd device start hook called to turn device on
- * @dev: pointer to device associated with this genpd to be powered on
+/*
+ * ti_sci_pd_power_on(): genpd power up hook
+ * @domain: pointer to the powerdomain to power on
*/
-static int ti_sci_dev_start(struct device *dev)
+static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- if (is_ti_sci_dev_exclusive(dev))
- return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci, idx);
+ if (pd->exclusive)
+ return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci,
+ pd->idx);
else
- return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+ return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
-/**
- * ti_sci_dev_stop(): genpd device stop hook called to turn device off
- * @dev: pointer to device associated with this genpd to be powered off
+/*
+ * ti_sci_pd_xlate(): translation service for TI SCI genpds
+ * @genpdspec: DT identification data for the genpd
+ * @data: genpd core data for all the powerdomains on the device
*/
-static int ti_sci_dev_stop(struct device *dev)
+static struct generic_pm_domain *ti_sci_pd_xlate(
+ struct of_phandle_args *genpdspec,
+ void *data)
{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
- return ti_sci->ops.dev_ops.put_device(ti_sci, idx);
-}
+ if (genpdspec->args_count != 1 && genpdspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
-static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct of_phandle_args pd_args;
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain);
- const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci;
- struct ti_sci_genpd_dev_data *sci_dev_data;
- struct generic_pm_domain_data *genpd_data;
- int idx, ret = 0;
-
- ret = of_parse_phandle_with_args(np, "power-domains",
- "#power-domain-cells", 0, &pd_args);
- if (ret < 0)
- return ret;
-
- if (pd_args.args_count != 1 && pd_args.args_count != 2)
- return -EINVAL;
-
- idx = pd_args.args[0];
-
- /*
- * Check the validity of the requested idx, if the index is not valid
- * the PMMC will return a NAK here and we will not allocate it.
- */
- ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx);
- if (ret)
- return -EINVAL;
-
- sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL);
- if (!sci_dev_data)
- return -ENOMEM;
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
- sci_dev_data->idx = idx;
- /* Enable the exclusive permissions by default */
- sci_dev_data->exclusive = TI_SCI_PD_EXCLUSIVE;
- if (pd_args.args_count == 2)
- sci_dev_data->exclusive = pd_args.args[1] & 0x1;
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
- genpd_data = dev_gpd_data(dev);
- genpd_data->data = sci_dev_data;
+ /* only update exclusive permissions when a second cell is given */
+ if (genpdspec->args_count == 2)
+ genpd_to_ti_sci_pd(genpd_data->domains[idx])->exclusive =
+ genpdspec->args[1];
- return 0;
-}
-
-static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- kfree(sci_dev_data);
- genpd_data->data = NULL;
+ return genpd_data->domains[idx];
}
static const struct of_device_id ti_sci_pm_domain_matches[] = {
@@ -173,33 +117,80 @@ MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct ti_sci_pm_domain *ti_sci_pd;
+ struct ti_sci_genpd_provider *pd_provider;
+ struct ti_sci_pm_domain *pd;
+ struct device_node *np = NULL;
+ struct of_phandle_args args;
int ret;
+ u32 max_id = 0;
+ int index;
- ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL);
- if (!ti_sci_pd)
+ pd_provider = devm_kzalloc(dev, sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
return -ENOMEM;
- ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev);
- if (IS_ERR(ti_sci_pd->ti_sci))
- return PTR_ERR(ti_sci_pd->ti_sci);
+ pd_provider->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(pd_provider->ti_sci))
+ return PTR_ERR(pd_provider->ti_sci);
+
+ pd_provider->dev = dev;
+
+ INIT_LIST_HEAD(&pd_provider->pd_list);
+
+ /* Find highest device ID used for power domains */
+ while (1) {
+ np = of_find_node_with_property(np, "power-domains");
+ if (!np)
+ break;
+
+ index = 0;
+
+ while (1) {
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ index, &args);
+ if (ret)
+ break;
+
+ if (args.args_count >= 1 && args.np == dev->of_node) {
+ if (args.args[0] > max_id)
+ max_id = args.args[0];
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->pd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "pd:%d",
+ args.args[0]);
+ if (!pd->pd.name)
+ return -ENOMEM;
- ti_sci_pd->dev = dev;
+ pd->pd.power_off = ti_sci_pd_power_off;
+ pd->pd.power_on = ti_sci_pd_power_on;
+ pd->idx = args.args[0];
+ pd->parent = pd_provider;
- ti_sci_pd->pd.name = "ti_sci_pd";
+ pm_genpd_init(&pd->pd, NULL, true);
- ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
- ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
+ list_add(&pd->node, &pd_provider->pd_list);
+ }
+ index++;
+ }
+ }
- ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start;
- ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop;
+ pd_provider->data.domains =
+ devm_kcalloc(dev, max_id + 1,
+ sizeof(*pd_provider->data.domains),
+ GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
+
- pm_genpd_init(&ti_sci_pd->pd, NULL, true);
+ pd_provider->data.num_domains = max_id + 1;
+ pd_provider->data.xlate = ti_sci_pd_xlate;
- ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd);
+ list_for_each_entry(pd, &pd_provider->pd_list, node)
+ pd_provider->data.domains[pd->idx] = &pd->pd;
- return ret;
+ return of_genpd_add_provider_onecell(dev->of_node, &pd_provider->data);
}
static struct platform_driver ti_sci_pm_domains_driver = {
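
Note: the rework above replaces the simple genpd provider with a onecell provider, one generic_pm_domain per TI SCI device ID, resolved through the xlate callback. A reduced sketch of the registration side (error paths and the DT scan omitted; names are hypothetical):

#include <linux/device.h>
#include <linux/pm_domain.h>

/* sketch: expose N pre-built domains behind one onecell provider */
static int example_register(struct device *dev, struct device_node *np,
			    struct generic_pm_domain **pds, unsigned int n)
{
	struct genpd_onecell_data *data;
	unsigned int i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = pds;
	data->num_domains = n;

	for (i = 0; i < n; i++)
		pm_genpd_init(pds[i], NULL, true);	/* start powered off */

	/* a consumer's 'power-domains = <&provider idx>' maps to pds[idx] */
	return of_genpd_add_provider_onecell(np, data);
}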
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index 7dcf77ccd31e..bab4ad87aa75 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -100,7 +100,7 @@ ATTRIBUTE_GROUPS(integrator);
static int __init integrator_soc_init(void)
{
- static struct regmap *syscon_regmap;
+ struct regmap *syscon_regmap;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *np;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 31ff49fcd078..c556623dae02 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -205,7 +205,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
rx_chan = mbox_request_channel_byname(client, "rx");
if (IS_ERR(rx_chan)) {
dev_err(&pdev->dev, "Failed to request rx channel\n");
- return IS_ERR(rx_chan);
+ return PTR_ERR(rx_chan);
}
} else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
irq = platform_get_irq(pdev, 0);
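
Note: the zynqmp fix above matters because IS_ERR() only reports whether the pointer encodes an error, so the old code propagated 1 instead of a negative errno. Illustration:

#include <linux/err.h>

/* IS_ERR() is only a predicate; PTR_ERR() recovers the encoded errno */
static int example_check(void *p)
{
	if (IS_ERR(p))
		return PTR_ERR(p);	/* e.g. -EPROBE_DEFER, not 1 */

	return 0;
}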
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index fa2b4ab92ed9..016e74230bb7 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -24,6 +24,7 @@ config SOUNDWIRE_CADENCE
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
+ select SOUNDWIRE_GENERIC_ALLOCATION
depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
@@ -33,11 +34,15 @@ config SOUNDWIRE_INTEL
config SOUNDWIRE_QCOM
tristate "Qualcomm SoundWire Master driver"
- depends on SLIMBUS
+ imply SLIMBUS
depends on SND_SOC
help
SoundWire Qualcomm Master driver.
If you have a Qualcomm platform which has a SoundWire Master then
enable this config option to get the SoundWire support for that
device.
+
+config SOUNDWIRE_GENERIC_ALLOCATION
+ tristate
+
endif
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index 7c53ffae9f50..bf1e250d50dd 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -8,6 +8,9 @@ soundwire-bus-y := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
sysfs_slave.o sysfs_slave_dpn.o
obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
+soundwire-generic-allocation-objs := generic_bandwidth_allocation.o
+obj-$(CONFIG_SOUNDWIRE_GENERIC_ALLOCATION) += soundwire-generic-allocation.o
+
ifdef CONFIG_DEBUG_FS
soundwire-bus-y += debugfs.o
endif
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index da0201693c24..8eaf31e76677 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -61,6 +61,12 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
return -EINVAL;
}
+ if (!bus->compute_params) {
+ dev_err(bus->dev,
+ "Bandwidth allocation not configured, compute_params no set\n");
+ return -EINVAL;
+ }
+
mutex_init(&bus->msg_lock);
mutex_init(&bus->bus_lock);
INIT_LIST_HEAD(&bus->slaves);
@@ -255,6 +261,21 @@ static int sdw_reset_page(struct sdw_bus *bus, u16 dev_num)
return ret;
}
+static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
+{
+ int ret;
+
+ ret = do_transfer(bus, msg);
+ if (ret != 0 && ret != -ENODATA)
+ dev_err(bus->dev, "trf on Slave %d failed:%d\n",
+ msg->dev_num, ret);
+
+ if (msg->page)
+ sdw_reset_page(bus, msg->dev_num);
+
+ return ret;
+}
+
/**
* sdw_transfer() - Synchronous transfer message to a SDW Slave device
* @bus: SDW bus
@@ -266,13 +287,7 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
mutex_lock(&bus->msg_lock);
- ret = do_transfer(bus, msg);
- if (ret != 0 && ret != -ENODATA)
- dev_err(bus->dev, "trf on Slave %d failed:%d\n",
- msg->dev_num, ret);
-
- if (msg->page)
- sdw_reset_page(bus, msg->dev_num);
+ ret = sdw_transfer_unlocked(bus, msg);
mutex_unlock(&bus->msg_lock);
@@ -347,8 +362,8 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
return -EINVAL;
}
- msg->addr_page1 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE1_MASK));
- msg->addr_page2 = (addr >> SDW_REG_SHIFT(SDW_SCP_ADDRPAGE2_MASK));
+ msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
+ msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
msg->addr |= BIT(15);
msg->page = true;
@@ -428,6 +443,39 @@ sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
return sdw_transfer(bus, &msg);
}
+int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
+{
+ struct sdw_msg msg;
+ u8 buf;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_READ, &buf);
+ if (ret)
+ return ret;
+
+ ret = sdw_transfer_unlocked(bus, &msg);
+ if (ret < 0)
+ return ret;
+
+ return buf;
+}
+EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
+
+int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
+{
+ struct sdw_msg msg;
+ int ret;
+
+ ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
+ SDW_MSG_FLAG_WRITE, &value);
+ if (ret)
+ return ret;
+
+ return sdw_transfer_unlocked(bus, &msg);
+}
+EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
+
static int
sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
@@ -699,6 +747,15 @@ static int sdw_program_device_num(struct sdw_bus *bus)
if (!found) {
/* TODO: Park this device in Group 13 */
+
+ /*
+ * add Slave device even if there is no platform
+ * firmware description. There will be no driver probe
+ * but the user/integration will be able to see the
+ * device, enumeration status and device number in sysfs
+ */
+ sdw_slave_add(bus, &id, NULL);
+
dev_err(bus->dev, "Slave Entry not found\n");
}
@@ -1051,6 +1108,12 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
int ret;
u8 val = 0;
+ if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
+ dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
+ enable ? "on" : "off");
+ mask |= SDW_DPN_INT_TEST_FAIL;
+ }
+
addr = SDW_DPN_INTMASK(port);
/* Set/Clear port ready interrupt mask */
@@ -1184,13 +1247,13 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
return ret;
/*
- * Set bus clash, parity and SCP implementation
- * defined interrupt mask
- * TODO: Read implementation defined interrupt mask
- * from Slave property
+ * Set SCP_INT1_MASK register, typically bus clash and
+ * implementation-defined interrupt mask. Parity detection
+ * may not always be correct on startup, so its use is
+ * device-dependent; it might, e.g., only be enabled in
+ * steady-state after a couple of frames.
*/
- val = SDW_SCP_INT1_IMPL_DEF | SDW_SCP_INT1_BUS_CLASH |
- SDW_SCP_INT1_PARITY;
+ val = slave->prop.scp_int1_mask;
/* Enable SCP interrupts */
ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
@@ -1362,6 +1425,8 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
unsigned long port;
bool slave_notify = false;
u8 buf, buf2[2], _buf, _buf2[2];
+ bool parity_check;
+ bool parity_quirk;
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
@@ -1394,12 +1459,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
* interrupt
*/
if (buf & SDW_SCP_INT1_PARITY) {
- dev_err(&slave->dev, "Parity error detected\n");
+ parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
+ parity_quirk = !slave->first_interrupt_done &&
+ (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
+
+ if (parity_check && !parity_quirk)
+ dev_err(&slave->dev, "Parity error detected\n");
clear |= SDW_SCP_INT1_PARITY;
}
if (buf & SDW_SCP_INT1_BUS_CLASH) {
- dev_err(&slave->dev, "Bus clash error detected\n");
+ if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
+ dev_err(&slave->dev, "Bus clash detected\n");
clear |= SDW_SCP_INT1_BUS_CLASH;
}
@@ -1411,16 +1482,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
*/
if (buf & SDW_SCP_INT1_IMPL_DEF) {
- dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
+ if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
+ dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
+ slave_notify = true;
+ }
clear |= SDW_SCP_INT1_IMPL_DEF;
- slave_notify = true;
}
/* Check port 0 - 3 interrupts */
port = buf & SDW_SCP_INT1_PORT0_3;
/* To get port number corresponding to bits, shift it */
- port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3);
+ port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
for_each_set_bit(bit, &port, 8) {
sdw_handle_port_interrupt(slave, bit,
&port_status[bit]);
@@ -1468,6 +1541,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
goto io_err;
}
+ /* at this point all initial interrupt sources were handled */
+ slave->first_interrupt_done = true;
+
/*
* Read status again to ensure no new interrupts arrived
* while servicing interrupts.
@@ -1670,8 +1746,10 @@ void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
if (!slave)
continue;
- if (slave->status != SDW_SLAVE_UNATTACHED)
+ if (slave->status != SDW_SLAVE_UNATTACHED) {
sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+ slave->first_interrupt_done = false;
+ }
/* keep track of request, used in pm_runtime resume */
slave->unattach_request = request;
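
Note: the _unlocked variants added above bypass the bus msg_lock, so callers must serialize the whole sequence themselves, as the Cadence parity-injection code further down in this diff does under bus_lock. A sketch of the expected usage (SDW_BROADCAST_DEV_NUM is the broadcast address from sdw.h):

#include <linux/mutex.h>
#include <linux/soundwire/sdw.h>
#include "bus.h"

/* sketch: keep a broadcast write+read atomic with respect to the bus */
static int example_broadcast(struct sdw_bus *bus, u32 addr, u8 value)
{
	int ret;

	mutex_lock(&bus->bus_lock);	/* caller-provided serialization */
	ret = sdw_bwrite_no_pm_unlocked(bus, SDW_BROADCAST_DEV_NUM, addr, value);
	if (ret >= 0)
		ret = sdw_bread_no_pm_unlocked(bus, SDW_BROADCAST_DEV_NUM, addr);
	mutex_unlock(&bus->bus_lock);

	return ret;
}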
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 82484f741168..2e049d39c6e5 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -19,6 +19,8 @@ static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
int sdw_of_find_slaves(struct sdw_bus *bus);
void sdw_extract_slave_id(struct sdw_bus *bus,
u64 addr, struct sdw_slave_id *id);
+int sdw_slave_add(struct sdw_bus *bus, struct sdw_slave_id *id,
+ struct fwnode_handle *fwnode);
int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
struct fwnode_handle *fwnode);
int sdw_master_device_del(struct sdw_bus *bus);
@@ -69,6 +71,7 @@ struct sdw_msg {
};
#define SDW_DOUBLE_RATE_FACTOR 2
+#define SDW_STRM_RATE_GROUPING 1
extern int sdw_rows[SDW_FRAME_ROWS];
extern int sdw_cols[SDW_FRAME_COLS];
@@ -154,9 +157,50 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
+/* Retrieve and return channel count from channel mask */
+static inline int sdw_ch_mask_to_ch(int ch_mask)
+{
+ int c = 0;
+
+ for (c = 0; ch_mask; ch_mask >>= 1)
+ c += ch_mask & 1;
+
+ return c;
+}
+
+/* Fill transport parameter data structure */
+static inline void sdw_fill_xport_params(struct sdw_transport_params *params,
+ int port_num, bool grp_ctrl_valid,
+ int grp_ctrl, int sample_int,
+ int off1, int off2,
+ int hstart, int hstop,
+ int pack_mode, int lane_ctrl)
+{
+ params->port_num = port_num;
+ params->blk_grp_ctrl_valid = grp_ctrl_valid;
+ params->blk_grp_ctrl = grp_ctrl;
+ params->sample_interval = sample_int;
+ params->offset1 = off1;
+ params->offset2 = off2;
+ params->hstart = hstart;
+ params->hstop = hstop;
+ params->blk_pkg_mode = pack_mode;
+ params->lane_ctrl = lane_ctrl;
+}
+
+/* Fill port parameter data structure */
+static inline void sdw_fill_port_params(struct sdw_port_params *params,
+ int port_num, int bps,
+ int flow_mode, int data_mode)
+{
+ params->num = port_num;
+ params->bps = bps;
+ params->flow_mode = flow_mode;
+ params->data_mode = data_mode;
+}
+
/* Read-Modify-Write Slave register */
-static inline int
-sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+static inline int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
int tmp;
@@ -168,6 +212,10 @@ sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
return sdw_write(slave, addr, tmp);
}
+/* broadcast read/write for tests */
+int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr);
+int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value);
+
/*
* At the moment we only track Master-initiated hw_reset.
* Additional fields can be added as needed
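
Note: sdw_ch_mask_to_ch() above is a plain population count over the channel mask, and the fill helpers simply copy arguments into the parameter structs. A small usage sketch (values are illustrative):

#include "bus.h"

static int example_params(struct sdw_port_params *p)
{
	/* port 1: 24 bits per sample, isochronous flow, normal data mode */
	sdw_fill_port_params(p, 1, 24, SDW_PORT_FLOW_MODE_ISOCH,
			     SDW_PORT_DATA_MODE_NORMAL);

	/* 0xb == 0b1011: channels 0, 1 and 3 active -> three channels */
	return sdw_ch_mask_to_ch(0xb);
}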
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 6fba55898cf0..575b9bad99d5 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -84,6 +84,15 @@ static int sdw_drv_probe(struct device *dev)
const struct sdw_device_id *id;
int ret;
+ /*
+ * a firmware description (ACPI or DT) is mandatory to bind
+ */
+ if (!dev->fwnode)
+ return -ENODEV;
+
+ if (!IS_ENABLED(CONFIG_ACPI) && !dev->of_node)
+ return -ENODEV;
+
id = sdw_get_device_id(slave, drv);
if (!id)
return -ENODEV;
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 24eafe0aa1c3..9fa55164354a 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <sound/pcm_params.h>
@@ -50,11 +51,14 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
#define CDNS_MCP_CMDCTRL 0x8
+
+#define CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR BIT(2)
+
#define CDNS_MCP_SSPSTAT 0xC
#define CDNS_MCP_FRAME_SHAPE 0x10
#define CDNS_MCP_FRAME_SHAPE_INIT 0x14
#define CDNS_MCP_FRAME_SHAPE_COL_MASK GENMASK(2, 0)
-#define CDNS_MCP_FRAME_SHAPE_ROW_OFFSET 3
+#define CDNS_MCP_FRAME_SHAPE_ROW_MASK GENMASK(7, 3)
#define CDNS_MCP_CONFIG_UPDATE 0x18
#define CDNS_MCP_CONFIG_UPDATE_BIT BIT(0)
@@ -129,8 +133,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_CMD_SSP_TAG BIT(31)
#define CDNS_MCP_CMD_COMMAND GENMASK(30, 28)
#define CDNS_MCP_CMD_DEV_ADDR GENMASK(27, 24)
-#define CDNS_MCP_CMD_REG_ADDR_H GENMASK(23, 16)
-#define CDNS_MCP_CMD_REG_ADDR_L GENMASK(15, 8)
+#define CDNS_MCP_CMD_REG_ADDR GENMASK(23, 8)
#define CDNS_MCP_CMD_REG_DATA GENMASK(7, 0)
#define CDNS_MCP_CMD_READ 2
@@ -172,6 +175,7 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_DPN_HCTRL_LCTRL GENMASK(10, 8)
#define CDNS_PORTCTRL 0x130
+#define CDNS_PORTCTRL_TEST_FAILED BIT(1)
#define CDNS_PORTCTRL_DIRN BIT(7)
#define CDNS_PORTCTRL_BANK_INVERT BIT(8)
@@ -367,6 +371,85 @@ static int cdns_hw_reset(void *data, u64 value)
DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
+static int cdns_parity_error_injection(void *data, u64 value)
+{
+ struct sdw_cdns *cdns = data;
+ struct sdw_bus *bus;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ bus = &cdns->bus;
+
+ /*
+ * Resume Master device. If this results in a bus reset, the
+ * Slave devices will re-attach and be re-enumerated.
+ */
+ ret = pm_runtime_get_sync(bus->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(cdns->dev,
+ "pm_runtime_get_sync failed in %s, ret %d\n",
+ __func__, ret);
+ pm_runtime_put_noidle(bus->dev);
+ return ret;
+ }
+
+ /*
+ * wait long enough for Slave(s) to be in steady state. This
+ * does not need to be super precise.
+ */
+ msleep(200);
+
+ /*
+ * Take the bus lock here to make sure that any bus transactions
+ * will be queued while we inject a parity error on a dummy read
+ */
+ mutex_lock(&bus->bus_lock);
+
+ /* program hardware to inject parity error */
+ cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
+ CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
+ CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR);
+
+ /* commit changes */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+
+ /* do a broadcast dummy read to avoid bus clashes */
+ ret = sdw_bread_no_pm_unlocked(&cdns->bus, 0xf, SDW_SCP_DEVID_0);
+ dev_info(cdns->dev, "parity error injection, read: %d\n", ret);
+
+ /* program hardware to disable parity error */
+ cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
+ CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
+ 0);
+
+ /* commit changes */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+
+ /* Continue bus operation with parity error injection disabled */
+ mutex_unlock(&bus->bus_lock);
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ /*
+ * allow Master device to enter pm_runtime suspend. This may
+ * also result in Slave devices suspending.
+ */
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_put_autosuspend(bus->dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdns_parity_error_fops, NULL,
+ cdns_parity_error_injection, "%llu\n");
+
/**
* sdw_cdns_debugfs_init() - Cadence debugfs init
* @cdns: Cadence instance
@@ -378,6 +461,9 @@ void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
&cdns_hw_reset_fops);
+
+ debugfs_create_file("cdns-parity-error-injection", 0200, root, cdns,
+ &cdns_parity_error_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
@@ -417,8 +503,7 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
/* fill response */
for (i = 0; i < count; i++)
- msg->buf[i + offset] = cdns->response_buf[i] >>
- SDW_REG_SHIFT(CDNS_MCP_RESP_RDATA);
+ msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]);
return SDW_CMD_OK;
}
@@ -441,14 +526,15 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
addr = msg->addr;
for (i = 0; i < count; i++) {
- data = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
- data |= cmd << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
- data |= addr++ << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+ data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
+ data |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, cmd);
+ data |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, addr);
+ addr++;
if (msg->flags == SDW_MSG_FLAG_WRITE)
data |= msg->buf[i + offset];
- data |= msg->ssp_sync << SDW_REG_SHIFT(CDNS_MCP_CMD_SSP_TAG);
+ data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
cdns_writel(cdns, base, data);
base += CDNS_MCP_CMD_WORD_LEN;
}
@@ -483,12 +569,12 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
cdns->msg_count = CDNS_SCP_RX_FIFOLEVEL;
}
- data[0] = msg->dev_num << SDW_REG_SHIFT(CDNS_MCP_CMD_DEV_ADDR);
- data[0] |= 0x3 << SDW_REG_SHIFT(CDNS_MCP_CMD_COMMAND);
+ data[0] = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
+ data[0] |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, 0x3);
data[1] = data[0];
- data[0] |= SDW_SCP_ADDRPAGE1 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
- data[1] |= SDW_SCP_ADDRPAGE2 << SDW_REG_SHIFT(CDNS_MCP_CMD_REG_ADDR_L);
+ data[0] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE1);
+ data[1] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE2);
data[0] |= msg->addr_page1;
data[1] |= msg->addr_page2;
@@ -785,13 +871,35 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
}
+ if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL &&
+ int_status & CDNS_MCP_INT_DPINT) {
+ u32 port_intstat;
+
+ /* just log which ports report an error */
+ port_intstat = cdns_readl(cdns, CDNS_MCP_PORT_INTSTAT);
+ dev_err_ratelimited(cdns->dev, "DP interrupt: PortIntStat %8x\n",
+ port_intstat);
+
+ /* clear status w/ write1 */
+ cdns_writel(cdns, CDNS_MCP_PORT_INTSTAT, port_intstat);
+ }
+
if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
/* Mask the Slave interrupt and wake thread */
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, 0);
int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
- schedule_work(&cdns->work);
+
+ /*
+ * Deal with possible race condition between interrupt
+ * handling and disabling interrupts on suspend.
+ *
+ * If the master is in the process of disabling
+ * interrupts, don't schedule a workqueue
+ */
+ if (cdns->interrupt_enabled)
+ schedule_work(&cdns->work);
}
cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
@@ -900,7 +1008,9 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
mask |= CDNS_MCP_INT_CTRL_CLASH | CDNS_MCP_INT_DATA_CLASH |
CDNS_MCP_INT_PARITY;
- /* no detection of port interrupts for now */
+ /* port interrupt limited to test modes for now */
+ if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
+ mask |= CDNS_MCP_INT_DPINT;
/* enable detection of RX fifo level */
mask |= CDNS_MCP_INT_RX_WL;
@@ -924,6 +1034,19 @@ update_masks:
slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
}
+ cdns->interrupt_enabled = state;
+
+ /*
+ * Complete any on-going status updates before updating masks,
+ * and cancel queued status updates.
+ *
+ * There could be a race with a new interrupt thrown before
+ * the 3 mask updates below are complete, so in the interrupt
+ * we use the 'interrupt_enabled' status to prevent new work
+ * from being queued.
+ */
+ if (!state)
+ cancel_work_sync(&cdns->work);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
@@ -1041,9 +1164,10 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
int r;
r = sdw_find_row_index(n_rows);
- c = sdw_find_col_index(n_cols) & CDNS_MCP_FRAME_SHAPE_COL_MASK;
+ c = sdw_find_col_index(n_cols);
- val = (r << CDNS_MCP_FRAME_SHAPE_ROW_OFFSET) | c;
+ val = FIELD_PREP(CDNS_MCP_FRAME_SHAPE_ROW_MASK, r);
+ val |= FIELD_PREP(CDNS_MCP_FRAME_SHAPE_COL_MASK, c);
return val;
}
@@ -1170,12 +1294,9 @@ static int cdns_port_params(struct sdw_bus *bus,
dpn_config = cdns_readl(cdns, dpn_config_off);
- dpn_config |= ((p_params->bps - 1) <<
- SDW_REG_SHIFT(CDNS_DPN_CONFIG_WL));
- dpn_config |= (p_params->flow_mode <<
- SDW_REG_SHIFT(CDNS_DPN_CONFIG_PORT_FLOW));
- dpn_config |= (p_params->data_mode <<
- SDW_REG_SHIFT(CDNS_DPN_CONFIG_PORT_DAT));
+ u32p_replace_bits(&dpn_config, (p_params->bps - 1), CDNS_DPN_CONFIG_WL);
+ u32p_replace_bits(&dpn_config, p_params->flow_mode, CDNS_DPN_CONFIG_PORT_FLOW);
+ u32p_replace_bits(&dpn_config, p_params->data_mode, CDNS_DPN_CONFIG_PORT_DAT);
cdns_writel(cdns, dpn_config_off, dpn_config);
@@ -1211,24 +1332,17 @@ static int cdns_transport_params(struct sdw_bus *bus,
}
dpn_config = cdns_readl(cdns, dpn_config_off);
-
- dpn_config |= (t_params->blk_grp_ctrl <<
- SDW_REG_SHIFT(CDNS_DPN_CONFIG_BGC));
- dpn_config |= (t_params->blk_pkg_mode <<
- SDW_REG_SHIFT(CDNS_DPN_CONFIG_BPM));
+ u32p_replace_bits(&dpn_config, t_params->blk_grp_ctrl, CDNS_DPN_CONFIG_BGC);
+ u32p_replace_bits(&dpn_config, t_params->blk_pkg_mode, CDNS_DPN_CONFIG_BPM);
cdns_writel(cdns, dpn_config_off, dpn_config);
- dpn_offsetctrl |= (t_params->offset1 <<
- SDW_REG_SHIFT(CDNS_DPN_OFFSET_CTRL_1));
- dpn_offsetctrl |= (t_params->offset2 <<
- SDW_REG_SHIFT(CDNS_DPN_OFFSET_CTRL_2));
+ u32p_replace_bits(&dpn_offsetctrl, t_params->offset1, CDNS_DPN_OFFSET_CTRL_1);
+ u32p_replace_bits(&dpn_offsetctrl, t_params->offset2, CDNS_DPN_OFFSET_CTRL_2);
cdns_writel(cdns, dpn_offsetctrl_off, dpn_offsetctrl);
- dpn_hctrl |= (t_params->hstart <<
- SDW_REG_SHIFT(CDNS_DPN_HCTRL_HSTART));
- dpn_hctrl |= (t_params->hstop << SDW_REG_SHIFT(CDNS_DPN_HCTRL_HSTOP));
- dpn_hctrl |= (t_params->lane_ctrl <<
- SDW_REG_SHIFT(CDNS_DPN_HCTRL_LCTRL));
+ u32p_replace_bits(&dpn_hctrl, t_params->hstart, CDNS_DPN_HCTRL_HSTART);
+ u32p_replace_bits(&dpn_hctrl, t_params->hstop, CDNS_DPN_HCTRL_HSTOP);
+ u32p_replace_bits(&dpn_hctrl, t_params->lane_ctrl, CDNS_DPN_HCTRL_LCTRL);
cdns_writel(cdns, dpn_hctrl_off, dpn_hctrl);
cdns_writel(cdns, dpn_samplectrl_off, (t_params->sample_interval - 1));
@@ -1526,15 +1640,20 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
{
u32 offset, val = 0;
- if (dir == SDW_DATA_DIR_RX)
+ if (dir == SDW_DATA_DIR_RX) {
val = CDNS_PORTCTRL_DIRN;
+ if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
+ val |= CDNS_PORTCTRL_TEST_FAILED;
+ }
offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
- cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
+ cdns_updatel(cdns, offset,
+ CDNS_PORTCTRL_DIRN | CDNS_PORTCTRL_TEST_FAILED,
+ val);
val = pdi->num;
val |= CDNS_PDI_CONFIG_SOFT_RESET;
- val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
+ val |= FIELD_PREP(CDNS_PDI_CONFIG_CHANNEL, (1 << ch) - 1);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
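
Note: the conversions above swap open-coded mask/shift pairs for the linux/bitfield.h helpers, which derive the shift from the mask at compile time. A standalone sketch with an illustrative mask (not one of the Cadence registers):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_FIELD	GENMASK(23, 8)	/* illustrative field, bits 23:8 */

static u32 example_bitfield(void)
{
	u32 reg = FIELD_PREP(EX_FIELD, 0x1234);	/* value placed at bits 23:8 */
	u32 val = FIELD_GET(EX_FIELD, reg);	/* back to 0x1234 */

	u32p_replace_bits(&reg, val + 1, EX_FIELD);	/* RMW of that field only */

	return reg;
}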
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 7638858397df..4d1aab5b5ec2 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -84,6 +84,8 @@ struct sdw_cdns_stream_config {
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
+ * @hw_params: hw_params to be applied in .prepare step
+ * @suspended: status set when suspended, to be used in .prepare
*/
struct sdw_cdns_dma_data {
char *name;
@@ -92,6 +94,8 @@ struct sdw_cdns_dma_data {
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
+ struct snd_pcm_hw_params *hw_params;
+ bool suspended;
};
/**
@@ -129,6 +133,7 @@ struct sdw_cdns {
bool link_up;
unsigned int msg_count;
+ bool interrupt_enabled;
struct work_struct work;
diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c
new file mode 100644
index 000000000000..0bdef38c9a30
--- /dev/null
+++ b/drivers/soundwire/generic_bandwidth_allocation.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+// Copyright(c) 2015-2020 Intel Corporation.
+
+/*
+ * Bandwidth management algorithm based on 2^n gears
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/soundwire/sdw.h>
+#include "bus.h"
+
+struct sdw_group_params {
+ unsigned int rate;
+ int full_bw;
+ int payload_bw;
+ int hwidth;
+};
+
+struct sdw_group {
+ unsigned int count;
+ unsigned int max_size;
+ unsigned int *rates;
+};
+
+struct sdw_transport_data {
+ int hstart;
+ int hstop;
+ int block_offset;
+ int sub_block_offset;
+};
+
+static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
+ struct sdw_transport_data *t_data)
+{
+ struct sdw_slave_runtime *s_rt = NULL;
+ struct sdw_port_runtime *p_rt;
+ int port_bo, sample_int;
+ unsigned int rate, bps, ch = 0;
+ unsigned int slave_total_ch;
+ struct sdw_bus_params *b_params = &m_rt->bus->params;
+
+ port_bo = t_data->block_offset;
+
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ rate = m_rt->stream->params.rate;
+ bps = m_rt->stream->params.bps;
+ sample_int = (m_rt->bus->params.curr_dr_freq / rate);
+ slave_total_ch = 0;
+
+ list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
+ ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
+
+ sdw_fill_xport_params(&p_rt->transport_params,
+ p_rt->num, false,
+ SDW_BLK_GRP_CNT_1,
+ sample_int, port_bo, port_bo >> 8,
+ t_data->hstart,
+ t_data->hstop,
+ (SDW_BLK_GRP_CNT_1 * ch), 0x0);
+
+ sdw_fill_port_params(&p_rt->port_params,
+ p_rt->num, bps,
+ SDW_PORT_FLOW_MODE_ISOCH,
+ b_params->s_data_mode);
+
+ port_bo += bps * ch;
+ slave_total_ch += ch;
+ }
+
+ if (m_rt->direction == SDW_DATA_DIR_TX &&
+ m_rt->ch_count == slave_total_ch) {
+ /*
+ * Slave devices were configured to access all channels
+ * of the stream, which indicates that they operate in
+ * 'mirror mode'. Make sure we reset the port offset for
+ * the next device in the list
+ */
+ port_bo = t_data->block_offset;
+ }
+ }
+}
+
+static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
+ struct sdw_group_params *params,
+ int port_bo, int hstop)
+{
+ struct sdw_transport_data t_data = {0};
+ struct sdw_port_runtime *p_rt;
+ struct sdw_bus *bus = m_rt->bus;
+ struct sdw_bus_params *b_params = &bus->params;
+ int sample_int, hstart = 0;
+ unsigned int rate, bps, ch, no_ch;
+
+ rate = m_rt->stream->params.rate;
+ bps = m_rt->stream->params.bps;
+ ch = m_rt->ch_count;
+ sample_int = (bus->params.curr_dr_freq / rate);
+
+ if (rate != params->rate)
+ return;
+
+ t_data.hstop = hstop;
+ hstart = hstop - params->hwidth + 1;
+ t_data.hstart = hstart;
+
+ list_for_each_entry(p_rt, &m_rt->port_list, port_node) {
+ no_ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
+
+ sdw_fill_xport_params(&p_rt->transport_params, p_rt->num,
+ false, SDW_BLK_GRP_CNT_1, sample_int,
+ port_bo, port_bo >> 8, hstart, hstop,
+ (SDW_BLK_GRP_CNT_1 * no_ch), 0x0);
+
+ sdw_fill_port_params(&p_rt->port_params,
+ p_rt->num, bps,
+ SDW_PORT_FLOW_MODE_ISOCH,
+ b_params->m_data_mode);
+
+ /* Check for first entry */
+ if (!(p_rt == list_first_entry(&m_rt->port_list,
+ struct sdw_port_runtime,
+ port_node))) {
+ port_bo += bps * ch;
+ continue;
+ }
+
+ t_data.hstart = hstart;
+ t_data.hstop = hstop;
+ t_data.block_offset = port_bo;
+ t_data.sub_block_offset = 0;
+ port_bo += bps * ch;
+ }
+
+ sdw_compute_slave_ports(m_rt, &t_data);
+}
+
+static void _sdw_compute_port_params(struct sdw_bus *bus,
+ struct sdw_group_params *params, int count)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ int hstop = bus->params.col - 1;
+ int block_offset, port_bo, i;
+
+ /* Run loop for all groups to compute transport parameters */
+ for (i = 0; i < count; i++) {
+ port_bo = 1;
+ block_offset = 1;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ sdw_compute_master_ports(m_rt, &params[i],
+ port_bo, hstop);
+
+ block_offset += m_rt->ch_count *
+ m_rt->stream->params.bps;
+ port_bo = block_offset;
+ }
+
+ hstop = hstop - params[i].hwidth;
+ }
+}
+
+static int sdw_compute_group_params(struct sdw_bus *bus,
+ struct sdw_group_params *params,
+ int *rates, int count)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ int sel_col = bus->params.col;
+ unsigned int rate, bps, ch;
+ int i, column_needed = 0;
+
+ /* Calculate bandwidth per group */
+ for (i = 0; i < count; i++) {
+ params[i].rate = rates[i];
+ params[i].full_bw = bus->params.curr_dr_freq / params[i].rate;
+ }
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ rate = m_rt->stream->params.rate;
+ bps = m_rt->stream->params.bps;
+ ch = m_rt->ch_count;
+
+ for (i = 0; i < count; i++) {
+ if (rate == params[i].rate)
+ params[i].payload_bw += bps * ch;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ params[i].hwidth = (sel_col *
+ params[i].payload_bw + params[i].full_bw - 1) /
+ params[i].full_bw;
+
+ column_needed += params[i].hwidth;
+ }
+
+ if (column_needed > sel_col - 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sdw_add_element_group_count(struct sdw_group *group,
+ unsigned int rate)
+{
+ int num = group->count;
+ int i;
+
+ for (i = 0; i <= num; i++) {
+ if (rate == group->rates[i])
+ break;
+
+ if (i != num)
+ continue;
+
+ if (group->count >= group->max_size) {
+ unsigned int *rates;
+
+ group->max_size += 1;
+ rates = krealloc(group->rates,
+ (sizeof(int) * group->max_size),
+ GFP_KERNEL);
+ if (!rates)
+ return -ENOMEM;
+ group->rates = rates;
+ }
+
+ group->rates[group->count++] = rate;
+ }
+
+ return 0;
+}
+
+static int sdw_get_group_count(struct sdw_bus *bus,
+ struct sdw_group *group)
+{
+ struct sdw_master_runtime *m_rt;
+ unsigned int rate;
+ int ret = 0;
+
+ group->count = 0;
+ group->max_size = SDW_STRM_RATE_GROUPING;
+ group->rates = kcalloc(group->max_size, sizeof(int), GFP_KERNEL);
+ if (!group->rates)
+ return -ENOMEM;
+
+ list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) {
+ rate = m_rt->stream->params.rate;
+ if (m_rt == list_first_entry(&bus->m_rt_list,
+ struct sdw_master_runtime,
+ bus_node)) {
+ group->rates[group->count++] = rate;
+
+ } else {
+ ret = sdw_add_element_group_count(group, rate);
+ if (ret < 0) {
+ kfree(group->rates);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * sdw_compute_port_params: Compute transport and port parameters
+ *
+ * @bus: SDW Bus instance
+ */
+static int sdw_compute_port_params(struct sdw_bus *bus)
+{
+ struct sdw_group_params *params = NULL;
+ struct sdw_group group;
+ int ret;
+
+ ret = sdw_get_group_count(bus, &group);
+ if (ret < 0)
+ return ret;
+
+ if (group.count == 0)
+ goto out;
+
+ params = kcalloc(group.count, sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Compute transport parameters for grouped streams */
+ ret = sdw_compute_group_params(bus, params,
+ &group.rates[0], group.count);
+ if (ret < 0)
+ goto free_params;
+
+ _sdw_compute_port_params(bus, params, group.count);
+
+free_params:
+ kfree(params);
+out:
+ kfree(group.rates);
+
+ return ret;
+}
+
+static int sdw_select_row_col(struct sdw_bus *bus, int clk_freq)
+{
+ struct sdw_master_prop *prop = &bus->prop;
+ int frame_int, frame_freq;
+ int r, c;
+
+ for (c = 0; c < SDW_FRAME_COLS; c++) {
+ for (r = 0; r < SDW_FRAME_ROWS; r++) {
+ if (sdw_rows[r] != prop->default_row ||
+ sdw_cols[c] != prop->default_col)
+ continue;
+
+ frame_int = sdw_rows[r] * sdw_cols[c];
+ frame_freq = clk_freq / frame_int;
+
+ if ((clk_freq - (frame_freq * SDW_FRAME_CTRL_BITS)) <
+ bus->params.bandwidth)
+ continue;
+
+ bus->params.row = sdw_rows[r];
+ bus->params.col = sdw_cols[c];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * sdw_compute_bus_params: Compute bus parameters
+ *
+ * @bus: SDW Bus instance
+ */
+static int sdw_compute_bus_params(struct sdw_bus *bus)
+{
+ unsigned int max_dr_freq, curr_dr_freq = 0;
+ struct sdw_master_prop *mstr_prop = &bus->prop;
+ int i, clk_values, ret;
+ bool is_gear = false;
+ u32 *clk_buf;
+
+ if (mstr_prop->num_clk_gears) {
+ clk_values = mstr_prop->num_clk_gears;
+ clk_buf = mstr_prop->clk_gears;
+ is_gear = true;
+ } else if (mstr_prop->num_clk_freq) {
+ clk_values = mstr_prop->num_clk_freq;
+ clk_buf = mstr_prop->clk_freq;
+ } else {
+ clk_values = 1;
+ clk_buf = NULL;
+ }
+
+ max_dr_freq = mstr_prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
+
+ for (i = 0; i < clk_values; i++) {
+ if (!clk_buf)
+ curr_dr_freq = max_dr_freq;
+ else
+ curr_dr_freq = (is_gear) ?
+ (max_dr_freq >> clk_buf[i]) :
+ clk_buf[i] * SDW_DOUBLE_RATE_FACTOR;
+
+ if (curr_dr_freq <= bus->params.bandwidth)
+ continue;
+
+ break;
+
+ /*
+ * TODO: Check all the Slave(s) port(s) audio modes and find
+ * whether given clock rate is supported with glitchless
+ * transition.
+ */
+ }
+
+ if (i == clk_values)
+ return -EINVAL;
+
+ ret = sdw_select_row_col(bus, curr_dr_freq);
+ if (ret < 0)
+ return -EINVAL;
+
+ bus->params.curr_dr_freq = curr_dr_freq;
+ return 0;
+}
+
+/**
+ * sdw_compute_params: Compute bus, transport and port parameters
+ *
+ * @bus: SDW Bus instance
+ */
+int sdw_compute_params(struct sdw_bus *bus)
+{
+ int ret;
+
+ /* Computes clock frequency, frame shape and frame frequency */
+ ret = sdw_compute_bus_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Compute bus params failed: %d", ret);
+ return ret;
+ }
+
+ /* Compute transport and port params */
+ ret = sdw_compute_port_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Compute transport params failed: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sdw_compute_params);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("SoundWire Generic Bandwidth Allocation");
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index a283670659a9..6a1e862b16c3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -22,6 +22,24 @@
#include "bus.h"
#include "intel.h"
+#define INTEL_MASTER_SUSPEND_DELAY_MS 3000
+
+/*
+ * debug/config flags for the Intel SoundWire Master.
+ *
+ * Since we may have multiple masters active, each master is assigned
+ * one byte of flags (up to 8 flags per master): master0 uses the
+ * least-significant byte, master1 the next byte, and so on.
+ */
+
+#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME BIT(0)
+#define SDW_INTEL_MASTER_DISABLE_CLOCK_STOP BIT(1)
+#define SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE BIT(2)
+#define SDW_INTEL_MASTER_DISABLE_MULTI_LINK BIT(3)
+
+static int md_flags;
+module_param_named(sdw_md_flags, md_flags, int, 0444);
+MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
+
/* Intel SHIM Registers Definition */
#define SDW_SHIM_LCAP 0x0
#define SDW_SHIM_LCTL 0x4
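
Note: with the byte-per-master layout documented above, each link extracts its flags by shifting sdw_md_flags right by link_id bytes. A sketch of the lookup within intel.c, where md_flags is visible (helper name is hypothetical):

/* sketch: per-link flag lookup for the layout documented above */
static bool example_pm_runtime_disabled(unsigned int link_id)
{
	int link_flags = md_flags >> (link_id * 8);

	return !!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME);
}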
@@ -45,7 +63,9 @@
#define SDW_SHIM_WAKESTS 0x192
#define SDW_SHIM_LCTL_SPA BIT(0)
+#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
#define SDW_SHIM_LCTL_CPA BIT(8)
+#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
@@ -242,6 +262,42 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
}
DEFINE_SHOW_ATTRIBUTE(intel_reg);
+static int intel_set_m_datamode(void *data, u64 value)
+{
+ struct sdw_intel *sdw = data;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+
+ if (value > SDW_PORT_DATA_MODE_STATIC_1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ bus->params.m_data_mode = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(intel_set_m_datamode_fops, NULL,
+ intel_set_m_datamode, "%llu\n");
+
+static int intel_set_s_datamode(void *data, u64 value)
+{
+ struct sdw_intel *sdw = data;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+
+ if (value > SDW_PORT_DATA_MODE_STATIC_1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ bus->params.s_data_mode = value;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(intel_set_s_datamode_fops, NULL,
+ intel_set_s_datamode, "%llu\n");
+
static void intel_debugfs_init(struct sdw_intel *sdw)
{
struct dentry *root = sdw->cdns.bus.debugfs;
@@ -254,6 +310,12 @@ static void intel_debugfs_init(struct sdw_intel *sdw)
debugfs_create_file("intel-registers", 0400, sdw->debugfs, sdw,
&intel_reg_fops);
+ debugfs_create_file("intel-m-datamode", 0200, sdw->debugfs, sdw,
+ &intel_set_m_datamode_fops);
+
+ debugfs_create_file("intel-s-datamode", 0200, sdw->debugfs, sdw,
+ &intel_set_s_datamode_fops);
+
sdw_cdns_debugfs_init(&sdw->cdns, sdw->debugfs);
}
@@ -277,8 +339,8 @@ static int intel_link_power_up(struct sdw_intel *sdw)
u32 *shim_mask = sdw->link_res->shim_mask;
struct sdw_bus *bus = &sdw->cdns.bus;
struct sdw_master_prop *prop = &bus->prop;
- int spa_mask, cpa_mask;
- int link_control;
+ u32 spa_mask, cpa_mask;
+ u32 link_control;
int ret = 0;
u32 syncprd;
u32 sync_reg;
@@ -301,33 +363,35 @@ static int intel_link_power_up(struct sdw_intel *sdw)
syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
if (!*shim_mask) {
+ dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
+
/* we first need to program the SyncPRD/CPU registers */
dev_dbg(sdw->cdns.dev,
"%s: first link up, programming SYNCPRD\n", __func__);
/* set SyncPRD period */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- sync_reg |= (syncprd <<
- SDW_REG_SHIFT(SDW_SHIM_SYNC_SYNCPRD));
+ u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
/* Set SyncCPU bit */
sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
- }
- /* Link power up sequence */
- link_control = intel_readl(shim, SDW_SHIM_LCTL);
- spa_mask = (SDW_SHIM_LCTL_SPA << link_id);
- cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
- link_control |= spa_mask;
+ /* Link power up sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
- ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
- goto out;
- }
+ /* only power-up enabled links */
+ spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
+ cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
+
+ link_control |= spa_mask;
+
+ ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
+ goto out;
+ }
- if (!*shim_mask) {
/* SyncCPU will change once link is active */
ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
SDW_SHIM_SYNC_SYNCCPU, 0);
@@ -426,7 +490,7 @@ static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
intel_shim_glue_to_master_ip(sdw);
- act |= 0x1 << SDW_REG_SHIFT(SDW_SHIM_CTMCTL_DOAIS);
+ u16p_replace_bits(&act, 0x1, SDW_SHIM_CTMCTL_DOAIS);
act |= SDW_SHIM_CTMCTL_DACTQE;
act |= SDW_SHIM_CTMCTL_DODS;
intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
@@ -463,9 +527,9 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
mutex_unlock(sdw->link_res->shim_lock);
}
-static int __maybe_unused intel_link_power_down(struct sdw_intel *sdw)
+static int intel_link_power_down(struct sdw_intel *sdw)
{
- int link_control, spa_mask, cpa_mask;
+ u32 link_control, spa_mask, cpa_mask;
unsigned int link_id = sdw->instance;
void __iomem *shim = sdw->link_res->shim;
u32 *shim_mask = sdw->link_res->shim_mask;
@@ -475,24 +539,37 @@ static int __maybe_unused intel_link_power_down(struct sdw_intel *sdw)
intel_shim_master_ip_to_glue(sdw);
- /* Link power down sequence */
- link_control = intel_readl(shim, SDW_SHIM_LCTL);
- spa_mask = ~(SDW_SHIM_LCTL_SPA << link_id);
- cpa_mask = (SDW_SHIM_LCTL_CPA << link_id);
- link_control &= spa_mask;
-
- ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
-
if (!(*shim_mask & BIT(link_id)))
dev_err(sdw->cdns.dev,
"%s: Unbalanced power-up/down calls\n", __func__);
*shim_mask &= ~BIT(link_id);
+ if (!*shim_mask) {
+
+ dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
+
+ /* Link power down sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+
+ /* only power-down enabled links */
+ spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, ~sdw->link_res->link_mask);
+ cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
+
+ link_control &= spa_mask;
+
+ ret = intel_clear_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+ }
+
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+
mutex_unlock(sdw->link_res->shim_lock);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "%s: could not power down link\n", __func__);
+
return ret;
+ }
sdw->cdns.link_up = false;
return 0;
@@ -538,6 +615,19 @@ static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
return ret;
}
+static int intel_shim_sync_go(struct sdw_intel *sdw)
+{
+ int ret;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ ret = intel_shim_sync_go_unlocked(sdw);
+
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
/*
* PDI routines
*/
@@ -551,12 +641,9 @@ static void intel_pdi_init(struct sdw_intel *sdw,
/* PCM Stream Capability */
pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
- config->pcm_bd = (pcm_cap & SDW_SHIM_PCMSCAP_BSS) >>
- SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_BSS);
- config->pcm_in = (pcm_cap & SDW_SHIM_PCMSCAP_ISS) >>
- SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_ISS);
- config->pcm_out = (pcm_cap & SDW_SHIM_PCMSCAP_OSS) >>
- SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_OSS);
+ config->pcm_bd = FIELD_GET(SDW_SHIM_PCMSCAP_BSS, pcm_cap);
+ config->pcm_in = FIELD_GET(SDW_SHIM_PCMSCAP_ISS, pcm_cap);
+ config->pcm_out = FIELD_GET(SDW_SHIM_PCMSCAP_OSS, pcm_cap);
dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
config->pcm_bd, config->pcm_in, config->pcm_out);
@@ -564,12 +651,9 @@ static void intel_pdi_init(struct sdw_intel *sdw,
/* PDM Stream Capability */
pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
- config->pdm_bd = (pdm_cap & SDW_SHIM_PDMSCAP_BSS) >>
- SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_BSS);
- config->pdm_in = (pdm_cap & SDW_SHIM_PDMSCAP_ISS) >>
- SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_ISS);
- config->pdm_out = (pdm_cap & SDW_SHIM_PDMSCAP_OSS) >>
- SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_OSS);
+ config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
+ config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
+ config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
config->pdm_bd, config->pdm_in, config->pdm_out);
@@ -596,8 +680,7 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
} else {
count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
- count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
- SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_CPSS));
+ count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
}
/* zero based values for channel count in register */
@@ -671,10 +754,9 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
else
pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR);
- pdi_conf |= (pdi->intel_alh_id <<
- SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_STREAM));
- pdi_conf |= (pdi->l_ch_num << SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_LCHN));
- pdi_conf |= (pdi->h_ch_num << SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_HCHN));
+ u32p_replace_bits(&pdi_conf, pdi->intel_alh_id, SDW_SHIM_PCMSYCM_STREAM);
+ u32p_replace_bits(&pdi_conf, pdi->l_ch_num, SDW_SHIM_PCMSYCM_LCHN);
+ u32p_replace_bits(&pdi_conf, pdi->h_ch_num, SDW_SHIM_PCMSYCM_HCHN);
intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf);
}
@@ -694,11 +776,8 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
/* Program Stream config ALH register */
conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
- conf |= (SDW_ALH_STRMZCFG_DMAT_VAL <<
- SDW_REG_SHIFT(SDW_ALH_STRMZCFG_DMAT));
-
- conf |= ((pdi->ch_count - 1) <<
- SDW_REG_SHIFT(SDW_ALH_STRMZCFG_CHN));
+ u32p_replace_bits(&conf, SDW_ALH_STRMZCFG_DMAT_VAL, SDW_ALH_STRMZCFG_DMAT);
+ u32p_replace_bits(&conf, pdi->ch_count - 1, SDW_ALH_STRMZCFG_CHN);
intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf);
}
@@ -807,10 +886,17 @@ unlock:
static int intel_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- /*
- * TODO: add pm_runtime support here, the startup callback
- * will make sure the IP is 'active'
- */
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = pm_runtime_get_sync(cdns->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err_ratelimited(cdns->dev,
+ "pm_runtime_get_sync failed in %s, ret %d\n",
+ __func__, ret);
+ pm_runtime_put_noidle(cdns->dev);
+ return ret;
+ }
return 0;
}
@@ -856,6 +942,10 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
intel_pdi_alh_configure(sdw, pdi);
sdw_cdns_config_stream(cdns, ch, dir, pdi);
+ /* store pdi and hw_params, may be needed in prepare step */
+ dma->suspended = false;
+ dma->pdi = pdi;
+ dma->hw_params = params;
/* Inform DSP about PDI stream number */
ret = intel_params_stream(sdw, substream, dai, params,
@@ -899,7 +989,11 @@ error:
static int intel_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
+ int ch, dir;
+ int ret = 0;
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma) {
@@ -908,43 +1002,35 @@ static int intel_prepare(struct snd_pcm_substream *substream,
return -EIO;
}
- return sdw_prepare_stream(dma->stream);
-}
+ if (dma->suspended) {
+ dma->suspended = false;
-static int intel_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct sdw_cdns_dma_data *dma;
- int ret;
-
- dma = snd_soc_dai_get_dma_data(dai, substream);
- if (!dma) {
- dev_err(dai->dev, "failed to get dma data in %s", __func__);
- return -EIO;
- }
+ /*
+ * .prepare() is called after system resume, where we
+ * need to reinitialize the SHIM/ALH/Cadence IP.
+ * .prepare() is also called to deal with underflows,
+ * but in those cases we cannot touch ALH/SHIM
+ * registers
+ */
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- case SNDRV_PCM_TRIGGER_RESUME:
- ret = sdw_enable_stream(dma->stream);
- break;
+ /* configure stream */
+ ch = params_channels(dma->hw_params);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = SDW_DATA_DIR_RX;
+ else
+ dir = SDW_DATA_DIR_TX;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_STOP:
- ret = sdw_disable_stream(dma->stream);
- break;
+ intel_pdi_shim_configure(sdw, dma->pdi);
+ intel_pdi_alh_configure(sdw, dma->pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
- default:
- ret = -EINVAL;
- break;
+ /* Inform DSP about PDI stream number */
+ ret = intel_params_stream(sdw, substream, dai,
+ dma->hw_params,
+ sdw->instance,
+ dma->pdi->intel_alh_id);
}
- if (ret)
- dev_err(dai->dev,
- "%s trigger %d failed: %d",
- __func__, cmd, ret);
return ret;
}
@@ -960,12 +1046,12 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
if (!dma)
return -EIO;
- ret = sdw_deprepare_stream(dma->stream);
- if (ret) {
- dev_err(dai->dev, "sdw_deprepare_stream: failed %d", ret);
- return ret;
- }
-
+ /*
+ * The sdw stream state will transition to RELEASED when
+ * stream->master_list is empty. So the stream state will transition to
+ * DEPREPARED for the first cpu-dai and to RELEASED for the last
+ * cpu-dai.
+ */
ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
if (ret < 0) {
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
@@ -979,13 +1065,42 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
+ dma->hw_params = NULL;
+ dma->pdi = NULL;
+
return 0;
}
static void intel_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ pm_runtime_mark_last_busy(cdns->dev);
+ pm_runtime_put_autosuspend(cdns->dev);
+}
+
+static int intel_component_dais_suspend(struct snd_soc_component *component)
+{
+ struct sdw_cdns_dma_data *dma;
+ struct snd_soc_dai *dai;
+
+ for_each_component_dais(component, dai) {
+ /*
+ * we don't have a .suspend dai_ops, and we don't have access
+ * to the substream, so let's mark both capture and playback
+ * DMA contexts as suspended
+ */
+ dma = dai->playback_dma_data;
+ if (dma)
+ dma->suspended = true;
+
+ dma = dai->capture_dma_data;
+ if (dma)
+ dma->suspended = true;
+ }
+
+ return 0;
}
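
[Editor's note] A condensed view of how the flag set here is consumed: intel_prepare() (earlier in this patch) uses it to tell a post-resume prepare, which must reprogram SHIM/ALH and the Cadence IP from the saved hw_params, from an underflow-recovery prepare, which must not touch those registers. Sketch only, mirroring the code above:

	/* in .prepare() */
	if (dma->suspended) {
		dma->suspended = false;
		/* system resume: reprogram SHIM/ALH/Cadence, notify the DSP */
	} else {
		/* underflow recovery: hardware context is intact, do nothing */
	}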
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
@@ -1011,7 +1126,7 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
dma = dai->capture_dma_data;
if (!dma)
- return NULL;
+ return ERR_PTR(-EINVAL);
return dma->stream;
}
@@ -1020,7 +1135,6 @@ static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
.prepare = intel_prepare,
- .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pcm_set_sdw_stream,
@@ -1031,7 +1145,6 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
.prepare = intel_prepare,
- .trigger = intel_trigger,
.hw_free = intel_hw_free,
.shutdown = intel_shutdown,
.set_sdw_stream = intel_pdm_set_sdw_stream,
@@ -1040,6 +1153,7 @@ static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
static const struct snd_soc_component_driver dai_component = {
.name = "soundwire",
+ .suspend = intel_component_dais_suspend,
};
static int intel_create_dai(struct sdw_cdns *cdns,
@@ -1207,10 +1321,7 @@ static int intel_init(struct sdw_intel *sdw)
intel_shim_init(sdw, clock_stop);
- if (clock_stop)
- return 0;
-
- return sdw_cdns_init(&sdw->cdns);
+ return 0;
}
/*
@@ -1249,6 +1360,9 @@ static int intel_master_probe(struct platform_device *pdev)
/* set driver data, accessed by snd_soc_dai_get_drvdata() */
dev_set_drvdata(dev, cdns);
+ /* use generic bandwidth allocation algorithm */
+ sdw->cdns.bus.compute_params = sdw_compute_params;
+
ret = sdw_bus_master_add(bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "sdw_bus_master_add fail: %d\n", ret);
@@ -1259,6 +1373,11 @@ static int intel_master_probe(struct platform_device *pdev)
dev_info(dev,
"SoundWire master %d is disabled, will be ignored\n",
bus->link_id);
+ /*
+ * Ignore the BIOS err_threshold; it's a really bad idea when dealing
+ * with multiple hardware-synchronized links.
+ */
+ bus->prop.err_threshold = 0;
return 0;
}
@@ -1270,6 +1389,9 @@ int intel_master_startup(struct platform_device *pdev)
struct sdw_cdns *cdns = dev_get_drvdata(dev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
+ int link_flags;
+ bool multi_link;
+ u32 clock_stop_quirks;
int ret;
if (bus->prop.hw_disabled) {
@@ -1279,7 +1401,23 @@ int intel_master_startup(struct platform_device *pdev)
return 0;
}
- /* Initialize shim, controller and Cadence IP */
+ link_flags = md_flags >> (bus->link_id * 8);
+ multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
+ if (!multi_link) {
+ dev_dbg(dev, "Multi-link is disabled\n");
+ bus->multi_link = false;
+ } else {
+ /*
+ * hardware-based synchronization is required regardless
+ * of the number of segments used by a stream: SSP-based
+ * synchronization is gated by gsync when the multi-master
+ * mode is set.
+ */
+ bus->multi_link = true;
+ bus->hw_sync_min_links = 1;
+ }
+
+ /* Initialize shim, controller */
ret = intel_init(sdw);
if (ret)
goto err_init;
@@ -1298,12 +1436,33 @@ int intel_master_startup(struct platform_device *pdev)
goto err_init;
}
+ /*
+ * follow recommended programming flows to avoid timeouts when
+ * gsync is enabled
+ */
+ if (multi_link)
+ intel_shim_sync_arm(sdw);
+
+ ret = sdw_cdns_init(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to initialize Cadence IP\n");
+ goto err_interrupt;
+ }
+
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "unable to exit bus reset sequence\n");
goto err_interrupt;
}
+ if (multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(dev, "sync go failed: %d\n", ret);
+ goto err_interrupt;
+ }
+ }
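
[Editor's note] The bring-up ordering matters when gsync gates the links. Condensed from the code above (error handling elided), the arm/go pair brackets the IP init and reset exit so all armed links start in lockstep:

	if (multi_link)
		intel_shim_sync_arm(sdw);	/* stage this link for gsync */

	sdw_cdns_init(cdns);			/* program the Cadence IP */
	sdw_cdns_exit_reset(cdns);		/* request exit from bus reset */

	if (multi_link)
		intel_shim_sync_go(sdw);	/* release all armed links at once */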
+
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
@@ -1314,6 +1473,47 @@ int intel_master_startup(struct platform_device *pdev)
intel_debugfs_init(sdw);
+ /* Enable runtime PM */
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
+ pm_runtime_set_autosuspend_delay(dev,
+ INTEL_MASTER_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_mark_last_busy(dev);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ clock_stop_quirks = sdw->link_res->clock_stop_quirks;
+ if (clock_stop_quirks & SDW_INTEL_CLK_STOP_NOT_ALLOWED) {
+ /*
+ * To keep the clock running we need to prevent
+ * pm_runtime suspend from happening by increasing the
+ * reference count.
+ * This quirk is specified by the parent PCI device in
+ * case of specific latency requirements. It will have
+ * no effect if pm_runtime is disabled by the user via
+ * a module parameter for testing purposes.
+ */
+ pm_runtime_get_noresume(dev);
+ }
+
+ /*
+ * The runtime PM status of Slave devices is "Unsupported"
+ * until they report as ATTACHED. If they don't, e.g. because
+ * there are no Slave devices populated or if the power-on is
+ * delayed or dependent on a power switch, the Master will
+ * remain active and prevent its parent from suspending.
+ *
+ * Conditionally force the pm_runtime core to re-evaluate the
+ * Master status in the absence of any Slave activity. A quirk
+ * is provided to e.g. deal with Slaves that may be powered on
+ * with a delay. A more complete solution would require the
+ * definition of Master properties.
+ */
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
+ pm_runtime_idle(dev);
+
return 0;
err_interrupt:
@@ -1329,6 +1529,11 @@ static int intel_master_remove(struct platform_device *pdev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
+ /*
+ * Since pm_runtime is already disabled, we don't decrease
+ * the refcount when the clock_stop_quirk is
+ * SDW_INTEL_CLK_STOP_NOT_ALLOWED
+ */
if (!bus->prop.hw_disabled) {
intel_debugfs_exit(sdw);
sdw_cdns_enable_interrupt(cdns, false);
@@ -1376,12 +1581,408 @@ int intel_master_process_wakeen_event(struct platform_device *pdev)
return 0;
}
+/*
+ * PM calls
+ */
+
+#ifdef CONFIG_PM
+
+static int __maybe_unused intel_suspend(struct device *dev)
+{
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ u32 clock_stop_quirks;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
+ bus->link_id);
+ return 0;
+ }
+
+ if (pm_runtime_suspended(dev)) {
+ dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
+
+ clock_stop_quirks = sdw->link_res->clock_stop_quirks;
+
+ if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
+ !clock_stop_quirks) &&
+ !pm_runtime_suspended(dev->parent)) {
+
+ /*
+ * if we've enabled clock stop, and the parent
+ * is still active, disable shim wake. The
+ * SHIM registers are not accessible if the
+ * parent is already pm_runtime suspended, so
+ * it's too late to change that configuration.
+ */
+
+ intel_shim_wake(sdw, false);
+ }
+
+ return 0;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "cannot disable interrupts on suspend\n");
+ return ret;
+ }
+
+ ret = intel_link_power_down(sdw);
+ if (ret) {
+ dev_err(dev, "Link power down failed: %d", ret);
+ return ret;
+ }
+
+ intel_shim_wake(sdw, false);
+
+ return 0;
+}
+
+static int intel_suspend_runtime(struct device *dev)
+{
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ u32 clock_stop_quirks;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
+ bus->link_id);
+ return 0;
+ }
+
+ clock_stop_quirks = sdw->link_res->clock_stop_quirks;
+
+ if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
+
+ ret = sdw_cdns_enable_interrupt(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "cannot disable interrupts on suspend\n");
+ return ret;
+ }
+
+ ret = intel_link_power_down(sdw);
+ if (ret) {
+ dev_err(dev, "Link power down failed: %d", ret);
+ return ret;
+ }
+
+ intel_shim_wake(sdw, false);
+
+ } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
+ !clock_stop_quirks) {
+ ret = sdw_cdns_clock_stop(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable clock stop on suspend\n");
+ return ret;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "cannot disable interrupts on suspend\n");
+ return ret;
+ }
+
+ ret = intel_link_power_down(sdw);
+ if (ret) {
+ dev_err(dev, "Link power down failed: %d", ret);
+ return ret;
+ }
+
+ intel_shim_wake(sdw, true);
+ } else {
+ dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
+ __func__, clock_stop_quirks);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
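
[Editor's note] The three runtime-suspend policies handled above, condensed into one illustrative switch; the real code tests bit flags since quirks may combine:

	switch (clock_stop_quirks) {
	case SDW_INTEL_CLK_STOP_TEARDOWN:
		/* full link power-down, wake disabled; resume re-enumerates */
		break;
	case SDW_INTEL_CLK_STOP_BUS_RESET:
	case 0:	/* no quirk */
		/* stop the bus clock, power the link down, keep wake enabled */
		break;
	default:
		/* any other combination is rejected with -EINVAL */
		break;
	}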
+
+static int __maybe_unused intel_resume(struct device *dev)
+{
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ int link_flags;
+ bool multi_link;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
+ bus->link_id);
+ return 0;
+ }
+
+ link_flags = md_flags >> (bus->link_id * 8);
+ multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
+
+ if (pm_runtime_suspended(dev)) {
+ dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
+
+ /* follow required sequence from runtime_pm.rst */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_enable(dev);
+
+ link_flags = md_flags >> (bus->link_id * 8);
+
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
+ pm_runtime_idle(dev);
+ }
+
+ ret = intel_init(sdw);
+ if (ret) {
+ dev_err(dev, "%s failed: %d", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * make sure all Slaves are tagged as UNATTACHED and provide
+ * reason for reinitialization
+ */
+ sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid timeouts when
+ * gsync is enabled
+ */
+ if (multi_link)
+ intel_shim_sync_arm(sdw);
+
+ ret = sdw_cdns_init(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to initialize Cadence IP during resume\n");
+ return ret;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ return ret;
+ }
+
+ if (multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(dev, "sync go failed during resume\n");
+ return ret;
+ }
+ }
+
+ /*
+ * after system resume, the pm_runtime suspend() may kick in
+ * during the enumeration, before any child device forces the
+ * master device to remain active. Using pm_runtime_get()
+ * routines is not really possible, since it'd prevent the
+ * master from suspending.
+ * A reasonable compromise is to update the pm_runtime
+ * counters and delay the pm_runtime suspend by several
+ * seconds, by which time all enumeration should be complete.
+ */
+ pm_runtime_mark_last_busy(dev);
+
+ return ret;
+}
+
+static int intel_resume_runtime(struct device *dev)
+{
+ struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_bus *bus = &cdns->bus;
+ u32 clock_stop_quirks;
+ bool clock_stop0;
+ int link_flags;
+ bool multi_link;
+ int status;
+ int ret;
+
+ if (bus->prop.hw_disabled) {
+ dev_dbg(dev, "SoundWire master %d is disabled, ignoring\n",
+ bus->link_id);
+ return 0;
+ }
+
+ link_flags = md_flags >> (bus->link_id * 8);
+ multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
+
+ clock_stop_quirks = sdw->link_res->clock_stop_quirks;
+
+ if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
+ ret = intel_init(sdw);
+ if (ret) {
+ dev_err(dev, "%s failed: %d", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * make sure all Slaves are tagged as UNATTACHED and provide
+ * reason for reinitialization
+ */
+ sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid
+ * timeouts when gsync is enabled
+ */
+ if (multi_link)
+ intel_shim_sync_arm(sdw);
+
+ ret = sdw_cdns_init(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to initialize Cadence IP during resume\n");
+ return ret;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ return ret;
+ }
+
+ if (multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(dev, "sync go failed during resume\n");
+ return ret;
+ }
+ }
+ } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
+ ret = intel_init(sdw);
+ if (ret) {
+ dev_err(dev, "%s failed: %d", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * An exception condition occurs for the CLK_STOP_BUS_RESET
+ * case if one or more masters remain active. In this condition,
+ * all the masters are powered on since they share the same power
+ * domain. The Master can preserve its context for clock stop0, so
+ * there is no need to clear the Slave status and reset the bus.
+ */
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+
+ if (!clock_stop0) {
+
+ /*
+ * make sure all Slaves are tagged as UNATTACHED and
+ * provide reason for reinitialization
+ */
+
+ status = SDW_UNATTACH_REQUEST_MASTER_RESET;
+ sdw_clear_slave_status(bus, status);
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid
+ * timeouts when gsync is enabled
+ */
+ if (multi_link)
+ intel_shim_sync_arm(sdw);
+
+ /*
+ * Re-initialize the IP since it was powered-off
+ */
+ sdw_cdns_init(&sdw->cdns);
+
+ } else {
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
+ if (ret < 0) {
+ dev_err(dev, "unable to restart clock during resume\n");
+ return ret;
+ }
+
+ if (!clock_stop0) {
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ return ret;
+ }
+
+ if (multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "sync go failed during resume\n");
+ return ret;
+ }
+ }
+ }
+ } else if (!clock_stop_quirks) {
+
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+ if (!clock_stop0)
+ dev_err(dev, "%s invalid configuration, clock was not stopped", __func__);
+
+ ret = intel_init(sdw);
+ if (ret) {
+ dev_err(dev, "%s failed: %d", __func__, ret);
+ return ret;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "unable to resume master during resume\n");
+ return ret;
+ }
+ } else {
+ dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
+ __func__, clock_stop_quirks);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+#endif
+
+static const struct dev_pm_ops intel_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
+ SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL)
+};
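
[Editor's note] For reference, the two macros above populate the dev_pm_ops slots only when CONFIG_PM_SLEEP/CONFIG_PM are enabled, which is why the callbacks are guarded by #ifdef CONFIG_PM or marked __maybe_unused. A simplified open-coded equivalent (the system-sleep macro also wires freeze/thaw/poweroff/restore, trimmed here):

	static const struct dev_pm_ops example_pm = {
		.suspend = intel_suspend,
		.resume = intel_resume,
		.runtime_suspend = intel_suspend_runtime,
		.runtime_resume = intel_resume_runtime,
	};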
+
static struct platform_driver sdw_intel_drv = {
.probe = intel_master_probe,
.remove = intel_master_remove,
.driver = {
.name = "intel-sdw",
- },
+ .pm = &intel_pm,
+ }
};
module_platform_driver(sdw_intel_drv);
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index 4ea3d262d249..76820d0b9deb 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -17,6 +17,8 @@
* @dev: device implementing hw_params and free callbacks
* @shim_lock: mutex to handle access to shared SHIM registers
* @shim_mask: global pointer to check SHIM register initialization
+ * @clock_stop_quirks: mask defining requested behavior on pm_suspend
+ * @link_mask: global mask needed for power-up/down sequences
* @cdns: Cadence master descriptor
* @list: used to walk-through all masters exposed by the same controller
*/
@@ -31,6 +33,8 @@ struct sdw_intel_link_res {
struct device *dev;
struct mutex *shim_lock; /* protect shared registers */
u32 *shim_mask;
+ u32 clock_stop_quirks;
+ u32 link_mask;
struct sdw_cdns *cdns;
struct list_head list;
};
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 047252a91c9e..cabdadb09a1b 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "intel.h"
@@ -68,8 +69,13 @@ static int sdw_intel_cleanup(struct sdw_intel_ctx *ctx)
if (!(link_mask & BIT(i)))
continue;
- if (link->pdev)
+ if (link->pdev) {
+ pm_runtime_disable(&link->pdev->dev);
platform_device_unregister(link->pdev);
+ }
+
+ if (!link->clock_stop_quirks)
+ pm_runtime_put_noidle(link->dev);
}
return 0;
@@ -246,8 +252,10 @@ static struct sdw_intel_ctx
link->ops = res->ops;
link->dev = res->dev;
+ link->clock_stop_quirks = res->clock_stop_quirks;
link->shim_lock = &ctx->shim_lock;
link->shim_mask = &ctx->shim_mask;
+ link->link_mask = link_mask;
memset(&pdevinfo, 0, sizeof(pdevinfo));
@@ -334,6 +342,16 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
continue;
intel_master_startup(link->pdev);
+
+ if (!link->clock_stop_quirks) {
+ /*
+ * we need to prevent the parent PCI device
+ * from entering pm_runtime suspend, so that
+ * power rails to the SoundWire IP are not
+ * turned off.
+ */
+ pm_runtime_get_noresume(link->dev);
+ }
}
return 0;
@@ -365,7 +383,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
* Name(_ADR, 0x40000000), with bits 31..28 representing the
* SoundWire link so filter accordingly
*/
- if ((adr & GENMASK(31, 28)) >> 28 != SDW_LINK_TYPE)
+ if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
return AE_OK; /* keep going */
/* device found, stop namespace walk */
diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
index 5f0b2189defe..3488bb824e84 100644
--- a/drivers/soundwire/master.c
+++ b/drivers/soundwire/master.c
@@ -154,6 +154,7 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
bus->dev = &md->dev;
bus->md = md;
+ pm_runtime_enable(&bus->md->dev);
device_register_err:
return ret;
}
@@ -166,6 +167,7 @@ device_register_err:
*/
int sdw_master_device_del(struct sdw_bus *bus)
{
+ pm_runtime_disable(&bus->md->dev);
device_unregister(bus->dev);
return 0;
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 4ae62b452b8c..55a9c51c84c1 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -289,7 +289,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
struct sdw_slave_prop *prop = &slave->prop;
struct device *dev = &slave->dev;
struct fwnode_handle *port;
- int num_of_ports, nval, i, dp0 = 0;
+ int nval;
device_property_read_u32(dev, "mipi-sdw-sw-interface-revision",
&prop->mipi_revision);
@@ -352,7 +352,6 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
return -ENOMEM;
sdw_slave_read_dp0(slave, port, prop->dp0_prop);
- dp0 = 1;
}
/*
@@ -383,21 +382,6 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
prop->sink_ports, "sink");
- /* some ports are bidirectional so check total ports by ORing */
- nval = prop->source_ports | prop->sink_ports;
- num_of_ports = hweight32(nval) + dp0; /* add DP0 */
-
- /* Allocate port_ready based on num_of_ports */
- slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
- sizeof(*slave->port_ready),
- GFP_KERNEL);
- if (!slave->port_ready)
- return -ENOMEM;
-
- /* Initialize completion */
- for (i = 0; i < num_of_ports; i++)
- init_completion(&slave->port_ready[i]);
-
return 0;
}
EXPORT_SYMBOL(sdw_slave_read_prop);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 915c2cf0c274..fbca4ebf63e9 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -34,6 +34,7 @@
#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
#define SWRM_INTERRUPT_MASK_ADDR 0x204
#define SWRM_INTERRUPT_CLEAR 0x208
+#define SWRM_INTERRUPT_CPU_EN 0x210
#define SWRM_CMD_FIFO_WR_CMD 0x300
#define SWRM_CMD_FIFO_RD_CMD 0x304
#define SWRM_CMD_FIFO_CMD 0x308
@@ -43,19 +44,17 @@
#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
#define SWRM_ENUMERATOR_CFG_ADDR 0x500
#define SWRM_MCP_FRAME_CTRL_BANK_ADDR(m) (0x101C + 0x40 * (m))
-#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT 3
#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK GENMASK(2, 0)
#define SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK GENMASK(7, 3)
-#define SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT 0
#define SWRM_MCP_CFG_ADDR 0x1048
#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK GENMASK(21, 17)
-#define SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT 0x11
#define SWRM_DEF_CMD_NO_PINGS 0x1f
#define SWRM_MCP_STATUS 0x104C
#define SWRM_MCP_STATUS_BANK_NUM_MASK BIT(0)
#define SWRM_MCP_SLV_STATUS 0x1090
#define SWRM_MCP_SLV_STATUS_MASK GENMASK(1, 0)
#define SWRM_DP_PORT_CTRL_BANK(n, m) (0x1124 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
#define SWRM_DP_PORT_CTRL_OFFSET1_SHFT 0x08
@@ -67,11 +66,6 @@
#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
-#define SWRM_MAX_ROW_VAL 0 /* Rows = 48 */
-#define SWRM_DEFAULT_ROWS 48
-#define SWRM_MIN_COL_VAL 0 /* Cols = 2 */
-#define SWRM_DEFAULT_COL 16
-#define SWRM_MAX_COL_VAL 7
#define SWRM_SPECIAL_CMD_ID 0xF
#define MAX_FREQ_NUM 1
#define TIMEOUT_MS (2 * HZ)
@@ -84,12 +78,14 @@ struct qcom_swrm_port_config {
u8 si;
u8 off1;
u8 off2;
+ u8 bp_mode;
};
struct qcom_swrm_ctrl {
struct sdw_bus bus;
struct device *dev;
struct regmap *regmap;
+ void __iomem *mmio;
struct completion *comp;
struct work_struct slave_work;
/* read/write lock */
@@ -103,6 +99,8 @@ struct qcom_swrm_ctrl {
unsigned int version;
int num_din_ports;
int num_dout_ports;
+ int cols_index;
+ int rows_index;
unsigned long dout_port_mask;
unsigned long din_port_mask;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
@@ -112,9 +110,24 @@ struct qcom_swrm_ctrl {
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
};
+struct qcom_swrm_data {
+ u32 default_cols;
+ u32 default_rows;
+};
+
+static struct qcom_swrm_data swrm_v1_3_data = {
+ .default_rows = 48,
+ .default_cols = 16,
+};
+
+static struct qcom_swrm_data swrm_v1_5_data = {
+ .default_rows = 50,
+ .default_cols = 16,
+};
+
#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
-static int qcom_swrm_abh_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
+static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
u32 *val)
{
struct regmap *wcd_regmap = ctrl->regmap;
@@ -154,6 +167,20 @@ static int qcom_swrm_ahb_reg_write(struct qcom_swrm_ctrl *ctrl,
return SDW_CMD_OK;
}
+static int qcom_swrm_cpu_reg_read(struct qcom_swrm_ctrl *ctrl, int reg,
+ u32 *val)
+{
+ *val = readl(ctrl->mmio + reg);
+ return SDW_CMD_OK;
+}
+
+static int qcom_swrm_cpu_reg_write(struct qcom_swrm_ctrl *ctrl, int reg,
+ int val)
+{
+ writel(val, ctrl->mmio + reg);
+ return SDW_CMD_OK;
+}
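
[Editor's note] The new CPU (MMIO) accessors slot into the same reg_read/reg_write function pointers as the AHB-over-regmap ones, so the rest of the driver stays bus-agnostic. A self-contained sketch of the indirection; names are illustrative:

	struct example_ctrl {
		void __iomem *mmio;
		struct regmap *regmap;
		int (*reg_read)(struct example_ctrl *ctrl, int reg, u32 *val);
		int (*reg_write)(struct example_ctrl *ctrl, int reg, int val);
	};

	static int example_cpu_read(struct example_ctrl *ctrl, int reg, u32 *val)
	{
		*val = readl(ctrl->mmio + reg);	/* direct MMIO access */
		return 0;
	}

	/* probe() picks one accessor pair; callers never care which */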
+
static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
u8 dev_addr, u16 reg_addr)
{
@@ -284,8 +311,8 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
u32 val;
/* Clear Rows and Cols */
- val = (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
- SWRM_MIN_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+ val = FIELD_PREP(SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK, ctrl->rows_index);
+ val |= FIELD_PREP(SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK, ctrl->cols_index);
ctrl->reg_write(ctrl, SWRM_MCP_FRAME_CTRL_BANK_ADDR(0), val);
@@ -298,9 +325,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
/* Configure No pings */
ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
- val &= ~SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK;
- val |= (SWRM_DEF_CMD_NO_PINGS <<
- SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_SHFT);
+ u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
/* Configure number of retries of a read/write cmd */
@@ -310,6 +335,12 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_write(ctrl, SWRM_COMP_CFG_ADDR,
SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK |
SWRM_COMP_CFG_ENABLE_MSK);
+
+ /* enable CPU IRQs */
+ if (ctrl->mmio) {
+ ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN,
+ SWRM_INTERRUPT_STATUS_RMSK);
+ }
return 0;
}
@@ -355,11 +386,8 @@ static int qcom_swrm_pre_bank_switch(struct sdw_bus *bus)
ctrl->reg_read(ctrl, reg, &val);
- val &= ~SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK;
- val &= ~SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK;
-
- val |= (SWRM_MAX_ROW_VAL << SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_SHFT |
- SWRM_MAX_COL_VAL << SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_SHFT);
+ u32p_replace_bits(&val, ctrl->cols_index, SWRM_MCP_FRAME_CTRL_BANK_COL_CTRL_BMSK);
+ u32p_replace_bits(&val, ctrl->rows_index, SWRM_MCP_FRAME_CTRL_BANK_ROW_CTRL_BMSK);
return ctrl->reg_write(ctrl, reg, val);
}
@@ -378,14 +406,22 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus,
{
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
u32 value;
+ int reg = SWRM_DP_PORT_CTRL_BANK((params->port_num), bank);
+ int ret;
value = params->offset1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
value |= params->offset2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
value |= params->sample_interval - 1;
- return ctrl->reg_write(ctrl,
- SWRM_DP_PORT_CTRL_BANK((params->port_num), bank),
- value);
+ ret = ctrl->reg_write(ctrl, reg, value);
+
+ if (!ret && params->blk_pkg_mode) {
+ reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank);
+
+ ret = ctrl->reg_write(ctrl, reg, 1);
+ }
+
+ return ret;
}
static int qcom_swrm_port_enable(struct sdw_bus *bus,
@@ -433,6 +469,7 @@ static int qcom_swrm_compute_params(struct sdw_bus *bus)
p_rt->transport_params.sample_interval = pcfg->si + 1;
p_rt->transport_params.offset1 = pcfg->off1;
p_rt->transport_params.offset2 = pcfg->off2;
+ p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
}
list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
@@ -443,6 +480,7 @@ static int qcom_swrm_compute_params(struct sdw_bus *bus)
pcfg->si + 1;
p_rt->transport_params.offset1 = pcfg->off1;
p_rt->transport_params.offset2 = pcfg->off2;
+ p_rt->transport_params.blk_pkg_mode = pcfg->bp_mode;
i++;
}
}
@@ -689,12 +727,13 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
u8 off1[QCOM_SDW_MAX_PORTS];
u8 off2[QCOM_SDW_MAX_PORTS];
u8 si[QCOM_SDW_MAX_PORTS];
+ u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, };
int i, ret, nports, val;
ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
- ctrl->num_dout_ports = val & SWRM_COMP_PARAMS_DOUT_PORTS_MASK;
- ctrl->num_din_ports = (val & SWRM_COMP_PARAMS_DIN_PORTS_MASK) >> 5;
+ ctrl->num_dout_ports = FIELD_GET(SWRM_COMP_PARAMS_DOUT_PORTS_MASK, val);
+ ctrl->num_din_ports = FIELD_GET(SWRM_COMP_PARAMS_DIN_PORTS_MASK, val);
ret = of_property_read_u32(np, "qcom,din-ports", &val);
if (ret)
@@ -731,10 +770,13 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
if (ret)
return ret;
+ ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
+ bp_mode, nports);
for (i = 0; i < nports; i++) {
ctrl->pconfig[i].si = si[i];
ctrl->pconfig[i].off1 = off1[i];
ctrl->pconfig[i].off2 = off2[i];
+ ctrl->pconfig[i].bp_mode = bp_mode[i];
}
return 0;
@@ -746,6 +788,7 @@ static int qcom_swrm_probe(struct platform_device *pdev)
struct sdw_master_prop *prop;
struct sdw_bus_params *params;
struct qcom_swrm_ctrl *ctrl;
+ const struct qcom_swrm_data *data;
int ret;
u32 val;
@@ -753,15 +796,25 @@ static int qcom_swrm_probe(struct platform_device *pdev)
if (!ctrl)
return -ENOMEM;
+ data = of_device_get_match_data(dev);
+ ctrl->rows_index = sdw_find_row_index(data->default_rows);
+ ctrl->cols_index = sdw_find_col_index(data->default_cols);
+#if IS_ENABLED(CONFIG_SLIMBUS)
if (dev->parent->bus == &slimbus_bus) {
- ctrl->reg_read = qcom_swrm_abh_reg_read;
+#else
+ if (false) {
+#endif
+ ctrl->reg_read = qcom_swrm_ahb_reg_read;
ctrl->reg_write = qcom_swrm_ahb_reg_write;
ctrl->regmap = dev_get_regmap(dev->parent, NULL);
if (!ctrl->regmap)
return -EINVAL;
} else {
- /* Only WCD based SoundWire controller is supported */
- return -ENOTSUPP;
+ ctrl->reg_read = qcom_swrm_cpu_reg_read;
+ ctrl->reg_write = qcom_swrm_cpu_reg_write;
+ ctrl->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->mmio))
+ return PTR_ERR(ctrl->mmio);
}
ctrl->irq = of_irq_get(dev->of_node, 0);
@@ -795,8 +848,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
params = &ctrl->bus.params;
params->max_dr_freq = DEFAULT_CLK_FREQ;
params->curr_dr_freq = DEFAULT_CLK_FREQ;
- params->col = SWRM_DEFAULT_COL;
- params->row = SWRM_DEFAULT_ROWS;
+ params->col = data->default_cols;
+ params->row = data->default_rows;
ctrl->reg_read(ctrl, SWRM_MCP_STATUS, &val);
params->curr_bank = val & SWRM_MCP_STATUS_BANK_NUM_MASK;
params->next_bank = !params->curr_bank;
@@ -806,8 +859,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
prop->num_clk_gears = 0;
prop->num_clk_freq = MAX_FREQ_NUM;
prop->clk_freq = &qcom_swrm_freq_tbl[0];
- prop->default_col = SWRM_DEFAULT_COL;
- prop->default_row = SWRM_DEFAULT_ROWS;
+ prop->default_col = data->default_cols;
+ prop->default_row = data->default_rows;
ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &ctrl->version);
@@ -858,7 +911,8 @@ static int qcom_swrm_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_swrm_of_match[] = {
- { .compatible = "qcom,soundwire-v1.3.0", },
+ { .compatible = "qcom,soundwire-v1.3.0", .data = &swrm_v1_3_data },
+ { .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
{/* sentinel */},
};
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 0839445ee07b..a08f4081c1c4 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -6,6 +6,7 @@
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
+#include "sysfs_local.h"
static void sdw_slave_release(struct device *dev)
{
@@ -20,11 +21,12 @@ struct device_type sdw_slave_type = {
.uevent = sdw_slave_uevent,
};
-static int sdw_slave_add(struct sdw_bus *bus,
- struct sdw_slave_id *id, struct fwnode_handle *fwnode)
+int sdw_slave_add(struct sdw_bus *bus,
+ struct sdw_slave_id *id, struct fwnode_handle *fwnode)
{
struct sdw_slave *slave;
int ret;
+ int i;
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
@@ -50,6 +52,7 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.bus = &sdw_bus_type;
slave->dev.of_node = of_node_get(to_of_node(fwnode));
slave->dev.type = &sdw_slave_type;
+ slave->dev.groups = sdw_slave_status_attr_groups;
slave->bus = bus;
slave->status = SDW_SLAVE_UNATTACHED;
init_completion(&slave->enumeration_complete);
@@ -57,6 +60,10 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev_num = 0;
init_completion(&slave->probe_complete);
slave->probed = false;
+ slave->first_interrupt_done = false;
+
+ for (i = 0; i < SDW_MAX_PORTS; i++)
+ init_completion(&slave->port_ready[i]);
mutex_lock(&bus->bus_lock);
list_add_tail(&slave->node, &bus->slaves);
@@ -102,7 +109,7 @@ static bool find_slave(struct sdw_bus *bus,
}
/* Extract link id from ADR, Bit 51 to 48 (included) */
- link_id = (addr >> 48) & GENMASK(3, 0);
+ link_id = SDW_DISCO_LINK_ID(addr);
/* Check for link_id match */
if (link_id != bus->link_id)
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 6e36deb505b1..1099b5d1262b 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -25,8 +25,10 @@
int sdw_rows[SDW_FRAME_ROWS] = {48, 50, 60, 64, 75, 80, 125, 147,
96, 100, 120, 128, 150, 160, 250, 0,
192, 200, 240, 256, 72, 144, 90, 180};
+EXPORT_SYMBOL(sdw_rows);
int sdw_cols[SDW_FRAME_COLS] = {2, 4, 6, 8, 10, 12, 14, 16};
+EXPORT_SYMBOL(sdw_cols);
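
[Editor's note] Exporting the frame-shape tables lets the Intel driver's generic bandwidth allocation look them up. sdw_find_col_index()/sdw_find_row_index() are simple table scans; a hedged sketch of the column variant (the real function also warns before falling back):

	int example_find_col_index(int col)
	{
		int i;

		for (i = 0; i < SDW_FRAME_COLS; i++)
			if (sdw_cols[i] == col)
				return i;

		return 0;	/* fall back to the smallest column count (2) */
	}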
int sdw_find_col_index(int col)
{
@@ -100,9 +102,7 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
return ret;
/* Program DPN_SampleCtrl2 register */
- wbuf = (t_params->sample_interval - 1);
- wbuf &= SDW_DPN_SAMPLECTRL_HIGH;
- wbuf >>= SDW_REG_SHIFT(SDW_DPN_SAMPLECTRL_HIGH);
+ wbuf = FIELD_GET(SDW_DPN_SAMPLECTRL_HIGH, t_params->sample_interval - 1);
ret = sdw_write(slave, addr3, wbuf);
if (ret < 0) {
@@ -111,9 +111,8 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
}
/* Program DPN_HCtrl register */
- wbuf = t_params->hstart;
- wbuf <<= SDW_REG_SHIFT(SDW_DPN_HCTRL_HSTART);
- wbuf |= t_params->hstop;
+ wbuf = FIELD_PREP(SDW_DPN_HCTRL_HSTART, t_params->hstart);
+ wbuf |= FIELD_PREP(SDW_DPN_HCTRL_HSTOP, t_params->hstop);
ret = sdw_write(slave, addr4, wbuf);
if (ret < 0)
@@ -157,8 +156,8 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
}
/* Program DPN_PortCtrl register */
- wbuf = p_params->data_mode << SDW_REG_SHIFT(SDW_DPN_PORTCTRL_DATAMODE);
- wbuf |= p_params->flow_mode;
+ wbuf = FIELD_PREP(SDW_DPN_PORTCTRL_DATAMODE, p_params->data_mode);
+ wbuf |= FIELD_PREP(SDW_DPN_PORTCTRL_FLOWMODE, p_params->flow_mode);
ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf);
if (ret < 0) {
@@ -444,7 +443,8 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
prep_ch.bank = bus->params.next_bank;
- if (dpn_prop->imp_def_interrupts || !dpn_prop->simple_ch_prep_sm)
+ if (dpn_prop->imp_def_interrupts || !dpn_prop->simple_ch_prep_sm ||
+ bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL)
intr = true;
/*
@@ -689,9 +689,9 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
/*
* Set the multi_link flag only when both the hardware supports
- * and there is a stream handled by multiple masters
+ * and hardware-based sync is required
*/
- multi_link = bus->multi_link && (m_rt_count > 1);
+ multi_link = bus->multi_link && (m_rt_count >= bus->hw_sync_min_links);
if (multi_link)
ret = sdw_transfer_defer(bus, wr_msg, &bus->defer_msg);
@@ -761,13 +761,16 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
const struct sdw_master_ops *ops;
struct sdw_bus *bus;
bool multi_link = false;
+ int m_rt_count;
int ret = 0;
+ m_rt_count = stream->m_rt_count;
+
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
bus = m_rt->bus;
ops = bus->ops;
- if (bus->multi_link) {
+ if (bus->multi_link && m_rt_count >= bus->hw_sync_min_links) {
multi_link = true;
mutex_lock(&bus->msg_lock);
}
@@ -788,7 +791,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
* synchronized across all Masters and happens later as a
* part of post_bank_switch ops.
*/
- ret = sdw_bank_switch(bus, stream->m_rt_count);
+ ret = sdw_bank_switch(bus, m_rt_count);
if (ret < 0) {
dev_err(bus->dev, "Bank switch failed: %d\n", ret);
goto error;
@@ -814,7 +817,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
ret);
goto error;
}
- } else if (bus->multi_link && stream->m_rt_count > 1) {
+ } else if (multi_link) {
dev_err(bus->dev,
"Post bank switch ops not implemented\n");
goto error;
@@ -832,7 +835,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
goto error;
}
- if (bus->multi_link)
+ if (multi_link)
mutex_unlock(&bus->msg_lock);
}
@@ -1784,6 +1787,16 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
bus->params.bandwidth -= m_rt->stream->params.rate *
m_rt->ch_count * m_rt->stream->params.bps;
+ /* Compute params */
+ if (bus->compute_params) {
+ ret = bus->compute_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Compute params failed: %d",
+ ret);
+ return ret;
+ }
+ }
+
/* Program params */
ret = sdw_program_params(bus, false);
if (ret < 0) {
@@ -1913,7 +1926,7 @@ void sdw_shutdown_stream(void *sdw_substream)
sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
- if (!sdw_stream) {
+ if (IS_ERR(sdw_stream)) {
dev_err(rtd->dev, "no stream found for DAI %s", dai->name);
return;
}
diff --git a/drivers/soundwire/sysfs_local.h b/drivers/soundwire/sysfs_local.h
index ff60adee3c41..7268bc24c538 100644
--- a/drivers/soundwire/sysfs_local.h
+++ b/drivers/soundwire/sysfs_local.h
@@ -8,6 +8,10 @@
* SDW sysfs APIs -
*/
+/* basic attributes to report status of Slave (attachment, dev_num) */
+extern const struct attribute_group *sdw_slave_status_attr_groups[];
+
+/* additional device-managed properties reported after driver probe */
int sdw_slave_sysfs_init(struct sdw_slave *slave);
int sdw_slave_sysfs_dpn_init(struct sdw_slave *slave);
diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c
index f510071b0add..b48b6617a396 100644
--- a/drivers/soundwire/sysfs_slave.c
+++ b/drivers/soundwire/sysfs_slave.c
@@ -16,9 +16,13 @@
/*
* The sysfs for Slave reflects the MIPI description as given
- * in the MIPI DisCo spec
+ * in the MIPI DisCo spec.
+ * status and device_number come directly from the MIPI SoundWire
+ * 1.x specification.
*
* Base file is device
+ * |---- status
+ * |---- device_number
* |---- modalias
* |---- dev-properties
* |---- mipi_revision
@@ -212,3 +216,55 @@ int sdw_slave_sysfs_init(struct sdw_slave *slave)
return 0;
}
+
+/*
+ * the status is shown in capital letters for UNATTACHED and RESERVED
+ * on purpose, to highligh users to the fact that these status values
+ * are not expected.
+ */
+static const char *const slave_status[] = {
+ [SDW_SLAVE_UNATTACHED] = "UNATTACHED",
+ [SDW_SLAVE_ATTACHED] = "Attached",
+ [SDW_SLAVE_ALERT] = "Alert",
+ [SDW_SLAVE_RESERVED] = "RESERVED",
+};
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sprintf(buf, "%s\n", slave_status[slave->status]);
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t device_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ if (slave->status == SDW_SLAVE_UNATTACHED)
+ return sprintf(buf, "%s", "N/A");
+ else
+ return sprintf(buf, "%d", slave->dev_num);
+}
+static DEVICE_ATTR_RO(device_number);
+
+static struct attribute *slave_status_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_device_number.attr,
+ NULL,
+};
+
+/*
+ * we don't use ATTRIBUTE_GROUPS here since the group is used in a
+ * separate file and can't be handled as a static.
+ */
+static const struct attribute_group sdw_slave_status_attr_group = {
+ .attrs = slave_status_attrs,
+};
+
+const struct attribute_group *sdw_slave_status_attr_groups[] = {
+ &sdw_slave_status_attr_group,
+ NULL
+};
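
[Editor's note] These groups are attached via slave->dev.groups before registration (see the slave.c hunk above), so sysfs creates 'status' and 'device_number' atomically with the device node, avoiding the userspace race of adding attributes after the device exists. Sketch of the attach point:

	slave->dev.groups = sdw_slave_status_attr_groups;
	/* ... */
	ret = device_register(&slave->dev);	/* attributes appear atomically */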
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index c6ea760ea5f0..5cff60de8e83 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -235,6 +235,7 @@ config SPI_DAVINCI
config SPI_DESIGNWARE
tristate "DesignWare SPI controller core support"
+ imply SPI_MEM
help
general driver for SPI controller core from DesignWare
@@ -251,6 +252,34 @@ config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
depends on HAS_IOMEM
+config SPI_DW_BT1
+ tristate "Baikal-T1 SPI driver for DW SPI core"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ help
+ Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
+ controllers. Two of them are pretty much normal: with IRQ, DMA,
+ FIFOs of 64 words depth, 4x CSs. The third one, being part of
+ the Baikal-T1 System Boot Controller, has very limited resources:
+ no IRQ, no DMA, only a single native chip-select, and Tx/Rx FIFOs
+ of just 8 words depth. The latter is normally connected to an
+ external 128Mb SPI-NOR flash (larger parts can be used).
+
+config SPI_DW_BT1_DIRMAP
+ bool "Directly mapped Baikal-T1 Boot SPI flash support"
+ depends on SPI_DW_BT1
+ select MULTIPLEXER
+ select MUX_MMIO
+ help
+ Directly mapped SPI flash memory is an interface specific to the
+ Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
+ can be used to access a peripheral memory device just by
+ reading/writing data from/to it. Note that the system APB bus
+ will stall during each IO from/to the dirmap region until the
+ operation is finished. So try not to use it concurrently with
+ time-critical tasks (like the SPI memory operations implemented
+ in this driver).
+
endif
config SPI_DLN2
@@ -637,7 +666,7 @@ config SPI_QCOM_QSPI
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || COMPILE_TEST
help
Qualcomm Universal Peripheral (QUP) core is an AHB slave that
provides a common data path (an output FIFO and an input FIFO)
@@ -680,7 +709,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST)
+ depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST)
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index cf955ea803cd..6fea5821662e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
spi-dw-y := spi-dw-core.o
spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o
+obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
@@ -97,7 +98,6 @@ obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
-spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o
obj-$(CONFIG_SPI_SH) += spi-sh.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index fcde419e480c..46feafe4e201 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -848,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
spi = spi_master_get_devdata(master);
- memset(spi, 0, sizeof(struct a3700_spi));
spi->master = master;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 2cfe6253a784..0e5e64a80848 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>
#include <linux/io.h>
@@ -513,9 +512,8 @@ static int atmel_spi_configure_dma(struct spi_master *master,
master->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(master->dma_tx)) {
- err = PTR_ERR(master->dma_tx);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "No TX DMA channel, DMA is disabled\n");
+ err = dev_err_probe(dev, PTR_ERR(master->dma_tx),
+ "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
@@ -859,6 +857,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
csr = spi_readl(as, CSR0 + 4 * chip_select);
csr = SPI_BFINS(SCBR, scbr, csr);
spi_writel(as, CSR0 + 4 * chip_select, csr);
+ xfer->effective_speed_hz = bus_hz / scbr;
return 0;
}
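
[Editor's note] Reporting effective_speed_hz lets the SPI core convert per-transfer delays expressed in SCK cycles into time. A hedged sketch of the conversion the core performs, simplified from spi_delay_to_ns(); exact rounding may differ:

	static u32 example_sck_delay_ns(struct spi_transfer *xfer, u32 cycles)
	{
		u32 hz = xfer->effective_speed_hz;

		if (!hz)
			return 0;	/* speed unknown: no conversion possible */

		/* one SCK period in ns, times the requested cycle count */
		return cycles * DIV_ROUND_UP(1000000000U, hz);
	}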
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 9cfa15ec8b08..14c9d0133bce 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1282,16 +1282,9 @@ static const struct bcm_qspi_data bcm_qspi_spcr3_data = {
static const struct of_device_id bcm_qspi_of_match[] = {
{
- .compatible = "brcm,spi-bcm7425-qspi",
- .data = &bcm_qspi_no_rev_data,
- },
- {
- .compatible = "brcm,spi-bcm7429-qspi",
- .data = &bcm_qspi_no_rev_data,
- },
- {
- .compatible = "brcm,spi-bcm7435-qspi",
- .data = &bcm_qspi_no_rev_data,
+ .compatible = "brcm,spi-bcm7445-qspi",
+ .data = &bcm_qspi_rev_data,
+
},
{
.compatible = "brcm,spi-bcm-qspi",
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 41986ac0fbfb..7104cf17b848 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1193,7 +1193,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct gpio_chip *chip;
- enum gpio_lookup_flags lflags;
u32 cs;
/*
@@ -1259,21 +1258,9 @@ static int bcm2835_spi_setup(struct spi_device *spi)
if (!chip)
return 0;
- /*
- * Retrieve the corresponding GPIO line used for CS.
- * The inversion semantics will be handled by the GPIO core
- * code, so we pass GPIOD_OUT_LOW for "unasserted" and
- * the correct flag for inversion semantics. The SPI_CS_HIGH
- * on spi->mode cannot be checked for polarity in this case
- * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
- */
- if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
- lflags = GPIO_ACTIVE_HIGH;
- else
- lflags = GPIO_ACTIVE_LOW;
spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
DRV_NAME,
- lflags,
+ GPIO_LOOKUP_FLAGS_DEFAULT,
GPIOD_OUT_LOW);
if (IS_ERR(spi->cs_gpiod))
return PTR_ERR(spi->cs_gpiod);
@@ -1319,11 +1306,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
- err = PTR_ERR(bs->clk);
- if (err == -EPROBE_DEFER)
- dev_dbg(&pdev->dev, "could not get clk: %d\n", err);
- else
- dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ err = dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
+ "could not get clk\n");
goto out_controller_put;
}
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index c6795c684b16..40938cf3806d 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1119,11 +1119,8 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
cqspi->rx_chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(cqspi->rx_chan)) {
int ret = PTR_ERR(cqspi->rx_chan);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
cqspi->rx_chan = NULL;
- return ret;
+ return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 2b6b9c1ad9d0..70467b9d61ba 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -418,8 +418,8 @@ static int cdns_transfer_one(struct spi_master *master,
xspi->rx_bytes = transfer->len;
cdns_spi_setup_transfer(spi, transfer);
-
cdns_spi_fill_tx_fifo(xspi);
+ spi_transfer_delay_exec(transfer);
cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
return transfer->len;
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
new file mode 100644
index 000000000000..f382dfad7842
--- /dev/null
+++ b/drivers/spi/spi-dw-bt1.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+//
+// Authors:
+// Ramil Zaripov <Ramil.Zaripov@baikalelectronics.ru>
+// Serge Semin <Sergey.Semin@baikalelectronics.ru>
+//
+// Baikal-T1 DW APB SPI and System Boot SPI driver
+//
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/spi/spi.h>
+
+#include "spi-dw.h"
+
+#define BT1_BOOT_DIRMAP 0
+#define BT1_BOOT_REGS 1
+
+struct dw_spi_bt1 {
+ struct dw_spi dws;
+ struct clk *clk;
+ struct mux_control *mux;
+
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+ void __iomem *map;
+ resource_size_t map_len;
+#endif
+};
+#define to_dw_spi_bt1(_ctlr) \
+ container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws)
+
+typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1);
+
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+
+static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
+
+ if (!dwsbt1->map ||
+ !dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
+ return -EOPNOTSUPP;
+
+ /*
+ * Make sure the requested region doesn't go out of the physically
+ * mapped flash memory bounds and the operation is read-only.
+ */
+ if (desc->info.offset + desc->info.length > dwsbt1->map_len ||
+ desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * The directly mapped SPI memory region is only accessible in dword chunks.
+ * That's why we need a dedicated read method to copy data from there
+ * to the passed buffer.
+ */
+static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len)
+{
+ size_t shift, chunk;
+ u32 data;
+
+ /*
+ * We split the copying up into the next three stages: unaligned head,
+ * aligned body, unaligned tail.
+ */
+ shift = (size_t)from & 0x3;
+ if (shift) {
+ chunk = min_t(size_t, 4 - shift, len);
+ data = readl_relaxed(from - shift);
+ memcpy(to, (u8 *)&data + shift, chunk); /* offset in bytes, not u32s */
+ from += chunk;
+ to += chunk;
+ len -= chunk;
+ }
+
+ while (len >= 4) {
+ data = readl_relaxed(from);
+ memcpy(to, &data, 4);
+ from += 4;
+ to += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ data = readl_relaxed(from);
+ memcpy(to, &data, len);
+ }
+}
+
+static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
+ struct dw_spi *dws = &dwsbt1->dws;
+ struct spi_mem *mem = desc->mem;
+ struct dw_spi_cfg cfg;
+ int ret;
+
+ /*
+ * Make sure the requested operation length is valid. Truncate the
+ * length if it's greater than the length of the MMIO region.
+ */
+ if (offs >= dwsbt1->map_len || !len)
+ return 0;
+
+ len = min_t(size_t, len, dwsbt1->map_len - offs);
+
+ /* Collect the controller configuration required by the operation */
+ cfg.tmode = SPI_TMOD_EPROMREAD;
+ cfg.dfs = 8;
+ cfg.ndf = 4;
+ cfg.freq = mem->spi->max_speed_hz;
+
+ /* Make sure the corresponding CS is de-asserted on transmission */
+ dw_spi_set_cs(mem->spi, false);
+
+ spi_enable_chip(dws, 0);
+
+ dw_spi_update_config(dws, mem->spi, &cfg);
+
+ spi_umask_intr(dws, SPI_INT_RXFI);
+
+ spi_enable_chip(dws, 1);
+
+ /*
+ * Enable the transparent mode of the System Boot Controller.
+ * The SPI core IO should have been locked before calling this method
+ * so no one touches the controller's registers during the
+ * dirmap operation.
+ */
+ ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP);
+ if (ret)
+ return ret;
+
+ dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len);
+
+ mux_control_deselect(dwsbt1->mux);
+
+ dw_spi_set_cs(mem->spi, true);
+
+ ret = dw_spi_check_status(dws, true);
+
+ return ret ?: len;
+}
+
+#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
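
[Editor's note] For context, a hedged sketch of how a spi-mem client would exercise the dirmap callbacks above; the SPI_MEM_OP template (a plain 0x03 read) is illustrative, not something this driver mandates:

	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,	/* matches the Boot Controller window */
	};
	struct spi_mem_dirmap_desc *desc;

	desc = spi_mem_dirmap_create(mem, &info);
	if (!IS_ERR(desc))
		spi_mem_dirmap_read(desc, 0, len, buf); /* lands in the hook above */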
+
+static int dw_spi_bt1_std_init(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1)
+{
+ struct dw_spi *dws = &dwsbt1->dws;
+
+ dws->irq = platform_get_irq(pdev, 0);
+ if (dws->irq < 0)
+ return dws->irq;
+
+ dws->num_cs = 4;
+
+ /*
+ * Baikal-T1 Normal SPI Controllers don't always keep up with full SPI
+ * bus speed, especially when it comes to concurrent access to the
+ * APB bus resources. Thus we have no choice but to constrain the
+ * SPI bus frequency for the memory operations, which need to
+ * read/write data as fast as possible.
+ */
+ dws->max_mem_freq = 20000000U;
+
+ dw_spi_dma_setup_generic(dws);
+
+ return 0;
+}
+
+static int dw_spi_bt1_sys_init(struct platform_device *pdev,
+ struct dw_spi_bt1 *dwsbt1)
+{
+ struct resource *mem __maybe_unused;
+ struct dw_spi *dws = &dwsbt1->dws;
+
+ /*
+ * Baikal-T1 System Boot Controller is equipped with a mux, which
+ * switches between the directly mapped SPI flash access mode and
+ * IO access to the DW APB SSI registers. Note the mux controller
+ * must be set up so that the registers remain accessible by default
+ * (in the idle state).
+ */
+ dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsbt1->mux))
+ return PTR_ERR(dwsbt1->mux);
+
+ /*
+ * Directly mapped SPI flash memory is a 16MB MMIO region, which can be
+ * used to access a peripheral memory device just by reading/writing
+ * data from/to it. Note the system APB bus will stall during each IO
+ * from/to the dirmap region until the operation is finished. So don't
+ * use it concurrently with time-critical tasks (like the SPI memory
+ * operations implemented in the DW APB SSI driver).
+ */
+#ifdef CONFIG_SPI_DW_BT1_DIRMAP
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (mem) {
+ dwsbt1->map = devm_ioremap_resource(&pdev->dev, mem);
+ if (!IS_ERR(dwsbt1->map)) {
+ dwsbt1->map_len = resource_size(mem);
+ dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create;
+ dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read;
+ } else {
+ dwsbt1->map = NULL;
+ }
+ }
+#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
+
+ /*
+ * There is no IRQ, no DMA and just one CS available on the System Boot
+ * SPI controller.
+ */
+ dws->irq = IRQ_NOTCONNECTED;
+ dws->num_cs = 1;
+
+ /*
+ * Baikal-T1 System Boot SPI Controller doesn't keep up with the full
+ * SPI bus speed due to the relatively slow APB bus and races for its
+ * resources from different CPUs. The situation is worsened by the small
+ * FIFO depth (just 8 words). It works better in single-CPU mode,
+ * but still tends to be not fast enough at low CPU frequencies.
+ */
+ if (num_possible_cpus() > 1)
+ dws->max_mem_freq = 10000000U;
+ else
+ dws->max_mem_freq = 20000000U;
+
+ return 0;
+}
+
+static int dw_spi_bt1_probe(struct platform_device *pdev)
+{
+ dw_spi_bt1_init_cb init_func;
+ struct dw_spi_bt1 *dwsbt1;
+ struct resource *mem;
+ struct dw_spi *dws;
+ int ret;
+
+ dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_bt1), GFP_KERNEL);
+ if (!dwsbt1)
+ return -ENOMEM;
+
+ dws = &dwsbt1->dws;
+
+ dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dws->regs))
+ return PTR_ERR(dws->regs);
+
+ dws->paddr = mem->start;
+
+ dwsbt1->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsbt1->clk))
+ return PTR_ERR(dwsbt1->clk);
+
+ ret = clk_prepare_enable(dwsbt1->clk);
+ if (ret)
+ return ret;
+
+ dws->bus_num = pdev->id;
+ dws->reg_io_width = 4;
+ dws->max_freq = clk_get_rate(dwsbt1->clk);
+ if (!dws->max_freq) {
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
+
+ init_func = device_get_match_data(&pdev->dev);
+ ret = init_func(pdev, dwsbt1);
+ if (ret)
+ goto err_disable_clk;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret)
+ goto err_disable_pm;
+
+ platform_set_drvdata(pdev, dwsbt1);
+
+ return 0;
+
+err_disable_pm:
+ pm_runtime_disable(&pdev->dev);
+
+err_disable_clk:
+ clk_disable_unprepare(dwsbt1->clk);
+
+ return ret;
+}
+
+static int dw_spi_bt1_remove(struct platform_device *pdev)
+{
+ struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev);
+
+ dw_spi_remove_host(&dwsbt1->dws);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(dwsbt1->clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_spi_bt1_of_match[] = {
+ { .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init},
+ { .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match);
+
+static struct platform_driver dw_spi_bt1_driver = {
+ .probe = dw_spi_bt1_probe,
+ .remove = dw_spi_bt1_remove,
+ .driver = {
+ .name = "bt1-sys-ssi",
+ .of_match_table = dw_spi_bt1_of_match,
+ },
+};
+module_platform_driver(dw_spi_bt1_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 323c66c5db50..2e50cc0a9291 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -8,10 +8,14 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
+#include <linux/of.h>
#include "spi-dw.h"
@@ -19,13 +23,10 @@
#include <linux/debugfs.h>
#endif
-/* Slave spi_dev related */
+/* Slave spi_device related */
struct chip_data {
- u8 tmode; /* TR/TO/RO/EEPROM */
- u8 type; /* SPI/SSP/MicroWire */
-
- u16 clk_div; /* baud rate divider */
- u32 speed_hz; /* baud rate */
+ u32 cr0;
+ u32 rx_sample_dly; /* RX sample delay */
};
#ifdef CONFIG_DEBUG_FS
@@ -52,6 +53,7 @@ static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
+ DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};
static int dw_spi_debugfs_init(struct dw_spi *dws)
@@ -101,7 +103,7 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
*/
if (cs_high == enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
- else if (dws->cs_override)
+ else
dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);
@@ -109,9 +111,8 @@ EXPORT_SYMBOL_GPL(dw_spi_set_cs);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
- u32 tx_left, tx_room, rxtx_gap;
+ u32 tx_room, rxtx_gap;
- tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
/*
@@ -122,93 +123,124 @@ static inline u32 tx_max(struct dw_spi *dws)
* shift registers. So a control from sw point of
* view is taken.
*/
- rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
- / dws->n_bytes;
+ rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
- return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
+ return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}
/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
- u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
-
- return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
+ return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
{
- u32 max;
+ u32 max = tx_max(dws);
u16 txw = 0;
- spin_lock(&dws->buf_lock);
- max = tx_max(dws);
while (max--) {
- /* Set the tx word if the transfer's original "tx" is not null */
- if (dws->tx_end - dws->len) {
+ if (dws->tx) {
if (dws->n_bytes == 1)
txw = *(u8 *)(dws->tx);
else
txw = *(u16 *)(dws->tx);
+
+ dws->tx += dws->n_bytes;
}
dw_write_io_reg(dws, DW_SPI_DR, txw);
- dws->tx += dws->n_bytes;
+ --dws->tx_len;
}
- spin_unlock(&dws->buf_lock);
}
static void dw_reader(struct dw_spi *dws)
{
- u32 max;
+ u32 max = rx_max(dws);
u16 rxw;
- spin_lock(&dws->buf_lock);
- max = rx_max(dws);
while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
- /* Care rx only if the transfer's original "rx" is not null */
- if (dws->rx_end - dws->len) {
+ if (dws->rx) {
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else
*(u16 *)(dws->rx) = rxw;
+
+ dws->rx += dws->n_bytes;
}
- dws->rx += dws->n_bytes;
+ --dws->rx_len;
}
- spin_unlock(&dws->buf_lock);
}
-static void int_error_stop(struct dw_spi *dws, const char *msg)
+int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
- spi_reset_chip(dws);
+ u32 irq_status;
+ int ret = 0;
+
+ if (raw)
+ irq_status = dw_readl(dws, DW_SPI_RISR);
+ else
+ irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (irq_status & SPI_INT_RXOI) {
+ dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
+ ret = -EIO;
+ }
+
+ if (irq_status & SPI_INT_RXUI) {
+ dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
+ ret = -EIO;
+ }
+
+ if (irq_status & SPI_INT_TXOI) {
+ dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
+ ret = -EIO;
+ }
- dev_err(&dws->master->dev, "%s\n", msg);
- dws->master->cur_msg->status = -EIO;
- spi_finalize_current_transfer(dws->master);
+ /* Generically handle the erroneous situation */
+ if (ret) {
+ spi_reset_chip(dws);
+ if (dws->master->cur_msg)
+ dws->master->cur_msg->status = ret;
+ }
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(dw_spi_check_status);
-static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
- /* Error handling */
- if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
- dw_readl(dws, DW_SPI_ICR);
- int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+ if (dw_spi_check_status(dws, false)) {
+ spi_finalize_current_transfer(dws->master);
return IRQ_HANDLED;
}
+ /*
+ * Read data from the Rx FIFO every time this handler gets a chance to
+ * run. If there is nothing left to receive, terminate the procedure.
+ * Otherwise adjust the Rx FIFO Threshold level if this is the final
+ * stage of the transfer. By doing so we'll get the next IRQ right when
+ * the leftover incoming data is received.
+ */
dw_reader(dws);
- if (dws->rx_end == dws->rx) {
- spi_mask_intr(dws, SPI_INT_TXEI);
+ if (!dws->rx_len) {
+ spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->master);
- return IRQ_HANDLED;
+ } else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
+ dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
}
+
+ /*
+ * Send data out if the Tx FIFO Empty IRQ is received. The IRQ will be
+ * disabled after the data transmission is finished so as not to
+ * get a TXE IRQ flood at the final stage of the transfer.
+ */
if (irq_status & SPI_INT_TXEI) {
- spi_mask_intr(dws, SPI_INT_TXEI);
dw_writer(dws);
- /* Enable TX irq always, it will be disabled when RX finished */
- spi_umask_intr(dws, SPI_INT_TXEI);
+ if (!dws->tx_len)
+ spi_mask_intr(dws, SPI_INT_TXEI);
}
return IRQ_HANDLED;
@@ -224,105 +256,176 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
return IRQ_NONE;
if (!master->cur_msg) {
- spi_mask_intr(dws, SPI_INT_TXEI);
+ spi_mask_intr(dws, 0xff);
return IRQ_HANDLED;
}
return dws->transfer_handler(dws);
}
-/* Configure CTRLR0 for DW_apb_ssi */
-u32 dw_spi_update_cr0(struct spi_controller *master, struct spi_device *spi,
- struct spi_transfer *transfer)
+static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
- struct chip_data *chip = spi_get_ctldata(spi);
- u32 cr0;
-
- /* Default SPI mode is SCPOL = 0, SCPH = 0 */
- cr0 = (transfer->bits_per_word - 1)
- | (chip->type << SPI_FRF_OFFSET)
- | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
- (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
- (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
- | (chip->tmode << SPI_TMOD_OFFSET);
+ u32 cr0 = 0;
+
+ if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
+ /* CTRLR0[ 5: 4] Frame Format */
+ cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;
+
+ /*
+ * SPI mode (SCPOL|SCPH)
+ * CTRLR0[ 6] Serial Clock Phase
+ * CTRLR0[ 7] Serial Clock Polarity
+ */
+ cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
+ cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;
+
+ /* CTRLR0[11] Shift Register Loop */
+ cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
+ } else {
+ /* CTRLR0[ 7: 6] Frame Format */
+ cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;
+
+ /*
+ * SPI mode (SCPOL|SCPH)
+ * CTRLR0[ 8] Serial Clock Phase
+ * CTRLR0[ 9] Serial Clock Polarity
+ */
+ cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
+ cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;
+
+ /* CTRLR0[13] Shift Register Loop */
+ cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;
+
+ if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
+ cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
+ }
return cr0;
}
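For reference, here is a minimal user-space sketch (illustrative only, not part of the patch) of the mode bits dw_spi_prepare_cr0() computes for a device in SPI mode 3 (CPOL=1, CPHA=1), using the bit offsets documented in the comments above:

#include <stdio.h>

#define SPI_SCPH_OFFSET			6	/* DW APB SSI CTRLR0[6] */
#define SPI_SCOL_OFFSET			7	/* DW APB SSI CTRLR0[7] */
#define DWC_SSI_CTRLR0_SCPH_OFFSET	8	/* DWC SSI CTRLR0[8] */
#define DWC_SSI_CTRLR0_SCPOL_OFFSET	9	/* DWC SSI CTRLR0[9] */

int main(void)
{
	/* SPI mode 3: both CPOL and CPHA set */
	unsigned int apb = (1u << SPI_SCOL_OFFSET) | (1u << SPI_SCPH_OFFSET);
	unsigned int dwc = (1u << DWC_SSI_CTRLR0_SCPOL_OFFSET) |
			   (1u << DWC_SSI_CTRLR0_SCPH_OFFSET);

	printf("DW APB SSI mode bits: 0x%03x\n", apb);	/* 0x0c0 */
	printf("DWC SSI mode bits:    0x%03x\n", dwc);	/* 0x300 */
	return 0;
}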
-EXPORT_SYMBOL_GPL(dw_spi_update_cr0);
-/* Configure CTRLR0 for DWC_ssi */
-u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
- struct spi_device *spi,
- struct spi_transfer *transfer)
+void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
+ struct dw_spi_cfg *cfg)
{
struct chip_data *chip = spi_get_ctldata(spi);
- u32 cr0;
+ u32 cr0 = chip->cr0;
+ u32 speed_hz;
+ u16 clk_div;
+
+ /* CTRLR0[ 4/3: 0] Data Frame Size */
+ cr0 |= (cfg->dfs - 1);
+
+ if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
+ /* CTRLR0[ 9:8] Transfer Mode */
+ cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
+ else
+ /* CTRLR0[11:10] Transfer Mode */
+ cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;
+
+ dw_writel(dws, DW_SPI_CTRLR0, cr0);
+
+ if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
+ dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);
+
+ /* Note DW APB SSI clock divider doesn't support odd numbers */
+ clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
+ speed_hz = dws->max_freq / clk_div;
+
+ if (dws->current_freq != speed_hz) {
+ spi_set_clk(dws, clk_div);
+ dws->current_freq = speed_hz;
+ }
- /* CTRLR0[ 4: 0] Data Frame Size */
- cr0 = (transfer->bits_per_word - 1);
+ /* Update RX sample delay if required */
+ if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
+ dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
+ dws->cur_rx_sample_dly = chip->rx_sample_dly;
+ }
+}
+EXPORT_SYMBOL_GPL(dw_spi_update_config);
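The even-only divider rounding above is worth a worked example. Here is a minimal user-space sketch (not part of the patch), assuming a hypothetical 100 MHz input clock; note the effective rate never exceeds the requested one:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same rounding as dw_spi_update_config(): the next even divider */
static unsigned int dw_clk_div(unsigned int max_freq, unsigned int freq)
{
	return (DIV_ROUND_UP(max_freq, freq) + 1) & 0xfffe;
}

int main(void)
{
	unsigned int max_freq = 100000000U;	/* hypothetical SSI input clock */
	unsigned int freqs[] = { 50000000U, 30000000U, 25000000U, 10000000U };
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		unsigned int div = dw_clk_div(max_freq, freqs[i]);

		/* 50 MHz -> div 2; 30 MHz -> div 4, i.e. 25 MHz effective */
		printf("req %8u Hz -> div %2u -> eff %8u Hz\n",
		       freqs[i], div, max_freq / div);
	}
	return 0;
}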
- /* CTRLR0[ 7: 6] Frame Format */
- cr0 |= chip->type << DWC_SSI_CTRLR0_FRF_OFFSET;
+static void dw_spi_irq_setup(struct dw_spi *dws)
+{
+ u16 level;
+ u8 imask;
/*
- * SPI mode (SCPOL|SCPH)
- * CTRLR0[ 8] Serial Clock Phase
- * CTRLR0[ 9] Serial Clock Polarity
+ * Originally the Tx and Rx data lengths match. The Rx FIFO Threshold
+ * level will be adjusted at the final stage of the IRQ-based SPI
+ * transfer execution so as not to lose the leftover incoming data.
*/
- cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
- cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;
+ level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+ dw_writel(dws, DW_SPI_TXFTLR, level);
+ dw_writel(dws, DW_SPI_RXFTLR, level - 1);
- /* CTRLR0[11:10] Transfer Mode */
- cr0 |= chip->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;
+ imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
+ SPI_INT_RXFI;
+ spi_umask_intr(dws, imask);
- /* CTRLR0[13] Shift Register Loop */
- cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;
+ dws->transfer_handler = dw_spi_transfer_handler;
+}
- return cr0;
+/*
+ * The iterative procedure of the poll-based transfer is simple: write as much
+ * as possible to the Tx FIFO, wait until the pending incoming data is ready
+ * to be read, read it from the Rx FIFO and check whether the performed
+ * procedure has been successful.
+ *
+ * Note that this method, just like the IRQ-based transfer, won't work well
+ * for SPI devices connected to the controller with native CS due to the
+ * automatic CS assertion/de-assertion.
+ */
+static int dw_spi_poll_transfer(struct dw_spi *dws,
+ struct spi_transfer *transfer)
+{
+ struct spi_delay delay;
+ u16 nbits;
+ int ret;
+
+ delay.unit = SPI_DELAY_UNIT_SCK;
+ nbits = dws->n_bytes * BITS_PER_BYTE;
+
+ do {
+ dw_writer(dws);
+
+ delay.value = nbits * (dws->rx_len - dws->tx_len);
+ spi_delay_exec(&delay, transfer);
+
+ dw_reader(dws);
+
+ ret = dw_spi_check_status(dws, true);
+ if (ret)
+ return ret;
+ } while (dws->rx_len);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(dw_spi_update_cr0_v1_01a);
static int dw_spi_transfer_one(struct spi_controller *master,
struct spi_device *spi, struct spi_transfer *transfer)
{
struct dw_spi *dws = spi_controller_get_devdata(master);
- struct chip_data *chip = spi_get_ctldata(spi);
- unsigned long flags;
- u8 imask = 0;
- u16 txlevel = 0;
- u32 cr0;
+ struct dw_spi_cfg cfg = {
+ .tmode = SPI_TMOD_TR,
+ .dfs = transfer->bits_per_word,
+ .freq = transfer->speed_hz,
+ };
int ret;
dws->dma_mapped = 0;
- spin_lock_irqsave(&dws->buf_lock, flags);
+ dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
dws->tx = (void *)transfer->tx_buf;
- dws->tx_end = dws->tx + transfer->len;
+ dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
- dws->rx_end = dws->rx + transfer->len;
- dws->len = transfer->len;
- spin_unlock_irqrestore(&dws->buf_lock, flags);
+ dws->rx_len = dws->tx_len;
- /* Ensure dw->rx and dw->rx_end are visible */
+ /* Ensure the data above is visible to all CPUs */
smp_mb();
spi_enable_chip(dws, 0);
- /* Handle per transfer options for bpw and speed */
- if (transfer->speed_hz != dws->current_freq) {
- if (transfer->speed_hz != chip->speed_hz) {
- /* clk_div doesn't support odd number */
- chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
- chip->speed_hz = transfer->speed_hz;
- }
- dws->current_freq = transfer->speed_hz;
- spi_set_clk(dws, chip->clk_div);
- }
-
- transfer->effective_speed_hz = dws->max_freq / chip->clk_div;
- dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
+ dw_spi_update_config(dws, spi, &cfg);
- cr0 = dws->update_cr0(master, spi, transfer);
- dw_writel(dws, DW_SPI_CTRLR0, cr0);
+ transfer->effective_speed_hz = dws->current_freq;
/* Check if current transfer is a DMA transaction */
if (master->can_dma && master->can_dma(master, spi, transfer))
@@ -331,32 +434,20 @@ static int dw_spi_transfer_one(struct spi_controller *master,
/* For poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
- /*
- * Interrupt mode
- * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
- */
if (dws->dma_mapped) {
ret = dws->dma_ops->dma_setup(dws, transfer);
- if (ret < 0) {
- spi_enable_chip(dws, 1);
+ if (ret)
return ret;
- }
- } else {
- txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
- dw_writel(dws, DW_SPI_TXFTLR, txlevel);
-
- /* Set the interrupt mask */
- imask |= SPI_INT_TXEI | SPI_INT_TXOI |
- SPI_INT_RXUI | SPI_INT_RXOI;
- spi_umask_intr(dws, imask);
-
- dws->transfer_handler = interrupt_transfer;
}
spi_enable_chip(dws, 1);
if (dws->dma_mapped)
return dws->dma_ops->dma_transfer(dws, transfer);
+ else if (dws->irq == IRQ_NOTCONNECTED)
+ return dw_spi_poll_transfer(dws, transfer);
+
+ dw_spi_irq_setup(dws);
return 1;
}
@@ -372,21 +463,336 @@ static void dw_spi_handle_err(struct spi_controller *master,
spi_reset_chip(dws);
}
+static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);
+
+ return 0;
+}
+
+static bool dw_spi_supports_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
+ op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
+{
+ unsigned int i, j, len;
+ u8 *out;
+
+ /*
+ * Calculate the total length of the EEPROM command transfer and
+ * either use the pre-allocated buffer or create a temporary one.
+ */
+ len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ len += op->data.nbytes;
+
+ if (len <= SPI_BUF_SIZE) {
+ out = dws->buf;
+ } else {
+ out = kzalloc(len, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+ }
+
+ /*
+ * Collect the operation code, address and dummy bytes into a single
+ * buffer. If the transfer carries data to be sent, copy it into the
+ * same buffer as well in order to speed the data transmission up.
+ */
+ for (i = 0; i < op->cmd.nbytes; ++i)
+ out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
+ for (j = 0; j < op->addr.nbytes; ++i, ++j)
+ out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
+ for (j = 0; j < op->dummy.nbytes; ++i, ++j)
+ out[i] = 0x0;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(&out[i], op->data.buf.out, op->data.nbytes);
+
+ dws->n_bytes = 1;
+ dws->tx = out;
+ dws->tx_len = len;
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dws->rx = op->data.buf.in;
+ dws->rx_len = op->data.nbytes;
+ } else {
+ dws->rx = NULL;
+ dws->rx_len = 0;
+ }
+
+ return 0;
+}
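A minimal user-space sketch (not part of the patch) of the MSB-first serialization performed by the loops above, assuming a hypothetical 1-byte 0x03 read opcode with a 3-byte address 0x012345:

#include <stdio.h>

#define BITS_PER_BYTE	8
#define SPI_GET_BYTE(_val, _idx) \
	((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff)

int main(void)
{
	unsigned char out[4];
	unsigned int i, j;
	unsigned int cmd_nbytes = 1, addr_nbytes = 3;
	unsigned short opcode = 0x03;
	unsigned long long addr = 0x012345;

	/* Opcode first, then the address, both most significant byte first */
	for (i = 0; i < cmd_nbytes; ++i)
		out[i] = SPI_GET_BYTE(opcode, cmd_nbytes - i - 1);
	for (j = 0; j < addr_nbytes; ++i, ++j)
		out[i] = SPI_GET_BYTE(addr, addr_nbytes - j - 1);

	for (i = 0; i < 4; i++)
		printf("%02x ", out[i]);	/* prints: 03 01 23 45 */
	printf("\n");
	return 0;
}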
+
+static void dw_spi_free_mem_buf(struct dw_spi *dws)
+{
+ if (dws->tx != dws->buf)
+ kfree(dws->tx);
+}
+
+static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
+{
+ u32 room, entries, sts;
+ unsigned int len;
+ u8 *buf;
+
+ /*
+ * At the initial stage we just pre-fill the Tx FIFO with no rush,
+ * since the native CS hasn't been enabled yet and the automatic data
+ * transmission won't start until we do that.
+ */
+ len = min(dws->fifo_len, dws->tx_len);
+ buf = dws->tx;
+ while (len--)
+ dw_write_io_reg(dws, DW_SPI_DR, *buf++);
+
+ /*
+ * After setting any bit in the SER register the transmission will
+ * start automatically. We have to keep up with that procedure,
+ * otherwise CS will be de-asserted, whereupon the memory
+ * operation will be prematurely terminated.
+ */
+ len = dws->tx_len - ((void *)buf - dws->tx);
+ dw_spi_set_cs(spi, false);
+ while (len) {
+ entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
+ if (!entries) {
+ dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
+ return -EIO;
+ }
+ room = min(dws->fifo_len - entries, len);
+ for (; room; --room, --len)
+ dw_write_io_reg(dws, DW_SPI_DR, *buf++);
+ }
+
+ /*
+ * Data fetching will start automatically if the EEPROM-read mode is
+ * activated. We have to keep up with the incoming data pace to
+ * prevent an Rx FIFO overflow and the resulting inbound data loss.
+ */
+ len = dws->rx_len;
+ buf = dws->rx;
+ while (len) {
+ entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
+ if (!entries) {
+ sts = readl_relaxed(dws->regs + DW_SPI_RISR);
+ if (sts & SPI_INT_RXOI) {
+ dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
+ return -EIO;
+ }
+ continue;
+ }
+ entries = min(entries, len);
+ for (; entries; --entries, --len)
+ *buf++ = dw_read_io_reg(dws, DW_SPI_DR);
+ }
+
+ return 0;
+}
+
+static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
+{
+ return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
+}
+
+static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
+{
+ int retry = SPI_WAIT_RETRIES;
+ struct spi_delay delay;
+ unsigned long ns, us;
+ u32 nents;
+
+ nents = dw_readl(dws, DW_SPI_TXFLR);
+ ns = NSEC_PER_SEC / dws->current_freq * nents;
+ ns *= dws->n_bytes * BITS_PER_BYTE;
+ if (ns <= NSEC_PER_USEC) {
+ delay.unit = SPI_DELAY_UNIT_NSECS;
+ delay.value = ns;
+ } else {
+ us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
+ delay.unit = SPI_DELAY_UNIT_USECS;
+ delay.value = clamp_val(us, 0, USHRT_MAX);
+ }
+
+ while (dw_spi_ctlr_busy(dws) && retry--)
+ spi_delay_exec(&delay, NULL);
+
+ if (retry < 0) {
+ dev_err(&dws->master->dev, "Mem op hanged up\n");
+ return -EIO;
+ }
+
+ return 0;
+}
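A quick back-of-the-envelope check of the per-poll delay computed above (not part of the patch), assuming a hypothetical 25 MHz bus, 8 leftover Tx FIFO entries and 1 byte per word:

#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define NSEC_PER_USEC	1000UL
#define BITS_PER_BYTE	8

int main(void)
{
	unsigned long current_freq = 25000000UL, nents = 8, n_bytes = 1;
	unsigned long ns = NSEC_PER_SEC / current_freq * nents;

	ns *= n_bytes * BITS_PER_BYTE;
	/* 40 ns/bit * 8 entries * 8 bits = 2560 ns -> poll every ~3 us */
	printf("%lu ns (~%lu us)\n", ns, (ns + NSEC_PER_USEC - 1) / NSEC_PER_USEC);
	return 0;
}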
+
+static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
+{
+ spi_enable_chip(dws, 0);
+ dw_spi_set_cs(spi, true);
+ spi_enable_chip(dws, 1);
+}
+
+/*
+ * The SPI memory operation implementation below is the best choice for
+ * devices that are selected by the native chip-select lane. It's
+ * specifically developed to work around the problem of the automatic
+ * chip-select lane toggling when there is no data in the Tx FIFO buffer.
+ * Luckily the current SPI-mem core calls the exec_op() callback only if a
+ * GPIO-based CS is unavailable.
+ */
+static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
+ struct dw_spi_cfg cfg;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Collect the outbound data into a single buffer to speed the
+ * transmission up at least at the initial stage.
+ */
+ ret = dw_spi_init_mem_buf(dws, op);
+ if (ret)
+ return ret;
+
+ /*
+ * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
+ * operation. Transmit-only mode is suitable for the rest of them.
+ */
+ cfg.dfs = 8;
+ cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ cfg.tmode = SPI_TMOD_EPROMREAD;
+ cfg.ndf = op->data.nbytes;
+ } else {
+ cfg.tmode = SPI_TMOD_TO;
+ }
+
+ spi_enable_chip(dws, 0);
+
+ dw_spi_update_config(dws, mem->spi, &cfg);
+
+ spi_mask_intr(dws, 0xff);
+
+ spi_enable_chip(dws, 1);
+
+ /*
+ * The DW APB SSI controller has some very nasty peculiarities. First,
+ * originally (without any vendor-specific modifications) it doesn't
+ * provide a direct way to set and clear the native chip-select signal.
+ * Instead the controller asserts the CS lane if the Tx FIFO isn't empty
+ * and a transmission is going on, and automatically de-asserts it back
+ * to the high level when the Tx FIFO has nothing left to be pushed
+ * out. Because of that, multi-tasking or heavy IRQ activity might be
+ * fatal, since preemption of the transfer procedure may let the Tx FIFO
+ * drain, causing a sudden CS de-assertion, which in the middle of the
+ * transfer will most likely cause data loss. Secondly, the
+ * EEPROM-read and Read-only DW SPI transfer modes imply that the
+ * incoming data is automatically pulled into the Rx FIFO. So if the
+ * driver software is late in fetching the data from the FIFO before
+ * it overflows, the new incoming data will be lost. In order to make
+ * sure the executed memory operations are CS-atomic and to prevent an
+ * Rx FIFO overflow, we have to disable the local interrupts so as to
+ * block any preemption during the subsequent IO operations.
+ *
+ * Note: in some circumstances disabling IRQs may not help to prevent
+ * the problems described above. The CS de-assertion and Rx FIFO
+ * overflow may still happen due to a relatively slow system bus or a
+ * CPU not working fast enough, so that the write-then-read algorithm
+ * implemented here just won't keep up with the SPI bus data transfer.
+ * Such a situation is highly platform specific and is supposed to be
+ * fixed by manually restricting the SPI bus frequency using the
+ * dws->max_mem_freq parameter.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+
+ ret = dw_spi_write_then_read(dws, mem->spi);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ /*
+ * Wait for the operation to finish and check the controller status,
+ * but only if no run-time error has been detected. If one has, waiting
+ * is just pointless, and checking the status would only print an
+ * additional error message, since any hw error flag being set would be
+ * due to the error already detected on the data transfer.
+ */
+ if (!ret) {
+ ret = dw_spi_wait_mem_op_done(dws);
+ if (!ret)
+ ret = dw_spi_check_status(dws, true);
+ }
+
+ dw_spi_stop_mem_op(dws, mem->spi);
+
+ dw_spi_free_mem_buf(dws);
+
+ return ret;
+}
+
+/*
+ * Initialize the default memory operations if a glue layer hasn't specified
+ * custom ones. Direct mapping operations will be preserved anyway since the
+ * DW SPI controller doesn't have an embedded dirmap interface. Note the memory
+ * operations implemented in this driver are the best choice only for the DW APB
+ * SSI controller with the standard native CS functionality. If a hardware vendor
+ * has fixed the automatic CS assertion/de-assertion peculiarity, then it will
+ * be safer to use the normal SPI-message-based transfer implementation.
+ */
+static void dw_spi_init_mem_ops(struct dw_spi *dws)
+{
+ if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
+ !dws->set_cs) {
+ dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
+ dws->mem_ops.supports_op = dw_spi_supports_mem_op;
+ dws->mem_ops.exec_op = dw_spi_exec_mem_op;
+ if (!dws->max_mem_freq)
+ dws->max_mem_freq = dws->max_freq;
+ }
+}
+
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
+ u32 rx_sample_dly_ns;
+
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
spi_set_ctldata(spi, chip);
+ /* Get specific / default rx-sample-delay */
+ if (device_property_read_u32(&spi->dev,
+ "rx-sample-delay-ns",
+ &rx_sample_dly_ns) != 0)
+ /* Use default controller value */
+ rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
+ chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
+ NSEC_PER_SEC /
+ dws->max_freq);
}
- chip->tmode = SPI_TMOD_TR;
+ /*
+ * Update CR0 data each time the setup callback is invoked since
+ * the device parameters could have been changed, for instance, by
+ * the MMC SPI driver or something else.
+ */
+ chip->cr0 = dw_spi_prepare_cr0(dws, spi);
return 0;
}
@@ -423,7 +829,7 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
}
/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
- if (dws->cs_override)
+ if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
@@ -440,19 +846,22 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
return -ENOMEM;
dws->master = master;
- dws->type = SSI_MOTO_SPI;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
- spin_lock_init(&dws->buf_lock);
spi_controller_set_devdata(master, dws);
+ /* Basic HW init */
+ spi_hw_init(dev, dws);
+
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
master);
- if (ret < 0) {
+ if (ret < 0 && ret != -ENOTCONN) {
dev_err(dev, "can not get IRQ\n");
goto err_free_master;
}
+ dw_spi_init_mem_ops(dws);
+
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
@@ -460,20 +869,22 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->num_chipselect = dws->num_cs;
master->setup = dw_spi_setup;
master->cleanup = dw_spi_cleanup;
- master->set_cs = dw_spi_set_cs;
+ if (dws->set_cs)
+ master->set_cs = dws->set_cs;
+ else
+ master->set_cs = dw_spi_set_cs;
master->transfer_one = dw_spi_transfer_one;
master->handle_err = dw_spi_handle_err;
+ master->mem_ops = &dws->mem_ops;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
master->dev.fwnode = dev->fwnode;
master->flags = SPI_MASTER_GPIO_SS;
master->auto_runtime_pm = true;
- if (dws->set_cs)
- master->set_cs = dws->set_cs;
-
- /* Basic HW init */
- spi_hw_init(dev, dws);
+ /* Get default rx sample delay */
+ device_property_read_u32(dev, "rx-sample-delay-ns",
+ &dws->def_rx_sample_dly_ns);
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dev, dws);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index bb390ff67d1d..a09831c62192 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -17,7 +17,6 @@
#include "spi-dw.h"
-#define WAIT_RETRIES 5
#define RX_BUSY 0
#define RX_BURST_LEVEL 16
#define TX_BUSY 1
@@ -49,6 +48,7 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
max_burst = RX_BURST_LEVEL;
dws->rxburst = min(max_burst, def_burst);
+ dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
ret = dma_get_slave_caps(dws->txchan, &caps);
if (!ret && caps.max_burst)
@@ -56,7 +56,36 @@ static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
else
max_burst = TX_BURST_LEVEL;
+ /*
+ * Having an Rx DMA channel serviced with higher priority than a Tx DMA
+ * channel might not be enough to provide a well-balanced DMA-based
+ * SPI transfer interface. There might still be moments when the Tx DMA
+ * channel is occasionally handled faster than the Rx DMA channel.
+ * That in turn will eventually cause the SPI Rx FIFO to overflow if the
+ * SPI bus speed is high enough to fill the SPI Rx FIFO before it's
+ * cleared by the Rx DMA channel. In order to fix the problem the Tx
+ * DMA activity is intentionally slowed down by limiting the SPI Tx
+ * FIFO depth with a value twice as big as the Tx burst length.
+ */
dws->txburst = min(max_burst, def_burst);
+ dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
+}
+
+static void dw_spi_dma_sg_burst_init(struct dw_spi *dws)
+{
+ struct dma_slave_caps tx = {0}, rx = {0};
+
+ dma_get_slave_caps(dws->txchan, &tx);
+ dma_get_slave_caps(dws->rxchan, &rx);
+
+ if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
+ dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
+ else if (tx.max_sg_burst > 0)
+ dws->dma_sg_burst = tx.max_sg_burst;
+ else if (rx.max_sg_burst > 0)
+ dws->dma_sg_burst = rx.max_sg_burst;
+ else
+ dws->dma_sg_burst = 0;
}
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
@@ -96,6 +125,8 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
dw_spi_dma_maxburst_init(dws);
+ dw_spi_dma_sg_burst_init(dws);
+
return 0;
free_rxchan:
@@ -125,6 +156,8 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
dw_spi_dma_maxburst_init(dws);
+ dw_spi_dma_sg_burst_init(dws);
+
return 0;
}
@@ -139,23 +172,14 @@ static void dw_spi_dma_exit(struct dw_spi *dws)
dmaengine_terminate_sync(dws->rxchan);
dma_release_channel(dws->rxchan);
}
-
- dw_writel(dws, DW_SPI_DMACR, 0);
}
static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
- u16 irq_status = dw_readl(dws, DW_SPI_ISR);
-
- if (!irq_status)
- return IRQ_NONE;
-
- dw_readl(dws, DW_SPI_ICR);
- spi_reset_chip(dws);
+ dw_spi_check_status(dws, false);
- dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
- dws->master->cur_msg->status = -EIO;
complete(&dws->dma_completion);
+
return IRQ_HANDLED;
}
@@ -177,12 +201,12 @@ static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}
-static int dw_spi_dma_wait(struct dw_spi *dws, struct spi_transfer *xfer)
+static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
unsigned long long ms;
- ms = xfer->len * MSEC_PER_SEC * BITS_PER_BYTE;
- do_div(ms, xfer->effective_speed_hz);
+ ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
+ do_div(ms, speed);
ms += ms + 200;
if (ms > UINT_MAX)
@@ -208,7 +232,7 @@ static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
struct spi_transfer *xfer)
{
- int retry = WAIT_RETRIES;
+ int retry = SPI_WAIT_RETRIES;
struct spi_delay delay;
u32 nents;
@@ -239,18 +263,12 @@ static void dw_spi_dma_tx_done(void *arg)
if (test_bit(RX_BUSY, &dws->dma_chan_busy))
return;
- dw_writel(dws, DW_SPI_DMACR, 0);
complete(&dws->dma_completion);
}
-static struct dma_async_tx_descriptor *
-dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
+static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
struct dma_slave_config txconf;
- struct dma_async_tx_descriptor *txdesc;
-
- if (!xfer->tx_buf)
- return NULL;
memset(&txconf, 0, sizeof(txconf));
txconf.direction = DMA_MEM_TO_DEV;
@@ -260,20 +278,35 @@ dw_spi_dma_prepare_tx(struct dw_spi *dws, struct spi_transfer *xfer)
txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
txconf.device_fc = false;
- dmaengine_slave_config(dws->txchan, &txconf);
+ return dmaengine_slave_config(dws->txchan, &txconf);
+}
- txdesc = dmaengine_prep_slave_sg(dws->txchan,
- xfer->tx_sg.sgl,
- xfer->tx_sg.nents,
- DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
+ unsigned int nents)
+{
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+ int ret;
+
+ txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
- return NULL;
+ return -ENOMEM;
txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
- return txdesc;
+ cookie = dmaengine_submit(txdesc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_sync(dws->txchan);
+ return ret;
+ }
+
+ set_bit(TX_BUSY, &dws->dma_chan_busy);
+
+ return 0;
}
static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
@@ -283,7 +316,7 @@ static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
- int retry = WAIT_RETRIES;
+ int retry = SPI_WAIT_RETRIES;
struct spi_delay delay;
unsigned long ns, us;
u32 nents;
@@ -331,18 +364,12 @@ static void dw_spi_dma_rx_done(void *arg)
if (test_bit(TX_BUSY, &dws->dma_chan_busy))
return;
- dw_writel(dws, DW_SPI_DMACR, 0);
complete(&dws->dma_completion);
}
-static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
- struct spi_transfer *xfer)
+static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
struct dma_slave_config rxconf;
- struct dma_async_tx_descriptor *rxdesc;
-
- if (!xfer->rx_buf)
- return NULL;
memset(&rxconf, 0, sizeof(rxconf));
rxconf.direction = DMA_DEV_TO_MEM;
@@ -352,50 +379,64 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
rxconf.device_fc = false;
- dmaengine_slave_config(dws->rxchan, &rxconf);
+ return dmaengine_slave_config(dws->rxchan, &rxconf);
+}
+
+static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
+ unsigned int nents)
+{
+ struct dma_async_tx_descriptor *rxdesc;
+ dma_cookie_t cookie;
+ int ret;
- rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
- xfer->rx_sg.sgl,
- xfer->rx_sg.nents,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
- return NULL;
+ return -ENOMEM;
rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
- return rxdesc;
+ cookie = dmaengine_submit(rxdesc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_sync(dws->rxchan);
+ return ret;
+ }
+
+ set_bit(RX_BUSY, &dws->dma_chan_busy);
+
+ return 0;
}
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
- u16 imr = 0, dma_ctrl = 0;
+ u16 imr, dma_ctrl;
+ int ret;
- /*
- * Having a Rx DMA channel serviced with higher priority than a Tx DMA
- * channel might not be enough to provide a well balanced DMA-based
- * SPI transfer interface. There might still be moments when the Tx DMA
- * channel is occasionally handled faster than the Rx DMA channel.
- * That in its turn will eventually cause the SPI Rx FIFO overflow if
- * SPI bus speed is high enough to fill the SPI Rx FIFO in before it's
- * cleared by the Rx DMA channel. In order to fix the problem the Tx
- * DMA activity is intentionally slowed down by limiting the SPI Tx
- * FIFO depth with a value twice bigger than the Tx burst length
- * calculated earlier by the dw_spi_dma_maxburst_init() method.
- */
- dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);
- dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
+ if (!xfer->tx_buf)
+ return -EINVAL;
+
+ /* Setup DMA channels */
+ ret = dw_spi_dma_config_tx(dws);
+ if (ret)
+ return ret;
- if (xfer->tx_buf)
- dma_ctrl |= SPI_DMA_TDMAE;
+ if (xfer->rx_buf) {
+ ret = dw_spi_dma_config_rx(dws);
+ if (ret)
+ return ret;
+ }
+
+ /* Set the DMA handshaking interface */
+ dma_ctrl = SPI_DMA_TDMAE;
if (xfer->rx_buf)
dma_ctrl |= SPI_DMA_RDMAE;
dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
/* Set the interrupt mask */
- if (xfer->tx_buf)
- imr |= SPI_INT_TXOI;
+ imr = SPI_INT_TXOI;
if (xfer->rx_buf)
imr |= SPI_INT_RXUI | SPI_INT_RXOI;
spi_umask_intr(dws, imr);
@@ -407,41 +448,166 @@ static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
return 0;
}
-static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
+static int dw_spi_dma_transfer_all(struct dw_spi *dws,
+ struct spi_transfer *xfer)
{
- struct dma_async_tx_descriptor *txdesc, *rxdesc;
int ret;
- /* Prepare the TX dma transfer */
- txdesc = dw_spi_dma_prepare_tx(dws, xfer);
+ /* Submit the DMA Tx transfer */
+ ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
+ if (ret)
+ goto err_clear_dmac;
- /* Prepare the RX dma transfer */
- rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
+ /* Submit the DMA Rx transfer if required */
+ if (xfer->rx_buf) {
+ ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents);
+ if (ret)
+ goto err_clear_dmac;
- /* rx must be started before tx due to spi instinct */
- if (rxdesc) {
- set_bit(RX_BUSY, &dws->dma_chan_busy);
- dmaengine_submit(rxdesc);
+ /* rx must be started before tx due to spi instinct */
dma_async_issue_pending(dws->rxchan);
}
- if (txdesc) {
- set_bit(TX_BUSY, &dws->dma_chan_busy);
- dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+
+ ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);
+
+err_clear_dmac:
+ dw_writel(dws, DW_SPI_DMACR, 0);
+
+ return ret;
+}
+
+/*
+ * If at least one of the requested DMA channels doesn't support the
+ * hardware-accelerated SG list traversal, the DMA driver will most
+ * likely work around that by performing IRQ-based resubmission of the
+ * SG list entries. That might and will cause a problem if the DMA Tx
+ * channel is recharged and re-executed before the Rx DMA channel. Due to
+ * the non-deterministic IRQ-handler execution latency, the DMA Tx channel
+ * will start pushing data to the SPI bus before the Rx DMA channel is even
+ * reinitialized with the next inbound SG list entry. By doing so the DMA Tx
+ * channel will implicitly start filling the DW APB SSI Rx FIFO up, which,
+ * while the DMA Rx channel is being recharged and re-executed, will
+ * eventually overflow.
+ *
+ * In order to solve the problem we have to feed the DMA engine with the SG
+ * list entries one by one. That keeps the DW APB SSI Tx and Rx FIFOs
+ * synchronized and prevents the Rx FIFO overflow. Since in general the tx_sg
+ * and rx_sg lists may have different numbers of entries with different
+ * lengths (though the total lengths should match), let's virtually split the
+ * SG lists into a set of DMA transfers, each with a length that is the
+ * minimum of the current Tx and Rx SG entry lengths. An ASCII sketch of the
+ * implemented algorithm follows:
+ * xfer->len
+ * |___________|
+ * tx_sg list: |___|____|__|
+ * rx_sg list: |_|____|____|
+ * DMA transfers: |_|_|__|_|__|
+ *
+ * Note that in order for this workaround to solve the denoted problem, the
+ * DMA engine driver should properly initialize the max_sg_burst capability
+ * and set the DMA device max segment size parameter to the maximum data
+ * block size the DMA engine supports.
+ */
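A minimal user-space sketch (not part of the patch) of the virtual split described above, with hypothetical SG entry lengths; it reproduces the min-of-ordered-lengths walk used by dw_spi_dma_transfer_one() below:

#include <stdio.h>

int main(void)
{
	unsigned int tx[] = { 4, 5, 3 }, rx[] = { 2, 5, 5 };	/* both sum to 12 */
	unsigned int ti = 0, ri = 0, tx_len = 0, rx_len = 0;
	unsigned int base, len, total = 12;

	for (base = 0, len = 0; base < total; base += len) {
		if (!tx_len)
			tx_len = tx[ti++];	/* fetch next Tx chunk */
		if (!rx_len)
			rx_len = rx[ri++];	/* fetch next Rx chunk */

		len = tx_len < rx_len ? tx_len : rx_len;
		printf("DMA transfer of %u bytes\n", len);	/* 2, 2, 3, 2, 3 */

		tx_len -= len;
		rx_len -= len;
	}
	return 0;
}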
+
+static int dw_spi_dma_transfer_one(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
+ unsigned int tx_len = 0, rx_len = 0;
+ unsigned int base, len;
+ int ret;
+
+ sg_init_table(&tx_tmp, 1);
+ sg_init_table(&rx_tmp, 1);
+
+ for (base = 0, len = 0; base < xfer->len; base += len) {
+ /* Fetch next Tx DMA data chunk */
+ if (!tx_len) {
+ tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
+ sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
+ tx_len = sg_dma_len(tx_sg);
+ }
+
+ /* Fetch next Rx DMA data chunk */
+ if (!rx_len) {
+ rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
+ sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
+ rx_len = sg_dma_len(rx_sg);
+ }
+
+ len = min(tx_len, rx_len);
+
+ sg_dma_len(&tx_tmp) = len;
+ sg_dma_len(&rx_tmp) = len;
+
+ /* Submit DMA Tx transfer */
+ ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
+ if (ret)
+ break;
+
+ /* Submit DMA Rx transfer */
+ ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
+ if (ret)
+ break;
+
+ /* Rx must be started before Tx due to SPI instinct */
+ dma_async_issue_pending(dws->rxchan);
+
dma_async_issue_pending(dws->txchan);
+
+ /*
+ * Here we only need to wait for the DMA transfer to be
+ * finished, since the SPI controller is kept enabled during the
+ * procedure this loop implements and there is no risk of losing
+ * the data left in the Tx/Rx FIFOs.
+ */
+ ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
+ if (ret)
+ break;
+
+ reinit_completion(&dws->dma_completion);
+
+ sg_dma_address(&tx_tmp) += len;
+ sg_dma_address(&rx_tmp) += len;
+ tx_len -= len;
+ rx_len -= len;
}
- ret = dw_spi_dma_wait(dws, xfer);
+ dw_writel(dws, DW_SPI_DMACR, 0);
+
+ return ret;
+}
+
+static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ unsigned int nents;
+ int ret;
+
+ nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);
+
+ /*
+ * Execute the normal DMA-based transfer (which submits the Rx and Tx SG
+ * lists directly to the DMA engine at once) if either the fully
+ * hardware-accelerated SG list traversal is supported by both channels,
+ * or a Tx-only SPI transfer is requested, or the DMA engine is capable
+ * of handling both SG lists on a hardware-accelerated basis.
+ */
+ if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
+ ret = dw_spi_dma_transfer_all(dws, xfer);
+ else
+ ret = dw_spi_dma_transfer_one(dws, xfer);
if (ret)
return ret;
- if (txdesc && dws->master->cur_msg->status == -EINPROGRESS) {
+ if (dws->master->cur_msg->status == -EINPROGRESS) {
ret = dw_spi_dma_wait_tx_done(dws, xfer);
if (ret)
return ret;
}
- if (rxdesc && dws->master->cur_msg->status == -EINPROGRESS)
+ if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
ret = dw_spi_dma_wait_rx_done(dws);
return ret;
@@ -457,8 +623,6 @@ static void dw_spi_dma_stop(struct dw_spi *dws)
dmaengine_terminate_sync(dws->rxchan);
clear_bit(RX_BUSY, &dws->dma_chan_busy);
}
-
- dw_writel(dws, DW_SPI_DMACR, 0);
}
static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 403403deae66..d0cc5bf4fa4e 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -45,16 +45,12 @@ struct dw_spi_mmio {
#define MSCC_SPI_MST_SW_MODE_SW_PIN_CTRL_MODE BIT(13)
#define MSCC_SPI_MST_SW_MODE_SW_SPI_CS(x) (x << 5)
-/*
- * For Keem Bay, CTRLR0[31] is used to select controller mode.
- * 0: SSI is slave
- * 1: SSI is master
- */
-#define KEEMBAY_CTRLR0_SSIC_IS_MST BIT(31)
+#define SPARX5_FORCE_ENA 0xa4
+#define SPARX5_FORCE_VAL 0xa8
struct dw_spi_mscc {
struct regmap *syscon;
- void __iomem *spi_mst;
+ void __iomem *spi_mst; /* Not sparx5 */
};
/*
@@ -114,9 +110,6 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
dwsmmio->priv = dwsmscc;
- /* Register hook to configure CTRLR0 */
- dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
-
return 0;
}
@@ -134,13 +127,71 @@ static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
JAGUAR2_IF_SI_OWNER_OFFSET);
}
+/*
+ * The Designware SPI controller (referred to as master in the
+ * documentation) automatically deasserts the chip select when the Tx FIFO
+ * is empty. The chip select then needs to be driven by a CS override
+ * register. The enable argument is an active-low signal.
+ */
+static void dw_spi_sparx5_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
+ struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
+ u8 cs = spi->chip_select;
+
+ if (!enable) {
+ /* CS override drive enable */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 1);
+ /* Now set CSx enabled */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~BIT(cs));
+ /* Allow settle */
+ usleep_range(1, 5);
+ } else {
+ /* CS value */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_VAL, ~0);
+ /* Allow settle */
+ usleep_range(1, 5);
+ /* CS override drive disable */
+ regmap_write(dwsmscc->syscon, SPARX5_FORCE_ENA, 0);
+ }
+
+ dw_spi_set_cs(spi, enable);
+}
+
+static int dw_spi_mscc_sparx5_init(struct platform_device *pdev,
+ struct dw_spi_mmio *dwsmmio)
+{
+ const char *syscon_name = "microchip,sparx5-cpu-syscon";
+ struct device *dev = &pdev->dev;
+ struct dw_spi_mscc *dwsmscc;
+
+ if (!IS_ENABLED(CONFIG_SPI_MUX)) {
+ dev_err(dev, "This driver needs CONFIG_SPI_MUX\n");
+ return -EOPNOTSUPP;
+ }
+
+ dwsmscc = devm_kzalloc(dev, sizeof(*dwsmscc), GFP_KERNEL);
+ if (!dwsmscc)
+ return -ENOMEM;
+
+ dwsmscc->syscon =
+ syscon_regmap_lookup_by_compatible(syscon_name);
+ if (IS_ERR(dwsmscc->syscon)) {
+ dev_err(dev, "No syscon map %s\n", syscon_name);
+ return PTR_ERR(dwsmscc->syscon);
+ }
+
+ dwsmmio->dws.set_cs = dw_spi_sparx5_set_cs;
+ dwsmmio->priv = dwsmscc;
+
+ return 0;
+}
+
static int dw_spi_alpine_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
- dwsmmio->dws.cs_override = 1;
-
- /* Register hook to configure CTRLR0 */
- dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
+ dwsmmio->dws.caps = DW_SPI_CAP_CS_OVERRIDE;
return 0;
}
@@ -148,9 +199,6 @@ static int dw_spi_alpine_init(struct platform_device *pdev,
static int dw_spi_dw_apb_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
- /* Register hook to configure CTRLR0 */
- dwsmmio->dws.update_cr0 = dw_spi_update_cr0;
-
dw_spi_dma_setup_generic(&dwsmmio->dws);
return 0;
@@ -159,28 +207,17 @@ static int dw_spi_dw_apb_init(struct platform_device *pdev,
static int dw_spi_dwc_ssi_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
- /* Register hook to configure CTRLR0 */
- dwsmmio->dws.update_cr0 = dw_spi_update_cr0_v1_01a;
+ dwsmmio->dws.caps = DW_SPI_CAP_DWC_SSI;
dw_spi_dma_setup_generic(&dwsmmio->dws);
return 0;
}
-static u32 dw_spi_update_cr0_keembay(struct spi_controller *master,
- struct spi_device *spi,
- struct spi_transfer *transfer)
-{
- u32 cr0 = dw_spi_update_cr0_v1_01a(master, spi, transfer);
-
- return cr0 | KEEMBAY_CTRLR0_SSIC_IS_MST;
-}
-
static int dw_spi_keembay_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
- /* Register hook to configure CTRLR0 */
- dwsmmio->dws.update_cr0 = dw_spi_update_cr0_keembay;
+ dwsmmio->dws.caps = DW_SPI_CAP_KEEMBAY_MST | DW_SPI_CAP_DWC_SSI;
return 0;
}
@@ -297,6 +334,7 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "renesas,rzn1-spi", .data = dw_spi_dw_apb_init},
{ .compatible = "snps,dwc-ssi-1.01a", .data = dw_spi_dwc_ssi_init},
{ .compatible = "intel,keembay-ssi", .data = dw_spi_keembay_init},
+ { .compatible = "microchip,sparx5-spi", dw_spi_mscc_sparx5_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 2ea73809ca34..8a91cd58102f 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -48,9 +48,6 @@ static int spi_mid_init(struct dw_spi *dws)
iounmap(clk_reg);
- /* Register hook to configure CTRLR0 */
- dws->update_cr0 = dw_spi_update_cr0;
-
dw_spi_dma_setup_mfld(dws);
return 0;
@@ -58,9 +55,6 @@ static int spi_mid_init(struct dw_spi *dws)
static int spi_generic_init(struct dw_spi *dws)
{
- /* Register hook to configure CTRLR0 */
- dws->update_cr0 = dw_spi_update_cr0;
-
dw_spi_dma_setup_generic(dws);
return 0;
@@ -127,18 +121,16 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (desc->setup) {
ret = desc->setup(dws);
if (ret)
- return ret;
+ goto err_free_irq_vectors;
}
} else {
- pci_free_irq_vectors(pdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_free_irq_vectors;
}
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret) {
- pci_free_irq_vectors(pdev);
- return ret;
- }
+ if (ret)
+ goto err_free_irq_vectors;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dws);
@@ -152,6 +144,10 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pm_runtime_allow(&pdev->dev);
return 0;
+
+err_free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return ret;
}
static void spi_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 151ba316619e..faf40cb66498 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -2,11 +2,13 @@
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H
+#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/irqreturn.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
+#include <linux/spi/spi-mem.h>
/* Register offsets */
#define DW_SPI_CTRLR0 0x00
@@ -34,6 +36,7 @@
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
+#define DW_SPI_RX_SAMPLE_DLY 0xf0
#define DW_SPI_CS_OVERRIDE 0xf4
/* Bit fields in CTRLR0 */
@@ -69,6 +72,16 @@
#define DWC_SSI_CTRLR0_FRF_OFFSET 6
#define DWC_SSI_CTRLR0_DFS_OFFSET 0
+/*
+ * For Keem Bay, CTRLR0[31] is used to select controller mode.
+ * 0: SSI is slave
+ * 1: SSI is master
+ */
+#define DWC_SSI_CTRLR0_KEEMBAY_MST BIT(31)
+
+/* Bit fields in CTRLR1 */
+#define SPI_NDF_MASK GENMASK(15, 0)
+
/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
@@ -91,8 +104,12 @@
#define SPI_DMA_RDMAE (1 << 0)
#define SPI_DMA_TDMAE (1 << 1)
-/* TX RX interrupt level threshold, max can be 256 */
-#define SPI_INT_THRESHOLD 32
+#define SPI_WAIT_RETRIES 5
+#define SPI_BUF_SIZE \
+ (sizeof_field(struct spi_mem_op, cmd.opcode) + \
+ sizeof_field(struct spi_mem_op, addr.val) + 256)
+#define SPI_GET_BYTE(_val, _idx) \
+ ((_val) >> (BITS_PER_BYTE * (_idx)) & 0xff)
enum dw_ssi_type {
SSI_MOTO_SPI = 0,
@@ -100,6 +117,19 @@ enum dw_ssi_type {
SSI_NS_MICROWIRE,
};
+/* DW SPI capabilities */
+#define DW_SPI_CAP_CS_OVERRIDE BIT(0)
+#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
+#define DW_SPI_CAP_DWC_SSI BIT(2)
+
+/* Slave spi_transfer/spi_mem_op related */
+struct dw_spi_cfg {
+ u8 tmode;
+ u8 dfs;
+ u32 ndf;
+ u32 freq;
+};
+
struct dw_spi;
struct dw_spi_dma_ops {
int (*dma_init)(struct device *dev, struct dw_spi *dws);
@@ -113,39 +143,43 @@ struct dw_spi_dma_ops {
struct dw_spi {
struct spi_controller *master;
- enum dw_ssi_type type;
void __iomem *regs;
unsigned long paddr;
int irq;
u32 fifo_len; /* depth of the FIFO buffer */
+ u32 max_mem_freq; /* max mem-ops bus freq */
u32 max_freq; /* max bus freq supported */
- int cs_override;
+ u32 caps; /* DW SPI capabilities */
+
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */
void (*set_cs)(struct spi_device *spi, bool enable);
- u32 (*update_cr0)(struct spi_controller *master, struct spi_device *spi,
- struct spi_transfer *transfer);
/* Current message transfer state info */
- size_t len;
void *tx;
- void *tx_end;
- spinlock_t buf_lock;
+ unsigned int tx_len;
void *rx;
- void *rx_end;
+ unsigned int rx_len;
+ u8 buf[SPI_BUF_SIZE];
int dma_mapped;
u8 n_bytes; /* current is a 1/2 bytes op */
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
u32 current_freq; /* frequency in hz */
+ u32 cur_rx_sample_dly;
+ u32 def_rx_sample_dly_ns;
+
+ /* Custom memory operations */
+ struct spi_controller_mem_ops mem_ops;
/* DMA info */
struct dma_chan *txchan;
u32 txburst;
struct dma_chan *rxchan;
u32 rxburst;
+ u32 dma_sg_burst;
unsigned long dma_chan_busy;
dma_addr_t dma_addr; /* phy address of the Data register */
const struct dw_spi_dma_ops *dma_ops;
@@ -162,29 +196,19 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
return __raw_readl(dws->regs + offset);
}
-static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
-{
- return __raw_readw(dws->regs + offset);
-}
-
static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
{
__raw_writel(val, dws->regs + offset);
}
-static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
-{
- __raw_writew(val, dws->regs + offset);
-}
-
static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
{
switch (dws->reg_io_width) {
case 2:
- return dw_readw(dws, offset);
+ return readw_relaxed(dws->regs + offset);
case 4:
default:
- return dw_readl(dws, offset);
+ return readl_relaxed(dws->regs + offset);
}
}
@@ -192,11 +216,11 @@ static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
{
switch (dws->reg_io_width) {
case 2:
- dw_writew(dws, offset, val);
+ writew_relaxed(val, dws->regs + offset);
break;
case 4:
default:
- dw_writel(dws, offset, val);
+ writel_relaxed(val, dws->regs + offset);
break;
}
}
@@ -230,14 +254,16 @@ static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
}
/*
- * This does disable the SPI controller, interrupts, and re-enable the
- * controller back. Transmit and receive FIFO buffers are cleared when the
- * device is disabled.
+ * This disables the SPI controller, interrupts, clears the interrupts status
+ * and CS, then re-enables the controller back. Transmit and receive FIFO
+ * buffers are cleared when the device is disabled.
*/
static inline void spi_reset_chip(struct dw_spi *dws)
{
spi_enable_chip(dws, 0);
spi_mask_intr(dws, 0xff);
+ dw_readl(dws, DW_SPI_ICR);
+ dw_writel(dws, DW_SPI_SER, 0);
spi_enable_chip(dws, 1);
}
@@ -248,16 +274,13 @@ static inline void spi_shutdown_chip(struct dw_spi *dws)
}
extern void dw_spi_set_cs(struct spi_device *spi, bool enable);
+extern void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
+ struct dw_spi_cfg *cfg);
+extern int dw_spi_check_status(struct dw_spi *dws, bool raw);
extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
-extern u32 dw_spi_update_cr0(struct spi_controller *master,
- struct spi_device *spi,
- struct spi_transfer *transfer);
-extern u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
- struct spi_device *spi,
- struct spi_transfer *transfer);
#ifdef CONFIG_SPI_DW_DMA
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index 37a3e0f8e752..8a440c7078ef 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -12,6 +12,7 @@
#define FSI_ENGID_SPI 0x23
#define FSI_MBOX_ROOT_CTRL_8 0x2860
+#define FSI_MBOX_ROOT_CTRL_8_SPI_MUX 0xf0000000
#define FSI2SPI_DATA0 0x00
#define FSI2SPI_DATA1 0x04
@@ -24,11 +25,16 @@
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_INIT_TIMEOUT_MS 1000
-#define SPI_FSI_MAX_TRANSFER_SIZE 2048
+#define SPI_FSI_MAX_XFR_SIZE 2048
+#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 32
#define SPI_FSI_ERROR 0x0
#define SPI_FSI_COUNTER_CFG 0x1
#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
+#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
+#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
+#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
+#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
#define SPI_FSI_CFG1 0x2
#define SPI_FSI_CLOCK_CFG 0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
@@ -61,7 +67,7 @@
#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
#define SPI_FSI_STATUS_ANY_ERROR \
- (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
+ (SPI_FSI_STATUS_ERROR | \
SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL 0x9
@@ -70,6 +76,8 @@ struct fsi_spi {
struct device *dev; /* SPI controller device */
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
u32 base;
+ size_t max_xfr_size;
+ bool restricted;
};
struct fsi_spi_sequence {
@@ -77,6 +85,26 @@ struct fsi_spi_sequence {
u64 data;
};
+static int fsi_spi_check_mux(struct fsi_device *fsi, struct device *dev)
+{
+ int rc;
+ u32 root_ctrl_8;
+ __be32 root_ctrl_8_be;
+
+ rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8_be,
+ sizeof(root_ctrl_8_be));
+ if (rc)
+ return rc;
+
+ root_ctrl_8 = be32_to_cpu(root_ctrl_8_be);
+ dev_dbg(dev, "Root control register 8: %08x\n", root_ctrl_8);
+ if ((root_ctrl_8 & FSI_MBOX_ROOT_CTRL_8_SPI_MUX) ==
+ FSI_MBOX_ROOT_CTRL_8_SPI_MUX)
+ return 0;
+
+ return -ENOLINK;
+}
+
static int fsi_spi_check_status(struct fsi_spi *ctx)
{
int rc;
@@ -205,8 +233,12 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
if (rc)
return rc;
- return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
- SPI_FSI_CLOCK_CFG_RESET2);
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET2);
+ if (rc)
+ return rc;
+
+ return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
@@ -214,8 +246,8 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
/*
* Add the next byte of instruction to the 8-byte sequence register.
* Then decrement the counter so that the next instruction will go in
- * the right place. Return the number of "slots" left in the sequence
- * register.
+ * the right place. Return the index of the slot we just filled in the
+ * sequence register.
*/
seq->data |= (u64)val << seq->bit;
seq->bit -= 8;
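
The return-value contract changes here from "slots remaining" to "index of the
slot just filled". A minimal user-space sketch of how that index can fall out
of the bit offset (the 8-bit slot width is visible above; the starting offset
of 56 for an 8-byte register is an assumption, since the function body is
truncated in this hunk):

#include <assert.h>
#include <stdint.h>

static int sequence_add(uint64_t *data, int *bit, uint8_t val)
{
	*data |= (uint64_t)val << *bit;	/* pack slots MSB-first */
	*bit -= 8;
	/* *bit was 56 for slot 0, 48 for slot 1, ... (assumed layout) */
	return (56 - (*bit + 8)) / 8;
}

int main(void)
{
	uint64_t data = 0;
	int bit = 56;		/* assumed initial offset */

	assert(sequence_add(&data, &bit, 0x11) == 0);	/* first slot */
	assert(sequence_add(&data, &bit, 0x22) == 1);	/* second slot */
	return 0;
}
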
@@ -233,40 +265,71 @@ static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
struct fsi_spi_sequence *seq,
struct spi_transfer *transfer)
{
+ bool docfg = false;
int loops;
int idx;
int rc;
+ u8 val = 0;
u8 len = min(transfer->len, 8U);
u8 rem = transfer->len % len;
+ u64 cfg = 0ULL;
loops = transfer->len / len;
if (transfer->tx_buf) {
- idx = fsi_spi_sequence_add(seq,
- SPI_FSI_SEQUENCE_SHIFT_OUT(len));
+ val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
+ idx = fsi_spi_sequence_add(seq, val);
+
if (rem)
rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
} else if (transfer->rx_buf) {
- idx = fsi_spi_sequence_add(seq,
- SPI_FSI_SEQUENCE_SHIFT_IN(len));
+ val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
+ idx = fsi_spi_sequence_add(seq, val);
+
if (rem)
rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
} else {
return -EINVAL;
}
+ if (ctx->restricted) {
+ const int eidx = rem ? 5 : 6;
+
+ while (loops > 1 && idx <= eidx) {
+ idx = fsi_spi_sequence_add(seq, val);
+ loops--;
+ docfg = true;
+ }
+
+ if (loops > 1) {
+ dev_warn(ctx->dev, "No sequencer slots; aborting.\n");
+ return -EINVAL;
+ }
+ }
+
if (loops > 1) {
fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
+ docfg = true;
+ }
- if (rem)
- fsi_spi_sequence_add(seq, rem);
+ if (docfg) {
+ cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
+ if (transfer->rx_buf)
+ cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
+ SPI_FSI_COUNTER_CFG_N2_TX |
+ SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
+ SPI_FSI_COUNTER_CFG_N2_RELOAD;
- rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
- SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
if (rc)
return rc;
+ } else {
+ fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
}
+ if (rem)
+ fsi_spi_sequence_add(seq, rem);
+
return 0;
}
@@ -275,6 +338,7 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
{
int rc = 0;
u64 status = 0ULL;
+ u64 cfg = 0ULL;
if (transfer->tx_buf) {
int nb;
@@ -312,6 +376,16 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u64 in = 0ULL;
u8 *rx = transfer->rx_buf;
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
+ if (rc)
+ return rc;
+
+ if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
+ if (rc)
+ return rc;
+ }
+
while (transfer->len > recv) {
do {
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
@@ -350,7 +424,7 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
u64 status = 0ULL;
u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
- FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
+ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 19);
end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
do {
@@ -396,18 +470,22 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *mesg)
{
- int rc = 0;
+ int rc;
u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
struct spi_transfer *transfer;
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
+ rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
+ if (rc)
+ return rc;
+
list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
struct fsi_spi_sequence seq;
struct spi_transfer *next = NULL;
/* Sequencer must do shift out (tx) first. */
if (!transfer->tx_buf ||
- transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ transfer->len > (ctx->max_xfr_size + 8)) {
rc = -EINVAL;
goto error;
}
@@ -431,7 +509,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
/* Sequencer can only do shift in (rx) after tx. */
if (next->rx_buf) {
- if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ if (next->len > ctx->max_xfr_size) {
rc = -EINVAL;
goto error;
}
@@ -476,30 +554,21 @@ error:
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
- return SPI_FSI_MAX_TRANSFER_SIZE;
+ struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
+
+ return ctx->max_xfr_size;
}
static int fsi_spi_probe(struct device *dev)
{
int rc;
- u32 root_ctrl_8;
struct device_node *np;
int num_controllers_registered = 0;
struct fsi_device *fsi = to_fsi_dev(dev);
- /*
- * Check the SPI mux before attempting to probe. If the mux isn't set
- * then the SPI controllers can't access their slave devices.
- */
- rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8,
- sizeof(root_ctrl_8));
+ rc = fsi_spi_check_mux(fsi, dev);
if (rc)
- return rc;
-
- if (!root_ctrl_8) {
- dev_dbg(dev, "SPI mux not set, aborting probe.\n");
return -ENODEV;
- }
for_each_available_child_of_node(dev->of_node, np) {
u32 base;
@@ -524,6 +593,14 @@ static int fsi_spi_probe(struct device *dev)
ctx->fsi = fsi;
ctx->base = base + SPI_FSI_BASE;
+ if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
+ ctx->restricted = true;
+ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
+ } else {
+ ctx->restricted = false;
+ ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
+ }
+
rc = devm_spi_register_controller(dev, ctlr);
if (rc)
spi_controller_put(ctlr);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 108a7d50d2c3..1a08c1d584ab 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -53,7 +53,6 @@
#define SPI_SR 0x2c
#define SPI_SR_TCFQF BIT(31)
-#define SPI_SR_EOQF BIT(28)
#define SPI_SR_TFUF BIT(27)
#define SPI_SR_TFFF BIT(25)
#define SPI_SR_CMDTCF BIT(23)
@@ -62,7 +61,7 @@
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
-#define SPI_SR_CLEAR (SPI_SR_TCFQF | SPI_SR_EOQF | \
+#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
SPI_SR_RFOF | SPI_SR_TFIWF | \
@@ -75,7 +74,6 @@
#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
-#define SPI_RSER_EOQFE BIT(28)
#define SPI_RSER_CMDTCFE BIT(23)
#define SPI_PUSHR 0x34
@@ -114,7 +112,6 @@ struct chip_data {
};
enum dspi_trans_mode {
- DSPI_EOQ_MODE = 0,
DSPI_XSPI_MODE,
DSPI_DMA_MODE,
};
@@ -189,7 +186,7 @@ static const struct fsl_dspi_devtype_data devtype_data[] = {
.fifo_size = 4,
},
[MCF5441X] = {
- .trans_mode = DSPI_EOQ_MODE,
+ .trans_mode = DSPI_DMA_MODE,
.max_clock_factor = 8,
.fifo_size = 16,
},
@@ -671,11 +668,6 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
}
}
-static void dspi_pushr_write(struct fsl_dspi *dspi)
-{
- regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
-}
-
static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
/*
@@ -735,21 +727,6 @@ static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
}
}
-static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words)
-{
- u16 xfer_cmd = dspi->tx_cmd;
-
- /* Fill TX FIFO with as many transfers as possible */
- while (num_words--) {
- dspi->tx_cmd = xfer_cmd;
- /* Request EOQF for last transfer in FIFO */
- if (num_words == 0)
- dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
- /* Write combined TX FIFO and CMD FIFO entry */
- dspi_pushr_write(dspi);
- }
-}
-
static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
u32 rxdata = 0;
@@ -818,7 +795,7 @@ no_accel:
dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
/*
- * Update CTAR here (code is common for EOQ, XSPI and DMA modes).
+ * Update CTAR here (code is common for XSPI and DMA modes).
* We will update CTARE in the portion specific to XSPI, when we
* also know the preload value (DTCP).
*/
@@ -862,10 +839,7 @@ static void dspi_fifo_write(struct fsl_dspi *dspi)
spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
- if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
- dspi_eoq_fifo_write(dspi, num_words);
- else
- dspi_xspi_fifo_write(dspi, num_words);
+ dspi_xspi_fifo_write(dspi, num_words);
/*
* Everything after this point is in a potential race with the next
* interrupt, so we must never use dspi->words_in_flight again since it
@@ -898,7 +872,7 @@ static int dspi_poll(struct fsl_dspi *dspi)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))
+ if (spi_sr & SPI_SR_CMDTCF)
break;
} while (--tries);
@@ -916,7 +890,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)))
+ if (!(spi_sr & SPI_SR_CMDTCF))
return IRQ_NONE;
if (dspi_rxtx(dspi) == 0)
@@ -1106,12 +1080,11 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
- struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
if (dspi->irq)
disable_irq(dspi->irq);
- spi_controller_suspend(ctlr);
+ spi_controller_suspend(dspi->ctlr);
clk_disable_unprepare(dspi->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -1121,8 +1094,7 @@ static int dspi_suspend(struct device *dev)
static int dspi_resume(struct device *dev)
{
- struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ struct fsl_dspi *dspi = dev_get_drvdata(dev);
int ret;
pinctrl_pm_select_default_state(dev);
@@ -1130,7 +1102,7 @@ static int dspi_resume(struct device *dev)
ret = clk_prepare_enable(dspi->clk);
if (ret)
return ret;
- spi_controller_resume(ctlr);
+ spi_controller_resume(dspi->ctlr);
if (dspi->irq)
enable_irq(dspi->irq);
@@ -1204,9 +1176,6 @@ static int dspi_init(struct fsl_dspi *dspi)
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
switch (dspi->devtype_data->trans_mode) {
- case DSPI_EOQ_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
- break;
case DSPI_XSPI_MODE:
regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
break;
@@ -1245,22 +1214,6 @@ static int dspi_slave_abort(struct spi_master *master)
return 0;
}
-/*
- * EOQ mode will inevitably deassert its PCS signal on last word in a queue
- * (hardware limitation), so we need to inform the spi_device that larger
- * buffers than the FIFO size are going to have the chip select randomly
- * toggling, so it has a chance to adapt its message sizes.
- */
-static size_t dspi_max_message_size(struct spi_device *spi)
-{
- struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
-
- if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
- return dspi->devtype_data->fifo_size;
-
- return SIZE_MAX;
-}
-
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1289,7 +1242,6 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message;
- ctlr->max_message_size = dspi_max_message_size;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 6d148ab70b93..cf2b947c600e 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -731,7 +731,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
if (ret < 0)
goto err_pm;
- dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);
+ dev_info(dev, "irq = %u\n", irq);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 85a5c952389a..986b9793fd3c 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -944,8 +944,7 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int fsl_lpspi_suspend(struct device *dev)
+static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
{
int ret;
@@ -954,7 +953,7 @@ static int fsl_lpspi_suspend(struct device *dev)
return ret;
}
-static int fsl_lpspi_resume(struct device *dev)
+static int __maybe_unused fsl_lpspi_resume(struct device *dev)
{
int ret;
@@ -968,7 +967,6 @@ static int fsl_lpspi_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops fsl_lpspi_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_lpspi_runtime_suspend,
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 80cea5cd3612..25810a7eef10 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -290,6 +290,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
+ u32 spi_tx_cfg;
pm_runtime_get_sync(mas->dev);
@@ -308,7 +309,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
* Hardware programming guide suggests configuring the
* RX FIFO RFR level to fifo_depth - 2.
*/
- geni_se_init(se, mas->tx_fifo_depth / 2, mas->tx_fifo_depth - 2);
+ geni_se_init(se, mas->tx_fifo_depth - 3, mas->tx_fifo_depth - 2);
/* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1;
ver = geni_se_get_qup_hw_version(se);
@@ -322,16 +323,103 @@ static int spi_geni_init(struct spi_geni_master *mas)
geni_se_select_mode(se, GENI_SE_FIFO);
+ /* We always control CS manually */
+ spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
+ spi_tx_cfg &= ~CS_TOGGLE;
+ writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+
pm_runtime_put(mas->dev);
return 0;
}
+static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
+{
+ /*
+ * Calculate how many bytes we'll put in each FIFO word. If the
+ * transfer words don't pack cleanly into a FIFO word we'll just put
+ * one transfer word in each FIFO word. If they do pack we'll pack 'em.
+ */
+ if (mas->fifo_width_bits % mas->cur_bits_per_word)
+ return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
+ BITS_PER_BYTE));
+
+ return mas->fifo_width_bits / BITS_PER_BYTE;
+}
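
A worked check of the packing rule the comment describes, as a stand-alone
user-space sketch (the 32-bit FIFO word is an assumption about the GENI SE
configuration, not stated in this hunk):

#include <assert.h>

static unsigned int bytes_per_fifo_word(unsigned int fifo_width_bits,
					unsigned int bits_per_word)
{
	unsigned int word_bytes = (bits_per_word + 7) / 8; /* DIV_ROUND_UP */
	unsigned int pow2 = 1;

	if (fifo_width_bits % bits_per_word) {
		while (pow2 < word_bytes)	/* roundup_pow_of_two */
			pow2 <<= 1;
		return pow2;
	}
	return fifo_width_bits / 8;
}

int main(void)
{
	assert(bytes_per_fifo_word(32, 8) == 4);  /* packs 4 x 8-bit words */
	assert(bytes_per_fifo_word(32, 16) == 4); /* packs 2 x 16-bit words */
	assert(bytes_per_fifo_word(32, 9) == 2);  /* no pack: one 9-bit word */
	return 0;
}
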
+
+static bool geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ unsigned int max_bytes;
+ const u8 *tx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
+ if (mas->tx_rem_bytes < max_bytes)
+ max_bytes = mas->tx_rem_bytes;
+
+ tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
+ while (i < max_bytes) {
+ unsigned int j;
+ unsigned int bytes_to_write;
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+
+ bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
+ for (j = 0; j < bytes_to_write; j++)
+ fifo_byte[j] = tx_buf[i++];
+ iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
+ }
+ mas->tx_rem_bytes -= max_bytes;
+ if (!mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+ return true;
+}
+
+static void geni_spi_handle_rx(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 rx_fifo_status;
+ unsigned int rx_bytes;
+ unsigned int rx_last_byte_valid;
+ u8 *rx_buf;
+ unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
+ unsigned int i = 0;
+
+ rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
+ rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
+ if (rx_fifo_status & RX_LAST) {
+ rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
+ rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
+ if (rx_last_byte_valid && rx_last_byte_valid < 4)
+ rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
+ }
+ if (mas->rx_rem_bytes < rx_bytes)
+ rx_bytes = mas->rx_rem_bytes;
+
+ rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
+ while (i < rx_bytes) {
+ u32 fifo_word = 0;
+ u8 *fifo_byte = (u8 *)&fifo_word;
+ unsigned int bytes_to_read;
+ unsigned int j;
+
+ bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
+ ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
+ for (j = 0; j < bytes_to_read; j++)
+ rx_buf[i++] = fifo_byte[j];
+ }
+ mas->rx_rem_bytes -= rx_bytes;
+}
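
To make the partial-last-word correction concrete: with 4 bytes per FIFO word
(an assumed configuration), a 10-byte receive shows up as three FIFO words
with RX_LAST set and only two valid bytes in the last word. A quick check:

#include <assert.h>

int main(void)
{
	unsigned int bytes_per_fifo_word = 4;	/* assumed 32-bit FIFO */
	unsigned int fifo_words = 3;		/* from RX_FIFO_WC */
	unsigned int rx_last_byte_valid = 2;	/* from RX_LAST_BYTE_VALID */
	unsigned int rx_bytes = fifo_words * bytes_per_fifo_word;

	if (rx_last_byte_valid && rx_last_byte_valid < 4)
		rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;

	assert(rx_bytes == 10);
	return 0;
}
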
+
static void setup_fifo_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
- u32 spi_tx_cfg, len;
+ u32 len;
struct geni_se *se = &mas->se;
int ret;
@@ -350,7 +438,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
spin_lock_irq(&mas->lock);
spin_unlock_irq(&mas->lock);
- spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
if (xfer->bits_per_word != mas->cur_bits_per_word) {
spi_setup_word_len(mas, mode, xfer->bits_per_word);
mas->cur_bits_per_word = xfer->bits_per_word;
@@ -364,8 +451,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
- spi_tx_cfg &= ~CS_TOGGLE;
-
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
else
@@ -384,7 +469,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
writel(len, se->base + SE_SPI_RX_TRANS_LEN);
mas->rx_rem_bytes = xfer->len;
}
- writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
/*
* Lock around right before we start the transfer since our
@@ -398,8 +482,10 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
* setting up GENI SE engine, as driver starts data transfer
* for the watermark interrupt.
*/
- if (m_cmd & SPI_TX_ONLY)
- writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+ if (m_cmd & SPI_TX_ONLY) {
+ if (geni_spi_handle_tx(mas))
+ writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
+ }
spin_unlock_irq(&mas->lock);
}
@@ -417,85 +503,6 @@ static int spi_geni_transfer_one(struct spi_master *spi,
return 1;
}
-static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
-{
- /*
- * Calculate how many bytes we'll put in each FIFO word. If the
- * transfer words don't pack cleanly into a FIFO word we'll just put
- * one transfer word in each FIFO word. If they do pack we'll pack 'em.
- */
- if (mas->fifo_width_bits % mas->cur_bits_per_word)
- return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
- BITS_PER_BYTE));
-
- return mas->fifo_width_bits / BITS_PER_BYTE;
-}
-
-static void geni_spi_handle_tx(struct spi_geni_master *mas)
-{
- struct geni_se *se = &mas->se;
- unsigned int max_bytes;
- const u8 *tx_buf;
- unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
- unsigned int i = 0;
-
- max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
- if (mas->tx_rem_bytes < max_bytes)
- max_bytes = mas->tx_rem_bytes;
-
- tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
- while (i < max_bytes) {
- unsigned int j;
- unsigned int bytes_to_write;
- u32 fifo_word = 0;
- u8 *fifo_byte = (u8 *)&fifo_word;
-
- bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
- for (j = 0; j < bytes_to_write; j++)
- fifo_byte[j] = tx_buf[i++];
- iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
- }
- mas->tx_rem_bytes -= max_bytes;
- if (!mas->tx_rem_bytes)
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
-}
-
-static void geni_spi_handle_rx(struct spi_geni_master *mas)
-{
- struct geni_se *se = &mas->se;
- u32 rx_fifo_status;
- unsigned int rx_bytes;
- unsigned int rx_last_byte_valid;
- u8 *rx_buf;
- unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
- unsigned int i = 0;
-
- rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
- rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
- if (rx_fifo_status & RX_LAST) {
- rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
- rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
- if (rx_last_byte_valid && rx_last_byte_valid < 4)
- rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
- }
- if (mas->rx_rem_bytes < rx_bytes)
- rx_bytes = mas->rx_rem_bytes;
-
- rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
- while (i < rx_bytes) {
- u32 fifo_word = 0;
- u8 *fifo_byte = (u8 *)&fifo_word;
- unsigned int bytes_to_read;
- unsigned int j;
-
- bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
- ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
- for (j = 0; j < bytes_to_read; j++)
- rx_buf[i++] = fifo_byte[j];
- }
- mas->rx_rem_bytes -= rx_bytes;
-}
-
static irqreturn_t geni_spi_isr(int irq, void *data)
{
struct spi_master *spi = data;
@@ -613,11 +620,9 @@ static int spi_geni_probe(struct platform_device *pdev)
return PTR_ERR(mas->se.opp_table);
/* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev);
- if (!ret) {
- mas->se.has_opp_table = true;
- } else if (ret != -ENODEV) {
+ if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
+ goto put_clkname;
}
spi->bus_num = -1;
@@ -669,8 +674,8 @@ spi_geni_probe_free_irq:
spi_geni_probe_runtime_disable:
pm_runtime_disable(dev);
spi_master_put(spi);
- if (mas->se.has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
+put_clkname:
dev_pm_opp_put_clkname(mas->se.opp_table);
return ret;
}
@@ -685,8 +690,7 @@ static int spi_geni_remove(struct platform_device *pdev)
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
- if (mas->se.has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(mas->se.opp_table);
return 0;
}
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 64a18d08a4d9..4650b483a33d 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -7,7 +7,9 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/dmi.h>
+#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -17,18 +19,11 @@
#define HISI_SFC_V3XX_VERSION (0x1f8)
-#define HISI_SFC_V3XX_INT_STAT (0x120)
-#define HISI_SFC_V3XX_INT_STAT_PP_ERR BIT(2)
-#define HISI_SFC_V3XX_INT_STAT_ADDR_IACCES BIT(5)
+#define HISI_SFC_V3XX_RAW_INT_STAT (0x120)
+#define HISI_SFC_V3XX_INT_STAT (0x124)
+#define HISI_SFC_V3XX_INT_MASK (0x128)
#define HISI_SFC_V3XX_INT_CLR (0x12c)
-#define HISI_SFC_V3XX_INT_CLR_CLEAR (0xff)
#define HISI_SFC_V3XX_CMD_CFG (0x300)
-#define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17)
-#define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17)
-#define HISI_SFC_V3XX_CMD_CFG_FULL_DIO (3 << 17)
-#define HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT (5 << 17)
-#define HISI_SFC_V3XX_CMD_CFG_QUAD_IO (6 << 17)
-#define HISI_SFC_V3XX_CMD_CFG_FULL_QIO (7 << 17)
#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
@@ -40,12 +35,99 @@
#define HISI_SFC_V3XX_CMD_ADDR (0x30c)
#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400)
+/* Common definition of interrupt bit masks */
+#define HISI_SFC_V3XX_INT_MASK_ALL (0x1ff) /* all the masks */
+#define HISI_SFC_V3XX_INT_MASK_CPLT BIT(0) /* command execution complete */
+#define HISI_SFC_V3XX_INT_MASK_PP_ERR BIT(2) /* page program error */
+#define HISI_SFC_V3XX_INT_MASK_IACCES BIT(5) /* error accessing inaccessible/
+ * protected address
+ */
+
+/* IO Mode definition in HISI_SFC_V3XX_CMD_CFG */
+#define HISI_SFC_V3XX_STD (0 << 17)
+#define HISI_SFC_V3XX_DIDO (1 << 17)
+#define HISI_SFC_V3XX_DIO (2 << 17)
+#define HISI_SFC_V3XX_FULL_DIO (3 << 17)
+#define HISI_SFC_V3XX_QIQO (5 << 17)
+#define HISI_SFC_V3XX_QIO (6 << 17)
+#define HISI_SFC_V3XX_FULL_QIO (7 << 17)
+
+/*
+ * The IO modes lookup table. hisi_sfc_v3xx_io_modes[(z - 1) / 2][y / 2][x / 2]
+ * stands for x-y-z mode, as described in SFDP terminology. -EIO indicates
+ * an invalid mode.
+ */
+static const int hisi_sfc_v3xx_io_modes[2][3][3] = {
+ {
+ { HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO, HISI_SFC_V3XX_DIDO },
+ { HISI_SFC_V3XX_DIO, HISI_SFC_V3XX_FULL_DIO, -EIO },
+ { -EIO, -EIO, -EIO },
+ },
+ {
+ { HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO, HISI_SFC_V3XX_QIQO },
+ { -EIO, -EIO, -EIO },
+ { HISI_SFC_V3XX_QIO, -EIO, HISI_SFC_V3XX_FULL_QIO },
+ },
+};
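
A few concrete lookups, with x-y-z being the cmd-addr-data buswidths in SFDP
terms. This sketch mirrors the index arithmetic used later in
hisi_sfc_v3xx_start_bus, with placeholder enum values standing in for the
register constants above:

#include <assert.h>

enum { DIDO, DIO, FULL_DIO, QIQO, QIO, FULL_QIO, INVAL = -1 };

static const int io_modes[2][3][3] = {
	{
		{ DIDO, DIDO, DIDO },
		{ DIO, FULL_DIO, INVAL },
		{ INVAL, INVAL, INVAL },
	},
	{
		{ QIQO, QIQO, QIQO },
		{ INVAL, INVAL, INVAL },
		{ QIO, INVAL, FULL_QIO },
	},
};

static int lookup(int x, int y, int z)	/* cmd, addr, data buswidths */
{
	return io_modes[(z - 1) / 2][y / 2][x / 2];
}

int main(void)
{
	assert(lookup(1, 1, 2) == DIDO);	/* 1-1-2 dual output */
	assert(lookup(1, 2, 2) == DIO);		/* 1-2-2 dual I/O */
	assert(lookup(2, 2, 2) == FULL_DIO);	/* 2-2-2 full DIO */
	assert(lookup(1, 1, 4) == QIQO);	/* 1-1-4 quad output */
	assert(lookup(1, 4, 4) == QIO);		/* 1-4-4 quad I/O */
	assert(lookup(4, 4, 4) == FULL_QIO);	/* 4-4-4 full QIO */
	return 0;
}
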
+
struct hisi_sfc_v3xx_host {
struct device *dev;
void __iomem *regbase;
int max_cmd_dword;
+ struct completion *completion;
+ int irq;
};
+static void hisi_sfc_v3xx_disable_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(0, host->regbase + HISI_SFC_V3XX_INT_MASK);
+}
+
+static void hisi_sfc_v3xx_enable_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_MASK);
+}
+
+static void hisi_sfc_v3xx_clear_int(struct hisi_sfc_v3xx_host *host)
+{
+ writel(HISI_SFC_V3XX_INT_MASK_ALL, host->regbase + HISI_SFC_V3XX_INT_CLR);
+}
+
+/*
+ * The interrupt status register indicates whether an error occurred
+ * during the last operation. Check it, and clear the interrupts so
+ * the next check starts from a clean state.
+ */
+static int hisi_sfc_v3xx_handle_completion(struct hisi_sfc_v3xx_host *host)
+{
+ u32 reg;
+
+ reg = readl(host->regbase + HISI_SFC_V3XX_RAW_INT_STAT);
+ hisi_sfc_v3xx_clear_int(host);
+
+ if (reg & HISI_SFC_V3XX_INT_MASK_IACCES) {
+ dev_err(host->dev, "fail to access protected address\n");
+ return -EIO;
+ }
+
+ if (reg & HISI_SFC_V3XX_INT_MASK_PP_ERR) {
+ dev_err(host->dev, "page program operation failed\n");
+ return -EIO;
+ }
+
+ /*
+ * The other bits of the interrupt register are not currently
+ * used and should not be triggered in this driver. If one does
+ * fire, we regard it as an unsupported error here.
+ */
+ if (!(reg & HISI_SFC_V3XX_INT_MASK_CPLT)) {
+ dev_err(host->dev, "unsupported error occurred, status=0x%x\n", reg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000
#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10
@@ -80,6 +162,20 @@ static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
}
/*
+ * The controller only supports Standard SPI mode, Dual mode and
+ * Quad mode. Sanitize the ops here again to avoid OOB access.
+ */
+static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->data.buswidth > 4 || op->dummy.buswidth > 4 ||
+ op->addr.buswidth > 4 || op->cmd.buswidth > 4)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+/*
* memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the
* DATABUF registers - so use __io{read,write}32_copy when possible. For
* trailing bytes, copy them byte-by-byte from the DATABUF register, as we
@@ -163,61 +259,36 @@ static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host,
}
}
-static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
- const struct spi_mem_op *op,
- u8 chip_select)
+static int hisi_sfc_v3xx_start_bus(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
{
- int ret, len = op->data.nbytes;
- u32 int_stat, config = 0;
+ int len = op->data.nbytes, buswidth_mode;
+ u32 config = 0;
if (op->addr.nbytes)
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
- switch (op->data.buswidth) {
- case 0 ... 1:
- break;
- case 2:
- if (op->addr.buswidth <= 1) {
- config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT;
- } else if (op->addr.buswidth == 2) {
- if (op->cmd.buswidth <= 1) {
- config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IO;
- } else if (op->cmd.buswidth == 2) {
- config |= HISI_SFC_V3XX_CMD_CFG_FULL_DIO;
- } else {
- return -EIO;
- }
- } else {
- return -EIO;
- }
- break;
- case 4:
- if (op->addr.buswidth <= 1) {
- config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT;
- } else if (op->addr.buswidth == 4) {
- if (op->cmd.buswidth <= 1) {
- config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IO;
- } else if (op->cmd.buswidth == 4) {
- config |= HISI_SFC_V3XX_CMD_CFG_FULL_QIO;
- } else {
- return -EIO;
- }
- } else {
- return -EIO;
- }
- break;
- default:
- return -EOPNOTSUPP;
+ if (op->data.buswidth == 0 || op->data.buswidth == 1) {
+ buswidth_mode = HISI_SFC_V3XX_STD;
+ } else {
+ int data_idx, addr_idx, cmd_idx;
+
+ data_idx = (op->data.buswidth - 1) / 2;
+ addr_idx = op->addr.buswidth / 2;
+ cmd_idx = op->cmd.buswidth / 2;
+ buswidth_mode = hisi_sfc_v3xx_io_modes[data_idx][addr_idx][cmd_idx];
}
+ if (buswidth_mode < 0)
+ return buswidth_mode;
+ config |= buswidth_mode;
if (op->data.dir != SPI_MEM_NO_DATA) {
config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
}
- if (op->data.dir == SPI_MEM_DATA_OUT)
- hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len);
- else if (op->data.dir == SPI_MEM_DATA_IN)
+ if (op->data.dir == SPI_MEM_DATA_IN)
config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK;
config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF |
@@ -229,31 +300,46 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG);
- ret = hisi_sfc_v3xx_wait_cmd_idle(host);
+ return 0;
+}
+
+static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
+ const struct spi_mem_op *op,
+ u8 chip_select)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret;
+
+ if (host->irq) {
+ host->completion = &done;
+ hisi_sfc_v3xx_enable_int(host);
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, op->data.nbytes);
+
+ ret = hisi_sfc_v3xx_start_bus(host, op, chip_select);
if (ret)
return ret;
- /*
- * The interrupt status register indicates whether an error occurs
- * after per operation. Check it, and clear the interrupts for
- * next time judgement.
- */
- int_stat = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
- writel(HISI_SFC_V3XX_INT_CLR_CLEAR,
- host->regbase + HISI_SFC_V3XX_INT_CLR);
+ if (host->irq) {
+ ret = wait_for_completion_timeout(host->completion,
+ usecs_to_jiffies(HISI_SFC_V3XX_WAIT_TIMEOUT_US));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
- if (int_stat & HISI_SFC_V3XX_INT_STAT_ADDR_IACCES) {
- dev_err(host->dev, "fail to access protected address\n");
- return -EIO;
+ hisi_sfc_v3xx_disable_int(host);
+ host->completion = NULL;
+ } else {
+ ret = hisi_sfc_v3xx_wait_cmd_idle(host);
}
-
- if (int_stat & HISI_SFC_V3XX_INT_STAT_PP_ERR) {
- dev_err(host->dev, "page program operation failed\n");
+ if (hisi_sfc_v3xx_handle_completion(host) || ret)
return -EIO;
- }
if (op->data.dir == SPI_MEM_DATA_IN)
- hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len);
+ hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, op->data.nbytes);
return 0;
}
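
The irq branch is the standard on-stack completion handshake between the
submitter and the ISR. In isolation (a sketch of the pattern under assumed
names, not the driver code itself):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct demo_host {
	struct completion *completion;
};

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_host *host = data;

	complete(host->completion);	/* wake the sleeping submitter */
	return IRQ_HANDLED;
}

static int demo_exec(struct demo_host *host)
{
	DECLARE_COMPLETION_ONSTACK(done);

	host->completion = &done;
	/* ... kick off the hardware operation here ... */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	host->completion = NULL;
	return 0;
}
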
@@ -272,9 +358,21 @@ static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
.adjust_op_size = hisi_sfc_v3xx_adjust_op_size,
+ .supports_op = hisi_sfc_v3xx_supports_op,
.exec_op = hisi_sfc_v3xx_exec_op,
};
+static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
+{
+ struct hisi_sfc_v3xx_host *host = data;
+
+ hisi_sfc_v3xx_disable_int(host);
+
+ complete(host->completion);
+
+ return IRQ_HANDLED;
+}
+
static int hisi_sfc_v3xx_buswidth_override_bits;
/*
@@ -341,6 +439,26 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
goto err_put_master;
}
+ host->irq = platform_get_irq_optional(pdev, 0);
+ if (host->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_put_master;
+ }
+
+ hisi_sfc_v3xx_disable_int(host);
+
+ if (host->irq > 0) {
+ ret = devm_request_irq(dev, host->irq, hisi_sfc_v3xx_isr, 0,
+ "hisi-sfc-v3xx", host);
+
+ if (ret) {
+ dev_err(dev, "failed to request irq%d, ret = %d\n", host->irq, ret);
+ host->irq = 0;
+ }
+ } else {
+ host->irq = 0;
+ }
+
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops;
@@ -360,7 +478,8 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
if (ret)
goto err_put_master;
- dev_info(&pdev->dev, "hw version 0x%x\n", version);
+ dev_info(&pdev->dev, "hw version 0x%x, %s mode.\n",
+ version, host->irq ? "irq" : "polling");
return 0;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 38a5f1304cec..4b80e27ecdbf 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1503,6 +1503,8 @@ static int spi_imx_transfer(struct spi_device *spi,
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ transfer->effective_speed_hz = spi_imx->spi_bus_clk;
+
/* flush rxfifo before transfer */
while (spi_imx->devtype_data->rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
@@ -1674,15 +1676,18 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_master_put;
}
- pm_runtime_enable(spi_imx->dev);
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_master_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
+
pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(spi_imx->dev);
-
- ret = pm_runtime_get_sync(spi_imx->dev);
- if (ret < 0) {
- dev_err(spi_imx->dev, "failed to enable clock\n");
- goto out_runtime_pm_put;
- }
+ pm_runtime_set_active(spi_imx->dev);
+ pm_runtime_enable(spi_imx->dev);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
/*
@@ -1695,7 +1700,7 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_runtime_pm_put;
if (ret < 0)
- dev_err(&pdev->dev, "dma setup error %d, use pio\n",
+ dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
ret);
}
@@ -1707,20 +1712,25 @@ static int spi_imx_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_runtime_pm_put;
+ goto out_bitbang_start;
}
- dev_info(&pdev->dev, "probed\n");
-
pm_runtime_mark_last_busy(spi_imx->dev);
pm_runtime_put_autosuspend(spi_imx->dev);
return ret;
+out_bitbang_start:
+ if (spi_imx->devtype_data->has_dmamode)
+ spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
pm_runtime_dont_use_autosuspend(spi_imx->dev);
- pm_runtime_put_sync(spi_imx->dev);
+ pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(spi_imx->dev);
+
+ clk_disable_unprepare(spi_imx->clk_ipg);
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
spi_master_put(master);
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 3cbecb2d8fc0..bcb52601804a 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -625,9 +625,8 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
struct lantiq_ssc_spi *spi = data;
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
- unsigned long flags;
- spin_lock_irqsave(&spi->lock, flags);
+ spin_lock(&spi->lock);
if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr);
@@ -652,12 +651,12 @@ static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
}
}
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_HANDLED;
completed:
queue_work(spi->wq, &spi->work);
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
@@ -668,12 +667,11 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
- unsigned long flags;
if (!(stat & LTQ_SPI_STAT_ERRORS))
return IRQ_NONE;
- spin_lock_irqsave(&spi->lock, flags);
+ spin_lock(&spi->lock);
if (hwcfg->irq_ack)
lantiq_ssc_writel(spi, val, hwcfg->irncr);
@@ -697,7 +695,7 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
if (spi->master->cur_msg)
spi->master->cur_msg->status = -EIO;
queue_work(spi->wq, &spi->work);
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index b08d8e9a8ee9..b97f26a60cbe 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
@@ -27,6 +28,7 @@
#define MTK_NOR_CMD_MASK GENMASK(5, 0)
#define MTK_NOR_REG_PRG_CNT 0x04
+#define MTK_NOR_PRG_CNT_MAX 56
#define MTK_NOR_REG_RDATA 0x0c
#define MTK_NOR_REG_RADR0 0x10
@@ -78,6 +80,8 @@
#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724
+#define MTK_NOR_REG_DMA_DADR_HB 0x738
+#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c
#define MTK_NOR_PRG_MAX_SIZE 6
// DMA src/dst addresses for reads have to be 16-byte aligned
@@ -89,18 +93,20 @@
// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128
-#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq)
+#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
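
The macro rewrite fixes two things: clkcnt * 1000000 overflows a 32-bit
unsigned long once clkcnt exceeds roughly 4294 (hence the 0x400000 read-size
cap added further down), and the old form rounded the timeout down instead of
up. A quick check of the rounding, assuming a 26 MHz bus clock for the
example:

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long spi_freq = 26000000;	/* 26 MHz, for example */
	unsigned long clkcnt = 100;

	/* old form: clkcnt * 1000000 / spi_freq == 3 (rounds down, and
	 * the multiply overflows 32-bit ulong above clkcnt ~= 4294) */
	assert(DIV_ROUND_UP(clkcnt, spi_freq / 1000000) == 4);
	return 0;
}
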
struct mtk_nor {
struct spi_controller *ctlr;
struct device *dev;
void __iomem *base;
u8 *buffer;
+ dma_addr_t buffer_dma;
struct clk *spi_clk;
struct clk *ctlr_clk;
unsigned int spi_freq;
bool wbuf_en;
bool has_irq;
+ bool high_dma;
struct completion op_done;
};
@@ -144,6 +150,11 @@ static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
}
}
+static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
+}
+
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
int dummy = 0;
@@ -167,9 +178,77 @@ static bool mtk_nor_match_read(const struct spi_mem_op *op)
return false;
}
+static bool mtk_nor_match_prg(const struct spi_mem_op *op)
+{
+ int tx_len, rx_len, prg_len, prg_left;
+
+ // prg mode is spi-only.
+ if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
+ (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
+ return false;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ // count dummy bytes only if we need to write data after it
+ tx_len += op->dummy.nbytes;
+
+ // leave at least one byte for data
+ if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
+ return false;
+
+ // if there's no addr, meaning adjust_op_size is impossible,
+ // check data length as well.
+ if ((!op->addr.nbytes) &&
+ (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
+ return false;
+ } else if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
+ return false;
+
+ rx_len = op->data.nbytes;
+ prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
+ if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
+ prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
+ if (rx_len > prg_left) {
+ if (!op->addr.nbytes)
+ return false;
+ rx_len = prg_left;
+ }
+
+ prg_len = tx_len + op->dummy.nbytes + rx_len;
+ if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
+ return false;
+ } else {
+ prg_len = tx_len + op->dummy.nbytes;
+ if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
+ return false;
+ }
+ return true;
+}
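
Worked capacity math for the checks above, assuming MTK_NOR_REG_PRGDATA_MAX =
5 and MTK_NOR_REG_SHIFT_MAX = 9 as defined elsewhere in the driver (those
defines are not part of this hunk): with MTK_NOR_PRG_CNT_MAX / 8 = 7 bytes of
total shift budget, a one-byte opcode read can return at most 6 bytes per prg
operation.

#include <assert.h>

int main(void)
{
	int prg_budget = 56 / 8;	/* MTK_NOR_PRG_CNT_MAX / 8 = 7 bytes */
	int shift_max = 9;		/* assumed MTK_NOR_REG_SHIFT_MAX */
	int tx_len = 1, dummy = 0;	/* e.g. a single opcode, no dummy */
	int prg_left = prg_budget - tx_len - dummy;

	if (prg_left > shift_max + 1)
		prg_left = shift_max + 1;

	assert(prg_left == 6);		/* up to 6 rx bytes in one prg op */
	return 0;
}
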
+
+static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
+{
+ int tx_len, tx_left, prg_left;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ tx_len += op->dummy.nbytes;
+ tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
+ if (op->data.nbytes > tx_left)
+ op->data.nbytes = tx_left;
+ } else if (op->data.dir == SPI_MEM_DATA_IN) {
+ prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
+ if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
+ prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
+ if (op->data.nbytes > prg_left)
+ op->data.nbytes = prg_left;
+ }
+}
+
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- size_t len;
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
if (!op->data.nbytes)
return 0;
@@ -177,11 +256,14 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
if ((op->data.dir == SPI_MEM_DATA_IN) &&
mtk_nor_match_read(op)) {
+ // limit size to prevent timeout calculation overflow
+ if (op->data.nbytes > 0x400000)
+ op->data.nbytes = 0x400000;
+
if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
(op->data.nbytes < MTK_NOR_DMA_ALIGN))
op->data.nbytes = 1;
- else if (!((ulong)(op->data.buf.in) &
- MTK_NOR_DMA_ALIGN_MASK))
+ else if (!need_bounce(sp, op))
op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
@@ -195,41 +277,37 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
}
- len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes -
- op->dummy.nbytes;
- if (op->data.nbytes > len)
- op->data.nbytes = len;
-
+ mtk_nor_adj_prg_size(op);
return 0;
}
static bool mtk_nor_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- size_t len;
-
- if (op->cmd.buswidth != 1)
+ if (!spi_mem_default_supports_op(mem, op))
return false;
- /* DTR ops not supported. */
- if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
- return false;
- if (op->cmd.nbytes != 1)
+ if (op->cmd.buswidth != 1)
return false;
if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
- if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
- return true;
- else if (op->data.dir == SPI_MEM_DATA_OUT)
- return (op->addr.buswidth == 1) &&
- (op->dummy.buswidth == 0) &&
- (op->data.buswidth == 1);
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ if (mtk_nor_match_read(op))
+ return true;
+ break;
+ case SPI_MEM_DATA_OUT:
+ if ((op->addr.buswidth == 1) &&
+ (op->dummy.nbytes == 0) &&
+ (op->data.buswidth == 1))
+ return true;
+ break;
+ default:
+ break;
+ }
}
- len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
- if ((len > MTK_NOR_PRG_MAX_SIZE) ||
- ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
- return false;
- return true;
+
+ return mtk_nor_match_prg(op);
}
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
@@ -258,24 +336,24 @@ static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
-static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
- u8 *buffer)
+static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
+ dma_addr_t dma_addr)
{
int ret = 0;
ulong delay;
u32 reg;
- dma_addr_t dma_addr;
-
- dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
- if (dma_mapping_error(sp->dev, dma_addr)) {
- dev_err(sp->dev, "failed to map dma buffer.\n");
- return -EINVAL;
- }
writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
+ if (sp->high_dma) {
+ writel(upper_32_bits(dma_addr),
+ sp->base + MTK_NOR_REG_DMA_DADR_HB);
+ writel(upper_32_bits(dma_addr + length),
+ sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
+ }
+
if (sp->has_irq) {
reinit_completion(&sp->op_done);
mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
@@ -295,30 +373,49 @@ static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
(delay + 1) * 100);
}
- dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
if (ret < 0)
dev_err(sp->dev, "dma read timeout.\n");
return ret;
}
-static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
- unsigned int length, u8 *buffer)
+static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
unsigned int rdlen;
int ret;
- if (length & MTK_NOR_DMA_ALIGN_MASK)
- rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
+ if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
else
- rdlen = length;
+ rdlen = op->data.nbytes;
- ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
- if (ret)
- return ret;
+ ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
- memcpy(buffer, sp->buffer, length);
- return 0;
+ if (!ret)
+ memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);
+
+ return ret;
+}
+
+static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ int ret;
+ dma_addr_t dma_addr;
+
+ if (need_bounce(sp, op))
+ return mtk_nor_read_bounce(sp, op);
+
+ dma_addr = dma_map_single(sp->dev, op->data.buf.in,
+ op->data.nbytes, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(sp->dev, dma_addr))
+ return -EINVAL;
+
+ ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);
+
+ dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);
+
+ return ret;
}
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
@@ -397,6 +494,83 @@ static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
+static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ int rx_len = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ int tx_len, prg_len;
+ int i, ret;
+ void __iomem *reg;
+ u8 bufbyte;
+
+ tx_len = op->cmd.nbytes + op->addr.nbytes;
+
+ // count dummy bytes only if we need to write data after it
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_len += op->dummy.nbytes + op->data.nbytes;
+ else if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_len = op->data.nbytes;
+
+ prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
+ op->data.nbytes;
+
+ // an invalid op may reach here if the caller calls exec_op without
+ // adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
+ // spi-mem won't try this op again with generic spi transfers.
+ if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
+ (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
+ (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
+ return -EINVAL;
+
+ // fill tx data
+ for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
+ writeb(bufbyte, reg);
+ }
+
+ for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
+ writeb(bufbyte, reg);
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(0, reg);
+ }
+
+ for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(((const u8 *)(op->data.buf.out))[i], reg);
+ }
+ }
+
+ for (; reg_offset >= 0; reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ writeb(0, reg);
+ }
+
+ // trigger op
+ writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
+ prg_len * BITS_PER_BYTE);
+ if (ret)
+ return ret;
+
+ // fetch read data
+ reg_offset = 0;
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
+ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
+ ((u8 *)(op->data.buf.in))[i] = readb(reg);
+ }
+ }
+
+ return 0;
+}
+
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
@@ -404,7 +578,7 @@ static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if ((op->data.nbytes == 0) ||
((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
- return -ENOTSUPP;
+ return mtk_nor_spi_mem_prg(sp, op);
if (op->data.dir == SPI_MEM_DATA_OUT) {
mtk_nor_set_addr(sp, op);
@@ -422,19 +596,12 @@ static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->data.nbytes == 1) {
mtk_nor_set_addr(sp, op);
return mtk_nor_read_pio(sp, op);
- } else if (((ulong)(op->data.buf.in) &
- MTK_NOR_DMA_ALIGN_MASK)) {
- return mtk_nor_read_bounce(sp, op->addr.val,
- op->data.nbytes,
- op->data.buf.in);
} else {
- return mtk_nor_read_dma(sp, op->addr.val,
- op->data.nbytes,
- op->data.buf.in);
+ return mtk_nor_read_dma(sp, op);
}
}
- return -ENOTSUPP;
+ return mtk_nor_spi_mem_prg(sp, op);
}
static int mtk_nor_setup(struct spi_device *spi)
@@ -524,22 +691,15 @@ static int mtk_nor_enable_clk(struct mtk_nor *sp)
return 0;
}
-static int mtk_nor_init(struct mtk_nor *sp)
+static void mtk_nor_init(struct mtk_nor *sp)
{
- int ret;
-
- ret = mtk_nor_enable_clk(sp);
- if (ret)
- return ret;
-
- sp->spi_freq = clk_get_rate(sp->spi_clk);
+ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
+ writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);
writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
-
- return ret;
}
static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
@@ -575,7 +735,8 @@ static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
};
static const struct of_device_id mtk_nor_match[] = {
- { .compatible = "mediatek,mt8173-nor" },
+ { .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
+ { .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);
@@ -585,9 +746,9 @@ static int mtk_nor_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct mtk_nor *sp;
void __iomem *base;
- u8 *buffer;
struct clk *spi_clk, *ctlr_clk;
int ret, irq;
+ unsigned long dma_bits;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -601,15 +762,11 @@ static int mtk_nor_probe(struct platform_device *pdev)
if (IS_ERR(ctlr_clk))
return PTR_ERR(ctlr_clk);
- buffer = devm_kmalloc(&pdev->dev,
- MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
- GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
- buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
- ~MTK_NOR_DMA_ALIGN_MASK);
+ dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
+ dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
+ return -EINVAL;
+ }
ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
if (!ctlr) {
@@ -625,25 +782,43 @@ static int mtk_nor_probe(struct platform_device *pdev)
ctlr->num_chipselect = 1;
ctlr->setup = mtk_nor_setup;
ctlr->transfer_one_message = mtk_nor_transfer_one_message;
+ ctlr->auto_runtime_pm = true;
dev_set_drvdata(&pdev->dev, ctlr);
sp = spi_controller_get_devdata(ctlr);
sp->base = base;
- sp->buffer = buffer;
sp->has_irq = false;
sp->wbuf_en = false;
sp->ctlr = ctlr;
sp->dev = &pdev->dev;
sp->spi_clk = spi_clk;
sp->ctlr_clk = ctlr_clk;
+ sp->high_dma = (dma_bits > 32);
+ sp->buffer = dmam_alloc_coherent(&pdev->dev,
+ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ &sp->buffer_dma, GFP_KERNEL);
+ if (!sp->buffer)
+ return -ENOMEM;
+
+ if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
+ dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
+ return -ENOMEM;
+ }
+
+ ret = mtk_nor_enable_clk(sp);
+ if (ret < 0)
+ return ret;
+
+ sp->spi_freq = clk_get_rate(sp->spi_clk);
+
+ mtk_nor_init(sp);
irq = platform_get_irq_optional(pdev, 0);
+
if (irq < 0) {
dev_warn(sp->dev, "IRQ not available.");
} else {
- writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
- writel(0, base + MTK_NOR_REG_IRQ_EN);
ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
pdev->name, sp);
if (ret < 0) {
@@ -654,34 +829,86 @@ static int mtk_nor_probe(struct platform_device *pdev)
}
}
- ret = mtk_nor_init(sp);
- if (ret < 0) {
- kfree(ctlr);
- return ret;
- }
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret < 0)
+ goto err_probe;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
- return devm_spi_register_controller(&pdev->dev, ctlr);
+ return 0;
+
+err_probe:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ mtk_nor_disable_clk(sp);
+
+ return ret;
}
static int mtk_nor_remove(struct platform_device *pdev)
{
- struct spi_controller *ctlr;
- struct mtk_nor *sp;
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
- ctlr = dev_get_drvdata(&pdev->dev);
- sp = spi_controller_get_devdata(ctlr);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
mtk_nor_disable_clk(sp);
return 0;
}
+static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+
+ mtk_nor_disable_clk(sp);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+
+ return mtk_nor_enable_clk(sp);
+}
+
+static int __maybe_unused mtk_nor_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused mtk_nor_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops mtk_nor_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
+ mtk_nor_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
+};
+
static struct platform_driver mtk_nor_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mtk_nor_match,
+ .pm = &mtk_nor_pm_ops,
},
.probe = mtk_nor_probe,
.remove = mtk_nor_remove,
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index cc9ef371db14..37dfc6e82804 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -139,9 +139,8 @@ static int spi_mux_probe(struct spi_device *spi)
priv->mux = devm_mux_control_get(&spi->dev, NULL);
if (IS_ERR(priv->mux)) {
- ret = PTR_ERR(priv->mux);
- if (ret != -EPROBE_DEFER)
- dev_err(&spi->dev, "failed to get control-mux\n");
+ ret = dev_err_probe(&spi->dev, PTR_ERR(priv->mux),
+ "failed to get control-mux\n");
goto err_put_ctlr;
}
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 9468e71f03ad..341f7cffeaac 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -677,7 +677,6 @@ static int npcm_fiu_probe(struct platform_device *pdev)
struct npcm_fiu_spi *fiu;
void __iomem *regbase;
struct resource *res;
- int ret;
int id;
ctrl = spi_alloc_master(dev, sizeof(*fiu));
@@ -736,11 +735,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
ctrl->num_chipselect = fiu->info->max_cs;
ctrl->dev.of_node = dev->of_node;
- ret = devm_spi_register_master(dev, ctrl);
- if (ret)
- return ret;
-
- return 0;
+ return devm_spi_register_master(dev, ctrl);
}
static int npcm_fiu_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 1ccda82da206..0d41406c036d 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -3,7 +3,8 @@
/*
* NXP FlexSPI(FSPI) controller driver.
*
- * Copyright 2019 NXP.
+ * Copyright 2019-2020 NXP
+ * Copyright 2020 Puresoftware Ltd.
*
* FlexSPI is a flexible SPI host controller which supports two SPI
* channels and up to 4 external devices. Each channel supports
@@ -30,6 +31,7 @@
* Frieder Schrempf <frieder.schrempf@kontron.de>
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
@@ -563,6 +565,9 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
{
int ret;
+ if (is_acpi_node(f->dev->fwnode))
+ return 0;
+
ret = clk_prepare_enable(f->clk_en);
if (ret)
return ret;
@@ -576,10 +581,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
return 0;
}
-static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
+ if (is_acpi_node(f->dev->fwnode))
+ return 0;
+
clk_disable_unprepare(f->clk);
clk_disable_unprepare(f->clk_en);
+
+ return 0;
}
/*
@@ -1001,7 +1011,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f = spi_controller_get_devdata(ctlr);
f->dev = dev;
- f->devtype_data = of_device_get_match_data(dev);
+ f->devtype_data = device_get_match_data(dev);
if (!f->devtype_data) {
ret = -ENODEV;
goto err_put_ctrl;
@@ -1010,7 +1020,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, f);
/* find the resources - configuration register address space */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base");
+ if (is_acpi_node(f->dev->fwnode))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ else
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "fspi_base");
+
f->iobase = devm_ioremap_resource(dev, res);
if (IS_ERR(f->iobase)) {
ret = PTR_ERR(f->iobase);
@@ -1018,7 +1033,12 @@ static int nxp_fspi_probe(struct platform_device *pdev)
}
/* find the resources - controller memory mapped space */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
+ if (is_acpi_node(f->dev->fwnode))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ else
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "fspi_mmap");
+
if (!res) {
ret = -ENODEV;
goto err_put_ctrl;
@@ -1029,22 +1049,24 @@ static int nxp_fspi_probe(struct platform_device *pdev)
f->memmap_phy_size = resource_size(res);
/* find the clocks */
- f->clk_en = devm_clk_get(dev, "fspi_en");
- if (IS_ERR(f->clk_en)) {
- ret = PTR_ERR(f->clk_en);
- goto err_put_ctrl;
- }
+ if (dev_of_node(&pdev->dev)) {
+ f->clk_en = devm_clk_get(dev, "fspi_en");
+ if (IS_ERR(f->clk_en)) {
+ ret = PTR_ERR(f->clk_en);
+ goto err_put_ctrl;
+ }
- f->clk = devm_clk_get(dev, "fspi");
- if (IS_ERR(f->clk)) {
- ret = PTR_ERR(f->clk);
- goto err_put_ctrl;
- }
+ f->clk = devm_clk_get(dev, "fspi");
+ if (IS_ERR(f->clk)) {
+ ret = PTR_ERR(f->clk);
+ goto err_put_ctrl;
+ }
- ret = nxp_fspi_clk_prep_enable(f);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- goto err_put_ctrl;
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret) {
+ dev_err(dev, "can not enable the clock\n");
+ goto err_put_ctrl;
+ }
}
/* find the irq */
@@ -1127,6 +1149,14 @@ static const struct of_device_id nxp_fspi_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id nxp_fspi_acpi_ids[] = {
+ { "NXP0009", .driver_data = (kernel_ulong_t)&lx2160a_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids);
+#endif
+
static const struct dev_pm_ops nxp_fspi_pm_ops = {
.suspend = nxp_fspi_suspend,
.resume = nxp_fspi_resume,
@@ -1136,6 +1166,7 @@ static struct platform_driver nxp_fspi_driver = {
.driver = {
.name = "nxp-fspi",
.of_match_table = nxp_fspi_dt_ids,
+ .acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids),
.pm = &nxp_fspi_pm_ops,
},
.probe = nxp_fspi_probe,
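
The probe path above becomes firmware-agnostic. The two decision points,
condensed into a sketch built from the driver's own calls:
device_get_match_data() resolves the .data/.driver_data pointer from
whichever of the OF or ACPI match tables fired, and resources are fetched
by index under ACPI (which has no reg-names) versus by name under DT:

	f->devtype_data = device_get_match_data(dev);	/* OF or ACPI */
	if (!f->devtype_data)
		return -ENODEV;

	if (is_acpi_node(f->dev->fwnode))
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	else
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "fspi_base");

The clock prepare/unprepare paths short-circuit on ACPI because those
clocks are not exposed to the kernel clk framework on such platforms.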
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 1c9478e6e5d9..d4c9510af393 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>
-#include <linux/iopoll.h>
#include <linux/spi/spi.h>
@@ -348,9 +347,19 @@ disable_fifo:
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
- u32 val;
-
- return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(readl_relaxed(reg) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (readl_relaxed(reg) & bit)
+ return 0;
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
}
static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
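
The open-coded loop above replaces readl_poll_timeout(). Its general shape
is worth spelling out as a sketch, because the re-read after the deadline
is what prevents a false timeout when the polling thread gets scheduled
out between its last poll and the deadline check (kernel-context sketch):

	static int poll_bit_sketch(void __iomem *reg, u32 bit,
				   unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		while (!(readl_relaxed(reg) & bit)) {
			if (time_after(jiffies, deadline))
				/* final re-read closes the preemption race */
				return (readl_relaxed(reg) & bit) ?
					0 : -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}
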
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index b8857a97f40a..5eed88af6899 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -143,7 +143,6 @@ struct qcom_qspi {
struct qspi_xfer xfer;
struct icc_path *icc_path_cpu_to_qspi;
struct opp_table *opp_table;
- bool has_opp_table;
unsigned long last_speed;
/* Lock to protect data accessed by IRQs */
spinlock_t lock;
@@ -421,9 +420,8 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
u32 int_status;
struct qcom_qspi *ctrl = dev_id;
irqreturn_t ret = IRQ_NONE;
- unsigned long flags;
- spin_lock_irqsave(&ctrl->lock, flags);
+ spin_lock(&ctrl->lock);
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
@@ -451,7 +449,7 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ spin_unlock(&ctrl->lock);
return ret;
}
@@ -495,9 +493,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
if (IS_ERR(ctrl->icc_path_cpu_to_qspi)) {
- ret = PTR_ERR(ctrl->icc_path_cpu_to_qspi);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get cpu path: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
+ "Failed to get cpu path\n");
goto exit_probe_master_put;
}
/* Set BW vote for register access */
@@ -546,11 +543,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
}
/* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev);
- if (!ret) {
- ctrl->has_opp_table = true;
- } else if (ret != -ENODEV) {
+ if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- goto exit_probe_master_put;
+ goto exit_probe_put_clkname;
}
pm_runtime_use_autosuspend(dev);
@@ -562,8 +557,9 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return 0;
pm_runtime_disable(dev);
- if (ctrl->has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
+
+exit_probe_put_clkname:
dev_pm_opp_put_clkname(ctrl->opp_table);
exit_probe_master_put:
@@ -581,8 +577,7 @@ static int qcom_qspi_remove(struct platform_device *pdev)
spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
- if (ctrl->has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(ctrl->opp_table);
return 0;
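
Two independent cleanups land in this file. First, a hardirq handler runs
with interrupts disabled on the local CPU, so the plain lock form suffices
inside it; only process-context users of the same lock keep the _irqsave
variant. A sketch with a hypothetical sketch_ctrl:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct sketch_ctrl {
		spinlock_t lock;
	};

	static irqreturn_t sketch_irq(int irq, void *dev_id)
	{
		struct sketch_ctrl *ctrl = dev_id;

		spin_lock(&ctrl->lock);	/* no irqsave needed in hardirq */
		/* read and ack status, finalize the transfer */
		spin_unlock(&ctrl->lock);
		return IRQ_HANDLED;
	}

Second, dev_pm_opp_of_remove_table() is safe to call even when no OPP
table was added, so the has_opp_table bookkeeping can be dropped.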
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index a364b99497e2..8dcb2e70735c 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -848,7 +848,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
{
struct spi_qup *controller = spi_master_get_devdata(master);
unsigned long timeout, flags;
- int ret = -EIO;
+ int ret;
ret = spi_qup_io_prep(spi, xfer);
if (ret)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index cbc2387d450c..e39fd38f5180 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -161,6 +161,7 @@
#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */
#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_BRDV(brdv) ((brdv) << 2)
#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
@@ -242,24 +243,40 @@ struct spi_ops {
int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer);
u16 extra_mode_bits;
+ u16 min_div;
+ u16 max_div;
u16 flags;
u16 fifo_size;
u8 num_hw_ss;
};
+static void rspi_set_rate(struct rspi_data *rspi)
+{
+ unsigned long clksrc;
+ int brdv = 0, spbr;
+
+ clksrc = clk_get_rate(rspi->clk);
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
+ while (spbr > 255 && brdv < 3) {
+ brdv++;
+ spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
+ }
+
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+ rspi->spcmd |= SPCMD_BRDV(brdv);
+ rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * (spbr + 1));
+}
+
/*
* functions for RSPI on legacy SH
*/
static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int spbr;
-
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
- rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+ rspi_set_rate(rspi);
/* Disable dummy transmission, set 16-bit word access, 1 frame */
rspi_write8(rspi, 0, RSPI_SPDCR);
@@ -289,25 +306,11 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
*/
static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
{
- int spbr;
- int div = 0;
- unsigned long clksrc;
-
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
- clksrc = clk_get_rate(rspi->clk);
- while (div < 3) {
- if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
- break;
- div++;
- clksrc /= 2;
- }
-
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
- rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- rspi->spcmd |= div << 2;
+ rspi_set_rate(rspi);
/* Disable dummy transmission, set byte access */
rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
@@ -334,14 +337,28 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
*/
static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- int spbr;
+ unsigned long clksrc;
+ int brdv = 0, spbr;
/* Sets output mode, MOSI signal, and (optionally) loopback */
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
- rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+ clksrc = clk_get_rate(rspi->clk);
+ if (rspi->speed_hz >= clksrc) {
+ spbr = 0;
+ rspi->speed_hz = clksrc;
+ } else {
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz);
+ while (spbr > 255 && brdv < 3) {
+ brdv++;
+ spbr = DIV_ROUND_UP(spbr, 2);
+ }
+ spbr = clamp(spbr, 0, 255);
+ rspi->speed_hz = DIV_ROUND_UP(clksrc, (2U << brdv) * spbr);
+ }
+ rspi_write8(rspi, spbr, RSPI_SPBR);
+ rspi->spcmd |= SPCMD_BRDV(brdv);
/* Disable dummy transmission, set byte access */
rspi_write8(rspi, 0, RSPI_SPDCR);
@@ -686,6 +703,8 @@ static int rspi_common_transfer(struct rspi_data *rspi,
{
int ret;
+ xfer->effective_speed_hz = rspi->speed_hz;
+
ret = rspi_dma_check_then_transfer(rspi, xfer);
if (ret != -EAGAIN)
return ret;
@@ -841,6 +860,7 @@ static int qspi_transfer_one(struct spi_controller *ctlr,
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
+ xfer->effective_speed_hz = rspi->speed_hz;
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
} else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
@@ -1163,6 +1183,8 @@ static int rspi_remove(struct platform_device *pdev)
static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
+ .min_div = 2,
+ .max_div = 4096,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
.num_hw_ss = 2,
@@ -1171,6 +1193,8 @@ static const struct spi_ops rspi_ops = {
static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
+ .min_div = 2,
+ .max_div = 4096,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
.num_hw_ss = 1,
@@ -1181,6 +1205,8 @@ static const struct spi_ops qspi_ops = {
.transfer_one = qspi_transfer_one,
.extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
+ .min_div = 1,
+ .max_div = 4080,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
.num_hw_ss = 1,
@@ -1242,6 +1268,7 @@ static int rspi_probe(struct platform_device *pdev)
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
+ unsigned long clksrc;
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (ctlr == NULL)
@@ -1261,13 +1288,6 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->num_chipselect = 2; /* default */
}
- /* ops parameter check */
- if (!ops->set_config_register) {
- dev_err(&pdev->dev, "there is no set_config_register\n");
- ret = -ENODEV;
- goto error1;
- }
-
rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
@@ -1301,6 +1321,9 @@ static int rspi_probe(struct platform_device *pdev)
ctlr->unprepare_message = rspi_unprepare_message;
ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_LOOP | ops->extra_mode_bits;
+ clksrc = clk_get_rate(rspi->clk);
+ ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div);
+ ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div);
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->use_gpio_descriptors = true;
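
rspi_set_rate() folds the divider search into one helper: SPBR is the
8-bit linear divider and BRDV adds up to three power-of-two prescaler
steps, so the effective rate is clksrc / (2^(brdv+1) * (spbr+1)). The new
min_div/max_div entries follow from the same formula: 2 at brdv=0/spbr=0
and 4096 at brdv=3/spbr=255. A standalone worked example (a sketch, not
driver code; the 48 MHz / 50 kHz figures are illustrative):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long clksrc = 48000000, speed_hz = 50000;
		long spbr = DIV_ROUND_UP(clksrc, 2 * speed_hz) - 1; /* 479 */
		int brdv = 0;

		while (spbr > 255 && brdv < 3) {
			brdv++;			/* halve via prescaler */
			spbr = DIV_ROUND_UP(spbr + 1, 2) - 1;
		}
		/* prints "BRDV=1 SPBR=239 -> 50000 Hz" */
		printf("BRDV=%d SPBR=%ld -> %lu Hz\n", brdv, spbr,
		       DIV_ROUND_UP(clksrc, (2UL << brdv) * (spbr + 1)));
		return 0;
	}
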
diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
deleted file mode 100644
index e95d6282109e..000000000000
--- a/drivers/spi/spi-s3c24xx-fiq.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/spi/spi_s3c24xx_fiq.S
- *
- * Copyright 2009 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX SPI - FIQ pseudo-DMA transfer code
-*/
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-#include <mach/map.h>
-#include <mach/regs-irq.h>
-#include <plat/regs-spi.h>
-
-#include "spi-s3c24xx-fiq.h"
-
- .text
-
- @ entry to these routines is as follows, with the register names
- @ defined in fiq.h so that they can be shared with the C files which
- @ setup the calling registers.
- @
- @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
- @ fiq_rtmp Temporary register to hold tx/rx data
- @ fiq_rspi The base of the SPI register block
- @ fiq_rtx The tx buffer pointer
- @ fiq_rrx The rx buffer pointer
- @ fiq_rcount The number of bytes to move
-
- @ each entry starts with a word entry of how long it is
- @ and an offset to the irq acknowledgment word
-
-ENTRY(s3c24xx_spi_fiq_rx)
-s3c24xx_spi_fix_rx:
- .word fiq_rx_end - fiq_rx_start
- .word fiq_rx_irq_ack - fiq_rx_start
-fiq_rx_start:
- ldr fiq_rtmp, fiq_rx_irq_ack
- str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
-
- ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
- strb fiq_rtmp, [ fiq_rrx ], #1
-
- mov fiq_rtmp, #0xff
- strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
-
- subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
-
- @@ set IRQ controller so that next op will trigger IRQ
- mov fiq_rtmp, #0
- str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
- subs pc, lr, #4
-
-fiq_rx_irq_ack:
- .word 0
-fiq_rx_end:
-
-ENTRY(s3c24xx_spi_fiq_txrx)
-s3c24xx_spi_fiq_txrx:
- .word fiq_txrx_end - fiq_txrx_start
- .word fiq_txrx_irq_ack - fiq_txrx_start
-fiq_txrx_start:
-
- ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
- strb fiq_rtmp, [ fiq_rrx ], #1
-
- ldr fiq_rtmp, fiq_txrx_irq_ack
- str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
-
- ldrb fiq_rtmp, [ fiq_rtx ], #1
- strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
-
- subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
-
- mov fiq_rtmp, #0
- str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
- subs pc, lr, #4
-
-fiq_txrx_irq_ack:
- .word 0
-
-fiq_txrx_end:
-
-ENTRY(s3c24xx_spi_fiq_tx)
-s3c24xx_spi_fix_tx:
- .word fiq_tx_end - fiq_tx_start
- .word fiq_tx_irq_ack - fiq_tx_start
-fiq_tx_start:
- ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
-
- ldr fiq_rtmp, fiq_tx_irq_ack
- str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
-
- ldrb fiq_rtmp, [ fiq_rtx ], #1
- strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
-
- subs fiq_rcount, fiq_rcount, #1
- subnes pc, lr, #4 @@ return, still have work to do
-
- mov fiq_rtmp, #0
- str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
- subs pc, lr, #4
-
-fiq_tx_irq_ack:
- .word 0
-
-fiq_tx_end:
-
- .end
diff --git a/drivers/spi/spi-s3c24xx-fiq.h b/drivers/spi/spi-s3c24xx-fiq.h
deleted file mode 100644
index 7786b0ea56ec..000000000000
--- a/drivers/spi/spi-s3c24xx-fiq.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/spi/spi_s3c24xx_fiq.h
- *
- * Copyright 2009 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX SPI - FIQ pseudo-DMA transfer support
-*/
-
-/* We have R8 through R13 to play with */
-
-#ifdef __ASSEMBLY__
-#define __REG_NR(x) r##x
-#else
-#define __REG_NR(x) (x)
-#endif
-
-#define fiq_rspi __REG_NR(8)
-#define fiq_rtmp __REG_NR(9)
-#define fiq_rrx __REG_NR(10)
-#define fiq_rtx __REG_NR(11)
-#define fiq_rcount __REG_NR(12)
-#define fiq_rirq __REG_NR(13)
diff --git a/drivers/spi/spi-s3c24xx-regs.h b/drivers/spi/spi-s3c24xx-regs.h
new file mode 100644
index 000000000000..f51464ab5677
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx-regs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2004 Fetron GmbH
+ *
+ * S3C2410 SPI register definition
+ */
+
+#ifndef __SPI_S3C2410_H
+#define __SPI_S3C2410_H
+
+#define S3C2410_SPCON (0x00)
+
+#define S3C2410_SPCON_SMOD_DMA (2 << 5) /* DMA mode */
+#define S3C2410_SPCON_SMOD_INT (1 << 5) /* interrupt mode */
+#define S3C2410_SPCON_SMOD_POLL (0 << 5) /* polling mode */
+#define S3C2410_SPCON_ENSCK (1 << 4) /* Enable SCK */
+#define S3C2410_SPCON_MSTR (1 << 3) /* Master:1, Slave:0 select */
+#define S3C2410_SPCON_CPOL_HIGH (1 << 2) /* Clock polarity select */
+#define S3C2410_SPCON_CPOL_LOW (0 << 2) /* Clock polarity select */
+
+#define S3C2410_SPCON_CPHA_FMTB (1 << 1) /* Clock Phase Select */
+#define S3C2410_SPCON_CPHA_FMTA (0 << 1) /* Clock Phase Select */
+
+#define S3C2410_SPSTA (0x04)
+
+#define S3C2410_SPSTA_DCOL (1 << 2) /* Data Collision Error */
+#define S3C2410_SPSTA_MULD (1 << 1) /* Multi Master Error */
+#define S3C2410_SPSTA_READY (1 << 0) /* Data Tx/Rx ready */
+#define S3C2412_SPSTA_READY_ORG (1 << 3)
+
+#define S3C2410_SPPIN (0x08)
+
+#define S3C2410_SPPIN_ENMUL (1 << 2) /* Multi Master Error detect */
+#define S3C2410_SPPIN_RESERVED (1 << 1)
+#define S3C2410_SPPIN_KEEP (1 << 0) /* Master Out keep */
+
+#define S3C2410_SPPRE (0x0C)
+#define S3C2410_SPTDAT (0x10)
+#define S3C2410_SPRDAT (0x14)
+
+#endif /* __SPI_S3C2410_H */
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 2cb3b611c294..d6f51695ca5b 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -19,16 +19,15 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/s3c24xx.h>
+#include <linux/spi/s3c24xx-fiq.h>
#include <linux/module.h>
-#include <plat/regs-spi.h>
-
#include <asm/fiq.h>
-#include "spi-s3c24xx-fiq.h"
+#include "spi-s3c24xx-regs.h"
/**
- * s3c24xx_spi_devstate - per device data
+ * struct s3c24xx_spi_devstate - per device data
* @hz: Last frequency calculated for @sppre field.
* @mode: Last mode setting for the @spcon field.
* @spcon: Value to write to the SPCON register.
@@ -230,21 +229,6 @@ struct spi_fiq_code {
u8 data[];
};
-extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
-extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
-extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
-
-/**
- * ack_bit - turn IRQ into IRQ acknowledgement bit
- * @irq: The interrupt number
- *
- * Returns the bit to write to the interrupt acknowledge register.
- */
-static inline u32 ack_bit(unsigned int irq)
-{
- return 1 << (irq - IRQ_EINT0);
-}
-
/**
* s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
* @hw: The hardware state.
@@ -261,6 +245,7 @@ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
struct pt_regs regs;
enum spi_fiq_mode mode;
struct spi_fiq_code *code;
+ u32 *ack_ptr = NULL;
int ret;
if (!hw->fiq_claimed) {
@@ -283,13 +268,10 @@ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
regs.uregs[fiq_rrx] = (long)hw->rx;
regs.uregs[fiq_rtx] = (long)hw->tx + 1;
regs.uregs[fiq_rcount] = hw->len - 1;
- regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
set_fiq_regs(&regs);
if (hw->fiq_mode != mode) {
- u32 *ack_ptr;
-
hw->fiq_mode = mode;
switch (mode) {
@@ -309,12 +291,10 @@ static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
BUG_ON(!code);
ack_ptr = (u32 *)&code->data[code->ack_offset];
- *ack_ptr = ack_bit(hw->irq);
-
set_fiq_handler(&code->data, code->length);
}
- s3c24xx_set_fiq(hw->irq, true);
+ s3c24xx_set_fiq(hw->irq, ack_ptr, true);
hw->fiq_mode = mode;
hw->fiq_inuse = 1;
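
The deleted spi-s3c24xx-fiq.S and its register-name header do not simply
vanish: in this series the FIQ pseudo-DMA handler is owned by the s3c24xx
platform code, which also patches the IRQ acknowledge word itself. That is
why the driver now hands over ack_ptr instead of writing ack_bit(hw->irq)
into the handler body. The prototype implied by the new call site (an
inference from this diff, not quoted from a header):

	/* inferred from s3c24xx_set_fiq(hw->irq, ack_ptr, true) above */
	int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on);
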
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 924b24441789..dfa7c91e13aa 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -29,7 +29,7 @@
#define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08
-#define S3C64XX_SPI_SLAVE_SEL 0x0C
+#define S3C64XX_SPI_CS_REG 0x0C
#define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18
@@ -64,9 +64,9 @@
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0)
-#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
-#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
-#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
+#define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
+#define S3C64XX_SPI_CS_AUTO (1<<1)
+#define S3C64XX_SPI_CS_SIG_INACT (1<<0)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
@@ -122,6 +122,7 @@
struct s3c64xx_spi_dma_data {
struct dma_chan *ch;
+ dma_cookie_t cookie;
enum dma_transfer_direction direction;
};
@@ -161,11 +162,8 @@ struct s3c64xx_spi_port_config {
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
- * @rx_dmach: Controller's DMA channel for Rx.
- * @tx_dmach: Controller's DMA channel for Tx.
* @sfr_start: BUS address of SPI controller regs.
* @regs: Pointer to ioremap'ed controller registers.
- * @irq: interrupt
* @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings.
@@ -182,7 +180,7 @@ struct s3c64xx_spi_driver_data {
struct clk *ioclk;
struct platform_device *pdev;
struct spi_master *master;
- struct s3c64xx_spi_info *cntrlr_info;
+ struct s3c64xx_spi_info *cntrlr_info;
spinlock_t lock;
unsigned long sfr_start;
struct completion xfer_completion;
@@ -271,12 +269,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
struct dma_async_tx_descriptor *desc;
+ int ret;
memset(&config, 0, sizeof(config));
@@ -300,12 +299,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
+ dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
+ return -ENOMEM;
+ }
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
- dmaengine_submit(desc);
+ dma->cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret) {
+ dev_err(&sdd->pdev->dev, "DMA submission failed");
+ return -EIO;
+ }
+
dma_async_issue_pending(dma->ch);
+ return 0;
}
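
prepare_dma() now reports failures instead of assuming success. The
generic dmaengine submission sequence has three checkpoints, sketched
below; the stored cookie is also what the timeout path later feeds to
dmaengine_tx_status() for residue reporting:

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)			/* prep may fail, e.g. no descriptors */
		return -ENOMEM;

	desc->callback = done_cb;
	desc->callback_param = ctx;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))	/* submit returns an error cookie */
		return -EIO;

	dma_async_issue_pending(chan);	/* only now does the transfer start */
	return 0;
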
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@@ -318,18 +329,18 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
if (enable) {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
} else {
- u32 ssel = readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
- ssel |= (S3C64XX_SPI_SLAVE_AUTO |
- S3C64XX_SPI_SLAVE_NSC_CNT_2);
- writel(ssel, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ ssel |= (S3C64XX_SPI_CS_AUTO |
+ S3C64XX_SPI_CS_NSC_CNT_2);
+ writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
}
} else {
if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
- writel(S3C64XX_SPI_SLAVE_SIG_INACT,
- sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ writel(S3C64XX_SPI_CS_SIG_INACT,
+ sdd->regs + S3C64XX_SPI_CS_REG);
}
}
@@ -355,11 +366,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
-static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
+ int ret = 0;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@@ -385,7 +397,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -417,12 +429,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
+ if (ret)
+ return ret;
+
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+
+ return 0;
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@@ -456,7 +473,8 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
- ms += 10; /* some tolerance */
+ ms += 30; /* some tolerance */
+ ms = max(ms, 100); /* minimum timeout */
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
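
Worked numbers for the adjusted timeout budget (illustrative 4 KiB at
1 MHz; a sketch, with the constant type-matched for the kernel's max()):

	unsigned long len = 4096, speed_hz = 1000000;
	unsigned long ms = len * 8 * 1000 / speed_hz;	/* 32 ms on the wire */

	ms += 30;		/* tolerance for setup/IRQ latency: 62 ms */
	ms = max(ms, 100UL);	/* floor for slow setups: 100 ms */
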
@@ -555,9 +573,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
return 0;
}
-static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
+ int ret;
u32 val;
/* Disable Clock */
@@ -605,7 +624,10 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
if (sdd->port_conf->clk_from_cmu) {
/* The src_clk clock is divided internally by 2 */
- clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ if (ret)
+ return ret;
+ sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -619,6 +641,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
+
+ return 0;
}
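
clk_set_rate() may round the request to whatever the clock tree can
actually provide, so the hunk above checks its return value and then
recomputes cur_speed from clk_get_rate() rather than trusting the request.
The generic shape, as a sketch:

	ret = clk_set_rate(clk, target_hz);
	if (ret)
		return ret;
	actual_hz = clk_get_rate(clk);	/* trust the readback, not the ask */
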
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@@ -661,7 +685,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
sdd->cur_mode = spi->mode;
- s3c64xx_spi_config(sdd);
+ status = s3c64xx_spi_config(sdd);
+ if (status)
+ return status;
}
if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@@ -685,13 +711,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
- s3c64xx_enable_datapath(sdd, xfer, use_dma);
-
/* Start the signals */
s3c64xx_spi_set_cs(spi, true);
+ status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
+
spin_unlock_irqrestore(&sdd->lock, flags);
+ if (status) {
+ dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
+ break;
+ }
+
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
@@ -699,17 +730,28 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (status) {
dev_err(&spi->dev,
- "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ "I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
- xfer->len);
+ xfer->len, use_dma ? 1 : 0, status);
if (use_dma) {
- if (xfer->tx_buf && (sdd->state & TXBUSY))
+ struct dma_tx_state s;
+
+ if (xfer->tx_buf && (sdd->state & TXBUSY)) {
+ dmaengine_pause(sdd->tx_dma.ch);
+ dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
dmaengine_terminate_all(sdd->tx_dma.ch);
- if (xfer->rx_buf && (sdd->state & RXBUSY))
+ dev_err(&spi->dev, "TX residue: %d\n", s.residue);
+
+ }
+ if (xfer->rx_buf && (sdd->state & RXBUSY)) {
+ dmaengine_pause(sdd->rx_dma.ch);
+ dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
dmaengine_terminate_all(sdd->rx_dma.ch);
+ dev_err(&spi->dev, "RX residue: %d\n", s.residue);
+ }
}
} else {
s3c64xx_flush_fifo(sdd);
@@ -939,9 +981,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
sdd->cur_speed = 0;
if (sci->no_cs)
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
@@ -1336,6 +1378,10 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
s3c64xx_spi_hwinit(sdd);
+ writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
+ S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
return 0;
err_disable_src_clk:
@@ -1379,6 +1425,7 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.tx_st_done = 25,
.high_speed = true,
.clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 127b8bd25831..392ec5cfa3d6 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -504,10 +504,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "no hardware spinlock supplied\n");
break;
default:
- dev_err(&pdev->dev,
- "failed to find hwlock id, %d\n", ret);
- fallthrough;
- case -EPROBE_DEFER:
+ dev_err_probe(&pdev->dev, ret, "failed to find hwlock id\n");
goto put_ctlr;
}
}
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 6678f1cbc566..635738f54c73 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -553,22 +553,15 @@ static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
static int sprd_spi_dma_request(struct sprd_spi *ss)
{
ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
- if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) {
- if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER)
- return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
-
- dev_err(ss->dev, "request RX DMA channel failed!\n");
- return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
- }
+ if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX]))
+ return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]),
+ "request RX DMA channel failed!\n");
ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
- if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
- return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
-
- dev_err(ss->dev, "request TX DMA channel failed!\n");
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
- return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
+ return dev_err_probe(ss->dev, PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]),
+ "request TX DMA channel failed!\n");
}
return 0;
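
Besides the dev_err_probe() conversion, note the unwind ordering that is
preserved above: dma_request_chan() returns an ERR_PTR (never NULL), and a
failure on the second channel must release the first. A sketch:

	rx = dma_request_chan(dev, "rx_chn");
	if (IS_ERR(rx))
		return dev_err_probe(dev, PTR_ERR(rx),
				     "no RX DMA channel\n");

	tx = dma_request_chan(dev, "tx_chn");
	if (IS_ERR(tx)) {
		dma_release_channel(rx);	/* undo the first request */
		return dev_err_probe(dev, PTR_ERR(tx),
				     "no TX DMA channel\n");
	}
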
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 3056428b09f3..2cc850eb8922 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -804,10 +804,9 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
struct spi_master *master = dev_id;
struct stm32_spi *spi = spi_master_get_devdata(master);
u32 sr, mask = 0;
- unsigned long flags;
bool end = false;
- spin_lock_irqsave(&spi->lock, flags);
+ spin_lock(&spi->lock);
sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
/*
@@ -833,7 +832,7 @@ static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
if (!(sr & mask)) {
dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_NONE;
}
@@ -875,11 +874,11 @@ end_irq:
STM32F4_SPI_CR2_TXEIE |
STM32F4_SPI_CR2_RXNEIE |
STM32F4_SPI_CR2_ERRIE);
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_WAKE_THREAD;
}
- spin_unlock_irqrestore(&spi->lock, flags);
+ spin_unlock(&spi->lock);
return IRQ_HANDLED;
}
@@ -1861,9 +1860,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0) {
- ret = spi->irq;
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
goto err_master_put;
}
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index ae17c99cce03..42e82dbe3d41 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -640,9 +640,8 @@ static int synquacer_spi_probe(struct platform_device *pdev)
}
if (IS_ERR(sspi->clk)) {
- if (!(PTR_ERR(sspi->clk) == -EPROBE_DEFER))
- dev_err(&pdev->dev, "clock not found\n");
- ret = PTR_ERR(sspi->clk);
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(sspi->clk),
+ "clock not found\n");
goto put_spi;
}
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index c2c58871a947..ca6886aaa519 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -664,16 +664,11 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
struct dma_chan *dma_chan;
u32 *dma_buf;
dma_addr_t dma_phys;
- int ret;
dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
- if (IS_ERR(dma_chan)) {
- ret = PTR_ERR(dma_chan);
- if (ret != -EPROBE_DEFER)
- dev_err(tspi->dev,
- "Dma channel is not available: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(dma_chan))
+ return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
+ "Dma channel is not available\n");
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
&dma_phys, GFP_KERNEL);
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 02cf5f463ba6..b59015c7c8a8 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -359,9 +359,8 @@ exit:
static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
{
struct spi_transfer *t = tsd->curr_xfer;
- unsigned long flags;
- spin_lock_irqsave(&tsd->lock, flags);
+ spin_lock(&tsd->lock);
if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
dev_err(tsd->dev,
"CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
@@ -391,7 +390,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
tegra_sflash_start_cpu_based_transfer(tsd, t);
exit:
- spin_unlock_irqrestore(&tsd->lock, flags);
+ spin_unlock(&tsd->lock);
return IRQ_HANDLED;
}
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index a07b72e9c344..a0810765d4e5 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -600,13 +600,9 @@ static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
struct dma_slave_config dma_sconfig;
dma_chan = dma_request_chan(tspi->dev, dma_to_memory ? "rx" : "tx");
- if (IS_ERR(dma_chan)) {
- ret = PTR_ERR(dma_chan);
- if (ret != -EPROBE_DEFER)
- dev_err(tspi->dev,
- "Dma channel is not available: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(dma_chan))
+ return dev_err_probe(tspi->dev, PTR_ERR(dma_chan),
+ "Dma channel is not available\n");
dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
&dma_phys, GFP_KERNEL);
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 6df2aeff2843..b459e369079f 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1002,7 +1002,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
spin_unlock_irqrestore(&data->lock, flags);
/* RX */
- dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
+ dma->sg_rx_p = kmalloc_array(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
if (!dma->sg_rx_p)
return;
@@ -1065,7 +1065,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
head = 0;
}
- dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
+ dma->sg_tx_p = kmalloc_array(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
if (!dma->sg_tx_p)
return;
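
kmalloc_array() keeps kcalloc()'s multiplication-overflow check on
num * size but drops the zeroing, which is redundant when the scatterlist
is initialized immediately after allocation, as this driver does:

	sg = kmalloc_array(num, sizeof(*sg), GFP_ATOMIC); /* checked n*size */
	if (!sg)
		return;
	sg_init_table(sg, num);	/* zeroes and terminates every entry */
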
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 8dd2bb99cb4d..523edfdf5dcd 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -491,8 +491,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
goto put_master;
}
- dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)res->start, xspi->regs, xspi->irq);
+ dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
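
The %pR printk extension formats a struct resource with its type and
range, replacing the hand-rolled cast-and-hex formatting:

	dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
	/* e.g. "at [mem 0x40a00000-0x40a0ffff], irq=24" (illustrative) */
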
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index e17a20125255..c8fa6ee18ae7 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -21,6 +21,7 @@
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/spi/spi-mem.h>
/* Generic QSPI register offsets */
#define GQSPI_CONFIG_OFST 0x00000100
@@ -153,6 +154,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
* @dma_addr: DMA address after mapping the kernel buffer
* @genfifoentry: Used for storing the genfifoentry instruction.
* @mode: Defines the mode in which QSPI is operating
+ * @data_completion: completion structure
*/
struct zynqmp_qspi {
void __iomem *regs;
@@ -170,12 +172,14 @@ struct zynqmp_qspi {
dma_addr_t dma_addr;
u32 genfifoentry;
enum mode_type mode;
+ struct completion data_completion;
};
/**
- * zynqmp_gqspi_read: For GQSPI controller read operation
+ * zynqmp_gqspi_read - For GQSPI controller read operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset from where to read
+ * Return: Value at the offset
*/
static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
{
@@ -183,7 +187,7 @@ static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
}
/**
- * zynqmp_gqspi_write: For GQSPI controller write operation
+ * zynqmp_gqspi_write - For GQSPI controller write operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset where to write
* @val: Value to be written
@@ -195,7 +199,7 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave: For selection of slave device
+ * zynqmp_gqspi_selectslave - For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
* @slavecs: For chip select
* @slavebus: To check which bus is selected- upper or lower
@@ -242,7 +246,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
}
/**
- * zynqmp_qspi_init_hw: Initialize the hardware
+ * zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
@@ -322,15 +326,15 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
GQSPI_SELECT_FLASH_BUS_LOWER);
/* Initialize DMA */
zynqmp_gqspi_write(xqspi,
- GQSPI_QSPIDMA_DST_CTRL_OFST,
- GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
+ GQSPI_QSPIDMA_DST_CTRL_OFST,
+ GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
/* Enable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
/**
- * zynqmp_qspi_copy_read_data: Copy data to RX buffer
+ * zynqmp_qspi_copy_read_data - Copy data to RX buffer
* @xqspi: Pointer to the zynqmp_qspi structure
* @data: The variable where data is stored
* @size: Number of bytes to be copied from data to RX buffer
@@ -344,41 +348,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
- * @master: Pointer to the spi_master structure which provides
- * information about the controller.
- *
- * This function enables SPI master controller.
- *
- * Return: 0 on success; error value otherwise
- */
-static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
-{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
-
- zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
- return 0;
-}
-
-/**
- * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
- * @master: Pointer to the spi_master structure which provides
- * information about the controller.
- *
- * This function disables the SPI master controller.
- *
- * Return: Always 0
- */
-static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
-{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
-
- zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
- return 0;
-}
-
-/**
- * zynqmp_qspi_chipselect: Select or deselect the chip select line
+ * zynqmp_qspi_chipselect - Select or deselect the chip select line
* @qspi: Pointer to the spi_device structure
* @is_high: Select(0) or deselect (1) the chip select line
*/
@@ -386,12 +356,14 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
ulong timeout;
- u32 genfifoentry = 0x0, statusreg;
+ u32 genfifoentry = 0, statusreg;
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
- genfifoentry |= xqspi->genfifobus;
if (!is_high) {
+ xqspi->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
+ xqspi->genfifocs = GQSPI_GENFIFO_CS_LOWER;
+ genfifoentry |= xqspi->genfifobus;
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
} else {
@@ -402,8 +374,8 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
- GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
timeout = jiffies + msecs_to_jiffies(1000);
@@ -412,10 +384,9 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
- (statusreg & GQSPI_ISR_TXEMPTY_MASK))
+ (statusreg & GQSPI_ISR_TXEMPTY_MASK))
break;
- else
- cpu_relax();
+ cpu_relax();
} while (!time_after_eq(jiffies, timeout));
if (time_after_eq(jiffies, timeout))
@@ -423,11 +394,38 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
}
/**
- * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
+ * zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
+ * @xqspi: xqspi is a pointer to the GQSPI instance
+ * @spimode: spimode - SPI or DUAL or QUAD.
+ * Return: Mask to set desired SPI mode in GENFIFO entry.
+ */
+static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
+ u8 spimode)
+{
+ u32 mask = 0;
+
+ switch (spimode) {
+ case GQSPI_SELECT_MODE_DUALSPI:
+ mask = GQSPI_GENFIFO_MODE_DUALSPI;
+ break;
+ case GQSPI_SELECT_MODE_QUADSPI:
+ mask = GQSPI_GENFIFO_MODE_QUADSPI;
+ break;
+ case GQSPI_SELECT_MODE_SPI:
+ mask = GQSPI_GENFIFO_MODE_SPI;
+ break;
+ default:
+ dev_warn(xqspi->dev, "Invalid SPI mode\n");
+ }
+
+ return mask;
+}
+
+/**
+ * zynqmp_qspi_config_op - Configure QSPI controller for specified
* transfer
+ * @xqspi: Pointer to the zynqmp_qspi structure
* @qspi: Pointer to the spi_device structure
- * @transfer: Pointer to the spi_transfer structure which provides
- * information about next transfer setup parameters
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -444,17 +442,11 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
* by the QSPI controller the driver will set the highest or lowest
* frequency supported by controller.
*/
-static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
- struct spi_transfer *transfer)
+static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
+ struct spi_device *qspi)
{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
ulong clk_rate;
- u32 config_reg, req_hz, baud_rate_val = 0;
-
- if (transfer)
- req_hz = transfer->speed_hz;
- else
- req_hz = qspi->max_speed_hz;
+ u32 config_reg, baud_rate_val = 0;
/* Set the clock frequency */
/* If req_hz == 0, default to lowest speed */
@@ -462,7 +454,7 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
(clk_rate /
- (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > qspi->max_speed_hz)
baud_rate_val++;
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
@@ -482,7 +474,7 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
}
/**
- * zynqmp_qspi_setup: Configure the QSPI controller
+ * zynqmp_qspi_setup_op - Configure the QSPI controller
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer,
@@ -490,15 +482,35 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
*
* Return: 0 on success; error value otherwise.
*/
-static int zynqmp_qspi_setup(struct spi_device *qspi)
+static int zynqmp_qspi_setup_op(struct spi_device *qspi)
{
- if (qspi->master->busy)
+ struct spi_controller *ctlr = qspi->master;
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
+ struct device *dev = &ctlr->dev;
+ int ret;
+
+ if (ctlr->busy)
return -EBUSY;
+
+ ret = clk_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ clk_disable(xqspi->refclk);
+ return ret;
+ }
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
+
return 0;
}
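
clk_enable()/clk_disable() are the non-sleeping halves of the clk API, so
the sequence above assumes both clocks were prepared back in probe. The
shape to note is the unwind of the first enable when the second fails:

	ret = clk_enable(xqspi->refclk);
	if (ret)
		return ret;

	ret = clk_enable(xqspi->pclk);
	if (ret) {
		clk_disable(xqspi->refclk);	/* undo the first enable */
		return ret;
	}
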
/**
- * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
- * the FIFO or the bytes required to be
- * transmitted.
+ * zynqmp_qspi_filltxfifo - Fills the TX FIFO with transmit data until the
+ * FIFO is full or all bytes have been queued.
* @xqspi: Pointer to the zynqmp_qspi structure
@@ -524,7 +536,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
}
/**
- * zynqmp_qspi_readrxfifo: Fills the RX FIFO as long as there is room in
- * the FIFO.
+ * zynqmp_qspi_readrxfifo - Drains the RX FIFO into the receive buffer
+ * while bytes remain to be received.
* @xqspi: Pointer to the zynqmp_qspi structure
- * @size: Number of bytes to be copied from RX buffer to RX FIFO
+ * @size: Number of bytes to copy from the RX FIFO to the RX buffer
@@ -536,7 +548,7 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
while ((count < size) && (xqspi->bytes_to_receive > 0)) {
if (xqspi->bytes_to_receive >= 4) {
- (*(u32 *) xqspi->rxbuf) =
+ (*(u32 *)xqspi->rxbuf) =
zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
xqspi->rxbuf += 4;
xqspi->bytes_to_receive -= 4;
@@ -552,7 +564,76 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
}
/**
- * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
+ * zynqmp_qspi_fillgenfifo - Fills the GENFIFO.
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @nbits: Transfer/Receive buswidth.
+ * @genfifoentry: Variable in which GENFIFO mask is saved
+ */
+static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
+ u32 genfifoentry)
+{
+ u32 transfer_len = 0;
+
+ if (xqspi->txbuf) {
+ genfifoentry &= ~GQSPI_GENFIFO_RX;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_TX;
+ transfer_len = xqspi->bytes_to_transfer;
+ } else {
+ genfifoentry &= ~GQSPI_GENFIFO_TX;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_RX;
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ transfer_len = xqspi->dma_rx_bytes;
+ else
+ transfer_len = xqspi->bytes_to_receive;
+ }
+ genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
+ xqspi->genfifoentry = genfifoentry;
+
+ if (transfer_len < GQSPI_GENFIFO_IMM_DATA_MASK) {
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= transfer_len;
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ } else {
+ int tempcount = transfer_len;
+ u32 exponent = 8; /* 2^8 = 256 */
+ u8 imm_data = tempcount & 0xFF;
+
+ tempcount &= ~(tempcount & 0xFF);
+ /* Immediate entry */
+ if (tempcount != 0) {
+ /* Exponent entries */
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount != 0) {
+ if (tempcount & GQSPI_GENFIFO_EXP_START) {
+ genfifoentry &=
+ ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= exponent;
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
+ tempcount = tempcount >> 1;
+ exponent++;
+ }
+ }
+ if (imm_data != 0) {
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ genfifoentry |= (u8)(imm_data & 0xFF);
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
+ }
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
+ /* Dummy generic FIFO entry */
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ }
+}
+
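
zynqmp_qspi_fillgenfifo() encodes the transfer length as one immediate
entry for len % 256 plus one exponent entry per set bit at position 8 or
above. A standalone sketch of that encoding (GENFIFO_EXP_START is assumed
here to be BIT(8), matching the starting exponent of 8):

	#include <stdio.h>

	#define GENFIFO_EXP_START 0x100u /* assumed GQSPI_GENFIFO_EXP_START */

	static void encode_len(unsigned int len)
	{
		unsigned int rem = len & ~0xFFu, exp = 8;
		unsigned char imm = len & 0xFF;

		while (rem) {
			if (rem & GENFIFO_EXP_START)
				printf("EXP entry: 2^%u = %u bytes\n",
				       exp, 1u << exp);
			rem >>= 1;
			exp++;
		}
		if (imm)
			printf("IMM entry: %u bytes\n", imm);
	}

	int main(void)
	{
		/* 260 -> one 256-byte EXP entry plus one 4-byte IMM entry */
		encode_len(260);
		return 0;
	}
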
+/**
+ * zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
* controller
* @xqspi: zynqmp_qspi instance pointer
*
@@ -563,14 +644,14 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
u32 config_reg, genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
- xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
+ xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
xqspi->rxbuf += xqspi->dma_rx_bytes;
xqspi->bytes_to_receive -= xqspi->dma_rx_bytes;
xqspi->dma_rx_bytes = 0;
/* Disabling the DMA interrupts */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST,
- GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode,for remaining bytes to receive */
@@ -588,19 +669,20 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
/* Manual start */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- (zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
- GQSPI_CFG_START_GEN_FIFO_MASK));
+ (zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK));
/* Enable the RX interrupts for IO mode */
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_RXNEMPTY_MASK |
- GQSPI_IER_RXEMPTY_MASK);
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
}
}
/**
- * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
+ * zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
@@ -613,9 +695,8 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
*/
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
- int ret = IRQ_NONE;
+ struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
+ irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -627,7 +708,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status =
zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST);
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
- dma_status);
+ dma_status);
}
if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
@@ -644,55 +725,27 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
- if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0)
- && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
+ if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
+ ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
- spi_finalize_current_transfer(master);
+ complete(&xqspi->data_completion);
ret = IRQ_HANDLED;
}
return ret;
}
/**
- * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
- * @xqspi: xqspi is a pointer to the GQSPI instance
- * @spimode: spimode - SPI or DUAL or QUAD.
- * Return: Mask to set desired SPI mode in GENFIFO entry.
- */
-static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
- u8 spimode)
-{
- u32 mask = 0;
-
- switch (spimode) {
- case GQSPI_SELECT_MODE_DUALSPI:
- mask = GQSPI_GENFIFO_MODE_DUALSPI;
- break;
- case GQSPI_SELECT_MODE_QUADSPI:
- mask = GQSPI_GENFIFO_MODE_QUADSPI;
- break;
- case GQSPI_SELECT_MODE_SPI:
- mask = GQSPI_GENFIFO_MODE_SPI;
- break;
- default:
- dev_warn(xqspi->dev, "Invalid SPI mode\n");
- }
-
- return mask;
-}
-
-/**
- * zynq_qspi_setuprxdma: This function sets up the RX DMA operation
+ * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
*/
-static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
+static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
u32 rx_bytes, rx_rem, config_reg;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
- if ((xqspi->bytes_to_receive < 8) ||
- ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ if (xqspi->bytes_to_receive < 8 ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
@@ -706,17 +759,17 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
rx_bytes = (xqspi->bytes_to_receive - rx_rem);
addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
- rx_bytes, DMA_FROM_DEVICE);
+ rx_bytes, DMA_FROM_DEVICE);
if (dma_mapping_error(xqspi->dev, addr))
dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
xqspi->dma_rx_bytes = rx_bytes;
xqspi->dma_addr = addr;
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
- (u32)(addr & 0xffffffff));
+ (u32)(addr & 0xffffffff));
addr = ((addr >> 16) >> 16);
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
- ((u32)addr) & 0xfff);
+ ((u32)addr) & 0xfff);
/* Enabling the DMA mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
@@ -732,166 +785,48 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
}
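
The DMA destination is programmed as a 32-bit low word plus a 12-bit MSB
register, i.e. a 44-bit address. The two-step shift above is the portable
way to move past bit 31: a single right shift by 32 would be undefined
behaviour when dma_addr_t is a 32-bit type. The key lines, annotated:

	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
			   (u32)(addr & 0xffffffff));	/* bits 0..31 */
	addr = (addr >> 16) >> 16;	/* defined even for 32-bit dma_addr_t */
	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
			   (u32)addr & 0xfff);		/* bits 32..43 */
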
/**
- * zynqmp_qspi_txrxsetup: This function checks the TX/RX buffers in
- * the transfer and sets up the GENFIFO entries,
- * TX FIFO as required.
- * @xqspi: xqspi is a pointer to the GQSPI instance.
- * @transfer: It is a pointer to the structure containing transfer data.
- * @genfifoentry: genfifoentry is pointer to the variable in which
- * GENFIFO mask is returned to calling function
+ * zynqmp_qspi_write_op - This function sets up the GENFIFO entries,
+ * TX FIFO, and fills the TX FIFO with as many
+ * bytes as possible.
+ * @xqspi: Pointer to the GQSPI instance.
+ * @tx_nbits: Transfer buswidth.
+ * @genfifoentry: Variable in which GENFIFO mask is returned
+ * to calling function
*/
-static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
- struct spi_transfer *transfer,
- u32 *genfifoentry)
+static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
+ u32 genfifoentry)
{
u32 config_reg;
- /* Transmit */
- if ((xqspi->txbuf != NULL) && (xqspi->rxbuf == NULL)) {
- /* Setup data to be TXed */
- *genfifoentry &= ~GQSPI_GENFIFO_RX;
- *genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- *genfifoentry |= GQSPI_GENFIFO_TX;
- *genfifoentry |=
- zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
- xqspi->bytes_to_transfer = transfer->len;
- if (xqspi->mode == GQSPI_MODE_DMA) {
- config_reg = zynqmp_gqspi_read(xqspi,
- GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- config_reg);
- xqspi->mode = GQSPI_MODE_IO;
- }
- zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
- /* Discard RX data */
- xqspi->bytes_to_receive = 0;
- } else if ((xqspi->txbuf == NULL) && (xqspi->rxbuf != NULL)) {
- /* Receive */
-
- /* TX auto fill */
- *genfifoentry &= ~GQSPI_GENFIFO_TX;
- /* Setup RX */
- *genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- *genfifoentry |= GQSPI_GENFIFO_RX;
- *genfifoentry |=
- zynqmp_qspi_selectspimode(xqspi, transfer->rx_nbits);
- xqspi->bytes_to_transfer = 0;
- xqspi->bytes_to_receive = transfer->len;
- zynq_qspi_setuprxdma(xqspi);
+ zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
+ zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ config_reg = zynqmp_gqspi_read(xqspi,
+ GQSPI_CONFIG_OFST);
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
}
}
/**
- * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
- * @master: Pointer to the spi_master structure which provides
- * information about the controller.
- * @qspi: Pointer to the spi_device structure
- * @transfer: Pointer to the spi_transfer structure which provide information
- * about next transfer parameters
- *
- * This function fills the TX FIFO, starts the QSPI transfer, and waits for the
- * transfer to be completed.
- *
- * Return: Number of bytes transferred in the last transfer
+ * zynqmp_qspi_read_op - This function sets up the GENFIFO entries and
+ * RX DMA operation.
+ * @xqspi: xqspi is a pointer to the GQSPI instance.
+ * @rx_nbits: Receive buswidth.
+ * @genfifoentry: genfifoentry is pointer to the variable in which
+ * GENFIFO mask is returned to calling function
*/
-static int zynqmp_qspi_start_transfer(struct spi_master *master,
- struct spi_device *qspi,
- struct spi_transfer *transfer)
+static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
+ u32 genfifoentry)
{
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
- u32 genfifoentry = 0x0, transfer_len;
-
- xqspi->txbuf = transfer->tx_buf;
- xqspi->rxbuf = transfer->rx_buf;
-
- zynqmp_qspi_setup_transfer(qspi, transfer);
-
- genfifoentry |= xqspi->genfifocs;
- genfifoentry |= xqspi->genfifobus;
-
- zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);
-
- if (xqspi->mode == GQSPI_MODE_DMA)
- transfer_len = xqspi->dma_rx_bytes;
- else
- transfer_len = transfer->len;
-
- xqspi->genfifoentry = genfifoentry;
- if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= transfer_len;
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- } else {
- int tempcount = transfer_len;
- u32 exponent = 8; /* 2^8 = 256 */
- u8 imm_data = tempcount & 0xFF;
-
- tempcount &= ~(tempcount & 0xFF);
- /* Immediate entry */
- if (tempcount != 0) {
- /* Exponent entries */
- genfifoentry |= GQSPI_GENFIFO_EXP;
- while (tempcount != 0) {
- if (tempcount & GQSPI_GENFIFO_EXP_START) {
- genfifoentry &=
- ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= exponent;
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- tempcount = tempcount >> 1;
- exponent++;
- }
- }
- if (imm_data != 0) {
- genfifoentry &= ~GQSPI_GENFIFO_EXP;
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= (u8) (imm_data & 0xFF);
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST, genfifoentry);
- }
- }
-
- if ((xqspi->mode == GQSPI_MODE_IO) &&
- (xqspi->rxbuf != NULL)) {
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
- }
-
- /* Since we are using manual mode */
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
- GQSPI_CFG_START_GEN_FIFO_MASK);
-
- if (xqspi->txbuf != NULL)
- /* Enable interrupts for TX */
- zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_TXEMPTY_MASK |
- GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_TXNOT_FULL_MASK);
-
- if (xqspi->rxbuf != NULL) {
- /* Enable interrupts for RX */
- if (xqspi->mode == GQSPI_MODE_DMA) {
- /* Enable DMA interrupts */
- zynqmp_gqspi_write(xqspi,
- GQSPI_QSPIDMA_DST_I_EN_OFST,
- GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
- } else {
- zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_RXNEMPTY_MASK |
- GQSPI_IER_RXEMPTY_MASK);
- }
- }
-
- return transfer->len;
+ zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
+ zynqmp_qspi_setuprxdma(xqspi);
}
/**
- * zynqmp_qspi_suspend: Suspend method for the QSPI driver
+ * zynqmp_qspi_suspend - Suspend method for the QSPI driver
* @dev: Address of the platform_device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
@@ -900,17 +835,18 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
*/
static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
- spi_master_suspend(master);
+ spi_controller_suspend(ctlr);
- zynqmp_unprepare_transfer_hardware(master);
+ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
return 0;
}
/**
- * zynqmp_qspi_resume: Resume method for the QSPI driver
+ * zynqmp_qspi_resume - Resume method for the QSPI driver
* @dev: Address of the platform_device structure
*
* The function starts the QSPI driver queue and initializes the QSPI
@@ -920,8 +856,8 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
*/
static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
int ret = 0;
ret = clk_enable(xqspi->pclk);
@@ -937,7 +873,7 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
return ret;
}
- spi_master_resume(master);
+ spi_controller_resume(ctlr);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
@@ -954,8 +890,7 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
*/
static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
@@ -973,8 +908,7 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
*/
static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
int ret;
ret = clk_enable(xqspi->pclk);
@@ -993,14 +927,179 @@ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
return 0;
}
+/**
+ * zynqmp_qspi_exec_op() - Initiates the QSPI transfer
+ * @mem: The SPI memory
+ * @op: The memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function asserts the chip select and then sequences the command,
+ * address, dummy and data phases of the operation.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int zynqmp_qspi_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+	struct zynqmp_qspi *xqspi =
+		spi_controller_get_devdata(mem->spi->master);
+ int err = 0, i;
+ u8 *tmpbuf;
+ u32 genfifoentry = 0;
+
+ dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth);
+
+ zynqmp_qspi_config_op(xqspi, mem->spi);
+ zynqmp_qspi_chipselect(mem->spi, false);
+ genfifoentry |= xqspi->genfifocs;
+ genfifoentry |= xqspi->genfifobus;
+
+ if (op->cmd.opcode) {
+ tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
+		if (!tmpbuf) {
+			err = -ENOMEM;
+			goto return_err;
+		}
+ tmpbuf[0] = op->cmd.opcode;
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = tmpbuf;
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->cmd.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->cmd.buswidth, genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ if (!wait_for_completion_interruptible_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ err = -ETIMEDOUT;
+ kfree(tmpbuf);
+ goto return_err;
+ }
+ kfree(tmpbuf);
+ }
+
+	if (op->addr.nbytes) {
+		tmpbuf = kzalloc(op->addr.nbytes, GFP_KERNEL | GFP_DMA);
+		if (!tmpbuf) {
+			err = -ENOMEM;
+			goto return_err;
+		}
+		/* Serialize the address most-significant byte first */
+		for (i = 0; i < op->addr.nbytes; i++) {
+			tmpbuf[i] = op->addr.val >>
+					(8 * (op->addr.nbytes - i - 1));
+		}
+
+		reinit_completion(&xqspi->data_completion);
+		xqspi->txbuf = tmpbuf;
+		xqspi->rxbuf = NULL;
+		xqspi->bytes_to_transfer = op->addr.nbytes;
+		xqspi->bytes_to_receive = 0;
+		zynqmp_qspi_write_op(xqspi, op->addr.buswidth, genfifoentry);
+		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+				   zynqmp_gqspi_read(xqspi,
+						     GQSPI_CONFIG_OFST) |
+				   GQSPI_CFG_START_GEN_FIFO_MASK);
+		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+				   GQSPI_IER_TXEMPTY_MASK |
+				   GQSPI_IER_GENFIFOEMPTY_MASK |
+				   GQSPI_IER_TXNOT_FULL_MASK);
+		if (!wait_for_completion_interruptible_timeout
+		    (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+			err = -ETIMEDOUT;
+			kfree(tmpbuf);
+			goto return_err;
+		}
+		kfree(tmpbuf);
+	}
+
+ if (op->dummy.nbytes) {
+ tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
+		if (!tmpbuf) {
+			err = -ENOMEM;
+			goto return_err;
+		}
+ memset(tmpbuf, 0xff, op->dummy.nbytes);
+ reinit_completion(&xqspi->data_completion);
+ xqspi->txbuf = tmpbuf;
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->dummy.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
+ genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_TXEMPTY_MASK |
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ if (!wait_for_completion_interruptible_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000))) {
+ err = -ETIMEDOUT;
+ kfree(tmpbuf);
+ goto return_err;
+ }
+
+ kfree(tmpbuf);
+ }
+
+ if (op->data.nbytes) {
+ reinit_completion(&xqspi->data_completion);
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ xqspi->txbuf = (u8 *)op->data.buf.out;
+ xqspi->rxbuf = NULL;
+ xqspi->bytes_to_transfer = op->data.nbytes;
+ xqspi->bytes_to_receive = 0;
+ zynqmp_qspi_write_op(xqspi, op->data.buswidth,
+ genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read
+ (xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_TXEMPTY_MASK |
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_TXNOT_FULL_MASK);
+ } else {
+ xqspi->txbuf = NULL;
+ xqspi->rxbuf = (u8 *)op->data.buf.in;
+ xqspi->bytes_to_receive = op->data.nbytes;
+ xqspi->bytes_to_transfer = 0;
+ zynqmp_qspi_read_op(xqspi, op->data.buswidth,
+ genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
+ zynqmp_gqspi_read
+ (xqspi, GQSPI_CONFIG_OFST) |
+ GQSPI_CFG_START_GEN_FIFO_MASK);
+ if (xqspi->mode == GQSPI_MODE_DMA) {
+ zynqmp_gqspi_write
+ (xqspi, GQSPI_QSPIDMA_DST_I_EN_OFST,
+ GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
+ } else {
+ zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
+ GQSPI_IER_GENFIFOEMPTY_MASK |
+ GQSPI_IER_RXNEMPTY_MASK |
+ GQSPI_IER_RXEMPTY_MASK);
+ }
+ }
+ if (!wait_for_completion_interruptible_timeout
+ (&xqspi->data_completion, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ }
+
+return_err:
+
+ zynqmp_qspi_chipselect(mem->spi, true);
+
+ return err;
+}
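For context, exec_op() is never called directly by upper layers; the spi-mem core dispatches to it through ctlr->mem_ops. A hedged sketch of how a request would reach this function via the generic API follows, using the standard SPI_MEM_OP helpers from <linux/spi/spi-mem.h>; the opcode, length and buffer are illustrative.

#include <linux/spi/spi-mem.h>

/* Read the 3-byte JEDEC ID through the generic spi-mem path; the core
 * routes this to zynqmp_qspi_exec_op() via ctlr->mem_ops->exec_op. */
static int example_read_jedec_id(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(mem, &op);
}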
+
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
zynqmp_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
+static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
+ .exec_op = zynqmp_qspi_exec_op,
+};
+
/**
- * zynqmp_qspi_probe: Probe method for the QSPI driver
+ * zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
@@ -1010,17 +1109,18 @@ static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
static int zynqmp_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
- if (!master)
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
+ if (!ctlr)
return -ENOMEM;
- xqspi = spi_master_get_devdata(master);
- master->dev.of_node = pdev->dev.of_node;
- platform_set_drvdata(pdev, master);
+ xqspi = spi_controller_get_devdata(ctlr);
+ xqspi->dev = dev;
+ platform_set_drvdata(pdev, xqspi);
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
@@ -1028,7 +1128,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto remove_master;
}
- xqspi->dev = dev;
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(dev, "pclk clock not found.\n");
@@ -1036,11 +1135,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto remove_master;
}
- ret = clk_prepare_enable(xqspi->pclk);
- if (ret) {
- dev_err(dev, "Unable to enable APB clock.\n");
- goto remove_master;
- }
+ init_completion(&xqspi->data_completion);
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
@@ -1049,6 +1144,12 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_pclk;
}
+ ret = clk_prepare_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
ret = clk_prepare_enable(xqspi->refclk);
if (ret) {
dev_err(dev, "Unable to enable device clock.\n");
@@ -1070,32 +1171,28 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
- 0, pdev->name, master);
+ 0, pdev->name, xqspi);
if (ret != 0) {
ret = -ENXIO;
dev_err(dev, "request_irq failed\n");
goto clk_dis_all;
}
- master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
-
- master->setup = zynqmp_qspi_setup;
- master->set_cs = zynqmp_qspi_chipselect;
- master->transfer_one = zynqmp_qspi_start_transfer;
- master->prepare_transfer_hardware = zynqmp_prepare_transfer_hardware;
- master->unprepare_transfer_hardware =
- zynqmp_unprepare_transfer_hardware;
- master->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ ctlr->mem_ops = &zynqmp_qspi_mem_ops;
+ ctlr->setup = zynqmp_qspi_setup_op;
+ ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
- if (master->dev.parent == NULL)
- master->dev.parent = &master->dev;
-
- ret = spi_register_master(master);
- if (ret)
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed\n");
goto clk_dis_all;
+ }
return 0;
@@ -1106,13 +1203,13 @@ clk_dis_all:
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
remove_master:
- spi_master_put(master);
+ spi_controller_put(ctlr);
return ret;
}
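The probe/remove changes above follow the devm-managed controller lifetime pattern. A minimal sketch of that pattern is shown below (names are illustrative): until devm_spi_register_controller() succeeds, the driver still owns the reference returned by spi_alloc_master() and must drop it with spi_controller_put(); once registered, the SPI core unregisters and releases the controller automatically when the device is unbound, which is why remove() no longer calls spi_unregister_master().

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv { void __iomem *regs; };

static int example_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct example_priv));
	if (!ctlr)
		return -ENOMEM;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);	/* registration failed: still ours */

	return ret;
}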
/**
- * zynqmp_qspi_remove: Remove method for the QSPI driver
+ * zynqmp_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
@@ -1123,8 +1220,7 @@ remove_master:
*/
static int zynqmp_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
clk_disable_unprepare(xqspi->refclk);
@@ -1132,8 +1228,6 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- spi_unregister_master(master);
-
return 0;
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 455e99c4958e..859910ec8d9f 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -146,7 +146,7 @@ static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
- ssize_t status = 0;
+ ssize_t status;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
@@ -176,7 +176,7 @@ spidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
- ssize_t status = 0;
+ ssize_t status;
unsigned long missing;
/* chipselect only toggles at start or end of operation */
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 7c3ae52f2b15..dac54041ad8d 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -1164,17 +1164,12 @@ void ssb_pci_exit(struct ssb_bus *bus)
int ssb_pci_init(struct ssb_bus *bus)
{
struct pci_dev *pdev;
- int err;
if (bus->bustype != SSB_BUSTYPE_PCI)
return 0;
pdev = bus->host_pci;
mutex_init(&bus->sprom_mutex);
- err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom);
- if (err)
- goto out;
-out:
- return err;
+ return device_create_file(&pdev->dev, &dev_attr_ssb_sprom);
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e6c831c6cccc..2d0310448eba 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -116,4 +116,6 @@ source "drivers/staging/qlge/Kconfig"
source "drivers/staging/wfx/Kconfig"
+source "drivers/staging/hikey9xx/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a3b1fd0622f9..757a892ab5b9 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,3 +48,4 @@ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_WFX) += wfx/
+obj-y += hikey9xx/
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 3c9f09506ffa..e1fe03ceb7f1 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -205,8 +205,8 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf,
return 0;
}
-static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
+static void ion_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a = attachment->priv;
struct ion_buffer *buffer = dmabuf->priv;
@@ -331,7 +331,7 @@ static const struct dma_buf_ops dma_buf_ops = {
.mmap = ion_mmap,
.release = ion_dma_buf_release,
.attach = ion_dma_buf_attach,
- .detach = ion_dma_buf_detatch,
+ .detach = ion_dma_buf_detach,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
};
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 09a940066c0e..b5d00a006dbb 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -680,7 +680,7 @@ struct comedi_rangeinfo {
* value of 1 volt.
*
* The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the
- * the range needs to be multiplied by an external reference.
+ * range needs to be multiplied by an external reference.
*/
struct comedi_krange {
int min;
@@ -970,7 +970,7 @@ enum i8254_mode {
* major reasons exist why this caused major confusion for users:
* 1) The register values are _NOT_ in user documentation, but rather in
* arcane locations, such as a few register programming manuals that are
- * increasingly hard to find and the NI MHDDK (comments in in example code).
+ * increasingly hard to find and the NI MHDDK (comments in example code).
* There is no one place to find the various valid values of the registers.
* 2) The register values are _NOT_ completely consistent. There is no way to
* gain any sense of intuition of which values, or even enums one should use
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 0dff1ac057cd..0e1b95ef9a4d 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -627,7 +627,7 @@ extern const struct comedi_lrange range_unknown;
* @range: Array of &struct comedi_krange, one for each range.
*
* Each element of @range[] describes the minimum and maximum physical range
- * range and the type of units. Typically, the type of unit is %UNIT_volt
+ * and the type of units. Typically, the type of unit is %UNIT_volt
* (i.e. volts) and the minimum and maximum are in millionths of a volt.
* There may also be a flag that indicates the minimum and maximum are merely
* scale factors for an unknown, external reference.
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index fadefcb5c237..06fc7ed96200 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -544,7 +544,7 @@ static int apci1564_timer_insn_write(struct comedi_device *dev,
{
struct apci1564_private *devpriv = dev->private;
- /* just write the last last to the reload register */
+	/* just write the last value to the reload register */
if (insn->n) {
unsigned int val = data[insn->n - 1];
@@ -628,7 +628,7 @@ static int apci1564_counter_insn_write(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- /* just write the last last to the reload register */
+	/* just write the last value to the reload register */
if (insn->n) {
unsigned int val = data[insn->n - 1];
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 48ec2ee953dc..d740c4782775 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -1342,6 +1342,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
if (dev->irq && board->has_ao_fifo) {
dev->write_subdev = s;
s->subdev_flags |= SDF_CMD_WRITE;
+ s->len_chanlist = s->n_chan;
s->do_cmdtest = cb_pcidas_ao_cmdtest;
s->do_cmd = cb_pcidas_ao_cmd;
s->cancel = cb_pcidas_ao_cancel;
diff --git a/drivers/staging/comedi/drivers/comedi_8255.c b/drivers/staging/comedi/drivers/comedi_8255.c
index 3298725b9ba5..b7ca465933ee 100644
--- a/drivers/staging/comedi/drivers/comedi_8255.c
+++ b/drivers/staging/comedi/drivers/comedi_8255.c
@@ -248,7 +248,7 @@ EXPORT_SYMBOL_GPL(subdev_8255_mm_init);
* subdev_8255_regbase - get offset of 8255 registers or call-back context
* @s: comedi subdevice
*
- * Returns the 'regbase' parameter that was previously passed to to
+ * Returns the 'regbase' parameter that was previously passed to
* subdev_8255_init() or subdev_8255_mm_init() to set up the subdevice.
* Only valid if the subdevice was set up successfully.
*/
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 2a9f7e9821a7..ab6d9e8269f3 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -286,7 +286,7 @@ int ni_tio_cmdtest(struct comedi_device *dev,
* This should be done, but we don't yet know the actual
* register values. These should be tested and then documented
* in the ni_route_values/ni_*.csv files, with indication of
- * who/when/which/how these these were tested.
+ * who/when/which/how these were tested.
* When at least a e/m/660x series have been tested, this code
* should be uncommented:
*
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 58b3d07ae907..64eb649c9813 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -389,7 +389,7 @@ static int pcl726_attach(struct comedi_device *dev,
}
if (dev->irq) {
- /* Digial Input subdevice - Interrupt support */
+ /* Digital Input subdevice - Interrupt support */
s = &dev->subdevices[subdev++];
dev->read_subdev = s;
s->type = COMEDI_SUBD_DI;
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 7e1fc6ffb48c..b299d648a0eb 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -48,7 +48,7 @@
*
* In the 48-channel version:
*
- * On subdev 0, the first 24 channels channels are edge-detect channels.
+ * On subdev 0, the first 24 channels are edge-detect channels.
*
* In the 96-channel board you have the following channels that can do edge
* detection:
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 1b1efa4d31f6..fe4408ebf6b3 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -164,7 +164,7 @@ static int daqp_clear_events(struct comedi_device *dev, int loops)
/*
* Reset any pending interrupts (my card has a tendency to require
- * require multiple reads on the status register to achieve this).
+ * multiple reads on the status register to achieve this).
*/
while (--loops) {
status = inb(dev->iobase + DAQP_STATUS_REG);
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 65dc6c51037e..7956abcbae22 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
+ if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
+ return -EINVAL;
+
return 0;
}
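The vmk80xx hunk above rejects endpoints that advertise a zero wMaxPacketSize, since the driver later sizes its transfer buffers from usb_endpoint_maxp() and a zero value would propagate into zero-length allocations and malformed URBs. A hedged, stand-alone form of the same guard (illustrative names):

#include <linux/usb.h>

/* usb_endpoint_maxp() returns the endpoint's wMaxPacketSize from the
 * descriptor; refuse to bind when a device reports zero. */
static int example_validate_eps(struct usb_endpoint_descriptor *rx,
				struct usb_endpoint_descriptor *tx)
{
	if (!usb_endpoint_maxp(rx) || !usb_endpoint_maxp(tx))
		return -EINVAL;
	return 0;
}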
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index ad1478c53e9e..e7a95b3b6a2f 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config USB_EMXX
tristate "EMXX USB Function Device Controller"
- depends on USB_GADGET && (ARCH_RENESAS || (ARM && COMPILE_TEST))
+ depends on USB_GADGET && (ARCH_RENESAS || COMPILE_TEST)
help
The Emma Mobile series of SoCs from Renesas Electronics and
former NEC Electronics include USB Function hardware.
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 03929b9d3a8b..a30b4f5b199b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -793,7 +793,7 @@ static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
u32 dmacnt;
u32 burst = 1;
u32 data;
- int result = -EINVAL;
+ int result;
struct fc_regs __iomem *preg = udc->p_regs;
if (req->dma_flag)
@@ -1288,8 +1288,6 @@ static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
} else {
- /* Clear STALL */
- ep->stalled = false;
if (ep_adrs & USB_DIR_IN) {
_nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
, EPN_ISTL);
@@ -1304,6 +1302,7 @@ static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
, data);
}
+ /* Clear STALL */
ep->stalled = false;
if (ep->halted) {
ep->halted = false;
@@ -2164,8 +2163,8 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
_nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
- _nbu2ss_writel(&udc->p_regs->AHBMCTR,
- HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
+ _nbu2ss_writel(&udc->p_regs->AHBMCTR,
+ HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
waitcnt++;
@@ -2176,7 +2175,7 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
}
}
- _nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
+ _nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
_nbu2ss_bitset(&udc->p_regs->USB_CONTROL, (INT_SEL | SOF_RCV));
@@ -2593,7 +2592,7 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
if (req->unaligned) {
if (!ep->virt_buf)
- ep->virt_buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
&ep->phys_buf,
GFP_ATOMIC | GFP_DMA);
if (ep->epnum > 0) {
@@ -3073,8 +3072,8 @@ static int nbu2ss_drv_contest_init(struct platform_device *pdev,
*/
static int nbu2ss_drv_probe(struct platform_device *pdev)
{
- int status = -ENODEV;
- struct nbu2ss_udc *udc;
+ int status;
+ struct nbu2ss_udc *udc;
int irq;
void __iomem *mmio_base;
@@ -3148,7 +3147,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
for (i = 0; i < NUM_ENDPOINTS; i++) {
ep = &udc->ep[i];
if (ep->virt_buf)
- dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
+ dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
ep->phys_buf);
}
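The dma_alloc_coherent()/dma_free_coherent() changes above replace a NULL device argument with udc->dev: coherent DMA buffers must be allocated and freed against the device that performs the DMA so its DMA mask and ops apply, and a NULL device is not valid on modern kernels. A minimal illustrative pairing:

#include <linux/dma-mapping.h>

static void *example_alloc_buf(struct device *dev, dma_addr_t *phys)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, phys, GFP_ATOMIC);
}

static void example_free_buf(struct device *dev, void *virt, dma_addr_t phys)
{
	dma_free_coherent(dev, PAGE_SIZE, virt, phys);
}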
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 9c2671cb32f7..bca614d69aca 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -9,11 +9,6 @@
#define _LINUX_EMXX_H
/*---------------------------------------------------------------------------*/
-/*----------------- Default undef */
-#if 0
-#define DEBUG
-#define UDC_DEBUG_DUMP
-#endif
/*----------------- Default define */
#define USE_DMA 1
@@ -52,197 +47,163 @@ int vbus_irq;
#define U2F_ENABLE 1
#define U2F_DISABLE 0
-/*------- BIT */
-#define BIT00 0x00000001
-#define BIT01 0x00000002
-#define BIT02 0x00000004
-#define BIT03 0x00000008
-#define BIT04 0x00000010
-#define BIT05 0x00000020
-#define BIT06 0x00000040
-#define BIT07 0x00000080
-#define BIT08 0x00000100
-#define BIT09 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-#define TEST_FORCE_ENABLE (BIT18 + BIT16)
-
-#define INT_SEL BIT10
-#define CONSTFS BIT09
-#define SOF_RCV BIT08
-#define RSUM_IN BIT07
-#define SUSPEND BIT06
-#define CONF BIT05
-#define DEFAULT BIT04
-#define CONNECTB BIT03
-#define PUE2 BIT02
+#define TEST_FORCE_ENABLE (BIT(18) | BIT(16))
+
+#define INT_SEL BIT(10)
+#define CONSTFS BIT(9)
+#define SOF_RCV BIT(8)
+#define RSUM_IN BIT(7)
+#define SUSPEND BIT(6)
+#define CONF BIT(5)
+#define DEFAULT BIT(4)
+#define CONNECTB BIT(3)
+#define PUE2 BIT(2)
#define MAX_TEST_MODE_NUM 0x05
#define TEST_MODE_SHIFT 16
/*------- (0x0004) USB Status Register */
-#define SPEED_MODE BIT06
-#define HIGH_SPEED BIT06
+#define SPEED_MODE BIT(6)
+#define HIGH_SPEED BIT(6)
-#define CONF BIT05
-#define DEFAULT BIT04
-#define USB_RST BIT03
-#define SPND_OUT BIT02
-#define RSUM_OUT BIT01
+#define CONF BIT(5)
+#define DEFAULT BIT(4)
+#define USB_RST BIT(3)
+#define SPND_OUT BIT(2)
+#define RSUM_OUT BIT(1)
/*------- (0x0008) USB Address Register */
#define USB_ADDR 0x007F0000
-#define SOF_STATUS BIT15
-#define UFRAME (BIT14 + BIT13 + BIT12)
+#define SOF_STATUS BIT(15)
+#define UFRAME (BIT(14) | BIT(13) | BIT(12))
#define FRAME 0x000007FF
#define USB_ADRS_SHIFT 16
/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT07 + BIT06 + BIT05 + BIT04)
+#define SQUSET (BIT(7) | BIT(6) | BIT(5) | BIT(4))
-#define USB_SQUSET (BIT06 + BIT05 + BIT04)
+#define USB_SQUSET (BIT(6) | BIT(5) | BIT(4))
/*------- (0x0010) TEST Control Register */
-#define FORCEHS BIT02
-#define CS_TESTMODEEN BIT01
-#define LOOPBACK BIT00
+#define FORCEHS BIT(2)
+#define CS_TESTMODEEN BIT(1)
+#define LOOPBACK BIT(0)
/*------- (0x0018) Setup Data 0 Register */
/*------- (0x001C) Setup Data 1 Register */
/*------- (0x0020) USB Interrupt Status Register */
#define EPN_INT 0x00FFFF00
-#define EP15_INT BIT23
-#define EP14_INT BIT22
-#define EP13_INT BIT21
-#define EP12_INT BIT20
-#define EP11_INT BIT19
-#define EP10_INT BIT18
-#define EP9_INT BIT17
-#define EP8_INT BIT16
-#define EP7_INT BIT15
-#define EP6_INT BIT14
-#define EP5_INT BIT13
-#define EP4_INT BIT12
-#define EP3_INT BIT11
-#define EP2_INT BIT10
-#define EP1_INT BIT09
-#define EP0_INT BIT08
-#define SPEED_MODE_INT BIT06
-#define SOF_ERROR_INT BIT05
-#define SOF_INT BIT04
-#define USB_RST_INT BIT03
-#define SPND_INT BIT02
-#define RSUM_INT BIT01
+#define EP15_INT BIT(23)
+#define EP14_INT BIT(22)
+#define EP13_INT BIT(21)
+#define EP12_INT BIT(20)
+#define EP11_INT BIT(19)
+#define EP10_INT BIT(18)
+#define EP9_INT BIT(17)
+#define EP8_INT BIT(16)
+#define EP7_INT BIT(15)
+#define EP6_INT BIT(14)
+#define EP5_INT BIT(13)
+#define EP4_INT BIT(12)
+#define EP3_INT BIT(11)
+#define EP2_INT BIT(10)
+#define EP1_INT BIT(9)
+#define EP0_INT BIT(8)
+#define SPEED_MODE_INT BIT(6)
+#define SOF_ERROR_INT BIT(5)
+#define SOF_INT BIT(4)
+#define USB_RST_INT BIT(3)
+#define SPND_INT BIT(2)
+#define RSUM_INT BIT(1)
#define USB_INT_STA_RW 0x7E
/*------- (0x0024) USB Interrupt Enable Register */
#define EP15_0_EN 0x00FFFF00
-#define EP15_EN BIT23
-#define EP14_EN BIT22
-#define EP13_EN BIT21
-#define EP12_EN BIT20
-#define EP11_EN BIT19
-#define EP10_EN BIT18
-#define EP9_EN BIT17
-#define EP8_EN BIT16
-#define EP7_EN BIT15
-#define EP6_EN BIT14
-#define EP5_EN BIT13
-#define EP4_EN BIT12
-#define EP3_EN BIT11
-#define EP2_EN BIT10
-#define EP1_EN BIT09
-#define EP0_EN BIT08
-#define SPEED_MODE_EN BIT06
-#define SOF_ERROR_EN BIT05
-#define SOF_EN BIT04
-#define USB_RST_EN BIT03
-#define SPND_EN BIT02
-#define RSUM_EN BIT01
+#define EP15_EN BIT(23)
+#define EP14_EN BIT(22)
+#define EP13_EN BIT(21)
+#define EP12_EN BIT(20)
+#define EP11_EN BIT(19)
+#define EP10_EN BIT(18)
+#define EP9_EN BIT(17)
+#define EP8_EN BIT(16)
+#define EP7_EN BIT(15)
+#define EP6_EN BIT(14)
+#define EP5_EN BIT(13)
+#define EP4_EN BIT(12)
+#define EP3_EN BIT(11)
+#define EP2_EN BIT(10)
+#define EP1_EN BIT(9)
+#define EP0_EN BIT(8)
+#define SPEED_MODE_EN BIT(6)
+#define SOF_ERROR_EN BIT(5)
+#define SOF_EN BIT(4)
+#define USB_RST_EN BIT(3)
+#define SPND_EN BIT(2)
+#define RSUM_EN BIT(1)
#define USB_INT_EN_BIT \
(EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
/*------- (0x0028) EP0 Control Register */
-#define EP0_STGSEL BIT18
-#define EP0_OVERSEL BIT17
-#define EP0_AUTO BIT16
-#define EP0_PIDCLR BIT09
-#define EP0_BCLR BIT08
-#define EP0_DEND BIT07
-#define EP0_DW (BIT06 + BIT05)
+#define EP0_STGSEL BIT(18)
+#define EP0_OVERSEL BIT(17)
+#define EP0_AUTO BIT(16)
+#define EP0_PIDCLR BIT(9)
+#define EP0_BCLR BIT(8)
+#define EP0_DEND BIT(7)
+#define EP0_DW (BIT(6) | BIT(5))
#define EP0_DW4 0
-#define EP0_DW3 (BIT06 + BIT05)
-#define EP0_DW2 BIT06
-#define EP0_DW1 BIT05
+#define EP0_DW3 (BIT(6) | BIT(5))
+#define EP0_DW2 BIT(6)
+#define EP0_DW1 BIT(5)
-#define EP0_INAK_EN BIT04
-#define EP0_PERR_NAK_CLR BIT03
-#define EP0_STL BIT02
-#define EP0_INAK BIT01
-#define EP0_ONAK BIT00
+#define EP0_INAK_EN BIT(4)
+#define EP0_PERR_NAK_CLR BIT(3)
+#define EP0_STL BIT(2)
+#define EP0_INAK BIT(1)
+#define EP0_ONAK BIT(0)
/*------- (0x002C) EP0 Status Register */
-#define EP0_PID BIT18
-#define EP0_PERR_NAK BIT17
-#define EP0_PERR_NAK_INT BIT16
-#define EP0_OUT_NAK_INT BIT15
-#define EP0_OUT_NULL BIT14
-#define EP0_OUT_FULL BIT13
-#define EP0_OUT_EMPTY BIT12
-#define EP0_IN_NAK_INT BIT11
-#define EP0_IN_DATA BIT10
-#define EP0_IN_FULL BIT09
-#define EP0_IN_EMPTY BIT08
-#define EP0_OUT_NULL_INT BIT07
-#define EP0_OUT_OR_INT BIT06
-#define EP0_OUT_INT BIT05
-#define EP0_IN_INT BIT04
-#define EP0_STALL_INT BIT03
-#define STG_END_INT BIT02
-#define STG_START_INT BIT01
-#define SETUP_INT BIT00
-
-#define EP0_STATUS_RW_BIT (BIT16 | BIT15 | BIT11 | 0xFF)
+#define EP0_PID BIT(18)
+#define EP0_PERR_NAK BIT(17)
+#define EP0_PERR_NAK_INT BIT(16)
+#define EP0_OUT_NAK_INT BIT(15)
+#define EP0_OUT_NULL BIT(14)
+#define EP0_OUT_FULL BIT(13)
+#define EP0_OUT_EMPTY BIT(12)
+#define EP0_IN_NAK_INT BIT(11)
+#define EP0_IN_DATA BIT(10)
+#define EP0_IN_FULL BIT(9)
+#define EP0_IN_EMPTY BIT(8)
+#define EP0_OUT_NULL_INT BIT(7)
+#define EP0_OUT_OR_INT BIT(6)
+#define EP0_OUT_INT BIT(5)
+#define EP0_IN_INT BIT(4)
+#define EP0_STALL_INT BIT(3)
+#define STG_END_INT BIT(2)
+#define STG_START_INT BIT(1)
+#define SETUP_INT BIT(0)
+
+#define EP0_STATUS_RW_BIT (BIT(16) | BIT(15) | BIT(11) | 0xFF)
/*------- (0x0030) EP0 Interrupt Enable Register */
-#define EP0_PERR_NAK_EN BIT16
-#define EP0_OUT_NAK_EN BIT15
+#define EP0_PERR_NAK_EN BIT(16)
+#define EP0_OUT_NAK_EN BIT(15)
-#define EP0_IN_NAK_EN BIT11
+#define EP0_IN_NAK_EN BIT(11)
-#define EP0_OUT_NULL_EN BIT07
-#define EP0_OUT_OR_EN BIT06
-#define EP0_OUT_EN BIT05
-#define EP0_IN_EN BIT04
-#define EP0_STALL_EN BIT03
-#define STG_END_EN BIT02
-#define STG_START_EN BIT01
-#define SETUP_EN BIT00
+#define EP0_OUT_NULL_EN BIT(7)
+#define EP0_OUT_OR_EN BIT(6)
+#define EP0_OUT_EN BIT(5)
+#define EP0_IN_EN BIT(4)
+#define EP0_STALL_EN BIT(3)
+#define STG_END_EN BIT(2)
+#define STG_START_EN BIT(1)
+#define SETUP_EN BIT(0)
#define EP0_INT_EN_BIT \
(EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
@@ -254,90 +215,90 @@ int vbus_irq;
/*------- (0x003C) EP0 Write Register */
/*------- (0x0040:) EPN Control Register */
-#define EPN_EN BIT31
-#define EPN_BUF_TYPE BIT30
-#define EPN_BUF_SINGLE BIT30
+#define EPN_EN BIT(31)
+#define EPN_BUF_TYPE BIT(30)
+#define EPN_BUF_SINGLE BIT(30)
-#define EPN_DIR0 BIT26
-#define EPN_MODE (BIT25 + BIT24)
+#define EPN_DIR0 BIT(26)
+#define EPN_MODE (BIT(25) | BIT(24))
#define EPN_BULK 0
-#define EPN_INTERRUPT BIT24
-#define EPN_ISO BIT25
-
-#define EPN_OVERSEL BIT17
-#define EPN_AUTO BIT16
-
-#define EPN_IPIDCLR BIT11
-#define EPN_OPIDCLR BIT10
-#define EPN_BCLR BIT09
-#define EPN_CBCLR BIT08
-#define EPN_DEND BIT07
-#define EPN_DW (BIT06 + BIT05)
+#define EPN_INTERRUPT BIT(24)
+#define EPN_ISO BIT(25)
+
+#define EPN_OVERSEL BIT(17)
+#define EPN_AUTO BIT(16)
+
+#define EPN_IPIDCLR BIT(11)
+#define EPN_OPIDCLR BIT(10)
+#define EPN_BCLR BIT(9)
+#define EPN_CBCLR BIT(8)
+#define EPN_DEND BIT(7)
+#define EPN_DW (BIT(6) | BIT(5))
#define EPN_DW4 0
-#define EPN_DW3 (BIT06 + BIT05)
-#define EPN_DW2 BIT06
-#define EPN_DW1 BIT05
+#define EPN_DW3 (BIT(6) | BIT(5))
+#define EPN_DW2 BIT(6)
+#define EPN_DW1 BIT(5)
-#define EPN_OSTL_EN BIT04
-#define EPN_ISTL BIT03
-#define EPN_OSTL BIT02
+#define EPN_OSTL_EN BIT(4)
+#define EPN_ISTL BIT(3)
+#define EPN_OSTL BIT(2)
-#define EPN_ONAK BIT00
+#define EPN_ONAK BIT(0)
/*------- (0x0044:) EPN Status Register */
-#define EPN_ISO_PIDERR BIT29 /* R */
-#define EPN_OPID BIT28 /* R */
-#define EPN_OUT_NOTKN BIT27 /* R */
-#define EPN_ISO_OR BIT26 /* R */
-
-#define EPN_ISO_CRC BIT24 /* R */
-#define EPN_OUT_END_INT BIT23 /* RW */
-#define EPN_OUT_OR_INT BIT22 /* RW */
-#define EPN_OUT_NAK_ERR_INT BIT21 /* RW */
-#define EPN_OUT_STALL_INT BIT20 /* RW */
-#define EPN_OUT_INT BIT19 /* RW */
-#define EPN_OUT_NULL_INT BIT18 /* RW */
-#define EPN_OUT_FULL BIT17 /* R */
-#define EPN_OUT_EMPTY BIT16 /* R */
-
-#define EPN_IPID BIT10 /* R */
-#define EPN_IN_NOTKN BIT09 /* R */
-#define EPN_ISO_UR BIT08 /* R */
-#define EPN_IN_END_INT BIT07 /* RW */
-
-#define EPN_IN_NAK_ERR_INT BIT05 /* RW */
-#define EPN_IN_STALL_INT BIT04 /* RW */
-#define EPN_IN_INT BIT03 /* RW */
-#define EPN_IN_DATA BIT02 /* R */
-#define EPN_IN_FULL BIT01 /* R */
-#define EPN_IN_EMPTY BIT00 /* R */
+#define EPN_ISO_PIDERR BIT(29) /* R */
+#define EPN_OPID BIT(28) /* R */
+#define EPN_OUT_NOTKN BIT(27) /* R */
+#define EPN_ISO_OR BIT(26) /* R */
+
+#define EPN_ISO_CRC BIT(24) /* R */
+#define EPN_OUT_END_INT BIT(23) /* RW */
+#define EPN_OUT_OR_INT BIT(22) /* RW */
+#define EPN_OUT_NAK_ERR_INT BIT(21) /* RW */
+#define EPN_OUT_STALL_INT BIT(20) /* RW */
+#define EPN_OUT_INT BIT(19) /* RW */
+#define EPN_OUT_NULL_INT BIT(18) /* RW */
+#define EPN_OUT_FULL BIT(17) /* R */
+#define EPN_OUT_EMPTY BIT(16) /* R */
+
+#define EPN_IPID BIT(10) /* R */
+#define EPN_IN_NOTKN BIT(9) /* R */
+#define EPN_ISO_UR BIT(8) /* R */
+#define EPN_IN_END_INT BIT(7) /* RW */
+
+#define EPN_IN_NAK_ERR_INT BIT(5) /* RW */
+#define EPN_IN_STALL_INT BIT(4) /* RW */
+#define EPN_IN_INT BIT(3) /* RW */
+#define EPN_IN_DATA BIT(2) /* R */
+#define EPN_IN_FULL BIT(1) /* R */
+#define EPN_IN_EMPTY BIT(0) /* R */
#define EPN_INT_EN \
(EPN_OUT_END_INT | EPN_OUT_INT | EPN_IN_END_INT | EPN_IN_INT)
/*------- (0x0048:) EPN Interrupt Enable Register */
-#define EPN_OUT_END_EN BIT23 /* RW */
-#define EPN_OUT_OR_EN BIT22 /* RW */
-#define EPN_OUT_NAK_ERR_EN BIT21 /* RW */
-#define EPN_OUT_STALL_EN BIT20 /* RW */
-#define EPN_OUT_EN BIT19 /* RW */
-#define EPN_OUT_NULL_EN BIT18 /* RW */
+#define EPN_OUT_END_EN BIT(23) /* RW */
+#define EPN_OUT_OR_EN BIT(22) /* RW */
+#define EPN_OUT_NAK_ERR_EN BIT(21) /* RW */
+#define EPN_OUT_STALL_EN BIT(20) /* RW */
+#define EPN_OUT_EN BIT(19) /* RW */
+#define EPN_OUT_NULL_EN BIT(18) /* RW */
-#define EPN_IN_END_EN BIT07 /* RW */
+#define EPN_IN_END_EN BIT(7) /* RW */
-#define EPN_IN_NAK_ERR_EN BIT05 /* RW */
-#define EPN_IN_STALL_EN BIT04 /* RW */
-#define EPN_IN_EN BIT03 /* RW */
+#define EPN_IN_NAK_ERR_EN BIT(5) /* RW */
+#define EPN_IN_STALL_EN BIT(4) /* RW */
+#define EPN_IN_EN BIT(3) /* RW */
/*------- (0x004C:) EPN Interrupt Enable Register */
-#define EPN_STOP_MODE BIT11
-#define EPN_DEND_SET BIT10
-#define EPN_BURST_SET BIT09
-#define EPN_STOP_SET BIT08
+#define EPN_STOP_MODE BIT(11)
+#define EPN_DEND_SET BIT(10)
+#define EPN_BURST_SET BIT(9)
+#define EPN_STOP_SET BIT(8)
-#define EPN_DMA_EN BIT04
+#define EPN_DMA_EN BIT(4)
-#define EPN_DMAMODE0 BIT00
+#define EPN_DMAMODE0 BIT(0)
/*------- (0x0050:) EPN MaxPacket & BaseAddress Register */
#define EPN_BASEAD 0x1FFF0000
@@ -351,62 +312,62 @@ int vbus_irq;
/*------- (0x005C:) EPN Write Register */
/*------- (0x1000) AHBSCTR Register */
-#define WAIT_MODE BIT00
+#define WAIT_MODE BIT(0)
/*------- (0x1004) AHBMCTR Register */
-#define ARBITER_CTR BIT31 /* RW */
-#define MCYCLE_RST BIT12 /* RW */
+#define ARBITER_CTR BIT(31) /* RW */
+#define MCYCLE_RST BIT(12) /* RW */
-#define ENDIAN_CTR (BIT09 + BIT08) /* RW */
-#define ENDIAN_BYTE_SWAP BIT09
+#define ENDIAN_CTR (BIT(9) | BIT(8)) /* RW */
+#define ENDIAN_BYTE_SWAP BIT(9)
#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
-#define HBUSREQ_MODE BIT05 /* RW */
-#define HTRANS_MODE BIT04 /* RW */
+#define HBUSREQ_MODE BIT(5) /* RW */
+#define HTRANS_MODE BIT(4) /* RW */
-#define WBURST_TYPE BIT02 /* RW */
-#define BURST_TYPE (BIT01 + BIT00) /* RW */
+#define WBURST_TYPE BIT(2) /* RW */
+#define BURST_TYPE (BIT(1) | BIT(0)) /* RW */
#define BURST_MAX_16 0
-#define BURST_MAX_8 BIT00
-#define BURST_MAX_4 BIT01
+#define BURST_MAX_8 BIT(0)
+#define BURST_MAX_4 BIT(1)
#define BURST_SINGLE BURST_TYPE
/*------- (0x1008) AHBBINT Register */
#define DMA_ENDINT 0xFFFE0000 /* RW */
-#define AHB_VBUS_INT BIT13 /* RW */
+#define AHB_VBUS_INT BIT(13) /* RW */
-#define MBUS_ERRINT BIT06 /* RW */
+#define MBUS_ERRINT BIT(6) /* RW */
-#define SBUS_ERRINT0 BIT04 /* RW */
+#define SBUS_ERRINT0 BIT(4) /* RW */
#define ERR_MASTER 0x0000000F /* R */
/*------- (0x100C) AHBBINTEN Register */
#define DMA_ENDINTEN 0xFFFE0000 /* RW */
-#define VBUS_INTEN BIT13 /* RW */
+#define VBUS_INTEN BIT(13) /* RW */
-#define MBUS_ERRINTEN BIT06 /* RW */
+#define MBUS_ERRINTEN BIT(6) /* RW */
-#define SBUS_ERRINT0EN BIT04 /* RW */
+#define SBUS_ERRINT0EN BIT(4) /* RW */
/*------- (0x1010) EPCTR Register */
-#define DIRPD BIT12 /* RW */
+#define DIRPD BIT(12) /* RW */
-#define VBUS_LEVEL BIT08 /* R */
+#define VBUS_LEVEL BIT(8) /* R */
-#define PLL_RESUME BIT05 /* RW */
-#define PLL_LOCK BIT04 /* R */
+#define PLL_RESUME BIT(5) /* RW */
+#define PLL_LOCK BIT(4) /* R */
-#define EPC_RST BIT00 /* RW */
+#define EPC_RST BIT(0) /* RW */
/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT09 + BIT08) /* R */
-#define DM_LEVEL BIT09 /* R */
-#define DP_LEVEL BIT08 /* R */
+#define LINESTATE (BIT(9) | BIT(8)) /* R */
+#define DM_LEVEL BIT(9) /* R */
+#define DP_LEVEL BIT(8) /* R */
-#define PHY_TST BIT01 /* RW */
-#define PHY_TSTCLK BIT00 /* RW */
+#define PHY_TST BIT(1) /* RW */
+#define PHY_TSTCLK BIT(0) /* RW */
/*------- (0x1020) USBSSVER Register */
#define AHBB_VER 0x00FF0000 /* R */
@@ -420,8 +381,8 @@ int vbus_irq;
/*------- (0x1110:) EPNDCR1 Register */
#define DCR1_EPN_DMACNT 0x00FF0000 /* RW */
-#define DCR1_EPN_DIR0 BIT01 /* RW */
-#define DCR1_EPN_REQEN BIT00 /* RW */
+#define DCR1_EPN_DIR0 BIT(1) /* RW */
+#define DCR1_EPN_REQEN BIT(0) /* RW */
/*------- (0x1114:) EPNDCR2 Register */
#define DCR2_EPN_LMPKT 0x07FF0000 /* RW */
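The header conversion above replaces the driver-local BITnn constants with the kernel's BIT() macro. BIT(n) from <linux/bits.h> expands to (1UL << (n)), and composing masks with | is equivalent to the old '+' arithmetic (which only worked because the operands never overlapped) while being safe against accidental overlap. A short illustrative excerpt:

#include <linux/bits.h>

#define EXAMPLE_UFRAME	(BIT(14) | BIT(13) | BIT(12))	/* == 0x7000 */
#define EXAMPLE_EP0_DW	(BIT(6) | BIT(5))		/* == 0x60   */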
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 5b8d0bae9ff3..b5fded15e8a6 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -293,7 +293,7 @@ static int controller_probe(struct platform_device *pdev)
regulator = devm_regulator_register(dev, &can_power_desc, &config);
if (IS_ERR(regulator)) {
err = PTR_ERR(regulator);
- goto out_reset;
+ goto out_ida;
}
/* make controller info visible to userspace */
cd->class_dev = kzalloc(sizeof(*cd->class_dev), GFP_KERNEL);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
index 4f0bff86e43e..ace4a6d28562 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
@@ -12,7 +12,7 @@
static struct {
enum dpsw_counter id;
char name[ETH_GSTRING_LEN];
-} ethsw_ethtool_counters[] = {
+} dpaa2_switch_ethtool_counters[] = {
{DPSW_CNT_ING_FRAME, "rx frames"},
{DPSW_CNT_ING_BYTE, "rx bytes"},
{DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
@@ -27,10 +27,10 @@ static struct {
};
-#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
+#define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters)
-static void ethsw_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+static void dpaa2_switch_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
u16 version_major, version_minor;
@@ -53,8 +53,8 @@ static void ethsw_get_drvinfo(struct net_device *netdev,
}
static int
-ethsw_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *link_ksettings)
+dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state = {0};
@@ -84,8 +84,8 @@ out:
}
static int
-ethsw_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *link_ksettings)
+dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -132,55 +132,56 @@ ethsw_set_link_ksettings(struct net_device *netdev,
return err;
}
-static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
+static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ETHSW_NUM_COUNTERS;
+ return DPAA2_SWITCH_NUM_COUNTERS;
default:
return -EOPNOTSUPP;
}
}
-static void ethsw_ethtool_get_strings(struct net_device *netdev,
- u32 stringset, u8 *data)
+static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++)
memcpy(data + i * ETH_GSTRING_LEN,
- ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
+ dpaa2_switch_ethtool_counters[i].name,
+ ETH_GSTRING_LEN);
break;
}
}
-static void ethsw_ethtool_get_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
+static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int i, err;
- for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
+ for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) {
err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx,
- ethsw_ethtool_counters[i].id,
+ dpaa2_switch_ethtool_counters[i].id,
&data[i]);
if (err)
netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
- ethsw_ethtool_counters[i].name, err);
+ dpaa2_switch_ethtool_counters[i].name, err);
}
}
-const struct ethtool_ops ethsw_port_ethtool_ops = {
- .get_drvinfo = ethsw_get_drvinfo,
+const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+ .get_drvinfo = dpaa2_switch_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_link_ksettings = ethsw_get_link_ksettings,
- .set_link_ksettings = ethsw_set_link_ksettings,
- .get_strings = ethsw_ethtool_get_strings,
- .get_ethtool_stats = ethsw_ethtool_get_stats,
- .get_sset_count = ethsw_ethtool_get_sset_count,
+ .get_link_ksettings = dpaa2_switch_get_link_ksettings,
+ .set_link_ksettings = dpaa2_switch_set_link_ksettings,
+ .get_strings = dpaa2_switch_ethtool_get_strings,
+ .get_ethtool_stats = dpaa2_switch_ethtool_get_stats,
+ .get_sset_count = dpaa2_switch_ethtool_get_sset_count,
};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 316fd9afd461..20c6326e5dee 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -24,7 +24,7 @@
#define DEFAULT_VLAN_ID 1
-static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
+static int dpaa2_switch_add_vlan(struct ethsw_core *ethsw, u16 vid)
{
int err;
@@ -43,7 +43,7 @@ static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
return 0;
}
-static bool ethsw_port_is_up(struct ethsw_port_priv *port_priv)
+static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
struct net_device *netdev = port_priv->netdev;
struct dpsw_link_state state;
@@ -62,7 +62,7 @@ static bool ethsw_port_is_up(struct ethsw_port_priv *port_priv)
return state.up ? true : false;
}
-static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
+static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
@@ -80,7 +80,7 @@ static int ethsw_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
tci_cfg.vlan_id = pvid;
/* Interface needs to be down to change PVID */
- up = ethsw_port_is_up(port_priv);
+ up = dpaa2_switch_port_is_up(port_priv);
if (up) {
err = dpsw_if_disable(ethsw->mc_io, 0,
ethsw->dpsw_handle,
@@ -117,8 +117,8 @@ set_tci_error:
return err;
}
-static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
- u16 vid, u16 flags)
+static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
@@ -153,7 +153,7 @@ static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
}
if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = ethsw_port_set_pvid(port_priv, vid);
+ err = dpaa2_switch_port_set_pvid(port_priv, vid);
if (err)
return err;
}
@@ -161,7 +161,7 @@ static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
return 0;
}
-static int ethsw_set_learning(struct ethsw_core *ethsw, bool enable)
+static int dpaa2_switch_set_learning(struct ethsw_core *ethsw, bool enable)
{
enum dpsw_fdb_learning_mode learn_mode;
int err;
@@ -182,7 +182,7 @@ static int ethsw_set_learning(struct ethsw_core *ethsw, bool enable)
return 0;
}
-static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
+static int dpaa2_switch_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
{
int err;
@@ -199,7 +199,7 @@ static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, bool enable)
return 0;
}
-static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
struct dpsw_stp_cfg stp_cfg = {
.state = state,
@@ -229,7 +229,7 @@ static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
return 0;
}
-static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
+static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
struct ethsw_port_priv *ppriv_local = NULL;
int i, err;
@@ -252,8 +252,8 @@ static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
return 0;
}
-static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
+static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
{
struct dpsw_fdb_unicast_cfg entry = {0};
int err;
@@ -271,8 +271,8 @@ static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
return err;
}
-static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
+static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
{
struct dpsw_fdb_unicast_cfg entry = {0};
int err;
@@ -291,8 +291,8 @@ static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
return err;
}
-static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
+static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
{
struct dpsw_fdb_multicast_cfg entry = {0};
int err;
@@ -312,8 +312,8 @@ static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
return err;
}
-static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
- const unsigned char *addr)
+static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
{
struct dpsw_fdb_multicast_cfg entry = {0};
int err;
@@ -333,33 +333,33 @@ static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
return err;
}
-static int port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+static int dpaa2_switch_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
if (is_unicast_ether_addr(addr))
- return ethsw_port_fdb_add_uc(netdev_priv(dev),
- addr);
+ return dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ addr);
else
- return ethsw_port_fdb_add_mc(netdev_priv(dev),
- addr);
+ return dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ addr);
}
-static int port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int dpaa2_switch_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
{
if (is_unicast_ether_addr(addr))
- return ethsw_port_fdb_del_uc(netdev_priv(dev),
- addr);
+ return dpaa2_switch_port_fdb_del_uc(netdev_priv(dev),
+ addr);
else
- return ethsw_port_fdb_del_mc(netdev_priv(dev),
- addr);
+ return dpaa2_switch_port_fdb_del_mc(netdev_priv(dev),
+ addr);
}
-static void port_get_stats(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static void dpaa2_switch_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
u64 tmp;
@@ -424,26 +424,26 @@ error:
netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}
-static bool port_has_offload_stats(const struct net_device *netdev,
- int attr_id)
+static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
{
return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}
-static int port_get_offload_stats(int attr_id,
- const struct net_device *netdev,
- void *sp)
+static int dpaa2_switch_port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
- port_get_stats((struct net_device *)netdev, sp);
+ dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
return 0;
}
return -EINVAL;
}
-static int port_change_mtu(struct net_device *netdev, int mtu)
+static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
@@ -463,7 +463,7 @@ static int port_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
-static int port_carrier_state_sync(struct net_device *netdev)
+static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state;
@@ -496,7 +496,7 @@ static int port_carrier_state_sync(struct net_device *netdev)
return 0;
}
-static int port_open(struct net_device *netdev)
+static int dpaa2_switch_port_open(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
@@ -520,10 +520,10 @@ static int port_open(struct net_device *netdev)
}
/* sync carrier state */
- err = port_carrier_state_sync(netdev);
+ err = dpaa2_switch_port_carrier_state_sync(netdev);
if (err) {
netdev_err(netdev,
- "port_carrier_state_sync err %d\n", err);
+ "dpaa2_switch_port_carrier_state_sync err %d\n", err);
goto err_carrier_sync;
}
@@ -536,7 +536,7 @@ err_carrier_sync:
return err;
}
-static int port_stop(struct net_device *netdev)
+static int dpaa2_switch_port_stop(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
@@ -552,8 +552,8 @@ static int port_stop(struct net_device *netdev)
return 0;
}
-static netdev_tx_t port_dropframe(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t dpaa2_switch_port_dropframe(struct sk_buff *skb,
+ struct net_device *netdev)
{
/* we don't support I/O for now, drop the frame */
dev_kfree_skb_any(skb);
@@ -561,8 +561,8 @@ static netdev_tx_t port_dropframe(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int swdev_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int dpaa2_switch_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct ethsw_port_priv *port_priv = netdev_priv(dev);
@@ -572,8 +572,8 @@ static int swdev_get_port_parent_id(struct net_device *dev,
return 0;
}
-static int port_get_phys_name(struct net_device *netdev, char *name,
- size_t len)
+static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
+ size_t len)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
@@ -592,8 +592,8 @@ struct ethsw_dump_ctx {
int idx;
};
-static int ethsw_fdb_do_dump(struct fdb_dump_entry *entry,
- struct ethsw_dump_ctx *dump)
+static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
+ struct ethsw_dump_ctx *dump)
{
int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
@@ -632,8 +632,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int port_fdb_valid_entry(struct fdb_dump_entry *entry,
- struct ethsw_port_priv *port_priv)
+static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
+ struct ethsw_port_priv *port_priv)
{
int idx = port_priv->idx;
int valid;
@@ -646,9 +646,9 @@ static int port_fdb_valid_entry(struct fdb_dump_entry *entry,
return valid;
}
-static int port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *net_dev,
- struct net_device *filter_dev, int *idx)
+static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *net_dev,
+ struct net_device *filter_dev, int *idx)
{
struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -693,10 +693,10 @@ static int port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (i = 0; i < num_fdb_entries; i++) {
fdb_entry = fdb_entries[i];
- if (!port_fdb_valid_entry(&fdb_entry, port_priv))
+ if (!dpaa2_switch_port_fdb_valid_entry(&fdb_entry, port_priv))
continue;
- err = ethsw_fdb_do_dump(&fdb_entry, &dump);
+ err = dpaa2_switch_fdb_dump_nl(&fdb_entry, &dump);
if (err)
goto end;
}
@@ -715,7 +715,7 @@ err_map:
return err;
}
-static int ethsw_port_set_mac_addr(struct ethsw_port_priv *port_priv)
+static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *net_dev = port_priv->netdev;
@@ -755,30 +755,30 @@ static int ethsw_port_set_mac_addr(struct ethsw_port_priv *port_priv)
return 0;
}
-static const struct net_device_ops ethsw_port_ops = {
- .ndo_open = port_open,
- .ndo_stop = port_stop,
+static const struct net_device_ops dpaa2_switch_port_ops = {
+ .ndo_open = dpaa2_switch_port_open,
+ .ndo_stop = dpaa2_switch_port_stop,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = port_get_stats,
- .ndo_change_mtu = port_change_mtu,
- .ndo_has_offload_stats = port_has_offload_stats,
- .ndo_get_offload_stats = port_get_offload_stats,
- .ndo_fdb_add = port_fdb_add,
- .ndo_fdb_del = port_fdb_del,
- .ndo_fdb_dump = port_fdb_dump,
-
- .ndo_start_xmit = port_dropframe,
- .ndo_get_port_parent_id = swdev_get_port_parent_id,
- .ndo_get_phys_port_name = port_get_phys_name,
+ .ndo_get_stats64 = dpaa2_switch_port_get_stats,
+ .ndo_change_mtu = dpaa2_switch_port_change_mtu,
+ .ndo_has_offload_stats = dpaa2_switch_port_has_offload_stats,
+ .ndo_get_offload_stats = dpaa2_switch_port_get_offload_stats,
+ .ndo_fdb_add = dpaa2_switch_port_fdb_add,
+ .ndo_fdb_del = dpaa2_switch_port_fdb_del,
+ .ndo_fdb_dump = dpaa2_switch_port_fdb_dump,
+
+ .ndo_start_xmit = dpaa2_switch_port_dropframe,
+ .ndo_get_port_parent_id = dpaa2_switch_port_parent_id,
+ .ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
};
-static bool ethsw_port_dev_check(const struct net_device *netdev,
- struct notifier_block *nb)
+static bool dpaa2_switch_port_dev_check(const struct net_device *netdev,
+ struct notifier_block *nb)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- if (netdev->netdev_ops == &ethsw_port_ops &&
+ if (netdev->netdev_ops == &dpaa2_switch_port_ops &&
(!nb || &port_priv->ethsw_data->port_nb == nb ||
&port_priv->ethsw_data->port_switchdev_nb == nb ||
&port_priv->ethsw_data->port_switchdevb_nb == nb))
@@ -787,17 +787,17 @@ static bool ethsw_port_dev_check(const struct net_device *netdev,
return false;
}
-static void ethsw_links_state_update(struct ethsw_core *ethsw)
+static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw)
{
int i;
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- port_carrier_state_sync(ethsw->ports[i]->netdev);
- ethsw_port_set_mac_addr(ethsw->ports[i]);
+ dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev);
+ dpaa2_switch_port_set_mac_addr(ethsw->ports[i]);
}
}
-static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
+static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
@@ -819,13 +819,13 @@ static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
- ethsw_links_state_update(ethsw);
+ dpaa2_switch_links_state_update(ethsw);
out:
return IRQ_HANDLED;
}
-static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
+static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
@@ -855,7 +855,7 @@ static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
NULL,
- ethsw_irq0_handler_thread,
+ dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
if (err) {
@@ -886,7 +886,7 @@ free_irq:
return err;
}
-static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
@@ -900,21 +900,21 @@ static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
fsl_mc_free_irqs(sw_dev);
}
-static int port_attr_stp_state_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- u8 state)
+static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ u8 state)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
if (switchdev_trans_ph_prepare(trans))
return 0;
- return ethsw_port_set_stp_state(port_priv, state);
+ return dpaa2_switch_port_set_stp_state(port_priv, state);
}
-static int port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+static int dpaa2_switch_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
{
if (flags & ~(BR_LEARNING | BR_FLOOD))
return -EINVAL;
@@ -922,9 +922,9 @@ static int port_attr_br_flags_pre_set(struct net_device *netdev,
return 0;
}
-static int port_attr_br_flags_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+static int dpaa2_switch_port_attr_br_flags_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err = 0;
@@ -933,35 +933,35 @@ static int port_attr_br_flags_set(struct net_device *netdev,
return 0;
/* Learning is enabled per switch */
- err = ethsw_set_learning(port_priv->ethsw_data,
- !!(flags & BR_LEARNING));
+ err = dpaa2_switch_set_learning(port_priv->ethsw_data,
+ !!(flags & BR_LEARNING));
if (err)
goto exit;
- err = ethsw_port_set_flood(port_priv, !!(flags & BR_FLOOD));
+ err = dpaa2_switch_port_set_flood(port_priv, !!(flags & BR_FLOOD));
exit:
return err;
}
-static int swdev_port_attr_set(struct net_device *netdev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+static int dpaa2_switch_port_attr_set(struct net_device *netdev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
{
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = port_attr_stp_state_set(netdev, trans,
- attr->u.stp_state);
+ err = dpaa2_switch_port_attr_stp_state_set(netdev, trans,
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- err = port_attr_br_flags_pre_set(netdev, trans,
- attr->u.brport_flags);
+ err = dpaa2_switch_port_attr_br_flags_pre_set(netdev, trans,
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = port_attr_br_flags_set(netdev, trans,
- attr->u.brport_flags);
+ err = dpaa2_switch_port_attr_br_flags_set(netdev, trans,
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
/* VLANs are supported by default */
@@ -974,9 +974,9 @@ static int swdev_port_attr_set(struct net_device *netdev,
return err;
}
-static int port_vlans_add(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int dpaa2_switch_port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -1004,13 +1004,13 @@ static int port_vlans_add(struct net_device *netdev,
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
if (!port_priv->ethsw_data->vlans[vid]) {
/* this is a new VLAN */
- err = ethsw_add_vlan(port_priv->ethsw_data, vid);
+ err = dpaa2_switch_add_vlan(port_priv->ethsw_data, vid);
if (err)
return err;
port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
}
- err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
+ err = dpaa2_switch_port_add_vlan(port_priv, vid, vlan->flags);
if (err)
break;
}
@@ -1018,8 +1018,8 @@ static int port_vlans_add(struct net_device *netdev,
return err;
}
-static int port_lookup_address(struct net_device *netdev, int is_uc,
- const unsigned char *addr)
+static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
{
struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
struct netdev_hw_addr *ha;
@@ -1035,9 +1035,9 @@ static int port_lookup_address(struct net_device *netdev, int is_uc,
return 0;
}
-static int port_mdb_add(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
@@ -1046,38 +1046,38 @@ static int port_mdb_add(struct net_device *netdev,
return 0;
/* Check if address is already set on this port */
- if (port_lookup_address(netdev, 0, mdb->addr))
+ if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
return -EEXIST;
- err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
+ err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
if (err)
return err;
err = dev_mc_add(netdev, mdb->addr);
if (err) {
netdev_err(netdev, "dev_mc_add err %d\n", err);
- ethsw_port_fdb_del_mc(port_priv, mdb->addr);
+ dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
}
return err;
}
-static int swdev_port_obj_add(struct net_device *netdev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+static int dpaa2_switch_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
{
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = port_vlans_add(netdev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ err = dpaa2_switch_port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = port_mdb_add(netdev,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ err = dpaa2_switch_port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
break;
default:
err = -EOPNOTSUPP;
@@ -1087,7 +1087,7 @@ static int swdev_port_obj_add(struct net_device *netdev,
return err;
}
-static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
@@ -1098,7 +1098,7 @@ static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
return -ENOENT;
if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
- err = ethsw_port_set_pvid(port_priv, 0);
+ err = dpaa2_switch_port_set_pvid(port_priv, 0);
if (err)
return err;
}
@@ -1136,7 +1136,7 @@ static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
- err = ethsw_dellink_switch(ethsw, vid);
+ err = dpaa2_switch_dellink(ethsw, vid);
if (err)
return err;
}
@@ -1144,8 +1144,8 @@ static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
return 0;
}
-static int port_vlans_del(struct net_device *netdev,
- const struct switchdev_obj_port_vlan *vlan)
+static int dpaa2_switch_port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int vid, err = 0;
@@ -1154,7 +1154,7 @@ static int port_vlans_del(struct net_device *netdev,
return -EOPNOTSUPP;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ethsw_port_del_vlan(port_priv, vid);
+ err = dpaa2_switch_port_del_vlan(port_priv, vid);
if (err)
break;
}
@@ -1162,16 +1162,16 @@ static int port_vlans_del(struct net_device *netdev,
return err;
}
-static int port_mdb_del(struct net_device *netdev,
- const struct switchdev_obj_port_mdb *mdb)
+static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
- if (!port_lookup_address(netdev, 0, mdb->addr))
+ if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
return -ENOENT;
- err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
+ err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
if (err)
return err;
@@ -1184,17 +1184,17 @@ static int port_mdb_del(struct net_device *netdev,
return err;
}
-static int swdev_port_obj_del(struct net_device *netdev,
- const struct switchdev_obj *obj)
+static int dpaa2_switch_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
{
int err;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1203,23 +1203,22 @@ static int swdev_port_obj_del(struct net_device *netdev,
return err;
}
-static int
-ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
- struct switchdev_notifier_port_attr_info
- *port_attr_info)
+static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info
+ *port_attr_info)
{
int err;
- err = swdev_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = dpaa2_switch_port_attr_set(netdev, port_attr_info->attr,
+ port_attr_info->trans);
port_attr_info->handled = true;
return notifier_from_errno(err);
}
/* For the moment, only flood setting needs to be updated */
-static int port_bridge_join(struct net_device *netdev,
- struct net_device *upper_dev)
+static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
+ struct net_device *upper_dev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -1237,7 +1236,7 @@ static int port_bridge_join(struct net_device *netdev,
}
netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
- if (!ethsw_port_dev_check(other_dev, NULL))
+ if (!dpaa2_switch_port_dev_check(other_dev, NULL))
continue;
other_port_priv = netdev_priv(other_dev);
@@ -1249,35 +1248,35 @@ static int port_bridge_join(struct net_device *netdev,
}
/* Enable flooding */
- err = ethsw_port_set_flood(port_priv, 1);
+ err = dpaa2_switch_port_set_flood(port_priv, 1);
if (!err)
port_priv->bridge_dev = upper_dev;
return err;
}
-static int port_bridge_leave(struct net_device *netdev)
+static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int err;
/* Disable flooding */
- err = ethsw_port_set_flood(port_priv, 0);
+ err = dpaa2_switch_port_set_flood(port_priv, 0);
if (!err)
port_priv->bridge_dev = NULL;
return err;
}
-static int port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *upper_dev;
int err = 0;
- if (!ethsw_port_dev_check(netdev, nb))
+ if (!dpaa2_switch_port_dev_check(netdev, nb))
return NOTIFY_DONE;
/* Handle just upper dev link/unlink for the moment */
@@ -1285,9 +1284,9 @@ static int port_netdevice_event(struct notifier_block *nb,
upper_dev = info->upper_dev;
if (netif_is_bridge_master(upper_dev)) {
if (info->linking)
- err = port_bridge_join(netdev, upper_dev);
+ err = dpaa2_switch_port_bridge_join(netdev, upper_dev);
else
- err = port_bridge_leave(netdev);
+ err = dpaa2_switch_port_bridge_leave(netdev);
}
}
@@ -1301,7 +1300,7 @@ struct ethsw_switchdev_event_work {
unsigned long event;
};
-static void ethsw_switchdev_event_work(struct work_struct *work)
+static void dpaa2_switch_event_work(struct work_struct *work)
{
struct ethsw_switchdev_event_work *switchdev_work =
container_of(work, struct ethsw_switchdev_event_work, work);
@@ -1317,11 +1316,11 @@ static void ethsw_switchdev_event_work(struct work_struct *work)
if (!fdb_info->added_by_user)
break;
if (is_unicast_ether_addr(fdb_info->addr))
- err = ethsw_port_fdb_add_uc(netdev_priv(dev),
- fdb_info->addr);
+ err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
+ fdb_info->addr);
else
- err = ethsw_port_fdb_add_mc(netdev_priv(dev),
- fdb_info->addr);
+ err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
+ fdb_info->addr);
if (err)
break;
fdb_info->offloaded = true;
@@ -1332,9 +1331,9 @@ static void ethsw_switchdev_event_work(struct work_struct *work)
if (!fdb_info->added_by_user)
break;
if (is_unicast_ether_addr(fdb_info->addr))
- ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
+ dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
else
- ethsw_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
+ dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
break;
}
@@ -1345,8 +1344,8 @@ static void ethsw_switchdev_event_work(struct work_struct *work)
}
/* Called under rcu_read_lock() */
-static int port_switchdev_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct ethsw_port_priv *port_priv = netdev_priv(dev);
@@ -1354,17 +1353,17 @@ static int port_switchdev_event(struct notifier_block *nb,
struct switchdev_notifier_fdb_info *fdb_info = ptr;
struct ethsw_core *ethsw = port_priv->ethsw_data;
- if (!ethsw_port_dev_check(dev, nb))
+ if (!dpaa2_switch_port_dev_check(dev, nb))
return NOTIFY_DONE;
if (event == SWITCHDEV_PORT_ATTR_SET)
- return ethsw_switchdev_port_attr_set_event(dev, ptr);
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
if (!switchdev_work)
return NOTIFY_BAD;
- INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
+ INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
switchdev_work->dev = dev;
switchdev_work->event = event;
@@ -1397,20 +1396,19 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static int
-ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
- struct switchdev_notifier_port_obj_info
- *port_obj_info)
+static int dpaa2_switch_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
{
int err = -EOPNOTSUPP;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = swdev_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
break;
case SWITCHDEV_PORT_OBJ_DEL:
- err = swdev_port_obj_del(netdev, port_obj_info->obj);
+ err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
break;
}
@@ -1418,45 +1416,45 @@ ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
return notifier_from_errno(err);
}
-static int port_switchdev_blocking_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
- if (!ethsw_port_dev_check(dev, nb))
+ if (!dpaa2_switch_port_dev_check(dev, nb))
return NOTIFY_DONE;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
case SWITCHDEV_PORT_OBJ_DEL:
- return ethsw_switchdev_port_obj_event(event, dev, ptr);
+ return dpaa2_switch_port_obj_event(event, dev, ptr);
case SWITCHDEV_PORT_ATTR_SET:
- return ethsw_switchdev_port_attr_set_event(dev, ptr);
+ return dpaa2_switch_port_attr_set_event(dev, ptr);
}
return NOTIFY_DONE;
}
-static int ethsw_register_notifier(struct device *dev)
+static int dpaa2_switch_register_notifier(struct device *dev)
{
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- ethsw->port_nb.notifier_call = port_netdevice_event;
+ ethsw->port_nb.notifier_call = dpaa2_switch_port_netdevice_event;
err = register_netdevice_notifier(&ethsw->port_nb);
if (err) {
dev_err(dev, "Failed to register netdev notifier\n");
return err;
}
- ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
+ ethsw->port_switchdev_nb.notifier_call = dpaa2_switch_port_event;
err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err) {
dev_err(dev, "Failed to register switchdev notifier\n");
goto err_switchdev_nb;
}
- ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
+ ethsw->port_switchdevb_nb.notifier_call = dpaa2_switch_port_blocking_event;
err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
if (err) {
dev_err(dev, "Failed to register switchdev blocking notifier\n");
@@ -1472,7 +1470,7 @@ err_switchdev_nb:
return err;
}
-static void ethsw_detect_features(struct ethsw_core *ethsw)
+static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
{
ethsw->features = 0;
@@ -1480,7 +1478,7 @@ static void ethsw_detect_features(struct ethsw_core *ethsw)
ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
}
-static int ethsw_init(struct fsl_mc_device *sw_dev)
+static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
@@ -1523,7 +1521,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
goto err_close;
}
- ethsw_detect_features(ethsw);
+ dpaa2_switch_detect_features(ethsw);
err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err) {
@@ -1568,7 +1566,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
goto err_close;
}
- err = ethsw_register_notifier(dev);
+ err = dpaa2_switch_register_notifier(dev);
if (err)
goto err_destroy_ordered_workqueue;
@@ -1582,7 +1580,7 @@ err_close:
return err;
}
-static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
+static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
@@ -1603,7 +1601,7 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
- err = ethsw_port_set_pvid(port_priv, 0);
+ err = dpaa2_switch_port_set_pvid(port_priv, 0);
if (err)
return err;
@@ -1615,7 +1613,7 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
-static void ethsw_unregister_notifier(struct device *dev)
+static void dpaa2_switch_unregister_notifier(struct device *dev)
{
struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct notifier_block *nb;
@@ -1639,20 +1637,20 @@ static void ethsw_unregister_notifier(struct device *dev)
"Failed to unregister netdev notifier (%d)\n", err);
}
-static void ethsw_takedown(struct fsl_mc_device *sw_dev)
+static void dpaa2_switch_takedown(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- ethsw_unregister_notifier(dev);
+ dpaa2_switch_unregister_notifier(dev);
err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
if (err)
dev_warn(dev, "dpsw_close err %d\n", err);
}
-static int ethsw_remove(struct fsl_mc_device *sw_dev)
+static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
struct ethsw_port_priv *port_priv;
struct ethsw_core *ethsw;
@@ -1662,7 +1660,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
dev = &sw_dev->dev;
ethsw = dev_get_drvdata(dev);
- ethsw_teardown_irqs(sw_dev);
+ dpaa2_switch_teardown_irqs(sw_dev);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -1673,7 +1671,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
}
kfree(ethsw->ports);
- ethsw_takedown(sw_dev);
+ dpaa2_switch_takedown(sw_dev);
destroy_workqueue(ethsw->workqueue);
@@ -1686,7 +1684,8 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
return 0;
}
-static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
+static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ u16 port_idx)
{
struct ethsw_port_priv *port_priv;
struct device *dev = ethsw->dev;
@@ -1710,18 +1709,18 @@ static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
port_priv->flood = true;
SET_NETDEV_DEV(port_netdev, dev);
- port_netdev->netdev_ops = &ethsw_port_ops;
- port_netdev->ethtool_ops = &ethsw_port_ethtool_ops;
+ port_netdev->netdev_ops = &dpaa2_switch_port_ops;
+ port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
/* Set MTU limits */
port_netdev->min_mtu = ETH_MIN_MTU;
port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
- err = ethsw_port_init(port_priv, port_idx);
+ err = dpaa2_switch_port_init(port_priv, port_idx);
if (err)
goto err_port_probe;
- err = ethsw_port_set_mac_addr(port_priv);
+ err = dpaa2_switch_port_set_mac_addr(port_priv);
if (err)
goto err_port_probe;
@@ -1741,7 +1740,7 @@ err_port_probe:
return err;
}
-static int ethsw_probe(struct fsl_mc_device *sw_dev)
+static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
{
struct device *dev = &sw_dev->dev;
struct ethsw_core *ethsw;
@@ -1766,7 +1765,7 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
goto err_free_drvdata;
}
- err = ethsw_init(sw_dev);
+ err = dpaa2_switch_init(sw_dev);
if (err)
goto err_free_cmdport;
@@ -1784,7 +1783,7 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
}
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
- err = ethsw_probe_port(ethsw, i);
+ err = dpaa2_switch_probe_port(ethsw, i);
if (err)
goto err_free_ports;
}
@@ -1800,7 +1799,7 @@ static int ethsw_probe(struct fsl_mc_device *sw_dev)
dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
/* Setup IRQs */
- err = ethsw_setup_irqs(sw_dev);
+ err = dpaa2_switch_setup_irqs(sw_dev);
if (err)
goto err_stop;
@@ -1819,7 +1818,7 @@ err_free_ports:
kfree(ethsw->ports);
err_takedown:
- ethsw_takedown(sw_dev);
+ dpaa2_switch_takedown(sw_dev);
err_free_cmdport:
fsl_mc_portal_free(ethsw->mc_io);
@@ -1831,26 +1830,26 @@ err_free_drvdata:
return err;
}
-static const struct fsl_mc_device_id ethsw_match_id_table[] = {
+static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpsw",
},
{ .vendor = 0x0 }
};
-MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
+MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
-static struct fsl_mc_driver eth_sw_drv = {
+static struct fsl_mc_driver dpaa2_switch_drv = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
- .probe = ethsw_probe,
- .remove = ethsw_remove,
- .match_id_table = ethsw_match_id_table
+ .probe = dpaa2_switch_probe,
+ .remove = dpaa2_switch_remove,
+ .match_id_table = dpaa2_switch_match_id_table
};
-module_fsl_mc_driver(eth_sw_drv);
+module_fsl_mc_driver(dpaa2_switch_drv);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index d136dbdcaffa..5f9211ccb1ef 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -39,7 +39,7 @@
#define ETHSW_FEATURE_MAC_ADDR BIT(0)
-extern const struct ethtool_ops ethsw_port_ethtool_ops;
+extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;
struct ethsw_core;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index aec0f19597a9..db83d34cd677 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -466,7 +466,7 @@ static void fwtty_throttle_port(struct fwtty_port *port)
* fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
*
* When the remote has finished tx, and all in-flight rx has been received and
- * and pushed to the flip buffer, the remote may close its device. This will
+ * pushed to the flip buffer, the remote may close its device. This will
* drop DTR on the remote which will drop carrier here. Typically, the tty is
* hung up when carrier is dropped or lost.
*
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 74538f8c5fa4..494aa823e998 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -688,7 +688,7 @@ static struct snd_soc_dai_driver gbaudio_dai[] = {
.playback = {
.stream_name = "I2S 0 Playback",
.rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FORMAT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
.rate_max = 48000,
.rate_min = 48000,
.channels_min = 1,
@@ -698,7 +698,7 @@ static struct snd_soc_dai_driver gbaudio_dai[] = {
.capture = {
.stream_name = "I2S 0 Capture",
.rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FORMAT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
.rate_max = 48000,
.rate_min = 48000,
.channels_min = 1,
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index 16f60256adb2..c52c4f361b90 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -219,7 +219,7 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
greybus_set_drvdata(bundle, gbmodule);
dai->id = 0;
- dai->data_cport = connection->intf_cport_id;
+ dai->data_cport = cpu_to_le16(connection->intf_cport_id);
dai->connection = connection;
list_add(&dai->list, &gbmodule->data_list);
@@ -329,7 +329,7 @@ static int gb_audio_probe(struct gb_bundle *bundle,
if (ret) {
dev_err(dev,
"%d:Error while enabling %d:data connection\n",
- ret, dai->data_cport);
+ ret, le16_to_cpu(dai->data_cport));
goto disable_data_connection;
}
}
@@ -451,7 +451,7 @@ static int gb_audio_resume(struct device *dev)
if (ret) {
dev_err(dev,
"%d:Error while enabling %d:data connection\n",
- ret, dai->data_cport);
+ ret, le16_to_cpu(dai->data_cport));
return ret;
}
}
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 83b38ae8908c..662e3e8b4b63 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -182,7 +182,7 @@ static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
/* update uinfo */
uinfo->access = data->access;
uinfo->count = data->vcount;
- uinfo->type = (snd_ctl_elem_type_t)info->type;
+ uinfo->type = (__force snd_ctl_elem_type_t)info->type;
switch (info->type) {
case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
@@ -466,7 +466,7 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
goto exit;
/* update ucontrol */
- if (gbvalue.value.integer_value[0] != val) {
+ if (le32_to_cpu(gbvalue.value.integer_value[0]) != val) {
for (wi = 0; wi < wlist->num_widgets; wi++) {
widget = wlist->widgets[wi];
snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol,
@@ -689,7 +689,7 @@ static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
return -ENOMEM;
ctldata->ctl_id = ctl->id;
ctldata->data_cport = le16_to_cpu(ctl->data_cport);
- ctldata->access = ctl->access;
+ ctldata->access = le32_to_cpu(ctl->access);
ctldata->vcount = ctl->count_values;
ctldata->info = &ctl->info;
*kctl = (struct snd_kcontrol_new)
@@ -744,10 +744,10 @@ static int gbcodec_enum_dapm_ctl_get(struct snd_kcontrol *kcontrol,
return ret;
}
- ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
+ ucontrol->value.enumerated.item[0] = le32_to_cpu(gbvalue.value.enumerated_item[0]);
if (e->shift_l != e->shift_r)
ucontrol->value.enumerated.item[1] =
- gbvalue.value.enumerated_item[1];
+ le32_to_cpu(gbvalue.value.enumerated_item[1]);
return 0;
}
@@ -800,11 +800,11 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
val = mux << e->shift_l;
mask = e->mask << e->shift_l;
- if (gbvalue.value.enumerated_item[0] !=
+ if (le32_to_cpu(gbvalue.value.enumerated_item[0]) !=
ucontrol->value.enumerated.item[0]) {
change = 1;
gbvalue.value.enumerated_item[0] =
- ucontrol->value.enumerated.item[0];
+ cpu_to_le32(ucontrol->value.enumerated.item[0]);
}
if (e->shift_l != e->shift_r) {
@@ -812,11 +812,11 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
return -EINVAL;
val |= ucontrol->value.enumerated.item[1] << e->shift_r;
mask |= e->mask << e->shift_r;
- if (gbvalue.value.enumerated_item[1] !=
+ if (le32_to_cpu(gbvalue.value.enumerated_item[1]) !=
ucontrol->value.enumerated.item[1]) {
change = 1;
gbvalue.value.enumerated_item[1] =
- ucontrol->value.enumerated.item[1];
+ cpu_to_le32(ucontrol->value.enumerated.item[1]);
}
}
@@ -887,7 +887,7 @@ static int gbaudio_tplg_create_mixer_ctl(struct gbaudio_module_info *gb,
return -ENOMEM;
ctldata->ctl_id = ctl->id;
ctldata->data_cport = le16_to_cpu(ctl->data_cport);
- ctldata->access = ctl->access;
+ ctldata->access = le32_to_cpu(ctl->access);
ctldata->vcount = ctl->count_values;
ctldata->info = &ctl->info;
*kctl = (struct snd_kcontrol_new)
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
index 087928a586fb..d4a225b76338 100644
--- a/drivers/staging/greybus/gbphy.h
+++ b/drivers/staging/greybus/gbphy.h
@@ -36,9 +36,9 @@ struct gbphy_device_id {
struct gbphy_driver {
const char *name;
- int (*probe)(struct gbphy_device *,
+ int (*probe)(struct gbphy_device *device,
const struct gbphy_device_id *id);
- void (*remove)(struct gbphy_device *);
+ void (*remove)(struct gbphy_device *device);
const struct gbphy_device_id *id_table;
struct device_driver driver;
diff --git a/drivers/staging/hikey9xx/Kconfig b/drivers/staging/hikey9xx/Kconfig
new file mode 100644
index 000000000000..b29f5d5df134
--- /dev/null
+++ b/drivers/staging/hikey9xx/Kconfig
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# to be placed at drivers/phy
+config PHY_HI3670_USB
+ tristate "hi3670 USB PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the HiSilicon Hi3670 USB PHY.
+
+ To compile this driver as a module, choose M here.
+
+# to be placed at drivers/spmi
+config SPMI_HISI3670
+ tristate "Hisilicon 3670 SPMI Controller"
+ select IRQ_DOMAIN_HIERARCHY
+ depends on HAS_IOMEM
+ depends on SPMI
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on HiSilicon Hi3670
+ processors.
+
+# to be placed at drivers/mfd
+config MFD_HI6421_SPMI
+ tristate "HiSilicon Hi6421v600 SPMI PMU/Codec IC"
+ depends on OF
+ depends on SPMI
+ select MFD_CORE
+ help
+ Add support for the HiSilicon Hi6421v600 SPMI PMIC. The Hi6421
+ integrates multiple functions, such as voltage regulators, an RTC,
+ a codec and a Coulomb counter.
+
+ This driver includes core APIs _only_. You have to select
+ individual components like voltage regulators under corresponding
+ menus in order to enable them.
+ Communication with the Hi6421v600 is done via an SPMI bus.
+
+# to be placed at drivers/regulator
+config REGULATOR_HI6421V600
+ tristate "HiSilicon Hi6421v600 PMIC voltage regulator support"
+ depends on MFD_HI6421_SPMI && OF
+ depends on REGULATOR
+ help
+ This driver provides support for the voltage regulators on the
+ HiSilicon Hi6421v600 PMU / Codec IC.
+ It is used on Kirin 3670 boards, such as the HiKey 970.
diff --git a/drivers/staging/hikey9xx/Makefile b/drivers/staging/hikey9xx/Makefile
new file mode 100644
index 000000000000..1924fadac952
--- /dev/null
+++ b/drivers/staging/hikey9xx/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PHY_HI3670_USB) += phy-hi3670-usb3.o
+
+obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
+obj-$(CONFIG_MFD_HI6421_SPMI) += hi6421-spmi-pmic.o
+obj-$(CONFIG_REGULATOR_HI6421V600) += hi6421v600-regulator.o
diff --git a/drivers/staging/hikey9xx/TODO b/drivers/staging/hikey9xx/TODO
new file mode 100644
index 000000000000..65e7996a3066
--- /dev/null
+++ b/drivers/staging/hikey9xx/TODO
@@ -0,0 +1,5 @@
+ToDo list:
+
+- Port other drivers needed by HiKey 960/970;
+- Test drivers on HiKey 960;
+- Validate device tree bindings.
diff --git a/drivers/staging/hikey9xx/hi6421-spmi-pmic.c b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
new file mode 100644
index 000000000000..64b30d263c8d
--- /dev/null
+++ b/drivers/staging/hikey9xx/hi6421-spmi-pmic.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for the HiSilicon Hi6421v600 SPMI PMIC
+ *
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2011 Hisilicon.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/hi6421-spmi-pmic.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/* 8-bit register offset in PMIC */
+#define HISI_MASK_STATE 0xff
+
+#define HISI_IRQ_ARRAY 2
+#define HISI_IRQ_NUM (HISI_IRQ_ARRAY * 8)
+
+#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
+#define SOC_PMIC_IRQ0_ADDR 0x0212
+
+#define HISI_IRQ_KEY_NUM 0
+#define HISI_IRQ_KEY_VALUE 0xc0
+#define HISI_IRQ_KEY_DOWN 7
+#define HISI_IRQ_KEY_UP 6
+
+#define HISI_MASK_FIELD 0xFF
+#define HISI_BITS 8
+
+/* define the first group interrupt register number */
+#define HISI_PMIC_FIRST_GROUP_INT_NUM 2
+
+static const struct mfd_cell hi6421v600_devs[] = {
+ { .name = "hi6421v600-regulator", },
+};
+
+/*
+ * The PMIC registers are only 8 bits wide.
+ * The HiSilicon SoC maps them in hardware into its own address space,
+ * where we access them as 32-bit values.
+ */
+int hi6421_spmi_pmic_read(struct hi6421_spmi_pmic *pmic, int reg)
+{
+ struct spmi_device *pdev;
+ u8 read_value = 0;
+ int ret;
+
+ pdev = to_spmi_device(pmic->dev);
+ if (!pdev) {
+ pr_err("%s: pdev get failed!\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = spmi_ext_register_readl(pdev, reg, &read_value, 1);
+ if (ret) {
+ pr_err("%s: spmi_ext_register_readl failed!\n", __func__);
+ return ret;
+ }
+ return read_value;
+}
+EXPORT_SYMBOL(hi6421_spmi_pmic_read);
+
+int hi6421_spmi_pmic_write(struct hi6421_spmi_pmic *pmic, int reg, u32 val)
+{
+ struct spmi_device *pdev;
+ int ret;
+
+ pdev = to_spmi_device(pmic->dev);
+ if (!pdev) {
+ pr_err("%s: pdev get failed!\n", __func__);
+ return -ENODEV;
+ }
+
+ ret = spmi_ext_register_writel(pdev, reg, (unsigned char *)&val, 1);
+ if (ret)
+ pr_err("%s: spmi_ext_register_writel failed!\n", __func__);
+
+ return ret;
+}
+EXPORT_SYMBOL(hi6421_spmi_pmic_write);
+
+int hi6421_spmi_pmic_rmw(struct hi6421_spmi_pmic *pmic, int reg,
+ u32 mask, u32 bits)
+{
+ unsigned long flags;
+ u32 data;
+ int ret;
+
+ spin_lock_irqsave(&pmic->lock, flags);
+ data = hi6421_spmi_pmic_read(pmic, reg) & ~mask;
+ data |= mask & bits;
+ ret = hi6421_spmi_pmic_write(pmic, reg, data);
+ spin_unlock_irqrestore(&pmic->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(hi6421_spmi_pmic_rmw);
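+
+/*
+ * Illustrative usage (editor's sketch, not called anywhere in this file):
+ * how a child driver can combine these accessors. The register address
+ * (0x51) and mask (0x10) are made-up values.
+ *
+ *	int val = hi6421_spmi_pmic_read(pmic, 0x51);
+ *
+ *	if (val < 0)
+ *		return val;	// read failed
+ *	// set bit 4 without disturbing the other bits, under pmic->lock:
+ *	hi6421_spmi_pmic_rmw(pmic, 0x51, 0x10, 0x10);
+ *	// clear it again:
+ *	hi6421_spmi_pmic_rmw(pmic, 0x51, 0x10, 0x00);
+ */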
+
+static irqreturn_t hi6421_spmi_irq_handler(int irq, void *data)
+{
+ struct hi6421_spmi_pmic *pmic = (struct hi6421_spmi_pmic *)data;
+ unsigned long pending;
+ int i, offset;
+
+ for (i = 0; i < HISI_IRQ_ARRAY; i++) {
+ pending = hi6421_spmi_pmic_read(pmic, (i + SOC_PMIC_IRQ0_ADDR));
+ pending &= HISI_MASK_FIELD;
+ if (pending != 0)
+ pr_debug("pending[%d]=0x%lx\n\r", i, pending);
+
+ hi6421_spmi_pmic_write(pmic, (i + SOC_PMIC_IRQ0_ADDR), pending);
+
+ /* deliver power key events in press/release order */
+ if ((i == HISI_IRQ_KEY_NUM) &&
+ ((pending & HISI_IRQ_KEY_VALUE) == HISI_IRQ_KEY_VALUE)) {
+ generic_handle_irq(pmic->irqs[HISI_IRQ_KEY_DOWN]);
+ generic_handle_irq(pmic->irqs[HISI_IRQ_KEY_UP]);
+ pending &= (~HISI_IRQ_KEY_VALUE);
+ }
+
+ if (pending) {
+ for_each_set_bit(offset, &pending, HISI_BITS)
+ generic_handle_irq(pmic->irqs[offset + i * HISI_BITS]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void hi6421_spmi_irq_mask(struct irq_data *d)
+{
+ struct hi6421_spmi_pmic *pmic = irq_data_get_irq_chip_data(d);
+ u32 data, offset;
+ unsigned long flags;
+
+ offset = (irqd_to_hwirq(d) >> 3);
+ offset += SOC_PMIC_IRQ_MASK_0_ADDR;
+
+ spin_lock_irqsave(&pmic->lock, flags);
+ data = hi6421_spmi_pmic_read(pmic, offset);
+ data |= (1 << (irqd_to_hwirq(d) & 0x07));
+ hi6421_spmi_pmic_write(pmic, offset, data);
+ spin_unlock_irqrestore(&pmic->lock, flags);
+}
+
+static void hi6421_spmi_irq_unmask(struct irq_data *d)
+{
+ struct hi6421_spmi_pmic *pmic = irq_data_get_irq_chip_data(d);
+ u32 data, offset;
+ unsigned long flags;
+
+ offset = (irqd_to_hwirq(d) >> 3);
+ offset += SOC_PMIC_IRQ_MASK_0_ADDR;
+
+ spin_lock_irqsave(&pmic->lock, flags);
+ data = hi6421_spmi_pmic_read(pmic, offset);
+ data &= ~(1 << (irqd_to_hwirq(d) & 0x07));
+ hi6421_spmi_pmic_write(pmic, offset, data);
+ spin_unlock_irqrestore(&pmic->lock, flags);
+}
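+
+/*
+ * Worked example (editor's note): for hwirq 11, the mask register is
+ * offset = (11 >> 3) + SOC_PMIC_IRQ_MASK_0_ADDR = 0x0203, and the bit
+ * within that register is 11 & 0x07 = 3.
+ */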
+
+static struct irq_chip hi6421_spmi_pmu_irqchip = {
+ .name = "hisi-irq",
+ .irq_mask = hi6421_spmi_irq_mask,
+ .irq_unmask = hi6421_spmi_irq_unmask,
+ .irq_disable = hi6421_spmi_irq_mask,
+ .irq_enable = hi6421_spmi_irq_unmask,
+};
+
+static int hi6421_spmi_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct hi6421_spmi_pmic *pmic = d->host_data;
+
+ irq_set_chip_and_handler_name(virq, &hi6421_spmi_pmu_irqchip,
+ handle_simple_irq, "hisi");
+ irq_set_chip_data(virq, pmic);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops hi6421_spmi_domain_ops = {
+ .map = hi6421_spmi_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void hi6421_spmi_pmic_irq_prc(struct hi6421_spmi_pmic *pmic)
+{
+ int i, pending;
+
+ for (i = 0; i < HISI_IRQ_ARRAY; i++)
+ hi6421_spmi_pmic_write(pmic, SOC_PMIC_IRQ_MASK_0_ADDR + i,
+ HISI_MASK_STATE);
+
+ for (i = 0; i < HISI_IRQ_ARRAY; i++) {
+ pending = hi6421_spmi_pmic_read(pmic, SOC_PMIC_IRQ0_ADDR + i);
+
+ pr_debug("PMU IRQ address value:irq[0x%x] = 0x%x\n",
+ SOC_PMIC_IRQ0_ADDR + i, pending);
+ hi6421_spmi_pmic_write(pmic, SOC_PMIC_IRQ0_ADDR + i,
+ HISI_MASK_STATE);
+ }
+}
+
+static int hi6421_spmi_pmic_probe(struct spmi_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct hi6421_spmi_pmic *pmic;
+ unsigned int virq;
+ int ret, i;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ spin_lock_init(&pmic->lock);
+
+ pmic->dev = dev;
+
+ pmic->gpio = of_get_gpio(np, 0);
+ if (pmic->gpio < 0)
+ return pmic->gpio;
+
+ if (!gpio_is_valid(pmic->gpio))
+ return -EINVAL;
+
+ ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN, "pmic");
+ if (ret < 0) {
+ dev_err(dev, "failed to request gpio%d\n", pmic->gpio);
+ return ret;
+ }
+
+ pmic->irq = gpio_to_irq(pmic->gpio);
+
+ hi6421_spmi_pmic_irq_prc(pmic);
+
+ pmic->irqs = devm_kcalloc(dev, HISI_IRQ_NUM, sizeof(int), GFP_KERNEL);
+ if (!pmic->irqs)
+ return -ENOMEM;
+
+ pmic->domain = irq_domain_add_simple(np, HISI_IRQ_NUM, 0,
+ &hi6421_spmi_domain_ops, pmic);
+ if (!pmic->domain) {
+ dev_err(dev, "failed to add a simple IRQ domain\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < HISI_IRQ_NUM; i++) {
+ virq = irq_create_mapping(pmic->domain, i);
+ if (!virq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENOSPC;
+ }
+ pmic->irqs[i] = virq;
+ dev_dbg(dev, "%s: pmic->irqs[%d] = %d\n",
+ __func__, i, pmic->irqs[i]);
+ }
+
+ ret = request_threaded_irq(pmic->irq, hi6421_spmi_irq_handler, NULL,
+ IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_NO_SUSPEND,
+ "pmic", pmic);
+ if (ret < 0) {
+ dev_err(dev, "could not claim pmic IRQ: error %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&pdev->dev, pmic);
+
+ /*
+ * The logic below relies on the pmic already being stored as
+ * drvdata.
+ */
+ dev_dbg(&pdev->dev, "SPMI-PMIC: adding children for %pOF\n",
+ pdev->dev.of_node);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ hi6421v600_devs, ARRAY_SIZE(hi6421v600_devs),
+ NULL, 0, NULL);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "Failed to add child devices: %d\n", ret);
+ free_irq(pmic->irq, pmic);
+
+ return ret;
+}
+
+static void hi6421_spmi_pmic_remove(struct spmi_device *pdev)
+{
+ struct hi6421_spmi_pmic *pmic = dev_get_drvdata(&pdev->dev);
+
+ free_irq(pmic->irq, pmic);
+}
+
+static const struct of_device_id pmic_spmi_id_table[] = {
+ { .compatible = "hisilicon,hi6421-spmi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
+
+static struct spmi_driver hi6421_spmi_pmic_driver = {
+ .driver = {
+ .name = "hi6421-spmi-pmic",
+ .of_match_table = pmic_spmi_id_table,
+ },
+ .probe = hi6421_spmi_pmic_probe,
+ .remove = hi6421_spmi_pmic_remove,
+};
+module_spmi_driver(hi6421_spmi_pmic_driver);
+
+MODULE_DESCRIPTION("HiSilicon Hi6421v600 SPMI PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
new file mode 100644
index 000000000000..614b03c9ddfb
--- /dev/null
+++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver for the regulators in the HiSilicon Hi6421v600 PMIC
+ *
+ * Copyright (c) 2013 Linaro Ltd.
+ * Copyright (c) 2011 Hisilicon.
+ *
+ * Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mfd/hi6421-spmi-pmic.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+
+#define rdev_dbg(rdev, fmt, arg...) \
+ pr_debug("%s: %s: " fmt, (rdev)->desc->name, __func__, ##arg)
+
+struct hi6421v600_regulator {
+ struct regulator_desc rdesc;
+ struct hi6421_spmi_pmic *pmic;
+ u32 eco_mode_mask, eco_uA;
+};
+
+static DEFINE_MUTEX(enable_mutex);
+
+static int hi6421_spmi_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ u32 reg_val;
+
+ reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->enable_reg);
+
+ rdev_dbg(rdev,
+ "enable_reg=0x%x, val= 0x%x, enable_state=%d\n",
+ rdev->desc->enable_reg,
+ reg_val, (reg_val & rdev->desc->enable_mask));
+
+ return ((reg_val & rdev->desc->enable_mask) != 0);
+}
+
+static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+
+ /* cannot enable more than one regulator at a time */
+ mutex_lock(&enable_mutex);
+ usleep_range(HISI_REGS_ENA_PROTECT_TIME,
+ HISI_REGS_ENA_PROTECT_TIME + 1000);
+
+ /* set enable register */
+ rdev_dbg(rdev,
+ "off_on_delay=%d us, enable_reg=0x%x, enable_mask=0x%x\n",
+ rdev->desc->off_on_delay, rdev->desc->enable_reg,
+ rdev->desc->enable_mask);
+
+ hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
+ rdev->desc->enable_mask,
+ rdev->desc->enable_mask);
+
+ mutex_unlock(&enable_mutex);
+
+ return 0;
+}
+
+static int hi6421_spmi_regulator_disable(struct regulator_dev *rdev)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+
+ /* set enable register to 0 */
+ rdev_dbg(rdev, "enable_reg=0x%x, enable_mask=0x%x\n",
+ rdev->desc->enable_reg, rdev->desc->enable_mask);
+
+ hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, 0);
+
+ return 0;
+}
+
+static int hi6421_spmi_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ u32 reg_val, selector;
+
+ /* get voltage selector */
+ reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->vsel_reg);
+
+ selector = (reg_val & rdev->desc->vsel_mask) >> (ffs(rdev->desc->vsel_mask) - 1);
+
+ rdev_dbg(rdev,
+ "vsel_reg=0x%x, value=0x%x, entry=0x%x, voltage=%d mV\n",
+ rdev->desc->vsel_reg, reg_val, selector,
+ rdev->desc->ops->list_voltage(rdev, selector) / 1000);
+
+ return selector;
+}
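+
+/*
+ * Worked example (editor's note): with a hypothetical vsel_mask of 0x70,
+ * ffs(0x70) - 1 = 4, so a register value of 0x35 yields selector
+ * (0x35 & 0x70) >> 4 = 3.
+ */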
+
+static int hi6421_spmi_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ u32 reg_val;
+
+ if (unlikely(selector >= rdev->desc->n_voltages))
+ return -EINVAL;
+
+ reg_val = selector << (ffs(rdev->desc->vsel_mask) - 1);
+
+ /* set voltage selector */
+ rdev_dbg(rdev,
+ "vsel_reg=0x%x, mask=0x%x, value=0x%x, voltage=%d mV\n",
+ rdev->desc->vsel_reg, rdev->desc->vsel_mask, reg_val,
+ rdev->desc->ops->list_voltage(rdev, selector) / 1000);
+
+ hi6421_spmi_pmic_rmw(pmic, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, reg_val);
+
+ return 0;
+}
+
+static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ unsigned int mode;
+ u32 reg_val;
+
+ reg_val = hi6421_spmi_pmic_read(pmic, rdev->desc->enable_reg);
+
+ if (reg_val & sreg->eco_mode_mask)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ rdev_dbg(rdev,
+ "enable_reg=0x%x, eco_mode_mask=0x%x, reg_val=0x%x, %s mode\n",
+ rdev->desc->enable_reg, sreg->eco_mode_mask, reg_val,
+ mode == REGULATOR_MODE_IDLE ? "idle" : "normal");
+
+ return mode;
+}
+
+static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+ struct hi6421_spmi_pmic *pmic = sreg->pmic;
+ u32 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = sreg->eco_mode_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set mode */
+ rdev_dbg(rdev, "enable_reg=0x%x, eco_mode_mask=0x%x, value=0x%x\n",
+ rdev->desc->enable_reg, sreg->eco_mode_mask, val);
+
+ hi6421_spmi_pmic_rmw(pmic, rdev->desc->enable_reg,
+ sreg->eco_mode_mask, val);
+
+ return 0;
+}
+
+static unsigned int
+hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV,
+ int load_uA)
+{
+ struct hi6421v600_regulator *sreg = rdev_get_drvdata(rdev);
+
+ if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA))
+ return REGULATOR_MODE_NORMAL;
+
+ return REGULATOR_MODE_IDLE;
+}
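+
+/*
+ * Example (editor's note): with eco-microamp = <10000>, a 2 mA load maps
+ * to REGULATOR_MODE_IDLE and a 20 mA load to REGULATOR_MODE_NORMAL; an
+ * LDO without idle-mode support (eco_uA == 0) always reports NORMAL.
+ */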
+
+static int hi6421_spmi_dt_parse(struct platform_device *pdev,
+ struct hi6421v600_regulator *sreg,
+ struct regulator_desc *rdesc)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int *v_table;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &rdesc->enable_reg);
+ if (ret) {
+ dev_err(dev, "missing reg property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "vsel-reg", &rdesc->vsel_reg);
+ if (ret) {
+ dev_err(dev, "missing vsel-reg property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "enable-mask", &rdesc->enable_mask);
+ if (ret) {
+ dev_err(dev, "missing enable-mask property\n");
+ return ret;
+ }
+
+ /*
+ * Not all regulators support idle mode
+ */
+ ret = of_property_read_u32(np, "idle-mode-mask", &sreg->eco_mode_mask);
+ if (ret) {
+ dev_dbg(dev, "LDO doesn't support economy mode.\n");
+ sreg->eco_mode_mask = 0;
+ sreg->eco_uA = 0;
+ } else {
+ ret = of_property_read_u32(np, "eco-microamp", &sreg->eco_uA);
+ if (ret) {
+ dev_err(dev, "missing eco-microamp property\n");
+ return ret;
+ }
+ }
+
+ /* parse .off-on-delay */
+ ret = of_property_read_u32(np, "off-on-delay-us",
+ &rdesc->off_on_delay);
+ if (ret) {
+ dev_err(dev, "missing off-on-delay-us property\n");
+ return ret;
+ }
+
+ /* parse .enable_time */
+ ret = of_property_read_u32(np, "startup-delay-us",
+ &rdesc->enable_time);
+ if (ret) {
+ dev_err(dev, "missing startup-delay-us property\n");
+ return ret;
+ }
+
+ /* FIXME: is there a better value for this? */
+ rdesc->ramp_delay = rdesc->enable_time;
+
+ /* parse volt_table */
+
+ ret = of_property_count_u32_elems(np, "voltage-table");
+ if (ret <= 0) {
+ dev_err(dev, "missing or empty voltage-table property\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+ rdesc->n_voltages = ret;
+
+ v_table = devm_kcalloc(dev, rdesc->n_voltages, sizeof(*v_table),
+ GFP_KERNEL);
+ if (!v_table)
+ return -ENOMEM;
+ rdesc->volt_table = v_table;
+
+ ret = of_property_read_u32_array(np, "voltage-table",
+ v_table, rdesc->n_voltages);
+ if (ret) {
+ dev_err(dev, "missing voltage-table property\n");
+ return ret;
+ }
+
+ /*
+ * Instead of explicitly requiring a mask for the voltage selector,
+ * derive it from the size of the voltage table: the selector fields
+ * all start at bit zero (at least on the known LDOs), so the
+ * smallest mask that covers every table index is enough.
+ */
+ rdesc->vsel_mask = (1 << fls(rdesc->n_voltages - 1)) - 1;
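+ /*
+ * e.g. (editor's note): a 16-entry table gives fls(15) = 4, so
+ * vsel_mask = 0xf; a 5-entry table gives fls(4) = 3, mask 0x7,
+ * which still covers selectors 0..4.
+ */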
+
+ dev_dbg(dev, "voltage selector settings: reg: 0x%x, mask: 0x%x\n",
+ rdesc->vsel_reg, rdesc->vsel_mask);
+
+ return 0;
+}
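+
+/*
+ * Illustrative device-tree node matching the properties parsed above
+ * (editor's sketch; the register offsets and voltages are made up):
+ *
+ *	ldo3: LDO3 {
+ *		regulator-name = "ldo3";
+ *		reg = <0x16>;
+ *		vsel-reg = <0x51>;
+ *		enable-mask = <0x01>;
+ *		idle-mode-mask = <0x10>;
+ *		eco-microamp = <10000>;
+ *		off-on-delay-us = <20000>;
+ *		startup-delay-us = <120>;
+ *		voltage-table = <1500000>, <1800000>, <2400000>, <2600000>;
+ *	};
+ */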
+
+static const struct regulator_ops hi6421_spmi_ldo_rops = {
+ .is_enabled = hi6421_spmi_regulator_is_enabled,
+ .enable = hi6421_spmi_regulator_enable,
+ .disable = hi6421_spmi_regulator_disable,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .get_voltage_sel = hi6421_spmi_regulator_get_voltage_sel,
+ .set_voltage_sel = hi6421_spmi_regulator_set_voltage_sel,
+ .get_mode = hi6421_spmi_regulator_get_mode,
+ .set_mode = hi6421_spmi_regulator_set_mode,
+ .get_optimum_mode = hi6421_spmi_regulator_get_optimum_mode,
+};
+
+static int hi6421_spmi_regulator_probe_ldo(struct platform_device *pdev,
+ struct device_node *np,
+ struct hi6421_spmi_pmic *pmic)
+{
+ struct regulation_constraints *constraint;
+ struct regulator_init_data *initdata;
+ struct regulator_config config = { };
+ struct hi6421v600_regulator *sreg;
+ struct device *dev = &pdev->dev;
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+ const char *supplyname;
+ int ret;
+
+ initdata = of_get_regulator_init_data(dev, np, NULL);
+ if (!initdata) {
+ dev_err(dev, "failed to get regulator data\n");
+ return -EINVAL;
+ }
+
+ sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
+ if (!sreg)
+ return -ENOMEM;
+
+ sreg->pmic = pmic;
+ rdesc = &sreg->rdesc;
+
+ rdesc->name = initdata->constraints.name;
+ rdesc->ops = &hi6421_spmi_ldo_rops;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->min_uV = initdata->constraints.min_uV;
+
+ supplyname = of_get_property(np, "supply_name", NULL);
+ if (supplyname)
+ initdata->supply_regulator = supplyname;
+
+ /* parse device tree data for regulator specific */
+ ret = hi6421_spmi_dt_parse(pdev, sreg, rdesc);
+ if (ret)
+ return ret;
+
+ /* hisi regulator supports two modes */
+ constraint = &initdata->constraints;
+
+ constraint->valid_modes_mask = REGULATOR_MODE_NORMAL;
+ if (sreg->eco_mode_mask) {
+ constraint->valid_modes_mask |= REGULATOR_MODE_IDLE;
+ constraint->valid_ops_mask |= REGULATOR_CHANGE_MODE;
+ }
+
+ config.dev = &pdev->dev;
+ config.init_data = initdata;
+ config.driver_data = sreg;
+ config.of_node = pdev->dev.of_node;
+
+ /* register regulator */
+ rdev = regulator_register(rdesc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register %s\n",
+ rdesc->name);
+ return PTR_ERR(rdev);
+ }
+
+ rdev_dbg(rdev, "valid_modes_mask: 0x%x, valid_ops_mask: 0x%x\n",
+ constraint->valid_modes_mask, constraint->valid_ops_mask);
+
+ dev_set_drvdata(dev, rdev);
+
+ return 0;
+}
+
+static int hi6421_spmi_regulator_probe(struct platform_device *pdev)
+{
+ struct device *pmic_dev = pdev->dev.parent;
+ struct device_node *np = pmic_dev->of_node;
+ struct device_node *regulators, *child;
+ struct platform_device *new_pdev;
+ struct hi6421_spmi_pmic *pmic;
+ int ret;
+
+ /*
+ * This driver is meant to be probed by hi6421-spmi-core,
+ * which sets the drvdata first. If that didn't happen,
+ * warn and bail out.
+ */
+ pmic = dev_get_drvdata(pmic_dev);
+ if (WARN_ON(!pmic))
+ return -ENODEV;
+
+ regulators = of_get_child_by_name(np, "regulators");
+ if (!regulators) {
+ dev_err(&pdev->dev, "regulator node not found\n");
+ return -ENODEV;
+ }
+
+ /* Parse all LDO regulator nodes */
+ for_each_child_of_node(regulators, child) {
+ dev_dbg(&pdev->dev, "adding child %pOF\n", child);
+
+ new_pdev = platform_device_alloc(child->name, -1);
+ if (!new_pdev)
+ continue;
+ new_pdev->dev.parent = pmic_dev;
+ new_pdev->dev.of_node = of_node_get(child);
+
+ ret = platform_device_add(new_pdev);
+ if (ret < 0) {
+ platform_device_put(new_pdev);
+ continue;
+ }
+
+ ret = hi6421_spmi_regulator_probe_ldo(new_pdev, child, pmic);
+ if (ret < 0)
+ platform_device_unregister(new_pdev);
+ }
+
+ of_node_put(regulators);
+
+ return 0;
+}
+
+static int hi6421_spmi_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(&pdev->dev);
+
+ regulator_unregister(rdev);
+
+ /*
+ * sreg and the voltage table are devm-allocated against the child
+ * device, so they must not be freed by hand here.
+ */
+ return 0;
+}
+
+static const struct platform_device_id hi6421v600_regulator_table[] = {
+ { .name = "hi6421v600-regulator" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, hi6421v600_regulator_table);
+
+static struct platform_driver hi6421v600_regulator_driver = {
+ .id_table = hi6421v600_regulator_table,
+ .driver = {
+ .name = "hi6421v600-regulator",
+ },
+ .probe = hi6421_spmi_regulator_probe,
+ .remove = hi6421_spmi_regulator_remove,
+};
+module_platform_driver(hi6421v600_regulator_driver);
+
+MODULE_DESCRIPTION("Hi6421v600 regulator driver");
+MODULE_LICENSE("GPL v2");
+
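Note (illustration, not part of the patch): the vsel_mask derivation in
hi6421_spmi_dt_parse() assumes the selector field starts at bit zero and,
as written, yields the minimal covering mask only for power-of-two table
sizes, which matches the 8- and 16-entry tables in the binding examples
below. A self-contained sketch of the arithmetic, using the kernel's
fls() helper:

    #include <linux/bitops.h>

    /* hypothetical helper mirroring the computation in the patch */
    static unsigned int ldo_vsel_mask(unsigned int n_voltages)
    {
        /* n_voltages = 16: fls(16) = 5, mask = 0xf (selectors 0..15) */
        /* n_voltages = 8:  fls(8)  = 4, mask = 0x7 (selectors 0..7)  */
        return (1 << (fls(n_voltages) - 1)) - 1;
    }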
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
new file mode 100644
index 000000000000..f831c43f4783
--- /dev/null
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/*
+ * SPMI register addresses
+ */
+#define SPMI_CHANNEL_OFFSET 0x0300
+#define SPMI_SLAVE_OFFSET 0x20
+
+#define SPMI_APB_SPMI_CMD_BASE_ADDR 0x0100
+
+#define SPMI_APB_SPMI_WDATA0_BASE_ADDR 0x0104
+#define SPMI_APB_SPMI_WDATA1_BASE_ADDR 0x0108
+#define SPMI_APB_SPMI_WDATA2_BASE_ADDR 0x010c
+#define SPMI_APB_SPMI_WDATA3_BASE_ADDR 0x0110
+
+#define SPMI_APB_SPMI_STATUS_BASE_ADDR 0x0200
+
+#define SPMI_APB_SPMI_RDATA0_BASE_ADDR 0x0204
+#define SPMI_APB_SPMI_RDATA1_BASE_ADDR 0x0208
+#define SPMI_APB_SPMI_RDATA2_BASE_ADDR 0x020c
+#define SPMI_APB_SPMI_RDATA3_BASE_ADDR 0x0210
+
+#define SPMI_PER_DATAREG_BYTE 4
+/*
+ * SPMI cmd register
+ */
+#define SPMI_APB_SPMI_CMD_EN BIT(31)
+#define SPMI_APB_SPMI_CMD_TYPE_OFFSET 24
+#define SPMI_APB_SPMI_CMD_LENGTH_OFFSET 20
+#define SPMI_APB_SPMI_CMD_SLAVEID_OFFSET 16
+#define SPMI_APB_SPMI_CMD_ADDR_OFFSET 0
+
+/* Command Opcodes */
+
+enum spmi_controller_cmd_op_code {
+ SPMI_CMD_REG_ZERO_WRITE = 0,
+ SPMI_CMD_REG_WRITE = 1,
+ SPMI_CMD_REG_READ = 2,
+ SPMI_CMD_EXT_REG_WRITE = 3,
+ SPMI_CMD_EXT_REG_READ = 4,
+ SPMI_CMD_EXT_REG_WRITE_L = 5,
+ SPMI_CMD_EXT_REG_READ_L = 6,
+ SPMI_CMD_REG_RESET = 7,
+ SPMI_CMD_REG_SLEEP = 8,
+ SPMI_CMD_REG_SHUTDOWN = 9,
+ SPMI_CMD_REG_WAKEUP = 10,
+};
+
+/*
+ * SPMI status register
+ */
+#define SPMI_APB_TRANS_DONE BIT(0)
+#define SPMI_APB_TRANS_FAIL BIT(2)
+
+/* Command register fields */
+#define SPMI_CONTROLLER_CMD_MAX_BYTE_COUNT 16
+
+/* Transaction timeout and maximum transfer length */
+#define SPMI_CONTROLLER_TIMEOUT_US 1000
+#define SPMI_CONTROLLER_MAX_TRANS_BYTES 16
+
+struct spmi_controller_dev {
+ struct spmi_controller *controller;
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t lock;
+ u32 channel;
+};
+
+static int spmi_controller_wait_for_done(struct device *dev,
+ struct spmi_controller_dev *ctrl_dev,
+ void __iomem *base, u8 sid, u16 addr)
+{
+ u32 timeout = SPMI_CONTROLLER_TIMEOUT_US;
+ u32 status, offset;
+
+ offset = SPMI_APB_SPMI_STATUS_BASE_ADDR;
+ offset += SPMI_CHANNEL_OFFSET * ctrl_dev->channel + SPMI_SLAVE_OFFSET * sid;
+
+ do {
+ status = readl(base + offset);
+
+ if (status & SPMI_APB_TRANS_DONE) {
+ if (status & SPMI_APB_TRANS_FAIL) {
+ dev_err(dev, "%s: transaction failed (0x%x)\n",
+ __func__, status);
+ return -EIO;
+ }
+ dev_dbg(dev, "%s: status 0x%x\n", __func__, status);
+ return 0;
+ }
+ udelay(1);
+ } while (timeout--);
+
+ dev_err(dev, "%s: timeout, status 0x%x\n", __func__, status);
+ return -ETIMEDOUT;
+}
+
+static int spmi_read_cmd(struct spmi_controller *ctrl,
+ u8 opc, u8 slave_id, u16 slave_addr, u8 *__buf, size_t bc)
+{
+ struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev);
+ u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel;
+ unsigned long flags;
+ u8 *buf = __buf;
+ u32 cmd, data;
+ int rc;
+ u8 op_code, i;
+
+ if (bc > SPMI_CONTROLLER_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "spmi_controller supports 1..%d bytes per trans, but:%zu requested\n",
+ SPMI_CONTROLLER_MAX_TRANS_BYTES, bc);
+ return -EINVAL;
+ }
+
+ switch (opc) {
+ case SPMI_CMD_READ:
+ op_code = SPMI_CMD_REG_READ;
+ break;
+ case SPMI_CMD_EXT_READ:
+ op_code = SPMI_CMD_EXT_REG_READ;
+ break;
+ case SPMI_CMD_EXT_READL:
+ op_code = SPMI_CMD_EXT_REG_READ_L;
+ break;
+ default:
+ dev_err(&ctrl->dev, "invalid read cmd 0x%x\n", opc);
+ return -EINVAL;
+ }
+
+ cmd = SPMI_APB_SPMI_CMD_EN |
+ (op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) |
+ ((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) |
+ ((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) |
+ ((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET);
+
+ spin_lock_irqsave(&spmi_controller->lock, flags);
+
+ writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR);
+
+ rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller,
+ spmi_controller->base, slave_id, slave_addr);
+ if (rc)
+ goto done;
+
+ for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) {
+ data = readl(spmi_controller->base + chnl_ofst +
+ SPMI_SLAVE_OFFSET * slave_id +
+ SPMI_APB_SPMI_RDATA0_BASE_ADDR +
+ i * SPMI_PER_DATAREG_BYTE);
+ data = be32_to_cpu((__be32)data);
+ if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) {
+ memcpy(buf, &data, sizeof(data));
+ buf += sizeof(data);
+ } else {
+ memcpy(buf, &data, bc % SPMI_PER_DATAREG_BYTE);
+ buf += (bc % SPMI_PER_DATAREG_BYTE);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&spmi_controller->lock, flags);
+ if (rc)
+ dev_err(&ctrl->dev,
+ "spmi read failed op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n",
+ opc, slave_id, slave_addr, bc);
+ else
+ dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, read value: %*ph\n",
+ __func__, slave_id, slave_addr, (int)bc, __buf);
+
+ return rc;
+}
+
+static int spmi_write_cmd(struct spmi_controller *ctrl,
+ u8 opc, u8 slave_id, u16 slave_addr, const u8 *__buf, size_t bc)
+{
+ struct spmi_controller_dev *spmi_controller = dev_get_drvdata(&ctrl->dev);
+ u32 chnl_ofst = SPMI_CHANNEL_OFFSET * spmi_controller->channel;
+ const u8 *buf = __buf;
+ unsigned long flags;
+ u32 cmd, data;
+ int rc;
+ u8 op_code, i;
+
+ if (bc > SPMI_CONTROLLER_MAX_TRANS_BYTES) {
+ dev_err(&ctrl->dev,
+ "spmi_controller supports 1..%d bytes per trans, but:%zu requested\n",
+ SPMI_CONTROLLER_MAX_TRANS_BYTES, bc);
+ return -EINVAL;
+ }
+
+ switch (opc) {
+ case SPMI_CMD_WRITE:
+ op_code = SPMI_CMD_REG_WRITE;
+ break;
+ case SPMI_CMD_EXT_WRITE:
+ op_code = SPMI_CMD_EXT_REG_WRITE;
+ break;
+ case SPMI_CMD_EXT_WRITEL:
+ op_code = SPMI_CMD_EXT_REG_WRITE_L;
+ break;
+ default:
+ dev_err(&ctrl->dev, "invalid write cmd 0x%x\n", opc);
+ return -EINVAL;
+ }
+
+ cmd = SPMI_APB_SPMI_CMD_EN |
+ (op_code << SPMI_APB_SPMI_CMD_TYPE_OFFSET) |
+ ((bc - 1) << SPMI_APB_SPMI_CMD_LENGTH_OFFSET) |
+ ((slave_id & 0xf) << SPMI_APB_SPMI_CMD_SLAVEID_OFFSET) |
+ ((slave_addr & 0xffff) << SPMI_APB_SPMI_CMD_ADDR_OFFSET);
+
+ /* Write data to FIFOs */
+ spin_lock_irqsave(&spmi_controller->lock, flags);
+
+ for (i = 0; bc > i * SPMI_PER_DATAREG_BYTE; i++) {
+ data = 0;
+ if ((bc - i * SPMI_PER_DATAREG_BYTE) >> 2) {
+ memcpy(&data, buf, sizeof(data));
+ buf += sizeof(data);
+ } else {
+ memcpy(&data, buf, bc % SPMI_PER_DATAREG_BYTE);
+ buf += (bc % SPMI_PER_DATAREG_BYTE);
+ }
+
+ writel((u32)cpu_to_be32(data),
+ spmi_controller->base + chnl_ofst +
+ SPMI_APB_SPMI_WDATA0_BASE_ADDR +
+ SPMI_PER_DATAREG_BYTE * i);
+ }
+
+ /* Start the transaction */
+ writel(cmd, spmi_controller->base + chnl_ofst + SPMI_APB_SPMI_CMD_BASE_ADDR);
+
+ rc = spmi_controller_wait_for_done(&ctrl->dev, spmi_controller,
+ spmi_controller->base, slave_id,
+ slave_addr);
+ spin_unlock_irqrestore(&spmi_controller->lock, flags);
+
+ if (rc)
+ dev_err(&ctrl->dev, "spmi write failed op:0x%x slave_id:%d slave_addr:0x%x bc:%zu\n",
+ opc, slave_id, slave_addr, bc);
+ else
+ dev_dbg(&ctrl->dev, "%s: id:%d slave_addr:0x%x, wrote value: %*ph\n",
+ __func__, slave_id, slave_addr, (int)bc, __buf);
+
+ return rc;
+}
+
+static int spmi_controller_probe(struct platform_device *pdev)
+{
+ struct spmi_controller_dev *spmi_controller;
+ struct spmi_controller *ctrl;
+ struct resource *iores;
+ int ret;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller));
+ if (!ctrl) {
+ dev_err(&pdev->dev, "can not allocate spmi_controller data\n");
+ return -ENOMEM;
+ }
+ spmi_controller = spmi_controller_get_drvdata(ctrl);
+ spmi_controller->controller = ctrl;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ dev_err(&pdev->dev, "can not get resource!\n");
+ return -EINVAL;
+ }
+
+ spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
+ resource_size(iores));
+ if (!spmi_controller->base) {
+ dev_err(&pdev->dev, "can not remap base addr!\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
+ &spmi_controller->channel);
+ if (ret) {
+ dev_err(&pdev->dev, "can not get channel\n");
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, spmi_controller);
+ dev_set_drvdata(&ctrl->dev, spmi_controller);
+
+ spin_lock_init(&spmi_controller->lock);
+
+ ctrl->nr = spmi_controller->channel;
+ ctrl->dev.parent = pdev->dev.parent;
+ ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
+
+ /* Callbacks */
+ ctrl->read_cmd = spmi_read_cmd;
+ ctrl->write_cmd = spmi_write_cmd;
+
+ ret = spmi_controller_add(ctrl);
+ if (ret)
+ dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+
+ return ret;
+}
+
+static int spmi_del_controller(struct platform_device *pdev)
+{
+ struct spmi_controller_dev *spmi_controller = platform_get_drvdata(pdev);
+ struct spmi_controller *ctrl = spmi_controller->controller;
+
+ spmi_controller_remove(ctrl);
+ /* drop the reference taken by spmi_controller_alloc() */
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id spmi_controller_match_table[] = {
+ {
+ .compatible = "hisilicon,kirin970-spmi-controller",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spmi_controller_match_table);
+
+static struct platform_driver spmi_controller_driver = {
+ .probe = spmi_controller_probe,
+ .remove = spmi_del_controller,
+ .driver = {
+ .name = "hisi_spmi_controller",
+ .of_match_table = spmi_controller_match_table,
+ },
+};
+
+static int __init spmi_controller_init(void)
+{
+ return platform_driver_register(&spmi_controller_driver);
+}
+postcore_initcall(spmi_controller_init);
+
+static void __exit spmi_controller_exit(void)
+{
+ platform_driver_unregister(&spmi_controller_driver);
+}
+module_exit(spmi_controller_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:spmi_controller");
diff --git a/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
new file mode 100644
index 000000000000..80e74c261e05
--- /dev/null
+++ b/drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/hisilicon,hi6421-spmi-pmic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon 6421v600 SPMI PMIC
+
+maintainers:
+ - Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+description: |
+ HiSilicon 6421v600 is a PMIC connected on a MIPI System Power Management
+ (SPMI) bus. It provides interrupts and power supplies.
+
+ The GPIO and interrupt settings are represented as part of the top-level PMIC
+ node.
+
+ The SPMI controller part is provided by
+ drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml.
+
+properties:
+ $nodename:
+ pattern: "pmic@[0-9a-f]"
+
+ compatible:
+ const: hisilicon,hi6421v600-spmi
+
+ reg:
+ maxItems: 1
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupt-controller:
+ description:
+ Identifies the PMIC as capable of acting as an interrupt controller.
+
+ gpios:
+ maxItems: 1
+
+ regulators:
+ type: object
+
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ '^ldo[0-9]+@[0-9a-f]+$':
+ type: object
+
+ $ref: "/schemas/regulator/regulator.yaml#"
+
+ properties:
+ reg:
+ description: Enable register.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ vsel-reg:
+ description: Voltage selector register.
+
+ enable-mask:
+ description: Bitmask used to enable the regulator.
+
+ voltage-table:
+ description: Table of voltages, in microvolts, selectable by the regulator.
+ minItems: 2
+ maxItems: 16
+
+ off-on-delay-us:
+ description:
+ Minimum delay, in microseconds, between switching the regulator off
+ and back on.
+
+ startup-delay-us:
+ description: Startup time in microseconds.
+
+ idle-mode-mask:
+ description: Bitmask used to put the regulator into idle mode.
+
+ eco-microamp:
+ description: Maximum load current, in microamperes, drawn in idle mode.
+
+ required:
+ - reg
+ - vsel-reg
+ - enable-mask
+ - voltage-table
+ - off-on-delay-us
+ - startup-delay-us
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+examples:
+ - |
+ /* pmic properties */
+
+ pmic: pmic@0 {
+ compatible = "hisilicon,hi6421-spmi";
+ reg = <0 0>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ gpios = <&gpio28 0 0>;
+
+ regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ldo3: ldo3@16 {
+ reg = <0x16>;
+ vsel-reg = <0x51>;
+
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-boot-on;
+
+ enable-mask = <0x01>;
+
+ voltage-table = <1500000>, <1550000>, <1600000>, <1650000>,
+ <1700000>, <1725000>, <1750000>, <1775000>,
+ <1800000>, <1825000>, <1850000>, <1875000>,
+ <1900000>, <1925000>, <1950000>, <2000000>;
+ off-on-delay-us = <20000>;
+ startup-delay-us = <120>;
+ };
+
+ ldo4: ldo4@17 { /* 40 PIN */
+ reg = <0x17>;
+ vsel-reg = <0x52>;
+
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-boot-on;
+
+ enable-mask = <0x01>;
+ idle-mode-mask = <0x10>;
+ eco-microamp = <10000>;
+
+ voltage-table = <1725000>, <1750000>, <1775000>, <1800000>,
+ <1825000>, <1850000>, <1875000>, <1900000>;
+ off-on-delay-us = <20000>;
+ startup-delay-us = <120>;
+ };
+ };
+ };
diff --git a/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml b/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
new file mode 100644
index 000000000000..f2a56fa4e78e
--- /dev/null
+++ b/drivers/staging/hikey9xx/hisilicon,hisi-spmi-controller.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spmi/hisilicon,hisi-spmi-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon SPMI controller
+
+maintainers:
+ - Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+
+description: |
+ The HiSilicon SPMI bus controller is found on some Kirin-based designs.
+ It is a MIPI System Power Management (SPMI) controller.
+
+ The PMIC part is provided by
+ drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml.
+
+properties:
+ $nodename:
+ pattern: "spmi@[0-9a-f]"
+
+ compatible:
+ const: hisilicon,kirin970-spmi-controller
+
+ reg:
+ maxItems: 1
+
+ spmi-channel:
+ description: |
+ Number of the Kirin 970 SPMI channel to which the SPMI devices are connected.
+
+required:
+ - compatible
+ - reg
+ - spmi-channel
+
+patternProperties:
+ "^pmic@[0-9a-f]$":
+ description: |
+ PMIC properties, which are specific to the used SPMI PMIC device(s).
+ When used in combination with HiSilicon 6421v600, the properties
+ are documented at
+ drivers/staging/hikey9xx/hisilicon,hi6421-spmi-pmic.yaml.
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ spmi: spmi@fff24000 {
+ compatible = "hisilicon,kirin970-spmi-controller";
+ status = "ok";
+ reg = <0x0 0xfff24000 0x0 0x1000>;
+ spmi-channel = <2>;
+
+ pmic@0 {
+ /* pmic properties */
+ };
+ };
+ };
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.c b/drivers/staging/hikey9xx/phy-hi3670-usb3.c
new file mode 100644
index 000000000000..4fc013911a78
--- /dev/null
+++ b/drivers/staging/hikey9xx/phy-hi3670-usb3.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PHY provider for the USB 3.1 controller on the HiSilicon Kirin970 platform
+ *
+ * Copyright (C) 2017-2020 HiSilicon Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Authors: Yu Chen <chenyu56@huawei.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define SCTRL_SCDEEPSLEEPED (0x0)
+#define USB_CLK_SELECTED BIT(20)
+
+#define PERI_CRG_PEREN0 (0x00)
+#define PERI_CRG_PERDIS0 (0x04)
+#define PERI_CRG_PEREN4 (0x40)
+#define PERI_CRG_PERDIS4 (0x44)
+#define PERI_CRG_PERRSTEN4 (0x90)
+#define PERI_CRG_PERRSTDIS4 (0x94)
+#define PERI_CRG_ISODIS (0x148)
+#define PERI_CRG_PEREN6 (0x410)
+#define PERI_CRG_PERDIS6 (0x414)
+
+#define USB_REFCLK_ISO_EN BIT(25)
+
+#define GT_CLK_USB2PHY_REF BIT(19)
+
+#define PCTRL_PERI_CTRL3 (0x10)
+#define PCTRL_PERI_CTRL3_MSK_START (16)
+#define USB_TCXO_EN BIT(1)
+
+#define PCTRL_PERI_CTRL24 (0x64)
+#define SC_CLK_USB3PHY_3MUX1_SEL BIT(25)
+
+#define USB3OTG_CTRL0 (0x00)
+#define USB3OTG_CTRL3 (0x0C)
+#define USB3OTG_CTRL4 (0x10)
+#define USB3OTG_CTRL5 (0x14)
+#define USB3OTG_CTRL7 (0x1C)
+#define USB_MISC_CFG50 (0x50)
+#define USB_MISC_CFG54 (0x54)
+#define USB_MISC_CFG58 (0x58)
+#define USB_MISC_CFG5C (0x5C)
+#define USB_MISC_CFGA0 (0xA0)
+#define TCA_CLK_RST (0x200)
+#define TCA_INTR_EN (0x204)
+#define TCA_INTR_STS (0x208)
+#define TCA_GCFG (0x210)
+#define TCA_TCPC (0x214)
+#define TCA_SYSMODE_CFG (0x218)
+#define TCA_VBUS_CTRL (0x240)
+
+#define CTRL0_USB3_VBUSVLD BIT(7)
+#define CTRL0_USB3_VBUSVLD_SEL BIT(6)
+
+#define CTRL3_USB2_VBUSVLDEXT0 BIT(6)
+#define CTRL3_USB2_VBUSVLDEXTSEL0 BIT(5)
+
+#define CTRL5_USB2_SIDDQ BIT(0)
+
+#define CTRL7_USB2_REFCLKSEL_MASK (3 << 3)
+#define CTRL7_USB2_REFCLKSEL_ABB (3 << 3)
+#define CTRL7_USB2_REFCLKSEL_PAD (2 << 3)
+
+#define CFG50_USB3_PHY_TEST_POWERDOWN BIT(23)
+
+#define CFG54_USB31PHY_CR_ADDR_MASK (0xFFFF)
+#define CFG54_USB31PHY_CR_ADDR_SHIFT (16)
+#define CFG54_USB3PHY_REF_USE_PAD BIT(12)
+#define CFG54_PHY0_PMA_PWR_STABLE BIT(11)
+#define CFG54_PHY0_PCS_PWR_STABLE BIT(9)
+#define CFG54_USB31PHY_CR_ACK BIT(7)
+#define CFG54_USB31PHY_CR_WR_EN BIT(5)
+#define CFG54_USB31PHY_CR_SEL BIT(4)
+#define CFG54_USB31PHY_CR_RD_EN BIT(3)
+#define CFG54_USB31PHY_CR_CLK BIT(2)
+#define CFG54_USB3_PHY0_ANA_PWR_EN BIT(1)
+
+#define CFG58_USB31PHY_CR_DATA_MASK (0xFFFF)
+#define CFG58_USB31PHY_CR_DATA_RD_START (16)
+
+#define CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN BIT(1)
+
+#define CFGA0_VAUX_RESET BIT(9)
+#define CFGA0_USB31C_RESET BIT(8)
+#define CFGA0_USB2PHY_REFCLK_SELECT BIT(4)
+#define CFGA0_USB3PHY_RESET BIT(1)
+#define CFGA0_USB2PHY_POR BIT(0)
+
+#define INTR_EN_XA_TIMEOUT_EVT_EN BIT(1)
+#define INTR_EN_XA_ACK_EVT_EN BIT(0)
+
+#define CLK_RST_TCA_REF_CLK_EN BIT(1)
+#define CLK_RST_SUSPEND_CLK_EN BIT(0)
+
+#define GCFG_ROLE_HSTDEV BIT(4)
+#define GCFG_OP_MODE (3 << 0)
+#define GCFG_OP_MODE_CTRL_SYNC_MODE BIT(0)
+
+#define TCPC_VALID BIT(4)
+#define TCPC_LOW_POWER_EN BIT(3)
+#define TCPC_MUX_CONTROL_MASK (3 << 0)
+#define TCPC_MUX_CONTROL_USB31 BIT(0)
+
+#define SYSMODE_CFG_TYPEC_DISABLE BIT(3)
+
+#define VBUS_CTRL_POWERPRESENT_OVERRD (3 << 2)
+#define VBUS_CTRL_VBUSVALID_OVERRD (3 << 0)
+
+#define KIRIN970_USB_DEFAULT_PHY_PARAM (0xFDFEE4)
+#define KIRIN970_USB_DEFAULT_PHY_VBOOST (0x5)
+
+#define TX_VBOOST_LVL_REG (0xf)
+#define TX_VBOOST_LVL_START (6)
+#define TX_VBOOST_LVL_ENABLE BIT(9)
+
+struct hi3670_priv {
+ struct device *dev;
+ struct regmap *peri_crg;
+ struct regmap *pctrl;
+ struct regmap *sctrl;
+ struct regmap *usb31misc;
+
+ u32 eye_diagram_param;
+ u32 tx_vboost_lvl;
+
+ u32 peri_crg_offset;
+ u32 pctrl_offset;
+ u32 usb31misc_offset;
+};
+
+static int hi3670_phy_cr_clk(struct regmap *usb31misc)
+{
+ int ret;
+
+ /* Clock up */
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_CLK, CFG54_USB31PHY_CR_CLK);
+ if (ret)
+ return ret;
+
+ /* Clock down */
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_CLK, 0);
+
+ return ret;
+}
+
+static int hi3670_phy_cr_set_sel(struct regmap *usb31misc)
+{
+ return regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_SEL, CFG54_USB31PHY_CR_SEL);
+}
+
+static int hi3670_phy_cr_start(struct regmap *usb31misc, int direction)
+{
+ int ret;
+
+ if (direction)
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_WR_EN,
+ CFG54_USB31PHY_CR_WR_EN);
+ else
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_RD_EN,
+ CFG54_USB31PHY_CR_RD_EN);
+
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_clk(usb31misc);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(usb31misc, USB_MISC_CFG54,
+ CFG54_USB31PHY_CR_RD_EN | CFG54_USB31PHY_CR_WR_EN, 0);
+
+ return ret;
+}
+
+static int hi3670_phy_cr_wait_ack(struct regmap *usb31misc)
+{
+ u32 reg;
+ int retry = 100000;
+ int ret;
+
+ while (retry-- > 0) {
+ ret = regmap_read(usb31misc, USB_MISC_CFG54, &reg);
+ if (ret)
+ return ret;
+ if ((reg & CFG54_USB31PHY_CR_ACK) == CFG54_USB31PHY_CR_ACK)
+ return 0;
+
+ ret = hi3670_phy_cr_clk(usb31misc);
+ if (ret)
+ return ret;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int hi3670_phy_cr_set_addr(struct regmap *usb31misc, u32 addr)
+{
+ u32 reg;
+ int ret;
+
+ ret = regmap_read(usb31misc, USB_MISC_CFG54, &reg);
+ if (ret)
+ return ret;
+
+ reg &= ~(CFG54_USB31PHY_CR_ADDR_MASK << CFG54_USB31PHY_CR_ADDR_SHIFT);
+ reg |= ((addr & CFG54_USB31PHY_CR_ADDR_MASK) << CFG54_USB31PHY_CR_ADDR_SHIFT);
+ ret = regmap_write(usb31misc, USB_MISC_CFG54, reg);
+
+ return ret;
+}
+
+static int hi3670_phy_cr_read(struct regmap *usb31misc, u32 addr, u32 *val)
+{
+ u32 reg;
+ int i;
+ int ret;
+
+ for (i = 0; i < 100; i++) {
+ ret = hi3670_phy_cr_clk(usb31misc);
+ if (ret)
+ return ret;
+ }
+
+ ret = hi3670_phy_cr_set_sel(usb31misc);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_set_addr(usb31misc, addr);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_start(usb31misc, 0);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_wait_ack(usb31misc);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(usb31misc, USB_MISC_CFG58, &reg);
+ if (ret)
+ return ret;
+
+ *val = (reg >> CFG58_USB31PHY_CR_DATA_RD_START) &
+ CFG58_USB31PHY_CR_DATA_MASK;
+
+ return 0;
+}
+
+static int hi3670_phy_cr_write(struct regmap *usb31misc, u32 addr, u32 val)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < 100; i++) {
+ ret = hi3670_phy_cr_clk(usb31misc);
+ if (ret)
+ return ret;
+ }
+
+ ret = hi3670_phy_cr_set_sel(usb31misc);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_set_addr(usb31misc, addr);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(usb31misc, USB_MISC_CFG58,
+ val & CFG58_USB31PHY_CR_DATA_MASK);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_start(usb31misc, 1);
+ if (ret)
+ return ret;
+
+ ret = hi3670_phy_cr_wait_ack(usb31misc);
+
+ return ret;
+}
+
+static int hi3670_phy_set_params(struct hi3670_priv *priv)
+{
+ u32 reg;
+ int ret;
+ int retry = 3;
+
+ ret = regmap_write(priv->usb31misc, USB3OTG_CTRL4,
+ priv->eye_diagram_param);
+ if (ret) {
+ dev_err(priv->dev, "set USB3OTG_CTRL4 failed\n");
+ return ret;
+ }
+
+ while (retry-- > 0) {
+ ret = hi3670_phy_cr_read(priv->usb31misc,
+ TX_VBOOST_LVL_REG, &reg);
+ if (!ret)
+ break;
+
+ if (ret != -ETIMEDOUT) {
+ dev_err(priv->dev, "read TX_VBOOST_LVL_REG failed\n");
+ return ret;
+ }
+ }
+ if (ret)
+ return ret;
+
+ reg |= (TX_VBOOST_LVL_ENABLE | (priv->tx_vboost_lvl << TX_VBOOST_LVL_START));
+ ret = hi3670_phy_cr_write(priv->usb31misc, TX_VBOOST_LVL_REG, reg);
+ if (ret)
+ dev_err(priv->dev, "write TX_VBOOST_LVL_REG failed\n");
+
+ return ret;
+}
+
+static int hi3670_is_abbclk_selected(struct hi3670_priv *priv)
+{
+ u32 reg;
+
+ if (!priv->sctrl) {
+ dev_err(priv->dev, "priv->sctrl is null!\n");
+ return 1;
+ }
+
+ if (regmap_read(priv->sctrl, SCTRL_SCDEEPSLEEPED, &reg)) {
+ dev_err(priv->dev, "SCTRL_SCDEEPSLEEPED read failed!\n");
+ return 1;
+ }
+
+ if ((reg & USB_CLK_SELECTED) == 0)
+ return 1;
+
+ return 0;
+}
+
+static int hi3670_config_phy_clock(struct hi3670_priv *priv)
+{
+ u32 val, mask;
+ int ret;
+
+ if (hi3670_is_abbclk_selected(priv)) {
+ /* usb refclk iso disable */
+ ret = regmap_write(priv->peri_crg, PERI_CRG_ISODIS,
+ USB_REFCLK_ISO_EN);
+ if (ret)
+ goto out;
+
+ /* enable usb_tcxo_en */
+ ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3,
+ USB_TCXO_EN |
+ (USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START));
+ if (ret)
+ goto out;
+
+ /* select usbphy clk from abb */
+ mask = SC_CLK_USB3PHY_3MUX1_SEL;
+ ret = regmap_update_bits(priv->pctrl,
+ PCTRL_PERI_CTRL24, mask, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0,
+ CFGA0_USB2PHY_REFCLK_SELECT, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(priv->usb31misc, USB3OTG_CTRL7, &val);
+ if (ret)
+ goto out;
+ val &= ~CTRL7_USB2_REFCLKSEL_MASK;
+ val |= CTRL7_USB2_REFCLKSEL_ABB;
+ ret = regmap_write(priv->usb31misc, USB3OTG_CTRL7, val);
+ if (ret)
+ goto out;
+
+ return 0;
+ }
+
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG54,
+ CFG54_USB3PHY_REF_USE_PAD,
+ CFG54_USB3PHY_REF_USE_PAD);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0,
+ CFGA0_USB2PHY_REFCLK_SELECT,
+ CFGA0_USB2PHY_REFCLK_SELECT);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(priv->usb31misc, USB3OTG_CTRL7, &val);
+ if (ret)
+ goto out;
+ val &= ~CTRL7_USB2_REFCLKSEL_MASK;
+ val |= CTRL7_USB2_REFCLKSEL_PAD;
+ ret = regmap_write(priv->usb31misc, USB3OTG_CTRL7, val);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(priv->peri_crg,
+ PERI_CRG_PEREN6, GT_CLK_USB2PHY_REF);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to config phy clock ret: %d\n", ret);
+ return ret;
+}
+
+static int hi3670_config_tca(struct hi3670_priv *priv)
+{
+ u32 val, mask;
+ int ret;
+
+ ret = regmap_write(priv->usb31misc, TCA_INTR_STS, 0xffff);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(priv->usb31misc, TCA_INTR_EN,
+ INTR_EN_XA_TIMEOUT_EVT_EN | INTR_EN_XA_ACK_EVT_EN);
+ if (ret)
+ goto out;
+
+ mask = CLK_RST_TCA_REF_CLK_EN | CLK_RST_SUSPEND_CLK_EN;
+ ret = regmap_update_bits(priv->usb31misc, TCA_CLK_RST, mask, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->usb31misc, TCA_GCFG,
+ GCFG_ROLE_HSTDEV | GCFG_OP_MODE,
+ GCFG_ROLE_HSTDEV | GCFG_OP_MODE_CTRL_SYNC_MODE);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->usb31misc, TCA_SYSMODE_CFG,
+ SYSMODE_CFG_TYPEC_DISABLE, 0);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(priv->usb31misc, TCA_TCPC, &val);
+ if (ret)
+ goto out;
+ val &= ~(TCPC_VALID | TCPC_LOW_POWER_EN | TCPC_MUX_CONTROL_MASK);
+ val |= (TCPC_VALID | TCPC_MUX_CONTROL_USB31);
+ ret = regmap_write(priv->usb31misc, TCA_TCPC, val);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(priv->usb31misc, TCA_VBUS_CTRL,
+ VBUS_CTRL_POWERPRESENT_OVERRD | VBUS_CTRL_VBUSVALID_OVERRD);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to config phy clock ret: %d\n", ret);
+ return ret;
+}
+
+static int hi3670_phy_init(struct phy *phy)
+{
+ struct hi3670_priv *priv = phy_get_drvdata(phy);
+ u32 val;
+ int ret;
+
+ /* Assert controller reset */
+ val = CFGA0_VAUX_RESET | CFGA0_USB31C_RESET |
+ CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, 0);
+ if (ret)
+ goto out;
+
+ ret = hi3670_config_phy_clock(priv);
+ if (ret)
+ goto out;
+
+ /* Exit from IDDQ mode */
+ ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL5,
+ CTRL5_USB2_SIDDQ, 0);
+ if (ret)
+ goto out;
+
+ /* Release USB31 PHY out of TestPowerDown mode */
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG50,
+ CFG50_USB3_PHY_TEST_POWERDOWN, 0);
+ if (ret)
+ goto out;
+
+ /* Deassert phy */
+ val = CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, val);
+ if (ret)
+ goto out;
+
+ usleep_range(100, 120);
+
+ /* Tell the PHY power is stable */
+ val = CFG54_USB3_PHY0_ANA_PWR_EN | CFG54_PHY0_PCS_PWR_STABLE |
+ CFG54_PHY0_PMA_PWR_STABLE;
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG54,
+ val, val);
+ if (ret)
+ goto out;
+
+ ret = hi3670_config_tca(priv);
+ if (ret)
+ goto out;
+
+ /* Enable SSC */
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFG5C,
+ CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN,
+ CFG5C_USB3_PHY0_SS_MPLLA_SSC_EN);
+ if (ret)
+ goto out;
+
+ /* Deassert controller */
+ val = CFGA0_VAUX_RESET | CFGA0_USB31C_RESET;
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, val, val);
+ if (ret)
+ goto out;
+
+ usleep_range(100, 120);
+
+ /* Set fake vbus valid signal */
+ val = CTRL0_USB3_VBUSVLD | CTRL0_USB3_VBUSVLD_SEL;
+ ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL0, val, val);
+ if (ret)
+ goto out;
+
+ val = CTRL3_USB2_VBUSVLDEXT0 | CTRL3_USB2_VBUSVLDEXTSEL0;
+ ret = regmap_update_bits(priv->usb31misc, USB3OTG_CTRL3, val, val);
+ if (ret)
+ goto out;
+
+ usleep_range(100, 120);
+
+ ret = hi3670_phy_set_params(priv);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to init phy ret: %d\n", ret);
+ return ret;
+}
+
+static int hi3670_phy_exit(struct phy *phy)
+{
+ struct hi3670_priv *priv = phy_get_drvdata(phy);
+ u32 mask;
+ int ret;
+
+ /* Assert phy */
+ mask = CFGA0_USB3PHY_RESET | CFGA0_USB2PHY_POR;
+ ret = regmap_update_bits(priv->usb31misc, USB_MISC_CFGA0, mask, 0);
+ if (ret)
+ goto out;
+
+ if (hi3670_is_abbclk_selected(priv)) {
+ /* disable usb_tcxo_en */
+ ret = regmap_write(priv->pctrl, PCTRL_PERI_CTRL3,
+ USB_TCXO_EN << PCTRL_PERI_CTRL3_MSK_START);
+ if (ret)
+ goto out;
+ } else {
+ ret = regmap_write(priv->peri_crg, PERI_CRG_PERDIS6,
+ GT_CLK_USB2PHY_REF);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(priv->dev, "failed to exit phy ret: %d\n", ret);
+ return ret;
+}
+
+static const struct phy_ops hi3670_phy_ops = {
+ .init = hi3670_phy_init,
+ .exit = hi3670_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int hi3670_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct phy *phy;
+ struct hi3670_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->peri_crg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,pericrg-syscon");
+ if (IS_ERR(priv->peri_crg)) {
+ dev_err(dev, "no hisilicon,pericrg-syscon\n");
+ return PTR_ERR(priv->peri_crg);
+ }
+
+ priv->pctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,pctrl-syscon");
+ if (IS_ERR(priv->pctrl)) {
+ dev_err(dev, "no hisilicon,pctrl-syscon\n");
+ return PTR_ERR(priv->pctrl);
+ }
+
+ priv->sctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "hisilicon,sctrl-syscon");
+ if (IS_ERR(priv->sctrl)) {
+ dev_err(dev, "no hisilicon,sctrl-syscon\n");
+ return PTR_ERR(priv->sctrl);
+ }
+
+ /* node of hi3670 phy is a sub-node of usb3_otg_bc */
+ priv->usb31misc = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(priv->usb31misc)) {
+ dev_err(dev, "no hisilicon,usb3-otg-bc-syscon\n");
+ return PTR_ERR(priv->usb31misc);
+ }
+
+ if (of_property_read_u32(dev->of_node, "hisilicon,eye-diagram-param",
+ &priv->eye_diagram_param))
+ priv->eye_diagram_param = KIRIN970_USB_DEFAULT_PHY_PARAM;
+
+ if (of_property_read_u32(dev->of_node, "hisilicon,tx-vboost-lvl",
+ &priv->tx_vboost_lvl))
+ priv->tx_vboost_lvl = KIRIN970_USB_DEFAULT_PHY_VBOOST;
+
+ phy = devm_phy_create(dev, NULL, &hi3670_phy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id hi3670_phy_of_match[] = {
+ { .compatible = "hisilicon,hi3670-usb-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, hi3670_phy_of_match);
+
+static struct platform_driver hi3670_phy_driver = {
+ .probe = hi3670_phy_probe,
+ .driver = {
+ .name = "hi3670-usb-phy",
+ .of_match_table = hi3670_phy_of_match,
+ }
+};
+module_platform_driver(hi3670_phy_driver);
+
+MODULE_AUTHOR("Yu Chen <chenyu56@huawei.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hilisicon Kirin970 USB31 PHY Driver");
diff --git a/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml b/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
new file mode 100644
index 000000000000..125a5d6546ae
--- /dev/null
+++ b/drivers/staging/hikey9xx/phy-hi3670-usb3.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/hisilicon,hi3670-usb3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: HiSilicon Kirin970 USB PHY
+
+maintainers:
+ - Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+description: |+
+ Bindings for USB3 PHY on HiSilicon Kirin 970.
+
+properties:
+ compatible:
+ const: hisilicon,hi3670-usb-phy
+
+ "#phy-cells":
+ const: 0
+
+ hisilicon,pericrg-syscon:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: phandle of syscon used to control iso refclk.
+
+ hisilicon,pctrl-syscon:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: phandle of syscon used to control usb tcxo.
+
+ hisilicon,sctrl-syscon:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: phandle of syscon used to control phy deep sleep.
+
+ hisilicon,eye-diagram-param:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Eye diagram for phy.
+
+ hisilicon,tx-vboost-lvl:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: TX level vboost for phy.
+
+required:
+ - compatible
+ - hisilicon,pericrg-syscon
+ - hisilicon,pctrl-syscon
+ - hisilicon,sctrl-syscon
+ - hisilicon,eye-diagram-param
+ - hisilicon,tx-vboost-lvl
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb3_otg_bc: usb3_otg_bc@ff200000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0xff200000 0x0 0x1000>;
+
+ usb_phy {
+ compatible = "hisilicon,hi3670-usb-phy";
+ #phy-cells = <0>;
+ hisilicon,pericrg-syscon = <&crg_ctrl>;
+ hisilicon,pctrl-syscon = <&pctrl>;
+ hisilicon,sctrl-syscon = <&sctrl>;
+ hisilicon,eye-diagram-param = <0xfdfee4>;
+ hisilicon,tx-vboost-lvl = <0x5>;
+ };
+ };
+ };
diff --git a/drivers/staging/iio/Documentation/dac/max517 b/drivers/staging/iio/Documentation/dac/max517
deleted file mode 100644
index e60ec2f91a7a..000000000000
--- a/drivers/staging/iio/Documentation/dac/max517
+++ /dev/null
@@ -1,41 +0,0 @@
-Kernel driver max517
-====================
-
-Supported chips:
- * Maxim MAX517, MAX518, MAX519
- Prefix: 'max517'
- Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/
-
-Author:
- Roland Stigge <stigge@antcom.de>
-
-Description
------------
-
-The Maxim MAX517/518/519 is an 8-bit DAC on the I2C bus. The following table
-shows the different feature sets of the variants MAX517, MAX518 and MAX519:
-
-Feature MAX517 MAX518 MAX519
---------------------------------------------------------------------------
-One output channel X
-Two output channels X X
-Simultaneous output updates X X
-Supply voltage as reference X
-Separate reference input X
-Reference input for each DAC X
-
-Via the iio sysfs interface, there are three attributes available: out1_raw,
-out2_raw and out12_raw. With out1_raw and out2_raw, the current output values
-(0..255) of the DACs can be written to the device. out12_raw can be used to set
-both output channel values simultaneously.
-
-With MAX517, only out1_raw is available.
-
-Via out1_scale (and where appropriate, out2_scale), the current scaling factor
-in mV can be read.
-
-When the operating system goes to a power down state, the Power Down function
-of the chip is activated, reducing the supply current to 4uA.
-
-On power-up, the device is in 0V-output state.
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
deleted file mode 100644
index 0d1275b1eb3f..000000000000
--- a/drivers/staging/iio/Documentation/device.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-IIO Device drivers
-
-This is not intended to provide a comprehensive guide to writing an
-IIO device driver. For further information see the drivers within the
-subsystem.
-
-The crucial structure for device drivers in iio is iio_dev.
-
-First allocate one using:
-
-struct iio_dev *indio_dev = iio_device_alloc(parent, sizeof(struct chip_state));
-where chip_state is a structure of local state data for this instance of
-the chip.
-
-That data can be accessed using iio_priv(struct iio_dev *).
-
-Then fill in the following:
-
-- indio_dev->name
- Name of the device being driven - made available as the name
- attribute in sysfs.
-
-- indio_dev->info
- pointer to a structure with elements that tend to be fixed for
- large sets of different parts supported by a given driver.
- This contains:
- * info->event_attrs:
- Attributes used to enable / disable hardware events.
- * info->attrs:
- General device attributes. Typically used for the weird
- and the wonderful bits not covered by the channel specification.
- * info->read_raw:
- Raw data reading function. Used for both raw channel access
- and for associate parameters such as offsets and scales.
- * info->write_raw:
- Raw value writing function. Used for writable device values such
- as DAC values and calibbias.
- * info->read_event_config:
- Typically only set if there are some interrupt lines. This
- is used to read if an on sensor event detector is enabled.
- * info->write_event_config:
- Enable / disable an on sensor event detector.
- * info->read_event_value:
- Read value associated with on sensor event detectors. Note that
- the meaning of the returned value is dependent on the event
- type.
- * info->write_event_value:
- Write the value associated with on sensor event detectors. E.g.
- a threshold above which an interrupt occurs. Note that the
- meaning of the value to be set is event type dependent.
-
-- indio_dev->modes:
- Specify whether direct access and / or ring buffer access is supported.
-- indio_dev->buffer:
- An optional associated buffer.
-- indio_dev->pollfunc:
- Poll function related elements. This controls what occurs when a trigger
- to which this device is attached sends an event.
-- indio_dev->channels:
- Specification of device channels. Most attributes etc. are built
- from this spec.
-- indio_dev->num_channels:
- How many channels are there?
-
-Once these are set up, a call to iio_device_register(indio_dev)
-will register the device with the iio core.
-
-Worth noting here is that, if a ring buffer is to be used, it can be
-allocated prior to registering the device with the iio-core, but must
-be registered afterwards (otherwise the whole parentage of devices
-gets confused)
-
-On remove, iio_device_unregister(indio_dev) will remove the device from
-the core, and iio_device_free(indio_dev) will clean up.
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
deleted file mode 100644
index b2798b258bf7..000000000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2x7x
+++ /dev/null
@@ -1,13 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion: 3.3-rc1
-Contact: linux-iio@vger.kernel.org
-Description:
- Causes an internal calibration of the als gain trim
- value which is later used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/in_proximity0_calibrate
-KernelVersion: 3.3-rc1
-Contact: linux-iio@vger.kernel.org
-Description:
- Causes a recalculation and adjustment to the
- proximity_thresh_rising_value.
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
deleted file mode 100644
index ebdc64f451d7..000000000000
--- a/drivers/staging/iio/Documentation/overview.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Overview of IIO
-
-The Industrial I/O subsystem is intended to provide support for devices
-that in some sense are analog to digital converters (ADCs). As many
-actual devices combine some ADCs with digital to analog converters
-(DACs) that functionality is also supported.
-
-The aim is to fill the gap between the somewhat similar hwmon and
-input subsystems. Hwmon is very much directed at low sample rate
-sensors used in applications such as fan speed control and temperature
-measurement. Input is, as its name suggests focused on input
-devices. In some cases there is considerable overlap between these and
-IIO.
-
-A typical device falling into this category would be connected via SPI
-or I2C.
-
-Functionality of IIO
-
-* Basic device registration and handling. This is very similar to
-hwmon with simple polled access to device channels via sysfs.
-
-* Event chrdevs. These are similar to input in that they provide a
-route to user space for hardware triggered events. Such events include
-threshold detectors, free-fall detectors and more complex action
-detection. The events themselves are currently very simple with
-merely an event code and a timestamp. Any data associated with the
-event must be accessed via polling.
-
-Note: A given device may have one or more event channel. These events are
-turned on or off (if possible) via sysfs interfaces.
-
-* Hardware buffer support. Some recent sensors have included
-fifo / ring buffers on the sensor chip. These greatly reduce the load
-on the host CPU by buffering relatively large numbers of data samples
-based on an internal sampling clock. Examples include VTI SCA3000
-series and Analog Devices ADXL345 accelerometers. Each buffer supports
-polling to establish when data is available.
-
-* Trigger and software buffer support. In many data analysis
-applications it it useful to be able to capture data based on some
-external signal (trigger). These triggers might be a data ready
-signal, a gpio line connected to some external system or an on
-processor periodic interrupt. A single trigger may initialize data
-capture or reading from a number of sensors. These triggers are
-used in IIO to fill software buffers acting in a very similar
-fashion to the hardware buffers described above.
-
-Other documentation:
-
-device.txt - elements of a typical device driver.
-
-trigger.txt - elements of a typical trigger driver.
-
-ring.txt - additional elements required for buffer support.
-
-sysfs-bus-iio - abi documentation file.
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
deleted file mode 100644
index 18718fcaf259..000000000000
--- a/drivers/staging/iio/Documentation/ring.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Buffer support within IIO
-
-This document is intended as a general overview of the functionality
-a buffer may supply and how it is specified within IIO. For more
-specific information on a given buffer implementation, see the
-comments in the source code. Note that some drivers allow buffer
-implementation to be selected at compile time via Kconfig options.
-
-A given buffer implementation typically embeds a struct
-iio_ring_buffer and it is a pointer to this that is provided to the
-IIO core. Access to the embedding structure is typically done via
-container_of functions.
-
-struct iio_ring_buffer contains a struct iio_ring_setup_ops *setup_ops
-which in turn contains the 4 function pointers
-(preenable, postenable, predisable and postdisable).
-These are used to perform device specific steps on either side
-of the core changing its current mode to indicate that the buffer
-is enabled or disabled (along with enabling triggering etc. as appropriate).
-
-Also in struct iio_ring_buffer is a struct iio_ring_access_funcs.
-The function pointers within here are used to allow the core to handle
-as much buffer functionality as possible. Note almost all of these
-are optional.
-
-store_to
- If possible, push data to the buffer.
-
-read_last
- If possible, get the most recent scan from the buffer (without removal).
- This provides polling like functionality whilst the ring buffering is in
- use without a separate read from the device.
-
-rip_first_n
- The primary buffer reading function. Note that it may well not return
- as much data as requested.
-
-request_update
- If parameters have changed that require reinitialization or configuration of
- the buffer this will trigger it.
-
-set_bytes_per_datum
- Set the number of bytes for a complete scan. (All samples + timestamp)
-
-set_length
- Set the number of complete scans that may be held by the buffer.
-
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
deleted file mode 100644
index 7c7cd8456060..000000000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light
+++ /dev/null
@@ -1,79 +0,0 @@
-What: /sys/bus/iio/devices/device[n]/in_illuminance0[_input|_raw]
-KernelVersion: 2.6.35
-Contact: linux-iio@vger.kernel.org
-Description:
- This should return the calculated lux from the light sensor. If
- it comes back in SI units, it should also include _input else it
- should include _raw to signify it is not in SI units.
-
-What: /sys/.../device[n]/proximity_on_chip_ambient_infrared_suppression
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent mode for an ALS device to calculate the value
- in proximity mode. When this is enabled, then the device should
- use a infrared sensor reading to remove infrared noise from the
- proximity reading. If this is not enabled, the driver can still
- do this calculation manually by reading the infrared sensor
- value and doing the negation in sw.
-
-What: /sys/bus/iio/devices/device[n]/in_proximity[_input|_raw]
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by proximity sensors and should be
- used to return the value of a reading by the sensor. If this
- value is returned in SI units, it should also include _input
- but if it is not, then it should include _raw.
-
-What: /sys/bus/iio/devices/device[n]/intensity_infrared[_input|_raw]
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by sensors that have an infrared
- sensing mode. This value should be the output from a reading
- and if expressed in SI units, should include _input. If this
- value is not in SI units, then it should include _raw.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_target
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the last known external
- lux measurement used in/for calibration.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_integration_time
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the sensors ADC analog integration time.
-
-What: /sys/bus/iio/devices/device[n]/in_illuminance0_lux_table
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- This property gets/sets the table of coefficients
- used in calculating illuminance in lux.
-
-What: /sys/bus/iio/devices/device[n]/in_intensity_clear[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_red[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_green[_input|_raw]
-What: /sys/bus/iio/devices/device[n]/in_intensity_blue[_input|_raw]
-KernelVersion: 3.6.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This property is supported by sensors that have a RGBC
- sensing mode. This value should be the output from a reading
- and if expressed in SI units, should include _input. If this
- value is not in SI units (irradiance, uW/mm^2), then it should
- include _raw.
-
-What: /sys/bus/iio/devices/device[n]/in_cct0[_input|_raw]
-KernelVersion: 3.6.0
-Contact: linux-iio@vger.kernel.org
-Description:
- This should return the correlated color temperature from the
- light sensor. If it comes back in SI units, it should also
- include _input else it should include _raw to signify it is not
- in SI units.
-
diff --git a/drivers/staging/iio/Documentation/trigger.txt b/drivers/staging/iio/Documentation/trigger.txt
deleted file mode 100644
index 299a1add98bf..000000000000
--- a/drivers/staging/iio/Documentation/trigger.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-IIO trigger drivers.
-
-Many triggers are provided by hardware that will also be registered as
-an IIO device. Whilst this can create device specific complexities
-such triggers are registered with the core in the same way as
-stand-alone triggers.
-
-struct iio_trig *trig = iio_trigger_alloc("<trigger format string>", ...);
-
-allocates a trigger structure. The key elements to then fill in within
-a driver are:
-
-trig->set_trigger_state:
- Function that enables / disables the underlying source of the trigger.
-
-There is also a
-trig->alloc_list which is useful for drivers that allocate multiple
-triggers to keep track of what they have created.
-
-When these have been set call:
-
-iio_trigger_register(trig);
-
-to register the trigger with the core, making it available to trigger
-consumers.
-
-Trigger Consumers
-
-Currently triggers are only used for the filling of software
-buffers and as such any device supporting INDIO_BUFFER_TRIGGERED has the
-consumer interface automatically created.
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index c7798908ef0e..b68304da288b 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -286,35 +286,16 @@ static int adis16203_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16203_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id adis16203_of_match[] = {
@@ -330,7 +311,6 @@ static struct spi_driver adis16203_driver = {
.of_match_table = adis16203_of_match,
},
.probe = adis16203_probe,
- .remove = adis16203_remove,
};
module_spi_driver(adis16203_driver);
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 38ec40b458c9..5064adce5f58 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -415,35 +415,17 @@ static int adis16240_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16240_data);
if (ret)
return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16240_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
- return 0;
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
-
static const struct of_device_id adis16240_of_match[] = {
{ .compatible = "adi,adis16240" },
{ },
@@ -456,7 +438,6 @@ static struct spi_driver adis16240_driver = {
.of_match_table = adis16240_of_match,
},
.probe = adis16240_probe,
- .remove = adis16240_remove,
};
module_spi_driver(adis16240_driver);
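Note (illustration, not part of the patch): the adis16203/adis16240 hunks
above follow the usual devm conversion pattern: once every resource taken
in probe is device-managed, the error-unwind labels and the remove()
callback become dead code and can be dropped. The resulting probe tail,
as in the patch:

    ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
    if (ret)
        return ret;

    /* Get the device into a sane initial state */
    ret = adis_initial_startup(st);
    if (ret)
        return ret;    /* no goto: devres unwinds automatically */

    return devm_iio_device_register(&spi->dev, indio_dev);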
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 77f77a2b2e05..262c3590e64e 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -397,7 +397,6 @@ static int ad9834_probe(struct spi_device *spi)
struct regulator *reg;
int ret;
-
reg = devm_regulator_get(&spi->dev, "avdd");
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index dd716edd9b1b..e1c7c04f16fe 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -53,7 +53,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
acd = kzalloc(sizeof(*acd), GFP_KERNEL);
if (!acd) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the aio data\n");
+ dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the aio data\n");
return -ENOMEM;
}
memset(acd, 0x66, sizeof(struct aio_cb_data));
@@ -69,7 +69,7 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
acd->user_pages = kcalloc(acd->page_count, sizeof(struct page *),
GFP_KERNEL);
if (!acd->user_pages) {
- dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for for the page pointers\n");
+ dev_err(&priv->ldev->pldev->dev, "Couldn't kmalloc space for the page pointers\n");
rv = -ENOMEM;
goto err_alloc_userpages;
}
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 6b2660c94f4e..78dc8beeae98 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -405,9 +405,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
return result;
}
-static void rx_event_task(unsigned long dev)
+static void rx_event_task(struct tasklet_struct *t)
{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct ks_wlan_private *priv = from_tasklet(priv, t, rx_bh_task);
struct rx_device_buffer *rp;
if (rxq_has_space(priv) && priv->dev_state >= DEVICE_STATE_BOOT) {
@@ -618,7 +618,7 @@ static int trx_device_init(struct ks_wlan_private *priv)
spin_lock_init(&priv->tx_dev.tx_dev_lock);
spin_lock_init(&priv->rx_dev.rx_dev_lock);
- tasklet_init(&priv->rx_bh_task, rx_event_task, (unsigned long)priv);
+ tasklet_setup(&priv->rx_bh_task, rx_event_task);
return 0;
}
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index eaaf6a5440a9..8bc3b7d8d3d5 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -2205,9 +2205,9 @@ static void hostif_sme_execute(struct ks_wlan_private *priv, int event)
}
static
-void hostif_sme_task(unsigned long dev)
+void hostif_sme_task(struct tasklet_struct *t)
{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct ks_wlan_private *priv = from_tasklet(priv, t, sme_task);
if (priv->dev_state < DEVICE_STATE_BOOT)
return;
@@ -2258,7 +2258,7 @@ static inline void hostif_sme_init(struct ks_wlan_private *priv)
priv->sme_i.qtail = 0;
spin_lock_init(&priv->sme_i.sme_spin);
priv->sme_i.sme_flag = 0;
- tasklet_init(&priv->sme_task, hostif_sme_task, (unsigned long)priv);
+ tasklet_setup(&priv->sme_task, hostif_sme_task);
}
static inline void hostif_wpa_init(struct ks_wlan_private *priv)
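Both ks7010 hunks above are the same mechanical conversion to the type-safe tasklet API: tasklet_setup() registers a callback that takes a struct tasklet_struct *, and from_tasklet(), a container_of() wrapper, recovers the enclosing private structure, replacing the old unsigned long cast. A hedged sketch with hypothetical names:

#include <linux/interrupt.h>

struct example_priv {
	struct tasklet_struct bh_task;
};

static void example_bh(struct tasklet_struct *t)
{
	/* expands to container_of(t, struct example_priv, bh_task) */
	struct example_priv *priv = from_tasklet(priv, t, bh_task);

	(void)priv;	/* bottom-half work would use priv here */
}

static void example_init(struct example_priv *priv)
{
	tasklet_setup(&priv->bh_task, example_bh);
}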
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 71d077762698..747c6cf1d795 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -38,16 +38,12 @@ source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-vde/Kconfig"
+source "drivers/staging/media/zoran/Kconfig"
+
source "drivers/staging/media/tegra-video/Kconfig"
source "drivers/staging/media/ipu3/Kconfig"
-source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig"
-
source "drivers/staging/media/rkisp1/Kconfig"
-if MEDIA_ANALOG_TV_SUPPORT
-source "drivers/staging/media/usbvision/Kconfig"
-endif
-
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 17ececa1e095..b59571826ba6 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -10,6 +10,5 @@ obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
-obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/
obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/
-obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
+obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 205d0f8cc2e1..1dfad0dd02d0 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -307,18 +307,12 @@ INCLUDES += \
-I$(atomisp)/pci/runtime/queue/src/ \
-I$(atomisp)/pci/runtime/rmgr/interface/ \
-I$(atomisp)/pci/runtime/spctrl/interface/ \
- -I$(atomisp)/pci/runtime/tagger/interface/
-
-INCLUDES_byt += \
+ -I$(atomisp)/pci/runtime/tagger/interface/ \
-I$(atomisp)/pci/css_2400_system/hive/ \
-
-INCLUDES_cht += \
-I$(atomisp)/pci/css_2401_system/ \
-I$(atomisp)/pci/css_2401_system/host/ \
-I$(atomisp)/pci/css_2401_system/hive/ \
- -I$(atomisp)/pci/css_2401_system/hrt/ \
-
-# -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \
+ -I$(atomisp)/pci/css_2401_system/hrt/
DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
#DEFINES += -DUSE_DYNAMIC_BIN
@@ -330,11 +324,9 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
ifeq ($(CONFIG_VIDEO_ATOMISP_ISP2401),y)
atomisp-objs += $(obj-cht)
-INCLUDES += $(INCLUDES_cht)
DEFINES += -DISP2401 -DISP2401_NEW_INPUT_SYSTEM -DSYSTEM_hive_isp_css_2401_system
else
atomisp-objs += $(obj-byt)
-INCLUDES += $(INCLUDES_byt)
DEFINES += -DISP2400 -DSYSTEM_hive_isp_css_2400_system
endif
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index 7c7f0fc090b3..a772b833a85f 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -3,53 +3,51 @@
# Kconfig for sensor drivers
#
-source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
-
config VIDEO_ATOMISP_OV2722
- tristate "OVT ov2722 sensor support"
+ tristate "OVT ov2722 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2
help
- This is a Video4Linux2 sensor-level driver for the OVT
- OV2722 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
- OVT is a 2M raw sensor.
+ OVT is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
config VIDEO_ATOMISP_GC2235
- tristate "Galaxy gc2235 sensor support"
+ tristate "Galaxy gc2235 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2
help
- This is a Video4Linux2 sensor-level driver for the OVT
- GC2235 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ GC2235 raw camera.
- GC2235 is a 2M raw sensor.
+ GC2235 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
config VIDEO_ATOMISP_MSRLIST_HELPER
- tristate "Helper library to load, parse and apply large register lists."
- depends on I2C
+ tristate "Helper library to load, parse and apply large register lists."
+ depends on I2C
help
- This is a helper library to be used from a sensor driver to load, parse
- and apply large register lists.
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
- To compile this driver as a module, choose M here: the
- module will be called libmsrlisthelper.
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
config VIDEO_ATOMISP_MT9M114
- tristate "Aptina mt9m114 sensor support"
+ tristate "Aptina mt9m114 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2
help
- This is a Video4Linux2 sensor-level driver for the Micron
- mt9m114 1.3 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9m114 1.3 Mpixel camera.
- mt9m114 is video camera sensor.
+ mt9m114 is video camera sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
@@ -60,16 +58,28 @@ config VIDEO_ATOMISP_GC0310
GC0310 0.3MP sensor.
config VIDEO_ATOMISP_OV2680
- tristate "Omnivision OV2680 sensor support"
+ tristate "Omnivision OV2680 sensor support"
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ help
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
+
+ ov2680 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_ATOMISP_OV5693
+ tristate "Omnivision ov5693 sensor support"
depends on ACPI
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2
help
- This is a Video4Linux2 sensor-level driver for the Omnivision
- OV2680 raw camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ ov5693 5 Mpixel camera.
- ov2680 is a 2M raw sensor.
+ ov5693 is video camera sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
#
# Kconfig for flash drivers
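The reindentation hunks in this Kconfig apply the kernel's convention: lines under a config entry are indented with one tab, and help text with one tab plus two spaces. These changes are whitespace-only even though they look like rewrites once the diff is rendered.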
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 809010af7855..7ca7378b1859 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -19,14 +19,13 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include "../include/media/lm3554.h"
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include "../include/linux/atomisp_gmin_platform.h"
#include "../include/linux/atomisp.h"
@@ -173,7 +172,7 @@ static void lm3554_flash_off_delay(struct timer_list *t)
struct lm3554 *flash = from_timer(flash, t, flash_off_delay);
struct lm3554_platform_data *pdata = flash->pdata;
- gpio_set_value(pdata->gpio_strobe, 0);
+ gpiod_set_value(pdata->gpio_strobe, 0);
}
static int lm3554_hw_strobe(struct i2c_client *client, bool strobe)
@@ -209,7 +208,7 @@ static int lm3554_hw_strobe(struct i2c_client *client, bool strobe)
* so must strobe off here
*/
if (timer_pending)
- gpio_set_value(pdata->gpio_strobe, 0);
+ gpiod_set_value(pdata->gpio_strobe, 0);
/* Restore flash current settings */
ret = lm3554_set_flash(flash);
@@ -217,7 +216,7 @@ static int lm3554_hw_strobe(struct i2c_client *client, bool strobe)
goto err;
/* Strobe on Flash */
- gpio_set_value(pdata->gpio_strobe, 1);
+ gpiod_set_value(pdata->gpio_strobe, 1);
return 0;
err:
@@ -627,7 +626,7 @@ static int __lm3554_s_power(struct lm3554 *flash, int power)
int ret;
/*initialize flash driver*/
- gpio_set_value(pdata->gpio_reset, power);
+ gpiod_set_value(pdata->gpio_reset, power);
usleep_range(100, 100 + 1);
if (power) {
@@ -766,33 +765,22 @@ static int lm3554_gpio_init(struct i2c_client *client)
struct lm3554_platform_data *pdata = flash->pdata;
int ret;
- if (!gpio_is_valid(pdata->gpio_reset))
+ if (!pdata->gpio_reset)
return -EINVAL;
- ret = gpio_direction_output(pdata->gpio_reset, 0);
+ ret = gpiod_direction_output(pdata->gpio_reset, 0);
if (ret < 0)
- goto err_gpio_reset;
+ return ret;
dev_info(&client->dev, "flash led reset successfully\n");
- if (!gpio_is_valid(pdata->gpio_strobe)) {
- ret = -EINVAL;
- goto err_gpio_dir_reset;
- }
+ if (!pdata->gpio_strobe)
+ return -EINVAL;
- ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ ret = gpiod_direction_output(pdata->gpio_strobe, 0);
if (ret < 0)
- goto err_gpio_strobe;
+ return ret;
return 0;
-
-err_gpio_strobe:
- gpio_free(pdata->gpio_strobe);
-err_gpio_dir_reset:
- gpio_direction_output(pdata->gpio_reset, 0);
-err_gpio_reset:
- gpio_free(pdata->gpio_reset);
-
- return ret;
}
static int lm3554_gpio_uninit(struct i2c_client *client)
@@ -802,16 +790,14 @@ static int lm3554_gpio_uninit(struct i2c_client *client)
struct lm3554_platform_data *pdata = flash->pdata;
int ret;
- ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ ret = gpiod_direction_output(pdata->gpio_strobe, 0);
if (ret < 0)
return ret;
- ret = gpio_direction_output(pdata->gpio_reset, 0);
+ ret = gpiod_direction_output(pdata->gpio_reset, 0);
if (ret < 0)
return ret;
- gpio_free(pdata->gpio_strobe);
- gpio_free(pdata->gpio_reset);
return 0;
}
@@ -819,18 +805,18 @@ static void *lm3554_platform_data_func(struct i2c_client *client)
{
static struct lm3554_platform_data platform_data;
- platform_data.gpio_reset =
- desc_to_gpio(gpiod_get_index(&client->dev,
- NULL, 2, GPIOD_OUT_LOW));
- platform_data.gpio_strobe =
- desc_to_gpio(gpiod_get_index(&client->dev,
- NULL, 0, GPIOD_OUT_LOW));
- platform_data.gpio_torch =
- desc_to_gpio(gpiod_get_index(&client->dev,
- NULL, 1, GPIOD_OUT_LOW));
- dev_info(&client->dev, "camera pdata: lm3554: reset: %d strobe %d torch %d\n",
- platform_data.gpio_reset, platform_data.gpio_strobe,
- platform_data.gpio_torch);
+ platform_data.gpio_reset = gpiod_get_index(&client->dev,
+ NULL, 2, GPIOD_OUT_LOW);
+ if (IS_ERR(platform_data.gpio_reset))
+ return ERR_CAST(platform_data.gpio_reset);
+ platform_data.gpio_strobe = gpiod_get_index(&client->dev,
+ NULL, 0, GPIOD_OUT_LOW);
+ if (IS_ERR(platform_data.gpio_strobe))
+ return ERR_CAST(platform_data.gpio_strobe);
+ platform_data.gpio_torch = gpiod_get_index(&client->dev,
+ NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(platform_data.gpio_torch))
+ return ERR_CAST(platform_data.gpio_torch);
/* Set to TX2 mode, then ENVM/TX2 pin is a power amplifier sync input:
* ENVM/TX pin asserted, flash forced into torch;
@@ -857,6 +843,8 @@ static int lm3554_probe(struct i2c_client *client)
return -ENOMEM;
flash->pdata = lm3554_platform_data_func(client);
+ if (IS_ERR(flash->pdata))
+ return PTR_ERR(flash->pdata);
v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
flash->sd.internal_ops = &lm3554_internal_ops;
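The lm3554 hunks above move from legacy integer GPIO numbers to GPIO descriptors: gpiod_get_index() yields a struct gpio_desc * or an ERR_PTR(), validity becomes a NULL/IS_ERR() test instead of gpio_is_valid(), and gpiod_set_value()/gpiod_direction_output() take the descriptor directly. A minimal sketch of the acquire-and-configure step (the device and index are placeholders):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_gpio_init(struct device *dev)
{
	struct gpio_desc *reset;
	int ret;

	reset = gpiod_get_index(dev, NULL, 2, GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	ret = gpiod_direction_output(reset, 0);
	if (ret)
		gpiod_put(reset);	/* release the descriptor on failure */
	return ret;
}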
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 0d60918a9b19..f5de81132177 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -212,7 +212,7 @@ misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg,
err = mt9m114_read_reg(client, data_length, reg, &val);
if (err) {
- v4l2_err(client, "misensor_rmw_reg error exit, read failed\n");
+ v4l2_err(client, "%s error exit, read failed\n", __func__);
return -EINVAL;
}
@@ -233,7 +233,7 @@ misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg,
err = mt9m114_write_reg(client, data_length, reg, val);
if (err) {
- v4l2_err(client, "misensor_rmw_reg error exit, write failed\n");
+ v4l2_err(client, "%s error exit, write failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
deleted file mode 100644
index c8d09f416c35..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config VIDEO_ATOMISP_OV5693
- tristate "Omnivision ov5693 sensor support"
- depends on ACPI
- depends on I2C && VIDEO_V4L2
- help
- This is a Video4Linux2 sensor-level driver for the Micron
- ov5693 5 Mpixel camera.
-
- ov5693 is video camera sensor.
-
- It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
index 812ce74f0635..711b7d7c9950 100644
--- a/drivers/staging/media/atomisp/include/media/lm3554.h
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -18,6 +18,7 @@
#ifndef _LM3554_H_
#define _LM3554_H_
+#include <linux/gpio/consumer.h>
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
@@ -119,9 +120,9 @@
* lm3554_platform_data - Flash controller platform data
*/
struct lm3554_platform_data {
- int gpio_torch;
- int gpio_strobe;
- int gpio_reset;
+ struct gpio_desc *gpio_torch;
+ struct gpio_desc *gpio_strobe;
+ struct gpio_desc *gpio_reset;
unsigned int current_limit;
unsigned int envm_tx2;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index a4e4eef55f35..592ea990d4ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -654,8 +654,7 @@ bool atomisp_buffers_queued(struct atomisp_sub_device *asd)
return asd->video_out_capture.buffers_in_css ||
asd->video_out_vf.buffers_in_css ||
asd->video_out_preview.buffers_in_css ||
- asd->video_out_video_capture.buffers_in_css ?
- true : false;
+ asd->video_out_video_capture.buffers_in_css;
}
/* ISP2401 */
@@ -877,7 +876,8 @@ static struct atomisp_video_pipe *__atomisp_get_pipe(
enum atomisp_metadata_type
atomisp_get_metadata_type(struct atomisp_sub_device *asd,
- enum ia_css_pipe_id pipe_id) {
+ enum ia_css_pipe_id pipe_id)
+{
if (!asd->continuous_mode->val)
return ATOMISP_MAIN_METADATA;
@@ -1211,8 +1211,7 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
default:
break;
}
- if (vb)
- {
+ if (vb) {
vb->ts = ktime_get_ns();
vb->field_count = atomic_read(&asd->sequence) << 1;
/*mark videobuffer done for dequeue*/
@@ -1234,8 +1233,7 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
* Requeue should only be done for 3a and dis buffers.
* Queue/dequeue order will change if driver recycles image buffers.
*/
- if (requeue)
- {
+ if (requeue) {
err = atomisp_css_queue_buffer(asd,
stream_id, css_pipe_id,
buf_type, &buffer);
@@ -1940,9 +1938,9 @@ int atomisp_get_frame_pgnr(struct atomisp_device *isp,
* Get internal fmt according to V4L2 fmt
*/
static enum ia_css_frame_format
-v4l2_fmt_to_sh_fmt(u32 fmt) {
- switch (fmt)
- {
+v4l2_fmt_to_sh_fmt(u32 fmt)
+{
+ switch (fmt) {
case V4L2_PIX_FMT_YUV420:
return IA_CSS_FRAME_FORMAT_YUV420;
case V4L2_PIX_FMT_YVU420:
@@ -2812,7 +2810,6 @@ int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
struct atomisp_metadata *md)
{
struct atomisp_device *isp = asd->isp;
- struct ia_css_stream_config *stream_config;
struct ia_css_stream_info *stream_info;
struct camera_mipi_info *mipi_info;
struct atomisp_metadata_buf *md_buf;
@@ -2822,8 +2819,6 @@ int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
if (flag != 0)
return -EINVAL;
- stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
- stream_config;
stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
stream_info;
@@ -2891,7 +2886,6 @@ int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
struct atomisp_metadata_with_type *md)
{
struct atomisp_device *isp = asd->isp;
- struct ia_css_stream_config *stream_config;
struct ia_css_stream_info *stream_info;
struct camera_mipi_info *mipi_info;
struct atomisp_metadata_buf *md_buf;
@@ -2901,8 +2895,6 @@ int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
if (flag != 0)
return -EINVAL;
- stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
- stream_config;
stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
stream_info;
@@ -4981,9 +4973,8 @@ enum mipi_port_id __get_mipi_port(struct atomisp_device *isp,
case ATOMISP_CAMERA_PORT_SECONDARY:
return MIPI_PORT1_ID;
case ATOMISP_CAMERA_PORT_TERTIARY:
- if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID) {
+ if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID)
return MIPI_PORT1_ID + 1;
- }
fallthrough;
default:
dev_err(isp->dev, "unsupported port: %d\n", port);
@@ -6557,7 +6548,7 @@ int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
if (!enable)
return -EINVAL;
- value = *enable > 0 ? true : false;
+ value = *enable > 0;
atomisp_en_dz_capt_pipe(asd, value);
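Several hunks in atomisp_cmd.c drop redundant "? true : false" ternaries; C relational and logical operators already yield 0 or 1, so the expression converts to bool directly. A trivial illustration:

#include <stdbool.h>

static bool example(int a, int b, const int *enable)
{
	bool queued = a || b;		/* not: (a || b) ? true : false */
	bool value = *enable > 0;	/* not: (*enable > 0) ? true : false */

	return queued && value;
}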
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index 1b2b2c68025b..faa0935e536a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -159,19 +159,14 @@ static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
spin_unlock_irqrestore(&mmio_lock, flags);
}
-static int atomisp_css2_dbg_print(const char *fmt, va_list args)
-{
- vprintk(fmt, args);
- return 0;
-}
-
-static int atomisp_css2_dbg_ftrace_print(const char *fmt, va_list args)
+static int __printf(1, 0) atomisp_css2_dbg_ftrace_print(const char *fmt,
+ va_list args)
{
ftrace_vprintk(fmt, args);
return 0;
}
-static int atomisp_css2_err_print(const char *fmt, va_list args)
+static int __printf(1, 0) atomisp_vprintk(const char *fmt, va_list args)
{
vprintk(fmt, args);
return 0;
@@ -711,7 +706,6 @@ static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
return true;
return false;
- fallthrough;
case ATOMISP_RUN_MODE_VIDEO:
if (!asd->continuous_mode->val) {
if (pipe_id == IA_CSS_PIPE_ID_VIDEO ||
@@ -869,8 +863,7 @@ static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
isp->css_env.isp_css_env.print_env.debug_print =
atomisp_css2_dbg_ftrace_print;
else if (opt == 2)
- isp->css_env.isp_css_env.print_env.debug_print =
- atomisp_css2_dbg_print;
+ isp->css_env.isp_css_env.print_env.debug_print = atomisp_vprintk;
else
ret = -EINVAL;
@@ -903,7 +896,7 @@ int atomisp_css_load_firmware(struct atomisp_device *isp)
__set_css_print_env(isp, dbg_func);
- isp->css_env.isp_css_env.print_env.error_print = atomisp_css2_err_print;
+ isp->css_env.isp_css_env.print_env.error_print = atomisp_vprintk;
/* load isp fw into ISP memory */
err = ia_css_load_firmware(isp->dev, &isp->css_env.isp_css_env,
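The __printf(1, 0) annotation added above marks parameter 1 as a printf-style format string whose arguments arrive through a va_list (hence the 0), giving callers -Wformat checking; folding the two identical vprintk() wrappers into atomisp_vprintk() also removes duplicated code. A sketch of the annotated form:

#include <linux/kernel.h>
#include <stdarg.h>

static int __printf(1, 0) example_vprintk(const char *fmt, va_list args)
{
	vprintk(fmt, args);
	return 0;
}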
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
index fa5918270614..e5553df5bad4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.c
@@ -25,15 +25,25 @@
#include "atomisp_ioctl.h"
#include "atomisp_compat_ioctl32.h"
-/* Macro borrowed from v4l2-compat-ioctl32.c */
-/* Use the same argument order as copy_in_user */
-#define assign_in_user(to, from) \
-({ \
- typeof(*from) __assign_tmp; \
- \
- get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \
+/* Macros borrowed from v4l2-compat-ioctl32.c */
+
+#define get_user_cast(__x, __ptr) \
+({ \
+ get_user(__x, (typeof(*__ptr) __user *)(__ptr)); \
})
+#define put_user_force(__x, __ptr) \
+({ \
+ put_user((typeof(*__x) __force *)(__x), __ptr); \
+})
+
+/* Use the same argument order as copy_in_user */
+#define assign_in_user(to, from) \
+({ \
+ typeof(*from) __assign_tmp; \
+ \
+ get_user_cast(__assign_tmp, from) || put_user(__assign_tmp, to);\
+})
static int get_atomisp_histogram32(struct atomisp_histogram __user *kp,
struct atomisp_histogram32 __user *up)
@@ -64,13 +74,13 @@ static int put_atomisp_histogram32(struct atomisp_histogram __user *kp,
}
static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
- struct v4l2_framebuffer32 __user *up)
+ struct v4l2_framebuffer32 __user *up)
{
compat_uptr_t tmp;
if (!access_ok(up, sizeof(struct v4l2_framebuffer32)) ||
get_user(tmp, &up->base) ||
- put_user(compat_ptr(tmp), &kp->base) ||
+ put_user_force(compat_ptr(tmp), &kp->base) ||
assign_in_user(&kp->capability, &up->capability) ||
assign_in_user(&kp->flags, &up->flags) ||
copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
@@ -244,10 +254,10 @@ static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config __user
get_user(ycoords_y, &up->ycoords_y) ||
get_user(xcoords_uv, &up->xcoords_uv) ||
get_user(ycoords_uv, &up->ycoords_uv) ||
- put_user(compat_ptr(xcoords_y), &kp->xcoords_y) ||
- put_user(compat_ptr(ycoords_y), &kp->ycoords_y) ||
- put_user(compat_ptr(xcoords_uv), &kp->xcoords_uv) ||
- put_user(compat_ptr(ycoords_uv), &kp->ycoords_uv))
+ put_user_force(compat_ptr(xcoords_y), &kp->xcoords_y) ||
+ put_user_force(compat_ptr(ycoords_y), &kp->ycoords_y) ||
+ put_user_force(compat_ptr(xcoords_uv), &kp->xcoords_uv) ||
+ put_user_force(compat_ptr(ycoords_uv), &kp->ycoords_uv))
return -EFAULT;
return 0;
@@ -279,7 +289,7 @@ static int put_atomisp_3a_statistics32(struct atomisp_3a_statistics __user *kp,
void __user *rgby_data;
if (!access_ok(up, sizeof(struct atomisp_3a_statistics32)) ||
- copy_to_user(up, kp, sizeof(struct atomisp_grid_info)) ||
+ copy_in_user(up, kp, sizeof(struct atomisp_grid_info)) ||
get_user(rgby_data, &kp->rgby_data) ||
put_user(ptr_to_compat(rgby_data), &up->rgby_data) ||
get_user(data, &kp->data) ||
@@ -305,7 +315,7 @@ static int get_atomisp_metadata_stat32(struct atomisp_metadata __user *kp,
assign_in_user(&kp->stride, &up->stride) ||
assign_in_user(&kp->exp_id, &up->exp_id) ||
get_user(effective_width, &up->effective_width) ||
- put_user(compat_ptr(effective_width), &kp->effective_width))
+ put_user_force(compat_ptr(effective_width), &kp->effective_width))
return -EFAULT;
return 0;
@@ -315,7 +325,7 @@ static int put_atomisp_metadata_stat32(struct atomisp_metadata __user *kp,
struct atomisp_metadata32 __user *up)
{
void __user *data;
- void __user *effective_width;
+ void *effective_width;
if (!access_ok(up, sizeof(struct atomisp_metadata32)) ||
get_user(data, &kp->data) ||
@@ -325,7 +335,8 @@ static int put_atomisp_metadata_stat32(struct atomisp_metadata __user *kp,
assign_in_user(&up->stride, &kp->stride) ||
assign_in_user(&up->exp_id, &kp->exp_id) ||
get_user(effective_width, &kp->effective_width) ||
- put_user(ptr_to_compat(effective_width), &up->effective_width))
+ put_user(ptr_to_compat((void __user *)effective_width),
+ &up->effective_width))
return -EFAULT;
return 0;
@@ -336,7 +347,7 @@ put_atomisp_metadata_by_type_stat32(struct atomisp_metadata_with_type __user *kp
struct atomisp_metadata_with_type32 __user *up)
{
void __user *data;
- void __user *effective_width;
+ u32 *effective_width;
if (!access_ok(up, sizeof(struct atomisp_metadata_with_type32)) ||
get_user(data, &kp->data) ||
@@ -346,7 +357,7 @@ put_atomisp_metadata_by_type_stat32(struct atomisp_metadata_with_type __user *kp
assign_in_user(&up->stride, &kp->stride) ||
assign_in_user(&up->exp_id, &kp->exp_id) ||
get_user(effective_width, &kp->effective_width) ||
- put_user(ptr_to_compat(effective_width),
+ put_user(ptr_to_compat((void __user *)effective_width),
&up->effective_width) ||
assign_in_user(&up->type, &kp->type))
return -EFAULT;
@@ -369,7 +380,7 @@ get_atomisp_metadata_by_type_stat32(struct atomisp_metadata_with_type __user *kp
assign_in_user(&kp->stride, &up->stride) ||
assign_in_user(&kp->exp_id, &up->exp_id) ||
get_user(effective_width, &up->effective_width) ||
- put_user(compat_ptr(effective_width), &kp->effective_width) ||
+ put_user_force(compat_ptr(effective_width), &kp->effective_width) ||
assign_in_user(&kp->type, &up->type))
return -EFAULT;
@@ -430,7 +441,7 @@ static int get_atomisp_overlay32(struct atomisp_overlay __user *kp,
if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
get_user(frame, &up->frame) ||
- put_user(compat_ptr(frame), &kp->frame) ||
+ put_user_force(compat_ptr(frame), &kp->frame) ||
assign_in_user(&kp->bg_y, &up->bg_y) ||
assign_in_user(&kp->bg_u, &up->bg_u) ||
assign_in_user(&kp->bg_v, &up->bg_v) ||
@@ -456,11 +467,11 @@ static int get_atomisp_overlay32(struct atomisp_overlay __user *kp,
static int put_atomisp_overlay32(struct atomisp_overlay __user *kp,
struct atomisp_overlay32 __user *up)
{
- void __user *frame;
+ void *frame;
if (!access_ok(up, sizeof(struct atomisp_overlay32)) ||
get_user(frame, &kp->frame) ||
- put_user(ptr_to_compat(frame), &up->frame) ||
+ put_user(ptr_to_compat((void __user *)frame), &up->frame) ||
assign_in_user(&up->bg_y, &kp->bg_y) ||
assign_in_user(&up->bg_u, &kp->bg_u) ||
assign_in_user(&up->bg_v, &kp->bg_v) ||
@@ -493,7 +504,7 @@ get_atomisp_calibration_group32(struct atomisp_calibration_group __user *kp,
assign_in_user(&kp->size, &up->size) ||
assign_in_user(&kp->type, &up->type) ||
get_user(calb_grp_values, &up->calb_grp_values) ||
- put_user(compat_ptr(calb_grp_values), &kp->calb_grp_values))
+ put_user_force(compat_ptr(calb_grp_values), &kp->calb_grp_values))
return -EFAULT;
return 0;
@@ -503,13 +514,14 @@ static int
put_atomisp_calibration_group32(struct atomisp_calibration_group __user *kp,
struct atomisp_calibration_group32 __user *up)
{
- void __user *calb_grp_values;
+ void *calb_grp_values;
if (!access_ok(up, sizeof(struct atomisp_calibration_group32)) ||
assign_in_user(&up->size, &kp->size) ||
assign_in_user(&up->type, &kp->type) ||
get_user(calb_grp_values, &kp->calb_grp_values) ||
- put_user(ptr_to_compat(calb_grp_values), &up->calb_grp_values))
+ put_user(ptr_to_compat((void __user *)calb_grp_values),
+ &up->calb_grp_values))
return -EFAULT;
return 0;
@@ -523,7 +535,7 @@ static int get_atomisp_acc_fw_load32(struct atomisp_acc_fw_load __user *kp,
if (!access_ok(up, sizeof(struct atomisp_acc_fw_load32)) ||
assign_in_user(&kp->size, &up->size) ||
assign_in_user(&kp->fw_handle, &up->fw_handle) ||
- get_user(data, &up->data) ||
+ get_user_cast(data, &up->data) ||
put_user(compat_ptr(data), &kp->data))
return -EFAULT;
@@ -627,7 +639,7 @@ static int get_atomisp_shading_table32(struct atomisp_shading_table __user *kp,
compat_uptr_t tmp;
if (get_user(tmp, &up->data[n]) ||
- put_user(compat_ptr(tmp), &kp->data[n]))
+ put_user_force(compat_ptr(tmp), &kp->data[n]))
return -EFAULT;
}
return 0;
@@ -712,17 +724,17 @@ static int get_atomisp_parameters32(struct atomisp_parameters __user *kp,
struct atomisp_morph_table morph_table;
struct atomisp_dis_coefficients dvs2_coefs;
struct atomisp_dvs_6axis_config dvs_6axis_config;
- } __user *karg = (void *)(kp + 1);
+ } __user *karg = (void __user *)(kp + 1);
if (!access_ok(up, sizeof(struct atomisp_parameters32)))
return -EFAULT;
while (n >= 0) {
- compat_uptr_t *src = (compat_uptr_t *)up + n;
+ compat_uptr_t __user *src = (compat_uptr_t __user *)up + n;
void * __user *dst = (void * __user *)kp + n;
compat_uptr_t tmp;
- if (get_user(tmp, src) || put_user(compat_ptr(tmp), dst))
+ if (get_user_cast(tmp, src) || put_user_force(compat_ptr(tmp), dst))
return -EFAULT;
n--;
}
@@ -738,26 +750,26 @@ static int get_atomisp_parameters32(struct atomisp_parameters __user *kp,
/* handle shading table */
if (stp && (get_atomisp_shading_table32(&karg->shading_table,
compat_ptr(stp)) ||
- put_user(&karg->shading_table, &kp->shading_table)))
+ put_user_force(&karg->shading_table, &kp->shading_table)))
return -EFAULT;
/* handle morph table */
if (mtp && (get_atomisp_morph_table32(&karg->morph_table,
compat_ptr(mtp)) ||
- put_user(&karg->morph_table, &kp->morph_table)))
+ put_user_force(&karg->morph_table, &kp->morph_table)))
return -EFAULT;
/* handle dvs2 coefficients */
if (dcp && (get_atomisp_dis_coefficients32(&karg->dvs2_coefs,
compat_ptr(dcp)) ||
- put_user(&karg->dvs2_coefs, &kp->dvs2_coefs)))
+ put_user_force(&karg->dvs2_coefs, &kp->dvs2_coefs)))
return -EFAULT;
/* handle dvs 6axis configuration */
if (dscp &&
(get_atomisp_dvs_6axis_config32(&karg->dvs_6axis_config,
compat_ptr(dscp)) ||
- put_user(&karg->dvs_6axis_config, &kp->dvs_6axis_config)))
+ put_user_force(&karg->dvs_6axis_config, &kp->dvs_6axis_config)))
return -EFAULT;
return 0;
@@ -814,7 +826,7 @@ get_atomisp_sensor_ae_bracketing_lut(struct atomisp_sensor_ae_bracketing_lut __u
if (!access_ok(up, sizeof(struct atomisp_sensor_ae_bracketing_lut32)) ||
assign_in_user(&kp->lut_size, &up->lut_size) ||
get_user(lut, &up->lut) ||
- put_user(compat_ptr(lut), &kp->lut))
+ put_user_force(compat_ptr(lut), &kp->lut))
return -EFAULT;
return 0;
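The macros added at the top of this file keep sparse's __user address-space checking intact while copying between a native and a compat structure: assign_in_user() bounces one scalar through a stack temporary, get_user_cast() restores the __user qualifier on the source pointer, and put_user_force() force-casts a kernel-built pointer (typically from compat_ptr()) before storing it into a __user field. What assign_in_user() amounts to for a single u32 field, with hypothetical struct names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct k_example { u32 field; };
struct u_example { u32 field; };

static int example_assign(struct k_example __user *kp,
			  struct u_example __user *up)
{
	u32 tmp;

	/* read from one user pointer, write to the other */
	if (get_user(tmp, &up->field) || put_user(tmp, &kp->field))
		return -EFAULT;
	return 0;
}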
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index 0df46a1af5f0..135994d44802 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -817,6 +817,9 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
int ret;
int value;
+ if (!gs || gs->v1p8_on == on)
+ return 0;
+
if (gs->v1p8_gpio >= 0) {
pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
gs->v1p8_gpio);
@@ -827,8 +830,6 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
pr_err("V1P8 GPIO initialization failed\n");
}
- if (!gs || gs->v1p8_on == on)
- return 0;
gs->v1p8_on = on;
if (gs->v1p8_gpio >= 0)
@@ -871,6 +872,9 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
int ret;
int value;
+ if (WARN_ON(!gs))
+ return -ENODEV;
+
if (gs->v2p8_gpio >= 0) {
pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
gs->v2p8_gpio);
@@ -881,7 +885,7 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
pr_err("V2P8 GPIO initialization failed\n");
}
- if (!gs || gs->v2p8_on == on)
+ if (gs->v2p8_on == on)
return 0;
gs->v2p8_on = on;
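The gmin hunks above fix an order-of-checks bug: gs->v1p8_gpio was dereferenced before the !gs test, so the NULL guard could never fire. The fix hoists the guard to the first statement, the usual shape being:

struct example_gmin { int on_state; };	/* hypothetical stand-in */

static int example_ctrl(struct example_gmin *gs, int on)
{
	if (!gs || gs->on_state == on)	/* guard before any gs-> access */
		return 0;

	gs->on_state = on;
	return 0;
}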
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 65b0c8a662a0..2ae50decfc8b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -511,8 +511,8 @@ const struct atomisp_format_bridge atomisp_output_fmts[] = {
#endif
};
-const struct atomisp_format_bridge *atomisp_get_format_bridge(
- unsigned int pixelformat)
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge(unsigned int pixelformat)
{
unsigned int i;
@@ -524,8 +524,8 @@ const struct atomisp_format_bridge *atomisp_get_format_bridge(
return NULL;
}
-const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(
- u32 mbus_code)
+const struct atomisp_format_bridge *
+atomisp_get_format_bridge_from_mbus(u32 mbus_code)
{
unsigned int i;
@@ -605,8 +605,8 @@ static int atomisp_enum_input(struct file *file, void *fh,
return 0;
}
-static unsigned int atomisp_subdev_streaming_count(
- struct atomisp_sub_device *asd)
+static unsigned int
+atomisp_subdev_streaming_count(struct atomisp_sub_device *asd)
{
return asd->video_out_preview.capq.streaming
+ asd->video_out_capture.capq.streaming
@@ -797,7 +797,7 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
continue;
}
- strlcpy(f->description, format->description,
+ strscpy(f->description, format->description,
sizeof(f->description));
f->pixelformat = format->pixelformat;
return 0;
@@ -1274,13 +1274,15 @@ done:
}
}
- /* Workaround: Due to the design of HALv3,
+ /*
+ * Workaround: Due to the design of HALv3,
* sometimes in ZSL or SDV mode HAL needs to
* capture multiple images within one streaming cycle.
* But the capture number cannot be determined by HAL.
* So HAL only sets the capture number to be 1 and queue multiple
* buffers. Atomisp driver needs to check this case and re-trigger
- * CSS to do capture when new buffer is queued. */
+ * CSS to do capture when new buffer is queued.
+ */
if (asd->continuous_mode->val &&
atomisp_subdev_source_pad(vdev)
== ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE &&
@@ -1805,7 +1807,7 @@ start_sensor:
/*
* set freq to max when streaming count > 1 which indicate
* dual camera would run
- */
+ */
if (atomisp_streaming_count(isp) > 1) {
if (atomisp_freq_scaling(isp,
ATOMISP_DFS_MODE_MAX, false) < 0)
@@ -1827,11 +1829,10 @@ start_sensor:
dev_err(isp->dev, "master slave sensor stream on failed!\n");
goto out;
}
- if (!IS_ISP2401) {
+ if (!IS_ISP2401)
__wdt_on_master_slave_sensor(isp, wdt_duration);
- } else {
+ else
__wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, true);
- }
goto start_delay_wq;
} else if (asd->depth_mode->val && (atomisp_streaming_count(isp) <
ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
@@ -2435,8 +2436,10 @@ static int atomisp_g_ext_ctrls(struct file *file, void *fh,
struct v4l2_control ctrl;
int i, ret = 0;
- /* input_lock is not need for the Camera related IOCTLs
- * The input_lock downgrade the FPS of 3A*/
+ /*
+ * input_lock is not need for the Camera related IOCTLs
+ * The input_lock downgrade the FPS of 3A
+ */
ret = atomisp_camera_g_ext_ctrls(file, fh, c);
if (ret != -EINVAL)
return ret;
@@ -2518,8 +2521,10 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
ret =
v4l2_s_ctrl(NULL, isp->flash->ctrl_handler,
&ctrl);
- /* When flash mode is changed we need to reset
- * flash state */
+ /*
+ * When flash mode is changed we need to reset
+ * flash state
+ */
if (ctrl.id == V4L2_CID_FLASH_MODE) {
asd->params.flash_state =
ATOMISP_FLASH_IDLE;
@@ -2557,8 +2562,10 @@ static int atomisp_s_ext_ctrls(struct file *file, void *fh,
struct v4l2_control ctrl;
int i, ret = 0;
- /* input_lock is not need for the Camera related IOCTLs
- * The input_lock downgrade the FPS of 3A*/
+ /*
+ * input_lock is not need for the Camera related IOCTLs
+ * The input_lock downgrade the FPS of 3A
+ */
ret = atomisp_camera_s_ext_ctrls(file, fh, c);
if (ret != -EINVAL)
return ret;
@@ -2587,7 +2594,7 @@ static int atomisp_g_parm(struct file *file, void *fh,
struct atomisp_device *isp = video_get_drvdata(vdev);
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ dev_err(isp->dev, "unsupported v4l2 buf type\n");
return -EINVAL;
}
@@ -2609,7 +2616,7 @@ static int atomisp_s_parm(struct file *file, void *fh,
int fps;
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ dev_err(isp->dev, "unsupported v4l2 buf type\n");
return -EINVAL;
}
@@ -2667,7 +2674,7 @@ static int atomisp_s_parm_file(struct file *file, void *fh,
struct atomisp_device *isp = video_get_drvdata(vdev);
if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- dev_err(isp->dev, "unsupport v4l2 buf type for output\n");
+ dev_err(isp->dev, "unsupported v4l2 buf type for output\n");
return -EINVAL;
}
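On the strlcpy() to strscpy() change above: strscpy() is the kernel's preferred bounded copy because it always NUL-terminates, stops reading the source at the destination size (strlcpy() walks the whole source to compute its return value), and reports truncation as -E2BIG rather than a would-be length. For example:

#include <linux/kernel.h>
#include <linux/string.h>

static void example_copy(char dst[32], const char *src)
{
	if (strscpy(dst, src, 32) == -E2BIG)
		pr_warn("example: description truncated\n");
}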
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 6ba817f15655..52b9fb18c87f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -410,8 +410,10 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
if (atomisp_subdev_format_conversion(isp_sd,
isp_sd->capture_pad)
- && crop[pad]->width && crop[pad]->height)
- crop[pad]->width -= padding_w, crop[pad]->height -= padding_h;
+ && crop[pad]->width && crop[pad]->height) {
+ crop[pad]->width -= padding_w;
+ crop[pad]->height -= padding_h;
+ }
/* if subdev type is SOC camera,we do not need to set DVS */
if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA)
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index 0114b040247b..0295e2e32d79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1429,7 +1429,6 @@ atomisp_load_firmware(struct atomisp_device *isp)
*/
static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id)
{
- unsigned int a0_max_id = 0;
const char *name;
const char *product;
@@ -1437,11 +1436,9 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id
switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
case ATOMISP_PCI_DEVICE_SOC_MRFLD:
- a0_max_id = ATOMISP_PCI_REV_MRFLD_A0_MAX;
name = "Merrifield";
break;
case ATOMISP_PCI_DEVICE_SOC_BYT:
- a0_max_id = ATOMISP_PCI_REV_BYT_A0_MAX;
name = "Baytrail";
break;
case ATOMISP_PCI_DEVICE_SOC_ANN:
@@ -1573,7 +1570,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
spin_lock_init(&isp->lock);
/* This is not a true PCI device on SoC, so the delay is not needed. */
- pdev->d3_delay = 0;
+ pdev->d3hot_delay = 0;
pci_set_drvdata(pdev, isp);
@@ -1708,8 +1705,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
pci_set_master(pdev);
- err = pci_enable_msi(pdev);
- if (err) {
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
goto enable_msi_fail;
}
@@ -1827,7 +1824,7 @@ register_entities_fail:
initialize_modules_fail:
cpu_latency_qos_remove_request(&isp->pm_qos);
atomisp_msi_irq_uninit(isp);
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
enable_msi_fail:
fw_validation_fail:
release_firmware(isp->firmware);
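The MSI hunks above move to the modern IRQ-vector API: pci_alloc_irq_vectors() requests between min and max vectors of the given types and returns the number allocated or a negative errno, and pci_free_irq_vectors() replaces pci_disable_msi() on the error and teardown paths. Sketch:

#include <linux/pci.h>

static int example_enable_msi(struct pci_dev *pdev)
{
	/* ask for exactly one MSI vector */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);

	if (nvec < 0)
		return nvec;
	return 0;
}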
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
index 8e661091f7d9..9a8d8f546da7 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c
@@ -14,6 +14,7 @@
*/
#include "system_global.h"
+#include "csi_rx_global.h"
const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
4, /* 4 entries at CSI_RX_BACKEND0_ID*/
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
index 58fec54a914d..8d19c9875a71 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c
@@ -15,6 +15,7 @@
#include <type_support.h>
#include "system_global.h"
+#include "ibuf_ctrl_global.h"
const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
8, /* IBUF_CTRL0_ID supports at most 8 processes */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
index 4952b42d8191..f71841195ac1 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h
@@ -17,6 +17,7 @@
#define __IBUF_CTRL_LOCAL_H_INCLUDED__
#include "ibuf_ctrl_global.h"
+#include "ibuf_ctrl_local.h"
typedef struct ibuf_ctrl_proc_state_s ibuf_ctrl_proc_state_t;
typedef struct ibuf_ctrl_state_s ibuf_ctrl_state_t;
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h
deleted file mode 100644
index a58e8477da6e..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_private.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __IBUF_CTRL_PRIVATE_H_INCLUDED__
-#define __IBUF_CTRL_PRIVATE_H_INCLUDED__
-
-#include "ibuf_ctrl_public.h"
-
-#include "device_access.h" /* ia_css_device_load_uint32 */
-
-#include "assert_support.h" /* assert */
-#include "print_support.h" /* print */
-
-/*****************************************************
- *
- * Native command interface (NCI).
- *
- *****************************************************/
-/**
- * @brief Get the ibuf-controller state.
- * Refer to "ibuf_ctrl_public.h" for details.
- */
-STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_state(
- const ibuf_ctrl_ID_t ID,
- ibuf_ctrl_state_t *state)
-{
- u32 i;
-
- state->recalc_words =
- ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS);
- state->arbiters =
- ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS);
-
- /*
- * Get the values of the register-set per
- * ibuf-controller process.
- */
- for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
- ibuf_ctrl_get_proc_state(
- ID,
- i,
- &state->proc_state[i]);
- }
-}
-
-/**
- * @brief Get the state of the ibuf-controller process.
- * Refer to "ibuf_ctrl_public.h" for details.
- */
-STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_proc_state(
- const ibuf_ctrl_ID_t ID,
- const u32 proc_id,
- ibuf_ctrl_proc_state_t *state)
-{
- hrt_address reg_bank_offset;
-
- reg_bank_offset =
- _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);
-
- state->num_items =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE);
-
- state->num_stores =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME);
-
- state->dma_channel =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL);
-
- state->dma_command =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD);
-
- state->ibuf_st_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS);
-
- state->ibuf_stride =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE);
-
- state->ibuf_end_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS);
-
- state->dest_st_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS);
-
- state->dest_stride =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE);
-
- state->dest_end_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS);
-
- state->sync_frame =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME);
-
- state->sync_command =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD);
-
- state->store_command =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD);
-
- state->shift_returned_items =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS);
-
- state->elems_ibuf =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF);
-
- state->elems_dest =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST);
-
- state->cur_stores =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES);
-
- state->cur_acks =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ACKS);
-
- state->cur_s2m_ibuf_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR);
-
- state->cur_dma_ibuf_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR);
-
- state->cur_dma_dest_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR);
-
- state->cur_isp_dest_addr =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR);
-
- state->dma_cmds_send =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND);
-
- state->main_cntrl_state =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE);
-
- state->dma_sync_state =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE);
-
- state->isp_sync_state =
- ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE);
-}
-
-/**
- * @brief Dump the ibuf-controller state.
- * Refer to "ibuf_ctrl_public.h" for details.
- */
-STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
- const ibuf_ctrl_ID_t ID,
- ibuf_ctrl_state_t *state)
-{
- u32 i;
-
- ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID,
- state->recalc_words);
- ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters);
-
- /*
- * Dump the values of the register-set per
- * ibuf-controller process.
- */
- for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
- ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i,
- state->proc_state[i].num_items);
- ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i,
- state->proc_state[i].num_stores);
- ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i,
- state->proc_state[i].dma_channel);
- ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i,
- state->proc_state[i].dma_command);
- ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i,
- state->proc_state[i].ibuf_st_addr);
- ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i,
- state->proc_state[i].ibuf_stride);
- ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i,
- state->proc_state[i].ibuf_end_addr);
- ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i,
- state->proc_state[i].dest_st_addr);
- ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i,
- state->proc_state[i].dest_stride);
- ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i,
- state->proc_state[i].dest_end_addr);
- ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i,
- state->proc_state[i].sync_frame);
- ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i,
- state->proc_state[i].sync_command);
- ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i,
- state->proc_state[i].store_command);
- ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n",
- ID, i,
- state->proc_state[i].shift_returned_items);
- ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i,
- state->proc_state[i].elems_ibuf);
- ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i,
- state->proc_state[i].elems_dest);
- ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i,
- state->proc_state[i].cur_stores);
- ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i,
- state->proc_state[i].cur_acks);
- ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID,
- i,
- state->proc_state[i].cur_s2m_ibuf_addr);
- ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID,
- i,
- state->proc_state[i].cur_dma_ibuf_addr);
- ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID,
- i,
- state->proc_state[i].cur_dma_dest_addr);
- ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID,
- i,
- state->proc_state[i].cur_isp_dest_addr);
- ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i,
- state->proc_state[i].dma_cmds_send);
- ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID,
- i,
- state->proc_state[i].main_cntrl_state);
- ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i,
- state->proc_state[i].dma_sync_state);
- ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i,
- state->proc_state[i].isp_sync_state);
- }
-}
-
-/* end of NCI */
-
-/*****************************************************
- *
- * Device level interface (DLI).
- *
- *****************************************************/
-/**
- * @brief Load the register value.
- * Refer to "ibuf_ctrl_public.h" for details.
- */
-STORAGE_CLASS_IBUF_CTRL_C hrt_data ibuf_ctrl_reg_load(
- const ibuf_ctrl_ID_t ID,
- const hrt_address reg)
-{
- assert(ID < N_IBUF_CTRL_ID);
- assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
- return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data));
-}
-
-/**
- * @brief Store a value to the register.
- * Refer to "ibuf_ctrl_public.h" for details.
- */
-STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
- const ibuf_ctrl_ID_t ID,
- const hrt_address reg,
- const hrt_data value)
-{
- assert(ID < N_IBUF_CTRL_ID);
- assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
-
- ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
-}
-
-/* end of DLI */
-
-#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
index 5809dbb6e5aa..2a5159945a44 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c
@@ -13,16 +13,10 @@
* more details.
*/
-#include "isys_dma.h"
+#include "system_local.h"
+#include "isys_dma_global.h"
#include "assert_support.h"
-
-#ifndef __INLINE_ISYS2401_DMA__
-/*
- * Include definitions for isys dma register access functions. isys_dma.h
- * includes declarations of these functions by including isys_dma_public.h.
- */
#include "isys_dma_private.h"
-#endif
const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = {
N_ISYS2401_DMA_CHANNEL
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h
deleted file mode 100644
index 878933261a43..000000000000
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_local.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ISYS_DMA_LOCAL_H_INCLUDED__
-#define __ISYS_DMA_LOCAL_H_INCLUDED__
-
-#include "isys_dma_global.h"
-
-#endif /* __ISYS_DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
index eb35b7bcead4..a313e1dc7c71 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h
@@ -23,10 +23,9 @@
#include "dma_v2_defs.h"
#include "print_support.h"
-STORAGE_CLASS_ISYS2401_DMA_C void isys2401_dma_reg_store(
- const isys2401_dma_ID_t dma_id,
- const unsigned int reg,
- const hrt_data value)
+void isys2401_dma_reg_store(const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value)
{
unsigned int reg_loc;
@@ -40,9 +39,8 @@ STORAGE_CLASS_ISYS2401_DMA_C void isys2401_dma_reg_store(
ia_css_device_store_uint32(reg_loc, value);
}
-STORAGE_CLASS_ISYS2401_DMA_C hrt_data isys2401_dma_reg_load(
- const isys2401_dma_ID_t dma_id,
- const unsigned int reg)
+hrt_data isys2401_dma_reg_load(const isys2401_dma_ID_t dma_id,
+ const unsigned int reg)
{
unsigned int reg_loc;
hrt_data value;
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
index 99576af4713c..b6135c4b6eea 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c
@@ -28,8 +28,7 @@
#endif
/* Public interface */
-STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
- const isys_irq_ID_t isys_irqc_id)
+void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id)
{
assert(isys_irqc_id < N_ISYS_IRQ_ID);
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
index e3d6d5e1634e..a76987190292 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
@@ -18,7 +18,7 @@
#include <type_support.h>
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
typedef struct isys_irqc_state_s isys_irqc_state_t;
@@ -31,6 +31,6 @@ struct isys_irqc_state_s {
/*hrt_data clear; */ /* write-only register */
};
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_LOCAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
index 91ef000d76dc..fb168c25bdfc 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
@@ -19,7 +19,7 @@
#include "isys_irq_global.h"
#include "isys_irq_local.h"
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* -------------------------------------------------------+
| Native command interface (NCI) |
@@ -29,7 +29,7 @@
* @brief Get the isys irq status.
* Refer to "isys_irq.h" for details.
*/
-STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_get(
+void isys_irqc_state_get(
const isys_irq_ID_t isys_irqc_id,
isys_irqc_state_t *state)
{
@@ -48,7 +48,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_get(
* @brief Dump the isys irq status.
* Refer to "isys_irq.h" for details.
*/
-STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
+void isys_irqc_state_dump(
const isys_irq_ID_t isys_irqc_id,
const isys_irqc_state_t *state)
{
@@ -65,7 +65,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
+ -------------------------------------------------------*/
/* Support functions */
-STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_reg_store(
+void isys_irqc_reg_store(
const isys_irq_ID_t isys_irqc_id,
const unsigned int reg_idx,
const hrt_data value)
@@ -82,7 +82,7 @@ STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_reg_store(
ia_css_device_store_uint32(reg_addr, value);
}
-STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
+hrt_data isys_irqc_reg_load(
const isys_irq_ID_t isys_irqc_id,
const unsigned int reg_idx)
{
@@ -102,6 +102,6 @@ STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
/* end of DLI */
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_PRIVATE_H__ */
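The STORAGE_CLASS_ISYS2401_IRQ_C and STORAGE_CLASS_ISYS2401_DMA_C deletions in these files drop the configurable inline/static storage-class macros, leaving the accessors with plain external linkage, along the lines of:

hrt_data isys_irqc_reg_load(const isys_irq_ID_t isys_irqc_id,
			    const unsigned int reg_idx);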
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
index 4faa519219ee..1c7938d8ccb5 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/host/pixelgen_private.h
@@ -22,6 +22,43 @@
/*****************************************************
*
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address) - 1);
+ return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(
+ hrt_data));
+}
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "pixelgen_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(hrt_data),
+ value);
+}
+
+/* end of DLI */
+
+/*****************************************************
+ *
* Native command interface (NCI).
*
*****************************************************/
@@ -144,40 +181,4 @@ STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_dump_state(
}
/* end of NCI */
-/*****************************************************
- *
- * Device level interface (DLI).
- *
- *****************************************************/
-/**
- * @brief Load the register value.
- * Refer to "pixelgen_public.h" for details.
- */
-STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load(
- const pixelgen_ID_t ID,
- const hrt_address reg)
-{
- assert(ID < N_PIXELGEN_ID);
- assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address) - 1);
- return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(
- hrt_data));
-}
-
-/**
- * @brief Store a value to the register.
- * Refer to "pixelgen_ctrl_public.h" for details.
- */
-STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
- const pixelgen_ID_t ID,
- const hrt_address reg,
- const hrt_data value)
-{
- assert(ID < N_PIXELGEN_ID);
- assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
-
- ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg * sizeof(hrt_data),
- value);
-}
-
-/* end of DLI */
#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
index 1b9f03d57659..56c5ed89b3cc 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/ibuf_ctrl_global.h
@@ -33,8 +33,7 @@
#define _IBUF_CNTRL_DMA_SYNC_WAIT_FOR_SYNC 1
#define _IBUF_CNTRL_DMA_SYNC_FSM_WAIT_FOR_ACK (0x3 << 1)
-typedef struct ib_buffer_s ib_buffer_t;
-struct ib_buffer_s {
+struct isp2401_ib_buffer_s {
u32 start_addr; /* start address of the buffer in the
* "input-buffer hardware block"
*/
@@ -42,6 +41,7 @@ struct ib_buffer_s {
u32 stride; /* stride per buffer line (in bytes) */
u32 lines; /* lines in the buffer */
};
+typedef struct isp2401_ib_buffer_s isp2401_ib_buffer_t;
typedef struct ibuf_ctrl_cfg_s ibuf_ctrl_cfg_t;
struct ibuf_ctrl_cfg_s {
@@ -58,7 +58,7 @@ struct ibuf_ctrl_cfg_s {
u32 elems_per_word_in_dest;
} dma_cfg;
- ib_buffer_t ib_buffer;
+ isp2401_ib_buffer_t ib_buffer;
struct {
u32 stride;
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
index 156b4c95277e..a81e4d13ac9f 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
@@ -16,7 +16,7 @@
#ifndef __ISYS_IRQ_GLOBAL_H__
#define __ISYS_IRQ_GLOBAL_H__
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* Register offset/index from base location */
#define ISYS_IRQ_EDGE_REG_IDX (0)
@@ -31,6 +31,6 @@
#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF)
#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF)
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_GLOBAL_H__ */
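The guard rewrite here follows the convention applied throughout this patch: the per-input-system macros are replaced by the ISP2401 build flag, so USE_INPUT_SYSTEM_VERSION_2401 becomes defined(ISP2401) and USE_INPUT_SYSTEM_VERSION_2 becomes !defined(ISP2401), as the input_formatter.c and input_system.c hunks below confirm. In sketch form:

	#ifdef ISP2401
	/* 2401 input system path, formerly USE_INPUT_SYSTEM_VERSION_2401 */
	#else
	/* 2400 input system path, formerly USE_INPUT_SYSTEM_VERSION_2 */
	#endif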
diff --git a/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
index 75722ef572d0..f131f03cb8fa 100644
--- a/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
+++ b/drivers/staging/media/atomisp/pci/css_2401_system/pixelgen_global.h
@@ -24,8 +24,8 @@
/*
* Duplicates "sync_generator_cfg_t" in "input_system_global.h".
*/
-typedef struct sync_generator_cfg_s sync_generator_cfg_t;
-struct sync_generator_cfg_s {
+typedef struct isp2401_sync_generator_cfg_s isp2401_sync_generator_cfg_t;
+struct isp2401_sync_generator_cfg_s {
u32 hblank_cycles;
u32 vblank_cycles;
u32 pixels_per_clock;
@@ -72,7 +72,7 @@ struct pixelgen_tpg_cfg_s {
s32 v_delta; /* vertical delta? */
} delta_cfg;
- sync_generator_cfg_t sync_gen_cfg;
+ isp2401_sync_generator_cfg_t sync_gen_cfg;
};
/*
@@ -84,7 +84,7 @@ struct pixelgen_prbs_cfg_s {
s32 seed0;
s32 seed1;
- sync_generator_cfg_t sync_gen_cfg;
+ isp2401_sync_generator_cfg_t sync_gen_cfg;
};
/* end of Pixel-generator: TPG. ("pixelgen_global.h") */
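The renames in the last two hunks (ib_buffer_s to isp2401_ib_buffer_s, sync_generator_cfg_s to isp2401_sync_generator_cfg_s) serve one purpose: input_system_global.h now includes the 2400 and 2401 headers together (see its hunk below), so type names that both generations defined must be disambiguated before they can coexist in one translation unit. A minimal sketch, with fields abbreviated to those visible in this patch and a local u32 standing in for <linux/types.h>:

	typedef unsigned int u32;	/* kernel code gets this from <linux/types.h> */

	/* 2400 side: struct tag kept, only the typedef renamed
	 * (isp2400_input_system_global.h hunk below) */
	struct ib_buffer_s {
		u32 mem_reg_size;
		u32 mem_reg_addr;
	};
	typedef struct ib_buffer_s isp2400_ib_buffer_t;

	/* 2401 side: both tag and typedef renamed (ibuf_ctrl_global.h hunk) */
	struct isp2401_ib_buffer_s {
		u32 start_addr;
		u32 stride;
		u32 lines;
	};
	typedef struct isp2401_ib_buffer_s isp2401_ib_buffer_t;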
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
index bec9c7238a78..5cd6136f21a2 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
@@ -15,7 +15,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
#include "input_formatter.h"
#include <type_support.h>
@@ -27,6 +27,10 @@
#include "input_formatter_private.h"
#endif /* __INLINE_INPUT_FORMATTER__ */
+static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
+ ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
+};
+
const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = {
INPUT_FORMATTER0_SRST_OFFSET,
INPUT_FORMATTER1_SRST_OFFSET,
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
index 94fff77584f7..dfb593c109af 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter_local.h
@@ -115,8 +115,4 @@ struct input_formatter_bin_state_s {
u32 en_status_update;
};
-static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
- ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
-};
-
#endif /* __INPUT_FORMATTER_LOCAL_H_INCLUDED__ */
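Moving input_formatter_alignment[] out of the header and into input_formatter.c, its only user, avoids the usual pitfall of static const data at header scope: every translation unit including the header gets its own private copy, plus -Wunused-const-variable noise in the ones that never read it. Sketch of the resulting layout, assuming that motivation:

	/* input_formatter_local.h: types and declarations only, no data */

	/* input_formatter.c: the single, file-local definition */
	static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
		ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
	};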
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
index fc000af042dc..0f5a231672a8 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
@@ -15,7 +15,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
#include "input_system.h"
#include <type_support.h>
@@ -30,17 +30,17 @@
#define ZERO (0x0)
#define ONE (1U)
-static const ib_buffer_t IB_BUFFER_NULL = {0, 0, 0 };
+static const isp2400_ib_buffer_t IB_BUFFER_NULL = {0, 0, 0 };
-static input_system_error_t input_system_configure_channel(
+static input_system_err_t input_system_configure_channel(
const channel_cfg_t channel);
-static input_system_error_t input_system_configure_channel_sensor(
+static input_system_err_t input_system_configure_channel_sensor(
const channel_cfg_t channel);
-static input_system_error_t input_buffer_configuration(void);
+static input_system_err_t input_buffer_configuration(void);
-static input_system_error_t configuration_to_registers(void);
+static input_system_err_t configuration_to_registers(void);
static void receiver_rst(const rx_ID_t ID);
static void input_system_network_rst(const input_system_ID_t ID);
@@ -48,12 +48,12 @@ static void input_system_network_rst(const input_system_ID_t ID);
static void capture_unit_configure(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
- const ib_buffer_t *const cfg);
+ const isp2400_ib_buffer_t *const cfg);
static void acquisition_unit_configure(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
- const ib_buffer_t *const cfg);
+ const isp2400_ib_buffer_t *const cfg);
static void ctrl_unit_configure(
const input_system_ID_t ID,
@@ -65,17 +65,17 @@ static void input_system_network_configure(
const input_system_network_cfg_t *const cfg);
// MW: CSI was previously named "rx", short for "receiver"
-static input_system_error_t set_csi_cfg(
+static input_system_err_t set_csi_cfg(
csi_cfg_t *const lhs,
const csi_cfg_t *const rhs,
input_system_config_flags_t *const flags);
-static input_system_error_t set_source_type(
+static input_system_err_t set_source_type(
input_system_source_t *const lhs,
const input_system_source_t rhs,
input_system_config_flags_t *const flags);
-static input_system_error_t input_system_multiplexer_cfg(
+static input_system_err_t input_system_multiplexer_cfg(
input_system_multiplex_t *const lhs,
const input_system_multiplex_t rhs,
input_system_config_flags_t *const flags);
@@ -848,7 +848,7 @@ static void input_system_network_rst(const input_system_ID_t ID)
}
// Function that resets current configuration.
-input_system_error_t input_system_configuration_reset(void)
+input_system_err_t input_system_configuration_reset(void)
{
unsigned int i;
@@ -890,10 +890,10 @@ input_system_error_t input_system_configuration_reset(void)
// MW: Comments are good, but doxygen is required, place it at the declaration
// Function that appends the channel to current configuration.
-static input_system_error_t input_system_configure_channel(
+static input_system_err_t input_system_configure_channel(
const channel_cfg_t channel)
{
- input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+ input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
// Check if channel is not already configured.
if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET) {
return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET;
@@ -948,12 +948,12 @@ static input_system_error_t input_system_configure_channel(
}
// Function that partitions input buffer space with determining addresses.
-static input_system_error_t input_buffer_configuration(void)
+static input_system_err_t input_buffer_configuration(void)
{
u32 current_address = 0;
u32 unallocated_memory = IB_CAPACITY_IN_WORDS;
- ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL;
+ isp2400_ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL;
u32 size_requested;
input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET;
input_system_csi_port_t port;
@@ -1062,7 +1062,7 @@ static input_system_error_t input_buffer_configuration(void)
static void capture_unit_configure(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
- const ib_buffer_t *const cfg)
+ const isp2400_ib_buffer_t *const cfg)
{
assert(ID < N_INPUT_SYSTEM_ID);
assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <=
@@ -1088,7 +1088,7 @@ static void capture_unit_configure(
static void acquisition_unit_configure(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
- const ib_buffer_t *const cfg)
+ const isp2400_ib_buffer_t *const cfg)
{
assert(ID < N_INPUT_SYSTEM_ID);
assert(sub_id == ACQUISITION_UNIT0_ID);
@@ -1236,7 +1236,7 @@ static void input_system_network_configure(
return;
}
-static input_system_error_t configuration_to_registers(void)
+static input_system_err_t configuration_to_registers(void)
{
input_system_network_cfg_t input_system_network_cfg;
int i;
@@ -1335,10 +1335,10 @@ static input_system_error_t configuration_to_registers(void)
}
// Function that applies the whole configuration.
-input_system_error_t input_system_configuration_commit(void)
+input_system_err_t input_system_configuration_commit(void)
{
// The last configuration step is to configure the input buffer.
- input_system_error_t error = input_buffer_configuration();
+ input_system_err_t error = input_buffer_configuration();
if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
return error;
@@ -1357,7 +1357,7 @@ input_system_error_t input_system_configuration_commit(void)
// FIFO
-input_system_error_t input_system_csi_fifo_channel_cfg(
+input_system_err_t input_system_csi_fifo_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
@@ -1380,7 +1380,7 @@ input_system_error_t input_system_csi_fifo_channel_cfg(
return input_system_configure_channel(channel);
}
-input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+input_system_err_t input_system_csi_fifo_channel_with_counting_cfg(
u32 ch_id,
u32 nof_frames,
input_system_csi_port_t port,
@@ -1411,7 +1411,7 @@ input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
// SRAM
-input_system_error_t input_system_csi_sram_channel_cfg(
+input_system_err_t input_system_csi_sram_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
@@ -1443,7 +1443,7 @@ input_system_error_t input_system_csi_sram_channel_cfg(
//XMEM
// Collects all parameters and puts them in channel_cfg_t.
-input_system_error_t input_system_csi_xmem_channel_cfg(
+input_system_err_t input_system_csi_xmem_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
@@ -1475,7 +1475,7 @@ input_system_error_t input_system_csi_xmem_channel_cfg(
return input_system_configure_channel(channel);
}
-input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+input_system_err_t input_system_csi_xmem_acquire_only_channel_cfg(
u32 ch_id,
u32 nof_frames,
input_system_csi_port_t port,
@@ -1502,7 +1502,7 @@ input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
return input_system_configure_channel(channel);
}
-input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+input_system_err_t input_system_csi_xmem_capture_only_channel_cfg(
u32 ch_id,
u32 nof_frames,
input_system_csi_port_t port,
@@ -1535,7 +1535,7 @@ input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
// Non - CSI
-input_system_error_t input_system_prbs_channel_cfg(
+input_system_err_t input_system_prbs_channel_cfg(
u32 ch_id,
u32 nof_frames,//not used yet
u32 seed,
@@ -1564,7 +1564,7 @@ input_system_error_t input_system_prbs_channel_cfg(
return input_system_configure_channel(channel);
}
-input_system_error_t input_system_tpg_channel_cfg(
+input_system_err_t input_system_tpg_channel_cfg(
u32 ch_id,
u32 nof_frames,//not used yet
u32 x_mask,
@@ -1601,7 +1601,7 @@ input_system_error_t input_system_tpg_channel_cfg(
}
// MW: Don't use system-specific names (even in system-specific files): "cfg2400" -> cfg
-input_system_error_t input_system_gpfifo_channel_cfg(
+input_system_err_t input_system_gpfifo_channel_cfg(
u32 ch_id,
u32 nof_frames, //not used yet
@@ -1625,11 +1625,11 @@ input_system_error_t input_system_gpfifo_channel_cfg(
///////////////////////////////////////////////////////////////////////////
// Fills the parameters to config.csi_value[port]
-static input_system_error_t input_system_configure_channel_sensor(
+static input_system_err_t input_system_configure_channel_sensor(
const channel_cfg_t channel)
{
const u32 port = channel.source_cfg.csi_cfg.csi_port;
- input_system_error_t status = INPUT_SYSTEM_ERR_NO_ERROR;
+ input_system_err_t status = INPUT_SYSTEM_ERR_NO_ERROR;
input_system_multiplex_t mux;
@@ -1711,7 +1711,7 @@ static input_system_error_t input_system_configure_channel_sensor(
}
// Test flags and set structure.
-static input_system_error_t set_source_type(
+static input_system_err_t set_source_type(
input_system_source_t *const lhs,
const input_system_source_t rhs,
input_system_config_flags_t *const flags)
@@ -1747,7 +1747,7 @@ static input_system_error_t set_source_type(
}
// Test flags and set structure.
-static input_system_error_t set_csi_cfg(
+static input_system_err_t set_csi_cfg(
csi_cfg_t *const lhs,
const csi_cfg_t *const rhs,
input_system_config_flags_t *const flags)
@@ -1814,7 +1814,7 @@ static input_system_error_t set_csi_cfg(
}
// Test flags and set structure.
-static input_system_error_t input_system_multiplexer_cfg(
+static input_system_err_t input_system_multiplexer_cfg(
input_system_multiplex_t *const lhs,
const input_system_multiplex_t rhs,
input_system_config_flags_t *const flags)
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
index e6f695691407..3d6621f2fa96 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
@@ -16,7 +16,7 @@
#ifndef __CSI_RX_PUBLIC_H_INCLUDED__
#define __CSI_RX_PUBLIC_H_INCLUDED__
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/*****************************************************
*
* Native command interface (NCI).
@@ -132,5 +132,5 @@ void csi_rx_be_ctrl_reg_store(
const hrt_address reg,
const hrt_data value);
/* end of DLI */
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* ISP2401 */
#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h
deleted file mode 100644
index 053803d2cae3..000000000000
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/ibuf_ctrl_public.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __IBUF_CTRL_PUBLIC_H_INCLUDED__
-#define __IBUF_CTRL_PUBLIC_H_INCLUDED__
-
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
-/*****************************************************
- *
- * Native command interface (NCI).
- *
- *****************************************************/
-/**
- * @brief Get the ibuf-controller state.
- * Get the state of the ibuf-controller register-set.
- *
- * @param[in] id The global unique ID of the input-buffer controller.
- * @param[out] state Pointer to the register-state.
- */
-STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_state(
- const ibuf_ctrl_ID_t ID,
- ibuf_ctrl_state_t *state);
-
-/**
- * @brief Get the state of the ibuf-controller process.
- * Get the state of the register set per ibuf-controller process.
- *
- * @param[in] id The global unique ID of the input-buffer controller.
- * @param[in] proc_id The process ID.
- * @param[out] state Pointer to the process state.
- */
-STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
- const ibuf_ctrl_ID_t ID,
- const u32 proc_id,
- ibuf_ctrl_proc_state_t *state);
-/**
- * @brief Dump the ibuf-controller state.
- * Dump the state of the ibuf-controller register-set.
- *
- * @param[in] id The global unique ID of the input-buffer controller.
- * @param[in] state Pointer to the register-state.
- */
-STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
- const ibuf_ctrl_ID_t ID,
- ibuf_ctrl_state_t *state);
-/* end of NCI */
-
-/*****************************************************
- *
- * Device level interface (DLI).
- *
- *****************************************************/
-/**
- * @brief Load the register value.
- * Load the value of the register of the ibuf-controller.
- *
- * @param[in] ID The global unique ID for the ibuf-controller instance.
- * @param[in] reg The offset address of the register.
- *
- * @return the value of the register.
- */
-STORAGE_CLASS_IBUF_CTRL_H hrt_data ibuf_ctrl_reg_load(
- const ibuf_ctrl_ID_t ID,
- const hrt_address reg);
-
-/**
- * @brief Store a value to the register.
- * Store a value to the register of the ibuf-controller.
- *
- * @param[in] ID The global unique ID for the ibuf-controller instance.
- * @param[in] reg The offset address of the register.
- * @param[in] value The value to be stored.
- *
- */
-STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
- const ibuf_ctrl_ID_t ID,
- const hrt_address reg,
- const hrt_data value);
-/* end of DLI */
-
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
-#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
index 23a158b81b13..d9b6af898c06 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
@@ -16,17 +16,17 @@
#ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
#define __ISYS_DMA_PUBLIC_H_INCLUDED__
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "system_local.h"
#include "type_support.h"
-STORAGE_CLASS_ISYS2401_DMA_H void isys2401_dma_reg_store(
+extern void isys2401_dma_reg_store(
const isys2401_dma_ID_t dma_id,
const unsigned int reg,
const hrt_data value);
-STORAGE_CLASS_ISYS2401_DMA_H hrt_data isys2401_dma_reg_load(
+extern hrt_data isys2401_dma_reg_load(
const isys2401_dma_ID_t dma_id,
const unsigned int reg);
@@ -34,6 +34,6 @@ void isys2401_dma_set_max_burst_size(
const isys2401_dma_ID_t dma_id,
uint32_t max_burst_size);
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* ISP2401 */
#endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
index b9befdd2508e..736cbc4e3705 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
@@ -19,28 +19,23 @@
#include "isys_irq_global.h"
#include "isys_irq_local.h"
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
-STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_get(
- const isys_irq_ID_t isys_irqc_id,
- isys_irqc_state_t *state);
+void isys_irqc_state_get(const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state);
-STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_dump(
- const isys_irq_ID_t isys_irqc_id,
- const isys_irqc_state_t *state);
+void isys_irqc_state_dump(const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state);
-STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_reg_store(
- const isys_irq_ID_t isys_irqc_id,
- const unsigned int reg_idx,
- const hrt_data value);
+void isys_irqc_reg_store(const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value);
-STORAGE_CLASS_ISYS2401_IRQ_H hrt_data isys_irqc_reg_load(
- const isys_irq_ID_t isys_irqc_id,
- const unsigned int reg_idx);
+hrt_data isys_irqc_reg_load(const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx);
-STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_status_enable(
- const isys_irq_ID_t isys_irqc_id);
+void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id);
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* defined(ISP2401) */
#endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
index 509f75fe025c..dac53e324118 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
@@ -16,7 +16,7 @@
#ifndef __ISYS_PUBLIC_H_INCLUDED__
#define __ISYS_PUBLIC_H_INCLUDED__
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/*! Read the state of INPUT_SYSTEM[ID]
\param ID[in] INPUT_SYSTEM identifier
\param state[out] pointer to input system state structure
@@ -34,5 +34,5 @@ STORAGE_CLASS_INPUT_SYSTEM_H input_system_err_t input_system_get_state(
STORAGE_CLASS_INPUT_SYSTEM_H void input_system_dump_state(
const input_system_ID_t ID,
input_system_state_t *state);
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* ISP2401 */
#endif /* __ISYS_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
index ded4dce06d09..40a9fb6d7761 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
@@ -16,7 +16,7 @@
#ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
#define __PIXELGEN_PUBLIC_H_INCLUDED__
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/*****************************************************
*
* Native command interface (NCI).
@@ -76,5 +76,5 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
const hrt_data value);
/* end of DLI */
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* ISP2401 */
#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h
deleted file mode 100644
index 218341041811..000000000000
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/ibuf_ctrl.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __IBUF_CTRL_H_INCLUDED__
-#define __IBUF_CTRL_H_INCLUDED__
-
-/*
- * This file is included on every cell {SP,ISP,host} and on every system
- * that uses the input system device(s). It defines the API to DLI bridge
- *
- * System and cell specific interfaces and inline code are included
- * conditionally through Makefile path settings.
- *
- * - system and cell agnostic interfaces, constants and identifiers
- * - public: system agnostic, cell specific interfaces
- * - private: system dependent, cell specific interfaces &
- * inline implementations
- * - global: system specific constants and identifiers
- * - local: system and cell specific constants and identifiers
- */
-
-#include "system_local.h"
-#include "ibuf_ctrl_local.h"
-
-#ifndef __INLINE_IBUF_CTRL__
-#define STORAGE_CLASS_IBUF_CTRL_H extern
-#define STORAGE_CLASS_IBUF_CTRL_C
-#include "ibuf_ctrl_public.h"
-#else /* __INLINE_IBUF_CTRL__ */
-#define STORAGE_CLASS_IBUF_CTRL_H static inline
-#define STORAGE_CLASS_IBUF_CTRL_C static inline
-#include "ibuf_ctrl_private.h"
-#endif /* __INLINE_IBUF_CTRL__ */
-
-#endif /* __IBUF_CTRL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h
deleted file mode 100644
index 6a759142eda8..000000000000
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_dma.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ISYS_DMA_H_INCLUDED__
-#define __ISYS_DMA_H_INCLUDED__
-
-/*
- * This file is included on every cell {SP,ISP,host} and on every system
- * that uses the input system device(s). It defines the API to DLI bridge
- *
- * System and cell specific interfaces and inline code are included
- * conditionally through Makefile path settings.
- *
- * - system and cell agnostic interfaces, constants and identifiers
- * - public: system agnostic, cell specific interfaces
- * - private: system dependent, cell specific interfaces &
- * inline implementations
- * - global: system specific constants and identifiers
- * - local: system and cell specific constants and identifiers
- */
-
-#include "system_local.h"
-#include "isys_dma_local.h"
-
-#ifndef __INLINE_ISYS2401_DMA__
-#define STORAGE_CLASS_ISYS2401_DMA_H extern
-#define STORAGE_CLASS_ISYS2401_DMA_C
-#include "isys_dma_public.h"
-#else /* __INLINE_ISYS2401_DMA__ */
-#define STORAGE_CLASS_ISYS2401_DMA_H static inline
-#define STORAGE_CLASS_ISYS2401_DMA_C static inline
-#include "isys_dma_private.h"
-#endif /* __INLINE_ISYS2401_DMA__ */
-
-#endif /* __ISYS_DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
index d854124f4f97..001c55ea970b 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
@@ -19,22 +19,10 @@
#include <type_support.h>
#include <system_local.h>
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
-#ifndef __INLINE_ISYS2401_IRQ__
-
-#define STORAGE_CLASS_ISYS2401_IRQ_H extern
-#define STORAGE_CLASS_ISYS2401_IRQ_C extern
#include "isys_irq_public.h"
-#else /* __INLINE_ISYS2401_IRQ__ */
-
-#define STORAGE_CLASS_ISYS2401_IRQ_H static inline
-#define STORAGE_CLASS_ISYS2401_IRQ_C static inline
-#include "isys_irq_private.h"
-
-#endif /* __INLINE_ISYS2401_IRQ__ */
-
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* defined(ISP2401) */
#endif /* __IA_CSS_ISYS_IRQ_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
index a1f7a5839560..540b405cc0f7 100644
--- a/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
+++ b/drivers/staging/media/atomisp/pci/hive_isp_css_include/print_support.h
@@ -20,7 +20,7 @@
extern int (*sh_css_printf)(const char *fmt, va_list args);
/* depends on host supplied print function in ia_css_init() */
-static inline void ia_css_print(const char *fmt, ...)
+static inline __printf(1, 2) void ia_css_print(const char *fmt, ...)
{
va_list ap;
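__printf(a, b) is the kernel's shorthand for __attribute__((format(printf, a, b))): argument a holds the format string and variadic checking starts at argument b, so callers of ia_css_print() now get the same compile-time format checking as callers of printf(). A standalone illustration using the raw GCC/Clang attribute:

	#include <stdarg.h>
	#include <stdio.h>

	/* Userspace equivalent of the kernel's __printf(1, 2) annotation. */
	__attribute__((format(printf, 1, 2)))
	static void my_print(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		my_print("%d pages\n", 16);	/* type-checked: OK */
		/* my_print("%s\n", 16); would now trigger -Wformat */
		return 0;
	}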
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 2bd39b4939f1..e0eaff0f8a22 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -268,9 +268,9 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
if (attrs & ATOMISP_MAP_FLAG_CLEARED)
hmm_set(bo->start, 0, bytes);
- dev_dbg(atomisp_dev,
- "%s: pages: 0x%08x (%ld bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
- __func__, bo->start, bytes, type, from_highmem, userptr, cached);
+ dev_dbg(atomisp_dev,
+ "%s: pages: 0x%08x (%ld bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
+ __func__, bo->start, bytes, type, from_highmem, userptr, cached);
return bo->start;
diff --git a/drivers/staging/media/atomisp/pci/ia_css_env.h b/drivers/staging/media/atomisp/pci/ia_css_env.h
index 8debf334c15c..6b38723b27cd 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_env.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_env.h
@@ -75,9 +75,9 @@ struct ia_css_hw_access_env {
/* Environment with function pointers to print error and debug messages.
*/
struct ia_css_print_env {
- int (*debug_print)(const char *fmt, va_list args);
+ int __printf(1, 0) (*debug_print)(const char *fmt, va_list args);
/** Print a debug message. */
- int (*error_print)(const char *fmt, va_list args);
+ int __printf(1, 0) (*error_print)(const char *fmt, va_list args);
/** Print an error message.*/
};
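These two callbacks receive a va_list rather than variadic arguments, hence __printf(1, 0): index 0 tells the compiler there is no variadic argument list to check, while the format string in argument 1 is still validated. Minimal sketch with the raw attribute:

	#include <stdarg.h>
	#include <stdio.h>

	/* vprintf-style hook: format string checked, va_list contents not. */
	__attribute__((format(printf, 1, 0)))
	static int my_debug_print(const char *fmt, va_list args)
	{
		return vprintf(fmt, args);
	}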
diff --git a/drivers/staging/media/atomisp/pci/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
index 56a2fca8117f..7b6d796d6ee0 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_mipi.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_mipi.h
@@ -42,7 +42,6 @@ int
ia_css_mipi_frame_specify(const unsigned int size_mem_words,
const bool contiguous);
-#if !defined(HAS_NO_INPUT_SYSTEM)
/* @brief Register size of a CSS MIPI frame for check during capturing.
*
* @param[in] port CSI-2 port this check is registered.
@@ -58,7 +57,6 @@ ia_css_mipi_frame_specify(const unsigned int size_mem_words,
int
ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
const unsigned int size_mem_words);
-#endif
/* @brief Calculate the size of a mipi frame.
*
diff --git a/drivers/staging/media/atomisp/pci/ia_css_stream.h b/drivers/staging/media/atomisp/pci/ia_css_stream.h
index e3e7a8a03b04..70b0378748f1 100644
--- a/drivers/staging/media/atomisp/pci/ia_css_stream.h
+++ b/drivers/staging/media/atomisp/pci/ia_css_stream.h
@@ -18,7 +18,7 @@
#include <type_support.h>
#include <system_local.h>
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
#include <input_system.h>
#endif
#include "ia_css_types.h"
@@ -30,7 +30,7 @@
struct ia_css_stream {
struct ia_css_stream_config config;
struct ia_css_stream_info info;
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
rx_cfg_t csi_rx_config;
#endif
bool reconfigure_css_rx;
diff --git a/drivers/staging/media/atomisp/pci/input_system_global.h b/drivers/staging/media/atomisp/pci/input_system_global.h
index 5ac580ce64ed..1450964445f6 100644
--- a/drivers/staging/media/atomisp/pci/input_system_global.h
+++ b/drivers/staging/media/atomisp/pci/input_system_global.h
@@ -4,8 +4,27 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
-#ifdef ISP2401
-# include "isp2401_input_system_global.h"
-#else
-# include "isp2400_input_system_global.h"
-#endif
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ /* ISP2401 */
+ INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_TRANSFER_FAIL,
+ INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL,
+ /* ISP2400 */
+ INPUT_SYSTEM_ERR_GENERIC,
+ INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET,
+ INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE,
+ INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED,
+} input_system_err_t;
+
+#include "isp2401_input_system_global.h"
+#include "isp2400_input_system_global.h"
+
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
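With the per-generation headers now included side by side, the unified input_system_err_t above gives common code one error namespace covering both the 2401 values and the 2400 values. A hypothetical consumer (the helper name is illustrative, not part of the driver; the enumerators come from the hunk above):

	static const char *input_system_strerror(input_system_err_t err)
	{
		switch (err) {
		case INPUT_SYSTEM_ERR_NO_ERROR:
			return "success";
		case INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET:	/* 2400 path */
			return "channel already configured";
		case INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL:	/* 2401 path */
			return "channel creation failed";
		default:
			return "input system error";
		}
	}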
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
index 7e2fa192a0fe..eaad708c611c 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
@@ -22,15 +22,8 @@
#include "ia_css_ctc_types.h"
#ifndef PIPE_GENERATION
-#if defined(HAS_VAMEM_VERSION_2)
#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2
#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_2_CTC_TABLE_SIZE
-#elif defined(HAS_VAMEM_VERSION_1)
-#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2
-#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_1_CTC_TABLE_SIZE
-#else
-#error "VAMEM should be {VERSION1, VERSION2}"
-#endif
#else
/* For pipe generation, the size is not relevant */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
index f13b79586963..6a7925c8493a 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
@@ -23,7 +23,6 @@
struct ia_css_ctc_table default_ctc_table;
-#if defined(HAS_VAMEM_VERSION_2)
static const uint16_t
default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
@@ -62,155 +61,11 @@ default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
0
};
-#elif defined(HAS_VAMEM_VERSION_1)
-
-/* Default Parameters */
-static const uint16_t
-default_ctc_table_data[IA_CSS_VAMEM_1_CTC_TABLE_SIZE] = {
- 0, 0, 256, 384, 384, 497, 765, 806,
- 837, 851, 888, 901, 957, 981, 993, 1001,
- 1011, 1029, 1028, 1039, 1062, 1059, 1073, 1080,
- 1083, 1085, 1085, 1098, 1080, 1084, 1085, 1093,
- 1078, 1073, 1070, 1069, 1077, 1066, 1072, 1063,
- 1053, 1044, 1046, 1053, 1039, 1028, 1025, 1024,
- 1012, 1013, 1016, 996, 992, 990, 990, 980,
- 969, 968, 961, 955, 951, 949, 933, 930,
- 929, 925, 921, 916, 906, 901, 895, 893,
- 886, 877, 872, 869, 866, 861, 857, 849,
- 845, 838, 836, 832, 823, 821, 815, 813,
- 809, 805, 796, 793, 790, 785, 784, 778,
- 772, 768, 766, 763, 758, 752, 749, 745,
- 741, 740, 736, 730, 726, 724, 723, 718,
- 711, 709, 706, 704, 701, 698, 691, 689,
- 688, 683, 683, 678, 675, 673, 671, 669,
- 666, 663, 661, 660, 656, 656, 653, 650,
- 648, 647, 646, 643, 639, 638, 637, 635,
- 633, 632, 629, 627, 626, 625, 622, 621,
- 618, 618, 614, 614, 612, 609, 606, 606,
- 603, 600, 600, 597, 594, 591, 590, 586,
- 582, 581, 578, 575, 572, 569, 563, 560,
- 557, 554, 551, 548, 545, 539, 536, 533,
- 529, 527, 524, 519, 516, 513, 510, 507,
- 504, 501, 498, 493, 491, 488, 485, 484,
- 480, 476, 474, 471, 467, 466, 464, 460,
- 459, 455, 453, 449, 447, 446, 443, 441,
- 438, 435, 432, 432, 429, 427, 426, 422,
- 419, 418, 416, 414, 412, 410, 408, 406,
- 404, 402, 401, 398, 397, 395, 393, 390,
- 389, 388, 387, 384, 382, 380, 378, 377,
- 376, 375, 372, 370, 368, 368, 366, 364,
- 363, 361, 360, 358, 357, 355, 354, 352,
- 351, 350, 349, 346, 345, 344, 344, 342,
- 340, 339, 337, 337, 336, 335, 333, 331,
- 330, 329, 328, 326, 326, 324, 324, 322,
- 321, 320, 318, 318, 318, 317, 315, 313,
- 312, 311, 311, 310, 308, 307, 306, 306,
- 304, 304, 302, 301, 300, 300, 299, 297,
- 297, 296, 296, 294, 294, 292, 291, 291,
- 291, 290, 288, 287, 286, 286, 287, 285,
- 284, 283, 282, 282, 281, 281, 279, 278,
- 278, 278, 276, 276, 275, 274, 274, 273,
- 271, 270, 269, 268, 268, 267, 265, 262,
- 261, 260, 260, 259, 257, 254, 252, 252,
- 251, 251, 249, 246, 245, 244, 243, 242,
- 240, 239, 239, 237, 235, 235, 233, 231,
- 232, 230, 229, 226, 225, 224, 225, 224,
- 223, 220, 219, 219, 218, 217, 217, 214,
- 213, 213, 212, 211, 209, 209, 209, 208,
- 206, 205, 204, 203, 204, 203, 201, 200,
- 199, 197, 198, 198, 197, 195, 194, 194,
- 193, 192, 192, 191, 189, 190, 189, 188,
- 186, 187, 186, 185, 185, 184, 183, 181,
- 183, 182, 181, 180, 179, 178, 178, 178,
- 177, 176, 175, 176, 175, 174, 174, 173,
- 172, 173, 172, 171, 170, 170, 169, 169,
- 169, 168, 167, 166, 167, 167, 166, 165,
- 164, 164, 164, 163, 164, 163, 162, 163,
- 162, 161, 160, 161, 160, 160, 160, 159,
- 158, 157, 158, 158, 157, 157, 156, 156,
- 156, 156, 155, 155, 154, 154, 154, 154,
- 154, 153, 152, 153, 152, 152, 151, 152,
- 151, 152, 151, 150, 150, 149, 149, 150,
- 149, 149, 148, 148, 148, 149, 148, 147,
- 146, 146, 147, 146, 147, 146, 145, 146,
- 146, 145, 144, 145, 144, 145, 144, 144,
- 143, 143, 143, 144, 143, 142, 142, 142,
- 142, 142, 142, 141, 141, 141, 141, 140,
- 140, 141, 140, 140, 141, 140, 139, 139,
- 139, 140, 139, 139, 138, 138, 137, 139,
- 138, 138, 138, 137, 138, 137, 137, 137,
- 137, 136, 137, 136, 136, 136, 136, 135,
- 136, 135, 135, 135, 135, 136, 135, 135,
- 134, 134, 133, 135, 134, 134, 134, 133,
- 134, 133, 134, 133, 133, 132, 133, 133,
- 132, 133, 132, 132, 132, 132, 131, 131,
- 131, 132, 131, 131, 130, 131, 130, 132,
- 131, 130, 130, 129, 130, 129, 130, 129,
- 129, 129, 130, 129, 128, 128, 128, 128,
- 129, 128, 128, 127, 127, 128, 128, 127,
- 127, 126, 126, 127, 127, 126, 126, 126,
- 127, 126, 126, 126, 125, 125, 126, 125,
- 125, 124, 124, 124, 125, 125, 124, 124,
- 123, 124, 124, 123, 123, 122, 122, 122,
- 122, 122, 121, 120, 120, 119, 118, 118,
- 118, 117, 117, 116, 115, 115, 115, 114,
- 114, 113, 113, 112, 111, 111, 111, 110,
- 110, 109, 109, 108, 108, 108, 107, 107,
- 106, 106, 105, 105, 105, 104, 104, 103,
- 103, 102, 102, 102, 102, 101, 101, 100,
- 100, 99, 99, 99, 99, 99, 99, 98,
- 97, 98, 97, 97, 97, 96, 96, 95,
- 96, 95, 96, 95, 95, 94, 94, 95,
- 94, 94, 94, 93, 93, 92, 93, 93,
- 93, 93, 92, 92, 91, 92, 92, 92,
- 91, 91, 90, 90, 91, 91, 91, 90,
- 90, 90, 90, 91, 90, 90, 90, 89,
- 89, 89, 90, 89, 89, 89, 89, 89,
- 88, 89, 89, 88, 88, 88, 88, 87,
- 89, 88, 88, 88, 88, 88, 87, 88,
- 88, 88, 87, 87, 87, 87, 87, 88,
- 87, 87, 87, 87, 87, 87, 88, 87,
- 87, 87, 87, 86, 86, 87, 87, 87,
- 87, 86, 86, 86, 87, 87, 86, 87,
- 86, 86, 86, 87, 87, 86, 86, 86,
- 86, 86, 87, 87, 86, 85, 85, 85,
- 84, 85, 85, 84, 84, 83, 83, 82,
- 82, 82, 81, 81, 80, 79, 79, 79,
- 78, 77, 77, 76, 76, 76, 75, 74,
- 74, 74, 73, 73, 72, 71, 71, 71,
- 70, 70, 69, 69, 68, 68, 67, 67,
- 67, 66, 66, 65, 65, 64, 64, 63,
- 62, 62, 62, 61, 60, 60, 59, 59,
- 58, 58, 57, 57, 56, 56, 56, 55,
- 55, 54, 55, 55, 54, 53, 53, 52,
- 53, 53, 52, 51, 51, 50, 51, 50,
- 49, 49, 50, 49, 49, 48, 48, 47,
- 47, 48, 46, 45, 45, 45, 46, 45,
- 45, 44, 45, 45, 45, 43, 42, 42,
- 41, 43, 41, 40, 40, 39, 40, 41,
- 39, 39, 39, 39, 39, 38, 35, 35,
- 34, 37, 36, 34, 33, 33, 33, 35,
- 34, 32, 32, 31, 32, 30, 29, 26,
- 25, 25, 27, 26, 23, 23, 23, 25,
- 24, 24, 22, 21, 20, 19, 16, 14,
- 13, 13, 13, 10, 9, 7, 7, 7,
- 12, 12, 12, 7, 0, 0, 0, 0
-};
-
-#else
-#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
-#endif
void
ia_css_config_ctc_table(void)
{
-#if defined(HAS_VAMEM_VERSION_2)
memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data,
sizeof(default_ctc_table_data));
default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
-#else
- memcpy(default_ctc_table.data.vamem_1, default_ctc_table_data,
- sizeof(default_ctc_table_data));
- default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
-#endif
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
index b8b71791466f..67f5540b48b5 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
@@ -234,7 +234,6 @@ convert_allocate_dvs_6axis_config(
unsigned int o_width;
unsigned int o_height;
struct ia_css_host_data *me;
- struct gdc_warp_param_mem_s *isp_data_ptr;
assert(binary);
assert(dvs_6axis_config);
@@ -249,8 +248,6 @@ convert_allocate_dvs_6axis_config(
assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12)
|| (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420));
- isp_data_ptr = (struct gdc_warp_param_mem_s *)me->address;
-
i_stride = dvs_in_frame_info->padded_width;
o_width = binary->out_frame_info[0].res.width;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
index f48f876777dc..7dbe2dc0591d 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
@@ -21,7 +21,6 @@
#include "ia_css_types.h"
#include "ia_css_gc_table.host.h"
-#if defined(HAS_VAMEM_VERSION_2)
struct ia_css_gamma_table default_gamma_table;
@@ -62,154 +61,11 @@ default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = {
255
};
-#elif defined(HAS_VAMEM_VERSION_1)
-
-static const uint16_t
-default_gamma_table_data[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 16,
- 17, 18, 19, 20, 21, 23, 24, 25,
- 27, 28, 29, 31, 32, 33, 35, 36,
- 38, 39, 41, 42, 44, 45, 47, 48,
- 49, 51, 52, 54, 55, 57, 58, 60,
- 61, 62, 64, 65, 66, 68, 69, 70,
- 71, 72, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 93, 94,
- 95, 96, 97, 98, 98, 99, 100, 101,
- 102, 102, 103, 104, 105, 105, 106, 107,
- 108, 108, 109, 110, 110, 111, 112, 112,
- 113, 114, 114, 115, 116, 116, 117, 118,
- 118, 119, 120, 120, 121, 121, 122, 123,
- 123, 124, 125, 125, 126, 126, 127, 127, /* 128 */
- 128, 129, 129, 130, 130, 131, 131, 132,
- 132, 133, 134, 134, 135, 135, 136, 136,
- 137, 137, 138, 138, 139, 139, 140, 140,
- 141, 141, 142, 142, 143, 143, 144, 144,
- 145, 145, 145, 146, 146, 147, 147, 148,
- 148, 149, 149, 150, 150, 150, 151, 151,
- 152, 152, 152, 153, 153, 154, 154, 155,
- 155, 155, 156, 156, 156, 157, 157, 158,
- 158, 158, 159, 159, 160, 160, 160, 161,
- 161, 161, 162, 162, 162, 163, 163, 163,
- 164, 164, 164, 165, 165, 165, 166, 166,
- 166, 167, 167, 167, 168, 168, 168, 169,
- 169, 169, 170, 170, 170, 170, 171, 171,
- 171, 172, 172, 172, 172, 173, 173, 173,
- 174, 174, 174, 174, 175, 175, 175, 176,
- 176, 176, 176, 177, 177, 177, 177, 178, /* 256 */
- 178, 178, 178, 179, 179, 179, 179, 180,
- 180, 180, 180, 181, 181, 181, 181, 182,
- 182, 182, 182, 182, 183, 183, 183, 183,
- 184, 184, 184, 184, 184, 185, 185, 185,
- 185, 186, 186, 186, 186, 186, 187, 187,
- 187, 187, 187, 188, 188, 188, 188, 188,
- 189, 189, 189, 189, 189, 190, 190, 190,
- 190, 190, 191, 191, 191, 191, 191, 192,
- 192, 192, 192, 192, 192, 193, 193, 193,
- 193, 193, 194, 194, 194, 194, 194, 194,
- 195, 195, 195, 195, 195, 195, 196, 196,
- 196, 196, 196, 196, 197, 197, 197, 197,
- 197, 197, 198, 198, 198, 198, 198, 198,
- 198, 199, 199, 199, 199, 199, 199, 200,
- 200, 200, 200, 200, 200, 200, 201, 201,
- 201, 201, 201, 201, 201, 202, 202, 202, /* 384 */
- 202, 202, 202, 202, 203, 203, 203, 203,
- 203, 203, 203, 204, 204, 204, 204, 204,
- 204, 204, 204, 205, 205, 205, 205, 205,
- 205, 205, 205, 206, 206, 206, 206, 206,
- 206, 206, 206, 207, 207, 207, 207, 207,
- 207, 207, 207, 208, 208, 208, 208, 208,
- 208, 208, 208, 209, 209, 209, 209, 209,
- 209, 209, 209, 209, 210, 210, 210, 210,
- 210, 210, 210, 210, 210, 211, 211, 211,
- 211, 211, 211, 211, 211, 211, 212, 212,
- 212, 212, 212, 212, 212, 212, 212, 213,
- 213, 213, 213, 213, 213, 213, 213, 213,
- 214, 214, 214, 214, 214, 214, 214, 214,
- 214, 214, 215, 215, 215, 215, 215, 215,
- 215, 215, 215, 216, 216, 216, 216, 216,
- 216, 216, 216, 216, 216, 217, 217, 217, /* 512 */
- 217, 217, 217, 217, 217, 217, 217, 218,
- 218, 218, 218, 218, 218, 218, 218, 218,
- 218, 219, 219, 219, 219, 219, 219, 219,
- 219, 219, 219, 220, 220, 220, 220, 220,
- 220, 220, 220, 220, 220, 221, 221, 221,
- 221, 221, 221, 221, 221, 221, 221, 221,
- 222, 222, 222, 222, 222, 222, 222, 222,
- 222, 222, 223, 223, 223, 223, 223, 223,
- 223, 223, 223, 223, 223, 224, 224, 224,
- 224, 224, 224, 224, 224, 224, 224, 224,
- 225, 225, 225, 225, 225, 225, 225, 225,
- 225, 225, 225, 226, 226, 226, 226, 226,
- 226, 226, 226, 226, 226, 226, 226, 227,
- 227, 227, 227, 227, 227, 227, 227, 227,
- 227, 227, 228, 228, 228, 228, 228, 228,
- 228, 228, 228, 228, 228, 228, 229, 229,
- 229, 229, 229, 229, 229, 229, 229, 229,
- 229, 229, 230, 230, 230, 230, 230, 230,
- 230, 230, 230, 230, 230, 230, 231, 231,
- 231, 231, 231, 231, 231, 231, 231, 231,
- 231, 231, 231, 232, 232, 232, 232, 232,
- 232, 232, 232, 232, 232, 232, 232, 233,
- 233, 233, 233, 233, 233, 233, 233, 233,
- 233, 233, 233, 233, 234, 234, 234, 234,
- 234, 234, 234, 234, 234, 234, 234, 234,
- 234, 235, 235, 235, 235, 235, 235, 235,
- 235, 235, 235, 235, 235, 235, 236, 236,
- 236, 236, 236, 236, 236, 236, 236, 236,
- 236, 236, 236, 236, 237, 237, 237, 237,
- 237, 237, 237, 237, 237, 237, 237, 237,
- 237, 237, 238, 238, 238, 238, 238, 238,
- 238, 238, 238, 238, 238, 238, 238, 238,
- 239, 239, 239, 239, 239, 239, 239, 239,
- 239, 239, 239, 239, 239, 239, 240, 240,
- 240, 240, 240, 240, 240, 240, 240, 240,
- 240, 240, 240, 240, 241, 241, 241, 241,
- 241, 241, 241, 241, 241, 241, 241, 241,
- 241, 241, 241, 242, 242, 242, 242, 242,
- 242, 242, 242, 242, 242, 242, 242, 242,
- 242, 242, 243, 243, 243, 243, 243, 243,
- 243, 243, 243, 243, 243, 243, 243, 243,
- 243, 244, 244, 244, 244, 244, 244, 244,
- 244, 244, 244, 244, 244, 244, 244, 244,
- 245, 245, 245, 245, 245, 245, 245, 245,
- 245, 245, 245, 245, 245, 245, 245, 246,
- 246, 246, 246, 246, 246, 246, 246, 246,
- 246, 246, 246, 246, 246, 246, 246, 247,
- 247, 247, 247, 247, 247, 247, 247, 247,
- 247, 247, 247, 247, 247, 247, 247, 248,
- 248, 248, 248, 248, 248, 248, 248, 248,
- 248, 248, 248, 248, 248, 248, 248, 249,
- 249, 249, 249, 249, 249, 249, 249, 249,
- 249, 249, 249, 249, 249, 249, 249, 250,
- 250, 250, 250, 250, 250, 250, 250, 250,
- 250, 250, 250, 250, 250, 250, 250, 251,
- 251, 251, 251, 251, 251, 251, 251, 251,
- 251, 251, 251, 251, 251, 251, 251, 252,
- 252, 252, 252, 252, 252, 252, 252, 252,
- 252, 252, 252, 252, 252, 252, 252, 253,
- 253, 253, 253, 253, 253, 253, 253, 253,
- 253, 253, 253, 253, 253, 253, 253, 253,
- 254, 254, 254, 254, 254, 254, 254, 254,
- 254, 254, 254, 254, 254, 254, 254, 254,
- 255, 255, 255, 255, 255, 255, 255, 255
-};
-
-#else
-#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
-#endif
void
ia_css_config_gamma_table(void)
{
-#if defined(HAS_VAMEM_VERSION_2)
memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data,
sizeof(default_gamma_table_data));
default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
-#else
- memcpy(default_gamma_table.data.vamem_1, default_gamma_table_data,
- sizeof(default_gamma_table_data));
- default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
-#endif
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
index 7eadb31268eb..34795011907a 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
@@ -27,7 +27,6 @@ struct ia_css_rgb_gamma_table default_b_gamma_table;
/* Identical default gamma table for R, G, and B. */
-#if defined(HAS_VAMEM_VERSION_2)
static const uint16_t
default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
@@ -65,51 +64,10 @@ default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088,
4095
};
-#elif defined(HAS_VAMEM_VERSION_1)
-
-static const uint16_t
-default_gamma_table_data[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE] = {
- 0, 72, 144, 216, 288, 360, 426, 486,
- 541, 592, 641, 687, 730, 772, 812, 850,
- 887, 923, 958, 991, 1024, 1055, 1086, 1117,
- 1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
- 1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
- 1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
- 1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
- 1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
- 2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
- 2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
- 2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
- 2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
- 2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
- 2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
- 2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
- 2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
- 2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
- 2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
- 3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
- 3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
- 3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
- 3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
- 3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
- 3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
- 3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
- 3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
- 3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
- 3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
- 3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
- 3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
- 3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
- 4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088
-};
-#else
-#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
-#endif
void
ia_css_config_rgb_gamma_tables(void)
{
-#if defined(HAS_VAMEM_VERSION_2)
default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
@@ -119,15 +77,4 @@ ia_css_config_rgb_gamma_tables(void)
sizeof(default_gamma_table_data));
memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data,
sizeof(default_gamma_table_data));
-#else
- memcpy(default_r_gamma_table.data.vamem_1, default_gamma_table_data,
- sizeof(default_gamma_table_data));
- memcpy(default_g_gamma_table.data.vamem_1, default_gamma_table_data,
- sizeof(default_gamma_table_data));
- memcpy(default_b_gamma_table.data.vamem_1, default_gamma_table_data,
- sizeof(default_gamma_table_data));
- default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
- default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
- default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
-#endif
}
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
index 1c6f6792d57b..c505c94a7241 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -81,7 +81,7 @@ ia_css_raw_config(
const struct ia_css_frame_info *internal_info = from->internal_info;
(void)size;
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
/* 2401 input system uses input width */
in_info = internal_info;
#else
@@ -105,7 +105,7 @@ ia_css_raw_config(
to->two_ppc = from->two_ppc;
to->stream_format = css2isp_stream_format(from->stream_format);
to->deinterleaved = from->deinterleaved;
-#if (defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(CONFIG_CSI2_PLUS))
+#if defined(ISP2401)
to->start_column = in_info->crop_info.start_column;
to->start_line = in_info->crop_info.start_line;
to->enable_left_padding = from->enable_left_padding;
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
index 7922198f6784..f608740e8340 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -116,7 +116,6 @@ void ia_css_get_isp_dvs2_coefficients(
{
struct ia_css_isp_parameters *params;
unsigned int hor_num_3a, ver_num_3a;
- unsigned int hor_num_isp, ver_num_isp;
struct ia_css_binary *dvs_binary;
IA_CSS_ENTER("void");
@@ -140,8 +139,6 @@ void ia_css_get_isp_dvs2_coefficients(
hor_num_3a = dvs_binary->dis.coef.dim.width;
ver_num_3a = dvs_binary->dis.coef.dim.height;
- hor_num_isp = dvs_binary->dis.coef.pad.width;
- ver_num_isp = dvs_binary->dis.coef.pad.height;
memcpy(hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real,
hor_num_3a * sizeof(short));
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
index 358cb7d2cd4c..dd3670972936 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -13,6 +13,8 @@
* more details.
*/
+#include "atomisp_internal.h"
+
#include "ia_css_vf.host.h"
#include <assert_support.h>
#include <ia_css_err.h>
@@ -58,7 +60,7 @@ sh_css_vf_downscale_log2(
unsigned int ds_log2 = 0;
unsigned int out_width;
- if ((!out_info) | (!vf_info))
+ if ((!out_info) || (!vf_info))
return -EINVAL;
out_width = out_info->res.width;
@@ -129,6 +131,9 @@ ia_css_vf_configure(
const struct ia_css_binary_info *info = &binary->info->sp;
err = configure_kernel(info, out_info, vf_info, downscale_log2, &config);
+ if (err)
+ dev_warn(atomisp_dev, "Couldn't setup downscale\n");
+
configure_dma(&config, vf_info);
if (vf_info)
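Two things change in this hunk: the NULL test switches from bitwise to logical OR, and a failed kernel configuration now logs a warning instead of passing silently. Because !x always evaluates to 0 or 1 in C, (!out_info) | (!vf_info) happened to compute the right truth value, but it forgoes short-circuit evaluation and reads as a typo; || states the intent. Standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		int a = 0, *out_info = NULL, *vf_info = &a;

		/* Same truth value either way, since !x is 0 or 1... */
		printf("%d %d\n", (!out_info) | (!vf_info),
		       (!out_info) || (!vf_info));
		/* ...but || stops at the first true operand, so vf_info
		 * is not even inspected when out_info is NULL. */
		return 0;
	}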
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
index 7ebf139f3618..93754f7c797d 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -20,15 +20,8 @@
#include <system_global.h>
#ifndef PIPE_GENERATION
-#if defined(HAS_VAMEM_VERSION_2)
#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2
#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_2_XNR_TABLE_SIZE
-#elif defined(HAS_VAMEM_VERSION_1)
-#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2
-#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_1_XNR_TABLE_SIZE
-#else
-#error "Unknown vamem type"
-#endif
#else
/* For pipe generation, the size is not relevant */
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
index 5566f3c16aac..e5c15308693d 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
@@ -23,7 +23,6 @@
struct ia_css_xnr_table default_xnr_table;
-#if defined(HAS_VAMEM_VERSION_2)
static const uint16_t
default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = {
@@ -43,41 +42,11 @@ default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = {
167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1
};
-#elif defined(HAS_VAMEM_VERSION_1)
-
-static const uint16_t
-default_xnr_table_data[IA_CSS_VAMEM_1_XNR_TABLE_SIZE] = {
- /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
- 8191 >> 1, 4096 >> 1, 2730 >> 1, 2048 >> 1, 1638 >> 1, 1365 >> 1, 1170 >> 1, 1024 >> 1, 910 >> 1, 819 >> 1, 744 >> 1, 682 >> 1, 630 >> 1, 585 >> 1,
- 546 >> 1, 512 >> 1,
-
- /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
- 481 >> 1, 455 >> 1, 431 >> 1, 409 >> 1, 390 >> 1, 372 >> 1, 356 >> 1, 341 >> 1, 327 >> 1, 315 >> 1, 303 >> 1, 292 >> 1, 282 >> 1, 273 >> 1, 264 >> 1,
- 256 >> 1,
-
- /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
- 248 >> 1, 240 >> 1, 234 >> 1, 227 >> 1, 221 >> 1, 215 >> 1, 210 >> 1, 204 >> 1, 199 >> 1, 195 >> 1, 190 >> 1, 186 >> 1, 182 >> 1, 178 >> 1, 174 >> 1,
- 170 >> 1,
-
- /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
- 167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1
-};
-
-#else
-#error "sh_css_params.c: VAMEM version must \
-be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
-#endif
void
ia_css_config_xnr_table(void)
{
-#if defined(HAS_VAMEM_VERSION_2)
memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data,
sizeof(default_xnr_table_data));
default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
-#else
- memcpy(default_xnr_table.data.vamem_1, default_xnr_table_data,
- sizeof(default_xnr_table_data));
- default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
-#endif
}
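With the VAMEM-1 branch gone, the table kept above is the only XNR table left, and its values follow a simple reciprocal curve. A hedged generator that reproduces the retained data (the 8191 clamp on the first entry and the >> 1 storage shift are read off the values themselves, not taken from ISP documentation):

#include <stdio.h>

int main(void)
{
	/* entry n (1-based) == ((n == 1 ? 8191 : 8192 / n) >> 1) */
	for (unsigned int n = 1; n <= 64; n++) {
		unsigned int v = (n == 1) ? 8191 : 8192 / n;
		printf("%u%s", v >> 1, (n % 16) ? ", " : ",\n");
	}
	return 0;
}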
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
index b4142bdde51b..61f23814e2fd 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_global.h
@@ -13,11 +13,6 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
-#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
-
-#define IS_INPUT_SYSTEM_VERSION_2
-
#include <type_support.h>
//CSI receiver has 3 ports.
@@ -80,13 +75,13 @@ typedef enum {
N_INPUT_SYSTEM_BUFFERING_MODE
} buffering_mode_t;
-typedef struct input_system_cfg_s input_system_cfg_t;
+typedef struct isp2400_input_system_cfg_s input_system_cfg_t;
typedef struct sync_generator_cfg_s sync_generator_cfg_t;
typedef struct tpg_cfg_s tpg_cfg_t;
typedef struct prbs_cfg_s prbs_cfg_t;
/* MW: uint16_t should be sufficient */
-struct input_system_cfg_s {
+struct isp2400_input_system_cfg_s {
u32 no_side_band;
u32 fmt_type;
u32 ch_id;
@@ -123,7 +118,7 @@ struct gpfifo_cfg_s {
typedef struct gpfifo_cfg_s gpfifo_cfg_t;
//ALX: Commented out to make compilation pass.
-//typedef struct input_system_cfg_s input_system_cfg_t;
+//typedef struct isp2400_input_system_cfg_s input_system_cfg_t;
struct ib_buffer_s {
u32 mem_reg_size;
@@ -131,13 +126,13 @@ struct ib_buffer_s {
u32 mem_reg_addr;
};
-typedef struct ib_buffer_s ib_buffer_t;
+typedef struct ib_buffer_s isp2400_ib_buffer_t;
struct csi_cfg_s {
u32 csi_port;
buffering_mode_t buffering_mode;
- ib_buffer_t csi_buffer;
- ib_buffer_t acquisition_buffer;
+ isp2400_ib_buffer_t csi_buffer;
+ isp2400_ib_buffer_t acquisition_buffer;
u32 nof_xmem_buffers;
};
@@ -149,8 +144,6 @@ typedef enum {
INPUT_SYSTEM_CFG_FLAG_BLOCKED = 1U << 1,
INPUT_SYSTEM_CFG_FLAG_REQUIRED = 1U << 2,
INPUT_SYSTEM_CFG_FLAG_CONFLICT = 1U << 3 // To mark a conflicting configuration.
-} input_system_cfg_flag_t;
+} isp2400_input_system_cfg_flag_t;
typedef u32 input_system_config_flags_t;
-
-#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
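The renames above (input_system_cfg_s becomes isp2400_input_system_cfg_s, ib_buffer_t becomes isp2400_ib_buffer_t, and so on), together with the dropped include guard, prepare the 2400 and 2401 headers to coexist in a single build: two different definitions behind one tag or typedef name would otherwise collide. A reduced sketch of the pattern (field lists abbreviated, not the real layouts):

/* Each variant now owns an unambiguous struct tag... */
struct isp2400_input_system_cfg_s { unsigned int fmt_type; /* ... */ };
struct isp2401_input_system_cfg_s { unsigned int mode; /* ... */ };

/* ...and the header that is in effect maps the historical short
 * name onto its own variant: */
typedef struct isp2400_input_system_cfg_s input_system_cfg_t;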
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
index 33ebf89ca053..072a92199e05 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -34,15 +34,6 @@
#include "input_system_ctrl_defs.h"
typedef enum {
- INPUT_SYSTEM_ERR_NO_ERROR = 0,
- INPUT_SYSTEM_ERR_GENERIC,
- INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET,
- INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE,
- INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED,
- N_INPUT_SYSTEM_ERR
-} input_system_error_t;
-
-typedef enum {
INPUT_SYSTEM_PORT_A = 0,
INPUT_SYSTEM_PORT_B,
INPUT_SYSTEM_PORT_C,
@@ -61,8 +52,8 @@ typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
typedef struct input_switch_cfg_s input_switch_cfg_t;
struct ctrl_unit_cfg_s {
- ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
- ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
+ isp2400_ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
+ isp2400_ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
};
struct input_system_network_cfg_s {
@@ -137,9 +128,9 @@ struct input_system_cfg2400_s {
// Possible another struct for ib.
// This buffers set at the end, based on the all configurations.
- ib_buffer_t csi_buffer[N_CSI_PORTS];
+ isp2400_ib_buffer_t csi_buffer[N_CSI_PORTS];
input_system_config_flags_t csi_buffer_flags[N_CSI_PORTS];
- ib_buffer_t acquisition_buffer_unique;
+ isp2400_ib_buffer_t acquisition_buffer_unique;
input_system_config_flags_t acquisition_buffer_unique_flags;
u32 unallocated_ib_mem_words; // Used for check. DEFAULT = IB_CAPACITY_IN_WORDS.
//uint32_t acq_allocated_ib_mem_words;
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
index 689e451f1ce2..85cb61e34192 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
@@ -17,7 +17,7 @@
#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
#include <type_support.h>
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "isys_public.h"
#else
@@ -251,11 +251,11 @@ STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_sub_system_reg_load(
// Function that resets current configuration.
// remove the argument since it should be private.
-input_system_error_t input_system_configuration_reset(void);
+input_system_err_t input_system_configuration_reset(void);
// Function that commits current configuration.
// remove the argument since it should be private.
-input_system_error_t input_system_configuration_commit(void);
+input_system_err_t input_system_configuration_commit(void);
///////////////////////////////////////////////////////////////////////////
//
@@ -269,14 +269,14 @@ input_system_error_t input_system_configuration_commit(void);
// FIFO channel config function user
-input_system_error_t input_system_csi_fifo_channel_cfg(
+input_system_err_t input_system_csi_fifo_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
target_cfg2400_t target
);
-input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+input_system_err_t input_system_csi_fifo_channel_with_counting_cfg(
u32 ch_id,
u32 nof_frame,
input_system_csi_port_t port,
@@ -288,7 +288,7 @@ input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
// SRAM channel config function user
-input_system_error_t input_system_csi_sram_channel_cfg(
+input_system_err_t input_system_csi_sram_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
@@ -299,7 +299,7 @@ input_system_error_t input_system_csi_sram_channel_cfg(
//XMEM channel config function user
-input_system_error_t input_system_csi_xmem_channel_cfg(
+input_system_err_t input_system_csi_xmem_channel_cfg(
u32 ch_id,
input_system_csi_port_t port,
backend_channel_cfg_t backend_ch,
@@ -311,7 +311,7 @@ input_system_error_t input_system_csi_xmem_channel_cfg(
uint32_t nof_xmem_buffers
);
-input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+input_system_err_t input_system_csi_xmem_capture_only_channel_cfg(
u32 ch_id,
u32 nof_frames,
input_system_csi_port_t port,
@@ -322,7 +322,7 @@ input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
target_cfg2400_t target
);
-input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+input_system_err_t input_system_csi_xmem_acquire_only_channel_cfg(
u32 ch_id,
u32 nof_frames,
input_system_csi_port_t port,
@@ -334,7 +334,7 @@ input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
// Non - CSI channel config function user
-input_system_error_t input_system_prbs_channel_cfg(
+input_system_err_t input_system_prbs_channel_cfg(
u32 ch_id,
u32 nof_frames,
u32 seed,
@@ -345,7 +345,7 @@ input_system_error_t input_system_prbs_channel_cfg(
target_cfg2400_t target
);
-input_system_error_t input_system_tpg_channel_cfg(
+input_system_err_t input_system_tpg_channel_cfg(
u32 ch_id,
u32 nof_frames, //not used yet
u32 x_mask,
@@ -360,11 +360,11 @@ input_system_error_t input_system_tpg_channel_cfg(
target_cfg2400_t target
);
-input_system_error_t input_system_gpfifo_channel_cfg(
+input_system_err_t input_system_gpfifo_channel_cfg(
u32 ch_id,
u32 nof_frames,
target_cfg2400_t target
);
-#endif /* #ifdef USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* #ifdef ISP2401 */
#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
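All the 2400-side prototypes above move from input_system_error_t to input_system_err_t, the spelling the 2401 code already uses; the duplicate per-variant enums and the compatibility typedef are dropped in nearby hunks, presumably consolidated in a shared header elsewhere in the series, since the new input_system_get_state() below still returns INPUT_SYSTEM_ERR_NO_ERROR. Call sites keep their shape; a usage sketch (the errno mapping is illustrative, not taken from the driver):

input_system_err_t err;

err = input_system_configuration_reset();
if (err != INPUT_SYSTEM_ERR_NO_ERROR)
	return -EIO;	/* illustrative mapping onto a kernel error code */

err = input_system_configuration_commit();
if (err != INPUT_SYSTEM_ERR_NO_ERROR)
	return -EIO;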
diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_system_global.h
deleted file mode 100644
index 74fff465e8e8..000000000000
--- a/drivers/staging/media/atomisp/pci/isp2400_system_global.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#define USE_INPUT_SYSTEM_VERSION_2
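The deleted header carried a single define, USE_INPUT_SYSTEM_VERSION_2. After this series the input-system variant is keyed off the one ISP2401 symbol, as the public-header hunk above already shows, so version selection reduces to (sketch mirroring that hunk):

#ifdef ISP2401
#include "isys_public.h"	/* ISP2401 input system */
#else
/* ISP2400 declarations follow in the same header */
#endif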
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
index 5070e651f7c4..f38773842646 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_global.h
@@ -13,19 +13,15 @@
* more details.
*/
-#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
-#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
-
-#define IS_INPUT_SYSTEM_VERSION_VERSION_2401
-
/* CSI receiver has 3 ports. */
#define N_CSI_PORTS (3)
-#include "isys_dma.h" /* isys2401_dma_channel,
+#include "system_local.h"
+#include "isys_dma_global.h" /* isys2401_dma_channel,
* isys2401_dma_cfg_t
*/
-#include "ibuf_ctrl.h" /* ibuf_cfg_t,
+#include "ibuf_ctrl_local.h" /* ibuf_cfg_t,
* ibuf_ctrl_cfg_t
*/
@@ -41,18 +37,6 @@
virtual channels supported*/
typedef enum {
- INPUT_SYSTEM_ERR_NO_ERROR = 0,
- INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL,
- INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL,
- INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL,
- INPUT_SYSTEM_ERR_TRANSFER_FAIL,
- INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL,
- INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL,
- INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL,
- N_INPUT_SYSTEM_ERR
-} input_system_err_t;
-
-typedef enum {
INPUT_SYSTEM_SOURCE_TYPE_UNDEFINED = 0,
INPUT_SYSTEM_SOURCE_TYPE_SENSOR,
INPUT_SYSTEM_SOURCE_TYPE_TPG,
@@ -71,7 +55,7 @@ struct input_system_channel_s {
stream2mmio_sid_ID_t stream2mmio_sid_id;
ibuf_ctrl_ID_t ibuf_ctrl_id;
- ib_buffer_t ib_buffer;
+ isp2401_ib_buffer_t ib_buffer;
isys2401_dma_ID_t dma_id;
isys2401_dma_channel dma_channel;
@@ -121,8 +105,8 @@ struct input_system_input_port_cfg_s {
} pixelgen_cfg;
};
-typedef struct input_system_cfg_s input_system_cfg_t;
-struct input_system_cfg_s {
+typedef struct isp2401_input_system_cfg_s isp2401_input_system_cfg_t;
+struct isp2401_input_system_cfg_s {
input_system_input_port_ID_t input_port_id;
input_system_source_type_t mode;
@@ -202,5 +186,3 @@ struct virtual_input_system_stream_cfg_s {
#define NUM_OF_LINES_PER_BUF 2
#define LINES_OF_ISP_INPUT_BUF (NUM_OF_INPUT_BUF * NUM_OF_LINES_PER_BUF)
#define ISP_INPUT_BUF_STRIDE SH_CSS_MAX_SENSOR_WIDTH
-
-#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
index f52a8ca5f86b..24026090cd35 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
@@ -19,14 +19,11 @@
#include "type_support.h"
#include "input_system_global.h"
-#include "ibuf_ctrl.h"
#include "csi_rx.h"
#include "pixelgen.h"
#include "isys_stream2mmio.h"
#include "isys_irq.h"
-typedef input_system_err_t input_system_error_t;
-
typedef enum {
MIPI_FORMAT_SHORT1 = 0x08,
MIPI_FORMAT_SHORT2,
diff --git a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
index f3ca5d1bcb01..e4c76428f6dd 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
@@ -18,9 +18,222 @@
#include "input_system_public.h"
-STORAGE_CLASS_INPUT_SYSTEM_C input_system_err_t input_system_get_state(
- const input_system_ID_t ID,
- input_system_state_t *state)
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+/* Load the register value */
+static inline hrt_data ibuf_ctrl_reg_load(const ibuf_ctrl_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+/* Store a value to the register */
+static inline void ibuf_ctrl_reg_store(const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg * sizeof(hrt_data), value);
+}
+
+/* Get the state of the ibuf-controller process */
+static inline void ibuf_ctrl_get_proc_state(const ibuf_ctrl_ID_t ID,
+ const u32 proc_id,
+ ibuf_ctrl_proc_state_t *state)
+{
+ hrt_address reg_bank_offset;
+
+ reg_bank_offset =
+ _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);
+
+ state->num_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE);
+
+ state->num_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME);
+
+ state->dma_channel =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL);
+
+ state->dma_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD);
+
+ state->ibuf_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS);
+
+ state->ibuf_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE);
+
+ state->ibuf_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS);
+
+ state->dest_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS);
+
+ state->dest_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE);
+
+ state->dest_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS);
+
+ state->sync_frame =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME);
+
+ state->sync_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD);
+
+ state->store_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD);
+
+ state->shift_returned_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS);
+
+ state->elems_ibuf =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF);
+
+ state->elems_dest =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST);
+
+ state->cur_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES);
+
+ state->cur_acks =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ACKS);
+
+ state->cur_s2m_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR);
+
+ state->cur_dma_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR);
+
+ state->cur_dma_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR);
+
+ state->cur_isp_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR);
+
+ state->dma_cmds_send =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND);
+
+ state->main_cntrl_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE);
+
+ state->dma_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE);
+
+ state->isp_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE);
+}
+
+/* Get the ibuf-controller state. */
+static inline void ibuf_ctrl_get_state(const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ state->recalc_words =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS);
+ state->arbiters =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS);
+
+ /*
+ * Get the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ibuf_ctrl_get_proc_state(
+ ID,
+ i,
+ &state->proc_state[i]);
+ }
+}
+
+/* Dump the ibuf-controller state */
+static inline void ibuf_ctrl_dump_state(const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ u32 i;
+
+ ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID,
+ state->recalc_words);
+ ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters);
+
+ /*
+ * Dump the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i,
+ state->proc_state[i].num_items);
+ ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i,
+ state->proc_state[i].num_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i,
+ state->proc_state[i].dma_channel);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i,
+ state->proc_state[i].dma_command);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].ibuf_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i,
+ state->proc_state[i].dest_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i,
+ state->proc_state[i].dest_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i,
+ state->proc_state[i].sync_frame);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i,
+ state->proc_state[i].sync_command);
+ ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i,
+ state->proc_state[i].store_command);
+ ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n",
+ ID, i,
+ state->proc_state[i].shift_returned_items);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i,
+ state->proc_state[i].elems_ibuf);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i,
+ state->proc_state[i].elems_dest);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i,
+ state->proc_state[i].cur_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i,
+ state->proc_state[i].cur_acks);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_s2m_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_dma_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID,
+ i,
+ state->proc_state[i].cur_isp_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i,
+ state->proc_state[i].dma_cmds_send);
+ ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID,
+ i,
+ state->proc_state[i].main_cntrl_state);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].dma_sync_state);
+ ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i,
+ state->proc_state[i].isp_sync_state);
+ }
+}
+
+static inline input_system_err_t
+input_system_get_state(const input_system_ID_t ID,
+ input_system_state_t *state)
{
u32 i;
@@ -73,9 +286,8 @@ STORAGE_CLASS_INPUT_SYSTEM_C input_system_err_t input_system_get_state(
return INPUT_SYSTEM_ERR_NO_ERROR;
}
-STORAGE_CLASS_INPUT_SYSTEM_C void input_system_dump_state(
- const input_system_ID_t ID,
- input_system_state_t *state)
+static inline void input_system_dump_state(const input_system_ID_t ID,
+ input_system_state_t *state)
{
u32 i;
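Instead of relying on the removed ibuf_ctrl.h, the private header now carries its own static inline register accessors plus the state reader and dumper built on them. A hedged sketch of how the new helpers compose (the walking routine itself is illustrative; the types, constants, and functions are the ones declared above):

static void debug_dump_all_ibuf_ctrl(void)
{
	ibuf_ctrl_state_t state;
	unsigned int id;

	for (id = 0; id < N_IBUF_CTRL_ID; id++) {
		ibuf_ctrl_get_state((ibuf_ctrl_ID_t)id, &state);
		ibuf_ctrl_dump_state((ibuf_ctrl_ID_t)id, &state);
	}
}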
diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_system_global.h
deleted file mode 100644
index 27cd2535bab8..000000000000
--- a/drivers/staging/media/atomisp/pci/isp2401_system_global.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#define HAS_NO_INPUT_FORMATTER
-#define USE_INPUT_SYSTEM_VERSION_2401
-#define HAS_INPUT_SYSTEM_VERSION_2401
-#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE
diff --git a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
index 9813014c3fd3..060d38749570 100644
--- a/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
@@ -135,52 +135,30 @@ struct sh_css_binary_sc_requirements {
at shading correction. */
};
-/* Get the requirements for the shading correction. */
+/* ISP2400: Get the requirements for the shading correction. */
static int
-#ifndef ISP2401
ia_css_binary_compute_shading_table_bayer_origin(
const struct ia_css_binary *binary, /* [in] */
unsigned int required_bds_factor, /* [in] */
const struct ia_css_stream_config *stream_config, /* [in] */
struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */
-#else
-sh_css_binary_get_sc_requirements(
- const struct ia_css_binary *binary, /* [in] */
- unsigned int required_bds_factor, /* [in] */
- const struct ia_css_stream_config *stream_config, /* [in] */
- struct sh_css_binary_sc_requirements *scr) /* [out] */
-#endif
{
int err;
-#ifndef ISP2401
/* Numerator and denominator of the fixed bayer downscaling factor.
(numerator >= denominator) */
-#else
- /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */
-#endif
unsigned int bds_num, bds_den;
-#ifndef ISP2401
/* Horizontal/Vertical ratio of bayer scaling
between input area and output area. */
unsigned int bs_hor_ratio_in;
unsigned int bs_hor_ratio_out;
unsigned int bs_ver_ratio_in;
unsigned int bs_ver_ratio_out;
-#else
- /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */
- unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out;
-#endif
/* Left padding set by InputFormatter. */
-#ifndef ISP2401
unsigned int left_padding_bqs; /* in bqs */
-#else
- unsigned int left_padding_bqs;
-#endif
-#ifndef ISP2401
/* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */
unsigned int need_bds_factor_2_00;
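The hunks that follow untangle a function whose ISP2400 and ISP2401 bodies had been interleaved statement by statement under #ifndef ISP2401. After the split, ia_css_binary_compute_shading_table_bayer_origin() carries the 2400 logic and sh_css_binary_get_sc_requirements() the 2401 logic, and both are compiled into one image. The shape of the change (conceptual sketch; the run-time dispatch shown is an assumption about the callers, not part of these hunks):

/* before: one body, meaning switched at preprocessing time */
#ifndef ISP2401
	/* ...2400-only statements... */
#else
	/* ...2401-only statements... */
#endif

/* after: two plain functions, selectable at run time, e.g.: */
if (IS_ISP2401)
	err = sh_css_binary_get_sc_requirements(binary, factor, cfg, &scr);
else
	err = ia_css_binary_compute_shading_table_bayer_origin(binary, factor, cfg, &res);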
@@ -201,7 +179,106 @@ sh_css_binary_get_sc_requirements(
err = sh_css_bds_factor_get_numerator_denominator
(required_bds_factor, &bds_num, &bds_den);
if (err)
-#else
+ return err;
+
+ /* Set the horizontal/vertical ratio of bayer scaling
+ between input area and output area. */
+ bs_hor_ratio_in = bds_num;
+ bs_hor_ratio_out = bds_den;
+ bs_ver_ratio_in = bds_num;
+ bs_ver_ratio_out = bds_den;
+
+ /* Set the left padding set by InputFormatter. (ifmtr.c) */
+ if (stream_config->left_padding == -1)
+ left_padding_bqs = _ISP_BQS(binary->left_padding);
+ else
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS
+ - _ISP_BQS(stream_config->left_padding));
+
+ /* Set the left padding adjusted inside the isp.
+ When bds_factor 2.00 is needed, some padding is added to left_padding
+ inside the isp, before bayer downscaling. (raw.isp.c)
+ (Hopefully, left_crop/left_padding/top_crop should be defined in css
+ appropriately, depending on bds_factor.)
+ */
+ need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+
+ if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0)
+ left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS;
+ else
+ left_padding_adjusted_bqs = left_padding_bqs;
+
+ /* Currently, the bad pixel caused by filters before bayer scaling
+ is NOT considered, because the bad pixel is subtle.
+ When some large filter is used in the future,
+ we need to consider the bad pixel.
+
+ Currently, when bds_factor isn't 1.00, 3x3 anti-alias filter is applied
+ to each color plane(Gr/R/B/Gb) before bayer downscaling.
+ This filter moves each color plane to right/bottom directions
+ by 1 pixel at the most, depending on downscaling factor.
+ */
+ bad_bqs_on_left_before_bs = 0;
+ bad_bqs_on_top_before_bs = 0;
+
+ /* Currently, the bad pixel caused by filters after bayer scaling
+ is NOT considered, because the bad pixel is subtle.
+ When some large filter is used in the future,
+ we need to consider the bad pixel.
+
+ Currently, when DPC&BNR is processed between bayer scaling and
+ shading correction, DPC&BNR moves each color plane to
+ right/bottom directions by 1 pixel.
+ */
+ bad_bqs_on_left_after_bs = 0;
+ bad_bqs_on_top_after_bs = 0;
+
+ /* Calculate the origin of bayer (real sensor data area)
+ located on the shading table during the shading correction. */
+ res->sc_bayer_origin_x_bqs_on_shading_table =
+ ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs)
+ * bs_hor_ratio_out + bs_hor_ratio_in / 2) / bs_hor_ratio_in
+ + bad_bqs_on_left_after_bs;
+ /* "+ bs_hor_ratio_in/2": rounding for division by bs_hor_ratio_in */
+ res->sc_bayer_origin_y_bqs_on_shading_table =
+ (bad_bqs_on_top_before_bs * bs_ver_ratio_out + bs_ver_ratio_in / 2) / bs_ver_ratio_in
+ + bad_bqs_on_top_after_bs;
+ /* "+ bs_ver_ratio_in/2": rounding for division by bs_ver_ratio_in */
+
+ res->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+ res->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+ res->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+ res->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+
+ return err;
+}
+
+/* ISP2401: Get the requirements for the shading correction. */
+static int
+sh_css_binary_get_sc_requirements(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_binary_sc_requirements *scr) /* [out] */
+{
+ int err;
+
+ /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */
+ unsigned int bds_num, bds_den;
+
+ /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */
+ unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out;
+
+ /* Left padding set by InputFormatter. */
+ unsigned int left_padding_bqs;
+
/* Flags corresponding to NEED_BDS_FACTOR_2_00/NEED_BDS_FACTOR_1_50/NEED_BDS_FACTOR_1_25 macros
* defined in isp kernels. */
unsigned int need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25;
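Both requirement helpers lean on the same integer idiom for their origin math: (x * out + in / 2) / in adds half the divisor before dividing, which rounds to the nearest integer rather than truncating. Worked numbers for bds factor 2.00 (out = 1, in = 2): x = 13 gives (13 * 1 + 1) / 2 = 7, where plain truncation would give 6. For non-negative operands this is exactly what the kernel's DIV_ROUND_CLOSEST() macro expands to:

/* equivalent spellings for unsigned operands */
unsigned int scaled  = (x * bs_out + bs_in / 2) / bs_in;	/* as in this file */
unsigned int scaled2 = DIV_ROUND_CLOSEST(x * bs_out, bs_in);	/* kernel helper */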
@@ -225,318 +302,201 @@ sh_css_binary_get_sc_requirements(
unsigned int sensor_data_origin_x_bqs_on_internal;
unsigned int sensor_data_origin_y_bqs_on_internal;
+ unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+
IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
binary, required_bds_factor, stream_config);
/* Get the numerator and denominator of the required bayer downscaling factor. */
- err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor, &bds_num, &bds_den);
- if (err)
- {
+ err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor,
+ &bds_num, &bds_den);
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
-#endif
return err;
-#ifdef ISP2401
-}
-#endif
-
-#ifndef ISP2401
-/* Set the horizontal/vertical ratio of bayer scaling
-between input area and output area. */
-#else
-IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den);
-
-/* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */
-#endif
-bs_hor_ratio_in = bds_num;
-bs_hor_ratio_out = bds_den;
-bs_ver_ratio_in = bds_num;
-bs_ver_ratio_out = bds_den;
+ }
-#ifndef ISP2401
-/* Set the left padding set by InputFormatter. (ifmtr.c) */
-#else
-/* Set the left padding set by InputFormatter. (ia_css_ifmtr_configure() in ifmtr.c) */
-#endif
-if (stream_config->left_padding == -1)
- left_padding_bqs = _ISP_BQS(binary->left_padding);
-else
-#ifndef ISP2401
- left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS
- - _ISP_BQS(stream_config->left_padding));
-#else
- left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding));
-#endif
+ IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den);
-#ifndef ISP2401
-/* Set the left padding adjusted inside the isp.
-When bds_factor 2.00 is needed, some padding is added to left_padding
-inside the isp, before bayer downscaling. (raw.isp.c)
-(Hopefully, left_crop/left_padding/top_crop should be defined in css
-appropriately, depending on bds_factor.)
-*/
-#else
-IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d",
- stream_config->left_padding, binary->left_padding, left_padding_bqs);
+ /* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */
+ bs_hor_ratio_in = bds_num;
+ bs_hor_ratio_out = bds_den;
+ bs_ver_ratio_in = bds_num;
+ bs_ver_ratio_out = bds_den;
-/* Set the left padding adjusted inside the isp kernels.
- * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp,
- * before bayer downscaling. (scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c)
- */
-#endif
-need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+ /* Set the left padding set by InputFormatter. (ia_css_ifmtr_configure() in ifmtr.c) */
+ if (stream_config->left_padding == -1)
+ left_padding_bqs = _ISP_BQS(binary->left_padding);
+ else
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding));
-#ifndef ISP2401
-if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0)
- left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS;
-else
-#else
-need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0);
-
-need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors &
- (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
- PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0);
-
-if (binary->info->sp.pipeline.left_cropping > 0 &&
- (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25))
-{
- /*
- * downscale 2.0 -> first_vec_adjusted_bqs = 128
- * downscale 1.5 -> first_vec_adjusted_bqs = 96
- * downscale 1.25 -> first_vec_adjusted_bqs = 80
- */
- unsigned int first_vec_adjusted_bqs
- = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out;
- left_padding_adjusted_bqs = first_vec_adjusted_bqs
- - _ISP_BQS(binary->info->sp.pipeline.left_cropping);
-} else
-#endif
- left_padding_adjusted_bqs = left_padding_bqs;
+ IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d",
+ stream_config->left_padding, binary->left_padding,
+ left_padding_bqs);
-#ifndef ISP2401
-/* Currently, the bad pixel caused by filters before bayer scaling
-is NOT considered, because the bad pixel is subtle.
-When some large filter is used in the future,
-we need to consider the bad pixel.
-
-Currently, when bds_factor isn't 1.00, 3x3 anti-alias filter is applied
-to each color plane(Gr/R/B/Gb) before bayer downscaling.
-This filter moves each color plane to right/bottom directions
-by 1 pixel at the most, depending on downscaling factor.
-*/
-bad_bqs_on_left_before_bs = 0;
-bad_bqs_on_top_before_bs = 0;
-#else
-IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d",
- binary->info->sp.bds.supported_bds_factors,
- need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25);
-IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d",
- binary->info->sp.pipeline.left_cropping, left_padding_adjusted_bqs);
-
-/* Set the top padding padded inside the isp kernel for bayer downscaling binaries.
- * When the bds_factor isn't 1.00, the top padding is padded inside the isp
- * before bayer downscaling, because the top cropping size (input margin) is not enough.
- * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c)
- * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is got by get_bds_factor_for_dma_read().
- * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200.
- */
-top_padding_bqs = 0;
-if (binary->info->sp.pipeline.top_cropping > 0 &&
- (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 ||
- required_bds_factor == SH_CSS_BDS_FACTOR_1_50 ||
- required_bds_factor == SH_CSS_BDS_FACTOR_2_00))
-{
- /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. */
- int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping);
- /* top cropping (in bqs) */
- int factor = bds_num * bds_frac_acc /
- bds_den; /* downscaling factor by fixed-point */
- int top_padding_bqsxfrac_acc = (top_cropping_bqs * factor - top_cropping_bqs *
- bds_frac_acc)
- + (2 * bds_frac_acc - factor); /* top padding by fixed-point (in bqs) */
-
- top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc / 2 -
- 1) / bds_frac_acc);
-}
+ /* Set the left padding adjusted inside the isp kernels.
+ * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp,
+ * before bayer downscaling. (scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c)
+ */
+ need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+
+ need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0);
+
+ need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0);
+
+ if (binary->info->sp.pipeline.left_cropping > 0 &&
+ (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)) {
+ /*
+ * downscale 2.0 -> first_vec_adjusted_bqs = 128
+ * downscale 1.5 -> first_vec_adjusted_bqs = 96
+ * downscale 1.25 -> first_vec_adjusted_bqs = 80
+ */
+ unsigned int first_vec_adjusted_bqs = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out;
+ left_padding_adjusted_bqs = first_vec_adjusted_bqs
+ - _ISP_BQS(binary->info->sp.pipeline.left_cropping);
+ } else {
+ left_padding_adjusted_bqs = left_padding_bqs;
+ }
-IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d", binary->info->sp.pipeline.top_cropping, top_padding_bqs);
+ IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d",
+ binary->info->sp.bds.supported_bds_factors,
+ need_bds_factor_2_00, need_bds_factor_1_50,
+ need_bds_factor_1_25);
+ IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d",
+ binary->info->sp.pipeline.left_cropping,
+ left_padding_adjusted_bqs);
+
+ /* Set the top padding padded inside the isp kernel for bayer downscaling binaries.
+ * When the bds_factor isn't 1.00, the top padding is padded inside the isp
+ * before bayer downscaling, because the top cropping size (input margin) is not enough.
+ * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c)
+ * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is obtained by get_bds_factor_for_dma_read().
+ * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200.
+ */
+ top_padding_bqs = 0;
+ if (binary->info->sp.pipeline.top_cropping > 0 &&
+ (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_1_50 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_2_00)) {
+ /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. */
+ int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping);
+ /* top cropping (in bqs) */
+ int factor = bds_num * bds_frac_acc /
+ bds_den; /* downscaling factor by fixed-point */
+ int top_padding_bqsxfrac_acc = (top_cropping_bqs * factor - top_cropping_bqs *
+ bds_frac_acc)
+ + (2 * bds_frac_acc - factor); /* top padding by fixed-point (in bqs) */
+
+ top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc / 2 -
+ 1) / bds_frac_acc);
+ }
-/* Set the right/down shift amount caused by filters applied BEFORE bayer scaling,
- * which scaling is applied BEFORE shading corrertion.
- *
- * When the bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb)
- * before bayer downscaling.
- * This filter shifts each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
- */
-right_shift_bqs_before_bs = 0;
-down_shift_bqs_before_bs = 0;
-#endif
+ IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d",
+ binary->info->sp.pipeline.top_cropping, top_padding_bqs);
-#ifndef ISP2401
-/* Currently, the bad pixel caused by filters after bayer scaling
-is NOT considered, because the bad pixel is subtle.
-When some large filter is used in the future,
-we need to consider the bad pixel.
-
-Currently, when DPC&BNR is processed between bayer scaling and
-shading correction, DPC&BNR moves each color plane to
-right/bottom directions by 1 pixel.
-*/
-bad_bqs_on_left_after_bs = 0;
-bad_bqs_on_top_after_bs = 0;
-#else
-if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)
-{
- right_shift_bqs_before_bs = 1;
- down_shift_bqs_before_bs = 1;
-}
+ /* Set the right/down shift amount caused by filters applied BEFORE bayer scaling,
+ * where the scaling itself is applied BEFORE shading correction.
+ *
+ * When the bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb)
+ * before bayer downscaling.
+ * This filter shifts each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
+ */
+ right_shift_bqs_before_bs = 0;
+ down_shift_bqs_before_bs = 0;
-IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d",
- right_shift_bqs_before_bs, down_shift_bqs_before_bs);
+ if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25) {
+ right_shift_bqs_before_bs = 1;
+ down_shift_bqs_before_bs = 1;
+ }
-/* Set the right/down shift amount caused by filters applied AFTER bayer scaling,
- * which scaling is applied BEFORE shading corrertion.
- *
- * When DPC&BNR is processed between bayer scaling and shading correction,
- * DPC&BNR moves each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
- */
-right_shift_bqs_after_bs = 0;
-down_shift_bqs_after_bs = 0;
-#endif
+ IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d",
+ right_shift_bqs_before_bs, down_shift_bqs_before_bs);
-#ifndef ISP2401
-/* Calculate the origin of bayer (real sensor data area)
-located on the shading table during the shading correction. */
-res->sc_bayer_origin_x_bqs_on_shading_table
-= ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs)
- * bs_hor_ratio_out + bs_hor_ratio_in / 2) / bs_hor_ratio_in
-+ bad_bqs_on_left_after_bs;
-/* "+ bs_hor_ratio_in/2": rounding for division by bs_hor_ratio_in */
-res->sc_bayer_origin_y_bqs_on_shading_table
-= (bad_bqs_on_top_before_bs
- * bs_ver_ratio_out + bs_ver_ratio_in / 2) / bs_ver_ratio_in
-+ bad_bqs_on_top_after_bs;
-/* "+ bs_ver_ratio_in/2": rounding for division by bs_ver_ratio_in */
-
-res->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
-res->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
-res->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
-res->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
-#else
-if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) /* if DPC&BNR is enabled in the binary */
-{
- right_shift_bqs_after_bs = 1;
- down_shift_bqs_after_bs = 1;
-}
+ /* Set the right/down shift amount caused by filters applied AFTER bayer scaling,
+ * where the scaling itself is applied BEFORE shading correction.
+ *
+ * When DPC&BNR is processed between bayer scaling and shading correction,
+ * DPC&BNR moves each color plane (Gr/R/B/Gb) to right/down directions by 1 pixel.
+ */
+ right_shift_bqs_after_bs = 0;
+ down_shift_bqs_after_bs = 0;
-IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d",
- right_shift_bqs_after_bs, down_shift_bqs_after_bs);
+ /* if DPC&BNR is enabled in the binary */
+ if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) {
+ right_shift_bqs_after_bs = 1;
+ down_shift_bqs_after_bs = 1;
+ }
-/* Set the origin of the sensor data area on the internal frame at shading correction. */
-{
- unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */
- unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+ IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d",
+ right_shift_bqs_after_bs, down_shift_bqs_after_bs);
bs_out = bs_hor_ratio_out * bs_frac;
bs_in = bs_hor_ratio_in * bs_frac;
- sensor_data_origin_x_bqs_on_internal
- = ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
- + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+ sensor_data_origin_x_bqs_on_internal =
+ ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
+ + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
bs_out = bs_ver_ratio_out * bs_frac;
bs_in = bs_ver_ratio_in * bs_frac;
- sensor_data_origin_y_bqs_on_internal
- = ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
- + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
-}
-
-scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
-scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
-scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
-scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
-scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal;
-scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal;
-
-IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d",
- scr->bayer_scale_hor_ratio_in, scr->bayer_scale_hor_ratio_out,
- scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out,
- scr->sensor_data_origin_x_bqs_on_internal, scr->sensor_data_origin_y_bqs_on_internal);
-#endif
+ sensor_data_origin_y_bqs_on_internal =
+ ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in / 2) / bs_in
+ + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+
+ scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+ scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+ scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+ scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+ scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal;
+ scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal;
+
+ IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d",
+ scr->bayer_scale_hor_ratio_in,
+ scr->bayer_scale_hor_ratio_out,
+ scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out,
+ scr->sensor_data_origin_x_bqs_on_internal,
+ scr->sensor_data_origin_y_bqs_on_internal);
-#ifdef ISP2401
-IA_CSS_LEAVE_ERR_PRIVATE(err);
-#endif
-return err;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
}
/* Get the shading information of Shading Correction Type 1. */
static int
-ia_css_binary_get_shading_info_type_1(const struct ia_css_binary
- *binary, /* [in] */
- unsigned int required_bds_factor, /* [in] */
- const struct ia_css_stream_config *stream_config, /* [in] */
-#ifndef ISP2401
- struct ia_css_shading_info *info) /* [out] */
-#else
- struct ia_css_shading_info *shading_info, /* [out] */
- struct ia_css_pipe_config *pipe_config) /* [out] */
-#endif
+isp2400_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *info) /* [out] */
{
int err;
-#ifndef ISP2401
struct sh_css_shading_table_bayer_origin_compute_results res;
-#else
- struct sh_css_binary_sc_requirements scr;
-#endif
-#ifndef ISP2401
assert(binary);
assert(info);
-#else
- u32 in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs;
- u32 num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs;
- u32 sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs;
- u32 sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal;
- u32 left, right, upper, lower;
- u32 adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs;
- u32 internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl;
- u32 sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl;
-#endif
-#ifndef ISP2401
info->type = IA_CSS_SHADING_CORRECTION_TYPE_1;
-#else
- assert(binary);
- assert(stream_config);
- assert(shading_info);
- assert(pipe_config);
-#endif
-#ifndef ISP2401
info->info.type_1.enable = binary->info->sp.enable.sc;
info->info.type_1.num_hor_grids = binary->sctbl_width_per_color;
info->info.type_1.num_ver_grids = binary->sctbl_height;
info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2);
-#else
- IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
- binary, required_bds_factor, stream_config);
-#endif
/* Initialize by default values. */
-#ifndef ISP2401
info->info.type_1.bayer_scale_hor_ratio_in = 1;
info->info.type_1.bayer_scale_hor_ratio_out = 1;
info->info.type_1.bayer_scale_ver_ratio_in = 1;
@@ -550,158 +510,186 @@ ia_css_binary_get_shading_info_type_1(const struct ia_css_binary
stream_config,
&res);
if (err)
-#else
+ return err;
+
+ info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in;
+ info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out;
+ info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in;
+ info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table;
+
+ return err;
+}
+
+/* Get the shading information of Shading Correction Type 1. */
+static int
+isp2401_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+{
+ int err;
+ struct sh_css_binary_sc_requirements scr;
+
+ u32 in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs;
+ u32 num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs;
+ u32 sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs;
+ u32 sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal;
+ u32 left, right, upper, lower;
+ u32 adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs;
+ u32 internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl;
+ u32 sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl;
+
+ assert(binary);
+ assert(stream_config);
+ assert(shading_info);
+ assert(pipe_config);
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
+ binary, required_bds_factor, stream_config);
+
+ /* Initialize by default values. */
*shading_info = DEFAULT_SHADING_INFO_TYPE_1;
err = sh_css_binary_get_sc_requirements(binary, required_bds_factor, stream_config, &scr);
- if (err)
- {
+ if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
-#endif
return err;
-#ifdef ISP2401
-}
+ }
-IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d",
- binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2);
-IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d",
- binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width,
- binary->internal_frame_info.res.width, binary->internal_frame_info.res.height,
- binary->internal_frame_info.padded_width,
- binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height,
- binary->out_frame_info[0].padded_width);
-
-/* Set the input size from sensor, which includes left/top crop size. */
-in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width);
-in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height);
-
-/* Frame size internally used in ISP, including sensor data and padding.
- * This is the frame size, to which the shading correction is applied.
- */
-internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width);
-internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height);
-
-/* Shading table. */
-num_hor_grids = binary->sctbl_width_per_color;
-num_ver_grids = binary->sctbl_height;
-bqs_per_grid_cell = (1 << binary->deci_factor_log2);
-tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell;
-tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell;
-#endif
+ IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d",
+ binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2);
+ IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d",
+ binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width,
+ binary->internal_frame_info.res.width, binary->internal_frame_info.res.height,
+ binary->internal_frame_info.padded_width,
+ binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height,
+ binary->out_frame_info[0].padded_width);
-#ifndef ISP2401
-info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in;
-info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out;
-info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in;
-info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out;
-info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table;
-info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table;
-#else
-IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs);
-#endif
+ /* Set the input size from sensor, which includes left/top crop size. */
+ in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width);
+ in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height);
-#ifdef ISP2401
-/* Real sensor data area on the internal frame at shading correction.
- * Filters and scaling are applied to the internal frame before shading correction, depending on the binary.
- */
-sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal;
-sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal;
-{
- unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */
- unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+ /*
+ * Frame size internally used in ISP, including sensor data and padding.
+ * This is the frame size, to which the shading correction is applied.
+ */
+ internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width);
+ internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height);
- bs_out = scr.bayer_scale_hor_ratio_out * bs_frac;
- bs_in = scr.bayer_scale_hor_ratio_in * bs_frac;
- sensor_width_bqs = (in_width_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
+ /* Shading table. */
+ num_hor_grids = binary->sctbl_width_per_color;
+ num_ver_grids = binary->sctbl_height;
+ bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+ tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell;
+ tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell;
- bs_out = scr.bayer_scale_ver_ratio_out * bs_frac;
- bs_in = scr.bayer_scale_ver_ratio_in * bs_frac;
- sensor_height_bqs = (in_height_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
-}
+ IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs);
+
+ /*
+ * Real sensor data area on the internal frame at shading correction.
+ * Filters and scaling are applied to the internal frame before
+ * shading correction, depending on the binary.
+ */
+ sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal;
+ sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal;
+ {
+ unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
-/* Center of the sensor data on the internal frame at shading correction. */
-sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2;
-sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2;
+ bs_out = scr.bayer_scale_hor_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_hor_ratio_in * bs_frac;
+ sensor_width_bqs = (in_width_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
-/* Size of left/right/upper/lower sides of the sensor center on the internal frame. */
-left = sensor_center_x_bqs_on_internal;
-right = internal_width_bqs - sensor_center_x_bqs_on_internal;
-upper = sensor_center_y_bqs_on_internal;
-lower = internal_height_bqs - sensor_center_y_bqs_on_internal;
+ bs_out = scr.bayer_scale_ver_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_ver_ratio_in * bs_frac;
+ sensor_height_bqs = (in_height_bqs * bs_out + bs_in / 2) / bs_in; /* "+ bs_in/2": rounding */
+ }
-/* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */
-adjust_left = CEIL_MUL(left, bqs_per_grid_cell);
-adjust_right = CEIL_MUL(right, bqs_per_grid_cell);
-adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell);
-adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell);
+ /* Center of the sensor data on the internal frame at shading correction. */
+ sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2;
+ sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2;
-/* Shading table should cover the adjusted frame size. */
-adjust_width_bqs = adjust_left + adjust_right;
-adjust_height_bqs = adjust_upper + adjust_lower;
+ /* Size of left/right/upper/lower sides of the sensor center on the internal frame. */
+ left = sensor_center_x_bqs_on_internal;
+ right = internal_width_bqs - sensor_center_x_bqs_on_internal;
+ upper = sensor_center_y_bqs_on_internal;
+ lower = internal_height_bqs - sensor_center_y_bqs_on_internal;
-IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs);
+ /* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */
+ adjust_left = CEIL_MUL(left, bqs_per_grid_cell);
+ adjust_right = CEIL_MUL(right, bqs_per_grid_cell);
+ adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell);
+ adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell);
-if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs)
-{
- IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
- return -EINVAL;
-}
+	/* The shading table must cover the adjusted frame size. */
+ adjust_width_bqs = adjust_left + adjust_right;
+ adjust_height_bqs = adjust_upper + adjust_lower;
-/* Origin of the internal frame on the shading table. */
-internal_org_x_bqs_on_tbl = adjust_left - left;
-internal_org_y_bqs_on_tbl = adjust_upper - upper;
-
-/* Origin of the real sensor data area on the shading table. */
-sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal;
-sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal;
-
-/* The shading information necessary as API is stored in the shading_info. */
-shading_info->info.type_1.num_hor_grids = num_hor_grids;
-shading_info->info.type_1.num_ver_grids = num_ver_grids;
-shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell;
-
-shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in;
-shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out;
-shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in;
-shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out;
-
-shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs;
-shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs;
-
-shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs;
-shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs;
-
-shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl;
-shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl;
-
-/* The shading information related to ISP (but, not necessary as API) is stored in the pipe_config. */
-pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl;
-pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl;
-
-IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)",
- shading_info->info.type_1.num_hor_grids,
- shading_info->info.type_1.num_ver_grids,
- shading_info->info.type_1.bqs_per_grid_cell,
- shading_info->info.type_1.bayer_scale_hor_ratio_in,
- shading_info->info.type_1.bayer_scale_hor_ratio_out,
- shading_info->info.type_1.bayer_scale_ver_ratio_in,
- shading_info->info.type_1.bayer_scale_ver_ratio_out,
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.width,
- shading_info->info.type_1.isp_input_sensor_data_res_bqs.height,
- shading_info->info.type_1.sensor_data_res_bqs.width,
- shading_info->info.type_1.sensor_data_res_bqs.height,
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x,
- shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y);
-
-IA_CSS_LOG("pipe_config: origin=(%d,%d)",
- pipe_config->internal_frame_origin_bqs_on_sctbl.x,
- pipe_config->internal_frame_origin_bqs_on_sctbl.y);
-
-IA_CSS_LEAVE_ERR_PRIVATE(err);
-#endif
-return err;
+ IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs);
+
+ if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs) {
+ IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Origin of the internal frame on the shading table. */
+ internal_org_x_bqs_on_tbl = adjust_left - left;
+ internal_org_y_bqs_on_tbl = adjust_upper - upper;
+
+ /* Origin of the real sensor data area on the shading table. */
+ sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal;
+ sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal;
+
+	/* The shading information required by the API is stored in shading_info. */
+ shading_info->info.type_1.num_hor_grids = num_hor_grids;
+ shading_info->info.type_1.num_ver_grids = num_ver_grids;
+ shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell;
+
+ shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in;
+ shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out;
+ shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in;
+ shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out;
+
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs;
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs;
+
+ shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs;
+ shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs;
+
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl;
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl;
+
+	/* The ISP-related shading information (not required by the API) is stored in pipe_config. */
+ pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl;
+ pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl;
+
+ IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)",
+ shading_info->info.type_1.num_hor_grids,
+ shading_info->info.type_1.num_ver_grids,
+ shading_info->info.type_1.bqs_per_grid_cell,
+ shading_info->info.type_1.bayer_scale_hor_ratio_in,
+ shading_info->info.type_1.bayer_scale_hor_ratio_out,
+ shading_info->info.type_1.bayer_scale_ver_ratio_in,
+ shading_info->info.type_1.bayer_scale_ver_ratio_out,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.width,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_res_bqs.width,
+ shading_info->info.type_1.sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y);
+
+ IA_CSS_LOG("pipe_config: origin=(%d,%d)",
+ pipe_config->internal_frame_origin_bqs_on_sctbl.x,
+ pipe_config->internal_frame_origin_bqs_on_sctbl.y);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
}
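
The sensor data size above is rescaled through the bayer-scale ratio in fixed point, with "+ bs_in / 2" providing round-to-nearest instead of truncation. A minimal standalone sketch of that rounding scheme (FRAC_ACC taken as 8 from the comment in the hunk; the names here are illustrative):

    #include <stdio.h>

    /* 1.0 in fixed point; 8 == FRAC_ACC per the comment above. */
    #define FRAC 8U

    /* Round-to-nearest scaling: the "+ bs_in / 2" trick from the hunk. */
    static unsigned int scale_bqs(unsigned int in_bqs,
                                  unsigned int ratio_out, unsigned int ratio_in)
    {
            unsigned int bs_out = ratio_out * FRAC;
            unsigned int bs_in  = ratio_in * FRAC;

            return (in_bqs * bs_out + bs_in / 2) / bs_in;
    }

    int main(void)
    {
            printf("%u\n", scale_bqs(100, 1, 3));   /* 100/3 = 33.33 -> 33 */
            printf("%u\n", scale_bqs(101, 1, 3));   /* 101/3 = 33.67 -> 34 */
            return 0;
    }
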
+
int
ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
enum ia_css_shading_correction_type type, /* [in] */
@@ -718,19 +706,24 @@ ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p",
binary, type, required_bds_factor, stream_config);
- if (type == IA_CSS_SHADING_CORRECTION_TYPE_1)
-#ifndef ISP2401
- err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config,
- shading_info);
-#else
- err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config,
- shading_info, pipe_config);
-#endif
+ if (type != IA_CSS_SHADING_CORRECTION_TYPE_1) {
+ err = -ENOTSUPP;
- /* Other function calls can be added here when other shading correction types will be added in the future. */
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (!IS_ISP2401)
+ err = isp2400_binary_get_shading_info_type_1(binary,
+ required_bds_factor,
+ stream_config,
+ shading_info);
else
- err = -ENOTSUPP;
+ err = isp2401_binary_get_shading_info_type_1(binary,
+ required_bds_factor,
+ stream_config,
+ shading_info,
+ pipe_config);
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
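
This hunk is the core of the refactor: the compile-time #ifdef ISP2401 selection is replaced by a runtime IS_ISP2401 check, so both per-generation helpers are always compiled and one is picked at run time. A self-contained sketch of the pattern, with placeholder names standing in for the driver's functions:

    #include <stdio.h>

    /* Stand-in for the driver's IS_ISP2401 runtime flag; an assumption here. */
    static int IS_ISP2401;

    static int isp2400_get_info(void) { return 2400; }  /* old input system */
    static int isp2401_get_info(void) { return 2401; }  /* new input system */

    /* One entry point dispatching at run time; both helpers stay compiled. */
    static int get_info(void)
    {
            if (!IS_ISP2401)
                    return isp2400_get_info();

            return isp2401_get_info();
    }

    int main(void)
    {
            IS_ISP2401 = 1;
            printf("dispatched to %d\n", get_info());
            return 0;
    }
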
@@ -1045,7 +1038,7 @@ binary_in_frame_padded_width(int in_frame_width,
int rval;
int nr_of_left_paddings; /* number of paddings pixels on the left of an image line */
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* the output image line of Input System 2401 does not have the left paddings */
nr_of_left_paddings = 0;
#else
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
index cddf5882b76a..567d94d91e3c 100644
--- a/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/interface/ia_css_bufq_comm.h
@@ -27,19 +27,9 @@ enum sh_css_queue_id {
SH_CSS_QUEUE_E_ID,
SH_CSS_QUEUE_F_ID,
SH_CSS_QUEUE_G_ID,
-#if defined(HAS_NO_INPUT_SYSTEM)
- /* input frame queue for skycam */
- SH_CSS_QUEUE_H_ID,
-#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
SH_CSS_QUEUE_H_ID, /* for metadata */
-#endif
-#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_H_ID + 1)
-#else
-#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_G_ID + 1)
-#endif
};
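
With the conditionals gone, SH_CSS_MAX_NUM_QUEUES is always the last enumerator plus one. A small sketch of why deriving the count from the final enum ID keeps array sizes in sync when queues are added (the identifiers here are invented):

    #include <stdio.h>

    enum queue_id {
            QUEUE_A_ID,
            QUEUE_B_ID,
            QUEUE_H_ID,     /* currently the last queue */
    };
    #define MAX_NUM_QUEUES (QUEUE_H_ID + 1)

    int main(void)
    {
            int counters[MAX_NUM_QUEUES] = { 0 };   /* sized from the enum */

            counters[QUEUE_H_ID]++;
            printf("%d queues, counter H = %d\n",
                   MAX_NUM_QUEUES, counters[QUEUE_H_ID]);
            return 0;
    }
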
diff --git a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
index 38e85735293b..6a75cba4886f 100644
--- a/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
@@ -47,13 +47,11 @@ struct sh_css_queues {
/* SP2Host event queue */
ia_css_queue_t sp2host_psys_event_queue_handle;
-#if !defined(HAS_NO_INPUT_SYSTEM)
/* Host2SP ISYS event queue */
ia_css_queue_t host2sp_isys_event_queue_handle;
/* SP2Host ISYS event queue */
ia_css_queue_t sp2host_isys_event_queue_handle;
-#endif
/* Tagger command queue */
ia_css_queue_t host2sp_tag_cmd_queue_handle;
};
@@ -231,14 +229,12 @@ static ia_css_queue_t *bufq_get_qhandle(
case sh_css_sp2host_psys_event_queue:
q = &css_queues.sp2host_psys_event_queue_handle;
break;
-#if !defined(HAS_NO_INPUT_SYSTEM)
case sh_css_host2sp_isys_event_queue:
q = &css_queues.host2sp_isys_event_queue_handle;
break;
case sh_css_sp2host_isys_event_queue:
q = &css_queues.sp2host_isys_event_queue_handle;
break;
-#endif
case sh_css_host2sp_tag_cmd_queue:
q = &css_queues.host2sp_tag_cmd_queue_handle;
break;
@@ -307,7 +303,6 @@ void ia_css_bufq_init(void)
(uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems),
&css_queues.sp2host_psys_event_queue_handle);
-#if !defined(HAS_NO_INPUT_SYSTEM)
/* Host2SP ISYS event queue */
init_bufq((uint32_t)offsetof(struct host_sp_queues,
host2sp_isys_event_queue_desc),
@@ -324,7 +319,6 @@ void ia_css_bufq_init(void)
init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc),
(uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems),
&css_queues.host2sp_tag_cmd_queue_handle);
-#endif
IA_CSS_LEAVE_PRIVATE("");
}
@@ -391,8 +385,7 @@ int ia_css_bufq_enqueue_psys_event(
u8 evt_payload_1,
uint8_t evt_payload_2)
{
-
- int error = 0;
+ int error = 0;
ia_css_queue_t *q;
IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id);
@@ -434,7 +427,6 @@ int ia_css_bufq_dequeue_psys_event(
int ia_css_bufq_dequeue_isys_event(
u8 item[BUFQ_EVENT_SIZE])
{
-#if !defined(HAS_NO_INPUT_SYSTEM)
int error = 0;
ia_css_queue_t *q;
@@ -451,15 +443,10 @@ int ia_css_bufq_dequeue_isys_event(
}
error = ia_css_eventq_recv(q, item);
return error;
-#else
- (void)item;
- return -EBUSY;
-#endif
}
int ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
{
-#if !defined(HAS_NO_INPUT_SYSTEM)
int error = 0;
ia_css_queue_t *q;
@@ -474,16 +461,11 @@ int ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
IA_CSS_LEAVE_ERR_PRIVATE(error);
return error;
-#else
- (void)evt_id;
- return -EBUSY;
-#endif
}
int ia_css_bufq_enqueue_tag_cmd(
uint32_t item)
{
-#if !defined(HAS_NO_INPUT_SYSTEM)
int error;
ia_css_queue_t *q;
@@ -497,10 +479,6 @@ int ia_css_bufq_enqueue_tag_cmd(
IA_CSS_LEAVE_ERR_PRIVATE(error);
return error;
-#else
- (void)item;
- return -EBUSY;
-#endif
}
int ia_css_bufq_deinit(void)
@@ -545,12 +523,10 @@ void ia_css_bufq_dump_queue_info(void)
bufq_dump_queue_info("sp2host_psys_event",
&css_queues.sp2host_psys_event_queue_handle);
-#if !defined(HAS_NO_INPUT_SYSTEM)
bufq_dump_queue_info("host2sp_isys_event",
&css_queues.host2sp_isys_event_queue_handle);
bufq_dump_queue_info("sp2host_isys_event",
&css_queues.sp2host_isys_event_queue_handle);
bufq_dump_queue_info("host2sp_tag_cmd",
&css_queues.host2sp_tag_cmd_queue_handle);
-#endif
}
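
bufq_get_qhandle, seen earlier in this file, maps a queue enum onto the matching handle field with a switch, and the ISYS cases are now unconditional. A trimmed, self-contained sketch of that lookup shape (the types and names are placeholders):

    #include <stdio.h>

    typedef struct { int id; } queue_t;

    enum queue_kind { HOST2SP_ISYS_EVENT, SP2HOST_ISYS_EVENT };

    static struct {
            queue_t host2sp_isys_event;
            queue_t sp2host_isys_event;
    } queues;

    static queue_t *get_qhandle(enum queue_kind kind)
    {
            switch (kind) {
            case HOST2SP_ISYS_EVENT:
                    return &queues.host2sp_isys_event;
            case SP2HOST_ISYS_EVENT:
                    return &queues.sp2host_isys_event;
            default:
                    return NULL;    /* unknown queue */
            }
    }

    int main(void)
    {
            printf("handle=%p\n", (void *)get_qhandle(SP2HOST_ISYS_EVENT));
            return 0;
    }
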
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
index e04d2485ea75..5e6e7447ae00 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
@@ -129,15 +129,16 @@ enum ia_css_debug_enable_param_dump {
* @param[in] fmt printf like format string
* @param[in] args arguments for the format string
*/
-static inline void
-ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
+static inline void __printf(2, 0) ia_css_debug_vdtrace(unsigned int level,
+ const char *fmt,
+ va_list args)
{
if (dbg_level >= level)
sh_css_vprint(fmt, args);
}
-__printf(2, 3)
-void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...);
+__printf(2, 3) void ia_css_debug_dtrace(unsigned int level,
+ const char *fmt, ...);
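
In the kernel, __printf(m, n) expands to __attribute__((format(printf, m, n))): m is the 1-based position of the format string and n the position of the first variadic argument, with n == 0 marking a va_list-style function as in ia_css_debug_vdtrace. A sketch using the raw GCC attribute:

    #include <stdarg.h>
    #include <stdio.h>

    /* va_list variant: format in arg 2, no variadic args, hence (printf, 2, 0). */
    __attribute__((format(printf, 2, 0)))
    static void vdtrace(unsigned int level, const char *fmt, va_list args)
    {
            if (level)
                    vprintf(fmt, args);
    }

    /* Variadic variant: format in arg 2, variadic args start at arg 3. */
    __attribute__((format(printf, 2, 3)))
    static void dtrace(unsigned int level, const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            vdtrace(level, fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            dtrace(1, "level %d\n", 2);     /* format mismatches now warn at compile time */
            return 0;
    }
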
/*! @brief Dump sp thread's stack contents
* SP thread's stack contents are set to 0xcafecafe. This function dumps the
@@ -158,12 +159,6 @@ void ia_css_debug_set_dtrace_level(
*/
unsigned int ia_css_debug_get_dtrace_level(void);
-/*! @brief Dump input formatter state.
- * Dumps the input formatter state to tracing output.
- * @return None
- */
-void ia_css_debug_dump_if_state(void);
-
/*! @brief Dump isp hardware state.
* Dumps the isp hardware state to tracing output.
* @return None
diff --git a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
index 2bca27a04b02..05ce0f73f5ae 100644
--- a/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
@@ -52,9 +52,7 @@
#include "fifo_monitor.h"
-#if !defined(HAS_NO_INPUT_FORMATTER)
#include "input_formatter.h"
-#endif
#include "dma.h"
#include "irq.h"
#include "gp_device.h"
@@ -62,17 +60,11 @@
#include "isp.h"
#include "type_support.h"
#include "math_support.h" /* CEIL_DIV */
-#if defined(HAS_INPUT_FORMATTER_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
#include "input_system.h" /* input_formatter_reg_load */
-#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
#include "ia_css_tagger_common.h"
-#endif
#include "sh_css_internal.h"
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "ia_css_isys.h"
-#endif
#include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */
#include "css_trace.h" /* tracer */
@@ -109,17 +101,6 @@
#define ENABLE_LINE_MAX_LENGTH (25)
-#ifdef ISP2401
-#define DBG_EXT_CMD_TRACE_PNTS_DUMP BIT(8)
-#define DBG_EXT_CMD_PUB_CFG_DUMP BIT(9)
-#define DBG_EXT_CMD_GAC_REG_DUMP BIT(10)
-#define DBG_EXT_CMD_GAC_ACB_REG_DUMP BIT(11)
-#define DBG_EXT_CMD_FIFO_DUMP BIT(12)
-#define DBG_EXT_CMD_QUEUE_DUMP BIT(13)
-#define DBG_EXT_CMD_DMA_DUMP BIT(14)
-#define DBG_EXT_CMD_MASK 0xAB0000CD
-
-#endif
/*
* TODO:SH_CSS_MAX_SP_THREADS is not the max number of sp threads
* future rework should fix this and remove the define MAX_THREAD_NUM
@@ -453,23 +434,21 @@ void ia_css_debug_dump_isp_state(void)
debug_print_isp_state(&state, "ISP");
if (state.is_stalling) {
-#if !defined(HAS_NO_INPUT_FORMATTER)
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "[0] if_prim_a_FIFO stalled", stall.fifo0);
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "[1] if_prim_b_FIFO stalled", stall.fifo1);
-#endif
+ if (!IS_ISP2401) {
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[0] if_prim_a_FIFO stalled", stall.fifo0);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[1] if_prim_b_FIFO stalled", stall.fifo1);
+ }
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled",
stall.fifo2);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled",
stall.fifo3);
-#if !defined(IS_ISP_2500_SYSTEM)
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled",
stall.fifo4);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled",
stall.fifo5);
-#endif
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled",
stall.fifo6);
ia_css_debug_dtrace(2, "\t%-32s: %d\n",
@@ -501,34 +480,29 @@ void ia_css_debug_dump_sp_state(void)
sp_get_state(SP0_ID, &state, &stall);
debug_print_sp_state(&state, "SP");
if (state.is_stalling) {
-#if !defined(HAS_NO_INPUT_SYSTEM)
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled",
stall.fifo0);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled",
stall.fifo1);
-#endif
ia_css_debug_dtrace(2, "\t%-32s: %d\n",
"str_to_mem_FIFO stalled", stall.fifo2);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled",
stall.fifo3);
-#if !defined(HAS_NO_INPUT_FORMATTER)
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "if_prim_a_FIFO stalled", stall.fifo4);
-#endif
+ if (!IS_ISP2401)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_a_FIFO stalled", stall.fifo4);
+
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled",
stall.fifo5);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled",
stall.fifo6);
-#if !defined(HAS_NO_INPUT_FORMATTER)
- ia_css_debug_dtrace(2, "\t%-32s: %d\n",
- "if_prim_b_FIFO stalled", stall.fifo7);
-#endif
+ if (!IS_ISP2401)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_b_FIFO stalled", stall.fifo7);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled",
stall.fifo8);
-#if !defined(IS_ISP_2500_SYSTEM)
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled",
stall.fifo9);
-#endif
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled",
stall.fifoa);
ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
@@ -562,7 +536,6 @@ static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
return;
}
-#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
void ia_css_debug_dump_pif_a_isp_fifo_state(void)
{
fifo_channel_state_t pif_to_isp, isp_to_pif;
@@ -599,13 +572,11 @@ void ia_css_debug_dump_str2mem_sp_fifo_state(void)
debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
}
+#ifndef ISP2401
static void debug_print_if_state(input_formatter_state_t *state, const char *id)
{
unsigned int val;
-#if defined(HAS_INPUT_FORMATTER_VERSION_1)
- const char *st_reset = (state->reset ? "Active" : "Not active");
-#endif
const char *st_vsync_active_low =
(state->vsync_active_low ? "low" : "high");
const char *st_hsync_active_low =
@@ -637,9 +608,6 @@ static void debug_print_if_state(input_formatter_state_t *state, const char *id)
ia_css_debug_dtrace(2, "\tConfiguration:\n");
-#if defined(HAS_INPUT_FORMATTER_VERSION_1)
- ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "Software reset", st_reset);
-#endif
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline);
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol);
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht);
@@ -674,7 +642,6 @@ static void debug_print_if_state(input_formatter_state_t *state, const char *id)
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
"Block when no request", st_block_fifo_when_no_req);
-#if defined(HAS_INPUT_FORMATTER_VERSION_2)
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
"IF_BLOCKED_FIFO_NO_REQ_ADDRESS",
input_formatter_reg_load(INPUT_FORMATTER0_ID,
@@ -737,7 +704,6 @@ static void debug_print_if_state(input_formatter_state_t *state, const char *id)
"_REG_GP_IFMT_slv_reg_srst",
gp_device_reg_load(GP_DEVICE0_ID,
_REG_GP_IFMT_slv_reg_srst));
-#endif
ia_css_debug_dtrace(2, "\tFSM Status:\n");
@@ -868,7 +834,6 @@ static void debug_print_if_state(input_formatter_state_t *state, const char *id)
state->vector_support);
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost",
state->sensor_data_lost);
- return;
}
static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
@@ -891,7 +856,7 @@ static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
state->en_status_update);
}
-void ia_css_debug_dump_if_state(void)
+static void ia_css_debug_dump_if_state(void)
{
input_formatter_state_t if_state;
input_formatter_bin_state_t if_bin_state;
@@ -1620,19 +1585,11 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
"frame_buffer.sp.c"
};
-#if 1
/* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */
/* Adjust this to your trace case */
static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
"default"
};
-#else
- /* Example SH_CSS_SP_DBG_NR_OF_TRACES==4 */
- /* Adjust this to your trace case */
- static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
- "copy", "preview/video", "capture", "acceleration"
- };
-#endif
/* Remember host_index_last because we only want to print new entries */
static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 };
@@ -1704,7 +1661,7 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
}
#endif
-#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
static void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
{
int i;
@@ -1901,17 +1858,15 @@ static void debug_print_rx_state(receiver_state_t *state)
}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
void ia_css_debug_dump_rx_state(void)
{
-#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
receiver_state_t state;
receiver_get_state(RX0_ID, &state);
debug_print_rx_state(&state);
#endif
}
-#endif
void ia_css_debug_dump_sp_sw_debug_info(void)
{
@@ -1926,7 +1881,7 @@ void ia_css_debug_dump_sp_sw_debug_info(void)
return;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
static void debug_print_isys_capture_unit_state(capture_unit_state_t *state)
{
assert(state);
@@ -2163,31 +2118,20 @@ static void debug_print_isys_state(input_system_state_t *state)
}
/* end of control unit state */
}
-
-void ia_css_debug_dump_isys_state(void)
-{
- input_system_state_t state;
-
- input_system_get_state(INPUT_SYSTEM0_ID, &state);
- debug_print_isys_state(&state);
-
- return;
-}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+
void ia_css_debug_dump_isys_state(void)
{
- /* Android compilation fails if made a local variable
- stack size on android is limited to 2k and this structure
- is around 3.5K, in place of static malloc can be done but
- if this call is made too often it will lead to fragment memory
- versus a fixed allocation */
static input_system_state_t state;
input_system_get_state(INPUT_SYSTEM0_ID, &state);
+
+#ifndef ISP2401
+ debug_print_isys_state(&state);
+#else
input_system_dump_state(INPUT_SYSTEM0_ID, &state);
-}
#endif
+}
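
The deleted comment explained the static qualifier that survives here: the input-system state structure is about 3.5K, larger than the 2K stacks the code once ran on, so it lives in .bss rather than on the stack. A sketch of the tradeoff (the size is illustrative):

    #include <stdio.h>
    #include <string.h>

    struct big_state {
            unsigned char regs[3584];       /* ~3.5K, illustrative size */
    };

    void dump_state(void)
    {
            /*
             * static: one persistent copy in .bss instead of 3.5K of stack.
             * The cost is that the function is no longer reentrant.
             */
            static struct big_state state;

            memset(&state, 0, sizeof(state));
            printf("state is %zu bytes\n", sizeof(state));
    }

    int main(void)
    {
            dump_state();
            return 0;
    }
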
void ia_css_debug_dump_debug_info(const char *context)
{
@@ -2195,10 +2139,10 @@ void ia_css_debug_dump_debug_info(const char *context)
context = "No Context provided";
ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
- ia_css_debug_dump_rx_state();
-#endif
-#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ if (!IS_ISP2401)
+ ia_css_debug_dump_rx_state();
+
+#ifndef ISP2401
ia_css_debug_dump_if_state();
#endif
ia_css_debug_dump_isp_state();
@@ -2215,12 +2159,12 @@ void ia_css_debug_dump_debug_info(const char *context)
ia_css_debug_dump_dma_isp_fifo_state();
ia_css_debug_dump_dma_sp_fifo_state();
ia_css_debug_dump_dma_state();
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
- ia_css_debug_dump_isys_state();
- {
+ if (!IS_ISP2401) {
struct irq_controller_state state;
+ ia_css_debug_dump_isys_state();
+
irq_controller_get_state(IRQ2_ID, &state);
ia_css_debug_dtrace(2, "\t%-32s:\n",
@@ -2241,14 +2185,12 @@ void ia_css_debug_dump_debug_info(const char *context)
ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
"irq_level_not_pulse",
state.irq_level_not_pulse);
+ } else {
+ ia_css_debug_dump_isys_state();
}
-#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
- ia_css_debug_dump_isys_state();
-#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+
ia_css_debug_tagger_state();
-#endif
+
return;
}
@@ -2278,7 +2220,6 @@ void ia_css_debug_wake_up_sp(void)
sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
}
-#if !defined(IS_ISP_2500_SYSTEM)
#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \
(struct HRTCAT(HRTCAT(sh_css_isp_, type), _params) *) \
findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel))
@@ -2310,16 +2251,11 @@ findf_dmem_params(struct ia_css_stream *stream, short idx)
}
return NULL;
}
-#endif
void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
unsigned int enable)
{
ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n");
-#if defined(IS_ISP_2500_SYSTEM)
- (void)enable;
- (void)stream;
-#else
assert(stream);
if ((enable & IA_CSS_DEBUG_DUMP_FPN)
@@ -2383,7 +2319,6 @@ void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
|| (enable & IA_CSS_DEBUG_DUMP_ALL)) {
ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE);
}
-#endif
}
void sh_css_dump_sp_raw_copy_linecount(bool reduced)
@@ -2449,12 +2384,14 @@ void ia_css_debug_dump_isp_binary(void)
void ia_css_debug_dump_perf_counters(void)
{
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
const struct ia_css_fw_info *fw;
int i;
unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt;
- s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID +
- 1]; /* 3 Capture Units and 1 Acquire Unit. */
+ /* N_MIPI_PORT_ID + 1: 3 Capture Units and 1 Acquire Unit. */
+ s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1];
+
+ if (IS_ISP2401)
+ return;
ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");
@@ -2473,49 +2410,9 @@ void ia_css_debug_dump_perf_counters(void)
ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
i, ia_css_sp_input_system_error_cnt[i]);
}
-#endif
}
/*
-
-void sh_css_init_ddr_debug_queue(void)
-{
- ia_css_ptr ddr_debug_queue_addr =
- hmm_alloc(sizeof(debug_data_ddr_t), HMM_BO_PRIVATE, 0, NULL, 0);
- const struct ia_css_fw_info *fw;
- unsigned int HIVE_ADDR_debug_buffer_ddr_address;
-
- fw = &sh_css_sp_fw;
- HIVE_ADDR_debug_buffer_ddr_address =
- fw->info.sp.debug_buffer_ddr_address;
-
- (void)HIVE_ADDR_debug_buffer_ddr_address;
-
- debug_buffer_ddr_init(ddr_debug_queue_addr);
-
- sp_dmem_store_uint32(SP0_ID,
- (unsigned int)sp_address_of(debug_buffer_ddr_address),
- (uint32_t)(ddr_debug_queue_addr));
-}
-
-void sh_css_load_ddr_debug_queue(void)
-{
- debug_synch_queue_ddr();
-}
-
-void ia_css_debug_dump_ddr_debug_queue(void)
-{
- int i;
- sh_css_load_ddr_debug_queue();
- for (i = 0; i < DEBUG_BUF_SIZE; i++) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
- "ddr_debug_queue[%d] = 0x%x\n",
- i, debug_data_ptr->buf[i]);
- }
-}
-*/
-
-/*
* @brief Initialize the debug mode.
* Refer to "ia_css_debug.h" for more details.
*/
@@ -2557,8 +2454,7 @@ ia_css_debug_mode_enable_dma_channel(int dma_id,
return rc;
}
-static
-void dtrace_dot(const char *fmt, ...)
+static void __printf(1, 2) dtrace_dot(const char *fmt, ...)
{
va_list ap;
@@ -3260,22 +3156,16 @@ ia_css_debug_dump_stream_config(
byte 2-3: data
*/
#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP
-#ifndef ISP2401
-static void debug_dump_one_trace(TRACE_CORE_ID proc_id)
-#else
static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id)
-#endif
{
#if defined(HAS_TRACER_V2)
u32 start_addr;
u32 start_addr_data;
u32 item_size;
-#ifndef ISP2401
u32 tmp;
-#else
u8 tid_val;
enum TRACE_DUMP_FORMAT dump_format;
-#endif
+
int i, j, max_trace_points, point_num, limit = -1;
/* using a static buffer here as the driver has issues allocating memory */
static u32 trace_read_buf[TRACE_BUFF_SIZE] = {0};
@@ -3479,7 +3369,6 @@ void ia_css_debug_dump_trace(void)
#endif
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/* Tagger state dump function. The tagger is only available when the CSS
* contains an input system (2400 or 2401). */
void ia_css_debug_tagger_state(void)
@@ -3505,7 +3394,6 @@ void ia_css_debug_tagger_state(void)
i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock);
}
}
-#endif /* defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) */
/* ISP2401 */
void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
diff --git a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
index 89cded6b6e2b..6d9f47629fbc 100644
--- a/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
@@ -16,7 +16,7 @@
#include "system_global.h"
#include <linux/kernel.h>
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
#include "ia_css_ifmtr.h"
#include <math_support.h>
diff --git a/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
index 38712530f566..2d06e124007e 100644
--- a/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
+++ b/drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c
@@ -32,24 +32,18 @@
#include "event_fifo.h"
#define __INLINE_SP__
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "input_system.h" /* MIPI_PREDICTOR_NONE,... */
-#endif
#include "assert_support.h"
/* System independent */
#include "sh_css_internal.h"
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "ia_css_isys.h"
-#endif
#define HBLANK_CYCLES (187)
#define MARKER_CYCLES (6)
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include <hive_isp_css_streaming_to_mipi_types_hrt.h>
-#endif
/* The data type is used to send special cases:
* yuv420: odd lines (1, 3 etc) are twice as wide as even
@@ -67,9 +61,7 @@ enum inputfifo_mipi_data_type {
inputfifo_mipi_data_type_rgb,
};
-#if !defined(HAS_NO_INPUT_SYSTEM)
static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type;
-#endif
struct inputfifo_instance {
unsigned int ch_id;
enum atomisp_input_format input_format;
@@ -81,7 +73,6 @@ struct inputfifo_instance {
enum inputfifo_mipi_data_type type;
};
-#if !defined(HAS_NO_INPUT_SYSTEM)
/*
* Maintain a basic streaming to Mipi administration with ch_id as index
* ch_id maps on the "Mipi virtual channel ID" and can have value 0..3
@@ -536,4 +527,3 @@ void ia_css_inputfifo_end_frame(
s2mi->streaming = false;
return;
}
-#endif /* #if !defined(HAS_NO_INPUT_SYSTEM) */
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
index f975429b8705..711a321e9a3f 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
@@ -24,22 +24,20 @@
#include <system_global.h>
#include "ia_css_isys_comm.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/**
* Virtual Input System. (Input System 2401)
*/
-typedef input_system_cfg_t ia_css_isys_descr_t;
+typedef isp2401_input_system_cfg_t ia_css_isys_descr_t;
/* end of Virtual Input System */
#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
-input_system_error_t ia_css_isys_init(void);
+input_system_err_t ia_css_isys_init(void);
void ia_css_isys_uninit(void);
enum mipi_port_id ia_css_isys_port_to_mipi_port(
enum mipi_port_id api_port);
-#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/**
* @brief Register one (virtual) stream. This is used to track when all
@@ -73,12 +71,12 @@ int ia_css_isys_csi_rx_unregister_stream(
int ia_css_isys_convert_compressed_format(
struct ia_css_csi2_compression *comp,
- struct input_system_cfg_s *cfg);
+ struct isp2401_input_system_cfg_s *cfg);
unsigned int ia_css_csi2_calculate_input_system_alignment(
enum atomisp_input_format fmt_type);
#endif
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
/* CSS Receiver */
void ia_css_isys_rx_configure(
const rx_cfg_t *config,
@@ -95,7 +93,7 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
unsigned int irq_infos);
unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
-#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* #if !defined(ISP2401) */
/* @brief Translate format and compression to format type.
*
@@ -113,7 +111,7 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
mipi_predictor_t compression,
unsigned int *fmt_type);
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/**
* Virtual Input System. (Input System 2401)
*/
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
index 6f1a86c81d7c..d80ef42c7a64 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
@@ -19,7 +19,7 @@
#include <type_support.h>
#include <input_system.h>
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include <platform_support.h> /* inline */
#include <input_system_global.h>
#include <ia_css_stream_public.h> /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
@@ -50,5 +50,5 @@ static inline uint32_t ia_css_isys_generate_stream_id(
return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
}
-#endif /* USE_INPUT_SYSTEM_VERSION_2401*/
+#endif /* ISP2401*/
#endif /*_IA_CSS_ISYS_COMM_H */
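
ia_css_isys_generate_stream_id, kept unchanged above, packs the SP thread ID and the per-channel stream ID into one integer by multiply-and-add; decoding is the matching div/mod pair. A sketch with an assumed per-channel maximum of 2:

    #include <stdio.h>

    #define MAX_ISYS_STREAM_PER_CH 2        /* assumed value for illustration */

    static unsigned int generate_stream_id(unsigned int thread, unsigned int stream)
    {
            return thread * MAX_ISYS_STREAM_PER_CH + stream;
    }

    int main(void)
    {
            unsigned int id = generate_stream_id(3, 1);     /* -> 7 */

            /* Decoding is the matching div/mod pair. */
            printf("id=%u thread=%u stream=%u\n",
                   id, id / MAX_ISYS_STREAM_PER_CH, id % MAX_ISYS_STREAM_PER_CH);
            return 0;
    }
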
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
index 5a44d8f6c196..3fc9fed1e516 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
@@ -15,7 +15,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
index 68baec78b1c4..261c6460e970 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
@@ -15,7 +15,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
index de442f1fa6ba..d0a43c44963c 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
@@ -15,17 +15,16 @@
#include "input_system.h"
-#ifdef HAS_INPUT_SYSTEM_VERSION_2
#include "ia_css_isys.h"
#include "platform_support.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
-#include "isys_dma.h" /* isys2401_dma_set_max_burst_size() */
+#ifdef ISP2401
+#include "isys_dma_public.h" /* isys2401_dma_set_max_burst_size() */
#include "isys_irq.h"
#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
-input_system_error_t ia_css_isys_init(void)
+#if !defined(ISP2401)
+input_system_err_t ia_css_isys_init(void)
{
backend_channel_cfg_t backend_ch0;
backend_channel_cfg_t backend_ch1;
@@ -33,7 +32,7 @@ input_system_error_t ia_css_isys_init(void)
target_cfg2400_t targetC;
u32 acq_mem_region_size = 24;
u32 acq_nof_mem_regions = 2;
- input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+ input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t));
memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t));
@@ -87,8 +86,8 @@ input_system_error_t ia_css_isys_init(void)
return error;
}
-#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
-input_system_error_t ia_css_isys_init(void)
+#elif defined(ISP2401)
+input_system_err_t ia_css_isys_init(void)
{
ia_css_isys_csi_rx_lut_rmgr_init();
ia_css_isys_ibuf_rmgr_init();
@@ -107,11 +106,11 @@ input_system_error_t ia_css_isys_init(void)
}
#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
void ia_css_isys_uninit(void)
{
}
-#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+#elif defined(ISP2401)
void ia_css_isys_uninit(void)
{
ia_css_isys_csi_rx_lut_rmgr_uninit();
@@ -121,4 +120,3 @@ void ia_css_isys_uninit(void)
}
#endif
-#endif
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
index bc4a2ff3c0fc..fb0cb183f701 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -15,7 +15,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
index 4f0dcdfa13be..b4813cd50daa 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
@@ -20,7 +20,7 @@
#include "ia_css_irq.h"
#include "sh_css_internal.h"
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
{
hrt_data bits = receiver_port_reg_load(RX0_ID,
@@ -28,9 +28,7 @@ void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
_HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) |
-#if defined(HAS_RX_VERSION_2)
(1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) |
-#endif
(1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) |
(1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) |
(1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) |
@@ -117,10 +115,8 @@ unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits)
if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT))
infos |= IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN;
-#if defined(HAS_RX_VERSION_2)
if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT))
infos |= IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT;
-#endif
if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT))
infos |= IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE;
if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT))
@@ -176,10 +172,8 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
/* MW: Why do we remap the receiver bitmap */
if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT;
-#if defined(HAS_RX_VERSION_2)
if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT;
-#endif
if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
@@ -215,7 +209,7 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
return;
}
-#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* #if !defined(ISP2401) */
int ia_css_isys_convert_stream_format_to_mipi_format(
enum atomisp_input_format input_format,
@@ -317,7 +311,7 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
case ATOMISP_INPUT_FORMAT_EMBEDDED:
*fmt_type = MIPI_FORMAT_EMBEDDED;
break;
-#ifndef USE_INPUT_SYSTEM_VERSION_2401
+#ifndef ISP2401
case ATOMISP_INPUT_FORMAT_RAW_16:
/* This is not specified by Arasan, so we use
* 17 for now.
@@ -362,7 +356,7 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
return 0;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
enum ia_css_csi2_compression_type type)
{
@@ -382,7 +376,7 @@ static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
int ia_css_isys_convert_compressed_format(
struct ia_css_csi2_compression *comp,
- struct input_system_cfg_s *cfg)
+ struct isp2401_input_system_cfg_s *cfg)
{
int err = 0;
@@ -480,11 +474,10 @@ unsigned int ia_css_csi2_calculate_input_system_alignment(
#endif
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
void ia_css_isys_rx_configure(const rx_cfg_t *config,
const enum ia_css_input_mode input_mode)
{
-#if defined(HAS_RX_VERSION_2)
bool port_enabled[N_MIPI_PORT_ID];
bool any_port_enabled = false;
enum mipi_port_id port;
@@ -580,9 +573,6 @@ void ia_css_isys_rx_configure(const rx_cfg_t *config,
* INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1);
*/
input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1);
-#else
-#error "rx.c: RX version must be one of {RX_VERSION_2}"
-#endif
return;
}
@@ -598,4 +588,4 @@ void ia_css_isys_rx_disable(void)
}
return;
}
-#endif /* if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+#endif /* if !defined(ISP2401) */
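
rx.c converts between raw receiver IRQ register bits and the driver's IA_CSS_RX_IRQ_INFO_* flags with paired if-chains, and with HAS_RX_VERSION_2 gone the timeout bit is handled unconditionally. A reduced sketch of the one-way translation (the bit positions are invented):

    #include <stdio.h>

    #define HW_IRQ_OVERRUN_BIT      0       /* illustrative bit positions */
    #define HW_IRQ_TIMEOUT_BIT      1

    #define IRQ_INFO_OVERRUN        (1U << 0)
    #define IRQ_INFO_TIMEOUT        (1U << 1)

    /* Map hardware register bits onto driver-visible info flags. */
    static unsigned int translate_irq_infos(unsigned int bits)
    {
            unsigned int infos = 0;

            if (bits & (1U << HW_IRQ_OVERRUN_BIT))
                    infos |= IRQ_INFO_OVERRUN;
            if (bits & (1U << HW_IRQ_TIMEOUT_BIT))
                    infos |= IRQ_INFO_TIMEOUT;
            return infos;
    }

    int main(void)
    {
            printf("infos=0x%x\n", translate_irq_infos(1U << HW_IRQ_TIMEOUT_BIT));
            return 0;
    }
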
diff --git a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
index b3c6831cb9e3..317ea30ede7a 100644
--- a/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
+++ b/drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
@@ -17,7 +17,7 @@
#include "system_global.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
#include "ia_css_isys.h"
#include "ia_css_debug.h"
@@ -33,7 +33,7 @@
*************************************************/
static bool create_input_system_channel(
- input_system_cfg_t *cfg,
+ isp2401_input_system_cfg_t *cfg,
bool metadata,
input_system_channel_t *channel);
@@ -41,7 +41,7 @@ static void destroy_input_system_channel(
input_system_channel_t *channel);
static bool create_input_system_input_port(
- input_system_cfg_t *cfg,
+ isp2401_input_system_cfg_t *cfg,
input_system_input_port_t *input_port);
static void destroy_input_system_input_port(
@@ -50,14 +50,14 @@ static void destroy_input_system_input_port(
static bool calculate_input_system_channel_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
input_system_channel_cfg_t *channel_cfg,
bool metadata);
static bool calculate_input_system_input_port_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
input_system_input_port_cfg_t *input_port_cfg);
static bool acquire_sid(
@@ -74,10 +74,10 @@ static bool acquire_ib_buffer(
s32 lines_per_frame,
s32 align_in_bytes,
bool online,
- ib_buffer_t *buf);
+ isp2401_ib_buffer_t *buf);
static void release_ib_buffer(
- ib_buffer_t *buf);
+ isp2401_ib_buffer_t *buf);
static bool acquire_dma_channel(
isys2401_dma_ID_t dma_id,
@@ -100,43 +100,43 @@ static void release_be_lut_entry(
static bool calculate_tpg_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
pixelgen_tpg_cfg_t *cfg);
static bool calculate_prbs_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
pixelgen_prbs_cfg_t *cfg);
static bool calculate_fe_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
csi_rx_frontend_cfg_t *cfg);
static bool calculate_be_cfg(
const input_system_input_port_t *input_port,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
csi_rx_backend_cfg_t *cfg);
static bool calculate_stream2mmio_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
stream2mmio_cfg_t *cfg);
static bool calculate_ibuf_ctrl_cfg(
const input_system_channel_t *channel,
const input_system_input_port_t *input_port,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
ibuf_ctrl_cfg_t *cfg);
static bool calculate_isys2401_dma_cfg(
const input_system_channel_t *channel,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
isys2401_dma_cfg_t *cfg);
static bool calculate_isys2401_dma_port_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool raw_packed,
bool metadata,
isys2401_dma_port_cfg_t *cfg);
@@ -287,7 +287,7 @@ ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
*
**************************************************/
static bool create_input_system_channel(
- input_system_cfg_t *cfg,
+ isp2401_input_system_cfg_t *cfg,
bool metadata,
input_system_channel_t *me)
{
@@ -361,7 +361,7 @@ static void destroy_input_system_channel(
}
static bool create_input_system_input_port(
- input_system_cfg_t *cfg,
+ isp2401_input_system_cfg_t *cfg,
input_system_input_port_t *me)
{
csi_mipi_packet_type_t packet_type;
@@ -457,7 +457,7 @@ static void destroy_input_system_input_port(
static bool calculate_input_system_channel_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
input_system_channel_cfg_t *channel_cfg,
bool metadata)
{
@@ -508,7 +508,7 @@ static bool calculate_input_system_channel_cfg(
static bool calculate_input_system_input_port_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
input_system_input_port_cfg_t *input_port_cfg)
{
bool rc;
@@ -595,7 +595,7 @@ static bool acquire_ib_buffer(
s32 lines_per_frame,
s32 align_in_bytes,
bool online,
- ib_buffer_t *buf)
+ isp2401_ib_buffer_t *buf)
{
buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false,
align_in_bytes);
@@ -610,7 +610,7 @@ static bool acquire_ib_buffer(
}
static void release_ib_buffer(
- ib_buffer_t *buf)
+ isp2401_ib_buffer_t *buf)
{
ia_css_isys_ibuf_rmgr_release(&buf->start_addr);
}
@@ -648,7 +648,7 @@ static void release_be_lut_entry(
static bool calculate_tpg_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
pixelgen_tpg_cfg_t *cfg)
{
memcpy(cfg, &isys_cfg->tpg_port_attr, sizeof(pixelgen_tpg_cfg_t));
@@ -659,7 +659,7 @@ static bool calculate_tpg_cfg(
static bool calculate_prbs_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
- input_system_cfg_t *isys_cfg,
+ isp2401_input_system_cfg_t *isys_cfg,
pixelgen_prbs_cfg_t *cfg)
{
memcpy(cfg, &isys_cfg->prbs_port_attr, sizeof(pixelgen_prbs_cfg_t));
@@ -668,7 +668,7 @@ static bool calculate_prbs_cfg(
}
static bool calculate_fe_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
csi_rx_frontend_cfg_t *cfg)
{
cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes;
@@ -677,7 +677,7 @@ static bool calculate_fe_cfg(
static bool calculate_be_cfg(
const input_system_input_port_t *input_port,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
csi_rx_backend_cfg_t *cfg)
{
@@ -707,7 +707,7 @@ static bool calculate_be_cfg(
}
static bool calculate_stream2mmio_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
stream2mmio_cfg_t *cfg
)
@@ -725,7 +725,7 @@ static bool calculate_stream2mmio_cfg(
static bool calculate_ibuf_ctrl_cfg(
const input_system_channel_t *channel,
const input_system_input_port_t *input_port,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
ibuf_ctrl_cfg_t *cfg)
{
const s32 bits_per_byte = 8;
@@ -807,7 +807,7 @@ static bool calculate_ibuf_ctrl_cfg(
static bool calculate_isys2401_dma_cfg(
const input_system_channel_t *channel,
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
isys2401_dma_cfg_t *cfg)
{
cfg->channel = channel->dma_channel;
@@ -827,7 +827,7 @@ static bool calculate_isys2401_dma_cfg(
/* See also: ia_css_dma_configure_from_info() */
static bool calculate_isys2401_dma_port_cfg(
- const input_system_cfg_t *isys_cfg,
+ const isp2401_input_system_cfg_t *isys_cfg,
bool raw_packed,
bool metadata,
isys2401_dma_port_cfg_t *cfg)
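
The virtual_isys.c changes are mechanical renames: input_system_cfg_t and ib_buffer_t gain an isp2401_ prefix so the 2400 and 2401 type definitions can be visible in the same build without colliding. A toy sketch of the idea (the fields are invented):

    #include <stdio.h>

    /* Both generations' config types can now coexist in one translation unit. */
    typedef struct { int ports; } isp2400_input_system_cfg_t;
    typedef struct { int ports; int metadata; } isp2401_input_system_cfg_t;

    static void cfg_2400(const isp2400_input_system_cfg_t *cfg)
    {
            printf("2400: %d ports\n", cfg->ports);
    }

    static void cfg_2401(const isp2401_input_system_cfg_t *cfg)
    {
            printf("2401: %d ports, metadata=%d\n", cfg->ports, cfg->metadata);
    }

    int main(void)
    {
            isp2400_input_system_cfg_t a = { 2 };
            isp2401_input_system_cfg_t b = { 4, 1 };

            cfg_2400(&a);
            cfg_2401(&b);
            return 0;
    }
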
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
index 18a7d18e197e..de2c526a58ae 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
@@ -243,7 +243,7 @@ bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
*/
bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/**
* @brief Get the pipeline io status
*
diff --git a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
index 4b8e85bc2122..d03957d1ecf4 100644
--- a/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
@@ -140,9 +140,7 @@ void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
-#if !defined(HAS_NO_INPUT_SYSTEM)
(enum mipi_port_id)0,
-#endif
NULL, NULL);
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
@@ -457,7 +455,7 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
return sp_group.pipe[thread_id].num_stages == 0;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
{
return(&sh_css_sp_group.pipe_io_status);
diff --git a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
index fdca743c4ab7..424e7a15a389 100644
--- a/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
+++ b/drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
@@ -44,7 +44,7 @@ int ia_css_queue_load(
the value as zero. This causes division by 0
exception as the size is used in a modular
division operation. */
- return EDOM;
+ return -EDOM;
}
}
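
The EDOM to -EDOM change follows the kernel convention that functions return 0 on success and a negative errno on failure, letting callers test ret < 0. A sketch:

    #include <errno.h>
    #include <stdio.h>

    /* Kernel-style: 0 on success, negative errno on failure. */
    static int queue_load(unsigned int size)
    {
            if (size == 0)
                    return -EDOM;   /* would otherwise divide by zero later */
            return 0;
    }

    int main(void)
    {
            int ret = queue_load(0);

            if (ret < 0)    /* the standard caller-side check */
                    printf("queue_load failed: %d\n", ret);
            return 0;
    }
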
diff --git a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
index 1ea74296fc8d..b4f53be18e7f 100644
--- a/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
@@ -241,7 +241,6 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
struct ia_css_rmgr_vbuf_handle **handle)
{
u32 i;
- bool succes = false;
assert(pool);
assert(pool->recycle);
@@ -255,8 +254,7 @@ void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
pool->handles[i] = NULL;
/* dont release, we are returning it...
ia_css_rmgr_refcount_release_vbuf(handle); */
- succes = true;
- break;
+ return;
}
}
}
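
rmgr_pop_handle now returns as soon as a recycled handle is found, dropping the write-only 'succes' flag and the break. A sketch of the early-return form (the pool layout is invented):

    #include <stddef.h>
    #include <stdio.h>

    #define POOL_SIZE 4

    static int value = 7;
    static int *pool[POOL_SIZE] = { NULL, NULL, &value, NULL };

    /* Pop the first occupied slot; early return replaces a found-flag + break. */
    static void pop_handle(int **handle)
    {
            size_t i;

            for (i = 0; i < POOL_SIZE; i++) {
                    if (pool[i]) {
                            *handle = pool[i];
                            pool[i] = NULL; /* slot is free again */
                            return;
                    }
            }
            *handle = NULL;
    }

    int main(void)
    {
            int *h;

            pop_handle(&h);
            printf("%s\n", h ? "got handle" : "pool empty");
            return 0;
    }
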
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index a68cbb4995f0..ddee04c8248d 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -27,9 +27,7 @@
#include "sh_css_internal.h"
#include "sh_css_mipi.h"
#include "sh_css_sp.h" /* sh_css_sp_group */
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "ia_css_isys.h"
-#endif
#include "ia_css_frame.h"
#include "sh_css_defs.h"
#include "sh_css_firmware.h"
@@ -51,7 +49,7 @@
#include "ia_css_pipe_util.h"
#include "ia_css_pipe_binarydesc.h"
#include "ia_css_pipe_stagedesc.h"
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
#include "ia_css_isys.h"
#endif
@@ -59,12 +57,10 @@
#include "assert_support.h"
#include "math_support.h"
#include "sw_event_global.h" /* Event IDs.*/
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
#include "ia_css_ifmtr.h"
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "input_system.h"
-#endif
#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
#include "gdc_device.h" /* HRT_GDC_N */
@@ -115,7 +111,7 @@ static int thread_alive;
struct sh_css my_css;
-int (*sh_css_printf)(const char *fmt, va_list args) = NULL;
+int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args) = NULL;
/* modes of work: stream_create and stream_destroy will update the save/restore data
only when in working mode, not suspend/resume
@@ -397,7 +393,7 @@ static int set_config_on_frame_enqueue(struct ia_css_frame_info
*info, struct frame_data_wrapper *frame);
#endif
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
static unsigned int get_crop_lines_for_bayer_order(const struct
ia_css_stream_config *config);
static unsigned int get_crop_columns_for_bayer_order(const struct
@@ -533,7 +529,7 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
#define GP_ISEL_TPG_MODE 0x90058
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
static int
sh_css_config_input_network(struct ia_css_stream *stream) {
unsigned int fmt_type;
@@ -594,7 +590,7 @@ sh_css_config_input_network(struct ia_css_stream *stream) {
"sh_css_config_input_network() leave:\n");
return 0;
}
-#elif !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+#elif defined(ISP2401)
static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
enum atomisp_input_format format,
unsigned int pixels_per_line)
@@ -894,7 +890,7 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
stream_cfg->source.port.num_lanes;
isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
isys_stream_descr->online = stream_cfg->online;
#endif
err |= ia_css_isys_convert_compressed_format(
@@ -919,7 +915,7 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
stream_cfg->metadata_config.resolution.width;
isys_stream_descr->metadata.lines_per_frame =
stream_cfg->metadata_config.resolution.height;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* For new input system, number of str2mmio requests must be even.
* So we round up number of metadata lines to be even. */
if (isys_stream_descr->metadata.lines_per_frame > 0)
@@ -1367,20 +1363,8 @@ static void
start_binary(struct ia_css_pipe *pipe,
struct ia_css_binary *binary)
{
- struct ia_css_stream *stream;
-
assert(pipe);
/* Acceleration uses firmware, the binary thus can be NULL */
- /* assert(binary != NULL); */
-
- (void)binary;
-
-#if !defined(HAS_NO_INPUT_SYSTEM)
- stream = pipe->stream;
-#else
- (void)pipe;
- (void)stream;
-#endif
if (binary)
sh_css_metrics_start_binary(&binary->metrics);
@@ -1395,11 +1379,11 @@ start_binary(struct ia_css_pipe *pipe,
sh_binary_running = true;
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
- if (stream->reconfigure_css_rx) {
+#if !defined(ISP2401)
+ if (pipe->stream->reconfigure_css_rx) {
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
pipe->stream->config.mode);
- stream->reconfigure_css_rx = false;
+ pipe->stream->reconfigure_css_rx = false;
}
#endif
}
@@ -1415,7 +1399,7 @@ start_copy_on_sp(struct ia_css_pipe *pipe,
if ((!pipe) || (!pipe->stream))
return -EINVAL;
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
if (pipe->stream->reconfigure_css_rx)
ia_css_isys_rx_disable();
#endif
@@ -1424,7 +1408,7 @@ start_copy_on_sp(struct ia_css_pipe *pipe,
return -EINVAL;
sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
if (pipe->stream->reconfigure_css_rx)
{
ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
@@ -1461,9 +1445,6 @@ static void start_pipe(
const struct ia_css_coordinate *coord = NULL;
const struct ia_css_isp_parameters *params = NULL;
-#if defined(HAS_NO_INPUT_SYSTEM)
- (void)input_mode;
-#endif
IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
me, copy_ovrd, input_mode);
@@ -1487,11 +1468,9 @@ static void start_pipe(
input_mode,
&me->stream->config.metadata_config,
&me->stream->info.metadata_info
-#if !defined(HAS_NO_INPUT_SYSTEM)
, (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
(enum mipi_port_id)0 :
me->stream->config.source.port.port,
-#endif
coord,
params);
@@ -1529,7 +1508,7 @@ sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
static void
enable_interrupts(enum ia_css_irq_type irq_type)
{
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
enum mipi_port_id port;
#endif
bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
@@ -1551,15 +1530,8 @@ enable_interrupts(enum ia_css_irq_type irq_type)
cnd_virq_enable_channel(
(enum virq_id)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
true);
-#if !defined(HAS_IRQ_MAP_VERSION_2)
- /* IRQ_SW_CHANNEL2_ID does not exist on 240x systems */
- cnd_virq_enable_channel(
- (enum virq_id)(IRQ_SW_CHANNEL2_ID + IRQ_SW_CHANNEL_OFFSET),
- true);
- virq_clear_all();
-#endif
-#ifdef USE_INPUT_SYSTEM_VERSION_2
+#ifndef ISP2401
for (port = 0; port < N_MIPI_PORT_ID; port++)
ia_css_isys_rx_enable_all_interrupts(port);
#endif
@@ -1832,15 +1804,10 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
sh_css_init_buffer_queues();
*/
-#if defined(HAS_INPUT_SYSTEM_VERSION_2) && defined(HAS_INPUT_SYSTEM_VERSION_2401)
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
- gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 0);
-#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
#endif
-#endif
-#if !defined(HAS_NO_INPUT_SYSTEM)
if (!IS_ISP2401)
dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
@@ -1851,7 +1818,6 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
err = -EINVAL;
-#endif
sh_css_params_map_and_store_default_gdc_lut();
@@ -2103,7 +2069,7 @@ create_host_pipeline(struct ia_css_stream *stream) {
}
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */
if (pipe_id != IA_CSS_PIPE_ID_ACC)
{
@@ -2111,7 +2077,7 @@ create_host_pipeline(struct ia_css_stream *stream) {
if (err)
goto ERR;
}
-#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+#elif defined(ISP2401)
if ((pipe_id != IA_CSS_PIPE_ID_ACC) &&
(main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY))
{
@@ -2525,7 +2491,7 @@ ia_css_uninit(void)
ia_css_rmgr_uninit();
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
	/* needed for reprogramming the input formatter after power cycle of css */
ifmtr_set_if_blocking_mode_reset = true;
#endif
@@ -2535,21 +2501,16 @@ ia_css_uninit(void)
}
ia_css_spctrl_unload_fw(SP0_ID);
sh_css_sp_set_sp_running(false);
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/* check and free any remaining mipi frames */
free_mipi_frames(NULL);
-#endif
sh_css_sp_reset_global_vars();
-#if !defined(HAS_NO_INPUT_SYSTEM)
ia_css_isys_uninit();
-#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
}
-#if defined(HAS_IRQ_MAP_VERSION_2)
int ia_css_irq_translate(
unsigned int *irq_infos)
{
@@ -2581,7 +2542,6 @@ int ia_css_irq_translate(
break;
case virq_isp:
break;
-#if !defined(HAS_NO_INPUT_SYSTEM)
case virq_isys_sof:
infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
break;
@@ -2591,8 +2551,7 @@ int ia_css_irq_translate(
case virq_isys_csi:
infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
break;
-#endif
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
case virq_ifmt0_id:
infos |= IA_CSS_IRQ_INFO_IF_ERROR;
break;
@@ -2631,7 +2590,7 @@ int ia_css_irq_enable(
IA_CSS_ENTER("info=%d, enable=%d", info, enable);
switch (info) {
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
irq = virq_isys_sof;
break;
@@ -2672,9 +2631,6 @@ int ia_css_irq_enable(
return 0;
}
-#else
-#error "sh_css.c: IRQ MAP must be one of { IRQ_MAP_VERSION_2 }"
-#endif
static unsigned int
sh_css_get_sw_interrupt_value(unsigned int irq)
@@ -2736,7 +2692,6 @@ alloc_continuous_frames(
bool continuous;
unsigned int i, idx;
unsigned int num_frames;
- struct ia_css_pipe *capture_pipe = NULL;
IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);
@@ -2774,7 +2729,7 @@ alloc_continuous_frames(
return -EINVAL;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* For CSI2+, the continuous frame will hold the full input frame */
ref_info.res.width = pipe->stream->config.input_config.input_res.width;
ref_info.res.height = pipe->stream->config.input_config.input_res.height;
@@ -2798,17 +2753,12 @@ alloc_continuous_frames(
}
/* Write format back to binary */
- if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
- {
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
pipe->pipe_settings.preview.preview_binary.in_frame_info.format =
ref_info.format;
- capture_pipe = pipe->pipe_settings.preview.capture_pipe;
- } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
- {
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format;
- capture_pipe = pipe->pipe_settings.video.capture_pipe;
- } else
- {
+ } else {
/* should not happen */
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
@@ -2865,10 +2815,12 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
struct ia_css_binary_descr preview_descr;
bool online;
int err = 0;
- bool continuous, need_vf_pp = false;
+ bool need_vf_pp = false;
bool need_isp_copy_binary = false;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool sensor = false;
+#else
+ bool continuous;
#endif
/* preview only have 1 output pin now */
struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
@@ -2880,9 +2832,10 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);
online = pipe->stream->config.online;
- continuous = pipe->stream->config.continuous;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#else
+ continuous = pipe->stream->config.continuous;
#endif
if (mycs->preview_binary.info)
@@ -3002,7 +2955,7 @@ load_preview_binaries(struct ia_css_pipe *pipe) {
return err;
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, only the Direct Sensor Mode
* Offline Preview uses the ISP copy binary.
*/
@@ -3343,7 +3296,7 @@ init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
return err;
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
static unsigned int
get_crop_lines_for_bayer_order(
const struct ia_css_stream_config *config)
@@ -3500,7 +3453,7 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
in_frame->info.format = format;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (format == IA_CSS_FRAME_FORMAT_RAW)
in_frame->info.format = (pipe->stream->config.pack_raw_pixels) ?
IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
@@ -3517,7 +3470,7 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
in_frame->dynamic_queue_id = queue_id;
in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
ia_css_get_crop_offsets(pipe, &in_frame->info);
#endif
err = ia_css_frame_init_planes(in_frame);
@@ -3568,7 +3521,6 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
bool need_copy = false;
bool need_vf_pp = false;
bool need_yuv_pp = false;
- unsigned int num_output_pins;
bool need_in_frameinfo_memory = false;
unsigned int i, num_yuv_scaler;
@@ -3588,7 +3540,7 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
me->dvs_frame_delay = pipe->dvs_frame_delay;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following: online or continuous
*/
@@ -3625,7 +3577,6 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
copy_binary = &pipe->pipe_settings.video.copy_binary;
video_binary = &pipe->pipe_settings.video.video_binary;
vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary;
- num_output_pins = video_binary->info->num_output_pins;
yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary;
num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler;
@@ -3646,7 +3597,7 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
goto ERR;
in_frame = me->stages->args.out_frame[0];
} else if (pipe->stream->config.continuous) {
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When continuous is enabled, configure in_frame with the
* last pipe, which is the copy pipe.
*/
@@ -3733,7 +3684,7 @@ static int create_host_video_pipeline(struct ia_css_pipe *pipe)
struct ia_css_frame *tmp_out_frame = NULL;
for (i = 0; i < num_yuv_scaler; i++) {
- if (is_output_stage[i] == true) {
+ if (is_output_stage[i]) {
tmp_out_frame = out_frame;
} else {
tmp_out_frame = NULL;
@@ -3818,7 +3769,7 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
struct ia_css_frame *out_frame;
struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
bool need_in_frameinfo_memory = false;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
@@ -3837,7 +3788,7 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
me = &pipe->pipeline;
ia_css_pipeline_clean(me);
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following:
* - Direct Sensor Mode Online Preview
@@ -3889,14 +3840,8 @@ create_host_preview_pipeline(struct ia_css_pipe *pipe) {
if (err)
goto ERR;
in_frame = me->stages->args.out_frame[0];
-#ifndef ISP2401
- } else
- {
-#else
- } else if (pipe->stream->config.continuous)
- {
-#endif
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ } else if (pipe->stream->config.continuous) {
+#ifdef ISP2401
/* When continuous is enabled, configure in_frame with the
* last pipe, which is the copy pipe.
*/
@@ -3976,8 +3921,6 @@ static void send_raw_frames(struct ia_css_pipe *pipe)
static int
preview_start(struct ia_css_pipe *pipe) {
- struct ia_css_pipeline *me;
- struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
int err = 0;
struct ia_css_pipe *copy_pipe, *capture_pipe;
struct ia_css_pipe *acc_pipe;
@@ -3993,29 +3936,20 @@ preview_start(struct ia_css_pipe *pipe) {
return -EINVAL;
}
- me = &pipe->pipeline;
-
preview_pipe_input_mode = pipe->stream->config.mode;
copy_pipe = pipe->pipe_settings.preview.copy_pipe;
capture_pipe = pipe->pipe_settings.preview.capture_pipe;
acc_pipe = pipe->pipe_settings.preview.acc_pipe;
- copy_binary = &pipe->pipe_settings.preview.copy_binary;
- preview_binary = &pipe->pipe_settings.preview.preview_binary;
- if (pipe->pipe_settings.preview.vf_pp_binary.info)
- vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
-
sh_css_metrics_start_frame();
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/* multi stream video needs mipi buffers */
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
-#endif
send_raw_frames(pipe);
{
@@ -4050,9 +3984,7 @@ preview_start(struct ia_css_pipe *pipe) {
pipe->stream->config.mode,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
pipe->stream->config.source.port.port,
-#endif
coord,
params);
@@ -4076,9 +4008,7 @@ preview_start(struct ia_css_pipe *pipe) {
IA_CSS_INPUT_MODE_MEMORY,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
(enum mipi_port_id)0,
-#endif
coord,
params);
}
@@ -4097,9 +4027,7 @@ preview_start(struct ia_css_pipe *pipe) {
IA_CSS_INPUT_MODE_MEMORY,
NULL,
NULL,
-#if !defined(HAS_NO_INPUT_SYSTEM)
(enum mipi_port_id)0,
-#endif
coord,
params);
}
@@ -4496,8 +4424,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
case IA_CSS_BUFFER_TYPE_INPUT_FRAME:
case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
- if ((pipe) && (pipe->stop_requested == true)) {
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ if (pipe && pipe->stop_requested) {
+#if !defined(ISP2401)
/* free mipi frames only for old input system
* for 2401 it is done in ia_css_stream_destroy call
*/
@@ -4529,7 +4457,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
pipe->num_invalid_frames--;
if (frame->info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
frame->planes.binary.size = frame->data_bytes;
#else
frame->planes.binary.size =
@@ -4857,7 +4785,7 @@ sh_css_pipe_start(struct ia_css_stream *stream) {
pipe_id = pipe->mode;
- if (stream->started == true)
+ if (stream->started)
{
IA_CSS_WARNING("Cannot start stream that is already started");
IA_CSS_LEAVE_ERR(err);
@@ -5142,24 +5070,23 @@ sh_css_pipes_stop(struct ia_css_stream *stream)
stream->pipes[i]->pipeline.pipe_id);
err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline);
- /*
- * Exit this loop if "ia_css_pipeline_request_stop()"
- * returns the error code.
- *
- * The error code would be generated in the following
- * two cases:
- * (1) The Scalar Processor has already been stopped.
- * (2) The "Host->SP" event queue is full.
- *
- * As the convention of using CSS API 2.0/2.1, such CSS
- * error code would be propogated from the CSS-internal
- * API returned value to the CSS API returned value. Then
- * the CSS driver should capture these error code and
- * handle it in the driver exception handling mechanism.
- */
- if (err) {
- goto ERR;
- }
+ /*
+ * Exit this loop if "ia_css_pipeline_request_stop()"
+ * returns the error code.
+ *
+ * The error code would be generated in the following
+ * two cases:
+ * (1) The Scalar Processor has already been stopped.
+ * (2) The "Host->SP" event queue is full.
+ *
+	 * Per the CSS API 2.0/2.1 convention, such a CSS
+	 * error code would be propagated from the CSS-internal
+	 * API return value to the CSS API return value. The
+	 * CSS driver should then capture these error codes and
+	 * handle them in its exception handling mechanism.
+ */
+ if (err)
+ goto ERR;
}
/*
@@ -5286,7 +5213,7 @@ RET:
return rval;
}
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
{
@@ -5413,13 +5340,7 @@ sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
info->isp_in_height = binary->internal_frame_info.res.height;
}
-#if defined(HAS_VAMEM_VERSION_2)
info->vamem_type = IA_CSS_VAMEM_TYPE_2;
-#elif defined(HAS_VAMEM_VERSION_1)
- info->vamem_type = IA_CSS_VAMEM_TYPE_1;
-#else
-#error "Unknown VAMEM version"
-#endif
ERR :
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -5677,7 +5598,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
pipe->num_invalid_frames, pipe->dvs_frame_delay);
/* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
/* Copy */
if (!online && !continuous) {
/* TODO: what exactly needs doing, prepend the copy binary to
@@ -5804,7 +5725,6 @@ unload_video_binaries(struct ia_css_pipe *pipe) {
static int video_start(struct ia_css_pipe *pipe)
{
- struct ia_css_binary *copy_binary;
int err = 0;
struct ia_css_pipe *copy_pipe, *capture_pipe;
enum sh_css_pipe_config_override copy_ovrd;
@@ -5824,17 +5744,13 @@ static int video_start(struct ia_css_pipe *pipe)
copy_pipe = pipe->pipe_settings.video.copy_pipe;
capture_pipe = pipe->pipe_settings.video.capture_pipe;
- copy_binary = &pipe->pipe_settings.video.copy_binary;
-
sh_css_metrics_start_frame();
/* multi stream video needs mipi buffers */
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
err = send_mipi_frames(pipe);
if (err)
return err;
-#endif
send_raw_frames(pipe);
{
@@ -5867,9 +5783,7 @@ static int video_start(struct ia_css_pipe *pipe)
pipe->stream->config.mode,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
pipe->stream->config.source.port.port,
-#endif
coord,
params);
@@ -5892,9 +5806,7 @@ static int video_start(struct ia_css_pipe *pipe)
IA_CSS_INPUT_MODE_MEMORY,
&pipe->stream->config.metadata_config,
&pipe->stream->info.metadata_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
(enum mipi_port_id)0,
-#endif
coord,
params);
}
@@ -6010,7 +5922,7 @@ static bool need_capture_pp(
if (IS_ISP2401) {
/* ldc and capture_pp are not supported in the same pipeline */
- if (need_capt_ldc(pipe) == true)
+ if (need_capt_ldc(pipe))
return false;
}
@@ -6073,13 +5985,13 @@ static int load_primary_binaries(
struct ia_css_pipe *pipe)
{
bool online = false;
- bool memory = false;
- bool continuous = false;
bool need_pp = false;
bool need_isp_copy_binary = false;
bool need_ldc = false;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool sensor = false;
+#else
+ bool memory, continuous;
#endif
struct ia_css_frame_info prim_in_info,
prim_out_info,
@@ -6100,10 +6012,11 @@ static int load_primary_binaries(
pipe->mode == IA_CSS_PIPE_ID_COPY);
online = pipe->stream->config.online;
+#ifdef ISP2401
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+#else
memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
continuous = pipe->stream->config.continuous;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
- sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
#endif
mycs = &pipe->pipe_settings.capture;
@@ -6230,8 +6143,8 @@ static int load_primary_binaries(
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
- need_pp = 0;
- need_ldc = 0;
+ need_pp = false;
+ need_ldc = false;
}
/* we build up the pipeline starting at the end */
@@ -6320,7 +6233,7 @@ static int load_primary_binaries(
if (err)
return err;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, only the Direct Sensor Mode
* Offline Capture uses the ISP copy binary.
*/
@@ -6534,7 +6447,7 @@ static int load_advanced_binaries(
}
/* Copy */
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* For CSI2+, only the direct sensor mode/online requires ISP copy */
need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
#endif
@@ -6681,7 +6594,7 @@ static int load_low_light_binaries(
}
/* Copy */
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* For CSI2+, only the direct sensor mode/online requires ISP copy */
need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
#endif
@@ -6754,7 +6667,7 @@ static int load_capture_binaries(
switch (pipe->config.default_capture_config.mode) {
case IA_CSS_CAPTURE_MODE_RAW:
err = load_copy_binaries(pipe);
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
if (!err)
pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
#endif
@@ -7246,7 +7159,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
next_binary = NULL;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/*
* NOTES
 * - Why does the "yuvpp" pipe need "isp_copy_binary" (i.e. ISP Copy) when
@@ -7266,9 +7179,9 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe) {
*/
need_isp_copy_binary =
(pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
-#else /* !USE_INPUT_SYSTEM_VERSION_2401 */
+#else /* !ISP2401 */
need_isp_copy_binary = true;
-#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* ISP2401 */
if (need_isp_copy_binary)
{
@@ -7390,7 +7303,6 @@ unload_yuvpp_binaries(struct ia_css_pipe *pipe) {
static int yuvpp_start(struct ia_css_pipe *pipe)
{
- struct ia_css_binary *copy_binary;
int err = 0;
enum sh_css_pipe_config_override copy_ovrd;
enum ia_css_input_mode yuvpp_pipe_input_mode;
@@ -7403,19 +7315,15 @@ static int yuvpp_start(struct ia_css_pipe *pipe)
yuvpp_pipe_input_mode = pipe->stream->config.mode;
- copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
-
sh_css_metrics_start_frame();
/* multi stream video needs mipi buffers */
-#if !defined(HAS_NO_INPUT_SYSTEM) && (defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401))
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
-#endif
{
unsigned int thread_id;
@@ -7522,7 +7430,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
*vf_pp_binary,
*yuv_scaler_binary;
bool need_scaler = false;
- unsigned int num_stage, num_vf_pp_stage, num_output_stage;
+ unsigned int num_stage, num_output_stage;
unsigned int i, j;
struct ia_css_frame *in_frame = NULL;
@@ -7531,7 +7439,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
struct ia_css_pipeline_stage_desc stage_desc;
bool need_in_frameinfo_memory = false;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
@@ -7553,10 +7461,9 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
}
ia_css_pipe_util_create_output_frames(bin_out_frame);
num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler;
- num_vf_pp_stage = pipe->pipe_settings.yuvpp.num_vf_pp;
num_output_stage = pipe->pipe_settings.yuvpp.num_output;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following:
* - Direct Sensor Mode Online Capture
@@ -7663,7 +7570,7 @@ create_host_yuvpp_pipeline(struct ia_css_pipe *pipe) {
{
struct ia_css_frame *in_frame_local = NULL;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
	/* After isp copy is enabled, in_frame needs to be passed. */
if (!online)
in_frame_local = in_frame;
@@ -7880,7 +7787,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
struct ia_css_frame *vf_frame;
struct ia_css_pipeline_stage_desc stage_desc;
bool need_in_frameinfo_memory = false;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool sensor = false;
bool buffered_sensor = false;
bool online = false;
@@ -7902,7 +7809,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
ia_css_pipeline_clean(me);
ia_css_pipe_util_create_output_frames(out_frames);
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
/* When the input system is 2401, always enable 'in_frameinfo_memory'
* except for the following:
* - Direct Sensor Mode Online Capture
@@ -7989,7 +7896,7 @@ create_host_regular_capture_pipeline(struct ia_css_pipe *pipe) {
{
if (raw) {
ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
if (!continuous) {
ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
out_frames, in_frame, NULL);
@@ -8256,14 +8163,14 @@ static int capture_start(
}
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* old isys: need to send_mipi_frames() in all pipe modes */
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
-#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+#elif defined(ISP2401)
if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
err = send_mipi_frames(pipe);
if (err) {
@@ -8282,7 +8189,7 @@ static int capture_start(
}
start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
-#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
/*
* old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
* which is currently done in start_binary(); but COPY pipe contains no binary,
@@ -8332,7 +8239,6 @@ sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
return 0;
}
-#if !defined(HAS_NO_INPUT_SYSTEM)
void
ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
const unsigned short *data,
@@ -8387,7 +8293,6 @@ ia_css_stream_end_input_frame(const struct ia_css_stream *stream) {
ia_css_inputfifo_end_frame(stream->config.channel_id);
}
-#endif
static void
append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware) {
@@ -8714,9 +8619,7 @@ sh_css_init_host_sp_control_vars(void) {
unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
/ sizeof(int);
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
unsigned int i;
-#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_init_host_sp_control_vars() enter: void\n");
@@ -8762,12 +8665,10 @@ sh_css_init_host_sp_control_vars(void) {
#endif
store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
-#if !defined(HAS_NO_INPUT_SYSTEM)
for (i = 0; i < N_CSI_PORTS; i++) {
sh_css_update_host2sp_num_mipi_frames
(my_css.num_mipi_frames[i]);
}
-#endif
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_init_host_sp_control_vars() leave: return_void\n");
@@ -8839,47 +8740,27 @@ ia_css_acc_pipe_create(struct ia_css_pipe *pipe) {
return err;
}
-int
-ia_css_pipe_create(const struct ia_css_pipe_config *config,
- struct ia_css_pipe **pipe) {
-#ifndef ISP2401
- if (!config)
-#else
+int ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe)
+{
int err = 0;
IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe);
- if (!config)
- {
+ if (!config || !pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
-#endif
return -EINVAL;
-#ifndef ISP2401
- if (!pipe)
-#else
-}
-
-if (!pipe)
-{
- IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
-#endif
- return -EINVAL;
-#ifndef ISP2401
- return ia_css_pipe_create_extra(config, NULL, pipe);
-#else
-}
+ }
-err = ia_css_pipe_create_extra(config, NULL, pipe);
+ err = ia_css_pipe_create_extra(config, NULL, pipe);
-if (err == 0)
-{
- IA_CSS_LOG("pipe created successfully = %p", *pipe);
-}
+ if (err == 0) {
+ IA_CSS_LOG("pipe created successfully = %p", *pipe);
+ }
-IA_CSS_LEAVE_ERR_PRIVATE(err);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
-return err;
-#endif
+ return err;
}
int
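
The rewrite above replaces two #ifdef-interleaved versions of ia_css_pipe_create() with a single body that validates both arguments up front. A hypothetical caller sketch showing the resulting contract; create_one_pipe() is not part of the driver:

static int create_one_pipe(struct ia_css_pipe_config *cfg,
			   struct ia_css_pipe **pipe_out)
{
	int err = ia_css_pipe_create(cfg, pipe_out);

	if (err)	/* -EINVAL for NULL cfg/pipe_out, or a deeper error */
		return err;

	/* On success *pipe_out is valid and has already been logged. */
	return 0;
}
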
@@ -9135,7 +9016,7 @@ ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
return err;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
static int
ia_css_stream_configure_rx(struct ia_css_stream *stream) {
@@ -9325,7 +9206,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
int err = -EINVAL;
struct ia_css_metadata_info md_info;
struct ia_css_resolution effective_res;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
bool aspect_ratio_crop_enabled = false;
#endif
@@ -9342,7 +9223,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
return err;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* We don't support metadata for JPEG stream, since they both use str2mem */
if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
stream_config->metadata_config.resolution.height > 0)
@@ -9353,7 +9234,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
}
#endif
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (stream_config->online && stream_config->pack_raw_pixels)
{
IA_CSS_LOG("online and pack raw is invalid on input system 2401");
@@ -9363,12 +9244,11 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM)
ia_css_debug_pipe_graph_dump_stream_config(stream_config);
/* check if mipi size specified */
if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (!stream_config->online)
#endif
{
@@ -9408,7 +9288,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
return err;
}
}
-#endif
/* Currently we only supported metadata up to a certain size. */
err = metadata_info_init(&stream_config->metadata_config, &md_info);
@@ -9449,13 +9328,13 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* take over stream config */
curr_stream->config = *stream_config;
-#if defined(USE_INPUT_SYSTEM_VERSION_2401) && defined(CSI2P_DISABLE_ISYS2401_ONLINE_MODE)
+#if defined(ISP2401)
if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
stream_config->online)
curr_stream->config.online = false;
#endif
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (curr_stream->config.online)
{
curr_stream->config.source.port.num_lanes =
@@ -9479,12 +9358,12 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
{
case IA_CSS_INPUT_MODE_SENSOR:
case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
ia_css_stream_configure_rx(curr_stream);
#endif
break;
case IA_CSS_INPUT_MODE_TPG:
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
curr_stream->config.source.tpg.x_mask,
curr_stream->config.source.tpg.y_mask,
@@ -9501,7 +9380,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
#endif
break;
case IA_CSS_INPUT_MODE_PRBS:
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
IA_CSS_LOG("mode prbs");
sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
#endif
@@ -9514,14 +9393,14 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
IA_CSS_LOG("mode sensor/default");
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
err = aspect_ratio_crop_init(curr_stream,
pipes,
&aspect_ratio_crop_enabled);
if (err)
{
IA_CSS_LEAVE_ERR(err);
- return err;
+ goto ERR;
}
#endif
for (i = 0; i < num_pipes; i++)
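
The change above from a direct return to goto ERR matters because by this point the stream has been partially constructed; bailing out without unwinding would leak it. A minimal runnable sketch of the unwind pattern, with hypothetical stand-in helpers:

#include <errno.h>
#include <stdlib.h>

/* Hypothetical stand-in for a setup step that can fail. */
static int configure(const int *res)
{
	return *res ? 0 : -EINVAL;
}

static int stream_setup(int value)
{
	int *res = malloc(sizeof(*res));	/* partial construction */
	int err;

	if (!res)
		return -ENOMEM;
	*res = value;

	err = configure(res);
	if (err)
		goto ERR;	/* unwind instead of leaking res */

	free(res);
	return 0;
ERR:
	free(res);
	return err;
}

int main(void)
{
	return stream_setup(1);	/* succeeds; stream_setup(0) would unwind */
}
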
@@ -9537,7 +9416,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (effective_res.height == 0 || effective_res.width == 0) {
effective_res = curr_pipe->stream->config.input_config.effective_res;
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* The aspect ratio cropping is currently only
* supported on the new input system. */
if (aspect_ratio_crop_check(aspect_ratio_crop_enabled, curr_pipe)) {
@@ -9625,10 +9504,10 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
IA_CSS_PIPE_MODE_VIDEO, false);
acc_pipe = find_pipe(pipes, num_pipes,
IA_CSS_PIPE_MODE_ACC, false);
- if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt == true)
+ if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt)
curr_stream->cont_capt =
false; /* preview + QoS case will not need cont_capt switch */
- if (curr_stream->cont_capt == true) {
+ if (curr_stream->cont_capt) {
capture_pipe = find_pipe(pipes, num_pipes,
IA_CSS_PIPE_MODE_CAPTURE, false);
if (!capture_pipe) {
@@ -9650,7 +9529,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
copy_pipe->stream = curr_stream;
}
- if (preview_pipe && (curr_stream->cont_capt == true)) {
+ if (preview_pipe && curr_stream->cont_capt) {
preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;
}
if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
@@ -9661,7 +9540,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
copy_pipe->stream = curr_stream;
}
- if (video_pipe && (curr_stream->cont_capt == true)) {
+ if (video_pipe && curr_stream->cont_capt) {
video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
}
if (preview_pipe && acc_pipe) {
@@ -9811,7 +9690,7 @@ ia_css_stream_destroy(struct ia_css_stream *stream) {
if ((stream->last_pipe) &&
ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num))
{
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
bool free_mpi;
for (i = 0; i < stream->num_pipes; i++) {
@@ -10003,15 +9882,13 @@ ia_css_stream_start(struct ia_css_stream *stream) {
return err;
}
-#if !defined(HAS_NO_INPUT_SYSTEM)
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
(stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
stream_register_with_csi_rx(stream);
#endif
-#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* Initialize mipi size checks */
if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
{
@@ -10025,14 +9902,12 @@ ia_css_stream_start(struct ia_css_stream *stream) {
}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM)
if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)
{
err = sh_css_config_input_network(stream);
if (err)
return err;
}
-#endif /* !HAS_NO_INPUT_SYSTEM */
err = sh_css_pipe_start(stream);
IA_CSS_LEAVE_ERR(err);
@@ -10049,7 +9924,7 @@ ia_css_stream_stop(struct ia_css_stream *stream) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
stream->last_pipe->mode);
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
/* De-initialize mipi size checks */
if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
{
@@ -10515,19 +10390,17 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream) {
void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
{
unsigned int thread_id;
- enum ia_css_pipe_id pipe_id;
unsigned int pipe_num;
bool need_input_queue;
IA_CSS_ENTER("");
assert(pipe);
- pipe_id = pipe->mode;
pipe_num = pipe->pipe_num;
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
-#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
need_input_queue = true;
#else
need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
@@ -10856,7 +10729,7 @@ ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe,
return err;
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
static int
aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
struct ia_css_pipe *pipes[],
diff --git a/drivers/staging/media/atomisp/pci/sh_css_defs.h b/drivers/staging/media/atomisp/pci/sh_css_defs.h
index 92d80213860f..30a84a587b2a 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_defs.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_defs.h
@@ -397,10 +397,6 @@ RGB[0,8191],coef[-8192,8191] -> RGB[0,8191]
#define SH_CSS_MAX_STAGES 8 /* primary_stage[1-6], capture_pp, vf_pp */
/* The CSI2+ input system requires extra padding from vmem */
-#ifdef CONFIG_CSI2_PLUS
-#define _ISP_EXTRA_PADDING_VECS 2
-#else
#define _ISP_EXTRA_PADDING_VECS 0
-#endif /* CONFIG_CSI2_PLUS */
#endif /* _SH_CSS_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
index d4ab15b6d1ac..db25e39bea88 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_firmware.c
@@ -51,9 +51,11 @@ struct fw_param {
static struct firmware_header *firmware_header;
-/* The string STR is a place holder
+/*
+ * The string STR is a placeholder
+ * which will be replaced with the actual RELEASE_VERSION
+ * during package generation. Please do not modify.
+ * during package generation. Please do not modify
+ */
static const char *isp2400_release_version = STR(irci_stable_candrpv_0415_20150521_0458);
static const char *isp2401_release_version = STR(irci_ecr-master_20150911_0724);
@@ -78,7 +80,8 @@ char *sh_css_get_fw_version(void)
/* Setup sp/sp1 binary */
static int
setup_binary(struct ia_css_fw_info *fw, const char *fw_data,
- struct ia_css_fw_info *sh_css_fw, unsigned int binary_id) {
+ struct ia_css_fw_info *sh_css_fw, unsigned int binary_id)
+{
const char *blob_data;
if ((!fw) || (!fw_data))
@@ -102,7 +105,8 @@ setup_binary(struct ia_css_fw_info *fw, const char *fw_data,
int
sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
struct ia_css_blob_descr *bd,
- unsigned int index) {
+ unsigned int index)
+{
const char *name;
const unsigned char *blob;
@@ -110,14 +114,16 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
return -EINVAL;
/* Special case: only one binary in fw */
- if (!bi) bi = (const struct ia_css_fw_info *)fw;
+ if (!bi)
+ bi = (const struct ia_css_fw_info *)fw;
name = fw + bi->blob.prog_name_offset;
blob = (const unsigned char *)fw + bi->blob.offset;
/* sanity check */
- if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size + bi->blob.data_size + bi->blob.padding_size)
- {
+ if (bi->blob.size !=
+ bi->blob.text_size + bi->blob.icache_size +
+ bi->blob.data_size + bi->blob.padding_size) {
/* sanity check, note the padding bytes added for section to DDR alignment */
return -EINVAL;
}
@@ -128,21 +134,18 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
bd->blob = blob;
bd->header = *bi;
- if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware)
- {
+ if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) {
char *namebuffer;
namebuffer = kstrdup(name, GFP_KERNEL);
if (!namebuffer)
return -ENOMEM;
bd->name = fw_minibuffer[index].name = namebuffer;
- } else
- {
+ } else {
bd->name = name;
}
- if (bi->type == ia_css_isp_firmware)
- {
+ if (bi->type == ia_css_isp_firmware) {
size_t paramstruct_size = sizeof(struct ia_css_memory_offsets);
size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
@@ -204,7 +207,7 @@ sh_css_check_firmware_version(struct device *dev, const char *fw_data)
}
	/* For now, let's just accept a wrong firmware version */
- return 0;
+ return false;
}
static const char * const fw_type_name[] = {
@@ -223,7 +226,8 @@ static const char * const fw_acc_type_name[] = {
int
sh_css_load_firmware(struct device *dev, const char *fw_data,
- unsigned int fw_size) {
+ unsigned int fw_size)
+{
unsigned int i;
struct ia_css_fw_info *binaries;
struct sh_css_fw_bi_file_h *file_header;
@@ -238,7 +242,8 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
firmware_header = (struct firmware_header *)fw_data;
file_header = &firmware_header->file_header;
binaries = &firmware_header->binary_header;
- strscpy(FW_rel_ver_name, file_header->version, min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
+ strscpy(FW_rel_ver_name, file_header->version,
+ min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
ret = sh_css_check_firmware_version(dev, fw_data);
if (ret) {
IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
@@ -257,8 +262,7 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
sh_css_num_binaries = file_header->binary_nr;
/* Only allocate memory for ISP blob info */
- if (sh_css_num_binaries > NUM_OF_SPS)
- {
+ if (sh_css_num_binaries > NUM_OF_SPS) {
sh_css_blob_info = kmalloc(
(sh_css_num_binaries - NUM_OF_SPS) *
sizeof(*sh_css_blob_info), GFP_KERNEL);
@@ -273,13 +277,13 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
if (!fw_minibuffer)
return -ENOMEM;
- for (i = 0; i < sh_css_num_binaries; i++)
- {
+ for (i = 0; i < sh_css_num_binaries; i++) {
struct ia_css_fw_info *bi = &binaries[i];
- /* note: the var below is made static as it is quite large;
- if it is not static it ends up on the stack which could
- cause issues for drivers
- */
+ /*
+ * note: the var below is made static as it is quite large;
+ * if it is not static it ends up on the stack which could
+ * cause issues for drivers
+ */
static struct ia_css_blob_descr bd;
int err;
@@ -333,7 +337,11 @@ sh_css_load_firmware(struct device *dev, const char *fw_data,
return err;
} else {
- /* All subsequent binaries (including bootloaders) (i>NUM_OF_SPS) are ISP firmware */
+ /*
+ * All subsequent binaries
+ * (including bootloaders) (i>NUM_OF_SPS)
+ * are ISP firmware
+ */
if (i < NUM_OF_SPS)
return -EINVAL;
@@ -374,8 +382,10 @@ ia_css_ptr
sh_css_load_blob(const unsigned char *blob, unsigned int size)
{
ia_css_ptr target_addr = hmm_alloc(size, HMM_BO_PRIVATE, 0, NULL, 0);
- /* this will allocate memory aligned to a DDR word boundary which
- is required for the CSS DMA to read the instructions. */
+ /*
+ * this will allocate memory aligned to a DDR word boundary which
+ * is required for the CSS DMA to read the instructions.
+ */
assert(blob);
if (target_addr)
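
hmm_alloc() above is expected to return DDR-word-aligned memory so the CSS DMA can fetch the blob. A user-space sketch of that alignment requirement; the 32-byte DDR word size is an assumption based on the CSS headers:

#include <stdlib.h>

#define DDR_WORD_BYTES	32	/* assumed: HIVE_ISP_DDR_WORD_BITS / 8 */

/*
 * Return DDR-word-aligned memory or NULL. aligned_alloc() (C11) requires
 * the size to be a multiple of the alignment, so round it up first.
 */
static void *alloc_ddr_aligned(size_t size)
{
	size = (size + DDR_WORD_BYTES - 1) & ~((size_t)DDR_WORD_BYTES - 1);
	return aligned_alloc(DDR_WORD_BYTES, size);
}
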
diff --git a/drivers/staging/media/atomisp/pci/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
index 06b502151af9..879c85311038 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_hrt.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_hrt.c
@@ -64,11 +64,7 @@ bool sh_css_hrt_system_is_idle(void)
int sh_css_hrt_sp_wait(void)
{
-#if defined(HAS_IRQ_MAP_VERSION_2)
irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL0_ID;
-#else
- irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL2_ID;
-#endif
/*
* Wait till SP is idle or till there is a SW2 interrupt
* The SW2 interrupt will be used when frameloop runs on SP
diff --git a/drivers/staging/media/atomisp/pci/sh_css_internal.h b/drivers/staging/media/atomisp/pci/sh_css_internal.h
index 5c25a25dce92..3c669ec79b68 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_internal.h
@@ -22,7 +22,7 @@
#include <platform_support.h>
#include <stdarg.h>
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
#include "input_formatter.h"
#endif
#include "input_system.h"
@@ -86,11 +86,9 @@
#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
#define SH_CSS_ENABLE_METADATA
-#endif
-#if defined(SH_CSS_ENABLE_METADATA) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(SH_CSS_ENABLE_METADATA) && !defined(ISP2401)
#define SH_CSS_ENABLE_METADATA_THREAD
#endif
@@ -320,15 +318,9 @@ struct sh_css_sp_debug_state {
#elif SP_DEBUG == SP_DEBUG_TRACE
-#if 1
/* Example of just one global trace */
#define SH_CSS_SP_DBG_NR_OF_TRACES (1)
#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
-#else
-/* E.g. if you like separate traces for 4 threads */
-#define SH_CSS_SP_DBG_NR_OF_TRACES (4)
-#define SH_CSS_SP_DBG_TRACE_DEPTH (10)
-#endif
#define SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS (13)
@@ -371,7 +363,7 @@ struct sh_css_sp_debug_command {
u32 dma_sw_reg;
};
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
/* SP input formatter configuration.*/
struct sh_css_sp_input_formatter_set {
u32 stream_format;
@@ -391,7 +383,7 @@ struct sh_css_sp_config {
frames are locked when their EOF event is successfully sent to the
host (true) or when they are passed to the preview/video pipe
(false). */
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
struct {
u8 a_changed;
u8 b_changed;
@@ -400,7 +392,7 @@ struct sh_css_sp_config {
set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
} input_formatter;
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
sync_generator_cfg_t sync_gen;
tpg_cfg_t tpg;
prbs_cfg_t prbs;
@@ -423,7 +415,7 @@ enum sh_css_stage_type {
#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS) - 1)
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
struct sh_css_sp_pipeline_terminal {
union {
/* Input System 2401 */
@@ -679,7 +671,7 @@ struct sh_css_sp_stage {
struct sh_css_sp_group {
struct sh_css_sp_config config;
struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS];
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS];
struct sh_css_sp_pipeline_io_status pipe_io_status;
#endif
@@ -828,11 +820,9 @@ struct host_sp_communication {
ia_css_ptr host2sp_offline_frames[NUM_CONTINUOUS_FRAMES];
ia_css_ptr host2sp_offline_metadata[NUM_CONTINUOUS_FRAMES];
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
ia_css_ptr host2sp_mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
ia_css_ptr host2sp_mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
u32 host2sp_num_mipi_frames[N_CSI_PORTS];
-#endif
u32 host2sp_cont_avail_num_raw_frames;
u32 host2sp_cont_extra_num_raw_frames;
u32 host2sp_cont_target_num_raw_frames;
@@ -840,20 +830,12 @@ struct host_sp_communication {
};
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
(sizeof(uint32_t) + \
(NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
(N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM * SIZE_OF_HRT_VADDRESS * 2) + \
((3 + N_CSI_PORTS) * sizeof(uint32_t)) + \
(NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
-#else
-#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
- (sizeof(uint32_t) + \
- (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
- (3 * sizeof(uint32_t)) + \
- (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
-#endif
struct host_sp_queues {
/*
@@ -925,10 +907,9 @@ struct host_sp_queues {
#define SIZE_OF_HOST_SP_QUEUES_STRUCT \
(SIZE_OF_QUEUES_ELEMS + SIZE_OF_QUEUES_DESC)
-extern int (*sh_css_printf)(const char *fmt, va_list args);
+extern int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args);
-static inline void
-sh_css_print(const char *fmt, ...)
+static inline void __printf(1, 2) sh_css_print(const char *fmt, ...)
{
va_list ap;
@@ -939,8 +920,7 @@ sh_css_print(const char *fmt, ...)
}
}
-static inline void
-sh_css_vprint(const char *fmt, va_list args)
+static inline void __printf(1, 0) sh_css_vprint(const char *fmt, va_list args)
{
if (sh_css_printf)
sh_css_printf(fmt, args);
@@ -987,7 +967,7 @@ sh_css_frame_info_set_width(struct ia_css_frame_info *info,
unsigned int width,
unsigned int aligned);
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port,
@@ -1036,7 +1016,7 @@ sh_css_continuous_is_enabled(uint8_t pipe_num);
struct ia_css_pipe *
find_pipe_by_num(uint32_t pipe_num);
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
void
ia_css_get_crop_offsets(
struct ia_css_pipe *pipe,
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index 2ef5dbd62a6d..d5ae7f0b5864 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -30,10 +30,8 @@
#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */
#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
static u32
ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
-#endif
int
ia_css_mipi_frame_specify(const unsigned int size_mem_words,
@@ -120,7 +118,7 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
unsigned int mem_words = 0;
unsigned int width_padded = width;
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
/* The changes will be reverted as soon as RAW
* Buffers are deployed by the 2401 Input System
* in the non-continuous use scenario.
@@ -246,7 +244,7 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
return err;
}
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
int
ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
const unsigned int size_mem_words) {
@@ -275,19 +273,17 @@ ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
void
mipi_init(void)
{
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
unsigned int i;
for (i = 0; i < N_CSI_PORTS; i++)
ref_count_mipi_allocation[i] = 0;
-#endif
}
int
calculate_mipi_buff_size(
struct ia_css_stream_config *stream_cfg,
unsigned int *size_mem_words) {
-#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if !defined(ISP2401)
int err = -EINVAL;
(void)stream_cfg;
(void)size_mem_words;
@@ -409,10 +405,8 @@ static bool buffers_needed(struct ia_css_pipe *pipe)
int
allocate_mipi_frames(struct ia_css_pipe *pipe,
struct ia_css_stream_info *info) {
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
int err = -EINVAL;
unsigned int port;
- struct ia_css_frame_info mipi_intermediate_info;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) enter:\n", pipe);
@@ -427,7 +421,7 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
return -EINVAL;
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (pipe->stream->config.online)
{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
@@ -459,13 +453,13 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
return -EINVAL;
}
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
err = calculate_mipi_buff_size(
&pipe->stream->config,
&my_css.mipi_frame_size[port]);
#endif
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
if (ref_count_mipi_allocation[port] != 0)
{
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
@@ -491,17 +485,6 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
ref_count_mipi_allocation[port]++;
- /* TODO: Cleaning needed. */
- /* This code needs to modified to allocate the MIPI frames in the correct normal way
- with an allocate from info, by justin */
- mipi_intermediate_info = pipe->pipe_settings.video.video_binary.internal_frame_info;
- mipi_intermediate_info.res.width = 0;
- mipi_intermediate_info.res.height = 0;
- /* To indicate it is not (yet) valid format. */
- mipi_intermediate_info.format = IA_CSS_FRAME_FORMAT_NUM;
- mipi_intermediate_info.padded_width = 0;
- mipi_intermediate_info.raw_bit_depth = 0;
-
/* AM TODO: mipi frames number should come from stream struct. */
my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM;
@@ -560,16 +543,10 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
"allocate_mipi_frames(%p) exit:\n", pipe);
return err;
-#else
- (void)pipe;
- (void)info;
- return 0;
-#endif
}
int
free_mipi_frames(struct ia_css_pipe *pipe) {
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
int err = -EINVAL;
unsigned int port;
@@ -609,7 +586,7 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
}
if (ref_count_mipi_allocation[port] > 0) {
-#if defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
assert(ref_count_mipi_allocation[port] == 1);
if (ref_count_mipi_allocation[port] != 1) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
@@ -641,7 +618,7 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"free_mipi_frames(%p) exit (deallocated).\n", pipe);
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
else {
/* 2401 system allows multiple streams to use same physical port. This is not
* true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
@@ -675,15 +652,11 @@ free_mipi_frames(struct ia_css_pipe *pipe) {
ref_count_mipi_allocation[port] = 0;
}
}
-#else
- (void)pipe;
-#endif
return 0;
}
int
send_mipi_frames(struct ia_css_pipe *pipe) {
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
int err = -EINVAL;
unsigned int i;
#ifndef ISP2401
@@ -751,8 +724,5 @@ send_mipi_frames(struct ia_css_pipe *pipe) {
(uint8_t)my_css.num_mipi_frames[port],
0 /* not used */);
IA_CSS_LEAVE_ERR_PRIVATE(0);
-#else
- (void)pipe;
-#endif
return 0;
}
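
allocate_mipi_frames() and free_mipi_frames() above keep a per-port reference count so that, on the 2401 input system, several streams can share one physical port's MIPI buffers. A runnable sketch of that counting scheme; the constants and printouts are illustrative:

#include <stdio.h>

#define N_CSI_PORTS 3

static unsigned int ref_count_mipi_allocation[N_CSI_PORTS];

static void mipi_get(unsigned int port)
{
	if (ref_count_mipi_allocation[port]++ == 0)
		printf("port %u: allocate mipi frames\n", port);
}

static void mipi_put(unsigned int port)
{
	if (ref_count_mipi_allocation[port] == 0)
		return;		/* nothing allocated on this port */
	if (--ref_count_mipi_allocation[port] == 0)
		printf("port %u: free mipi frames\n", port);
}

int main(void)
{
	mipi_get(0);	/* first user: buffers are allocated */
	mipi_get(0);	/* 2401: a second stream shares the port */
	mipi_put(0);	/* still referenced, buffers are kept */
	mipi_put(0);	/* last user gone: buffers are freed */
	return 0;
}
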
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
index 046f34857891..69cc4e423d8b 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_shading.c
@@ -230,15 +230,8 @@ prepare_shading_table(const struct ia_css_shading_table *in_table,
const struct ia_css_binary *binary,
unsigned int bds_factor)
{
- unsigned int input_width,
- input_height,
- table_width,
- table_height,
- left_padding,
- top_padding,
- padded_width,
- left_cropping,
- i;
+ unsigned int input_width, input_height, table_width, table_height, i;
+ unsigned int left_padding, top_padding, left_cropping;
unsigned int bds_numerator, bds_denominator;
int right_padding;
@@ -254,15 +247,11 @@ prepare_shading_table(const struct ia_css_shading_table *in_table,
return;
}
- padded_width = binary->in_frame_info.padded_width;
- /* We use the ISP input resolution for the shading table because
- shading correction is performed in the bayer domain (before bayer
- down scaling). */
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
- padded_width = CEIL_MUL(binary->effective_in_frame_res.width + 2 *
- ISP_VEC_NELEMS,
- 2 * ISP_VEC_NELEMS);
-#endif
+ /*
+ * We use the ISP input resolution for the shading table because
+ * shading correction is performed in the bayer domain (before bayer
+ * down scaling).
+ */
input_height = binary->in_frame_info.res.height;
input_width = binary->in_frame_info.res.width;
left_padding = binary->left_padding;
diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c
index ba42be9b06eb..24fc497bd491 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_params.c
@@ -55,9 +55,6 @@
#include "ia_css_host_data.h"
#include "ia_css_pipe.h"
#include "ia_css_pipe_binarydesc.h"
-#if 0
-#include "ia_css_system_ctrl.h"
-#endif
/* Include all kernel host interfaces for ISP1 */
@@ -143,536 +140,6 @@ static int interleaved_lut_temp[4][HRT_GDC_N];
/* Digital Zoom lookup table. See documentation for more details about the
* contents of this table.
*/
-#if defined(HAS_GDC_VERSION_2)
-#if defined(CONFIG_CSI2_PLUS)
-/*
- * Coefficients from
- * Css_Mizuchi/regressions/20140424_0930/all/applications/common/gdc_v2_common/lut.h
- */
-
-static const int zoom_table[4][HRT_GDC_N] = {
- {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -2, -2, -2, -2, -2, -2, -2,
- -3, -3, -3, -3, -3, -3, -3, -4,
- -4, -4, -4, -4, -5, -5, -5, -5,
- -5, -5, -6, -6, -6, -6, -7, -7,
- -7, -7, -7, -8, -8, -8, -8, -9,
- -9, -9, -9, -10, -10, -10, -10, -11,
- -11, -11, -12, -12, -12, -12, -13, -13,
- -13, -14, -14, -14, -15, -15, -15, -15,
- -16, -16, -16, -17, -17, -17, -18, -18,
- -18, -19, -19, -20, -20, -20, -21, -21,
- -21, -22, -22, -22, -23, -23, -24, -24,
- -24, -25, -25, -25, -26, -26, -27, -27,
- -28, -28, -28, -29, -29, -30, -30, -30,
- -31, -31, -32, -32, -33, -33, -33, -34,
- -34, -35, -35, -36, -36, -37, -37, -37,
- -38, -38, -39, -39, -40, -40, -41, -41,
- -42, -42, -43, -43, -44, -44, -45, -45,
- -46, -46, -47, -47, -48, -48, -49, -49,
- -50, -50, -51, -51, -52, -52, -53, -53,
- -54, -54, -55, -55, -56, -56, -57, -57,
- -58, -59, -59, -60, -60, -61, -61, -62,
- -62, -63, -63, -64, -65, -65, -66, -66,
- -67, -67, -68, -69, -69, -70, -70, -71,
- -71, -72, -73, -73, -74, -74, -75, -75,
- -76, -77, -77, -78, -78, -79, -80, -80,
- -81, -81, -82, -83, -83, -84, -84, -85,
- -86, -86, -87, -87, -88, -89, -89, -90,
- -91, -91, -92, -92, -93, -94, -94, -95,
- -96, -96, -97, -97, -98, -99, -99, -100,
- -101, -101, -102, -102, -103, -104, -104, -105,
- -106, -106, -107, -108, -108, -109, -109, -110,
- -111, -111, -112, -113, -113, -114, -115, -115,
- -116, -117, -117, -118, -119, -119, -120, -121,
- -121, -122, -122, -123, -124, -124, -125, -126,
- -126, -127, -128, -128, -129, -130, -130, -131,
- -132, -132, -133, -134, -134, -135, -136, -136,
- -137, -138, -138, -139, -140, -140, -141, -142,
- -142, -143, -144, -144, -145, -146, -146, -147,
- -148, -148, -149, -150, -150, -151, -152, -152,
- -153, -154, -154, -155, -156, -156, -157, -158,
- -158, -159, -160, -160, -161, -162, -162, -163,
- -164, -164, -165, -166, -166, -167, -168, -168,
- -169, -170, -170, -171, -172, -172, -173, -174,
- -174, -175, -176, -176, -177, -178, -178, -179,
- -180, -180, -181, -181, -182, -183, -183, -184,
- -185, -185, -186, -187, -187, -188, -189, -189,
- -190, -191, -191, -192, -193, -193, -194, -194,
- -195, -196, -196, -197, -198, -198, -199, -200,
- -200, -201, -201, -202, -203, -203, -204, -205,
- -205, -206, -206, -207, -208, -208, -209, -210,
- -210, -211, -211, -212, -213, -213, -214, -215,
- -215, -216, -216, -217, -218, -218, -219, -219,
- -220, -221, -221, -222, -222, -223, -224, -224,
- -225, -225, -226, -227, -227, -228, -228, -229,
- -229, -230, -231, -231, -232, -232, -233, -233,
- -234, -235, -235, -236, -236, -237, -237, -238,
- -239, -239, -240, -240, -241, -241, -242, -242,
- -243, -244, -244, -245, -245, -246, -246, -247,
- -247, -248, -248, -249, -249, -250, -250, -251,
- -251, -252, -252, -253, -253, -254, -254, -255,
- -256, -256, -256, -257, -257, -258, -258, -259,
- -259, -260, -260, -261, -261, -262, -262, -263,
- -263, -264, -264, -265, -265, -266, -266, -266,
- -267, -267, -268, -268, -269, -269, -270, -270,
- -270, -271, -271, -272, -272, -273, -273, -273,
- -274, -274, -275, -275, -275, -276, -276, -277,
- -277, -277, -278, -278, -279, -279, -279, -280,
- -280, -280, -281, -281, -282, -282, -282, -283,
- -283, -283, -284, -284, -284, -285, -285, -285,
- -286, -286, -286, -287, -287, -287, -288, -288,
- -288, -289, -289, -289, -289, -290, -290, -290,
- -291, -291, -291, -291, -292, -292, -292, -293,
- -293, -293, -293, -294, -294, -294, -294, -295,
- -295, -295, -295, -295, -296, -296, -296, -296,
- -297, -297, -297, -297, -297, -298, -298, -298,
- -298, -298, -299, -299, -299, -299, -299, -299,
- -300, -300, -300, -300, -300, -300, -300, -301,
- -301, -301, -301, -301, -301, -301, -301, -301,
- -302, -302, -302, -302, -302, -302, -302, -302,
- -302, -302, -302, -302, -302, -303, -303, -303,
- -303, -303, -303, -303, -303, -303, -303, -303,
- -303, -303, -303, -303, -303, -303, -303, -303,
- -303, -303, -303, -303, -303, -303, -303, -303,
- -303, -303, -302, -302, -302, -302, -302, -302,
- -302, -302, -302, -302, -302, -302, -301, -301,
- -301, -301, -301, -301, -301, -301, -300, -300,
- -300, -300, -300, -300, -299, -299, -299, -299,
- -299, -299, -298, -298, -298, -298, -298, -297,
- -297, -297, -297, -296, -296, -296, -296, -295,
- -295, -295, -295, -294, -294, -294, -293, -293,
- -293, -293, -292, -292, -292, -291, -291, -291,
- -290, -290, -290, -289, -289, -289, -288, -288,
- -288, -287, -287, -286, -286, -286, -285, -285,
- -284, -284, -284, -283, -283, -282, -282, -281,
- -281, -280, -280, -279, -279, -279, -278, -278,
- -277, -277, -276, -276, -275, -275, -274, -273,
- -273, -272, -272, -271, -271, -270, -270, -269,
- -268, -268, -267, -267, -266, -266, -265, -264,
- -264, -263, -262, -262, -261, -260, -260, -259,
- -259, -258, -257, -256, -256, -255, -254, -254,
- -253, -252, -252, -251, -250, -249, -249, -248,
- -247, -246, -246, -245, -244, -243, -242, -242,
- -241, -240, -239, -238, -238, -237, -236, -235,
- -234, -233, -233, -232, -231, -230, -229, -228,
- -227, -226, -226, -225, -224, -223, -222, -221,
- -220, -219, -218, -217, -216, -215, -214, -213,
- -212, -211, -210, -209, -208, -207, -206, -205,
- -204, -203, -202, -201, -200, -199, -198, -197,
- -196, -194, -193, -192, -191, -190, -189, -188,
- -187, -185, -184, -183, -182, -181, -180, -178,
- -177, -176, -175, -174, -172, -171, -170, -169,
- -167, -166, -165, -164, -162, -161, -160, -158,
- -157, -156, -155, -153, -152, -151, -149, -148,
- -147, -145, -144, -142, -141, -140, -138, -137,
- -135, -134, -133, -131, -130, -128, -127, -125,
- -124, -122, -121, -120, -118, -117, -115, -114,
- -112, -110, -109, -107, -106, -104, -103, -101,
- -100, -98, -96, -95, -93, -92, -90, -88,
- -87, -85, -83, -82, -80, -78, -77, -75,
- -73, -72, -70, -68, -67, -65, -63, -61,
- -60, -58, -56, -54, -52, -51, -49, -47,
- -45, -43, -42, -40, -38, -36, -34, -32,
- -31, -29, -27, -25, -23, -21, -19, -17,
- -15, -13, -11, -9, -7, -5, -3, -1
- },
- {
- 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 25, 27, 29, 31,
- 33, 36, 38, 40, 43, 45, 47, 50,
- 52, 54, 57, 59, 61, 64, 66, 69,
- 71, 74, 76, 79, 81, 84, 86, 89,
- 92, 94, 97, 99, 102, 105, 107, 110,
- 113, 116, 118, 121, 124, 127, 129, 132,
- 135, 138, 141, 144, 146, 149, 152, 155,
- 158, 161, 164, 167, 170, 173, 176, 179,
- 182, 185, 188, 191, 194, 197, 200, 203,
- 207, 210, 213, 216, 219, 222, 226, 229,
- 232, 235, 239, 242, 245, 248, 252, 255,
- 258, 262, 265, 269, 272, 275, 279, 282,
- 286, 289, 292, 296, 299, 303, 306, 310,
- 313, 317, 321, 324, 328, 331, 335, 338,
- 342, 346, 349, 353, 357, 360, 364, 368,
- 372, 375, 379, 383, 386, 390, 394, 398,
- 402, 405, 409, 413, 417, 421, 425, 429,
- 432, 436, 440, 444, 448, 452, 456, 460,
- 464, 468, 472, 476, 480, 484, 488, 492,
- 496, 500, 504, 508, 512, 516, 521, 525,
- 529, 533, 537, 541, 546, 550, 554, 558,
- 562, 567, 571, 575, 579, 584, 588, 592,
- 596, 601, 605, 609, 614, 618, 622, 627,
- 631, 635, 640, 644, 649, 653, 657, 662,
- 666, 671, 675, 680, 684, 689, 693, 698,
- 702, 707, 711, 716, 720, 725, 729, 734,
- 738, 743, 747, 752, 757, 761, 766, 771,
- 775, 780, 784, 789, 794, 798, 803, 808,
- 813, 817, 822, 827, 831, 836, 841, 846,
- 850, 855, 860, 865, 870, 874, 879, 884,
- 889, 894, 898, 903, 908, 913, 918, 923,
- 928, 932, 937, 942, 947, 952, 957, 962,
- 967, 972, 977, 982, 986, 991, 996, 1001,
- 1006, 1011, 1016, 1021, 1026, 1031, 1036, 1041,
- 1046, 1051, 1056, 1062, 1067, 1072, 1077, 1082,
- 1087, 1092, 1097, 1102, 1107, 1112, 1117, 1122,
- 1128, 1133, 1138, 1143, 1148, 1153, 1158, 1164,
- 1169, 1174, 1179, 1184, 1189, 1195, 1200, 1205,
- 1210, 1215, 1221, 1226, 1231, 1236, 1242, 1247,
- 1252, 1257, 1262, 1268, 1273, 1278, 1284, 1289,
- 1294, 1299, 1305, 1310, 1315, 1321, 1326, 1331,
- 1336, 1342, 1347, 1352, 1358, 1363, 1368, 1374,
- 1379, 1384, 1390, 1395, 1400, 1406, 1411, 1417,
- 1422, 1427, 1433, 1438, 1443, 1449, 1454, 1460,
- 1465, 1470, 1476, 1481, 1487, 1492, 1497, 1503,
- 1508, 1514, 1519, 1525, 1530, 1535, 1541, 1546,
- 1552, 1557, 1563, 1568, 1574, 1579, 1585, 1590,
- 1596, 1601, 1606, 1612, 1617, 1623, 1628, 1634,
- 1639, 1645, 1650, 1656, 1661, 1667, 1672, 1678,
- 1683, 1689, 1694, 1700, 1705, 1711, 1716, 1722,
- 1727, 1733, 1738, 1744, 1749, 1755, 1761, 1766,
- 1772, 1777, 1783, 1788, 1794, 1799, 1805, 1810,
- 1816, 1821, 1827, 1832, 1838, 1844, 1849, 1855,
- 1860, 1866, 1871, 1877, 1882, 1888, 1893, 1899,
- 1905, 1910, 1916, 1921, 1927, 1932, 1938, 1943,
- 1949, 1955, 1960, 1966, 1971, 1977, 1982, 1988,
- 1993, 1999, 2005, 2010, 2016, 2021, 2027, 2032,
- 2038, 2043, 2049, 2055, 2060, 2066, 2071, 2077,
- 2082, 2088, 2093, 2099, 2105, 2110, 2116, 2121,
- 2127, 2132, 2138, 2143, 2149, 2154, 2160, 2165,
- 2171, 2177, 2182, 2188, 2193, 2199, 2204, 2210,
- 2215, 2221, 2226, 2232, 2237, 2243, 2248, 2254,
- 2259, 2265, 2270, 2276, 2281, 2287, 2292, 2298,
- 2304, 2309, 2314, 2320, 2325, 2331, 2336, 2342,
- 2347, 2353, 2358, 2364, 2369, 2375, 2380, 2386,
- 2391, 2397, 2402, 2408, 2413, 2419, 2424, 2429,
- 2435, 2440, 2446, 2451, 2457, 2462, 2467, 2473,
- 2478, 2484, 2489, 2495, 2500, 2505, 2511, 2516,
- 2522, 2527, 2532, 2538, 2543, 2549, 2554, 2559,
- 2565, 2570, 2575, 2581, 2586, 2591, 2597, 2602,
- 2607, 2613, 2618, 2623, 2629, 2634, 2639, 2645,
- 2650, 2655, 2661, 2666, 2671, 2676, 2682, 2687,
- 2692, 2698, 2703, 2708, 2713, 2719, 2724, 2729,
- 2734, 2740, 2745, 2750, 2755, 2760, 2766, 2771,
- 2776, 2781, 2786, 2792, 2797, 2802, 2807, 2812,
- 2817, 2823, 2828, 2833, 2838, 2843, 2848, 2853,
- 2859, 2864, 2869, 2874, 2879, 2884, 2889, 2894,
- 2899, 2904, 2909, 2914, 2919, 2924, 2930, 2935,
- 2940, 2945, 2950, 2955, 2960, 2965, 2970, 2975,
- 2980, 2984, 2989, 2994, 2999, 3004, 3009, 3014,
- 3019, 3024, 3029, 3034, 3039, 3044, 3048, 3053,
- 3058, 3063, 3068, 3073, 3078, 3082, 3087, 3092,
- 3097, 3102, 3106, 3111, 3116, 3121, 3126, 3130,
- 3135, 3140, 3145, 3149, 3154, 3159, 3163, 3168,
- 3173, 3177, 3182, 3187, 3191, 3196, 3201, 3205,
- 3210, 3215, 3219, 3224, 3228, 3233, 3238, 3242,
- 3247, 3251, 3256, 3260, 3265, 3269, 3274, 3279,
- 3283, 3287, 3292, 3296, 3301, 3305, 3310, 3314,
- 3319, 3323, 3327, 3332, 3336, 3341, 3345, 3349,
- 3354, 3358, 3362, 3367, 3371, 3375, 3380, 3384,
- 3388, 3393, 3397, 3401, 3405, 3410, 3414, 3418,
- 3422, 3426, 3431, 3435, 3439, 3443, 3447, 3451,
- 3455, 3460, 3464, 3468, 3472, 3476, 3480, 3484,
- 3488, 3492, 3496, 3500, 3504, 3508, 3512, 3516,
- 3520, 3524, 3528, 3532, 3536, 3540, 3544, 3548,
- 3552, 3555, 3559, 3563, 3567, 3571, 3575, 3578,
- 3582, 3586, 3590, 3593, 3597, 3601, 3605, 3608,
- 3612, 3616, 3619, 3623, 3627, 3630, 3634, 3638,
- 3641, 3645, 3649, 3652, 3656, 3659, 3663, 3666,
- 3670, 3673, 3677, 3680, 3684, 3687, 3691, 3694,
- 3698, 3701, 3704, 3708, 3711, 3714, 3718, 3721,
- 3724, 3728, 3731, 3734, 3738, 3741, 3744, 3747,
- 3751, 3754, 3757, 3760, 3763, 3767, 3770, 3773,
- 3776, 3779, 3782, 3785, 3788, 3791, 3794, 3798,
- 3801, 3804, 3807, 3809, 3812, 3815, 3818, 3821,
- 3824, 3827, 3830, 3833, 3836, 3839, 3841, 3844,
- 3847, 3850, 3853, 3855, 3858, 3861, 3864, 3866,
- 3869, 3872, 3874, 3877, 3880, 3882, 3885, 3887,
- 3890, 3893, 3895, 3898, 3900, 3903, 3905, 3908,
- 3910, 3913, 3915, 3917, 3920, 3922, 3925, 3927,
- 3929, 3932, 3934, 3936, 3939, 3941, 3943, 3945,
- 3948, 3950, 3952, 3954, 3956, 3958, 3961, 3963,
- 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979,
- 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3994,
- 3996, 3998, 4000, 4002, 4004, 4005, 4007, 4009,
- 4011, 4012, 4014, 4016, 4017, 4019, 4021, 4022,
- 4024, 4025, 4027, 4028, 4030, 4031, 4033, 4034,
- 4036, 4037, 4039, 4040, 4042, 4043, 4044, 4046,
- 4047, 4048, 4050, 4051, 4052, 4053, 4055, 4056,
- 4057, 4058, 4059, 4060, 4062, 4063, 4064, 4065,
- 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073,
- 4074, 4075, 4075, 4076, 4077, 4078, 4079, 4079,
- 4080, 4081, 4082, 4082, 4083, 4084, 4084, 4085,
- 4086, 4086, 4087, 4087, 4088, 4088, 4089, 4089,
- 4090, 4090, 4091, 4091, 4092, 4092, 4092, 4093,
- 4093, 4093, 4094, 4094, 4094, 4094, 4095, 4095,
- 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095
- },
- {
- 4096, 4095, 4095, 4095, 4095, 4095, 4095, 4095,
- 4095, 4095, 4095, 4094, 4094, 4094, 4094, 4093,
- 4093, 4093, 4092, 4092, 4092, 4091, 4091, 4090,
- 4090, 4089, 4089, 4088, 4088, 4087, 4087, 4086,
- 4086, 4085, 4084, 4084, 4083, 4082, 4082, 4081,
- 4080, 4079, 4079, 4078, 4077, 4076, 4075, 4075,
- 4074, 4073, 4072, 4071, 4070, 4069, 4068, 4067,
- 4066, 4065, 4064, 4063, 4062, 4060, 4059, 4058,
- 4057, 4056, 4055, 4053, 4052, 4051, 4050, 4048,
- 4047, 4046, 4044, 4043, 4042, 4040, 4039, 4037,
- 4036, 4034, 4033, 4031, 4030, 4028, 4027, 4025,
- 4024, 4022, 4021, 4019, 4017, 4016, 4014, 4012,
- 4011, 4009, 4007, 4005, 4004, 4002, 4000, 3998,
- 3996, 3994, 3993, 3991, 3989, 3987, 3985, 3983,
- 3981, 3979, 3977, 3975, 3973, 3971, 3969, 3967,
- 3965, 3963, 3961, 3958, 3956, 3954, 3952, 3950,
- 3948, 3945, 3943, 3941, 3939, 3936, 3934, 3932,
- 3929, 3927, 3925, 3922, 3920, 3917, 3915, 3913,
- 3910, 3908, 3905, 3903, 3900, 3898, 3895, 3893,
- 3890, 3887, 3885, 3882, 3880, 3877, 3874, 3872,
- 3869, 3866, 3864, 3861, 3858, 3855, 3853, 3850,
- 3847, 3844, 3841, 3839, 3836, 3833, 3830, 3827,
- 3824, 3821, 3818, 3815, 3812, 3809, 3807, 3804,
- 3801, 3798, 3794, 3791, 3788, 3785, 3782, 3779,
- 3776, 3773, 3770, 3767, 3763, 3760, 3757, 3754,
- 3751, 3747, 3744, 3741, 3738, 3734, 3731, 3728,
- 3724, 3721, 3718, 3714, 3711, 3708, 3704, 3701,
- 3698, 3694, 3691, 3687, 3684, 3680, 3677, 3673,
- 3670, 3666, 3663, 3659, 3656, 3652, 3649, 3645,
- 3641, 3638, 3634, 3630, 3627, 3623, 3619, 3616,
- 3612, 3608, 3605, 3601, 3597, 3593, 3590, 3586,
- 3582, 3578, 3575, 3571, 3567, 3563, 3559, 3555,
- 3552, 3548, 3544, 3540, 3536, 3532, 3528, 3524,
- 3520, 3516, 3512, 3508, 3504, 3500, 3496, 3492,
- 3488, 3484, 3480, 3476, 3472, 3468, 3464, 3460,
- 3455, 3451, 3447, 3443, 3439, 3435, 3431, 3426,
- 3422, 3418, 3414, 3410, 3405, 3401, 3397, 3393,
- 3388, 3384, 3380, 3375, 3371, 3367, 3362, 3358,
- 3354, 3349, 3345, 3341, 3336, 3332, 3327, 3323,
- 3319, 3314, 3310, 3305, 3301, 3296, 3292, 3287,
- 3283, 3279, 3274, 3269, 3265, 3260, 3256, 3251,
- 3247, 3242, 3238, 3233, 3228, 3224, 3219, 3215,
- 3210, 3205, 3201, 3196, 3191, 3187, 3182, 3177,
- 3173, 3168, 3163, 3159, 3154, 3149, 3145, 3140,
- 3135, 3130, 3126, 3121, 3116, 3111, 3106, 3102,
- 3097, 3092, 3087, 3082, 3078, 3073, 3068, 3063,
- 3058, 3053, 3048, 3044, 3039, 3034, 3029, 3024,
- 3019, 3014, 3009, 3004, 2999, 2994, 2989, 2984,
- 2980, 2975, 2970, 2965, 2960, 2955, 2950, 2945,
- 2940, 2935, 2930, 2924, 2919, 2914, 2909, 2904,
- 2899, 2894, 2889, 2884, 2879, 2874, 2869, 2864,
- 2859, 2853, 2848, 2843, 2838, 2833, 2828, 2823,
- 2817, 2812, 2807, 2802, 2797, 2792, 2786, 2781,
- 2776, 2771, 2766, 2760, 2755, 2750, 2745, 2740,
- 2734, 2729, 2724, 2719, 2713, 2708, 2703, 2698,
- 2692, 2687, 2682, 2676, 2671, 2666, 2661, 2655,
- 2650, 2645, 2639, 2634, 2629, 2623, 2618, 2613,
- 2607, 2602, 2597, 2591, 2586, 2581, 2575, 2570,
- 2565, 2559, 2554, 2549, 2543, 2538, 2532, 2527,
- 2522, 2516, 2511, 2505, 2500, 2495, 2489, 2484,
- 2478, 2473, 2467, 2462, 2457, 2451, 2446, 2440,
- 2435, 2429, 2424, 2419, 2413, 2408, 2402, 2397,
- 2391, 2386, 2380, 2375, 2369, 2364, 2358, 2353,
- 2347, 2342, 2336, 2331, 2325, 2320, 2314, 2309,
- 2304, 2298, 2292, 2287, 2281, 2276, 2270, 2265,
- 2259, 2254, 2248, 2243, 2237, 2232, 2226, 2221,
- 2215, 2210, 2204, 2199, 2193, 2188, 2182, 2177,
- 2171, 2165, 2160, 2154, 2149, 2143, 2138, 2132,
- 2127, 2121, 2116, 2110, 2105, 2099, 2093, 2088,
- 2082, 2077, 2071, 2066, 2060, 2055, 2049, 2043,
- 2038, 2032, 2027, 2021, 2016, 2010, 2005, 1999,
- 1993, 1988, 1982, 1977, 1971, 1966, 1960, 1955,
- 1949, 1943, 1938, 1932, 1927, 1921, 1916, 1910,
- 1905, 1899, 1893, 1888, 1882, 1877, 1871, 1866,
- 1860, 1855, 1849, 1844, 1838, 1832, 1827, 1821,
- 1816, 1810, 1805, 1799, 1794, 1788, 1783, 1777,
- 1772, 1766, 1761, 1755, 1749, 1744, 1738, 1733,
- 1727, 1722, 1716, 1711, 1705, 1700, 1694, 1689,
- 1683, 1678, 1672, 1667, 1661, 1656, 1650, 1645,
- 1639, 1634, 1628, 1623, 1617, 1612, 1606, 1601,
- 1596, 1590, 1585, 1579, 1574, 1568, 1563, 1557,
- 1552, 1546, 1541, 1535, 1530, 1525, 1519, 1514,
- 1508, 1503, 1497, 1492, 1487, 1481, 1476, 1470,
- 1465, 1460, 1454, 1449, 1443, 1438, 1433, 1427,
- 1422, 1417, 1411, 1406, 1400, 1395, 1390, 1384,
- 1379, 1374, 1368, 1363, 1358, 1352, 1347, 1342,
- 1336, 1331, 1326, 1321, 1315, 1310, 1305, 1299,
- 1294, 1289, 1284, 1278, 1273, 1268, 1262, 1257,
- 1252, 1247, 1242, 1236, 1231, 1226, 1221, 1215,
- 1210, 1205, 1200, 1195, 1189, 1184, 1179, 1174,
- 1169, 1164, 1158, 1153, 1148, 1143, 1138, 1133,
- 1128, 1122, 1117, 1112, 1107, 1102, 1097, 1092,
- 1087, 1082, 1077, 1072, 1067, 1062, 1056, 1051,
- 1046, 1041, 1036, 1031, 1026, 1021, 1016, 1011,
- 1006, 1001, 996, 991, 986, 982, 977, 972,
- 967, 962, 957, 952, 947, 942, 937, 932,
- 928, 923, 918, 913, 908, 903, 898, 894,
- 889, 884, 879, 874, 870, 865, 860, 855,
- 850, 846, 841, 836, 831, 827, 822, 817,
- 813, 808, 803, 798, 794, 789, 784, 780,
- 775, 771, 766, 761, 757, 752, 747, 743,
- 738, 734, 729, 725, 720, 716, 711, 707,
- 702, 698, 693, 689, 684, 680, 675, 671,
- 666, 662, 657, 653, 649, 644, 640, 635,
- 631, 627, 622, 618, 614, 609, 605, 601,
- 596, 592, 588, 584, 579, 575, 571, 567,
- 562, 558, 554, 550, 546, 541, 537, 533,
- 529, 525, 521, 516, 512, 508, 504, 500,
- 496, 492, 488, 484, 480, 476, 472, 468,
- 464, 460, 456, 452, 448, 444, 440, 436,
- 432, 429, 425, 421, 417, 413, 409, 405,
- 402, 398, 394, 390, 386, 383, 379, 375,
- 372, 368, 364, 360, 357, 353, 349, 346,
- 342, 338, 335, 331, 328, 324, 321, 317,
- 313, 310, 306, 303, 299, 296, 292, 289,
- 286, 282, 279, 275, 272, 269, 265, 262,
- 258, 255, 252, 248, 245, 242, 239, 235,
- 232, 229, 226, 222, 219, 216, 213, 210,
- 207, 203, 200, 197, 194, 191, 188, 185,
- 182, 179, 176, 173, 170, 167, 164, 161,
- 158, 155, 152, 149, 146, 144, 141, 138,
- 135, 132, 129, 127, 124, 121, 118, 116,
- 113, 110, 107, 105, 102, 99, 97, 94,
- 92, 89, 86, 84, 81, 79, 76, 74,
- 71, 69, 66, 64, 61, 59, 57, 54,
- 52, 50, 47, 45, 43, 40, 38, 36,
- 33, 31, 29, 27, 25, 22, 20, 18,
- 16, 14, 12, 10, 8, 6, 4, 2
- },
- {
- 0, -1, -3, -5, -7, -9, -11, -13,
- -15, -17, -19, -20, -23, -25, -27, -28,
- -30, -33, -34, -36, -39, -40, -42, -43,
- -45, -46, -49, -50, -52, -54, -56, -58,
- -60, -61, -62, -65, -66, -68, -70, -72,
- -73, -74, -77, -78, -80, -82, -83, -85,
- -87, -89, -90, -92, -93, -95, -96, -98,
- -100, -102, -103, -105, -106, -107, -108, -110,
- -112, -114, -116, -116, -118, -120, -122, -122,
- -124, -126, -127, -128, -130, -131, -133, -133,
- -136, -137, -138, -139, -141, -142, -144, -145,
- -147, -147, -150, -151, -151, -153, -155, -156,
- -157, -159, -160, -161, -163, -164, -165, -166,
- -168, -168, -170, -171, -172, -174, -174, -176,
- -177, -178, -180, -181, -182, -183, -184, -185,
- -187, -188, -189, -190, -191, -192, -193, -195,
- -196, -196, -198, -199, -200, -200, -202, -204,
- -204, -205, -206, -207, -208, -209, -211, -212,
- -212, -213, -214, -215, -216, -217, -218, -220,
- -220, -221, -222, -223, -224, -225, -225, -227,
- -227, -228, -229, -230, -230, -231, -233, -234,
- -234, -235, -235, -237, -238, -239, -239, -240,
- -240, -242, -242, -243, -243, -245, -246, -247,
- -247, -249, -248, -249, -250, -251, -251, -253,
- -253, -253, -255, -255, -256, -256, -257, -258,
- -259, -259, -260, -261, -261, -262, -262, -264,
- -263, -265, -265, -265, -266, -267, -267, -268,
- -269, -269, -269, -270, -271, -271, -272, -273,
- -273, -273, -274, -274, -276, -275, -276, -277,
- -277, -278, -278, -278, -279, -279, -280, -281,
- -280, -281, -282, -283, -283, -282, -284, -284,
- -284, -285, -285, -286, -286, -286, -287, -287,
- -288, -288, -288, -289, -289, -289, -290, -290,
- -290, -291, -291, -292, -291, -291, -292, -292,
- -292, -293, -293, -293, -294, -294, -295, -295,
- -294, -295, -295, -296, -297, -297, -297, -297,
- -297, -297, -298, -298, -297, -298, -298, -298,
- -299, -299, -300, -299, -299, -300, -299, -300,
- -301, -300, -300, -301, -300, -301, -301, -301,
- -301, -301, -302, -301, -302, -301, -302, -302,
- -302, -302, -302, -302, -302, -302, -303, -302,
- -303, -302, -303, -303, -302, -303, -303, -303,
- -302, -303, -303, -302, -303, -303, -302, -303,
- -303, -302, -303, -303, -302, -303, -303, -303,
- -303, -302, -303, -303, -302, -302, -302, -303,
- -302, -302, -302, -301, -303, -302, -301, -302,
- -301, -301, -301, -302, -301, -301, -301, -300,
- -301, -300, -300, -300, -300, -299, -300, -299,
- -300, -300, -299, -300, -299, -299, -299, -299,
- -298, -299, -298, -297, -297, -297, -296, -297,
- -296, -296, -296, -296, -295, -296, -295, -296,
- -295, -294, -294, -294, -293, -294, -294, -293,
- -293, -292, -293, -292, -292, -292, -291, -290,
- -291, -290, -291, -289, -289, -290, -289, -289,
- -288, -288, -288, -288, -286, -287, -286, -286,
- -286, -285, -286, -284, -284, -284, -284, -283,
- -283, -283, -282, -282, -282, -281, -280, -281,
- -279, -280, -280, -278, -279, -278, -278, -277,
- -278, -276, -276, -277, -275, -276, -274, -275,
- -274, -273, -273, -272, -273, -272, -272, -271,
- -270, -270, -269, -269, -269, -268, -268, -267,
- -267, -266, -266, -266, -265, -265, -264, -264,
- -263, -263, -262, -262, -261, -261, -260, -260,
- -259, -259, -258, -258, -257, -257, -256, -256,
- -256, -255, -254, -254, -253, -253, -252, -252,
- -251, -251, -250, -250, -249, -249, -248, -248,
- -247, -247, -246, -246, -245, -245, -244, -244,
- -243, -242, -242, -241, -241, -240, -239, -239,
- -239, -238, -238, -237, -237, -235, -235, -235,
- -234, -234, -232, -233, -232, -232, -231, -229,
- -230, -229, -228, -228, -227, -226, -227, -225,
- -224, -225, -223, -223, -222, -222, -221, -221,
- -220, -219, -219, -218, -218, -216, -217, -216,
- -215, -215, -214, -213, -212, -213, -211, -211,
- -210, -210, -209, -209, -208, -206, -207, -206,
- -205, -204, -204, -204, -203, -202, -202, -200,
- -200, -200, -200, -198, -197, -197, -196, -195,
- -195, -195, -194, -194, -192, -192, -191, -191,
- -189, -189, -188, -188, -187, -186, -186, -186,
- -185, -185, -183, -183, -182, -182, -181, -181,
- -180, -178, -178, -177, -177, -176, -176, -174,
- -174, -173, -173, -172, -172, -172, -170, -170,
- -168, -168, -167, -167, -167, -165, -165, -164,
- -164, -164, -162, -162, -161, -160, -160, -158,
- -158, -158, -157, -156, -155, -155, -154, -153,
- -153, -152, -151, -151, -150, -149, -149, -148,
- -147, -147, -146, -146, -144, -144, -144, -142,
- -142, -141, -142, -140, -140, -139, -138, -138,
- -137, -136, -136, -134, -134, -133, -134, -132,
- -132, -131, -130, -130, -128, -128, -128, -127,
- -127, -126, -124, -124, -124, -123, -123, -122,
- -121, -120, -120, -119, -118, -118, -117, -117,
- -116, -115, -115, -115, -114, -113, -111, -111,
- -110, -110, -109, -109, -108, -107, -107, -106,
- -105, -104, -104, -103, -102, -103, -102, -101,
- -101, -100, -99, -99, -98, -97, -97, -96,
- -96, -95, -94, -94, -93, -92, -92, -91,
- -91, -90, -89, -88, -88, -88, -87, -86,
- -85, -86, -84, -84, -83, -82, -82, -81,
- -81, -80, -80, -78, -79, -77, -77, -77,
- -76, -76, -75, -74, -74, -73, -72, -72,
- -72, -71, -70, -70, -69, -68, -68, -68,
- -66, -67, -66, -65, -65, -65, -63, -63,
- -62, -62, -61, -61, -60, -60, -60, -58,
- -58, -58, -56, -56, -56, -55, -54, -55,
- -54, -54, -53, -52, -51, -51, -51, -50,
- -49, -49, -49, -49, -48, -47, -46, -46,
- -46, -46, -45, -43, -43, -43, -43, -42,
- -42, -42, -40, -40, -40, -39, -39, -38,
- -38, -38, -37, -37, -36, -36, -35, -35,
- -34, -35, -34, -33, -33, -32, -32, -31,
- -31, -31, -30, -29, -29, -29, -28, -27,
- -28, -28, -27, -26, -26, -25, -25, -25,
- -24, -24, -24, -23, -23, -22, -22, -22,
- -21, -21, -20, -20, -20, -20, -19, -18,
- -19, -18, -18, -17, -18, -17, -16, -17,
- -16, -15, -15, -15, -14, -14, -15, -13,
- -13, -13, -13, -12, -12, -11, -12, -11,
- -12, -10, -10, -10, -10, -10, -9, -10,
- -9, -9, -9, -8, -8, -7, -8, -7,
- -7, -7, -6, -6, -6, -7, -6, -6,
- -5, -5, -5, -5, -5, -4, -4, -5,
- -4, -4, -3, -3, -3, -3, -3, -2,
- -3, -2, -2, -2, -1, -2, -1, -2,
- -1, -1, -1, -1, -1, 0, -1, 0,
- -1, -1, 0, 0, -1, 0, 0, -1,
- 1, 1, 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
- }
-};
-#else /* defined(CONFIG_CSI2_PLUS) */
static const int zoom_table[4][HRT_GDC_N] = {
{
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
@@ -1195,11 +662,6 @@ static const int zoom_table[4][HRT_GDC_N] = {
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4
}
};
-#endif
-#else
-#error "sh_css_params.c: GDC version must be \
-one of {GDC_VERSION_2}"
-#endif
static const struct ia_css_dz_config default_dz_config = {
HRT_GDC_N,
@@ -1634,7 +1096,7 @@ ia_css_params_alloc_convert_sctbl(
{
const struct ia_css_binary *binary = stage->binary;
struct ia_css_host_data *sctbl;
- unsigned int i, j, aligned_width, row_padding;
+ unsigned int i, j, aligned_width;
unsigned int sctbl_size;
short int *ptr;
@@ -1649,7 +1111,6 @@ ia_css_params_alloc_convert_sctbl(
}
aligned_width = binary->sctbl_aligned_width_per_color;
- row_padding = aligned_width - shading_table->width;
sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width *
sizeof(short);
@@ -4917,7 +4378,6 @@ ia_css_3a_statistics_free(struct ia_css_3a_statistics *me)
if (me) {
kvfree(me->rgby_data);
kvfree(me->data);
- memset(me, 0, sizeof(struct ia_css_3a_statistics));
kvfree(me);
}
}
@@ -4956,7 +4416,6 @@ ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me)
if (me) {
kvfree(me->hor_proj);
kvfree(me->ver_proj);
- memset(me, 0, sizeof(struct ia_css_dvs_statistics));
kvfree(me);
}
}
@@ -4998,7 +4457,6 @@ ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me)
if (me) {
kvfree(me->hor_coefs);
kvfree(me->ver_coefs);
- memset(me, 0, sizeof(struct ia_css_dvs_coefficients));
kvfree(me);
}
}
@@ -5090,7 +4548,6 @@ ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me)
kvfree(me->ver_prod.odd_imag);
kvfree(me->ver_prod.even_real);
kvfree(me->ver_prod.even_imag);
- memset(me, 0, sizeof(struct ia_css_dvs2_statistics));
kvfree(me);
}
}
@@ -5174,7 +4631,6 @@ ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me)
kvfree(me->ver_coefs.odd_imag);
kvfree(me->ver_coefs.even_real);
kvfree(me->ver_coefs.even_imag);
- memset(me, 0, sizeof(struct ia_css_dvs2_coefficients));
kvfree(me);
}
}
@@ -5249,7 +4705,6 @@ ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config)
kvfree(dvs_6axis_config->ycoords_y);
kvfree(dvs_6axis_config->xcoords_uv);
kvfree(dvs_6axis_config->ycoords_uv);
- memset(dvs_6axis_config, 0, sizeof(struct ia_css_dvs_6axis_config));
kvfree(dvs_6axis_config);
}
}
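
A recurring cleanup in the sh_css_params.c hunks above: every *_free() helper zeroed its container with memset() immediately before kvfree(). Because the memory is released on the very next line and nothing security-sensitive is being scrubbed, the store is dead and can simply be dropped. Before/after in miniature, using a hypothetical stats type:

	struct my_stats { void *data; };	/* hypothetical */

	void my_stats_free(struct my_stats *me)
	{
		if (me) {
			kvfree(me->data);
			/* memset(me, 0, sizeof(*me)); -- dropped: dead store */
			kvfree(me);
		}
	}
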
diff --git a/drivers/staging/media/atomisp/pci/sh_css_properties.c b/drivers/staging/media/atomisp/pci/sh_css_properties.c
index de588f9bd540..8ecd93d65a68 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_properties.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_properties.c
@@ -22,23 +22,13 @@ void
ia_css_get_properties(struct ia_css_properties *properties)
{
assert(properties);
-#if defined(HAS_GDC_VERSION_2) || defined(HAS_GDC_VERSION_3)
/*
* MW: We don't want to store the coordinates
* full range in memory: Truncate
*/
properties->gdc_coord_one = gdc_get_unity(GDC0_ID) / HRT_GDC_COORD_SCALE;
-#else
-#error "Unknown GDC version"
-#endif
properties->l1_base_is_index = true;
-#if defined(HAS_VAMEM_VERSION_1)
- properties->vamem_type = IA_CSS_VAMEM_TYPE_1;
-#elif defined(HAS_VAMEM_VERSION_2)
properties->vamem_type = IA_CSS_VAMEM_TYPE_2;
-#else
-#error "Unknown VAMEM version"
-#endif
}
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
index a26680b1d0b0..02f5a73b4096 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -17,7 +17,7 @@
#include "sh_css_sp.h"
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
#include "input_formatter.h"
#endif
@@ -38,9 +38,7 @@
#include "sh_css_params.h"
#include "sh_css_legacy.h"
#include "ia_css_frame_comm.h"
-#if !defined(HAS_NO_INPUT_SYSTEM)
#include "ia_css_isys.h"
-#endif
#include "gdc_device.h" /* HRT_GDC_N */
@@ -229,7 +227,7 @@ sh_css_sp_start_binary_copy(unsigned int pipe_num,
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe->pipe_id, pipe->inout_port_config);
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
#else
(void)two_ppc;
@@ -307,7 +305,7 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe->pipe_id, pipe->inout_port_config);
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
#else
(void)two_ppc;
@@ -638,7 +636,7 @@ set_view_finder_buffer(const struct ia_css_frame *frame) {
return 0;
}
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
void sh_css_sp_set_if_configs(
const input_formatter_cfg_t *config_a,
const input_formatter_cfg_t *config_b,
@@ -662,7 +660,7 @@ void sh_css_sp_set_if_configs(
}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
void
sh_css_sp_program_input_circuit(int fmt_type,
int ch_id,
@@ -681,7 +679,7 @@ sh_css_sp_program_input_circuit(int fmt_type,
}
#endif
-#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+#if !defined(ISP2401)
void
sh_css_sp_configure_sync_gen(int width, int height,
int hblank_cycles,
@@ -724,11 +722,7 @@ sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
void
sh_css_sp_enable_isys_event_queue(bool enable)
{
-#if !defined(HAS_NO_INPUT_SYSTEM)
sh_css_sp_group.config.enable_isys_event_queue = enable;
-#else
- (void)enable;
-#endif
}
void
@@ -766,7 +760,7 @@ sh_css_sp_init_group(bool two_ppc,
bool no_isp_sync,
uint8_t if_config_index)
{
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
#else
(void)two_ppc;
@@ -775,7 +769,7 @@ sh_css_sp_init_group(bool two_ppc,
sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
/* decide whether the frame is processed online or offline */
if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return;
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
input_format;
@@ -940,7 +934,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
return 0;
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+#if defined(ISP2401)
(void)continuous;
sh_css_sp_stage.deinterleaved = 0;
#else
@@ -1025,7 +1019,7 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
if (err)
return err;
-#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifdef ISP2401
if (stage == 0) {
pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
if (!pipe)
@@ -1206,9 +1200,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
enum ia_css_input_mode input_mode,
const struct ia_css_metadata_config *md_config,
const struct ia_css_metadata_info *md_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
const enum mipi_port_id port_id,
-#endif
const struct ia_css_coordinate
*internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
positioned on shading table at shading correction in ISP. */
@@ -1226,7 +1218,6 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
assert(me);
-#if !defined(HAS_NO_INPUT_SYSTEM)
assert(me->stages);
first_binary = me->stages->binary;
@@ -1245,10 +1236,6 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
{
if_config_index = 0x0;
}
-#else
- (void)input_mode;
- if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
-#endif
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
@@ -1268,12 +1255,10 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
offline, if_config_index);
} /* if (first_binary != NULL) */
-#if defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(USE_INPUT_SYSTEM_VERSION_2)
/* Signal the host immediately after start for SP_ISYS_COPY only */
if ((me->num_stages == 1) && me->stages &&
(me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY))
sh_css_sp_group.config.no_isp_sync = true;
-#endif
/* Init stage data */
sh_css_init_host2sp_frame_data();
@@ -1285,11 +1270,9 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs;
sh_css_sp_group.pipe[thread_id].pipe_qos_config = me->pipe_qos_config;
sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor;
-#if !defined(HAS_NO_INPUT_SYSTEM)
sh_css_sp_group.pipe[thread_id].input_system_mode
= (uint32_t)input_mode;
sh_css_sp_group.pipe[thread_id].port_id = port_id;
-#endif
sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay;
/* TODO: next indicates from which queues parameters need to be
@@ -1482,7 +1465,6 @@ sh_css_update_host2sp_offline_frame(
store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
}
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/*
* @brief Update the mipi frame information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
@@ -1547,7 +1529,6 @@ sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames)
store_sp_array_uint(host_sp_com, offset, num_frames);
}
-#endif
void
sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
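
Throughout sh_css_sp.c the per-feature macros collapse into the single ISP2401 family test. The mapping these hunks appear to assume (my reading of the series, not an authoritative table):

	/*
	 * HAS_NO_INPUT_FORMATTER        -> ISP2401 (only ISP2401 lacks one)
	 * HAS_NO_INPUT_SYSTEM           -> never true; code kept unconditionally
	 * USE_INPUT_SYSTEM_VERSION_2    -> !ISP2401
	 * USE_INPUT_SYSTEM_VERSION_2401 -> ISP2401
	 */
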
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.h b/drivers/staging/media/atomisp/pci/sh_css_sp.h
index 153b005becda..832eed711525 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.h
@@ -18,7 +18,7 @@
#include <system_global.h>
#include <type_support.h>
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
#include "input_formatter.h"
#endif
@@ -66,9 +66,7 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
enum ia_css_input_mode input_mode,
const struct ia_css_metadata_config *md_config,
const struct ia_css_metadata_info *md_info,
-#if !defined(HAS_NO_INPUT_SYSTEM)
const enum mipi_port_id port_id,
-#endif
const struct ia_css_coordinate
*internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
positioned on shading table at shading correction in ISP. */
@@ -98,7 +96,6 @@ sh_css_update_host2sp_offline_frame(
struct ia_css_frame *frame,
struct ia_css_metadata *metadata);
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/**
* @brief Update the mipi frame information in host_sp_communication.
*
@@ -128,7 +125,6 @@ sh_css_update_host2sp_mipi_metadata(
*/
void
sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames);
-#endif
/**
* @brief Update the nr of offline frames to use in host_sp_communication.
@@ -158,7 +154,7 @@ sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
#endif
-#if !defined(HAS_NO_INPUT_FORMATTER)
+#if !defined(ISP2401)
void
sh_css_sp_set_if_configs(
const input_formatter_cfg_t *config_a,
diff --git a/drivers/staging/media/atomisp/pci/sh_css_struct.h b/drivers/staging/media/atomisp/pci/sh_css_struct.h
index bd260252317a..eb8960ebae34 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_struct.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_struct.h
@@ -55,7 +55,6 @@ struct sh_css {
bool check_system_idle;
unsigned int num_cont_raw_frames;
-#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
unsigned int num_mipi_frames[N_CSI_PORTS];
struct ia_css_frame
*mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
@@ -64,7 +63,6 @@ struct sh_css {
unsigned int
mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
unsigned int mipi_frame_size[N_CSI_PORTS];
-#endif
ia_css_ptr sp_bin_addr;
hrt_data page_table_base_index;
diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h
index 90210f6943d2..9b22b8c168be 100644
--- a/drivers/staging/media/atomisp/pci/system_global.h
+++ b/drivers/staging/media/atomisp/pci/system_global.h
@@ -25,23 +25,6 @@
* N.B. the 3 input formatters are of 2 different classes
*/
-#define HAS_MMU_VERSION_2
-#define HAS_DMA_VERSION_2
-#define HAS_GDC_VERSION_2
-#define HAS_VAMEM_VERSION_2
-#define HAS_HMEM_VERSION_1
-#define HAS_BAMEM_VERSION_2
-#define HAS_IRQ_VERSION_2
-#define HAS_IRQ_MAP_VERSION_2
-#define HAS_INPUT_FORMATTER_VERSION_2
-#define HAS_INPUT_SYSTEM_VERSION_2
-#define HAS_BUFFERED_SENSOR
-#define HAS_FIFO_MONITORS_VERSION_2
-#define HAS_GP_DEVICE_VERSION_2
-#define HAS_GPIO_VERSION_1
-#define HAS_TIMED_CTRL_VERSION_1
-#define HAS_RX_VERSION_2
-
/* per-frame parameter handling support */
#define SH_CSS_ENABLE_PER_FRAME_PARAMS
@@ -64,12 +47,6 @@
#define ISP2400_DMA_MAX_BURST_LENGTH 128
#define ISP2401_DMA_MAX_BURST_LENGTH 2
-#ifdef ISP2401
-# include "isp2401_system_global.h"
-#else
-# include "isp2400_system_global.h"
-#endif
-
#include <hive_isp_css_defs.h>
#include <type_support.h>
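
With the HAS_*_VERSION_* defines gone there is no feature-macro selection left in this header, and the per-variant isp2400/isp2401_system_global.h includes are dropped too, so their contents have presumably been folded into the shared header. Where a real 2400/2401 delta remains, the expected shape is a direct family guard (sketch, assuming a hypothetical macro name):

	#ifdef ISP2401
	#define DMA_MAX_BURST_LENGTH	ISP2401_DMA_MAX_BURST_LENGTH
	#else
	#define DMA_MAX_BURST_LENGTH	ISP2400_DMA_MAX_BURST_LENGTH
	#endif
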
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 34797507f214..3cd00cc0a364 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -309,11 +309,6 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_H264_DECODER,
.cfg = {
- .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
- },
- }, {
- .codec = HANTRO_H264_DECODER,
- .cfg = {
.id = V4L2_CID_MPEG_VIDEO_H264_SPS,
.ops = &hantro_ctrl_ops,
},
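
After this hunk the driver no longer exposes or consumes the H.264 slice-params control. Per the companion hantro hunks below, a stateless-decode userspace now needs only these per-frame controls (sketch; list inferred from this series):

	static const u32 h264_dec_required_ctrls[] = {	/* hypothetical table */
		V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
		V4L2_CID_MPEG_VIDEO_H264_SPS,
		V4L2_CID_MPEG_VIDEO_H264_PPS,
		V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
	};
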
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 424c648ce9fc..845bef73d218 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -23,7 +23,6 @@ static void set_params(struct hantro_ctx *ctx)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
- const struct v4l2_ctrl_h264_slice_params *slices = ctrls->slices;
const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
struct vb2_v4l2_buffer *src_buf = hantro_get_src_buf(ctx);
@@ -42,11 +41,11 @@ static void set_params(struct hantro_ctx *ctx)
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
- slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC))
+ dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC))
reg |= G1_REG_DEC_CTRL0_PIC_INTERLACE_E;
- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
reg |= G1_REG_DEC_CTRL0_PIC_FIELDMODE_E;
- if (!(slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD))
+ if (!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD))
reg |= G1_REG_DEC_CTRL0_PIC_TOPFIELD_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
@@ -60,9 +59,8 @@ static void set_params(struct hantro_ctx *ctx)
reg = G1_REG_DEC_CTRL2_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
G1_REG_DEC_CTRL2_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset);
- /* always use the matrix sent from userspace */
- reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
-
+ if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
+ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
@@ -75,7 +73,7 @@ static void set_params(struct hantro_ctx *ctx)
/* Decoder control register 4. */
reg = G1_REG_DEC_CTRL4_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
- G1_REG_DEC_CTRL4_FRAMENUM(slices[0].frame_num) |
+ G1_REG_DEC_CTRL4_FRAMENUM(dec_param->frame_num) |
G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc);
if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
@@ -88,8 +86,8 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
/* Decoder control register 5. */
- reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(slices[0].dec_ref_pic_marking_bit_size) |
- G1_REG_DEC_CTRL5_IDR_PIC_ID(slices[0].idr_pic_id);
+ reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
+ G1_REG_DEC_CTRL5_IDR_PIC_ID(dec_param->idr_pic_id);
if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
reg |= G1_REG_DEC_CTRL5_CONST_INTRA_E;
if (pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
@@ -103,10 +101,10 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL5);
/* Decoder control register 6. */
- reg = G1_REG_DEC_CTRL6_PPS_ID(slices[0].pic_parameter_set_id) |
+ reg = G1_REG_DEC_CTRL6_PPS_ID(pps->pic_parameter_set_id) |
G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
- G1_REG_DEC_CTRL6_POC_LENGTH(slices[0].pic_order_cnt_bit_size);
+ G1_REG_DEC_CTRL6_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL6);
/* Error concealment register. */
@@ -246,7 +244,7 @@ static void set_buffers(struct hantro_ctx *ctx)
/* Destination (decoded frame) buffer. */
dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
/* Adjust dma addr to start at second line for bottom field */
- if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset = ALIGN(ctx->src_fmt.width, MB_DIM);
vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
@@ -265,7 +263,7 @@ static void set_buffers(struct hantro_ctx *ctx)
* DMV buffer is split in two for field encoded frames,
* adjust offset for bottom field
*/
- if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
MB_HEIGHT(ctx->src_fmt.height);
vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
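
All interlace and per-picture syntax elements read by the G1 register setup above (field flags, frame_num, idr_pic_id, marking/POC bit sizes) now come from the per-frame decode-params control instead of slices[0]. Hypothetical helpers making the new single source of truth explicit (the driver open-codes these tests):

	static inline bool h264_pic_is_field(const struct v4l2_ctrl_h264_decode_params *dp)
	{
		return dp->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC;
	}

	static inline bool h264_pic_is_bottom(const struct v4l2_ctrl_h264_decode_params *dp)
	{
		return dp->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD;
	}
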
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 194d05848077..b1bdc00ac262 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -197,6 +197,7 @@ assemble_scaling_list(struct hantro_ctx *ctx)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
@@ -205,6 +206,9 @@ assemble_scaling_list(struct hantro_ctx *ctx)
const u32 *src;
int i, j;
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
for (i = 0; i < num_list_4x4; i++) {
src = (u32 *)&scaling->scaling_list_4x4[i];
for (j = 0; j < list_len_4x4 / 4; j++)
@@ -325,7 +329,7 @@ dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
- dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
}
return dma_addr;
@@ -349,11 +353,6 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
if (WARN_ON(!ctrls->decode))
return -EINVAL;
- ctrls->slices =
- hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
- if (WARN_ON(!ctrls->slices))
- return -EINVAL;
-
ctrls->sps =
hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_H264_SPS);
if (WARN_ON(!ctrls->sps))
@@ -372,8 +371,7 @@ int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
/* Build the P/B{0,1} ref lists. */
v4l2_h264_init_reflist_builder(&reflist_builder, ctrls->decode,
- &ctrls->slices[0], ctrls->sps,
- ctx->h264_dec.dpb);
+ ctrls->sps, ctx->h264_dec.dpb);
v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
h264_ctx->reflists.b1);
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index f066de6b592d..219283a06f52 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -56,14 +56,12 @@ struct hantro_jpeg_enc_hw_ctx {
* struct hantro_h264_dec_ctrls
* @decode: Decode params
* @scaling: Scaling info
- * @slice: Slice params
* @sps: SPS info
* @pps: PPS info
*/
struct hantro_h264_dec_ctrls {
const struct v4l2_ctrl_h264_decode_params *decode;
const struct v4l2_ctrl_h264_scaling_matrix *scaling;
- const struct v4l2_ctrl_h264_slice_params *slices;
const struct v4l2_ctrl_h264_sps *sps;
const struct v4l2_ctrl_h264_pps *pps;
};
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c
index 44062ffceaea..6d2a8f2a8f0b 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/staging/media/hantro/hantro_postproc.c
@@ -118,7 +118,9 @@ int hantro_postproc_alloc(struct hantro_ctx *ctx)
unsigned int num_buffers = cap_queue->num_buffers;
unsigned int i, buf_size;
- buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage;
+ buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage +
+ hantro_h264_mv_size(ctx->dst_fmt.width,
+ ctx->dst_fmt.height);
for (i = 0; i < num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
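
The post-processor's internal queue buffers receive raw decoder output, so they must also hold the motion-vector data the core writes past the visible frame; that is what the added hantro_h264_mv_size() term reserves. A hedged sketch of such a sizing helper (the real formula lives in hantro_hw.h; the per-macroblock constant here is an assumption):

	/* hypothetical expansion, not the driver's exact math */
	static size_t h264_mv_bytes(unsigned int width, unsigned int height)
	{
		return MB_WIDTH(width) * MB_HEIGHT(height) * 64;
	}
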
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index a371cdedcdb0..9cfc1c1e78dc 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -10,6 +10,10 @@
driver uses the parsed DT bus config method until this issue is
resolved.
+ 2020-06: g_mbus has been removed in favour of the get_mbus_config pad
+ operation which should be used to avoid parsing the remote endpoint
+ configuration.
+
- This media driver supports inheriting V4L2 controls to the
video capture devices, from the subdevices in the capture device's
pipeline. The controls for each capture device are updated in the
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index a607b0158c81..3a45c1fe4957 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -120,13 +120,13 @@ struct ipu3_uapi_awb_config {
#define IPU3_UAPI_AE_WEIGHTS 96
/**
- + * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
- + *
- + * @vals: Sum of IPU3_UAPI_AE_COLORS in cell
- + *
- + * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin has 24 bit unsigned
- + * for counting the number of the pixel.
- + */
+ * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
+ *
+ * @vals: Sum of IPU3_UAPI_AE_COLORS in cell
+ *
+ * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin has a 24 bit
+ * unsigned counter for the number of pixels.
+ */
struct ipu3_uapi_ae_raw_buffer {
__u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
} __packed;
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
index fbd53d7c097c..e9d6bd9e9332 100644
--- a/drivers/staging/media/ipu3/ipu3-css-params.c
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -159,7 +159,7 @@ imgu_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
memset(&cfg->scaler_coeffs_chroma, 0,
sizeof(cfg->scaler_coeffs_chroma));
- memset(&cfg->scaler_coeffs_luma, 0, sizeof(*cfg->scaler_coeffs_luma));
+ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
do {
phase_step_correction++;
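
The memset change above fixes a classic sizeof pitfall: when the operand is an array member, sizeof(*arr) yields the size of one element while sizeof(arr) yields the whole array, so the old code zeroed only the first luma coefficient entry. Generic, self-contained illustration:

	#include <string.h>

	static void zero_demo(void)
	{
		int coeffs[4][32];

		memset(coeffs, 0, sizeof(*coeffs));	/* one row: 32 ints */
		memset(coeffs, 0, sizeof(coeffs));	/* all rows: 4 * 32 ints */
	}
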
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 3c700ae9c94e..608dcacf12b2 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -662,17 +662,16 @@ static void imgu_css_hw_cleanup(struct imgu_css *css)
static void imgu_css_pipeline_cleanup(struct imgu_css *css, unsigned int pipe)
{
struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int i;
- imgu_css_pool_cleanup(imgu,
- &css->pipes[pipe].pool.parameter_set_info);
- imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
- imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
- imgu_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.parameter_set_info);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.acc);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.gdc);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.obgrid);
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- imgu_css_pool_cleanup(imgu,
- &css->pipes[pipe].pool.binary_params_p[i]);
+ imgu_css_pool_cleanup(imgu, &css_pipe->pool.binary_params_p[i]);
}
/*
@@ -698,6 +697,12 @@ static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
unsigned int i, j;
struct imgu_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_css_queue *css_queue_in =
+ &css_pipe->queue[IPU3_CSS_QUEUE_IN];
+ struct imgu_css_queue *css_queue_out =
+ &css_pipe->queue[IPU3_CSS_QUEUE_OUT];
+ struct imgu_css_queue *css_queue_vf =
+ &css_pipe->queue[IPU3_CSS_QUEUE_VF];
const struct imgu_fw_info *bi =
&css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
@@ -710,6 +715,9 @@ static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
struct imgu_abi_isp_stage *isp_stage;
struct imgu_abi_sp_stage *sp_stage;
struct imgu_abi_sp_group *sp_group;
+ struct imgu_abi_frames_sp *frames_sp;
+ struct imgu_abi_frame_sp *frame_sp;
+ struct imgu_abi_frame_sp_info *frame_sp_info;
const unsigned int bds_width_pad =
ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
@@ -731,61 +739,44 @@ static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
if (!cfg_iter)
goto bad_firmware;
- cfg_iter->input_info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
- cfg_iter->input_info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
- cfg_iter->input_info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
- cfg_iter->input_info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
- cfg_iter->input_info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
- cfg_iter->input_info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
- cfg_iter->input_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
-
- cfg_iter->internal_info.res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
- cfg_iter->internal_info.res.height =
- css_pipe->rect[IPU3_CSS_RECT_BDS].height;
- cfg_iter->internal_info.padded_width = bds_width_pad;
- cfg_iter->internal_info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
- cfg_iter->internal_info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
- cfg_iter->internal_info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
- cfg_iter->internal_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
-
- cfg_iter->output_info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
- cfg_iter->output_info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
- cfg_iter->output_info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
- cfg_iter->output_info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
- cfg_iter->output_info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
- cfg_iter->output_info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
- cfg_iter->output_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
-
- cfg_iter->vf_info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
- cfg_iter->vf_info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
- cfg_iter->vf_info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
- cfg_iter->vf_info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
- cfg_iter->vf_info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
- cfg_iter->vf_info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
- cfg_iter->vf_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
-
- cfg_iter->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ frame_sp_info = &cfg_iter->input_info;
+ frame_sp_info->res.width = css_queue_in->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_in->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_in->width_pad;
+ frame_sp_info->format = css_queue_in->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_in->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_in->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->internal_info;
+ frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ frame_sp_info->padded_width = bds_width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->output_info;
+ frame_sp_info->res.width = css_queue_out->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_out->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_out->width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp_info = &cfg_iter->vf_info;
+ frame_sp_info->res.width = css_queue_vf->fmt.mpix.width;
+ frame_sp_info->res.height = css_queue_vf->fmt.mpix.height;
+ frame_sp_info->padded_width = css_queue_vf->width_pad;
+ frame_sp_info->format = css_queue_vf->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->dvs_envelope.width =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
cfg_iter->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
@@ -916,12 +907,13 @@ static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
memset(sp_stage, 0, sizeof(*sp_stage));
- sp_stage->frames.in.buf_attr = buffer_sp_init;
+ frames_sp = &sp_stage->frames;
+ frames_sp->in.buf_attr = buffer_sp_init;
for (i = 0; i < IMGU_ABI_BINARY_MAX_OUTPUT_PORTS; i++)
- sp_stage->frames.out[i].buf_attr = buffer_sp_init;
- sp_stage->frames.out_vf.buf_attr = buffer_sp_init;
- sp_stage->frames.s3a_buf = buffer_sp_init;
- sp_stage->frames.dvs_buf = buffer_sp_init;
+ frames_sp->out[i].buf_attr = buffer_sp_init;
+ frames_sp->out_vf.buf_attr = buffer_sp_init;
+ frames_sp->s3a_buf = buffer_sp_init;
+ frames_sp->dvs_buf = buffer_sp_init;
sp_stage->stage_type = IMGU_ABI_STAGE_TYPE_ISP;
sp_stage->num = stage;
@@ -931,94 +923,70 @@ static int imgu_css_pipeline_init(struct imgu_css *css, unsigned int pipe)
sp_stage->enable.vf_output = css_pipe->vf_output_en;
- sp_stage->frames.effective_in_res.width =
+ frames_sp->effective_in_res.width =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
- sp_stage->frames.effective_in_res.height =
+ frames_sp->effective_in_res.height =
css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
- sp_stage->frames.in.info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
- sp_stage->frames.in.info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
- sp_stage->frames.in.info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
- sp_stage->frames.in.info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
- sp_stage->frames.in.info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
- sp_stage->frames.in.info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
- sp_stage->frames.in.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
- sp_stage->frames.in.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
- sp_stage->frames.in.buf_attr.buf_type =
- IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
-
- sp_stage->frames.out[0].info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
- sp_stage->frames.out[0].info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
- sp_stage->frames.out[0].info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
- sp_stage->frames.out[0].info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
- sp_stage->frames.out[0].info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
- sp_stage->frames.out[0].info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
- sp_stage->frames.out[0].info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
- sp_stage->frames.out[0].planes.nv.uv.offset =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad *
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
- sp_stage->frames.out[0].buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
- sp_stage->frames.out[0].buf_attr.buf_type =
- IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
-
- sp_stage->frames.out[1].buf_attr.buf_src.queue_id =
- IMGU_ABI_QUEUE_EVENT_ID;
-
- sp_stage->frames.internal_frame_info.res.width =
- css_pipe->rect[IPU3_CSS_RECT_BDS].width;
- sp_stage->frames.internal_frame_info.res.height =
- css_pipe->rect[IPU3_CSS_RECT_BDS].height;
- sp_stage->frames.internal_frame_info.padded_width = bds_width_pad;
-
- sp_stage->frames.internal_frame_info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
- sp_stage->frames.internal_frame_info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
- sp_stage->frames.internal_frame_info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
- sp_stage->frames.internal_frame_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
-
- sp_stage->frames.out_vf.info.res.width =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
- sp_stage->frames.out_vf.info.res.height =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
- sp_stage->frames.out_vf.info.padded_width =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
- sp_stage->frames.out_vf.info.format =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
- sp_stage->frames.out_vf.info.raw_bit_depth =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
- sp_stage->frames.out_vf.info.raw_bayer_order =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
- sp_stage->frames.out_vf.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
- sp_stage->frames.out_vf.planes.yuv.u.offset =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
- sp_stage->frames.out_vf.planes.yuv.v.offset =
- css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
- css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height * 5 / 4;
- sp_stage->frames.out_vf.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
- sp_stage->frames.out_vf.buf_attr.buf_type =
- IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
-
- sp_stage->frames.s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
- sp_stage->frames.s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
-
- sp_stage->frames.dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
- sp_stage->frames.dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
-
- sp_stage->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+
+ frame_sp = &frames_sp->in;
+ frame_sp->info.res.width = css_queue_in->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_in->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_in->width_pad;
+ frame_sp->info.format = css_queue_in->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_in->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_in->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
+
+ frame_sp = &frames_sp->out[0];
+ frame_sp->info.res.width = css_queue_out->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_out->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_out->width_pad;
+ frame_sp->info.format = css_queue_out->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->planes.nv.uv.offset = css_queue_out->width_pad *
+ css_queue_out->fmt.mpix.height;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
+
+ frame_sp = &frames_sp->out[1];
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_EVENT_ID;
+
+ frame_sp_info = &frames_sp->internal_frame_info;
+ frame_sp_info->res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ frame_sp_info->res.height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ frame_sp_info->padded_width = bds_width_pad;
+ frame_sp_info->format = css_queue_out->css_fmt->frame_format;
+ frame_sp_info->raw_bit_depth = css_queue_out->css_fmt->bit_depth;
+ frame_sp_info->raw_bayer_order = css_queue_out->css_fmt->bayer_order;
+ frame_sp_info->raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ frame_sp = &frames_sp->out_vf;
+ frame_sp->info.res.width = css_queue_vf->fmt.mpix.width;
+ frame_sp->info.res.height = css_queue_vf->fmt.mpix.height;
+ frame_sp->info.padded_width = css_queue_vf->width_pad;
+ frame_sp->info.format = css_queue_vf->css_fmt->frame_format;
+ frame_sp->info.raw_bit_depth = css_queue_vf->css_fmt->bit_depth;
+ frame_sp->info.raw_bayer_order = css_queue_vf->css_fmt->bayer_order;
+ frame_sp->info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ frame_sp->planes.yuv.u.offset = css_queue_vf->width_pad *
+ css_queue_vf->fmt.mpix.height;
+ frame_sp->planes.yuv.v.offset = css_queue_vf->width_pad *
+ css_queue_vf->fmt.mpix.height * 5 / 4;
+ frame_sp->buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
+ frame_sp->buf_attr.buf_type = IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
+
+ frames_sp->s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
+ frames_sp->s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
+
+ frames_sp->dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
+ frames_sp->dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
+
+ sp_stage->dvs_envelope.width =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
sp_stage->dvs_envelope.height =
css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 3040136ceb77..5ccb3846c879 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -841,13 +841,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->min_buffers_needed = 1;
dst_vq->dev = sess->core->dev;
dst_vq->lock = &sess->lock;
- ret = vb2_queue_init(dst_vq);
- if (ret) {
- vb2_queue_release(src_vq);
- return ret;
- }
-
- return 0;
+ return vb2_queue_init(dst_vq);
}
static int vdec_init_ctrls(struct amvdec_session *sess)
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 6fb60b58447a..e06ea7ea1e50 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -55,7 +55,7 @@ static void iss_print_status(struct iss_device *iss)
* readback the same register, in this case the revision register.
*
* See this link for reference:
- * http://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
+ * https://www.mail-archive.com/linux-omap@vger.kernel.org/msg08149.html
*/
static void omap4iss_flush(struct iss_device *iss)
{
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
deleted file mode 100644
index fb74df829371..000000000000
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-config PHY_ROCKCHIP_DPHY_RX0
- tristate "Rockchip MIPI Synopsys DPHY RX0 driver"
- depends on ARCH_ROCKCHIP || COMPILE_TEST
- select GENERIC_PHY_MIPI_DPHY
- select GENERIC_PHY
- help
- Enable this to support the Rockchip MIPI Synopsys DPHY RX0
- associated to the Rockchip ISP module present in RK3399 SoCs.
-
- To compile this driver as a module, choose M here: the module
- will be called phy-rockchip-dphy-rx0.
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
deleted file mode 100644
index 507e5d0593ab..000000000000
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
deleted file mode 100644
index ab612e5b27dc..000000000000
--- a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-The main reason for keeping this in staging is because the only driver
-that uses this is rkisp1, which is also in staging. It should be moved together
-with rkisp1.
-
-Please CC patches to Linux Media <linux-media@vger.kernel.org> and
-Helen Koike <helen.koike@collabora.com>.
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
deleted file mode 100644
index 32034e481357..000000000000
--- a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-
-.. _v4l2-meta-fmt-rkisp1-params:
-
-============================
-V4L2_META_FMT_RK_ISP1_PARAMS
-============================
-
-Rockchip ISP1 Parameters Data
-
-Description
-===========
-
-This format describes input parameters for the Rockchip ISP1.
-
-It uses c-struct :c:type:`rkisp1_params_cfg`, which is defined in
-the ``linux/rkisp1-config.h`` header file.
-
-The parameters consist of multiple modules.
-The module won't be updated if the corresponding bit was not set in module_*_update.
-
-.. kernel-doc:: include/uapi/linux/rkisp1-config.h
- :functions: rkisp1_params_cfg
diff --git a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst b/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
deleted file mode 100644
index 4ad303f96421..000000000000
--- a/drivers/staging/media/rkisp1/Documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-
-.. _v4l2-meta-fmt-rkisp1-stat:
-
-=============================
-V4L2_META_FMT_RK_ISP1_STAT_3A
-=============================
-
-
-Rockchip ISP1 Statistics Data
-
-Description
-===========
-
-This format describes image color statistics information generated by the Rockchip
-ISP1.
-
-It uses c-struct :c:type:`rkisp1_stat_buffer`, which is defined in
-the ``linux/rkisp1-config.h`` header file.
-
-.. kernel-doc:: include/uapi/linux/rkisp1-config.h
- :functions: rkisp1_stat_buffer
diff --git a/drivers/staging/media/rkisp1/TODO b/drivers/staging/media/rkisp1/TODO
index bdb1b8f73556..e7c8398fc2ce 100644
--- a/drivers/staging/media/rkisp1/TODO
+++ b/drivers/staging/media/rkisp1/TODO
@@ -1,8 +1,6 @@
* Fix pad format size for statistics and parameters entities.
* Fix checkpatch errors.
-* Review and comment every lock
-* Handle quantization
-* Document rkisp1-common.h
+* Add uapi docs. Remember to add documentation of how quantization is handled.
* streaming paths (mainpath and selfpath) check if the other path is streaming
  in several places of the code; review this, especially since it doesn't seem to
  support streaming from both paths at the same time.
diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c
index c05280950ea0..b6f497ce3e95 100644
--- a/drivers/staging/media/rkisp1/rkisp1-capture.c
+++ b/drivers/staging/media/rkisp1/rkisp1-capture.c
@@ -49,12 +49,14 @@ enum rkisp1_plane {
 * @uv_swap: if cb/cr are swapped, for yuv
* @write_format: defines how YCbCr self picture data is written to memory
* @output_format: defines sp output format
+ * @mbus: the mbus code on the src resizer pad that matches the pixel format
*/
struct rkisp1_capture_fmt_cfg {
u32 fourcc;
u8 uv_swap;
u32 write_format;
u32 output_format;
+ u32 mbus;
};
struct rkisp1_capture_ops {
@@ -82,114 +84,133 @@ struct rkisp1_capture_config {
} mi;
};
+/*
+ * The supported pixel formats for mainpath. NOTE: pixel formats with identical 'mbus'
+ * are grouped together. This is assumed and used by rkisp1_cap_enum_mbus_codes().
+ */
static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
/* yuv422 */
{
.fourcc = V4L2_PIX_FMT_YUYV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
- }, {
- .fourcc = V4L2_PIX_FMT_YVYU,
- .uv_swap = 1,
- .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
- }, {
- .fourcc = V4L2_PIX_FMT_VYUY,
- .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ },
+ /* yuv400 */
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv420 */
{
.fourcc = V4L2_PIX_FMT_NV21,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
- },
- /* yuv444 */
- {
- .fourcc = V4L2_PIX_FMT_YUV444M,
- .uv_swap = 0,
- .write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
- },
- /* yuv400 */
- {
- .fourcc = V4L2_PIX_FMT_GREY,
- .uv_swap = 0,
- .write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
},
/* raw */
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_SRGGB8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_SGRBG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_SGBRG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
+ .mbus = MEDIA_BUS_FMT_SBGGR8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SRGGB10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SGRBG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SGBRG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SBGGR10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SRGGB12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SGRBG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SGBRG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
+ .mbus = MEDIA_BUS_FMT_SBGGR12_1X12,
},
};
+/*
+ * The supported pixel formats for selfpath. NOTE: pixel formats with identical 'mbus'
+ * are grouped together. This is assumed and used by rkisp1_cap_enum_mbus_codes().
+ */
static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv422 */
{
@@ -197,36 +218,51 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
- }, {
- .fourcc = V4L2_PIX_FMT_YVYU,
- .uv_swap = 1,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
- }, {
- .fourcc = V4L2_PIX_FMT_VYUY,
- .uv_swap = 1,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ },
+ /* yuv400 */
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .uv_swap = 0,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ },
+ /* rgb */
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
+ .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
+ .mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv420 */
{
@@ -234,55 +270,37 @@ static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
- },
- /* yuv444 */
- {
- .fourcc = V4L2_PIX_FMT_YUV444M,
- .uv_swap = 0,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV444,
- },
- /* yuv400 */
- {
- .fourcc = V4L2_PIX_FMT_GREY,
- .uv_swap = 0,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV400,
- },
- /* rgb */
- {
- .fourcc = V4L2_PIX_FMT_RGB24,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
- .output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
+ .mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
},
};
@@ -324,6 +342,30 @@ rkisp1_vdev_to_node(struct video_device *vdev)
return container_of(vdev, struct rkisp1_vdev_node, vdev);
}
+int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct rkisp1_capture_fmt_cfg *fmts = cap->config->fmts;
+ /*
+	 * Initialize curr_mbus to the non-existent mbus code 0 to ensure it
+	 * differs from fmts[0].mbus.
+ */
+ u32 curr_mbus = 0;
+ int i, n = 0;
+
+ for (i = 0; i < cap->config->fmt_size; i++) {
+ if (fmts[i].mbus == curr_mbus)
+ continue;
+
+ curr_mbus = fmts[i].mbus;
+ if (n++ == code->index) {
+ code->code = curr_mbus;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
/* ----------------------------------------------------------------------------
* Stream operations for self-picture path (sp) and main-picture path (mp)
*/
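
The helper above leans on the grouping invariant called out in the table comments: the loop only skips runs of identical codes, so formats sharing an mbus code must stay adjacent in the table. A standalone sketch of the same walk, with made-up table values purely for illustration:

#include <stdio.h>

struct fmt_cfg {
	unsigned int fourcc;	/* placeholder pixel-format ids */
	unsigned int mbus;	/* media bus code the format maps to */
};

/* grouped by 'mbus', as the driver tables are required to be */
static const struct fmt_cfg fmts[] = {
	{ 1, 100 }, { 2, 100 }, { 3, 100 },	/* three formats on code 100 */
	{ 4, 200 }, { 5, 200 },			/* two formats on code 200 */
};

/* return the index'th distinct mbus code, or -1 when out of range */
static int enum_mbus_codes(unsigned int index, unsigned int *code)
{
	unsigned int curr_mbus = 0;	/* 0 is not a valid mbus code */
	unsigned int i, n = 0;

	for (i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++) {
		if (fmts[i].mbus == curr_mbus)
			continue;	/* still inside the same group */
		curr_mbus = fmts[i].mbus;
		if (n++ == index) {
			*code = curr_mbus;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int code, i;

	for (i = 0; !enum_mbus_codes(i, &code); i++)
		printf("index %u -> mbus %u\n", i, code);	/* 100, then 200 */
	return 0;
}

If the code-100 group were split by a code-200 entry, code 100 would be reported twice, which is exactly the bug the grouping comments guard against.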
@@ -626,13 +668,12 @@ static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
{
struct rkisp1_isp *isp = &cap->rkisp1->isp;
struct rkisp1_buffer *curr_buf;
- unsigned long flags;
- spin_lock_irqsave(&cap->buf.lock, flags);
+ spin_lock(&cap->buf.lock);
curr_buf = cap->buf.curr;
if (curr_buf) {
- curr_buf->vb.sequence = atomic_read(&isp->frame_sequence);
+ curr_buf->vb.sequence = isp->frame_sequence;
curr_buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns();
curr_buf->vb.field = V4L2_FIELD_NONE;
vb2_buffer_done(&curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
@@ -641,7 +682,7 @@ static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
}
rkisp1_set_next_buf(cap);
- spin_unlock_irqrestore(&cap->buf.lock, flags);
+ spin_unlock(&cap->buf.lock);
}
void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
@@ -716,7 +757,6 @@ static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
container_of(vbuf, struct rkisp1_buffer, vb);
struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
- unsigned long flags;
unsigned int i;
memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
@@ -741,9 +781,9 @@ static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
swap(ispbuf->buff_addr[RKISP1_PLANE_CR],
ispbuf->buff_addr[RKISP1_PLANE_CB]);
- spin_lock_irqsave(&cap->buf.lock, flags);
+ spin_lock_irq(&cap->buf.lock);
list_add_tail(&ispbuf->queue, &cap->buf.queue);
- spin_unlock_irqrestore(&cap->buf.lock, flags);
+ spin_unlock_irq(&cap->buf.lock);
}
static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb)
@@ -769,10 +809,9 @@ static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb)
static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
enum vb2_buffer_state state)
{
- unsigned long flags;
struct rkisp1_buffer *buf;
- spin_lock_irqsave(&cap->buf.lock, flags);
+ spin_lock_irq(&cap->buf.lock);
if (cap->buf.curr) {
vb2_buffer_done(&cap->buf.curr->vb.vb2_buf, state);
cap->buf.curr = NULL;
@@ -787,7 +826,7 @@ static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
list_del(&buf->queue);
vb2_buffer_done(&buf->vb.vb2_buf, state);
}
- spin_unlock_irqrestore(&cap->buf.lock, flags);
+ spin_unlock_irq(&cap->buf.lock);
}
/*
@@ -916,6 +955,7 @@ static void rkisp1_stream_start(struct rkisp1_capture *cap)
cap->ops->config(cap);
/* Setup a buffer for the next frame */
+ spin_lock_irq(&cap->buf.lock);
rkisp1_set_next_buf(cap);
cap->ops->enable(cap);
/* It's safe to config ACTIVE and SHADOW regs for the
@@ -933,6 +973,7 @@ static void rkisp1_stream_start(struct rkisp1_capture *cap)
RKISP1_CIF_MI_INIT_SOFT_UPD, RKISP1_CIF_MI_INIT);
rkisp1_set_next_buf(cap);
}
+ spin_unlock_irq(&cap->buf.lock);
cap->is_streaming = true;
}
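
These conversions from spin_lock_irqsave() to spin_lock()/spin_lock_irq() encode knowledge of the calling context rather than changing what the lock protects: the frame interrupt handler already runs with local interrupts disabled, while the vb2 callbacks run in process context where interrupts are known to be enabled. A minimal sketch of the convention, with assumed names rather than the driver's:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(buf_lock);

/* interrupt context: local interrupts are already off on this CPU,
 * so the plain variant is enough */
static void frame_done_isr(void)
{
	spin_lock(&buf_lock);
	/* ... rotate the curr/next buffers ... */
	spin_unlock(&buf_lock);
}

/* process context (e.g. a vb2 buf_queue callback): interrupts are
 * known to be enabled, so spin_lock_irq() can replace the more
 * defensive spin_lock_irqsave()/spin_unlock_irqrestore() pair */
static void buf_queue(void)
{
	spin_lock_irq(&buf_lock);
	/* ... append the buffer to the pending list ... */
	spin_unlock_irq(&buf_lock);
}

The trade-off is that spin_lock_irq() unconditionally re-enables interrupts on unlock, so it is only safe where the caller's context is guaranteed, which is precisely what these conversions assert.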
@@ -1017,6 +1058,7 @@ rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
unsigned int i;
u32 stride;
+ memset(pixm->plane_fmt, 0, sizeof(pixm->plane_fmt));
info = v4l2_format_info(pixm->pixelformat);
pixm->num_planes = info->mem_planes;
stride = info->bpp[0] * pixm->width;
@@ -1069,8 +1111,6 @@ static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
const struct v4l2_format_info **fmt_info)
{
const struct rkisp1_capture_config *config = cap->config;
- struct rkisp1_capture *other_cap =
- &cap->rkisp1->capture_devs[cap->id ^ 1];
const struct rkisp1_capture_fmt_cfg *fmt;
const struct v4l2_format_info *info;
const unsigned int max_widths[] = { RKISP1_RSZ_MP_SRC_MAX_WIDTH,
@@ -1095,14 +1135,6 @@ static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
info = rkisp1_fill_pixfmt(pixm, cap->id);
- /* can not change quantization when stream-on */
- if (other_cap->is_streaming)
- pixm->quantization = other_cap->pix.fmt.quantization;
- /* output full range by default, take effect in params */
- else if (!pixm->quantization ||
- pixm->quantization > V4L2_QUANTIZATION_LIM_RANGE)
- pixm->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-
if (fmt_cfg)
*fmt_cfg = fmt;
if (fmt_info)
@@ -1136,14 +1168,27 @@ static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
{
struct rkisp1_capture *cap = video_drvdata(file);
const struct rkisp1_capture_fmt_cfg *fmt = NULL;
+ unsigned int i, n = 0;
- if (f->index >= cap->config->fmt_size)
- return -EINVAL;
+ if (!f->mbus_code) {
+ if (f->index >= cap->config->fmt_size)
+ return -EINVAL;
- fmt = &cap->config->fmts[f->index];
- f->pixelformat = fmt->fourcc;
+ fmt = &cap->config->fmts[f->index];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
- return 0;
+ for (i = 0; i < cap->config->fmt_size; i++) {
+ if (cap->config->fmts[i].mbus != f->mbus_code)
+ continue;
+
+ if (n++ == f->index) {
+ f->pixelformat = cap->config->fmts[i].fourcc;
+ return 0;
+ }
+ }
+ return -EINVAL;
}
static int rkisp1_s_fmt_vid_cap_mplane(struct file *file,
@@ -1210,29 +1255,11 @@ static int rkisp1_capture_link_validate(struct media_link *link)
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(link->source->entity);
struct rkisp1_capture *cap = video_get_drvdata(vdev);
- struct rkisp1_isp *isp = &cap->rkisp1->isp;
- u8 isp_pix_enc = isp->src_fmt->pixel_enc;
- u8 cap_pix_enc = cap->pix.info->pixel_enc;
+ const struct rkisp1_capture_fmt_cfg *fmt =
+ rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
struct v4l2_subdev_format sd_fmt;
int ret;
- if (cap->id == RKISP1_SELFPATH &&
- isp->src_fmt->mbus_code != MEDIA_BUS_FMT_YUYV8_2X8) {
- dev_err(cap->rkisp1->dev,
- "selfpath only supports MEDIA_BUS_FMT_YUYV8_2X8\n");
- return -EPIPE;
- }
-
- if (cap_pix_enc != isp_pix_enc &&
- !(isp_pix_enc == V4L2_PIXEL_ENC_YUV &&
- cap_pix_enc == V4L2_PIXEL_ENC_RGB)) {
- dev_err(cap->rkisp1->dev,
- "format type mismatch in link '%s:%d->%s:%d'\n",
- link->source->entity->name, link->source->index,
- link->sink->entity->name, link->sink->index);
- return -EPIPE;
- }
-
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sd_fmt.pad = link->source->index;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
@@ -1240,7 +1267,8 @@ static int rkisp1_capture_link_validate(struct media_link *link)
return ret;
if (sd_fmt.format.height != cap->pix.fmt.height ||
- sd_fmt.format.width != cap->pix.fmt.width)
+ sd_fmt.format.width != cap->pix.fmt.width ||
+ sd_fmt.format.code != fmt->mbus)
return -EPIPE;
return 0;
@@ -1265,7 +1293,7 @@ static const struct v4l2_file_operations rkisp1_fops = {
static void rkisp1_unregister_capture(struct rkisp1_capture *cap)
{
media_entity_cleanup(&cap->vnode.vdev.entity);
- video_unregister_device(&cap->vnode.vdev);
+ vb2_video_unregister_device(&cap->vnode.vdev);
}
void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1)
@@ -1298,7 +1326,7 @@ static int rkisp1_register_capture(struct rkisp1_capture *cap)
vdev->v4l2_dev = v4l2_dev;
vdev->lock = &node->vlock;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_STREAMING;
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
vdev->entity.ops = &rkisp1_media_ops;
video_set_drvdata(vdev, cap);
vdev->vfl_dir = VFL_DIR_RX;
diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h
index 3dc51d703f73..45abacdbb664 100644
--- a/drivers/staging/media/rkisp1/rkisp1-common.h
+++ b/drivers/staging/media/rkisp1/rkisp1-common.h
@@ -22,9 +22,14 @@
#include "rkisp1-regs.h"
#include "uapi/rkisp1-config.h"
+/*
+ * flags on the 'direction' field in struct 'rkisp1_isp_mbus_info' that indicate
+ * on which pad the media bus format is supported
+ */
#define RKISP1_ISP_SD_SRC BIT(0)
#define RKISP1_ISP_SD_SINK BIT(1)
+/* min and max values for the widths and heights of the entities */
#define RKISP1_ISP_MAX_WIDTH 4032
#define RKISP1_ISP_MAX_HEIGHT 3024
#define RKISP1_ISP_MIN_WIDTH 32
@@ -37,29 +42,36 @@
#define RKISP1_RSZ_SRC_MIN_WIDTH 32
#define RKISP1_RSZ_SRC_MIN_HEIGHT 16
+/* the default width and height of all the entities */
#define RKISP1_DEFAULT_WIDTH 800
#define RKISP1_DEFAULT_HEIGHT 600
#define RKISP1_DRIVER_NAME "rkisp1"
#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
+/* maximum number of clocks */
#define RKISP1_MAX_BUS_CLK 8
+/* a bitmask of the ready stats */
#define RKISP1_STATS_MEAS_MASK (RKISP1_CIF_ISP_AWB_DONE | \
RKISP1_CIF_ISP_AFM_FIN | \
RKISP1_CIF_ISP_EXP_END | \
RKISP1_CIF_ISP_HIST_MEASURE_RDY)
+
+/* enum for the resizer pads */
enum rkisp1_rsz_pad {
RKISP1_RSZ_PAD_SINK,
RKISP1_RSZ_PAD_SRC,
RKISP1_RSZ_PAD_MAX
};
+/* enum for the capture id */
enum rkisp1_stream_id {
RKISP1_MAINPATH,
RKISP1_SELFPATH,
};
+/* bayer patterns */
enum rkisp1_fmt_raw_pat_type {
RKISP1_RAW_RGGB = 0,
RKISP1_RAW_GRBG,
@@ -67,6 +79,7 @@ enum rkisp1_fmt_raw_pat_type {
RKISP1_RAW_BGGR,
};
+/* enum for the isp pads */
enum rkisp1_isp_pad {
RKISP1_ISP_PAD_SINK_VIDEO,
RKISP1_ISP_PAD_SINK_PARAMS,
@@ -76,8 +89,16 @@ enum rkisp1_isp_pad {
};
/*
- * struct rkisp1_sensor_async - Sensor information
- * @mbus: media bus configuration
+ * struct rkisp1_sensor_async - A container for the v4l2_async_subdev to add to the notifier
+ * of the v4l2-async API
+ *
+ * @asd: async_subdev variable for the sensor
+ * @lanes: number of lanes
+ * @mbus_type: type of bus (currently only CSI2 is supported)
+ * @mbus_flags: media bus (V4L2_MBUS_*) flags
+ * @sd: a pointer to v4l2_subdev struct of the sensor
+ * @pixel_rate_ctrl: pixel rate of the sensor, used to initialize the phy
+ * @dphy: a pointer to the phy
*/
struct rkisp1_sensor_async {
struct v4l2_async_subdev asd;
@@ -90,19 +111,17 @@ struct rkisp1_sensor_async {
};
/*
- * struct rkisp1_isp - ISP sub-device
+ * struct rkisp1_isp - ISP subdev entity
*
- * See Cropping regions of ISP in rkisp1.c for details
- * @sink_frm: input size, don't have to be equal to sensor size
- * @sink_fmt: input format
- * @sink_crop: crop for sink pad
- * @src_fmt: output format
- * @src_crop: output size
- * @ops_lock: ops serialization
- *
- * @is_dphy_errctrl_disabled : if dphy errctrl is disabled (avoid endless interrupt)
- * @frame_sequence: used to synchronize frame_id between video devices.
- * @quantization: output quantization
+ * @sd: v4l2_subdev variable
+ * @rkisp1: pointer to rkisp1_device
+ * @pads: media pads
+ * @pad_cfg: pads configurations
+ * @sink_fmt: input format
+ * @src_fmt: output format
+ * @ops_lock: ops serialization
+ * @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoid endless interrupt)
+ * @frame_sequence: used to synchronize frame_id between video devices.
*/
struct rkisp1_isp {
struct v4l2_subdev sd;
@@ -110,11 +129,19 @@ struct rkisp1_isp {
struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
const struct rkisp1_isp_mbus_info *sink_fmt;
const struct rkisp1_isp_mbus_info *src_fmt;
- struct mutex ops_lock;
+ struct mutex ops_lock; /* serialize the subdevice ops */
bool is_dphy_errctrl_disabled;
- atomic_t frame_sequence;
+ __u32 frame_sequence;
};
+/*
+ * struct rkisp1_vdev_node - Container for the video nodes: params, stats, mainpath, selfpath
+ *
+ * @buf_queue: queue of buffers
+ * @vlock: lock of the video node
+ * @vdev: video node
+ * @pad: media pad
+ */
struct rkisp1_vdev_node {
struct vb2_queue buf_queue;
struct mutex vlock; /* ioctl serialization mutex */
@@ -122,15 +149,32 @@ struct rkisp1_vdev_node {
struct media_pad pad;
};
+/*
+ * struct rkisp1_buffer - A container for the vb2 buffers used by the video devices:
+ * params, stats, mainpath, selfpath
+ *
+ * @vb: vb2 buffer
+ * @queue: entry of the buffer in the queue
+ * @buff_addr: dma addresses of each plane, used only by the capture devices: selfpath, mainpath
+ * @vaddr: virtual address for buffers used by params and stats devices
+ */
struct rkisp1_buffer {
struct vb2_v4l2_buffer vb;
struct list_head queue;
union {
u32 buff_addr[VIDEO_MAX_PLANES];
- void *vaddr[VIDEO_MAX_PLANES];
+ void *vaddr;
};
};
+/*
+ * struct rkisp1_dummy_buffer - A buffer to write the next frame to in case
+ * there are no vb2 buffers available.
+ *
+ * @vaddr: return value of call to dma_alloc_attrs.
+ * @dma_addr: dma address of the buffer.
+ * @size: size of the buffer.
+ */
struct rkisp1_dummy_buffer {
void *vaddr;
dma_addr_t dma_addr;
@@ -142,17 +186,29 @@ struct rkisp1_device;
/*
* struct rkisp1_capture - ISP capture video device
*
- * @pix.fmt: buffer format
- * @pix.info: pixel information
- * @pix.cfg: pixel configuration
+ * @vnode: video node
+ * @rkisp1: pointer to rkisp1_device
+ * @id: id of the capture, one of RKISP1_SELFPATH, RKISP1_MAINPATH
+ * @ops: list of callbacks to configure the capture device.
+ * @config: a pointer to the list of registers to configure the capture format.
+ * @is_streaming: device is streaming
+ * @is_stopping: stop_streaming callback was called and the device is in the process of
+ * stopping the streaming.
+ * @done: when stop_streaming callback is called, the device waits for the next irq
+ * handler to stop the streaming by waiting on the 'done' wait queue.
+ * If the irq handler is not called, the stream is stopped by the callback
+ * after timeout.
+ * @sp_y_stride:	the selfpath allows configuring a Y stride that is longer than the image width.
+ * @buf.lock: lock to protect buf.queue
+ * @buf.queue: queued buffer list
+ * @buf.dummy: dummy space to store dropped data
*
- * @buf.lock: lock to protect buf_queue
- * @buf.queue: queued buffer list
- * @buf.dummy: dummy space to store dropped data
- *
- * rkisp1 use shadowsock registers, so it need two buffer at a time
- * @buf.curr: the buffer used for current frame
- * @buf.next: the buffer used for next frame
+ * rkisp1 uses shadow registers, so it needs two buffers at a time
+ * @buf.curr: the buffer used for current frame
+ * @buf.next: the buffer used for next frame
+ * @pix.cfg: pixel configuration
+ * @pix.info: a pointer to the v4l2_format_info of the pixel format
+ * @pix.fmt: buffer format
*/
struct rkisp1_capture {
struct rkisp1_vdev_node vnode;
@@ -182,14 +238,18 @@ struct rkisp1_capture {
/*
* struct rkisp1_stats - ISP Statistics device
*
- * @lock: locks the buffer list 'stat' and 'is_streaming'
- * @stat: stats buffer list
+ * @vnode: video node
+ * @rkisp1: pointer to the rkisp1 device
+ * @lock: locks the buffer list 'stat' and 'is_streaming'
+ * @stat: queue of rkisp1_buffer
+ * @vdev_fmt: v4l2_format of the metadata format
+ * @is_streaming: device is streaming
*/
struct rkisp1_stats {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
- spinlock_t lock; /* locks 'is_streaming', and 'stats' */
+ spinlock_t lock; /* locks the buffers list 'stats' and 'is_streaming' */
struct list_head stat;
struct v4l2_format vdev_fmt;
bool is_streaming;
@@ -198,24 +258,40 @@ struct rkisp1_stats {
/*
* struct rkisp1_params - ISP input parameters device
*
- * @cur_params: Current ISP parameters
- * @is_first_params: the first params should take effect immediately
+ * @vnode: video node
+ * @rkisp1: pointer to the rkisp1 device
+ * @config_lock: locks the buffer list 'params' and 'is_streaming'
+ * @params: queue of rkisp1_buffer
+ * @vdev_fmt: v4l2_format of the metadata format
+ * @is_streaming: device is streaming
+ * @quantization: the quantization configured on the isp's src pad
+ * @raw_type: the bayer pattern on the isp video sink pad
*/
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
- spinlock_t config_lock;
+ spinlock_t config_lock; /* locks the buffers list 'params' and 'is_streaming' */
struct list_head params;
- struct rkisp1_params_cfg cur_params;
struct v4l2_format vdev_fmt;
bool is_streaming;
- bool is_first_params;
enum v4l2_quantization quantization;
enum rkisp1_fmt_raw_pat_type raw_type;
};
+/*
+ * struct rkisp1_resizer - Resizer subdev
+ *
+ * @sd: v4l2_subdev variable
+ * @id: id of the resizer, one of RKISP1_SELFPATH, RKISP1_MAINPATH
+ * @rkisp1: pointer to the rkisp1 device
+ * @pads: media pads
+ * @pad_cfg: configurations for the pads
+ * @config: the set of registers to configure the resizer
+ * @pixel_enc: pixel encoding of the resizer
+ * @ops_lock: a lock for the subdev ops
+ */
struct rkisp1_resizer {
struct v4l2_subdev sd;
enum rkisp1_stream_id id;
@@ -224,15 +300,33 @@ struct rkisp1_resizer {
struct v4l2_subdev_pad_config pad_cfg[RKISP1_RSZ_PAD_MAX];
const struct rkisp1_rsz_config *config;
enum v4l2_pixel_encoding pixel_enc;
- struct mutex ops_lock;
+ struct mutex ops_lock; /* serialize the subdevice ops */
};
+/*
+ * struct rkisp1_debug - Values to be exposed on debugfs.
+ * The parameters are counters of the number of times the
+ * event occurred since the driver was loaded.
+ *
+ * @data_loss: loss of data occurred within a line, processing failure
+ * @outform_size_error: size error is generated in outmux submodule
+ * @img_stabilization_size_error: size error is generated in image stabilization submodule
+ * @inform_size_err: size error is generated in inform submodule
+ * @mipi_error: mipi error occurred
+ * @stats_error: writing to the 'Interrupt clear register' did not clear
+ * it in the register 'Masked interrupt status'
+ * @stop_timeout: upon stream stop, the capture waits 1 second for the isr to stop
+ * the stream. This param is incremented in case of timeout.
+ * @frame_drop: a frame was ready but the buffer queue was empty so the frame
+ * was not sent to userspace
+ */
struct rkisp1_debug {
struct dentry *debugfs_dir;
unsigned long data_loss;
unsigned long outform_size_error;
unsigned long img_stabilization_size_error;
unsigned long inform_size_error;
+ unsigned long irq_delay;
unsigned long mipi_error;
unsigned long stats_error;
unsigned long stop_timeout[2];
@@ -241,13 +335,24 @@ struct rkisp1_debug {
/*
* struct rkisp1_device - ISP platform device
- * @base_addr: base register address
+ *
+ * @base_addr: base register address
+ * @irq: the irq number
+ * @dev: a pointer to the struct device
+ * @clk_size: number of clocks
+ * @clks: array of clocks
+ * @v4l2_dev: v4l2_device variable
+ * @media_dev: media_device variable
+ * @notifier: a notifier to register on the v4l2-async API to be notified on the sensor
* @active_sensor: sensor in-use, set when streaming on
- * @isp: ISP sub-device
- * @rkisp1_capture: capture video device
- * @stats: ISP statistics output device
- * @params: ISP input parameters device
- * @stream_lock: lock to serialize start/stop streaming in capture devices.
+ * @isp: ISP sub-device
+ * @resizer_devs: resizer sub-devices
+ * @capture_devs: capture devices
+ * @stats: ISP statistics metadata capture device
+ * @params: ISP parameters metadata output device
+ * @pipe: media pipeline
+ * @stream_lock: serializes {start/stop}_streaming callbacks between the capture devices.
+ * @debug: debug params to be exposed on debugfs
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -265,16 +370,21 @@ struct rkisp1_device {
struct rkisp1_stats stats;
struct rkisp1_params params;
struct media_pipeline pipe;
- struct mutex stream_lock;
+ struct mutex stream_lock; /* serialize {start/stop}_streaming cb between capture devices */
struct rkisp1_debug debug;
};
/*
- * struct rkisp1_isp_mbus_info - ISP pad format info
- *
- * Translate mbus_code to hardware format values
+ * struct rkisp1_isp_mbus_info - ISP media bus info; translates a media bus code to
+ *				  hardware format values
*
- * @bus_width: used for parallel
+ * @mbus_code: media bus code
+ * @pixel_enc: pixel encoding
+ * @mipi_dt: mipi data type
+ * @yuv_seq: the order of the Y, Cb, Cr values
+ * @bus_width: bus width
+ * @bayer_pat: bayer pattern
+ * @direction: a bitmask of the flags indicating on which pads the format is supported
*/
struct rkisp1_isp_mbus_info {
u32 mbus_code;
@@ -297,44 +407,83 @@ static inline u32 rkisp1_read(struct rkisp1_device *rkisp1, unsigned int addr)
return readl(rkisp1->base_addr + addr);
}
+/*
+ * rkisp1_cap_enum_mbus_codes - A helper function that returns the i'th supported mbus code
+ * of the capture entity. This is used to enumerate the supported
+ * mbus codes on the source pad of the resizer.
+ *
+ * @cap: the capture entity
+ * @code: the mbus code, the function reads the code->index and fills the code->code
+ */
+int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap,
+ struct v4l2_subdev_mbus_code_enum *code);
+
+/*
+ * rkisp1_sd_adjust_crop_rect - adjust a rectangle to fit into another rectangle.
+ *
+ * @crop: rectangle to adjust.
+ * @bounds: rectangle used as bounds.
+ */
void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
const struct v4l2_rect *bounds);
+/*
+ * rkisp1_sd_adjust_crop - adjust a rectangle to fit into media bus format
+ *
+ * @crop: rectangle to adjust.
+ * @bounds: media bus format used as bounds.
+ */
void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
const struct v4l2_mbus_framefmt *bounds);
-int rkisp1_isp_register(struct rkisp1_device *rkisp1,
- struct v4l2_device *v4l2_dev);
-void rkisp1_isp_unregister(struct rkisp1_device *rkisp1);
-
+/*
+ * rkisp1_isp_mbus_info - get the isp info of the media bus code
+ *
+ * @mbus_code: the media bus code
+ */
const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code);
+/* rkisp1_params_configure - configure the params when the stream starts.
+ * This function is called by the isp entity when the stream starts.
+ * The function applies the initial configuration of the parameters.
+ *
+ * @params: pointer to rkisp1_params.
+ * @bayer_pat: the bayer pattern on the isp video sink pad
+ * @quantization: the quantization configured on the isp's src pad
+ */
+void rkisp1_params_configure(struct rkisp1_params *params,
+ enum rkisp1_fmt_raw_pat_type bayer_pat,
+ enum v4l2_quantization quantization);
+
+/* rkisp1_params_disable - disable all parameters.
+ * This function is called by the isp entity upon stream start
+ * when capturing bayer format.
+ *
+ * @params: pointer to rkisp1_params.
+ */
+void rkisp1_params_disable(struct rkisp1_params *params);
+
+/* irq handlers */
void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
-void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis);
+void rkisp1_params_isr(struct rkisp1_device *rkisp1);
+/* register/unregisters functions of the entities */
int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1);
void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1);
+int rkisp1_isp_register(struct rkisp1_device *rkisp1);
+void rkisp1_isp_unregister(struct rkisp1_device *rkisp1);
+
int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1);
void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1);
-int rkisp1_stats_register(struct rkisp1_stats *stats,
- struct v4l2_device *v4l2_dev,
- struct rkisp1_device *rkisp1);
-void rkisp1_stats_unregister(struct rkisp1_stats *stats);
-
-void rkisp1_params_configure(struct rkisp1_params *params,
- enum rkisp1_fmt_raw_pat_type bayer_pat,
- enum v4l2_quantization quantization);
-void rkisp1_params_disable(struct rkisp1_params *params);
-int rkisp1_params_register(struct rkisp1_params *params,
- struct v4l2_device *v4l2_dev,
- struct rkisp1_device *rkisp1);
-void rkisp1_params_unregister(struct rkisp1_params *params);
+int rkisp1_stats_register(struct rkisp1_device *rkisp1);
+void rkisp1_stats_unregister(struct rkisp1_device *rkisp1);
-void rkisp1_params_isr_handler(struct rkisp1_device *rkisp1, u32 isp_mis);
+int rkisp1_params_register(struct rkisp1_device *rkisp1);
+void rkisp1_params_unregister(struct rkisp1_device *rkisp1);
#endif /* _RKISP1_COMMON_H */
diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c
index a0eb8f08708b..91584695804b 100644
--- a/drivers/staging/media/rkisp1/rkisp1-dev.c
+++ b/drivers/staging/media/rkisp1/rkisp1-dev.c
@@ -345,7 +345,7 @@ static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
{
int ret;
- ret = rkisp1_isp_register(rkisp1, &rkisp1->v4l2_dev);
+ ret = rkisp1_isp_register(rkisp1);
if (ret)
return ret;
@@ -357,12 +357,11 @@ static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
if (ret)
goto err_unreg_resizer_devs;
- ret = rkisp1_stats_register(&rkisp1->stats, &rkisp1->v4l2_dev, rkisp1);
+ ret = rkisp1_stats_register(rkisp1);
if (ret)
goto err_unreg_capture_devs;
- ret = rkisp1_params_register(&rkisp1->params,
- &rkisp1->v4l2_dev, rkisp1);
+ ret = rkisp1_params_register(rkisp1);
if (ret)
goto err_unreg_stats;
@@ -375,9 +374,9 @@ static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
return 0;
err_unreg_params:
- rkisp1_params_unregister(&rkisp1->params);
+ rkisp1_params_unregister(rkisp1);
err_unreg_stats:
- rkisp1_stats_unregister(&rkisp1->stats);
+ rkisp1_stats_unregister(rkisp1);
err_unreg_capture_devs:
rkisp1_capture_devs_unregister(rkisp1);
err_unreg_resizer_devs:
@@ -445,6 +444,8 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
&debug->img_stabilization_size_error);
debugfs_create_ulong("inform_size_error", 0444, debug->debugfs_dir,
&debug->inform_size_error);
+ debugfs_create_ulong("irq_delay", 0444, debug->debugfs_dir,
+ &debug->irq_delay);
debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
&debug->mipi_error);
debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
@@ -551,8 +552,8 @@ static int rkisp1_remove(struct platform_device *pdev)
v4l2_async_notifier_unregister(&rkisp1->notifier);
v4l2_async_notifier_cleanup(&rkisp1->notifier);
- rkisp1_params_unregister(&rkisp1->params);
- rkisp1_stats_unregister(&rkisp1->stats);
+ rkisp1_params_unregister(rkisp1);
+ rkisp1_stats_unregister(rkisp1);
rkisp1_capture_devs_unregister(rkisp1);
rkisp1_resizer_devs_unregister(rkisp1);
rkisp1_isp_unregister(rkisp1);
diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c
index 6ec1e9816e9f..a9715b0b7264 100644
--- a/drivers/staging/media/rkisp1/rkisp1-isp.c
+++ b/drivers/staging/media/rkisp1/rkisp1-isp.c
@@ -348,7 +348,7 @@ static int rkisp1_config_isp(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, sink_crop->height, RKISP1_CIF_ISP_OUT_V_SIZE);
irq_mask |= RKISP1_CIF_ISP_FRAME | RKISP1_CIF_ISP_V_START |
- RKISP1_CIF_ISP_PIC_SIZE_ERROR | RKISP1_CIF_ISP_FRAME_IN;
+ RKISP1_CIF_ISP_PIC_SIZE_ERROR;
rkisp1_write(rkisp1, irq_mask, RKISP1_CIF_ISP_IMSC);
if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
@@ -589,6 +589,10 @@ static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index == pos - 1) {
code->code = fmt->mbus_code;
+ if (fmt->pixel_enc == V4L2_PIXEL_ENC_YUV &&
+ dir == RKISP1_ISP_SD_SRC)
+ code->flags =
+ V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION;
return 0;
}
}
@@ -620,7 +624,6 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
- src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
src_crop = v4l2_subdev_get_try_crop(sd, cfg,
RKISP1_ISP_PAD_SOURCE_VIDEO);
@@ -663,9 +666,18 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
isp->src_fmt = mbus_info;
src_fmt->width = src_crop->width;
src_fmt->height = src_crop->height;
- src_fmt->quantization = format->quantization;
- /* full range by default */
- if (!src_fmt->quantization)
+
+ /*
+ * The CSC API is used to allow userspace to force full
+ * quantization on YUV formats.
+ */
+ if (format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC &&
+ format->quantization == V4L2_QUANTIZATION_FULL_RANGE &&
+ mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
+ src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ else if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
+ src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ else
src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
*format = *src_fmt;
@@ -940,7 +952,7 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
if (rkisp1->active_sensor->mbus_type != V4L2_MBUS_CSI2_DPHY)
return -EINVAL;
- atomic_set(&rkisp1->isp.frame_sequence, -1);
+ rkisp1->isp.frame_sequence = -1;
mutex_lock(&isp->ops_lock);
ret = rkisp1_config_cif(rkisp1);
if (ret)
@@ -989,8 +1001,7 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
.pad = &rkisp1_isp_pad_ops,
};
-int rkisp1_isp_register(struct rkisp1_device *rkisp1,
- struct v4l2_device *v4l2_dev)
+int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
@@ -1018,7 +1029,7 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1,
if (ret)
return ret;
- ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ ret = v4l2_device_register_subdev(&rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(rkisp1->dev, "Failed to register isp subdev\n");
goto err_cleanup_media_entity;
@@ -1093,15 +1104,8 @@ static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
struct v4l2_event event = {
.type = V4L2_EVENT_FRAME_SYNC,
};
+ event.u.frame_sync.frame_sequence = isp->frame_sequence;
- /*
- * Increment the frame sequence on the vsync signal.
- * This will allow applications to detect dropped.
- * Note that there is a debugfs counter for dropped
- * frames, but using this event is more accurate.
- */
- event.u.frame_sync.frame_sequence =
- atomic_inc_return(&isp->frame_sequence);
v4l2_event_queue(isp->sd.devnode, &event);
}
@@ -1116,9 +1120,14 @@ void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
/* Vertical sync signal, starting generating new frame */
- if (status & RKISP1_CIF_ISP_V_START)
+ if (status & RKISP1_CIF_ISP_V_START) {
+ rkisp1->isp.frame_sequence++;
rkisp1_isp_queue_event_sof(&rkisp1->isp);
-
+ if (status & RKISP1_CIF_ISP_FRAME) {
+ WARN_ONCE(1, "irq delay is too long, buffers might not be in sync\n");
+ rkisp1->debug.irq_delay++;
+ }
+ }
if (status & RKISP1_CIF_ISP_PIC_SIZE_ERROR) {
/* Clear pic_size_error */
isp_err = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ERR);
@@ -1141,12 +1150,12 @@ void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
isp_ris = rkisp1_read(rkisp1, RKISP1_CIF_ISP_RIS);
if (isp_ris & RKISP1_STATS_MEAS_MASK)
rkisp1_stats_isr(&rkisp1->stats, isp_ris);
+ /*
+		 * Then update changed configs. Some of them involve a
+		 * lot of register writes. Do those only once per frame.
+ * Do the updates in the order of the processing flow.
+ */
+ rkisp1_params_isr(rkisp1);
}
- /*
- * Then update changed configs. Some of them involve
- * lot of register writes. Do those only one per frame.
- * Do the updates in the order of the processing flow.
- */
- rkisp1_params_isr(rkisp1, status);
}
diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c
index 797e79de659c..986d293201e6 100644
--- a/drivers/staging/media/rkisp1/rkisp1-params.c
+++ b/drivers/staging/media/rkisp1/rkisp1-params.c
@@ -206,47 +206,45 @@ rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
/* program data tables (table size is 9 * 17 = 153) */
- for (i = 0;
- i < RKISP1_CIF_ISP_LSC_SECTORS_MAX * RKISP1_CIF_ISP_LSC_SECTORS_MAX;
- i += RKISP1_CIF_ISP_LSC_SECTORS_MAX) {
+ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
/*
* 17 sectors with 2 values in one DWORD = 9
* DWORDs (2nd value of last DWORD unused)
*/
- for (j = 0; j < RKISP1_CIF_ISP_LSC_SECTORS_MAX - 1; j += 2) {
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j],
- pconfig->r_data_tbl[i + j + 1]);
+ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j],
- pconfig->gr_data_tbl[i + j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j],
- pconfig->gb_data_tbl[i + j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j],
- pconfig->b_data_tbl[i + j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i + j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i + j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i + j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i + j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
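
The rewritten loops keep the old hardware layout while indexing the tables as 17x17 2D arrays: each 17-sample row is streamed out as nine 32-bit words, two samples per word, with the second half of the ninth word unused. A standalone sketch of that packing; the field layout in TABLE_DATA below is a guess for illustration, standing in for the real RKISP1_CIF_ISP_LSC_TABLE_DATA macro:

#include <stdint.h>
#include <stdio.h>

#define SAMPLES_MAX 17	/* mirrors RKISP1_CIF_ISP_LSC_SAMPLES_MAX */

/* hypothetical packing of two 12-bit samples into one register word */
#define TABLE_DATA(v0, v1)	(((uint32_t)(v1) << 12) | (uint32_t)(v0))

int main(void)
{
	uint16_t row[SAMPLES_MAX];
	unsigned int j, words = 0;
	uint32_t data;

	for (j = 0; j < SAMPLES_MAX; j++)
		row[j] = j;	/* dummy gain samples */

	/* sixteen samples pair up into eight full words... */
	for (j = 0; j < SAMPLES_MAX - 1; j += 2) {
		data = TABLE_DATA(row[j], row[j + 1]);
		words++;
	}
	/* ...and the odd seventeenth sample fills half of a ninth word */
	data = TABLE_DATA(row[j], 0);
	words++;

	(void)data;
	printf("%u samples -> %u words per row\n", SAMPLES_MAX, words);
	return 0;
}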
@@ -269,7 +267,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
rkisp1_lsc_correct_matrix_config(params, arg);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE / 2; i++) {
/* program x size tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
arg->x_size_tbl[i * 2 + 1]);
@@ -402,21 +400,15 @@ static void rkisp1_goc_config(struct rkisp1_params *params,
static void rkisp1_ctk_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ctk_config *arg)
{
- rkisp1_write(params->rkisp1, arg->coeff0, RKISP1_CIF_ISP_CT_COEFF_0);
- rkisp1_write(params->rkisp1, arg->coeff1, RKISP1_CIF_ISP_CT_COEFF_1);
- rkisp1_write(params->rkisp1, arg->coeff2, RKISP1_CIF_ISP_CT_COEFF_2);
- rkisp1_write(params->rkisp1, arg->coeff3, RKISP1_CIF_ISP_CT_COEFF_3);
- rkisp1_write(params->rkisp1, arg->coeff4, RKISP1_CIF_ISP_CT_COEFF_4);
- rkisp1_write(params->rkisp1, arg->coeff5, RKISP1_CIF_ISP_CT_COEFF_5);
- rkisp1_write(params->rkisp1, arg->coeff6, RKISP1_CIF_ISP_CT_COEFF_6);
- rkisp1_write(params->rkisp1, arg->coeff7, RKISP1_CIF_ISP_CT_COEFF_7);
- rkisp1_write(params->rkisp1, arg->coeff8, RKISP1_CIF_ISP_CT_COEFF_8);
- rkisp1_write(params->rkisp1, arg->ct_offset_r,
- RKISP1_CIF_ISP_CT_OFFSET_R);
- rkisp1_write(params->rkisp1, arg->ct_offset_g,
- RKISP1_CIF_ISP_CT_OFFSET_G);
- rkisp1_write(params->rkisp1, arg->ct_offset_b,
- RKISP1_CIF_ISP_CT_OFFSET_B);
+ unsigned int i, j, k = 0;
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 3; j++)
+ rkisp1_write(params->rkisp1, arg->coeff[i][j],
+ RKISP1_CIF_ISP_CT_COEFF_0 + 4 * k++);
+ for (i = 0; i < 3; i++)
+ rkisp1_write(params->rkisp1, arg->ct_offset[i],
+ RKISP1_CIF_ISP_CT_OFFSET_R + i * 4);
}
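
The loop relies on the cross-talk coefficient and offset registers being laid out consecutively as 32-bit words, so flattening coeff[i][j] with a running index k lands each value at COEFF_0 + 4 * k, reproducing the nine unrolled writes it replaces. A quick standalone check of that mapping, using an illustrative base offset rather than the real register address:

#include <stdio.h>

#define CT_COEFF_0	0x0100	/* illustrative base, not the real offset */

int main(void)
{
	unsigned int i, j, k = 0;

	for (i = 0; i < 3; i++)
		for (j = 0; j < 3; j++)
			printf("coeff[%u][%u] -> reg 0x%04x\n",
			       i, j, CT_COEFF_0 + 4 * k++);
	/* prints offsets 0x0100, 0x0104, ... 0x0120: COEFF_0 through COEFF_8 */
	return 0;
}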
static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
@@ -560,7 +552,7 @@ static void rkisp1_cproc_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_cproc_config *arg)
{
struct rkisp1_cif_isp_isp_other_cfg *cur_other_cfg =
- &params->cur_params.others;
+ container_of(arg, struct rkisp1_cif_isp_isp_other_cfg, cproc_config);
struct rkisp1_cif_isp_ie_config *cur_ie_config =
&cur_other_cfg->ie_config;
u32 effect = cur_ie_config->effect;
@@ -1193,48 +1185,52 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
}
}
-void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis)
+static void rkisp1_params_apply_params_cfg(struct rkisp1_params *params,
+ unsigned int frame_sequence)
{
- unsigned int frame_sequence = atomic_read(&rkisp1->isp.frame_sequence);
- struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_params_cfg *new_params;
struct rkisp1_buffer *cur_buf = NULL;
- spin_lock(&params->config_lock);
- if (!params->is_streaming) {
- spin_unlock(&params->config_lock);
+ if (list_empty(&params->params))
return;
- }
- /* get one empty buffer */
- if (!list_empty(&params->params))
- cur_buf = list_first_entry(&params->params,
- struct rkisp1_buffer, queue);
- spin_unlock(&params->config_lock);
+ cur_buf = list_first_entry(&params->params,
+ struct rkisp1_buffer, queue);
- if (!cur_buf)
- return;
+ new_params = (struct rkisp1_params_cfg *)(cur_buf->vaddr);
- new_params = (struct rkisp1_params_cfg *)(cur_buf->vaddr[0]);
+ rkisp1_isp_isr_other_config(params, new_params);
+ rkisp1_isp_isr_meas_config(params, new_params);
- if (isp_mis & RKISP1_CIF_ISP_FRAME) {
- u32 isp_ctrl;
+ /* update shadow register immediately */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
- rkisp1_isp_isr_other_config(params, new_params);
- rkisp1_isp_isr_meas_config(params, new_params);
+ list_del(&cur_buf->queue);
- /* update shadow register immediately */
- isp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_CTRL);
- isp_ctrl |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
- rkisp1_write(params->rkisp1, isp_ctrl, RKISP1_CIF_ISP_CTRL);
+ cur_buf->vb.sequence = frame_sequence;
+ vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
- spin_lock(&params->config_lock);
- list_del(&cur_buf->queue);
- spin_unlock(&params->config_lock);
+void rkisp1_params_isr(struct rkisp1_device *rkisp1)
+{
+ /*
+	 * This handler is called when the ISP finishes processing a frame (RKISP1_CIF_ISP_FRAME).
+ * Configurations performed here will be applied on the next frame.
+ * Since frame_sequence is updated on the vertical sync signal, we should use
+ * frame_sequence + 1 here to indicate to userspace on which frame these parameters
+ * are being applied.
+ */
+ unsigned int frame_sequence = rkisp1->isp.frame_sequence + 1;
+ struct rkisp1_params *params = &rkisp1->params;
- cur_buf->vb.sequence = frame_sequence;
- vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+ spin_lock(&params->config_lock);
+ if (!params->is_streaming) {
+ spin_unlock(&params->config_lock);
+ return;
}
+ rkisp1_params_apply_params_cfg(params, frame_sequence);
+
+ spin_unlock(&params->config_lock);
}
static const struct rkisp1_cif_isp_awb_meas_config rkisp1_awb_params_default_config = {
@@ -1280,8 +1276,6 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- spin_lock(&params->config_lock);
-
rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
true);
@@ -1306,14 +1300,15 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
else
rkisp1_csm_config(params, false);
- /* override the default things */
- rkisp1_isp_isr_other_config(params, &params->cur_params);
- rkisp1_isp_isr_meas_config(params, &params->cur_params);
+ spin_lock_irq(&params->config_lock);
- spin_unlock(&params->config_lock);
+ /* apply the first buffer if there is one already */
+ if (params->is_streaming)
+ rkisp1_params_apply_params_cfg(params, 0);
+
+ spin_unlock_irq(&params->config_lock);
}
-/* Not called when the camera active, thus not isr protection. */
void rkisp1_params_configure(struct rkisp1_params *params,
enum rkisp1_fmt_raw_pat_type bayer_pat,
enum v4l2_quantization quantization)
@@ -1436,8 +1431,6 @@ static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
sizes[0] = sizeof(struct rkisp1_params_cfg);
INIT_LIST_HEAD(&params->params);
- params->is_first_params = true;
-
return 0;
}
@@ -1448,25 +1441,11 @@ static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
container_of(vbuf, struct rkisp1_buffer, vb);
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_params *params = vq->drv_priv;
- struct rkisp1_params_cfg *new_params;
- unsigned long flags;
- unsigned int frame_sequence =
- atomic_read(&params->rkisp1->isp.frame_sequence);
-
- if (params->is_first_params) {
- new_params = (struct rkisp1_params_cfg *)
- (vb2_plane_vaddr(vb, 0));
- vbuf->sequence = frame_sequence;
- vb2_buffer_done(&params_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
- params->is_first_params = false;
- params->cur_params = *new_params;
- return;
- }
- params_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
- spin_lock_irqsave(&params->config_lock, flags);
+ params_buf->vaddr = vb2_plane_vaddr(vb, 0);
+ spin_lock_irq(&params->config_lock);
list_add_tail(&params_buf->queue, &params->params);
- spin_unlock_irqrestore(&params->config_lock, flags);
+ spin_unlock_irq(&params->config_lock);
}
static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
@@ -1483,43 +1462,32 @@ static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
{
struct rkisp1_params *params = vq->drv_priv;
struct rkisp1_buffer *buf;
- unsigned long flags;
- unsigned int i;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
- /* stop params input firstly */
- spin_lock_irqsave(&params->config_lock, flags);
+ /*
+ * We first move the buffers to a local list 'tmp_list' so that we
+ * can iterate over it and call vb2_buffer_done() without holding
+ * the lock.
+ */
+ spin_lock_irq(&params->config_lock);
params->is_streaming = false;
- spin_unlock_irqrestore(&params->config_lock, flags);
-
- for (i = 0; i < RKISP1_ISP_PARAMS_REQ_BUFS_MAX; i++) {
- spin_lock_irqsave(&params->config_lock, flags);
- if (!list_empty(&params->params)) {
- buf = list_first_entry(&params->params,
- struct rkisp1_buffer, queue);
- list_del(&buf->queue);
- spin_unlock_irqrestore(&params->config_lock,
- flags);
- } else {
- spin_unlock_irqrestore(&params->config_lock,
- flags);
- break;
- }
+ list_cut_position(&tmp_list, &params->params, params->params.prev);
+ spin_unlock_irq(&params->config_lock);
- if (buf)
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- buf = NULL;
- }
+ list_for_each_entry(buf, &tmp_list, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
static int
rkisp1_params_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
{
struct rkisp1_params *params = queue->drv_priv;
- unsigned long flags;
- spin_lock_irqsave(&params->config_lock, flags);
+ spin_lock_irq(&params->config_lock);
params->is_streaming = true;
- spin_unlock_irqrestore(&params->config_lock, flags);
+ spin_unlock_irq(&params->config_lock);
return 0;
}
@@ -1570,10 +1538,9 @@ static void rkisp1_init_params(struct rkisp1_params *params)
sizeof(struct rkisp1_params_cfg);
}
-int rkisp1_params_register(struct rkisp1_params *params,
- struct v4l2_device *v4l2_dev,
- struct rkisp1_device *rkisp1)
+int rkisp1_params_register(struct rkisp1_device *rkisp1)
{
+ struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_vdev_node *node = &params->vnode;
struct video_device *vdev = &node->vdev;
int ret;
@@ -1593,7 +1560,7 @@ int rkisp1_params_register(struct rkisp1_params *params,
* to protect all fops and v4l2 ioctls.
*/
vdev->lock = &node->vlock;
- vdev->v4l2_dev = v4l2_dev;
+ vdev->v4l2_dev = &rkisp1->v4l2_dev;
vdev->queue = &node->buf_queue;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
vdev->vfl_dir = VFL_DIR_TX;
@@ -1604,7 +1571,7 @@ int rkisp1_params_register(struct rkisp1_params *params,
node->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
- goto err_release_queue;
+ return ret;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(rkisp1->dev,
@@ -1614,17 +1581,15 @@ int rkisp1_params_register(struct rkisp1_params *params,
return 0;
err_cleanup_media_entity:
media_entity_cleanup(&vdev->entity);
-err_release_queue:
- vb2_queue_release(vdev->queue);
return ret;
}
-void rkisp1_params_unregister(struct rkisp1_params *params)
+void rkisp1_params_unregister(struct rkisp1_device *rkisp1)
{
+ struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_vdev_node *node = &params->vnode;
struct video_device *vdev = &node->vdev;
- video_unregister_device(vdev);
+ vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
- vb2_queue_release(vdev->queue);
}
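With the refactor above, a queued parameters buffer is applied in rkisp1_params_apply_params_cfg() and handed back to userspace with its sequence field set to frame_sequence + 1. A minimal, hypothetical userspace sketch (not part of this patch; fd and the buffer setup are assumed) of queueing a buffer on the params META_OUTPUT node:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* fd is the rkisp1 params video node, opened and set up elsewhere. */
    static int queue_params_buf(int fd, unsigned int index)
    {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_META_OUTPUT;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = index;
        return ioctl(fd, VIDIOC_QBUF, &buf);
    }

    /*
     * After VIDIOC_DQBUF on the same node, buf.sequence identifies the
     * frame on which the driver applied these parameters.
     */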
diff --git a/drivers/staging/media/rkisp1/rkisp1-regs.h b/drivers/staging/media/rkisp1/rkisp1-regs.h
index 9b8e616ea24c..049f6c3a11df 100644
--- a/drivers/staging/media/rkisp1/rkisp1-regs.h
+++ b/drivers/staging/media/rkisp1/rkisp1-regs.h
@@ -475,7 +475,6 @@
#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED 0xF000F000
#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_SECTORS_MAX 17
#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
index c66d2a52fd71..1687d82e6c68 100644
--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
+++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
@@ -16,8 +16,36 @@
#define RKISP1_DEF_FMT MEDIA_BUS_FMT_YUYV8_2X8
#define RKISP1_DEF_PIXEL_ENC V4L2_PIXEL_ENC_YUV
-#define RKISP1_MBUS_FMT_HDIV 2
-#define RKISP1_MBUS_FMT_VDIV 1
+struct rkisp1_rsz_yuv_mbus_info {
+ u32 mbus_code;
+ u32 hdiv;
+ u32 vdiv;
+};
+
+static const struct rkisp1_rsz_yuv_mbus_info rkisp1_rsz_yuv_src_formats[] = {
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, /* YUV422 */
+ .hdiv = 2,
+ .vdiv = 1,
+ },
+ {
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8, /* YUV420 */
+ .hdiv = 2,
+ .vdiv = 2,
+ },
+};
+
+static const struct rkisp1_rsz_yuv_mbus_info *rkisp1_rsz_get_yuv_mbus_info(u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rkisp1_rsz_yuv_src_formats); i++) {
+ if (rkisp1_rsz_yuv_src_formats[i].mbus_code == mbus_code)
+ return &rkisp1_rsz_yuv_src_formats[i];
+ }
+
+ return NULL;
+}
enum rkisp1_shadow_regs_when {
RKISP1_SHADOW_REGS_SYNC,
@@ -361,16 +389,19 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
- u8 hdiv = RKISP1_MBUS_FMT_HDIV, vdiv = RKISP1_MBUS_FMT_VDIV;
+ const struct rkisp1_rsz_yuv_mbus_info *sink_yuv_info, *src_yuv_info;
struct v4l2_rect sink_y, sink_c, src_y, src_c;
- struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
struct v4l2_rect *sink_crop;
- struct rkisp1_capture *cap = &rsz->rkisp1->capture_devs[rsz->id];
sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
V4L2_SUBDEV_FORMAT_ACTIVE);
+ src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
+ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ sink_yuv_info = rkisp1_rsz_get_yuv_mbus_info(sink_fmt->code);
/*
* The resizer only works on yuv formats,
@@ -386,25 +417,17 @@ static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
src_y.width = src_fmt->width;
src_y.height = src_fmt->height;
- sink_c.width = sink_y.width / RKISP1_MBUS_FMT_HDIV;
- sink_c.height = sink_y.height / RKISP1_MBUS_FMT_VDIV;
+ sink_c.width = sink_y.width / sink_yuv_info->hdiv;
+ sink_c.height = sink_y.height / sink_yuv_info->vdiv;
/*
* The resizer is used not only to change the dimensions of the frame
* but also to change the scale for YUV formats,
* (4:2:2 -> 4:2:0 for example). So the width/height of the CbCr
- * streams should be set according to the pixel format in the capture.
- * The resizer always gets the input as YUV422. If the capture format
- * is RGB then the memory input should be YUV422 so we don't change the
- * default hdiv, vdiv in that case.
+ * streams should be set according to the media bus format in the src pad.
*/
- if (v4l2_is_format_yuv(cap->pix.info)) {
- hdiv = cap->pix.info->hdiv;
- vdiv = cap->pix.info->vdiv;
- }
-
- src_c.width = src_y.width / hdiv;
- src_c.height = src_y.height / vdiv;
+ src_c.width = src_y.width / src_yuv_info->hdiv;
+ src_c.height = src_y.height / src_yuv_info->vdiv;
if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
rkisp1_rsz_disable(rsz, when);
@@ -437,13 +460,32 @@ static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
u32 pad = code->pad;
int ret;
- /* supported mbus codes are the same in isp video src pad */
+ if (code->pad == RKISP1_RSZ_PAD_SRC) {
+ /* supported mbus codes on the src are the same as in the capture */
+ struct rkisp1_capture *cap = &rsz->rkisp1->capture_devs[rsz->id];
+
+ return rkisp1_cap_enum_mbus_codes(cap, code);
+ }
+
+ /*
+ * The selfpath capture doesn't support Bayer formats. Therefore the
+ * selfpath resizer should support only YUV422 on the sink pad.
+ */
+ if (rsz->id == RKISP1_SELFPATH) {
+ if (code->index > 0)
+ return -EINVAL;
+ code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ return 0;
+ }
+
+ /* supported mbus codes on the sink pad are the same as isp src pad */
code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
&dummy_cfg, code);
/* restore pad */
code->pad = pad;
+ code->flags = 0;
return ret;
}
@@ -478,9 +520,17 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
+ const struct rkisp1_isp_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *src_fmt;
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
+ mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
+
+ /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
+ if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
+ rkisp1_rsz_get_yuv_mbus_info(format->code))
+ src_fmt->code = format->code;
+
src_fmt->width = clamp_t(u32, format->width,
rsz->config->min_rsz_width,
rsz->config->max_rsz_width);
@@ -540,7 +590,11 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, RKISP1_RSZ_PAD_SINK,
which);
- sink_fmt->code = format->code;
+ if (rsz->id == RKISP1_SELFPATH)
+ sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
+ else
+ sink_fmt->code = format->code;
+
mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) {
sink_fmt->code = RKISP1_DEF_FMT;
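The hdiv/vdiv pair in rkisp1_rsz_yuv_src_formats encodes the chroma subsampling of each media bus code, which is what the resizer configuration above divides by. An illustrative sketch of the arithmetic (the helper and the example sizes are not from the driver):

    struct yuv_divs { unsigned int hdiv, vdiv; };

    static void chroma_size(unsigned int luma_w, unsigned int luma_h,
                            const struct yuv_divs *d,
                            unsigned int *chroma_w, unsigned int *chroma_h)
    {
        /* YUV422 (hdiv = 2, vdiv = 1): 1920x1080 luma -> 960x1080 chroma */
        /* YUV420 (hdiv = 2, vdiv = 2): 1920x1080 luma -> 960x540 chroma */
        *chroma_w = luma_w / d->hdiv;
        *chroma_h = luma_h / d->vdiv;
    }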
diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c
index 87e4104d20dd..51c64f75fe29 100644
--- a/drivers/staging/media/rkisp1/rkisp1-stats.c
+++ b/drivers/staging/media/rkisp1/rkisp1-stats.c
@@ -116,7 +116,7 @@ static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb)
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_stats *stats_dev = vq->drv_priv;
- stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0);
+ stats_buf->vaddr = vb2_plane_vaddr(vb, 0);
spin_lock_irq(&stats_dev->lock);
list_add_tail(&stats_buf->queue, &stats_dev->stat);
@@ -157,7 +157,9 @@ rkisp1_stats_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
{
struct rkisp1_stats *stats = queue->drv_priv;
+ spin_lock_irq(&stats->lock);
stats->is_streaming = true;
+ spin_unlock_irq(&stats->lock);
return 0;
}
@@ -231,7 +233,7 @@ static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
struct rkisp1_device *rkisp1 = stats->rkisp1;
struct rkisp1_cif_isp_af_stat *af;
- pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AFM_FIN;
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AFM;
af = &pbuf->params.af;
af->window[0].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_A);
@@ -307,8 +309,7 @@ rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
{
struct rkisp1_stat_buffer *cur_stat_buf;
struct rkisp1_buffer *cur_buf = NULL;
- unsigned int frame_sequence =
- atomic_read(&stats->rkisp1->isp.frame_sequence);
+ unsigned int frame_sequence = stats->rkisp1->isp.frame_sequence;
u64 timestamp = ktime_get_ns();
/* get one empty buffer */
@@ -322,7 +323,7 @@ rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
return;
cur_stat_buf =
- (struct rkisp1_stat_buffer *)(cur_buf->vaddr[0]);
+ (struct rkisp1_stat_buffer *)(cur_buf->vaddr);
if (isp_ris & RKISP1_CIF_ISP_AWB_DONE)
rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
@@ -375,10 +376,9 @@ static void rkisp1_init_stats(struct rkisp1_stats *stats)
sizeof(struct rkisp1_stat_buffer);
}
-int rkisp1_stats_register(struct rkisp1_stats *stats,
- struct v4l2_device *v4l2_dev,
- struct rkisp1_device *rkisp1)
+int rkisp1_stats_register(struct rkisp1_device *rkisp1)
{
+ struct rkisp1_stats *stats = &rkisp1->stats;
struct rkisp1_vdev_node *node = &stats->vnode;
struct video_device *vdev = &node->vdev;
int ret;
@@ -395,7 +395,7 @@ int rkisp1_stats_register(struct rkisp1_stats *stats,
vdev->fops = &rkisp1_stats_fops;
vdev->release = video_device_release_empty;
vdev->lock = &node->vlock;
- vdev->v4l2_dev = v4l2_dev;
+ vdev->v4l2_dev = &rkisp1->v4l2_dev;
vdev->queue = &node->buf_queue;
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
vdev->vfl_dir = VFL_DIR_RX;
@@ -406,7 +406,7 @@ int rkisp1_stats_register(struct rkisp1_stats *stats,
node->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
- goto err_release_queue;
+ goto err_mutex_destroy;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
@@ -419,19 +419,18 @@ int rkisp1_stats_register(struct rkisp1_stats *stats,
err_cleanup_media_entity:
media_entity_cleanup(&vdev->entity);
-err_release_queue:
- vb2_queue_release(vdev->queue);
+err_mutex_destroy:
mutex_destroy(&node->vlock);
return ret;
}
-void rkisp1_stats_unregister(struct rkisp1_stats *stats)
+void rkisp1_stats_unregister(struct rkisp1_device *rkisp1)
{
+ struct rkisp1_stats *stats = &rkisp1->stats;
struct rkisp1_vdev_node *node = &stats->vnode;
struct video_device *vdev = &node->vdev;
- video_unregister_device(vdev);
+ vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
- vb2_queue_release(vdev->queue);
mutex_destroy(&node->vlock);
}
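Since the AF bit is renamed to RKISP1_CIF_ISP_STAT_AFM in the uAPI header below, a statistics consumer is expected to test meas_type before touching a measurement. A hedged userspace-side sketch (process_af() and process_hist() are hypothetical helpers; stat points at a dequeued, mapped stats buffer):

    struct rkisp1_stat_buffer *stat = buf_vaddr;

    if (stat->meas_type & RKISP1_CIF_ISP_STAT_AFM)
        process_af(&stat->params.af);
    if (stat->meas_type & RKISP1_CIF_ISP_STAT_HIST)
        process_hist(&stat->params.hist);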
diff --git a/drivers/staging/media/rkisp1/uapi/rkisp1-config.h b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
index 8f9b061e5b6b..432cb6be55b4 100644
--- a/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
+++ b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h
@@ -4,11 +4,6 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
-/*
- * TODO: Improve documentation, mostly regarding abbreviation and hardware
- * specificities. Reference: "REF_01 - ISP_user_manual, Rev 2.57" (not public)
- */
-
#ifndef _UAPI_RKISP1_CONFIG_H
#define _UAPI_RKISP1_CONFIG_H
@@ -18,24 +13,42 @@
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 params */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A statistics */
-#define RKISP1_CIF_ISP_MODULE_DPCC BIT(0)
-#define RKISP1_CIF_ISP_MODULE_BLS BIT(1)
-#define RKISP1_CIF_ISP_MODULE_SDG BIT(2)
-#define RKISP1_CIF_ISP_MODULE_HST BIT(3)
-#define RKISP1_CIF_ISP_MODULE_LSC BIT(4)
-#define RKISP1_CIF_ISP_MODULE_AWB_GAIN BIT(5)
-#define RKISP1_CIF_ISP_MODULE_FLT BIT(6)
-#define RKISP1_CIF_ISP_MODULE_BDM BIT(7)
-#define RKISP1_CIF_ISP_MODULE_CTK BIT(8)
-#define RKISP1_CIF_ISP_MODULE_GOC BIT(9)
-#define RKISP1_CIF_ISP_MODULE_CPROC BIT(10)
-#define RKISP1_CIF_ISP_MODULE_AFC BIT(11)
-#define RKISP1_CIF_ISP_MODULE_AWB BIT(12)
-#define RKISP1_CIF_ISP_MODULE_IE BIT(13)
-#define RKISP1_CIF_ISP_MODULE_AEC BIT(14)
-#define RKISP1_CIF_ISP_MODULE_WDR BIT(15)
-#define RKISP1_CIF_ISP_MODULE_DPF BIT(16)
-#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH BIT(17)
+/* Defect Pixel Cluster Detection */
+#define RKISP1_CIF_ISP_MODULE_DPCC (1U << 0)
+/* Black Level Subtraction */
+#define RKISP1_CIF_ISP_MODULE_BLS (1U << 1)
+/* Sensor De-gamma */
+#define RKISP1_CIF_ISP_MODULE_SDG (1U << 2)
+/* Histogram */
+#define RKISP1_CIF_ISP_MODULE_HST (1U << 3)
+/* Lens Shade Control */
+#define RKISP1_CIF_ISP_MODULE_LSC (1U << 4)
+/* Auto White Balance Gain */
+#define RKISP1_CIF_ISP_MODULE_AWB_GAIN (1U << 5)
+/* Filter */
+#define RKISP1_CIF_ISP_MODULE_FLT (1U << 6)
+/* Bayer Demosaic */
+#define RKISP1_CIF_ISP_MODULE_BDM (1U << 7)
+/* Cross Talk */
+#define RKISP1_CIF_ISP_MODULE_CTK (1U << 8)
+/* Gamma Out Curve */
+#define RKISP1_CIF_ISP_MODULE_GOC (1U << 9)
+/* Color Processing */
+#define RKISP1_CIF_ISP_MODULE_CPROC (1U << 10)
+/* Auto Focus Control */
+#define RKISP1_CIF_ISP_MODULE_AFC (1U << 11)
+/* Auto White Balancing */
+#define RKISP1_CIF_ISP_MODULE_AWB (1U << 12)
+/* Image Effect */
+#define RKISP1_CIF_ISP_MODULE_IE (1U << 13)
+/* Auto Exposure Control */
+#define RKISP1_CIF_ISP_MODULE_AEC (1U << 14)
+/* Wide Dynamic Range */
+#define RKISP1_CIF_ISP_MODULE_WDR (1U << 15)
+/* Denoise Pre-Filter */
+#define RKISP1_CIF_ISP_MODULE_DPF (1U << 16)
+/* Denoise Pre-Filter Strength */
+#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH (1U << 17)
#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
@@ -82,14 +95,13 @@
/*
* Lens shade correction
*/
-#define RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE 8
-#define RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE 8
+#define RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE 8
+
/*
* The following matches the tuning process,
* not the max capabilities of the chip.
- * Last value unused.
*/
-#define RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE 290
+#define RKISP1_CIF_ISP_LSC_SAMPLES_MAX 17
/*
* Histogram calculation
@@ -111,10 +123,10 @@
/*
* Measurement types
*/
-#define RKISP1_CIF_ISP_STAT_AWB BIT(0)
-#define RKISP1_CIF_ISP_STAT_AUTOEXP BIT(1)
-#define RKISP1_CIF_ISP_STAT_AFM_FIN BIT(2)
-#define RKISP1_CIF_ISP_STAT_HIST BIT(3)
+#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
+#define RKISP1_CIF_ISP_STAT_AUTOEXP (1U << 1)
+#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
+#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
enum rkisp1_cif_isp_histogram_mode {
RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
@@ -158,12 +170,23 @@ enum rkisp1_cif_isp_exp_meas_mode {
/*---------- PART1: Input Parameters ------------*/
+/**
+ * struct rkisp1_cif_isp_window - measurement window.
+ *
+ * Measurements are calculated per window inside the frame.
+ * This struct represents a window for a measurement.
+ *
+ * @h_offs: the horizontal offset of the window from the left of the frame in pixels.
+ * @v_offs: the vertical offset of the window from the top of the frame in pixels.
+ * @h_size: the horizontal size of the window in pixels.
+ * @v_size: the vertical size of the window in pixels.
+ */
struct rkisp1_cif_isp_window {
__u16 h_offs;
__u16 v_offs;
__u16 h_size;
__u16 v_size;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
@@ -181,7 +204,7 @@ struct rkisp1_cif_isp_bls_fixed_val {
__s16 gr;
__s16 gb;
__s16 b;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
@@ -203,7 +226,7 @@ struct rkisp1_cif_isp_bls_config {
struct rkisp1_cif_isp_window bls_window2;
__u8 bls_samples;
struct rkisp1_cif_isp_bls_fixed_val fixed_val;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpcc_methods_config - Methods Configuration used by DPCC
@@ -224,7 +247,7 @@ struct rkisp1_cif_isp_dpcc_methods_config {
__u32 pg_fac;
__u32 rnd_thresh;
__u32 rg_fac;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
@@ -245,53 +268,88 @@ struct rkisp1_cif_isp_dpcc_config {
struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
__u32 ro_limits;
__u32 rnd_offs;
-} __packed;
+};
+/**
+ * struct rkisp1_cif_isp_gamma_corr_curve - gamma curve point definition y-axis (output).
+ *
+ * The reset values define a linear curve which has the same effect as bypass. Reset values are:
+ * gamma_y[0] = 0x0000, gamma_y[1] = 0x0100, ... gamma_y[15] = 0x0f00, gamma_y[16] = 0xfff
+ *
+ * @gamma_y: the values for the y-axis of gamma curve points. Each value is 12 bit.
+ */
struct rkisp1_cif_isp_gamma_corr_curve {
__u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
-} __packed;
+};
+/**
+ * struct rkisp1_cif_isp_gamma_curve_x_axis_pnts - De-Gamma Curve definition x increments
+ * (sampling points). gamma_dx0 is for the lower samples (1-8), gamma_dx1 is for the
+ * higher samples (9-16). The reset value for both fields is 0x44444444. This means
+ * that each sample is 4 units away from the previous one on the x-axis.
+ *
+ * @gamma_dx0: gamma curve sample points definitions. Bits 0:2 for sample 1. Bit 3 unused.
+ * Bits 4:6 for sample 2. Bit 7 unused ... Bits 28:30 for sample 8. Bit 31 unused.
+ * @gamma_dx1: gamma curve sample points definitions. Bits 0:2 for sample 9. Bit 3 unused.
+ * Bits 4:6 for sample 10. Bit 7 unused ... Bits 28:30 for sample 16. Bit 31 unused.
+ */
struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
__u32 gamma_dx0;
__u32 gamma_dx1;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
*
- * @curve_x: gamma curve point definition axis for x
- * @xa_pnts: x increments
+ * @curve_r: gamma curve point definition axis for red
+ * @curve_g: gamma curve point definition axis for green
+ * @curve_b: gamma curve point definition axis for blue
+ * @xa_pnts: x axis increments
*/
struct rkisp1_cif_isp_sdg_config {
struct rkisp1_cif_isp_gamma_corr_curve curve_r;
struct rkisp1_cif_isp_gamma_corr_curve curve_g;
struct rkisp1_cif_isp_gamma_corr_curve curve_b;
struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
*
- * refer to REF_01 for details
+ * @r_data_tbl: sample table red
+ * @gr_data_tbl: sample table green (red)
+ * @gb_data_tbl: sample table green (blue)
+ * @b_data_tbl: sample table blue
+ * @x_grad_tbl: gradient table x
+ * @y_grad_tbl: gradient table y
+ * @x_size_tbl: size table x
+ * @y_size_tbl: size table y
+ * @config_width: not used at the moment
+ * @config_height: not used at the moment
*/
struct rkisp1_cif_isp_lsc_config {
- __u32 r_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
- __u32 gr_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
- __u32 gb_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
- __u32 b_data_tbl[RKISP1_CIF_ISP_LSC_DATA_TBL_SIZE];
+ __u16 r_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gr_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 gb_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
+ __u16 b_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
- __u32 x_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
- __u32 y_grad_tbl[RKISP1_CIF_ISP_LSC_GRAD_TBL_SIZE];
+ __u16 x_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
- __u32 x_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
- __u32 y_size_tbl[RKISP1_CIF_ISP_LSC_SIZE_TBL_SIZE];
+ __u16 x_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
+ __u16 y_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
__u16 config_width;
__u16 config_height;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_ie_config - Configuration used by image effects
*
+ * @effect: values from 'enum v4l2_colorfx'. Possible values are: V4L2_COLORFX_SEPIA,
+ * V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_AQUA, V4L2_COLORFX_EMBOSS,
+ * V4L2_COLORFX_SKETCH, V4L2_COLORFX_BW, V4L2_COLORFX_NEGATIVE
+ * @color_sel: bits 0:2 - colors bitmask (001 - blue, 010 - green, 100 - red).
+ * bits 8:15 - Threshold value of the RGB colors for the color selection effect.
* @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
* @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
* @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
@@ -308,7 +366,7 @@ struct rkisp1_cif_isp_ie_config {
__u16 eff_mat_4;
__u16 eff_mat_5;
__u16 eff_tint;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
@@ -330,13 +388,13 @@ struct rkisp1_cif_isp_cproc_config {
__u8 brightness;
__u8 sat;
__u8 hue;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_awb_meas_config - Configuration used by auto white balance
*
+ * @awb_mode: the awb meas mode. From enum rkisp1_cif_isp_awb_mode_type.
* @awb_wnd: white balance measurement window (in pixels)
- * (from enum rkisp1_cif_isp_awb_mode_type)
* @max_y: only pixel values < max_y contribute to awb measurement, set to 0
* to disable this feature
* @min_y: only pixel values > min_y contribute to awb measurement
@@ -348,6 +406,7 @@ struct rkisp1_cif_isp_cproc_config {
* (ucFrames=0 means 1 Frame)
* @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
* @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
+ * @enable_ymax_cmp: enable Y_MAX compare (Not valid in RGB measurement mode.)
*/
struct rkisp1_cif_isp_awb_meas_config {
/*
@@ -363,31 +422,49 @@ struct rkisp1_cif_isp_awb_meas_config {
__u8 awb_ref_cr;
__u8 awb_ref_cb;
__u8 enable_ymax_cmp;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
*
- * out_data_x = ( AWB_GEAIN_X * in_data + 128) >> 8
+ * All fields in this struct are 10 bit, where:
+ * 0x100 = 1, unsigned integer value, range 0 to 4 with 8 bit fractional part.
+ *
+ * out_data_x = ( AWB_GAIN_X * in_data + 128) >> 8
+ *
+ * @gain_red: gain value for red component.
+ * @gain_green_r: gain value for green component in red line.
+ * @gain_blue: gain value for blue component.
+ * @gain_green_b: gain value for green component in blue line.
*/
struct rkisp1_cif_isp_awb_gain_config {
__u16 gain_red;
__u16 gain_green_r;
__u16 gain_blue;
__u16 gain_green_b;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
*
- * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
- * @grn_stage1: ISP_FILT_MODE register fields
- * @chr_h_mode: ISP_FILT_MODE register fields
- * @chr_v_mode: ISP_FILT_MODE register fields
+ * All 4 threshold fields (thresh_*) are 10 bits.
+ * All 6 factor fields (fac_*) are 6 bits.
*
- * refer to REF_01 for details.
+ * @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
+ * @grn_stage1: Green filter stage 1 select (range 0x0...0x8)
+ * @chr_h_mode: Chroma filter horizontal mode
+ * @chr_v_mode: Chroma filter vertical mode
+ * @thresh_bl0: If thresh_bl1 < sum_grad < thresh_bl0 then fac_bl0 is selected (blurring threshold)
+ * @thresh_bl1: If sum_grad < thresh_bl1 then fac_bl1 is selected (blurring threshold)
+ * @thresh_sh0: If thresh_sh0 < sum_grad < thresh_sh1 then fac_sh0 is selected (sharpening threshold)
+ * @thresh_sh1: If thresh_sh1 < sum_grad then fac_sh1 is selected (sharpening threshold)
+ * @lum_weight: Parameters for luminance weight function.
+ * @fac_sh1: filter factor for sharp1 level
+ * @fac_sh0: filter factor for sharp0 level
+ * @fac_mid: filter factor for mid level and for static filter mode
+ * @fac_bl0: filter factor for blur 0 level
+ * @fac_bl1: filter factor for blur 1 level (max blur)
*/
-
struct rkisp1_cif_isp_flt_config {
__u32 mode;
__u8 grn_stage1;
@@ -403,7 +480,7 @@ struct rkisp1_cif_isp_flt_config {
__u32 fac_mid;
__u32 fac_bl0;
__u32 fac_bl1;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
@@ -412,28 +489,20 @@ struct rkisp1_cif_isp_flt_config {
*/
struct rkisp1_cif_isp_bdm_config {
__u8 demosaic_th;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
*
- * @coeff: color correction matrix
- * @ct_offset_b: offset for the crosstalk correction matrix
+ * @coeff: color correction matrix. Values are 11-bit signed fixed-point numbers with 4 bit integer
+ * and 7 bit fractional part, ranging from -8 (0x400) to +7.992 (0x3FF). 0 is
+ * represented by 0x000 and a coefficient value of 1 as 0x080.
+ * @ct_offset: Red, Green, Blue offsets for the crosstalk correction matrix
*/
struct rkisp1_cif_isp_ctk_config {
- __u16 coeff0;
- __u16 coeff1;
- __u16 coeff2;
- __u16 coeff3;
- __u16 coeff4;
- __u16 coeff5;
- __u16 coeff6;
- __u16 coeff7;
- __u16 coeff8;
- __u16 ct_offset_r;
- __u16 ct_offset_g;
- __u16 ct_offset_b;
-} __packed;
+ __u16 coeff[3][3];
+ __u16 ct_offset[3];
+};
enum rkisp1_cif_isp_goc_mode {
RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
@@ -449,7 +518,7 @@ enum rkisp1_cif_isp_goc_mode {
struct rkisp1_cif_isp_goc_config {
__u32 mode;
__u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
-} __packed;
+};
/**
* struct rkisp1_cif_isp_hst_config - Configuration used by Histogram
@@ -465,7 +534,7 @@ struct rkisp1_cif_isp_hst_config {
__u8 histogram_predivider;
struct rkisp1_cif_isp_window meas_window;
__u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
-} __packed;
+};
/**
* struct rkisp1_cif_isp_aec_config - Configuration used by Auto Exposure Control
@@ -478,7 +547,7 @@ struct rkisp1_cif_isp_aec_config {
__u32 mode;
__u32 autostop;
struct rkisp1_cif_isp_window meas_window;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_afc_config - Configuration used by Auto Focus Control
@@ -494,7 +563,7 @@ struct rkisp1_cif_isp_afc_config {
struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
__u32 thres;
__u32 var_shift;
-} __packed;
+};
/**
* enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
@@ -549,7 +618,7 @@ enum rkisp1_cif_isp_dpf_nll_scale_mode {
struct rkisp1_cif_isp_dpf_nll {
__u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
__u32 scale_mode;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
@@ -565,7 +634,7 @@ struct rkisp1_cif_isp_dpf_rb_flt {
__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
__u8 r_enable;
__u8 b_enable;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
@@ -578,7 +647,7 @@ struct rkisp1_cif_isp_dpf_g_flt {
__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
__u8 gr_enable;
__u8 gb_enable;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
@@ -597,7 +666,7 @@ struct rkisp1_cif_isp_dpf_gain {
__u16 nf_b_gain;
__u16 nf_gr_gain;
__u16 nf_gb_gain;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
@@ -612,7 +681,7 @@ struct rkisp1_cif_isp_dpf_config {
struct rkisp1_cif_isp_dpf_g_flt g_flt;
struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
struct rkisp1_cif_isp_dpf_nll nll;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
@@ -625,7 +694,7 @@ struct rkisp1_cif_isp_dpf_strength_config {
__u8 r;
__u8 g;
__u8 b;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
@@ -659,7 +728,7 @@ struct rkisp1_cif_isp_isp_other_cfg {
struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
struct rkisp1_cif_isp_cproc_config cproc_config;
struct rkisp1_cif_isp_ie_config ie_config;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
@@ -674,7 +743,7 @@ struct rkisp1_cif_isp_isp_meas_cfg {
struct rkisp1_cif_isp_hst_config hst_config;
struct rkisp1_cif_isp_aec_config aec_config;
struct rkisp1_cif_isp_afc_config afc_config;
-} __packed;
+};
/**
* struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
@@ -693,7 +762,7 @@ struct rkisp1_params_cfg {
struct rkisp1_cif_isp_isp_meas_cfg meas;
struct rkisp1_cif_isp_isp_other_cfg others;
-} __packed;
+};
/*---------- PART2: Measurement Statistics ------------*/
@@ -714,7 +783,7 @@ struct rkisp1_cif_isp_awb_meas {
__u8 mean_y_or_g;
__u8 mean_cb_or_b;
__u8 mean_cr_or_r;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
@@ -723,7 +792,7 @@ struct rkisp1_cif_isp_awb_meas {
*/
struct rkisp1_cif_isp_awb_stat {
struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
-} __packed;
+};
/**
* struct rkisp1_cif_isp_bls_meas_val - BLS measured values
@@ -738,7 +807,7 @@ struct rkisp1_cif_isp_bls_meas_val {
__u16 meas_gr;
__u16 meas_gb;
__u16 meas_b;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
@@ -751,18 +820,18 @@ struct rkisp1_cif_isp_bls_meas_val {
struct rkisp1_cif_isp_ae_stat {
__u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
struct rkisp1_cif_isp_bls_meas_val bls_val;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_af_meas_val - AF measured values
*
- * @sum: sharpness, refer to REF_01 for definition
- * @lum: luminance, refer to REF_01 for definition
+ * @sum: sharpness value
+ * @lum: luminance value
*/
struct rkisp1_cif_isp_af_meas_val {
__u32 sum;
__u32 lum;
-} __packed;
+};
/**
* struct rkisp1_cif_isp_af_stat - statistics auto focus data
@@ -774,7 +843,7 @@ struct rkisp1_cif_isp_af_meas_val {
*/
struct rkisp1_cif_isp_af_stat {
struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
-} __packed;
+};
/**
* struct rkisp1_cif_isp_hist_stat - statistics histogram data
@@ -786,27 +855,27 @@ struct rkisp1_cif_isp_af_stat {
*/
struct rkisp1_cif_isp_hist_stat {
__u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
-} __packed;
+};
/**
- * struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Data
+ * struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
*
- * @rkisp1_cif_isp_awb_stat: statistics data for automatic white balance
- * @rkisp1_cif_isp_ae_stat: statistics data for auto exposure
- * @rkisp1_cif_isp_af_stat: statistics data for auto focus
- * @rkisp1_cif_isp_hist_stat: statistics histogram data
+ * @awb: statistics data for automatic white balance
+ * @ae: statistics data for auto exposure
+ * @af: statistics data for auto focus
+ * @hist: statistics histogram data
*/
struct rkisp1_cif_isp_stat {
struct rkisp1_cif_isp_awb_stat awb;
struct rkisp1_cif_isp_ae_stat ae;
struct rkisp1_cif_isp_af_stat af;
struct rkisp1_cif_isp_hist_stat hist;
-} __packed;
+};
/**
* struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
*
- * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_ definitions)
+ * @meas_type: measurement types (RKISP1_CIF_ISP_STAT_* definitions)
* @frame_id: frame ID for sync
* @params: statistics data
*/
@@ -814,6 +883,6 @@ struct rkisp1_stat_buffer {
__u32 meas_type;
__u32 frame_id;
struct rkisp1_cif_isp_stat params;
-} __packed;
+};
#endif /* _UAPI_RKISP1_CONFIG_H */
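Replacing BIT(n) with (1U << n) keeps this uAPI header self-contained: BIT() lives in in-kernel headers that userspace cannot include. As an illustration of how the module bits are meant to be used, a sketch assuming the module_ens/module_en_update fields declared earlier in this header:

    static void enable_bls_and_awb(struct rkisp1_params_cfg *cfg)
    {
        __u32 mods = RKISP1_CIF_ISP_MODULE_BLS | RKISP1_CIF_ISP_MODULE_AWB;

        cfg->module_ens |= mods;
        /* Tell the driver which enable bits this buffer actually updates. */
        cfg->module_en_update |= mods;
    }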
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 7b66e2743a4f..7cc3b478a5f4 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -109,7 +109,6 @@ struct rkvdec_h264_reflists {
struct rkvdec_h264_run {
struct rkvdec_run base;
const struct v4l2_ctrl_h264_decode_params *decode_params;
- const struct v4l2_ctrl_h264_slice_params *slices_params;
const struct v4l2_ctrl_h264_sps *sps;
const struct v4l2_ctrl_h264_pps *pps;
const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
@@ -709,9 +708,9 @@ static void assemble_hw_pps(struct rkvdec_ctx *ctx,
WRITE_PPS(pps->second_chroma_qp_index_offset,
SECOND_CHROMA_QP_INDEX_OFFSET);
- /* always use the matrix sent from userspace */
- WRITE_PPS(1, SCALING_LIST_ENABLE_FLAG);
-
+ WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT),
+ SCALING_LIST_ENABLE_FLAG);
+ /* To be on the safe side, program the scaling matrix address */
scaling_distance = offsetof(struct rkvdec_h264_priv_tbl, scaling_list);
scaling_list_address = h264_ctx->priv_tbl.dma + scaling_distance;
WRITE_PPS(scaling_list_address, SCALING_LIST_ADDRESS);
@@ -730,7 +729,6 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx,
struct rkvdec_h264_run *run)
{
const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
- const struct v4l2_ctrl_h264_slice_params *sl_params = &run->slices_params[0];
const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
const struct v4l2_ctrl_h264_sps *sps = run->sps;
@@ -754,7 +752,7 @@ static void assemble_hw_rps(struct rkvdec_ctx *ctx,
continue;
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM ||
- dpb[i].frame_num < sl_params->frame_num) {
+ dpb[i].frame_num < dec_params->frame_num) {
p[i] = dpb[i].frame_num;
continue;
}
@@ -794,9 +792,13 @@ static void assemble_hw_scaling_list(struct rkvdec_ctx *ctx,
struct rkvdec_h264_run *run)
{
const struct v4l2_ctrl_h264_scaling_matrix *scaling = run->scaling_matrix;
+ const struct v4l2_ctrl_h264_pps *pps = run->pps;
struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
struct rkvdec_h264_priv_tbl *tbl = h264_ctx->priv_tbl.cpu;
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
BUILD_BUG_ON(sizeof(tbl->scaling_list.scaling_list_4x4) !=
sizeof(scaling->scaling_list_4x4));
BUILD_BUG_ON(sizeof(tbl->scaling_list.scaling_list_8x8) !=
@@ -949,16 +951,17 @@ static void config_registers(struct rkvdec_ctx *ctx,
for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
struct vb2_buffer *vb_buf = get_ref_buf(ctx, run, i);
- refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0) |
- RKVDEC_COLMV_USED_FLAG_REF;
+ refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0);
- if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD))
- refer_addr |= RKVDEC_TOPFIELD_USED_REF |
- RKVDEC_BOTFIELD_USED_REF;
- else if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD)
- refer_addr |= RKVDEC_BOTFIELD_USED_REF;
- else
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ refer_addr |= RKVDEC_COLMV_USED_FLAG_REF;
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD)
+ refer_addr |= RKVDEC_FIELD_REF;
+
+ if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF)
refer_addr |= RKVDEC_TOPFIELD_USED_REF;
+ if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ refer_addr |= RKVDEC_BOTFIELD_USED_REF;
writel_relaxed(dpb[i].top_field_order_cnt,
rkvdec->regs + poc_reg_tbl_top_field[i]);
@@ -1067,9 +1070,6 @@ static void rkvdec_h264_run_preamble(struct rkvdec_ctx *ctx,
V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS);
run->decode_params = ctrl ? ctrl->p_cur.p : NULL;
ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
- V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
- run->slices_params = ctrl ? ctrl->p_cur.p : NULL;
- ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
V4L2_CID_MPEG_VIDEO_H264_SPS);
run->sps = ctrl ? ctrl->p_cur.p : NULL;
ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
@@ -1093,8 +1093,7 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
/* Build the P/B{0,1} ref lists. */
v4l2_h264_init_reflist_builder(&reflist_builder, run.decode_params,
- &run.slices_params[0], run.sps,
- run.decode_params->dpb);
+ run.sps, run.decode_params->dpb);
h264_ctx->reflists.num_valid = reflist_builder.num_valid;
v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
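The dpb[i].fields tests above rely on the reworked stateless H.264 uAPI, where each reference carries both a DPB index and the referenced fields. At the time of this series the relevant definitions look roughly as follows (reproduced from memory as a sketch; the uAPI header is authoritative):

    #define V4L2_H264_TOP_FIELD_REF     0x1
    #define V4L2_H264_BOTTOM_FIELD_REF  0x2
    #define V4L2_H264_FRAME_REF         0x3

    struct v4l2_h264_reference {
        __u8 fields;  /* V4L2_H264_*_REF flags above */
        __u8 index;   /* index into the decode_params DPB */
    };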
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index c8151328fb70..d25c4a37e2af 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -55,40 +55,28 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
{
- .per_request = true,
.mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS,
},
{
- .per_request = true,
- .mandatory = true,
- .cfg.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
- },
- {
- .per_request = true,
.mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_SPS,
.cfg.ops = &rkvdec_ctrl_ops,
},
{
- .per_request = true,
.mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PPS,
},
{
- .per_request = true,
- .mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
},
{
- .mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE,
.cfg.min = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
.cfg.max = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
.cfg.def = V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED,
},
{
- .mandatory = true,
.cfg.id = V4L2_CID_MPEG_VIDEO_H264_START_CODE,
.cfg.min = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
.cfg.def = V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B,
@@ -620,7 +608,7 @@ static int rkvdec_request_validate(struct media_request *req)
u32 id = ctrls->ctrls[i].cfg.id;
struct v4l2_ctrl *ctrl;
- if (!ctrls->ctrls[i].per_request || !ctrls->ctrls[i].mandatory)
+ if (!ctrls->ctrls[i].mandatory)
continue;
ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
index 2fc9f46b6910..77a137cca88e 100644
--- a/drivers/staging/media/rkvdec/rkvdec.h
+++ b/drivers/staging/media/rkvdec/rkvdec.h
@@ -25,7 +25,6 @@
struct rkvdec_ctx;
struct rkvdec_ctrl_desc {
- u32 per_request : 1;
u32 mandatory : 1;
struct v4l2_ctrl_config cfg;
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 7c6b91f0e780..e0e35502e34a 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -76,7 +76,14 @@ static const struct cedrus_control cedrus_controls[] = {
.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX,
},
.codec = CEDRUS_CODEC_H264,
- .required = true,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS,
+ },
+ .codec = CEDRUS_CODEC_H264,
+ .required = false,
},
{
.cfg = {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 96765555ab8a..93c843ae14bb 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -62,6 +62,7 @@ struct cedrus_h264_run {
const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
const struct v4l2_ctrl_h264_slice_params *slice_params;
const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pred_weights *pred_weights;
};
struct cedrus_mpeg2_run {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 58c48e4fdfe9..6385026d1b6b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -57,6 +57,8 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS);
run.h264.sps = cedrus_find_control_data(ctx,
V4L2_CID_MPEG_VIDEO_H264_SPS);
+ run.h264.pred_weights = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS);
break;
case V4L2_PIX_FMT_HEVC_SLICE:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index 54ee2aa423e2..28319351e909 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -95,14 +95,13 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
{
struct cedrus_h264_sram_ref_pic pic_list[CEDRUS_H264_FRAME_NUM];
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
- const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
unsigned int position;
- unsigned int output = 0;
+ int output = -1;
unsigned int i;
cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -125,6 +124,11 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
position = cedrus_buf->codec.h264.position;
used_dpbs |= BIT(position);
+ if (run->dst->vb2_buf.timestamp == dpb->reference_ts) {
+ output = position;
+ continue;
+ }
+
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
continue;
@@ -132,19 +136,17 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
dpb->top_field_order_cnt,
dpb->bottom_field_order_cnt,
&pic_list[position]);
-
- output = max(position, output);
}
- position = find_next_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM,
- output);
- if (position >= CEDRUS_H264_FRAME_NUM)
+ if (output >= 0)
+ position = output;
+ else
position = find_first_zero_bit(&used_dpbs, CEDRUS_H264_FRAME_NUM);
output_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
output_buf->codec.h264.position = position;
- if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_FIELD;
else if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
output_buf->codec.h264.pic_type = CEDRUS_H264_PIC_TYPE_MBAFF;
@@ -166,8 +168,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
struct cedrus_run *run,
- const u8 *ref_list, u8 num_ref,
- enum cedrus_h264_sram_off sram)
+ const struct v4l2_h264_reference *ref_list,
+ u8 num_ref, enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
struct vb2_queue *cap_q;
@@ -183,12 +185,11 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
for (i = 0; i < num_ref; i++) {
const struct v4l2_h264_dpb_entry *dpb;
const struct cedrus_buffer *cedrus_buf;
- const struct vb2_v4l2_buffer *ref_buf;
unsigned int position;
int buf_idx;
u8 dpb_idx;
- dpb_idx = ref_list[i];
+ dpb_idx = ref_list[i].index;
dpb = &decode->dpb[dpb_idx];
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
@@ -198,12 +199,11 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
if (buf_idx < 0)
continue;
- ref_buf = to_vb2_v4l2_buffer(cap_q->bufs[buf_idx]);
- cedrus_buf = vb2_v4l2_to_cedrus_buffer(ref_buf);
+ cedrus_buf = vb2_to_cedrus_buffer(cap_q->bufs[buf_idx]);
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
- if (ref_buf->field == V4L2_FIELD_BOTTOM)
+ if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
sram_array[i] |= BIT(0);
}
@@ -238,8 +238,12 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
{
const struct v4l2_ctrl_h264_scaling_matrix *scaling =
run->h264.scaling_matrix;
+ const struct v4l2_ctrl_h264_pps *pps = run->h264.pps;
struct cedrus_dev *dev = ctx->dev;
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_0,
scaling->scaling_list_8x8[0],
sizeof(scaling->scaling_list_8x8[0]));
@@ -256,10 +260,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
- const struct v4l2_ctrl_h264_slice_params *slice =
- run->h264.slice_params;
- const struct v4l2_h264_pred_weight_table *pred_weight =
- &slice->pred_weight_table;
+ const struct v4l2_ctrl_h264_pred_weights *pred_weight =
+ run->h264.pred_weights;
struct cedrus_dev *dev = ctx->dev;
int i, j, k;
@@ -326,17 +328,16 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
- u32 len = slice->size * 8;
+ size_t slice_bytes = vb2_get_plane_payload(src_buf, 0);
unsigned int pic_width_in_mbs;
bool mbaff_pic;
u32 reg;
- cedrus_write(dev, VE_H264_VLD_LEN, len);
+ cedrus_write(dev, VE_H264_VLD_LEN, slice_bytes * 8);
cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- cedrus_write(dev, VE_H264_VLD_END,
- src_buf_addr + vb2_get_plane_payload(src_buf, 0));
+ cedrus_write(dev, VE_H264_VLD_END, src_buf_addr + slice_bytes);
cedrus_write(dev, VE_H264_VLD_ADDR,
VE_H264_VLD_ADDR_VAL(src_buf_addr) |
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
@@ -367,11 +368,7 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
cedrus_skip_bits(dev, slice->header_bit_size);
- if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
- (slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
- slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
- (pps->weighted_bipred_idc == 1 &&
- slice->slice_type == V4L2_H264_SLICE_TYPE_B))
+ if (V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice))
cedrus_write_pred_weight_table(ctx, run);
if ((slice->slice_type == V4L2_H264_SLICE_TYPE_P) ||
@@ -414,7 +411,7 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
- mbaff_pic = !(slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) &&
+ mbaff_pic = !(decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
@@ -428,9 +425,9 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= slice->cabac_init_idc & 0x3;
if (ctx->fh.m2m_ctx->new_frame)
reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
- if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
- if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ if (decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
reg |= VE_H264_SHS_BOTTOM_FIELD;
if (slice->flags & V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED)
reg |= VE_H264_SHS_DIRECT_SPATIAL_MV_PRED;
@@ -449,6 +446,8 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= (pps->second_chroma_qp_index_offset & 0x3f) << 16;
reg |= (pps->chroma_qp_index_offset & 0x3f) << 8;
reg |= (pps->pic_init_qp_minus26 + 26 + slice->slice_qp_delta) & 0x3f;
+ if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
+ reg |= VE_H264_SHS_QP_SCALING_MATRIX_DEFAULT;
cedrus_write(dev, VE_H264_SHS_QP, reg);
// clear status flags
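V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED() centralizes exactly the open-coded condition removed above. Its expansion is essentially:

    #define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \
        ((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \
          ((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \
           (slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \
         ((pps)->weighted_bipred_idc == 1 && \
          (slice)->slice_type == V4L2_H264_SLICE_TYPE_B))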
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 1744e6fcc999..bcf050a04ffc 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -227,11 +227,17 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
* the RAM offset to the physical addresses.
*
* This information will eventually be obtained from device-tree.
+ *
+ * XXX(hch): this has no business in a driver and needs to move
+ * to the device tree.
*/
#ifdef PHYS_PFN_OFFSET
- if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
- dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET)) {
+ ret = dma_direct_set_offset(dev->dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ return ret;
+ }
#endif
ret = of_reserved_mem_device_init(dev->dev);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 16d82309e7b6..667b86dde1ee 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -247,6 +247,8 @@ static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
pix_fmt->pixelformat = fmt->pixelformat;
+ pix_fmt->width = ctx->src_fmt.width;
+ pix_fmt->height = ctx->src_fmt.height;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -296,10 +298,30 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct vb2_queue *vq;
+ struct vb2_queue *peer_vq;
int ret;
+ ret = cedrus_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_busy(vq))
+ /*
+ * To support dynamic resolution changes, the decoder admits a
+ * resolution change as long as the pixelformat remains the same.
+ * This can't be done while streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ f->fmt.pix.pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
+ * Since format change on the OUTPUT queue will reset
+ * the CAPTURE queue, we can't allow doing so
+ * when the CAPTURE queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(peer_vq))
return -EBUSY;
ret = cedrus_try_fmt_vid_out(file, priv, f);
@@ -319,11 +341,14 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
break;
}
- /* Propagate colorspace information to capture. */
+ /* Propagate format information to capture. */
ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+ ctx->dst_fmt.width = ctx->src_fmt.width;
+ ctx->dst_fmt.height = ctx->src_fmt.height;
+ cedrus_prepare_format(&ctx->dst_fmt);
return 0;
}
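The checks above imply a specific userspace order for a mid-stream resolution change. A hypothetical sketch (reqbufs() and set_out_fmt() are illustrative wrappers around VIDIOC_REQBUFS and VIDIOC_S_FMT on the same m2m node):

    /* 1. Free the CAPTURE buffers so the OUTPUT format may change. */
    reqbufs(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
    /* 2. Set the new coded size on OUTPUT, keeping the same pixelformat. */
    set_out_fmt(fd, V4L2_PIX_FMT_H264_SLICE, new_width, new_height);
    /* 3. Re-allocate CAPTURE buffers for the propagated dimensions. */
    reqbufs(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE, num_bufs);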
diff --git a/drivers/staging/media/tegra-vde/iommu.c b/drivers/staging/media/tegra-vde/iommu.c
index 6af863d92123..adf8dc7ee25c 100644
--- a/drivers/staging/media/tegra-vde/iommu.c
+++ b/drivers/staging/media/tegra-vde/iommu.c
@@ -36,8 +36,8 @@ int tegra_vde_iommu_map(struct tegra_vde *vde,
addr = iova_dma_addr(&vde->iova, iova);
- size = iommu_map_sg(vde->domain, addr, sgt->sgl, sgt->nents,
- IOMMU_READ | IOMMU_WRITE);
+ size = iommu_map_sgtable(vde->domain, addr, sgt,
+ IOMMU_READ | IOMMU_WRITE);
if (!size) {
__free_iova(&vde->iova, iova);
return -ENXIO;
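iommu_map_sgtable() passes sgt->orig_nents rather than the DMA-mapped nents, avoiding the classic off-by-nents mistake the raw call invites. Its in-kernel definition is essentially the following (sketch):

    static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
                                           unsigned long iova,
                                           struct sg_table *sgt, int prot)
    {
        return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
    }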
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index a3c24d96d5b9..28845b5bafaf 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -913,7 +913,7 @@ static irqreturn_t tegra_vde_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int tegra_vde_runtime_suspend(struct device *dev)
+static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
{
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
@@ -929,7 +929,7 @@ static int tegra_vde_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_vde_runtime_resume(struct device *dev)
+static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
{
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
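The __maybe_unused annotations silence "defined but not used" warnings when CONFIG_PM is disabled, because the dev_pm_ops entries referencing these callbacks then compile away. A typical arrangement (sketch of how such drivers wire up their pm ops):

    static const struct dev_pm_ops tegra_vde_pm_ops = {
        /* Expands to nothing with !CONFIG_PM, leaving the callbacks
         * unreferenced unless they are marked __maybe_unused. */
        SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
                           tegra_vde_runtime_resume, NULL)
    };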
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
index f6c61ec74386..1f35da4b134e 100644
--- a/drivers/staging/media/tegra-video/Kconfig
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -5,8 +5,15 @@ config VIDEO_TEGRA
depends on VIDEO_V4L2
select MEDIA_CONTROLLER
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
help
Choose this option if you have an NVIDIA Tegra SoC.
To compile this driver as a module, choose M here: the module
will be called tegra-video.
+
+config VIDEO_TEGRA_TPG
+ bool "NVIDIA Tegra VI driver TPG mode"
+ depends on VIDEO_TEGRA
+ help
+ Say yes here to enable the Tegra internal TPG (test pattern generator) mode.
diff --git a/drivers/staging/media/tegra-video/TODO b/drivers/staging/media/tegra-video/TODO
index 6ceb7549c218..c82108166894 100644
--- a/drivers/staging/media/tegra-video/TODO
+++ b/drivers/staging/media/tegra-video/TODO
@@ -1,10 +1,4 @@
TODO list
-* Currently driver supports Tegra build-in TPG only with direct media links
- from CSI to VI. Add kernel config CONFIG_VIDEO_TEGRA_TPG and update the
- driver to do TPG Vs Sensor media links based on CONFIG_VIDEO_TEGRA_TPG.
-* Add real camera sensor capture support.
-* Add Tegra CSI MIPI pads calibration.
-* Add MIPI clock Settle time computation based on the data rate.
* Add support for Ganged mode.
* Add RAW10 packed video format support to Tegra210 video formats.
* Add support for suspend and resume.
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index 40ea195d141d..a19c85c57fca 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -9,13 +9,18 @@
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-fwnode.h>
+
#include "csi.h"
#include "video.h"
+#define MHZ 1000000
+
static inline struct tegra_csi *
host1x_client_to_csi(struct host1x_client *client)
{
@@ -62,6 +67,9 @@ static int csi_enum_bus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
return -EINVAL;
@@ -76,6 +84,9 @@ static int csi_get_format(struct v4l2_subdev *subdev,
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
fmt->format = csi_chan->format;
return 0;
@@ -121,6 +132,9 @@ static int csi_enum_framesizes(struct v4l2_subdev *subdev,
{
unsigned int i;
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
return -EINVAL;
@@ -148,6 +162,9 @@ static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
int index;
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
/* one framerate per format and resolution */
if (fie->index > 0)
return -EINVAL;
@@ -172,6 +189,9 @@ static int csi_set_format(struct v4l2_subdev *subdev,
const struct v4l2_frmsize_discrete *sizes;
unsigned int i;
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
ARRAY_SIZE(tegra_csi_tpg_sizes),
width, height,
@@ -208,40 +228,157 @@ static int tegra_csi_g_frame_interval(struct v4l2_subdev *subdev,
{
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return -ENOIOCTLCMD;
+
vfi->interval.numerator = 1;
vfi->interval.denominator = csi_chan->framerate;
return 0;
}
-static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
+static unsigned int csi_get_pixel_rate(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_vi_channel *chan;
+ struct v4l2_subdev *src_subdev;
+ struct v4l2_ctrl *ctrl;
+
+ chan = v4l2_get_subdev_hostdata(&csi_chan->subdev);
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ ctrl = v4l2_ctrl_find(src_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (ctrl)
+ return v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ return 0;
+}
+
+void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 *clk_settle_time,
+ u8 *ths_settle_time)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int cil_clk_mhz;
+ unsigned int pix_clk_mhz;
+ int clk_idx = (csi_chan->csi_port_num >> 1) + 1;
+
+ cil_clk_mhz = clk_get_rate(csi->clks[clk_idx].clk) / MHZ;
+ pix_clk_mhz = csi_get_pixel_rate(csi_chan) / MHZ;
+
+ /*
+ * CLK Settle time is the interval during which HS receiver should
+ * ignore any clock lane HS transitions, starting from the beginning
+ * of T-CLK-PREPARE.
+ * Per the DPHY specification, T-CLK-SETTLE should be between 95ns and 300ns.
+ *
+ * 95ns < (clk-settle-programmed + 7) * lp clk period < 300ns
+ * midpoint = 197.5 ns
+ */
+ *clk_settle_time = ((95 + 300) * cil_clk_mhz - 14000) / 2000;
+
+ /*
+ * THS Settle time is the interval during which HS receiver should
+ * ignore any data lane HS transitions, starting from the beginning
+ * of THS-PREPARE.
+ *
+ * Per the DPHY specification, T-HS-SETTLE should be between 85ns + 6UI
+ * and 145ns + 10UI.
+ * 85ns + 6UI < (Ths-settle-prog + 5) * lp_clk_period < 145ns + 10UI
+ * midpoint = 115ns + 8UI
+ */
+ if (pix_clk_mhz)
+ *ths_settle_time = (115 * cil_clk_mhz + 8000 * cil_clk_mhz
+ / (2 * pix_clk_mhz) - 5000) / 1000;
+}
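The two expressions above pack a fair amount of unit juggling into single lines (note also that clk_idx picks the CIL clock for a port pair, since ports share the cilab/cilcd/cile clocks two at a time). The following standalone sketch is not part of the patch; the 204 MHz CIL clock and 150 MHz pixel clock are invented example rates, used only to show how the midpoint arithmetic falls out of the DPHY windows:

/* Standalone sketch of the settle-time midpoint math above.
 * Example rates only; real values come from clk_get_rate() and the
 * sensor's V4L2_CID_PIXEL_RATE control.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cil_clk_mhz = 204;	/* assumed CIL clock, MHz */
	unsigned int pix_clk_mhz = 150;	/* assumed pixel clock, MHz */
	unsigned int clk_settle, ths_settle;

	/*
	 * 95ns < (clk_settle + 7) * lp_clk_period < 300ns, with
	 * lp_clk_period = 1000 / cil_clk_mhz in ns. Solving for the
	 * midpoint of the programmed value gives:
	 * clk_settle = ((95 + 300) * f - 2 * 7 * 1000) / (2 * 1000)
	 */
	clk_settle = ((95 + 300) * cil_clk_mhz - 14000) / 2000;

	/*
	 * 85ns + 6UI < (ths_settle + 5) * lp_clk_period < 145ns + 10UI,
	 * midpoint 115ns + 8UI, with UI = 1000 / (2 * pix_clk_mhz) ns.
	 */
	ths_settle = (115 * cil_clk_mhz +
		      8000 * cil_clk_mhz / (2 * pix_clk_mhz) - 5000) / 1000;

	/* with the example rates this prints clk_settle=33 ths_settle=23 */
	printf("clk_settle=%u ths_settle=%u\n", clk_settle, ths_settle);
	return 0;
}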
+
+static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
{
struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
struct tegra_csi *csi = csi_chan->csi;
- int ret = 0;
+ int ret, err;
- csi_chan->pg_mode = chan->pg_mode;
- if (enable) {
- ret = pm_runtime_get_sync(csi->dev);
+ ret = pm_runtime_get_sync(csi->dev);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to get runtime PM: %d\n", ret);
+ pm_runtime_put_noidle(csi->dev);
+ return ret;
+ }
+
+ if (csi_chan->mipi) {
+ ret = tegra_mipi_enable(csi_chan->mipi);
if (ret < 0) {
dev_err(csi->dev,
- "failed to get runtime PM: %d\n", ret);
- pm_runtime_put_noidle(csi->dev);
- return ret;
+ "failed to enable MIPI pads: %d\n", ret);
+ goto rpm_put;
}
- ret = csi->ops->csi_start_streaming(csi_chan);
- if (ret < 0)
- goto rpm_put;
+ /*
+ * The CSI MIPI pad PULLUP, PULLDN and TERM impedances need to
+ * be calibrated after power on.
+ * So, trigger the calibration start here; the results will be
+ * latched and applied to the pads when the link is in the LP11
+ * state at the start of sensor streaming.
+ */
+ ret = tegra_mipi_start_calibration(csi_chan->mipi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to start MIPI calibration: %d\n", ret);
+ goto disable_mipi;
+ }
+ }
- return 0;
+ csi_chan->pg_mode = chan->pg_mode;
+ ret = csi->ops->csi_start_streaming(csi_chan);
+ if (ret < 0)
+ goto finish_calibration;
+
+ return 0;
+
+finish_calibration:
+ if (csi_chan->mipi)
+ tegra_mipi_finish_calibration(csi_chan->mipi);
+disable_mipi:
+ if (csi_chan->mipi) {
+ err = tegra_mipi_disable(csi_chan->mipi);
+ if (err < 0)
+ dev_err(csi->dev,
+ "failed to disable MIPI pads: %d\n", err);
}
+rpm_put:
+ pm_runtime_put(csi->dev);
+ return ret;
+}
+
+static int tegra_csi_disable_stream(struct v4l2_subdev *subdev)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ int err;
+
csi->ops->csi_stop_streaming(csi_chan);
-rpm_put:
+ if (csi_chan->mipi) {
+ err = tegra_mipi_disable(csi_chan->mipi);
+ if (err < 0)
+ dev_err(csi->dev,
+ "failed to disable MIPI pads: %d\n", err);
+ }
+
pm_runtime_put(csi->dev);
+
+ return 0;
+}
+
+static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ int ret;
+
+ if (enable)
+ ret = tegra_csi_enable_stream(subdev);
+ else
+ ret = tegra_csi_disable_stream(subdev);
+
return ret;
}
@@ -267,29 +404,123 @@ static const struct v4l2_subdev_ops tegra_csi_ops = {
.pad = &tegra_csi_pad_ops,
};
+static int tegra_csi_channel_alloc(struct tegra_csi *csi,
+ struct device_node *node,
+ unsigned int port_num, unsigned int lanes,
+ unsigned int num_pads)
+{
+ struct tegra_csi_channel *chan;
+ int ret = 0;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ list_add_tail(&chan->list, &csi->csi_chans);
+ chan->csi = csi;
+ chan->csi_port_num = port_num;
+ chan->numlanes = lanes;
+ chan->of_node = node;
+ chan->numpads = num_pads;
+ if (num_pads & 0x2) {
+ chan->pads[0].flags = MEDIA_PAD_FL_SINK;
+ chan->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ } else {
+ chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return 0;
+
+ chan->mipi = tegra_mipi_request(csi->dev, node);
+ if (IS_ERR(chan->mipi)) {
+ ret = PTR_ERR(chan->mipi);
+ dev_err(csi->dev, "failed to get mipi device: %d\n", ret);
+ }
+
+ return ret;
+}
+
static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
{
struct device_node *node = csi->dev->of_node;
unsigned int port_num;
- struct tegra_csi_channel *chan;
unsigned int tpg_channels = csi->soc->csi_max_channels;
+ int ret;
/* allocate CSI channel for each CSI x2 ports */
for (port_num = 0; port_num < tpg_channels; port_num++) {
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- list_add_tail(&chan->list, &csi->csi_chans);
- chan->csi = csi;
- chan->csi_port_num = port_num;
- chan->numlanes = 2;
- chan->of_node = node;
- chan->numpads = 1;
- chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ ret = tegra_csi_channel_alloc(csi, node, port_num, 2, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_channels_alloc(struct tegra_csi *csi)
+{
+ struct device_node *node = csi->dev->of_node;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *fwh;
+ struct device_node *channel;
+ struct device_node *ep;
+ unsigned int lanes, portno, num_pads;
+ int ret;
+
+ for_each_child_of_node(node, channel) {
+ if (!of_node_name_eq(channel, "channel"))
+ continue;
+
+ ret = of_property_read_u32(channel, "reg", &portno);
+ if (ret < 0)
+ continue;
+
+ if (portno >= csi->soc->csi_max_channels) {
+ dev_err(csi->dev, "invalid port num %d for %pOF\n",
+ portno, channel);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ ep = of_graph_get_endpoint_by_regs(channel, 0, 0);
+ if (!ep)
+ continue;
+
+ fwh = of_fwnode_handle(ep);
+ ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
+ of_node_put(ep);
+ if (ret) {
+ dev_err(csi->dev,
+ "failed to parse v4l2 endpoint for %pOF: %d\n",
+ channel, ret);
+ goto err_node_put;
+ }
+
+ lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+ if (!lanes || ((lanes & (lanes - 1)) != 0)) {
+ dev_err(csi->dev, "invalid data-lanes %d for %pOF\n",
+ lanes, channel);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ num_pads = of_graph_get_endpoint_count(channel);
+ if (num_pads == TEGRA_CSI_PADS_NUM) {
+ ret = tegra_csi_channel_alloc(csi, channel, portno,
+ lanes, num_pads);
+ if (ret < 0)
+ goto err_node_put;
+ }
}
return 0;
+
+err_node_put:
+ of_node_put(channel);
+ return ret;
}
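The data-lanes validation above (!lanes || (lanes & (lanes - 1))) rejects anything that is not a power of two, i.e. it accepts 1, 2 or 4 lanes in practice. A quick standalone demo of the bit trick (not part of the patch): a power of two has exactly one bit set, so clearing its lowest set bit yields zero.

/* Demo of the power-of-two test used for the data-lanes check above.
 * Standalone example, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

static bool valid_lane_count(unsigned int lanes)
{
	/* x & (x - 1) clears the lowest set bit; zero means one bit set */
	return lanes && (lanes & (lanes - 1)) == 0;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 5; n++)
		printf("%u data lanes -> %s\n", n,
		       valid_lane_count(n) ? "valid" : "invalid");
	/* prints valid for 1, 2 and 4; invalid for 0, 3 and 5 */
	return 0;
}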
static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
@@ -311,8 +542,12 @@ static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
subdev = &chan->subdev;
v4l2_subdev_init(subdev, &tegra_csi_ops);
subdev->dev = csi->dev;
- snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
- chan->csi_port_num);
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
+ chan->csi_port_num);
+ else
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+ kbasename(chan->of_node->full_name));
v4l2_set_subdevdata(subdev, chan);
subdev->fwnode = of_fwnode_handle(chan->of_node);
@@ -328,6 +563,15 @@ static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
return ret;
}
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to register subdev: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -366,9 +610,16 @@ static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
struct tegra_csi_channel *chan, *tmp;
list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
+ if (chan->mipi)
+ tegra_mipi_free(chan->mipi);
+
subdev = &chan->subdev;
- if (subdev->dev)
+ if (subdev->dev) {
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
+ }
+
list_del(&chan->list);
kfree(chan);
}
@@ -405,10 +656,13 @@ static int tegra_csi_init(struct host1x_client *client)
INIT_LIST_HEAD(&csi->csi_chans);
- ret = tegra_csi_tpg_channels_alloc(csi);
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ ret = tegra_csi_tpg_channels_alloc(csi);
+ else
+ ret = tegra_csi_channels_alloc(csi);
if (ret < 0) {
dev_err(csi->dev,
- "failed to allocate tpg channels: %d\n", ret);
+ "failed to allocate channels: %d\n", ret);
goto cleanup;
}
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
index 93bd2a05797d..c65ff73b1cdc 100644
--- a/drivers/staging/media/tegra-video/csi.h
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -7,6 +7,7 @@
#define __TEGRA_CSI_H__
#include <media/media-entity.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>
/*
@@ -49,6 +50,8 @@ struct tegra_csi;
* @framerate: active framerate for TPG
* @h_blank: horizontal blanking for TPG active format
* @v_blank: vertical blanking for TPG active format
+ * @mipi: MIPI device for the corresponding CSI channel pads
+ * @pixel_rate: active pixel rate from the sensor on this channel
*/
struct tegra_csi_channel {
struct list_head list;
@@ -64,6 +67,8 @@ struct tegra_csi_channel {
unsigned int framerate;
unsigned int h_blank;
unsigned int v_blank;
+ struct tegra_mipi_device *mipi;
+ unsigned int pixel_rate;
};
/**
@@ -144,4 +149,7 @@ extern const struct tegra_csi_soc tegra210_csi_soc;
#endif
void tegra_csi_error_recover(struct v4l2_subdev *subdev);
+void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
+ u8 *clk_settle_time,
+ u8 *ths_settle_time);
#endif
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
index 3baa4e314203..ac066c030a4f 100644
--- a/drivers/staging/media/tegra-video/tegra210.c
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -7,6 +7,7 @@
* This source file contains Tegra210 supported video formats,
* VI and CSI SoC specific data, operations and registers accessors.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
@@ -98,6 +99,8 @@
#define BRICK_CLOCK_B_4X (0x2 << 16)
#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
+#define CLK_SETTLE_MASK GENMASK(13, 8)
+#define THS_SETTLE_MASK GENMASK(5, 0)
#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
#define TEGRA_CSI_CIL_STATUS 0x010
#define TEGRA_CSI_CILX_STATUS 0x014
@@ -230,7 +233,7 @@ static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
tegra_channel_capture_setup(chan);
/* recover CSI block */
- subdev = tegra_channel_get_remote_subdev(chan);
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
tegra_csi_error_recover(subdev);
}
@@ -631,7 +634,11 @@ const struct tegra_vi_soc tegra210_vi_soc = {
.ops = &tegra210_vi_ops,
.hw_revision = 3,
.vi_max_channels = 6,
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
.vi_max_clk_hz = 499200000,
+#else
+ .vi_max_clk_hz = 998400000,
+#endif
};
/* Tegra210 CSI PHY registers accessors */
@@ -766,8 +773,14 @@ static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
{
struct tegra_csi *csi = csi_chan->csi;
unsigned int portno = csi_chan->csi_port_num;
+ u8 clk_settle_time = 0;
+ u8 ths_settle_time = 10;
u32 val;
+ if (!csi_chan->pg_mode)
+ tegra_csi_calc_settle_time(csi_chan, &clk_settle_time,
+ &ths_settle_time);
+
csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
/* clean up status */
@@ -778,7 +791,9 @@ static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
/* CIL PHY registers setup */
cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
- cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+ cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL,
+ FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
+ FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
/*
* The CSI unit provides for connection of up to six cameras in
@@ -797,7 +812,9 @@ static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
BRICK_CLOCK_A_4X);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
- cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL,
+ FIELD_PREP(CLK_SETTLE_MASK, clk_settle_time) |
+ FIELD_PREP(THS_SETTLE_MASK, ths_settle_time));
csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_ENABLE);
} else {
@@ -957,7 +974,9 @@ static const char * const tegra210_csi_cil_clks[] = {
"cilab",
"cilcd",
"cile",
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
"csi_tpg",
+#endif
};
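The CIL PHY programming earlier in this file packs the two computed settle times into TEGRA_CSI_CIL_PHY_CONTROL with GENMASK()/FIELD_PREP(). The sketch below is a userspace stand-in, not the kernel macros themselves: GENMASK_U32 and field_prep here are simplified equivalents, and the values 33/23 are the example settle times from the earlier sketch.

/* Userspace sketch of what FIELD_PREP(CLK_SETTLE_MASK, ...) |
 * FIELD_PREP(THS_SETTLE_MASK, ...) produces for the CIL PHY control
 * register. GENMASK(13, 8) selects bits 13..8, GENMASK(5, 0) bits 5..0.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(h, l)  (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define CLK_SETTLE_MASK    GENMASK_U32(13, 8)
#define THS_SETTLE_MASK    GENMASK_U32(5, 0)

/* minimal FIELD_PREP stand-in: shift the value to the mask's low bit */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t val = field_prep(CLK_SETTLE_MASK, 33) |
		       field_prep(THS_SETTLE_MASK, 23);

	printf("TEGRA_CSI_CIL_PHY_CONTROL <- 0x%04x\n", val); /* 0x2117 */
	return 0;
}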
/* Tegra210 CSI operations */
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 1b5e660155f5..560d8b368124 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -52,12 +53,19 @@ to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
return container_of(vb, struct tegra_channel_buffer, buf);
}
+static inline struct tegra_vi_graph_entity *
+to_tegra_vi_graph_entity(struct v4l2_async_subdev *asd)
+{
+ return container_of(asd, struct tegra_vi_graph_entity, asd);
+}
+
static int tegra_get_format_idx_by_code(struct tegra_vi *vi,
- unsigned int code)
+ unsigned int code,
+ unsigned int offset)
{
unsigned int i;
- for (i = 0; i < vi->soc->nformats; ++i) {
+ for (i = offset; i < vi->soc->nformats; ++i) {
if (vi->soc->video_formats[i].code == code)
return i;
}
@@ -145,33 +153,125 @@ static void tegra_channel_buffer_queue(struct vb2_buffer *vb)
}
struct v4l2_subdev *
-tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan)
+tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan)
+{
+ struct media_pad *pad;
+
+ pad = media_entity_remote_pad(&chan->pad);
+ if (!pad)
+ return NULL;
+
+ return media_entity_to_v4l2_subdev(pad->entity);
+}
+
+struct v4l2_subdev *
+tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan)
{
struct media_pad *pad;
struct v4l2_subdev *subdev;
struct media_entity *entity;
- pad = media_entity_remote_pad(&chan->pad);
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ if (!subdev)
+ return NULL;
+
+ pad = &subdev->entity.pads[0];
+ while (!(pad->flags & MEDIA_PAD_FL_SOURCE)) {
+ pad = media_entity_remote_pad(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+ entity = pad->entity;
+ pad = &entity->pads[0];
+ subdev = media_entity_to_v4l2_subdev(entity);
+ }
return subdev;
}
-int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
+static int tegra_channel_enable_stream(struct tegra_vi_channel *chan)
+{
+ struct v4l2_subdev *csi_subdev, *src_subdev;
+ struct tegra_csi_channel *csi_chan;
+ int ret, err;
+
+ /*
+ * The Tegra CSI receiver can detect the first LP to HS transition.
+ * So, start the CSI stream-on prior to the sensor stream-on, and
+ * vice versa for stream-off.
+ */
+ csi_subdev = tegra_channel_get_remote_csi_subdev(chan);
+ ret = v4l2_subdev_call(csi_subdev, video, s_stream, true);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return 0;
+
+ csi_chan = v4l2_get_subdevdata(csi_subdev);
+ /*
+ * The TRM incorrectly documents waiting for the done status from
+ * the calibration logic right after CSI interface power-on.
+ * By design, calibration results are latched and applied to the
+ * pads only when the link is in the LP11 state, which happens
+ * during the sensor stream-on.
+ * CSI subdev stream-on triggers the start of MIPI pads calibration.
+ * So, wait for the calibration to finish here, after the sensor
+ * subdev stream-on.
+ */
+ src_subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(src_subdev, video, s_stream, true);
+ err = tegra_mipi_finish_calibration(csi_chan->mipi);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto err_disable_csi_stream;
+
+ if (err < 0)
+ dev_warn(csi_chan->csi->dev,
+ "MIPI calibration failed: %d\n", err);
+
+ return 0;
+
+err_disable_csi_stream:
+ v4l2_subdev_call(csi_subdev, video, s_stream, false);
+ return ret;
+}
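The comments above describe a strict ordering: enable the CSI receiver before the sensor so the first LP to HS transition is caught, and tear down in the opposite order. Below is a toy model of that sequencing, with error unwinding in reverse order; all helper names are invented, only the control flow mirrors the driver.

/* Toy model of the stream-on/stream-off ordering described above.
 * Standalone example, not kernel code.
 */
#include <stdio.h>

static int csi_on(void)      { printf("1. CSI stream on\n");    return 0; }
static int sensor_on(void)   { printf("2. sensor stream on\n"); return 0; }
static void sensor_off(void) { printf("1. sensor stream off\n"); }
static void csi_off(void)    { printf("2. CSI stream off\n"); }

static int stream_set(int enable)
{
	if (enable) {
		if (csi_on())
			return -1;
		if (sensor_on()) {
			csi_off();	/* unwind in reverse order */
			return -1;
		}
		return 0;
	}
	/* disable is the mirror image: sensor first, then CSI */
	sensor_off();
	csi_off();
	return 0;
}

int main(void)
{
	stream_set(1);
	stream_set(0);
	return 0;
}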
+
+static int tegra_channel_disable_stream(struct tegra_vi_channel *chan)
{
struct v4l2_subdev *subdev;
int ret;
- /* stream CSI */
- subdev = tegra_channel_get_remote_subdev(chan);
- ret = v4l2_subdev_call(subdev, video, s_stream, on);
- if (on && ret < 0 && ret != -ENOIOCTLCMD)
+ /*
+ * Stream off the subdevices in the reverse order of stream-on.
+ * In TPG mode the remote source subdev is the same as the CSI
+ * subdev.
+ */
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, false);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ return 0;
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, false);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
return ret;
return 0;
}
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
+{
+ int ret;
+
+ if (on)
+ ret = tegra_channel_enable_stream(chan);
+ else
+ ret = tegra_channel_disable_stream(chan);
+
+ return ret;
+}
+
void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
enum vb2_buffer_state state)
{
@@ -251,7 +351,7 @@ static int tegra_channel_g_parm(struct file *file, void *fh,
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
- subdev = tegra_channel_get_remote_subdev(chan);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
return v4l2_g_parm_cap(&chan->video, subdev, a);
}
@@ -261,7 +361,7 @@ static int tegra_channel_s_parm(struct file *file, void *fh,
struct tegra_vi_channel *chan = video_drvdata(file);
struct v4l2_subdev *subdev;
- subdev = tegra_channel_get_remote_subdev(chan);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
return v4l2_s_parm_cap(&chan->video, subdev, a);
}
@@ -283,7 +383,7 @@ static int tegra_channel_enum_framesizes(struct file *file, void *fh,
fse.code = fmtinfo->code;
- subdev = tegra_channel_get_remote_subdev(chan);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
if (ret)
return ret;
@@ -315,7 +415,7 @@ static int tegra_channel_enum_frameintervals(struct file *file, void *fh,
fie.code = fmtinfo->code;
- subdev = tegra_channel_get_remote_subdev(chan);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
if (ret)
return ret;
@@ -334,6 +434,9 @@ static int tegra_channel_enum_format(struct file *file, void *fh,
unsigned int index = 0, i;
unsigned long *fmts_bitmap = chan->tpg_fmts_bitmap;
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ fmts_bitmap = chan->fmts_bitmap;
+
if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM))
return -EINVAL;
@@ -359,25 +462,15 @@ static void tegra_channel_fmt_align(struct tegra_vi_channel *chan,
struct v4l2_pix_format *pix,
unsigned int bpp)
{
- unsigned int align;
- unsigned int min_width;
- unsigned int max_width;
- unsigned int width;
unsigned int min_bpl;
unsigned int max_bpl;
unsigned int bpl;
/*
- * The transfer alignment requirements are expressed in bytes. Compute
- * minimum and maximum values, clamp the requested width and convert
- * it back to pixels. Use bytesperline to adjust the width.
+ * The transfer alignment requirements are expressed in bytes.
+ * Clamp the requested width and height to the limits.
*/
- align = lcm(SURFACE_ALIGN_BYTES, bpp);
- min_width = roundup(TEGRA_MIN_WIDTH, align);
- max_width = rounddown(TEGRA_MAX_WIDTH, align);
- width = roundup(pix->width * bpp, align);
-
- pix->width = clamp(width, min_width, max_width) / bpp;
+ pix->width = clamp(pix->width, TEGRA_MIN_WIDTH, TEGRA_MAX_WIDTH);
pix->height = clamp(pix->height, TEGRA_MIN_HEIGHT, TEGRA_MAX_HEIGHT);
/* Clamp the requested bytes per line value. If the maximum bytes per
@@ -400,8 +493,19 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
struct v4l2_subdev *subdev;
struct v4l2_subdev_format fmt;
struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+ return -ENODEV;
- subdev = tegra_channel_get_remote_subdev(chan);
pad_cfg = v4l2_subdev_alloc_pad_config(subdev);
if (!pad_cfg)
return -ENOMEM;
@@ -421,7 +525,28 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
fmt.which = V4L2_SUBDEV_FORMAT_TRY;
fmt.pad = 0;
v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
- v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+
+ /*
+ * Attempt to obtain the format size from subdev.
+ * If not available, try to get crop boundary from subdev.
+ */
+ fse.code = fmtinfo->code;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, pad_cfg, &fse);
+ if (ret) {
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (ret) {
+ v4l2_subdev_free_pad_config(pad_cfg);
+ return -EINVAL;
+ }
+ pad_cfg->try_crop.width = sdsel.r.width;
+ pad_cfg->try_crop.height = sdsel.r.height;
+ } else {
+ pad_cfg->try_crop.width = fse.max_width;
+ pad_cfg->try_crop.height = fse.max_height;
+ }
+
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ if (ret < 0) {
+ v4l2_subdev_free_pad_config(pad_cfg);
+ return ret;
+ }
+
v4l2_fill_pix_format(pix, &fmt.format);
tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
@@ -461,8 +586,11 @@ static int tegra_channel_set_format(struct file *file, void *fh,
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
fmt.pad = 0;
v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
- subdev = tegra_channel_get_remote_subdev(chan);
- v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
v4l2_fill_pix_format(pix, &fmt.format);
tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
@@ -472,15 +600,129 @@ static int tegra_channel_set_format(struct file *file, void *fh,
return 0;
}
+static int tegra_channel_set_subdev_active_fmt(struct tegra_vi_channel *chan)
+{
+ int ret, index;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /*
+ * Initialize the channel format to the sub-device active format if
+ * there is a corresponding match in the Tegra-supported video formats.
+ */
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ index = tegra_get_format_idx_by_code(chan->vi, fmt.format.code, 0);
+ if (index < 0)
+ return -EINVAL;
+
+ chan->fmtinfo = &chan->vi->soc->video_formats[index];
+ v4l2_fill_pix_format(&chan->format, &fmt.format);
+ chan->format.pixelformat = chan->fmtinfo->fourcc;
+ chan->format.bytesperline = chan->format.width * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline *
+ chan->format.height;
+ tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+
+ return 0;
+}
+
+static int tegra_channel_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ };
+ int ret;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, get_selection))
+ return -ENOTTY;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ /*
+ * Try the get selection operation and fall back to get format if it
+ * is not implemented.
+ */
+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
+ if (!ret)
+ sel->r = sdsel.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret;
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = fmt.format.width;
+ sel->r.height = fmt.format.height;
+
+ return 0;
+}
+
+static int tegra_channel_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ int ret;
+ struct v4l2_subdev_selection sdsel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = sel->target,
+ .flags = sel->flags,
+ .r = sel->r,
+ };
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!v4l2_subdev_has_op(subdev, pad, set_selection))
+ return -ENOTTY;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
+ if (!ret) {
+ sel->r = sdsel.r;
+ /*
+ * The subdev active format resolution may have changed during the
+ * set selection operation. So, update the channel format to the
+ * sub-device active format.
+ */
+ return tegra_channel_set_subdev_active_fmt(chan);
+ }
+
+ return ret;
+}
+
static int tegra_channel_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
- /* currently driver supports internal TPG only */
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
if (inp->index)
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
- strscpy(inp->name, "Tegra TPG", sizeof(inp->name));
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ strscpy(inp->name, subdev->name, sizeof(inp->name));
return 0;
}
@@ -526,6 +768,8 @@ static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = tegra_channel_g_selection,
+ .vidioc_s_selection = tegra_channel_s_selection,
};
/*
@@ -544,6 +788,7 @@ static const struct v4l2_file_operations tegra_channel_fops = {
/*
* V4L2 control operations
*/
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct tegra_vi_channel *chan = container_of(ctrl->handler,
@@ -570,11 +815,13 @@ static const char *const vi_pattern_strings[] = {
"Black/White Direct Mode",
"Color Patch Mode",
};
+#endif
static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
{
int ret;
+#if IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)
/* add test pattern control handler to v4l2 device */
v4l2_ctrl_new_std_menu_items(&chan->ctrl_handler, &vi_ctrl_ops,
V4L2_CID_TEST_PATTERN,
@@ -586,6 +833,23 @@ static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
v4l2_ctrl_handler_free(&chan->ctrl_handler);
return chan->ctrl_handler.error;
}
+#else
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ if (!subdev)
+ return -ENODEV;
+
+ ret = v4l2_ctrl_add_handler(&chan->ctrl_handler, subdev->ctrl_handler,
+ NULL, true);
+ if (ret < 0) {
+ dev_err(chan->vi->dev,
+ "failed to add subdev %s ctrl handler: %d\n",
+ subdev->name, ret);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return ret;
+ }
+#endif
/* setup the controls */
ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
@@ -606,14 +870,70 @@ static void vi_tpg_fmts_bitmap_init(struct tegra_vi_channel *chan)
bitmap_zero(chan->tpg_fmts_bitmap, MAX_FORMAT_NUM);
index = tegra_get_format_idx_by_code(chan->vi,
- MEDIA_BUS_FMT_SRGGB10_1X10);
+ MEDIA_BUS_FMT_SRGGB10_1X10, 0);
bitmap_set(chan->tpg_fmts_bitmap, index, 1);
index = tegra_get_format_idx_by_code(chan->vi,
- MEDIA_BUS_FMT_RGB888_1X32_PADHI);
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ 0);
bitmap_set(chan->tpg_fmts_bitmap, index, 1);
}
+static int vi_fmts_bitmap_init(struct tegra_vi_channel *chan)
+{
+ int index, ret, match_code = 0;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ bitmap_zero(chan->fmts_bitmap, MAX_FORMAT_NUM);
+
+ /*
+ * Set a bitmap bit for every pre-defined Tegra video format that
+ * matches one of the sub-device's available media bus formats.
+ */
+ subdev = tegra_channel_get_remote_source_subdev(chan);
+ while (1) {
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &code);
+ if (ret < 0)
+ break;
+
+ index = tegra_get_format_idx_by_code(chan->vi, code.code, 0);
+ while (index >= 0) {
+ bitmap_set(chan->fmts_bitmap, index, 1);
+ if (!match_code)
+ match_code = code.code;
+ /* look for other formats with same mbus code */
+ index = tegra_get_format_idx_by_code(chan->vi,
+ code.code,
+ index + 1);
+ }
+
+ code.index++;
+ }
+
+ /*
+ * Set the bitmap bit corresponding to the default Tegra video format
+ * if there are no matching formats.
+ */
+ if (!match_code) {
+ match_code = tegra_default_format.code;
+ index = tegra_get_format_idx_by_code(chan->vi, match_code, 0);
+ if (WARN_ON(index < 0))
+ return -EINVAL;
+
+ bitmap_set(chan->fmts_bitmap, index, 1);
+ }
+
+ /* initialize channel format to the sub-device active format */
+ tegra_channel_set_subdev_active_fmt(chan);
+
+ return 0;
+}
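tegra_get_format_idx_by_code() gained an offset parameter for exactly the loop above: several entries of the format table can share one media-bus code (same code, different fourcc), so the search resumes from index + 1 until no match remains. A minimal standalone sketch of that cursor pattern follows; the table contents are invented for illustration.

/* Sketch of the offset-based scan used by vi_fmts_bitmap_init().
 * Standalone example; the table entries are hypothetical.
 */
#include <stdio.h>

struct fmt { unsigned int code; const char *fourcc; };

static const struct fmt formats[] = {
	{ 0x300a, "RGB4" },	/* hypothetical entries */
	{ 0x2008, "BA10" },
	{ 0x2008, "BG10" },	/* same code, different fourcc */
};

static int find_by_code(unsigned int code, unsigned int offset)
{
	unsigned int i;

	for (i = offset; i < sizeof(formats) / sizeof(formats[0]); i++)
		if (formats[i].code == code)
			return i;
	return -1;
}

int main(void)
{
	int index = find_by_code(0x2008, 0);

	/* resume the search one past each hit to collect all matches */
	while (index >= 0) {
		printf("match at %d: %s\n", index, formats[index].fourcc);
		index = find_by_code(0x2008, index + 1);
	}
	return 0;
}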
+
static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
{
v4l2_ctrl_handler_free(&chan->ctrl_handler);
@@ -726,6 +1046,9 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
goto free_v4l2_ctrl_hdl;
}
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ v4l2_async_notifier_init(&chan->notifier);
+
return 0;
free_v4l2_ctrl_hdl:
@@ -739,31 +1062,90 @@ free_fs_syncpt:
return ret;
}
-static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
+static int tegra_vi_channel_alloc(struct tegra_vi *vi, unsigned int port_num,
+ struct device_node *node)
{
struct tegra_vi_channel *chan;
+
+ /*
+ * Do not use devm_kzalloc, as that memory is freed immediately
+ * when the device instance is unbound, while an application might
+ * still be holding the device node open. Channel memory allocated
+ * with kzalloc is freed from the video device release callback.
+ */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->vi = vi;
+ chan->portno = port_num;
+ chan->of_node = node;
+ list_add_tail(&chan->list, &vi->vi_chans);
+
+ return 0;
+}
+
+static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
+{
unsigned int port_num;
unsigned int nchannels = vi->soc->vi_max_channels;
+ int ret;
for (port_num = 0; port_num < nchannels; port_num++) {
- /*
- * Do not use devm_kzalloc as memory is freed immediately
- * when device instance is unbound but application might still
- * be holding the device node open. Channel memory allocated
- * with kzalloc is freed during video device release callback.
- */
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- chan->vi = vi;
- chan->portno = port_num;
- list_add_tail(&chan->list, &vi->vi_chans);
+ ret = tegra_vi_channel_alloc(vi, port_num, vi->dev->of_node);
+ if (ret < 0)
+ return ret;
}
return 0;
}
+static int tegra_vi_channels_alloc(struct tegra_vi *vi)
+{
+ struct device_node *node = vi->dev->of_node;
+ struct device_node *ep = NULL;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int port_num;
+ int ret = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ return -ENODEV;
+
+ for_each_child_of_node(ports, port) {
+ if (!of_node_name_eq(port, "port"))
+ continue;
+
+ ret = of_property_read_u32(port, "reg", &port_num);
+ if (ret < 0)
+ continue;
+
+ if (port_num >= vi->soc->vi_max_channels) {
+ dev_err(vi->dev, "invalid port num %d for %pOF\n",
+ port_num, port);
+ ret = -EINVAL;
+ of_node_put(port);
+ goto cleanup;
+ }
+
+ ep = of_get_child_by_name(port, "endpoint");
+ if (!ep)
+ continue;
+
+ of_node_put(ep);
+ ret = tegra_vi_channel_alloc(vi, port_num, port);
+ if (ret < 0) {
+ of_node_put(port);
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ of_node_put(ports);
+ return ret;
+}
+
static int tegra_vi_channels_init(struct tegra_vi *vi)
{
struct tegra_vi_channel *chan;
@@ -795,12 +1177,8 @@ void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid)
struct tegra_csi_channel *csi_chan;
struct tegra_vi_channel *chan;
- list_for_each_entry(chan, &vi->vi_chans, list) {
- video_unregister_device(&chan->video);
- mutex_lock(&chan->video_lock);
- vb2_queue_release(&chan->queue);
- mutex_unlock(&chan->video_lock);
- }
+ list_for_each_entry(chan, &vi->vi_chans, list)
+ vb2_video_unregister_device(&chan->video);
list_for_each_entry(csi_chan, &csi->csi_chans, list)
v4l2_device_unregister_subdev(&csi_chan->subdev);
@@ -915,6 +1293,347 @@ static int __maybe_unused vi_runtime_suspend(struct device *dev)
return 0;
}
+/*
+ * Graph Management
+ */
+static struct tegra_vi_graph_entity *
+tegra_vi_graph_find_entity(struct tegra_vi_channel *chan,
+ const struct fwnode_handle *fwnode)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct v4l2_async_subdev *asd;
+
+ list_for_each_entry(asd, &chan->notifier.asd_list, asd_list) {
+ entity = to_tegra_vi_graph_entity(asd);
+ if (entity->asd.match.fwnode == fwnode)
+ return entity;
+ }
+
+ return NULL;
+}
+
+static int tegra_vi_graph_build(struct tegra_vi_channel *chan,
+ struct tegra_vi_graph_entity *entity)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct tegra_vi_graph_entity *ent;
+ struct fwnode_handle *ep = NULL;
+ struct v4l2_fwnode_link link;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ int ret = 0;
+
+ dev_dbg(vi->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ ep = fwnode_graph_get_next_endpoint(entity->asd.match.fwnode,
+ ep);
+ if (!ep)
+ break;
+
+ ret = v4l2_fwnode_parse_link(ep, &link);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to parse link for %pOF: %d\n",
+ to_of_node(ep), ret);
+ continue;
+ }
+
+ if (link.local_port >= local->num_pads) {
+ dev_err(vi->dev, "invalid port number %u on %pOF\n",
+ link.local_port, to_of_node(link.local_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+ /*
+ * The remote node is the vi node. So use the channel video
+ * entity and pad as the remote/sink.
+ */
+ if (link.remote_node == of_fwnode_handle(vi->dev->of_node)) {
+ remote = &chan->video.entity;
+ remote_pad = &chan->pad;
+ goto create_link;
+ }
+
+ /*
+ * Skip sink ports; they will be processed from the other end
+ * of the link.
+ */
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(vi->dev, "skipping sink port %pOF:%u\n",
+ to_of_node(link.local_node), link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* find the remote entity from notifier list */
+ ent = tegra_vi_graph_find_entity(chan, link.remote_node);
+ if (!ent) {
+ dev_err(vi->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(vi->dev, "invalid port number %u on %pOF\n",
+ link.remote_port,
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+create_link:
+ dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_create_pad_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ v4l2_fwnode_put_link(&link);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to create %s:%u -> %s:%u link: %d\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index, ret);
+ break;
+ }
+ }
+
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct v4l2_async_subdev *asd;
+ struct v4l2_subdev *subdev;
+ struct tegra_vi_channel *chan;
+ struct tegra_vi *vi;
+ int ret;
+
+ chan = container_of(notifier, struct tegra_vi_channel, notifier);
+ vi = chan->vi;
+
+ dev_dbg(vi->dev, "notify complete, all subdevs registered\n");
+
+ /*
+ * The video device node should be created at the end of all the
+ * device-related initialization/setup.
+ * The current video_register_device() both initializes and registers
+ * the video device in the same API.
+ *
+ * TODO: Update v4l2-dev driver to split initialize and register into
+ * separate APIs and then update Tegra video driver to do video device
+ * initialize followed by all video device related setup and then
+ * register the video device.
+ */
+ ret = video_register_device(&chan->video, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register video device: %d\n", ret);
+ goto unregister_video;
+ }
+
+ /* create links between the entities */
+ list_for_each_entry(asd, &chan->notifier.asd_list, asd_list) {
+ entity = to_tegra_vi_graph_entity(asd);
+ ret = tegra_vi_graph_build(chan, entity);
+ if (ret < 0)
+ goto unregister_video;
+ }
+
+ ret = tegra_channel_setup_ctrl_handler(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to setup channel controls: %d\n", ret);
+ goto unregister_video;
+ }
+
+ ret = vi_fmts_bitmap_init(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize formats bitmap: %d\n", ret);
+ goto unregister_video;
+ }
+
+ subdev = tegra_channel_get_remote_csi_subdev(chan);
+ if (!subdev) {
+ ret = -ENODEV;
+ dev_err(vi->dev,
+ "failed to get remote csi subdev: %d\n", ret);
+ goto unregister_video;
+ }
+
+ v4l2_set_subdev_hostdata(subdev, chan);
+
+ return 0;
+
+unregister_video:
+ vb2_video_unregister_device(&chan->video);
+ return ret;
+}
+
+static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct tegra_vi_graph_entity *entity;
+ struct tegra_vi *vi;
+ struct tegra_vi_channel *chan;
+
+ chan = container_of(notifier, struct tegra_vi_channel, notifier);
+ vi = chan->vi;
+
+ /*
+ * Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ entity = tegra_vi_graph_find_entity(chan, subdev->fwnode);
+ if (!entity) {
+ dev_err(vi->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+ }
+
+ if (entity->subdev) {
+ dev_err(vi->dev, "duplicate subdev for node %pOF\n",
+ to_of_node(entity->asd.match.fwnode));
+ return -EINVAL;
+ }
+
+ dev_dbg(vi->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations tegra_vi_async_ops = {
+ .bound = tegra_vi_graph_notify_bound,
+ .complete = tegra_vi_graph_notify_complete,
+};
+
+static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
+ struct fwnode_handle *fwnode)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct fwnode_handle *ep = NULL;
+ struct fwnode_handle *remote = NULL;
+ struct v4l2_async_subdev *asd;
+ struct device_node *node = NULL;
+ int ret;
+
+ dev_dbg(vi->dev, "parsing node %pOF\n", to_of_node(fwnode));
+
+ /* parse all the remote entities and put them into the list */
+ for_each_endpoint_of_node(to_of_node(fwnode), node) {
+ ep = of_fwnode_handle(node);
+ remote = fwnode_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_err(vi->dev,
+ "remote device at %pOF not found\n", node);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /* skip entities that are already processed */
+ if (remote == dev_fwnode(vi->dev) ||
+ tegra_vi_graph_find_entity(chan, remote)) {
+ fwnode_handle_put(remote);
+ continue;
+ }
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier,
+ remote, sizeof(struct tegra_vi_graph_entity));
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ dev_err(vi->dev,
+ "failed to add subdev to notifier: %d\n", ret);
+ fwnode_handle_put(remote);
+ goto cleanup;
+ }
+
+ ret = tegra_vi_graph_parse_one(chan, remote);
+ if (ret < 0) {
+ fwnode_handle_put(remote);
+ goto cleanup;
+ }
+
+ fwnode_handle_put(remote);
+ }
+
+ return 0;
+
+cleanup:
+ dev_err(vi->dev, "failed parsing the graph: %d\n", ret);
+ v4l2_async_notifier_cleanup(&chan->notifier);
+ of_node_put(node);
+ return ret;
+}
+
+static int tegra_vi_graph_init(struct tegra_vi *vi)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
+ struct tegra_vi_channel *chan;
+ struct fwnode_handle *fwnode = dev_fwnode(vi->dev);
+ int ret;
+ struct fwnode_handle *remote = NULL;
+
+ /*
+ * Walk the links to parse the full graph. Each channel will have
+ * one endpoint of the composite node. Start by parsing the
+ * composite node and parse the remote entities in turn.
+ * Each channel registers its own v4l2 async notifier to make the
+ * graph independent between the channels, so we can skip the current
+ * channel if something goes wrong during graph parsing and continue
+ * with the next channels.
+ */
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ remote = fwnode_graph_get_remote_node(fwnode, chan->portno, 0);
+ if (!remote)
+ continue;
+
+ ret = tegra_vi_graph_parse_one(chan, remote);
+ fwnode_handle_put(remote);
+ if (ret < 0 || list_empty(&chan->notifier.asd_list))
+ continue;
+
+ chan->notifier.ops = &tegra_vi_async_ops;
+ ret = v4l2_async_notifier_register(&vid->v4l2_dev,
+ &chan->notifier);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register channel %d notifier: %d\n",
+ chan->portno, ret);
+ v4l2_async_notifier_cleanup(&chan->notifier);
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_vi_graph_cleanup(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ vb2_video_unregister_device(&chan->video);
+ v4l2_async_notifier_unregister(&chan->notifier);
+ v4l2_async_notifier_cleanup(&chan->notifier);
+ }
+}
+
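The graph-management block above follows the usual v4l2 async-notifier pattern: each channel collects the fwnodes of its remote entities, a .bound callback fires as each subdev probes, and .complete fires once all of them are bound. Here is a toy model of that lifecycle; all names ("imx219 1-0010" and friends) are invented, and only the control flow mirrors the driver.

/* Toy model of the per-channel async-notifier flow. Standalone
 * example, not kernel code.
 */
#include <stdio.h>

#define MAX_ENTITIES 4

struct notifier {
	const char *expected[MAX_ENTITIES];	/* parsed from the graph */
	int n_expected;
	int n_bound;
};

static void notify_bound(struct notifier *nf, const char *subdev)
{
	/* mirrors tegra_vi_graph_notify_bound(): record the subdev */
	printf("bound: %s\n", subdev);
	nf->n_bound++;
}

static void notify_complete(struct notifier *nf)
{
	/* mirrors tegra_vi_graph_notify_complete(): register the video
	 * device, build media links, set up controls */
	printf("complete: %d subdevs, registering video device\n",
	       nf->n_bound);
}

int main(void)
{
	struct notifier nf = {
		.expected = { "imx219 1-0010", "tegra-csi channel@0" },
		.n_expected = 2,
	};
	int i;

	/* subdevs probe asynchronously; .bound fires once per subdev */
	for (i = 0; i < nf.n_expected; i++)
		notify_bound(&nf, nf.expected[i]);

	if (nf.n_bound == nf.n_expected)
		notify_complete(&nf);
	return 0;
}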
static int tegra_vi_init(struct host1x_client *client)
{
struct tegra_video_device *vid = dev_get_drvdata(client->host);
@@ -928,9 +1647,13 @@ static int tegra_vi_init(struct host1x_client *client)
INIT_LIST_HEAD(&vi->vi_chans);
- ret = tegra_vi_tpg_channels_alloc(vi);
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ ret = tegra_vi_tpg_channels_alloc(vi);
+ else
+ ret = tegra_vi_channels_alloc(vi);
if (ret < 0) {
- dev_err(vi->dev, "failed to allocate tpg channels: %d\n", ret);
+ dev_err(vi->dev,
+ "failed to allocate vi channels: %d\n", ret);
goto free_chans;
}
@@ -940,6 +1663,12 @@ static int tegra_vi_init(struct host1x_client *client)
vid->vi = vi;
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ ret = tegra_vi_graph_init(vi);
+ if (ret < 0)
+ goto free_chans;
+ }
+
return 0;
free_chans:
@@ -953,6 +1682,8 @@ free_chans:
static int tegra_vi_exit(struct host1x_client *client)
{
+ struct tegra_vi *vi = host1x_client_to_vi(client);
+
/*
* Do not cleanup the channels here as application might still be
* holding video device nodes. Channels cleanup will happen during
@@ -960,6 +1691,9 @@ static int tegra_vi_exit(struct host1x_client *client)
* device nodes are released.
*/
+ if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ tegra_vi_graph_cleanup(vi);
+
return 0;
}
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
index 6272c9a61809..7d6b7a6d0a45 100644
--- a/drivers/staging/media/tegra-video/vi.h
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <media/media-entity.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
@@ -93,6 +94,19 @@ struct tegra_vi {
};
/**
+ * struct tegra_vi_graph_entity - Entity in the video graph
+ *
+ * @asd: subdev asynchronous registration information
+ * @entity: media entity from the corresponding V4L2 subdev
+ * @subdev: V4L2 subdev
+ */
+struct tegra_vi_graph_entity {
+ struct v4l2_async_subdev asd;
+ struct media_entity *entity;
+ struct v4l2_subdev *subdev;
+};
+
+/**
* struct tegra_vi_channel - Tegra video channel
*
* @list: list head for this entry
@@ -138,10 +152,13 @@ struct tegra_vi {
* @done_lock: protects the capture done queue list
*
* @portno: VI channel port number
+ * @of_node: device node of VI channel
*
* @ctrl_handler: V4L2 control handler of this video channel
+ * @fmts_bitmap: a bitmap for supported formats matching v4l2 subdev formats
* @tpg_fmts_bitmap: a bitmap for supported TPG formats
* @pg_mode: test pattern generator mode (disabled/direct/patch)
+ * @notifier: V4L2 asynchronous subdevs notifier
*/
struct tegra_vi_channel {
struct list_head list;
@@ -174,10 +191,14 @@ struct tegra_vi_channel {
spinlock_t done_lock;
unsigned char portno;
+ struct device_node *of_node;
struct v4l2_ctrl_handler ctrl_handler;
+ DECLARE_BITMAP(fmts_bitmap, MAX_FORMAT_NUM);
DECLARE_BITMAP(tpg_fmts_bitmap, MAX_FORMAT_NUM);
enum tegra_vi_pg_mode pg_mode;
+
+ struct v4l2_async_notifier notifier;
};
/**
@@ -249,7 +270,9 @@ extern const struct tegra_vi_soc tegra210_vi_soc;
#endif
struct v4l2_subdev *
-tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan);
+tegra_channel_get_remote_csi_subdev(struct tegra_vi_channel *chan);
+struct v4l2_subdev *
+tegra_channel_get_remote_source_subdev(struct tegra_vi_channel *chan);
int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on);
void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
enum vb2_buffer_state state);
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
index 30816aa41e81..e50bd70575f3 100644
--- a/drivers/staging/media/tegra-video/video.c
+++ b/drivers/staging/media/tegra-video/video.c
@@ -60,15 +60,17 @@ static int host1x_video_probe(struct host1x_device *dev)
if (ret < 0)
goto unregister_v4l2;
- /*
- * Both vi and csi channels are available now.
- * Register v4l2 nodes and create media links for TPG.
- */
- ret = tegra_v4l2_nodes_setup_tpg(vid);
- if (ret < 0) {
- dev_err(&dev->dev,
- "failed to setup tpg graph: %d\n", ret);
- goto device_exit;
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
+ /*
+ * Both vi and csi channels are available now.
+ * Register v4l2 nodes and create media links for TPG.
+ */
+ ret = tegra_v4l2_nodes_setup_tpg(vid);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to setup tpg graph: %d\n", ret);
+ goto device_exit;
+ }
}
return 0;
@@ -91,7 +93,8 @@ static int host1x_video_remove(struct host1x_device *dev)
{
struct tegra_video_device *vid = dev_get_drvdata(&dev->dev);
- tegra_v4l2_nodes_cleanup_tpg(vid);
+ if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
+ tegra_v4l2_nodes_cleanup_tpg(vid);
host1x_device_exit(dev);
diff --git a/drivers/staging/media/usbvision/Kconfig b/drivers/staging/media/usbvision/Kconfig
deleted file mode 100644
index 1c7da2a2caac..000000000000
--- a/drivers/staging/media/usbvision/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config VIDEO_USBVISION
- tristate "USB video devices based on Nogatech NT1003/1004/1005 (Deprecated)"
- depends on MEDIA_USB_SUPPORT && I2C && VIDEO_V4L2 && USB
- select VIDEO_TUNER
- select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
- help
- There are more than 50 different USB video devices based on
- NT1003/1004/1005 USB Bridges. This driver enables using those
- devices.
-
- This driver is deprecated and scheduled for removal by the
- end of 2020. See the TODO file in drivers/staging/media/usbvision
- for a list of actions that have to be done in order to prevent
- removal of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called usbvision.
diff --git a/drivers/staging/media/usbvision/Makefile b/drivers/staging/media/usbvision/Makefile
deleted file mode 100644
index 4d8541b9d4f8..000000000000
--- a/drivers/staging/media/usbvision/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-usbvision-objs := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-cards.o
-
-obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
diff --git a/drivers/staging/media/usbvision/TODO b/drivers/staging/media/usbvision/TODO
deleted file mode 100644
index e9fb4d125581..000000000000
--- a/drivers/staging/media/usbvision/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-The driver is deprecated and scheduled for removal by the end
-of 2020.
-
-In order to prevent removal the following actions would have to
-be taken:
-
-- clean up the code
-- convert to the vb2 framework
-- fix the disconnect and free-on-last-user handling (i.e., add
- a release callback for struct v4l2_device and rework the code
- to use that correctly).
diff --git a/drivers/staging/media/usbvision/usbvision-cards.c b/drivers/staging/media/usbvision/usbvision-cards.c
deleted file mode 100644
index 5e0cbbfe7c86..000000000000
--- a/drivers/staging/media/usbvision/usbvision-cards.c
+++ /dev/null
@@ -1,1120 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * usbvision-cards.c
- * usbvision cards definition file
- *
- * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
- *
- * This module is part of usbvision driver project.
- * Updates to driver completed by Dwaine P. Garden
- */
-
-
-#include <linux/list.h>
-#include <linux/module.h>
-#include <media/v4l2-dev.h>
-#include <media/tuner.h>
-#include "usbvision.h"
-#include "usbvision-cards.h"
-
-/* Supported Devices: A table for usbvision.c*/
-struct usbvision_device_data_st usbvision_device_data[] = {
- [XANBOO] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 4,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Xanboo",
- },
- [BELKIN_VIDEOBUS_II] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Belkin USB VideoBus II Adapter",
- },
- [BELKIN_VIDEOBUS] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Belkin Components USB VideoBus",
- },
- [BELKIN_USB_VIDEOBUS_II] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Belkin USB VideoBus II",
- },
- [ECHOFX_INTERVIEW_LITE] = {
- .interface = 0,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = -1,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "echoFX InterView Lite",
- },
- [USBGEAR_USBG_V1] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "USBGear USBG-V1 resp. HAMA USB",
- },
- [D_LINK_V100] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 4,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "D-Link V100",
- },
- [X10_USB_CAMERA] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "X10 USB Camera",
- },
- [HPG_WINTV_LIVE_PAL_BG] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = -1,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Live (PAL B/G)",
- },
- [HPG_WINTV_LIVE_PRO_NTSC_MN] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Live Pro (NTSC M/N)",
- },
- [ZORAN_PMD_NOGATECH] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 2,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Zoran Co. PMD (Nogatech) AV-grabber Manhattan",
- },
- [NOGATECH_USB_TV_NTSC_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = 20,
- .model_string = "Nogatech USB-TV (NTSC) FM",
- },
- [PNY_USB_TV_NTSC_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = 20,
- .model_string = "PNY USB-TV (NTSC) FM",
- },
- [PV_PLAYTV_USB_PRO_PAL_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "PixelView PlayTv-USB PRO (PAL) FM",
- },
- [ZT_721] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "ZTV ZT-721 2.4GHz USB A/V Receiver",
- },
- [HPG_WINTV_NTSC_MN] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = 20,
- .model_string = "Hauppauge WinTV USB (NTSC M/N)",
- },
- [HPG_WINTV_PAL_BG] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL B/G)",
- },
- [HPG_WINTV_PAL_I] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL I)",
- },
- [HPG_WINTV_PAL_SECAM_L] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_SECAM,
- .x_offset = 0x80,
- .y_offset = 0x16,
- .model_string = "Hauppauge WinTV USB (PAL/SECAM L)",
- },
- [HPG_WINTV_PAL_D_K] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL D/K)",
- },
- [HPG_WINTV_NTSC_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (NTSC FM)",
- },
- [HPG_WINTV_PAL_BG_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL B/G FM)",
- },
- [HPG_WINTV_PAL_I_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL I FM)",
- },
- [HPG_WINTV_PAL_D_K_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTV USB (PAL D/K FM)",
- },
- [HPG_WINTV_PRO_NTSC_MN] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_MICROTUNE_4049FM5,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (NTSC M/N)",
- },
- [HPG_WINTV_PRO_NTSC_MN_V2] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_MICROTUNE_4049FM5,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V2",
- },
- [HPG_WINTV_PRO_PAL] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)",
- },
- [HPG_WINTV_PRO_NTSC_MN_V3] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V3",
- },
- [HPG_WINTV_PRO_PAL_BG] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL B/G)",
- },
- [HPG_WINTV_PRO_PAL_I] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL I)",
- },
- [HPG_WINTV_PRO_PAL_SECAM_L] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_SECAM,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM L)",
- },
- [HPG_WINTV_PRO_PAL_D_K] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL D/K)",
- },
- [HPG_WINTV_PRO_PAL_SECAM] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_SECAM,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)",
- },
- [HPG_WINTV_PRO_PAL_SECAM_V2] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_SECAM,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2",
- },
- [HPG_WINTV_PRO_PAL_BG_V2] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_ALPS_TSBE1_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL B/G) V2",
- },
- [HPG_WINTV_PRO_PAL_BG_D_K] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_ALPS_TSBE1_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL B/G,D/K)",
- },
- [HPG_WINTV_PRO_PAL_I_D_K] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_LG_PAL_NEW_TAPC,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL I,D/K)",
- },
- [HPG_WINTV_PRO_NTSC_MN_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM)",
- },
- [HPG_WINTV_PRO_PAL_BG_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL B/G FM)",
- },
- [HPG_WINTV_PRO_PAL_I_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL I FM)",
- },
- [HPG_WINTV_PRO_PAL_D_K_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL D/K FM)",
- },
- [HPG_WINTV_PRO_TEMIC_PAL_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_MICROTUNE_4049FM5,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM)",
- },
- [HPG_WINTV_PRO_TEMIC_PAL_BG_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_MICROTUNE_4049FM5,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (Temic PAL B/G FM)",
- },
- [HPG_WINTV_PRO_PAL_FM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)",
- },
- [HPG_WINTV_PRO_NTSC_MN_FM_V2] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM) V2",
- },
- [CAMTEL_TVB330] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = 5,
- .y_offset = 5,
- .model_string = "Camtel Technology USB TV Genie Pro FM Model TVB330",
- },
- [DIGITAL_VIDEO_CREATOR_I] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Digital Video Creator I",
- },
- [GLOBAL_VILLAGE_GV_007_NTSC] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 82,
- .y_offset = 20,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Global Village GV-007 (NTSC)",
- },
- [DAZZLE_DVC_50_REV_1_NTSC] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)",
- },
- [DAZZLE_DVC_80_REV_1_PAL] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Dazzle Fusion Model DVC-80 Rev 1 (PAL)",
- },
- [DAZZLE_DVC_90_REV_1_SECAM] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)",
- },
- [ESKAPE_LABS_MYTV2GO] = {
- .interface = 0,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Eskape Labs MyTV2Go",
- },
- [PINNA_PCTV_USB_PAL] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 0,
- .tuner = 1,
- .tuner_type = TUNER_TEMIC_4066FY5_PAL_I,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Pinnacle Studio PCTV USB (PAL)",
- },
- [PINNA_PCTV_USB_SECAM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_SECAM,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_SECAM,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Pinnacle Studio PCTV USB (SECAM)",
- },
- [PINNA_PCTV_USB_PAL_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = 128,
- .y_offset = 23,
- .model_string = "Pinnacle Studio PCTV USB (PAL) FM",
- },
- [MIRO_PCTV_USB] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_PAL,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Miro PCTV USB",
- },
- [PINNA_PCTV_USB_NTSC_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Pinnacle Studio PCTV USB (NTSC) FM",
- },
- [PINNA_PCTV_USB_NTSC_FM_V3] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Pinnacle Studio PCTV USB (NTSC) FM V3",
- },
- [PINNA_PCTV_USB_PAL_FM_V2] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_TEMIC_4009FR5_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle Studio PCTV USB (PAL) FM V2",
- },
- [PINNA_PCTV_USB_NTSC_FM_V2] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_TEMIC_4039FR5_NTSC,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle Studio PCTV USB (NTSC) FM V2",
- },
- [PINNA_PCTV_USB_PAL_FM_V3] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_TEMIC_4009FR5_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle Studio PCTV USB (PAL) FM V3",
- },
- [PINNA_LINX_VD_IN_CAB_NTSC] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle Studio Linx Video input cable (NTSC)",
- },
- [PINNA_LINX_VD_IN_CAB_PAL] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 2,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle Studio Linx Video input cable (PAL)",
- },
- [PINNA_PCTV_BUNGEE_PAL_FM] = {
- .interface = -1,
- .codec = CODEC_SAA7113,
- .video_channels = 3,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 1,
- .radio = 1,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_TEMIC_4009FR5_PAL,
- .x_offset = 0,
- .y_offset = 3,
- .dvi_yuv_override = 1,
- .dvi_yuv = 7,
- .model_string = "Pinnacle PCTV Bungee USB (PAL) FM",
- },
- [HPG_WINTV] = {
- .interface = -1,
- .codec = CODEC_SAA7111,
- .video_channels = 3,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 1,
- .radio = 0,
- .vbi = 1,
- .tuner = 1,
- .tuner_type = TUNER_PHILIPS_NTSC_M,
- .x_offset = -1,
- .y_offset = -1,
- .model_string = "Hauppauge WinTv-USB",
- },
- [MICROCAM_NTSC] = {
- .interface = -1,
- .codec = CODEC_WEBCAM,
- .video_channels = 1,
- .video_norm = V4L2_STD_NTSC,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 0,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 71,
- .y_offset = 15,
- .model_string = "Nogatech USB MicroCam NTSC (NV3000N)",
- },
- [MICROCAM_PAL] = {
- .interface = -1,
- .codec = CODEC_WEBCAM,
- .video_channels = 1,
- .video_norm = V4L2_STD_PAL,
- .audio_channels = 0,
- .radio = 0,
- .vbi = 0,
- .tuner = 0,
- .tuner_type = 0,
- .x_offset = 71,
- .y_offset = 18,
- .model_string = "Nogatech USB MicroCam PAL (NV3001P)",
- },
-};
-const int usbvision_device_data_size = ARRAY_SIZE(usbvision_device_data);
-
-/* Supported Devices */
-
-struct usb_device_id usbvision_table[] = {
- { USB_DEVICE(0x0a6f, 0x0400), .driver_info = XANBOO },
- { USB_DEVICE(0x050d, 0x0106), .driver_info = BELKIN_VIDEOBUS_II },
- { USB_DEVICE(0x050d, 0x0207), .driver_info = BELKIN_VIDEOBUS },
- { USB_DEVICE(0x050d, 0x0208), .driver_info = BELKIN_USB_VIDEOBUS_II },
- { USB_DEVICE(0x0571, 0x0002), .driver_info = ECHOFX_INTERVIEW_LITE },
- { USB_DEVICE(0x0573, 0x0003), .driver_info = USBGEAR_USBG_V1 },
- { USB_DEVICE(0x0573, 0x0400), .driver_info = D_LINK_V100 },
- { USB_DEVICE(0x0573, 0x2000), .driver_info = X10_USB_CAMERA },
- { USB_DEVICE(0x0573, 0x2d00), .driver_info = HPG_WINTV_LIVE_PAL_BG },
- { USB_DEVICE(0x0573, 0x2d01), .driver_info = HPG_WINTV_LIVE_PRO_NTSC_MN },
- { USB_DEVICE(0x0573, 0x2101), .driver_info = ZORAN_PMD_NOGATECH },
- { USB_DEVICE(0x0573, 0x3000), .driver_info = MICROCAM_NTSC },
- { USB_DEVICE(0x0573, 0x3001), .driver_info = MICROCAM_PAL },
- { USB_DEVICE(0x0573, 0x4100), .driver_info = NOGATECH_USB_TV_NTSC_FM },
- { USB_DEVICE(0x0573, 0x4110), .driver_info = PNY_USB_TV_NTSC_FM },
- { USB_DEVICE(0x0573, 0x4450), .driver_info = PV_PLAYTV_USB_PRO_PAL_FM },
- { USB_DEVICE(0x0573, 0x4550), .driver_info = ZT_721 },
- { USB_DEVICE(0x0573, 0x4d00), .driver_info = HPG_WINTV_NTSC_MN },
- { USB_DEVICE(0x0573, 0x4d01), .driver_info = HPG_WINTV_PAL_BG },
- { USB_DEVICE(0x0573, 0x4d02), .driver_info = HPG_WINTV_PAL_I },
- { USB_DEVICE(0x0573, 0x4d03), .driver_info = HPG_WINTV_PAL_SECAM_L },
- { USB_DEVICE(0x0573, 0x4d04), .driver_info = HPG_WINTV_PAL_D_K },
- { USB_DEVICE(0x0573, 0x4d10), .driver_info = HPG_WINTV_NTSC_FM },
- { USB_DEVICE(0x0573, 0x4d11), .driver_info = HPG_WINTV_PAL_BG_FM },
- { USB_DEVICE(0x0573, 0x4d12), .driver_info = HPG_WINTV_PAL_I_FM },
- { USB_DEVICE(0x0573, 0x4d14), .driver_info = HPG_WINTV_PAL_D_K_FM },
- { USB_DEVICE(0x0573, 0x4d2a), .driver_info = HPG_WINTV_PRO_NTSC_MN },
- { USB_DEVICE(0x0573, 0x4d2b), .driver_info = HPG_WINTV_PRO_NTSC_MN_V2 },
- { USB_DEVICE(0x0573, 0x4d2c), .driver_info = HPG_WINTV_PRO_PAL },
- { USB_DEVICE(0x0573, 0x4d20), .driver_info = HPG_WINTV_PRO_NTSC_MN_V3 },
- { USB_DEVICE(0x0573, 0x4d21), .driver_info = HPG_WINTV_PRO_PAL_BG },
- { USB_DEVICE(0x0573, 0x4d22), .driver_info = HPG_WINTV_PRO_PAL_I },
- { USB_DEVICE(0x0573, 0x4d23), .driver_info = HPG_WINTV_PRO_PAL_SECAM_L },
- { USB_DEVICE(0x0573, 0x4d24), .driver_info = HPG_WINTV_PRO_PAL_D_K },
- { USB_DEVICE(0x0573, 0x4d25), .driver_info = HPG_WINTV_PRO_PAL_SECAM },
- { USB_DEVICE(0x0573, 0x4d26), .driver_info = HPG_WINTV_PRO_PAL_SECAM_V2 },
- { USB_DEVICE(0x0573, 0x4d27), .driver_info = HPG_WINTV_PRO_PAL_BG_V2 },
- { USB_DEVICE(0x0573, 0x4d28), .driver_info = HPG_WINTV_PRO_PAL_BG_D_K },
- { USB_DEVICE(0x0573, 0x4d29), .driver_info = HPG_WINTV_PRO_PAL_I_D_K },
- { USB_DEVICE(0x0573, 0x4d30), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM },
- { USB_DEVICE(0x0573, 0x4d31), .driver_info = HPG_WINTV_PRO_PAL_BG_FM },
- { USB_DEVICE(0x0573, 0x4d32), .driver_info = HPG_WINTV_PRO_PAL_I_FM },
- { USB_DEVICE(0x0573, 0x4d34), .driver_info = HPG_WINTV_PRO_PAL_D_K_FM },
- { USB_DEVICE(0x0573, 0x4d35), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_FM },
- { USB_DEVICE(0x0573, 0x4d36), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_BG_FM },
- { USB_DEVICE(0x0573, 0x4d37), .driver_info = HPG_WINTV_PRO_PAL_FM },
- { USB_DEVICE(0x0573, 0x4d38), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM_V2 },
- { USB_DEVICE(0x0768, 0x0006), .driver_info = CAMTEL_TVB330 },
- { USB_DEVICE(0x07d0, 0x0001), .driver_info = DIGITAL_VIDEO_CREATOR_I },
- { USB_DEVICE(0x07d0, 0x0002), .driver_info = GLOBAL_VILLAGE_GV_007_NTSC },
- { USB_DEVICE(0x07d0, 0x0003), .driver_info = DAZZLE_DVC_50_REV_1_NTSC },
- { USB_DEVICE(0x07d0, 0x0004), .driver_info = DAZZLE_DVC_80_REV_1_PAL },
- { USB_DEVICE(0x07d0, 0x0005), .driver_info = DAZZLE_DVC_90_REV_1_SECAM },
- { USB_DEVICE(0x07f8, 0x9104), .driver_info = ESKAPE_LABS_MYTV2GO },
- { USB_DEVICE(0x2304, 0x010d), .driver_info = PINNA_PCTV_USB_PAL },
- { USB_DEVICE(0x2304, 0x0109), .driver_info = PINNA_PCTV_USB_SECAM },
- { USB_DEVICE(0x2304, 0x0110), .driver_info = PINNA_PCTV_USB_PAL_FM },
- { USB_DEVICE(0x2304, 0x0111), .driver_info = MIRO_PCTV_USB },
- { USB_DEVICE(0x2304, 0x0112), .driver_info = PINNA_PCTV_USB_NTSC_FM },
- { USB_DEVICE(0x2304, 0x0113), .driver_info = PINNA_PCTV_USB_NTSC_FM_V3 },
- { USB_DEVICE(0x2304, 0x0210), .driver_info = PINNA_PCTV_USB_PAL_FM_V2 },
- { USB_DEVICE(0x2304, 0x0212), .driver_info = PINNA_PCTV_USB_NTSC_FM_V2 },
- { USB_DEVICE(0x2304, 0x0214), .driver_info = PINNA_PCTV_USB_PAL_FM_V3 },
- { USB_DEVICE(0x2304, 0x0300), .driver_info = PINNA_LINX_VD_IN_CAB_NTSC },
- { USB_DEVICE(0x2304, 0x0301), .driver_info = PINNA_LINX_VD_IN_CAB_PAL },
- { USB_DEVICE(0x2304, 0x0419), .driver_info = PINNA_PCTV_BUNGEE_PAL_FM },
- { USB_DEVICE(0x2400, 0x4200), .driver_info = HPG_WINTV },
- { }, /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(usb, usbvision_table);
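For orientation: when a device from the table above is plugged in, the USB core matches it and passes the matched usb_device_id to the driver's probe routine; the entry's driver_info field is the index into usbvision_device_data[]. A minimal sketch of that lookup (usbvision_lookup_model() is a hypothetical helper, and the element type usbvision_device_data_st is assumed from usbvision.h):

/* Hypothetical helper, not part of the driver. */
static const struct usbvision_device_data_st *
usbvision_lookup_model(const struct usb_device_id *id)
{
	unsigned long model = id->driver_info;		/* e.g. HPG_WINTV_NTSC_MN */

	if (model >= usbvision_device_data_size)	/* defensive bounds check */
		return NULL;
	return &usbvision_device_data[model];
}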
diff --git a/drivers/staging/media/usbvision/usbvision-cards.h b/drivers/staging/media/usbvision/usbvision-cards.h
deleted file mode 100644
index 07ec83512743..000000000000
--- a/drivers/staging/media/usbvision/usbvision-cards.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define XANBOO 0
-#define BELKIN_VIDEOBUS_II 1
-#define BELKIN_VIDEOBUS 2
-#define BELKIN_USB_VIDEOBUS_II 3
-#define ECHOFX_INTERVIEW_LITE 4
-#define USBGEAR_USBG_V1 5
-#define D_LINK_V100 6
-#define X10_USB_CAMERA 7
-#define HPG_WINTV_LIVE_PAL_BG 8
-#define HPG_WINTV_LIVE_PRO_NTSC_MN 9
-#define ZORAN_PMD_NOGATECH 10
-#define NOGATECH_USB_TV_NTSC_FM 11
-#define PNY_USB_TV_NTSC_FM 12
-#define PV_PLAYTV_USB_PRO_PAL_FM 13
-#define ZT_721 14
-#define HPG_WINTV_NTSC_MN 15
-#define HPG_WINTV_PAL_BG 16
-#define HPG_WINTV_PAL_I 17
-#define HPG_WINTV_PAL_SECAM_L 18
-#define HPG_WINTV_PAL_D_K 19
-#define HPG_WINTV_NTSC_FM 20
-#define HPG_WINTV_PAL_BG_FM 21
-#define HPG_WINTV_PAL_I_FM 22
-#define HPG_WINTV_PAL_D_K_FM 23
-#define HPG_WINTV_PRO_NTSC_MN 24
-#define HPG_WINTV_PRO_NTSC_MN_V2 25
-#define HPG_WINTV_PRO_PAL 26
-#define HPG_WINTV_PRO_NTSC_MN_V3 27
-#define HPG_WINTV_PRO_PAL_BG 28
-#define HPG_WINTV_PRO_PAL_I 29
-#define HPG_WINTV_PRO_PAL_SECAM_L 30
-#define HPG_WINTV_PRO_PAL_D_K 31
-#define HPG_WINTV_PRO_PAL_SECAM 32
-#define HPG_WINTV_PRO_PAL_SECAM_V2 33
-#define HPG_WINTV_PRO_PAL_BG_V2 34
-#define HPG_WINTV_PRO_PAL_BG_D_K 35
-#define HPG_WINTV_PRO_PAL_I_D_K 36
-#define HPG_WINTV_PRO_NTSC_MN_FM 37
-#define HPG_WINTV_PRO_PAL_BG_FM 38
-#define HPG_WINTV_PRO_PAL_I_FM 39
-#define HPG_WINTV_PRO_PAL_D_K_FM 40
-#define HPG_WINTV_PRO_TEMIC_PAL_FM 41
-#define HPG_WINTV_PRO_TEMIC_PAL_BG_FM 42
-#define HPG_WINTV_PRO_PAL_FM 43
-#define HPG_WINTV_PRO_NTSC_MN_FM_V2 44
-#define CAMTEL_TVB330 45
-#define DIGITAL_VIDEO_CREATOR_I 46
-#define GLOBAL_VILLAGE_GV_007_NTSC 47
-#define DAZZLE_DVC_50_REV_1_NTSC 48
-#define DAZZLE_DVC_80_REV_1_PAL 49
-#define DAZZLE_DVC_90_REV_1_SECAM 50
-#define ESKAPE_LABS_MYTV2GO 51
-#define PINNA_PCTV_USB_PAL 52
-#define PINNA_PCTV_USB_SECAM 53
-#define PINNA_PCTV_USB_PAL_FM 54
-#define MIRO_PCTV_USB 55
-#define PINNA_PCTV_USB_NTSC_FM 56
-#define PINNA_PCTV_USB_PAL_FM_V2 57
-#define PINNA_PCTV_USB_NTSC_FM_V2 58
-#define PINNA_PCTV_USB_PAL_FM_V3 59
-#define PINNA_LINX_VD_IN_CAB_NTSC 60
-#define PINNA_LINX_VD_IN_CAB_PAL 61
-#define PINNA_PCTV_BUNGEE_PAL_FM 62
-#define HPG_WINTV 63
-#define PINNA_PCTV_USB_NTSC_FM_V3 64
-#define MICROCAM_NTSC 65
-#define MICROCAM_PAL 66
-
-extern const int usbvision_device_data_size;
diff --git a/drivers/staging/media/usbvision/usbvision-core.c b/drivers/staging/media/usbvision/usbvision-core.c
deleted file mode 100644
index e35dee35b068..000000000000
--- a/drivers/staging/media/usbvision/usbvision-core.c
+++ /dev/null
@@ -1,2428 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * usbvision-core.c - driver for NT100x USB video capture devices
- *
- * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
- * Dwaine Garden <dwainegarden@rogers.com>
- *
- * This module is part of usbvision driver project.
- * Updates to driver completed by Dwaine P. Garden
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/videodev2.h>
-#include <linux/i2c.h>
-
-#include <media/i2c/saa7115.h>
-#include <media/v4l2-common.h>
-#include <media/tuner.h>
-
-#include <linux/workqueue.h>
-
-#include "usbvision.h"
-
-static unsigned int core_debug;
-module_param(core_debug, int, 0644);
-MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
-
-static int adjust_compression = 1; /* Set the compression to be adaptive */
-module_param(adjust_compression, int, 0444);
-MODULE_PARM_DESC(adjust_compression, " Set the ADPCM compression for the device. Default: 1 (On)");
-
-/* To help people who get black-and-white output when using the S-Video
- * input. Some cables and input devices are wired differently. */
-static int switch_svideo_input;
-module_param(switch_svideo_input, int, 0444);
-MODULE_PARM_DESC(switch_svideo_input, " Set the S-Video input. Some cables and input devices are wired differently. Default: 0 (Off)");
-
-static int adjust_x_offset = -1;
-module_param(adjust_x_offset, int, 0644);
-MODULE_PARM_DESC(adjust_x_offset, "adjust X offset display [core]");
-
-static int adjust_y_offset = -1;
-module_param(adjust_y_offset, int, 0644);
-MODULE_PARM_DESC(adjust_y_offset, "adjust Y offset display [core]");
-
-
-#define ENABLE_HEXDUMP 0 /* Enable if you need it */
-
-
-#ifdef USBVISION_DEBUG
- #define PDEBUG(level, fmt, args...) { \
- if (core_debug & (level)) \
- printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \
- __func__, __LINE__ , ## args); \
- }
-#else
- #define PDEBUG(level, fmt, args...) do {} while (0)
-#endif
-
-#define DBG_HEADER (1 << 0)
-#define DBG_IRQ (1 << 1)
-#define DBG_ISOC (1 << 2)
-#define DBG_PARSE (1 << 3)
-#define DBG_SCRATCH (1 << 4)
-#define DBG_FUNC (1 << 5)
-
-/* The value of 'scratch_buf_size' affects the quality of the picture
- * in many ways. Shorter buffers may cause loss of data when the client
- * is too slow. Larger buffers are memory-consuming and take longer
- * to work with. This setting can be adjusted, but the default value
- * should be OK for most desktop users.
- */
-#define DEFAULT_SCRATCH_BUF_SIZE (0x20000) /* 128kB memory scratch buffer */
-static const int scratch_buf_size = DEFAULT_SCRATCH_BUF_SIZE;
-
-/* Function prototypes */
-static int usbvision_request_intra(struct usb_usbvision *usbvision);
-static int usbvision_unrequest_intra(struct usb_usbvision *usbvision);
-static int usbvision_adjust_compression(struct usb_usbvision *usbvision);
-static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision);
-
-/*******************************/
-/* Memory management functions */
-/*******************************/
-
-/*
- * Allocate a page-aligned, zeroed buffer and mark each page reserved
- * so the area can later be remapped into user space.
- */
-
-static void *usbvision_rvmalloc(unsigned long size)
-{
- void *mem;
- unsigned long adr;
-
- size = PAGE_ALIGN(size);
- mem = vmalloc_32(size);
- if (!mem)
- return NULL;
-
- memset(mem, 0, size); /* Clear the ram out, no junk to the user */
- adr = (unsigned long) mem;
- while (size > 0) {
- SetPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- return mem;
-}
-
-static void usbvision_rvfree(void *mem, unsigned long size)
-{
- unsigned long adr;
-
- if (!mem)
- return;
-
- size = PAGE_ALIGN(size);
-
- adr = (unsigned long) mem;
- while ((long) size > 0) {
- ClearPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- vfree(mem);
-}
-
-
-#if ENABLE_HEXDUMP
-static void usbvision_hexdump(const unsigned char *data, int len)
-{
- char tmp[80];
- int i, k;
-
- for (i = k = 0; len > 0; i++, len--) {
- if (i > 0 && (i % 16 == 0)) {
- printk("%s\n", tmp);
- k = 0;
- }
- k += sprintf(&tmp[k], "%02x ", data[i]);
- }
- if (k > 0)
- printk(KERN_CONT "%s\n", tmp);
-}
-#endif
-
-/********************************
- * scratch ring buffer handling
- ********************************/
-static int scratch_len(struct usb_usbvision *usbvision) /* This returns the amount of data actually in the buffer */
-{
- int len = usbvision->scratch_write_ptr - usbvision->scratch_read_ptr;
-
- if (len < 0)
- len += scratch_buf_size;
- PDEBUG(DBG_SCRATCH, "scratch_len() = %d\n", len);
-
- return len;
-}
-
-
-/* This returns the free space left in the buffer */
-static int scratch_free(struct usb_usbvision *usbvision)
-{
- int free = usbvision->scratch_read_ptr - usbvision->scratch_write_ptr;
- if (free <= 0)
- free += scratch_buf_size;
- if (free) {
- free -= 1; /* at least one byte in the buffer must be */
- /* left blank, otherwise there is no way to distinguish full from empty */
- }
- PDEBUG(DBG_SCRATCH, "return %d\n", free);
-
- return free;
-}
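scratch_len() and scratch_free() together implement the classic one-slot-open ring buffer: because one byte is always kept blank, read_ptr == write_ptr unambiguously means empty. Restated in isolation (ring_len() and ring_free() are illustrative names, not driver code); for any pointer pair, ring_len() + ring_free() == buf_size - 1:

static int ring_len(int read_ptr, int write_ptr, int buf_size)
{
	int len = write_ptr - read_ptr;

	return len < 0 ? len + buf_size : len;	/* writer has wrapped */
}

static int ring_free(int read_ptr, int write_ptr, int buf_size)
{
	int free = read_ptr - write_ptr;

	if (free <= 0)
		free += buf_size;
	return free - 1;	/* the one byte kept blank by design */
}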
-
-
-/* This puts data into the buffer */
-static int scratch_put(struct usb_usbvision *usbvision, unsigned char *data,
- int len)
-{
- int len_part;
-
- if (usbvision->scratch_write_ptr + len < scratch_buf_size) {
- memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len);
- usbvision->scratch_write_ptr += len;
- } else {
- len_part = scratch_buf_size - usbvision->scratch_write_ptr;
- memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len_part);
- if (len == len_part) {
- usbvision->scratch_write_ptr = 0; /* just set write_ptr to zero */
- } else {
- memcpy(usbvision->scratch, data + len_part, len - len_part);
- usbvision->scratch_write_ptr = len - len_part;
- }
- }
-
- PDEBUG(DBG_SCRATCH, "len=%d, new write_ptr=%d\n", len, usbvision->scratch_write_ptr);
-
- return len;
-}
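The same wrap-around pattern recurs in scratch_get() and scratch_get_extra() below: at most two memcpy() calls, split at the physical end of the buffer. Factored out as a sketch (ring_write() is an illustrative name; the caller is assumed to have checked free space first, as the driver does via scratch_free()):

static int ring_write(unsigned char *buf, int buf_size, int write_ptr,
		      const unsigned char *data, int len)
{
	int part = buf_size - write_ptr;	/* room before the wrap point */

	if (len < part) {
		memcpy(buf + write_ptr, data, len);
		return write_ptr + len;
	}
	memcpy(buf + write_ptr, data, part);
	memcpy(buf, data + part, len - part);	/* copies 0 bytes on exact fit */
	return len - part;			/* new write_ptr, 0 on exact fit */
}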
-
-/* This records the current write_ptr as the position of a new frame header */
-static void scratch_mark_header(struct usb_usbvision *usbvision)
-{
- PDEBUG(DBG_SCRATCH, "header at write_ptr=%d\n", usbvision->scratch_headermarker_write_ptr);
-
- usbvision->scratch_headermarker[usbvision->scratch_headermarker_write_ptr] =
- usbvision->scratch_write_ptr;
- usbvision->scratch_headermarker_write_ptr += 1;
- usbvision->scratch_headermarker_write_ptr %= USBVISION_NUM_HEADERMARKER;
-}
-
-/* This gets data from the buffer at the given "ptr" position */
-static int scratch_get_extra(struct usb_usbvision *usbvision,
- unsigned char *data, int *ptr, int len)
-{
- int len_part;
-
- if (*ptr + len < scratch_buf_size) {
- memcpy(data, usbvision->scratch + *ptr, len);
- *ptr += len;
- } else {
- len_part = scratch_buf_size - *ptr;
- memcpy(data, usbvision->scratch + *ptr, len_part);
- if (len == len_part) {
- *ptr = 0; /* just set the ptr to zero */
- } else {
- memcpy(data + len_part, usbvision->scratch, len - len_part);
- *ptr = len - len_part;
- }
- }
-
- PDEBUG(DBG_SCRATCH, "len=%d, new ptr=%d\n", len, *ptr);
-
- return len;
-}
-
-
-/* This sets the scratch extra read pointer */
-static void scratch_set_extra_ptr(struct usb_usbvision *usbvision, int *ptr,
- int len)
-{
- *ptr = (usbvision->scratch_read_ptr + len) % scratch_buf_size;
-
- PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr);
-}
-
-
-/* This increments the scratch extra read pointer */
-static void scratch_inc_extra_ptr(int *ptr, int len)
-{
- *ptr = (*ptr + len) % scratch_buf_size;
-
- PDEBUG(DBG_SCRATCH, "ptr=%d\n", *ptr);
-}
-
-
-/* This gets data from the buffer */
-static int scratch_get(struct usb_usbvision *usbvision, unsigned char *data,
- int len)
-{
- int len_part;
-
- if (usbvision->scratch_read_ptr + len < scratch_buf_size) {
- memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len);
- usbvision->scratch_read_ptr += len;
- } else {
- len_part = scratch_buf_size - usbvision->scratch_read_ptr;
- memcpy(data, usbvision->scratch + usbvision->scratch_read_ptr, len_part);
- if (len == len_part) {
- usbvision->scratch_read_ptr = 0; /* just set the read_ptr to zero */
- } else {
- memcpy(data + len_part, usbvision->scratch, len - len_part);
- usbvision->scratch_read_ptr = len - len_part;
- }
- }
-
- PDEBUG(DBG_SCRATCH, "len=%d, new read_ptr=%d\n", len, usbvision->scratch_read_ptr);
-
- return len;
-}
-
-
-/* This sets the read pointer to the next valid header and returns the header length (0 if none was found) */
-static int scratch_get_header(struct usb_usbvision *usbvision,
- struct usbvision_frame_header *header)
-{
- int err_code = 0;
-
- PDEBUG(DBG_SCRATCH, "from read_ptr=%d", usbvision->scratch_headermarker_read_ptr);
-
- while (usbvision->scratch_headermarker_write_ptr -
- usbvision->scratch_headermarker_read_ptr != 0) {
- usbvision->scratch_read_ptr =
- usbvision->scratch_headermarker[usbvision->scratch_headermarker_read_ptr];
- usbvision->scratch_headermarker_read_ptr += 1;
- usbvision->scratch_headermarker_read_ptr %= USBVISION_NUM_HEADERMARKER;
- scratch_get(usbvision, (unsigned char *)header, USBVISION_HEADER_LENGTH);
- if ((header->magic_1 == USBVISION_MAGIC_1)
- && (header->magic_2 == USBVISION_MAGIC_2)
- && (header->header_length == USBVISION_HEADER_LENGTH)) {
- err_code = USBVISION_HEADER_LENGTH;
- header->frame_width = header->frame_width_lo + (header->frame_width_hi << 8);
- header->frame_height = header->frame_height_lo + (header->frame_height_hi << 8);
- break;
- }
- }
-
- return err_code;
-}
-
-
-/* This removes len bytes of old data from the buffer */
-static void scratch_rm_old(struct usb_usbvision *usbvision, int len)
-{
- usbvision->scratch_read_ptr += len;
- usbvision->scratch_read_ptr %= scratch_buf_size;
- PDEBUG(DBG_SCRATCH, "read_ptr is now %d\n", usbvision->scratch_read_ptr);
-}
-
-
-/* This resets the buffer - kills all data in it too */
-static void scratch_reset(struct usb_usbvision *usbvision)
-{
- PDEBUG(DBG_SCRATCH, "\n");
-
- usbvision->scratch_read_ptr = 0;
- usbvision->scratch_write_ptr = 0;
- usbvision->scratch_headermarker_read_ptr = 0;
- usbvision->scratch_headermarker_write_ptr = 0;
- usbvision->isocstate = isoc_state_no_frame;
-}
-
-int usbvision_scratch_alloc(struct usb_usbvision *usbvision)
-{
- usbvision->scratch = vmalloc_32(scratch_buf_size);
- scratch_reset(usbvision);
- if (usbvision->scratch == NULL) {
- dev_err(&usbvision->dev->dev,
- "%s: unable to allocate %d bytes for scratch\n",
- __func__, scratch_buf_size);
- return -ENOMEM;
- }
- return 0;
-}
-
-void usbvision_scratch_free(struct usb_usbvision *usbvision)
-{
- vfree(usbvision->scratch);
- usbvision->scratch = NULL;
-}
-
-/*
- * usbvision_decompress_alloc()
- *
- * allocates an intermediate buffer for decompression
- */
-int usbvision_decompress_alloc(struct usb_usbvision *usbvision)
-{
- int IFB_size = MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * 3 / 2;
-
- usbvision->intra_frame_buffer = vmalloc_32(IFB_size);
- if (usbvision->intra_frame_buffer == NULL) {
- dev_err(&usbvision->dev->dev,
- "%s: unable to allocate %d for compr. frame buffer\n",
- __func__, IFB_size);
- return -ENOMEM;
- }
- return 0;
-}
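The 3/2 factor above is the YUV 4:2:0 storage cost: one full-resolution Y plane plus two quarter-resolution chroma planes, i.e. W*H + 2*(W/2)*(H/2) = W*H*3/2 bytes. For a hypothetical 640x480 maximum that works out to 640*480*3/2 = 460800 bytes; the actual bound depends on MAX_FRAME_WIDTH and MAX_FRAME_HEIGHT in usbvision.h.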
-
-/*
- * usbvision_decompress_free()
- *
- * frees the intermediate buffer for decompression
- */
-void usbvision_decompress_free(struct usb_usbvision *usbvision)
-{
- vfree(usbvision->intra_frame_buffer);
- usbvision->intra_frame_buffer = NULL;
-
-}
-
-/************************************************************
- * Here comes the data parsing stuff that runs in interrupt context
- ************************************************************/
-/*
- * usbvision_find_header()
- *
- * Locate one of the supported header markers in the scratch buffer.
- */
-static enum parse_state usbvision_find_header(struct usb_usbvision *usbvision)
-{
- struct usbvision_frame *frame;
- int found_header = 0;
-
- frame = usbvision->cur_frame;
-
- while (scratch_get_header(usbvision, &frame->isoc_header) == USBVISION_HEADER_LENGTH) {
- /* found header in scratch */
- PDEBUG(DBG_HEADER, "found header: 0x%02x%02x %d %d %d %d %#x 0x%02x %u %u",
- frame->isoc_header.magic_2,
- frame->isoc_header.magic_1,
- frame->isoc_header.header_length,
- frame->isoc_header.frame_num,
- frame->isoc_header.frame_phase,
- frame->isoc_header.frame_latency,
- frame->isoc_header.data_format,
- frame->isoc_header.format_param,
- frame->isoc_header.frame_width,
- frame->isoc_header.frame_height);
-
- if (usbvision->request_intra) {
- if (frame->isoc_header.format_param & 0x80) {
- found_header = 1;
- usbvision->last_isoc_frame_num = -1; /* do not check for lost frames this time */
- usbvision_unrequest_intra(usbvision);
- break;
- }
- } else {
- found_header = 1;
- break;
- }
- }
-
- if (found_header) {
- frame->frmwidth = frame->isoc_header.frame_width * usbvision->stretch_width;
- frame->frmheight = frame->isoc_header.frame_height * usbvision->stretch_height;
- frame->v4l2_linesize = (frame->frmwidth * frame->v4l2_format.depth) >> 3;
- } else { /* no header found */
- PDEBUG(DBG_HEADER, "skipping scratch data, no header");
- scratch_reset(usbvision);
- return parse_state_end_parse;
- }
-
- /* found header */
- if (frame->isoc_header.data_format == ISOC_MODE_COMPRESS) {
- /* check isoc_header.frame_num for lost frames */
- if (usbvision->last_isoc_frame_num >= 0) {
- if (((usbvision->last_isoc_frame_num + 1) % 32) != frame->isoc_header.frame_num) {
- /* unexpected frame drop: need to request new intra frame */
- PDEBUG(DBG_HEADER, "Lost frame before %d on USB", frame->isoc_header.frame_num);
- usbvision_request_intra(usbvision);
- return parse_state_next_frame;
- }
- }
- usbvision->last_isoc_frame_num = frame->isoc_header.frame_num;
- }
- usbvision->header_count++;
- frame->scanstate = scan_state_lines;
- frame->curline = 0;
-
- return parse_state_continue;
-}
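The lost-frame test above relies on the device's frame counter wrapping at 32. Reduced to a predicate (frame_was_lost() is an illustrative name, not driver code):

static int frame_was_lost(int last_frame_num, int new_frame_num)
{
	/* last_frame_num < 0 means "don't check", e.g. after an intra request */
	return last_frame_num >= 0 &&
	       ((last_frame_num + 1) % 32) != new_frame_num;
}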
-
-static enum parse_state usbvision_parse_lines_422(struct usb_usbvision *usbvision,
- long *pcopylen)
-{
- volatile struct usbvision_frame *frame;
- unsigned char *f;
- int len;
- int i;
- unsigned char yuyv[4] = { 180, 128, 10, 128 }; /* YUV components */
- unsigned char rv, gv, bv; /* RGB components */
- int clipmask_index, bytes_per_pixel;
- int stretch_bytes, clipmask_add;
-
- frame = usbvision->cur_frame;
- f = frame->data + (frame->v4l2_linesize * frame->curline);
-
- /* Make sure there's enough data for the entire line */
- len = (frame->isoc_header.frame_width * 2) + 5;
- if (scratch_len(usbvision) < len) {
- PDEBUG(DBG_PARSE, "out of data in line %d, need %u.\n", frame->curline, len);
- return parse_state_out;
- }
-
- if ((frame->curline + 1) >= frame->frmheight)
- return parse_state_next_frame;
-
- bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
- stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel;
- clipmask_index = frame->curline * MAX_FRAME_WIDTH;
- clipmask_add = usbvision->stretch_width;
-
- for (i = 0; i < frame->frmwidth; i += (2 * usbvision->stretch_width)) {
- scratch_get(usbvision, &yuyv[0], 4);
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f++ = yuyv[0]; /* Y */
- *f++ = yuyv[3]; /* U */
- } else {
- YUV_TO_RGB_BY_THE_BOOK(yuyv[0], yuyv[1], yuyv[3], rv, gv, bv);
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x07 & (gv >> 3)) |
- (0xF8 & bv);
- break;
- case V4L2_PIX_FMT_RGB24:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- break;
- case V4L2_PIX_FMT_RGB32:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- f++;
- break;
- case V4L2_PIX_FMT_RGB555:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x03 & (gv >> 3)) |
- (0x7C & (bv << 2));
- break;
- }
- }
- clipmask_index += clipmask_add;
- f += stretch_bytes;
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f++ = yuyv[2]; /* Y */
- *f++ = yuyv[1]; /* V */
- } else {
- YUV_TO_RGB_BY_THE_BOOK(yuyv[2], yuyv[1], yuyv[3], rv, gv, bv);
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x07 & (gv >> 3)) |
- (0xF8 & bv);
- break;
- case V4L2_PIX_FMT_RGB24:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- break;
- case V4L2_PIX_FMT_RGB32:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- f++;
- break;
- case V4L2_PIX_FMT_RGB555:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x03 & (gv >> 3)) |
- (0x7C & (bv << 2));
- break;
- }
- }
- clipmask_index += clipmask_add;
- f += stretch_bytes;
- }
-
- frame->curline += usbvision->stretch_height;
- *pcopylen += frame->v4l2_linesize * usbvision->stretch_height;
-
- if (frame->curline >= frame->frmheight)
- return parse_state_next_frame;
- return parse_state_continue;
-}
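The repeated two-byte stores in the switch above pack R into the low bits of the 16-bit word: for RGB565, bits 0-4 come from R, bits 5-10 from G and bits 11-15 from B. The packing factored out (pack_rgb565() is an illustrative helper; the driver writes the bytes inline):

static void pack_rgb565(unsigned char rv, unsigned char gv, unsigned char bv,
			unsigned char out[2])
{
	out[0] = (0x1F & rv) | (0xE0 & (gv << 5));	/* R[4:0], G[2:0] */
	out[1] = (0x07 & (gv >> 3)) | (0xF8 & bv);	/* G[5:3], B[7:3] */
}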
-
-/* The decompression routine */
-static int usbvision_decompress(struct usb_usbvision *usbvision, unsigned char *compressed,
- unsigned char *decompressed, int *start_pos,
- int *block_typestart_pos, int len)
-{
- int rest_pixel, idx, pos, extra_pos, block_len, block_type_pos, block_type_len;
- unsigned char block_byte, block_code, block_type, block_type_byte, integrator;
-
- integrator = 0;
- pos = *start_pos;
- block_type_pos = *block_typestart_pos;
- extra_pos = pos;
- block_len = 0;
- block_byte = 0;
- block_code = 0;
- block_type = 0;
- block_type_byte = 0;
- block_type_len = 0;
- rest_pixel = len;
-
- for (idx = 0; idx < len; idx++) {
- if (block_len == 0) {
- if (block_type_len == 0) {
- block_type_byte = compressed[block_type_pos];
- block_type_pos++;
- block_type_len = 4;
- }
- block_type = (block_type_byte & 0xC0) >> 6;
-
- /* statistic: */
- usbvision->compr_block_types[block_type]++;
-
- pos = extra_pos;
- if (block_type == 0) {
- if (rest_pixel >= 24) {
- idx += 23;
- rest_pixel -= 24;
- integrator = decompressed[idx];
- } else {
- idx += rest_pixel - 1;
- rest_pixel = 0;
- }
- } else {
- block_code = compressed[pos];
- pos++;
- if (rest_pixel >= 24)
- block_len = 24;
- else
- block_len = rest_pixel;
- rest_pixel -= block_len;
- extra_pos = pos + (block_len / 4);
- }
- block_type_byte <<= 2;
- block_type_len -= 1;
- }
- if (block_len > 0) {
- if ((block_len % 4) == 0) {
- block_byte = compressed[pos];
- pos++;
- }
- if (block_type == 1) /* inter Block */
- integrator = decompressed[idx];
- switch (block_byte & 0xC0) {
- case 0x03 << 6:
- integrator += compressed[extra_pos];
- extra_pos++;
- break;
- case 0x02 << 6:
- integrator += block_code;
- break;
- case 0x00:
- integrator -= block_code;
- break;
- }
- decompressed[idx] = integrator;
- block_byte <<= 2;
- block_len -= 1;
- }
- }
- *start_pos = extra_pos;
- *block_typestart_pos = block_type_pos;
- return idx;
-}
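Stripped of the 24-pixel block framing and the two-bit block-type dispatch, the decoder above is a running 8-bit sum: each output pixel is the integrator after applying a per-pixel delta. The essential idea in isolation (delta_decode() is an illustrative sketch, not driver code):

static void delta_decode(const unsigned char *deltas, unsigned char *out,
			 int len, unsigned char integrator)
{
	int i;

	for (i = 0; i < len; i++) {
		integrator += deltas[i];	/* wraps modulo 256, as above */
		out[i] = integrator;
	}
}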
-
-
-/*
- * usbvision_parse_compress()
- *
- * Parse a compressed frame from the scratch buffer, put the
- * decoded pixel values into the current frame buffer and add the written
- * number of bytes to *pcopylen.
- *
- */
-static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision,
- long *pcopylen)
-{
-#define USBVISION_STRIP_MAGIC 0x5A
-#define USBVISION_STRIP_LEN_MAX 400
-#define USBVISION_STRIP_HEADER_LEN 3
-
- struct usbvision_frame *frame;
- unsigned char *f, *u = NULL, *v = NULL;
- unsigned char strip_data[USBVISION_STRIP_LEN_MAX];
- unsigned char strip_header[USBVISION_STRIP_HEADER_LEN];
- int idx, idx_end, strip_len, strip_ptr, startblock_pos, block_pos, block_type_pos;
- int clipmask_index;
- int image_size;
- unsigned char rv, gv, bv;
- static unsigned char *Y, *U, *V;
-
- frame = usbvision->cur_frame;
- image_size = frame->frmwidth * frame->frmheight;
- if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
- (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420)) { /* this is a planar format */
- /* ... v4l2_linesize not used here. */
- f = frame->data + (frame->width * frame->curline);
- } else
- f = frame->data + (frame->v4l2_linesize * frame->curline);
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) { /* initialise u and v pointers */
- /* get base of u and v planes, add half offset */
- u = frame->data
- + image_size
- + (frame->frmwidth >> 1) * frame->curline;
- v = u + (image_size >> 1);
- } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) {
- v = frame->data + image_size + ((frame->curline * (frame->width)) >> 2);
- u = v + (image_size >> 2);
- }
-
- if (frame->curline == 0)
- usbvision_adjust_compression(usbvision);
-
- if (scratch_len(usbvision) < USBVISION_STRIP_HEADER_LEN)
- return parse_state_out;
-
- /* get strip header without changing the scratch_read_ptr */
- scratch_set_extra_ptr(usbvision, &strip_ptr, 0);
- scratch_get_extra(usbvision, &strip_header[0], &strip_ptr,
- USBVISION_STRIP_HEADER_LEN);
-
- if (strip_header[0] != USBVISION_STRIP_MAGIC) {
- /* wrong strip magic */
- usbvision->strip_magic_errors++;
- return parse_state_next_frame;
- }
-
- if (frame->curline != (int)strip_header[2]) {
- /* line number mismatch error */
- usbvision->strip_line_number_errors++;
- }
-
- strip_len = 2 * (unsigned int)strip_header[1];
- if (strip_len > USBVISION_STRIP_LEN_MAX) {
- /* strip overrun */
- /* I think this never happens */
- usbvision_request_intra(usbvision);
- }
-
- if (scratch_len(usbvision) < strip_len) {
- /* there is not enough data for the strip */
- return parse_state_out;
- }
-
- if (usbvision->intra_frame_buffer) {
- Y = usbvision->intra_frame_buffer + frame->frmwidth * frame->curline;
- U = usbvision->intra_frame_buffer + image_size + (frame->frmwidth / 2) * (frame->curline / 2);
- V = usbvision->intra_frame_buffer + image_size / 4 * 5 + (frame->frmwidth / 2) * (frame->curline / 2);
- } else {
- return parse_state_next_frame;
- }
-
- clipmask_index = frame->curline * MAX_FRAME_WIDTH;
-
- scratch_get(usbvision, strip_data, strip_len);
-
- idx_end = frame->frmwidth;
- block_type_pos = USBVISION_STRIP_HEADER_LEN;
- startblock_pos = block_type_pos + (idx_end - 1) / 96 + (idx_end / 2 - 1) / 96 + 2;
- block_pos = startblock_pos;
-
- usbvision->block_pos = block_pos;
-
- usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
- if (strip_len > usbvision->max_strip_len)
- usbvision->max_strip_len = strip_len;
-
- if (frame->curline % 2)
- usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
- else
- usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);
-
- if (block_pos > usbvision->comprblock_pos)
- usbvision->comprblock_pos = block_pos;
- if (block_pos > strip_len)
- usbvision->strip_len_errors++;
-
- for (idx = 0; idx < idx_end; idx++) {
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f++ = Y[idx];
- *f++ = idx & 0x01 ? U[idx / 2] : V[idx / 2];
- } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) {
- *f++ = Y[idx];
- if (idx & 0x01)
- *u++ = U[idx >> 1];
- else
- *v++ = V[idx >> 1];
- } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) {
- *f++ = Y[idx];
- if (!((idx & 0x01) | (frame->curline & 0x01))) {
- /* only need do this for 1 in 4 pixels */
- /* intraframe buffer is YUV420 format */
- *u++ = U[idx >> 1];
- *v++ = V[idx >> 1];
- }
- } else {
- YUV_TO_RGB_BY_THE_BOOK(Y[idx], U[idx / 2], V[idx / 2], rv, gv, bv);
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_GREY:
- *f++ = Y[idx];
- break;
- case V4L2_PIX_FMT_RGB555:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x03 & (gv >> 3)) |
- (0x7C & (bv << 2));
- break;
- case V4L2_PIX_FMT_RGB565:
- *f++ = (0x1F & rv) |
- (0xE0 & (gv << 5));
- *f++ = (0x07 & (gv >> 3)) |
- (0xF8 & bv);
- break;
- case V4L2_PIX_FMT_RGB24:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- break;
- case V4L2_PIX_FMT_RGB32:
- *f++ = rv;
- *f++ = gv;
- *f++ = bv;
- f++;
- break;
- }
- }
- clipmask_index++;
- }
- /* Deal with non-integer no. of bytes for YUV420P */
- if (frame->v4l2_format.format != V4L2_PIX_FMT_YVU420)
- *pcopylen += frame->v4l2_linesize;
- else
- *pcopylen += frame->curline & 0x01 ? frame->v4l2_linesize : frame->v4l2_linesize << 1;
-
- frame->curline += 1;
-
- if (frame->curline >= frame->frmheight)
- return parse_state_next_frame;
- return parse_state_continue;
-
-}
-
-
-/*
- * usbvision_parse_lines_420()
- *
- * Parse two lines from the scratch buffer, put the
- * decoded pixel values into the current frame buffer and add the written
- * number of bytes to *pcopylen.
- *
- */
-static enum parse_state usbvision_parse_lines_420(struct usb_usbvision *usbvision,
- long *pcopylen)
-{
- struct usbvision_frame *frame;
- unsigned char *f_even = NULL, *f_odd = NULL;
- unsigned int pixel_per_line, block;
- int pixel, block_split;
- int y_ptr, u_ptr, v_ptr, y_odd_offset;
- const int y_block_size = 128;
- const int uv_block_size = 64;
- const int sub_block_size = 32;
- const int y_step[] = { 0, 0, 0, 2 }, y_step_size = 4;
- const int uv_step[] = { 0, 0, 0, 4 }, uv_step_size = 4;
- unsigned char y[2], u, v; /* YUV components */
- int y_, u_, v_, vb, uvg, ur;
- int r_, g_, b_; /* RGB components */
- unsigned char g;
- int clipmask_even_index, clipmask_odd_index, bytes_per_pixel;
- int clipmask_add, stretch_bytes;
-
- frame = usbvision->cur_frame;
- f_even = frame->data + (frame->v4l2_linesize * frame->curline);
- f_odd = f_even + frame->v4l2_linesize * usbvision->stretch_height;
-
- /* Make sure there's enough data for the entire line */
- /* In this mode usbvision transfers 3 bytes for every 2 pixels */
- /* I need two lines to decode the color */
- bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
- stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel;
- clipmask_even_index = frame->curline * MAX_FRAME_WIDTH;
- clipmask_odd_index = clipmask_even_index + MAX_FRAME_WIDTH;
- clipmask_add = usbvision->stretch_width;
- pixel_per_line = frame->isoc_header.frame_width;
-
- if (scratch_len(usbvision) < (int)pixel_per_line * 3) {
- /* printk(KERN_DEBUG "out of data, need %d\n", len); */
- return parse_state_out;
- }
-
- if ((frame->curline + 1) >= frame->frmheight)
- return parse_state_next_frame;
-
- block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks split into different lines? */
-
- y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size)
- + block_split * uv_block_size;
-
- scratch_set_extra_ptr(usbvision, &y_ptr, y_odd_offset);
- scratch_set_extra_ptr(usbvision, &u_ptr, y_block_size);
- scratch_set_extra_ptr(usbvision, &v_ptr, y_odd_offset
- + (4 - block_split) * sub_block_size);
-
- for (block = 0; block < (pixel_per_line / sub_block_size); block++) {
- for (pixel = 0; pixel < sub_block_size; pixel += 2) {
- scratch_get(usbvision, &y[0], 2);
- scratch_get_extra(usbvision, &u, &u_ptr, 1);
- scratch_get_extra(usbvision, &v, &v_ptr, 1);
-
- /* I don't use the YUV_TO_RGB macro for better performance */
- v_ = v - 128;
- u_ = u - 128;
- vb = 132252 * v_;
- uvg = -53281 * u_ - 25625 * v_;
- ur = 104595 * u_;
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f_even++ = y[0];
- *f_even++ = v;
- } else {
- y_ = 76284 * (y[0] - 16);
-
- b_ = (y_ + vb) >> 16;
- g_ = (y_ + uvg) >> 16;
- r_ = (y_ + ur) >> 16;
-
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- g = LIMIT_RGB(g_);
- *f_even++ =
- (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_even++ =
- (0x07 & (g >> 3)) |
- (0xF8 & LIMIT_RGB(b_));
- break;
- case V4L2_PIX_FMT_RGB24:
- *f_even++ = LIMIT_RGB(r_);
- *f_even++ = LIMIT_RGB(g_);
- *f_even++ = LIMIT_RGB(b_);
- break;
- case V4L2_PIX_FMT_RGB32:
- *f_even++ = LIMIT_RGB(r_);
- *f_even++ = LIMIT_RGB(g_);
- *f_even++ = LIMIT_RGB(b_);
- f_even++;
- break;
- case V4L2_PIX_FMT_RGB555:
- g = LIMIT_RGB(g_);
- *f_even++ = (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_even++ = (0x03 & (g >> 3)) |
- (0x7C & (LIMIT_RGB(b_) << 2));
- break;
- }
- }
- clipmask_even_index += clipmask_add;
- f_even += stretch_bytes;
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f_even++ = y[1];
- *f_even++ = u;
- } else {
- y_ = 76284 * (y[1] - 16);
-
- b_ = (y_ + vb) >> 16;
- g_ = (y_ + uvg) >> 16;
- r_ = (y_ + ur) >> 16;
-
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- g = LIMIT_RGB(g_);
- *f_even++ =
- (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_even++ =
- (0x07 & (g >> 3)) |
- (0xF8 & LIMIT_RGB(b_));
- break;
- case V4L2_PIX_FMT_RGB24:
- *f_even++ = LIMIT_RGB(r_);
- *f_even++ = LIMIT_RGB(g_);
- *f_even++ = LIMIT_RGB(b_);
- break;
- case V4L2_PIX_FMT_RGB32:
- *f_even++ = LIMIT_RGB(r_);
- *f_even++ = LIMIT_RGB(g_);
- *f_even++ = LIMIT_RGB(b_);
- f_even++;
- break;
- case V4L2_PIX_FMT_RGB555:
- g = LIMIT_RGB(g_);
- *f_even++ = (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_even++ = (0x03 & (g >> 3)) |
- (0x7C & (LIMIT_RGB(b_) << 2));
- break;
- }
- }
- clipmask_even_index += clipmask_add;
- f_even += stretch_bytes;
-
- scratch_get_extra(usbvision, &y[0], &y_ptr, 2);
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f_odd++ = y[0];
- *f_odd++ = v;
- } else {
- y_ = 76284 * (y[0] - 16);
-
- b_ = (y_ + vb) >> 16;
- g_ = (y_ + uvg) >> 16;
- r_ = (y_ + ur) >> 16;
-
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- g = LIMIT_RGB(g_);
- *f_odd++ =
- (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_odd++ =
- (0x07 & (g >> 3)) |
- (0xF8 & LIMIT_RGB(b_));
- break;
- case V4L2_PIX_FMT_RGB24:
- *f_odd++ = LIMIT_RGB(r_);
- *f_odd++ = LIMIT_RGB(g_);
- *f_odd++ = LIMIT_RGB(b_);
- break;
- case V4L2_PIX_FMT_RGB32:
- *f_odd++ = LIMIT_RGB(r_);
- *f_odd++ = LIMIT_RGB(g_);
- *f_odd++ = LIMIT_RGB(b_);
- f_odd++;
- break;
- case V4L2_PIX_FMT_RGB555:
- g = LIMIT_RGB(g_);
- *f_odd++ = (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_odd++ = (0x03 & (g >> 3)) |
- (0x7C & (LIMIT_RGB(b_) << 2));
- break;
- }
- }
- clipmask_odd_index += clipmask_add;
- f_odd += stretch_bytes;
-
- if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
- *f_odd++ = y[1];
- *f_odd++ = u;
- } else {
- y_ = 76284 * (y[1] - 16);
-
- b_ = (y_ + vb) >> 16;
- g_ = (y_ + uvg) >> 16;
- r_ = (y_ + ur) >> 16;
-
- switch (frame->v4l2_format.format) {
- case V4L2_PIX_FMT_RGB565:
- g = LIMIT_RGB(g_);
- *f_odd++ =
- (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_odd++ =
- (0x07 & (g >> 3)) |
- (0xF8 & LIMIT_RGB(b_));
- break;
- case V4L2_PIX_FMT_RGB24:
- *f_odd++ = LIMIT_RGB(r_);
- *f_odd++ = LIMIT_RGB(g_);
- *f_odd++ = LIMIT_RGB(b_);
- break;
- case V4L2_PIX_FMT_RGB32:
- *f_odd++ = LIMIT_RGB(r_);
- *f_odd++ = LIMIT_RGB(g_);
- *f_odd++ = LIMIT_RGB(b_);
- f_odd++;
- break;
- case V4L2_PIX_FMT_RGB555:
- g = LIMIT_RGB(g_);
- *f_odd++ = (0x1F & LIMIT_RGB(r_)) |
- (0xE0 & (g << 5));
- *f_odd++ = (0x03 & (g >> 3)) |
- (0x7C & (LIMIT_RGB(b_) << 2));
- break;
- }
- }
- clipmask_odd_index += clipmask_add;
- f_odd += stretch_bytes;
- }
-
- scratch_rm_old(usbvision, y_step[block % y_step_size] * sub_block_size);
- scratch_inc_extra_ptr(&y_ptr, y_step[(block + 2 * block_split) % y_step_size]
- * sub_block_size);
- scratch_inc_extra_ptr(&u_ptr, uv_step[block % uv_step_size]
- * sub_block_size);
- scratch_inc_extra_ptr(&v_ptr, uv_step[(block + 2 * block_split) % uv_step_size]
- * sub_block_size);
- }
-
- scratch_rm_old(usbvision, pixel_per_line * 3 / 2
- + block_split * sub_block_size);
-
- frame->curline += 2 * usbvision->stretch_height;
- *pcopylen += frame->v4l2_linesize * 2 * usbvision->stretch_height;
-
- if (frame->curline >= frame->frmheight)
- return parse_state_next_frame;
- return parse_state_continue;
-}
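The magic constants in the conversion above are the ITU-R BT.601 YCbCr-to-RGB coefficients scaled by 2^16: 76284 ~= 1.164*65536, 104595 ~= 1.596*65536, 53281 ~= 0.813*65536, 25625 ~= 0.391*65536 and 132252 ~= 2.018*65536 (note that the driver's u/v naming appears swapped relative to the usual Cb/Cr convention). Restated as a standalone sketch, with clamping left to the caller as LIMIT_RGB() does above (yuv601_to_rgb() is an illustrative name):

static void yuv601_to_rgb(unsigned char y, int cb, int cr,
			  int *r, int *g, int *b)
{
	int y_ = 76284 * (y - 16);	/* 1.164 * 65536 */

	cb -= 128;
	cr -= 128;
	*r = (y_ + 104595 * cr) >> 16;
	*g = (y_ - 53281 * cr - 25625 * cb) >> 16;
	*b = (y_ + 132252 * cb) >> 16;
}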
-
-/*
- * usbvision_parse_data()
- *
- * Generic routine to parse the scratch buffer. It employs either
- * usbvision_find_header() or usbvision_parse_lines() to do most
- * of work.
- *
- */
-static void usbvision_parse_data(struct usb_usbvision *usbvision)
-{
- struct usbvision_frame *frame;
- enum parse_state newstate;
- long copylen = 0;
- unsigned long lock_flags;
-
- frame = usbvision->cur_frame;
-
- PDEBUG(DBG_PARSE, "parsing len=%d\n", scratch_len(usbvision));
-
- while (1) {
- newstate = parse_state_out;
- if (scratch_len(usbvision)) {
- if (frame->scanstate == scan_state_scanning) {
- newstate = usbvision_find_header(usbvision);
- } else if (frame->scanstate == scan_state_lines) {
- if (usbvision->isoc_mode == ISOC_MODE_YUV420)
- newstate = usbvision_parse_lines_420(usbvision, &copylen);
- else if (usbvision->isoc_mode == ISOC_MODE_YUV422)
- newstate = usbvision_parse_lines_422(usbvision, &copylen);
- else if (usbvision->isoc_mode == ISOC_MODE_COMPRESS)
- newstate = usbvision_parse_compress(usbvision, &copylen);
- }
- }
- if (newstate == parse_state_continue)
- continue;
- if ((newstate == parse_state_next_frame) || (newstate == parse_state_out))
- break;
- return; /* parse_state_end_parse */
- }
-
- if (newstate == parse_state_next_frame) {
- frame->grabstate = frame_state_done;
- frame->ts = ktime_get_ns();
- frame->sequence = usbvision->frame_num;
-
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- list_move_tail(&(frame->frame), &usbvision->outqueue);
- usbvision->cur_frame = NULL;
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
- usbvision->frame_num++;
-
- /* This will cause the process to request another frame. */
- if (waitqueue_active(&usbvision->wait_frame)) {
- PDEBUG(DBG_PARSE, "Wake up !");
- wake_up_interruptible(&usbvision->wait_frame);
- }
- } else {
- frame->grabstate = frame_state_grabbing;
- }
-
- /* Update the frame's uncompressed length. */
- frame->scanlength += copylen;
-}
-
-
-/*
- * Make all of the blocks of data contiguous
- */
-static int usbvision_compress_isochronous(struct usb_usbvision *usbvision,
- struct urb *urb)
-{
- unsigned char *packet_data;
- int i, totlen = 0;
-
- for (i = 0; i < urb->number_of_packets; i++) {
- int packet_len = urb->iso_frame_desc[i].actual_length;
- int packet_stat = urb->iso_frame_desc[i].status;
-
- packet_data = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
-
- /* Detect and ignore errored packets */
-		if (packet_stat) { /* non-zero status marks a transfer error */
- PDEBUG(DBG_ISOC, "data error: [%d] len=%d, status=%X", i, packet_len, packet_stat);
- usbvision->isoc_err_count++;
- continue;
- }
-
-		/* Detect and ignore empty or invalid-length packets */
- if (packet_len < 0) {
- PDEBUG(DBG_ISOC, "error packet [%d]", i);
- usbvision->isoc_skip_count++;
- continue;
-		} else if (packet_len == 0) { /* zero length presumably marks the frame end */
- PDEBUG(DBG_ISOC, "null packet [%d]", i);
- usbvision->isocstate = isoc_state_no_frame;
- usbvision->isoc_skip_count++;
- continue;
- } else if (packet_len > usbvision->isoc_packet_size) {
- PDEBUG(DBG_ISOC, "packet[%d] > isoc_packet_size", i);
- usbvision->isoc_skip_count++;
- continue;
- }
-
- PDEBUG(DBG_ISOC, "packet ok [%d] len=%d", i, packet_len);
-
- if (usbvision->isocstate == isoc_state_no_frame) { /* new frame begins */
- usbvision->isocstate = isoc_state_in_frame;
- scratch_mark_header(usbvision);
- usbvision_measure_bandwidth(usbvision);
- PDEBUG(DBG_ISOC, "packet with header");
- }
-
- /*
- * If usbvision continues to feed us with data but there is no
- * consumption (if, for example, V4L client fell asleep) we
- * may overflow the buffer. We have to move old data over to
- * free room for new data. This is bad for old data. If we
- * just drop new data then it's bad for new data... choose
- * your favorite evil here.
- */
- if (scratch_free(usbvision) < packet_len) {
- usbvision->scratch_ovf_count++;
- PDEBUG(DBG_ISOC, "scratch buf overflow! scr_len: %d, n: %d",
- scratch_len(usbvision), packet_len);
- scratch_rm_old(usbvision, packet_len - scratch_free(usbvision));
- }
-
- /* Now we know that there is enough room in scratch buffer */
- scratch_put(usbvision, packet_data, packet_len);
- totlen += packet_len;
- usbvision->isoc_data_count += packet_len;
- usbvision->isoc_packet_count++;
- }
-#if ENABLE_HEXDUMP
- if (totlen > 0) {
- static int foo;
-
- if (foo < 1) {
-			printk(KERN_DEBUG "+%d.\n", scratch_len(usbvision));
-			usbvision_hexdump(packet_data, (totlen > 64) ? 64 : totlen);
- ++foo;
- }
- }
-#endif
- return totlen;
-}
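-
-/*
- * The overflow handling above implements a drop-oldest policy on the
- * scratch ring buffer. A minimal generic sketch of the same idea,
- * independent of the scratch_*() helpers (all names below are
- * illustrative, not part of this driver):
- */
-#if 0
-static void ring_put_drop_oldest(unsigned char *ring, int ring_size,
-				 int *head, int *len,
-				 const unsigned char *data, int n)
-{
-	int i;
-
-	if (n > ring_size)
-		return;					/* can never fit */
-	if (*len + n > ring_size) {
-		int drop = *len + n - ring_size;
-
-		*head = (*head + drop) % ring_size;	/* discard oldest */
-		*len -= drop;
-	}
-	for (i = 0; i < n; i++)
-		ring[(*head + *len + i) % ring_size] = data[i];
-	*len += n;
-}
-#endif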
-
-static void usbvision_isoc_irq(struct urb *urb)
-{
- int err_code = 0;
- int len;
- struct usb_usbvision *usbvision = urb->context;
- int i;
- struct usbvision_frame **f;
-
- /* We don't want to do anything if we are about to be removed! */
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return;
-
- /* any urb with wrong status is ignored without acknowledgment */
- if (urb->status == -ENOENT)
- return;
-
- f = &usbvision->cur_frame;
-
- /* Manage streaming interruption */
- if (usbvision->streaming == stream_interrupt) {
- usbvision->streaming = stream_idle;
- if ((*f)) {
- (*f)->grabstate = frame_state_ready;
- (*f)->scanstate = scan_state_scanning;
- }
- PDEBUG(DBG_IRQ, "stream interrupted");
- wake_up_interruptible(&usbvision->wait_stream);
- }
-
- /* Copy the data received into our scratch buffer */
- len = usbvision_compress_isochronous(usbvision, urb);
-
- usbvision->isoc_urb_count++;
- usbvision->urb_length = len;
-
- if (usbvision->streaming == stream_on) {
- /* If we collected enough data let's parse! */
- if (scratch_len(usbvision) > USBVISION_HEADER_LENGTH &&
- !list_empty(&(usbvision->inqueue))) {
- if (!(*f)) {
- (*f) = list_entry(usbvision->inqueue.next,
- struct usbvision_frame,
- frame);
- }
- usbvision_parse_data(usbvision);
- } else {
-			/* If we don't have a frame
-			   we're currently working on, complain */
- PDEBUG(DBG_IRQ,
- "received data, but no one needs it");
- scratch_reset(usbvision);
- }
- } else {
- PDEBUG(DBG_IRQ, "received data, but no one needs it");
- scratch_reset(usbvision);
- }
-
- for (i = 0; i < USBVISION_URB_FRAMES; i++) {
- urb->iso_frame_desc[i].status = 0;
- urb->iso_frame_desc[i].actual_length = 0;
- }
-
- urb->status = 0;
- urb->dev = usbvision->dev;
- err_code = usb_submit_urb(urb, GFP_ATOMIC);
-
- if (err_code) {
- dev_err(&usbvision->dev->dev,
- "%s: usb_submit_urb failed: error %d\n",
- __func__, err_code);
- }
-
- return;
-}
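-
-/*
- * The completion handler above follows the usual USB isochronous
- * pattern: consume the data, clear each packet's status and
- * actual_length, then resubmit the same URB with GFP_ATOMIC, since
- * completion handlers run in interrupt context.
- */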
-
-/*************************************/
-/* Low level usbvision access functions */
-/*************************************/
-
-/*
- * usbvision_read_reg()
- *
- * return < 0 -> Error
- * >= 0 -> Data
- */
-
-int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg)
-{
- int err_code = 0;
- unsigned char *buffer = usbvision->ctrl_urb_buffer;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return -1;
-
- err_code = usb_control_msg(usbvision->dev, usb_rcvctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
- 0, (__u16) reg, buffer, 1, HZ);
-
- if (err_code < 0) {
- dev_err(&usbvision->dev->dev,
- "%s: failed: error %d\n", __func__, err_code);
- return err_code;
- }
- return buffer[0];
-}
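-
-/*
- * A hedged usage sketch of the return convention above (negative means
- * error, anything else is register data); usbvision_check_ready() is
- * illustrative and not part of this driver:
- */
-#if 0
-static int usbvision_check_ready(struct usb_usbvision *usbvision)
-{
-	int status = usbvision_read_reg(usbvision, USBVISION_STATUS_REG);
-
-	if (status < 0)
-		return status;		/* USB transfer failed, propagate */
-	return status & 0x01;		/* otherwise: bit 0 of the register */
-}
-#endif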
-
-/*
- * usbvision_write_reg()
- *
- * return 1 -> Reg written
- * 0 -> usbvision is not yet ready
- * -1 -> Something went wrong
- */
-
-int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
- unsigned char value)
-{
- int err_code = 0;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- usbvision->ctrl_urb_buffer[0] = value;
- err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0, (__u16) reg,
- usbvision->ctrl_urb_buffer, 1, HZ);
-
- if (err_code < 0) {
- dev_err(&usbvision->dev->dev,
- "%s: failed: error %d\n", __func__, err_code);
- }
- return err_code;
-}
-
-
-static void usbvision_ctrl_urb_complete(struct urb *urb)
-{
- struct usb_usbvision *usbvision = (struct usb_usbvision *)urb->context;
-
- PDEBUG(DBG_IRQ, "");
- usbvision->ctrl_urb_busy = 0;
-}
-
-
-static int usbvision_write_reg_irq(struct usb_usbvision *usbvision, int address,
- unsigned char *data, int len)
-{
- int err_code = 0;
-
- PDEBUG(DBG_IRQ, "");
- if (len > 8)
- return -EFAULT;
- if (usbvision->ctrl_urb_busy)
- return -EBUSY;
- usbvision->ctrl_urb_busy = 1;
-
- usbvision->ctrl_urb_setup.bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
- usbvision->ctrl_urb_setup.bRequest = USBVISION_OP_CODE;
- usbvision->ctrl_urb_setup.wValue = 0;
- usbvision->ctrl_urb_setup.wIndex = cpu_to_le16(address);
- usbvision->ctrl_urb_setup.wLength = cpu_to_le16(len);
- usb_fill_control_urb(usbvision->ctrl_urb, usbvision->dev,
- usb_sndctrlpipe(usbvision->dev, 1),
- (unsigned char *)&usbvision->ctrl_urb_setup,
- (void *)usbvision->ctrl_urb_buffer, len,
- usbvision_ctrl_urb_complete,
- (void *)usbvision);
-
- memcpy(usbvision->ctrl_urb_buffer, data, len);
-
- err_code = usb_submit_urb(usbvision->ctrl_urb, GFP_ATOMIC);
- if (err_code < 0) {
- /* error in usb_submit_urb() */
- usbvision->ctrl_urb_busy = 0;
- }
- PDEBUG(DBG_IRQ, "submit %d byte: error %d", len, err_code);
- return err_code;
-}
-
-
-static int usbvision_init_compression(struct usb_usbvision *usbvision)
-{
- usbvision->last_isoc_frame_num = -1;
- usbvision->isoc_data_count = 0;
- usbvision->isoc_packet_count = 0;
- usbvision->isoc_skip_count = 0;
- usbvision->compr_level = 50;
- usbvision->last_compr_level = -1;
- usbvision->isoc_urb_count = 0;
- usbvision->request_intra = 1;
- usbvision->isoc_measure_bandwidth_count = 0;
-
- return 0;
-}
-
-/* this function measures the bandwidth used since the last call
- * return: 0 : no error
- * sets used_bandwidth to 1-100: the percentage of full bandwidth relative to isoc_packet_size
- */
-static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision)
-{
- if (usbvision->isoc_measure_bandwidth_count < 2) { /* this gives an average bandwidth of 3 frames */
- usbvision->isoc_measure_bandwidth_count++;
- return 0;
- }
- if ((usbvision->isoc_packet_size > 0) && (usbvision->isoc_packet_count > 0)) {
- usbvision->used_bandwidth = usbvision->isoc_data_count /
- (usbvision->isoc_packet_count + usbvision->isoc_skip_count) *
- 100 / usbvision->isoc_packet_size;
- }
- usbvision->isoc_measure_bandwidth_count = 0;
- usbvision->isoc_data_count = 0;
- usbvision->isoc_packet_count = 0;
- usbvision->isoc_skip_count = 0;
- return 0;
-}
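-
-/*
- * A worked example of the formula above, with hypothetical numbers:
- * isoc_data_count = 800000 bytes over isoc_packet_count +
- * isoc_skip_count = 1000 packets gives an average payload of 800 bytes
- * per packet; with isoc_packet_size = 959 that yields
- * used_bandwidth = 800 * 100 / 959 = 83, i.e. ~83% of full bandwidth.
- */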
-
-static int usbvision_adjust_compression(struct usb_usbvision *usbvision)
-{
- int err_code = 0;
- unsigned char buffer[6];
-
- PDEBUG(DBG_IRQ, "");
- if ((adjust_compression) && (usbvision->used_bandwidth > 0)) {
- usbvision->compr_level += (usbvision->used_bandwidth - 90) / 2;
- RESTRICT_TO_RANGE(usbvision->compr_level, 0, 100);
- if (usbvision->compr_level != usbvision->last_compr_level) {
- int distortion;
-
- if (usbvision->bridge_type == BRIDGE_NT1004 || usbvision->bridge_type == BRIDGE_NT1005) {
- buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM Threshold 1 */
- buffer[1] = (unsigned char)(4 + 8 * usbvision->compr_level / 100); /* PCM Threshold 2 */
- distortion = 7 + 248 * usbvision->compr_level / 100;
- buffer[2] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (inter) */
- buffer[3] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (intra) */
- distortion = 1 + 42 * usbvision->compr_level / 100;
- buffer[4] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (inter) */
- buffer[5] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (intra) */
- } else { /* BRIDGE_NT1003 */
- buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM threshold 1 */
- buffer[1] = (unsigned char)(4 + 8 * usbvision->compr_level / 100); /* PCM threshold 2 */
- distortion = 2 + 253 * usbvision->compr_level / 100;
- buffer[2] = (unsigned char)(distortion & 0xFF); /* distortion threshold bit0-7 */
- buffer[3] = 0; /* (unsigned char)((distortion >> 8) & 0x0F); distortion threshold bit 8-11 */
- distortion = 0 + 43 * usbvision->compr_level / 100;
- buffer[4] = (unsigned char)(distortion & 0xFF); /* maximum distortion bit0-7 */
- buffer[5] = 0; /* (unsigned char)((distortion >> 8) & 0x01); maximum distortion bit 8 */
- }
- err_code = usbvision_write_reg_irq(usbvision, USBVISION_PCM_THR1, buffer, 6);
- if (err_code == 0) {
- PDEBUG(DBG_IRQ, "new compr params %#02x %#02x %#02x %#02x %#02x %#02x", buffer[0],
- buffer[1], buffer[2], buffer[3], buffer[4], buffer[5]);
- usbvision->last_compr_level = usbvision->compr_level;
- }
- }
- }
- return err_code;
-}
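-
-/*
- * The adjustment above is a simple proportional controller aiming at
- * roughly 90% bus usage. Hypothetical example: used_bandwidth = 100
- * gives compr_level += (100 - 90) / 2 = +5 (compress harder), while
- * used_bandwidth = 70 gives compr_level += (70 - 90) / 2 = -10
- * (compress less); RESTRICT_TO_RANGE() then clamps the level to [0, 100].
- */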
-
-static int usbvision_request_intra(struct usb_usbvision *usbvision)
-{
- unsigned char buffer[1];
-
- PDEBUG(DBG_IRQ, "");
- usbvision->request_intra = 1;
- buffer[0] = 1;
- usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
- return 0;
-}
-
-static int usbvision_unrequest_intra(struct usb_usbvision *usbvision)
-{
- unsigned char buffer[1];
-
- PDEBUG(DBG_IRQ, "");
- usbvision->request_intra = 0;
- buffer[0] = 0;
- usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
- return 0;
-}
-
-/*******************************
- * usbvision utility functions
- *******************************/
-
-int usbvision_power_off(struct usb_usbvision *usbvision)
-{
- int err_code = 0;
-
- PDEBUG(DBG_FUNC, "");
-
- err_code = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
- if (err_code == 1)
- usbvision->power = 0;
- PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code != 1) ? "ERROR" : "power is off", err_code);
- return err_code;
-}
-
-/* configure webcam image sensor using the serial port */
-static int usbvision_init_webcam(struct usb_usbvision *usbvision)
-{
- int rc;
- int i;
- static char init_values[38][3] = {
- { 0x04, 0x12, 0x08 }, { 0x05, 0xff, 0xc8 }, { 0x06, 0x18, 0x07 }, { 0x07, 0x90, 0x00 },
- { 0x09, 0x00, 0x00 }, { 0x0a, 0x00, 0x00 }, { 0x0b, 0x08, 0x00 }, { 0x0d, 0xcc, 0xcc },
- { 0x0e, 0x13, 0x14 }, { 0x10, 0x9b, 0x83 }, { 0x11, 0x5a, 0x3f }, { 0x12, 0xe4, 0x73 },
- { 0x13, 0x88, 0x84 }, { 0x14, 0x89, 0x80 }, { 0x15, 0x00, 0x20 }, { 0x16, 0x00, 0x00 },
- { 0x17, 0xff, 0xa0 }, { 0x18, 0x6b, 0x20 }, { 0x19, 0x22, 0x40 }, { 0x1a, 0x10, 0x07 },
- { 0x1b, 0x00, 0x47 }, { 0x1c, 0x03, 0xe0 }, { 0x1d, 0x00, 0x00 }, { 0x1e, 0x00, 0x00 },
- { 0x1f, 0x00, 0x00 }, { 0x20, 0x00, 0x00 }, { 0x21, 0x00, 0x00 }, { 0x22, 0x00, 0x00 },
- { 0x23, 0x00, 0x00 }, { 0x24, 0x00, 0x00 }, { 0x25, 0x00, 0x00 }, { 0x26, 0x00, 0x00 },
- { 0x27, 0x00, 0x00 }, { 0x28, 0x00, 0x00 }, { 0x29, 0x00, 0x00 }, { 0x08, 0x80, 0x60 },
- { 0x0f, 0x2d, 0x24 }, { 0x0c, 0x80, 0x80 }
- };
- unsigned char *value = usbvision->ctrl_urb_buffer;
-
- /* the only difference between PAL and NTSC init_values */
- if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_NTSC)
- init_values[4][1] = 0x34;
-
- for (i = 0; i < sizeof(init_values) / 3; i++) {
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT);
- memcpy(value, init_values[i], 3);
- rc = usb_control_msg(usbvision->dev,
- usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_SER_DAT1, value,
- 3, HZ);
- if (rc < 0)
- return rc;
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SIO);
- /* write 3 bytes to the serial port using SIO mode */
- usbvision_write_reg(usbvision, USBVISION_SER_CONT, 3 | 0x10);
- usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, 0);
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT);
- usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_IO_2);
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT);
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_DAT_IO);
- usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT | USBVISION_DAT_IO);
- }
-
- return 0;
-}
-
-/*
- * usbvision_set_video_format()
- *
- */
-static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format)
-{
- static const char proc[] = "usbvision_set_video_format";
- unsigned char *value = usbvision->ctrl_urb_buffer;
- int rc;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- PDEBUG(DBG_FUNC, "isoc_mode %#02x", format);
-
- if ((format != ISOC_MODE_YUV422)
- && (format != ISOC_MODE_YUV420)
- && (format != ISOC_MODE_COMPRESS)) {
- printk(KERN_ERR "usbvision: unknown video format %02x, using default YUV420",
- format);
- format = ISOC_MODE_YUV420;
- }
- value[0] = 0x0A; /* TODO: See the effect of the filter */
- value[1] = format; /* Sets the VO_MODE register which follows FILT_CONT */
- rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_FILT_CONT, value, 2, HZ);
-
- if (rc < 0) {
- printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
- proc, rc);
- }
- usbvision->isoc_mode = format;
- return rc;
-}
-
-/*
- * usbvision_set_output()
- *
- */
-
-int usbvision_set_output(struct usb_usbvision *usbvision, int width,
- int height)
-{
- int err_code = 0;
- int usb_width, usb_height;
- unsigned int frame_rate = 0, frame_drop = 0;
- unsigned char *value = usbvision->ctrl_urb_buffer;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- if (width > MAX_USB_WIDTH) {
- usb_width = width / 2;
- usbvision->stretch_width = 2;
- } else {
- usb_width = width;
- usbvision->stretch_width = 1;
- }
-
- if (height > MAX_USB_HEIGHT) {
- usb_height = height / 2;
- usbvision->stretch_height = 2;
- } else {
- usb_height = height;
- usbvision->stretch_height = 1;
- }
-
- RESTRICT_TO_RANGE(usb_width, MIN_FRAME_WIDTH, MAX_USB_WIDTH);
- usb_width &= ~(MIN_FRAME_WIDTH-1);
- RESTRICT_TO_RANGE(usb_height, MIN_FRAME_HEIGHT, MAX_USB_HEIGHT);
- usb_height &= ~(1);
-
- PDEBUG(DBG_FUNC, "usb %dx%d; screen %dx%d; stretch %dx%d",
- usb_width, usb_height, width, height,
- usbvision->stretch_width, usbvision->stretch_height);
-
-	/* Avoid rewriting values that have not changed */
- if ((usb_width != usbvision->curwidth) || (usb_height != usbvision->curheight)) {
- value[0] = usb_width & 0xff; /* LSB */
- value[1] = (usb_width >> 8) & 0x03; /* MSB */
- value[2] = usb_height & 0xff; /* LSB */
- value[3] = (usb_height >> 8) & 0x03; /* MSB */
-
- err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
- 0, (__u16) USBVISION_LXSIZE_O, value, 4, HZ);
-
- if (err_code < 0) {
- dev_err(&usbvision->dev->dev,
- "%s failed: error %d\n", __func__, err_code);
- return err_code;
- }
- usbvision->curwidth = usbvision->stretch_width * usb_width;
- usbvision->curheight = usbvision->stretch_height * usb_height;
- }
-
- if (usbvision->isoc_mode == ISOC_MODE_YUV422)
- frame_rate = (usbvision->isoc_packet_size * 1000) / (usb_width * usb_height * 2);
- else if (usbvision->isoc_mode == ISOC_MODE_YUV420)
- frame_rate = (usbvision->isoc_packet_size * 1000) / ((usb_width * usb_height * 12) / 8);
- else
- frame_rate = FRAMERATE_MAX;
-
- if (usbvision->tvnorm_id & V4L2_STD_625_50)
- frame_drop = frame_rate * 32 / 25 - 1;
- else if (usbvision->tvnorm_id & V4L2_STD_525_60)
- frame_drop = frame_rate * 32 / 30 - 1;
-
- RESTRICT_TO_RANGE(frame_drop, FRAMERATE_MIN, FRAMERATE_MAX);
-
- PDEBUG(DBG_FUNC, "frame_rate %d fps, frame_drop %d", frame_rate, frame_drop);
-
- frame_drop = FRAMERATE_MAX; /* We can allow the maximum here, because dropping is controlled */
-
- if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
- if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_PAL)
- frame_drop = 25;
- else
- frame_drop = 30;
- }
-
- /* frame_drop = 7; => frame_phase = 1, 5, 9, 13, 17, 21, 25, 0, 4, 8, ...
- => frame_skip = 4;
- => frame_rate = (7 + 1) * 25 / 32 = 200 / 32 = 6.25;
-
- frame_drop = 9; => frame_phase = 1, 5, 8, 11, 14, 17, 21, 24, 27, 1, 4, 8, ...
- => frame_skip = 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, ...
- => frame_rate = (9 + 1) * 25 / 32 = 250 / 32 = 7.8125;
- */
- err_code = usbvision_write_reg(usbvision, USBVISION_FRM_RATE, frame_drop);
- return err_code;
-}
-
-
-/*
- * usbvision_frames_alloc
- * allocate the required frames
- */
-int usbvision_frames_alloc(struct usb_usbvision *usbvision, int number_of_frames)
-{
- int i;
-
-	/* needs to be page aligned because the buffers can be mapped individually! */
- usbvision->max_frame_size = PAGE_ALIGN(usbvision->curwidth *
- usbvision->curheight *
- usbvision->palette.bytes_per_pixel);
-
-	/* Try our best to allocate the number of frames the user wants in the remaining memory */
- usbvision->num_frames = number_of_frames;
- while (usbvision->num_frames > 0) {
- usbvision->fbuf_size = usbvision->num_frames * usbvision->max_frame_size;
- usbvision->fbuf = usbvision_rvmalloc(usbvision->fbuf_size);
- if (usbvision->fbuf)
- break;
- usbvision->num_frames--;
- }
-
- /* Allocate all buffers */
- for (i = 0; i < usbvision->num_frames; i++) {
- usbvision->frame[i].index = i;
- usbvision->frame[i].grabstate = frame_state_unused;
- usbvision->frame[i].data = usbvision->fbuf +
- i * usbvision->max_frame_size;
- /*
- * Set default sizes for read operation.
- */
- usbvision->stretch_width = 1;
- usbvision->stretch_height = 1;
- usbvision->frame[i].width = usbvision->curwidth;
- usbvision->frame[i].height = usbvision->curheight;
- usbvision->frame[i].bytes_read = 0;
- }
- PDEBUG(DBG_FUNC, "allocated %d frames (%d bytes per frame)",
- usbvision->num_frames, usbvision->max_frame_size);
- return usbvision->num_frames;
-}
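-
-/*
- * The allocation loop above is the common "degrade until it fits"
- * pattern. A minimal generic sketch, with vmalloc() standing in for
- * usbvision_rvmalloc() (names are illustrative):
- */
-#if 0
-static void *alloc_largest(size_t unit, int *count)
-{
-	void *p = NULL;
-
-	while (*count > 0 && !(p = vmalloc(unit * *count)))
-		(*count)--;		/* ask for one unit less and retry */
-	return p;
-}
-#endif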
-
-/*
- * usbvision_frames_free
- * frees memory allocated for the frames
- */
-void usbvision_frames_free(struct usb_usbvision *usbvision)
-{
- /* Have to free all that memory */
- PDEBUG(DBG_FUNC, "free %d frames", usbvision->num_frames);
-
- if (usbvision->fbuf != NULL) {
- usbvision_rvfree(usbvision->fbuf, usbvision->fbuf_size);
- usbvision->fbuf = NULL;
-
- usbvision->num_frames = 0;
- }
-}
-
-/*
- * usbvision_empty_framequeues()
- * prepare queues for incoming and outgoing frames
- */
-void usbvision_empty_framequeues(struct usb_usbvision *usbvision)
-{
- u32 i;
-
- INIT_LIST_HEAD(&(usbvision->inqueue));
- INIT_LIST_HEAD(&(usbvision->outqueue));
-
- for (i = 0; i < USBVISION_NUMFRAMES; i++) {
- usbvision->frame[i].grabstate = frame_state_unused;
- usbvision->frame[i].bytes_read = 0;
- }
-}
-
-/*
- * usbvision_stream_interrupt()
- * stops streaming
- */
-int usbvision_stream_interrupt(struct usb_usbvision *usbvision)
-{
- int ret = 0;
-
- /* stop reading from the device */
-
- usbvision->streaming = stream_interrupt;
- ret = wait_event_timeout(usbvision->wait_stream,
- (usbvision->streaming == stream_idle),
- msecs_to_jiffies(USBVISION_NUMSBUF*USBVISION_URB_FRAMES));
- return ret;
-}
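-
-/*
- * Note that wait_event_timeout() returns 0 if the timeout elapsed with
- * the condition still false and the remaining jiffies (>= 1) otherwise,
- * so a zero return here means the isoc completion handler never moved
- * the device to stream_idle.
- */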
-
-/*
- * usbvision_set_compress_params()
- *
- */
-
-static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
-{
-	static const char proc[] = "usbvision_set_compress_params: ";
- int rc;
- unsigned char *value = usbvision->ctrl_urb_buffer;
-
- value[0] = 0x0F; /* Intra-Compression cycle */
- value[1] = 0x01; /* Reg.45 one line per strip */
- value[2] = 0x00; /* Reg.46 Force intra mode on all new frames */
- value[3] = 0x00; /* Reg.47 FORCE_UP <- 0 normal operation (not force) */
-	value[4] = 0xA2;	/* Reg.48 BUF_THR; unclear whether this does anything in uncompressed mode */
- value[5] = 0x00; /* Reg.49 DVI_YUV This has nothing to do with compression */
-
-	/* values captured from an NT1004 */
- /* value[0] = 0xFF; Never apply intra mode automatically */
- /* value[1] = 0xF1; Use full frame height for virtual strip width; One line per strip */
- /* value[2] = 0x01; Force intra mode on all new frames */
- /* value[3] = 0x00; Strip size 400 Bytes; do not force up */
- /* value[4] = 0xA2; */
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_INTRA_CYC, value, 5, HZ);
-
- if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
- proc, rc);
- return rc;
- }
-
- if (usbvision->bridge_type == BRIDGE_NT1004) {
- value[0] = 20; /* PCM Threshold 1 */
- value[1] = 12; /* PCM Threshold 2 */
- value[2] = 255; /* Distortion Threshold inter */
- value[3] = 255; /* Distortion Threshold intra */
- value[4] = 43; /* Max Distortion inter */
- value[5] = 43; /* Max Distortion intra */
- } else {
- value[0] = 20; /* PCM Threshold 1 */
- value[1] = 12; /* PCM Threshold 2 */
- value[2] = 255; /* Distortion Threshold d7-d0 */
- value[3] = 0; /* Distortion Threshold d11-d8 */
- value[4] = 43; /* Max Distortion d7-d0 */
- value[5] = 0; /* Max Distortion d8 */
- }
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_PCM_THR1, value, 6, HZ);
-
- if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
- proc, rc);
- }
- return rc;
-}
-
-
-/*
- * usbvision_set_input()
- *
- * Set the input (saa711x, ...) size x y and other misc input params
- * It is not known whether these parameters are right
- *
- */
-int usbvision_set_input(struct usb_usbvision *usbvision)
-{
- static const char proc[] = "usbvision_set_input: ";
- int rc;
- unsigned char *value = usbvision->ctrl_urb_buffer;
- unsigned char dvi_yuv_value;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- /* Set input format expected from decoder*/
- if (usbvision_device_data[usbvision->dev_model].vin_reg1_override) {
- value[0] = usbvision_device_data[usbvision->dev_model].vin_reg1;
- } else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) {
- /* SAA7113 uses 8 bit output */
- value[0] = USBVISION_8_422_SYNC;
- } else {
- /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 using sync pulses
- * as that is how saa7111 is configured */
- value[0] = USBVISION_16_422_SYNC;
- /* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/
- }
-
- rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
- if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
- proc, rc);
- return rc;
- }
-
-
- if (usbvision->tvnorm_id & V4L2_STD_PAL) {
- value[0] = 0xC0;
- value[1] = 0x02; /* 0x02C0 -> 704 Input video line length */
- value[2] = 0x20;
- value[3] = 0x01; /* 0x0120 -> 288 Input video n. of lines */
- value[4] = 0x60;
- value[5] = 0x00; /* 0x0060 -> 96 Input video h offset */
- value[6] = 0x16;
- value[7] = 0x00; /* 0x0016 -> 22 Input video v offset */
- } else if (usbvision->tvnorm_id & V4L2_STD_SECAM) {
- value[0] = 0xC0;
- value[1] = 0x02; /* 0x02C0 -> 704 Input video line length */
- value[2] = 0x20;
- value[3] = 0x01; /* 0x0120 -> 288 Input video n. of lines */
- value[4] = 0x01;
- value[5] = 0x00; /* 0x0001 -> 01 Input video h offset */
- value[6] = 0x01;
- value[7] = 0x00; /* 0x0001 -> 01 Input video v offset */
- } else { /* V4L2_STD_NTSC */
- value[0] = 0xD0;
- value[1] = 0x02; /* 0x02D0 -> 720 Input video line length */
- value[2] = 0xF0;
- value[3] = 0x00; /* 0x00F0 -> 240 Input video number of lines */
- value[4] = 0x50;
- value[5] = 0x00; /* 0x0050 -> 80 Input video h offset */
- value[6] = 0x10;
- value[7] = 0x00; /* 0x0010 -> 16 Input video v offset */
- }
-
- /* webcam is only 480 pixels wide, both PAL and NTSC version */
- if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
- value[0] = 0xe0;
- value[1] = 0x01; /* 0x01E0 -> 480 Input video line length */
- }
-
- if (usbvision_device_data[usbvision->dev_model].x_offset >= 0) {
- value[4] = usbvision_device_data[usbvision->dev_model].x_offset & 0xff;
- value[5] = (usbvision_device_data[usbvision->dev_model].x_offset & 0x0300) >> 8;
- }
-
- if (adjust_x_offset != -1) {
- value[4] = adjust_x_offset & 0xff;
- value[5] = (adjust_x_offset & 0x0300) >> 8;
- }
-
- if (usbvision_device_data[usbvision->dev_model].y_offset >= 0) {
- value[6] = usbvision_device_data[usbvision->dev_model].y_offset & 0xff;
- value[7] = (usbvision_device_data[usbvision->dev_model].y_offset & 0x0300) >> 8;
- }
-
- if (adjust_y_offset != -1) {
- value[6] = adjust_y_offset & 0xff;
- value[7] = (adjust_y_offset & 0x0300) >> 8;
- }
-
- rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE, /* USBVISION specific code */
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_LXSIZE_I, value, 8, HZ);
- if (rc < 0) {
- printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
- proc, rc);
- return rc;
- }
-
-
- dvi_yuv_value = 0x00; /* U comes after V, Ya comes after U/V, Yb comes after Yb */
-
- if (usbvision_device_data[usbvision->dev_model].dvi_yuv_override) {
- dvi_yuv_value = usbvision_device_data[usbvision->dev_model].dvi_yuv;
- } else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) {
- /* This changes as the fine sync control changes. Further investigation necessary */
- dvi_yuv_value = 0x06;
- }
-
- return usbvision_write_reg(usbvision, USBVISION_DVI_YUV, dvi_yuv_value);
-}
-
-
-/*
- * usbvision_set_dram_settings()
- *
- * Set the buffer address needed by the usbvision dram to operate
- * These values were captured with usbsnoop.
- *
- */
-
-static int usbvision_set_dram_settings(struct usb_usbvision *usbvision)
-{
- unsigned char *value = usbvision->ctrl_urb_buffer;
- int rc;
-
- if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) {
- value[0] = 0x42;
- value[1] = 0x71;
- value[2] = 0xff;
- value[3] = 0x00;
- value[4] = 0x98;
- value[5] = 0xe0;
- value[6] = 0x71;
- value[7] = 0xff;
- /* UR: 0x0E200-0x3FFFF = 204288 Words (1 Word = 2 Byte) */
- /* FDL: 0x00000-0x0E099 = 57498 Words */
- /* VDW: 0x0E3FF-0x3FFFF */
- } else {
- value[0] = 0x42;
- value[1] = 0x00;
- value[2] = 0xff;
- value[3] = 0x00;
- value[4] = 0x00;
- value[5] = 0x00;
- value[6] = 0x00;
- value[7] = 0xff;
- }
- /* These are the values of the address of the video buffer,
- * they have to be loaded into the USBVISION_DRM_PRM1-8
- *
- * Start address of video output buffer for read: drm_prm1-2 -> 0x00000
- * End address of video output buffer for read: drm_prm1-3 -> 0x1ffff
- * Start address of video frame delay buffer: drm_prm1-4 -> 0x20000
- * Only used in compressed mode
- * End address of video frame delay buffer: drm_prm1-5-6 -> 0x3ffff
- * Only used in compressed mode
- * Start address of video output buffer for write: drm_prm1-7 -> 0x00000
- * End address of video output buffer for write: drm_prm1-8 -> 0x1ffff
- */
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return 0;
-
- rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE, /* USBVISION specific code */
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_DRM_PRM1, value, 8, HZ);
-
- if (rc < 0) {
- dev_err(&usbvision->dev->dev, "%s: ERROR=%d\n", __func__, rc);
- return rc;
- }
-
- /* Restart the video buffer logic */
- rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, USBVISION_RES_UR |
- USBVISION_RES_FDL | USBVISION_RES_VDW);
- if (rc < 0)
- return rc;
- rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, 0x00);
-
- return rc;
-}
-
-/*
- * usbvision_power_on()
- *
- * Power on the device, enable the suspend-resume logic
- * and reset the isoc endpoint
- *
- */
-
-int usbvision_power_on(struct usb_usbvision *usbvision)
-{
- int err_code = 0;
-
- PDEBUG(DBG_FUNC, "");
-
- usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
- usbvision_write_reg(usbvision, USBVISION_PWR_REG,
- USBVISION_SSPND_EN | USBVISION_RES2);
-
- if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
- usbvision_write_reg(usbvision, USBVISION_VIN_REG1,
- USBVISION_16_422_SYNC | USBVISION_HVALID_PO);
- usbvision_write_reg(usbvision, USBVISION_VIN_REG2,
- USBVISION_NOHVALID | USBVISION_KEEP_BLANK);
- }
- usbvision_write_reg(usbvision, USBVISION_PWR_REG,
- USBVISION_SSPND_EN | USBVISION_PWR_VID);
- mdelay(10);
- err_code = usbvision_write_reg(usbvision, USBVISION_PWR_REG,
- USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2);
- if (err_code == 1)
- usbvision->power = 1;
- PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code < 0) ? "ERROR" : "power is on", err_code);
- return err_code;
-}
-
-
-/*
- * usbvision_begin_streaming()
- * Bit 7 must be set to 0, otherwise incoming frames are dropped; the
- * purpose of the remaining bits is unknown
- */
-int usbvision_begin_streaming(struct usb_usbvision *usbvision)
-{
- if (usbvision->isoc_mode == ISOC_MODE_COMPRESS)
- usbvision_init_compression(usbvision);
- return usbvision_write_reg(usbvision, USBVISION_VIN_REG2,
- USBVISION_NOHVALID | usbvision->vin_reg2_preset);
-}
-
-/*
- * usbvision_restart_isoc()
- * Not sure yet whether touching PWR_REG here loses the configuration
- */
-
-int usbvision_restart_isoc(struct usb_usbvision *usbvision)
-{
- int ret;
-
- ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG,
- USBVISION_SSPND_EN | USBVISION_PWR_VID);
- if (ret < 0)
- return ret;
- ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG,
- USBVISION_SSPND_EN | USBVISION_PWR_VID |
- USBVISION_RES2);
- if (ret < 0)
- return ret;
- ret = usbvision_write_reg(usbvision, USBVISION_VIN_REG2,
- USBVISION_KEEP_BLANK | USBVISION_NOHVALID |
- usbvision->vin_reg2_preset);
- if (ret < 0)
- return ret;
-
- /* TODO: schedule timeout */
- while ((usbvision_read_reg(usbvision, USBVISION_STATUS_REG) & 0x01) != 1)
- ;
-
- return 0;
-}
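-
-/*
- * The busy-wait above spins forever if the status bit never sets, hence
- * the TODO. A hedged sketch of a bounded poll built on
- * usbvision_read_reg(); the 500 ms budget is an assumption, and
- * usbvision_wait_ready() is not part of this driver:
- */
-#if 0
-static int usbvision_wait_ready(struct usb_usbvision *usbvision)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(500);
-
-	while ((usbvision_read_reg(usbvision, USBVISION_STATUS_REG) & 0x01) != 1) {
-		if (time_after(jiffies, timeout))
-			return -ETIMEDOUT;
-		usleep_range(1000, 2000);	/* back off between polls */
-	}
-	return 0;
-}
-#endif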
-
-int usbvision_audio_off(struct usb_usbvision *usbvision)
-{
- if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_AUDIO_MUTE) < 0) {
- printk(KERN_ERR "usbvision_audio_off: can't write reg\n");
- return -1;
- }
- usbvision->audio_mute = 0;
- usbvision->audio_channel = USBVISION_AUDIO_MUTE;
- return 0;
-}
-
-int usbvision_set_audio(struct usb_usbvision *usbvision, int audio_channel)
-{
- if (!usbvision->audio_mute) {
- if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, audio_channel) < 0) {
- printk(KERN_ERR "usbvision_set_audio: can't write iopin register for audio switching\n");
- return -1;
- }
- }
- usbvision->audio_channel = audio_channel;
- return 0;
-}
-
-int usbvision_setup(struct usb_usbvision *usbvision, int format)
-{
- if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM)
- usbvision_init_webcam(usbvision);
- usbvision_set_video_format(usbvision, format);
- usbvision_set_dram_settings(usbvision);
- usbvision_set_compress_params(usbvision);
- usbvision_set_input(usbvision);
- usbvision_set_output(usbvision, MAX_USB_WIDTH, MAX_USB_HEIGHT);
- usbvision_restart_isoc(usbvision);
-
-	/* PCM stuff */
- return USBVISION_IS_OPERATIONAL(usbvision);
-}
-
-int usbvision_set_alternate(struct usb_usbvision *dev)
-{
- int err_code, prev_alt = dev->iface_alt;
- int i;
-
- dev->iface_alt = 0;
- for (i = 0; i < dev->num_alt; i++)
- if (dev->alt_max_pkt_size[i] > dev->alt_max_pkt_size[dev->iface_alt])
- dev->iface_alt = i;
-
- if (dev->iface_alt != prev_alt) {
- dev->isoc_packet_size = dev->alt_max_pkt_size[dev->iface_alt];
- PDEBUG(DBG_FUNC, "setting alternate %d with max_packet_size=%u",
- dev->iface_alt, dev->isoc_packet_size);
- err_code = usb_set_interface(dev->dev, dev->iface, dev->iface_alt);
- if (err_code < 0) {
- dev_err(&dev->dev->dev,
- "cannot change alternate number to %d (error=%i)\n",
- dev->iface_alt, err_code);
- return err_code;
- }
- }
-
- PDEBUG(DBG_ISOC, "ISO Packet Length:%d", dev->isoc_packet_size);
-
- return 0;
-}
-
-/*
- * usbvision_init_isoc()
- *
- */
-int usbvision_init_isoc(struct usb_usbvision *usbvision)
-{
- struct usb_device *dev = usbvision->dev;
- int buf_idx, err_code, reg_value;
- int sb_size;
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return -EFAULT;
-
- usbvision->cur_frame = NULL;
- scratch_reset(usbvision);
-
-	/* Alternate interface 1 has the biggest frame size */
- err_code = usbvision_set_alternate(usbvision);
- if (err_code < 0) {
- usbvision->last_error = err_code;
- return -EBUSY;
- }
- sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size;
-
- reg_value = (16 - usbvision_read_reg(usbvision,
- USBVISION_ALTER_REG)) & 0x0F;
-
- usbvision->usb_bandwidth = reg_value >> 1;
- PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec",
- usbvision->usb_bandwidth);
-
- /* We double buffer the Iso lists */
-
- for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) {
- int j, k;
- struct urb *urb;
-
- urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
- if (urb == NULL)
- return -ENOMEM;
- usbvision->sbuf[buf_idx].urb = urb;
- usbvision->sbuf[buf_idx].data =
- usb_alloc_coherent(usbvision->dev,
- sb_size,
- GFP_KERNEL,
- &urb->transfer_dma);
- if (!usbvision->sbuf[buf_idx].data)
- return -ENOMEM;
-
- urb->dev = dev;
- urb->context = usbvision;
- urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp);
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- urb->interval = 1;
- urb->transfer_buffer = usbvision->sbuf[buf_idx].data;
- urb->complete = usbvision_isoc_irq;
- urb->number_of_packets = USBVISION_URB_FRAMES;
- urb->transfer_buffer_length =
- usbvision->isoc_packet_size * USBVISION_URB_FRAMES;
- for (j = k = 0; j < USBVISION_URB_FRAMES; j++,
- k += usbvision->isoc_packet_size) {
- urb->iso_frame_desc[j].offset = k;
- urb->iso_frame_desc[j].length =
- usbvision->isoc_packet_size;
- }
- }
-
- /* Submit all URBs */
- for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) {
- err_code = usb_submit_urb(usbvision->sbuf[buf_idx].urb,
- GFP_KERNEL);
- if (err_code) {
- dev_err(&usbvision->dev->dev,
- "%s: usb_submit_urb(%d) failed: error %d\n",
- __func__, buf_idx, err_code);
- }
- }
-
- usbvision->streaming = stream_idle;
- PDEBUG(DBG_ISOC, "%s: streaming=1 usbvision->video_endp=$%02x",
- __func__,
- usbvision->video_endp);
- return 0;
-}
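-
-/*
- * The descriptors set up above partition one contiguous transfer
- * buffer, packet j starting at offset j * isoc_packet_size.
- * Hypothetical example: with isoc_packet_size = 959, packet 2 occupies
- * bytes 1918..2876 of the buffer.
- */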
-
-/*
- * usbvision_stop_isoc()
- *
- * This procedure stops streaming and deallocates URBs. Then it
- * activates zero-bandwidth alt. setting of the video interface.
- *
- */
-void usbvision_stop_isoc(struct usb_usbvision *usbvision)
-{
- int buf_idx, err_code, reg_value;
- int sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size;
-
- if ((usbvision->streaming == stream_off) || (usbvision->dev == NULL))
- return;
-
- /* Unschedule all of the iso td's */
- for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) {
- usb_kill_urb(usbvision->sbuf[buf_idx].urb);
- if (usbvision->sbuf[buf_idx].data) {
- usb_free_coherent(usbvision->dev,
- sb_size,
- usbvision->sbuf[buf_idx].data,
- usbvision->sbuf[buf_idx].urb->transfer_dma);
- }
- usb_free_urb(usbvision->sbuf[buf_idx].urb);
- usbvision->sbuf[buf_idx].urb = NULL;
- }
-
- PDEBUG(DBG_ISOC, "%s: streaming=stream_off\n", __func__);
- usbvision->streaming = stream_off;
-
- if (!usbvision->remove_pending) {
- /* Set packet size to 0 */
- usbvision->iface_alt = 0;
- err_code = usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
- if (err_code < 0) {
- dev_err(&usbvision->dev->dev,
- "%s: usb_set_interface() failed: error %d\n",
- __func__, err_code);
- usbvision->last_error = err_code;
- }
- reg_value = (16-usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F;
- usbvision->isoc_packet_size =
- (reg_value == 0) ? 0 : (reg_value * 64) - 1;
- PDEBUG(DBG_ISOC, "ISO Packet Length:%d",
- usbvision->isoc_packet_size);
-
- usbvision->usb_bandwidth = reg_value >> 1;
- PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec",
- usbvision->usb_bandwidth);
- }
-}
-
-int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
-{
- /* inputs #0 and #3 are constant for every SAA711x. */
- /* inputs #1 and #2 are variable for SAA7111 and SAA7113 */
- int mode[4] = { SAA7115_COMPOSITE0, 0, 0, SAA7115_COMPOSITE3 };
- int audio[] = { 1, 0, 0, 0 };
- /* channel 0 is TV with audiochannel 1 (tuner mono) */
- /* channel 1 is Composite with audio channel 0 (line in) */
- /* channel 2 is S-Video with audio channel 0 (line in) */
- /* channel 3 is additional video inputs to the device with audio channel 0 (line in) */
-
- RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs);
- usbvision->ctl_input = channel;
-
- /* set the new channel */
- /* Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video */
- /* Four video input devices -> channel: 0 = Chan White, 1 = Chan Green, 2 = Chan Yellow, 3 = Chan Red */
-
- switch (usbvision_device_data[usbvision->dev_model].codec) {
- case CODEC_SAA7113:
- mode[1] = SAA7115_COMPOSITE2;
- if (switch_svideo_input) {
- /* To handle problems with S-Video Input for
- * some devices. Use switch_svideo_input
- * parameter when loading the module.*/
- mode[2] = SAA7115_COMPOSITE1;
- } else {
- mode[2] = SAA7115_SVIDEO1;
- }
- break;
- case CODEC_SAA7111:
- default:
- /* modes for saa7111 */
- mode[1] = SAA7115_COMPOSITE1;
- mode[2] = SAA7115_SVIDEO1;
- break;
- }
- call_all(usbvision, video, s_routing, mode[channel], 0, 0);
- usbvision_set_audio(usbvision, audio[channel]);
- return 0;
-}
diff --git a/drivers/staging/media/usbvision/usbvision-i2c.c b/drivers/staging/media/usbvision/usbvision-i2c.c
deleted file mode 100644
index aa3ff67a3cb1..000000000000
--- a/drivers/staging/media/usbvision/usbvision-i2c.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * usbvision_i2c.c
- * i2c algorithm for USB-I2C Bridges
- *
- * Copyright (c) 1999-2007 Joerg Heckenbach <joerg@heckenbach-aw.de>
- * Dwaine Garden <dwainegarden@rogers.com>
- *
- * This module is part of usbvision driver project.
- * Updates to driver completed by Dwaine P. Garden
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <linux/usb.h>
-#include <linux/i2c.h>
-#include "usbvision.h"
-
-#define DBG_I2C (1 << 0)
-
-static int i2c_debug;
-
-module_param(i2c_debug, int, 0644); /* debug_i2c_usb mode of the device driver */
-MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
-
-#define PDEBUG(level, fmt, args...) { \
- if (i2c_debug & (level)) \
- printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \
- __func__, __LINE__ , ## args); \
- }
-
-static int usbvision_i2c_write(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
- short len);
-static int usbvision_i2c_read(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
- short len);
-
-static inline int try_write_address(struct i2c_adapter *i2c_adap,
- unsigned char addr, int retries)
-{
- struct usb_usbvision *usbvision;
- int i, ret = -1;
- char buf[4];
-
- usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
- buf[0] = 0x00;
- for (i = 0; i <= retries; i++) {
- ret = (usbvision_i2c_write(usbvision, addr, buf, 1));
- if (ret == 1)
- break; /* success! */
- udelay(5);
- if (i == retries) /* no success */
- break;
- udelay(10);
- }
- if (i) {
- PDEBUG(DBG_I2C, "Needed %d retries for address %#2x", i, addr);
- PDEBUG(DBG_I2C, "Maybe there's no device at this address");
- }
- return ret;
-}
-
-static inline int try_read_address(struct i2c_adapter *i2c_adap,
- unsigned char addr, int retries)
-{
- struct usb_usbvision *usbvision;
- int i, ret = -1;
- char buf[4];
-
- usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
- for (i = 0; i <= retries; i++) {
- ret = (usbvision_i2c_read(usbvision, addr, buf, 1));
- if (ret == 1)
- break; /* success! */
- udelay(5);
- if (i == retries) /* no success */
- break;
- udelay(10);
- }
- if (i) {
- PDEBUG(DBG_I2C, "Needed %d retries for address %#2x", i, addr);
- PDEBUG(DBG_I2C, "Maybe there's no device at this address");
- }
- return ret;
-}
-
-static inline int usb_find_address(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msg, int retries,
- unsigned char *add)
-{
- unsigned short flags = msg->flags;
-
- unsigned char addr;
- int ret;
-
- addr = (msg->addr << 1);
- if (flags & I2C_M_RD)
- addr |= 1;
-
- add[0] = addr;
- if (flags & I2C_M_RD)
- ret = try_read_address(i2c_adap, addr, retries);
- else
- ret = try_write_address(i2c_adap, addr, retries);
-
- if (ret != 1)
- return -EREMOTEIO;
-
- return 0;
-}
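-
-/*
- * The shift above converts the 7-bit I2C address to the 8-bit wire
- * format, where bit 0 carries the read(1)/write(0) flag. Hypothetical
- * example: 7-bit address 0x24 becomes 0x48 for a write and 0x49 for a
- * read.
- */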
-
-static int
-usbvision_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
-{
- struct i2c_msg *pmsg;
- struct usb_usbvision *usbvision;
- int i, ret;
- unsigned char addr = 0;
-
- usbvision = (struct usb_usbvision *)i2c_get_adapdata(i2c_adap);
-
- for (i = 0; i < num; i++) {
- pmsg = &msgs[i];
- ret = usb_find_address(i2c_adap, pmsg, i2c_adap->retries, &addr);
- if (ret != 0) {
- PDEBUG(DBG_I2C, "got NAK from device, message #%d", i);
- return (ret < 0) ? ret : -EREMOTEIO;
- }
-
- if (pmsg->flags & I2C_M_RD) {
- /* read bytes into buffer */
- ret = (usbvision_i2c_read(usbvision, addr, pmsg->buf, pmsg->len));
- if (ret < pmsg->len)
- return (ret < 0) ? ret : -EREMOTEIO;
- } else {
- /* write bytes from buffer */
- ret = (usbvision_i2c_write(usbvision, addr, pmsg->buf, pmsg->len));
- if (ret < pmsg->len)
- return (ret < 0) ? ret : -EREMOTEIO;
- }
- }
- return num;
-}
-
-static u32 functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-/* -----exported algorithm data: ------------------------------------- */
-
-static const struct i2c_algorithm usbvision_algo = {
- .master_xfer = usbvision_i2c_xfer,
- .smbus_xfer = NULL,
- .functionality = functionality,
-};
-
-
-/* ----------------------------------------------------------------------- */
-/* usbvision specific I2C functions */
-/* ----------------------------------------------------------------------- */
-static const struct i2c_adapter i2c_adap_template;
-
-int usbvision_i2c_register(struct usb_usbvision *usbvision)
-{
- static unsigned short saa711x_addrs[] = {
- 0x4a >> 1, 0x48 >> 1, /* SAA7111, SAA7111A and SAA7113 */
- 0x42 >> 1, 0x40 >> 1, /* SAA7114, SAA7115 and SAA7118 */
- I2C_CLIENT_END };
-
- if (usbvision->registered_i2c)
- return 0;
-
- usbvision->i2c_adap = i2c_adap_template;
-
- snprintf(usbvision->i2c_adap.name, sizeof(usbvision->i2c_adap.name),
- "usbvision-%d-%s",
- usbvision->dev->bus->busnum, usbvision->dev->devpath);
- PDEBUG(DBG_I2C, "Adaptername: %s", usbvision->i2c_adap.name);
- usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
-
- i2c_set_adapdata(&usbvision->i2c_adap, &usbvision->v4l2_dev);
-
- if (usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_IIC_LRNACK) < 0) {
- printk(KERN_ERR "usbvision_i2c_register: can't write reg\n");
- return -EBUSY;
- }
-
- PDEBUG(DBG_I2C, "I2C debugging is enabled [i2c]");
- PDEBUG(DBG_I2C, "ALGO debugging is enabled [i2c]");
-
- /* register new adapter to i2c module... */
-
- usbvision->i2c_adap.algo = &usbvision_algo;
-
- usbvision->i2c_adap.timeout = 100; /* default values, should */
- usbvision->i2c_adap.retries = 3; /* be replaced by defines */
-
- i2c_add_adapter(&usbvision->i2c_adap);
-
- PDEBUG(DBG_I2C, "i2c bus for %s registered", usbvision->i2c_adap.name);
-
- /* Request the load of the i2c modules we need */
- switch (usbvision_device_data[usbvision->dev_model].codec) {
- case CODEC_SAA7113:
- case CODEC_SAA7111:
- /* Without this delay the detection of the saa711x is
- hit-and-miss. */
- mdelay(10);
- v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap,
- "saa7115_auto", 0, saa711x_addrs);
- break;
- }
- if (usbvision_device_data[usbvision->dev_model].tuner == 1) {
- struct v4l2_subdev *sd;
- enum v4l2_i2c_tuner_type type;
- struct tuner_setup tun_setup;
-
- sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap,
- "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
- /* depending on whether we found a demod or not, select
- the tuner type. */
- type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
-
- sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
- &usbvision->i2c_adap,
- "tuner", 0, v4l2_i2c_tuner_addrs(type));
-
- if (sd == NULL)
- return -ENODEV;
- if (usbvision->tuner_type != -1) {
- tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
- tun_setup.type = usbvision->tuner_type;
- tun_setup.addr = v4l2_i2c_subdev_addr(sd);
- call_all(usbvision, tuner, s_type_addr, &tun_setup);
- }
- }
- usbvision->registered_i2c = 1;
-
- return 0;
-}
-
-int usbvision_i2c_unregister(struct usb_usbvision *usbvision)
-{
- if (!usbvision->registered_i2c)
- return 0;
-
- i2c_del_adapter(&(usbvision->i2c_adap));
- usbvision->registered_i2c = 0;
-
- PDEBUG(DBG_I2C, "i2c bus for %s unregistered", usbvision->i2c_adap.name);
-
- return 0;
-}
-
-static int
-usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr,
- char *buf, short len)
-{
- int rc, retries;
-
- for (retries = 5;;) {
- rc = usbvision_write_reg(usbvision, USBVISION_SER_ADRS, addr);
- if (rc < 0)
- return rc;
-
- /* Initiate byte read cycle */
- /* USBVISION_SER_CONT <- d0-d2 n. of bytes to r/w */
- /* d3 0=Wr 1=Rd */
- rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT,
- (len & 0x07) | 0x18);
- if (rc < 0)
- return rc;
-
- /* Test for Busy and ACK */
- do {
- /* USBVISION_SER_CONT -> d4 == 0 busy */
- rc = usbvision_read_reg(usbvision, USBVISION_SER_CONT);
- } while (rc > 0 && ((rc & 0x10) != 0)); /* Retry while busy */
- if (rc < 0)
- return rc;
-
- /* USBVISION_SER_CONT -> d5 == 1 Not ack */
- if ((rc & 0x20) == 0) /* Ack? */
- break;
-
- /* I2C abort */
- rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT, 0x00);
- if (rc < 0)
- return rc;
-
- if (--retries < 0)
- return -1;
- }
-
- switch (len) {
- case 4:
- buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4);
- fallthrough;
- case 3:
- buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3);
- fallthrough;
- case 2:
- buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2);
- fallthrough;
- case 1:
- buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1);
- break;
- default:
- printk(KERN_ERR
- "usbvision_i2c_read_max4: buffer length > 4\n");
- }
-
- if (i2c_debug & DBG_I2C) {
- int idx;
-
- for (idx = 0; idx < len; idx++)
- PDEBUG(DBG_I2C, "read %x from address %x", (unsigned char)buf[idx], addr);
- }
- return len;
-}
-
-
-static int usbvision_i2c_write_max4(struct usb_usbvision *usbvision,
- unsigned char addr, const char *buf,
- short len)
-{
- int rc, retries;
- int i;
- unsigned char *value = usbvision->ctrl_urb_buffer;
- unsigned char ser_cont;
-
- ser_cont = (len & 0x07) | 0x10;
-
- value[0] = addr;
- value[1] = ser_cont;
- for (i = 0; i < len; i++)
- value[i + 2] = buf[i];
-
- for (retries = 5;;) {
- rc = usb_control_msg(usbvision->dev,
- usb_sndctrlpipe(usbvision->dev, 1),
- USBVISION_OP_CODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_ENDPOINT, 0,
- (__u16) USBVISION_SER_ADRS, value,
- len + 2, HZ);
-
- if (rc < 0)
- return rc;
-
- rc = usbvision_write_reg(usbvision, USBVISION_SER_CONT,
- (len & 0x07) | 0x10);
- if (rc < 0)
- return rc;
-
- /* Test for Busy and ACK */
- do {
- rc = usbvision_read_reg(usbvision, USBVISION_SER_CONT);
- } while (rc > 0 && ((rc & 0x10) != 0)); /* Retry while busy */
- if (rc < 0)
- return rc;
-
- if ((rc & 0x20) == 0) /* Ack? */
- break;
-
- /* I2C abort */
- usbvision_write_reg(usbvision, USBVISION_SER_CONT, 0x00);
-
- if (--retries < 0)
- return -1;
-
- }
-
- if (i2c_debug & DBG_I2C) {
- int idx;
-
- for (idx = 0; idx < len; idx++)
- PDEBUG(DBG_I2C, "wrote %x at address %x", (unsigned char)buf[idx], addr);
- }
- return len;
-}
-
-static int usbvision_i2c_write(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
- short len)
-{
- char *buf_ptr = buf;
- int retval;
- int wrcount = 0;
- int count;
- int max_len = 4;
-
- while (len > 0) {
- count = (len > max_len) ? max_len : len;
- retval = usbvision_i2c_write_max4(usbvision, addr, buf_ptr, count);
- if (retval > 0) {
- len -= count;
- buf_ptr += count;
- wrcount += count;
- } else
- return (retval < 0) ? retval : -EFAULT;
- }
- return wrcount;
-}
-
-static int usbvision_i2c_read(struct usb_usbvision *usbvision, unsigned char addr, char *buf,
- short len)
-{
- char temp[4];
- int retval, i;
- int rdcount = 0;
- int count;
-
- while (len > 0) {
- count = (len > 3) ? 4 : len;
- retval = usbvision_i2c_read_max4(usbvision, addr, temp, count);
- if (retval > 0) {
-			for (i = 0; i < count; i++)	/* temp[] holds at most 4 bytes */
- buf[rdcount + i] = temp[i];
- len -= count;
- rdcount += count;
- } else
- return (retval < 0) ? retval : -EFAULT;
- }
- return rdcount;
-}
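-
-/*
- * Both wrappers above split a transfer into chunks of at most four
- * bytes, the limit imposed by the bridge's SER_DAT1..SER_DAT4 data
- * registers. Hypothetical example: a 10-byte read is issued as chunks
- * of 4 + 4 + 2 bytes.
- */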
-
-static const struct i2c_adapter i2c_adap_template = {
- .owner = THIS_MODULE,
- .name = "usbvision",
-};
diff --git a/drivers/staging/media/usbvision/usbvision-video.c b/drivers/staging/media/usbvision/usbvision-video.c
deleted file mode 100644
index 3ea25fdcf767..000000000000
--- a/drivers/staging/media/usbvision/usbvision-video.c
+++ /dev/null
@@ -1,1643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * USB USBVISION Video device driver 0.9.10
- *
- * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
- *
- * This module is part of usbvision driver project.
- *
- * Let's call the version 0.... until compression decoding is completely
- * implemented.
- *
- * This driver is written by Jose Ignacio Gijon and Joerg Heckenbach.
- * It was based on USB CPiA driver written by Peter Pregler,
- * Scott J. Bertin and Johannes Erdfelt
- * Ideas are taken from bttv driver by Ralph Metzler, Marcus Metzler &
- * Gerd Knorr and zoran 36120/36125 driver by Pauline Middelink
- * Updates to driver completed by Dwaine P. Garden
- *
- * TODO:
- * - use submit_urb for all setup packets
- * - Fix memory settings for nt1004. It is 4 times as big as the
- * nt1003 memory.
- * - Add audio on endpoint 3 for nt1004 chip.
- * Seems impossible, needs a codec interface. Which one?
- * - Clean up the driver.
- * - optimization for performance.
- * - Add Videotext capability (VBI). Working on it.....
- * - Check audio for other devices
- */
-
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/videodev2.h>
-#include <linux/i2c.h>
-
-#include <media/i2c/saa7115.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/tuner.h>
-
-#include <linux/workqueue.h>
-
-#include "usbvision.h"
-#include "usbvision-cards.h"
-
-#define DRIVER_AUTHOR \
- "Joerg Heckenbach <joerg@heckenbach-aw.de>, " \
- "Dwaine Garden <DwaineGarden@rogers.com>"
-#define DRIVER_NAME "usbvision"
-#define DRIVER_ALIAS "USBVision"
-#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
-#define USBVISION_VERSION_STRING "0.9.11"
-
-#define ENABLE_HEXDUMP 0 /* Enable if you need it */
-
-
-#ifdef USBVISION_DEBUG
- #define PDEBUG(level, fmt, args...) { \
- if (video_debug & (level)) \
- printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \
- __func__, __LINE__ , ## args); \
- }
-#else
- #define PDEBUG(level, fmt, args...) do {} while (0)
-#endif
-
-#define DBG_IO (1 << 1)
-#define DBG_PROBE (1 << 2)
-#define DBG_MMAP (1 << 3)
-
-/* String operations */
-#define rmspace(str) while (*str == ' ') str++;
-#define goto2next(str) while (*str != ' ') str++; while (*str == ' ') str++;
-
-
-/* sequential number of usbvision device */
-static int usbvision_nr;
-
-static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
- { 1, 1, 8, V4L2_PIX_FMT_GREY },
- { 1, 2, 16, V4L2_PIX_FMT_RGB565 },
- { 1, 3, 24, V4L2_PIX_FMT_RGB24 },
- { 1, 4, 32, V4L2_PIX_FMT_RGB32 },
- { 1, 2, 16, V4L2_PIX_FMT_RGB555 },
- { 1, 2, 16, V4L2_PIX_FMT_YUYV },
- { 1, 2, 12, V4L2_PIX_FMT_YVU420 }, /* 1.5 ! */
- { 1, 2, 16, V4L2_PIX_FMT_YUV422P }
-};
-
-/* Function prototypes */
-static void usbvision_release(struct usb_usbvision *usbvision);
-
-/* Default initialization of device driver parameters */
-/* Set the default format for ISOC endpoint */
-static int isoc_mode = ISOC_MODE_COMPRESS;
-/* Set the default Debug Mode of the device driver */
-static int video_debug;
-/* Sequential Number of Video Device */
-static int video_nr = -1;
-/* Sequential Number of Radio Device */
-static int radio_nr = -1;
-
-/* Grab parameters for the device driver */
-
-/* Showing parameters under SYSFS */
-module_param(isoc_mode, int, 0444);
-module_param(video_debug, int, 0444);
-module_param(video_nr, int, 0444);
-module_param(radio_nr, int, 0444);
-
-MODULE_PARM_DESC(isoc_mode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
-MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
-MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)");
-MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-
-
-/* Misc stuff */
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(USBVISION_VERSION_STRING);
-MODULE_ALIAS(DRIVER_ALIAS);
-
-
-/*****************************************************************************/
-/* SYSFS Code - Copied from the stv680.c usb module. */
-/* Device information is located at /sys/class/video4linux/video0 */
-/* Device parameters information is located at /sys/module/usbvision */
-/* Device USB Information is located at */
-/* /sys/bus/usb/drivers/USBVision Video Grabber */
-/*****************************************************************************/
-
-#define YES_NO(x) ((x) ? "Yes" : "No")
-
-static inline struct usb_usbvision *cd_to_usbvision(struct device *cd)
-{
- struct video_device *vdev = to_video_device(cd);
- return video_get_drvdata(vdev);
-}
-
-static ssize_t show_version(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", USBVISION_VERSION_STRING);
-}
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
-
-static ssize_t show_model(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n",
- usbvision_device_data[usbvision->dev_model].model_string);
-}
-static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
-
-static ssize_t show_hue(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
- V4L2_CID_HUE));
-
- return sprintf(buf, "%d\n", val);
-}
-static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
-
-static ssize_t show_contrast(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
- V4L2_CID_CONTRAST));
-
- return sprintf(buf, "%d\n", val);
-}
-static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
-
-static ssize_t show_brightness(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
- V4L2_CID_BRIGHTNESS));
-
- return sprintf(buf, "%d\n", val);
-}
-static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
-
-static ssize_t show_saturation(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
- V4L2_CID_SATURATION));
-
- return sprintf(buf, "%d\n", val);
-}
-static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
-
-static ssize_t show_streaming(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n",
- YES_NO(usbvision->streaming == stream_on));
-}
-static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
-
-static ssize_t show_compression(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n",
- YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS));
-}
-static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
-
-static ssize_t show_device_bridge(struct device *cd,
- struct device_attribute *attr, char *buf)
-{
- struct video_device *vdev = to_video_device(cd);
- struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%d\n", usbvision->bridge_type);
-}
-static DEVICE_ATTR(bridge, S_IRUGO, show_device_bridge, NULL);
-
-static void usbvision_create_sysfs(struct video_device *vdev)
-{
- int res;
-
- if (!vdev)
- return;
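- /* One-shot do { } while (0): any device_create_file() failure
- breaks out to the shared error report below. */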
- do {
- res = device_create_file(&vdev->dev, &dev_attr_version);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_model);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_hue);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_contrast);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_brightness);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_saturation);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_streaming);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_compression);
- if (res < 0)
- break;
- res = device_create_file(&vdev->dev, &dev_attr_bridge);
- if (res >= 0)
- return;
- } while (0);
-
- dev_err(&vdev->dev, "%s error: %d\n", __func__, res);
-}
-
-static void usbvision_remove_sysfs(struct video_device *vdev)
-{
- if (vdev) {
- device_remove_file(&vdev->dev, &dev_attr_version);
- device_remove_file(&vdev->dev, &dev_attr_model);
- device_remove_file(&vdev->dev, &dev_attr_hue);
- device_remove_file(&vdev->dev, &dev_attr_contrast);
- device_remove_file(&vdev->dev, &dev_attr_brightness);
- device_remove_file(&vdev->dev, &dev_attr_saturation);
- device_remove_file(&vdev->dev, &dev_attr_streaming);
- device_remove_file(&vdev->dev, &dev_attr_compression);
- device_remove_file(&vdev->dev, &dev_attr_bridge);
- }
-}
-
-/*
- * usbvision_open()
- *
- * This is part of the Video4Linux API. The driver can be opened by one
- * client only (checks internal counter 'usbvision->user'). The procedure
- * then allocates buffers needed for video processing.
- *
- */
-static int usbvision_v4l2_open(struct file *file)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int err_code = 0;
-
- PDEBUG(DBG_IO, "open");
-
- if (mutex_lock_interruptible(&usbvision->v4l2_lock))
- return -ERESTARTSYS;
-
- if (usbvision->remove_pending) {
- err_code = -ENODEV;
- goto unlock;
- }
- if (usbvision->user) {
- err_code = -EBUSY;
- } else {
- err_code = v4l2_fh_open(file);
- if (err_code)
- goto unlock;
-
- /* Allocate memory for the scratch ring buffer */
- err_code = usbvision_scratch_alloc(usbvision);
- if (!err_code && isoc_mode == ISOC_MODE_COMPRESS) {
- /* Allocate intermediate decompression buffers
- only if needed */
- err_code = usbvision_decompress_alloc(usbvision);
- }
- if (err_code) {
- /* Deallocate all buffers if trouble */
- usbvision_scratch_free(usbvision);
- usbvision_decompress_free(usbvision);
- }
- }
-
- /* If so far no errors then we shall start the camera */
- if (!err_code) {
- /* Send init sequence only once, it's large! */
- if (!usbvision->initialized) {
- int setup_ok = usbvision_setup(usbvision, isoc_mode);
-
- if (setup_ok)
- usbvision->initialized = 1;
- else
- err_code = -EBUSY;
- }
-
- if (!err_code) {
- usbvision_begin_streaming(usbvision);
- err_code = usbvision_init_isoc(usbvision);
- /* device must be initialized before isoc transfer */
- usbvision_muxsel(usbvision, 0);
-
- /* prepare queues */
- usbvision_empty_framequeues(usbvision);
- usbvision->user++;
- }
- }
-
-unlock:
- mutex_unlock(&usbvision->v4l2_lock);
-
- PDEBUG(DBG_IO, "success");
- return err_code;
-}
-
-/*
- * usbvision_v4l2_close()
- *
- * This is part of the Video4Linux API. The procedure
- * stops streaming and deallocates all buffers that were earlier
- * allocated in usbvision_v4l2_open().
- *
- */
-static int usbvision_v4l2_close(struct file *file)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int r;
-
- PDEBUG(DBG_IO, "close");
-
- mutex_lock(&usbvision->v4l2_lock);
- usbvision_audio_off(usbvision);
- usbvision_restart_isoc(usbvision);
- usbvision_stop_isoc(usbvision);
-
- usbvision_decompress_free(usbvision);
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
- usbvision_scratch_free(usbvision);
-
- usbvision->user--;
- r = usbvision->remove_pending;
- mutex_unlock(&usbvision->v4l2_lock);
-
- if (r) {
- printk(KERN_INFO "%s: Final disconnect\n", __func__);
- v4l2_fh_release(file);
- usbvision_release(usbvision);
- return 0;
- }
-
- PDEBUG(DBG_IO, "success");
- return v4l2_fh_release(file);
-}
-
-
-/*
- * usbvision_ioctl()
- *
- * This is part of the Video4Linux API. The procedure handles ioctl() calls.
- *
- */
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int vidioc_g_register(struct file *file, void *priv,
- struct v4l2_dbg_register *reg)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int err_code;
-
- /* NT100x has an 8-bit register space */
- err_code = usbvision_read_reg(usbvision, reg->reg&0xff);
- if (err_code < 0) {
- dev_err(&usbvision->vdev.dev,
- "%s: VIDIOC_DBG_G_REGISTER failed: error %d\n",
- __func__, err_code);
- return err_code;
- }
- reg->val = err_code;
- reg->size = 1;
- return 0;
-}
-
-static int vidioc_s_register(struct file *file, void *priv,
- const struct v4l2_dbg_register *reg)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int err_code;
-
- /* NT100x has an 8-bit register space */
- err_code = usbvision_write_reg(usbvision, reg->reg & 0xff, reg->val);
- if (err_code < 0) {
- dev_err(&usbvision->vdev.dev,
- "%s: VIDIOC_DBG_S_REGISTER failed: error %d\n",
- __func__, err_code);
- return err_code;
- }
- return 0;
-}
-#endif
-
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *vc)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- if (!usbvision->dev)
- return -ENODEV;
-
- strscpy(vc->driver, "USBVision", sizeof(vc->driver));
- strscpy(vc->card,
- usbvision_device_data[usbvision->dev_model].model_string,
- sizeof(vc->card));
- usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
- if (usbvision_device_data[usbvision->dev_model].radio)
- vc->capabilities |= V4L2_CAP_RADIO;
- if (usbvision->have_tuner)
- vc->capabilities |= V4L2_CAP_TUNER;
- return 0;
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *vi)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int chan;
-
- if (vi->index >= usbvision->video_inputs)
- return -EINVAL;
- if (usbvision->have_tuner)
- chan = vi->index;
- else
- chan = vi->index + 1; /* skip the "Television" input */
-
- /* Determine the requested input characteristics
- specific for each usbvision card model */
- switch (chan) {
- case 0:
- if (usbvision_device_data[usbvision->dev_model].video_channels == 4) {
- strscpy(vi->name, "White Video Input", sizeof(vi->name));
- } else {
- strscpy(vi->name, "Television", sizeof(vi->name));
- vi->type = V4L2_INPUT_TYPE_TUNER;
- vi->tuner = chan;
- vi->std = USBVISION_NORMS;
- }
- break;
- case 1:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- if (usbvision_device_data[usbvision->dev_model].video_channels == 4)
- strscpy(vi->name, "Green Video Input", sizeof(vi->name));
- else
- strscpy(vi->name, "Composite Video Input",
- sizeof(vi->name));
- vi->std = USBVISION_NORMS;
- break;
- case 2:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- if (usbvision_device_data[usbvision->dev_model].video_channels == 4)
- strscpy(vi->name, "Yellow Video Input", sizeof(vi->name));
- else
- strscpy(vi->name, "S-Video Input", sizeof(vi->name));
- vi->std = USBVISION_NORMS;
- break;
- case 3:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- strscpy(vi->name, "Red Video Input", sizeof(vi->name));
- vi->std = USBVISION_NORMS;
- break;
- }
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *input)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- *input = usbvision->ctl_input;
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int input)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- if (input >= usbvision->video_inputs)
- return -EINVAL;
-
- usbvision_muxsel(usbvision, input);
- usbvision_set_input(usbvision);
- usbvision_set_output(usbvision,
- usbvision->curwidth,
- usbvision->curheight);
- return 0;
-}
-
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- usbvision->tvnorm_id = id;
-
- call_all(usbvision, video, s_std, usbvision->tvnorm_id);
- /* propagate the change to the decoder */
- usbvision_muxsel(usbvision, usbvision->ctl_input);
-
- return 0;
-}
-
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- *id = usbvision->tvnorm_id;
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *vt)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- if (vt->index) /* Only tuner 0 */
- return -EINVAL;
- if (vt->type == V4L2_TUNER_RADIO)
- strscpy(vt->name, "Radio", sizeof(vt->name));
- else
- strscpy(vt->name, "Television", sizeof(vt->name));
-
- /* Let clients fill in the remainder of this struct */
- call_all(usbvision, tuner, g_tuner, vt);
-
- return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *vt)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- /* Only one tuner for now */
- if (vt->index)
- return -EINVAL;
- /* let clients handle this */
- call_all(usbvision, tuner, s_tuner, vt);
-
- return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- /* Only one tuner */
- if (freq->tuner)
- return -EINVAL;
- if (freq->type == V4L2_TUNER_RADIO)
- freq->frequency = usbvision->radio_freq;
- else
- freq->frequency = usbvision->tv_freq;
-
- return 0;
-}
-
-static int vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- struct v4l2_frequency new_freq = *freq;
-
- /* Only one tuner for now */
- if (freq->tuner)
- return -EINVAL;
-
- call_all(usbvision, tuner, s_frequency, freq);
- call_all(usbvision, tuner, g_frequency, &new_freq);
- if (freq->type == V4L2_TUNER_RADIO)
- usbvision->radio_freq = new_freq.frequency;
- else
- usbvision->tv_freq = new_freq.frequency;
-
- return 0;
-}
-
-static int vidioc_reqbufs(struct file *file,
- void *priv, struct v4l2_requestbuffers *vr)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int ret;
-
- RESTRICT_TO_RANGE(vr->count, 1, USBVISION_NUMFRAMES);
-
- /* Check input validity: only the MMAP
- memory type is supported for video capture. */
- if (vr->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
-
- if (usbvision->streaming == stream_on) {
- ret = usbvision_stream_interrupt(usbvision);
- if (ret)
- return ret;
- }
-
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
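- /* frames_alloc() may grant fewer buffers than requested,
- so report the actual count back to the caller. */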
- vr->count = usbvision_frames_alloc(usbvision, vr->count);
-
- usbvision->cur_frame = NULL;
-
- return 0;
-}
-
-static int vidioc_querybuf(struct file *file,
- void *priv, struct v4l2_buffer *vb)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- struct usbvision_frame *frame;
-
- /* FIXME: must check that the buffers have been
- mapped (i.e. that VIDIOC_REQBUFS has been called) */
- if (vb->index >= usbvision->num_frames)
- return -EINVAL;
- /* Updating the corresponding frame state */
- vb->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- frame = &usbvision->frame[vb->index];
- if (frame->grabstate >= frame_state_ready)
- vb->flags |= V4L2_BUF_FLAG_QUEUED;
- if (frame->grabstate >= frame_state_done)
- vb->flags |= V4L2_BUF_FLAG_DONE;
- if (frame->grabstate == frame_state_unused)
- vb->flags |= V4L2_BUF_FLAG_MAPPED;
- vb->memory = V4L2_MEMORY_MMAP;
-
- vb->m.offset = vb->index * PAGE_ALIGN(usbvision->max_frame_size);
-
- vb->field = V4L2_FIELD_NONE;
- vb->length = usbvision->curwidth *
- usbvision->curheight *
- usbvision->palette.bytes_per_pixel;
- v4l2_buffer_set_timestamp(vb, usbvision->frame[vb->index].ts);
- vb->sequence = usbvision->frame[vb->index].sequence;
- return 0;
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- struct usbvision_frame *frame;
- unsigned long lock_flags;
-
- /* FIXME : works only on VIDEO_CAPTURE MODE, MMAP. */
- if (vb->index >= usbvision->num_frames)
- return -EINVAL;
-
- frame = &usbvision->frame[vb->index];
-
- if (frame->grabstate != frame_state_unused)
- return -EAGAIN;
-
- /* Mark it as ready and enqueue frame */
- frame->grabstate = frame_state_ready;
- frame->scanstate = scan_state_scanning;
- frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
-
- vb->flags &= ~V4L2_BUF_FLAG_DONE;
-
- /* set v4l2_format index */
- frame->v4l2_format = usbvision->palette;
-
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
- return 0;
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int ret;
- struct usbvision_frame *f;
- unsigned long lock_flags;
-
- if (list_empty(&(usbvision->outqueue))) {
- if (usbvision->streaming == stream_idle)
- return -EINVAL;
- ret = wait_event_interruptible
- (usbvision->wait_frame,
- !list_empty(&(usbvision->outqueue)));
- if (ret)
- return ret;
- }
-
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- f = list_entry(usbvision->outqueue.next,
- struct usbvision_frame, frame);
- list_del(usbvision->outqueue.next);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
- f->grabstate = frame_state_unused;
-
- vb->memory = V4L2_MEMORY_MMAP;
- vb->flags = V4L2_BUF_FLAG_MAPPED |
- V4L2_BUF_FLAG_QUEUED |
- V4L2_BUF_FLAG_DONE |
- V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vb->index = f->index;
- vb->sequence = f->sequence;
- v4l2_buffer_set_timestamp(vb, f->ts);
- vb->field = V4L2_FIELD_NONE;
- vb->bytesused = f->scanlength;
-
- return 0;
-}
-
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- usbvision->streaming = stream_on;
- call_all(usbvision, video, s_stream, 1);
-
- return 0;
-}
-
-static int vidioc_streamoff(struct file *file,
- void *priv, enum v4l2_buf_type type)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (usbvision->streaming == stream_on) {
- usbvision_stream_interrupt(usbvision);
- /* Stop all video streamings */
- call_all(usbvision, video, s_stream, 0);
- }
- usbvision_empty_framequeues(usbvision);
-
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *vfd)
-{
- if (vfd->index >= USBVISION_SUPPORTED_PALETTES)
- return -EINVAL;
- vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
- return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *vf)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- vf->fmt.pix.width = usbvision->curwidth;
- vf->fmt.pix.height = usbvision->curheight;
- vf->fmt.pix.pixelformat = usbvision->palette.format;
- vf->fmt.pix.bytesperline =
- usbvision->curwidth * usbvision->palette.bytes_per_pixel;
- vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline * usbvision->curheight;
- vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
-
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *vf)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int format_idx;
-
- /* Find requested format in available ones */
- for (format_idx = 0; format_idx < USBVISION_SUPPORTED_PALETTES; format_idx++) {
- if (vf->fmt.pix.pixelformat ==
- usbvision_v4l2_format[format_idx].format) {
- usbvision->palette = usbvision_v4l2_format[format_idx];
- break;
- }
- }
- /* robustness */
- if (format_idx == USBVISION_SUPPORTED_PALETTES)
- return -EINVAL;
- RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
- RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
-
- vf->fmt.pix.bytesperline = vf->fmt.pix.width *
- usbvision->palette.bytes_per_pixel;
- vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline * vf->fmt.pix.height;
- vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
-
- return 0;
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *vf)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int ret;
-
- ret = vidioc_try_fmt_vid_cap(file, priv, vf);
- if (ret)
- return ret;
-
- /* stop io in case it is already in progress */
- if (usbvision->streaming == stream_on) {
- ret = usbvision_stream_interrupt(usbvision);
- if (ret)
- return ret;
- }
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
-
- usbvision->cur_frame = NULL;
-
- /* by now we are committed to the new data... */
- usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
-
- return 0;
-}
-
-static ssize_t usbvision_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int noblock = file->f_flags & O_NONBLOCK;
- unsigned long lock_flags;
- int ret, i;
- struct usbvision_frame *frame;
-
- PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __func__,
- (unsigned long)count, noblock);
-
- if (!USBVISION_IS_OPERATIONAL(usbvision) || !buf)
- return -EFAULT;
-
- /* This entry point is compatible with the mmap routines
- so that a user can do either VIDIOC_QBUF/VIDIOC_DQBUF
- to get frames or call read on the device. */
- if (!usbvision->num_frames) {
- /* First, allocate some frames to work with
- if this has not been done with VIDIOC_REQBUF */
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
- usbvision_frames_alloc(usbvision, USBVISION_NUMFRAMES);
- }
-
- if (usbvision->streaming != stream_on) {
- /* no stream is running, make it running ! */
- usbvision->streaming = stream_on;
- call_all(usbvision, video, s_stream, 1);
- }
-
- /* Then, enqueue as many frames as possible
- (like a user of VIDIOC_QBUF would do) */
- for (i = 0; i < usbvision->num_frames; i++) {
- frame = &usbvision->frame[i];
- if (frame->grabstate == frame_state_unused) {
- /* Mark it as ready and enqueue frame */
- frame->grabstate = frame_state_ready;
- frame->scanstate = scan_state_scanning;
- /* Accumulated in usbvision_parse_data() */
- frame->scanlength = 0;
-
- /* set v4l2_format index */
- frame->v4l2_format = usbvision->palette;
-
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- list_add_tail(&frame->frame, &usbvision->inqueue);
- spin_unlock_irqrestore(&usbvision->queue_lock,
- lock_flags);
- }
- }
-
- /* Then try to steal a frame (like a VIDIOC_DQBUF would do) */
- if (list_empty(&(usbvision->outqueue))) {
- if (noblock)
- return -EAGAIN;
-
- ret = wait_event_interruptible
- (usbvision->wait_frame,
- !list_empty(&(usbvision->outqueue)));
- if (ret)
- return ret;
- }
-
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- frame = list_entry(usbvision->outqueue.next,
- struct usbvision_frame, frame);
- list_del(usbvision->outqueue.next);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
- /* An error returns an empty frame */
- if (frame->grabstate == frame_state_error) {
- frame->bytes_read = 0;
- return 0;
- }
-
- PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld",
- __func__,
- frame->index, frame->bytes_read, frame->scanlength);
-
- /* copy bytes to user space; we allow for partial reads */
- if ((count + frame->bytes_read) > (unsigned long)frame->scanlength)
- count = frame->scanlength - frame->bytes_read;
-
- if (copy_to_user(buf, frame->data + frame->bytes_read, count))
- return -EFAULT;
-
- frame->bytes_read += count;
- PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld",
- __func__,
- (unsigned long)count, frame->bytes_read);
-
- /*
- * FIXME:
- * For now, forget the frame if it has not been read in one shot.
- */
- frame->bytes_read = 0;
-
- /* Mark it as available to be used again. */
- frame->grabstate = frame_state_unused;
-
- return count;
-}
-
-static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int res;
-
- if (mutex_lock_interruptible(&usbvision->v4l2_lock))
- return -ERESTARTSYS;
- res = usbvision_read(file, buf, count, ppos);
- mutex_unlock(&usbvision->v4l2_lock);
- return res;
-}
-
-static int usbvision_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned long size = vma->vm_end - vma->vm_start,
- start = vma->vm_start;
- void *pos;
- u32 i;
- struct usb_usbvision *usbvision = video_drvdata(file);
-
- PDEBUG(DBG_MMAP, "mmap");
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return -EFAULT;
-
- if (!(vma->vm_flags & VM_WRITE) ||
- size != PAGE_ALIGN(usbvision->max_frame_size)) {
- return -EINVAL;
- }
-
- for (i = 0; i < usbvision->num_frames; i++) {
- if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) ==
- vma->vm_pgoff)
- break;
- }
- if (i == usbvision->num_frames) {
- PDEBUG(DBG_MMAP,
- "mmap: user supplied mapping address is out of range");
- return -EINVAL;
- }
-
- /* VM_IO is eventually going to replace PageReserved altogether */
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-
- pos = usbvision->frame[i].data;
- while (size > 0) {
- if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
- PDEBUG(DBG_MMAP, "mmap: vm_insert_page failed");
- return -EAGAIN;
- }
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- return 0;
-}
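-
-/*
- * Userspace view of the mapping above (illustrative sketch, not part of
- * the original source): each buffer is mapped at the offset reported by
- * VIDIOC_QUERYBUF, which this driver computes as
- * index * PAGE_ALIGN(max_frame_size); the mapping must be writable and
- * exactly PAGE_ALIGN(max_frame_size) bytes long:
- *
- *	struct v4l2_buffer vb = { .index = i, .memory = V4L2_MEMORY_MMAP };
- *
- *	ioctl(fd, VIDIOC_QUERYBUF, &vb);
- *	p = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- *		 fd, vb.m.offset);
- */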
-
-static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int res;
-
- if (mutex_lock_interruptible(&usbvision->v4l2_lock))
- return -ERESTARTSYS;
- res = usbvision_mmap(file, vma);
- mutex_unlock(&usbvision->v4l2_lock);
- return res;
-}
-
-/*
- * Here comes the radio support for usbvision-based devices
- *
- */
-static int usbvision_radio_open(struct file *file)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int err_code = 0;
-
- PDEBUG(DBG_IO, "%s:", __func__);
-
- if (mutex_lock_interruptible(&usbvision->v4l2_lock))
- return -ERESTARTSYS;
-
- if (usbvision->remove_pending) {
- err_code = -ENODEV;
- goto out;
- }
- err_code = v4l2_fh_open(file);
- if (err_code)
- goto out;
- if (usbvision->user) {
- dev_err(&usbvision->rdev.dev,
- "%s: Someone tried to open an already opened USBVision Radio!\n",
- __func__);
- err_code = -EBUSY;
- } else {
- /* Alternate interface 1 has the biggest frame size */
- err_code = usbvision_set_alternate(usbvision);
- if (err_code < 0) {
- usbvision->last_error = err_code;
- err_code = -EBUSY;
- goto out;
- }
-
- /* If so far no errors then we shall start the radio */
- usbvision->radio = 1;
- call_all(usbvision, tuner, s_radio);
- usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
- usbvision->user++;
- }
-out:
- mutex_unlock(&usbvision->v4l2_lock);
- return err_code;
-}
-
-
-static int usbvision_radio_close(struct file *file)
-{
- struct usb_usbvision *usbvision = video_drvdata(file);
- int r;
-
- PDEBUG(DBG_IO, "");
-
- mutex_lock(&usbvision->v4l2_lock);
- /* Select alt setting 0 (zero-bandwidth, packet size 0) */
- usbvision->iface_alt = 0;
- if (usbvision->dev)
- usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
-
- usbvision_audio_off(usbvision);
- usbvision->radio = 0;
- usbvision->user--;
- r = usbvision->remove_pending;
- mutex_unlock(&usbvision->v4l2_lock);
-
- if (r) {
- printk(KERN_INFO "%s: Final disconnect\n", __func__);
- v4l2_fh_release(file);
- usbvision_release(usbvision);
- return 0;
- }
-
- PDEBUG(DBG_IO, "success");
- return v4l2_fh_release(file);
-}
-
-/* Video registration stuff */
-
-/* Video template */
-static const struct v4l2_file_operations usbvision_fops = {
- .owner = THIS_MODULE,
- .open = usbvision_v4l2_open,
- .release = usbvision_v4l2_close,
- .read = usbvision_v4l2_read,
- .mmap = usbvision_v4l2_mmap,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops usbvision_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_s_std = vidioc_s_std,
- .vidioc_g_std = vidioc_g_std,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = vidioc_g_register,
- .vidioc_s_register = vidioc_s_register,
-#endif
-};
-
-static struct video_device usbvision_video_template = {
- .fops = &usbvision_fops,
- .ioctl_ops = &usbvision_ioctl_ops,
- .name = "usbvision-video",
- .release = video_device_release_empty,
- .tvnorms = USBVISION_NORMS,
-};
-
-
-/* Radio template */
-static const struct v4l2_file_operations usbvision_radio_fops = {
- .owner = THIS_MODULE,
- .open = usbvision_radio_open,
- .release = usbvision_radio_close,
- .poll = v4l2_ctrl_poll,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static struct video_device usbvision_radio_template = {
- .fops = &usbvision_radio_fops,
- .name = "usbvision-radio",
- .release = video_device_release_empty,
- .ioctl_ops = &usbvision_radio_ioctl_ops,
-};
-
-
-static void usbvision_vdev_init(struct usb_usbvision *usbvision,
- struct video_device *vdev,
- const struct video_device *vdev_template,
- const char *name)
-{
- struct usb_device *usb_dev = usbvision->dev;
-
- if (!usb_dev) {
- pr_err("%s: usbvision->dev is not set\n", __func__);
- return;
- }
-
- *vdev = *vdev_template;
- vdev->lock = &usbvision->v4l2_lock;
- vdev->v4l2_dev = &usbvision->v4l2_dev;
- snprintf(vdev->name, sizeof(vdev->name), "%s", name);
- video_set_drvdata(vdev, usbvision);
-}
-
-/* unregister video4linux devices */
-static void usbvision_unregister_video(struct usb_usbvision *usbvision)
-{
- /* Radio Device: */
- if (video_is_registered(&usbvision->rdev)) {
- PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(&usbvision->rdev));
- video_unregister_device(&usbvision->rdev);
- }
-
- /* Video Device: */
- if (video_is_registered(&usbvision->vdev)) {
- PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
- video_device_node_name(&usbvision->vdev));
- video_unregister_device(&usbvision->vdev);
- }
-}
-
-/* register video4linux devices */
-static int usbvision_register_video(struct usb_usbvision *usbvision)
-{
- int res = -ENOMEM;
-
- /* Video Device: */
- usbvision_vdev_init(usbvision, &usbvision->vdev,
- &usbvision_video_template, "USBVision Video");
- if (!usbvision->have_tuner) {
- v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_G_FREQUENCY);
- v4l2_disable_ioctl(&usbvision->vdev, VIDIOC_S_TUNER);
- }
- usbvision->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- if (usbvision->have_tuner)
- usbvision->vdev.device_caps |= V4L2_CAP_TUNER;
-
- if (video_register_device(&usbvision->vdev, VFL_TYPE_VIDEO, video_nr) < 0)
- goto err_exit;
- printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
- usbvision->nr, video_device_node_name(&usbvision->vdev));
-
- /* Radio Device: */
- if (usbvision_device_data[usbvision->dev_model].radio) {
- /* usbvision has radio */
- usbvision_vdev_init(usbvision, &usbvision->rdev,
- &usbvision_radio_template, "USBVision Radio");
- usbvision->rdev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
- if (video_register_device(&usbvision->rdev, VFL_TYPE_RADIO, radio_nr) < 0)
- goto err_exit;
- printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
- usbvision->nr, video_device_node_name(&usbvision->rdev));
- }
- /* all done */
- return 0;
-
- err_exit:
- dev_err(&usbvision->dev->dev,
- "USBVision[%d]: video_register_device() failed\n",
- usbvision->nr);
- usbvision_unregister_video(usbvision);
- return res;
-}
-
-/*
- * usbvision_alloc()
- *
- * This code allocates the struct usb_usbvision.
- * It is filled with default values.
- *
- * Returns NULL on error, otherwise a pointer to the new usb_usbvision.
- *
- */
-static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
- struct usb_interface *intf)
-{
- struct usb_usbvision *usbvision;
-
- usbvision = kzalloc(sizeof(*usbvision), GFP_KERNEL);
- if (!usbvision)
- return NULL;
-
- usbvision->dev = dev;
- if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev))
- goto err_free;
-
- if (v4l2_ctrl_handler_init(&usbvision->hdl, 4))
- goto err_unreg;
- usbvision->v4l2_dev.ctrl_handler = &usbvision->hdl;
- mutex_init(&usbvision->v4l2_lock);
-
- /* prepare control urb for control messages during interrupts */
- usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
- if (!usbvision->ctrl_urb)
- goto err_unreg;
-
- return usbvision;
-
-err_unreg:
- v4l2_ctrl_handler_free(&usbvision->hdl);
- v4l2_device_unregister(&usbvision->v4l2_dev);
-err_free:
- kfree(usbvision);
- return NULL;
-}
-
-/*
- * usbvision_release()
- *
- * This code does final release of struct usb_usbvision. This happens
- * after the device is disconnected -and- all clients closed their files.
- *
- */
-static void usbvision_release(struct usb_usbvision *usbvision)
-{
- PDEBUG(DBG_PROBE, "");
-
- usbvision->initialized = 0;
-
- usbvision_remove_sysfs(&usbvision->vdev);
- usbvision_unregister_video(usbvision);
- kfree(usbvision->alt_max_pkt_size);
-
- usb_free_urb(usbvision->ctrl_urb);
-
- v4l2_ctrl_handler_free(&usbvision->hdl);
- v4l2_device_unregister(&usbvision->v4l2_dev);
- kfree(usbvision);
-
- PDEBUG(DBG_PROBE, "success");
-}
-
-
-/*********************** usb interface **********************************/
-
-static void usbvision_configure_video(struct usb_usbvision *usbvision)
-{
- int model;
-
- if (!usbvision)
- return;
-
- model = usbvision->dev_model;
- usbvision->palette = usbvision_v4l2_format[2]; /* V4L2_PIX_FMT_RGB24; */
-
- if (usbvision_device_data[usbvision->dev_model].vin_reg2_override) {
- usbvision->vin_reg2_preset =
- usbvision_device_data[usbvision->dev_model].vin_reg2;
- } else {
- usbvision->vin_reg2_preset = 0;
- }
-
- usbvision->tvnorm_id = usbvision_device_data[model].video_norm;
- usbvision->video_inputs = usbvision_device_data[model].video_channels;
- usbvision->ctl_input = 0;
- usbvision->radio_freq = 87.5 * 16000;
- usbvision->tv_freq = 400 * 16;
-
- /* This must come here so that i2c clients are able to register */
- /* first switch off audio */
- if (usbvision_device_data[model].audio_channels > 0)
- usbvision_audio_off(usbvision);
- /* and then power up the tuner */
- usbvision_power_on(usbvision);
- usbvision_i2c_register(usbvision);
-}
-
-/*
- * usbvision_probe()
- *
- * This procedure queries the device descriptor and accepts the interface
- * if it looks like a USBVISION video device.
- *
- */
-static int usbvision_probe(struct usb_interface *intf,
- const struct usb_device_id *devid)
-{
- struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf));
- struct usb_interface *uif;
- __u8 ifnum = intf->altsetting->desc.bInterfaceNumber;
- const struct usb_host_interface *interface;
- struct usb_usbvision *usbvision = NULL;
- const struct usb_endpoint_descriptor *endpoint;
- int model, i, ret;
-
- PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
- le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct), ifnum);
-
- model = devid->driver_info;
- if (model < 0 || model >= usbvision_device_data_size) {
- PDEBUG(DBG_PROBE, "model out of bounds %d", model);
- ret = -ENODEV;
- goto err_usb;
- }
- printk(KERN_INFO "%s: %s found\n", __func__,
- usbvision_device_data[model].model_string);
-
- if (usbvision_device_data[model].interface >= 0)
- interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
- else if (ifnum < dev->actconfig->desc.bNumInterfaces)
- interface = &dev->actconfig->interface[ifnum]->altsetting[0];
- else {
- dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
- ifnum, dev->actconfig->desc.bNumInterfaces - 1);
- ret = -ENODEV;
- goto err_usb;
- }
-
- if (interface->desc.bNumEndpoints < 2) {
- dev_err(&intf->dev, "interface %d has %d endpoints, but must have minimum 2\n",
- ifnum, interface->desc.bNumEndpoints);
- ret = -ENODEV;
- goto err_usb;
- }
- endpoint = &interface->endpoint[1].desc;
-
- if (!usb_endpoint_xfer_isoc(endpoint)) {
- dev_err(&intf->dev, "%s: interface %d has a non-ISO endpoint!\n",
- __func__, ifnum);
- dev_err(&intf->dev, "%s: endpoint attributes %d\n",
- __func__, endpoint->bmAttributes);
- ret = -ENODEV;
- goto err_usb;
- }
- if (usb_endpoint_dir_out(endpoint)) {
- dev_err(&intf->dev, "%s: interface %d has an ISO OUT endpoint!\n",
- __func__, ifnum);
- ret = -ENODEV;
- goto err_usb;
- }
-
- usbvision = usbvision_alloc(dev, intf);
- if (!usbvision) {
- dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__);
- ret = -ENOMEM;
- goto err_usb;
- }
-
- if (dev->descriptor.bNumConfigurations > 1)
- usbvision->bridge_type = BRIDGE_NT1004;
- else if (model == DAZZLE_DVC_90_REV_1_SECAM)
- usbvision->bridge_type = BRIDGE_NT1005;
- else
- usbvision->bridge_type = BRIDGE_NT1003;
- PDEBUG(DBG_PROBE, "bridge_type %d", usbvision->bridge_type);
-
- /* compute alternate max packet sizes */
- uif = dev->actconfig->interface[0];
-
- usbvision->num_alt = uif->num_altsetting;
- PDEBUG(DBG_PROBE, "Alternate settings: %i", usbvision->num_alt);
- usbvision->alt_max_pkt_size = kmalloc_array(usbvision->num_alt,
- sizeof(usbvision->alt_max_pkt_size[0]), GFP_KERNEL);
- if (!usbvision->alt_max_pkt_size) {
- ret = -ENOMEM;
- goto err_pkt;
- }
-
- for (i = 0; i < usbvision->num_alt; i++) {
- u16 tmp;
-
- if (uif->altsetting[i].desc.bNumEndpoints < 2) {
- ret = -ENODEV;
- goto err_pkt;
- }
-
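- /* Bits 10:0 of wMaxPacketSize give the base packet size and
- bits 12:11 the number of extra high-bandwidth transactions
- per microframe, hence size * (count + 1) below. */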
- tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
- wMaxPacketSize);
- usbvision->alt_max_pkt_size[i] =
- (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
- PDEBUG(DBG_PROBE, "Alternate setting %i, max size= %i", i,
- usbvision->alt_max_pkt_size[i]);
- }
-
-
- usbvision->nr = usbvision_nr++;
-
- spin_lock_init(&usbvision->queue_lock);
- init_waitqueue_head(&usbvision->wait_frame);
- init_waitqueue_head(&usbvision->wait_stream);
-
- usbvision->have_tuner = usbvision_device_data[model].tuner;
- if (usbvision->have_tuner)
- usbvision->tuner_type = usbvision_device_data[model].tuner_type;
-
- usbvision->dev_model = model;
- usbvision->remove_pending = 0;
- usbvision->iface = ifnum;
- usbvision->iface_alt = 0;
- usbvision->video_endp = endpoint->bEndpointAddress;
- usbvision->isoc_packet_size = 0;
- usbvision->usb_bandwidth = 0;
- usbvision->user = 0;
- usbvision->streaming = stream_off;
- usbvision_configure_video(usbvision);
- usbvision_register_video(usbvision);
-
- usbvision_create_sysfs(&usbvision->vdev);
-
- PDEBUG(DBG_PROBE, "success");
- return 0;
-
-err_pkt:
- usbvision_release(usbvision);
-err_usb:
- usb_put_dev(dev);
- return ret;
-}
-
-
-/*
- * usbvision_disconnect()
- *
- * This procedure stops all driver activity and releases the
- * interface-private structure (obtained via usb_get_intfdata()); after
- * that the driver should be removable with no ill consequences.
- *
- */
-static void usbvision_disconnect(struct usb_interface *intf)
-{
- struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
- int u;
-
- PDEBUG(DBG_PROBE, "");
-
- if (!usbvision) {
- pr_err("%s: usb_get_intfdata() failed\n", __func__);
- return;
- }
-
- mutex_lock(&usbvision->v4l2_lock);
-
- /* At this time we ask to cancel outstanding URBs */
- usbvision_stop_isoc(usbvision);
-
- v4l2_device_disconnect(&usbvision->v4l2_dev);
- usbvision_i2c_unregister(usbvision);
- usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
- u = usbvision->user;
-
- usb_put_dev(usbvision->dev);
- usbvision->dev = NULL; /* USB device is no more */
-
- mutex_unlock(&usbvision->v4l2_lock);
-
- if (u) {
- printk(KERN_INFO "%s: In use, disconnect pending\n",
- __func__);
- wake_up_interruptible(&usbvision->wait_frame);
- wake_up_interruptible(&usbvision->wait_stream);
- } else {
- usbvision_release(usbvision);
- }
-
- PDEBUG(DBG_PROBE, "success");
-}
-
-static struct usb_driver usbvision_driver = {
- .name = "usbvision",
- .id_table = usbvision_table,
- .probe = usbvision_probe,
- .disconnect = usbvision_disconnect,
-};
-
-/*
- * usbvision_init()
- *
- * This code is run to initialize the driver.
- *
- */
-static int __init usbvision_init(void)
-{
- int err_code;
-
- PDEBUG(DBG_PROBE, "");
-
- PDEBUG(DBG_IO, "IO debugging is enabled [video]");
- PDEBUG(DBG_PROBE, "PROBE debugging is enabled [video]");
- PDEBUG(DBG_MMAP, "MMAP debugging is enabled [video]");
-
- /* disable planar mode support unless compression enabled */
- if (isoc_mode != ISOC_MODE_COMPRESS) {
- /* FIXME : not the right way to set supported flag */
- usbvision_v4l2_format[6].supported = 0; /* V4L2_PIX_FMT_YVU420 */
- usbvision_v4l2_format[7].supported = 0; /* V4L2_PIX_FMT_YUV422P */
- }
-
- err_code = usb_register(&usbvision_driver);
-
- if (err_code == 0) {
- printk(KERN_INFO DRIVER_DESC " : " USBVISION_VERSION_STRING "\n");
- PDEBUG(DBG_PROBE, "success");
- }
- return err_code;
-}
-
-static void __exit usbvision_exit(void)
-{
- PDEBUG(DBG_PROBE, "");
-
- usb_deregister(&usbvision_driver);
- PDEBUG(DBG_PROBE, "success");
-}
-
-module_init(usbvision_init);
-module_exit(usbvision_exit);
diff --git a/drivers/staging/media/usbvision/usbvision.h b/drivers/staging/media/usbvision/usbvision.h
deleted file mode 100644
index 11539578e8d2..000000000000
--- a/drivers/staging/media/usbvision/usbvision.h
+++ /dev/null
@@ -1,500 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * USBVISION.H
- * usbvision header file
- *
- * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de>
- * Dwaine Garden <dwainegarden@rogers.com>
- *
- * Report problems to v4l MailingList: linux-media@vger.kernel.org
- *
- * This module is part of usbvision driver project.
- * Updates to driver completed by Dwaine P. Garden
- * v4l2 conversion by Thierry Merle <thierry.merle@free.fr>
- */
-
-
-#ifndef __LINUX_USBVISION_H
-#define __LINUX_USBVISION_H
-
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/tuner.h>
-#include <linux/videodev2.h>
-
-#define USBVISION_DEBUG /* Turn on debug messages */
-
-#define USBVISION_PWR_REG 0x00
- #define USBVISION_SSPND_EN (1 << 1)
- #define USBVISION_RES2 (1 << 2)
- #define USBVISION_PWR_VID (1 << 5)
- #define USBVISION_E2_EN (1 << 7)
-#define USBVISION_CONFIG_REG 0x01
-#define USBVISION_ADRS_REG 0x02
-#define USBVISION_ALTER_REG 0x03
-#define USBVISION_FORCE_ALTER_REG 0x04
-#define USBVISION_STATUS_REG 0x05
-#define USBVISION_IOPIN_REG 0x06
- #define USBVISION_IO_1 (1 << 0)
- #define USBVISION_IO_2 (1 << 1)
- #define USBVISION_AUDIO_IN 0
- #define USBVISION_AUDIO_TV 1
- #define USBVISION_AUDIO_RADIO 2
- #define USBVISION_AUDIO_MUTE 3
-#define USBVISION_SER_MODE 0x07
- #define USBVISION_CLK_OUT (1 << 0)
- #define USBVISION_DAT_IO (1 << 1)
- #define USBVISION_SENS_OUT (1 << 2)
- #define USBVISION_SER_MODE_SOFT (0 << 4)
- #define USBVISION_SER_MODE_SIO (1 << 4)
-#define USBVISION_SER_ADRS 0x08
-#define USBVISION_SER_CONT 0x09
-#define USBVISION_SER_DAT1 0x0A
-#define USBVISION_SER_DAT2 0x0B
-#define USBVISION_SER_DAT3 0x0C
-#define USBVISION_SER_DAT4 0x0D
-#define USBVISION_EE_DATA 0x0E
-#define USBVISION_EE_LSBAD 0x0F
-#define USBVISION_EE_CONT 0x10
-#define USBVISION_DRM_CONT 0x12
- #define USBVISION_REF (1 << 0)
- #define USBVISION_RES_UR (1 << 2)
- #define USBVISION_RES_FDL (1 << 3)
- #define USBVISION_RES_VDW (1 << 4)
-#define USBVISION_DRM_PRM1 0x13
-#define USBVISION_DRM_PRM2 0x14
-#define USBVISION_DRM_PRM3 0x15
-#define USBVISION_DRM_PRM4 0x16
-#define USBVISION_DRM_PRM5 0x17
-#define USBVISION_DRM_PRM6 0x18
-#define USBVISION_DRM_PRM7 0x19
-#define USBVISION_DRM_PRM8 0x1A
-#define USBVISION_VIN_REG1 0x1B
- #define USBVISION_8_422_SYNC 0x01
- #define USBVISION_16_422_SYNC 0x02
- #define USBVISION_VSNC_POL (1 << 3)
- #define USBVISION_HSNC_POL (1 << 4)
- #define USBVISION_FID_POL (1 << 5)
- #define USBVISION_HVALID_PO (1 << 6)
- #define USBVISION_VCLK_POL (1 << 7)
-#define USBVISION_VIN_REG2 0x1C
- #define USBVISION_AUTO_FID (1 << 0)
- #define USBVISION_NONE_INTER (1 << 1)
- #define USBVISION_NOHVALID (1 << 2)
- #define USBVISION_UV_ID (1 << 3)
- #define USBVISION_FIX_2C (1 << 4)
- #define USBVISION_SEND_FID (1 << 5)
- #define USBVISION_KEEP_BLANK (1 << 7)
-#define USBVISION_LXSIZE_I 0x1D
-#define USBVISION_MXSIZE_I 0x1E
-#define USBVISION_LYSIZE_I 0x1F
-#define USBVISION_MYSIZE_I 0x20
-#define USBVISION_LX_OFFST 0x21
-#define USBVISION_MX_OFFST 0x22
-#define USBVISION_LY_OFFST 0x23
-#define USBVISION_MY_OFFST 0x24
-#define USBVISION_FRM_RATE 0x25
-#define USBVISION_LXSIZE_O 0x26
-#define USBVISION_MXSIZE_O 0x27
-#define USBVISION_LYSIZE_O 0x28
-#define USBVISION_MYSIZE_O 0x29
-#define USBVISION_FILT_CONT 0x2A
-#define USBVISION_VO_MODE 0x2B
-#define USBVISION_INTRA_CYC 0x2C
-#define USBVISION_STRIP_SZ 0x2D
-#define USBVISION_FORCE_INTRA 0x2E
-#define USBVISION_FORCE_UP 0x2F
-#define USBVISION_BUF_THR 0x30
-#define USBVISION_DVI_YUV 0x31
-#define USBVISION_AUDIO_CONT 0x32
-#define USBVISION_AUD_PK_LEN 0x33
-#define USBVISION_BLK_PK_LEN 0x34
-#define USBVISION_PCM_THR1 0x38
-#define USBVISION_PCM_THR2 0x39
-#define USBVISION_DIST_THR_L 0x3A
-#define USBVISION_DIST_THR_H 0x3B
-#define USBVISION_MAX_DIST_L 0x3C
-#define USBVISION_MAX_DIST_H 0x3D
-#define USBVISION_OP_CODE 0x33
-
-#define MAX_BYTES_PER_PIXEL 4
-
-#define MIN_FRAME_WIDTH 64
-#define MAX_USB_WIDTH 320 /* 384 */
-#define MAX_FRAME_WIDTH 320 /* 384 */ /* stretching sometimes causes crashes*/
-
-#define MIN_FRAME_HEIGHT 48
-#define MAX_USB_HEIGHT 240 /* 288 */
-#define MAX_FRAME_HEIGHT 240 /* 288 */ /* Stretching sometimes causes crashes*/
-
-#define MAX_FRAME_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * MAX_BYTES_PER_PIXEL)
-#define USBVISION_CLIPMASK_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT / 8) /* bytesize of clipmask */
-
-#define USBVISION_URB_FRAMES 32
-
-#define USBVISION_NUM_HEADERMARKER 20
-#define USBVISION_NUMFRAMES 3 /* Maximum number of frames an application can get */
-#define USBVISION_NUMSBUF 2 /* Dimensioning the USB S buffering */
-
-#define USBVISION_POWEROFF_TIME (3 * HZ) /* 3 seconds */
-
-
-#define FRAMERATE_MIN 0
-#define FRAMERATE_MAX 31
-
-enum {
- ISOC_MODE_YUV422 = 0x03,
- ISOC_MODE_YUV420 = 0x14,
- ISOC_MODE_COMPRESS = 0x60,
-};
-
-/* This macro restricts an int variable to an inclusive range */
-#define RESTRICT_TO_RANGE(v, mi, ma) \
- do { if (((int)v) < (mi)) (v) = (mi); else if ((v) > (ma)) (v) = (ma); } while (0)
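-/* Note: the macro arguments are evaluated more than once, so avoid
- passing expressions with side effects. */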
-
-/*
- * We use macros to do YUV -> RGB conversion because this is
- * very important for speed and totally unimportant for size.
- *
- * YUV -> RGB Conversion
- * ---------------------
- *
- * B = 1.164*(Y-16) + 2.018*(V-128)
- * G = 1.164*(Y-16) - 0.813*(U-128) - 0.391*(V-128)
- * R = 1.164*(Y-16) + 1.596*(U-128)
- *
- * If you fancy integer arithmetic (as you should), hear this:
- *
- * 65536*B = 76284*(Y-16) + 132252*(V-128)
- * 65536*G = 76284*(Y-16) - 53281*(U-128) - 25625*(V-128)
- * 65536*R = 76284*(Y-16) + 104595*(U-128)
- *
- * Make sure the output values are within [0..255] range.
- */
-#define LIMIT_RGB(x) (((x) < 0) ? 0 : (((x) > 255) ? 255 : (x)))
-#define YUV_TO_RGB_BY_THE_BOOK(my, mu, mv, mr, mg, mb) { \
- int mm_y, mm_yc, mm_u, mm_v, mm_r, mm_g, mm_b; \
- mm_y = (my) - 16; \
- mm_u = (mu) - 128; \
- mm_v = (mv) - 128; \
- mm_yc = mm_y * 76284; \
- mm_b = (mm_yc + 132252 * mm_v) >> 16; \
- mm_g = (mm_yc - 53281 * mm_u - 25625 * mm_v) >> 16; \
- mm_r = (mm_yc + 104595 * mm_u) >> 16; \
- mb = LIMIT_RGB(mm_b); \
- mg = LIMIT_RGB(mm_g); \
- mr = LIMIT_RGB(mm_r); \
-}
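-
-/*
- * Illustrative use (not from the original source):
- *
- *	int r, g, b;
- *	YUV_TO_RGB_BY_THE_BOOK(y, u, v, r, g, b);
- *
- * Note that, relative to the usual BT.601 naming, R is computed from the
- * 'mu' argument and B from 'mv'; callers presumably pass Cr and Cb in
- * the opposite of the conventional U/V order.
- */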
-
-/*
- * This macro checks if usbvision is still operational. The 'usbvision'
- * pointer must be valid, usbvision->dev must be valid, we are not
- * removing the device and the device has not erred on us.
- */
-#define USBVISION_IS_OPERATIONAL(udevice) (\
- (udevice != NULL) && \
- ((udevice)->dev != NULL) && \
- ((udevice)->last_error == 0) && \
- (!(udevice)->remove_pending))
-
-#define I2C_USB_ADAP_MAX 16
-
-#define USBVISION_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM | V4L2_STD_PAL_M)
-
-/* ----------------------------------------------------------------- */
-/* usbvision video structures */
-/* ----------------------------------------------------------------- */
-enum scan_state {
- scan_state_scanning, /* Scanning for header */
- scan_state_lines /* Parsing lines */
-};
-
-/* Completion states of the data parser */
-enum parse_state {
- parse_state_continue, /* Just parse next item */
- parse_state_next_frame, /* Frame done, send it to V4L */
- parse_state_out, /* Not enough data for frame */
- parse_state_end_parse /* End parsing */
-};
-
-enum frame_state {
- frame_state_unused, /* Unused (no MCAPTURE) */
- frame_state_ready, /* Ready to start grabbing */
- frame_state_grabbing, /* In the process of being grabbed into */
- frame_state_done, /* Finished grabbing, but not been synced yet */
- frame_state_done_hold, /* Are syncing or reading */
- frame_state_error, /* Something bad happened while processing */
-};
-
-/* stream states */
-enum stream_state {
- stream_off, /* Driver streaming is completely OFF */
- stream_idle, /* Driver streaming is ready to be put ON by the application */
- stream_interrupt, /* Driver streaming must be interrupted */
- stream_on, /* Driver streaming is put ON by the application */
-};
-
-enum isoc_state {
- isoc_state_in_frame, /* Isoc packet is member of frame */
- isoc_state_no_frame, /* Isoc packet is not member of any frame */
-};
-
-struct usb_device;
-
-struct usbvision_sbuf {
- char *data;
- struct urb *urb;
-};
-
-#define USBVISION_MAGIC_1 0x55
-#define USBVISION_MAGIC_2 0xAA
-#define USBVISION_HEADER_LENGTH 0x0c
-#define USBVISION_SAA7111_ADDR 0x48
-#define USBVISION_SAA7113_ADDR 0x4a
-#define USBVISION_IIC_LRACK 0x20
-#define USBVISION_IIC_LRNACK 0x30
-#define USBVISION_FRAME_FORMAT_PARAM_INTRA (1<<7)
-
-struct usbvision_v4l2_format_st {
- int supported;
- int bytes_per_pixel;
- int depth;
- int format;
-};
-#define USBVISION_SUPPORTED_PALETTES ARRAY_SIZE(usbvision_v4l2_format)
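-/* Note: usbvision_v4l2_format[] is defined in usbvision-video.c, so this
- * macro only expands correctly in that translation unit. */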
-
-struct usbvision_frame_header {
- unsigned char magic_1; /* 0 magic */
- unsigned char magic_2; /* 1 magic */
- unsigned char header_length; /* 2 */
- unsigned char frame_num; /* 3 */
- unsigned char frame_phase; /* 4 */
- unsigned char frame_latency; /* 5 */
- unsigned char data_format; /* 6 */
- unsigned char format_param; /* 7 */
- unsigned char frame_width_lo; /* 8 */
- unsigned char frame_width_hi; /* 9 */
- unsigned char frame_height_lo; /* 10 */
- unsigned char frame_height_hi; /* 11 */
- __u16 frame_width; /* 8 - 9 after endian correction */
- __u16 frame_height; /* 10 - 11 after endian correction */
-};
-
-struct usbvision_frame {
- char *data; /* Frame buffer */
- struct usbvision_frame_header isoc_header; /* Header from stream */
-
- int width; /* Width application is expecting */
- int height; /* Height */
- int index; /* Frame index */
- int frmwidth; /* Width the frame actually is */
- int frmheight; /* Height */
-
- volatile int grabstate; /* State of grabbing */
- int scanstate; /* State of scanning */
-
- struct list_head frame;
-
- int curline; /* Line of frame we're working on */
-
- long scanlength; /* uncompressed, raw data length of frame */
- long bytes_read; /* amount of scanlength that has been read from data */
- struct usbvision_v4l2_format_st v4l2_format; /* format the user needs*/
- int v4l2_linesize; /* bytes for one videoline*/
- u64 ts;
- int sequence; /* How many video frames we send to user */
-};
-
-#define CODEC_SAA7113 7113
-#define CODEC_SAA7111 7111
-#define CODEC_WEBCAM 3000
-#define BRIDGE_NT1003 1003
-#define BRIDGE_NT1004 1004
-#define BRIDGE_NT1005 1005
-
-struct usbvision_device_data_st {
- __u64 video_norm;
- const char *model_string;
- int interface; /* to handle special interface numbers like BELKIN and Hauppauge WinTV-USB II */
- __u16 codec;
- unsigned video_channels:3;
- unsigned audio_channels:2;
- unsigned radio:1;
- unsigned vbi:1;
- unsigned tuner:1;
- unsigned vin_reg1_override:1; /* Override default value with */
- unsigned vin_reg2_override:1; /* vin_reg1, vin_reg2, etc. */
- unsigned dvi_yuv_override:1;
- __u8 vin_reg1;
- __u8 vin_reg2;
- __u8 dvi_yuv;
- __u8 tuner_type;
- __s16 x_offset;
- __s16 y_offset;
-};
-
-/* Declared on usbvision-cards.c */
-extern struct usbvision_device_data_st usbvision_device_data[];
-extern struct usb_device_id usbvision_table[];
-
-struct usb_usbvision {
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler hdl;
- struct video_device vdev; /* Video Device */
- struct video_device rdev; /* Radio Device */
-
- /* i2c Declaration Section*/
- struct i2c_adapter i2c_adap;
- int registered_i2c;
-
- struct urb *ctrl_urb;
- unsigned char ctrl_urb_buffer[8];
- int ctrl_urb_busy;
- struct usb_ctrlrequest ctrl_urb_setup;
-
- /* configuration part */
- int have_tuner;
- int tuner_type;
- int bridge_type; /* NT1003, NT1004, NT1005 */
- int radio;
- int video_inputs; /* # of inputs */
- unsigned long radio_freq;
- unsigned long tv_freq;
- int audio_mute;
- int audio_channel;
- int isoc_mode; /* format of video data for the usb isoc-transfer */
- unsigned int nr; /* Number of the device */
-
- /* Device structure */
- struct usb_device *dev;
- /* usb transfer */
- int num_alt; /* Number of alternative settings */
- unsigned int *alt_max_pkt_size; /* array of max_packet_size */
- unsigned char iface; /* Video interface number */
- unsigned char iface_alt; /* Alt settings */
- unsigned char vin_reg2_preset;
- struct mutex v4l2_lock;
- int power; /* is the device powered on? */
- int user; /* user count for exclusive use */
- int initialized; /* Had we already sent init sequence? */
- int dev_model; /* What type of USBVISION device we got? */
- enum stream_state streaming; /* Are we streaming Isochronous? */
- int last_error; /* What calamity struck us? */
- int curwidth; /* width of the frame the device is currently set to*/
- int curheight; /* height of the frame the device is currently set to*/
- int stretch_width; /* stretch-factor for frame width (from usb to screen)*/
- int stretch_height; /* stretch-factor for frame height (from usb to screen)*/
- char *fbuf; /* Videodev buffer area for mmap*/
- int max_frame_size; /* Bytes in one video frame */
- int fbuf_size; /* Videodev buffer size */
- spinlock_t queue_lock; /* spinlock for protecting mods on inqueue and outqueue */
- struct list_head inqueue, outqueue; /* queued frame list and ready to dequeue frame list */
- wait_queue_head_t wait_frame; /* Processes waiting */
- wait_queue_head_t wait_stream; /* Processes waiting */
- struct usbvision_frame *cur_frame; /* pointer to current frame, set by usbvision_find_header */
- struct usbvision_frame frame[USBVISION_NUMFRAMES]; /* frame buffer */
- int num_frames; /* number of frames allocated */
- struct usbvision_sbuf sbuf[USBVISION_NUMSBUF]; /* S buffering */
- volatile int remove_pending; /* If set then about to exit */
-
- /* Scratch space from the Isochronous Pipe.*/
- unsigned char *scratch;
- int scratch_read_ptr;
- int scratch_write_ptr;
- int scratch_headermarker[USBVISION_NUM_HEADERMARKER];
- int scratch_headermarker_read_ptr;
- int scratch_headermarker_write_ptr;
- enum isoc_state isocstate;
- struct usbvision_v4l2_format_st palette;
-
- struct v4l2_capability vcap; /* Video capabilities */
- unsigned int ctl_input; /* selected input */
- v4l2_std_id tvnorm_id; /* selected tv norm */
- unsigned char video_endp; /* 0x82 for USBVISION-based devices */
-
- /* Decompression stuff: */
- unsigned char *intra_frame_buffer; /* Buffer for reference frame */
- int block_pos; /* for test only */
- int request_intra; /* 0 = normal; 1 = intra frame is requested; */
- int last_isoc_frame_num; /* check for lost isoc frames */
- int isoc_packet_size; /* need to calculate used_bandwidth */
- int used_bandwidth; /* used bandwidth 0-100%, need to set compr_level */
- int compr_level; /* How strong (100) or weak (0) is compression */
- int last_compr_level; /* How strong (100) or weak (0) was compression */
- int usb_bandwidth; /* Mbit/s */
-
- /* Statistics that can be overlaid on the screen */
- unsigned long isoc_urb_count; /* How many URBs we received so far */
- unsigned long urb_length; /* Length of last URB */
- unsigned long isoc_data_count; /* How many bytes we received */
- unsigned long header_count; /* How many frame headers we found */
- unsigned long scratch_ovf_count; /* How many times we overflowed scratch */
- unsigned long isoc_skip_count; /* How many empty ISO packets received */
- unsigned long isoc_err_count; /* How many bad ISO packets received */
- unsigned long isoc_packet_count; /* How many packets we totally got */
- int isoc_measure_bandwidth_count;
- int frame_num; /* How many video frames we send to user */
- int max_strip_len; /* How big is the biggest strip */
- int comprblock_pos;
- int strip_len_errors; /* How many times was block_pos greater than strip_len */
- int strip_magic_errors;
- int strip_line_number_errors;
- int compr_block_types[4];
-};
-
-static inline struct usb_usbvision *to_usbvision(struct v4l2_device *v4l2_dev)
-{
- return container_of(v4l2_dev, struct usb_usbvision, v4l2_dev);
-}
-
-#define call_all(usbvision, o, f, args...) \
- v4l2_device_call_all(&usbvision->v4l2_dev, 0, o, f, ##args)
-
-/* --------------------------------------------------------------- */
-/* defined in usbvision-i2c.c */
-/* i2c-algo-usb declaration */
-/* --------------------------------------------------------------- */
-
-/* ----------------------------------------------------------------------- */
-/* usbvision specific I2C functions */
-/* ----------------------------------------------------------------------- */
-int usbvision_i2c_register(struct usb_usbvision *usbvision);
-int usbvision_i2c_unregister(struct usb_usbvision *usbvision);
-
-/* defined in usbvision-core.c */
-int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg);
-int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
- unsigned char value);
-
-int usbvision_frames_alloc(struct usb_usbvision *usbvision, int number_of_frames);
-void usbvision_frames_free(struct usb_usbvision *usbvision);
-int usbvision_scratch_alloc(struct usb_usbvision *usbvision);
-void usbvision_scratch_free(struct usb_usbvision *usbvision);
-int usbvision_decompress_alloc(struct usb_usbvision *usbvision);
-void usbvision_decompress_free(struct usb_usbvision *usbvision);
-
-int usbvision_setup(struct usb_usbvision *usbvision, int format);
-int usbvision_init_isoc(struct usb_usbvision *usbvision);
-int usbvision_restart_isoc(struct usb_usbvision *usbvision);
-void usbvision_stop_isoc(struct usb_usbvision *usbvision);
-int usbvision_set_alternate(struct usb_usbvision *dev);
-
-int usbvision_set_audio(struct usb_usbvision *usbvision, int audio_channel);
-int usbvision_audio_off(struct usb_usbvision *usbvision);
-
-int usbvision_begin_streaming(struct usb_usbvision *usbvision);
-void usbvision_empty_framequeues(struct usb_usbvision *dev);
-int usbvision_stream_interrupt(struct usb_usbvision *dev);
-
-int usbvision_muxsel(struct usb_usbvision *usbvision, int channel);
-int usbvision_set_input(struct usb_usbvision *usbvision);
-int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height);
-
-int usbvision_power_off(struct usb_usbvision *usbvision);
-int usbvision_power_on(struct usb_usbvision *usbvision);
-
-#endif /* __LINUX_USBVISION_H */
diff --git a/drivers/staging/media/zoran/Kconfig b/drivers/staging/media/zoran/Kconfig
new file mode 100644
index 000000000000..7874842033ca
--- /dev/null
+++ b/drivers/staging/media/zoran/Kconfig
@@ -0,0 +1,76 @@
+config VIDEO_ZORAN
+ tristate "Zoran ZR36057/36067 Video For Linux (Deprecated)"
+ depends on PCI && I2C_ALGOBIT && VIDEO_V4L2
+ depends on !ALPHA
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Say Y for support for MJPEG capture cards based on the Zoran
+ 36057/36067 PCI controller chipset. This includes the Iomega
+ Buz, Pinnacle DC10+ and the Linux Media Labs LML33. There is
+ a driver homepage at <http://mjpeg.sf.net/driver-zoran/>. For
+ more information, check <file:Documentation/driver-api/media/drivers/zoran.rst>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zr36067.
+
+config VIDEO_ZORAN_DC30
+ tristate "Pinnacle/Miro DC30(+) support"
+ depends on VIDEO_ZORAN
+ select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_VPX3220 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Pinnacle/Miro DC30(+) MJPEG capture/playback
+ card. This also supports really old DC10 cards based on the
+ zr36050 MJPEG codec and zr36016 VFE.
+
+config VIDEO_ZORAN_ZR36060
+ tristate "Zoran ZR36060"
+ depends on VIDEO_ZORAN
+ help
+ Say Y to support Zoran boards based on 36060 chips.
+ This includes the Iomega Buz, Pinnacle DC10, Linux Media Labs LML33
+ and LML33R10, and AverMedia 6 Eyes boards.
+
+config VIDEO_ZORAN_BUZ
+ tristate "Iomega Buz support"
+ depends on VIDEO_ZORAN_ZR36060
+ select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_SAA7185 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Iomega Buz MJPEG capture/playback card.
+
+config VIDEO_ZORAN_DC10
+ tristate "Pinnacle/Miro DC10(+) support"
+ depends on VIDEO_ZORAN_ZR36060
+ select VIDEO_SAA7110 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_ADV7175 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Pinnacle/Miro DC10(+) MJPEG capture/playback
+ card.
+
+config VIDEO_ZORAN_LML33
+ tristate "Linux Media Labs LML33 support"
+ depends on VIDEO_ZORAN_ZR36060
+ select VIDEO_BT819 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Linux Media Labs LML33 MJPEG capture/playback
+ card.
+
+config VIDEO_ZORAN_LML33R10
+ tristate "Linux Media Labs LML33R10 support"
+ depends on VIDEO_ZORAN_ZR36060
+ select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_ADV7170 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the Linux Media Labs LML33R10 MJPEG capture/playback
+ card.
+
+config VIDEO_ZORAN_AVS6EYES
+ tristate "AverMedia 6 Eyes support"
+ depends on VIDEO_ZORAN_ZR36060
+ select VIDEO_BT856 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_BT866 if MEDIA_SUBDRV_AUTOSELECT
+ select VIDEO_KS0127 if MEDIA_SUBDRV_AUTOSELECT
+ help
+ Support for the AverMedia 6 Eyes video surveillance card.
diff --git a/drivers/staging/media/zoran/Makefile b/drivers/staging/media/zoran/Makefile
new file mode 100644
index 000000000000..7023158e3892
--- /dev/null
+++ b/drivers/staging/media/zoran/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+zr36067-objs := zoran_device.o \
+ zoran_driver.o zoran_card.o
+
+obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o
+obj-$(CONFIG_VIDEO_ZORAN_DC30) += zr36050.o zr36016.o
+obj-$(CONFIG_VIDEO_ZORAN_ZR36060) += zr36060.o
diff --git a/drivers/staging/media/zoran/TODO b/drivers/staging/media/zoran/TODO
new file mode 100644
index 000000000000..6992540d3e53
--- /dev/null
+++ b/drivers/staging/media/zoran/TODO
@@ -0,0 +1,19 @@
+
+How to test the zoran driver:
+- RAW capture
+ mplayer tv:///dev/video0 -tv driver=v4l2
+
+- MJPEG capture (compression)
+ mplayer tv:///dev/video0 -tv driver=v4l2:outfmt=mjpeg
+ TODO: need two tests, one for each Dcim path
+
+- MJPEG play (decompression)
+ ffmpeg -i test.avi -vcodec mjpeg -an -f v4l2 /dev/video0
+ Note: only recent ffmpeg versions are able to send non-raw video via v4l2
+
+ The original way of sending video was via mplayer vo_zr/vo_zr2, but it no longer
+ compiles and is a dead end (it uses some old private ffmpeg structures).
+
+TODO
+- fix the v4l compliance "TRY_FMT cannot handle an invalid pixelformat"
+- Filter JPEG data to make output work
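+- Check with v4l2-compliance; assuming v4l-utils is installed, a first pass
+  over the capture node would be:
+  v4l2-compliance -d /dev/video0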
diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/staging/media/zoran/videocodec.c
new file mode 100644
index 000000000000..28031d3fd757
--- /dev/null
+++ b/drivers/staging/media/zoran/videocodec.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VIDEO MOTION CODECs internal API for video devices
+ *
+ * Interface for MJPEG (and maybe later MPEG/WAVELETS) codecs
+ * bound to a master device.
+ *
+ * (c) 2002 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+#define VIDEOCODEC_VERSION "v0.2"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+// procfs support depends on the kernel config (CONFIG_PROC_FS)
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#endif
+
+#include "videocodec.h"
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-4)");
+
+#define dprintk(num, format, args...) \
+ do { \
+ if (debug >= num) \
+ printk(format, ##args); \
+ } while (0)
+
+struct attached_list {
+ struct videocodec *codec;
+ struct attached_list *next;
+};
+
+struct codec_list {
+ const struct videocodec *codec;
+ int attached;
+ struct attached_list *list;
+ struct codec_list *next;
+};
+
+static struct codec_list *codeclist_top;
+
+/* ================================================= */
+/* function prototypes of the master/slave interface */
+/* ================================================= */
+
+struct videocodec *videocodec_attach(struct videocodec_master *master)
+{
+ struct codec_list *h = codeclist_top;
+ struct attached_list *a, *ptr;
+ struct videocodec *codec;
+ int res;
+
+ if (!master) {
+ pr_err("%s: no data\n", __func__);
+ return NULL;
+ }
+
+ dprintk(2, "%s: '%s', flags %lx, magic %lx\n", __func__,
+ master->name, master->flags, master->magic);
+
+ if (!h) {
+ pr_err("%s: no device available\n", __func__);
+ return NULL;
+ }
+
+ while (h) {
+ // attach only if the slave has at least the flags
+ // expected by the master
+ if ((master->flags & h->codec->flags) == master->flags) {
+ dprintk(4, "%s: try '%s'\n", __func__, h->codec->name);
+
+ if (!try_module_get(h->codec->owner))
+ return NULL;
+
+ codec = kmemdup(h->codec, sizeof(struct videocodec), GFP_KERNEL);
+ if (!codec)
+ goto out_module_put;
+
+ res = strlen(codec->name);
+ snprintf(codec->name + res, sizeof(codec->name) - res, "[%d]", h->attached);
+ codec->master_data = master;
+ res = codec->setup(codec);
+ if (res == 0) {
+ dprintk(3, "%s: '%s'\n", __func__, codec->name);
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ goto out_module_put;
+ ptr->codec = codec;
+
+ a = h->list;
+ if (!a) {
+ h->list = ptr;
+ dprintk(4, "videocodec: first element\n");
+ } else {
+ while (a->next)
+ a = a->next; // find end
+ a->next = ptr;
+ dprintk(4, "videocodec: in after '%s'\n", h->codec->name);
+ }
+
+ h->attached += 1;
+ return codec;
+ } else {
+ module_put(h->codec->owner);
+ kfree(codec);
+ }
+ }
+ h = h->next;
+ }
+
+ pr_err("%s: no codec found!\n", __func__);
+ return NULL;
+
+ out_module_put:
+ module_put(h->codec->owner);
+ kfree(codec);
+ return NULL;
+}
+EXPORT_SYMBOL(videocodec_attach);
+
+int videocodec_detach(struct videocodec *codec)
+{
+ struct codec_list *h = codeclist_top;
+ struct attached_list *a, *prev;
+ int res;
+
+ if (!codec) {
+ pr_err("%s: no data\n", __func__);
+ return -EINVAL;
+ }
+
+ dprintk(2, "%s: '%s', type: %x, flags %lx, magic %lx\n", __func__,
+ codec->name, codec->type, codec->flags, codec->magic);
+
+ if (!h) {
+ pr_err("%s: no device left...\n", __func__);
+ return -ENXIO;
+ }
+
+ while (h) {
+ a = h->list;
+ prev = NULL;
+ while (a) {
+ if (codec == a->codec) {
+ res = a->codec->unset(a->codec);
+ if (res >= 0) {
+ dprintk(3, "%s: '%s'\n", __func__, a->codec->name);
+ a->codec->master_data = NULL;
+ } else {
+ pr_err("%s: unset of '%s' failed\n", __func__, a->codec->name);
+ a->codec->master_data = NULL;
+ }
+ if (!prev) {
+ h->list = a->next;
+ dprintk(4, "videocodec: delete first\n");
+ } else {
+ prev->next = a->next;
+ dprintk(4, "videocodec: delete middle\n");
+ }
+ module_put(a->codec->owner);
+ kfree(a->codec);
+ kfree(a);
+ h->attached -= 1;
+ return 0;
+ }
+ prev = a;
+ a = a->next;
+ }
+ h = h->next;
+ }
+
+ pr_err("%s: given codec not found!\n", __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(videocodec_detach);
+
+int videocodec_register(const struct videocodec *codec)
+{
+ struct codec_list *ptr, *h = codeclist_top;
+
+ if (!codec) {
+ pr_err("%s: no data!\n", __func__);
+ return -EINVAL;
+ }
+
+ dprintk(2,
+ "videocodec: register '%s', type: %x, flags %lx, magic %lx\n",
+ codec->name, codec->type, codec->flags, codec->magic);
+
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+ ptr->codec = codec;
+
+ if (!h) {
+ codeclist_top = ptr;
+ dprintk(4, "videocodec: hooked in as first element\n");
+ } else {
+ while (h->next)
+ h = h->next; // find the end
+ h->next = ptr;
+ dprintk(4, "videocodec: hooked in after '%s'\n",
+ h->codec->name);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(videocodec_register);
+
+int videocodec_unregister(const struct videocodec *codec)
+{
+ struct codec_list *prev = NULL, *h = codeclist_top;
+
+ if (!codec) {
+ pr_err("%s: no data!\n", __func__);
+ return -EINVAL;
+ }
+
+ dprintk(2,
+ "videocodec: unregister '%s', type: %x, flags %lx, magic %lx\n",
+ codec->name, codec->type, codec->flags, codec->magic);
+
+ if (!h) {
+ pr_err("%s: no device left...\n", __func__);
+ return -ENXIO;
+ }
+
+ while (h) {
+ if (codec == h->codec) {
+ if (h->attached) {
+ pr_err("videocodec: '%s' is used\n", h->codec->name);
+ return -EBUSY;
+ }
+ dprintk(3, "videocodec: unregister '%s' is ok.\n",
+ h->codec->name);
+ if (!prev) {
+ codeclist_top = h->next;
+ dprintk(4,
+ "videocodec: delete first element\n");
+ } else {
+ prev->next = h->next;
+ dprintk(4,
+ "videocodec: delete middle element\n");
+ }
+ kfree(h);
+ return 0;
+ }
+ prev = h;
+ h = h->next;
+ }
+
+ pr_err("%s: given codec not found!\n", __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(videocodec_unregister);
+
+#ifdef CONFIG_PROC_FS
+static int proc_videocodecs_show(struct seq_file *m, void *v)
+{
+ struct codec_list *h = codeclist_top;
+ struct attached_list *a;
+
+ seq_printf(m, "<S>lave or attached <M>aster name type flags magic ");
+ seq_printf(m, "(connected as)\n");
+
+ while (h) {
+ seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n",
+ h->codec->name, h->codec->type,
+ h->codec->flags, h->codec->magic);
+ a = h->list;
+ while (a) {
+ seq_printf(m, "M %32s %04x %08lx %08lx (%s)\n",
+ a->codec->master_data->name,
+ a->codec->master_data->type,
+ a->codec->master_data->flags,
+ a->codec->master_data->magic,
+ a->codec->name);
+ a = a->next;
+ }
+ h = h->next;
+ }
+
+ return 0;
+}
+#endif
+
+/* ===================== */
+/* hook in driver module */
+/* ===================== */
+static int __init videocodec_init(void)
+{
+#ifdef CONFIG_PROC_FS
+ static struct proc_dir_entry *videocodec_proc_entry;
+#endif
+
+ pr_info("Linux video codec intermediate layer: %s\n", VIDEOCODEC_VERSION);
+
+#ifdef CONFIG_PROC_FS
+ videocodec_proc_entry = proc_create_single("videocodecs", 0, NULL, proc_videocodecs_show);
+ if (!videocodec_proc_entry)
+ pr_err("videocodec: can't init procfs.\n");
+#endif
+ return 0;
+}
+
+static void __exit videocodec_exit(void)
+{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("videocodecs", NULL);
+#endif
+}
+
+module_init(videocodec_init);
+module_exit(videocodec_exit);
+
+MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
+MODULE_DESCRIPTION("Intermediate API module for video codecs "
+ VIDEOCODEC_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/staging/media/zoran/videocodec.h
new file mode 100644
index 000000000000..8a5003dda9f4
--- /dev/null
+++ b/drivers/staging/media/zoran/videocodec.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * VIDEO MOTION CODECs internal API for video devices
+ *
+ * Interface for MJPEG (and maybe later MPEG/WAVELETS) codecs
+ * bound to a master device.
+ *
+ * (c) 2002 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+/* =================== */
+/* general description */
+/* =================== */
+
+/* Should ease the (re-)usage of drivers supporting cards with (different)
+ video codecs. The codecs register their functionality with this module,
+ and the processors (masters) can attach to them if they fit.
+
+ The codecs typically have a "strong" binding to their master - so I
+ don't think it makes sense to have a full-blown interface as with e.g.
+ i2c. If you have another opinion, let's discuss & implement it :-)))
+
+ Usage:
+
+ The slave just has to set up the videocodec structure and use two functions:
+ videocodec_register(codecdata);
+ videocodec_unregister(codecdata);
+ It is best to just call them at module (de-)initialisation.
+
+ The master sets up the structure videocodec_master and calls:
+ codecdata=videocodec_attach(master_codecdata);
+ videocodec_detach(codecdata);
+
+ The slave is called during attach/detach via functions set up previously
+ during register. At that time, the master_data pointer is set up
+ and the slave can access any I/O registers of the master device (in case
+ the slave is bound to it). Otherwise it doesn't need these functions and
+ therefore they may be left uninitialized.
+
+ The other functions are just for convenience, as they are for sure used by
+ most/all of the codecs. The last ones may be omitted, too.
+
+ See the structure declaration below for more information and which data has
+ to be set up for the master and the slave.
+
+ ----------------------------------------------------------------------------
+ The master should have "knowledge" of the slave and vice versa. So the data
+ structures sent to/from slave via set_data/get_data set_image/get_image are
+ device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!)
+ ----------------------------------------------------------------------------
+*/
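+
+/* A minimal slave-side sketch of the above usage (illustrative only: the
+   my_setup/my_unset helpers and the "my_codec" name are assumptions, not
+   part of this API):
+
+   static int my_setup(struct videocodec *codec) { return 0; }
+   static int my_unset(struct videocodec *codec) { return 0; }
+
+   static const struct videocodec my_codec = {
+       .owner = THIS_MODULE,
+       .name  = "my_codec",
+       .flags = CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER,
+       .type  = CODEC_TYPE_ZR36060,
+       .setup = my_setup,
+       .unset = my_unset,
+   };
+
+   Then call videocodec_register(&my_codec) at module init and
+   videocodec_unregister(&my_codec) at module exit. */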
+
+/* ========================================== */
+/* description of the videocodec_io structure */
+/* ========================================== */
+
+/*
+ ==== master setup ====
+ name -> name of the device structure for reference and debugging
+ master_data -> data ref. for the master (e.g. the zr36055,57,67)
+ readreg -> ref. to read-fn from register (setup by master, used by slave)
+ writereg -> ref. to write-fn to register (setup by master, used by slave)
+ these two functions do the low-level I/O job
+
+ ==== slave functionality setup ====
+ slave_data -> data ref. for the slave (e.g. the zr36050,60)
+ check -> fn-ref. checks availability of a device, returns -EIO on failure or
+ the type on success
+ this makes sense especially if a driver module supports more than
+ one codec which may be quite similar to access; nevertheless it
+ is good for a first functionality check
+
+ -- main functions you always need for compression/decompression --
+
+ set_mode -> this fn-ref. resets the entire codec, and sets up the mode
+ with the last defined norm/size (or device default if not
+ available) - it returns 0 if the mode is possible
+ set_size -> this fn-ref. sets the norm and image size for
+ compression/decompression (returns 0 on success)
+ the norm param is defined in videodev2.h (V4L2_STD_*)
+
+ additional setup may be available, too - but the codec should work with
+ some default values even without this
+
+ set_data -> sets device-specific data (tables, quality etc.)
+ get_data -> query device-specific data (tables, quality etc.)
+
+ if the device delivers interrupts, they may be set up/handled here
+ setup_interrupt -> codec irq setup (not needed for 36050/60)
+ handle_interrupt -> codec irq handling (not needed for 36050/60)
+
+ if the device delivers pictures, they may be handled here
+ put_image -> puts image data to the codec (not needed for 36050/60)
+ get_image -> gets image data from the codec (not needed for 36050/60)
+ the calls include frame numbers and flags (even/odd/...)
+ if needed and a flag which allows blocking until it's ready
+*/
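+
+/* A matching master-side sketch (illustrative only: zr_read/zr_write and the
+   "zr36057" name are assumptions, not part of this API):
+
+   static __u32 zr_read(struct videocodec *codec, __u16 reg) { return 0; }
+   static void zr_write(struct videocodec *codec, __u16 reg, __u32 val) { }
+
+   static struct videocodec_master zr_master = {
+       .name     = "zr36057",
+       .flags    = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER,
+       .readreg  = zr_read,
+       .writereg = zr_write,
+   };
+
+   struct videocodec *c = videocodec_attach(&zr_master);   // on attach
+   videocodec_detach(c);                                   // on detach  */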
+
+/* ============== */
+/* user interface */
+/* ============== */
+
+/*
+ Currently only an information display is planned, as the layer
+ is not visible to user space at all.
+
+ Information is available via procfs. The current entry is "/proc/videocodecs"
+ but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--.
+
+An example of such an output is:
+
+<S>lave or attached <M>aster name type flags magic (connected as)
+S zr36050 0002 0000d001 00000000 (TEMPLATE)
+M zr36055[0] 0001 0000c001 00000000 (zr36050[0])
+M zr36055[1] 0001 0000c001 00000000 (zr36050[1])
+
+*/
+
+/* =============================================== */
+/* special defines for the videocodec_io structure */
+/* =============================================== */
+
+#ifndef __LINUX_VIDEOCODEC_H
+#define __LINUX_VIDEOCODEC_H
+
+#include <linux/videodev2.h>
+
+#define CODEC_DO_COMPRESSION 0
+#define CODEC_DO_EXPANSION 1
+
+/* these are the current codec flags I think are needed */
+/* -> type value in structure */
+#define CODEC_FLAG_JPEG 0x00000001L // JPEG codec
+#define CODEC_FLAG_MPEG 0x00000002L // MPEG1/2/4 codec
+#define CODEC_FLAG_DIVX 0x00000004L // DIVX codec
+#define CODEC_FLAG_WAVELET 0x00000008L // WAVELET codec
+ // room for other types
+
+#define CODEC_FLAG_MAGIC 0x00000800L // magic key must match
+#define CODEC_FLAG_HARDWARE 0x00001000L // is a hardware codec
+#define CODEC_FLAG_VFE 0x00002000L // has direct video frontend
+#define CODEC_FLAG_ENCODER 0x00004000L // compression capability
+#define CODEC_FLAG_DECODER 0x00008000L // decompression capability
+#define CODEC_FLAG_NEEDIRQ 0x00010000L // needs irq handling
+#define CODEC_FLAG_RDWRPIC 0x00020000L // handles picture I/O
+
+/* a list of modes, some are just examples (is there any HW?) */
+#define CODEC_MODE_BJPG 0x0001 // Baseline JPEG
+#define CODEC_MODE_LJPG 0x0002 // Lossless JPEG
+#define CODEC_MODE_MPEG1 0x0003 // MPEG 1
+#define CODEC_MODE_MPEG2 0x0004 // MPEG 2
+#define CODEC_MODE_MPEG4 0x0005 // MPEG 4
+#define CODEC_MODE_MSDIVX 0x0006 // MS DivX
+#define CODEC_MODE_ODIVX 0x0007 // Open DivX
+#define CODEC_MODE_WAVELET 0x0008 // Wavelet
+
+/* these are the current codec types I want to implement */
+/* -> type value in structure */
+#define CODEC_TYPE_NONE 0
+#define CODEC_TYPE_L64702 1
+#define CODEC_TYPE_ZR36050 2
+#define CODEC_TYPE_ZR36016 3
+#define CODEC_TYPE_ZR36060 4
+
+/* the type of data may be enhanced by future implementations (data-fn.'s) */
+/* -> used in command */
+#define CODEC_G_STATUS 0x0000 /* codec status (query only) */
+#define CODEC_S_CODEC_MODE 0x0001 /* codec mode (baseline JPEG, MPEG1, ...) */
+#define CODEC_G_CODEC_MODE 0x8001
+#define CODEC_S_VFE 0x0002 /* additional video frontend setup */
+#define CODEC_G_VFE 0x8002
+#define CODEC_S_MMAP 0x0003 /* MMAP setup (if available) */
+
+#define CODEC_S_JPEG_TDS_BYTE 0x0010 /* target data size in bytes */
+#define CODEC_G_JPEG_TDS_BYTE 0x8010
+#define CODEC_S_JPEG_SCALE 0x0011 /* scaling factor for quant. tables */
+#define CODEC_G_JPEG_SCALE 0x8011
+#define CODEC_S_JPEG_HDT_DATA 0x0018 /* huffman-tables */
+#define CODEC_G_JPEG_HDT_DATA 0x8018
+#define CODEC_S_JPEG_QDT_DATA 0x0019 /* quantizing-tables */
+#define CODEC_G_JPEG_QDT_DATA 0x8019
+#define CODEC_S_JPEG_APP_DATA 0x001A /* APP marker */
+#define CODEC_G_JPEG_APP_DATA 0x801A
+#define CODEC_S_JPEG_COM_DATA 0x001B /* COM marker */
+#define CODEC_G_JPEG_COM_DATA 0x801B
+
+#define CODEC_S_PRIVATE 0x1000 /* "private" commands start here */
+#define CODEC_G_PRIVATE 0x9000
+
+#define CODEC_G_FLAG 0x8000 /* this is how 'get' is detected */
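+/* e.g. CODEC_G_JPEG_SCALE == (CODEC_S_JPEG_SCALE | CODEC_G_FLAG) */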
+
+/* types of transfer, directly user space or a kernel buffer (image-fn.'s) */
+/* -> used in get_image, put_image */
+#define CODEC_TRANSFER_KERNEL 0 /* use "memcpy" */
+#define CODEC_TRANSFER_USER 1 /* use "to/from_user" */
+
+/* ========================= */
+/* the structures itself ... */
+/* ========================= */
+
+struct vfe_polarity {
+ unsigned int vsync_pol:1;
+ unsigned int hsync_pol:1;
+ unsigned int field_pol:1;
+ unsigned int blank_pol:1;
+ unsigned int subimg_pol:1;
+ unsigned int poe_pol:1;
+ unsigned int pvalid_pol:1;
+ unsigned int vclk_pol:1;
+};
+
+struct vfe_settings {
+ __u32 x, y; /* Offsets into image */
+ __u32 width, height; /* Area to capture */
+ __u16 decimation; /* Decimation divider */
+ __u16 flags; /* Flags for capture */
+ __u16 quality; /* quality of the video */
+};
+
+struct tvnorm {
+ u16 wt, wa, h_start, h_sync_start, ht, ha, v_start;
+};
+
+struct jpeg_com_marker {
+ int len; /* number of usable bytes in data */
+ char data[60];
+};
+
+struct jpeg_app_marker {
+ int appn; /* number app segment */
+ int len; /* number of usable bytes in data */
+ char data[60];
+};
+
+struct videocodec {
+ struct module *owner;
+ /* -- filled in by slave device during register -- */
+ char name[32];
+ unsigned long magic; /* may be used for client<->master attaching */
+ unsigned long flags; /* functionality flags */
+ unsigned int type; /* codec type */
+
+ /* -- this is filled in later during master device attach -- */
+
+ struct videocodec_master *master_data;
+
+ /* -- these are filled in by the slave device during register -- */
+
+ void *data; /* private slave data */
+
+ /* attach/detach client functions (indirect call) */
+ int (*setup)(struct videocodec *codec);
+ int (*unset)(struct videocodec *codec);
+
+ /* main functions, every client needs them for sure! */
+ // set compression or decompression (or freeze, stop, standby, etc)
+ int (*set_mode)(struct videocodec *codec, int mode);
+ // setup picture size and norm (for the codec's video frontend)
+ int (*set_video)(struct videocodec *codec, const struct tvnorm *norm,
+ struct vfe_settings *cap, struct vfe_polarity *pol);
+ // other control commands, also mmap setup etc.
+ int (*control)(struct videocodec *codec, int type, int size, void *data);
+
+ /* additional setup/query/processing (may be NULL pointer) */
+ // interrupt setup / handling (for irq's delivered by master)
+ int (*setup_interrupt)(struct videocodec *codec, long mode);
+ int (*handle_interrupt)(struct videocodec *codec, int source, long flag);
+ // picture interface (if any)
+ long (*put_image)(struct videocodec *codec, int tr_type, int block,
+ long *fr_num, long *flag, long size, void *buf);
+ long (*get_image)(struct videocodec *codec, int tr_type, int block,
+ long *fr_num, long *flag, long size, void *buf);
+};
+
+struct videocodec_master {
+ /* -- filled in by master device for registration -- */
+ char name[32];
+ unsigned long magic; /* may be used for client<->master attaching */
+ unsigned long flags; /* functionality flags */
+ unsigned int type; /* master type */
+
+ void *data; /* private master data */
+
+ __u32 (*readreg)(struct videocodec *codec, __u16 reg);
+ void (*writereg)(struct videocodec *codec, __u16 reg, __u32 value);
+};
+
+/* ================================================= */
+/* function prototypes of the master/slave interface */
+/* ================================================= */
+
+/* attach and detach commands for the master */
+// * master structure needs to be kmalloc'ed before calling attach
+// and free'd after calling detach
+// * returns pointer on success, NULL on failure
+extern struct videocodec *videocodec_attach(struct videocodec_master *);
+// * 0 on success, <0 (errno) on failure
+extern int videocodec_detach(struct videocodec *);
+
+/* register and unregister commands for the slaves */
+// * 0 on success, <0 (errno) on failure
+extern int videocodec_register(const struct videocodec *);
+// * 0 on success, <0 (errno) on failure
+extern int videocodec_unregister(const struct videocodec *);
+
+/* the other calls are directly done via the videocodec structure! */
+
+#endif /*ifndef __LINUX_VIDEOCODEC_H */
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h
new file mode 100644
index 000000000000..e7fe8da7732c
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * zoran - Iomega Buz driver
+ *
+ * Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de>
+ *
+ * based on
+ *
+ * zoran.0.0.3 Copyright (C) 1998 Dave Perks <dperks@ibm.net>
+ *
+ * and
+ *
+ * bttv - Bt848 frame grabber driver
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * & Marcus Metzler (mocm@thp.uni-koeln.de)
+ */
+
+#ifndef _BUZ_H_
+#define _BUZ_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define ZR_NORM_PAL 0
+#define ZR_NORM_NTSC 1
+#define ZR_NORM_SECAM 2
+
+struct zr_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head queue;
+};
+
+static inline struct zr_buffer *vb2_to_zr_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct zr_buffer, vbuf);
+}
+
+#define ZORAN_NAME "ZORAN" /* name of the device */
+
+#define ZR_DEVNAME(zr) ((zr)->name)
+
+#define BUZ_MAX_WIDTH (zr->timing->wa)
+#define BUZ_MAX_HEIGHT (zr->timing->ha)
+#define BUZ_MIN_WIDTH 32 /* never display less than 32 pixels */
+#define BUZ_MIN_HEIGHT 24 /* never display less than 24 rows */
+
+#define BUZ_NUM_STAT_COM 4
+#define BUZ_MASK_STAT_COM 3
+
+#define BUZ_MAX_FRAME 256 /* Must be a power of 2 */
+#define BUZ_MASK_FRAME 255 /* Must be BUZ_MAX_FRAME-1 */
+
+#define BUZ_MAX_INPUT 16
+
+#if VIDEO_MAX_FRAME <= 32
+# define V4L_MAX_FRAME 32
+#elif VIDEO_MAX_FRAME <= 64
+# define V4L_MAX_FRAME 64
+#else
+# error "Too many video frame buffers to handle"
+#endif
+#define V4L_MASK_FRAME (V4L_MAX_FRAME - 1)
+
+#define MAX_FRAME (BUZ_MAX_FRAME > VIDEO_MAX_FRAME ? BUZ_MAX_FRAME : VIDEO_MAX_FRAME)
+
+#include "zr36057.h"
+
+enum card_type {
+ UNKNOWN = -1,
+
+ /* Pinnacle/Miro */
+ DC10_OLD, /* DC30 like */
+ DC10_NEW, /* DC10_PLUS like */
+ DC10_PLUS,
+ DC30,
+ DC30_PLUS,
+
+ /* Linux Media Labs */
+ LML33,
+ LML33R10,
+
+ /* Iomega */
+ BUZ,
+
+ /* AverMedia */
+ AVS6EYES,
+
+ /* total number of cards */
+ NUM_CARDS
+};
+
+enum zoran_codec_mode {
+ BUZ_MODE_IDLE, /* nothing going on */
+ BUZ_MODE_MOTION_COMPRESS, /* grabbing frames */
+ BUZ_MODE_MOTION_DECOMPRESS, /* playing frames */
+ BUZ_MODE_STILL_COMPRESS, /* still frame conversion */
+ BUZ_MODE_STILL_DECOMPRESS /* still frame conversion */
+};
+
+enum zoran_map_mode {
+ ZORAN_MAP_MODE_NONE,
+ ZORAN_MAP_MODE_RAW,
+ ZORAN_MAP_MODE_JPG_REC,
+ ZORAN_MAP_MODE_JPG_PLAY,
+};
+
+enum gpio_type {
+ ZR_GPIO_JPEG_SLEEP = 0,
+ ZR_GPIO_JPEG_RESET,
+ ZR_GPIO_JPEG_FRAME,
+ ZR_GPIO_VID_DIR,
+ ZR_GPIO_VID_EN,
+ ZR_GPIO_VID_RESET,
+ ZR_GPIO_CLK_SEL1,
+ ZR_GPIO_CLK_SEL2,
+ ZR_GPIO_MAX,
+};
+
+enum gpcs_type {
+ GPCS_JPEG_RESET = 0,
+ GPCS_JPEG_START,
+ GPCS_MAX,
+};
+
+struct zoran_format {
+ char *name;
+ __u32 fourcc;
+ int colorspace;
+ int depth;
+ __u32 flags;
+ __u32 vfespfr;
+};
+
+/* flags */
+#define ZORAN_FORMAT_COMPRESSED BIT(0)
+#define ZORAN_FORMAT_OVERLAY BIT(1)
+#define ZORAN_FORMAT_CAPTURE BIT(2)
+#define ZORAN_FORMAT_PLAYBACK BIT(3)
+
+/* v4l-capture settings */
+struct zoran_v4l_settings {
+ int width, height, bytesperline; /* capture size */
+ const struct zoran_format *format; /* capture format */
+};
+
+/* jpg-capture/-playback settings */
+struct zoran_jpg_settings {
+ int decimation; /* this bit is used to set everything to default */
+ int hor_dcm, ver_dcm, tmp_dcm; /* capture decimation settings (tmp_dcm=1 means both fields) */
+ int field_per_buff, odd_even; /* field-settings (odd_even=1 (+tmp_dcm=1) means top-field-first) */
+ int img_x, img_y, img_width, img_height; /* crop settings (subframe capture) */
+ struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
+};
+
+struct zoran;
+
+/* zoran_fh contains per-open() settings */
+struct zoran_fh {
+ struct v4l2_fh fh;
+ struct zoran *zr;
+};
+
+struct card_info {
+ enum card_type type;
+ char name[32];
+ const char *i2c_decoder; /* i2c decoder device */
+ const unsigned short *addrs_decoder;
+ const char *i2c_encoder; /* i2c encoder device */
+ const unsigned short *addrs_encoder;
+ u16 video_vfe, video_codec; /* videocodec types */
+ u16 audio_chip; /* audio type */
+
+ int inputs; /* number of video inputs */
+ struct input {
+ int muxsel;
+ char name[32];
+ } input[BUZ_MAX_INPUT];
+
+ v4l2_std_id norms;
+ const struct tvnorm *tvn[3]; /* supported TV norms */
+
+ u32 jpeg_int; /* JPEG interrupt */
+ u32 vsync_int; /* VSYNC interrupt */
+ s8 gpio[ZR_GPIO_MAX];
+ u8 gpcs[GPCS_MAX];
+
+ struct vfe_polarity vfe_pol;
+ u8 gpio_pol[ZR_GPIO_MAX];
+
+ /* is the /GWS line connected? */
+ u8 gws_not_connected;
+
+ /* avs6eyes mux setting */
+ u8 input_mux;
+
+ void (*init)(struct zoran *zr);
+};
+
+struct zoran {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct video_device *video_dev;
+ struct vb2_queue vq;
+
+ struct i2c_adapter i2c_adapter;
+ struct i2c_algo_bit_data i2c_algo;
+ u32 i2cbr;
+
+ struct v4l2_subdev *decoder; /* video decoder sub-device */
+ struct v4l2_subdev *encoder; /* video encoder sub-device */
+
+ struct videocodec *codec; /* video codec */
+ struct videocodec *vfe; /* video front end */
+
+ struct mutex lock; /* file ops serialize lock */
+
+ u8 initialized; /* flag if zoran has been correctly initialized */
+ struct card_info card;
+ const struct tvnorm *timing;
+
+ unsigned short id; /* number of this device */
+ char name[32]; /* name of this device */
+ struct pci_dev *pci_dev; /* PCI device */
+ unsigned char revision; /* revision of zr36057 */
+ unsigned char __iomem *zr36057_mem; /* pointer to mapped IO memory */
+
+ spinlock_t spinlock; /* Spinlock */
+
+ /* Video for Linux parameters */
+ int input; /* card's norm and input */
+ v4l2_std_id norm;
+
+ /* Current buffer params */
+ unsigned int buffer_size;
+
+ struct zoran_v4l_settings v4l_settings; /* structure with a lot of things to play with */
+
+ /* Buz MJPEG parameters */
+ enum zoran_codec_mode codec_mode; /* status of codec */
+ struct zoran_jpg_settings jpg_settings; /* structure with a lot of things to play with */
+
+ /* grab queue counts/indices, mask with BUZ_MASK_STAT_COM before using as index */
+ /* (dma_head - dma_tail) is number active in DMA, must be <= BUZ_NUM_STAT_COM */
+ /* (value & BUZ_MASK_STAT_COM) corresponds to index in stat_com table */
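+ /* e.g. an index i in [jpg_dma_tail, jpg_dma_head) maps to stat_com[i & BUZ_MASK_STAT_COM] */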
+ unsigned long jpg_que_head; /* Index where to put next buffer which is queued */
+ unsigned long jpg_dma_head; /* Index of next buffer which goes into stat_com */
+ unsigned long jpg_dma_tail; /* Index of last buffer in stat_com */
+ unsigned long jpg_que_tail; /* Index of last buffer in queue */
+ unsigned long jpg_seq_num; /* count of frames since grab/play started */
+ unsigned long jpg_err_seq; /* last seq_num before error */
+ unsigned long jpg_err_shift;
+ unsigned long jpg_queued_num; /* count of frames queued since grab/play started */
+ unsigned long vbseq;
+
+ /* zr36057's code buffer table */
+ __le32 *stat_com; /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */
+
+ /* Additional stuff for testing */
+ unsigned int ghost_int;
+ int intr_counter_GIRQ1;
+ int intr_counter_GIRQ0;
+ int intr_counter_cod_rep_irq;
+ int intr_counter_jpeg_rep_irq;
+ int field_counter;
+ int irq1_in;
+ int irq1_out;
+ int jpeg_in;
+ int jpeg_out;
+ int JPEG_0;
+ int JPEG_1;
+ int end_event_missed;
+ int jpeg_missed;
+ int jpeg_error;
+ int num_errors;
+ int jpeg_max_missed;
+ int jpeg_min_missed;
+ unsigned int prepared;
+ unsigned int queued;
+
+ u32 last_isr;
+ unsigned long frame_num;
+ int running;
+ int buf_in_reserve;
+
+ dma_addr_t p_sc;
+ __le32 *stat_comb;
+ dma_addr_t p_scb;
+ enum zoran_map_mode map_mode;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ struct zr_buffer *inuse[BUZ_NUM_STAT_COM * 2];
+};
+
+static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct zoran, v4l2_dev);
+}
+
+/* There was something called _ALPHA_BUZ that used the PCI address instead of
+ * the kernel iomapped address for btread/btwrite. */
+#define btwrite(dat, adr) writel((dat), zr->zr36057_mem + (adr))
+#define btread(adr) readl(zr->zr36057_mem + (adr))
+
+#define btand(dat, adr) btwrite((dat) & btread(adr), adr)
+#define btor(dat, adr) btwrite((dat) | btread(adr), adr)
+#define btaor(dat, mask, adr) btwrite((dat) | ((mask) & btread(adr)), adr)
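+
+/* Read-modify-write sketch: btor() sets bits, btand() keeps only the bits in
+ * 'dat', and btaor() writes 'dat' into the bits not preserved by 'mask'; e.g.
+ * btaor(val, ~FIELD_MASK, reg) replaces one register field (FIELD_MASK is a
+ * hypothetical mask, named here only for illustration).
+ */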
+
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq);
+void zoran_queue_exit(struct zoran *zr);
+int zr_set_buf(struct zoran *zr);
+
+#endif
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c
new file mode 100644
index 000000000000..dfc60e2e9dd7
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran_card.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * This part handles card-specific data and detection
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <media/v4l2-common.h>
+#include <media/i2c/bt819.h>
+
+#include "videocodec.h"
+#include "zoran.h"
+#include "zoran_card.h"
+#include "zoran_device.h"
+
+extern const struct zoran_format zoran_formats[];
+
+static int card[BUZ_MAX] = { [0 ... (BUZ_MAX - 1)] = -1 };
+module_param_array(card, int, NULL, 0444);
+MODULE_PARM_DESC(card, "Card type");
+
+/*
+ * The video memory base address of the video card. The driver has a little database for some
+ * video cards to determine it from there. If your video card is not in there, you have to either
+ * pass it to the driver as a parameter or set it via a VIDIOCSFBUF ioctl
+ */
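+/* (e.g. a hypothetical invocation: modprobe zr36067 vidmem=0xe0000000) */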
+
+static unsigned long vidmem; /* default = 0 - Video memory base address */
+module_param_hw(vidmem, ulong, iomem, 0444);
+MODULE_PARM_DESC(vidmem, "Default video memory base address");
+
+/* Default input and video norm at startup of the driver. */
+
+static unsigned int default_input; /* default 0 = Composite, 1 = S-Video */
+module_param(default_input, uint, 0444);
+MODULE_PARM_DESC(default_input,
+ "Default input (0=Composite, 1=S-Video, 2=Internal)");
+
+static int default_mux = 1; /* 6 Eyes input selection */
+module_param(default_mux, int, 0644);
+MODULE_PARM_DESC(default_mux,
+ "Default 6 Eyes mux setting (Input selection)");
+
+static int default_norm; /* default 0 = PAL, 1 = NTSC, 2 = SECAM */
+module_param(default_norm, int, 0444);
+MODULE_PARM_DESC(default_norm, "Default norm (0=PAL, 1=NTSC, 2=SECAM)");
+
+/* /dev/videoN, -1 for autodetect */
+static int video_nr[BUZ_MAX] = { [0 ... (BUZ_MAX - 1)] = -1 };
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "Video device number (-1=Auto)");
+
+int v4l_nbufs = 4;
+int v4l_bufsize = 864; /* Everybody should be able to work with this setting */
+module_param(v4l_nbufs, int, 0644);
+MODULE_PARM_DESC(v4l_nbufs, "Maximum number of V4L buffers to use");
+module_param(v4l_bufsize, int, 0644);
+MODULE_PARM_DESC(v4l_bufsize, "Maximum size per V4L buffer (in kB)");
+
+int jpg_nbufs = 32;
+int jpg_bufsize = 512; /* max size for 100% quality full-PAL frame */
+module_param(jpg_nbufs, int, 0644);
+MODULE_PARM_DESC(jpg_nbufs, "Maximum number of JPG buffers to use");
+module_param(jpg_bufsize, int, 0644);
+MODULE_PARM_DESC(jpg_bufsize, "Maximum size per JPG buffer (in kB)");
+
+/* 1=Pass through TV signal when device is not used */
+/* 0=Show color bar when device is not used (LML33: only if lml33dpath=1) */
+int pass_through;
+module_param(pass_through, int, 0644);
+MODULE_PARM_DESC(pass_through,
+ "Pass TV signal through to TV-out when idling");
+
+int zr36067_debug = 1;
+module_param_named(debug, zr36067_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-5)");
+
+#define ZORAN_VERSION "0.10.1"
+
+MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
+MODULE_AUTHOR("Serguei Miridonov");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ZORAN_VERSION);
+
+#define ZR_DEVICE(subven, subdev, data) { \
+ .vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
+ .subvendor = (subven), .subdevice = (subdev), .driver_data = (data) }
+
+static const struct pci_device_id zr36067_pci_tbl[] = {
+ ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10_PLUS),
+ ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30_PLUS),
+ ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10),
+ ZR_DEVICE(PCI_VENDOR_ID_IOMEGA, PCI_DEVICE_ID_IOMEGA_BUZ, BUZ),
+ ZR_DEVICE(PCI_ANY_ID, PCI_ANY_ID, NUM_CARDS),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, zr36067_pci_tbl);
+
+static unsigned int zoran_num; /* number of cards found */
+
+/* videocodec bus functions ZR36060 */
+static u32 zr36060_read(struct videocodec *codec, u16 reg)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+ __u32 data;
+
+ if (post_office_wait(zr) || post_office_write(zr, 0, 1, reg >> 8) ||
+ post_office_write(zr, 0, 2, reg & 0xff))
+ return -1;
+
+ data = post_office_read(zr, 0, 3) & 0xff;
+ return data;
+}
+
+static void zr36060_write(struct videocodec *codec, u16 reg, u32 val)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+
+ if (post_office_wait(zr) || post_office_write(zr, 0, 1, reg >> 8) ||
+ post_office_write(zr, 0, 2, reg & 0xff))
+ return;
+
+ post_office_write(zr, 0, 3, val & 0xff);
+}
+
+/* videocodec bus functions ZR36050 */
+static u32 zr36050_read(struct videocodec *codec, u16 reg)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+ __u32 data;
+
+ if (post_office_wait(zr) || post_office_write(zr, 1, 0, reg >> 2)) // reg. HIGHBYTES
+ return -1;
+
+ data = post_office_read(zr, 0, reg & 0x03) & 0xff; // reg. LOWBYTES + read
+ return data;
+}
+
+static void zr36050_write(struct videocodec *codec, u16 reg, u32 val)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+
+ if (post_office_wait(zr) || post_office_write(zr, 1, 0, reg >> 2)) // reg. HIGHBYTES
+ return;
+
+ post_office_write(zr, 0, reg & 0x03, val & 0xff); // reg. LOWBYTES + wr. data
+}
+
+/* videocodec bus functions ZR36016 */
+static u32 zr36016_read(struct videocodec *codec, u16 reg)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+ __u32 data;
+
+ if (post_office_wait(zr))
+ return -1;
+
+ data = post_office_read(zr, 2, reg & 0x03) & 0xff; // read
+ return data;
+}
+
+/* hack: used in zoran_device.c */
+void zr36016_write(struct videocodec *codec, u16 reg, u32 val)
+{
+ struct zoran *zr = (struct zoran *)codec->master_data->data;
+
+ if (post_office_wait(zr))
+ return;
+
+ post_office_write(zr, 2, reg & 0x03, val & 0x0ff); // wr. data
+}
+
+/*
+ * Board specific information
+ */
+
+static void dc10_init(struct zoran *zr)
+{
+ pci_dbg(zr->pci_dev, "%s\n", __func__);
+
+ /* Pixel clock selection */
+ GPIO(zr, 4, 0);
+ GPIO(zr, 5, 1);
+ /* Enable the video bus sync signals */
+ GPIO(zr, 7, 0);
+}
+
+static void dc10plus_init(struct zoran *zr)
+{
+ pci_dbg(zr->pci_dev, "%s\n", __func__);
+}
+
+static void buz_init(struct zoran *zr)
+{
+ pci_dbg(zr->pci_dev, "%s\n", __func__);
+
+ /* some stuff from Iomega */
+ pci_write_config_dword(zr->pci_dev, 0xfc, 0x90680f15);
+ pci_write_config_dword(zr->pci_dev, 0x0c, 0x00012020);
+ pci_write_config_dword(zr->pci_dev, 0xe8, 0xc0200000);
+}
+
+static void lml33_init(struct zoran *zr)
+{
+ pci_dbg(zr->pci_dev, "%s\n", __func__);
+
+ GPIO(zr, 2, 1); // Set Composite input/output
+}
+
+static void avs6eyes_init(struct zoran *zr)
+{
+ // AverMedia 6-Eyes original driver by Christer Weinigel
+
+ // Lifted straight from Christer's old driver and
+ // modified slightly by Martin Samuelsson.
+
+ int mux = default_mux; /* 1 = BT866, 7 = VID1 */
+
+ GPIO(zr, 4, 1); /* Bt866 SLEEP on */
+ udelay(2);
+
+ GPIO(zr, 0, 1); /* ZR36060 /RESET on */
+ GPIO(zr, 1, 0); /* ZR36060 /SLEEP on */
+ GPIO(zr, 2, mux & 1); /* MUX S0 */
+ GPIO(zr, 3, 0); /* /FRAME on */
+ GPIO(zr, 4, 0); /* Bt866 SLEEP off */
+ GPIO(zr, 5, mux & 2); /* MUX S1 */
+ GPIO(zr, 6, 0); /* ? */
+ GPIO(zr, 7, mux & 4); /* MUX S2 */
+}
+
+static const char *codecid_to_modulename(u16 codecid)
+{
+ const char *name = NULL;
+
+ switch (codecid) {
+ case CODEC_TYPE_ZR36060:
+ name = "zr36060";
+ break;
+ case CODEC_TYPE_ZR36050:
+ name = "zr36050";
+ break;
+ case CODEC_TYPE_ZR36016:
+ name = "zr36016";
+ break;
+ }
+
+ return name;
+}
+
+// struct tvnorm {
+// u16 wt, wa, h_start, h_sync_start, ht, ha, v_start;
+// };
+
+static const struct tvnorm f50sqpixel = { 944, 768, 83, 880, 625, 576, 16 };
+static const struct tvnorm f60sqpixel = { 780, 640, 51, 716, 525, 480, 12 };
+static const struct tvnorm f50ccir601 = { 864, 720, 75, 804, 625, 576, 18 };
+static const struct tvnorm f60ccir601 = { 858, 720, 57, 788, 525, 480, 16 };
+
+static const struct tvnorm f50ccir601_lml33 = { 864, 720, 75 + 34, 804, 625, 576, 18 };
+static const struct tvnorm f60ccir601_lml33 = { 858, 720, 57 + 34, 788, 525, 480, 16 };
+
+/* The DC10 (57/16/50) uses VActive as HSync, so h_start must be 0 */
+static const struct tvnorm f50sqpixel_dc10 = { 944, 768, 0, 880, 625, 576, 0 };
+static const struct tvnorm f60sqpixel_dc10 = { 780, 640, 0, 716, 525, 480, 12 };
+
+/*
+ * FIXME: I cannot swap U and V in saa7114, so I do one pixel left shift in zoran (75 -> 74)
+ * (Maxim Yevtyushkin <max@linuxmedialabs.com>)
+ */
+static const struct tvnorm f50ccir601_lm33r10 = { 864, 720, 74 + 54, 804, 625, 576, 18 };
+static const struct tvnorm f60ccir601_lm33r10 = { 858, 720, 56 + 54, 788, 525, 480, 16 };
+
+/*
+ * FIXME: The ks0127 seem incapable of swapping U and V, too, which is why I copy Maxim's left
+ * shift hack for the 6 Eyes.
+ *
+ * Christer's driver used the unshifted norms, though...
+ * /Sam
+ */
+static const struct tvnorm f50ccir601_avs6eyes = { 864, 720, 74, 804, 625, 576, 18 };
+static const struct tvnorm f60ccir601_avs6eyes = { 858, 720, 56, 788, 525, 480, 16 };
+
+static const unsigned short vpx3220_addrs[] = { 0x43, 0x47, I2C_CLIENT_END };
+static const unsigned short saa7110_addrs[] = { 0x4e, 0x4f, I2C_CLIENT_END };
+static const unsigned short saa7111_addrs[] = { 0x25, 0x24, I2C_CLIENT_END };
+static const unsigned short saa7114_addrs[] = { 0x21, 0x20, I2C_CLIENT_END };
+static const unsigned short adv717x_addrs[] = { 0x6a, 0x6b, 0x2a, 0x2b, I2C_CLIENT_END };
+static const unsigned short ks0127_addrs[] = { 0x6c, 0x6d, I2C_CLIENT_END };
+static const unsigned short saa7185_addrs[] = { 0x44, I2C_CLIENT_END };
+static const unsigned short bt819_addrs[] = { 0x45, I2C_CLIENT_END };
+static const unsigned short bt856_addrs[] = { 0x44, I2C_CLIENT_END };
+static const unsigned short bt866_addrs[] = { 0x44, I2C_CLIENT_END };
+
+static struct card_info zoran_cards[NUM_CARDS] = {
+ {
+ .type = DC10_OLD,
+ .name = "DC10(old)",
+ .i2c_decoder = "vpx3220a",
+ .addrs_decoder = vpx3220_addrs,
+ .video_codec = CODEC_TYPE_ZR36050,
+ .video_vfe = CODEC_TYPE_ZR36016,
+
+ .inputs = 3,
+ .input = {
+ { 1, "Composite" },
+ { 2, "S-Video" },
+ { 0, "Internal/comp" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50sqpixel_dc10,
+ &f60sqpixel_dc10,
+ &f50sqpixel_dc10
+ },
+ .jpeg_int = 0,
+ .vsync_int = ZR36057_ISR_GIRQ1,
+ .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
+ .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
+ .gpcs = { -1, 0 },
+ .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gws_not_connected = 0,
+ .input_mux = 0,
+ .init = &dc10_init,
+ }, {
+ .type = DC10_NEW,
+ .name = "DC10(new)",
+ .i2c_decoder = "saa7110",
+ .addrs_decoder = saa7110_addrs,
+ .i2c_encoder = "adv7175",
+ .addrs_encoder = adv717x_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 3,
+ .input = {
+ { 0, "Composite" },
+ { 7, "S-Video" },
+ { 5, "Internal/comp" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50sqpixel,
+ &f60sqpixel,
+ &f50sqpixel},
+ .jpeg_int = ZR36057_ISR_GIRQ0,
+ .vsync_int = ZR36057_ISR_GIRQ1,
+ .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 },
+ .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gpcs = { -1, 1},
+ .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 },
+ .gws_not_connected = 0,
+ .input_mux = 0,
+ .init = &dc10plus_init,
+ }, {
+ .type = DC10_PLUS,
+ .name = "DC10_PLUS",
+ .i2c_decoder = "saa7110",
+ .addrs_decoder = saa7110_addrs,
+ .i2c_encoder = "adv7175",
+ .addrs_encoder = adv717x_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 3,
+ .input = {
+ { 0, "Composite" },
+ { 7, "S-Video" },
+ { 5, "Internal/comp" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50sqpixel,
+ &f60sqpixel,
+ &f50sqpixel
+ },
+ .jpeg_int = ZR36057_ISR_GIRQ0,
+ .vsync_int = ZR36057_ISR_GIRQ1,
+ .gpio = { 3, 0, 6, 1, 2, -1, 4, 5 },
+ .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gpcs = { -1, 1 },
+ .vfe_pol = { 1, 1, 1, 1, 0, 0, 0, 0 },
+ .gws_not_connected = 0,
+ .input_mux = 0,
+ .init = &dc10plus_init,
+ }, {
+ .type = DC30,
+ .name = "DC30",
+ .i2c_decoder = "vpx3220a",
+ .addrs_decoder = vpx3220_addrs,
+ .i2c_encoder = "adv7175",
+ .addrs_encoder = adv717x_addrs,
+ .video_codec = CODEC_TYPE_ZR36050,
+ .video_vfe = CODEC_TYPE_ZR36016,
+
+ .inputs = 3,
+ .input = {
+ { 1, "Composite" },
+ { 2, "S-Video" },
+ { 0, "Internal/comp" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50sqpixel_dc10,
+ &f60sqpixel_dc10,
+ &f50sqpixel_dc10
+ },
+ .jpeg_int = 0,
+ .vsync_int = ZR36057_ISR_GIRQ1,
+ .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
+ .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
+ .gpcs = { -1, 0 },
+ .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gws_not_connected = 0,
+ .input_mux = 0,
+ .init = &dc10_init,
+ }, {
+ .type = DC30_PLUS,
+ .name = "DC30_PLUS",
+ .i2c_decoder = "vpx3220a",
+ .addrs_decoder = vpx3220_addrs,
+ .i2c_encoder = "adv7175",
+ .addrs_encoder = adv717x_addrs,
+ .video_codec = CODEC_TYPE_ZR36050,
+ .video_vfe = CODEC_TYPE_ZR36016,
+
+ .inputs = 3,
+ .input = {
+ { 1, "Composite" },
+ { 2, "S-Video" },
+ { 0, "Internal/comp" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50sqpixel_dc10,
+ &f60sqpixel_dc10,
+ &f50sqpixel_dc10
+ },
+ .jpeg_int = 0,
+ .vsync_int = ZR36057_ISR_GIRQ1,
+ .gpio = { 2, 1, -1, 3, 7, 0, 4, 5 },
+ .gpio_pol = { 0, 0, 0, 1, 0, 0, 0, 0 },
+ .gpcs = { -1, 0 },
+ .vfe_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gws_not_connected = 0,
+ .input_mux = 0,
+ .init = &dc10_init,
+ }, {
+ .type = LML33,
+ .name = "LML33",
+ .i2c_decoder = "bt819a",
+ .addrs_decoder = bt819_addrs,
+ .i2c_encoder = "bt856",
+ .addrs_encoder = bt856_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 2,
+ .input = {
+ { 0, "Composite" },
+ { 7, "S-Video" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL,
+ .tvn = {
+ &f50ccir601_lml33,
+ &f60ccir601_lml33,
+ NULL
+ },
+ .jpeg_int = ZR36057_ISR_GIRQ1,
+ .vsync_int = ZR36057_ISR_GIRQ0,
+ .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 },
+ .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 },
+ .gpcs = { 3, 1 },
+ .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
+ .gws_not_connected = 1,
+ .input_mux = 0,
+ .init = &lml33_init,
+ }, {
+ .type = LML33R10,
+ .name = "LML33R10",
+ .i2c_decoder = "saa7114",
+ .addrs_decoder = saa7114_addrs,
+ .i2c_encoder = "adv7170",
+ .addrs_encoder = adv717x_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 2,
+ .input = {
+ { 0, "Composite" },
+ { 7, "S-Video" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL,
+ .tvn = {
+ &f50ccir601_lm33r10,
+ &f60ccir601_lm33r10,
+ NULL
+ },
+ .jpeg_int = ZR36057_ISR_GIRQ1,
+ .vsync_int = ZR36057_ISR_GIRQ0,
+ .gpio = { 1, -1, 3, 5, 7, -1, -1, -1 },
+ .gpio_pol = { 0, 0, 0, 0, 1, 0, 0, 0 },
+ .gpcs = { 3, 1 },
+ .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
+ .gws_not_connected = 1,
+ .input_mux = 0,
+ .init = &lml33_init,
+ }, {
+ .type = BUZ,
+ .name = "Buz",
+ .i2c_decoder = "saa7111",
+ .addrs_decoder = saa7111_addrs,
+ .i2c_encoder = "saa7185",
+ .addrs_encoder = saa7185_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 2,
+ .input = {
+ { 3, "Composite" },
+ { 7, "S-Video" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .tvn = {
+ &f50ccir601,
+ &f60ccir601,
+ &f50ccir601
+ },
+ .jpeg_int = ZR36057_ISR_GIRQ1,
+ .vsync_int = ZR36057_ISR_GIRQ0,
+ .gpio = { 1, -1, 3, -1, -1, -1, -1, -1 },
+ .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .gpcs = { 3, 1 },
+ .vfe_pol = { 1, 1, 0, 0, 0, 1, 0, 0 },
+ .gws_not_connected = 1,
+ .input_mux = 0,
+ .init = &buz_init,
+ }, {
+ .type = AVS6EYES,
+ .name = "6-Eyes",
+/* AverMedia chose not to brand the 6-Eyes. Thus it can't be autodetected, and requires card=x. */
+ .i2c_decoder = "ks0127",
+ .addrs_decoder = ks0127_addrs,
+ .i2c_encoder = "bt866",
+ .addrs_encoder = bt866_addrs,
+ .video_codec = CODEC_TYPE_ZR36060,
+
+ .inputs = 10,
+ .input = {
+ { 0, "Composite 1" },
+ { 1, "Composite 2" },
+ { 2, "Composite 3" },
+ { 4, "Composite 4" },
+ { 5, "Composite 5" },
+ { 6, "Composite 6" },
+ { 8, "S-Video 1" },
+ { 9, "S-Video 2" },
+ {10, "S-Video 3" },
+ {15, "YCbCr" }
+ },
+ .norms = V4L2_STD_NTSC | V4L2_STD_PAL,
+ .tvn = {
+ &f50ccir601_avs6eyes,
+ &f60ccir601_avs6eyes,
+ NULL
+ },
+ .jpeg_int = ZR36057_ISR_GIRQ1,
+ .vsync_int = ZR36057_ISR_GIRQ0,
+ .gpio = { 1, 0, 3, -1, -1, -1, -1, -1 },// Validity unknown /Sam
+ .gpio_pol = { 0, 0, 0, 0, 0, 0, 0, 0 }, // Validity unknown /Sam
+ .gpcs = { 3, 1 }, // Validity unknown /Sam
+ .vfe_pol = { 1, 0, 0, 0, 0, 1, 0, 0 }, // Validity unknown /Sam
+ .gws_not_connected = 1,
+ .input_mux = 1,
+ .init = &avs6eyes_init,
+ }
+
+};
+
+/*
+ * I2C functions
+ */
+/* software I2C functions */
+static int zoran_i2c_getsda(void *data)
+{
+ struct zoran *zr = (struct zoran *)data;
+
+ return (btread(ZR36057_I2CBR) >> 1) & 1;
+}
+
+static int zoran_i2c_getscl(void *data)
+{
+ struct zoran *zr = (struct zoran *)data;
+
+ return btread(ZR36057_I2CBR) & 1;
+}
+
+static void zoran_i2c_setsda(void *data, int state)
+{
+ struct zoran *zr = (struct zoran *)data;
+
+ if (state)
+ zr->i2cbr |= 2;
+ else
+ zr->i2cbr &= ~2;
+ btwrite(zr->i2cbr, ZR36057_I2CBR);
+}
+
+static void zoran_i2c_setscl(void *data, int state)
+{
+ struct zoran *zr = (struct zoran *)data;
+
+ if (state)
+ zr->i2cbr |= 1;
+ else
+ zr->i2cbr &= ~1;
+ btwrite(zr->i2cbr, ZR36057_I2CBR);
+}
+
+static const struct i2c_algo_bit_data zoran_i2c_bit_data_template = {
+ .setsda = zoran_i2c_setsda,
+ .setscl = zoran_i2c_setscl,
+ .getsda = zoran_i2c_getsda,
+ .getscl = zoran_i2c_getscl,
+ .udelay = 10,
+ .timeout = 100,
+};
+
+static int zoran_register_i2c(struct zoran *zr)
+{
+ zr->i2c_algo = zoran_i2c_bit_data_template;
+ zr->i2c_algo.data = zr;
+ strscpy(zr->i2c_adapter.name, ZR_DEVNAME(zr),
+ sizeof(zr->i2c_adapter.name));
+ i2c_set_adapdata(&zr->i2c_adapter, &zr->v4l2_dev);
+ zr->i2c_adapter.algo_data = &zr->i2c_algo;
+ zr->i2c_adapter.dev.parent = &zr->pci_dev->dev;
+ return i2c_bit_add_bus(&zr->i2c_adapter);
+}
+
+static void zoran_unregister_i2c(struct zoran *zr)
+{
+ i2c_del_adapter(&zr->i2c_adapter);
+}
+
+/* Check a zoran_params struct for correctness, insert default params */
+int zoran_check_jpg_settings(struct zoran *zr,
+ struct zoran_jpg_settings *settings, int try)
+{
+ int err = 0, err0 = 0;
+
+ pci_dbg(zr->pci_dev, "%s - dec: %d, Hdcm: %d, Vdcm: %d, Tdcm: %d\n",
+ __func__, settings->decimation, settings->hor_dcm,
+ settings->ver_dcm, settings->tmp_dcm);
+ pci_dbg(zr->pci_dev, "%s - x: %d, y: %d, w: %d, y: %d\n", __func__,
+ settings->img_x, settings->img_y,
+ settings->img_width, settings->img_height);
+ /* Check decimation, set default values for decimation = 1, 2, 4 */
+ switch (settings->decimation) {
+ case 1:
+
+ settings->hor_dcm = 1;
+ settings->ver_dcm = 1;
+ settings->tmp_dcm = 1;
+ settings->field_per_buff = 2;
+ settings->img_x = 0;
+ settings->img_y = 0;
+ settings->img_width = BUZ_MAX_WIDTH;
+ settings->img_height = BUZ_MAX_HEIGHT / 2;
+ break;
+ case 2:
+
+ settings->hor_dcm = 2;
+ settings->ver_dcm = 1;
+ settings->tmp_dcm = 2;
+ settings->field_per_buff = 1;
+ settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
+ settings->img_y = 0;
+ settings->img_width =
+ (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
+ settings->img_height = BUZ_MAX_HEIGHT / 2;
+ break;
+ case 4:
+
+ if (zr->card.type == DC10_NEW) {
+ pci_dbg(zr->pci_dev, "%s - HDec by 4 is not supported on the DC10\n", __func__);
+ err0++;
+ break;
+ }
+
+ settings->hor_dcm = 4;
+ settings->ver_dcm = 2;
+ settings->tmp_dcm = 2;
+ settings->field_per_buff = 1;
+ settings->img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
+ settings->img_y = 0;
+ settings->img_width =
+ (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
+ settings->img_height = BUZ_MAX_HEIGHT / 2;
+ break;
+ case 0:
+
+ /* We have to check the data the user has set */
+
+ if (settings->hor_dcm != 1 && settings->hor_dcm != 2 &&
+ (zr->card.type == DC10_NEW || settings->hor_dcm != 4)) {
+ settings->hor_dcm = clamp(settings->hor_dcm, 1, 2);
+ err0++;
+ }
+ if (settings->ver_dcm != 1 && settings->ver_dcm != 2) {
+ settings->ver_dcm = clamp(settings->ver_dcm, 1, 2);
+ err0++;
+ }
+ if (settings->tmp_dcm != 1 && settings->tmp_dcm != 2) {
+ settings->tmp_dcm = clamp(settings->tmp_dcm, 1, 2);
+ err0++;
+ }
+ if (settings->field_per_buff != 1 &&
+ settings->field_per_buff != 2) {
+ settings->field_per_buff = clamp(settings->field_per_buff, 1, 2);
+ err0++;
+ }
+ if (settings->img_x < 0) {
+ settings->img_x = 0;
+ err0++;
+ }
+ if (settings->img_y < 0) {
+ settings->img_y = 0;
+ err0++;
+ }
+ if (settings->img_width < 0 || settings->img_width > BUZ_MAX_WIDTH) {
+ settings->img_width = clamp(settings->img_width, 0, (int)BUZ_MAX_WIDTH);
+ err0++;
+ }
+ if (settings->img_height < 0 || settings->img_height > BUZ_MAX_HEIGHT / 2) {
+ settings->img_height = clamp(settings->img_height, 0, BUZ_MAX_HEIGHT / 2);
+ err0++;
+ }
+ if (settings->img_x + settings->img_width > BUZ_MAX_WIDTH) {
+ settings->img_x = BUZ_MAX_WIDTH - settings->img_width;
+ err0++;
+ }
+ if (settings->img_y + settings->img_height > BUZ_MAX_HEIGHT / 2) {
+ settings->img_y = BUZ_MAX_HEIGHT / 2 - settings->img_height;
+ err0++;
+ }
+ if (settings->img_width % (16 * settings->hor_dcm) != 0) {
+ settings->img_width -= settings->img_width % (16 * settings->hor_dcm);
+ if (settings->img_width == 0)
+ settings->img_width = 16 * settings->hor_dcm;
+ err0++;
+ }
+ if (settings->img_height % (8 * settings->ver_dcm) != 0) {
+ settings->img_height -= settings->img_height % (8 * settings->ver_dcm);
+ if (settings->img_height == 0)
+ settings->img_height = 8 * settings->ver_dcm;
+ err0++;
+ }
+
+ if (!try && err0) {
+ pci_err(zr->pci_dev, "%s - error in params for decimation = 0\n", __func__);
+ err++;
+ }
+ break;
+ default:
+ pci_err(zr->pci_dev, "%s - decimation = %d, must be 0, 1, 2 or 4\n",
+ __func__, settings->decimation);
+ err++;
+ break;
+ }
+
+ if (settings->jpg_comp.quality > 100)
+ settings->jpg_comp.quality = 100;
+ if (settings->jpg_comp.quality < 5)
+ settings->jpg_comp.quality = 5;
+ if (settings->jpg_comp.APPn < 0)
+ settings->jpg_comp.APPn = 0;
+ if (settings->jpg_comp.APPn > 15)
+ settings->jpg_comp.APPn = 15;
+ if (settings->jpg_comp.APP_len < 0)
+ settings->jpg_comp.APP_len = 0;
+ if (settings->jpg_comp.APP_len > 60)
+ settings->jpg_comp.APP_len = 60;
+ if (settings->jpg_comp.COM_len < 0)
+ settings->jpg_comp.COM_len = 0;
+ if (settings->jpg_comp.COM_len > 60)
+ settings->jpg_comp.COM_len = 60;
+ if (err)
+ return -EINVAL;
+ return 0;
+}
+
+void zoran_open_init_params(struct zoran *zr)
+{
+ int i;
+
+ zr->v4l_settings.width = 192;
+ zr->v4l_settings.height = 144;
+ zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */
+ zr->v4l_settings.bytesperline = zr->v4l_settings.width *
+ ((zr->v4l_settings.format->depth + 7) / 8);
+
+ /* Set necessary params and call zoran_check_jpg_settings to set the defaults */
+ zr->jpg_settings.decimation = 1;
+ zr->jpg_settings.jpg_comp.quality = 50; /* default compression factor 8 */
+ if (zr->card.type != BUZ)
+ zr->jpg_settings.odd_even = 1;
+ else
+ zr->jpg_settings.odd_even = 0;
+ zr->jpg_settings.jpg_comp.APPn = 0;
+ zr->jpg_settings.jpg_comp.APP_len = 0; /* No APPn marker */
+ memset(zr->jpg_settings.jpg_comp.APP_data, 0,
+ sizeof(zr->jpg_settings.jpg_comp.APP_data));
+ zr->jpg_settings.jpg_comp.COM_len = 0; /* No COM marker */
+ memset(zr->jpg_settings.jpg_comp.COM_data, 0,
+ sizeof(zr->jpg_settings.jpg_comp.COM_data));
+ zr->jpg_settings.jpg_comp.jpeg_markers =
+ V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT;
+ i = zoran_check_jpg_settings(zr, &zr->jpg_settings, 0);
+ if (i)
+ pci_err(zr->pci_dev, "%s internal error\n", __func__);
+
+ zr->buffer_size = zr->v4l_settings.bytesperline * zr->v4l_settings.height;
+
+ clear_interrupt_counters(zr);
+}
+
+static int zr36057_init(struct zoran *zr)
+{
+ int j, err;
+
+ pci_info(zr->pci_dev, "initializing card[%d]\n", zr->id);
+
+ /* Avoid nonsense settings from user for default input/norm */
+ if (default_norm < 0 || default_norm > 2)
+ default_norm = 0;
+ if (default_norm == 0) {
+ zr->norm = V4L2_STD_PAL;
+ zr->timing = zr->card.tvn[ZR_NORM_PAL];
+ } else if (default_norm == 1) {
+ zr->norm = V4L2_STD_NTSC;
+ zr->timing = zr->card.tvn[ZR_NORM_NTSC];
+ } else {
+ zr->norm = V4L2_STD_SECAM;
+ zr->timing = zr->card.tvn[ZR_NORM_SECAM];
+ }
+ if (!zr->timing) {
+ pci_warn(zr->pci_dev, "%s - default TV standard not supported by hardware. PAL will be used.\n", __func__);
+ zr->norm = V4L2_STD_PAL;
+ zr->timing = zr->card.tvn[ZR_NORM_PAL];
+ }
+
+ if (default_input > zr->card.inputs - 1) {
+ pci_warn(zr->pci_dev, "default_input value %d out of range (0-%d)\n",
+ default_input, zr->card.inputs - 1);
+ default_input = 0;
+ }
+ zr->input = default_input;
+
+ /* default setup (will be repeated at every open) */
+ zoran_open_init_params(zr);
+
+ /* allocate memory *before* doing anything to the hardware in case allocation fails */
+ zr->video_dev = video_device_alloc();
+ if (!zr->video_dev) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev,
+ BUZ_NUM_STAT_COM * sizeof(u32),
+ &zr->p_sc, GFP_KERNEL);
+ if (!zr->stat_com) {
+ err = -ENOMEM;
+ goto exit_video;
+ }
+ for (j = 0; j < BUZ_NUM_STAT_COM; j++)
+ zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
+
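+	/*
+	 * stat_comb holds the address/length pairs that queued stat_com
+	 * entries point at once buffers are fed (see zoran_feed_stat_com()).
+	 */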
+ zr->stat_comb = dma_alloc_coherent(&zr->pci_dev->dev,
+ BUZ_NUM_STAT_COM * sizeof(u32) * 2,
+ &zr->p_scb, GFP_KERNEL);
+ if (!zr->stat_comb) {
+ err = -ENOMEM;
+ goto exit_statcom;
+ }
+
+ /* Now add the template and register the device unit. */
+ *zr->video_dev = zoran_template;
+ zr->video_dev->v4l2_dev = &zr->v4l2_dev;
+ zr->video_dev->lock = &zr->lock;
+ zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+
+ strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name));
+ /*
+ * It's not a mem2mem device, but you can both capture and output from one and the same
+ * device. This should really be split up into two device nodes, but that's a job for
+ * another day.
+ */
+ zr->video_dev->vfl_dir = VFL_DIR_M2M;
+
+ zoran_queue_init(zr, &zr->vq);
+
+ err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
+ if (err < 0)
+ goto exit_statcomb;
+ video_set_drvdata(zr->video_dev, zr);
+
+ zoran_init_hardware(zr);
+ if (!pass_through) {
+ decoder_call(zr, video, s_stream, 0);
+ encoder_call(zr, video, s_routing, 2, 0, 0);
+ }
+
+ zr->initialized = 1;
+ return 0;
+
+exit_statcomb:
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
+exit_statcom:
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
+exit_video:
+ kfree(zr->video_dev);
+exit:
+ return err;
+}
+
+static void zoran_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct zoran *zr = to_zoran(v4l2_dev);
+
+ if (!zr->initialized)
+ goto exit_free;
+
+ zoran_queue_exit(zr);
+
+ /* unregister videocodec bus */
+ if (zr->codec)
+ videocodec_detach(zr->codec);
+ if (zr->vfe)
+ videocodec_detach(zr->vfe);
+
+ /* unregister i2c bus */
+ zoran_unregister_i2c(zr);
+ /* disable PCI bus-mastering */
+ zoran_set_pci_master(zr, 0);
+ /* put chip into reset */
+ btwrite(0, ZR36057_SPGPPCR);
+ pci_free_irq(zr->pci_dev, 0, zr);
+ /* unmap and free memory */
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
+ pci_release_regions(pdev);
+ pci_disable_device(zr->pci_dev);
+ video_unregister_device(zr->video_dev);
+exit_free:
+ v4l2_ctrl_handler_free(&zr->hdl);
+ v4l2_device_unregister(&zr->v4l2_dev);
+}
+
+void zoran_vdev_release(struct video_device *vdev)
+{
+ kfree(vdev);
+}
+
+static struct videocodec_master *zoran_setup_videocodec(struct zoran *zr,
+ int type)
+{
+ struct videocodec_master *m = NULL;
+
+ m = devm_kmalloc(&zr->pci_dev->dev, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return m;
+
+	/*
+	 * magic and type are unused for the master struct; they only make
+	 * sense for codec structs. In the past, .type was initialized to
+	 * the old V4L1 .hardware value, VID_HARDWARE_ZR36067.
+	 */
+ m->magic = 0L;
+ m->type = 0;
+
+ m->flags = CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER;
+ strscpy(m->name, ZR_DEVNAME(zr), sizeof(m->name));
+ m->data = zr;
+
+ switch (type) {
+ case CODEC_TYPE_ZR36060:
+ m->readreg = zr36060_read;
+ m->writereg = zr36060_write;
+ m->flags |= CODEC_FLAG_JPEG | CODEC_FLAG_VFE;
+ break;
+ case CODEC_TYPE_ZR36050:
+ m->readreg = zr36050_read;
+ m->writereg = zr36050_write;
+ m->flags |= CODEC_FLAG_JPEG;
+ break;
+ case CODEC_TYPE_ZR36016:
+ m->readreg = zr36016_read;
+ m->writereg = zr36016_write;
+ m->flags |= CODEC_FLAG_VFE;
+ break;
+ }
+
+ return m;
+}
+
+static void zoran_subdev_notify(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct zoran *zr = to_zoran(sd->v4l2_dev);
+
+ /*
+ * Bt819 needs to reset its FIFO buffer using #FRST pin and
+ * LML33 card uses GPIO(7) for that.
+ */
+ if (cmd == BT819_FIFO_RESET_LOW)
+ GPIO(zr, 7, 0);
+ else if (cmd == BT819_FIFO_RESET_HIGH)
+ GPIO(zr, 7, 1);
+}
+
+static int zoran_video_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct zoran *zr = container_of(ctrl->handler, struct zoran, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ zr->jpg_settings.jpg_comp.quality = ctrl->val;
+ return zoran_check_jpg_settings(zr, &zr->jpg_settings, 0);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops zoran_video_ctrl_ops = {
+ .s_ctrl = zoran_video_set_ctrl,
+};
+
+/*
+ * Scan for a Buz card (actually for the PCI controller ZR36057),
+ * request the irq and map the io memory
+ */
+static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned char latency, need_latency;
+ struct zoran *zr;
+ int result;
+ struct videocodec_master *master_vfe = NULL;
+ struct videocodec_master *master_codec = NULL;
+ int card_num;
+ const char *codec_name, *vfe_name;
+ unsigned int nr;
+ int err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ return -ENODEV;
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ nr = zoran_num++;
+ if (nr >= BUZ_MAX) {
+ pci_err(pdev, "driver limited to %d card(s) maximum\n", BUZ_MAX);
+ return -ENOENT;
+ }
+
+ zr = devm_kzalloc(&pdev->dev, sizeof(*zr), GFP_KERNEL);
+ if (!zr)
+ return -ENOMEM;
+
+ zr->v4l2_dev.notify = zoran_subdev_notify;
+ if (v4l2_device_register(&pdev->dev, &zr->v4l2_dev))
+ goto zr_free_mem;
+ zr->pci_dev = pdev;
+ zr->id = nr;
+ snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
+ if (v4l2_ctrl_handler_init(&zr->hdl, 10))
+ goto zr_unreg;
+ zr->v4l2_dev.ctrl_handler = &zr->hdl;
+ v4l2_ctrl_new_std(&zr->hdl, &zoran_video_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 0,
+ 100, 1, 50);
+ spin_lock_init(&zr->spinlock);
+ mutex_init(&zr->lock);
+ if (pci_enable_device(pdev))
+ goto zr_unreg;
+ zr->revision = zr->pci_dev->revision;
+
+ pci_info(zr->pci_dev, "Zoran ZR360%c7 (rev %d), irq: %d, memory: 0x%08llx\n",
+ zr->revision < 2 ? '5' : '6', zr->revision,
+ zr->pci_dev->irq, (uint64_t)pci_resource_start(zr->pci_dev, 0));
+ if (zr->revision >= 2)
+ pci_info(zr->pci_dev, "Subsystem vendor=0x%04x id=0x%04x\n",
+ zr->pci_dev->subsystem_vendor, zr->pci_dev->subsystem_device);
+
+ /* Use auto-detected card type? */
+ if (card[nr] == -1) {
+ if (zr->revision < 2) {
+ pci_err(pdev, "No card type specified, please use the card=X module parameter\n");
+ pci_err(pdev, "It is not possible to auto-detect ZR36057 based cards\n");
+ goto zr_unreg;
+ }
+
+ card_num = ent->driver_data;
+ if (card_num >= NUM_CARDS) {
+ pci_err(pdev, "Unknown card, try specifying card=X module parameter\n");
+ goto zr_unreg;
+ }
+ pci_info(zr->pci_dev, "%s() - card %s detected\n", __func__, zoran_cards[card_num].name);
+ } else {
+ card_num = card[nr];
+ if (card_num >= NUM_CARDS || card_num < 0) {
+ pci_err(pdev, "User specified card type %d out of range (0 .. %d)\n",
+ card_num, NUM_CARDS - 1);
+ goto zr_unreg;
+ }
+ }
+
+	/*
+	 * Even though we make this a non-pointer and thus theoretically
+	 * allow changes to this struct on a per-card basis at runtime,
+	 * this is strongly discouraged. This structure is intended to
+	 * hold general card information, not settings.
+	 */
+ zr->card = zoran_cards[card_num];
+ snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "%s[%u]",
+ zr->card.name, zr->id);
+
+ err = pci_request_regions(pdev, ZR_DEVNAME(zr));
+ if (err)
+ goto zr_unreg;
+
+ zr->zr36057_mem = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!zr->zr36057_mem) {
+ pci_err(pdev, "%s() - ioremap failed\n", __func__);
+ goto zr_pci_release;
+ }
+
+ result = pci_request_irq(pdev, 0, zoran_irq, NULL, zr, ZR_DEVNAME(zr));
+ if (result < 0) {
+ if (result == -EINVAL) {
+ pci_err(pdev, "%s - bad IRQ number or handler\n", __func__);
+ } else if (result == -EBUSY) {
+ pci_err(pdev, "%s - IRQ %d busy, change your PnP config in BIOS\n",
+ __func__, zr->pci_dev->irq);
+ } else {
+ pci_err(pdev, "%s - cannot assign IRQ, error code %d\n", __func__, result);
+ }
+ goto zr_pci_release;
+ }
+
+ /* set PCI latency timer */
+ pci_read_config_byte(zr->pci_dev, PCI_LATENCY_TIMER,
+ &latency);
+ need_latency = zr->revision > 1 ? 32 : 48;
+ if (latency != need_latency) {
+ pci_info(zr->pci_dev, "Changing PCI latency from %d to %d\n", latency, need_latency);
+ pci_write_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, need_latency);
+ }
+
+ zr36057_restart(zr);
+ /* i2c */
+ pci_info(zr->pci_dev, "Initializing i2c bus...\n");
+
+ if (zoran_register_i2c(zr) < 0) {
+ pci_err(pdev, "%s - can't initialize i2c bus\n", __func__);
+ goto zr_free_irq;
+ }
+
+ zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
+ zr->card.i2c_decoder, 0,
+ zr->card.addrs_decoder);
+
+ if (zr->card.i2c_encoder)
+ zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, &zr->i2c_adapter,
+ zr->card.i2c_encoder, 0,
+ zr->card.addrs_encoder);
+
+ pci_info(zr->pci_dev, "Initializing videocodec bus...\n");
+
+ if (zr->card.video_codec) {
+ codec_name = codecid_to_modulename(zr->card.video_codec);
+ if (codec_name) {
+ result = request_module(codec_name);
+ if (result)
+				pci_err(pdev, "failed to load module %s: %d\n", codec_name, result);
+ }
+ }
+ if (zr->card.video_vfe) {
+ vfe_name = codecid_to_modulename(zr->card.video_vfe);
+ if (vfe_name) {
+ result = request_module(vfe_name);
+ if (result < 0)
+				pci_err(pdev, "failed to load module %s: %d\n", vfe_name, result);
+ }
+ }
+
+ /* reset JPEG codec */
+ jpeg_codec_sleep(zr, 1);
+ jpeg_codec_reset(zr);
+ /* video bus enabled */
+ /* display codec revision */
+ if (zr->card.video_codec != 0) {
+ master_codec = zoran_setup_videocodec(zr, zr->card.video_codec);
+ if (!master_codec)
+ goto zr_unreg_i2c;
+ zr->codec = videocodec_attach(master_codec);
+ if (!zr->codec) {
+ pci_err(pdev, "%s - no codec found\n", __func__);
+ goto zr_unreg_i2c;
+ }
+ if (zr->codec->type != zr->card.video_codec) {
+ pci_err(pdev, "%s - wrong codec\n", __func__);
+ goto zr_detach_codec;
+ }
+ }
+ if (zr->card.video_vfe != 0) {
+ master_vfe = zoran_setup_videocodec(zr, zr->card.video_vfe);
+ if (!master_vfe)
+ goto zr_detach_codec;
+ zr->vfe = videocodec_attach(master_vfe);
+ if (!zr->vfe) {
+ pci_err(pdev, "%s - no VFE found\n", __func__);
+ goto zr_detach_codec;
+ }
+ if (zr->vfe->type != zr->card.video_vfe) {
+			pci_err(pdev, "%s - wrong VFE\n", __func__);
+ goto zr_detach_vfe;
+ }
+ }
+
+ /* take care of Natoma chipset and a revision 1 zr36057 */
+ if ((pci_pci_problems & PCIPCI_NATOMA) && zr->revision <= 1)
+ pci_info(zr->pci_dev, "ZR36057/Natoma bug, max. buffer size is 128K\n");
+
+ if (zr36057_init(zr) < 0)
+ goto zr_detach_vfe;
+
+ zr->map_mode = ZORAN_MAP_MODE_RAW;
+
+ return 0;
+
+zr_detach_vfe:
+ videocodec_detach(zr->vfe);
+zr_detach_codec:
+ videocodec_detach(zr->codec);
+zr_unreg_i2c:
+ zoran_unregister_i2c(zr);
+zr_free_irq:
+ btwrite(0, ZR36057_SPGPPCR);
+ pci_free_irq(zr->pci_dev, 0, zr);
+zr_pci_release:
+ pci_release_regions(pdev);
+zr_unreg:
+ v4l2_ctrl_handler_free(&zr->hdl);
+ v4l2_device_unregister(&zr->v4l2_dev);
+zr_free_mem:
+ return -ENODEV;
+}
+
+static struct pci_driver zoran_driver = {
+ .name = "zr36067",
+ .id_table = zr36067_pci_tbl,
+ .probe = zoran_probe,
+ .remove = zoran_remove,
+};
+
+static int __init zoran_init(void)
+{
+ int res;
+
+ pr_info("Zoran MJPEG board driver version %s\n", ZORAN_VERSION);
+
+ /* check the parameters we have been given, adjust if necessary */
+ if (v4l_nbufs < 2)
+ v4l_nbufs = 2;
+ if (v4l_nbufs > VIDEO_MAX_FRAME)
+ v4l_nbufs = VIDEO_MAX_FRAME;
+	/* The user specifies these in KB; we want them in bytes (and page aligned) */
+ v4l_bufsize = PAGE_ALIGN(v4l_bufsize * 1024);
+ if (v4l_bufsize < 32768)
+ v4l_bufsize = 32768;
+ /* 2 MB is arbitrary but sufficient for the maximum possible images */
+ if (v4l_bufsize > 2048 * 1024)
+ v4l_bufsize = 2048 * 1024;
+ if (jpg_nbufs < 4)
+ jpg_nbufs = 4;
+ if (jpg_nbufs > BUZ_MAX_FRAME)
+ jpg_nbufs = BUZ_MAX_FRAME;
+ jpg_bufsize = PAGE_ALIGN(jpg_bufsize * 1024);
+ if (jpg_bufsize < 8192)
+ jpg_bufsize = 8192;
+ if (jpg_bufsize > (512 * 1024))
+ jpg_bufsize = 512 * 1024;
+	/* Report a user-supplied video memory base address, if given */
+ if (vidmem)
+ pr_info("%s: Using supplied video memory base address @ 0x%lx\n", ZORAN_NAME, vidmem);
+
+ /* some mainboards might not do PCI-PCI data transfer well */
+ if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK))
+ pr_warn("%s: chipset does not support reliable PCI-PCI DMA\n", ZORAN_NAME);
+
+ res = pci_register_driver(&zoran_driver);
+ if (res) {
+ pr_err("Unable to register ZR36057 driver\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static void __exit zoran_exit(void)
+{
+ pci_unregister_driver(&zoran_driver);
+}
+
+module_init(zoran_init);
+module_exit(zoran_exit);
diff --git a/drivers/staging/media/zoran/zoran_card.h b/drivers/staging/media/zoran/zoran_card.h
new file mode 100644
index 000000000000..8e0d634cb30f
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran_card.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * This part handles card-specific data and detection
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ */
+
+#ifndef __ZORAN_CARD_H__
+#define __ZORAN_CARD_H__
+
+extern int zr36067_debug;
+
+/* Anybody who uses more than four? */
+#define BUZ_MAX 4
+
+extern const struct video_device zoran_template;
+
+extern int zoran_check_jpg_settings(struct zoran *zr,
+ struct zoran_jpg_settings *settings,
+ int try);
+extern void zoran_open_init_params(struct zoran *zr);
+extern void zoran_vdev_release(struct video_device *vdev);
+
+void zr36016_write(struct videocodec *codec, u16 reg, u32 val);
+
+#endif /* __ZORAN_CARD_H__ */
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c
new file mode 100644
index 000000000000..e569a1341d01
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran_device.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * This part handles device access (PCI/I2C/codec/...)
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/spinlock.h>
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/io.h>
+
+#include "videocodec.h"
+#include "zoran.h"
+#include "zoran_device.h"
+#include "zoran_card.h"
+
+#define IRQ_MASK (ZR36057_ISR_GIRQ0 | \
+ ZR36057_ISR_GIRQ1 | \
+ ZR36057_ISR_JPEG_REP_IRQ)
+
+/*
+ * Default = 0. When set to 1, use the digital path in capture mode
+ * instead of the analog one. This can be used for picture adjustments
+ * with a tool like xawtv while watching the image on a TV monitor
+ * connected to the output. However, due to the absence of a 75 Ohm
+ * load on the Bt819 input, there will be some image imperfections.
+ */
+static bool lml33dpath;
+
+module_param(lml33dpath, bool, 0644);
+MODULE_PARM_DESC(lml33dpath, "Use digital path capture mode (on LML33 cards)");
+
+int zr_set_buf(struct zoran *zr);
+/*
+ * initialize video front end
+ */
+static void zr36057_init_vfe(struct zoran *zr)
+{
+ u32 reg;
+
+ reg = btread(ZR36057_VFESPFR);
+ reg |= ZR36057_VFESPFR_LITTLE_ENDIAN;
+ reg &= ~ZR36057_VFESPFR_VCLK_POL;
+ reg |= ZR36057_VFESPFR_EXT_FL;
+ reg |= ZR36057_VFESPFR_TOP_FIELD;
+ btwrite(reg, ZR36057_VFESPFR);
+ reg = btread(ZR36057_VDCR);
+ if (pci_pci_problems & PCIPCI_TRITON)
+ // || zr->revision < 1) // Revision 1 has also Triton support
+ reg &= ~ZR36057_VDCR_TRITON;
+ else
+ reg |= ZR36057_VDCR_TRITON;
+ btwrite(reg, ZR36057_VDCR);
+}
+
+/*
+ * General Purpose I/O and Guest bus access
+ */
+
+/*
+ * This is a bit tricky. When a board lacks a GPIO function, the
+ * corresponding GPIO bit number in the card_info structure is set to -1.
+ */
+
+void GPIO(struct zoran *zr, int bit, unsigned int value)
+{
+ u32 reg;
+ u32 mask;
+
+	/*
+	 * Make sure the bit number is legal. A bit number of -1 (lacking)
+	 * gives a mask of 0, making it harmless.
+	 */
+ mask = (1 << (24 + bit)) & 0xff000000;
+ reg = btread(ZR36057_GPPGCR1) & ~mask;
+ if (value)
+ reg |= mask;
+
+ btwrite(reg, ZR36057_GPPGCR1);
+ udelay(1);
+}
+
+/*
+ * Wait until the post office is no longer busy
+ */
+
+int post_office_wait(struct zoran *zr)
+{
+ u32 por;
+
+// while (((por = btread(ZR36057_POR)) & (ZR36057_POR_PO_PEN | ZR36057_POR_PO_TIME)) == ZR36057_POR_PO_PEN) {
+ while ((por = btread(ZR36057_POR)) & ZR36057_POR_PO_PEN) {
+ /* wait for something to happen */
+ /* TODO add timeout */
+ }
+ if ((por & ZR36057_POR_PO_TIME) && !zr->card.gws_not_connected) {
+		/* On LML33/BUZ the \GWS line is not connected, so the timeout bit is always set */
+ pci_info(zr->pci_dev, "pop timeout %08x\n", por);
+ return -1;
+ }
+
+ return 0;
+}
+
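+/*
+ * A post office transaction is packed into the POR register: the guest
+ * number goes into bits 22:20, the guest register into bits 18:16 and
+ * the data byte into bits 7:0; ZR36057_POR_PO_DIR selects a write cycle.
+ */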
+int post_office_write(struct zoran *zr, unsigned int guest,
+ unsigned int reg, unsigned int value)
+{
+ u32 por;
+
+ por =
+ ZR36057_POR_PO_DIR | ZR36057_POR_PO_TIME | ((guest & 7) << 20) |
+ ((reg & 7) << 16) | (value & 0xFF);
+ btwrite(por, ZR36057_POR);
+
+ return post_office_wait(zr);
+}
+
+int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg)
+{
+ u32 por;
+
+ por = ZR36057_POR_PO_TIME | ((guest & 7) << 20) | ((reg & 7) << 16);
+ btwrite(por, ZR36057_POR);
+ if (post_office_wait(zr) < 0)
+ return -1;
+
+ return btread(ZR36057_POR) & 0xFF;
+}
+
+/*
+ * detect guests
+ */
+
+static void dump_guests(struct zoran *zr)
+{
+ if (zr36067_debug > 2) {
+ int i, guest[8];
+
+ /* do not print random data */
+ guest[0] = 0;
+
+ for (i = 1; i < 8; i++) /* Don't read jpeg codec here */
+ guest[i] = post_office_read(zr, i, 0);
+
+ pci_info(zr->pci_dev, "Guests: %*ph\n", 8, guest);
+ }
+}
+
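+/*
+ * Poll register 0 of guests 1-7 every 10 us for up to 100 ms and log
+ * the first eight observed changes together with relative timestamps.
+ */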
+void detect_guest_activity(struct zoran *zr)
+{
+ int timeout, i, j, res, guest[8], guest0[8], change[8][3];
+ ktime_t t0, t1;
+
+ /* do not print random data */
+ guest[0] = 0;
+ guest0[0] = 0;
+
+ dump_guests(zr);
+ pci_info(zr->pci_dev, "Detecting guests activity, please wait...\n");
+ for (i = 1; i < 8; i++) /* Don't read jpeg codec here */
+ guest0[i] = guest[i] = post_office_read(zr, i, 0);
+
+ timeout = 0;
+ j = 0;
+ t0 = ktime_get();
+ while (timeout < 10000) {
+ udelay(10);
+ timeout++;
+ for (i = 1; (i < 8) && (j < 8); i++) {
+ res = post_office_read(zr, i, 0);
+ if (res != guest[i]) {
+ t1 = ktime_get();
+ change[j][0] = ktime_to_us(ktime_sub(t1, t0));
+ t0 = t1;
+ change[j][1] = i;
+ change[j][2] = res;
+ j++;
+ guest[i] = res;
+ }
+ }
+ if (j >= 8)
+ break;
+ }
+
+ pci_info(zr->pci_dev, "Guests: %*ph\n", 8, guest0);
+
+ if (j == 0) {
+ pci_info(zr->pci_dev, "No activity detected.\n");
+ return;
+ }
+ for (i = 0; i < j; i++)
+ pci_info(zr->pci_dev, "%6d: %d => 0x%02x\n", change[i][0], change[i][1], change[i][2]);
+}
+
+/*
+ * JPEG Codec access
+ */
+
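+/*
+ * The JPEG_SLEEP GPIO is active low: driving the pin low puts the
+ * codec to sleep, driving it high wakes it up again.
+ */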
+void jpeg_codec_sleep(struct zoran *zr, int sleep)
+{
+ GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep);
+ if (!sleep) {
+ pci_dbg(zr->pci_dev, "%s() - wake GPIO=0x%08x\n", __func__, btread(ZR36057_GPPGCR1));
+ udelay(500);
+ } else {
+ pci_dbg(zr->pci_dev, "%s() - sleep GPIO=0x%08x\n", __func__, btread(ZR36057_GPPGCR1));
+ udelay(2);
+ }
+}
+
+int jpeg_codec_reset(struct zoran *zr)
+{
+ /* Take the codec out of sleep */
+ jpeg_codec_sleep(zr, 0);
+
+ if (zr->card.gpcs[GPCS_JPEG_RESET] != 0xff) {
+		post_office_write(zr, zr->card.gpcs[GPCS_JPEG_RESET], 0, 0);
+ udelay(2);
+ } else {
+ GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 0);
+ udelay(2);
+ GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 1);
+ udelay(2);
+ }
+
+ return 0;
+}
+
+/*
+ * Set the registers for the size we have specified. Don't bother
+ * trying to understand this without the ZR36057 manual in front of
+ * you [AC].
+ */
+static void zr36057_adjust_vfe(struct zoran *zr, enum zoran_codec_mode mode)
+{
+ u32 reg;
+
+ switch (mode) {
+ case BUZ_MODE_MOTION_DECOMPRESS:
+ btand(~ZR36057_VFESPFR_EXT_FL, ZR36057_VFESPFR);
+ reg = btread(ZR36057_VFEHCR);
+ if ((reg & (1 << 10)) && zr->card.type != LML33R10)
+ reg += ((1 << 10) | 1);
+
+ btwrite(reg, ZR36057_VFEHCR);
+ break;
+ case BUZ_MODE_MOTION_COMPRESS:
+ case BUZ_MODE_IDLE:
+ default:
+ if ((zr->norm & V4L2_STD_NTSC) ||
+ (zr->card.type == LML33R10 &&
+ (zr->norm & V4L2_STD_PAL)))
+ btand(~ZR36057_VFESPFR_EXT_FL, ZR36057_VFESPFR);
+ else
+ btor(ZR36057_VFESPFR_EXT_FL, ZR36057_VFESPFR);
+ reg = btread(ZR36057_VFEHCR);
+ if (!(reg & (1 << 10)) && zr->card.type != LML33R10)
+ reg -= ((1 << 10) | 1);
+
+ btwrite(reg, ZR36057_VFEHCR);
+ break;
+ }
+}
+
+/*
+ * set geometry
+ */
+
+static void zr36057_set_vfe(struct zoran *zr, int video_width, int video_height,
+ const struct zoran_format *format)
+{
+ const struct tvnorm *tvn;
+ unsigned int h_start, HEnd, v_start, VEnd;
+ unsigned int DispMode;
+ unsigned int VidWinWid, VidWinHt;
+ unsigned int hcrop1, hcrop2, vcrop1, vcrop2;
+ unsigned int wa, We, ha, He;
+ unsigned int X, Y, hor_dcm, ver_dcm;
+ u32 reg;
+
+ tvn = zr->timing;
+
+ wa = tvn->wa;
+ ha = tvn->ha;
+
+ pci_info(zr->pci_dev, "set_vfe() - width = %d, height = %d\n", video_width, video_height);
+
+ if (video_width < BUZ_MIN_WIDTH ||
+ video_height < BUZ_MIN_HEIGHT ||
+ video_width > wa || video_height > ha) {
+ pci_err(zr->pci_dev, "set_vfe: w=%d h=%d not valid\n", video_width, video_height);
+ return;
+ }
+
+ /**** zr36057 ****/
+
+ /* horizontal */
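+	/*
+	 * The zr36057 scales in 64ths: X out of every 64 pixels of the
+	 * We-wide input window are kept (hor_dcm = 64 - X goes into the
+	 * register below), and hcrop1/hcrop2 center that window within
+	 * the active line.
+	 */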
+ VidWinWid = video_width;
+ X = DIV_ROUND_UP(VidWinWid * 64, tvn->wa);
+ We = (VidWinWid * 64) / X;
+ hor_dcm = 64 - X;
+ hcrop1 = 2 * ((tvn->wa - We) / 4);
+ hcrop2 = tvn->wa - We - hcrop1;
+ h_start = tvn->h_start ? tvn->h_start : 1;
+	/*
+	 * (Ronald) Original comment:
+	 * "| 1 Doesn't have any effect, tested on both a DC10 and a DC10+"
+	 * This is false. It inverts the chroma values on the LML33R10 (so
+	 * Cr is suddenly shown as Cb and vice versa; a really cool effect
+	 * if you want to see blue faces, not useful otherwise). So don't
+	 * use | 1. However, the DC10 has '0' as h_start but does need | 1,
+	 * so we use a dirty check...
+	 */
+ HEnd = h_start + tvn->wa - 1;
+ h_start += hcrop1;
+ HEnd -= hcrop2;
+ reg = ((h_start & ZR36057_VFEHCR_HMASK) << ZR36057_VFEHCR_H_START)
+ | ((HEnd & ZR36057_VFEHCR_HMASK) << ZR36057_VFEHCR_H_END);
+ if (zr->card.vfe_pol.hsync_pol)
+ reg |= ZR36057_VFEHCR_HS_POL;
+ btwrite(reg, ZR36057_VFEHCR);
+
+ /* Vertical */
+ DispMode = !(video_height > BUZ_MAX_HEIGHT / 2);
+ VidWinHt = DispMode ? video_height : video_height / 2;
+ Y = DIV_ROUND_UP(VidWinHt * 64 * 2, tvn->ha);
+ He = (VidWinHt * 64) / Y;
+ ver_dcm = 64 - Y;
+ vcrop1 = (tvn->ha / 2 - He) / 2;
+ vcrop2 = tvn->ha / 2 - He - vcrop1;
+ v_start = tvn->v_start;
+ VEnd = v_start + tvn->ha / 2; // - 1; FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP
+ v_start += vcrop1;
+ VEnd -= vcrop2;
+ reg = ((v_start & ZR36057_VFEVCR_VMASK) << ZR36057_VFEVCR_V_START)
+ | ((VEnd & ZR36057_VFEVCR_VMASK) << ZR36057_VFEVCR_V_END);
+ if (zr->card.vfe_pol.vsync_pol)
+ reg |= ZR36057_VFEVCR_VS_POL;
+ btwrite(reg, ZR36057_VFEVCR);
+
+ /* scaler and pixel format */
+ reg = 0;
+ reg |= (hor_dcm << ZR36057_VFESPFR_HOR_DCM);
+ reg |= (ver_dcm << ZR36057_VFESPFR_VER_DCM);
+ reg |= (DispMode << ZR36057_VFESPFR_DISP_MODE);
+	/*
+	 * RJ: I don't know why the following has to be the opposite of the
+	 * corresponding ZR36060 setting, but only this way do we get the
+	 * correct colors when uncompressing to the screen.
+	 */
+ //reg |= ZR36057_VFESPFR_VCLK_POL; /**/
+ /* RJ: Don't know if that is needed for NTSC also */
+ if (!(zr->norm & V4L2_STD_NTSC))
+ reg |= ZR36057_VFESPFR_EXT_FL; // NEEDED!!!!!!! Wolfgang
+ reg |= ZR36057_VFESPFR_TOP_FIELD;
+ if (hor_dcm >= 48)
+ reg |= 3 << ZR36057_VFESPFR_H_FILTER; /* 5 tap filter */
+ else if (hor_dcm >= 32)
+ reg |= 2 << ZR36057_VFESPFR_H_FILTER; /* 4 tap filter */
+ else if (hor_dcm >= 16)
+ reg |= 1 << ZR36057_VFESPFR_H_FILTER; /* 3 tap filter */
+
+ reg |= format->vfespfr;
+ btwrite(reg, ZR36057_VFESPFR);
+
+ /* display configuration */
+ reg = (16 << ZR36057_VDCR_MIN_PIX)
+ | (VidWinHt << ZR36057_VDCR_VID_WIN_HT)
+ | (VidWinWid << ZR36057_VDCR_VID_WIN_WID);
+ if (pci_pci_problems & PCIPCI_TRITON)
+ // || zr->revision < 1) // Revision 1 has also Triton support
+ reg &= ~ZR36057_VDCR_TRITON;
+ else
+ reg |= ZR36057_VDCR_TRITON;
+ btwrite(reg, ZR36057_VDCR);
+
+ zr36057_adjust_vfe(zr, zr->codec_mode);
+}
+
+/* Enable/Disable uncompressed memory grabbing of the 36057 */
+void zr36057_set_memgrab(struct zoran *zr, int mode)
+{
+ if (mode) {
+		/*
+		 * We only check SnapShot and not FrameGrab here. SnapShot == 1
+		 * means a capture is already in progress, but FrameGrab == 1
+		 * doesn't necessarily mean that. It's more correct to say a
+		 * 1 to 0 transition indicates a capture completed. If a
+		 * capture is pending when capturing is turned off, FrameGrab
+		 * will be stuck at 1 until capturing is turned back on.
+		 */
+ if (btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SNAP_SHOT)
+ pci_warn(zr->pci_dev, "zr36057_set_memgrab(1) with SnapShot on!?\n");
+
+ /* switch on VSync interrupts */
+ btwrite(IRQ_MASK, ZR36057_ISR); // Clear Interrupts
+ btor(zr->card.vsync_int, ZR36057_ICR); // SW
+
+ /* enable SnapShot */
+ btor(ZR36057_VSSFGR_SNAP_SHOT, ZR36057_VSSFGR);
+
+ /* Set zr36057 video front end and enable video */
+ zr36057_set_vfe(zr, zr->v4l_settings.width,
+ zr->v4l_settings.height,
+ zr->v4l_settings.format);
+ } else {
+ /* switch off VSync interrupts */
+ btand(~zr->card.vsync_int, ZR36057_ICR); // SW
+
+ /* re-enable grabbing to screen if it was running */
+ btand(~ZR36057_VDCR_VID_EN, ZR36057_VDCR);
+ btand(~ZR36057_VSSFGR_SNAP_SHOT, ZR36057_VSSFGR);
+ }
+}
+
+/*****************************************************************************
+ * *
+ * Set up the Buz-specific MJPEG part *
+ * *
+ *****************************************************************************/
+
+static inline void set_frame(struct zoran *zr, int val)
+{
+ GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_FRAME], val);
+}
+
+static void set_videobus_dir(struct zoran *zr, int val)
+{
+ switch (zr->card.type) {
+ case LML33:
+ case LML33R10:
+ if (!lml33dpath)
+ GPIO(zr, 5, val);
+ else
+ GPIO(zr, 5, 1);
+ break;
+ default:
+ GPIO(zr, zr->card.gpio[ZR_GPIO_VID_DIR],
+ zr->card.gpio_pol[ZR_GPIO_VID_DIR] ? !val : val);
+ break;
+ }
+}
+
+static void init_jpeg_queue(struct zoran *zr)
+{
+ int i;
+
+ /* re-initialize DMA ring stuff */
+ zr->jpg_que_head = 0;
+ zr->jpg_dma_head = 0;
+ zr->jpg_dma_tail = 0;
+ zr->jpg_que_tail = 0;
+ zr->jpg_seq_num = 0;
+ zr->jpeg_error = 0;
+ zr->num_errors = 0;
+ zr->jpg_err_seq = 0;
+ zr->jpg_err_shift = 0;
+ zr->jpg_queued_num = 0;
+ for (i = 0; i < BUZ_NUM_STAT_COM; i++)
+ zr->stat_com[i] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
+}
+
+static void zr36057_set_jpg(struct zoran *zr, enum zoran_codec_mode mode)
+{
+ const struct tvnorm *tvn;
+ u32 reg;
+
+ tvn = zr->timing;
+
+ /* assert P_Reset, disable code transfer, deassert Active */
+ btwrite(0, ZR36057_JPC);
+
+ /* MJPEG compression mode */
+ switch (mode) {
+ case BUZ_MODE_MOTION_COMPRESS:
+ default:
+ reg = ZR36057_JMC_MJPG_CMP_MODE;
+ break;
+
+ case BUZ_MODE_MOTION_DECOMPRESS:
+ reg = ZR36057_JMC_MJPG_EXP_MODE;
+ reg |= ZR36057_JMC_SYNC_MSTR;
+ /* RJ: The following is experimental - improves the output to screen */
+ //if(zr->jpg_settings.VFIFO_FB) reg |= ZR36057_JMC_VFIFO_FB; // No, it doesn't. SM
+ break;
+
+ case BUZ_MODE_STILL_COMPRESS:
+ reg = ZR36057_JMC_JPG_CMP_MODE;
+ break;
+
+ case BUZ_MODE_STILL_DECOMPRESS:
+ reg = ZR36057_JMC_JPG_EXP_MODE;
+ break;
+ }
+ reg |= ZR36057_JMC_JPG;
+ if (zr->jpg_settings.field_per_buff == 1)
+ reg |= ZR36057_JMC_FLD_PER_BUFF;
+ btwrite(reg, ZR36057_JMC);
+
+ /* vertical */
+ btor(ZR36057_VFEVCR_VS_POL, ZR36057_VFEVCR);
+ reg = (6 << ZR36057_VSP_VSYNC_SIZE) |
+ (tvn->ht << ZR36057_VSP_FRM_TOT);
+ btwrite(reg, ZR36057_VSP);
+ reg = ((zr->jpg_settings.img_y + tvn->v_start) << ZR36057_FVAP_NAY) |
+ (zr->jpg_settings.img_height << ZR36057_FVAP_PAY);
+ btwrite(reg, ZR36057_FVAP);
+
+ /* horizontal */
+ if (zr->card.vfe_pol.hsync_pol)
+ btor(ZR36057_VFEHCR_HS_POL, ZR36057_VFEHCR);
+ else
+ btand(~ZR36057_VFEHCR_HS_POL, ZR36057_VFEHCR);
+ reg = ((tvn->h_sync_start) << ZR36057_HSP_HSYNC_START) |
+ (tvn->wt << ZR36057_HSP_LINE_TOT);
+ btwrite(reg, ZR36057_HSP);
+ reg = ((zr->jpg_settings.img_x +
+ tvn->h_start + 4) << ZR36057_FHAP_NAX) |
+ (zr->jpg_settings.img_width << ZR36057_FHAP_PAX);
+ btwrite(reg, ZR36057_FHAP);
+
+ /* field process parameters */
+ if (zr->jpg_settings.odd_even)
+ reg = ZR36057_FPP_ODD_EVEN;
+ else
+ reg = 0;
+
+ btwrite(reg, ZR36057_FPP);
+
+ /* Set proper VCLK Polarity, else colors will be wrong during playback */
+ //btor(ZR36057_VFESPFR_VCLK_POL, ZR36057_VFESPFR);
+
+ /* code base address */
+ btwrite(zr->p_sc, ZR36057_JCBA);
+
+	/* FIFO threshold (FIFO is 160 double words) */
+	/* NOTE: decimal values here */
+ switch (mode) {
+ case BUZ_MODE_STILL_COMPRESS:
+ case BUZ_MODE_MOTION_COMPRESS:
+ if (zr->card.type != BUZ)
+ reg = 140;
+ else
+ reg = 60;
+ break;
+
+ case BUZ_MODE_STILL_DECOMPRESS:
+ case BUZ_MODE_MOTION_DECOMPRESS:
+ reg = 20;
+ break;
+
+ default:
+ reg = 80;
+ break;
+ }
+ btwrite(reg, ZR36057_JCFT);
+ zr36057_adjust_vfe(zr, mode);
+}
+
+void clear_interrupt_counters(struct zoran *zr)
+{
+ zr->intr_counter_GIRQ1 = 0;
+ zr->intr_counter_GIRQ0 = 0;
+ zr->intr_counter_cod_rep_irq = 0;
+ zr->intr_counter_jpeg_rep_irq = 0;
+ zr->field_counter = 0;
+ zr->irq1_in = 0;
+ zr->irq1_out = 0;
+ zr->jpeg_in = 0;
+ zr->jpeg_out = 0;
+ zr->JPEG_0 = 0;
+ zr->JPEG_1 = 0;
+ zr->end_event_missed = 0;
+ zr->jpeg_missed = 0;
+ zr->jpeg_max_missed = 0;
+ zr->jpeg_min_missed = 0x7fffffff;
+}
+
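+/*
+ * Read the pending interrupt bits (0x78000000 covers GIRQ0, GIRQ1,
+ * CodRepIRQ and JPEGRepIRQ), acknowledge each by writing it back to
+ * the ISR and bump the matching statistics counter.
+ */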
+static u32 count_reset_interrupt(struct zoran *zr)
+{
+ u32 isr;
+
+ isr = btread(ZR36057_ISR) & 0x78000000;
+ if (isr) {
+ if (isr & ZR36057_ISR_GIRQ1) {
+ btwrite(ZR36057_ISR_GIRQ1, ZR36057_ISR);
+ zr->intr_counter_GIRQ1++;
+ }
+ if (isr & ZR36057_ISR_GIRQ0) {
+ btwrite(ZR36057_ISR_GIRQ0, ZR36057_ISR);
+ zr->intr_counter_GIRQ0++;
+ }
+ if (isr & ZR36057_ISR_COD_REP_IRQ) {
+ btwrite(ZR36057_ISR_COD_REP_IRQ, ZR36057_ISR);
+ zr->intr_counter_cod_rep_irq++;
+ }
+ if (isr & ZR36057_ISR_JPEG_REP_IRQ) {
+ btwrite(ZR36057_ISR_JPEG_REP_IRQ, ZR36057_ISR);
+ zr->intr_counter_jpeg_rep_irq++;
+ }
+ }
+ return isr;
+}
+
+void jpeg_start(struct zoran *zr)
+{
+ int reg;
+
+ zr->frame_num = 0;
+
+ /* deassert P_reset, disable code transfer, deassert Active */
+ btwrite(ZR36057_JPC_P_RESET, ZR36057_JPC);
+ /* stop flushing the internal code buffer */
+ btand(~ZR36057_MCTCR_C_FLUSH, ZR36057_MCTCR);
+ /* enable code transfer */
+ btor(ZR36057_JPC_COD_TRNS_EN, ZR36057_JPC);
+
+ /* clear IRQs */
+ btwrite(IRQ_MASK, ZR36057_ISR);
+ /* enable the JPEG IRQs */
+ btwrite(zr->card.jpeg_int | ZR36057_ICR_JPEG_REP_IRQ | ZR36057_ICR_INT_PIN_EN,
+ ZR36057_ICR);
+
+ set_frame(zr, 0); // \FRAME
+
+ /* set the JPEG codec guest ID */
+ reg = (zr->card.gpcs[1] << ZR36057_JCGI_JPE_GUEST_ID) |
+ (0 << ZR36057_JCGI_JPE_GUEST_REG);
+ btwrite(reg, ZR36057_JCGI);
+
+ if (zr->card.video_vfe == CODEC_TYPE_ZR36016 &&
+ zr->card.video_codec == CODEC_TYPE_ZR36050) {
+ /* Enable processing on the ZR36016 */
+ if (zr->vfe)
+ zr36016_write(zr->vfe, 0, 1);
+
+ /* load the address of the GO register in the ZR36050 latch */
+ post_office_write(zr, 0, 0, 0);
+ }
+
+ /* assert Active */
+ btor(ZR36057_JPC_ACTIVE, ZR36057_JPC);
+
+ /* enable the Go generation */
+ btor(ZR36057_JMC_GO_EN, ZR36057_JMC);
+ udelay(30);
+
+ set_frame(zr, 1); // /FRAME
+
+ pci_dbg(zr->pci_dev, "jpeg_start\n");
+}
+
+void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode)
+{
+ struct vfe_settings cap;
+ int field_size = zr->buffer_size / zr->jpg_settings.field_per_buff;
+
+ zr->codec_mode = mode;
+
+ cap.x = zr->jpg_settings.img_x;
+ cap.y = zr->jpg_settings.img_y;
+ cap.width = zr->jpg_settings.img_width;
+ cap.height = zr->jpg_settings.img_height;
+ cap.decimation =
+ zr->jpg_settings.hor_dcm | (zr->jpg_settings.ver_dcm << 8);
+ cap.quality = zr->jpg_settings.jpg_comp.quality;
+
+ switch (mode) {
+ case BUZ_MODE_MOTION_COMPRESS: {
+ struct jpeg_app_marker app;
+ struct jpeg_com_marker com;
+
+ /* In motion compress mode, the decoder output must be enabled, and
+ * the video bus direction set to input.
+ */
+ set_videobus_dir(zr, 0);
+ decoder_call(zr, video, s_stream, 1);
+ encoder_call(zr, video, s_routing, 0, 0, 0);
+
+ /* Take the JPEG codec and the VFE out of sleep */
+ jpeg_codec_sleep(zr, 0);
+
+ /* set JPEG app/com marker */
+ app.appn = zr->jpg_settings.jpg_comp.APPn;
+ app.len = zr->jpg_settings.jpg_comp.APP_len;
+ memcpy(app.data, zr->jpg_settings.jpg_comp.APP_data, 60);
+ zr->codec->control(zr->codec, CODEC_S_JPEG_APP_DATA,
+ sizeof(struct jpeg_app_marker), &app);
+
+ com.len = zr->jpg_settings.jpg_comp.COM_len;
+ memcpy(com.data, zr->jpg_settings.jpg_comp.COM_data, 60);
+ zr->codec->control(zr->codec, CODEC_S_JPEG_COM_DATA,
+ sizeof(struct jpeg_com_marker), &com);
+
+ /* Setup the JPEG codec */
+ zr->codec->control(zr->codec, CODEC_S_JPEG_TDS_BYTE,
+ sizeof(int), &field_size);
+ zr->codec->set_video(zr->codec, zr->timing, &cap,
+ &zr->card.vfe_pol);
+ zr->codec->set_mode(zr->codec, CODEC_DO_COMPRESSION);
+
+ /* Setup the VFE */
+ if (zr->vfe) {
+ zr->vfe->control(zr->vfe, CODEC_S_JPEG_TDS_BYTE,
+ sizeof(int), &field_size);
+ zr->vfe->set_video(zr->vfe, zr->timing, &cap,
+ &zr->card.vfe_pol);
+ zr->vfe->set_mode(zr->vfe, CODEC_DO_COMPRESSION);
+ }
+
+ init_jpeg_queue(zr);
+ zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
+
+ clear_interrupt_counters(zr);
+ pci_info(zr->pci_dev, "enable_jpg(MOTION_COMPRESS)\n");
+ break;
+ }
+
+ case BUZ_MODE_MOTION_DECOMPRESS:
+ /* In motion decompression mode, the decoder output must be disabled, and
+ * the video bus direction set to output.
+ */
+ decoder_call(zr, video, s_stream, 0);
+ set_videobus_dir(zr, 1);
+ encoder_call(zr, video, s_routing, 1, 0, 0);
+
+ /* Take the JPEG codec and the VFE out of sleep */
+ jpeg_codec_sleep(zr, 0);
+ /* Setup the VFE */
+ if (zr->vfe) {
+ zr->vfe->set_video(zr->vfe, zr->timing, &cap,
+ &zr->card.vfe_pol);
+ zr->vfe->set_mode(zr->vfe, CODEC_DO_EXPANSION);
+ }
+ /* Setup the JPEG codec */
+ zr->codec->set_video(zr->codec, zr->timing, &cap,
+ &zr->card.vfe_pol);
+ zr->codec->set_mode(zr->codec, CODEC_DO_EXPANSION);
+
+ init_jpeg_queue(zr);
+ zr36057_set_jpg(zr, mode); // \P_Reset, ... Video param, FIFO
+
+ clear_interrupt_counters(zr);
+ pci_info(zr->pci_dev, "enable_jpg(MOTION_DECOMPRESS)\n");
+ break;
+
+ case BUZ_MODE_IDLE:
+ default:
+ /* shut down processing */
+ btand(~(zr->card.jpeg_int | ZR36057_ICR_JPEG_REP_IRQ),
+ ZR36057_ICR);
+ btwrite(zr->card.jpeg_int | ZR36057_ICR_JPEG_REP_IRQ,
+ ZR36057_ISR);
+ btand(~ZR36057_JMC_GO_EN, ZR36057_JMC); // \Go_en
+
+ msleep(50);
+
+ set_videobus_dir(zr, 0);
+ set_frame(zr, 1); // /FRAME
+ btor(ZR36057_MCTCR_C_FLUSH, ZR36057_MCTCR); // /CFlush
+ btwrite(0, ZR36057_JPC); // \P_Reset,\CodTrnsEn,\Active
+ btand(~ZR36057_JMC_VFIFO_FB, ZR36057_JMC);
+ btand(~ZR36057_JMC_SYNC_MSTR, ZR36057_JMC);
+ jpeg_codec_reset(zr);
+ jpeg_codec_sleep(zr, 1);
+ zr36057_adjust_vfe(zr, mode);
+
+ decoder_call(zr, video, s_stream, 1);
+ encoder_call(zr, video, s_routing, 0, 0, 0);
+
+ pci_info(zr->pci_dev, "enable_jpg(IDLE)\n");
+ break;
+ }
+}
+
+/* zr->queued_bufs_lock is taken inside this function; callers must not hold it */
+void zoran_feed_stat_com(struct zoran *zr)
+{
+ /* move frames from pending queue to DMA */
+
+ int i, max_stat_com;
+ struct zr_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr = 0;
+ unsigned long flags;
+ unsigned long payload;
+
+ max_stat_com =
+ (zr->jpg_settings.tmp_dcm ==
+ 1) ? BUZ_NUM_STAT_COM : (BUZ_NUM_STAT_COM >> 1);
+
+ spin_lock_irqsave(&zr->queued_bufs_lock, flags);
+ while ((zr->jpg_dma_head - zr->jpg_dma_tail) < max_stat_com) {
+ buf = list_first_entry_or_null(&zr->queued_bufs, struct zr_buffer, queue);
+ if (!buf) {
+ pci_err(zr->pci_dev, "No buffer available to queue\n");
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ return;
+ }
+ list_del(&buf->queue);
+ zr->buf_in_reserve--;
+ vbuf = &buf->vbuf;
+ vbuf->vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ payload = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ if (payload == 0)
+ payload = zr->buffer_size;
+ if (zr->jpg_settings.tmp_dcm == 1) {
+ /* fill 1 stat_com entry */
+ i = (zr->jpg_dma_head -
+ zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
+ if (!(zr->stat_com[i] & cpu_to_le32(1)))
+ break;
+ zr->stat_comb[i * 2] = cpu_to_le32(phys_addr);
+ zr->stat_comb[i * 2 + 1] = cpu_to_le32((payload >> 1) | 1);
+ zr->inuse[i] = buf;
+ zr->stat_com[i] = cpu_to_le32(zr->p_scb + i * 2 * 4);
+ } else {
+ /* fill 2 stat_com entries */
+ i = ((zr->jpg_dma_head -
+ zr->jpg_err_shift) & 1) * 2;
+ if (!(zr->stat_com[i] & cpu_to_le32(1)))
+ break;
+ zr->stat_com[i] = cpu_to_le32(zr->p_scb + i * 2 * 4);
+ zr->stat_com[i + 1] = cpu_to_le32(zr->p_scb + i * 2 * 4);
+
+ zr->stat_comb[i * 2] = cpu_to_le32(phys_addr);
+ zr->stat_comb[i * 2 + 1] = cpu_to_le32((payload >> 1) | 1);
+
+ zr->inuse[i] = buf;
+ zr->inuse[i + 1] = NULL;
+ }
+ zr->jpg_dma_head++;
+ }
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS)
+ zr->jpg_queued_num++;
+}
+
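+/*
+ * Layout of a completed stat_com entry, as consumed below: bit 0 set
+ * marks the entry done, bits 22:1 hold the compressed size and bits
+ * 31:24 the hardware frame counter.
+ */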
+/* zr->queued_bufs_lock is taken inside this function; callers must not hold it */
+static void zoran_reap_stat_com(struct zoran *zr)
+{
+ /* move frames from DMA queue to done queue */
+
+ int i;
+ u32 stat_com;
+ unsigned int seq;
+ unsigned int dif;
+ unsigned long flags;
+ struct zr_buffer *buf;
+ unsigned int size = 0;
+ u32 fcnt;
+
+	/*
+	 * In motion decompress we don't have a hardware frame counter;
+	 * we just count the interrupts here.
+	 */
+
+ if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS)
+ zr->jpg_seq_num++;
+
+ spin_lock_irqsave(&zr->queued_bufs_lock, flags);
+ while (zr->jpg_dma_tail < zr->jpg_dma_head) {
+ if (zr->jpg_settings.tmp_dcm == 1)
+ i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM;
+ else
+ i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1;
+
+ stat_com = le32_to_cpu(zr->stat_com[i]);
+ if ((stat_com & 1) == 0) {
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ return;
+ }
+
+ fcnt = (stat_com & GENMASK(31, 24)) >> 24;
+ size = (stat_com & GENMASK(22, 1)) >> 1;
+
+ buf = zr->inuse[i];
+ buf->vbuf.vb2_buf.timestamp = ktime_get_ns();
+
+ if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) {
+ vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, size);
+
+ /* update sequence number with the help of the counter in stat_com */
+ seq = (fcnt + zr->jpg_err_seq) & 0xff;
+ dif = (seq - zr->jpg_seq_num) & 0xff;
+ zr->jpg_seq_num += dif;
+ }
+ buf->vbuf.sequence = zr->jpg_settings.tmp_dcm ==
+ 2 ? (zr->jpg_seq_num >> 1) : zr->jpg_seq_num;
+ zr->inuse[i] = NULL;
+ if (zr->jpg_settings.tmp_dcm != 1)
+ buf->vbuf.field = zr->jpg_settings.odd_even ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ else
+ buf->vbuf.field = zr->jpg_settings.odd_even ?
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT;
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ zr->jpg_dma_tail++;
+ }
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+}
+
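+/*
+ * Interrupt dispatch: on a VSync interrupt, raw capture mode rotates
+ * the capture buffer via zr_set_buf(), while in MJPEG mode a
+ * JPEGRepIRQ reaps finished stat_com entries and feeds new ones.
+ * Anything else is counted as a ghost interrupt.
+ */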
+irqreturn_t zoran_irq(int irq, void *dev_id)
+{
+ struct zoran *zr = dev_id;
+ u32 stat, astat;
+
+ stat = count_reset_interrupt(zr);
+ astat = stat & IRQ_MASK;
+ if (astat & zr->card.vsync_int) {
+ if (zr->running == ZORAN_MAP_MODE_RAW) {
+ if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SNAP_SHOT) == 0)
+ pci_warn(zr->pci_dev, "BuzIRQ with SnapShot off ???\n");
+ if ((btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_FRAME_GRAB) == 0)
+ zr_set_buf(zr);
+ return IRQ_HANDLED;
+ }
+ if (astat & ZR36057_ISR_JPEG_REP_IRQ) {
+ if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS &&
+ zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) {
+ pci_err(zr->pci_dev, "JPG IRQ when not in good mode\n");
+ return IRQ_HANDLED;
+ }
+ zr->frame_num++;
+ zoran_reap_stat_com(zr);
+ zoran_feed_stat_com(zr);
+ return IRQ_HANDLED;
+ }
+ /* unused interrupts */
+ }
+ zr->ghost_int++;
+ return IRQ_HANDLED;
+}
+
+void zoran_set_pci_master(struct zoran *zr, int set_master)
+{
+ if (set_master) {
+ pci_set_master(zr->pci_dev);
+ } else {
+ u16 command;
+
+ pci_read_config_word(zr->pci_dev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(zr->pci_dev, PCI_COMMAND, command);
+ }
+}
+
+void zoran_init_hardware(struct zoran *zr)
+{
+ /* Enable bus-mastering */
+ zoran_set_pci_master(zr, 1);
+
+ /* Initialize the board */
+ if (zr->card.init)
+ zr->card.init(zr);
+
+ decoder_call(zr, core, init, 0);
+ decoder_call(zr, video, s_std, zr->norm);
+ decoder_call(zr, video, s_routing,
+ zr->card.input[zr->input].muxsel, 0, 0);
+
+ encoder_call(zr, core, init, 0);
+ encoder_call(zr, video, s_std_output, zr->norm);
+ encoder_call(zr, video, s_routing, 0, 0, 0);
+
+ /* toggle JPEG codec sleep to sync PLL */
+ jpeg_codec_sleep(zr, 1);
+ jpeg_codec_sleep(zr, 0);
+
+ /*
+ * set individual interrupt enables (without GIRQ1)
+ * but don't global enable until zoran_open()
+ */
+ zr36057_init_vfe(zr);
+
+ zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
+
+ btwrite(IRQ_MASK, ZR36057_ISR); // Clears interrupts
+}
+
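+/*
+ * Writing 0 to SPGPPCR holds the zr36057 in software reset (the same
+ * write is used in zoran_remove()); setting the SOFT_RESET bit
+ * releases it again.
+ */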
+void zr36057_restart(struct zoran *zr)
+{
+ btwrite(0, ZR36057_SPGPPCR);
+ udelay(1000);
+ btor(ZR36057_SPGPPCR_SOFT_RESET, ZR36057_SPGPPCR);
+ udelay(1000);
+
+ /* assert P_Reset */
+ btwrite(0, ZR36057_JPC);
+ /* set up GPIO direction - all output */
+ btwrite(ZR36057_SPGPPCR_SOFT_RESET | 0, ZR36057_SPGPPCR);
+
+ /* set up GPIO pins and guest bus timing */
+ btwrite((0x81 << 24) | 0x8888, ZR36057_GPPGCR1);
+}
+
diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h
new file mode 100644
index 000000000000..24be19a61b6d
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran_device.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * This part handles device access (PCI/I2C/codec/...)
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ */
+
+#ifndef __ZORAN_DEVICE_H__
+#define __ZORAN_DEVICE_H__
+
+/* general purpose I/O */
+extern void GPIO(struct zoran *zr, int bit, unsigned int value);
+
+/* codec (or actually: guest bus) access */
+extern int post_office_wait(struct zoran *zr);
+extern int post_office_write(struct zoran *zr, unsigned int guest, unsigned int reg, unsigned int value);
+extern int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg);
+
+extern void detect_guest_activity(struct zoran *zr);
+
+extern void jpeg_codec_sleep(struct zoran *zr, int sleep);
+extern int jpeg_codec_reset(struct zoran *zr);
+
+/* zr360x7 access to raw capture */
+extern void zr36057_overlay(struct zoran *zr, int on);
+extern void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count);
+extern void zr36057_set_memgrab(struct zoran *zr, int mode);
+extern int wait_grab_pending(struct zoran *zr);
+
+/* interrupts */
+extern void print_interrupts(struct zoran *zr);
+extern void clear_interrupt_counters(struct zoran *zr);
+extern irqreturn_t zoran_irq(int irq, void *dev_id);
+
+/* JPEG codec access */
+extern void jpeg_start(struct zoran *zr);
+extern void zr36057_enable_jpg(struct zoran *zr,
+ enum zoran_codec_mode mode);
+extern void zoran_feed_stat_com(struct zoran *zr);
+
+/* general */
+extern void zoran_set_pci_master(struct zoran *zr, int set_master);
+extern void zoran_init_hardware(struct zoran *zr);
+extern void zr36057_restart(struct zoran *zr);
+
+extern const struct zoran_format zoran_formats[];
+
+extern int v4l_nbufs;
+extern int v4l_bufsize;
+extern int jpg_nbufs;
+extern int jpg_bufsize;
+extern int pass_through;
+
+/* i2c */
+#define decoder_call(zr, o, f, args...) \
+ v4l2_subdev_call(zr->decoder, o, f, ##args)
+#define encoder_call(zr, o, f, args...) \
+ v4l2_subdev_call(zr->encoder, o, f, ##args)
+
+#endif /* __ZORAN_DEVICE_H__ */
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c
new file mode 100644
index 000000000000..808196ea5b81
--- /dev/null
+++ b/drivers/staging/media/zoran/zoran_driver.c
@@ -0,0 +1,1037 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ *
+ * Changes for BUZ by Wolfgang Scherr <scherr@net4you.net>
+ *
+ * Changes for DC10/DC30 by Laurent Pinchart <laurent.pinchart@skynet.be>
+ *
+ * Changes for LML33R10 by Maxim Yevtyushkin <max@linuxmedialabs.com>
+ *
+ * Changes for videodev2/v4l2 by Ronald Bultje <rbultje@ronald.bitfreak.net>
+ *
+ * Based on
+ *
+ * Miro DC10 driver
+ * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
+ *
+ * Iomega Buz driver version 1.0
+ * Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de>
+ *
+ * buz.0.0.3
+ * Copyright (C) 1998 Dave Perks <dperks@ibm.net>
+ *
+ * bttv - Bt848 frame grabber driver
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ * & Marcus Metzler (mocm@thp.uni-koeln.de)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include <linux/spinlock.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include "videocodec.h"
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <linux/mutex.h>
+#include "zoran.h"
+#include "zoran_device.h"
+#include "zoran_card.h"
+
+const struct zoran_format zoran_formats[] = {
+ {
+ .name = "15-bit RGB LE",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 15,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB555 | ZR36057_VFESPFR_ERR_DIF |
+ ZR36057_VFESPFR_LITTLE_ENDIAN,
+ }, {
+ .name = "15-bit RGB BE",
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 15,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB555 | ZR36057_VFESPFR_ERR_DIF,
+ }, {
+ .name = "16-bit RGB LE",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 16,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB565 | ZR36057_VFESPFR_ERR_DIF |
+ ZR36057_VFESPFR_LITTLE_ENDIAN,
+ }, {
+ .name = "16-bit RGB BE",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 16,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB565 | ZR36057_VFESPFR_ERR_DIF,
+ }, {
+ .name = "24-bit RGB",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 24,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB888 | ZR36057_VFESPFR_PACK24,
+ }, {
+ .name = "32-bit RGB LE",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 32,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB888 | ZR36057_VFESPFR_LITTLE_ENDIAN,
+ }, {
+ .name = "32-bit RGB BE",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .depth = 32,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_RGB888,
+ }, {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .depth = 16,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_YUV422,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .depth = 16,
+ .flags = ZORAN_FORMAT_CAPTURE,
+ .vfespfr = ZR36057_VFESPFR_YUV422 | ZR36057_VFESPFR_LITTLE_ENDIAN,
+ }, {
+ .name = "Hardware-encoded Motion-JPEG",
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .depth = 0,
+ .flags = ZORAN_FORMAT_CAPTURE |
+ ZORAN_FORMAT_PLAYBACK |
+ ZORAN_FORMAT_COMPRESSED,
+ }
+};
+
+#define NUM_FORMATS ARRAY_SIZE(zoran_formats)
+
+/*
+ * Small helper function for calculating buffer sizes for v4l2: we
+ * calculate the nearest higher power of two, which will be the
+ * recommended buffer size.
+ */
+static __u32 zoran_v4l2_calc_bufsize(struct zoran_jpg_settings *settings)
+{
+ __u8 div = settings->ver_dcm * settings->hor_dcm * settings->tmp_dcm;
+ __u32 num = (1024 * 512) / (div);
+ __u32 result = 2;
+
+ num--;
+ while (num) {
+ num >>= 1;
+ result <<= 1;
+ }
+
+ if (result > jpg_bufsize)
+ return jpg_bufsize;
+ if (result < 8192)
+ return 8192;
+
+ return result;
+}
+
+/*
+ * V4L Buffer grabbing
+ */
+static int zoran_v4l_set_format(struct zoran *zr, int width, int height,
+ const struct zoran_format *format)
+{
+ int bpp;
+
+ /* Check size and format of the grab wanted */
+
+ if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH ||
+ height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) {
+ pci_err(zr->pci_dev, "%s - wrong frame size (%dx%d)\n", __func__, width, height);
+ return -EINVAL;
+ }
+
+ bpp = (format->depth + 7) / 8;
+
+	zr->buffer_size = height * width * bpp;
+
+	/*
+	 * Check against the available buffer size; note that buffer_size
+	 * was just derived from the same frame dimensions, so this check
+	 * can no longer fail.
+	 */
+	if (height * width * bpp > zr->buffer_size) {
+ pci_err(zr->pci_dev, "%s - video buffer size (%d kB) is too small\n",
+ __func__, zr->buffer_size >> 10);
+ return -EINVAL;
+ }
+
+	/* The video front end needs 4-byte aligned line sizes */
+
+ if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) {
+ pci_err(zr->pci_dev, "%s - wrong frame alignment\n", __func__);
+ return -EINVAL;
+ }
+
+ zr->v4l_settings.width = width;
+ zr->v4l_settings.height = height;
+ zr->v4l_settings.format = format;
+ zr->v4l_settings.bytesperline = bpp * zr->v4l_settings.width;
+
+ return 0;
+}
+
+static int zoran_set_norm(struct zoran *zr, v4l2_std_id norm)
+{
+ if (!(norm & zr->card.norms)) {
+ pci_err(zr->pci_dev, "%s - unsupported norm %llx\n", __func__, norm);
+ return -EINVAL;
+ }
+
+ if (norm & V4L2_STD_SECAM)
+ zr->timing = zr->card.tvn[ZR_NORM_SECAM];
+ else if (norm & V4L2_STD_NTSC)
+ zr->timing = zr->card.tvn[ZR_NORM_NTSC];
+ else
+ zr->timing = zr->card.tvn[ZR_NORM_PAL];
+
+ decoder_call(zr, video, s_std, norm);
+ encoder_call(zr, video, s_std_output, norm);
+
+ /* Make sure the changes come into effect */
+ zr->norm = norm;
+
+ return 0;
+}
+
+static int zoran_set_input(struct zoran *zr, int input)
+{
+ if (input == zr->input)
+ return 0;
+
+ if (input < 0 || input >= zr->card.inputs) {
+ pci_err(zr->pci_dev, "%s - unsupported input %d\n", __func__, input);
+ return -EINVAL;
+ }
+
+ zr->input = input;
+
+ decoder_call(zr, video, s_routing, zr->card.input[input].muxsel, 0, 0);
+
+ return 0;
+}
+
+/*
+ * ioctl routine
+ */
+
+static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card));
+ strscpy(cap->driver, "zoran", sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev));
+ cap->device_caps = zr->video_dev->device_caps;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
+{
+ unsigned int num, i;
+
+ if (fmt->index >= ARRAY_SIZE(zoran_formats))
+ return -EINVAL;
+ if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ for (num = i = 0; i < NUM_FORMATS; i++) {
+ if (zoran_formats[i].flags & flag && num++ == fmt->index) {
+ strscpy(fmt->description, zoran_formats[i].name,
+ sizeof(fmt->description));
+ /* fmt struct pre-zeroed, so adding '\0' not needed */
+ fmt->pixelformat = zoran_formats[i].fourcc;
+ if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
+ fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
+}
+
+#if 0
+/* TODO: output does not work yet */
+static int zoran_enum_fmt_vid_out(struct file *file, void *__fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK);
+}
+#endif
+
+static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+
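+ /* the JPEG codec works on fields: e.g. (illustrative, assuming PAL
+ * defaults of img_width=704 and img_height=288 per field) the
+ * undecimated case reports a full 704x576 interlaced frame */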
+ fmt->fmt.pix.width = zr->jpg_settings.img_width / zr->jpg_settings.hor_dcm;
+ fmt->fmt.pix.height = zr->jpg_settings.img_height * 2 /
+ (zr->jpg_settings.ver_dcm * zr->jpg_settings.tmp_dcm);
+ fmt->fmt.pix.sizeimage = zr->buffer_size;
+ fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
+ if (zr->jpg_settings.tmp_dcm == 1)
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
+ else
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+ fmt->fmt.pix.bytesperline = 0;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ if (zr->map_mode != ZORAN_MAP_MODE_RAW)
+ return zoran_g_fmt_vid_out(file, __fh, fmt);
+ fmt->fmt.pix.width = zr->v4l_settings.width;
+ fmt->fmt.pix.height = zr->v4l_settings.height;
+ fmt->fmt.pix.sizeimage = zr->buffer_size;
+ fmt->fmt.pix.pixelformat = zr->v4l_settings.format->fourcc;
+ fmt->fmt.pix.colorspace = zr->v4l_settings.format->colorspace;
+ fmt->fmt.pix.bytesperline = zr->v4l_settings.bytesperline;
+ if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2))
+ fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else
+ fmt->fmt.pix.field = V4L2_FIELD_TOP;
+ return 0;
+}
+
+static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+ struct zoran_jpg_settings settings;
+ int res = 0;
+
+ if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
+ return -EINVAL;
+
+ settings = zr->jpg_settings;
+
+ /* we actually need to set 'real' parameters now */
+ if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT)
+ settings.tmp_dcm = 1;
+ else
+ settings.tmp_dcm = 2;
+ settings.decimation = 0;
+ if (fmt->fmt.pix.height <= zr->jpg_settings.img_height / 2)
+ settings.ver_dcm = 2;
+ else
+ settings.ver_dcm = 1;
+ if (fmt->fmt.pix.width <= zr->jpg_settings.img_width / 4)
+ settings.hor_dcm = 4;
+ else if (fmt->fmt.pix.width <= zr->jpg_settings.img_width / 2)
+ settings.hor_dcm = 2;
+ else
+ settings.hor_dcm = 1;
+ if (settings.tmp_dcm == 1)
+ settings.field_per_buff = 2;
+ else
+ settings.field_per_buff = 1;
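+ /* example (illustrative, assuming PAL defaults of img_width=704 and
+ * img_height=288 per field): a 352x288 request ends up with
+ * hor_dcm=2, ver_dcm=1, tmp_dcm=2, i.e. one 352x288 field per buffer */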
+
+ if (settings.hor_dcm > 1) {
+ settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
+ settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
+ } else {
+ settings.img_x = 0;
+ settings.img_width = BUZ_MAX_WIDTH;
+ }
+
+ /* check */
+ res = zoran_check_jpg_settings(zr, &settings, 1);
+ if (res)
+ return res;
+
+ /* tell the user what we actually did */
+ fmt->fmt.pix.width = settings.img_width / settings.hor_dcm;
+ fmt->fmt.pix.height = settings.img_height * 2 /
+ (settings.tmp_dcm * settings.ver_dcm);
+ if (settings.tmp_dcm == 1)
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
+ else
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+
+ fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings);
+ zr->buffer_size = fmt->fmt.pix.sizeimage;
+ fmt->fmt.pix.bytesperline = 0;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return res;
+}
+
+static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+ int bpp;
+ int i;
+
+ if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
+ return zoran_try_fmt_vid_out(file, __fh, fmt);
+
+ for (i = 0; i < NUM_FORMATS; i++)
+ if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
+ break;
+
+ if (i == NUM_FORMATS) {
+ /* TODO: instead of failing, TRY_FMT should map an unsupported
+ * pixelformat onto a supported one and not return an error */
+ return -EINVAL;
+ }
+
+ fmt->fmt.pix.pixelformat = zoran_formats[i].fourcc;
+ fmt->fmt.pix.colorspace = zoran_formats[i].colorspace;
+ if (BUZ_MAX_HEIGHT < (fmt->fmt.pix.height * 2))
+ fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else
+ fmt->fmt.pix.field = V4L2_FIELD_TOP;
+
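+ /* width alignment is 2 pixels for 16bpp formats and 4 pixels otherwise,
+ * which keeps line sizes a multiple of 4 bytes as the VFE requires */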
+ bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
+ v4l_bound_align_image(&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
+ &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
+ return 0;
+}
+
+static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+ __le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat);
+ struct zoran_jpg_settings settings;
+ int res = 0;
+
+ pci_dbg(zr->pci_dev, "size=%dx%d, fmt=0x%x (%4.4s)\n",
+ fmt->fmt.pix.width, fmt->fmt.pix.height,
+ fmt->fmt.pix.pixelformat,
+ (char *)&printformat);
+ if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG)
+ return -EINVAL;
+
+ if (!fmt->fmt.pix.height || !fmt->fmt.pix.width)
+ return -EINVAL;
+
+ settings = zr->jpg_settings;
+
+ /* we actually need to set 'real' parameters now */
+ if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT)
+ settings.tmp_dcm = 1;
+ else
+ settings.tmp_dcm = 2;
+ settings.decimation = 0;
+ if (fmt->fmt.pix.height <= zr->jpg_settings.img_height / 2)
+ settings.ver_dcm = 2;
+ else
+ settings.ver_dcm = 1;
+ if (fmt->fmt.pix.width <= zr->jpg_settings.img_width / 4)
+ settings.hor_dcm = 4;
+ else if (fmt->fmt.pix.width <= zr->jpg_settings.img_width / 2)
+ settings.hor_dcm = 2;
+ else
+ settings.hor_dcm = 1;
+ if (settings.tmp_dcm == 1)
+ settings.field_per_buff = 2;
+ else
+ settings.field_per_buff = 1;
+
+ if (settings.hor_dcm > 1) {
+ settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0;
+ settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH;
+ } else {
+ settings.img_x = 0;
+ settings.img_width = BUZ_MAX_WIDTH;
+ }
+
+ /* check */
+ res = zoran_check_jpg_settings(zr, &settings, 0);
+ if (res)
+ return res;
+
+ /* it's ok, so set them */
+ zr->jpg_settings = settings;
+
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ zr->map_mode = ZORAN_MAP_MODE_JPG_REC;
+ else
+ zr->map_mode = ZORAN_MAP_MODE_JPG_PLAY;
+
+ zr->buffer_size = zoran_v4l2_calc_bufsize(&zr->jpg_settings);
+
+ /* tell the user what we actually did */
+ fmt->fmt.pix.width = settings.img_width / settings.hor_dcm;
+ fmt->fmt.pix.height = settings.img_height * 2 /
+ (settings.tmp_dcm * settings.ver_dcm);
+ if (settings.tmp_dcm == 1)
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT);
+ else
+ fmt->fmt.pix.field = (zr->jpg_settings.odd_even ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM);
+ fmt->fmt.pix.bytesperline = 0;
+ fmt->fmt.pix.sizeimage = zr->buffer_size;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return res;
+}
+
+static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
+ struct v4l2_format *fmt)
+{
+ struct zoran *zr = video_drvdata(file);
+ struct zoran_fh *fh = __fh;
+ int i;
+ int res = 0;
+
+ if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
+ return zoran_s_fmt_vid_out(file, fh, fmt);
+
+ for (i = 0; i < NUM_FORMATS; i++)
+ if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc)
+ break;
+ if (i == NUM_FORMATS) {
+ pci_err(zr->pci_dev, "VIDIOC_S_FMT - unknown/unsupported format 0x%x\n",
+ fmt->fmt.pix.pixelformat);
+ /* TODO: instead of failing, TRY_FMT should map an unsupported
+ * pixelformat onto a supported one and not return an error */
+ return -EINVAL;
+ }
+
+ fmt->fmt.pix.pixelformat = zoran_formats[i].fourcc;
+ if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
+ fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
+ if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
+ fmt->fmt.pix.width = BUZ_MAX_WIDTH;
+ if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT)
+ fmt->fmt.pix.height = BUZ_MIN_HEIGHT;
+ if (fmt->fmt.pix.width < BUZ_MIN_WIDTH)
+ fmt->fmt.pix.width = BUZ_MIN_WIDTH;
+
+ zr->map_mode = ZORAN_MAP_MODE_RAW;
+
+ res = zoran_v4l_set_format(zr, fmt->fmt.pix.width, fmt->fmt.pix.height,
+ &zoran_formats[i]);
+ if (res)
+ return res;
+
+ /* tell the user the results/missing stuff */
+ fmt->fmt.pix.bytesperline = zr->v4l_settings.bytesperline;
+ fmt->fmt.pix.sizeimage = zr->buffer_size;
+ fmt->fmt.pix.colorspace = zr->v4l_settings.format->colorspace;
+ if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2))
+ fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else
+ fmt->fmt.pix.field = V4L2_FIELD_TOP;
+ return res;
+}
+
+static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ *std = zr->norm;
+ return 0;
+}
+
+static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
+{
+ struct zoran *zr = video_drvdata(file);
+ int res = 0;
+
+ if (zr->running != ZORAN_MAP_MODE_NONE)
+ return -EBUSY;
+
+ res = zoran_set_norm(zr, std);
+ return res;
+}
+
+static int zoran_enum_input(struct file *file, void *__fh,
+ struct v4l2_input *inp)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ if (inp->index >= zr->card.inputs)
+ return -EINVAL;
+
+ strscpy(inp->name, zr->card.input[inp->index].name, sizeof(inp->name));
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+
+ /* Get status of video decoder */
+ decoder_call(zr, video, g_input_status, &inp->status);
+ return 0;
+}
+
+static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ *input = zr->input;
+
+ return 0;
+}
+
+static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
+{
+ struct zoran *zr = video_drvdata(file);
+ int res;
+
+ if (zr->running != ZORAN_MAP_MODE_NONE)
+ return -EBUSY;
+
+ res = zoran_set_input(zr, input);
+ return res;
+}
+
+#if 0
+/* TODO: output does not work yet */
+static int zoran_enum_output(struct file *file, void *__fh,
+ struct v4l2_output *outp)
+{
+ if (outp->index != 0)
+ return -EINVAL;
+
+ outp->index = 0;
+ outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
+ outp->std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ outp->capabilities = V4L2_OUT_CAP_STD;
+ strscpy(outp->name, "Autodetect", sizeof(outp->name));
+
+ return 0;
+}
+static int zoran_g_output(struct file *file, void *__fh, unsigned int *output)
+{
+ *output = 0;
+
+ return 0;
+}
+
+static int zoran_s_output(struct file *file, void *__fh, unsigned int output)
+{
+ if (output != 0)
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
+/* cropping (sub-frame capture) */
+static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+{
+ struct zoran *zr = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ pci_err(zr->pci_dev, "%s invalid combinaison\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = zr->jpg_settings.img_y;
+ sel->r.left = zr->jpg_settings.img_x;
+ sel->r.width = zr->jpg_settings.img_width;
+ sel->r.height = zr->jpg_settings.img_height;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = BUZ_MIN_WIDTH;
+ sel->r.height = BUZ_MIN_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = sel->r.left = 0;
+ sel->r.width = BUZ_MAX_WIDTH;
+ sel->r.height = BUZ_MAX_HEIGHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+{
+ struct zoran *zr = video_drvdata(file);
+ struct zoran_jpg_settings settings;
+ int res;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (!sel->r.width || !sel->r.height)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ if (zr->map_mode == ZORAN_MAP_MODE_RAW) {
+ pci_err(zr->pci_dev, "VIDIOC_S_SELECTION - subcapture only supported for compressed capture\n");
+ return -EINVAL;
+ }
+
+ settings = zr->jpg_settings;
+
+ /* move into a form that we understand */
+ settings.img_x = sel->r.left;
+ settings.img_y = sel->r.top;
+ settings.img_width = sel->r.width;
+ settings.img_height = sel->r.height;
+
+ /* check validity */
+ res = zoran_check_jpg_settings(zr, &settings, 0);
+ if (res)
+ return res;
+
+ /* accept */
+ zr->jpg_settings = settings;
+ return res;
+}
+
+static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm)
+{
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Output is disabled temporarily.
+ * The zoran is picky about the JPEG data it accepts; at the least it does
+ * not seem to support COM and APPn markers. So until a way to filter the
+ * data is implemented, output stays disabled.
+ */
+static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
+ .vidioc_querycap = zoran_querycap,
+ .vidioc_g_parm = zoran_g_parm,
+ .vidioc_s_selection = zoran_s_selection,
+ .vidioc_g_selection = zoran_g_selection,
+ .vidioc_enum_input = zoran_enum_input,
+ .vidioc_g_input = zoran_g_input,
+ .vidioc_s_input = zoran_s_input,
+/* .vidioc_enum_output = zoran_enum_output,
+ .vidioc_g_output = zoran_g_output,
+ .vidioc_s_output = zoran_s_output,*/
+ .vidioc_g_std = zoran_g_std,
+ .vidioc_s_std = zoran_s_std,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap,
+/* .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out,*/
+ .vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap,
+/* .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out,*/
+ .vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap,
+/* .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out,*/
+ .vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap,
+/* .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out,*/
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations zoran_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .write = vb2_fop_write,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+const struct video_device zoran_template = {
+ .name = ZORAN_NAME,
+ .fops = &zoran_fops,
+ .ioctl_ops = &zoran_ioctl_ops,
+ .release = &zoran_vdev_release,
+ .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+};
+
+static int zr_vb2_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct zoran *zr = vb2_get_drv_priv(vq);
+ unsigned int size = zr->buffer_size;
+
+ pci_dbg(zr->pci_dev, "%s nbuf=%u nplanes=%u", __func__, *nbuffers, *nplanes);
+
+ zr->buf_in_reserve = 0;
+
+ if (*nbuffers < vq->min_buffers_needed)
+ *nbuffers = vq->min_buffers_needed;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ else
+ return 0;
+ }
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static void zr_vb2_queue(struct vb2_buffer *vb)
+{
+ struct zoran *zr = vb2_get_drv_priv(vb->vb2_queue);
+ struct zr_buffer *buf = vb2_to_zr_buffer(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zr->queued_bufs_lock, flags);
+ list_add_tail(&buf->queue, &zr->queued_bufs);
+ zr->buf_in_reserve++;
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ if (zr->running == ZORAN_MAP_MODE_JPG_REC)
+ zoran_feed_stat_com(zr);
+ zr->queued++;
+}
+
+static int zr_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct zoran *zr = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb2_plane_size(vb, 0) < zr->buffer_size)
+ return -EINVAL;
+ zr->prepared++;
+
+ return 0;
+}
+
+int zr_set_buf(struct zoran *zr)
+{
+ struct zr_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ u32 reg;
+
+ if (zr->running == ZORAN_MAP_MODE_NONE)
+ return 0;
+
+ if (zr->inuse[0]) {
+ buf = zr->inuse[0];
+ buf->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ buf->vbuf.sequence = zr->vbseq++;
+ vbuf = &buf->vbuf;
+
+ buf->vbuf.field = V4L2_FIELD_INTERLACED;
+ vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
+ zr->inuse[0] = NULL;
+ }
+
+ spin_lock_irqsave(&zr->queued_bufs_lock, flags);
+ if (list_empty(&zr->queued_bufs)) {
+ btand(~ZR36057_ICR_INT_PIN_EN, ZR36057_ICR);
+ vb2_queue_error(zr->video_dev->queue);
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ return -EINVAL;
+ }
+ buf = list_first_entry_or_null(&zr->queued_bufs, struct zr_buffer, queue);
+ if (!buf) {
+ btand(~ZR36057_ICR_INT_PIN_EN, ZR36057_ICR);
+ vb2_queue_error(zr->video_dev->queue);
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ return -EINVAL;
+ }
+ list_del(&buf->queue);
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+
+ vbuf = &buf->vbuf;
+ vbuf->vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+
+ if (!phys_addr)
+ return -EINVAL;
+
+ zr->inuse[0] = buf;
+
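+ /* program the top and bottom field DMA start addresses; for captures
+ * taller than one field the bottom field starts one line
+ * (bytesperline) into the buffer */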
+ reg = phys_addr;
+ btwrite(reg, ZR36057_VDTR);
+ if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2)
+ reg += zr->v4l_settings.bytesperline;
+ btwrite(reg, ZR36057_VDBR);
+
+ reg = 0;
+ if (zr->v4l_settings.height > BUZ_MAX_HEIGHT / 2)
+ reg += zr->v4l_settings.bytesperline;
+ reg = (reg << ZR36057_VSSFGR_DISP_STRIDE);
+ reg |= ZR36057_VSSFGR_VID_OVF;
+ reg |= ZR36057_VSSFGR_SNAP_SHOT;
+ reg |= ZR36057_VSSFGR_FRAME_GRAB;
+ btwrite(reg, ZR36057_VSSFGR);
+
+ btor(ZR36057_VDCR_VID_EN, ZR36057_VDCR);
+ return 0;
+}
+
+static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct zoran *zr = vq->drv_priv;
+ int j;
+
+ for (j = 0; j < BUZ_NUM_STAT_COM; j++) {
+ zr->stat_com[j] = cpu_to_le32(1);
+ zr->inuse[j] = NULL;
+ }
+
+ if (zr->map_mode != ZORAN_MAP_MODE_RAW) {
+ pci_info(zr->pci_dev, "START JPG\n");
+ zr36057_restart(zr);
+ zoran_init_hardware(zr);
+ if (zr->map_mode == ZORAN_MAP_MODE_JPG_REC)
+ zr36057_enable_jpg(zr, BUZ_MODE_MOTION_DECOMPRESS);
+ else
+ zr36057_enable_jpg(zr, BUZ_MODE_MOTION_COMPRESS);
+ zoran_feed_stat_com(zr);
+ jpeg_start(zr);
+ zr->running = zr->map_mode;
+ btor(ZR36057_ICR_INT_PIN_EN, ZR36057_ICR);
+ return 0;
+ }
+
+ pci_info(zr->pci_dev, "START RAW\n");
+ zr36057_restart(zr);
+ zoran_init_hardware(zr);
+
+ zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
+ zr36057_set_memgrab(zr, 1);
+ zr->running = zr->map_mode;
+ btor(ZR36057_ICR_INT_PIN_EN, ZR36057_ICR);
+ return 0;
+}
+
+static void zr_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct zoran *zr = vq->drv_priv;
+ struct zr_buffer *buf;
+ unsigned long flags;
+ int j;
+
+ btand(~ZR36057_ICR_INT_PIN_EN, ZR36057_ICR);
+ if (zr->map_mode != ZORAN_MAP_MODE_RAW)
+ zr36057_enable_jpg(zr, BUZ_MODE_IDLE);
+ zr36057_set_memgrab(zr, 0);
+ zr->running = ZORAN_MAP_MODE_NONE;
+
+ zoran_set_pci_master(zr, 0);
+
+ if (!pass_through) { /* Switch to color bar */
+ decoder_call(zr, video, s_stream, 0);
+ encoder_call(zr, video, s_routing, 2, 0, 0);
+ }
+
+ for (j = 0; j < BUZ_NUM_STAT_COM; j++) {
+ zr->stat_com[j] = cpu_to_le32(1);
+ if (!zr->inuse[j])
+ continue;
+ buf = zr->inuse[j];
+ pci_dbg(zr->pci_dev, "%s clean buf %d\n", __func__, j);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ zr->inuse[j] = NULL;
+ }
+
+ spin_lock_irqsave(&zr->queued_bufs_lock, flags);
+ while (!list_empty(&zr->queued_bufs)) {
+ buf = list_entry(zr->queued_bufs.next, struct zr_buffer, queue);
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ zr->buf_in_reserve--;
+ }
+ spin_unlock_irqrestore(&zr->queued_bufs_lock, flags);
+ if (zr->buf_in_reserve)
+ pci_err(zr->pci_dev, "Buffer remaining %d\n", zr->buf_in_reserve);
+ zr->map_mode = ZORAN_MAP_MODE_RAW;
+}
+
+static const struct vb2_ops zr_video_qops = {
+ .queue_setup = zr_vb2_queue_setup,
+ .buf_queue = zr_vb2_queue,
+ .buf_prepare = zr_vb2_prepare,
+ .start_streaming = zr_vb2_start_streaming,
+ .stop_streaming = zr_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq)
+{
+ int err;
+
+ spin_lock_init(&zr->queued_bufs_lock);
+ INIT_LIST_HEAD(&zr->queued_bufs);
+
+ vq->dev = &zr->pci_dev->dev;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
+ vq->drv_priv = zr;
+ vq->buf_struct_size = sizeof(struct zr_buffer);
+ vq->ops = &zr_video_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->gfp_flags = GFP_DMA32;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->min_buffers_needed = 9;
+ vq->lock = &zr->lock;
+ err = vb2_queue_init(vq);
+ if (err)
+ return err;
+ zr->video_dev->queue = vq;
+ return 0;
+}
+
+void zoran_queue_exit(struct zoran *zr)
+{
+ vb2_queue_release(zr->video_dev->queue);
+}
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/staging/media/zoran/zr36016.c
new file mode 100644
index 000000000000..2d7dc7abde79
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36016.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran ZR36016 basic configuration functions
+ *
+ * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+/* headerfile of this module */
+#include "zr36016.h"
+
+/* codec io API */
+#include "videocodec.h"
+
+/* It doesn't make sense to have more than 20 or so codecs;
+   this limit just prevents some unwanted loops. */
+#define MAX_CODECS 20
+
+/* amount of chips attached via this driver */
+static int zr36016_codecs;
+
+/* debugging is available via module parameter */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-4)");
+
+#define dprintk(num, format, args...) \
+ do { \
+ if (debug >= num) \
+ printk(format, ##args); \
+ } while (0)
+
+/* =========================================================================
+ Local hardware I/O functions:
+
+ read/write via codec layer (registers are located in the master device)
+ ========================================================================= */
+
+/* read and write functions */
+static u8 zr36016_read(struct zr36016 *ptr, u16 reg)
+{
+ u8 value = 0;
+
+ /* just in case something is wrong... */
+ if (ptr->codec->master_data->readreg)
+ value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF;
+ else
+ pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name);
+
+ dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
+
+ return value;
+}
+
+static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value)
+{
+ dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
+
+ // just in case something is wrong...
+ if (ptr->codec->master_data->writereg)
+ ptr->codec->master_data->writereg(ptr->codec, reg, value);
+ else
+ pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name);
+}
+
+/* indirect read and write functions */
+/* The 016 supports auto-address-increment, but writing the address
+ * each time costs little and is safer... */
+static u8 zr36016_readi(struct zr36016 *ptr, u16 reg)
+{
+ u8 value = 0;
+
+ /* just in case something is wrong... */
+ if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) {
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
+ value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA
+ } else {
+ pr_err("%s: invalid I/O setup, nothing read (i)!\n", ptr->name);
+ }
+
+ dprintk(4, "%s: reading indirect from 0x%04x: %02x\n", ptr->name, reg, value);
+ return value;
+}
+
+static void zr36016_writei(struct zr36016 *ptr, u16 reg, u8 value)
+{
+ dprintk(4, "%s: writing indirect 0x%02x to 0x%04x\n", ptr->name,
+ value, reg);
+
+ /* just in case something is wrong... */
+ if (ptr->codec->master_data->writereg) {
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA
+ } else {
+ pr_err("%s: invalid I/O setup, nothing written (i)!\n", ptr->name);
+ }
+}
+
+/* =========================================================================
+ Local helper function:
+
+ version read
+ ========================================================================= */
+
+/* version kept in datastructure */
+static u8 zr36016_read_version(struct zr36016 *ptr)
+{
+ ptr->version = zr36016_read(ptr, 0) >> 4;
+ return ptr->version;
+}
+
+/* =========================================================================
+ Local helper function:
+
+ basic test of "connectivity", writes/reads to/from PAX-Lo register
+ ========================================================================= */
+
+static int zr36016_basic_test(struct zr36016 *ptr)
+{
+ if (debug) {
+ int i;
+
+ zr36016_writei(ptr, ZR016I_PAX_LO, 0x55);
+ dprintk(1, KERN_INFO "%s: registers: ", ptr->name);
+ for (i = 0; i <= 0x0b; i++)
+ dprintk(1, "%02x ", zr36016_readi(ptr, i));
+ dprintk(1, "\n");
+ }
+ // for testing just write 0, then the default value to a register and read
+ // it back in both cases
+ zr36016_writei(ptr, ZR016I_PAX_LO, 0x00);
+ if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0) {
+ pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name);
+ return -ENXIO;
+ }
+ zr36016_writei(ptr, ZR016I_PAX_LO, 0x0d0);
+ if (zr36016_readi(ptr, ZR016I_PAX_LO) != 0x0d0) {
+ pr_err("%s: attach failed, can't connect to vfe processor!\n", ptr->name);
+ return -ENXIO;
+ }
+ // we allow version numbers from 0-3, should be enough, though
+ zr36016_read_version(ptr);
+ if (ptr->version & 0x0c) {
+ pr_err("%s: attach failed, suspicious version %d found...\n", ptr->name,
+ ptr->version);
+ return -ENXIO;
+ }
+
+ return 0; /* looks good! */
+}
+
+/* =========================================================================
+ Local helper function:
+
+   simple loop for pushing the init datasets -- currently unused --
+ ========================================================================= */
+
+#if 0
+static int zr36016_pushit(struct zr36016 *ptr,
+ u16 startreg,
+ u16 len,
+ const char *data)
+{
+ int i = 0;
+
+ dprintk(4, "%s: write data block to 0x%04x (len=%d)\n",
+ ptr->name, startreg, len);
+ while (i < len) {
+ zr36016_writei(ptr, startreg++, data[i++]);
+ }
+
+ return i;
+}
+#endif
+
+/* =========================================================================
+ Basic datasets & init:
+
+ //TODO//
+ ========================================================================= */
+
+static void zr36016_init(struct zr36016 *ptr)
+{
+ // stop any processing
+ zr36016_write(ptr, ZR016_GOSTOP, 0);
+
+ // mode setup (yuv422 in and out, compression/expansion depending on mode)
+ zr36016_write(ptr, ZR016_MODE,
+ ZR016_YUV422 | ZR016_YUV422_YUV422 |
+ (ptr->mode == CODEC_DO_COMPRESSION ?
+ ZR016_COMPRESSION : ZR016_EXPANSION));
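+ /* with the mode bits above, compression ends up writing
+ * 0x40 | 0x11 | 0x80 = 0xd1 to the mode register */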
+
+ // misc setup
+ zr36016_writei(ptr, ZR016I_SETUP1,
+ (ptr->xdec ? (ZR016_HRFL | ZR016_HORZ) : 0) |
+ (ptr->ydec ? ZR016_VERT : 0) | ZR016_CNTI);
+ zr36016_writei(ptr, ZR016I_SETUP2, ZR016_CCIR);
+
+ // Window setup
+ // (no extra offset for now, norm defines offset, default width height)
+ zr36016_writei(ptr, ZR016I_PAX_HI, ptr->width >> 8);
+ zr36016_writei(ptr, ZR016I_PAX_LO, ptr->width & 0xFF);
+ zr36016_writei(ptr, ZR016I_PAY_HI, ptr->height >> 8);
+ zr36016_writei(ptr, ZR016I_PAY_LO, ptr->height & 0xFF);
+ zr36016_writei(ptr, ZR016I_NAX_HI, ptr->xoff >> 8);
+ zr36016_writei(ptr, ZR016I_NAX_LO, ptr->xoff & 0xFF);
+ zr36016_writei(ptr, ZR016I_NAY_HI, ptr->yoff >> 8);
+ zr36016_writei(ptr, ZR016I_NAY_LO, ptr->yoff & 0xFF);
+
+ /* shall we continue now, please? */
+ zr36016_write(ptr, ZR016_GOSTOP, 1);
+}
+
+/* =========================================================================
+ CODEC API FUNCTIONS
+
+   these functions are accessed by the master via the API structure
+ ========================================================================= */
+
+/* set compression/expansion mode and launch the codec -
+ this should be the last call from the master before starting processing */
+static int zr36016_set_mode(struct videocodec *codec, int mode)
+{
+ struct zr36016 *ptr = (struct zr36016 *)codec->data;
+
+ dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+
+ if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
+ return -EINVAL;
+
+ ptr->mode = mode;
+ zr36016_init(ptr);
+
+ return 0;
+}
+
+/* set picture size */
+static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm,
+ struct vfe_settings *cap, struct vfe_polarity *pol)
+{
+ struct zr36016 *ptr = (struct zr36016 *)codec->data;
+
+ dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) call\n",
+ ptr->name, norm->h_start, norm->v_start,
+ cap->x, cap->y, cap->width, cap->height,
+ cap->decimation);
+
+ /* if () return -EINVAL;
+ * trust the master driver that it knows what it does - so
+ * we allow invalid startx/y for now ... */
+ ptr->width = cap->width;
+ ptr->height = cap->height;
+ /* (Ronald) This is ugly. zoran_device.c, line 387
+ * already mentions what happens if h_start is even
+ * (blue faces, etc., cr/cb inversed). There's probably
+ * some good reason why h_start is 0 instead of 1, so I'm
+ * leaving it to this for now, but really... This can be
+ * done a lot simpler */
+ ptr->xoff = (norm->h_start ? norm->h_start : 1) + cap->x;
+ /* Something to note here (I don't understand it), setting
+ * v_start too high will cause the codec to 'not work'. I
+ * really don't get it. values of 16 (v_start) already break
+ * it here. Just '0' seems to work. More testing needed! */
+ ptr->yoff = norm->v_start + cap->y;
+ /* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */
+ ptr->xdec = ((cap->decimation & 0xff) == 1) ? 0 : 1;
+ ptr->ydec = (((cap->decimation >> 8) & 0xff) == 1) ? 0 : 1;
+
+ return 0;
+}
+
+/* additional control functions */
+static int zr36016_control(struct videocodec *codec, int type, int size, void *data)
+{
+ struct zr36016 *ptr = (struct zr36016 *)codec->data;
+ int *ival = (int *)data;
+
+ dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size);
+
+ switch (type) {
+ case CODEC_G_STATUS: /* get last status - we don't know it ... */
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = 0;
+ break;
+
+ case CODEC_G_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = 0;
+ break;
+
+ case CODEC_S_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ if (*ival != 0)
+ return -EINVAL;
+ /* not needed, do nothing */
+ return 0;
+
+ case CODEC_G_VFE:
+ case CODEC_S_VFE:
+ return 0;
+
+ case CODEC_S_MMAP:
+ /* not available, give an error */
+ return -ENXIO;
+
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+/* =========================================================================
+ Exit and unregister function:
+
+ Deinitializes Zoran's JPEG processor
+ ========================================================================= */
+
+static int zr36016_unset(struct videocodec *codec)
+{
+ struct zr36016 *ptr = codec->data;
+
+ if (ptr) {
+ /* do we need some codec deinit here, too? */
+
+ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num);
+ kfree(ptr);
+ codec->data = NULL;
+
+ zr36016_codecs--;
+ return 0;
+ }
+
+ return -EFAULT;
+}
+
+/* =========================================================================
+ Setup and registry function:
+
+ Initializes Zoran's JPEG processor
+
+ Also sets pixel size, average code size, mode (compr./decompr.)
+ (the given size is determined by the processor with the video interface)
+ ========================================================================= */
+
+static int zr36016_setup(struct videocodec *codec)
+{
+ struct zr36016 *ptr;
+ int res;
+
+ dprintk(2, "zr36016: initializing VFE subsystem #%d.\n", zr36016_codecs);
+
+ if (zr36016_codecs == MAX_CODECS) {
+ pr_err("zr36016: Can't attach more codecs!\n");
+ return -ENOSPC;
+ }
+ //mem structure init
+ codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]", zr36016_codecs);
+ ptr->num = zr36016_codecs++;
+ ptr->codec = codec;
+
+ //testing
+ res = zr36016_basic_test(ptr);
+ if (res < 0) {
+ zr36016_unset(codec);
+ return res;
+ }
+ //final setup
+ ptr->mode = CODEC_DO_COMPRESSION;
+ ptr->width = 768;
+ ptr->height = 288;
+ ptr->xdec = 1;
+ ptr->ydec = 0;
+ zr36016_init(ptr);
+
+ dprintk(1, KERN_INFO "%s: codec v%d attached and running\n", ptr->name, ptr->version);
+
+ return 0;
+}
+
+static const struct videocodec zr36016_codec = {
+ .owner = THIS_MODULE,
+ .name = "zr36016",
+ .magic = 0L, /* magic not used */
+ .flags =
+ CODEC_FLAG_HARDWARE | CODEC_FLAG_VFE | CODEC_FLAG_ENCODER |
+ CODEC_FLAG_DECODER,
+ .type = CODEC_TYPE_ZR36016,
+ .setup = zr36016_setup, /* functionality */
+ .unset = zr36016_unset,
+ .set_mode = zr36016_set_mode,
+ .set_video = zr36016_set_video,
+ .control = zr36016_control,
+ /* others are not used */
+};
+
+/* =========================================================================
+ HOOK IN DRIVER AS KERNEL MODULE
+ ========================================================================= */
+
+static int __init zr36016_init_module(void)
+{
+ //dprintk(1, "ZR36016 driver %s\n",ZR016_VERSION);
+ zr36016_codecs = 0;
+ return videocodec_register(&zr36016_codec);
+}
+
+static void __exit zr36016_cleanup_module(void)
+{
+ if (zr36016_codecs) {
+ dprintk(1,
+ "zr36016: something's wrong - %d codecs left somehow.\n",
+ zr36016_codecs);
+ }
+ videocodec_unregister(&zr36016_codec);
+}
+
+module_init(zr36016_init_module);
+module_exit(zr36016_cleanup_module);
+
+MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
+MODULE_DESCRIPTION("Driver module for ZR36016 video frontends");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36016.h b/drivers/staging/media/zoran/zr36016.h
new file mode 100644
index 000000000000..1475f971cc24
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36016.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran ZR36016 basic configuration functions - header file
+ *
+ * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+#ifndef ZR36016_H
+#define ZR36016_H
+
+/* data stored for each zoran jpeg codec chip */
+struct zr36016 {
+ char name[32];
+ int num;
+ /* io datastructure */
+ struct videocodec *codec;
+ // coder status
+ __u8 version;
+ // actual coder setup
+ int mode;
+
+ __u16 xoff;
+ __u16 yoff;
+ __u16 width;
+ __u16 height;
+ __u16 xdec;
+ __u16 ydec;
+};
+
+/* direct register addresses */
+#define ZR016_GOSTOP 0x00
+#define ZR016_MODE 0x01
+#define ZR016_IADDR 0x02
+#define ZR016_IDATA 0x03
+
+/* indirect register addresses */
+#define ZR016I_SETUP1 0x00
+#define ZR016I_SETUP2 0x01
+#define ZR016I_NAX_LO 0x02
+#define ZR016I_NAX_HI 0x03
+#define ZR016I_PAX_LO 0x04
+#define ZR016I_PAX_HI 0x05
+#define ZR016I_NAY_LO 0x06
+#define ZR016I_NAY_HI 0x07
+#define ZR016I_PAY_LO 0x08
+#define ZR016I_PAY_HI 0x09
+#define ZR016I_NOL_LO 0x0a
+#define ZR016I_NOL_HI 0x0b
+
+/* possible values for mode register */
+#define ZR016_RGB444_YUV444 0x00
+#define ZR016_RGB444_YUV422 0x01
+#define ZR016_RGB444_YUV411 0x02
+#define ZR016_RGB444_Y400 0x03
+#define ZR016_RGB444_RGB444 0x04
+#define ZR016_YUV444_YUV444 0x08
+#define ZR016_YUV444_YUV422 0x09
+#define ZR016_YUV444_YUV411 0x0a
+#define ZR016_YUV444_Y400 0x0b
+#define ZR016_YUV444_RGB444 0x0c
+#define ZR016_YUV422_YUV422 0x11
+#define ZR016_YUV422_YUV411 0x12
+#define ZR016_YUV422_Y400 0x13
+#define ZR016_YUV411_YUV411 0x16
+#define ZR016_YUV411_Y400 0x17
+#define ZR016_4444_4444 0x19
+#define ZR016_100_100 0x1b
+
+#define ZR016_RGB444 0x00
+#define ZR016_YUV444 0x20
+#define ZR016_YUV422 0x40
+
+#define ZR016_COMPRESSION 0x80
+#define ZR016_EXPANSION 0x80
+
+/* possible values for setup 1 register */
+#define ZR016_CKRT 0x80
+#define ZR016_VERT 0x40
+#define ZR016_HORZ 0x20
+#define ZR016_HRFL 0x10
+#define ZR016_DSFL 0x08
+#define ZR016_SBFL 0x04
+#define ZR016_RSTR 0x02
+#define ZR016_CNTI 0x01
+
+/* possible values for setup 2 register */
+#define ZR016_SYEN 0x40
+#define ZR016_CCIR 0x04
+#define ZR016_SIGN 0x02
+#define ZR016_YMCS 0x01
+
+#endif /* ZR36016_H */
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/staging/media/zoran/zr36050.c
new file mode 100644
index 000000000000..2826f4e5d37b
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36050.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran ZR36050 basic configuration functions
+ *
+ * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+#define ZR050_VERSION "v0.7.1"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <linux/types.h>
+#include <linux/wait.h>
+
+/* I/O commands, error codes */
+#include <asm/io.h>
+
+/* headerfile of this module */
+#include "zr36050.h"
+
+/* codec io API */
+#include "videocodec.h"
+
+/* It doesn't make sense to have more than 20 or so codecs;
+   this limit just prevents some unwanted loops. */
+#define MAX_CODECS 20
+
+/* amount of chips attached via this driver */
+static int zr36050_codecs;
+
+/* debugging is available via module parameter */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-4)");
+
+#define dprintk(num, format, args...) \
+ do { \
+ if (debug >= num) \
+ printk(format, ##args); \
+ } while (0)
+
+/* =========================================================================
+ Local hardware I/O functions:
+
+ read/write via codec layer (registers are located in the master device)
+ ========================================================================= */
+
+/* read and write functions */
+static u8 zr36050_read(struct zr36050 *ptr, u16 reg)
+{
+ u8 value = 0;
+
+ /* just in case something is wrong... */
+ if (ptr->codec->master_data->readreg)
+ value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF;
+ else
+ dprintk(1,
+ KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name);
+
+ dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value);
+
+ return value;
+}
+
+static void zr36050_write(struct zr36050 *ptr, u16 reg, u8 value)
+{
+ dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg);
+
+ /* just in case something is wrong... */
+ if (ptr->codec->master_data->writereg)
+ ptr->codec->master_data->writereg(ptr->codec, reg, value);
+ else
+ dprintk(1,
+ KERN_ERR
+ "%s: invalid I/O setup, nothing written!\n",
+ ptr->name);
+}
+
+/* =========================================================================
+ Local helper function:
+
+ status read
+ ========================================================================= */
+
+/* status is kept in datastructure */
+static u8 zr36050_read_status1(struct zr36050 *ptr)
+{
+ ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1);
+
+ zr36050_read(ptr, 0);
+ return ptr->status1;
+}
+
+/* =========================================================================
+ Local helper function:
+
+ scale factor read
+ ========================================================================= */
+
+/* scale factor is kept in datastructure */
+static u16 zr36050_read_scalefactor(struct zr36050 *ptr)
+{
+ ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) |
+ (zr36050_read(ptr, ZR050_SF_LO) & 0xFF);
+
+ /* leave register 0 selected for an eventual GO from the master */
+ zr36050_read(ptr, 0);
+ return ptr->scalefact;
+}
+
+/* =========================================================================
+ Local helper function:
+
+ wait if codec is ready to proceed (end of processing) or time is over
+ ========================================================================= */
+
+static void zr36050_wait_end(struct zr36050 *ptr)
+{
+ int i = 0;
+
+ while (!(zr36050_read_status1(ptr) & 0x4)) {
+ udelay(1);
+ if (i++ > 200000) { // 200ms, there is for sure something wrong!!!
+ dprintk(1,
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
+ ptr->name, ptr->status1);
+ break;
+ }
+ }
+}
+
+/* =========================================================================
+ Local helper function:
+
+ basic test of "connectivity", writes/reads to/from memory the SOF marker
+ ========================================================================= */
+
+static int zr36050_basic_test(struct zr36050 *ptr)
+{
+ zr36050_write(ptr, ZR050_SOF_IDX, 0x00);
+ zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00);
+ if ((zr36050_read(ptr, ZR050_SOF_IDX) |
+ zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) {
+ dprintk(1,
+ KERN_ERR
+ "%s: attach failed, can't connect to jpeg processor!\n",
+ ptr->name);
+ return -ENXIO;
+ }
+ zr36050_write(ptr, ZR050_SOF_IDX, 0xff);
+ zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0);
+ if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) |
+ zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) {
+ dprintk(1,
+ KERN_ERR
+ "%s: attach failed, can't connect to jpeg processor!\n",
+ ptr->name);
+ return -ENXIO;
+ }
+
+ zr36050_wait_end(ptr);
+ if ((ptr->status1 & 0x4) == 0) {
+ dprintk(1,
+ KERN_ERR
+ "%s: attach failed, jpeg processor failed (end flag)!\n",
+ ptr->name);
+ return -EBUSY;
+ }
+
+ return 0; /* looks good! */
+}
+
+/* =========================================================================
+ Local helper function:
+
+ simple loop for pushing the init datasets
+ ========================================================================= */
+
+static int zr36050_pushit(struct zr36050 *ptr, u16 startreg, u16 len, const char *data)
+{
+ int i = 0;
+
+ dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
+ startreg, len);
+ while (i < len)
+ zr36050_write(ptr, startreg++, data[i++]);
+
+ return i;
+}
+
+/* =========================================================================
+ Basic datasets:
+
+   jpeg baseline setup data (you can find it in lots of places on the
+   internet, or just extract it from any regular .jpg image...)
+
+   Could be variable, but as long as that isn't needed they are just fixed
+   to save memory. Otherwise, expand the zr36050 structure with arrays, push
+   the values into it and initialize from there, as e.g. the Linux
+   zr36057/60 driver does it.
+ ========================================================================= */
+
+static const char zr36050_dqt[0x86] = {
+ 0xff, 0xdb, //Marker: DQT
+ 0x00, 0x84, //Length: 2*65+2
+ 0x00, //Pq,Tq first table
+ 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
+ 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
+ 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
+ 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
+ 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
+ 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
+ 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
+ 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
+ 0x01, //Pq,Tq second table
+ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
+ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+static const char zr36050_dht[0x1a4] = {
+ 0xff, 0xc4, //Marker: DHT
+ 0x01, 0xa2, //Length: 2*AC, 2*DC
+ 0x00, //DC first table
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x01, //DC second table
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, //AC first table
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00,
+ 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11,
+ 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61,
+ 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1,
+ 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24,
+ 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
+ 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
+ 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
+ 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+ 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9,
+ 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8,
+ 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
+ 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA,
+ 0x11, //AC second table
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01,
+ 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
+ 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
+ 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
+ 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
+ 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
+ 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8,
+ 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA
+};
+
+/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */
+#define NO_OF_COMPONENTS 0x3 //Y,U,V
+#define BASELINE_PRECISION 0x8 //MCU size (?)
+static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT
+static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC
+static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
+
+/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */
+static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
+static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
+
+/* =========================================================================
+ Local helper functions:
+
+ calculation and setup of parameter-dependent JPEG baseline segments
+ (needed for compression only)
+ ========================================================================= */
+
+/* ------------------------------------------------------------------------- */
+
+/* SOF (start of frame) segment depends on width, height and sampling ratio
+ of each color component */
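+/* e.g. (illustrative, assuming the driver's 2:1:1 horizontal and 1:1:1
+   vertical sampling tables): a 704x288 frame emits the 19 SOF bytes
+   ff c0 00 11 08 01 20 02 c0 03 00 21 00 01 11 01 02 11 01 */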
+
+static int zr36050_set_sof(struct zr36050 *ptr)
+{
+ char sof_data[34]; // max. size of register set
+ int i;
+
+ dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
+ ptr->width, ptr->height, NO_OF_COMPONENTS);
+ sof_data[0] = 0xff;
+ sof_data[1] = 0xc0;
+ sof_data[2] = 0x00;
+ sof_data[3] = (3 * NO_OF_COMPONENTS) + 8;
+ sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050
+ sof_data[5] = (ptr->height) >> 8;
+ sof_data[6] = (ptr->height) & 0xff;
+ sof_data[7] = (ptr->width) >> 8;
+ sof_data[8] = (ptr->width) & 0xff;
+ sof_data[9] = NO_OF_COMPONENTS;
+ for (i = 0; i < NO_OF_COMPONENTS; i++) {
+ sof_data[10 + (i * 3)] = i; // index identifier
+ sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios
+ sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection
+ }
+ return zr36050_pushit(ptr, ZR050_SOF_IDX,
+ (3 * NO_OF_COMPONENTS) + 10, sof_data);
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* SOS (start of scan) segment depends on the used scan components
+ of each color component */
+
+static int zr36050_set_sos(struct zr36050 *ptr)
+{
+ char sos_data[16]; // max. size of register set
+ int i;
+
+ dprintk(3, "%s: write SOS\n", ptr->name);
+ sos_data[0] = 0xff;
+ sos_data[1] = 0xda;
+ sos_data[2] = 0x00;
+ sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3;
+ sos_data[4] = NO_OF_COMPONENTS;
+ for (i = 0; i < NO_OF_COMPONENTS; i++) {
+ sos_data[5 + (i * 2)] = i; // index
+ sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel.
+ }
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F;
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00;
+ return zr36050_pushit(ptr, ZR050_SOS1_IDX,
+ 4 + 1 + (2 * NO_OF_COMPONENTS) + 3,
+ sos_data);
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* DRI (define restart interval) */
+
+static int zr36050_set_dri(struct zr36050 *ptr)
+{
+ char dri_data[6]; // max. size of register set
+
+ dprintk(3, "%s: write DRI\n", ptr->name);
+ dri_data[0] = 0xff;
+ dri_data[1] = 0xdd;
+ dri_data[2] = 0x00;
+ dri_data[3] = 0x04;
+ dri_data[4] = ptr->dri >> 8;
+ dri_data[5] = ptr->dri & 0xff;
+ return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data);
+}
+
+/* =========================================================================
+ Setup function:
+
+ Setup compression/decompression of Zoran's JPEG processor
+   (see also the zoran 36050 manual)
+
+ ... sorry for the spaghetti code ...
+ ========================================================================= */
+static void zr36050_init(struct zr36050 *ptr)
+{
+ int sum = 0;
+ long bitcnt, tmp;
+
+ if (ptr->mode == CODEC_DO_COMPRESSION) {
+ dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
+
+ /* 050 communicates with 057 in master mode */
+ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR);
+
+ /* encoding table preload for compression */
+ zr36050_write(ptr, ZR050_MODE,
+ ZR050_MO_COMP | ZR050_MO_TLM);
+ zr36050_write(ptr, ZR050_OPTIONS, 0);
+
+ /* disable all IRQs */
+ zr36050_write(ptr, ZR050_INT_REQ_0, 0);
+ zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1
+
+ /* volume control settings */
+ /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/
+ zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8);
+ zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff);
+
+ zr36050_write(ptr, ZR050_AF_HI, 0xff);
+ zr36050_write(ptr, ZR050_AF_M, 0xff);
+ zr36050_write(ptr, ZR050_AF_LO, 0xff);
+
+ /* setup the variable jpeg tables */
+ sum += zr36050_set_sof(ptr);
+ sum += zr36050_set_sos(ptr);
+ sum += zr36050_set_dri(ptr);
+
+ /* setup the fixed jpeg tables - maybe variable, though -
+ * (see table init section above) */
+ dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name);
+ sum += zr36050_pushit(ptr, ZR050_DQT_IDX,
+ sizeof(zr36050_dqt), zr36050_dqt);
+ sum += zr36050_pushit(ptr, ZR050_DHT_IDX,
+ sizeof(zr36050_dht), zr36050_dht);
+ zr36050_write(ptr, ZR050_APP_IDX, 0xff);
+ zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn);
+ zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00);
+ zr36050_write(ptr, ZR050_APP_IDX + 3, ptr->app.len + 2);
+ sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60,
+ ptr->app.data) + 4;
+ zr36050_write(ptr, ZR050_COM_IDX, 0xff);
+ zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe);
+ zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00);
+ zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2);
+ sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60,
+ ptr->com.data) + 4;
+
+ /* do the internal huffman table preload */
+ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI);
+
+ zr36050_write(ptr, ZR050_GO, 1); // launch codec
+ zr36050_wait_end(ptr);
+ dprintk(2, "%s: Status after table preload: 0x%02x\n",
+ ptr->name, ptr->status1);
+
+ if ((ptr->status1 & 0x4) == 0) {
+ pr_err("%s: init aborted!\n", ptr->name);
+ return; // something is wrong, it's timed out!!!!
+ }
+
+ /* setup misc. data for compression (target code sizes) */
+
+ /* size of compressed code to reach without header data */
+ sum = ptr->real_code_vol - sum;
+ bitcnt = sum << 3; /* need the size in bits */
+
+ tmp = bitcnt >> 16;
+ dprintk(3,
+ "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
+ ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
+ zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8);
+ zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff);
+ tmp = bitcnt & 0xffff;
+ zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8);
+ zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff);
+
+ bitcnt -= bitcnt >> 7; // bits without stuffing
+ bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
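+ /* illustrative numbers (assumed): real_code_vol=32768 bytes minus
+ * ~600 header bytes gives ~257000 bits; dropping 1/128 for
+ * stuffing and then 5/64 for EOB leaves ~235000 net data bits
+ * for the TCV_DATA target below */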
+
+ tmp = bitcnt >> 16;
+ dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
+ ptr->name, bitcnt, tmp);
+ zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8);
+ zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff);
+ tmp = bitcnt & 0xffff;
+ zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8);
+ zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff);
+
+ /* compression setup with or without bitrate control */
+ zr36050_write(ptr, ZR050_MODE,
+ ZR050_MO_COMP | ZR050_MO_PASS2 |
+ (ptr->bitrate_ctrl ? ZR050_MO_BRC : 0));
+
+ /* these headers seem to deliver "valid AVI" jpeg frames */
+ zr36050_write(ptr, ZR050_MARKERS_EN,
+ ZR050_ME_DQT | ZR050_ME_DHT |
+ ((ptr->app.len > 0) ? ZR050_ME_APP : 0) |
+ ((ptr->com.len > 0) ? ZR050_ME_COM : 0));
+ } else {
+ dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
+
+ /* 050 communicates with 055 in master mode */
+ zr36050_write(ptr, ZR050_HARDWARE,
+ ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK);
+
+ /* encoding table preload */
+ zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM);
+
+ /* disable all IRQs */
+ zr36050_write(ptr, ZR050_INT_REQ_0, 0);
+ zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1
+
+ dprintk(3, "%s: write DHT\n", ptr->name);
+ zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht),
+ zr36050_dht);
+
+ /* do the internal huffman table preload */
+ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI);
+
+ zr36050_write(ptr, ZR050_GO, 1); // launch codec
+ zr36050_wait_end(ptr);
+ dprintk(2, "%s: Status after table preload: 0x%02x\n",
+ ptr->name, ptr->status1);
+
+ if ((ptr->status1 & 0x4) == 0) {
+ pr_err("%s: init aborted!\n", ptr->name);
+ return; // something is wrong, it's timed out!!!!
+ }
+
+ /* setup misc. data for expansion */
+ zr36050_write(ptr, ZR050_MODE, 0);
+ zr36050_write(ptr, ZR050_MARKERS_EN, 0);
+ }
+
+ /* address 0 selected, to allow a GO from the master */
+ zr36050_read(ptr, 0);
+}
+
+/* =========================================================================
+ CODEC API FUNCTIONS
+
+   these functions are accessed by the master via the API structure
+ ========================================================================= */
+
+/* set compression/expansion mode and launch the codec -
+ this should be the last call from the master before starting processing */
+static int zr36050_set_mode(struct videocodec *codec, int mode)
+{
+ struct zr36050 *ptr = (struct zr36050 *)codec->data;
+
+ dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+
+ if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION))
+ return -EINVAL;
+
+ ptr->mode = mode;
+ zr36050_init(ptr);
+
+ return 0;
+}
+
+/* set picture size (norm is ignored as the codec doesn't know about it) */
+static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm,
+ struct vfe_settings *cap, struct vfe_polarity *pol)
+{
+ struct zr36050 *ptr = (struct zr36050 *)codec->data;
+ int size;
+
+ dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n",
+ ptr->name, norm->h_start, norm->v_start,
+ cap->x, cap->y, cap->width, cap->height,
+ cap->decimation, cap->quality);
+ /* if () return -EINVAL;
+ * trust the master driver that it knows what it does - so
+ * we allow invalid startx/y and norm for now ... */
+ ptr->width = cap->width / (cap->decimation & 0xff);
+ ptr->height = cap->height / ((cap->decimation >> 8) & 0xff);
+
+ /* (KM) JPEG quality */
+ size = ptr->width * ptr->height;
+ size *= 16; /* size in bits */
+ /* apply quality setting */
+ size = size * cap->quality / 200;
+
+ /* Minimum: 1 KB */
+ if (size < 8192)
+ size = 8192;
+ /* Maximum: 7/8 of code buffer */
+ if (size > ptr->total_code_vol * 7)
+ size = ptr->total_code_vol * 7;
+
+ ptr->real_code_vol = size >> 3; /* in bytes */
+
+ /* Set max_block_vol here (previously in zr36050_init, moved
+ * here for consistency with the zr36060 code) */
+ zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);
+
+ return 0;
+}
+
+/* additional control functions */
+static int zr36050_control(struct videocodec *codec, int type, int size, void *data)
+{
+ struct zr36050 *ptr = (struct zr36050 *)codec->data;
+ int *ival = (int *)data;
+
+ dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
+ size);
+
+ switch (type) {
+ case CODEC_G_STATUS: /* get last status */
+ if (size != sizeof(int))
+ return -EFAULT;
+ zr36050_read_status1(ptr);
+ *ival = ptr->status1;
+ break;
+
+ case CODEC_G_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = CODEC_MODE_BJPG;
+ break;
+
+ case CODEC_S_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ if (*ival != CODEC_MODE_BJPG)
+ return -EINVAL;
+ /* not needed, do nothing */
+ return 0;
+
+ case CODEC_G_VFE:
+ case CODEC_S_VFE:
+ /* not needed, do nothing */
+ return 0;
+
+ case CODEC_S_MMAP:
+ /* not available, give an error */
+ return -ENXIO;
+
+ case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = ptr->total_code_vol;
+ break;
+
+ case CODEC_S_JPEG_TDS_BYTE: /* set target volume in byte */
+ if (size != sizeof(int))
+ return -EFAULT;
+ ptr->total_code_vol = *ival;
+ /* (Kieran Morrissey)
+ * code copied from zr36060.c to ensure proper bitrate */
+ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
+ break;
+
+ case CODEC_G_JPEG_SCALE: /* get scaling factor */
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = zr36050_read_scalefactor(ptr);
+ break;
+
+ case CODEC_S_JPEG_SCALE: /* set scaling factor */
+ if (size != sizeof(int))
+ return -EFAULT;
+ ptr->scalefact = *ival;
+ break;
+
+ case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */
+ struct jpeg_app_marker *app = data;
+
+ if (size != sizeof(struct jpeg_app_marker))
+ return -EFAULT;
+
+ *app = ptr->app;
+ break;
+ }
+
+ case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */
+ struct jpeg_app_marker *app = data;
+
+ if (size != sizeof(struct jpeg_app_marker))
+ return -EFAULT;
+
+ ptr->app = *app;
+ break;
+ }
+
+ case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */
+ struct jpeg_com_marker *com = data;
+
+ if (size != sizeof(struct jpeg_com_marker))
+ return -EFAULT;
+
+ *com = ptr->com;
+ break;
+ }
+
+ case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */
+ struct jpeg_com_marker *com = data;
+
+ if (size != sizeof(struct jpeg_com_marker))
+ return -EFAULT;
+
+ ptr->com = *com;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+/* =========================================================================
+ Exit and unregister function:
+
+ Deinitializes Zoran's JPEG processor
+ ========================================================================= */
+
+static int zr36050_unset(struct videocodec *codec)
+{
+ struct zr36050 *ptr = codec->data;
+
+ if (ptr) {
+ /* do we need some codec deinit here, too? */
+
+ dprintk(1, "%s: finished codec #%d\n", ptr->name,
+ ptr->num);
+ kfree(ptr);
+ codec->data = NULL;
+
+ zr36050_codecs--;
+ return 0;
+ }
+
+ return -EFAULT;
+}
+
+/* =========================================================================
+ Setup and registry function:
+
+ Initializes Zoran's JPEG processor
+
+ Also sets pixel size, average code size, mode (compr./decompr.)
+ (the given size is determined by the processor with the video interface)
+ ========================================================================= */
+
+static int zr36050_setup(struct videocodec *codec)
+{
+ struct zr36050 *ptr;
+ int res;
+
+ dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n",
+ zr36050_codecs);
+
+ if (zr36050_codecs == MAX_CODECS) {
+ dprintk(1,
+ KERN_ERR "zr36050: Can't attach more codecs!\n");
+ return -ENOSPC;
+ }
+ //mem structure init
+ codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]",
+ zr36050_codecs);
+ ptr->num = zr36050_codecs++;
+ ptr->codec = codec;
+
+ //testing
+ res = zr36050_basic_test(ptr);
+ if (res < 0) {
+ zr36050_unset(codec);
+ return res;
+ }
+ //final setup
+ memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8);
+ memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8);
+
+ ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag
+ * (what is the difference?) */
+ ptr->mode = CODEC_DO_COMPRESSION;
+ ptr->width = 384;
+ ptr->height = 288;
+ ptr->total_code_vol = 16000;
+ ptr->max_block_vol = 240;
+ ptr->scalefact = 0x100;
+ ptr->dri = 1;
+
+ /* no app/com marker by default */
+ ptr->app.appn = 0;
+ ptr->app.len = 0;
+ ptr->com.len = 0;
+
+ zr36050_init(ptr);
+
+ dprintk(1, KERN_INFO "%s: codec attached and running\n",
+ ptr->name);
+
+ return 0;
+}
+
+static const struct videocodec zr36050_codec = {
+ .owner = THIS_MODULE,
+ .name = "zr36050",
+ .magic = 0L, // magic not used
+ .flags =
+ CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER |
+ CODEC_FLAG_DECODER,
+ .type = CODEC_TYPE_ZR36050,
+ .setup = zr36050_setup, // functionality
+ .unset = zr36050_unset,
+ .set_mode = zr36050_set_mode,
+ .set_video = zr36050_set_video,
+ .control = zr36050_control,
+ // others are not used
+};
+
+/* =========================================================================
+ HOOK IN DRIVER AS KERNEL MODULE
+ ========================================================================= */
+
+static int __init zr36050_init_module(void)
+{
+ //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION);
+ zr36050_codecs = 0;
+ return videocodec_register(&zr36050_codec);
+}
+
+static void __exit zr36050_cleanup_module(void)
+{
+ if (zr36050_codecs) {
+ dprintk(1,
+ "zr36050: something's wrong - %d codecs left somehow.\n",
+ zr36050_codecs);
+ }
+ videocodec_unregister(&zr36050_codec);
+}
+
+module_init(zr36050_init_module);
+module_exit(zr36050_cleanup_module);
+
+MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>");
+MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors "
+ ZR050_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36050.h b/drivers/staging/media/zoran/zr36050.h
new file mode 100644
index 000000000000..8f972d045b58
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36050.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran ZR36050 basic configuration functions - header file
+ *
+ * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at>
+ */
+
+#ifndef ZR36050_H
+#define ZR36050_H
+
+#include "videocodec.h"
+
+/* data stored for each zoran jpeg codec chip */
+struct zr36050 {
+ char name[32];
+ int num;
+ /* io datastructure */
+ struct videocodec *codec;
+ // last coder status
+ __u8 status1;
+ // actual coder setup
+ int mode;
+
+ __u16 width;
+ __u16 height;
+
+ __u16 bitrate_ctrl;
+
+ __u32 total_code_vol;
+ __u32 real_code_vol;
+ __u16 max_block_vol;
+
+ __u8 h_samp_ratio[8];
+ __u8 v_samp_ratio[8];
+ __u16 scalefact;
+ __u16 dri;
+
+ /* com/app marker */
+ struct jpeg_com_marker com;
+ struct jpeg_app_marker app;
+};
+
+/* zr36050 register addresses */
+#define ZR050_GO 0x000
+#define ZR050_HARDWARE 0x002
+#define ZR050_MODE 0x003
+#define ZR050_OPTIONS 0x004
+#define ZR050_MBCV 0x005
+#define ZR050_MARKERS_EN 0x006
+#define ZR050_INT_REQ_0 0x007
+#define ZR050_INT_REQ_1 0x008
+#define ZR050_TCV_NET_HI 0x009
+#define ZR050_TCV_NET_MH 0x00a
+#define ZR050_TCV_NET_ML 0x00b
+#define ZR050_TCV_NET_LO 0x00c
+#define ZR050_TCV_DATA_HI 0x00d
+#define ZR050_TCV_DATA_MH 0x00e
+#define ZR050_TCV_DATA_ML 0x00f
+#define ZR050_TCV_DATA_LO 0x010
+#define ZR050_SF_HI 0x011
+#define ZR050_SF_LO 0x012
+#define ZR050_AF_HI 0x013
+#define ZR050_AF_M 0x014
+#define ZR050_AF_LO 0x015
+#define ZR050_ACV_HI 0x016
+#define ZR050_ACV_MH 0x017
+#define ZR050_ACV_ML 0x018
+#define ZR050_ACV_LO 0x019
+#define ZR050_ACT_HI 0x01a
+#define ZR050_ACT_MH 0x01b
+#define ZR050_ACT_ML 0x01c
+#define ZR050_ACT_LO 0x01d
+#define ZR050_ACV_TURN_HI 0x01e
+#define ZR050_ACV_TURN_MH 0x01f
+#define ZR050_ACV_TURN_ML 0x020
+#define ZR050_ACV_TURN_LO 0x021
+#define ZR050_STATUS_0 0x02e
+#define ZR050_STATUS_1 0x02f
+
+#define ZR050_SOF_IDX 0x040
+#define ZR050_SOS1_IDX 0x07a
+#define ZR050_SOS2_IDX 0x08a
+#define ZR050_SOS3_IDX 0x09a
+#define ZR050_SOS4_IDX 0x0aa
+#define ZR050_DRI_IDX 0x0c0
+#define ZR050_DNL_IDX 0x0c6
+#define ZR050_DQT_IDX 0x0cc
+#define ZR050_DHT_IDX 0x1d4
+#define ZR050_APP_IDX 0x380
+#define ZR050_COM_IDX 0x3c0
+
+/* zr36050 hardware register bits */
+
+#define ZR050_HW_BSWD 0x80
+#define ZR050_HW_MSTR 0x40
+#define ZR050_HW_DMA 0x20
+#define ZR050_HW_CFIS_1_CLK 0x00
+#define ZR050_HW_CFIS_2_CLK 0x04
+#define ZR050_HW_CFIS_3_CLK 0x08
+#define ZR050_HW_CFIS_4_CLK 0x0C
+#define ZR050_HW_CFIS_5_CLK 0x10
+#define ZR050_HW_CFIS_6_CLK 0x14
+#define ZR050_HW_CFIS_7_CLK 0x18
+#define ZR050_HW_CFIS_8_CLK 0x1C
+#define ZR050_HW_BELE 0x01
+
+/* zr36050 mode register bits */
+
+#define ZR050_MO_COMP 0x80
+#define ZR050_MO_ATP 0x40
+#define ZR050_MO_PASS2 0x20
+#define ZR050_MO_TLM 0x10
+#define ZR050_MO_DCONLY 0x08
+#define ZR050_MO_BRC 0x04
+
+/* zr36050 option register bits */
+
+#define ZR050_OP_NSCN_1 0x00
+#define ZR050_OP_NSCN_2 0x20
+#define ZR050_OP_NSCN_3 0x40
+#define ZR050_OP_NSCN_4 0x60
+#define ZR050_OP_NSCN_5 0x80
+#define ZR050_OP_NSCN_6 0xA0
+#define ZR050_OP_NSCN_7 0xC0
+#define ZR050_OP_NSCN_8 0xE0
+#define ZR050_OP_OVF 0x10
+
+/* zr36050 markers-enable register bits */
+
+#define ZR050_ME_APP 0x80
+#define ZR050_ME_COM 0x40
+#define ZR050_ME_DRI 0x20
+#define ZR050_ME_DQT 0x10
+#define ZR050_ME_DHT 0x08
+#define ZR050_ME_DNL 0x04
+#define ZR050_ME_DQTI 0x02
+#define ZR050_ME_DHTI 0x01
+
+/* zr36050 status0/1 register bit masks */
+
+#define ZR050_ST_RST_MASK 0x20
+#define ZR050_ST_SOF_MASK 0x02
+#define ZR050_ST_SOS_MASK 0x02
+#define ZR050_ST_DATRDY_MASK 0x80
+#define ZR050_ST_MRKDET_MASK 0x40
+#define ZR050_ST_RFM_MASK 0x10
+#define ZR050_ST_RFD_MASK 0x08
+#define ZR050_ST_END_MASK 0x04
+#define ZR050_ST_TCVOVF_MASK 0x02
+#define ZR050_ST_DATOVF_MASK 0x01
+
+/* pixel component idx */
+
+#define ZR050_Y_COMPONENT 0
+#define ZR050_U_COMPONENT 1
+#define ZR050_V_COMPONENT 2
+
+#endif /* ifndef ZR36050_H */
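
As a quick illustration of how the mode and markers-enable bits above combine in the driver's compression setup, here is a sketch using only the #defines from this header (not driver code):

#include <stdint.h>

/* compression, second pass, optional bit-rate control:
 * 0x80 | 0x20 (| 0x04) == 0xa0 or 0xa4 */
static uint8_t zr050_mode_byte(int bitrate_ctrl)
{
	return ZR050_MO_COMP | ZR050_MO_PASS2 |
	       (bitrate_ctrl ? ZR050_MO_BRC : 0);
}

/* always emit DQT/DHT; APPn/COM only when a payload is present */
static uint8_t zr050_markers_byte(int app_len, int com_len)
{
	return ZR050_ME_DQT | ZR050_ME_DHT |
	       (app_len > 0 ? ZR050_ME_APP : 0) |
	       (com_len > 0 ? ZR050_ME_COM : 0);
}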
diff --git a/drivers/staging/media/zoran/zr36057.h b/drivers/staging/media/zoran/zr36057.h
new file mode 100644
index 000000000000..71b651add35a
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36057.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * zr36057.h - zr36057 register offsets
+ *
+ * Copyright (C) 1998 Dave Perks <dperks@ibm.net>
+ */
+
+#ifndef _ZR36057_H_
+#define _ZR36057_H_
+
+/* Zoran ZR36057 registers */
+
+#define ZR36057_VFEHCR 0x000 /* Video Front End, Horizontal Configuration Register */
+#define ZR36057_VFEHCR_HS_POL BIT(30)
+#define ZR36057_VFEHCR_H_START 10
+#define ZR36057_VFEHCR_H_END 0
+#define ZR36057_VFEHCR_HMASK 0x3ff
+
+#define ZR36057_VFEVCR 0x004 /* Video Front End, Vertical Configuration Register */
+#define ZR36057_VFEVCR_VS_POL BIT(30)
+#define ZR36057_VFEVCR_V_START 10
+#define ZR36057_VFEVCR_V_END 0
+#define ZR36057_VFEVCR_VMASK 0x3ff
+
+#define ZR36057_VFESPFR 0x008 /* Video Front End, Scaler and Pixel Format Register */
+#define ZR36057_VFESPFR_EXT_FL BIT(26)
+#define ZR36057_VFESPFR_TOP_FIELD BIT(25)
+#define ZR36057_VFESPFR_VCLK_POL BIT(24)
+#define ZR36057_VFESPFR_H_FILTER 21
+#define ZR36057_VFESPFR_HOR_DCM 14
+#define ZR36057_VFESPFR_VER_DCM 8
+#define ZR36057_VFESPFR_DISP_MODE 6
+#define ZR36057_VFESPFR_YUV422 (0<<3)
+#define ZR36057_VFESPFR_RGB888 (1<<3)
+#define ZR36057_VFESPFR_RGB565 (2<<3)
+#define ZR36057_VFESPFR_RGB555 (3<<3)
+#define ZR36057_VFESPFR_ERR_DIF (1<<2)
+#define ZR36057_VFESPFR_PACK24 (1<<1)
+#define ZR36057_VFESPFR_LITTLE_ENDIAN (1<<0)
+
+#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */
+
+#define ZR36057_VDBR 0x010 /* Video Display "Bottom" Register */
+
+#define ZR36057_VSSFGR 0x014 /* Video Stride, Status, and Frame Grab Register */
+#define ZR36057_VSSFGR_DISP_STRIDE 16
+#define ZR36057_VSSFGR_VID_OVF BIT(8)
+#define ZR36057_VSSFGR_SNAP_SHOT BIT(1)
+#define ZR36057_VSSFGR_FRAME_GRAB BIT(0)
+
+#define ZR36057_VDCR 0x018 /* Video Display Configuration Register */
+#define ZR36057_VDCR_VID_EN BIT(31)
+#define ZR36057_VDCR_MIN_PIX 24
+#define ZR36057_VDCR_TRITON BIT(24)
+#define ZR36057_VDCR_VID_WIN_HT 12
+#define ZR36057_VDCR_VID_WIN_WID 0
+
+#define ZR36057_MMTR 0x01c /* Masking Map "Top" Register */
+
+#define ZR36057_MMBR 0x020 /* Masking Map "Bottom" Register */
+
+#define ZR36057_OCR 0x024 /* Overlay Control Register */
+#define ZR36057_OCR_OVL_ENABLE BIT(15)
+#define ZR36057_OCR_MASK_STRIDE 0
+
+#define ZR36057_SPGPPCR 0x028 /* System, PCI, and General Purpose Pins Control Register */
+#define ZR36057_SPGPPCR_SOFT_RESET BIT(24)
+
+#define ZR36057_GPPGCR1 0x02c /* General Purpose Pins and GuestBus Control Register (1) */
+
+#define ZR36057_MCSAR 0x030 /* MPEG Code Source Address Register */
+
+#define ZR36057_MCTCR 0x034 /* MPEG Code Transfer Control Register */
+#define ZR36057_MCTCR_COD_TIME BIT(30)
+#define ZR36057_MCTCR_C_EMPTY BIT(29)
+#define ZR36057_MCTCR_C_FLUSH BIT(28)
+#define ZR36057_MCTCR_COD_GUEST_ID 20
+#define ZR36057_MCTCR_COD_GUEST_REG 16
+
+#define ZR36057_MCMPR 0x038 /* MPEG Code Memory Pointer Register */
+
+#define ZR36057_ISR 0x03c /* Interrupt Status Register */
+#define ZR36057_ISR_GIRQ1 BIT(30)
+#define ZR36057_ISR_GIRQ0 BIT(29)
+#define ZR36057_ISR_COD_REP_IRQ BIT(28)
+#define ZR36057_ISR_JPEG_REP_IRQ BIT(27)
+
+#define ZR36057_ICR 0x040 /* Interrupt Control Register */
+#define ZR36057_ICR_GIRQ1 BIT(30)
+#define ZR36057_ICR_GIRQ0 BIT(29)
+#define ZR36057_ICR_COD_REP_IRQ BIT(28)
+#define ZR36057_ICR_JPEG_REP_IRQ BIT(27)
+#define ZR36057_ICR_INT_PIN_EN BIT(24)
+
+#define ZR36057_I2CBR 0x044 /* I2C Bus Register */
+#define ZR36057_I2CBR_SDA BIT(1)
+#define ZR36057_I2CBR_SCL BIT(0)
+
+#define ZR36057_JMC 0x100 /* JPEG Mode and Control */
+#define ZR36057_JMC_JPG BIT(31)
+#define ZR36057_JMC_JPG_EXP_MODE (0 << 29)
+#define ZR36057_JMC_JPG_CMP_MODE BIT(29)
+#define ZR36057_JMC_MJPG_EXP_MODE (2 << 29)
+#define ZR36057_JMC_MJPG_CMP_MODE (3 << 29)
+#define ZR36057_JMC_RTBUSY_FB BIT(6)
+#define ZR36057_JMC_GO_EN BIT(5)
+#define ZR36057_JMC_SYNC_MSTR BIT(4)
+#define ZR36057_JMC_FLD_PER_BUFF BIT(3)
+#define ZR36057_JMC_VFIFO_FB BIT(2)
+#define ZR36057_JMC_CFIFO_FB BIT(1)
+#define ZR36057_JMC_STLL_LIT_ENDIAN BIT(0)
+
+#define ZR36057_JPC 0x104 /* JPEG Process Control */
+#define ZR36057_JPC_P_RESET BIT(7)
+#define ZR36057_JPC_COD_TRNS_EN BIT(5)
+#define ZR36057_JPC_ACTIVE BIT(0)
+
+#define ZR36057_VSP 0x108 /* Vertical Sync Parameters */
+#define ZR36057_VSP_VSYNC_SIZE 16
+#define ZR36057_VSP_FRM_TOT 0
+
+#define ZR36057_HSP 0x10c /* Horizontal Sync Parameters */
+#define ZR36057_HSP_HSYNC_START 16
+#define ZR36057_HSP_LINE_TOT 0
+
+#define ZR36057_FHAP 0x110 /* Field Horizontal Active Portion */
+#define ZR36057_FHAP_NAX 16
+#define ZR36057_FHAP_PAX 0
+
+#define ZR36057_FVAP 0x114 /* Field Vertical Active Portion */
+#define ZR36057_FVAP_NAY 16
+#define ZR36057_FVAP_PAY 0
+
+#define ZR36057_FPP 0x118 /* Field Process Parameters */
+#define ZR36057_FPP_ODD_EVEN BIT(0)
+
+#define ZR36057_JCBA 0x11c /* JPEG Code Base Address */
+
+#define ZR36057_JCFT 0x120 /* JPEG Code FIFO Threshold */
+
+#define ZR36057_JCGI 0x124 /* JPEG Codec Guest ID */
+#define ZR36057_JCGI_JPE_GUEST_ID 4
+#define ZR36057_JCGI_JPE_GUEST_REG 0
+
+#define ZR36057_GCR2 0x12c /* GuestBus Control Register (2) */
+
+#define ZR36057_POR 0x200 /* Post Office Register */
+#define ZR36057_POR_PO_PEN BIT(25)
+#define ZR36057_POR_PO_TIME BIT(24)
+#define ZR36057_POR_PO_DIR BIT(23)
+
+#define ZR36057_STR 0x300 /* "Still" Transfer Register */
+
+#endif
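
For orientation, the interrupt-status bits above would be decoded along these lines (a hypothetical kernel-style helper, assuming this header is included; the real driver's handler is not part of this patch):

#include <linux/printk.h>
#include <linux/types.h>

static void zr36057_report_irqs(u32 isr)
{
	if (isr & ZR36057_ISR_GIRQ0)
		pr_debug("guest bus IRQ 0\n");
	if (isr & ZR36057_ISR_GIRQ1)
		pr_debug("guest bus IRQ 1\n");
	if (isr & ZR36057_ISR_COD_REP_IRQ)
		pr_debug("code transfer complete\n");
	if (isr & ZR36057_ISR_JPEG_REP_IRQ)
		pr_debug("JPEG frame complete\n");
}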
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/staging/media/zoran/zr36060.c
new file mode 100644
index 000000000000..4f9eb9ff2c42
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36060.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zoran ZR36060 basic configuration functions
+ *
+ * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be>
+ */
+
+#define ZR060_VERSION "v0.7"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <linux/types.h>
+#include <linux/wait.h>
+
+/* I/O commands, error codes */
+#include <linux/io.h>
+
+/* headerfile of this module */
+#include "zr36060.h"
+
+/* codec io API */
+#include "videocodec.h"
+
+/* more than 20 or so makes no sense; this limit just prevents unwanted loops */
+#define MAX_CODECS 20
+
+/* amount of chips attached via this driver */
+static int zr36060_codecs;
+
+static bool low_bitrate;
+module_param(low_bitrate, bool, 0);
+MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
+
+/* debugging is available via module parameter */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-4)");
+
+#define dprintk(num, format, args...) \
+ do { \
+ if (debug >= num) \
+ printk(format, ##args); \
+ } while (0)
+
+/* =========================================================================
+ * Local hardware I/O functions:
+ * read/write via codec layer (registers are located in the master device)
+ * =========================================================================
+ */
+
+static u8 zr36060_read(struct zr36060 *ptr, u16 reg)
+{
+ u8 value = 0;
+
+ // just in case something is wrong...
+ if (ptr->codec->master_data->readreg)
+ value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff;
+ else
+ pr_err("%s: invalid I/O setup, nothing read!\n", ptr->name);
+
+ return value;
+}
+
+static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value)
+{
+ dprintk(4, "0x%02x @0x%04x\n", value, reg);
+
+ // just in case something is wrong...
+ if (ptr->codec->master_data->writereg)
+ ptr->codec->master_data->writereg(ptr->codec, reg, value);
+ else
+ pr_err("%s: invalid I/O setup, nothing written!\n", ptr->name);
+}
+
+/* =========================================================================
+ * Local helper function:
+ * status read
+ * =========================================================================
+ */
+
+/* status is kept in datastructure */
+static u8 zr36060_read_status(struct zr36060 *ptr)
+{
+ ptr->status = zr36060_read(ptr, ZR060_CFSR);
+
+ zr36060_read(ptr, 0);
+ return ptr->status;
+}
+
+/* scale factor is kept in datastructure */
+static u16 zr36060_read_scalefactor(struct zr36060 *ptr)
+{
+ ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) |
+ (zr36060_read(ptr, ZR060_SF_LO) & 0xFF);
+
+ /* leave 0 selected for an eventual GO from the master */
+ zr36060_read(ptr, 0);
+ return ptr->scalefact;
+}
+
+/* wait if codec is ready to proceed (end of processing) or time is over */
+static void zr36060_wait_end(struct zr36060 *ptr)
+{
+ int i = 0;
+
+ while (zr36060_read_status(ptr) & ZR060_CFSR_BUSY) {
+ udelay(1);
+ if (i++ > 200000) { // 200 ms - something is surely wrong!
+ dprintk(1,
+ "%s: timeout at wait_end (last status: 0x%02x)\n",
+ ptr->name, ptr->status);
+ break;
+ }
+ }
+}
+
+/* Basic test of "connectivity", writes/reads to/from memory the SOF marker */
+static int zr36060_basic_test(struct zr36060 *ptr)
+{
+ if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) ||
+ (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) {
+ pr_err("%s: attach failed, can't connect to jpeg processor!\n", ptr->name);
+ return -ENXIO;
+ }
+
+ zr36060_wait_end(ptr);
+ if (ptr->status & ZR060_CFSR_BUSY) {
+ pr_err("%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name);
+ return -EBUSY;
+ }
+
+ return 0; /* looks good! */
+}
+
+/* simple loop for pushing the init datasets */
+static int zr36060_pushit(struct zr36060 *ptr, u16 startreg, u16 len, const char *data)
+{
+ int i = 0;
+
+ dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name,
+ startreg, len);
+ while (i < len)
+ zr36060_write(ptr, startreg++, data[i++]);
+
+ return i;
+}
+
+/* =========================================================================
+ * Basic datasets:
+ * jpeg baseline setup data (you find it on lots places in internet, or just
+ * extract it from any regular .jpg image...)
+ *
+ * Could be variable, but as long as that's not needed they are fixed to save
+ * memory. Otherwise expand the zr36060 structure with arrays, push the values
+ * to it and initialize from there, as e.g. the linux zr36057/60 driver does.
+ * =========================================================================
+ */
+static const char zr36060_dqt[0x86] = {
+ 0xff, 0xdb, //Marker: DQT
+ 0x00, 0x84, //Length: 2*65+2
+ 0x00, //Pq,Tq first table
+ 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
+ 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
+ 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
+ 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
+ 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
+ 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
+ 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
+ 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63,
+ 0x01, //Pq,Tq second table
+ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
+ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+static const char zr36060_dht[0x1a4] = {
+ 0xff, 0xc4, //Marker: DHT
+ 0x01, 0xa2, //Length: 2*AC, 2*DC
+ 0x00, //DC first table
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x01, //DC second table
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x10, //AC first table
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00,
+ 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11,
+ 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61,
+ 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1,
+ 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24,
+ 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
+ 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
+ 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99,
+ 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+ 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9,
+ 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8,
+ 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
+ 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
+ 0xF8, 0xF9, 0xFA,
+ 0x11, //AC second table
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01,
+ 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04,
+ 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62,
+ 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25,
+ 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A,
+ 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44,
+ 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56,
+ 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66,
+ 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8,
+ 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
+ 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8,
+ 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
+ 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA
+};
+
+/* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */
+#define NO_OF_COMPONENTS 0x3 //Y,U,V
+#define BASELINE_PRECISION 0x8 //MCU size (?)
+static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT
+static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC
+static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
+
+/* horizontal 422 decimation setup (maybe we support 411 or so later, too) */
+static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
+static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
+
+/* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */
+static int zr36060_set_sof(struct zr36060 *ptr)
+{
+ char sof_data[34]; // max. size of register set
+ int i;
+
+ dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name,
+ ptr->width, ptr->height, NO_OF_COMPONENTS);
+ sof_data[0] = 0xff;
+ sof_data[1] = 0xc0;
+ sof_data[2] = 0x00;
+ sof_data[3] = (3 * NO_OF_COMPONENTS) + 8;
+ sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060
+ sof_data[5] = (ptr->height) >> 8;
+ sof_data[6] = (ptr->height) & 0xff;
+ sof_data[7] = (ptr->width) >> 8;
+ sof_data[8] = (ptr->width) & 0xff;
+ sof_data[9] = NO_OF_COMPONENTS;
+ for (i = 0; i < NO_OF_COMPONENTS; i++) {
+ sof_data[10 + (i * 3)] = i; // index identifier
+ sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) |
+ (ptr->v_samp_ratio[i]); // sampling ratios
+ sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection
+ }
+ return zr36060_pushit(ptr, ZR060_SOF_IDX,
+ (3 * NO_OF_COMPONENTS) + 10, sof_data);
+}
+
+/* SOS (start of scan) segment depends on the used scan components of each color component */
+static int zr36060_set_sos(struct zr36060 *ptr)
+{
+ char sos_data[16]; // max. size of register set
+ int i;
+
+ dprintk(3, "%s: write SOS\n", ptr->name);
+ sos_data[0] = 0xff;
+ sos_data[1] = 0xda;
+ sos_data[2] = 0x00;
+ sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3;
+ sos_data[4] = NO_OF_COMPONENTS;
+ for (i = 0; i < NO_OF_COMPONENTS; i++) {
+ sos_data[5 + (i * 2)] = i; // index
+ sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) |
+ zr36060_ta[i]; // AC/DC tbl.sel.
+ }
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f;
+ sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00;
+ return zr36060_pushit(ptr, ZR060_SOS_IDX,
+ 4 + 1 + (2 * NO_OF_COMPONENTS) + 3,
+ sos_data);
+}
+
+/* DRI (define restart interval) */
+static int zr36060_set_dri(struct zr36060 *ptr)
+{
+ char dri_data[6]; // max. size of register set
+
+ dprintk(3, "%s: write DRI\n", ptr->name);
+ dri_data[0] = 0xff;
+ dri_data[1] = 0xdd;
+ dri_data[2] = 0x00;
+ dri_data[3] = 0x04;
+ dri_data[4] = (ptr->dri) >> 8;
+ dri_data[5] = (ptr->dri) & 0xff;
+ return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data);
+}
+
+/* Setup compression/decompression of Zoran's JPEG processor (see also the Zoran 36060 manual)
+ * ... sorry for the spaghetti code ...
+ */
+static void zr36060_init(struct zr36060 *ptr)
+{
+ int sum = 0;
+ long bitcnt, tmp;
+
+ if (ptr->mode == CODEC_DO_COMPRESSION) {
+ dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name);
+
+ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST);
+
+ /* 060 communicates with 067 in master mode */
+ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CODE_MSTR);
+
+ /* Compression with or without variable scale factor */
+ /*FIXME: What about ptr->bitrate_ctrl? */
+ zr36060_write(ptr, ZR060_CMR, ZR060_CMR_COMP | ZR060_CMR_PASS2 | ZR060_CMR_BRB);
+
+ /* Must be zero */
+ zr36060_write(ptr, ZR060_MBZ, 0x00);
+ zr36060_write(ptr, ZR060_TCR_HI, 0x00);
+ zr36060_write(ptr, ZR060_TCR_LO, 0x00);
+
+ /* Disable all IRQs - no DataErr means autoreset */
+ zr36060_write(ptr, ZR060_IMR, 0);
+
+ /* volume control settings */
+ zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8);
+ zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff);
+
+ zr36060_write(ptr, ZR060_AF_HI, 0xff);
+ zr36060_write(ptr, ZR060_AF_M, 0xff);
+ zr36060_write(ptr, ZR060_AF_LO, 0xff);
+
+ /* setup the variable jpeg tables */
+ sum += zr36060_set_sof(ptr);
+ sum += zr36060_set_sos(ptr);
+ sum += zr36060_set_dri(ptr);
+
+/* setup the fixed jpeg tables - maybe variable, though - (see table init section above) */
+ sum += zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), zr36060_dqt);
+ sum += zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht);
+ zr36060_write(ptr, ZR060_APP_IDX, 0xff);
+ zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn);
+ zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00);
+ zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2);
+ sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, ptr->app.data) + 4;
+ zr36060_write(ptr, ZR060_COM_IDX, 0xff);
+ zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe);
+ zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00);
+ zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2);
+ sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, ptr->com.data) + 4;
+
+ /* setup misc. data for compression (target code sizes) */
+
+ /* size of compressed code to reach without header data */
+ sum = ptr->real_code_vol - sum;
+ bitcnt = sum << 3; /* need the size in bits */
+
+ tmp = bitcnt >> 16;
+ dprintk(3,
+ "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n",
+ ptr->name, sum, ptr->real_code_vol, bitcnt, tmp);
+ zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8);
+ zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff);
+ tmp = bitcnt & 0xffff;
+ zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8);
+ zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff);
+
+ bitcnt -= bitcnt >> 7; // bits without stuffing
+ bitcnt -= ((bitcnt * 5) >> 6); // bits without eob
+
+ tmp = bitcnt >> 16;
+ dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n",
+ ptr->name, bitcnt, tmp);
+ zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8);
+ zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff);
+ tmp = bitcnt & 0xffff;
+ zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8);
+ zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff);
+
+ /* JPEG markers to be included in the compressed stream */
+ zr36060_write(ptr, ZR060_MER,
+ ZR060_MER_DQT | ZR060_MER_DHT |
+ ((ptr->com.len > 0) ? ZR060_MER_COM : 0) |
+ ((ptr->app.len > 0) ? ZR060_MER_APP : 0));
+
+ /* Setup the Video Frontend */
+ /* Limit pixel range to 16..235 as per CCIR-601 */
+ zr36060_write(ptr, ZR060_VCR, ZR060_VCR_RANGE);
+
+ } else {
+ dprintk(2, "%s: EXPANSION SETUP\n", ptr->name);
+
+ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST);
+
+ /* 060 communicates with 067 in master mode */
+ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CODE_MSTR);
+
+ /* Decompression */
+ zr36060_write(ptr, ZR060_CMR, 0);
+
+ /* Must be zero */
+ zr36060_write(ptr, ZR060_MBZ, 0x00);
+ zr36060_write(ptr, ZR060_TCR_HI, 0x00);
+ zr36060_write(ptr, ZR060_TCR_LO, 0x00);
+
+ /* Disable all IRQs - no DataErr means autoreset */
+ zr36060_write(ptr, ZR060_IMR, 0);
+
+ /* setup misc. data for expansion */
+ zr36060_write(ptr, ZR060_MER, 0);
+
+/* setup the fixed jpeg tables - maybe variable, though - (see table init section above) */
+ zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht);
+
+ /* Setup the Video Frontend */
+ //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FI_EXT);
+ //this doesn't seem right and doesn't work...
+ zr36060_write(ptr, ZR060_VCR, ZR060_VCR_RANGE);
+ }
+
+ /* Load the tables */
+ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST | ZR060_LOAD_LOAD);
+ zr36060_wait_end(ptr);
+ dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status);
+
+ if (ptr->status & ZR060_CFSR_BUSY) {
+ pr_err("%s: init aborted!\n", ptr->name);
+ return; // something is wrong - it timed out!
+ }
+}
+
+/* =========================================================================
+ * CODEC API FUNCTIONS
+ * these functions are accessed by the master via the API structure
+ * =========================================================================
+ */
+
+/* sets compression/expansion mode and launches the codec -
+ * this should be the last call from the master before starting processing
+ */
+static int zr36060_set_mode(struct videocodec *codec, int mode)
+{
+ struct zr36060 *ptr = (struct zr36060 *)codec->data;
+
+ dprintk(2, "%s: set_mode %d call\n", ptr->name, mode);
+
+ if (mode != CODEC_DO_EXPANSION && mode != CODEC_DO_COMPRESSION)
+ return -EINVAL;
+
+ ptr->mode = mode;
+ zr36060_init(ptr);
+
+ return 0;
+}
+
+/* set picture size (norm is ignored as the codec doesn't know about it) */
+static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm,
+ struct vfe_settings *cap, struct vfe_polarity *pol)
+{
+ struct zr36060 *ptr = (struct zr36060 *)codec->data;
+ u32 reg;
+ int size;
+
+ dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name,
+ cap->x, cap->y, cap->width, cap->height, cap->decimation);
+
+ /* if () return -EINVAL;
+ * trust the master driver to know what it does - so
+ * we allow invalid startx/y and norm for now ...
+ */
+ ptr->width = cap->width / (cap->decimation & 0xff);
+ ptr->height = cap->height / (cap->decimation >> 8);
+
+ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SYNC_RST);
+
+ /* Note that VSPol/HSPol bits in zr36060 have the opposite
+ * meaning of their zr360x7 counterparts with the same names.
+ * N.b. for VSPol this is only true if FIVEdge = 0 (default,
+ * left unchanged here - in accordance with datasheet).
+ */
+ reg = (!pol->vsync_pol ? ZR060_VPR_VS_POL : 0)
+ | (!pol->hsync_pol ? ZR060_VPR_HS_POL : 0)
+ | (pol->field_pol ? ZR060_VPR_FI_POL : 0)
+ | (pol->blank_pol ? ZR060_VPR_BL_POL : 0)
+ | (pol->subimg_pol ? ZR060_VPR_S_IMG_POL : 0)
+ | (pol->poe_pol ? ZR060_VPR_POE_POL : 0)
+ | (pol->pvalid_pol ? ZR060_VPR_P_VAL_POL : 0)
+ | (pol->vclk_pol ? ZR060_VPR_VCLK_POL : 0);
+ zr36060_write(ptr, ZR060_VPR, reg);
+
+ reg = 0;
+ switch (cap->decimation & 0xff) {
+ default:
+ case 1:
+ break;
+
+ case 2:
+ reg |= ZR060_SR_H_SCALE2;
+ break;
+
+ case 4:
+ reg |= ZR060_SR_H_SCALE4;
+ break;
+ }
+
+ switch (cap->decimation >> 8) {
+ default:
+ case 1:
+ break;
+
+ case 2:
+ reg |= ZR060_SR_V_SCALE;
+ break;
+ }
+ zr36060_write(ptr, ZR060_SR, reg);
+
+ zr36060_write(ptr, ZR060_BCR_Y, 0x00);
+ zr36060_write(ptr, ZR060_BCR_U, 0x80);
+ zr36060_write(ptr, ZR060_BCR_V, 0x80);
+
+ /* sync generator */
+
+ reg = norm->ht - 1; /* Vtotal */
+ zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff);
+
+ reg = norm->wt - 1; /* Htotal */
+ zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff);
+
+ reg = 6 - 1; /* VsyncSize */
+ zr36060_write(ptr, ZR060_SGR_VSYNC, reg);
+
+ //reg = 30 - 1; /* HsyncSize */
+///*CP*/ reg = (zr->params.norm == 1 ? 57 : 68);
+ reg = 68;
+ zr36060_write(ptr, ZR060_SGR_HSYNC, reg);
+
+ reg = norm->v_start - 1; /* BVstart */
+ zr36060_write(ptr, ZR060_SGR_BVSTART, reg);
+
+ reg += norm->ha / 2; /* BVend */
+ zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff);
+
+ reg = norm->h_start - 1; /* BHstart */
+ zr36060_write(ptr, ZR060_SGR_BHSTART, reg);
+
+ reg += norm->wa; /* BHend */
+ zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff);
+
+ /* active area */
+ reg = cap->y + norm->v_start; /* Vstart */
+ zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff);
+
+ reg += cap->height; /* Vend */
+ zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff);
+
+ reg = cap->x + norm->h_start; /* Hstart */
+ zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff);
+
+ reg += cap->width; /* Hend */
+ zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff);
+
+ /* subimage area */
+ reg = norm->v_start - 4; /* SVstart */
+ zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff);
+
+ reg += norm->ha / 2 + 8; /* SVend */
+ zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff);
+
+ reg = norm->h_start /*+ 64 */ - 4; /* SHstart */
+ zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff);
+
+ reg += norm->wa + 8; /* SHend */
+ zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff);
+ zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff);
+
+ size = ptr->width * ptr->height;
+ /* Target compressed field size in bits: */
+ size = size * 16; /* uncompressed size in bits */
+ /* (Ronald) by default, quality = 100 is a compression
+ * ratio 1:2. Setting low_bitrate (insmod option) sets
+ * it to 1:4 (instead of 1:2, zr36060 max) as limit because the
+ * buz can't handle more at decimation=1... Use low_bitrate if
+ * you have a Buz, unless you know what you're doing
+ */
+ size = size * cap->quality / (low_bitrate ? 400 : 200);
+ /* Lower limit (arbitrary, 1 KB) */
+ if (size < 8192)
+ size = 8192;
+ /* Upper limit: 7/8 of the code buffers */
+ if (size > ptr->total_code_vol * 7)
+ size = ptr->total_code_vol * 7;
+
+ ptr->real_code_vol = size >> 3; /* in bytes */
+
+ /* the MBCVR is the *maximum* block volume; according to the
+ * JPEG ISO specs this limit shouldn't be used, as leaving it
+ * open allows for the best encoding quality - so set it to its max value
+ */
+ reg = ptr->max_block_vol;
+ zr36060_write(ptr, ZR060_MBCVR, reg);
+
+ return 0;
+}
+
+/* additional control functions */
+static int zr36060_control(struct videocodec *codec, int type, int size, void *data)
+{
+ struct zr36060 *ptr = (struct zr36060 *)codec->data;
+ int *ival = (int *)data;
+
+ dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type,
+ size);
+
+ switch (type) {
+ case CODEC_G_STATUS: /* get last status */
+ if (size != sizeof(int))
+ return -EFAULT;
+ zr36060_read_status(ptr);
+ *ival = ptr->status;
+ break;
+
+ case CODEC_G_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = CODEC_MODE_BJPG;
+ break;
+
+ case CODEC_S_CODEC_MODE:
+ if (size != sizeof(int))
+ return -EFAULT;
+ if (*ival != CODEC_MODE_BJPG)
+ return -EINVAL;
+ /* not needed, do nothing */
+ return 0;
+
+ case CODEC_G_VFE:
+ case CODEC_S_VFE:
+ /* not needed, do nothing */
+ return 0;
+
+ case CODEC_S_MMAP:
+ /* not available, give an error */
+ return -ENXIO;
+
+ case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = ptr->total_code_vol;
+ break;
+
+ case CODEC_S_JPEG_TDS_BYTE: /* set target volume in byte */
+ if (size != sizeof(int))
+ return -EFAULT;
+ ptr->total_code_vol = *ival;
+ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
+ break;
+
+ case CODEC_G_JPEG_SCALE: /* get scaling factor */
+ if (size != sizeof(int))
+ return -EFAULT;
+ *ival = zr36060_read_scalefactor(ptr);
+ break;
+
+ case CODEC_S_JPEG_SCALE: /* set scaling factor */
+ if (size != sizeof(int))
+ return -EFAULT;
+ ptr->scalefact = *ival;
+ break;
+
+ case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */
+ struct jpeg_app_marker *app = data;
+
+ if (size != sizeof(struct jpeg_app_marker))
+ return -EFAULT;
+
+ *app = ptr->app;
+ break;
+ }
+
+ case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */
+ struct jpeg_app_marker *app = data;
+
+ if (size != sizeof(struct jpeg_app_marker))
+ return -EFAULT;
+
+ ptr->app = *app;
+ break;
+ }
+
+ case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */
+ struct jpeg_com_marker *com = data;
+
+ if (size != sizeof(struct jpeg_com_marker))
+ return -EFAULT;
+
+ *com = ptr->com;
+ break;
+ }
+
+ case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */
+ struct jpeg_com_marker *com = data;
+
+ if (size != sizeof(struct jpeg_com_marker))
+ return -EFAULT;
+
+ ptr->com = *com;
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+/* =========================================================================
+ * Exit and unregister function:
+ * Deinitializes Zoran's JPEG processor
+ * =========================================================================
+ */
+static int zr36060_unset(struct videocodec *codec)
+{
+ struct zr36060 *ptr = codec->data;
+
+ if (ptr) {
+ /* do we need some codec deinit here, too? */
+
+ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num);
+ kfree(ptr);
+ codec->data = NULL;
+
+ zr36060_codecs--;
+ return 0;
+ }
+
+ return -EFAULT;
+}
+
+/* =========================================================================
+ * Setup and registry function:
+ * Initializes Zoran's JPEG processor
+ * Also sets pixel size, average code size, mode (compr./decompr.)
+ * (the given size is determined by the processor with the video interface)
+ * =========================================================================
+ */
+static int zr36060_setup(struct videocodec *codec)
+{
+ struct zr36060 *ptr;
+ int res;
+
+ dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs);
+
+ if (zr36060_codecs == MAX_CODECS) {
+ pr_err("zr36060: Can't attach more codecs!\n");
+ return -ENOSPC;
+ }
+ //mem structure init
+ codec->data = ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", zr36060_codecs);
+ ptr->num = zr36060_codecs++;
+ ptr->codec = codec;
+
+ //testing
+ res = zr36060_basic_test(ptr);
+ if (res < 0) {
+ zr36060_unset(codec);
+ return res;
+ }
+ //final setup
+ memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8);
+ memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8);
+
+ ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag (what is the difference?) */
+ ptr->mode = CODEC_DO_COMPRESSION;
+ ptr->width = 384;
+ ptr->height = 288;
+ ptr->total_code_vol = 16000; /* CHECKME */
+ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
+ ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */
+ ptr->scalefact = 0x100;
+ ptr->dri = 1; /* CHECKME, was 8 is 1 */
+
+ /* by default, no COM or APP markers - app should set those */
+ ptr->com.len = 0;
+ ptr->app.appn = 0;
+ ptr->app.len = 0;
+
+ zr36060_init(ptr);
+
+ dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name);
+
+ return 0;
+}
+
+static const struct videocodec zr36060_codec = {
+ .owner = THIS_MODULE,
+ .name = "zr36060",
+ .magic = 0L, // magic not used
+ .flags =
+ CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER |
+ CODEC_FLAG_DECODER | CODEC_FLAG_VFE,
+ .type = CODEC_TYPE_ZR36060,
+ .setup = zr36060_setup, // functionality
+ .unset = zr36060_unset,
+ .set_mode = zr36060_set_mode,
+ .set_video = zr36060_set_video,
+ .control = zr36060_control,
+ // others are not used
+};
+
+static int __init zr36060_init_module(void)
+{
+ zr36060_codecs = 0;
+ return videocodec_register(&zr36060_codec);
+}
+
+static void __exit zr36060_cleanup_module(void)
+{
+ if (zr36060_codecs) {
+ dprintk(1,
+ "zr36060: something's wrong - %d codecs left somehow.\n",
+ zr36060_codecs);
+ }
+
+ /* we can't just stay registered, though - unregister in any case */
+ videocodec_unregister(&zr36060_codec);
+}
+
+module_init(zr36060_init_module);
+module_exit(zr36060_cleanup_module);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>");
+MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " ZR060_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/zoran/zr36060.h b/drivers/staging/media/zoran/zr36060.h
new file mode 100644
index 000000000000..d2cdc26bf625
--- /dev/null
+++ b/drivers/staging/media/zoran/zr36060.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran ZR36060 basic configuration functions - header file
+ *
+ * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be>
+ */
+
+#ifndef ZR36060_H
+#define ZR36060_H
+
+#include "videocodec.h"
+
+/* data stored for each zoran jpeg codec chip */
+struct zr36060 {
+ char name[32];
+ int num;
+ /* io datastructure */
+ struct videocodec *codec;
+ // last coder status
+ __u8 status;
+ // actual coder setup
+ int mode;
+
+ __u16 width;
+ __u16 height;
+
+ __u16 bitrate_ctrl;
+
+ __u32 total_code_vol;
+ __u32 real_code_vol;
+ __u16 max_block_vol;
+
+ __u8 h_samp_ratio[8];
+ __u8 v_samp_ratio[8];
+ __u16 scalefact;
+ __u16 dri;
+
+ /* app/com marker data */
+ struct jpeg_app_marker app;
+ struct jpeg_com_marker com;
+};
+
+/* ZR36060 register addresses */
+#define ZR060_LOAD 0x000
+#define ZR060_CFSR 0x001
+#define ZR060_CIR 0x002
+#define ZR060_CMR 0x003
+#define ZR060_MBZ 0x004
+#define ZR060_MBCVR 0x005
+#define ZR060_MER 0x006
+#define ZR060_IMR 0x007
+#define ZR060_ISR 0x008
+#define ZR060_TCV_NET_HI 0x009
+#define ZR060_TCV_NET_MH 0x00a
+#define ZR060_TCV_NET_ML 0x00b
+#define ZR060_TCV_NET_LO 0x00c
+#define ZR060_TCV_DATA_HI 0x00d
+#define ZR060_TCV_DATA_MH 0x00e
+#define ZR060_TCV_DATA_ML 0x00f
+#define ZR060_TCV_DATA_LO 0x010
+#define ZR060_SF_HI 0x011
+#define ZR060_SF_LO 0x012
+#define ZR060_AF_HI 0x013
+#define ZR060_AF_M 0x014
+#define ZR060_AF_LO 0x015
+#define ZR060_ACV_HI 0x016
+#define ZR060_ACV_MH 0x017
+#define ZR060_ACV_ML 0x018
+#define ZR060_ACV_LO 0x019
+#define ZR060_ACT_HI 0x01a
+#define ZR060_ACT_MH 0x01b
+#define ZR060_ACT_ML 0x01c
+#define ZR060_ACT_LO 0x01d
+#define ZR060_ACV_TURN_HI 0x01e
+#define ZR060_ACV_TURN_MH 0x01f
+#define ZR060_ACV_TURN_ML 0x020
+#define ZR060_ACV_TURN_LO 0x021
+#define ZR060_IDR_DEV 0x022
+#define ZR060_IDR_REV 0x023
+#define ZR060_TCR_HI 0x024
+#define ZR060_TCR_LO 0x025
+#define ZR060_VCR 0x030
+#define ZR060_VPR 0x031
+#define ZR060_SR 0x032
+#define ZR060_BCR_Y 0x033
+#define ZR060_BCR_U 0x034
+#define ZR060_BCR_V 0x035
+#define ZR060_SGR_VTOTAL_HI 0x036
+#define ZR060_SGR_VTOTAL_LO 0x037
+#define ZR060_SGR_HTOTAL_HI 0x038
+#define ZR060_SGR_HTOTAL_LO 0x039
+#define ZR060_SGR_VSYNC 0x03a
+#define ZR060_SGR_HSYNC 0x03b
+#define ZR060_SGR_BVSTART 0x03c
+#define ZR060_SGR_BHSTART 0x03d
+#define ZR060_SGR_BVEND_HI 0x03e
+#define ZR060_SGR_BVEND_LO 0x03f
+#define ZR060_SGR_BHEND_HI 0x040
+#define ZR060_SGR_BHEND_LO 0x041
+#define ZR060_AAR_VSTART_HI 0x042
+#define ZR060_AAR_VSTART_LO 0x043
+#define ZR060_AAR_VEND_HI 0x044
+#define ZR060_AAR_VEND_LO 0x045
+#define ZR060_AAR_HSTART_HI 0x046
+#define ZR060_AAR_HSTART_LO 0x047
+#define ZR060_AAR_HEND_HI 0x048
+#define ZR060_AAR_HEND_LO 0x049
+#define ZR060_SWR_VSTART_HI 0x04a
+#define ZR060_SWR_VSTART_LO 0x04b
+#define ZR060_SWR_VEND_HI 0x04c
+#define ZR060_SWR_VEND_LO 0x04d
+#define ZR060_SWR_HSTART_HI 0x04e
+#define ZR060_SWR_HSTART_LO 0x04f
+#define ZR060_SWR_HEND_HI 0x050
+#define ZR060_SWR_HEND_LO 0x051
+
+#define ZR060_SOF_IDX 0x060
+#define ZR060_SOS_IDX 0x07a
+#define ZR060_DRI_IDX 0x0c0
+#define ZR060_DQT_IDX 0x0cc
+#define ZR060_DHT_IDX 0x1d4
+#define ZR060_APP_IDX 0x380
+#define ZR060_COM_IDX 0x3c0
+
+/* ZR36060 LOAD register bits */
+
+#define ZR060_LOAD_LOAD BIT(7)
+#define ZR060_LOAD_SYNC_RST BIT(0)
+
+/* ZR36060 Code FIFO Status register bits */
+
+#define ZR060_CFSR_BUSY BIT(7)
+#define ZR060_CFSR_C_BUSY BIT(2)
+#define ZR060_CFSR_CFIFO (3 << 0)
+
+/* ZR36060 Code Interface register */
+
+#define ZR060_CIR_CODE16 BIT(7)
+#define ZR060_CIR_ENDIAN BIT(6)
+#define ZR060_CIR_CFIS BIT(2)
+#define ZR060_CIR_CODE_MSTR BIT(0)
+
+/* ZR36060 Codec Mode register */
+
+#define ZR060_CMR_COMP BIT(7)
+#define ZR060_CMR_ATP BIT(6)
+#define ZR060_CMR_PASS2 BIT(5)
+#define ZR060_CMR_TLM BIT(4)
+#define ZR060_CMR_BRB BIT(2)
+#define ZR060_CMR_FSF BIT(1)
+
+/* ZR36060 Markers Enable register */
+
+#define ZR060_MER_APP BIT(7)
+#define ZR060_MER_COM BIT(6)
+#define ZR060_MER_DRI BIT(5)
+#define ZR060_MER_DQT BIT(4)
+#define ZR060_MER_DHT BIT(3)
+
+/* ZR36060 Interrupt Mask register */
+
+#define ZR060_IMR_EOAV BIT(3)
+#define ZR060_IMR_EOI BIT(2)
+#define ZR060_IMR_END BIT(1)
+#define ZR060_IMR_DATA_ERR BIT(0)
+
+/* ZR36060 Interrupt Status register */
+
+#define ZR060_ISR_PRO_CNT (3 << 6)
+#define ZR060_ISR_EOAV BIT(3)
+#define ZR060_ISR_EOI BIT(2)
+#define ZR060_ISR_END BIT(1)
+#define ZR060_ISR_DATA_ERR BIT(0)
+
+/* ZR36060 Video Control register */
+
+#define ZR060_VCR_VIDEO8 BIT(7)
+#define ZR060_VCR_RANGE BIT(6)
+#define ZR060_VCR_FI_DET BIT(3)
+#define ZR060_VCR_FI_VEDGE BIT(2)
+#define ZR060_VCR_FI_EXT BIT(1)
+#define ZR060_VCR_SYNC_MSTR BIT(0)
+
+/* ZR36060 Video Polarity register */
+
+#define ZR060_VPR_VCLK_POL BIT(7)
+#define ZR060_VPR_P_VAL_POL BIT(6)
+#define ZR060_VPR_POE_POL BIT(5)
+#define ZR060_VPR_S_IMG_POL BIT(4)
+#define ZR060_VPR_BL_POL BIT(3)
+#define ZR060_VPR_FI_POL BIT(2)
+#define ZR060_VPR_HS_POL BIT(1)
+#define ZR060_VPR_VS_POL BIT(0)
+
+/* ZR36060 Scaling register */
+
+#define ZR060_SR_V_SCALE BIT(2)
+#define ZR060_SR_H_SCALE2 BIT(0)
+#define ZR060_SR_H_SCALE4 (2 << 0)
+
+#endif /* ifndef ZR36060_H */
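
For example, the compression-mode byte that zr36060_init() writes to ZR060_CMR decomposes, using the masks above, as follows (sketch; assumes this header, and thus BIT() from <linux/bits.h>, is already included):

#include <linux/types.h>

/* COMP (0x80) | PASS2 (0x20) | BRB (0x04) == 0xa4 */
static inline u8 zr060_cmr_compress(void)
{
	return ZR060_CMR_COMP | ZR060_CMR_PASS2 | ZR060_CMR_BRB;
}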
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index c35fb34fae79..535e6dec3504 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -18,8 +18,6 @@ menuconfig MOST_COMPONENTS
if MOST_COMPONENTS
-source "drivers/staging/most/cdev/Kconfig"
-
source "drivers/staging/most/net/Kconfig"
source "drivers/staging/most/sound/Kconfig"
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 7c10b84ebac0..be94673209f5 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MOST_CDEV) += cdev/
obj-$(CONFIG_MOST_NET) += net/
obj-$(CONFIG_MOST_SOUND) += sound/
obj-$(CONFIG_MOST_VIDEO) += video/
diff --git a/drivers/staging/most/cdev/Kconfig b/drivers/staging/most/cdev/Kconfig
deleted file mode 100644
index dab99477858e..000000000000
--- a/drivers/staging/most/cdev/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# MOST Cdev configuration
-#
-
-config MOST_CDEV
- tristate "Cdev"
-
- help
- Say Y here if you want to commumicate via character devices.
-
- To compile this driver as a module, choose M here: the
- module will be called most_cdev.
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
deleted file mode 100644
index ef90cd71994a..000000000000
--- a/drivers/staging/most/cdev/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MOST_CDEV) += most_cdev.o
-
-most_cdev-objs := cdev.o
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 509c8012d20b..b34e3c130f53 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -100,12 +100,12 @@ struct dim2_hdm {
struct medialb_bus bus;
void (*on_netinfo)(struct most_interface *most_iface,
unsigned char link_state, unsigned char *addrs);
- void (*disable_platform)(struct platform_device *);
+ void (*disable_platform)(struct platform_device *pdev);
};
struct dim2_platform_data {
- int (*enable)(struct platform_device *);
- void (*disable)(struct platform_device *);
+ int (*enable)(struct platform_device *pdev);
+ void (*disable)(struct platform_device *pdev);
};
#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index 14592ed9ce98..354536783e1c 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -533,9 +533,9 @@ static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
mtk_hsdma_chan_done(hsdma, chan);
}
-static void mtk_hsdma_tasklet(unsigned long arg)
+static void mtk_hsdma_tasklet(struct tasklet_struct *t)
{
- struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
+ struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
mtk_hsdma_rx(hsdma);
mtk_hsdma_tx(hsdma);
@@ -670,7 +670,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
hsdma->base = base + HSDMA_BASE_OFFSET;
- tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
+ tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/staging/mt7621-pci/TODO b/drivers/staging/mt7621-pci/TODO
index ccfd266db4ca..d674a9ac85c1 100644
--- a/drivers/staging/mt7621-pci/TODO
+++ b/drivers/staging/mt7621-pci/TODO
@@ -1,4 +1,4 @@
- general code review and cleanup
-Cc: NeilBrown <neil@brown.name>
+Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 360ec0407740..a80996b2f5ce 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL(nvec_write_async);
* interrupt handlers.
*
* Returns: 0 on success, a negative error code on failure.
- * The response message is returned in @msg. Shall be freed with
+ * The response message is returned in @msg. Shall be freed
* with nvec_msg_free() once no longer used.
*
*/
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 61471a19d4e6..e2f8b6b67f75 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -1233,8 +1233,7 @@ static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(
- CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
words -= 3;
}
cvmx_write64_uint32(csr_address, *ptr++);
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index cfb673a52b25..0bf545849b11 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -147,12 +147,6 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
- int rc;
-
- rc = of_phy_register_fixed_link(priv->of_node);
- if (rc)
- return rc;
-
phy_node = of_node_get(priv->of_node);
}
if (!phy_node)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 2c16230f993c..9ebd665e5d42 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -69,15 +69,17 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
else
port = work->word1.cn38xx.ipprt;
- if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
+ if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))
/*
* Ignore length errors on min size packets. Some
* equipment incorrectly pads packets to 64+4FCS
* instead of 60+4FCS. Note these packets still get
* counted as frame errors.
*/
- } else if (work->word2.snoip.err_code == 5 ||
- work->word2.snoip.err_code == 7) {
+ return 0;
+
+ if (work->word2.snoip.err_code == 5 ||
+ work->word2.snoip.err_code == 7) {
/*
* We received a packet with either an alignment error
* or a FCS error. This may be signalling that we are
@@ -108,7 +110,10 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
/* Port received 0xd5 preamble */
work->packet_ptr.s.addr += i + 1;
work->word1.len -= i + 5;
- } else if ((*ptr & 0xf) == 0xd) {
+ return 0;
+ }
+
+ if ((*ptr & 0xf) == 0xd) {
/* Port received 0xd preamble */
work->packet_ptr.s.addr += i;
work->word1.len -= i + 4;
@@ -118,21 +123,20 @@ static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
((*(ptr + 1) & 0xf) << 4);
ptr++;
}
- } else {
- printk_ratelimited("Port %d unknown preamble, packet dropped\n",
- port);
- cvm_oct_free_work(work);
- return 1;
+ return 0;
}
+
+ printk_ratelimited("Port %d unknown preamble, packet dropped\n",
+ port);
+ cvm_oct_free_work(work);
+ return 1;
}
- } else {
- printk_ratelimited("Port %d receive error code %d, packet dropped\n",
- port, work->word2.snoip.err_code);
- cvm_oct_free_work(work);
- return 1;
}
- return 0;
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ port, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
}
static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
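
The hunk above flattens a nested if/else ladder into early returns, the preferred kernel style for error classification. A minimal sketch of the resulting control flow, with illustrative names rather than the driver's real structures:

	/* Sketch only: err_code values 10/5/7 mirror the cases above. */
	static int classify_rcv_error(int err_code, int len)
	{
		if (err_code == 10 && len <= 64)
			return 0;	/* ignore length errors on min-size packets */

		if (err_code == 5 || err_code == 7)
			return 0;	/* alignment/FCS error, preamble repaired earlier */

		return 1;		/* unknown error: caller drops the packet */
	}
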
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 204f0b1e2739..5dea6e96ec90 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -13,6 +13,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
@@ -892,6 +893,14 @@ static int cvm_oct_probe(struct platform_device *pdev)
break;
}
+ if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
+ if (of_phy_register_fixed_link(priv->of_node)) {
+ netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
+ interface, priv->port);
+ dev->netdev_ops = NULL;
+ }
+ }
+
if (!dev->netdev_ops) {
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
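
Taken together with the ethernet-mdio.c hunk earlier, this moves fixed-link registration from PHY setup time to probe time. A hedged sketch of the of_mdio API involved (the two of_phy_* helpers are the real kernel functions; the surrounding code is illustrative):

	#include <linux/of.h>
	#include <linux/of_mdio.h>

	static int example_register_fixed_link(struct device_node *np)
	{
		/* Only DT nodes describing a fixed link need registration. */
		if (!of_phy_is_fixed_link(np))
			return 0;

		/* Creates the fixed PHY; returns a negative errno on failure. */
		return of_phy_register_fixed_link(np);
	}
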
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
index 16c5b7fba249..d5c1521192c1 100644
--- a/drivers/staging/pi433/pi433_if.h
+++ b/drivers/staging/pi433/pi433_if.h
@@ -117,9 +117,15 @@ struct pi433_rx_cfg {
/* packet format */
enum option_on_off enable_sync;
- enum option_on_off enable_length_byte; /* should be used in combination with sync, only */
- enum address_filtering enable_address_filtering; /* operational with sync, only */
- enum option_on_off enable_crc; /* only operational, if sync on and fixed length or length byte is used */
+
+ /* should be used in combination with sync, only */
+ enum option_on_off enable_length_byte;
+
+ /* operational with sync, only */
+ enum address_filtering enable_address_filtering;
+
+ /* only operational, if sync on and fixed length or length byte is used */
+ enum option_on_off enable_crc;
__u8 sync_length;
__u8 fixed_message_length;
@@ -130,12 +136,16 @@ struct pi433_rx_cfg {
__u8 broadcast_address;
};
-#define PI433_IOC_MAGIC 'r'
+#define PI433_IOC_MAGIC 'r'
-#define PI433_IOC_RD_TX_CFG _IOR(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
-#define PI433_IOC_WR_TX_CFG _IOW(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
+#define PI433_IOC_RD_TX_CFG \
+ _IOR(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
+#define PI433_IOC_WR_TX_CFG \
+ _IOW(PI433_IOC_MAGIC, PI433_TX_CFG_IOCTL_NR, char[sizeof(struct pi433_tx_cfg)])
-#define PI433_IOC_RD_RX_CFG _IOR(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
-#define PI433_IOC_WR_RX_CFG _IOW(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
+#define PI433_IOC_RD_RX_CFG \
+ _IOR(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
+#define PI433_IOC_WR_RX_CFG \
+ _IOW(PI433_IOC_MAGIC, PI433_RX_CFG_IOCTL_NR, char[sizeof(struct pi433_rx_cfg)])
#endif /* PI433_H */
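
The reflowed macros still expand to the same ioctl numbers: _IOR/_IOW encode the direction, the 'r' magic, the command number, and the argument size. A sketch of a userspace caller, assuming a hypothetical /dev/pi433.0 device node:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "pi433_if.h"

	int read_tx_cfg(struct pi433_tx_cfg *cfg)
	{
		int fd = open("/dev/pi433.0", O_RDWR);	/* hypothetical path */
		int rc;

		if (fd < 0)
			return -1;
		rc = ioctl(fd, PI433_IOC_RD_TX_CFG, cfg);
		close(fd);
		return rc;
	}
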
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 483ce04789ed..b295990e361b 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QLogic QLA41xx NIC HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qlge for copyright and licensing details.
*/
#ifndef _QLGE_H_
#define _QLGE_H_
@@ -2338,21 +2337,21 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#endif
#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
+void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp);
+#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) ql_dump_ob_mac_iocb(qdev, ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp)
#else
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
+#define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb)
+#define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp)
#endif
#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
+void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp);
+#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp)
#else
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
+#define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index a55bf0b3e9dc..42fd13990f3a 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -1431,7 +1431,7 @@ void ql_dump_routing_entries(struct ql_adapter *qdev)
}
if (value)
netdev_err(qdev->ndev,
- "%s: Routing Mask %d = 0x%.08x\n",
+ "Routing Mask %d = 0x%.08x\n",
i, value);
}
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
@@ -1617,6 +1617,9 @@ void ql_dump_qdev(struct ql_adapter *qdev)
#ifdef QL_CB_DUMP
void ql_dump_wqicb(struct wqicb *wqicb)
{
+ struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
+ struct ql_adapter *qdev = tx_ring->qdev;
+
netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
netdev_err(qdev->ndev, "wqicb->flags = %x\n",
@@ -1632,8 +1635,8 @@ void ql_dump_wqicb(struct wqicb *wqicb)
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
- if (!tx_ring)
- return;
+ struct ql_adapter *qdev = tx_ring->qdev;
+
netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
tx_ring->wq_id);
netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
@@ -1657,6 +1660,8 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
void ql_dump_ricb(struct ricb *ricb)
{
int i;
+ struct ql_adapter *qdev =
+ container_of(ricb, struct ql_adapter, ricb);
netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
@@ -1686,6 +1691,9 @@ void ql_dump_ricb(struct ricb *ricb)
void ql_dump_cqicb(struct cqicb *cqicb)
{
+ struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
+ struct ql_adapter *qdev = rx_ring->qdev;
+
netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
@@ -1725,8 +1733,8 @@ static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
- if (!rx_ring)
- return;
+ struct ql_adapter *qdev = rx_ring->qdev;
+
netdev_err(qdev->ndev,
"===================== Dumping rx_ring %d ===============\n",
rx_ring->cq_id);
@@ -1816,7 +1824,7 @@ fail_it:
#endif
#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd)
+void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
{
netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
le64_to_cpu((u64)tbd->addr));
@@ -1843,7 +1851,7 @@ void ql_dump_tx_desc(struct tx_buf_desc *tbd)
tbd->len & TX_DESC_E ? "E" : ".");
}
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
+void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
{
struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
(struct ob_mac_tso_iocb_req *)ob_mac_iocb;
@@ -1886,10 +1894,10 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
}
tbd = &ob_mac_iocb->tbd[0];
- ql_dump_tx_desc(tbd);
+ ql_dump_tx_desc(qdev, tbd);
}
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
+void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
{
netdev_err(qdev->ndev, "%s\n", __func__);
netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode);
@@ -1906,7 +1914,7 @@ void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
#endif
#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
+void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
{
netdev_err(qdev->ndev, "%s\n", __func__);
netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode);
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 2028458bea6f..27da386f9d87 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic qlge NIC HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
- * See LICENSE.qlge for copyright and licensing details.
* Author: Linux qlge network device driver by
* Ron Mercer <ron.mercer@qlogic.com>
*/
@@ -1856,7 +1856,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
@@ -1954,7 +1954,7 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
@@ -2001,7 +2001,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct tx_ring *tx_ring;
struct tx_ring_desc *tx_ring_desc;
- QL_DUMP_OB_MAC_RSP(mac_rsp);
+ QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
tx_ring_desc = &tx_ring->q[mac_rsp->tid];
ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
@@ -2079,9 +2079,9 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
break;
case PCI_ERR_ANON_BUF_RD:
- netdev_err(qdev->ndev, "PCI error occurred when reading "
- "anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netdev_err(qdev->ndev,
+ "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
@@ -2415,8 +2415,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
ql_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- netdev_err(qdev->ndev, "Resetting chip. "
- "Error Status Register = 0x%x\n", var);
+ netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2593,7 +2592,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
- QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
+ QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
tx_ring->prod_idx = 0;
@@ -3739,8 +3738,7 @@ static void ql_display_dev_info(struct net_device *ndev)
struct ql_adapter *qdev = netdev_priv(ndev);
netif_info(qdev, probe, qdev->ndev,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
qdev->func,
qdev->port,
qdev->chip_rev_id & 0x0000000f,
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index e85c6ab538df..143a886080c5 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -117,7 +117,6 @@ int ql_own_firmware(struct ql_adapter *qdev)
return 1;
return 0;
-
}
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
@@ -240,12 +239,12 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
netif_err(qdev, drv, qdev->ndev,
"Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
- } else
+ } else {
/* Wake up the sleeping mpi_idc_work thread that is
* waiting for this event.
*/
complete(&qdev->ide_completion);
-
+ }
return status;
}
@@ -347,16 +346,15 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->out_count = 6;
status = ql_get_mb_sts(qdev, mbcp);
- if (status)
+ if (status) {
netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
- else {
+ } else {
int i;
netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
i, mbcp->mbox_out[i]);
-
}
return status;
@@ -405,7 +403,6 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
}
switch (mbcp->mbox_out[0]) {
-
/* This case is only active when we arrive here
* as a result of issuing a mailbox command to
* the firmware.
@@ -998,9 +995,9 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
netif_err(qdev, drv, qdev->ndev,
"Failed to get LED Configuration.\n");
status = -EIO;
- } else
+ } else {
qdev->led_config = mbcp->mbox_out[1];
-
+ }
return status;
}
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index eabf1093328e..655df317d0ee 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -701,9 +701,9 @@ static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
}
-static void gdma_dma_tasklet(unsigned long arg)
+static void gdma_dma_tasklet(struct tasklet_struct *t)
{
- struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
+ struct gdma_dma_dev *dma_dev = from_tasklet(dma_dev, t, task);
struct gdma_dmaengine_chan *chan;
static unsigned int last_chan;
unsigned int i, chan_mask;
@@ -821,7 +821,7 @@ static int gdma_dma_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
dma_dev->base = base;
- tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
+ tasklet_setup(&dma_dev->task, gdma_dma_tasklet);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
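
This is the stock tasklet_init() to tasklet_setup() conversion: the callback now receives the tasklet pointer and recovers its device with from_tasklet() instead of casting an unsigned long. A sketch of the pattern with illustrative names:

	#include <linux/interrupt.h>

	struct my_dev {
		struct tasklet_struct task;
	};

	static void my_dev_tasklet(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() keyed on the member name. */
		struct my_dev *dev = from_tasklet(dev, t, task);

		/* ... process dev ... */
	}

	static void my_dev_init(struct my_dev *dev)
	{
		tasklet_setup(&dev->task, my_dev_tasklet);
		tasklet_schedule(&dev->task);
	}
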
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 41535441f82c..2078d87055bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -422,7 +422,7 @@ static void update_bmc_sta(struct adapter *padapter)
int i, supportRateNum = 0;
unsigned int tx_ra_bitmap = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pcur_network = &pmlmepriv->cur_network.network;
struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
if (psta) {
@@ -599,7 +599,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
struct registry_priv *pregpriv = &padapter->registrypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pnetwork = &pmlmepriv->cur_network.network;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
@@ -711,7 +711,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
update_wireless_mode(padapter);
/* update capability after cur_wireless_mode updated */
- update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
+ update_capinfo(padapter, rtw_get_capability(pnetwork));
/* let pnetwork_mlmeext == pnetwork_mlme. */
memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
@@ -745,7 +745,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pbss_network = &pmlmepriv->cur_network.network;
u8 *ie = pbss_network->ies;
/* SSID */
@@ -982,7 +982,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
HT_info_handler(padapter, (struct ndis_802_11_var_ie *)pHT_info_ie);
}
- pbss_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pbss_network);
+ pbss_network->Length = get_wlan_bssid_ex_sz(pbss_network);
/* issue beacon to start bss network */
start_bss_network(padapter, (u8 *)pbss_network);
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index a97d50081071..a2b88ba242d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -159,14 +159,16 @@ int rtw_cmd_thread(void *context)
if (padapter->bDriverStopped ||
padapter->bSurpriseRemoved) {
DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ __func__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, __LINE__);
break;
}
_next:
if (padapter->bDriverStopped ||
padapter->bSurpriseRemoved) {
DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ __func__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, __LINE__);
break;
}
@@ -195,14 +197,18 @@ _next:
if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
if (!pcmd_callback) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n", pcmd_callback, pcmd->cmdcode));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n",
+ pcmd_callback, pcmd->cmdcode));
rtw_free_cmd_obj(pcmd);
} else {
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */
}
} else {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("%s: cmdcode = 0x%x callback not defined!\n",
+ __func__, pcmd->cmdcode));
rtw_free_cmd_obj(pcmd);
}
@@ -264,7 +270,8 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
if (ssid[i].ssid_length) {
- memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
+ memcpy(&psurveyPara->ssid[i], &ssid[i],
+ sizeof(struct ndis_802_11_ssid));
psurveyPara->ssid_num++;
}
}
@@ -276,7 +283,8 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
- memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
+ memcpy(&psurveyPara->ch[i], &ch[i],
+ sizeof(struct rtw_ieee80211_channel));
psurveyPara->ch_num++;
}
}
@@ -317,9 +325,11 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
led_control_8188eu(padapter, LED_CTL_START_TO_LINK);
if (pmlmepriv->assoc_ssid.ssid_length == 0)
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.ssid));
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
@@ -330,7 +340,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
INIT_LIST_HEAD(&pcmd->list);
pcmd->cmdcode = _CreateBss_CMD_;
pcmd->parmbuf = (unsigned char *)pdev_network;
- pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pcmd->cmdsz = get_wlan_bssid_ex_sz(pdev_network);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
pdev_network->Length = pcmd->cmdsz;
@@ -361,7 +371,8 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
if (pmlmepriv->assoc_ssid.ssid_length == 0)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
else
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.ssid));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_,
+ ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.ssid));
pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC);
if (!pcmd) {
@@ -387,7 +398,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
}
}
- psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ psecnetwork = &psecuritypriv->sec_bss;
if (!psecnetwork) {
kfree(pcmd);
@@ -406,7 +417,8 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->ie_length;
if (psecnetwork->ie_length - 12 < 255)
- memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length - 12);
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12],
+ psecnetwork->ie_length - 12);
else
memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], 255);
@@ -419,14 +431,19 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
if (!pmlmepriv->assoc_by_bssid)
memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
- psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length);
+ psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0],
+ &psecnetwork->ies[0],
+ pnetwork->network.ie_length);
pqospriv->qos_option = 0;
if (pregistrypriv->wmm_enable) {
u32 tmp_len;
- tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length, psecnetwork->ie_length);
+ tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.ies[0],
+ &psecnetwork->ies[0],
+ pnetwork->network.ie_length,
+ psecnetwork->ie_length);
if (psecnetwork->ie_length != tmp_len) {
psecnetwork->ie_length = tmp_len;
@@ -448,7 +465,8 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
(padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
(padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
/* rtw_restructure_ht_ie */
- rtw_restructure_ht_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0],
+ rtw_restructure_ht_ie(padapter, &pnetwork->network.ies[0],
+ &psecnetwork->ies[0],
pnetwork->network.ie_length, &psecnetwork->ie_length);
}
}
@@ -573,7 +591,8 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
if (unicast_key)
memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
else
- memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
+ memcpy(&psetstakey_para->key,
+ &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
/* jeff: set this because at least sw key is ready */
padapter->securitypriv.busetkipkey = true;
@@ -760,7 +779,8 @@ static void traffic_status_watchdog(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 100) {
bBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
bRxBusyTraffic = true;
else
bTxBusyTraffic = true;
@@ -771,7 +791,8 @@ static void traffic_status_watchdog(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
bHigherBusyTraffic = true;
- if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod >
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
bHigherBusyRxTraffic = true;
else
bHigherBusyTxTraffic = true;
@@ -1127,7 +1148,8 @@ void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
} else if (pcmd->res != H2C_SUCCESS) {
mod_timer(&pmlmepriv->scan_to_timer,
jiffies + msecs_to_jiffies(1));
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
}
/* free cmd */
@@ -1143,7 +1165,8 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
set_fwstate(pmlmepriv, _FW_LINKED);
spin_unlock_bh(&pmlmepriv->lock);
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
return;
}
@@ -1161,7 +1184,8 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
} else if (pcmd->res != H2C_SUCCESS) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n"));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n"));
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1));
}
@@ -1193,7 +1217,8 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
if (!psta) {
psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
goto createbss_cmd_fail;
}
}
@@ -1205,7 +1230,8 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
if (!pwlan) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
@@ -1242,7 +1268,8 @@ void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pc
struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nERROR: %s => can't get sta_info\n\n", __func__));
goto exit;
}
exit:
@@ -1258,7 +1285,8 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
if (!psta) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nERROR: %s => can't get sta_info\n\n", __func__));
goto exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index fcc8bd1011e1..3c0d20cb9c6a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -33,7 +33,7 @@ int proc_set_write_reg(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
char tmp[32];
u32 addr, val, len;
@@ -75,7 +75,7 @@ int proc_get_read_reg(char *page, char **start,
int *eof, void *data)
{
struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
int len = 0;
@@ -135,7 +135,7 @@ int proc_get_adapter_state(char *page, char **start,
int *eof, void *data)
{
struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
int len = 0;
len += scnprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
@@ -150,7 +150,7 @@ int proc_get_best_channel(char *page, char **start,
int *eof, void *data)
{
struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int len = 0;
u32 i, best_channel_24G = 1, index_24G = 0;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index d334dc335914..9d12f92994b3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -1672,8 +1672,8 @@ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
int i = 0;
do {
- if ((psecuritypriv->PMKIDList[i].bUsed) &&
- (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN)))
+ if ((psecuritypriv->PMKIDList[i].used) &&
+ (!memcmp(psecuritypriv->PMKIDList[i].bssid, bssid, ETH_ALEN)))
break;
} while (++i < NUM_PMKID_CACHE);
@@ -1730,7 +1730,7 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
(ndisauthmode == Ndis802_11AuthModeWPAPSK))
authmode = _WPA_IE_ID_;
else if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
- (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
+ (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
authmode = _WPA2_IE_ID_;
else
authmode = 0x0;
@@ -1815,7 +1815,7 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
sz = rtw_generate_ie(pregistrypriv);
pdev_network->ie_length = sz;
- pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pdev_network->Length = get_wlan_bssid_ex_sz(pdev_network);
/* notes: translate ie_length & Length after assign the Length to cmdsz in createbss_cmd(); */
/* pdev_network->ie_length = cpu_to_le32(sz); */
@@ -1894,9 +1894,9 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
/*
- ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- ampdu_params_info [4:2]:Min MPDU Start Spacing
- */
+ * ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ * ampdu_params_info [4:2]:Min MPDU Start Spacing
+ */
rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
ht_cap.ampdu_params_info = max_rx_ampdu_factor & 0x03;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 98b1ba2e489f..b3eddcb83cd0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -19,9 +19,7 @@
static u8 null_addr[ETH_ALEN] = {};
-/**************************************************
-OUI definitions for the vendor specific IE
-***************************************************/
+/* OUI definitions for the vendor specific IE */
const u8 RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
const u8 WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
static const u8 WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
@@ -32,17 +30,13 @@ static const u8 WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
const u8 WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
const u8 RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
-/********************************************************
-MCS rate definitions
-*********************************************************/
+/* MCS rate definitions */
const u8 MCS_rate_1R[16] = {
0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
-/********************************************************
-ChannelPlan definitions
-*********************************************************/
+/* ChannelPlan definitions */
static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
@@ -1777,7 +1771,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
plist = plist->next;
- pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
+ pbss_network = &pnetwork->network;
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
if (!p || len == 0) { /* non-HT */
@@ -2137,7 +2131,7 @@ static u8 collect_bss_info(struct adapter *padapter,
bssid->Configuration.BeaconPeriod =
get_unaligned_le16(rtw_get_beacon_interval_from_ie(bssid->ies));
- val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+ val16 = rtw_get_capability(bssid);
if (val16 & BIT(0)) {
bssid->InfrastructureMode = Ndis802_11Infrastructure;
@@ -2183,7 +2177,7 @@ static void start_create_ibss(struct adapter *padapter)
u8 join_type;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
@@ -2192,7 +2186,7 @@ static void start_create_ibss(struct adapter *padapter)
update_wireless_mode(padapter);
/* update capability */
- caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ caps = rtw_get_capability(pnetwork);
update_capinfo(padapter, caps);
if (caps & cap_IBSS) {/* adhoc master */
val8 = 0xcf;
@@ -2234,7 +2228,7 @@ static void start_clnt_join(struct adapter *padapter)
u8 val8;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
int beacon_timeout;
pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
@@ -2244,7 +2238,7 @@ static void start_clnt_join(struct adapter *padapter)
update_wireless_mode(padapter);
/* update capability */
- caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ caps = rtw_get_capability(pnetwork);
update_capinfo(padapter, caps);
if (caps & cap_ESS) {
Set_MSR(padapter, WIFI_FW_STATION_STATE);
@@ -2599,9 +2593,9 @@ static unsigned int OnBeacon(struct adapter *padapter,
if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
- DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
- return _SUCCESS;
+ DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
+ return _SUCCESS;
}
/* update WMM, ERP in the beacon */
/* todo: the timer is used instead of the number of the beacon received */
@@ -2929,7 +2923,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (!pstat) {
- status = _RSON_CLS2_;
+ status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
goto asoc_class2_error;
}
@@ -2943,7 +2937,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
/* check if this stat has been successfully authenticated/assocated */
if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
- status = _RSON_CLS2_;
+ status = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
goto asoc_class2_error;
} else {
pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
@@ -2981,7 +2975,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
status = _STATS_FAILURE_;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
/* check if the supported rate is ok */
@@ -3072,7 +3066,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
wpa_ie_len = 0;
}
- if (_STATS_SUCCESSFUL_ != status)
+ if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
@@ -3282,7 +3276,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
spin_unlock_bh(&pstapriv->asoc_list_lock);
/* now the station is qualified to join our BSS... */
- if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+ if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -3546,12 +3540,12 @@ static unsigned int on_action_spct(struct adapter *padapter,
action = frame_body[1];
switch (action) {
- case RTW_WLAN_ACTION_SPCT_MSR_REQ:
- case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
- case RTW_WLAN_ACTION_SPCT_TPC_REQ:
- case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
+ case WLAN_ACTION_SPCT_MSR_REQ:
+ case WLAN_ACTION_SPCT_MSR_RPRT:
+ case WLAN_ACTION_SPCT_TPC_REQ:
+ case WLAN_ACTION_SPCT_TPC_RPRT:
break;
- case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
+ case WLAN_ACTION_SPCT_CHL_SWITCH:
break;
default:
break;
@@ -4199,7 +4193,7 @@ void report_survey_event(struct adapter *padapter,
psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
- if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
+ if (collect_bss_info(padapter, precv_frame, &psurvey_evt->bss) == _FAIL) {
kfree(pcmd_obj);
kfree(pevtcmd);
return;
@@ -4857,7 +4851,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
if (pparm->InfrastructureMode == Ndis802_11APMode) {
@@ -4919,7 +4913,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
struct registry_priv *pregpriv = &padapter->registrypriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
struct wlan_bssid_ex *pparm = (struct wlan_bssid_ex *)pbuf;
u32 i;
@@ -5030,7 +5024,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&pmlmeinfo->network);
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
u8 val8;
if (is_client_associated_to_ap(padapter))
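
Besides dropping redundant casts, this file switches from driver-local _RSON_CLS2_ and RTW_WLAN_ACTION_SPCT_* constants to the shared definitions in <linux/ieee80211.h>. A one-line sketch of the preferred form:

	#include <linux/ieee80211.h>

	/* IEEE 802.11 reason code 6, taken from the shared header rather
	 * than a driver-local copy. */
	static const u16 class2_reason = WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA;
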
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 39ca97411fd5..3848e695ac84 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -84,7 +84,7 @@ static int rtw_hw_resume(struct adapter *padapter)
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
- if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) {
+ if (ips_netdrv_open(rtw_netdev_priv(pnetdev)) != _SUCCESS) {
mutex_unlock(&pwrpriv->mutex_lock);
goto error_exit;
}
@@ -530,11 +530,11 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
}
/*
-* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
-* @adapter: pointer to struct adapter structure
-* @ips_deffer_ms: the ms will prevent from falling into IPS after wakeup
-* Return _SUCCESS or _FAIL
-*/
+ * rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
+ * @adapter: pointer to struct adapter structure
+ * @ips_deffer_ms: the ms will prevent from falling into IPS after wakeup
+ * Return _SUCCESS or _FAIL
+ */
int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
{
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 78a8ac60bf3d..46ba55a8952a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -142,7 +142,7 @@ void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct sk_buff *skb;
struct lib80211_crypto_ops *crypto_ops;
- if (pxmitframe->buf_addr == NULL)
+ if (!pxmitframe->buf_addr)
return;
if ((pattrib->encrypt != _WEP40_) && (pattrib->encrypt != _WEP104_))
@@ -371,8 +371,6 @@ void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_cod
rtw_secgetmic(&micdata, mic_code);
}
-
-
/* macros for extraction/creation of unsigned char/unsigned short values */
#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
#define Lo8(v16) ((u8)((v16) & 0x00FF))
@@ -591,7 +589,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u32 res = _SUCCESS;
- if (pxmitframe->buf_addr == NULL)
+ if (!pxmitframe->buf_addr)
return _FAIL;
hw_hdr_offset = TXDESC_SIZE +
@@ -604,7 +602,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
else
stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (is_multicast_ether_addr(pattrib->ra))
@@ -662,7 +660,6 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
u8 crc[4];
struct arc4context mycontext;
int length;
-
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
@@ -670,7 +667,6 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
-
pframe = (unsigned char *)precvframe->pkt->data;
/* 4 start to decrypt recvframe */
@@ -726,553 +722,106 @@ exit:
return res;
}
-/* 3 ===== AES related ===== */
-
-
-#define MAX_MSG_SIZE 2048
-/*****************************/
-/******** SBOX Table *********/
-/*****************************/
-
-static const u8 sbox_table[256] = {
- 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
- 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
- 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
- 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
- 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
- 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
- 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
- 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
- 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
- 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
- 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
- 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
- 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
- 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
- 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
- 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
- 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
- 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
- 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
- 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
- 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
- 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
- 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
- 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
- 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
- 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
- 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
- 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
- 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
- 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
- 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
- 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-};
-
-/*****************************/
-/**** Function Prototypes ****/
-/*****************************/
-
-static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
-static void construct_mic_iv(u8 *mic_header1, int qc_exists, int a4_exists, u8 *mpdu, uint payload_length, u8 *pn_vector);
-static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu);
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists);
-static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c);
-static void xor_128(u8 *a, u8 *b, u8 *out);
-static void xor_32(u8 *a, u8 *b, u8 *out);
-static u8 sbox(u8 a);
-static void next_key(u8 *key, int round);
-static void byte_sub(u8 *in, u8 *out);
-static void shift_row(u8 *in, u8 *out);
-static void mix_column(u8 *in, u8 *out);
-static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
-
-/****************************************/
-/* aes128k128d() */
-/* Performs a 128 bit AES encrypt with */
-/* 128 bit data. */
-/****************************************/
-static void xor_128(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = a[i] ^ b[i];
-}
-
-static void xor_32(u8 *a, u8 *b, u8 *out)
-{
- int i;
-
- for (i = 0; i < 4; i++)
- out[i] = a[i] ^ b[i];
-}
-
-static u8 sbox(u8 a)
+u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- return sbox_table[(int)a];
-}
-
-static void next_key(u8 *key, int round)
-{
- u8 rcon;
- u8 sbox_key[4];
- static const u8 rcon_table[12] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- 0x1b, 0x36, 0x36, 0x36
- };
-
- sbox_key[0] = sbox(key[13]);
- sbox_key[1] = sbox(key[14]);
- sbox_key[2] = sbox(key[15]);
- sbox_key[3] = sbox(key[12]);
-
- rcon = rcon_table[round];
-
- xor_32(&key[0], sbox_key, &key[0]);
- key[0] = key[0] ^ rcon;
-
- xor_32(&key[4], &key[0], &key[4]);
- xor_32(&key[8], &key[4], &key[8]);
- xor_32(&key[12], &key[8], &key[12]);
-}
-
-static void byte_sub(u8 *in, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = sbox(in[i]);
-}
-
-static void shift_row(u8 *in, u8 *out)
-{
- out[0] = in[0];
- out[1] = in[5];
- out[2] = in[10];
- out[3] = in[15];
- out[4] = in[4];
- out[5] = in[9];
- out[6] = in[14];
- out[7] = in[3];
- out[8] = in[8];
- out[9] = in[13];
- out[10] = in[2];
- out[11] = in[7];
- out[12] = in[12];
- out[13] = in[1];
- out[14] = in[6];
- out[15] = in[11];
-}
-
-static void mix_column(u8 *in, u8 *out)
-{
- int i;
- u8 add1b[4];
- u8 add1bf7[4];
- u8 rotl[4];
- u8 swap_halves[4];
- u8 andf7[4];
- u8 rotr[4];
- u8 temp[4];
- u8 tempb[4];
-
- for (i = 0 ; i < 4; i++) {
- if ((in[i] & 0x80) == 0x80)
- add1b[i] = 0x1b;
- else
- add1b[i] = 0x00;
- }
-
- swap_halves[0] = in[2]; /* Swap halves */
- swap_halves[1] = in[3];
- swap_halves[2] = in[0];
- swap_halves[3] = in[1];
-
- rotl[0] = in[3]; /* Rotate left 8 bits */
- rotl[1] = in[0];
- rotl[2] = in[1];
- rotl[3] = in[2];
-
- andf7[0] = in[0] & 0x7f;
- andf7[1] = in[1] & 0x7f;
- andf7[2] = in[2] & 0x7f;
- andf7[3] = in[3] & 0x7f;
-
- for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
- andf7[i] = andf7[i] << 1;
- if ((andf7[i - 1] & 0x80) == 0x80)
- andf7[i] = (andf7[i] | 0x01);
- }
- andf7[0] = andf7[0] << 1;
- andf7[0] = andf7[0] & 0xfe;
-
- xor_32(add1b, andf7, add1bf7);
-
- xor_32(in, add1bf7, rotr);
-
- temp[0] = rotr[0]; /* Rotate right 8 bits */
- rotr[0] = rotr[1];
- rotr[1] = rotr[2];
- rotr[2] = rotr[3];
- rotr[3] = temp[0];
-
- xor_32(add1bf7, rotr, temp);
- xor_32(swap_halves, rotl, tempb);
- xor_32(temp, tempb, out);
-}
-
-static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
-{
- int round;
- int i;
- u8 intermediatea[16];
- u8 intermediateb[16];
- u8 round_key[16];
-
- for (i = 0; i < 16; i++)
- round_key[i] = key[i];
- for (round = 0; round < 11; round++) {
- if (round == 0) {
- xor_128(round_key, data, ciphertext);
- next_key(round_key, round);
- } else if (round == 10) {
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- xor_128(intermediateb, round_key, ciphertext);
- } else { /* 1 - 9 */
- byte_sub(ciphertext, intermediatea);
- shift_row(intermediatea, intermediateb);
- mix_column(&intermediateb[0], &intermediatea[0]);
- mix_column(&intermediateb[4], &intermediatea[4]);
- mix_column(&intermediateb[8], &intermediatea[8]);
- mix_column(&intermediateb[12], &intermediatea[12]);
- xor_128(intermediatea, round_key, ciphertext);
- next_key(round_key, round);
- }
- }
-}
-
-/************************************************/
-/* construct_mic_iv() */
-/* Builds the MIC IV from header fields and PN */
-/************************************************/
-static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
- uint payload_length, u8 *pn_vector)
-{
- int i;
-
- mic_iv[0] = 0x59;
- if (qc_exists && a4_exists)
- mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
- if (qc_exists && !a4_exists)
- mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
- if (!qc_exists)
- mic_iv[1] = 0x00;
- for (i = 2; i < 8; i++)
- mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
- mic_iv[14] = (unsigned char)(payload_length / 256);
- mic_iv[15] = (unsigned char)(payload_length % 256);
-}
-
-/************************************************/
-/* construct_mic_header1() */
-/* Builds the first MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
-{
- mic_header1[0] = (u8)((header_length - 2) / 256);
- mic_header1[1] = (u8)((header_length - 2) % 256);
- mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
- mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
- mic_header1[4] = mpdu[4]; /* A1 */
- mic_header1[5] = mpdu[5];
- mic_header1[6] = mpdu[6];
- mic_header1[7] = mpdu[7];
- mic_header1[8] = mpdu[8];
- mic_header1[9] = mpdu[9];
- mic_header1[10] = mpdu[10]; /* A2 */
- mic_header1[11] = mpdu[11];
- mic_header1[12] = mpdu[12];
- mic_header1[13] = mpdu[13];
- mic_header1[14] = mpdu[14];
- mic_header1[15] = mpdu[15];
-}
-
-/************************************************/
-/* construct_mic_header2() */
-/* Builds the last MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- mic_header2[i] = 0x00;
+ int curfragnum, length;
+ u8 *pframe; /* *payload,*iv */
+ u8 hw_hdr_offset = 0;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+ void *crypto_private;
+ struct sk_buff *skb;
+ struct lib80211_crypto_ops *crypto_ops;
+ const int key_idx = is_multicast_ether_addr(pattrib->ra) ? psecuritypriv->dot118021XGrpKeyid : 0;
+ const int key_length = 16;
+ u8 *key;
- mic_header2[0] = mpdu[16]; /* A3 */
- mic_header2[1] = mpdu[17];
- mic_header2[2] = mpdu[18];
- mic_header2[3] = mpdu[19];
- mic_header2[4] = mpdu[20];
- mic_header2[5] = mpdu[21];
+ if (!pxmitframe->buf_addr)
+ return _FAIL;
- mic_header2[6] = 0x00;
- mic_header2[7] = 0x00; /* mpdu[23]; */
+ hw_hdr_offset = TXDESC_SIZE +
+ (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
- if (!qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
- }
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
- if (qc_exists && !a4_exists) {
- mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
- mic_header2[9] = mpdu[25] & 0x00;
- }
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt != _AES_)
+ return res;
- if (qc_exists && a4_exists) {
- for (i = 0; i < 6; i++)
- mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
- mic_header2[14] = mpdu[30] & 0x0f;
- mic_header2[15] = mpdu[31] & 0x00;
+ if (!stainfo) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo==NULL!!!\n", __func__));
+ return _FAIL;
}
-}
-/************************************************/
-/* construct_mic_header2() */
-/* Builds the last MIC header block from */
-/* header fields. */
-/************************************************/
-static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- ctr_preload[i] = 0x00;
- i = 0;
-
- ctr_preload[0] = 0x01; /* flag */
- if (qc_exists && a4_exists)
- ctr_preload[1] = mpdu[30] & 0x0f; /* QoC_Control */
- if (qc_exists && !a4_exists)
- ctr_preload[1] = mpdu[24] & 0x0f;
-
- for (i = 2; i < 8; i++)
- ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
- for (i = 8; i < 14; i++)
- ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
- ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */
- ctr_preload[15] = (unsigned char)(c % 256);
-}
-
-/************************************/
-/* bitwise_xor() */
-/* A 128 bit, bitwise exclusive or */
-/************************************/
-static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
-{
- int i;
-
- for (i = 0; i < 16; i++)
- out[i] = ina[i] ^ inb[i];
-}
+ crypto_ops = lib80211_get_crypto_ops("CCMP");
-static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
-{
- uint qc_exists, a4_exists, i, j, payload_remainder,
- num_blocks, payload_index;
-
- u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
-
- /* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
- u8 mic[8];
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
-
- frsubtype >>= 4;
-
- memset(mic_iv, 0, 16);
- memset(mic_header1, 0, 16);
- memset(mic_header2, 0, 16);
- memset(ctr_preload, 0, 16);
- memset(chain_buffer, 0, 16);
- memset(aes_out, 0, 16);
- memset(padded_buffer, 0, 16);
-
- if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
- a4_exists = 0;
+ if (is_multicast_ether_addr(pattrib->ra))
+ key = psecuritypriv->dot118021XGrpKey[key_idx].skey;
else
- a4_exists = 1;
-
- if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) || (frtype == WIFI_DATA_CFACKPOLL)) {
- qc_exists = 1;
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- } else if ((frsubtype == 0x08) || (frsubtype == 0x09) || (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
- if (hdrlen != WLAN_HDR_A3_QOS_LEN)
- hdrlen += 2;
- qc_exists = 1;
- } else {
- qc_exists = 0;
- }
-
- pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen + 1];
- pn_vector[2] = pframe[hdrlen + 4];
- pn_vector[3] = pframe[hdrlen + 5];
- pn_vector[4] = pframe[hdrlen + 6];
- pn_vector[5] = pframe[hdrlen + 7];
-
- construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
+ key = stainfo->dot118021x_UncstKey.skey;
- construct_mic_header1(mic_header1, hdrlen, pframe);
- construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
-
- payload_remainder = plen % 16;
- num_blocks = plen / 16;
-
- /* Find start of payload */
- payload_index = hdrlen + 8;
-
- /* Calculate MIC */
- aes128k128d(key, mic_iv, aes_out);
- bitwise_xor(aes_out, mic_header1, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
- bitwise_xor(aes_out, mic_header2, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
-
- for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
-
- payload_index += 16;
- aes128k128d(key, chain_buffer, aes_out);
+ if (!crypto_ops) {
+ res = _FAIL;
+ goto exit;
}
- /* Add on the final payload block if it needs padding */
- if (payload_remainder > 0) {
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- aes128k128d(key, chain_buffer, aes_out);
+ crypto_private = crypto_ops->init(key_idx);
+ if (!crypto_private) {
+ res = _FAIL;
+ goto exit;
}
- for (j = 0; j < 8; j++)
- mic[j] = aes_out[j];
-
- /* Insert MIC into payload */
- for (j = 0; j < 8; j++)
- pframe[payload_index + j] = mic[j];
-
- payload_index = hdrlen + 8;
- for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i + 1);
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
+ if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
+ res = _FAIL;
+ goto exit_crypto_ops_deinit;
}
- if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
- /* encrypt it and copy the unpadded part back */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks + 1);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index + j];
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
- /* Encrypt the MIC */
- construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, 0);
-
- for (j = 0; j < 16; j++)
- padded_buffer[j] = 0x00;
- for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j + hdrlen + 8 + plen];
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, padded_buffer, chain_buffer);
- for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];
- return _SUCCESS;
-}
-
-u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{ /* exclude ICV */
-
- /*static*/
-/* unsigned char message[MAX_MSG_SIZE]; */
-
- /* Intermediate Buffers */
- int curfragnum, length;
- u8 *pframe, *prwskey; /* *payload,*iv */
- u8 hw_hdr_offset = 0;
- struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
-/* uint offset = 0; */
- u32 res = _SUCCESS;
-
- if (pxmitframe->buf_addr == NULL)
- return _FAIL;
-
- hw_hdr_offset = TXDESC_SIZE +
- (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
-
- pframe = pxmitframe->buf_addr + hw_hdr_offset;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));

- /* 4 start to encrypt each fragment */
- if (pattrib->encrypt == _AES_) {
- if (pattrib->psta)
- stainfo = pattrib->psta;
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ if (curfragnum + 1 == pattrib->nr_frags)
+ length = pattrib->last_txcmdsz;
else
- stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+ length = pxmitpriv->frag_len;

- if (stainfo) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ res = _FAIL;
+ goto exit_crypto_ops_deinit;
+ }

- if (is_multicast_ether_addr(pattrib->ra))
- prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
- else
- prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- if ((curfragnum + 1) == pattrib->nr_frags) { /* 4 the last fragment */
- length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
+ skb_put_data(skb, pframe, length);

- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- } else {
- length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
+ memmove(skb->data + pattrib->iv_len, skb->data, pattrib->hdrlen);
+ skb_pull(skb, pattrib->iv_len);
+ skb_trim(skb, skb->len - pattrib->icv_len);

- aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
- pframe += pxmitpriv->frag_len;
- pframe = (u8 *)round_up((size_t)(pframe), 8);
- }
- }
- } else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo==NULL!!!\n", __func__));
+ if (crypto_ops->encrypt_mpdu(skb, pattrib->hdrlen, crypto_private)) {
+ kfree_skb(skb);
res = _FAIL;
+ goto exit_crypto_ops_deinit;
}
+
+ memcpy(pframe, skb->data, skb->len);
+
+ pframe += skb->len;
+ pframe = (u8 *)round_up((size_t)(pframe), 8);
+
+ kfree_skb(skb);
}
+exit_crypto_ops_deinit:
+ crypto_ops->deinit(crypto_private);
+
+exit:
return res;
}
@@ -1285,7 +834,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
if (prxattrib->encrypt == _AES_) {
struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
int key_idx;
const int key_length = 16, iv_len = 8, icv_len = 8;
struct sk_buff *skb = precvframe->pkt;
@@ -1349,190 +898,3 @@ exit_lib80211_ccmp:
exit:
return res;
}
-
-/* AES tables*/
-const u32 Te0[256] = {
- 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
- 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
- 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
- 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
- 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
- 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
- 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
- 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
- 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
- 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
- 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
- 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
- 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
- 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
- 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
- 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
- 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
- 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
- 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
- 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
- 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
- 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
- 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
- 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
- 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
- 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
- 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
- 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
- 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
- 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
- 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
- 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
- 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
- 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
- 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
- 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
- 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
- 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
- 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
- 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
- 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
- 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
- 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
- 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
- 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
- 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
- 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
- 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
- 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
- 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
- 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
- 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
- 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
- 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
- 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
- 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
- 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
- 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
- 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
- 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
- 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
- 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
- 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
- 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
-};
-
-const u32 Td0[256] = {
- 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
- 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
- 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
- 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
- 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
- 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
- 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
- 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
- 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
- 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
- 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
- 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
- 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
- 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
- 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
- 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
- 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
- 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
- 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
- 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
- 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
- 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
- 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
- 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
- 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
- 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
- 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
- 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
- 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
- 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
- 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
- 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
- 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
- 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
- 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
- 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
- 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
- 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
- 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
- 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
- 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
- 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
- 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
- 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
- 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
- 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
- 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
- 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
- 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
- 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
- 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
- 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
- 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
- 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
- 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
- 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
- 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
- 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
- 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
- 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
- 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
- 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
- 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
- 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
-};
-
-const u8 Td4s[256] = {
- 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
- 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
- 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
- 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
- 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
- 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
- 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
- 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
- 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
- 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
- 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
- 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
- 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
- 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
- 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
- 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
- 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
- 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
- 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
- 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
- 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
- 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
- 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
- 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
- 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
- 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
- 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
- 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
- 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
- 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
- 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
- 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
-};
-const u8 rcons[] = {
- 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
- /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
-};
-
-/**
- * Expand the cipher key into the encryption key schedule.
- *
- * @return the number of rounds for the given cipher key size.
- */
-#define ROUND(i, d, s) \
-do { \
- d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
- d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
- d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
- d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
-} while (0)
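The new encrypt path above delegates CCMP to lib80211 instead of the removed open-coded AES. A minimal sketch of the per-fragment flow, assuming the lib80211_crypt_ccmp module is loaded; the wrapper name and error handling are illustrative, only the crypto_ops calls mirror the hunk above:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/lib80211.h>

static int ccmp_encrypt_one_fragment(u8 *key, int key_idx, u8 *frag,
				     int len, int hdrlen, int iv_len,
				     int icv_len)
{
	struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops("CCMP");
	struct sk_buff *skb;
	void *priv;
	int out_len;

	if (!ops)
		return -ENOENT;

	priv = ops->init(key_idx);
	if (!priv)
		return -ENOMEM;

	if (ops->set_key(key, 16, NULL, priv) < 0)
		goto err;

	skb = dev_alloc_skb(len);
	if (!skb)
		goto err;

	skb_put_data(skb, frag, len);
	/* Close the gap the driver left for the IV and drop the trailing
	 * ICV: encrypt_mpdu() re-adds the CCMP header and MIC itself. */
	memmove(skb->data + iv_len, skb->data, hdrlen);
	skb_pull(skb, iv_len);
	skb_trim(skb, skb->len - icv_len);

	if (ops->encrypt_mpdu(skb, hdrlen, priv)) {
		kfree_skb(skb);
		goto err;
	}

	out_len = skb->len;
	memcpy(frag, skb->data, out_len);
	kfree_skb(skb);
	ops->deinit(priv);
	return out_len;

err:
	ops->deinit(priv);
	return -EINVAL;
}

The encrypted MPDU comes out the same size as the input (the 8-byte IV slot and 8-byte ICV are replaced by the 8-byte CCMP header and 8-byte MIC), which is why the caller can memcpy() the result straight back over the fragment.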
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index be843fd2461a..26f128836a5e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -53,32 +53,6 @@ static const u8 rtw_basic_rate_mix[7] = {
IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
-bool cckrates_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- u8 r = rate[i] & 0x7f;
-
- if (r == 2 || r == 4 || r == 11 || r == 22)
- return true;
- }
- return false;
-}
-
-bool cckratesonly_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- u8 r = rate[i] & 0x7f;
-
- if (r != 2 && r != 4 && r != 11 && r != 22)
- return false;
- }
- return true;
-}
-
unsigned char networktype_to_raid(unsigned char network_type)
{
switch (network_type) {
@@ -102,7 +76,7 @@ unsigned char networktype_to_raid(unsigned char network_type)
}
}
-u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int ratelen)
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate)
{
u8 network_type = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -111,9 +85,9 @@ u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int ratelen
if (pmlmeinfo->HT_enable)
network_type = WIRELESS_11_24N;
- if (cckratesonly_included(rate, ratelen))
+ if (rtw_is_cckratesonly_included(rate))
network_type |= WIRELESS_11B;
- else if (cckrates_included(rate, ratelen))
+ else if (rtw_is_cckrates_included(rate))
network_type |= WIRELESS_11BG;
else
network_type |= WIRELESS_11G;
@@ -869,42 +843,42 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
/* parsing HT_INFO_IE */
p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p && len > 0) {
- pht_info = (struct HT_info_element *)(p + 2);
- ht_info_infos_0 = pht_info->infos[0];
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
} else {
- ht_info_infos_0 = 0;
+ ht_info_infos_0 = 0;
}
if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
((ht_info_infos_0 & 0x03) != (cur_network->BcnInfo.ht_info_infos_0 & 0x03))) {
- DBG_88E("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
- ht_cap_info, ht_info_infos_0);
- DBG_88E("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
- cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
- DBG_88E("%s bw mode change, disconnect\n", __func__);
- /* bcn_info_update */
- cur_network->BcnInfo.ht_cap_info = ht_cap_info;
- cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
- /* to do : need to check that whether modify related register of BB or not */
- /* goto _mismatch; */
+ DBG_88E("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ ht_cap_info, ht_info_infos_0);
+ DBG_88E("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
+ DBG_88E("%s bw mode change, disconnect\n", __func__);
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* to do : need to check that whether modify related register of BB or not */
+ /* goto _mismatch; */
}
/* Checking for channel */
p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
if (p) {
- bcn_channel = *(p + 2);
+ bcn_channel = *(p + 2);
} else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
- p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
- if (pht_info) {
- bcn_channel = pht_info->primary_channel;
- } else { /* we don't find channel IE, so don't check it */
- DBG_88E("Oops: %s we don't find channel IE, so don't check it\n", __func__);
- bcn_channel = Adapter->mlmeextpriv.cur_channel;
- }
+ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->ie_length - _FIXED_IE_LENGTH_);
+ if (pht_info) {
+ bcn_channel = pht_info->primary_channel;
+ } else { /* we don't find channel IE, so don't check it */
+ DBG_88E("Oops: %s we don't find channel IE, so don't check it\n", __func__);
+ bcn_channel = Adapter->mlmeextpriv.cur_channel;
+ }
}
if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
- DBG_88E("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
- bcn_channel, Adapter->mlmeextpriv.cur_channel);
- goto _mismatch;
+ DBG_88E("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
+ bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ goto _mismatch;
}
/* checking SSID */
@@ -932,7 +906,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* check encryption info */
- val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+ val16 = rtw_get_capability(bssid);
if (val16 & BIT(4))
bssid->Privacy = 1;
@@ -1043,7 +1017,7 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ if (rtw_get_capability(cur_network) & WLAN_CAPABILITY_PRIVACY) {
for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.ie_length;) {
pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.ies + i);
@@ -1347,24 +1321,22 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
void update_wireless_mode(struct adapter *padapter)
{
- int ratelen, network_type = 0;
+ int network_type = 0;
u32 SIFS_Timer;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
unsigned char *rate = cur_network->SupportedRates;
- ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
-
if (pmlmeinfo->HT_info_enable && pmlmeinfo->HT_caps_enable)
pmlmeinfo->HT_enable = 1;
if (pmlmeinfo->HT_enable)
network_type = WIRELESS_11_24N;
- if (cckratesonly_included(rate, ratelen))
+ if (rtw_is_cckratesonly_included(rate))
network_type |= WIRELESS_11B;
- else if (cckrates_included(rate, ratelen))
+ else if (rtw_is_cckrates_included(rate))
network_type |= WIRELESS_11BG;
else
network_type |= WIRELESS_11G;
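The cckrates helpers removed above are replaced by rtw_is_cckrates_included() and rtw_is_cckratesonly_included(), which take no length argument. Presumably they walk the zero-terminated SupportedRates array instead, which is also why update_wireless_mode() above drops its rtw_get_rateset_len() call. A sketch consistent with the removed bodies (the real definitions live elsewhere in this series):

static bool rtw_is_cckrates_included_sketch(const u8 *rate)
{
	while (*rate) {
		u8 r = *rate & 0x7f;

		if (r == 2 || r == 4 || r == 11 || r == 22)
			return true;
		rate++;
	}
	return false;
}

static bool rtw_is_cckratesonly_included_sketch(const u8 *rate)
{
	while (*rate) {
		u8 r = *rate & 0x7f;

		if (r != 2 && r != 4 && r != 11 && r != 22)
			return false;
		rate++;
	}
	return true;
}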
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index b8fecc952cfc..9585dffc63a3 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -23,7 +23,7 @@ uint rtw_hal_init(struct adapter *adapt)
rtw_hal_notch_filter(adapt, 1);
} else {
adapt->hw_init_completed = false;
- DBG_88E("rtw_hal_init: hal__init fail\n");
+ DBG_88E("%s: hal__init fail\n", __func__);
}
RT_TRACE(_module_hal_init_c_, _drv_err_,
@@ -41,7 +41,7 @@ uint rtw_hal_deinit(struct adapter *adapt)
if (status == _SUCCESS)
adapt->hw_init_completed = false;
else
- DBG_88E("\n rtw_hal_deinit: hal_init fail\n");
+ DBG_88E("\n %s: hal_init fail\n", __func__);
return status;
}
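A recurring cleanup in the hunks that follow: hard-coded function names in log strings are replaced by __func__, which the compiler expands and which therefore cannot go stale on a rename. The pattern, taken from the hunk above ("hal__init" is the driver's own spelling):

	DBG_88E("rtw_hal_init: hal__init fail\n");	/* before */
	DBG_88E("%s: hal__init fail\n", __func__);	/* after */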
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 28974808839d..4d659a812aed 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -249,7 +249,7 @@ void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform=%d\n", pDM_Odm->SupportPlatform));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility=0x%x\n", pDM_Odm->SupportAbility));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
@@ -267,7 +267,7 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *pDM_Odm->pNumTxBytesUnicast));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *pDM_Odm->pNumRxBytesUnicast));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *pDM_Odm->pWirelessMode));
@@ -282,7 +282,7 @@ void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
{
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoUpdate_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("%s==>\n", __func__));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct=%d\n", pDM_Odm->bWIFI_Direct));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display=%d\n", pDM_Odm->bWIFI_Display));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked=%d\n", pDM_Odm->bLinked));
@@ -339,21 +339,21 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
u8 dm_dig_max, dm_dig_min;
u8 CurrentIGI = pDM_DigTable->CurIGValue;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG()==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s()==>\n", __func__));
if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n"));
+ ("%s() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n", __func__));
return;
}
if (*pDM_Odm->pbScanInProcess) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: In Scan Progress\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() Return: In Scan Progress\n", __func__));
return;
}
/* add by Neil Chen to avoid PSD is processing */
if (!pDM_Odm->bDMInitialGainEnable) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: PSD is Processing\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() Return: PSD is Processing\n", __func__));
return;
}
@@ -383,18 +383,18 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
else
DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
- DIG_Dynamic_MIN));
+ ("%s() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
+ __func__, DIG_Dynamic_MIN));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
- pDM_Odm->RSSI_Min));
+ ("%s() : pDM_Odm->RSSI_Min=%d\n",
+ __func__, pDM_Odm->RSSI_Min));
} else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
/* 1 Lower Bound for 88E AntDiv */
if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
- ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
- pDM_DigTable->AntDiv_RSSI_max));
+ ("%s(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
+ __func__, pDM_DigTable->AntDiv_RSSI_max));
}
} else {
DIG_Dynamic_MIN = dm_dig_min;
@@ -402,7 +402,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
} else {
pDM_DigTable->rx_gain_range_max = dm_dig_max;
DIG_Dynamic_MIN = dm_dig_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() : No Link\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s() : No Link\n", __func__));
}
/* 1 Modify DIG lower bound, deal with abnormally large false alarm */
@@ -433,11 +433,11 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
if ((pDM_DigTable->ForbiddenIGI - 1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): Normal Case: At Lower Bound\n", __func__));
} else {
pDM_DigTable->ForbiddenIGI--;
pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Approach Lower Bound\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): Normal Case: Approach Lower Bound\n", __func__));
}
} else {
pDM_DigTable->LargeFAHit = 0;
@@ -445,12 +445,12 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
}
}
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG(): pDM_DigTable->LargeFAHit=%d\n",
- pDM_DigTable->LargeFAHit));
+ ("%s(): pDM_DigTable->LargeFAHit=%d\n",
+ __func__, pDM_DigTable->LargeFAHit));
/* 1 Adjust initial gain by false alarm */
if (pDM_Odm->bLinked) {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG AfterLink\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG AfterLink\n", __func__));
if (FirstConnect) {
CurrentIGI = pDM_Odm->RSSI_Min;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
@@ -463,10 +463,10 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG BeforeLink\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG BeforeLink\n", __func__));
if (FirstDisConnect) {
CurrentIGI = pDM_DigTable->rx_gain_range_min;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): First DisConnect\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): First DisConnect\n", __func__));
} else {
/* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
if (pFalseAlmCnt->Cnt_all > 10000)
@@ -475,10 +475,10 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
else if (pFalseAlmCnt->Cnt_all < 500)
CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): England DIG\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): England DIG\n", __func__));
}
}
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG End Adjust IGI\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): DIG End Adjust IGI\n", __func__));
/* 1 Check initial gain by upper/lower bound */
if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
CurrentIGI = pDM_DigTable->rx_gain_range_max;
@@ -486,10 +486,10 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = pDM_DigTable->rx_gain_range_min;
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
- ("odm_DIG(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
- pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): TotalFA=%d\n", pFalseAlmCnt->Cnt_all));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): CurIGValue=0x%x\n", CurrentIGI));
+ ("%s(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+ __func__, pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): TotalFA=%d\n", __func__, pFalseAlmCnt->Cnt_all));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("%s(): CurIGValue=0x%x\n", __func__, CurrentIGI));
/* 2 High power RSSI threshold */
@@ -557,7 +557,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter %s\n", __func__));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
@@ -829,9 +829,9 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
}
/* Decide RATRState by RSSI. */
- if (RSSI > HighRSSIThreshForRA)
+ if (HighRSSIThreshForRA < RSSI)
RATRState = DM_RATR_STA_HIGH;
- else if (RSSI > LowRSSIThreshForRA)
+ else if (LowRSSIThreshForRA < RSSI)
RATRState = DM_RATR_STA_MIDDLE;
else
RATRState = DM_RATR_STA_LOW;
@@ -969,7 +969,6 @@ void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
rtl88eu_dm_txpower_tracking_callback_thermalmeter(Adapter);
pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
-
}
/* 3============================================================ */
@@ -1016,13 +1015,13 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
/* 2011/09/29 MH In HW integration first stage, we provide 4 different handle to operate */
/* at the same time. In the stage2/3, we need to prive universal interface and merge all */
/* HW dynamic mechanism. */
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("odm_EdcaTurboCheck========================>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("%s========================>\n", __func__));
if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
return;
odm_EdcaTurboCheckCE(pDM_Odm);
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================odm_EdcaTurboCheck\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================%s\n", __func__));
} /* odm_CheckEdcaTurbo */
void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 920688fc9e9f..a970189ba8c6 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -51,8 +51,7 @@ void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data)
usb_write32(adapt, regaddr, data);
}
-static u32 rf_serial_read(struct adapter *adapt,
- enum rf_radio_path rfpath, u32 offset)
+static u32 rf_serial_read(struct adapter *adapt, enum rf_radio_path rfpath, u32 offset)
{
u32 ret = 0;
struct bb_reg_def *phyreg = &adapt->HalData->PHYRegDef[rfpath];
@@ -107,7 +106,7 @@ static void rf_serial_write(struct adapter *adapt,
}
u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
- u32 reg_addr, u32 bit_mask)
+ u32 reg_addr, u32 bit_mask)
{
u32 original_value, bit_shift;
@@ -117,7 +116,7 @@ u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
}
void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
- u32 reg_addr, u32 bit_mask, u32 data)
+ u32 reg_addr, u32 bit_mask, u32 data)
{
u32 original_value, bit_shift;
@@ -190,7 +189,7 @@ void phy_set_tx_power_level(struct adapter *adapt, u8 channel)
rtl88eu_phy_rf6052_set_cck_txpower(adapt, &cck_pwr[0]);
rtl88eu_phy_rf6052_set_ofdm_txpower(adapt, &ofdm_pwr[0], &bw20_pwr[0],
- &bw40_pwr[0], channel);
+ &bw40_pwr[0], channel);
}
static void phy_set_bw_mode_callback(struct adapter *adapt)
@@ -236,11 +235,11 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
* These settings are required only for 40MHz
*/
phy_set_bb_reg(adapt, rCCK0_System, bCCKSideBand,
- (hal_data->nCur40MhzPrimeSC >> 1));
+ (hal_data->nCur40MhzPrimeSC >> 1));
phy_set_bb_reg(adapt, rOFDM1_LSTF, 0xC00,
hal_data->nCur40MhzPrimeSC);
phy_set_bb_reg(adapt, 0x818, (BIT(26) | BIT(27)),
- (hal_data->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ (hal_data->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
break;
@@ -251,7 +250,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
}
void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
- unsigned char offset)
+ unsigned char offset)
{
struct hal_data_8188e *hal_data = adapt->HalData;
enum ht_channel_width tmp_bw = hal_data->CurrentChannelBW;
@@ -345,7 +344,7 @@ static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
{
if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("dm_txpwr_track_setpwr CH=%d\n", *dm_odm->pChannel));
+ ("%s CH=%d\n", __func__, *dm_odm->pChannel));
phy_set_tx_power_level(dm_odm->Adapter, *dm_odm->pChannel);
dm_odm->BbSwingFlagOfdm = false;
dm_odm->BbSwingFlagCck = false;
@@ -403,11 +402,11 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if ((dm_odm->RFCalibrateInfo.bCCKinCH14 &&
- memcmp(&temp_cck, &CCKSwingTable_Ch14[i][2], 4)) ||
- memcmp(&temp_cck, &CCKSwingTable_Ch1_Ch13[i][2], 4)) {
- cck_index_old = (u8)i;
- dm_odm->BbSwingIdxCckBase = (u8)i;
- break;
+ memcmp(&temp_cck, &CCKSwingTable_Ch14[i][2], 4)) ||
+ memcmp(&temp_cck, &CCKSwingTable_Ch1_Ch13[i][2], 4)) {
+ cck_index_old = (u8)i;
+ dm_odm->BbSwingIdxCckBase = (u8)i;
+ break;
}
}
@@ -437,7 +436,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
thermal_val = (u8)(thermal_avg / thermal_avg_count);
if (dm_odm->RFCalibrateInfo.bDoneTxpower &&
- !dm_odm->RFCalibrateInfo.bReloadtxpowerindex) {
+ !dm_odm->RFCalibrateInfo.bReloadtxpowerindex) {
delta = abs(thermal_val - dm_odm->RFCalibrateInfo.ThermalValue);
} else {
delta = abs(thermal_val - hal_data->EEPROMThermalMeter);
@@ -1039,10 +1038,10 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
for (i = 0; i < retry_count; i++) {
path_a_ok = phy_path_a_iqk(adapt, is2t);
if (path_a_ok == 0x01) {
- result[t][0] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_A,
- bMaskDWord) & 0x3FF0000) >> 16;
- result[t][1] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_A,
- bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][0] = (phy_query_bb_reg(adapt, rTx_Power_Before_IQK_A,
+ bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][1] = (phy_query_bb_reg(adapt, rTx_Power_After_IQK_A,
+ bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
@@ -1050,10 +1049,10 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
for (i = 0; i < retry_count; i++) {
path_a_ok = phy_path_a_rx_iqk(adapt, is2t);
if (path_a_ok == 0x03) {
- result[t][2] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_A_2,
- bMaskDWord) & 0x3FF0000) >> 16;
- result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
- bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][2] = (phy_query_bb_reg(adapt, rRx_Power_Before_IQK_A_2,
+ bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][3] = (phy_query_bb_reg(adapt, rRx_Power_After_IQK_A_2,
+ bMaskDWord) & 0x3FF0000) >> 16;
break;
}
ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
@@ -1149,12 +1148,12 @@ static void phy_lc_calibrate(struct adapter *adapt, bool is2t)
/* 1. Read original RF mode */
/* Path-A */
rf_a_mode = rtw_hal_read_rfreg(adapt, RF_PATH_A, RF_AC,
- bMask12Bits);
+ bMask12Bits);
/* Path-B */
if (is2t)
rf_b_mode = rtw_hal_read_rfreg(adapt, RF_PATH_B, RF_AC,
- bMask12Bits);
+ bMask12Bits);
/* 2. Set RF mode = standby mode */
/* Path-A */
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index 77edd7ad19a1..34784943a7d1 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -26,25 +26,26 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
pwrcfgcmd = pwrseqcmd[aryidx];
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: offset(%#x) cut_msk(%#x)"
+ ("%s: offset(%#x) cut_msk(%#x)"
" cmd(%#x)"
"msk(%#x) value(%#x)\n",
- GET_PWR_CFG_OFFSET(pwrcfgcmd),
- GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
- GET_PWR_CFG_CMD(pwrcfgcmd),
- GET_PWR_CFG_MASK(pwrcfgcmd),
- GET_PWR_CFG_VALUE(pwrcfgcmd)));
+ __func__,
+ GET_PWR_CFG_OFFSET(pwrcfgcmd),
+ GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
+ GET_PWR_CFG_CMD(pwrcfgcmd),
+ GET_PWR_CFG_MASK(pwrcfgcmd),
+ GET_PWR_CFG_VALUE(pwrcfgcmd)));
/* Only Handle the command whose CUT is matched */
if (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) {
switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
case PWR_CMD_READ:
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: PWR_CMD_READ\n"));
+ ("%s: PWR_CMD_READ\n", __func__));
break;
case PWR_CMD_WRITE:
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: PWR_CMD_WRITE\n"));
+ ("%s: PWR_CMD_WRITE\n", __func__));
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
/* Read the value from system register */
@@ -59,7 +60,7 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
break;
case PWR_CMD_POLLING:
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: PWR_CMD_POLLING\n"));
+ ("%s: PWR_CMD_POLLING\n", __func__));
poll_bit = false;
offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
@@ -81,7 +82,7 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
break;
case PWR_CMD_DELAY:
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: PWR_CMD_DELAY\n"));
+ ("%s: PWR_CMD_DELAY\n", __func__));
if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
else
@@ -90,11 +91,11 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
case PWR_CMD_END:
/* When this command is parsed, end the process */
RT_TRACE(_module_hal_init_c_, _drv_info_,
- ("rtl88eu_pwrseqcmdparsing: PWR_CMD_END\n"));
+ ("%s: PWR_CMD_END\n", __func__));
return true;
default:
RT_TRACE(_module_hal_init_c_, _drv_err_,
- ("rtl88eu_pwrseqcmdparsing: Unknown CMD!!\n"));
+ ("%s: Unknown CMD!!\n", __func__));
break;
}
}
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 6702f263c770..aab0f54a75fc 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -138,6 +138,7 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
(powerbase1 << 8) | powerbase1;
*mcs_base = powerbase1;
}
+
static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
u8 index, u32 *powerbase0, u32 *powerbase1,
u32 *out_val)
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 0b20e62f9a68..d39e1bd97f85 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -171,8 +171,7 @@ static void rtl_rfreg_delay(struct adapter *adapt, enum rf_radio_path rfpath, u3
}
}
-static void rtl8188e_config_rf_reg(struct adapter *adapt,
- u32 addr, u32 data)
+static void rtl8188e_config_rf_reg(struct adapter *adapt, u32 addr, u32 data)
{
u32 content = 0x1000; /*RF Content: radio_a_txt*/
u32 maskforphyset = content & 0xE000;
@@ -206,8 +205,8 @@ static bool rtl88e_phy_config_rf_with_headerfile(struct adapter *adapt)
READ_NEXT_PAIR(v1, v2, i);
while (v2 != 0xDEAD && v2 != 0xCDEF &&
v2 != 0xCDCD && i < array_len - 2) {
- rtl8188e_config_rf_reg(adapt, v1, v2);
- READ_NEXT_PAIR(v1, v2, i);
+ rtl8188e_config_rf_reg(adapt, v1, v2);
+ READ_NEXT_PAIR(v1, v2, i);
}
while (v2 != 0xDEAD && i < array_len - 2)
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 2baef9a285c0..95b27b4df705 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -95,7 +95,7 @@ void _8051Reset88E(struct adapter *padapter)
u1bTmp = usb_read8(padapter, REG_SYS_FUNC_EN + 1);
usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp & (~BIT(2)));
usb_write8(padapter, REG_SYS_FUNC_EN + 1, u1bTmp | (BIT(2)));
- DBG_88E("=====> _8051Reset88E(): 8051 reset success .\n");
+ DBG_88E("=====> %s(): 8051 reset success .\n", __func__);
}
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
@@ -187,7 +187,7 @@ static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
/* polling */
do {
value = usb_read32(padapter, LLTReg);
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
break;
if (count > POLLING_LLT_THRESHOLD) {
@@ -406,7 +406,7 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
+ padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index 7badfc2e45df..25f46b2f4920 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,8 +22,7 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
int i, res = _SUCCESS;
struct recv_buf *precvbuf;
- tasklet_init(&precvpriv->recv_tasklet, rtl8188eu_recv_tasklet,
- (unsigned long)padapter);
+ tasklet_setup(&precvpriv->recv_tasklet, rtl8188eu_recv_tasklet);
/* init recv_buf */
_rtw_init_queue(&precvpriv->free_recv_buf_queue);
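tasklet_setup() passes the tasklet pointer itself to the callback instead of an opaque unsigned long, so the matching callbacks (prototyped with a struct tasklet_struct * argument in the header hunks further down) recover their context with from_tasklet(), a container_of() wrapper. A sketch of the callback side, with the real body elided and field names assuming this driver's usual layout:

void rtl8188eu_recv_tasklet(struct tasklet_struct *t)
{
	/* Map the embedded tasklet back to its recv_priv, then to the
	 * adapter it lives in. */
	struct recv_priv *precvpriv = from_tasklet(precvpriv, t,
						   recv_tasklet);
	struct adapter *padapter = container_of(precvpriv, struct adapter,
						recvpriv);

	/* ... existing receive processing, unchanged ... */
}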
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 7d315bd438d4..2866283c211d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -17,8 +17,7 @@ s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet,
- (unsigned long)adapt);
+ tasklet_setup(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet);
return _SUCCESS;
}
@@ -347,7 +346,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
rtw_issue_addbareq_cmd(adapt, pxmitframe);
mem_addr = pxmitframe->buf_addr;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("%s()\n", __func__));
for (t = 0; t < pattrib->nr_frags; t++) {
if (inner_ret != _SUCCESS && ret == _SUCCESS)
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 114638f6f719..abe58cf2de16 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -78,8 +78,8 @@ void rtw_hal_chip_configure(struct adapter *adapt)
haldata->UsbRxAggPageCount = 48; /* uint :128 b 0x0A; 10 = MAX_RX_DMA_BUFFER_SIZE/2/haldata->UsbBulkOutSize */
haldata->UsbRxAggPageTimeout = 0x4; /* 6, absolute time = 34ms/(2^6) */
- HalUsbSetQueuePipeMapping8188EUsb(adapt,
- pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
+ HalUsbSetQueuePipeMapping8188EUsb(adapt, pdvobjpriv->RtNumInPipes,
+ pdvobjpriv->RtNumOutPipes);
}
u32 rtw_hal_power_on(struct adapter *adapt)
@@ -876,7 +876,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("CardDisableRTL8188EU\n"));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("%s\n", __func__));
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
val8 = usb_read8(Adapter, REG_TX_RPT_CTRL);
@@ -1038,8 +1038,7 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
}
RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
- ("Hal_EfuseParseMACAddr_8188EU: Permanent Address = %pM\n",
- eeprom->mac_addr));
+ ("%s: Permanent Address = %pM\n", __func__, eeprom->mac_addr));
}
static void readAdapterInfo_8188EU(struct adapter *adapt)
@@ -1894,7 +1893,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
switch (mac_id) {
case 0:/* for infra mode */
supportRateNum = rtw_get_rateset_len(cur_network->SupportedRates);
- networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
+ networkType = judge_network_type(adapt, cur_network->SupportedRates) & 0xf;
raid = networktype_to_raid(networkType);
mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
@@ -1912,7 +1911,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
break;
default: /* for each sta in IBSS */
supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
- networkType = judge_network_type(adapt, pmlmeinfo->FW_sta_info[mac_id].SupportedRates, supportRateNum) & 0xf;
+ networkType = judge_network_type(adapt, pmlmeinfo->FW_sta_info[mac_id].SupportedRates) & 0xf;
raid = networktype_to_raid(networkType);
mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index 83218e7ec0a9..cb6940d2aeab 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -526,16 +526,6 @@ enum rtw_ieee80211_category {
RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
};
-/* SPECTRUM_MGMT action code */
-enum rtw_ieee80211_spectrum_mgmt_actioncode {
- RTW_WLAN_ACTION_SPCT_MSR_REQ = 0,
- RTW_WLAN_ACTION_SPCT_MSR_RPRT = 1,
- RTW_WLAN_ACTION_SPCT_TPC_REQ = 2,
- RTW_WLAN_ACTION_SPCT_TPC_RPRT = 3,
- RTW_WLAN_ACTION_SPCT_CHL_SWITCH = 4,
- RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
-};
-
enum _PUBLIC_ACTION {
ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
ACT_PUBLIC_DSE_ENABLE = 1,
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index b44d602e954a..56e937b26407 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -69,6 +69,7 @@ void _rtw_init_queue(struct __queue *pqueue);
struct rtw_netdev_priv_indicator {
void *priv;
};
+
struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv);
static inline struct adapter *rtw_netdev_priv(struct net_device *netdev)
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 23251ffa8404..fea1119c426e 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -43,7 +43,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_tasklet(unsigned long priv);
+void rtl8188eu_recv_tasklet(struct tasklet_struct *t);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 85efa41c8350..617c2273b41b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -94,6 +94,7 @@ enum TXDESC_SC {
SC_LOWER = 0x02,
SC_DUPLICATE = 0x03
};
+
/* OFFSET 20 */
#define SGI BIT(6)
#define USB_TXAGG_NUM_SHT 24
@@ -147,7 +148,7 @@ void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
#define hal_xmit_handler rtl8188eu_xmit_buf_handler
-void rtl8188eu_xmit_tasklet(unsigned long priv);
+void rtl8188eu_xmit_tasklet(struct tasklet_struct *t);
bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 010f0c42368a..1b74b32b8a81 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -266,7 +266,7 @@ static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state |= state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
+ if (state == _FW_UNDER_SURVEY)
pmlmepriv->bScanInProcess = true;
}
@@ -274,7 +274,7 @@ static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
{
pmlmepriv->fw_state &= ~state;
/* FOR HW integration */
- if (_FW_UNDER_SURVEY == state)
+ if (state == _FW_UNDER_SURVEY)
pmlmepriv->bScanInProcess = false;
}
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 565bfe46256c..b11a6886a083 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -448,7 +448,7 @@ void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
unsigned char networktype_to_raid(unsigned char network_type);
-u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate);
void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
@@ -568,9 +568,6 @@ void addba_timer_hdl(struct timer_list *t);
mod_timer(&mlmeext->link_timer, jiffies + \
msecs_to_jiffies(ms))
-bool cckrates_included(unsigned char *rate, int ratelen);
-bool cckratesonly_included(unsigned char *rate, int ratelen);
-
void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index b281b9e7fcea..e20bab41708a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -62,7 +62,9 @@ struct signal_stat {
u32 total_num; /* num of valid elements */
u32 total_val; /* sum of valid elements */
};
+
#define MAX_PATH_NUM_92CS 3
+
struct phy_info {
u8 RxPWDBAll;
u8 SignalQuality; /* in 0-100 index. */
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 8ba02a7cea60..d08a8d8adccf 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -81,8 +81,8 @@ union Keytype {
};
struct rt_pmkid_list {
- u8 bUsed;
- u8 Bssid[6];
+ u8 used;
+ u8 bssid[ETH_ALEN];
u8 PMKID[16];
u8 SsidBuf[33];
u8 *ssid_octet;
@@ -228,64 +228,6 @@ struct mic_data {
u32 nBytesInM; /* # bytes in M */
};
-extern const u32 Te0[256];
-extern const u32 Td0[256];
-extern const u32 Td1[256];
-extern const u32 Td2[256];
-extern const u32 Td3[256];
-extern const u32 Td4[256];
-extern const u32 rcon[10];
-extern const u8 Td4s[256];
-extern const u8 rcons[10];
-
-#define RCON(i) (rcons[(i)] << 24)
-
-static inline u32 rotr(u32 val, int bits)
-{
- return (val >> bits) | (val << (32 - bits));
-}
-
-#define TE0(i) Te0[((i) >> 24) & 0xff]
-#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
-#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
-#define TE3(i) rotr(Te0[(i) & 0xff], 24)
-
-/* ===== start - public domain SHA256 implementation ===== */
-
-/* This is based on SHA256 implementation in LibTomCrypt that was released into
- * public domain by Tom St Denis.
- */
-
-/* the K array */
-static const unsigned long K[64] = {
- 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
- 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
- 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
- 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
- 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
- 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
- 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
- 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
- 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
- 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
- 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
- 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
- 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
-};
-
-/* Various logical functions */
-#define RORc(x, y) \
- (((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y) & 31)) | \
- ((unsigned long)(x) << (unsigned long)(32 - ((y) & 31)))) & 0xFFFFFFFFUL)
-#define Ch(x, y, z) (z ^ (x & (y ^ z)))
-#define Maj(x, y, z) (((x | y) & z) | (x & y))
-#define S(x, n) RORc((x), (n))
-#define R(x, n) (((x) & 0xFFFFFFFFUL) >> (n))
-#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
-#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
-#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
-#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
-
void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 217be809b937..757c582ba4d9 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -74,37 +74,6 @@ enum WIFI_FRAME_SUBTYPE {
WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
};
-enum WIFI_REASON_CODE {
- _RSON_RESERVED_ = 0,
- _RSON_UNSPECIFIED_ = 1,
- _RSON_AUTH_NO_LONGER_VALID_ = 2,
- _RSON_DEAUTH_STA_LEAVING_ = 3,
- _RSON_INACTIVITY_ = 4,
- _RSON_UNABLE_HANDLE_ = 5,
- _RSON_CLS2_ = 6,
- _RSON_CLS3_ = 7,
- _RSON_DISAOC_STA_LEAVING_ = 8,
- _RSON_ASOC_NOT_AUTH_ = 9,
-
- /* WPA reason */
- _RSON_INVALID_IE_ = 13,
- _RSON_MIC_FAILURE_ = 14,
- _RSON_4WAY_HNDSHK_TIMEOUT_ = 15,
- _RSON_GROUP_KEY_UPDATE_TIMEOUT_ = 16,
- _RSON_DIFF_IE_ = 17,
- _RSON_MLTCST_CIPHER_NOT_VALID_ = 18,
- _RSON_UNICST_CIPHER_NOT_VALID_ = 19,
- _RSON_AKMP_NOT_VALID_ = 20,
- _RSON_UNSUPPORT_RSNE_VER_ = 21,
- _RSON_INVALID_RSNE_CAP_ = 22,
- _RSON_IEEE_802DOT1X_AUTH_FAIL_ = 23,
-
- /* belowing are Realtek definition */
- _RSON_PMK_NOT_AVAILABLE_ = 24,
- _RSON_TDLS_TEAR_TOOFAR_ = 25,
- _RSON_TDLS_TEAR_UN_RSN_ = 26,
-};
-
enum WIFI_STATUS_CODE {
_STATS_SUCCESSFUL_ = 0,
_STATS_FAILURE_ = 1,
@@ -326,11 +295,12 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
static inline int IsFrameTypeCtrl(unsigned char *pframe)
{
- if (WIFI_CTRL_TYPE == GetFrameType(pframe))
+ if (GetFrameType(pframe) == WIFI_CTRL_TYPE)
return true;
else
return false;
}
+
/*-----------------------------------------------------------------------------
Below is for the security related definition
------------------------------------------------------------------------------*/
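The private WIFI_REASON_CODE enum removed above duplicates the standard 802.11 reason codes, so callers can presumably switch to the WLAN_REASON_* constants from <linux/ieee80211.h> (the Realtek-specific values 24-26 have no standard equivalent). An illustrative correspondence, not taken from this patch:

#include <linux/ieee80211.h>

/* _RSON_UNSPECIFIED_          -> WLAN_REASON_UNSPECIFIED          (1)
 * _RSON_AUTH_NO_LONGER_VALID_ -> WLAN_REASON_PREV_AUTH_NOT_VALID  (2)
 * _RSON_DEAUTH_STA_LEAVING_   -> WLAN_REASON_DEAUTH_LEAVING       (3)
 * _RSON_MIC_FAILURE_          -> WLAN_REASON_MICHAEL_MIC_FAILURE (14)
 */
issue_deauth(padapter, sta_addr, WLAN_REASON_DEAUTH_LEAVING);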
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 2e83d24fcb09..8e10462f1fbe 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -124,6 +124,7 @@ static char *translate_scan(struct adapter *padapter,
if (p && ht_ielen > 0) {
struct ieee80211_ht_cap *pht_capie;
+
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
@@ -310,30 +311,30 @@ static char *translate_scan(struct adapter *padapter,
static int wpa_set_auth_algs(struct net_device *dev, u32 value)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
int ret = 0;
if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
- DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", value);
+ DBG_88E("%s, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", __func__, value);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
} else if (value & AUTH_ALG_SHARED_KEY) {
- DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY [value:0x%x]\n", value);
+ DBG_88E("%s, AUTH_ALG_SHARED_KEY [value:0x%x]\n", __func__, value);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
} else if (value & AUTH_ALG_OPEN_SYSTEM) {
- DBG_88E("wpa_set_auth_algs, AUTH_ALG_OPEN_SYSTEM\n");
+ DBG_88E("%s, AUTH_ALG_OPEN_SYSTEM\n", __func__);
if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
}
} else if (value & AUTH_ALG_LEAP) {
- DBG_88E("wpa_set_auth_algs, AUTH_ALG_LEAP\n");
+ DBG_88E("%s, AUTH_ALG_LEAP\n", __func__);
} else {
- DBG_88E("wpa_set_auth_algs, error!\n");
+ DBG_88E("%s, error!\n", __func__);
ret = -EINVAL;
}
return ret;
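Two mechanical cleanups repeat throughout this file: the cast in front of rtw_netdev_priv() goes away (the helper boils down to netdev_priv(), which returns void *, so the assignment converts implicitly in C), and hard-coded function names in log strings become the %s/__func__ pair, which stays correct if a function is later renamed. A minimal sketch of both patterns (struct name and field hypothetical):

#include <linux/netdevice.h>
#include <linux/printk.h>

struct my_priv {
	int state;
};

static void example_handler(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);	/* no cast needed: returns void * */

	/* __func__ expands to "example_handler" at compile time */
	pr_debug("%s: state=%d\n", __func__, priv->state);
}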
@@ -343,9 +344,9 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
{
int ret = 0;
u32 wep_key_idx, wep_key_len, wep_total_len;
- struct ndis_802_11_wep *pwep = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
param->u.crypt.err = 0;
@@ -367,8 +368,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("wpa_set_encryption, crypt.alg = WEP\n"));
- DBG_88E("wpa_set_encryption, crypt.alg = WEP\n");
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("%s, crypt.alg = WEP\n", __func__));
+ DBG_88E("%s, crypt.alg = WEP\n", __func__);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -390,7 +391,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
if (!pwep) {
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("%s: pwep allocate fail !!!\n", __func__));
goto exit;
}
memset(pwep, 0, wep_total_len);
@@ -437,11 +438,11 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
psta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
if (param->u.crypt.set_tx == 1) { /* pairwise key */
- memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
@@ -453,7 +454,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
} else { /* group key */
- memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16 ));
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
@@ -473,7 +474,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
pbcmc_sta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
}
@@ -603,8 +604,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
}
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
- pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
+ ("%s: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
+ __func__, pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
exit:
kfree(buf);
return ret;
@@ -613,10 +614,10 @@ exit:
typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
static int rtw_wx_get_name(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
@@ -657,18 +658,18 @@ static int rtw_wx_get_name(struct net_device *dev,
}
static int rtw_wx_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+%s\n", __func__));
return 0;
}
static int rtw_wx_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
@@ -687,13 +688,13 @@ static int rtw_wx_get_freq(struct net_device *dev,
}
static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+ union iwreq_data *wrqu, char *b)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
enum ndis_802_11_network_infra networkType;
int ret = 0;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (!rtw_pwr_wakeup(padapter)) {
ret = -EPERM;
goto exit;
}
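The `_FAIL == rtw_pwr_wakeup(padapter)` tests become plain logical negation. In this driver _SUCCESS/_FAIL are conventionally 1/0, so the two spellings are equivalent truth tests; a one-line sketch of why (definitions assumed from the driver's osdep headers, not shown in this diff):

#define _FAIL    0	/* assumed */
#define _SUCCESS 1	/* assumed */

/* with _FAIL == 0, (_FAIL == ret) and (!ret) test the same condition */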
@@ -735,12 +736,12 @@ exit:
}
static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
+ union iwreq_data *wrqu, char *b)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
wrqu->mode = IW_MODE_INFRA;
@@ -759,7 +760,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
u8 j, blInserted = false;
int ret = false;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -769,7 +770,7 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
if (pPMK->cmd == IW_PMKSA_ADD) {
- DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
+ DBG_88E("[%s] IW_PMKSA_ADD!\n", __func__);
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return ret;
ret = true;
@@ -777,11 +778,11 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
/* overwrite PMKID */
for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite with new PMKID. */
- DBG_88E("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n");
+ DBG_88E("[%s] BSSID exists in the PMKList.\n", __func__);
memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[j].bUsed = true;
+ psecuritypriv->PMKIDList[j].used = true;
psecuritypriv->PMKIDIndex = j + 1;
blInserted = true;
break;
@@ -790,30 +791,30 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
if (!blInserted) {
/* Find a new entry */
- DBG_88E("[rtw_wx_set_pmkid] Use the new entry index = %d for this PMKID.\n",
- psecuritypriv->PMKIDIndex);
+ DBG_88E("[%s] Use the new entry index = %d for this PMKID.\n",
+ __func__, psecuritypriv->PMKIDIndex);
- memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bssid, strIssueBssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
- psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].used = true;
psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == 16)
psecuritypriv->PMKIDIndex = 0;
}
} else if (pPMK->cmd == IW_PMKSA_REMOVE) {
- DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n");
+ DBG_88E("[%s] IW_PMKSA_REMOVE!\n", __func__);
ret = true;
for (j = 0; j < NUM_PMKID_CACHE; j++) {
- if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].bssid, strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
- eth_zero_addr(psecuritypriv->PMKIDList[j].Bssid);
- psecuritypriv->PMKIDList[j].bUsed = false;
+ eth_zero_addr(psecuritypriv->PMKIDList[j].bssid);
+ psecuritypriv->PMKIDList[j].used = false;
break;
}
}
} else if (pPMK->cmd == IW_PMKSA_FLUSH) {
- DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n");
+ DBG_88E("[%s] IW_PMKSA_FLUSH!\n", __func__);
memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
ret = true;
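Alongside the Bssid/bUsed to bssid/used renames, it helps to see the structure these hunks manipulate: the PMKID cache is a fixed 16-entry array written round-robin, with an entry for a matching BSSID overwritten in place. A condensed sketch of the add path (entry shape inferred from the diff, helper name hypothetical):

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/types.h>

#define NUM_PMKID_CACHE 16

struct pmkid_entry {			/* shape inferred from the diff */
	u8   bssid[ETH_ALEN];
	u8   pmkid[16];
	bool used;
};

static void pmkid_cache_add(struct pmkid_entry *cache, u8 *next_idx,
			    const u8 *bssid, const u8 *pmkid)
{
	int j;

	/* same AP seen again: refresh the existing entry */
	for (j = 0; j < NUM_PMKID_CACHE; j++) {
		if (cache[j].used && ether_addr_equal(cache[j].bssid, bssid)) {
			memcpy(cache[j].pmkid, pmkid, 16);
			return;
		}
	}

	/* otherwise take the next slot, wrapping at the cache size */
	memcpy(cache[*next_idx].bssid, bssid, ETH_ALEN);
	memcpy(cache[*next_idx].pmkid, pmkid, 16);
	cache[*next_idx].used = true;
	*next_idx = (*next_idx + 1) % NUM_PMKID_CACHE;
}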
@@ -822,8 +823,8 @@ static int rtw_wx_set_pmkid(struct net_device *dev,
}
static int rtw_wx_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
wrqu->sens.value = 0;
wrqu->sens.fixed = 0; /* no auto select */
@@ -832,17 +833,17 @@ static int rtw_wx_get_sens(struct net_device *dev,
}
static int rtw_wx_get_range(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
u16 val;
int i;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s. cmd_code =%x\n", __func__, info->cmd));
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
@@ -931,12 +932,11 @@ static int rtw_wx_get_range(struct net_device *dev,
/* s3. set_802_11_encryption_mode() */
/* s4. rtw_set_802_11_bssid() */
static int rtw_wx_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra)
{
uint ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct sockaddr *temp = (struct sockaddr *)awrq;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct list_head *phead;
@@ -945,7 +945,7 @@ static int rtw_wx_set_wap(struct net_device *dev,
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (!rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
}
@@ -998,10 +998,10 @@ exit:
}
static int rtw_wx_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
@@ -1009,7 +1009,7 @@ static int rtw_wx_get_wap(struct net_device *dev,
eth_zero_addr(wrqu->ap_addr.sa_data);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
if (check_fwstate(pmlmepriv, _FW_LINKED) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
@@ -1021,12 +1021,12 @@ static int rtw_wx_get_wap(struct net_device *dev,
}
static int rtw_wx_set_mlme(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret = 0;
u16 reason;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
if (!mlme)
@@ -1054,17 +1054,17 @@ static int rtw_wx_set_mlme(struct net_device *dev,
}
static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
u8 _status = false;
int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (!rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
}
@@ -1122,7 +1122,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
spin_unlock_bh(&pmlmepriv->lock);
} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
- DBG_88E("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
+ DBG_88E("%s, req->scan_type == IW_SCAN_TYPE_PASSIVE\n", __func__);
}
} else {
if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE &&
@@ -1184,10 +1184,10 @@ exit:
}
static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
struct list_head *plist, *phead;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
@@ -1198,7 +1198,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
u32 wait_for_surveydone;
int wait_status;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
@@ -1252,10 +1252,10 @@ exit:
/* s3. set_802_11_encryption_mode() */
/* s4. rtw_set_802_11_ssid() */
static int rtw_wx_set_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct list_head *phead;
@@ -1267,8 +1267,8 @@ static int rtw_wx_set_essid(struct net_device *dev,
uint ret = 0, len;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ("+%s: fw_state = 0x%08x\n", __func__, get_fwstate(pmlmepriv)));
+ if (!rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
}
@@ -1301,7 +1301,7 @@ static int rtw_wx_set_essid(struct net_device *dev,
memcpy(ndis_ssid.ssid, extra, len);
src_ssid = ndis_ssid.ssid;
- RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("%s: ssid =[%s]\n", __func__, src_ssid));
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
pmlmepriv->pscanned = phead->next;
@@ -1314,13 +1314,13 @@ static int rtw_wx_set_essid(struct net_device *dev,
dst_ssid = pnetwork->network.ssid.ssid;
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("rtw_wx_set_essid: dst_ssid =%s\n",
+ ("%s: dst_ssid =%s\n", __func__,
pnetwork->network.ssid.ssid));
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.ssid_length)) &&
(pnetwork->network.ssid.ssid_length == ndis_ssid.ssid_length)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("rtw_wx_set_essid: find match, set infra mode\n"));
+ ("%s: find match, set infra mode\n", __func__));
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
@@ -1353,15 +1353,15 @@ exit:
}
static int rtw_wx_get_essid(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
u32 len;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
@@ -1378,8 +1378,8 @@ static int rtw_wx_get_essid(struct net_device *dev,
}
static int rtw_wx_set_rate(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
{
int i;
u8 datarates[NumRates];
@@ -1388,7 +1388,7 @@ static int rtw_wx_set_rate(struct net_device *dev,
u32 ratevalue = 0;
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s\n", __func__));
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
if (target_rate == -1) {
@@ -1457,12 +1457,12 @@ set_rate:
}
static int rtw_wx_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
u16 max_rate = 0;
- max_rate = rtw_get_cur_max_rate((struct adapter *)rtw_netdev_priv(dev));
+ max_rate = rtw_get_cur_max_rate(rtw_netdev_priv(dev));
if (max_rate == 0)
return -EPERM;
@@ -1474,10 +1474,10 @@ static int rtw_wx_get_rate(struct net_device *dev,
}
static int rtw_wx_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
if (wrqu->rts.disabled) {
padapter->registrypriv.rts_thresh = 2347;
@@ -1495,10 +1495,10 @@ static int rtw_wx_set_rts(struct net_device *dev,
}
static int rtw_wx_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
@@ -1510,10 +1510,10 @@ static int rtw_wx_get_rts(struct net_device *dev,
}
static int rtw_wx_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
if (wrqu->frag.disabled) {
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
@@ -1531,10 +1531,10 @@ static int rtw_wx_set_frag(struct net_device *dev,
}
static int rtw_wx_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
@@ -1545,8 +1545,8 @@ static int rtw_wx_get_frag(struct net_device *dev,
}
static int rtw_wx_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
wrqu->retry.value = 7;
wrqu->retry.fixed = 0; /* no auto select */
@@ -1556,8 +1556,8 @@ static int rtw_wx_get_retry(struct net_device *dev,
}
static int rtw_wx_set_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
{
u32 key, ret = 0;
u32 keyindex_provided;
@@ -1565,10 +1565,10 @@ static int rtw_wx_set_enc(struct net_device *dev,
enum ndis_802_11_auth_mode authmode;
struct iw_point *erq = &wrqu->encoding;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- DBG_88E("+rtw_wx_set_enc, flags = 0x%x\n", erq->flags);
+ DBG_88E("+%s, flags = 0x%x\n", __func__, erq->flags);
memset(&wep, 0, sizeof(struct ndis_802_11_wep));
@@ -1594,12 +1594,12 @@ static int rtw_wx_set_enc(struct net_device *dev,
} else {
keyindex_provided = 0;
key = padapter->securitypriv.dot11PrivacyKeyIndex;
- DBG_88E("rtw_wx_set_enc, key =%d\n", key);
+ DBG_88E("%s, key =%d\n", __func__, key);
}
/* set authentication mode */
if (erq->flags & IW_ENCODE_OPEN) {
- DBG_88E("rtw_wx_set_enc():IW_ENCODE_OPEN\n");
+ DBG_88E("%s():IW_ENCODE_OPEN\n", __func__);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -1607,7 +1607,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
} else if (erq->flags & IW_ENCODE_RESTRICTED) {
- DBG_88E("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
+ DBG_88E("%s():IW_ENCODE_RESTRICTED\n", __func__);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
@@ -1615,7 +1615,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
authmode = Ndis802_11AuthModeShared;
padapter->securitypriv.ndisauthtype = authmode;
} else {
- DBG_88E("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags);
+ DBG_88E("%s():erq->flags = 0x%x\n", __func__, erq->flags);
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
@@ -1670,11 +1670,11 @@ exit:
}
static int rtw_wx_get_enc(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *keybuf)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
{
uint key;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct iw_point *erq = &wrqu->encoding;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1735,8 +1735,8 @@ static int rtw_wx_get_enc(struct net_device *dev,
}
static int rtw_wx_get_power(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
wrqu->power.value = 0;
wrqu->power.fixed = 0; /* no auto select */
@@ -1749,16 +1749,16 @@ static int rtw_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
return rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
}
static int rtw_wx_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct iw_param *param = (struct iw_param *)&wrqu->param;
int ret = 0;
@@ -1812,9 +1812,7 @@ static int rtw_wx_set_auth(struct net_device *dev,
break;
case IW_AUTH_80211_AUTH_ALG:
- /*
- * It's the starting point of a link layer connection using wpa_supplicant
- */
+ /* It's the starting point of a link layer connection using wpa_supplicant */
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
LeaveAllPowerSaveMode(padapter);
rtw_disassoc_cmd(padapter, 500, false);
@@ -1838,8 +1836,8 @@ static int rtw_wx_set_auth(struct net_device *dev,
}
static int rtw_wx_set_enc_ext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
char *alg_name;
u32 param_len;
@@ -1930,7 +1928,7 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
uint ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
switch (name) {
case IEEE_PARAM_WPA_ENABLED:
@@ -1946,7 +1944,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
break;
}
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
- ("wpa_set_param:padapter->securitypriv.ndisauthtype =%d\n", padapter->securitypriv.ndisauthtype));
+ ("%s:padapter->securitypriv.ndisauthtype =%d\n", __func__, padapter->securitypriv.ndisauthtype));
break;
case IEEE_PARAM_TKIP_COUNTERMEASURES:
break;
@@ -1985,7 +1983,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
{
int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
switch (command) {
case IEEE_MLME_STA_DEAUTH:
@@ -2022,7 +2020,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
break;
case IEEE_CMD_SET_WPA_IE:
- ret = rtw_set_wpa_ie((struct adapter *)rtw_netdev_priv(dev),
+ ret = rtw_set_wpa_ie(rtw_netdev_priv(dev),
(char *)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
break;
@@ -2166,7 +2164,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
u32 wep_key_idx, wep_key_len, wep_total_len;
struct ndis_802_11_wep *pwep = NULL;
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2186,7 +2184,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else {
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (!psta) {
- DBG_88E("rtw_set_encryption(), sta has already been removed or never been added\n");
+ DBG_88E("%s(), sta has already been removed or never been added\n", __func__);
goto exit;
}
}
@@ -2267,7 +2265,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
DBG_88E("%s, set group_key, WEP\n", __func__);
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -2276,7 +2274,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
DBG_88E("%s, set group_key, TKIP\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
@@ -2286,7 +2284,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
DBG_88E("%s, set group_key, CCMP\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _AES_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
} else {
DBG_88E("%s, set group_key, none\n", __func__);
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
@@ -2341,7 +2339,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else { /* group key??? */
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
@@ -2349,7 +2347,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
@@ -2360,7 +2358,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _AES_;
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
- param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
+ param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
@@ -2392,7 +2390,7 @@ exit:
static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
@@ -2417,7 +2415,7 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
static int rtw_hostapd_sta_flush(struct net_device *dev)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
DBG_88E("%s\n", __func__);
@@ -2430,11 +2428,11 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
{
int ret = 0;
struct sta_info *psta = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E("rtw_add_sta(aid =%d) =%pM\n", param->u.add_sta.aid, (param->sta_addr));
+ DBG_88E("%s(aid =%d) =%pM\n", __func__, param->u.add_sta.aid, (param->sta_addr));
if (!check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)))
return -EINVAL;
@@ -2483,12 +2481,12 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
{
struct sta_info *psta = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
int updated = 0;
- DBG_88E("rtw_del_sta =%pM\n", (param->sta_addr));
+ DBG_88E("%s =%pM\n", __func__, (param->sta_addr));
if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE))
return -EINVAL;
@@ -2508,7 +2506,7 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
associated_clients_update(padapter, updated);
psta = NULL;
} else {
- DBG_88E("rtw_del_sta(), sta has already been removed or never been added\n");
+ DBG_88E("%s(), sta has already been removed or never been added\n", __func__);
}
return 0;
@@ -2518,7 +2516,7 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
{
int ret = 0;
struct sta_info *psta = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
@@ -2574,11 +2572,11 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
{
int ret = 0;
struct sta_info *psta = NULL;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E("rtw_get_sta_wpaie, sta_addr: %pM\n", (param->sta_addr));
+ DBG_88E("%s, sta_addr: %pM\n", __func__, (param->sta_addr));
if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE))
return -EINVAL;
@@ -2610,7 +2608,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
{
unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ie_len;
@@ -2645,7 +2643,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
@@ -2674,7 +2672,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
@@ -2704,7 +2702,7 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -2728,7 +2726,7 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -2742,7 +2740,7 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -2756,7 +2754,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -2771,12 +2769,12 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
{
struct ieee_param *param;
int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
/*
- * this function is expect to call in master mode, which allows no power saving
- * so, we just check hw_init_completed
- */
+	 * this function is expected to be called in master mode, which allows no power saving,
+	 * so we just check hw_init_completed
+ */
if (!padapter->hw_init_completed)
return -EPERM;
@@ -2846,14 +2844,13 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
#include <rtw_android.h>
static int rtw_wx_set_priv(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *awrq,
- char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *awrq, char *extra)
{
int ret = 0;
int len = 0;
char *ext;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct iw_point *dwrq = (struct iw_point *)awrq;
if (dwrq->length == 0)
@@ -2877,7 +2874,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
int probereq_wpsie_len = len;
u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
- if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
+ if ((probereq_wpsie[0] == _VENDOR_SPECIFIC_IE_) &&
(!memcmp(&probereq_wpsie[2], wps_oui, 4))) {
cp_sz = min(probereq_wpsie_len, MAX_WPS_IE_LEN);
@@ -2971,7 +2968,7 @@ static iw_handler rtw_handlers[] = {
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct adapter *padapter = rtw_netdev_priv(dev);
struct iw_statistics *piwstats = &padapter->iwstats;
int tmp_level = 0;
int tmp_qual = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 8907bf6bb7ff..e291df87f620 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -187,7 +187,7 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct sockaddr *addr = p;
if (!padapter->bup)
@@ -198,7 +198,7 @@ static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct recv_priv *precvpriv = &padapter->recvpriv;
@@ -335,7 +335,7 @@ static int rtw_start_drv_threads(struct adapter *padapter)
{
int err = 0;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter,
"RTW_CMD_THREAD");
@@ -350,7 +350,7 @@ static int rtw_start_drv_threads(struct adapter *padapter)
void rtw_stop_drv_threads(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
/* Below is to terminate rtw_cmd_thread & event_thread... */
complete(&padapter->cmdpriv.cmd_queue_comp);
@@ -433,7 +433,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
{
u8 ret8 = _SUCCESS;
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
@@ -487,27 +487,27 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_sreset_init(padapter);
exit:
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-%s\n", __func__));
return ret8;
}
void rtw_cancel_all_timer(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+%s\n", __func__));
del_timer_sync(&padapter->mlmepriv.assoc_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel association timer complete!\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel association timer complete!\n", __func__));
del_timer_sync(&padapter->mlmepriv.scan_to_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel scan_to_timer!\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel scan_to_timer!\n", __func__));
del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel dynamic_chk_timer!\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel dynamic_chk_timer!\n", __func__));
/* cancel sw led timer */
rtw_hal_sw_led_deinit(padapter);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel DeInitSwLeds!\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel DeInitSwLeds!\n", __func__));
del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer);
@@ -516,7 +516,7 @@ void rtw_cancel_all_timer(struct adapter *padapter)
u8 rtw_free_drv_sw(struct adapter *padapter)
{
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>%s", __func__));
free_mlme_ext_priv(&padapter->mlmeextpriv);
@@ -530,11 +530,11 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
rtw_hal_free_data(padapter);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== %s\n", __func__));
mutex_destroy(&padapter->hw_init_mutex);
- RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-%s\n", __func__));
return _SUCCESS;
}
@@ -543,7 +543,7 @@ static int _netdev_open(struct net_device *pnetdev)
{
uint status;
int err;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
@@ -612,7 +612,7 @@ netdev_open_error:
int netdev_open(struct net_device *pnetdev)
{
int ret;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
if (mutex_lock_interruptible(&padapter->hw_init_mutex))
return -ERESTARTSYS;
@@ -633,7 +633,7 @@ int ips_netdrv_open(struct adapter *padapter)
status = rtw_hal_init(padapter);
if (status == _FAIL) {
- RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("%s(): Can't init h/w!\n", __func__));
goto netdev_open_error;
}
@@ -646,7 +646,7 @@ int ips_netdrv_open(struct adapter *padapter)
return _SUCCESS;
netdev_open_error:
- DBG_88E("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup);
+ DBG_88E("-%s - drv_open failure, bup =%d\n", __func__, padapter->bup);
return _FAIL;
}
@@ -656,14 +656,14 @@ int rtw_ips_pwr_up(struct adapter *padapter)
int result;
unsigned long start_time = jiffies;
- DBG_88E("===> rtw_ips_pwr_up..............\n");
+ DBG_88E("===> %s..............\n", __func__);
rtw_reset_drv_sw(padapter);
result = ips_netdrv_open(padapter);
led_control_8188eu(padapter, LED_CTL_NO_LINK);
- DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n",
+ DBG_88E("<=== %s.............. in %dms\n", __func__,
jiffies_to_msecs(jiffies - start_time));
return result;
}
@@ -672,14 +672,14 @@ void rtw_ips_pwr_down(struct adapter *padapter)
{
unsigned long start_time = jiffies;
- DBG_88E("===> rtw_ips_pwr_down...................\n");
+ DBG_88E("===> %s...................\n", __func__);
padapter->net_closed = true;
led_control_8188eu(padapter, LED_CTL_POWER_OFF);
rtw_ips_dev_unload(padapter);
- DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n",
+ DBG_88E("<=== %s..................... in %dms\n", __func__,
jiffies_to_msecs(jiffies - start_time));
}
@@ -698,7 +698,7 @@ void rtw_ips_dev_unload(struct adapter *padapter)
static int netdev_close(struct net_device *pnetdev)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index bf86d03820ca..b5209627fd1a 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -68,7 +68,7 @@ int rtw_android_cmdstr_to_num(char *cmdstr)
for (cmd_num = 0; cmd_num < ANDROID_WIFI_CMD_MAX; cmd_num++)
if (!strncasecmp(cmdstr, android_wifi_cmd_str[cmd_num],
- strlen(android_wifi_cmd_str[cmd_num])))
+ strlen(android_wifi_cmd_str[cmd_num])))
break;
return cmd_num;
}
@@ -76,7 +76,7 @@ int rtw_android_cmdstr_to_num(char *cmdstr)
static int rtw_android_get_rssi(struct net_device *net, char *command,
int total_len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ struct adapter *padapter = rtw_netdev_priv(net);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pcur_network = &pmlmepriv->cur_network;
int bytes_written = 0;
@@ -93,7 +93,7 @@ static int rtw_android_get_rssi(struct net_device *net, char *command,
static int rtw_android_get_link_speed(struct net_device *net, char *command,
int total_len)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ struct adapter *padapter = rtw_netdev_priv(net);
u16 link_speed;
link_speed = rtw_get_cur_max_rate(padapter) / 10;
@@ -111,7 +111,7 @@ static int rtw_android_get_macaddr(struct net_device *net, char *command,
static int android_set_cntry(struct net_device *net, char *command,
int total_len)
{
- struct adapter *adapter = (struct adapter *)rtw_netdev_priv(net);
+ struct adapter *adapter = rtw_netdev_priv(net);
char *country_code = command + strlen(android_wifi_cmd_str[ANDROID_WIFI_CMD_COUNTRY]) + 1;
int ret;
@@ -120,7 +120,7 @@ static int android_set_cntry(struct net_device *net, char *command,
}
static int android_get_p2p_addr(struct net_device *net, char *command,
- int total_len)
+ int total_len)
{
/* We use the same address as our HW MAC address */
memcpy(command, net->dev_addr, ETH_ALEN);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index f7f09c0d273f..99bfc828672c 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -118,7 +118,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
if (dvobj) {
/* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
if ((dvobj->NumInterfaces != 2 &&
- dvobj->NumInterfaces != 3) ||
+ dvobj->NumInterfaces != 3) ||
(dvobj->InterfaceNumber == 1)) {
if (interface_to_usbdev(usb_intf)->state !=
USB_STATE_NOTATTACHED) {
@@ -126,7 +126,8 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
* remove/insert module, driver fails
* on sitesurvey for the first time when
* device is up . Reset usb port for sitesurvey
- * fail issue. */
+ * fail issue.
+ */
pr_debug("usb attached..., try to reset usb device\n");
usb_reset_device(interface_to_usbdev(usb_intf));
}
@@ -141,7 +142,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
void usb_intf_stop(struct adapter *padapter)
{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+%s\n", __func__));
/* disable_hw_interrupt */
if (!padapter->bSurpriseRemoved) {
@@ -159,15 +160,15 @@ void usb_intf_stop(struct adapter *padapter)
/* todo:cancel other irps */
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_stop\n"));
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-%s\n", __func__));
}
static void rtw_dev_unload(struct adapter *padapter)
{
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+%s\n", __func__));
if (padapter->bup) {
- pr_debug("===> rtw_dev_unload\n");
+ pr_debug("===> %s\n", __func__);
padapter->bDriverStopped = true;
if (padapter->xmitpriv.ack_tx)
rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
@@ -189,9 +190,9 @@ static void rtw_dev_unload(struct adapter *padapter)
("r871x_dev_unload():padapter->bup == false\n"));
}
- pr_debug("<=== rtw_dev_unload\n");
+ pr_debug("<=== %s\n", __func__);
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-%s\n", __func__));
}
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
@@ -208,8 +209,8 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
if ((!padapter->bup) || (padapter->bDriverStopped) ||
(padapter->bSurpriseRemoved)) {
pr_debug("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
- padapter->bup, padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
goto exit;
}
@@ -230,11 +231,11 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
pr_debug("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
- __func__, __LINE__,
- pmlmepriv->cur_network.network.ssid.ssid,
- pmlmepriv->cur_network.network.MacAddress,
- pmlmepriv->cur_network.network.ssid.ssid_length,
- pmlmepriv->assoc_ssid.ssid_length);
+ __func__, __LINE__,
+ pmlmepriv->cur_network.network.ssid.ssid,
+ pmlmepriv->cur_network.network.MacAddress,
+ pmlmepriv->cur_network.network.ssid.ssid_length,
+ pmlmepriv->assoc_ssid.ssid_length);
pmlmepriv->to_roaming = 1;
}
@@ -299,7 +300,7 @@ exit:
if (pwrpriv)
pwrpriv->bInSuspend = false;
pr_debug("<=== %s return %d.............. in %dms\n", __func__,
- ret, jiffies_to_msecs(jiffies - start_time));
+ ret, jiffies_to_msecs(jiffies - start_time));
return ret;
}
@@ -321,7 +322,8 @@ static int rtw_resume(struct usb_interface *pusb_intf)
*/
static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
- struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
+ struct usb_interface *pusb_intf,
+ const struct usb_device_id *pdid)
{
struct adapter *padapter = NULL;
struct net_device *pnetdev = NULL;
@@ -379,12 +381,11 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
device_init_wakeup(&pusb_intf->dev, 1);
pr_debug("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
pr_debug("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
- device_may_wakeup(&pusb_intf->dev));
+ device_may_wakeup(&pusb_intf->dev));
}
#endif
- /* 2012-07-11 Move here to prevent the 8723AS-VAU BT auto
- * suspend influence */
+ /* 2012-07-11 Move here to prevent the 8723AS-VAU BT auto suspend influence */
if (usb_autopm_get_interface(pusb_intf) < 0)
pr_debug("can't get autopm:\n");
@@ -393,7 +394,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
pr_debug("MAC Address from pnetdev->dev_addr = %pM\n",
- pnetdev->dev_addr);
+ pnetdev->dev_addr);
/* step 6. Tell the network stack we exist */
if (register_netdev(pnetdev) != 0) {
@@ -445,7 +446,7 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
rtw_dev_unload(if1);
pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n",
- if1->hw_init_completed);
+ if1->hw_init_completed);
rtw_free_drv_sw(if1);
rtw_free_netdev(pnetdev);
}
@@ -479,14 +480,15 @@ exit:
/*
* dev_remove() - our device is being removed
-*/
-/* rmmod module & unplug(SurpriseRemoved) will call r871xu_dev_remove() => how to recognize both */
+ *
+ * rmmod module & unplug(SurpriseRemoved) will call r871xu_dev_remove() => how to recognize both
+ */
static void rtw_dev_remove(struct usb_interface *pusb_intf)
{
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
struct adapter *padapter = dvobj->if1;
- pr_debug("+rtw_dev_remove\n");
+ pr_debug("+%s\n", __func__);
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
if (!pusb_intf->unregistering)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index a80c7f3b86d1..6926443bba4e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -773,10 +773,10 @@ void usb_write_port_cancel(struct adapter *padapter)
}
}
-void rtl8188eu_recv_tasklet(unsigned long priv)
+void rtl8188eu_recv_tasklet(struct tasklet_struct *t)
{
struct sk_buff *pskb;
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = from_tasklet(adapt, t, recvpriv.recv_tasklet);
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -792,9 +792,9 @@ void rtl8188eu_recv_tasklet(unsigned long priv)
}
}
-void rtl8188eu_xmit_tasklet(unsigned long priv)
+void rtl8188eu_xmit_tasklet(struct tasklet_struct *t)
{
- struct adapter *adapt = (struct adapter *)priv;
+ struct adapter *adapt = from_tasklet(adapt, t, xmitpriv.xmit_tasklet);
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
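These two hunks follow the kernel-wide tasklet API conversion: the callback now receives the tasklet_struct itself, and from_tasklet() -- a typed container_of() wrapper -- recovers the enclosing object, replacing the old unsigned long data cookie. A minimal sketch of the pattern (structure and names hypothetical):

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
	int pending;
};

static void my_rx_tasklet(struct tasklet_struct *t)
{
	/* recover the containing structure from the tasklet pointer */
	struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	/* replaces tasklet_init(&t, fn, (unsigned long)dev) */
	tasklet_setup(&dev->rx_tasklet, my_rx_tasklet);
}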
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index a73313cf6a75..c22ddeb9a56b 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -164,7 +164,7 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
s32 res = 0;
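Dropping the cast above is safe assuming rtw_netdev_priv() evaluates to a void pointer, as it appears to elsewhere in this driver: C converts void * to any object pointer implicitly, so the explicit cast only masked type errors. A sketch of the equivalent helper shape (all names illustrative):

    /*
     * Sketch of a netdev_priv()-style accessor; "demo_priv" and the
     * helper are illustrative. Because the return type is void *,
     * callers need no cast.
     */
    #include <linux/netdevice.h>

    struct demo_priv {
            int id;
    };

    static inline void *demo_netdev_priv(struct net_device *dev)
    {
            return netdev_priv(dev);
    }

    static void demo_use(struct net_device *dev)
    {
            struct demo_priv *p = demo_netdev_priv(dev);    /* no cast */

            p->id = 0;
    }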
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 1007eea6c8fc..03fcc23516fd 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -14,6 +14,7 @@ if RTLLIB
config RTLLIB_CRYPTO_CCMP
tristate "Support for rtllib CCMP crypto"
depends on RTLLIB
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
default y
@@ -25,7 +26,7 @@ config RTLLIB_CRYPTO_CCMP
config RTLLIB_CRYPTO_TKIP
tristate "Support for rtllib TKIP crypto"
depends on RTLLIB
- select CRYPTO_ARC4
+ select CRYPTO_LIB_ARC4
select CRYPTO_MICHAEL_MIC
default y
help
@@ -35,7 +36,7 @@ config RTLLIB_CRYPTO_TKIP
config RTLLIB_CRYPTO_WEP
tristate "Support for rtllib WEP crypto"
- select CRYPTO_ARC4
+ select CRYPTO_LIB_ARC4
depends on RTLLIB
default y
help
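Selecting CRYPTO (for the API core) and swapping CRYPTO_ARC4 for CRYPTO_LIB_ARC4 matches the source changes further down: the TKIP/WEP code stops allocating an "ecb(arc4)" sync skcipher and calls the arc4 library directly. A sketch of that library interface, with made-up key and payload values:

    /*
     * Sketch of the lib/crypto interface CRYPTO_LIB_ARC4 provides;
     * the key and payload are made-up demo values.
     */
    #include <crypto/arc4.h>

    static void arc4_demo(void)
    {
            struct arc4_ctx ctx;
            static const u8 key[16] = { 0x01, 0x02, 0x03 };
            u8 buf[8] = "payload";

            arc4_setkey(&ctx, key, sizeof(key));
            arc4_crypt(&ctx, buf, buf, sizeof(buf));   /* in place */
    }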
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index fac58eebf263..663675efcfe4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -82,8 +82,8 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_pci_initdescring(struct net_device *dev);
-static void _rtl92e_irq_tx_tasklet(unsigned long data);
-static void _rtl92e_irq_rx_tasklet(unsigned long data);
+static void _rtl92e_irq_tx_tasklet(struct tasklet_struct *t);
+static void _rtl92e_irq_rx_tasklet(struct tasklet_struct *t);
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
static int _rtl92e_try_up(struct net_device *dev);
@@ -517,9 +517,10 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
return 0;
}
-static void _rtl92e_prepare_beacon(unsigned long data)
+static void _rtl92e_prepare_beacon(struct tasklet_struct *t)
{
- struct r8192_priv *priv = (struct r8192_priv *)data;
+ struct r8192_priv *priv = from_tasklet(priv, t,
+ irq_prepare_beacon_tasklet);
struct net_device *dev = priv->rtllib->dev;
struct sk_buff *pskb = NULL, *pnewskb = NULL;
struct cb_desc *tcb_desc = NULL;
@@ -1009,12 +1010,10 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
(void *)rtl92e_hw_wakeup_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
(void *)rtl92e_hw_sleep_wq, dev);
- tasklet_init(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet,
- (unsigned long)priv);
- tasklet_init(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet,
- (unsigned long)priv);
- tasklet_init(&priv->irq_prepare_beacon_tasklet, _rtl92e_prepare_beacon,
- (unsigned long)priv);
+ tasklet_setup(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet);
+ tasklet_setup(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet);
+ tasklet_setup(&priv->irq_prepare_beacon_tasklet,
+ _rtl92e_prepare_beacon);
}
static short _rtl92e_get_channel_map(struct net_device *dev)
@@ -2109,16 +2108,16 @@ static void _rtl92e_tx_resume(struct net_device *dev)
}
}
-static void _rtl92e_irq_tx_tasklet(unsigned long data)
+static void _rtl92e_irq_tx_tasklet(struct tasklet_struct *t)
{
- struct r8192_priv *priv = (struct r8192_priv *)data;
+ struct r8192_priv *priv = from_tasklet(priv, t, irq_tx_tasklet);
_rtl92e_tx_resume(priv->rtllib->dev);
}
-static void _rtl92e_irq_rx_tasklet(unsigned long data)
+static void _rtl92e_irq_rx_tasklet(struct tasklet_struct *t)
{
- struct r8192_priv *priv = (struct r8192_priv *)data;
+ struct r8192_priv *priv = from_tasklet(priv, t, irq_rx_tasklet);
_rtl92e_rx_normal(priv->rtllib->dev);
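The rtl_core.c hunks above are the same conversion with three tasklets embedded in one r8192_priv: the third argument to from_tasklet() names the member that t points into (irq_rx_tasklet, irq_tx_tasklet or irq_prepare_beacon_tasklet), so each callback recovers the same priv through its own field; see the sketch after the rtl8188eu usb_ops_linux.c hunk above.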
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 8d2a58e706d5..238387d6221b 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -5,8 +5,9 @@
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*/
+#include <crypto/arc4.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -16,7 +17,6 @@
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/string.h>
-#include <linux/scatterlist.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
@@ -45,9 +45,9 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_sync_skcipher *rx_tfm_arc4;
+ struct arc4_ctx rx_ctx_arc4;
+ struct arc4_ctx tx_ctx_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
@@ -58,16 +58,13 @@ static void *rtllib_tkip_init(int key_idx)
{
struct rtllib_tkip_data *priv;
+ if (fips_enabled)
+ return NULL;
+
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->tx_tfm_arc4)) {
- pr_debug("Could not allocate crypto API arc4\n");
- priv->tx_tfm_arc4 = NULL;
- goto fail;
- }
priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_michael)) {
@@ -76,13 +73,6 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->rx_tfm_arc4)) {
- pr_debug("Could not allocate crypto API arc4\n");
- priv->rx_tfm_arc4 = NULL;
- goto fail;
- }
-
priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_michael)) {
pr_debug("Could not allocate crypto API michael_mic\n");
@@ -94,9 +84,7 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -110,11 +98,9 @@ static void rtllib_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
- kfree(priv);
+ kfree_sensitive(priv);
}
@@ -289,7 +275,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
int ret = 0;
u8 rc4key[16], *icv;
u32 crc;
- struct scatterlist sg;
if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
@@ -331,8 +316,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
-
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
icv[0] = crc;
@@ -340,15 +323,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- sg_init_one(&sg, pos, len+4);
-
-
- crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
+ arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
+ arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
}
tkey->tx_iv16++;
@@ -376,9 +352,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 rc4key[16];
u8 icv[4];
u32 crc;
- struct scatterlist sg;
int plen;
- int err;
if (skb->len < hdr_len + 8 + 4)
return -1;
@@ -414,8 +388,6 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
-
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
tkey->initialized) {
@@ -439,22 +411,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- sg_init_one(&sg, pos, plen+4);
-
- crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev,
- "Failed to decrypt received packet from %pM\n",
- hdr->addr2);
- }
- return -7;
- }
+ arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
+ arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@@ -657,17 +615,13 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct rtllib_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
tkey->key_idx = keyidx;
tkey->tx_tfm_michael = tfm;
- tkey->tx_tfm_arc4 = tfm2;
tkey->rx_tfm_michael = tfm3;
- tkey->rx_tfm_arc4 = tfm4;
if (len == TKIP_KEY_LEN) {
memcpy(tkey->key, key, TKIP_KEY_LEN);
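The rtllib_crypt_tkip.c changes above replace the scatterlist and skcipher-request boilerplate with two direct calls: the library works on plain buffers and, unlike the async-capable crypto API path, cannot fail once the key is in range, which is why the decrypt error branch disappears. The encrypt tail reduces to the "append CRC-32 ICV, then RC4 over payload plus ICV" shape sketched below (standalone, illustrative names; the caller must leave four bytes of tailroom):

    /*
     * Standalone sketch of the "CRC-32 ICV then RC4 in place" framing
     * that rtllib_tkip_encrypt() now open-codes; names are illustrative.
     * buf must have room for len + 4 bytes.
     */
    #include <crypto/arc4.h>
    #include <linux/crc32.h>

    static void rc4_seal(struct arc4_ctx *ctx, const u8 *key, int klen,
                         u8 *buf, int len)
    {
            u32 crc = ~crc32_le(~0, buf, len);

            /* append the little-endian ICV */
            buf[len + 0] = crc;
            buf[len + 1] = crc >> 8;
            buf[len + 2] = crc >> 16;
            buf[len + 3] = crc >> 24;

            /* encrypt payload + ICV in place */
            arc4_setkey(ctx, key, klen);
            arc4_crypt(ctx, buf, buf, len + 4);
    }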
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b1ea650036d2..7790271a6a40 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -5,7 +5,8 @@
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*/
-#include <crypto/skcipher.h>
+#include <crypto/arc4.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -14,7 +15,6 @@
#include <linux/string.h>
#include "rtllib.h"
-#include <linux/scatterlist.h>
#include <linux/crc32.h>
struct prism2_wep_data {
@@ -23,8 +23,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_sync_skcipher *tx_tfm;
- struct crypto_sync_skcipher *rx_tfm;
+ struct arc4_ctx rx_ctx_arc4;
+ struct arc4_ctx tx_ctx_arc4;
};
@@ -32,48 +32,24 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
+ if (fips_enabled)
+ return NULL;
+
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
- goto fail;
+ return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->tx_tfm)) {
- pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
- priv->tx_tfm = NULL;
- goto fail;
- }
- priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->rx_tfm)) {
- pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
- priv->rx_tfm = NULL;
- goto fail;
- }
-
/* start WEP IV from a random value */
get_random_bytes(&priv->iv, 4);
return priv;
-
-fail:
- if (priv) {
- crypto_free_sync_skcipher(priv->tx_tfm);
- crypto_free_sync_skcipher(priv->rx_tfm);
- kfree(priv);
- }
- return NULL;
}
static void prism2_wep_deinit(void *priv)
{
- struct prism2_wep_data *_priv = priv;
-
- if (_priv) {
- crypto_free_sync_skcipher(_priv->tx_tfm);
- crypto_free_sync_skcipher(_priv->rx_tfm);
- }
- kfree(priv);
+ kfree_sensitive(priv);
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
@@ -92,8 +68,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
MAX_DEV_ADDR_SIZE);
u32 crc;
u8 *icv;
- struct scatterlist sg;
- int err;
if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len){
@@ -131,8 +105,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
-
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
icv = skb_put(skb, 4);
@@ -141,14 +113,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- sg_init_one(&sg, pos, len+4);
- crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
- skcipher_request_set_sync_tfm(req, wep->tx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ arc4_setkey(&wep->tx_ctx_arc4, key, klen);
+ arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4);
}
return 0;
@@ -172,8 +138,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
MAX_DEV_ADDR_SIZE);
u32 crc;
u8 icv[4];
- struct scatterlist sg;
- int err;
if (skb->len < hdr_len + 8)
return -1;
@@ -195,17 +159,9 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
-
- sg_init_one(&sg, pos, plen+4);
- crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
- skcipher_request_set_sync_tfm(req, wep->rx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err)
- return -7;
+ arc4_setkey(&wep->rx_ctx_arc4, key, klen);
+ arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4);
+
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
icv[1] = crc >> 8;
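Collapsing prism2_wep_deinit() to a single call works because kfree_sensitive() both tolerates a NULL pointer and zeroizes the allocation before freeing it, which matters now that the context embeds the raw RC4 key schedule rather than opaque crypto API handles. A sketch:

    /*
     * Sketch of the kfree_sensitive() idiom: NULL-safe, and the buffer
     * is cleared (memzero_explicit) before going back to the allocator.
     * "struct demo_keys" is illustrative.
     */
    #include <linux/slab.h>

    struct demo_keys {
            u8 key[16];
    };

    static void demo_keys_free(struct demo_keys *k)
    {
            kfree_sensitive(k);     /* also fine when k == NULL */
    }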
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 6e2f620afd14..2c752ba5a802 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2044,9 +2044,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(unsigned long data)
+static inline void rtllib_sta_ps(struct tasklet_struct *t)
{
- struct rtllib_device *ieee = (struct rtllib_device *)data;
+ struct rtllib_device *ieee = from_tasklet(ieee, t, ps_task);
u64 time;
short sleep;
unsigned long flags, flags2;
@@ -3028,7 +3028,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task, rtllib_sta_ps, (unsigned long)ieee);
+ tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
}
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 79d7ad7c0a4a..e0d79daca24a 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -859,7 +859,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
else
- ieee->seq_ctrl[0]++;
+ ieee->seq_ctrl[0]++;
}
} else {
if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 1edca5c304fb..ef883d462d3d 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -8,3 +8,4 @@ config RTL8192U
select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
+ select CRYPTO_LIB_ARC4
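rtl8192u grows the same select CRYPTO_LIB_ARC4 for the same reason as the rtl8192e entries above: ieee80211_crypt_tkip.c and ieee80211_crypt_wep.c below now call arc4_setkey()/arc4_crypt() from the arc4 library instead of allocating an "ecb(arc4)" skcipher (see the interface sketch after the rtl8192e Kconfig hunk).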
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index ffe624ed0c0c..e8fa1d385f24 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -5,6 +5,7 @@
* Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*/
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -17,9 +18,8 @@
#include "ieee80211.h"
+#include <crypto/arc4.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
- #include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -49,9 +49,9 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_sync_skcipher *rx_tfm_arc4;
+ struct arc4_ctx rx_ctx_arc4;
+ struct arc4_ctx tx_ctx_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -62,19 +62,14 @@ static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
+ if (fips_enabled)
+ return NULL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->tx_tfm_arc4)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- priv->tx_tfm_arc4 = NULL;
- goto fail;
- }
-
priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -83,14 +78,6 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->rx_tfm_arc4)) {
- printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
- priv->rx_tfm_arc4 = NULL;
- goto fail;
- }
-
priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -104,9 +91,7 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -120,11 +105,9 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
- kfree(priv);
+ kfree_sensitive(priv);
}
@@ -290,10 +273,8 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- int ret = 0;
u8 rc4key[16], *icv;
u32 crc;
- struct scatterlist sg;
if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
@@ -334,21 +315,15 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
-
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, len + 4);
- skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
- ret = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
+
+ arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16);
+ arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4);
}
tkey->tx_iv16++;
@@ -357,12 +332,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->tx_iv32++;
}
- if (!tcb_desc->bHwSec)
- return ret;
- else
- return 0;
-
-
+ return 0;
}
static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
@@ -376,9 +346,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 rc4key[16];
u8 icv[4];
u32 crc;
- struct scatterlist sg;
int plen;
- int err;
if (skb->len < hdr_len + 8 + 4)
return -1;
@@ -412,8 +380,6 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
-
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
if (net_ratelimit()) {
@@ -434,23 +400,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- sg_init_one(&sg, pos, plen + 4);
-
- skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
-
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err) {
- if (net_ratelimit()) {
- netdev_dbg(skb->dev, "TKIP: failed to decrypt "
- "received packet from %pM\n",
- hdr->addr2);
- }
- return -7;
- }
+ arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16);
+ arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@@ -655,17 +606,13 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
tkey->key_idx = keyidx;
tkey->tx_tfm_michael = tfm;
- tkey->tx_tfm_arc4 = tfm2;
tkey->rx_tfm_michael = tfm3;
- tkey->rx_tfm_arc4 = tfm4;
if (len == TKIP_KEY_LEN) {
memcpy(tkey->key, key, TKIP_KEY_LEN);
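Each converted init function now opens with a fips_enabled check. The crypto API used to refuse non-approved ciphers such as "ecb(arc4)" in FIPS mode at allocation time; calling the library directly bypasses that gate, so the driver itself must decline to set up RC4-based WEP/TKIP. Minimal sketch of the guard (the allocation size is a placeholder):

    /*
     * Sketch of the fips_enabled guard: RC4 is not a FIPS-approved
     * cipher, so the constructor refuses up front. The allocation
     * size here is a placeholder.
     */
    #include <linux/fips.h>
    #include <linux/slab.h>

    static void *demo_crypt_init(void)
    {
            if (fips_enabled)
                    return NULL;

            return kzalloc(64, GFP_KERNEL);
    }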
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 26482c3dcd1c..a41b6510481b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -5,6 +5,7 @@
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*/
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -14,8 +15,7 @@
#include "ieee80211.h"
-#include <crypto/skcipher.h>
-#include <linux/scatterlist.h>
+#include <crypto/arc4.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
@@ -28,8 +28,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_sync_skcipher *tx_tfm;
- struct crypto_sync_skcipher *rx_tfm;
+ struct arc4_ctx rx_ctx_arc4;
+ struct arc4_ctx tx_ctx_arc4;
};
@@ -37,39 +37,24 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
+ if (fips_enabled)
+ return NULL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->tx_tfm))
- goto free_priv;
- priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
- if (IS_ERR(priv->rx_tfm))
- goto free_tx;
-
/* start WEP IV from a random value */
get_random_bytes(&priv->iv, 4);
return priv;
-free_tx:
- crypto_free_sync_skcipher(priv->tx_tfm);
-free_priv:
- kfree(priv);
- return NULL;
}
static void prism2_wep_deinit(void *priv)
{
- struct prism2_wep_data *_priv = priv;
-
- if (_priv) {
- crypto_free_sync_skcipher(_priv->tx_tfm);
- crypto_free_sync_skcipher(_priv->rx_tfm);
- }
- kfree(priv);
+ kfree_sensitive(priv);
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
@@ -87,8 +72,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u32 crc;
u8 *icv;
- struct scatterlist sg;
- int err;
if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
@@ -124,8 +107,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
-
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
icv = skb_put(skb, 4);
@@ -134,16 +115,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
- sg_init_one(&sg, pos, len + 4);
-
- skcipher_request_set_sync_tfm(req, wep->tx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
-
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
- return err;
+ arc4_setkey(&wep->tx_ctx_arc4, key, klen);
+ arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4);
}
return 0;
@@ -166,8 +139,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u32 crc;
u8 icv[4];
- struct scatterlist sg;
- int err;
if (skb->len < hdr_len + 8)
return -1;
@@ -189,19 +160,8 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
-
- crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
- sg_init_one(&sg, pos, plen + 4);
-
- skcipher_request_set_sync_tfm(req, wep->rx_tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
-
- err = crypto_skcipher_decrypt(req);
- skcipher_request_zero(req);
- if (err)
- return -7;
+ arc4_setkey(&wep->rx_ctx_arc4, key, klen);
+ arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 195d963c4fbb..b6fee7230ce0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -597,7 +597,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
sizeof(struct ieee80211_rxb *),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!prxbIndicateArray)
return;
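The GFP_KERNEL to GFP_ATOMIC switch above matters because RxReorderIndicatePacket() runs in the receive path, evidently in atomic context (softirq and/or under a spinlock), where a GFP_KERNEL allocation may sleep and trigger a "sleeping function called from invalid context" splat. Sketch of the rule, with illustrative names:

    /*
     * Sketch of the GFP rule the change enforces: code holding a
     * spinlock (or running in softirq) must not sleep, so it has to
     * allocate with GFP_ATOMIC. Names are illustrative.
     */
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void *demo_alloc_locked(size_t n)
    {
            void *p;

            spin_lock(&demo_lock);
            p = kmalloc(n, GFP_ATOMIC);     /* GFP_KERNEL could sleep here */
            spin_unlock(&demo_lock);

            return p;
    }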
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index d8eb907ff301..690b664df8fa 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1687,9 +1687,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(unsigned long data)
+static inline void ieee80211_sta_ps(struct tasklet_struct *t)
{
- struct ieee80211_device *ieee = (struct ieee80211_device *)data;
+ struct ieee80211_device *ieee = from_tasklet(ieee, t, ps_task);
u32 th, tl;
short sleep;
@@ -2598,7 +2598,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task, ieee80211_sta_ps, (unsigned long)ieee);
+ tasklet_setup(&ieee->ps_task, ieee80211_sta_ps);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 6ec65187bef9..27dc181c4c9b 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2193,7 +2193,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-static void rtl8192_irq_rx_tasklet(unsigned long data);
+static void rtl8192_irq_rx_tasklet(struct tasklet_struct *t);
/* init tasklet and wait_queue here. only 2.6 above kernel is considered */
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device *dev)
@@ -2214,8 +2214,7 @@ static void rtl8192_init_priv_task(struct net_device *dev)
InitialGainOperateWorkItemCallBack);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
- tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
- (unsigned long)priv);
+ tasklet_setup(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet);
}
static void rtl8192_get_eeprom_size(struct net_device *dev)
@@ -4647,9 +4646,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
}
}
-static void rtl8192_irq_rx_tasklet(unsigned long data)
+static void rtl8192_irq_rx_tasklet(struct tasklet_struct *t)
{
- struct r8192_priv *priv = (struct r8192_priv *)data;
+ struct r8192_priv *priv = from_tasklet(priv, t, irq_rx_tasklet);
struct sk_buff *skb;
struct rtl8192_rx_info *info;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 6b301acb584e..bac402b40121 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -26,6 +26,7 @@ Major Change History:
static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0x00a44f, 0x5ea44f
};
+
static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
0x5e4322, 0x00a44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f
};
@@ -599,7 +600,6 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
priv->rfa_txpowertrackingindex++;
priv->rfa_txpowertrackingindex_real++;
rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value);
-
}
}
priv->cck_present_attenuation_difference
@@ -1268,7 +1268,6 @@ static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
priv->btxpower_tracking = true;
priv->txpower_count = 0;
priv->btxpower_trackingInit = false;
-
}
static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
@@ -1773,7 +1772,6 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
/* 1.5 Higher EDCCA. */
/*PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);*/
return;
-
}
/* 2. When RSSI increase, We have to judge if it is larger than a threshold
@@ -1836,7 +1834,6 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
/* 2.5 DIG On. */
rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); /* Only clear byte 1 and rewrite. */
-
}
dm_ctrl_initgain_byrssi_highpwr(dev);
@@ -2157,7 +2154,6 @@ static void dm_check_edca_turbo(
write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
priv->bis_cur_rdlstate = false;
}
-
}
priv->bcurrent_turbo_EDCA = true;
@@ -2191,7 +2187,6 @@ static void dm_check_edca_turbo(
write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
-
/* Check ACM bit.
* If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
*/
@@ -2296,7 +2291,6 @@ static void dm_check_pbc_gpio(struct net_device *dev)
RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n");
priv->bpbc_pressed = true;
}
-
}
/*-----------------------------------------------------------------------------
@@ -2495,7 +2489,6 @@ static void dm_rxpath_sel_byrssi(struct net_device *dev)
cck_rx_ver2_min_index = i;
}
}
-
}
}
}
@@ -2715,7 +2708,6 @@ static void dm_EndSWFsync(struct net_device *dev)
priv->ContinueDiffCount = 0;
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
-
}
static void dm_StartSWFsync(struct net_device *dev)
@@ -2751,7 +2743,6 @@ static void dm_StartSWFsync(struct net_device *dev)
add_timer(&priv->fsync_timer);
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
-
}
static void dm_EndHWFsync(struct net_device *dev)
@@ -2759,7 +2750,6 @@ static void dm_EndHWFsync(struct net_device *dev)
RT_TRACE(COMP_HALDM, "%s\n", __func__);
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
write_nic_byte(dev, 0xc3b, 0x49);
-
}
void dm_check_fsync(struct net_device *dev)
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 95a2d2ee3c65..8d3a592f1c35 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -239,6 +239,7 @@ enum _RTL8192Usb_HW {
#define EPROM_W_BIT BIT(1)
#define EPROM_R_BIT BIT(0)
};
+
//----------------------------------------------------------------------------
// 818xB AnaParm & AnaParm2 Register
//----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 100532598781..d853586705fc 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -138,7 +138,6 @@ static int r8192_wx_force_reset(struct net_device *dev,
priv->force_reset = *extra;
mutex_unlock(&priv->wx_mutex);
return 0;
-
}
static int r8192_wx_set_rawtx(struct net_device *dev,
@@ -155,7 +154,6 @@ static int r8192_wx_set_rawtx(struct net_device *dev,
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int r8192_wx_set_crcmon(struct net_device *dev,
@@ -218,6 +216,7 @@ struct iw_range_with_scan_capa {
/* Scan capabilities */
__u8 scan_capa;
};
+
static int rtl8180_wx_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -251,7 +250,7 @@ static int rtl8180_wx_get_range(struct net_device *dev,
/* range->old_num_channels; */
/* range->old_num_frequency; */
/* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */
- if (priv->rf_set_sens != NULL)
+ if (priv->rf_set_sens)
range->sensitivity = priv->max_sens; /* signal level threshold range */
range->max_qual.qual = 100;
@@ -294,7 +293,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
/* range->max_r_time; */ /* Maximal retry lifetime */
for (i = 0, val = 0; i < 14; i++) {
-
/* Include only legal frequencies for some countries */
if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
range->freq[val].i = i + 1;
@@ -350,11 +348,9 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
return ret;
}
-
static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
-
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -444,7 +440,6 @@ static int r8192_wx_set_frag(struct net_device *dev,
return 0;
}
-
static int r8192_wx_get_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -458,13 +453,11 @@ static int r8192_wx_get_frag(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra)
{
-
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
/* struct sockaddr *temp = (struct sockaddr *)awrq; */
@@ -475,7 +468,6 @@ static int r8192_wx_set_wap(struct net_device *dev,
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int r8192_wx_get_wap(struct net_device *dev,
@@ -522,11 +514,8 @@ static int r8192_wx_set_enc(struct net_device *dev,
mutex_unlock(&priv->wx_mutex);
-
-
/* sometimes, the length is zero while we do not type key value */
if (wrqu->encoding.length != 0) {
-
for (i = 0; i < 4; i++) {
hwkey[i] |= key[4*i+0]&mask;
if (i == 1 && (4*i+1) == wrqu->encoding.length)
@@ -572,10 +561,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
zero_addr[key_idx],
0, /* DefaultKey */
hwkey); /* KeyContent */
-
- }
-
- else if (wrqu->encoding.length == 0xd) {
+ } else if (wrqu->encoding.length == 0xd) {
ieee->pairwise_key_type = KEY_TYPE_WEP104;
EnableHWSecurityConfig8192(dev);
@@ -586,21 +572,17 @@ static int r8192_wx_set_enc(struct net_device *dev,
zero_addr[key_idx],
0, /* DefaultKey */
hwkey); /* KeyContent */
-
} else {
netdev_warn(dev, "wrong type in WEP, not WEP40 and WEP104\n");
}
-
}
return ret;
}
-
static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa,
union iwreq_data *wrqu, char *p)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
int *parms = (int *)p;
int mode = parms[0];
@@ -610,8 +592,6 @@ static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info
return 1;
}
-
-
static int r8192_wx_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -663,7 +643,6 @@ static int r8192_wx_get_retry(struct net_device *dev,
{
struct r8192_priv *priv = ieee80211_priv(dev);
-
wrqu->retry.disabled = 0; /* can't be disabled */
if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
@@ -687,7 +666,7 @@ static int r8192_wx_get_sens(struct net_device *dev,
{
struct r8192_priv *priv = ieee80211_priv(dev);
- if (priv->rf_set_sens == NULL)
+ if (!priv->rf_set_sens)
return -1; /* we have not this support for this radio */
wrqu->sens.value = priv->sens;
return 0;
@@ -697,12 +676,11 @@ static int r8192_wx_set_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
short err = 0;
mutex_lock(&priv->wx_mutex);
- if (priv->rf_set_sens == NULL) {
+ if (!priv->rf_set_sens) {
err = -1; /* we have not this support for this radio */
goto exit;
}
@@ -726,7 +704,6 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
-
mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
@@ -758,7 +735,6 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
memcpy((u8 *)key, ext->key, 16); /* we only get 16 bytes key.why? WB 2008.7.1 */
if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
-
setKey(dev,
idx, /* EntryNao */
idx, /* KeyIndex */
@@ -784,16 +760,14 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
0, /* DefaultKey */
key); /* KeyContent */
}
-
-
}
end_hw_sec:
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
+
static int r8192_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
@@ -811,7 +785,6 @@ static int r8192_wx_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
-
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -833,8 +806,6 @@ static int r8192_wx_set_gen_ie(struct net_device *dev,
ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
mutex_unlock(&priv->wx_mutex);
return ret;
-
-
}
static int dummy(struct net_device *dev, struct iw_request_info *a,
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index bc98cdaf61ec..4cece40a92f6 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -336,7 +336,6 @@ static void cmpk_count_tx_status(struct net_device *dev,
priv->stats.txretrycount += pstx_status->txretry;
priv->stats.txfeedbackretry += pstx_status->txretry;
-
priv->stats.txmulticast += pstx_status->txmcok;
priv->stats.txbroadcast += pstx_status->txbcok;
priv->stats.txunicast += pstx_status->txucok;
@@ -431,7 +430,7 @@ static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
ptxrate = (cmpk_tx_rahis_t *)pmsg;
- if (ptxrate == NULL)
+ if (!ptxrate)
return;
for (i = 0; i < 16; i++) {
@@ -480,7 +479,7 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
/* 0. Check input arguments: the buffer may not be a command queue
* message, or the pointer may be null.
*/
- if (pstats == NULL)
+ if (!pstats)
return 0; /* This is not a command packet. */
/* 1. Read received command packet message length from RFD. */
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index dd81d210bd49..4f8629e47e82 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -54,11 +54,9 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
if ((buffer_len - frag_offset) > frag_threshold) {
frag_length = frag_threshold;
bLastIniPkt = 0;
-
} else {
frag_length = buffer_len - frag_offset;
bLastIniPkt = 1;
-
}
/* Allocate skb buffer to contain firmware info and tx descriptor info
@@ -104,7 +102,6 @@ static bool fw_download_code(struct net_device *dev, u8 *code_virtual_address,
} while (frag_offset < buffer_len);
return rt_status;
-
}
/*
@@ -172,7 +169,6 @@ CPUCheckMainCodeOKAndTurnOnCPU_Fail:
static bool CPUcheck_firmware_ready(struct net_device *dev)
{
-
bool rt_status = true;
int check_time = 200000;
u32 CPU_status = 0;
@@ -197,7 +193,6 @@ CPUCheckFirmwareReady_Fail:
RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
rt_status = false;
return rt_status;
-
}
bool init_firmware(struct net_device *dev)
@@ -338,7 +333,6 @@ download_firmware_fail:
RT_TRACE(COMP_ERR, "ERR in %s()\n", __func__);
rt_status = false;
return rt_status;
-
}
MODULE_FIRMWARE("RTL8192U/boot.img");
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.h b/drivers/staging/rtl8192u/r819xU_firmware_img.h
index 355da9157be1..61585a72465e 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware_img.h
@@ -13,7 +13,6 @@
#define RadioD_ArrayLength 1
#define PHY_REGArrayLength 1
-
extern u32 Rtl8192UsbPHY_REGArray[];
extern u32 Rtl8192UsbPHY_REG_1T2RArray[];
extern u32 Rtl8192UsbRadioA_Array[];
@@ -24,6 +23,4 @@ extern u32 Rtl8192UsbMACPHY_Array[];
extern u32 Rtl8192UsbMACPHY_Array_PG[];
extern u32 Rtl8192UsbAGCTAB_Array[];
-
-
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 37b99cf4b35f..eef751d2b12e 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -67,7 +67,6 @@ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 e_rfpath)
void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr, u32 bitmask,
u32 data)
{
-
u32 reg, bitshift;
if (bitmask != bMaskDWord) {
@@ -169,14 +168,12 @@ static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
-
/* TODO: we should not delay such a long time. Ask for help from SD3 */
usleep_range(1000, 1000);
ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
bLSSIReadBackData);
-
/* Switch back to Reg_Mode0 */
if (priv->rf_chip == RF_8256) {
priv->RfReg0Value[e_rfpath] &= 0xebf;
@@ -219,7 +216,6 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
-
if (offset >= 31) {
priv->RfReg0Value[e_rfpath] |= 0x140;
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
@@ -248,7 +244,6 @@ static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
/* Write operation */
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
-
if (offset == 0x0)
priv->RfReg0Value[e_rfpath] = data;
@@ -330,7 +325,6 @@ u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
u32 reg, bitshift;
struct r8192_priv *priv = ieee80211_priv(dev);
-
if (!rtl8192_phy_CheckIsLegalRFPath(dev, e_rfpath))
return 0;
if (priv->Rf_Mode == RF_OP_By_FW) {
@@ -342,7 +336,6 @@ u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
bitshift = ffs(bitmask) - 1;
reg = (reg & bitmask) >> bitshift;
return reg;
-
}
/******************************************************************************
@@ -700,7 +693,6 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block_e CheckBlock
WriteAddr[HW90_BLOCK_RF] = 0x3;
RT_TRACE(COMP_PHY, "%s(), CheckBlock: %d\n", __func__, CheckBlock);
for (i = 0; i < CheckTimes; i++) {
-
/* Write data to register and readback */
switch (CheckBlock) {
case HW90_BLOCK_MAC:
@@ -735,7 +727,6 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block_e CheckBlock
break;
}
-
/* Check whether readback data is correct */
if (reg != WriteData[i]) {
RT_TRACE((COMP_PHY|COMP_ERR),
@@ -844,7 +835,6 @@ void rtl8192_BBConfig(struct net_device *dev)
rtl8192_BB_Config_ParaFile(dev);
}
-
/******************************************************************************
* function: This function obtains the initialization value of Tx power Level
* offset
@@ -961,13 +951,11 @@ void rtl8192_phy_updateInitGain(struct net_device *dev)
u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
enum rf90_radio_path_e e_rfpath)
{
-
int i;
switch (e_rfpath) {
case RF90_PATH_A:
for (i = 0; i < RadioA_ArrayLength; i = i+2) {
-
if (Rtl8192UsbRadioA_Array[i] == 0xfe) {
mdelay(100);
continue;
@@ -977,12 +965,10 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
bMask12Bits,
Rtl8192UsbRadioA_Array[i+1]);
mdelay(1);
-
}
break;
case RF90_PATH_B:
for (i = 0; i < RadioB_ArrayLength; i = i+2) {
-
if (Rtl8192UsbRadioB_Array[i] == 0xfe) {
mdelay(100);
continue;
@@ -992,12 +978,10 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
bMask12Bits,
Rtl8192UsbRadioB_Array[i+1]);
mdelay(1);
-
}
break;
case RF90_PATH_C:
for (i = 0; i < RadioC_ArrayLength; i = i+2) {
-
if (Rtl8192UsbRadioC_Array[i] == 0xfe) {
mdelay(100);
continue;
@@ -1007,12 +991,10 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
bMask12Bits,
Rtl8192UsbRadioC_Array[i+1]);
mdelay(1);
-
}
break;
case RF90_PATH_D:
for (i = 0; i < RadioD_ArrayLength; i = i+2) {
-
if (Rtl8192UsbRadioD_Array[i] == 0xfe) {
mdelay(100);
continue;
@@ -1022,7 +1004,6 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
bMask12Bits,
Rtl8192UsbRadioD_Array[i+1]);
mdelay(1);
-
}
break;
default:
@@ -1030,7 +1011,6 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
}
return 0;
-
}
/******************************************************************************
@@ -1170,7 +1150,7 @@ static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable, u32 CmdTab
{
struct sw_chnl_cmd *pCmd;
- if (CmdTable == NULL) {
+ if (!CmdTable) {
RT_TRACE(COMP_ERR, "%s(): CmdTable cannot be NULL\n", __func__);
return false;
}
@@ -1225,7 +1205,6 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
}
/* FIXME: need to check whether channel is legal or not here */
-
/* <1> Fill up pre common command. */
PreCommonCmdCnt = 0;
rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++,
@@ -1286,7 +1265,6 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
return true;
}
-
do {
switch (*stage) {
case 0:
@@ -1378,13 +1356,11 @@ static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
*****************************************************************************/
void rtl8192_SwChnl_WorkItem(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_CH, "==> SwChnlCallback819xUsbWorkItem(), chan:%d\n",
priv->chan);
-
rtl8192_phy_FinishSwChnlNow(dev, priv->chan);
RT_TRACE(COMP_CH, "<== SwChnlCallback819xUsbWorkItem()\n");
@@ -1459,14 +1435,12 @@ u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
*****************************************************************************/
void rtl8192_SetBWModeWorkItem(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
u8 regBwOpMode;
RT_TRACE(COMP_SWBW, "%s() Switch to %s bandwidth\n", __func__,
priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz");
-
if (priv->rf_chip == RF_PSEUDO_11N) {
priv->SetBWModeInProgress = false;
return;
@@ -1563,7 +1537,6 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
"SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",
priv->CurrentChannelBW);
break;
-
}
/* Skip over setting of J-mode in BB register here.
* Default value is "None J mode".
@@ -1624,7 +1597,6 @@ void rtl8192_SetBWMode(struct net_device *dev,
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
rtl8192_SetBWModeWorkItem(dev);
-
}
void InitialGain819xUsb(struct net_device *dev, u8 Operation)
diff --git a/drivers/staging/rtl8192u/r819xU_phyreg.h b/drivers/staging/rtl8192u/r819xU_phyreg.h
index dc9ddf100eab..c9669821b278 100644
--- a/drivers/staging/rtl8192u/r819xU_phyreg.h
+++ b/drivers/staging/rtl8192u/r819xU_phyreg.h
@@ -2,7 +2,6 @@
#ifndef _R819XU_PHYREG_H
#define _R819XU_PHYREG_H
-
#define RF_DATA 0x1d4 /* FW will write RF data in the register.*/
/* page8 */
@@ -81,7 +80,6 @@
#define rOFDM0_XDTxIQImbalance 0xc98
#define rOFDM0_XDTxAFE 0xc9c
-
/* page d */
#define rOFDM1_LSTF 0xd00
#define rOFDM1_TRxPathEnable 0xd04
@@ -95,7 +93,6 @@
#define rTxAGC_Mcs11_Mcs08 0xe18
#define rTxAGC_Mcs15_Mcs12 0xe1c
-
/* RF
* Zebra1
*/
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d83f421acfc1..db5c7a487ab3 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -28,7 +28,7 @@
#include "usb_ops.h"
#include "wifi.h"
-static void recv_tasklet(unsigned long priv);
+static void recv_tasklet(struct tasklet_struct *t);
void r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter)
@@ -60,8 +60,7 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- tasklet_init(&precvpriv->recv_tasklet, recv_tasklet,
- (unsigned long)padapter);
+ tasklet_setup(&precvpriv->recv_tasklet, recv_tasklet);
skb_queue_head_init(&precvpriv->rx_skb_queue);
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
@@ -477,11 +476,14 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
while (!end_of_queue_search(phead, plist)) {
pnextrframe = container_of(plist, union recv_frame, u.list);
pnextattrib = &pnextrframe->u.hdr.attrib;
+
+ if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
+ return false;
+
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
plist = plist->next;
- else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
- return false;
- break;
+ else
+ break;
}
list_del_init(&(prframe->u.hdr.list));
list_add_tail(&(prframe->u.hdr.list), plist);
@@ -1057,10 +1059,11 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
} while ((transfer_len > 0) && pkt_cnt > 0);
}
-static void recv_tasklet(unsigned long priv)
+static void recv_tasklet(struct tasklet_struct *t)
{
struct sk_buff *pskb;
- struct _adapter *padapter = (struct _adapter *)priv;
+ struct _adapter *padapter = from_tasklet(padapter, t,
+ recvpriv.recv_tasklet);
struct recv_priv *precvpriv = &padapter->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
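The enqueue_reorder_recvframe() rewrite above hoists the duplicate test out of the less/equal/break tangle: a frame whose sequence number already sits in the queue is rejected before any insertion decision, and the remaining walk reads as a plain "advance while smaller, insert before the first larger entry". A standalone sketch of that ordered insert, with a mod-4096 comparison modeled on the driver's SN_LESS():

    /*
     * Standalone sketch of the ordered insert; struct and macro names
     * are illustrative. SEQ_BEFORE() models a mod-4096 window compare
     * like the driver's SN_LESS().
     */
    #include <linux/list.h>
    #include <linux/types.h>

    #define SEQ_BEFORE(a, b)        ((((a) - (b)) & 0x800) != 0)

    struct demo_frame {
            struct list_head list;
            u16 seq_num;
    };

    static bool demo_reorder_insert(struct list_head *head,
                                    struct demo_frame *f)
    {
            struct demo_frame *cur;

            list_for_each_entry(cur, head, list) {
                    if (cur->seq_num == f->seq_num)
                            return false;   /* duplicate: caller drops it */
                    if (!SEQ_BEFORE(cur->seq_num, f->seq_num))
                            break;          /* first entry after f */
            }
            /* &cur->list is head itself if the loop ran to completion */
            list_add_tail(&f->list, &cur->list);
            return true;
    }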
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index c7523072a660..18116469bd31 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -161,7 +161,7 @@ void r8712_free_cmd_obj(struct cmd_obj *pcmd)
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_))
kfree(pcmd->parmbuf);
- if (pcmd->rsp != NULL) {
+ if (pcmd->rsp) {
if (pcmd->rspsz != 0)
kfree(pcmd->rsp);
}
@@ -191,7 +191,7 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
psurveyPara->passive_mode = cpu_to_le32(pmlmepriv->passive_mode);
psurveyPara->ss_ssidlen = 0;
memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1);
- if ((pssid != NULL) && (pssid->SsidLength)) {
+ if (pssid && pssid->SsidLength) {
memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength);
psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength);
}
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index 87024d6a465e..6789a4c98564 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -50,7 +50,7 @@ static uint _init_intf_hdl(struct _adapter *padapter,
init_intf_priv = &r8712_usb_init_intf_priv;
pintf_priv = pintf_hdl->pintfpriv = kmalloc(sizeof(struct intf_priv),
GFP_ATOMIC);
- if (pintf_priv == NULL)
+ if (!pintf_priv)
goto _init_intf_hdl_fail;
pintf_hdl->adapter = (u8 *)padapter;
set_intf_option(&pintf_hdl->intf_option);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index df6ae855f3c1..cbaa7a489748 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -481,11 +481,11 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
- if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL))
+ if (ielen > MAX_WPA_IE_LEN || !pie)
return -EINVAL;
if (ielen) {
buf = kmemdup(pie, ielen, GFP_ATOMIC);
- if (buf == NULL)
+ if (!buf)
return -ENOMEM;
if (ielen < RSN_HEADER_LEN) {
ret = -EINVAL;
@@ -777,7 +777,7 @@ static int r871x_wx_set_pmkid(struct net_device *dev,
* If cmd is IW_PMKSA_REMOVE, it means the wpa_supplicant wants to
* remove a PMKID/BSSID from driver.
*/
- if (pPMK == NULL)
+ if (!pPMK)
return -EINVAL;
memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
switch (pPMK->cmd) {
@@ -1099,7 +1099,7 @@ static int r871x_wx_set_mlme(struct net_device *dev,
struct _adapter *padapter = netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *) extra;
- if (mlme == NULL)
+ if (!mlme)
return -1;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
@@ -1950,7 +1950,7 @@ static int r871x_get_ap_info(struct net_device *dev,
u8 bssid[ETH_ALEN];
char data[33];
- if (padapter->driver_stopped || (pdata == NULL))
+ if (padapter->driver_stopped || !pdata)
return -EINVAL;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
_FW_UNDER_LINKING)) {
@@ -2014,7 +2014,7 @@ static int r871x_set_pid(struct net_device *dev,
struct _adapter *padapter = netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
- if ((padapter->driver_stopped) || (pdata == NULL))
+ if (padapter->driver_stopped || !pdata)
return -EINVAL;
if (copy_from_user(&padapter->pid, pdata->pointer, sizeof(int)))
return -EINVAL;
@@ -2030,7 +2030,7 @@ static int r871x_set_chplan(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
int ch_plan = -1;
- if ((padapter->driver_stopped) || (pdata == NULL)) {
+ if (padapter->driver_stopped || !pdata) {
ret = -EINVAL;
goto exit;
}
@@ -2050,7 +2050,7 @@ static int r871x_wps_start(struct net_device *dev,
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- if ((padapter->driver_stopped) || (pdata == NULL))
+ if (padapter->driver_stopped || !pdata)
return -EINVAL;
if (copy_from_user((void *)&u32wps_start, pdata->pointer, 4))
return -EFAULT;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 2ccd49032206..6074383ec0b5 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -754,7 +754,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ptarget_wlan->fixed = true;
}
- if (ptarget_wlan == NULL) {
+ if (!ptarget_wlan) {
if (check_fwstate(pmlmepriv,
_FW_UNDER_LINKING))
pmlmepriv->fw_state ^=
@@ -768,7 +768,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
ptarget_sta =
r8712_get_stainfo(pstapriv,
pnetwork->network.MacAddress);
- if (ptarget_sta == NULL)
+ if (!ptarget_sta)
ptarget_sta =
r8712_alloc_stainfo(pstapriv,
pnetwork->network.MacAddress);
@@ -879,7 +879,7 @@ void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf)
if (!r8712_access_ctrl(&adapter->acl_list, pstassoc->macaddr))
return;
psta = r8712_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
+ if (psta) {
/*the sta have been in sta_info_queue => do nothing
*(between drv has received this event before and
* fw have not yet to set key to CAM_ENTRY)
@@ -888,7 +888,7 @@ void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf)
}
psta = r8712_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta == NULL)
+ if (!psta)
return;
/* to do : init sta_info variable */
psta->qos_option = 0;
@@ -1080,8 +1080,7 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
pmlmepriv->pscanned = phead->next;
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned)) {
- if ((pmlmepriv->assoc_by_rssi) &&
- (pnetwork_max_rssi != NULL)) {
+ if (pmlmepriv->assoc_by_rssi && pnetwork_max_rssi) {
pnetwork = pnetwork_max_rssi;
goto ask_for_joinbss;
}
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 29b85330815f..f906d3fbe179 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -186,7 +186,7 @@ static int mp_start_test(struct _adapter *padapter)
if (psta)
r8712_free_stainfo(padapter, psta);
psta = r8712_alloc_stainfo(&padapter->stapriv, bssid.MacAddress);
- if (psta == NULL) {
+ if (!psta) {
res = -ENOMEM;
goto end_of_mp_start_test;
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index c1bfd61824ef..eb4e46a7f743 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -58,7 +58,7 @@ void _r8712_init_recv_priv(struct recv_priv *precvpriv,
precvpriv->pallocated_frame_buf = kzalloc(NR_RECVFRAME *
sizeof(union recv_frame) + RXFRAME_ALIGN_SZ,
GFP_ATOMIC);
- if (precvpriv->pallocated_frame_buf == NULL)
+ if (!precvpriv->pallocated_frame_buf)
return;
kmemleak_not_leak(precvpriv->pallocated_frame_buf);
precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
@@ -97,7 +97,7 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
if (precvframe) {
list_del_init(&precvframe->u.hdr.list);
padapter = precvframe->u.hdr.adapter;
- if (padapter != NULL) {
+ if (padapter) {
precvpriv = &padapter->recvpriv;
if (pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt--;
@@ -145,7 +145,7 @@ sint r8712_recvframe_chkmic(struct _adapter *adapter,
stainfo = r8712_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
if (prxattrib->encrypt == _TKIP_) {
/* calculate mic code */
- if (stainfo != NULL) {
+ if (stainfo) {
if (is_multicast_ether_addr(prxattrib->ra)) {
iv = precvframe->u.hdr.rx_data +
prxattrib->hdrlen;
@@ -242,7 +242,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
ether_type = get_unaligned_be16(ptr);
- if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ if (psta && psta->ieee8021x_blocked) {
/* blocked
* only accept EAPOL frame
*/
@@ -349,7 +349,7 @@ static sint sta2sta_data_frame(struct _adapter *adapter,
*psta = r8712_get_bcmc_stainfo(adapter);
else
*psta = r8712_get_stainfo(pstapriv, sta_addr); /* get ap_info */
- if (*psta == NULL) {
+ if (!*psta) {
if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
adapter->mppriv.rx_pktloss++;
return _FAIL;
@@ -399,7 +399,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
*psta = r8712_get_bcmc_stainfo(adapter);
else
*psta = r8712_get_stainfo(pstapriv, pattrib->bssid);
- if (*psta == NULL)
+ if (!*psta)
return _FAIL;
} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -410,7 +410,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
memcpy(pattrib->bssid, mybssid, ETH_ALEN);
*psta = r8712_get_stainfo(pstapriv, pattrib->bssid);
- if (*psta == NULL)
+ if (!*psta)
return _FAIL;
} else {
return _FAIL;
@@ -435,7 +435,7 @@ static sint sta2ap_data_frame(struct _adapter *adapter,
if (memcmp(pattrib->bssid, mybssid, ETH_ALEN))
return _FAIL;
*psta = r8712_get_stainfo(pstapriv, pattrib->src);
- if (*psta == NULL)
+ if (!*psta)
return _FAIL;
}
return _SUCCESS;
@@ -469,7 +469,7 @@ static sint validate_recv_data_frame(struct _adapter *adapter,
pda = get_da(ptr);
psa = get_sa(ptr);
pbssid = get_hdr_bssid(ptr);
- if (pbssid == NULL)
+ if (!pbssid)
return _FAIL;
memcpy(pattrib->dst, pda, ETH_ALEN);
memcpy(pattrib->src, psa, ETH_ALEN);
@@ -499,7 +499,7 @@ static sint validate_recv_data_frame(struct _adapter *adapter,
}
if (res == _FAIL)
return _FAIL;
- if (psta == NULL)
+ if (!psta)
return _FAIL;
precv_frame->u.hdr.psta = psta;
pattrib->amsdu = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index c05010d85212..5000c87752d3 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -584,7 +584,7 @@ u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
else
stainfo = r8712_get_stainfo(&padapter->stapriv,
&pattrib->ra[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
prwskey = &stainfo->x_UncstKey.skey[0];
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
@@ -658,7 +658,7 @@ void r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _TKIP_) {
stainfo = r8712_get_stainfo(&padapter->stapriv,
&prxattrib->ta[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
iv = pframe + prxattrib->hdrlen;
payload = pframe + prxattrib->iv_len +
prxattrib->hdrlen;
@@ -1155,7 +1155,7 @@ u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe)
else
stainfo = r8712_get_stainfo(&padapter->stapriv,
&pattrib->ra[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
prwskey = &stainfo->x_UncstKey.skey[0];
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
@@ -1357,7 +1357,7 @@ void r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _AES_) {
stainfo = r8712_get_stainfo(&padapter->stapriv,
&prxattrib->ta[0]);
- if (stainfo != NULL) {
+ if (stainfo) {
if (is_multicast_ether_addr(prxattrib->ra)) {
iv = pframe + prxattrib->hdrlen;
idx = iv[3];
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 653812c5d5a8..706e9db0fc5b 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -149,7 +149,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- if (psta == NULL)
+ if (!psta)
return;
pfree_sta_queue = &pstapriv->free_sta_queue;
pstaxmitpriv = &psta->sta_xmitpriv;
@@ -222,7 +222,7 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
struct sta_info *psta = NULL;
u32 index;
- if (hwaddr == NULL)
+ if (!hwaddr)
return NULL;
index = wifi_mac_hash(hwaddr);
spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 8b88fd5dc9a1..fd99782a400a 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -144,8 +144,7 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
INIT_WORK(&padapter->wk_filter_rx_ff0, r8712_SetFilter);
alloc_hwxmits(padapter);
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- tasklet_init(&pxmitpriv->xmit_tasklet, r8712_xmit_bh,
- (unsigned long)padapter);
+ tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh);
return 0;
}
@@ -157,7 +156,7 @@ void _free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitpriv->pxmit_frame_buf;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- if (pxmitpriv->pxmit_frame_buf == NULL)
+ if (!pxmitpriv->pxmit_frame_buf)
return;
for (i = 0; i < NR_XMITFRAME; i++) {
r8712_xmit_complete(padapter, pxmitframe);
@@ -270,7 +269,7 @@ int r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
pattrib->mac_id = 5;
} else {
psta = r8712_get_stainfo(pstapriv, pattrib->ra);
- if (psta == NULL) /* drop the pkt */
+ if (!psta) /* drop the pkt */
return -ENOMEM;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
pattrib->mac_id = 5;
@@ -353,7 +352,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecpriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+ u8 priority[4] = {};
bool bmcst = is_multicast_ether_addr(pattrib->ra);
if (pattrib->psta)
@@ -363,10 +362,9 @@ static int xmitframe_addmic(struct _adapter *padapter,
&pattrib->ra[0]);
if (pattrib->encrypt == _TKIP_) {
/*encode mic code*/
- if (stainfo != NULL) {
- u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x0, 0x0};
+ if (stainfo) {
+ u8 null_key[16] = {};
+
pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
if (!memcmp(psecpriv->XGrptxmickey
@@ -593,10 +591,10 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
u8 *pbuf_start;
bool bmcst = is_multicast_ether_addr(pattrib->ra);
- if (pattrib->psta == NULL)
+ if (!pattrib->psta)
return _FAIL;
psta = pattrib->psta;
- if (pxmitframe->buf_addr == NULL)
+ if (!pxmitframe->buf_addr)
return _FAIL;
pbuf_start = pxmitframe->buf_addr;
ptxdesc = pbuf_start;
@@ -624,7 +622,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
mpdu_len -= pattrib->hdrlen;
/* adding icv, if necessary...*/
if (pattrib->iv_len) {
- if (psta != NULL) {
+ if (psta) {
switch (pattrib->encrypt) {
case _WEP40_:
case _WEP104_:
@@ -712,7 +710,7 @@ void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len)
case AUTO_VCS:
default:
perp = r8712_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
- if (perp == NULL) {
+ if (!perp) {
pxmitpriv->vcs = NONE_VCS;
} else {
protection = (*(perp + 2)) & BIT(1);
@@ -751,7 +749,7 @@ void r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
unsigned long irqL;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- if (pxmitbuf == NULL)
+ if (!pxmitbuf)
return;
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
list_del_init(&pxmitbuf->list);
@@ -804,7 +802,7 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
struct _adapter *padapter = pxmitpriv->adapter;
- if (pxmitframe == NULL)
+ if (!pxmitframe)
return;
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
list_del_init(&pxmitframe->list);
@@ -820,7 +818,7 @@ void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
void r8712_free_xmitframe_ex(struct xmit_priv *pxmitpriv,
struct xmit_frame *pxmitframe)
{
- if (pxmitframe == NULL)
+ if (!pxmitframe)
return;
if (pxmitframe->frame_tag == DATA_FRAMETAG)
r8712_free_xmitframe(pxmitpriv, pxmitframe);
@@ -911,7 +909,7 @@ int r8712_xmit_classifier(struct _adapter *padapter,
psta = r8712_get_stainfo(pstapriv, pattrib->ra);
}
}
- if (psta == NULL)
+ if (!psta)
return -EINVAL;
ptxservq = get_sta_pending(padapter, &pstapending,
psta, pattrib->priority);
@@ -1023,7 +1021,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe)
return ret;
}
pxmitbuf = r8712_alloc_xmitbuf(pxmitpriv);
- if (pxmitbuf == NULL) { /*enqueue packet*/
+ if (!pxmitbuf) { /*enqueue packet*/
ret = false;
r8712_xmit_enqueue(padapter, pxmitframe);
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
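
Note: the empty initializers introduced above (`{}` for priority[] and null_key[]) rely on the rule that a partial initializer list zero-fills every remaining element; the fully empty `{}` form is a GNU C extension used throughout the kernel, so the long runs of 0x0 were redundant. A minimal sketch:

	u8 key_a[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
			0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
	u8 key_b[16] = {};	/* identical contents: all bytes zero */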
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index c0c0c781fe17..cc58c7216935 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -277,7 +277,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
int r8712_xmit_enqueue(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
-void r8712_xmit_bh(unsigned long priv);
+void r8712_xmit_bh(struct tasklet_struct *t);
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 2fcd65260f4c..dc21e7743349 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -577,7 +577,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
error:
usb_put_dev(udev);
usb_set_intfdata(pusb_intf, NULL);
- if (padapter && padapter->dvobj_deinit != NULL)
+ if (padapter && padapter->dvobj_deinit)
padapter->dvobj_deinit(padapter);
if (pnetdev)
free_netdev(pnetdev);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 9a04a752af13..655497cead12 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -308,10 +308,11 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
}
}
-void r8712_xmit_bh(unsigned long priv)
+void r8712_xmit_bh(struct tasklet_struct *t)
{
int ret = false;
- struct _adapter *padapter = (struct _adapter *)priv;
+ struct _adapter *padapter = from_tasklet(padapter, t,
+ xmitpriv.xmit_tasklet);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
if (padapter->driver_stopped ||
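
Note: these hunks convert the driver from tasklet_init(), which smuggles the context through an opaque unsigned long, to tasklet_setup(), where the callback receives the tasklet_struct itself and recovers its container with from_tasklet(), a container_of() wrapper keyed on the member name. A minimal sketch of the pattern, using hypothetical my_adapter/my_bh names:

	#include <linux/interrupt.h>

	struct my_adapter {
		struct tasklet_struct xmit_tasklet;
		/* ... */
	};

	static void my_bh(struct tasklet_struct *t)
	{
		/* from_tasklet(var, tasklet, member) == container_of() */
		struct my_adapter *adapter = from_tasklet(adapter, t,
							  xmit_tasklet);
		/* ... bottom-half work on adapter ... */
	}

	static void my_init(struct my_adapter *adapter)
	{
		tasklet_setup(&adapter->xmit_tasklet, my_bh);
	}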
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index a76e81330756..4f270d509ad3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -8,6 +8,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
+#include <asm/unaligned.h>
extern unsigned char RTW_WPA_OUI[];
extern unsigned char WMM_OUI[];
@@ -995,12 +996,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* beacon interval */
p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8; 8: TimeStamp, 2: Beacon Interval 2:Capability */
/* pbss_network->Configuration.BeaconPeriod = le16_to_cpu(*(unsigned short*)p); */
- pbss_network->Configuration.BeaconPeriod = RTW_GET_LE16(p);
+ pbss_network->Configuration.BeaconPeriod = get_unaligned_le16(p);
/* capability */
/* cap = *(unsigned short *)rtw_get_capability_from_ie(ie); */
/* cap = le16_to_cpu(cap); */
- cap = RTW_GET_LE16(ie);
+ cap = get_unaligned_le16(ie);
/* SSID */
p = rtw_get_ie(
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index bd18d1803e27..2abe205e3453 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -469,7 +469,7 @@ _next:
pcmdpriv->cmd_issued_cnt++;
- pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
+ pcmd->cmdsz = round_up((pcmd->cmdsz), 4);
memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
@@ -2034,7 +2034,6 @@ void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
- u8 timer_cancelled;
struct sta_info *psta = NULL;
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2049,7 +2048,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
_set_timer(&pmlmepriv->assoc_timer, 1);
}
- _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+ del_timer_sync(&pmlmepriv->assoc_timer);
spin_lock_bh(&pmlmepriv->lock);
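
Note: _cancel_timer() (removed later in this series from osdep_service_linux.h) was a trivial wrapper around del_timer_sync() that also reported a "cancelled" flag no caller still uses, so the converted sites call del_timer_sync() directly. del_timer_sync() deactivates the timer and waits for a running handler to finish; its return value already says whether the timer was pending. A minimal sketch:

	#include <linux/timer.h>

	static void stop_assoc_timer(struct timer_list *t)
	{
		/* returns 1 if the timer was still pending, 0 otherwise */
		int was_pending = del_timer_sync(t);

		(void)was_pending;	/* the converted callers ignore it */
	}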
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index ca98274ae390..c43cca4a3828 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -9,6 +9,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/of.h>
+#include <asm/unaligned.h>
u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
u16 RTW_WPA_VERSION = 1;
@@ -499,7 +500,7 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
/* pairwise_cipher */
if (left >= 2) {
/* count = le16_to_cpu(*(u16*)pos); */
- count = RTW_GET_LE16(pos);
+ count = get_unaligned_le16(pos);
pos += 2;
left -= 2;
@@ -569,7 +570,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
/* pairwise_cipher */
if (left >= 2) {
/* count = le16_to_cpu(*(u16*)pos); */
- count = RTW_GET_LE16(pos);
+ count = get_unaligned_le16(pos);
pos += 2;
left -= 2;
@@ -800,8 +801,8 @@ u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_att
while (attr_ptr - wps_ie < wps_ielen) {
/* 4 = 2(Attribute ID) + 2(Length) */
- u16 attr_id = RTW_GET_BE16(attr_ptr);
- u16 attr_data_len = RTW_GET_BE16(attr_ptr + 2);
+ u16 attr_id = get_unaligned_be16(attr_ptr);
+ u16 attr_data_len = get_unaligned_be16(attr_ptr + 2);
u16 attr_len = attr_data_len + 4;
/* DBG_871X("%s attr_ptr:%p, id:%u, length:%u\n", __func__, attr_ptr, attr_id, attr_data_len); */
@@ -874,7 +875,7 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
return -1;
}
- oui = RTW_GET_BE24(pos);
+ oui = get_unaligned_be24(pos);
switch (oui) {
case OUI_MICROSOFT:
/* Microsoft/Wi-Fi information elements are further typed and
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index e65c5a870b46..9531ba54e95b 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -814,7 +814,6 @@ exit:
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
- u8 timer_cancelled = false;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
spin_lock_bh(&pmlmepriv->lock);
@@ -827,22 +826,12 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback: fw_state:%x\n\n", get_fwstate(pmlmepriv)));
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- /* u8 timer_cancelled; */
-
- timer_cancelled = true;
- /* _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled); */
-
+ del_timer_sync(&pmlmepriv->scan_to_timer);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
} else {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status =%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
}
- spin_unlock_bh(&pmlmepriv->lock);
-
- if (timer_cancelled)
- _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
-
- spin_lock_bh(&pmlmepriv->lock);
rtw_set_signal_stat_timer(&adapter->recvpriv);
@@ -1298,7 +1287,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
{
static u8 retry;
- u8 timer_cancelled;
struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
struct sta_priv *pstapriv = &adapter->stapriv;
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
@@ -1392,7 +1380,7 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
}
/* s5. Cancel assoc_timer */
- _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+ del_timer_sync(&pmlmepriv->assoc_timer);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancel assoc_timer\n"));
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 6db637701063..b912ad2f4b72 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -11,6 +11,7 @@
#include <rtw_wifi_regd.h>
#include <hal_btcoex.h>
#include <linux/kernel.h>
+#include <asm/unaligned.h>
static struct mlme_handler mlme_sta_tbl[] = {
{WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
@@ -1213,7 +1214,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
goto asoc_class2_error;
}
- capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
+ capab_info = get_unaligned_le16(pframe + WLAN_HDR_A3_LEN);
/* capab_info = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN)); */
left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
@@ -1959,7 +1960,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
break;
case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
- status = RTW_GET_LE16(&frame_body[3]);
+ status = get_unaligned_le16(&frame_body[3]);
tid = ((frame_body[5] >> 2) & 0x7);
if (status == 0) {
@@ -1989,7 +1990,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
~BIT((frame_body[3] >> 4) & 0xf);
/* reason_code = frame_body[4] | (frame_body[5] << 8); */
- reason_code = RTW_GET_LE16(&frame_body[4]);
+ reason_code = get_unaligned_le16(&frame_body[4]);
} else if ((frame_body[3] & BIT(3)) == BIT(3)) {
tid = (frame_body[3] >> 4) & 0x0F;
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 7e1da0e35812..6979f8dbccb8 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -11,6 +11,7 @@
#include <linux/jiffies.h>
#include <rtw_recv.h>
#include <net/cfg80211.h>
+#include <asm/unaligned.h>
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
@@ -1906,7 +1907,7 @@ static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
while (a_len > ETH_HLEN) {
/* Offset 12 denote 2 mac address */
- nSubframe_Length = RTW_GET_BE16(pdata + 12);
+ nSubframe_Length = get_unaligned_be16(pdata + 12);
if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
DBG_871X("nRemain_Length is %d and nSubframe_Length is : %d\n", a_len, nSubframe_Length);
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 7f74e1d05b3a..159d32ace2bc 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -260,7 +260,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
}
}
@@ -716,7 +716,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
arcfour_encrypt(&mycontext, payload+length, crc, 4);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
}
}
@@ -1523,7 +1523,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
pframe += pxmitpriv->frag_len;
- pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ pframe = (u8 *)round_up((SIZE_PTR)(pframe), 4);
}
}
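
Note: RND4()/_RND4()/_RND8() are open-coded power-of-two round-ups; the standard round_up(x, y) macro from <linux/kernel.h> computes the same value without the driver-local duplication (y must be a power of two, which holds for the 4- and 8-byte alignments used here). A minimal sketch:

	#include <linux/kernel.h>

	size_t n = 13;
	size_t a = round_up(n, 4);	/* 16, replaces _RND4(n) */
	size_t b = round_up(n, 8);	/* 16, replaces _RND8(n) */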
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index a3ea7ce3e12e..372ce17c3569 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -54,32 +54,6 @@ static u8 rtw_basic_rate_ofdm[3] = {
IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};
-int cckrates_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
- (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
- return true;
- }
-
- return false;
-}
-
-int cckratesonly_included(unsigned char *rate, int ratelen)
-{
- int i;
-
- for (i = 0; i < ratelen; i++) {
- if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
- (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
- return false;
- }
-
- return true;
-}
-
u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
{
u8 raid, cur_rf_type, rf_type = RF_1T1R;
@@ -374,20 +348,7 @@ u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset)
u8 center_ch = channel;
if (chnl_bw == CHANNEL_WIDTH_80) {
- if ((channel == 36) || (channel == 40) || (channel == 44) || (channel == 48))
- center_ch = 42;
- if ((channel == 52) || (channel == 56) || (channel == 60) || (channel == 64))
- center_ch = 58;
- if ((channel == 100) || (channel == 104) || (channel == 108) || (channel == 112))
- center_ch = 106;
- if ((channel == 116) || (channel == 120) || (channel == 124) || (channel == 128))
- center_ch = 122;
- if ((channel == 132) || (channel == 136) || (channel == 140) || (channel == 144))
- center_ch = 138;
- if ((channel == 149) || (channel == 153) || (channel == 157) || (channel == 161))
- center_ch = 155;
- else if (channel <= 14)
- center_ch = 7;
+ center_ch = 7;
} else if (chnl_bw == CHANNEL_WIDTH_40) {
if (chnl_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
center_ch = channel + 2;
@@ -1753,38 +1714,27 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
void update_wireless_mode(struct adapter *padapter)
{
- int ratelen, network_type = 0;
+ int network_type = 0;
u32 SIFS_Timer;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
unsigned char *rate = cur_network->SupportedRates;
- ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
-
if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
pmlmeinfo->HT_enable = 1;
- if (pmlmeext->cur_channel > 14) {
- if (pmlmeinfo->VHT_enable)
- network_type = WIRELESS_11AC;
- else if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_5N;
+ if (pmlmeinfo->VHT_enable)
+ network_type = WIRELESS_11AC;
+ else if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
- network_type |= WIRELESS_11A;
- } else {
- if (pmlmeinfo->VHT_enable)
- network_type = WIRELESS_11AC;
- else if (pmlmeinfo->HT_enable)
- network_type = WIRELESS_11_24N;
-
- if ((cckratesonly_included(rate, ratelen)) == true)
- network_type |= WIRELESS_11B;
- else if ((cckrates_included(rate, ratelen)) == true)
- network_type |= WIRELESS_11BG;
- else
- network_type |= WIRELESS_11G;
- }
+ if (rtw_is_cckratesonly_included(rate))
+ network_type |= WIRELESS_11B;
+ else if (rtw_is_cckrates_included(rate))
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
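
Note: the removed cckrates_included()/cckratesonly_included() duplicated logic now reached through rtw_is_cckrates_included()/rtw_is_cckratesonly_included(). Both test for the CCK rates 1, 2, 5.5 and 11 Mbps, which 802.11 rate sets encode in 0.5 Mbps units (2, 4, 11, 22) with bit 7 flagging a basic rate. A minimal standalone sketch of the per-rate test, assuming the replacement helpers keep these semantics while walking the zero-terminated rate array:

	#include <linux/types.h>

	/* rate entries are in 0.5 Mbps units; bit 7 marks a basic rate */
	static bool is_cck_rate(unsigned char r)
	{
		r &= 0x7f;
		return r == 2 || r == 4 || r == 11 || r == 22;
	}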
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 571353404a95..6ecaff9728fd 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -865,7 +865,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
- payload = (u8 *)RND4((SIZE_PTR)(payload));
+ payload = (u8 *)round_up((SIZE_PTR)(payload), 4);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("===curfragnum =%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
curfragnum, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5), *(payload+6), *(payload+7)));
@@ -1209,7 +1209,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
addr = (SIZE_PTR)(pframe);
- mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ mem_start = (unsigned char *)round_up(addr, 4) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 29c29e2e125b..1fbf89cb72d0 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -230,9 +230,10 @@ static inline bool pkt_exceeds_tail(struct recv_priv *precvpriv,
return false;
}
-static void rtl8723bs_recv_tasklet(unsigned long priv)
+static void rtl8723bs_recv_tasklet(struct tasklet_struct *t)
{
- struct adapter *padapter;
+ struct adapter *padapter = from_tasklet(padapter, t,
+ recvpriv.recv_tasklet);
struct hal_com_data *p_hal_data;
struct recv_priv *precvpriv;
struct recv_buf *precvbuf;
@@ -244,7 +245,6 @@ static void rtl8723bs_recv_tasklet(unsigned long priv)
_pkt *pkt_copy = NULL;
u8 shift_sz = 0, rx_report_sz = 0;
- padapter = (struct adapter *)priv;
p_hal_data = GET_HAL_DATA(padapter);
precvpriv = &padapter->recvpriv;
recv_buf_queue = &precvpriv->recv_buf_pending_queue;
@@ -369,7 +369,7 @@ static void rtl8723bs_recv_tasklet(unsigned long priv)
}
}
- pkt_offset = _RND8(pkt_offset);
+ pkt_offset = round_up(pkt_offset, 8);
precvbuf->pdata += pkt_offset;
ptr = precvbuf->pdata;
precvframe = NULL;
@@ -444,8 +444,7 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
goto initbuferror;
/* 3 2. init tasklet */
- tasklet_init(&precvpriv->recv_tasklet, rtl8723bs_recv_tasklet,
- (unsigned long)padapter);
+ tasklet_setup(&precvpriv->recv_tasklet, rtl8723bs_recv_tasklet);
goto exit;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index b6b4adb5a28a..369f55d11519 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -474,7 +474,7 @@ static u32 sdio_write_port(
return _FAIL;
}
- cnt = _RND4(cnt);
+ cnt = round_up(cnt, 4);
HalSdioGetCmdAddr8723BSdio(adapter, addr, cnt >> 2, &addr);
if (cnt > psdio->block_transfer_len)
@@ -534,7 +534,7 @@ static s32 _sdio_local_read(
if (!mac_pwr_ctrl_on)
return _sd_cmd52_read(intfhdl, addr, cnt, buf);
- n = RND4(cnt);
+ n = round_up(cnt, 4);
tmpbuf = rtw_malloc(n);
if (!tmpbuf)
return -1;
@@ -575,7 +575,7 @@ s32 sdio_local_read(
)
return sd_cmd52_read(intfhdl, addr, cnt, buf);
- n = RND4(cnt);
+ n = round_up(cnt, 4);
tmpbuf = rtw_malloc(n);
if (!tmpbuf)
return -1;
@@ -859,7 +859,7 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
/* Patch for some SDIO Host 4 bytes issue */
/* ex. RK3188 */
- readsize = RND4(size);
+ readsize = round_up(size, 4);
/* 3 1. alloc recvbuf */
recv_priv = &adapter->recvpriv;
@@ -945,8 +945,7 @@ void sd_int_dpc(struct adapter *adapter)
if (hal->sdio_hisr & SDIO_HISR_CPWM1) {
struct reportpwrstate_parm report;
- u8 bcancelled;
- _cancel_timer(&(pwrctl->pwr_rpwm_timer), &bcancelled);
+ del_timer_sync(&(pwrctl->pwr_rpwm_timer));
report.state = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index be34e279670b..a94b72397ce7 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -131,29 +131,6 @@ static inline int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *par
}
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
-#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0 : 1)) << 2)
-
-static inline u32 _RND4(u32 sz)
-{
-
- u32 val;
-
- val = ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2;
-
- return val;
-
-}
-
-static inline u32 _RND8(u32 sz)
-{
-
- u32 val;
-
- val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
-
- return val;
-
-}
#ifndef MAC_FMT
#define MAC_FMT "%pM"
@@ -173,70 +150,6 @@ extern void rtw_free_netdev(struct net_device * netdev);
/* Macros for handling unaligned memory accesses */
-#define RTW_GET_BE16(a) ((u16) (((a)[0] << 8) | (a)[1]))
-#define RTW_PUT_BE16(a, val) \
- do { \
- (a)[0] = ((u16) (val)) >> 8; \
- (a)[1] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define RTW_GET_LE16(a) ((u16) (((a)[1] << 8) | (a)[0]))
-#define RTW_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
- ((u32) (a)[2]))
-#define RTW_PUT_BE24(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[2] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define RTW_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
- (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
-#define RTW_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define RTW_GET_LE32(a) ((((u32) (a)[3]) << 24) | (((u32) (a)[2]) << 16) | \
- (((u32) (a)[1]) << 8) | ((u32) (a)[0]))
-#define RTW_PUT_LE32(a, val) \
- do { \
- (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define RTW_GET_BE64(a) ((((u64) (a)[0]) << 56) | (((u64) (a)[1]) << 48) | \
- (((u64) (a)[2]) << 40) | (((u64) (a)[3]) << 32) | \
- (((u64) (a)[4]) << 24) | (((u64) (a)[5]) << 16) | \
- (((u64) (a)[6]) << 8) | ((u64) (a)[7]))
-#define RTW_PUT_BE64(a, val) \
- do { \
- (a)[0] = (u8) (((u64) (val)) >> 56); \
- (a)[1] = (u8) (((u64) (val)) >> 48); \
- (a)[2] = (u8) (((u64) (val)) >> 40); \
- (a)[3] = (u8) (((u64) (val)) >> 32); \
- (a)[4] = (u8) (((u64) (val)) >> 24); \
- (a)[5] = (u8) (((u64) (val)) >> 16); \
- (a)[6] = (u8) (((u64) (val)) >> 8); \
- (a)[7] = (u8) (((u64) (val)) & 0xff); \
- } while (0)
-
-#define RTW_GET_LE64(a) ((((u64) (a)[7]) << 56) | (((u64) (a)[6]) << 48) | \
- (((u64) (a)[5]) << 40) | (((u64) (a)[4]) << 32) | \
- (((u64) (a)[3]) << 24) | (((u64) (a)[2]) << 16) | \
- (((u64) (a)[1]) << 8) | ((u64) (a)[0]))
-
void rtw_buf_free(u8 **buf, u32 *buf_len);
void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
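
Note: the RTW_GET_*/RTW_PUT_* macros removed above re-implemented unaligned little- and big-endian loads and stores that the kernel already provides in <asm/unaligned.h>, which is why each converted .c file in this series gains that include. A minimal sketch of the replacements, on a hypothetical IE buffer:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static void parse_ie(const u8 *ie, u8 *out)
	{
		u16 cap = get_unaligned_le16(ie);	/* was RTW_GET_LE16(ie) */
		u32 oui = get_unaligned_be24(ie + 2);	/* was RTW_GET_BE24(ie + 2) */

		(void)oui;
		put_unaligned_le16(cap, out);		/* was RTW_PUT_LE16(out, cap) */
	}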
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 1710fa3eeb71..498d5474010c 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -83,12 +83,6 @@ static inline void _set_timer(_timer *ptimer, u32 delay_time)
mod_timer(ptimer, (jiffies + (delay_time * HZ / 1000)));
}
-static inline void _cancel_timer(_timer *ptimer, u8 *bcancelled)
-{
- del_timer_sync(ptimer);
- *bcancelled = true;/* true == 1; false == 0 */
-}
-
static inline void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
INIT_WORK(pwork, pfunc);
@@ -129,8 +123,6 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
-#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
-
#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
@@ -144,6 +136,12 @@ struct rtw_netdev_priv_indicator {
void *priv;
u32 sizeof_priv;
};
+
+static inline struct adapter *rtw_netdev_priv(struct net_device *netdev)
+{
+ return ((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv;
+}
+
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device * rtw_alloc_etherdev(int sizeof_priv);
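
Note: replacing the rtw_netdev_priv() macro with a static inline gives the accessor a real prototype, so the compiler now checks that the argument is a struct net_device * and that the result is used as a struct adapter *, instead of the macro's untyped pointer chase through netdev_priv(). Call sites are unchanged:

	struct adapter *padapter = rtw_netdev_priv(netdev); /* now type-checked */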
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 14583799039f..1567831caf91 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -716,8 +716,6 @@ void sa_query_timer_hdl(struct timer_list *t);
DBG_871X("%s set_sa_query_timer(%p, %d)\n", __func__, (mlmeext), (ms)); \
_set_timer(&(mlmeext)->sa_query_timer, (ms)); \
} while (0)
-extern int cckrates_included(unsigned char *rate, int ratelen);
-extern int cckratesonly_included(unsigned char *rate, int ratelen);
extern void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 2fb80b6eb51d..ea3ae3d38337 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2021,7 +2021,7 @@ static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
}
leave_ibss:
- return 0;
+ return ret;
}
static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index b2a1bbb30df6..900ff3a3b014 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -10,6 +10,7 @@
#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <net/cfg80211.h>
+#include <asm/unaligned.h>
void rtw_os_free_recvframe(union recv_frame *precvframe)
{
@@ -69,7 +70,7 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8
skb_reserve(sub_skb, 12);
skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
- eth_type = RTW_GET_BE16(&sub_skb->data[6]);
+ eth_type = get_unaligned_be16(&sub_skb->data[6]);
if (sub_skb->len >= 8 &&
((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 5b1392deb0a7..79b55ec827a4 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -15,8 +15,7 @@
#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
#endif
-static const struct sdio_device_id sdio_ids[] =
-{
+static const struct sdio_device_id sdio_ids[] = {
{ SDIO_DEVICE(0x024c, 0x0523), },
{ SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
@@ -132,6 +131,7 @@ static irqreturn_t gpio_hostwakeup_irq_thread(int irq, void *data)
static u8 gpio_hostwakeup_alloc_irq(struct adapter *padapter)
{
int err;
+
if (oob_irq == 0) {
DBG_871X("oob_irq ZERO!\n");
return _FAIL;
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
index 50b89340465b..079da433d811 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -84,9 +84,9 @@ s32 _sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
func = psdio->func;
for (i = 0; i < cnt; i++) {
- pdata[i] = sdio_readb(func, addr+i, &err);
+ pdata[i] = sdio_readb(func, addr + i, &err);
if (err) {
- DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, err, addr+i);
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, err, addr + i);
break;
}
}
@@ -154,9 +154,10 @@ s32 _sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
func = psdio->func;
for (i = 0; i < cnt; i++) {
- sdio_writeb(func, pdata[i], addr+i, &err);
+ sdio_writeb(func, pdata[i], addr + i, &err);
if (err) {
- DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__, err, addr+i, pdata[i]);
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__,
+ err, addr + i, pdata[i]);
break;
}
}
@@ -264,18 +265,19 @@ u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
*err = 0;
for (i = 0; i < SD_IO_TRY_CNT; i++) {
- if (claim_needed) sdio_claim_host(func);
+ if (claim_needed)
+ sdio_claim_host(func);
v = sdio_readl(func, addr, err);
- if (claim_needed) sdio_release_host(func);
+ if (claim_needed)
+ sdio_release_host(func);
if (*err == 0) {
rtw_reset_continual_io_error(psdiodev);
break;
} else {
DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
- if ((-ESHUTDOWN == *err) || (-ENODEV == *err)) {
+ if ((-ESHUTDOWN == *err) || (-ENODEV == *err))
padapter->bSurpriseRemoved = true;
- }
if (rtw_inc_and_chk_continual_io_error(psdiodev) == true) {
padapter->bSurpriseRemoved = true;
@@ -355,17 +357,18 @@ void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
*err = 0;
for (i = 0; i < SD_IO_TRY_CNT; i++) {
- if (claim_needed) sdio_claim_host(func);
+ if (claim_needed)
+ sdio_claim_host(func);
sdio_writel(func, v, addr, err);
- if (claim_needed) sdio_release_host(func);
+ if (claim_needed)
+ sdio_release_host(func);
if (*err == 0) {
rtw_reset_continual_io_error(psdiodev);
break;
} else {
DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
- if ((-ESHUTDOWN == *err) || (-ENODEV == *err)) {
+ if ((-ESHUTDOWN == *err) || (-ENODEV == *err))
padapter->bSurpriseRemoved = true;
- }
if (rtw_inc_and_chk_continual_io_error(psdiodev) == true) {
padapter->bSurpriseRemoved = true;
@@ -421,7 +424,7 @@ s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
u8 *pbuf = pdata;
for (i = 0; i < cnt; i++) {
- *(pbuf+i) = sdio_readb(func, addr+i, &err);
+ *(pbuf + i) = sdio_readb(func, addr + i, &err);
if (err) {
DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, err, addr);
@@ -432,9 +435,9 @@ s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
}
err = sdio_memcpy_fromio(func, pdata, addr, cnt);
- if (err) {
+ if (err)
DBG_871X(KERN_ERR "%s: FAIL(%d)! ADDR =%#x Size =%d\n", __func__, err, addr, cnt);
- }
+
return err;
}
@@ -522,9 +525,10 @@ s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
u8 *pbuf = pdata;
for (i = 0; i < cnt; i++) {
- sdio_writeb(func, *(pbuf+i), addr+i, &err);
+ sdio_writeb(func, *(pbuf + i), addr + i, &err);
if (err) {
- DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__, err, addr, *(pbuf+i));
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n",
+ __func__, err, addr, *(pbuf + i));
break;
}
}
@@ -534,9 +538,9 @@ s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
size = cnt;
err = sdio_memcpy_toio(func, addr, pdata, size);
- if (err) {
+ if (err)
DBG_871X(KERN_ERR "%s: FAIL(%d)! ADDR =%#x Size =%d(%d)\n", __func__, err, addr, cnt, size);
- }
+
return err;
}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 0027bcf638ad..909a3e663ef6 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -257,8 +257,8 @@ int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
spin_unlock_irq(&rtsx->reg_lock);
/* Wait for TRANS_OK_INT */
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
chip->int_reg);
@@ -284,8 +284,8 @@ finish_send_cmd:
return err;
}
-static inline void rtsx_add_sg_tbl(
- struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
+static inline void rtsx_add_sg_tbl(struct rtsx_chip *chip,
+ u32 addr, u32 len, u8 option)
{
__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
u64 val = 0;
@@ -419,8 +419,8 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -443,8 +443,8 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
if (rtsx->trans_result == TRANS_NOT_READY) {
init_completion(&trans_done);
spin_unlock_irq(&rtsx->reg_lock);
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -563,8 +563,8 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
@@ -590,8 +590,8 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
if (rtsx->trans_result == TRANS_NOT_READY) {
init_completion(&trans_done);
spin_unlock_irq(&rtsx->reg_lock);
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 84fb585a5739..029f0d09e966 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -411,6 +411,7 @@ static int __maybe_unused lynxfb_suspend(struct device *dev)
{
struct fb_info *info;
struct sm750_dev *sm750_dev;
+
sm750_dev = dev_get_drvdata(dev);
console_lock();
@@ -500,7 +501,7 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
var->height = var->width = -1;
var->accel_flags = 0;/* FB_ACCELF_TEXT; */
- /* check if current fb's video memory big enought to hold the onscreen*/
+ /* check if current fb's video memory big enough to hold the onscreen*/
request = var->xres_virtual * (var->bits_per_pixel >> 3);
/* defaulty crtc->channel go with par->index */
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 292fcee9d6f2..d567a2e3f70c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -122,7 +122,7 @@ static int
vc_vchi_audio_init(struct vchiq_instance *vchiq_instance,
struct bcm2835_audio_instance *instance)
{
- struct vchiq_service_params params = {
+ struct vchiq_service_params_kernel params = {
.version = VC_AUDIOSERV_VER,
.version_min = VC_AUDIOSERV_MIN_VER,
.fourcc = VCHIQ_MAKE_FOURCC('A', 'U', 'D', 'S'),
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index 18d63df368c4..fefc664eefcf 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -62,7 +62,14 @@ struct vchiq_service_base {
void *userdata;
};
-struct vchiq_service_params {
+struct vchiq_completion_data_kernel {
+ enum vchiq_reason reason;
+ struct vchiq_header *header;
+ void *service_userdata;
+ void *bulk_userdata;
+};
+
+struct vchiq_service_params_kernel {
int fourcc;
enum vchiq_status (*callback)(enum vchiq_reason reason,
struct vchiq_header *header,
@@ -79,7 +86,7 @@ extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
- const struct vchiq_service_params *params,
+ const struct vchiq_service_params_kernel *params,
unsigned int *pservice);
extern enum vchiq_status vchiq_close_service(unsigned int service);
extern enum vchiq_status vchiq_use_service(unsigned int service);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 5ed36d557014..8782ebe0b39a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -70,7 +70,7 @@ static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type);
+create_pagelist(char *buf, char __user *ubuf, size_t count, unsigned short type);
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
@@ -216,12 +216,12 @@ remote_event_signal(struct remote_event *event)
}
enum vchiq_status
-vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
- int dir)
+vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
+ void __user *uoffset, int size, int dir)
{
struct vchiq_pagelist_info *pagelistinfo;
- pagelistinfo = create_pagelist((char __user *)offset, size,
+ pagelistinfo = create_pagelist(offset, uoffset, size,
(dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ
: PAGELIST_WRITE);
@@ -229,7 +229,7 @@ vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
if (!pagelistinfo)
return VCHIQ_ERROR;
- bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
+ bulk->data = pagelistinfo->dma_addr;
/*
* Store the pagelistinfo address in remote_data,
@@ -304,7 +304,8 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
*/
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type)
+create_pagelist(char *buf, char __user *ubuf,
+ size_t count, unsigned short type)
{
struct pagelist *pagelist;
struct vchiq_pagelist_info *pagelistinfo;
@@ -320,7 +321,10 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
if (count >= INT_MAX - PAGE_SIZE)
return NULL;
- offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
+ if (buf)
+ offset = (uintptr_t)buf & (PAGE_SIZE - 1);
+ else
+ offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
@@ -368,14 +372,14 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
pagelistinfo->scatterlist = scatterlist;
pagelistinfo->scatterlist_mapped = 0;
- if (is_vmalloc_addr((void __force *)buf)) {
+ if (buf) {
unsigned long length = count;
unsigned int off = offset;
for (actual_pages = 0; actual_pages < num_pages;
actual_pages++) {
struct page *pg =
- vmalloc_to_page((void __force *)(buf +
+ vmalloc_to_page((buf +
(actual_pages * PAGE_SIZE)));
size_t bytes = PAGE_SIZE - off;
@@ -393,7 +397,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
/* do not try and release vmalloc pages */
} else {
actual_pages = pin_user_pages_fast(
- (unsigned long)buf & PAGE_MASK,
+ (unsigned long)ubuf & PAGE_MASK,
num_pages,
type == PAGELIST_READ,
pages);
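
Note: create_pagelist() previously took a single char __user * and guessed, via is_vmalloc_addr(), whether it was really a kernel buffer; the new signature makes the caller state it explicitly, passing either buf (kernel/vmalloc memory, mapped with vmalloc_to_page()) or ubuf (user memory, pinned with pin_user_pages_fast()), with the other argument NULL. A minimal sketch of just that dispatch, with hypothetical names (the real function also tracks offsets and builds a scatterlist):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* exactly one of kbuf/ubuf is non-NULL */
	static int map_bulk_pages(char *kbuf, char __user *ubuf,
				  int num_pages, struct page **pages,
				  bool is_read)
	{
		if (kbuf) {
			int i;

			/* kernel path: vmalloc pages, no pinning needed */
			for (i = 0; i < num_pages; i++)
				pages[i] = vmalloc_to_page(kbuf + i * PAGE_SIZE);
			return num_pages;
		}
		/* user path: pin pages; device writes need FOLL_WRITE */
		return pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK,
					   num_pages,
					   is_read ? FOLL_WRITE : 0, pages);
	}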
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index d4d811884861..01125d9f991b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -53,7 +53,7 @@ int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
struct user_service {
struct vchiq_service *service;
- void *userdata;
+ void __user *userdata;
struct vchiq_instance *instance;
char is_vchi;
char dequeue_pending;
@@ -75,7 +75,7 @@ struct bulk_waiter_node {
struct vchiq_instance {
struct vchiq_state *state;
- struct vchiq_completion_data completions[MAX_COMPLETIONS];
+ struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
int completion_insert;
int completion_remove;
struct completion insert_event;
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(vchiq_connect);
static enum vchiq_status vchiq_add_service(
struct vchiq_instance *instance,
- const struct vchiq_service_params *params,
+ const struct vchiq_service_params_kernel *params,
unsigned int *phandle)
{
enum vchiq_status status;
@@ -311,7 +311,7 @@ static enum vchiq_status vchiq_add_service(
enum vchiq_status vchiq_open_service(
struct vchiq_instance *instance,
- const struct vchiq_service_params *params,
+ const struct vchiq_service_params_kernel *params,
unsigned int *phandle)
{
enum vchiq_status status = VCHIQ_ERROR;
@@ -359,8 +359,9 @@ vchiq_bulk_transmit(unsigned int handle, const void *data,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle, (void *)data, size,
- userdata, mode,
+ status = vchiq_bulk_transfer(handle,
+ (void *)data, NULL,
+ size, userdata, mode,
VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
@@ -396,7 +397,8 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
case VCHIQ_BULK_MODE_CALLBACK:
- status = vchiq_bulk_transfer(handle, data, size, userdata,
+ status = vchiq_bulk_transfer(handle, data, NULL,
+ size, userdata,
mode, VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
@@ -430,6 +432,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
struct vchiq_service *service;
enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL;
+ bool found = false;
service = find_service_by_handle(handle);
if (!service)
@@ -443,17 +446,19 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
if (waiter->pid == current->pid) {
list_del(&waiter->list);
+ found = true;
break;
}
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (waiter) {
+ if (found) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
if (bulk) {
/* This thread has an outstanding bulk transfer. */
- if ((bulk->data != data) ||
+ /* FIXME: why compare a dma address to a pointer? */
+ if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
(bulk->size != size)) {
/* This is not a retry of the previous one.
* Cancel the signal when the transfer
@@ -464,9 +469,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
spin_unlock(&bulk_waiter_spinlock);
}
}
- }
-
- if (!waiter) {
+ } else {
waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
if (!waiter) {
vchiq_log_error(vchiq_core_log_level,
@@ -475,7 +478,8 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
}
}
- status = vchiq_bulk_transfer(handle, data, size, &waiter->bulk_waiter,
+ status = vchiq_bulk_transfer(handle, data, NULL, size,
+ &waiter->bulk_waiter,
VCHIQ_BULK_MODE_BLOCKING, dir);
if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
!waiter->bulk_waiter.bulk) {
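
Note: the new found flag matters because list_for_each_entry() never leaves its cursor NULL: when the list is exhausted without a match, waiter still points at a bogus entry computed from the list head, so the old `if (waiter)` test was always true. A minimal sketch of the corrected idiom:

	#include <linux/list.h>
	#include <linux/types.h>

	struct waiter_node {
		struct list_head list;
		pid_t pid;
	};

	static struct waiter_node *find_waiter(struct list_head *head, pid_t pid)
	{
		struct waiter_node *w;
		bool found = false;

		list_for_each_entry(w, head, list) {
			if (w->pid == pid) {
				found = true;	/* cursor only valid on a hit */
				break;
			}
		}
		return found ? w : NULL;
	}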
@@ -513,7 +517,7 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, struct user_service *user_service,
void *bulk_userdata)
{
- struct vchiq_completion_data *completion;
+ struct vchiq_completion_data_kernel *completion;
int insert;
DEBUG_INITIALISE(g_state.local)
@@ -765,12 +769,13 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
* vchiq_ioc_queue_message
*
**************************************************************************/
-static enum vchiq_status
+static int
vchiq_ioc_queue_message(unsigned int handle,
struct vchiq_element *elements,
unsigned long count)
{
struct vchiq_io_copy_callback_context context;
+ enum vchiq_status status = VCHIQ_SUCCESS;
unsigned long i;
size_t total_size = 0;
@@ -785,8 +790,459 @@ vchiq_ioc_queue_message(unsigned int handle,
total_size += elements[i].size;
}
- return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
- &context, total_size);
+ status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
+ &context, total_size);
+
+ if (status == VCHIQ_ERROR)
+ return -EIO;
+ else if (status == VCHIQ_RETRY)
+ return -EINTR;
+ return 0;
+}
+
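Note: vchiq_ioc_queue_message() now returns an int instead of the internal enum vchiq_status, since ioctl handlers must hand negative errno values back to user space: VCHIQ_ERROR maps to -EIO and VCHIQ_RETRY (interrupted, retryable) to -EINTR. The same mapping recurs in the helpers below; a hypothetical shared helper would look like:

	static int vchiq_status_to_errno(enum vchiq_status status)
	{
		switch (status) {
		case VCHIQ_SUCCESS:
			return 0;
		case VCHIQ_RETRY:
			return -EINTR;	/* interrupted, caller may retry */
		default:
			return -EIO;	/* VCHIQ_ERROR */
		}
	}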
+static int vchiq_ioc_create_service(struct vchiq_instance *instance,
+ struct vchiq_create_service *args)
+{
+ struct user_service *user_service = NULL;
+ struct vchiq_service *service;
+ enum vchiq_status status = VCHIQ_SUCCESS;
+ struct vchiq_service_params_kernel params;
+ int srvstate;
+
+ user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
+ if (!user_service)
+ return -ENOMEM;
+
+ if (args->is_open) {
+ if (!instance->connected) {
+ kfree(user_service);
+ return -ENOTCONN;
+ }
+ srvstate = VCHIQ_SRVSTATE_OPENING;
+ } else {
+ srvstate = instance->connected ?
+ VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
+ }
+
+ params = (struct vchiq_service_params_kernel) {
+ .fourcc = args->params.fourcc,
+ .callback = service_callback,
+ .userdata = user_service,
+ .version = args->params.version,
+ .version_min = args->params.version_min,
+ };
+ service = vchiq_add_service_internal(instance->state, &params,
+ srvstate, instance,
+ user_service_free);
+ if (!service) {
+ kfree(user_service);
+ return -EEXIST;
+ }
+
+ user_service->service = service;
+ user_service->userdata = args->params.userdata;
+ user_service->instance = instance;
+ user_service->is_vchi = (args->is_vchi != 0);
+ user_service->dequeue_pending = 0;
+ user_service->close_pending = 0;
+ user_service->message_available_pos = instance->completion_remove - 1;
+ user_service->msg_insert = 0;
+ user_service->msg_remove = 0;
+ init_completion(&user_service->insert_event);
+ init_completion(&user_service->remove_event);
+ init_completion(&user_service->close_event);
+
+ if (args->is_open) {
+ status = vchiq_open_service_internal(service, instance->pid);
+ if (status != VCHIQ_SUCCESS) {
+ vchiq_remove_service(service->handle);
+ return (status == VCHIQ_RETRY) ?
+ -EINTR : -EIO;
+ }
+ }
+ args->handle = service->handle;
+
+ return 0;
+}
+
+static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
+ struct vchiq_dequeue_message *args)
+{
+ struct user_service *user_service;
+ struct vchiq_service *service;
+ struct vchiq_header *header;
+ int ret;
+
+ DEBUG_INITIALISE(g_state.local)
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ service = find_service_for_instance(instance, args->handle);
+ if (!service)
+ return -EINVAL;
+
+ user_service = (struct user_service *)service->base.userdata;
+ if (user_service->is_vchi == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ spin_lock(&msg_queue_spinlock);
+ if (user_service->msg_remove == user_service->msg_insert) {
+ if (!args->blocking) {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ ret = -EWOULDBLOCK;
+ goto out;
+ }
+ user_service->dequeue_pending = 1;
+ ret = 0;
+ do {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ if (wait_for_completion_interruptible(
+ &user_service->insert_event)) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "DEQUEUE_MESSAGE interrupted");
+ ret = -EINTR;
+ break;
+ }
+ spin_lock(&msg_queue_spinlock);
+ } while (user_service->msg_remove ==
+ user_service->msg_insert);
+
+ if (ret)
+ goto out;
+ }
+
+ BUG_ON((int)(user_service->msg_insert -
+ user_service->msg_remove) < 0);
+
+ header = user_service->msg_queue[user_service->msg_remove &
+ (MSG_QUEUE_SIZE - 1)];
+ user_service->msg_remove++;
+ spin_unlock(&msg_queue_spinlock);
+
+ complete(&user_service->remove_event);
+ if (!header) {
+ ret = -ENOTCONN;
+ } else if (header->size <= args->bufsize) {
+ /* Copy to user space if msgbuf is not NULL */
+ if (!args->buf || (copy_to_user(args->buf,
+ header->data, header->size) == 0)) {
+ ret = header->size;
+ vchiq_release_message(service->handle, header);
+ } else
+ ret = -EFAULT;
+ } else {
+ vchiq_log_error(vchiq_arm_log_level,
+ "header %pK: bufsize %x < size %x",
+ header, args->bufsize, header->size);
+ WARN(1, "invalid size\n");
+ ret = -EMSGSIZE;
+ }
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+out:
+ unlock_service(service);
+ return ret;
+}
+
+static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ struct vchiq_queue_bulk_transfer *args,
+ enum vchiq_bulk_dir dir,
+ enum vchiq_bulk_mode __user *mode)
+{
+ struct vchiq_service *service;
+ struct bulk_waiter_node *waiter = NULL;
+ bool found = false;
+ void *userdata = NULL;
+ int status = 0;
+ int ret;
+
+ service = find_service_for_instance(instance, args->handle);
+ if (!service)
+ return -EINVAL;
+
+ if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
+ waiter = kzalloc(sizeof(struct bulk_waiter_node),
+ GFP_KERNEL);
+ if (!waiter) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ userdata = &waiter->bulk_waiter;
+ } else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_for_each_entry(waiter, &instance->bulk_waiter_list,
+ list) {
+ if (waiter->pid == current->pid) {
+ list_del(&waiter->list);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ if (!found) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "no bulk_waiter found for pid %d",
+ current->pid);
+ ret = -ESRCH;
+ goto out;
+ }
+ vchiq_log_info(vchiq_arm_log_level,
+ "found bulk_waiter %pK for pid %d", waiter,
+ current->pid);
+ userdata = &waiter->bulk_waiter;
+ }
+
+ /*
+ * FIXME address space mismatch:
+ * args->data may be interpreted as a kernel pointer
+ * in create_pagelist() called from vchiq_bulk_transfer(),
+ * accessing kernel data instead of user space, based on the
+ * address.
+ */
+ status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
+ userdata, args->mode, dir);
+
+ if (!waiter) {
+ ret = 0;
+ goto out;
+ }
+
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ !waiter->bulk_waiter.bulk) {
+ if (waiter->bulk_waiter.bulk) {
+			/*
+			 * Cancel the signal when the transfer
+			 * completes.
+			 */
+ spin_lock(&bulk_waiter_spinlock);
+ waiter->bulk_waiter.bulk->userdata = NULL;
+ spin_unlock(&bulk_waiter_spinlock);
+ }
+ kfree(waiter);
+ ret = 0;
+ } else {
+ const enum vchiq_bulk_mode mode_waiting =
+ VCHIQ_BULK_MODE_WAITING;
+ waiter->pid = current->pid;
+ mutex_lock(&instance->bulk_waiter_list_mutex);
+ list_add(&waiter->list, &instance->bulk_waiter_list);
+ mutex_unlock(&instance->bulk_waiter_list_mutex);
+ vchiq_log_info(vchiq_arm_log_level,
+ "saved bulk_waiter %pK for pid %d",
+ waiter, current->pid);
+
+ ret = put_user(mode_waiting, mode);
+ }
+out:
+ unlock_service(service);
+ if (ret)
+ return ret;
+ else if (status == VCHIQ_ERROR)
+ return -EIO;
+ else if (status == VCHIQ_RETRY)
+ return -EINTR;
+ return 0;
+}
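
The two bulk modes interact across system calls: when a blocking transfer is interrupted, the function parks the waiter on a per-instance list keyed by the caller's pid and rewrites the user-visible mode to VCHIQ_BULK_MODE_WAITING before returning -EINTR, so the same thread can reissue the ioctl and reattach to the pending transfer. A hedged userspace sketch of that retry contract (the fd, handle and header path are illustrative assumptions, not part of this patch):

#include <errno.h>
#include <sys/ioctl.h>
#include "vchiq_ioctl.h"	/* assumed userspace copy of the ioctl ABI */

static int bulk_tx_retry(int fd, unsigned int handle, void *buf,
			 unsigned int len)
{
	struct vchiq_queue_bulk_transfer args = {
		.handle = handle,
		.data = buf,
		.size = len,
		.mode = VCHIQ_BULK_MODE_BLOCKING,
	};
	int rc;

	/* On EINTR the driver has already set args.mode to
	 * VCHIQ_BULK_MODE_WAITING and remembered this thread's pid, so
	 * retrying reattaches to the queued transfer instead of
	 * submitting a new one. */
	do {
		rc = ioctl(fd, VCHIQ_IOC_QUEUE_BULK_TRANSMIT, &args);
	} while (rc == -1 && errno == EINTR);
	return rc;
}
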
+
+/* read a user pointer value from an array of pointers in user space */
+static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
+{
+ int ret;
+
+ if (in_compat_syscall()) {
+ compat_uptr_t ptr32;
+ compat_uptr_t __user *uptr = ubuf;
+ ret = get_user(ptr32, uptr + index);
+ *buf = compat_ptr(ptr32);
+ } else {
+ uintptr_t ptr, __user *uptr = ubuf;
+ ret = get_user(ptr, uptr + index);
+ *buf = (void __user *)ptr;
+ }
+
+ return ret;
+}
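
vchiq_get_user_ptr() is what lets the native and compat paths share one implementation: the user-supplied array is reinterpreted as compat_uptr_t[] or uintptr_t[] depending on the syscall flavour, and the element is widened with compat_ptr() where needed. A usage sketch, mirroring how vchiq_ioc_await_completion() pulls message buffers below (the surrounding variables are assumed context):

	void __user *msgbuf;

	/* Fetch element 'msgbufcount' of the user pointer array; the
	 * helper picks the 4- or 8-byte element width for us. */
	if (vchiq_get_user_ptr(&msgbuf, args->msgbufs, msgbufcount))
		return -EFAULT;
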
+
+struct vchiq_completion_data32 {
+ enum vchiq_reason reason;
+ compat_uptr_t header;
+ compat_uptr_t service_userdata;
+ compat_uptr_t bulk_userdata;
+};
+
+static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
+ struct vchiq_completion_data *completion,
+ int index)
+{
+ struct vchiq_completion_data32 __user *buf32 = (void __user *)buf;
+
+ if (in_compat_syscall()) {
+ struct vchiq_completion_data32 tmp = {
+ .reason = completion->reason,
+ .header = ptr_to_compat(completion->header),
+ .service_userdata = ptr_to_compat(completion->service_userdata),
+ .bulk_userdata = ptr_to_compat(completion->bulk_userdata),
+ };
+ if (copy_to_user(&buf32[index], &tmp, sizeof(tmp)))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(&buf[index], completion, sizeof(*completion)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
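
vchiq_put_completion() is the write-back counterpart: the destination buffer is typed with the native layout but is reinterpreted as an array of packed 32-bit records when the caller is a compat task, so indexing must use the matching element size — which is why the helper indexes buf32[index] rather than computing byte offsets by hand. A hypothetical caller sketch (the names native and n are illustrative):

	for (i = 0; i < n; i++)
		if (vchiq_put_completion(args->buf, &native[i], i))
			return -EFAULT;
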
+
+static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
+ struct vchiq_await_completion *args,
+ int __user *msgbufcountp)
+{
+ int msgbufcount;
+ int remove;
+ int ret;
+
+ DEBUG_INITIALISE(g_state.local)
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ if (!instance->connected) {
+ return -ENOTCONN;
+ }
+
+ mutex_lock(&instance->completion_mutex);
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ while ((instance->completion_remove ==
+ instance->completion_insert)
+ && !instance->closing) {
+ int rc;
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ mutex_unlock(&instance->completion_mutex);
+ rc = wait_for_completion_interruptible(
+ &instance->insert_event);
+ mutex_lock(&instance->completion_mutex);
+ if (rc) {
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ vchiq_log_info(vchiq_arm_log_level,
+ "AWAIT_COMPLETION interrupted");
+ ret = -EINTR;
+ goto out;
+ }
+ }
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ msgbufcount = args->msgbufcount;
+ remove = instance->completion_remove;
+
+ for (ret = 0; ret < args->count; ret++) {
+ struct vchiq_completion_data_kernel *completion;
+ struct vchiq_completion_data user_completion;
+ struct vchiq_service *service;
+ struct user_service *user_service;
+ struct vchiq_header *header;
+
+ if (remove == instance->completion_insert)
+ break;
+
+ completion = &instance->completions[
+ remove & (MAX_COMPLETIONS - 1)];
+
+ /*
+ * A read memory barrier is needed to stop
+ * prefetch of a stale completion record
+ */
+ rmb();
+
+ service = completion->service_userdata;
+ user_service = service->base.userdata;
+
+ memset(&user_completion, 0, sizeof(user_completion));
+ user_completion = (struct vchiq_completion_data) {
+ .reason = completion->reason,
+ .service_userdata = user_service->userdata,
+ };
+
+ header = completion->header;
+ if (header) {
+ void __user *msgbuf;
+ int msglen;
+
+ msglen = header->size + sizeof(struct vchiq_header);
+ /* This must be a VCHIQ-style service */
+ if (args->msgbufsize < msglen) {
+ vchiq_log_error(vchiq_arm_log_level,
+ "header %pK: msgbufsize %x < msglen %x",
+ header, args->msgbufsize, msglen);
+ WARN(1, "invalid message size\n");
+ if (ret == 0)
+ ret = -EMSGSIZE;
+ break;
+ }
+ if (msgbufcount <= 0)
+				/* Stall here for lack of a buffer for the message. */
+ break;
+ /* Get the pointer from user space */
+ msgbufcount--;
+ if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
+ msgbufcount)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Copy the message to user space */
+ if (copy_to_user(msgbuf, header, msglen)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+			/* Now it has been copied, the message can be released. */
+ vchiq_release_message(service->handle, header);
+
+			/* The completion must point to the msgbuf. */
+ user_completion.header = msgbuf;
+ }
+
+ if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
+ !instance->use_close_delivered)
+ unlock_service(service);
+
+ /*
+ * FIXME: address space mismatch, does bulk_userdata
+ * actually point to user or kernel memory?
+ */
+ user_completion.bulk_userdata = completion->bulk_userdata;
+
+ if (vchiq_put_completion(args->buf, &user_completion, ret)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * Ensure that the above copy has completed
+ * before advancing the remove pointer.
+ */
+ mb();
+ remove++;
+ instance->completion_remove = remove;
+ }
+
+ if (msgbufcount != args->msgbufcount) {
+ if (put_user(msgbufcount, msgbufcountp))
+ ret = -EFAULT;
+ }
+out:
+ if (ret)
+ complete(&instance->remove_event);
+ mutex_unlock(&instance->completion_mutex);
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+ return ret;
}
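
The rmb()/mb() pair above is one side of a classic single-producer ring protocol: the producer must publish the completion record before advancing completion_insert, and the consumer must finish copying a record out before advancing completion_remove and making the slot reusable. A minimal sketch of both sides under those assumptions, with illustrative names:

struct cring {
	int slots[16];			/* power-of-two depth */
	unsigned int insert, remove;	/* free-running indices */
};

static void cring_produce(struct cring *r, int v)
{
	r->slots[r->insert & 15] = v;
	wmb();				/* record visible before index */
	WRITE_ONCE(r->insert, r->insert + 1);
}

static int cring_consume(struct cring *r)
{
	int v;

	while (r->remove == READ_ONCE(r->insert))
		cpu_relax();		/* the driver sleeps on a completion instead */
	rmb();				/* don't prefetch a stale record */
	v = r->slots[r->remove & 15];
	mb();				/* finish reading before freeing the slot */
	WRITE_ONCE(r->remove, r->remove + 1);
	return v;
}
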
/****************************************************************************
@@ -803,8 +1259,6 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret = 0;
int i, rc;
- DEBUG_INITIALISE(g_state.local)
-
vchiq_log_trace(vchiq_arm_log_level,
"%s - instance %pK, cmd %s, arg %lx",
__func__, instance,
@@ -861,85 +1315,22 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case VCHIQ_IOC_CREATE_SERVICE: {
+ struct vchiq_create_service __user *argp;
struct vchiq_create_service args;
- struct user_service *user_service = NULL;
- void *userdata;
- int srvstate;
- if (copy_from_user(&args, (const void __user *)arg,
- sizeof(args))) {
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
ret = -EFAULT;
break;
}
- user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
- if (!user_service) {
- ret = -ENOMEM;
+ ret = vchiq_ioc_create_service(instance, &args);
+ if (ret < 0)
break;
- }
-
- if (args.is_open) {
- if (!instance->connected) {
- ret = -ENOTCONN;
- kfree(user_service);
- break;
- }
- srvstate = VCHIQ_SRVSTATE_OPENING;
- } else {
- srvstate =
- instance->connected ?
- VCHIQ_SRVSTATE_LISTENING :
- VCHIQ_SRVSTATE_HIDDEN;
- }
-
- userdata = args.params.userdata;
- args.params.callback = service_callback;
- args.params.userdata = user_service;
- service = vchiq_add_service_internal(
- instance->state,
- &args.params, srvstate,
- instance, user_service_free);
-
- if (service) {
- user_service->service = service;
- user_service->userdata = userdata;
- user_service->instance = instance;
- user_service->is_vchi = (args.is_vchi != 0);
- user_service->dequeue_pending = 0;
- user_service->close_pending = 0;
- user_service->message_available_pos =
- instance->completion_remove - 1;
- user_service->msg_insert = 0;
- user_service->msg_remove = 0;
- init_completion(&user_service->insert_event);
- init_completion(&user_service->remove_event);
- init_completion(&user_service->close_event);
-
- if (args.is_open) {
- status = vchiq_open_service_internal
- (service, instance->pid);
- if (status != VCHIQ_SUCCESS) {
- vchiq_remove_service(service->handle);
- service = NULL;
- ret = (status == VCHIQ_RETRY) ?
- -EINTR : -EIO;
- break;
- }
- }
-
- if (copy_to_user((void __user *)
- &(((struct vchiq_create_service __user *)
- arg)->handle),
- (const void *)&service->handle,
- sizeof(service->handle))) {
- ret = -EFAULT;
- vchiq_remove_service(service->handle);
- }
- service = NULL;
- } else {
- ret = -EEXIST;
- kfree(user_service);
+ if (put_user(args.handle, &argp->handle)) {
+ vchiq_remove_service(args.handle);
+ ret = -EFAULT;
}
} break;
@@ -1020,9 +1411,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(elements, args.elements,
args.count * sizeof(struct vchiq_element)) == 0)
- status = vchiq_ioc_queue_message
- (args.handle,
- elements, args.count);
+ ret = vchiq_ioc_queue_message(args.handle, elements,
+ args.count);
else
ret = -EFAULT;
} else {
@@ -1033,333 +1423,46 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
struct vchiq_queue_bulk_transfer args;
- struct bulk_waiter_node *waiter = NULL;
+ struct vchiq_queue_bulk_transfer __user *argp;
enum vchiq_bulk_dir dir =
(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
- if (copy_from_user(&args, (const void __user *)arg,
- sizeof(args))) {
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
ret = -EFAULT;
break;
}
- service = find_service_for_instance(instance, args.handle);
- if (!service) {
- ret = -EINVAL;
- break;
- }
-
- if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
- waiter = kzalloc(sizeof(struct bulk_waiter_node),
- GFP_KERNEL);
- if (!waiter) {
- ret = -ENOMEM;
- break;
- }
-
- args.userdata = &waiter->bulk_waiter;
- } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
- mutex_lock(&instance->bulk_waiter_list_mutex);
- list_for_each_entry(waiter, &instance->bulk_waiter_list,
- list) {
- if (waiter->pid == current->pid) {
- list_del(&waiter->list);
- break;
- }
- }
- mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (!waiter) {
- vchiq_log_error(vchiq_arm_log_level,
- "no bulk_waiter found for pid %d",
- current->pid);
- ret = -ESRCH;
- break;
- }
- vchiq_log_info(vchiq_arm_log_level,
- "found bulk_waiter %pK for pid %d", waiter,
- current->pid);
- args.userdata = &waiter->bulk_waiter;
- }
-
- status = vchiq_bulk_transfer(args.handle, args.data, args.size,
- args.userdata, args.mode, dir);
-
- if (!waiter)
- break;
-
- if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
- !waiter->bulk_waiter.bulk) {
- if (waiter->bulk_waiter.bulk) {
- /* Cancel the signal when the transfer
- ** completes. */
- spin_lock(&bulk_waiter_spinlock);
- waiter->bulk_waiter.bulk->userdata = NULL;
- spin_unlock(&bulk_waiter_spinlock);
- }
- kfree(waiter);
- } else {
- const enum vchiq_bulk_mode mode_waiting =
- VCHIQ_BULK_MODE_WAITING;
- waiter->pid = current->pid;
- mutex_lock(&instance->bulk_waiter_list_mutex);
- list_add(&waiter->list, &instance->bulk_waiter_list);
- mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %pK for pid %d",
- waiter, current->pid);
-
- if (copy_to_user((void __user *)
- &(((struct vchiq_queue_bulk_transfer __user *)
- arg)->mode),
- (const void *)&mode_waiting,
- sizeof(mode_waiting)))
- ret = -EFAULT;
- }
+ ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
+ dir, &argp->mode);
} break;
case VCHIQ_IOC_AWAIT_COMPLETION: {
struct vchiq_await_completion args;
+ struct vchiq_await_completion __user *argp;
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- if (!instance->connected) {
- ret = -ENOTCONN;
- break;
- }
-
- if (copy_from_user(&args, (const void __user *)arg,
- sizeof(args))) {
+ argp = (void __user *)arg;
+ if (copy_from_user(&args, argp, sizeof(args))) {
ret = -EFAULT;
break;
}
- mutex_lock(&instance->completion_mutex);
-
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- while ((instance->completion_remove ==
- instance->completion_insert)
- && !instance->closing) {
- int rc;
-
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- mutex_unlock(&instance->completion_mutex);
- rc = wait_for_completion_interruptible(
- &instance->insert_event);
- mutex_lock(&instance->completion_mutex);
- if (rc) {
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- vchiq_log_info(vchiq_arm_log_level,
- "AWAIT_COMPLETION interrupted");
- ret = -EINTR;
- break;
- }
- }
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
-
- if (ret == 0) {
- int msgbufcount = args.msgbufcount;
- int remove = instance->completion_remove;
-
- for (ret = 0; ret < args.count; ret++) {
- struct vchiq_completion_data *completion;
- struct vchiq_service *service;
- struct user_service *user_service;
- struct vchiq_header *header;
-
- if (remove == instance->completion_insert)
- break;
-
- completion = &instance->completions[
- remove & (MAX_COMPLETIONS - 1)];
-
- /*
- * A read memory barrier is needed to stop
- * prefetch of a stale completion record
- */
- rmb();
-
- service = completion->service_userdata;
- user_service = service->base.userdata;
- completion->service_userdata =
- user_service->userdata;
-
- header = completion->header;
- if (header) {
- void __user *msgbuf;
- int msglen;
-
- msglen = header->size +
- sizeof(struct vchiq_header);
- /* This must be a VCHIQ-style service */
- if (args.msgbufsize < msglen) {
- vchiq_log_error(
- vchiq_arm_log_level,
- "header %pK: msgbufsize %x < msglen %x",
- header, args.msgbufsize,
- msglen);
- WARN(1, "invalid message "
- "size\n");
- if (ret == 0)
- ret = -EMSGSIZE;
- break;
- }
- if (msgbufcount <= 0)
- /* Stall here for lack of a
- ** buffer for the message. */
- break;
- /* Get the pointer from user space */
- msgbufcount--;
- if (copy_from_user(&msgbuf,
- (const void __user *)
- &args.msgbufs[msgbufcount],
- sizeof(msgbuf))) {
- if (ret == 0)
- ret = -EFAULT;
- break;
- }
-
- /* Copy the message to user space */
- if (copy_to_user(msgbuf, header,
- msglen)) {
- if (ret == 0)
- ret = -EFAULT;
- break;
- }
-
- /* Now it has been copied, the message
- ** can be released. */
- vchiq_release_message(service->handle,
- header);
-
- /* The completion must point to the
- ** msgbuf. */
- completion->header =
- (struct vchiq_header __force *)
- msgbuf;
- }
-
- if ((completion->reason ==
- VCHIQ_SERVICE_CLOSED) &&
- !instance->use_close_delivered)
- unlock_service(service);
-
- if (copy_to_user((void __user *)(
- (size_t)args.buf + ret *
- sizeof(struct vchiq_completion_data)),
- completion,
- sizeof(struct vchiq_completion_data))) {
- if (ret == 0)
- ret = -EFAULT;
- break;
- }
-
- /*
- * Ensure that the above copy has completed
- * before advancing the remove pointer.
- */
- mb();
- remove++;
- instance->completion_remove = remove;
- }
-
- if (msgbufcount != args.msgbufcount) {
- if (copy_to_user((void __user *)
- &((struct vchiq_await_completion *)arg)
- ->msgbufcount,
- &msgbufcount,
- sizeof(msgbufcount))) {
- ret = -EFAULT;
- }
- }
- }
-
- if (ret)
- complete(&instance->remove_event);
- mutex_unlock(&instance->completion_mutex);
- DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ ret = vchiq_ioc_await_completion(instance, &args,
+ &argp->msgbufcount);
} break;
case VCHIQ_IOC_DEQUEUE_MESSAGE: {
struct vchiq_dequeue_message args;
- struct user_service *user_service;
- struct vchiq_header *header;
- DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
if (copy_from_user(&args, (const void __user *)arg,
sizeof(args))) {
ret = -EFAULT;
break;
}
- service = find_service_for_instance(instance, args.handle);
- if (!service) {
- ret = -EINVAL;
- break;
- }
- user_service = (struct user_service *)service->base.userdata;
- if (user_service->is_vchi == 0) {
- ret = -EINVAL;
- break;
- }
- spin_lock(&msg_queue_spinlock);
- if (user_service->msg_remove == user_service->msg_insert) {
- if (!args.blocking) {
- spin_unlock(&msg_queue_spinlock);
- DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
- ret = -EWOULDBLOCK;
- break;
- }
- user_service->dequeue_pending = 1;
- do {
- spin_unlock(&msg_queue_spinlock);
- DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
- if (wait_for_completion_interruptible(
- &user_service->insert_event)) {
- vchiq_log_info(vchiq_arm_log_level,
- "DEQUEUE_MESSAGE interrupted");
- ret = -EINTR;
- break;
- }
- spin_lock(&msg_queue_spinlock);
- } while (user_service->msg_remove ==
- user_service->msg_insert);
-
- if (ret)
- break;
- }
-
- BUG_ON((int)(user_service->msg_insert -
- user_service->msg_remove) < 0);
-
- header = user_service->msg_queue[user_service->msg_remove &
- (MSG_QUEUE_SIZE - 1)];
- user_service->msg_remove++;
- spin_unlock(&msg_queue_spinlock);
-
- complete(&user_service->remove_event);
- if (!header)
- ret = -ENOTCONN;
- else if (header->size <= args.bufsize) {
- /* Copy to user space if msgbuf is not NULL */
- if (!args.buf ||
- (copy_to_user((void __user *)args.buf,
- header->data,
- header->size) == 0)) {
- ret = header->size;
- vchiq_release_message(
- service->handle,
- header);
- } else
- ret = -EFAULT;
- } else {
- vchiq_log_error(vchiq_arm_log_level,
- "header %pK: bufsize %x < size %x",
- header, args.bufsize, header->size);
- WARN(1, "invalid size\n");
- ret = -EMSGSIZE;
- }
- DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+ ret = vchiq_ioc_dequeue_message(instance, &args);
} break;
case VCHIQ_IOC_GET_CLIENT_ID: {
@@ -1489,46 +1592,36 @@ static long
vchiq_compat_ioctl_create_service(
struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_create_service32 __user *ptrargs32)
{
- struct vchiq_create_service __user *args;
- struct vchiq_create_service32 __user *ptrargs32 =
- (struct vchiq_create_service32 __user *)arg;
+ struct vchiq_create_service args;
struct vchiq_create_service32 args32;
long ret;
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
return -EFAULT;
- if (put_user(args32.params.fourcc, &args->params.fourcc) ||
- put_user(compat_ptr(args32.params.callback),
- &args->params.callback) ||
- put_user(compat_ptr(args32.params.userdata),
- &args->params.userdata) ||
- put_user(args32.params.version, &args->params.version) ||
- put_user(args32.params.version_min,
- &args->params.version_min) ||
- put_user(args32.is_open, &args->is_open) ||
- put_user(args32.is_vchi, &args->is_vchi) ||
- put_user(args32.handle, &args->handle))
- return -EFAULT;
-
- ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);
+ args = (struct vchiq_create_service) {
+ .params = {
+ .fourcc = args32.params.fourcc,
+ .callback = compat_ptr(args32.params.callback),
+ .userdata = compat_ptr(args32.params.userdata),
+ .version = args32.params.version,
+ .version_min = args32.params.version_min,
+ },
+ .is_open = args32.is_open,
+ .is_vchi = args32.is_vchi,
+ .handle = args32.handle,
+ };
+ ret = vchiq_ioc_create_service(file->private_data, &args);
if (ret < 0)
return ret;
- if (get_user(args32.handle, &args->handle))
- return -EFAULT;
-
- if (copy_to_user(&ptrargs32->handle,
- &args32.handle,
- sizeof(args32.handle)))
+ if (put_user(args.handle, &ptrargs32->handle)) {
+ vchiq_remove_service(args.handle);
return -EFAULT;
+ }
return 0;
}
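
Every compat handler in this patch now follows the same conversion shape instead of bouncing through compat_alloc_user_space(): copy the packed 32-bit struct in, widen embedded pointers with compat_ptr(), and call the same helper the 64-bit ioctl path uses. A sketch of the template with hypothetical foo/foo32 types (assuming <linux/compat.h> and <linux/uaccess.h>):

struct foo32 {
	compat_uptr_t buf;
	u32 len;
};

struct foo {
	void __user *buf;
	unsigned int len;
};

static int fetch_foo(struct foo *out, struct foo32 __user *in)
{
	struct foo32 tmp;

	if (copy_from_user(&tmp, in, sizeof(tmp)))
		return -EFAULT;
	out->buf = compat_ptr(tmp.buf);	/* widen the packed pointer */
	out->len = tmp.len;
	return 0;
}
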
@@ -1550,55 +1643,53 @@ struct vchiq_queue_message32 {
static long
vchiq_compat_ioctl_queue_message(struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_queue_message32 __user *arg)
{
- struct vchiq_queue_message __user *args;
- struct vchiq_element __user *elements;
+ struct vchiq_queue_message args;
struct vchiq_queue_message32 args32;
- unsigned int count;
-
- if (copy_from_user(&args32,
- (struct vchiq_queue_message32 __user *)arg,
- sizeof(args32)))
- return -EFAULT;
-
- args = compat_alloc_user_space(sizeof(*args) +
- (sizeof(*elements) * MAX_ELEMENTS));
+ struct vchiq_service *service;
+ int ret;
- if (!args)
+ if (copy_from_user(&args32, arg, sizeof(args32)))
return -EFAULT;
- if (put_user(args32.handle, &args->handle) ||
- put_user(args32.count, &args->count) ||
- put_user(compat_ptr(args32.elements), &args->elements))
- return -EFAULT;
+ args = (struct vchiq_queue_message) {
+ .handle = args32.handle,
+ .count = args32.count,
+ .elements = compat_ptr(args32.elements),
+ };
if (args32.count > MAX_ELEMENTS)
return -EINVAL;
- if (args32.elements && args32.count) {
- struct vchiq_element32 tempelement32[MAX_ELEMENTS];
+ service = find_service_for_instance(file->private_data, args.handle);
+ if (!service)
+ return -EINVAL;
- elements = (struct vchiq_element __user *)(args + 1);
+ if (args32.elements && args32.count) {
+ struct vchiq_element32 element32[MAX_ELEMENTS];
+ struct vchiq_element elements[MAX_ELEMENTS];
+ unsigned int count;
- if (copy_from_user(&tempelement32,
- compat_ptr(args32.elements),
- sizeof(tempelement32)))
+ if (copy_from_user(&element32, args.elements,
+ sizeof(element32))) {
+ unlock_service(service);
return -EFAULT;
+ }
for (count = 0; count < args32.count; count++) {
- if (put_user(compat_ptr(tempelement32[count].data),
- &elements[count].data) ||
- put_user(tempelement32[count].size,
- &elements[count].size))
- return -EFAULT;
+ elements[count].data =
+ compat_ptr(element32[count].data);
+ elements[count].size = element32[count].size;
}
-
- if (put_user(elements, &args->elements))
- return -EFAULT;
+ ret = vchiq_ioc_queue_message(args.handle, elements,
+ args.count);
+ } else {
+ ret = -EINVAL;
}
+ unlock_service(service);
- return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
+ return ret;
}
struct vchiq_queue_bulk_transfer32 {
@@ -1617,56 +1708,28 @@ struct vchiq_queue_bulk_transfer32 {
static long
vchiq_compat_ioctl_queue_bulk(struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_queue_bulk_transfer32 __user *argp)
{
- struct vchiq_queue_bulk_transfer __user *args;
struct vchiq_queue_bulk_transfer32 args32;
- struct vchiq_queue_bulk_transfer32 __user *ptrargs32 =
- (struct vchiq_queue_bulk_transfer32 __user *)arg;
- long ret;
-
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
- if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
- return -EFAULT;
-
- if (put_user(args32.handle, &args->handle) ||
- put_user(compat_ptr(args32.data), &args->data) ||
- put_user(args32.size, &args->size) ||
- put_user(compat_ptr(args32.userdata), &args->userdata) ||
- put_user(args32.mode, &args->mode))
- return -EFAULT;
-
- if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
- cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
- else
- cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
-
- ret = vchiq_ioctl(file, cmd, (unsigned long)args);
-
- if (ret < 0)
- return ret;
+ struct vchiq_queue_bulk_transfer args;
+ enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
- if (get_user(args32.mode, &args->mode))
+ if (copy_from_user(&args32, argp, sizeof(args32)))
return -EFAULT;
- if (copy_to_user(&ptrargs32->mode,
- &args32.mode,
- sizeof(args32.mode)))
- return -EFAULT;
+ args = (struct vchiq_queue_bulk_transfer) {
+ .handle = args32.handle,
+ .data = compat_ptr(args32.data),
+ .size = args32.size,
+ .userdata = compat_ptr(args32.userdata),
+ .mode = args32.mode,
+ };
- return 0;
+ return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
+ dir, &argp->mode);
}
-struct vchiq_completion_data32 {
- enum vchiq_reason reason;
- compat_uptr_t header;
- compat_uptr_t service_userdata;
- compat_uptr_t bulk_userdata;
-};
-
struct vchiq_await_completion32 {
unsigned int count;
compat_uptr_t buf;
@@ -1681,141 +1744,24 @@ struct vchiq_await_completion32 {
static long
vchiq_compat_ioctl_await_completion(struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_await_completion32 __user *argp)
{
- struct vchiq_await_completion __user *args;
- struct vchiq_completion_data __user *completion;
- struct vchiq_completion_data completiontemp;
+ struct vchiq_await_completion args;
struct vchiq_await_completion32 args32;
- struct vchiq_completion_data32 completion32;
- unsigned int __user *msgbufcount32;
- unsigned int msgbufcount_native;
- compat_uptr_t msgbuf32;
- void __user *msgbuf;
- void * __user *msgbufptr;
- long ret;
-
- args = compat_alloc_user_space(sizeof(*args) +
- sizeof(*completion) +
- sizeof(*msgbufptr));
- if (!args)
- return -EFAULT;
-
- completion = (struct vchiq_completion_data __user *)(args + 1);
- msgbufptr = (void * __user *)(completion + 1);
-
- if (copy_from_user(&args32,
- (struct vchiq_completion_data32 __user *)arg,
- sizeof(args32)))
- return -EFAULT;
-
- if (put_user(args32.count, &args->count) ||
- put_user(compat_ptr(args32.buf), &args->buf) ||
- put_user(args32.msgbufsize, &args->msgbufsize) ||
- put_user(args32.msgbufcount, &args->msgbufcount) ||
- put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
- return -EFAULT;
-
- /* These are simple cases, so just fall into the native handler */
- if (!args32.count || !args32.buf || !args32.msgbufcount)
- return vchiq_ioctl(file,
- VCHIQ_IOC_AWAIT_COMPLETION,
- (unsigned long)args);
-
- /*
- * These are the more complex cases. Typical applications of this
- * ioctl will use a very large count, with a very large msgbufcount.
- * Since the native ioctl can asynchronously fill in the returned
- * buffers and the application can in theory begin processing messages
- * even before the ioctl returns, a bit of a trick is used here.
- *
- * By forcing both count and msgbufcount to be 1, it forces the native
- * ioctl to only claim at most 1 message is available. This tricks
- * the calling application into thinking only 1 message was actually
- * available in the queue so like all good applications it will retry
- * waiting until all the required messages are received.
- *
- * This trick has been tested and proven to work with vchiq_test,
- * Minecraft_PI, the "hello pi" examples, and various other
- * applications that are included in Raspbian.
- */
-
- if (copy_from_user(&msgbuf32,
- compat_ptr(args32.msgbufs) +
- (sizeof(compat_uptr_t) *
- (args32.msgbufcount - 1)),
- sizeof(msgbuf32)))
- return -EFAULT;
-
- msgbuf = compat_ptr(msgbuf32);
-
- if (copy_to_user(msgbufptr,
- &msgbuf,
- sizeof(msgbuf)))
- return -EFAULT;
-
- if (copy_to_user(&args->msgbufs,
- &msgbufptr,
- sizeof(msgbufptr)))
- return -EFAULT;
-
- if (put_user(1U, &args->count) ||
- put_user(completion, &args->buf) ||
- put_user(1U, &args->msgbufcount))
- return -EFAULT;
-
- ret = vchiq_ioctl(file,
- VCHIQ_IOC_AWAIT_COMPLETION,
- (unsigned long)args);
-
- /*
- * An return value of 0 here means that no messages where available
- * in the message queue. In this case the native ioctl does not
- * return any data to the application at all. Not even to update
- * msgbufcount. This functionality needs to be kept here for
- * compatibility.
- *
- * Of course, < 0 means that an error occurred and no data is being
- * returned.
- *
- * Since count and msgbufcount was forced to 1, that means
- * the only other possible return value is 1. Meaning that 1 message
- * was available, so that multiple message case does not need to be
- * handled here.
- */
- if (ret <= 0)
- return ret;
- if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
+ if (copy_from_user(&args32, argp, sizeof(args32)))
return -EFAULT;
- completion32.reason = completiontemp.reason;
- completion32.header = ptr_to_compat(completiontemp.header);
- completion32.service_userdata =
- ptr_to_compat(completiontemp.service_userdata);
- completion32.bulk_userdata =
- ptr_to_compat(completiontemp.bulk_userdata);
-
- if (copy_to_user(compat_ptr(args32.buf),
- &completion32,
- sizeof(completion32)))
- return -EFAULT;
-
- if (get_user(msgbufcount_native, &args->msgbufcount))
- return -EFAULT;
-
- if (!msgbufcount_native)
- args32.msgbufcount--;
-
- msgbufcount32 =
- &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
-
- if (copy_to_user(msgbufcount32,
- &args32.msgbufcount,
- sizeof(args32.msgbufcount)))
- return -EFAULT;
+ args = (struct vchiq_await_completion) {
+ .count = args32.count,
+ .buf = compat_ptr(args32.buf),
+ .msgbufsize = args32.msgbufsize,
+ .msgbufcount = args32.msgbufcount,
+ .msgbufs = compat_ptr(args32.msgbufs),
+ };
- return 1;
+ return vchiq_ioc_await_completion(file->private_data, &args,
+ &argp->msgbufcount);
}
struct vchiq_dequeue_message32 {
@@ -1831,28 +1777,22 @@ struct vchiq_dequeue_message32 {
static long
vchiq_compat_ioctl_dequeue_message(struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_dequeue_message32 __user *arg)
{
- struct vchiq_dequeue_message __user *args;
struct vchiq_dequeue_message32 args32;
+ struct vchiq_dequeue_message args;
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
- if (copy_from_user(&args32,
- (struct vchiq_dequeue_message32 __user *)arg,
- sizeof(args32)))
+ if (copy_from_user(&args32, arg, sizeof(args32)))
return -EFAULT;
- if (put_user(args32.handle, &args->handle) ||
- put_user(args32.blocking, &args->blocking) ||
- put_user(args32.bufsize, &args->bufsize) ||
- put_user(compat_ptr(args32.buf), &args->buf))
- return -EFAULT;
+ args = (struct vchiq_dequeue_message) {
+ .handle = args32.handle,
+ .blocking = args32.blocking,
+ .bufsize = args32.bufsize,
+ .buf = compat_ptr(args32.buf),
+ };
- return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
- (unsigned long)args);
+ return vchiq_ioc_dequeue_message(file->private_data, &args);
}
struct vchiq_get_config32 {
@@ -1866,46 +1806,45 @@ struct vchiq_get_config32 {
static long
vchiq_compat_ioctl_get_config(struct file *file,
unsigned int cmd,
- unsigned long arg)
+ struct vchiq_get_config32 __user *arg)
{
- struct vchiq_get_config __user *args;
struct vchiq_get_config32 args32;
+ struct vchiq_config config;
+ void __user *ptr;
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
- if (copy_from_user(&args32,
- (struct vchiq_get_config32 __user *)arg,
- sizeof(args32)))
+ if (copy_from_user(&args32, arg, sizeof(args32)))
return -EFAULT;
+ if (args32.config_size > sizeof(config))
+ return -EINVAL;
- if (put_user(args32.config_size, &args->config_size) ||
- put_user(compat_ptr(args32.pconfig), &args->pconfig))
+ vchiq_get_config(&config);
+ ptr = compat_ptr(args32.pconfig);
+ if (copy_to_user(ptr, &config, args32.config_size))
return -EFAULT;
- return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
+ return 0;
}
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
switch (cmd) {
case VCHIQ_IOC_CREATE_SERVICE32:
- return vchiq_compat_ioctl_create_service(file, cmd, arg);
+ return vchiq_compat_ioctl_create_service(file, cmd, argp);
case VCHIQ_IOC_QUEUE_MESSAGE32:
- return vchiq_compat_ioctl_queue_message(file, cmd, arg);
+ return vchiq_compat_ioctl_queue_message(file, cmd, argp);
case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
- return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
+ return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
case VCHIQ_IOC_AWAIT_COMPLETION32:
- return vchiq_compat_ioctl_await_completion(file, cmd, arg);
+ return vchiq_compat_ioctl_await_completion(file, cmd, argp);
case VCHIQ_IOC_DEQUEUE_MESSAGE32:
- return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
+ return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
case VCHIQ_IOC_GET_CONFIG32:
- return vchiq_compat_ioctl_get_config(file, cmd, arg);
+ return vchiq_compat_ioctl_get_config(file, cmd, argp);
default:
- return vchiq_ioctl(file, cmd, arg);
+ return vchiq_ioctl(file, cmd, (unsigned long)argp);
}
}
@@ -2018,7 +1957,7 @@ static int vchiq_release(struct inode *inode, struct file *file)
/* Release any closed services */
while (instance->completion_remove !=
instance->completion_insert) {
- struct vchiq_completion_data *completion;
+ struct vchiq_completion_data_kernel *completion;
struct vchiq_service *service;
completion = &instance->completions[
@@ -2283,7 +2222,7 @@ vchiq_keepalive_thread_func(void *v)
struct vchiq_instance *instance;
unsigned int ka_handle;
- struct vchiq_service_params params = {
+ struct vchiq_service_params_kernel params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
.callback = vchiq_keepalive_vchiq_callback,
.version = KEEPALIVE_VER,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 5a361e8e7c6c..38b10fd5d992 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -1392,7 +1392,7 @@ abort_outstanding_bulks(struct vchiq_service *service,
bulk->remote_size);
} else {
/* fabricate a matching dummy bulk */
- bulk->data = NULL;
+ bulk->data = 0;
bulk->size = 0;
bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
@@ -1764,10 +1764,10 @@ parse_rx_slots(struct vchiq_state *state)
queue->remote_insert++;
vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) %x@%pK",
+ "%d: prs %s@%pK (%d->%d) %x@%pad",
state->id, msg_type_str(type),
header, remoteport, localport,
- bulk->actual, bulk->data);
+ bulk->actual, &bulk->data);
vchiq_log_trace(vchiq_core_log_level,
"%d: prs:%d %cx li=%x ri=%x p=%x",
@@ -2316,7 +2316,7 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle)
}
EXPORT_SYMBOL(vchiq_msg_hold);
-static int vchiq_validate_params(const struct vchiq_service_params *params)
+static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
{
if (!params->callback || !params->fourcc) {
vchiq_loud_error("Can't add service, invalid params\n");
@@ -2329,7 +2329,7 @@ static int vchiq_validate_params(const struct vchiq_service_params *params)
/* Called from application thread when a client or server service is created. */
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
- const struct vchiq_service_params *params,
+ const struct vchiq_service_params_kernel *params,
int srvstate, struct vchiq_instance *instance,
vchiq_userdata_term userdata_term)
{
@@ -3015,7 +3015,8 @@ vchiq_remove_service(unsigned int handle)
* structure.
*/
enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
- void *offset, int size, void *userdata,
+ void *offset, void __user *uoffset,
+ int size, void *userdata,
enum vchiq_bulk_mode mode,
enum vchiq_bulk_dir dir)
{
@@ -3031,7 +3032,8 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
int payload[2];
if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
- !offset || vchiq_check_service(service) != VCHIQ_SUCCESS)
+ (!offset && !uoffset) ||
+ vchiq_check_service(service) != VCHIQ_SUCCESS)
goto error_exit;
switch (mode) {
@@ -3087,15 +3089,16 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
bulk->size = size;
bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
- if (vchiq_prepare_bulk_data(bulk, offset, size, dir) != VCHIQ_SUCCESS)
+ if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
+ != VCHIQ_SUCCESS)
goto unlock_error_exit;
wmb();
vchiq_log_info(vchiq_core_log_level,
- "%d: bt (%d->%d) %cx %x@%pK %pK",
+ "%d: bt (%d->%d) %cx %x@%pad %pK",
state->id, service->localport, service->remoteport, dir_char,
- size, bulk->data, userdata);
+ size, &bulk->data, userdata);
/* The slot mutex must be held when the service is being closed, so
claim it here to ensure that isn't happening */
@@ -3107,7 +3110,7 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
goto unlock_both_error_exit;
- payload[0] = (int)(long)bulk->data;
+ payload[0] = lower_32_bits(bulk->data);
payload[1] = bulk->size;
status = queue_message(state,
NULL,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index e67692879249..06200a76b871 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -231,7 +231,7 @@ struct vchiq_bulk {
short mode;
short dir;
void *userdata;
- void *data;
+ dma_addr_t data;
int size;
void *remote_data;
int remote_size;
@@ -534,9 +534,9 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
extern enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
-extern struct vchiq_service *
+struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
- const struct vchiq_service_params *params,
+ const struct vchiq_service_params_kernel *params,
int srvstate, struct vchiq_instance *instance,
vchiq_userdata_term userdata_term);
@@ -559,8 +559,8 @@ extern void
remote_event_pollall(struct vchiq_state *state);
extern enum vchiq_status
-vchiq_bulk_transfer(unsigned int handle, void *offset, int size,
- void *userdata, enum vchiq_bulk_mode mode,
+vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
+ int size, void *userdata, enum vchiq_bulk_mode mode,
enum vchiq_bulk_dir dir);
extern int
@@ -632,8 +632,8 @@ vchiq_queue_message(unsigned int handle,
** implementations must be provided. */
extern enum vchiq_status
-vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
- int dir);
+vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
+ void __user *uoffset, int size, int dir);
extern void
vchiq_complete_bulk(struct vchiq_bulk *bulk);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 3653fd99d8a1..86d77f2eeea5 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -10,6 +10,17 @@
#define VCHIQ_IOC_MAGIC 0xc4
#define VCHIQ_INVALID_HANDLE (~0)
+struct vchiq_service_params {
+ int fourcc;
+ enum vchiq_status __user (*callback)(enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *bulk_userdata);
+ void __user *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
struct vchiq_create_service {
struct vchiq_service_params params;
int is_open;
@@ -25,32 +36,32 @@ struct vchiq_queue_message {
struct vchiq_queue_bulk_transfer {
unsigned int handle;
- void *data;
+ void __user *data;
unsigned int size;
- void *userdata;
+ void __user *userdata;
enum vchiq_bulk_mode mode;
};
struct vchiq_completion_data {
enum vchiq_reason reason;
- struct vchiq_header *header;
- void *service_userdata;
- void *bulk_userdata;
+ struct vchiq_header __user *header;
+ void __user *service_userdata;
+ void __user *bulk_userdata;
};
struct vchiq_await_completion {
unsigned int count;
- struct vchiq_completion_data *buf;
+ struct vchiq_completion_data __user *buf;
unsigned int msgbufsize;
unsigned int msgbufcount; /* IN/OUT */
- void **msgbufs;
+ void * __user *msgbufs;
};
struct vchiq_dequeue_message {
unsigned int handle;
int blocking;
unsigned int bufsize;
- void *buf;
+ void __user *buf;
};
struct vchiq_get_config {
@@ -65,7 +76,7 @@ struct vchiq_set_service_option {
};
struct vchiq_dump_mem {
- void *virt_addr;
+ void __user *virt_addr;
size_t num_bytes;
};
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index e798d494f00f..9097bcbd67d8 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -179,6 +179,9 @@ struct vchiq_mmal_instance {
/* ordered workqueue to process all bulk operations */
struct workqueue_struct *bulk_wq;
+
+ /* handle for a vchiq instance */
+ struct vchiq_instance *vchiq_instance;
};
static struct mmal_msg_context *
@@ -1840,6 +1843,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex);
+ vchiq_shutdown(instance->vchiq_instance);
flush_workqueue(instance->bulk_wq);
destroy_workqueue(instance->bulk_wq);
@@ -1856,9 +1860,10 @@ EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
+ int err = -ENODEV;
struct vchiq_mmal_instance *instance;
static struct vchiq_instance *vchiq_instance;
- struct vchiq_service_params params = {
+ struct vchiq_service_params_kernel params = {
.version = VC_MMAL_VER,
.version_min = VC_MMAL_MIN_VER,
.fourcc = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
@@ -1890,17 +1895,21 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
status = vchiq_connect(vchiq_instance);
if (status) {
pr_err("Failed to connect VCHI instance (status=%d)\n", status);
- return -EIO;
+ err = -EIO;
+ goto err_shutdown_vchiq;
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance)
- return -ENOMEM;
+ if (!instance) {
+ err = -ENOMEM;
+ goto err_shutdown_vchiq;
+ }
mutex_init(&instance->vchiq_mutex);
instance->bulk_scratch = vmalloc(PAGE_SIZE);
+ instance->vchiq_instance = vchiq_instance;
mutex_init(&instance->context_map_lock);
idr_init_base(&instance->context_map, 1);
@@ -1932,7 +1941,9 @@ err_close_services:
err_free:
vfree(instance->bulk_scratch);
kfree(instance);
- return -ENODEV;
+err_shutdown_vchiq:
+ vchiq_shutdown(vchiq_instance);
+ return err;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_init);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 76de1fd568eb..09ab6d6f2429 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -555,7 +555,7 @@ static int device_init_rd0_ring(struct vnt_private *priv)
}
if (i > 0)
- priv->aRD0Ring[i-1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
+ priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
priv->pCurrRD[0] = &priv->aRD0Ring[0];
return 0;
@@ -596,12 +596,12 @@ static int device_init_rd1_ring(struct vnt_private *priv)
goto err_free_rd;
}
- desc->next = &priv->aRD1Ring[(i+1) % priv->opts.rx_descs1];
+ desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
}
if (i > 0)
- priv->aRD1Ring[i-1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
+ priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
priv->pCurrRD[1] = &priv->aRD1Ring[0];
return 0;
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index c7888c4e96f2..6e2bd16ef384 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -621,7 +621,7 @@ do { \
/* set the chip with current BCN length */
#define MACvSetCurrBCNLength(iobase, wCurrBCNLength) \
- VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2, \
+ VNSvOutPortW(iobase + MAC_REG_BCNDMACTL + 2, \
wCurrBCNLength)
#define MACvReadBSSIDAddress(iobase, pbyEtherAddr) \
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 4778439e8757..477d19314634 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -367,52 +367,52 @@ s_uGetRTSCTSDuration(
case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */
uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
break;
case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
break;
case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */
uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
break;
case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
break;
case CTSDUR_BA_F0: /* CTSDuration_ba_f0 */
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
+ uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate - RATE_18M], bNeedAck);
break;
case CTSDUR_BA_F1: /* CTSDuration_ba_f1 */
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
- uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
+ uDurTime = pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate - RATE_18M], bNeedAck);
break;
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
new file mode 100644
index 000000000000..510edd12ed19
--- /dev/null
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2020, Silicon Laboratories, Inc.
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Silicon Labs WFxxx devicetree bindings
+
+maintainers:
+ - Jérôme Pouiller <jerome.pouiller@silabs.com>
+
+description:
+ The WFxxx chip series can be connected via SPI or via SDIO.
+
+ For SDIO':'
+
+ The driver is able to detect a WFxxx chip on the SDIO bus by matching its
+ Vendor ID and Product ID. However, the driver will only provide limited
+ features in this case. Thus declaring the WFxxx chip in the device tree is
+ recommended (and may become mandatory in the future).
+
+ In addition, it is recommended to declare an mmc-pwrseq on the SDIO host
+ above the WFx. Without it, you may encounter issues with warm boot. The
+ mmc-pwrseq should be compatible with mmc-pwrseq-simple. Please consult
+ Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more
+ information.
+
+ For SPI':'
+
+ In addition to the properties below, please consult
+ Documentation/devicetree/bindings/spi/spi-controller.yaml for optional
+ SPI-related properties.
+
+ Note that in addition to the properties below, the WFx driver also supports
+ `mac-address` and `local-mac-address` as described in
+ Documentation/devicetree/bindings/net/ethernet.txt
+
+properties:
+ compatible:
+ const: silabs,wf200
+ reg:
+ description:
+ When used on the SDIO bus, <reg> must be set to 1. When used on the SPI
+ bus, it is the chip select address of the device as defined in the SPI
+ devices bindings.
+ maxItems: 1
+ spi-max-frequency:
+ description: (SPI only) Maximum SPI clocking speed of device in Hz.
+ maxItems: 1
+ interrupts:
+ description: The interrupt line. The triggers IRQ_TYPE_LEVEL_HIGH and
+ IRQ_TYPE_EDGE_RISING are both supported by the chip and the driver. When
+ SPI is used, this property is required. When SDIO is used, the "in-band"
+ interrupt provided by the SDIO bus is used unless an interrupt is defined
+ in the Device Tree.
+ maxItems: 1
+ reset-gpios:
+ description: (SPI only) Phandle of the gpio that will be used to reset the
+ chip during probe. Without this property, you may encounter issues with
+ warm boot. (For legacy reasons, the gpio is inverted when compatible ==
+ "silabs,wfx-spi".)
+
+ For SDIO, the reset gpio should be declared using an mmc-pwrseq.
+ maxItems: 1
+ wakeup-gpios:
+ description: Phandle of the gpio that will be used to wake up the chip.
+ Without this property, the driver will disable most power-saving features.
+ maxItems: 1
+ config-file:
+ description: Use an alternative file as the PDS. Default is `wf200.pds`.
+ Only necessary for development/debug purposes.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wfx@0 {
+ compatible = "silabs,wf200";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_irq &wfx_gpios>;
+ reg = <0>;
+ interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ spi-max-frequency = <42000000>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ wfx_pwrseq: wfx_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_reset>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+
+ mmc0 {
+ mmc-pwrseq = <&wfx_pwrseq>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mmc@1 {
+ compatible = "silabs,wf200";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_wakeup>;
+ reg = <1>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ };
+ };
+...
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
deleted file mode 100644
index 17db67559f5e..000000000000
--- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-The WFxxx chip series can be connected via SPI or via SDIO.
-
-SPI
----
-
-You have to declare the WFxxx chip in your device tree.
-
-Required properties:
- - compatible: Should be "silabs,wf200"
- - reg: Chip select address of device
- - spi-max-frequency: Maximum SPI clocking speed of device in Hz
- - interrupts-extended: Should contain interrupt line (interrupt-parent +
- interrupt can also been used). Trigger should be `IRQ_TYPE_EDGE_RISING`.
-
-Optional properties:
- - reset-gpios: phandle of gpio that will be used to reset chip during probe.
- Without this property, you may encounter issues with warm boot.
- (Legacy: when compatible == "silabs,wfx-spi", the gpio is inverted.)
-
-Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional
-SPI connection related properties,
-
-Example:
-
-&spi1 {
- wfx {
- compatible = "silabs,wf200";
- pinctrl-names = "default";
- pinctrl-0 = <&wfx_irq &wfx_gpios>;
- interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
- wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
- reg = <0>;
- spi-max-frequency = <42000000>;
- };
-};
-
-
-SDIO
-----
-
-The driver is able to detect a WFxxx chip on SDIO bus by matching its Vendor ID
-and Product ID. However, driver will only provide limited features in this
-case. Thus declaring WFxxx chip in device tree is strongly recommended (and may
-become mandatory in the future).
-
-Required properties:
- - compatible: Should be "silabs,wf200"
- - reg: Should be 1
-
-In addition, it is recommended to declare a mmc-pwrseq on SDIO host above WFx.
-Without it, you may encounter issues with warm boot. mmc-pwrseq should be
-compatible with mmc-pwrseq-simple. Please consult
-Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more
-information.
-
-Example:
-
-/ {
- wfx_pwrseq: wfx_pwrseq {
- compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
- pinctrl-0 = <&wfx_reset>;
- reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
- };
-};
-
-&mmc1 {
- mmc-pwrseq = <&wfx_pwrseq>;
- #address-size = <1>;
- #size = <0>;
-
- mmc@1 {
- compatible = "silabs,wf200";
- reg = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&wfx_wakeup>;
- wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
- };
-};
-
-Note that #address-size and #size shoud already be defined in node mmc1, but it
-is rarely the case.
-
-Common properties
------------------
-
-Some properties are recognized either by SPI and SDIO versions:
- - wakeup-gpios: phandle of gpio that will be used to wake-up chip. Without
- this property, driver will disable most of power saving features.
- - config-file: Use an alternative file as PDS. Default is `wf200.pds`. Only
- necessary for development/debug purpose.
- - slk_key: String representing hexadecimal value of secure link key to use.
- Must contains 64 hexadecimal digits. Not supported in current version.
-
-WFx driver also supports `mac-address` and `local-mac-address` as described in
-Documentation/devicetree/bindings/net/ethernet.txt
-
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index 42bf36d43970..1b4bc2af94b6 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,25 +1,6 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- - The HIF API is not yet clean enough.
-
- - The code that check the corectness of received message (in rx_helper()) can
- be improved. See:
- https://lore.kernel.org/driverdev-devel/2302785.6C7ODC2LYm@pc-42/
-
- As suggested by Felix, rate control could be improved following this idea:
https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
- - The "secure link" feature should either be developed (using the kernel
- crypto API) or dropped.
-
- - The device allows filtering multicast traffic. The code to support these
- filters exists in the driver but it is disabled because it has never been
- tested.
-
- - In wfx_cmd_send(), "async" allows sending a command without waiting for the
- reply. It may help in some situations, but it is not yet used. In addition,
- it may cause some trouble:
- https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
- So, fix it (by replacing the mutex with a semaphore) or drop it.
-
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 53ae0b5abcdd..ed53d0b45592 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -2,7 +2,7 @@
/*
* Interrupt bottom half (BH).
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/gpio/consumer.h>
@@ -12,31 +12,45 @@
#include "wfx.h"
#include "hwio.h"
#include "traces.h"
-#include "secure_link.h"
#include "hif_rx.h"
#include "hif_api_cmd.h"
static void device_wakeup(struct wfx_dev *wdev)
{
+ int max_retry = 3;
+
if (!wdev->pdata.gpio_wakeup)
return;
- if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup))
+ if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
return;
- gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
if (wfx_api_older_than(wdev, 1, 4)) {
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
if (!completion_done(&wdev->hif.ctrl_ready))
usleep_range(2000, 2500);
- } else {
+ return;
+ }
+ for (;;) {
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
// completion.h does not provide any function to wait for a
// completion without consuming it (a kind of
// wait_for_completion_done_timeout()). So we have to emulate
// it.
if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
- msecs_to_jiffies(2) + 1))
+ msecs_to_jiffies(2))) {
complete(&wdev->hif.ctrl_ready);
- else
+ return;
+ } else if (max_retry-- > 0) {
+ // Older firmware has a race in the sleep/wake-up process.
+ // Redoing the process is sufficient to unfreeze the
+ // chip.
dev_err(wdev->dev, "timeout while waking up chip\n");
+ gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
+ usleep_range(2000, 2500);
+ } else {
+ dev_err(wdev->dev, "max wake-up retries reached\n");
+ return;
+ }
}
}
@@ -73,20 +87,11 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
_trace_piggyback(piggyback, false);
hif = (struct hif_msg *)skb->data;
- WARN(hif->encrypted & 0x1, "unsupported encryption type");
- if (hif->encrypted == 0x2) {
- if (WARN(read_len < sizeof(struct hif_sl_msg), "corrupted read"))
- goto err;
- computed_len = le16_to_cpu(((struct hif_sl_msg *)hif)->len);
- computed_len = round_up(computed_len - sizeof(u16), 16);
- computed_len += sizeof(struct hif_sl_msg);
- computed_len += sizeof(struct hif_sl_tag);
- } else {
- if (WARN(read_len < sizeof(struct hif_msg), "corrupted read"))
- goto err;
- computed_len = le16_to_cpu(hif->len);
- computed_len = round_up(computed_len, 2);
- }
+ WARN(hif->encrypted & 0x3, "encryption is unsupported");
+ if (WARN(read_len < sizeof(struct hif_msg), "corrupted read"))
+ goto err;
+ computed_len = le16_to_cpu(hif->len);
+ computed_len = round_up(computed_len, 2);
if (computed_len != read_len) {
dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
computed_len, read_len);
@@ -94,16 +99,6 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
hif, read_len, true);
goto err;
}
- if (hif->encrypted == 0x2) {
- if (wfx_sl_decode(wdev, (struct hif_sl_msg *)hif)) {
- dev_kfree_skb(skb);
- // If frame was a confirmation, expect trouble in next
- // exchange. However, it is harmless to fail to decode
- // an indication frame, so try to continue. Anyway,
- // piggyback is probably correct.
- return piggyback;
- }
- }
if (!(hif->id & HIF_ID_IS_INDICATION)) {
(*is_cnf)++;
@@ -184,23 +179,7 @@ static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
hif->seqnum = wdev->hif.tx_seqnum;
wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
- if (wfx_is_secure_command(wdev, hif->id)) {
- len = round_up(len - sizeof(hif->len), 16) + sizeof(hif->len) +
- sizeof(struct hif_sl_msg_hdr) +
- sizeof(struct hif_sl_tag);
- // AES supports encryption in place. However, mac80211 accesses the
- // 802.11 header after the frame is sent (to get MAC addresses).
- // So, keep the original buffer clear.
- data = kmalloc(len, GFP_KERNEL);
- if (!data)
- goto end;
- is_encrypted = true;
- ret = wfx_sl_encode(wdev, hif, data);
- if (ret)
- goto end;
- } else {
- data = hif;
- }
+ data = hif;
WARN(len > wdev->hw_caps.size_inp_ch_buf,
"%s: request exceed WFx capability: %zu > %d\n", __func__,
len, wdev->hw_caps.size_inp_ch_buf);
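The retry loop added to device_wakeup() above emulates the missing
wait_for_completion_done_timeout(): wait for the completion, then immediately
re-arm it so the token is not consumed. The pattern in isolation, as a
standalone sketch (not driver code):

static bool completion_peek_timeout(struct completion *c, unsigned long ms)
{
	if (!wait_for_completion_timeout(c, msecs_to_jiffies(ms)))
		return false;	/* timed out, token never arrived */
	complete(c);		/* put the token back for real consumers */
	return true;
}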
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
index 4b73437869e1..78c49329e22a 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/staging/wfx/bh.h
@@ -2,7 +2,7 @@
/*
* Interrupt bottom half.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_BH_H
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
index 0370b6c59863..ca04b3da6204 100644
--- a/drivers/staging/wfx/bus.h
+++ b/drivers/staging/wfx/bus.h
@@ -2,7 +2,7 @@
/*
* Common bus abstraction layer.
*
- * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_BUS_H
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index 496bfc8bbacc..e06d7e1ebe9c 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -2,7 +2,7 @@
/*
* SDIO interface.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/module.h>
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index d19c0478e8be..a99125d1a30d 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -2,7 +2,7 @@
/*
* SPI interface.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2011, Sagrad Inc.
* Copyright (c) 2010, ST-Ericsson
*/
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index 6fb078880742..385f2d42a0e2 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -2,7 +2,7 @@
/*
* Datapath implementation.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
@@ -17,6 +17,9 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt)
{
int params, tid;
+ if (wfx_api_older_than(wvif->wdev, 3, 6))
+ return;
+
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
params = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
@@ -41,7 +44,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
memset(hdr, 0, sizeof(*hdr));
if (arg->status == HIF_STATUS_RX_FAIL_MIC)
- hdr->flag |= RX_FLAG_MMIC_ERROR;
+ hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED;
else if (arg->status)
goto drop;
@@ -70,10 +73,10 @@ void wfx_rx_cb(struct wfx_vif *wvif,
hdr->signal = arg->rcpi_rssi / 2 - 110;
hdr->antenna = 0;
- if (arg->rx_flags.encryp)
+ if (arg->encryp)
hdr->flag |= RX_FLAG_DECRYPTED;
- // Block ack negociation is offloaded by the firmware. However,
+ // Block ack negotiation is offloaded by the firmware. However,
// re-ordering must be done by the mac80211.
if (ieee80211_is_action(frame->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
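The early return added above gates block-ack handling on the firmware API
version. wfx_api_older_than() is not part of this hunk; a plausible
implementation, based on the api_version_major/minor fields of struct
hif_ind_startup stored in wdev->hw_caps:

static inline bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
{
	if (wdev->hw_caps.api_version_major != major)
		return wdev->hw_caps.api_version_major < major;
	return wdev->hw_caps.api_version_minor < minor;
}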
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index 125dbfc1f875..4c0da37f2084 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -2,7 +2,7 @@
/*
* Datapath implementation.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_DATA_RX_H
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 3acf4eb0214d..36b36ef39d05 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -2,7 +2,7 @@
/*
* Datapath implementation.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
@@ -34,6 +34,10 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
// WFx only supports 2GHz; otherwise, the band information should be
// retrieved from ieee80211_tx_info
band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (rate->idx >= band->n_bitrates) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
return band->bitrates[rate->idx].hw_value;
}
@@ -234,7 +238,7 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
int i;
bool finished;
- // Firmware is not able to mix rates with differents flags
+ // Firmware is not able to mix rates with different flags
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
@@ -300,23 +304,14 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
return rate_id;
}
-static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev,
- struct ieee80211_tx_info *tx_info)
+static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
{
- struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
- struct hif_ht_tx_parameters ret = { };
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- ret.frame_format = HIF_FRAME_FORMAT_NON_HT;
- else if (!(rate->flags & IEEE80211_TX_RC_GREEN_FIELD))
- ret.frame_format = HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS))
+ return HIF_FRAME_FORMAT_NON_HT;
+ else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD))
+ return HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
else
- ret.frame_format = HIF_FRAME_FORMAT_GF_HT_11N;
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- ret.short_gi = 1;
- if (tx_info->flags & IEEE80211_TX_CTL_STBC)
- ret.stbc = 0; // FIXME: Not yet supported by firmware?
- return ret;
+ return HIF_FRAME_FORMAT_GF_HT_11N;
}
static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
@@ -325,6 +320,8 @@ static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
if (!hw_key)
return 0;
+ if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return 0;
mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
return hw_key->icv_len + mic_space;
}
@@ -334,7 +331,6 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
{
struct hif_msg *hif_msg;
struct hif_req_tx *req;
- struct wfx_tx_priv *tx_priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -348,15 +344,11 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// From now on, tx_info->control is unusable
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
- // Fill tx_priv
- tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
- if (ieee80211_has_protected(hdr->frame_control))
- tx_priv->hw_key = hw_key;
// Fill hif_msg
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
WARN(offset & 1, "attempt to transmit an unaligned frame");
- skb_put(skb, wfx_tx_get_icv_len(tx_priv->hw_key));
+ skb_put(skb, wfx_tx_get_icv_len(hw_key));
skb_push(skb, wmsg_len);
memset(skb->data, 0, wmsg_len);
hif_msg = (struct hif_msg *)skb->data;
@@ -380,14 +372,16 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
req->packet_id |= queue_id << 28;
- req->data_flags.fc_offset = offset;
+ req->fc_offset = offset;
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
- req->data_flags.after_dtim = 1;
- req->queue_id.peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
+ req->after_dtim = 1;
+ req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
// Queue indexes are inverted between firmware and Linux
- req->queue_id.queue_id = 3 - queue_id;
- req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
- req->tx_flags.retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+ req->queue_id = 3 - queue_id;
+ req->retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+ req->frame_format = wfx_tx_get_frame_format(tx_info);
+ if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ req->short_gi = 1;
// Auxiliary operations
wfx_tx_queues_put(wvif, skb);
@@ -439,10 +433,13 @@ static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
unsigned int offset = sizeof(struct hif_msg) +
sizeof(struct hif_req_tx) +
- req->data_flags.fc_offset;
+ req->fc_offset;
- WARN_ON(!wvif);
- wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ if (!wvif) {
+ pr_warn("%s: vif associated with the skb does not exist anymore\n", __func__);
+ return;
+ }
+ wfx_tx_policy_put(wvif, req->retry_policy_index);
skb_pull(skb, offset);
ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
}
@@ -488,7 +485,6 @@ static void wfx_tx_fill_rates(struct wfx_dev *wdev,
void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
{
struct ieee80211_tx_info *tx_info;
- const struct wfx_tx_priv *tx_priv;
struct wfx_vif *wvif;
struct sk_buff *skb;
@@ -498,18 +494,15 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
arg->packet_id);
return;
}
+ tx_info = IEEE80211_SKB_CB(skb);
wvif = wdev_to_wvif(wdev, ((struct hif_msg *)skb->data)->interface);
WARN_ON(!wvif);
if (!wvif)
return;
- tx_info = IEEE80211_SKB_CB(skb);
- tx_priv = wfx_skb_tx_priv(skb);
- _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
- // You can touch to tx_priv, but don't touch to tx_info->status.
+ // Note that wfx_pending_get_pkt_us_delay() gets its data from tx_info
+ _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
wfx_tx_fill_rates(wdev, tx_info, arg);
- skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
-
// From now on, you can touch tx_info->status, but do not touch
// tx_priv anymore
// FIXME: use ieee80211_tx_info_clear_status()
@@ -525,8 +518,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
} else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
- WARN(!arg->tx_result_flags.requeue,
- "incoherent status and result_flags");
+ WARN(!arg->requeue, "incoherent status and result_flags");
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
schedule_work(&wvif->update_tim_work);
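As built in wfx_tx_inner() above, req->packet_id packs three fields: the low
16 bits hold a driver-side counter (assigned earlier in the function, outside
this hunk), bits 16-27 the 802.11 sequence number, and bits 28-31 the queue.
A decoding sketch (hypothetical helper; masks inferred from the shifts):

static inline void wfx_unpack_packet_id(u32 packet_id, u16 *counter,
					u16 *sn, u8 *queue_id)
{
	*counter = packet_id & 0xffff;
	*sn = (packet_id >> 16) & 0xfff;	/* 12-bit sequence number */
	*queue_id = packet_id >> 28;
}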
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index cff7b9ff99a9..46c9fff7a870 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -2,7 +2,7 @@
/*
* Datapath implementation.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_DATA_TX_H
@@ -35,8 +35,7 @@ struct tx_policy_cache {
struct wfx_tx_priv {
ktime_t xmit_timestamp;
- struct ieee80211_key_conf *hw_key;
-} __packed;
+};
void wfx_tx_policy_init(struct wfx_vif *wvif);
void wfx_tx_policy_upload_work(struct work_struct *work);
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index 3f1712b7c919..eedada78c25f 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -2,7 +2,7 @@
/*
* Debugfs interface.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/debugfs.h>
@@ -32,7 +32,7 @@ static const struct trace_print_flags wfx_reg_print_map[] = {
};
static const char *get_symbol(unsigned long val,
- const struct trace_print_flags *symbol_array)
+ const struct trace_print_flags *symbol_array)
{
int i;
@@ -230,21 +230,6 @@ static const struct file_operations wfx_send_pds_fops = {
.write = wfx_send_pds_write,
};
-static ssize_t wfx_burn_slk_key_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct wfx_dev *wdev = file->private_data;
-
- dev_info(wdev->dev, "this driver does not support secure link\n");
- return -EINVAL;
-}
-
-static const struct file_operations wfx_burn_slk_key_fops = {
- .open = simple_open,
- .write = wfx_burn_slk_key_write,
-};
-
struct dbgfs_hif_msg {
struct wfx_dev *wdev;
struct completion complete;
@@ -267,7 +252,7 @@ static ssize_t wfx_send_hif_msg_write(struct file *file,
if (count < sizeof(struct hif_msg))
return -EINVAL;
- // wfx_cmd_send() chekc that reply buffer is wide enough, but do not
+ // wfx_cmd_send() checks that the reply buffer is wide enough, but does not
// return the precise length read. Users have to know how many bytes should
// be read. Filling the reply buffer with a memory pattern may help the user.
memset(context->reply, 0xFF, sizeof(context->reply));
@@ -299,8 +284,8 @@ static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
return ret;
if (context->ret < 0)
return context->ret;
- // Be carefull, write() is waiting for a full message while read()
- // only return a payload
+ // Be careful, write() is waiting for a full message while read()
+ // only returns a payload
if (copy_to_user(user_buf, context->reply, count))
return -EFAULT;
@@ -366,8 +351,6 @@ int wfx_debug_init(struct wfx_dev *wdev)
debugfs_create_file("tx_power_loop", 0444, d, wdev,
&wfx_tx_power_loop_fops);
debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
- debugfs_create_file("burn_slk_key", 0200, d, wdev,
- &wfx_burn_slk_key_fops);
debugfs_create_file("send_hif_msg", 0600, d, wdev,
&wfx_send_hif_msg_fops);
debugfs_create_file("ps_timeout", 0600, d, wdev, &wfx_ps_timeout_fops);
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index 22d3b684f04f..1b8aec02d169 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -2,7 +2,7 @@
/*
* Firmware loading.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/firmware.h>
@@ -94,7 +94,7 @@ static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
tmp = buf;
}
ret = sram_buf_write(wdev, addr, tmp, len);
- if (!virt_addr_valid(buf))
+ if (tmp != buf)
kfree(tmp);
return ret;
}
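The fwio.c change frees the bounce buffer based on a direct pointer comparison
instead of re-testing virt_addr_valid() on the caller's buffer. The
surrounding pattern, roughly (simplified from sram_write_dma_safe(); helper
signatures assumed):

static int write_dma_safe(struct wfx_dev *wdev, u32 addr,
			  const u8 *buf, size_t len)
{
	const u8 *tmp = buf;
	int ret;

	/* Bounce non-DMA-able memory (e.g. vmalloc) through kmalloc */
	if (!virt_addr_valid(buf)) {
		tmp = kmemdup(buf, len, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
	}
	ret = sram_buf_write(wdev, addr, tmp, len);
	if (tmp != buf)		/* free only if a bounce copy was made */
		kfree(tmp);
	return ret;
}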
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 21cde19cff75..11bc1a58edae 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -2,15 +2,15 @@
/*
* WFx hardware interface definitions
*
- * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
#ifndef WFX_HIF_API_CMD_H
#define WFX_HIF_API_CMD_H
-#include "hif_api_general.h"
+#include <linux/ieee80211.h>
-#define HIF_API_SSID_SIZE API_SSID_SIZE
+#include "hif_api_general.h"
enum hif_requests_ids {
HIF_REQ_ID_RESET = 0x0a,
@@ -60,21 +60,15 @@ enum hif_indications_ids {
HIF_IND_ID_EVENT = 0x85
};
-union hif_commands_ids {
- enum hif_requests_ids request;
- enum hif_confirmations_ids confirmation;
- enum hif_indications_ids indication;
-};
-
-struct hif_reset_flags {
+struct hif_req_reset {
u8 reset_stat:1;
u8 reset_all_int:1;
u8 reserved1:6;
u8 reserved2[3];
} __packed;
-struct hif_req_reset {
- struct hif_reset_flags reset_flags;
+struct hif_cnf_reset {
+ __le32 status;
} __packed;
struct hif_req_read_mib {
@@ -99,52 +93,23 @@ struct hif_cnf_write_mib {
__le32 status;
} __packed;
-struct hif_ie_flags {
+struct hif_req_update_ie {
u8 beacon:1;
u8 probe_resp:1;
u8 probe_req:1;
u8 reserved1:5;
u8 reserved2;
-} __packed;
-
-struct hif_ie_tlv {
- u8 type;
- u8 length;
- u8 data[];
-} __packed;
-
-struct hif_req_update_ie {
- struct hif_ie_flags ie_flags;
__le16 num_ies;
- struct hif_ie_tlv ie[];
+ struct element ie[];
} __packed;
struct hif_cnf_update_ie {
__le32 status;
} __packed;
-struct hif_scan_type {
- u8 type:1;
- u8 mode:1;
- u8 reserved:6;
-} __packed;
-
-struct hif_scan_flags {
- u8 fbg:1;
- u8 reserved1:1;
- u8 pre:1;
- u8 reserved2:5;
-} __packed;
-
-struct hif_auto_scan_param {
- __le16 interval;
- u8 reserved;
- s8 rssi_thr;
-} __packed;
-
struct hif_ssid_def {
__le32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed;
#define HIF_API_MAX_NB_SSIDS 2
@@ -152,10 +117,17 @@ struct hif_ssid_def {
struct hif_req_start_scan_alt {
u8 band;
- struct hif_scan_type scan_type;
- struct hif_scan_flags scan_flags;
+ u8 maintain_current_bss:1;
+ u8 periodic:1;
+ u8 reserved1:6;
+ u8 disallow_ps:1;
+ u8 reserved2:1;
+ u8 short_preamble:1;
+ u8 reserved3:5;
u8 max_transmit_rate;
- struct hif_auto_scan_param auto_scan_param;
+ __le16 periodic_interval;
+ u8 reserved4;
+ s8 periodic_rssi_thr;
u8 num_of_probe_requests;
u8 probe_delay;
u8 num_of_ssids;
@@ -201,53 +173,32 @@ enum hif_frame_format {
HIF_FRAME_FORMAT_GF_HT_11N = 0x2
};
-enum hif_stbc {
- HIF_STBC_NOT_ALLOWED = 0x0,
- HIF_STBC_ALLOWED = 0x1
-};
-
-struct hif_queue {
+struct hif_req_tx {
+ // packet_id is not interpreted by the device, so it is not necessary to
+ // declare it little endian
+ u32 packet_id;
+ u8 max_tx_rate;
u8 queue_id:2;
u8 peer_sta_id:4;
- u8 reserved:2;
-} __packed;
-
-struct hif_data_flags {
+ u8 reserved1:2;
u8 more:1;
u8 fc_offset:3;
u8 after_dtim:1;
- u8 reserved:3;
-} __packed;
-
-struct hif_tx_flags {
+ u8 reserved2:3;
u8 start_exp:1;
- u8 reserved:3;
+ u8 reserved3:3;
u8 retry_policy_index:4;
-} __packed;
-
-struct hif_ht_tx_parameters {
+ __le32 reserved4;
+ __le32 expire_time;
u8 frame_format:4;
u8 fec_coding:1;
u8 short_gi:1;
- u8 reserved1:1;
+ u8 reserved5:1;
u8 stbc:1;
- u8 reserved2;
+ u8 reserved6;
u8 aggregation:1;
- u8 reserved3:7;
- u8 reserved4;
-} __packed;
-
-struct hif_req_tx {
- // packet_id is not interpreted by the device, so it is not necessary to
- // declare it little endian
- u32 packet_id;
- u8 max_tx_rate;
- struct hif_queue queue_id;
- struct hif_data_flags data_flags;
- struct hif_tx_flags tx_flags;
- __le32 reserved;
- __le32 expire_time;
- struct hif_ht_tx_parameters ht_tx_parameters;
+ u8 reserved7:7;
+ u8 reserved8;
u8 frame[];
} __packed;
@@ -258,15 +209,6 @@ enum hif_qos_ackplcy {
HIF_QOS_ACKPLCY_BLCKACK = 0x3
};
-struct hif_tx_result_flags {
- u8 aggr:1;
- u8 requeue:1;
- u8 ack_policy:2;
- u8 txop_limit:1;
- u8 reserved1:3;
- u8 reserved2;
-} __packed;
-
struct hif_cnf_tx {
__le32 status;
// packet_id is copied from struct hif_req_tx without being interpreted
@@ -274,7 +216,12 @@ struct hif_cnf_tx {
u32 packet_id;
u8 txed_rate;
u8 ack_failures;
- struct hif_tx_result_flags tx_result_flags;
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
__le32 media_delay;
__le32 tx_queue_delay;
} __packed;
@@ -282,7 +229,7 @@ struct hif_cnf_tx {
struct hif_cnf_multi_transmit {
u8 num_tx_confs;
u8 reserved[3];
- struct hif_cnf_tx tx_conf_payload[];
+ struct hif_cnf_tx tx_conf_payload[];
} __packed;
enum hif_ri_flags_encrypt {
@@ -293,7 +240,12 @@ enum hif_ri_flags_encrypt {
HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4
};
-struct hif_rx_flags {
+struct hif_ind_rx {
+ __le32 status;
+ u8 channel_number;
+ u8 reserved1;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
u8 encryp:3;
u8 in_aggr:1;
u8 first_aggr:1;
@@ -305,7 +257,7 @@ struct hif_rx_flags {
u8 match_ssid:1;
u8 match_bssid:1;
u8 more:1;
- u8 reserved1:1;
+ u8 reserved2:1;
u8 ht:1;
u8 stbc:1;
u8 match_uc_addr:1;
@@ -313,23 +265,13 @@ struct hif_rx_flags {
u8 match_bc_addr:1;
u8 key_type:1;
u8 key_index:4;
- u8 reserved2:1;
+ u8 reserved3:1;
u8 peer_sta_id:4;
- u8 reserved3:2;
- u8 reserved4:1;
-} __packed;
-
-struct hif_ind_rx {
- __le32 status;
- u8 channel_number;
- u8 reserved;
- u8 rxed_rate;
- u8 rcpi_rssi;
- struct hif_rx_flags rx_flags;
+ u8 reserved4:2;
+ u8 reserved5:1;
u8 frame[];
} __packed;
-
struct hif_req_edca_queue_params {
u8 queue_id;
u8 reserved1;
@@ -346,28 +288,24 @@ struct hif_cnf_edca_queue_params {
__le32 status;
} __packed;
-struct hif_join_flags {
- u8 reserved1:2;
- u8 force_no_beacon:1;
- u8 force_with_ind:1;
- u8 reserved2:4;
-} __packed;
-
struct hif_req_join {
u8 infrastructure_bss_mode:1;
u8 reserved1:7;
u8 band;
u8 channel_number;
- u8 reserved;
+ u8 reserved2;
u8 bssid[ETH_ALEN];
__le16 atim_window;
u8 short_preamble:1;
- u8 reserved2:7;
+ u8 reserved3:7;
u8 probe_for_join;
- u8 reserved3;
- struct hif_join_flags join_flags;
+ u8 reserved4;
+ u8 reserved5:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved6:4;
__le32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
__le32 beacon_interval;
__le32 basic_rate_set;
} __packed;
@@ -380,13 +318,9 @@ struct hif_ind_join_complete {
__le32 status;
} __packed;
-struct hif_bss_flags {
+struct hif_req_set_bss_params {
u8 lost_count_only:1;
u8 reserved:7;
-} __packed;
-
-struct hif_req_set_bss_params {
- struct hif_bss_flags bss_flags;
u8 beacon_lost_count;
__le16 aid;
__le32 operational_rate_set;
@@ -396,14 +330,10 @@ struct hif_cnf_set_bss_params {
__le32 status;
} __packed;
-struct hif_pm_mode {
+struct hif_req_set_pm_mode {
u8 enter_psm:1;
u8 reserved:6;
u8 fast_psm:1;
-} __packed;
-
-struct hif_req_set_pm_mode {
- struct hif_pm_mode pm_mode;
u8 fast_psm_idle_period;
u8 ap_psm_change_period;
u8 min_auto_ps_poll_period;
@@ -419,7 +349,6 @@ struct hif_ind_set_pm_mode_cmpl {
u8 reserved[3];
} __packed;
-
struct hif_req_start {
u8 mode;
u8 band;
@@ -432,7 +361,7 @@ struct hif_req_start {
u8 reserved3:7;
u8 reserved4;
u8 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
__le32 basic_rate_set;
} __packed;
@@ -440,11 +369,6 @@ struct hif_cnf_start {
__le32 status;
} __packed;
-enum hif_beacon {
- HIF_BEACON_STOP = 0x0,
- HIF_BEACON_START = 0x1
-};
-
struct hif_req_beacon_transmit {
u8 enable_beaconing;
u8 reserved[3];
@@ -457,20 +381,11 @@ struct hif_cnf_beacon_transmit {
#define HIF_LINK_ID_MAX 14
#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1)
-enum hif_sta_map_direction {
- HIF_STA_MAP = 0x0,
- HIF_STA_UNMAP = 0x1
-};
-
-struct hif_map_link_flags {
- u8 map_direction:1;
- u8 mfpc:1;
- u8 reserved:6;
-} __packed;
-
struct hif_req_map_link {
u8 mac_addr[ETH_ALEN];
- struct hif_map_link_flags map_link_flags;
+ u8 unmap:1;
+ u8 mfpc:1;
+ u8 reserved:6;
u8 peer_sta_id;
} __packed;
@@ -478,16 +393,12 @@ struct hif_cnf_map_link {
__le32 status;
} __packed;
-struct hif_suspend_resume_flags {
+struct hif_ind_suspend_resume_tx {
u8 resume:1;
u8 reserved1:2;
u8 bc_mc_only:1;
u8 reserved2:4;
u8 reserved3;
-} __packed;
-
-struct hif_ind_suspend_resume_tx {
- struct hif_suspend_resume_flags suspend_resume_flags;
__le16 peer_sta_set;
} __packed;
@@ -582,25 +493,23 @@ struct hif_igtk_group_key {
u8 ipn[HIF_API_IPN_SIZE];
} __packed;
-union hif_privacy_key_data {
- struct hif_wep_pairwise_key wep_pairwise_key;
- struct hif_wep_group_key wep_group_key;
- struct hif_tkip_pairwise_key tkip_pairwise_key;
- struct hif_tkip_group_key tkip_group_key;
- struct hif_aes_pairwise_key aes_pairwise_key;
- struct hif_aes_group_key aes_group_key;
- struct hif_wapi_pairwise_key wapi_pairwise_key;
- struct hif_wapi_group_key wapi_group_key;
- struct hif_igtk_group_key igtk_group_key;
-};
-
struct hif_req_add_key {
u8 type;
u8 entry_index;
u8 int_id:2;
u8 reserved1:6;
u8 reserved2;
- union hif_privacy_key_data key;
+ union {
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
+ } key;
} __packed;
struct hif_cnf_add_key {
@@ -632,16 +541,13 @@ enum hif_ps_mode_error {
HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4
};
-union hif_event_data {
- u8 rcpi_rssi;
- __le32 ps_mode_error;
- __le32 peer_sta_set;
-};
-
struct hif_ind_event {
__le32 event_id;
- union hif_event_data event_data;
+ union {
+ u8 rcpi_rssi;
+ __le32 ps_mode_error;
+ __le32 peer_sta_set;
+ } event_data;
} __packed;
-
#endif
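The recurring theme of this header rework is flattening single-use wrapper
structs into anonymous bitfields and unions, so call sites can write
req->fc_offset instead of req->data_flags.fc_offset while the wire layout
stays identical. The idiom in miniature (hypothetical struct):

struct example_req {
	u8 more:1;		/* previously struct example_flags flags; */
	u8 fc_offset:3;
	u8 after_dtim:1;
	u8 reserved:3;
	union {			/* previously a named union type */
		u8 rcpi_rssi;
		__le32 ps_mode_error;
	} event_data;
} __packed;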
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
index dba18a7ae919..24188945718d 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -2,7 +2,7 @@
/*
* WFx hardware interface definitions
*
- * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
#ifndef WFX_HIF_API_GENERAL_H
@@ -17,8 +17,6 @@
#define __packed __attribute__((__packed__))
#endif
-#define API_SSID_SIZE 32
-
#define HIF_ID_IS_INDICATION 0x80
#define HIF_COUNTER_MAX 7
@@ -115,32 +113,12 @@ enum hif_api_rate_index {
API_RATE_NUM_ENTRIES = 22
};
-
enum hif_fw_type {
HIF_FW_TYPE_ETF = 0x0,
HIF_FW_TYPE_WFM = 0x1,
HIF_FW_TYPE_WSM = 0x2
};
-struct hif_capabilities {
- u8 link_mode:2;
- u8 reserved1:6;
- u8 reserved2;
- u8 reserved3;
- u8 reserved4;
-} __packed;
-
-struct hif_otp_regul_sel_mode_info {
- u8 region_sel_mode:4;
- u8 reserved:4;
-} __packed;
-
-struct hif_otp_phy_info {
- u8 phy1_region:3;
- u8 phy0_region:3;
- u8 otp_phy_ver:2;
-} __packed;
-
struct hif_ind_startup {
// Like the others, this struct is interpreted as little endian by the
// device. However, this struct is also used by the driver. We prefer to
@@ -156,14 +134,21 @@ struct hif_ind_startup {
u8 mac_addr[2][ETH_ALEN];
u8 api_version_minor;
u8 api_version_major;
- struct hif_capabilities capabilities;
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
u8 firmware_build;
u8 firmware_minor;
u8 firmware_major;
u8 firmware_type;
u8 disabled_channel_list[2];
- struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
- struct hif_otp_phy_info otp_phy_info;
+ u8 region_sel_mode:4;
+ u8 reserved5:4;
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
u32 supported_rate_mask;
u8 firmware_label[128];
} __packed;
@@ -233,15 +218,12 @@ struct hif_tx_power_loop_info {
u8 reserved;
} __packed;
-union hif_indication_data {
- struct hif_rx_stats rx_stats;
- struct hif_tx_power_loop_info tx_power_loop_info;
- u8 raw_data[1];
-};
-
struct hif_ind_generic {
- __le32 indication_type;
- union hif_indication_data indication_data;
+ __le32 type;
+ union {
+ struct hif_rx_stats rx_stats;
+ struct hif_tx_power_loop_info tx_power_loop_info;
+ } data;
} __packed;
enum hif_error {
@@ -262,6 +244,7 @@ enum hif_error {
HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d,
HIF_ERROR_HIF_BUS = 0x0f,
HIF_ERROR_PDS_TESTFEATURE = 0x10,
+ HIF_ERROR_SLK_UNCONFIGURED = 0x11,
};
struct hif_ind_error {
@@ -281,84 +264,4 @@ enum hif_secure_link_state {
SEC_LINK_ENFORCED = 0x3
};
-enum hif_sl_encryption_type {
- NO_ENCRYPTION = 0,
- TX_ENCRYPTION = 1,
- RX_ENCRYPTION = 2,
- HP_ENCRYPTION = 3
-};
-
-struct hif_sl_msg_hdr {
- u32 seqnum:30;
- u32 encrypted:2;
-} __packed;
-
-struct hif_sl_msg {
- struct hif_sl_msg_hdr hdr;
- __le16 len;
- u8 payload[];
-} __packed;
-
-#define AES_CCM_TAG_SIZE 16
-
-struct hif_sl_tag {
- u8 tag[16];
-} __packed;
-
-enum hif_sl_mac_key_dest {
- SL_MAC_KEY_DEST_OTP = 0x78,
- SL_MAC_KEY_DEST_RAM = 0x87
-};
-
-#define API_KEY_VALUE_SIZE 32
-
-struct hif_req_set_sl_mac_key {
- u8 otp_or_ram;
- u8 key_value[API_KEY_VALUE_SIZE];
-} __packed;
-
-struct hif_cnf_set_sl_mac_key {
- __le32 status;
-} __packed;
-
-enum hif_sl_session_key_alg {
- HIF_SL_CURVE25519 = 0x01,
- HIF_SL_KDF = 0x02
-};
-
-#define API_HOST_PUB_KEY_SIZE 32
-#define API_HOST_PUB_KEY_MAC_SIZE 64
-
-struct hif_req_sl_exchange_pub_keys {
- u8 algorithm:2;
- u8 reserved1:6;
- u8 reserved2[3];
- u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
- u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
-} __packed;
-
-struct hif_cnf_sl_exchange_pub_keys {
- __le32 status;
-} __packed;
-
-#define API_NCP_PUB_KEY_SIZE 32
-#define API_NCP_PUB_KEY_MAC_SIZE 64
-
-struct hif_ind_sl_exchange_pub_keys {
- __le32 status;
- u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
- u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
-} __packed;
-
-struct hif_req_sl_configure {
- u8 encr_bmp[32];
- u8 disable_session_key_protection:1;
- u8 reserved1:7;
- u8 reserved2[3];
-} __packed;
-
-struct hif_cnf_sl_configure {
- __le32 status;
-} __packed;
-
#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 6f1434795fa8..ace924720ce6 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -2,7 +2,7 @@
/*
* WFx hardware interface definitions
*
- * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ * Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
#ifndef WFX_HIF_API_MIB_H
@@ -82,50 +82,6 @@ struct hif_mib_gl_set_multi_msg {
u8 reserved2[3];
} __packed;
-enum hif_mac_addr_type {
- HIF_MAC_ADDR_A1 = 0x0,
- HIF_MAC_ADDR_A2 = 0x1,
- HIF_MAC_ADDR_A3 = 0x2
-};
-
-struct hif_mib_mac_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_type;
- u8 mac_address[ETH_ALEN];
-} __packed;
-
-#define HIF_FILTER_UNICAST 0x1
-#define HIF_FILTER_MULTICAST 0x2
-#define HIF_FILTER_BROADCAST 0x4
-
-struct hif_mib_uc_mc_bc_data_frame_condition {
- u8 condition_idx;
- u8 allowed_frames;
- u8 reserved[2];
-} __packed;
-
-struct hif_mib_config_data_filter {
- u8 filter_idx;
- u8 enable;
- u8 reserved1[2];
- u8 eth_type_cond;
- u8 port_cond;
- u8 magic_cond;
- u8 mac_cond;
- u8 ipv4_cond;
- u8 ipv6_cond;
- u8 uc_mc_bc_cond;
- u8 reserved2;
-} __packed;
-
-struct hif_mib_set_data_filtering {
- u8 invert_matching:1;
- u8 reserved1:7;
- u8 enable:1;
- u8 reserved2:7;
- u8 reserved3[2];
-} __packed;
-
enum hif_arp_ns_frame_treatment {
HIF_ARP_NS_FILTERING_DISABLE = 0x0,
HIF_ARP_NS_FILTERING_ENABLE = 0x1,
@@ -349,7 +305,7 @@ struct hif_mib_set_uapsd_information {
__le16 auto_trigger_step;
} __packed;
-struct hif_mib_tx_rate_retry_policy {
+struct hif_tx_rate_retry_policy {
u8 policy_index;
u8 short_retry_count;
u8 long_retry_count;
@@ -368,7 +324,7 @@ struct hif_mib_tx_rate_retry_policy {
struct hif_mib_set_tx_rate_retry_policy {
u8 num_tx_rate_policies;
u8 reserved[3];
- struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
+ struct hif_tx_rate_retry_policy tx_rate_retry_policy[];
} __packed;
struct hif_mib_protected_mgmt_policy {
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index cc7c0cf226ba..56a5f891447b 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -3,7 +3,7 @@
* Implementation of chip-to-host event (aka indications) of WFxxx Split Mac
* (WSM) API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/skbuff.h>
@@ -15,7 +15,6 @@
#include "bh.h"
#include "sta.h"
#include "data_rx.h"
-#include "secure_link.h"
#include "hif_api_cmd.h"
static int hif_generic_confirm(struct wfx_dev *wdev,
@@ -41,21 +40,14 @@ static int hif_generic_confirm(struct wfx_dev *wdev,
}
if (wdev->hif_cmd.buf_recv) {
- if (wdev->hif_cmd.len_recv >= len)
+ if (wdev->hif_cmd.len_recv >= len && len > 0)
memcpy(wdev->hif_cmd.buf_recv, buf, len);
else
- status = -ENOMEM;
+ status = -EIO;
}
wdev->hif_cmd.ret = status;
- if (!wdev->hif_cmd.async) {
- complete(&wdev->hif_cmd.done);
- } else {
- wdev->hif_cmd.buf_send = NULL;
- mutex_unlock(&wdev->hif_cmd.lock);
- if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
- mutex_unlock(&wdev->hif_cmd.key_renew_lock);
- }
+ complete(&wdev->hif_cmd.done);
return status;
}
@@ -102,29 +94,14 @@ static int hif_startup_indication(struct wfx_dev *wdev,
static int hif_wakeup_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
- if (!wdev->pdata.gpio_wakeup
- || !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
+ if (!wdev->pdata.gpio_wakeup ||
+ gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) {
dev_warn(wdev->dev, "unexpected wake-up indication\n");
return -EIO;
}
return 0;
}
-static int hif_keys_indication(struct wfx_dev *wdev,
- const struct hif_msg *hif, const void *buf)
-{
- const struct hif_ind_sl_exchange_pub_keys *body = buf;
- u8 pubkey[API_NCP_PUB_KEY_SIZE];
-
- // SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
- if (body->status && body->status != HIF_STATUS_SLK_NEGO_SUCCESS)
- dev_warn(wdev->dev, "secure link negotiation error\n");
- memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
- memreverse(pubkey, sizeof(pubkey));
- wfx_sl_check_pubkey(wdev, pubkey, body->ncp_pub_key_mac);
- return 0;
-}
-
static int hif_receive_indication(struct wfx_dev *wdev,
const struct hif_msg *hif,
const void *buf, struct sk_buff *skb)
@@ -133,9 +110,9 @@ static int hif_receive_indication(struct wfx_dev *wdev,
const struct hif_ind_rx *body = buf;
if (!wvif) {
- dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
- hif->interface);
- return 0;
+ dev_warn(wdev->dev, "%s: ignore rx data for non-existent vif %d\n",
+ __func__, hif->interface);
+ return -EIO;
}
skb_pull(skb, sizeof(struct hif_msg) + sizeof(struct hif_ind_rx));
wfx_rx_cb(wvif, body, skb);
@@ -151,8 +128,8 @@ static int hif_event_indication(struct wfx_dev *wdev,
int type = le32_to_cpu(body->event_id);
if (!wvif) {
- dev_warn(wdev->dev, "received event for non-existent vif\n");
- return 0;
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
}
switch (type) {
@@ -184,7 +161,10 @@ static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- WARN_ON(!wvif);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
complete(&wvif->set_pm_mode_complete);
return 0;
@@ -196,7 +176,11 @@ static int hif_scan_complete_indication(struct wfx_dev *wdev,
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- WARN_ON(!wvif);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+
wfx_scan_complete(wvif);
return 0;
@@ -208,7 +192,10 @@ static int hif_join_complete_indication(struct wfx_dev *wdev,
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- WARN_ON(!wvif);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
return 0;
@@ -218,19 +205,23 @@ static int hif_suspend_resume_indication(struct wfx_dev *wdev,
const struct hif_msg *hif,
const void *buf)
{
- struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct hif_ind_suspend_resume_tx *body = buf;
+ struct wfx_vif *wvif;
- if (body->suspend_resume_flags.bc_mc_only) {
- WARN_ON(!wvif);
- if (body->suspend_resume_flags.resume)
+ if (body->bc_mc_only) {
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
+ if (body->resume)
wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
else
wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
} else {
WARN(body->peer_sta_set, "misunderstood indication");
WARN(hif->interface != 2, "misunderstood indication");
- if (body->suspend_resume_flags.resume)
+ if (body->resume)
wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE);
else
wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP);
@@ -243,29 +234,28 @@ static int hif_generic_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
const struct hif_ind_generic *body = buf;
- int type = le32_to_cpu(body->indication_type);
+ int type = le32_to_cpu(body->type);
switch (type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
return 0;
case HIF_GENERIC_INDICATION_TYPE_STRING:
- dev_info(wdev->dev, "firmware says: %s\n",
- (char *)body->indication_data.raw_data);
+ dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data);
return 0;
case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
mutex_lock(&wdev->rx_stats_lock);
// Older firmware sends a generic indication alongside RxStats
if (!wfx_api_older_than(wdev, 1, 4))
- dev_info(wdev->dev, "Rx test ongoing. Temperature: %d°C\n",
- body->indication_data.rx_stats.current_temp);
- memcpy(&wdev->rx_stats, &body->indication_data.rx_stats,
+ dev_info(wdev->dev, "Rx test ongoing. Temperature: %d degrees C\n",
+ body->data.rx_stats.current_temp);
+ memcpy(&wdev->rx_stats, &body->data.rx_stats,
sizeof(wdev->rx_stats));
mutex_unlock(&wdev->rx_stats_lock);
return 0;
case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO:
mutex_lock(&wdev->tx_power_loop_info_lock);
memcpy(&wdev->tx_power_loop_info,
- &body->indication_data.tx_power_loop_info,
+ &body->data.tx_power_loop_info,
sizeof(wdev->tx_power_loop_info));
mutex_unlock(&wdev->tx_power_loop_info_lock);
return 0;
@@ -301,6 +291,8 @@ static const struct {
"secure link overflow" },
{ HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE,
"secure link messages list does not match message encryption" },
+ { HIF_ERROR_SLK_UNCONFIGURED,
+ "secure link not yet configured" },
{ HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW,
"bus clock is too slow (<1kHz)" },
{ HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
@@ -378,7 +370,6 @@ static const struct {
{ HIF_IND_ID_SET_PM_MODE_CMPL, hif_pm_mode_complete_indication },
{ HIF_IND_ID_SCAN_CMPL, hif_scan_complete_indication },
{ HIF_IND_ID_SUSPEND_RESUME_TX, hif_suspend_resume_indication },
- { HIF_IND_ID_SL_EXCHANGE_PUB_KEYS, hif_keys_indication },
{ HIF_IND_ID_EVENT, hif_event_indication },
{ HIF_IND_ID_GENERIC, hif_generic_indication },
{ HIF_IND_ID_ERROR, hif_error_indication },
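Indications are routed through the {id, handler} table above; with this patch,
every handler that needs a vif now fails with -EIO when the vif is gone
instead of merely warning. The dispatch loop itself is outside the hunk; it
presumably walks the table along these lines (a sketch, field names assumed):

	for (i = 0; i < ARRAY_SIZE(hif_indications); i++)
		if (hif_indications[i].msg_id == hif->id)
			return hif_indications[i].handler(wdev, hif,
							  hif->body);
	dev_err(wdev->dev, "unsupported HIF indication: %#.2x\n", hif->id);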
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 5110f9b93762..63b437261eb7 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -3,7 +3,7 @@
* Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
* Split Mac (WSM) API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
@@ -20,7 +20,6 @@ void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
init_completion(&hif_cmd->ready);
init_completion(&hif_cmd->done);
mutex_init(&hif_cmd->lock);
- mutex_init(&hif_cmd->key_renew_lock);
}
static void wfx_fill_header(struct hif_msg *hif, int if_id,
@@ -48,7 +47,7 @@ static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
}
int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
- void *reply, size_t reply_len, bool async)
+ void *reply, size_t reply_len, bool no_reply)
{
const char *mib_name = "";
const char *mib_sep = "";
@@ -56,15 +55,10 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
int vif = request->interface;
int ret;
- WARN(wdev->hif_cmd.buf_recv && wdev->hif_cmd.async, "API usage error");
-
// Do not wait for any reply if chip is frozen
if (wdev->chip_frozen)
return -ETIMEDOUT;
- if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
- mutex_lock(&wdev->hif_cmd.key_renew_lock);
-
mutex_lock(&wdev->hif_cmd.lock);
WARN(wdev->hif_cmd.buf_send, "data locking error");
@@ -73,14 +67,18 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
wdev->hif_cmd.buf_send = request;
wdev->hif_cmd.buf_recv = reply;
wdev->hif_cmd.len_recv = reply_len;
- wdev->hif_cmd.async = async;
complete(&wdev->hif_cmd.ready);
wfx_bh_request_tx(wdev);
- // NOTE: no timeout is caught when async is enabled
- if (async)
+ if (no_reply) {
+ // Chip won't reply. Give enough time to the wq to send the
+ // buffer.
+ msleep(100);
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
return 0;
+ }
if (wdev->poll_irq)
wfx_bh_poll_irq(wdev);
@@ -118,36 +116,25 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
"WSM request %s%s%s (%#.2x) on vif %d returned status %d\n",
get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
- if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
- mutex_unlock(&wdev->hif_cmd.key_renew_lock);
return ret;
}
// This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any
-// request anymore. We need to slightly hack struct wfx_hif_cmd for that job. Be
-// carefull to only call this funcion during device unregister.
+// request anymore. Obviously, only call this function during device unregister.
int hif_shutdown(struct wfx_dev *wdev)
{
int ret;
struct hif_msg *hif;
- if (wdev->chip_frozen)
- return 0;
wfx_alloc_hif(0, &hif);
if (!hif)
return -ENOMEM;
wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
- // After this command, chip won't reply. Be sure to give enough time to
- // bh to send buffer:
- msleep(100);
- wdev->hif_cmd.buf_send = NULL;
if (wdev->pdata.gpio_wakeup)
gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
else
control_reg_write(wdev, 0);
- mutex_unlock(&wdev->hif_cmd.lock);
- mutex_unlock(&wdev->hif_cmd.key_renew_lock);
kfree(hif);
return ret;
}
@@ -177,7 +164,7 @@ int hif_reset(struct wfx_vif *wvif, bool reset_stat)
if (!hif)
return -ENOMEM;
- body->reset_flags.reset_stat = reset_stat;
+ body->reset_stat = reset_stat;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
@@ -252,8 +239,6 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
- compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
- "API inconsistency");
if (!hif)
return -ENOMEM;
for (i = 0; i < req->n_ssids; i++) {
@@ -263,9 +248,8 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
cpu_to_le32(req->ssids[i].ssid_len);
}
body->num_of_ssids = HIF_API_MAX_NB_SSIDS;
- // Background scan is always a good idea
- body->scan_type.type = 1;
- body->scan_flags.fbg = 1;
+ body->maintain_current_bss = 1;
+ body->disallow_ps = 1;
body->tx_power_level =
cpu_to_le32(req->channels[chan_start_idx]->max_power);
body->num_of_channels = chan_num;
@@ -324,11 +308,13 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
WARN_ON(!conf->basic_rates);
WARN_ON(sizeof(body->ssid) < ssidlen);
WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+ if (WARN_ON(!channel))
+ return -EINVAL;
if (!hif)
return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
body->short_preamble = conf->use_short_preamble;
- if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
+ if (channel->flags & IEEE80211_CHAN_NO_IR)
body->probe_for_join = 0;
else
body->probe_for_join = 1;
@@ -446,11 +432,11 @@ int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
if (!hif)
return -ENOMEM;
if (ps) {
- body->pm_mode.enter_psm = 1;
+ body->enter_psm = 1;
// Firmware does not support more than 128ms
body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
if (body->fast_psm_idle_period)
- body->pm_mode.fast_psm = 1;
+ body->fast_psm = 1;
}
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -499,7 +485,7 @@ int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
return ret;
}
-int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
+int hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp)
{
int ret;
struct hif_msg *hif;
@@ -509,7 +495,8 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
return -ENOMEM;
if (mac_addr)
ether_addr_copy(body->mac_addr, mac_addr);
- body->map_link_flags = *(struct hif_map_link_flags *)&flags;
+ body->mfpc = mfp ? 1 : 0;
+ body->unmap = unmap ? 1 : 0;
body->peer_sta_id = sta_id;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -526,7 +513,7 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
if (!hif)
return -ENOMEM;
- body->ie_flags.beacon = 1;
+ body->beacon = 1;
body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
@@ -534,62 +521,3 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
kfree(hif);
return ret;
}
-
-int hif_sl_send_pub_keys(struct wfx_dev *wdev,
- const u8 *pubkey, const u8 *pubkey_hmac)
-{
- int ret;
- struct hif_msg *hif;
- struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
- &hif);
-
- if (!hif)
- return -ENOMEM;
- body->algorithm = HIF_SL_CURVE25519;
- memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
- memcpy(body->host_pub_key_mac, pubkey_hmac,
- sizeof(body->host_pub_key_mac));
- wfx_fill_header(hif, -1, HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS,
- sizeof(*body));
- ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
- kfree(hif);
- // Compatibility with legacy secure link
- if (ret == le32_to_cpu(HIF_STATUS_SLK_NEGO_SUCCESS))
- ret = 0;
- return ret;
-}
-
-int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
-{
- int ret;
- struct hif_msg *hif;
- struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
-
- if (!hif)
- return -ENOMEM;
- memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
- wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
- ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
- kfree(hif);
- return ret;
-}
-
-int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key, int destination)
-{
- int ret;
- struct hif_msg *hif;
- struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
- &hif);
-
- if (!hif)
- return -ENOMEM;
- memcpy(body->key_value, slk_key, sizeof(body->key_value));
- body->otp_or_ram = destination;
- wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
- ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
- kfree(hif);
- // Compatibility with legacy secure link
- if (ret == le32_to_cpu(HIF_STATUS_SLK_SET_KEY_SUCCESS))
- ret = 0;
- return ret;
-}
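With the async machinery gone, hif_shutdown() above is the only caller passing
no_reply = true: wfx_cmd_send() then sleeps 100 ms so the bottom half can
drain the buffer, instead of blocking on a completion the chip will never
signal. The call reduces to the two lines already visible above:

	wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
	ret = wfx_cmd_send(wdev, hif, NULL, 0, true);	/* no_reply */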
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index e1da28aef706..3521c545ae6b 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -3,7 +3,7 @@
* Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
* Split Mac (WSM) API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (C) 2010, ST-Ericsson SA
*/
@@ -20,10 +20,8 @@ struct wfx_vif;
struct wfx_hif_cmd {
struct mutex lock;
- struct mutex key_renew_lock;
struct completion ready;
struct completion done;
- bool async;
struct hif_msg *buf_send;
void *buf_recv;
size_t len_recv;
@@ -55,12 +53,8 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
const struct ieee80211_channel *channel);
int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
-int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
+int hif_map_link(struct wfx_vif *wvif,
+ bool unmap, u8 *mac_addr, int sta_id, bool mfp);
int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
-int hif_sl_set_mac_key(struct wfx_dev *wdev,
- const u8 *slk_key, int destination);
-int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
-int hif_sl_send_pub_keys(struct wfx_dev *wdev,
- const u8 *pubkey, const u8 *pubkey_hmac);
#endif
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c
index 05f1e1e98af9..1926cf1b62be 100644
--- a/drivers/staging/wfx/hif_tx_mib.c
+++ b/drivers/staging/wfx/hif_tx_mib.c
@@ -2,7 +2,7 @@
/*
* Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (C) 2010, ST-Ericsson SA
*/
@@ -29,7 +29,7 @@ int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
unsigned int dtim_interval,
unsigned int listen_interval)
{
- struct hif_mib_beacon_wake_up_period val = {
+ struct hif_mib_beacon_wake_up_period arg = {
.wakeup_period_min = dtim_interval,
.receive_dtim = 0,
.wakeup_period_max = listen_interval,
@@ -39,7 +39,7 @@ int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
return -EINVAL;
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
@@ -92,31 +92,31 @@ int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
int hif_set_rx_filter(struct wfx_vif *wvif,
bool filter_bssid, bool filter_prbreq)
{
- struct hif_mib_rx_filter val = { };
+ struct hif_mib_rx_filter arg = { };
if (filter_bssid)
- val.bssid_filter = 1;
+ arg.bssid_filter = 1;
if (!filter_prbreq)
- val.fwd_probe_req = 1;
+ arg.fwd_probe_req = 1;
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
int hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
const struct hif_ie_table_entry *tbl)
{
int ret;
- struct hif_mib_bcn_filter_table *val;
- int buf_len = struct_size(val, ie_table, tbl_len);
+ struct hif_mib_bcn_filter_table *arg;
+ int buf_len = struct_size(arg, ie_table, tbl_len);
- val = kzalloc(buf_len, GFP_KERNEL);
- if (!val)
+ arg = kzalloc(buf_len, GFP_KERNEL);
+ if (!arg)
return -ENOMEM;
- val->num_of_info_elmts = cpu_to_le32(tbl_len);
- memcpy(val->ie_table, tbl, flex_array_size(val, ie_table, tbl_len));
+ arg->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len));
ret = hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
- kfree(val);
+ HIF_MIB_ID_BEACON_FILTER_TABLE, arg, buf_len);
+ kfree(arg);
return ret;
}
@@ -134,13 +134,13 @@ int hif_beacon_filter_control(struct wfx_vif *wvif,
int hif_set_operational_mode(struct wfx_dev *wdev, enum hif_op_power_mode mode)
{
- struct hif_mib_gl_operational_power_mode val = {
+ struct hif_mib_gl_operational_power_mode arg = {
.power_mode = mode,
.wup_ind_activation = 1,
};
return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
@@ -161,57 +161,46 @@ int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
{
- struct hif_mib_protected_mgmt_policy val = { };
+ struct hif_mib_protected_mgmt_policy arg = { };
WARN(required && !capable, "incoherent arguments");
if (capable) {
- val.pmf_enable = 1;
- val.host_enc_auth_frames = 1;
+ arg.pmf_enable = 1;
+ arg.host_enc_auth_frames = 1;
}
if (!required)
- val.unpmf_allowed = 1;
+ arg.unpmf_allowed = 1;
return hif_write_mib(wvif->wdev, wvif->id,
HIF_MIB_ID_PROTECTED_MGMT_POLICY,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
int hif_set_block_ack_policy(struct wfx_vif *wvif,
u8 tx_tid_policy, u8 rx_tid_policy)
{
- struct hif_mib_block_ack_policy val = {
+ struct hif_mib_block_ack_policy arg = {
.block_ack_tx_tid_policy = tx_tid_policy,
.block_ack_rx_tid_policy = rx_tid_policy,
};
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
- &val, sizeof(val));
+ &arg, sizeof(arg));
}
-int hif_set_association_mode(struct wfx_vif *wvif,
- struct ieee80211_bss_conf *info)
+int hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density,
+ bool greenfield, bool short_preamble)
{
- struct ieee80211_sta *sta = NULL;
- struct hif_mib_set_association_mode val = {
+ struct hif_mib_set_association_mode arg = {
.preambtype_use = 1,
.mode = 1,
.spacing = 1,
- .short_preamble = info->use_short_preamble,
+ .short_preamble = short_preamble,
+ .greenfield = greenfield,
+ .mpdu_start_spacing = ampdu_density,
};
- rcu_read_lock(); // protect sta
- if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
-
- // FIXME: it is strange to not retrieve all information from bss_info
- if (sta && sta->ht_cap.ht_supported) {
- val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
- if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
- val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
- }
- rcu_read_unlock();
-
return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, &arg, sizeof(arg));
}
int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
@@ -239,57 +228,6 @@ int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
return ret;
}
-int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- int idx, const u8 *mac_addr)
-{
- struct hif_mib_mac_addr_data_frame_condition val = {
- .condition_idx = idx,
- .address_type = HIF_MAC_ADDR_A1,
- };
-
- ether_addr_copy(val.mac_address, mac_addr);
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif, int idx, u8 allowed_frames)
-{
- struct hif_mib_uc_mc_bc_data_frame_condition val = {
- .condition_idx = idx,
- .allowed_frames = allowed_frames,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
- int mac_filters, int frames_types_filters)
-{
- struct hif_mib_config_data_filter val = {
- .enable = enable,
- .filter_idx = idx,
- .mac_cond = mac_filters,
- .uc_mc_bc_cond = frames_types_filters,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
-}
-
-int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert)
-{
- struct hif_mib_set_data_filtering val = {
- .enable = enable,
- .invert_matching = invert,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
-}
-
int hif_keep_alive_period(struct wfx_vif *wvif, int period)
{
struct hif_mib_keep_alive_period arg = {
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 86683de7de7c..812b3ba0f00e 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -2,7 +2,7 @@
/*
* Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (C) 2010, ST-Ericsson SA
*/
@@ -33,17 +33,10 @@ int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required);
int hif_set_block_ack_policy(struct wfx_vif *wvif,
u8 tx_tid_policy, u8 rx_tid_policy);
-int hif_set_association_mode(struct wfx_vif *wvif,
- struct ieee80211_bss_conf *info);
+int hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density,
+ bool greenfield, bool short_preamble);
int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
int policy_index, u8 *rates);
-int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- int idx, const u8 *mac_addr);
-int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
- int idx, u8 allowed_frames);
-int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
- int mac_filters, int frames_types_filters);
-int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert);
int hif_keep_alive_period(struct wfx_vif *wvif, int period);
int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr);
int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable);
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index 777217cdf9a7..36fbc5b5d64c 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -2,7 +2,7 @@
/*
* Low-level I/O functions.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index 4b6ef061b40b..0b8e4f7157df 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -2,7 +2,7 @@
/*
* Low-level API.
*
- * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_HWIO_H
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
index 5ee2ffc5f935..2ab82bed4c1b 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/staging/wfx/key.c
@@ -2,7 +2,7 @@
/*
* Key management related functions.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
@@ -171,7 +171,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
k.int_id = wvif->id;
k.entry_index = idx;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) {
if (pairwise)
k.type = fill_wep_pair(&k.key.wep_pairwise_key, key,
sta->addr);
@@ -191,15 +191,15 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
else
k.type = fill_ccmp_group(&k.key.aes_group_key, key,
&seq);
- } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
+ } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
if (pairwise)
k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key,
sta->addr);
else
k.type = fill_sms4_group(&k.key.wapi_group_key, key);
- } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key,
- &seq);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq);
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
} else {
dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
wfx_free_key(wdev, idx);
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
index ff31fc9c565a..70a44d0ca35e 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/staging/wfx/key.h
@@ -2,7 +2,7 @@
/*
* Implementation of mac80211 API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_KEY_H
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 11dfa088fc86..e7bc1988124a 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -2,7 +2,7 @@
/*
* Device probe and register.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (c) 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
@@ -30,7 +30,6 @@
#include "scan.h"
#include "debug.h"
#include "data_tx.h"
-#include "secure_link.h"
#include "hif_tx_mib.h"
#include "hif_api_cmd.h"
@@ -143,7 +142,6 @@ static const struct ieee80211_ops wfx_ops = {
.set_rts_threshold = wfx_set_rts_threshold,
.set_default_unicast_key = wfx_set_default_unicast_key,
.bss_info_changed = wfx_bss_info_changed,
- .prepare_multicast = wfx_prepare_multicast,
.configure_filter = wfx_configure_filter,
.ampdu_action = wfx_ampdu_action,
.flush = wfx_flush,
@@ -224,12 +222,18 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev)
if (ret) {
dev_err(wdev->dev, "can't load PDS file %s\n",
wdev->pdata.file_pds);
- return ret;
+ goto err1;
}
tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto err2;
+ }
ret = wfx_send_pds(wdev, tmp_buf, pds->size);
kfree(tmp_buf);
+err2:
release_firmware(pds);
+err1:
return ret;
}
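
The reworked error path in wfx_send_pdata_pds() follows the usual kernel unwind idiom: each acquisition gets a matching label, and a failure jumps to the label that releases exactly what was acquired so far, leaving a single return. A standalone sketch of the shape, with hypothetical resources standing in for the firmware handle and the kmemdup() buffer:

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
	int ret = 0;
	char *a, *b;

	a = malloc(32);        /* first acquisition (cf. request_firmware) */
	if (!a) {
		ret = -1;
		goto err1;     /* nothing acquired yet, plain return */
	}
	b = malloc(64);        /* second acquisition (cf. kmemdup) */
	if (!b) {
		ret = -1;
		goto err2;     /* must still release the first resource */
	}
	/* ... use a and b ... */
	free(b);
err2:
	free(a);               /* cf. release_firmware() under err2 */
err1:
	return ret;
}

int main(void)
{
	return do_work() ? 1 : 0;
}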
@@ -271,8 +275,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->queues = 4;
hw->max_rates = 8;
hw->max_rate_tries = 8;
- hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
- sizeof(struct hif_msg)
+ hw->extra_tx_headroom = sizeof(struct hif_msg)
+ sizeof(struct hif_req_tx)
+ 4 /* alignment */ + 8 /* TKIP IV */;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -282,9 +285,9 @@ struct wfx_dev *wfx_init_common(struct device *dev,
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
hw->wiphy->max_scan_ssids = 2;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -306,10 +309,9 @@ struct wfx_dev *wfx_init_common(struct device *dev,
wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(wdev->pdata.gpio_wakeup))
- return ERR_CAST(wdev->pdata.gpio_wakeup);
+ return NULL;
if (wdev->pdata.gpio_wakeup)
gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup");
- wfx_sl_fill_pdata(dev, &wdev->pdata);
mutex_init(&wdev->conf_mutex);
mutex_init(&wdev->rx_stats_lock);
@@ -363,9 +365,8 @@ int wfx_probe(struct wfx_dev *wdev)
dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
- wdev->hw_caps.api_version_major,
- wdev->hw_caps.api_version_minor,
- wdev->keyset, *((u32 *)&wdev->hw_caps.capabilities));
+ wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor,
+ wdev->keyset, wdev->hw_caps.link_mode);
snprintf(wdev->hw->wiphy->fw_version,
sizeof(wdev->hw->wiphy->fw_version),
"%d.%d.%d",
@@ -381,14 +382,13 @@ int wfx_probe(struct wfx_dev *wdev)
goto err0;
}
- err = wfx_sl_init(wdev);
- if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
+ if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) {
dev_err(wdev->dev,
- "chip require secure_link, but can't negociate it\n");
+ "chip require secure_link, but can't negotiate it\n");
goto err0;
}
- if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
+ if (wdev->hw_caps.region_sel_mode) {
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= IEEE80211_CHAN_NO_IR;
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= IEEE80211_CHAN_NO_IR;
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= IEEE80211_CHAN_DISABLED;
@@ -466,7 +466,6 @@ void wfx_release(struct wfx_dev *wdev)
hif_shutdown(wdev);
wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
wfx_bh_unregister(wdev);
- wfx_sl_deinit(wdev);
}
static int __init wfx_core_init(void)
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index c59d375dd3ad..a0db322383a3 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -2,7 +2,7 @@
/*
* Device probe and register.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
@@ -19,7 +19,7 @@ struct wfx_dev;
struct hwbus_ops;
struct wfx_platform_data {
- /* Keyset and ".sec" extention will appended to this string */
+ /* Keyset and ".sec" extension will be appended to this string */
const char *file_fw;
const char *file_pds;
struct gpio_desc *gpio_wakeup;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 6e3159165143..31c37f69c295 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -2,7 +2,7 @@
/*
* O(1) TX queue with built-in allocator.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/sched.h>
@@ -60,11 +60,16 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
void wfx_tx_queues_init(struct wfx_vif *wvif)
{
+ // The device is in charge of respecting the details of the QoS
+ // parameters. The driver just ensures that it roughly respects the
+ // priorities to avoid starvation.
+ const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
int i;
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
skb_queue_head_init(&wvif->tx_queue[i].normal);
skb_queue_head_init(&wvif->tx_queue[i].cab);
+ wvif->tx_queue[i].priority = priorities[i];
}
}
@@ -219,6 +224,11 @@ bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
return false;
}
+static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
+{
+ return atomic_read(&queue->pending_frames) * queue->priority;
+}
+
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
@@ -234,8 +244,8 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
WARN_ON(num_queues >= ARRAY_SIZE(queues));
queues[num_queues] = &wvif->tx_queue[i];
for (j = num_queues; j > 0; j--)
- if (atomic_read(&queues[j]->pending_frames) <
- atomic_read(&queues[j - 1]->pending_frames))
+ if (wfx_tx_queue_get_weight(queues[j]) <
+ wfx_tx_queue_get_weight(queues[j - 1]))
swap(queues[j - 1], queues[j]);
num_queues++;
}
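
The weighting added here makes the scheduler proportional rather than absolute: a queue's effective load is its backlog multiplied by its per-AC multiplier, and the queue with the lowest product is served first, so at equal backlog a multiplier-1 queue beats a multiplier-128 one. A standalone sketch of the comparison, with illustrative numbers (the driver additionally sorts the candidate queues across all vifs):

#include <stdio.h>

struct q {
	int pending;   /* frames queued */
	int priority;  /* per-AC multiplier, cf. { 1, 2, 64, 128 } */
};

/* lower weight wins: fewer pending frames relative to the multiplier */
static int weight(const struct q *q)
{
	return q->pending * q->priority;
}

int main(void)
{
	struct q acs[4] = {
		{ .pending = 40, .priority = 1 },
		{ .pending = 10, .priority = 2 },
		{ .pending = 3,  .priority = 64 },
		{ .pending = 1,  .priority = 128 },
	};
	int i, best = 0;

	for (i = 1; i < 4; i++)
		if (weight(&acs[i]) < weight(&acs[best]))
			best = i;
	printf("dequeue from queue %d (weight %d)\n", best,
	       weight(&acs[best]));
	return 0;
}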
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 22d7c936907f..80ba19455ef3 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -2,7 +2,7 @@
/*
* O(1) TX queue with built-in allocator.
*
- * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_QUEUE_H
@@ -18,6 +18,7 @@ struct wfx_queue {
struct sk_buff_head normal;
struct sk_buff_head cab; // Content After (DTIM) Beacon
atomic_t pending_frames;
+ int priority;
};
void wfx_tx_lock(struct wfx_dev *wdev);
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index e9de19784865..fb47c7cddf2f 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -2,7 +2,7 @@
/*
* Scan related functions.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
@@ -113,10 +113,6 @@ int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS);
-
- if (vif->type == NL80211_IFTYPE_AP)
- return -EOPNOTSUPP;
-
wvif->scan_req = hw_req;
schedule_work(&wvif->scan_work);
return 0;
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index 2eb786c9572c..c7496a766478 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -2,7 +2,7 @@
/*
* Scan related functions.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_SCAN_H
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
deleted file mode 100644
index c3d055b2f8b1..000000000000
--- a/drivers/staging/wfx/secure_link.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2019, Silicon Laboratories, Inc.
- */
-#ifndef WFX_SECURE_LINK_H
-#define WFX_SECURE_LINK_H
-
-#include <linux/of.h>
-
-#include "hif_api_general.h"
-
-struct wfx_dev;
-
-
-struct sl_context {
-};
-
-static inline bool wfx_is_secure_command(struct wfx_dev *wdev, int cmd_id)
-{
- return false;
-}
-
-static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
-{
- return -EIO;
-}
-
-static inline int wfx_sl_encode(struct wfx_dev *wdev,
- const struct hif_msg *input,
- struct hif_sl_msg *output)
-{
- return -EIO;
-}
-
-static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev,
- const u8 *ncp_pubkey,
- const u8 *ncp_pubmac)
-{
- return -EIO;
-}
-
-static inline void wfx_sl_fill_pdata(struct device *dev,
- struct wfx_platform_data *pdata)
-{
- if (of_find_property(dev->of_node, "slk_key", NULL))
- dev_err(dev, "secure link is not supported by this driver, ignoring provided key\n");
-}
-
-static inline int wfx_sl_init(struct wfx_dev *wdev)
-{
- return -EIO;
-}
-
-static inline void wfx_sl_deinit(struct wfx_dev *wdev)
-{
-}
-
-
-#endif
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 4e30ab17a93d..2320a81eae0b 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -2,7 +2,7 @@
/*
* Implementation of mac80211 API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/etherdevice.h>
@@ -91,59 +91,12 @@ static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
}
}
-static void wfx_filter_mcast(struct wfx_vif *wvif, bool filter_mcast)
-{
- int i;
-
- // Temporary workaround for filters
- hif_set_data_filtering(wvif, false, true);
- return;
-
- if (!filter_mcast) {
- hif_set_data_filtering(wvif, false, true);
- return;
- }
- for (i = 0; i < wvif->filter_mcast_count; i++)
- hif_set_mac_addr_condition(wvif, i, wvif->filter_mcast_addr[i]);
- hif_set_uc_mc_bc_condition(wvif, 0,
- HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
- hif_set_config_data_filter(wvif, true, 0, BIT(1),
- BIT(wvif->filter_mcast_count) - 1);
- hif_set_data_filtering(wvif, true, true);
-}
-
-u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
- struct netdev_hw_addr_list *mc_list)
-{
- int i;
- struct netdev_hw_addr *ha;
- struct wfx_vif *wvif = NULL;
- struct wfx_dev *wdev = hw->priv;
- int count = netdev_hw_addr_list_count(mc_list);
-
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- if (count > ARRAY_SIZE(wvif->filter_mcast_addr)) {
- wvif->filter_mcast_count = 0;
- continue;
- }
- wvif->filter_mcast_count = count;
-
- i = 0;
- netdev_hw_addr_list_for_each(ha, mc_list) {
- ether_addr_copy(wvif->filter_mcast_addr[i], ha->addr);
- i++;
- }
- }
-
- return 0;
-}
-
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 unused)
{
struct wfx_vif *wvif = NULL;
struct wfx_dev *wdev = hw->priv;
- bool filter_bssid, filter_prbreq, filter_beacon, filter_mcast;
+ bool filter_bssid, filter_prbreq, filter_beacon;
// Notes:
// - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
@@ -167,16 +120,6 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
filter_beacon = true;
wfx_filter_beacon(wvif, filter_beacon);
- if (*total_flags & FIF_ALLMULTI) {
- filter_mcast = false;
- } else if (!wvif->filter_mcast_count) {
- dev_dbg(wdev->dev, "disabling unconfigured multicast filter");
- filter_mcast = false;
- } else {
- filter_mcast = true;
- }
- wfx_filter_mcast(wvif, filter_mcast);
-
if (*total_flags & FIF_OTHER_BSS)
filter_bssid = false;
else
@@ -214,7 +157,7 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
wvif->vif->type != NL80211_IFTYPE_AP) {
// It is necessary to enable powersave if channels
- // are differents.
+ // are different.
if (enable_ps)
*enable_ps = true;
if (wvif->wdev->force_ps_timeout > -1)
@@ -323,36 +266,6 @@ void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
hif_wep_default_key_id(wvif, idx);
}
-static void wfx_set_mfp(struct wfx_vif *wvif,
- struct cfg80211_bss *bss)
-{
- const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
- const int pairwise_cipher_suite_size = 4 / sizeof(u16);
- const int akm_suite_size = 4 / sizeof(u16);
- const u16 *ptr = NULL;
- bool mfpc = false;
- bool mfpr = false;
-
- /* 802.11w protected mgmt frames */
-
- /* retrieve MFPC and MFPR flags from beacon or PBRSP */
-
- rcu_read_lock();
- if (bss)
- ptr = (const u16 *)ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
-
- if (ptr) {
- ptr += pairwise_cipher_suite_count_offset;
- ptr += 1 + pairwise_cipher_suite_size * *ptr;
- ptr += 1 + akm_suite_size * *ptr;
- mfpr = *ptr & BIT(6);
- mfpc = *ptr & BIT(7);
- }
- rcu_read_unlock();
-
- hif_set_mfp(wvif, mfpc, mfpr);
-}
-
void wfx_reset(struct wfx_vif *wvif)
{
struct wfx_dev *wdev = wvif->wdev;
@@ -370,55 +283,6 @@ void wfx_reset(struct wfx_vif *wvif)
wfx_update_pm(wvif);
}
-static void wfx_do_join(struct wfx_vif *wvif)
-{
- int ret;
- struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
- struct cfg80211_bss *bss = NULL;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- const u8 *ssidie = NULL;
- int ssidlen = 0;
-
- wfx_tx_lock_flush(wvif->wdev);
-
- bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
- conf->bssid, NULL, 0,
- IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
- if (!bss && !conf->ibss_joined) {
- wfx_tx_unlock(wvif->wdev);
- return;
- }
-
- rcu_read_lock(); // protect ssidie
- if (bss)
- ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssidie) {
- ssidlen = ssidie[1];
- if (ssidlen > IEEE80211_MAX_SSID_LEN)
- ssidlen = IEEE80211_MAX_SSID_LEN;
- memcpy(ssid, &ssidie[2], ssidlen);
- }
- rcu_read_unlock();
-
- wfx_set_mfp(wvif, bss);
- cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
-
- wvif->join_in_progress = true;
- ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
- if (ret) {
- ieee80211_connection_loss(wvif->vif);
- wfx_reset(wvif);
- } else {
- /* Due to beacon filtering it is possible that the
- * AP's beacon is not known for the mac80211 stack.
- * Disable filtering temporary to make sure the stack
- * receives at least one
- */
- wfx_filter_beacon(wvif, false);
- }
- wfx_tx_unlock(wvif->wdev);
-}
-
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
@@ -427,6 +291,9 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sta_priv->vif_id = wvif->id;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ hif_set_mfp(wvif, sta->mfp, sta->mfp);
+
// In station mode, the firmware interprets new link-id as a TDLS peer.
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0;
@@ -434,7 +301,7 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wvif->link_id_map |= BIT(sta_priv->link_id);
WARN_ON(!sta_priv->link_id);
WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX);
- hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
+ hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp);
return 0;
}
@@ -449,7 +316,7 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!sta_priv->link_id)
return 0;
// FIXME add a mutex?
- hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
+ hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false);
wvif->link_id_map &= ~BIT(sta_priv->link_id);
return 0;
}
@@ -474,6 +341,31 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
return 0;
}
+static void wfx_set_mfp_ap(struct wfx_vif *wvif)
+{
+ struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+ const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN,
+ skb->data + ieoffset,
+ skb->len - ieoffset);
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
+
+ if (ptr) {
+ ptr += pairwise_cipher_suite_count_offset;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ ptr += 1 + akm_suite_size * *ptr;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
+ return;
+ hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
+ }
+}
+
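
For reference, the pointer arithmetic above matches the fixed RSN element layout: the pointer returned by cfg80211_find_ie() includes the two element-header octets, so 8 bytes in (header, 2-byte version, 4-byte group cipher suite) sits the 2-byte pairwise suite count, followed by 4-byte suites, then the AKM count and suites, and finally the RSN capabilities word whose bits 6 and 7 carry MFPR and MFPC. A hedged userspace sketch of the same walk, modelled with host-order 16-bit words and assuming a well-formed element (the driver adds the tail-pointer bounds checks seen above):

#include <stdint.h>
#include <stdio.h>

/* Extract MFPC/MFPR from an RSN element, header octets included. */
static void parse_rsn_mfp(const uint16_t *ptr, int *mfpc, int *mfpr)
{
	ptr += 8 / sizeof(uint16_t);   /* skip header, version, group suite */
	ptr += 1 + (4 / sizeof(uint16_t)) * *ptr;  /* skip pairwise suites */
	ptr += 1 + (4 / sizeof(uint16_t)) * *ptr;  /* skip AKM suites */
	*mfpr = !!(*ptr & (1 << 6));   /* management protection required */
	*mfpc = !!(*ptr & (1 << 7));   /* management protection capable */
}

int main(void)
{
	uint16_t rsn[] = {
		0x1430,          /* EID 0x30 plus length (header word) */
		0x0001,          /* version 1 */
		0xac0f, 0x0400,  /* group cipher suite */
		0x0001,          /* pairwise suite count = 1 */
		0xac0f, 0x0400,  /* pairwise suite */
		0x0001,          /* AKM suite count = 1 */
		0xac0f, 0x0200,  /* AKM suite */
		0x00c0,          /* RSN capabilities: MFPC | MFPR */
	};
	int mfpc, mfpr;

	parse_rsn_mfp(rsn, &mfpc, &mfpr);
	printf("mfpc=%d mfpr=%d\n", mfpc, mfpr);
	return 0;
}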
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
@@ -488,6 +380,7 @@ int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ret = hif_start(wvif, &vif->bss_conf, wvif->channel);
if (ret > 0)
return -EIO;
+ wfx_set_mfp_ap(wvif);
return ret;
}
@@ -498,11 +391,74 @@ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wfx_reset(wvif);
}
+static void wfx_join(struct wfx_vif *wvif)
+{
+ int ret;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ const u8 *ssidie = NULL;
+ int ssidlen = 0;
+
+ wfx_tx_lock_flush(wvif->wdev);
+
+ bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
+ conf->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (!bss && !conf->ibss_joined) {
+ wfx_tx_unlock(wvif->wdev);
+ return;
+ }
+
+ rcu_read_lock(); // protect ssidie
+ if (bss)
+ ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssidie) {
+ ssidlen = ssidie[1];
+ if (ssidlen > IEEE80211_MAX_SSID_LEN)
+ ssidlen = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssidie[2], ssidlen);
+ }
+ rcu_read_unlock();
+
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
+
+ wvif->join_in_progress = true;
+ ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
+ if (ret) {
+ ieee80211_connection_loss(wvif->vif);
+ wfx_reset(wvif);
+ } else {
+ /* Due to beacon filtering it is possible that the
+ * AP's beacon is not known to the mac80211 stack.
+ * Disable filtering temporarily to make sure the stack
+ * receives at least one.
+ */
+ wfx_filter_beacon(wvif, false);
+ }
+ wfx_tx_unlock(wvif->wdev);
+}
+
static void wfx_join_finalize(struct wfx_vif *wvif,
struct ieee80211_bss_conf *info)
{
+ struct ieee80211_sta *sta = NULL;
+ int ampdu_density = 0;
+ bool greenfield = false;
+
+ rcu_read_lock(); // protect sta
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+ if (sta && sta->ht_cap.ht_supported)
+ ampdu_density = sta->ht_cap.ampdu_density;
+ if (sta && sta->ht_cap.ht_supported &&
+ !(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ rcu_read_unlock();
+
wvif->join_in_progress = false;
- hif_set_association_mode(wvif, info);
+ hif_set_association_mode(wvif, ampdu_density, greenfield,
+ info->use_short_preamble);
hif_keep_alive_period(wvif, 0);
// beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use
// the same value.
@@ -516,7 +472,7 @@ int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wfx_upload_ap_templates(wvif);
- wfx_do_join(wvif);
+ wfx_join(wvif);
return 0;
}
@@ -549,32 +505,22 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&wdev->conf_mutex);
- /* TODO: BSS_CHANGED_QOS */
- if (changed & BSS_CHANGED_ARP_FILTER) {
- for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
- __be32 *arp_addr = &info->arp_addr_list[i];
-
- if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
- arp_addr = NULL;
- if (i >= info->arp_addr_cnt)
- arp_addr = NULL;
- hif_set_arp_ipv4_filter(wvif, i, arp_addr);
- }
- }
-
if (changed & BSS_CHANGED_BASIC_RATES ||
changed & BSS_CHANGED_BEACON_INT ||
changed & BSS_CHANGED_BSSID) {
if (vif->type == NL80211_IFTYPE_STATION)
- wfx_do_join(wvif);
+ wfx_join(wvif);
}
- if (changed & BSS_CHANGED_AP_PROBE_RESP ||
- changed & BSS_CHANGED_BEACON)
- wfx_upload_ap_templates(wvif);
-
- if (changed & BSS_CHANGED_BEACON_ENABLED)
- wfx_enable_beacon(wvif, info->enable_beacon);
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION)
+ wfx_reset(wvif);
+ else
+ dev_warn(wdev->dev, "%s: misunderstood change: ASSOC\n",
+ __func__);
+ }
if (changed & BSS_CHANGED_BEACON_INFO) {
if (vif->type != NL80211_IFTYPE_STATION)
@@ -587,16 +533,25 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wfx_filter_beacon(wvif, true);
}
- if (changed & BSS_CHANGED_ASSOC) {
- if (info->assoc || info->ibss_joined)
- wfx_join_finalize(wvif, info);
- else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION)
- wfx_reset(wvif);
- else
- dev_warn(wdev->dev, "%s: misunderstood change: ASSOC\n",
- __func__);
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
+ __be32 *arp_addr = &info->arp_addr_list[i];
+
+ if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ arp_addr = NULL;
+ if (i >= info->arp_addr_cnt)
+ arp_addr = NULL;
+ hif_set_arp_ipv4_filter(wvif, i, arp_addr);
+ }
}
+ if (changed & BSS_CHANGED_AP_PROBE_RESP ||
+ changed & BSS_CHANGED_BEACON)
+ wfx_upload_ap_templates(wvif);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ wfx_enable_beacon(wvif, info->enable_beacon);
+
if (changed & BSS_CHANGED_KEEP_ALIVE)
hif_keep_alive_period(wvif, info->max_idle_period *
USEC_PER_TU / USEC_PER_MSEC);
@@ -664,6 +619,10 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv;
struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
+ if (!wvif) {
+ dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
+ return -EIO;
+ }
schedule_work(&wvif->update_tim_work);
return 0;
}
@@ -682,15 +641,16 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- /* Aggregation is implemented fully in firmware,
- * including block ack negotiation. Do not allow
- * mac80211 stack to do anything: it interferes with
- * the firmware.
- */
-
- /* Note that we still need this function stubbed. */
-
- return -ENOTSUPP;
+ // Aggregation is implemented fully in firmware
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ // Just acknowledge it to enable frame re-ordering
+ return 0;
+ default:
+ // Leave TX aggregation to the firmware
+ return -ENOTSUPP;
+ }
}
int wfx_add_chanctx(struct ieee80211_hw *hw,
@@ -760,17 +720,6 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return -EOPNOTSUPP;
}
- for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
- if (!wdev->vif[i]) {
- wdev->vif[i] = vif;
- wvif->id = i;
- break;
- }
- }
- if (i == ARRAY_SIZE(wdev->vif)) {
- mutex_unlock(&wdev->conf_mutex);
- return -EOPNOTSUPP;
- }
// FIXME: prefer use of container_of() to get vif
wvif->vif = vif;
wvif->wdev = wdev;
@@ -787,12 +736,22 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
init_completion(&wvif->scan_complete);
INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
- mutex_unlock(&wdev->conf_mutex);
+ wfx_tx_queues_init(wvif);
+ wfx_tx_policy_init(wvif);
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ if (!wdev->vif[i]) {
+ wdev->vif[i] = vif;
+ wvif->id = i;
+ break;
+ }
+ }
+ WARN(i == ARRAY_SIZE(wdev->vif), "tried to instantiate more vifs than supported");
hif_set_macaddr(wvif, vif->addr);
- wfx_tx_queues_init(wvif);
- wfx_tx_policy_init(wvif);
+ mutex_unlock(&wdev->conf_mutex);
+
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
// Combo mode does not support Block Acks. We can re-enable them
@@ -824,6 +783,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->vif = NULL;
mutex_unlock(&wdev->conf_mutex);
+
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
// Combo mode does not support Block Acks. We can re-enable them
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index 6b15a64ac9e2..d7b5df5ea4e6 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -2,7 +2,7 @@
/*
* Implementation of mac80211 API.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
#ifndef WFX_STA_H
@@ -25,8 +25,6 @@ int wfx_config(struct ieee80211_hw *hw, u32 changed);
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
-u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
- struct netdev_hw_addr_list *mc_list);
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 unused);
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index d376db2f1891..e34c7a538c65 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -2,7 +2,7 @@
/*
* Tracepoints definitions.
*
- * Copyright (c) 2018-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2018-2020, Silicon Laboratories, Inc.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 38e24d7f72f2..94898680ccde 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -2,7 +2,7 @@
/*
* Common private data for Silicon Labs WFx chips.
*
- * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
@@ -20,7 +20,6 @@
#include "data_tx.h"
#include "main.h"
#include "queue.h"
-#include "secure_link.h"
#include "hif_tx.h"
#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
@@ -41,7 +40,6 @@ struct wfx_dev {
struct completion firmware_ready;
struct hif_ind_startup hw_caps;
struct wfx_hif hif;
- struct sl_context sl;
struct delayed_work cooling_timeout_work;
bool poll_irq;
bool chip_frozen;
@@ -81,9 +79,6 @@ struct wfx_vif {
struct work_struct update_tim_work;
- int filter_mcast_count;
- u8 filter_mcast_addr[8][ETH_ALEN];
-
unsigned long uapsd_mask;
/* avoid some operations in parallel with scan */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 2720f7319a3d..f2a0e16b0318 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -191,9 +191,9 @@ static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
static void hfa384x_usb_throttlefn(struct timer_list *t);
-static void hfa384x_usbctlx_completion_task(unsigned long data);
+static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t);
-static void hfa384x_usbctlx_reaper_task(unsigned long data);
+static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t);
static int hfa384x_usbctlx_submit(struct hfa384x *hw,
struct hfa384x_usbctlx *ctlx);
@@ -539,10 +539,8 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
/* Initialize the authentication queue */
skb_queue_head_init(&hw->authq);
- tasklet_init(&hw->reaper_bh,
- hfa384x_usbctlx_reaper_task, (unsigned long)hw);
- tasklet_init(&hw->completion_bh,
- hfa384x_usbctlx_completion_task, (unsigned long)hw);
+ tasklet_setup(&hw->reaper_bh, hfa384x_usbctlx_reaper_task);
+ tasklet_setup(&hw->completion_bh, hfa384x_usbctlx_completion_task);
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
@@ -2599,9 +2597,9 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
* Interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_reaper_task(unsigned long data)
+static void hfa384x_usbctlx_reaper_task(struct tasklet_struct *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_tasklet(hw, t, reaper_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
@@ -2633,9 +2631,9 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
* Interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_completion_task(unsigned long data)
+static void hfa384x_usbctlx_completion_task(struct tasklet_struct *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_tasklet(hw, t, completion_bh);
struct hfa384x_usbctlx *ctlx, *temp;
unsigned long flags;
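
This is the tree-wide move from the old tasklet_init() callbacks, which received an opaque unsigned long, to tasklet_setup(), whose callbacks receive the tasklet pointer and recover the enclosing structure with from_tasklet(), a wrapper around container_of(). A userspace sketch of the recovery step under that assumption:

#include <stddef.h>
#include <stdio.h>

struct tasklet {
	int state;
};

/* from_tasklet() boils down to container_of() on the embedded member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hw {
	int id;
	struct tasklet reaper_bh;  /* embedded, like hfa384x::reaper_bh */
};

static void reaper_task(struct tasklet *t)
{
	/* recover the device state without an unsigned long cast */
	struct hw *hw = container_of(t, struct hw, reaper_bh);

	printf("running reaper for hw %d\n", hw->id);
}

int main(void)
{
	struct hw hw = { .id = 7 };

	reaper_task(&hw.reaper_bh);  /* what the tasklet core would do */
	return 0;
}

The type-checked callback removes the cast-to-and-from unsigned long that the old API required.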
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 7b091c5a2984..a15abb2c8f54 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -266,15 +266,15 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
/**
* p80211netdev_rx_bh - deferred processing of all received frames
*
- * @arg: pointer to WLAN network device structure (cast to unsigned long)
+ * @t: pointer to the tasklet associated with this handler
*/
-static void p80211netdev_rx_bh(unsigned long arg)
+static void p80211netdev_rx_bh(struct tasklet_struct *t)
{
- struct wlandevice *wlandev = (struct wlandevice *)arg;
+ struct wlandevice *wlandev = from_tasklet(wlandev, t, rx_bh);
struct sk_buff *skb = NULL;
struct net_device *dev = wlandev->netdev;
- /* Let's empty our our queue */
+ /* Let's empty our queue */
while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
if (wlandev->state == WLAN_DEVICE_OPEN) {
if (dev->type != ARPHRD_ETHER) {
@@ -728,8 +728,7 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
/* Set up the rx queue */
skb_queue_head_init(&wlandev->nsd_rxq);
- tasklet_init(&wlandev->rx_bh,
- p80211netdev_rx_bh, (unsigned long)wlandev);
+ tasklet_setup(&wlandev->rx_bh, p80211netdev_rx_bh);
/* Allocate and initialize the wiphy struct */
wiphy = wlan_create_wiphy(physdev, wlandev);
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index a8860d2aee68..a908ff301707 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -228,8 +228,8 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
__le16 wordbuf[17];
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CNFROAMINGMODE,
- HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
+ HFA384x_RID_CNFROAMINGMODE,
+ HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
if (result) {
netdev_err(wlandev->netdev,
"setconfig(ROAMINGMODE) failed. result=%d\n",
@@ -275,8 +275,8 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
}
/* ibss options */
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CREATEIBSS,
- HFA384x_CREATEIBSS_JOINCREATEIBSS);
+ HFA384x_RID_CREATEIBSS,
+ HFA384x_CREATEIBSS_JOINCREATEIBSS);
if (result) {
netdev_err(wlandev->netdev,
"Failed to set CREATEIBSS.\n");
@@ -1167,8 +1167,8 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
if (hw->presniff_port_type != 0) {
word = hw->presniff_port_type;
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CNFPORTTYPE,
- word);
+ HFA384x_RID_CNFPORTTYPE,
+ word);
if (result) {
netdev_dbg
(wlandev->netdev,
@@ -1209,8 +1209,8 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
}
/* Save the wepflags state */
result = hfa384x_drvr_getconfig16(hw,
- HFA384x_RID_CNFWEPFLAGS,
- &hw->presniff_wepflags);
+ HFA384x_RID_CNFWEPFLAGS,
+ &hw->presniff_wepflags);
if (result) {
netdev_dbg
(wlandev->netdev,
@@ -1259,8 +1259,8 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Set the port type to pIbss */
word = HFA384x_PORTTYPE_PSUEDOIBSS;
result = hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CNFPORTTYPE,
- word);
+ HFA384x_RID_CNFPORTTYPE,
+ word);
if (result) {
netdev_dbg
(wlandev->netdev,
@@ -1276,8 +1276,8 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_WEPFLAGS_DISABLE_RXCRYPT;
result =
hfa384x_drvr_setconfig16(hw,
- HFA384x_RID_CNFWEPFLAGS,
- word);
+ HFA384x_RID_CNFWEPFLAGS,
+ word);
}
if (result) {
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 7d7d77b04255..875812a391c9 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -292,7 +292,7 @@ int prism2mgmt_mibset_mibget(struct wlandevice *wlandev, void *msgp)
/*
** Determine if this is a "mibget" or a "mibset". If this is a
** "mibget", then make sure that the MIB may be read. Otherwise,
- ** this is a "mibset" so make make sure that the MIB may be written.
+ ** this is a "mibset" so make sure that the MIB may be written.
*/
isget = (msg->msgcode == DIDMSG_DOT11REQ_MIBGET);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 8f25496188aa..e6dcb687e7a1 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -461,7 +461,7 @@ u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate)
case WLAN_MSD_FWLOAD:
wlandev->msdstate = WLAN_MSD_RUNNING_PENDING;
/* Initialize the device+driver for full
- * operation. Note that this might me an FWLOAD to
+ * operation. Note that this might be an FWLOAD
* to RUNNING transition so we must not do a chip
* or board level reset. Note that on failure,
* the MSD state is set to HWPRESENT because we
@@ -1352,7 +1352,7 @@ void prism2sta_processing_defer(struct work_struct *data)
* we get back in range. We should block transmits and
* receives in this state. Do we need an indication here?
* Probably not since a polling user-mode element would
- * get this status from from p2PortStatus(FD40). What about
+ * get this status from p2PortStatus(FD40). What about
* p80211?
* Response:
* Block Transmits, Ignore receives of data frames
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7b56fe9f1062..f77e5eee6b80 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4529,7 +4529,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_closesession(conn);
break;
}
- ret = 0;
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (conn->cid == cmd->logout_cid) {
@@ -4540,7 +4539,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_samecid(conn);
break;
}
- ret = 0;
} else {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 1c181d31f4c8..f2bd2e207e0b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -611,9 +611,8 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
- MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == ib_dev) ?
- "CLAIMED: IBLOCK" : "CLAIMED: OS");
+ MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
+ "CLAIMED: IBLOCK");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 408bd975170b..bf936bbeccfe 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -131,7 +131,7 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
if (sg_per_table < total_sg_needed)
chain_entry = 1;
- sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
+ sg = kmalloc_array(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
if (!sg)
return -ENOMEM;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9b7592350502..590e6d072228 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -177,9 +177,12 @@ struct tcmu_cmd {
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
uint32_t dbi_cnt;
+ uint32_t dbi_bidi_cnt;
uint32_t dbi_cur;
uint32_t *dbi;
+ uint32_t data_len_bidi;
+
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
@@ -191,7 +194,7 @@ struct tcmu_tmr {
uint8_t tmr_type;
uint32_t tmr_cmd_cnt;
- int16_t tmr_cmd_ids[0];
+ int16_t tmr_cmd_ids[];
};
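
Replacing tmr_cmd_ids[0] with tmr_cmd_ids[] swaps the old GNU zero-length-array idiom for a standard C99 flexible array member, which sizeof, compilers, and bounds checkers understand. A minimal sketch of declaring and allocating such a structure (field names shortened for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct tmr {
	uint8_t  type;
	uint32_t cnt;
	int16_t  ids[];  /* flexible array member, not ids[0] */
};

int main(void)
{
	uint32_t i, n = 3;
	struct tmr *t = calloc(1, sizeof(*t) + n * sizeof(t->ids[0]));

	if (!t)
		return 1;
	t->cnt = n;
	for (i = 0; i < n; i++)
		t->ids[i] = (int16_t)(100 + i);
	printf("tmr holds %u ids, last=%d\n", t->cnt, t->ids[n - 1]);
	free(t);
	return 0;
}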
/*
@@ -242,7 +245,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -436,7 +439,7 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static const struct genl_ops tcmu_genl_ops[] = {
+static const struct genl_small_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -474,8 +477,8 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
- .ops = tcmu_genl_ops,
- .n_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .small_ops = tcmu_genl_ops,
+ .n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
@@ -492,15 +495,16 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
-static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd,
+ int prev_dbi, int *iov_cnt)
{
struct page *page;
int ret, dbi;
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
- return false;
+ return -1;
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
@@ -524,24 +528,30 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
set_bit(dbi, udev->data_bitmap);
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
- return true;
+ if (dbi != prev_dbi + 1)
+ *iov_cnt += 1;
+
+ return dbi;
err_insert:
__free_page(page);
err_alloc:
atomic_dec(&global_db_count);
- return false;
+ return -1;
}
-static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
{
- int i;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+ int i, iov_cnt = 0;
- for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
- if (!tcmu_get_empty_block(udev, tcmu_cmd))
- return false;
+ for (i = 0; i < dbi_cnt; i++) {
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+ if (dbi < 0)
+ return -1;
}
- return true;
+ return iov_cnt;
}
static inline struct page *
@@ -558,25 +568,58 @@ static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
-static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
- size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+ int i, len;
+ struct se_cmd *se_cmd = cmd->se_cmd;
+
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += round_up(se_cmd->t_bidi_data_sg->length,
- DATA_BLOCK_SIZE);
+ for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+ len += se_cmd->t_bidi_data_sg[i].length;
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+ cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+ cmd->data_len_bidi = len;
}
+}
+
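
The accounting in tcmu_cmd_set_block_cnts() above sizes the data area in fixed DATA_BLOCK_SIZE chunks: the Data-Out length is ceiling-divided into blocks, and for BIDI commands the Data-In scatterlist lengths are summed first and counted separately. A standalone sketch of that arithmetic, with an illustrative block size:

#include <stdio.h>

#define DATA_BLOCK_SIZE 4096  /* illustrative; the driver defines its own */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int data_len = 9000;                  /* Data-Out bytes */
	unsigned int bidi_sg_lens[] = { 3000, 2000 };  /* Data-In sg lengths */
	unsigned int i, bidi_len = 0, dbi_cnt, dbi_bidi_cnt;

	dbi_cnt = DIV_ROUND_UP(data_len, DATA_BLOCK_SIZE);       /* 3 */
	for (i = 0; i < 2; i++)
		bidi_len += bidi_sg_lens[i];
	dbi_bidi_cnt = DIV_ROUND_UP(bidi_len, DATA_BLOCK_SIZE);  /* 2 */
	dbi_cnt += dbi_bidi_cnt;

	printf("%u blocks total, %u of them bidi\n", dbi_cnt, dbi_bidi_cnt);
	return 0;
}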
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int prev_dbi, int *remain)
+{
+ /* Get the next dbi */
+ int dbi = tcmu_cmd_get_dbi(cmd);
+ /* Do not add more than DATA_BLOCK_SIZE to iov */
+ int len = min_t(int, DATA_BLOCK_SIZE, *remain);
- return data_length;
+ *remain -= len;
+ /*
+ * The following code will gather and map the blocks to the same iovec
+ * when the blocks are all next to each other.
+ */
+ if (dbi != prev_dbi + 1) {
+ /* dbi is not next to previous dbi, so start new iov */
+ if (prev_dbi >= 0)
+ (*iov)++;
+ /* write offset relative to mb_addr */
+ (*iov)->iov_base = (void __user *)
+ (udev->data_off + dbi * DATA_BLOCK_SIZE);
+ }
+ (*iov)->iov_len += len;
+
+ return dbi;
}
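
new_block_to_iov() above coalesces data blocks that land consecutively in the ring into one iovec and opens a new one only when the block index jumps; seeding prev_dbi with -2 guarantees the first block never looks adjacent to its predecessor. A userspace sketch of that coalescing rule, with hypothetical block indices:

#include <stdio.h>

#define BLOCK_SIZE 4096  /* illustrative */

struct iovec_s {
	long base;
	long len;
};

int main(void)
{
	int dbis[] = { 0, 1, 5 };  /* blocks 0 and 1 are adjacent, 5 is not */
	struct iovec_s iov[3] = { { 0, 0 } };
	int i, cnt = 0, prev_dbi = -2;  /* prev_dbi + 1 is never a valid dbi */

	for (i = 0; i < 3; i++) {
		if (dbis[i] != prev_dbi + 1) {
			if (prev_dbi >= 0)
				cnt++;  /* index jumped: start a new iovec */
			iov[cnt].base = (long)dbis[i] * BLOCK_SIZE;
		}
		iov[cnt].len += BLOCK_SIZE;  /* extend the current iovec */
		prev_dbi = dbis[i];
	}
	for (i = 0; i <= cnt; i++)
		printf("iov[%d]: off=%ld len=%ld\n", i, iov[i].base,
		       iov[i].len);
	return 0;
}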
-static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int data_length)
{
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
- return data_length / DATA_BLOCK_SIZE;
+ /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+ while (data_length > 0)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -593,8 +636,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
+ tcmu_cmd_set_block_cnts(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
GFP_NOIO);
if (!tcmu_cmd->dbi) {
@@ -644,46 +686,22 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
-static inline void new_iov(struct iovec **iov, int *iov_cnt)
-{
- struct iovec *iovec;
-
- if (*iov_cnt != 0)
- (*iov)++;
- (*iov_cnt)++;
-
- iovec = *iov;
- memset(iovec, 0, sizeof(struct iovec));
-}
-
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-/* offset is relative to mb_addr */
-static inline size_t get_block_offset_user(struct tcmu_dev *dev,
- int dbi, int remaining)
-{
- return dev->data_off + dbi * DATA_BLOCK_SIZE +
- DATA_BLOCK_SIZE - remaining;
-}
-
-static inline size_t iov_tail(struct iovec *iov)
-{
- return (size_t)iov->iov_base + iov->iov_len;
-}
-
-static void scatter_data_area(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
- unsigned int data_nents, struct iovec **iov,
- int *iov_cnt, bool copy_data)
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
{
- int i, dbi;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int i, dbi = -2;
int block_remaining = 0;
+ int data_len = se_cmd->data_length;
void *from, *to = NULL;
- size_t copy_bytes, to_offset, offset;
+ size_t copy_bytes, offset;
struct scatterlist *sg;
- struct page *page;
+ struct page *page = NULL;
- for_each_sg(data_sg, sg, data_nents, i) {
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
@@ -693,50 +711,19 @@ static void scatter_data_area(struct tcmu_dev *udev,
kunmap_atomic(to);
}
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+ /* get next dbi and add to IOVs */
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ &data_len);
page = tcmu_get_block_page(udev, dbi);
to = kmap_atomic(page);
+ block_remaining = DATA_BLOCK_SIZE;
}
- /*
- * Covert to virtual offset of the ring data area.
- */
- to_offset = get_block_offset_user(udev, dbi,
- block_remaining);
-
- /*
- * The following code will gather and map the blocks
- * to the same iovec when the blocks are all next to
- * each other.
- */
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
- if (*iov_cnt != 0 &&
- to_offset == iov_tail(*iov)) {
- /*
- * Will append to the current iovec, because
- * the current block page is next to the
- * previous one.
- */
- (*iov)->iov_len += copy_bytes;
- } else {
- /*
- * Will allocate a new iovec because we are
- * first time here or the current block page
- * is not next to the previous one.
- */
- new_iov(iov, iov_cnt);
- (*iov)->iov_base = (void __user *)to_offset;
- (*iov)->iov_len = copy_bytes;
- }
-
- if (copy_data) {
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset,
- from + sg->length - sg_remaining,
- copy_bytes);
- }
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset, from + sg->length - sg_remaining,
+ copy_bytes);
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
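
The rewritten copy loop advances two independent byte counters, one for the current scatterlist entry and one for the current data block, copies min(sg_remaining, block_remaining) per step, and fetches a fresh block only when the block counter reaches zero. A compact userspace model of that two-counter walk, with tiny sizes so the stepping is visible:

#include <stdio.h>
#include <string.h>

#define BLOCK 8  /* tiny block size, for illustration only */

int main(void)
{
	const char *sgs[] = { "0123456789", "abcde" };  /* two sg entries */
	char blocks[4][BLOCK];
	int i, bi = -1, block_rem = 0;

	for (i = 0; i < 2; i++) {
		int len = (int)strlen(sgs[i]);
		int sg_rem = len;

		while (sg_rem > 0) {
			if (!block_rem) {  /* current block full: fetch next */
				bi++;
				block_rem = BLOCK;
			}
			int n = sg_rem < block_rem ? sg_rem : block_rem;

			memcpy(blocks[bi] + (BLOCK - block_rem),
			       sgs[i] + len - sg_rem, n);
			sg_rem -= n;
			block_rem -= n;
		}
	}
	printf("copied into %d blocks\n", bi + 1);
	return 0;
}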
@@ -767,13 +754,12 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents;
} else {
-
/*
* For bidi case, the first count blocks are for Data-Out
* buffer blocks, and before gathering the Data-In buffer
- * the Data-Out buffer blocks should be discarded.
+ * the Data-Out buffer blocks should be skipped.
*/
- count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
@@ -821,17 +807,13 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
}
/*
- * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data area.
+ * We can't queue a command until we have space available on the cmd ring.
*
* Called with ring lock held.
*/
-static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- size_t cmd_size, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_mailbox *mb = udev->mb_addr;
- uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
- / DATA_BLOCK_SIZE;
size_t space, cmd_needed;
u32 cmd_head;
@@ -854,29 +836,54 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->cmdr_last_cleaned, udev->cmdr_size);
return false;
}
+ return true;
+}
- if (!data_needed)
- return true;
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or number of needed iovs on success
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ int *iov_bidi_cnt)
+{
+ int space, iov_cnt = 0, ret = 0;
+
+ if (!cmd->dbi_cnt)
+ goto wr_iov_cnts;
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
- if ((space * DATA_BLOCK_SIZE) < data_needed) {
+ if (space < cmd->dbi_cnt) {
unsigned long blocks_left =
(udev->max_blocks - udev->dbi_thresh) + space;
- if (blocks_left < blocks_needed) {
- pr_debug("no data space: only %lu available, but ask for %zu\n",
+ if (blocks_left < cmd->dbi_cnt) {
+ pr_debug("no data space: only %lu available, but ask for %lu\n",
blocks_left * DATA_BLOCK_SIZE,
- data_needed);
- return false;
+ cmd->dbi_cnt * DATA_BLOCK_SIZE);
+ return -1;
}
- udev->dbi_thresh += blocks_needed;
+ udev->dbi_thresh += cmd->dbi_cnt;
if (udev->dbi_thresh > udev->max_blocks)
udev->dbi_thresh = udev->max_blocks;
}
- return tcmu_get_empty_blocks(udev, cmd);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd,
+ cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+ if (iov_cnt < 0)
+ return -1;
+
+ if (cmd->dbi_bidi_cnt) {
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+ if (ret < 0)
+ return -1;
+ }
+wr_iov_cnts:
+ *iov_bidi_cnt = ret;
+ return iov_cnt + ret;
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -986,11 +993,11 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, cmd_id;
+ int iov_cnt, iov_bidi_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
- bool copy_to_data_area;
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* size of data buffer needed */
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
*scsi_err = TCM_NO_SENSE;
@@ -1004,42 +1011,54 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return -1;
}
+ if (!list_empty(&udev->qfull_queue))
+ goto queue;
+
+ if (data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+ data_length, udev->data_size);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+ if (iov_cnt < 0)
+ goto free_and_queue;
+
/*
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
- *
- * We prepare as many iovs as possbile for potential uses here,
- * because it's expensive to tell how many regions are freed in
- * the bitmap & global data pool, as the size calculated here
- * will only be used to do the checks.
- *
- * The size will be recalculated later as actually needed to save
- * cmd area memories.
*/
- base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
+ base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->qfull_queue))
- goto queue;
-
- if ((command_size > (udev->cmdr_size / 2)) ||
- data_length > udev->data_size) {
- pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
- "cmd ring/data area\n", command_size, data_length,
- udev->cmdr_size, udev->data_size);
+ if (command_size > (udev->cmdr_size / 2)) {
+ pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+ command_size, udev->cmdr_size);
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
- if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+ if (!is_ring_space_avail(udev, command_size))
/*
* Don't leave commands partially setup because the unmap
* thread might need the blocks to make forward progress.
*/
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- goto queue;
+ goto free_and_queue;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
cmd_head = ring_insert_padding(udev, command_size);
@@ -1047,52 +1066,29 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- /* Handle allocating space from the data area */
+ /* prepare iov list and copy data to data area if necessary */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
iov = &entry->req.iov[0];
- iov_cnt = 0;
- copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
- || se_cmd->se_cmd_flags & SCF_BIDI);
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, &iov, &iov_cnt,
- copy_to_data_area);
- entry->req.iov_cnt = iov_cnt;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE ||
+ se_cmd->se_cmd_flags & SCF_BIDI)
+ scatter_data_area(udev, tcmu_cmd, &iov);
+ else
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+ entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
/* Handle BIDI commands */
- iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
- false);
- }
- entry->req.iov_bidi_cnt = iov_cnt;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
-
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- *scsi_err = TCM_OUT_OF_RESOURCES;
- return -1;
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+ entry->req.iov_bidi_cnt = iov_bidi_cnt;
}
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
- tcmu_cmd, udev->name);
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- /*
- * Recalaulate the command's base size and size according
- * to the actual needs
- */
- base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
- entry->req.iov_bidi_cnt);
- command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
-
tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
/* All offsets relative to mb_addr, not start of entry! */
@@ -1111,6 +1107,10 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return 0;
+free_and_queue:
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+
queue:
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
@@ -1145,7 +1145,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
if (!list_empty(&udev->tmr_queue) ||
- !is_ring_space_avail(udev, NULL, cmd_size, 0)) {
+ !is_ring_space_avail(udev, cmd_size)) {
list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
tmr, udev->name);
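
The tcmu hunks above move the data-area sizing to a simple per-block computation: a command now needs exactly dbi_cnt blocks of DATA_BLOCK_SIZE, checked once before any ring setup. A minimal standalone sketch of that arithmetic (the 4096-byte block size and the sample sizes are assumptions for illustration, not values taken from this diff):

    #include <stdio.h>
    #include <stddef.h>

    #define DATA_BLOCK_SIZE 4096 /* assumed block size */

    /* Mirror of the simplified size check in queue_cmd_ring(). */
    static int check_data_space(size_t dbi_cnt, size_t data_size)
    {
        size_t data_length = dbi_cnt * DATA_BLOCK_SIZE;

        if (data_length > data_size) {
            fprintf(stderr, "request of %zu bytes too big for %zu data area\n",
                    data_length, data_size);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        /* e.g. 8 data blocks against a 1 MiB data area */
        return check_data_space(8, 1024 * 1024) ? 1 : 0;
    }
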
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index b373b1b08b6d..cf4718c6d35d 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -216,6 +216,8 @@ static void optee_get_version(struct tee_device *teedev,
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
*vers = v;
}
@@ -262,6 +264,11 @@ static int optee_open(struct tee_context *ctx)
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
+ if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ ctx->cap_memref_null = true;
+ else
+ ctx->cap_memref_null = false;
+
ctx->data = ctxdata;
return 0;
}
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 795bc19ae17a..7b2d919da2ac 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -419,4 +419,25 @@ struct optee_msg_arg {
*/
#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
+/*
+ * Access a device on an i2c bus
+ *
+ * [in] param[0].u.value.a mode: RD(0), WR(1)
+ * [in] param[0].u.value.b i2c adapter
+ * [in] param[0].u.value.c i2c chip
+ *
+ * [in] param[1].u.value.a i2c control flags
+ *
+ * [in/out] memref[2] buffer to exchange the transfer data
+ * with the secure world
+ *
+ * [out] param[3].u.value.a bytes transferred by the driver
+ */
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER 21
+/* I2C master transfer modes */
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD 0
+#define OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR 1
+/* I2C master control flags */
+#define OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT BIT(0)
+
#endif /* _OPTEE_MSG_H */
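
The comment block above documents the parameter layout of the new I2C transfer RPC. As a hedged sketch of how the four parameters line up for a read transfer, with simplified stand-in types (the real optee_msg_param layout and the helper name here are assumptions):

    /* Simplified stand-ins for the documented param layout. */
    struct value { unsigned long a, b, c; };

    struct i2c_rpc_params {
        struct value mode_adap_chip; /* param[0]: mode / adapter / chip */
        struct value flags;          /* param[1]: control flags */
        void *xfer_buf;              /* param[2]: memref, simplified */
        struct value bytes_done;     /* param[3]: output */
    };

    /* Fill a read of two bytes from chip 0x50 on adapter 0. */
    static void fill_i2c_read(struct i2c_rpc_params *p, void *buf)
    {
        p->mode_adap_chip.a = 0;    /* OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD */
        p->mode_adap_chip.b = 0;    /* i2c adapter number */
        p->mode_adap_chip.c = 0x50; /* i2c chip address */
        p->flags.a = 0;             /* no ten-bit addressing */
        p->xfer_buf = buf;          /* 2-byte buffer shared with normal world */
    }
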
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 8b71839a357e..e25b216a14ef 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -17,6 +17,7 @@
/* Some Global Platform error codes used in this driver */
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index c72122d9c997..777ad54d4c2c 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -215,6 +215,9 @@ struct optee_smc_get_shm_config_result {
*/
#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM BIT(2)
+/* Secure world supports Shared Memory with a NULL buffer reference */
+#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index b4ade54d1f28..1e3614e4798f 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
@@ -49,6 +50,97 @@ bad:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
+#if IS_REACHABLE(CONFIG_I2C)
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct i2c_client client = { 0 };
+ struct tee_param *params;
+ size_t i;
+ int ret = -EOPNOTSUPP;
+ u8 attr[] = {
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT,
+ };
+
+ if (arg->num_params != ARRAY_SIZE(attr)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee_from_msg_param(params, arg->num_params, arg->params))
+ goto bad;
+
+ for (i = 0; i < arg->num_params; i++) {
+ if (params[i].attr != attr[i])
+ goto bad;
+ }
+
+ client.adapter = i2c_get_adapter(params[0].u.value.b);
+ if (!client.adapter)
+ goto bad;
+
+ if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) {
+ if (!i2c_check_functionality(client.adapter,
+ I2C_FUNC_10BIT_ADDR)) {
+ i2c_put_adapter(client.adapter);
+ goto bad;
+ }
+
+ client.flags = I2C_CLIENT_TEN;
+ }
+
+ client.addr = params[0].u.value.c;
+ snprintf(client.name, I2C_NAME_SIZE, "i2c%d", client.adapter->nr);
+
+ switch (params[0].u.value.a) {
+ case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD:
+ ret = i2c_master_recv(&client, params[2].u.memref.shm->kaddr,
+ params[2].u.memref.size);
+ break;
+ case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR:
+ ret = i2c_master_send(&client, params[2].u.memref.shm->kaddr,
+ params[2].u.memref.size);
+ break;
+ default:
+ i2c_put_adapter(client.adapter);
+ goto bad;
+ }
+
+ if (ret < 0) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+ params[3].u.value.a = ret;
+ if (optee_to_msg_param(arg->params, arg->num_params, params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ else
+ arg->ret = TEEC_SUCCESS;
+ }
+
+ i2c_put_adapter(client.adapter);
+ kfree(params);
+ return;
+bad:
+ kfree(params);
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+#else
+static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ arg->ret = TEEC_ERROR_NOT_SUPPORTED;
+}
+#endif
+
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
struct wq_entry *w;
@@ -382,6 +474,9 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_MSG_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
break;
+ case OPTEE_MSG_RPC_CMD_I2C_TRANSFER:
+ handle_rpc_func_cmd_i2c_transfer(ctx, arg);
+ break;
default:
handle_rpc_supp_cmd(ctx, arg);
}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 64637e09a095..6ade4a5c4840 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -200,7 +200,8 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
int name_len;
int rc;
- if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
+ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
+ connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
/* Nil UUID to be passed to TEE environment */
uuid_copy(uuid, &uuid_null);
return 0;
@@ -383,25 +384,38 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
/*
- * If we fail to get a pointer to a shared memory
- * object (and increase the ref count) from an
- * identifier we return an error. All pointers that
- * has been added in params have an increased ref
- * count. It's the callers responibility to do
- * tee_shm_put() on all resolved pointers.
+ * If a NULL pointer is passed to a TA in the TEE,
+ * the ip.c IOCTL parameter field is set to TEE_MEMREF_NULL
+ * indicating a NULL memory reference.
*/
- shm = tee_shm_get_from_id(ctx, ip.c);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
-
- /*
- * Ensure offset + size does not overflow offset
- * and does not overflow the size of the referred
- * shared memory object.
- */
- if ((ip.a + ip.b) < ip.a ||
- (ip.a + ip.b) > shm->size) {
- tee_shm_put(shm);
+ if (ip.c != TEE_MEMREF_NULL) {
+ /*
+ * If we fail to get a pointer to a shared
+ * memory object (and increase the ref count)
+ * from an identifier we return an error. All
+ * pointers that have been added to params have
+ * an increased ref count. It's the caller's
+ * responsibility to do tee_shm_put() on all
+ * resolved pointers.
+ */
+ shm = tee_shm_get_from_id(ctx, ip.c);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ /*
+ * Ensure offset + size does not overflow
+ * offset and does not overflow the size of
+ * the referred shared memory object.
+ */
+ if ((ip.a + ip.b) < ip.a ||
+ (ip.a + ip.b) > shm->size) {
+ tee_shm_put(shm);
+ return -EINVAL;
+ }
+ } else if (ctx->cap_memref_null) {
+ /* Pass NULL pointer to OP-TEE */
+ shm = NULL;
+ } else {
return -EINVAL;
}
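
With this change a client can hand a NULL memory reference to a trusted application when the context advertises cap_memref_null. A hedged userspace sketch of how such a parameter would be encoded (the attribute value follows the TEE uapi convention, the sentinel definition and the surrounding ioctl plumbing are assumptions):

    #include <stdint.h>

    #define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5 /* per TEE uapi; illustrative */
    #define TEE_MEMREF_NULL ((uint64_t)-1)           /* assumed sentinel */

    /* Simplified mirror of the ioctl param triplet used above. */
    struct ioctl_param { uint64_t attr, a, b, c; };

    static void set_null_memref(struct ioctl_param *p)
    {
        p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
        p->a = 0;               /* offset: unused for a NULL reference */
        p->b = 0;               /* size: zero length */
        p->c = TEE_MEMREF_NULL; /* no shm id: NULL memory reference */
    }
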
@@ -917,7 +931,6 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
cdev_init(&teedev->cdev, &tee_fops);
teedev->cdev.owner = teedesc->owner;
- teedev->cdev.kobj.parent = &teedev->dev.kobj;
dev_set_drvdata(&teedev->dev, driver_data);
device_initialize(&teedev->dev);
@@ -963,9 +976,7 @@ static struct attribute *tee_dev_attrs[] = {
NULL
};
-static const struct attribute_group tee_dev_group = {
- .attrs = tee_dev_attrs,
-};
+ATTRIBUTE_GROUPS(tee_dev);
/**
* tee_device_register() - Registers a TEE device
@@ -985,39 +996,19 @@ int tee_device_register(struct tee_device *teedev)
return -EINVAL;
}
- rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
- if (rc) {
- dev_err(&teedev->dev,
- "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
- teedev->name, MAJOR(teedev->dev.devt),
- MINOR(teedev->dev.devt), rc);
- return rc;
- }
+ teedev->dev.groups = tee_dev_groups;
- rc = device_add(&teedev->dev);
+ rc = cdev_device_add(&teedev->cdev, &teedev->dev);
if (rc) {
dev_err(&teedev->dev,
- "unable to device_add() %s, major %d, minor %d, err=%d\n",
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
teedev->name, MAJOR(teedev->dev.devt),
MINOR(teedev->dev.devt), rc);
- goto err_device_add;
- }
-
- rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
- if (rc) {
- dev_err(&teedev->dev,
- "failed to create sysfs attributes, err=%d\n", rc);
- goto err_sysfs_create_group;
+ return rc;
}
teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
return 0;
-
-err_sysfs_create_group:
- device_del(&teedev->dev);
-err_device_add:
- cdev_del(&teedev->cdev);
- return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register);
@@ -1060,11 +1051,8 @@ void tee_device_unregister(struct tee_device *teedev)
if (!teedev)
return;
- if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
- sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
- cdev_del(&teedev->cdev);
- device_del(&teedev->dev);
- }
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
+ cdev_device_del(&teedev->cdev, &teedev->dev);
tee_device_put(teedev);
wait_for_completion(&teedev->c_no_users);
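
The tee_core.c changes above collapse the cdev_add()/device_add()/sysfs_create_group() sequence into attribute groups plus a single cdev_device_add() call, which removes the partial-failure unwinding. A minimal sketch of the resulting pattern, mirroring the diff (illustrative only):

    static int register_teedev(struct tee_device *teedev)
    {
        int rc;

        /* Attach sysfs attributes before the device becomes visible. */
        teedev->dev.groups = tee_dev_groups; /* from ATTRIBUTE_GROUPS() */

        /* Adds the cdev and the device together; nothing to unwind. */
        rc = cdev_device_add(&teedev->cdev, &teedev->dev);
        if (rc)
            return rc;

        teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
        return 0;
    }
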
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 827ac3d0fea9..00472f5ce22e 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -12,6 +12,22 @@
#include <linux/uio.h>
#include "tee_private.h"
+static void release_registered_pages(struct tee_shm *shm)
+{
+ if (shm->pages) {
+ if (shm->flags & TEE_SHM_USER_MAPPED) {
+ unpin_user_pages(shm->pages, shm->num_pages);
+ } else {
+ size_t n;
+
+ for (n = 0; n < shm->num_pages; n++)
+ put_page(shm->pages[n]);
+ }
+
+ kfree(shm->pages);
+ }
+}
+
static void tee_shm_release(struct tee_shm *shm)
{
struct tee_device *teedev = shm->ctx->teedev;
@@ -32,17 +48,13 @@ static void tee_shm_release(struct tee_shm *shm)
poolm->ops->free(poolm, shm);
} else if (shm->flags & TEE_SHM_REGISTER) {
- size_t n;
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
if (rc)
dev_err(teedev->dev.parent,
"unregister shm %p failed: %d", shm, rc);
- for (n = 0; n < shm->num_pages; n++)
- put_page(shm->pages[n]);
-
- kfree(shm->pages);
+ release_registered_pages(shm);
}
teedev_ctx_put(shm->ctx);
@@ -228,7 +240,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
if (flags & TEE_SHM_USER_MAPPED) {
- rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
+ rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
shm->pages);
} else {
struct kvec *kiov;
@@ -292,18 +304,12 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
return shm;
err:
if (shm) {
- size_t n;
-
if (shm->id >= 0) {
mutex_lock(&teedev->mutex);
idr_remove(&teedev->idr, shm->id);
mutex_unlock(&teedev->mutex);
}
- if (shm->pages) {
- for (n = 0; n < shm->num_pages; n++)
- put_page(shm->pages[n]);
- kfree(shm->pages);
- }
+ release_registered_pages(shm);
}
kfree(shm);
teedev_ctx_put(ctx);
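
The tee_shm.c hunks above switch user mappings from get_user_pages_fast() to pin_user_pages_fast(), so the release path must use unpin_user_pages() for pinned pages and put_page() only for the kernel-buffer case. A short sketch of the pairing convention (illustrative; error handling trimmed):

    static int pin_buffer(unsigned long start, int num_pages,
                          struct page **pages)
    {
        int rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE, pages);

        if (rc < 0)
            return rc;
        if (rc != num_pages) {
            /* Partial pin: release what was pinned and bail out. */
            unpin_user_pages(pages, rc);
            return -ENOMEM;
        }
        return 0;
    }
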
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index b668224f906d..7edc8dc6bbab 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -346,13 +346,13 @@ config RCAR_THERMAL
thermal framework.
config RCAR_GEN3_THERMAL
- tristate "Renesas R-Car Gen3 thermal driver"
+ tristate "Renesas R-Car Gen3 and RZ/G2 thermal driver"
depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
help
- Enable this to plug the R-Car Gen3 thermal sensor driver into the Linux
- thermal framework.
+ Enable this to plug the R-Car Gen3 or RZ/G2 thermal sensor driver into
+ the Linux thermal framework.
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 6cf23a54e853..cc2959f22f01 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -182,7 +182,6 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
/**
* cpufreq_get_requested_power() - get the current power
* @cdev: &thermal_cooling_device pointer
- * @tz: a valid thermal zone device pointer
* @power: pointer in which to store the resulting power
*
* Calculate the current power consumption of the cpus in milliwatts
@@ -203,7 +202,6 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
* Return: 0 on success, -E* if getting the static power failed.
*/
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
u32 *power)
{
unsigned long freq;
@@ -253,7 +251,6 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
/**
* cpufreq_state2power() - convert a cpu cdev state to power consumed
* @cdev: &thermal_cooling_device pointer
- * @tz: a valid thermal zone device pointer
* @state: cooling device state to be converted
* @power: pointer in which to store the resulting power
*
@@ -266,7 +263,6 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
* when calculating the static power.
*/
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus, idx;
@@ -288,7 +284,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
/**
* cpufreq_power2state() - convert power to a cooling device state
* @cdev: &thermal_cooling_device pointer
- * @tz: a valid thermal zone device pointer
* @power: power in milliwatts to be converted
* @state: pointer in which to store the resulting state
*
@@ -306,8 +301,7 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
* device.
*/
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 power,
- unsigned long *state)
+ u32 power, unsigned long *state)
{
unsigned int target_freq;
u32 last_load, normalised_power;
diff --git a/drivers/thermal/cpuidle_cooling.c b/drivers/thermal/cpuidle_cooling.c
index 78e3e8238116..7ecab4b16b29 100644
--- a/drivers/thermal/cpuidle_cooling.c
+++ b/drivers/thermal/cpuidle_cooling.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(cpuidle_ida);
/**
* cpuidle_cooling_runtime - Running time computation
- * @idle_duration_us: the idle cooling device
+ * @idle_duration_us: CPU idle time to inject in microseconds
* @state: a percentile based number
*
* The running duration is computed from the idle injection duration
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index a12d29096229..dfab49a67252 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -229,7 +229,6 @@ static inline unsigned long get_total_power(struct devfreq_cooling_device *dfc,
static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
u32 *power)
{
struct devfreq_cooling_device *dfc = cdev->devdata;
@@ -289,7 +288,6 @@ fail:
}
static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
unsigned long state,
u32 *power)
{
@@ -308,7 +306,6 @@ static int devfreq_cooling_state2power(struct thermal_cooling_device *cdev,
}
static int devfreq_cooling_power2state(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz,
u32 power, unsigned long *state)
{
struct devfreq_cooling_device *dfc = cdev->devdata;
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 5cb518d8f156..ab0be26f0816 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -96,7 +96,7 @@ static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
if (instance->trip != params->trip_max_desired_temperature)
continue;
- if (power_actor_get_min_power(cdev, tz, &min_power))
+ if (power_actor_get_min_power(cdev, &min_power))
continue;
sustainable_power += min_power;
@@ -388,7 +388,7 @@ static int allocate_power(struct thermal_zone_device *tz,
if (!cdev_is_power_actor(cdev))
continue;
- if (cdev->ops->get_requested_power(cdev, tz, &req_power[i]))
+ if (cdev->ops->get_requested_power(cdev, &req_power[i]))
continue;
if (!total_weight)
@@ -398,7 +398,7 @@ static int allocate_power(struct thermal_zone_device *tz,
weighted_req_power[i] = frac_to_int(weight * req_power[i]);
- if (power_actor_get_max_power(cdev, tz, &max_power[i]))
+ if (power_actor_get_max_power(cdev, &max_power[i]))
continue;
total_req_power += req_power[i];
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index f5124f14cf81..a1e4f9bb4cb0 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -146,13 +146,9 @@ static int imx8mm_tmu_probe(struct platform_device *pdev)
return PTR_ERR(tmu->base);
tmu->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tmu->clk)) {
- ret = PTR_ERR(tmu->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get tmu clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(tmu->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tmu->clk),
+ "failed to get tmu clock\n");
ret = clk_prepare_enable(tmu->clk);
if (ret) {
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 3f74ab4c1ab9..2c7473d86a59 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -716,14 +716,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (of_find_property(pdev->dev.of_node, "nvmem-cells", NULL)) {
ret = imx_init_from_nvmem_cells(pdev);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- return ret;
-
- dev_err(&pdev->dev, "failed to init from nvmem: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init from nvmem\n");
} else {
ret = imx_init_from_tempmon_data(pdev);
if (ret) {
@@ -746,14 +741,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
data->socdata->power_down_mask);
ret = imx_thermal_register_legacy_cooling(data);
- if (ret) {
- if (ret == -EPROBE_DEFER)
- return ret;
-
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register cpufreq cooling device\n");
data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->thermal_clk)) {
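
Both imx probe paths above adopt the dev_err_probe() helper, which stays quiet for -EPROBE_DEFER (recording the deferral reason instead) and logs and returns the error otherwise. A minimal sketch of the idiom with a hypothetical clock lookup:

    static int get_tmu_clock(struct platform_device *pdev, struct clk **clk)
    {
        *clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(*clk))
            /* One line replaces the old "if (ret != -EPROBE_DEFER)
             * dev_err(...)" dance. */
            return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
                                 "failed to get clock\n");
        return 0;
    }
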
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 4f5859d4c780..0966551cbaaa 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -14,6 +14,7 @@
#define INT3400_THERMAL_TABLE_CHANGED 0x83
#define INT3400_ODVP_CHANGED 0x88
+#define INT3400_KEEP_ALIVE 0xA0
enum int3400_thermal_uuid {
INT3400_THERMAL_PASSIVE_1,
@@ -83,8 +84,33 @@ static struct bin_attribute *data_attributes[] = {
NULL,
};
+static ssize_t imok_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+ acpi_status status;
+ int input, ret;
+
+ ret = kstrtouint(buf, 10, &input);
+ if (ret)
+ return ret;
+ status = acpi_execute_simple_method(priv->adev->handle, "IMOK", input);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(imok);
+
+static struct attribute *imok_attr[] = {
+ &dev_attr_imok.attr,
+ NULL
+};
+
static const struct attribute_group data_attribute_group = {
.bin_attrs = data_attributes,
+ .attrs = imok_attr,
};
static ssize_t available_uuids_show(struct device *dev,
@@ -349,30 +375,33 @@ static void int3400_notify(acpi_handle handle,
{
struct int3400_thermal_priv *priv = data;
char *thermal_prop[5];
+ int therm_event;
if (!priv)
return;
switch (event) {
case INT3400_THERMAL_TABLE_CHANGED:
- thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s",
- priv->thermal->type);
- thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d",
- priv->thermal->temperature);
- thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=");
- thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d",
- THERMAL_TABLE_CHANGED);
- thermal_prop[4] = NULL;
- kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE,
- thermal_prop);
+ therm_event = THERMAL_TABLE_CHANGED;
+ break;
+ case INT3400_KEEP_ALIVE:
+ therm_event = THERMAL_EVENT_KEEP_ALIVE;
break;
case INT3400_ODVP_CHANGED:
evaluate_odvp(priv);
+ therm_event = THERMAL_DEVICE_POWER_CAPABILITY_CHANGED;
break;
default:
/* Ignore unknown notification codes sent to INT3400 device */
- break;
+ return;
}
+
+ thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", priv->thermal->type);
+ thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", priv->thermal->temperature);
+ thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=");
+ thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
+ thermal_prop[4] = NULL;
+ kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
}
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
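
The new imok attribute forwards a user-supplied integer to the ACPI IMOK method. A hedged userspace sketch of exercising it (the sysfs path depends on platform enumeration and is an assumption):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        /* Hypothetical device path for the INT3400 platform device. */
        int fd = open("/sys/bus/platform/devices/INT3400:00/imok", O_WRONLY);

        if (fd < 0)
            return 1;
        if (write(fd, "1", 1) != 1) { /* evaluates IMOK with argument 1 */
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
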
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 787710bb88fe..5c2a13bf249c 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -546,11 +546,11 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (ret < 0)
goto error_unregister;
- if (chip->use_of_thermal)
+ if (chip->use_of_thermal) {
priv->zone = devm_thermal_zone_of_sensor_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
- else {
+ } else {
priv->zone = thermal_zone_device_register(
"rcar_thermal",
1, 0, priv,
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index 3c3b695cc3e9..58ece381956b 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -23,5 +23,5 @@ config STM32_THERMAL
help
Support for thermal framework on STMicroelectronics STM32 series of
SoCs. This thermal driver allows to access to general thermal framework
- functionalities and to acces to SoC sensor functionalities. This
+ functionalities and to access to SoC sensor functionalities. This
configuration is fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 331e2b768df5..5fd3fb8912a6 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -446,14 +446,9 @@ thermal_unprepare:
#ifdef CONFIG_PM_SLEEP
static int stm_thermal_suspend(struct device *dev)
{
- int ret;
struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
- ret = stm_thermal_sensor_off(sensor);
- if (ret)
- return ret;
-
- return 0;
+ return stm_thermal_sensor_off(sensor);
}
static int stm_thermal_resume(struct device *dev)
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 74d73be16496..f8b13071a6f4 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -244,7 +244,7 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
for (i = 0; i < tmdev->chip->sensor_num; i++) {
- int sensor_reg = caldata[i + 1];
+ int sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
int cdata, offset;
int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
@@ -590,6 +590,19 @@ static const struct ths_thermal_chip sun50i_a64_ths = {
.calc_temp = sun8i_ths_calc_temp,
};
+static const struct ths_thermal_chip sun50i_a100_ths = {
+ .sensor_num = 3,
+ .has_bus_clk_reset = true,
+ .ft_deviation = 8000,
+ .offset = 187744,
+ .scale = 672,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
static const struct ths_thermal_chip sun50i_h5_ths = {
.sensor_num = 2,
.has_mod_clk = true,
@@ -619,6 +632,7 @@ static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
{ .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths },
{ .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths },
+ { .compatible = "allwinner,sun50i-a100-ths", .data = &sun50i_a100_ths },
{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
{ /* sentinel */ },
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a6616e530a84..c6d74bc1c90b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -603,7 +603,6 @@ static void thermal_zone_device_check(struct work_struct *work)
/**
* power_actor_get_max_power() - get the maximum power that a cdev can consume
* @cdev: pointer to &thermal_cooling_device
- * @tz: a valid thermal zone device pointer
* @max_power: pointer in which to store the maximum power
*
* Calculate the maximum power consumption in milliwatts that the
@@ -613,18 +612,17 @@ static void thermal_zone_device_check(struct work_struct *work)
* power_actor API or -E* on other error.
*/
int power_actor_get_max_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *max_power)
+ u32 *max_power)
{
if (!cdev_is_power_actor(cdev))
return -EINVAL;
- return cdev->ops->state2power(cdev, tz, 0, max_power);
+ return cdev->ops->state2power(cdev, 0, max_power);
}
/**
* power_actor_get_min_power() - get the minimum power that a cdev can consume
* @cdev: pointer to &thermal_cooling_device
- * @tz: a valid thermal zone device pointer
* @min_power: pointer in which to store the minimum power
*
* Calculate the minimum power consumption in milliwatts that the
@@ -634,7 +632,7 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev,
* power_actor API or -E* on other error.
*/
int power_actor_get_min_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *min_power)
+ u32 *min_power)
{
unsigned long max_state;
int ret;
@@ -646,7 +644,7 @@ int power_actor_get_min_power(struct thermal_cooling_device *cdev,
if (ret)
return ret;
- return cdev->ops->state2power(cdev, tz, max_state, min_power);
+ return cdev->ops->state2power(cdev, max_state, min_power);
}
/**
@@ -670,7 +668,7 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
if (!cdev_is_power_actor(cdev))
return -EINVAL;
- ret = cdev->ops->power2state(cdev, instance->tz, power, &state);
+ ret = cdev->ops->power2state(cdev, power, &state);
if (ret)
return ret;
@@ -1652,7 +1650,6 @@ static int __init thermal_init(void)
if (result)
goto error;
- mutex_init(&poweroff_lock);
result = thermal_register_governors();
if (result)
goto error;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index e00fc5585ea8..681209db42a8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -34,7 +34,7 @@ extern struct thermal_governor *__governor_thermal_table_end[];
#define THERMAL_TABLE_ENTRY(table, name) \
static typeof(name) *__thermal_table_entry_##name \
- __used __section(__##table##_thermal_table) = &name
+ __used __section("__" #table "_thermal_table") = &name
#define THERMAL_GOVERNOR_DECLARE(name) THERMAL_TABLE_ENTRY(governor, name)
@@ -66,9 +66,9 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
}
int power_actor_get_max_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *max_power);
+ u32 *max_power);
int power_actor_get_min_power(struct thermal_cooling_device *cdev,
- struct thermal_zone_device *tz, u32 *min_power);
+ u32 *min_power);
int power_actor_set_power(struct thermal_cooling_device *cdev,
struct thermal_instance *ti, u32 power);
/**
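
With the thermal zone pointer dropped from the power-actor callbacks, a cooling device implementing them now takes the shape below; the demo_ names and dummy bodies are placeholders:

    static int demo_get_requested_power(struct thermal_cooling_device *cdev,
                                        u32 *power)
    {
        *power = 0; /* report the current consumption here */
        return 0;
    }

    static int demo_state2power(struct thermal_cooling_device *cdev,
                                unsigned long state, u32 *power)
    {
        *power = 0; /* map a cooling state to its power budget */
        return 0;
    }

    static int demo_power2state(struct thermal_cooling_device *cdev,
                                u32 power, unsigned long *state)
    {
        *state = 0; /* pick the state that fits the budget */
        return 0;
    }

    static const struct thermal_cooling_device_ops demo_ops = {
        .get_requested_power = demo_get_requested_power,
        .state2power = demo_state2power,
        .power2state = demo_power2state,
    };
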
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index af7b2383e8f6..1234dbe95895 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -78,7 +78,7 @@ int thermal_genl_sampling_temp(int id, int temp)
hdr = genlmsg_put(skb, 0, 0, &thermal_gnl_family, 0,
THERMAL_GENL_SAMPLING_TEMP);
if (!hdr)
- return -EMSGSIZE;
+ goto out_free;
if (nla_put_u32(skb, THERMAL_GENL_ATTR_TZ_ID, id))
goto out_cancel;
@@ -93,6 +93,7 @@ int thermal_genl_sampling_temp(int id, int temp)
return 0;
out_cancel:
genlmsg_cancel(skb, hdr);
+out_free:
nlmsg_free(skb);
return -EMSGSIZE;
@@ -545,7 +546,7 @@ static int thermal_genl_cmd_dumpit(struct sk_buff *skb,
{
struct param p = { .msg = skb };
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- int cmd = info->ops->cmd;
+ int cmd = info->op.cmd;
int ret;
void *hdr;
@@ -601,7 +602,7 @@ out_free_msg:
return ret;
}
-static const struct genl_ops thermal_genl_ops[] = {
+static const struct genl_small_ops thermal_genl_ops[] = {
{
.cmd = THERMAL_GENL_CMD_TZ_GET_ID,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -635,8 +636,8 @@ static struct genl_family thermal_gnl_family __ro_after_init = {
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
.policy = thermal_genl_policy,
- .ops = thermal_genl_ops,
- .n_ops = ARRAY_SIZE(thermal_genl_ops),
+ .small_ops = thermal_genl_ops,
+ .n_small_ops = ARRAY_SIZE(thermal_genl_ops),
.mcgrps = thermal_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
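
The netlink conversion above moves the family to the compact genl_small_ops table, which suits ops tables without per-op policies. A sketch of the registration shape (the demo command and family are placeholders):

    static const struct genl_small_ops demo_small_ops[] = {
        {
            .cmd = 1,       /* placeholder command id */
            .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
            .doit = NULL,   /* handler omitted in this sketch */
        },
    };

    static struct genl_family demo_family __ro_after_init = {
        .name = "demo",
        .version = 1,
        .small_ops = demo_small_ops,
        .n_small_ops = ARRAY_SIZE(demo_small_ops),
    };
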
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 8c231219e15d..a6f371fc9af2 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -448,7 +448,7 @@ static umode_t thermal_zone_passive_is_visible(struct kobject *kobj,
struct attribute *attr,
int attrno)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
int count, passive = 0;
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index ab19ceff6e2a..5e596168ba73 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -25,10 +25,20 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/io.h>
+#include <linux/cpu_pm.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "ti-bandgap.h"
static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id);
+#ifdef CONFIG_PM_SLEEP
+static int bandgap_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v);
+#endif
/*** Helper functions to access registers and their bitfields ***/
@@ -1008,6 +1018,11 @@ int ti_bandgap_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_PM_SLEEP
+ bgp->nb.notifier_call = bandgap_omap_cpu_notifier;
+ cpu_pm_register_notifier(&bgp->nb);
+#endif
+
return 0;
remove_last_cooling:
@@ -1041,7 +1056,9 @@ int ti_bandgap_remove(struct platform_device *pdev)
struct ti_bandgap *bgp = platform_get_drvdata(pdev);
int i;
- /* First thing is to remove sensor interfaces */
+ cpu_pm_unregister_notifier(&bgp->nb);
+
+ /* Remove sensor interfaces */
for (i = 0; i < bgp->conf->sensor_count; i++) {
if (bgp->conf->sensors[i].unregister_cooling)
bgp->conf->sensors[i].unregister_cooling(bgp, i);
@@ -1150,9 +1167,43 @@ static int ti_bandgap_suspend(struct device *dev)
if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
clk_disable_unprepare(bgp->fclock);
+ bgp->is_suspended = true;
+
return err;
}
+static int bandgap_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ struct ti_bandgap *bgp;
+
+ bgp = container_of(nb, struct ti_bandgap, nb);
+
+ spin_lock(&bgp->lock);
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (bgp->is_suspended)
+ break;
+ ti_bandgap_save_ctxt(bgp);
+ ti_bandgap_power(bgp, false);
+ if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
+ clk_disable(bgp->fclock);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if (bgp->is_suspended)
+ break;
+ if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
+ clk_enable(bgp->fclock);
+ ti_bandgap_power(bgp, true);
+ ti_bandgap_restore_ctxt(bgp);
+ break;
+ }
+ spin_unlock(&bgp->lock);
+
+ return NOTIFY_OK;
+}
+
static int ti_bandgap_resume(struct device *dev)
{
struct ti_bandgap *bgp = dev_get_drvdata(dev);
@@ -1161,6 +1212,7 @@ static int ti_bandgap_resume(struct device *dev)
clk_prepare_enable(bgp->fclock);
ti_bandgap_power(bgp, true);
+ bgp->is_suspended = false;
return ti_bandgap_restore_ctxt(bgp);
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index fce4657e9486..ed0ea4b17b25 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -12,6 +12,10 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/cpu_pm.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
struct gpio_desc;
@@ -203,6 +207,8 @@ struct ti_bandgap {
int irq;
struct gpio_desc *tshut_gpiod;
u32 clk_rate;
+ struct notifier_block nb;
+ unsigned int is_suspended:1;
};
/**
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 354e61c0f2e5..7fc058f81d00 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -16,7 +16,19 @@ menuconfig USB4
To compile this driver as a module, choose M here. The module will be
called thunderbolt.
+if USB4
+
+config USB4_DEBUGFS_WRITE
+ bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
+ help
+ Enables writing to device configuration registers through the
+ debugfs interface.
+
+ Only enable this if you know what you are doing! Never enable
+ this for production systems or distro kernels.
+
config USB4_KUNIT_TEST
bool "KUnit tests"
depends on KUNIT=y
- depends on USB4=y
+
+endif # USB4
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 4ab5bfad7bfd..571537371072 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -4,4 +4,6 @@ thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o ee
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
thunderbolt-objs += nvm.o retimer.o quirks.o
-obj-${CONFIG_USB4_KUNIT_TEST} += test.o
+thunderbolt-${CONFIG_ACPI} += acpi.o
+thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
+thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
new file mode 100644
index 000000000000..a5f988a9f948
--- /dev/null
+++ b/drivers/thunderbolt/acpi.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI support
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+
+#include "tb.h"
+
+static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
+ void **return_value)
+{
+ struct fwnode_reference_args args;
+ struct fwnode_handle *fwnode;
+ struct tb_nhi *nhi = data;
+ struct acpi_device *adev;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ fwnode = acpi_fwnode_handle(adev);
+ ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface",
+ NULL, 0, 0, &args);
+ if (ret)
+ return AE_OK;
+
+ /* It needs to reference this NHI */
+ if (nhi->pdev->dev.fwnode != args.fwnode)
+ goto out_put;
+
+ /*
+ * Try to find the physical device by walking upwards in the
+ * hierarchy. We need to do this because the xHCI driver might not
+ * yet be bound, so the USB3 SuperSpeed ports are not yet created.
+ */
+ dev = acpi_get_first_physical_node(adev);
+ while (!dev) {
+ adev = adev->parent;
+ if (!adev)
+ break;
+ dev = acpi_get_first_physical_node(adev);
+ }
+
+ if (!dev)
+ goto out_put;
+
+ /*
+ * Check that the device is PCIe. USB3 SuperSpeed ports have this
+ * property as well, but they are not power managed themselves; the
+ * xHCI and the SuperSpeed hub are, so for those the link is created
+ * from the xHCI instead.
+ */
+ while (!dev_is_pci(dev))
+ dev = dev->parent;
+
+ if (!dev)
+ goto out_put;
+
+ /*
+ * Check that this actually matches the type of device we
+ * expect. It should either be xHCI or PCIe root/downstream
+ * port.
+ */
+ pdev = to_pci_dev(dev);
+ if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
+ (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ const struct device_link *link;
+
+ link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_PM_RUNTIME);
+ if (link) {
+ dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+ dev_name(&pdev->dev));
+ } else {
+ dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+ dev_name(&pdev->dev));
+ }
+ }
+
+out_put:
+ fwnode_handle_put(args.fwnode);
+ return AE_OK;
+}
+
+/**
+ * tb_acpi_add_links() - Add device links based on ACPI description
+ * @nhi: Pointer to NHI
+ *
+ * Goes over the ACPI namespace finding tunneled ports that reference
+ * the @nhi ACPI node. For each reference a device link is added. The link
+ * is automatically removed by the driver core.
+ */
+void tb_acpi_add_links(struct tb_nhi *nhi)
+{
+ acpi_status status;
+
+ if (!has_acpi_companion(&nhi->pdev->dev))
+ return;
+
+ /*
+ * Find all devices that have a usb4-host-interface property
+ * referencing this NHI.
+ */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
+ tb_acpi_add_link, NULL, nhi, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
+}
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 19db6cdc5b70..6f571e912cf2 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -15,14 +15,6 @@
#define VSE_CAP_OFFSET_MAX 0xffff
#define TMU_ACCESS_EN BIT(20)
-struct tb_cap_any {
- union {
- struct tb_cap_basic basic;
- struct tb_cap_extended_short extended_short;
- struct tb_cap_extended_long extended_long;
- };
-} __packed;
-
static int tb_port_enable_tmu(struct tb_port *port, bool enable)
{
struct tb_switch *sw = port->sw;
@@ -67,23 +59,50 @@ static void tb_port_dummy_read(struct tb_port *port)
}
}
+/**
+ * tb_port_next_cap() - Return next capability in the linked list
+ * @port: Port to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds the dword offset of the next capability in the port config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found, returns %0. In
+ * case of failure returns a negative errno.
+ */
+int tb_port_next_cap(struct tb_port *port, unsigned int offset)
+{
+ struct tb_cap_any header;
+ int ret;
+
+ if (!offset)
+ return port->config.first_cap_offset;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+ if (ret)
+ return ret;
+
+ return header.basic.next;
+}
+
static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
- u32 offset = 1;
+ int offset = 0;
do {
struct tb_cap_any header;
int ret;
+ offset = tb_port_next_cap(port, offset);
+ if (offset < 0)
+ return offset;
+
ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
-
- offset = header.basic.next;
- } while (offset);
+ } while (offset > 0);
return -ENOENT;
}
@@ -114,6 +133,50 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
}
/**
+ * tb_switch_next_cap() - Return next capability in the linked list
+ * @sw: Switch to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds the dword offset of the next capability in the router config
+ * space capability list and returns it. Passing %0 returns the first
+ * entry in the capability list. If no next capability is found, returns
+ * %0. In case of failure returns a negative errno.
+ */
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
+{
+ struct tb_cap_any header;
+ int ret;
+
+ if (!offset)
+ return sw->config.first_cap_offset;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+ if (ret)
+ return ret;
+
+ switch (header.basic.cap) {
+ case TB_SWITCH_CAP_TMU:
+ ret = header.basic.next;
+ break;
+
+ case TB_SWITCH_CAP_VSE:
+ if (!header.extended_short.length)
+ ret = header.extended_long.next;
+ else
+ ret = header.extended_short.next;
+ break;
+
+ default:
+ tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
+ header.basic.cap, offset);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
+}
+
+/**
* tb_switch_find_cap() - Find switch capability
* @sw: Switch to find the capability for
* @cap: Capability to look for
@@ -124,21 +187,23 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
*/
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
- int offset = sw->config.first_cap_offset;
+ int offset = 0;
- while (offset > 0 && offset < CAP_OFFSET_MAX) {
+ do {
struct tb_cap_any header;
int ret;
+ offset = tb_switch_next_cap(sw, offset);
+ if (offset < 0)
+ return offset;
+
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
-
- offset = header.basic.next;
- }
+ } while (offset);
return -ENOENT;
}
@@ -155,37 +220,24 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
*/
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
{
- struct tb_cap_any header;
- int offset;
+ int offset = 0;
- offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE);
- if (offset < 0)
- return offset;
-
- while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) {
+ do {
+ struct tb_cap_any header;
int ret;
- ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+ offset = tb_switch_next_cap(sw, offset);
+ if (offset < 0)
+ return offset;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
- /*
- * Extended vendor specific capabilities come in two
- * flavors: short and long. The latter is used when
- * offset is over 0xff.
- */
- if (offset >= CAP_OFFSET_MAX) {
- if (header.extended_long.vsec_id == vsec)
- return offset;
- offset = header.extended_long.next;
- } else {
- if (header.extended_short.vsec_id == vsec)
- return offset;
- if (!header.extended_short.length)
- return -ENOENT;
- offset = header.extended_short.next;
- }
- }
+ if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
+ header.extended_short.vsec_id == vsec)
+ return offset;
+ } while (offset);
return -ENOENT;
}
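
The rewritten find helpers above all share one loop shape built on the new next-cap accessors; the debugfs code later in this series iterates the same way. A minimal sketch of walking a router's capability list:

    static void walk_caps(struct tb_switch *sw)
    {
        int offset = tb_switch_next_cap(sw, 0);

        while (offset > 0) {
            /* read or print the capability at 'offset' here */
            offset = tb_switch_next_cap(sw, offset);
        }
    }
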
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 394a23ce6ca4..9894b8f63064 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -219,6 +219,7 @@ static int check_config_address(struct tb_cfg_address addr,
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
+ struct tb_ctl *ctl = response->ctl;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
@@ -227,9 +228,13 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
if (res.err)
return res;
- WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
- WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1);
- WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1);
+ if (pkg->zero1)
+ tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
+ if (pkg->zero2)
+ tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
+ if (pkg->zero3)
+ tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
+
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
@@ -266,9 +271,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
- tb_ctl_WARN(ctl,
- "CFG_ERROR(%llx:%x): Invalid config space or offset\n",
- res->response_route, res->response_port);
+ tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
+ res->response_route, res->response_port);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
@@ -283,6 +287,10 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
res->response_route, res->response_port);
return;
+ case TB_CFG_ERROR_LOCK:
+ tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
+ res->response_route, res->response_port);
+ return;
default:
/* 5,6,7,9 and 11 are also valid error codes */
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
@@ -951,6 +959,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
return -ENODEV;
tb_cfg_print_error(ctl, res);
+
+ if (res->tb_error == TB_CFG_ERROR_LOCK)
+ return -EACCES;
return -EIO;
}
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
new file mode 100644
index 000000000000..ed65d2b13964
--- /dev/null
+++ b/drivers/thunderbolt/debugfs.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Gil Fine <gil.fine@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "tb.h"
+
+#define PORT_CAP_PCIE_LEN 1
+#define PORT_CAP_POWER_LEN 2
+#define PORT_CAP_LANE_LEN 3
+#define PORT_CAP_USB3_LEN 5
+#define PORT_CAP_DP_LEN 8
+#define PORT_CAP_TMU_LEN 8
+#define PORT_CAP_BASIC_LEN 9
+#define PORT_CAP_USB4_LEN 20
+
+#define SWITCH_CAP_TMU_LEN 26
+#define SWITCH_CAP_BASIC_LEN 27
+
+#define PATH_LEN 2
+
+#define COUNTER_SET_LEN 3
+
+#define DEBUGFS_ATTR(__space, __write) \
+static int __space ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __space ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __space ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __space ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .write = __write, \
+ .llseek = seq_lseek, \
+}
+
+#define DEBUGFS_ATTR_RO(__space) \
+ DEBUGFS_ATTR(__space, NULL)
+
+#define DEBUGFS_ATTR_RW(__space) \
+ DEBUGFS_ATTR(__space, __space ## _write)
+
+static struct dentry *tb_debugfs_root;
+
+static void *validate_and_copy_from_user(const void __user *user_buf,
+ size_t *count)
+{
+ size_t nbytes;
+ void *buf;
+
+ if (!*count)
+ return ERR_PTR(-EINVAL);
+
+ if (!access_ok(user_buf, *count))
+ return ERR_PTR(-EFAULT);
+
+ buf = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ nbytes = min_t(size_t, *count, PAGE_SIZE);
+ if (copy_from_user(buf, user_buf, nbytes)) {
+ free_page((unsigned long)buf);
+ return ERR_PTR(-EFAULT);
+ }
+
+ *count = nbytes;
+ return buf;
+}
+
+static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
+ int long_fmt_len)
+{
+ char *token;
+ u32 v[5];
+ int ret;
+
+ token = strsep(line, "\n");
+ if (!token)
+ return false;
+
+ /*
+ * For Adapter/Router configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset cap_id vs_cap_id value\n
+ * v[0] v[1] v[2] v[3] v[4]
+ *
+ * For Counter configuration space:
+ * Short format is: offset\n
+ * v[0]
+ * Long format as produced from the read side:
+ * offset relative_offset counter_id value\n
+ * v[0] v[1] v[2] v[3]
+ */
+ ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
+ /* In case of counters, the write clears the counter; "val" is unused */
+ if (ret == short_fmt_len) {
+ *offs = v[0];
+ *val = v[short_fmt_len - 1];
+ return true;
+ } else if (ret == long_fmt_len) {
+ *offs = v[0];
+ *val = v[long_fmt_len - 1];
+ return true;
+ }
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct tb *tb = sw->tb;
+ char *line, *buf;
+ u32 val, offset;
+ int ret = 0;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* User did hardware changes behind the driver's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ line = buf;
+ while (parse_line(&line, &offset, &val, 2, 5)) {
+ if (port)
+ ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
+ else
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, user_buf, count, ppos);
+}
+
+static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_switch *sw = s->private;
+
+ return regs_write(sw, NULL, user_buf, count, ppos);
+}
+#define DEBUGFS_MODE 0600
+#else
+#define port_regs_write NULL
+#define switch_regs_write NULL
+#define DEBUGFS_MODE 0400
+#endif
+
+static int port_clear_all_counters(struct tb_port *port)
+{
+ u32 *buf;
+ int ret;
+
+ buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
+ COUNTER_SET_LEN * port->config.max_counters);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t counters_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = port->sw->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* If only the delimiter was written, clear all counters in one shot */
+ if (buf[0] == '\n') {
+ ret = port_clear_all_counters(port);
+ } else {
+ char *line = buf;
+ u32 val, offset;
+
+ ret = -EINVAL;
+ while (parse_line(&line, &offset, &val, 1, 4)) {
+ ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
+ offset, 1);
+ if (ret)
+ break;
+ }
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static void cap_show(struct seq_file *s, struct tb_switch *sw,
+ struct tb_port *port, unsigned int cap, u8 cap_id,
+ u8 vsec_id, int length)
+{
+ int ret, offset = 0;
+
+ while (length > 0) {
+ int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
+ u32 data[TB_MAX_CONFIG_RW_LENGTH];
+
+ if (port)
+ ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
+ dwords);
+ else
+ ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n",
+ cap + offset);
+ if (dwords > 1)
+ seq_printf(s, "0x%04x ...\n", cap + offset + 1);
+ return;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
+ cap + offset + i, offset + i,
+ cap_id, vsec_id, data[i]);
+ }
+
+ length -= dwords;
+ offset += dwords;
+ }
+}
+
+static void port_cap_show(struct tb_port *port, struct seq_file *s,
+ unsigned int cap)
+{
+ struct tb_cap_any header;
+ u8 vsec_id = 0;
+ size_t length;
+ int ret;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n", cap);
+ return;
+ }
+
+ switch (header.basic.cap) {
+ case TB_PORT_CAP_PHY:
+ length = PORT_CAP_LANE_LEN;
+ break;
+
+ case TB_PORT_CAP_TIME1:
+ length = PORT_CAP_TMU_LEN;
+ break;
+
+ case TB_PORT_CAP_POWER:
+ length = PORT_CAP_POWER_LEN;
+ break;
+
+ case TB_PORT_CAP_ADAP:
+ if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
+ length = PORT_CAP_PCIE_LEN;
+ } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
+ length = PORT_CAP_DP_LEN;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ length = PORT_CAP_USB3_LEN;
+ } else {
+ seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+ break;
+
+ case TB_PORT_CAP_VSE:
+ if (!header.extended_short.length) {
+ ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
+ cap + 1, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n",
+ cap + 1);
+ return;
+ }
+ length = header.extended_long.length;
+ vsec_id = header.extended_short.vsec_id;
+ } else {
+ length = header.extended_short.length;
+ vsec_id = header.extended_short.vsec_id;
+ /*
+ * Ice Lake and Tiger Lake do not implement the
+ * full length of the capability, only the first
+ * 32 dwords, so hard-code it here.
+ */
+ if (!vsec_id &&
+ (tb_switch_is_ice_lake(port->sw) ||
+ tb_switch_is_tiger_lake(port->sw)))
+ length = 32;
+ }
+ break;
+
+ case TB_PORT_CAP_USB4:
+ length = PORT_CAP_USB4_LEN;
+ break;
+
+ default:
+ seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+
+ cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
+}
+
+static void port_caps_show(struct tb_port *port, struct seq_file *s)
+{
+ int cap;
+
+ cap = tb_port_next_cap(port, 0);
+ while (cap > 0) {
+ port_cap_show(port, s, cap);
+ cap = tb_port_next_cap(port, cap);
+ }
+}
+
+static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
+{
+ u32 data[PORT_CAP_BASIC_LEN];
+ int ret, i;
+
+ ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+ return 0;
+}
+
+static int port_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+ ret = port_basic_regs_show(port, s);
+ if (ret)
+ goto out_unlock;
+
+ port_caps_show(port, s);
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(port_regs);
+
+static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
+ unsigned int cap)
+{
+ struct tb_cap_any header;
+ int ret, length;
+ u8 vsec_id = 0;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n", cap);
+ return;
+ }
+
+ if (header.basic.cap == TB_SWITCH_CAP_VSE) {
+ if (!header.extended_short.length) {
+ ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
+ cap + 1, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n",
+ cap + 1);
+ return;
+ }
+ length = header.extended_long.length;
+ } else {
+ length = header.extended_short.length;
+ }
+ vsec_id = header.extended_short.vsec_id;
+ } else {
+ if (header.basic.cap == TB_SWITCH_CAP_TMU) {
+ length = SWITCH_CAP_TMU_LEN;
+ } else {
+ seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+ }
+
+ cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
+}
+
+static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
+{
+ int cap;
+
+ cap = tb_switch_next_cap(sw, 0);
+ while (cap > 0) {
+ switch_cap_show(sw, s, cap);
+ cap = tb_switch_next_cap(sw, cap);
+ }
+}
+
+static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
+{
+ u32 data[SWITCH_CAP_BASIC_LEN];
+ size_t dwords;
+ int ret, i;
+
+ /* Only USB4 has the additional registers */
+ if (tb_switch_is_usb4(sw))
+ dwords = ARRAY_SIZE(data);
+ else
+ dwords = 7;
+
+ ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+ return 0;
+}
+
+static int switch_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_switch *sw = s->private;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+ ret = switch_basic_regs_show(sw, s);
+ if (ret)
+ goto out_unlock;
+
+ switch_caps_show(sw, s);
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(switch_regs);
+
+static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
+{
+ u32 data[PATH_LEN];
+ int ret, i;
+
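+ /*
+ * Each path config space entry takes PATH_LEN dwords and is
+ * indexed by the input HopID.
+ */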
+ ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
+ ARRAY_SIZE(data));
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+ hopid * PATH_LEN + i, i, hopid, data[i]);
+ }
+
+ return 0;
+}
+
+static int path_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int start, i, ret = 0;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset in_hop_id value\n");
+
+ /* NHI and lane adapters have an entry for path 0 */
+ if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
+ ret = path_show_one(port, s, 0);
+ if (ret)
+ goto out_unlock;
+ }
+
+ start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
+
+ for (i = start; i <= port->config.max_in_hop_id; i++) {
+ ret = path_show_one(port, s, i);
+ if (ret)
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RO(path);
+
+static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
+ int counter)
+{
+ u32 data[COUNTER_SET_LEN];
+ int ret, i;
+
+ ret = tb_port_read(port, data, TB_CFG_COUNTERS,
+ counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n",
+ counter * COUNTER_SET_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+ counter * COUNTER_SET_LEN + i, i, counter, data[i]);
+ }
+
+ return 0;
+}
+
+static int counters_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int i, ret = 0;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ seq_puts(s, "# offset relative_offset counter_id value\n");
+
+ for (i = 0; i < port->config.max_counters; i++) {
+ ret = counter_set_regs_show(port, s, i);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(counters);
+
+/**
+ * tb_switch_debugfs_init() - Add debugfs entries for router
+ * @sw: Pointer to the router
+ *
+ * Adds debugfs directories and files for the given router.
+ */
+void tb_switch_debugfs_init(struct tb_switch *sw)
+{
+ struct dentry *debugfs_dir;
+ struct tb_port *port;
+
+ debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
+ sw->debugfs_dir = debugfs_dir;
+ debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
+ &switch_regs_fops);
+
+ tb_switch_for_each_port(sw, port) {
+ struct dentry *debugfs_dir;
+ char dir_name[10];
+
+ if (port->disabled)
+ continue;
+ if (port->config.type == TB_TYPE_INACTIVE)
+ continue;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
+ debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
+ port, &port_regs_fops);
+ debugfs_create_file("path", 0400, debugfs_dir, port,
+ &path_fops);
+ if (port->config.counters_support)
+ debugfs_create_file("counters", 0600, debugfs_dir, port,
+ &counters_fops);
+ }
+}
+
+/**
+ * tb_switch_debugfs_remove() - Remove all router debugfs entries
+ * @sw: Pointer to the router
+ *
+ * Removes all previously added debugfs entries under this router.
+ */
+void tb_switch_debugfs_remove(struct tb_switch *sw)
+{
+ debugfs_remove_recursive(sw->debugfs_dir);
+}
+
+void tb_debugfs_init(void)
+{
+ tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
+}
+
+void tb_debugfs_exit(void)
+{
+ debugfs_remove_recursive(tb_debugfs_root);
+}
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index bba4cbfa9759..f0de94f7acbf 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -275,7 +275,7 @@ static struct attribute *domain_attrs[] = {
static umode_t domain_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct tb *tb = container_of(dev, struct tb, dev);
if (attr == &dev_attr_boot_acl.attr) {
@@ -455,6 +455,8 @@ int tb_domain_add(struct tb *tb)
/* This starts event processing */
mutex_unlock(&tb->lock);
+ device_init_wakeup(&tb->dev, true);
+
pm_runtime_no_callbacks(&tb->dev);
pm_runtime_set_active(&tb->dev);
pm_runtime_enable(&tb->dev);
@@ -544,6 +546,33 @@ int tb_domain_suspend(struct tb *tb)
return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}
+int tb_domain_freeze_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->freeze_noirq)
+ ret = tb->cm_ops->freeze_noirq(tb);
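+ /* Stop the control channel so no packets are processed while frozen */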
+ if (!ret)
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+int tb_domain_thaw_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->thaw_noirq)
+ ret = tb->cm_ops->thaw_noirq(tb);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
void tb_domain_complete(struct tb *tb)
{
if (tb->cm_ops->complete)
@@ -798,12 +827,23 @@ int tb_domain_init(void)
{
int ret;
+ tb_test_init();
+
+ tb_debugfs_init();
ret = tb_xdomain_init();
if (ret)
- return ret;
+ goto err_debugfs;
ret = bus_register(&tb_bus_type);
if (ret)
- tb_xdomain_exit();
+ goto err_xdomain;
+
+ return 0;
+
+err_xdomain:
+ tb_xdomain_exit();
+err_debugfs:
+ tb_debugfs_exit();
+ tb_test_exit();
return ret;
}
@@ -814,4 +854,6 @@ void tb_domain_exit(void)
ida_destroy(&tb_domain_ida);
tb_nvm_exit();
tb_xdomain_exit();
+ tb_debugfs_exit();
+ tb_test_exit();
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ffcc8c3459e5..977ba91f4d0e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1635,11 +1635,14 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
static bool icm_tgl_is_supported(struct tb *tb)
{
+ u32 val;
+
/*
* If the firmware is not running use software CM. This platform
* should fully support both.
*/
- return icm_firmware_running(tb->nhi);
+ val = ioread32(tb->nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_NVM_AUTH_DONE);
}
static void icm_handle_notification(struct work_struct *work)
@@ -2281,6 +2284,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_TGL_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 19be627d090f..41e6c738f6c8 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -45,7 +45,7 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
-static int tb_lc_configure_lane(struct tb_port *port, bool configure)
+static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
struct tb_switch *sw = port->sw;
@@ -69,7 +69,7 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
else
lane = TB_LC_SX_CTRL_L2C;
- if (configure) {
+ if (configured) {
ctrl |= lane;
if (upstream)
ctrl |= TB_LC_SX_CTRL_UPSTREAM;
@@ -83,55 +83,146 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure)
}
/**
- * tb_lc_configure_link() - Let LC know about configured link
- * @sw: Switch that is being added
+ * tb_lc_configure_port() - Let LC know about configured port
+ * @port: Port that is set as configured
*
- * Informs LC of both parent switch and @sw that there is established
- * link between the two.
+ * Sets the port configured for power management purposes.
*/
-int tb_lc_configure_link(struct tb_switch *sw)
+int tb_lc_configure_port(struct tb_port *port)
{
- struct tb_port *up, *down;
- int ret;
+ return tb_lc_set_port_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_port() - Let LC know about unconfigured port
+ * @port: Port that is set as unconfigured
+ *
+ * Sets the port unconfigured for power management purposes.
+ */
+void tb_lc_unconfigure_port(struct tb_port *port)
+{
+ tb_lc_set_port_configured(port, false);
+}
- if (!tb_route(sw) || tb_switch_is_icm(sw))
+static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
+{
+ struct tb_switch *sw = port->sw;
+ u32 ctrl, lane;
+ int cap, ret;
+
+ if (sw->generation < 2)
return 0;
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
- /* Configure parent link toward this switch */
- ret = tb_lc_configure_lane(down, true);
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
- /* Configure upstream link from this switch to the parent */
- ret = tb_lc_configure_lane(up, true);
+ /* Resolve correct lane */
+ if (port->port % 2)
+ lane = TB_LC_SX_CTRL_L1D;
+ else
+ lane = TB_LC_SX_CTRL_L2D;
+
+ if (configure)
+ ctrl |= lane;
+ else
+ ctrl &= ~lane;
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
+ * @port: Switch downstream port connected to another host
+ *
+ * Sets the lane configured for XDomain so that the LC knows about it.
+ * Returns %0 on success and negative errno in case of failure.
+ */
+int tb_lc_configure_xdomain(struct tb_port *port)
+{
+ return tb_lc_set_xdomain_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
+ * @port: Switch downstream port that was connected to another host
+ *
+ * Unsets the lane XDomain configuration.
+ */
+void tb_lc_unconfigure_xdomain(struct tb_port *port)
+{
+ tb_lc_set_xdomain_configured(port, false);
+}
+
+static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
+ unsigned int flags)
+{
+ u32 ctrl;
+ int ret;
+
+ /*
+ * Set the wake bits according to @flags: connect/disconnect,
+ * PCIe and USB4 (wake coming from another router).
+ */
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
if (ret)
- tb_lc_configure_lane(down, false);
+ return ret;
+
+ ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
+ TB_LC_SX_CTRL_WOU4);
+
+ if (flags & TB_WAKE_ON_CONNECT)
+ ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
+ if (flags & TB_WAKE_ON_USB4)
+ ctrl |= TB_LC_SX_CTRL_WOU4;
+ if (flags & TB_WAKE_ON_PCIE)
+ ctrl |= TB_LC_SX_CTRL_WOP;
- return ret;
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}
/**
- * tb_lc_unconfigure_link() - Let LC know about unconfigured link
- * @sw: Switch to unconfigure
+ * tb_lc_set_wake() - Enable/disable wake
+ * @sw: Switch whose wakes to configure
+ * @flags: Wakeup flags (%0 to disable)
*
- * Informs LC of both parent switch and @sw that the link between the
- * two does not exist anymore.
+ * Sets the wake bits of each link controller according to @flags.
*/
-void tb_lc_unconfigure_link(struct tb_switch *sw)
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
- struct tb_port *up, *down;
+ int start, size, nlc, ret, i;
+ u32 desc;
- if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
- return;
+ if (sw->generation < 2)
+ return 0;
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
+ if (!tb_route(sw))
+ return 0;
- tb_lc_configure_lane(up, false);
- tb_lc_configure_lane(down, false);
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Figure out number of link controllers */
+ nlc = desc & TB_LC_DESC_NLC_MASK;
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+ /* For each link controller set the wake bits */
+ for (i = 0; i < nlc; i++) {
+ unsigned int offset = sw->cap_lc + start + i * size;
+
+ ret = tb_lc_set_wake_one(sw, offset, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/**
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 5f7489fa1327..db80dc5dfeba 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -405,12 +406,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
ring->vector = ret;
- ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
- if (ring->irq < 0)
- return ring->irq;
+ ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ret < 0)
+ goto err_ida_remove;
+
+ ring->irq = ret;
irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
- return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ if (ret)
+ goto err_ida_remove;
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&nhi->msix_ida, ring->vector);
+
+ return ret;
}
static void ring_release_msix(struct tb_ring *ring)
@@ -863,6 +875,22 @@ static int nhi_suspend_noirq(struct device *dev)
return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
+static int nhi_freeze_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_thaw_noirq(tb);
+}
+
static bool nhi_wake_supported(struct pci_dev *pdev)
{
u8 val;
@@ -1069,6 +1097,69 @@ static bool nhi_imr_valid(struct pci_dev *pdev)
return true;
}
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core makes sure the
+ * NHI is resumed first.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+ struct pci_dev *upstream, *pdev;
+
+ if (!x86_apple_machine)
+ return;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ break;
+ default:
+ return;
+ }
+
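+ /* Walk up to the PCIe upstream port above the NHI */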
+ upstream = pci_upstream_bridge(nhi->pdev);
+ while (upstream) {
+ if (!pci_is_pcie(upstream))
+ return;
+ if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+ break;
+ upstream = pci_upstream_bridge(upstream);
+ }
+
+ if (!upstream)
+ return;
+
+ /*
+ * For each hotplug downstream port, add a device link back to
+ * the NHI so that PCIe tunnels can be re-established after
+ * sleep.
+ */
+ for_each_pci_bridge(pdev, upstream->subordinate) {
+ const struct device_link *link;
+
+ if (!pci_is_pcie(pdev))
+ continue;
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+ !pdev->is_hotplug_bridge)
+ continue;
+
+ link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_PM_RUNTIME);
+ if (link) {
+ dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+ dev_name(&pdev->dev));
+ } else {
+ dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
@@ -1134,6 +1225,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
+ tb_apple_add_links(nhi);
+ tb_acpi_add_links(nhi);
+
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
@@ -1157,6 +1251,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, tb);
+ device_wakeup_enable(&pdev->dev);
+
pm_runtime_allow(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1186,14 +1282,13 @@ static void nhi_remove(struct pci_dev *pdev)
static const struct dev_pm_ops nhi_pm_ops = {
.suspend_noirq = nhi_suspend_noirq,
.resume_noirq = nhi_resume_noirq,
- .freeze_noirq = nhi_suspend_noirq, /*
+ .freeze_noirq = nhi_freeze_noirq, /*
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
- .thaw_noirq = nhi_resume_noirq,
+ .thaw_noirq = nhi_thaw_noirq,
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
- .freeze = nhi_suspend,
.poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
@@ -1250,6 +1345,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 80162e4b013f..4e0861d75072 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -75,6 +75,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
index 6795851aac95..96da07e88c52 100644
--- a/drivers/thunderbolt/nhi_ops.c
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -59,7 +59,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
if (power) {
- unsigned int retries = 10;
+ unsigned int retries = 350;
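+ /* With the ~3 ms sleeps below this waits up to roughly one second */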
u32 val;
/* Wait until the firmware tells it is up and running */
@@ -67,7 +67,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
if (val & VS_CAP_9_FW_READY)
return 0;
- msleep(250);
+ usleep_range(3000, 3100);
} while (--retries);
return -ETIMEDOUT;
@@ -97,7 +97,7 @@ static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
if (data & VS_CAP_18_DONE)
goto clear;
- msleep(100);
+ usleep_range(1000, 1100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
@@ -121,31 +121,38 @@ static void icl_nhi_set_ltr(struct tb_nhi *nhi)
static int icl_nhi_suspend(struct tb_nhi *nhi)
{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
int ret;
if (icl_nhi_is_device_connected(nhi))
return 0;
- /*
- * If there is no device connected we need to perform both: a
- * handshake through LC mailbox and force power down before
- * entering D3.
- */
- icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
- ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
- if (ret)
- return ret;
+ if (tb_switch_is_icm(tb->root_switch)) {
+ /*
+ * If there is no device connected we need to perform
+ * both: a handshake through LC mailbox and force power
+ * down before entering D3.
+ */
+ icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+ ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+ if (ret)
+ return ret;
+ }
return icl_nhi_force_power(nhi, false);
}
static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
enum icl_lc_mailbox_cmd cmd;
if (!pm_suspend_via_firmware())
return icl_nhi_suspend(nhi);
+ if (!tb_switch_is_icm(tb->root_switch))
+ return 0;
+
cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
icl_nhi_lc_mailbox_cmd(nhi, cmd);
return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 7eac3e0f90a2..57e2978a3c21 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -27,7 +27,7 @@ static const struct tb_quirk tb_quirks[] = {
* tb_check_quirks() - Check for quirks to apply
* @sw: Thunderbolt switch
*
- * Apply any quirks for the Thunderbolt controller
+ * Apply any quirks for the Thunderbolt controller.
*/
void tb_check_quirks(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a921de9ce7cb..c73bbfe69ba1 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -601,6 +601,13 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
+ /*
+ * USB4 restricts programming NFC buffers to lane adapters only,
+ * so skip other ports.
+ */
+ if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
+ return 0;
+
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
@@ -666,6 +673,50 @@ int tb_port_unlock(struct tb_port *port)
return 0;
}
+static int __tb_port_enable(struct tb_port *port, bool enable)
+{
+ int ret;
+ u32 phy;
+
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
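+ /* LANE_ADP_CS_1_LD is the lane disable bit: clear it to enable the lane */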
+ if (enable)
+ phy &= ~LANE_ADP_CS_1_LD;
+ else
+ phy |= LANE_ADP_CS_1_LD;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+/**
+ * tb_port_enable() - Enable lane adapter
+ * @port: Port to enable (can be %NULL)
+ *
+ * This is used for lane 0 and 1 adapters to enable the lane.
+ */
+int tb_port_enable(struct tb_port *port)
+{
+ return __tb_port_enable(port, true);
+}
+
+/**
+ * tb_port_disable() - Disable lane adapter
+ * @port: Port to disable (can be %NULL)
+ *
+ * This is used for lane 0 and 1 adapters to disable the lane.
+ */
+int tb_port_disable(struct tb_port *port)
+{
+ return __tb_port_enable(port, false);
+}
+
/**
* tb_init_port() - initialize a port
*
@@ -739,7 +790,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
 * reserved.
*/
- if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
+ if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
min_hopid = TB_PATH_MIN_HOPID;
if (max_hopid < 0 || max_hopid > port_max_hopid)
@@ -1227,23 +1278,24 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
/**
* reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * @sw: Switch to reset
*
* Return: Returns 0 on success or an error code on failure.
*/
-int tb_switch_reset(struct tb *tb, u64 route)
+int tb_switch_reset(struct tb_switch *sw)
{
struct tb_cfg_result res;
- struct tb_regs_switch_header header = {
- header.route_hi = route >> 32,
- header.route_lo = route,
- header.enabled = true,
- };
- tb_dbg(tb, "resetting switch at %llx\n", route);
- res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
- 0, 2, 2, 2);
+
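+ /* Only generation 1 switches are reset this way */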
+ if (sw->generation > 1)
+ return 0;
+
+ tb_sw_dbg(sw, "resetting switch\n");
+
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
- res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
if (res.err > 0)
return -EIO;
return res.err;
@@ -1261,7 +1313,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
- if (tb_switch_is_icm(sw))
+ if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
return 0;
sw->config.plug_events_delay = 0xff;
@@ -1269,10 +1321,6 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (res)
return res;
- /* Plug events are always enabled in USB4 */
- if (tb_switch_is_usb4(sw))
- return 0;
-
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
@@ -1649,7 +1697,7 @@ static struct attribute *switch_attrs[] = {
static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct tb_switch *sw = tb_to_switch(dev);
if (attr == &dev_attr_device.attr) {
@@ -1988,7 +2036,7 @@ int tb_switch_configure(struct tb_switch *sw)
route = tb_route(sw);
tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
- sw->config.enabled ? "restoring " : "initializing", route,
+ sw->config.enabled ? "restoring" : "initializing", route,
tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
@@ -2008,10 +2056,6 @@ int tb_switch_configure(struct tb_switch *sw)
return ret;
ret = usb4_switch_setup(sw);
- if (ret)
- return ret;
-
- ret = usb4_switch_configure_link(sw);
} else {
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@@ -2025,10 +2069,6 @@ int tb_switch_configure(struct tb_switch *sw)
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
ROUTER_CS_1, 3);
- if (ret)
- return ret;
-
- ret = tb_lc_configure_link(sw);
}
if (ret)
return ret;
@@ -2312,6 +2352,69 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
}
/**
+ * tb_switch_configure_link() - Set link configured
+ * @sw: Switch whose link is configured
+ *
+ * Sets the link upstream from @sw configured (from both ends) so that
+ * it will not be disconnected when the domain exits sleep. Can be
+ * called for any switch.
+ *
+ * It is recommended that this is called after lane bonding is enabled.
+ *
+ * Returns %0 on success and negative errno in case of error.
+ */
+int tb_switch_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ ret = usb4_port_configure(up);
+ else
+ ret = tb_lc_configure_port(up);
+ if (ret)
+ return ret;
+
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ return usb4_port_configure(down);
+ return tb_lc_configure_port(down);
+}
+
+/**
+ * tb_switch_unconfigure_link() - Unconfigure link
+ * @sw: Switch whose link is unconfigured
+ *
+ * Sets the link unconfigured so that @sw will be disconnected if the
+ * domain exits sleep.
+ */
+void tb_switch_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+
+ if (sw->is_unplugged)
+ return;
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
+
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ usb4_port_unconfigure(down);
+ else
+ tb_lc_unconfigure_port(down);
+}
+
+/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
*
@@ -2399,6 +2502,13 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
}
+ /*
+ * Thunderbolt routers do not generate wakeups themselves but
+ * they forward wakeups from tunneled protocols, so enable
+ * device wakeup here.
+ */
+ device_init_wakeup(&sw->dev, true);
+
pm_runtime_set_active(&sw->dev);
if (sw->rpm) {
pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
@@ -2408,6 +2518,7 @@ int tb_switch_add(struct tb_switch *sw)
pm_request_autosuspend(&sw->dev);
}
+ tb_switch_debugfs_init(sw);
return 0;
}
@@ -2423,6 +2534,8 @@ void tb_switch_remove(struct tb_switch *sw)
{
struct tb_port *port;
+ tb_switch_debugfs_remove(sw);
+
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
pm_runtime_disable(&sw->dev);
@@ -2445,11 +2558,6 @@ void tb_switch_remove(struct tb_switch *sw)
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
- if (tb_switch_is_usb4(sw))
- usb4_switch_unconfigure_link(sw);
- else
- tb_lc_unconfigure_link(sw);
-
tb_switch_nvm_remove(sw);
if (tb_route(sw))
@@ -2481,6 +2589,18 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
}
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+ if (flags)
+ tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+ else
+ tb_sw_dbg(sw, "disabling wakeup\n");
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_set_wake(sw, flags);
+ return tb_lc_set_wake(sw, flags);
+}
+
int tb_switch_resume(struct tb_switch *sw)
{
struct tb_port *port;
@@ -2526,6 +2646,13 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ /* Disable wakes */
+ tb_switch_set_wake(sw, 0);
+
+ err = tb_switch_tmu_init(sw);
+ if (err)
+ return err;
+
/* check for surviving downstream switches */
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
@@ -2555,20 +2682,43 @@ int tb_switch_resume(struct tb_switch *sw)
return 0;
}
-void tb_switch_suspend(struct tb_switch *sw)
+/**
+ * tb_switch_suspend() - Put a switch to sleep
+ * @sw: Switch to suspend
+ * @runtime: Is this runtime suspend or system sleep
+ *
+ * Suspends the router and all its children. Enables wakes according to
+ * the value of @runtime and then sets the sleep bit for the router. If
+ * @sw is the host router, the domain is ready to go to sleep once this
+ * function returns.
+ */
+void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
+ unsigned int flags = 0;
struct tb_port *port;
int err;
+ tb_sw_dbg(sw, "suspending switch\n");
+
err = tb_plug_events_active(sw, false);
if (err)
return;
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port))
- tb_switch_suspend(port->remote->sw);
+ tb_switch_suspend(port->remote->sw, runtime);
}
+ if (runtime) {
+ /* Trigger wake when something is plugged in/out */
+ flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
+ flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ } else if (device_may_wakeup(&sw->dev)) {
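+ /*
+ * For system sleep enable only the protocol wakes, and only if
+ * the device is allowed to wake the system.
+ */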
+ flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ }
+
+ tb_switch_set_wake(sw, flags);
+
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
else
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index f507815040eb..214fbc92c1b7 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include "tb.h"
#include "tb_regs.h"
@@ -22,13 +23,21 @@
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
* after cfg has been paused.
+ * @remove_work: Work used to remove any unplugged routers after
+ * runtime resume
*/
struct tb_cm {
struct list_head tunnel_list;
struct list_head dp_resources;
bool hotplug_active;
+ struct delayed_work remove_work;
};
+static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
+{
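+ /*
+ * The connection manager private data returned by tb_priv() is
+ * allocated directly after struct tb, so stepping back by
+ * sizeof(struct tb) recovers the domain structure.
+ */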
+ return ((void *)tcm - sizeof(struct tb));
+}
+
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
@@ -140,6 +149,29 @@ static void tb_discover_tunnels(struct tb_switch *sw)
}
}
+static int tb_port_configure_xdomain(struct tb_port *port)
+{
+ /*
+ * XDomain paths currently only support a single lane, so disable
+ * the other lane as required by the USB4 spec.
+ */
+ tb_port_disable(port->dual_link_port);
+
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_configure_xdomain(port);
+ return tb_lc_configure_xdomain(port);
+}
+
+static void tb_port_unconfigure_xdomain(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ usb4_port_unconfigure_xdomain(port);
+ else
+ tb_lc_unconfigure_xdomain(port);
+
+ tb_port_enable(port->dual_link_port);
+}
+
static void tb_scan_xdomain(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
@@ -158,6 +190,7 @@ static void tb_scan_xdomain(struct tb_port *port)
NULL);
if (xd) {
tb_port_at(route, sw)->xdomain = xd;
+ tb_port_configure_xdomain(port);
tb_xdomain_add(xd);
}
}
@@ -502,8 +535,13 @@ static void tb_scan_switch(struct tb_switch *sw)
{
struct tb_port *port;
+ pm_runtime_get_sync(&sw->dev);
+
tb_switch_for_each_port(sw, port)
tb_scan_port(port);
+
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
}
/**
@@ -566,6 +604,7 @@ static void tb_scan_port(struct tb_port *port)
*/
if (port->xdomain) {
tb_xdomain_remove(port->xdomain);
+ tb_port_unconfigure_xdomain(port);
port->xdomain = NULL;
}
@@ -577,6 +616,12 @@ static void tb_scan_port(struct tb_port *port)
if (!tcm->hotplug_active)
dev_set_uevent_suppress(&sw->dev, true);
+ /*
+ * At the moment runtime PM is only supported on Thunderbolt 2
+ * and beyond (devices with an LC).
+ */
+ sw->rpm = sw->generation > 1;
+
if (tb_switch_add(sw)) {
tb_switch_put(sw);
return;
@@ -592,8 +637,9 @@ static void tb_scan_port(struct tb_port *port)
}
/* Enable lane bonding if supported */
- if (tb_switch_lane_bonding_enable(sw))
- tb_sw_warn(sw, "failed to enable lane bonding\n");
+ tb_switch_lane_bonding_enable(sw);
+ /* Set the link configured */
+ tb_switch_configure_link(sw);
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
@@ -636,6 +682,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* deallocated properly.
*/
tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+ /* Now we can allow the domain to runtime suspend again */
+ pm_runtime_mark_last_busy(&dst_port->sw->dev);
+ pm_runtime_put_autosuspend(&dst_port->sw->dev);
+ pm_runtime_mark_last_busy(&src_port->sw->dev);
+ pm_runtime_put_autosuspend(&src_port->sw->dev);
fallthrough;
case TB_TUNNEL_USB3:
@@ -682,6 +733,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
if (port->remote->sw->is_unplugged) {
tb_retimer_remove_all(port);
tb_remove_dp_resources(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
@@ -821,9 +873,20 @@ static void tb_tunnel_dp(struct tb *tb)
return;
}
+ /*
+ * The DP stream needs the domain to be active, so runtime
+ * resume both ends of the tunnel.
+ *
+ * This should bring the routers in the middle active as well
+ * and keep the domain from runtime suspending while the DP
+ * tunnel is active.
+ */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
if (tb_switch_alloc_dp_resource(in->sw, in)) {
tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
- return;
+ goto err_rpm_put;
}
/* Make all unused USB3 bandwidth available for the new DP tunnel */
@@ -862,6 +925,11 @@ err_reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
+err_rpm_put:
+ pm_runtime_mark_last_busy(&out->sw->dev);
+ pm_runtime_put_autosuspend(&out->sw->dev);
+ pm_runtime_mark_last_busy(&in->sw->dev);
+ pm_runtime_put_autosuspend(&in->sw->dev);
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -911,6 +979,29 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_tunnel_dp(tb);
}
+static void tb_disconnect_and_release_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel, *n;
+
+ /*
+ * Tear down all DP tunnels and release their resources. They
+ * will be re-established after resume based on plug events.
+ */
+ list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
+ }
+
+ while (!list_empty(&tcm->dp_resources)) {
+ struct tb_port *port;
+
+ port = list_first_entry(&tcm->dp_resources,
+ struct tb_port, list);
+ list_del_init(&port->list);
+ }
+}
+
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_port *up, *down, *port;
@@ -1022,6 +1113,10 @@ static void tb_handle_hotplug(struct work_struct *work)
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
struct tb_port *port;
+
+ /* Bring the domain back from sleep if it was suspended */
+ pm_runtime_get_sync(&tb->dev);
+
mutex_lock(&tb->lock);
if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
@@ -1045,6 +1140,9 @@ static void tb_handle_hotplug(struct work_struct *work)
ev->route, ev->port, ev->unplug);
goto put_sw;
}
+
+ pm_runtime_get_sync(&sw->dev);
+
if (ev->unplug) {
tb_retimer_remove_all(port);
@@ -1054,6 +1152,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
tb_switch_tmu_disable(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
@@ -1077,6 +1176,7 @@ static void tb_handle_hotplug(struct work_struct *work)
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
tb_xdomain_put(xd);
+ tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
tb_dp_resource_unavailable(tb, port);
} else {
@@ -1096,10 +1196,17 @@ static void tb_handle_hotplug(struct work_struct *work)
}
}
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
put_sw:
tb_switch_put(sw);
out:
mutex_unlock(&tb->lock);
+
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+
kfree(ev);
}
@@ -1135,6 +1242,7 @@ static void tb_stop(struct tb *tb)
struct tb_tunnel *tunnel;
struct tb_tunnel *n;
+ cancel_delayed_work(&tcm->remove_work);
/* tunnels are only present after everything has been initialized */
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
/*
@@ -1186,6 +1294,8 @@ static int tb_start(struct tb *tb)
* root switch.
*/
tb->root_switch->no_nvm_upgrade = true;
+ /* All USB4 routers support runtime PM */
+ tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
ret = tb_switch_configure(tb->root_switch);
if (ret) {
@@ -1227,7 +1337,8 @@ static int tb_suspend_noirq(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
tb_dbg(tb, "suspending...\n");
- tb_switch_suspend(tb->root_switch);
+ tb_disconnect_and_release_dp(tb);
+ tb_switch_suspend(tb->root_switch, false);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_dbg(tb, "suspend finished\n");
@@ -1238,17 +1349,25 @@ static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
+ /* No need to restore if the router is already unplugged */
+ if (sw->is_unplugged)
+ return;
+
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
tb_switch_for_each_port(sw, port) {
- if (!tb_port_has_remote(port))
+ if (!tb_port_has_remote(port) && !port->xdomain)
continue;
- if (tb_switch_lane_bonding_enable(port->remote->sw))
- dev_warn(&sw->dev, "failed to restore lane bonding\n");
+ if (port->remote) {
+ tb_switch_lane_bonding_enable(port->remote->sw);
+ tb_switch_configure_link(port->remote->sw);
- tb_restore_children(port->remote->sw);
+ tb_restore_children(port->remote->sw);
+ } else if (port->xdomain) {
+ tb_port_configure_xdomain(port);
+ }
}
}
@@ -1260,7 +1379,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
/* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb, 0);
+ tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
@@ -1294,6 +1413,7 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw)
if (port->xdomain && port->xdomain->is_unplugged) {
tb_retimer_remove_all(port);
tb_xdomain_remove(port->xdomain);
+ tb_port_unconfigure_xdomain(port);
port->xdomain = NULL;
ret++;
} else if (port->remote) {
@@ -1304,6 +1424,22 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw)
return ret;
}
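+
+/*
+ * For freeze/thaw only hotplug handling is paused; the established
+ * tunnels are left alive (see also the note at nhi_pm_ops).
+ */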
+static int tb_freeze_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ tcm->hotplug_active = false;
+ return 0;
+}
+
+static int tb_thaw_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ tcm->hotplug_active = true;
+ return 0;
+}
+
static void tb_complete(struct tb *tb)
{
/*
@@ -1317,12 +1453,64 @@ static void tb_complete(struct tb *tb)
mutex_unlock(&tb->lock);
}
+static int tb_runtime_suspend(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ mutex_lock(&tb->lock);
+ tb_switch_suspend(tb->root_switch, true);
+ tcm->hotplug_active = false;
+ mutex_unlock(&tb->lock);
+
+ return 0;
+}
+
+static void tb_remove_work(struct work_struct *work)
+{
+ struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
+ struct tb *tb = tcm_to_tb(tcm);
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch) {
+ tb_free_unplugged_children(tb->root_switch);
+ tb_free_unplugged_xdomains(tb->root_switch);
+ }
+ mutex_unlock(&tb->lock);
+}
+
+static int tb_runtime_resume(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel, *n;
+
+ mutex_lock(&tb->lock);
+ tb_switch_resume(tb->root_switch);
+ tb_free_invalid_tunnels(tb);
+ tb_restore_children(tb->root_switch);
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+ tb_tunnel_restart(tunnel);
+ tcm->hotplug_active = true;
+ mutex_unlock(&tb->lock);
+
+ /*
+ * Schedule cleanup of any unplugged devices. Run this from a
+ * separate worker to avoid a possible deadlock if the device
+ * removal runtime resumes the unplugged device.
+ */
+ queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
+ return 0;
+}
+
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
+ .freeze_noirq = tb_freeze_noirq,
+ .thaw_noirq = tb_thaw_noirq,
.complete = tb_complete,
+ .runtime_suspend = tb_runtime_suspend,
+ .runtime_resume = tb_runtime_resume,
.handle_event = tb_handle_event,
.approve_switch = tb_tunnel_pci,
.approve_xdomain_paths = tb_approve_xdomain_paths,
@@ -1344,6 +1532,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
+ INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3c620a9203c5..8ea360b0ff77 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -125,6 +125,7 @@ struct tb_switch_tmu {
* @rpm: The switch supports runtime PM
* @authorized: Whether the switch is authorized by user or policy
* @security_level: Switch supported security level
+ * @debugfs_dir: Pointer to the debugfs structure
* @key: Contains the key used to challenge the device or %NULL if not
* supported. Size of the key is %TB_SWITCH_KEY_SIZE.
* @connection_id: Connection ID used with ICM messaging
@@ -166,6 +167,7 @@ struct tb_switch {
bool rpm;
unsigned int authorized;
enum tb_security_level security_level;
+ struct dentry *debugfs_dir;
u8 *key;
u8 connection_id;
u8 connection_key;
@@ -333,6 +335,13 @@ struct tb_path {
*/
#define TB_PATH_MAX_HOPS (7 * 2)
+/* Possible wake types */
+#define TB_WAKE_ON_CONNECT BIT(0)
+#define TB_WAKE_ON_DISCONNECT BIT(1)
+#define TB_WAKE_ON_USB4 BIT(2)
+#define TB_WAKE_ON_USB3 BIT(3)
+#define TB_WAKE_ON_PCIE BIT(4)
+
/**
* struct tb_cm_ops - Connection manager specific operations vector
* @driver_ready: Called right after control channel is started. Used by
@@ -342,6 +351,8 @@ struct tb_path {
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
+ * @freeze_noirq: Connection manager specific freeze_noirq
+ * @thaw_noirq: Connection manager specific thaw_noirq
* @complete: Connection manager specific complete
* @runtime_suspend: Connection manager specific runtime_suspend
* @runtime_resume: Connection manager specific runtime_resume
@@ -364,6 +375,8 @@ struct tb_cm_ops {
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
+ int (*freeze_noirq)(struct tb *tb);
+ int (*thaw_noirq)(struct tb *tb);
void (*complete)(struct tb *tb);
int (*runtime_suspend)(struct tb *tb);
int (*runtime_resume)(struct tb *tb);
@@ -457,6 +470,11 @@ static inline bool tb_port_is_null(const struct tb_port *port)
return port && port->port && port->config.type == TB_TYPE_PORT;
}
+static inline bool tb_port_is_nhi(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_NHI;
+}
+
static inline bool tb_port_is_pcie_down(const struct tb_port *port)
{
return port && port->config.type == TB_TYPE_PCIE_DOWN;
@@ -593,6 +611,8 @@ void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
int tb_domain_suspend(struct tb *tb);
+int tb_domain_freeze_noirq(struct tb *tb);
+int tb_domain_thaw_noirq(struct tb *tb);
void tb_domain_complete(struct tb *tb);
int tb_domain_runtime_suspend(struct tb *tb);
int tb_domain_runtime_resume(struct tb *tb);
@@ -632,9 +652,9 @@ struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
-void tb_switch_suspend(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
-int tb_switch_reset(struct tb *tb, u64 route);
+int tb_switch_reset(struct tb_switch *sw);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
enum tb_port_type type);
@@ -685,59 +705,91 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
- return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+ return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+ sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}
static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
- return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+ return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+ sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}
static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
- switch (sw->config.device_id) {
- case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
- case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
- return true;
- default:
- return false;
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ return true;
+ }
}
+ return false;
}
static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
- switch (sw->config.device_id) {
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
- return true;
- default:
- return false;
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return true;
+ }
}
+ return false;
}
static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
{
- switch (sw->config.device_id) {
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
- return true;
- default:
- return false;
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ }
}
+ return false;
}
static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
{
- switch (sw->config.device_id) {
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
- return true;
- default:
- return false;
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ return true;
+ }
}
+ return false;
}
/**
@@ -767,6 +819,8 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw)
int tb_switch_lane_bonding_enable(struct tb_switch *sw);
void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_configure_link(struct tb_switch *sw);
+void tb_switch_unconfigure_link(struct tb_switch *sw);
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
@@ -788,6 +842,8 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_unlock(struct tb_port *port);
+int tb_port_enable(struct tb_port *port);
+int tb_port_disable(struct tb_port *port);
int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
void tb_port_release_in_hopid(struct tb_port *port, int hopid);
int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
@@ -811,7 +867,9 @@ int tb_port_get_link_speed(struct tb_port *port);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+int tb_port_next_cap(struct tb_port *port, unsigned int offset);
bool tb_port_is_enabled(struct tb_port *port);
bool tb_usb3_port_is_enabled(struct tb_port *port);
@@ -844,8 +902,11 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
-int tb_lc_configure_link(struct tb_switch *sw);
-void tb_lc_unconfigure_link(struct tb_switch *sw);
+int tb_lc_configure_port(struct tb_port *port);
+void tb_lc_unconfigure_port(struct tb_port *port);
+int tb_lc_configure_xdomain(struct tb_port *port);
+void tb_lc_unconfigure_xdomain(struct tb_port *port);
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
@@ -900,9 +961,8 @@ int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
-int usb4_switch_configure_link(struct tb_switch *sw);
-void usb4_switch_unconfigure_link(struct tb_switch *sw);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
@@ -919,6 +979,10 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
const struct tb_port *port);
int usb4_port_unlock(struct tb_port *port);
+int usb4_port_configure(struct tb_port *port);
+void usb4_port_unconfigure(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port);
+void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
@@ -945,9 +1009,35 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
-/* keep link controller awake during update */
+/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
void tb_check_quirks(struct tb_switch *sw);
+#ifdef CONFIG_ACPI
+void tb_acpi_add_links(struct tb_nhi *nhi);
+#else
+static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void tb_debugfs_init(void);
+void tb_debugfs_exit(void);
+void tb_switch_debugfs_init(struct tb_switch *sw);
+void tb_switch_debugfs_remove(struct tb_switch *sw);
+#else
+static inline void tb_debugfs_init(void) { }
+static inline void tb_debugfs_exit(void) { }
+static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
+static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+#endif
+
+#ifdef CONFIG_USB4_KUNIT_TEST
+int tb_test_init(void);
+void tb_test_exit(void);
+#else
+static inline int tb_test_init(void) { return 0; }
+static inline void tb_test_exit(void) { }
+#endif
+
#endif
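
The tb_acpi_add_links(), tb_debugfs_*() and tb_test_*() declarations above all use the standard kernel idiom for optional features: a real prototype when the config option is enabled, an empty static inline stub otherwise, so call sites stay free of #ifdef clutter. A minimal sketch of the idiom (names are illustrative, not from this patch):

    #ifdef CONFIG_FOO
    void foo_init(void);
    #else
    static inline void foo_init(void) { }   /* compiles away when CONFIG_FOO=n */
    #endif

    /* callers then need no conditionals: */
    foo_init();
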
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index fc208c567953..0e01dbc63e72 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -28,6 +28,7 @@ enum tb_cfg_error {
TB_CFG_ERROR_LOOP = 8,
TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+ TB_CFG_ERROR_LOCK = 15,
};
/* common header */
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index fd4fc144d17f..e7d9529822fa 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -39,6 +39,7 @@ enum tb_switch_vse_cap {
enum tb_port_cap {
TB_PORT_CAP_PHY = 0x01,
+ TB_PORT_CAP_POWER = 0x02,
TB_PORT_CAP_TIME1 = 0x03,
TB_PORT_CAP_ADAP = 0x04,
TB_PORT_CAP_VSE = 0x05,
@@ -93,6 +94,20 @@ struct tb_cap_extended_long {
u16 length;
} __packed;
+/**
+ * struct tb_cap_any - Structure capable of holding every capability
+ * @basic: Basic capability
+ * @extended_short: Vendor specific capability
+ * @extended_long: Vendor specific extended capability
+ */
+struct tb_cap_any {
+ union {
+ struct tb_cap_basic basic;
+ struct tb_cap_extended_short extended_short;
+ struct tb_cap_extended_long extended_long;
+ };
+} __packed;
+
/* capabilities */
struct tb_cap_link_controller {
@@ -178,6 +193,8 @@ struct tb_regs_switch_header {
#define ROUTER_CS_4 0x04
#define ROUTER_CS_5 0x05
#define ROUTER_CS_5_SLP BIT(0)
+#define ROUTER_CS_5_WOP BIT(1)
+#define ROUTER_CS_5_WOU BIT(2)
#define ROUTER_CS_5_C3S BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
@@ -186,6 +203,8 @@ struct tb_regs_switch_header {
#define ROUTER_CS_6 0x06
#define ROUTER_CS_6_SLPR BIT(0)
#define ROUTER_CS_6_TNS BIT(1)
+#define ROUTER_CS_6_WOPS BIT(2)
+#define ROUTER_CS_6_WOUS BIT(3)
#define ROUTER_CS_6_HCI BIT(18)
#define ROUTER_CS_6_CR BIT(25)
#define ROUTER_CS_7 0x07
@@ -234,7 +253,8 @@ struct tb_regs_port_header {
/* DWORD 1 */
u32 first_cap_offset:8;
u32 max_counters:11;
- u32 __unknown1:5;
+ u32 counters_support:1;
+ u32 __unknown1:4;
u32 revision:8;
/* DWORD 2 */
enum tb_port_type type:24;
@@ -279,6 +299,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
@@ -301,8 +322,13 @@ struct tb_regs_port_header {
#define PORT_CS_18 0x12
#define PORT_CS_18_BE BIT(8)
#define PORT_CS_18_TCM BIT(9)
+#define PORT_CS_18_WOU4S BIT(18)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
+#define PORT_CS_19_PID BIT(4)
+#define PORT_CS_19_WOC BIT(16)
+#define PORT_CS_19_WOD BIT(17)
+#define PORT_CS_19_WOU4 BIT(18)
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
@@ -416,8 +442,14 @@ struct tb_regs_hop {
#define TB_LC_PORT_ATTR_BE BIT(12)
#define TB_LC_SX_CTRL 0x96
+#define TB_LC_SX_CTRL_WOC BIT(1)
+#define TB_LC_SX_CTRL_WOD BIT(2)
+#define TB_LC_SX_CTRL_WOU4 BIT(5)
+#define TB_LC_SX_CTRL_WOP BIT(6)
#define TB_LC_SX_CTRL_L1C BIT(16)
+#define TB_LC_SX_CTRL_L1D BIT(17)
#define TB_LC_SX_CTRL_L2C BIT(20)
+#define TB_LC_SX_CTRL_L2D BIT(21)
#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
#define TB_LC_SX_CTRL_SLP BIT(31)
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index a4d78811f7e2..464c2d37b992 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1623,4 +1623,15 @@ static struct kunit_suite tb_test_suite = {
.name = "thunderbolt",
.test_cases = tb_test_cases,
};
-kunit_test_suite(tb_test_suite);
+
+static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
+
+int tb_test_init(void)
+{
+ return __kunit_test_suites_init(tb_test_suites);
+}
+
+void tb_test_exit(void)
+{
+ return __kunit_test_suites_exit(tb_test_suites);
+}
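
Dropping kunit_test_suite() in favour of exported tb_test_init()/tb_test_exit() lets the suite run from the driver's own module init/exit path instead of as a standalone KUnit module. A sketch of how the hooks could be wired up (the surrounding init/exit functions are hypothetical; only tb_test_init()/tb_test_exit() come from the patch):

    static int __init tb_module_init(void)
    {
        int ret;

        ret = tb_test_init();   /* stub returns 0 when CONFIG_USB4_KUNIT_TEST=n */
        if (ret)
            return ret;
        /* ... register the rest of the driver ... */
        return 0;
    }

    static void __exit tb_module_exit(void)
    {
        /* ... tear down the driver ... */
        tb_test_exit();
    }
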
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 2b8355e6b65f..f2583b4053e4 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -196,6 +196,46 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
return 0;
}
+static void usb4_switch_check_wakes(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ bool wakeup = false;
+ u32 val;
+
+ if (!device_may_wakeup(&sw->dev))
+ return;
+
+ if (tb_route(sw)) {
+ if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+ return;
+
+ tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
+ (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
+ (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
+
+ wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
+ }
+
+ /* Check all connected downstream ports for a USB4 wake */
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_18, 1))
+ break;
+
+ tb_port_dbg(port, "USB4 wake: %s\n",
+ (val & PORT_CS_18_WOU4S) ? "yes" : "no");
+
+ if (val & PORT_CS_18_WOU4S)
+ wakeup = true;
+ }
+
+ if (wakeup)
+ pm_wakeup_event(&sw->dev, 0);
+}
+
static bool link_is_usb4(struct tb_port *port)
{
u32 val;
@@ -229,6 +269,8 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
+ usb4_switch_check_wakes(sw);
+
if (!tb_route(sw))
return 0;
@@ -338,87 +380,103 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
usb4_switch_drom_read_block, sw);
}
-static int usb4_set_port_configured(struct tb_port *port, bool configured)
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
+ struct tb_port *up;
int ret;
u32 val;
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_usb4 + PORT_CS_19, 1);
+ up = tb_upstream_port(sw);
+ ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
if (ret)
- return ret;
-
- if (configured)
- val |= PORT_CS_19_PC;
- else
- val &= ~PORT_CS_19_PC;
+ return false;
- return tb_port_write(port, &val, TB_CFG_PORT,
- port->cap_usb4 + PORT_CS_19, 1);
+ return !!(val & PORT_CS_18_BE);
}
/**
- * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * usb4_switch_set_wake() - Enable/disable wake
* @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
*
- * Sets the upstream USB4 link to be configured for power management
- * purposes.
+ * Enables/disables the router to wake up from sleep.
*/
-int usb4_switch_configure_link(struct tb_switch *sw)
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
- struct tb_port *up;
+ struct tb_port *port;
+ u64 route = tb_route(sw);
+ u32 val;
+ int ret;
- if (!tb_route(sw))
- return 0;
+ /*
+ * Enable wakes coming from all USB4 downstream ports (from
+ * child routers). For device routers, do this also for the
+ * upstream USB4 port.
+ */
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_null(port))
+ continue;
+ if (!route && tb_is_upstream_port(port))
+ continue;
+ if (!port->cap_usb4)
+ continue;
- up = tb_upstream_port(sw);
- return usb4_set_port_configured(up, true);
-}
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
-/**
- * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
- * @sw: USB4 router
- *
- * Reverse of usb4_switch_configure_link().
- */
-void usb4_switch_unconfigure_link(struct tb_switch *sw)
-{
- struct tb_port *up;
+ val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
- if (sw->is_unplugged || !tb_route(sw))
- return;
+ if (flags & TB_WAKE_ON_CONNECT)
+ val |= PORT_CS_19_WOC;
+ if (flags & TB_WAKE_ON_DISCONNECT)
+ val |= PORT_CS_19_WOD;
+ if (flags & TB_WAKE_ON_USB4)
+ val |= PORT_CS_19_WOU4;
- up = tb_upstream_port(sw);
- usb4_set_port_configured(up, false);
-}
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+ }
-/**
- * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
- * @sw: USB4 router
- *
- * Checks whether conditions are met so that lane bonding can be
- * established with the upstream router. Call only for device routers.
- */
-bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
-{
- struct tb_port *up;
- int ret;
- u32 val;
+ /*
+ * Enable wakes from PCIe and USB 3.x on this router. Only
+ * needed for device routers.
+ */
+ if (route) {
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
- up = tb_upstream_port(sw);
- ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
- if (ret)
- return false;
+ val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
+ if (flags & TB_WAKE_ON_USB3)
+ val |= ROUTER_CS_5_WOU;
+ if (flags & TB_WAKE_ON_PCIE)
+ val |= ROUTER_CS_5_WOP;
- return !!(val & PORT_CS_18_BE);
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/**
* usb4_switch_set_sleep() - Prepare the router to enter sleep
* @sw: USB4 router
*
- * Enables wakes and sets sleep bit for the router. Returns when the
- * router sleep ready bit has been asserted.
+ * Sets sleep bit for the router. Returns when the router sleep ready
+ * bit has been asserted.
*/
int usb4_switch_set_sleep(struct tb_switch *sw)
{
@@ -795,6 +853,95 @@ int usb4_port_unlock(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
+static int usb4_port_set_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PC;
+ else
+ val &= ~PORT_CS_19_PC;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure() - Set USB4 port configured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be configured for power management purposes.
+ */
+int usb4_port_configure(struct tb_port *port)
+{
+ return usb4_port_set_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure() - Set USB4 port unconfigured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be unconfigured for power management purposes.
+ */
+void usb4_port_unconfigure(struct tb_port *port)
+{
+ usb4_port_set_configured(port, false);
+}
+
+static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PID;
+ else
+ val &= ~PORT_CS_19_PID;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure_xdomain() - Configure port for XDomain
+ * @port: USB4 port connected to another host
+ *
+ * Marks the USB4 port as being connected to another host. Returns %0 on
+ * success and negative errno on failure.
+ */
+int usb4_port_configure_xdomain(struct tb_port *port)
+{
+ return usb4_set_xdomain_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
+ * @port: USB4 port that was connected to another host
+ *
+ * Unmarks the USB4 port as being connected to another host.
+ */
+void usb4_port_unconfigure_xdomain(struct tb_port *port)
+{
+ usb4_set_xdomain_configured(port, false);
+}
+
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
u32 value, int timeout_msec)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 48907853732a..c00ad817042e 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -881,6 +881,7 @@ static void enumerate_services(struct tb_xdomain *xd)
id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
if (id < 0) {
+ kfree(svc->key);
kfree(svc);
break;
}
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index d1b27b0522a3..8d60e0ff67b4 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -81,6 +81,7 @@ config HVC_DCC
bool "ARM JTAG DCC console"
depends on ARM || ARM64
select HVC_DRIVER
+ select SERIAL_CORE_CONSOLE
help
This console uses the JTAG DCC on ARM to create a console under the HVC
driver. This console is used through a JTAG only on ARM. If you don't have
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 55105ac38f89..509d1042825a 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1216,13 +1216,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
tty_wait_until_sent(tty, HVCS_CLOSE_WAIT);
- /*
- * This line is important because it tells hvcs_open that this
- * device needs to be re-configured the next time hvcs_open is
- * called.
- */
- tty->driver_data = NULL;
-
free_irq(irq, hvcsd);
return;
} else if (hvcsd->port.count < 0) {
@@ -1237,6 +1230,13 @@ static void hvcs_cleanup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
+ /*
+ * This line is important because it tells hvcs_open that this
+ * device needs to be re-configured the next time hvcs_open is
+ * called.
+ */
+ tty->driver_data = NULL;
+
tty_port_put(&hvcsd->port);
}
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index 6bbf35682d53..f5d3e68f5750 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1006,9 +1006,9 @@ static int send_pending_packet(struct ipw_hardware *hw, int priority_limit)
/*
* Send and receive all queued packets.
*/
-static void ipwireless_do_tasklet(unsigned long hw_)
+static void ipwireless_do_tasklet(struct tasklet_struct *t)
{
- struct ipw_hardware *hw = (struct ipw_hardware *) hw_;
+ struct ipw_hardware *hw = from_tasklet(hw, t, tasklet);
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
@@ -1635,7 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
INIT_LIST_HEAD(&hw->rx_queue);
INIT_LIST_HEAD(&hw->rx_pool);
spin_lock_init(&hw->lock);
- tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
+ tasklet_setup(&hw->tasklet, ipwireless_do_tasklet);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
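The tasklet change here is part of the tree-wide conversion from tasklet_init(), which passed an opaque unsigned long, to tasklet_setup(), where the callback receives the tasklet_struct itself and recovers its container via from_tasklet(), a container_of() wrapper. The pattern in isolation (illustrative names):

    struct foo {
        struct tasklet_struct tasklet;
        /* ... driver state ... */
    };

    static void foo_do_tasklet(struct tasklet_struct *t)
    {
        struct foo *foo = from_tasklet(foo, t, tasklet);

        /* ... act on foo ... */
    }

    /* at init time: */
    tasklet_setup(&foo->tasklet, foo_do_tasklet);
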
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
index cf20616340a1..fe569f6294a2 100644
--- a/drivers/tty/ipwireless/network.c
+++ b/drivers/tty/ipwireless/network.c
@@ -117,7 +117,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
skb->len,
notify_packet_sent,
network);
- if (ret == -1) {
+ if (ret < 0) {
skb_pull(skb, 2);
return 0;
}
@@ -134,7 +134,7 @@ static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
notify_packet_sent,
network);
kfree(buf);
- if (ret == -1)
+ if (ret < 0)
return 0;
}
kfree_skb(skb);
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index fad3401e604d..23584769fc29 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -218,7 +218,7 @@ static int ipw_write(struct tty_struct *linux_tty,
ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
buf, count,
ipw_write_packet_sent_callback, tty);
- if (ret == -1) {
+ if (ret < 0) {
mutex_unlock(&tty->ipw_tty_mutex);
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 35cf12147e39..25f3152089c2 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -76,10 +76,9 @@ module_param(debug, int, 0600);
/**
* struct gsm_mux_net - network interface
- * @struct gsm_dlci* dlci
*
* Created when net interface is initialized.
- **/
+ */
struct gsm_mux_net {
struct kref ref;
struct gsm_dlci *dlci;
@@ -222,11 +221,8 @@ struct gsm_mux {
u8 received_fcs;
u8 *txframe; /* TX framing buffer */
- /* Methods for the receiver side */
+ /* Method for the receiver side */
void (*receive)(struct gsm_mux *gsm, u8 ch);
- void (*error)(struct gsm_mux *gsm, u8 ch, u8 flag);
- /* And transmit side */
- int (*output)(struct gsm_mux *mux, u8 *data, int len);
/* Link Layer */
unsigned int mru;
@@ -366,6 +362,8 @@ static const u8 gsm_fcs8[256] = {
#define INIT_FCS 0xFF
#define GOOD_FCS 0xCF
+static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
+
/**
* gsm_fcs_add - update FCS
* @fcs: Current FCS
@@ -400,7 +398,7 @@ static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
/**
* gsm_read_ea - read a byte into an EA
* @val: variable holding value
- * c: byte going into the EA
+ * @c: byte going into the EA
*
* Processes one byte of an EA. Updates the passed variable
* and returns 1 if the EA is now completely read
@@ -514,8 +512,8 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
/**
* gsm_stuff_packet - bytestuff a packet
- * @ibuf: input
- * @obuf: output
+ * @input: input buffer
+ * @output: output buffer
* @len: length of input
*
* Expand a buffer by bytestuffing it. The worst case size change
@@ -587,7 +585,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
WARN_ON(1);
return;
}
- gsm->output(gsm, cbuf, len);
+ gsmld_output(gsm, cbuf, len);
gsm_print_packet("-->", addr, cr, control, NULL, 0);
}
@@ -687,7 +685,7 @@ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
print_hex_dump_bytes("gsm_data_kick: ",
DUMP_PREFIX_OFFSET,
gsm->txframe, len);
- if (gsm->output(gsm, gsm->txframe, len) < 0)
+ if (gsmld_output(gsm, gsm->txframe, len) < 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
@@ -1305,7 +1303,7 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
/**
* gsm_control_retransmit - retransmit a control frame
- * @data: pointer to our gsm object
+ * @t: timer contained in our gsm object
*
* Called off the T2 timer expiry in order to retransmit control frames
* that have been lost in the system somewhere. The control_lock protects
@@ -1342,7 +1340,7 @@ static void gsm_control_retransmit(struct timer_list *t)
* @gsm: the GSM channel
* @command: command to send including CR bit
* @data: bytes of data (must be kmalloced)
- * @len: length of the block to send
+ * @clen: length of the block to send
*
* Queue and dispatch a control command. Only one command can be
* active at a time. In theory more can be outstanding but the matching
@@ -1454,7 +1452,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
/**
* gsm_dlci_t1 - T1 timer expiry
- * @dlci: DLCI that opened
+ * @t: timer contained in the DLCI that opened
*
* The T1 timer handles retransmits of control frames (essentially of
* SABM and DISC). We resend the command until the retry count runs out
@@ -1550,7 +1548,7 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
* gsm_dlci_data - data arrived
* @dlci: channel
* @data: block of bytes received
- * @len: length of received block
+ * @clen: length of received block
*
* A UI or UIH frame has arrived which contains data for a channel
* other than the control channel. If the relevant virtual tty is
@@ -1672,7 +1670,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
/**
* gsm_dlci_free - free DLCI
- * @dlci: DLCI to free
+ * @port: tty port for DLCI to free
*
* Free up a DLCI.
*
@@ -2128,7 +2126,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
- gsm->error = gsm_error;
spin_lock(&gsm_mux_lock);
for (i = 0; i < MAX_MUX; i++) {
@@ -2151,7 +2148,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
/**
* gsm_free_mux - free up a mux
- * @mux: mux to free
+ * @gsm: mux to free
*
* Dispose of allocated resources for a dead mux
*/
@@ -2164,7 +2161,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
/**
* gsm_free_muxr - free up a mux
- * @mux: mux to free
+ * @ref: kreference to the mux to free
*
* Dispose of allocated resources for a dead mux
*/
@@ -2378,7 +2375,6 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
int ret, i;
gsm->tty = tty_kref_get(tty);
- gsm->output = gsmld_output;
ret = gsm_activate_mux(gsm);
if (ret != 0)
tty_kref_put(gsm->tty);
@@ -2438,7 +2434,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
case TTY_BREAK:
case TTY_PARITY:
case TTY_FRAME:
- gsm->error(gsm, *dp, flags);
+ gsm_error(gsm, *dp, flags);
break;
default:
WARN_ONCE(1, "%s: unknown flag %d\n",
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 8e975cb29833..12557ee1edb6 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -123,13 +123,13 @@ struct n_hdlc_buf_list {
/**
* struct n_hdlc - per device instance data structure
- * @magic - magic value for structure
- * @tbusy - reentrancy flag for tx wakeup code
- * @woke_up - tx wakeup needs to be run again as it was called while @tbusy
- * @tx_buf_list - list of pending transmit frame buffers
- * @rx_buf_list - list of received frame buffers
- * @tx_free_buf_list - list unused transmit frame buffers
- * @rx_free_buf_list - list unused received frame buffers
+ * @magic: magic value for structure
+ * @tbusy: reentrancy flag for tx wakeup code
+ * @woke_up: tx wakeup needs to be run again as it was called while @tbusy
+ * @tx_buf_list: list of pending transmit frame buffers
+ * @rx_buf_list: list of received frame buffers
+ * @tx_free_buf_list: list of unused transmit frame buffers
+ * @rx_free_buf_list: list of unused received frame buffers
*/
struct n_hdlc {
int magic;
@@ -187,7 +187,7 @@ static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
/**
* n_hdlc_tty_close - line discipline close
- * @tty - pointer to tty info structure
+ * @tty: pointer to tty info structure
*
* Called when the line discipline is changed to something
* else, the tty is closed, or the tty detects a hangup.
@@ -218,7 +218,7 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
/**
* n_hdlc_tty_open - called when line discipline changed to n_hdlc
- * @tty - pointer to tty info structure
+ * @tty: pointer to tty info structure
*
* Returns 0 if success, otherwise error code
*/
@@ -255,8 +255,8 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
/**
* n_hdlc_send_frames - send frames on pending send buffer list
- * @n_hdlc - pointer to ldisc instance data
- * @tty - pointer to tty instance data
+ * @n_hdlc: pointer to ldisc instance data
+ * @tty: pointer to tty instance data
*
* Send frames on the pending send buffer list until the driver does not accept a
* frame (busy). This function is called after adding a frame to the send buffer
@@ -335,7 +335,7 @@ check_again:
/**
* n_hdlc_tty_wakeup - Callback for transmit wakeup
- * @tty - pointer to associated tty instance data
+ * @tty: pointer to associated tty instance data
*
* Called when low level device driver can accept more send data.
*/
@@ -348,10 +348,10 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
/**
* n_hdlc_tty_receive - Called by tty driver when receive data is available
- * @tty - pointer to tty instance data
- * @data - pointer to received data
- * @flags - pointer to flags for data
- * @count - count of received data in bytes
+ * @tty: pointer to tty instance data
+ * @data: pointer to received data
+ * @flags: pointer to flags for data
+ * @count: count of received data in bytes
*
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
@@ -408,10 +408,10 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
/**
* n_hdlc_tty_read - Called to retrieve one frame of data (if available)
- * @tty - pointer to tty instance data
- * @file - pointer to open file object
- * @buf - pointer to returned data buffer
- * @nr - size of returned data buffer
+ * @tty: pointer to tty instance data
+ * @file: pointer to open file object
+ * @buf: pointer to returned data buffer
+ * @nr: size of returned data buffer
*
* Returns the number of bytes returned or error code.
*/
@@ -479,10 +479,10 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
/**
* n_hdlc_tty_write - write a single frame of data to device
- * @tty - pointer to associated tty device instance data
- * @file - pointer to file object data
- * @data - pointer to transmit data (one frame)
- * @count - size of transmit frame in bytes
+ * @tty: pointer to associated tty device instance data
+ * @file: pointer to file object data
+ * @data: pointer to transmit data (one frame)
+ * @count: size of transmit frame in bytes
*
* Returns the number of bytes written (or error code).
*/
@@ -546,10 +546,10 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
/**
* n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
- * @tty - pointer to tty instance data
- * @file - pointer to open file object for device
- * @cmd - IOCTL command code
- * @arg - argument for IOCTL call (cmd dependent)
+ * @tty: pointer to tty instance data
+ * @file: pointer to open file object for device
+ * @cmd: IOCTL command code
+ * @arg: argument for IOCTL call (cmd dependent)
*
* Returns command dependent result.
*/
@@ -614,9 +614,9 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
/**
* n_hdlc_tty_poll - TTY callback for poll system call
- * @tty - pointer to tty instance data
- * @filp - pointer to open file object for device
- * @poll_table - wait queue for operations
+ * @tty: pointer to tty instance data
+ * @filp: pointer to open file object for device
+ * @wait: wait queue for operations
*
* Determine which operations (read/write) will not block and return info
* to caller.
@@ -703,8 +703,8 @@ static struct n_hdlc *n_hdlc_alloc(void)
/**
* n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
- * @buf_list - pointer to the buffer list
- * @buf - pointer to the buffer
+ * @buf_list: pointer to the buffer list
+ * @buf: pointer to the buffer
*/
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
@@ -721,8 +721,8 @@ static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @buf_list - pointer to buffer list
- * @buf - pointer to buffer
+ * @buf_list: pointer to buffer list
+ * @buf: pointer to buffer
*/
static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
@@ -739,7 +739,7 @@ static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @buf_list - pointer to HDLC buffer list
+ * @buf_list: pointer to HDLC buffer list
*
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 1794d84e7bf6..7e5e36315260 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -322,7 +322,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
/**
* reset_buffer_flags - reset buffer state
- * @tty: terminal to reset
+ * @ldata: line disc data to reset
*
* Reset the read buffer counters and clear the flags.
* Called from n_tty_open() and n_tty_flush_buffer().
@@ -906,7 +906,7 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
/**
* echo_char_raw - echo a character raw
* @c: unicode byte to echo
- * @tty: terminal device
+ * @ldata: line disc data
*
* Echo user input back onto the screen. This must be called only when
* L_ECHO(tty) is true. Called from the driver receive_buf path.
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 00099a8439d2..23368cec7ee8 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -100,7 +100,7 @@ static void pty_unthrottle(struct tty_struct *tty)
* pty_write - write to a pty
* @tty: the tty we write from
* @buf: kernel buffer of data
- * @count: bytes to write
+ * @c: bytes to write
*
* Our "hardware" write method. Data is coming from the ldisc which
* may be in a non sleeping state. We simply throw this at the other
@@ -120,10 +120,10 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
spin_lock_irqsave(&to->port->lock, flags);
/* Stuff the data into the input queue of the other end */
c = tty_insert_flip_string(to->port, buf, c);
+ spin_unlock_irqrestore(&to->port->lock, flags);
/* And shovel */
if (c)
tty_flip_buffer_push(to->port);
- spin_unlock_irqrestore(&to->port->lock, flags);
}
return c;
}
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 718e010fcb04..09baef4ccc39 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -50,25 +50,25 @@ static const char serial21285_name[] = "Footbridge UART";
static bool is_enabled(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- if (test_bit(bit, &private_data))
+ if (test_bit(bit, private_data))
return true;
return false;
}
static void enable(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- set_bit(bit, &private_data);
+ set_bit(bit, private_data);
}
static void disable(struct uart_port *port, int bit)
{
- unsigned long private_data = (unsigned long)port->private_data;
+ unsigned long *private_data = (unsigned long *)&port->private_data;
- clear_bit(bit, &private_data);
+ clear_bit(bit, private_data);
}
#define is_tx_enabled(port) is_enabled(port, tx_enabled_bit)
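The 21285 change fixes a real bug rather than style: the old helpers copied port->private_data into a local unsigned long and ran the atomic bit ops on that stack copy, so enable()/disable() were silent no-ops. Taking a pointer to the actual storage makes the updates stick. The failure mode in miniature (a standalone sketch, not driver code):

    static unsigned long state;         /* the real storage */

    static void broken_set(int bit)
    {
        unsigned long copy = state;     /* snapshot ... */
        set_bit(bit, &copy);            /* ... so only the copy changes */
    }

    static void fixed_set(int bit)
    {
        set_bit(bit, &state);           /* updates the real storage */
    }
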
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index 12d03e678295..fd95860cd661 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -110,12 +110,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
/* get the clock - this also enables the HW */
data->clk = devm_clk_get(&pdev->dev, NULL);
- ret = PTR_ERR_OR_ZERO(data->clk);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(data->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
/* get the interrupt */
ret = platform_get_irq(pdev, 0);
@@ -155,9 +151,7 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
/* register the port */
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "unable to register 8250 port - %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "unable to register 8250 port\n");
goto dis_clk;
}
data->line = ret;
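dev_err_probe() wraps the recurring "print unless deferring" dance: it logs an error for real failures, records the reason for -EPROBE_DEFER (visible in /sys/kernel/debug/devices_deferred), and hands back the error code so it can be returned directly. The shape of the conversion (a sketch mirroring the hunk above):

    /* before */
    clk = devm_clk_get(dev, NULL);
    if (IS_ERR(clk)) {
        ret = PTR_ERR(clk);
        if (ret != -EPROBE_DEFER)
            dev_err(dev, "could not get clk: %d\n", ret);
        return ret;
    }

    /* after */
    clk = devm_clk_get(dev, NULL);
    if (IS_ERR(clk))
        return dev_err_probe(dev, PTR_ERR(clk), "could not get clk\n");
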
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 87f450b7c177..9e204f9b799a 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -373,39 +373,6 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
serial8250_do_set_ldisc(p, termios);
}
-static int dw8250_startup(struct uart_port *p)
-{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
- int ret;
-
- /*
- * Some platforms may provide a reference clock shared between several
- * devices. In this case before using the serial port first we have to
- * make sure that any clock state change is known to the UART port at
- * least post factum.
- */
- if (d->clk) {
- ret = clk_notifier_register(d->clk, &d->clk_notifier);
- if (ret)
- dev_warn(p->dev, "Failed to set the clock notifier\n");
- }
-
- return serial8250_do_startup(p);
-}
-
-static void dw8250_shutdown(struct uart_port *p)
-{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
- serial8250_do_shutdown(p);
-
- if (d->clk) {
- clk_notifier_unregister(d->clk, &d->clk_notifier);
-
- flush_work(&d->clk_work);
- }
-}
-
/*
* dw8250_fallback_dma_filter will prevent the UART from getting just any free
* channel on platforms that have DMA engines, but don't have any channels
@@ -501,8 +468,6 @@ static int dw8250_probe(struct platform_device *pdev)
p->serial_out = dw8250_serial_out;
p->set_ldisc = dw8250_set_ldisc;
p->set_termios = dw8250_set_termios;
- p->startup = dw8250_startup;
- p->shutdown = dw8250_shutdown;
p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
if (!p->membase)
@@ -622,6 +587,19 @@ static int dw8250_probe(struct platform_device *pdev)
goto err_reset;
}
+ /*
+ * Some platforms may provide a reference clock shared between several
+ * devices. In this case any clock state change must be known to the
+ * UART port at least post factum.
+ */
+ if (data->clk) {
+ err = clk_notifier_register(data->clk, &data->clk_notifier);
+ if (err)
+ dev_warn(p->dev, "Failed to set the clock notifier\n");
+ else
+ queue_work(system_unbound_wq, &data->clk_work);
+ }
+
platform_set_drvdata(pdev, data);
pm_runtime_set_active(dev);
@@ -648,6 +626,12 @@ static int dw8250_remove(struct platform_device *pdev)
pm_runtime_get_sync(dev);
+ if (data->clk) {
+ clk_notifier_unregister(data->clk, &data->clk_notifier);
+
+ flush_work(&data->clk_work);
+ }
+
serial8250_unregister_port(data->data.line);
reset_control_assert(data->rst);
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 0d0c80905c58..fbcc90c31ca1 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,15 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/serial_reg.h>
-#include <linux/serial_8250.h>
-
-#include "8250.h"
-
/*
* Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
+ * Copyright 2020 NXP
+ * Copyright 2020 Puresoftware Ltd.
*
* This isn't a full driver; it just provides an alternate IRQ
- * handler to deal with an errata. Everything else is just
- * using the bog standard 8250 support.
+ * handler to deal with an erratum and to provide an ACPI wrapper.
+ * Everything else is just using the bog standard 8250 support.
*
* We follow code flow of serial8250_default_handle_irq() but add
* a check for a break and insert a dummy read on the Rx for the
@@ -20,6 +17,16 @@
* IRQ event to the next one.
*/
+#include <linux/acpi.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+struct fsl8250_data {
+ int line;
+};
+
int fsl8250_handle_irq(struct uart_port *port)
{
unsigned char lsr, orig_lsr;
@@ -71,7 +78,7 @@ int fsl8250_handle_irq(struct uart_port *port)
serial8250_modem_status(up);
- if (lsr & UART_LSR_THRE)
+ if ((lsr & UART_LSR_THRE) && (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
up->lsr_saved_flags = orig_lsr;
@@ -79,3 +86,90 @@ int fsl8250_handle_irq(struct uart_port *port)
return 1;
}
EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
+
+#ifdef CONFIG_ACPI
+static int fsl8250_acpi_probe(struct platform_device *pdev)
+{
+ struct fsl8250_data *data;
+ struct uart_8250_port port8250;
+ struct device *dev = &pdev->dev;
+ struct resource *regs;
+
+ int ret, irq;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(dev, "no registers defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "cannot get irq\n");
+ return irq;
+ }
+
+ memset(&port8250, 0, sizeof(port8250));
+
+ ret = device_property_read_u32(dev, "clock-frequency",
+ &port8250.port.uartclk);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&port8250.port.lock);
+
+ port8250.port.mapbase = regs->start;
+ port8250.port.irq = irq;
+ port8250.port.handle_irq = fsl8250_handle_irq;
+ port8250.port.type = PORT_16550A;
+ port8250.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
+ | UPF_FIXED_PORT | UPF_IOREMAP
+ | UPF_FIXED_TYPE;
+ port8250.port.dev = dev;
+ port8250.port.mapsize = resource_size(regs);
+ port8250.port.iotype = UPIO_MEM;
+ port8250.port.irqflags = IRQF_SHARED;
+
+ port8250.port.membase = devm_ioremap(dev, port8250.port.mapbase,
+ port8250.port.mapsize);
+ if (!port8250.port.membase)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->line = serial8250_register_8250_port(&port8250);
+ if (data->line < 0)
+ return data->line;
+
+ platform_set_drvdata(pdev, data);
+ return 0;
+}
+
+static int fsl8250_acpi_remove(struct platform_device *pdev)
+{
+ struct fsl8250_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+ return 0;
+}
+
+static const struct acpi_device_id fsl_8250_acpi_id[] = {
+ { "NXP0018", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, fsl_8250_acpi_id);
+
+static struct platform_driver fsl8250_platform_driver = {
+ .driver = {
+ .name = "fsl-16550-uart",
+ .acpi_match_table = ACPI_PTR(fsl_8250_acpi_id),
+ },
+ .probe = fsl8250_acpi_probe,
+ .remove = fsl8250_acpi_remove,
+};
+
+module_platform_driver(fsl8250_platform_driver);
+#endif
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index dde766fa465f..988bf6bcce42 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -259,22 +259,14 @@ static int ingenic_uart_probe(struct platform_device *pdev)
return -ENOMEM;
data->clk_module = devm_clk_get(&pdev->dev, "module");
- if (IS_ERR(data->clk_module)) {
- err = PTR_ERR(data->clk_module);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "unable to get module clock: %d\n", err);
- return err;
- }
+ if (IS_ERR(data->clk_module))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->clk_module),
+ "unable to get module clock\n");
data->clk_baud = devm_clk_get(&pdev->dev, "baud");
- if (IS_ERR(data->clk_baud)) {
- err = PTR_ERR(data->clk_baud);
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "unable to get baud clock: %d\n", err);
- return err;
- }
+ if (IS_ERR(data->clk_baud))
+ return dev_err_probe(&pdev->dev, PTR_ERR(data->clk_baud),
+ "unable to get baud clock\n");
err = clk_prepare_enable(data->clk_module);
if (err) {
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 7b0dec14c8b8..fa876e2c13e5 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -317,7 +317,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = tty_termios_baud_rate(termios);
- serial8250_do_set_termios(port, termios, old);
+ serial8250_do_set_termios(port, termios, NULL);
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -669,6 +669,7 @@ static int __init early_mtk8250_setup(struct earlycon_device *device,
return -ENODEV;
device->port.iotype = UPIO_MEM32;
+ device->port.regshift = 2;
return early_serial8250_setup(device, NULL);
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 55bb7b897d97..d5a513efb261 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1776,6 +1776,39 @@ pci_wch_ch38x_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+
+#define CH384_XINT_ENABLE_REG 0xEB
+#define CH384_XINT_ENABLE_BIT 0x02
+
+static int pci_wch_ch38x_init(struct pci_dev *dev)
+{
+ int max_port;
+ unsigned long iobase;
+
+
+ switch (dev->device) {
+ case 0x3853: /* 8 ports */
+ max_port = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iobase = pci_resource_start(dev, 0);
+ outb(CH384_XINT_ENABLE_BIT, iobase + CH384_XINT_ENABLE_REG);
+
+ return max_port;
+}
+
+static void pci_wch_ch38x_exit(struct pci_dev *dev)
+{
+ unsigned long iobase;
+
+ iobase = pci_resource_start(dev, 0);
+ outb(0x0, iobase + CH384_XINT_ENABLE_REG);
+}
+
+
static int
pci_sunix_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1867,6 +1900,7 @@ pci_moxa_setup(struct serial_private *priv,
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
#define PCI_VENDOR_ID_ACCESIO 0x494f
@@ -2642,6 +2676,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch38x_setup,
},
+ /* WCH CH384 8S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH384_8S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_wch_ch38x_init,
+ .exit = pci_wch_ch38x_exit,
+ .setup = pci_wch_ch38x_setup,
+ },
/*
* ASIX devices with FIFO bug
*/
@@ -2751,15 +2795,6 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
return quirk;
}
-static inline int get_pci_irq(struct pci_dev *dev,
- const struct pciserial_board *board)
-{
- if (board->flags & FL_NOIRQ)
- return 0;
- else
- return dev->irq;
-}
-
/*
* This is the configuration table for all of the PCI serial boards
* which we support. It is directly indexed by the pci_board_num_t enum
@@ -2913,6 +2948,7 @@ enum pci_board_num_t {
pbn_fintek_F81512A,
pbn_wch382_2,
pbn_wch384_4,
+ pbn_wch384_8,
pbn_pericom_PI7C9X7951,
pbn_pericom_PI7C9X7952,
pbn_pericom_PI7C9X7954,
@@ -3650,6 +3686,13 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
.first_offset = 0xC0,
},
+ [pbn_wch384_8] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0x00,
+ },
/*
* Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
*/
@@ -5566,6 +5609,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_8S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch384_8 },
/*
* Realtek RealManage
*/
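For context on the CH384 8S quirk above: in 8250_pci.c a quirk's .init hook may return a negative errno to abort, zero to proceed, or a positive value that overrides the board table's port count, which is how pci_wch_ch38x_init() reports its eight ports after enabling the external interrupt via CH384_XINT_ENABLE_REG. Roughly, the core dispatch does (paraphrased sketch, not part of this patch):

    if (quirk->init) {
        rc = quirk->init(dev);
        if (rc < 0)
            return rc;          /* quirk vetoed the device */
        if (rc)
            nr_ports = rc;      /* positive value overrides num_ports */
    }
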
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c71d647eb87a..b0af13074cd3 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2653,6 +2653,10 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
goto out_lock;
port->uartclk = uartclk;
+
+ if (!tty_port_initialized(&port->state->port))
+ goto out_lock;
+
termios = &port->state->port.tty->termios;
baud = serial8250_get_baud_rate(port, termios, NULL);
@@ -2665,7 +2669,6 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
spin_unlock_irqrestore(&port->lock, flags);
serial8250_rpm_put(up);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 9409be982aa6..28f22e58639c 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -8,6 +8,7 @@ menu "Serial drivers"
config SERIAL_EARLYCON
bool
+ depends on SERIAL_CORE
help
Support for early consoles with the earlycon parameter. This enables
the console before standard serial driver is probed. The console is
@@ -235,7 +236,7 @@ config SERIAL_CLPS711X_CONSOLE
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
+ depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -520,6 +521,8 @@ config SERIAL_IMX_EARLYCON
depends on ARCH_MXC || COMPILE_TEST
depends on OF
select SERIAL_EARLYCON
+ select SERIAL_CORE_CONSOLE
+ default y if SERIAL_IMX_CONSOLE
help
If you have enabled the earlycon on the Freescale IMX
CPU you can make it the earlycon by answering Y to this option.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 67498594d7d7..87dc3fc15694 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -308,8 +308,9 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
*/
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
- u16 status;
unsigned int ch, flag, fifotaken;
+ int sysrq;
+ u16 status;
for (fifotaken = 0; fifotaken != 256; fifotaken++) {
status = pl011_read(uap, REG_FR);
@@ -344,10 +345,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- if (uart_handle_sysrq_char(&uap->port, ch & 255))
- continue;
+ spin_unlock(&uap->port.lock);
+ sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
+ spin_lock(&uap->port.lock);
- uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
+ if (!sysrq)
+ uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
return fifotaken;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index bb5fc8bdd57a..a24e5c2b30bc 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1722,10 +1722,11 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
-static void atmel_tasklet_rx_func(unsigned long data)
+static void atmel_tasklet_rx_func(struct tasklet_struct *t)
{
- struct uart_port *port = (struct uart_port *)data;
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
+ tasklet_rx);
+ struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
@@ -1733,10 +1734,11 @@ static void atmel_tasklet_rx_func(unsigned long data)
spin_unlock(&port->lock);
}
-static void atmel_tasklet_tx_func(unsigned long data)
+static void atmel_tasklet_tx_func(struct tasklet_struct *t)
{
- struct uart_port *port = (struct uart_port *)data;
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_uart_port *atmel_port = from_tasklet(atmel_port, t,
+ tasklet_tx);
+ struct uart_port *port = &atmel_port->uart;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
@@ -1911,10 +1913,8 @@ static int atmel_startup(struct uart_port *port)
}
atomic_set(&atmel_port->tasklet_shutdown, 0);
- tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
- (unsigned long)port);
- tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
- (unsigned long)port);
+ tasklet_setup(&atmel_port->tasklet_rx, atmel_tasklet_rx_func);
+ tasklet_setup(&atmel_port->tasklet_tx, atmel_tasklet_tx_func);
/*
* Initialize DMA (if necessary)
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 2ae9190b64bb..b70877932d47 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -56,7 +56,6 @@ static void __init earlycon_init(struct earlycon_device *device,
const char *name)
{
struct console *earlycon = device->con;
- struct uart_port *port = &device->port;
const char *s;
size_t len;
@@ -70,6 +69,12 @@ static void __init earlycon_init(struct earlycon_device *device,
len = s - name;
strlcpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
earlycon->data = &early_console_dev;
+}
+
+static void __init earlycon_print_info(struct earlycon_device *device)
+{
+ struct console *earlycon = device->con;
+ struct uart_port *port = &device->port;
if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 ||
port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE)
@@ -140,6 +145,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
earlycon_init(&early_console_dev, match->name);
err = match->setup(&early_console_dev, buf);
+ earlycon_print_info(&early_console_dev);
if (err < 0)
return err;
if (!early_console_dev.con->write)
@@ -302,6 +308,7 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
}
earlycon_init(&early_console_dev, match->name);
err = match->setup(&early_console_dev, options);
+ earlycon_print_info(&early_console_dev);
if (err < 0)
return err;
if (!early_console_dev.con->write)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 7ca642249224..bd047e1f9bea 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -314,9 +314,10 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
/* Forward declare this for the dma callbacks */
static void lpuart_dma_tx_complete(void *arg);
-static inline bool is_ls1028a_lpuart(struct lpuart_port *sport)
+static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
{
- return sport->devtype == LS1028A_LPUART;
+ return (sport->devtype == LS1021A_LPUART ||
+ sport->devtype == LS1028A_LPUART);
}
static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
@@ -649,26 +650,24 @@ static int lpuart32_poll_init(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
/* Disable Rx & Tx */
- lpuart32_write(&sport->port, UARTCTRL, 0);
+ lpuart32_write(&sport->port, 0, UARTCTRL);
temp = lpuart32_read(&sport->port, UARTFIFO);
/* Enable Rx and Tx FIFO */
- lpuart32_write(&sport->port, UARTFIFO,
- temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
+ lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
/* flush Tx and Rx FIFO */
- lpuart32_write(&sport->port, UARTFIFO,
- UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
+ lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
/* explicitly clear RDRF */
if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
lpuart32_read(&sport->port, UARTDATA);
- lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
+ lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
}
/* Enable Rx and Tx */
- lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
+ lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
@@ -677,12 +676,12 @@ static int lpuart32_poll_init(struct uart_port *port)
static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
{
lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TDRE);
- lpuart32_write(port, UARTDATA, c);
+ lpuart32_write(port, c, UARTDATA);
}
static int lpuart32_poll_get_char(struct uart_port *port)
{
- if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
+ if (!(lpuart32_read(port, UARTWATER) >> UARTWATER_RXCNT_OFF))
return NO_POLL_CHAR;
return lpuart32_read(port, UARTDATA);
@@ -978,6 +977,15 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
sts = readb(sport->port.membase + UARTSR1);
+ /* SysRq: when using DMA, detect a line break via the framing error */
+ if (sts & UARTSR1_FE && sport->lpuart_dma_rx_use) {
+ readb(sport->port.membase + UARTDR);
+ uart_handle_break(&sport->port);
+ /* the line break produces some garbage, remove it */
+ writeb(UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO);
+ return IRQ_HANDLED;
+ }
+
if (sts & UARTSR1_RDRF && !sport->lpuart_dma_rx_use)
lpuart_rxint(sport);
@@ -1006,6 +1014,37 @@ static irqreturn_t lpuart32_int(int irq, void *dev_id)
return IRQ_HANDLED;
}
+
+static inline void lpuart_handle_sysrq_chars(struct uart_port *port,
+ unsigned char *p, int count)
+{
+ while (count--) {
+ if (*p && uart_handle_sysrq_char(port, *p))
+ return;
+ p++;
+ }
+}
+
+static void lpuart_handle_sysrq(struct lpuart_port *sport)
+{
+ struct circ_buf *ring = &sport->rx_ring;
+ int count;
+
+ if (ring->head < ring->tail) {
+ count = sport->rx_sgl.length - ring->tail;
+ lpuart_handle_sysrq_chars(&sport->port,
+ ring->buf + ring->tail, count);
+ ring->tail = 0;
+ }
+
+ if (ring->head > ring->tail) {
+ count = ring->head - ring->tail;
+ lpuart_handle_sysrq_chars(&sport->port,
+ ring->buf + ring->tail, count);
+ ring->tail = ring->head;
+ }
+}
+
static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
{
struct tty_port *port = &sport->port.state->port;
@@ -1092,6 +1131,15 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
*/
ring->head = sport->rx_sgl.length - state.residue;
BUG_ON(ring->head > sport->rx_sgl.length);
+
+ /*
+ * Silent handling of keys pressed in the sysrq timeframe
+ */
+ if (sport->port.sysrq) {
+ lpuart_handle_sysrq(sport);
+ goto exit;
+ }
+
/*
* At this point ring->head may point to the first byte right after the
* last byte of the dma buffer:
@@ -1123,6 +1171,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.rx += count;
}
+exit:
dma_sync_sg_for_device(chan->device->dev, &sport->rx_sgl, 1,
DMA_FROM_DEVICE);
@@ -1260,7 +1309,7 @@ static int lpuart_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transer _or_ after
+ * RTS needs to be logic HIGH either during transfer _or_ after
* transfer, other variants are not supported by the hardware.
*/
@@ -1311,7 +1360,7 @@ static int lpuart32_config_rs485(struct uart_port *port,
modem |= UARTMODEM_TXRTSE;
/*
- * RTS needs to be logic HIGH either during transer _or_ after
+ * RTS needs to be logic HIGH either during transfer _or_ after
* transfer, other variants are not supported by the hardware.
*/
@@ -1559,6 +1608,7 @@ err:
static void lpuart_rx_dma_startup(struct lpuart_port *sport)
{
int ret;
+ unsigned char cr3;
if (!sport->dma_rx_chan)
goto err;
@@ -1575,6 +1625,12 @@ static void lpuart_rx_dma_startup(struct lpuart_port *sport)
sport->lpuart_dma_rx_use = true;
rx_dma_timer_init(sport);
+ if (sport->port.has_sysrq) {
+ cr3 = readb(sport->port.membase + UARTCR3);
+ cr3 |= UARTCR3_FEIE;
+ writeb(cr3, sport->port.membase + UARTCR3);
+ }
+
return;
err:
@@ -1646,11 +1702,11 @@ static int lpuart32_startup(struct uart_port *port)
UARTFIFO_FIFOSIZE_MASK);
/*
- * The LS1028A has a fixed length of 16 words. Although it supports the
- * RX/TXSIZE fields their encoding is different. Eg the reference manual
- * states 0b101 is 16 words.
+ * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
+ * Although they support the RX/TXSIZE fields, their encoding is
+ * different. Eg the reference manual states 0b101 is 16 words.
*/
- if (is_ls1028a_lpuart(sport)) {
+ if (is_layerscape_lpuart(sport)) {
sport->rxfifo_size = 16;
sport->txfifo_size = 16;
sport->port.fifosize = sport->txfifo_size;
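lpuart_handle_sysrq() above scans the DMA ring in at most two linear chunks because the circular buffer can wrap: first the tail-to-end segment, then the start-to-head remainder. The same traversal, reduced to its essentials (sketch):

    /* head/tail index a circular buffer of 'size' bytes */
    if (head < tail) {                  /* wrapped: data in [tail, size) ... */
        scan(buf + tail, size - tail);
        tail = 0;                       /* ... then continue from the start */
    }
    if (head > tail)                    /* linear part: data in [tail, head) */
        scan(buf + tail, head - tail);
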
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 624f3d541c68..94c8281ddb5f 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -138,24 +138,24 @@ static void free_port_memory(struct icom_port *icom_port)
trace(icom_port, "RET_PORT_MEM", 0);
if (icom_port->recv_buf) {
- pci_free_consistent(dev, 4096, icom_port->recv_buf,
- icom_port->recv_buf_pci);
+ dma_free_coherent(&dev->dev, 4096, icom_port->recv_buf,
+ icom_port->recv_buf_pci);
icom_port->recv_buf = NULL;
}
if (icom_port->xmit_buf) {
- pci_free_consistent(dev, 4096, icom_port->xmit_buf,
- icom_port->xmit_buf_pci);
+ dma_free_coherent(&dev->dev, 4096, icom_port->xmit_buf,
+ icom_port->xmit_buf_pci);
icom_port->xmit_buf = NULL;
}
if (icom_port->statStg) {
- pci_free_consistent(dev, 4096, icom_port->statStg,
- icom_port->statStg_pci);
+ dma_free_coherent(&dev->dev, 4096, icom_port->statStg,
+ icom_port->statStg_pci);
icom_port->statStg = NULL;
}
if (icom_port->xmitRestart) {
- pci_free_consistent(dev, 4096, icom_port->xmitRestart,
- icom_port->xmitRestart_pci);
+ dma_free_coherent(&dev->dev, 4096, icom_port->xmitRestart,
+ icom_port->xmitRestart_pci);
icom_port->xmitRestart = NULL;
}
}
@@ -169,7 +169,8 @@ static int get_port_memory(struct icom_port *icom_port)
struct pci_dev *dev = icom_port->adapter->pci_dev;
icom_port->xmit_buf =
- pci_alloc_consistent(dev, 4096, &icom_port->xmit_buf_pci);
+ dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmit_buf_pci,
+ GFP_KERNEL);
if (!icom_port->xmit_buf) {
dev_err(&dev->dev, "Can not allocate Transmit buffer\n");
return -ENOMEM;
@@ -179,7 +180,8 @@ static int get_port_memory(struct icom_port *icom_port)
(unsigned long) icom_port->xmit_buf);
icom_port->recv_buf =
- pci_alloc_consistent(dev, 4096, &icom_port->recv_buf_pci);
+ dma_alloc_coherent(&dev->dev, 4096, &icom_port->recv_buf_pci,
+ GFP_KERNEL);
if (!icom_port->recv_buf) {
dev_err(&dev->dev, "Can not allocate Receive buffer\n");
free_port_memory(icom_port);
@@ -189,7 +191,8 @@ static int get_port_memory(struct icom_port *icom_port)
(unsigned long) icom_port->recv_buf);
icom_port->statStg =
- pci_alloc_consistent(dev, 4096, &icom_port->statStg_pci);
+ dma_alloc_coherent(&dev->dev, 4096, &icom_port->statStg_pci,
+ GFP_KERNEL);
if (!icom_port->statStg) {
dev_err(&dev->dev, "Can not allocate Status buffer\n");
free_port_memory(icom_port);
@@ -199,7 +202,8 @@ static int get_port_memory(struct icom_port *icom_port)
(unsigned long) icom_port->statStg);
icom_port->xmitRestart =
- pci_alloc_consistent(dev, 4096, &icom_port->xmitRestart_pci);
+ dma_alloc_coherent(&dev->dev, 4096, &icom_port->xmitRestart_pci,
+ GFP_KERNEL);
if (!icom_port->xmitRestart) {
dev_err(&dev->dev,
"Can not allocate xmit Restart buffer\n");
@@ -414,7 +418,7 @@ static void load_code(struct icom_port *icom_port)
/*Set up data in icom DRAM to indicate where personality
*code is located and its length.
*/
- new_page = pci_alloc_consistent(dev, 4096, &temp_pci);
+ new_page = dma_alloc_coherent(&dev->dev, 4096, &temp_pci, GFP_KERNEL);
if (!new_page) {
dev_err(&dev->dev, "Can not allocate DMA buffer\n");
@@ -494,7 +498,7 @@ static void load_code(struct icom_port *icom_port)
}
if (new_page != NULL)
- pci_free_consistent(dev, 4096, new_page, temp_pci);
+ dma_free_coherent(&dev->dev, 4096, new_page, temp_pci);
}
static int startup(struct icom_port *icom_port)
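The icom conversion above follows the mechanical mapping from the legacy PCI DMA helpers to the generic DMA API; the one semantic addition is that the allocation context becomes explicit. A minimal before/after sketch, assuming a struct pci_dev *pdev:

	/* legacy PCI DMA API (allocation context implicitly GFP_ATOMIC) */
	buf = pci_alloc_consistent(pdev, size, &dma_handle);
	pci_free_consistent(pdev, size, buf, dma_handle);

	/* generic DMA API equivalent, GFP flags now spelled out */
	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, dma_handle);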
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 7d16fe41932f..21d519c804cb 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -257,7 +257,7 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
/**
* ifx_spi_timeout - SPI timeout
- * @arg: our SPI device
+ * @t: timer in our SPI device
*
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
@@ -277,7 +277,6 @@ static void ifx_spi_timeout(struct timer_list *t)
/**
* ifx_spi_tiocmget - get modem lines
* @tty: our tty device
- * @filp: file handle issuing the request
*
* Map the signal state into Linux modem flags and report the value
* in Linux terms
@@ -531,7 +530,7 @@ static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
/**
* ifx_port_hangup
- * @port: our tty port
+ * @tty: our tty
*
* tty port hang up. Called when tty_hangup processing is invoked either
* by loss of carrier, or by software (eg vhangup). Serialized against
@@ -611,7 +610,7 @@ static const struct tty_operations ifx_spi_serial_ops = {
/**
* ifx_spi_insert_fip_string - queue received data
- * @ifx_ser: our SPI device
+ * @ifx_dev: our SPI device
* @chars: buffer we have received
* @size: number of chars reeived
*
@@ -725,10 +724,11 @@ complete_exit:
* Queue data for transmission if possible and then kick off the
* transfer.
*/
-static void ifx_spi_io(unsigned long data)
+static void ifx_spi_io(struct tasklet_struct *t)
{
int retval;
- struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data;
+ struct ifx_spi_device *ifx_dev = from_tasklet(ifx_dev, t,
+ io_work_tasklet);
if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags) &&
test_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags)) {
@@ -1067,8 +1067,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
init_waitqueue_head(&ifx_dev->mdm_reset_wait);
spi_set_drvdata(spi, ifx_dev);
- tasklet_init(&ifx_dev->io_work_tasklet, ifx_spi_io,
- (unsigned long)ifx_dev);
+ tasklet_setup(&ifx_dev->io_work_tasklet, ifx_spi_io);
set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
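The tasklet conversion is the standard tasklet_setup()/from_tasklet() pattern: the callback receives the tasklet pointer and recovers its container via container_of() instead of a cast from an opaque unsigned long. A generic sketch with hypothetical names:

	struct foo {
		struct tasklet_struct tl;
		/* ... driver state ... */
	};

	static void foo_work(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() keyed on the member name */
		struct foo *f = from_tasklet(f, t, tl);
		/* ... */
	}

	tasklet_setup(&f->tl, foo_work);	/* replaces tasklet_init() */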
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ce8c472cf385..1731d9728865 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1552,10 +1552,6 @@ static void imx_uart_shutdown(struct uart_port *port)
ucr2 = imx_uart_readl(sport, UCR2);
ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
imx_uart_writel(sport, ucr2, UCR2);
-
- ucr4 = imx_uart_readl(sport, UCR4);
- ucr4 &= ~UCR4_OREN;
- imx_uart_writel(sport, ucr4, UCR4);
spin_unlock_irqrestore(&sport->port.lock, flags);
/*
@@ -1568,10 +1564,15 @@ static void imx_uart_shutdown(struct uart_port *port)
*/
spin_lock_irqsave(&sport->port.lock, flags);
+
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
-
imx_uart_writel(sport, ucr1, UCR1);
+
+ ucr4 = imx_uart_readl(sport, UCR4);
+ ucr4 &= ~(UCR4_OREN | UCR4_TCEN);
+ imx_uart_writel(sport, ucr4, UCR4);
+
spin_unlock_irqrestore(&sport->port.lock, flags);
clk_disable_unprepare(sport->clk_per);
@@ -2389,8 +2390,7 @@ static int imx_uart_probe(struct platform_device *pdev)
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
- ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
- UCR1_TRDYEN | UCR1_RTSDEN);
+ ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8434bd5a8ec7..21130af106bb 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1056,9 +1056,9 @@ static int max310x_startup(struct uart_port *port)
max310x_port_update(port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
- /* Configure MODE2 register & Reset FIFOs*/
- val = MAX310X_MODE2_RXEMPTINV_BIT | MAX310X_MODE2_FIFORST_BIT;
- max310x_port_write(port, MAX310X_MODE2_REG, val);
+ /* Reset FIFOs */
+ max310x_port_write(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_FIFORST_BIT);
max310x_port_update(port, MAX310X_MODE2_REG,
MAX310X_MODE2_FIFORST_BIT, 0);
@@ -1086,8 +1086,27 @@ static int max310x_startup(struct uart_port *port)
/* Clear IRQ status register */
max310x_port_read(port, MAX310X_IRQSTS_REG);
- /* Enable RX, TX, CTS change interrupts */
- val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT;
+ /*
+ * Let's ask for an interrupt after a timeout equivalent to
+ * the receiving time of 4 characters after the last character
+ * has been received.
+ */
+ max310x_port_write(port, MAX310X_RXTO_REG, 4);
+
+ /*
+ * Make sure we also get RX interrupts when the RX FIFO is
+ * filling up quickly, so get an interrupt when half of the RX
+ * FIFO has been filled in.
+ */
+ max310x_port_write(port, MAX310X_FIFOTRIGLVL_REG,
+ MAX310X_FIFOTRIGLVL_RX(MAX310X_FIFO_SIZE / 2));
+
+ /* Enable RX timeout interrupt in LSR */
+ max310x_port_write(port, MAX310X_LSR_IRQEN_REG,
+ MAX310X_LSR_RXTO_BIT);
+
+ /* Enable LSR, RX FIFO trigger, CTS change interrupts */
+ val = MAX310X_IRQ_LSR_BIT | MAX310X_IRQ_RXFIFO_BIT | MAX310X_IRQ_TXEMPTY_BIT;
max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT);
return 0;
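Assuming MAX310X_FIFO_SIZE is the 128 bytes these parts usually provide, the trigger level programmed above works out to 128 / 2 = 64 bytes, and writing 4 to MAX310X_RXTO_REG asks for an interrupt roughly four character times after the last byte arrives, so bursts drain at half-FIFO granularity while trailing bytes are still delivered promptly.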
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 7dbfb4cde124..09c88c48fb7b 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -632,6 +632,7 @@ static int mcf_probe(struct platform_device *pdev)
port->ops = &mcf_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->rs485_config = mcf_config_rs485;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MCF_CONSOLE);
uart_add_one_port(&mcf_driver, port);
}
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 4f53a4caabf6..9acae5f8fc32 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -173,7 +173,7 @@ static void men_z135_reg_clr(struct men_z135_port *uart,
/**
* men_z135_handle_modem_status() - Handle change of modem status
- * @port: The UART port
+ * @uart: The UART port
*
* Handle change of modem status register. This is done by reading the "delta"
* versions of DCD (Data Carrier Detect) and CTS (Clear To Send).
@@ -236,7 +236,7 @@ static u16 get_rx_fifo_content(struct men_z135_port *uart)
/**
* men_z135_handle_rx() - RX tasklet routine
- * @arg: Pointer to struct men_z135_port
+ * @uart: Pointer to struct men_z135_port
*
* Copy from RX FIFO and acknowledge number of bytes copied.
*/
@@ -287,7 +287,7 @@ static void men_z135_handle_rx(struct men_z135_port *uart)
/**
* men_z135_handle_tx() - TX tasklet routine
- * @arg: Pointer to struct men_z135_port
+ * @uart: Pointer to struct men_z135_port
*
*/
static void men_z135_handle_tx(struct men_z135_port *uart)
@@ -596,7 +596,7 @@ static void men_z135_stop_rx(struct uart_port *port)
/**
* men_z135_enable_ms() - Enable Modem Status
- * port:
+ * @port: the port
*
* Enable Modem Status IRQ.
*/
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 4e9a590712cb..118b29912289 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -803,7 +803,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
&pdev->dev);
struct uart_port *port;
struct mvebu_uart *mvuart;
- int ret, id, irq;
+ int id, irq;
if (!reg) {
dev_err(&pdev->dev, "no registers defined\n");
@@ -912,10 +912,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
udelay(1);
writel(0, port->membase + UART_CTRL(port));
- ret = uart_add_one_port(&mvebu_uart_driver, port);
- if (ret)
- return ret;
- return 0;
+ return uart_add_one_port(&mvebu_uart_driver, port);
}
static struct mvebu_uart_driver_data uart_std_driver_data = {
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 67aca8cb9cd4..a7363bc66c11 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -981,7 +981,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
priv->tx_dma_use = 1;
- priv->sg_tx_p = kcalloc(num, sizeof(struct scatterlist), GFP_ATOMIC);
+ priv->sg_tx_p = kmalloc_array(num, sizeof(struct scatterlist), GFP_ATOMIC);
if (!priv->sg_tx_p) {
dev_err(priv->port.dev, "%s:kzalloc Failed\n", __func__);
return 0;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 96e7aa479961..063484b22523 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1644,7 +1644,7 @@ static int __init pmz_probe(void)
* TODO: Add routines with proper locking to do that...
*/
node_a = node_b = NULL;
- for (np = NULL; (np = of_get_next_child(node_p, np)) != NULL;) {
+ for_each_child_of_node(node_p, np) {
if (of_node_name_prefix(np, "ch-a"))
node_a = of_node_get(np);
else if (of_node_name_prefix(np, "ch-b"))
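for_each_child_of_node() takes a reference on each child as it iterates and drops it when advancing, which is why the loop still calls of_node_get() on any node it keeps past the loop. The shape of the pattern (hypothetical names):

	struct device_node *child, *kept = NULL;

	for_each_child_of_node(parent, child)
		if (of_node_name_prefix(child, "ch-a"))
			kept = of_node_get(child);	/* extra ref outlives the loop */

	/* ... use kept, then of_node_put(kept) when done ... */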
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 184b458820a3..291649f02821 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -242,7 +242,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
if (mctrl & TIOCM_LOOP)
port->loopback = RX_TX_CTS_RTS_SORTED;
- if (!(mctrl & TIOCM_RTS))
+ if (!(mctrl & TIOCM_RTS) && !uport->suspended)
uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
}
@@ -1000,7 +1000,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
sampling_rate = UART_OVERSAMPLING;
/* Sampling rate is halved for IP versions >= 2.5 */
ver = geni_se_get_qup_hw_version(&port->se);
- if (GENI_SE_VERSION_MAJOR(ver) >= 2 && GENI_SE_VERSION_MINOR(ver) >= 5)
+ if (ver >= QUP_SE_VERSION_2_5)
sampling_rate /= 2;
clk_rate = get_clk_div_rate(baud, sampling_rate, &clk_div);
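The version-check rewrite fixes more than style: under the old test, a part reporting version 3.0 has GENI_SE_VERSION_MAJOR(ver) = 3 >= 2 but GENI_SE_VERSION_MINOR(ver) = 0 < 5, so the conjunction fails even though 3.0 is newer than 2.5. Comparing the packed version word against a single QUP_SE_VERSION_2_5 constant orders all versions correctly.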
@@ -1107,7 +1107,7 @@ static int qcom_geni_console_setup(struct console *co, char *options)
{
struct uart_port *uport;
struct qcom_geni_serial_port *port;
- int baud = 9600;
+ int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
@@ -1438,11 +1438,9 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return PTR_ERR(port->se.opp_table);
/* OPP table is optional */
ret = dev_pm_opp_of_add_table(&pdev->dev);
- if (!ret) {
- port->se.has_opp_table = true;
- } else if (ret != -ENODEV) {
+ if (ret && ret != -ENODEV) {
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
- return ret;
+ goto put_clkname;
}
port->private_data.drv = drv;
@@ -1483,8 +1481,8 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return 0;
err:
- if (port->se.has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
+put_clkname:
dev_pm_opp_put_clkname(port->se.opp_table);
return ret;
}
@@ -1494,8 +1492,7 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
struct qcom_geni_serial_port *port = platform_get_drvdata(pdev);
struct uart_driver *drv = port->private_data.drv;
- if (port->se.has_opp_table)
- dev_pm_opp_of_remove_table(&pdev->dev);
+ dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_clkname(port->se.opp_table);
dev_pm_clear_wake_irq(&pdev->dev);
device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 75c2a22895f9..f5fab1dd96bc 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -879,22 +879,20 @@ static int sa1100_serial_add_one_port(struct sa1100_port *sport, struct platform
static int sa1100_serial_probe(struct platform_device *dev)
{
- struct resource *res = dev->resource;
+ struct resource *res;
int i;
- for (i = 0; i < dev->num_resources; i++, res++)
- if (res->flags & IORESOURCE_MEM)
- break;
-
- if (i < dev->num_resources) {
- for (i = 0; i < NR_PORTS; i++) {
- if (sa1100_ports[i].port.mapbase != res->start)
- continue;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
- sa1100_serial_add_one_port(&sa1100_ports[i], dev);
+ for (i = 0; i < NR_PORTS; i++)
+ if (sa1100_ports[i].port.mapbase == res->start)
break;
- }
- }
+ if (i == NR_PORTS)
+ return -ENODEV;
+
+ sa1100_serial_add_one_port(&sa1100_ports[i], dev);
return 0;
}
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index bd5e7e9938ce..22c7bc90b104 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -35,7 +35,6 @@
#include <linux/refcount.h>
#include <asm/io.h>
-#include <asm/war.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_uart.h>
@@ -157,7 +156,7 @@ static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
unsigned char retval;
retval = __read_sbdchn(sport, reg);
- if (SIBYTE_1956_WAR)
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
return retval;
}
@@ -167,7 +166,7 @@ static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
unsigned char retval;
retval = __read_sbdshr(sport, reg);
- if (SIBYTE_1956_WAR)
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
return retval;
}
@@ -175,14 +174,14 @@ static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
{
__write_sbdchn(sport, reg, value);
- if (SIBYTE_1956_WAR)
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
}
static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
{
__write_sbdshr(sport, reg, value);
- if (SIBYTE_1956_WAR)
+ if (IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS))
__war_sbd1956(sport);
}
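Unlike the removed SIBYTE_1956_WAR macro, IS_ENABLED(CONFIG_SB1_PASS_2_WORKAROUNDS) works for both =y and =m and keeps the guarded call visible to the compiler, so it stays type-checked even in configurations where it is optimized away. The general idiom (option name illustrative):

	/* preferred over #ifdef: the dead branch is compiled out, not hidden */
	if (IS_ENABLED(CONFIG_SOME_WORKAROUND))
		do_workaround();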
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 809610b37c71..f86ec2d2635b 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1271,6 +1271,7 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.type = PORT_SC16IS7XX;
s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
+ s->p[i].port.iobase = i;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 124524ecfe26..f41cba10b86b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2626,7 +2626,7 @@ static ssize_t uartclk_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.baud_base * 16);
+ return sprintf(buf, "%d\n", tmp.baud_base * 16);
}
static ssize_t type_show(struct device *dev,
@@ -2636,7 +2636,7 @@ static ssize_t type_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.type);
+ return sprintf(buf, "%d\n", tmp.type);
}
static ssize_t line_show(struct device *dev,
@@ -2646,7 +2646,7 @@ static ssize_t line_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.line);
+ return sprintf(buf, "%d\n", tmp.line);
}
static ssize_t port_show(struct device *dev,
@@ -2660,7 +2660,7 @@ static ssize_t port_show(struct device *dev,
ioaddr = tmp.port;
if (HIGH_BITS_OFFSET)
ioaddr |= (unsigned long)tmp.port_high << HIGH_BITS_OFFSET;
- return snprintf(buf, PAGE_SIZE, "0x%lX\n", ioaddr);
+ return sprintf(buf, "0x%lX\n", ioaddr);
}
static ssize_t irq_show(struct device *dev,
@@ -2670,7 +2670,7 @@ static ssize_t irq_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.irq);
+ return sprintf(buf, "%d\n", tmp.irq);
}
static ssize_t flags_show(struct device *dev,
@@ -2680,7 +2680,7 @@ static ssize_t flags_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "0x%X\n", tmp.flags);
+ return sprintf(buf, "0x%X\n", tmp.flags);
}
static ssize_t xmit_fifo_size_show(struct device *dev,
@@ -2690,7 +2690,7 @@ static ssize_t xmit_fifo_size_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.xmit_fifo_size);
+ return sprintf(buf, "%d\n", tmp.xmit_fifo_size);
}
static ssize_t close_delay_show(struct device *dev,
@@ -2700,7 +2700,7 @@ static ssize_t close_delay_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.close_delay);
+ return sprintf(buf, "%d\n", tmp.close_delay);
}
static ssize_t closing_wait_show(struct device *dev,
@@ -2710,7 +2710,7 @@ static ssize_t closing_wait_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.closing_wait);
+ return sprintf(buf, "%d\n", tmp.closing_wait);
}
static ssize_t custom_divisor_show(struct device *dev,
@@ -2720,7 +2720,7 @@ static ssize_t custom_divisor_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.custom_divisor);
+ return sprintf(buf, "%d\n", tmp.custom_divisor);
}
static ssize_t io_type_show(struct device *dev,
@@ -2730,7 +2730,7 @@ static ssize_t io_type_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.io_type);
+ return sprintf(buf, "%d\n", tmp.io_type);
}
static ssize_t iomem_base_show(struct device *dev,
@@ -2740,7 +2740,7 @@ static ssize_t iomem_base_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "0x%lX\n", (unsigned long)tmp.iomem_base);
+ return sprintf(buf, "0x%lX\n", (unsigned long)tmp.iomem_base);
}
static ssize_t iomem_reg_shift_show(struct device *dev,
@@ -2750,7 +2750,7 @@ static ssize_t iomem_reg_shift_show(struct device *dev,
struct tty_port *port = dev_get_drvdata(dev);
uart_get_info(port, &tmp);
- return snprintf(buf, PAGE_SIZE, "%d\n", tmp.iomem_reg_shift);
+ return sprintf(buf, "%d\n", tmp.iomem_reg_shift);
}
static ssize_t console_show(struct device *dev,
@@ -3260,9 +3260,7 @@ int uart_get_rs485_mode(struct uart_port *port)
if (IS_ERR(port->rs485_term_gpio)) {
ret = PTR_ERR(port->rs485_term_gpio);
port->rs485_term_gpio = NULL;
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Cannot get rs485-term-gpios\n");
- return ret;
+ return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");
}
return 0;
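dev_err_probe() folds the removed three-line pattern into one call: it logs via dev_err() only when the error is not -EPROBE_DEFER (deferred probes record their reason for the devices_deferred debugfs entry instead) and returns the error code unchanged, so the caller can return it directly:

	/* before */
	if (ret != -EPROBE_DEFER)
		dev_err(dev, "Cannot get rs485-term-gpios\n");
	return ret;

	/* after */
	return dev_err_probe(dev, ret, "Cannot get rs485-term-gpios\n");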
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index b4d89e31730e..7a07e7272de1 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1280,6 +1280,9 @@ static int __init serial_txx9_init(void)
#ifdef ENABLE_SERIAL_TXX9_PCI
ret = pci_register_driver(&serial_txx9_pci_driver);
+	if (ret)
+		platform_driver_unregister(&serial_txx9_plat_driver);
#endif
if (ret == 0)
goto out;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index ba503dd04ce2..ee6c7762d355 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -129,13 +129,9 @@ static int stm32_config_rs485(struct uart_port *port,
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
cr3 &= ~USART_CR3_DEP;
rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
} else {
cr3 |= USART_CR3_DEP;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
}
writel_relaxed(cr3, port->membase + ofs->cr3);
@@ -541,17 +537,42 @@ static void stm32_disable_ms(struct uart_port *port)
/* Transmit stop */
static void stm32_stop_tx(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
stm32_tx_interrupt_disable(port);
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ }
+ }
}
/* There are probably characters waiting to be transmitted. */
static void stm32_start_tx(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
if (uart_circ_empty(xmit))
return;
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ }
+ }
+
stm32_transmit_chars(port);
}
@@ -851,13 +872,9 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
cr3 &= ~USART_CR3_DEP;
rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
} else {
cr3 |= USART_CR3_DEP;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
}
} else {
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 19d38b504e27..2126e6e6dfd1 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -172,9 +172,9 @@ static void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-static void timbuart_tasklet(unsigned long arg)
+static void timbuart_tasklet(struct tasklet_struct *t)
{
- struct timbuart_port *uart = (struct timbuart_port *)arg;
+ struct timbuart_port *uart = from_tasklet(uart, t, tasklet);
u32 isr, ier = 0;
spin_lock(&uart->port.lock);
@@ -451,7 +451,7 @@ static int timbuart_probe(struct platform_device *dev)
}
uart->port.irq = irq;
- tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+ tasklet_setup(&uart->tasklet, timbuart_tasklet);
err = uart_register_driver(&timbuart_driver);
if (err)
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3c8c662c69e2..d6a8604157ab 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -283,7 +283,7 @@ static unsigned int qe_uart_tx_empty(struct uart_port *port)
* don't need that support. This function must exist, however, otherwise
* the kernel will panic.
*/
-void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+static void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 0dba40eace46..c8324d58ef56 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -942,7 +942,7 @@ static inline int mgsl_paranoia_check(struct mgsl_struct *info,
return 0;
}
-/**
+/*
* line discipline callback wrappers
*
* The wrappers maintain line discipline references
@@ -7419,14 +7419,14 @@ static int usc_loopmode_active( struct mgsl_struct * info)
#if SYNCLINK_GENERIC_HDLC
/**
- * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
- * set encoding and frame check sequence (FCS) options
+ * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * @dev: pointer to network device structure
+ * @encoding: serial encoding setting
+ * @parity: FCS setting
*
- * dev pointer to network device structure
- * encoding serial encoding setting
- * parity FCS setting
+ * Set encoding and frame check sequence (FCS) options.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
@@ -7468,10 +7468,9 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
}
/**
- * called by generic HDLC layer to send frame
- *
- * skb socket buffer containing HDLC frame
- * dev pointer to network device structure
+ * hdlcdev_xmit - called by generic HDLC layer to send a frame
+ * @skb: socket buffer containing HDLC frame
+ * @dev: pointer to network device structure
*/
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -7509,12 +7508,12 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
}
/**
- * called by network layer when interface enabled
- * claim resources and initialize hardware
+ * hdlcdev_open - called by network layer when interface enabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Claim resources and initialize hardware.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_open(struct net_device *dev)
{
@@ -7568,12 +7567,12 @@ static int hdlcdev_open(struct net_device *dev)
}
/**
- * called by network layer when interface is disabled
- * shutdown hardware and release resources
+ * hdlcdev_close - called by network layer when interface is disabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Shut down hardware and release resources.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_close(struct net_device *dev)
{
@@ -7598,13 +7597,12 @@ static int hdlcdev_close(struct net_device *dev)
}
/**
- * called by network layer to process IOCTL call to network device
- *
- * dev pointer to network device structure
- * ifr pointer to network interface request structure
- * cmd IOCTL command code
+ * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
+ * @dev: pointer to network device structure
+ * @ifr: pointer to network interface request structure
+ * @cmd: IOCTL command code
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -7702,9 +7700,9 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
/**
- * called by network layer when transmit timeout is detected
+ * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
*
- * dev pointer to network device structure
+ * @dev: pointer to network device structure
*/
static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -7725,10 +7723,10 @@ static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
/**
- * called by device driver when transmit completes
- * reenable network layer transmit if stopped
+ * hdlcdev_tx_done - called by device driver when transmit completes
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Reenable network layer transmit if stopped.
*/
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
@@ -7737,12 +7735,12 @@ static void hdlcdev_tx_done(struct mgsl_struct *info)
}
/**
- * called by device driver when frame received
- * pass frame to network layer
+ * hdlcdev_rx - called by device driver when frame received
+ * @info: pointer to device instance information
+ * @buf: pointer to buffer containing frame data
+ * @size: count of data bytes in buf
*
- * info pointer to device instance information
- * buf pointer to buffer contianing frame data
- * size count of data bytes in buf
+ * Pass frame to network layer.
*/
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
@@ -7778,12 +7776,12 @@ static const struct net_device_ops hdlcdev_ops = {
};
/**
- * called by device driver when adding device instance
- * do generic HDLC initialization
+ * hdlcdev_init - called by device driver when adding device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC initialization.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_init(struct mgsl_struct *info)
{
@@ -7827,10 +7825,10 @@ static int hdlcdev_init(struct mgsl_struct *info)
}
/**
- * called by device driver when removing device instance
- * do generic HDLC cleanup
+ * hdlcdev_exit - called by device driver when removing device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC cleanup.
*/
static void hdlcdev_exit(struct mgsl_struct *info)
{
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index b794177ccfb9..afa4cc52e48d 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1395,14 +1395,14 @@ static int set_break(struct tty_struct *tty, int break_state)
#if SYNCLINK_GENERIC_HDLC
/**
- * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
- * set encoding and frame check sequence (FCS) options
+ * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * @dev: pointer to network device structure
+ * @encoding: serial encoding setting
+ * @parity: FCS setting
*
- * dev pointer to network device structure
- * encoding serial encoding setting
- * parity FCS setting
+ * Set encoding and frame check sequence (FCS) options.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
@@ -1446,10 +1446,9 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
}
/**
- * called by generic HDLC layer to send frame
- *
- * skb socket buffer containing HDLC frame
- * dev pointer to network device structure
+ * hdlcdev_xmit - called by generic HDLC layer to send a frame
+ * @skb: socket buffer containing HDLC frame
+ * @dev: pointer to network device structure
*/
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -1483,12 +1482,12 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
}
/**
- * called by network layer when interface enabled
- * claim resources and initialize hardware
+ * hdlcdev_open - called by network layer when interface enabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Claim resources and initialize hardware.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_open(struct net_device *dev)
{
@@ -1544,12 +1543,12 @@ static int hdlcdev_open(struct net_device *dev)
}
/**
- * called by network layer when interface is disabled
- * shutdown hardware and release resources
+ * hdlcdev_close - called by network layer when interface is disabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Shut down hardware and release resources.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_close(struct net_device *dev)
{
@@ -1574,13 +1573,12 @@ static int hdlcdev_close(struct net_device *dev)
}
/**
- * called by network layer to process IOCTL call to network device
- *
- * dev pointer to network device structure
- * ifr pointer to network interface request structure
- * cmd IOCTL command code
+ * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
+ * @dev: pointer to network device structure
+ * @ifr: pointer to network interface request structure
+ * @cmd: IOCTL command code
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -1678,9 +1676,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
/**
- * called by network layer when transmit timeout is detected
- *
- * dev pointer to network device structure
+ * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
+ * @dev: pointer to network device structure
*/
static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -1700,10 +1697,10 @@ static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
/**
- * called by device driver when transmit completes
- * reenable network layer transmit if stopped
+ * hdlcdev_tx_done - called by device driver when transmit completes
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Reenable network layer transmit if stopped.
*/
static void hdlcdev_tx_done(struct slgt_info *info)
{
@@ -1712,12 +1709,12 @@ static void hdlcdev_tx_done(struct slgt_info *info)
}
/**
- * called by device driver when frame received
- * pass frame to network layer
+ * hdlcdev_rx - called by device driver when frame received
+ * @info: pointer to device instance information
+ * @buf: pointer to buffer containing frame data
+ * @size: count of data bytes in buf
*
- * info pointer to device instance information
- * buf pointer to buffer contianing frame data
- * size count of data bytes in buf
+ * Pass frame to network layer.
*/
static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
{
@@ -1751,12 +1748,12 @@ static const struct net_device_ops hdlcdev_ops = {
};
/**
- * called by device driver when adding device instance
- * do generic HDLC initialization
+ * hdlcdev_init - called by device driver when adding device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC initialization.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_init(struct slgt_info *info)
{
@@ -1800,10 +1797,10 @@ static int hdlcdev_init(struct slgt_info *info)
}
/**
- * called by device driver when removing device instance
- * do generic HDLC cleanup
+ * hdlcdev_exit - called by device driver when removing device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC cleanup.
*/
static void hdlcdev_exit(struct slgt_info *info)
{
@@ -3341,8 +3338,8 @@ static int alloc_desc(struct slgt_info *info)
unsigned int pbufs;
/* allocate memory to hold descriptor lists */
- info->bufs = pci_zalloc_consistent(info->pdev, DESC_LIST_SIZE,
- &info->bufs_dma_addr);
+ info->bufs = dma_alloc_coherent(&info->pdev->dev, DESC_LIST_SIZE,
+ &info->bufs_dma_addr, GFP_KERNEL);
if (info->bufs == NULL)
return -ENOMEM;
@@ -3384,7 +3381,8 @@ static int alloc_desc(struct slgt_info *info)
static void free_desc(struct slgt_info *info)
{
if (info->bufs != NULL) {
- pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
+ dma_free_coherent(&info->pdev->dev, DESC_LIST_SIZE,
+ info->bufs, info->bufs_dma_addr);
info->bufs = NULL;
info->rbufs = NULL;
info->tbufs = NULL;
@@ -3395,7 +3393,9 @@ static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
{
int i;
for (i=0; i < count; i++) {
- if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
+ bufs[i].buf = dma_alloc_coherent(&info->pdev->dev, DMABUFSIZE,
+ &bufs[i].buf_dma_addr, GFP_KERNEL);
+ if (!bufs[i].buf)
return -ENOMEM;
bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
}
@@ -3408,7 +3408,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
for (i=0; i < count; i++) {
if (bufs[i].buf == NULL)
continue;
- pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
+ dma_free_coherent(&info->pdev->dev, DMABUFSIZE, bufs[i].buf,
+ bufs[i].buf_dma_addr);
bufs[i].buf = NULL;
}
}
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 33ff2dbb6650..ce08c5ec331c 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -685,7 +685,7 @@ static inline int sanity_check(SLMP_INFO *info,
return 0;
}
-/**
+/*
* line discipline callback wrappers
*
* The wrappers maintain line discipline references
@@ -1520,14 +1520,14 @@ static int set_break(struct tty_struct *tty, int break_state)
#if SYNCLINK_GENERIC_HDLC
/**
- * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
- * set encoding and frame check sequence (FCS) options
+ * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * @dev: pointer to network device structure
+ * @encoding: serial encoding setting
+ * @parity: FCS setting
*
- * dev pointer to network device structure
- * encoding serial encoding setting
- * parity FCS setting
+ * Set encoding and frame check sequence (FCS) options.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
@@ -1569,10 +1569,9 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
}
/**
- * called by generic HDLC layer to send frame
- *
- * skb socket buffer containing HDLC frame
- * dev pointer to network device structure
+ * hdlcdev_xmit - called by generic HDLC layer to send a frame
+ * @skb: socket buffer containing HDLC frame
+ * @dev: pointer to network device structure
*/
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -1610,12 +1609,12 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
}
/**
- * called by network layer when interface enabled
- * claim resources and initialize hardware
+ * hdlcdev_open - called by network layer when interface enabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Claim resources and initialize hardware.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_open(struct net_device *dev)
{
@@ -1669,12 +1668,12 @@ static int hdlcdev_open(struct net_device *dev)
}
/**
- * called by network layer when interface is disabled
- * shutdown hardware and release resources
+ * hdlcdev_close - called by network layer when interface is disabled
+ * @dev: pointer to network device structure
*
- * dev pointer to network device structure
+ * Shut down hardware and release resources.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_close(struct net_device *dev)
{
@@ -1699,13 +1698,12 @@ static int hdlcdev_close(struct net_device *dev)
}
/**
- * called by network layer to process IOCTL call to network device
+ * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
+ * @dev: pointer to network device structure
+ * @ifr: pointer to network interface request structure
+ * @cmd: IOCTL command code
*
- * dev pointer to network device structure
- * ifr pointer to network interface request structure
- * cmd IOCTL command code
- *
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -1803,9 +1801,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
/**
- * called by network layer when transmit timeout is detected
- *
- * dev pointer to network device structure
+ * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
+ * @dev: pointer to network device structure
*/
static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -1826,10 +1823,10 @@ static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
/**
- * called by device driver when transmit completes
- * reenable network layer transmit if stopped
+ * hdlcdev_tx_done - called by device driver when transmit completes
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Reenable network layer transmit if stopped.
*/
static void hdlcdev_tx_done(SLMP_INFO *info)
{
@@ -1838,12 +1835,12 @@ static void hdlcdev_tx_done(SLMP_INFO *info)
}
/**
- * called by device driver when frame received
- * pass frame to network layer
+ * hdlcdev_rx - called by device driver when frame received
+ * @info: pointer to device instance information
+ * @buf: pointer to buffer containing frame data
+ * @size: count of data bytes in buf
*
- * info pointer to device instance information
- * buf pointer to buffer contianing frame data
- * size count of data bytes in buf
+ * Pass frame to network layer.
*/
static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size)
{
@@ -1879,12 +1876,12 @@ static const struct net_device_ops hdlcdev_ops = {
};
/**
- * called by device driver when adding device instance
- * do generic HDLC initialization
+ * hdlcdev_init - called by device driver when adding device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC initialization.
*
- * returns 0 if success, otherwise error code
+ * Return: 0 if success, otherwise error code
*/
static int hdlcdev_init(SLMP_INFO *info)
{
@@ -1928,10 +1925,10 @@ static int hdlcdev_init(SLMP_INFO *info)
}
/**
- * called by device driver when removing device instance
- * do generic HDLC cleanup
+ * hdlcdev_exit - called by device driver when removing device instance
+ * @info: pointer to device instance information
*
- * info pointer to device instance information
+ * Do generic HDLC cleanup.
*/
static void hdlcdev_exit(SLMP_INFO *info)
{
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index a8e39b2cdd55..959f9e121cc6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -19,6 +19,7 @@
#include <linux/sched/rt.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
+#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/fs.h>
@@ -440,7 +441,7 @@ static const struct sysrq_key_op sysrq_unrt_op = {
/* Key Operations table and lock */
static DEFINE_SPINLOCK(sysrq_key_table_lock);
-static const struct sysrq_key_op *sysrq_key_table[36] = {
+static const struct sysrq_key_op *sysrq_key_table[62] = {
&sysrq_loglevel_op, /* 0 */
&sysrq_loglevel_op, /* 1 */
&sysrq_loglevel_op, /* 2 */
@@ -497,6 +498,32 @@ static const struct sysrq_key_op *sysrq_key_table[36] = {
/* y: May be registered on sparc64 for global register dump */
NULL, /* y */
&sysrq_ftrace_dump_op, /* z */
+ NULL, /* A */
+ NULL, /* B */
+ NULL, /* C */
+ NULL, /* D */
+ NULL, /* E */
+ NULL, /* F */
+ NULL, /* G */
+ NULL, /* H */
+ NULL, /* I */
+ NULL, /* J */
+ NULL, /* K */
+ NULL, /* L */
+ NULL, /* M */
+ NULL, /* N */
+ NULL, /* O */
+ NULL, /* P */
+ NULL, /* Q */
+ NULL, /* R */
+ NULL, /* S */
+ NULL, /* T */
+ NULL, /* U */
+ NULL, /* V */
+ NULL, /* W */
+ NULL, /* X */
+ NULL, /* Y */
+ NULL, /* Z */
};
/* key2index calculation, -1 on invalid index */
@@ -508,6 +535,8 @@ static int sysrq_key_table_key2index(int key)
retval = key - '0';
else if ((key >= 'a') && (key <= 'z'))
retval = key + 10 - 'a';
+ else if ((key >= 'A') && (key <= 'Z'))
+ retval = key + 36 - 'A';
else
retval = -1;
return retval;
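With the table grown to 62 slots the mapping stays contiguous: '0'..'9' occupy indices 0..9, 'a'..'z' occupy 10..35, and the new arm yields 'A' + 36 - 'A' = 36 through 'Z' + 36 - 'A' = 61, exactly the range of the uppercase NULL entries added above. The Shift tracking added below feeds this range by upcasing the translated character.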
@@ -621,6 +650,8 @@ struct sysrq_state {
unsigned long key_down[BITS_TO_LONGS(KEY_CNT)];
unsigned int alt;
unsigned int alt_use;
+ unsigned int shift;
+ unsigned int shift_use;
bool active;
bool need_reinject;
bool reinjecting;
@@ -805,10 +836,20 @@ static bool sysrq_handle_keypress(struct sysrq_state *sysrq,
}
break;
+ case KEY_LEFTSHIFT:
+ case KEY_RIGHTSHIFT:
+ if (!value)
+ sysrq->shift = KEY_RESERVED;
+ else if (value != 2)
+ sysrq->shift = code;
+ break;
+
case KEY_SYSRQ:
if (value == 1 && sysrq->alt != KEY_RESERVED) {
sysrq->active = true;
sysrq->alt_use = sysrq->alt;
+ /* either RESERVED (for released) or actual code */
+ sysrq->shift_use = sysrq->shift;
/*
* If nothing else will be pressed we'll need
 * to re-inject Alt-SysRq keystroke.
@@ -831,8 +872,12 @@ static bool sysrq_handle_keypress(struct sysrq_state *sysrq,
default:
if (sysrq->active && value && value != 2) {
+ unsigned char c = sysrq_xlate[code];
+
sysrq->need_reinject = false;
- __handle_sysrq(sysrq_xlate[code], true);
+ if (sysrq->shift_use != KEY_RESERVED)
+ c = toupper(c);
+ __handle_sysrq(c, true);
}
break;
}
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 40207cab3b2a..84fec3c62d6a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -119,8 +119,8 @@ EXPORT_SYMBOL(tty_termios_input_baud_rate);
/**
* tty_termios_encode_baud_rate
* @termios: ktermios structure holding user requested state
- * @ispeed: input speed
- * @ospeed: output speed
+ * @ibaud: input speed
+ * @obaud: output speed
*
* Encode the speeds set into the passed termios structure. This is
* used as a library helper for drivers so that they can report back
@@ -223,7 +223,7 @@ EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
/**
* tty_encode_baud_rate - set baud rate of the tty
* @ibaud: input baud rate
- * @obad: output baud rate
+ * @obaud: output baud rate
*
* Update the current termios data for the tty with the new speed
* settings. The caller must hold the termios_rwsem for the tty in
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index ec145a59f199..bd2d91546e32 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -42,7 +42,7 @@
* tty_buffer_lock_exclusive - gain exclusive access to buffer
* tty_buffer_unlock_exclusive - release exclusive access
*
- * @port - tty_port owning the flip buffer
+ * @port: tty port owning the flip buffer
*
* Guarantees safe use of the line discipline's receive_buf() method by
* excluding the buffer work and any pending flush from using the flip
@@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
/**
* tty_buffer_space_avail - return unused buffer space
- * @port - tty_port owning the flip buffer
+ * @port: tty port owning the flip buffer
*
* Returns the # of bytes which can be written by the driver without
* reaching the buffer limit.
@@ -107,7 +107,7 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
/**
* tty_buffer_free_all - free buffers used by a tty
- * @tty: tty to free from
+ * @port: tty port to free from
*
* Remove all the buffers pending on a tty whether queued with data
* or in the free ring. Must be called when the tty is no longer in use
@@ -142,7 +142,7 @@ void tty_buffer_free_all(struct tty_port *port)
/**
* tty_buffer_alloc - allocate a tty buffer
- * @tty: tty device
+ * @port: tty port
* @size: desired size (characters)
*
* Allocate a new tty buffer to hold the desired number of characters.
@@ -184,7 +184,7 @@ found:
/**
* tty_buffer_free - free a tty buffer
- * @tty: tty owning the buffer
+ * @port: tty port owning the buffer
* @b: the buffer to free
*
* Free a tty buffer, or add it to the free list according to our
@@ -243,7 +243,7 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
/**
* tty_buffer_request_room - grow tty buffer if needed
- * @tty: tty structure
+ * @port: tty port
* @size: size desired
* @flags: buffer flags if new buffer allocated (default = 0)
*
@@ -559,7 +559,7 @@ EXPORT_SYMBOL(tty_flip_buffer_push);
/**
* tty_buffer_init - prepare a tty buffer structure
- * @tty: tty to initialise
+ * @port: tty port to initialise
*
* Set up the initial state of the buffer management for a tty device.
* Must be called before the other tty buffer functions are used.
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ceed72c9a88f..9f8b9a567b35 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -307,7 +307,7 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
/**
* get_tty_driver - find device of a tty
- * @dev_t: device identifier
+ * @device: device identifier
* @index: returns the index of the tty
*
* This routine returns a tty driver structure, given a device number
@@ -544,7 +544,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
/**
* __tty_hangup - actual handler for hangup events
- * @work: tty device
+ * @tty: tty device
*
* This can be called by a "kworker" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
@@ -1232,7 +1232,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
/**
* tty_driver_remove_tty() - remove a tty from the driver tables
* @driver: the driver for the tty
- * @idx: the minor number
+ * @tty: tty to remove
*
 * Remove a tty object from the driver tables. The tty->index field
* will be set by the time this is called.
@@ -1247,9 +1247,9 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
driver->ttys[tty->index] = NULL;
}
-/*
- * tty_reopen() - fast re-open of an open tty
- * @tty - the tty to open
+/**
+ * tty_reopen() - fast re-open of an open tty
+ * @tty: the tty to open
*
* Return 0 on success, -errno on error.
* Re-opens on master ptys are not allowed and return -EIO.
@@ -1295,7 +1295,6 @@ static int tty_reopen(struct tty_struct *tty)
* tty_init_dev - initialise a tty device
* @driver: tty driver we are opening a device on
* @idx: device index
- * @ret_tty: returned tty structure
*
* Prepare a tty device. This may not be a "new" clean device but
* could also be an active device. The pty drivers require special
@@ -1313,6 +1312,8 @@ static int tty_reopen(struct tty_struct *tty)
* failed open. The new code protects the open with a mutex, so it's
* really quite straightforward. The mutex locking can probably be
* relaxed for the (most common) case of reopening a tty.
+ *
+ * Return: the allocated tty structure
*/
struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
@@ -1432,7 +1433,7 @@ static void tty_flush_works(struct tty_struct *tty)
/**
* release_one_tty - release tty structure memory
- * @kref: kref of tty we are obliterating
+ * @work: work of tty we are obliterating
*
* Releases memory associated with a tty structure, and clears out the
* driver table slots. This function is called when a device is no longer
@@ -1514,10 +1515,12 @@ static void release_tty(struct tty_struct *tty, int idx)
tty->ops->shutdown(tty);
tty_save_termios(tty);
tty_driver_remove_tty(tty->driver, tty);
- tty->port->itty = NULL;
+ if (tty->port)
+ tty->port->itty = NULL;
if (tty->link)
tty->link->port->itty = NULL;
- tty_buffer_cancel_work(tty->port);
+ if (tty->port)
+ tty_buffer_cancel_work(tty->port);
if (tty->link)
tty_buffer_cancel_work(tty->link->port);
@@ -1528,7 +1531,6 @@ static void release_tty(struct tty_struct *tty, int idx)
/**
* tty_release_checks - check a tty before real release
* @tty: tty to check
- * @o_tty: link of @tty (if any)
* @idx: index of the tty
*
* Performs some paranoid checking before true release of the @tty.
@@ -2200,7 +2202,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
/**
* tiocgwinsz - implement window query ioctl
- * @tty; tty
+ * @tty: tty
* @arg: user buffer for result
*
* Copies the kernel idea of the window size into the user buffer.
@@ -2223,8 +2225,7 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
/**
* tty_do_resize - resize event
* @tty: tty being resized
- * @rows: rows (character)
- * @cols: cols (character)
+ * @ws: new dimensions
*
* Update the termios variables and send the necessary signals to
 * perform a terminal resize correctly
@@ -2254,7 +2255,7 @@ EXPORT_SYMBOL(tty_do_resize);
/**
* tiocswinsz - implement window size set ioctl
- * @tty; tty side of tty
+ * @tty: tty side of tty
* @arg: user buffer for result
*
* Copies the user idea of the window size to the kernel. Traditionally
@@ -2402,7 +2403,6 @@ out:
/**
* tty_tiocmget - get modem status
* @tty: tty device
- * @file: user file pointer
* @p: pointer to result
*
* Obtain the modem status bits from the tty driver if the feature
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index f8ed50a16848..28a23a0fef21 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -178,8 +178,8 @@ void session_clear_tty(struct pid *session)
/**
* tty_signal_session_leader - sends SIGHUP to session leader
- * @tty controlling tty
- * @exit_session if non-zero, signal all foreground group processes
+ * @tty: controlling tty
+ * @exit_session: if non-zero, signal all foreground group processes
*
* Send SIGHUP and SIGCONT to the session leader and its process group.
* Optionally, signal all processes in the foreground process group.
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index ec1f6a48121e..fe37ec331289 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -79,7 +79,6 @@ EXPORT_SYMBOL(tty_register_ldisc);
/**
* tty_unregister_ldisc - unload a line discipline
* @disc: ldisc number
- * @new_ldisc: pointer to the ldisc object
*
* Remove a line discipline from the kernel providing it is not
* currently in use.
@@ -542,7 +541,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
- * @ldisc: the line discipline
+ * @disc: the line discipline number
*
* Set the discipline of a tty line. Must be called from a process
* context. The ldisc change logic has to protect itself against any
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 5947b54d92be..5d778c0aa009 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -268,7 +268,7 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
+u16 inverse_translate(const struct vc_data *conp, int glyph, int use_unicode)
{
struct uni_pagedir *p;
int m;
@@ -708,7 +708,7 @@ EXPORT_SYMBOL(con_set_default_unimap);
/**
* con_copy_unimap - copy unimap between two vts
* @dst_vc: target
- * @src_vt: source
+ * @src_vc: source
*
* The caller must hold the console lock when invoking this method
*/
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 0db53b5b3acf..78acc270e39a 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -743,8 +743,13 @@ static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
return;
if ((unsigned)value < ARRAY_SIZE(func_table)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&func_buf_lock, flags);
if (func_table[value])
puts_queue(vc, func_table[value]);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
} else
pr_err("k_fn called with value=%d\n", value);
}
@@ -1991,13 +1996,11 @@ out:
#undef s
#undef v
-/* FIXME: This one needs untangling and locking */
+/* FIXME: This one needs untangling */
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
{
struct kbsentry *kbs;
- char *p;
u_char *q;
- u_char __user *up;
int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
@@ -2023,23 +2026,19 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
i = array_index_nospec(kbs->kb_func, MAX_NR_FUNC);
switch (cmd) {
- case KDGKBSENT:
- sz = sizeof(kbs->kb_string) - 1; /* sz should have been
- a struct member */
- up = user_kdgkb->kb_string;
- p = func_table[i];
- if(p)
- for ( ; *p && sz; p++, sz--)
- if (put_user(*p, up++)) {
- ret = -EFAULT;
- goto reterr;
- }
- if (put_user('\0', up)) {
- ret = -EFAULT;
- goto reterr;
- }
- kfree(kbs);
- return ((p && *p) ? -EOVERFLOW : 0);
+ case KDGKBSENT: {
+ /* size should have been a struct member */
+ ssize_t len = sizeof(user_kdgkb->kb_string);
+
+ spin_lock_irqsave(&func_buf_lock, flags);
+ len = strlcpy(kbs->kb_string, func_table[i] ? : "", len);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+
+ ret = copy_to_user(user_kdgkb->kb_string, kbs->kb_string,
+ len + 1) ? -EFAULT : 0;
+
+ goto reterr;
+ }
case KDSKBSENT:
if (!perm) {
ret = -EPERM;
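The KDGKBSENT rewrite above shows a common fix shape: snapshot the shared string into a kernel bounce buffer under the spinlock, then do the fault-prone copy_to_user() with the lock dropped, since copying to userspace may sleep and must not run under a spinlock. Reduced to its essentials (buffer names hypothetical):

	spin_lock_irqsave(&func_buf_lock, flags);
	len = strlcpy(kbuf, src ? : "", sizeof(kbuf));	/* snapshot under lock */
	spin_unlock_irqrestore(&func_buf_lock, flags);

	if (copy_to_user(ubuf, kbuf, len + 1))		/* may fault and sleep */
		return -EFAULT;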
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 8e74654c1b27..f245a5acf7e9 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -54,7 +54,7 @@ static struct vc_selection {
/* set reverse video on characters s-e of console with selection. */
static inline void highlight(const int s, const int e)
{
- invert_screen(vc_sel.cons, s, e-s+2, 1);
+ invert_screen(vc_sel.cons, s, e-s+2, true);
}
/* use complementary color to show the pointer */
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 778f83ea2249..1850bacdb5b0 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -50,11 +50,7 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#undef attr
-#undef org
-#undef addr
-#define HEADER_SIZE 4
-
+#define HEADER_SIZE 4u
#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
/*
@@ -177,12 +173,14 @@ vcs_poll_data_get(struct file *file)
return poll;
}
-/*
- * Returns VC for inode.
+/**
+ * vcs_vc -- return VC for @inode
+ * @inode: inode for which to return a VC
+ * @viewed: returns whether this console is currently foreground (viewed)
+ *
* Must be called with console_lock.
*/
-static struct vc_data*
-vcs_vc(struct inode *inode, int *viewed)
+static struct vc_data *vcs_vc(struct inode *inode, bool *viewed)
{
unsigned int currcons = console(inode);
@@ -191,54 +189,177 @@ vcs_vc(struct inode *inode, int *viewed)
if (currcons == 0) {
currcons = fg_console;
if (viewed)
- *viewed = 1;
+ *viewed = true;
} else {
currcons--;
if (viewed)
- *viewed = 0;
+ *viewed = false;
}
return vc_cons[currcons].d;
}
-/*
- * Returns size for VC carried by inode.
+/**
+ * vcs_size -- return size for a VC in @vc
+ * @vc: which VC
+ * @attr: does it use attributes?
+ * @unicode: is it unicode?
+ *
* Must be called with console_lock.
*/
-static int
-vcs_size(struct inode *inode)
+static int vcs_size(const struct vc_data *vc, bool attr, bool unicode)
{
int size;
- struct vc_data *vc;
WARN_CONSOLE_UNLOCKED();
- vc = vcs_vc(inode, NULL);
- if (!vc)
- return -ENXIO;
-
size = vc->vc_rows * vc->vc_cols;
- if (use_attributes(inode)) {
- if (use_unicode(inode))
+ if (attr) {
+ if (unicode)
return -EOPNOTSUPP;
- size = 2*size + HEADER_SIZE;
- } else if (use_unicode(inode))
+
+ size = 2 * size + HEADER_SIZE;
+ } else if (unicode)
size *= 4;
+
return size;
}
static loff_t vcs_lseek(struct file *file, loff_t offset, int orig)
{
+ struct inode *inode = file_inode(file);
+ struct vc_data *vc;
int size;
console_lock();
- size = vcs_size(file_inode(file));
+ vc = vcs_vc(inode, NULL);
+ if (!vc) {
+ console_unlock();
+ return -ENXIO;
+ }
+
+ size = vcs_size(vc, use_attributes(inode), use_unicode(inode));
console_unlock();
if (size < 0)
return size;
return fixed_size_llseek(file, offset, orig, size);
}
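For concreteness: on a default 25x80 console the vcs_size() above yields 25 * 80 = 2000 bytes for the plain text device /dev/vcsN, 2 * 2000 + 4 = 4004 bytes for the attribute device /dev/vcsaN (the extra 4 being HEADER_SIZE), and 4 * 2000 = 8000 bytes for the unicode view; requesting attributes and unicode together stays -EOPNOTSUPP.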
+static int vcs_read_buf_uni(struct vc_data *vc, char *con_buf,
+ unsigned int pos, unsigned int count, bool viewed)
+{
+ unsigned int nr, row, col, maxcol = vc->vc_cols;
+ int ret;
+
+ ret = vc_uniscr_check(vc);
+ if (ret)
+ return ret;
+
+ pos /= 4;
+ row = pos / maxcol;
+ col = pos % maxcol;
+ nr = maxcol - col;
+ do {
+ if (nr > count / 4)
+ nr = count / 4;
+ vc_uniscr_copy_line(vc, con_buf, viewed, row, col, nr);
+ con_buf += nr * 4;
+ count -= nr * 4;
+ row++;
+ col = 0;
+ nr = maxcol;
+ } while (count);
+
+ return 0;
+}
+
+static void vcs_read_buf_noattr(const struct vc_data *vc, char *con_buf,
+ unsigned int pos, unsigned int count, bool viewed)
+{
+ u16 *org;
+ unsigned int col, maxcol = vc->vc_cols;
+
+ org = screen_pos(vc, pos, viewed);
+ col = pos % maxcol;
+ pos += maxcol - col;
+
+ while (count-- > 0) {
+ *con_buf++ = (vcs_scr_readw(vc, org++) & 0xff);
+ if (++col == maxcol) {
+ org = screen_pos(vc, pos, viewed);
+ col = 0;
+ pos += maxcol;
+ }
+ }
+}
+
+static unsigned int vcs_read_buf(const struct vc_data *vc, char *con_buf,
+ unsigned int pos, unsigned int count, bool viewed,
+ unsigned int *skip)
+{
+ u16 *org, *con_buf16;
+ unsigned int col, maxcol = vc->vc_cols;
+ unsigned int filled = count;
+
+ if (pos < HEADER_SIZE) {
+ /* clamp header values if they don't fit */
+ con_buf[0] = min(vc->vc_rows, 0xFFu);
+ con_buf[1] = min(vc->vc_cols, 0xFFu);
+ getconsxy(vc, con_buf + 2);
+
+ *skip += pos;
+ count += pos;
+ if (count > CON_BUF_SIZE) {
+ count = CON_BUF_SIZE;
+ filled = count - pos;
+ }
+
+ /* Advance state pointers and move on. */
+ count -= min(HEADER_SIZE, count);
+ pos = HEADER_SIZE;
+ con_buf += HEADER_SIZE;
+ /* If count >= 0, then pos is even... */
+ } else if (pos & 1) {
+ /*
+ * Skip first byte for output if start address is odd. Update
+ * region sizes up/down depending on free space in buffer.
+ */
+ (*skip)++;
+ if (count < CON_BUF_SIZE)
+ count++;
+ else
+ filled--;
+ }
+
+ if (!count)
+ return filled;
+
+ pos -= HEADER_SIZE;
+ pos /= 2;
+ col = pos % maxcol;
+
+ org = screen_pos(vc, pos, viewed);
+ pos += maxcol - col;
+
+ /*
+ * Buffer has even length, so we can always copy character + attribute.
+ * We do not copy last byte to userspace if count is odd.
+ */
+ count = (count + 1) / 2;
+ con_buf16 = (u16 *)con_buf;
+
+ while (count) {
+ *con_buf16++ = vcs_scr_readw(vc, org++);
+ count--;
+ if (++col == maxcol) {
+ org = screen_pos(vc, pos, viewed);
+ col = 0;
+ pos += maxcol;
+ }
+ }
+
+ return filled;
+}
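The first HEADER_SIZE bytes that vcs_read_buf() synthesizes are the classic /dev/vcsa header: rows, columns, then the cursor coordinates filled in by getconsxy(), each clamped to 255. A short userspace sketch, for illustration only, that reads the header back:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char hdr[4];	/* { rows, cols, cursor x, cursor y } */
	int fd = open("/dev/vcsa", O_RDONLY);

	if (fd < 0 || read(fd, hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	printf("%ux%u, cursor at (%u,%u)\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	close(fd);
	return 0;
}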
static ssize_t
vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -246,11 +367,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct inode *inode = file_inode(file);
struct vc_data *vc;
struct vcs_poll_data *poll;
- long pos, read;
- int attr, uni_mode, row, col, maxcol, viewed;
- unsigned short *org = NULL;
+ unsigned int read;
ssize_t ret;
char *con_buf;
+ loff_t pos;
+ bool viewed, attr, uni_mode;
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
@@ -283,16 +404,14 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
read = 0;
ret = 0;
while (count) {
- char *con_buf0, *con_buf_start;
- long this_round, size;
- ssize_t orig_count;
- long p = pos;
+ unsigned int this_round, skip = 0;
+ int size;
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
*/
- size = vcs_size(inode);
+ size = vcs_size(vc, attr, uni_mode);
if (size < 0) {
if (read)
break;
@@ -313,104 +432,17 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* attempt to move it to userspace.
*/
- con_buf_start = con_buf0 = con_buf;
- orig_count = this_round;
- maxcol = vc->vc_cols;
if (uni_mode) {
- unsigned int nr;
-
- ret = vc_uniscr_check(vc);
+ ret = vcs_read_buf_uni(vc, con_buf, pos, this_round,
+ viewed);
if (ret)
break;
- p /= 4;
- row = p / vc->vc_cols;
- col = p % maxcol;
- nr = maxcol - col;
- do {
- if (nr > this_round/4)
- nr = this_round/4;
- vc_uniscr_copy_line(vc, con_buf0, viewed,
- row, col, nr);
- con_buf0 += nr * 4;
- this_round -= nr * 4;
- row++;
- col = 0;
- nr = maxcol;
- } while (this_round);
} else if (!attr) {
- org = screen_pos(vc, p, viewed);
- col = p % maxcol;
- p += maxcol - col;
- while (this_round-- > 0) {
- *con_buf0++ = (vcs_scr_readw(vc, org++) & 0xff);
- if (++col == maxcol) {
- org = screen_pos(vc, p, viewed);
- col = 0;
- p += maxcol;
- }
- }
+ vcs_read_buf_noattr(vc, con_buf, pos, this_round,
+ viewed);
} else {
- if (p < HEADER_SIZE) {
- size_t tmp_count;
-
- /* clamp header values if they don't fit */
- con_buf0[0] = min(vc->vc_rows, 0xFFu);
- con_buf0[1] = min(vc->vc_cols, 0xFFu);
- getconsxy(vc, con_buf0 + 2);
-
- con_buf_start += p;
- this_round += p;
- if (this_round > CON_BUF_SIZE) {
- this_round = CON_BUF_SIZE;
- orig_count = this_round - p;
- }
-
- tmp_count = HEADER_SIZE;
- if (tmp_count > this_round)
- tmp_count = this_round;
-
- /* Advance state pointers and move on. */
- this_round -= tmp_count;
- p = HEADER_SIZE;
- con_buf0 = con_buf + HEADER_SIZE;
- /* If this_round >= 0, then p is even... */
- } else if (p & 1) {
- /* Skip first byte for output if start address is odd
- * Update region sizes up/down depending on free
- * space in buffer.
- */
- con_buf_start++;
- if (this_round < CON_BUF_SIZE)
- this_round++;
- else
- orig_count--;
- }
- if (this_round > 0) {
- unsigned short *tmp_buf = (unsigned short *)con_buf0;
-
- p -= HEADER_SIZE;
- p /= 2;
- col = p % maxcol;
-
- org = screen_pos(vc, p, viewed);
- p += maxcol - col;
-
- /* Buffer has even length, so we can always copy
- * character + attribute. We do not copy last byte
- * to userspace if this_round is odd.
- */
- this_round = (this_round + 1) >> 1;
-
- while (this_round) {
- *tmp_buf++ = vcs_scr_readw(vc, org++);
- this_round --;
- if (++col == maxcol) {
- org = screen_pos(vc, p, viewed);
- col = 0;
- p += maxcol;
- }
- }
- }
+ this_round = vcs_read_buf(vc, con_buf, pos, this_round,
+ viewed, &skip);
}
/* Finally, release the console semaphore while we push
@@ -421,18 +453,18 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
console_unlock();
- ret = copy_to_user(buf, con_buf_start, orig_count);
+ ret = copy_to_user(buf, con_buf + skip, this_round);
console_lock();
if (ret) {
- read += (orig_count - ret);
+ read += this_round - ret;
ret = -EFAULT;
break;
}
- buf += orig_count;
- pos += orig_count;
- read += orig_count;
- count -= orig_count;
+ buf += this_round;
+ pos += this_round;
+ read += this_round;
+ count -= this_round;
}
*ppos += read;
if (read)
@@ -443,18 +475,129 @@ unlock_out:
return ret;
}
+static u16 *vcs_write_buf_noattr(struct vc_data *vc, const char *con_buf,
+ unsigned int pos, unsigned int count, bool viewed, u16 **org0)
+{
+ u16 *org;
+ unsigned int col, maxcol = vc->vc_cols;
+
+ *org0 = org = screen_pos(vc, pos, viewed);
+ col = pos % maxcol;
+ pos += maxcol - col;
+
+ while (count > 0) {
+ unsigned char c = *con_buf++;
+
+ count--;
+ vcs_scr_writew(vc,
+ (vcs_scr_readw(vc, org) & 0xff00) | c, org);
+ org++;
+ if (++col == maxcol) {
+ org = screen_pos(vc, pos, viewed);
+ col = 0;
+ pos += maxcol;
+ }
+ }
+
+ return org;
+}
+
+/*
+ * Compilers (gcc 10) are unable to optimize the swap in cpu_to_le16. So do it
+ * the poor man's way.
+ */
+static inline u16 vc_compile_le16(u8 hi, u8 lo)
+{
+#ifdef __BIG_ENDIAN
+ return (lo << 8u) | hi;
+#else
+ return (hi << 8u) | lo;
+#endif
+}
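Note that vc_compile_le16(hi, lo) is equivalent to cpu_to_le16((hi << 8) | lo): on either host endianness the returned u16 lands in memory as the byte pair { lo, hi }, matching the screen buffer's character/attribute cell layout; the open-coded form merely sidesteps the byte swap gcc 10 fails to fold.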
+
+static u16 *vcs_write_buf(struct vc_data *vc, const char *con_buf,
+ unsigned int pos, unsigned int count, bool viewed, u16 **org0)
+{
+ u16 *org;
+ unsigned int col, maxcol = vc->vc_cols;
+ unsigned char c;
+
+ /* header */
+ if (pos < HEADER_SIZE) {
+ char header[HEADER_SIZE];
+
+ getconsxy(vc, header + 2);
+ while (pos < HEADER_SIZE && count > 0) {
+ count--;
+ header[pos++] = *con_buf++;
+ }
+ if (!viewed)
+ putconsxy(vc, header + 2);
+ }
+
+ if (!count)
+ return NULL;
+
+ pos -= HEADER_SIZE;
+ col = (pos/2) % maxcol;
+
+ *org0 = org = screen_pos(vc, pos/2, viewed);
+
+ /* odd pos -- the first single character */
+ if (pos & 1) {
+ count--;
+ c = *con_buf++;
+ vcs_scr_writew(vc, vc_compile_le16(c, vcs_scr_readw(vc, org)),
+ org);
+ org++;
+ pos++;
+ if (++col == maxcol) {
+ org = screen_pos(vc, pos/2, viewed);
+ col = 0;
+ }
+ }
+
+ pos /= 2;
+ pos += maxcol - col;
+
+ /* even pos -- handle attr+character pairs */
+ while (count > 1) {
+ unsigned short w;
+
+ w = get_unaligned(((unsigned short *)con_buf));
+ vcs_scr_writew(vc, w, org++);
+ con_buf += 2;
+ count -= 2;
+ if (++col == maxcol) {
+ org = screen_pos(vc, pos, viewed);
+ col = 0;
+ pos += maxcol;
+ }
+ }
+
+ if (!count)
+ return org;
+
+ /* odd pos -- the remaining character */
+ c = *con_buf++;
+ vcs_scr_writew(vc, vc_compile_le16(vcs_scr_readw(vc, org) >> 8, c),
+ org);
+
+ return org;
+}
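As a worked example of the odd-position branches above: past the 4-byte header, even offsets in /dev/vcsa address a cell's character byte and odd offsets its attribute byte, so seeking to file offset 5 and writing a single byte replaces only the attribute of the first cell, with vc_compile_le16() splicing the new byte around the character byte read back from the screen.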
+
static ssize_t
vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct inode *inode = file_inode(file);
struct vc_data *vc;
- long pos;
- long attr, size, written;
- char *con_buf0;
- int col, maxcol, viewed;
- u16 *org0 = NULL, *org = NULL;
- size_t ret;
char *con_buf;
+ u16 *org0, *org;
+ unsigned int written;
+ int size;
+ ssize_t ret;
+ loff_t pos;
+ bool viewed, attr;
if (use_unicode(inode))
return -EOPNOTSUPP;
@@ -476,7 +619,11 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!vc)
goto unlock_out;
- size = vcs_size(inode);
+ size = vcs_size(vc, attr, false);
+ if (size < 0) {
+ ret = size;
+ goto unlock_out;
+ }
ret = -EINVAL;
if (pos < 0 || pos > size)
goto unlock_out;
@@ -484,9 +631,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
count = size - pos;
written = 0;
while (count) {
- long this_round = count;
- size_t orig_count;
- long p;
+ unsigned int this_round = count;
if (this_round > CON_BUF_SIZE)
this_round = CON_BUF_SIZE;
@@ -515,7 +660,7 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
* the user buffer, so recheck.
* Return data written up to now on failure.
*/
- size = vcs_size(inode);
+ size = vcs_size(vc, attr, false);
if (size < 0) {
if (written)
break;
@@ -531,95 +676,18 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
* under the lock using the local kernel buffer.
*/
- con_buf0 = con_buf;
- orig_count = this_round;
- maxcol = vc->vc_cols;
- p = pos;
- if (!attr) {
- org0 = org = screen_pos(vc, p, viewed);
- col = p % maxcol;
- p += maxcol - col;
-
- while (this_round > 0) {
- unsigned char c = *con_buf0++;
-
- this_round--;
- vcs_scr_writew(vc,
- (vcs_scr_readw(vc, org) & 0xff00) | c, org);
- org++;
- if (++col == maxcol) {
- org = screen_pos(vc, p, viewed);
- col = 0;
- p += maxcol;
- }
- }
- } else {
- if (p < HEADER_SIZE) {
- char header[HEADER_SIZE];
-
- getconsxy(vc, header + 2);
- while (p < HEADER_SIZE && this_round > 0) {
- this_round--;
- header[p++] = *con_buf0++;
- }
- if (!viewed)
- putconsxy(vc, header + 2);
- }
- p -= HEADER_SIZE;
- col = (p/2) % maxcol;
- if (this_round > 0) {
- org0 = org = screen_pos(vc, p/2, viewed);
- if ((p & 1) && this_round > 0) {
- char c;
-
- this_round--;
- c = *con_buf0++;
-#ifdef __BIG_ENDIAN
- vcs_scr_writew(vc, c |
- (vcs_scr_readw(vc, org) & 0xff00), org);
-#else
- vcs_scr_writew(vc, (c << 8) |
- (vcs_scr_readw(vc, org) & 0xff), org);
-#endif
- org++;
- p++;
- if (++col == maxcol) {
- org = screen_pos(vc, p/2, viewed);
- col = 0;
- }
- }
- p /= 2;
- p += maxcol - col;
- }
- while (this_round > 1) {
- unsigned short w;
-
- w = get_unaligned(((unsigned short *)con_buf0));
- vcs_scr_writew(vc, w, org++);
- con_buf0 += 2;
- this_round -= 2;
- if (++col == maxcol) {
- org = screen_pos(vc, p, viewed);
- col = 0;
- p += maxcol;
- }
- }
- if (this_round > 0) {
- unsigned char c;
-
- c = *con_buf0++;
-#ifdef __BIG_ENDIAN
- vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff) | (c << 8), org);
-#else
- vcs_scr_writew(vc, (vcs_scr_readw(vc, org) & 0xff00) | c, org);
-#endif
- }
- }
- count -= orig_count;
- written += orig_count;
- buf += orig_count;
- pos += orig_count;
- if (org0)
+ if (attr)
+ org = vcs_write_buf(vc, con_buf, pos, this_round,
+ viewed, &org0);
+ else
+ org = vcs_write_buf_noattr(vc, con_buf, pos, this_round,
+ viewed, &org0);
+
+ count -= this_round;
+ written += this_round;
+ buf += this_round;
+ pos += this_round;
+ if (org)
update_region(vc, (unsigned long)(org0), org - org0);
}
*ppos += written;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 19cd4a4b1939..d04a162939a4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -283,7 +283,8 @@ static inline bool con_should_update(const struct vc_data *vc)
return con_is_visible(vc) && !console_blanked;
}
-static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
+static inline unsigned short *screenpos(const struct vc_data *vc, int offset,
+ bool viewed)
{
unsigned short *p;
@@ -543,7 +544,7 @@ int vc_uniscr_check(struct vc_data *vc)
* This must be preceded by a successful call to vc_uniscr_check() once
* the console lock has been taken.
*/
-void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed,
+void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
unsigned int row, unsigned int col, unsigned int nr)
{
struct uni_screen *uniscr = get_vc_uniscr(vc);
@@ -752,7 +753,7 @@ static void update_attr(struct vc_data *vc)
}
/* Note: inverting the screen twice should revert to the original state */
-void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
+void invert_screen(struct vc_data *vc, int offset, int count, bool viewed)
{
unsigned short *p;
@@ -811,7 +812,7 @@ void complement_pos(struct vc_data *vc, int offset)
if (old_offset != -1 && old_offset >= 0 &&
old_offset < vc->vc_screenbuf_size) {
- scr_writew(old, screenpos(vc, old_offset, 1));
+ scr_writew(old, screenpos(vc, old_offset, true));
if (con_should_update(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
notify_update(vc);
@@ -823,7 +824,7 @@ void complement_pos(struct vc_data *vc, int offset)
offset < vc->vc_screenbuf_size) {
unsigned short new;
unsigned short *p;
- p = screenpos(vc, offset, 1);
+ p = screenpos(vc, offset, true);
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
@@ -1180,7 +1181,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
/**
* vc_do_resize - resizing method for the tty
* @tty: tty being resized
- * @real_tty: real tty (different to tty if a pty/tty pair)
* @vc: virtual console private data
* @cols: columns
* @lines: lines
@@ -1885,7 +1885,9 @@ static void set_mode(struct vc_data *vc, int on_off)
case 5: /* Inverted screen on/off */
if (vc->vc_decscnm != on_off) {
vc->vc_decscnm = on_off;
- invert_screen(vc, 0, vc->vc_screenbuf_size, 0);
+ invert_screen(vc, 0,
+ vc->vc_screenbuf_size,
+ false);
update_attr(vc);
}
break;
@@ -2605,6 +2607,9 @@ static inline int vc_sanitize_unicode(const int c)
/**
* vc_translate_unicode -- Combine UTF-8 into Unicode in @vc_utf_char
+ * @vc: virtual console
+ * @c: character to translate
+ * @rescan: we return true if we need more (continuation) data
*
* @vc_utf_char is the being-constructed unicode character.
* @vc_utf_count is the number of continuation bytes still expected to arrive.
@@ -3980,7 +3985,7 @@ EXPORT_SYMBOL(con_is_visible);
/**
* con_debug_enter - prepare the console for the kernel debugger
- * @sw: console driver
+ * @vc: virtual console
*
* Called when the console is taken over by the kernel debugger, this
* function needs to save the current console state, then put the console
@@ -4038,7 +4043,6 @@ EXPORT_SYMBOL_GPL(con_debug_enter);
/**
* con_debug_leave - restore console state
- * @sw: console driver
*
* Restore the console state to what it was before the kernel debugger
* was invoked.
@@ -4700,27 +4704,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
return rc;
}
-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
-{
- int con = op->height;
- int rc;
-
-
- console_lock();
- if (vc->vc_mode != KD_TEXT)
- rc = -EINVAL;
- else if (!vc->vc_sw->con_font_copy)
- rc = -ENOSYS;
- else if (con < 0 || !vc_cons_allocated(con))
- rc = -ENOTTY;
- else if (con == vc->vc_num) /* nothing to do */
- rc = 0;
- else
- rc = vc->vc_sw->con_font_copy(vc, con);
- console_unlock();
- return rc;
-}
-
int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
@@ -4731,7 +4714,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
case KD_FONT_OP_COPY:
- return con_font_copy(vc, op);
+ /* was buggy and never really used */
+ return -EINVAL;
}
return -ENOSYS;
}
@@ -4741,9 +4725,9 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
*/
/* used by selection */
-u16 screen_glyph(struct vc_data *vc, int offset)
+u16 screen_glyph(const struct vc_data *vc, int offset)
{
- u16 w = scr_readw(screenpos(vc, offset, 1));
+ u16 w = scr_readw(screenpos(vc, offset, true));
u16 c = w & 0xff;
if (w & vc->vc_hi_font_mask)
@@ -4752,7 +4736,7 @@ u16 screen_glyph(struct vc_data *vc, int offset)
}
EXPORT_SYMBOL_GPL(screen_glyph);
-u32 screen_glyph_unicode(struct vc_data *vc, int n)
+u32 screen_glyph_unicode(const struct vc_data *vc, int n)
{
struct uni_screen *uniscr = get_vc_uniscr(vc);
@@ -4763,27 +4747,27 @@ u32 screen_glyph_unicode(struct vc_data *vc, int n)
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
/* used by vcs - note the word offset */
-unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
+unsigned short *screen_pos(const struct vc_data *vc, int w_offset, bool viewed)
{
return screenpos(vc, 2 * w_offset, viewed);
}
EXPORT_SYMBOL_GPL(screen_pos);
-void getconsxy(struct vc_data *vc, unsigned char *p)
+void getconsxy(const struct vc_data *vc, unsigned char xy[static 2])
{
/* clamp values if they don't fit */
- p[0] = min(vc->state.x, 0xFFu);
- p[1] = min(vc->state.y, 0xFFu);
+ xy[0] = min(vc->state.x, 0xFFu);
+ xy[1] = min(vc->state.y, 0xFFu);
}
-void putconsxy(struct vc_data *vc, unsigned char *p)
+void putconsxy(struct vc_data *vc, unsigned char xy[static const 2])
{
hide_cursor(vc);
- gotoxy(vc, p[0], p[1]);
+ gotoxy(vc, xy[0], xy[1]);
set_cursor(vc);
}
-u16 vcs_scr_readw(struct vc_data *vc, const u16 *org)
+u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org)
{
if ((unsigned long)org == vc->vc_pos && softcursor_original != -1)
return softcursor_original;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a4e520bdd521..5f61b25a9aaa 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -181,7 +181,7 @@ static void vt_event_wait(struct vt_event_wait *vw)
/**
* vt_event_wait_ioctl - event ioctl handler
- * @arg: argument to ioctl
+ * @event: argument to ioctl (the event)
*
* Implement the VT_WAITEVENT ioctl using the VT event interface
*/
@@ -208,7 +208,6 @@ static int vt_event_wait_ioctl(struct vt_event __user *event)
/**
* vt_waitactive - active console wait
- * @event: event code
* @n: new console
*
* Helper for event waits. Used to implement the legacy
@@ -485,7 +484,7 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0;
}
-static inline int do_fontx_ioctl(int cmd,
+static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
struct consolefontdesc __user *user_cfd,
struct console_font_op *op)
{
@@ -503,15 +502,16 @@ static inline int do_fontx_ioctl(int cmd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- return con_font_op(vc_cons[fg_console].d, op);
- case GIO_FONTX: {
+ return con_font_op(vc, op);
+
+ case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
op->width = 8;
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata;
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -519,12 +519,11 @@ static inline int do_fontx_ioctl(int cmd,
if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc)))
return -EFAULT;
return 0;
- }
}
return -EINVAL;
}
-static int vt_io_fontreset(struct console_font_op *op)
+static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
{
int ret;
@@ -538,19 +537,19 @@ static int vt_io_fontreset(struct console_font_op *op)
op->op = KD_FONT_OP_SET_DEFAULT;
op->data = NULL;
- ret = con_font_op(vc_cons[fg_console].d, op);
+ ret = con_font_op(vc, op);
if (ret)
return ret;
console_lock();
- con_set_default_unimap(vc_cons[fg_console].d);
+ con_set_default_unimap(vc);
console_unlock();
return 0;
}
static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
- struct vc_data *vc)
+ bool perm, struct vc_data *vc)
{
struct unimapdesc tmp;
@@ -558,9 +557,11 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud,
return -EFAULT;
switch (cmd) {
case PIO_UNIMAP:
+ if (!perm)
+ return -EPERM;
return con_set_unimap(vc, tmp.entry_ct, tmp.entries);
case GIO_UNIMAP:
- if (fg_console != vc->vc_num)
+ if (!perm && fg_console != vc->vc_num)
return -EPERM;
return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct),
tmp.entries);
@@ -583,7 +584,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 0;
op.charcount = 256;
op.data = up;
- return con_font_op(vc_cons[fg_console].d, &op);
+ return con_font_op(vc, &op);
case GIO_FONT:
op.op = KD_FONT_OP_GET;
@@ -592,7 +593,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 32;
op.charcount = 256;
op.data = up;
- return con_font_op(vc_cons[fg_console].d, &op);
+ return con_font_op(vc, &op);
case PIO_CMAP:
if (!perm)
@@ -608,13 +609,13 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
fallthrough;
case GIO_FONTX:
- return do_fontx_ioctl(cmd, up, &op);
+ return do_fontx_ioctl(vc, cmd, up, &op);
case PIO_FONTRESET:
if (!perm)
return -EPERM;
- return vt_io_fontreset(&op);
+ return vt_io_fontreset(vc, &op);
case PIO_SCRNMAP:
if (!perm)
@@ -640,10 +641,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
case PIO_UNIMAP:
case GIO_UNIMAP:
- if (!perm)
- return -EPERM;
-
- return do_unimap_ioctl(cmd, up, vc);
+ return do_unimap_ioctl(cmd, up, perm, vc);
default:
return -ENOIOCTLCMD;
@@ -773,58 +771,21 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
return -EFAULT;
- /* FIXME: Should check the copies properly */
- if (!v.v_vlin)
- v.v_vlin = vc->vc_scan_lines;
-
- if (v.v_clin) {
- int rows = v.v_vlin / v.v_clin;
- if (v.v_rows != rows) {
- if (v.v_rows) /* Parameters don't add up */
- return -EINVAL;
- v.v_rows = rows;
- }
- }
-
- if (v.v_vcol && v.v_ccol) {
- int cols = v.v_vcol / v.v_ccol;
- if (v.v_cols != cols) {
- if (v.v_cols)
- return -EINVAL;
- v.v_cols = cols;
- }
- }
-
- if (v.v_clin > 32)
- return -EINVAL;
+ if (v.v_vlin)
+ pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
+ if (v.v_clin)
+ pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
+ console_lock();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- struct vc_data *vcp;
+ vc = vc_cons[i].d;
- if (!vc_cons[i].d)
- continue;
- console_lock();
- vcp = vc_cons[i].d;
- if (vcp) {
- int ret;
- int save_scan_lines = vcp->vc_scan_lines;
- int save_font_height = vcp->vc_font.height;
-
- if (v.v_vlin)
- vcp->vc_scan_lines = v.v_vlin;
- if (v.v_clin)
- vcp->vc_font.height = v.v_clin;
- vcp->vc_resize_user = 1;
- ret = vc_resize(vcp, v.v_cols, v.v_rows);
- if (ret) {
- vcp->vc_scan_lines = save_scan_lines;
- vcp->vc_font.height = save_font_height;
- console_unlock();
- return ret;
- }
+ if (vc) {
+ vc->vc_resize_user = 1;
+ vc_resize(vc, v.v_cols, v.v_rows);
}
- console_unlock();
}
+ console_unlock();
return 0;
}
@@ -1105,8 +1066,9 @@ struct compat_consolefontdesc {
};
static inline int
-compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
- int perm, struct console_font_op *op)
+compat_fontx_ioctl(struct vc_data *vc, int cmd,
+ struct compat_consolefontdesc __user *user_cfd,
+ int perm, struct console_font_op *op)
{
struct compat_consolefontdesc cfdarg;
int i;
@@ -1124,7 +1086,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- return con_font_op(vc_cons[fg_console].d, op);
+ return con_font_op(vc, op);
+
case GIO_FONTX:
op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD;
@@ -1132,7 +1095,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata);
- i = con_font_op(vc_cons[fg_console].d, op);
+ i = con_font_op(vc, op);
if (i)
return i;
cfdarg.charheight = op->height;
@@ -1222,7 +1185,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
*/
case PIO_FONTX:
case GIO_FONTX:
- return compat_fontx_ioctl(cmd, up, perm, &op);
+ return compat_fontx_ioctl(vc, cmd, up, perm, &op);
case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 73efb80815db..be06f1a961c2 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev)
return retval;
}
-static void uio_free_minor(struct uio_device *idev)
+static void uio_free_minor(unsigned long minor)
{
mutex_lock(&minor_lock);
- idr_remove(&uio_idr, idev->minor);
+ idr_remove(&uio_idr, minor);
mutex_unlock(&minor_lock);
}
@@ -990,7 +990,7 @@ err_request_irq:
err_uio_dev_add_attributes:
device_del(&idev->dev);
err_device_create:
- uio_free_minor(idev);
+ uio_free_minor(idev->minor);
put_device(&idev->dev);
return ret;
}
@@ -1042,13 +1042,13 @@ EXPORT_SYMBOL_GPL(__devm_uio_register_device);
void uio_unregister_device(struct uio_info *info)
{
struct uio_device *idev;
+ unsigned long minor;
if (!info || !info->uio_dev)
return;
idev = info->uio_dev;
-
- uio_free_minor(idev);
+ minor = idev->minor;
mutex_lock(&idev->info_lock);
uio_dev_del_attributes(idev);
@@ -1064,6 +1064,8 @@ void uio_unregister_device(struct uio_info *info)
device_unregister(&idev->dev);
+ uio_free_minor(minor);
+
return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);
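The ordering here is deliberate: uio_unregister_device() snapshots idev->minor before tearing the device down and releases the idr entry only after device_unregister(), while the probe error path passes the minor by value; this avoids handing the same minor to a new device while the old one is still being destroyed, and avoids dereferencing idev after put_device() may have freed it.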
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index ea66f8f385ba..e62a770a5d3b 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -230,12 +230,12 @@ CXACRU__ATTR_INIT(_name)
static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return sprintf(buf, "%u\n", value);
}
static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sprintf(buf, "%d\n", value);
}
static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
@@ -255,8 +255,8 @@ static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
static char *str[] = { "no", "yes" };
if (unlikely(value >= ARRAY_SIZE(str)))
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
- return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
+ return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
@@ -264,8 +264,8 @@ static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
static char *str[] = { NULL, "not connected", "connected", "lost" };
if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
- return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
+ return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
@@ -275,8 +275,8 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
"waiting", "initialising"
};
if (unlikely(value >= ARRAY_SIZE(str)))
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
- return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
+ return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
@@ -288,8 +288,8 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
"ITU-T G.992.2 (G.LITE)"
};
if (unlikely(value >= ARRAY_SIZE(str)))
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
- return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
+ return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%s\n", str[value]);
}
/*
@@ -309,8 +309,7 @@ static ssize_t mac_address_show(struct device *dev,
if (instance == NULL || instance->usbatm->atm_dev == NULL)
return -ENODEV;
- return snprintf(buf, PAGE_SIZE, "%pM\n",
- instance->usbatm->atm_dev->esi);
+ return sprintf(buf, "%pM\n", instance->usbatm->atm_dev->esi);
}
static ssize_t adsl_state_show(struct device *dev,
@@ -326,8 +325,8 @@ static ssize_t adsl_state_show(struct device *dev,
value = instance->card_info[CXINF_LINE_STARTABLE];
if (unlikely(value >= ARRAY_SIZE(str)))
- return snprintf(buf, PAGE_SIZE, "%u\n", value);
- return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
+ return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%s\n", str[value]);
}
static ssize_t adsl_state_store(struct device *dev,
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 4e12a32ca392..56fe30d247da 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -511,9 +511,10 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
** receive **
**************/
-static void usbatm_rx_process(unsigned long data)
+static void usbatm_rx_process(struct tasklet_struct *t)
{
- struct usbatm_data *instance = (struct usbatm_data *)data;
+ struct usbatm_data *instance = from_tasklet(instance, t,
+ rx_channel.tasklet);
struct urb *urb;
while ((urb = usbatm_pop_urb(&instance->rx_channel))) {
@@ -564,9 +565,10 @@ static void usbatm_rx_process(unsigned long data)
** send **
***********/
-static void usbatm_tx_process(unsigned long data)
+static void usbatm_tx_process(struct tasklet_struct *t)
{
- struct usbatm_data *instance = (struct usbatm_data *)data;
+ struct usbatm_data *instance = from_tasklet(instance, t,
+ tx_channel.tasklet);
struct sk_buff *skb = instance->current_skb;
struct urb *urb = NULL;
const unsigned int buf_size = instance->tx_channel.buf_size;
@@ -1069,8 +1071,8 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
usbatm_init_channel(&instance->rx_channel);
usbatm_init_channel(&instance->tx_channel);
- tasklet_init(&instance->rx_channel.tasklet, usbatm_rx_process, (unsigned long)instance);
- tasklet_init(&instance->tx_channel.tasklet, usbatm_tx_process, (unsigned long)instance);
+ tasklet_setup(&instance->rx_channel.tasklet, usbatm_rx_process);
+ tasklet_setup(&instance->tx_channel.tasklet, usbatm_tx_process);
instance->rx_channel.stride = ATM_CELL_SIZE + driver->rx_padding;
instance->tx_channel.stride = ATM_CELL_SIZE + driver->tx_padding;
instance->rx_channel.usbatm = instance->tx_channel.usbatm = instance;
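Both conversions follow the tasklet_setup()/from_tasklet() pattern, which replaces the old unsigned long cookie with the tasklet pointer itself. A minimal sketch, with struct demo_dev purely illustrative:

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct tasklet;
	int pending;
};

static void demo_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the tasklet member */
	struct demo_dev *dd = from_tasklet(dd, t, tasklet);

	dd->pending = 0;	/* real work would go here */
}

static void demo_init(struct demo_dev *dd)
{
	/* no (unsigned long) cast of the device pointer any more */
	tasklet_setup(&dd->tasklet, demo_tasklet_fn);
}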
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 60f4711717d2..e65f1a0ae80b 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1123,9 +1123,9 @@ static void c67x00_do_work(struct c67x00_hcd *c67x00)
/* -------------------------------------------------------------------------- */
-static void c67x00_sched_tasklet(unsigned long __c67x00)
+static void c67x00_sched_tasklet(struct tasklet_struct *t)
{
- struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
+ struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
c67x00_do_work(c67x00);
}
@@ -1136,8 +1136,7 @@ void c67x00_sched_kick(struct c67x00_hcd *c67x00)
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
- tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
- (unsigned long)c67x00);
+ tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
return 0;
}
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index aba988e71958..54a2d70a9c73 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -15,6 +15,8 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include "core.h"
#define USB3_CORE_CTRL1 0x00
#define USB3_CORE_CTRL2 0x04
@@ -32,7 +34,7 @@
/* Register bits definition */
/* USB3_CORE_CTRL1 */
-#define SW_RESET_MASK (0x3f << 26)
+#define SW_RESET_MASK GENMASK(31, 26)
#define PWR_SW_RESET BIT(31)
#define APB_SW_RESET BIT(30)
#define AXI_SW_RESET BIT(29)
@@ -53,8 +55,8 @@
#define LPM_CLK_REQ BIT(28)
#define DEVU3_WAEKUP_EN BIT(14)
#define OTG_WAKEUP_EN BIT(12)
-#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
-#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
+#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */
+#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */
/* USB3_CORE_STATUS */
#define MDCTRL_CLK_STATUS BIT(15)
@@ -66,11 +68,30 @@
#define CLK_VALID_COMPARE_BITS (0xf << 28)
#define PHY_REFCLK_REQ (1 << 0)
+/* OTG registers definition */
+#define OTGSTS 0x4
+/* OTGSTS */
+#define OTG_NRDY BIT(11)
+
+/* xHCI registers definition */
+#define XECP_PM_PMCSR 0x8018
+#define XECP_AUX_CTRL_REG1 0x8120
+
+/* Register bits definition */
+/* XECP_AUX_CTRL_REG1 */
+#define CFG_RXDET_P3_EN BIT(15)
+
+/* XECP_PM_PMCSR */
+#define PS_MASK GENMASK(1, 0)
+#define PS_D0 0
+#define PS_D1 1
+
struct cdns_imx {
struct device *dev;
void __iomem *noncore;
struct clk_bulk_data *clks;
int num_clks;
+ struct platform_device *cdns3_pdev;
};
static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset)
@@ -126,6 +147,20 @@ static int cdns_imx_noncore_init(struct cdns_imx *data)
return ret;
}
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup);
+static struct cdns3_platform_data cdns_imx_pdata = {
+ .platform_suspend = cdns_imx_platform_suspend,
+};
+
+static const struct of_dev_auxdata cdns_imx_auxdata[] = {
+ {
+ .compatible = "cdns,usb3",
+ .platform_data = &cdns_imx_pdata,
+ },
+ {},
+};
+
static int cdns_imx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -162,14 +197,18 @@ static int cdns_imx_probe(struct platform_device *pdev)
if (ret)
goto err;
- ret = of_platform_populate(node, NULL, NULL, dev);
+ ret = of_platform_populate(node, NULL, cdns_imx_auxdata, dev);
if (ret) {
dev_err(dev, "failed to create children: %d\n", ret);
goto err;
}
- return ret;
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_forbid(dev);
+ return ret;
err:
clk_bulk_disable_unprepare(data->num_clks, data->clks);
return ret;
@@ -194,6 +233,147 @@ static int cdns_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
+{
+ u32 value;
+
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ if (enable)
+ value |= OTG_WAKEUP_EN | DEVU3_WAEKUP_EN;
+ else
+ value &= ~(OTG_WAKEUP_EN | DEVU3_WAEKUP_EN);
+
+ cdns_imx_writel(data, USB3_INT_REG, value);
+}
+
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct device *parent = dev->parent;
+ struct cdns_imx *data = dev_get_drvdata(parent);
+ void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
+ void __iomem *xhci_regs = cdns->xhci_regs;
+ u32 value;
+ int ret = 0;
+
+ if (cdns->role != USB_ROLE_HOST)
+ return 0;
+
+ if (suspend) {
+ /* SW requests low power when all USB ports allow it */
+ value = readl(xhci_regs + XECP_PM_PMCSR);
+ value &= ~PS_MASK;
+ value |= PS_D1;
+ writel(value, xhci_regs + XECP_PM_PMCSR);
+
+ /* mdctrl_clk_sel */
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value |= MDCTRL_CLK_SEL;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ /* wait for mdctrl_clk_status */
+ value = cdns_imx_readl(data, USB3_CORE_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
+ (value & MDCTRL_CLK_STATUS) == MDCTRL_CLK_STATUS,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait mdctrl_clk_status timeout\n");
+
+ /* wait lpm_clk_req to be 0 */
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
+ (value & LPM_CLK_REQ) != LPM_CLK_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait lpm_clk_req timeout\n");
+
+ /* wait phy_refclk_req to be 0 */
+ value = cdns_imx_readl(data, USB3_SSPHY_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value,
+ (value & PHY_REFCLK_REQ) != PHY_REFCLK_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait phy_refclk_req timeout\n");
+
+ cdns3_set_wakeup(data, wakeup);
+ } else {
+ cdns3_set_wakeup(data, false);
+
+ /* SW request D0 */
+ value = readl(xhci_regs + XECP_PM_PMCSR);
+ value &= ~PS_MASK;
+ value |= PS_D0;
+ writel(value, xhci_regs + XECP_PM_PMCSR);
+
+ /* clr CFG_RXDET_P3_EN */
+ value = readl(xhci_regs + XECP_AUX_CTRL_REG1);
+ value &= ~CFG_RXDET_P3_EN;
+ writel(value, xhci_regs + XECP_AUX_CTRL_REG1);
+
+ /* clear mdctrl_clk_sel */
+ value = cdns_imx_readl(data, USB3_CORE_CTRL1);
+ value &= ~MDCTRL_CLK_SEL;
+ cdns_imx_writel(data, USB3_CORE_CTRL1, value);
+
+ /* wait CLK_125_REQ to be 1 */
+ value = cdns_imx_readl(data, USB3_INT_REG);
+ ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value,
+ (value & CLK_125_REQ) == CLK_125_REQ,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait CLK_125_REQ timeout\n");
+
+ /* wait until mdctrl_clk_status is cleared */
+ value = cdns_imx_readl(data, USB3_CORE_STATUS);
+ ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value,
+ (value & MDCTRL_CLK_STATUS) != MDCTRL_CLK_STATUS,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait mdctrl_clk_status cleared timeout\n");
+
+ /* Wait until OTG_NRDY is 0 */
+ value = readl(otg_regs + OTGSTS);
+ ret = readl_poll_timeout(otg_regs + OTGSTS, value,
+ (value & OTG_NRDY) != OTG_NRDY,
+ 10, 100000);
+ if (ret)
+ dev_warn(parent, "wait OTG ready timeout\n");
+ }
+
+ return ret;
+
+}
+
+static int cdns_imx_resume(struct device *dev)
+{
+ struct cdns_imx *data = dev_get_drvdata(dev);
+
+ return clk_bulk_prepare_enable(data->num_clks, data->clks);
+}
+
+static int cdns_imx_suspend(struct device *dev)
+{
+ struct cdns_imx *data = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+
+ return 0;
+}
+#else
+static int cdns_imx_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns_imx_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_imx_suspend, cdns_imx_resume, NULL)
+};
+
static const struct of_device_id cdns_imx_of_match[] = {
{ .compatible = "fsl,imx8qm-usb3", },
{},
@@ -206,6 +386,7 @@ static struct platform_driver cdns_imx_driver = {
.driver = {
.name = "cdns3-imx",
.of_match_table = cdns_imx_of_match,
+ .pm = &cdns_imx_pm_ops,
},
};
module_platform_driver(cdns_imx_driver);
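Each handshake in cdns_imx_platform_suspend() uses readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h>: it rereads the register into val until cond holds (returning 0) or timeout_us elapses (returning -ETIMEDOUT). Here every status bit is polled in 10 us steps for at most 100 ms, and a timeout only triggers a dev_warn() instead of aborting the power transition.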
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 5c1586ec7824..a0f73d4711ae 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -280,6 +280,10 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
enum usb_role real_role, current_role;
int ret = 0;
+ /* Depends on role switch class */
+ if (cdns->role_sw)
+ return 0;
+
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
@@ -371,6 +375,50 @@ pm_put:
return ret;
}
+static int set_phy_power_on(struct cdns3 *cdns)
+{
+ int ret;
+
+ ret = phy_power_on(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(cdns->usb3_phy);
+ if (ret)
+ phy_power_off(cdns->usb2_phy);
+
+ return ret;
+}
+
+static void set_phy_power_off(struct cdns3 *cdns)
+{
+ phy_power_off(cdns->usb3_phy);
+ phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3 core device
+ * @data: structure of cdns3
+ *
+ * Returns IRQ_HANDLED or IRQ_NONE
+ */
+static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
+{
+ struct cdns3 *cdns = data;
+
+ if (cdns->in_lpm) {
+ disable_irq_nosync(irq);
+ cdns->wakeup_pending = true;
+ if ((cdns->role == USB_ROLE_HOST) && cdns->host_dev)
+ pm_request_resume(&cdns->host_dev->dev);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
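This handler pairs with the resume path added further below: it claims the interrupt only while in_lpm is set, masks itself with disable_irq_nosync() (the _nosync variant being the one safe to call from interrupt context), records wakeup_pending, and kicks runtime resume of the host child. cdns3_controller_resume() later clears wakeup_pending and re-arms the line with enable_irq().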
+
/**
* cdns3_probe - probe for cdns3 core device
* @pdev: Pointer to cdns3 core platform device
@@ -397,6 +445,7 @@ static int cdns3_probe(struct platform_device *pdev)
return -ENOMEM;
cdns->dev = dev;
+ cdns->pdata = dev_get_platdata(dev);
platform_set_drvdata(pdev, cdns);
@@ -443,8 +492,21 @@ static int cdns3_probe(struct platform_device *pdev)
return -ENXIO;
}
+ cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
cdns->otg_res = *res;
+ cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+ if (cdns->wakeup_irq == -EPROBE_DEFER)
+ return cdns->wakeup_irq;
+ else if (cdns->wakeup_irq == 0)
+ return -EINVAL;
+
+ if (cdns->wakeup_irq < 0) {
+ dev_dbg(dev, "couldn't get wakeup irq\n");
+ cdns->wakeup_irq = 0x0;
+ }
+
mutex_init(&cdns->mutex);
cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
@@ -463,14 +525,10 @@ static int cdns3_probe(struct platform_device *pdev)
if (ret)
goto err1;
- ret = phy_power_on(cdns->usb2_phy);
+ ret = set_phy_power_on(cdns);
if (ret)
goto err2;
- ret = phy_power_on(cdns->usb3_phy);
- if (ret)
- goto err3;
-
sw_desc.set = cdns3_role_set;
sw_desc.get = cdns3_role_get;
sw_desc.allow_userspace_control = true;
@@ -482,20 +540,34 @@ static int cdns3_probe(struct platform_device *pdev)
if (IS_ERR(cdns->role_sw)) {
ret = PTR_ERR(cdns->role_sw);
dev_warn(dev, "Unable to register Role Switch\n");
- goto err4;
+ goto err3;
+ }
+
+ if (cdns->wakeup_irq) {
+ ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
+ cdns3_wakeup_irq,
+ IRQF_SHARED,
+ dev_name(cdns->dev), cdns);
+
+ if (ret) {
+ dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
+ goto err3;
+ }
}
ret = cdns3_drd_init(cdns);
if (ret)
- goto err5;
+ goto err4;
ret = cdns3_core_init_role(cdns);
if (ret)
- goto err5;
+ goto err4;
+ spin_lock_init(&cdns->lock);
device_set_wakeup_capable(dev, true);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_runtime_forbid(dev);
/*
* The controller needs less time between bus and controller suspend,
@@ -508,14 +580,11 @@ static int cdns3_probe(struct platform_device *pdev)
dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
return 0;
-err5:
+err4:
cdns3_drd_exit(cdns);
usb_role_switch_unregister(cdns->role_sw);
-err4:
- phy_power_off(cdns->usb3_phy);
-
err3:
- phy_power_off(cdns->usb2_phy);
+ set_phy_power_off(cdns);
err2:
phy_exit(cdns->usb3_phy);
err1:
@@ -539,59 +608,128 @@ static int cdns3_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
cdns3_exit_roles(cdns);
usb_role_switch_unregister(cdns->role_sw);
- phy_power_off(cdns->usb2_phy);
- phy_power_off(cdns->usb3_phy);
+ set_phy_power_off(cdns);
phy_exit(cdns->usb2_phy);
phy_exit(cdns->usb3_phy);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
-static int cdns3_suspend(struct device *dev)
+static int cdns3_set_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (cdns->pdata && cdns->pdata->platform_suspend)
+ ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
+
+ return ret;
+}
+
+static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
{
struct cdns3 *cdns = dev_get_drvdata(dev);
+ bool wakeup;
unsigned long flags;
- if (cdns->role == USB_ROLE_HOST)
+ if (cdns->in_lpm)
return 0;
- if (pm_runtime_status_suspended(dev))
- pm_runtime_resume(dev);
+ if (PMSG_IS_AUTO(msg))
+ wakeup = true;
+ else
+ wakeup = device_may_wakeup(dev);
- if (cdns->roles[cdns->role]->suspend) {
- spin_lock_irqsave(&cdns->gadget_dev->lock, flags);
- cdns->roles[cdns->role]->suspend(cdns, false);
- spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags);
- }
+ cdns3_set_platform_suspend(cdns->dev, true, wakeup);
+ set_phy_power_off(cdns);
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns->in_lpm = true;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
return 0;
}
-static int cdns3_resume(struct device *dev)
+static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
{
struct cdns3 *cdns = dev_get_drvdata(dev);
+ int ret;
unsigned long flags;
- if (cdns->role == USB_ROLE_HOST)
+ if (!cdns->in_lpm)
return 0;
- if (cdns->roles[cdns->role]->resume) {
- spin_lock_irqsave(&cdns->gadget_dev->lock, flags);
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ return ret;
+
+ cdns3_set_platform_suspend(cdns->dev, false, false);
+
+ spin_lock_irqsave(&cdns->lock, flags);
+ if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg))
cdns->roles[cdns->role]->resume(cdns, false);
- spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags);
+
+ cdns->in_lpm = false;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ if (cdns->wakeup_pending) {
+ cdns->wakeup_pending = false;
+ enable_irq(cdns->wakeup_irq);
+ }
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+ return ret;
+}
+
+static int cdns3_runtime_suspend(struct device *dev)
+{
+ return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int cdns3_runtime_resume(struct device *dev)
+{
+ return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
+}
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_suspend(struct device *dev)
+{
+ struct cdns3 *cdns = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (pm_runtime_status_suspended(dev))
+ pm_runtime_resume(dev);
+
+ if (cdns->roles[cdns->role]->suspend) {
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns->roles[cdns->role]->suspend(cdns, false);
+ spin_unlock_irqrestore(&cdns->lock, flags);
}
+ return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+}
+
+static int cdns3_resume(struct device *dev)
+{
+ int ret;
+
+ ret = cdns3_controller_resume(dev, PMSG_RESUME);
+ if (ret)
+ return ret;
+
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- return 0;
+ return ret;
}
-#endif
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops cdns3_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume)
+ SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL)
};
#ifdef CONFIG_OF
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 1ad1f1fe61e9..8a40d53d5ede 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -38,6 +38,12 @@ struct cdns3_role_driver {
};
#define CDNS3_XHCI_RESOURCES_NUM 2
+
+struct cdns3_platform_data {
+ int (*platform_suspend)(struct device *dev,
+ bool suspend, bool wakeup);
+};
+
/**
* struct cdns3 - Representation of Cadence USB3 DRD controller.
* @dev: pointer to Cadence device struct
@@ -50,6 +56,7 @@ struct cdns3_role_driver {
* @otg_regs: pointer to base of otg registers
* @otg_irq: irq number for otg controller
* @dev_irq: irq number for device controller
+ * @wakeup_irq: irq number for the wakeup event; it is optional
* @roles: array of supported roles for this controller
* @role: current role
* @host_dev: the child host device pointer for cdns3 core
@@ -62,6 +69,10 @@ struct cdns3_role_driver {
* This field based on firmware setting, kernel configuration
* and hardware configuration.
* @role_sw: pointer to role switch object.
+ * @in_lpm: indicates whether the controller is in low power mode
+ * @wakeup_pending: wakeup interrupt pending
+ * @pdata: platform data from glue layer
+ * @lock: spinlock structure
*/
struct cdns3 {
struct device *dev;
@@ -76,9 +87,11 @@ struct cdns3 {
#define CDNS3_CONTROLLER_V0 0
#define CDNS3_CONTROLLER_V1 1
u32 version;
+ bool phyrst_a_enable;
int otg_irq;
int dev_irq;
+ int wakeup_irq;
struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1];
enum usb_role role;
struct platform_device *host_dev;
@@ -89,6 +102,10 @@ struct cdns3 {
struct mutex mutex;
enum usb_dr_mode dr_mode;
struct usb_role_switch *role_sw;
+ bool in_lpm;
+ bool wakeup_pending;
+ struct cdns3_platform_data *pdata;
+ spinlock_t lock;
};
int cdns3_hw_role_switch(struct cdns3 *cdns);
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 6234bcd6158a..38ccd29e4cde 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/usb/otg.h>
+#include <linux/phy/phy.h>
#include "gadget.h"
#include "drd.h"
@@ -42,6 +43,18 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
reg = readl(&cdns->otg_v1_regs->override);
reg |= OVERRIDE_IDPULLUP;
writel(reg, &cdns->otg_v1_regs->override);
+
+ /*
+ * Enable the workaround feature built into the
+ * controller to address an issue with the RX Sensitivity
+ * test (EL_17) for the USB2 PHY. The issue only occurs
+ * for controller version 0x0002450D.
+ */
+ if (cdns->phyrst_a_enable) {
+ reg = readl(&cdns->otg_v1_regs->phyrst_cfg);
+ reg |= PHYRST_CFG_PHYRST_A_ENABLE;
+ writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
+ }
} else {
reg = readl(&cdns->otg_v0_regs->ctrl1);
reg |= OVERRIDE_IDPULLUP_V0;
@@ -145,6 +158,7 @@ int cdns3_drd_host_on(struct cdns3 *cdns)
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
+ phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST);
return ret;
}
@@ -164,6 +178,7 @@ void cdns3_drd_host_off(struct cdns3 *cdns)
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_HOST_STATE_MASK),
1, 2000000);
+ phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
/**
@@ -190,6 +205,7 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns)
return ret;
}
+ phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
return 0;
}
@@ -213,6 +229,7 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns)
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_DEV_STATE_MASK),
1, 2000000);
+ phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
/**
@@ -293,6 +310,9 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
if (cdns->dr_mode != USB_DR_MODE_OTG)
return IRQ_NONE;
+ if (cdns->in_lpm)
+ return ret;
+
reg = readl(&cdns->otg_regs->ivect);
if (!reg)
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index 7e7cf7fa2dd3..f1ccae285a16 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -31,7 +31,7 @@ struct cdns3_otg_regs {
__le32 simulate;
__le32 override;
__le32 susp_ctrl;
- __le32 reserved4;
+ __le32 phyrst_cfg;
__le32 anasts;
__le32 adp_ramp_time;
__le32 ctrl1;
@@ -153,6 +153,9 @@ struct cdns3_otg_common_regs {
/* Only for CDNS3_CONTROLLER_V0 version */
#define OVERRIDE_IDPULLUP_V0 BIT(24)
+/* PHYRST_CFG - bitmasks */
+#define PHYRST_CFG_PHYRST_A_ENABLE BIT(0)
+
#define CDNS3_ID_PERIPHERAL 1
#define CDNS3_ID_HOST 0
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index d9779abc65b2..d3121a32cc68 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -137,48 +137,36 @@ static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev,
struct usb_ctrlrequest *ctrl_req)
{
enum usb_device_state device_state = priv_dev->gadget.state;
- struct cdns3_endpoint *priv_ep;
u32 config = le16_to_cpu(ctrl_req->wValue);
int result = 0;
- int i;
switch (device_state) {
case USB_STATE_ADDRESS:
- /* Configure non-control EPs */
- for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
- priv_ep = priv_dev->eps[i];
- if (!priv_ep)
- continue;
-
- if (priv_ep->flags & EP_CLAIMED)
- cdns3_ep_config(priv_ep);
- }
-
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
- if (result)
- return result;
-
- if (!config) {
- cdns3_hw_reset_eps_config(priv_dev);
- usb_gadget_set_state(&priv_dev->gadget,
- USB_STATE_ADDRESS);
- }
+ if (result || !config)
+ goto reset_config;
break;
case USB_STATE_CONFIGURED:
result = cdns3_ep0_delegate_req(priv_dev, ctrl_req);
+ if (!config && !result)
+ goto reset_config;
- if (!config && !result) {
- cdns3_hw_reset_eps_config(priv_dev);
- usb_gadget_set_state(&priv_dev->gadget,
- USB_STATE_ADDRESS);
- }
break;
default:
- result = -EINVAL;
+ return -EINVAL;
}
+ return 0;
+
+reset_config:
+ if (result != USB_GADGET_DELAYED_STATUS)
+ cdns3_hw_reset_eps_config(priv_dev);
+
+ usb_gadget_set_state(&priv_dev->gadget,
+ USB_STATE_ADDRESS);
+
return result;
}
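The new reset_config label folds the two former copies of the teardown into one place: a failed delegation or SET_CONFIGURATION(0) in the ADDRESS state, and a successful SET_CONFIGURATION(0) in the CONFIGURED state, all reset the endpoint hardware (skipped while the class driver returns USB_GADGET_DELAYED_STATUS) and drop the gadget back to USB_STATE_ADDRESS.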
@@ -705,6 +693,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
unsigned long flags;
int ret = 0;
u8 zlp = 0;
+ int i;
spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);
@@ -717,9 +706,28 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
/* send STATUS stage. Should be called only for SET_CONFIGURATION */
if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
+ u32 val;
+
cdns3_select_ep(priv_dev, 0x00);
+
+ /*
+ * Configure all non-control EPs that are not enabled by the class driver
+ */
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
+ priv_ep = priv_dev->eps[i];
+ if (priv_ep && priv_ep->flags & EP_CLAIMED &&
+ !(priv_ep->flags & EP_ENABLED))
+ cdns3_ep_config(priv_ep, 0);
+ }
+
cdns3_set_hw_configuration(priv_dev);
cdns3_ep0_complete_setup(priv_dev, 0, 1);
+ /* wait until the configuration is set */
+ ret = readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
+ val & USB_STS_CFGSTS_MASK, 1, 100);
+ if (ret == -ETIMEDOUT)
+ dev_warn(priv_dev->dev, "timeout for waiting configuration set\n");
+
request->actual = 0;
priv_dev->status_completion_no_call = true;
priv_dev->pending_status_request = request;
@@ -731,7 +739,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
* ep0_queue is back.
*/
queue_work(system_freezable_wq, &priv_dev->pending_status_wq);
- return 0;
+ return ret;
}
if (!list_empty(&priv_ep->pending_req_list)) {
@@ -803,6 +811,7 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
struct cdns3_usb_regs __iomem *regs;
struct cdns3_endpoint *priv_ep;
u32 max_packet_size = 64;
+ u32 ep_cfg;
regs = priv_dev->regs;
@@ -834,8 +843,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
BIT(0) | BIT(16));
}
- writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
- &regs->ep_cfg);
+ ep_cfg = EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size);
+
+ if (!(priv_ep->flags & EP_CONFIGURED))
+ writel(ep_cfg, &regs->ep_cfg);
writel(EP_STS_EN_SETUPEN | EP_STS_EN_DESCMISEN | EP_STS_EN_TRBERREN,
&regs->ep_sts_en);
@@ -843,8 +854,10 @@ void cdns3_ep0_config(struct cdns3_device *priv_dev)
/* init ep in */
cdns3_select_ep(priv_dev, USB_DIR_IN);
- writel(EP_CFG_ENABLE | EP_CFG_MAXPKTSIZE(max_packet_size),
- &regs->ep_cfg);
+ if (!(priv_ep->flags & EP_CONFIGURED))
+ writel(ep_cfg, &regs->ep_cfg);
+
+ priv_ep->flags |= EP_CONFIGURED;
writel(EP_STS_EN_SETUPEN | EP_STS_EN_TRBERREN, &regs->ep_sts_en);
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index dea649ee173b..66c1e6723eb1 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -261,8 +261,8 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
*/
link_trb->control = 0;
} else {
- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
- link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
+ link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
+ link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
}
return 0;
}
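The cpu_to_le32()/le32_to_cpu() conversions added throughout this patch follow the standard rule for DMA descriptors: the controller consumes TRBs as little-endian, so each field is converted once when written and converted back when read (a no-op on little-endian hosts, a byte swap on big-endian ones). The pattern, as a sketch:

/* writing a field the hardware will read */
trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK));

/* reading a field the hardware may have updated */
if (le32_to_cpu(trb->control) & TRB_CYCLE)
	/* ... hardware still owns this TRB ... */;

/* a read-modify-write needs both conversions */
trb->control = cpu_to_le32(le32_to_cpu(trb->control) | TRB_CHAIN);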
@@ -296,6 +296,8 @@ static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
*/
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
+ int i;
+
writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
cdns3_allow_enable_l1(priv_dev, 0);
@@ -304,6 +306,10 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
priv_dev->out_mem_is_allocated = 0;
priv_dev->wait_for_setup = 0;
priv_dev->using_streams = 0;
+
+ for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
+ if (priv_dev->eps[i])
+ priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}
/**
@@ -462,6 +468,36 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev,
(reg) |= EP_STS_EN_DESCMISEN; \
} } while (0)
+static void __cdns3_descmiss_copy_data(struct usb_request *request,
+ struct usb_request *descmiss_req)
+{
+ int length = request->actual + descmiss_req->actual;
+ struct scatterlist *s = request->sg;
+
+ if (!s) {
+ if (length <= request->length) {
+ memcpy(&((u8 *)request->buf)[request->actual],
+ descmiss_req->buf,
+ descmiss_req->actual);
+ request->actual = length;
+ } else {
+ /* This should never occur */
+ request->status = -ENOMEM;
+ }
+ } else {
+ if (length <= sg_dma_len(s)) {
+ void *p = phys_to_virt(sg_dma_address(s));
+
+ memcpy(&((u8 *)p)[request->actual],
+ descmiss_req->buf,
+ descmiss_req->actual);
+ request->actual = length;
+ } else {
+ request->status = -ENOMEM;
+ }
+ }
+}
+
/**
* cdns3_wa2_descmiss_copy_data copy data from internal requests to
* request queued by class driver.
@@ -476,7 +512,6 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
int chunk_end;
- int length;
descmiss_priv_req =
cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
@@ -487,22 +522,9 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
break;
chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
- length = request->actual + descmiss_req->actual;
-
request->status = descmiss_req->status;
-
- if (length <= request->length) {
- memcpy(&((u8 *)request->buf)[request->actual],
- descmiss_req->buf,
- descmiss_req->actual);
- request->actual = length;
- } else {
- /* It should never occures */
- request->status = -ENOMEM;
- }
-
+ __cdns3_descmiss_copy_data(request, descmiss_req);
list_del_init(&descmiss_priv_req->list);
-
kfree(descmiss_req->buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
--priv_ep->wa2_counter;
@@ -817,6 +839,8 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
request->length);
priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
+ /* All TRBs have finished, clear the counter */
+ priv_req->finished_trb = 0;
trace_cdns3_gadget_giveback(priv_req);
if (priv_dev->dev_ver < DEV_VER_V2) {
@@ -847,10 +871,10 @@ static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
priv_ep->wa1_trb_index = 0xFFFF;
if (priv_ep->wa1_cycle_bit) {
priv_ep->wa1_trb->control =
- priv_ep->wa1_trb->control | 0x1;
+ priv_ep->wa1_trb->control | cpu_to_le32(0x1);
} else {
priv_ep->wa1_trb->control =
- priv_ep->wa1_trb->control & ~0x1;
+ priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
}
}
}
@@ -1008,17 +1032,16 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;
if (!request->num_sgs) {
- trb->buffer = TRB_BUFFER(trb_dma);
+ trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
length = request->length;
} else {
- trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address);
+ trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
length = request->sg[sg_idx].length;
}
tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);
- trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) |
- TRB_LEN(length);
+ trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));
/*
* For DEV_VER_V2 controller version we have enabled
@@ -1027,11 +1050,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
*/
if (priv_dev->dev_ver >= DEV_VER_V2) {
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
- trb->length |= TRB_TDL_SS_SIZE(tdl);
+ trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
}
priv_req->flags |= REQUEST_PENDING;
- trb->control = control;
+ trb->control = cpu_to_le32(control);
trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
@@ -1091,6 +1114,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct cdns3_request *priv_req;
struct cdns3_trb *trb;
+ struct cdns3_trb *link_trb;
dma_addr_t trb_dma;
u32 togle_pcs = 1;
int sg_iter = 0;
@@ -1099,11 +1123,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
u32 control;
int pcs;
u16 total_tdl = 0;
+ struct scatterlist *s = NULL;
+ bool sg_supported = !!(request->num_mapped_sgs);
if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
num_trb = priv_ep->interval;
else
- num_trb = request->num_sgs ? request->num_sgs : 1;
+ num_trb = sg_supported ? request->num_mapped_sgs : 1;
if (num_trb > priv_ep->free_trbs) {
priv_ep->flags |= EP_RING_FULL;
@@ -1129,7 +1155,6 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
/* prepare ring */
if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
- struct cdns3_trb *link_trb;
int doorbell, dma_index;
u32 ch_bit = 0;
@@ -1156,13 +1181,16 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
TRBS_PER_SEGMENT > 2)
ch_bit = TRB_CHAIN;
- link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
- TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
+ link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
+ TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
}
if (priv_dev->dev_ver <= DEV_VER_V2)
togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
+ if (sg_supported)
+ s = request->sg;
+
/* set incorrect Cycle Bit for first trb*/
control = priv_ep->pcs ? 0 : TRB_CYCLE;
@@ -1172,13 +1200,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
/* fill TRB */
control |= TRB_TYPE(TRB_NORMAL);
- trb->buffer = TRB_BUFFER(request->num_sgs == 0
- ? trb_dma : request->sg[sg_iter].dma_address);
-
- if (likely(!request->num_sgs))
+ if (sg_supported) {
+ trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
+ length = sg_dma_len(s);
+ } else {
+ trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
length = request->length;
- else
- length = request->sg[sg_iter].length;
+ }
if (likely(priv_dev->dev_ver >= DEV_VER_V2))
td_size = DIV_ROUND_UP(length,
@@ -1187,10 +1215,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
total_tdl += DIV_ROUND_UP(length,
priv_ep->endpoint.maxpacket);
- trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
- TRB_LEN(length);
+ trb->length = cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
+ TRB_LEN(length));
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
- trb->length |= TRB_TDL_SS_SIZE(td_size);
+ trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
else
control |= TRB_TDL_HS_SIZE(td_size);
@@ -1212,9 +1240,18 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
}
if (sg_iter)
- trb->control = control;
+ trb->control = cpu_to_le32(control);
else
- priv_req->trb->control = control;
+ priv_req->trb->control = cpu_to_le32(control);
+
+ if (sg_supported) {
+ trb->control |= TRB_ISP;
+ /* Don't set chain bit for last TRB */
+ if (sg_iter < num_trb - 1)
+ trb->control |= TRB_CHAIN;
+
+ s = sg_next(s);
+ }
control = 0;
++sg_iter;
@@ -1226,9 +1263,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
trb = priv_req->trb;
priv_req->flags |= REQUEST_PENDING;
+ priv_req->num_of_trb = num_trb;
if (sg_iter == 1)
- trb->control |= TRB_IOC | TRB_ISP;
+ trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);
if (priv_dev->dev_ver < DEV_VER_V2 &&
(priv_ep->flags & EP_TDLCHK_EN)) {
@@ -1254,12 +1292,27 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
/* give the TD to the consumer*/
if (togle_pcs)
- trb->control = trb->control ^ 1;
+ trb->control = trb->control ^ cpu_to_le32(1);
if (priv_dev->dev_ver <= DEV_VER_V2)
cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
- trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+ if (num_trb > 1) {
+ int i = 0;
+
+ while (i < num_trb) {
+ trace_cdns3_prepare_trb(priv_ep, trb + i);
+ if (trb + i == link_trb) {
+ trb = priv_ep->trb_pool;
+ num_trb = num_trb - i;
+ i = 0;
+ } else {
+ i++;
+ }
+ }
+ } else {
+ trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
+ }
/*
* Memory barrier - Cycle Bit must be set before trb->length and
@@ -1310,7 +1363,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
struct cdns3_endpoint *priv_ep;
struct usb_ep *ep;
- int val;
if (priv_dev->hw_configured_flag)
return;
@@ -1320,10 +1372,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
cdns3_set_register_bit(&priv_dev->regs->usb_conf,
USB_CONF_U1EN | USB_CONF_U2EN);
- /* wait until configuration set */
- readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
- val & USB_STS_CFGSTS_MASK, 1, 100);
-
priv_dev->hw_configured_flag = 1;
list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
@@ -1337,7 +1385,7 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
}
/**
- * cdns3_request_handled - check whether request has been handled by DMA
+ * cdns3_trb_handled - check whether trb has been handled by DMA
*
* @priv_ep: extended endpoint object.
* @priv_req: request object for checking
@@ -1354,32 +1402,28 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
* ET = priv_req->end_trb - index of last TRB in transfer ring
* CI = current_index - index of processed TRB by DMA.
*
- * As first step, function checks if cycle bit for priv_req->start_trb is
- * correct.
+ * As a first step, we check whether the TRB is between ST and ET.
+ * Then we check whether the cycle bit for index priv_ep->dequeue
+ * is correct.
*
* some rules:
- * 1. priv_ep->dequeue never exceed current_index.
+ * 1. priv_ep->dequeue never equals current_index.
* 2 priv_ep->enqueue never exceed priv_ep->dequeue
* 3. exception: priv_ep->enqueue == priv_ep->dequeue
* and priv_ep->free_trbs is zero.
* This case indicate that TR is full.
*
- * Then We can split recognition into two parts:
+ * In the two cases below, the request has been handled.
* Case 1 - priv_ep->dequeue < current_index
* SR ... EQ ... DQ ... CI ... ER
* SR ... DQ ... CI ... EQ ... ER
*
- * Request has been handled by DMA if ST and ET is between DQ and CI.
- *
* Case 2 - priv_ep->dequeue > current_index
- * This situation take place when CI go through the LINK TRB at the end of
+ * This situation takes place when CI goes through the LINK TRB at the end of
* transfer ring.
* SR ... CI ... EQ ... DQ ... ER
- *
- * Request has been handled by DMA if ET is less then CI or
- * ET is greater or equal DQ.
*/
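The dequeue-index checks added below amount to a circular-range membership test; written as a standalone predicate it would look roughly like this (illustrative sketch, not part of the patch):

/* Is ring index deq inside [start, end] when the range may wrap? */
static bool index_in_request(int start, int end, int deq)
{
	if (start < end)		/* range does not wrap */
		return deq >= start && deq <= end;
	if (start > end)		/* range wraps past the ring end */
		return deq >= start || deq <= end;
	return deq == start;		/* single-TRB request */
}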
-static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
+static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
struct cdns3_request *priv_req)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
@@ -1391,9 +1435,27 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
- trb = &priv_ep->trb_pool[priv_req->start_trb];
+ /* bail out if the TRB at the dequeue index is outside this request */
+ if (priv_req->start_trb < priv_req->end_trb) {
+ if (priv_ep->dequeue > priv_req->end_trb)
+ goto finish;
+
+ if (priv_ep->dequeue < priv_req->start_trb)
+ goto finish;
+ }
+
+ if ((priv_req->start_trb > priv_req->end_trb) &&
+ (priv_ep->dequeue > priv_req->end_trb) &&
+ (priv_ep->dequeue < priv_req->start_trb))
+ goto finish;
+
+ if ((priv_req->start_trb == priv_req->end_trb) &&
+ (priv_ep->dequeue != priv_req->end_trb))
+ goto finish;
+
+ trb = &priv_ep->trb_pool[priv_ep->dequeue];
- if ((trb->control & TRB_CYCLE) != priv_ep->ccs)
+ if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
goto finish;
if (doorbell == 1 && current_index == priv_ep->dequeue)
@@ -1413,12 +1475,8 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
!priv_ep->dequeue)
goto finish;
- if (priv_req->end_trb >= priv_ep->dequeue &&
- priv_req->end_trb < current_index)
- handled = 1;
+ handled = 1;
} else if (priv_ep->dequeue > current_index) {
- if (priv_req->end_trb < current_index ||
- priv_req->end_trb >= priv_ep->dequeue)
handled = 1;
}
@@ -1434,6 +1492,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
struct cdns3_request *priv_req;
struct usb_request *request;
struct cdns3_trb *trb;
+ bool request_handled = false;
+ bool transfer_end = false;
while (!list_empty(&priv_ep->pending_req_list)) {
request = cdns3_next_request(&priv_ep->pending_req_list);
@@ -1442,7 +1502,7 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
trb = priv_ep->trb_pool + priv_ep->dequeue;
/* Request was dequeued and TRB was changed to TRB_LINK. */
- if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+ if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
trace_cdns3_complete_trb(priv_ep, trb);
cdns3_move_deq_to_next_trb(priv_req);
}
@@ -1453,20 +1513,32 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
*/
cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
- if (!cdns3_request_handled(priv_ep, priv_req))
- goto prepare_next_td;
+ while (cdns3_trb_handled(priv_ep, priv_req)) {
+ priv_req->finished_trb++;
+ if (priv_req->finished_trb >= priv_req->num_of_trb)
+ request_handled = true;
- trb = priv_ep->trb_pool + priv_ep->dequeue;
- trace_cdns3_complete_trb(priv_ep, trb);
+ trb = priv_ep->trb_pool + priv_ep->dequeue;
+ trace_cdns3_complete_trb(priv_ep, trb);
- if (trb != priv_req->trb)
- dev_warn(priv_dev->dev,
- "request_trb=0x%p, queue_trb=0x%p\n",
- priv_req->trb, trb);
+ if (!transfer_end)
+ request->actual +=
+ TRB_LEN(le32_to_cpu(trb->length));
- request->actual = TRB_LEN(le32_to_cpu(trb->length));
- cdns3_move_deq_to_next_trb(priv_req);
- cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ if (priv_req->num_of_trb > 1 &&
+ le32_to_cpu(trb->control) & TRB_SMM)
+ transfer_end = true;
+
+ cdns3_ep_inc_deq(priv_ep);
+ }
+
+ if (request_handled) {
+ cdns3_gadget_giveback(priv_ep, priv_req, 0);
+ request_handled = false;
+ transfer_end = false;
+ } else {
+ goto prepare_next_td;
+ }
if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
TRBS_PER_SEGMENT == 2)
@@ -1574,7 +1646,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
* that host ignore the ERDY packet and driver has to send it
* again.
*/
- if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) |
+ if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
EP_STS_HOSTPP(ep_sts_reg))) {
writel(EP_CMD_ERDY |
EP_CMD_ERDY_SID(priv_ep->last_stream_id),
@@ -1678,11 +1750,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
- if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
- spin_unlock(&priv_dev->lock);
+ if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
- spin_lock(&priv_dev->lock);
- }
}
/**
@@ -1693,6 +1762,7 @@ static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
*/
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
u32 usb_ists)
+__must_hold(&priv_dev->lock)
{
int speed = 0;
@@ -1717,7 +1787,9 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
/* Disconnection detected */
if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
+ spin_unlock(&priv_dev->lock);
cdns3_disconnect_gadget(priv_dev);
+ spin_lock(&priv_dev->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
cdns3_hw_reset_eps_config(priv_dev);
@@ -1769,9 +1841,13 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
struct cdns3_device *priv_dev = data;
+ struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
irqreturn_t ret = IRQ_NONE;
u32 reg;
+ if (cdns->in_lpm)
+ return ret;
+
/* check USB device interrupt */
reg = readl(&priv_dev->regs->usb_ists);
if (reg) {
@@ -1907,27 +1983,6 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
return 0;
}
-static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
-{
- if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
- return;
-
- if (priv_dev->dev_ver >= DEV_VER_V3) {
- u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
-
- /*
- * Stream capable endpoints are handled by using ep_tdl
- * register. Other endpoints use TDL from TRB feature.
- */
- cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask);
- }
-
- /* Enable Stream Bit TDL chk and SID chk */
- cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN |
- EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
-}
-
static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep)
{
@@ -1965,8 +2020,9 @@ static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
/**
* cdns3_ep_config Configure hardware endpoint
* @priv_ep: extended endpoint object
+ * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
*/
-void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
+int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
{
bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
@@ -2027,7 +2083,7 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
break;
default:
/* all other speed are not supported */
- return;
+ return -EINVAL;
}
if (max_packet_size == 1024)
@@ -2037,11 +2093,33 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
else
priv_ep->trb_burst_size = 16;
- ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
- !!priv_ep->dir);
- if (ret) {
- dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
- return;
+ /* onchip buffer is only allocated before configuration */
+ if (!priv_dev->hw_configured_flag) {
+ ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
+ !!priv_ep->dir);
+ if (ret) {
+ dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
+ return ret;
+ }
+ }
+
+ if (enable)
+ ep_cfg |= EP_CFG_ENABLE;
+
+ if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
+ if (priv_dev->dev_ver >= DEV_VER_V3) {
+ u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
+
+ /*
+ * Stream capable endpoints are handled by using ep_tdl
+ * register. Other endpoints use TDL from TRB feature.
+ */
+ cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
+ mask);
+ }
+
+ /* Enable Stream Bit TDL chk and SID chk */
+ ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
}
ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
@@ -2051,9 +2129,12 @@ void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
cdns3_select_ep(priv_dev, bEndpointAddress);
writel(ep_cfg, &priv_dev->regs->ep_cfg);
+ priv_ep->flags |= EP_CONFIGURED;
dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
priv_ep->name, ep_cfg);
+
+ return 0;
}
/* Find correct direction for HW endpoint according to description */
@@ -2194,7 +2275,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
u32 bEndpointAddress;
unsigned long flags;
int enable = 1;
- int ret;
+ int ret = 0;
int val;
priv_ep = ep_to_cdns3_ep(ep);
@@ -2233,6 +2314,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
bEndpointAddress = priv_ep->num | priv_ep->dir;
cdns3_select_ep(priv_dev, bEndpointAddress);
+ /*
+ * For some versions of the controller, at some point during ISO OUT
+ * traffic the DMA reads the Transfer Ring for an EP which has never
+ * got a doorbell. This issue was detected only in simulation, but the
+ * driver protects against it anyway: an ISO OUT endpoint is enabled
+ * only just before setting DRBL. This special treatment of ISO OUT
+ * endpoints is recommended by the controller specification.
+ */
+ if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
+ enable = 0;
+
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
/*
* Enable stream support (SS mode) related interrupts
@@ -2243,13 +2335,17 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
EP_STS_EN_STREAMREN;
priv_ep->use_streams = true;
- cdns3_stream_ep_reconfig(priv_dev, priv_ep);
+ ret = cdns3_ep_config(priv_ep, enable);
priv_dev->using_streams |= true;
}
+ } else {
+ ret = cdns3_ep_config(priv_ep, enable);
}
- ret = cdns3_allocate_trb_pool(priv_ep);
+ if (ret)
+ goto exit;
+ ret = cdns3_allocate_trb_pool(priv_ep);
if (ret)
goto exit;
@@ -2279,20 +2375,6 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep,
writel(reg, &priv_dev->regs->ep_sts_en);
- /*
- * For some versions of controller at some point during ISO OUT traffic
- * DMA reads Transfer Ring for the EP which has never got doorbell.
- * This issue was detected only on simulation, but to avoid this issue
- * driver add protection against it. To fix it driver enable ISO OUT
- * endpoint before setting DRBL. This special treatment of ISO OUT
- * endpoints are recommended by controller specification.
- */
- if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
- enable = 0;
-
- if (enable)
- cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
-
ep->desc = desc;
priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
@@ -2552,10 +2634,10 @@ found:
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring && link_trb) {
- link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
- ((priv_req->end_trb + 1) * TRB_SIZE));
- link_trb->control = (link_trb->control & TRB_CYCLE) |
- TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+ link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
+ ((priv_req->end_trb + 1) * TRB_SIZE)));
+ link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
+ TRB_TYPE(TRB_LINK) | TRB_CHAIN);
if (priv_ep->wa1_trb == priv_req->trb)
cdns3_wa1_restore_cycle_bit(priv_ep);
@@ -2610,7 +2692,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
if (trb)
- trb->control = trb->control ^ TRB_CYCLE;
+ trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
@@ -2625,7 +2707,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
if (trb)
- trb->control = trb->control ^ TRB_CYCLE;
+ trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+
cdns3_rearm_transfer(priv_ep, 1);
}
@@ -2735,10 +2818,13 @@ static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
- if (is_on)
+ if (is_on) {
writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
- else
+ } else {
+ writel(~0, &priv_dev->regs->ep_ists);
+ writel(~0, &priv_dev->regs->usb_ists);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
+ }
return 0;
}
@@ -2779,6 +2865,8 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev)
/* enable generic interrupt*/
writel(USB_IEN_INIT, &regs->usb_ien);
writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
+ /* keep Fast Access bit */
+ writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
cdns3_configure_dmult(priv_dev, NULL);
}
@@ -2862,6 +2950,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
/* disable interrupt for device */
writel(0, &priv_dev->regs->usb_ien);
+ writel(0, &priv_dev->regs->usb_pwr);
writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
return 0;
@@ -2984,18 +3073,26 @@ err:
return -ENOMEM;
}
+static void cdns3_gadget_release(struct device *dev)
+{
+ struct cdns3_device *priv_dev = container_of(dev,
+ struct cdns3_device, gadget.dev);
+
+ kfree(priv_dev);
+}
+
void cdns3_gadget_exit(struct cdns3 *cdns)
{
struct cdns3_device *priv_dev;
priv_dev = cdns->gadget_dev;
- devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
- usb_del_gadget_udc(&priv_dev->gadget);
+ usb_del_gadget(&priv_dev->gadget);
+ devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev);
cdns3_free_all_eps(priv_dev);
@@ -3015,7 +3112,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns)
priv_dev->setup_dma);
kfree(priv_dev->zlp_buf);
- kfree(priv_dev);
+ usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
cdns3_drd_gadget_off(cdns);
}
@@ -3030,6 +3127,8 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
if (!priv_dev)
return -ENOMEM;
+ usb_initialize_gadget(cdns->dev, &priv_dev->gadget,
+ cdns3_gadget_release);
cdns->gadget_dev = priv_dev;
priv_dev->sysdev = cdns->dev;
priv_dev->dev = cdns->dev;
@@ -3070,7 +3169,6 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
priv_dev->gadget.ops = &cdns3_gadget_ops;
priv_dev->gadget.name = "usb-ss-gadget";
- priv_dev->gadget.sg_supported = 1;
priv_dev->gadget.quirk_avoids_skb_reserve = 1;
priv_dev->gadget.irq = cdns->dev_irq;
@@ -3109,6 +3207,8 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
readl(&priv_dev->regs->usb_cap2));
priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
+ if (priv_dev->dev_ver >= DEV_VER_V2)
+ priv_dev->gadget.sg_supported = 1;
priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
if (!priv_dev->zlp_buf) {
@@ -3117,10 +3217,9 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
}
/* add USB gadget device */
- ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget);
+ ret = usb_add_gadget(&priv_dev->gadget);
if (ret < 0) {
- dev_err(priv_dev->dev,
- "Failed to register USB device controller\n");
+ dev_err(priv_dev->dev, "Failed to add gadget\n");
goto err4;
}
@@ -3133,6 +3232,7 @@ err3:
err2:
cdns3_free_all_eps(priv_dev);
err1:
+ usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
return ret;
}
@@ -3175,10 +3275,13 @@ err0:
}
static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+__must_hold(&cdns->lock)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
+ spin_unlock(&cdns->lock);
cdns3_disconnect_gadget(priv_dev);
+ spin_lock(&cdns->lock);
priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h
index 52765b098b9e..21fa461c518e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/gadget.h
@@ -966,7 +966,7 @@ struct cdns3_usb_regs {
/*
* USBSS-DEV DMA interface.
*/
-#define TRBS_PER_SEGMENT 40
+#define TRBS_PER_SEGMENT 600
#define ISO_MAX_INTERVAL 10
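Raising TRBS_PER_SEGMENT from 40 to 600 sizes the transfer ring for scatter-gather, where every mapped segment consumes one TRB. Assuming the three-word cdns3_trb layout (12 bytes per TRB), the per-endpoint TRB pool grows from 40 * 12 = 480 bytes to 600 * 12 = 7200 bytes, roughly 7 KiB.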
@@ -1030,6 +1030,11 @@ struct cdns3_trb {
* When set to '1', the device will toggle its interpretation of the Cycle bit
*/
#define TRB_TOGGLE BIT(1)
+/*
+ * The controller sets this bit when an OUTSMM (OUT size mismatch) is
+ * detected; it applies to normal TRBs only.
+ */
+#define TRB_SMM BIT(1)
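Note that TRB_SMM reuses bit 1, which is TRB_TOGGLE on link TRBs, so its meaning depends on the TRB type. A hypothetical helper making that explicit (sketch, not part of the patch):

static inline bool cdns3_trb_out_size_mismatch(const struct cdns3_trb *trb)
{
	u32 ctrl = le32_to_cpu(trb->control);

	/* bit 1 is TRB_TOGGLE on link TRBs, OUTSMM on normal TRBs */
	return TRB_FIELD_TO_TYPE(ctrl) == TRB_NORMAL && (ctrl & TRB_SMM);
}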
/*
* Short Packet (SP). OUT EPs at DMULT=1 only. Indicates if the TRB was
@@ -1067,7 +1072,7 @@ struct cdns3_trb {
#define TRB_TDL_SS_SIZE_GET(p) (((p) & GENMASK(23, 17)) >> 17)
/* transfer_len bitmasks - bits 31:24 */
-#define TRB_BURST_LEN(p) (((p) << 24) & GENMASK(31, 24))
+#define TRB_BURST_LEN(p) ((unsigned int)((p) << 24) & GENMASK(31, 24))
#define TRB_BURST_LEN_GET(p) (((p) & GENMASK(31, 24)) >> 24)
/* Data buffer pointer bitmasks*/
@@ -1154,6 +1159,7 @@ struct cdns3_endpoint {
#define EP_QUIRK_EXTRA_BUF_DET BIT(12)
#define EP_QUIRK_EXTRA_BUF_EN BIT(13)
#define EP_TDLCHK_EN BIT(15)
+#define EP_CONFIGURED BIT(16)
u32 flags;
struct cdns3_request *descmis_req;
@@ -1215,6 +1221,8 @@ struct cdns3_aligned_buf {
* this endpoint
* @flags: flag specifying special usage of request
* @list: used by internally allocated request to add to wa2_descmiss_req_list.
+ * @finished_trb: number of TRBs that have already finished for this request
+ * @num_of_trb: total number of TRBs in this request
*/
struct cdns3_request {
struct usb_request request;
@@ -1230,6 +1238,8 @@ struct cdns3_request {
#define REQUEST_UNALIGNED BIT(4)
u32 flags;
struct list_head list;
+ int finished_trb;
+ int num_of_trb;
};
#define to_cdns3_request(r) (container_of(r, struct cdns3_request, request))
@@ -1351,7 +1361,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
int cdns3_init_ep0(struct cdns3_device *priv_dev,
struct cdns3_endpoint *priv_ep);
void cdns3_ep0_config(struct cdns3_device *priv_dev);
-void cdns3_ep_config(struct cdns3_endpoint *priv_ep);
+int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable);
void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir);
int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev);
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 36c63d9ecd37..b3e2cb69762c 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -13,11 +13,13 @@
#include "core.h"
#include "drd.h"
#include "host-export.h"
+#include <linux/usb/hcd.h>
static int __cdns3_host_init(struct cdns3 *cdns)
{
struct platform_device *xhci;
int ret;
+ struct usb_hcd *hcd;
cdns3_drd_host_on(cdns);
@@ -43,6 +45,11 @@ static int __cdns3_host_init(struct cdns3 *cdns)
goto err1;
}
+ /* Glue needs to access xHCI region register for Power management */
+ hcd = platform_get_drvdata(xhci);
+ if (hcd)
+ cdns->xhci_regs = hcd->regs;
+
return 0;
err1:
platform_device_put(xhci);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index c39e2b615ac6..25c65accf089 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -165,6 +165,11 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
data->ulpi = 1;
+ of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
+ &data->emp_curr_control);
+ of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
+ &data->dc_vol_level_adjust);
+
return data;
}
@@ -609,7 +614,12 @@ static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev)
}
}
- return imx_controller_suspend(dev);
+ ret = imx_controller_suspend(dev);
+ if (ret)
+ return ret;
+
+ pinctrl_pm_select_sleep_state(dev);
+ return ret;
}
static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
@@ -617,6 +627,7 @@ static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
+ pinctrl_pm_select_default_state(dev);
ret = imx_controller_resume(dev);
if (!ret && data->supports_runtime_pm) {
pm_runtime_disable(dev);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 99f846119c00..999c65390b7f 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -26,6 +26,8 @@ struct imx_usbmisc_data {
unsigned int ext_vbus:1; /* Vbus from external event */
struct usb_phy *usb_phy;
enum usb_dr_mode available_role; /* runtime usb dr mode */
+ int emp_curr_control;
+ int dc_vol_level_adjust;
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 322e4de6b24a..6d8331e7da99 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -128,6 +128,12 @@
#define MX7D_USB_OTG_PHY_STATUS_VBUS_VLD BIT(3)
#define MX7D_USB_OTG_PHY_STATUS_CHRGDET BIT(29)
+#define MX7D_USB_OTG_PHY_CFG1 0x30
+#define TXPREEMPAMPTUNE0_BIT 28
+#define TXPREEMPAMPTUNE0_MASK (3 << 28)
+#define TXVREFTUNE0_BIT 20
+#define TXVREFTUNE0_MASK (0xf << 20)
+
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
MX6_BM_ID_WAKEUP)
@@ -649,6 +655,21 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID
| MX7D_USBNC_AUTO_RESUME,
usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ /* PHY tuning for signal quality */
+ reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
+ if (data->emp_curr_control && data->emp_curr_control <=
+ (TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) {
+ reg &= ~TXPREEMPAMPTUNE0_MASK;
+ reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT);
+ }
+
+ if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <=
+ (TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) {
+ reg &= ~TXVREFTUNE0_MASK;
+ reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT);
+ }
+
+ writel(reg, usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
}
spin_unlock_irqrestore(&usbmisc->lock, flags);
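Both tunings above are the same range-checked read-modify-write; factored into a hypothetical helper (names are illustrative):

static void mx7d_phy_set_field(void __iomem *addr, u32 mask,
			       unsigned int shift, u32 val)
{
	u32 reg = readl(addr);

	/* 0 keeps the silicon default; reject values wider than the field */
	if (!val || val > (mask >> shift))
		return;

	reg &= ~mask;
	reg |= val << shift;
	writel(reg, addr);
}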
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7f6f3ab5b8a6..f52f1bc0559f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -173,7 +173,7 @@ static int acm_wb_alloc(struct acm *acm)
for (;;) {
wb = &acm->wb[wbn];
if (!wb->use) {
- wb->use = 1;
+ wb->use = true;
wb->len = 0;
return wbn;
}
@@ -191,7 +191,8 @@ static int acm_wb_is_avail(struct acm *acm)
n = ACM_NW;
spin_lock_irqsave(&acm->write_lock, flags);
for (i = 0; i < ACM_NW; i++)
- n -= acm->wb[i].use;
+ if (acm->wb[i].use)
+ n--;
spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
@@ -201,7 +202,7 @@ static int acm_wb_is_avail(struct acm *acm)
*/
static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
- wb->use = 0;
+ wb->use = false;
acm->transmitting--;
usb_autopm_put_interface_async(acm->control);
}
@@ -507,6 +508,7 @@ static void acm_read_bulk_callback(struct urb *urb)
"%s - cooling babbling device\n", __func__);
usb_mark_last_busy(acm->dev);
set_bit(rb->index, &acm->urbs_in_error_delay);
+ set_bit(ACM_ERROR_DELAY, &acm->flags);
cooldown = true;
break;
default:
@@ -532,7 +534,7 @@ static void acm_read_bulk_callback(struct urb *urb)
if (stopped || stalled || cooldown) {
if (stalled)
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
else if (cooldown)
schedule_delayed_work(&acm->dwork, HZ / 2);
return;
@@ -562,13 +564,13 @@ static void acm_write_bulk(struct urb *urb)
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
set_bit(EVENT_TTY_WAKEUP, &acm->flags);
- schedule_work(&acm->work);
+ schedule_delayed_work(&acm->dwork, 0);
}
static void acm_softint(struct work_struct *work)
{
int i;
- struct acm *acm = container_of(work, struct acm, work);
+ struct acm *acm = container_of(work, struct acm, dwork.work);
if (test_bit(EVENT_RX_STALL, &acm->flags)) {
smp_mb(); /* against acm_suspend() */
@@ -584,7 +586,7 @@ static void acm_softint(struct work_struct *work)
if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
for (i = 0; i < acm->rx_buflimit; i++)
if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
- acm_submit_read_urb(acm, i, GFP_NOIO);
+ acm_submit_read_urb(acm, i, GFP_KERNEL);
}
if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
@@ -741,7 +743,7 @@ static void acm_port_shutdown(struct tty_port *port)
if (!urb)
break;
wb = urb->context;
- wb->use = 0;
+ wb->use = false;
usb_autopm_put_interface_async(acm->control);
}
@@ -792,7 +794,7 @@ static int acm_tty_write(struct tty_struct *tty,
wb = &acm->wb[wbn];
if (!acm->dev) {
- wb->use = 0;
+ wb->use = false;
spin_unlock_irqrestore(&acm->write_lock, flags);
return -ENODEV;
}
@@ -804,7 +806,7 @@ static int acm_tty_write(struct tty_struct *tty,
stat = usb_autopm_get_interface_async(acm->control);
if (stat) {
- wb->use = 0;
+ wb->use = false;
spin_unlock_irqrestore(&acm->write_lock, flags);
return stat;
}
@@ -1196,9 +1198,6 @@ static int acm_probe(struct usb_interface *intf,
return -EINVAL;
}
- if (!intf->cur_altsetting)
- return -EINVAL;
-
if (!buflen) {
if (intf->cur_altsetting->endpoint &&
intf->cur_altsetting->endpoint->extralen &&
@@ -1221,39 +1220,42 @@ static int acm_probe(struct usb_interface *intf,
call_intf_num = cmgmd->bDataInterface;
if (!union_header) {
- if (call_intf_num > 0) {
+ if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
+ dev_dbg(&intf->dev, "No union descriptor, assuming single interface\n");
+ combined_interfaces = 1;
+ control_interface = data_interface = intf;
+ goto look_for_collapsed_interface;
+ } else if (call_intf_num > 0) {
dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
- /* quirks for Droids MuIn LCD */
- if (quirks & NO_DATA_INTERFACE) {
- data_interface = usb_ifnum_to_if(usb_dev, 0);
- } else {
- data_intf_num = call_intf_num;
- data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
- }
+ data_intf_num = call_intf_num;
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
control_interface = intf;
} else {
- if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
- dev_dbg(&intf->dev,"No union descriptor, giving up\n");
- return -ENODEV;
- } else {
- dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
- combined_interfaces = 1;
- control_interface = data_interface = intf;
- goto look_for_collapsed_interface;
- }
+ dev_dbg(&intf->dev, "No union descriptor, giving up\n");
+ return -ENODEV;
}
} else {
+ int class = -1;
+
data_intf_num = union_header->bSlaveInterface0;
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
+
+ if (control_interface)
+ class = control_interface->cur_altsetting->desc.bInterfaceClass;
+
+ if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) {
+ dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n");
+ combined_interfaces = 1;
+ control_interface = data_interface = intf;
+ goto look_for_collapsed_interface;
+ }
}
if (!control_interface || !data_interface) {
dev_dbg(&intf->dev, "no interfaces\n");
return -ENODEV;
}
- if (!data_interface->cur_altsetting || !control_interface->cur_altsetting)
- return -ENODEV;
if (data_intf_num != call_intf_num)
dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
@@ -1280,10 +1282,8 @@ look_for_collapsed_interface:
skip_normal_probe:
/*workaround for switched interfaces */
- if (data_interface->cur_altsetting->desc.bInterfaceClass
- != CDC_DATA_INTERFACE_TYPE) {
- if (control_interface->cur_altsetting->desc.bInterfaceClass
- == CDC_DATA_INTERFACE_TYPE) {
+ if (data_interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA) {
+ if (control_interface->cur_altsetting->desc.bInterfaceClass == USB_CLASS_CDC_DATA) {
dev_dbg(&intf->dev,
"Your device has switched interfaces.\n");
swap(control_interface, data_interface);
@@ -1352,7 +1352,6 @@ made_compressed_probe:
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
- INIT_WORK(&acm->work, acm_softint);
INIT_DELAYED_WORK(&acm->dwork, acm_softint);
init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
@@ -1562,7 +1561,6 @@ static void acm_disconnect(struct usb_interface *intf)
}
acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1605,7 +1603,6 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
acm_kill_urbs(acm);
- cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
acm->urbs_in_error_delay = 0;
@@ -1696,6 +1693,15 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1876,11 +1882,6 @@ static const struct usb_device_id acm_ids[] = {
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
- /* Support for Droids MuIn LCD */
- { USB_DEVICE(0x04d8, 0x000b),
- .driver_info = NO_DATA_INTERFACE,
- },
-
#if IS_ENABLED(CONFIG_INPUT_IMS_PCU)
{ USB_DEVICE(0x04d8, 0x0082), /* Application mode */
.driver_info = IGNORE_DEVICE,
@@ -1906,6 +1907,17 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ /* Exclude ETAS ES58x */
+ { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */
+ .driver_info = IGNORE_DEVICE,
+ },
+ { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */
+ .driver_info = IGNORE_DEVICE,
+ },
+ { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */
+ .driver_info = IGNORE_DEVICE,
+ },
+
{ USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
.driver_info = SEND_ZERO_PACKET,
},
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index cd5e9d8ab237..8aef5eb769a0 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -64,12 +64,12 @@
#define ACM_NR 16
struct acm_wb {
- unsigned char *buf;
+ u8 *buf;
dma_addr_t dmah;
- int len;
- int use;
+ unsigned int len;
struct urb *urb;
struct acm *instance;
+ bool use;
};
struct acm_rb {
@@ -112,8 +112,7 @@ struct acm {
# define ACM_ERROR_DELAY 3
unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
struct usb_cdc_line_coding line; /* bits, stop, parity */
- struct work_struct work; /* work queue entry for various purposes*/
- struct delayed_work dwork; /* for cool downs needed in error recovery */
+ struct delayed_work dwork; /* work queue entry for various purposes */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
struct async_icount iocount; /* counters for control line changes */
@@ -131,15 +130,12 @@ struct acm {
unsigned long quirks;
};
-#define CDC_DATA_INTERFACE_TYPE 0x0a
-
/* constants describing various quirks and errors */
#define NO_UNION_NORMAL BIT(0)
#define SINGLE_RX_URB BIT(1)
#define NO_CAP_LINE BIT(2)
-#define NO_DATA_INTERFACE BIT(4)
-#define IGNORE_DEVICE BIT(5)
-#define QUIRK_CONTROL_LINE_STATE BIT(6)
-#define CLEAR_HALT_CONDITIONS BIT(7)
-#define SEND_ZERO_PACKET BIT(8)
-#define DISABLE_ECHO BIT(9)
+#define IGNORE_DEVICE BIT(3)
+#define QUIRK_CONTROL_LINE_STATE BIT(4)
+#define CLEAR_HALT_CONDITIONS BIT(5)
+#define SEND_ZERO_PACKET BIT(6)
+#define DISABLE_ECHO BIT(7)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7f5de956a2fc..02d0cfd23bb2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_MAX 16
+/* we cannot wait forever in flush() */
+#define WDM_FLUSH_TIMEOUT (30 * HZ)
+
/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
#define WDM_DEFAULT_BUFSIZE 256
@@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb)
kfree(desc->outbuf);
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
- wake_up(&desc->wait);
+ wake_up_all(&desc->wait);
}
static void wdm_in_callback(struct urb *urb)
@@ -393,6 +396,9 @@ static ssize_t wdm_write
if (test_bit(WDM_RESETTING, &desc->flags))
r = -EIO;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ r = -ENODEV;
+
if (r < 0) {
rv = r;
goto out_free_mem_pm;
@@ -424,6 +430,7 @@ static ssize_t wdm_write
if (rv < 0) {
desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
+ wake_up_all(&desc->wait); /* for wdm_wait_for_response() */
dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
rv = usb_translate_errors(rv);
goto out_free_mem_pm;
@@ -583,28 +590,58 @@ err:
return rv;
}
-static int wdm_flush(struct file *file, fl_owner_t id)
+static int wdm_wait_for_response(struct file *file, long timeout)
{
struct wdm_device *desc = file->private_data;
+ long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */
+
+ /*
+ * We need both flags; we cannot make do with one, because resetting it
+ * would cause a race with write(), yet we need to signal a disconnect.
+ */
+ rv = wait_event_interruptible_timeout(desc->wait,
+ !test_bit(WDM_IN_USE, &desc->flags) ||
+ test_bit(WDM_DISCONNECTING, &desc->flags),
+ timeout);
- wait_event(desc->wait,
- /*
- * needs both flags. We cannot do with one
- * because resetting it would cause a race
- * with write() yet we need to signal
- * a disconnect
- */
- !test_bit(WDM_IN_USE, &desc->flags) ||
- test_bit(WDM_DISCONNECTING, &desc->flags));
-
- /* cannot dereference desc->intf if WDM_DISCONNECTING */
+ /*
+ * Report the correct error. This is best effort;
+ * we are inevitably racing with the hardware.
+ */
if (test_bit(WDM_DISCONNECTING, &desc->flags))
return -ENODEV;
- if (desc->werr < 0)
- dev_err(&desc->intf->dev, "Error in flush path: %d\n",
- desc->werr);
+ if (!rv)
+ return -EIO;
+ if (rv < 0)
+ return -EINTR;
+
+ spin_lock_irq(&desc->iuspin);
+ rv = desc->werr;
+ desc->werr = 0;
+ spin_unlock_irq(&desc->iuspin);
+
+ return usb_translate_errors(rv);
+
+}
+
+/*
+ * You need to send a signal when you react to malicious or defective hardware.
+ * Also, don't abort when fsync() returns -EINVAL: older kernels, which do
+ * not implement this fsync handler, will return -EINVAL.
+ */
+static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT);
+}
- return usb_translate_errors(desc->werr);
+/*
+ * Same as wdm_fsync(), except it uses a finite timeout in order to react to
+ * malicious or defective hardware which ceased communication after close() was
+ * implicitly called due to process termination.
+ */
+static int wdm_flush(struct file *file, fl_owner_t id)
+{
+ return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT);
}
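From user space, the new handler means fsync() on the character device blocks until a queued command has actually been accepted by the hardware. A hedged usage sketch (the device path is an example):

#include <fcntl.h>
#include <unistd.h>

int wdm_send(const void *cmd, size_t len)
{
	int fd = open("/dev/cdc-wdm0", O_RDWR);	/* example node */

	if (fd < 0)
		return -1;

	if (write(fd, cmd, len) < 0 || fsync(fd) < 0) {
		/* kernels without this handler fail fsync() with EINVAL */
		close(fd);
		return -1;
	}

	return close(fd);
}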
static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
@@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = {
.owner = THIS_MODULE,
.read = wdm_read,
.write = wdm_write,
+ .fsync = wdm_fsync,
.open = wdm_open,
.flush = wdm_flush,
.release = wdm_release,
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index 7b3a21360d7c..6c4e3a19f42c 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -91,14 +91,14 @@ static void usb_conn_detect_cable(struct work_struct *work)
return;
}
- if (info->last_role == USB_ROLE_HOST)
+ if (info->last_role == USB_ROLE_HOST && info->vbus)
regulator_disable(info->vbus);
ret = usb_role_switch_set_role(info->role_sw, role);
if (ret)
dev_err(info->dev, "failed to set role: %d\n", ret);
- if (role == USB_ROLE_HOST) {
+ if (role == USB_ROLE_HOST && info->vbus) {
ret = regulator_enable(info->vbus);
if (ret)
dev_err(info->dev, "enable vbus regulator failed\n");
@@ -106,8 +106,9 @@ static void usb_conn_detect_cable(struct work_struct *work)
info->last_role = role;
- dev_dbg(info->dev, "vbus regulator is %s\n",
- regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
+ if (info->vbus)
+ dev_dbg(info->dev, "vbus regulator is %s\n",
+ regulator_is_enabled(info->vbus) ? "enabled" : "disabled");
power_supply_changed(info->charger);
}
@@ -156,6 +157,7 @@ static int usb_conn_probe(struct platform_device *pdev)
struct power_supply_config cfg = {
.of_node = dev->of_node,
};
+ bool need_vbus = true;
int ret = 0;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
@@ -185,10 +187,26 @@ static int usb_conn_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable);
- info->vbus = devm_regulator_get(dev, "vbus");
+ /*
+ * If the USB connector is a child of a USB port and that port already provides the VBUS
+ * supply, there's no need for the USB connector to provide it again.
+ */
+ if (dev->parent && dev->parent->of_node) {
+ if (of_find_property(dev->parent->of_node, "vbus-supply", NULL))
+ need_vbus = false;
+ }
+
+ if (!need_vbus) {
+ info->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (PTR_ERR(info->vbus) == -ENODEV)
+ info->vbus = NULL;
+ } else {
+ info->vbus = devm_regulator_get(dev, "vbus");
+ }
+
if (IS_ERR(info->vbus)) {
if (PTR_ERR(info->vbus) != -EPROBE_DEFER)
- dev_err(dev, "failed to get vbus\n");
+ dev_err(dev, "failed to get vbus: %ld\n", PTR_ERR(info->vbus));
return PTR_ERR(info->vbus);
}
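The distinction used here: devm_regulator_get() returns a dummy regulator when none is described, while devm_regulator_get_optional() returns ERR_PTR(-ENODEV), which the driver maps to NULL so every later call can be gated on info->vbus. The general pattern, as a sketch:

struct regulator *vbus = devm_regulator_get_optional(dev, "vbus");

if (IS_ERR(vbus)) {
	if (PTR_ERR(vbus) == -ENODEV)
		vbus = NULL;		/* genuinely absent: treat as optional */
	else
		return PTR_ERR(vbus);	/* real error, incl. -EPROBE_DEFER */
}

if (vbus && regulator_enable(vbus))
	dev_err(dev, "enable vbus regulator failed\n");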
@@ -266,7 +284,7 @@ static int usb_conn_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&info->dw_det);
- if (info->last_role == USB_ROLE_HOST)
+ if (info->last_role == USB_ROLE_HOST && info->vbus)
regulator_disable(info->vbus);
usb_role_switch_put(info->role_sw);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index dfacc478a8fc..351ede4b5de2 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -32,6 +32,20 @@ config USB_DEFAULT_PERSIST
If you have any questions about this, say Y here, only say N
if you know exactly what you are doing.
+config USB_FEW_INIT_RETRIES
+ bool "Limit USB device initialization to only a few retries"
+ help
+ When a new USB device is detected, the kernel tries very hard
+ to initialize and enumerate it, with lots of nested retry loops.
+ This almost always works, but when it fails it can take a long time.
+ This option tells the kernel to make only a few retry attempts,
+ so that the total time required for a failed initialization is
+ no more than 30 seconds (as required by the USB OTG spec).
+
+ Say N here unless you require new-device enumeration failure to
+ occur within 30 seconds (as might be needed in an embedded
+ application).
+
config USB_DYNAMIC_MINORS
bool "Dynamic USB minor allocation"
help
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 696b2b692b83..1ef2de6e375a 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -39,7 +39,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/gfp.h>
-#include <linux/poll.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
@@ -97,22 +96,6 @@ static const char format_endpt[] =
/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
"E: Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n";
-/*
- * Wait for an connect/disconnect event to happen. We initialize
- * the event counter with an odd number, and each event will increment
- * the event counter by two, so it will always _stay_ odd. That means
- * that it will never be zero, so "event 0" will never match a current
- * event, and thus 'poll' will always trigger as readable for the first
- * time it gets called.
- */
-static struct device_connect_event {
- atomic_t count;
- wait_queue_head_t wait;
-} device_event = {
- .count = ATOMIC_INIT(1),
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER(device_event.wait)
-};
-
struct class_info {
int class;
char *class_name;
@@ -146,12 +129,6 @@ static const struct class_info clas_info[] = {
/*****************************************************************/
-void usbfs_conn_disc_event(void)
-{
- atomic_add(2, &device_event.count);
- wake_up(&device_event.wait);
-}
-
static const char *class_decode(const int class)
{
int ix;
@@ -623,25 +600,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
return total_written;
}
-/* Kernel lock for "lastev" protection */
-static __poll_t usb_device_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- unsigned int event_count;
-
- poll_wait(file, &device_event.wait, wait);
-
- event_count = atomic_read(&device_event.count);
- if (file->f_version != event_count) {
- file->f_version = event_count;
- return EPOLLIN | EPOLLRDNORM;
- }
-
- return 0;
-}
-
const struct file_operations usbfs_devices_fops = {
.llseek = no_seek_end_llseek,
.read = usb_device_read,
- .poll = usb_device_poll,
};
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index b351962279e4..4dfa44d6cc3c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -839,6 +839,22 @@ const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
return NULL;
}
+bool usb_driver_applicable(struct usb_device *udev,
+ struct usb_device_driver *udrv)
+{
+ if (udrv->id_table && udrv->match)
+ return usb_device_match_id(udev, udrv->id_table) != NULL &&
+ udrv->match(udev);
+
+ if (udrv->id_table)
+ return usb_device_match_id(udev, udrv->id_table) != NULL;
+
+ if (udrv->match)
+ return udrv->match(udev);
+
+ return false;
+}
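The helper collapses the four id_table/->match() combinations into one place; summarized (illustrative):

/*
 *   id_table   ->match()   usb_driver_applicable()
 *   --------   ---------   ----------------------------------------
 *   set        set         id match  &&  match(udev)
 *   set        unset       id match
 *   unset      set         match(udev)
 *   unset      unset       false (usb_device_match() special-cases
 *                          this and lets probe() decide)
 */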
+
static int usb_device_match(struct device *dev, struct device_driver *drv)
{
/* devices and interfaces are handled separately */
@@ -853,17 +869,14 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
udev = to_usb_device(dev);
udrv = to_usb_device_driver(drv);
- if (udrv->id_table)
- return usb_device_match_id(udev, udrv->id_table) != NULL;
-
- if (udrv->match)
- return udrv->match(udev);
-
/* If the device driver under consideration does not have a
* id_table or a match function, then let the driver's probe
* function decide.
*/
- return 1;
+ if (!udrv->id_table && !udrv->match)
+ return 1;
+
+ return usb_driver_applicable(udev, udrv);
} else if (is_usb_interface(dev)) {
struct usb_interface *intf;
@@ -941,8 +954,7 @@ static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
return 0;
udev = to_usb_device(dev);
- if (usb_device_match_id(udev, new_udriver->id_table) == NULL &&
- (!new_udriver->match || new_udriver->match(udev) == 0))
+ if (!usb_driver_applicable(udev, new_udriver))
return 0;
ret = device_reprobe(dev);
@@ -991,8 +1003,7 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver,
bus_for_each_dev(&usb_bus_type, NULL, new_udriver,
__usb_bus_reprobe_drivers);
} else {
- printk(KERN_ERR "%s: error %d registering device "
- " driver %s\n",
+ pr_err("%s: error %d registering device driver %s\n",
usbcore_name, retval, new_udriver->name);
}
@@ -1068,9 +1079,8 @@ out:
out_newid:
driver_unregister(&new_driver->drvwrap.driver);
- printk(KERN_ERR "%s: error %d registering interface "
- " driver %s\n",
- usbcore_name, retval, new_driver->name);
+ pr_err("%s: error %d registering interface driver %s\n",
+ usbcore_name, retval, new_driver->name);
goto out;
}
EXPORT_SYMBOL_GPL(usb_register_driver);
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 2b2f1ab6e36a..26f9fb9f67ca 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -195,7 +195,7 @@ int usb_choose_configuration(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_choose_configuration);
-static int __check_usb_generic(struct device_driver *drv, void *data)
+static int __check_for_non_generic_match(struct device_driver *drv, void *data)
{
struct usb_device *udev = data;
struct usb_device_driver *udrv;
@@ -205,9 +205,7 @@ static int __check_usb_generic(struct device_driver *drv, void *data)
udrv = to_usb_device_driver(drv);
if (udrv == &usb_generic_driver)
return 0;
- if (usb_device_match_id(udev, udrv->id_table) != NULL)
- return 1;
- return (udrv->match && udrv->match(udev));
+ return usb_driver_applicable(udev, udrv);
}
static bool usb_generic_driver_match(struct usb_device *udev)
@@ -219,7 +217,7 @@ static bool usb_generic_driver_match(struct usb_device *udev)
* If any other driver wants the device, leave the device to this other
* driver.
*/
- if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_usb_generic))
+ if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_for_non_generic_match))
return false;
return true;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a33b849e8beb..2c6b9578a7d3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1657,9 +1657,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_put_urb(urb);
}
-static void usb_giveback_urb_bh(unsigned long param)
+static void usb_giveback_urb_bh(struct tasklet_struct *t)
{
- struct giveback_urb_bh *bh = (struct giveback_urb_bh *)param;
+ struct giveback_urb_bh *bh = from_tasklet(bh, t, bh);
struct list_head local_list;
spin_lock_irq(&bh->lock);
@@ -2403,7 +2403,7 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
spin_lock_init(&bh->lock);
INIT_LIST_HEAD(&bh->head);
- tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
+ tasklet_setup(&bh->bh, usb_giveback_urb_bh);
}
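tasklet_setup()/from_tasklet() replace the old unsigned-long cookie: the callback receives the tasklet_struct pointer and recovers its container with container_of(). The general shape, as a sketch:

struct my_driver_ctx {
	unsigned long events;
	struct tasklet_struct bh;
};

static void my_bh_func(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() specialized for tasklets */
	struct my_driver_ctx *ctx = from_tasklet(ctx, t, bh);

	ctx->events++;			/* deferred work goes here */
}

/* at init time:  tasklet_setup(&ctx->bh, my_bh_func);  */
/* to schedule:   tasklet_schedule(&ctx->bh);           */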
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5b768b80d1ee..17202b2ee063 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2705,11 +2705,20 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
}
+#ifdef CONFIG_USB_FEW_INIT_RETRIES
+#define PORT_RESET_TRIES 2
+#define SET_ADDRESS_TRIES 1
+#define GET_DESCRIPTOR_TRIES 1
+#define GET_MAXPACKET0_TRIES 1
+#define PORT_INIT_TRIES 4
+
+#else
#define PORT_RESET_TRIES 5
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
-#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme))
+#define GET_MAXPACKET0_TRIES 3
+#define PORT_INIT_TRIES 4
+#endif /* CONFIG_USB_FEW_INIT_RETRIES */
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
@@ -2717,23 +2726,31 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 800
-/*
- * "New scheme" enumeration causes an extra state transition to be
- * exposed to an xhci host and causes USB3 devices to receive control
- * commands in the default state. This has been seen to cause
- * enumeration failures, so disable this enumeration scheme for USB3
- * devices.
- */
static bool use_new_scheme(struct usb_device *udev, int retry,
struct usb_port *port_dev)
{
int old_scheme_first_port =
- port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
+ (port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME) ||
+ old_scheme_first;
+ /*
+ * "New scheme" enumeration causes an extra state transition to be
+ * exposed to an xhci host and causes USB3 devices to receive control
+ * commands in the default state. This has been seen to cause
+ * enumeration failures, so disable this enumeration scheme for USB3
+ * devices.
+ */
if (udev->speed >= USB_SPEED_SUPER)
return false;
- return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
+ /*
+ * If use_both_schemes is set, use the first scheme (whichever
+ * it is) for the larger half of the retries, then use the other
+ * scheme. Otherwise, use the first scheme for all the retries.
+ */
+ if (use_both_schemes && retry >= (PORT_INIT_TRIES + 1) / 2)
+ return old_scheme_first_port; /* Second half */
+ return !old_scheme_first_port; /* First half or all */
}
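
To make the new arithmetic concrete, a short walk-through under the default (non-CONFIG_USB_FEW_INIT_RETRIES) values:

/*
 * Illustrative walk-through, assuming PORT_INIT_TRIES == 4 and
 * use_both_schemes set:
 *
 *   retry 0, 1:  retry < (4 + 1) / 2 == 2  ->  first scheme
 *   retry 2, 3:  retry >= 2                ->  the other scheme
 *
 * With use_both_schemes clear, all retries use the first scheme, which
 * is the old scheme when old_scheme_first (or the port quirk) is set.
 */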
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -4545,6 +4562,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
const char *speed;
int devnum = udev->devnum;
const char *driver_name;
+ bool do_new_scheme;
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
@@ -4657,14 +4675,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
- for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
- bool did_new_scheme = false;
+ do_new_scheme = use_new_scheme(udev, retry_counter, port_dev);
- if (use_new_scheme(udev, retry_counter, port_dev)) {
+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ if (do_new_scheme) {
struct usb_device_descriptor *buf;
int r = 0;
- did_new_scheme = true;
retval = hub_enable_device(udev);
if (retval < 0) {
dev_err(&udev->dev,
@@ -4684,7 +4701,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
- for (operations = 0; operations < 3; ++operations) {
+ for (operations = 0; operations < GET_MAXPACKET0_TRIES;
+ ++operations) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
@@ -4773,11 +4791,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
- /* use_new_scheme() checks the speed which may have
- * changed since the initial look so we cache the result
- * in did_new_scheme
- */
- if (did_new_scheme)
+ if (do_new_scheme)
break;
}
@@ -5106,7 +5120,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
unit_load = 100;
status = 0;
- for (i = 0; i < SET_CONFIG_TRIES; i++) {
+ for (i = 0; i < PORT_INIT_TRIES; i++) {
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
@@ -5239,7 +5253,7 @@ loop:
break;
/* When halfway through our retry count, power-cycle the port */
- if (i == (SET_CONFIG_TRIES / 2) - 1) {
+ if (i == (PORT_INIT_TRIES - 1) / 2) {
dev_info(&port_dev->dev, "attempt power cycle\n");
usb_hub_set_port_power(hdev, hub, port1, false);
msleep(2 * hub_power_on_good_delay(hub));
@@ -5770,7 +5784,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
- for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ for (i = 0; i < PORT_INIT_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
* Other endpoints will be handled by re-enumeration. */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index ae1de9cc4b09..19ebb542befc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -163,6 +163,143 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
EXPORT_SYMBOL_GPL(usb_control_msg);
/**
+ * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion
+ * @dev: pointer to the usb device to send the message to
+ * @endpoint: endpoint to send the message to
+ * @request: USB message request value
+ * @requesttype: USB message request type value
+ * @value: USB message value
+ * @index: USB message index value
+ * @driver_data: pointer to the data to send
+ * @size: length in bytes of the data to send
+ * @timeout: time in msecs to wait for the message to complete before timing
+ * out (if 0 the wait is forever)
+ * @memflags: the flags for memory allocation for buffers
+ *
+ * Context: !in_interrupt ()
+ *
+ * This function sends a control message to a specified endpoint that is not
+ * expected to fill in a response (i.e. a "send message") and waits for the
+ * message to complete or time out.
+ *
+ * Do not use this function from within an interrupt context. If you need
+ * an asynchronous message, or need to send a message from within interrupt
+ * context, use usb_submit_urb(). If a thread in your driver uses this call,
+ * make sure your disconnect() method can wait for it to complete. Since you
+ * don't have a handle on the URB used, you can't cancel the request.
+ *
+ * The data pointer may point to a buffer on the stack, or anywhere else,
+ * as it will not be modified at all. This avoids the restriction that
+ * usb_control_msg() has, where the data pointer must point to dynamically
+ * allocated memory (i.e. memory that can be successfully DMAed to a device).
+ *
+ * Return: 0 if successful, otherwise a negative error number.
+ */
+int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ const void *driver_data, __u16 size, int timeout,
+ gfp_t memflags)
+{
+ unsigned int pipe = usb_sndctrlpipe(dev, endpoint);
+ int ret;
+ u8 *data = NULL;
+
+ if (usb_pipe_type_check(dev, pipe))
+ return -EINVAL;
+
+ if (size) {
+ data = kmemdup(driver_data, size, memflags);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
+ data, size, timeout);
+ kfree(data);
+
+ if (ret < 0)
+ return ret;
+ if (ret == size)
+ return 0;
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(usb_control_msg_send);
+
+/**
+ * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion
+ * @dev: pointer to the usb device to send the message to
+ * @endpoint: endpoint to send the message to
+ * @request: USB message request value
+ * @requesttype: USB message request type value
+ * @value: USB message value
+ * @index: USB message index value
+ * @driver_data: pointer to the data to be filled in by the message
+ * @size: length in bytes of the data to be received
+ * @timeout: time in msecs to wait for the message to complete before timing
+ * out (if 0 the wait is forever)
+ * @memflags: the flags for memory allocation for buffers
+ *
+ * Context: !in_interrupt ()
+ *
+ * This function sends a control message to a specified endpoint that is
+ * expected to fill in a response (i.e. a "receive message") and waits for the
+ * message to complete or time out.
+ *
+ * Do not use this function from within an interrupt context. If you need
+ * an asynchronous message, or need to send a message from within interrupt
+ * context, use usb_submit_urb(). If a thread in your driver uses this call,
+ * make sure your disconnect() method can wait for it to complete. Since you
+ * don't have a handle on the URB used, you can't cancel the request.
+ *
+ * The data pointer may point to a buffer on the stack, or anywhere else
+ * that can be successfully written to. This function does not have the
+ * restriction that usb_control_msg() has, where the data pointer must point
+ * to dynamically allocated memory (i.e. memory that can be successfully
+ * DMAed to a device).
+ *
+ * The "whole" message must be properly received from the device in order for
+ * this function to be successful. If a device returns less than the expected
+ * amount of data, then the function will fail. Do not use this for messages
+ * where a variable amount of data might be returned.
+ *
+ * Return: 0 if successful, otherwise a negative error number.
+ */
+int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index,
+ void *driver_data, __u16 size, int timeout,
+ gfp_t memflags)
+{
+ unsigned int pipe = usb_rcvctrlpipe(dev, endpoint);
+ int ret;
+ u8 *data;
+
+ if (!size || !driver_data || usb_pipe_type_check(dev, pipe))
+ return -EINVAL;
+
+ data = kmalloc(size, memflags);
+ if (!data)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
+ data, size, timeout);
+
+ if (ret < 0)
+ goto exit;
+
+ if (ret == size) {
+ memcpy(driver_data, data, size);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+exit:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_control_msg_recv);
+
+/**
* usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
@@ -948,11 +1085,12 @@ int usb_set_isoch_delay(struct usb_device *dev)
if (dev->speed < USB_SPEED_SUPER)
return 0;
- return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ return usb_control_msg_send(dev, 0,
USB_REQ_SET_ISOCH_DELAY,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
dev->hub_delay, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ USB_CTRL_SET_TIMEOUT,
+ GFP_NOIO);
}
/**
@@ -1070,13 +1208,13 @@ int usb_clear_halt(struct usb_device *dev, int pipe)
* (like some ibmcam model 1 units) seem to expect hosts to make
* this request for iso endpoints, which can't halt!
*/
- result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
- USB_ENDPOINT_HALT, endp, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ result = usb_control_msg_send(dev, 0,
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
+ USB_ENDPOINT_HALT, endp, NULL, 0,
+ USB_CTRL_SET_TIMEOUT, GFP_NOIO);
/* don't un-halt or force to DATA0 except on success */
- if (result < 0)
+ if (result)
return result;
/* NOTE: seems like Microsoft and Apple don't bother verifying
@@ -1438,9 +1576,11 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
if (dev->quirks & USB_QUIRK_NO_SET_INTF)
ret = -EPIPE;
else
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
- alternate, interface, NULL, 0, 5000);
+ ret = usb_control_msg_send(dev, 0,
+ USB_REQ_SET_INTERFACE,
+ USB_RECIP_INTERFACE, alternate,
+ interface, NULL, 0, 5000,
+ GFP_NOIO);
/* 9.4.10 says devices don't need this and are free to STALL the
* request if the interface only has one alternate setting.
@@ -1450,7 +1590,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
"manual set_interface for iface %d, alt %d\n",
interface, alternate);
manual = 1;
- } else if (ret < 0) {
+ } else if (ret) {
/* Re-instate the old alt setting */
usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
usb_enable_lpm(dev);
@@ -1574,11 +1714,11 @@ int usb_reset_configuration(struct usb_device *dev)
mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
- retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- USB_REQ_SET_CONFIGURATION, 0,
- config->desc.bConfigurationValue, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (retval < 0) {
+ retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
+ config->desc.bConfigurationValue, 0,
+ NULL, 0, USB_CTRL_SET_TIMEOUT,
+ GFP_NOIO);
+ if (retval) {
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
@@ -1947,12 +2087,6 @@ free_interfaces:
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
- /*
- * Please refer to usb_alloc_dev() to see why we set
- * dma_mask and dma_pfn_offset.
- */
- intf->dev.dma_mask = dev->dev.dma_mask;
- intf->dev.dma_pfn_offset = dev->dev.dma_pfn_offset;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
intf->minor = -1;
device_initialize(&intf->dev);
@@ -1963,10 +2097,10 @@ free_interfaces:
}
kfree(new_interfaces);
- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (ret < 0 && cp) {
+ ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
+ configuration, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT, GFP_NOIO);
+ if (ret && cp) {
/*
* All the old state is gone, so what else can we do?
* The device is probably useless now anyway.
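
A minimal sketch of how a caller might use the two new wrappers introduced above; the vendor request values and the helper name are hypothetical:

static int example_vendor_roundtrip(struct usb_device *udev)
{
	u8 state = 1;	/* stack data is fine: _send copies it internally */
	u8 reply;
	int ret;

	ret = usb_control_msg_send(udev, 0, 0x01,
				   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				   0, 0, &state, sizeof(state),
				   USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
	if (ret)
		return ret;

	/* fails with -EINVAL unless all sizeof(reply) bytes are returned */
	return usb_control_msg_recv(udev, 0, 0x02,
				    USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				    0, 0, &reply, sizeof(reply),
				    USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
}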
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 10574fa3f927..a1e3a037a289 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Kingston DataTraveler 3.0 */
+ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7bc23469f4e4..357b149b20d3 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -192,24 +192,39 @@ static const int pipetypes[4] = {
};
/**
- * usb_urb_ep_type_check - sanity check of endpoint in the given urb
- * @urb: urb to be checked
+ * usb_pipe_type_check - sanity check of a specific pipe for a usb device
+ * @dev: struct usb_device to be checked
+ * @pipe: pipe to check
*
* This performs a light-weight sanity check for the endpoint in the
- * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
- * a negative error code.
+ * given usb device. It returns 0 if the pipe is valid for the specific usb
+ * device, otherwise a negative error code.
*/
-int usb_urb_ep_type_check(const struct urb *urb)
+int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
const struct usb_host_endpoint *ep;
- ep = usb_pipe_endpoint(urb->dev, urb->pipe);
+ ep = usb_pipe_endpoint(dev, pipe);
if (!ep)
return -EINVAL;
- if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL_GPL(usb_pipe_type_check);
+
+/**
+ * usb_urb_ep_type_check - sanity check of endpoint in the given urb
+ * @urb: urb to be checked
+ *
+ * This performs a light-weight sanity check for the endpoint in the
+ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
+ * a negative error code.
+ */
+int usb_urb_ep_type_check(const struct urb *urb)
+{
+ return usb_pipe_type_check(urb->dev, urb->pipe);
+}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
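
Since usb_pipe_type_check() is now exported, drivers can validate a hand-built pipe before using it; a sketch assuming a bulk IN endpoint 1 (illustrative only):

static int example_check_bulk_in(struct usb_device *udev)
{
	/* endpoint 1 IN, assumed to be bulk */
	unsigned int pipe = usb_rcvbulkpipe(udev, 1);

	if (usb_pipe_type_check(udev, pipe))
		return -EINVAL;	/* endpoint absent or not a bulk endpoint */
	return 0;
}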
/**
@@ -474,7 +489,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
*/
/* Check that the pipe's type matches the endpoint's type */
- if (usb_urb_ep_type_check(urb))
+ if (usb_pipe_type_check(urb->dev, urb->pipe))
dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
usb_pipetype(urb->pipe), pipetypes[xfertype]);
@@ -772,11 +787,12 @@ void usb_block_urb(struct urb *urb)
EXPORT_SYMBOL_GPL(usb_block_urb);
/**
- * usb_kill_anchored_urbs - cancel transfer requests en masse
+ * usb_kill_anchored_urbs - kill all URBs associated with an anchor
* @anchor: anchor the requests are bound to
*
- * this allows all outstanding URBs to be killed starting
- * from the back of the queue
+ * This kills all outstanding URBs starting from the back of the queue,
+ * with the guarantee that no completion callbacks will take place from the
+ * anchor after this function returns.
*
* This routine should not be called by a driver after its disconnect
* method has returned.
@@ -784,20 +800,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb);
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ int surely_empty;
- spin_lock_irq(&anchor->lock);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- /* we must make sure the URB isn't freed before we kill it*/
- usb_get_urb(victim);
- spin_unlock_irq(&anchor->lock);
- /* this will unanchor the URB */
- usb_kill_urb(victim);
- usb_put_urb(victim);
+ do {
spin_lock_irq(&anchor->lock);
- }
- spin_unlock_irq(&anchor->lock);
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ /* make sure the URB isn't freed before we kill it */
+ usb_get_urb(victim);
+ spin_unlock_irq(&anchor->lock);
+ /* this will unanchor the URB */
+ usb_kill_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irq(&anchor->lock);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
+
+ spin_unlock_irq(&anchor->lock);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
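
The reworked loop above hardens the usual anchor pattern, sketched below with hypothetical names (the anchor is assumed to have been set up with init_usb_anchor()):

static int example_submit(struct usb_anchor *anchor, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, anchor);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* keep the anchor consistent */
	return ret;
}

static void example_disconnect(struct usb_anchor *anchor)
{
	/* returns only once no completion can still run from the anchor */
	usb_kill_anchored_urbs(anchor);
}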
@@ -816,21 +838,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ int surely_empty;
- spin_lock_irq(&anchor->lock);
- anchor->poisoned = 1;
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- /* we must make sure the URB isn't freed before we kill it*/
- usb_get_urb(victim);
- spin_unlock_irq(&anchor->lock);
- /* this will unanchor the URB */
- usb_poison_urb(victim);
- usb_put_urb(victim);
+ do {
spin_lock_irq(&anchor->lock);
- }
- spin_unlock_irq(&anchor->lock);
+ anchor->poisoned = 1;
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ /* make sure the URB isn't freed before we kill it */
+ usb_get_urb(victim);
+ spin_unlock_irq(&anchor->lock);
+ /* this will unanchor the URB */
+ usb_poison_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irq(&anchor->lock);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
+
+ spin_unlock_irq(&anchor->lock);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
@@ -970,14 +998,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
unsigned long flags;
+ int surely_empty;
+
+ do {
+ spin_lock_irqsave(&anchor->lock, flags);
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev,
+ struct urb, anchor_list);
+ __usb_unanchor_urb(victim, anchor);
+ }
+ surely_empty = usb_anchor_check_wakeup(anchor);
- spin_lock_irqsave(&anchor->lock, flags);
- while (!list_empty(&anchor->urb_list)) {
- victim = list_entry(anchor->urb_list.prev, struct urb,
- anchor_list);
- __usb_unanchor_urb(victim, anchor);
- }
- spin_unlock_irqrestore(&anchor->lock, flags);
+ spin_unlock_irqrestore(&anchor->lock, flags);
+ cpu_relax();
+ } while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index bafc113f2b3e..9b4ac4415f1a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -599,18 +599,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
dev->dev.groups = usb_device_groups;
- /*
- * Fake a dma_mask/offset for the USB device:
- * We cannot really use the dma-mapping API (dma_alloc_* and
- * dma_map_*) for USB devices but instead need to use
- * usb_alloc_coherent and pass data in 'urb's, but some subsystems
- * manually look into the mask/offset pair to determine whether
- * they need bounce buffers.
- * Note: calling dma_set_mask() on a USB device would set the
- * mask for the entire HCD, so don't do that.
- */
- dev->dev.dma_mask = bus->sysdev->dma_mask;
- dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 98e7d1ee63dc..82538daac8b8 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -74,6 +74,8 @@ extern int usb_match_device(struct usb_device *dev,
const struct usb_device_id *id);
extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
const struct usb_device_id *id);
+extern bool usb_driver_applicable(struct usb_device *udev,
+ struct usb_device_driver *udrv);
extern void usb_forced_unbind_intf(struct usb_interface *intf);
extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev);
@@ -191,7 +193,6 @@ extern const struct attribute_group *usb_interface_groups[];
extern struct usb_driver usbfs_driver;
extern const struct file_operations usbfs_devices_fops;
extern const struct file_operations usbdev_file_operations;
-extern void usbfs_conn_disc_event(void);
extern int usb_devio_init(void);
extern void usb_devio_cleanup(void);
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index 16e1aa304edc..c131719367ec 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -5,6 +5,7 @@ config USB_DWC2
depends on HAS_DMA
depends on USB || USB_GADGET
depends on HAS_IOMEM
+ select USB_ROLE_SWITCH
help
Say Y here if your system has a Dual Role Hi-Speed USB
controller based on the DesignWare HSOTG IP Core.
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 440320cc20a4..2bcd6945df46 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC2) += dwc2.o
-dwc2-y := core.o core_intr.o platform.o
+dwc2-y := core.o core_intr.o platform.o drd.o
dwc2-y += params.o
ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9deff0400a92..7161344c6522 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -860,6 +860,7 @@ struct dwc2_hregs_backup {
* - USB_DR_MODE_PERIPHERAL
* - USB_DR_MODE_HOST
* - USB_DR_MODE_OTG
+ * @role_sw: usb_role_switch handle
* @hcd_enabled: Host mode sub-driver initialization indicator.
* @gadget_enabled: Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled: Status of low-level hardware resources.
@@ -1054,6 +1055,7 @@ struct dwc2_hsotg {
struct dwc2_core_params params;
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
+ struct usb_role_switch *role_sw;
unsigned int hcd_enabled:1;
unsigned int gadget_enabled:1;
unsigned int ll_hw_enabled:1;
@@ -1376,6 +1378,11 @@ static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
}
+int dwc2_drd_init(struct dwc2_hsotg *hsotg);
+void dwc2_drd_suspend(struct dwc2_hsotg *hsotg);
+void dwc2_drd_resume(struct dwc2_hsotg *hsotg);
+void dwc2_drd_exit(struct dwc2_hsotg *hsotg);
+
/*
* Dump core registers and SPRAM
*/
@@ -1392,6 +1399,7 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2);
int dwc2_gadget_init(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
bool reset);
+void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
@@ -1417,6 +1425,7 @@ static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2,
bool reset) {}
+static inline void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2) {}
static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
new file mode 100644
index 000000000000..2d4176f5788e
--- /dev/null
+++ b/drivers/usb/dwc2/drd.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drd.c - DesignWare USB2 DRD Controller Dual-role support
+ *
+ * Copyright (C) 2020 STMicroelectronics
+ *
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/usb/role.h>
+#include "core.h"
+
+static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
+{
+ unsigned long flags;
+ u32 gotgctl;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ gotgctl = dwc2_readl(hsotg, GOTGCTL);
+ gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN;
+ gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
+ gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ dwc2_force_mode(hsotg, false);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
+static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
+{
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ /* Check if A-Session is already in the right state */
+ if ((valid && (gotgctl & GOTGCTL_ASESVLD)) ||
+ (!valid && !(gotgctl & GOTGCTL_ASESVLD)))
+ return -EALREADY;
+
+ if (valid)
+ gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
+ else
+ gotgctl &= ~(GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ return 0;
+}
+
+static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
+{
+ u32 gotgctl = dwc2_readl(hsotg, GOTGCTL);
+
+ /* Check if B-Session is already in the right state */
+ if ((valid && (gotgctl & GOTGCTL_BSESVLD)) ||
+ (!valid && !(gotgctl & GOTGCTL_BSESVLD)))
+ return -EALREADY;
+
+ if (valid)
+ gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
+ else
+ gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL);
+ dwc2_writel(hsotg, gotgctl, GOTGCTL);
+
+ return 0;
+}
+
+static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct dwc2_hsotg *hsotg = usb_role_switch_get_drvdata(sw);
+ unsigned long flags;
+ int already = 0;
+
+ /* Skip session not in line with dr_mode */
+ if ((role == USB_ROLE_DEVICE && hsotg->dr_mode == USB_DR_MODE_HOST) ||
+ (role == USB_ROLE_HOST && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL))
+ return -EINVAL;
+
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Skip session if core is in test mode */
+ if (role == USB_ROLE_NONE && hsotg->test_mode) {
+ dev_dbg(hsotg->dev, "Core is in test mode\n");
+ return -EBUSY;
+ }
+#endif
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+
+ if (role == USB_ROLE_HOST) {
+ already = dwc2_ovr_avalid(hsotg, true);
+ } else if (role == USB_ROLE_DEVICE) {
+ already = dwc2_ovr_bvalid(hsotg, true);
+		/* This clears the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_connect(hsotg);
+ } else {
+ if (dwc2_is_device_mode(hsotg)) {
+ if (!dwc2_ovr_bvalid(hsotg, false))
+				/* This sets the DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_disconnect(hsotg);
+ } else {
+ dwc2_ovr_avalid(hsotg, false);
+ }
+ }
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ if (!already && hsotg->dr_mode == USB_DR_MODE_OTG)
+ /* This will raise a Connector ID Status Change Interrupt */
+ dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+
+ dev_dbg(hsotg->dev, "%s-session valid\n",
+ role == USB_ROLE_NONE ? "No" :
+ role == USB_ROLE_HOST ? "A" : "B");
+
+ return 0;
+}
+
+int dwc2_drd_init(struct dwc2_hsotg *hsotg)
+{
+ struct usb_role_switch_desc role_sw_desc = {0};
+ struct usb_role_switch *role_sw;
+ int ret;
+
+ if (!device_property_read_bool(hsotg->dev, "usb-role-switch"))
+ return 0;
+
+ role_sw_desc.driver_data = hsotg;
+ role_sw_desc.fwnode = dev_fwnode(hsotg->dev);
+ role_sw_desc.set = dwc2_drd_role_sw_set;
+ role_sw_desc.allow_userspace_control = true;
+
+ role_sw = usb_role_switch_register(hsotg->dev, &role_sw_desc);
+ if (IS_ERR(role_sw)) {
+ ret = PTR_ERR(role_sw);
+ dev_err(hsotg->dev,
+ "failed to register role switch: %d\n", ret);
+ return ret;
+ }
+
+ hsotg->role_sw = role_sw;
+
+ /* Enable override and initialize values */
+ dwc2_ovr_init(hsotg);
+
+ return 0;
+}
+
+void dwc2_drd_suspend(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts, gintmsk;
+
+ if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk &= ~GINTSTS_CONIDSTSCHNG;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
+ }
+}
+
+void dwc2_drd_resume(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts, gintmsk;
+
+ if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
+ gintsts = dwc2_readl(hsotg, GINTSTS);
+ dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS);
+ gintmsk = dwc2_readl(hsotg, GINTMSK);
+ gintmsk |= GINTSTS_CONIDSTSCHNG;
+ dwc2_writel(hsotg, gintmsk, GINTMSK);
+ }
+}
+
+void dwc2_drd_exit(struct dwc2_hsotg *hsotg)
+{
+ if (hsotg->role_sw)
+ usb_role_switch_unregister(hsotg->role_sw);
+}
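
With "usb-role-switch" present in the controller's firmware node, dwc2_drd_role_sw_set() above is driven through the usb_role_switch API. A sketch of a hypothetical connector driver setting the role on a cable event:

#include <linux/usb/role.h>

static void example_cable_event(struct device *conn_dev, bool is_host)
{
	struct usb_role_switch *sw = usb_role_switch_get(conn_dev);

	if (IS_ERR_OR_NULL(sw))
		return;

	/* ends up in dwc2_drd_role_sw_set() via role_sw_desc.set */
	usb_role_switch_set_role(sw, is_host ? USB_ROLE_HOST
					     : USB_ROLE_DEVICE);
	usb_role_switch_put(sw);
}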
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 5b9d23991c99..0a0d11151cfb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -713,8 +713,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
*/
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
int is_isoc = hs_ep->isochronous;
unsigned int maxsize;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
if (is_isoc)
maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
@@ -723,6 +726,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
else
maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
+
return maxsize;
}
@@ -738,11 +746,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
* Isochronous - descriptor rx/tx bytes bitfield limit,
* Control In/Bulk/Interrupt - multiple of mps. This will allow to not
* have concatenations from various descriptors within one packet.
+ * Interrupt OUT - if mps is not a multiple of 4, then a single packet
+ * corresponds to a single descriptor.
*
* Selects corresponding mask for RX/TX bytes as well.
*/
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
u32 mps = hs_ep->ep.maxpacket;
int dir_in = hs_ep->dir_in;
u32 desc_size = 0;
@@ -766,6 +777,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
desc_size -= desc_size % mps;
}
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
+ desc_size = mps;
+ *mask = DEV_DMA_NBYTES_MASK;
+ }
+
return desc_size;
}
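
A concrete instance of the rule stated in the comment above:

/*
 * Illustrative example: an interrupt OUT endpoint with wMaxPacketSize
 * of 10 is not 4-byte aligned, so desc_size becomes 10 (one packet per
 * descriptor) and the chain limit becomes 10 * MAX_DMA_DESC_NUM_GENERIC
 * instead of the generic per-descriptor byte limit.
 */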
@@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
length += (mps - (length % mps));
}
- /*
- * If more data to send, adjust DMA for EP0 out data stage.
- * ureq->dma stays unchanged, hence increment it by already
- * passed passed data count before starting new transaction.
- */
- if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
- continuing)
+ if (continuing)
offset = ureq->actual;
/* Fill DDMA chain entries */
@@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
*/
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
{
+ const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
struct dwc2_hsotg *hsotg = hs_ep->parent;
unsigned int bytes_rem = 0;
+ unsigned int bytes_rem_correction = 0;
struct dwc2_dma_desc *desc = hs_ep->desc_list;
int i;
u32 status;
+ u32 mps = hs_ep->ep.maxpacket;
+ int dir_in = hs_ep->dir_in;
if (!desc)
return -EINVAL;
+ /* Interrupt OUT EP with mps not multiple of 4 */
+ if (hs_ep->index)
+ if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
+ bytes_rem_correction = 4 - (mps % 4);
+
for (i = 0; i < hs_ep->desc_count; ++i) {
status = desc->status;
bytes_rem += status & DEV_DMA_NBYTES_MASK;
+ bytes_rem -= bytes_rem_correction;
if (status & DEV_DMA_STS_MASK)
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
i, status & DEV_DMA_STS_MASK);
+
+ if (status & DEV_DMA_L)
+ break;
+
desc++;
}
@@ -3530,7 +3556,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_readl(hsotg, DOEPCTL0));
}
-static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
+void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
{
/* set the soft-disconnect bit */
dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 8f9d061c4d5f..267543c3dc38 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -185,7 +185,7 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
struct dwc2_core_params *p = &hsotg->params;
p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
- p->activate_stm_id_vb_detection = true;
+ p->activate_stm_id_vb_detection = !device_property_read_bool(hsotg->dev, "usb-role-switch");
p->host_rx_fifo_size = 440;
p->host_nperio_tx_fifo_size = 256;
p->host_perio_tx_fifo_size = 256;
@@ -210,6 +210,7 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "amlogic,meson-g12a-usb",
.data = dwc2_set_amlogic_g12a_params },
{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
+ { .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params },
{ .compatible = "st,stm32f4x9-fsotg",
.data = dwc2_set_stm32f4x9_fsotg_params },
{ .compatible = "st,stm32f4x9-hsotg" },
@@ -860,7 +861,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
int dwc2_init_params(struct dwc2_hsotg *hsotg)
{
const struct of_device_id *match;
- void (*set_params)(void *data);
+ void (*set_params)(struct dwc2_hsotg *data);
dwc2_set_default_params(hsotg);
dwc2_get_device_properties(hsotg);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index db9fd4bd1a38..5f18acac7406 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -121,6 +121,13 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
return 0;
}
+static void __dwc2_disable_regulators(void *data)
+{
+ struct dwc2_hsotg *hsotg = data;
+
+ regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+}
+
static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -131,6 +138,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev,
+ __dwc2_disable_regulators, hsotg);
+ if (ret)
+ return ret;
+
if (hsotg->clk) {
ret = clk_prepare_enable(hsotg->clk);
if (ret)
@@ -186,10 +198,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);
- ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
- hsotg->supplies);
-
- return ret;
+ return 0;
}
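
The devm_add_action_or_reset() call added above is the standard managed-cleanup idiom: the undo action runs automatically on probe failure or device removal, which is why the manual disable could be dropped from the teardown path. A generic sketch with hypothetical names:

static void example_undo(void *data)
{
	struct regulator *reg = data;

	regulator_disable(reg);
}

static int example_enable(struct device *dev, struct regulator *reg)
{
	int ret = regulator_enable(reg);

	if (ret)
		return ret;

	/* if registration fails, this calls example_undo(reg) itself;
	 * otherwise the undo runs automatically on remove/probe failure */
	return devm_add_action_or_reset(dev, example_undo, reg);
}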
/**
@@ -314,6 +323,8 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
+ dwc2_drd_exit(hsotg);
+
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
@@ -533,10 +544,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
dwc2_writel(hsotg, ggpio, GGPIO);
}
+ retval = dwc2_drd_init(hsotg);
+ if (retval) {
+ if (retval != -EPROBE_DEFER)
+ dev_err(hsotg->dev, "failed to initialize dual-role\n");
+ goto error_init;
+ }
+
if (hsotg->dr_mode != USB_DR_MODE_HOST) {
retval = dwc2_gadget_init(hsotg);
if (retval)
- goto error_init;
+ goto error_drd;
hsotg->gadget_enabled = 1;
}
@@ -562,7 +580,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval) {
if (hsotg->gadget_enabled)
dwc2_hsotg_remove(hsotg);
- goto error_init;
+ goto error_drd;
}
hsotg->hcd_enabled = 1;
}
@@ -584,12 +602,22 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval) {
hsotg->gadget.udc = NULL;
dwc2_hsotg_remove(hsotg);
- goto error_init;
+ goto error_debugfs;
}
}
#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+error_debugfs:
+ dwc2_debugfs_exit(hsotg);
+ if (hsotg->hcd_enabled)
+ dwc2_hcd_remove(hsotg);
+#endif
+error_drd:
+ dwc2_drd_exit(hsotg);
+
error_init:
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
@@ -608,6 +636,8 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
if (is_device_mode)
dwc2_hsotg_suspend(dwc2);
+ dwc2_drd_suspend(dwc2);
+
if (dwc2->params.activate_stm_id_vb_detection) {
unsigned long flags;
u32 ggpio, gotgctl;
@@ -688,6 +718,8 @@ static int __maybe_unused dwc2_resume(struct device *dev)
/* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
dwc2_force_dr_mode(dwc2);
+ dwc2_drd_resume(dwc2);
+
if (dwc2_is_device_mode(dwc2))
ret = dwc2_hsotg_resume(dwc2);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 2eb34c8b4065..841daec70b6e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* core.c - DesignWare USB3 DRD Controller Core file
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
@@ -119,9 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work)
struct dwc3 *dwc = work_to_dwc(work);
unsigned long flags;
int ret;
-
- if (dwc->dr_mode != USB_DR_MODE_OTG)
- return;
+ u32 reg;
pm_runtime_get_sync(dwc->dev);
@@ -172,6 +170,11 @@ static void __dwc3_set_mode(struct work_struct *work)
otg_set_vbus(dwc->usb2_phy->otg, true);
phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
+ if (dwc->dis_split_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
}
break;
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -203,6 +206,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
unsigned long flags;
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+
spin_lock_irqsave(&dwc->lock, flags);
dwc->desired_dr_role = mode;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -929,13 +935,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
*/
dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
- /* Handle USB2.0-only core configuration */
- if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
- }
-
ret = dwc3_phy_setup(dwc);
if (ret)
goto err0;
@@ -1356,6 +1355,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->dis_metastability_quirk = device_property_read_bool(dev,
"snps,dis_metastability_quirk");
+ dwc->dis_split_quirk = device_property_read_bool(dev,
+ "snps,dis-split-quirk");
+
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -1381,6 +1383,8 @@ bool dwc3_has_imod(struct dwc3 *dwc)
static void dwc3_check_params(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
+ unsigned int hwparam_gen =
+ DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
/* Check for proper value of imod_interval */
if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
@@ -1404,25 +1408,40 @@ static void dwc3_check_params(struct dwc3 *dwc)
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
+ break;
case USB_SPEED_SUPER:
+ if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS)
+ dev_warn(dev, "UDC doesn't support Gen 1\n");
+ break;
case USB_SPEED_SUPER_PLUS:
+ if ((DWC3_IP_IS(DWC32) &&
+ hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) ||
+ (!DWC3_IP_IS(DWC32) &&
+ hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dev_warn(dev, "UDC doesn't support SSP\n");
break;
default:
dev_err(dev, "invalid maximum_speed parameter %d\n",
dwc->maximum_speed);
fallthrough;
case USB_SPEED_UNKNOWN:
- /* default to superspeed */
- dwc->maximum_speed = USB_SPEED_SUPER;
-
- /*
- * default to superspeed plus if we are capable.
- */
- if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
- (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
- DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ switch (hwparam_gen) {
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
-
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
+ if (DWC3_IP_IS(DWC32))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+ else
+ dwc->maximum_speed = USB_SPEED_SUPER;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_DIS:
+ dwc->maximum_speed = USB_SPEED_HIGH;
+ break;
+ default:
+ dwc->maximum_speed = USB_SPEED_SUPER;
+ break;
+ }
break;
}
}
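
Summarizing the defaulting behaviour the new switch implements when maximum_speed is left unset:

/*
 * USB_SPEED_UNKNOWN resolves to (illustrative summary of the above):
 *   SSPHY_IFC_GEN2                -> USB_SPEED_SUPER_PLUS
 *   SSPHY_IFC_GEN1, DWC 3.2 core  -> USB_SPEED_SUPER_PLUS
 *   SSPHY_IFC_GEN1, other cores   -> USB_SPEED_SUPER
 *   SSPHY_IFC_DIS (USB2-only)     -> USB_SPEED_HIGH
 *   anything else                 -> USB_SPEED_SUPER
 */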
@@ -1554,6 +1573,17 @@ static int dwc3_probe(struct platform_device *pdev)
err5:
dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1589,9 +1619,9 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
dwc3_free_event_buffers(dwc);
dwc3_free_scratch_buffers(dwc);
@@ -1865,10 +1895,26 @@ static int dwc3_resume(struct device *dev)
return 0;
}
+
+static void dwc3_complete(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ u32 reg;
+
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST &&
+ dwc->dis_split_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL3);
+ reg |= DWC3_GUCTL3_SPLITDISABLE;
+ dwc3_writel(dwc->regs, DWC3_GUCTL3, reg);
+ }
+}
+#else
+#define dwc3_complete NULL
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dwc3_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ .complete = dwc3_complete,
SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
dwc3_runtime_idle)
};
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f04b3e42bf1..2f95f08ca511 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -138,6 +138,7 @@
#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
#define DWC3_GHWPARAMS8 0xc600
+#define DWC3_GUCTL3 0xc60c
#define DWC3_GFLADJ 0xc630
/* Device Registers */
@@ -380,6 +381,9 @@
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
+/* Global User Control Register 3 */
+#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
+
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -634,7 +638,7 @@ struct dwc3_trb;
struct dwc3_event_buffer {
void *buf;
void *cache;
- unsigned length;
+ unsigned int length;
unsigned int lpos;
unsigned int count;
unsigned int flags;
@@ -694,7 +698,7 @@ struct dwc3_ep {
struct dwc3 *dwc;
u32 saved_state;
- unsigned flags;
+ unsigned int flags;
#define DWC3_EP_ENABLED BIT(0)
#define DWC3_EP_STALL BIT(1)
#define DWC3_EP_WEDGE BIT(2)
@@ -706,6 +710,7 @@ struct dwc3_ep {
#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
+#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -893,9 +898,9 @@ struct dwc3_request {
struct scatterlist *sg;
struct scatterlist *start_sg;
- unsigned num_pending_sgs;
+ unsigned int num_pending_sgs;
unsigned int num_queued_sgs;
- unsigned remaining;
+ unsigned int remaining;
unsigned int status;
#define DWC3_REQUEST_STATUS_QUEUED 0
@@ -908,11 +913,11 @@ struct dwc3_request {
struct dwc3_trb *trb;
dma_addr_t trb_dma;
- unsigned num_trbs;
+ unsigned int num_trbs;
- unsigned needs_extra_trb:1;
- unsigned direction:1;
- unsigned mapped:1;
+ unsigned int needs_extra_trb:1;
+ unsigned int direction:1;
+ unsigned int mapped:1;
};
/*
@@ -1010,8 +1015,8 @@ struct dwc3_scratchpad_array {
* @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
* there's no way for software to detect this at runtime.
* @is_utmi_l1_suspend: the core asserts output signal
- * 0 - utmi_sleep_n
- * 1 - utmi_l1_suspend_n
+ * 0 - utmi_sleep_n
+ * 1 - utmi_l1_suspend_n
* @is_fpga: true when we are using the FPGA board
* @pending_events: true when we have pending IRQs to be handled
* @pullups_connected: true when Run/Stop bit is set
@@ -1047,13 +1052,14 @@ struct dwc3_scratchpad_array {
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
* @tx_de_emphasis: Tx de-emphasis value
- * 0 - -6dB de-emphasis
- * 1 - -3.5dB de-emphasis
- * 2 - No de-emphasis
- * 3 - Reserved
+ * 0 - -6dB de-emphasis
+ * 1 - -3.5dB de-emphasis
+ * 2 - No de-emphasis
+ * 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
+ * @dis_split_quirk: set to disable split boundary.
* @imod_interval: set the interrupt moderation interval in 250ns
- * increments or 0 to disable.
+ * increments or 0 to disable.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1079,7 +1085,7 @@ struct dwc3 {
struct dwc3_event_buffer *ev_buf;
struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
- struct usb_gadget gadget;
+ struct usb_gadget *gadget;
struct usb_gadget_driver *gadget_driver;
struct clk_bulk_data *clks;
@@ -1245,6 +1251,8 @@ struct dwc3 {
unsigned dis_metastability_quirk:1;
+ unsigned dis_split_quirk:1;
+
u16 imod_interval;
};
@@ -1269,7 +1277,7 @@ struct dwc3_event_type {
#define DWC3_DEPEVT_EPCMDCMPLT 0x07
/**
- * struct dwc3_event_depvt - Device Endpoint Events
+ * struct dwc3_event_depevt - Device Endpoint Events
* @one_bit: indicates this is an endpoint event (not used)
* @endpoint_number: number of the endpoint
* @endpoint_event: The event we have:
@@ -1456,9 +1464,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
int dwc3_gadget_get_link_state(struct dwc3 *dwc);
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
struct dwc3_gadget_ep_cmd_params *params);
-int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
+ u32 param);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1472,7 +1481,7 @@ static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
enum dwc3_link_state state)
{ return 0; }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
struct dwc3_gadget_ep_cmd_params *params)
{ return 0; }
static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 3d16dac4e5cc..8ab394942360 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -371,7 +371,9 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
static inline const char *dwc3_decode_event(char *str, size_t size, u32 event,
u32 ep0state)
{
- const union dwc3_event evt = (union dwc3_event) event;
+ union dwc3_event evt;
+
+ memcpy(&evt, &event, sizeof(event));
if (evt.type.is_devspec)
return dwc3_gadget_event_string(str, size, &evt.devt);
@@ -411,8 +413,8 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
-extern void dwc3_debugfs_init(struct dwc3 *);
-extern void dwc3_debugfs_exit(struct dwc3 *);
+extern void dwc3_debugfs_init(struct dwc3 *d);
+extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 2c7b6dd79cdf..5da4f6082d93 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -397,13 +397,13 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
switch (DWC3_GCTL_PRTCAP(reg)) {
case DWC3_GCTL_PRTCAP_HOST:
- seq_printf(s, "host\n");
+ seq_puts(s, "host\n");
break;
case DWC3_GCTL_PRTCAP_DEVICE:
- seq_printf(s, "device\n");
+ seq_puts(s, "device\n");
break;
case DWC3_GCTL_PRTCAP_OTG:
- seq_printf(s, "otg\n");
+ seq_puts(s, "otg\n");
break;
default:
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
@@ -428,6 +428,9 @@ static ssize_t dwc3_mode_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return count;
+
if (!strncmp(buf, "host", 4))
mode = DWC3_GCTL_PRTCAP_HOST;
@@ -464,22 +467,22 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
switch (reg) {
case 0:
- seq_printf(s, "no test\n");
+ seq_puts(s, "no test\n");
break;
case USB_TEST_J:
- seq_printf(s, "test_j\n");
+ seq_puts(s, "test_j\n");
break;
case USB_TEST_K:
- seq_printf(s, "test_k\n");
+ seq_puts(s, "test_k\n");
break;
case USB_TEST_SE0_NAK:
- seq_printf(s, "test_se0_nak\n");
+ seq_puts(s, "test_se0_nak\n");
break;
case USB_TEST_PACKET:
- seq_printf(s, "test_packet\n");
+ seq_puts(s, "test_packet\n");
break;
case USB_TEST_FORCE_ENABLE:
- seq_printf(s, "test_force_enable\n");
+ seq_puts(s, "test_force_enable\n");
break;
default:
seq_printf(s, "UNKNOWN %d\n", reg);
@@ -760,27 +763,26 @@ static int dwc3_transfer_type_show(struct seq_file *s, void *unused)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
- if (!(dep->flags & DWC3_EP_ENABLED) ||
- !dep->endpoint.desc) {
- seq_printf(s, "--\n");
+ if (!(dep->flags & DWC3_EP_ENABLED) || !dep->endpoint.desc) {
+ seq_puts(s, "--\n");
goto out;
}
switch (usb_endpoint_type(dep->endpoint.desc)) {
case USB_ENDPOINT_XFER_CONTROL:
- seq_printf(s, "control\n");
+ seq_puts(s, "control\n");
break;
case USB_ENDPOINT_XFER_ISOC:
- seq_printf(s, "isochronous\n");
+ seq_puts(s, "isochronous\n");
break;
case USB_ENDPOINT_XFER_BULK:
- seq_printf(s, "bulk\n");
+ seq_puts(s, "bulk\n");
break;
case USB_ENDPOINT_XFER_INT:
- seq_printf(s, "interrupt\n");
+ seq_puts(s, "interrupt\n");
break;
default:
- seq_printf(s, "--\n");
+ seq_puts(s, "--\n");
}
out:
@@ -798,11 +800,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
spin_lock_irqsave(&dwc->lock, flags);
if (dep->number <= 1) {
- seq_printf(s, "--\n");
+ seq_puts(s, "--\n");
goto out;
}
- seq_printf(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");
+ seq_puts(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");
for (i = 0; i < DWC3_TRB_NUM; i++) {
struct dwc3_trb *trb = &dep->trb_pool[i];
@@ -884,7 +886,7 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
const struct file_operations *fops = dwc3_ep_file_map[i].fops;
const char *name = dwc3_ep_file_map[i].name;
- debugfs_create_file(name, S_IRUGO, parent, dep, fops);
+ debugfs_create_file(name, 0444, parent, dep, fops);
}
}
@@ -929,21 +931,18 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
- debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
-
- debugfs_create_file("lsp_dump", S_IRUGO | S_IWUSR, root, dwc,
- &dwc3_lsp_fops);
+ debugfs_create_regset32("regdump", 0444, root, dwc->regset);
+ debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
- if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
- debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc,
+ if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE))
+ debugfs_create_file("mode", 0644, root, dwc,
&dwc3_mode_fops);
- }
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ||
IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
- debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, dwc,
- &dwc3_testmode_fops);
- debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc,
+ debugfs_create_file("testmode", 0644, root, dwc,
+ &dwc3_testmode_fops);
+ debugfs_create_file("link_state", 0644, root, dwc,
&dwc3_link_state_fops);
dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 1f7f4d88ed9d..417e05381b5d 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -116,23 +116,24 @@ static struct clk_bulk_data meson_a1_clocks[] = {
{ .id = "xtal_usb_ctrl" },
};
-static const char *meson_gxm_phy_names[] = {
+static const char * const meson_gxm_phy_names[] = {
"usb2-phy0", "usb2-phy1", "usb2-phy2",
};
-static const char *meson_g12a_phy_names[] = {
+static const char * const meson_g12a_phy_names[] = {
"usb2-phy0", "usb2-phy1", "usb3-phy0",
};
/*
* Amlogic A1 has a single physical PHY, in slot 1, but still has the
* two U2 PHY controls register blocks like G12A.
+ * AXG has a similar scheme, and thus needs the same tweak.
* Handling the first PHY on slot 1 would need a large amount of code
* changes, and the current management is generic enough to handle it
* correctly when only the "usb2-phy1" phy is specified on-par with the
* DT bindings.
*/
-static const char *meson_a1_phy_names[] = {
+static const char * const meson_a1_phy_names[] = {
"usb2-phy0", "usb2-phy1"
};
@@ -143,7 +144,7 @@ struct dwc3_meson_g12a_drvdata {
bool otg_phy_host_port_disable;
struct clk_bulk_data *clks;
int num_clks;
- const char **phy_names;
+ const char * const *phy_names;
int num_phys;
int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base);
int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i,
@@ -215,6 +216,19 @@ static struct dwc3_meson_g12a_drvdata gxm_drvdata = {
.usb_post_init = dwc3_meson_gxl_usb_post_init,
};
+static struct dwc3_meson_g12a_drvdata axg_drvdata = {
+ .otg_switch_supported = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
static struct dwc3_meson_g12a_drvdata g12a_drvdata = {
.otg_switch_supported = true,
.clks = meson_g12a_clocks,
@@ -520,11 +534,7 @@ static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
return 0;
if (priv->drvdata->otg_phy_host_port_disable)
- dev_warn_once(priv->dev, "Manual OTG switch is broken on this "\
- "SoC, when manual switching from "\
- "Host to device, DWC3 controller "\
- "will need to be resetted in order "\
- "to recover usage of the Host port");
+ dev_warn_once(priv->dev, "Broken manual OTG switch\n");
return dwc3_meson_g12a_otg_mode_set(priv, mode);
}
@@ -626,10 +636,7 @@ static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
/* GXL controls the PHY mode in the PHY registers unlike G12A */
priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base,
&phy_meson_g12a_usb_glue_regmap_conf);
- if (IS_ERR(priv->usb_glue_regmap))
- return PTR_ERR(priv->usb_glue_regmap);
-
- return 0;
+ return PTR_ERR_OR_ZERO(priv->usb_glue_regmap);
}
static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
@@ -906,8 +913,8 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
return ret;
}
- if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
- ret = regulator_enable(priv->vbus);
+ if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) {
+ ret = regulator_enable(priv->vbus);
if (ret)
return ret;
}
@@ -931,6 +938,10 @@ static const struct of_device_id dwc3_meson_g12a_match[] = {
.data = &gxm_drvdata,
},
{
+ .compatible = "amlogic,meson-axg-usb-ctrl",
+ .data = &axg_drvdata,
+ },
+ {
.compatible = "amlogic,meson-g12a-usb-ctrl",
.data = &g12a_drvdata,
},
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 7df115012935..e62ecd22b3ed 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -176,6 +176,8 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
+ { .compatible = "hisilicon,hi3670-dwc3" },
+ { .compatible = "intel,keembay-dwc3" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index f5a61f57c74f..bae6a70664c8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,6 +40,7 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -147,7 +148,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
- pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BXT_M ||
+ pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) {
guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
dwc->has_dsm_for_pm = true;
}
@@ -366,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
(kernel_ulong_t) &dwc3_pci_amd_properties, },
{ } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index e1e78e9824b1..c703d552bbcf 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/extcon.h>
+#include <linux/interconnect.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
@@ -43,6 +44,14 @@
#define SDM845_QSCRATCH_SIZE 0x400
#define SDM845_DWC3_CORE_SIZE 0xcd00
+/* Interconnect path bandwidths in MBps */
+#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240)
+#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700)
+#define USB_MEMORY_AVG_SS_BW MBps_to_icc(1000)
+#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500)
+#define APPS_USB_AVG_BW 0
+#define APPS_USB_PEAK_BW MBps_to_icc(40)
+
struct dwc3_acpi_pdata {
u32 qscratch_base_offset;
u32 qscratch_base_size;
@@ -76,6 +85,8 @@ struct dwc3_qcom {
enum usb_dr_mode mode;
bool is_suspended;
bool pm_suspended;
+ struct icc_path *icc_path_ddr;
+ struct icc_path *icc_path_apps;
};
static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val)
@@ -190,6 +201,96 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom)
return 0;
}
+static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_enable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_enable(qcom->icc_path_apps);
+ if (ret)
+ icc_disable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
+{
+ int ret;
+
+ ret = icc_disable(qcom->icc_path_ddr);
+ if (ret)
+ return ret;
+
+ ret = icc_disable(qcom->icc_path_apps);
+ if (ret)
+ icc_enable(qcom->icc_path_ddr);
+
+ return ret;
+}
+
+/**
+ * dwc3_qcom_interconnect_init() - Get interconnect path handles
+ * and set bandwidth.
+ * @qcom: Pointer to the concerned usb core.
+ *
+ */
+static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
+{
+ struct device *dev = qcom->dev;
+ int ret;
+
+ qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
+ if (IS_ERR(qcom->icc_path_ddr)) {
+ dev_err(dev, "failed to get usb-ddr path: %ld\n",
+ PTR_ERR(qcom->icc_path_ddr));
+ return PTR_ERR(qcom->icc_path_ddr);
+ }
+
+ qcom->icc_path_apps = of_icc_get(dev, "apps-usb");
+ if (IS_ERR(qcom->icc_path_apps)) {
+ dev_err(dev, "failed to get apps-usb path: %ld\n",
+ PTR_ERR(qcom->icc_path_apps));
+ return PTR_ERR(qcom->icc_path_apps);
+ }
+
+ if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER ||
+ usb_get_maximum_speed(&qcom->dwc3->dev) == USB_SPEED_UNKNOWN)
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
+ else
+ ret = icc_set_bw(qcom->icc_path_ddr,
+ USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
+
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
+ return ret;
+ }
+
+ ret = icc_set_bw(qcom->icc_path_apps,
+ APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc3_qcom_interconnect_exit() - Release interconnect path handles
+ * @qcom: Pointer to the concerned usb core.
+ *
+ * This function is used to release interconnect path handle.
+ */
+static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
+{
+ icc_put(qcom->icc_path_ddr);
+ icc_put(qcom->icc_path_apps);
+}
+
static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom)
{
if (qcom->hs_phy_irq) {
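
A note on units: icc_set_bw() takes bandwidth in kBps, and the MBps_to_icc() wrappers used above are plain scalers, so the 2500 MBps SuperSpeed peak becomes a 2,500,000 kBps vote. A sketch of the helpers as defined in <linux/interconnect.h>:

/* Interconnect bandwidth is expressed in kBps. */
#define kBps_to_icc(x)	(x)
#define MBps_to_icc(x)	((x) * 1000)
#define GBps_to_icc(x)	((x) * 1000 * 1000)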
@@ -239,7 +340,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
{
u32 val;
- int i;
+ int i, ret;
if (qcom->is_suspended)
return 0;
@@ -251,6 +352,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
for (i = qcom->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(qcom->clks[i]);
+ ret = dwc3_qcom_interconnect_disable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
+
qcom->is_suspended = true;
dwc3_qcom_enable_interrupts(qcom);
@@ -276,6 +381,10 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
}
}
+ ret = dwc3_qcom_interconnect_enable(qcom);
+ if (ret)
+ dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret);
+
/* Clear existing events from PHY related to L2 in/out */
dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG,
PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
@@ -335,7 +444,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
- int irq, ret;
+ int irq;
+ int ret;
+
irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq",
pdata ? pdata->hs_phy_irq_index : -1);
if (irq > 0) {
@@ -454,7 +565,7 @@ static const struct property_entry dwc3_qcom_acpi_properties[] = {
static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct resource *res, *child_res = NULL;
int irq;
@@ -514,7 +625,7 @@ out:
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node, *dwc3_np;
struct device *dev = &pdev->dev;
int ret;
@@ -638,6 +749,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
goto depopulate;
}
+ ret = dwc3_qcom_interconnect_init(qcom);
+ if (ret)
+ goto depopulate;
+
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
@@ -647,7 +762,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
/* register extcon to override sw_vbus on Vbus change later */
ret = dwc3_qcom_register_extcon(qcom);
if (ret)
- goto depopulate;
+ goto interconnect_exit;
device_init_wakeup(&pdev->dev, 1);
qcom->is_suspended = false;
@@ -657,6 +772,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
return 0;
+interconnect_exit:
+ dwc3_qcom_interconnect_exit(qcom);
depopulate:
if (np)
of_platform_depopulate(&pdev->dev);
@@ -687,6 +804,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
}
qcom->num_clocks = 0;
+ dwc3_qcom_interconnect_exit(qcom);
reset_control_assert(qcom->resets);
pm_runtime_allow(dev);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 59f2e8c31bd1..8b668ef46f7f 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -105,7 +105,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* IRQ we were waiting for is long gone.
*/
if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- unsigned direction;
+ unsigned int direction;
direction = !!(dep->flags & DWC3_EP0_DIR_IN);
@@ -127,11 +127,11 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* handle it here.
*/
if (dwc->delayed_status) {
- unsigned direction;
+ unsigned int direction;
direction = !dwc->ep0_expect_in;
dwc->delayed_status = false;
- usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED);
if (dwc->ep0state == EP0_STATUS_PHASE)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
@@ -172,7 +172,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* XferNotReady(STATUS).
*/
if (dwc->three_stage_setup) {
- unsigned direction;
+ unsigned int direction;
direction = dwc->ep0_expect_in;
dwc->ep0state = EP0_DATA_PHASE;
@@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int ret;
spin_lock_irqsave(&dwc->lock, flags);
- if (!dep->endpoint.desc) {
+ if (!dep->endpoint.desc || !dwc->pullups_connected) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
ret = -ESHUTDOWN;
@@ -325,7 +325,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
/*
* LTM will be set once we know how to set this in HW.
*/
- usb_status |= dwc->gadget.is_selfpowered;
+ usb_status |= dwc->gadget->is_selfpowered;
if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
(dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
@@ -450,7 +450,7 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
wValue = le16_to_cpu(ctrl->wValue);
wIndex = le16_to_cpu(ctrl->wIndex);
- state = dwc->gadget.state;
+ state = dwc->gadget->state;
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
@@ -524,6 +524,11 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
ret = __dwc3_gadget_ep_set_halt(dep, set, true);
if (ret)
return -EINVAL;
+
+ /* ClearFeature(Halt) may need delayed status */
+ if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ return USB_GADGET_DELAYED_STATUS;
+
break;
default:
return -EINVAL;
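
Returning USB_GADGET_DELAYED_STATUS from ClearFeature(Halt) lets the status stage wait until the pending END_TRANSFER command completes. Function drivers use the same mechanism; a minimal sketch, where example_set_alt() and example_config_done() are hypothetical but USB_GADGET_DELAYED_STATUS and usb_composite_setup_continue() are the real composite-framework symbols:

static int example_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	/* Kick off slow reconfiguration, then defer the status stage. */
	return USB_GADGET_DELAYED_STATUS;
}

static void example_config_done(struct usb_composite_dev *cdev)
{
	/* Release the control transfer once the work has finished. */
	usb_composite_setup_continue(cdev);
}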
@@ -559,7 +564,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
- enum usb_device_state state = dwc->gadget.state;
+ enum usb_device_state state = dwc->gadget->state;
u32 addr;
u32 reg;
@@ -580,9 +585,9 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
if (addr)
- usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS);
else
- usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_DEFAULT);
return 0;
}
@@ -592,14 +597,14 @@ static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
int ret;
spin_unlock(&dwc->lock);
- ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
+ ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
spin_lock(&dwc->lock);
return ret;
}
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
- enum usb_device_state state = dwc->gadget.state;
+ enum usb_device_state state = dwc->gadget->state;
u32 cfg;
int ret;
u32 reg;
@@ -622,7 +627,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
* to change the state on the next usb_ep_queue()
*/
if (ret == 0)
- usb_gadget_set_state(&dwc->gadget,
+ usb_gadget_set_state(dwc->gadget,
USB_STATE_CONFIGURED);
/*
@@ -641,7 +646,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
case USB_STATE_CONFIGURED:
ret = dwc3_ep0_delegate_req(dwc, ctrl);
if (!cfg && !ret)
- usb_gadget_set_state(&dwc->gadget,
+ usb_gadget_set_state(dwc->gadget,
USB_STATE_ADDRESS);
break;
default:
@@ -697,7 +702,7 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
struct dwc3_ep *dep;
- enum usb_device_state state = dwc->gadget.state;
+ enum usb_device_state state = dwc->gadget->state;
u16 wLength;
if (state == USB_STATE_DEFAULT)
@@ -741,7 +746,7 @@ static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ct
if (wIndex || wLength)
return -EINVAL;
- dwc->gadget.isoch_delay = wValue;
+ dwc->gadget->isoch_delay = wValue;
return 0;
}
@@ -942,12 +947,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req)
{
+ unsigned int trb_length = 0;
int ret;
req->direction = !!dep->number;
if (req->request.length == 0) {
- dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
DWC3_TRBCTL_CONTROL_DATA, false);
ret = dwc3_ep0_start_trans(dep);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
@@ -994,9 +1003,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
+ if (!req->direction)
+ trb_length = dep->endpoint.maxpacket;
+
/* Now prepare one extra TRB to align transfer size */
dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
- 0, DWC3_TRBCTL_CONTROL_DATA,
+ trb_length, DWC3_TRBCTL_CONTROL_DATA,
false);
ret = dwc3_ep0_start_trans(dep);
} else {
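
Both ep0 hunks apply the same rule: an OUT data stage must be able to absorb a full packet, so the bounce-buffer TRB is sized to wMaxPacketSize instead of zero. A sketch of the sizing decision, with a hypothetical helper name; at SuperSpeed (maxpacket 512) a zero-length OUT data stage therefore gets a 512-byte TRB:

static unsigned int ep0_bounce_trb_length(unsigned int maxp, bool dir_in)
{
	/* IN: nothing to receive, a zero-length TRB is fine.
	 * OUT: reserve one full packet so a host sending up to
	 * wMaxPacketSize bytes cannot run past the TRB. */
	return dir_in ? 0 : maxp;
}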
@@ -1042,6 +1054,18 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
__dwc3_ep0_do_control_status(dwc, dep);
}
+void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
+{
+ unsigned int direction = !dwc->ep0_expect_in;
+
+ dwc->delayed_status = false;
+
+ if (dwc->ep0state != EP0_STATUS_PHASE)
+ return;
+
+ __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
+}
+
static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1102,7 +1126,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
*/
if (!list_empty(&dep->pending_list)) {
dwc->delayed_status = false;
- usb_gadget_set_state(&dwc->gadget,
+ usb_gadget_set_state(dwc->gadget,
USB_STATE_CONFIGURED);
dwc3_ep0_do_control_status(dwc, event);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index c2a0f64f8d1e..78cb4db8a6e4 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -227,7 +227,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
* Caller should take care of locking. Issue @cmd with a given @param to @dwc
* and wait for its completion.
*/
-int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
+int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
+ u32 param)
{
u32 timeout = 500;
int status = 0;
@@ -268,7 +269,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
* Caller should handle locking. This function will issue @cmd with given
* @params to @dep and wait for its completion.
*/
-int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
struct dwc3_gadget_ep_cmd_params *params)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
@@ -290,7 +291,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
*
* DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
- if (dwc->gadget.speed <= USB_SPEED_HIGH) {
+ if (dwc->gadget->speed <= USB_SPEED_HIGH) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -422,7 +423,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
*/
if (dep->direction &&
!DWC3_VER_IS_PRIOR(DWC3, 260A) &&
- (dwc->gadget.speed >= USB_SPEED_SUPER))
+ (dwc->gadget->speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
memset(&params, 0, sizeof(params));
@@ -562,8 +563,9 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
/* Burst size is only needed in SuperSpeed mode */
- if (dwc->gadget.speed >= USB_SPEED_SUPER) {
+ if (dwc->gadget->speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst;
+
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
@@ -942,12 +944,13 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
}
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
- dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
- unsigned stream_id, unsigned short_not_ok,
- unsigned no_interrupt, unsigned is_last)
+ dma_addr_t dma, unsigned int length, unsigned int chain,
+ unsigned int node, unsigned int stream_id,
+ unsigned int short_not_ok, unsigned int no_interrupt,
+ unsigned int is_last, bool must_interrupt)
{
struct dwc3 *dwc = dep->dwc;
- struct usb_gadget *gadget = &dwc->gadget;
+ struct usb_gadget *gadget = dwc->gadget;
enum usb_device_speed speed = gadget->speed;
trb->size = DWC3_TRB_SIZE_LENGTH(length);
@@ -1031,8 +1034,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
}
- if ((!no_interrupt && !chain) ||
- (dwc3_calc_trbs_left(dep) == 1))
+ if ((!no_interrupt && !chain) || must_interrupt)
trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
@@ -1057,19 +1059,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
* @trb_length: buffer size of the TRB
* @chain: should this TRB be chained to the next?
* @node: only for isochronous endpoints. First TRB needs different type.
+ * @use_bounce_buffer: set to use bounce buffer
+ * @must_interrupt: set to interrupt on TRB completion
*/
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
struct dwc3_request *req, unsigned int trb_length,
- unsigned chain, unsigned node)
+ unsigned int chain, unsigned int node, bool use_bounce_buffer,
+ bool must_interrupt)
{
struct dwc3_trb *trb;
dma_addr_t dma;
- unsigned stream_id = req->request.stream_id;
- unsigned short_not_ok = req->request.short_not_ok;
- unsigned no_interrupt = req->request.no_interrupt;
- unsigned is_last = req->request.is_last;
-
- if (req->request.num_sgs > 0)
+ unsigned int stream_id = req->request.stream_id;
+ unsigned int short_not_ok = req->request.short_not_ok;
+ unsigned int no_interrupt = req->request.no_interrupt;
+ unsigned int is_last = req->request.is_last;
+
+ if (use_bounce_buffer)
+ dma = dep->dwc->bounce_addr;
+ else if (req->request.num_sgs > 0)
dma = sg_dma_address(req->start_sg);
else
dma = req->request.dma;
@@ -1085,10 +1092,63 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
- stream_id, short_not_ok, no_interrupt, is_last);
+ stream_id, short_not_ok, no_interrupt, is_last,
+ must_interrupt);
}
-static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = req->request.length % maxp;
+
+ if ((req->request.length && req->request.zero && !rem &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
+ (!req->direction && rem))
+ return true;
+
+ return false;
+}
+
+/**
+ * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
+ * @dep: The endpoint that the request belongs to
+ * @req: The request to prepare
+ * @entry_length: The last SG entry size
+ * @node: Indicates whether this is not the first entry (for isoc only)
+ *
+ * Return the number of TRBs prepared.
+ */
+static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int entry_length,
+ unsigned int node)
+{
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = req->request.length % maxp;
+ unsigned int num_trbs = 1;
+
+ if (dwc3_needs_extra_trb(dep, req))
+ num_trbs++;
+
+ if (dwc3_calc_trbs_left(dep) < num_trbs)
+ return 0;
+
+ req->needs_extra_trb = num_trbs > 1;
+
+ /* Prepare a normal TRB */
+ if (req->direction || req->request.length)
+ dwc3_prepare_one_trb(dep, req, entry_length,
+ req->needs_extra_trb, node, false, false);
+
+ /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
+ if ((!req->direction && !req->request.length) || req->needs_extra_trb)
+ dwc3_prepare_one_trb(dep, req,
+ req->direction ? 0 : maxp - rem,
+ false, 1, true, false);
+
+ return num_trbs;
+}
+
+static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
struct dwc3_request *req)
{
struct scatterlist *sg = req->start_sg;
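
dwc3_needs_extra_trb() folds the two long-standing special cases into one predicate: a requested ZLP on a packet-multiple transfer (the real check also excludes isochronous endpoints there), or an OUT transfer whose length is not a packet multiple. Worked example: a bulk OUT request of 700 bytes with maxp = 512 gives rem = 700 % 512 = 188, so dwc3_prepare_last_sg() queues a second, 324-byte TRB on the bounce buffer to pad the transfer. A sketch of the padding arithmetic, with a hypothetical helper name:

static unsigned int extra_trb_length(unsigned int length, unsigned int maxp,
				     bool dir_in, bool zero)
{
	unsigned int rem = length % maxp;

	if (!dir_in && rem)		/* OUT: pad to a packet multiple */
		return maxp - rem;	/* 700 B at maxp 512 -> 324 B    */
	if (zero && length && !rem)	/* ZLP on an aligned transfer    */
		return dir_in ? 0 : maxp;
	return 0;			/* no extra TRB needed           */
}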
@@ -1097,6 +1157,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
unsigned int length = req->request.length;
unsigned int remaining = req->request.num_mapped_sgs
- req->num_queued_sgs;
+ unsigned int num_trbs = req->num_trbs;
+ bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
/*
* If we resume preparing the request, then get the remaining length of
@@ -1106,10 +1168,10 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
length -= sg_dma_len(s);
for_each_sg(sg, s, remaining, i) {
- unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
- unsigned int rem = length % maxp;
+ unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
unsigned int trb_length;
- unsigned chain = true;
+ bool must_interrupt = false;
+ bool last_sg = false;
trb_length = min_t(unsigned int, length, sg_dma_len(s));
@@ -1123,59 +1185,28 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
* mapped sg.
*/
if ((i == remaining - 1) || !length)
- chain = false;
-
- if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
- struct dwc3 *dwc = dep->dwc;
- struct dwc3_trb *trb;
-
- req->needs_extra_trb = true;
-
- /* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, trb_length, true, i);
-
- /* Now prepare one extra TRB to align transfer size */
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
- maxp - rem, false, 1,
- req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
- } else if (req->request.zero && req->request.length &&
- !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- !rem && !chain) {
- struct dwc3 *dwc = dep->dwc;
- struct dwc3_trb *trb;
-
- req->needs_extra_trb = true;
-
- /* Prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, trb_length, true, i);
-
- /* Prepare one extra TRB to handle ZLP */
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- !req->direction, 1,
- req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
-
- /* Prepare one more TRB to handle MPS alignment */
- if (!req->direction) {
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
- false, 1, req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
- }
+ last_sg = true;
+
+ if (!num_trbs_left)
+ break;
+
+ if (last_sg) {
+ if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
+ break;
} else {
- dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
+ /*
+ * Look ahead to check if we have enough TRBs for the
+ * next SG entry. If not, set interrupt on this TRB to
+ * resume preparing the next SG entry when more TRBs are
+ * free.
+ */
+ if (num_trbs_left == 1 || (needs_extra_trb &&
+ num_trbs_left <= 2 &&
+ sg_dma_len(sg_next(s)) >= length))
+ must_interrupt = true;
+
+ dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
+ must_interrupt);
}
/*
@@ -1185,7 +1216,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
* we have free trbs we can continue queuing from where we
* previously stopped
*/
- if (chain)
+ if (!last_sg)
req->start_sg = sg_next(s);
req->num_queued_sgs++;
@@ -1200,68 +1231,17 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
break;
}
- if (!dwc3_calc_trbs_left(dep))
+ if (must_interrupt)
break;
}
+
+ return req->num_trbs - num_trbs;
}
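
The rewritten loop can stop mid-request when the ring is nearly full: the last prepared TRB gets IOC set, and its completion interrupt resumes preparation. That works because the request itself records the cursor, roughly as in this editorial model:

/* Model of the resumable cursor kept in struct dwc3_request. */
struct sg_cursor {
	struct scatterlist *start_sg;	/* next entry to turn into a TRB */
	unsigned int num_queued_sgs;	/* entries already on the ring   */
	unsigned int num_pending_sgs;	/* entries still waiting         */
};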
-static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
struct dwc3_request *req)
{
- unsigned int length = req->request.length;
- unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
- unsigned int rem = length % maxp;
-
- if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
- struct dwc3 *dwc = dep->dwc;
- struct dwc3_trb *trb;
-
- req->needs_extra_trb = true;
-
- /* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, length, true, 0);
-
- /* Now prepare one extra TRB to align transfer size */
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
- false, 1, req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
- } else if (req->request.zero && req->request.length &&
- !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- (IS_ALIGNED(req->request.length, maxp))) {
- struct dwc3 *dwc = dep->dwc;
- struct dwc3_trb *trb;
-
- req->needs_extra_trb = true;
-
- /* prepare normal TRB */
- dwc3_prepare_one_trb(dep, req, length, true, 0);
-
- /* Prepare one extra TRB to handle ZLP */
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
- !req->direction, 1, req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
-
- /* Prepare one more TRB to handle MPS alignment for OUT */
- if (!req->direction) {
- trb = &dep->trb_pool[dep->trb_enqueue];
- req->num_trbs++;
- __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
- false, 1, req->request.stream_id,
- req->request.short_not_ok,
- req->request.no_interrupt,
- req->request.is_last);
- }
- } else {
- dwc3_prepare_one_trb(dep, req, length, false, 0);
- }
+ return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
}
/*
@@ -1271,10 +1251,13 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
* The function goes through the requests list and sets up TRBs for the
* transfers. The function returns once there are no more TRBs available or
* it runs out of requests.
+ *
+ * Returns the number of TRBs prepared or negative errno.
*/
-static void dwc3_prepare_trbs(struct dwc3_ep *dep)
+static int dwc3_prepare_trbs(struct dwc3_ep *dep)
{
struct dwc3_request *req, *n;
+ int ret = 0;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
@@ -1289,11 +1272,14 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
* break things.
*/
list_for_each_entry(req, &dep->started_list, list) {
- if (req->num_pending_sgs > 0)
- dwc3_prepare_one_trb_sg(dep, req);
+ if (req->num_pending_sgs > 0) {
+ ret = dwc3_prepare_trbs_sg(dep, req);
+ if (!ret || req->num_pending_sgs)
+ return ret;
+ }
if (!dwc3_calc_trbs_left(dep))
- return;
+ return ret;
/*
* Don't prepare beyond a transfer. In DWC_usb32, its transfer
@@ -1301,30 +1287,32 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
* active transfer instead of stopping.
*/
if (dep->stream_capable && req->request.is_last)
- return;
+ return ret;
}
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
struct dwc3 *dwc = dep->dwc;
- int ret;
ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
dep->direction);
if (ret)
- return;
+ return ret;
req->sg = req->request.sg;
req->start_sg = req->sg;
req->num_queued_sgs = 0;
req->num_pending_sgs = req->request.num_mapped_sgs;
- if (req->num_pending_sgs > 0)
- dwc3_prepare_one_trb_sg(dep, req);
- else
- dwc3_prepare_one_trb_linear(dep, req);
+ if (req->num_pending_sgs > 0) {
+ ret = dwc3_prepare_trbs_sg(dep, req);
+ if (req->num_pending_sgs)
+ return ret;
+ } else {
+ ret = dwc3_prepare_trbs_linear(dep, req);
+ }
- if (!dwc3_calc_trbs_left(dep))
- return;
+ if (!ret || !dwc3_calc_trbs_left(dep))
+ return ret;
/*
* Don't prepare beyond a transfer. In DWC_usb32, its transfer
@@ -1332,8 +1320,10 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
* active transfer instead of stopping.
*/
if (dep->stream_capable && req->request.is_last)
- return;
+ return ret;
}
+
+ return ret;
}
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
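
dwc3_prepare_trbs() now has an explicit return contract: a negative errno (for example, a failed DMA mapping) or the number of TRBs prepared, where zero is legitimate when a stream or isoc transfer is merely being restarted. The caller-side pattern, condensed from the __dwc3_gadget_kick_transfer() hunk below:

ret = dwc3_prepare_trbs(dep);
if (ret < 0)
	return ret;		/* e.g. usb_gadget_map_request_by_dev() failed */

starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
if (!ret && !starting)
	return 0;		/* nothing new queued, nothing to restart */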
@@ -1346,12 +1336,24 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
int ret;
u32 cmd;
- if (!dwc3_calc_trbs_left(dep))
- return 0;
+ /*
+ * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
+ * This happens when we need to stop and restart a transfer such as in
+ * the case of reinitiating a stream or retrying an isoc transfer.
+ */
+ ret = dwc3_prepare_trbs(dep);
+ if (ret < 0)
+ return ret;
starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
- dwc3_prepare_trbs(dep);
+ /*
+ * If there's no new TRB prepared and we don't need to restart a
+ * transfer, there's no need to update the transfer.
+ */
+ if (!ret && !starting)
+ return ret;
+
req = next_request(&dep->started_list);
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -1539,12 +1541,12 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
if (!dwc->dis_start_transfer_quirk &&
(DWC3_VER_IS_PRIOR(DWC31, 170A) ||
DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
- if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
+ if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
return dwc3_gadget_start_isoc_quirk(dep);
}
if (desc->bInterval <= 14 &&
- dwc->gadget.speed >= USB_SPEED_HIGH) {
+ dwc->gadget->speed >= USB_SPEED_HIGH) {
u32 frame = __dwc3_gadget_get_frame(dwc);
bool rollover = frame <
(dep->frame_number & DWC3_FRNUMBER_MASK);
@@ -1600,7 +1602,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
- if (!dep->endpoint.desc) {
+ if (!dep->endpoint.desc || !dwc->pullups_connected) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
return -ESHUTDOWN;
@@ -1628,8 +1630,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
return 0;
- /* Start the transfer only after the END_TRANSFER is completed */
- if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ /*
+ * Start the transfer only after the END_TRANSFER is completed
+ * and endpoint STALL is cleared.
+ */
+ if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+ (dep->flags & DWC3_EP_WEDGE) ||
+ (dep->flags & DWC3_EP_STALL)) {
dep->flags |= DWC3_EP_DELAY_START;
return 0;
}
@@ -1648,9 +1655,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
+ if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
return __dwc3_gadget_start_isoc(dep);
- }
}
}
@@ -1788,8 +1794,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (value) {
struct dwc3_trb *trb;
- unsigned transfer_in_flight;
- unsigned started;
+ unsigned int transfer_in_flight;
+ unsigned int started;
if (dep->number > 1)
trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
@@ -1822,6 +1828,18 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
return 0;
}
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
+ dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
+ return 0;
+ }
+
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret) {
dev_err(dwc->dev, "failed to clear STALL on %s\n",
@@ -1831,18 +1849,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
- dwc3_stop_active_transfer(dep, true, true);
-
- list_for_each_entry_safe(req, tmp, &dep->started_list, list)
- dwc3_gadget_move_cancelled_request(req);
-
- list_for_each_entry_safe(req, tmp, &dep->pending_list, list)
- dwc3_gadget_move_cancelled_request(req);
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
- if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) {
- dep->flags &= ~DWC3_EP_DELAY_START;
- dwc3_gadget_ep_cleanup_cancelled_requests(dep);
- }
+ dep->flags &= ~DWC3_EP_DELAY_START;
}
return ret;
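
The reordering makes ClearStall wait for any in-flight END_TRANSFER: active requests are moved to the cancelled list first, and if an END_TRANSFER command is still pending, the endpoint is flagged DWC3_EP_PENDING_CLEAR_STALL so the command-completion handler finishes the job. A minimal model of the deferral, where stop_active_transfer() and clear_stall_now() stand in for the real helpers:

static int model_clear_halt(struct dwc3_ep *dep)
{
	stop_active_transfer(dep);	/* may issue an ENDTRANSFER cmd */

	if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
		/* Completed later, from the EPCMDCMPLT event handler. */
		dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
		return 0;
	}

	return clear_stall_now(dep);	/* safe: nothing left in flight */
}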
@@ -2010,6 +2021,21 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
+static void dwc3_stop_active_transfers(struct dwc3 *dwc)
+{
+ u32 epnum;
+
+ for (epnum = 2; epnum < dwc->num_eps; epnum++) {
+ struct dwc3_ep *dep;
+
+ dep = dwc->eps[epnum];
+ if (!dep)
+ continue;
+
+ dwc3_remove_requests(dwc, dep);
+ }
+}
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
@@ -2055,6 +2081,9 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
return 0;
}
+static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
+static void __dwc3_gadget_stop(struct dwc3 *dwc);
+
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
struct dwc3 *dwc = gadget_to_dwc(g);
@@ -2078,7 +2107,46 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
}
+ /*
+ * Synchronize any pending event handling before executing the controller
+ * halt routine.
+ */
+ if (!is_on) {
+ dwc3_gadget_disable_irq(dwc);
+ synchronize_irq(dwc->irq_gadget);
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
+
+ if (!is_on) {
+ u32 count;
+
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.8 Table 4-7, it states that for a device-initiated
+ * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
+ * command for any active transfers" before clearing the RunStop
+ * bit.
+ */
+ dwc3_stop_active_transfers(dwc);
+ __dwc3_gadget_stop(dwc);
+
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
+ * "software needs to acknowledge the events that are generated
+ * (by writing to GEVNTCOUNTn) while it is waiting for this bit
+ * to be set to '1'."
+ */
+ count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+ count &= DWC3_GEVNTCOUNT_MASK;
+ if (count > 0) {
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+ dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
+ dwc->ev_buf->length;
+ }
+ }
+
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
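
Two ordering points in this hunk are easy to miss. First, the gadget IRQ is disabled and synchronized before dwc->lock is taken; doing it under the lock would risk a deadlock, since the threaded event handler takes the same lock. Second, per the databook quote, DEVCTRLHLT will not latch while events are outstanding, so the pending count is written back to GEVNTCOUNT0 and the software event-buffer position advanced by hand. The quiesce ordering, reduced to its skeleton:

/* Sketch: quiesce the IRQ before taking the lock the handler shares. */
dwc3_gadget_disable_irq(dwc);
synchronize_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
/* ... stop transfers, ack events, clear Run/Stop ... */
spin_unlock_irqrestore(&dwc->lock, flags);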
@@ -2244,7 +2312,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget.name,
+ dwc->gadget->name,
dwc->gadget_driver->driver.name);
ret = -EBUSY;
goto err1;
@@ -2416,7 +2484,7 @@ static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
dep->endpoint.maxburst = 1;
dep->endpoint.ops = &dwc3_gadget_ep0_ops;
if (!dep->direction)
- dwc->gadget.ep0 = &dep->endpoint;
+ dwc->gadget->ep0 = &dep->endpoint;
dep->endpoint.caps.type_control = true;
@@ -2459,10 +2527,10 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
- dep->endpoint.max_streams = 15;
+ dep->endpoint.max_streams = 16;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
- &dwc->gadget.ep_list);
+ &dwc->gadget->ep_list);
dep->endpoint.caps.type_iso = true;
dep->endpoint.caps.type_bulk = true;
dep->endpoint.caps.type_int = true;
@@ -2508,10 +2576,10 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
size /= 3;
usb_ep_set_maxpacket_limit(&dep->endpoint, size);
- dep->endpoint.max_streams = 15;
+ dep->endpoint.max_streams = 16;
dep->endpoint.ops = &dwc3_gadget_ep_ops;
list_add_tail(&dep->endpoint.ep_list,
- &dwc->gadget.ep_list);
+ &dwc->gadget->ep_list);
dep->endpoint.caps.type_iso = true;
dep->endpoint.caps.type_bulk = true;
dep->endpoint.caps.type_int = true;
@@ -2572,7 +2640,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
u8 epnum;
- INIT_LIST_HEAD(&dwc->gadget.ep_list);
+ INIT_LIST_HEAD(&dwc->gadget->ep_list);
for (epnum = 0; epnum < total; epnum++) {
int ret;
@@ -2652,12 +2720,12 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
}
/*
- * If we're dealing with unaligned size OUT transfer, we will be left
- * with one TRB pending in the ring. We need to manually clear HWO bit
- * from that TRB.
+ * We use the bounce buffer for requests that need an extra TRB or an OUT
+ * ZLP. If this TRB points to the bounce buffer address, it's an MPS
+ * alignment TRB. Don't add it to the req->remaining calculation.
*/
-
- if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
+ if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
+ trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
return 1;
}
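
Identifying the padding TRB by its buffer address is sturdier than the old CHN-bit heuristic, because every alignment or ZLP TRB, and only those, points at the bounce buffer. As a predicate, using the standard lower_32_bits()/upper_32_bits() helpers (trb_is_bounce() is a hypothetical name):

static bool trb_is_bounce(const struct dwc3_trb *trb, dma_addr_t bounce_addr)
{
	return trb->bpl == lower_32_bits(bounce_addr) &&
	       trb->bph == upper_32_bits(bounce_addr);
}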
@@ -2732,26 +2800,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
- if (req->needs_extra_trb) {
- unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ req->request.actual = req->request.length - req->remaining;
+
+ if (!dwc3_gadget_ep_request_completed(req))
+ goto out;
+ if (req->needs_extra_trb) {
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
-
- /* Reclaim MPS padding TRB for ZLP */
- if (!req->direction && req->request.zero && req->request.length &&
- !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- (IS_ALIGNED(req->request.length, maxp)))
- ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
-
req->needs_extra_trb = false;
}
- req->request.actual = req->request.length - req->remaining;
-
- if (!dwc3_gadget_ep_request_completed(req))
- goto out;
-
dwc3_gadget_giveback(dep, req, status);
out:
@@ -2896,6 +2955,43 @@ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
(void) __dwc3_gadget_start_isoc(dep);
}
+static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
+
+ if (cmd != DWC3_DEPCMD_ENDTRANSFER)
+ return;
+
+ dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
+ if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
+ struct dwc3 *dwc = dep->dwc;
+
+ dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
+ if (dwc3_send_clear_stall_ep_cmd(dep)) {
+ struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
+
+ dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
+ if (dwc->delayed_status)
+ __dwc3_gadget_ep0_set_halt(ep0, 1);
+ return;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);
+ }
+
+ if ((dep->flags & DWC3_EP_DELAY_START) &&
+ !usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
+
+ dep->flags &= ~DWC3_EP_DELAY_START;
+}
+
static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
@@ -2965,7 +3061,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
{
struct dwc3_ep *dep;
u8 epnum = event->endpoint_number;
- u8 cmd;
dep = dwc->eps[epnum];
@@ -2991,18 +3086,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dwc3_gadget_endpoint_transfer_not_ready(dep, event);
break;
case DWC3_DEPEVT_EPCMDCMPLT:
- cmd = DEPEVT_PARAMETER_CMD(event->parameters);
-
- if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
- dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
- dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
- dwc3_gadget_ep_cleanup_cancelled_requests(dep);
- if ((dep->flags & DWC3_EP_DELAY_START) &&
- !usb_endpoint_xfer_isoc(dep->endpoint.desc))
- __dwc3_gadget_kick_transfer(dep);
-
- dep->flags &= ~DWC3_EP_DELAY_START;
- }
+ dwc3_gadget_endpoint_command_complete(dep, event);
break;
case DWC3_DEPEVT_XFERCOMPLETE:
dwc3_gadget_endpoint_transfer_complete(dep, event);
@@ -3019,7 +3103,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
spin_unlock(&dwc->lock);
- dwc->gadget_driver->disconnect(&dwc->gadget);
+ dwc->gadget_driver->disconnect(dwc->gadget);
spin_lock(&dwc->lock);
}
}
@@ -3028,7 +3112,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
spin_unlock(&dwc->lock);
- dwc->gadget_driver->suspend(&dwc->gadget);
+ dwc->gadget_driver->suspend(dwc->gadget);
spin_lock(&dwc->lock);
}
}
@@ -3037,7 +3121,7 @@ static void dwc3_resume_gadget(struct dwc3 *dwc)
{
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
- dwc->gadget_driver->resume(&dwc->gadget);
+ dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
}
}
@@ -3047,9 +3131,9 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return;
- if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dwc->lock);
- usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
+ usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
}
}
@@ -3150,9 +3234,9 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
- dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->gadget->speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
- usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
+ usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
dwc->connected = false;
}
@@ -3195,6 +3279,13 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
}
dwc3_reset_gadget(dwc);
+ /*
+ * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+ * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
+ * needs to ensure that it sends "a DEPENDXFER command for any active
+ * transfers."
+ */
+ dwc3_stop_active_transfers(dwc);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_TSTCTRL_MASK;
@@ -3231,8 +3322,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
switch (speed) {
case DWC3_DSTS_SUPERSPEED_PLUS:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
- dwc->gadget.ep0->maxpacket = 512;
- dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
+ dwc->gadget->ep0->maxpacket = 512;
+ dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
break;
case DWC3_DSTS_SUPERSPEED:
/*
@@ -3252,27 +3343,27 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_reset_interrupt(dwc);
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
- dwc->gadget.ep0->maxpacket = 512;
- dwc->gadget.speed = USB_SPEED_SUPER;
+ dwc->gadget->ep0->maxpacket = 512;
+ dwc->gadget->speed = USB_SPEED_SUPER;
break;
case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
- dwc->gadget.ep0->maxpacket = 64;
- dwc->gadget.speed = USB_SPEED_HIGH;
+ dwc->gadget->ep0->maxpacket = 64;
+ dwc->gadget->speed = USB_SPEED_HIGH;
break;
case DWC3_DSTS_FULLSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
- dwc->gadget.ep0->maxpacket = 64;
- dwc->gadget.speed = USB_SPEED_FULL;
+ dwc->gadget->ep0->maxpacket = 64;
+ dwc->gadget->speed = USB_SPEED_FULL;
break;
case DWC3_DSTS_LOWSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
- dwc->gadget.ep0->maxpacket = 8;
- dwc->gadget.speed = USB_SPEED_LOW;
+ dwc->gadget->ep0->maxpacket = 8;
+ dwc->gadget->speed = USB_SPEED_LOW;
break;
}
- dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;
/* Enable USB2 LPM Capability */
@@ -3340,7 +3431,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
if (dwc->gadget_driver && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
- dwc->gadget_driver->resume(&dwc->gadget);
+ dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
}
}
@@ -3511,7 +3602,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
*/
- if (dwc->gadget.state >= USB_STATE_CONFIGURED)
+ if (dwc->gadget->state >= USB_STATE_CONFIGURED)
dwc3_gadget_suspend_interrupt(dwc,
event->event_info);
}
@@ -3686,6 +3777,13 @@ out:
return irq;
}
+static void dwc_gadget_release(struct device *dev)
+{
+ struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
+
+ kfree(gadget);
+}
+
/**
* dwc3_gadget_init - initializes gadget related registers
* @dwc: pointer to our controller context structure
@@ -3696,6 +3794,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
{
int ret;
int irq;
+ struct device *dev;
irq = dwc3_gadget_get_irq(dwc);
if (irq < 0) {
@@ -3728,12 +3827,21 @@ int dwc3_gadget_init(struct dwc3 *dwc)
}
init_completion(&dwc->ep0_in_setup);
+ dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
+ if (!dwc->gadget) {
+ ret = -ENOMEM;
+ goto err3;
+ }
- dwc->gadget.ops = &dwc3_gadget_ops;
- dwc->gadget.speed = USB_SPEED_UNKNOWN;
- dwc->gadget.sg_supported = true;
- dwc->gadget.name = "dwc3-gadget";
- dwc->gadget.lpm_capable = true;
+
+ usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
+ dev = &dwc->gadget->dev;
+ dev->platform_data = dwc;
+ dwc->gadget->ops = &dwc3_gadget_ops;
+ dwc->gadget->speed = USB_SPEED_UNKNOWN;
+ dwc->gadget->sg_supported = true;
+ dwc->gadget->name = "dwc3-gadget";
+ dwc->gadget->lpm_capable = true;
/*
* FIXME We might be setting max_speed to <SUPER, however versions
@@ -3756,7 +3864,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
- dwc->gadget.max_speed = dwc->maximum_speed;
+ dwc->gadget->max_speed = dwc->maximum_speed;
/*
* REVISIT: Here we should clear all pending IRQs to be
@@ -3765,21 +3873,22 @@ int dwc3_gadget_init(struct dwc3 *dwc)
ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
if (ret)
- goto err3;
+ goto err4;
- ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
+ ret = usb_add_gadget(dwc->gadget);
if (ret) {
- dev_err(dwc->dev, "failed to register udc\n");
- goto err4;
+ dev_err(dwc->dev, "failed to add gadget\n");
+ goto err5;
}
- dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
+ dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
return 0;
-err4:
+err5:
dwc3_gadget_free_endpoints(dwc);
-
+err4:
+ usb_put_gadget(dwc->gadget);
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
@@ -3799,7 +3908,7 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
- usb_del_gadget_udc(&dwc->gadget);
+ usb_del_gadget_udc(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index bd85eb7fa9ef..0cd281949970 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -17,7 +17,7 @@
struct dwc3;
#define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint))
-#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
+#define gadget_to_dwc(g) (dev_get_platdata(&g->dev))
/* DEPCFG parameter 1 */
#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
@@ -113,6 +113,7 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index da1be01637c8..97f4f1125a41 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -104,8 +104,8 @@ DECLARE_EVENT_CLASS(dwc3_log_request,
TP_STRUCT__entry(
__string(name, req->dep->name)
__field(struct dwc3_request *, req)
- __field(unsigned, actual)
- __field(unsigned, length)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
__field(int, status)
__field(int, zero)
__field(int, short_not_ok)
@@ -246,6 +246,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->dequeue, __entry->bph, __entry->bpl,
({char *s;
int pcm = ((__entry->size >> 24) & 3) + 1;
+
switch (__entry->type) {
case USB_ENDPOINT_XFER_INT:
case USB_ENDPOINT_XFER_ISOC:
@@ -291,12 +292,12 @@ DECLARE_EVENT_CLASS(dwc3_log_ep,
TP_ARGS(dep),
TP_STRUCT__entry(
__string(name, dep->name)
- __field(unsigned, maxpacket)
- __field(unsigned, maxpacket_limit)
- __field(unsigned, max_streams)
- __field(unsigned, maxburst)
- __field(unsigned, flags)
- __field(unsigned, direction)
+ __field(unsigned int, maxpacket)
+ __field(unsigned int, maxpacket_limit)
+ __field(unsigned int, max_streams)
+ __field(unsigned int, maxburst)
+ __field(unsigned int, flags)
+ __field(unsigned int, direction)
__field(u8, trb_enqueue)
__field(u8, trb_dequeue)
),
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index e6e6176386a4..aa213c9815f6 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -19,7 +19,7 @@
static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
{
- unsigned count = 1000;
+ unsigned int count = 1000;
u32 reg;
while (count--) {
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index b075dbfad730..45b42d8f6453 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -15,6 +15,7 @@
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/usb/ch9.h>
@@ -161,17 +162,11 @@ static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
static int dbgp_wait_until_complete(void)
{
u32 ctrl;
- int loop = DBGP_TIMEOUT;
-
- do {
- ctrl = readl(&ehci_debug->control);
- /* Stop when the transaction is finished */
- if (ctrl & DBGP_DONE)
- break;
- udelay(1);
- } while (--loop > 0);
+ int ret;
- if (!loop)
+ ret = readl_poll_timeout_atomic(&ehci_debug->control, ctrl,
+ (ctrl & DBGP_DONE), 1, DBGP_TIMEOUT);
+ if (ret)
return -DBGP_TIMEOUT;
/*
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index c0507767a8e3..be4ecbabdd58 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -14,6 +14,7 @@
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
@@ -135,16 +136,9 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
u32 result;
- do {
- result = readl(ptr);
- result &= mask;
- if (result == done)
- return 0;
- udelay(delay);
- wait -= delay;
- } while (wait > 0);
-
- return -ETIMEDOUT;
+ return readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done),
+ delay, wait);
}
static void __init xdbc_bios_handoff(void)
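
Both early-USB conversions rely on the same iopoll helper: readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) re-reads *addr into val every delay_us microseconds until cond holds or timeout_us expires, returning 0 on success and -ETIMEDOUT otherwise. A self-contained sketch of the idiom:

#include <linux/iopoll.h>

/* Wait up to 1 ms for a DONE bit, polling every microsecond. */
static int wait_done(void __iomem *reg, u32 done_bit)
{
	u32 val;

	return readl_poll_timeout_atomic(reg, val, val & done_bit, 1, 1000);
}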
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 05b176c82cc5..c6d455f2bb92 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1245,7 +1245,7 @@ int usb_string_id(struct usb_composite_dev *cdev)
EXPORT_SYMBOL_GPL(usb_string_id);
/**
- * usb_string_ids() - allocate unused string IDs in batch
+ * usb_string_ids_tab() - allocate unused string IDs in batch
* @cdev: the device whose string descriptor IDs are being allocated
* @str: an array of usb_string objects to assign numbers to
* Context: single threaded during gadget setup
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 200596ea9557..46647bfac2ef 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -425,9 +425,11 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
- dev_vdbg(&cdev->gadget->dev,
- "reset acm control interface %d\n", intf);
- usb_ep_disable(acm->notify);
+ if (acm->notify->enabled) {
+ dev_vdbg(&cdev->gadget->dev,
+ "reset acm control interface %d\n", intf);
+ usb_ep_disable(acm->notify);
+ }
if (!acm->notify->desc)
if (config_ep_by_speed(cdev->gadget, f, acm->notify))
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 46af0aa07e2e..85cb15734aa8 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -698,9 +698,9 @@ drop_out:
f_midi_drop_out_substreams(midi);
}
-static void f_midi_in_tasklet(unsigned long data)
+static void f_midi_in_tasklet(struct tasklet_struct *t)
{
- struct f_midi *midi = (struct f_midi *) data;
+ struct f_midi *midi = from_tasklet(midi, t, tasklet);
f_midi_transmit(midi);
}
@@ -875,7 +875,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0;
midi->gadget = cdev->gadget;
- tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
+ tasklet_setup(&midi->tasklet, f_midi_in_tasklet);
status = f_midi_register_card(midi);
if (status < 0)
goto fail_register;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 1f638759a953..019bea8e09cc 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 13 * 1024 * 8 * 1000 * 8;
+ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+ return 4250000000U;
+ else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 3750000000U;
else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
@@ -376,7 +378,7 @@ static struct usb_ss_ep_comp_descriptor ss_ncm_bulk_comp_desc = {
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* the following 2 values can be tweaked if necessary */
- /* .bMaxBurst = 0, */
+ .bMaxBurst = 15,
/* .bmAttributes = 0, */
};
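
The ncm_bitrate() figures merit a gloss. High speed keeps the historical 13 * 512 * 8 * 1000 * 8 = 425,984,000 bps (13 packets of 512 bytes per 125 us microframe). The new SuperSpeed and SuperSpeed Plus values are theoretical bulk ceilings, and the rate travels in a 32-bit CDC ConnectionSpeedChange field, so roughly 4.29 Gbps is the largest representable value at all; that is why SS+ reports 4.25 Gbps rather than anything near its 10 Gbps line rate. In constants:

/* Worked arithmetic behind ncm_bitrate(), in bits per second. */
#define NCM_HS_BPS	(13 * 512 * 8 * 1000 * 8)	/* 425,984,000      */
#define NCM_SS_BPS	3750000000U			/* ~3.75 Gbps       */
#define NCM_SSP_BPS	4250000000U			/* near U32_MAX cap */

The bMaxBurst = 15 change above is related: advertising 16-packet bursts in the SuperSpeed companion descriptor is what lets the link approach those rates.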
@@ -1534,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ncm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- ncm_ss_function, NULL);
+ ncm_ss_function, ncm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 68697f596066..64a4112068fc 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/cdev.h>
+#include <linux/kref.h>
#include <asm/byteorder.h>
#include <linux/io.h>
@@ -64,7 +65,7 @@ struct printer_dev {
struct usb_gadget *gadget;
s8 interface;
struct usb_ep *in_ep, *out_ep;
-
+ struct kref kref;
struct list_head rx_reqs; /* List of free RX structs */
struct list_head rx_reqs_active; /* List of Active RX xfers */
struct list_head rx_buffers; /* List of completed xfers */
@@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget,
/*-------------------------------------------------------------------------*/
+static void printer_dev_free(struct kref *kref)
+{
+ struct printer_dev *dev = container_of(kref, struct printer_dev, kref);
+
+ kfree(dev);
+}
+
static struct usb_request *
printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
{
@@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd)
spin_unlock_irqrestore(&dev->lock, flags);
+ kref_get(&dev->kref);
DBG(dev, "printer_open returned %x\n", ret);
return ret;
}
@@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd)
dev->printer_status &= ~PRINTER_SELECTED;
spin_unlock_irqrestore(&dev->lock, flags);
+ kref_put(&dev->kref, printer_dev_free);
DBG(dev, "printer_close\n");
return 0;
@@ -1386,7 +1396,8 @@ static void gprinter_free(struct usb_function *f)
struct f_printer_opts *opts;
opts = container_of(f->fi, struct f_printer_opts, func_inst);
- kfree(dev);
+
+ kref_put(&dev->kref, printer_dev_free);
mutex_lock(&opts->lock);
--opts->refcnt;
mutex_unlock(&opts->lock);
@@ -1455,6 +1466,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
return ERR_PTR(-ENOMEM);
}
+ kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
dev->pnp_string = opts->pnp_string;
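
The kref converts printer_dev to shared ownership: the function instance holds one reference and every open of the character device holds another, so tearing the function down while the device node is still open no longer frees the structure under the reader. The life cycle, condensed from the hunks above:

kref_init(&dev->kref);			/* gprinter_alloc(): initial ref   */
kref_get(&dev->kref);			/* printer_open(): per-open ref    */
kref_put(&dev->kref, printer_dev_free);	/* printer_close()                 */
kref_put(&dev->kref, printer_dev_free);	/* gprinter_free(): last put frees */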
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 184165e27908..410fa89eae8f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -392,12 +392,12 @@ static void bot_set_alt(struct f_uas *fu)
fu->flags = USBG_IS_BOT;
- config_ep_by_speed(gadget, f, fu->ep_in);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB);
ret = usb_ep_enable(fu->ep_in);
if (ret)
goto err_b_in;
- config_ep_by_speed(gadget, f, fu->ep_out);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB);
ret = usb_ep_enable(fu->ep_out);
if (ret)
goto err_b_out;
@@ -852,21 +852,21 @@ static void uasp_set_alt(struct f_uas *fu)
if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
- config_ep_by_speed(gadget, f, fu->ep_in);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_in);
if (ret)
goto err_b_in;
- config_ep_by_speed(gadget, f, fu->ep_out);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_out);
if (ret)
goto err_b_out;
- config_ep_by_speed(gadget, f, fu->ep_cmd);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_cmd);
if (ret)
goto err_cmd;
- config_ep_by_speed(gadget, f, fu->ep_status);
+ config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS);
ret = usb_ep_enable(fu->ep_status);
if (ret)
goto err_status;
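Context for the f_tcm hunks: config_ep_by_speed() selects an endpoint descriptor by connection speed alone, which is ambiguous here because the function exposes two alternate settings (BOT/BBB on one, UAS on the other) with different endpoint layouts at the same speed. config_ep_by_speed_and_alt() takes the alternate-setting identifier as an extra key, so each set_alt path configures its endpoints from the descriptor set that actually belongs to that interface variant.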
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 0b9712616455..44b4352a2676 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -740,20 +740,20 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
/* Initialise video. */
ret = uvcg_video_init(&uvc->video, uvc);
if (ret < 0)
- goto error;
+ goto v4l2_error;
/* Register a V4L2 device. */
ret = uvc_register_video(uvc);
if (ret < 0) {
uvcg_err(f, "failed to register video device\n");
- goto error;
+ goto v4l2_error;
}
return 0;
-error:
+v4l2_error:
v4l2_device_unregister(&uvc->v4l2_dev);
-
+error:
if (uvc->control_req)
usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
kfree(uvc->control_buf);
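The f_uvc hunk is the classic ordered-unwind fix: a failure after v4l2_device_register() has succeeded must unregister the V4L2 device, while failures before that point must not; the old single error: label unregistered unconditionally. A minimal sketch of the idiom, assuming hypothetical demo_* helpers:

struct demo;
int demo_core_init(struct demo *d);
void demo_core_exit(struct demo *d);
int demo_register_dev(struct demo *d);
void demo_free_buffers(struct demo *d);

static int demo_bind(struct demo *d)
{
	int ret;

	ret = demo_core_init(d);
	if (ret)
		goto error;		/* nothing succeeded yet */

	ret = demo_register_dev(d);
	if (ret)
		goto dev_error;		/* core is live, must be torn down */

	return 0;

dev_error:
	demo_core_exit(d);
error:
	demo_free_buffers(d);		/* cleanup common to every failure */
	return ret;
}

Labels appear in reverse order of setup and fall through, so each failure point jumps to exactly the unwind work its progress requires.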
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index c3cc6bd14e61..31ea76adcc0d 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -93,7 +93,7 @@ struct eth_dev {
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
- gadget->speed == USB_SPEED_SUPER))
+ gadget->speed >= USB_SPEED_SUPER))
return qmult * DEFAULT_QLEN;
else
return DEFAULT_QLEN;
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 127ecc2b4317..2caccbb6e014 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser)
if (port->port.tty)
tty_hangup(port->port.tty);
}
+ port->suspended = false;
spin_unlock_irqrestore(&port->port_lock, flags);
/* disable endpoints, aborting down any active I/O */
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index e01e366d89cd..062dfac30399 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value)
return -ENODEV;
}
length = min(arg.length, event->length);
- if (copy_to_user((void __user *)value, event, sizeof(*event) + length))
+ if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) {
+ kfree(event);
return -EFAULT;
+ }
+ kfree(event);
return 0;
}
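The raw_gadget hunk plugs a leak: once the event has been dequeued, the function owns the allocation, so the early -EFAULT return must free it just as the success path does. The shape of the fix, with illustrative names:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_dev;
struct demo_event {
	unsigned int length;
	char data[];
};

/* assumed to hand over ownership of the returned event */
struct demo_event *demo_dequeue_event(struct demo_dev *dev);

static int demo_fetch(struct demo_dev *dev, void __user *uptr,
		      unsigned int limit)
{
	struct demo_event *event;
	unsigned int len;

	event = demo_dequeue_event(dev);
	if (!event)
		return -ENODEV;

	len = min(limit, event->length);
	if (copy_to_user(uptr, event, sizeof(*event) + len)) {
		kfree(event);		/* the path the patch fixes */
		return -EFAULT;
	}

	kfree(event);
	return 0;
}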
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index cdf96911e4b1..be7bb64e3594 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -135,13 +135,9 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
/* Handle device interrupts */
if (istat & vhub->port_irq_mask) {
- unsigned long bitmap = istat;
- int offset = VHUB_IRQ_DEV1_BIT;
- int size = VHUB_IRQ_DEV1_BIT + vhub->max_ports;
-
- for_each_set_bit_from(offset, &bitmap, size) {
- i = offset - VHUB_IRQ_DEV1_BIT;
- ast_vhub_dev_irq(&vhub->ports[i].dev);
+ for (i = 0; i < vhub->max_ports; i++) {
+ if (istat & VHUB_DEV_IRQ(i))
+ ast_vhub_dev_irq(&vhub->ports[i].dev);
}
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2e5a1ef14a75..87a5dea12d3c 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -67,6 +67,9 @@
#define VHUB_IRQ_HUB_EP0_SETUP (1 << 0)
#define VHUB_IRQ_ACK_ALL 0x1ff
+/* Downstream device IRQ mask. */
+#define VHUB_DEV_IRQ(n) (VHUB_IRQ_DEVICE1 << (n))
+
/* SW reset reg */
#define VHUB_SW_RESET_EP_POOL (1 << 9)
#define VHUB_SW_RESET_DMA_CONTROLLER (1 << 8)
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index a6426dd1cfef..2b893bceea45 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1056,16 +1056,19 @@ found_ep:
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
+ ep->nr_banks = 1;
break;
case USB_ENDPOINT_XFER_ISOC:
ep->fifo_size = 1024;
- ep->nr_banks = 2;
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 2;
break;
case USB_ENDPOINT_XFER_BULK:
ep->fifo_size = 512;
- ep->nr_banks = 1;
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 1;
break;
case USB_ENDPOINT_XFER_INT:
@@ -1075,7 +1078,8 @@ found_ep:
else
ep->fifo_size =
roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize));
- ep->nr_banks = 1;
+ if (ep->udc->ep_prealloc)
+ ep->nr_banks = 1;
break;
}
@@ -1091,8 +1095,6 @@ found_ep:
USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3);
ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks);
-
- ep->udc->configured_ep++;
}
return _ep;
@@ -1786,7 +1788,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
if (status & USBA_END_OF_RESET) {
struct usba_ep *ep0, *ep;
- int i, n;
+ int i;
usba_writel(udc, INT_CLR,
USBA_END_OF_RESET|USBA_END_OF_RESUME
@@ -1834,13 +1836,14 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
"ODD: EP0 configuration is invalid!\n");
/* Preallocate other endpoints */
- n = fifo_mode ? udc->num_ep : udc->configured_ep;
- for (i = 1; i < n; i++) {
+ for (i = 1; i < udc->num_ep; i++) {
ep = &udc->usba_ep[i];
- usba_ep_writel(ep, CFG, ep->ept_cfg);
- if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
- dev_err(&udc->pdev->dev,
- "ODD: EP%d configuration is invalid!\n", i);
+ if (ep->ep.claimed) {
+ usba_ep_writel(ep, CFG, ep->ept_cfg);
+ if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
+ dev_err(&udc->pdev->dev,
+ "ODD: EP%d configuration is invalid!\n", i);
+ }
}
}
@@ -2025,9 +2028,6 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
if (udc->vbus_pin)
disable_irq(gpiod_to_irq(udc->vbus_pin));
- if (fifo_mode == 0)
- udc->configured_ep = 1;
-
udc->suspended = false;
usba_stop(udc);
@@ -2090,33 +2090,51 @@ static const struct usba_udc_config udc_at91sam9rl_cfg = {
.errata = &at91sam9rl_errata,
.config = ep_config_sam9,
.num_ep = ARRAY_SIZE(ep_config_sam9),
+ .ep_prealloc = true,
};
static const struct usba_udc_config udc_at91sam9g45_cfg = {
.errata = &at91sam9g45_errata,
.config = ep_config_sam9,
.num_ep = ARRAY_SIZE(ep_config_sam9),
+ .ep_prealloc = true,
};
static const struct usba_udc_config udc_sama5d3_cfg = {
.config = ep_config_sama5,
.num_ep = ARRAY_SIZE(ep_config_sama5),
+ .ep_prealloc = true,
+};
+
+static const struct usba_udc_config udc_sam9x60_cfg = {
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+ .config = ep_config_sam9,
+ .ep_prealloc = false,
};
static const struct of_device_id atmel_udc_dt_ids[] = {
{ .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg },
{ .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg },
{ .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg },
+ { .compatible = "microchip,sam9x60-udc", .data = &udc_sam9x60_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
+static const struct of_device_id atmel_pmc_dt_ids[] = {
+ { .compatible = "atmel,at91sam9g45-pmc" },
+ { .compatible = "atmel,at91sam9rl-pmc" },
+ { .compatible = "atmel,at91sam9x5-pmc" },
+ { /* sentinel */ }
+};
+
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
+ struct device_node *pp;
int i, ret;
struct usba_ep *eps, *ep;
const struct usba_udc_config *udc_config;
@@ -2126,14 +2144,19 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
udc_config = match->data;
+ udc->ep_prealloc = udc_config->ep_prealloc;
udc->errata = udc_config->errata;
- udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
- if (IS_ERR(udc->pmc))
- udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
- if (IS_ERR(udc->pmc))
- udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
- if (udc->errata && IS_ERR(udc->pmc))
- return ERR_CAST(udc->pmc);
+ if (udc->errata) {
+ pp = of_find_matching_node_and_match(NULL, atmel_pmc_dt_ids,
+ NULL);
+ if (!pp)
+ return ERR_PTR(-ENODEV);
+
+ udc->pmc = syscon_node_to_regmap(pp);
+ of_node_put(pp);
+ if (IS_ERR(udc->pmc))
+ return ERR_CAST(udc->pmc);
+ }
udc->num_ep = 0;
@@ -2142,7 +2165,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
if (fifo_mode == 0) {
udc->num_ep = udc_config->num_ep;
- udc->configured_ep = 1;
} else {
udc->num_ep = usba_config_fifo_table(udc);
}
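The PMC lookup rework in atmel_usba_udc.c replaces three chained syscon_regmap_lookup_by_compatible() calls with a single of_find_matching_node_and_match() over a match table, and only performs the lookup when the errata actually need the PMC. The general shape, with an illustrative compatible string:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static const struct of_device_id demo_pmc_ids[] = {
	{ .compatible = "vendor,demo-pmc" },	/* illustrative */
	{ /* sentinel */ }
};

static struct regmap *demo_get_pmc(void)
{
	struct device_node *np;
	struct regmap *map;

	np = of_find_matching_node_and_match(NULL, demo_pmc_ids, NULL);
	if (!np)
		return ERR_PTR(-ENODEV);

	map = syscon_node_to_regmap(np);
	of_node_put(np);	/* as in the patch: drop the node once the regmap is in hand */
	return map;
}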
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 48e332439ed5..620472f218bc 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -317,6 +317,7 @@ struct usba_udc_config {
const struct usba_udc_errata *errata;
const struct usba_ep_config *config;
const int num_ep;
+ const bool ep_prealloc;
};
struct usba_udc {
@@ -336,7 +337,6 @@ struct usba_udc {
int irq;
struct gpio_desc *vbus_pin;
int num_ep;
- int configured_ep;
struct usba_fifo_cfg *fifo_cfg;
struct clk *pclk;
struct clk *hclk;
@@ -344,6 +344,7 @@ struct usba_udc {
bool bias_pulse_needed;
bool clocked;
bool suspended;
+ bool ep_prealloc;
u16 devstatus;
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index feaec00a3c16..9cd4a70ccdd6 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -26,6 +26,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5ff36525044e..0bef6b3f049b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -484,7 +484,7 @@ static void bdc_phy_exit(struct bdc *bdc)
static int bdc_probe(struct platform_device *pdev)
{
struct bdc *bdc;
- int ret = -ENOMEM;
+ int ret;
int irq;
u32 temp;
struct device *dev = &pdev->dev;
@@ -510,10 +510,9 @@ static int bdc_probe(struct platform_device *pdev)
bdc->clk = clk;
bdc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(bdc->regs)) {
- dev_err(dev, "ioremap error\n");
- return -ENOMEM;
- }
+ if (IS_ERR(bdc->regs))
+ return PTR_ERR(bdc->regs);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 4f82bcd31fd3..debf54205d22 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -715,6 +715,9 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
goto out;
}
+ if (!gadget->connected)
+ goto out;
+
if (gadget->deactivated) {
/*
* If gadget is deactivated we only save new state.
@@ -1164,21 +1167,18 @@ static int check_pending_gadget_drivers(struct usb_udc *udc)
}
/**
- * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * usb_initialize_gadget - initialize a gadget and its embedded struct device
* @parent: the parent device to this udc. Usually the controller driver's
* device.
- * @gadget: the gadget to be added to the list.
+ * @gadget: the gadget to be initialized.
* @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- * Calls the gadget release function in the latter case.
*/
-int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget,
void (*release)(struct device *dev))
{
- struct usb_udc *udc;
- int ret = -ENOMEM;
-
dev_set_name(&gadget->dev, "gadget");
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
@@ -1189,17 +1189,32 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
gadget->dev.release = usb_udc_nop_release;
device_initialize(&gadget->dev);
+}
+EXPORT_SYMBOL_GPL(usb_initialize_gadget);
+
+/**
+ * usb_add_gadget - adds a new gadget to the udc class driver list
+ * @gadget: the gadget to be added to the list.
+ *
+ * Returns zero on success, negative errno otherwise.
+ * Does not do a final usb_put_gadget() if an error occurs.
+ */
+int usb_add_gadget(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc;
+ int ret = -ENOMEM;
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
- goto err_put_gadget;
+ goto error;
device_initialize(&udc->dev);
udc->dev.release = usb_udc_release;
udc->dev.class = udc_class;
udc->dev.groups = usb_udc_attr_groups;
- udc->dev.parent = parent;
- ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+ udc->dev.parent = gadget->dev.parent;
+ ret = dev_set_name(&udc->dev, "%s",
+ kobject_name(&gadget->dev.parent->kobj));
if (ret)
goto err_put_udc;
@@ -1242,8 +1257,30 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
err_put_udc:
put_device(&udc->dev);
- err_put_gadget:
- put_device(&gadget->dev);
+ error:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget);
+
+/**
+ * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be added to the list.
+ * @release: a gadget release function.
+ *
+ * Returns zero on success, negative errno otherwise.
+ * Calls the gadget release function in the latter case.
+ */
+int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+ void (*release)(struct device *dev))
+{
+ int ret;
+
+ usb_initialize_gadget(parent, gadget, release);
+ ret = usb_add_gadget(gadget);
+ if (ret)
+ usb_put_gadget(gadget);
return ret;
}
EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
@@ -1311,13 +1348,14 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
}
/**
- * usb_del_gadget_udc - deletes @udc from udc_list
+ * usb_del_gadget - deletes @udc from udc_list
* @gadget: the gadget to be removed.
*
- * This, will call usb_gadget_unregister_driver() if
+ * This will call usb_gadget_unregister_driver() if
* the @udc is still busy.
+ * It will not do a final usb_put_gadget().
*/
-void usb_del_gadget_udc(struct usb_gadget *gadget)
+void usb_del_gadget(struct usb_gadget *gadget)
{
struct usb_udc *udc = gadget->udc;
@@ -1340,8 +1378,20 @@ void usb_del_gadget_udc(struct usb_gadget *gadget)
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
flush_work(&gadget->work);
device_unregister(&udc->dev);
- device_unregister(&gadget->dev);
- memset(&gadget->dev, 0x00, sizeof(gadget->dev));
+ device_del(&gadget->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget);
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * Calls usb_del_gadget() and does a final usb_put_gadget().
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+ usb_del_gadget(gadget);
+ usb_put_gadget(gadget);
}
EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
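The udc/core.c split above lets controller drivers hand their private structure's lifetime to the gadget's embedded struct device: usb_initialize_gadget() binds the release callback, usb_add_gadget() can then fail without a hidden final put, and usb_del_gadget()/usb_put_gadget() separate unlisting from the last reference drop. A sketch of the intended caller pattern (names illustrative; the net2272/net2280 conversions below adopt exactly this shape):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/gadget.h>

struct demo_udc {
	struct usb_gadget gadget;
	bool added;
	/* ... */
};

int demo_hw_init(struct demo_udc *udc);		/* assumed hardware bring-up */

static void demo_gadget_release(struct device *dev)
{
	struct demo_udc *udc = container_of(dev, struct demo_udc, gadget.dev);

	kfree(udc);
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_udc *udc;
	int ret;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	usb_initialize_gadget(&pdev->dev, &udc->gadget, demo_gadget_release);
	platform_set_drvdata(pdev, udc);

	ret = demo_hw_init(udc);
	if (ret)
		goto err_put;

	ret = usb_add_gadget(&udc->gadget);
	if (ret)
		goto err_put;
	udc->added = true;

	return 0;

err_put:
	usb_put_gadget(&udc->gadget);	/* last put: release frees udc */
	return ret;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_udc *udc = platform_get_drvdata(pdev);

	if (udc->added)
		usb_del_gadget(&udc->gadget);
	usb_put_gadget(&udc->gadget);
	return 0;
}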
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 2707be628298..fa66449b3907 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -923,9 +923,9 @@ static int qe_ep_rxframe_handle(struct qe_ep *ep)
return 0;
}
-static void ep_rx_tasklet(unsigned long data)
+static void ep_rx_tasklet(struct tasklet_struct *t)
{
- struct qe_udc *udc = (struct qe_udc *)data;
+ struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet);
struct qe_ep *ep;
struct qe_frame *pframe;
struct qe_bd __iomem *bd;
@@ -2553,8 +2553,7 @@ static int qe_udc_probe(struct platform_device *ofdev)
DMA_TO_DEVICE);
}
- tasklet_init(&udc->rx_tasklet, ep_rx_tasklet,
- (unsigned long)udc);
+ tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet);
/* request irq and disable DR */
udc->usb_irq = irq_of_parse_and_map(np, 0);
if (!udc->usb_irq) {
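The fsl_qe_udc hunks belong to the tree-wide tasklet API conversion: the callback now receives the tasklet pointer itself and recovers its container with from_tasklet(), a type-safe container_of(), instead of smuggling the context through an unsigned long. Minimal sketch:

#include <linux/interrupt.h>

struct demo {
	struct tasklet_struct rx_tasklet;
	/* ... */
};

static void demo_rx_func(struct tasklet_struct *t)
{
	struct demo *d = from_tasklet(d, t, rx_tasklet);

	/* ... process d ... */
}

static void demo_init(struct demo *d)
{
	/* old API: tasklet_init(&d->rx_tasklet, fn, (unsigned long)d); */
	tasklet_setup(&d->rx_tasklet, demo_rx_func);
}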
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index a6f7b2594c09..ad6ff9c4188e 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF))
+ if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
ep = container_of(_ep, struct fsl_ep, ep);
@@ -2061,7 +2061,7 @@ static int fsl_proc_read(struct seq_file *m, void *v)
"Sleep Enable: %d SOF Received Enable: %d "
"Reset Enable: %d\n"
"System Error Enable: %d "
- "Port Change Dectected Enable: %d\n"
+ "Port Change Detected Enable: %d\n"
"USB Error Intr Enable: %d USB Intr Enable: %d\n\n",
(tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0,
(tmp_reg & USB_INTR_SOF_EN) ? 1 : 0,
@@ -2439,11 +2439,12 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* DEN is bidirectional ep number, max_ep doubles the number */
udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
- udc_controller->irq = platform_get_irq(pdev, 0);
- if (udc_controller->irq <= 0) {
- ret = udc_controller->irq ? : -ENODEV;
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
goto err_iounmap;
}
+ udc_controller->irq = ret;
ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
driver_name, udc_controller);
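The fsl_udc_core probe fix is the standard platform_get_irq() pattern: test the raw return value before storing it, so a negative errno (including -EPROBE_DEFER) propagates instead of being laundered through the driver's field. The hunk's "<= 0" form additionally maps a historical 0 return to -ENODEV; in its simplest form the pattern is:

static int demo_probe(struct platform_device *pdev)
{
	struct demo *priv = platform_get_drvdata(pdev);	/* illustrative */
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and friends */
	priv->irq = irq;

	return 0;
}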
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 25c1d6ab5adb..3e1267d38774 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
+ pci_set_drvdata(pdev, dev);
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
@@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->regs = (struct goku_udc_regs __iomem *) base;
- pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index e8a4637a9a17..3f1c62adce4b 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -495,7 +495,7 @@ static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
}
}
-static int proc_udc_show(struct seq_file *s, void *unused)
+static int udc_show(struct seq_file *s, void *unused)
{
struct lpc32xx_udc *udc = s->private;
struct lpc32xx_ep *ep;
@@ -524,22 +524,11 @@ static int proc_udc_show(struct seq_file *s, void *unused)
return 0;
}
-static int proc_udc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_udc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations proc_ops = {
- .owner = THIS_MODULE,
- .open = proc_udc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(udc);
static void create_debug_file(struct lpc32xx_udc *udc)
{
- udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
+ udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops);
}
static void remove_debug_file(struct lpc32xx_udc *udc)
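The lpc32xx rename from proc_udc_show() to udc_show() is forced by the macro's naming contract: DEFINE_SHOW_ATTRIBUTE(name) generates name_open(), which wraps single_open(..., name_show, inode->i_private), plus a name_fops file_operations, which is what the debugfs_create_file() call now references. Sketch:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "state: ok\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);	/* emits demo_open() and demo_fops */

static void demo_debugfs_init(void *priv)
{
	/* priv lands in demo_show() as s->private via inode->i_private */
	debugfs_create_file("demo", 0444, NULL, priv, &demo_fops);
}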
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 44d1ea2307bb..23a735641c3d 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -9,7 +9,6 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -2196,7 +2195,8 @@ static int net2272_present(struct net2272 *dev)
static void
net2272_gadget_release(struct device *_dev)
{
- struct net2272 *dev = dev_get_drvdata(_dev);
+ struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
+
kfree(dev);
}
@@ -2205,7 +2205,8 @@ net2272_gadget_release(struct device *_dev)
static void
net2272_remove(struct net2272 *dev)
{
- usb_del_gadget_udc(&dev->gadget);
+ if (dev->added)
+ usb_del_gadget(&dev->gadget);
free_irq(dev->irq, dev);
iounmap(dev->base_addr);
device_remove_file(dev->dev, &dev_attr_registers);
@@ -2235,6 +2236,7 @@ static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
/* the "gadget" abstracts/virtualizes the controller */
ret->gadget.name = driver_name;
+ usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
return ret;
}
@@ -2273,10 +2275,10 @@ net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
if (ret)
goto err_irq;
- ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
- net2272_gadget_release);
+ ret = usb_add_gadget(&dev->gadget);
if (ret)
goto err_add_udc;
+ dev->added = 1;
return 0;
@@ -2451,7 +2453,7 @@ net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev) < 0) {
ret = -ENODEV;
- goto err_free;
+ goto err_put;
}
pci_set_master(pdev);
@@ -2474,8 +2476,8 @@ net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_pci:
pci_disable_device(pdev);
- err_free:
- kfree(dev);
+ err_put:
+ usb_put_gadget(&dev->gadget);
return ret;
}
@@ -2536,7 +2538,7 @@ net2272_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
- kfree(dev);
+ usb_put_gadget(&dev->gadget);
}
/* Table of matching PCI IDs */
@@ -2649,7 +2651,7 @@ net2272_plat_probe(struct platform_device *pdev)
err_req:
release_mem_region(base, len);
err:
- kfree(dev);
+ usb_put_gadget(&dev->gadget);
return ret;
}
@@ -2664,7 +2666,7 @@ net2272_plat_remove(struct platform_device *pdev)
release_mem_region(pdev->resource[0].start,
resource_size(&pdev->resource[0]));
- kfree(dev);
+ usb_put_gadget(&dev->gadget);
return 0;
}
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index 87d0ab9ffeeb..c669308111c2 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -441,6 +441,7 @@ struct net2272 {
unsigned protocol_stall:1,
softconnect:1,
wakeup:1,
+ added:1,
dma_eot_polarity:1,
dma_dack_polarity:1,
dma_dreq_polarity:1,
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 7530bd9a08c4..fc9f99fe7f37 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -52,6 +52,7 @@
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -360,18 +361,16 @@ print_err:
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = readl(ptr);
- if (result == ~(u32)0) /* "device unplugged" */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ ret = readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done ||
+ result == U32_MAX),
+ 1, usec);
+ if (result == U32_MAX) /* device unplugged */
+ return -ENODEV;
+
+ return ret;
}
static const struct usb_ep_ops net2280_ep_ops;
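This handshake() rewrite (repeated below for fotg210-hcd, oxu210hp-hcd and pci-quirks) replaces the open-coded udelay loop with the iopoll helper. readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) busy-polls readl(addr) into val until cond holds or the timeout elapses, returning 0 or -ETIMEDOUT without ever sleeping; the all-ones check stays with the caller because it means the device was unplugged, not that the poll timed out. Condensed:

#include <linux/iopoll.h>

static int demo_handshake(void __iomem *reg, u32 mask, u32 done, int usec)
{
	u32 val;
	int ret;

	ret = readl_poll_timeout_atomic(reg, val,
					(val & mask) == done || val == U32_MAX,
					1, usec);
	if (val == U32_MAX)		/* bus reads all-ones: unplugged */
		return -ENODEV;

	return ret;			/* 0 on success, -ETIMEDOUT on timeout */
}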
@@ -3561,7 +3560,7 @@ static irqreturn_t net2280_irq(int irq, void *_dev)
static void gadget_release(struct device *_dev)
{
- struct net2280 *dev = dev_get_drvdata(_dev);
+ struct net2280 *dev = container_of(_dev, struct net2280, gadget.dev);
kfree(dev);
}
@@ -3572,7 +3571,8 @@ static void net2280_remove(struct pci_dev *pdev)
{
struct net2280 *dev = pci_get_drvdata(pdev);
- usb_del_gadget_udc(&dev->gadget);
+ if (dev->added)
+ usb_del_gadget(&dev->gadget);
BUG_ON(dev->driver);
@@ -3603,6 +3603,7 @@ static void net2280_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_registers);
ep_info(dev, "unbind\n");
+ usb_put_gadget(&dev->gadget);
}
/* wrap this driver around the specified device, but
@@ -3624,6 +3625,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, dev);
+ usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release);
spin_lock_init(&dev->lock);
dev->quirks = id->driver_data;
dev->pdev = pdev;
@@ -3774,10 +3776,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (retval)
goto done;
- retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
- gadget_release);
+ retval = usb_add_gadget(&dev->gadget);
if (retval)
goto done;
+ dev->added = 1;
return 0;
done:
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 85d3ca1698ba..7da3dc1e9729 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -156,6 +156,7 @@ struct net2280 {
softconnect : 1,
got_irq : 1,
region:1,
+ added:1,
u1_enable:1,
u2_enable:1,
ltm_enable:1,
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 8afc31d94b0e..a3c1fc924268 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -12,12 +12,9 @@
#include <linux/interrupt.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
-/* GPIO port for VBUS detecting */
-static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */
-
#define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
#define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
@@ -301,13 +298,13 @@ struct pch_udc_ep {
/**
* struct pch_vbus_gpio_data - Structure holding GPIO information
* for detecting VBUS
- * @port: gpio port number
+ * @port: gpio descriptor for the VBUS GPIO
* @intr: gpio interrupt number
* @irq_work_fall: Structure for WorkQueue
* @irq_work_rise: Structure for WorkQueue
*/
struct pch_vbus_gpio_data {
- int port;
+ struct gpio_desc *port;
int intr;
struct work_struct irq_work_fall;
struct work_struct irq_work_rise;
@@ -1254,7 +1251,7 @@ static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
int vbus = 0;
if (dev->vbus_gpio.port)
- vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0;
+ vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
else
vbus = -1;
@@ -1356,42 +1353,30 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
/**
* pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
* @dev: Reference to the driver structure
- * @vbus_gpio_port: Number of GPIO port to detect gpio
*
* Return codes:
* 0: Success
* -EINVAL: GPIO port is invalid or can't be initialized.
*/
-static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port)
+static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
{
int err;
int irq_num = 0;
+ struct gpio_desc *gpiod;
- dev->vbus_gpio.port = 0;
+ dev->vbus_gpio.port = NULL;
dev->vbus_gpio.intr = 0;
- if (vbus_gpio_port <= -1)
- return -EINVAL;
-
- err = gpio_is_valid(vbus_gpio_port);
- if (!err) {
- pr_err("%s: gpio port %d is invalid\n",
- __func__, vbus_gpio_port);
- return -EINVAL;
- }
-
- err = gpio_request(vbus_gpio_port, "pch_vbus");
- if (err) {
- pr_err("%s: can't request gpio port %d, err: %d\n",
- __func__, vbus_gpio_port, err);
- return -EINVAL;
- }
+ /* Retrieve the GPIO line from the USB gadget device */
+ gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "pch_vbus");
- dev->vbus_gpio.port = vbus_gpio_port;
- gpio_direction_input(vbus_gpio_port);
+ dev->vbus_gpio.port = gpiod;
INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
- irq_num = gpio_to_irq(vbus_gpio_port);
+ irq_num = gpiod_to_irq(gpiod);
if (irq_num > 0) {
irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
@@ -1417,9 +1402,6 @@ static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
{
if (dev->vbus_gpio.intr)
free_irq(dev->vbus_gpio.intr, dev);
-
- if (dev->vbus_gpio.port)
- gpio_free(dev->vbus_gpio.port);
}
/**
@@ -2894,7 +2876,7 @@ static int pch_udc_pcd_init(struct pch_udc_dev *dev)
{
pch_udc_init(dev);
pch_udc_pcd_reinit(dev);
- pch_vbus_gpio_init(dev, vbus_gpio_port);
+ pch_vbus_gpio_init(dev);
return 0;
}
@@ -3096,6 +3078,13 @@ static int pch_udc_probe(struct pci_dev *pdev,
dev->base_addr = pcim_iomap_table(pdev)[bar];
+ /*
+ * FIXME: add a GPIO descriptor table to pdev.dev using
+ * gpiod_add_lookup_table() from <linux/gpio/machine.h> based on
+ * the PCI subsystem ID. The system-dependent GPIO is necessary for
+ * VBUS operation.
+ */
+
/* initialize the hardware */
if (pch_udc_pcd_init(dev))
return -ENODEV;
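The pch_udc conversion swaps the legacy integer GPIO API (module parameter plus gpio_request()/gpio_direction_input()/gpio_free()) for GPIO descriptors: the consumer asks its device for an anonymous line (con_id NULL), and the devm_ variant removes the need for gpio_free() entirely. The FIXME above concerns the missing board-side half, a lookup table that routes a physical line to this device. Both halves sketched, with illustrative names:

#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>

/* board side: route line 5 of "gpio-chip0" to the device, anonymous con_id */
static struct gpiod_lookup_table demo_vbus_gpio = {
	.dev_id = "0000:02:02.4",	/* illustrative PCI device name */
	.table = {
		GPIO_LOOKUP("gpio-chip0", 5, NULL, GPIO_ACTIVE_HIGH),
		{ }
	},
};

/* consumer side, roughly what pch_vbus_gpio_init() now does */
static int demo_vbus_init(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod_add_lookup_table(&demo_vbus_gpio);	/* normally board code */

	gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	gpiod_set_consumer_name(gpiod, "demo_vbus");
	*out = gpiod;
	/* read later with gpiod_get_value(); IRQ via gpiod_to_irq() */
	return 0;
}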
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index aaca1b0a2f59..7bd5182ce3ef 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -30,8 +30,6 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
-#include <mach/regs-s3c2443-clock.h>
-
#define S3C_HSUDC_REG(x) (x)
/* Non-Indexed Registers */
@@ -186,53 +184,6 @@ static inline void __orr32(void __iomem *ptr, u32 val)
writel(readl(ptr) | val, ptr);
}
-static void s3c_hsudc_init_phy(void)
-{
- u32 cfg;
-
- cfg = readl(S3C2443_PWRCFG) | S3C2443_PWRCFG_USBPHY;
- writel(cfg, S3C2443_PWRCFG);
-
- cfg = readl(S3C2443_URSTCON);
- cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
- writel(cfg, S3C2443_URSTCON);
- mdelay(1);
-
- cfg = readl(S3C2443_URSTCON);
- cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
- writel(cfg, S3C2443_URSTCON);
-
- cfg = readl(S3C2443_PHYCTRL);
- cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT);
- cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL);
- writel(cfg, S3C2443_PHYCTRL);
-
- cfg = readl(S3C2443_PHYPWR);
- cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN |
- S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK |
- S3C2443_PHYPWR_ANALOG_PD);
- cfg |= S3C2443_PHYPWR_COMMON_ON;
- writel(cfg, S3C2443_PHYPWR);
-
- cfg = readl(S3C2443_UCLKCON);
- cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN |
- S3C2443_UCLKCON_TCLKEN);
- writel(cfg, S3C2443_UCLKCON);
-}
-
-static void s3c_hsudc_uninit_phy(void)
-{
- u32 cfg;
-
- cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY;
- writel(cfg, S3C2443_PWRCFG);
-
- writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR);
-
- cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN;
- writel(cfg, S3C2443_UCLKCON);
-}
-
/**
* s3c_hsudc_complete_request - Complete a transfer request.
* @hsep: Endpoint to which the request belongs.
@@ -1188,7 +1139,8 @@ static int s3c_hsudc_start(struct usb_gadget *gadget,
pm_runtime_get_sync(hsudc->dev);
- s3c_hsudc_init_phy();
+ if (hsudc->pd->phy_init)
+ hsudc->pd->phy_init();
if (hsudc->pd->gpio_init)
hsudc->pd->gpio_init();
@@ -1210,7 +1162,8 @@ static int s3c_hsudc_stop(struct usb_gadget *gadget)
spin_lock_irqsave(&hsudc->lock, flags);
hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- s3c_hsudc_uninit_phy();
+ if (hsudc->pd->phy_uninit)
+ hsudc->pd->phy_uninit();
pm_runtime_put(hsudc->dev);
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index bc2e8eb737c3..f1ea51476add 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -36,15 +36,11 @@
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
-#include <mach/irqs.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-udc.h>
#include <linux/platform_data/usb-s3c2410_udc.h>
-
#include "s3c2410_udc.h"
+#include "s3c2410_udc_regs.h"
#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
@@ -57,6 +53,7 @@ static struct s3c2410_udc *the_controller;
static struct clk *udc_clock;
static struct clk *usb_bus_clock;
static void __iomem *base_addr;
+static int irq_usbd;
static u64 rsrc_start;
static u64 rsrc_len;
static struct dentry *s3c2410_udc_debugfs_root;
@@ -835,8 +832,6 @@ static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
}
}
-#include <mach/regs-irq.h>
-
/*
* s3c2410_udc_irq - interrupt handler
*/
@@ -977,7 +972,7 @@ static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
}
}
- dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", IRQ_USBD);
+ dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", irq_usbd);
/* Restore old index */
udc_write(idx, S3C2410_UDC_INDEX_REG);
@@ -1270,7 +1265,6 @@ static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_udc *udc;
int retval = -EINVAL;
unsigned long flags;
struct s3c2410_request *req = NULL;
@@ -1283,8 +1277,6 @@ static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
if (!_ep || !_req)
return retval;
- udc = to_s3c2410_udc(ep->gadget);
-
local_irq_save(flags);
list_for_each_entry(req, &ep->queue, queue) {
@@ -1780,13 +1772,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
spin_lock_init(&udc->lock);
udc_info = dev_get_platdata(&pdev->dev);
- rsrc_start = S3C2410_PA_USBDEV;
- rsrc_len = S3C24XX_SZ_USBDEV;
-
- if (!request_mem_region(rsrc_start, rsrc_len, gadget_name))
- return -EBUSY;
-
- base_addr = ioremap(rsrc_start, rsrc_len);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (!base_addr) {
- retval = -ENOMEM;
+ if (IS_ERR(base_addr)) {
+ retval = PTR_ERR(base_addr);
goto err_mem;
@@ -1798,17 +1784,19 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
s3c2410_udc_disable(udc);
s3c2410_udc_reinit(udc);
+ irq_usbd = platform_get_irq(pdev, 0);
+
/* irq setup after old hardware state is cleaned up */
- retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
+ retval = request_irq(irq_usbd, s3c2410_udc_irq,
0, gadget_name, udc);
if (retval != 0) {
- dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
+ dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
retval = -EBUSY;
goto err_map;
}
- dev_dbg(dev, "got irq %i\n", IRQ_USBD);
+ dev_dbg(dev, "got irq %i\n", irq_usbd);
if (udc_info && udc_info->vbus_pin > 0) {
retval = gpio_request(udc_info->vbus_pin, "udc vbus");
@@ -1875,7 +1863,7 @@ err_gpio_claim:
if (udc_info && udc_info->vbus_pin > 0)
gpio_free(udc_info->vbus_pin);
err_int:
- free_irq(IRQ_USBD, udc);
+ free_irq(irq_usbd, udc);
err_map:
iounmap(base_addr);
err_mem:
@@ -1909,7 +1897,7 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
free_irq(irq, udc);
}
- free_irq(IRQ_USBD, udc);
+ free_irq(irq_usbd, udc);
iounmap(base_addr);
release_mem_region(rsrc_start, rsrc_len);
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index bdcaa8dd300f..68bdf3e5aac2 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -90,6 +90,7 @@ struct s3c2410_udc {
unsigned req_pending : 1;
u8 vbus;
struct dentry *regs_info;
+ int irq;
};
#define to_s3c2410(g) (container_of((g), struct s3c2410_udc, gadget))
diff --git a/drivers/usb/gadget/udc/s3c2410_udc_regs.h b/drivers/usb/gadget/udc/s3c2410_udc_regs.h
new file mode 100644
index 000000000000..d8d2eeaca088
--- /dev/null
+++ b/drivers/usb/gadget/udc/s3c2410_udc_regs.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2004 Herbert Poetzl <herbert@13thfloor.at>
+ */
+
+#ifndef __ASM_ARCH_REGS_UDC_H
+#define __ASM_ARCH_REGS_UDC_H
+
+#define S3C2410_USBDREG(x) (x)
+
+#define S3C2410_UDC_FUNC_ADDR_REG S3C2410_USBDREG(0x0140)
+#define S3C2410_UDC_PWR_REG S3C2410_USBDREG(0x0144)
+#define S3C2410_UDC_EP_INT_REG S3C2410_USBDREG(0x0148)
+
+#define S3C2410_UDC_USB_INT_REG S3C2410_USBDREG(0x0158)
+#define S3C2410_UDC_EP_INT_EN_REG S3C2410_USBDREG(0x015c)
+
+#define S3C2410_UDC_USB_INT_EN_REG S3C2410_USBDREG(0x016c)
+
+#define S3C2410_UDC_FRAME_NUM1_REG S3C2410_USBDREG(0x0170)
+#define S3C2410_UDC_FRAME_NUM2_REG S3C2410_USBDREG(0x0174)
+
+#define S3C2410_UDC_EP0_FIFO_REG S3C2410_USBDREG(0x01c0)
+#define S3C2410_UDC_EP1_FIFO_REG S3C2410_USBDREG(0x01c4)
+#define S3C2410_UDC_EP2_FIFO_REG S3C2410_USBDREG(0x01c8)
+#define S3C2410_UDC_EP3_FIFO_REG S3C2410_USBDREG(0x01cc)
+#define S3C2410_UDC_EP4_FIFO_REG S3C2410_USBDREG(0x01d0)
+
+#define S3C2410_UDC_EP1_DMA_CON S3C2410_USBDREG(0x0200)
+#define S3C2410_UDC_EP1_DMA_UNIT S3C2410_USBDREG(0x0204)
+#define S3C2410_UDC_EP1_DMA_FIFO S3C2410_USBDREG(0x0208)
+#define S3C2410_UDC_EP1_DMA_TTC_L S3C2410_USBDREG(0x020c)
+#define S3C2410_UDC_EP1_DMA_TTC_M S3C2410_USBDREG(0x0210)
+#define S3C2410_UDC_EP1_DMA_TTC_H S3C2410_USBDREG(0x0214)
+
+#define S3C2410_UDC_EP2_DMA_CON S3C2410_USBDREG(0x0218)
+#define S3C2410_UDC_EP2_DMA_UNIT S3C2410_USBDREG(0x021c)
+#define S3C2410_UDC_EP2_DMA_FIFO S3C2410_USBDREG(0x0220)
+#define S3C2410_UDC_EP2_DMA_TTC_L S3C2410_USBDREG(0x0224)
+#define S3C2410_UDC_EP2_DMA_TTC_M S3C2410_USBDREG(0x0228)
+#define S3C2410_UDC_EP2_DMA_TTC_H S3C2410_USBDREG(0x022c)
+
+#define S3C2410_UDC_EP3_DMA_CON S3C2410_USBDREG(0x0240)
+#define S3C2410_UDC_EP3_DMA_UNIT S3C2410_USBDREG(0x0244)
+#define S3C2410_UDC_EP3_DMA_FIFO S3C2410_USBDREG(0x0248)
+#define S3C2410_UDC_EP3_DMA_TTC_L S3C2410_USBDREG(0x024c)
+#define S3C2410_UDC_EP3_DMA_TTC_M S3C2410_USBDREG(0x0250)
+#define S3C2410_UDC_EP3_DMA_TTC_H S3C2410_USBDREG(0x0254)
+
+#define S3C2410_UDC_EP4_DMA_CON S3C2410_USBDREG(0x0258)
+#define S3C2410_UDC_EP4_DMA_UNIT S3C2410_USBDREG(0x025c)
+#define S3C2410_UDC_EP4_DMA_FIFO S3C2410_USBDREG(0x0260)
+#define S3C2410_UDC_EP4_DMA_TTC_L S3C2410_USBDREG(0x0264)
+#define S3C2410_UDC_EP4_DMA_TTC_M S3C2410_USBDREG(0x0268)
+#define S3C2410_UDC_EP4_DMA_TTC_H S3C2410_USBDREG(0x026c)
+
+#define S3C2410_UDC_INDEX_REG S3C2410_USBDREG(0x0178)
+
+/* indexed registers */
+
+#define S3C2410_UDC_MAXP_REG S3C2410_USBDREG(0x0180)
+
+#define S3C2410_UDC_EP0_CSR_REG S3C2410_USBDREG(0x0184)
+
+#define S3C2410_UDC_IN_CSR1_REG S3C2410_USBDREG(0x0184)
+#define S3C2410_UDC_IN_CSR2_REG S3C2410_USBDREG(0x0188)
+
+#define S3C2410_UDC_OUT_CSR1_REG S3C2410_USBDREG(0x0190)
+#define S3C2410_UDC_OUT_CSR2_REG S3C2410_USBDREG(0x0194)
+#define S3C2410_UDC_OUT_FIFO_CNT1_REG S3C2410_USBDREG(0x0198)
+#define S3C2410_UDC_OUT_FIFO_CNT2_REG S3C2410_USBDREG(0x019c)
+
+#define S3C2410_UDC_FUNCADDR_UPDATE (1 << 7)
+
+#define S3C2410_UDC_PWR_ISOUP (1 << 7) /* R/W */
+#define S3C2410_UDC_PWR_RESET (1 << 3) /* R */
+#define S3C2410_UDC_PWR_RESUME (1 << 2) /* R/W */
+#define S3C2410_UDC_PWR_SUSPEND (1 << 1) /* R */
+#define S3C2410_UDC_PWR_ENSUSPEND (1 << 0) /* R/W */
+
+#define S3C2410_UDC_PWR_DEFAULT (0x00)
+
+#define S3C2410_UDC_INT_EP4 (1 << 4) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP3 (1 << 3) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP2 (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP1 (1 << 1) /* R/W (clear only) */
+#define S3C2410_UDC_INT_EP0 (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_USBINT_RESET (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_USBINT_RESUME (1 << 1) /* R/W (clear only) */
+#define S3C2410_UDC_USBINT_SUSPEND (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_INTE_EP4 (1 << 4) /* R/W */
+#define S3C2410_UDC_INTE_EP3 (1 << 3) /* R/W */
+#define S3C2410_UDC_INTE_EP2 (1 << 2) /* R/W */
+#define S3C2410_UDC_INTE_EP1 (1 << 1) /* R/W */
+#define S3C2410_UDC_INTE_EP0 (1 << 0) /* R/W */
+
+#define S3C2410_UDC_USBINTE_RESET (1 << 2) /* R/W */
+#define S3C2410_UDC_USBINTE_SUSPEND (1 << 0) /* R/W */
+
+#define S3C2410_UDC_INDEX_EP0 (0x00)
+#define S3C2410_UDC_INDEX_EP1 (0x01)
+#define S3C2410_UDC_INDEX_EP2 (0x02)
+#define S3C2410_UDC_INDEX_EP3 (0x03)
+#define S3C2410_UDC_INDEX_EP4 (0x04)
+
+#define S3C2410_UDC_ICSR1_CLRDT (1 << 6) /* R/W */
+#define S3C2410_UDC_ICSR1_SENTSTL (1 << 5) /* R/W (clear only) */
+#define S3C2410_UDC_ICSR1_SENDSTL (1 << 4) /* R/W */
+#define S3C2410_UDC_ICSR1_FFLUSH (1 << 3) /* W (set only) */
+#define S3C2410_UDC_ICSR1_UNDRUN (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_ICSR1_PKTRDY (1 << 0) /* R/W (set only) */
+
+#define S3C2410_UDC_ICSR2_AUTOSET (1 << 7) /* R/W */
+#define S3C2410_UDC_ICSR2_ISO (1 << 6) /* R/W */
+#define S3C2410_UDC_ICSR2_MODEIN (1 << 5) /* R/W */
+#define S3C2410_UDC_ICSR2_DMAIEN (1 << 4) /* R/W */
+
+#define S3C2410_UDC_OCSR1_CLRDT (1 << 7) /* R/W */
+#define S3C2410_UDC_OCSR1_SENTSTL (1 << 6) /* R/W (clear only) */
+#define S3C2410_UDC_OCSR1_SENDSTL (1 << 5) /* R/W */
+#define S3C2410_UDC_OCSR1_FFLUSH (1 << 4) /* R/W */
+#define S3C2410_UDC_OCSR1_DERROR (1 << 3) /* R */
+#define S3C2410_UDC_OCSR1_OVRRUN (1 << 2) /* R/W (clear only) */
+#define S3C2410_UDC_OCSR1_PKTRDY (1 << 0) /* R/W (clear only) */
+
+#define S3C2410_UDC_OCSR2_AUTOCLR (1 << 7) /* R/W */
+#define S3C2410_UDC_OCSR2_ISO (1 << 6) /* R/W */
+#define S3C2410_UDC_OCSR2_DMAIEN (1 << 5) /* R/W */
+
+#define S3C2410_UDC_EP0_CSR_OPKRDY (1 << 0)
+#define S3C2410_UDC_EP0_CSR_IPKRDY (1 << 1)
+#define S3C2410_UDC_EP0_CSR_SENTSTL (1 << 2)
+#define S3C2410_UDC_EP0_CSR_DE (1 << 3)
+#define S3C2410_UDC_EP0_CSR_SE (1 << 4)
+#define S3C2410_UDC_EP0_CSR_SENDSTL (1 << 5)
+#define S3C2410_UDC_EP0_CSR_SOPKTRDY (1 << 6)
+#define S3C2410_UDC_EP0_CSR_SSE (1 << 7)
+
+#define S3C2410_UDC_MAXP_8 (1 << 0)
+#define S3C2410_UDC_MAXP_16 (1 << 1)
+#define S3C2410_UDC_MAXP_32 (1 << 2)
+#define S3C2410_UDC_MAXP_64 (1 << 3)
+
+#endif
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index d6ff68c06911..580bef8eb4cb 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -705,11 +705,11 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
err = phy_power_on(xudc->curr_utmi_phy);
if (err < 0)
- dev_err(xudc->dev, "utmi power on failed %d\n", err);
+ dev_err(xudc->dev, "UTMI power on failed: %d\n", err);
err = phy_power_on(xudc->curr_usb3_phy);
if (err < 0)
- dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
+ dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err);
dev_dbg(xudc->dev, "device mode on\n");
@@ -759,11 +759,11 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
err = phy_power_off(xudc->curr_utmi_phy);
if (err < 0)
- dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
+ dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);
err = phy_power_off(xudc->curr_usb3_phy);
if (err < 0)
- dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
+ dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err);
pm_runtime_put(xudc->dev);
}
@@ -1539,7 +1539,7 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
return -EINVAL;
if (usb_endpoint_xfer_isoc(ep->desc)) {
- dev_err(xudc->dev, "can't halt isoc EP\n");
+ dev_err(xudc->dev, "can't halt isochronous EP\n");
return -ENOTSUPP;
}
@@ -1788,7 +1788,7 @@ static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
if (usb_endpoint_xfer_isoc(desc)) {
if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) {
- dev_err(xudc->dev, "too many isoch endpoints\n");
+ dev_err(xudc->dev, "too many isochronous endpoints\n");
return -EBUSY;
}
xudc->nr_isoch_eps++;
@@ -3509,7 +3509,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n",
+ dev_err(xudc->dev, "failed to get usb2-%d PHY: %d\n",
i, err);
goto clean_up;
@@ -3539,12 +3539,12 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->usb3_phy[i])) {
err = PTR_ERR(xudc->usb3_phy[i]);
if (err != -EPROBE_DEFER)
- dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n",
+ dev_err(xudc->dev, "failed to get usb3-%d PHY: %d\n",
usb3, err);
goto clean_up;
} else if (xudc->usb3_phy[i])
- dev_dbg(xudc->dev, "usb3_phy-%d registered", usb3);
+ dev_dbg(xudc->dev, "usb3-%d PHY registered\n", usb3);
}
return err;
@@ -3577,13 +3577,13 @@ static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
for (i = 0; i < xudc->soc->num_phys; i++) {
err = phy_init(xudc->utmi_phy[i]);
if (err < 0) {
- dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err);
goto exit_phy;
}
err = phy_init(xudc->usb3_phy[i]);
if (err < 0) {
- dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err);
goto exit_phy;
}
}
@@ -3692,34 +3692,33 @@ static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
struct device *dev = xudc->dev;
int err;
- xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
- "dev");
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev");
if (IS_ERR(xudc->genpd_dev_device)) {
err = PTR_ERR(xudc->genpd_dev_device);
- dev_err(dev, "failed to get dev pm-domain: %d\n", err);
+ dev_err(dev, "failed to get device power domain: %d\n", err);
return err;
}
xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
if (IS_ERR(xudc->genpd_dev_ss)) {
err = PTR_ERR(xudc->genpd_dev_ss);
- dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err);
return err;
}
xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS);
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
if (!xudc->genpd_dl_device) {
- dev_err(dev, "adding usb device device link failed!\n");
+ dev_err(dev, "failed to add USB device link\n");
return -ENODEV;
}
xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS);
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
if (!xudc->genpd_dl_ss) {
- dev_err(dev, "adding superspeed device link failed!\n");
+ dev_err(dev, "failed to add SuperSpeed device link\n");
return -ENODEV;
}
@@ -3733,7 +3732,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
unsigned int i;
int err;
- xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_ATOMIC);
+ xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
if (!xudc)
return -ENOMEM;
@@ -3772,18 +3771,19 @@ static int tegra_xudc_probe(struct platform_device *pdev)
return err;
}
- xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
- sizeof(*xudc->clks), GFP_KERNEL);
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks),
+ GFP_KERNEL);
if (!xudc->clks)
return -ENOMEM;
for (i = 0; i < xudc->soc->num_clks; i++)
xudc->clks[i].id = xudc->soc->clock_names[i];
- err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
- xudc->clks);
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
if (err) {
- dev_err(xudc->dev, "failed to request clks %d\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(xudc->dev, "failed to request clocks: %d\n", err);
+
return err;
}
@@ -3798,7 +3798,9 @@ static int tegra_xudc_probe(struct platform_device *pdev)
err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
xudc->supplies);
if (err) {
- dev_err(xudc->dev, "failed to request regulators %d\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(xudc->dev, "failed to request regulators: %d\n", err);
+
return err;
}
@@ -3808,7 +3810,7 @@ static int tegra_xudc_probe(struct platform_device *pdev)
err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
if (err) {
- dev_err(xudc->dev, "failed to enable regulators %d\n", err);
+ dev_err(xudc->dev, "failed to enable regulators: %d\n", err);
goto put_padctl;
}
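The tegra-xudc probe hunks silence error spam on deferred probe: -EPROBE_DEFER is an expected, retried outcome, so it is filtered out before dev_err(). The same era introduced dev_err_probe(), which folds the filter, the message and the return value into one call; with it the clock hunk could presumably shrink to:

	err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "failed to request clocks\n");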
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index b1b777f33521..337b425dd4b0 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -498,15 +498,4 @@ static struct bcma_driver bcma_hcd_driver = {
.suspend = bcma_hcd_suspend,
.resume = bcma_hcd_resume,
};
-
-static int __init bcma_hcd_init(void)
-{
- return bcma_driver_register(&bcma_hcd_driver);
-}
-module_init(bcma_hcd_init);
-
-static void __exit bcma_hcd_exit(void)
-{
- bcma_driver_unregister(&bcma_hcd_driver);
-}
-module_exit(bcma_hcd_exit);
+module_bcma_driver(bcma_hcd_driver);
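module_bcma_driver() is one of the module_driver() family of wrappers and generates exactly the boilerplate the bcma-hcd hunk deletes (generated identifier names aside):

/* roughly what module_bcma_driver(bcma_hcd_driver) expands to */
static int __init bcma_hcd_driver_init(void)
{
	return bcma_driver_register(&bcma_hcd_driver);
}
module_init(bcma_hcd_driver_init);

static void __exit bcma_hcd_driver_exit(void)
{
	bcma_driver_unregister(&bcma_hcd_driver);
}
module_exit(bcma_hcd_driver_exit);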
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index adaf8fb4b459..6b5a7a873e01 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -37,8 +37,7 @@ static const char hcd_name[] = "npcm7xx-ehci";
static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
-#ifdef CONFIG_PM_SLEEP
-static int ehci_npcm7xx_drv_suspend(struct device *dev)
+static int __maybe_unused ehci_npcm7xx_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
bool do_wakeup = device_may_wakeup(dev);
@@ -46,14 +45,13 @@ static int ehci_npcm7xx_drv_suspend(struct device *dev)
return ehci_suspend(hcd, do_wakeup);
}
-static int ehci_npcm7xx_drv_resume(struct device *dev)
+static int __maybe_unused ehci_npcm7xx_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
ehci_resume(hcd, false);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(ehci_npcm7xx_pm_ops, ehci_npcm7xx_drv_suspend,
ehci_npcm7xx_drv_resume);
@@ -183,7 +181,7 @@ static struct platform_driver npcm7xx_ehci_hcd_driver = {
.driver = {
.name = "npcm7xx-ehci",
.bus = &platform_bus_type,
- .pm = &ehci_npcm7xx_pm_ops,
+ .pm = pm_ptr(&ehci_npcm7xx_pm_ops),
.of_match_table = npcm7xx_ehci_id_table,
}
};
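The ehci-npcm7xx, ehci-platform and ehci-spear hunks all apply the same de-#ifdef recipe: mark the sleep callbacks __maybe_unused (so the compiler stays quiet and can discard them when SIMPLE_DEV_PM_OPS omits the references without CONFIG_PM_SLEEP) and point .pm at pm_ptr(), which evaluates to NULL when CONFIG_PM is off so the unreferenced ops can be dropped as well. The pattern:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	return 0;	/* e.g. ehci_suspend(hcd, device_may_wakeup(dev)) */
}

static int __maybe_unused demo_resume(struct device *dev)
{
	return 0;	/* e.g. ehci_resume(hcd, false) */
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = pm_ptr(&demo_pm_ops),	/* NULL if !CONFIG_PM */
	},
};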
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 006c4f6188a5..a48dd3fac153 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -42,6 +42,9 @@
#define EHCI_MAX_CLKS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
+#define BCM_USB_FIFO_THRESHOLD 0x00800040
+#define bcm_iproc_insnreg01 hostpc[0]
+
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
struct reset_control *rsts;
@@ -75,6 +78,11 @@ static int ehci_platform_reset(struct usb_hcd *hcd)
if (pdata->no_io_watchdog)
ehci->need_io_watchdog = 0;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "brcm,xgs-iproc-ehci"))
+ ehci_writel(ehci, BCM_USB_FIFO_THRESHOLD,
+ &ehci->regs->bcm_iproc_insnreg01);
+
return 0;
}
@@ -410,8 +418,7 @@ static int ehci_platform_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int ehci_platform_suspend(struct device *dev)
+static int __maybe_unused ehci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
@@ -433,7 +440,7 @@ static int ehci_platform_suspend(struct device *dev)
return ret;
}
-static int ehci_platform_resume(struct device *dev)
+static int __maybe_unused ehci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
@@ -464,7 +471,6 @@ static int ehci_platform_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "via,vt8500-ehci", },
@@ -499,7 +505,7 @@ static struct platform_driver ehci_platform_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ehci-platform",
- .pm = &ehci_platform_pm_ops,
+ .pm = pm_ptr(&ehci_platform_pm_ops),
.of_match_table = vt8500_ehci_ids,
.acpi_match_table = ACPI_PTR(ehci_acpi_match),
}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 847979f265b1..6dfb242f9a4b 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -307,26 +307,6 @@ static int __maybe_unused same_tt(struct usb_device *dev1,
#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
-/* Which uframe does the low/fullspeed transfer start in?
- *
- * The parameter is the mask of ssplits in "H-frame" terms
- * and this returns the transfer start uframe in "B-frame" terms,
- * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
- * will cause a transfer in "B-frame" uframe 0. "B-frames" lag
- * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
- */
-static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
-{
- unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
-
- if (!smask) {
- ehci_err(ehci, "invalid empty smask!\n");
- /* uframe 7 can't have bw so this will indicate failure */
- return 7;
- }
- return ffs(smask) - 1;
-}
-
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index add796c78561..3694e450a11a 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -34,8 +34,7 @@ struct spear_ehci {
static struct hc_driver __read_mostly ehci_spear_hc_driver;
-#ifdef CONFIG_PM_SLEEP
-static int ehci_spear_drv_suspend(struct device *dev)
+static int __maybe_unused ehci_spear_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
bool do_wakeup = device_may_wakeup(dev);
@@ -43,14 +42,13 @@ static int ehci_spear_drv_suspend(struct device *dev)
return ehci_suspend(hcd, do_wakeup);
}
-static int ehci_spear_drv_resume(struct device *dev)
+static int __maybe_unused ehci_spear_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
ehci_resume(hcd, false);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
ehci_spear_drv_resume);
@@ -155,7 +153,7 @@ static struct platform_driver spear_ehci_hcd_driver = {
.driver = {
.name = "spear-ehci",
.bus = &platform_bus_type,
- .pm = &ehci_spear_pm_ops,
+ .pm = pm_ptr(&ehci_spear_pm_ops),
.of_match_table = spear_ehci_id_table,
}
};
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e077b2ca53c5..869d9c4de5fc 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -479,8 +479,8 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy->otg->host = hcd_to_bus(hcd);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto cleanup_phy;
}
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 194df8282471..1d94fcfac2c2 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/clk.h>
#include <asm/byteorder.h>
@@ -883,18 +884,15 @@ static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = fotg210_readl(fotg210, ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ ret = readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done ||
+ result == U32_MAX), 1, usec);
+ if (result == U32_MAX) /* card removed */
+ return -ENODEV;
+
+ return ret;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3).
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index ae8f60f6e6a5..44a7e58a26e3 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -94,10 +94,13 @@ static struct platform_device *fsl_usb2_device_register(
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
- if (!pdev->dev.dma_mask)
+ if (!pdev->dev.dma_mask) {
pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
- else
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ retval = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (retval)
+ goto error;
+ }
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index dd37e77dae00..73e13e7c2b46 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -102,7 +102,7 @@ static void io_watchdog_func(struct timer_list *t);
/* Some boards misreport power switching/overcurrent */
-static bool distrust_firmware = true;
+static bool distrust_firmware;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
@@ -673,20 +673,24 @@ retry:
/* handle root hub init quirks ... */
val = roothub_a (ohci);
- val &= ~(RH_A_PSM | RH_A_OCPM);
+ /* Configure for per-port over-current protection by default */
+ val &= ~RH_A_NOCP;
+ val |= RH_A_OCPM;
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
- /* NSC 87560 and maybe others */
+ /* NSC 87560 and maybe others.
+ * Ganged power switching, no over-current protection.
+ */
val |= RH_A_NOCP;
- val &= ~(RH_A_POTPGT | RH_A_NPS);
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
+ val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
- * Mac platforms. ganged overcurrent reporting, if any.
+ * Mac platforms.
*/
val |= RH_A_NPS;
- ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
+ ohci_writel(ohci, val, &ohci->regs->roothub.a);
+
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index cfa7dd2cc7d3..27dbbe1b28b1 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -24,6 +24,7 @@
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
@@ -748,18 +749,16 @@ static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = readl(ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ ret = readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done ||
+ result == U32_MAX),
+ 1, usec);
+ if (result == U32_MAX) /* card removed */
+ return -ENODEV;
+
+ return ret;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 8c1bbac6d136..ef08d68b9714 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -16,8 +16,8 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
-
-#include <soc/bcm2835/raspberrypi-firmware.h>
+#include <linux/of.h>
+#include <linux/iopoll.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
@@ -1013,15 +1013,9 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
{
u32 result;
- do {
- result = readl(ptr);
- result &= mask;
- if (result == done)
- return 0;
- udelay(delay_usec);
- wait_usec -= delay_usec;
- } while (wait_usec > 0);
- return -ETIMEDOUT;
+ return readl_poll_timeout_atomic(ptr, result,
+ ((result & mask) == done),
+ delay_usec, wait_usec);
}
/*
@@ -1247,7 +1241,8 @@ iounmap:
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
- int ret;
+ struct device_node *parent;
+ bool is_rpi;
/* Skip Netlogic mips SoC's internal PCI USB controller.
* This device does not need/support EHCI/OHCI handoff
@@ -1255,14 +1250,16 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
if (pdev->vendor == 0x184e) /* vendor Netlogic */
return;
+ /*
+	 * Bypass the Raspberry Pi 4's xHCI controller; things are
+ * taken care of by the board's co-processor.
+ */
if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
- ret = rpi_firmware_init_vl805(pdev);
- if (ret) {
- /* Firmware might be outdated, or something failed */
- dev_warn(&pdev->dev,
- "Failed to load VL805's firmware: %d. Will continue to attempt to work, but bad things might happen. You should fix this...\n",
- ret);
- }
+ parent = of_get_parent(pdev->bus->dev.of_node);
+ is_rpi = of_device_is_compatible(parent, "brcm,bcm2711-pcie");
+ of_node_put(parent);
+ if (is_rpi)
+ return;
}
if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index b8918f73a432..ae4e4ab638b5 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -288,14 +288,14 @@ static const struct tty_operations dbc_tty_ops = {
.unthrottle = dbc_tty_unthrottle,
};
-static void dbc_rx_push(unsigned long _port)
+static void dbc_rx_push(struct tasklet_struct *t)
{
struct dbc_request *req;
struct tty_struct *tty;
unsigned long flags;
bool do_push = false;
bool disconnect = false;
- struct dbc_port *port = (void *)_port;
+ struct dbc_port *port = from_tasklet(port, t, push);
struct list_head *queue = &port->read_queue;
spin_lock_irqsave(&port->port_lock, flags);
@@ -382,7 +382,7 @@ xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
tty_port_init(&port->port);
spin_lock_init(&port->port_lock);
- tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
+ tasklet_setup(&port->push, dbc_rx_push);
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
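The tasklet change above follows the tree-wide tasklet_setup()/from_tasklet() conversion: the callback now receives the tasklet_struct pointer instead of an unsigned long cookie, and from_tasklet() is container_of() in disguise, so the parent object is recovered type-safely. A minimal sketch of the pattern (names are illustrative):

struct my_port {
	struct tasklet_struct push;
	/* ... driver state ... */
};

static void my_rx_push(struct tasklet_struct *t)
{
	/* recover the my_port that embeds this tasklet */
	struct my_port *port = from_tasklet(port, t, push);

	/* ... drain port's read queue ... */
}

static void my_port_init(struct my_port *port)
{
	tasklet_setup(&port->push, my_rx_push);	/* no data cookie needed */
}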
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index c88bffd68742..2c0fda57869e 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -451,9 +451,11 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
if (!epriv)
return;
+ epriv->show_ring = dev->eps[ep_index].ring;
+
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
- &dev->eps[ep_index].ring,
+ &epriv->show_ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
@@ -475,6 +477,111 @@ void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
kfree(epriv);
}
+static int xhci_stream_id_show(struct seq_file *s, void *unused)
+{
+ struct xhci_ep_priv *epriv = s->private;
+
+ if (!epriv->stream_info)
+ return -EPERM;
+
+ seq_printf(s, "Show stream ID %d trb ring, supported [1 - %d]\n",
+ epriv->stream_id, epriv->stream_info->num_streams - 1);
+
+ return 0;
+}
+
+static int xhci_stream_id_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xhci_stream_id_show, inode->i_private);
+}
+
+static ssize_t xhci_stream_id_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xhci_ep_priv *epriv = s->private;
+ int ret;
+ u16 stream_id; /* MaxPStreams + 1 <= 16 */
+
+ if (!epriv->stream_info)
+ return -EPERM;
+
+ /* Decimal number */
+ ret = kstrtou16_from_user(ubuf, count, 10, &stream_id);
+ if (ret)
+ return ret;
+
+ if (stream_id == 0 || stream_id >= epriv->stream_info->num_streams)
+ return -EINVAL;
+
+ epriv->stream_id = stream_id;
+ epriv->show_ring = epriv->stream_info->stream_rings[stream_id];
+
+ return count;
+}
+
+static const struct file_operations stream_id_fops = {
+ .open = xhci_stream_id_open,
+ .write = xhci_stream_id_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int xhci_stream_context_array_show(struct seq_file *s, void *unused)
+{
+ struct xhci_ep_priv *epriv = s->private;
+ struct xhci_stream_ctx *stream_ctx;
+ dma_addr_t dma;
+ int id;
+
+ if (!epriv->stream_info)
+ return -EPERM;
+
+ seq_printf(s, "Allocated %d streams and %d stream context array entries\n",
+ epriv->stream_info->num_streams,
+ epriv->stream_info->num_stream_ctxs);
+
+ for (id = 0; id < epriv->stream_info->num_stream_ctxs; id++) {
+ stream_ctx = epriv->stream_info->stream_ctx_array + id;
+ dma = epriv->stream_info->ctx_array_dma + id * 16;
+ if (id < epriv->stream_info->num_streams)
+ seq_printf(s, "%pad stream id %d deq %016llx\n", &dma,
+ id, le64_to_cpu(stream_ctx->stream_ring));
+ else
+ seq_printf(s, "%pad stream context entry not used deq %016llx\n",
+ &dma, le64_to_cpu(stream_ctx->stream_ring));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xhci_stream_context_array);
+
+void xhci_debugfs_create_stream_files(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ int ep_index)
+{
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+ struct xhci_ep_priv *epriv;
+
+ if (!spriv || !spriv->eps[ep_index] ||
+ !dev->eps[ep_index].stream_info)
+ return;
+
+ epriv = spriv->eps[ep_index];
+ epriv->stream_info = dev->eps[ep_index].stream_info;
+
+ /* Show trb ring of stream ID 1 by default */
+ epriv->stream_id = 1;
+ epriv->show_ring = epriv->stream_info->stream_rings[1];
+ debugfs_create_file("stream_id", 0644,
+ epriv->root, epriv,
+ &stream_id_fops);
+ debugfs_create_file("stream_context_array", 0444,
+ epriv->root, epriv,
+ &xhci_stream_context_array_fops);
+}
+
void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_slot_priv *priv;
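Once streams are allocated, the new files let you pick which stream ring the endpoint's existing ring directory dumps: writing a decimal ID (1 to num_streams - 1) to stream_id repoints show_ring, and reading stream_context_array lists every context entry with its dequeue pointer. For reference, DEFINE_SHOW_ATTRIBUTE(xhci_stream_context_array) generates boilerplate roughly equivalent to the open-coded stream_id_fops above, minus the .write hook (sketch of the macro's expansion):

static int xhci_stream_context_array_open(struct inode *inode,
					  struct file *file)
{
	return single_open(file, xhci_stream_context_array_show,
			   inode->i_private);
}

static const struct file_operations xhci_stream_context_array_fops = {
	.owner		= THIS_MODULE,
	.open		= xhci_stream_context_array_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};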
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
index 56db635fcd6e..7c074b4be819 100644
--- a/drivers/usb/host/xhci-debugfs.h
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -91,6 +91,9 @@ struct xhci_file_map {
struct xhci_ep_priv {
char name[DEBUGFS_NAMELEN];
struct dentry *root;
+ struct xhci_stream_info *stream_info;
+ struct xhci_ring *show_ring;
+ unsigned int stream_id;
};
struct xhci_slot_priv {
@@ -113,6 +116,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int ep_index);
+void xhci_debugfs_create_stream_files(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index);
#else
static inline void xhci_debugfs_init(struct xhci_hcd *xhci) { }
static inline void xhci_debugfs_exit(struct xhci_hcd *xhci) { }
@@ -128,6 +134,10 @@ static inline void
xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int ep_index) { }
+static inline void
+xhci_debugfs_create_stream_files(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index) { }
#endif /* CONFIG_DEBUG_FS */
#endif /* __LINUX_XHCI_DEBUGFS_H */
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 5546e7e013a8..08369857686e 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -240,7 +240,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
- return ret;
+ goto disable_pm;
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fe405cd38dbc..138ba4528dd3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2252,8 +2252,8 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
if (!rhub->num_ports)
return;
- rhub->ports = kcalloc_node(rhub->num_ports, sizeof(rhub->ports), flags,
- dev_to_node(dev));
+ rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
+ flags, dev_to_node(dev));
for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 4311d4c9b68d..8f321f39ab96 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -77,7 +77,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value, check_val;
- int u3_ports_disabed = 0;
+ int u3_ports_disabled = 0;
int ret;
int i;
@@ -92,7 +92,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
/* power on and enable u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
if ((0x1 << i) & mtk->u3p_dis_msk) {
- u3_ports_disabed++;
+ u3_ports_disabled++;
continue;
}
@@ -117,7 +117,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
check_val = STS1_SYSPLL_STABLE | STS1_REF_RST |
STS1_SYS125_RST | STS1_XHCI_RST;
- if (mtk->num_u3_ports > u3_ports_disabed)
+ if (mtk->num_u3_ports > u3_ports_disabled)
check_val |= STS1_U3_MAC_RST;
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3feaafebfe58..bf89172c43ca 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/reset.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -22,6 +23,8 @@
#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
+#define SPARSE_DISABLE_BIT 17
+#define SPARSE_CNTL_ENABLE 0xC12C
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
@@ -160,6 +163,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+ xhci->quirks |= XHCI_DISABLE_SPARSE;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -346,6 +352,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
struct xhci_driver_data *driver_data;
+ struct reset_control *reset;
driver_data = (struct xhci_driver_data *)id->driver_data;
if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
@@ -354,6 +361,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
}
+ reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+ reset_control_reset(reset);
+
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
@@ -371,6 +383,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
xhci = hcd_to_xhci(hcd);
+ xhci->reset = reset;
xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
pci_name(dev), hcd);
if (!xhci->shared_hcd) {
@@ -490,6 +503,15 @@ static void xhci_pme_quirk(struct usb_hcd *hcd)
readl(reg);
}
+static void xhci_sparse_control_quirk(struct usb_hcd *hcd)
+{
+ u32 reg;
+
+ reg = readl(hcd->regs + SPARSE_CNTL_ENABLE);
+ reg &= ~BIT(SPARSE_DISABLE_BIT);
+ writel(reg, hcd->regs + SPARSE_CNTL_ENABLE);
+}
+
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -509,6 +531,9 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
xhci_ssic_port_unused_quirk(hcd, true);
+ if (xhci->quirks & XHCI_DISABLE_SPARSE)
+ xhci_sparse_control_quirk(hcd);
+
ret = xhci_suspend(xhci, do_wakeup);
if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
xhci_ssic_port_unused_quirk(hcd, false);
@@ -522,6 +547,8 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval = 0;
+ reset_control_reset(xhci->reset);
+
/* The BIOS on systems with the Intel Panther Point chipset may or may
* not support xHCI natively. That means that during system resume, it
* may switch the ports back to EHCI so that users can use their
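The reset hookup above uses the optional-reset idiom: devm_reset_control_get_optional_exclusive() returns NULL when no reset line is described for the device, ERR_PTR() (possibly -EPROBE_DEFER) on real errors, and every reset_control_*() call treats a NULL handle as a no-op, so the driver never needs an explicit NULL check. A minimal sketch of the idiom with an illustrative probe:

static int example_probe(struct device *dev)
{
	struct reset_control *reset;

	reset = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(reset))
		return PTR_ERR(reset);		/* e.g. -EPROBE_DEFER */

	reset_control_reset(reset);		/* no-op when reset == NULL */
	return 0;
}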
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3057cfc76d6a..aa2d35f98200 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -54,6 +54,16 @@ static int xhci_priv_init_quirk(struct usb_hcd *hcd)
return priv->init_quirk(hcd);
}
+static int xhci_priv_suspend_quirk(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->suspend_quirk)
+ return 0;
+
+ return priv->suspend_quirk(hcd);
+}
+
static int xhci_priv_resume_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -173,6 +183,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
int ret;
int irq;
+ struct xhci_plat_priv *priv = NULL;
+
if (usb_disabled())
return -ENODEV;
@@ -264,16 +276,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto disable_reg_clk;
- priv_match = of_device_get_match_data(&pdev->dev);
- if (priv_match) {
- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+ if (pdev->dev.of_node)
+ priv_match = of_device_get_match_data(&pdev->dev);
+ else
+ priv_match = dev_get_platdata(&pdev->dev);
+ if (priv_match) {
+ priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
- if (priv_match)
- *priv = *priv_match;
+ *priv = *priv_match;
}
- device_wakeup_enable(hcd->self.controller);
+ device_set_wakeup_capable(&pdev->dev, true);
xhci->main_hcd = hcd;
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
@@ -316,6 +330,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
+ if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+ hcd->skip_phy_initialization = 1;
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
@@ -397,14 +414,14 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+ ret = xhci_priv_suspend_quirk(hcd);
+ if (ret)
+ return ret;
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
- * to do wakeup during suspend. Since xhci_plat_suspend is currently
- * only designed for system suspend, device_may_wakeup() is enough
- * to dertermine whether host is allowed to do wakeup. Need to
- * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
- * also applies to runtime suspend.
+ * to do wakeup during suspend.
*/
return xhci_suspend(xhci, device_may_wakeup(dev));
}
@@ -434,6 +451,11 @@ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ ret = xhci_priv_suspend_quirk(hcd);
+ if (ret)
+ return ret;
return xhci_suspend(xhci, true);
}
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index b49f6447bd3a..1fb149d1fbce 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -15,6 +15,7 @@ struct xhci_plat_priv {
unsigned long long quirks;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
+ int (*suspend_quirk)(struct usb_hcd *);
int (*resume_quirk)(struct usb_hcd *);
};
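A glue driver wires the new suspend_quirk in through its match data, the same way init_quirk and resume_quirk are already consumed by xhci_plat_probe(). Hypothetical sketch (the compatible string and functions below are illustrative, not from the tree):

static int example_xhci_suspend_quirk(struct usb_hcd *hcd)
{
	/* quiesce SoC-specific logic before xhci_suspend() runs */
	return 0;
}

static const struct xhci_plat_priv xhci_plat_example = {
	.suspend_quirk = example_xhci_suspend_quirk,
};

static const struct of_device_id example_xhci_of_match[] = {
	{ .compatible = "vendor,example-xhci", .data = &xhci_plat_example },
	{ /* sentinel */ },
};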
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index c1025d321a41..1bc4fe7b8c75 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -6,6 +6,7 @@
*/
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -127,8 +128,7 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
void __iomem *regs = hcd->regs;
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
const struct firmware *fw;
- int retval, index, j, time;
- int timeout = 10000;
+ int retval, index, j;
u32 data, val, temp;
u32 quirks = 0;
const struct soc_device_attribute *attr;
@@ -166,32 +166,19 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
temp |= RCAR_USB3_DL_CTRL_FW_SET_DATA0;
writel(temp, regs + RCAR_USB3_DL_CTRL);
- for (time = 0; time < timeout; time++) {
- val = readl(regs + RCAR_USB3_DL_CTRL);
- if ((val & RCAR_USB3_DL_CTRL_FW_SET_DATA0) == 0)
- break;
- udelay(1);
- }
- if (time == timeout) {
- retval = -ETIMEDOUT;
+ retval = readl_poll_timeout_atomic(regs + RCAR_USB3_DL_CTRL,
+ val, !(val & RCAR_USB3_DL_CTRL_FW_SET_DATA0),
+ 1, 10000);
+ if (retval < 0)
break;
- }
}
temp = readl(regs + RCAR_USB3_DL_CTRL);
temp &= ~RCAR_USB3_DL_CTRL_ENABLE;
writel(temp, regs + RCAR_USB3_DL_CTRL);
- for (time = 0; time < timeout; time++) {
- val = readl(regs + RCAR_USB3_DL_CTRL);
- if (val & RCAR_USB3_DL_CTRL_FW_SUCCESS) {
- retval = 0;
- break;
- }
- udelay(1);
- }
- if (time == timeout)
- retval = -ETIMEDOUT;
+ retval = readl_poll_timeout_atomic((regs + RCAR_USB3_DL_CTRL),
+ val, val & RCAR_USB3_DL_CTRL_FW_SUCCESS, 1, 10000);
release_firmware(fw);
@@ -200,18 +187,12 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
{
- int timeout = 1000;
+ int retval;
u32 val, mask = RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK;
- while (timeout > 0) {
- val = readl(hcd->regs + RCAR_USB3_AXH_STA);
- if ((val & mask) == mask)
- return true;
- udelay(1);
- timeout--;
- }
-
- return false;
+ retval = readl_poll_timeout_atomic(hcd->regs + RCAR_USB3_AXH_STA,
+ val, (val & mask) == mask, 1, 1000);
+ return !retval;
}
/* This function needs to initialize the USB PHY first */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a741a38a4c69..167dae117f73 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3736,6 +3736,24 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
return start_frame;
}
+/* Check if we should generate event interrupt for a TD in an isoc URB */
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+{
+ if (xhci->hci_version < 0x100)
+ return false;
+ /* always generate an event interrupt for the last TD */
+ if (i == num_tds - 1)
+ return false;
+ /*
+ * If AVOID_BEI is set the host handles full event rings poorly,
+ * generate an event at least every 8th TD to clear the event ring
+ */
+ if (i && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % 8);
+
+ return true;
+}
+
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3843,10 +3861,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = false;
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
- /* set BEI, except for the last TD */
- if (xhci->hci_version >= 0x100 &&
- !(xhci->quirks & XHCI_AVOID_BEI) &&
- i < num_tds - 1)
+ if (trb_block_event_intr(xhci, num_tds, i))
field |= TRB_BEI;
}
/* Calculate TRB length */
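For clarity, here is how the new helper behaves for an isoc URB with num_tds = 20 (TDs 0..19), assuming an xHCI 1.0+ host:

/*
 * trb_block_event_intr() outcome for num_tds = 20:
 *
 *   quirk unset:      event only on TD 19 (the last TD); all others
 *                     carry TRB_BEI and stay silent
 *   XHCI_AVOID_BEI:   events on TDs 8, 16 and 19 -- i % 8 == 0 (i > 0)
 *                     and the last TD return false, so an event fires
 *                     at least every 8th TD and the event ring drains
 */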
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 190923d8b246..934be1686352 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1866,7 +1866,6 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
static const unsigned int tegra124_xusb_context_ipfs[] = {
IPFS_XUSB_HOST_MSI_BAR_SZ_0,
- IPFS_XUSB_HOST_MSI_BAR_SZ_0,
IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0,
IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0,
IPFS_XUSB_HOST_MSI_VEC0_0,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f4cedcaee14b..d4a8d0efbbc4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -982,12 +982,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
xhci->shared_hcd->state != HC_STATE_SUSPENDED)
return -EINVAL;
- xhci_dbc_suspend(xhci);
-
/* Clear root port wake on bits if wakeup not allowed. */
if (!do_wakeup)
xhci_disable_port_wake_on_bits(xhci);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ return 0;
+
+ xhci_dbc_suspend(xhci);
+
/* Don't poll the roothubs on bus suspend. */
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -1915,8 +1918,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
trace_xhci_add_endpoint(ep_ctx);
- xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
-
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
@@ -2949,6 +2950,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
+ xhci_debugfs_create_endpoint(xhci, virt_dev, i);
}
command_cleanup:
kfree(command->completion);
@@ -3535,6 +3537,10 @@ static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
xhci_free_command(xhci, config_cmd);
spin_unlock_irqrestore(&xhci->lock, flags);
+ for (i = 0; i < num_eps; i++) {
+ ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
+ }
/* Subtract 1 for stream 0, which drivers can't use */
return num_streams - 1;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ea1754f185a2..ebb359ebb261 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1770,6 +1770,8 @@ struct xhci_hcd {
/* optional clocks */
struct clk *clk;
struct clk *reg_clk;
+ /* optional reset controller */
+ struct reset_control *reset;
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
@@ -1874,6 +1876,8 @@ struct xhci_hcd {
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
+#define XHCI_SKIP_PHY_INIT BIT_ULL(37)
+#define XHCI_DISABLE_SPARSE BIT_ULL(38)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 360416680e82..59b02a539963 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -389,7 +389,7 @@ void mts_int_submit_urb (struct urb* transfer,
res = usb_submit_urb( transfer, GFP_ATOMIC );
if ( unlikely(res) ) {
MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res );
- context->srb->result = DID_ERROR << 16;
+ set_host_byte(context->srb, DID_ERROR);
mts_transfer_cleanup(transfer);
}
}
@@ -438,7 +438,7 @@ static void mts_data_done( struct urb* transfer )
scsi_set_resid(context->srb, context->data_length -
transfer->actual_length);
} else if ( unlikely(status) ) {
- context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
+ set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR));
}
mts_get_status(transfer);
@@ -455,12 +455,12 @@ static void mts_command_done( struct urb *transfer )
if (status == -ENOENT) {
/* We are being killed */
MTS_DEBUG_GOT_HERE();
- context->srb->result = DID_ABORT<<16;
+ set_host_byte(context->srb, DID_ABORT);
} else {
/* A genuine error has occurred */
MTS_DEBUG_GOT_HERE();
- context->srb->result = DID_ERROR<<16;
+ set_host_byte(context->srb, DID_ERROR);
}
mts_transfer_cleanup(transfer);
@@ -495,7 +495,7 @@ static void mts_do_sg (struct urb* transfer)
scsi_sg_count(context->srb));
if (unlikely(status)) {
- context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
+ set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR));
mts_transfer_cleanup(transfer);
}
@@ -578,7 +578,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
MTS_DEBUG("this device doesn't exist\n");
- srb->result = DID_BAD_TARGET << 16;
+ set_host_byte(srb, DID_BAD_TARGET);
if(likely(callback != NULL))
callback(srb);
@@ -605,7 +605,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
if(unlikely(res)){
MTS_ERROR("error %d submitting URB\n",(int)res);
- srb->result = DID_ERROR << 16;
+ set_host_byte(srb, DID_ERROR);
if(likely(callback != NULL))
callback(srb);
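The set_host_byte() conversions in this file (and in uas.c further down) are behavior-preserving: the helper just masks the host byte into the SCSI result instead of open-coding the shift. A sketch of its effect, per include/scsi/scsi_cmnd.h:

static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
{
	cmd->result = (cmd->result & 0x00ffffff) | (status << 16);
}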
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index dd74ab7a2f9c..33ae656c4b68 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
@@ -380,18 +381,15 @@ static int handshake(struct usb_hcd *hcd, u32 reg,
u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(hcd->regs + reg, result,
+ ((result & mask) == done ||
+ result == U32_MAX), 1, usec);
+ if (result == U32_MAX)
+ return -ENODEV;
- do {
- result = reg_read32(hcd->regs, reg);
- if (result == ~0)
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ return ret;
}
/* reset a non-running (STS_HALT == 1) controller */
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index a7eefe11f31a..45a387979935 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb)
if (status != 0) {
if ((status != -ENOENT) &&
+ (status != -ESHUTDOWN) &&
(status != -ECONNRESET)) {
dev_dbg(&dev->udev->dev,
"%s :nonzero status received: %d\n", __func__,
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
index b403094a6b3a..9de0171b5177 100644
--- a/drivers/usb/misc/apple-mfi-fastcharge.c
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c
@@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy,
dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(&mfi->udev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&mfi->udev->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -163,17 +165,23 @@ static const struct power_supply_desc apple_mfi_fc_desc = {
.property_is_writeable = apple_mfi_fc_property_is_writeable
};
+static bool mfi_fc_match(struct usb_device *udev)
+{
+ int idProduct;
+
+ idProduct = le16_to_cpu(udev->descriptor.idProduct);
+ /* See comment above mfi_fc_id_table[] */
+ return (idProduct >= 0x1200 && idProduct <= 0x12ff);
+}
+
static int mfi_fc_probe(struct usb_device *udev)
{
struct power_supply_config battery_cfg = {};
struct mfi_device *mfi = NULL;
- int err, idProduct;
+ int err;
- idProduct = le16_to_cpu(udev->descriptor.idProduct);
- /* See comment above mfi_fc_id_table[] */
- if (idProduct < 0x1200 || idProduct > 0x12ff) {
+ if (!mfi_fc_match(udev))
return -ENODEV;
- }
mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL);
if (!mfi) {
@@ -220,6 +228,7 @@ static struct usb_device_driver mfi_fc_driver = {
.probe = mfi_fc_probe,
.disconnect = mfi_fc_disconnect,
.id_table = mfi_fc_id_table,
+ .match = mfi_fc_match,
.generic_subclass = 1,
};
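The pm_runtime_put_noidle() added above follows the standard usage-counter rule: pm_runtime_get_sync() increments the usage count even when it fails, so the error path has to drop that reference or the device can never runtime-suspend again. The idiom, sketched:

static int example_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the failed get */
		return ret;
	}

	/* ... touch the hardware ... */

	pm_runtime_put(dev);
	return 0;
}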
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 36fed1a09666..c8098e9b432e 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -342,20 +342,8 @@ static struct usb_driver appledisplay_driver = {
.disconnect = appledisplay_disconnect,
.id_table = appledisplay_table,
};
-
-static int __init appledisplay_init(void)
-{
- return usb_register(&appledisplay_driver);
-}
-
-static void __exit appledisplay_exit(void)
-{
- usb_deregister(&appledisplay_driver);
-}
+module_usb_driver(appledisplay_driver);
MODULE_AUTHOR("Michael Hanselmann");
MODULE_DESCRIPTION("Apple Cinema Display driver");
MODULE_LICENSE("GPL");
-
-module_init(appledisplay_init);
-module_exit(appledisplay_exit);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index f922544056de..ba655b4af4fc 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -308,15 +308,9 @@ static int tower_open(struct inode *inode, struct file *file)
int subminor;
int retval = 0;
struct usb_interface *interface;
- struct tower_reset_reply *reset_reply;
+ struct tower_reset_reply reset_reply;
int result;
- reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
- if (!reset_reply) {
- retval = -ENOMEM;
- goto exit;
- }
-
nonseekable_open(inode, file);
subminor = iminor(inode);
@@ -347,15 +341,12 @@ static int tower_open(struct inode *inode, struct file *file)
}
/* reset the tower */
- result = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- LEGO_USB_TOWER_REQUEST_RESET,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- reset_reply,
- sizeof(*reset_reply),
- 1000);
+ result = usb_control_msg_recv(dev->udev, 0,
+ LEGO_USB_TOWER_REQUEST_RESET,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0, 0,
+ &reset_reply, sizeof(reset_reply), 1000,
+ GFP_KERNEL);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
@@ -394,7 +385,6 @@ unlock_exit:
mutex_unlock(&dev->lock);
exit:
- kfree(reset_reply);
return retval;
}
@@ -753,7 +743,7 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev;
- struct tower_get_version_reply *get_version_reply = NULL;
+ struct tower_get_version_reply get_version_reply;
int retval = -ENOMEM;
int result;
@@ -798,34 +788,25 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
- get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
- if (!get_version_reply) {
- retval = -ENOMEM;
- goto error;
- }
-
/* get the firmware version and log it */
- result = usb_control_msg(udev,
- usb_rcvctrlpipe(udev, 0),
- LEGO_USB_TOWER_REQUEST_GET_VERSION,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- get_version_reply,
- sizeof(*get_version_reply),
- 1000);
- if (result != sizeof(*get_version_reply)) {
- if (result >= 0)
- result = -EIO;
+ result = usb_control_msg_recv(udev, 0,
+ LEGO_USB_TOWER_REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ &get_version_reply,
+ sizeof(get_version_reply),
+ 1000, GFP_KERNEL);
+	if (result) {
dev_err(idev, "get version request failed: %d\n", result);
retval = result;
goto error;
}
dev_info(&interface->dev,
"LEGO USB Tower firmware version is %d.%d build %d\n",
- get_version_reply->major,
- get_version_reply->minor,
- le16_to_cpu(get_version_reply->build_no));
+ get_version_reply.major,
+ get_version_reply.minor,
+ le16_to_cpu(get_version_reply.build_no));
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
@@ -844,11 +825,9 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_
USB_MAJOR, dev->minor);
exit:
- kfree(get_version_reply);
return retval;
error:
- kfree(get_version_reply);
tower_delete(dev);
return retval;
}
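The usb_control_msg_recv() conversion is what makes the on-stack reset_reply and get_version_reply safe: the helper bounces the transfer through its own kmalloc'ed, DMA-able buffer and returns 0 only on a complete transfer (short reads become errors), which is why the probe path now checks the result against zero rather than against the expected length. Roughly what the core helper does (simplified sketch; see drivers/usb/core/message.c for the real one):

static int control_msg_recv_sketch(struct usb_device *dev, u8 endpoint,
				   u8 request, u8 requesttype, u16 value,
				   u16 index, void *driver_data, u16 size,
				   int timeout, gfp_t memflags)
{
	u8 *data = kmalloc(size, memflags);	/* DMA-able bounce buffer */
	int ret;

	if (!data)
		return -ENOMEM;

	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, endpoint), request,
			      requesttype, value, index, data, size, timeout);
	if (ret < 0)
		goto exit;

	if (ret == size) {
		memcpy(driver_data, data, size);
		ret = 0;
	} else {
		ret = -EREMOTEIO;		/* short transfer */
	}
exit:
	kfree(data);
	return ret;
}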
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 116bd789e568..48099c6bf04c 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -322,8 +322,7 @@ static int usb3503_platform_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int usb3503_suspend(struct usb3503 *hub)
+static int __maybe_unused usb3503_suspend(struct usb3503 *hub)
{
usb3503_switch_mode(hub, USB3503_MODE_STANDBY);
clk_disable_unprepare(hub->clk);
@@ -331,7 +330,7 @@ static int usb3503_suspend(struct usb3503 *hub)
return 0;
}
-static int usb3503_resume(struct usb3503 *hub)
+static int __maybe_unused usb3503_resume(struct usb3503 *hub)
{
clk_prepare_enable(hub->clk);
usb3503_switch_mode(hub, hub->mode);
@@ -339,30 +338,29 @@ static int usb3503_resume(struct usb3503 *hub)
return 0;
}
-static int usb3503_i2c_suspend(struct device *dev)
+static int __maybe_unused usb3503_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return usb3503_suspend(i2c_get_clientdata(client));
}
-static int usb3503_i2c_resume(struct device *dev)
+static int __maybe_unused usb3503_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return usb3503_resume(i2c_get_clientdata(client));
}
-static int usb3503_platform_suspend(struct device *dev)
+static int __maybe_unused usb3503_platform_suspend(struct device *dev)
{
return usb3503_suspend(dev_get_drvdata(dev));
}
-static int usb3503_platform_resume(struct device *dev)
+static int __maybe_unused usb3503_platform_resume(struct device *dev)
{
return usb3503_resume(dev_get_drvdata(dev));
}
-#endif
static SIMPLE_DEV_PM_OPS(usb3503_i2c_pm_ops, usb3503_i2c_suspend,
usb3503_i2c_resume);
@@ -388,7 +386,7 @@ MODULE_DEVICE_TABLE(of, usb3503_of_match);
static struct i2c_driver usb3503_i2c_driver = {
.driver = {
.name = USB3503_I2C_NAME,
- .pm = &usb3503_i2c_pm_ops,
+ .pm = pm_ptr(&usb3503_i2c_pm_ops),
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_i2c_probe,
@@ -400,7 +398,7 @@ static struct platform_driver usb3503_platform_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
- .pm = &usb3503_platform_pm_ops,
+ .pm = pm_ptr(&usb3503_platform_pm_ops),
},
.probe = usb3503_platform_probe,
.remove = usb3503_platform_remove,
diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c
index 1b4de651e697..2142af9bbdec 100644
--- a/drivers/usb/misc/usb4604.c
+++ b/drivers/usb/misc/usb4604.c
@@ -112,8 +112,7 @@ static int usb4604_i2c_probe(struct i2c_client *i2c,
return usb4604_probe(hub);
}
-#ifdef CONFIG_PM_SLEEP
-static int usb4604_i2c_suspend(struct device *dev)
+static int __maybe_unused usb4604_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb4604 *hub = i2c_get_clientdata(client);
@@ -123,7 +122,7 @@ static int usb4604_i2c_suspend(struct device *dev)
return 0;
}
-static int usb4604_i2c_resume(struct device *dev)
+static int __maybe_unused usb4604_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb4604 *hub = i2c_get_clientdata(client);
@@ -132,7 +131,6 @@ static int usb4604_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(usb4604_i2c_pm_ops, usb4604_i2c_suspend,
usb4604_i2c_resume);
@@ -154,7 +152,7 @@ MODULE_DEVICE_TABLE(of, usb4604_of_match);
static struct i2c_driver usb4604_i2c_driver = {
.driver = {
.name = "usb4604",
- .pm = &usb4604_i2c_pm_ops,
+ .pm = pm_ptr(&usb4604_i2c_pm_ops),
.of_match_table = of_match_ptr(usb4604_of_match),
},
.probe = usb4604_i2c_probe,
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 61e9e987fe4a..bb546f624a45 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -187,7 +187,6 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
default:
return -ENOTTY;
- break;
}
return 0;
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index b2e09883c7e2..e3165d79b5f6 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -96,15 +96,13 @@ static void yurex_delete(struct kref *kref)
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
kfree(dev->cntl_req);
- if (dev->cntl_buffer)
- usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
+ usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
dev->cntl_buffer, dev->cntl_urb->transfer_dma);
usb_free_urb(dev->cntl_urb);
}
if (dev->urb) {
usb_kill_urb(dev->urb);
- if (dev->int_buffer)
- usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
+ usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 71f4f02c05c6..aef0a0bba25a 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -370,12 +370,6 @@ static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g)
return container_of(g, struct mtu3, g);
}
-static inline int is_first_entry(const struct list_head *list,
- const struct list_head *head)
-{
- return list_is_last(head, list);
-}
-
static inline struct mtu3_request *to_mtu3_request(struct usb_request *req)
{
return req ? container_of(req, struct mtu3_request, request) : NULL;
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 1de5c9a1d20a..38f17d66d5bc 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -564,6 +564,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&mtu->lock, flags);
+ synchronize_irq(mtu->irq);
return 0;
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 30085b2be7b9..5892f3ce0cdc 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -429,10 +429,12 @@ static int dsps_musb_init(struct musb *musb)
struct platform_device *parent = to_platform_device(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base;
+ struct resource *r;
u32 rev, val;
int ret;
- reg_base = devm_platform_ioremap_resource_byname(parent, "control");
+ r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
+ reg_base = devm_ioremap_resource(dev, r);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
musb->ctrl_base = reg_base;
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 44d3cb02fa76..6d7336727388 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -1024,7 +1024,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
case MUSB_EP0_STAGE_RX: /* control-OUT data */
csr = musb_readw(regs, MUSB_CSR0);
- /* FALLTHROUGH */
+ fallthrough;
/* It's also OK to issue stalls during callbacks when a non-empty
* DATA stage buffer has been read (or even written).
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index aa4a3140394b..4c52ba96f17e 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -518,7 +518,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab,
* 3. Enable AB regulators
* 4. Enable USB phy
* 5. Reset the musb controller
- * 6. Switch the ULPI GPIO pins to fucntion mode
+ * 6. Switch the ULPI GPIO pins to function mode
* 7. Enable the musb Peripheral5 clock
* 8. Restore MUSB context
*/
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index ce767ecc0636..576d925af77c 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
@@ -135,8 +136,8 @@ static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
static int mv_otg_reset(struct mv_otg *mvotg)
{
- unsigned int loops;
u32 tmp;
+ int ret;
/* Stop the controller */
tmp = readl(&mvotg->op_regs->usbcmd);
@@ -146,15 +147,12 @@ static int mv_otg_reset(struct mv_otg *mvotg)
/* Reset the controller to get default values */
writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
- loops = 500;
- while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
- if (loops == 0) {
- dev_err(&mvotg->pdev->dev,
- "Wait for RESET completed TIMEOUT\n");
- return -ETIMEDOUT;
- }
- loops--;
- udelay(20);
+ ret = readl_poll_timeout_atomic(&mvotg->op_regs->usbcmd, tmp,
+				!(tmp & USBCMD_CTRL_RESET), 10, 10000);
+ if (ret < 0) {
+ dev_err(&mvotg->pdev->dev,
+ "Wait for RESET completed TIMEOUT\n");
+ return ret;
}
writel(0x0, &mvotg->op_regs->usbintr);
diff --git a/drivers/usb/phy/phy-ulpi-viewport.c b/drivers/usb/phy/phy-ulpi-viewport.c
index 7a14e0e3b635..0f61e328eaef 100644
--- a/drivers/usb/phy/phy-ulpi-viewport.c
+++ b/drivers/usb/phy/phy-ulpi-viewport.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -20,16 +21,9 @@
static int ulpi_viewport_wait(void __iomem *view, u32 mask)
{
- unsigned long usec = 2000;
+ u32 val;
- while (usec--) {
- if (!(readl(view) & mask))
- return 0;
-
- udelay(1);
- }
-
- return -ETIMEDOUT;
+ return readl_poll_timeout_atomic(view, val, !(val & mask), 1, 2000);
}
static int ulpi_viewport_read(struct usb_phy *otg, u32 reg)
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 27d92af29635..97f37077b7f9 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -87,19 +87,15 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
-static void *usb_role_switch_match(struct device_connection *con, int ep,
+static void *usb_role_switch_match(struct fwnode_handle *fwnode, const char *id,
void *data)
{
struct device *dev;
- if (con->fwnode) {
- if (con->id && !fwnode_property_present(con->fwnode, con->id))
- return NULL;
+ if (id && !fwnode_property_present(fwnode, id))
+ return NULL;
- dev = class_find_device_by_fwnode(role_class, con->fwnode);
- } else {
- dev = class_find_device_by_name(role_class, con->endpoint[ep]);
- }
+ dev = class_find_device_by_fwnode(role_class, fwnode);
return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 821970609695..2e40908963da 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
struct device *dev = &port->dev;
int status = urb->status;
unsigned long flags;
+ bool resubmitted = false;
- set_bit(0, &port->write_urbs_free);
if (status) {
dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
__func__, status);
+ set_bit(0, &port->write_urbs_free);
return;
}
@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
goto exit;
}
+ resubmitted = true;
+
dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
exit:
spin_unlock_irqrestore(&priv->lock, flags);
+ if (!resubmitted)
+ set_bit(0, &port->write_urbs_free);
usb_serial_port_softint(port);
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9823bb424abd..e0f4c3d9649c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1037,6 +1037,11 @@ static const struct usb_device_id id_table_combined[] = {
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
+ /* FreeCalypso USB adapters */
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};
@@ -1566,7 +1571,8 @@ static void ftdi_determine_type(struct usb_serial_port *port)
dev_dbg(&port->dev, "%s: bcdDevice = 0x%x, bNumInterfaces = %u\n", __func__,
version, interfaces);
if (interfaces > 1) {
- int inter;
+ struct usb_interface *intf = serial->interface;
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
/* Multiple interfaces.*/
if (version == 0x0800) {
@@ -1581,16 +1587,15 @@ static void ftdi_determine_type(struct usb_serial_port *port)
priv->chip_type = FT2232C;
/* Determine interface code. */
- inter = serial->interface->altsetting->desc.bInterfaceNumber;
- if (inter == 0) {
+ if (ifnum == 0)
priv->interface = INTERFACE_A;
- } else if (inter == 1) {
+ else if (ifnum == 1)
priv->interface = INTERFACE_B;
- } else if (inter == 2) {
+ else if (ifnum == 2)
priv->interface = INTERFACE_C;
- } else if (inter == 3) {
+ else if (ifnum == 3)
priv->interface = INTERFACE_D;
- }
+
/* BM-type devices have a bug where bcdDevice gets set
* to 0x200 when iSerialNumber is 0. */
if (version < 0x500) {
@@ -2330,12 +2335,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
*/
static int ftdi_jtag_probe(struct usb_serial *serial)
{
- struct usb_device *udev = serial->dev;
- struct usb_interface *interface = serial->interface;
+ struct usb_interface *intf = serial->interface;
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- if (interface == udev->actconfig->interface[0]) {
- dev_info(&udev->dev,
- "Ignoring serial port reserved for JTAG\n");
+ if (ifnum == 0) {
+ dev_info(&intf->dev, "Ignoring interface reserved for JTAG\n");
return -ENODEV;
}
@@ -2367,12 +2371,11 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
*/
static int ftdi_stmclite_probe(struct usb_serial *serial)
{
- struct usb_device *udev = serial->dev;
- struct usb_interface *interface = serial->interface;
+ struct usb_interface *intf = serial->interface;
+ int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- if (interface == udev->actconfig->interface[0] ||
- interface == udev->actconfig->interface[1]) {
- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+ if (ifnum < 2) {
+ dev_info(&intf->dev, "Ignoring interface reserved for JTAG\n");
return -ENODEV;
}
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b5ca17a5967a..3d47c6d72256 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -39,6 +39,13 @@
#define FTDI_LUMEL_PD12_PID 0x6002
+/*
+ * Custom USB adapters made by Falconia Partners LLC
+ * for FreeCalypso project, ID codes allocated to Falconia by FTDI.
+ */
+#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150
+#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151
+
/* Sienna Serial Interface by Secyourit GmbH */
#define FTDI_SIENNA_PID 0x8348
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 2ec4eeacebc7..5eed1078fac8 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -282,11 +282,12 @@ static void destroy_urbtracker(struct kref *kref)
* port callback had to be deferred because the disconnect mutex could not be
* obtained at the time.
*/
-static void send_deferred_urbs(unsigned long _mos_parport)
+static void send_deferred_urbs(struct tasklet_struct *t)
{
int ret_val;
unsigned long flags;
- struct mos7715_parport *mos_parport = (void *)_mos_parport;
+ struct mos7715_parport *mos_parport = from_tasklet(mos_parport, t,
+ urb_tasklet);
struct urbtracker *urbtrack, *tmp;
struct list_head *cursor, *next;
struct device *dev;
@@ -716,8 +717,7 @@ static int mos7715_parport_init(struct usb_serial *serial)
INIT_LIST_HEAD(&mos_parport->deferred_urbs);
usb_set_serial_data(serial, mos_parport); /* hijack private pointer */
mos_parport->serial = serial;
- tasklet_init(&mos_parport->urb_tasklet, send_deferred_urbs,
- (unsigned long) mos_parport);
+ tasklet_setup(&mos_parport->urb_tasklet, send_deferred_urbs);
init_completion(&mos_parport->syncmsg_compl);
/* cycle parallel port reset bit */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0c6f160a214a..54ca85cc920d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_EC200T 0x6026
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -528,6 +529,7 @@ static void option_instat_callback(struct urb *urb);
/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005
+#define CELLIENT_PRODUCT_MPL200 0x9025
/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
@@ -1116,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1186,6 +1189,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1198,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1212,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
@@ -1982,6 +1995,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
+ .driver_info = RSVD(1) | RSVD(4) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 048452d8a4a4..be8067017eaa 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -100,6 +100,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 7d3090ee7e0c..0f681ddbfd28 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -127,6 +127,7 @@
/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LD381GC_PRODUCT_ID 0x0183
#define HP_LM920_PRODUCT_ID 0x026b
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c8d1ea0e6e6f..83da8236e3c8 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -243,11 +243,11 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
/* QDL mode */
/* Gobi 2000 has a single altsetting, older ones have two */
if (serial->interface->num_altsetting == 2)
- intf = &serial->interface->altsetting[1];
+ intf = usb_altnum_to_altsetting(serial->interface, 1);
else if (serial->interface->num_altsetting > 2)
goto done;
- if (intf->desc.bNumEndpoints == 2 &&
+ if (intf && intf->desc.bNumEndpoints == 2 &&
usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dev_dbg(dev, "QDL port found\n");
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 89f5e33a6e6d..3c76336e43bb 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1383,7 +1383,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
isd200_srb_set_bufflen(srb, 0);
} else {
- usb_stor_dbg(us, " Not removeable media, just report okay\n");
+ usb_stor_dbg(us, " Not removable media, just report okay\n");
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e5a971b83e3f..560efd1479ba 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -92,7 +92,7 @@ static int slave_alloc (struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
- struct device *dev = us->pusb_dev->bus->sysdev;
+ struct device *dev = sdev->host->dma_dev;
/*
* Many devices have trouble transferring more than 32KB at a time,
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 8183504e3abb..c8a577309e8f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -279,17 +279,17 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
switch (response_code) {
case RC_INCORRECT_LUN:
- cmnd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmnd, DID_BAD_TARGET);
break;
case RC_TMF_SUCCEEDED:
- cmnd->result = DID_OK << 16;
+ set_host_byte(cmnd, DID_OK);
break;
case RC_TMF_NOT_SUPPORTED:
- cmnd->result = DID_TARGET_FAILURE << 16;
+ set_host_byte(cmnd, DID_TARGET_FAILURE);
break;
default:
uas_log_cmd_state(cmnd, "response iu", response_code);
- cmnd->result = DID_ERROR << 16;
+ set_host_byte(cmnd, DID_ERROR);
break;
}
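[Editor's note: set_host_byte() from <scsi/scsi_cmnd.h> updates only the
host-byte field of scsi_cmnd::result, whereas the old open-coded
"cmnd->result = DID_x << 16" clobbered the driver and SCSI status bytes
too. A sketch of the equivalent operation, under that assumption:]

#include <scsi/scsi_cmnd.h>

static inline void example_set_host_byte(struct scsi_cmnd *cmd, char status)
{
	/* Replace bits 16..23 only; keep the remaining result bytes. */
	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}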
@@ -660,7 +660,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting) {
- cmnd->result = DID_ERROR << 16;
+ set_host_byte(cmnd, DID_ERROR);
cmnd->scsi_done(cmnd);
goto zombie;
}
@@ -704,7 +704,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
* of queueing, no matter how fatal the error
*/
if (err == -ENODEV) {
- cmnd->result = DID_ERROR << 16;
+ set_host_byte(cmnd, DID_ERROR);
cmnd->scsi_done(cmnd);
goto zombie;
}
@@ -837,17 +837,24 @@ static int uas_slave_alloc(struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
- if (devinfo->flags & US_FL_MAX_SECTORS_64)
- blk_queue_max_hw_sectors(sdev->request_queue, 64);
- else if (devinfo->flags & US_FL_MAX_SECTORS_240)
- blk_queue_max_hw_sectors(sdev->request_queue, 240);
-
return 0;
}
static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
+ struct device *dev = sdev->host->dma_dev;
+
+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
+ else if (devinfo->udev->speed >= USB_SPEED_SUPER)
+ blk_queue_max_hw_sectors(sdev->request_queue, 2048);
+
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT));
if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
sdev->no_report_opcodes = 1;
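[Editor's note: a minimal sketch of the clamp added above.
dma_max_mapping_size() reports the largest mapping the DMA layer (for
example a bounce buffer such as swiotlb) can service for the given device,
and shifting by SECTOR_SHIFT (9) converts bytes to 512-byte sectors. The
helper name is hypothetical.]

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi_device.h>

static void example_clamp_hw_sectors(struct scsi_device *sdev,
				     struct device *dma_dev)
{
	size_t dma_max = dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT;

	/* Never let a single command exceed what the DMA layer can map. */
	blk_queue_max_hw_sectors(sdev->request_queue,
				 min_t(size_t,
				       queue_max_hw_sectors(sdev->request_queue),
				       dma_max));
}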
@@ -1033,7 +1040,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
shost->can_queue = devinfo->qdepth - 2;
usb_set_intfdata(intf, shost);
- result = scsi_add_host(shost, &intf->dev);
+ result = scsi_add_host_with_dma(shost, &intf->dev, udev->bus->sysdev);
if (result)
goto free_streams;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 94a64729dc27..c2ef367cf257 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1049,8 +1049,9 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
usb_autopm_get_interface_no_resume(us->pusb_intf);
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
- dev_name(&us->pusb_intf->dev));
- result = scsi_add_host(us_to_host(us), dev);
+ dev_name(dev));
+ result = scsi_add_host_with_dma(us_to_host(us), dev,
+ us->pusb_dev->bus->sysdev);
if (result) {
dev_warn(dev,
"Unable to add the scsi host\n");
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 559dd06117e7..6c5908a37ee8 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -73,6 +73,30 @@ config TYPEC_TPS6598X
If you choose to build this driver as a dynamically linked module, the
module will be called tps6598x.ko.
+config TYPEC_STUSB160X
+ tristate "STMicroelectronics STUSB160x Type-C controller driver"
+ depends on I2C
+ depends on REGMAP_I2C
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has a STMicroelectronics STUSB160x
+ Type-C port controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called stusb160x.ko.
+
+config TYPEC_QCOM_PMIC
+ tristate "Qualcomm PMIC USB Type-C driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Driver for supporting role switch over the Qualcomm PMIC. This will
+ handle the USB Type-C role and orientation detection reported by the
+ QCOM PMIC if the PMIC has the capability to handle USB Type-C
+ detection.
+
+ It will also enable the VBUS output to connected devices when a
+ DFP connection is made.
+
source "drivers/usb/typec/mux/Kconfig"
source "drivers/usb/typec/altmodes/Kconfig"
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 7753a5c3cd46..d03b48c4b864 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
+obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom-pmic-typec.o
+obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o
obj-$(CONFIG_TYPEC) += mux/
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 7b20073d7fc0..e62e5e3da01e 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -190,7 +190,7 @@ static void dp_altmode_work(struct work_struct *work)
switch (dp->state) {
case DP_STATE_ENTER:
ret = typec_altmode_enter(dp->alt, NULL);
- if (ret)
+ if (ret && ret != -EBUSY)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
case DP_STATE_UPDATE:
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 02655694f200..35eec707cb51 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1449,6 +1449,21 @@ void typec_set_pwr_opmode(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
/**
+ * typec_find_pwr_opmode - Get the typec power operation mode capability
+ * @name: power operation mode string
+ *
+ * This routine is used to find the typec_pwr_opmode by its string @name.
+ *
+ * Returns the typec_pwr_opmode on success, otherwise a negative error code.
+ */
+int typec_find_pwr_opmode(const char *name)
+{
+ return match_string(typec_pwr_opmodes,
+ ARRAY_SIZE(typec_pwr_opmodes), name);
+}
+EXPORT_SYMBOL_GPL(typec_find_pwr_opmode);
+
+/**
* typec_find_orientation - Convert orientation string to enum typec_orientation
* @name: Orientation string
*
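[Editor's note: a minimal usage sketch for the new export, assuming a
firmware node with a "power-opmode" string property (as the stusb160x
driver added later in this series does). match_string() returns the array
index, which maps 1:1 onto enum typec_pwr_opmode, or -EINVAL for an
unknown name. The helper is hypothetical.]

#include <linux/property.h>
#include <linux/usb/typec.h>

static int example_read_opmode(struct fwnode_handle *fwnode)
{
	const char *name;
	int ret;

	ret = fwnode_property_read_string(fwnode, "power-opmode", &name);
	if (ret)
		return ret;

	return typec_find_pwr_opmode(name);	/* e.g. TYPEC_PWR_MODE_1_5A */
}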
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index 323dfa8160ab..f633ec15b1a1 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -155,7 +155,7 @@ static int hd3ss3220_probe(struct i2c_client *client,
{
struct typec_capability typec_cap = { };
struct hd3ss3220 *hd3ss3220;
- struct fwnode_handle *connector;
+ struct fwnode_handle *connector, *ep;
int ret;
unsigned int data;
@@ -173,11 +173,21 @@ static int hd3ss3220_probe(struct i2c_client *client,
hd3ss3220_set_source_pref(hd3ss3220,
HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+ /* For backward compatibility check the connector child node first */
connector = device_get_named_child_node(hd3ss3220->dev, "connector");
- if (!connector)
- return -ENODEV;
+ if (connector) {
+ hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
+ } else {
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(hd3ss3220->dev), NULL);
+ if (!ep)
+ return -ENODEV;
+ connector = fwnode_graph_get_remote_port_parent(ep);
+ fwnode_handle_put(ep);
+ if (!connector)
+ return -ENODEV;
+ hd3ss3220->role_sw = usb_role_switch_get(hd3ss3220->dev);
+ }
- hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
if (IS_ERR(hd3ss3220->role_sw)) {
ret = PTR_ERR(hd3ss3220->role_sw);
goto err_put_fwnode;
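[Editor's note: a sketch of the graph-based fallback used above, assuming
a board where the Type-C connector is described as a remote endpoint
rather than a "connector" child node. The helper name is illustrative.]

#include <linux/property.h>

static struct fwnode_handle *example_find_connector(struct device *dev)
{
	struct fwnode_handle *ep, *connector;

	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
	if (!ep)
		return NULL;

	/* The remote port parent of our endpoint is the connector node. */
	connector = fwnode_graph_get_remote_port_parent(ep);
	fwnode_handle_put(ep);
	return connector;
}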
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 52ad277e4565..cf720e944aaa 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -34,15 +34,15 @@ static int switch_fwnode_match(struct device *dev, const void *fwnode)
return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-switch");
}
-static void *typec_switch_match(struct device_connection *con, int ep,
+static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
void *data)
{
struct device *dev;
- if (con->id && !fwnode_property_present(con->fwnode, con->id))
+ if (id && !fwnode_property_present(fwnode, id))
return NULL;
- dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
+ dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
@@ -71,7 +71,7 @@ struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_typec_switch_get);
/**
- * typec_put_switch - Release USB Type-C orientation switch
+ * typec_switch_put - Release USB Type-C orientation switch
* @sw: USB Type-C orientation switch
*
* Decrement reference count for @sw.
@@ -183,7 +183,8 @@ static int mux_fwnode_match(struct device *dev, const void *fwnode)
return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-mux");
}
-static void *typec_mux_match(struct device_connection *con, int ep, void *data)
+static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
+ void *data)
{
const struct typec_altmode_desc *desc = data;
struct device *dev;
@@ -196,20 +197,20 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
* Check whether the identifier has already been "consumed". If it
* has, no need to do any extra connection identification.
*/
- match = !con->id;
+ match = !id;
if (match)
goto find_mux;
/* Accessory Mode muxes */
if (!desc) {
- match = fwnode_property_present(con->fwnode, "accessory");
+ match = fwnode_property_present(fwnode, "accessory");
if (match)
goto find_mux;
return NULL;
}
/* Alternate Mode muxes */
- nval = fwnode_property_count_u16(con->fwnode, "svid");
+ nval = fwnode_property_count_u16(fwnode, "svid");
if (nval <= 0)
return NULL;
@@ -217,7 +218,7 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
if (!val)
return ERR_PTR(-ENOMEM);
- nval = fwnode_property_read_u16_array(con->fwnode, "svid", val, nval);
+ nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval);
if (nval < 0) {
kfree(val);
return ERR_PTR(nval);
@@ -234,7 +235,7 @@ static void *typec_mux_match(struct device_connection *con, int ep, void *data)
return NULL;
find_mux:
- dev = class_find_device(&typec_mux_class, NULL, con->fwnode,
+ dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
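[Editor's note: a condensed sketch of the "svid" match performed above,
assuming a fwnode carrying a u16 array property; the real helper also
handles accessory modes and the consumed-identifier case. Hypothetical
helper for illustration.]

#include <linux/property.h>
#include <linux/slab.h>

static bool example_fwnode_has_svid(struct fwnode_handle *fwnode, u16 svid)
{
	bool found = false;
	u16 *val;
	int nval, i;

	nval = fwnode_property_count_u16(fwnode, "svid");
	if (nval <= 0)
		return false;

	val = kcalloc(nval, sizeof(*val), GFP_KERNEL);
	if (!val)
		return false;

	/* Returns 0 on success when reading into a buffer. */
	if (!fwnode_property_read_u16_array(fwnode, "svid", val, nval))
		for (i = 0; i < nval; i++)
			found |= val[i] == svid;

	kfree(val);
	return found;
}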
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index a4dbd11f8ee2..edead555835e 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -11,6 +11,7 @@ config TYPEC_MUX_PI3USB30532
config TYPEC_MUX_INTEL_PMC
tristate "Intel PMC mux control"
+ depends on ACPI
depends on INTEL_SCU_IPC
select USB_ROLE_SWITCH
help
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 676b525c2a66..d7f63b74c6b1 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -80,10 +80,48 @@ enum {
#define PMC_USB_DP_HPD_LVL BIT(4)
#define PMC_USB_DP_HPD_IRQ BIT(5)
+/*
+ * Input Output Manager (IOM) PORT STATUS
+ */
+#define IOM_PORT_STATUS_OFFSET 0x560
+
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
+/* activity type: Safe Mode */
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_SAFE_MODE 0x04
+/* activity type: Display Port */
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP 0x05
+/* activity type: Display Port Multi Function Device */
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP_MFD 0x06
+/* activity type: Thunderbolt */
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_TBT 0x07
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_USB 0x0c
+#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_TBT_USB 0x0d
+/* Upstream Facing Port Information */
+#define IOM_PORT_STATUS_UFP BIT(10)
+/* Display Port Hot Plug Detect status */
+#define IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK GENMASK(13, 12)
+#define IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT 12
+#define IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT 0x01
+#define IOM_PORT_STATUS_DHPD_HPD_SOURCE_TBT BIT(14)
+#define IOM_PORT_STATUS_CONNECTED BIT(31)
+
+#define IOM_PORT_ACTIVITY_IS(_status_, _type_) \
+ ((((_status_) & IOM_PORT_STATUS_ACTIVITY_TYPE_MASK) >> \
+ IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT) == \
+ (IOM_PORT_STATUS_ACTIVITY_TYPE_##_type_))
+
+#define IOM_PORT_HPD_ASSERTED(_status_) \
+ ((((_status_) & IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK) >> \
+ IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) & \
+ IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT)
+
struct pmc_usb;
struct pmc_usb_port {
int num;
+ u32 iom_status;
struct pmc_usb *pmc;
struct typec_mux *typec_mux;
struct typec_switch *typec_sw;
@@ -104,8 +142,21 @@ struct pmc_usb {
struct device *dev;
struct intel_scu_ipc_dev *ipc;
struct pmc_usb_port *port;
+ struct acpi_device *iom_adev;
+ void __iomem *iom_base;
};
+static void update_port_status(struct pmc_usb_port *port)
+{
+ u8 port_num;
+
+ /* The SoC expects the USB Type-C port numbers to start from 0 */
+ port_num = port->usb3_port - 1;
+
+ port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET +
+ port_num * sizeof(u32));
+}
+
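[Editor's note: a small sketch of how the IOM_PORT_* macros above decode
the 32-bit status word fetched by update_port_status(). The helper name is
illustrative only.]

static bool example_port_runs_dp(struct pmc_usb_port *port)
{
	/* IOM_PORT_ACTIVITY_IS() extracts bits 9:6 and compares the type. */
	return IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
	       IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD);
}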
static int sbu_orientation(struct pmc_usb_port *port)
{
if (port->sbu_orientation)
@@ -148,18 +199,17 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
}
static int
-pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
+pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
- struct typec_displayport_data *data = state->data;
u8 msg[2] = { };
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
- if (data->status & DP_STATUS_IRQ_HPD)
+ if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
- if (data->status & DP_STATUS_HPD_STATE)
+ if (dp->status & DP_STATUS_HPD_STATE)
msg[1] |= PMC_USB_DP_HPD_LVL;
return pmc_usb_command(port, msg, sizeof(msg));
@@ -172,8 +222,15 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
struct altmode_req req = { };
int ret;
- if (data->status & DP_STATUS_IRQ_HPD)
- return pmc_usb_mux_dp_hpd(port, state);
+ if (IOM_PORT_ACTIVITY_IS(port->iom_status, DP) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) {
+ if (IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ (!(data->status & DP_STATUS_IRQ_HPD) &&
+ data->status & DP_STATUS_HPD_STATE))
+ return 0;
+
+ return pmc_usb_mux_dp_hpd(port, state->data);
+ }
req.usage = PMC_USB_ALT_MODE;
req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
@@ -189,8 +246,8 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
if (ret)
return ret;
- if (data->status & DP_STATUS_HPD_STATE)
- return pmc_usb_mux_dp_hpd(port, state);
+ if (data->status & (DP_STATUS_IRQ_HPD | DP_STATUS_HPD_STATE))
+ return pmc_usb_mux_dp_hpd(port, state->data);
return 0;
}
@@ -202,6 +259,10 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode);
struct altmode_req req = { };
+ if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB))
+ return 0;
+
req.usage = PMC_USB_ALT_MODE;
req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT;
@@ -233,6 +294,10 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
struct altmode_req req = { };
u8 cable_speed;
+ if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) ||
+ IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB))
+ return 0;
+
req.usage = PMC_USB_ALT_MODE;
req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT;
@@ -267,34 +332,61 @@ static int pmc_usb_mux_safe_state(struct pmc_usb_port *port)
{
u8 msg;
+ if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE))
+ return 0;
+
msg = PMC_USB_SAFE_MODE;
msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
return pmc_usb_command(port, &msg, sizeof(msg));
}
-static int pmc_usb_connect(struct pmc_usb_port *port)
+static int pmc_usb_disconnect(struct pmc_usb_port *port)
{
+ struct typec_displayport_data data = { };
u8 msg[2];
- msg[0] = PMC_USB_CONNECT;
+ if (!(port->iom_status & IOM_PORT_STATUS_CONNECTED))
+ return 0;
+
+ /* Clear DisplayPort HPD if it's still asserted. */
+ if (IOM_PORT_HPD_ASSERTED(port->iom_status))
+ pmc_usb_mux_dp_hpd(port, &data);
+
+ msg[0] = PMC_USB_DISCONNECT;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
- msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT;
- msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT;
return pmc_usb_command(port, msg, sizeof(msg));
}
-static int pmc_usb_disconnect(struct pmc_usb_port *port)
+static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role)
{
+ u8 ufp = role == USB_ROLE_DEVICE ? 1 : 0;
u8 msg[2];
+ int ret;
- msg[0] = PMC_USB_DISCONNECT;
+ if (port->orientation == TYPEC_ORIENTATION_NONE)
+ return -EINVAL;
+
+ if (port->iom_status & IOM_PORT_STATUS_CONNECTED) {
+ if (port->role == role || port->role == USB_ROLE_NONE)
+ return 0;
+
+ /* Role swap */
+ ret = pmc_usb_disconnect(port);
+ if (ret)
+ return ret;
+ }
+
+ msg[0] = PMC_USB_CONNECT;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
+ msg[1] |= ufp << PMC_USB_MSG_UFP_SHIFT;
+ msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT;
+ msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT;
return pmc_usb_command(port, msg, sizeof(msg));
}
@@ -304,13 +396,15 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
struct pmc_usb_port *port = typec_mux_get_drvdata(mux);
+ update_port_status(port);
+
if (port->orientation == TYPEC_ORIENTATION_NONE || port->role == USB_ROLE_NONE)
return 0;
if (state->mode == TYPEC_STATE_SAFE)
return pmc_usb_mux_safe_state(port);
if (state->mode == TYPEC_STATE_USB)
- return pmc_usb_connect(port);
+ return pmc_usb_connect(port, port->role);
if (state->alt) {
switch (state->alt->svid) {
@@ -325,7 +419,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
/* REVISIT: Try with usb3_port set to 0? */
break;
case TYPEC_MODE_USB3:
- return pmc_usb_connect(port);
+ return pmc_usb_connect(port, port->role);
case TYPEC_MODE_USB4:
return pmc_usb_mux_usb4(port, state);
}
@@ -339,38 +433,28 @@ static int pmc_usb_set_orientation(struct typec_switch *sw,
{
struct pmc_usb_port *port = typec_switch_get_drvdata(sw);
- if (port->orientation == orientation)
- return 0;
+ update_port_status(port);
port->orientation = orientation;
- if (port->role) {
- if (orientation == TYPEC_ORIENTATION_NONE)
- return pmc_usb_disconnect(port);
- else
- return pmc_usb_connect(port);
- }
-
return 0;
}
static int pmc_usb_set_role(struct usb_role_switch *sw, enum usb_role role)
{
struct pmc_usb_port *port = usb_role_switch_get_drvdata(sw);
+ int ret;
- if (port->role == role)
- return 0;
+ update_port_status(port);
- port->role = role;
+ if (role == USB_ROLE_NONE)
+ ret = pmc_usb_disconnect(port);
+ else
+ ret = pmc_usb_connect(port, role);
- if (port->orientation) {
- if (role == USB_ROLE_NONE)
- return pmc_usb_disconnect(port);
- else
- return pmc_usb_connect(port);
- }
+ port->role = role;
- return 0;
+ return ret;
}
static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
@@ -444,6 +528,45 @@ err_unregister_switch:
return ret;
}
+static int is_memory(struct acpi_resource *res, void *data)
+{
+ struct resource r;
+
+ return !acpi_dev_resource_memory(res, &r);
+}
+
+static int pmc_usb_probe_iom(struct pmc_usb *pmc)
+{
+ struct list_head resource_list;
+ struct resource_entry *rentry;
+ struct acpi_device *adev;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1);
+ if (!adev)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ if (ret < 0)
+ return ret;
+
+ rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
+ if (rentry)
+ pmc->iom_base = devm_ioremap_resource(pmc->dev, rentry->res);
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (!pmc->iom_base) {
+ put_device(&adev->dev);
+ return -ENOMEM;
+ }
+
+ pmc->iom_adev = adev;
+
+ return 0;
+}
+
static int pmc_usb_probe(struct platform_device *pdev)
{
struct fwnode_handle *fwnode = NULL;
@@ -458,6 +581,12 @@ static int pmc_usb_probe(struct platform_device *pdev)
device_for_each_child_node(&pdev->dev, fwnode)
pmc->num_ports++;
+ /* The IOM microcontroller supports at most 4 ports. */
+ if (pmc->num_ports > 4) {
+ dev_err(&pdev->dev, "driver limited to 4 ports\n");
+ return -ERANGE;
+ }
+
pmc->port = devm_kcalloc(&pdev->dev, pmc->num_ports,
sizeof(struct pmc_usb_port), GFP_KERNEL);
if (!pmc->port)
@@ -469,6 +598,10 @@ static int pmc_usb_probe(struct platform_device *pdev)
pmc->dev = &pdev->dev;
+ ret = pmc_usb_probe_iom(pmc);
+ if (ret)
+ return ret;
+
/*
* For every physical USB connector (USB2 and USB3 combo) there is a
* child ACPI device node under the PMC mux ACPI device object.
@@ -494,6 +627,8 @@ err_remove_ports:
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
+ put_device(&pmc->iom_adev->dev);
+
return ret;
}
@@ -508,6 +643,8 @@ static int pmc_usb_remove(struct platform_device *pdev)
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
+ put_device(&pmc->iom_adev->dev);
+
return 0;
}
diff --git a/drivers/usb/typec/qcom-pmic-typec.c b/drivers/usb/typec/qcom-pmic-typec.c
new file mode 100644
index 000000000000..a0454a80c4a2
--- /dev/null
+++ b/drivers/usb/typec/qcom-pmic-typec.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/role.h>
+#include <linux/usb/typec_mux.h>
+
+#define TYPEC_MISC_STATUS 0xb
+#define CC_ATTACHED BIT(0)
+#define CC_ORIENTATION BIT(1)
+#define SNK_SRC_MODE BIT(6)
+#define TYPEC_MODE_CFG 0x44
+#define TYPEC_DISABLE_CMD BIT(0)
+#define EN_SNK_ONLY BIT(1)
+#define EN_SRC_ONLY BIT(2)
+#define TYPEC_VCONN_CONTROL 0x46
+#define VCONN_EN_SRC BIT(0)
+#define VCONN_EN_VAL BIT(1)
+#define TYPEC_EXIT_STATE_CFG 0x50
+#define SEL_SRC_UPPER_REF BIT(2)
+#define TYPEC_INTR_EN_CFG_1 0x5e
+#define TYPEC_INTR_EN_CFG_1_MASK GENMASK(7, 0)
+
+struct qcom_pmic_typec {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 base;
+
+ struct typec_port *port;
+ struct usb_role_switch *role_sw;
+
+ struct regulator *vbus_reg;
+ bool vbus_enabled;
+};
+
+static void qcom_pmic_typec_enable_vbus_regulator(struct qcom_pmic_typec
+ *qcom_usb, bool enable)
+{
+ int ret;
+
+ if (enable == qcom_usb->vbus_enabled)
+ return;
+
+ if (enable) {
+ ret = regulator_enable(qcom_usb->vbus_reg);
+ if (ret)
+ return;
+ } else {
+ ret = regulator_disable(qcom_usb->vbus_reg);
+ if (ret)
+ return;
+ }
+ qcom_usb->vbus_enabled = enable;
+}
+
+static void qcom_pmic_typec_check_connection(struct qcom_pmic_typec *qcom_usb)
+{
+ enum typec_orientation orientation;
+ enum usb_role role;
+ unsigned int stat;
+ bool enable_vbus;
+
+ regmap_read(qcom_usb->regmap, qcom_usb->base + TYPEC_MISC_STATUS,
+ &stat);
+
+ if (stat & CC_ATTACHED) {
+ orientation = (stat & CC_ORIENTATION) ?
+ TYPEC_ORIENTATION_REVERSE :
+ TYPEC_ORIENTATION_NORMAL;
+ typec_set_orientation(qcom_usb->port, orientation);
+
+ role = (stat & SNK_SRC_MODE) ? USB_ROLE_HOST : USB_ROLE_DEVICE;
+ if (role == USB_ROLE_HOST)
+ enable_vbus = true;
+ else
+ enable_vbus = false;
+ } else {
+ role = USB_ROLE_NONE;
+ enable_vbus = false;
+ }
+
+ qcom_pmic_typec_enable_vbus_regulator(qcom_usb, enable_vbus);
+ usb_role_switch_set_role(qcom_usb->role_sw, role);
+}
+
+static irqreturn_t qcom_pmic_typec_interrupt(int irq, void *_qcom_usb)
+{
+ struct qcom_pmic_typec *qcom_usb = _qcom_usb;
+
+ qcom_pmic_typec_check_connection(qcom_usb);
+ return IRQ_HANDLED;
+}
+
+static void qcom_pmic_typec_typec_hw_init(struct qcom_pmic_typec *qcom_usb,
+ enum typec_port_type type)
+{
+ u8 mode = 0;
+
+ regmap_update_bits(qcom_usb->regmap,
+ qcom_usb->base + TYPEC_INTR_EN_CFG_1,
+ TYPEC_INTR_EN_CFG_1_MASK, 0);
+
+ if (type == TYPEC_PORT_SRC)
+ mode = EN_SRC_ONLY;
+ else if (type == TYPEC_PORT_SNK)
+ mode = EN_SNK_ONLY;
+
+ regmap_update_bits(qcom_usb->regmap, qcom_usb->base + TYPEC_MODE_CFG,
+ EN_SNK_ONLY | EN_SRC_ONLY, mode);
+
+ regmap_update_bits(qcom_usb->regmap,
+ qcom_usb->base + TYPEC_VCONN_CONTROL,
+ VCONN_EN_SRC | VCONN_EN_VAL, VCONN_EN_SRC);
+ regmap_update_bits(qcom_usb->regmap,
+ qcom_usb->base + TYPEC_EXIT_STATE_CFG,
+ SEL_SRC_UPPER_REF, SEL_SRC_UPPER_REF);
+}
+
+static int qcom_pmic_typec_probe(struct platform_device *pdev)
+{
+ struct qcom_pmic_typec *qcom_usb;
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *fwnode;
+ struct typec_capability cap;
+ const char *buf;
+ int ret, irq, role;
+ u32 reg;
+
+ ret = device_property_read_u32(dev, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "missing base address\n");
+ return ret;
+ }
+
+ qcom_usb = devm_kzalloc(dev, sizeof(*qcom_usb), GFP_KERNEL);
+ if (!qcom_usb)
+ return -ENOMEM;
+
+ qcom_usb->dev = dev;
+ qcom_usb->base = reg;
+
+ qcom_usb->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!qcom_usb->regmap) {
+ dev_err(dev, "Failed to get regmap\n");
+ return -EINVAL;
+ }
+
+ qcom_usb->vbus_reg = devm_regulator_get(qcom_usb->dev, "usb_vbus");
+ if (IS_ERR(qcom_usb->vbus_reg))
+ return PTR_ERR(qcom_usb->vbus_reg);
+
+ fwnode = device_get_named_child_node(dev, "connector");
+ if (!fwnode)
+ return -EINVAL;
+
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (!ret) {
+ role = typec_find_port_power_role(buf);
+ if (role < 0)
+ role = TYPEC_PORT_SNK;
+ } else {
+ role = TYPEC_PORT_SNK;
+ }
+ cap.type = role;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (!ret) {
+ role = typec_find_port_data_role(buf);
+ if (role < 0)
+ role = TYPEC_PORT_UFP;
+ } else {
+ role = TYPEC_PORT_UFP;
+ }
+ cap.data = role;
+
+ cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ cap.fwnode = fwnode;
+ qcom_usb->port = typec_register_port(dev, &cap);
+ if (IS_ERR(qcom_usb->port)) {
+ ret = PTR_ERR(qcom_usb->port);
+ dev_err(dev, "Failed to register type c port %d\n", ret);
+ goto err_put_node;
+ }
+ fwnode_handle_put(fwnode);
+
+ qcom_usb->role_sw = fwnode_usb_role_switch_get(dev_fwnode(qcom_usb->dev));
+ if (IS_ERR(qcom_usb->role_sw)) {
+ if (PTR_ERR(qcom_usb->role_sw) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get role switch\n");
+ ret = PTR_ERR(qcom_usb->role_sw);
+ goto err_typec_port;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_usb_role_sw;
+
+ ret = devm_request_threaded_irq(qcom_usb->dev, irq, NULL,
+ qcom_pmic_typec_interrupt, IRQF_ONESHOT,
+ "qcom-pmic-typec", qcom_usb);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ goto err_usb_role_sw;
+ }
+
+ platform_set_drvdata(pdev, qcom_usb);
+ qcom_pmic_typec_typec_hw_init(qcom_usb, cap.type);
+ qcom_pmic_typec_check_connection(qcom_usb);
+
+ return 0;
+
+err_usb_role_sw:
+ usb_role_switch_put(qcom_usb->role_sw);
+err_typec_port:
+ typec_unregister_port(qcom_usb->port);
+err_put_node:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_remove(struct platform_device *pdev)
+{
+ struct qcom_pmic_typec *qcom_usb = platform_get_drvdata(pdev);
+
+ usb_role_switch_set_role(qcom_usb->role_sw, USB_ROLE_NONE);
+ qcom_pmic_typec_enable_vbus_regulator(qcom_usb, 0);
+
+ typec_unregister_port(qcom_usb->port);
+ usb_role_switch_put(qcom_usb->role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pmic_typec_table[] = {
+ { .compatible = "qcom,pm8150b-usb-typec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
+
+static struct platform_driver qcom_pmic_typec = {
+ .driver = {
+ .name = "qcom,pmic-typec",
+ .of_match_table = qcom_pmic_typec_table,
+ },
+ .probe = qcom_pmic_typec_probe,
+ .remove = qcom_pmic_typec_remove,
+};
+module_platform_driver(qcom_pmic_typec);
+
+MODULE_DESCRIPTION("QCOM PMIC USB type C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
new file mode 100644
index 000000000000..2a618f02f4f1
--- /dev/null
+++ b/drivers/usb/typec/stusb160x.c
@@ -0,0 +1,873 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STMicroelectronics STUSB160x Type-C controller family driver
+ *
+ * Copyright (C) 2020, STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/role.h>
+#include <linux/usb/typec.h>
+
+#define STUSB160X_ALERT_STATUS 0x0B /* RC */
+#define STUSB160X_ALERT_STATUS_MASK_CTRL 0x0C /* RW */
+#define STUSB160X_CC_CONNECTION_STATUS_TRANS 0x0D /* RC */
+#define STUSB160X_CC_CONNECTION_STATUS 0x0E /* RO */
+#define STUSB160X_MONITORING_STATUS_TRANS 0x0F /* RC */
+#define STUSB160X_MONITORING_STATUS 0x10 /* RO */
+#define STUSB160X_CC_OPERATION_STATUS 0x11 /* RO */
+#define STUSB160X_HW_FAULT_STATUS_TRANS 0x12 /* RC */
+#define STUSB160X_HW_FAULT_STATUS 0x13 /* RO */
+#define STUSB160X_CC_CAPABILITY_CTRL 0x18 /* RW */
+#define STUSB160X_CC_VCONN_SWITCH_CTRL 0x1E /* RW */
+#define STUSB160X_VCONN_MONITORING_CTRL 0x20 /* RW */
+#define STUSB160X_VBUS_MONITORING_RANGE_CTRL 0x22 /* RW */
+#define STUSB160X_RESET_CTRL 0x23 /* RW */
+#define STUSB160X_VBUS_DISCHARGE_TIME_CTRL 0x25 /* RW */
+#define STUSB160X_VBUS_DISCHARGE_STATUS 0x26 /* RO */
+#define STUSB160X_VBUS_ENABLE_STATUS 0x27 /* RO */
+#define STUSB160X_CC_POWER_MODE_CTRL 0x28 /* RW */
+#define STUSB160X_VBUS_MONITORING_CTRL 0x2E /* RW */
+#define STUSB1600_REG_MAX 0x2F /* RO - Reserved */
+
+/* STUSB160X_ALERT_STATUS/STUSB160X_ALERT_STATUS_MASK_CTRL bitfields */
+#define STUSB160X_HW_FAULT BIT(4)
+#define STUSB160X_MONITORING BIT(5)
+#define STUSB160X_CC_CONNECTION BIT(6)
+#define STUSB160X_ALL_ALERTS GENMASK(6, 4)
+
+/* STUSB160X_CC_CONNECTION_STATUS_TRANS bitfields */
+#define STUSB160X_CC_ATTACH_TRANS BIT(0)
+
+/* STUSB160X_CC_CONNECTION_STATUS bitfields */
+#define STUSB160X_CC_ATTACH BIT(0)
+#define STUSB160X_CC_VCONN_SUPPLY BIT(1)
+#define STUSB160X_CC_DATA_ROLE(s) (!!((s) & BIT(2)))
+#define STUSB160X_CC_POWER_ROLE(s) (!!((s) & BIT(3)))
+#define STUSB160X_CC_ATTACHED_MODE GENMASK(7, 5)
+
+/* STUSB160X_MONITORING_STATUS_TRANS bitfields */
+#define STUSB160X_VCONN_PRESENCE_TRANS BIT(0)
+#define STUSB160X_VBUS_PRESENCE_TRANS BIT(1)
+#define STUSB160X_VBUS_VSAFE0V_TRANS BIT(2)
+#define STUSB160X_VBUS_VALID_TRANS BIT(3)
+
+/* STUSB160X_MONITORING_STATUS bitfields */
+#define STUSB160X_VCONN_PRESENCE BIT(0)
+#define STUSB160X_VBUS_PRESENCE BIT(1)
+#define STUSB160X_VBUS_VSAFE0V BIT(2)
+#define STUSB160X_VBUS_VALID BIT(3)
+
+/* STUSB160X_CC_OPERATION_STATUS bitfields */
+#define STUSB160X_TYPEC_FSM_STATE GENMASK(4, 0)
+#define STUSB160X_SINK_POWER_STATE GENMASK(6, 5)
+#define STUSB160X_CC_ATTACHED BIT(7)
+
+/* STUSB160X_HW_FAULT_STATUS_TRANS bitfields */
+#define STUSB160X_VCONN_SW_OVP_FAULT_TRANS BIT(0)
+#define STUSB160X_VCONN_SW_OCP_FAULT_TRANS BIT(1)
+#define STUSB160X_VCONN_SW_RVP_FAULT_TRANS BIT(2)
+#define STUSB160X_VPU_VALID_TRANS BIT(4)
+#define STUSB160X_VPU_OVP_FAULT_TRANS BIT(5)
+#define STUSB160X_THERMAL_FAULT BIT(7)
+
+/* STUSB160X_HW_FAULT_STATUS bitfields */
+#define STUSB160X_VCONN_SW_OVP_FAULT_CC2 BIT(0)
+#define STUSB160X_VCONN_SW_OVP_FAULT_CC1 BIT(1)
+#define STUSB160X_VCONN_SW_OCP_FAULT_CC2 BIT(2)
+#define STUSB160X_VCONN_SW_OCP_FAULT_CC1 BIT(3)
+#define STUSB160X_VCONN_SW_RVP_FAULT_CC2 BIT(4)
+#define STUSB160X_VCONN_SW_RVP_FAULT_CC1 BIT(5)
+#define STUSB160X_VPU_VALID BIT(6)
+#define STUSB160X_VPU_OVP_FAULT BIT(7)
+
+/* STUSB160X_CC_CAPABILITY_CTRL bitfields */
+#define STUSB160X_CC_VCONN_SUPPLY_EN BIT(0)
+#define STUSB160X_CC_VCONN_DISCHARGE_EN BIT(4)
+#define STUSB160X_CC_CURRENT_ADVERTISED GENMASK(7, 6)
+
+/* STUSB160X_VCONN_SWITCH_CTRL bitfields */
+#define STUSB160X_CC_VCONN_SWITCH_ILIM GENMASK(3, 0)
+
+/* STUSB160X_VCONN_MONITORING_CTRL bitfields */
+#define STUSB160X_VCONN_UVLO_THRESHOLD BIT(6)
+#define STUSB160X_VCONN_MONITORING_EN BIT(7)
+
+/* STUSB160X_VBUS_MONITORING_RANGE_CTRL bitfields */
+#define STUSB160X_SHIFT_LOW_VBUS_LIMIT GENMASK(3, 0)
+#define STUSB160X_SHIFT_HIGH_VBUS_LIMIT GENMASK(7, 4)
+
+/* STUSB160X_RESET_CTRL bitfields */
+#define STUSB160X_SW_RESET_EN BIT(0)
+
+/* STUSB160X_VBUS_DISCHARGE_TIME_CTRL bitfields */
+#define STUSBXX02_VBUS_DISCHARGE_TIME_TO_PDO GENMASK(3, 0)
+#define STUSB160X_VBUS_DISCHARGE_TIME_TO_0V GENMASK(7, 4)
+
+/* STUSB160X_VBUS_DISCHARGE_STATUS bitfields */
+#define STUSB160X_VBUS_DISCHARGE_EN BIT(7)
+
+/* STUSB160X_VBUS_ENABLE_STATUS bitfields */
+#define STUSB160X_VBUS_SOURCE_EN BIT(0)
+#define STUSB160X_VBUS_SINK_EN BIT(1)
+
+/* STUSB160X_CC_POWER_MODE_CTRL bitfields */
+#define STUSB160X_CC_POWER_MODE GENMASK(2, 0)
+
+/* STUSB160X_VBUS_MONITORING_CTRL bitfields */
+#define STUSB160X_VDD_UVLO_DISABLE BIT(0)
+#define STUSB160X_VBUS_VSAFE0V_THRESHOLD GENMASK(2, 1)
+#define STUSB160X_VBUS_RANGE_DISABLE BIT(4)
+#define STUSB160X_VDD_OVLO_DISABLE BIT(6)
+
+enum stusb160x_pwr_mode {
+ SOURCE_WITH_ACCESSORY,
+ SINK_WITH_ACCESSORY,
+ SINK_WITHOUT_ACCESSORY,
+ DUAL_WITH_ACCESSORY,
+ DUAL_WITH_ACCESSORY_AND_TRY_SRC,
+ DUAL_WITH_ACCESSORY_AND_TRY_SNK,
+};
+
+enum stusb160x_attached_mode {
+ NO_DEVICE_ATTACHED,
+ SINK_ATTACHED,
+ SOURCE_ATTACHED,
+ DEBUG_ACCESSORY_ATTACHED,
+ AUDIO_ACCESSORY_ATTACHED,
+};
+
+struct stusb160x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *vdd_supply;
+ struct regulator *vsys_supply;
+ struct regulator *vconn_supply;
+ struct regulator *main_supply;
+
+ struct typec_port *port;
+ struct typec_capability capability;
+ struct typec_partner *partner;
+
+ enum typec_port_type port_type;
+ enum typec_pwr_opmode pwr_opmode;
+ bool vbus_on;
+
+ struct usb_role_switch *role_sw;
+};
+
+static bool stusb160x_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STUSB160X_ALERT_STATUS_MASK_CTRL:
+ case STUSB160X_CC_CAPABILITY_CTRL:
+ case STUSB160X_CC_VCONN_SWITCH_CTRL:
+ case STUSB160X_VCONN_MONITORING_CTRL:
+ case STUSB160X_VBUS_MONITORING_RANGE_CTRL:
+ case STUSB160X_RESET_CTRL:
+ case STUSB160X_VBUS_DISCHARGE_TIME_CTRL:
+ case STUSB160X_CC_POWER_MODE_CTRL:
+ case STUSB160X_VBUS_MONITORING_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stusb160x_reg_readable(struct device *dev, unsigned int reg)
+{
+ if (reg <= 0x0A ||
+ (reg >= 0x14 && reg <= 0x17) ||
+ (reg >= 0x19 && reg <= 0x1D) ||
+ (reg >= 0x29 && reg <= 0x2D) ||
+ (reg == 0x1F || reg == 0x21 || reg == 0x24 || reg == 0x2F))
+ return false;
+ else
+ return true;
+}
+
+static bool stusb160x_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STUSB160X_ALERT_STATUS:
+ case STUSB160X_CC_CONNECTION_STATUS_TRANS:
+ case STUSB160X_CC_CONNECTION_STATUS:
+ case STUSB160X_MONITORING_STATUS_TRANS:
+ case STUSB160X_MONITORING_STATUS:
+ case STUSB160X_CC_OPERATION_STATUS:
+ case STUSB160X_HW_FAULT_STATUS_TRANS:
+ case STUSB160X_HW_FAULT_STATUS:
+ case STUSB160X_VBUS_DISCHARGE_STATUS:
+ case STUSB160X_VBUS_ENABLE_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stusb160x_reg_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STUSB160X_ALERT_STATUS:
+ case STUSB160X_CC_CONNECTION_STATUS_TRANS:
+ case STUSB160X_MONITORING_STATUS_TRANS:
+ case STUSB160X_HW_FAULT_STATUS_TRANS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config stusb1600_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .max_register = STUSB1600_REG_MAX,
+ .writeable_reg = stusb160x_reg_writeable,
+ .readable_reg = stusb160x_reg_readable,
+ .volatile_reg = stusb160x_reg_volatile,
+ .precious_reg = stusb160x_reg_precious,
+ .cache_type = REGCACHE_RBTREE,
+};
+
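[Editor's note: with REGCACHE_RBTREE and the access tables above, reads of
non-volatile registers are served from cache, while "precious"
(read-to-clear) status registers are never read speculatively, e.g. by
debugfs dumps. A minimal usage sketch, assuming an initialized regmap:]

#include <linux/regmap.h>

static int example_is_attached(struct regmap *map)
{
	unsigned int val;
	int ret;

	/* Volatile register: always hits the I2C bus, never the cache. */
	ret = regmap_read(map, STUSB160X_CC_CONNECTION_STATUS, &val);
	if (ret)
		return ret;

	return !!(val & STUSB160X_CC_ATTACH);
}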
+static bool stusb160x_get_vconn(struct stusb160x *chip)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &val);
+ if (ret) {
+ dev_err(chip->dev, "Unable to get Vconn status: %d\n", ret);
+ return false;
+ }
+
+ return !!FIELD_GET(STUSB160X_CC_VCONN_SUPPLY_EN, val);
+}
+
+static int stusb160x_set_vconn(struct stusb160x *chip, bool on)
+{
+ int ret;
+
+ /* Manage VCONN input supply */
+ if (chip->vconn_supply) {
+ if (on) {
+ ret = regulator_enable(chip->vconn_supply);
+ if (ret) {
+ dev_err(chip->dev,
+ "failed to enable vconn supply: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ regulator_disable(chip->vconn_supply);
+ }
+ }
+
+ /* Manage VCONN monitoring and power path */
+ ret = regmap_update_bits(chip->regmap, STUSB160X_VCONN_MONITORING_CTRL,
+ STUSB160X_VCONN_MONITORING_EN,
+ on ? STUSB160X_VCONN_MONITORING_EN : 0);
+ if (ret)
+ goto vconn_reg_disable;
+
+ return 0;
+
+vconn_reg_disable:
+ if (chip->vconn_supply && on)
+ regulator_disable(chip->vconn_supply);
+
+ return ret;
+}
+
+static enum typec_pwr_opmode stusb160x_get_pwr_opmode(struct stusb160x *chip)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &val);
+ if (ret) {
+ dev_err(chip->dev, "Unable to get pwr opmode: %d\n", ret);
+ return TYPEC_PWR_MODE_USB;
+ }
+
+ return FIELD_GET(STUSB160X_CC_CURRENT_ADVERTISED, val);
+}
+
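[Editor's note: FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, as used
above, derive the shift from the mask at compile time, so the
GENMASK(7, 6) current-advertisement field needs no hand-written shift
constants. A sketch, with a hypothetical helper:]

#include <linux/bitfield.h>
#include <linux/usb/typec.h>

static u8 example_pack_current(u8 reg, enum typec_pwr_opmode opmode)
{
	reg &= ~STUSB160X_CC_CURRENT_ADVERTISED;	/* clear bits 7:6 */
	return reg | FIELD_PREP(STUSB160X_CC_CURRENT_ADVERTISED, opmode);
}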
+static enum typec_accessory stusb160x_get_accessory(u32 status)
+{
+ enum stusb160x_attached_mode mode;
+
+ mode = FIELD_GET(STUSB160X_CC_ATTACHED_MODE, status);
+
+ switch (mode) {
+ case DEBUG_ACCESSORY_ATTACHED:
+ return TYPEC_ACCESSORY_DEBUG;
+ case AUDIO_ACCESSORY_ATTACHED:
+ return TYPEC_ACCESSORY_AUDIO;
+ default:
+ return TYPEC_ACCESSORY_NONE;
+ }
+}
+
+static enum typec_role stusb160x_get_vconn_role(u32 status)
+{
+ if (FIELD_GET(STUSB160X_CC_VCONN_SUPPLY, status))
+ return TYPEC_SOURCE;
+
+ return TYPEC_SINK;
+}
+
+static void stusb160x_set_data_role(struct stusb160x *chip,
+ enum typec_data_role data_role,
+ bool attached)
+{
+ enum usb_role usb_role = USB_ROLE_NONE;
+
+ if (attached) {
+ if (data_role == TYPEC_HOST)
+ usb_role = USB_ROLE_HOST;
+ else
+ usb_role = USB_ROLE_DEVICE;
+ }
+
+ usb_role_switch_set_role(chip->role_sw, usb_role);
+ typec_set_data_role(chip->port, data_role);
+}
+
+static int stusb160x_attach(struct stusb160x *chip, u32 status)
+{
+ struct typec_partner_desc desc;
+ int ret;
+
+ if ((STUSB160X_CC_POWER_ROLE(status) == TYPEC_SOURCE) &&
+ chip->vdd_supply) {
+ ret = regulator_enable(chip->vdd_supply);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to enable Vbus supply: %d\n", ret);
+ return ret;
+ }
+ chip->vbus_on = true;
+ }
+
+ desc.usb_pd = false;
+ desc.accessory = stusb160x_get_accessory(status);
+ desc.identity = NULL;
+
+ chip->partner = typec_register_partner(chip->port, &desc);
+ if (IS_ERR(chip->partner)) {
+ ret = PTR_ERR(chip->partner);
+ goto vbus_disable;
+ }
+
+ typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status));
+ typec_set_pwr_opmode(chip->port, stusb160x_get_pwr_opmode(chip));
+ typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status));
+ stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), true);
+
+ return 0;
+
+vbus_disable:
+ if (chip->vbus_on) {
+ regulator_disable(chip->vdd_supply);
+ chip->vbus_on = false;
+ }
+
+ return ret;
+}
+
+static void stusb160x_detach(struct stusb160x *chip, u32 status)
+{
+ typec_unregister_partner(chip->partner);
+ chip->partner = NULL;
+
+ typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status));
+ typec_set_pwr_opmode(chip->port, TYPEC_PWR_MODE_USB);
+ typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status));
+ stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), false);
+
+ if (chip->vbus_on) {
+ regulator_disable(chip->vdd_supply);
+ chip->vbus_on = false;
+ }
+}
+
+static irqreturn_t stusb160x_irq_handler(int irq, void *data)
+{
+ struct stusb160x *chip = data;
+ u32 pending, trans, status;
+ int ret;
+
+ ret = regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &pending);
+ if (ret)
+ goto err;
+
+ if (pending & STUSB160X_CC_CONNECTION) {
+ ret = regmap_read(chip->regmap,
+ STUSB160X_CC_CONNECTION_STATUS_TRANS, &trans);
+ if (ret)
+ goto err;
+ ret = regmap_read(chip->regmap,
+ STUSB160X_CC_CONNECTION_STATUS, &status);
+ if (ret)
+ goto err;
+
+ if (trans & STUSB160X_CC_ATTACH_TRANS) {
+ if (status & STUSB160X_CC_ATTACH) {
+ ret = stusb160x_attach(chip, status);
+ if (ret)
+ goto err;
+ } else {
+ stusb160x_detach(chip, status);
+ }
+ }
+ }
+err:
+ return IRQ_HANDLED;
+}
+
+static int stusb160x_irq_init(struct stusb160x *chip, int irq)
+{
+ u32 status;
+ int ret;
+
+ ret = regmap_read(chip->regmap,
+ STUSB160X_CC_CONNECTION_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (status & STUSB160X_CC_ATTACH) {
+ ret = stusb160x_attach(chip, status);
+ if (ret)
+ dev_err(chip->dev, "attach failed: %d\n", ret);
+ }
+
+ ret = devm_request_threaded_irq(chip->dev, irq, NULL,
+ stusb160x_irq_handler, IRQF_ONESHOT,
+ dev_name(chip->dev), chip);
+ if (ret)
+ goto partner_unregister;
+
+ /* Unmask CC_CONNECTION events */
+ ret = regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
+ STUSB160X_CC_CONNECTION, 0);
+ if (ret)
+ goto partner_unregister;
+
+ return 0;
+
+partner_unregister:
+ if (chip->partner) {
+ typec_unregister_partner(chip->partner);
+ chip->partner = NULL;
+ }
+
+ return ret;
+}
+
+static int stusb160x_chip_init(struct stusb160x *chip)
+{
+ u32 val;
+ int ret;
+
+ /* Change the default Type-C power mode */
+ if (chip->port_type == TYPEC_PORT_SRC)
+ ret = regmap_update_bits(chip->regmap,
+ STUSB160X_CC_POWER_MODE_CTRL,
+ STUSB160X_CC_POWER_MODE,
+ SOURCE_WITH_ACCESSORY);
+ else if (chip->port_type == TYPEC_PORT_SNK)
+ ret = regmap_update_bits(chip->regmap,
+ STUSB160X_CC_POWER_MODE_CTRL,
+ STUSB160X_CC_POWER_MODE,
+ SINK_WITH_ACCESSORY);
+ else /* (chip->port_type == TYPEC_PORT_DRP) */
+ ret = regmap_update_bits(chip->regmap,
+ STUSB160X_CC_POWER_MODE_CTRL,
+ STUSB160X_CC_POWER_MODE,
+ DUAL_WITH_ACCESSORY);
+ if (ret)
+ return ret;
+
+ if (chip->port_type == TYPEC_PORT_SNK)
+ goto skip_src;
+
+ /* Change the default Type-C Source power operation mode capability */
+ ret = regmap_update_bits(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL,
+ STUSB160X_CC_CURRENT_ADVERTISED,
+ FIELD_PREP(STUSB160X_CC_CURRENT_ADVERTISED,
+ chip->pwr_opmode));
+ if (ret)
+ return ret;
+
+ /* Manage Type-C Source Vconn supply */
+ if (stusb160x_get_vconn(chip)) {
+ ret = stusb160x_set_vconn(chip, true);
+ if (ret)
+ return ret;
+ }
+
+skip_src:
+ /* Mask all event interrupts - to be unmasked when interrupt support is set up */
+ ret = regmap_update_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
+ STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS);
+ if (ret)
+ return ret;
+
+ /* Read status at least once to clear any stale interrupts */
+ regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &val);
+ regmap_read(chip->regmap, STUSB160X_CC_CONNECTION_STATUS_TRANS, &val);
+ regmap_read(chip->regmap, STUSB160X_MONITORING_STATUS_TRANS, &val);
+ regmap_read(chip->regmap, STUSB160X_HW_FAULT_STATUS_TRANS, &val);
+
+ return 0;
+}
+
+static int stusb160x_get_fw_caps(struct stusb160x *chip,
+ struct fwnode_handle *fwnode)
+{
+ const char *cap_str;
+ int ret;
+
+ chip->capability.fwnode = fwnode;
+
+ /*
+ * The supported port type can be configured through the device tree;
+ * otherwise it is read from the chip registers in stusb160x_get_caps.
+ */
+ ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
+ if (!ret) {
+ ret = typec_find_port_power_role(cap_str);
+ if (ret < 0)
+ return ret;
+ chip->port_type = ret;
+ }
+ chip->capability.type = chip->port_type;
+
+ /* Skip DRP/Source capabilities in case of Sink only */
+ if (chip->port_type == TYPEC_PORT_SNK)
+ return 0;
+
+ if (chip->port_type == TYPEC_PORT_DRP)
+ chip->capability.prefer_role = TYPEC_SINK;
+
+ /*
+ * The supported power operation mode can be configured through the device
+ * tree; otherwise it is read from the chip registers in stusb160x_get_caps.
+ */
+ ret = fwnode_property_read_string(fwnode, "power-opmode", &cap_str);
+ if (!ret) {
+ ret = typec_find_pwr_opmode(cap_str);
+ /* Power delivery not yet supported */
+ if (ret < 0 || ret == TYPEC_PWR_MODE_PD) {
+ dev_err(chip->dev, "bad power operation mode: %d\n", ret);
+ return -EINVAL;
+ }
+ chip->pwr_opmode = ret;
+ }
+
+ return 0;
+}
+
+static int stusb160x_get_caps(struct stusb160x *chip)
+{
+ enum typec_port_type *type = &chip->capability.type;
+ enum typec_port_data *data = &chip->capability.data;
+ enum typec_accessory *accessory = chip->capability.accessory;
+ u32 val;
+ int ret;
+
+ chip->capability.revision = USB_TYPEC_REV_1_2;
+
+ ret = regmap_read(chip->regmap, STUSB160X_CC_POWER_MODE_CTRL, &val);
+ if (ret)
+ return ret;
+
+ switch (FIELD_GET(STUSB160X_CC_POWER_MODE, val)) {
+ case SOURCE_WITH_ACCESSORY:
+ *type = TYPEC_PORT_SRC;
+ *data = TYPEC_PORT_DFP;
+ *accessory++ = TYPEC_ACCESSORY_AUDIO;
+ *accessory++ = TYPEC_ACCESSORY_DEBUG;
+ break;
+ case SINK_WITH_ACCESSORY:
+ *type = TYPEC_PORT_SNK;
+ *data = TYPEC_PORT_UFP;
+ *accessory++ = TYPEC_ACCESSORY_AUDIO;
+ *accessory++ = TYPEC_ACCESSORY_DEBUG;
+ break;
+ case SINK_WITHOUT_ACCESSORY:
+ *type = TYPEC_PORT_SNK;
+ *data = TYPEC_PORT_UFP;
+ break;
+ case DUAL_WITH_ACCESSORY:
+ case DUAL_WITH_ACCESSORY_AND_TRY_SRC:
+ case DUAL_WITH_ACCESSORY_AND_TRY_SNK:
+ *type = TYPEC_PORT_DRP;
+ *data = TYPEC_PORT_DRD;
+ *accessory++ = TYPEC_ACCESSORY_AUDIO;
+ *accessory++ = TYPEC_ACCESSORY_DEBUG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ chip->port_type = *type;
+ chip->pwr_opmode = stusb160x_get_pwr_opmode(chip);
+
+ return 0;
+}
+
+static const struct of_device_id stusb160x_of_match[] = {
+ { .compatible = "st,stusb1600", .data = &stusb1600_regmap_config},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stusb160x_of_match);
+
+static int stusb160x_probe(struct i2c_client *client)
+{
+ struct stusb160x *chip;
+ const struct of_device_id *match;
+ struct regmap_config *regmap_config;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ chip = devm_kzalloc(&client->dev, sizeof(struct stusb160x), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+
+ match = i2c_of_match_device(stusb160x_of_match, client);
+ regmap_config = (struct regmap_config *)match->data;
+ chip->regmap = devm_regmap_init_i2c(client, regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev,
+ "Failed to allocate register map:%d\n", ret);
+ return ret;
+ }
+
+ chip->dev = &client->dev;
+
+ chip->vsys_supply = devm_regulator_get_optional(chip->dev, "vsys");
+ if (IS_ERR(chip->vsys_supply)) {
+ ret = PTR_ERR(chip->vsys_supply);
+ if (ret != -ENODEV)
+ return ret;
+ chip->vsys_supply = NULL;
+ }
+
+ chip->vdd_supply = devm_regulator_get_optional(chip->dev, "vdd");
+ if (IS_ERR(chip->vdd_supply)) {
+ ret = PTR_ERR(chip->vdd_supply);
+ if (ret != -ENODEV)
+ return ret;
+ chip->vdd_supply = NULL;
+ }
+
+ chip->vconn_supply = devm_regulator_get_optional(chip->dev, "vconn");
+ if (IS_ERR(chip->vconn_supply)) {
+ ret = PTR_ERR(chip->vconn_supply);
+ if (ret != -ENODEV)
+ return ret;
+ chip->vconn_supply = NULL;
+ }
+
+ fwnode = device_get_named_child_node(chip->dev, "connector");
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ /*
+ * When both VDD and VSYS power supplies are present, the low power
+ * supply VSYS is selected when VSYS voltage is above 3.1 V.
+ * Otherwise VDD is selected.
+ */
+ if (chip->vdd_supply &&
+ (!chip->vsys_supply ||
+ (regulator_get_voltage(chip->vsys_supply) <= 3100000)))
+ chip->main_supply = chip->vdd_supply;
+ else
+ chip->main_supply = chip->vsys_supply;
+
+ if (chip->main_supply) {
+ ret = regulator_enable(chip->main_supply);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to enable main supply: %d\n", ret);
+ goto fwnode_put;
+ }
+ }
+
+ /* Get configuration from chip */
+ ret = stusb160x_get_caps(chip);
+ if (ret) {
+ dev_err(chip->dev, "Failed to get port caps: %d\n", ret);
+ goto main_reg_disable;
+ }
+
+ /* Get optional re-configuration from device tree */
+ ret = stusb160x_get_fw_caps(chip, fwnode);
+ if (ret) {
+ dev_err(chip->dev, "Failed to get connector caps: %d\n", ret);
+ goto main_reg_disable;
+ }
+
+ ret = stusb160x_chip_init(chip);
+ if (ret) {
+ dev_err(chip->dev, "Failed to init port: %d\n", ret);
+ goto main_reg_disable;
+ }
+
+ chip->port = typec_register_port(chip->dev, &chip->capability);
+ if (IS_ERR(chip->port)) {
+ ret = PTR_ERR(chip->port);
+ goto all_reg_disable;
+ }
+
+ /*
+ * Default power operation mode initialization: will be updated upon
+ * attach/detach interrupt
+ */
+ typec_set_pwr_opmode(chip->port, chip->pwr_opmode);
+
+ if (client->irq) {
+ ret = stusb160x_irq_init(chip, client->irq);
+ if (ret)
+ goto port_unregister;
+
+ chip->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(chip->role_sw)) {
+ ret = PTR_ERR(chip->role_sw);
+ if (ret != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "Failed to get usb role switch: %d\n",
+ ret);
+ goto port_unregister;
+ }
+ } else {
+ /*
+ * If the power role is Source or Dual, the VDD supply providing Vbus
+ * must be enabled when present. With interrupt support, the VDD supply
+ * is managed dynamically upon attach/detach interrupts.
+ */
+ if (chip->port_type != TYPEC_PORT_SNK && chip->vdd_supply) {
+ ret = regulator_enable(chip->vdd_supply);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to enable VDD supply: %d\n",
+ ret);
+ goto port_unregister;
+ }
+ chip->vbus_on = true;
+ }
+ }
+
+ fwnode_handle_put(fwnode);
+
+ return 0;
+
+port_unregister:
+ typec_unregister_port(chip->port);
+all_reg_disable:
+ if (stusb160x_get_vconn(chip))
+ stusb160x_set_vconn(chip, false);
+main_reg_disable:
+ if (chip->main_supply)
+ regulator_disable(chip->main_supply);
+fwnode_put:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static int stusb160x_remove(struct i2c_client *client)
+{
+ struct stusb160x *chip = i2c_get_clientdata(client);
+
+ if (chip->partner) {
+ typec_unregister_partner(chip->partner);
+ chip->partner = NULL;
+ }
+
+ if (chip->vbus_on)
+ regulator_disable(chip->vdd_supply);
+
+ if (chip->role_sw)
+ usb_role_switch_put(chip->role_sw);
+
+ typec_unregister_port(chip->port);
+
+ if (stusb160x_get_vconn(chip))
+ stusb160x_set_vconn(chip, false);
+
+ if (chip->main_supply)
+ regulator_disable(chip->main_supply);
+
+ return 0;
+}
+
+static int __maybe_unused stusb160x_suspend(struct device *dev)
+{
+ struct stusb160x *chip = dev_get_drvdata(dev);
+
+ /* Mask interrupts */
+ return regmap_update_bits(chip->regmap,
+ STUSB160X_ALERT_STATUS_MASK_CTRL,
+ STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS);
+}
+
+static int __maybe_unused stusb160x_resume(struct device *dev)
+{
+ struct stusb160x *chip = dev_get_drvdata(dev);
+ u32 status;
+ int ret;
+
+ ret = regcache_sync(chip->regmap);
+ if (ret)
+ return ret;
+
+ /* Check if attach/detach occurred during low power */
+ ret = regmap_read(chip->regmap,
+ STUSB160X_CC_CONNECTION_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (chip->partner && !(status & STUSB160X_CC_ATTACH))
+ stusb160x_detach(chip, status);
+
+ if (!chip->partner && (status & STUSB160X_CC_ATTACH)) {
+ ret = stusb160x_attach(chip, status);
+ if (ret)
+ dev_err(chip->dev, "attach failed: %d\n", ret);
+ }
+
+ /* Unmask interrupts */
+ return regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL,
+ STUSB160X_CC_CONNECTION, 0);
+}
+
+static SIMPLE_DEV_PM_OPS(stusb160x_pm_ops, stusb160x_suspend, stusb160x_resume);
+
+static struct i2c_driver stusb160x_driver = {
+ .driver = {
+ .name = "stusb160x",
+ .pm = &stusb160x_pm_ops,
+ .of_match_table = stusb160x_of_match,
+ },
+ .probe_new = stusb160x_probe,
+ .remove = stusb160x_remove,
+};
+module_i2c_driver(stusb160x_driver);
+
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STUSB160x Type-C controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index fa3f39336246..557f392fe24d 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -27,6 +27,20 @@ config TYPEC_RT1711H
Type-C Port Controller Manager to provide USB PD and USB
Type-C functionalities.
+config TYPEC_MT6360
+ tristate "Mediatek MT6360 Type-C driver"
+ depends on MFD_MT6360
+ help
+ Mediatek MT6360 is a multi-functional IC that includes
+ USB Type-C. It works with the Type-C Port Controller Manager
+ to provide USB PD and USB Type-C functionalities.
+
+config TYPEC_TCPCI_MAXIM
+ tristate "Maxim TCPCI based Type-C chip driver"
+ help
+ Maxim TCPCI based Type-C/PD chip driver. Works with the
+ Type-C Port Controller Manager.
+
endif # TYPEC_TCPCI
config TYPEC_FUSB302
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index a5ff6c8eb892..7d499f3569fd 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
-obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
-obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
-typec_wcove-y := wcove.o
-obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
+obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+typec_wcove-y := wcove.o
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
+obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
+obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index bd80e03b2b6f..f9f0af64da5f 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -38,6 +38,12 @@ struct tcpci_chip {
struct tcpci_data data;
};
+struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci)
+{
+ return tcpci->port;
+}
+EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port);
+
static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
{
return container_of(tcpc, struct tcpci, tcpc);
@@ -191,12 +197,47 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
unsigned int reg;
int ret;
+ enum typec_cc_status cc1, cc2;
- /* Keep the disconnect cc line open */
+ /* Obtain Rp setting from role control */
ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
if (ret < 0)
return ret;
+ ret = tcpci_get_cc(tcpc, &cc1, &cc2);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * When the port has DRP toggling enabled, ROLE_CONTROL only holds the
+ * initial terminations for the toggling and does not indicate the final CC
+ * terminations once ConnectionResult is 0, i.e. DRP toggling has stopped and
+ * the connection is resolved. Infer the port role from TCPC_CC_STATUS based
+ * on the terminations seen; the port role is then used to set the CC
+ * terminations.
+ */
+ if (reg & TCPC_ROLE_CTRL_DRP) {
+ /* Disable DRP for the OPEN setting to take effect */
+ reg = reg & ~TCPC_ROLE_CTRL_DRP;
+
+ if (polarity == TYPEC_POLARITY_CC2) {
+ reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ /* Local port is source */
+ if (cc2 == TYPEC_CC_RD)
+ /* Role control would have the Rp setting when DRP was enabled */
+ reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT;
+ else
+ reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT;
+ } else {
+ reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ /* Local port is source */
+ if (cc1 == TYPEC_CC_RD)
+ /* Role control would have the Rp setting when DRP was enabled */
+ reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT;
+ else
+ reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT;
+ }
+ }
+
if (polarity == TYPEC_POLARITY_CC2)
reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
else
@@ -227,6 +268,22 @@ static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
}
+static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(dev);
+ int ret;
+
+ /* To prevent disconnect during FRS, set disconnect threshold to 3.5V */
+ ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 0 : 0x8c);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ?
+ TCPC_FAST_ROLE_SWAP_EN : 0);
+
+ return ret;
+}
+
static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -287,6 +344,13 @@ static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
int ret;
+ if (tcpci->data->set_vbus) {
+ ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink);
+ /* Bypass when ret > 0 */
+ if (ret != 0)
+ return ret < 0 ? ret : 0;
+ }
+
/* Disable both source and sink first before enabling anything */
if (!source) {
@@ -330,23 +394,47 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc,
int ret;
cnt = msg ? pd_header_cnt(header) * 4 : 0;
- ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
- if (ret < 0)
- return ret;
+ /*
+ * The TCPCI spec forbids direct access to TCPC_TX_DATA,
+ * but since some chipsets offer this capability,
+ * it is fair to support both access methods.
+ */
+ if (tcpci->data->TX_BUF_BYTE_x_hidden) {
+ u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,};
+ u8 pos = 0;
- ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
- if (ret < 0)
- return ret;
+ /* Payload + header + TCPC_TX_BYTE_CNT */
+ buf[pos++] = cnt + 2;
+
+ if (msg)
+ memcpy(&buf[pos], &msg->header, sizeof(msg->header));
- if (cnt > 0) {
- ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA,
- &msg->payload, cnt);
+ pos += sizeof(header);
+
+ if (cnt > 0)
+ memcpy(&buf[pos], msg->payload, cnt);
+
+ pos += cnt;
+ ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
if (ret < 0)
return ret;
+
+ ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
+ if (ret < 0)
+ return ret;
+
+ if (cnt > 0) {
+ ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt);
+ if (ret < 0)
+ return ret;
+ }
}
- reg = (PD_RETRY_COUNT << TCPC_TRANSMIT_RETRY_SHIFT) |
- (type << TCPC_TRANSMIT_TYPE_SHIFT);
+ reg = (PD_RETRY_COUNT << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT);
ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
if (ret < 0)
return ret;
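For chips with TX_BUF_BYTE_x hidden, the branch above assembles one contiguous buffer and issues a single write starting at TCPC_TX_BYTE_CNT. A sketch of the layout the code builds, with offsets implied by the pos arithmetic:

/*
 * buf[0]              = cnt + 2      byte count: header + payload
 * buf[1..2]           = PD header    little-endian u16
 * buf[3..3 + cnt - 1] = payload      4 bytes per PD data object
 *
 * One regmap_raw_write(regmap, TCPC_TX_BYTE_CNT, buf, pos) then forms
 * the single I2C_WRITE_BYTE_COUNT transaction such chips require.
 */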
@@ -539,6 +627,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.set_roles = tcpci_set_roles;
tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
+ tcpci->tcpc.enable_frs = tcpci_enable_frs;
err = tcpci_parse_config(tcpci);
if (err < 0)
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 11c36d086c86..5ef07a56d67a 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -16,6 +16,8 @@
#define TCPC_PD_INT_REV 0xa
#define TCPC_ALERT 0x10
+#define TCPC_ALERT_EXTND BIT(14)
+#define TCPC_ALERT_EXTENDED_STATUS BIT(13)
#define TCPC_ALERT_VBUS_DISCNCT BIT(11)
#define TCPC_ALERT_RX_BUF_OVF BIT(10)
#define TCPC_ALERT_FAULT BIT(9)
@@ -32,6 +34,13 @@
#define TCPC_ALERT_MASK 0x12
#define TCPC_POWER_STATUS_MASK 0x14
#define TCPC_FAULT_STATUS_MASK 0x15
+
+#define TCPC_EXTENDED_STATUS_MASK 0x16
+#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
+
+#define TCPC_ALERT_EXTENDED_MASK 0x17
+#define TCPC_SINK_FAST_ROLE_SWAP BIT(0)
+
#define TCPC_CONFIG_STD_OUTPUT 0x18
#define TCPC_TCPC_CTRL 0x19
@@ -58,6 +67,7 @@
#define TCPC_POWER_CTRL 0x1c
#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0)
+#define TCPC_FAST_ROLE_SWAP_EN BIT(7)
#define TCPC_CC_STATUS 0x1d
#define TCPC_CC_STATUS_TOGGLING BIT(5)
@@ -69,11 +79,14 @@
#define TCPC_POWER_STATUS 0x1e
#define TCPC_POWER_STATUS_UNINIT BIT(6)
+#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_ALERT_EXTENDED 0x21
+
#define TCPC_COMMAND 0x23
#define TCPC_CMD_WAKE_I2C 0x11
#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22
@@ -104,6 +117,7 @@
#define TCPC_RX_BYTE_CNT 0x30
#define TCPC_RX_BUF_FRAME_TYPE 0x31
+#define TCPC_RX_BUF_FRAME_TYPE_SOP 0
#define TCPC_RX_HDR 0x32
#define TCPC_RX_DATA 0x34 /* through 0x4f */
@@ -123,18 +137,29 @@
#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76
#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78
+/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible through I2C_WRITE_BYTE_COUNT */
+#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31
+
+/*
+ * @TX_BUF_BYTE_x_hidden
+ * optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
+ */
struct tcpci;
struct tcpci_data {
struct regmap *regmap;
+ unsigned char TX_BUF_BYTE_x_hidden:1;
int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
bool enable);
int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data,
enum typec_cc_status cc);
+ int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
};
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
void tcpci_unregister_port(struct tcpci *tcpci);
irqreturn_t tcpci_irq(struct tcpci *tcpci);
+struct tcpm_port;
+struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci);
#endif /* __LINUX_USB_TCPCI_H */
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
new file mode 100644
index 000000000000..723d7dd38f75
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC
+ *
+ * MAXIM TCPCI based TCPC driver
+ */
+
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+
+#include "tcpci.h"
+
+#define PD_ACTIVITY_TIMEOUT_MS 10000
+
+#define TCPC_VENDOR_ALERT 0x80
+
+#define TCPC_RECEIVE_BUFFER_COUNT_OFFSET 0
+#define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET 1
+#define TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET 2
+
+/*
+ * LongMessage is not supported, hence 32 bytes suffice for the buf read from RECEIVE_BUFFER.
+ * With DEVICE_CAPABILITIES_2.LongMessage = 0, the value in the READABLE_BYTE_COUNT reg shall
+ * be less than or equal to 31. Hence, RECEIVE_BUFFER len = 31 + 1 (READABLE_BYTE_COUNT).
+ */
+#define TCPC_RECEIVE_BUFFER_LEN 32
+
+#define MAX_BUCK_BOOST_SID 0x69
+#define MAX_BUCK_BOOST_OP 0xb9
+#define MAX_BUCK_BOOST_OFF 0
+#define MAX_BUCK_BOOST_SOURCE 0xa
+#define MAX_BUCK_BOOST_SINK 0x5
+
+struct max_tcpci_chip {
+ struct tcpci_data data;
+ struct tcpci *tcpci;
+ struct device *dev;
+ struct i2c_client *client;
+ struct tcpm_port *port;
+};
+
+static const struct regmap_range max_tcpci_tcpci_range[] = {
+ regmap_reg_range(0x00, 0x95)
+};
+
+const struct regmap_access_table max_tcpci_tcpci_write_table = {
+ .yes_ranges = max_tcpci_tcpci_range,
+ .n_yes_ranges = ARRAY_SIZE(max_tcpci_tcpci_range),
+};
+
+static const struct regmap_config max_tcpci_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x95,
+ .wr_table = &max_tcpci_tcpci_write_table,
+};
+
+static struct max_tcpci_chip *tdata_to_max_tcpci(struct tcpci_data *tdata)
+{
+ return container_of(tdata, struct max_tcpci_chip, data);
+}
+
+static int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16));
+}
+
+static int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16));
+}
+
+static int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8));
+}
+
+static int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
+}
+
+static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
+{
+ u16 alert_mask = 0;
+ int ret;
+
+ ret = max_tcpci_write16(chip, TCPC_ALERT, 0xffff);
+ if (ret < 0) {
+ dev_err(chip->dev, "Error writing to TCPC_ALERT ret:%d\n", ret);
+ return;
+ }
+
+ ret = max_tcpci_write16(chip, TCPC_VENDOR_ALERT, 0xffff);
+ if (ret < 0) {
+ dev_err(chip->dev, "Error writing to TCPC_VENDOR_ALERT ret:%d\n", ret);
+ return;
+ }
+
+ ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, 0xff);
+ if (ret < 0) {
+ dev_err(chip->dev, "Unable to clear TCPC_ALERT_EXTENDED ret:%d\n", ret);
+ return;
+ }
+
+ alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED |
+ TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS |
+ TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS |
+ /* Enable Extended alert for detecting Fast Role Swap Signal */
+ TCPC_ALERT_EXTND;
+
+ ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Error enabling TCPC_ALERT: TCPC_ALERT_MASK write failed ret:%d\n", ret);
+ return;
+ }
+
+ /* Enable vbus voltage monitoring and voltage alerts */
+ ret = max_tcpci_write8(chip, TCPC_POWER_CTRL, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Error writing to TCPC_POWER_CTRL ret:%d\n", ret);
+ return;
+ }
+
+ ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED_MASK, TCPC_SINK_FAST_ROLE_SWAP);
+ if (ret < 0)
+ return;
+}
+
+static void process_rx(struct max_tcpci_chip *chip, u16 status)
+{
+ struct pd_message msg;
+ u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN];
+ int ret, payload_index;
+ u8 *rx_buf_ptr;
+
+ /*
+ * READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers
+ * plus one (for the RX_BUF_FRAME_TYPE); see Table 4-36.
+ * Read the count and frame type.
+ */
+ ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2);
+ if (ret < 0) {
+ dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d", ret);
+ return;
+ }
+
+ count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET];
+ frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET];
+
+ if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
+ max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
+ dev_err(chip->dev, "%s", count == 0 ? "error: count is 0" :
+ "error: frame_type is not SOP");
+ return;
+ }
+
+ if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
+ dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d", count);
+ return;
+ }
+
+ /*
+ * Read count + 1 as RX_BUF_BYTE_x is hidden and can only be read through
+ * TCPC_RX_BYTE_CNT
+ */
+ count += 1;
+ ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count);
+ if (ret < 0) {
+ dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d", ret);
+ return;
+ }
+
+ rx_buf_ptr = rx_buf + TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET;
+ msg.header = cpu_to_le16(*(u16 *)rx_buf_ptr);
+ rx_buf_ptr = rx_buf_ptr + sizeof(msg.header);
+ for (payload_index = 0; payload_index < pd_header_cnt_le(msg.header); payload_index++,
+ rx_buf_ptr += sizeof(msg.payload[0]))
+ msg.payload[payload_index] = cpu_to_le32(*(u32 *)rx_buf_ptr);
+
+ /*
+ * Read complete, clear RX status alert bit.
+ * Clear overflow as well if set.
+ */
+ ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ?
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF :
+ TCPC_ALERT_RX_STATUS);
+ if (ret < 0)
+ return;
+
+ tcpm_pd_receive(chip->port, &msg);
+}
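A sketch of the RECEIVE_BUFFER layout that process_rx() walks, matching the TCPC_RECEIVE_BUFFER_*_OFFSET defines above:

/*
 * rx_buf[0]   READABLE_BYTE_COUNT   frame-type byte + payload length
 * rx_buf[1]   RX_BUF_FRAME_TYPE     must be TCPC_RX_BUF_FRAME_TYPE_SOP
 * rx_buf[2..] RX_BUF_BYTE_x         16-bit PD header, then data objects
 *
 * The second regmap_raw_read() starts again at TCPC_RX_BYTE_CNT because
 * RX_BUF_BYTE_x is hidden and only reachable through that register.
 */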
+
+static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+ u8 buffer_source[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_SOURCE};
+ u8 buffer_sink[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_SINK};
+ u8 buffer_none[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_OFF};
+ struct i2c_client *i2c = chip->client;
+ int ret;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = MAX_BUCK_BOOST_SID,
+ .flags = i2c->flags & I2C_M_TEN,
+ .len = 2,
+ .buf = source ? buffer_source : sink ? buffer_sink : buffer_none,
+ },
+ };
+
+ if (source && sink) {
+ dev_err(chip->dev, "Both source and sink set\n");
+ return -EINVAL;
+ }
+
+ ret = i2c_transfer(i2c->adapter, msgs, 1);
+
+ return ret < 0 ? ret : 1;
+}
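Returning 1 here is deliberate: tcpci_set_vbus() earlier in this patch treats a positive return from the vendor set_vbus callback as handled and skips the generic source/sink VBUS commands. The contract as seen from the caller, sketched:

/*
 * ret < 0  -> propagate the error to TCPM
 * ret > 0  -> vendor callback handled vbus; tcpci_set_vbus() returns 0
 * ret == 0 -> fall through to the generic TCPC_COMMAND sequence
 */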
+
+static void process_power_status(struct max_tcpci_chip *chip)
+{
+ u8 pwr_status;
+ int ret;
+
+ ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &pwr_status);
+ if (ret < 0)
+ return;
+
+ if (pwr_status == 0xff) {
+ max_tcpci_init_regs(chip);
+ } else if (pwr_status & TCPC_POWER_STATUS_SOURCING_VBUS) {
+ tcpm_sourcing_vbus(chip->port);
+ /*
+ * Always re-enable boost here.
+ * In the normal case, when e.g. a headset is attached, TCPM would
+ * have instructed the TCPC to enable boost, so the call is a
+ * no-op.
+ * But in the Fast Role Swap case, boost turns on autonomously without
+ * AP intervention and needs the AP to enable source mode explicitly
+ * for the AP to regain control.
+ */
+ max_tcpci_set_vbus(chip->tcpci, &chip->data, true, false);
+ } else {
+ tcpm_vbus_change(chip->port);
+ }
+}
+
+static void process_tx(struct max_tcpci_chip *chip, u16 status)
+{
+ if (status & TCPC_ALERT_TX_SUCCESS)
+ tcpm_pd_transmit_complete(chip->port, TCPC_TX_SUCCESS);
+ else if (status & TCPC_ALERT_TX_DISCARDED)
+ tcpm_pd_transmit_complete(chip->port, TCPC_TX_DISCARDED);
+ else if (status & TCPC_ALERT_TX_FAILED)
+ tcpm_pd_transmit_complete(chip->port, TCPC_TX_FAILED);
+
+ /* Reinit regs as Hard reset sets them to default value */
+ if ((status & TCPC_ALERT_TX_SUCCESS) && (status & TCPC_ALERT_TX_FAILED))
+ max_tcpci_init_regs(chip);
+}
+
+static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
+{
+ u16 mask;
+ int ret;
+ u8 reg_status;
+
+ /*
+ * Clear alert status for everything except RX_STATUS, which shouldn't
+ * be cleared until we have successfully retrieved the message.
+ */
+ if (status & ~TCPC_ALERT_RX_STATUS) {
+ mask = status & TCPC_ALERT_RX_BUF_OVF ?
+ status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) :
+ status & ~TCPC_ALERT_RX_STATUS;
+ ret = max_tcpci_write16(chip, TCPC_ALERT, mask);
+ if (ret < 0) {
+ dev_err(chip->dev, "ALERT clear failed\n");
+ return ret;
+ }
+ }
+
+ if (status & TCPC_ALERT_RX_BUF_OVF && !(status & TCPC_ALERT_RX_STATUS)) {
+ ret = max_tcpci_write16(chip, TCPC_ALERT, (TCPC_ALERT_RX_STATUS |
+ TCPC_ALERT_RX_BUF_OVF));
+ if (ret < 0) {
+ dev_err(chip->dev, "ALERT clear failed\n");
+ return ret;
+ }
+ }
+
+ if (status & TCPC_ALERT_EXTND) {
+ ret = max_tcpci_read8(chip, TCPC_ALERT_EXTENDED, &reg_status);
+ if (ret < 0)
+ return ret;
+
+ ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, reg_status);
+ if (ret < 0)
+ return ret;
+
+ if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) {
+ dev_info(chip->dev, "FRS Signal");
+ tcpm_sink_frs(chip->port);
+ }
+ }
+
+ if (status & TCPC_ALERT_RX_STATUS)
+ process_rx(chip, status);
+
+ if (status & TCPC_ALERT_VBUS_DISCNCT)
+ tcpm_vbus_change(chip->port);
+
+ if (status & TCPC_ALERT_CC_STATUS)
+ tcpm_cc_change(chip->port);
+
+ if (status & TCPC_ALERT_POWER_STATUS)
+ process_power_status(chip);
+
+ if (status & TCPC_ALERT_RX_HARD_RST) {
+ tcpm_pd_hard_reset(chip->port);
+ max_tcpci_init_regs(chip);
+ }
+
+ if (status & TCPC_ALERT_TX_SUCCESS || status & TCPC_ALERT_TX_DISCARDED || status &
+ TCPC_ALERT_TX_FAILED)
+ process_tx(chip, status);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t max_tcpci_irq(int irq, void *dev_id)
+{
+ struct max_tcpci_chip *chip = dev_id;
+ u16 status;
+ irqreturn_t irq_return;
+ int ret;
+
+ if (!chip->port)
+ return IRQ_HANDLED;
+
+ ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
+ if (ret < 0) {
+ dev_err(chip->dev, "ALERT read failed\n");
+ return ret;
+ }
+ while (status) {
+ irq_return = _max_tcpci_irq(chip, status);
+ /* Do not return if the ALERT is already set. */
+ ret = max_tcpci_read16(chip, TCPC_ALERT, &status);
+ if (ret < 0)
+ break;
+ }
+
+ return irq_return;
+}
+
+static irqreturn_t max_tcpci_isr(int irq, void *dev_id)
+{
+ struct max_tcpci_chip *chip = dev_id;
+
+ pm_wakeup_event(chip->dev, PD_ACTIVITY_TIMEOUT_MS);
+
+ if (!chip->port)
+ return IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int max_tcpci_init_alert(struct max_tcpci_chip *chip, struct i2c_client *client)
+{
+ int ret;
+
+ ret = devm_request_threaded_irq(chip->dev, client->irq, max_tcpci_isr, max_tcpci_irq,
+ (IRQF_TRIGGER_LOW | IRQF_ONESHOT), dev_name(chip->dev),
+ chip);
+
+ if (ret < 0)
+ return ret;
+
+ enable_irq_wake(client->irq);
+ return 0;
+}
+
+static int max_tcpci_start_toggling(struct tcpci *tcpci, struct tcpci_data *tdata,
+ enum typec_cc_status cc)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ max_tcpci_init_regs(chip);
+
+ return 0;
+}
+
+static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
+{
+ /*
+ * Generic TCPCI overwrites the regs once this driver initializes
+ * them. Prevent this by returning -1.
+ */
+ return -1;
+}
+
+static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id *i2c_id)
+{
+ int ret;
+ struct max_tcpci_chip *chip;
+ u8 power_status;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config);
+ if (IS_ERR(chip->data.regmap)) {
+ dev_err(&client->dev, "Regmap init failed\n");
+ return PTR_ERR(chip->data.regmap);
+ }
+
+ chip->dev = &client->dev;
+ i2c_set_clientdata(client, chip);
+
+ ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status);
+ if (ret < 0)
+ return ret;
+
+ /* Chip level tcpci callbacks */
+ chip->data.set_vbus = max_tcpci_set_vbus;
+ chip->data.start_drp_toggling = max_tcpci_start_toggling;
+ chip->data.TX_BUF_BYTE_x_hidden = true;
+ chip->data.init = tcpci_init;
+
+ max_tcpci_init_regs(chip);
+ chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
+ if (IS_ERR_OR_NULL(chip->tcpci)) {
+ dev_err(&client->dev, "TCPCI port registration failed");
+ ret = PTR_ERR(chip->tcpci);
+ return PTR_ERR(chip->tcpci);
+ }
+ chip->port = tcpci_get_tcpm_port(chip->tcpci);
+ ret = max_tcpci_init_alert(chip, client);
+ if (ret < 0)
+ goto unreg_port;
+
+ device_init_wakeup(chip->dev, true);
+ return 0;
+
+unreg_port:
+ tcpci_unregister_port(chip->tcpci);
+
+ return ret;
+}
+
+static int max_tcpci_remove(struct i2c_client *client)
+{
+ struct max_tcpci_chip *chip = i2c_get_clientdata(client);
+
+ if (!IS_ERR_OR_NULL(chip->tcpci))
+ tcpci_unregister_port(chip->tcpci);
+
+ return 0;
+}
+
+static const struct i2c_device_id max_tcpci_id[] = {
+ { "maxtcpc", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max_tcpci_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id max_tcpci_of_match[] = {
+ { .compatible = "maxim,tcpc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max_tcpci_of_match);
+#endif
+
+static struct i2c_driver max_tcpci_i2c_driver = {
+ .driver = {
+ .name = "maxtcpc",
+ .of_match_table = of_match_ptr(max_tcpci_of_match),
+ },
+ .probe = max_tcpci_probe,
+ .remove = max_tcpci_remove,
+ .id_table = max_tcpci_id,
+};
+module_i2c_driver(max_tcpci_i2c_driver);
+
+MODULE_AUTHOR("Badhri Jagan Sridharan <badhri@google.com>");
+MODULE_DESCRIPTION("Maxim TCPCI based USB Type-C Port Controller Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c
new file mode 100644
index 000000000000..f1bd9e09bc87
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/tcpm.h>
+
+#include "tcpci.h"
+
+#define MT6360_REG_VCONNCTRL1 0x8C
+#define MT6360_REG_MODECTRL2 0x8F
+#define MT6360_REG_SWRESET 0xA0
+#define MT6360_REG_DEBCTRL1 0xA1
+#define MT6360_REG_DRPCTRL1 0xA2
+#define MT6360_REG_DRPCTRL2 0xA3
+#define MT6360_REG_I2CTORST 0xBF
+#define MT6360_REG_RXCTRL2 0xCF
+#define MT6360_REG_CTDCTRL2 0xEC
+
+/* MT6360_REG_VCONNCTRL1 */
+#define MT6360_VCONNCL_ENABLE BIT(0)
+/* MT6360_REG_RXCTRL2 */
+#define MT6360_OPEN40M_ENABLE BIT(7)
+/* MT6360_REG_CTDCTRL2 */
+#define MT6360_RPONESHOT_ENABLE BIT(6)
+
+struct mt6360_tcpc_info {
+ struct tcpci_data tdata;
+ struct tcpci *tcpci;
+ struct device *dev;
+ int irq;
+};
+
+static inline int mt6360_tcpc_read16(struct regmap *regmap,
+ unsigned int reg, u16 *val)
+{
+ return regmap_raw_read(regmap, reg, val, sizeof(u16));
+}
+
+static inline int mt6360_tcpc_write16(struct regmap *regmap,
+ unsigned int reg, u16 val)
+{
+ return regmap_raw_write(regmap, reg, &val, sizeof(u16));
+}
+
+static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct regmap *regmap = tdata->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, MT6360_REG_SWRESET, 0x01);
+ if (ret)
+ return ret;
+
+ /* After the reset command, wait 1~2ms for the IC to complete the action */
+ usleep_range(1000, 2000);
+
+ /* Mask all alerts */
+ ret = mt6360_tcpc_write16(regmap, TCPC_ALERT_MASK, 0);
+ if (ret)
+ return ret;
+
+ /* Enable I2C timeout reset, with the timeout set to 200ms */
+ ret = regmap_write(regmap, MT6360_REG_I2CTORST, 0x8F);
+ if (ret)
+ return ret;
+
+ /* config CC Detect Debounce : 26.7*val us */
+ ret = regmap_write(regmap, MT6360_REG_DEBCTRL1, 0x10);
+ if (ret)
+ return ret;
+
+ /* DRP Toggle Cycle : 51.2 + 6.4*val ms */
+ ret = regmap_write(regmap, MT6360_REG_DRPCTRL1, 4);
+ if (ret)
+ return ret;
+
+ /* DRP Duty Ctrl : dcSRC: /1024 */
+ ret = mt6360_tcpc_write16(regmap, MT6360_REG_DRPCTRL2, 330);
+ if (ret)
+ return ret;
+
+ /* Enable VCONN Current Limit function */
+ ret = regmap_update_bits(regmap, MT6360_REG_VCONNCTRL1, MT6360_VCONNCL_ENABLE,
+ MT6360_VCONNCL_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Enable cc open 40ms when the pmic sends the vsysuv signal */
+ ret = regmap_update_bits(regmap, MT6360_REG_RXCTRL2, MT6360_OPEN40M_ENABLE,
+ MT6360_OPEN40M_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Enable Rpdet oneshot detection */
+ ret = regmap_update_bits(regmap, MT6360_REG_CTDCTRL2, MT6360_RPONESHOT_ENABLE,
+ MT6360_RPONESHOT_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Set shipping mode off, AUTOIDLE on */
+ return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A);
+}
+
+static irqreturn_t mt6360_irq(int irq, void *dev_id)
+{
+ struct mt6360_tcpc_info *mti = dev_id;
+
+ return tcpci_irq(mti->tcpci);
+}
+
+static int mt6360_tcpc_probe(struct platform_device *pdev)
+{
+ struct mt6360_tcpc_info *mti;
+ int ret;
+
+ mti = devm_kzalloc(&pdev->dev, sizeof(*mti), GFP_KERNEL);
+ if (!mti)
+ return -ENOMEM;
+
+ mti->dev = &pdev->dev;
+
+ mti->tdata.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!mti->tdata.regmap) {
+ dev_err(&pdev->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ mti->irq = platform_get_irq_byname(pdev, "PD_IRQB");
+ if (mti->irq < 0)
+ return mti->irq;
+
+ mti->tdata.init = mt6360_tcpc_init;
+ mti->tcpci = tcpci_register_port(&pdev->dev, &mti->tdata);
+ if (IS_ERR(mti->tcpci)) {
+ dev_err(&pdev->dev, "Failed to register tcpci port\n");
+ return PTR_ERR(mti->tcpci);
+ }
+
+ ret = devm_request_threaded_irq(mti->dev, mti->irq, NULL, mt6360_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mti);
+ if (ret) {
+ dev_err(mti->dev, "Failed to register irq\n");
+ tcpci_unregister_port(mti->tcpci);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ platform_set_drvdata(pdev, mti);
+
+ return 0;
+}
+
+static int mt6360_tcpc_remove(struct platform_device *pdev)
+{
+ struct mt6360_tcpc_info *mti = platform_get_drvdata(pdev);
+
+ disable_irq(mti->irq);
+ tcpci_unregister_port(mti->tcpci);
+ return 0;
+}
+
+static int __maybe_unused mt6360_tcpc_suspend(struct device *dev)
+{
+ struct mt6360_tcpc_info *mti = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(mti->irq);
+
+ return 0;
+}
+
+static int __maybe_unused mt6360_tcpc_resume(struct device *dev)
+{
+ struct mt6360_tcpc_info *mti = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(mti->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mt6360_tcpc_pm_ops, mt6360_tcpc_suspend, mt6360_tcpc_resume);
+
+static const struct of_device_id __maybe_unused mt6360_tcpc_of_id[] = {
+ { .compatible = "mediatek,mt6360-tcpc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_tcpc_of_id);
+
+static struct platform_driver mt6360_tcpc_driver = {
+ .driver = {
+ .name = "mt6360-tcpc",
+ .pm = &mt6360_tcpc_pm_ops,
+ .of_match_table = mt6360_tcpc_of_id,
+ },
+ .probe = mt6360_tcpc_probe,
+ .remove = mt6360_tcpc_remove,
+};
+module_platform_driver(mt6360_tcpc_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MT6360 USB Type-C Port Controller Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a48e3f90d196..a6fae1f86505 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -8,8 +8,10 @@
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
@@ -28,7 +30,8 @@
#include <linux/usb/role.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_altmode.h>
-#include <linux/workqueue.h>
+
+#include <uapi/linux/sched/types.h>
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
@@ -103,6 +106,13 @@
S(VCONN_SWAP_TURN_ON_VCONN), \
S(VCONN_SWAP_TURN_OFF_VCONN), \
\
+ S(FR_SWAP_SEND), \
+ S(FR_SWAP_SEND_TIMEOUT), \
+ S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
+ S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
+ S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
+ S(FR_SWAP_CANCEL), \
+ \
S(SNK_TRY), \
S(SNK_TRY_WAIT), \
S(SNK_TRY_WAIT_DEBOUNCE), \
@@ -124,6 +134,9 @@
S(GET_PPS_STATUS_SEND), \
S(GET_PPS_STATUS_SEND_TIMEOUT), \
\
+ S(GET_SINK_CAP), \
+ S(GET_SINK_CAP_TIMEOUT), \
+ \
S(ERROR_RECOVERY), \
S(PORT_RESET), \
S(PORT_RESET_WAIT_OFF)
@@ -167,11 +180,25 @@ enum adev_actions {
ADEV_ATTENTION,
};
+/*
+ * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
+ * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
+ * Version 1.2"
+ */
+enum frs_typec_current {
+ FRS_NOT_SUPPORTED,
+ FRS_DEFAULT_POWER,
+ FRS_5V_1P5A,
+ FRS_5V_3A,
+};
+
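The enum values track the 2-bit Fast Role Swap field of a fixed-supply sink PDO. The decode appears inline in tcpm_pd_data_request() later in this patch; as a hedged helper it would read as below, assuming the PDO_FIXED_FRS_CURR_* definitions the patch relies on:

static enum frs_typec_current pdo_fixed_frs_current(u32 pdo)
{
	/* Fast Role Swap bits of the first fixed sink PDO, per PD 3.0 */
	return (pdo & PDO_FIXED_FRS_CURR_MASK) >> PDO_FIXED_FRS_CURR_SHIFT;
}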
/* Events from low level driver */
#define TCPM_CC_EVENT BIT(0)
#define TCPM_VBUS_EVENT BIT(1)
#define TCPM_RESET_EVENT BIT(2)
+#define TCPM_FRS_EVENT BIT(3)
+#define TCPM_SOURCING_VBUS BIT(4)
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
@@ -181,6 +208,8 @@ enum adev_actions {
#define SVID_DISCOVERY_MAX 16
#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
+#define GET_SINK_CAP_RETRY_MS 100
+
struct pd_mode_data {
int svid_index; /* current SVID index */
int nsvids;
@@ -203,7 +232,7 @@ struct tcpm_port {
struct device *dev;
struct mutex lock; /* tcpm state machine lock */
- struct workqueue_struct *wq;
+ struct kthread_worker *wq;
struct typec_capability typec_caps;
struct typec_port *typec_port;
@@ -247,15 +276,19 @@ struct tcpm_port {
enum tcpm_state prev_state;
enum tcpm_state state;
enum tcpm_state delayed_state;
- unsigned long delayed_runtime;
+ ktime_t delayed_runtime;
unsigned long delay_ms;
spinlock_t pd_event_lock;
u32 pd_events;
- struct work_struct event_work;
- struct delayed_work state_machine;
- struct delayed_work vdm_state_machine;
+ struct kthread_work event_work;
+ struct hrtimer state_machine_timer;
+ struct kthread_work state_machine;
+ struct hrtimer vdm_state_machine_timer;
+ struct kthread_work vdm_state_machine;
+ struct hrtimer enable_frs_timer;
+ struct kthread_work enable_frs;
bool state_machine_running;
struct completion tx_complete;
@@ -330,6 +363,12 @@ struct tcpm_port {
/* port belongs to a self powered device */
bool self_powered;
+ /* FRS */
+ enum frs_typec_current frs_current;
+
+ /* Sink caps have been queried */
+ bool sink_cap_done;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -340,7 +379,7 @@ struct tcpm_port {
};
struct pd_rx_event {
- struct work_struct work;
+ struct kthread_work work;
struct tcpm_port *port;
struct pd_message msg;
};
@@ -914,6 +953,37 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
+static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+ if (delay_ms) {
+ hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&port->state_machine_timer);
+ kthread_queue_work(port->wq, &port->state_machine);
+ }
+}
+
+static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+ if (delay_ms) {
+ hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
+ HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&port->vdm_state_machine_timer);
+ kthread_queue_work(port->wq, &port->vdm_state_machine);
+ }
+}
+
+static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+ if (delay_ms) {
+ hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&port->enable_frs_timer);
+ kthread_queue_work(port->wq, &port->enable_frs);
+ }
+}
+
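All three helpers above follow the same replacement pattern for the old delayed_work: a nonzero delay arms an hrtimer whose handler queues the kthread work, and a zero delay cancels the timer and queues immediately. A condensed trace for the main state machine, with the handler defined near tcpm_register_port() below:

/*
 * mod_tcpm_delayed_work(port, 50)
 *   -> hrtimer_start(&port->state_machine_timer, 50 ms)
 *   -> state_machine_timer_handler()  when the timer fires
 *   -> kthread_queue_work(port->wq, &port->state_machine)
 *
 * mod_tcpm_delayed_work(port, 0)
 *   -> hrtimer_cancel() + kthread_queue_work() right away
 */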
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
unsigned int delay_ms)
{
@@ -922,9 +992,8 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
tcpm_states[port->state], tcpm_states[state],
delay_ms);
port->delayed_state = state;
- mod_delayed_work(port->wq, &port->state_machine,
- msecs_to_jiffies(delay_ms));
- port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
+ mod_tcpm_delayed_work(port, delay_ms);
+ port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
port->delay_ms = delay_ms;
} else {
tcpm_log(port, "state change %s -> %s",
@@ -939,7 +1008,7 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
* machine.
*/
if (!port->state_machine_running)
- mod_delayed_work(port->wq, &port->state_machine, 0);
+ mod_tcpm_delayed_work(port, 0);
}
}
@@ -960,7 +1029,7 @@ static void tcpm_queue_message(struct tcpm_port *port,
enum pd_msg_request message)
{
port->queued_message = message;
- mod_delayed_work(port->wq, &port->state_machine, 0);
+ mod_tcpm_delayed_work(port, 0);
}
/*
@@ -981,7 +1050,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
port->vdm_retries = 0;
port->vdm_state = VDM_STATE_READY;
- mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+ mod_vdm_delayed_work(port, 0);
}
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
@@ -1244,8 +1313,7 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
CMDT_INIT;
- mod_delayed_work(port->wq, &port->vdm_state_machine,
- msecs_to_jiffies(PD_T_VDM_BUSY));
+ mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
return;
}
port->vdm_state = VDM_STATE_DONE;
@@ -1390,8 +1458,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
port->vdm_retries = 0;
port->vdm_state = VDM_STATE_BUSY;
timeout = vdm_ready_timeout(port->vdo_data[0]);
- mod_delayed_work(port->wq, &port->vdm_state_machine,
- timeout);
+ mod_vdm_delayed_work(port, timeout);
}
break;
case VDM_STATE_WAIT_RSP_BUSY:
@@ -1420,10 +1487,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
}
}
-static void vdm_state_machine_work(struct work_struct *work)
+static void vdm_state_machine_work(struct kthread_work *work)
{
- struct tcpm_port *port = container_of(work, struct tcpm_port,
- vdm_state_machine.work);
+ struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
enum vdm_states prev_state;
mutex_lock(&port->lock);
@@ -1591,6 +1657,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+
return 0;
}
@@ -1646,6 +1713,9 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
unsigned int cnt = pd_header_cnt_le(msg->header);
unsigned int rev = pd_header_rev_le(msg->header);
unsigned int i;
+ enum frs_typec_current frs_current;
+ bool frs_enable;
+ int ret;
switch (type) {
case PD_DATA_SOURCE_CAP:
@@ -1715,7 +1785,21 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
/* We don't do anything with this at the moment... */
for (i = 0; i < cnt; i++)
port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
+
+ frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
+ PDO_FIXED_FRS_CURR_SHIFT;
+ frs_enable = frs_current && (frs_current <= port->frs_current);
+ tcpm_log(port,
+ "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
+ frs_current, port->frs_current, frs_enable ? 'y' : 'n');
+ if (frs_enable) {
+ ret = port->tcpc->enable_frs(port->tcpc, true);
+ tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
+ }
+
port->nr_sink_caps = cnt;
+ port->sink_cap_done = true;
+ tcpm_set_state(port, SNK_READY, 0);
break;
case PD_DATA_VENDOR_DEF:
tcpm_handle_vdm_request(port, msg->payload, cnt);
@@ -1810,6 +1894,9 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case VCONN_SWAP_WAIT_FOR_VCONN:
tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
break;
+ case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
+ tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
+ break;
default:
break;
}
@@ -1849,6 +1936,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
-EAGAIN : -EOPNOTSUPP);
tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
break;
+ case FR_SWAP_SEND:
+ tcpm_set_state(port, FR_SWAP_CANCEL, 0);
+ break;
+ case GET_SINK_CAP:
+ port->sink_cap_done = true;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
default:
break;
}
@@ -1883,6 +1977,9 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case VCONN_SWAP_SEND:
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
+ case FR_SWAP_SEND:
+ tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
+ break;
default:
break;
}
@@ -2005,7 +2102,7 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
}
}
-static void tcpm_pd_rx_handler(struct work_struct *work)
+static void tcpm_pd_rx_handler(struct kthread_work *work)
{
struct pd_rx_event *event = container_of(work,
struct pd_rx_event, work);
@@ -2067,10 +2164,10 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
if (!event)
return;
- INIT_WORK(&event->work, tcpm_pd_rx_handler);
+ kthread_init_work(&event->work, tcpm_pd_rx_handler);
event->port = port;
memcpy(&event->msg, msg, sizeof(*msg));
- queue_work(port->wq, &event->work);
+ kthread_queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
@@ -2123,9 +2220,9 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
} while (port->queued_message != PD_MSG_NONE);
if (port->delayed_state != INVALID_STATE) {
- if (time_is_after_jiffies(port->delayed_runtime)) {
- mod_delayed_work(port->wq, &port->state_machine,
- port->delayed_runtime - jiffies);
+ if (ktime_after(port->delayed_runtime, ktime_get())) {
+ mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
+ ktime_get())));
return true;
}
port->delayed_state = INVALID_STATE;
@@ -2783,12 +2880,19 @@ static void tcpm_reset_port(struct tcpm_port *port)
port->try_src_count = 0;
port->try_snk_count = 0;
port->usb_type = POWER_SUPPLY_USB_TYPE_C;
+ port->nr_sink_caps = 0;
+ port->sink_cap_done = false;
+ if (port->tcpc->enable_frs)
+ port->tcpc->enable_frs(port->tcpc, false);
power_supply_changed(port->psy);
}
static void tcpm_detach(struct tcpm_port *port)
{
+ if (tcpm_port_is_disconnected(port))
+ port->hard_reset_count = 0;
+
if (!port->attached)
return;
@@ -2797,9 +2901,6 @@ static void tcpm_detach(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
}
- if (tcpm_port_is_disconnected(port))
- port->hard_reset_count = 0;
-
tcpm_reset_port(port);
}
@@ -3258,10 +3359,9 @@ static void run_state_machine(struct tcpm_port *port)
case SNK_DISCOVERY_DEBOUNCE_DONE:
if (!tcpm_port_is_disconnected(port) &&
tcpm_port_is_sink(port) &&
- time_is_after_jiffies(port->delayed_runtime)) {
+ ktime_after(port->delayed_runtime, ktime_get())) {
tcpm_set_state(port, SNK_DISCOVERY,
- jiffies_to_msecs(port->delayed_runtime -
- jiffies));
+ ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
break;
}
tcpm_set_state(port, unattached_state(port), 0);
@@ -3334,10 +3434,9 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
tcpm_check_send_discover(port);
+ mod_enable_frs_delayed_work(port, 0);
tcpm_pps_complete(port, port->pps_status);
-
power_supply_changed(port->psy);
-
break;
/* Accessory states */
@@ -3361,9 +3460,13 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, HARD_RESET_START, 0);
break;
case HARD_RESET_START:
+ port->sink_cap_done = false;
+ if (port->tcpc->enable_frs)
+ port->tcpc->enable_frs(port->tcpc, false);
port->hard_reset_count++;
port->tcpc->set_pd_rx(port->tcpc, false);
tcpm_unregister_altmodes(port);
+ port->nr_sink_caps = 0;
port->send_discover = true;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
@@ -3495,6 +3598,35 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
+ case FR_SWAP_SEND:
+ if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ break;
+ }
+ tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
+ break;
+ case FR_SWAP_SEND_TIMEOUT:
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ break;
+ case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
+ tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
+ break;
+ case FR_SWAP_SNK_SRC_NEW_SINK_READY:
+ if (port->vbus_source)
+ tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
+ else
+ tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
+ break;
+ case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
+ tcpm_set_pwr_role(port, TYPEC_SOURCE);
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ break;
+ }
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
+ break;
+
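Taken together, the new cases implement the sink-to-source half of Fast Role Swap. A condensed trace of the happy path and its bail-outs:

/*
 * FR_SWAP_SEND                             send FR_Swap, arm PD_T_SENDER_RESPONSE
 *   -> FR_SWAP_SNK_SRC_TRANSITION_TO_OFF   wait for PS_RDY within PD_T_PS_SOURCE_OFF
 *   -> FR_SWAP_SNK_SRC_NEW_SINK_READY      vbus already sourced? advance : error
 *   -> FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED take the source role, send PS_RDY
 *   -> SRC_STARTUP                         after PD_T_SWAP_SRC_START
 *
 * A Wait/Reject reply routes through FR_SWAP_CANCEL back to SNK_READY or
 * SRC_READY; timeouts fall into ERROR_RECOVERY.
 */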
/* PR_Swap states */
case PR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
@@ -3573,7 +3705,7 @@ static void run_state_machine(struct tcpm_port *port)
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
- tcpm_set_state(port, SRC_STARTUP, 0);
+ tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
@@ -3618,6 +3750,12 @@ static void run_state_machine(struct tcpm_port *port)
else
tcpm_set_state(port, SNK_READY, 0);
break;
+ case FR_SWAP_CANCEL:
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, SRC_READY, 0);
+ else
+ tcpm_set_state(port, SNK_READY, 0);
+ break;
case BIST_RX:
switch (BDO_MODE_MASK(port->bist_request)) {
@@ -3652,6 +3790,14 @@ static void run_state_machine(struct tcpm_port *port)
case GET_PPS_STATUS_SEND_TIMEOUT:
tcpm_set_state(port, ready_state(port), 0);
break;
+ case GET_SINK_CAP:
+ tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
+ tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
+ break;
+ case GET_SINK_CAP_TIMEOUT:
+ port->sink_cap_done = true;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
case ERROR_RECOVERY:
tcpm_swap_complete(port, -EPROTO);
tcpm_pps_complete(port, -EPROTO);
@@ -3674,10 +3820,9 @@ static void run_state_machine(struct tcpm_port *port)
}
}
-static void tcpm_state_machine_work(struct work_struct *work)
+static void tcpm_state_machine_work(struct kthread_work *work)
{
- struct tcpm_port *port = container_of(work, struct tcpm_port,
- state_machine.work);
+ struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
enum tcpm_state prev_state;
mutex_lock(&port->lock);
@@ -3868,6 +4013,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
* Ignore it.
*/
break;
+ case FR_SWAP_SEND:
+ case FR_SWAP_SEND_TIMEOUT:
+ case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
+ case FR_SWAP_SNK_SRC_NEW_SINK_READY:
+ case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
+ /* Do nothing, CC change expected */
+ break;
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
@@ -3938,6 +4090,9 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
case SRC_TRY_DEBOUNCE:
/* Do nothing, waiting for sink detection */
break;
+ case FR_SWAP_SNK_SRC_NEW_SINK_READY:
+ tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
+ break;
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
@@ -4017,6 +4172,14 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
*/
break;
+ case FR_SWAP_SEND:
+ case FR_SWAP_SEND_TIMEOUT:
+ case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
+ case FR_SWAP_SNK_SRC_NEW_SINK_READY:
+ case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
+ /* Do nothing, vbus drop expected */
+ break;
+
default:
if (port->pwr_role == TYPEC_SINK &&
port->attached)
@@ -4041,7 +4204,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
0);
}
-static void tcpm_pd_event_handler(struct work_struct *work)
+static void tcpm_pd_event_handler(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port,
event_work);
@@ -4071,6 +4234,25 @@ static void tcpm_pd_event_handler(struct work_struct *work)
if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
_tcpm_cc_change(port, cc1, cc2);
}
+ if (events & TCPM_FRS_EVENT) {
+ if (port->state == SNK_READY)
+ tcpm_set_state(port, FR_SWAP_SEND, 0);
+ else
+ tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
+ }
+ if (events & TCPM_SOURCING_VBUS) {
+ tcpm_log(port, "sourcing vbus");
+ /*
+ * In the Fast Role Swap case the TCPC autonomously sources vbus. Set vbus_source
+ * true as TCPM wouldn't have called tcpm_set_vbus.
+ *
+ * When vbus is sourced at the command of TCPM, i.e. TCPM called
+ * tcpm_set_vbus to source vbus, vbus_source would already be true.
+ */
+ port->vbus_source = true;
+ _tcpm_pd_vbus_on(port);
+ }
+
spin_lock(&port->pd_event_lock);
}
spin_unlock(&port->pd_event_lock);
@@ -4082,7 +4264,7 @@ void tcpm_cc_change(struct tcpm_port *port)
spin_lock(&port->pd_event_lock);
port->pd_events |= TCPM_CC_EVENT;
spin_unlock(&port->pd_event_lock);
- queue_work(port->wq, &port->event_work);
+ kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);
@@ -4091,7 +4273,7 @@ void tcpm_vbus_change(struct tcpm_port *port)
spin_lock(&port->pd_event_lock);
port->pd_events |= TCPM_VBUS_EVENT;
spin_unlock(&port->pd_event_lock);
- queue_work(port->wq, &port->event_work);
+ kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
@@ -4100,10 +4282,54 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
spin_lock(&port->pd_event_lock);
port->pd_events = TCPM_RESET_EVENT;
spin_unlock(&port->pd_event_lock);
- queue_work(port->wq, &port->event_work);
+ kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
+void tcpm_sink_frs(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events = TCPM_FRS_EVENT;
+ spin_unlock(&port->pd_event_lock);
+ kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_sink_frs);
+
+void tcpm_sourcing_vbus(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events = TCPM_SOURCING_VBUS;
+ spin_unlock(&port->pd_event_lock);
+ kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
+
+static void tcpm_enable_frs_work(struct kthread_work *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
+
+ mutex_lock(&port->lock);
+ /* Not FRS capable */
+ if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
+ port->pwr_opmode != TYPEC_PWR_MODE_PD ||
+ !port->tcpc->enable_frs ||
+ /* Sink caps queried */
+ port->sink_cap_done || port->negotiated_rev < PD_REV30)
+ goto unlock;
+
+ /* Send when the state machine is idle */
+ if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)
+ goto resched;
+
+ tcpm_set_state(port, GET_SINK_CAP, 0);
+ port->sink_cap_done = true;
+
+resched:
+ mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
+unlock:
+ mutex_unlock(&port->lock);
+}
+
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
struct tcpm_port *port = typec_get_drvdata(p);
@@ -4511,7 +4737,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
{
const char *cap_str;
int ret;
- u32 mw;
+ u32 mw, frs_current;
if (!fwnode)
return -EINVAL;
@@ -4580,6 +4806,13 @@ sink:
port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
+ /* FRS can only be supported by DRP ports */
+ if (port->port_type == TYPEC_PORT_DRP) {
+ ret = fwnode_property_read_u32(fwnode, "frs-typec-current", &frs_current);
+ if (ret >= 0 && frs_current <= FRS_5V_3A)
+ port->frs_current = frs_current;
+ }
+
return 0;
}
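The parse above only runs for DRP ports; a hypothetical firmware node consumed by the fwnode_property_read_u32() call, with the property name taken from this patch and the surrounding connector layout assumed from the usual usb-c-connector binding:

/*
 * connector {
 *         compatible = "usb-c-connector";
 *         power-role = "dual";
 *         frs-typec-current = <3>;    corresponds to FRS_5V_3A
 * };
 */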
@@ -4808,6 +5041,30 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
+static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
+{
+ struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
+
+ kthread_queue_work(port->wq, &port->state_machine);
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
+{
+ struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
+
+ kthread_queue_work(port->wq, &port->vdm_state_machine);
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
+{
+ struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
+
+ kthread_queue_work(port->wq, &port->enable_frs);
+ return HRTIMER_NORESTART;
+}
+
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
@@ -4829,12 +5086,21 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
mutex_init(&port->lock);
mutex_init(&port->swap_lock);
- port->wq = create_singlethread_workqueue(dev_name(dev));
- if (!port->wq)
- return ERR_PTR(-ENOMEM);
- INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
- INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
- INIT_WORK(&port->event_work, tcpm_pd_event_handler);
+ port->wq = kthread_create_worker(0, dev_name(dev));
+ if (IS_ERR(port->wq))
+ return ERR_CAST(port->wq);
+ sched_set_fifo(port->wq->task);
+
+ kthread_init_work(&port->state_machine, tcpm_state_machine_work);
+ kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
+ kthread_init_work(&port->event_work, tcpm_pd_event_handler);
+ kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
+ hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->state_machine_timer.function = state_machine_timer_handler;
+ hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
+ hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->enable_frs_timer.function = enable_frs_timer_handler;
spin_lock_init(&port->pd_event_lock);
@@ -4886,7 +5152,7 @@ out_role_sw_put:
usb_role_switch_put(port->role_sw);
out_destroy_wq:
tcpm_debugfs_exit(port);
- destroy_workqueue(port->wq);
+ kthread_destroy_worker(port->wq);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
@@ -4901,7 +5167,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
typec_unregister_port(port->typec_port);
usb_role_switch_put(port->role_sw);
tcpm_debugfs_exit(port);
- destroy_workqueue(port->wq);
+ kthread_destroy_worker(port->wq);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
index 26ed0b520749..571a51e16234 100644
--- a/drivers/usb/typec/ucsi/psy.c
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -238,4 +238,13 @@ void ucsi_unregister_port_psy(struct ucsi_connector *con)
return;
power_supply_unregister(con->psy);
+ con->psy = NULL;
+}
+
+void ucsi_port_psy_changed(struct ucsi_connector *con)
+{
+ if (IS_ERR_OR_NULL(con->psy))
+ return;
+
+ power_supply_changed(con->psy);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 758b988ac518..51a570d40a42 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -643,8 +643,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
- con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
+ con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE) {
ucsi_pwr_opmode_change(con);
+ ucsi_port_psy_changed(con);
+ }
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
typec_set_pwr_role(con->port, role);
@@ -674,6 +676,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_register_partner(con);
else
ucsi_unregister_partner(con);
+
+ ucsi_port_psy_changed(con);
}
if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) {
@@ -994,6 +998,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
!!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ ucsi_port_psy_changed(con);
}
if (con->partner) {
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index cba6f77bea61..b7a92f246050 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -340,9 +340,11 @@ int ucsi_resume(struct ucsi *ucsi);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
void ucsi_unregister_port_psy(struct ucsi_connector *con);
+void ucsi_port_psy_changed(struct ucsi_connector *con);
#else
static inline int ucsi_register_port_psy(struct ucsi_connector *con) { return 0; }
static inline void ucsi_unregister_port_psy(struct ucsi_connector *con) { }
+static inline void ucsi_port_psy_changed(struct ucsi_connector *con) { }
#endif /* CONFIG_POWER_SUPPLY */
#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index e4b96674c405..4ce6c6a45eb1 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -755,13 +755,7 @@ EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
static int __init usbip_core_init(void)
{
- int ret;
-
- ret = usbip_init_eh();
- if (ret)
- return ret;
-
- return 0;
+ return usbip_init_eh();
}
static void __exit usbip_core_exit(void)
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 1b598db5d8b9..66cde5e5f796 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -797,8 +797,14 @@ no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
spin_unlock_irqrestore(&vhci->lock, flags);
- if (!ret)
+ if (!ret) {
+ /* usb_hcd_giveback_urb() should be called with
+ * irqs disabled
+ */
+ local_irq_disable();
usb_hcd_giveback_urb(hcd, urb, urb->status);
+ local_irq_enable();
+ }
return ret;
}
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index d7d32b656102..358f6048dd3c 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -13,6 +13,7 @@ config VDPA_SIM
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ select GENERIC_NET_UTILS
default n
help
vDPA networking device simulator which loop TX traffic back
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index ef1c550f8266..4b6195666c58 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -239,7 +239,6 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
u64 paend;
struct scatterlist *sg;
struct device *dma = mvdev->mdev->device;
- int ret;
for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -277,8 +276,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
- ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
- if (!ret)
+ err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+ if (!err)
goto err_map;
err = create_direct_mr(mvdev, mr);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 74264e590695..1fa6fcac8299 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1522,6 +1522,11 @@ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
(mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
}
+static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
+{
+ return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
+}
+
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1535,8 +1540,8 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
- ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev),
- ndev->mtu);
+ ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
+ ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
return err;
}
@@ -1653,6 +1658,9 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *
if (err)
goto err_mr;
+ if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
restore_channels_info(ndev);
err = setup_driver(ndev);
if (err)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 62d640327145..6a90fdb9cbfc 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,7 +18,7 @@
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
@@ -38,6 +38,10 @@ static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
+static char *macaddr;
+module_param(macaddr, charp, 0);
+MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
+
struct vdpasim_virtqueue {
struct vringh vring;
struct vringh_kiov iov;
@@ -60,7 +64,8 @@ struct vdpasim_virtqueue {
static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
(1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM);
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_NET_F_MAC);
/* State of each vdpasim device */
struct vdpasim {
@@ -361,7 +366,9 @@ static struct vdpasim *vdpasim_create(void)
spin_lock_init(&vdpasim->iommu_lock);
dev = &vdpasim->vdpa.dev;
- dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ dev->dma_mask = &dev->coherent_dma_mask;
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ goto err_iommu;
set_dma_ops(dev, &vdpasim_dma_ops);
vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
@@ -372,7 +379,15 @@ static struct vdpasim *vdpasim_create(void)
if (!vdpasim->buffer)
goto err_iommu;
- eth_random_addr(vdpasim->config.mac);
+ if (macaddr) {
+ mac_pton(macaddr, vdpasim->config.mac);
+ if (!is_valid_ether_addr(vdpasim->config.mac)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_iommu;
+ }
+ } else {
+ eth_random_addr(vdpasim->config.mac);
+ }
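With the new parameter a fixed MAC can be injected at load time; a usage sketch, module name assumed from the source file:

/*
 *   modprobe vdpa_sim macaddr="02:11:22:33:44:55"
 *
 * mac_pton() parses the string in place and is_valid_ether_addr() then
 * rejects zero or multicast addresses with -EADDRNOTAVAIL; the random
 * address path stays the default when the parameter is absent.
 */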
vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
@@ -574,6 +589,16 @@ static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
return vdpasim->generation;
}
+static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
+{
+ struct vdpa_iova_range range = {
+ .first = 0ULL,
+ .last = ULLONG_MAX,
+ };
+
+ return range;
+}
+
static int vdpasim_set_map(struct vdpa_device *vdpa,
struct vhost_iotlb *iotlb)
{
@@ -657,6 +682,7 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
+ .get_iova_range = vdpasim_get_iova_range,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
@@ -683,6 +709,7 @@ static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
+ .get_iova_range = vdpasim_get_iova_range,
.set_map = vdpasim_set_map,
.free = vdpasim_free,
};
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index fd17db9b432f..5533df91b257 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -47,4 +47,5 @@ menuconfig VFIO_NOIOMMU
source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
source "drivers/vfio/mdev/Kconfig"
+source "drivers/vfio/fsl-mc/Kconfig"
source "virt/lib/Kconfig"
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index de67c4725cce..fee73f3d9480 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
obj-$(CONFIG_VFIO_PCI) += pci/
obj-$(CONFIG_VFIO_PLATFORM) += platform/
obj-$(CONFIG_VFIO_MDEV) += mdev/
+obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig
new file mode 100644
index 000000000000..b1a527d6b6f2
--- /dev/null
+++ b/drivers/vfio/fsl-mc/Kconfig
@@ -0,0 +1,9 @@
+config VFIO_FSL_MC
+ tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
+ depends on VFIO && FSL_MC_BUS && EVENTFD
+ help
+ Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
+ (Management Complex) devices. This is required to pass through
+ fsl-mc bus devices using the VFIO framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/fsl-mc/Makefile b/drivers/vfio/fsl-mc/Makefile
new file mode 100644
index 000000000000..cad6dbf0b735
--- /dev/null
+++ b/drivers/vfio/fsl-mc/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+
+vfio-fsl-mc-y := vfio_fsl_mc.o vfio_fsl_mc_intr.o
+obj-$(CONFIG_VFIO_FSL_MC) += vfio-fsl-mc.o
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
new file mode 100644
index 000000000000..f27e25112c40
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017,2019-2020 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+#include <linux/fsl/mc.h>
+#include <linux/delay.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+#include "vfio_fsl_mc_private.h"
+
+static struct fsl_mc_driver vfio_fsl_mc_driver;
+
+static DEFINE_MUTEX(reflck_lock);
+
+static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
+{
+ kref_get(&reflck->kref);
+}
+
+static void vfio_fsl_mc_reflck_release(struct kref *kref)
+{
+ struct vfio_fsl_mc_reflck *reflck = container_of(kref,
+ struct vfio_fsl_mc_reflck,
+ kref);
+
+ mutex_destroy(&reflck->lock);
+ kfree(reflck);
+ mutex_unlock(&reflck_lock);
+}
+
+static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
+{
+ kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
+}
+
+static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
+{
+ struct vfio_fsl_mc_reflck *reflck;
+
+ reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
+ if (!reflck)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&reflck->kref);
+ mutex_init(&reflck->lock);
+
+ return reflck;
+}
+
+static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
+{
+ int ret = 0;
+
+ mutex_lock(&reflck_lock);
+ if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
+ vdev->reflck = vfio_fsl_mc_reflck_alloc();
+ ret = PTR_ERR_OR_ZERO(vdev->reflck);
+ } else {
+ struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
+ struct vfio_device *device;
+ struct vfio_fsl_mc_device *cont_vdev;
+
+ device = vfio_device_get_from_dev(mc_cont_dev);
+ if (!device) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ cont_vdev = vfio_device_data(device);
+ if (!cont_vdev || !cont_vdev->reflck) {
+ vfio_device_put(device);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ vfio_fsl_mc_reflck_get(cont_vdev->reflck);
+ vdev->reflck = cont_vdev->reflck;
+ vfio_device_put(device);
+ }
+
+unlock:
+ mutex_unlock(&reflck_lock);
+ return ret;
+}
+
+static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int count = mc_dev->obj_desc.region_count;
+ int i;
+
+ vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
+ GFP_KERNEL);
+ if (!vdev->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ struct resource *res = &mc_dev->regions[i];
+ int no_mmap = is_fsl_mc_bus_dprc(mc_dev);
+
+ vdev->regions[i].addr = res->start;
+ vdev->regions[i].size = resource_size(res);
+ vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
+ /*
+ * Only regions addressed with PAGE granularity may be
+ * MMAPed securely.
+ */
+ if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
+ !(vdev->regions[i].size & ~PAGE_MASK))
+ vdev->regions[i].flags |=
+ VFIO_REGION_INFO_FLAG_MMAP;
+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
+ if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
+ vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
+ }
+
+ return 0;
+}
+
+static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int i;
+
+ for (i = 0; i < mc_dev->obj_desc.region_count; i++)
+ iounmap(vdev->regions[i].ioaddr);
+ kfree(vdev->regions);
+}
+
+static int vfio_fsl_mc_open(void *device_data)
+{
+ struct vfio_fsl_mc_device *vdev = device_data;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ mutex_lock(&vdev->reflck->lock);
+ if (!vdev->refcnt) {
+ ret = vfio_fsl_mc_regions_init(vdev);
+ if (ret)
+ goto err_reg_init;
+ }
+ vdev->refcnt++;
+
+ mutex_unlock(&vdev->reflck->lock);
+
+ return 0;
+
+err_reg_init:
+ mutex_unlock(&vdev->reflck->lock);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void vfio_fsl_mc_release(void *device_data)
+{
+ struct vfio_fsl_mc_device *vdev = device_data;
+ int ret;
+
+ mutex_lock(&vdev->reflck->lock);
+
+ if (!(--vdev->refcnt)) {
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
+ struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
+
+ vfio_fsl_mc_regions_cleanup(vdev);
+
+ /* reset the device before cleaning up the interrupts */
+ ret = dprc_reset_container(mc_cont->mc_io, 0,
+ mc_cont->mc_handle,
+ mc_cont->obj_desc.id,
+ DPRC_RESET_OPTION_NON_RECURSIVE);
+
+ if (ret) {
+ dev_warn(&mc_cont->dev, "VFIO_FSL_MC: device reset failed (%d)\n",
+ ret);
+ WARN_ON(1);
+ }
+
+ vfio_fsl_mc_irqs_cleanup(vdev);
+
+ fsl_mc_cleanup_irq_pool(mc_cont);
+ }
+
+ mutex_unlock(&vdev->reflck->lock);
+
+ module_put(THIS_MODULE);
+}
+
+static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_fsl_mc_device *vdev = device_data;
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
+
+ if (is_fsl_mc_bus_dprc(mc_dev))
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+
+ info.num_regions = mc_dev->obj_desc.region_count;
+ info.num_irqs = mc_dev->obj_desc.irq_count;
+
+ return copy_to_user((void __user *)arg, &info, minsz) ?
+ -EFAULT : 0;
+ }
+ case VFIO_DEVICE_GET_REGION_INFO:
+ {
+ struct vfio_region_info info;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index >= mc_dev->obj_desc.region_count)
+ return -EINVAL;
+
+ /* map offset to the physical address */
+ info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->regions[info.index].size;
+ info.flags = vdev->regions[info.index].flags;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+ return 0;
+ }
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ if (info.index >= mc_dev->obj_desc.irq_count)
+ return -EINVAL;
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+ info.count = 1;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+ return 0;
+ }
+ case VFIO_DEVICE_SET_IRQS:
+ {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+ size_t data_size = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
+ mc_dev->obj_desc.irq_count, &data_size);
+ if (ret)
+ return ret;
+
+ if (data_size) {
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+ }
+
+ mutex_lock(&vdev->igate);
+ ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
+ hdr.index, hdr.start,
+ hdr.count, data);
+ mutex_unlock(&vdev->igate);
+ kfree(data);
+
+ return ret;
+ }
+ case VFIO_DEVICE_RESET:
+ {
+ int ret;
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+
+ /* reset is supported only for the DPRC */
+ if (!is_fsl_mc_bus_dprc(mc_dev))
+ return -ENOTTY;
+
+ ret = dprc_reset_container(mc_dev->mc_io, 0,
+ mc_dev->mc_handle,
+ mc_dev->obj_desc.id,
+ DPRC_RESET_OPTION_NON_RECURSIVE);
+ return ret;
+
+ }
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_fsl_mc_device *vdev = device_data;
+ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
+ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ struct vfio_fsl_mc_region *region;
+ u64 data[8];
+ int i;
+
+ if (index >= mc_dev->obj_desc.region_count)
+ return -EINVAL;
+
+ region = &vdev->regions[index];
+
+ if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
+ return -EINVAL;
+
+ if (!region->ioaddr) {
+ region->ioaddr = ioremap(region->addr, region->size);
+ if (!region->ioaddr)
+ return -ENOMEM;
+ }
+
+ if (count != 64 || off != 0)
+ return -EINVAL;
+
+ for (i = 7; i >= 0; i--)
+ data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
+
+ if (copy_to_user(buf, data, 64))
+ return -EFAULT;
+
+ return count;
+}
+
+#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
+
+static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
+{
+ int i;
+ enum mc_cmd_status status;
+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
+
+ /* Write the command parameters into the portal */
+ for (i = 7; i >= 1; i--)
+ writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
+
+ /* Write the command header last */
+ writeq(cmd_data[0], ioaddr);
+
+ /* Wait for the response before returning to user-space.
+ * This could be optimized in the future to prepare the
+ * response before returning to user-space, avoiding the
+ * read ioctl entirely.
+ */
+ for (;;) {
+ u64 header;
+ struct mc_cmd_header *resp_hdr;
+
+ header = cpu_to_le64(readq_relaxed(ioaddr));
+
+ resp_hdr = (struct mc_cmd_header *)&header;
+ status = (enum mc_cmd_status)resp_hdr->status;
+ if (status != MC_CMD_STATUS_READY)
+ break;
+
+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
+ if (timeout_usecs == 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_fsl_mc_device *vdev = device_data;
+ unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
+ loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ struct vfio_fsl_mc_region *region;
+ u64 data[8];
+ int ret;
+
+ if (index >= mc_dev->obj_desc.region_count)
+ return -EINVAL;
+
+ region = &vdev->regions[index];
+
+ if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
+ return -EINVAL;
+
+ if (!region->ioaddr) {
+ region->ioaddr = ioremap(region->addr, region->size);
+ if (!region->ioaddr)
+ return -ENOMEM;
+ }
+
+ if (count != 64 || off != 0)
+ return -EINVAL;
+
+ if (copy_from_user(&data, buf, 64))
+ return -EFAULT;
+
+ ret = vfio_fsl_mc_send_command(region->ioaddr, data);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
+ struct vm_area_struct *vma)
+{
+ u64 size = vma->vm_end - vma->vm_start;
+ u64 pgoff, base;
+ u8 region_cacheable;
+
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ base = pgoff << PAGE_SHIFT;
+
+ if (region.size < PAGE_SIZE || base + size > region.size)
+ return -EINVAL;
+
+ region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
+ (region.type & FSL_MC_REGION_SHAREABLE);
+ if (!region_cacheable)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
+
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ size, vma->vm_page_prot);
+}
+
+static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
+{
+ struct vfio_fsl_mc_device *vdev = device_data;
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ unsigned int index;
+
+ index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+ if (vma->vm_end & ~PAGE_MASK)
+ return -EINVAL;
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+ if (index >= mc_dev->obj_desc.region_count)
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
+ && (vma->vm_flags & VM_READ))
+ return -EINVAL;
+
+ if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
+ && (vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+
+ vma->vm_private_data = mc_dev;
+
+ return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
+}
+
+static const struct vfio_device_ops vfio_fsl_mc_ops = {
+ .name = "vfio-fsl-mc",
+ .open = vfio_fsl_mc_open,
+ .release = vfio_fsl_mc_release,
+ .ioctl = vfio_fsl_mc_ioctl,
+ .read = vfio_fsl_mc_read,
+ .write = vfio_fsl_mc_write,
+ .mmap = vfio_fsl_mc_mmap,
+};
+
+static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_fsl_mc_device *vdev = container_of(nb,
+ struct vfio_fsl_mc_device, nb);
+ struct device *dev = data;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE &&
+ vdev->mc_dev == mc_cont) {
+ mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ vfio_fsl_mc_ops.name);
+ if (!mc_dev->driver_override)
+ dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
+ dev_name(&mc_cont->dev));
+ else
+ dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
+ dev_name(&mc_cont->dev));
+ } else if (action == BUS_NOTIFY_BOUND_DRIVER &&
+ vdev->mc_dev == mc_cont) {
+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
+
+ if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
+ dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
+ dev_name(dev), mc_drv->driver.name);
+ }
+
+ return 0;
+}
+
+static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int ret;
+
+ /* Non-dprc devices share mc_io from parent */
+ if (!is_fsl_mc_bus_dprc(mc_dev)) {
+ struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);
+
+ mc_dev->mc_io = mc_cont->mc_io;
+ return 0;
+ }
+
+ vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
+ ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
+ if (ret)
+ return ret;
+
+ /* open DPRC, allocate a MC portal */
+ ret = dprc_setup(mc_dev);
+ if (ret) {
+ dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
+ goto out_nc_unreg;
+ }
+
+ ret = dprc_scan_container(mc_dev, false);
+ if (ret) {
+ dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
+ goto out_dprc_cleanup;
+ }
+
+ return 0;
+
+out_dprc_cleanup:
+ dprc_remove_devices(mc_dev, NULL, 0);
+ dprc_cleanup(mc_dev);
+out_nc_unreg:
+ bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+ vdev->nb.notifier_call = NULL;
+
+ return ret;
+}
+
+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+{
+ struct iommu_group *group;
+ struct vfio_fsl_mc_device *vdev;
+ struct device *dev = &mc_dev->dev;
+ int ret;
+
+ group = vfio_iommu_group_get(dev);
+ if (!group) {
+ dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
+ return -EINVAL;
+ }
+
+ vdev = devm_kzalloc(dev, sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ ret = -ENOMEM;
+ goto out_group_put;
+ }
+
+ vdev->mc_dev = mc_dev;
+
+ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
+ if (ret) {
+ dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
+ goto out_group_put;
+ }
+
+ ret = vfio_fsl_mc_reflck_attach(vdev);
+ if (ret)
+ goto out_group_dev;
+
+ ret = vfio_fsl_mc_init_device(vdev);
+ if (ret)
+ goto out_reflck;
+
+ mutex_init(&vdev->igate);
+
+ return 0;
+
+out_reflck:
+ vfio_fsl_mc_reflck_put(vdev->reflck);
+out_group_dev:
+ vfio_del_group_dev(dev);
+out_group_put:
+ vfio_iommu_group_put(group, dev);
+ return ret;
+}
+
+static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
+{
+ struct vfio_fsl_mc_device *vdev;
+ struct device *dev = &mc_dev->dev;
+
+ vdev = vfio_del_group_dev(dev);
+ if (!vdev)
+ return -EINVAL;
+
+ mutex_destroy(&vdev->igate);
+
+ vfio_fsl_mc_reflck_put(vdev->reflck);
+
+ if (is_fsl_mc_bus_dprc(mc_dev)) {
+ dprc_remove_devices(mc_dev, NULL, 0);
+ dprc_cleanup(mc_dev);
+ }
+
+ if (vdev->nb.notifier_call)
+ bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
+
+ vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
+
+ return 0;
+}
+
+static struct fsl_mc_driver vfio_fsl_mc_driver = {
+ .probe = vfio_fsl_mc_probe,
+ .remove = vfio_fsl_mc_remove,
+ .driver = {
+ .name = "vfio-fsl-mc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init vfio_fsl_mc_driver_init(void)
+{
+ return fsl_mc_driver_register(&vfio_fsl_mc_driver);
+}
+
+static void __exit vfio_fsl_mc_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
+}
+
+module_init(vfio_fsl_mc_driver_init);
+module_exit(vfio_fsl_mc_driver_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
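
Driving the new driver from user-space follows the standard VFIO flow; the only fsl-mc-specific rule, per vfio_fsl_mc_read()/vfio_fsl_mc_write() above, is that the MC command portal region must be accessed as a single 64-byte transfer at offset 0 within the region. A hedged sketch (device is assumed to be an open VFIO device fd, and region 0 is assumed to be the DPRC command portal):

#include <stdint.h>
#include <unistd.h>

/* Region offsets encode the region index in the top bits (see the
 * private header below): offset = index << VFIO_FSL_MC_OFFSET_SHIFT. */
#define FSL_MC_REGION_OFFSET(idx)	((uint64_t)(idx) << 40)

/* Submit one MC command and fetch its response, 64 bytes each way.
 * The write blocks until the driver sees the command complete. */
static int send_mc_command(int device, uint64_t cmd[8])
{
	if (pwrite(device, cmd, 64, FSL_MC_REGION_OFFSET(0)) != 64)
		return -1;
	if (pread(device, cmd, 64, FSL_MC_REGION_OFFSET(0)) != 64)
		return -1;
	return 0;
}
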
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
new file mode 100644
index 000000000000..0d9f3002df7f
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2019 NXP
+ */
+
+#include <linux/vfio.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/eventfd.h>
+#include <linux/msi.h>
+
+#include "linux/fsl/mc.h"
+#include "vfio_fsl_mc_private.h"
+
+static int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ struct vfio_fsl_mc_irq *mc_irq;
+ int irq_count;
+ int ret, i;
+
+ /* Device does not support any interrupts */
+ if (mc_dev->obj_desc.irq_count == 0)
+ return 0;
+
+ /* interrupts were already allocated for this device */
+ if (vdev->mc_irqs)
+ return 0;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
+ if (!mc_irq)
+ return -ENOMEM;
+
+ /* Allocate IRQs */
+ ret = fsl_mc_allocate_irqs(mc_dev);
+ if (ret) {
+ kfree(mc_irq);
+ return ret;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_irq[i].count = 1;
+ mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
+ }
+
+ vdev->mc_irqs = mc_irq;
+
+ return 0;
+}
+
+static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
+{
+ struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
+
+ eventfd_signal(mc_irq->trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
+ int index, int fd)
+{
+ struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
+ struct eventfd_ctx *trigger;
+ int hwirq;
+ int ret;
+
+ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+ if (irq->trigger) {
+ free_irq(hwirq, irq);
+ kfree(irq->name);
+ eventfd_ctx_put(irq->trigger);
+ irq->trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+
+ irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
+ hwirq, dev_name(&vdev->mc_dev->dev));
+ if (!irq->name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(irq->name);
+ return PTR_ERR(trigger);
+ }
+
+ irq->trigger = trigger;
+
+ ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
+ irq->name, irq);
+ if (ret) {
+ kfree(irq->name);
+ eventfd_ctx_put(trigger);
+ irq->trigger = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
+ unsigned int index, unsigned int start,
+ unsigned int count, u32 flags,
+ void *data)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int ret, hwirq;
+ struct vfio_fsl_mc_irq *irq;
+ struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
+ struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
+
+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
+ return vfio_set_trigger(vdev, index, -1);
+
+ if (start != 0 || count != 1)
+ return -EINVAL;
+
+ mutex_lock(&vdev->reflck->lock);
+ ret = fsl_mc_populate_irq_pool(mc_cont,
+ FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (ret)
+ goto unlock;
+
+ ret = vfio_fsl_mc_irqs_allocate(vdev);
+ if (ret)
+ goto unlock;
+ mutex_unlock(&vdev->reflck->lock);
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ s32 fd = *(s32 *)data;
+
+ return vfio_set_trigger(vdev, index, fd);
+ }
+
+ hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+
+ irq = &vdev->mc_irqs[index];
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_fsl_mc_irq_handler(hwirq, irq);
+
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ u8 trigger = *(u8 *)data;
+
+ if (trigger)
+ vfio_fsl_mc_irq_handler(hwirq, irq);
+ }
+
+ return 0;
+
+unlock:
+ mutex_unlock(&vdev->reflck->lock);
+ return ret;
+}
+
+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+ u32 flags, unsigned int index,
+ unsigned int start, unsigned int count,
+ void *data)
+{
+ if (flags & VFIO_IRQ_SET_ACTION_TRIGGER)
+ return vfio_fsl_mc_set_irq_trigger(vdev, index, start,
+ count, flags, data);
+ else
+ return -EINVAL;
+}
+
+/* Free All IRQs for the given MC object */
+void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int irq_count = mc_dev->obj_desc.irq_count;
+ int i;
+
+ /*
+ * Device does not support any interrupts, or the interrupts
+ * were never configured
+ */
+ if (!vdev->mc_irqs)
+ return;
+
+ for (i = 0; i < irq_count; i++)
+ vfio_set_trigger(vdev, i, -1);
+
+ fsl_mc_free_irqs(mc_dev);
+ kfree(vdev->mc_irqs);
+ vdev->mc_irqs = NULL;
+}
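
Every fsl-mc IRQ index exposes a single eventfd-capable vector (count = 1, VFIO_IRQ_INFO_EVENTFD), and vfio_fsl_mc_set_irq_trigger() only accepts start = 0, count = 1, so hooking an interrupt up from user-space is the one-vector form of VFIO_DEVICE_SET_IRQS. A sketch, with the device fd and IRQ index assumed:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Returns an eventfd that becomes readable whenever the IRQ fires. */
static int attach_irq_eventfd(int device, unsigned int index)
{
	size_t sz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
	struct vfio_irq_set *set;
	int32_t efd;

	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;
	set = calloc(1, sz);
	if (!set) {
		close(efd);
		return -1;
	}
	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = index;	/* the driver requires start = 0, count = 1 */
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));
	if (ioctl(device, VFIO_DEVICE_SET_IRQS, set)) {
		close(efd);
		efd = -1;
	}
	free(set);
	return efd;
}
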
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
new file mode 100644
index 000000000000..a97ee691ed47
--- /dev/null
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016,2019-2020 NXP
+ */
+
+#ifndef VFIO_FSL_MC_PRIVATE_H
+#define VFIO_FSL_MC_PRIVATE_H
+
+#define VFIO_FSL_MC_OFFSET_SHIFT 40
+#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
+
+#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) ((off) >> VFIO_FSL_MC_OFFSET_SHIFT)
+
+#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
+ ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
+
+struct vfio_fsl_mc_irq {
+ u32 flags;
+ u32 count;
+ struct eventfd_ctx *trigger;
+ char *name;
+};
+
+struct vfio_fsl_mc_reflck {
+ struct kref kref;
+ struct mutex lock;
+};
+
+struct vfio_fsl_mc_region {
+ u32 flags;
+ u32 type;
+ u64 addr;
+ resource_size_t size;
+ void __iomem *ioaddr;
+};
+
+struct vfio_fsl_mc_device {
+ struct fsl_mc_device *mc_dev;
+ struct notifier_block nb;
+ int refcnt;
+ struct vfio_fsl_mc_region *regions;
+ struct vfio_fsl_mc_reflck *reflck;
+ struct mutex igate;
+ struct vfio_fsl_mc_irq *mc_irqs;
+};
+
+extern int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
+ u32 flags, unsigned int index,
+ unsigned int start, unsigned int count,
+ void *data);
+
+void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
+
+#endif /* VFIO_FSL_MC_PRIVATE_H */
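
With a 40-bit shift, each region gets its own 1 TiB window in the device file: the region index lives in bits 40 and up, the offset within the region in the low 40 bits. Worked through with the macros above:

	VFIO_FSL_MC_INDEX_TO_OFFSET(0) == 0x000000000000
	VFIO_FSL_MC_INDEX_TO_OFFSET(1) == 0x010000000000	/* 1ULL << 40 */
	VFIO_FSL_MC_OFFSET_TO_INDEX(0x010000000040) == 1	/* region 1 ... */
	0x010000000040 & VFIO_FSL_MC_OFFSET_MASK == 0x40	/* ... byte 64 */
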
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index ac3c1dd3edef..40a223381ab6 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -45,3 +45,15 @@ config VFIO_PCI_NVLINK2
depends on VFIO_PCI && PPC_POWERNV
help
VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
+
+config VFIO_PCI_ZDEV
+ bool "VFIO PCI ZPCI device CLP support"
+ depends on VFIO_PCI && S390
+ default y
+ help
+ Enabling this option exposes VFIO capabilities containing hardware
+ configuration for zPCI devices. This enables userspace (e.g. QEMU)
+ to supply proper configuration values instead of hard-coded defaults
+ for zPCI devices passed through via VFIO on s390.
+
+ Say Y here.
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index f027f8a0e89c..781e0809d6ee 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -3,5 +3,6 @@
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
+vfio-pci-$(CONFIG_VFIO_PCI_ZDEV) += vfio_pci_zdev.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 1ab1f5cda4ac..e6190173482c 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pdev->vendor == PCI_VENDOR_ID_INTEL &&
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
ret = vfio_pci_igd_init(vdev);
- if (ret) {
+ if (ret && ret != -ENODEV) {
pci_warn(pdev, "Failed to setup Intel IGD regions\n");
goto disable_exit;
}
@@ -807,15 +807,25 @@ static long vfio_pci_ioctl(void *device_data,
if (cmd == VFIO_DEVICE_GET_INFO) {
struct vfio_device_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ unsigned long capsz;
minsz = offsetofend(struct vfio_device_info, num_irqs);
+ /* For backward compatibility, cannot require this */
+ capsz = offsetofend(struct vfio_device_info, cap_offset);
+
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
+ if (info.argsz >= capsz) {
+ minsz = capsz;
+ info.cap_offset = 0;
+ }
+
info.flags = VFIO_DEVICE_FLAGS_PCI;
if (vdev->reset_works)
@@ -824,6 +834,33 @@ static long vfio_pci_ioctl(void *device_data,
info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
+ if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV)) {
+ int ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+
+ if (ret && ret != -ENODEV) {
+ pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
+ return ret;
+ }
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
+ }
+
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
@@ -1480,31 +1517,29 @@ static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
} else {
mmap_read_lock(mm);
}
- if (mmget_still_valid(mm)) {
- if (try) {
- if (!mutex_trylock(&vdev->vma_lock)) {
- mmap_read_unlock(mm);
- mmput(mm);
- return 0;
- }
- } else {
- mutex_lock(&vdev->vma_lock);
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return 0;
}
- list_for_each_entry_safe(mmap_vma, tmp,
- &vdev->vma_list, vma_next) {
- struct vm_area_struct *vma = mmap_vma->vma;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
- if (vma->vm_mm != mm)
- continue;
+ if (vma->vm_mm != mm)
+ continue;
- list_del(&mmap_vma->vma_next);
- kfree(mmap_vma);
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
- zap_vma_ptes(vma, vma->vm_start,
- vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&vdev->vma_lock);
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
}
+ mutex_unlock(&vdev->vma_lock);
mmap_read_unlock(mm);
mmput(mm);
}
@@ -1862,7 +1897,6 @@ static const struct vfio_device_ops vfio_pci_ops = {
static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev);
static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck);
-static struct pci_driver vfio_pci_driver;
static int vfio_pci_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
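
The capability chain added to VFIO_DEVICE_GET_INFO above follows the common VFIO argsz protocol: the kernel reports the size it wanted in info.argsz, and user-space re-issues the ioctl with a larger buffer until everything fits, then walks the chain from cap_offset. A hedged user-space sketch (assumes a uapi header new enough to carry the cap_offset field added here):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Fetch device info, growing the buffer until any caps fit. */
static struct vfio_device_info *get_device_info(int device)
{
	uint32_t argsz = sizeof(struct vfio_device_info);
	struct vfio_device_info *info;

	for (;;) {
		info = calloc(1, argsz);
		if (!info)
			return NULL;
		info->argsz = argsz;
		if (ioctl(device, VFIO_DEVICE_GET_INFO, info)) {
			free(info);
			return NULL;
		}
		if (info->argsz <= argsz)
			return info;	/* caps, if any, are in the buffer */
		argsz = info->argsz;	/* kernel wants a bigger buffer */
		free(info);
	}
}
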
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index d98843feddce..a402adee8a21 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -406,7 +406,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
* PF SR-IOV capability, there's therefore no need to trigger
* faults based on the virtual value.
*/
- return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
+ return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY);
}
/*
@@ -467,6 +467,9 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
__le32 *vbar;
u64 mask;
+ if (!vdev->bardirty)
+ return;
+
vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
@@ -520,8 +523,8 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
- /* Mask in virtual memory enable for SR-IOV devices */
- if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
+ /* Mask in virtual memory enable */
+ if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
u32 tmp_val = le32_to_cpu(*val);
@@ -589,9 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
* shows it disabled (phys_mem/io), then the device has
* undergone some kind of backdoor reset and needs to be
* restored before we allow it to enable the bars.
- * SR-IOV devices will trigger this, but we catch them later
+ * SR-IOV devices will trigger this; catch the mem enable
+ * case now, the io enable case will be caught later
*/
- if ((new_mem && virt_mem && !phys_mem) ||
+ if ((new_mem && virt_mem && !phys_mem &&
+ !pdev->no_command_memory) ||
(new_io && virt_io && !phys_io) ||
vfio_need_bar_restore(vdev))
vfio_bar_restore(vdev);
@@ -1734,12 +1739,14 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vconfig[PCI_INTERRUPT_PIN]);
vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
-
+ }
+ if (pdev->no_command_memory) {
/*
- * VFs do no implement the memory enable bit of the COMMAND
- * register therefore we'll not have it set in our initial
- * copy of config space after pci_enable_device(). For
- * consistency with PFs, set the virtual enable bit here.
+ * VFs and devices that set pdev->no_command_memory do not
+ * implement the memory enable bit of the COMMAND register
+ * therefore we'll not have it set in our initial copy of
+ * config space after pci_enable_device(). For consistency
+ * with PFs, set the virtual enable bit here.
*/
*(__le16 *)&vconfig[PCI_COMMAND] |=
cpu_to_le16(PCI_COMMAND_MEMORY);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 1d9fb2592945..869dce5f134d 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -352,11 +352,13 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
vdev->ctx[vector].producer.token = trigger;
vdev->ctx[vector].producer.irq = irq;
ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
- if (unlikely(ret))
+ if (unlikely(ret)) {
dev_info(&pdev->dev,
"irq bypass producer (token %p) registration fails: %d\n",
vdev->ctx[vector].producer.token, ret);
+ vdev->ctx[vector].producer.token = NULL;
+ }
vdev->ctx[vector].trigger = trigger;
return 0;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 61ca8ab165dc..5c90e560c5c7 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -213,4 +213,16 @@ static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
return -ENODEV;
}
#endif
+
+#ifdef CONFIG_VFIO_PCI_ZDEV
+extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps);
+#else
+static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 9e353c484ace..a0b5fc8e46f4 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
return done;
}
-static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
+ bool test_mem)
{
- struct vfio_pci_ioeventfd *ioeventfd = opaque;
-
switch (ioeventfd->count) {
case 1:
- vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite8(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
case 2:
- vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite16(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
case 4:
- vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite32(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
#ifdef iowrite64
case 8:
- vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+ vfio_pci_iowrite64(ioeventfd->vdev, test_mem,
ioeventfd->data, ioeventfd->addr);
break;
#endif
}
+}
+
+static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+ struct vfio_pci_device *vdev = ioeventfd->vdev;
+
+ if (ioeventfd->test_mem) {
+ if (!down_read_trylock(&vdev->memory_lock))
+ return 1; /* Lock contended, use thread */
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return 0;
+ }
+ }
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, false);
+
+ if (ioeventfd->test_mem)
+ up_read(&vdev->memory_lock);
return 0;
}
+static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
+{
+ struct vfio_pci_ioeventfd *ioeventfd = opaque;
+
+ vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
+}
+
long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
uint64_t data, int count, int fd)
{
@@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
- NULL, NULL, &ioeventfd->virqfd, fd);
+ vfio_pci_ioeventfd_thread, NULL,
+ &ioeventfd->virqfd, fd);
if (ret) {
kfree(ioeventfd);
goto out_unlock;
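
The rework above splits the ioeventfd into a fast path and a slow path: the eventfd callback only ever tries memory_lock, and returns 1 on contention so the write is replayed from the virqfd thread, where sleeping is allowed. For context, the ioeventfd itself is armed from user-space with VFIO_DEVICE_IOEVENTFD; a sketch (the offset is a placeholder for a doorbell register within a BAR region):

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Arm a 4-byte ioeventfd write; signaling the returned fd makes the
 * kernel perform the MMIO write without a trip through user-space
 * (typically the fd is handed to KVM as an ioeventfd). */
static int arm_ioeventfd(int device, uint64_t region_offset, uint32_t data)
{
	struct vfio_device_ioeventfd ie = {
		.argsz = sizeof(ie),
		.flags = VFIO_DEVICE_IOEVENTFD_32,
		.offset = region_offset,
		.data = data,
		.fd = eventfd(0, EFD_CLOEXEC),
	};

	if (ie.fd < 0)
		return -1;
	if (ioctl(device, VFIO_DEVICE_IOEVENTFD, &ie)) {
		close(ie.fd);
		return -1;
	}
	return ie.fd;
}
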
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
new file mode 100644
index 000000000000..229685634031
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO ZPCI devices support
+ *
+ * Copyright (C) IBM Corp. 2020. All rights reserved.
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ * Matthew Rosato <mjrosato@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/vfio_zdev.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_io.h>
+
+#include "vfio_pci_private.h"
+
+/*
+ * Add the Base PCI Function information to the device info region.
+ */
+static int zpci_base_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_base cap = {
+ .header.id = VFIO_DEVICE_INFO_CAP_ZPCI_BASE,
+ .header.version = 1,
+ .start_dma = zdev->start_dma,
+ .end_dma = zdev->end_dma,
+ .pchid = zdev->pchid,
+ .vfn = zdev->vfn,
+ .fmb_length = zdev->fmb_length,
+ .pft = zdev->pft,
+ .gid = zdev->pfgid
+ };
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+/*
+ * Add the Base PCI Function Group information to the device info region.
+ */
+static int zpci_group_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_group cap = {
+ .header.id = VFIO_DEVICE_INFO_CAP_ZPCI_GROUP,
+ .header.version = 1,
+ .dasm = zdev->dma_mask,
+ .msi_addr = zdev->msi_addr,
+ .flags = VFIO_DEVICE_INFO_ZPCI_FLAG_REFRESH,
+ .mui = zdev->fmb_update,
+ .noi = zdev->max_msi,
+ .maxstbl = ZPCI_MAX_WRITE_SIZE,
+ .version = zdev->version
+ };
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+/*
+ * Add the device utility string to the device info region.
+ */
+static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_util *cap;
+ int cap_size = sizeof(*cap) + CLP_UTIL_STR_LEN;
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
+ cap->header.version = 1;
+ cap->size = CLP_UTIL_STR_LEN;
+ memcpy(cap->util_str, zdev->util_str, cap->size);
+
+ ret = vfio_info_add_capability(caps, &cap->header, cap_size);
+
+ kfree(cap);
+
+ return ret;
+}
+
+/*
+ * Add the function path string to the device info region.
+ */
+static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_device_info_cap_zpci_pfip *cap;
+ int cap_size = sizeof(*cap) + CLP_PFIP_NR_SEGMENTS;
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
+ if (!cap)
+ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
+ cap->header.version = 1;
+ cap->size = CLP_PFIP_NR_SEGMENTS;
+ memcpy(cap->pfip, zdev->pfip, cap->size);
+
+ ret = vfio_info_add_capability(caps, &cap->header, cap_size);
+
+ kfree(cap);
+
+ return ret;
+}
+
+/*
+ * Add all supported capabilities to the VFIO_DEVICE_GET_INFO capability chain.
+ */
+int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct zpci_dev *zdev = to_zpci(vdev->pdev);
+ int ret;
+
+ if (!zdev)
+ return -ENODEV;
+
+ ret = zpci_base_cap(zdev, vdev, caps);
+ if (ret)
+ return ret;
+
+ ret = zpci_group_cap(zdev, vdev, caps);
+ if (ret)
+ return ret;
+
+ if (zdev->util_str_avail) {
+ ret = zpci_util_cap(zdev, vdev, caps);
+ if (ret)
+ return ret;
+ }
+
+ ret = zpci_pfip_cap(zdev, vdev, caps);
+
+ return ret;
+}
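
Given an info buffer that includes the chain (see the get_device_info() sketch earlier), a particular capability is located by following the next links from cap_offset; a next value of 0 terminates. A sketch for pulling out the zPCI base capability (assumes the vfio_zdev.h uapi header introduced with this series):

#include <stdint.h>
#include <linux/vfio.h>
#include <linux/vfio_zdev.h>

static void *find_device_info_cap(struct vfio_device_info *info, uint16_t id)
{
	uint32_t off = (info->flags & VFIO_DEVICE_FLAGS_CAPS) ?
		       info->cap_offset : 0;

	while (off) {
		struct vfio_info_cap_header *hdr =
			(struct vfio_info_cap_header *)((char *)info + off);

		if (hdr->id == id)
			return hdr;
		off = hdr->next;	/* 0 terminates the chain */
	}
	return NULL;
}

/* Usage:
 *	struct vfio_device_info_cap_zpci_base *base =
 *		find_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE);
 *	if (base)
 *		// the DMA window is base->start_dma .. base->end_dma
 */
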
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index c0771a9567fb..fb4b385191f2 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data)
ret = pm_runtime_get_sync(vdev->device);
if (ret < 0)
- goto err_pm;
+ goto err_rst;
ret = vfio_platform_call_reset(vdev, &extra_dbg);
if (ret && vdev->reset_required) {
@@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data)
err_rst:
pm_runtime_put(vdev->device);
-err_pm:
vfio_platform_irq_cleanup(vdev);
err_irq:
vfio_platform_regions_cleanup(vdev);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 262ab0efd06c..2151bc7f87ab 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1949,8 +1949,10 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
if (!group)
return -ENODEV;
- if (group->dev_counter > 1)
- return -EINVAL;
+ if (group->dev_counter > 1) {
+ ret = -EINVAL;
+ goto err_pin_pages;
+ }
ret = vfio_group_add_container_user(group);
if (ret)
@@ -2051,6 +2053,9 @@ int vfio_group_pin_pages(struct vfio_group *group,
if (!group || !user_iova_pfn || !phys_pfn || !npage)
return -EINVAL;
+ if (group->dev_counter > 1)
+ return -EINVAL;
+
if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
return -E2BIG;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5fbf0c1f7433..67e827638995 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -693,7 +693,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
if (ret) {
- vfio_unpin_page_external(dma, iova, do_accounting);
+ if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+ vfio_lock_acct(dma, -1, true);
goto pin_unwind;
}
@@ -774,7 +775,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
long unlocked = 0;
struct vfio_regions *entry, *next;
- iommu_tlb_sync(domain->domain, iotlb_gather);
+ iommu_iotlb_sync(domain->domain, iotlb_gather);
list_for_each_entry_safe(entry, next, regions, list) {
unlocked += vfio_unpin_pages_remote(dma,
@@ -1992,6 +1993,7 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -2008,18 +2010,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
mutex_lock(&iommu->lock);
- list_for_each_entry(d, &iommu->domain_list, next) {
- if (find_iommu_group(d, iommu_group)) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
- }
-
- if (iommu->external_domain) {
- if (find_iommu_group(iommu->external_domain, iommu_group)) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
+ /* Check for duplicates */
+ if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
+ mutex_unlock(&iommu->lock);
+ return -EINVAL;
}
group = kzalloc(sizeof(*group), GFP_KERNEL);
@@ -2609,6 +2603,20 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
}
+static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
+
+ cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
+ cap_dma_avail.header.version = 1;
+
+ cap_dma_avail.avail = iommu->dma_avail;
+
+ return vfio_info_add_capability(caps, &cap_dma_avail.header,
+ sizeof(cap_dma_avail));
+}
+
static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
unsigned long arg)
{
@@ -2642,6 +2650,9 @@ static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
ret = vfio_iommu_migration_build_caps(iommu, &caps);
if (!ret)
+ ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
+
+ if (!ret)
ret = vfio_iommu_iova_build_caps(iommu, &caps);
mutex_unlock(&iommu->lock);
@@ -2933,7 +2944,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
* size
*/
bitmap_set(dma->bitmap, offset >> pgshift,
- *copied >> pgshift);
+ ((offset + *copied - 1) >> pgshift) -
+ (offset >> pgshift) + 1);
}
} else
*copied = copy_from_user(data, (void __user *)vaddr,
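
The new DMA-available capability reports how many concurrent DMA mappings the container will still accept (iommu->dma_avail, bounded by the dma_entry_limit module parameter). It travels in the VFIO_IOMMU_GET_INFO capability chain and is walked the same way as the device-info chain in the earlier sketch; the part that differs, hedged as usual:

#include <stdint.h>
#include <linux/vfio.h>

/* info: a vfio_iommu_type1_info buffer fetched with the usual
 * grow-argsz-and-retry loop; returns 0 if the cap is absent. */
static uint32_t dma_mappings_available(struct vfio_iommu_type1_info *info)
{
	uint32_t off = (info->flags & VFIO_IOMMU_INFO_CAPS) ?
		       info->cap_offset : 0;

	while (off) {
		struct vfio_info_cap_header *hdr =
			(struct vfio_info_cap_header *)((char *)info + off);

		if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL)
			return ((struct vfio_iommu_type1_info_dma_avail *)
				hdr)->avail;
		off = hdr->next;
	}
	return 0;
}
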
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index b22adf03f584..f22fce549862 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -52,7 +52,6 @@
#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
-#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
@@ -140,6 +139,7 @@ struct vhost_scsi_tpg {
struct se_portal_group se_tpg;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi *vhost_scsi;
+ struct list_head tmf_queue;
};
struct vhost_scsi_tport {
@@ -189,6 +189,9 @@ struct vhost_scsi_virtqueue {
* Writers must also take dev mutex and flush under it.
*/
int inflight_idx;
+ struct vhost_scsi_cmd *scsi_cmds;
+ struct sbitmap scsi_tags;
+ int max_cmds;
};
struct vhost_scsi {
@@ -209,6 +212,20 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
+struct vhost_scsi_tmf {
+ struct vhost_work vwork;
+ struct vhost_scsi_tpg *tpg;
+ struct vhost_scsi *vhost;
+ struct vhost_scsi_virtqueue *svq;
+ struct list_head queue_entry;
+
+ struct se_cmd se_cmd;
+ struct vhost_scsi_inflight *inflight;
+ struct iovec resp_iov;
+ int in_iovs;
+ int vq_desc;
+};
+
/*
* Context for processing request and control queue operations.
*/
@@ -320,11 +337,13 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
struct vhost_scsi_cmd, tvc_se_cmd);
- struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
+ struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
int i;
if (tv_cmd->tvc_sgl_count) {
@@ -336,8 +355,36 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
}
- vhost_scsi_put_inflight(tv_cmd->inflight);
- target_free_tag(se_sess, se_cmd);
+ sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
+{
+ struct vhost_scsi_tpg *tpg = tmf->tpg;
+ struct vhost_scsi_inflight *inflight = tmf->inflight;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ list_add_tail(&tpg->tmf_queue, &tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ vhost_scsi_put_inflight(inflight);
+}
+
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd,
+ struct vhost_scsi_tmf, se_cmd);
+
+ vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
+ } else {
+ struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+ struct vhost_scsi_cmd, tvc_se_cmd);
+ struct vhost_scsi *vs = cmd->tvc_vhost;
+
+ llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+ }
}
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
@@ -362,34 +409,24 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
return 0;
}
-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
-{
- struct vhost_scsi *vs = cmd->tvc_vhost;
-
- llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-
- vhost_work_queue(&vs->dev, &vs->vs_completion_work);
-}
-
static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
- struct vhost_scsi_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
- struct vhost_scsi_cmd *cmd = container_of(se_cmd,
- struct vhost_scsi_cmd, tvc_se_cmd);
- vhost_scsi_complete_cmd(cmd);
+ transport_generic_free_cmd(se_cmd, 0);
return 0;
}
static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
- return;
+ struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
+ se_cmd);
+
+ transport_generic_free_cmd(&tmf->se_cmd, 0);
}
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
@@ -429,15 +466,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
return evt;
}
-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-
- /* TODO locking against target/backend threads? */
- transport_generic_free_cmd(se_cmd, 0);
-
-}
-
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
return target_put_sess_cmd(se_cmd);
@@ -556,7 +584,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
} else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
- vhost_scsi_free_cmd(cmd);
+ vhost_scsi_release_cmd_res(se_cmd);
}
vq = -1;
@@ -566,31 +594,31 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
}
static struct vhost_scsi_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
+vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
u32 exp_data_len, int data_direction)
{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
- struct se_session *se_sess;
struct scatterlist *sg, *prot_sg;
struct page **pages;
- int tag, cpu;
+ int tag;
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Unable to locate active struct vhost_scsi_nexus\n");
return ERR_PTR(-EIO);
}
- se_sess = tv_nexus->tvn_se_sess;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ tag = sbitmap_get(&svq->scsi_tags, 0, false);
if (tag < 0) {
pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
return ERR_PTR(-ENOMEM);
}
- cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
+ cmd = &svq->scsi_cmds[tag];
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
@@ -599,7 +627,6 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
cmd->tvc_se_cmd.map_tag = tag;
- cmd->tvc_se_cmd.map_cpu = cpu;
cmd->tvc_tag = scsi_tag;
cmd->tvc_lun = lun;
cmd->tvc_task_attr = task_attr;
@@ -907,6 +934,11 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
return ret;
}
+static u16 vhost_buf_to_lun(u8 *lun_buf)
+{
+ return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
+}
+
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
@@ -1045,12 +1077,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
- lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+ lun = vhost_buf_to_lun(v_req_pi.lun);
} else {
tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
- lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+ lun = vhost_buf_to_lun(v_req.lun);
}
/*
* Check that the received CDB size does not exceeded our
@@ -1065,11 +1097,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
goto err;
}
- cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+ cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
exp_data_len + prot_bytes,
data_direction);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
+ vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
PTR_ERR(cmd));
goto err;
}
@@ -1088,7 +1120,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
&prot_iter, exp_data_len,
&data_iter))) {
vq_err(vq, "Failed to map iov to sgl\n");
- vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+ vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
}
}
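
vhost_buf_to_lun(), factored out above, decodes the 8-byte virtio-scsi LUN field: for the flat addressing used here, byte 1 is the target and bytes 2-3 carry 0x4000 | lun big-endian, so masking with 0x3FFF recovers the 14-bit LUN. A worked example:

	/* virtio-scsi LUN field: { 1, target, 0x40 | (lun >> 8), lun & 0xff, 0, ... } */
	u8 lun_buf[8] = { 1, 0, 0x40, 0x05 };	/* target 0, LUN 5 */
	u16 lun = ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;	/* == 5 */
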
@@ -1124,9 +1156,9 @@ out:
}
static void
-vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq,
- struct vhost_scsi_ctx *vc)
+vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ int in_iovs, int vq_desc, struct iovec *resp_iov,
+ int tmf_resp_code)
{
struct virtio_scsi_ctrl_tmf_resp rsp;
struct iov_iter iov_iter;
@@ -1134,17 +1166,87 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
pr_debug("%s\n", __func__);
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ rsp.response = tmf_resp_code;
- iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
+ iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
if (likely(ret == sizeof(rsp)))
- vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
else
pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}
+static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
+{
+ struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
+ vwork);
+ int resp_code;
+
+ if (tmf->se_cmd.se_tmr_req->response == TMR_FUNCTION_COMPLETE)
+ resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ else
+ resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+
+ vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
+ tmf->vq_desc, &tmf->resp_iov, resp_code);
+ vhost_scsi_release_tmf_res(tmf);
+}
+
+static void
+vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
+ struct vhost_virtqueue *vq,
+ struct virtio_scsi_ctrl_tmf_req *vtmf,
+ struct vhost_scsi_ctx *vc)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_tmf *tmf;
+
+ if (vhost32_to_cpu(vq, vtmf->subtype) !=
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
+ goto send_reject;
+
+ if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
+ pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
+ goto send_reject;
+ }
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (list_empty(&tpg->tmf_queue)) {
+ pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ goto send_reject;
+ }
+
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del_init(&tmf->queue_entry);
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ tmf->tpg = tpg;
+ tmf->vhost = vs;
+ tmf->svq = svq;
+ tmf->resp_iov = vq->iov[vc->out];
+ tmf->vq_desc = vc->head;
+ tmf->in_iovs = vc->in;
+ tmf->inflight = vhost_scsi_get_inflight(vq);
+
+ if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
+ vhost_buf_to_lun(vtmf->lun), NULL,
+ TMR_LUN_RESET, GFP_KERNEL, 0,
+ TARGET_SCF_ACK_KREF) < 0) {
+ vhost_scsi_release_tmf_res(tmf);
+ goto send_reject;
+ }
+
+ return;
+
+send_reject:
+ vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
+ VIRTIO_SCSI_S_FUNCTION_REJECTED);
+}
+
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
@@ -1170,6 +1272,7 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
+ struct vhost_scsi_tpg *tpg;
union {
__virtio32 type;
struct virtio_scsi_ctrl_an_req an;
@@ -1251,12 +1354,12 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vc.req += typ_size;
vc.req_size -= typ_size;
- ret = vhost_scsi_get_req(vq, &vc, NULL);
+ ret = vhost_scsi_get_req(vq, &vc, &tpg);
if (ret)
goto err;
if (v_req.type == VIRTIO_SCSI_T_TMF)
- vhost_scsi_send_tmf_reject(vs, vq, &vc);
+ vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
else
vhost_scsi_send_an_resp(vs, vq, &vc);
err:
@@ -1373,6 +1476,83 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
wait_for_completion(&old_inflight[i]->comp);
}
+static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (!svq->scsi_cmds)
+ return;
+
+ for (i = 0; i < svq->max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ kfree(tv_cmd->tvc_sgl);
+ kfree(tv_cmd->tvc_prot_sgl);
+ kfree(tv_cmd->tvc_upages);
+ }
+
+ sbitmap_free(&svq->scsi_tags);
+ kfree(svq->scsi_cmds);
+ svq->scsi_cmds = NULL;
+}
+
+static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
+{
+ struct vhost_scsi_virtqueue *svq = container_of(vq,
+ struct vhost_scsi_virtqueue, vq);
+ struct vhost_scsi_cmd *tv_cmd;
+ unsigned int i;
+
+ if (svq->scsi_cmds)
+ return 0;
+
+ if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
+ NUMA_NO_NODE))
+ return -ENOMEM;
+ svq->max_cmds = max_cmds;
+
+ svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
+ if (!svq->scsi_cmds) {
+ sbitmap_free(&svq->scsi_tags);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmds; i++) {
+ tv_cmd = &svq->scsi_cmds[i];
+
+ tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_upages) {
+ pr_err("Unable to allocate tv_cmd->tvc_upages\n");
+ goto out;
+ }
+
+ tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_prot_sgl) {
+ pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+ goto out;
+ }
+ }
+ return 0;
+out:
+ vhost_scsi_destroy_vq_cmds(vq);
+ return -ENOMEM;
+}
+
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* vhost_scsi_tpg with an active struct vhost_scsi_nexus
@@ -1427,10 +1607,9 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
- kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
ret = -EEXIST;
- goto out;
+ goto undepend;
}
/*
* In order to ensure individual vhost-scsi configfs
@@ -1442,9 +1621,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
ret = target_depend_item(&se_tpg->tpg_group.cg_item);
if (ret) {
pr_warn("target_depend_item() failed: %d\n", ret);
- kfree(vs_tpg);
mutex_unlock(&tpg->tv_tpg_mutex);
- goto out;
+ goto undepend;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
@@ -1457,6 +1635,16 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
+
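+		/* Allocate per-vq command arrays for each I/O vq the guest has set up. */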
+ for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
+ vq = &vs->vqs[i].vq;
+ if (!vhost_vq_is_setup(vq))
+ continue;
+
+ if (vhost_scsi_setup_vq_cmds(vq, vq->num))
+ goto destroy_vq_cmds;
+ }
+
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
@@ -1476,7 +1664,22 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
vhost_scsi_flush(vs);
kfree(vs->vs_tpg);
vs->vs_tpg = vs_tpg;
+ goto out;
+destroy_vq_cmds:
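+	/*
+	 * Unwind the vqs initialized so far; a vq that already has a backend
+	 * may have commands in flight, so leave its command array alone.
+	 */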
+ for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
+ if (!vhost_vq_get_backend(&vs->vqs[i].vq))
+ vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
+ }
+undepend:
+ for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
+ tpg = vs_tpg[i];
+ if (tpg) {
+ tpg->tv_tpg_vhost_count--;
+ target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
+ }
+ }
+ kfree(vs_tpg);
out:
mutex_unlock(&vs->dev.mutex);
mutex_unlock(&vhost_scsi_mutex);
@@ -1549,6 +1752,12 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
mutex_lock(&vq->mutex);
vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
+ /*
+ * Make sure cmds are not running before tearing them
+ * down.
+ */
+ vhost_scsi_flush(vs);
+ vhost_scsi_destroy_vq_cmds(vq);
}
}
/*
@@ -1811,11 +2020,19 @@ static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
+
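+	/*
+	 * Preallocate one TMF per linked port so a LUN RESET can be handled
+	 * without allocating memory in the I/O path.
+	 */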
+ tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
+ if (!tmf)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&tmf->queue_entry);
+ vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count++;
+ list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotplug(tpg, lun);
@@ -1830,11 +2047,16 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
+ struct vhost_scsi_tmf *tmf;
mutex_lock(&vhost_scsi_mutex);
mutex_lock(&tpg->tv_tpg_mutex);
tpg->tv_tpg_port_count--;
+ tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
+ queue_entry);
+ list_del(&tmf->queue_entry);
+ kfree(tmf);
mutex_unlock(&tpg->tv_tpg_mutex);
vhost_scsi_hotunplug(tpg, lun);
@@ -1842,23 +2064,6 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
mutex_unlock(&vhost_scsi_mutex);
}
-static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
-{
- struct vhost_scsi_cmd *tv_cmd;
- unsigned int i;
-
- if (!se_sess->sess_cmd_map)
- return;
-
- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
- kfree(tv_cmd->tvc_sgl);
- kfree(tv_cmd->tvc_prot_sgl);
- kfree(tv_cmd->tvc_upages);
- }
-}
-
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
struct config_item *item, const char *page, size_t count)
{
@@ -1898,45 +2103,6 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
NULL,
};
-static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
- struct se_session *se_sess, void *p)
-{
- struct vhost_scsi_cmd *tv_cmd;
- unsigned int i;
-
- for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
- tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
-
- tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
- goto out;
- }
-
- tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!tv_cmd->tvc_upages) {
- pr_err("Unable to allocate tv_cmd->tvc_upages\n");
- goto out;
- }
-
- tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
- sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!tv_cmd->tvc_prot_sgl) {
- pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
- goto out;
- }
- }
- return 0;
-out:
- vhost_scsi_free_cmd_map_res(se_sess);
- return -ENOMEM;
-}
-
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
@@ -1960,12 +2126,9 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
- VHOST_SCSI_DEFAULT_TAGS,
- sizeof(struct vhost_scsi_cmd),
+ tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
- (unsigned char *)name, tv_nexus,
- vhost_scsi_nexus_cb);
+ (unsigned char *)name, tv_nexus, NULL);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
@@ -2015,7 +2178,6 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
- vhost_scsi_free_cmd_map_res(se_sess);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
@@ -2155,6 +2317,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ INIT_LIST_HEAD(&tpg->tmf_queue);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 62a9bb0efc55..2754f3069738 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -22,7 +22,6 @@
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
-#include <linux/kernel.h>
#include "vhost.h"
@@ -48,6 +47,7 @@ struct vhost_vdpa {
int minor;
struct eventfd_ctx *config_ctx;
int in_batch;
+ struct vdpa_iova_range range;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -97,26 +97,23 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
return;
irq = ops->get_vq_irq(vdpa, qid);
- spin_lock(&vq->call_ctx.ctx_lock);
irq_bypass_unregister_producer(&vq->call_ctx.producer);
- if (!vq->call_ctx.ctx || irq < 0) {
- spin_unlock(&vq->call_ctx.ctx_lock);
+ if (!vq->call_ctx.ctx || irq < 0)
return;
- }
vq->call_ctx.producer.token = vq->call_ctx.ctx;
vq->call_ctx.producer.irq = irq;
ret = irq_bypass_register_producer(&vq->call_ctx.producer);
- spin_unlock(&vq->call_ctx.ctx_lock);
+ if (unlikely(ret))
+ dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
+ qid, vq->call_ctx.producer.token, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
struct vhost_virtqueue *vq = &v->vqs[qid];
- spin_lock(&vq->call_ctx.ctx_lock);
irq_bypass_unregister_producer(&vq->call_ctx.producer);
- spin_unlock(&vq->call_ctx.ctx_lock);
}
static void vhost_vdpa_reset(struct vhost_vdpa *v)
@@ -344,6 +341,16 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vhost_vdpa_iova_range range = {
+ .first = v->range.first,
+ .last = v->range.last,
+ };
+
+	if (copy_to_user(argp, &range, sizeof(range)))
+		return -EFAULT;
+
+	return 0;
+}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -428,12 +435,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
u64 features;
- long r;
+ long r = 0;
if (cmd == VHOST_SET_BACKEND_FEATURES) {
- r = copy_from_user(&features, featurep, sizeof(features));
- if (r)
- return r;
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
if (features & ~VHOST_VDPA_BACKEND_FEATURES)
return -EOPNOTSUPP;
vhost_set_backend_features(&v->vdev, features);
@@ -476,7 +482,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
break;
case VHOST_GET_BACKEND_FEATURES:
features = VHOST_VDPA_BACKEND_FEATURES;
- r = copy_to_user(featurep, &features, sizeof(features));
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ r = -EFAULT;
+ break;
+ case VHOST_VDPA_GET_IOVA_RANGE:
+ r = vhost_vdpa_get_iova_range(v, argp);
break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
@@ -595,19 +605,25 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct page **page_list;
- struct vm_area_struct **vmas;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
unsigned int gup_flags = FOLL_LONGTERM;
- unsigned long map_pfn, last_pfn = 0;
- unsigned long npages, lock_limit;
- unsigned long i, nmap = 0;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long locked, lock_limit, pinned, i;
u64 iova = msg->iova;
- long pinned;
int ret = 0;
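+	/* Reject requests that fall outside the device's usable IOVA window. */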
+ if (msg->iova < v->range.first ||
+ msg->iova + msg->size - 1 > v->range.last)
+ return -EINVAL;
+
if (vhost_iotlb_itree_first(iotlb, msg->iova,
msg->iova + msg->size - 1))
return -EEXIST;
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
if (msg->perm & VHOST_ACCESS_WO)
gup_flags |= FOLL_WRITE;
@@ -615,86 +631,61 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (!npages)
return -EINVAL;
- page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
- GFP_KERNEL);
- if (!page_list || !vmas) {
- ret = -ENOMEM;
- goto free;
- }
-
mmap_read_lock(dev->mm);
+ locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
- ret = -ENOMEM;
- goto unlock;
- }
- pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
- page_list, vmas);
- if (npages != pinned) {
- if (pinned < 0) {
- ret = pinned;
- } else {
- unpin_user_pages(page_list, pinned);
- ret = -ENOMEM;
- }
- goto unlock;
+ if (locked > lock_limit) {
+ ret = -ENOMEM;
+ goto out;
}
+ cur_base = msg->uaddr & PAGE_MASK;
iova &= PAGE_MASK;
- map_pfn = page_to_pfn(page_list[0]);
-
- /* One more iteration to avoid extra vdpa_map() call out of loop. */
- for (i = 0; i <= npages; i++) {
- unsigned long this_pfn;
- u64 csize;
-
- /* The last chunk may have no valid PFN next to it */
- this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
-
- if (last_pfn && (this_pfn == -1UL ||
- this_pfn != last_pfn + 1)) {
- /* Pin a contiguous chunk of memory */
- csize = last_pfn - map_pfn + 1;
- ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
- map_pfn << PAGE_SHIFT,
- msg->perm);
- if (ret) {
- /*
- * Unpin the rest chunks of memory on the
- * flight with no corresponding vdpa_map()
- * calls having been made yet. On the other
- * hand, vdpa_unmap() in the failure path
- * is in charge of accounting the number of
- * pinned pages for its own.
- * This asymmetrical pattern of accounting
- * is for efficiency to pin all pages at
- * once, while there is no other callsite
- * of vdpa_map() than here above.
- */
- unpin_user_pages(&page_list[nmap],
- npages - nmap);
- goto out;
+
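+	/*
+	 * Pin user pages in batches of up to one page worth of page pointers,
+	 * mapping each run of physically contiguous PFNs with a single
+	 * vhost_vdpa_map() call.
+	 */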
+ while (npages) {
+ pinned = min_t(unsigned long, npages, list_size);
+ ret = pin_user_pages(cur_base, pinned,
+ gup_flags, page_list, NULL);
+ if (ret != pinned)
+ goto out;
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < ret; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+				/* Map a contiguous chunk of memory */
+ csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ if (vhost_vdpa_map(v, iova, csize,
+ map_pfn << PAGE_SHIFT,
+ msg->perm))
+ goto out;
+ map_pfn = this_pfn;
+ iova += csize;
}
- atomic64_add(csize, &dev->mm->pinned_vm);
- nmap += csize;
- iova += csize << PAGE_SHIFT;
- map_pfn = this_pfn;
+
+ last_pfn = this_pfn;
}
- last_pfn = this_pfn;
+
+ cur_base += ret << PAGE_SHIFT;
+ npages -= ret;
}
- WARN_ON(nmap != npages);
+	/* Map the remaining contiguous chunk */
+ ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+ map_pfn << PAGE_SHIFT, msg->perm);
out:
- if (ret)
+ if (ret) {
vhost_vdpa_unmap(v, msg->iova, msg->size);
-unlock:
+ atomic64_sub(npages, &dev->mm->pinned_vm);
+ }
mmap_read_unlock(dev->mm);
-free:
- kvfree(vmas);
- kvfree(page_list);
+ free_page((unsigned long)page_list);
return ret;
}
@@ -790,6 +781,27 @@ static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
v->domain = NULL;
}
+static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
+{
+ struct vdpa_iova_range *range = &v->range;
+ struct iommu_domain_geometry geo;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_iova_range) {
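+	/*
+	 * Prefer the range reported by the parent device, fall back to the
+	 * IOMMU aperture, and otherwise allow the whole 64-bit IOVA space.
+	 */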
+ *range = ops->get_iova_range(vdpa);
+ } else if (v->domain &&
+ !iommu_domain_get_attr(v->domain,
+ DOMAIN_ATTR_GEOMETRY, &geo) &&
+ geo.force_aperture) {
+ range->first = geo.aperture_start;
+ range->last = geo.aperture_end;
+ } else {
+ range->first = 0;
+ range->last = ULLONG_MAX;
+ }
+}
+
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
struct vhost_vdpa *v;
@@ -830,6 +842,8 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
if (r)
goto err_init_iotlb;
+ vhost_vdpa_set_iova_range(v);
+
filep->private_data = v;
return 0;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ad45e1d27f0..a262e12c6dc2 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -302,9 +302,14 @@ static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
call_ctx->ctx = NULL;
memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
- spin_lock_init(&call_ctx->ctx_lock);
}
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
+{
+ return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
+}
+EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
@@ -1650,9 +1655,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
break;
}
- spin_lock(&vq->call_ctx.ctx_lock);
swap(ctx, vq->call_ctx.ctx);
- spin_unlock(&vq->call_ctx.ctx_lock);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
@@ -1897,7 +1900,7 @@ static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
- struct iovec iov[64];
+ struct iovec *iov = vq->log_iov;
int i, ret;
if (!vq->iotlb)
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9032d3c2a9f4..b063324c7669 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -64,7 +64,6 @@ enum vhost_uaddr_type {
struct vhost_vring_call {
struct eventfd_ctx *ctx;
struct irq_bypass_producer producer;
- spinlock_t ctx_lock;
};
/* The virtqueue structure describes a queue attached to a device. */
@@ -123,6 +122,7 @@ struct vhost_virtqueue {
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
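+	/* Scratch iovecs for log_used(), kept here rather than on the stack. */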
+ struct iovec log_iov[64];
/* Ring endianness. Defaults to legacy native endianness.
* Set to true when starting a modern virtio device. */
@@ -190,6 +190,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index e059a9a47cdf..8bd8b403f087 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -284,13 +284,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
desc_max = vrh->vring.num;
up_next = -1;
+ /* You must want something! */
+ if (WARN_ON(!riov && !wiov))
+ return -EINVAL;
+
if (riov)
riov->i = riov->used = 0;
- else if (wiov)
+ if (wiov)
wiov->i = wiov->used = 0;
- else
- /* You must want something! */
- BUG();
for (;;) {
void *addr;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 87f9fc238d28..d83c87b902c1 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -182,6 +182,14 @@ config BACKLIGHT_IPAQ_MICRO
computers. Say yes if you have one of the h3100/h3600/h3700
machines.
+config BACKLIGHT_KTD253
+ tristate "Backlight Driver for Kinetic KTD253"
+ depends on GPIOLIB || COMPILE_TEST
+ help
+	  Say y to enable the backlight driver for the Kinetic KTD253
+ which is a 1-wire GPIO-controlled backlight found in some mobile
+ phones.
+
config BACKLIGHT_LM3533
tristate "Backlight Driver for LM3533"
depends on MFD_LM3533
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 13463b99f1f9..685f3f1ca4df 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
+obj-$(CONFIG_BACKLIGHT_KTD253) += ktd253-backlight.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
new file mode 100644
index 000000000000..e3fee3f1f582
--- /dev/null
+++ b/drivers/video/backlight/ktd253-backlight.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Backlight driver for the Kinetic KTD253
+ * Based on code and know-how from the Samsung GT-S7710
+ * Gareth Phillips <gareth.phillips@samsung.com>
+ */
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+/* Current ratio is n/32 from 1/32 to 32/32 */
+#define KTD253_MIN_RATIO 1
+#define KTD253_MAX_RATIO 32
+#define KTD253_DEFAULT_RATIO 13
+
+#define KTD253_T_LOW_NS (200 + 10) /* Additional 10ns as safety factor */
+#define KTD253_T_HIGH_NS (200 + 10) /* Additional 10ns as safety factor */
+#define KTD253_T_OFF_MS 3
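+
+/*
+ * The single enable line doubles as the control interface: each low/high
+ * pulse steps the current ratio down by 1/32, wrapping from 1/32 back to
+ * 32/32, and holding the line low for KTD253_T_OFF_MS switches the chip
+ * off.
+ */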
+
+struct ktd253_backlight {
+ struct device *dev;
+ struct backlight_device *bl;
+ struct gpio_desc *gpiod;
+ u16 ratio;
+};
+
+static int ktd253_backlight_update_status(struct backlight_device *bl)
+{
+ struct ktd253_backlight *ktd253 = bl_get_data(bl);
+ int brightness = backlight_get_brightness(bl);
+ u16 target_ratio;
+ u16 current_ratio = ktd253->ratio;
+ unsigned long flags;
+
+ dev_dbg(ktd253->dev, "new brightness/ratio: %d/32\n", brightness);
+
+ target_ratio = brightness;
+
+ if (target_ratio == current_ratio)
+ /* This is already right */
+ return 0;
+
+ if (target_ratio == 0) {
+ gpiod_set_value_cansleep(ktd253->gpiod, 0);
+ /*
+ * We need to keep the GPIO low for at least this long
+ * to actually switch the KTD253 off.
+ */
+ msleep(KTD253_T_OFF_MS);
+ ktd253->ratio = 0;
+ return 0;
+ }
+
+ if (current_ratio == 0) {
+ gpiod_set_value_cansleep(ktd253->gpiod, 1);
+ ndelay(KTD253_T_HIGH_NS);
+ /* We always fall back to this when we power on */
+ current_ratio = KTD253_MAX_RATIO;
+ }
+
+ /*
+ * WARNING:
+ * The loop to set the correct current level is performed
+ * with interrupts disabled as it is timing critical.
+ * The maximum number of cycles of the loop is 32
+	 * so the time taken will be (T_LOW_NS + T_HIGH_NS + loop_time) * 32.
+ */
+ local_irq_save(flags);
+ while (current_ratio != target_ratio) {
+ /*
+ * These GPIO operations absolutely can NOT sleep so no
+ * _cansleep suffixes, and no using GPIO expanders on
+ * slow buses for this!
+ */
+ gpiod_set_value(ktd253->gpiod, 0);
+ ndelay(KTD253_T_LOW_NS);
+ gpiod_set_value(ktd253->gpiod, 1);
+ ndelay(KTD253_T_HIGH_NS);
+ /* After 1/32 we loop back to 32/32 */
+ if (current_ratio == KTD253_MIN_RATIO)
+ current_ratio = KTD253_MAX_RATIO;
+ else
+ current_ratio--;
+ }
+ local_irq_restore(flags);
+ ktd253->ratio = current_ratio;
+
+ dev_dbg(ktd253->dev, "new ratio set to %d/32\n", target_ratio);
+
+ return 0;
+}
+
+static const struct backlight_ops ktd253_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = ktd253_backlight_update_status,
+};
+
+static int ktd253_backlight_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct backlight_device *bl;
+ struct ktd253_backlight *ktd253;
+ u32 max_brightness;
+ u32 brightness;
+ int ret;
+
+ ktd253 = devm_kzalloc(dev, sizeof(*ktd253), GFP_KERNEL);
+ if (!ktd253)
+ return -ENOMEM;
+ ktd253->dev = dev;
+
+ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
+ if (ret)
+ max_brightness = KTD253_MAX_RATIO;
+ if (max_brightness > KTD253_MAX_RATIO) {
+ /* Clamp brightness to hardware max */
+ dev_err(dev, "illegal max brightness specified\n");
+ max_brightness = KTD253_MAX_RATIO;
+ }
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = KTD253_DEFAULT_RATIO;
+ if (brightness > max_brightness) {
+ /* Clamp default brightness to max brightness */
+ dev_err(dev, "default brightness exceeds max brightness\n");
+ brightness = max_brightness;
+ }
+
+ if (brightness)
+ /* This will be the default ratio when the KTD253 is enabled */
+ ktd253->ratio = KTD253_MAX_RATIO;
+ else
+ ktd253->ratio = 0;
+
+ ktd253->gpiod = devm_gpiod_get(dev, "enable",
+ brightness ? GPIOD_OUT_HIGH :
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ktd253->gpiod)) {
+ ret = PTR_ERR(ktd253->gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "gpio line missing or invalid.\n");
+ return ret;
+ }
+ gpiod_set_consumer_name(ktd253->gpiod, dev_name(dev));
+
+ bl = devm_backlight_device_register(dev, dev_name(dev), dev, ktd253,
+ &ktd253_backlight_ops, NULL);
+ if (IS_ERR(bl)) {
+ dev_err(dev, "failed to register backlight\n");
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = max_brightness;
+ /* When we just enable the GPIO line we set max brightness */
+ if (brightness) {
+ bl->props.brightness = brightness;
+ bl->props.power = FB_BLANK_UNBLANK;
+ } else {
+ bl->props.brightness = 0;
+ bl->props.power = FB_BLANK_POWERDOWN;
+ }
+
+ ktd253->bl = bl;
+ platform_set_drvdata(pdev, bl);
+ backlight_update_status(bl);
+
+ return 0;
+}
+
+static const struct of_device_id ktd253_backlight_of_match[] = {
+ { .compatible = "kinetic,ktd253" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ktd253_backlight_of_match);
+
+static struct platform_driver ktd253_backlight_driver = {
+ .driver = {
+ .name = "ktd253-backlight",
+ .of_match_table = ktd253_backlight_of_match,
+ },
+ .probe = ktd253_backlight_probe,
+};
+module_platform_driver(ktd253_backlight_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Kinetic KTD253 Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ktd253-backlight");
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 0ce181585008..8268ac43d54f 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -217,6 +217,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt(
num_entry);
if (ret < 0) {
dev_err(dev, "led-sources node is invalid.\n");
+ of_node_put(np);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index cff5e96fd988..6df6fcd132e3 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -11,7 +11,7 @@
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/slab.h>
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 113116d3585c..38765544345b 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -12,7 +12,7 @@
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/lcd.h>
#include <linux/fb.h>
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 39deb22a4180..ee33b8ec62bb 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -119,6 +119,7 @@ config STI_CONSOLE
bool "STI text console"
depends on PARISC && HAS_IOMEM
select FONT_SUPPORT
+ select CRC32
default y
help
The STI console is the builtin display/keyboard on HP-PARISC
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index cd51b7a17a21..d9c682ae0392 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -125,6 +125,8 @@ static const struct linux_logo *newport_show_logo(void)
npregs->go.hostrw0 = *data++ << 24;
return logo;
+#else
+ return NULL;
#endif /* CONFIG_LOGO_SGI_CLUT224 */
}
@@ -671,11 +673,6 @@ static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
return true;
}
-static int newport_set_origin(struct vc_data *vc)
-{
- return 0;
-}
-
static void newport_save_screen(struct vc_data *vc) { }
const struct consw newport_con = {
@@ -692,7 +689,6 @@ const struct consw newport_con = {
.con_blank = newport_blank,
.con_font_set = newport_font_set,
.con_font_default = newport_font_default,
- .con_set_origin = newport_set_origin,
.con_save_screen = newport_save_screen
};
@@ -744,18 +740,6 @@ static struct gio_driver newport_driver = {
.probe = newport_probe,
.remove = newport_remove,
};
-
-int __init newport_console_init(void)
-{
- return gio_register_driver(&newport_driver);
-}
-
-void __exit newport_console_exit(void)
-{
- gio_unregister_driver(&newport_driver);
-}
-
-module_init(newport_console_init);
-module_exit(newport_console_exit);
+module_driver(newport_driver, gio_register_driver, gio_unregister_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 21a5c280c8c9..1b451165311c 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -2,7 +2,7 @@
* linux/drivers/video/console/sticon.c - console driver using HP's STI firmware
*
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2002-2020 Helge Deller <deller@gmx.de>
*
* Based on linux/drivers/video/vgacon.c and linux/drivers/video/fbcon.c,
* which were
@@ -43,6 +43,9 @@
#include <linux/kd.h>
#include <linux/selection.h>
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/font.h>
+#include <linux/crc32.h>
#include <asm/io.h>
@@ -52,27 +55,15 @@
#define BLANK 0
static int vga_is_gfx;
-/* this is the sti_struct used for this console */
-static struct sti_struct *sticon_sti;
-
-/* Software scrollback */
-static unsigned long softback_buf, softback_curr;
-static unsigned long softback_in;
-static unsigned long /* softback_top, */ softback_end;
-static int softback_lines;
-
-/* software cursor */
-static int cursor_drawn;
-#define CURSOR_DRAW_DELAY (1)
-#define DEFAULT_CURSOR_BLINK_RATE (20)
+#define STI_DEF_FONT sticon_sti->font
-static int vbl_cursor_cnt;
+/* borrowed from fbcon.c */
+#define FNTREFCOUNT(fd) (fd->refcount)
+#define FNTCRC(fd) (fd->crc)
+static struct sti_cooked_font *font_data[MAX_NR_CONSOLES];
-static inline void cursor_undrawn(void)
-{
- vbl_cursor_cnt = 0;
- cursor_drawn = 0;
-}
+/* this is the sti_struct used for this console */
+static struct sti_struct *sticon_sti;
static const char *sticon_startup(void)
{
@@ -81,61 +72,43 @@ static const char *sticon_startup(void)
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
- int redraw_cursor = 0;
-
if (vga_is_gfx || console_blanked)
return;
if (conp->vc_mode != KD_TEXT)
return;
-#if 0
- if ((p->cursor_x == xpos) && (p->cursor_y == ypos)) {
- cursor_undrawn();
- redraw_cursor = 1;
- }
-#endif
- sti_putc(sticon_sti, c, ypos, xpos);
-
- if (redraw_cursor)
- vbl_cursor_cnt = CURSOR_DRAW_DELAY;
+ sti_putc(sticon_sti, c, ypos, xpos, font_data[conp->vc_num]);
}
static void sticon_putcs(struct vc_data *conp, const unsigned short *s,
int count, int ypos, int xpos)
{
- int redraw_cursor = 0;
-
if (vga_is_gfx || console_blanked)
return;
if (conp->vc_mode != KD_TEXT)
return;
-#if 0
- if ((p->cursor_y == ypos) && (xpos <= p->cursor_x) &&
- (p->cursor_x < (xpos + count))) {
- cursor_undrawn();
- redraw_cursor = 1;
- }
-#endif
-
while (count--) {
- sti_putc(sticon_sti, scr_readw(s++), ypos, xpos++);
+ sti_putc(sticon_sti, scr_readw(s++), ypos, xpos++,
+ font_data[conp->vc_num]);
}
-
- if (redraw_cursor)
- vbl_cursor_cnt = CURSOR_DRAW_DELAY;
}
static void sticon_cursor(struct vc_data *conp, int mode)
{
unsigned short car1;
+ /* no cursor update if screen is blanked */
+ if (vga_is_gfx || console_blanked)
+ return;
+
car1 = conp->vc_screenbuf[conp->state.x + conp->state.y * conp->vc_cols];
switch (mode) {
case CM_ERASE:
- sti_putc(sticon_sti, car1, conp->state.y, conp->state.x);
+ sti_putc(sticon_sti, car1, conp->state.y, conp->state.x,
+ font_data[conp->vc_num]);
break;
case CM_MOVE:
case CM_DRAW:
@@ -146,7 +119,7 @@ static void sticon_cursor(struct vc_data *conp, int mode)
case CUR_TWO_THIRDS:
case CUR_BLOCK:
sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11),
- conp->state.y, conp->state.x);
+ conp->state.y, conp->state.x, font_data[conp->vc_num]);
break;
}
break;
@@ -165,42 +138,164 @@ static bool sticon_scroll(struct vc_data *conp, unsigned int t,
switch (dir) {
case SM_UP:
- sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols);
- sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+ sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols,
+ font_data[conp->vc_num]);
+ sti_clear(sti, b - count, 0, count, conp->vc_cols,
+ conp->vc_video_erase_char, font_data[conp->vc_num]);
break;
case SM_DOWN:
- sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols);
- sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+ sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols,
+ font_data[conp->vc_num]);
+ sti_clear(sti, t, 0, count, conp->vc_cols,
+ conp->vc_video_erase_char, font_data[conp->vc_num]);
break;
}
return false;
}
+static int sticon_set_def_font(int unit, struct console_font *op)
+{
+ if (font_data[unit] != STI_DEF_FONT) {
+ if (--FNTREFCOUNT(font_data[unit]) == 0) {
+ kfree(font_data[unit]->raw_ptr);
+ kfree(font_data[unit]);
+ }
+ font_data[unit] = STI_DEF_FONT;
+ }
+
+ return 0;
+}
+
+static int sticon_set_font(struct vc_data *vc, struct console_font *op)
+{
+ struct sti_struct *sti = sticon_sti;
+ int vc_cols, vc_rows, vc_old_cols, vc_old_rows;
+ int unit = vc->vc_num;
+ int w = op->width;
+ int h = op->height;
+ int size, i, bpc, pitch;
+ struct sti_rom_font *new_font;
+ struct sti_cooked_font *cooked_font;
+ unsigned char *data = op->data, *p;
+
+ if ((w < 6) || (h < 6) || (w > 32) || (h > 32)
+ || (op->charcount != 256 && op->charcount != 512))
+ return -EINVAL;
+ pitch = ALIGN(w, 8) / 8;
+ bpc = pitch * h;
+ size = bpc * op->charcount;
+
+ new_font = kmalloc(sizeof(*new_font) + size, STI_LOWMEM);
+ if (!new_font)
+ return -ENOMEM;
+
+ new_font->first_char = 0;
+ new_font->last_char = op->charcount - 1;
+ new_font->width = w;
+ new_font->height = h;
+ new_font->font_type = STI_FONT_HPROMAN8;
+ new_font->bytes_per_char = bpc;
+ new_font->underline_height = 0;
+ new_font->underline_pos = 0;
+
+ cooked_font = kzalloc(sizeof(*cooked_font), GFP_KERNEL);
+ if (!cooked_font) {
+ kfree(new_font);
+ return -ENOMEM;
+ }
+ cooked_font->raw = new_font;
+ cooked_font->raw_ptr = new_font;
+ cooked_font->width = w;
+ cooked_font->height = h;
+ FNTREFCOUNT(cooked_font) = 0; /* usage counter */
+
+ p = (unsigned char *) new_font;
+ p += sizeof(*new_font);
+ for (i = 0; i < op->charcount; i++) {
+ memcpy(p, data, bpc);
+ data += pitch*32;
+ p += bpc;
+ }
+ FNTCRC(cooked_font) = crc32(0, new_font, size + sizeof(*new_font));
+ sti_font_convert_bytemode(sti, cooked_font);
+ new_font = cooked_font->raw_ptr;
+
+ /* check if font is already used by other console */
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ if (font_data[i] != STI_DEF_FONT
+ && (FNTCRC(font_data[i]) == FNTCRC(cooked_font))) {
+ kfree(new_font);
+ kfree(cooked_font);
+ /* current font is the same as the new one */
+ if (i == unit)
+ return 0;
+ cooked_font = font_data[i];
+ new_font = cooked_font->raw_ptr;
+ break;
+ }
+ }
+
+	/* clear the screen with the old font: we may now have fewer rows */
+ vc_old_rows = vc->vc_rows;
+ vc_old_cols = vc->vc_cols;
+ sti_clear(sticon_sti, 0, 0, vc_old_rows, vc_old_cols,
+ vc->vc_video_erase_char, font_data[vc->vc_num]);
+
+ /* delete old font in case it is a user font */
+ sticon_set_def_font(unit, NULL);
+
+ FNTREFCOUNT(cooked_font)++;
+ font_data[unit] = cooked_font;
+
+ vc_cols = sti_onscreen_x(sti) / cooked_font->width;
+ vc_rows = sti_onscreen_y(sti) / cooked_font->height;
+ vc_resize(vc, vc_cols, vc_rows);
+
+ /* need to repaint screen if cols & rows are same as old font */
+ if (vc_cols == vc_old_cols && vc_rows == vc_old_rows)
+ update_screen(vc);
+
+ return 0;
+}
+
+static int sticon_font_default(struct vc_data *vc, struct console_font *op, char *name)
+{
+ return sticon_set_def_font(vc->vc_num, op);
+}
+
+static int sticon_font_set(struct vc_data *vc, struct console_font *font,
+ unsigned int flags)
+{
+ return sticon_set_font(vc, font);
+}
+
static void sticon_init(struct vc_data *c, int init)
{
struct sti_struct *sti = sticon_sti;
int vc_cols, vc_rows;
sti_set(sti, 0, 0, sti_onscreen_y(sti), sti_onscreen_x(sti), 0);
- vc_cols = sti_onscreen_x(sti) / sti->font_width;
- vc_rows = sti_onscreen_y(sti) / sti->font_height;
+ vc_cols = sti_onscreen_x(sti) / sti->font->width;
+ vc_rows = sti_onscreen_y(sti) / sti->font->height;
c->vc_can_do_color = 1;
if (init) {
c->vc_cols = vc_cols;
c->vc_rows = vc_rows;
} else {
- /* vc_rows = (c->vc_rows > vc_rows) ? vc_rows : c->vc_rows; */
- /* vc_cols = (c->vc_cols > vc_cols) ? vc_cols : c->vc_cols; */
vc_resize(c, vc_cols, vc_rows);
-/* vc_resize_con(vc_rows, vc_cols, c->vc_num); */
}
}
static void sticon_deinit(struct vc_data *c)
{
+ int i;
+
+ /* free memory used by user font */
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ sticon_set_def_font(i, NULL);
}
static void sticon_clear(struct vc_data *conp, int sy, int sx, int height,
@@ -209,7 +304,8 @@ static void sticon_clear(struct vc_data *conp, int sy, int sx, int height,
if (!height || !width)
return;
- sti_clear(sticon_sti, sy, sx, height, width, conp->vc_video_erase_char);
+ sti_clear(sticon_sti, sy, sx, height, width,
+ conp->vc_video_erase_char, font_data[conp->vc_num]);
}
static int sticon_switch(struct vc_data *conp)
@@ -217,11 +313,6 @@ static int sticon_switch(struct vc_data *conp)
return 1; /* needs refreshing */
}
-static int sticon_set_origin(struct vc_data *conp)
-{
- return 0;
-}
-
static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
{
if (blank == 0) {
@@ -229,65 +320,13 @@ static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
vga_is_gfx = 0;
return 1;
}
- sticon_set_origin(c);
- sti_clear(sticon_sti, 0,0, c->vc_rows, c->vc_cols, BLANK);
+ sti_clear(sticon_sti, 0, 0, c->vc_rows, c->vc_cols, BLANK,
+ font_data[c->vc_num]);
if (mode_switch)
vga_is_gfx = 1;
return 1;
}
-static u16 *sticon_screen_pos(struct vc_data *conp, int offset)
-{
- int line;
- unsigned long p;
-
- if (conp->vc_num != fg_console || !softback_lines)
- return (u16 *)(conp->vc_origin + offset);
- line = offset / conp->vc_size_row;
- if (line >= softback_lines)
- return (u16 *)(conp->vc_origin + offset - softback_lines * conp->vc_size_row);
- p = softback_curr + offset;
- if (p >= softback_end)
- p += softback_buf - softback_end;
- return (u16 *)p;
-}
-
-static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos,
- int *px, int *py)
-{
- int x, y;
- unsigned long ret;
- if (pos >= conp->vc_origin && pos < conp->vc_scr_end) {
- unsigned long offset = (pos - conp->vc_origin) / 2;
-
- x = offset % conp->vc_cols;
- y = offset / conp->vc_cols;
- if (conp->vc_num == fg_console)
- y += softback_lines;
- ret = pos + (conp->vc_cols - x) * 2;
- } else if (conp->vc_num == fg_console && softback_lines) {
- unsigned long offset = pos - softback_curr;
-
- if (pos < softback_curr)
- offset += softback_end - softback_buf;
- offset /= 2;
- x = offset % conp->vc_cols;
- y = offset / conp->vc_cols;
- ret = pos + (conp->vc_cols - x) * 2;
- if (ret == softback_end)
- ret = softback_buf;
- if (ret == softback_in)
- ret = conp->vc_origin;
- } else {
- /* Should not happen */
- x = y = 0;
- ret = conp->vc_origin;
- }
- if (px) *px = x;
- if (py) *py = y;
- return ret;
-}
-
static u8 sticon_build_attr(struct vc_data *conp, u8 color,
enum vc_intensity intens,
bool blink, bool underline, bool reverse,
@@ -318,10 +357,6 @@ static void sticon_invert_region(struct vc_data *conp, u16 *p, int count)
}
}
-static void sticon_save_screen(struct vc_data *conp)
-{
-}
-
static const struct consw sti_con = {
.owner = THIS_MODULE,
.con_startup = sticon_startup,
@@ -334,19 +369,18 @@ static const struct consw sti_con = {
.con_scroll = sticon_scroll,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
- .con_set_origin = sticon_set_origin,
- .con_save_screen = sticon_save_screen,
+ .con_font_set = sticon_font_set,
+ .con_font_default = sticon_font_default,
.con_build_attr = sticon_build_attr,
.con_invert_region = sticon_invert_region,
- .con_screen_pos = sticon_screen_pos,
- .con_getxy = sticon_getxy,
};
static int __init sticonsole_init(void)
{
- int err;
+ int err, i;
+
/* already initialized ? */
if (sticon_sti)
return 0;
@@ -355,14 +389,16 @@ static int __init sticonsole_init(void)
if (!sticon_sti)
return -ENODEV;
- if (conswitchp == &dummy_con) {
- printk(KERN_INFO "sticon: Initializing STI text console.\n");
- console_lock();
- err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1, 1);
- console_unlock();
- return err;
- }
- return 0;
+ for (i = 0; i < MAX_NR_CONSOLES; i++)
+ font_data[i] = STI_DEF_FONT;
+
+ pr_info("sticon: Initializing STI text console.\n");
+ console_lock();
+ err = do_take_over_console(&sti_con, 0, MAX_NR_CONSOLES - 1,
+ PAGE0->mem_cons.cl_class != CL_DUPLEX);
+ console_unlock();
+
+ return err;
}
module_init(sticonsole_init);
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 84c3ca37040a..6a26a364f9bd 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -4,7 +4,7 @@
* core code for console driver using HP's STI firmware
*
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2001-2013 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2020 Helge Deller <deller@gmx.de>
* Copyright (C) 2001-2002 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
* TODO:
@@ -14,6 +14,8 @@
*
*/
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -133,16 +135,17 @@ static const struct sti_font_flags default_font_flags = {
};
void
-sti_putc(struct sti_struct *sti, int c, int y, int x)
+sti_putc(struct sti_struct *sti, int c, int y, int x,
+ struct sti_cooked_font *font)
{
struct sti_font_inptr *inptr = &sti->sti_data->font_inptr;
struct sti_font_inptr inptr_default = {
- .font_start_addr= STI_PTR(sti->font->raw),
+ .font_start_addr = STI_PTR(font->raw),
.index = c_index(sti, c),
.fg_color = c_fg(sti, c),
.bg_color = c_bg(sti, c),
- .dest_x = x * sti->font_width,
- .dest_y = y * sti->font_height,
+ .dest_x = x * font->width,
+ .dest_y = y * font->height,
};
struct sti_font_outptr *outptr = &sti->sti_data->font_outptr;
s32 ret;
@@ -193,18 +196,18 @@ sti_set(struct sti_struct *sti, int src_y, int src_x,
void
sti_clear(struct sti_struct *sti, int src_y, int src_x,
- int height, int width, int c)
+ int height, int width, int c, struct sti_cooked_font *font)
{
struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
struct sti_blkmv_inptr inptr_default = {
.fg_color = c_fg(sti, c),
.bg_color = c_bg(sti, c),
- .src_x = src_x * sti->font_width,
- .src_y = src_y * sti->font_height,
- .dest_x = src_x * sti->font_width,
- .dest_y = src_y * sti->font_height,
- .width = width * sti->font_width,
- .height = height* sti->font_height,
+ .src_x = src_x * font->width,
+ .src_y = src_y * font->height,
+ .dest_x = src_x * font->width,
+ .dest_y = src_y * font->height,
+ .width = width * font->width,
+ .height = height * font->height,
};
struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
s32 ret;
@@ -225,16 +228,17 @@ static const struct sti_blkmv_flags default_blkmv_flags = {
void
sti_bmove(struct sti_struct *sti, int src_y, int src_x,
- int dst_y, int dst_x, int height, int width)
+ int dst_y, int dst_x, int height, int width,
+ struct sti_cooked_font *font)
{
struct sti_blkmv_inptr *inptr = &sti->sti_data->blkmv_inptr;
struct sti_blkmv_inptr inptr_default = {
- .src_x = src_x * sti->font_width,
- .src_y = src_y * sti->font_height,
- .dest_x = dst_x * sti->font_width,
- .dest_y = dst_y * sti->font_height,
- .width = width * sti->font_width,
- .height = height* sti->font_height,
+ .src_x = src_x * font->width,
+ .src_y = src_y * font->height,
+ .dest_x = dst_x * font->width,
+ .dest_y = dst_y * font->height,
+ .width = width * font->width,
+ .height = height * font->height,
};
struct sti_blkmv_outptr *outptr = &sti->sti_data->blkmv_outptr;
s32 ret;
@@ -301,36 +305,32 @@ __setup("sti=", sti_setup);
-static char *font_name[MAX_STI_ROMS];
-static int font_index[MAX_STI_ROMS],
- font_height[MAX_STI_ROMS],
- font_width[MAX_STI_ROMS];
+static char *font_name;
+static int font_index,
+ font_height,
+ font_width;
#ifndef MODULE
static int sti_font_setup(char *str)
{
- char *x;
- int i = 0;
+ /*
+ * The default font can be selected in various ways.
+ * a) sti_font=VGA8x16, sti_font=10x20, sti_font=10*20 selects
+	 *    a built-in Linux framebuffer font.
+	 * b) sti_font=<index>, where index is (1..x) with 1 selecting
+	 *    the first HP STI ROM built-in font.
+ */
- /* we accept sti_font=VGA8x16, sti_font=10x20, sti_font=10*20
- * or sti_font=7 style command lines. */
+ if (*str >= '0' && *str <= '9') {
+ char *x;
- while (i<MAX_STI_ROMS && str && *str) {
- if (*str>='0' && *str<='9') {
- if ((x = strchr(str, 'x')) || (x = strchr(str, '*'))) {
- font_height[i] = simple_strtoul(str, NULL, 0);
- font_width[i] = simple_strtoul(x+1, NULL, 0);
- } else {
- font_index[i] = simple_strtoul(str, NULL, 0);
- }
+ if ((x = strchr(str, 'x')) || (x = strchr(str, '*'))) {
+ font_height = simple_strtoul(str, NULL, 0);
+ font_width = simple_strtoul(x+1, NULL, 0);
} else {
- font_name[i] = str; /* fb font name */
+ font_index = simple_strtoul(str, NULL, 0);
}
-
- if ((x = strchr(str, ',')))
- *x++ = 0;
- str = x;
-
- i++;
+ } else {
+ font_name = str; /* fb font name */
}
return 1;
@@ -344,7 +344,7 @@ static int sti_font_setup(char *str)
* framebuffer font names (e.g. VGA8x16, SUN22x18).
* This is only available if the fonts have been statically compiled
* in with e.g. the CONFIG_FONT_8x16 or CONFIG_FONT_SUN12x22 options.
- * - sti_font=<number>
+ * - sti_font=<number> (<number> = 1,2,3,...)
* most STI ROMs have built-in HP specific fonts, which can be selected
* by giving the desired number to the sticon driver.
 * NOTE: This number is machine and STI ROM dependent.
@@ -364,8 +364,7 @@ static void sti_dump_globcfg(struct sti_glob_cfg *glob_cfg,
{
struct sti_glob_cfg_ext *cfg;
- DPRINTK((KERN_INFO
- "%d text planes\n"
+ pr_debug("%d text planes\n"
"%4d x %4d screen resolution\n"
"%4d x %4d offscreen\n"
"%4d x %4d layout\n"
@@ -382,12 +381,11 @@ static void sti_dump_globcfg(struct sti_glob_cfg *glob_cfg,
glob_cfg->region_ptrs[4], glob_cfg->region_ptrs[5],
glob_cfg->region_ptrs[6], glob_cfg->region_ptrs[7],
glob_cfg->reent_lvl,
- glob_cfg->save_addr));
+ glob_cfg->save_addr);
/* dump extended cfg */
cfg = PTR_STI((unsigned long)glob_cfg->ext_ptr);
- DPRINTK(( KERN_INFO
- "monitor %d\n"
+ pr_debug("monitor %d\n"
"in friendly mode: %d\n"
"power consumption %d watts\n"
"freq ref %d\n"
@@ -396,20 +394,19 @@ static void sti_dump_globcfg(struct sti_glob_cfg *glob_cfg,
cfg->friendly_boot,
cfg->power,
cfg->freq_ref,
- cfg->sti_mem_addr, sti_mem_request));
+ cfg->sti_mem_addr, sti_mem_request);
}
static void sti_dump_outptr(struct sti_struct *sti)
{
- DPRINTK((KERN_INFO
- "%d bits per pixel\n"
+ pr_debug("%d bits per pixel\n"
"%d used bits\n"
"%d planes\n"
"attributes %08x\n",
sti->sti_data->inq_outptr.bits_per_pixel,
sti->sti_data->inq_outptr.bits_used,
sti->sti_data->inq_outptr.planes,
- sti->sti_data->inq_outptr.attributes));
+ sti->sti_data->inq_outptr.attributes);
}
static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
@@ -448,8 +445,7 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
if (offs != PCI_ROM_ADDRESS &&
(offs < PCI_BASE_ADDRESS_0 ||
offs > PCI_BASE_ADDRESS_5)) {
- printk (KERN_WARNING
- "STI pci region mapping for region %d (%02x) can't be mapped\n",
+ pr_warn("STI pci region mapping for region %d (%02x) can't be mapped\n",
i,sti->rm_entry[i]);
continue;
}
@@ -464,14 +460,14 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
if (len)
glob_cfg->region_ptrs[i] = sti->regions_phys[i];
- DPRINTK(("region #%d: phys %08lx, region_ptr %08x, len=%lukB, "
+ pr_debug("region #%d: phys %08lx, region_ptr %08x, len=%lukB, "
"btlb=%d, sysonly=%d, cache=%d, last=%d\n",
i, sti->regions_phys[i], glob_cfg->region_ptrs[i],
len/1024,
sti->regions[i].region_desc.btlb,
sti->regions[i].region_desc.sys_only,
sti->regions[i].region_desc.cache,
- sti->regions[i].region_desc.last));
+ sti->regions[i].region_desc.last);
/* last entry reached ? */
if (sti->regions[i].region_desc.last)
@@ -479,8 +475,8 @@ static int sti_init_glob_cfg(struct sti_struct *sti, unsigned long rom_address,
}
if (++i<8 && sti->regions[i].region)
- printk(KERN_WARNING "%s: *future ptr (0x%8x) not yet supported !\n",
- __FILE__, sti->regions[i].region);
+ pr_warn("future ptr (0x%8x) not yet supported !\n",
+ sti->regions[i].region);
glob_cfg_ext->sti_mem_addr = STI_PTR(sti_mem_addr);
@@ -538,6 +534,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
}
cooked_font->raw = nf;
+ cooked_font->raw_ptr = nf;
cooked_font->next_font = NULL;
cooked_rom->font_start = cooked_font;
@@ -552,24 +549,38 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
}
#endif
-static struct sti_cooked_font *sti_select_font(struct sti_cooked_rom *rom,
- int (*search_font_fnc)(struct sti_cooked_rom *, int, int))
+static int sti_search_font(struct sti_cooked_rom *rom, int height, int width)
+{
+ struct sti_cooked_font *font;
+ int i = 0;
+
+ for (font = rom->font_start; font; font = font->next_font, i++) {
+ if ((font->raw->width == width) &&
+ (font->raw->height == height))
+ return i;
+ }
+ return 0;
+}
+
+static struct sti_cooked_font *sti_select_font(struct sti_cooked_rom *rom)
{
struct sti_cooked_font *font;
int i;
- int index = num_sti_roms;
/* check for framebuffer-font first */
- if ((font = sti_select_fbfont(rom, font_name[index])))
- return font;
+ if (!font_index) {
+ font = sti_select_fbfont(rom, font_name);
+ if (font)
+ return font;
+ }
- if (font_width[index] && font_height[index])
- font_index[index] = search_font_fnc(rom,
- font_height[index], font_width[index]);
+ if (font_width && font_height)
+ font_index = sti_search_font(rom,
+ font_height, font_width);
- for (font = rom->font_start, i = font_index[index];
- font && (i > 0);
- font = font->next_font, i--);
+ for (font = rom->font_start, i = font_index - 1;
+ font && (i > 0);
+ font = font->next_font, i--);
if (font)
return font;
@@ -578,20 +589,35 @@ static struct sti_cooked_font *sti_select_font(struct sti_cooked_rom *rom,
}
-static void sti_dump_rom(struct sti_rom *rom)
+static void sti_dump_rom(struct sti_struct *sti)
{
- printk(KERN_INFO " id %04x-%04x, conforms to spec rev. %d.%02x\n",
+ struct sti_rom *rom = sti->rom->raw;
+ struct sti_cooked_font *font_start;
+ int nr;
+
+ pr_info(" id %04x-%04x, conforms to spec rev. %d.%02x\n",
rom->graphics_id[0],
rom->graphics_id[1],
rom->revno[0] >> 4,
rom->revno[0] & 0x0f);
- DPRINTK((" supports %d monitors\n", rom->num_mons));
- DPRINTK((" font start %08x\n", rom->font_start));
- DPRINTK((" region list %08x\n", rom->region_list));
- DPRINTK((" init_graph %08x\n", rom->init_graph));
- DPRINTK((" bus support %02x\n", rom->bus_support));
- DPRINTK((" ext bus support %02x\n", rom->ext_bus_support));
- DPRINTK((" alternate code type %d\n", rom->alt_code_type));
+ pr_debug(" supports %d monitors\n", rom->num_mons);
+ pr_debug(" font start %08x\n", rom->font_start);
+ pr_debug(" region list %08x\n", rom->region_list);
+ pr_debug(" init_graph %08x\n", rom->init_graph);
+ pr_debug(" bus support %02x\n", rom->bus_support);
+ pr_debug(" ext bus support %02x\n", rom->ext_bus_support);
+ pr_debug(" alternate code type %d\n", rom->alt_code_type);
+
+ font_start = sti->rom->font_start;
+ nr = 0;
+ while (font_start) {
+ struct sti_rom_font *f = font_start->raw;
+
+ pr_info(" built-in font #%d: size %dx%d, chars %d-%d, bpc %d\n", ++nr,
+ f->width, f->height,
+ f->first_char, f->last_char, f->bytes_per_char);
+ font_start = font_start->next_font;
+ }
}
@@ -628,39 +654,34 @@ static int sti_cook_fonts(struct sti_cooked_rom *cooked_rom,
return 1;
}
-
-static int sti_search_font(struct sti_cooked_rom *rom, int height, int width)
-{
- struct sti_cooked_font *font;
- int i = 0;
-
- for (font = rom->font_start; font; font = font->next_font, i++) {
- if ((font->raw->width == width) &&
- (font->raw->height == height))
- return i;
- }
- return 0;
-}
-
#define BMODE_RELOCATE(offset) offset = (offset) / 4;
#define BMODE_LAST_ADDR_OFFS 0x50
-static void *sti_bmode_font_raw(struct sti_cooked_font *f)
+void sti_font_convert_bytemode(struct sti_struct *sti, struct sti_cooked_font *f)
{
unsigned char *n, *p, *q;
- int size = f->raw->bytes_per_char*256+sizeof(struct sti_rom_font);
-
+ int size = f->raw->bytes_per_char * 256 + sizeof(struct sti_rom_font);
+ struct sti_rom_font *old_font;
+
+ if (sti->wordmode)
+ return;
+
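+	/*
+	 * Byte-mode ROMs expect each font byte in the low byte of a 32-bit
+	 * word, so re-lay out the font into a buffer four times the size.
+	 */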
+ old_font = f->raw_ptr;
n = kcalloc(4, size, STI_LOWMEM);
+ f->raw_ptr = n;
if (!n)
- return NULL;
+ return;
p = n + 3;
- q = (unsigned char *)f->raw;
+ q = (unsigned char *) f->raw;
while (size--) {
*p = *q++;
- p+=4;
+ p += 4;
}
- return n + 3;
+	/* store the new byte-mode font pointer and free the old font */
+ f->raw = (struct sti_rom_font *) (n + 3);
+ kfree(old_font);
}
+EXPORT_SYMBOL(sti_font_convert_bytemode);
static void sti_bmode_rom_copy(unsigned long base, unsigned long count,
void *dest)
@@ -747,7 +768,7 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
goto out_err;
if (!sti_cook_fonts(cooked, raw)) {
- printk(KERN_ERR "No font found for STI at %08lx\n", address);
+ pr_warn("No font found for STI at %08lx\n", address);
goto out_err;
}
@@ -756,7 +777,8 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
address = (unsigned long) STI_PTR(raw);
- pr_info("STI ROM supports 32 %sbit firmware functions.\n",
+ pr_info("STI %s ROM supports 32 %sbit firmware functions.\n",
+ wordmode ? "word mode" : "byte mode",
raw->alt_code_type == ALT_CODE_TYPE_PA_RISC_64
? "and 64 " : "");
@@ -767,18 +789,17 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti,
sti->rom = cooked;
sti->rom->raw = raw;
-
- sti->font = sti_select_font(sti->rom, sti_search_font);
- sti->font_width = sti->font->raw->width;
- sti->font_height = sti->font->raw->height;
- if (!wordmode)
- sti->font->raw = sti_bmode_font_raw(sti->font);
+ sti_dump_rom(sti);
+
+ sti->wordmode = wordmode;
+ sti->font = sti_select_font(sti->rom);
+ sti->font->width = sti->font->raw->width;
+ sti->font->height = sti->font->raw->height;
+ sti_font_convert_bytemode(sti, sti->font);
sti->sti_mem_request = raw->sti_mem_req;
sti->graphics_id[0] = raw->graphics_id[0];
sti->graphics_id[1] = raw->graphics_id[1];
-
- sti_dump_rom(raw);
/* check if the ROM routines in this card are compatible */
if (wordmode || sti->graphics_id[1] != 0x09A02587)
@@ -804,9 +825,9 @@ ok:
return 1;
msg_not_supported:
- printk(KERN_ERR "Sorry, this GSC/STI card is not yet supported.\n");
- printk(KERN_ERR "Please see http://parisc-linux.org/faq/"
- "graphics-howto.html for more info.\n");
+ pr_warn("Sorry, this GSC/STI card is not yet supported.\n");
+ pr_warn("Please see https://parisc.wiki.kernel.org/"
+ "index.php/Graphics_howto for more info.\n");
/* fall through */
out_err:
kfree(raw);
@@ -823,7 +844,7 @@ static struct sti_struct *sti_try_rom_generic(unsigned long address,
u32 sig;
if (num_sti_roms >= MAX_STI_ROMS) {
- printk(KERN_WARNING "maximum number of STI ROMS reached !\n");
+ pr_warn("maximum number of STI ROMS reached !\n");
return NULL;
}
@@ -849,16 +870,15 @@ test_rom:
if (i != 1) {
/* The ROM could have multiple architecture
* dependent images (e.g. i386, parisc,...) */
- printk(KERN_WARNING
- "PCI ROM is not a STI ROM type image (0x%8x)\n", i);
+ pr_warn("PCI ROM is not a STI ROM type image (0x%8x)\n", i);
goto out_err;
}
sti->pd = pd;
i = gsc_readl(address+0x0c);
- DPRINTK(("PCI ROM size (from header) = %d kB\n",
- le16_to_cpu(i>>16)*512/1024));
+ pr_debug("PCI ROM size (from header) = %d kB\n",
+ le16_to_cpu(i>>16)*512/1024);
rm_offset = le16_to_cpu(i & 0xffff);
if (rm_offset) {
/* read 16 bytes from the pci region mapper array */
@@ -867,29 +887,24 @@ test_rom:
*rm++ = gsc_readl(address+rm_offset+0x04);
*rm++ = gsc_readl(address+rm_offset+0x08);
*rm++ = gsc_readl(address+rm_offset+0x0c);
- DPRINTK(("PCI region Mapper offset = %08x: ",
- rm_offset));
- for (i=0; i<16; i++)
- DPRINTK(("%02x ", sti->rm_entry[i]));
- DPRINTK(("\n"));
}
address += le32_to_cpu(gsc_readl(address+8));
- DPRINTK(("sig %04x, PCI STI ROM at %08lx\n", sig, address));
+ pr_debug("sig %04x, PCI STI ROM at %08lx\n", sig, address);
goto test_rom;
}
ok = 0;
if ((sig & 0xff) == 0x01) {
- DPRINTK((" byte mode ROM at %08lx, hpa at %08lx\n",
- address, hpa));
+ pr_debug(" byte mode ROM at %08lx, hpa at %08lx\n",
+ address, hpa);
ok = sti_read_rom(0, sti, address);
}
if ((sig & 0xffff) == 0x0303) {
- DPRINTK((" word mode ROM at %08lx, hpa at %08lx\n",
- address, hpa));
+ pr_debug(" word mode ROM at %08lx, hpa at %08lx\n",
+ address, hpa);
ok = sti_read_rom(1, sti, address);
}
@@ -906,7 +921,7 @@ test_rom:
unsigned long rom_base;
rom_base = pci_resource_start(sti->pd, PCI_ROM_RESOURCE);
pci_write_config_dword(sti->pd, PCI_ROM_ADDRESS, rom_base & ~PCI_ROM_ADDRESS_ENABLE);
- DPRINTK((KERN_DEBUG "STI PCI ROM disabled\n"));
+ pr_debug("STI PCI ROM disabled\n");
}
if (sti_init_graph(sti))
@@ -981,14 +996,14 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
rom_len = pci_resource_len(pd, PCI_ROM_RESOURCE);
if (rom_base) {
pci_write_config_dword(pd, PCI_ROM_ADDRESS, rom_base | PCI_ROM_ADDRESS_ENABLE);
- DPRINTK((KERN_DEBUG "STI PCI ROM enabled at 0x%08lx\n", rom_base));
+ pr_debug("STI PCI ROM enabled at 0x%08lx\n", rom_base);
}
- printk(KERN_INFO "STI PCI graphic ROM found at %08lx (%u kB), fb at %08lx (%u MB)\n",
+ pr_info("STI PCI graphic ROM found at %08lx (%u kB), fb at %08lx (%u MB)\n",
rom_base, rom_len/1024, fb_base, fb_len/1024/1024);
- DPRINTK((KERN_DEBUG "Trying PCI STI ROM at %08lx, PCI hpa at %08lx\n",
- rom_base, fb_base));
+ pr_debug("Trying PCI STI ROM at %08lx, PCI hpa at %08lx\n",
+ rom_base, fb_base);
sti = sti_try_rom_generic(rom_base, fb_base, pd);
if (sti) {
@@ -998,8 +1013,7 @@ static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent)
}
if (!sti) {
- printk(KERN_WARNING "Unable to handle STI device '%s'\n",
- pci_name(pd));
+ pr_warn("Unable to handle STI device '%s'\n", pci_name(pd));
return -ENODEV;
}
#endif /* CONFIG_PCI */
@@ -1058,7 +1072,7 @@ static void sti_init_roms(void)
sticore_initialized = 1;
- printk(KERN_INFO "STI GSC/PCI core graphics driver "
+ pr_info("STI GSC/PCI core graphics driver "
STI_DRIVERVERSION "\n");
/* Register drivers for native & PCI cards */
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 402e85450bb5..cfb7f5612ef0 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1795,25 +1795,6 @@ config PXA3XX_GCU
If you compile this as a module, it will be called pxa3xx_gcu.
-config FB_MBX
- tristate "2700G LCD framebuffer support"
- depends on FB && ARCH_PXA
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Framebuffer driver for the Intel 2700G (Marathon) Graphics
- Accelerator
-
-config FB_MBX_DEBUG
- bool "Enable debugging info via debugfs"
- depends on FB_MBX && DEBUG_FS
- help
- Enable this if you want debugging information using the debug
- filesystem (debugfs)
-
- If unsure, say N.
-
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
depends on FB && FSL_SOC
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index a0705b99e643..477b9624b703 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_FB_VIA) += via/
obj-$(CONFIG_FB_KYRO) += kyro/
obj-$(CONFIG_FB_SAVAGE) += savage/
obj-$(CONFIG_FB_GEODE) += geode/
-obj-$(CONFIG_FB_MBX) += mbx/
obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
obj-$(CONFIG_FB_3DFX) += tdfxfb.o
obj-$(CONFIG_FB_CONTROL) += controlfb.o
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index ae3d8e8b8d33..1447324ed0b6 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -419,7 +419,7 @@ static int arcfb_ioctl(struct fb_info *info,
schedule();
finish_wait(&arcfb_waitq, &wait);
}
- fallthrough;
+ fallthrough;
case FBIO_GETCONTROL2:
{
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index 11ab9a153860..edf169d0816e 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -1085,12 +1085,11 @@ static void ark_pci_remove(struct pci_dev *dev)
}
-#ifdef CONFIG_PM
/* PCI suspend */
-static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
+static int __maybe_unused ark_pci_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct arkfb_info *par = info->par;
dev_info(info->device, "suspend\n");
@@ -1098,7 +1097,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
console_lock();
mutex_lock(&(par->open_lock));
- if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
+ if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
@@ -1106,10 +1105,6 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
fb_set_suspend(info, 1);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
mutex_unlock(&(par->open_lock));
console_unlock();
@@ -1119,9 +1114,9 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
/* PCI resume */
-static int ark_pci_resume (struct pci_dev* dev)
+static int __maybe_unused ark_pci_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct arkfb_info *par = info->par;
dev_info(info->device, "resume\n");
@@ -1132,14 +1127,6 @@ static int ark_pci_resume (struct pci_dev* dev)
if (par->ref_count == 0)
goto fail;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
-
- if (pci_enable_device(dev))
- goto fail;
-
- pci_set_master(dev);
-
arkfb_set_par(info);
fb_set_suspend(info, 0);
@@ -1148,10 +1135,17 @@ fail:
console_unlock();
return 0;
}
-#else
-#define ark_pci_suspend NULL
-#define ark_pci_resume NULL
-#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ark_pci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ark_pci_suspend,
+ .resume = ark_pci_resume,
+ .freeze = NULL,
+ .thaw = ark_pci_resume,
+ .poweroff = ark_pci_suspend,
+ .restore = ark_pci_resume,
+#endif
+};
/* List of boards that we are trying to support */
@@ -1168,8 +1162,7 @@ static struct pci_driver arkfb_pci_driver = {
.id_table = ark_devices,
.probe = ark_pci_probe,
.remove = ark_pci_remove,
- .suspend = ark_pci_suspend,
- .resume = ark_pci_resume,
+ .driver.pm = &ark_pci_pm_ops,
};
/* Cleanup */
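This arkfb hunk is the template for most of the conversions that follow, so the mechanics are worth spelling out once. With the legacy pci_driver .suspend/.resume hooks the driver had to save config space and change the PCI power state itself; once the callbacks are reached through .driver.pm, the PCI core does both around them, which is why the pci_save_state()/pci_disable_device()/pci_set_power_state() and pci_set_power_state()/pci_restore_state()/pci_enable_device()/pci_set_master() calls are deleted rather than moved. Leaving .freeze as NULL preserves the old `state.event == PM_EVENT_FREEZE` early return: the device is left untouched while the hibernation image is written. A minimal sketch under those assumptions, with made-up foofb_* names:

/* sketch only; foofb_* are invented names */
#include <linux/console.h>
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/pm.h>

static int __maybe_unused foofb_suspend(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	/* device-specific quiesce only; the PCI core saves config
	 * space and picks the low-power state after this returns */
	console_lock();
	fb_set_suspend(info, 1);
	console_unlock();
	return 0;
}

static int __maybe_unused foofb_resume(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	console_lock();
	/* ... reprogram the hardware here ... */
	fb_set_suspend(info, 0);
	console_unlock();
	return 0;
}

static const struct dev_pm_ops foofb_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend  = foofb_suspend,
	.resume   = foofb_resume,
	.freeze   = NULL,	/* don't power down while the hibernation image is written */
	.thaw     = foofb_resume,
	.poweroff = foofb_suspend,
	.restore  = foofb_resume,
#endif
};
/* hooked up via  .driver.pm = &foofb_pm_ops  in struct pci_driver */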
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index bfd2f00b403b..8c1d47e52b1a 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -633,7 +633,7 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
case 2: value |= ATMEL_LCDC_PIXELSIZE_2; break;
case 4: value |= ATMEL_LCDC_PIXELSIZE_4; break;
case 8: value |= ATMEL_LCDC_PIXELSIZE_8; break;
- case 15:
+ case 15: fallthrough;
case 16: value |= ATMEL_LCDC_PIXELSIZE_16; break;
case 24: value |= ATMEL_LCDC_PIXELSIZE_24; break;
case 32: value |= ATMEL_LCDC_PIXELSIZE_32; break;
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 6fae6ad6cb77..e6a48689c294 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -162,10 +162,22 @@ static char * const r128_family[] = {
static int aty128_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void aty128_remove(struct pci_dev *pdev);
-static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-static int aty128_pci_resume(struct pci_dev *pdev);
+static int aty128_pci_suspend_late(struct device *dev, pm_message_t state);
+static int __maybe_unused aty128_pci_suspend(struct device *dev);
+static int __maybe_unused aty128_pci_hibernate(struct device *dev);
+static int __maybe_unused aty128_pci_freeze(struct device *dev);
+static int __maybe_unused aty128_pci_resume(struct device *dev);
static int aty128_do_resume(struct pci_dev *pdev);
+static const struct dev_pm_ops aty128_pci_pm_ops = {
+ .suspend = aty128_pci_suspend,
+ .resume = aty128_pci_resume,
+ .freeze = aty128_pci_freeze,
+ .thaw = aty128_pci_resume,
+ .poweroff = aty128_pci_hibernate,
+ .restore = aty128_pci_resume,
+};
+
/* supported Rage128 chipsets */
static const struct pci_device_id aty128_pci_tbl[] = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RAGE128_LE,
@@ -272,8 +284,7 @@ static struct pci_driver aty128fb_driver = {
.id_table = aty128_pci_tbl,
.probe = aty128_probe,
.remove = aty128_remove,
- .suspend = aty128_pci_suspend,
- .resume = aty128_pci_resume,
+ .driver.pm = &aty128_pci_pm_ops,
};
/* packed BIOS settings */
@@ -2316,7 +2327,6 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
{
u32 pmgt;
- struct pci_dev *pdev = par->pdev;
if (!par->pdev->pm_cap)
return;
@@ -2343,23 +2353,15 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
aty_st_le32(BUS_CNTL1, 0x00000010);
aty_st_le32(MEM_POWER_MISC, 0x0c830000);
msleep(100);
-
- /* Switch PCI power management to D2 */
- pci_set_power_state(pdev, PCI_D2);
}
}
-static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int aty128_pci_suspend_late(struct device *dev, pm_message_t state)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par = info->par;
- /* Because we may change PCI D state ourselves, we need to
- * first save the config space content so the core can
- * restore it properly on resume.
- */
- pci_save_state(pdev);
-
/* We don't do anything but D2, for now we return 0, but
* we may want to change that. How do we know if the BIOS
* can properly take care of D3 ? Also, with swsusp, we
@@ -2418,6 +2420,21 @@ static int aty128_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+static int __maybe_unused aty128_pci_suspend(struct device *dev)
+{
+ return aty128_pci_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int __maybe_unused aty128_pci_hibernate(struct device *dev)
+{
+ return aty128_pci_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int __maybe_unused aty128_pci_freeze(struct device *dev)
+{
+ return aty128_pci_suspend_late(dev, PMSG_FREEZE);
+}
+
static int aty128_do_resume(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
@@ -2464,12 +2481,12 @@ static int aty128_do_resume(struct pci_dev *pdev)
return 0;
}
-static int aty128_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused aty128_pci_resume(struct device *dev)
{
int rc;
console_lock();
- rc = aty128_do_resume(pdev);
+ rc = aty128_do_resume(to_pci_dev(dev));
console_unlock();
return rc;
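aty128fb (and atyfb further down) still has one suspend worker that needs to know which sleep transition is in flight, so instead of dropping the pm_message_t argument the patch fans it out through thin per-event wrappers, one per dev_pm_ops slot. The skeleton of that shim pattern, with invented foo_* names:

/* sketch of the shim pattern; foo_* are invented names */
static int foo_suspend_late(struct device *dev, pm_message_t state)
{
	if (state.event == PM_EVENT_FREEZE)	/* writing the hibernation image */
		return 0;			/* keep the chip powered */

	/* ... full power-down path for suspend/poweroff ... */
	return 0;
}

static int __maybe_unused foo_suspend(struct device *dev)
{
	return foo_suspend_late(dev, PMSG_SUSPEND);	/* .suspend slot */
}

static int __maybe_unused foo_freeze(struct device *dev)
{
	return foo_suspend_late(dev, PMSG_FREEZE);	/* .freeze slot */
}

static int __maybe_unused foo_hibernate(struct device *dev)
{
	return foo_suspend_late(dev, PMSG_HIBERNATE);	/* .poweroff slot */
}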
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index a7833bc98225..551372f9b9aa 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -287,8 +287,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par)
#endif
}
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
-defined (CONFIG_FB_ATY_GENERIC_LCD) || defined (CONFIG_FB_ATY_BACKLIGHT)
+#if defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) || \
+defined (CONFIG_FB_ATY_BACKLIGHT)
extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par);
extern u32 aty_ld_lcd(int index, const struct atyfb_par *par);
#endif
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index ad9cfe34c9ff..c8feff0ee8da 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -132,8 +132,8 @@
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
-#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
-defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
+#if defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || \
+defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
CNFG_PANEL_LG,
LCD_GEN_CNTL_LG,
@@ -175,7 +175,7 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
return aty_ld_le32(LCD_DATA, par);
}
}
-#endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
+#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
/*
@@ -1989,7 +1989,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-#if defined(CONFIG_PM) && defined(CONFIG_PCI)
+#if defined(CONFIG_PCI)
#ifdef CONFIG_PPC_PMAC
/* Power management routines. Those are used for PowerBook sleep.
@@ -2050,8 +2050,9 @@ static int aty_power_mgmt(int sleep, struct atyfb_par *par)
}
#endif /* CONFIG_PPC_PMAC */
-static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atyfb_pci_suspend_late(struct device *dev, pm_message_t state)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
@@ -2077,7 +2078,6 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
* first save the config space content so the core can
* restore it properly on resume.
*/
- pci_save_state(pdev);
#ifdef CONFIG_PPC_PMAC
/* Set chip to "suspend" mode */
@@ -2089,8 +2089,6 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
console_unlock();
return -EIO;
}
-#else
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
console_unlock();
@@ -2100,6 +2098,21 @@ static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+static int __maybe_unused atyfb_pci_suspend(struct device *dev)
+{
+ return atyfb_pci_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int __maybe_unused atyfb_pci_hibernate(struct device *dev)
+{
+ return atyfb_pci_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int __maybe_unused atyfb_pci_freeze(struct device *dev)
+{
+ return atyfb_pci_suspend_late(dev, PMSG_FREEZE);
+}
+
static void aty_resume_chip(struct fb_info *info)
{
struct atyfb_par *par = info->par;
@@ -2114,8 +2127,9 @@ static void aty_resume_chip(struct fb_info *info)
aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
}
-static int atyfb_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused atyfb_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
@@ -2157,7 +2171,18 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */
+static const struct dev_pm_ops atyfb_pci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = atyfb_pci_suspend,
+ .resume = atyfb_pci_resume,
+ .freeze = atyfb_pci_freeze,
+ .thaw = atyfb_pci_resume,
+ .poweroff = atyfb_pci_hibernate,
+ .restore = atyfb_pci_resume,
+#endif /* CONFIG_PM_SLEEP */
+};
+
+#endif /* defined(CONFIG_PCI) */
/* Backlight */
#ifdef CONFIG_FB_ATY_BACKLIGHT
@@ -3796,10 +3821,7 @@ static struct pci_driver atyfb_driver = {
.id_table = atyfb_pci_tbl,
.probe = atyfb_pci_probe,
.remove = atyfb_pci_remove,
-#ifdef CONFIG_PM
- .suspend = atyfb_pci_suspend,
- .resume = atyfb_pci_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = &atyfb_pci_pm_ops,
};
#endif /* CONFIG_PCI */
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 3fe509cb9b87..2fe690150420 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2307,7 +2307,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
ret = radeon_kick_out_firmware_fb(pdev);
if (ret)
- return ret;
+ goto err_release_fb;
/* request the mem regions */
ret = pci_request_region(pdev, 0, "radeonfb framebuffer");
@@ -2555,16 +2555,18 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
framebuffer_release(info);
}
+#ifdef CONFIG_PM
+#define RADEONFB_PCI_PM_OPS (&radeonfb_pci_pm_ops)
+#else
+#define RADEONFB_PCI_PM_OPS NULL
+#endif
static struct pci_driver radeonfb_driver = {
.name = "radeonfb",
.id_table = radeonfb_pci_table,
.probe = radeonfb_pci_register,
.remove = radeonfb_pci_unregister,
-#ifdef CONFIG_PM
- .suspend = radeonfb_pci_suspend,
- .resume = radeonfb_pci_resume,
-#endif /* CONFIG_PM */
+ .driver.pm = RADEONFB_PCI_PM_OPS,
};
#ifndef MODULE
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index f3d8123d7f36..b5fbd5329652 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -1431,7 +1431,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
mdelay( 15);
}
-#if defined(CONFIG_PM)
#if defined(CONFIG_X86) || defined(CONFIG_PPC_PMAC)
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
@@ -2210,7 +2209,6 @@ static void radeon_reinitialize_M9P(struct radeonfb_info *rinfo)
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
#endif
-#endif
#if 0 /* Not ready yet */
static void radeon_reinitialize_QW(struct radeonfb_info *rinfo)
@@ -2613,8 +2611,9 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
}
}
-int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int radeonfb_pci_suspend_late(struct device *dev, pm_message_t mesg)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct fb_info *info = pci_get_drvdata(pdev);
struct radeonfb_info *rinfo = info->par;
@@ -2662,11 +2661,6 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
pmac_suspend_agp_for_card(pdev);
#endif /* CONFIG_PPC_PMAC */
- /* It's unclear whether or when the generic code will do that, so let's
- * do it ourselves. We save state before we do any power management
- */
- pci_save_state(pdev);
-
/* If we support wakeup from poweroff, we save all regs we can including cfg
* space
*/
@@ -2691,7 +2685,6 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
msleep(20);
OUTREG(LVDS_GEN_CNTL, INREG(LVDS_GEN_CNTL) & ~(LVDS_DIGON));
}
- pci_disable_device(pdev);
}
/* If we support D2, we go to it (should be fixed later with a flag forcing
* D3 only for some laptops)
@@ -2707,6 +2700,21 @@ int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
return 0;
}
+static int radeonfb_pci_suspend(struct device *dev)
+{
+ return radeonfb_pci_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int radeonfb_pci_hibernate(struct device *dev)
+{
+ return radeonfb_pci_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int radeonfb_pci_freeze(struct device *dev)
+{
+ return radeonfb_pci_suspend_late(dev, PMSG_FREEZE);
+}
+
static int radeon_check_power_loss(struct radeonfb_info *rinfo)
{
return rinfo->save_regs[4] != INPLL(CLK_PIN_CNTL) ||
@@ -2714,8 +2722,9 @@ static int radeon_check_power_loss(struct radeonfb_info *rinfo)
rinfo->save_regs[3] != INPLL(SCLK_CNTL);
}
-int radeonfb_pci_resume(struct pci_dev *pdev)
+static int radeonfb_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct fb_info *info = pci_get_drvdata(pdev);
struct radeonfb_info *rinfo = info->par;
int rc = 0;
@@ -2797,6 +2806,15 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
return rc;
}
+const struct dev_pm_ops radeonfb_pci_pm_ops = {
+ .suspend = radeonfb_pci_suspend,
+ .resume = radeonfb_pci_resume,
+ .freeze = radeonfb_pci_freeze,
+ .thaw = radeonfb_pci_resume,
+ .poweroff = radeonfb_pci_hibernate,
+ .restore = radeonfb_pci_resume,
+};
+
#ifdef CONFIG_PPC__disabled
static void radeonfb_early_resume(void *data)
{
diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index 131b34dd65af..93f403cbb415 100644
--- a/drivers/video/fbdev/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
@@ -483,8 +483,7 @@ extern void radeon_delete_i2c_busses(struct radeonfb_info *rinfo);
extern int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn, u8 **out_edid);
/* PM Functions */
-extern int radeonfb_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-extern int radeonfb_pci_resume(struct pci_dev *pdev);
+extern const struct dev_pm_ops radeonfb_pci_pm_ops;
extern void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlist, int force_sleep);
extern void radeonfb_pm_exit(struct radeonfb_info *rinfo);
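radeonfb keeps its PM implementation in a separate file, so rather than a static ops table next to the pci_driver it exports a single const object: radeon_pm.c defines radeonfb_pci_pm_ops (its callbacks can now become static), radeonfb.h declares it, and radeon_base.c chooses at build time whether to wire it up. Reduced to the three pieces (bodies elided):

/* radeon_pm.c — definition; the callbacks stay file-local */
const struct dev_pm_ops radeonfb_pci_pm_ops = {
	.suspend = radeonfb_pci_suspend,
	.resume  = radeonfb_pci_resume,
	/* ... freeze/thaw/poweroff/restore as in the hunk above ... */
};

/* radeonfb.h — shared declaration */
extern const struct dev_pm_ops radeonfb_pci_pm_ops;

/* radeon_base.c — compile-time opt-in */
#ifdef CONFIG_PM
#define RADEONFB_PCI_PM_OPS (&radeonfb_pci_pm_ops)
#else
#define RADEONFB_PCI_PM_OPS NULL
#endif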
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8c7bd0a29eaa..cef437817b0d 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -163,8 +163,6 @@ static const struct consw fb_con;
#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
-static int fbcon_set_origin(struct vc_data *);
-
static int fbcon_cursor_noblink;
#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
@@ -1727,7 +1725,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
vc->vc_video_erase_char,
vc->vc_size_row * count);
return true;
- break;
case SCROLL_WRAP_MOVE:
if (b - t - count > 3 * vc->vc_rows >> 2) {
@@ -1818,7 +1815,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
vc->vc_video_erase_char,
vc->vc_size_row * count);
return true;
- break;
case SCROLL_WRAP_MOVE:
if (b - t - count > 3 * vc->vc_rows >> 2) {
@@ -2600,7 +2596,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
fb_set_cmap(&palette_cmap, info);
}
-static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+static u16 *fbcon_screen_pos(const struct vc_data *vc, int offset)
{
return (u16 *) (vc->vc_origin + offset);
}
@@ -2647,11 +2643,6 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
}
}
-static int fbcon_set_origin(struct vc_data *vc)
-{
- return 0;
-}
-
void fbcon_suspended(struct fb_info *info)
{
struct vc_data *vc = NULL;
@@ -3122,7 +3113,6 @@ static const struct consw fb_con = {
.con_font_default = fbcon_set_def_font,
.con_font_copy = fbcon_copy_font,
.con_set_palette = fbcon_set_palette,
- .con_set_origin = fbcon_set_origin,
.con_invert_region = fbcon_invert_region,
.con_screen_pos = fbcon_screen_pos,
.con_getxy = fbcon_getxy,
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 6815bfb7f572..8268bbee8cae 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -777,7 +777,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (info->fbops->fb_read)
return info->fbops->fb_read(info, buf, count, ppos);
-
+
total_size = info->screen_size;
if (total_size == 0)
@@ -842,7 +842,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (info->fbops->fb_write)
return info->fbops->fb_write(info, buf, count, ppos);
-
+
total_size = info->screen_size;
if (total_size == 0)
@@ -1006,6 +1006,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
return 0;
}
+ /* bitfill_aligned() assumes that it's at least 8x8 */
+ if (var->xres < 8 || var->yres < 8)
+ return -EINVAL;
+
ret = info->fbops->fb_check_var(var, info);
if (ret)
@@ -1057,7 +1061,7 @@ EXPORT_SYMBOL(fb_set_var);
int
fb_blank(struct fb_info *info, int blank)
-{
+{
struct fb_event event;
int ret = -EINVAL;
@@ -1433,7 +1437,7 @@ out:
return res;
}
-static int
+static int
fb_release(struct inode *inode, struct file *file)
__acquires(&info->lock)
__releases(&info->lock)
@@ -1623,7 +1627,7 @@ static int do_register_framebuffer(struct fb_info *fb_info)
fb_info->pixmap.access_align = 32;
fb_info->pixmap.flags = FB_PIXMAP_DEFAULT;
}
- }
+ }
fb_info->pixmap.offset = 0;
if (!fb_info->pixmap.blit_x)
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 42d37bed518a..d45355b9a58c 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1810,7 +1810,7 @@ static void cyberpro_pci_remove(struct pci_dev *dev)
}
}
-static int cyberpro_pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused cyberpro_pci_suspend(struct device *dev)
{
return 0;
}
@@ -1818,9 +1818,9 @@ static int cyberpro_pci_suspend(struct pci_dev *dev, pm_message_t state)
/*
* Re-initialise the CyberPro hardware
*/
-static int cyberpro_pci_resume(struct pci_dev *dev)
+static int __maybe_unused cyberpro_pci_resume(struct device *dev)
{
- struct cfb_info *cfb = pci_get_drvdata(dev);
+ struct cfb_info *cfb = dev_get_drvdata(dev);
if (cfb) {
cyberpro_pci_enable_mmio(cfb);
@@ -1846,12 +1846,15 @@ static struct pci_device_id cyberpro_pci_table[] = {
MODULE_DEVICE_TABLE(pci, cyberpro_pci_table);
+static SIMPLE_DEV_PM_OPS(cyberpro_pci_pm_ops,
+ cyberpro_pci_suspend,
+ cyberpro_pci_resume);
+
static struct pci_driver cyberpro_driver = {
.name = "CyberPro",
.probe = cyberpro_pci_probe,
.remove = cyberpro_pci_remove,
- .suspend = cyberpro_pci_suspend,
- .resume = cyberpro_pci_resume,
+ .driver.pm = &cyberpro_pci_pm_ops,
.id_table = cyberpro_pci_table
};
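cyberpro's callbacks are simple enough for the stock helper. SIMPLE_DEV_PM_OPS() is shorthand for the ops tables written out longhand elsewhere in this series: it routes the three suspend-type transitions to one callback and the three resume-type transitions to the other, and it collapses to an empty table when CONFIG_PM_SLEEP is off, which is exactly why the callbacks carry __maybe_unused. It expands to roughly:

/* approximate expansion of SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) */
const struct dev_pm_ops name = {
#ifdef CONFIG_PM_SLEEP
	.suspend  = suspend_fn,
	.resume   = resume_fn,
	.freeze   = suspend_fn,
	.thaw     = resume_fn,
	.poweroff = suspend_fn,
	.restore  = resume_fn,
#endif
};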
diff --git a/drivers/video/fbdev/geode/gxfb.h b/drivers/video/fbdev/geode/gxfb.h
index d2e9c5c8e294..792c111c21e4 100644
--- a/drivers/video/fbdev/geode/gxfb.h
+++ b/drivers/video/fbdev/geode/gxfb.h
@@ -21,7 +21,6 @@ struct gxfb_par {
void __iomem *dc_regs;
void __iomem *vid_regs;
void __iomem *gp_regs;
-#ifdef CONFIG_PM
int powered_down;
/* register state, for power management functionality */
@@ -36,7 +35,6 @@ struct gxfb_par {
uint64_t fp[FP_REG_COUNT];
uint32_t pal[DC_PAL_COUNT];
-#endif
};
unsigned int gx_frame_buffer_size(void);
@@ -49,11 +47,8 @@ void gx_set_dclk_frequency(struct fb_info *info);
void gx_configure_display(struct fb_info *info);
int gx_blank_display(struct fb_info *info, int blank_mode);
-#ifdef CONFIG_PM
int gx_powerdown(struct fb_info *info);
int gx_powerup(struct fb_info *info);
-#endif
-
/* Graphics Processor registers (table 6-23 from the data book) */
enum gp_registers {
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index d38a148d4746..44089b331f91 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -322,17 +322,14 @@ static struct fb_info *gxfb_init_fbinfo(struct device *dev)
return info;
}
-#ifdef CONFIG_PM
-static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused gxfb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
- if (state.event == PM_EVENT_SUSPEND) {
- console_lock();
- gx_powerdown(info);
- fb_set_suspend(info, 1);
- console_unlock();
- }
+ console_lock();
+ gx_powerdown(info);
+ fb_set_suspend(info, 1);
+ console_unlock();
/* there's no point in setting PCI states; we emulate PCI, so
* we don't end up getting power savings anyways */
@@ -340,9 +337,9 @@ static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int gxfb_resume(struct pci_dev *pdev)
+static int __maybe_unused gxfb_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
int ret;
console_lock();
@@ -356,7 +353,6 @@ static int gxfb_resume(struct pci_dev *pdev)
console_unlock();
return 0;
}
-#endif
static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -467,15 +463,23 @@ static const struct pci_device_id gxfb_id_table[] = {
MODULE_DEVICE_TABLE(pci, gxfb_id_table);
+static const struct dev_pm_ops gxfb_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = gxfb_suspend,
+ .resume = gxfb_resume,
+ .freeze = NULL,
+ .thaw = gxfb_resume,
+ .poweroff = NULL,
+ .restore = gxfb_resume,
+#endif
+};
+
static struct pci_driver gxfb_driver = {
.name = "gxfb",
.id_table = gxfb_id_table,
.probe = gxfb_probe,
.remove = gxfb_remove,
-#ifdef CONFIG_PM
- .suspend = gxfb_suspend,
- .resume = gxfb_resume,
-#endif
+ .driver.pm = &gxfb_pm_ops,
};
#ifndef MODULE
diff --git a/drivers/video/fbdev/geode/lxfb.h b/drivers/video/fbdev/geode/lxfb.h
index ef24bf6d49dc..d37b32dbcd68 100644
--- a/drivers/video/fbdev/geode/lxfb.h
+++ b/drivers/video/fbdev/geode/lxfb.h
@@ -29,7 +29,6 @@ struct lxfb_par {
void __iomem *gp_regs;
void __iomem *dc_regs;
void __iomem *vp_regs;
-#ifdef CONFIG_PM
int powered_down;
/* register state, for power mgmt functionality */
@@ -50,7 +49,6 @@ struct lxfb_par {
uint32_t hcoeff[DC_HFILT_COUNT * 2];
uint32_t vcoeff[DC_VFILT_COUNT];
uint32_t vp_coeff[VP_COEFF_SIZE / 4];
-#endif
};
static inline unsigned int lx_get_pitch(unsigned int xres, int bpp)
@@ -64,11 +62,8 @@ int lx_blank_display(struct fb_info *, int);
void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
unsigned int, unsigned int);
-#ifdef CONFIG_PM
int lx_powerdown(struct fb_info *info);
int lx_powerup(struct fb_info *info);
-#endif
-
/* Graphics Processor registers (table 6-29 from the data book) */
enum gp_registers {
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index adc2d9c2395e..66c81262d18f 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -443,17 +443,14 @@ static struct fb_info *lxfb_init_fbinfo(struct device *dev)
return info;
}
-#ifdef CONFIG_PM
-static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused lxfb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
- if (state.event == PM_EVENT_SUSPEND) {
- console_lock();
- lx_powerdown(info);
- fb_set_suspend(info, 1);
- console_unlock();
- }
+ console_lock();
+ lx_powerdown(info);
+ fb_set_suspend(info, 1);
+ console_unlock();
/* there's no point in setting PCI states; we emulate PCI, so
* we don't end up getting power savings anyways */
@@ -461,9 +458,9 @@ static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int lxfb_resume(struct pci_dev *pdev)
+static int __maybe_unused lxfb_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
int ret;
console_lock();
@@ -477,10 +474,6 @@ static int lxfb_resume(struct pci_dev *pdev)
console_unlock();
return 0;
}
-#else
-#define lxfb_suspend NULL
-#define lxfb_resume NULL
-#endif
static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -600,13 +593,23 @@ static struct pci_device_id lxfb_id_table[] = {
MODULE_DEVICE_TABLE(pci, lxfb_id_table);
+static const struct dev_pm_ops lxfb_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = lxfb_suspend,
+ .resume = lxfb_resume,
+ .freeze = NULL,
+ .thaw = lxfb_resume,
+ .poweroff = NULL,
+ .restore = lxfb_resume,
+#endif
+};
+
static struct pci_driver lxfb_driver = {
.name = "lxfb",
.id_table = lxfb_id_table,
.probe = lxfb_probe,
.remove = lxfb_remove,
- .suspend = lxfb_suspend,
- .resume = lxfb_resume,
+ .driver.pm = &lxfb_pm_ops,
};
#ifndef MODULE
diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index 5be8bc62844c..b3a041fce570 100644
--- a/drivers/video/fbdev/geode/lxfb_ops.c
+++ b/drivers/video/fbdev/geode/lxfb_ops.c
@@ -580,8 +580,6 @@ int lx_blank_display(struct fb_info *info, int blank_mode)
return 0;
}
-#ifdef CONFIG_PM
-
static void lx_save_regs(struct lxfb_par *par)
{
uint32_t filt;
@@ -837,5 +835,3 @@ int lx_powerup(struct fb_info *info)
par->powered_down = 0;
return 0;
}
-
-#endif
diff --git a/drivers/video/fbdev/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c
index 1110a527c35c..8c49d4e98772 100644
--- a/drivers/video/fbdev/geode/suspend_gx.c
+++ b/drivers/video/fbdev/geode/suspend_gx.c
@@ -11,8 +11,6 @@
#include "gxfb.h"
-#ifdef CONFIG_PM
-
static void gx_save_regs(struct gxfb_par *par)
{
int i;
@@ -259,5 +257,3 @@ int gx_powerup(struct fb_info *info)
par->powered_down = 0;
return 0;
}
-
-#endif
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 02411d89cb46..5bc86f481a78 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/fb.h>
@@ -1114,8 +1115,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
getmem_done:
remove_conflicting_framebuffers(info->apertures,
KBUILD_MODNAME, false);
- if (!gen2vm)
+
+ if (gen2vm) {
+ /* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
+ screen_info.lfb_size = 0;
+ screen_info.lfb_base = 0;
+ screen_info.orig_video_isVGA = 0;
+ } else {
pci_dev_put(pdev);
+ }
kfree(info->apertures);
return 0;
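The gen2vm branch zeroes the global screen_info because on a Generation 2 VM the driver has replaced the firmware-provided framebuffer with its own allocation: a kexec'd kernel would otherwise trust the stale lfb_base/lfb_size and treat memory that is no longer a framebuffer as one. A simplified sketch of the consumer this defends against (register_boot_framebuffer() is a made-up stand-in for the boot-time framebuffer setup):

/* sketch: what a freshly kexec'd kernel does with screen_info */
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && screen_info.lfb_base)
	register_boot_framebuffer(screen_info.lfb_base,	/* hypothetical helper */
				  screen_info.lfb_size);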
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index e6f35f8feefc..52cce0db8bd3 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -1175,16 +1175,11 @@ static void i740fb_remove(struct pci_dev *dev)
}
}
-#ifdef CONFIG_PM
-static int i740fb_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused i740fb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct i740fb_par *par = info->par;
- /* don't disable console during hibernation and wakeup from it */
- if (state.event == PM_EVENT_FREEZE || state.event == PM_EVENT_PRETHAW)
- return 0;
-
console_lock();
mutex_lock(&(par->open_lock));
@@ -1197,19 +1192,15 @@ static int i740fb_suspend(struct pci_dev *dev, pm_message_t state)
fb_set_suspend(info, 1);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
-static int i740fb_resume(struct pci_dev *dev)
+static int __maybe_unused i740fb_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct i740fb_par *par = info->par;
console_lock();
@@ -1218,11 +1209,6 @@ static int i740fb_resume(struct pci_dev *dev)
if (par->ref_count == 0)
goto fail;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- if (pci_enable_device(dev))
- goto fail;
-
i740fb_set_par(info);
fb_set_suspend(info, 0);
@@ -1231,10 +1217,17 @@ fail:
console_unlock();
return 0;
}
-#else
-#define i740fb_suspend NULL
-#define i740fb_resume NULL
-#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops i740fb_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = i740fb_suspend,
+ .resume = i740fb_resume,
+ .freeze = NULL,
+ .thaw = i740fb_resume,
+ .poweroff = i740fb_suspend,
+ .restore = i740fb_resume,
+#endif /* CONFIG_PM_SLEEP */
+};
#define I740_ID_PCI 0x00d1
#define I740_ID_AGP 0x7800
@@ -1251,8 +1244,7 @@ static struct pci_driver i740fb_driver = {
.id_table = i740fb_id_table,
.probe = i740fb_probe,
.remove = i740fb_remove,
- .suspend = i740fb_suspend,
- .resume = i740fb_resume,
+ .driver.pm = &i740fb_pm_ops,
};
#ifndef MODULE
diff --git a/drivers/video/fbdev/kyro/STG4000InitDevice.c b/drivers/video/fbdev/kyro/STG4000InitDevice.c
index 1d3f2080aa6f..21875d3c2dc2 100644
--- a/drivers/video/fbdev/kyro/STG4000InitDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000InitDevice.c
@@ -120,7 +120,7 @@ u32 ProgramClock(u32 refClock,
{
u32 R = 0, F = 0, OD = 0, ODIndex = 0;
u32 ulBestR = 0, ulBestF = 0, ulBestOD = 0;
- u32 ulBestVCO = 0, ulBestClk = 0, ulBestScore = 0;
+ u32 ulBestClk = 0, ulBestScore = 0;
u32 ulScore, ulPhaseScore, ulVcoScore;
u32 ulTmp = 0, ulVCO;
u32 ulScaleClockReq, ulMinClock, ulMaxClock;
@@ -189,7 +189,6 @@ u32 ProgramClock(u32 refClock,
ulScore = ulPhaseScore + ulVcoScore;
if (!ulBestScore) {
- ulBestVCO = ulVCO;
ulBestOD = OD;
ulBestF = F;
ulBestR = R;
@@ -206,7 +205,6 @@ u32 ProgramClock(u32 refClock,
but we shall keep this code in case new restrictions come into play
--------------------------------------------------------------------------*/
if ((ulScore >= ulBestScore) && (OD > 0)) {
- ulBestVCO = ulVCO;
ulBestOD = OD;
ulBestF = F;
ulBestR = R;
@@ -244,7 +242,6 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev)
{
u32 F, R, P;
u16 core_pll = 0, sub;
- u32 ulCoreClock;
u32 tmp;
u32 ulChipSpeed;
@@ -282,7 +279,7 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev)
if (ulChipSpeed == 0)
return -EINVAL;
- ulCoreClock = ProgramClock(REF_FREQ, CORE_PLL_FREQ, &F, &R, &P);
+ ProgramClock(REF_FREQ, CORE_PLL_FREQ, &F, &R, &P);
core_pll |= ((P) | ((F - 2) << 2) | ((R - 2) << 11));
diff --git a/drivers/video/fbdev/mbx/Makefile b/drivers/video/fbdev/mbx/Makefile
deleted file mode 100644
index 3e8e7ff41f18..000000000000
--- a/drivers/video/fbdev/mbx/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-# Makefile for the 2700G controller driver.
-
-obj-y += mbxfb.o
diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c
deleted file mode 100644
index 09af721638fb..000000000000
--- a/drivers/video/fbdev/mbx/mbxdebugfs.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-
-#define BIG_BUFFER_SIZE (1024)
-
-static char big_buffer[BIG_BUFFER_SIZE];
-
-struct mbxfb_debugfs_data {
- struct dentry *dir;
- struct dentry *sysconf;
- struct dentry *clock;
- struct dentry *display;
- struct dentry *gsctl;
- struct dentry *sdram;
- struct dentry *misc;
-};
-
-static ssize_t write_file_dummy(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return count;
-}
-
-static ssize_t sysconf_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "SYSCFG = %08x\n", readl(SYSCFG));
- s += sprintf(s, "PFBASE = %08x\n", readl(PFBASE));
- s += sprintf(s, "PFCEIL = %08x\n", readl(PFCEIL));
- s += sprintf(s, "POLLFLAG = %08x\n", readl(POLLFLAG));
- s += sprintf(s, "SYSRST = %08x\n", readl(SYSRST));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-
-static ssize_t gsctl_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "GSCTRL = %08x\n", readl(GSCTRL));
- s += sprintf(s, "VSCTRL = %08x\n", readl(VSCTRL));
- s += sprintf(s, "GBBASE = %08x\n", readl(GBBASE));
- s += sprintf(s, "VBBASE = %08x\n", readl(VBBASE));
- s += sprintf(s, "GDRCTRL = %08x\n", readl(GDRCTRL));
- s += sprintf(s, "VCMSK = %08x\n", readl(VCMSK));
- s += sprintf(s, "GSCADR = %08x\n", readl(GSCADR));
- s += sprintf(s, "VSCADR = %08x\n", readl(VSCADR));
- s += sprintf(s, "VUBASE = %08x\n", readl(VUBASE));
- s += sprintf(s, "VVBASE = %08x\n", readl(VVBASE));
- s += sprintf(s, "GSADR = %08x\n", readl(GSADR));
- s += sprintf(s, "VSADR = %08x\n", readl(VSADR));
- s += sprintf(s, "HCCTRL = %08x\n", readl(HCCTRL));
- s += sprintf(s, "HCSIZE = %08x\n", readl(HCSIZE));
- s += sprintf(s, "HCPOS = %08x\n", readl(HCPOS));
- s += sprintf(s, "HCBADR = %08x\n", readl(HCBADR));
- s += sprintf(s, "HCCKMSK = %08x\n", readl(HCCKMSK));
- s += sprintf(s, "GPLUT = %08x\n", readl(GPLUT));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-static ssize_t display_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "DSCTRL = %08x\n", readl(DSCTRL));
- s += sprintf(s, "DHT01 = %08x\n", readl(DHT01));
- s += sprintf(s, "DHT02 = %08x\n", readl(DHT02));
- s += sprintf(s, "DHT03 = %08x\n", readl(DHT03));
- s += sprintf(s, "DVT01 = %08x\n", readl(DVT01));
- s += sprintf(s, "DVT02 = %08x\n", readl(DVT02));
- s += sprintf(s, "DVT03 = %08x\n", readl(DVT03));
- s += sprintf(s, "DBCOL = %08x\n", readl(DBCOL));
- s += sprintf(s, "BGCOLOR = %08x\n", readl(BGCOLOR));
- s += sprintf(s, "DINTRS = %08x\n", readl(DINTRS));
- s += sprintf(s, "DINTRE = %08x\n", readl(DINTRE));
- s += sprintf(s, "DINTRCNT = %08x\n", readl(DINTRCNT));
- s += sprintf(s, "DSIG = %08x\n", readl(DSIG));
- s += sprintf(s, "DMCTRL = %08x\n", readl(DMCTRL));
- s += sprintf(s, "CLIPCTRL = %08x\n", readl(CLIPCTRL));
- s += sprintf(s, "SPOCTRL = %08x\n", readl(SPOCTRL));
- s += sprintf(s, "SVCTRL = %08x\n", readl(SVCTRL));
- s += sprintf(s, "DLSTS = %08x\n", readl(DLSTS));
- s += sprintf(s, "DLLCTRL = %08x\n", readl(DLLCTRL));
- s += sprintf(s, "DVLNUM = %08x\n", readl(DVLNUM));
- s += sprintf(s, "DUCTRL = %08x\n", readl(DUCTRL));
- s += sprintf(s, "DVECTRL = %08x\n", readl(DVECTRL));
- s += sprintf(s, "DHDET = %08x\n", readl(DHDET));
- s += sprintf(s, "DVDET = %08x\n", readl(DVDET));
- s += sprintf(s, "DODMSK = %08x\n", readl(DODMSK));
- s += sprintf(s, "CSC01 = %08x\n", readl(CSC01));
- s += sprintf(s, "CSC02 = %08x\n", readl(CSC02));
- s += sprintf(s, "CSC03 = %08x\n", readl(CSC03));
- s += sprintf(s, "CSC04 = %08x\n", readl(CSC04));
- s += sprintf(s, "CSC05 = %08x\n", readl(CSC05));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-static ssize_t clock_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "SYSCLKSRC = %08x\n", readl(SYSCLKSRC));
- s += sprintf(s, "PIXCLKSRC = %08x\n", readl(PIXCLKSRC));
- s += sprintf(s, "CLKSLEEP = %08x\n", readl(CLKSLEEP));
- s += sprintf(s, "COREPLL = %08x\n", readl(COREPLL));
- s += sprintf(s, "DISPPLL = %08x\n", readl(DISPPLL));
- s += sprintf(s, "PLLSTAT = %08x\n", readl(PLLSTAT));
- s += sprintf(s, "VOVRCLK = %08x\n", readl(VOVRCLK));
- s += sprintf(s, "PIXCLK = %08x\n", readl(PIXCLK));
- s += sprintf(s, "MEMCLK = %08x\n", readl(MEMCLK));
- s += sprintf(s, "M24CLK = %08x\n", readl(M24CLK));
- s += sprintf(s, "MBXCLK = %08x\n", readl(MBXCLK));
- s += sprintf(s, "SDCLK = %08x\n", readl(SDCLK));
- s += sprintf(s, "PIXCLKDIV = %08x\n", readl(PIXCLKDIV));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-static ssize_t sdram_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "LMRST = %08x\n", readl(LMRST));
- s += sprintf(s, "LMCFG = %08x\n", readl(LMCFG));
- s += sprintf(s, "LMPWR = %08x\n", readl(LMPWR));
- s += sprintf(s, "LMPWRSTAT = %08x\n", readl(LMPWRSTAT));
- s += sprintf(s, "LMCEMR = %08x\n", readl(LMCEMR));
- s += sprintf(s, "LMTYPE = %08x\n", readl(LMTYPE));
- s += sprintf(s, "LMTIM = %08x\n", readl(LMTIM));
- s += sprintf(s, "LMREFRESH = %08x\n", readl(LMREFRESH));
- s += sprintf(s, "LMPROTMIN = %08x\n", readl(LMPROTMIN));
- s += sprintf(s, "LMPROTMAX = %08x\n", readl(LMPROTMAX));
- s += sprintf(s, "LMPROTCFG = %08x\n", readl(LMPROTCFG));
- s += sprintf(s, "LMPROTERR = %08x\n", readl(LMPROTERR));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-static ssize_t misc_read_file(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- char * s = big_buffer;
-
- s += sprintf(s, "LCD_CONFIG = %08x\n", readl(LCD_CONFIG));
- s += sprintf(s, "ODFBPWR = %08x\n", readl(ODFBPWR));
- s += sprintf(s, "ODFBSTAT = %08x\n", readl(ODFBSTAT));
- s += sprintf(s, "ID = %08x\n", readl(ID));
-
- return simple_read_from_buffer(userbuf, count, ppos,
- big_buffer, s-big_buffer);
-}
-
-
-static const struct file_operations sysconf_fops = {
- .read = sysconf_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static const struct file_operations clock_fops = {
- .read = clock_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static const struct file_operations display_fops = {
- .read = display_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static const struct file_operations gsctl_fops = {
- .read = gsctl_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static const struct file_operations sdram_fops = {
- .read = sdram_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static const struct file_operations misc_fops = {
- .read = misc_read_file,
- .write = write_file_dummy,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
-static void mbxfb_debugfs_init(struct fb_info *fbi)
-{
- struct mbxfb_info *mfbi = fbi->par;
- struct dentry *dir;
-
- dir = debugfs_create_dir("mbxfb", NULL);
- mfbi->debugfs_dir = dir;
-
- debugfs_create_file("sysconf", 0444, dir, fbi, &sysconf_fops);
- debugfs_create_file("clock", 0444, dir, fbi, &clock_fops);
- debugfs_create_file("display", 0444, dir, fbi, &display_fops);
- debugfs_create_file("gsctl", 0444, dir, fbi, &gsctl_fops);
- debugfs_create_file("sdram", 0444, dir, fbi, &sdram_fops);
- debugfs_create_file("misc", 0444, dir, fbi, &misc_fops);
-}
-
-static void mbxfb_debugfs_remove(struct fb_info *fbi)
-{
- struct mbxfb_info *mfbi = fbi->par;
-
- debugfs_remove_recursive(mfbi->debugfs_dir);
-}
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
deleted file mode 100644
index 6dc287c819cb..000000000000
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ /dev/null
@@ -1,1053 +0,0 @@
-/*
- * linux/drivers/video/mbx/mbxfb.c
- *
- * Copyright (C) 2006-2007 8D Technologies inc
- * Raphael Assenat <raph@8d.com>
- * - Added video overlay support
- * - Various improvements
- *
- * Copyright (C) 2006 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- * - Creation of driver
- *
- * Based on pxafb.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver
- *
- */
-
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-
-#include <video/mbxfb.h>
-
-#include "regs.h"
-#include "reg_bits.h"
-
-static void __iomem *virt_base_2700;
-
-#define write_reg(val, reg) do { writel((val), (reg)); } while(0)
-
-/* Without this delay, the graphics appears somehow scaled and
- * there is a lot of jitter in scanlines. This delay is probably
- * needed only after setting some specific register(s) somewhere,
- * not all over the place... */
-#define write_reg_dly(val, reg) do { writel((val), reg); udelay(1000); } while(0)
-
-#define MIN_XRES 16
-#define MIN_YRES 16
-#define MAX_XRES 2048
-#define MAX_YRES 2048
-
-#define MAX_PALETTES 16
-
-/* FIXME: take care of different chip revisions with different sizes
- of ODFB */
-#define MEMORY_OFFSET 0x60000
-
-struct mbxfb_info {
- struct device *dev;
-
- struct resource *fb_res;
- struct resource *fb_req;
-
- struct resource *reg_res;
- struct resource *reg_req;
-
- void __iomem *fb_virt_addr;
- unsigned long fb_phys_addr;
-
- void __iomem *reg_virt_addr;
- unsigned long reg_phys_addr;
-
- int (*platform_probe) (struct fb_info * fb);
- int (*platform_remove) (struct fb_info * fb);
-
- u32 pseudo_palette[MAX_PALETTES];
-#ifdef CONFIG_FB_MBX_DEBUG
- struct dentry *debugfs_dir;
-#endif
-
-};
-
-static const struct fb_var_screeninfo mbxfb_default = {
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 16,
- .red = {11, 5, 0},
- .green = {5, 6, 0},
- .blue = {0, 5, 0},
- .activate = FB_ACTIVATE_TEST,
- .height = -1,
- .width = -1,
- .pixclock = 40000,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 33,
- .lower_margin = 10,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-};
-
-static const struct fb_fix_screeninfo mbxfb_fix = {
- .id = "MBX",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
- .xpanstep = 0,
- .ypanstep = 0,
- .ywrapstep = 0,
- .accel = FB_ACCEL_NONE,
-};
-
-struct pixclock_div {
- u8 m;
- u8 n;
- u8 p;
-};
-
-static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps,
- struct pixclock_div *div)
-{
- u8 m, n, p;
- unsigned int err = 0;
- unsigned int min_err = ~0x0;
- unsigned int clk;
- unsigned int best_clk = 0;
- unsigned int ref_clk = 13000; /* FIXME: take from platform data */
- unsigned int pixclock;
-
- /* convert pixclock to KHz */
- pixclock = PICOS2KHZ(pixclock_ps);
-
- /* PLL output freq = (ref_clk * M) / (N * 2^P)
- *
- * M: 1 to 63
- * N: 1 to 7
- * P: 0 to 7
- */
-
- /* RAPH: When N==1, the resulting pixel clock appears to
- * get divided by 2. Preventing N=1 by starting the following
- * loop at 2 prevents this. Is this a bug with my chip
- * revision or something I dont understand? */
- for (m = 1; m < 64; m++) {
- for (n = 2; n < 8; n++) {
- for (p = 0; p < 8; p++) {
- clk = (ref_clk * m) / (n * (1 << p));
- err = (clk > pixclock) ? (clk - pixclock) :
- (pixclock - clk);
- if (err < min_err) {
- min_err = err;
- best_clk = clk;
- div->m = m;
- div->n = n;
- div->p = p;
- }
- }
- }
- }
- return KHZ2PICOS(best_clk);
-}
-
-static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int trans, struct fb_info *info)
-{
- u32 val, ret = 1;
-
- if (regno < MAX_PALETTES) {
- u32 *pal = info->pseudo_palette;
-
- val = (red & 0xf800) | ((green & 0xfc00) >> 5) |
- ((blue & 0xf800) >> 11);
- pal[regno] = val;
- ret = 0;
- }
-
- return ret;
-}
-
-static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct pixclock_div div;
-
- var->pixclock = mbxfb_get_pixclock(var->pixclock, &div);
-
- if (var->xres < MIN_XRES)
- var->xres = MIN_XRES;
- if (var->yres < MIN_YRES)
- var->yres = MIN_YRES;
- if (var->xres > MAX_XRES)
- return -EINVAL;
- if (var->yres > MAX_YRES)
- return -EINVAL;
- var->xres_virtual = max(var->xres_virtual, var->xres);
- var->yres_virtual = max(var->yres_virtual, var->yres);
-
- switch (var->bits_per_pixel) {
- /* 8 bits-per-pixel is not supported yet */
- case 8:
- return -EINVAL;
- case 16:
- var->green.length = (var->green.length == 5) ? 5 : 6;
- var->red.length = 5;
- var->blue.length = 5;
- var->transp.length = 6 - var->green.length;
- var->blue.offset = 0;
- var->green.offset = 5;
- var->red.offset = 5 + var->green.length;
- var->transp.offset = (5 + var->red.offset) & 15;
- break;
- case 24: /* RGB 888 */
- case 32: /* RGBA 8888 */
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.length = var->bits_per_pixel - 24;
- var->transp.offset = (var->transp.length) ? 24 : 0;
- break;
- }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
-
- return 0;
-}
-
-static int mbxfb_set_par(struct fb_info *info)
-{
- struct fb_var_screeninfo *var = &info->var;
- struct pixclock_div div;
- ushort hbps, ht, hfps, has;
- ushort vbps, vt, vfps, vas;
- u32 gsctrl = readl(GSCTRL);
- u32 gsadr = readl(GSADR);
-
- info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
-
- /* setup color mode */
- gsctrl &= ~(FMsk(GSCTRL_GPIXFMT));
- /* FIXME: add *WORKING* support for 8-bits per color */
- if (info->var.bits_per_pixel == 8) {
- return -EINVAL;
- } else {
- fb_dealloc_cmap(&info->cmap);
- gsctrl &= ~GSCTRL_LUT_EN;
-
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- switch (info->var.bits_per_pixel) {
- case 16:
- if (info->var.green.length == 5)
- gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
- else
- gsctrl |= GSCTRL_GPIXFMT_RGB565;
- break;
- case 24:
- gsctrl |= GSCTRL_GPIXFMT_RGB888;
- break;
- case 32:
- gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
- break;
- }
- }
-
- /* setup resolution */
- gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT));
- gsctrl |= Gsctrl_Width(info->var.xres) |
- Gsctrl_Height(info->var.yres);
- write_reg_dly(gsctrl, GSCTRL);
-
- gsadr &= ~(FMsk(GSADR_SRCSTRIDE));
- gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel /
- (8 * 16) - 1);
- write_reg_dly(gsadr, GSADR);
-
- /* setup timings */
- var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div);
-
- write_reg_dly((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) |
- Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL);
-
- hbps = var->hsync_len;
- has = hbps + var->left_margin;
- hfps = has + var->xres;
- ht = hfps + var->right_margin;
-
- vbps = var->vsync_len;
- vas = vbps + var->upper_margin;
- vfps = vas + var->yres;
- vt = vfps + var->lower_margin;
-
- write_reg_dly((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01);
- write_reg_dly((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02);
- write_reg_dly((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03);
- write_reg_dly((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET);
-
- write_reg_dly((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01);
- write_reg_dly((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02);
- write_reg_dly((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03);
- write_reg_dly((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET);
- write_reg_dly((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL);
-
- write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
-
- write_reg_dly(DINTRE_VEVENT0_EN, DINTRE);
-
- return 0;
-}
-
-static int mbxfb_blank(int blank, struct fb_info *info)
-{
- switch (blank) {
- case FB_BLANK_POWERDOWN:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_NORMAL:
- write_reg_dly((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL);
- write_reg_dly((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK);
- write_reg_dly((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK);
- break;
- case FB_BLANK_UNBLANK:
- write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
- write_reg_dly((readl(PIXCLK) | PIXCLK_EN), PIXCLK);
- break;
- }
- return 0;
-}
-
-static int mbxfb_setupOverlay(struct mbxfb_overlaySetup *set)
-{
- u32 vsctrl, vscadr, vsadr;
- u32 sssize, spoctrl, shctrl;
- u32 vubase, vvbase;
- u32 vovrclk;
-
- if (set->scaled_width==0 || set->scaled_height==0)
- return -EINVAL;
-
- /* read registers which have reserved bits
- * so we can write them back as-is. */
- vovrclk = readl(VOVRCLK);
- vsctrl = readl(VSCTRL);
- vscadr = readl(VSCADR);
- vubase = readl(VUBASE);
- vvbase = readl(VVBASE);
- shctrl = readl(SHCTRL);
-
- spoctrl = readl(SPOCTRL);
- sssize = readl(SSSIZE);
-
- vsctrl &= ~( FMsk(VSCTRL_VSWIDTH) |
- FMsk(VSCTRL_VSHEIGHT) |
- FMsk(VSCTRL_VPIXFMT) |
- VSCTRL_GAMMA_EN | VSCTRL_CSC_EN |
- VSCTRL_COSITED );
- vsctrl |= Vsctrl_Width(set->width) | Vsctrl_Height(set->height) |
- VSCTRL_CSC_EN;
-
- vscadr &= ~(VSCADR_STR_EN | FMsk(VSCADR_VBASE_ADR) );
- vubase &= ~(VUBASE_UVHALFSTR | FMsk(VUBASE_UBASE_ADR));
- vvbase &= ~(FMsk(VVBASE_VBASE_ADR));
-
- switch (set->fmt) {
- case MBXFB_FMT_YUV16:
- vsctrl |= VSCTRL_VPIXFMT_YUV12;
-
- set->Y_stride = ((set->width) + 0xf ) & ~0xf;
- break;
- case MBXFB_FMT_YUV12:
- vsctrl |= VSCTRL_VPIXFMT_YUV12;
-
- set->Y_stride = ((set->width) + 0xf ) & ~0xf;
- vubase |= VUBASE_UVHALFSTR;
-
- break;
- case MBXFB_FMT_UY0VY1:
- vsctrl |= VSCTRL_VPIXFMT_UY0VY1;
- set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
- break;
- case MBXFB_FMT_VY0UY1:
- vsctrl |= VSCTRL_VPIXFMT_VY0UY1;
- set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
- break;
- case MBXFB_FMT_Y0UY1V:
- vsctrl |= VSCTRL_VPIXFMT_Y0UY1V;
- set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
- break;
- case MBXFB_FMT_Y0VY1U:
- vsctrl |= VSCTRL_VPIXFMT_Y0VY1U;
- set->Y_stride = (set->width*2 + 0xf ) & ~0xf;
- break;
- default:
- return -EINVAL;
- }
-
- /* VSCTRL has the bits which sets the Video Pixel Format.
- * When passing from a packed to planar format,
- * if we write VSCTRL first, VVBASE and VUBASE would
- * be zero if we would not set them here. (And then,
- * the chips hangs and only a reset seems to fix it).
- *
- * If course, the values calculated here have no meaning
- * for packed formats.
- */
- set->UV_stride = ((set->width/2) + 0x7 ) & ~0x7;
- set->U_offset = set->height * set->Y_stride;
- set->V_offset = set->U_offset +
- set->height * set->UV_stride;
- vubase |= Vubase_Ubase_Adr(
- (0x60000 + set->mem_offset + set->U_offset)>>3);
- vvbase |= Vvbase_Vbase_Adr(
- (0x60000 + set->mem_offset + set->V_offset)>>3);
-
-
- vscadr |= Vscadr_Vbase_Adr((0x60000 + set->mem_offset)>>4);
-
- if (set->enable)
- vscadr |= VSCADR_STR_EN;
-
-
- vsadr = Vsadr_Srcstride((set->Y_stride)/16-1) |
- Vsadr_Xstart(set->x) | Vsadr_Ystart(set->y);
-
- sssize &= ~(FMsk(SSSIZE_SC_WIDTH) | FMsk(SSSIZE_SC_HEIGHT));
- sssize = Sssize_Sc_Width(set->scaled_width-1) |
- Sssize_Sc_Height(set->scaled_height-1);
-
- spoctrl &= ~(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP |
- SPOCTRL_HV_SC_OR | SPOCTRL_VS_UR_C |
- FMsk(SPOCTRL_VPITCH));
- spoctrl |= Spoctrl_Vpitch((set->height<<11)/set->scaled_height);
-
- /* Bypass horiz/vert scaler when same size */
- if (set->scaled_width == set->width)
- spoctrl |= SPOCTRL_H_SC_BP;
- if (set->scaled_height == set->height)
- spoctrl |= SPOCTRL_V_SC_BP;
-
- shctrl &= ~(FMsk(SHCTRL_HPITCH) | SHCTRL_HDECIM);
- shctrl |= Shctrl_Hpitch((set->width<<11)/set->scaled_width);
-
- /* Video plane registers */
- write_reg(vsctrl, VSCTRL);
- write_reg(vscadr, VSCADR);
- write_reg(vubase, VUBASE);
- write_reg(vvbase, VVBASE);
- write_reg(vsadr, VSADR);
-
- /* Video scaler registers */
- write_reg(sssize, SSSIZE);
- write_reg(spoctrl, SPOCTRL);
- write_reg(shctrl, SHCTRL);
-
- /* Clock */
- if (set->enable)
- vovrclk |= 1;
- else
- vovrclk &= ~1;
-
- write_reg(vovrclk, VOVRCLK);
-
- return 0;
-}
-
-static int mbxfb_ioctl_planeorder(struct mbxfb_planeorder *porder)
-{
- unsigned long gscadr, vscadr;
-
- if (porder->bottom == porder->top)
- return -EINVAL;
-
- gscadr = readl(GSCADR);
- vscadr = readl(VSCADR);
-
- gscadr &= ~(FMsk(GSCADR_BLEND_POS));
- vscadr &= ~(FMsk(VSCADR_BLEND_POS));
-
- switch (porder->bottom) {
- case MBXFB_PLANE_GRAPHICS:
- gscadr |= GSCADR_BLEND_GFX;
- break;
- case MBXFB_PLANE_VIDEO:
- vscadr |= VSCADR_BLEND_GFX;
- break;
- default:
- return -EINVAL;
- }
-
- switch (porder->top) {
- case MBXFB_PLANE_GRAPHICS:
- gscadr |= GSCADR_BLEND_VID;
- break;
- case MBXFB_PLANE_VIDEO:
- vscadr |= GSCADR_BLEND_VID;
- break;
- default:
- return -EINVAL;
- }
-
- write_reg_dly(vscadr, VSCADR);
- write_reg_dly(gscadr, GSCADR);
-
- return 0;
-
-}
-
-static int mbxfb_ioctl_alphactl(struct mbxfb_alphaCtl *alpha)
-{
- unsigned long vscadr, vbbase, vcmsk;
- unsigned long gscadr, gbbase, gdrctrl;
-
- vbbase = Vbbase_Glalpha(alpha->overlay_global_alpha) |
- Vbbase_Colkey(alpha->overlay_colorkey);
-
- gbbase = Gbbase_Glalpha(alpha->graphics_global_alpha) |
- Gbbase_Colkey(alpha->graphics_colorkey);
-
- vcmsk = readl(VCMSK);
- vcmsk &= ~(FMsk(VCMSK_COLKEY_M));
- vcmsk |= Vcmsk_colkey_m(alpha->overlay_colorkey_mask);
-
- gdrctrl = readl(GDRCTRL);
- gdrctrl &= ~(FMsk(GDRCTRL_COLKEYM));
- gdrctrl |= Gdrctrl_Colkeym(alpha->graphics_colorkey_mask);
-
- vscadr = readl(VSCADR);
- vscadr &= ~(FMsk(VSCADR_BLEND_M) | VSCADR_COLKEYSRC | VSCADR_COLKEY_EN);
-
- gscadr = readl(GSCADR);
- gscadr &= ~(FMsk(GSCADR_BLEND_M) | GSCADR_COLKEY_EN | GSCADR_COLKEYSRC);
-
- switch (alpha->overlay_colorkey_mode) {
- case MBXFB_COLORKEY_DISABLED:
- break;
- case MBXFB_COLORKEY_PREVIOUS:
- vscadr |= VSCADR_COLKEY_EN;
- break;
- case MBXFB_COLORKEY_CURRENT:
- vscadr |= VSCADR_COLKEY_EN | VSCADR_COLKEYSRC;
- break;
- default:
- return -EINVAL;
- }
-
- switch (alpha->overlay_blend_mode) {
- case MBXFB_ALPHABLEND_NONE:
- vscadr |= VSCADR_BLEND_NONE;
- break;
- case MBXFB_ALPHABLEND_GLOBAL:
- vscadr |= VSCADR_BLEND_GLOB;
- break;
- case MBXFB_ALPHABLEND_PIXEL:
- vscadr |= VSCADR_BLEND_PIX;
- break;
- default:
- return -EINVAL;
- }
-
- switch (alpha->graphics_colorkey_mode) {
- case MBXFB_COLORKEY_DISABLED:
- break;
- case MBXFB_COLORKEY_PREVIOUS:
- gscadr |= GSCADR_COLKEY_EN;
- break;
- case MBXFB_COLORKEY_CURRENT:
- gscadr |= GSCADR_COLKEY_EN | GSCADR_COLKEYSRC;
- break;
- default:
- return -EINVAL;
- }
-
- switch (alpha->graphics_blend_mode) {
- case MBXFB_ALPHABLEND_NONE:
- gscadr |= GSCADR_BLEND_NONE;
- break;
- case MBXFB_ALPHABLEND_GLOBAL:
- gscadr |= GSCADR_BLEND_GLOB;
- break;
- case MBXFB_ALPHABLEND_PIXEL:
- gscadr |= GSCADR_BLEND_PIX;
- break;
- default:
- return -EINVAL;
- }
-
- write_reg_dly(vbbase, VBBASE);
- write_reg_dly(gbbase, GBBASE);
- write_reg_dly(vcmsk, VCMSK);
- write_reg_dly(gdrctrl, GDRCTRL);
- write_reg_dly(gscadr, GSCADR);
- write_reg_dly(vscadr, VSCADR);
-
- return 0;
-}
-
-static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- struct mbxfb_overlaySetup setup;
- struct mbxfb_planeorder porder;
- struct mbxfb_alphaCtl alpha;
- struct mbxfb_reg reg;
- int res;
- __u32 tmp;
-
-	switch (cmd) {
- case MBXFB_IOCX_OVERLAY:
- if (copy_from_user(&setup, (void __user*)arg,
- sizeof(struct mbxfb_overlaySetup)))
- return -EFAULT;
-
- res = mbxfb_setupOverlay(&setup);
- if (res)
- return res;
-
- if (copy_to_user((void __user*)arg, &setup,
- sizeof(struct mbxfb_overlaySetup)))
- return -EFAULT;
-
- return 0;
-
- case MBXFB_IOCS_PLANEORDER:
- if (copy_from_user(&porder, (void __user*)arg,
- sizeof(struct mbxfb_planeorder)))
- return -EFAULT;
-
- return mbxfb_ioctl_planeorder(&porder);
-
- case MBXFB_IOCS_ALPHA:
- if (copy_from_user(&alpha, (void __user*)arg,
- sizeof(struct mbxfb_alphaCtl)))
- return -EFAULT;
-
- return mbxfb_ioctl_alphactl(&alpha);
-
- case MBXFB_IOCS_REG:
- if (copy_from_user(&reg, (void __user*)arg,
- sizeof(struct mbxfb_reg)))
- return -EFAULT;
-
- if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */
- return -EINVAL;
-
- tmp = readl(virt_base_2700 + reg.addr);
- tmp &= ~reg.mask;
- tmp |= reg.val & reg.mask;
- writel(tmp, virt_base_2700 + reg.addr);
-
- return 0;
- case MBXFB_IOCX_REG:
- if (copy_from_user(&reg, (void __user*)arg,
- sizeof(struct mbxfb_reg)))
- return -EFAULT;
-
- if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */
- return -EINVAL;
- reg.val = readl(virt_base_2700 + reg.addr);
-
- if (copy_to_user((void __user*)arg, &reg,
- sizeof(struct mbxfb_reg)))
- return -EFAULT;
-
- return 0;
- }
- return -EINVAL;
-}
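
[The switch above is the driver's whole userspace-facing ioctl surface. A
hedged sketch of a caller, assuming the include/video/mbxfb.h header from
kernel trees of this era and a /dev/fb0 node -- both assumptions, not taken
from this hunk:]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <video/mbxfb.h>	/* MBXFB_IOCS_PLANEORDER, struct mbxfb_planeorder */

	int main(void)
	{
		struct mbxfb_planeorder porder = {
			.bottom = MBXFB_PLANE_GRAPHICS,
			.top = MBXFB_PLANE_VIDEO,
		};
		int fd = open("/dev/fb0", O_RDWR);	/* device node is an assumption */

		if (fd < 0)
			return 1;
		if (ioctl(fd, MBXFB_IOCS_PLANEORDER, &porder))
			perror("MBXFB_IOCS_PLANEORDER");
		close(fd);
		return 0;
	}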
-
-static const struct fb_ops mbxfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = mbxfb_check_var,
- .fb_set_par = mbxfb_set_par,
- .fb_setcolreg = mbxfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
- .fb_blank = mbxfb_blank,
- .fb_ioctl = mbxfb_ioctl,
-};
-
-/*
- Enable external SDRAM controller. Assume that all clocks are active
- by now.
-*/
-static void setup_memc(struct fb_info *fbi)
-{
- unsigned long tmp;
- int i;
-
- /* FIXME: use platform specific parameters */
- /* setup SDRAM controller */
- write_reg_dly((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS |
- LMCFG_LMA_TS),
- LMCFG);
-
- write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR);
-
- /* setup SDRAM timings */
- write_reg_dly((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) |
- Lmtim_Trc(9) | Lmtim_Tdpl(2)),
- LMTIM);
- /* setup SDRAM refresh rate */
- write_reg_dly(0xc2b, LMREFRESH);
- /* setup SDRAM type parameters */
- write_reg_dly((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 |
- LMTYPE_COLSZ_8),
- LMTYPE);
- /* enable memory controller */
- write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR);
- /* perform dummy reads */
-	for (i = 0; i < 16; i++)
-		tmp = readl(fbi->screen_base);
-}
-
-static void enable_clocks(struct fb_info *fbi)
-{
- /* enable clocks */
- write_reg_dly(SYSCLKSRC_PLL_2, SYSCLKSRC);
- write_reg_dly(PIXCLKSRC_PLL_1, PIXCLKSRC);
- write_reg_dly(0x00000000, CLKSLEEP);
-
- /* PLL output = (Frefclk * M) / (N * 2^P )
- *
-	 * M: 0x17, N: 0x3, P: 0x0 == 100 MHz!
-	 * M: 0xb, N: 0x1, P: 0x1 == 71 MHz
- * */
- write_reg_dly((Core_Pll_M(0xb) | Core_Pll_N(0x1) | Core_Pll_P(0x1) |
- CORE_PLL_EN),
- COREPLL);
-
- write_reg_dly((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) |
- DISP_PLL_EN),
- DISPPLL);
-
- write_reg_dly(0x00000000, VOVRCLK);
- write_reg_dly(PIXCLK_EN, PIXCLK);
- write_reg_dly(MEMCLK_EN, MEMCLK);
- write_reg_dly(0x00000001, M24CLK);
- write_reg_dly(0x00000001, MBXCLK);
- write_reg_dly(SDCLK_EN, SDCLK);
- write_reg_dly(0x00000001, PIXCLKDIV);
-}
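
[The comment above gives the PLL relation but not the reference clock, so the
absolute output frequencies cannot be reproduced from this hunk alone. A hedged
helper that simply evaluates the stated formula, treating M, N and P as the raw
field values -- whether the hardware offset-encodes them is not stated here:]

	/* out = (Frefclk * M) / (N * 2^P), per the comment above */
	static unsigned long pll_output_hz(unsigned long fref_hz, unsigned int m,
					   unsigned int n, unsigned int p)
	{
		return (fref_hz * m) / (n << p);
	}

[With the M=0xb, N=0x1, P=0x1 values written to COREPLL above, this evaluates
to fref * 11 / 2.]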
-
-static void setup_graphics(struct fb_info *fbi)
-{
- unsigned long gsctrl;
- unsigned long vscadr;
-
- gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres) |
- Gsctrl_Height(fbi->var.yres);
- switch (fbi->var.bits_per_pixel) {
- case 16:
- if (fbi->var.green.length == 5)
- gsctrl |= GSCTRL_GPIXFMT_ARGB1555;
- else
- gsctrl |= GSCTRL_GPIXFMT_RGB565;
- break;
- case 24:
- gsctrl |= GSCTRL_GPIXFMT_RGB888;
- break;
- case 32:
- gsctrl |= GSCTRL_GPIXFMT_ARGB8888;
- break;
- }
-
- write_reg_dly(gsctrl, GSCTRL);
- write_reg_dly(0x00000000, GBBASE);
- write_reg_dly(0x00ffffff, GDRCTRL);
- write_reg_dly((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR);
- write_reg_dly(0x00000000, GPLUT);
-
- vscadr = readl(VSCADR);
- vscadr &= ~(FMsk(VSCADR_BLEND_POS) | FMsk(VSCADR_BLEND_M));
- vscadr |= VSCADR_BLEND_VID | VSCADR_BLEND_NONE;
- write_reg_dly(vscadr, VSCADR);
-}
-
-static void setup_display(struct fb_info *fbi)
-{
- unsigned long dsctrl = 0;
-
- dsctrl = DSCTRL_BLNK_POL;
- if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
- dsctrl |= DSCTRL_HS_POL;
- if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
- dsctrl |= DSCTRL_VS_POL;
- write_reg_dly(dsctrl, DSCTRL);
- write_reg_dly(0xd0303010, DMCTRL);
- write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
-}
-
-static void enable_controller(struct fb_info *fbi)
-{
- u32 svctrl, shctrl;
-
- write_reg_dly(SYSRST_RST, SYSRST);
-
- /* setup a timeout, raise drive strength */
- write_reg_dly(0xffffff0c, SYSCFG);
-
- enable_clocks(fbi);
- setup_memc(fbi);
- setup_graphics(fbi);
- setup_display(fbi);
-
- shctrl = readl(SHCTRL);
- shctrl &= ~(FMsk(SHCTRL_HINITIAL));
- shctrl |= Shctrl_Hinitial(4<<11);
- writel(shctrl, SHCTRL);
-
- svctrl = Svctrl_Initial1(1<<10) | Svctrl_Initial2(1<<10);
- writel(svctrl, SVCTRL);
-
-	writel(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | SPOCTRL_VORDER_4TAP,
-	       SPOCTRL);
-
- /* Those coefficients are good for scaling up. For scaling
- * down, the application has to calculate them. */
- write_reg(0xff000100, VSCOEFF0);
- write_reg(0xfdfcfdfe, VSCOEFF1);
- write_reg(0x170d0500, VSCOEFF2);
- write_reg(0x3d372d22, VSCOEFF3);
- write_reg(0x00000040, VSCOEFF4);
-
- write_reg(0xff010100, HSCOEFF0);
- write_reg(0x00000000, HSCOEFF1);
- write_reg(0x02010000, HSCOEFF2);
- write_reg(0x01020302, HSCOEFF3);
- write_reg(0xf9fbfe00, HSCOEFF4);
- write_reg(0xfbf7f6f7, HSCOEFF5);
- write_reg(0x1c110700, HSCOEFF6);
- write_reg(0x3e393127, HSCOEFF7);
- write_reg(0x00000040, HSCOEFF8);
-
-}
-
-#ifdef CONFIG_PM
-/*
- * Power management hooks. Note that we won't be called from IRQ context,
- * unlike the blank functions above, so we may sleep.
- */
-static int mbxfb_suspend(struct platform_device *dev, pm_message_t state)
-{
- /* make frame buffer memory enter self-refresh mode */
- write_reg_dly(LMPWR_MC_PWR_SRM, LMPWR);
- while (readl(LMPWRSTAT) != LMPWRSTAT_MC_PWR_SRM)
- ; /* empty statement */
-
-	/* reset the device, since its initial state is 'mostly sleeping' */
- write_reg_dly(SYSRST_RST, SYSRST);
- return 0;
-}
-
-static int mbxfb_resume(struct platform_device *dev)
-{
- struct fb_info *fbi = platform_get_drvdata(dev);
-
- enable_clocks(fbi);
-/* setup_graphics(fbi); */
-/* setup_display(fbi); */
-
- write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL);
- return 0;
-}
-#else
-#define mbxfb_suspend NULL
-#define mbxfb_resume NULL
-#endif
-
-/* debugfs entries */
-#ifndef CONFIG_FB_MBX_DEBUG
-#define mbxfb_debugfs_init(x) do {} while(0)
-#define mbxfb_debugfs_remove(x) do {} while(0)
-#else
-#include "mbxdebugfs.c"
-#endif
-
-#define res_size(_r) (((_r)->end - (_r)->start) + 1)
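
[res_size() is a local spelling of what <linux/ioport.h> already provides as
resource_size(); the two compute the same inclusive length:]

	#include <linux/ioport.h>

	/* equivalent to the local res_size() macro above */
	static resource_size_t mbx_res_size(const struct resource *r)
	{
		return resource_size(r);	/* r->end - r->start + 1 */
	}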
-
-static int mbxfb_probe(struct platform_device *dev)
-{
- int ret;
- struct fb_info *fbi;
- struct mbxfb_info *mfbi;
- struct mbxfb_platform_data *pdata;
-
- dev_dbg(&dev->dev, "mbxfb_probe\n");
-
- pdata = dev_get_platdata(&dev->dev);
- if (!pdata) {
- dev_err(&dev->dev, "platform data is required\n");
- return -EINVAL;
- }
-
- fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev);
- if (!fbi)
- return -ENOMEM;
-
- mfbi = fbi->par;
- fbi->pseudo_palette = mfbi->pseudo_palette;
-
-
- if (pdata->probe)
- mfbi->platform_probe = pdata->probe;
- if (pdata->remove)
- mfbi->platform_remove = pdata->remove;
-
- mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
-
- if (!mfbi->fb_res || !mfbi->reg_res) {
- dev_err(&dev->dev, "no resources found\n");
- ret = -ENODEV;
- goto err1;
- }
-
- mfbi->fb_req = request_mem_region(mfbi->fb_res->start,
- res_size(mfbi->fb_res), dev->name);
- if (mfbi->fb_req == NULL) {
- dev_err(&dev->dev, "failed to claim framebuffer memory\n");
- ret = -EINVAL;
- goto err1;
- }
- mfbi->fb_phys_addr = mfbi->fb_res->start;
-
- mfbi->reg_req = request_mem_region(mfbi->reg_res->start,
- res_size(mfbi->reg_res), dev->name);
- if (mfbi->reg_req == NULL) {
- dev_err(&dev->dev, "failed to claim Marathon registers\n");
- ret = -EINVAL;
- goto err2;
- }
- mfbi->reg_phys_addr = mfbi->reg_res->start;
-
- mfbi->reg_virt_addr = devm_ioremap(&dev->dev,
- mfbi->reg_phys_addr,
- res_size(mfbi->reg_req));
- if (!mfbi->reg_virt_addr) {
- dev_err(&dev->dev, "failed to ioremap Marathon registers\n");
- ret = -EINVAL;
- goto err3;
- }
- virt_base_2700 = mfbi->reg_virt_addr;
-
- mfbi->fb_virt_addr = devm_ioremap(&dev->dev, mfbi->fb_phys_addr,
- res_size(mfbi->fb_req));
- if (!mfbi->fb_virt_addr) {
- dev_err(&dev->dev, "failed to ioremap frame buffer\n");
- ret = -EINVAL;
- goto err3;
- }
-
- fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000);
- fbi->screen_size = pdata->memsize;
- fbi->fbops = &mbxfb_ops;
-
- fbi->var = mbxfb_default;
- fbi->fix = mbxfb_fix;
- fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000;
- fbi->fix.smem_len = pdata->memsize;
- fbi->fix.line_length = mbxfb_default.xres_virtual *
- mbxfb_default.bits_per_pixel / 8;
-
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret < 0) {
- dev_err(&dev->dev, "fb_alloc_cmap failed\n");
- ret = -EINVAL;
- goto err3;
- }
-
- platform_set_drvdata(dev, fbi);
-
- fb_info(fbi, "mbx frame buffer device\n");
-
- if (mfbi->platform_probe)
- mfbi->platform_probe(fbi);
-
- enable_controller(fbi);
-
- mbxfb_debugfs_init(fbi);
-
- ret = register_framebuffer(fbi);
- if (ret < 0) {
- dev_err(&dev->dev, "register_framebuffer failed\n");
- ret = -EINVAL;
- goto err6;
- }
-
- return 0;
-
-err6:
- fb_dealloc_cmap(&fbi->cmap);
-err3:
- release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res));
-err2:
- release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res));
-err1:
- framebuffer_release(fbi);
-
- return ret;
-}
-
-static int mbxfb_remove(struct platform_device *dev)
-{
- struct fb_info *fbi = platform_get_drvdata(dev);
-
- write_reg_dly(SYSRST_RST, SYSRST);
-
- mbxfb_debugfs_remove(fbi);
-
- if (fbi) {
- struct mbxfb_info *mfbi = fbi->par;
-
- unregister_framebuffer(fbi);
- if (mfbi) {
- if (mfbi->platform_remove)
- mfbi->platform_remove(fbi);
-
-
- if (mfbi->reg_req)
- release_mem_region(mfbi->reg_req->start,
- res_size(mfbi->reg_req));
- if (mfbi->fb_req)
- release_mem_region(mfbi->fb_req->start,
- res_size(mfbi->fb_req));
- }
- framebuffer_release(fbi);
- }
-
- return 0;
-}
-
-static struct platform_driver mbxfb_driver = {
- .probe = mbxfb_probe,
- .remove = mbxfb_remove,
- .suspend = mbxfb_suspend,
- .resume = mbxfb_resume,
- .driver = {
- .name = "mbx-fb",
- },
-};
-
-module_platform_driver(mbxfb_driver);
-
-MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
-MODULE_AUTHOR("Mike Rapoport, Compulab");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/mbx/reg_bits.h b/drivers/video/fbdev/mbx/reg_bits.h
deleted file mode 100644
index 6607f353639b..000000000000
--- a/drivers/video/fbdev/mbx/reg_bits.h
+++ /dev/null
@@ -1,614 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __REG_BITS_2700G_
-#define __REG_BITS_2700G_
-
-/* use defines from asm-arm/arch-pxa/bitfields.h for bit fields access */
-#define UData(Data) ((unsigned long) (Data))
-#define Fld(Size, Shft) (((Size) << 16) + (Shft))
-#define FSize(Field) ((Field) >> 16)
-#define FShft(Field) ((Field) & 0x0000FFFF)
-#define FMsk(Field) (((UData (1) << FSize (Field)) - 1) << FShft (Field))
-#define FAlnMsk(Field) ((UData (1) << FSize (Field)) - 1)
-#define F1stBit(Field) (UData (1) << FShft (Field))
-
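[A worked expansion of the field macros above, using CORE_PLL_M = Fld(6,7)
from further down in this header as the example; the values shown are just the
macro arithmetic:]

	/*
	 * Fld(6,7)  = (6 << 16) + 7 = 0x00060007   (size 6, shift 7)
	 * FSize(f)  = 6
	 * FShft(f)  = 7
	 * FMsk(f)   = ((1UL << 6) - 1) << 7 = 0x00001f80
	 *
	 * so Core_Pll_M(x) places x into register bits 12..7, and
	 * FMsk(CORE_PLL_M) is the mask used to clear the field first.
	 */
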
-#define SYSRST_RST (1 << 0)
-
-/* SYSCLKSRC - SYSCLK Source Control Register */
-#define SYSCLKSRC_SEL Fld(2,0)
-#define SYSCLKSRC_REF ((0x0) << FShft(SYSCLKSRC_SEL))
-#define SYSCLKSRC_PLL_1 ((0x1) << FShft(SYSCLKSRC_SEL))
-#define SYSCLKSRC_PLL_2 ((0x2) << FShft(SYSCLKSRC_SEL))
-
-/* PIXCLKSRC - PIXCLK Source Control Register */
-#define PIXCLKSRC_SEL Fld(2,0)
-#define PIXCLKSRC_REF ((0x0) << FShft(PIXCLKSRC_SEL))
-#define PIXCLKSRC_PLL_1 ((0x1) << FShft(PIXCLKSRC_SEL))
-#define PIXCLKSRC_PLL_2 ((0x2) << FShft(PIXCLKSRC_SEL))
-
-/* Clock Disable Register */
-#define CLKSLEEP_SLP (1 << 0)
-
-/* Core PLL Control Register */
-#define CORE_PLL_M Fld(6,7)
-#define Core_Pll_M(x) ((x) << FShft(CORE_PLL_M))
-#define CORE_PLL_N Fld(3,4)
-#define Core_Pll_N(x) ((x) << FShft(CORE_PLL_N))
-#define CORE_PLL_P Fld(3,1)
-#define Core_Pll_P(x) ((x) << FShft(CORE_PLL_P))
-#define CORE_PLL_EN (1 << 0)
-
-/* Display PLL Control Register */
-#define DISP_PLL_M Fld(6,7)
-#define Disp_Pll_M(x) ((x) << FShft(DISP_PLL_M))
-#define DISP_PLL_N Fld(3,4)
-#define Disp_Pll_N(x) ((x) << FShft(DISP_PLL_N))
-#define DISP_PLL_P Fld(3,1)
-#define Disp_Pll_P(x) ((x) << FShft(DISP_PLL_P))
-#define DISP_PLL_EN (1 << 0)
-
-/* PLL status register */
-#define PLLSTAT_CORE_PLL_LOST_L (1 << 3)
-#define PLLSTAT_CORE_PLL_LSTS (1 << 2)
-#define PLLSTAT_DISP_PLL_LOST_L (1 << 1)
-#define PLLSTAT_DISP_PLL_LSTS (1 << 0)
-
-/* Video and scale clock control register */
-#define VOVRCLK_EN (1 << 0)
-
-/* Pixel clock control register */
-#define PIXCLK_EN (1 << 0)
-
-/* Memory clock control register */
-#define MEMCLK_EN (1 << 0)
-
-/* MBX clock control register */
-#define MBXCLK_DIV Fld(2,2)
-#define MBXCLK_DIV_1 ((0x0) << FShft(MBXCLK_DIV))
-#define MBXCLK_DIV_2 ((0x1) << FShft(MBXCLK_DIV))
-#define MBXCLK_DIV_3 ((0x2) << FShft(MBXCLK_DIV))
-#define MBXCLK_DIV_4 ((0x3) << FShft(MBXCLK_DIV))
-#define MBXCLK_EN Fld(2,0)
-#define MBXCLK_EN_NONE ((0x0) << FShft(MBXCLK_EN))
-#define MBXCLK_EN_2D ((0x1) << FShft(MBXCLK_EN))
-#define MBXCLK_EN_BOTH ((0x2) << FShft(MBXCLK_EN))
-
-/* M24 clock control register */
-#define M24CLK_DIV Fld(2,1)
-#define M24CLK_DIV_1 ((0x0) << FShft(M24CLK_DIV))
-#define M24CLK_DIV_2 ((0x1) << FShft(M24CLK_DIV))
-#define M24CLK_DIV_3 ((0x2) << FShft(M24CLK_DIV))
-#define M24CLK_DIV_4 ((0x3) << FShft(M24CLK_DIV))
-#define M24CLK_EN (1 << 0)
-
-/* SDRAM clock control register */
-#define SDCLK_EN (1 << 0)
-
-/* PixClk Divisor Register */
-#define PIXCLKDIV_PD Fld(9,0)
-#define Pixclkdiv_Pd(x) ((x) << FShft(PIXCLKDIV_PD))
-
-/* LCD Config control register */
-#define LCDCFG_IN_FMT Fld(3,28)
-#define Lcdcfg_In_Fmt(x) ((x) << FShft(LCDCFG_IN_FMT))
-#define LCDCFG_LCD1DEN_POL (1 << 27)
-#define LCDCFG_LCD1FCLK_POL (1 << 26)
-#define LCDCFG_LCD1LCLK_POL (1 << 25)
-#define LCDCFG_LCD1D_POL (1 << 24)
-#define LCDCFG_LCD2DEN_POL (1 << 23)
-#define LCDCFG_LCD2FCLK_POL (1 << 22)
-#define LCDCFG_LCD2LCLK_POL (1 << 21)
-#define LCDCFG_LCD2D_POL (1 << 20)
-#define LCDCFG_LCD1_TS (1 << 19)
-#define LCDCFG_LCD1D_DS (1 << 18)
-#define LCDCFG_LCD1C_DS (1 << 17)
-#define LCDCFG_LCD1_IS_IN (1 << 16)
-#define LCDCFG_LCD2_TS (1 << 3)
-#define LCDCFG_LCD2D_DS (1 << 2)
-#define LCDCFG_LCD2C_DS (1 << 1)
-#define LCDCFG_LCD2_IS_IN (1 << 0)
-
-/* On-Die Frame Buffer Power Control Register */
-#define ODFBPWR_SLOW (1 << 2)
-#define ODFBPWR_MODE Fld(2,0)
-#define ODFBPWR_MODE_ACT ((0x0) << FShft(ODFBPWR_MODE))
-#define ODFBPWR_MODE_ACT_LP ((0x1) << FShft(ODFBPWR_MODE))
-#define ODFBPWR_MODE_SLEEP ((0x2) << FShft(ODFBPWR_MODE))
-#define ODFBPWR_MODE_SHUTD ((0x3) << FShft(ODFBPWR_MODE))
-
-/* On-Die Frame Buffer Power State Status Register */
-#define ODFBSTAT_ACT (1 << 2)
-#define ODFBSTAT_SLP (1 << 1)
-#define ODFBSTAT_SDN (1 << 0)
-
-/* LMRST - Local Memory (SDRAM) Reset */
-#define LMRST_MC_RST (1 << 0)
-
-/* LMCFG - Local Memory (SDRAM) Configuration Register */
-#define LMCFG_LMC_DS (1 << 5)
-#define LMCFG_LMD_DS (1 << 4)
-#define LMCFG_LMA_DS (1 << 3)
-#define LMCFG_LMC_TS (1 << 2)
-#define LMCFG_LMD_TS (1 << 1)
-#define LMCFG_LMA_TS (1 << 0)
-
-/* LMPWR - Local Memory (SDRAM) Power Control Register */
-#define LMPWR_MC_PWR_CNT Fld(2,0)
-#define LMPWR_MC_PWR_ACT ((0x0) << FShft(LMPWR_MC_PWR_CNT)) /* Active */
-#define LMPWR_MC_PWR_SRM ((0x1) << FShft(LMPWR_MC_PWR_CNT)) /* Self-refresh */
-#define LMPWR_MC_PWR_DPD ((0x3) << FShft(LMPWR_MC_PWR_CNT)) /* deep power down */
-
-/* LMPWRSTAT - Local Memory (SDRAM) Power Status Register */
-#define LMPWRSTAT_MC_PWR_CNT Fld(2,0)
-#define LMPWRSTAT_MC_PWR_ACT ((0x0) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Active */
-#define LMPWRSTAT_MC_PWR_SRM ((0x1) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* Self-refresh */
-#define LMPWRSTAT_MC_PWR_DPD ((0x3) << FShft(LMPWRSTAT_MC_PWR_CNT)) /* deep power down */
-
-/* LMTYPE - Local Memory (SDRAM) Type Register */
-#define LMTYPE_CASLAT Fld(3,10)
-#define LMTYPE_CASLAT_1 ((0x1) << FShft(LMTYPE_CASLAT))
-#define LMTYPE_CASLAT_2 ((0x2) << FShft(LMTYPE_CASLAT))
-#define LMTYPE_CASLAT_3 ((0x3) << FShft(LMTYPE_CASLAT))
-#define LMTYPE_BKSZ Fld(2,8)
-#define LMTYPE_BKSZ_1 ((0x1) << FShft(LMTYPE_BKSZ))
-#define LMTYPE_BKSZ_2 ((0x2) << FShft(LMTYPE_BKSZ))
-#define LMTYPE_ROWSZ Fld(4,4)
-#define LMTYPE_ROWSZ_11 ((0xb) << FShft(LMTYPE_ROWSZ))
-#define LMTYPE_ROWSZ_12 ((0xc) << FShft(LMTYPE_ROWSZ))
-#define LMTYPE_ROWSZ_13 ((0xd) << FShft(LMTYPE_ROWSZ))
-#define LMTYPE_COLSZ Fld(4,0)
-#define LMTYPE_COLSZ_7 ((0x7) << FShft(LMTYPE_COLSZ))
-#define LMTYPE_COLSZ_8 ((0x8) << FShft(LMTYPE_COLSZ))
-#define LMTYPE_COLSZ_9 ((0x9) << FShft(LMTYPE_COLSZ))
-#define LMTYPE_COLSZ_10 ((0xa) << FShft(LMTYPE_COLSZ))
-#define LMTYPE_COLSZ_11 ((0xb) << FShft(LMTYPE_COLSZ))
-#define LMTYPE_COLSZ_12 ((0xc) << FShft(LMTYPE_COLSZ))
-
-/* LMTIM - Local Memory (SDRAM) Timing Register */
-#define LMTIM_TRAS Fld(4,16)
-#define Lmtim_Tras(x) ((x) << FShft(LMTIM_TRAS))
-#define LMTIM_TRP Fld(4,12)
-#define Lmtim_Trp(x) ((x) << FShft(LMTIM_TRP))
-#define LMTIM_TRCD Fld(4,8)
-#define Lmtim_Trcd(x) ((x) << FShft(LMTIM_TRCD))
-#define LMTIM_TRC Fld(4,4)
-#define Lmtim_Trc(x) ((x) << FShft(LMTIM_TRC))
-#define LMTIM_TDPL Fld(4,0)
-#define Lmtim_Tdpl(x) ((x) << FShft(LMTIM_TDPL))
-
-/* LMREFRESH - Local Memory (SDRAM) tREF Control Register */
-#define LMREFRESH_TREF Fld(2,0)
-#define Lmrefresh_Tref(x) ((x) << FShft(LMREFRESH_TREF))
-
-/* GSCTRL - Graphics surface control register */
-#define GSCTRL_LUT_EN (1 << 31)
-#define GSCTRL_GPIXFMT Fld(4,27)
-#define GSCTRL_GPIXFMT_INDEXED ((0x0) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GPIXFMT_ARGB4444 ((0x4) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GPIXFMT_ARGB1555 ((0x5) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GPIXFMT_RGB888 ((0x6) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GPIXFMT_RGB565 ((0x7) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GPIXFMT_ARGB8888 ((0x8) << FShft(GSCTRL_GPIXFMT))
-#define GSCTRL_GAMMA_EN (1 << 26)
-
-#define GSCTRL_GSWIDTH Fld(11,11)
-#define Gsctrl_Width(Pixel) /* Display Width [1..2048 pix.] */ \
- (((Pixel) - 1) << FShft(GSCTRL_GSWIDTH))
-
-#define GSCTRL_GSHEIGHT Fld(11,0)
-#define Gsctrl_Height(Pixel) /* Display Height [1..2048 pix.] */ \
- (((Pixel) - 1) << FShft(GSCTRL_GSHEIGHT))
-
-/* GBBASE fields */
-#define GBBASE_GLALPHA Fld(8,24)
-#define Gbbase_Glalpha(x) ((x) << FShft(GBBASE_GLALPHA))
-
-#define GBBASE_COLKEY Fld(24,0)
-#define Gbbase_Colkey(x) ((x) << FShft(GBBASE_COLKEY))
-
-/* GDRCTRL fields */
-#define GDRCTRL_PIXDBL (1 << 31)
-#define GDRCTRL_PIXHLV (1 << 30)
-#define GDRCTRL_LNDBL (1 << 29)
-#define GDRCTRL_LNHLV (1 << 28)
-#define GDRCTRL_COLKEYM Fld(24,0)
-#define Gdrctrl_Colkeym(x) ((x) << FShft(GDRCTRL_COLKEYM))
-
-/* GSCADR graphics stream control address register fields */
-#define GSCADR_STR_EN (1 << 31)
-#define GSCADR_COLKEY_EN (1 << 30)
-#define GSCADR_COLKEYSRC (1 << 29)
-#define GSCADR_BLEND_M Fld(2,27)
-#define GSCADR_BLEND_NONE ((0x0) << FShft(GSCADR_BLEND_M))
-#define GSCADR_BLEND_INV ((0x1) << FShft(GSCADR_BLEND_M))
-#define GSCADR_BLEND_GLOB ((0x2) << FShft(GSCADR_BLEND_M))
-#define GSCADR_BLEND_PIX ((0x3) << FShft(GSCADR_BLEND_M))
-#define GSCADR_BLEND_POS Fld(2,24)
-#define GSCADR_BLEND_GFX ((0x0) << FShft(GSCADR_BLEND_POS))
-#define GSCADR_BLEND_VID ((0x1) << FShft(GSCADR_BLEND_POS))
-#define GSCADR_BLEND_CUR ((0x2) << FShft(GSCADR_BLEND_POS))
-#define GSCADR_GBASE_ADR Fld(23,0)
-#define Gscadr_Gbase_Adr(x) ((x) << FShft(GSCADR_GBASE_ADR))
-
-/* GSADR graphics stride address register fields */
-#define GSADR_SRCSTRIDE Fld(10,22)
-#define Gsadr_Srcstride(x) ((x) << FShft(GSADR_SRCSTRIDE))
-#define GSADR_XSTART Fld(11,11)
-#define Gsadr_Xstart(x) ((x) << FShft(GSADR_XSTART))
-#define GSADR_YSTART Fld(11,0)
-#define Gsadr_Ystart(y) ((y) << FShft(GSADR_YSTART))
-
-/* GPLUT graphics palette register fields */
-#define GPLUT_LUTADR Fld(8,24)
-#define Gplut_Lutadr(x) ((x) << FShft(GPLUT_LUTADR))
-#define GPLUT_LUTDATA Fld(24,0)
-#define Gplut_Lutdata(x) ((x) << FShft(GPLUT_LUTDATA))
-
-/* VSCTRL - Video Surface Control Register */
-#define VSCTRL_VPIXFMT Fld(4,27)
-#define VSCTRL_VPIXFMT_YUV12 ((0x9) << FShft(VSCTRL_VPIXFMT))
-#define VSCTRL_VPIXFMT_UY0VY1 ((0xc) << FShft(VSCTRL_VPIXFMT))
-#define VSCTRL_VPIXFMT_VY0UY1 ((0xd) << FShft(VSCTRL_VPIXFMT))
-#define VSCTRL_VPIXFMT_Y0UY1V ((0xe) << FShft(VSCTRL_VPIXFMT))
-#define VSCTRL_VPIXFMT_Y0VY1U ((0xf) << FShft(VSCTRL_VPIXFMT))
-#define VSCTRL_GAMMA_EN (1 << 26)
-#define VSCTRL_CSC_EN (1 << 25)
-#define VSCTRL_COSITED (1 << 22)
-#define VSCTRL_VSWIDTH Fld(11,11)
-#define Vsctrl_Width(Pixels) /* Video Width [1-2048] */ \
- (((Pixels) - 1) << FShft(VSCTRL_VSWIDTH))
-#define VSCTRL_VSHEIGHT Fld(11,0)
-#define Vsctrl_Height(Pixels) /* Video Height [1-2048] */ \
- (((Pixels) - 1) << FShft(VSCTRL_VSHEIGHT))
-
-/* VBBASE - Video Blending Base Register */
-#define VBBASE_GLALPHA Fld(8,24)
-#define Vbbase_Glalpha(x) ((x) << FShft(VBBASE_GLALPHA))
-
-#define VBBASE_COLKEY Fld(24,0)
-#define Vbbase_Colkey(x) ((x) << FShft(VBBASE_COLKEY))
-
-/* VCMSK - Video Color Key Mask Register */
-#define VCMSK_COLKEY_M Fld(24,0)
-#define Vcmsk_colkey_m(x) ((x) << FShft(VCMSK_COLKEY_M))
-
-/* VSCADR - Video Stream Control Address Register */
-#define VSCADR_STR_EN (1 << 31)
-#define VSCADR_COLKEY_EN (1 << 30)
-#define VSCADR_COLKEYSRC (1 << 29)
-#define VSCADR_BLEND_M Fld(2,27)
-#define VSCADR_BLEND_NONE ((0x0) << FShft(VSCADR_BLEND_M))
-#define VSCADR_BLEND_INV ((0x1) << FShft(VSCADR_BLEND_M))
-#define VSCADR_BLEND_GLOB ((0x2) << FShft(VSCADR_BLEND_M))
-#define VSCADR_BLEND_PIX ((0x3) << FShft(VSCADR_BLEND_M))
-#define VSCADR_BLEND_POS Fld(2,24)
-#define VSCADR_BLEND_GFX ((0x0) << FShft(VSCADR_BLEND_POS))
-#define VSCADR_BLEND_VID ((0x1) << FShft(VSCADR_BLEND_POS))
-#define VSCADR_BLEND_CUR ((0x2) << FShft(VSCADR_BLEND_POS))
-#define VSCADR_VBASE_ADR Fld(23,0)
-#define Vscadr_Vbase_Adr(x) ((x) << FShft(VSCADR_VBASE_ADR))
-
-/* VUBASE - Video U Base Register */
-#define VUBASE_UVHALFSTR (1 << 31)
-#define VUBASE_UBASE_ADR Fld(24,0)
-#define Vubase_Ubase_Adr(x) ((x) << FShft(VUBASE_UBASE_ADR))
-
-/* VVBASE - Video V Base Register */
-#define VVBASE_VBASE_ADR Fld(24,0)
-#define Vvbase_Vbase_Adr(x) ((x) << FShft(VVBASE_VBASE_ADR))
-
-/* VSADR - Video Stride Address Register */
-#define VSADR_SRCSTRIDE Fld(10,22)
-#define Vsadr_Srcstride(x) ((x) << FShft(VSADR_SRCSTRIDE))
-#define VSADR_XSTART Fld(11,11)
-#define Vsadr_Xstart(x) ((x) << FShft(VSADR_XSTART))
-#define VSADR_YSTART Fld(11,0)
-#define Vsadr_Ystart(x) ((x) << FShft(VSADR_YSTART))
-
-/* HCCTRL - Hardware Cursor Register fields */
-#define HCCTRL_CUR_EN (1 << 31)
-#define HCCTRL_COLKEY_EN (1 << 29)
-#define HCCTRL_COLKEYSRC (1 << 28)
-#define HCCTRL_BLEND_M Fld(2,26)
-#define HCCTRL_BLEND_NONE ((0x0) << FShft(HCCTRL_BLEND_M))
-#define HCCTRL_BLEND_INV ((0x1) << FShft(HCCTRL_BLEND_M))
-#define HCCTRL_BLEND_GLOB ((0x2) << FShft(HCCTRL_BLEND_M))
-#define HCCTRL_BLEND_PIX ((0x3) << FShft(HCCTRL_BLEND_M))
-#define HCCTRL_CPIXFMT Fld(3,23)
-#define HCCTRL_CPIXFMT_RGB332 ((0x3) << FShft(HCCTRL_CPIXFMT))
-#define HCCTRL_CPIXFMT_ARGB4444 ((0x4) << FShft(HCCTRL_CPIXFMT))
-#define HCCTRL_CPIXFMT_ARGB1555 ((0x5) << FShft(HCCTRL_CPIXFMT))
-#define HCCTRL_CBASE_ADR Fld(23,0)
-#define Hcctrl_Cbase_Adr(x) ((x) << FShft(HCCTRL_CBASE_ADR))
-
-/* HCSIZE Hardware Cursor Size Register fields */
-#define HCSIZE_BLEND_POS Fld(2,29)
-#define HCSIZE_BLEND_GFX ((0x0) << FShft(HCSIZE_BLEND_POS))
-#define HCSIZE_BLEND_VID ((0x1) << FShft(HCSIZE_BLEND_POS))
-#define HCSIZE_BLEND_CUR ((0x2) << FShft(HCSIZE_BLEND_POS))
-#define HCSIZE_CWIDTH Fld(3,16)
-#define Hcsize_Cwidth(x) ((x) << FShft(HCSIZE_CWIDTH))
-#define HCSIZE_CHEIGHT Fld(3,0)
-#define Hcsize_Cheight(x) ((x) << FShft(HCSIZE_CHEIGHT))
-
-/* HCPOS Hardware Cursor Position Register fields */
-#define HCPOS_SWITCHSRC (1 << 30)
-#define HCPOS_CURBLINK Fld(6,24)
-#define Hcpos_Curblink(x) ((x) << FShft(HCPOS_CURBLINK))
-#define HCPOS_XSTART Fld(12,12)
-#define Hcpos_Xstart(x) ((x) << FShft(HCPOS_XSTART))
-#define HCPOS_YSTART Fld(12,0)
-#define Hcpos_Ystart(y) ((y) << FShft(HCPOS_YSTART))
-
-/* HCBADR Hardware Cursor Blend Address Register */
-#define HCBADR_GLALPHA Fld(8,24)
-#define Hcbadr_Glalpha(x) ((x) << FShft(HCBADR_GLALPHA))
-#define HCBADR_COLKEY Fld(24,0)
-#define Hcbadr_Colkey(x) ((x) << FShft(HCBADR_COLKEY))
-
-/* HCCKMSK - Hardware Cursor Color Key Mask Register */
-#define HCCKMSK_COLKEY_M Fld(24,0)
-#define Hcckmsk_Colkey_M(x) ((x) << FShft(HCCKMSK_COLKEY_M))
-
-/* DSCTRL - Display sync control register */
-#define DSCTRL_SYNCGEN_EN (1 << 31)
-#define DSCTRL_DPL_RST (1 << 29)
-#define DSCTRL_PWRDN_M (1 << 28)
-#define DSCTRL_UPDSYNCCNT (1 << 26)
-#define DSCTRL_UPDINTCNT (1 << 25)
-#define DSCTRL_UPDCNT (1 << 24)
-#define DSCTRL_UPDWAIT Fld(4,16)
-#define Dsctrl_Updwait(x) ((x) << FShft(DSCTRL_UPDWAIT))
-#define DSCTRL_CLKPOL (1 << 11)
-#define DSCTRL_CSYNC_EN (1 << 10)
-#define DSCTRL_VS_SLAVE (1 << 7)
-#define DSCTRL_HS_SLAVE (1 << 6)
-#define DSCTRL_BLNK_POL (1 << 5)
-#define DSCTRL_BLNK_DIS (1 << 4)
-#define DSCTRL_VS_POL (1 << 3)
-#define DSCTRL_VS_DIS (1 << 2)
-#define DSCTRL_HS_POL (1 << 1)
-#define DSCTRL_HS_DIS (1 << 0)
-
-/* DHT01 - Display horizontal timing register 01 */
-#define DHT01_HBPS Fld(12,16)
-#define Dht01_Hbps(x) ((x) << FShft(DHT01_HBPS))
-#define DHT01_HT Fld(12,0)
-#define Dht01_Ht(x) ((x) << FShft(DHT01_HT))
-
-/* DHT02 - Display horizontal timing register 02 */
-#define DHT02_HAS Fld(12,16)
-#define Dht02_Has(x) ((x) << FShft(DHT02_HAS))
-#define DHT02_HLBS Fld(12,0)
-#define Dht02_Hlbs(x) ((x) << FShft(DHT02_HLBS))
-
-/* DHT03 - Display horizontal timing register 03 */
-#define DHT03_HFPS Fld(12,16)
-#define Dht03_Hfps(x) ((x) << FShft(DHT03_HFPS))
-#define DHT03_HRBS Fld(12,0)
-#define Dht03_Hrbs(x) ((x) << FShft(DHT03_HRBS))
-
-/* DVT01 - Display vertical timing register 01 */
-#define DVT01_VBPS Fld(12,16)
-#define Dvt01_Vbps(x) ((x) << FShft(DVT01_VBPS))
-#define DVT01_VT Fld(12,0)
-#define Dvt01_Vt(x) ((x) << FShft(DVT01_VT))
-
-/* DVT02 - Display vertical timing register 02 */
-#define DVT02_VAS Fld(12,16)
-#define Dvt02_Vas(x) ((x) << FShft(DVT02_VAS))
-#define DVT02_VTBS Fld(12,0)
-#define Dvt02_Vtbs(x) ((x) << FShft(DVT02_VTBS))
-
-/* DVT03 - Display vertical timing register 03 */
-#define DVT03_VFPS Fld(12,16)
-#define Dvt03_Vfps(x) ((x) << FShft(DVT03_VFPS))
-#define DVT03_VBBS Fld(12,0)
-#define Dvt03_Vbbs(x) ((x) << FShft(DVT03_VBBS))
-
-/* DVECTRL - display vertical event control register */
-#define DVECTRL_VEVENT Fld(12,16)
-#define Dvectrl_Vevent(x) ((x) << FShft(DVECTRL_VEVENT))
-#define DVECTRL_VFETCH Fld(12,0)
-#define Dvectrl_Vfetch(x) ((x) << FShft(DVECTRL_VFETCH))
-
-/* DHDET - display horizontal DE timing register */
-#define DHDET_HDES Fld(12,16)
-#define Dhdet_Hdes(x) ((x) << FShft(DHDET_HDES))
-#define DHDET_HDEF Fld(12,0)
-#define Dhdet_Hdef(x) ((x) << FShft(DHDET_HDEF))
-
-/* DVDET - display vertical DE timing register */
-#define DVDET_VDES Fld(12,16)
-#define Dvdet_Vdes(x) ((x) << FShft(DVDET_VDES))
-#define DVDET_VDEF Fld(12,0)
-#define Dvdet_Vdef(x) ((x) << FShft(DVDET_VDEF))
-
-/* DODMSK - display output data mask register */
-#define DODMSK_MASK_LVL (1 << 31)
-#define DODMSK_BLNK_LVL (1 << 30)
-#define DODMSK_MASK_B Fld(8,16)
-#define Dodmsk_Mask_B(x) ((x) << FShft(DODMSK_MASK_B))
-#define DODMSK_MASK_G Fld(8,8)
-#define Dodmsk_Mask_G(x) ((x) << FShft(DODMSK_MASK_G))
-#define DODMSK_MASK_R Fld(8,0)
-#define Dodmsk_Mask_R(x) ((x) << FShft(DODMSK_MASK_R))
-
-/* DBCOL - display border color control register */
-#define DBCOL_BORDCOL Fld(24,0)
-#define Dbcol_Bordcol(x) ((x) << FShft(DBCOL_BORDCOL))
-
-/* DVLNUM - display vertical line number register */
-#define DVLNUM_VLINE Fld(12,0)
-#define Dvlnum_Vline(x) ((x) << FShft(DVLNUM_VLINE))
-
-/* DMCTRL - Display Memory Control Register */
-#define DMCTRL_MEM_REF Fld(2,30)
-#define DMCTRL_MEM_REF_ACT ((0x0) << FShft(DMCTRL_MEM_REF))
-#define DMCTRL_MEM_REF_HB ((0x1) << FShft(DMCTRL_MEM_REF))
-#define DMCTRL_MEM_REF_VB ((0x2) << FShft(DMCTRL_MEM_REF))
-#define DMCTRL_MEM_REF_BOTH ((0x3) << FShft(DMCTRL_MEM_REF))
-#define DMCTRL_UV_THRHLD Fld(6,24)
-#define Dmctrl_Uv_Thrhld(x) ((x) << FShft(DMCTRL_UV_THRHLD))
-#define DMCTRL_V_THRHLD Fld(7,16)
-#define Dmctrl_V_Thrhld(x) ((x) << FShft(DMCTRL_V_THRHLD))
-#define DMCTRL_D_THRHLD Fld(7,8)
-#define Dmctrl_D_Thrhld(x) ((x) << FShft(DMCTRL_D_THRHLD))
-#define DMCTRL_BURSTLEN Fld(6,0)
-#define Dmctrl_Burstlen(x) ((x) << FShft(DMCTRL_BURSTLEN))
-
-/* DINTRS - Display Interrupt Status Register */
-#define DINTRS_CUR_OR_S (1 << 18)
-#define DINTRS_STR2_OR_S (1 << 17)
-#define DINTRS_STR1_OR_S (1 << 16)
-#define DINTRS_CUR_UR_S (1 << 6)
-#define DINTRS_STR2_UR_S (1 << 5)
-#define DINTRS_STR1_UR_S (1 << 4)
-#define DINTRS_VEVENT1_S (1 << 3)
-#define DINTRS_VEVENT0_S (1 << 2)
-#define DINTRS_HBLNK1_S (1 << 1)
-#define DINTRS_HBLNK0_S (1 << 0)
-
-/* DINTRE - Display Interrupt Enable Register */
-#define DINTRE_CUR_OR_EN (1 << 18)
-#define DINTRE_STR2_OR_EN (1 << 17)
-#define DINTRE_STR1_OR_EN (1 << 16)
-#define DINTRE_CUR_UR_EN (1 << 6)
-#define DINTRE_STR2_UR_EN (1 << 5)
-#define DINTRE_STR1_UR_EN (1 << 4)
-#define DINTRE_VEVENT1_EN (1 << 3)
-#define DINTRE_VEVENT0_EN (1 << 2)
-#define DINTRE_HBLNK1_EN (1 << 1)
-#define DINTRE_HBLNK0_EN (1 << 0)
-
-
-/* DLSTS - display load status register */
-#define DLSTS_RLD_ADONE (1 << 23)
-/* #define DLSTS_RLD_ADOUT Fld(23,0) */
-
-/* DLLCTRL - display list load control register */
-#define DLLCTRL_RLD_ADRLN Fld(8,24)
-#define Dllctrl_Rld_Adrln(x) ((x) << FShft(DLLCTRL_RLD_ADRLN))
-
-/* CLIPCTRL - Clipping Control Register */
-#define CLIPCTRL_HSKIP Fld(11,16)
-#define Clipctrl_Hskip ((x) << FShft(CLIPCTRL_HSKIP))
-#define CLIPCTRL_VSKIP Fld(11,0)
-#define Clipctrl_Vskip ((x) << FShft(CLIPCTRL_VSKIP))
-
-/* SPOCTRL - Scale Pitch/Order Control Register */
-#define SPOCTRL_H_SC_BP (1 << 31)
-#define SPOCTRL_V_SC_BP (1 << 30)
-#define SPOCTRL_HV_SC_OR (1 << 29)
-#define SPOCTRL_VS_UR_C (1 << 27)
-#define SPOCTRL_VORDER Fld(2,16)
-#define SPOCTRL_VORDER_1TAP ((0x0) << FShft(SPOCTRL_VORDER))
-#define SPOCTRL_VORDER_2TAP ((0x1) << FShft(SPOCTRL_VORDER))
-#define SPOCTRL_VORDER_4TAP ((0x3) << FShft(SPOCTRL_VORDER))
-#define SPOCTRL_VPITCH Fld(16,0)
-#define Spoctrl_Vpitch(x) ((x) << FShft(SPOCTRL_VPITCH))
-
-/* SVCTRL - Scale Vertical Control Register */
-#define SVCTRL_INITIAL1 Fld(16,16)
-#define Svctrl_Initial1(x) ((x) << FShft(SVCTRL_INITIAL1))
-#define SVCTRL_INITIAL2 Fld(16,0)
-#define Svctrl_Initial2(x) ((x) << FShft(SVCTRL_INITIAL2))
-
-/* SHCTRL - Scale Horizontal Control Register */
-#define SHCTRL_HINITIAL Fld(16,16)
-#define Shctrl_Hinitial(x) ((x) << FShft(SHCTRL_HINITIAL))
-#define SHCTRL_HDECIM (1 << 15)
-#define SHCTRL_HPITCH Fld(15,0)
-#define Shctrl_Hpitch(x) ((x) << FShft(SHCTRL_HPITCH))
-
-/* SSSIZE - Scale Surface Size Register */
-#define SSSIZE_SC_WIDTH Fld(11,16)
-#define Sssize_Sc_Width(x) ((x) << FShft(SSSIZE_SC_WIDTH))
-#define SSSIZE_SC_HEIGHT Fld(11,0)
-#define Sssize_Sc_Height(x) ((x) << FShft(SSSIZE_SC_HEIGHT))
-
-#endif /* __REG_BITS_2700G_ */
diff --git a/drivers/video/fbdev/mbx/regs.h b/drivers/video/fbdev/mbx/regs.h
deleted file mode 100644
index 591fc9d26084..000000000000
--- a/drivers/video/fbdev/mbx/regs.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __REGS_2700G_
-#define __REGS_2700G_
-
-/* extern unsigned long virt_base_2700; */
-/* #define __REG_2700G(x) (*(volatile unsigned long*)((x)+virt_base_2700)) */
-#define __REG_2700G(x) ((x)+virt_base_2700)
-
-/* System Configuration Registers (0x0000_0000 0x0000_0010) */
-#define SYSCFG __REG_2700G(0x00000000)
-#define PFBASE __REG_2700G(0x00000004)
-#define PFCEIL __REG_2700G(0x00000008)
-#define POLLFLAG __REG_2700G(0x0000000c)
-#define SYSRST __REG_2700G(0x00000010)
-
-/* Interrupt Control Registers (0x0000_0014 0x0000_002F) */
-#define NINTPW __REG_2700G(0x00000014)
-#define MINTENABLE __REG_2700G(0x00000018)
-#define MINTSTAT __REG_2700G(0x0000001c)
-#define SINTENABLE __REG_2700G(0x00000020)
-#define SINTSTAT __REG_2700G(0x00000024)
-#define SINTCLR __REG_2700G(0x00000028)
-
-/* Clock Control Registers (0x0000_002C 0x0000_005F) */
-#define SYSCLKSRC __REG_2700G(0x0000002c)
-#define PIXCLKSRC __REG_2700G(0x00000030)
-#define CLKSLEEP __REG_2700G(0x00000034)
-#define COREPLL __REG_2700G(0x00000038)
-#define DISPPLL __REG_2700G(0x0000003c)
-#define PLLSTAT __REG_2700G(0x00000040)
-#define VOVRCLK __REG_2700G(0x00000044)
-#define PIXCLK __REG_2700G(0x00000048)
-#define MEMCLK __REG_2700G(0x0000004c)
-#define M24CLK __REG_2700G(0x00000050)
-#define MBXCLK __REG_2700G(0x00000054)
-#define SDCLK __REG_2700G(0x00000058)
-#define PIXCLKDIV __REG_2700G(0x0000005c)
-
-/* LCD Port Control Register (0x0000_0060 0x0000_006F) */
-#define LCD_CONFIG __REG_2700G(0x00000060)
-
-/* On-Die Frame Buffer Registers (0x0000_0064 0x0000_006B) */
-#define ODFBPWR __REG_2700G(0x00000064)
-#define ODFBSTAT __REG_2700G(0x00000068)
-
-/* GPIO Registers (0x0000_006C 0x0000_007F) */
-#define GPIOCGF __REG_2700G(0x0000006c)
-#define GPIOHI __REG_2700G(0x00000070)
-#define GPIOLO __REG_2700G(0x00000074)
-#define GPIOSTAT __REG_2700G(0x00000078)
-
-/* Pulse Width Modulator (PWM) Registers (0x0000_0200 0x0000_02FF) */
-#define PWMRST __REG_2700G(0x00000200)
-#define PWMCFG __REG_2700G(0x00000204)
-#define PWM0DIV __REG_2700G(0x00000210)
-#define PWM0DUTY __REG_2700G(0x00000214)
-#define PWM0PER __REG_2700G(0x00000218)
-#define PWM1DIV __REG_2700G(0x00000220)
-#define PWM1DUTY __REG_2700G(0x00000224)
-#define PWM1PER __REG_2700G(0x00000228)
-
-/* Identification (ID) Registers (0x0000_0300 0x0000_0FFF) */
-#define ID __REG_2700G(0x00000FF0)
-
-/* Local Memory (SDRAM) Interface Registers (0x0000_1000 0x0000_1FFF) */
-#define LMRST __REG_2700G(0x00001000)
-#define LMCFG __REG_2700G(0x00001004)
-#define LMPWR __REG_2700G(0x00001008)
-#define LMPWRSTAT __REG_2700G(0x0000100c)
-#define LMCEMR __REG_2700G(0x00001010)
-#define LMTYPE __REG_2700G(0x00001014)
-#define LMTIM __REG_2700G(0x00001018)
-#define LMREFRESH __REG_2700G(0x0000101c)
-#define LMPROTMIN __REG_2700G(0x00001020)
-#define LMPROTMAX __REG_2700G(0x00001024)
-#define LMPROTCFG __REG_2700G(0x00001028)
-#define LMPROTERR __REG_2700G(0x0000102c)
-
-/* Plane Controller Registers (0x0000_2000 0x0000_2FFF) */
-#define GSCTRL __REG_2700G(0x00002000)
-#define VSCTRL __REG_2700G(0x00002004)
-#define GBBASE __REG_2700G(0x00002020)
-#define VBBASE __REG_2700G(0x00002024)
-#define GDRCTRL __REG_2700G(0x00002040)
-#define VCMSK __REG_2700G(0x00002044)
-#define GSCADR __REG_2700G(0x00002060)
-#define VSCADR __REG_2700G(0x00002064)
-#define VUBASE __REG_2700G(0x00002084)
-#define VVBASE __REG_2700G(0x000020a4)
-#define GSADR __REG_2700G(0x000020c0)
-#define VSADR __REG_2700G(0x000020c4)
-#define HCCTRL __REG_2700G(0x00002100)
-#define HCSIZE __REG_2700G(0x00002110)
-#define HCPOS __REG_2700G(0x00002120)
-#define HCBADR __REG_2700G(0x00002130)
-#define HCCKMSK __REG_2700G(0x00002140)
-#define GPLUT __REG_2700G(0x00002150)
-#define DSCTRL __REG_2700G(0x00002154)
-#define DHT01 __REG_2700G(0x00002158)
-#define DHT02 __REG_2700G(0x0000215c)
-#define DHT03 __REG_2700G(0x00002160)
-#define DVT01 __REG_2700G(0x00002164)
-#define DVT02 __REG_2700G(0x00002168)
-#define DVT03 __REG_2700G(0x0000216c)
-#define DBCOL __REG_2700G(0x00002170)
-#define BGCOLOR __REG_2700G(0x00002174)
-#define DINTRS __REG_2700G(0x00002178)
-#define DINTRE __REG_2700G(0x0000217c)
-#define DINTRCNT __REG_2700G(0x00002180)
-#define DSIG __REG_2700G(0x00002184)
-#define DMCTRL __REG_2700G(0x00002188)
-#define CLIPCTRL __REG_2700G(0x0000218c)
-#define SPOCTRL __REG_2700G(0x00002190)
-#define SVCTRL __REG_2700G(0x00002194)
-
-/* 0x0000_2198 */
-/* 0x0000_21A8 VSCOEFF[0:4] Video Scaler Vertical Coefficient [0:4] 4.14.5 */
-#define VSCOEFF0 __REG_2700G(0x00002198)
-#define VSCOEFF1 __REG_2700G(0x0000219c)
-#define VSCOEFF2 __REG_2700G(0x000021a0)
-#define VSCOEFF3 __REG_2700G(0x000021a4)
-#define VSCOEFF4 __REG_2700G(0x000021a8)
-
-#define SHCTRL __REG_2700G(0x000021b0)
-
-/* 0x0000_21B4 */
-/* 0x0000_21D4 HSCOEFF[0:8] Video Scaler Horizontal Coefficient [0:8] 4.14.7 */
-#define HSCOEFF0 __REG_2700G(0x000021b4)
-#define HSCOEFF1 __REG_2700G(0x000021b8)
-#define HSCOEFF2 __REG_2700G(0x000021bc)
-#define HSCOEFF3 __REG_2700G(0x000021c0)
-#define HSCOEFF4 __REG_2700G(0x000021c4)
-#define HSCOEFF5 __REG_2700G(0x000021c8)
-#define HSCOEFF6 __REG_2700G(0x000021cc)
-#define HSCOEFF7 __REG_2700G(0x000021d0)
-#define HSCOEFF8 __REG_2700G(0x000021d4)
-
-#define SSSIZE __REG_2700G(0x000021D8)
-
-/* 0x0000_2200 */
-/* 0x0000_2240 VIDGAM[0:16] Video Gamma LUT Index [0:16] 4.15.2 */
-#define VIDGAM0 __REG_2700G(0x00002200)
-#define VIDGAM1 __REG_2700G(0x00002204)
-#define VIDGAM2 __REG_2700G(0x00002208)
-#define VIDGAM3 __REG_2700G(0x0000220c)
-#define VIDGAM4 __REG_2700G(0x00002210)
-#define VIDGAM5 __REG_2700G(0x00002214)
-#define VIDGAM6 __REG_2700G(0x00002218)
-#define VIDGAM7 __REG_2700G(0x0000221c)
-#define VIDGAM8 __REG_2700G(0x00002220)
-#define VIDGAM9 __REG_2700G(0x00002224)
-#define VIDGAM10 __REG_2700G(0x00002228)
-#define VIDGAM11 __REG_2700G(0x0000222c)
-#define VIDGAM12 __REG_2700G(0x00002230)
-#define VIDGAM13 __REG_2700G(0x00002234)
-#define VIDGAM14 __REG_2700G(0x00002238)
-#define VIDGAM15 __REG_2700G(0x0000223c)
-#define VIDGAM16 __REG_2700G(0x00002240)
-
-/* 0x0000_2250 */
-/* 0x0000_2290 GFXGAM[0:16] Graphics Gamma LUT Index [0:16] 4.15.3 */
-#define GFXGAM0 __REG_2700G(0x00002250)
-#define GFXGAM1 __REG_2700G(0x00002254)
-#define GFXGAM2 __REG_2700G(0x00002258)
-#define GFXGAM3 __REG_2700G(0x0000225c)
-#define GFXGAM4 __REG_2700G(0x00002260)
-#define GFXGAM5 __REG_2700G(0x00002264)
-#define GFXGAM6 __REG_2700G(0x00002268)
-#define GFXGAM7 __REG_2700G(0x0000226c)
-#define GFXGAM8 __REG_2700G(0x00002270)
-#define GFXGAM9 __REG_2700G(0x00002274)
-#define GFXGAM10 __REG_2700G(0x00002278)
-#define GFXGAM11 __REG_2700G(0x0000227c)
-#define GFXGAM12 __REG_2700G(0x00002280)
-#define GFXGAM13 __REG_2700G(0x00002284)
-#define GFXGAM14 __REG_2700G(0x00002288)
-#define GFXGAM15 __REG_2700G(0x0000228c)
-#define GFXGAM16 __REG_2700G(0x00002290)
-
-#define DLSTS __REG_2700G(0x00002300)
-#define DLLCTRL __REG_2700G(0x00002304)
-#define DVLNUM __REG_2700G(0x00002308)
-#define DUCTRL __REG_2700G(0x0000230c)
-#define DVECTRL __REG_2700G(0x00002310)
-#define DHDET __REG_2700G(0x00002314)
-#define DVDET __REG_2700G(0x00002318)
-#define DODMSK __REG_2700G(0x0000231c)
-#define CSC01 __REG_2700G(0x00002330)
-#define CSC02 __REG_2700G(0x00002334)
-#define CSC03 __REG_2700G(0x00002338)
-#define CSC04 __REG_2700G(0x0000233c)
-#define CSC05 __REG_2700G(0x00002340)
-
-#define FB_MEMORY_START __REG_2700G(0x00060000)
-
-#endif /* __REGS_2700G_ */
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index c6820e21875d..a372a183c1f0 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1037,10 +1037,9 @@ static struct fb_ops nvidia_fb_ops = {
.fb_sync = nvidiafb_sync,
};
-#ifdef CONFIG_PM
-static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
+static int nvidiafb_suspend_late(struct device *dev, pm_message_t mesg)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct nvidia_par *par = info->par;
if (mesg.event == PM_EVENT_PRETHAW)
@@ -1052,46 +1051,54 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
fb_set_suspend(info, 1);
nvidiafb_blank(FB_BLANK_POWERDOWN, info);
nvidia_write_regs(par, &par->SavedReg);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, mesg));
}
- dev->dev.power.power_state = mesg;
+ dev->power.power_state = mesg;
console_unlock();
return 0;
}
-static int nvidiafb_resume(struct pci_dev *dev)
+static int __maybe_unused nvidiafb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
- struct nvidia_par *par = info->par;
+ return nvidiafb_suspend_late(dev, PMSG_SUSPEND);
+}
- console_lock();
- pci_set_power_state(dev, PCI_D0);
+static int __maybe_unused nvidiafb_hibernate(struct device *dev)
+{
+ return nvidiafb_suspend_late(dev, PMSG_HIBERNATE);
+}
- if (par->pm_state != PM_EVENT_FREEZE) {
- pci_restore_state(dev);
+static int __maybe_unused nvidiafb_freeze(struct device *dev)
+{
+ return nvidiafb_suspend_late(dev, PMSG_FREEZE);
+}
- if (pci_enable_device(dev))
- goto fail;
+static int __maybe_unused nvidiafb_resume(struct device *dev)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct nvidia_par *par = info->par;
- pci_set_master(dev);
- }
+ console_lock();
par->pm_state = PM_EVENT_ON;
nvidiafb_set_par(info);
fb_set_suspend (info, 0);
nvidiafb_blank(FB_BLANK_UNBLANK, info);
-fail:
console_unlock();
return 0;
}
-#else
-#define nvidiafb_suspend NULL
-#define nvidiafb_resume NULL
-#endif
+
+static const struct dev_pm_ops nvidiafb_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = nvidiafb_suspend,
+ .resume = nvidiafb_resume,
+ .freeze = nvidiafb_freeze,
+ .thaw = nvidiafb_resume,
+ .poweroff = nvidiafb_hibernate,
+ .restore = nvidiafb_resume,
+#endif /* CONFIG_PM_SLEEP */
+};
static int nvidia_set_fbinfo(struct fb_info *info)
{
@@ -1492,12 +1499,11 @@ static int nvidiafb_setup(char *options)
#endif /* !MODULE */
static struct pci_driver nvidiafb_driver = {
- .name = "nvidiafb",
- .id_table = nvidiafb_pci_tbl,
- .probe = nvidiafb_probe,
- .suspend = nvidiafb_suspend,
- .resume = nvidiafb_resume,
- .remove = nvidiafb_remove,
+ .name = "nvidiafb",
+ .id_table = nvidiafb_pci_tbl,
+ .probe = nvidiafb_probe,
+ .driver.pm = &nvidiafb_pm_ops,
+ .remove = nvidiafb_remove,
};
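
[This is the recurring conversion in the rest of this series -- s3fb and
savagefb below get the same treatment: the legacy PCI .suspend/.resume hooks
become a dev_pm_ops table, and the PCI core takes over the pci_save_state()/
pci_set_power_state() bookkeeping the drivers used to do by hand. The generic
shape, as a sketch rather than any specific driver:]

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		/* driver-specific quiesce only; no PCI power calls needed */
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		/* .name, .id_table, .probe, .remove elided */
		.driver.pm = &foo_pm_ops,
	};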
/* ------------------------------------------------------------------------- *
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 22f1d37a968a..496b43bdad21 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <video/omapfb_dss.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index a06b6f1355bd..e3d441ade241 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -24,7 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <video/omapfb_dss.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 0b0ad20afd63..f560fa4d7786 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -787,7 +787,7 @@ static int venc_probe_of(struct platform_device *pdev)
venc.type = OMAP_DSS_VENC_TYPE_SVIDEO;
break;
default:
- dev_err(&pdev->dev, "bad channel propert '%d'\n", channels);
+ dev_err(&pdev->dev, "bad channel property '%d'\n", channels);
r = -EINVAL;
goto err;
}
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 2d9f69b93392..f4add36cb5f4 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -1028,6 +1028,8 @@ static int __init pvr2fb_setup(char *options)
if (!options || !*options)
return 0;
+ cable_arg[0] = output_arg[0] = 0;
+
while ((this_opt = strsep(&options, ","))) {
if (!*this_opt)
continue;
diff --git a/drivers/video/fbdev/s3c2410fb-regs-lcd.h b/drivers/video/fbdev/s3c2410fb-regs-lcd.h
new file mode 100644
index 000000000000..1e46f7a788e5
--- /dev/null
+++ b/drivers/video/fbdev/s3c2410fb-regs-lcd.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
+ * http://www.simtec.co.uk/products/SWLINUX/
+ */
+
+#ifndef ___ASM_ARCH_REGS_LCD_H
+#define ___ASM_ARCH_REGS_LCD_H
+
+/*
+ * a couple of values are used as platform data in
+ * include/linux/platform_data/fb-s3c2410.h and not
+ * duplicated here.
+ */
+#include <linux/platform_data/fb-s3c2410.h>
+
+#define S3C2410_LCDREG(x) (x)
+
+/* LCD control registers */
+#define S3C2410_LCDCON1 S3C2410_LCDREG(0x00)
+#define S3C2410_LCDCON2 S3C2410_LCDREG(0x04)
+#define S3C2410_LCDCON3 S3C2410_LCDREG(0x08)
+#define S3C2410_LCDCON4 S3C2410_LCDREG(0x0C)
+#define S3C2410_LCDCON5 S3C2410_LCDREG(0x10)
+
+#define S3C2410_LCDCON1_CLKVAL(x) ((x) << 8)
+#define S3C2410_LCDCON1_MMODE (1<<7)
+#define S3C2410_LCDCON1_DSCAN4 (0<<5)
+#define S3C2410_LCDCON1_STN4 (1<<5)
+#define S3C2410_LCDCON1_STN8 (2<<5)
+#define S3C2410_LCDCON1_TFT (3<<5)
+
+#define S3C2410_LCDCON1_STN1BPP (0<<1)
+#define S3C2410_LCDCON1_STN2GREY (1<<1)
+#define S3C2410_LCDCON1_STN4GREY (2<<1)
+#define S3C2410_LCDCON1_STN8BPP (3<<1)
+#define S3C2410_LCDCON1_STN12BPP (4<<1)
+
+#define S3C2410_LCDCON1_ENVID (1)
+
+#define S3C2410_LCDCON1_MODEMASK 0x1E
+
+#define S3C2410_LCDCON2_VBPD(x) ((x) << 24)
+#define S3C2410_LCDCON2_LINEVAL(x) ((x) << 14)
+#define S3C2410_LCDCON2_VFPD(x) ((x) << 6)
+#define S3C2410_LCDCON2_VSPW(x) ((x) << 0)
+
+#define S3C2410_LCDCON2_GET_VBPD(x) ( ((x) >> 24) & 0xFF)
+#define S3C2410_LCDCON2_GET_VFPD(x) ( ((x) >> 6) & 0xFF)
+#define S3C2410_LCDCON2_GET_VSPW(x) ( ((x) >> 0) & 0x3F)
+
+#define S3C2410_LCDCON3_HBPD(x) ((x) << 19)
+#define S3C2410_LCDCON3_WDLY(x) ((x) << 19)
+#define S3C2410_LCDCON3_HOZVAL(x) ((x) << 8)
+#define S3C2410_LCDCON3_HFPD(x) ((x) << 0)
+#define S3C2410_LCDCON3_LINEBLANK(x)((x) << 0)
+
+#define S3C2410_LCDCON3_GET_HBPD(x) ( ((x) >> 19) & 0x7F)
+#define S3C2410_LCDCON3_GET_HFPD(x) ( ((x) >> 0) & 0xFF)
+
+/* LCDCON4 changes for STN mode on the S3C2412 */
+
+#define S3C2410_LCDCON4_MVAL(x) ((x) << 8)
+#define S3C2410_LCDCON4_HSPW(x) ((x) << 0)
+#define S3C2410_LCDCON4_WLH(x) ((x) << 0)
+
+#define S3C2410_LCDCON4_GET_HSPW(x) ( ((x) >> 0) & 0xFF)
+
+/* framebuffer start addresses */
+#define S3C2410_LCDSADDR1 S3C2410_LCDREG(0x14)
+#define S3C2410_LCDSADDR2 S3C2410_LCDREG(0x18)
+#define S3C2410_LCDSADDR3 S3C2410_LCDREG(0x1C)
+
+#define S3C2410_LCDBANK(x) ((x) << 21)
+#define S3C2410_LCDBASEU(x) (x)
+
+#define S3C2410_OFFSIZE(x) ((x) << 11)
+#define S3C2410_PAGEWIDTH(x) (x)
+
+/* colour lookup and miscellaneous controls */
+
+#define S3C2410_REDLUT S3C2410_LCDREG(0x20)
+#define S3C2410_GREENLUT S3C2410_LCDREG(0x24)
+#define S3C2410_BLUELUT S3C2410_LCDREG(0x28)
+
+#define S3C2410_DITHMODE S3C2410_LCDREG(0x4C)
+#define S3C2410_TPAL S3C2410_LCDREG(0x50)
+
+#define S3C2410_TPAL_EN (1<<24)
+
+/* interrupt info */
+#define S3C2410_LCDINTPND S3C2410_LCDREG(0x54)
+#define S3C2410_LCDSRCPND S3C2410_LCDREG(0x58)
+#define S3C2410_LCDINTMSK S3C2410_LCDREG(0x5C)
+#define S3C2410_LCDINT_FIWSEL (1<<2)
+#define S3C2410_LCDINT_FRSYNC (1<<1)
+#define S3C2410_LCDINT_FICNT (1<<0)
+
+/* s3c2442 extra stn registers */
+
+#define S3C2442_REDLUT S3C2410_LCDREG(0x20)
+#define S3C2442_GREENLUT S3C2410_LCDREG(0x24)
+#define S3C2442_BLUELUT S3C2410_LCDREG(0x28)
+#define S3C2442_DITHMODE S3C2410_LCDREG(0x20)
+
+#define S3C2410_LPCSEL S3C2410_LCDREG(0x60)
+
+#define S3C2410_TFTPAL(x) S3C2410_LCDREG((0x400 + (x)*4))
+
+/* S3C2412 registers */
+
+#define S3C2412_TPAL S3C2410_LCDREG(0x20)
+
+#define S3C2412_LCDINTPND S3C2410_LCDREG(0x24)
+#define S3C2412_LCDSRCPND S3C2410_LCDREG(0x28)
+#define S3C2412_LCDINTMSK S3C2410_LCDREG(0x2C)
+
+#define S3C2412_TCONSEL S3C2410_LCDREG(0x30)
+
+#define S3C2412_LCDCON6 S3C2410_LCDREG(0x34)
+#define S3C2412_LCDCON7 S3C2410_LCDREG(0x38)
+#define S3C2412_LCDCON8 S3C2410_LCDREG(0x3C)
+#define S3C2412_LCDCON9 S3C2410_LCDREG(0x40)
+
+#define S3C2412_REDLUT(x) S3C2410_LCDREG(0x44 + ((x)*4))
+#define S3C2412_GREENLUT(x) S3C2410_LCDREG(0x60 + ((x)*4))
+#define S3C2412_BLUELUT(x) S3C2410_LCDREG(0x98 + ((x)*4))
+
+#define S3C2412_FRCPAT(x) S3C2410_LCDREG(0xB4 + ((x)*4))
+
+/* general registers */
+
+/* base of the LCD registers, where INTPND, INTSRC and then INTMSK
+ * are available. */
+
+#define S3C2410_LCDINTBASE S3C2410_LCDREG(0x54)
+#define S3C2412_LCDINTBASE S3C2410_LCDREG(0x24)
+
+#define S3C24XX_LCDINTPND (0x00)
+#define S3C24XX_LCDSRCPND (0x04)
+#define S3C24XX_LCDINTMSK (0x08)
+
+#endif /* ___ASM_ARCH_REGS_LCD_H */
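
[CLKVAL in LCDCON1 above sets the pixel-clock divider. Assuming the S3C2410
datasheet relation for TFT panels, VCLK = HCLK / ((CLKVAL + 1) * 2), a hedged
helper for picking the field value; the rounding policy is a choice, not taken
from this header:]

	static unsigned int s3c2410_tft_clkval(unsigned long hclk_hz,
					       unsigned long vclk_hz)
	{
		unsigned long div = hclk_hz / (2UL * vclk_hz);

		return div ? (unsigned int)(div - 1) : 0;
	}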
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index 6f8fa501583f..d8ae5258de46 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -29,19 +29,18 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/io.h>
+#include <linux/platform_data/fb-s3c2410.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
-#include <mach/regs-lcd.h>
-#include <mach/regs-gpio.h>
-#include <mach/fb.h>
#ifdef CONFIG_PM
#include <linux/pm.h>
#endif
#include "s3c2410fb.h"
+#include "s3c2410fb-regs-lcd.h"
/* Debugging stuff */
static int debug = IS_BUILTIN(CONFIG_FB_S3C2410_DEBUG);
@@ -672,6 +671,9 @@ static inline void modify_gpio(void __iomem *reg,
{
unsigned long tmp;
+ if (!reg)
+ return;
+
tmp = readl(reg) & ~mask;
writel(tmp | set, reg);
}
@@ -702,10 +704,10 @@ static int s3c2410fb_init_registers(struct fb_info *info)
/* modify the gpio(s) with interrupts set (bjd) */
- modify_gpio(S3C2410_GPCUP, mach_info->gpcup, mach_info->gpcup_mask);
- modify_gpio(S3C2410_GPCCON, mach_info->gpccon, mach_info->gpccon_mask);
- modify_gpio(S3C2410_GPDUP, mach_info->gpdup, mach_info->gpdup_mask);
- modify_gpio(S3C2410_GPDCON, mach_info->gpdcon, mach_info->gpdcon_mask);
+ modify_gpio(mach_info->gpcup_reg, mach_info->gpcup, mach_info->gpcup_mask);
+ modify_gpio(mach_info->gpccon_reg, mach_info->gpccon, mach_info->gpccon_mask);
+ modify_gpio(mach_info->gpdup_reg, mach_info->gpdup, mach_info->gpdup_mask);
+ modify_gpio(mach_info->gpdcon_reg, mach_info->gpdcon, mach_info->gpdcon_mask);
local_irq_restore(flags);
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 60c424fae988..5c74253e7b2c 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -1410,9 +1410,9 @@ static void s3_pci_remove(struct pci_dev *dev)
/* PCI suspend */
-static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
+static int __maybe_unused s3_pci_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct s3fb_info *par = info->par;
dev_info(info->device, "suspend\n");
@@ -1420,7 +1420,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
console_lock();
mutex_lock(&(par->open_lock));
- if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
+ if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
@@ -1428,10 +1428,6 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
fb_set_suspend(info, 1);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
mutex_unlock(&(par->open_lock));
console_unlock();
@@ -1441,11 +1437,10 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
/* PCI resume */
-static int s3_pci_resume(struct pci_dev* dev)
+static int __maybe_unused s3_pci_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct s3fb_info *par = info->par;
- int err;
dev_info(info->device, "resume\n");
@@ -1458,17 +1453,6 @@ static int s3_pci_resume(struct pci_dev* dev)
return 0;
}
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- mutex_unlock(&(par->open_lock));
- console_unlock();
- dev_err(info->device, "error %d enabling device for resume\n", err);
- return err;
- }
- pci_set_master(dev);
-
s3fb_set_par(info);
fb_set_suspend(info, 0);
@@ -1478,6 +1462,16 @@ static int s3_pci_resume(struct pci_dev* dev)
return 0;
}
+static const struct dev_pm_ops s3_pci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = s3_pci_suspend,
+ .resume = s3_pci_resume,
+ .freeze = NULL,
+ .thaw = s3_pci_resume,
+ .poweroff = s3_pci_suspend,
+ .restore = s3_pci_resume,
+#endif
+};
/* List of boards that we are trying to support */
@@ -1510,8 +1504,7 @@ static struct pci_driver s3fb_pci_driver = {
.id_table = s3_devices,
.probe = s3_pci_probe,
.remove = s3_pci_remove,
- .suspend = s3_pci_suspend,
- .resume = s3_pci_resume,
+ .driver.pm = &s3_pci_pm_ops,
};
/* Parse user specified options */
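The s3fb hunks show the conversion pattern repeated across this series: the legacy struct pci_driver .suspend/.resume hooks took a struct pci_dev plus a pm_message_t and had to open-code pci_save_state(), pci_disable_device() and pci_set_power_state(), while dev_pm_ops callbacks take a plain struct device and leave the PCI power transitions to the PCI core. A condensed sketch of the shape, with hypothetical foofb names:

static int __maybe_unused foofb_suspend(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	fb_set_suspend(info, 1);
	/* no PCI state handling here: the PCI bus PM code saves
	 * config space and powers the device down around this call */
	return 0;
}

static int __maybe_unused foofb_resume(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);

	fb_set_suspend(info, 0);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foofb_pm_ops, foofb_suspend, foofb_resume);

Note that SIMPLE_DEV_PM_OPS() would also point .freeze at the suspend routine; s3fb (like vt8623fb below) instead spells the table out with .freeze = NULL, preserving the removed `state.event == PM_EVENT_FREEZE` early return, since nothing needs powering down while the hibernation image is created.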
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index a2442aae7e12..0ac750cc5ea1 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -1859,7 +1859,6 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x68, par); /* memory control 1 */
if ((vga_in8(0x3d5, par) & 0xC0) == (0x01 << 6))
RamSavage4[1] = 8;
-
fallthrough;
case S3_SAVAGE2000:
@@ -2348,9 +2347,9 @@ static void savagefb_remove(struct pci_dev *dev)
}
}
-static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
+static int savagefb_suspend_late(struct device *dev, pm_message_t mesg)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct savagefb_par *par = info->par;
DBG("savagefb_suspend");
@@ -2358,7 +2357,7 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
par->pm_state = mesg.event;
- dev->dev.power.power_state = mesg;
+ dev->power.power_state = mesg;
/*
* For PM_EVENT_FREEZE, do not power down so the console
@@ -2376,17 +2375,29 @@ static int savagefb_suspend(struct pci_dev *dev, pm_message_t mesg)
savagefb_blank(FB_BLANK_POWERDOWN, info);
savage_set_default_par(par, &par->save);
savage_disable_mmio(par);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, mesg));
console_unlock();
return 0;
}
-static int savagefb_resume(struct pci_dev* dev)
+static int __maybe_unused savagefb_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ return savagefb_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int __maybe_unused savagefb_hibernate(struct device *dev)
+{
+ return savagefb_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int __maybe_unused savagefb_freeze(struct device *dev)
+{
+ return savagefb_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int __maybe_unused savagefb_resume(struct device *dev)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
struct savagefb_par *par = info->par;
int cur_state = par->pm_state;
@@ -2398,20 +2409,11 @@ static int savagefb_resume(struct pci_dev* dev)
* The adapter was not powered down coming back from a
* PM_EVENT_FREEZE.
*/
- if (cur_state == PM_EVENT_FREEZE) {
- pci_set_power_state(dev, PCI_D0);
+ if (cur_state == PM_EVENT_FREEZE)
return 0;
- }
console_lock();
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
-
- if (pci_enable_device(dev))
- DBG("err");
-
- pci_set_master(dev);
savage_enable_mmio(par);
savage_init_hw(par);
savagefb_set_par(info);
@@ -2422,6 +2424,16 @@ static int savagefb_resume(struct pci_dev* dev)
return 0;
}
+static const struct dev_pm_ops savagefb_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = savagefb_suspend,
+ .resume = savagefb_resume,
+ .freeze = savagefb_freeze,
+ .thaw = savagefb_resume,
+ .poweroff = savagefb_hibernate,
+ .restore = savagefb_resume,
+#endif
+};
static const struct pci_device_id savagefb_devices[] = {
{PCI_VENDOR_ID_S3, PCI_CHIP_SUPSAV_MX128,
@@ -2502,8 +2514,7 @@ static struct pci_driver savagefb_driver = {
.name = "savagefb",
.id_table = savagefb_devices,
.probe = savagefb_probe,
- .suspend = savagefb_suspend,
- .resume = savagefb_resume,
+ .driver.pm = &savagefb_pm_ops,
.remove = savagefb_remove,
};
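savagefb still needs per-event behavior, so instead of a single callback it funnels suspend, freeze and poweroff through one worker that keeps receiving the pm_message_t the PCI core used to pass in. The pattern, distilled with hypothetical foofb names:

static int foofb_suspend_late(struct device *dev, pm_message_t mesg)
{
	/* shared path; mesg.event selects the FREEZE vs SUSPEND
	 * handling, e.g. skipping the power-down when only a
	 * hibernation image is being created */
	return 0;
}

static int __maybe_unused foofb_suspend(struct device *dev)
{
	return foofb_suspend_late(dev, PMSG_SUSPEND);
}

static int __maybe_unused foofb_freeze(struct device *dev)
{
	return foofb_suspend_late(dev, PMSG_FREEZE);
}

static int __maybe_unused foofb_poweroff(struct device *dev)
{
	return foofb_suspend_late(dev, PMSG_HIBERNATE);
}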
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index dfe3eb769638..fde27feae5d0 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -2428,6 +2428,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
i = 0;
+ if (SiS_Pr->ChipType == SIS_730)
+ queuedata = &FQBQData730[0];
+ else
+ queuedata = &FQBQData[0];
+
if(ModeNo > 0x13) {
/* Get VCLK */
@@ -2445,12 +2450,6 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
/* Get half colordepth */
colorth = colortharray[(SiS_Pr->SiS_ModeType - ModeEGA)];
- if(SiS_Pr->ChipType == SIS_730) {
- queuedata = &FQBQData730[0];
- } else {
- queuedata = &FQBQData[0];
- }
-
do {
templ = SiS_CalcDelay2(SiS_Pr, queuedata[i]) * VCLK * colorth;
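The sis/init.c hunks fix an uninitialized-pointer read: queuedata was assigned only inside the ModeNo > 0x13 branch, yet the do/while loop dereferences queuedata[i] for every mode, so text modes (ModeNo <= 0x13) read through a garbage stack pointer. Hoisting the table selection above the branch makes every path safe. An illustrative reduction, not the driver code:

static int pick_delay(int chip, int mode)
{
	static const unsigned char fqbq[]    = { 0x01, 0x21, 0x41 };
	static const unsigned char fqbq730[] = { 0x04, 0x24, 0x44 };
	const unsigned char *queuedata;

	/* fix: choose the table unconditionally, before any
	 * mode-dependent branch */
	queuedata = (chip == 730) ? fqbq730 : fqbq;

	if (mode > 0x13) {
		/* mode-specific work that previously hid the only
		 * assignment; text modes skipped this block entirely */
	}

	return queuedata[0];	/* now initialized on every path */
}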
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index bdbe9c68e274..0dbc6bf8268a 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1604,6 +1604,14 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sfb->fb->fix.mmio_start = mmio_base;
sfb->fb->fix.mmio_len = 0x00200000;
sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
+ if (!sfb->dp_regs) {
+ dev_err(&pdev->dev,
+ "%s: unable to map memory mapped IO!\n",
+ sfb->fb->fix.id);
+ err = -ENOMEM;
+ goto failed_fb;
+ }
+
sfb->lfb = sfb->dp_regs + 0x00200000;
sfb->mmio = (smtc_regbaseaddress =
sfb->dp_regs + 0x000c0000);
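sm712fb gains the previously missing NULL check after ioremap(); without it, a failed mapping would later be dereferenced through sfb->dp_regs. The guard follows the usual shape, sketched here with an illustrative helper:

static void __iomem *map_mmio(struct pci_dev *pdev,
			      resource_size_t base, size_t len)
{
	void __iomem *regs = ioremap(base, len);

	if (!regs)	/* ioremap() returns NULL on failure */
		dev_err(&pdev->dev, "unable to map MMIO at %pa\n", &base);

	return regs;	/* caller unwinds with -ENOMEM on NULL */
}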
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 09425ec317ba..eda448b7a0c9 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -74,6 +74,7 @@ struct ssd1307fb_par {
struct fb_info *info;
u8 lookup_table[4];
u32 page_offset;
+ u32 col_offset;
u32 prechargep1;
u32 prechargep2;
struct pwm_device *pwm;
@@ -458,11 +459,11 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- ret = ssd1307fb_write_cmd(par->client, 0x0);
+ ret = ssd1307fb_write_cmd(par->client, par->col_offset);
if (ret < 0)
return ret;
- ret = ssd1307fb_write_cmd(par->client, par->width - 1);
+ ret = ssd1307fb_write_cmd(par->client, par->col_offset + par->width - 1);
if (ret < 0)
return ret;
@@ -626,6 +627,9 @@ static int ssd1307fb_probe(struct i2c_client *client)
if (device_property_read_u32(dev, "solomon,page-offset", &par->page_offset))
par->page_offset = 1;
+ if (device_property_read_u32(dev, "solomon,col-offset", &par->col_offset))
+ par->col_offset = 0;
+
if (device_property_read_u32(dev, "solomon,com-offset", &par->com_offset))
par->com_offset = 0;
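ssd1307fb adds a per-panel column offset, read from the optional "solomon,col-offset" firmware property and applied to both ends of the column address window programmed at init time. Isolated, the property-with-default idiom used above looks like this (helper name illustrative):

static u32 read_col_offset(struct device *dev)
{
	u32 col_offset;

	/* device_property_read_u32() returns non-zero and leaves the
	 * output untouched when the property is absent, so the error
	 * branch doubles as the default assignment */
	if (device_property_read_u32(dev, "solomon,col-offset", &col_offset))
		col_offset = 0;	/* panels wired at column 0 need no shift */

	return col_offset;
}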
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index afe6d1b7c3a0..c05cdabeb11c 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -733,7 +733,7 @@ static ssize_t show_vgapass(struct device *device, struct device_attribute *attr
{
struct fb_info *info = dev_get_drvdata(device);
struct sstfb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%d\n", par->vgapass);
+ return sprintf(buf, "%d\n", par->vgapass);
}
static struct device_attribute device_attrs[] = {
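In sstfb the snprintf() bound was redundant: a sysfs show() buffer is always a full page and the value printed is a small integer, so sprintf() is equivalent. Recent kernels also provide sysfs_emit(), which makes the one-page contract explicit; a possible alternative spelling of the same attribute:

static ssize_t show_vgapass(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(device);
	struct sstfb_par *par = info->par;

	/* sysfs_emit() verifies buf is the page-sized sysfs buffer */
	return sysfs_emit(buf, "%d\n", par->vgapass);
}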
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index fb8f58f9867a..c338f7848ae2 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -4,12 +4,6 @@
/* generic STI structures & functions */
-#if 0
-#define DPRINTK(x) printk x
-#else
-#define DPRINTK(x)
-#endif
-
#define MAX_STI_ROMS 4 /* max no. of ROMs which this driver handles */
#define STI_REGION_MAX 8 /* hardcoded STI constants */
@@ -246,8 +240,12 @@ struct sti_rom_font {
/* sticore internal font handling */
struct sti_cooked_font {
- struct sti_rom_font *raw;
+ struct sti_rom_font *raw; /* native ptr for STI functions */
+ void *raw_ptr; /* kmalloc'ed font data */
struct sti_cooked_font *next_font;
+ int height, width;
+ int refcount;
+ u32 crc;
};
struct sti_cooked_rom {
@@ -341,9 +339,6 @@ struct sti_all_data {
struct sti_struct {
spinlock_t lock;
- /* the following fields needs to be filled in by the word/byte routines */
- int font_width;
- int font_height;
/* char **mon_strings; */
int sti_mem_request;
u32 graphics_id[2];
@@ -362,6 +357,7 @@ struct sti_struct {
struct sti_glob_cfg *glob_cfg; /* points into sti_all_data */
+ int wordmode;
struct sti_cooked_font *font; /* ptr to selected font (cooked) */
struct pci_dev *pd;
@@ -380,6 +376,7 @@ struct sti_struct {
/* sticore interface functions */
struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
+void sti_font_convert_bytemode(struct sti_struct *sti, struct sti_cooked_font *f);
/* sticore main function to call STI firmware */
@@ -391,12 +388,14 @@ int sti_call(const struct sti_struct *sti, unsigned long func,
/* functions to call the STI ROM directly */
-void sti_putc(struct sti_struct *sti, int c, int y, int x);
+void sti_putc(struct sti_struct *sti, int c, int y, int x,
+ struct sti_cooked_font *font);
void sti_set(struct sti_struct *sti, int src_y, int src_x,
- int height, int width, u8 color);
+ int height, int width, u8 color);
void sti_clear(struct sti_struct *sti, int src_y, int src_x,
- int height, int width, int c);
+ int height, int width, int c, struct sti_cooked_font *font);
void sti_bmove(struct sti_struct *sti, int src_y, int src_x,
- int dst_y, int dst_x, int height, int width);
+ int dst_y, int dst_x, int height, int width,
+ struct sti_cooked_font *font);
#endif /* STICORE_H */
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index e9869135d833..666fbe2f671c 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -989,8 +989,10 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
/* We can fill 2k pixels per operation. Notice blocks that fit
the width of the screen so that we can take advantage of this
and fill more than one line per write. */
- if (width == line_length)
- width *= height, height = 1;
+ if (width == line_length) {
+ width *= height;
+ height = 1;
+ }
/* The write into the frame buffer must be aligned to 4 bytes,
but we are allowed to encode the offset within the word in
@@ -1171,8 +1173,10 @@ copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
More than anything else, these control how we do copies. */
depos = dy * line_length + dx;
sepos = sy * line_length + sx;
- if (backward)
- depos += width, sepos += width;
+ if (backward) {
+ depos += width;
+ sepos += width;
+ }
/* Next copy full words at a time. */
n32 = width / 32;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 5b014b479f83..f9b3c1cb9530 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1457,7 +1457,7 @@ static ssize_t edid_show(
struct file *filp,
struct kobject *kobj, struct bin_attribute *a,
char *buf, loff_t off, size_t count) {
- struct device *fbdev = container_of(kobj, struct device, kobj);
+ struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
@@ -1479,7 +1479,7 @@ static ssize_t edid_store(
struct file *filp,
struct kobject *kobj, struct bin_attribute *a,
char *src, loff_t src_off, size_t src_size) {
- struct device *fbdev = container_of(kobj, struct device, kobj);
+ struct device *fbdev = kobj_to_dev(kobj);
struct fb_info *fb_info = dev_get_drvdata(fbdev);
struct dlfb_data *dlfb = fb_info->par;
int ret;
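The udlfb hunks replace an open-coded container_of() with kobj_to_dev(), the dedicated helper for recovering the struct device that embeds a given kobject. In the driver core headers the helper is exactly the expression it replaces, wrapped for readability and type safety:

static inline struct device *kobj_to_dev(struct kobject *kobj)
{
	return container_of(kobj, struct device, kobj);
}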
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 578d3541e3d6..1e8a38a7967d 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -243,7 +243,7 @@ static void vga16fb_update_fix(struct fb_info *info)
}
static void vga16fb_clock_chip(struct vga16fb_par *par,
- unsigned int pixclock,
+ unsigned int *pixclock,
const struct fb_info *info,
int mul, int div)
{
@@ -259,14 +259,14 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
{ 0 /* bad */, 0x00, 0x00}};
int err;
- pixclock = (pixclock * mul) / div;
+ *pixclock = (*pixclock * mul) / div;
best = vgaclocks;
- err = pixclock - best->pixclock;
+ err = *pixclock - best->pixclock;
if (err < 0) err = -err;
for (ptr = vgaclocks + 1; ptr->pixclock; ptr++) {
int tmp;
- tmp = pixclock - ptr->pixclock;
+ tmp = *pixclock - ptr->pixclock;
if (tmp < 0) tmp = -tmp;
if (tmp < err) {
err = tmp;
@@ -275,7 +275,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
}
par->misc |= best->misc;
par->clkdiv = best->seq_clock_mode;
- pixclock = (best->pixclock * div) / mul;
+ *pixclock = (best->pixclock * div) / mul;
}
#define FAIL(X) return -EINVAL
@@ -497,10 +497,10 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
if (mode & MODE_8BPP)
/* pixel clock == vga clock / 2 */
- vga16fb_clock_chip(par, var->pixclock, info, 1, 2);
+ vga16fb_clock_chip(par, &var->pixclock, info, 1, 2);
else
/* pixel clock == vga clock */
- vga16fb_clock_chip(par, var->pixclock, info, 1, 1);
+ vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
var->red.offset = var->green.offset = var->blue.offset =
var->transp.offset = 0;
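The vga16fb change fixes a silent pass-by-value bug: vga16fb_clock_chip() picks the closest supported pixel clock and writes the rounded value back, but the write landed in a local copy, so var->pixclock was returned to user space unadjusted. Reduced to its essence (values illustrative):

static void round_clock_by_value(unsigned int pixclock)
{
	pixclock = 39721;	/* updates a copy; lost on return */
}

static void round_clock_by_pointer(unsigned int *pixclock)
{
	*pixclock = 39721;	/* updates the caller's fb_var field */
}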
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 703ddee9a244..89d75079b730 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -558,9 +558,8 @@ static void via_teardown_subdevs(void)
/*
* Power management functions
*/
-#ifdef CONFIG_PM
-static LIST_HEAD(viafb_pm_hooks);
-static DEFINE_MUTEX(viafb_pm_hooks_lock);
+static __maybe_unused LIST_HEAD(viafb_pm_hooks);
+static __maybe_unused DEFINE_MUTEX(viafb_pm_hooks_lock);
void viafb_pm_register(struct viafb_pm_hooks *hooks)
{
@@ -580,12 +579,10 @@ void viafb_pm_unregister(struct viafb_pm_hooks *hooks)
}
EXPORT_SYMBOL_GPL(viafb_pm_unregister);
-static int via_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused via_suspend(struct device *dev)
{
struct viafb_pm_hooks *hooks;
- if (state.event != PM_EVENT_SUSPEND)
- return 0;
/*
* "I've occasionally hit a few drivers that caused suspend
* failures, and each and every time it was a driver bug, and
@@ -600,24 +597,13 @@ static int via_suspend(struct pci_dev *pdev, pm_message_t state)
hooks->suspend(hooks->private);
mutex_unlock(&viafb_pm_hooks_lock);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int via_resume(struct pci_dev *pdev)
+static int __maybe_unused via_resume(struct device *dev)
{
struct viafb_pm_hooks *hooks;
- /* Get the bus side powered up */
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- if (pci_enable_device(pdev))
- return 0;
-
- pci_set_master(pdev);
-
/* Now bring back any subdevs */
mutex_lock(&viafb_pm_hooks_lock);
list_for_each_entry(hooks, &viafb_pm_hooks, list)
@@ -626,7 +612,6 @@ static int via_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -712,15 +697,23 @@ static const struct pci_device_id via_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, via_pci_table);
+static const struct dev_pm_ops via_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = via_suspend,
+ .resume = via_resume,
+ .freeze = NULL,
+ .thaw = via_resume,
+ .poweroff = NULL,
+ .restore = via_resume,
+#endif
+};
+
static struct pci_driver via_driver = {
.name = "viafb",
.id_table = via_pci_table,
.probe = via_pci_probe,
.remove = via_pci_remove,
-#ifdef CONFIG_PM
- .suspend = via_suspend,
- .resume = via_resume,
-#endif
+ .driver.pm = &via_pm_ops,
};
static int __init via_core_init(void)
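via-core drops its #ifdef CONFIG_PM blocks in favor of __maybe_unused, so the PM paths are always compile-tested and only the dev_pm_ops table stays conditional. The removed `state.event != PM_EVENT_SUSPEND` early return is now expressed structurally instead: .freeze and .poweroff are left NULL, so via_suspend() is simply never called for those events. The pattern in isolation (hypothetical foo names):

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* always compiled; silently discarded when nothing references
	 * it in a !CONFIG_PM_SLEEP build */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = foo_suspend,	/* .freeze/.poweroff left NULL on purpose */
#endif
};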
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 98ff8235c9e9..7a959e5ba90b 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -815,12 +815,11 @@ static void vt8623_pci_remove(struct pci_dev *dev)
}
-#ifdef CONFIG_PM
/* PCI suspend */
-static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
+static int __maybe_unused vt8623_pci_suspend(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
dev_info(info->device, "suspend\n");
@@ -828,7 +827,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
console_lock();
mutex_lock(&(par->open_lock));
- if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
+ if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
@@ -836,10 +835,6 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
fb_set_suspend(info, 1);
- pci_save_state(dev);
- pci_disable_device(dev);
- pci_set_power_state(dev, pci_choose_state(dev, state));
-
mutex_unlock(&(par->open_lock));
console_unlock();
@@ -849,9 +844,9 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
/* PCI resume */
-static int vt8623_pci_resume(struct pci_dev* dev)
+static int __maybe_unused vt8623_pci_resume(struct device *dev)
{
- struct fb_info *info = pci_get_drvdata(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
dev_info(info->device, "resume\n");
@@ -862,14 +857,6 @@ static int vt8623_pci_resume(struct pci_dev* dev)
if (par->ref_count == 0)
goto fail;
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
-
- if (pci_enable_device(dev))
- goto fail;
-
- pci_set_master(dev);
-
vt8623fb_set_par(info);
fb_set_suspend(info, 0);
@@ -879,10 +866,17 @@ fail:
return 0;
}
-#else
-#define vt8623_pci_suspend NULL
-#define vt8623_pci_resume NULL
-#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops vt8623_pci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = vt8623_pci_suspend,
+ .resume = vt8623_pci_resume,
+ .freeze = NULL,
+ .thaw = vt8623_pci_resume,
+ .poweroff = vt8623_pci_suspend,
+ .restore = vt8623_pci_resume,
+#endif /* CONFIG_PM_SLEEP */
+};
/* List of boards that we are trying to support */
@@ -898,8 +892,7 @@ static struct pci_driver vt8623fb_pci_driver = {
.id_table = vt8623_devices,
.probe = vt8623_pci_probe,
.remove = vt8623_pci_remove,
- .suspend = vt8623_pci_suspend,
- .resume = vt8623_pci_resume,
+ .driver.pm = &vt8623_pci_pm_ops,
};
/* Cleanup */
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
index cbc1f25c79ab..80c5f9c16ec1 100644
--- a/drivers/virt/Kconfig
+++ b/drivers/virt/Kconfig
@@ -32,4 +32,6 @@ config FSL_HV_MANAGER
partition shuts down.
source "drivers/virt/vboxguest/Kconfig"
+
+source "drivers/virt/nitro_enclaves/Kconfig"
endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
index fd331247c27a..f28425ce4b39 100644
--- a/drivers/virt/Makefile
+++ b/drivers/virt/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
obj-y += vboxguest/
+
+obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 1b0b11b55d2a..46ee0a0998b6 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
unsigned int i;
long ret = 0;
- int num_pinned; /* return value from get_user_pages() */
+ int num_pinned = 0; /* return value from get_user_pages_fast() */
phys_addr_t remote_paddr; /* The next address in the remote buffer */
uint32_t count; /* The number of bytes left to copy */
@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
return -EINVAL;
/*
- * The array of pages returned by get_user_pages() covers only
+ * The array of pages returned by get_user_pages_fast() covers only
* page-aligned memory. Since the user buffer is probably not
* page-aligned, we need to handle the discrepancy.
*
@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/*
* 'pages' is an array of struct page pointers that's initialized by
- * get_user_pages().
+ * get_user_pages_fast().
*/
pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
if (!sg_list_unaligned) {
pr_debug("fsl-hv: could not allocate S/G list\n");
ret = -ENOMEM;
- goto exit;
+ goto free_pages;
}
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
@@ -250,7 +250,6 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
if (num_pinned != num_pages) {
- /* get_user_pages() failed */
pr_debug("fsl-hv: could not lock source buffer\n");
ret = (num_pinned < 0) ? num_pinned : -EFAULT;
goto exit;
@@ -292,13 +291,13 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
virt_to_phys(sg_list), num_pages);
exit:
- if (pages) {
- for (i = 0; i < num_pages; i++)
- if (pages[i])
- put_page(pages[i]);
+ if (pages && (num_pinned > 0)) {
+ for (i = 0; i < num_pinned; i++)
+ put_page(pages[i]);
}
kfree(sg_list_unaligned);
+free_pages:
kfree(pages);
if (!ret)
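The fsl_hypervisor changes are error-path hygiene for ioctl_memcpy(): num_pinned now starts at 0 so cleanup can run before any pinning attempt, only the pages actually pinned are released (get_user_pages_fast() may return fewer than requested, or a negative error), and the new free_pages label lets the S/G-list allocation failure skip the put_page() loop entirely. A self-contained sketch of that unwind ordering, with illustrative names:

static long pin_user_buffer(unsigned long uaddr, unsigned int num_pages)
{
	struct page **pages;
	void *sg_list;
	int num_pinned = 0;	/* 0 so pre-gup failures skip put_page() */
	long ret = 0;
	unsigned int i;

	pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	sg_list = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg_list) {
		ret = -ENOMEM;
		goto free_pages;	/* nothing pinned yet */
	}

	num_pinned = get_user_pages_fast(uaddr, num_pages, FOLL_WRITE, pages);
	if (num_pinned != num_pages) {
		ret = (num_pinned < 0) ? num_pinned : -EFAULT;
		goto exit;		/* release only the partial pin */
	}

	/* ... hand the pinned pages to the hypervisor here ... */

exit:
	if (num_pinned > 0)		/* guard against negative gup rc */
		for (i = 0; i < num_pinned; i++)
			put_page(pages[i]);
	kfree(sg_list);
free_pages:
	kfree(pages);
	return ret;
}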
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
new file mode 100644
index 000000000000..8c9387a232df
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+# Amazon Nitro Enclaves (NE) support.
+# Nitro is a hypervisor that has been developed by Amazon.
+
+# TODO: Add dependency for ARM64 once NE is supported on Arm platforms. For now,
+# the NE kernel driver can be built for aarch64 arch.
+# depends on (ARM64 || X86) && HOTPLUG_CPU && PCI && SMP
+
+config NITRO_ENCLAVES
+ tristate "Nitro Enclaves Support"
+ depends on X86 && HOTPLUG_CPU && PCI && SMP
+ help
+ This driver consists of support for enclave lifetime management
+ for Nitro Enclaves (NE).
+
+ To compile this driver as a module, choose M here.
+ The module will be called nitro_enclaves.
diff --git a/drivers/virt/nitro_enclaves/Makefile b/drivers/virt/nitro_enclaves/Makefile
new file mode 100644
index 000000000000..da61260f2be6
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+# Enclave lifetime management support for Nitro Enclaves (NE).
+
+obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves.o
+
+nitro_enclaves-y := ne_pci_dev.o ne_misc_dev.o
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
new file mode 100644
index 000000000000..f1964ea4b826
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -0,0 +1,1731 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+/**
+ * DOC: Enclave lifetime management driver for Nitro Enclaves (NE).
+ * Nitro is a hypervisor that has been developed by Amazon.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/capability.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/hugetlb.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nitro_enclaves.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <uapi/linux/vm_sockets.h>
+
+#include "ne_misc_dev.h"
+#include "ne_pci_dev.h"
+
+/**
+ * NE_CPUS_SIZE - Size for max 128 CPUs, for now, in a cpu-list string, comma
+ * separated. The NE CPU pool includes CPUs from a single NUMA
+ * node.
+ */
+#define NE_CPUS_SIZE (512)
+
+/**
+ * NE_EIF_LOAD_OFFSET - The offset where to copy the Enclave Image Format (EIF)
+ * image in enclave memory.
+ */
+#define NE_EIF_LOAD_OFFSET (8 * 1024UL * 1024UL)
+
+/**
+ * NE_MIN_ENCLAVE_MEM_SIZE - The minimum memory size an enclave can be launched
+ * with.
+ */
+#define NE_MIN_ENCLAVE_MEM_SIZE (64 * 1024UL * 1024UL)
+
+/**
+ * NE_MIN_MEM_REGION_SIZE - The minimum size of an enclave memory region.
+ */
+#define NE_MIN_MEM_REGION_SIZE (2 * 1024UL * 1024UL)
+
+/**
+ * NE_PARENT_VM_CID - The CID for the vsock device of the primary / parent VM.
+ */
+#define NE_PARENT_VM_CID (3)
+
+static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations ne_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = ne_ioctl,
+};
+
+static struct miscdevice ne_misc_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nitro_enclaves",
+ .fops = &ne_fops,
+ .mode = 0660,
+};
+
+struct ne_devs ne_devs = {
+ .ne_misc_dev = &ne_misc_dev,
+};
+
+/*
+ * TODO: Update logic to create new sysfs entries instead of using
+ * a kernel parameter, e.g. if multiple sysfs files are needed.
+ */
+static int ne_set_kernel_param(const char *val, const struct kernel_param *kp);
+
+static const struct kernel_param_ops ne_cpu_pool_ops = {
+ .get = param_get_string,
+ .set = ne_set_kernel_param,
+};
+
+static char ne_cpus[NE_CPUS_SIZE];
+static struct kparam_string ne_cpus_arg = {
+ .maxlen = sizeof(ne_cpus),
+ .string = ne_cpus,
+};
+
+module_param_cb(ne_cpus, &ne_cpu_pool_ops, &ne_cpus_arg, 0644);
+/* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists */
+MODULE_PARM_DESC(ne_cpus, "<cpu-list> - CPU pool used for Nitro Enclaves");
+
+/**
+ * struct ne_cpu_pool - CPU pool used for Nitro Enclaves.
+ * @avail_threads_per_core: Available full CPU cores to be dedicated to
+ * enclave(s). The cpumasks from the array, indexed
+ * by core id, contain all the threads from the
+ * available cores, that are not set for created
+ * enclave(s). The full CPU cores are part of the
+ * NE CPU pool.
+ * @mutex: Mutex for the access to the NE CPU pool.
+ * @nr_parent_vm_cores: The size of the available threads per core array.
+ * The total number of CPU cores available on the
+ * primary / parent VM.
+ * @nr_threads_per_core: The number of threads that a full CPU core has.
+ * @numa_node: NUMA node of the CPUs in the pool.
+ */
+struct ne_cpu_pool {
+ cpumask_var_t *avail_threads_per_core;
+ struct mutex mutex;
+ unsigned int nr_parent_vm_cores;
+ unsigned int nr_threads_per_core;
+ int numa_node;
+};
+
+static struct ne_cpu_pool ne_cpu_pool;
+
+/**
+ * ne_check_enclaves_created() - Verify if at least one enclave has been created.
+ * @void: No parameters provided.
+ *
+ * Context: Process context.
+ * Return:
+ * * True if at least one enclave is created.
+ * * False otherwise.
+ */
+static bool ne_check_enclaves_created(void)
+{
+ struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
+ bool ret = false;
+
+ if (!ne_pci_dev)
+ return ret;
+
+ mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+
+ if (!list_empty(&ne_pci_dev->enclaves_list))
+ ret = true;
+
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+
+ return ret;
+}
+
+/**
+ * ne_setup_cpu_pool() - Set the NE CPU pool after handling sanity checks such
+ * as not sharing CPU cores with the primary / parent VM
+ * or not using CPU 0, which should remain available for
+ * the primary / parent VM. Offline the CPUs from the
+ *			 pool once the checks have passed.
+ * @ne_cpu_list: The CPU list used for setting NE CPU pool.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_setup_cpu_pool(const char *ne_cpu_list)
+{
+ int core_id = -1;
+ unsigned int cpu = 0;
+ cpumask_var_t cpu_pool;
+ unsigned int cpu_sibling = 0;
+ unsigned int i = 0;
+ int numa_node = -1;
+ int rc = -EINVAL;
+
+ if (!zalloc_cpumask_var(&cpu_pool, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ rc = cpulist_parse(ne_cpu_list, cpu_pool);
+ if (rc < 0) {
+ pr_err("%s: Error in cpulist parse [rc=%d]\n", ne_misc_dev.name, rc);
+
+ goto free_pool_cpumask;
+ }
+
+ cpu = cpumask_any(cpu_pool);
+ if (cpu >= nr_cpu_ids) {
+ pr_err("%s: No CPUs available in CPU pool\n", ne_misc_dev.name);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+
+ /*
+ * Check if the CPUs are online, to further get info about them
+ * e.g. numa node, core id, siblings.
+ */
+ for_each_cpu(cpu, cpu_pool)
+ if (cpu_is_offline(cpu)) {
+ pr_err("%s: CPU %d is offline, has to be online to get its metadata\n",
+ ne_misc_dev.name, cpu);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+
+ /*
+ * Check if the CPUs from the NE CPU pool are from the same NUMA node.
+ */
+ for_each_cpu(cpu, cpu_pool)
+ if (numa_node < 0) {
+ numa_node = cpu_to_node(cpu);
+ if (numa_node < 0) {
+ pr_err("%s: Invalid NUMA node %d\n",
+ ne_misc_dev.name, numa_node);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+ } else {
+ if (numa_node != cpu_to_node(cpu)) {
+ pr_err("%s: CPUs with different NUMA nodes\n",
+ ne_misc_dev.name);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+ }
+
+ /*
+	 * Check if CPU 0 and its siblings are included in the provided CPU pool.
+ * They should remain available for the primary / parent VM.
+ */
+ if (cpumask_test_cpu(0, cpu_pool)) {
+ pr_err("%s: CPU 0 has to remain available\n", ne_misc_dev.name);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+
+ for_each_cpu(cpu_sibling, topology_sibling_cpumask(0)) {
+ if (cpumask_test_cpu(cpu_sibling, cpu_pool)) {
+ pr_err("%s: CPU sibling %d for CPU 0 is in CPU pool\n",
+ ne_misc_dev.name, cpu_sibling);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+ }
+
+ /*
+ * Check if CPU siblings are included in the provided CPU pool. The
+ * expectation is that full CPU cores are made available in the CPU pool
+ * for enclaves.
+ */
+ for_each_cpu(cpu, cpu_pool) {
+ for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) {
+ if (!cpumask_test_cpu(cpu_sibling, cpu_pool)) {
+ pr_err("%s: CPU %d is not in CPU pool\n",
+ ne_misc_dev.name, cpu_sibling);
+
+ rc = -EINVAL;
+
+ goto free_pool_cpumask;
+ }
+ }
+ }
+
+ /* Calculate the number of threads from a full CPU core. */
+ cpu = cpumask_any(cpu_pool);
+ for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
+ ne_cpu_pool.nr_threads_per_core++;
+
+ ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core;
+
+ ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores,
+ sizeof(*ne_cpu_pool.avail_threads_per_core),
+ GFP_KERNEL);
+ if (!ne_cpu_pool.avail_threads_per_core) {
+ rc = -ENOMEM;
+
+ goto free_pool_cpumask;
+ }
+
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL)) {
+ rc = -ENOMEM;
+
+ goto free_cores_cpumask;
+ }
+
+ /*
+ * Split the NE CPU pool in threads per core to keep the CPU topology
+ * after offlining the CPUs.
+ */
+ for_each_cpu(cpu, cpu_pool) {
+ core_id = topology_core_id(cpu);
+ if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores) {
+ pr_err("%s: Invalid core id %d for CPU %d\n",
+ ne_misc_dev.name, core_id, cpu);
+
+ rc = -EINVAL;
+
+ goto clear_cpumask;
+ }
+
+ cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);
+ }
+
+ /*
+ * CPUs that are given to enclave(s) should not be considered online
+ * by Linux anymore, as the hypervisor will degrade them to floating.
+ * The physical CPUs (full cores) are carved out of the primary / parent
+ * VM and given to the enclave VM. The same number of vCPUs would run
+ * on less pCPUs for the primary / parent VM.
+ *
+ * We offline them here, to not degrade performance and expose correct
+ * topology to Linux and user space.
+ */
+ for_each_cpu(cpu, cpu_pool) {
+ rc = remove_cpu(cpu);
+ if (rc != 0) {
+ pr_err("%s: CPU %d is not offlined [rc=%d]\n",
+ ne_misc_dev.name, cpu, rc);
+
+ goto online_cpus;
+ }
+ }
+
+ free_cpumask_var(cpu_pool);
+
+ ne_cpu_pool.numa_node = numa_node;
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return 0;
+
+online_cpus:
+ for_each_cpu(cpu, cpu_pool)
+ add_cpu(cpu);
+clear_cpumask:
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
+free_cores_cpumask:
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
+ kfree(ne_cpu_pool.avail_threads_per_core);
+free_pool_cpumask:
+ free_cpumask_var(cpu_pool);
+ ne_cpu_pool.nr_parent_vm_cores = 0;
+ ne_cpu_pool.nr_threads_per_core = 0;
+ ne_cpu_pool.numa_node = -1;
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return rc;
+}
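+
+/*
+ * Administration example (usage note, not driver code): the pool above
+ * is configured by writing a cpu-list to the module parameter, e.g. on
+ * a parent VM with 2 threads per core:
+ *
+ *	echo "2,3,6,7" > /sys/module/nitro_enclaves/parameters/ne_cpus
+ *
+ * Both siblings of each selected core must be listed, CPU 0 and its
+ * siblings must be left out, and all CPUs must come from one NUMA node,
+ * or ne_setup_cpu_pool() rejects the list with the errors logged above.
+ */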
+
+/**
+ * ne_teardown_cpu_pool() - Online the CPUs from the NE CPU pool and cleanup the
+ * CPU pool.
+ * @void: No parameters provided.
+ *
+ * Context: Process context.
+ */
+static void ne_teardown_cpu_pool(void)
+{
+ unsigned int cpu = 0;
+ unsigned int i = 0;
+ int rc = -EINVAL;
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ if (!ne_cpu_pool.nr_parent_vm_cores) {
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return;
+ }
+
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) {
+ for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
+ rc = add_cpu(cpu);
+ if (rc != 0)
+ pr_err("%s: CPU %d is not onlined [rc=%d]\n",
+ ne_misc_dev.name, cpu, rc);
+ }
+
+ cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
+
+ free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
+ }
+
+ kfree(ne_cpu_pool.avail_threads_per_core);
+ ne_cpu_pool.nr_parent_vm_cores = 0;
+ ne_cpu_pool.nr_threads_per_core = 0;
+ ne_cpu_pool.numa_node = -1;
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+}
+
+/**
+ * ne_set_kernel_param() - Set the NE CPU pool value via the NE kernel parameter.
+ * @val: NE CPU pool string value.
+ * @kp : NE kernel parameter associated with the NE CPU pool.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_set_kernel_param(const char *val, const struct kernel_param *kp)
+{
+ char error_val[] = "";
+ int rc = -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (ne_check_enclaves_created()) {
+ pr_err("%s: The CPU pool is used by enclave(s)\n", ne_misc_dev.name);
+
+ return -EPERM;
+ }
+
+ ne_teardown_cpu_pool();
+
+ rc = ne_setup_cpu_pool(val);
+ if (rc < 0) {
+ pr_err("%s: Error in setup CPU pool [rc=%d]\n", ne_misc_dev.name, rc);
+
+ param_set_copystring(error_val, kp);
+
+ return rc;
+ }
+
+ rc = param_set_copystring(val, kp);
+ if (rc < 0) {
+ pr_err("%s: Error in param set copystring [rc=%d]\n", ne_misc_dev.name, rc);
+
+ ne_teardown_cpu_pool();
+
+ param_set_copystring(error_val, kp);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_donated_cpu() - Check if the provided CPU is already used by the enclave.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @cpu: CPU to check if already used.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * True if the provided CPU is already used by the enclave.
+ * * False otherwise.
+ */
+static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
+{
+ if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
+ return true;
+
+ return false;
+}
+
+/**
+ * ne_get_unused_core_from_cpu_pool() - Get the id of a full core from the
+ * NE CPU pool.
+ * @void: No parameters provided.
+ *
+ * Context: Process context. This function is called with the ne_enclave and
+ * ne_cpu_pool mutexes held.
+ * Return:
+ * * Core id.
+ * * -1 if no CPU core available in the pool.
+ */
+static int ne_get_unused_core_from_cpu_pool(void)
+{
+ int core_id = -1;
+ unsigned int i = 0;
+
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
+ core_id = i;
+
+ break;
+ }
+
+ return core_id;
+}
+
+/**
+ * ne_set_enclave_threads_per_core() - Set the threads of the provided core in
+ * the enclave data structure.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @core_id: Core id to get its threads from the NE CPU pool.
+ * @vcpu_id: vCPU id part of the provided core.
+ *
+ * Context: Process context. This function is called with the ne_enclave and
+ * ne_cpu_pool mutexes held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave,
+ int core_id, u32 vcpu_id)
+{
+ unsigned int cpu = 0;
+
+ if (core_id < 0 && vcpu_id == 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "No CPUs available in NE CPU pool\n");
+
+ return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
+ }
+
+ if (core_id < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "CPU %d is not in NE CPU pool\n", vcpu_id);
+
+ return -NE_ERR_VCPU_NOT_IN_CPU_POOL;
+ }
+
+ if (core_id >= ne_enclave->nr_parent_vm_cores) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Invalid core id %d - ne_enclave\n", core_id);
+
+ return -NE_ERR_VCPU_INVALID_CPU_CORE;
+ }
+
+ for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
+ cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);
+
+ cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);
+
+ return 0;
+}
+
+/**
+ * ne_get_cpu_from_cpu_pool() - Get a CPU from the NE CPU pool, either from the
+ * remaining sibling(s) of a CPU core or the first
+ * sibling of a new CPU core.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @vcpu_id: vCPU to get from the NE CPU pool.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
+{
+ int core_id = -1;
+ unsigned int cpu = 0;
+ unsigned int i = 0;
+ int rc = -EINVAL;
+
+ /*
+	 * If a thread of a CPU core was previously allocated to this
+	 * enclave, first check its remaining sibling(s) for new CPU
+	 * allocations, so that full CPU cores are used for the enclave.
+ */
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
+ for_each_cpu(cpu, ne_enclave->threads_per_core[i])
+ if (!ne_donated_cpu(ne_enclave, cpu)) {
+ *vcpu_id = cpu;
+
+ return 0;
+ }
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ /*
+ * If no remaining siblings, get a core from the NE CPU pool and keep
+ * track of all the threads in the enclave threads per core data structure.
+ */
+ core_id = ne_get_unused_core_from_cpu_pool();
+
+ rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
+ if (rc < 0)
+ goto unlock_mutex;
+
+ *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);
+
+ rc = 0;
+
+unlock_mutex:
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return rc;
+}
+
+/**
+ * ne_get_vcpu_core_from_cpu_pool() - Get from the NE CPU pool the id of the
+ * core associated with the provided vCPU.
+ * @vcpu_id: Provided vCPU id to get its associated core id.
+ *
+ * Context: Process context. This function is called with the ne_enclave and
+ * ne_cpu_pool mutexes held.
+ * Return:
+ * * Core id.
+ * * -1 if the provided vCPU is not in the pool.
+ */
+static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id)
+{
+ int core_id = -1;
+ unsigned int i = 0;
+
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
+ core_id = i;
+
+ break;
+ }
+
+ return core_id;
+}
+
+/**
+ * ne_check_cpu_in_cpu_pool() - Check if the given vCPU is in the available CPUs
+ * from the pool.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @vcpu_id: ID of the vCPU to check if available in the NE CPU pool.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)
+{
+ int core_id = -1;
+ unsigned int i = 0;
+ int rc = -EINVAL;
+
+ if (ne_donated_cpu(ne_enclave, vcpu_id)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "CPU %d already used\n", vcpu_id);
+
+ return -NE_ERR_VCPU_ALREADY_USED;
+ }
+
+ /*
+	 * If a thread of a CPU core was previously allocated to this
+	 * enclave, but not the full core, first check its remaining
+	 * sibling(s).
+ */
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
+ if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i]))
+ return 0;
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ /*
+ * If no remaining siblings, get from the NE CPU pool the core
+ * associated with the vCPU and keep track of all the threads in the
+ * enclave threads per core data structure.
+ */
+ core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id);
+
+ rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);
+ if (rc < 0)
+ goto unlock_mutex;
+
+ rc = 0;
+
+unlock_mutex:
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return rc;
+}
+
+/**
+ * ne_add_vcpu_ioctl() - Add a vCPU to the slot associated with the current
+ * enclave.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @vcpu_id: ID of the CPU to be associated with the given slot,
+ * apic id on x86.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id)
+{
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
+ int rc = -EINVAL;
+ struct slot_add_vcpu_req slot_add_vcpu_req = {};
+
+ if (ne_enclave->mm != current->mm)
+ return -EIO;
+
+ slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid;
+ slot_add_vcpu_req.vcpu_id = vcpu_id;
+
+ rc = ne_do_request(pdev, SLOT_ADD_VCPU,
+ &slot_add_vcpu_req, sizeof(slot_add_vcpu_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in slot add vCPU [rc=%d]\n", rc);
+
+ return rc;
+ }
+
+ cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids);
+
+ ne_enclave->nr_vcpus++;
+
+ return 0;
+}
+
+/**
+ * ne_sanity_check_user_mem_region() - Sanity check the user space memory
+ * region received during the set user
+ * memory region ioctl call.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @mem_region : User space memory region to be sanity checked.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave,
+ struct ne_user_memory_region mem_region)
+{
+ struct ne_mem_region *ne_mem_region = NULL;
+
+ if (ne_enclave->mm != current->mm)
+ return -EIO;
+
+ if (mem_region.memory_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "User space memory size is not multiple of 2 MiB\n");
+
+ return -NE_ERR_INVALID_MEM_REGION_SIZE;
+ }
+
+ if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "User space address is not 2 MiB aligned\n");
+
+ return -NE_ERR_UNALIGNED_MEM_REGION_ADDR;
+ }
+
+ if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) ||
+ !access_ok((void __user *)(unsigned long)mem_region.userspace_addr,
+ mem_region.memory_size)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Invalid user space address range\n");
+
+ return -NE_ERR_INVALID_MEM_REGION_ADDR;
+ }
+
+ list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list,
+ mem_region_list_entry) {
+ u64 memory_size = ne_mem_region->memory_size;
+ u64 userspace_addr = ne_mem_region->userspace_addr;
+
+ if ((userspace_addr <= mem_region.userspace_addr &&
+ mem_region.userspace_addr < (userspace_addr + memory_size)) ||
+ (mem_region.userspace_addr <= userspace_addr &&
+ (mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "User space memory region already used\n");
+
+ return -NE_ERR_MEM_REGION_ALREADY_USED;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ne_sanity_check_user_mem_region_page() - Sanity check a page from the user space
+ * memory region received during the set
+ * user memory region ioctl call.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @mem_region_page: Page from the user space memory region to be sanity checked.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
+ struct page *mem_region_page)
+{
+ if (!PageHuge(mem_region_page)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Not a hugetlbfs page\n");
+
+ return -NE_ERR_MEM_NOT_HUGE_PAGE;
+ }
+
+ if (page_size(mem_region_page) & (NE_MIN_MEM_REGION_SIZE - 1)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Page size not multiple of 2 MiB\n");
+
+ return -NE_ERR_INVALID_PAGE_SIZE;
+ }
+
+ if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Page is not from NUMA node %d\n",
+ ne_enclave->numa_node);
+
+ return -NE_ERR_MEM_DIFFERENT_NUMA_NODE;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
+ * associated with the current enclave.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @mem_region : User space memory region to be associated with the given slot.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
+ struct ne_user_memory_region mem_region)
+{
+ long gup_rc = 0;
+ unsigned long i = 0;
+ unsigned long max_nr_pages = 0;
+ unsigned long memory_size = 0;
+ struct ne_mem_region *ne_mem_region = NULL;
+ unsigned long nr_phys_contig_mem_regions = 0;
+ struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
+ struct page **phys_contig_mem_regions = NULL;
+ int rc = -EINVAL;
+
+ rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
+ if (rc < 0)
+ return rc;
+
+ ne_mem_region = kzalloc(sizeof(*ne_mem_region), GFP_KERNEL);
+ if (!ne_mem_region)
+ return -ENOMEM;
+
+ max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE;
+
+ ne_mem_region->pages = kcalloc(max_nr_pages, sizeof(*ne_mem_region->pages),
+ GFP_KERNEL);
+ if (!ne_mem_region->pages) {
+ rc = -ENOMEM;
+
+ goto free_mem_region;
+ }
+
+ phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
+ GFP_KERNEL);
+ if (!phys_contig_mem_regions) {
+ rc = -ENOMEM;
+
+ goto free_mem_region;
+ }
+
+ do {
+ i = ne_mem_region->nr_pages;
+
+ if (i == max_nr_pages) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Reached max nr of pages in the pages data struct\n");
+
+ rc = -ENOMEM;
+
+ goto put_pages;
+ }
+
+ gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
+ ne_mem_region->pages + i, NULL);
+ if (gup_rc < 0) {
+ rc = gup_rc;
+
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in get user pages [rc=%d]\n", rc);
+
+ goto put_pages;
+ }
+
+ rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]);
+ if (rc < 0)
+ goto put_pages;
+
+ /*
+		 * TODO: Update once non-contiguous memory regions received
+		 * from user space, or contiguous physical memory regions
+		 * larger than 2 MiB (e.g. 8 MiB), are handled.
+ */
+ phys_contig_mem_regions[i] = ne_mem_region->pages[i];
+
+ memory_size += page_size(ne_mem_region->pages[i]);
+
+ ne_mem_region->nr_pages++;
+ } while (memory_size < mem_region.memory_size);
+
+ /*
+	 * TODO: Update once non-contiguous memory regions received from
+	 * user space, or contiguous physical memory regions larger than
+	 * 2 MiB (e.g. 8 MiB), are handled.
+ */
+ nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
+
+ if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
+ ne_enclave->max_mem_regions) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Reached max memory regions %lld\n",
+ ne_enclave->max_mem_regions);
+
+ rc = -NE_ERR_MEM_MAX_REGIONS;
+
+ goto put_pages;
+ }
+
+ for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+ u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
+ u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
+
+ if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region size is not multiple of 2 MiB\n");
+
+ rc = -EINVAL;
+
+ goto put_pages;
+ }
+
+ if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region address is not 2 MiB aligned\n");
+
+ rc = -EINVAL;
+
+ goto put_pages;
+ }
+ }
+
+ ne_mem_region->memory_size = mem_region.memory_size;
+ ne_mem_region->userspace_addr = mem_region.userspace_addr;
+
+ list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
+
+ for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ struct slot_add_mem_req slot_add_mem_req = {};
+
+ slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
+ slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
+ slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
+
+ rc = ne_do_request(pdev, SLOT_ADD_MEM,
+ &slot_add_mem_req, sizeof(slot_add_mem_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in slot add mem [rc=%d]\n", rc);
+
+ kfree(phys_contig_mem_regions);
+
+ /*
+			 * Exit here without calling put_page() as memory
+			 * regions may have already been added.
+ */
+ return rc;
+ }
+
+ ne_enclave->mem_size += slot_add_mem_req.size;
+ ne_enclave->nr_mem_regions++;
+ }
+
+ kfree(phys_contig_mem_regions);
+
+ return 0;
+
+put_pages:
+ for (i = 0; i < ne_mem_region->nr_pages; i++)
+ put_page(ne_mem_region->pages[i]);
+free_mem_region:
+ kfree(phys_contig_mem_regions);
+ kfree(ne_mem_region->pages);
+ kfree(ne_mem_region);
+
+ return rc;
+}
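+
+/*
+ * Usage note (hedged sketch, not driver code): the regions accepted
+ * above are expected to be huge-page-backed, e.g. allocated in user
+ * space roughly as
+ *
+ *	mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
+ *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+ *
+ * so that each pinned page passes the PageHuge(), 2 MiB multiple and
+ * NUMA node checks performed per page above.
+ */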
+
+/**
+ * ne_start_enclave_ioctl() - Trigger enclave start after the enclave resources,
+ * such as memory and CPU, have been set.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @enclave_start_info : Enclave info that includes enclave cid and flags.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave,
+ struct ne_enclave_start_info *enclave_start_info)
+{
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ unsigned int cpu = 0;
+ struct enclave_start_req enclave_start_req = {};
+ unsigned int i = 0;
+ struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
+ int rc = -EINVAL;
+
+ if (!ne_enclave->nr_mem_regions) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave has no mem regions\n");
+
+ return -NE_ERR_NO_MEM_REGIONS_ADDED;
+ }
+
+ if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave memory is less than %ld\n",
+ NE_MIN_ENCLAVE_MEM_SIZE);
+
+ return -NE_ERR_ENCLAVE_MEM_MIN_SIZE;
+ }
+
+ if (!ne_enclave->nr_vcpus) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave has no vCPUs\n");
+
+ return -NE_ERR_NO_VCPUS_ADDED;
+ }
+
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
+ for_each_cpu(cpu, ne_enclave->threads_per_core[i])
+ if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Full CPU cores not used\n");
+
+ return -NE_ERR_FULL_CORES_NOT_USED;
+ }
+
+ enclave_start_req.enclave_cid = enclave_start_info->enclave_cid;
+ enclave_start_req.flags = enclave_start_info->flags;
+ enclave_start_req.slot_uid = ne_enclave->slot_uid;
+
+ rc = ne_do_request(pdev, ENCLAVE_START,
+ &enclave_start_req, sizeof(enclave_start_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in enclave start [rc=%d]\n", rc);
+
+ return rc;
+ }
+
+ ne_enclave->state = NE_STATE_RUNNING;
+
+ enclave_start_info->enclave_cid = cmd_reply.enclave_cid;
+
+ return 0;
+}
+
+/**
+ * ne_enclave_ioctl() - Ioctl function provided by the enclave file.
+ * @file: File associated with this ioctl function.
+ * @cmd: The command that is set for the ioctl call.
+ * @arg: The argument that is provided for the ioctl call.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static long ne_enclave_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ne_enclave *ne_enclave = file->private_data;
+
+ switch (cmd) {
+ case NE_ADD_VCPU: {
+ int rc = -EINVAL;
+ u32 vcpu_id = 0;
+
+ if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id)))
+ return -EFAULT;
+
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ if (ne_enclave->state != NE_STATE_INIT) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave is not in init state\n");
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return -NE_ERR_NOT_IN_INIT_STATE;
+ }
+
+ if (vcpu_id >= (ne_enclave->nr_parent_vm_cores *
+ ne_enclave->nr_threads_per_core)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "vCPU id higher than max CPU id\n");
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return -NE_ERR_INVALID_VCPU;
+ }
+
+ if (!vcpu_id) {
+ /* Use the CPU pool for choosing a CPU for the enclave. */
+ rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id);
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in get CPU from pool [rc=%d]\n",
+ rc);
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return rc;
+ }
+ } else {
+ /* Check if the provided vCPU is available in the NE CPU pool. */
+ rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id);
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in check CPU %d in pool [rc=%d]\n",
+ vcpu_id, rc);
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return rc;
+ }
+ }
+
+ rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id);
+ if (rc < 0) {
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return rc;
+ }
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case NE_GET_IMAGE_LOAD_INFO: {
+ struct ne_image_load_info image_load_info = {};
+
+ if (copy_from_user(&image_load_info, (void __user *)arg, sizeof(image_load_info)))
+ return -EFAULT;
+
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ if (ne_enclave->state != NE_STATE_INIT) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave is not in init state\n");
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return -NE_ERR_NOT_IN_INIT_STATE;
+ }
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ if (!image_load_info.flags ||
+ image_load_info.flags >= NE_IMAGE_LOAD_MAX_FLAG_VAL) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Incorrect flag in enclave image load info\n");
+
+ return -NE_ERR_INVALID_FLAG_VALUE;
+ }
+
+ if (image_load_info.flags == NE_EIF_IMAGE)
+ image_load_info.memory_offset = NE_EIF_LOAD_OFFSET;
+
+ if (copy_to_user((void __user *)arg, &image_load_info, sizeof(image_load_info)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ case NE_SET_USER_MEMORY_REGION: {
+ struct ne_user_memory_region mem_region = {};
+ int rc = -EINVAL;
+
+ if (copy_from_user(&mem_region, (void __user *)arg, sizeof(mem_region)))
+ return -EFAULT;
+
+ if (mem_region.flags >= NE_MEMORY_REGION_MAX_FLAG_VAL) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Incorrect flag for user memory region\n");
+
+ return -NE_ERR_INVALID_FLAG_VALUE;
+ }
+
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ if (ne_enclave->state != NE_STATE_INIT) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave is not in init state\n");
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return -NE_ERR_NOT_IN_INIT_STATE;
+ }
+
+ rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region);
+ if (rc < 0) {
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return rc;
+ }
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return 0;
+ }
+
+ case NE_START_ENCLAVE: {
+ struct ne_enclave_start_info enclave_start_info = {};
+ int rc = -EINVAL;
+
+ if (copy_from_user(&enclave_start_info, (void __user *)arg,
+ sizeof(enclave_start_info)))
+ return -EFAULT;
+
+ if (enclave_start_info.flags >= NE_ENCLAVE_START_MAX_FLAG_VAL) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Incorrect flag in enclave start info\n");
+
+ return -NE_ERR_INVALID_FLAG_VALUE;
+ }
+
+ /*
+ * Do not use well-known CIDs - 0, 1, 2 - for enclaves.
+ * VMADDR_CID_ANY = -1U
+ * VMADDR_CID_HYPERVISOR = 0
+ * VMADDR_CID_LOCAL = 1
+ * VMADDR_CID_HOST = 2
+ * Note: 0 is used as a placeholder to auto-generate an enclave CID.
+ * http://man7.org/linux/man-pages/man7/vsock.7.html
+ */
+ if (enclave_start_info.enclave_cid > 0 &&
+ enclave_start_info.enclave_cid <= VMADDR_CID_HOST) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Well-known CID value, not to be used for enclaves\n");
+
+ return -NE_ERR_INVALID_ENCLAVE_CID;
+ }
+
+ if (enclave_start_info.enclave_cid == U32_MAX) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Well-known CID value, not to be used for enclaves\n");
+
+ return -NE_ERR_INVALID_ENCLAVE_CID;
+ }
+
+ /*
+ * Do not use the CID of the primary / parent VM for enclaves.
+ */
+ if (enclave_start_info.enclave_cid == NE_PARENT_VM_CID) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "CID of the parent VM, not to be used for enclaves\n");
+
+ return -NE_ERR_INVALID_ENCLAVE_CID;
+ }
+
+ /* 64-bit CIDs are not yet supported for the vsock device. */
+ if (enclave_start_info.enclave_cid > U32_MAX) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "64-bit CIDs not yet supported for the vsock device\n");
+
+ return -NE_ERR_INVALID_ENCLAVE_CID;
+ }
+
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ if (ne_enclave->state != NE_STATE_INIT) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Enclave is not in init state\n");
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return -NE_ERR_NOT_IN_INIT_STATE;
+ }
+
+ rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info);
+ if (rc < 0) {
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ return rc;
+ }
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+
+ if (copy_to_user((void __user *)arg, &enclave_start_info,
+ sizeof(enclave_start_info)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_enclave_remove_all_mem_region_entries() - Remove all memory region entries
+ * from the enclave data structure.
+ * @ne_enclave : Private data associated with the current enclave.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ */
+static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave)
+{
+ unsigned long i = 0;
+ struct ne_mem_region *ne_mem_region = NULL;
+ struct ne_mem_region *ne_mem_region_tmp = NULL;
+
+ list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp,
+ &ne_enclave->mem_regions_list,
+ mem_region_list_entry) {
+ list_del(&ne_mem_region->mem_region_list_entry);
+
+ for (i = 0; i < ne_mem_region->nr_pages; i++)
+ put_page(ne_mem_region->pages[i]);
+
+ kfree(ne_mem_region->pages);
+
+ kfree(ne_mem_region);
+ }
+}
+
+/**
+ * ne_enclave_remove_all_vcpu_id_entries() - Remove all vCPU id entries from
+ * the enclave data structure.
+ * @ne_enclave : Private data associated with the current enclave.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ */
+static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave)
+{
+ unsigned int cpu = 0;
+ unsigned int i = 0;
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
+ for_each_cpu(cpu, ne_enclave->threads_per_core[i])
+ /* Update the available NE CPU pool. */
+ cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);
+
+ free_cpumask_var(ne_enclave->threads_per_core[i]);
+ }
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ kfree(ne_enclave->threads_per_core);
+
+ free_cpumask_var(ne_enclave->vcpu_ids);
+}
+
+/**
+ * ne_pci_dev_remove_enclave_entry() - Remove the enclave entry from the data
+ * structure that is part of the NE PCI
+ * device private data.
+ * @ne_enclave : Private data associated with the current enclave.
+ * @ne_pci_dev : Private data associated with the PCI device.
+ *
+ * Context: Process context. This function is called with the ne_pci_dev enclave
+ * mutex held.
+ */
+static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave,
+ struct ne_pci_dev *ne_pci_dev)
+{
+ struct ne_enclave *ne_enclave_entry = NULL;
+ struct ne_enclave *ne_enclave_entry_tmp = NULL;
+
+ list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp,
+ &ne_pci_dev->enclaves_list, enclave_list_entry) {
+ if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) {
+ list_del(&ne_enclave_entry->enclave_list_entry);
+
+ break;
+ }
+ }
+}
+
+/**
+ * ne_enclave_release() - Release function provided by the enclave file.
+ * @inode: Inode associated with this file release function.
+ * @file: File associated with this release function.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_enclave_release(struct inode *inode, struct file *file)
+{
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ struct enclave_stop_req enclave_stop_request = {};
+ struct ne_enclave *ne_enclave = file->private_data;
+ struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
+ struct pci_dev *pdev = ne_pci_dev->pdev;
+ int rc = -EINVAL;
+ struct slot_free_req slot_free_req = {};
+
+ if (!ne_enclave)
+ return 0;
+
+ /*
+ * Early exit in case there is an error in the enclave creation logic
+ * and fput() is called on the cleanup path.
+ */
+ if (!ne_enclave->slot_uid)
+ return 0;
+
+ /*
+ * Acquire the enclave list mutex before the enclave mutex
+ * in order to avoid deadlocks with ne_event_work_handler().
+ */
+ mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) {
+ enclave_stop_request.slot_uid = ne_enclave->slot_uid;
+
+ rc = ne_do_request(pdev, ENCLAVE_STOP,
+ &enclave_stop_request, sizeof(enclave_stop_request),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in enclave stop [rc=%d]\n", rc);
+
+ goto unlock_mutex;
+ }
+
+ memset(&cmd_reply, 0, sizeof(cmd_reply));
+ }
+
+ slot_free_req.slot_uid = ne_enclave->slot_uid;
+
+ rc = ne_do_request(pdev, SLOT_FREE,
+ &slot_free_req, sizeof(slot_free_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in slot free [rc=%d]\n", rc);
+
+ goto unlock_mutex;
+ }
+
+ ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev);
+ ne_enclave_remove_all_mem_region_entries(ne_enclave);
+ ne_enclave_remove_all_vcpu_id_entries(ne_enclave);
+
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+
+ kfree(ne_enclave);
+
+ return 0;
+
+unlock_mutex:
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+
+ return rc;
+}
+
+/**
+ * ne_enclave_poll() - Poll functionality used for enclave out-of-band events.
+ * @file: File associated with this poll function.
+ * @wait: Poll table data structure.
+ *
+ * Context: Process context.
+ * Return:
+ * * Poll mask.
+ */
+static __poll_t ne_enclave_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask = 0;
+ struct ne_enclave *ne_enclave = file->private_data;
+
+ poll_wait(file, &ne_enclave->eventq, wait);
+
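+ /* has_event is set on out-of-band enclave state changes; report them as EPOLLHUP. */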
+ if (ne_enclave->has_event)
+ mask |= EPOLLHUP;
+
+ return mask;
+}
+
+static const struct file_operations ne_enclave_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .poll = ne_enclave_poll,
+ .unlocked_ioctl = ne_enclave_ioctl,
+ .release = ne_enclave_release,
+};
+
+/**
+ * ne_create_vm_ioctl() - Allocate a slot to be associated with an enclave.
+ * Create an enclave file descriptor to be further used
+ * for enclave resource handling e.g. memory regions
+ * and CPUs.
+ * @ne_pci_dev : Private data associated with the PCI device.
+ * @slot_uid: Generated unique slot id associated with an enclave.
+ *
+ * Context: Process context. This function is called with the ne_pci_dev enclave
+ * mutex held.
+ * Return:
+ * * Enclave fd on success.
+ * * Negative return value on failure.
+ */
+static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
+{
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ int enclave_fd = -1;
+ struct file *enclave_file = NULL;
+ unsigned int i = 0;
+ struct ne_enclave *ne_enclave = NULL;
+ struct pci_dev *pdev = ne_pci_dev->pdev;
+ int rc = -EINVAL;
+ struct slot_alloc_req slot_alloc_req = {};
+
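+ /* Check that the NE CPU pool has at least one full CPU core available. */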
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
+ if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i]))
+ break;
+
+ if (i == ne_cpu_pool.nr_parent_vm_cores) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "No CPUs available in CPU pool\n");
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
+ }
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL);
+ if (!ne_enclave)
+ return -ENOMEM;
+
+ mutex_lock(&ne_cpu_pool.mutex);
+
+ ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores;
+ ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core;
+ ne_enclave->numa_node = ne_cpu_pool.numa_node;
+
+ mutex_unlock(&ne_cpu_pool.mutex);
+
+ ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores,
+ sizeof(*ne_enclave->threads_per_core), GFP_KERNEL);
+ if (!ne_enclave->threads_per_core) {
+ rc = -ENOMEM;
+
+ goto free_ne_enclave;
+ }
+
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
+ if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) {
+ rc = -ENOMEM;
+
+ goto free_cpumask;
+ }
+
+ if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) {
+ rc = -ENOMEM;
+
+ goto free_cpumask;
+ }
+
+ enclave_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (enclave_fd < 0) {
+ rc = enclave_fd;
+
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in getting unused fd [rc=%d]\n", rc);
+
+ goto free_cpumask;
+ }
+
+ enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR);
+ if (IS_ERR(enclave_file)) {
+ rc = PTR_ERR(enclave_file);
+
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in anon inode get file [rc=%d]\n", rc);
+
+ goto put_fd;
+ }
+
+ rc = ne_do_request(pdev, SLOT_ALLOC,
+ &slot_alloc_req, sizeof(slot_alloc_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Error in slot alloc [rc=%d]\n", rc);
+
+ goto put_file;
+ }
+
+ init_waitqueue_head(&ne_enclave->eventq);
+ ne_enclave->has_event = false;
+ mutex_init(&ne_enclave->enclave_info_mutex);
+ ne_enclave->max_mem_regions = cmd_reply.mem_regions;
+ INIT_LIST_HEAD(&ne_enclave->mem_regions_list);
+ ne_enclave->mm = current->mm;
+ ne_enclave->slot_uid = cmd_reply.slot_uid;
+ ne_enclave->state = NE_STATE_INIT;
+
+ list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
+
+ *slot_uid = ne_enclave->slot_uid;
+
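+ /* The enclave file becomes reachable via the fd from this point on. */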
+ fd_install(enclave_fd, enclave_file);
+
+ return enclave_fd;
+
+put_file:
+ fput(enclave_file);
+put_fd:
+ put_unused_fd(enclave_fd);
+free_cpumask:
+ free_cpumask_var(ne_enclave->vcpu_ids);
+ for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
+ free_cpumask_var(ne_enclave->threads_per_core[i]);
+ kfree(ne_enclave->threads_per_core);
+free_ne_enclave:
+ kfree(ne_enclave);
+
+ return rc;
+}
+
+/**
+ * ne_ioctl() - Ioctl function provided by the NE misc device.
+ * @file: File associated with this ioctl function.
+ * @cmd: The command that is set for the ioctl call.
+ * @arg: The argument that is provided for the ioctl call.
+ *
+ * Context: Process context.
+ * Return:
+ * * Ioctl result (e.g. enclave file descriptor) on success.
+ * * Negative return value on failure.
+ */
+static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case NE_CREATE_VM: {
+ int enclave_fd = -1;
+ struct file *enclave_file = NULL;
+ struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
+ int rc = -EINVAL;
+ u64 slot_uid = 0;
+
+ mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+
+ enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
+ if (enclave_fd < 0) {
+ rc = enclave_fd;
+
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+
+ return rc;
+ }
+
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+
+ if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
+ enclave_file = fget(enclave_fd);
+ /* Decrement file refs to have release() called. */
+ fput(enclave_file);
+ fput(enclave_file);
+ put_unused_fd(enclave_fd);
+
+ return -EFAULT;
+ }
+
+ return enclave_fd;
+ }
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int __init ne_init(void)
+{
+ mutex_init(&ne_cpu_pool.mutex);
+
+ return pci_register_driver(&ne_pci_driver);
+}
+
+static void __exit ne_exit(void)
+{
+ pci_unregister_driver(&ne_pci_driver);
+
+ ne_teardown_cpu_pool();
+}
+
+module_init(ne_init);
+module_exit(ne_exit);
+
+MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
+MODULE_DESCRIPTION("Nitro Enclaves Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.h b/drivers/virt/nitro_enclaves/ne_misc_dev.h
new file mode 100644
index 000000000000..2a4d2224baba
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _NE_MISC_DEV_H_
+#define _NE_MISC_DEV_H_
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+
+#include "ne_pci_dev.h"
+
+/**
+ * struct ne_mem_region - Entry in the enclave user space memory regions list.
+ * @mem_region_list_entry: Entry in the list of enclave memory regions.
+ * @memory_size: Size of the user space memory region.
+ * @nr_pages: Number of pages that make up the memory region.
+ * @pages: Pages that make up the user space memory region.
+ * @userspace_addr: User space address of the memory region.
+ */
+struct ne_mem_region {
+ struct list_head mem_region_list_entry;
+ u64 memory_size;
+ unsigned long nr_pages;
+ struct page **pages;
+ u64 userspace_addr;
+};
+
+/**
+ * struct ne_enclave - Per-enclave data used for enclave lifetime management.
+ * @enclave_info_mutex : Mutex for accessing this internal state.
+ * @enclave_list_entry : Entry in the list of created enclaves.
+ * @eventq: Wait queue used for out-of-band event notifications
+ * triggered from the PCI device event handler to
+ * the enclave process via the poll function.
+ * @has_event: Variable used to determine if the out-of-band event
+ * was triggered.
+ * @max_mem_regions: The maximum number of memory regions that can be
+ * handled by the hypervisor.
+ * @mem_regions_list: Enclave user space memory regions list.
+ * @mem_size: Enclave memory size.
+ * @mm : Enclave process abstraction mm data struct.
+ * @nr_mem_regions: Number of memory regions associated with the enclave.
+ * @nr_parent_vm_cores : The size of the threads per core array. The
+ * total number of CPU cores available on the
+ * parent / primary VM.
+ * @nr_threads_per_core: The number of threads that a full CPU core has.
+ * @nr_vcpus: Number of vcpus associated with the enclave.
+ * @numa_node: NUMA node of the enclave memory and CPUs.
+ * @slot_uid: Slot unique id mapped to the enclave.
+ * @state: Enclave state, updated during enclave lifetime.
+ * @threads_per_core: Enclave full CPU cores array, indexed by core id,
+ * consisting of cpumasks with all their threads.
+ * Full CPU cores are taken from the NE CPU pool
+ * and are available to the enclave.
+ * @vcpu_ids: Cpumask of the vCPUs that are set for the enclave.
+ */
+struct ne_enclave {
+ struct mutex enclave_info_mutex;
+ struct list_head enclave_list_entry;
+ wait_queue_head_t eventq;
+ bool has_event;
+ u64 max_mem_regions;
+ struct list_head mem_regions_list;
+ u64 mem_size;
+ struct mm_struct *mm;
+ unsigned int nr_mem_regions;
+ unsigned int nr_parent_vm_cores;
+ unsigned int nr_threads_per_core;
+ unsigned int nr_vcpus;
+ int numa_node;
+ u64 slot_uid;
+ u16 state;
+ cpumask_var_t *threads_per_core;
+ cpumask_var_t vcpu_ids;
+};
+
+/**
+ * enum ne_state - States available for an enclave.
+ * @NE_STATE_INIT: The enclave has not been started yet.
+ * @NE_STATE_RUNNING: The enclave was started and is running as expected.
+ * @NE_STATE_STOPPED: The enclave exited without userspace interaction.
+ */
+enum ne_state {
+ NE_STATE_INIT = 0,
+ NE_STATE_RUNNING = 2,
+ NE_STATE_STOPPED = U16_MAX,
+};
+
+/**
+ * struct ne_devs - Data structure to keep refs to the NE misc and PCI devices.
+ * @ne_misc_dev: Nitro Enclaves misc device.
+ * @ne_pci_dev : Nitro Enclaves PCI device.
+ */
+struct ne_devs {
+ struct miscdevice *ne_misc_dev;
+ struct ne_pci_dev *ne_pci_dev;
+};
+
+/* Nitro Enclaves (NE) data structure for keeping refs to the NE misc and PCI devices. */
+extern struct ne_devs ne_devs;
+
+#endif /* _NE_MISC_DEV_H_ */
diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
new file mode 100644
index 000000000000..b9c1de41e300
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+/**
+ * DOC: Nitro Enclaves (NE) PCI device driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nitro_enclaves.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "ne_misc_dev.h"
+#include "ne_pci_dev.h"
+
+/**
+ * NE_DEFAULT_TIMEOUT_MSECS - Default timeout to wait for a reply from
+ * the NE PCI device.
+ */
+#define NE_DEFAULT_TIMEOUT_MSECS (120000) /* 120 sec */
+
+static const struct pci_device_id ne_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_NE) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ne_pci_ids);
+
+/**
+ * ne_submit_request() - Submit command request to the PCI device based on the
+ * command type.
+ * @pdev: PCI device to send the command to.
+ * @cmd_type: Command type of the request sent to the PCI device.
+ * @cmd_request: Command request payload.
+ * @cmd_request_size: Size of the command request payload.
+ *
+ * Context: Process context. This function is called with the ne_pci_dev mutex held.
+ */
+static void ne_submit_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
+ void *cmd_request, size_t cmd_request_size)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
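+ /* Copy the request payload into the MMIO send buffer, then write the command type to trigger its handling. */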
+ memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request, cmd_request_size);
+
+ iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);
+}
+
+/**
+ * ne_retrieve_reply() - Retrieve reply from the PCI device.
+ * @pdev: PCI device to receive the reply from.
+ * @cmd_reply: Command reply payload.
+ * @cmd_reply_size: Size of the command reply payload.
+ *
+ * Context: Process context. This function is called with the ne_pci_dev mutex held.
+ */
+static void ne_retrieve_reply(struct pci_dev *pdev, struct ne_pci_dev_cmd_reply *cmd_reply,
+ size_t cmd_reply_size)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+ memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA, cmd_reply_size);
+}
+
+/**
+ * ne_wait_for_reply() - Wait for a reply to a PCI device command.
+ * @pdev: PCI device to wait for a reply from.
+ *
+ * Context: Process context. This function is called with the ne_pci_dev mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_wait_for_reply(struct pci_dev *pdev)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+ int rc = -EINVAL;
+
+ /*
+ * TODO: Update to _interruptible and handle interrupted wait event
+ * e.g. -ERESTARTSYS, incoming signals + update timeout, if needed.
+ */
+ rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q,
+ atomic_read(&ne_pci_dev->cmd_reply_avail) != 0,
+ msecs_to_jiffies(NE_DEFAULT_TIMEOUT_MSECS));
+ if (!rc)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
+ void *cmd_request, size_t cmd_request_size,
+ struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+ int rc = -EINVAL;
+
+ if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
+ dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n", cmd_type);
+
+ return -EINVAL;
+ }
+
+ if (!cmd_request) {
+ dev_err_ratelimited(&pdev->dev, "Null cmd request for cmd type=%u\n",
+ cmd_type);
+
+ return -EINVAL;
+ }
+
+ if (cmd_request_size > NE_SEND_DATA_SIZE) {
+ dev_err_ratelimited(&pdev->dev, "Invalid req size=%zu for cmd type=%u\n",
+ cmd_request_size, cmd_type);
+
+ return -EINVAL;
+ }
+
+ if (!cmd_reply) {
+ dev_err_ratelimited(&pdev->dev, "Null cmd reply for cmd type=%u\n",
+ cmd_type);
+
+ return -EINVAL;
+ }
+
+ if (cmd_reply_size > NE_RECV_DATA_SIZE) {
+ dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu for cmd type=%u\n",
+ cmd_reply_size, cmd_type);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Use this mutex so that the PCI device handles one command request at
+ * a time.
+ */
+ mutex_lock(&ne_pci_dev->pci_dev_mutex);
+
+ atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
+
+ ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size);
+
+ rc = ne_wait_for_reply(pdev);
+ if (rc < 0) {
+ dev_err_ratelimited(&pdev->dev, "Error in wait for reply for cmd type=%u [rc=%d]\n",
+ cmd_type, rc);
+
+ goto unlock_mutex;
+ }
+
+ ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);
+
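+ /* Clear the reply flag so stale state does not satisfy the next wait. */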
+ atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
+
+ if (cmd_reply->rc < 0) {
+ rc = cmd_reply->rc;
+
+ dev_err_ratelimited(&pdev->dev, "Error in cmd process logic, cmd type=%u [rc=%d]\n",
+ cmd_type, rc);
+
+ goto unlock_mutex;
+ }
+
+ rc = 0;
+
+unlock_mutex:
+ mutex_unlock(&ne_pci_dev->pci_dev_mutex);
+
+ return rc;
+}
+
+/**
+ * ne_reply_handler() - Interrupt handler for retrieving a reply matching a
+ * request sent to the PCI device for enclave lifetime
+ * management.
+ * @irq: Received interrupt for a reply sent by the PCI device.
+ * @args: PCI device private data structure.
+ *
+ * Context: Interrupt context.
+ * Return:
+ * * IRQ_HANDLED on handled interrupt.
+ */
+static irqreturn_t ne_reply_handler(int irq, void *args)
+{
+ struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
+
+ atomic_set(&ne_pci_dev->cmd_reply_avail, 1);
+
+ /* TODO: Update to _interruptible. */
+ wake_up(&ne_pci_dev->cmd_reply_wait_q);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ne_event_work_handler() - Work queue handler for notifying enclaves on a
+ * state change received by the event interrupt
+ * handler.
+ * @work: Item containing the NE PCI device for which an out-of-band event
+ * was issued.
+ *
+ * An out-of-band event is issued by the Nitro Hypervisor when at least one
+ * enclave changes state without client interaction.
+ *
+ * Context: Work queue context.
+ */
+static void ne_event_work_handler(struct work_struct *work)
+{
+ struct ne_pci_dev_cmd_reply cmd_reply = {};
+ struct ne_enclave *ne_enclave = NULL;
+ struct ne_pci_dev *ne_pci_dev =
+ container_of(work, struct ne_pci_dev, notify_work);
+ struct pci_dev *pdev = ne_pci_dev->pdev;
+ int rc = -EINVAL;
+ struct slot_info_req slot_info_req = {};
+
+ mutex_lock(&ne_pci_dev->enclaves_list_mutex);
+
+ /*
+ * Iterate over all enclaves registered for the Nitro Enclaves
+ * PCI device and determine which enclave(s) the out-of-band event
+ * corresponds to.
+ */
+ list_for_each_entry(ne_enclave, &ne_pci_dev->enclaves_list, enclave_list_entry) {
+ mutex_lock(&ne_enclave->enclave_info_mutex);
+
+ /*
+ * Enclaves that were never started cannot receive out-of-band
+ * events.
+ */
+ if (ne_enclave->state != NE_STATE_RUNNING)
+ goto unlock;
+
+ slot_info_req.slot_uid = ne_enclave->slot_uid;
+
+ rc = ne_do_request(pdev, SLOT_INFO,
+ &slot_info_req, sizeof(slot_info_req),
+ &cmd_reply, sizeof(cmd_reply));
+ if (rc < 0)
+ dev_err(&pdev->dev, "Error in slot info [rc=%d]\n", rc);
+
+ /* Notify enclave process that the enclave state changed. */
+ if (ne_enclave->state != cmd_reply.state) {
+ ne_enclave->state = cmd_reply.state;
+
+ ne_enclave->has_event = true;
+
+ wake_up_interruptible(&ne_enclave->eventq);
+ }
+
+unlock:
+ mutex_unlock(&ne_enclave->enclave_info_mutex);
+ }
+
+ mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
+}
+
+/**
+ * ne_event_handler() - Interrupt handler for PCI device out-of-band events.
+ * This interrupt does not supply any data in the MMIO
+ * region. It notifies a change in the state of any of
+ * the launched enclaves.
+ * @irq: Received interrupt for an out-of-band event.
+ * @args: PCI device private data structure.
+ *
+ * Context: Interrupt context.
+ * Return:
+ * * IRQ_HANDLED on handled interrupt.
+ */
+static irqreturn_t ne_event_handler(int irq, void *args)
+{
+ struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
+
+ queue_work(ne_pci_dev->event_wq, &ne_pci_dev->notify_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ne_setup_msix() - Set up MSI-X vectors for the PCI device.
+ * @pdev: PCI device to set up the MSI-X for.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_setup_msix(struct pci_dev *pdev)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+ int nr_vecs = 0;
+ int rc = -EINVAL;
+
+ nr_vecs = pci_msix_vec_count(pdev);
+ if (nr_vecs < 0) {
+ rc = nr_vecs;
+
+ dev_err(&pdev->dev, "Error in getting vec count [rc=%d]\n", rc);
+
+ return rc;
+ }
+
+ rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in alloc MSI-X vecs [rc=%d]\n", rc);
+
+ return rc;
+ }
+
+ /*
+ * This IRQ gets triggered every time the PCI device responds to a
+ * command request. The reply is then retrieved, reading from the MMIO
+ * space of the PCI device.
+ */
+ rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_reply_handler,
+ 0, "enclave_cmd", ne_pci_dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in request irq reply [rc=%d]\n", rc);
+
+ goto free_irq_vectors;
+ }
+
+ ne_pci_dev->event_wq = create_singlethread_workqueue("ne_pci_dev_wq");
+ if (!ne_pci_dev->event_wq) {
+ rc = -ENOMEM;
+
+ dev_err(&pdev->dev, "Cannot get wq for dev events [rc=%d]\n", rc);
+
+ goto free_reply_irq_vec;
+ }
+
+ INIT_WORK(&ne_pci_dev->notify_work, ne_event_work_handler);
+
+ /*
+ * This IRQ gets triggered every time any enclave's state changes. Its
+ * handler then scans for the changes and propagates them to the user
+ * space.
+ */
+ rc = request_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_event_handler,
+ 0, "enclave_evt", ne_pci_dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in request irq event [rc=%d]\n", rc);
+
+ goto destroy_wq;
+ }
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(ne_pci_dev->event_wq);
+free_reply_irq_vec:
+ free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+
+ return rc;
+}
+
+/**
+ * ne_teardown_msix() - Tear down MSI-X vectors for the PCI device.
+ * @pdev: PCI device to tear down the MSI-X for.
+ *
+ * Context: Process context.
+ */
+static void ne_teardown_msix(struct pci_dev *pdev)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+ free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev);
+
+ flush_work(&ne_pci_dev->notify_work);
+ flush_workqueue(ne_pci_dev->event_wq);
+ destroy_workqueue(ne_pci_dev->event_wq);
+
+ free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
+
+ pci_free_irq_vectors(pdev);
+}
+
+/**
+ * ne_pci_dev_enable() - Select the PCI device version and enable it.
+ * @pdev: PCI device to select version for and then enable.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_pci_dev_enable(struct pci_dev *pdev)
+{
+ u8 dev_enable_reply = 0;
+ u16 dev_version_reply = 0;
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+ iowrite16(NE_VERSION_MAX, ne_pci_dev->iomem_base + NE_VERSION);
+
+ dev_version_reply = ioread16(ne_pci_dev->iomem_base + NE_VERSION);
+ if (dev_version_reply != NE_VERSION_MAX) {
+ dev_err(&pdev->dev, "Error in pci dev version cmd\n");
+
+ return -EIO;
+ }
+
+ iowrite8(NE_ENABLE_ON, ne_pci_dev->iomem_base + NE_ENABLE);
+
+ dev_enable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
+ if (dev_enable_reply != NE_ENABLE_ON) {
+ dev_err(&pdev->dev, "Error in pci dev enable cmd\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_pci_dev_disable() - Disable the PCI device.
+ * @pdev: PCI device to disable.
+ *
+ * Context: Process context.
+ */
+static void ne_pci_dev_disable(struct pci_dev *pdev)
+{
+ u8 dev_disable_reply = 0;
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+ const unsigned int sleep_time = 10; /* 10 ms */
+ unsigned int sleep_time_count = 0;
+
+ iowrite8(NE_ENABLE_OFF, ne_pci_dev->iomem_base + NE_ENABLE);
+
+ /*
+ * Check for NE_ENABLE_OFF in a loop, to handle the case when the device
+ * state is not set to disabled immediately, but goes through a
+ * transitory disabling state first.
+ */
+ while (sleep_time_count < NE_DEFAULT_TIMEOUT_MSECS) {
+ dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
+ if (dev_disable_reply == NE_ENABLE_OFF)
+ return;
+
+ msleep_interruptible(sleep_time);
+ sleep_time_count += sleep_time;
+ }
+
+ dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
+ if (dev_disable_reply != NE_ENABLE_OFF)
+ dev_err(&pdev->dev, "Error in pci dev disable cmd\n");
+}
+
+/**
+ * ne_pci_probe() - Probe function for the NE PCI device.
+ * @pdev: PCI device to match with the NE PCI driver.
+ * @id : PCI device id table associated with the NE PCI driver.
+ *
+ * Context: Process context.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ne_pci_dev *ne_pci_dev = NULL;
+ int rc = -EINVAL;
+
+ ne_pci_dev = kzalloc(sizeof(*ne_pci_dev), GFP_KERNEL);
+ if (!ne_pci_dev)
+ return -ENOMEM;
+
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in pci dev enable [rc=%d]\n", rc);
+
+ goto free_ne_pci_dev;
+ }
+
+ rc = pci_request_regions_exclusive(pdev, "nitro_enclaves");
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in pci request regions [rc=%d]\n", rc);
+
+ goto disable_pci_dev;
+ }
+
+ ne_pci_dev->iomem_base = pci_iomap(pdev, PCI_BAR_NE, 0);
+ if (!ne_pci_dev->iomem_base) {
+ rc = -ENOMEM;
+
+ dev_err(&pdev->dev, "Error in pci iomap [rc=%d]\n", rc);
+
+ goto release_pci_regions;
+ }
+
+ pci_set_drvdata(pdev, ne_pci_dev);
+
+ rc = ne_setup_msix(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in pci dev msix setup [rc=%d]\n", rc);
+
+ goto iounmap_pci_bar;
+ }
+
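+ /* Make sure the device starts from a known disabled state before enabling it. */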
+ ne_pci_dev_disable(pdev);
+
+ rc = ne_pci_dev_enable(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in ne_pci_dev enable [rc=%d]\n", rc);
+
+ goto teardown_msix;
+ }
+
+ atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
+ init_waitqueue_head(&ne_pci_dev->cmd_reply_wait_q);
+ INIT_LIST_HEAD(&ne_pci_dev->enclaves_list);
+ mutex_init(&ne_pci_dev->enclaves_list_mutex);
+ mutex_init(&ne_pci_dev->pci_dev_mutex);
+ ne_pci_dev->pdev = pdev;
+
+ ne_devs.ne_pci_dev = ne_pci_dev;
+
+ rc = misc_register(ne_devs.ne_misc_dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error in misc dev register [rc=%d]\n", rc);
+
+ goto disable_ne_pci_dev;
+ }
+
+ return 0;
+
+disable_ne_pci_dev:
+ ne_devs.ne_pci_dev = NULL;
+ ne_pci_dev_disable(pdev);
+teardown_msix:
+ ne_teardown_msix(pdev);
+iounmap_pci_bar:
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, ne_pci_dev->iomem_base);
+release_pci_regions:
+ pci_release_regions(pdev);
+disable_pci_dev:
+ pci_disable_device(pdev);
+free_ne_pci_dev:
+ kfree(ne_pci_dev);
+
+ return rc;
+}
+
+/**
+ * ne_pci_remove() - Remove function for the NE PCI device.
+ * @pdev: PCI device associated with the NE PCI driver.
+ *
+ * Context: Process context.
+ */
+static void ne_pci_remove(struct pci_dev *pdev)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+ misc_deregister(ne_devs.ne_misc_dev);
+
+ ne_devs.ne_pci_dev = NULL;
+
+ ne_pci_dev_disable(pdev);
+
+ ne_teardown_msix(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ pci_iounmap(pdev, ne_pci_dev->iomem_base);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ kfree(ne_pci_dev);
+}
+
+/**
+ * ne_pci_shutdown() - Shutdown function for the NE PCI device.
+ * @pdev: PCI device associated with the NE PCI driver.
+ *
+ * Context: Process context.
+ */
+static void ne_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
+
+ if (!ne_pci_dev)
+ return;
+
+ misc_deregister(ne_devs.ne_misc_dev);
+
+ ne_devs.ne_pci_dev = NULL;
+
+ ne_pci_dev_disable(pdev);
+
+ ne_teardown_msix(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ pci_iounmap(pdev, ne_pci_dev->iomem_base);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ kfree(ne_pci_dev);
+}
+
+/*
+ * TODO: Add suspend / resume functions for power management w/ CONFIG_PM, if
+ * needed.
+ */
+/* NE PCI device driver. */
+struct pci_driver ne_pci_driver = {
+ .name = "nitro_enclaves",
+ .id_table = ne_pci_ids,
+ .probe = ne_pci_probe,
+ .remove = ne_pci_remove,
+ .shutdown = ne_pci_shutdown,
+};
diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.h b/drivers/virt/nitro_enclaves/ne_pci_dev.h
new file mode 100644
index 000000000000..8bfbc6607818
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef _NE_PCI_DEV_H_
+#define _NE_PCI_DEV_H_
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/wait.h>
+
+/**
+ * DOC: Nitro Enclaves (NE) PCI device
+ */
+
+/**
+ * PCI_DEVICE_ID_NE - Nitro Enclaves PCI device id.
+ */
+#define PCI_DEVICE_ID_NE (0xe4c1)
+/**
+ * PCI_BAR_NE - Nitro Enclaves PCI device MMIO BAR.
+ */
+#define PCI_BAR_NE (0x03)
+
+/**
+ * DOC: Device registers in the NE PCI device MMIO BAR
+ */
+
+/**
+ * NE_ENABLE - (1 byte) Register to notify the device that the driver is using
+ * it (Read/Write).
+ */
+#define NE_ENABLE (0x0000)
+#define NE_ENABLE_OFF (0x00)
+#define NE_ENABLE_ON (0x01)
+
+/**
+ * NE_VERSION - (2 bytes) Register to select the device run-time version
+ * (Read/Write).
+ */
+#define NE_VERSION (0x0002)
+#define NE_VERSION_MAX (0x0001)
+
+/**
+ * NE_COMMAND - (4 bytes) Register to notify the device what command was
+ * requested (Write-Only).
+ */
+#define NE_COMMAND (0x0004)
+
+/**
+ * NE_EVTCNT - (4 bytes) Register to notify the driver that a reply or a device
+ * event is available (Read-Only):
+ * - Lower half - command reply counter
+ * - Higher half - out-of-band device event counter
+ */
+#define NE_EVTCNT (0x000c)
+#define NE_EVTCNT_REPLY_SHIFT (0)
+#define NE_EVTCNT_REPLY_MASK (0x0000ffff)
+#define NE_EVTCNT_REPLY(cnt) (((cnt) & NE_EVTCNT_REPLY_MASK) >> \
+ NE_EVTCNT_REPLY_SHIFT)
+#define NE_EVTCNT_EVENT_SHIFT (16)
+#define NE_EVTCNT_EVENT_MASK (0xffff0000)
+#define NE_EVTCNT_EVENT(cnt) (((cnt) & NE_EVTCNT_EVENT_MASK) >> \
+ NE_EVTCNT_EVENT_SHIFT)
+
+/**
+ * NE_SEND_DATA - (240 bytes) Buffer for sending the command request payload
+ * (Read/Write).
+ */
+#define NE_SEND_DATA (0x0010)
+
+/**
+ * NE_RECV_DATA - (240 bytes) Buffer for receiving the command reply payload
+ * (Read-Only).
+ */
+#define NE_RECV_DATA (0x0100)
+
+/**
+ * DOC: Device MMIO buffer sizes
+ */
+
+/**
+ * NE_SEND_DATA_SIZE / NE_RECV_DATA_SIZE - 240 bytes for send / recv buffer.
+ */
+#define NE_SEND_DATA_SIZE (240)
+#define NE_RECV_DATA_SIZE (240)
+
+/**
+ * DOC: MSI-X interrupt vectors
+ */
+
+/**
+ * NE_VEC_REPLY - MSI-X vector used for command reply notification.
+ */
+#define NE_VEC_REPLY (0)
+
+/**
+ * NE_VEC_EVENT - MSI-X vector used for out-of-band events e.g. enclave crash.
+ */
+#define NE_VEC_EVENT (1)
+
+/**
+ * enum ne_pci_dev_cmd_type - Device command types.
+ * @INVALID_CMD: Invalid command.
+ * @ENCLAVE_START: Start an enclave, after setting its resources.
+ * @ENCLAVE_GET_SLOT: Get the slot uid of an enclave.
+ * @ENCLAVE_STOP: Terminate an enclave.
+ * @SLOT_ALLOC : Allocate a slot for an enclave.
+ * @SLOT_FREE: Free the slot allocated for an enclave.
+ * @SLOT_ADD_MEM: Add a memory region to an enclave slot.
+ * @SLOT_ADD_VCPU: Add a vCPU to an enclave slot.
+ * @SLOT_COUNT : Get the number of allocated slots.
+ * @NEXT_SLOT: Get the next slot in the list of allocated slots.
+ * @SLOT_INFO: Get the info for a slot e.g. slot uid, vCPUs count.
+ * @SLOT_ADD_BULK_VCPUS: Add a number of vCPUs, not providing CPU ids.
+ * @MAX_CMD: A gatekeeper for max possible command type.
+ */
+enum ne_pci_dev_cmd_type {
+ INVALID_CMD = 0,
+ ENCLAVE_START = 1,
+ ENCLAVE_GET_SLOT = 2,
+ ENCLAVE_STOP = 3,
+ SLOT_ALLOC = 4,
+ SLOT_FREE = 5,
+ SLOT_ADD_MEM = 6,
+ SLOT_ADD_VCPU = 7,
+ SLOT_COUNT = 8,
+ NEXT_SLOT = 9,
+ SLOT_INFO = 10,
+ SLOT_ADD_BULK_VCPUS = 11,
+ MAX_CMD,
+};
+
+/**
+ * DOC: Device commands - payload structure for requests and replies.
+ */
+
+/**
+ * struct enclave_start_req - ENCLAVE_START request.
+ * @slot_uid: Slot unique id mapped to the enclave to start.
+ * @enclave_cid: Context ID (CID) for the enclave vsock device.
+ * If 0, CID is autogenerated.
+ * @flags: Flags for the enclave to start with (e.g. debug mode).
+ */
+struct enclave_start_req {
+ u64 slot_uid;
+ u64 enclave_cid;
+ u64 flags;
+};
+
+/**
+ * struct enclave_get_slot_req - ENCLAVE_GET_SLOT request.
+ * @enclave_cid: Context ID (CID) for the enclave vsock device.
+ */
+struct enclave_get_slot_req {
+ u64 enclave_cid;
+};
+
+/**
+ * struct enclave_stop_req - ENCLAVE_STOP request.
+ * @slot_uid: Slot unique id mapped to the enclave to stop.
+ */
+struct enclave_stop_req {
+ u64 slot_uid;
+};
+
+/**
+ * struct slot_alloc_req - SLOT_ALLOC request.
+ * @unused: Dummy member to avoid a zero-sized structure.
+ */
+struct slot_alloc_req {
+ u8 unused;
+};
+
+/**
+ * struct slot_free_req - SLOT_FREE request.
+ * @slot_uid: Slot unique id mapped to the slot to free.
+ */
+struct slot_free_req {
+ u64 slot_uid;
+};
+
+/* TODO: Add flags field to the request to add memory region. */
+/**
+ * struct slot_add_mem_req - SLOT_ADD_MEM request.
+ * @slot_uid: Slot unique id mapped to the slot to add the memory region to.
+ * @paddr: Physical address of the memory region to add to the slot.
+ * @size: Memory size, in bytes, of the memory region to add to the slot.
+ */
+struct slot_add_mem_req {
+ u64 slot_uid;
+ u64 paddr;
+ u64 size;
+};
+
+/**
+ * struct slot_add_vcpu_req - SLOT_ADD_VCPU request.
+ * @slot_uid: Slot unique id mapped to the slot to add the vCPU to.
+ * @vcpu_id: vCPU ID of the CPU to add to the enclave.
+ * @padding: Padding for the overall data structure.
+ */
+struct slot_add_vcpu_req {
+ u64 slot_uid;
+ u32 vcpu_id;
+ u8 padding[4];
+};
+
+/**
+ * struct slot_count_req - SLOT_COUNT request.
+ * @unused: Dummy member to avoid a zero-sized structure.
+ */
+struct slot_count_req {
+ u8 unused;
+};
+
+/**
+ * struct next_slot_req - NEXT_SLOT request.
+ * @slot_uid: Slot unique id of the next slot in the iteration.
+ */
+struct next_slot_req {
+ u64 slot_uid;
+};
+
+/**
+ * struct slot_info_req - SLOT_INFO request.
+ * @slot_uid: Slot unique id mapped to the slot to get information about.
+ */
+struct slot_info_req {
+ u64 slot_uid;
+};
+
+/**
+ * struct slot_add_bulk_vcpus_req - SLOT_ADD_BULK_VCPUS request.
+ * @slot_uid: Slot unique id mapped to the slot to add vCPUs to.
+ * @nr_vcpus: Number of vCPUs to add to the slot.
+ */
+struct slot_add_bulk_vcpus_req {
+ u64 slot_uid;
+ u64 nr_vcpus;
+};
+
+/**
+ * struct ne_pci_dev_cmd_reply - NE PCI device command reply.
+ * @rc : Return code of the logic that processed the request.
+ * @padding0: Padding for the overall data structure.
+ * @slot_uid: Valid for all commands except SLOT_COUNT.
+ * @enclave_cid: Valid for ENCLAVE_START command.
+ * @slot_count : Valid for SLOT_COUNT command.
+ * @mem_regions: Valid for SLOT_ALLOC and SLOT_INFO commands.
+ * @mem_size: Valid for SLOT_INFO command.
+ * @nr_vcpus: Valid for SLOT_INFO command.
+ * @flags: Valid for SLOT_INFO command.
+ * @state: Valid for SLOT_INFO command.
+ * @padding1: Padding for the overall data structure.
+ */
+struct ne_pci_dev_cmd_reply {
+ s32 rc;
+ u8 padding0[4];
+ u64 slot_uid;
+ u64 enclave_cid;
+ u64 slot_count;
+ u64 mem_regions;
+ u64 mem_size;
+ u64 nr_vcpus;
+ u64 flags;
+ u16 state;
+ u8 padding1[6];
+};
+
+/**
+ * struct ne_pci_dev - Nitro Enclaves (NE) PCI device.
+ * @cmd_reply_avail: Variable set if a reply has been sent by the
+ * PCI device.
+ * @cmd_reply_wait_q: Wait queue for handling command reply from the
+ * PCI device.
+ * @enclaves_list: List of the enclaves managed by the PCI device.
+ * @enclaves_list_mutex: Mutex for accessing the list of enclaves.
+ * @event_wq: Work queue for handling out-of-band events
+ * triggered by the Nitro Hypervisor which require
+ * enclave state scanning and propagation to the
+ * enclave process.
+ * @iomem_base : MMIO region of the PCI device.
+ * @notify_work: Work item for every received out-of-band event.
+ * @pci_dev_mutex: Mutex for accessing the PCI device MMIO space.
+ * @pdev: PCI device data structure.
+ */
+struct ne_pci_dev {
+ atomic_t cmd_reply_avail;
+ wait_queue_head_t cmd_reply_wait_q;
+ struct list_head enclaves_list;
+ struct mutex enclaves_list_mutex;
+ struct workqueue_struct *event_wq;
+ void __iomem *iomem_base;
+ struct work_struct notify_work;
+ struct mutex pci_dev_mutex;
+ struct pci_dev *pdev;
+};
+
+/**
+ * ne_do_request() - Submit command request to the PCI device based on the command
+ * type and retrieve the associated reply.
+ * @pdev: PCI device to send the command to and receive the reply from.
+ * @cmd_type: Command type of the request sent to the PCI device.
+ * @cmd_request: Command request payload.
+ * @cmd_request_size: Size of the command request payload.
+ * @cmd_reply: Command reply payload.
+ * @cmd_reply_size: Size of the command reply payload.
+ *
+ * Context: Process context. This function uses the ne_pci_dev mutex to handle
+ * one command at a time.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
+ void *cmd_request, size_t cmd_request_size,
+ struct ne_pci_dev_cmd_reply *cmd_reply,
+ size_t cmd_reply_size);
+
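+/*
+ * For illustration, a typical call, mirroring the SLOT_INFO request issued
+ * in ne_event_work_handler():
+ *
+ *	struct slot_info_req slot_info_req = { .slot_uid = slot_uid };
+ *	struct ne_pci_dev_cmd_reply cmd_reply = {};
+ *	int rc = ne_do_request(pdev, SLOT_INFO,
+ *			       &slot_info_req, sizeof(slot_info_req),
+ *			       &cmd_reply, sizeof(cmd_reply));
+ */
+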
+/* Nitro Enclaves (NE) PCI device driver */
+extern struct pci_driver ne_pci_driver;
+
+#endif /* _NE_PCI_DEV_H_ */
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 32c2c52f7e84..73eb34849eab 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -35,7 +35,7 @@ static u32 vbg_misc_device_requestor(struct inode *inode)
VMMDEV_REQUESTOR_CON_DONT_KNOW |
VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
- if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+ if (from_kuid(current_user_ns(), current_uid()) == 0)
requestor |= VMMDEV_REQUESTOR_USR_ROOT;
else
requestor |= VMMDEV_REQUESTOR_USR_USER;
@@ -202,13 +202,8 @@ static int vbg_input_open(struct input_dev *input)
{
struct vbg_dev *gdev = input_get_drvdata(input);
u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
- int ret;
- ret = vbg_core_set_mouse_status(gdev, feat);
- if (ret)
- return ret;
-
- return 0;
+ return vbg_core_set_mouse_status(gdev, feat);
}
/**
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 5c92e4a50882..7b41130d3f35 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -6,6 +6,12 @@ config VIRTIO
bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
or CONFIG_S390_GUEST.
+config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
+ bool
+ help
+ This option is selected if the architecture may need to enforce
+ VIRTIO_F_ACCESS_PLATFORM.
+
menuconfig VIRTIO_MENU
bool "Virtio drivers"
default y
@@ -126,4 +132,11 @@ config VIRTIO_MMIO_CMDLINE_DEVICES
If unsure, say 'N'.
+config VIRTIO_DMA_SHARED_BUFFER
+ tristate
+ depends on DMA_SHARED_BUFFER
+ help
+ This option adds a flavor of dma buffers that are backed by
+ virtio resources.
+
endif # VIRTIO_MENU
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 4d993791f2d7..591e6f72aa54 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
+obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index a977e32a88f2..42e09cc1b8ac 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -176,6 +176,21 @@ int virtio_finalize_features(struct virtio_device *dev)
if (ret)
return ret;
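+	/*
+	 * Platforms that restrict virtio memory access require devices
+	 * that honor VIRTIO_F_ACCESS_PLATFORM, per the Kconfig help text.
+	 */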
+ ret = arch_has_restricted_virtio_memory_access();
+ if (ret) {
+ if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
+ dev_warn(&dev->dev,
+ "device must provide VIRTIO_F_VERSION_1\n");
+ return -ENODEV;
+ }
+
+ if (!virtio_has_feature(dev, VIRTIO_F_ACCESS_PLATFORM)) {
+ dev_warn(&dev->dev,
+ "device must provide VIRTIO_F_ACCESS_PLATFORM\n");
+ return -ENODEV;
+ }
+ }
+
if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
return 0;
@@ -357,6 +372,12 @@ out:
}
EXPORT_SYMBOL_GPL(register_virtio_device);
+bool is_virtio_device(struct device *dev)
+{
+ return dev->bus == &virtio_bus;
+}
+EXPORT_SYMBOL_GPL(is_virtio_device);
+
void unregister_virtio_device(struct virtio_device *dev)
{
int index = dev->index; /* save for after device release */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 31cc97f2f515..481611c09dae 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -128,7 +128,7 @@ struct virtio_balloon {
struct page_reporting_dev_info pr_dev_info;
};
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c
new file mode 100644
index 000000000000..5127a2f0c986
--- /dev/null
+++ b/drivers/virtio/virtio_dma_buf.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dma-bufs for virtio exported objects
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/virtio_dma_buf.h>
+
+/**
+ * virtio_dma_buf_export - Creates a new dma-buf for a virtio exported object
+ * @exp_info: [in] see dma_buf_export(). ops MUST refer to a dma_buf_ops
+ * struct embedded in a virtio_dma_buf_ops.
+ *
+ * This wraps dma_buf_export() to allow virtio drivers to create a dma-buf
+ * for an virtio exported object that can be queried by other virtio drivers
+ * for the object's UUID.
+ */
+struct dma_buf *virtio_dma_buf_export
+ (const struct dma_buf_export_info *exp_info)
+{
+ const struct virtio_dma_buf_ops *virtio_ops =
+ container_of(exp_info->ops,
+ const struct virtio_dma_buf_ops, ops);
+
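+	/* Require the virtio attach callback so is_virtio_dma_buf() can identify the buffer later. */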
+ if (!exp_info->ops ||
+ exp_info->ops->attach != &virtio_dma_buf_attach ||
+ !virtio_ops->get_uuid) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ return dma_buf_export(exp_info);
+}
+EXPORT_SYMBOL(virtio_dma_buf_export);
+
+/**
+ * virtio_dma_buf_attach - mandatory attach callback for virtio dma-bufs
+ */
+int virtio_dma_buf_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ int ret;
+ const struct virtio_dma_buf_ops *ops =
+ container_of(dma_buf->ops,
+ const struct virtio_dma_buf_ops, ops);
+
+ if (ops->device_attach) {
+ ret = ops->device_attach(dma_buf, attach);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(virtio_dma_buf_attach);
+
+/**
+ * is_virtio_dma_buf - returns true if the given dma-buf is a virtio dma-buf
+ * @dma_buf: buffer to query
+ */
+bool is_virtio_dma_buf(struct dma_buf *dma_buf)
+{
+ return dma_buf->ops->attach == &virtio_dma_buf_attach;
+}
+EXPORT_SYMBOL(is_virtio_dma_buf);
+
+/**
+ * virtio_dma_buf_get_uuid - gets a virtio dma-buf's exported object's uuid
+ * @dma_buf: [in] buffer to query
+ * @uuid: [out] the uuid
+ *
+ * Returns: 0 on success, negative on failure.
+ */
+int virtio_dma_buf_get_uuid(struct dma_buf *dma_buf,
+ uuid_t *uuid)
+{
+ const struct virtio_dma_buf_ops *ops =
+ container_of(dma_buf->ops,
+ const struct virtio_dma_buf_ops, ops);
+
+ if (!is_virtio_dma_buf(dma_buf))
+ return -EINVAL;
+
+ return ops->get_uuid(dma_buf, uuid);
+}
+EXPORT_SYMBOL(virtio_dma_buf_get_uuid);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 877b2ea3ed05..f1f6208edcf5 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -363,7 +363,7 @@ static int virtinput_restore(struct virtio_device *vdev)
static unsigned int features[] = {
/* none */
};
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index c08512fcea90..181e2f18beae 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -36,18 +36,10 @@ enum virtio_mem_mb_state {
VIRTIO_MEM_MB_STATE_OFFLINE,
/* Partially plugged, fully added to Linux, offline. */
VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
- /* Fully plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+ /* Fully plugged, fully added to Linux, online. */
VIRTIO_MEM_MB_STATE_ONLINE,
- /* Partially plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+ /* Partially plugged, fully added to Linux, online. */
VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
- /*
- * Fully plugged, fully added to Linux, online (ZONE_MOVABLE).
- * We are not allowed to allocate (unplug) parts of this block that
- * are not movable (similar to gigantic pages). We will never allow
- * to online OFFLINE_PARTIAL to ZONE_MOVABLE (as they would contain
- * unmovable parts).
- */
- VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE,
VIRTIO_MEM_MB_STATE_COUNT
};
@@ -432,7 +424,8 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
- vm->resource_name);
+ vm->resource_name,
+ MEMHP_MERGE_RESOURCE);
}
/*
@@ -526,21 +519,10 @@ static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
}
static int virtio_mem_notify_going_online(struct virtio_mem *vm,
- unsigned long mb_id,
- enum zone_type zone)
+ unsigned long mb_id)
{
switch (virtio_mem_mb_get_state(vm, mb_id)) {
case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- /*
- * We won't allow to online a partially plugged memory block
- * to the MOVABLE zone - it would contain unmovable parts.
- */
- if (zone == ZONE_MOVABLE) {
- dev_warn_ratelimited(&vm->vdev->dev,
- "memory block has holes, MOVABLE not supported\n");
- return NOTIFY_BAD;
- }
- return NOTIFY_OK;
case VIRTIO_MEM_MB_STATE_OFFLINE:
return NOTIFY_OK;
default:
@@ -560,7 +542,6 @@ static void virtio_mem_notify_offline(struct virtio_mem *vm,
VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
break;
case VIRTIO_MEM_MB_STATE_ONLINE:
- case VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE:
virtio_mem_mb_set_state(vm, mb_id,
VIRTIO_MEM_MB_STATE_OFFLINE);
break;
@@ -579,24 +560,17 @@ static void virtio_mem_notify_offline(struct virtio_mem *vm,
virtio_mem_retry(vm);
}
-static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
- enum zone_type zone)
+static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
{
unsigned long nb_offline;
switch (virtio_mem_mb_get_state(vm, mb_id)) {
case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- BUG_ON(zone == ZONE_MOVABLE);
virtio_mem_mb_set_state(vm, mb_id,
VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
break;
case VIRTIO_MEM_MB_STATE_OFFLINE:
- if (zone == ZONE_MOVABLE)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE);
- else
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE);
+ virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
break;
default:
BUG();
@@ -675,7 +649,6 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
const unsigned long start = PFN_PHYS(mhp->start_pfn);
const unsigned long size = PFN_PHYS(mhp->nr_pages);
const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
- enum zone_type zone;
int rc = NOTIFY_OK;
if (!virtio_mem_overlaps_range(vm, start, size))
@@ -717,8 +690,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
}
vm->hotplug_active = true;
- zone = page_zonenum(pfn_to_page(mhp->start_pfn));
- rc = virtio_mem_notify_going_online(vm, mb_id, zone);
+ rc = virtio_mem_notify_going_online(vm, mb_id);
break;
case MEM_OFFLINE:
virtio_mem_notify_offline(vm, mb_id);
@@ -726,8 +698,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_ONLINE:
- zone = page_zonenum(pfn_to_page(mhp->start_pfn));
- virtio_mem_notify_online(vm, mb_id, zone);
+ virtio_mem_notify_online(vm, mb_id);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
@@ -1906,8 +1877,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
dev_warn(&vdev->dev, "device still has system memory added\n");
} else {
virtio_mem_delete_resource(vm);
@@ -1957,7 +1927,7 @@ static unsigned int virtio_mem_features[] = {
#endif
};
-static struct virtio_device_id virtio_mem_id_table[] = {
+static const struct virtio_device_id virtio_mem_id_table[] = {
{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 627ac0487494..238383ff1064 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -498,6 +498,36 @@ static const char *vm_bus_name(struct virtio_device *vdev)
return vm_dev->pdev->name;
}
+static bool vm_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ u64 len, addr;
+
+ /* Select the region we're interested in */
+ writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);
+
+ /* Read the region size */
+ len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
+ len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;
+
+ region->len = len;
+
+ /*
+ * Check if region length is -1. If that's the case, the shared memory
+ * region does not exist and there is no need to proceed further.
+ */
+ if (len == ~(u64)0)
+ return false;
+
+ /* Read the region base address */
+ addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
+ addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;
+
+ region->addr = addr;
+
+ return true;
+}
+
static const struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
.set = vm_set,
@@ -510,6 +540,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
.bus_name = vm_bus_name,
+ .get_shm_region = vm_get_shm_region,
};
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 3e14e700b231..3d6ae5a5e252 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -444,6 +444,99 @@ static void del_vq(struct virtio_pci_vq_info *info)
vring_del_virtqueue(vq);
}
+static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
+ u8 *bar, u64 *offset, u64 *len)
+{
+ int pos;
+
+ for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
+ pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
+ u8 type, cap_len, id;
+ u32 tmp32;
+ u64 res_offset, res_length;
+
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ cfg_type), &type);
+ if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
+ continue;
+
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ cap_len), &cap_len);
+ if (cap_len != sizeof(struct virtio_pci_cap64)) {
+ dev_err(&dev->dev, "%s: shm cap with bad size offset:"
+ " %d size: %d\n", __func__, pos, cap_len);
+ continue;
+ }
+
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ id), &id);
+ if (id != required_id)
+ continue;
+
+ /* Type and ID match, looks good */
+ pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
+ bar), bar);
+
+ /* Read the lower 32bit of length and offset */
+ pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
+ offset), &tmp32);
+ res_offset = tmp32;
+ pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
+ length), &tmp32);
+ res_length = tmp32;
+
+ /* and now the top half */
+ pci_read_config_dword(dev,
+ pos + offsetof(struct virtio_pci_cap64,
+ offset_hi), &tmp32);
+ res_offset |= ((u64)tmp32) << 32;
+ pci_read_config_dword(dev,
+ pos + offsetof(struct virtio_pci_cap64,
+ length_hi), &tmp32);
+ res_length |= ((u64)tmp32) << 32;
+
+ *offset = res_offset;
+ *len = res_length;
+
+ return pos;
+ }
+ return 0;
+}
+
+static bool vp_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ struct pci_dev *pci_dev = vp_dev->pci_dev;
+ u8 bar;
+ u64 offset, len;
+ phys_addr_t phys_addr;
+ size_t bar_len;
+
+ if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
+ return false;
+
+ phys_addr = pci_resource_start(pci_dev, bar);
+ bar_len = pci_resource_len(pci_dev, bar);
+
+ if ((offset + len) < offset) {
+ dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
+ __func__);
+ return false;
+ }
+
+ if (offset + len > bar_len) {
+ dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
+ __func__);
+ return false;
+ }
+
+ region->len = len;
+ region->addr = (u64) phys_addr + offset;
+
+ return true;
+}
+
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.get = NULL,
.set = NULL,
@@ -458,6 +551,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
.bus_name = vp_bus_name,
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
+ .get_shm_region = vp_get_shm_region,
};
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -474,6 +568,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.bus_name = vp_bus_name,
.set_vq_affinity = vp_set_vq_affinity,
.get_vq_affinity = vp_get_vq_affinity,
+ .get_shm_region = vp_get_shm_region,
};
/**
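
The two sanity checks in vp_get_shm_region() guard against u64 wraparound of offset + len and against a window that runs past the end of the BAR. A compilable sketch of just those checks, with made-up values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the two bounds checks in vp_get_shm_region(); values invented. */
static bool shm_window_valid(uint64_t offset, uint64_t len, uint64_t bar_len)
{
	if (offset + len < offset)	/* u64 wraparound */
		return false;
	if (offset + len > bar_len)	/* window runs past the BAR */
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", shm_window_valid(0x1000, 0x2000, 0x10000));	/* 1 */
	printf("%d\n", shm_window_valid(~0ULL - 10, 0x100, 0x10000));	/* 0 */
	printf("%d\n", shm_window_valid(0xf000, 0x2000, 0x10000));	/* 0 */
	return 0;
}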
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 1ca880e01476..090cbbf9e1e2 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -7,7 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/jiffies.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -40,12 +40,12 @@ struct mxc_w1_device {
static u8 mxc_w1_ds2_reset_bus(void *data)
{
struct mxc_w1_device *dev = data;
- unsigned long timeout;
+ ktime_t timeout;
writeb(MXC_W1_CONTROL_RPP, dev->regs + MXC_W1_CONTROL);
/* Wait for reset sequence 511+512us, use 1500us for sure */
- timeout = jiffies + usecs_to_jiffies(1500);
+ timeout = ktime_add_us(ktime_get(), 1500);
udelay(511 + 512);
@@ -55,7 +55,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
/* PST bit is valid after the RPP bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_RPP))
return !(ctrl & MXC_W1_CONTROL_PST);
- } while (time_is_after_jiffies(timeout));
+ } while (ktime_before(ktime_get(), timeout));
return 1;
}
@@ -68,12 +68,12 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
{
struct mxc_w1_device *dev = data;
- unsigned long timeout;
+ ktime_t timeout;
writeb(MXC_W1_CONTROL_WR(bit), dev->regs + MXC_W1_CONTROL);
/* Wait for read/write bit (60us, Max 120us), use 200us for sure */
- timeout = jiffies + usecs_to_jiffies(200);
+ timeout = ktime_add_us(ktime_get(), 200);
udelay(60);
@@ -83,7 +83,7 @@ static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit)
/* RDST bit is valid after the WR1/RD bit is self-cleared */
if (!(ctrl & MXC_W1_CONTROL_WR(bit)))
return !!(ctrl & MXC_W1_CONTROL_RDST);
- } while (time_is_after_jiffies(timeout));
+ } while (ktime_before(ktime_get(), timeout));
return 0;
}
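
The mxc_w1 conversion above matters because jiffies-based deadlines only have HZ resolution, far coarser than the 200us and 1500us timeouts used here, so the old loop could time out almost immediately; ktime gives nanosecond resolution. A userspace analogue of the same deadline pattern, using CLOCK_MONOTONIC and a hypothetical regs_ready() condition:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Userspace analogue of the ktime_add_us()/ktime_before() deadline pattern;
 * regs_ready() is a hypothetical stand-in for the hardware status check. */
static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static bool wait_ready_us(bool (*regs_ready)(void), int64_t timeout_us)
{
	int64_t deadline = now_ns() + timeout_us * 1000;

	do {
		if (regs_ready())
			return true;
	} while (now_ns() < deadline);	/* ktime_before(ktime_get(), timeout) */

	return false;
}

static int polls;
static bool ready_after_three(void)
{
	return ++polls >= 3;
}

int main(void)
{
	return wait_ready_us(ready_after_three, 1500) ? 0 : 1;
}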
diff --git a/drivers/w1/slaves/w1_ds2405.c b/drivers/w1/slaves/w1_ds2405.c
index 86cd97309d87..1d9a1183e83f 100644
--- a/drivers/w1/slaves/w1_ds2405.c
+++ b/drivers/w1/slaves/w1_ds2405.c
@@ -206,7 +206,7 @@ static struct attribute *w1_ds2405_attrs[] = {
ATTRIBUTE_GROUPS(w1_ds2405);
-static struct w1_family_ops w1_ds2405_fops = {
+static const struct w1_family_ops w1_ds2405_fops = {
.groups = w1_ds2405_groups
};
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index 762e5e4e2b48..6c269af73c80 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -138,7 +138,7 @@ static void w1_f12_remove_slave(struct w1_slave *sl)
&(w1_f12_sysfs_bin_files[i]));
}
-static struct w1_family_ops w1_f12_fops = {
+static const struct w1_family_ops w1_f12_fops = {
.add_slave = w1_f12_add_slave,
.remove_slave = w1_f12_remove_slave,
};
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 83f8d94bb814..ad102c577122 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -336,7 +336,7 @@ static const struct attribute_group *w1_f29_groups[] = {
NULL,
};
-static struct w1_family_ops w1_f29_fops = {
+static const struct w1_family_ops w1_f29_fops = {
.add_slave = w1_f29_disable_test_mode,
.groups = w1_f29_groups,
};
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index f1fb18afbcea..c8cfac555b48 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -143,7 +143,7 @@ static const struct attribute_group *w1_f3a_groups[] = {
NULL,
};
-static struct w1_family_ops w1_f3a_fops = {
+static const struct w1_family_ops w1_f3a_fops = {
.groups = w1_f3a_groups,
};
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index f4367282dcc1..b6bd18d5b3f6 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -117,7 +117,7 @@ static struct attribute *w1_f1d_attrs[] = {
};
ATTRIBUTE_GROUPS(w1_f1d);
-static struct w1_family_ops w1_f1d_fops = {
+static const struct w1_family_ops w1_f1d_fops = {
.groups = w1_f1d_groups,
};
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
index 75bb8a88620b..0ea7d779d17a 100644
--- a/drivers/w1/slaves/w1_ds2430.c
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -279,7 +279,7 @@ static const struct attribute_group *w1_f14_groups[] = {
NULL,
};
-static struct w1_family_ops w1_f14_fops = {
+static const struct w1_family_ops w1_f14_fops = {
.groups = w1_f14_groups,
};
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index e5bd7e2354d7..6856b1c29e17 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -278,7 +278,7 @@ static const struct attribute_group *w1_f2d_groups[] = {
NULL,
};
-static struct w1_family_ops w1_f2d_fops = {
+static const struct w1_family_ops w1_f2d_fops = {
.groups = w1_f2d_groups,
};
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 1f805c86517a..0f72df15a024 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -288,7 +288,7 @@ static void w1_f23_remove_slave(struct w1_slave *sl)
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
}
-static struct w1_family_ops w1_f23_fops = {
+static const struct w1_family_ops w1_f23_fops = {
.add_slave = w1_f23_add_slave,
.remove_slave = w1_f23_remove_slave,
.groups = w1_f23_groups,
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index d199e5a25cc0..5cfb0ae23e91 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -412,7 +412,7 @@ static const struct attribute_group *w1_ds2438_groups[] = {
NULL,
};
-static struct w1_family_ops w1_ds2438_fops = {
+static const struct w1_family_ops w1_ds2438_fops = {
.groups = w1_ds2438_groups,
};
diff --git a/drivers/w1/slaves/w1_ds250x.c b/drivers/w1/slaves/w1_ds250x.c
index e507117444d8..7592c7050d1d 100644
--- a/drivers/w1/slaves/w1_ds250x.c
+++ b/drivers/w1/slaves/w1_ds250x.c
@@ -215,7 +215,7 @@ static int w1_eprom_add_slave(struct w1_slave *sl)
return PTR_ERR_OR_ZERO(nvmem);
}
-static struct w1_family_ops w1_eprom_fops = {
+static const struct w1_family_ops w1_eprom_fops = {
.add_slave = w1_eprom_add_slave,
};
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index c689b1b987b8..c281fe5ed688 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -141,7 +141,7 @@ static void w1_ds2780_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
}
-static struct w1_family_ops w1_ds2780_fops = {
+static const struct w1_family_ops w1_ds2780_fops = {
.add_slave = w1_ds2780_add_slave,
.remove_slave = w1_ds2780_remove_slave,
.groups = w1_ds2780_groups,
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 84d6ceec5da5..f0d393ae070b 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -138,7 +138,7 @@ static void w1_ds2781_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
}
-static struct w1_family_ops w1_ds2781_fops = {
+static const struct w1_family_ops w1_ds2781_fops = {
.add_slave = w1_ds2781_add_slave,
.remove_slave = w1_ds2781_remove_slave,
.groups = w1_ds2781_groups,
diff --git a/drivers/w1/slaves/w1_ds2805.c b/drivers/w1/slaves/w1_ds2805.c
index ccb753a474b1..206186db727d 100644
--- a/drivers/w1/slaves/w1_ds2805.c
+++ b/drivers/w1/slaves/w1_ds2805.c
@@ -281,7 +281,7 @@ static void w1_f0d_remove_slave(struct w1_slave *sl)
sysfs_remove_bin_file(&sl->dev.kobj, &w1_f0d_bin_attr);
}
-static struct w1_family_ops w1_f0d_fops = {
+static const struct w1_family_ops w1_f0d_fops = {
.add_slave = w1_f0d_add_slave,
.remove_slave = w1_f0d_remove_slave,
};
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 8a640f159078..e4f336111edc 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -410,7 +410,7 @@ static void w1_f1C_remove_slave(struct w1_slave *sl)
sl->family_data = NULL;
}
-static struct w1_family_ops w1_f1C_fops = {
+static const struct w1_family_ops w1_f1C_fops = {
.add_slave = w1_f1C_add_slave,
.remove_slave = w1_f1C_remove_slave,
.groups = w1_f1C_groups,
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
index 046ddda83df9..6b00db7169ab 100644
--- a/drivers/w1/slaves/w1_ds28e17.c
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -741,7 +741,7 @@ static void w1_f19_remove_slave(struct w1_slave *sl)
/* Declarations within the w1 subsystem. */
-static struct w1_family_ops w1_f19_fops = {
+static const struct w1_family_ops w1_f19_fops = {
.add_slave = w1_f19_add_slave,
.remove_slave = w1_f19_remove_slave,
.groups = w1_f19_groups,
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index c1b4eda16719..cddf60b7309c 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/string.h>
+#include <linux/jiffies.h>
#include <linux/w1.h>
@@ -65,6 +66,32 @@ static u16 bulk_read_device_counter; /* =0 as per C standard */
#define MIN_TEMP -55 /* min temperature that can be measured */
#define MAX_TEMP 125 /* max temperature that can be measured */
+/* Allowed values for sysfs conv_time attribute */
+#define CONV_TIME_DEFAULT 0
+#define CONV_TIME_MEASURE 1
+
+/* Bits in sysfs "features" value */
+#define W1_THERM_CHECK_RESULT 1 /* Enable conversion success check */
+#define W1_THERM_POLL_COMPLETION 2 /* Poll for conversion completion */
+#define W1_THERM_FEATURES_MASK 3 /* All values mask */
+
+/* Poll period in milliseconds. Should be less than the shortest operation on the device */
+#define W1_POLL_PERIOD 32
+#define W1_POLL_CONVERT_TEMP 2000 /* Timeout for W1_CONVERT_TEMP, ms */
+#define W1_POLL_RECALL_EEPROM 500 /* Timeout for W1_RECALL_EEPROM, ms*/
+
+/* Masks for resolution functions, work with all devices */
+/* Bit mask for config register for all devices, bits 7,6,5 */
+#define W1_THERM_RESOLUTION_MASK 0xE0
+/* Bit offset of resolution in config register for all devices */
+#define W1_THERM_RESOLUTION_SHIFT 5
+/* Add this to bit value to get resolution */
+#define W1_THERM_RESOLUTION_MIN 9
+/* Maximum allowed value */
+#define W1_THERM_RESOLUTION_MAX 14
+
/* Helpers Macros */
/*
@@ -89,6 +116,20 @@ static u16 bulk_read_device_counter; /* =0 as per C standard */
(((struct w1_therm_family_data *)(sl->family_data))->resolution)
/*
+ * return the conv_time_override of the sl slave
+ * always test family data existence before using this macro
+ */
+ #define SLAVE_CONV_TIME_OVERRIDE(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->conv_time_override)
+
+/*
+ * return the features of the sl slave
+ * always test family data existence before using this macro
+ */
+ #define SLAVE_FEATURES(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->features)
+
+/*
* return whether or not a converT command has been issued to the slave
* * 0: no bulk read is pending
* * -1: conversion is in progress
@@ -136,6 +177,8 @@ struct w1_therm_family_converter {
* -x error or undefined
* @resolution: current device resolution
* @convert_triggered: conversion state of the device
+ * @conv_time_override: user selected conversion time or CONV_TIME_DEFAULT
+ * @features: bit mask - enable temperature validity check, poll for completion
* @specific_functions: pointer to struct of device specific function
*/
struct w1_therm_family_data {
@@ -144,6 +187,8 @@ struct w1_therm_family_data {
int external_powered;
int resolution;
int convert_triggered;
+ int conv_time_override;
+ unsigned int features;
struct w1_therm_family_converter *specific_functions;
};
@@ -285,6 +330,19 @@ static ssize_t therm_bulk_read_store(struct device *device,
static ssize_t therm_bulk_read_show(struct device *device,
struct device_attribute *attr, char *buf);
+static ssize_t conv_time_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t conv_time_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t size);
+
+static ssize_t features_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t features_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t size);
/* Attributes declarations */
static DEVICE_ATTR_RW(w1_slave);
@@ -294,6 +352,8 @@ static DEVICE_ATTR_RO(ext_power);
static DEVICE_ATTR_RW(resolution);
static DEVICE_ATTR_WO(eeprom);
static DEVICE_ATTR_RW(alarms);
+static DEVICE_ATTR_RW(conv_time);
+static DEVICE_ATTR_RW(features);
static DEVICE_ATTR_RW(therm_bulk_read); /* attribute at master level */
@@ -328,6 +388,8 @@ static struct attribute *w1_therm_attrs[] = {
&dev_attr_resolution.attr,
&dev_attr_eeprom.attr,
&dev_attr_alarms.attr,
+ &dev_attr_conv_time.attr,
+ &dev_attr_features.attr,
NULL,
};
@@ -337,6 +399,8 @@ static struct attribute *w1_ds18s20_attrs[] = {
&dev_attr_ext_power.attr,
&dev_attr_eeprom.attr,
&dev_attr_alarms.attr,
+ &dev_attr_conv_time.attr,
+ &dev_attr_features.attr,
NULL,
};
@@ -348,6 +412,8 @@ static struct attribute *w1_ds28ea00_attrs[] = {
&dev_attr_resolution.attr,
&dev_attr_eeprom.attr,
&dev_attr_alarms.attr,
+ &dev_attr_conv_time.attr,
+ &dev_attr_features.attr,
NULL,
};
@@ -409,21 +475,21 @@ static const struct hwmon_chip_info w1_chip_info = {
/* Family operations */
-static struct w1_family_ops w1_therm_fops = {
+static const struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_therm_groups,
.chip_info = W1_CHIPINFO,
};
-static struct w1_family_ops w1_ds18s20_fops = {
+static const struct w1_family_ops w1_ds18s20_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_ds18s20_groups,
.chip_info = W1_CHIPINFO,
};
-static struct w1_family_ops w1_ds28ea00_fops = {
+static const struct w1_family_ops w1_ds28ea00_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
.groups = w1_ds28ea00_groups,
@@ -466,7 +532,12 @@ static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
if (!sl->family_data)
return -ENODEV; /* device unknown */
- /* return time in ms for conversion operation */
+ if (SLAVE_CONV_TIME_OVERRIDE(sl) != CONV_TIME_DEFAULT)
+ return SLAVE_CONV_TIME_OVERRIDE(sl);
+
+ /* Return the conversion time depending on resolution;
+ * select the maximum conversion time among all compatible devices
+ */
switch (SLAVE_RESOLUTION(sl)) {
case 9:
ret = 95;
@@ -478,6 +549,14 @@ static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
ret = 375;
break;
case 12:
+ ret = 750;
+ break;
+ case 13:
+ ret = 850; /* GX20MH01 only. Datasheet says 500ms, but that's not enough. */
+ break;
+ case 14:
+ ret = 1600; /* GX20MH01 only. Datasheet says 1000ms - not enough */
+ break;
default:
ret = 750;
}
@@ -486,8 +565,13 @@ static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
static inline int w1_DS18S20_convert_time(struct w1_slave *sl)
{
- (void)(sl);
- return 750; /* always 750ms for DS18S20 */
+ if (!sl->family_data)
+ return -ENODEV; /* device unknown */
+
+ if (SLAVE_CONV_TIME_OVERRIDE(sl) == CONV_TIME_DEFAULT)
+ return 750; /* default for DS18S20 */
+ else
+ return SLAVE_CONV_TIME_OVERRIDE(sl);
}
static inline int w1_DS18B20_write_data(struct w1_slave *sl,
@@ -506,52 +590,71 @@ static inline int w1_DS18S20_write_data(struct w1_slave *sl,
static inline int w1_DS18B20_set_resolution(struct w1_slave *sl, int val)
{
int ret;
- u8 new_config_register[3]; /* array of data to be written */
- struct therm_info info;
+ struct therm_info info, info2;
- /* resolution of DS18B20 is in the range [9..12] bits */
- if (val < 9 || val > 12)
+ /* DS18B20 resolution is 9 to 12 bits */
+ /* GX20MH01 resolution is 9 to 14 bits */
+ if (val < W1_THERM_RESOLUTION_MIN || val > W1_THERM_RESOLUTION_MAX)
return -EINVAL;
- val -= 9; /* soustract 9 the lowest resolution in bit */
- val = (val << 5); /* shift to position bit 5 & bit 6 */
+ /* Calc bit value from resolution */
+ val = (val - W1_THERM_RESOLUTION_MIN) << W1_THERM_RESOLUTION_SHIFT;
/*
* Read the scratchpad to change only the required bits
* (bit5 & bit 6 from byte 4)
*/
ret = read_scratchpad(sl, &info);
- if (!ret) {
- new_config_register[0] = info.rom[2];
- new_config_register[1] = info.rom[3];
- /* config register is byte 4 & mask 0b10011111*/
- new_config_register[2] = (info.rom[4] & 0x9F) |
- (u8) val;
- } else
+
+ if (ret)
return ret;
+
+ info.rom[4] &= ~W1_THERM_RESOLUTION_MASK;
+ info.rom[4] |= val;
+
/* Write data in the device RAM */
- ret = w1_DS18B20_write_data(sl, new_config_register);
+ ret = w1_DS18B20_write_data(sl, info.rom + 2);
+ if (ret)
+ return ret;
- return ret;
+ /* Have to read back the resolution to verify the actual value.
+ * GX20MH01 and DS18B20 are indistinguishable by family number, but resolutions differ.
+ * Some DS18B20 clones don't support resolution change.
+ */
+ ret = read_scratchpad(sl, &info2);
+ if (ret)
+ /* Scratchpad read fail */
+ return ret;
+
+ if ((info2.rom[4] & W1_THERM_RESOLUTION_MASK) == (info.rom[4] & W1_THERM_RESOLUTION_MASK))
+ return 0;
+
+ /* Resolution verify error */
+ return -EIO;
}
static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
{
int ret;
- u8 config_register;
+ int resolution;
struct therm_info info;
ret = read_scratchpad(sl, &info);
- if (!ret) {
- config_register = info.rom[4]; /* config register is byte 4 */
- config_register &= 0x60; /* 0b01100000 keep only bit 5 & 6 */
- config_register = (config_register >> 5); /* shift */
- config_register += 9; /* add 9 the lowest resolution in bit */
- ret = (int) config_register;
- }
- return ret;
+ if (ret)
+ return ret;
+
+ resolution = ((info.rom[4] & W1_THERM_RESOLUTION_MASK) >> W1_THERM_RESOLUTION_SHIFT)
+ + W1_THERM_RESOLUTION_MIN;
+ /* GX20MH01 has one special case:
+ * a bit value of >=14 means 14-bit resolution.
+ * Other devices have no more than 12 bits.
+ */
+ if (resolution > W1_THERM_RESOLUTION_MAX)
+ resolution = W1_THERM_RESOLUTION_MAX;
+
+ return resolution;
}
/**
@@ -564,11 +667,28 @@ static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
*/
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
- s16 t = le16_to_cpup((__le16 *)rom);
+ int t;
+ u32 bv;
+
+ /* Config register bit R2 = 1 - GX20MH01 in 13 or 14 bit resolution mode */
+ if (rom[4] & 0x80) {
+ /* Signed 16-bit value to unsigned, cpu order */
+ bv = le16_to_cpup((__le16 *)rom);
+
+ /* Insert two temperature bits from config register */
+ /* Avoid arithmetic shift of signed value */
+ bv = (bv << 2) | (rom[4] & 3);
+
+ t = (int) sign_extend32(bv, 17); /* Degrees, lowest bit is 2^-6 */
+ return (t*1000)/64; /* Millidegrees */
+ }
+ t = (int)le16_to_cpup((__le16 *)rom);
return t*1000/16;
}
+
+
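For the 13/14-bit path above, the two extra temperature bits from the config register are appended below the 16-bit reading and the 18-bit result is sign-extended, the LSB being worth 2^-6 degrees. A compilable version of that arithmetic (arithmetic right shift of a signed value assumed, which is what the kernel's sign_extend32() relies on):

#include <stdint.h>
#include <stdio.h>

/* Standalone version of the GX20MH01 branch: config-register bits 1:0 are
 * appended below the 16-bit reading, then sign-extended from 18 bits. */
static int convert_temp_hi_res(uint16_t raw16, unsigned int cfg_low2)
{
	uint32_t bv = ((uint32_t)raw16 << 2) | (cfg_low2 & 3);
	int32_t t = (int32_t)(bv << 14) >> 14;	/* sign-extend from 18 bits */

	return t * 1000 / 64;			/* millidegrees */
}

int main(void)
{
	printf("%d\n", convert_temp_hi_res(0x0190, 0));	/*  25000: +25.000 C */
	printf("%d\n", convert_temp_hi_res(0xff5e, 0));	/* -10125: -10.125 C */
	return 0;
}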
/**
* w1_DS18S20_convert_temp() - temperature computation for DS18S20
* @rom: data read from device RAM (8 data bytes + 1 CRC byte)
@@ -600,6 +720,7 @@ static inline int w1_DS18S20_convert_temp(u8 rom[9])
}
/* Device capability description */
+/* GX20MH01 device shares family number and structure with DS18B20 */
static struct w1_therm_family_converter w1_therm_families[] = {
{
@@ -621,6 +742,7 @@ static struct w1_therm_family_converter w1_therm_families[] = {
.bulk_read = true
},
{
+ /* Also used for GX20MH01 */
.f = &w1_therm_family_DS18B20,
.convert = w1_DS18B20_convert_temp,
.get_conversion_time = w1_DS18B20_convert_time,
@@ -700,6 +822,22 @@ static inline bool bus_mutex_lock(struct mutex *lock)
}
/**
+ * check_family_data() - Check if family data and specific functions are present
+ * @sl: W1 device data
+ *
+ * Return: 0 - OK, negative value - error
+ */
+static int check_family_data(struct w1_slave *sl)
+{
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(&sl->dev,
+ "%s: Device is not supported by the driver\n", __func__);
+ return -EINVAL; /* No device family */
+ }
+ return 0;
+}
+
+/**
* support_bulk_read() - check if slave support bulk read
* @sl: device to check the ability
*
@@ -883,6 +1021,34 @@ static int reset_select_slave(struct w1_slave *sl)
return 0;
}
+/**
+ * w1_poll_completion - Poll for operation completion, with timeout
+ * @dev_master: the device master of the bus
+ * @tout_ms: timeout in milliseconds
+ *
+ * The device answers 0's while an operation is in progress and 1's after it completes.
+ * A timeout may happen if the previous command was not recognised due to line noise.
+ *
+ * Return: 0 - OK, negative error - timeout
+ */
+static int w1_poll_completion(struct w1_master *dev_master, int tout_ms)
+{
+ int i;
+
+ for (i = 0; i < tout_ms/W1_POLL_PERIOD; i++) {
+ /* Delay is before poll, for device to recognize a command */
+ msleep(W1_POLL_PERIOD);
+
+ /* Compare all 8 bits to mitigate a noise on the bus */
+ if (w1_read_8(dev_master) == 0xFF)
+ break;
+ }
+ if (i == tout_ms/W1_POLL_PERIOD)
+ return -EIO;
+
+ return 0;
+}
+
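w1_poll_completion() relies on the device holding the line low (reads of 0x00) while busy and releasing it (0xFF) when done; comparing all eight bits filters single-bit noise. A small simulation of the loop, with read_byte() standing in for w1_read_8():

#include <stdio.h>

#define POLL_PERIOD_MS 32

/* The bus reads as 0x00 while the device is busy and 0xFF once done. */
static int busy_polls = 3;

static unsigned int read_byte(void)
{
	return busy_polls-- > 0 ? 0x00 : 0xFF;
}

static int poll_completion(int tout_ms)
{
	for (int i = 0; i < tout_ms / POLL_PERIOD_MS; i++) {
		/* the real loop sleeps POLL_PERIOD_MS before each read */
		if (read_byte() == 0xFF)
			return 0;
	}
	return -1;	/* timed out: the command was likely lost to bus noise */
}

int main(void)
{
	printf("%d\n", poll_completion(2000));	/* 0, after four reads */
	return 0;
}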
static int convert_t(struct w1_slave *sl, struct therm_info *info)
{
struct w1_master *dev_master = sl->master;
@@ -898,6 +1064,13 @@ static int convert_t(struct w1_slave *sl, struct therm_info *info)
(!SLAVE_POWERMODE(sl) &&
w1_strong_pullup));
+ if (strong_pullup && SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
+ dev_warn(&sl->dev,
+ "%s: Disabling W1_THERM_POLL_COMPLETION in parasite power mode.\n",
+ __func__);
+ SLAVE_FEATURES(sl) &= ~W1_THERM_POLL_COMPLETION;
+ }
+
/* get conversion duration device and id dependent */
t_conv = conversion_time(sl);
@@ -933,15 +1106,38 @@ static int convert_t(struct w1_slave *sl, struct therm_info *info)
}
mutex_unlock(&dev_master->bus_mutex);
} else { /*no device need pullup */
- mutex_unlock(&dev_master->bus_mutex);
-
- sleep_rem = msleep_interruptible(t_conv);
- if (sleep_rem != 0) {
- ret = -EINTR;
- goto dec_refcnt;
+ if (SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
+ ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
+ if (ret) {
+ dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
+ goto mt_unlock;
+ }
+ mutex_unlock(&dev_master->bus_mutex);
+ } else {
+ /* Fixed delay */
+ mutex_unlock(&dev_master->bus_mutex);
+ sleep_rem = msleep_interruptible(t_conv);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto dec_refcnt;
+ }
}
}
ret = read_scratchpad(sl, info);
+
+ /* If enabled, check for conversion success */
+ if ((SLAVE_FEATURES(sl) & W1_THERM_CHECK_RESULT) &&
+ (info->rom[6] == 0xC) &&
+ ((info->rom[1] == 0x5 && info->rom[0] == 0x50) ||
+ (info->rom[1] == 0x7 && info->rom[0] == 0xFF))
+ ) {
+ /* Invalid reading (scratchpad byte 6 = 0xC)
+ * due to insufficient conversion time
+ * or power failure.
+ */
+ ret = -EIO;
+ }
+
goto dec_refcnt;
}
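
The check added to convert_t() treats a scratchpad still holding its power-up contents (with byte 6 == 0x0C) as a conversion that never completed. A standalone predicate with the same conditions (test vectors invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-alone version of the validity test: a scratchpad still at its
 * power-up pattern means the conversion never ran to completion. */
static bool reading_invalid(const uint8_t rom[9])
{
	if (rom[6] != 0x0C)
		return false;
	return (rom[1] == 0x05 && rom[0] == 0x50) ||	/* 85 C power-up value */
	       (rom[1] == 0x07 && rom[0] == 0xFF);	/* GX20MH01 pattern    */
}

int main(void)
{
	uint8_t powerup[9] = { 0x50, 0x05, 0, 0, 0x7f, 0xff, 0x0c, 0x10, 0 };
	uint8_t good[9]    = { 0x90, 0x01, 0, 0, 0x7f, 0xff, 0x0c, 0x10, 0 };

	printf("%d %d\n", reading_invalid(powerup), reading_invalid(good)); /* 1 0 */
	return 0;
}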
@@ -955,6 +1151,76 @@ error:
return ret;
}
+static int conv_time_measure(struct w1_slave *sl, int *conv_time)
+{
+ struct therm_info inf, *info = &inf;
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
+ bool strong_pullup;
+
+ if (!sl->family_data)
+ goto error;
+
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+
+ if (strong_pullup) {
+ pr_info("%s: Measure with strong_pullup is not supported.\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(info->rom, 0, sizeof(info->rom));
+
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
+
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
+
+ while (max_trying-- && ret) { /* ret should be 0 */
+ info->verdict = 0;
+ info->crc = 0;
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ int j_start, j_end;
+
+ /*no device need pullup */
+ w1_write_8(dev_master, W1_CONVERT_TEMP);
+
+ j_start = jiffies;
+ ret = w1_poll_completion(dev_master, W1_POLL_CONVERT_TEMP);
+ if (ret) {
+ dev_dbg(&sl->dev, "%s: Timeout\n", __func__);
+ goto mt_unlock;
+ }
+ j_end = jiffies;
+ /* 1.2x increase for variation and changes over temperature range */
+ *conv_time = jiffies_to_msecs(j_end-j_start)*12/10;
+ pr_debug("W1 Measure complete, conv_time = %d, HZ=%d.\n",
+ *conv_time, HZ);
+ if (*conv_time <= CONV_TIME_MEASURE) {
+ ret = -EIO;
+ goto mt_unlock;
+ }
+ mutex_unlock(&dev_master->bus_mutex);
+ ret = read_scratchpad(sl, info);
+ goto dec_refcnt;
+ }
+
+ }
+mt_unlock:
+ mutex_unlock(&dev_master->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
+}
+
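The measured duration is padded by 20% before being stored as the override, to absorb device variation across the temperature range; the integer arithmetic for that guard band, checked standalone:

#include <stdio.h>

/* The 1.2x guard band from conv_time_measure(), in integer arithmetic. */
static int guarded_conv_time(int measured_ms)
{
	return measured_ms * 12 / 10;
}

int main(void)
{
	printf("%d\n", guarded_conv_time(642));	/* 770 ms stored as the override */
	return 0;
}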
static int read_scratchpad(struct w1_slave *sl, struct therm_info *info)
{
struct w1_master *dev_master = sl->master;
@@ -1118,10 +1384,7 @@ static int recall_eeprom(struct w1_slave *sl)
if (!reset_select_slave(sl)) {
w1_write_8(dev_master, W1_RECALL_EEPROM);
-
- ret = 1; /* Slave will pull line to 0 */
- while (ret)
- ret = 1 - w1_touch_bit(dev_master, 1);
+ ret = w1_poll_completion(dev_master, W1_POLL_RECALL_EEPROM);
}
}
@@ -1345,11 +1608,13 @@ static ssize_t w1_slave_store(struct device *device,
}
if (ret) {
- dev_info(device,
- "%s: writing error %d\n", __func__, ret);
- /* return size to avoid call back again */
- } else
- SLAVE_RESOLUTION(sl) = val;
+ dev_warn(device, "%s: Set resolution - error %d\n", __func__, ret);
+ /* Propagate error to userspace */
+ return ret;
+ }
+ SLAVE_RESOLUTION(sl) = val;
+ /* Reset the conversion time to default - it depends on resolution */
+ SLAVE_CONV_TIME_OVERRIDE(sl) = CONV_TIME_DEFAULT;
return size; /* always return size to avoid infinite calling */
}
@@ -1465,12 +1730,12 @@ static ssize_t resolution_store(struct device *device,
/* get the correct function depending on the device */
ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
- if (ret) {
- dev_info(device,
- "%s: writing error %d\n", __func__, ret);
- /* return size to avoid call back again */
- } else
- SLAVE_RESOLUTION(sl) = val;
+ if (ret)
+ return ret;
+
+ SLAVE_RESOLUTION(sl) = val;
+ /* Reset the conversion time to default because it depends on resolution */
+ SLAVE_CONV_TIME_OVERRIDE(sl) = CONV_TIME_DEFAULT;
return size;
}
@@ -1660,6 +1925,96 @@ show_result:
return sprintf(buf, "%d\n", ret);
}
+static ssize_t conv_time_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device is not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+ return sprintf(buf, "%d\n", conversion_time(sl));
+}
+
+static ssize_t conv_time_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int val, ret = 0;
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if (kstrtoint(buf, 10, &val)) /* converting user entry to int */
+ return -EINVAL;
+
+ if (check_family_data(sl))
+ return -ENODEV;
+
+ if (val != CONV_TIME_MEASURE) {
+ if (val >= CONV_TIME_DEFAULT)
+ SLAVE_CONV_TIME_OVERRIDE(sl) = val;
+ else
+ return -EINVAL;
+
+ } else {
+ int conv_time;
+
+ ret = conv_time_measure(sl, &conv_time);
+ if (ret)
+ return -EIO;
+ SLAVE_CONV_TIME_OVERRIDE(sl) = conv_time;
+ }
+ return size;
+}
+
+static ssize_t features_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+ return sprintf(buf, "%u\n", SLAVE_FEATURES(sl));
+}
+
+static ssize_t features_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int val, ret = 0;
+ bool strong_pullup;
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ ret = kstrtouint(buf, 10, &val); /* converting user entry to int */
+ if (ret)
+ return -EINVAL; /* invalid number */
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device, "%s: Device not supported by the driver\n", __func__);
+ return -ENODEV;
+ }
+
+ if ((val & W1_THERM_FEATURES_MASK) != val)
+ return -EINVAL;
+
+ SLAVE_FEATURES(sl) = val;
+
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+
+ if (strong_pullup && SLAVE_FEATURES(sl) & W1_THERM_POLL_COMPLETION) {
+ dev_warn(&sl->dev,
+ "%s: W1_THERM_POLL_COMPLETION disabled in parasite power mode.\n",
+ __func__);
+ SLAVE_FEATURES(sl) &= ~W1_THERM_POLL_COMPLETION;
+ }
+
+ return size;
+}
+
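From userspace, the new attributes are plain sysfs files. A hedged C usage sketch (the sensor id in the path is made up and depends on the actual bus id of the device):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Writes one value to one sysfs attribute. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* hypothetical sensor id; substitute the real w1 bus id */
	const char *dev = "/sys/bus/w1/devices/28-0000075c1a42";
	char path[256];

	/* enable the result check and completion polling (bits 0 and 1) */
	snprintf(path, sizeof(path), "%s/features", dev);
	sysfs_write(path, "3");

	/* measure the real conversion time and keep 1.2x of it as override */
	snprintf(path, sizeof(path), "%s/conv_time", dev);
	sysfs_write(path, "1");
	return 0;
}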
#if IS_REACHABLE(CONFIG_HWMON)
static int w1_read_temp(struct device *device, u32 attr, int channel,
long *val)
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index e58c7592008d..15a2ee32f116 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -160,7 +160,7 @@ static const struct attribute_group *w1_slave_default_groups[] = {
NULL,
};
-static struct w1_family_ops w1_default_fops = {
+static const struct w1_family_ops w1_default_fops = {
.groups = w1_slave_default_groups,
};
@@ -613,7 +613,7 @@ end:
static int w1_family_notify(unsigned long action, struct w1_slave *sl)
{
- struct w1_family_ops *fops;
+ const struct w1_family_ops *fops;
int err;
fops = sl->family->fops;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ab7aad5a1e69..fd7968635e6d 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -340,6 +340,17 @@ config MLX_WDT
To compile this driver as a module, choose M here: the
module will be called mlx-wdt.
+config SL28CPLD_WATCHDOG
+ tristate "Kontron sl28cpld Watchdog"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ on the Kontron sl28 CPLD.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sl28cpld_wdt.
+
# ALPHA Architecture
# ARM Architecture
@@ -478,16 +489,10 @@ config IXP4XX_WATCHDOG
Say N if you are unsure.
-config HAVE_S3C2410_WATCHDOG
- bool
- help
- This will include watchdog timer support for Samsung SoCs. If
- you want to include watchdog support for any machine, kindly
- select this in the respective mach-XXXX/Kconfig file.
-
config S3C2410_WATCHDOG
tristate "S3C2410 Watchdog"
- depends on HAVE_S3C2410_WATCHDOG || COMPILE_TEST
+ depends on ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || \
+ COMPILE_TEST
select WATCHDOG_CORE
select MFD_SYSCON if ARCH_EXYNOS
help
@@ -1004,6 +1009,14 @@ config PM8916_WATCHDOG
Say Y here to include support watchdog timer embedded into the
pm8916 module.
+config VISCONTI_WATCHDOG
+ tristate "Toshiba Visconti series watchdog support"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer in Toshiba
+ Visconti SoCs.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 97bed1d3d97c..071a2e50be98 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
+obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
@@ -225,3 +226,4 @@ obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
obj-$(CONFIG_MENZ069_WATCHDOG) += menz69_wdt.o
obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
obj-$(CONFIG_STPMIC1_WATCHDOG) += stpmic1_wdt.o
+obj-$(CONFIG_SL28CPLD_WATCHDOG) += sl28cpld_wdt.o
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 672b184da875..bc99e9164930 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -334,12 +334,9 @@ static int cdns_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(cdns_wdt_device, wdt);
wdt->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(wdt->clk)) {
- ret = PTR_ERR(wdt->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "input clock not found\n");
- return ret;
- }
+ if (IS_ERR(wdt->clk))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk),
+ "input clock not found\n");
ret = clk_prepare_enable(wdt->clk);
if (ret) {
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 2b3f3cd382ef..e6eaba6bae5b 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -206,12 +206,9 @@ static int davinci_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
davinci_wdt->clk = devm_clk_get(dev, NULL);
-
- if (IS_ERR(davinci_wdt->clk)) {
- if (PTR_ERR(davinci_wdt->clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get clock node\n");
- return PTR_ERR(davinci_wdt->clk);
- }
+ if (IS_ERR(davinci_wdt->clk))
+ return dev_err_probe(dev, PTR_ERR(davinci_wdt->clk),
+ "failed to get clock node\n");
ret = clk_prepare_enable(davinci_wdt->clk);
if (ret) {
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 7993c8c41b3a..922b60374295 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,6 +22,8 @@
#define WDOG_CS_CLK (LPO_CLK << LPO_CLK_SHIFT)
#define WDOG_CS_EN BIT(7)
#define WDOG_CS_UPDATE BIT(5)
+#define WDOG_CS_WAIT BIT(1)
+#define WDOG_CS_STOP BIT(0)
#define WDOG_CNT 0x4
#define WDOG_TOVAL 0x8
@@ -36,6 +39,7 @@
#define DEFAULT_TIMEOUT 60
#define MAX_TIMEOUT 128
#define WDOG_CLOCK_RATE 1000
+#define WDOG_WAIT_TIMEOUT 20
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0000);
@@ -48,17 +52,40 @@ struct imx7ulp_wdt_device {
struct clk *clk;
};
-static void imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
+static int imx7ulp_wdt_wait(void __iomem *base, u32 mask)
+{
+ u32 val = readl(base + WDOG_CS);
+
+ if (!(val & mask) && readl_poll_timeout_atomic(base + WDOG_CS, val,
+ val & mask, 0,
+ WDOG_WAIT_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
u32 val = readl(wdt->base + WDOG_CS);
+ int ret;
+ local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
+ ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ if (ret)
+ goto enable_out;
if (enable)
writel(val | WDOG_CS_EN, wdt->base + WDOG_CS);
else
writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS);
+ imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
+
+enable_out:
+ local_irq_enable();
+
+ return ret;
}
static bool imx7ulp_wdt_is_enabled(void __iomem *base)
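
The imx7ulp changes serialize every reconfiguration as unlock, wait for the unlock status bit, write, then wait for the reconfiguration-complete bit, bailing out if the unlock never takes effect. A toy model of that ordering (register behavior and bit positions are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CS_ULK (1u << 11)	/* hypothetical "registers unlocked" bit */
#define CS_RCS (1u << 10)	/* hypothetical "reconfig complete" bit  */

static uint32_t wdog_cs;
static uint32_t wdog_toval;

static int wait_for(uint32_t mask)
{
	for (int tries = 0; tries < 20; tries++) {	/* bounded wait */
		if (wdog_cs & mask)
			return 0;
		wdog_cs |= mask;	/* model: hardware settles on the next poll */
	}
	return -1;
}

static int set_timeout(uint32_t new_toval)
{
	wdog_cs &= ~(CS_ULK | CS_RCS);
	/* the UNLOCK write to WDOG_CNT would go here */
	if (wait_for(CS_ULK))
		return -1;		/* never unlocked: abort, nothing written */
	wdog_toval = new_toval;		/* the WDOG_TOVAL write */
	return wait_for(CS_RCS);	/* wait for the new configuration to latch */
}

int main(void)
{
	printf("%d (toval=%u)\n", set_timeout(60000), wdog_toval); /* 0 (toval=60000) */
	return 0;
}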
@@ -79,17 +106,12 @@ static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
static int imx7ulp_wdt_start(struct watchdog_device *wdog)
{
-
- imx7ulp_wdt_enable(wdog, true);
-
- return 0;
+ return imx7ulp_wdt_enable(wdog, true);
}
static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
{
- imx7ulp_wdt_enable(wdog, false);
-
- return 0;
+ return imx7ulp_wdt_enable(wdog, false);
}
static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
@@ -97,22 +119,37 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
u32 val = WDOG_CLOCK_RATE * timeout;
+ int ret;
+ local_irq_disable();
writel(UNLOCK, wdt->base + WDOG_CNT);
+ ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK);
+ if (ret)
+ goto timeout_out;
writel(val, wdt->base + WDOG_TOVAL);
+ imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS);
wdog->timeout = timeout;
- return 0;
+timeout_out:
+ local_irq_enable();
+
+ return ret;
}
static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
unsigned long action, void *data)
{
struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
+ int ret;
+
+ ret = imx7ulp_wdt_enable(wdog, true);
+ if (ret)
+ return ret;
- imx7ulp_wdt_enable(wdog, true);
- imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
+ ret = imx7ulp_wdt_set_timeout(&wdt->wdd, 1);
+ if (ret)
+ return ret;
/* wait for wdog to fire */
while (true)
@@ -136,19 +173,31 @@ static const struct watchdog_info imx7ulp_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+static int imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
{
u32 val;
+ int ret;
+ local_irq_disable();
/* unlock the wdog for reconfiguration */
writel_relaxed(UNLOCK_SEQ0, base + WDOG_CNT);
writel_relaxed(UNLOCK_SEQ1, base + WDOG_CNT);
+ ret = imx7ulp_wdt_wait(base, WDOG_CS_ULK);
+ if (ret)
+ goto init_out;
/* set an initial timeout value in TOVAL */
writel(timeout, base + WDOG_TOVAL);
/* enable 32bit command sequence and reconfigure */
- val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE;
+ val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE |
+ WDOG_CS_WAIT | WDOG_CS_STOP;
writel(val, base + WDOG_CS);
+ imx7ulp_wdt_wait(base, WDOG_CS_RCS);
+
+init_out:
+ local_irq_enable();
+
+ return ret;
}
static void imx7ulp_wdt_action(void *data)
@@ -199,7 +248,9 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
watchdog_set_drvdata(wdog, imx7ulp_wdt);
- imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE);
+ ret = imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE);
+ if (ret)
+ return ret;
return devm_watchdog_register_device(dev, wdog);
}
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index f3bf3ea50e39..2b4831842162 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -15,7 +15,7 @@
* Support of the watchdog timers, which are available on
* IT8607, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686,
* IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728,
- * and IT8783.
+ * IT8772, IT8783 and IT8784.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -66,7 +66,9 @@
#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */
#define IT8728_ID 0x8728
+#define IT8772_ID 0x8772
#define IT8783_ID 0x8783
+#define IT8784_ID 0x8784
#define IT8786_ID 0x8786
/* GPIO Configuration Registers LDN=0x07 */
@@ -294,7 +296,9 @@ static int __init it87_wdt_init(void)
case IT8720_ID:
case IT8721_ID:
case IT8728_ID:
+ case IT8772_ID:
case IT8783_ID:
+ case IT8784_ID:
case IT8786_ID:
max_units = 65535;
break;
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 41a928eb91ed..1bdaf17c1d38 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -656,7 +656,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
/* set up the memory buffer's */
usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size,
- GFP_ATOMIC, &usb_pcwd->intr_dma);
+ GFP_KERNEL, &usb_pcwd->intr_dma);
if (!usb_pcwd->intr_buffer) {
pr_err("Out of memory\n");
goto error;
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 57187efeb86f..f0c94ea51c3e 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -231,6 +231,8 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
rdc321x_wdt_device.base_reg = r->start;
+ rdc321x_wdt_device.queue = 0;
+ rdc321x_wdt_device.default_ticks = ticks;
err = misc_register(&rdc321x_wdt_misc);
if (err < 0) {
@@ -245,14 +247,11 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
rdc321x_wdt_device.base_reg, RDC_WDT_RST);
init_completion(&rdc321x_wdt_device.stop);
- rdc321x_wdt_device.queue = 0;
clear_bit(0, &rdc321x_wdt_device.inuse);
timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
- rdc321x_wdt_device.default_ticks = ticks;
-
dev_info(&pdev->dev, "watchdog init success\n");
return 0;
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 00662a8e039c..47fce4de0110 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -194,6 +194,7 @@ static int rwdt_probe(struct platform_device *pdev)
struct clk *clk;
unsigned long clks_per_sec;
int ret, i;
+ u8 csra;
if (rwdt_blacklisted(dev))
return -ENODEV;
@@ -213,8 +214,8 @@ static int rwdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
priv->clk_rate = clk_get_rate(clk);
- priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
- RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
+ csra = readb_relaxed(priv->base + RWTCSRA);
+ priv->wdev.bootstatus = csra & RWTCSRA_WOVF ? WDIOF_CARDRESET : 0;
pm_runtime_put(dev);
if (!priv->clk_rate) {
@@ -252,6 +253,13 @@ static int rwdt_probe(struct platform_device *pdev)
/* This overrides the default timeout only if DT configuration was found */
watchdog_init_timeout(&priv->wdev, 0, dev);
+ /* Check if FW enabled the watchdog */
+ if (csra & RWTCSRA_TME) {
+ /* Ensure properly initialized dividers */
+ rwdt_start(&priv->wdev);
+ set_bit(WDOG_HW_RUNNING, &priv->wdev.status);
+ }
+
ret = watchdog_register_device(&priv->wdev);
if (ret < 0)
goto out_pm_disable;
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 705e8f7523e8..836319cbaca9 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -205,11 +205,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
clk = clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
wdt->freq = clk_get_rate(clk);
@@ -230,11 +227,8 @@ static int rti_wdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "runtime pm failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "runtime pm failed\n");
platform_set_drvdata(pdev, wdt);
diff --git a/drivers/watchdog/sl28cpld_wdt.c b/drivers/watchdog/sl28cpld_wdt.c
new file mode 100644
index 000000000000..a45047d8d9ab
--- /dev/null
+++ b/drivers/watchdog/sl28cpld_wdt.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sl28cpld watchdog driver
+ *
+ * Copyright 2020 Kontron Europe GmbH
+ */
+
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/watchdog.h>
+
+/*
+ * Watchdog timer block registers.
+ */
+#define WDT_CTRL 0x00
+#define WDT_CTRL_EN BIT(0)
+#define WDT_CTRL_LOCK BIT(2)
+#define WDT_CTRL_ASSERT_SYS_RESET BIT(6)
+#define WDT_CTRL_ASSERT_WDT_TIMEOUT BIT(7)
+#define WDT_TIMEOUT 0x01
+#define WDT_KICK 0x02
+#define WDT_KICK_VALUE 0x6b
+#define WDT_COUNT 0x03
+
+#define WDT_DEFAULT_TIMEOUT 10
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds");
+
+struct sl28cpld_wdt {
+ struct watchdog_device wdd;
+ struct regmap *regmap;
+ u32 offset;
+ bool assert_wdt_timeout;
+};
+
+static int sl28cpld_wdt_ping(struct watchdog_device *wdd)
+{
+ struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ return regmap_write(wdt->regmap, wdt->offset + WDT_KICK,
+ WDT_KICK_VALUE);
+}
+
+static int sl28cpld_wdt_start(struct watchdog_device *wdd)
+{
+ struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned int val;
+
+ val = WDT_CTRL_EN | WDT_CTRL_ASSERT_SYS_RESET;
+ if (wdt->assert_wdt_timeout)
+ val |= WDT_CTRL_ASSERT_WDT_TIMEOUT;
+ if (nowayout)
+ val |= WDT_CTRL_LOCK;
+
+ return regmap_update_bits(wdt->regmap, wdt->offset + WDT_CTRL,
+ val, val);
+}
+
+static int sl28cpld_wdt_stop(struct watchdog_device *wdd)
+{
+ struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ return regmap_update_bits(wdt->regmap, wdt->offset + WDT_CTRL,
+ WDT_CTRL_EN, 0);
+}
+
+static unsigned int sl28cpld_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wdt->regmap, wdt->offset + WDT_COUNT, &val);
+ if (ret)
+ return 0;
+
+ return val;
+}
+
+static int sl28cpld_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct sl28cpld_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+ ret = regmap_write(wdt->regmap, wdt->offset + WDT_TIMEOUT, timeout);
+ if (ret)
+ return ret;
+
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info sl28cpld_wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "sl28cpld watchdog",
+};
+
+static const struct watchdog_ops sl28cpld_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sl28cpld_wdt_start,
+ .stop = sl28cpld_wdt_stop,
+ .ping = sl28cpld_wdt_ping,
+ .set_timeout = sl28cpld_wdt_set_timeout,
+ .get_timeleft = sl28cpld_wdt_get_timeleft,
+};
+
+static int sl28cpld_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ struct sl28cpld_wdt *wdt;
+ unsigned int status;
+ unsigned int val;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!wdt->regmap)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &wdt->offset);
+ if (ret)
+ return -EINVAL;
+
+ wdt->assert_wdt_timeout = device_property_read_bool(&pdev->dev,
+ "kontron,assert-wdt-timeout-pin");
+
+ /* initialize struct watchdog_device */
+ wdd = &wdt->wdd;
+ wdd->parent = &pdev->dev;
+ wdd->info = &sl28cpld_wdt_info;
+ wdd->ops = &sl28cpld_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_timeout = 255;
+
+ watchdog_set_drvdata(wdd, wdt);
+ watchdog_stop_on_reboot(wdd);
+
+ /*
+ * Read the status early so that, in case of an error, we haven't
+ * modified the hardware yet.
+ */
+ ret = regmap_read(wdt->regmap, wdt->offset + WDT_CTRL, &status);
+ if (ret)
+ return ret;
+
+ /*
+ * Initial timeout value; it may be overwritten by the device tree or a
+ * module parameter in watchdog_init_timeout().
+ *
+ * Reading a zero here means that either the hardware has a default
+ * value of zero (which is very unlikely and definitely a hardware
+ * bug) or the bootloader set it to zero. In any case, we handle
+ * this case gracefully and set our own timeout.
+ */
+ ret = regmap_read(wdt->regmap, wdt->offset + WDT_TIMEOUT, &val);
+ if (ret)
+ return ret;
+
+ if (val)
+ wdd->timeout = val;
+ else
+ wdd->timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ sl28cpld_wdt_set_timeout(wdd, wdd->timeout);
+
+ /* if the watchdog is locked, we set nowayout */
+ if (status & WDT_CTRL_LOCK)
+ nowayout = true;
+ watchdog_set_nowayout(wdd, nowayout);
+
+ /*
+ * If watchdog is already running, keep it enabled, but make
+ * sure its mode is set correctly.
+ */
+ if (status & WDT_CTRL_EN) {
+ sl28cpld_wdt_start(wdd);
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
+
+ ret = devm_watchdog_register_device(&pdev->dev, wdd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register watchdog device\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "initial timeout %d sec%s\n",
+ wdd->timeout, nowayout ? ", nowayout" : "");
+
+ return 0;
+}
+
+static const struct of_device_id sl28cpld_wdt_of_match[] = {
+ { .compatible = "kontron,sl28cpld-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sl28cpld_wdt_of_match);
+
+static struct platform_driver sl28cpld_wdt_driver = {
+ .probe = sl28cpld_wdt_probe,
+ .driver = {
+ .name = "sl28cpld-wdt",
+ .of_match_table = sl28cpld_wdt_of_match,
+ },
+};
+module_platform_driver(sl28cpld_wdt_driver);
+
+MODULE_DESCRIPTION("sl28cpld Watchdog Driver");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
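
The probe keeps a bootloader-programmed timeout when the TIMEOUT register is non-zero and falls back to the driver default otherwise; that selection, as a pure function:

#include <stdio.h>

#define WDT_DEFAULT_TIMEOUT 10

/* Keep a non-zero bootloader value, fall back to the default otherwise. */
static unsigned int initial_timeout(unsigned int hw_val)
{
	return hw_val ? hw_val : WDT_DEFAULT_TIMEOUT;
}

int main(void)
{
	printf("%u %u\n", initial_timeout(0), initial_timeout(30));	/* 10 30 */
	return 0;
}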
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 85e9664318c9..a730ecbf78cd 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -17,6 +17,12 @@
* AMD Publication 51192 "AMD Bolton FCH Register Reference Guide"
* AMD Publication 52740 "BIOS and Kernel Developer’s Guide (BKDG)
* for AMD Family 16h Models 30h-3Fh Processors"
+ * AMD Publication 55570-B1-PUB "Processor Programming Reference (PPR)
+ * for AMD Family 17h Model 18h, Revision B1
+ * Processors (PUB)"
+ * AMD Publication 55772-A1-PUB "Processor Programming Reference (PPR)
+ * for AMD Family 17h Model 20h, Revision A1
+ * Processors (PUB)"
*/
/*
@@ -241,6 +247,18 @@ static int sp5100_tco_setupdevice(struct device *dev,
break;
case efch:
dev_name = SB800_DEVNAME;
+ /*
+ * On Family 17h devices, the EFCH_PM_DECODEEN_WDT_TMREN bit of
+ * EFCH_PM_DECODEEN not only enables the EFCH_PM_WDT_ADDR memory
+ * region, it also enables the watchdog itself.
+ */
+ if (boot_cpu_data.x86 == 0x17) {
+ val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
+ if (!(val & EFCH_PM_DECODEEN_WDT_TMREN)) {
+ sp5100_tco_update_pm_reg8(EFCH_PM_DECODEEN, 0xff,
+ EFCH_PM_DECODEEN_WDT_TMREN);
+ }
+ }
val = sp5100_tco_read_pm_reg8(EFCH_PM_DECODEEN);
if (val & EFCH_PM_DECODEEN_WDT_TMREN)
mmio_addr = EFCH_PM_WDT_ADDR;
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 87eaf357ae01..adf015aa4126 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -70,7 +70,7 @@
#define EFCH_PM_DECODEEN_WDT_TMREN BIT(7)
-#define EFCH_PM_DECODEEN3 0x00
+#define EFCH_PM_DECODEEN3 0x03
#define EFCH_PM_DECODEEN_SECOND_RES GENMASK(1, 0)
#define EFCH_PM_WATCHDOG_DISABLE ((u8)GENMASK(3, 2))
diff --git a/drivers/watchdog/visconti_wdt.c b/drivers/watchdog/visconti_wdt.c
new file mode 100644
index 000000000000..83ef55e66ca8
--- /dev/null
+++ b/drivers/watchdog/visconti_wdt.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 TOSHIBA CORPORATION
+ * Copyright (c) 2020 Toshiba Electronic Devices & Storage Corporation
+ * Copyright (c) 2020 Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+#define WDT_CNT 0x00
+#define WDT_MIN 0x04
+#define WDT_MAX 0x08
+#define WDT_CTL 0x0c
+#define WDT_CMD 0x10
+#define WDT_CMD_CLEAR 0x4352
+#define WDT_CMD_START_STOP 0x5354
+#define WDT_DIV 0x30
+
+#define VISCONTI_WDT_FREQ 2000000 /* 2MHz */
+#define WDT_DEFAULT_TIMEOUT 10U /* in seconds */
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(
+ nowayout,
+ "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT)")");
+
+struct visconti_wdt_priv {
+ struct watchdog_device wdev;
+ void __iomem *base;
+ u32 div;
+};
+
+static int visconti_wdt_start(struct watchdog_device *wdev)
+{
+ struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u32 timeout = wdev->timeout * VISCONTI_WDT_FREQ;
+
+ writel(priv->div, priv->base + WDT_DIV);
+ writel(0, priv->base + WDT_MIN);
+ writel(timeout, priv->base + WDT_MAX);
+ writel(0, priv->base + WDT_CTL);
+ writel(WDT_CMD_START_STOP, priv->base + WDT_CMD);
+
+ return 0;
+}
+
+static int visconti_wdt_stop(struct watchdog_device *wdev)
+{
+ struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ writel(1, priv->base + WDT_CTL);
+ writel(WDT_CMD_START_STOP, priv->base + WDT_CMD);
+
+ return 0;
+}
+
+static int visconti_wdt_ping(struct watchdog_device *wdd)
+{
+ struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdd);
+
+ writel(WDT_CMD_CLEAR, priv->base + WDT_CMD);
+
+ return 0;
+}
+
+static unsigned int visconti_wdt_get_timeleft(struct watchdog_device *wdev)
+{
+ struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u32 timeout = wdev->timeout * VISCONTI_WDT_FREQ;
+ u32 cnt = readl(priv->base + WDT_CNT);
+
+ if (timeout <= cnt)
+ return 0;
+ timeout -= cnt;
+
+ return timeout / VISCONTI_WDT_FREQ;
+}
+
+static int visconti_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+{
+ u32 val;
+ struct visconti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ wdev->timeout = timeout;
+ val = wdev->timeout * VISCONTI_WDT_FREQ;
+
+ /* Clear the counter before setting the timeout so the WDT does not expire during the update */
+ writel(WDT_CMD_CLEAR, priv->base + WDT_CMD);
+ writel(val, priv->base + WDT_MAX);
+
+ return 0;
+}
+
+static const struct watchdog_info visconti_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "Visconti Watchdog",
+};
+
+static const struct watchdog_ops visconti_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = visconti_wdt_start,
+ .stop = visconti_wdt_stop,
+ .ping = visconti_wdt_ping,
+ .get_timeleft = visconti_wdt_get_timeleft,
+ .set_timeout = visconti_wdt_set_timeout,
+};
+
+static void visconti_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int visconti_wdt_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdev;
+ struct visconti_wdt_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+ unsigned long clk_freq;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Could not get clock\n");
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Could not enable clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, visconti_clk_disable_unprepare, clk);
+ if (ret)
+ return ret;
+
+ clk_freq = clk_get_rate(clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ priv->div = clk_freq / VISCONTI_WDT_FREQ;
+
+ /* Initialize struct watchdog_device. */
+ wdev = &priv->wdev;
+ wdev->info = &visconti_wdt_info;
+ wdev->ops = &visconti_wdt_ops;
+ wdev->parent = dev;
+ wdev->min_timeout = 1;
+ wdev->max_timeout = 0xffffffff / VISCONTI_WDT_FREQ;
+ wdev->timeout = min(wdev->max_timeout, WDT_DEFAULT_TIMEOUT);
+
+ watchdog_set_drvdata(wdev, priv);
+ watchdog_set_nowayout(wdev, nowayout);
+ watchdog_stop_on_unregister(wdev);
+
+ /* This overrides the default timeout only if DT configuration was found */
+ ret = watchdog_init_timeout(wdev, 0, dev);
+ if (ret)
+ dev_warn(dev, "Specified timeout value invalid, using default\n");
+
+ return devm_watchdog_register_device(dev, wdev);
+}
+
+static const struct of_device_id visconti_wdt_of_match[] = {
+ { .compatible = "toshiba,visconti-wdt", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, visconti_wdt_of_match);
+
+static struct platform_driver visconti_wdt_driver = {
+ .driver = {
+ .name = "visconti_wdt",
+ .of_match_table = visconti_wdt_of_match,
+ },
+ .probe = visconti_wdt_probe,
+};
+module_platform_driver(visconti_wdt_driver);
+
+MODULE_DESCRIPTION("TOSHIBA Visconti Watchdog Driver");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>");
+MODULE_LICENSE("GPL v2");
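
The Visconti timeout registers count at a fixed 2 MHz after the divider, so seconds are converted by multiplying with VISCONTI_WDT_FREQ, and max_timeout is bounded by the 32-bit WDT_MAX compare register. The arithmetic, checked standalone:

#include <stdio.h>

#define VISCONTI_WDT_FREQ 2000000u	/* counter rate after the divider */

int main(void)
{
	/* The 32-bit compare register bounds the timeout: */
	printf("max timeout: %u s\n", 0xffffffffu / VISCONTI_WDT_FREQ);	/* 2147 */
	/* And a 10 s timeout programs this many counter ticks: */
	printf("10 s: %u ticks\n", 10 * VISCONTI_WDT_FREQ);	/* 20000000 */
	return 0;
}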
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 6798addabd5a..2946f3a63110 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -43,8 +43,6 @@
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-#include <uapi/linux/sched/types.h> /* For struct sched_param */
-
#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
@@ -994,8 +992,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
wd_data->wdd = wdd;
wdd->wd_data = wd_data;
- if (IS_ERR_OR_NULL(watchdog_kworker))
+ if (IS_ERR_OR_NULL(watchdog_kworker)) {
+ kfree(wd_data);
return -ENODEV;
+ }
device_initialize(&wd_data->dev);
wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
@@ -1021,7 +1021,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
pr_err("%s: a legacy watchdog module is probably present.\n",
wdd->info->identity);
old_wd_data = NULL;
- kfree(wd_data);
+ put_device(&wd_data->dev);
return err;
}
}
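Both fixes above follow from the same driver-core rule: kfree() on a structure embedding a struct device is only legal before device_initialize(); afterwards the kobject owns the lifetime and cleanup must go through put_device(), which ends in the release() callback. A generic sketch of the rule, with hypothetical foo_* names and a placeholder some_precondition():

/* Sketch of the kfree()-vs-put_device() rule; foo_* and
 * some_precondition() are placeholders. */
#include <linux/device.h>
#include <linux/slab.h>

struct foo {
    struct device dev;
};

static bool some_precondition(void);

static void foo_release(struct device *dev)
{
    kfree(container_of(dev, struct foo, dev));
}

static int foo_create(void)
{
    struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

    if (!f)
        return -ENOMEM;
    if (!some_precondition()) {
        kfree(f); /* before device_initialize(): plain kfree() is legal */
        return -ENODEV;
    }
    device_initialize(&f->dev);
    f->dev.release = foo_release;
    if (device_add(&f->dev)) {
        put_device(&f->dev); /* after device_initialize(): never kfree() */
        return -ENODEV;
    }
    return 0;
}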
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 51427c752b37..b57b2067ecbf 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -331,7 +331,7 @@ static enum bp_state reserve_additional_memory(void)
mutex_unlock(&balloon_mutex);
/* add_memory_resource() requires the device_hotplug lock */
lock_device_hotplug();
- rc = add_memory_resource(nid, resource);
+ rc = add_memory_resource(nid, resource, MEMHP_MERGE_RESOURCE);
unlock_device_hotplug();
mutex_lock(&balloon_mutex);
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 64df919a2111..da87f3a1e351 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -47,10 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
return EVTCHN_2L_NR_CHANNELS;
}
-static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
+static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+ unsigned int old_cpu)
{
- clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
- set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
+ clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
+ set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}
static void evtchn_2l_clear_pending(evtchn_port_t port)
@@ -91,6 +92,8 @@ static void evtchn_2l_unmask(evtchn_port_t port)
BUG_ON(!irqs_disabled());
+ smp_wmb(); /* All writes before unmask must be visible. */
+
if (unlikely((cpu != cpu_from_evtchn(port))))
do_hypercall = 1;
else {
@@ -159,7 +162,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
* a bitset of words which contain pending event bits. The second
* level is a bitset of pending events themselves.
*/
-static void evtchn_2l_handle_events(unsigned cpu)
+static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
int irq;
xen_ulong_t pending_words;
@@ -240,10 +243,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
/* Process port. */
port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
- irq = get_evtchn_to_irq(port);
-
- if (irq != -1)
- generic_handle_irq(irq);
+ handle_irq_for_port(port, ctrl);
bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
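The comment above describes the 2-level ABI's lookup structure. A simplified, self-contained sketch of scanning such a two-level bitmap (the kernel loop additionally masks, clears and resumes at a remembered position, all omitted here; handle_port() is a placeholder):

/* Simplified two-level scan: level 1 flags which level-2 words hold
 * pending bits. handle_port() stands in for the real IRQ dispatch. */
#include <stdint.h>

#define BITS_PER_WORD 64

static void handle_port(unsigned int port)
{
    (void)port; /* placeholder for generic_handle_irq()-style dispatch */
}

static void scan_events(uint64_t l1_pending, const uint64_t *l2)
{
    while (l1_pending) {
        unsigned int word_idx = __builtin_ffsll(l1_pending) - 1;
        uint64_t bits = l2[word_idx];

        while (bits) {
            unsigned int bit_idx = __builtin_ffsll(bits) - 1;

            handle_port(word_idx * BITS_PER_WORD + bit_idx);
            bits &= bits - 1; /* clear the lowest set bit */
        }
        l1_pending &= l1_pending - 1;
    }
}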
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6f02c18fa65c..6038c4c35db5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -33,6 +33,10 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/cpuhotplug.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
#ifdef CONFIG_X86
#include <asm/desc.h>
@@ -63,6 +67,66 @@
#include "events_internal.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "xen."
+
+/* Interrupt types. */
+enum xen_irq_type {
+ IRQT_UNBOUND = 0,
+ IRQT_PIRQ,
+ IRQT_VIRQ,
+ IRQT_IPI,
+ IRQT_EVTCHN
+};
+
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
+ * guest, or GSI (real passthrough IRQ) of the device.
+ * VIRQ - virq number
+ * IPI - IPI vector
+ * EVTCHN -
+ */
+struct irq_info {
+ struct list_head list;
+ struct list_head eoi_list;
+ short refcnt;
+ short spurious_cnt;
+ enum xen_irq_type type; /* type */
+ unsigned irq;
+ evtchn_port_t evtchn; /* event channel */
+ unsigned short cpu; /* cpu bound */
+ unsigned short eoi_cpu; /* EOI must happen on this cpu */
+ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
+ u64 eoi_time; /* Time in jiffies when to EOI. */
+
+ union {
+ unsigned short virq;
+ enum ipi_vector ipi;
+ struct {
+ unsigned short pirq;
+ unsigned short gsi;
+ unsigned char vector;
+ unsigned char flags;
+ uint16_t domid;
+ } pirq;
+ } u;
+};
+
+#define PIRQ_NEEDS_EOI (1 << 0)
+#define PIRQ_SHAREABLE (1 << 1)
+#define PIRQ_MSI_GROUP (1 << 2)
+
+static uint __read_mostly event_loop_timeout = 2;
+module_param(event_loop_timeout, uint, 0644);
+
+static uint __read_mostly event_eoi_delay = 10;
+module_param(event_eoi_delay, uint, 0644);
+
const struct evtchn_ops *evtchn_ops;
/*
@@ -71,6 +135,24 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
+/*
+ * Lock protecting event handling loop against removing event channels.
+ * Adding event channels is not an issue, as the associated IRQ becomes active
+ * only after everything is set up (before request_[threaded_]irq() the handler
+ * can't be entered for an event, as the event channel will be unmasked only
+ * then).
+ */
+static DEFINE_RWLOCK(evtchn_rwlock);
+
+/*
+ * Lock hierarchy:
+ *
+ * irq_mapping_update_lock
+ * evtchn_rwlock
+ * IRQ-desc lock
+ * percpu eoi_list_lock
+ */
+
static LIST_HEAD(xen_irq_list_head);
/* IRQ <-> VIRQ mapping. */
@@ -79,7 +161,7 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
-int **evtchn_to_irq;
+static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
@@ -95,17 +177,20 @@ static bool (*pirq_needs_eoi)(unsigned irq);
static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);
+static DEFINE_PER_CPU(unsigned int, irq_epoch);
+
static void clear_evtchn_to_irq_row(unsigned row)
{
unsigned col;
for (col = 0; col < EVTCHN_PER_ROW; col++)
- evtchn_to_irq[row][col] = -1;
+ WRITE_ONCE(evtchn_to_irq[row][col], -1);
}
static void clear_evtchn_to_irq_all(void)
@@ -142,7 +227,7 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[row][col] = irq;
+ WRITE_ONCE(evtchn_to_irq[row][col], irq);
return 0;
}
@@ -152,11 +237,11 @@ int get_evtchn_to_irq(evtchn_port_t evtchn)
return -1;
if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
return -1;
- return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
+ return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}
/* Get info for IRQ */
-struct irq_info *info_for_irq(unsigned irq)
+static struct irq_info *info_for_irq(unsigned irq)
{
if (irq < nr_legacy_irqs())
return legacy_info_ptrs[irq];
@@ -194,7 +279,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
- return xen_evtchn_port_setup(info);
+ return xen_evtchn_port_setup(evtchn);
}
static int xen_irq_info_evtchn_setup(unsigned irq,
@@ -261,10 +346,14 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
evtchn_port_t evtchn_from_irq(unsigned irq)
{
- if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
+ const struct irq_info *info = NULL;
+
+ if (likely(irq < nr_irqs))
+ info = info_for_irq(irq);
+ if (!info)
return 0;
- return info_for_irq(irq)->evtchn;
+ return info->evtchn;
}
unsigned int irq_from_evtchn(evtchn_port_t evtchn)
@@ -313,7 +402,7 @@ static enum xen_irq_type type_from_irq(unsigned irq)
return info_for_irq(irq)->type;
}
-unsigned cpu_from_irq(unsigned irq)
+static unsigned cpu_from_irq(unsigned irq)
{
return info_for_irq(irq)->cpu;
}
@@ -353,7 +442,7 @@ static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
#ifdef CONFIG_SMP
cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
- xen_evtchn_port_bind_to_cpu(info, cpu);
+ xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
info->cpu = cpu;
}
@@ -375,9 +464,157 @@ void notify_remote_via_irq(int irq)
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+struct lateeoi_work {
+ struct delayed_work delayed;
+ spinlock_t eoi_list_lock;
+ struct list_head eoi_list;
+};
+
+static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
+
+static void lateeoi_list_del(struct irq_info *info)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+ list_del_init(&info->eoi_list);
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void lateeoi_list_add(struct irq_info *info)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
+ struct irq_info *elem;
+ u64 now = get_jiffies_64();
+ unsigned long delay;
+ unsigned long flags;
+
+ if (now < info->eoi_time)
+ delay = info->eoi_time - now;
+ else
+ delay = 1;
+
+ spin_lock_irqsave(&eoi->eoi_list_lock, flags);
+
+ if (list_empty(&eoi->eoi_list)) {
+ list_add(&info->eoi_list, &eoi->eoi_list);
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, delay);
+ } else {
+ list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
+ if (elem->eoi_time <= info->eoi_time)
+ break;
+ }
+ list_add(&info->eoi_list, &elem->eoi_list);
+ }
+
+ spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
+}
+
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
+{
+ evtchn_port_t evtchn;
+ unsigned int cpu;
+ unsigned int delay = 0;
+
+ evtchn = info->evtchn;
+ if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
+ return;
+
+ if (spurious) {
+ if ((1 << info->spurious_cnt) < (HZ << 2))
+ info->spurious_cnt++;
+ if (info->spurious_cnt > 1) {
+ delay = 1 << (info->spurious_cnt - 2);
+ if (delay > HZ)
+ delay = HZ;
+ if (!info->eoi_time)
+ info->eoi_cpu = smp_processor_id();
+ info->eoi_time = get_jiffies_64() + delay;
+ }
+ } else {
+ info->spurious_cnt = 0;
+ }
+
+ cpu = info->eoi_cpu;
+ if (info->eoi_time &&
+ (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
+ lateeoi_list_add(info);
+ return;
+ }
+
+ info->eoi_time = 0;
+ unmask_evtchn(evtchn);
+}
+
+static void xen_irq_lateeoi_worker(struct work_struct *work)
+{
+ struct lateeoi_work *eoi;
+ struct irq_info *info;
+ u64 now = get_jiffies_64();
+ unsigned long flags;
+
+ eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
+
+ read_lock_irqsave(&evtchn_rwlock, flags);
+
+ while (true) {
+ spin_lock(&eoi->eoi_list_lock);
+
+ info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
+ eoi_list);
+
+ if (info == NULL || now < info->eoi_time) {
+ spin_unlock(&eoi->eoi_list_lock);
+ break;
+ }
+
+ list_del_init(&info->eoi_list);
+
+ spin_unlock(&eoi->eoi_list_lock);
+
+ info->eoi_time = 0;
+
+ xen_irq_lateeoi_locked(info, false);
+ }
+
+ if (info)
+ mod_delayed_work_on(info->eoi_cpu, system_wq,
+ &eoi->delayed, info->eoi_time - now);
+
+ read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+
+static void xen_cpu_init_eoi(unsigned int cpu)
+{
+ struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
+
+ INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
+ spin_lock_init(&eoi->eoi_list_lock);
+ INIT_LIST_HEAD(&eoi->eoi_list);
+}
+
+void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+{
+ struct irq_info *info;
+ unsigned long flags;
+
+ read_lock_irqsave(&evtchn_rwlock, flags);
+
+ info = info_for_irq(irq);
+
+ if (info)
+ xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
+
+ read_unlock_irqrestore(&evtchn_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+
static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
+
#ifdef CONFIG_SMP
/* By default all event channels notify CPU#0. */
cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
@@ -392,6 +629,7 @@ static void xen_irq_init(unsigned irq)
set_info_for_irq(irq, info);
+ INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
}
@@ -440,16 +678,24 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
+ unsigned long flags;
if (WARN_ON(!info))
return;
+ write_lock_irqsave(&evtchn_rwlock, flags);
+
+ if (!list_empty(&info->eoi_list))
+ lateeoi_list_del(info);
+
list_del(&info->list);
set_info_for_irq(irq, NULL);
WARN_ON(info->refcnt > 0);
+ write_unlock_irqrestore(&evtchn_rwlock, flags);
+
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
@@ -550,7 +796,7 @@ static unsigned int __startup_pirq(unsigned int irq)
info->evtchn = evtchn;
bind_evtchn_to_cpu(evtchn, 0);
- rc = xen_evtchn_port_setup(info);
+ rc = xen_evtchn_port_setup(evtchn);
if (rc)
goto err;
@@ -841,7 +1087,7 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
-int bind_evtchn_to_irq(evtchn_port_t evtchn)
+static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
int irq;
int ret;
@@ -858,7 +1104,7 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
if (irq < 0)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ irq_set_chip_and_handler_name(irq, chip,
handle_edge_irq, "event");
ret = xen_irq_info_evtchn_setup(irq, evtchn);
@@ -879,8 +1125,19 @@ out:
return irq;
}
+
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
+}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
@@ -922,8 +1179,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
return irq;
}
-int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- evtchn_port_t remote_port)
+static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ struct irq_chip *chip)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -934,9 +1192,17 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
&bind_interdomain);
- return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
+ return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
+ chip);
+}
+
+int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port)
+{
+ return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ &xen_lateeoi_chip);
}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{
@@ -1034,14 +1300,15 @@ static void unbind_from_irq(unsigned int irq)
mutex_unlock(&irq_mapping_update_lock);
}
-int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname, void *dev_id)
+static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id,
+ struct irq_chip *chip)
{
int irq, retval;
- irq = bind_evtchn_to_irq(evtchn);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1052,18 +1319,38 @@ int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
return irq;
}
+
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+ devname, dev_id,
+ &xen_dynamic_chip);
+}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
-int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- evtchn_port_t remote_port,
- irq_handler_t handler,
- unsigned long irqflags,
- const char *devname,
- void *dev_id)
+int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
+ devname, dev_id,
+ &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
+
+static int bind_interdomain_evtchn_to_irqhandler_chip(
+ unsigned int remote_domain, evtchn_port_t remote_port,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id, struct irq_chip *chip)
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
+ irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
+ chip);
if (irq < 0)
return irq;
@@ -1075,7 +1362,19 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
return irq;
}
-EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
+
+int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
+ evtchn_port_t remote_port,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+{
+ return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
+ remote_port, handler, irqflags, devname,
+ dev_id, &xen_lateeoi_chip);
+}
+EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
@@ -1189,7 +1488,7 @@ int evtchn_get(evtchn_port_t evtchn)
goto done;
err = -EINVAL;
- if (info->refcnt <= 0)
+ if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
goto done;
info->refcnt++;
@@ -1228,21 +1527,81 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
notify_remote_via_irq(irq);
}
+struct evtchn_loop_ctrl {
+ ktime_t timeout;
+ unsigned count;
+ bool defer_eoi;
+};
+
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+{
+ int irq;
+ struct irq_info *info;
+
+ irq = get_evtchn_to_irq(port);
+ if (irq == -1)
+ return;
+
+ /*
+ * Check for timeout every 256 events.
+ * We are setting the timeout value only after the first 256
+ * events in order to not hurt the common case of few loop
+ * iterations. The 256 is basically an arbitrary value.
+ *
+ * Once we hit the timeout we need to defer all further EOIs in
+ * order to ensure we leave the event handling loop sooner rather
+ * than later.
+ */
+ if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
+ ktime_t kt = ktime_get();
+
+ if (!ctrl->timeout) {
+ kt = ktime_add_ms(kt,
+ jiffies_to_msecs(event_loop_timeout));
+ ctrl->timeout = kt;
+ } else if (kt > ctrl->timeout) {
+ ctrl->defer_eoi = true;
+ }
+ }
+
+ info = info_for_irq(irq);
+
+ if (ctrl->defer_eoi) {
+ info->eoi_cpu = smp_processor_id();
+ info->irq_epoch = __this_cpu_read(irq_epoch);
+ info->eoi_time = get_jiffies_64() + event_eoi_delay;
+ }
+
+ generic_handle_irq(irq);
+}
+
static void __xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int cpu = smp_processor_id();
+ struct evtchn_loop_ctrl ctrl = { 0 };
+
+ read_lock(&evtchn_rwlock);
do {
vcpu_info->evtchn_upcall_pending = 0;
- xen_evtchn_handle_events(cpu);
+ xen_evtchn_handle_events(cpu, &ctrl);
BUG_ON(!irqs_disabled());
virt_rmb(); /* Hypervisor can set upcall pending. */
} while (vcpu_info->evtchn_upcall_pending);
+
+ read_unlock(&evtchn_rwlock);
+
+ /*
+ * Increment irq_epoch only now to defer EOIs only for
+ * xen_irq_lateeoi() invocations occurring from inside the loop
+ * above.
+ */
+ __this_cpu_inc(irq_epoch);
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
@@ -1606,6 +1965,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.irq_retrigger = retrigger_dynirq,
};
+static struct irq_chip xen_lateeoi_chip __read_mostly = {
+ /* The chip name needs to contain "xen-dyn" for irqbalance to work. */
+ .name = "xen-dyn-lateeoi",
+
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
+
+ .irq_ack = mask_ack_dynirq,
+ .irq_mask_ack = mask_ack_dynirq,
+
+ .irq_set_affinity = set_affinity_irq,
+ .irq_retrigger = retrigger_dynirq,
+};
+
static struct irq_chip xen_pirq_chip __read_mostly = {
.name = "xen-pirq",
@@ -1676,21 +2050,48 @@ void xen_setup_callback_vector(void) {}
static inline void xen_alloc_callback_vector(void) {}
#endif
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "xen."
+bool xen_fifo_events = true;
+module_param_named(fifo_events, xen_fifo_events, bool, 0);
+
+static int xen_evtchn_cpu_prepare(unsigned int cpu)
+{
+ int ret = 0;
+
+ xen_cpu_init_eoi(cpu);
+
+ if (evtchn_ops->percpu_init)
+ ret = evtchn_ops->percpu_init(cpu);
+
+ return ret;
+}
+
+static int xen_evtchn_cpu_dead(unsigned int cpu)
+{
+ int ret = 0;
-static bool fifo_events = true;
-module_param(fifo_events, bool, 0);
+ if (evtchn_ops->percpu_deinit)
+ ret = evtchn_ops->percpu_deinit(cpu);
+
+ return ret;
+}
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
evtchn_port_t evtchn;
- if (fifo_events)
+ if (xen_fifo_events)
ret = xen_evtchn_fifo_init();
- if (ret < 0)
+ if (ret < 0) {
xen_evtchn_2l_init();
+ xen_fifo_events = false;
+ }
+
+ xen_cpu_init_eoi(smp_processor_id());
+
+ cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
+ "xen/evtchn:prepare",
+ xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
sizeof(*evtchn_to_irq), GFP_KERNEL);
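The new xen-dyn-lateeoi chip and the *_lateeoi bind variants above change the EOI contract: the channel stays masked after delivery until the driver calls xen_irq_lateeoi(), passing XEN_EOI_FLAG_SPURIOUS when no work was found so the core can throttle a flooding frontend. A minimal backend-side sketch, with a hypothetical do_ring_work():

/* Backend-side sketch of the lateeoi contract; do_ring_work() is a
 * placeholder returning true when requests were actually processed. */
#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/xenbus.h>

static bool do_ring_work(void *dev_id);

static irqreturn_t my_backend_event(int irq, void *dev_id)
{
    unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

    if (do_ring_work(dev_id))
        eoi_flags = 0;

    /* The channel stays masked until this call; spurious wakeups are
     * reported so the core can throttle a flooding frontend. */
    xen_irq_lateeoi(irq, eoi_flags);
    return IRQ_HANDLED;
}

static int my_backend_connect(struct xenbus_device *dev, evtchn_port_t evtchn)
{
    int irq = bind_interdomain_evtchn_to_irqhandler_lateeoi(
            dev->otherend_id, evtchn, my_backend_event, 0,
            "my-backend", dev);

    return irq < 0 ? irq : 0;
}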
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index c60ee0450173..b234f1766810 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -138,9 +138,8 @@ static void init_array_page(event_word_t *array_page)
array_page[i] = 1 << EVTCHN_FIFO_MASKED;
}
-static int evtchn_fifo_setup(struct irq_info *info)
+static int evtchn_fifo_setup(evtchn_port_t port)
{
- evtchn_port_t port = info->evtchn;
unsigned new_array_pages;
int ret;
@@ -186,7 +185,8 @@ static int evtchn_fifo_setup(struct irq_info *info)
return ret;
}
-static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
+static void evtchn_fifo_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+ unsigned int old_cpu)
{
/* no-op */
}
@@ -227,19 +227,28 @@ static bool evtchn_fifo_is_masked(evtchn_port_t port)
return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
/*
- * Clear MASKED, spinning if BUSY is set.
+ * Clear MASKED if not PENDING, spinning if BUSY is set.
+ * Return true if mask was cleared.
*/
-static void clear_masked(volatile event_word_t *word)
+static bool clear_masked_cond(volatile event_word_t *word)
{
event_word_t new, old, w;
w = *word;
do {
+ if (!(w & (1 << EVTCHN_FIFO_MASKED)))
+ return true;
+
+ if (w & (1 << EVTCHN_FIFO_PENDING))
+ return false;
+
old = w & ~(1 << EVTCHN_FIFO_BUSY);
new = old & ~(1 << EVTCHN_FIFO_MASKED);
w = sync_cmpxchg(word, old, new);
} while (w != old);
+
+ return true;
}
static void evtchn_fifo_unmask(evtchn_port_t port)
@@ -248,8 +257,7 @@ static void evtchn_fifo_unmask(evtchn_port_t port)
BUG_ON(!irqs_disabled());
- clear_masked(word);
- if (evtchn_fifo_is_pending(port)) {
+ if (!clear_masked_cond(word)) {
struct evtchn_unmask unmask = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
}
@@ -270,19 +278,9 @@ static uint32_t clear_linked(volatile event_word_t *word)
return w & EVTCHN_FIFO_LINK_MASK;
}
-static void handle_irq_for_port(evtchn_port_t port)
-{
- int irq;
-
- irq = get_evtchn_to_irq(port);
- if (irq != -1)
- generic_handle_irq(irq);
-}
-
-static void consume_one_event(unsigned cpu,
+static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, unsigned long *ready,
- bool drop)
+ unsigned priority, unsigned long *ready)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -315,16 +313,17 @@ static void consume_one_event(unsigned cpu,
clear_bit(priority, ready);
if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
- if (unlikely(drop))
+ if (unlikely(!ctrl))
pr_warn("Dropping pending event for port %u\n", port);
else
- handle_irq_for_port(port);
+ handle_irq_for_port(port, ctrl);
}
q->head[priority] = head;
}
-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+static void __evtchn_fifo_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -336,14 +335,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
- consume_one_event(cpu, control_block, q, &ready, drop);
+ consume_one_event(cpu, ctrl, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}
}
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void evtchn_fifo_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
- __evtchn_fifo_handle_events(cpu, false);
+ __evtchn_fifo_handle_events(cpu, ctrl);
}
static void evtchn_fifo_resume(void)
@@ -380,21 +380,6 @@ static void evtchn_fifo_resume(void)
event_array_pages = 0;
}
-static const struct evtchn_ops evtchn_ops_fifo = {
- .max_channels = evtchn_fifo_max_channels,
- .nr_channels = evtchn_fifo_nr_channels,
- .setup = evtchn_fifo_setup,
- .bind_to_cpu = evtchn_fifo_bind_to_cpu,
- .clear_pending = evtchn_fifo_clear_pending,
- .set_pending = evtchn_fifo_set_pending,
- .is_pending = evtchn_fifo_is_pending,
- .test_and_set_mask = evtchn_fifo_test_and_set_mask,
- .mask = evtchn_fifo_mask,
- .unmask = evtchn_fifo_unmask,
- .handle_events = evtchn_fifo_handle_events,
- .resume = evtchn_fifo_resume,
-};
-
static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
void *control_block = NULL;
@@ -417,19 +402,36 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
return ret;
}
-static int xen_evtchn_cpu_prepare(unsigned int cpu)
+static int evtchn_fifo_percpu_init(unsigned int cpu)
{
if (!per_cpu(cpu_control_block, cpu))
return evtchn_fifo_alloc_control_block(cpu);
return 0;
}
-static int xen_evtchn_cpu_dead(unsigned int cpu)
+static int evtchn_fifo_percpu_deinit(unsigned int cpu)
{
- __evtchn_fifo_handle_events(cpu, true);
+ __evtchn_fifo_handle_events(cpu, NULL);
return 0;
}
+static const struct evtchn_ops evtchn_ops_fifo = {
+ .max_channels = evtchn_fifo_max_channels,
+ .nr_channels = evtchn_fifo_nr_channels,
+ .setup = evtchn_fifo_setup,
+ .bind_to_cpu = evtchn_fifo_bind_to_cpu,
+ .clear_pending = evtchn_fifo_clear_pending,
+ .set_pending = evtchn_fifo_set_pending,
+ .is_pending = evtchn_fifo_is_pending,
+ .test_and_set_mask = evtchn_fifo_test_and_set_mask,
+ .mask = evtchn_fifo_mask,
+ .unmask = evtchn_fifo_unmask,
+ .handle_events = evtchn_fifo_handle_events,
+ .resume = evtchn_fifo_resume,
+ .percpu_init = evtchn_fifo_percpu_init,
+ .percpu_deinit = evtchn_fifo_percpu_deinit,
+};
+
int __init xen_evtchn_fifo_init(void)
{
int cpu = smp_processor_id();
@@ -443,9 +445,5 @@ int __init xen_evtchn_fifo_init(void)
evtchn_ops = &evtchn_ops_fifo;
- cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
- "xen/evtchn:prepare",
- xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
-
return ret;
}
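clear_masked_cond() above is the usual lock-free read/modify/compare-exchange retry loop, re-evaluating its predicate on every iteration because the word may change underneath it. Stripped to the idiom (FLAG and COND are placeholder bits; the original's extra detail of keeping BUSY clear in the expected value, so the loop spins while BUSY is set, is omitted):

/* The bare cmpxchg-retry idiom: atomically clear FLAG unless COND is
 * set. sync_cmpxchg() returns the value actually found at *word. */
static bool clear_flag_unless_cond(volatile event_word_t *word)
{
    event_word_t new, old, w = *word;

    do {
        if (!(w & FLAG))
            return true;  /* already clear: nothing to do */
        if (w & COND)
            return false; /* caller must take the slow path */
        old = w;
        new = w & ~FLAG;
        w = sync_cmpxchg(word, old, new);
    } while (w != old);   /* lost a race: retry with the fresh value */

    return true;
}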
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 10684feb094e..0a97c0549db7 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -7,58 +7,15 @@
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__
-/* Interrupt types. */
-enum xen_irq_type {
- IRQT_UNBOUND = 0,
- IRQT_PIRQ,
- IRQT_VIRQ,
- IRQT_IPI,
- IRQT_EVTCHN
-};
-
-/*
- * Packed IRQ information:
- * type - enum xen_irq_type
- * event channel - irq->event channel mapping
- * cpu - cpu this event channel is bound to
- * index - type-specific information:
- * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
- * guest, or GSI (real passthrough IRQ) of the device.
- * VIRQ - virq number
- * IPI - IPI vector
- * EVTCHN -
- */
-struct irq_info {
- struct list_head list;
- int refcnt;
- enum xen_irq_type type; /* type */
- unsigned irq;
- evtchn_port_t evtchn; /* event channel */
- unsigned short cpu; /* cpu bound */
-
- union {
- unsigned short virq;
- enum ipi_vector ipi;
- struct {
- unsigned short pirq;
- unsigned short gsi;
- unsigned char vector;
- unsigned char flags;
- uint16_t domid;
- } pirq;
- } u;
-};
-
-#define PIRQ_NEEDS_EOI (1 << 0)
-#define PIRQ_SHAREABLE (1 << 1)
-#define PIRQ_MSI_GROUP (1 << 2)
+struct evtchn_loop_ctrl;
struct evtchn_ops {
unsigned (*max_channels)(void);
unsigned (*nr_channels)(void);
- int (*setup)(struct irq_info *info);
- void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
+ int (*setup)(evtchn_port_t port);
+ void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
+ unsigned int old_cpu);
void (*clear_pending)(evtchn_port_t port);
void (*set_pending)(evtchn_port_t port);
@@ -67,17 +24,18 @@ struct evtchn_ops {
void (*mask)(evtchn_port_t port);
void (*unmask)(evtchn_port_t port);
- void (*handle_events)(unsigned cpu);
+ void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
void (*resume)(void);
+
+ int (*percpu_init)(unsigned int cpu);
+ int (*percpu_deinit)(unsigned int cpu);
};
extern const struct evtchn_ops *evtchn_ops;
-extern int **evtchn_to_irq;
int get_evtchn_to_irq(evtchn_port_t evtchn);
+void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
-struct irq_info *info_for_irq(unsigned irq);
-unsigned cpu_from_irq(unsigned irq);
unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
static inline unsigned xen_evtchn_max_channels(void)
@@ -89,17 +47,18 @@ static inline unsigned xen_evtchn_max_channels(void)
* Do any ABI specific setup for a bound event channel before it can
* be unmasked and used.
*/
-static inline int xen_evtchn_port_setup(struct irq_info *info)
+static inline int xen_evtchn_port_setup(evtchn_port_t evtchn)
{
if (evtchn_ops->setup)
- return evtchn_ops->setup(info);
+ return evtchn_ops->setup(evtchn);
return 0;
}
-static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
- unsigned cpu)
+static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn,
+ unsigned int cpu,
+ unsigned int old_cpu)
{
- evtchn_ops->bind_to_cpu(info, cpu);
+ evtchn_ops->bind_to_cpu(evtchn, cpu, old_cpu);
}
static inline void clear_evtchn(evtchn_port_t port)
@@ -132,9 +91,10 @@ static inline void unmask_evtchn(evtchn_port_t port)
return evtchn_ops->unmask(port);
}
-static inline void xen_evtchn_handle_events(unsigned cpu)
+static inline void xen_evtchn_handle_events(unsigned cpu,
+ struct evtchn_loop_ctrl *ctrl)
{
- return evtchn_ops->handle_events(cpu);
+ return evtchn_ops->handle_events(cpu, ctrl);
}
static inline void xen_evtchn_resume(void)
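The ops table gains two optional per-cpu hooks, so an ABI backend that needs per-cpu state wires them here instead of registering its own CPU hotplug callbacks. A skeleton sketch, where all my_abi_* names are placeholders:

/* Skeleton of an event-channel ABI backend after this change. */
static int my_abi_percpu_init(unsigned int cpu)
{
    /* allocate per-cpu state; called from the common hotplug callback */
    return 0;
}

static int my_abi_percpu_deinit(unsigned int cpu)
{
    /* drain pending events and free per-cpu state */
    return 0;
}

static void my_abi_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
    /* for each pending port: handle_irq_for_port(port, ctrl); */
}

static const struct evtchn_ops evtchn_ops_my_abi = {
    .handle_events = my_abi_handle_events,
    .percpu_init   = my_abi_percpu_init,
    .percpu_deinit = my_abi_percpu_deinit,
    /* .max_channels, .setup, .mask, .unmask, ... as required */
};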
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6e0b1dd5573c..5dc016d68f83 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -167,7 +167,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
"Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
- disable_irq_nosync(irq);
evtchn->enabled = false;
spin_lock(&u->ring_prod_lock);
@@ -293,7 +292,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
evtchn = find_evtchn(u, port);
if (evtchn && !evtchn->enabled) {
evtchn->enabled = true;
- enable_irq(irq_from_evtchn(port));
+ xen_irq_lateeoi(irq_from_evtchn(port), 0);
}
}
@@ -393,8 +392,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
if (rc < 0)
goto err;
- rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
- u->name, evtchn);
+ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
+ u->name, evtchn);
if (rc < 0)
goto err;
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index b1b6eebafd5d..4c13cbc99896 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
if (sgt) {
if (gntdev_dmabuf_attach->dir != DMA_NONE)
- dma_unmap_sg_attrs(attach->dev, sgt->sgl,
- sgt->nents,
- gntdev_dmabuf_attach->dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(attach->dev, sgt,
+ gntdev_dmabuf_attach->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
}
@@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
gntdev_dmabuf->nr_pages);
if (!IS_ERR(sgt)) {
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+ if (dma_map_sgtable(attach->dev, sgt, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
@@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
/* Now convert sgt to array of pages and check for page validity. */
i = 0;
- for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
+ for_each_sgtable_page(sgt, &sg_iter, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
/*
* Check if page is valid: this can happen if we are given
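The conversion above also changes the error convention: dma_map_sg_attrs() returns the number of mapped entries (0 on failure), while dma_map_sgtable() returns 0 on success or a negative errno and keeps sgt->nents consistent for the unmap side. A sketch of the new pairing, with a hypothetical map_buffer():

/* Sketch of the sg_table mapping pair; map_buffer() is a placeholder. */
#include <linux/dma-mapping.h>

static int map_buffer(struct device *dev, struct sg_table *sgt,
                      enum dma_data_direction dir)
{
    int ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);

    if (ret) /* 0 on success, -errno on failure */
        return ret;

    /* ... hand sgt to the device; sgt->nents now holds the mapped count ... */

    dma_unmap_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
    return 0;
}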
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 64a9025a87be..a36b71286bcf 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -720,17 +720,18 @@ struct gntdev_copy_batch {
s16 __user *status[GNTDEV_COPY_BATCH];
unsigned int nr_ops;
unsigned int nr_pages;
+ bool writeable;
};
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
- bool writeable, unsigned long *gfn)
+ unsigned long *gfn)
{
unsigned long addr = (unsigned long)virt;
struct page *page;
unsigned long xen_pfn;
int ret;
- ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
+ ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
if (ret < 0)
return ret;
@@ -744,11 +745,9 @@ static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
- unsigned int i;
-
- for (i = 0; i < batch->nr_pages; i++)
- put_page(batch->pages[i]);
+ unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
batch->nr_pages = 0;
+ batch->writeable = false;
}
static int gntdev_copy(struct gntdev_copy_batch *batch)
@@ -837,8 +836,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
virt = seg->source.virt + copied;
off = (unsigned long)virt & ~XEN_PAGE_MASK;
len = min(len, (size_t)XEN_PAGE_SIZE - off);
+ batch->writeable = false;
- ret = gntdev_get_page(batch, virt, false, &gfn);
+ ret = gntdev_get_page(batch, virt, &gfn);
if (ret < 0)
return ret;
@@ -856,8 +856,9 @@ static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
virt = seg->dest.virt + copied;
off = (unsigned long)virt & ~XEN_PAGE_MASK;
len = min(len, (size_t)XEN_PAGE_SIZE - off);
+ batch->writeable = true;
- ret = gntdev_get_page(batch, virt, true, &gfn);
+ ret = gntdev_get_page(batch, virt, &gfn);
if (ret < 0)
return ret;
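The gntdev conversion above replaces get_user_pages_fast()/put_page() with pin_user_pages_fast()/unpin_user_pages_dirty_lock(), which tracks the pages as DMA-pinned and handles the dirty marking for writable pins in one call; caching the write intent in the batch, as the patch does with batch->writeable, lets the unpin side reuse it. A sketch of the pairing (copy_via_pinned_page() is a placeholder):

/* Sketch of the pin/unpin pairing. */
#include <linux/mm.h>

static int copy_via_pinned_page(void __user *virt, bool writeable)
{
    struct page *page;
    int ret = pin_user_pages_fast((unsigned long)virt, 1,
                                  writeable ? FOLL_WRITE : 0, &page);

    if (ret < 0)
        return ret;

    /* ... access the page contents ... */

    /* Marks the page dirty (for writable pins) and drops the pin. */
    unpin_user_pages_dirty_lock(&page, 1, writeable);
    return 0;
}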
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 9eae1fceec1e..a7d293fa8d14 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -66,6 +66,7 @@ struct sock_mapping {
atomic_t write;
atomic_t io;
atomic_t release;
+ atomic_t eoi;
void (*saved_data_ready)(struct sock *sk);
struct pvcalls_ioworker ioworker;
};
@@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev,
struct pvcalls_fedata *fedata,
struct sock_mapping *map);
-static void pvcalls_conn_back_read(void *opaque)
+static bool pvcalls_conn_back_read(void *opaque)
{
struct sock_mapping *map = (struct sock_mapping *)opaque;
struct msghdr msg;
@@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque)
virt_mb();
if (error)
- return;
+ return false;
size = pvcalls_queued(prod, cons, array_size);
if (size >= array_size)
- return;
+ return false;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
atomic_set(&map->read, 0);
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
flags);
- return;
+ return true;
}
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
wanted = array_size - size;
@@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque)
ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
WARN_ON(ret > wanted);
if (ret == -EAGAIN) /* shouldn't happen */
- return;
+ return true;
if (!ret)
ret = -ENOTCONN;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
@@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque)
virt_wmb();
notify_remote_via_irq(map->irq);
- return;
+ return true;
}
-static void pvcalls_conn_back_write(struct sock_mapping *map)
+static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
struct pvcalls_data_intf *intf = map->ring;
struct pvcalls_data *data = &map->data;
@@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
size = pvcalls_queued(prod, cons, array_size);
if (size == 0)
- return;
+ return false;
memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT;
@@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
atomic_set(&map->write, 0);
ret = inet_sendmsg(map->sock, &msg, size);
- if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
+ if (ret == -EAGAIN) {
atomic_inc(&map->write);
atomic_inc(&map->io);
+ return true;
}
- if (ret == -EAGAIN)
- return;
/* write the data, then update the indexes */
virt_wmb();
@@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
}
/* update the indexes, then notify the other end */
virt_wmb();
- if (prod != cons + ret)
+ if (prod != cons + ret) {
atomic_inc(&map->write);
+ atomic_inc(&map->io);
+ }
notify_remote_via_irq(map->irq);
+
+ return true;
}
static void pvcalls_back_ioworker(struct work_struct *work)
@@ -227,6 +231,7 @@ static void pvcalls_back_ioworker(struct work_struct *work)
struct pvcalls_ioworker, register_work);
struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
ioworker);
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
while (atomic_read(&map->io) > 0) {
if (atomic_read(&map->release) > 0) {
@@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work)
return;
}
- if (atomic_read(&map->read) > 0)
- pvcalls_conn_back_read(map);
- if (atomic_read(&map->write) > 0)
- pvcalls_conn_back_write(map);
+ if (atomic_read(&map->read) > 0 &&
+ pvcalls_conn_back_read(map))
+ eoi_flags = 0;
+ if (atomic_read(&map->write) > 0 &&
+ pvcalls_conn_back_write(map))
+ eoi_flags = 0;
+
+ if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
+ atomic_set(&map->eoi, 0);
+ xen_irq_lateeoi(map->irq, eoi_flags);
+ eoi_flags = XEN_EOI_FLAG_SPURIOUS;
+ }
atomic_dec(&map->io);
}
@@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket(
goto out;
map->bytes = page;
- ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
- evtchn,
- pvcalls_back_conn_event,
- 0,
- "pvcalls-backend",
- map);
+ ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
+ fedata->dev->otherend_id, evtchn,
+ pvcalls_back_conn_event, 0, "pvcalls-backend", map);
if (ret < 0)
goto out;
map->irq = ret;
@@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
struct xenbus_device *dev = dev_id;
struct pvcalls_fedata *fedata = NULL;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
- if (dev == NULL)
- return IRQ_HANDLED;
+ if (dev) {
+ fedata = dev_get_drvdata(&dev->dev);
+ if (fedata) {
+ pvcalls_back_work(fedata);
+ eoi_flags = 0;
+ }
+ }
- fedata = dev_get_drvdata(&dev->dev);
- if (fedata == NULL)
- return IRQ_HANDLED;
+ xen_irq_lateeoi(irq, eoi_flags);
- pvcalls_back_work(fedata);
return IRQ_HANDLED;
}
@@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
struct pvcalls_ioworker *iow;
if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
- map->sock->sk->sk_user_data != map)
+ map->sock->sk->sk_user_data != map) {
+ xen_irq_lateeoi(irq, 0);
return IRQ_HANDLED;
+ }
iow = &map->ioworker;
atomic_inc(&map->write);
+ atomic_inc(&map->eoi);
atomic_inc(&map->io);
queue_work(iow->wq, &iow->register_work);
@@ -932,7 +948,7 @@ static int backend_connect(struct xenbus_device *dev)
goto error;
}
- err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
if (err < 0)
goto error;
fedata->irq = err;
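pvcalls cannot EOI from the interrupt handler because the real work happens in the ioworker, so the patch counts owed EOIs in map->eoi and lets the worker issue xen_irq_lateeoi() once it has made progress. A simplified sketch of that handoff; struct conn and do_io() are placeholders, and the patch additionally waits for outstanding writes to drain before the EOI:

/* Simplified hardirq-to-worker EOI handoff. */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <xen/events.h>

struct conn {
    int irq;
    atomic_t work, eoi;
    struct workqueue_struct *wq;
    struct work_struct worker;
};

static bool do_io(struct conn *c);

static irqreturn_t conn_event(int irq, void *data)
{
    struct conn *c = data;

    atomic_inc(&c->work); /* note pending work */
    atomic_inc(&c->eoi);  /* note an EOI is owed */
    queue_work(c->wq, &c->worker);
    return IRQ_HANDLED;
}

static void conn_worker(struct work_struct *w)
{
    struct conn *c = container_of(w, struct conn, worker);
    unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

    while (atomic_read(&c->work) > 0) {
        if (do_io(c))
            eoi_flags = 0;
        if (atomic_read(&c->eoi) > 0) {
            atomic_set(&c->eoi, 0);
            xen_irq_lateeoi(c->irq, eoi_flags);
            eoi_flags = XEN_EOI_FLAG_SPURIOUS;
        }
        atomic_dec(&c->work);
    }
}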
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 72d725a0ab5c..7984645b5956 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -371,7 +371,7 @@ out:
static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn)
{
void *bytes;
- int ret = -ENOMEM, irq = -1, i;
+ int ret, irq = -1, i;
*evtchn = 0;
init_waitqueue_head(&map->active.inflight_conn_req);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 39a0f2e0847c..2b385c1b4a99 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -28,7 +28,7 @@
#include <linux/memblock.h>
#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
*/
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
- map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
- phys, size, size, dir, attrs);
+ map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
if (map == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
@@ -578,4 +577,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.dma_supported = xen_swiotlb_dma_supported,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index 3b98dc921426..8c512ea550bb 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -18,27 +18,38 @@ static unsigned int list_count;
static int fill_list(unsigned int nr_pages)
{
struct dev_pagemap *pgmap;
+ struct resource *res;
void *vaddr;
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
- int ret;
+ int ret = -ENOMEM;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
if (!pgmap)
- return -ENOMEM;
+ goto err_pgmap;
pgmap->type = MEMORY_DEVICE_GENERIC;
- pgmap->res.name = "Xen scratch";
- pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->name = "Xen scratch";
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, &pgmap->res,
+ ret = allocate_resource(&iomem_resource, res,
alloc_pages * PAGE_SIZE, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new IOMEM resource\n");
- kfree(pgmap);
- return ret;
+ goto err_resource;
}
+ pgmap->range = (struct range) {
+ .start = res->start,
+ .end = res->end,
+ };
+ pgmap->nr_range = 1;
+ pgmap->owner = res;
+
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* memremap will build page tables for the new memory so
@@ -50,14 +61,13 @@ static int fill_list(unsigned int nr_pages)
* conflict with any devices.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+ xen_pfn_t pfn = PFN_DOWN(res->start);
for (i = 0; i < alloc_pages; i++) {
if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
pr_warn("set_phys_to_machine() failed, no memory added\n");
- release_resource(&pgmap->res);
- kfree(pgmap);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_memremap;
}
}
}
@@ -66,9 +76,8 @@ static int fill_list(unsigned int nr_pages)
vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
if (IS_ERR(vaddr)) {
pr_err("Cannot remap memory range\n");
- release_resource(&pgmap->res);
- kfree(pgmap);
- return PTR_ERR(vaddr);
+ ret = PTR_ERR(vaddr);
+ goto err_memremap;
}
for (i = 0; i < alloc_pages; i++) {
@@ -80,6 +89,14 @@ static int fill_list(unsigned int nr_pages)
}
return 0;
+
+err_memremap:
+ release_resource(res);
+err_resource:
+ kfree(pgmap);
+err_pgmap:
+ kfree(res);
+ return ret;
}
/**
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index e876c3d6dad1..cb904ac83006 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -734,10 +734,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
wmb();
notify_remote_via_irq(pdev->evtchn_irq);
+ /* Enable IRQ to signal "request done". */
+ xen_pcibk_lateeoi(pdev, 0);
+
ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
!(test_bit(_XEN_PCIB_active, (unsigned long *)
&sh_info->flags)), 300*HZ);
+ /* Enable IRQ for pcifront request if not already active. */
+ if (!test_bit(_PDEVF_op_active, &pdev->flags))
+ xen_pcibk_lateeoi(pdev, 0);
+
if (!ret) {
if (test_bit(_XEN_PCIB_active,
(unsigned long *)&sh_info->flags)) {
@@ -751,12 +758,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
- if (test_bit(_XEN_PCIF_active,
- (unsigned long *)&sh_info->flags)) {
- dev_dbg(&psdev->dev->dev, "schedule pci_conf service\n");
- xen_pcibk_test_and_schedule_op(psdev->pdev);
- }
-
res = (pci_ers_result_t)aer_op->err;
return res;
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f1ed2dbf685c..95e28ee48d52 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
+#include <xen/events.h>
#include <xen/interface/io/pciif.h>
#define DRV_NAME "xen-pciback"
@@ -27,6 +28,8 @@ struct pci_dev_entry {
#define PDEVF_op_active (1<<(_PDEVF_op_active))
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
+#define _EOI_pending (2)
+#define EOI_pending (1<<(_EOI_pending))
struct xen_pcibk_device {
void *pci_dev_data;
@@ -183,10 +186,15 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);
+static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
+ unsigned int eoi_flag)
+{
+ if (test_and_clear_bit(_EOI_pending, &pdev->flags))
+ xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
+}
+
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
-
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
/* Handles shared IRQs that can go to the device domain and the control domain. */
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index e11a7438e1a2..3fbc21466a93 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -276,26 +276,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
return 0;
}
#endif
+
+static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
+{
+ return test_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags) &&
+ !test_and_set_bit(_PDEVF_op_active, &pdev->flags);
+}
+
/*
* Now the same evtchn is used for both pcifront conf_read_write request
* as well as pcie aer front end ack. We use a new work_queue to schedule
* xen_pcibk conf_read_write service for avoiding conflict with aer_core's
* do_recovery job, which also uses the system default work_queue
*/
-void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
+ bool eoi = true;
+
/* Check that frontend is requesting an operation and that we are not
* already processing a request */
- if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
- && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
+ if (xen_pcibk_test_op_pending(pdev)) {
schedule_work(&pdev->op_work);
+ eoi = false;
}
/* _XEN_PCIB_active should have been cleared by pcifront. Also make
sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
&& test_bit(_PCIB_op_pending, &pdev->flags)) {
wake_up(&xen_pcibk_aer_wait_queue);
+ eoi = false;
}
+
+ /* EOI if there was nothing to do. */
+ if (eoi)
+ xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
}
/* Performing the configuration space reads/writes must not be done in atomic
@@ -303,10 +318,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct xen_pcibk_device as a parameter */
-void xen_pcibk_do_op(struct work_struct *data)
+static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
{
- struct xen_pcibk_device *pdev =
- container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
@@ -379,16 +392,31 @@ void xen_pcibk_do_op(struct work_struct *data)
smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
clear_bit(_PDEVF_op_active, &pdev->flags);
smp_mb__after_atomic(); /* /before/ final check for work */
+}
- /* Check to see if the driver domain tried to start another request in
- * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
- */
- xen_pcibk_test_and_schedule_op(pdev);
+void xen_pcibk_do_op(struct work_struct *data)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(data, struct xen_pcibk_device, op_work);
+
+ do {
+ xen_pcibk_do_one_op(pdev);
+ } while (xen_pcibk_test_op_pending(pdev));
+
+ xen_pcibk_lateeoi(pdev, 0);
}
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
struct xen_pcibk_device *pdev = dev_id;
+ bool eoi;
+
+ /* IRQs might come in before pdev->evtchn_irq is written. */
+ if (unlikely(pdev->evtchn_irq != irq))
+ pdev->evtchn_irq = irq;
+
+ eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
+ WARN(eoi, "IRQ while EOI pending\n");
xen_pcibk_test_and_schedule_op(pdev);
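The handler/worker pair above implements a single-owed-EOI protocol: the hard interrupt sets _EOI_pending (safe because the lateeoi chip keeps the channel masked until the EOI), and whichever path finds no more work clears the bit and issues exactly one xen_irq_lateeoi(). Reduced to its core, with names taken from the patch:

/* Core of the owed-EOI protocol (reduced sketch). */
static irqreturn_t handle_event(int irq, void *dev_id)
{
    struct xen_pcibk_device *pdev = dev_id;

    /* Cannot already be set: the channel stays masked until the EOI. */
    WARN(test_and_set_bit(_EOI_pending, &pdev->flags),
         "IRQ while EOI pending\n");
    xen_pcibk_test_and_schedule_op(pdev);
    return IRQ_HANDLED;
}

static void op_work(struct work_struct *work)
{
    struct xen_pcibk_device *pdev =
        container_of(work, struct xen_pcibk_device, op_work);

    do {
        xen_pcibk_do_one_op(pdev);
    } while (xen_pcibk_test_op_pending(pdev)); /* close the race window */

    xen_pcibk_lateeoi(pdev, 0); /* test_and_clear_bit() guards the EOI */
}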
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index b500466a6c37..4b99ec3dec58 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -123,7 +123,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
pdev->sh_info = vaddr;
- err = bind_interdomain_evtchn_to_irqhandler(
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
0, DRV_NAME, pdev);
if (err < 0) {
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 1e8cfd80a4e6..4acc4e899600 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -91,7 +91,6 @@ struct vscsibk_info {
unsigned int irq;
struct vscsiif_back_ring ring;
- int ring_error;
spinlock_t ring_lock;
atomic_t nr_unreplied_reqs;
@@ -722,7 +721,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
return pending_req;
}
-static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+static int scsiback_do_cmd_fn(struct vscsibk_info *info,
+ unsigned int *eoi_flags)
{
struct vscsiif_back_ring *ring = &info->ring;
struct vscsiif_request ring_req;
@@ -739,11 +739,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
rc = ring->rsp_prod_pvt;
pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
info->domid, rp, rc, rp - rc);
- info->ring_error = 1;
- return 0;
+ return -EINVAL;
}
while ((rc != rp)) {
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
@@ -802,13 +803,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
struct vscsibk_info *info = dev_id;
+ int rc;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
- if (info->ring_error)
- return IRQ_HANDLED;
-
- while (scsiback_do_cmd_fn(info))
+ while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
cond_resched();
+ /* In case of a ring error we keep the event channel masked. */
+ if (!rc)
+ xen_irq_lateeoi(irq, eoi_flags);
+
return IRQ_HANDLED;
}
@@ -829,7 +833,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
sring = (struct vscsiif_sring *)area;
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
+ err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
if (err < 0)
goto unmap_page;
@@ -1253,7 +1257,6 @@ static int scsiback_probe(struct xenbus_device *dev,
info->domid = dev->otherend_id;
spin_lock_init(&info->ring_lock);
- info->ring_error = 0;
atomic_set(&info->nr_unreplied_reqs, 0);
init_waitqueue_head(&info->waiting_to_free);
info->dev = dev;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 2690318ad50f..fd80e318b99c 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -73,16 +73,13 @@ struct map_ring_valloc {
struct xenbus_map_node *node;
/* Why do we need two arrays? See comment of __xenbus_map_ring */
- union {
- unsigned long addrs[XENBUS_MAX_RING_GRANTS];
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- };
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
- unsigned int idx; /* HVM only. */
+ unsigned int idx;
};
static DEFINE_SPINLOCK(xenbus_valloc_lock);
@@ -686,6 +683,14 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
+static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
+{
+ struct map_ring_valloc *info = data;
+
+ info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
+ return 0;
+}
+
static int xenbus_map_ring_pv(struct xenbus_device *dev,
struct map_ring_valloc *info,
grant_ref_t *gnt_refs,
@@ -694,18 +699,15 @@ static int xenbus_map_ring_pv(struct xenbus_device *dev,
{
struct xenbus_map_node *node = info->node;
struct vm_struct *area;
- int err = GNTST_okay;
- int i;
- bool leaked;
+ bool leaked = false;
+ int err = -ENOMEM;
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+ area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
if (!area)
return -ENOMEM;
-
- for (i = 0; i < nr_grefs; i++)
- info->phys_addrs[i] =
- arbitrary_virt_to_machine(info->ptes[i]).maddr;
-
+ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+ XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
+ goto failed;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
info, GNTMAP_host_map | GNTMAP_contains_pte,
&leaked);
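With alloc_vm_area() on its way out of the tree, the PV path above pairs get_vm_area() with apply_to_page_range(), whose callback is invoked for every PTE slot of the new area; xenbus uses the walk to record machine addresses. A generic sketch, where struct my_ctx and note_pte() are placeholders:

/* Sketch of the PTE walk over a fresh VM_IOREMAP area. */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <xen/page.h>

struct my_ctx {
    phys_addr_t maddrs[16];
    unsigned int idx;
};

static int note_pte(pte_t *pte, unsigned long addr, void *data)
{
    struct my_ctx *ctx = data;

    ctx->maddrs[ctx->idx++] = arbitrary_virt_to_machine(pte).maddr;
    return 0; /* non-zero aborts the walk and is propagated */
}

static void *map_area(size_t size, struct my_ctx *ctx)
{
    struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

    if (!area)
        return NULL;
    if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                            size, note_pte, ctx)) {
        free_vm_area(area);
        return NULL;
    }
    return area->addr;
}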
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 47c733817903..1b9928648583 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -181,7 +181,7 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
z->resource.name = z->name;
r = zorro_find_parent_resource(pdev, z);
error = request_resource(r, &z->resource);
- if (error)
+ if (error && !(z->rom.er_Type & ERTF_MEMLIST))
dev_err(&bus->dev,
"Address space collision on device %s %pR\n",
z->name, &z->resource);